author     unknown <ram@mysql.r18.ru>  2002-10-30 15:57:05 +0400
committer  unknown <ram@mysql.r18.ru>  2002-10-30 15:57:05 +0400
commit     155e78f014de1a2e259ae5119f4621fbb210a784 (patch)
tree       6881a3cca88bea0bb9eeffd5aae34be437152786 /bdb
parent     b8798d25ab71436bf690ee8ae48285a655c5487e (diff)
download   mariadb-git-155e78f014de1a2e259ae5119f4621fbb210a784.tar.gz
BDB 4.1.24
BitKeeper/deleted/.del-ex_access.wpj~3df6ae8c99bf7c5f: Delete: bdb/build_vxworks/ex_access/ex_access.wpj
BitKeeper/deleted/.del-ex_btrec.wpj~a7622f1c6f432dc6: Delete: bdb/build_vxworks/ex_btrec/ex_btrec.wpj
BitKeeper/deleted/.del-ex_dbclient.wpj~7345440f3b204cdd: Delete: bdb/build_vxworks/ex_dbclient/ex_dbclient.wpj
BitKeeper/deleted/.del-ex_env.wpj~fbe1ab10b04e8b74: Delete: bdb/build_vxworks/ex_env/ex_env.wpj
BitKeeper/deleted/.del-ex_mpool.wpj~4479cfd5c45f327d: Delete: bdb/build_vxworks/ex_mpool/ex_mpool.wpj
BitKeeper/deleted/.del-ex_tpcb.wpj~f78093006e14bf41: Delete: bdb/build_vxworks/ex_tpcb/ex_tpcb.wpj
BitKeeper/deleted/.del-db_buildall.dsp~bd749ff6da11682: Delete: bdb/build_win32/db_buildall.dsp
BitKeeper/deleted/.del-cxx_app.cpp~ad8df8e0791011ed: Delete: bdb/cxx/cxx_app.cpp
BitKeeper/deleted/.del-cxx_log.cpp~a50ff3118fe06952: Delete: bdb/cxx/cxx_log.cpp
BitKeeper/deleted/.del-cxx_table.cpp~ecd751e79b055556: Delete: bdb/cxx/cxx_table.cpp
BitKeeper/deleted/.del-namemap.txt~796a3acd3885d8fd: Delete: bdb/cxx/namemap.txt
BitKeeper/deleted/.del-Design.fileop~3ca4da68f1727373: Delete: bdb/db/Design.fileop
BitKeeper/deleted/.del-db185_int.h~61bee3736e7959ef: Delete: bdb/db185/db185_int.h
BitKeeper/deleted/.del-acconfig.h~411e8854d67ad8b5: Delete: bdb/dist/acconfig.h
BitKeeper/deleted/.del-mutex.m4~a13383cde18a64e1: Delete: bdb/dist/aclocal/mutex.m4
BitKeeper/deleted/.del-options.m4~b9d0ca637213750a: Delete: bdb/dist/aclocal/options.m4
BitKeeper/deleted/.del-programs.m4~3ce7890b47732b30: Delete: bdb/dist/aclocal/programs.m4
BitKeeper/deleted/.del-tcl.m4~f944e2db93c3b6db: Delete: bdb/dist/aclocal/tcl.m4
BitKeeper/deleted/.del-types.m4~59cae158c9a32cff: Delete: bdb/dist/aclocal/types.m4
BitKeeper/deleted/.del-script~d38f6d3a4f159cb4: Delete: bdb/dist/build/script
BitKeeper/deleted/.del-configure.in~ac795a92c8fe049c: Delete: bdb/dist/configure.in
BitKeeper/deleted/.del-ltconfig~66bbd007d8024af: Delete: bdb/dist/ltconfig
BitKeeper/deleted/.del-rec_ctemp~a28554362534f00a: Delete: bdb/dist/rec_ctemp
BitKeeper/deleted/.del-s_tcl~2ffe4326459fcd9f: Delete: bdb/dist/s_tcl
BitKeeper/deleted/.del-.IGNORE_ME~d8148b08fa7d5d15: Delete: bdb/dist/template/.IGNORE_ME
BitKeeper/deleted/.del-btree.h~179f2aefec1753d: Delete: bdb/include/btree.h
BitKeeper/deleted/.del-cxx_int.h~6b649c04766508f8: Delete: bdb/include/cxx_int.h
BitKeeper/deleted/.del-db.src~6b433ae615b16a8d: Delete: bdb/include/db.src
BitKeeper/deleted/.del-db_185.h~ad8b373d9391d35c: Delete: bdb/include/db_185.h
BitKeeper/deleted/.del-db_am.h~a714912b6b75932f: Delete: bdb/include/db_am.h
BitKeeper/deleted/.del-db_cxx.h~fcafadf45f5d19e9: Delete: bdb/include/db_cxx.h
BitKeeper/deleted/.del-db_dispatch.h~6844f20f7eb46904: Delete: bdb/include/db_dispatch.h
BitKeeper/deleted/.del-db_int.src~419a3f48b6a01da7: Delete: bdb/include/db_int.src
BitKeeper/deleted/.del-db_join.h~76f9747a42c3399a: Delete: bdb/include/db_join.h
BitKeeper/deleted/.del-db_page.h~e302ca3a4db3abdc: Delete: bdb/include/db_page.h
BitKeeper/deleted/.del-db_server_int.h~e1d20b6ba3bca1ab: Delete: bdb/include/db_server_int.h
BitKeeper/deleted/.del-db_shash.h~5fbf2d696fac90f3: Delete: bdb/include/db_shash.h
BitKeeper/deleted/.del-db_swap.h~1e60887550864a59: Delete: bdb/include/db_swap.h
BitKeeper/deleted/.del-db_upgrade.h~c644eee73701fc8d: Delete: bdb/include/db_upgrade.h
BitKeeper/deleted/.del-db_verify.h~b8d6c297c61f342e: Delete: bdb/include/db_verify.h
BitKeeper/deleted/.del-debug.h~dc2b4f2cf27ccebc: Delete: bdb/include/debug.h
BitKeeper/deleted/.del-hash.h~2aaa548b28882dfb: Delete: bdb/include/hash.h
BitKeeper/deleted/.del-lock.h~a761c1b7de57b77f: Delete: bdb/include/lock.h
BitKeeper/deleted/.del-log.h~ff20184238e35e4d: Delete: bdb/include/log.h
BitKeeper/deleted/.del-mp.h~7e317597622f3411: Delete: bdb/include/mp.h
BitKeeper/deleted/.del-mutex.h~d3ae7a2977a68137: Delete: bdb/include/mutex.h
BitKeeper/deleted/.del-os.h~91867cc8757cd0e3: Delete: bdb/include/os.h
BitKeeper/deleted/.del-os_jump.h~e1b939fa5151d4be: Delete: bdb/include/os_jump.h
BitKeeper/deleted/.del-qam.h~6fad0c1b5723d597: Delete: bdb/include/qam.h
BitKeeper/deleted/.del-queue.h~4c72c0826c123d5: Delete: bdb/include/queue.h
BitKeeper/deleted/.del-region.h~513fe04d977ca0fc: Delete: bdb/include/region.h
BitKeeper/deleted/.del-shqueue.h~525fc3e6c2025c36: Delete: bdb/include/shqueue.h
BitKeeper/deleted/.del-tcl_db.h~c536fd61a844f23f: Delete: bdb/include/tcl_db.h
BitKeeper/deleted/.del-txn.h~c8d94b221ec147e4: Delete: bdb/include/txn.h
BitKeeper/deleted/.del-xa.h~ecc466493aae9d9a: Delete: bdb/include/xa.h
BitKeeper/deleted/.del-DbRecoveryInit.java~756b52601a0b9023: Delete: bdb/java/src/com/sleepycat/db/DbRecoveryInit.java
BitKeeper/deleted/.del-DbTxnRecover.java~74607cba7ab89d6d: Delete: bdb/java/src/com/sleepycat/db/DbTxnRecover.java
BitKeeper/deleted/.del-lock_conflict.c~fc5e0f14cf597a2b: Delete: bdb/lock/lock_conflict.c
BitKeeper/deleted/.del-log.src~53ac9e7b5cb023f2: Delete: bdb/log/log.src
BitKeeper/deleted/.del-log_findckp.c~24287f008916e81f: Delete: bdb/log/log_findckp.c
BitKeeper/deleted/.del-log_rec.c~d51711f2cac09297: Delete: bdb/log/log_rec.c
BitKeeper/deleted/.del-log_register.c~b40bb4efac75ca15: Delete: bdb/log/log_register.c
BitKeeper/deleted/.del-Design~b3d0f179f2767b: Delete: bdb/mp/Design
BitKeeper/deleted/.del-os_finit.c~95dbefc6fe79b26c: Delete: bdb/os/os_finit.c
BitKeeper/deleted/.del-os_abs.c~df95d1e7db81924: Delete: bdb/os_vxworks/os_abs.c
BitKeeper/deleted/.del-os_finit.c~803b484bdb9d0122: Delete: bdb/os_vxworks/os_finit.c
BitKeeper/deleted/.del-os_map.c~3a6d7926398b76d3: Delete: bdb/os_vxworks/os_map.c
BitKeeper/deleted/.del-os_finit.c~19a227c6d3c78ad: Delete: bdb/os_win32/os_finit.c
BitKeeper/deleted/.del-log-corruption.patch~1cf2ecc7c6408d5d: Delete: bdb/patches/log-corruption.patch
BitKeeper/deleted/.del-Btree.pm~af6d0c5eaed4a98e: Delete: bdb/perl.BerkeleyDB/BerkeleyDB/Btree.pm
BitKeeper/deleted/.del-BerkeleyDB.pm~7244036d4482643: Delete: bdb/perl.BerkeleyDB/BerkeleyDB.pm
BitKeeper/deleted/.del-BerkeleyDB.pod~e7b18fd6132448e3: Delete: bdb/perl.BerkeleyDB/BerkeleyDB.pod
BitKeeper/deleted/.del-Hash.pm~10292a26c06a5c95: Delete: bdb/perl.BerkeleyDB/BerkeleyDB/Hash.pm
BitKeeper/deleted/.del-BerkeleyDB.pod.P~79f76a1495eda203: Delete: bdb/perl.BerkeleyDB/BerkeleyDB.pod.P
BitKeeper/deleted/.del-BerkeleyDB.xs~80c99afbd98e392c: Delete: bdb/perl.BerkeleyDB/BerkeleyDB.xs
BitKeeper/deleted/.del-Changes~729c1891efa60de9: Delete: bdb/perl.BerkeleyDB/Changes
BitKeeper/deleted/.del-MANIFEST~63a1e34aecf157a0: Delete: bdb/perl.BerkeleyDB/MANIFEST
BitKeeper/deleted/.del-Makefile.PL~c68797707d8df87a: Delete: bdb/perl.BerkeleyDB/Makefile.PL
BitKeeper/deleted/.del-README~5f2f579b1a241407: Delete: bdb/perl.BerkeleyDB/README
BitKeeper/deleted/.del-Todo~dca3c66c193adda9: Delete: bdb/perl.BerkeleyDB/Todo
BitKeeper/deleted/.del-config.in~ae81681e450e0999: Delete: bdb/perl.BerkeleyDB/config.in
BitKeeper/deleted/.del-dbinfo~28ad67d83be4f68e: Delete: bdb/perl.BerkeleyDB/dbinfo
BitKeeper/deleted/.del-mkconsts~543ab60669c7a04e: Delete: bdb/perl.BerkeleyDB/mkconsts
BitKeeper/deleted/.del-mkpod~182c0ca54e439afb: Delete: bdb/perl.BerkeleyDB/mkpod
BitKeeper/deleted/.del-5.004~e008cb5a48805543: Delete: bdb/perl.BerkeleyDB/patches/5.004
BitKeeper/deleted/.del-irix_6_5.pl~61662bb08afcdec8: Delete: bdb/perl.BerkeleyDB/hints/irix_6_5.pl
BitKeeper/deleted/.del-solaris.pl~6771e7182394e152: Delete: bdb/perl.BerkeleyDB/hints/solaris.pl
BitKeeper/deleted/.del-typemap~783b8f5295b05f3d: Delete: bdb/perl.BerkeleyDB/typemap
BitKeeper/deleted/.del-5.004_01~6081ce2fff7b0bc: Delete: bdb/perl.BerkeleyDB/patches/5.004_01
BitKeeper/deleted/.del-5.004_02~87214eac35ad9e6: Delete: bdb/perl.BerkeleyDB/patches/5.004_02
BitKeeper/deleted/.del-5.004_03~9a672becec7cb40f: Delete: bdb/perl.BerkeleyDB/patches/5.004_03
BitKeeper/deleted/.del-5.004_04~e326cb51af09d154: Delete: bdb/perl.BerkeleyDB/patches/5.004_04
BitKeeper/deleted/.del-5.004_05~7ab457a1e41a92fe: Delete: bdb/perl.BerkeleyDB/patches/5.004_05
BitKeeper/deleted/.del-5.005~f9e2d59b5964cd4b: Delete: bdb/perl.BerkeleyDB/patches/5.005
BitKeeper/deleted/.del-5.005_01~3eb9fb7b5842ea8e: Delete: bdb/perl.BerkeleyDB/patches/5.005_01
BitKeeper/deleted/.del-5.005_02~67477ce0bef717cb: Delete: bdb/perl.BerkeleyDB/patches/5.005_02
BitKeeper/deleted/.del-5.005_03~c4c29a1fb21e290a: Delete: bdb/perl.BerkeleyDB/patches/5.005_03
BitKeeper/deleted/.del-5.6.0~e1fb9897d124ee22: Delete: bdb/perl.BerkeleyDB/patches/5.6.0
BitKeeper/deleted/.del-btree.t~e4a1a3c675ddc406: Delete: bdb/perl.BerkeleyDB/t/btree.t
BitKeeper/deleted/.del-db-3.0.t~d2c60991d84558f2: Delete: bdb/perl.BerkeleyDB/t/db-3.0.t
BitKeeper/deleted/.del-db-3.1.t~6ee88cd13f55e018: Delete: bdb/perl.BerkeleyDB/t/db-3.1.t
BitKeeper/deleted/.del-db-3.2.t~f73b6461f98fd1cf: Delete: bdb/perl.BerkeleyDB/t/db-3.2.t
BitKeeper/deleted/.del-destroy.t~cc6a2ae1980a2ecd: Delete: bdb/perl.BerkeleyDB/t/destroy.t
BitKeeper/deleted/.del-env.t~a8604a4499c4bd07: Delete: bdb/perl.BerkeleyDB/t/env.t
BitKeeper/deleted/.del-examples.t~2571b77c3cc75574: Delete: bdb/perl.BerkeleyDB/t/examples.t
BitKeeper/deleted/.del-examples.t.T~8228bdd75ac78b88: Delete: bdb/perl.BerkeleyDB/t/examples.t.T
BitKeeper/deleted/.del-examples3.t.T~66a186897a87026d: Delete: bdb/perl.BerkeleyDB/t/examples3.t.T
BitKeeper/deleted/.del-examples3.t~fe3822ba2f2d7f83: Delete: bdb/perl.BerkeleyDB/t/examples3.t
BitKeeper/deleted/.del-filter.t~f87b045c1b708637: Delete: bdb/perl.BerkeleyDB/t/filter.t
BitKeeper/deleted/.del-hash.t~616bfb4d644de3a3: Delete: bdb/perl.BerkeleyDB/t/hash.t
BitKeeper/deleted/.del-join.t~29fc39f74a83ca22: Delete: bdb/perl.BerkeleyDB/t/join.t
BitKeeper/deleted/.del-mldbm.t~31f5015341eea040: Delete: bdb/perl.BerkeleyDB/t/mldbm.t
BitKeeper/deleted/.del-queue.t~8f338034ce44a641: Delete: bdb/perl.BerkeleyDB/t/queue.t
BitKeeper/deleted/.del-recno.t~d4ddbd3743add63e: Delete: bdb/perl.BerkeleyDB/t/recno.t
BitKeeper/deleted/.del-strict.t~6885cdd2ea71ca2d: Delete: bdb/perl.BerkeleyDB/t/strict.t
BitKeeper/deleted/.del-subdb.t~aab62a5d5864c603: Delete: bdb/perl.BerkeleyDB/t/subdb.t
BitKeeper/deleted/.del-txn.t~65033b8558ae1216: Delete: bdb/perl.BerkeleyDB/t/txn.t
BitKeeper/deleted/.del-unknown.t~f3710458682665e1: Delete: bdb/perl.BerkeleyDB/t/unknown.t
BitKeeper/deleted/.del-Changes~436f74a5c414c65b: Delete: bdb/perl.DB_File/Changes
BitKeeper/deleted/.del-DB_File.pm~ae0951c6c7665a82: Delete: bdb/perl.DB_File/DB_File.pm
BitKeeper/deleted/.del-DB_File.xs~89e49a0b5556f1d8: Delete: bdb/perl.DB_File/DB_File.xs
BitKeeper/deleted/.del-DB_File_BS~290fad5dbbb87069: Delete: bdb/perl.DB_File/DB_File_BS
BitKeeper/deleted/.del-MANIFEST~90ee581572bdd4ac: Delete: bdb/perl.DB_File/MANIFEST
BitKeeper/deleted/.del-Makefile.PL~ac0567bb5a377e38: Delete: bdb/perl.DB_File/Makefile.PL
BitKeeper/deleted/.del-README~77e924a5a9bae6b3: Delete: bdb/perl.DB_File/README
BitKeeper/deleted/.del-config.in~ab4c2792b86a810b: Delete: bdb/perl.DB_File/config.in
BitKeeper/deleted/.del-dbinfo~461c43b30fab2cb: Delete: bdb/perl.DB_File/dbinfo
BitKeeper/deleted/.del-dynixptx.pl~50dcddfae25d17e9: Delete: bdb/perl.DB_File/hints/dynixptx.pl
BitKeeper/deleted/.del-typemap~55cffb3288a9e587: Delete: bdb/perl.DB_File/typemap
BitKeeper/deleted/.del-version.c~a4df0e646f8b3975: Delete: bdb/perl.DB_File/version.c
BitKeeper/deleted/.del-5.004_01~d6830d0082702af7: Delete: bdb/perl.DB_File/patches/5.004_01
BitKeeper/deleted/.del-5.004_02~78b082dc80c91031: Delete: bdb/perl.DB_File/patches/5.004_02
BitKeeper/deleted/.del-5.004~4411ec2e3c9e008b: Delete: bdb/perl.DB_File/patches/5.004
BitKeeper/deleted/.del-sco.pl~1e795fe14fe4dcfe: Delete: bdb/perl.DB_File/hints/sco.pl
BitKeeper/deleted/.del-5.004_03~33f274648b160d95: Delete: bdb/perl.DB_File/patches/5.004_03
BitKeeper/deleted/.del-5.004_04~8f3d1b3cf18bb20a: Delete: bdb/perl.DB_File/patches/5.004_04
BitKeeper/deleted/.del-5.004_05~9c0f02e7331e142: Delete: bdb/perl.DB_File/patches/5.004_05
BitKeeper/deleted/.del-5.005~c2108cb2e3c8d951: Delete: bdb/perl.DB_File/patches/5.005
BitKeeper/deleted/.del-5.005_01~3b45e9673afc4cfa: Delete: bdb/perl.DB_File/patches/5.005_01
BitKeeper/deleted/.del-5.005_02~9fe5766bb02a4522: Delete: bdb/perl.DB_File/patches/5.005_02
BitKeeper/deleted/.del-5.005_03~ffa1c38c19ae72ea: Delete: bdb/perl.DB_File/patches/5.005_03
BitKeeper/deleted/.del-5.6.0~373be3a5ce47be85: Delete: bdb/perl.DB_File/patches/5.6.0
BitKeeper/deleted/.del-db-btree.t~3231595a1c241eb3: Delete: bdb/perl.DB_File/t/db-btree.t
BitKeeper/deleted/.del-db-hash.t~7c4ad0c795c7fad2: Delete: bdb/perl.DB_File/t/db-hash.t
BitKeeper/deleted/.del-db-recno.t~6c2d3d80b9ba4a50: Delete: bdb/perl.DB_File/t/db-recno.t
BitKeeper/deleted/.del-db_server.sed~cdb00ebcd48a64e2: Delete: bdb/rpc_server/db_server.sed
BitKeeper/deleted/.del-db_server_proc.c~d46c8f409c3747f4: Delete: bdb/rpc_server/db_server_proc.c
BitKeeper/deleted/.del-db_server_svc.sed~3f5e59f334fa4607: Delete: bdb/rpc_server/db_server_svc.sed
BitKeeper/deleted/.del-db_server_util.c~a809f3a4629acda: Delete: bdb/rpc_server/db_server_util.c
BitKeeper/deleted/.del-log.tcl~ff1b41f1355b97d7: Delete: bdb/test/log.tcl
BitKeeper/deleted/.del-mpool.tcl~b0df4dc1b04db26c: Delete: bdb/test/mpool.tcl
BitKeeper/deleted/.del-mutex.tcl~52fd5c73a150565: Delete: bdb/test/mutex.tcl
BitKeeper/deleted/.del-txn.tcl~c4ff071550b5446e: Delete: bdb/test/txn.tcl
BitKeeper/deleted/.del-README~e800a12a5392010a: Delete: bdb/test/upgrade/README
BitKeeper/deleted/.del-pack-2.6.6.pl~89d5076d758d3e98: Delete: bdb/test/upgrade/generate-2.X/pack-2.6.6.pl
BitKeeper/deleted/.del-test-2.6.patch~4a52dc83d447547b: Delete: bdb/test/upgrade/generate-2.X/test-2.6.patch
Diffstat (limited to 'bdb')
-rw-r--r--bdb/LICENSE4
-rw-r--r--bdb/btree/bt_compare.c14
-rw-r--r--bdb/btree/bt_conv.c30
-rw-r--r--bdb/btree/bt_curadj.c55
-rw-r--r--bdb/btree/bt_cursor.c1193
-rw-r--r--bdb/btree/bt_delete.c186
-rw-r--r--bdb/btree/bt_method.c33
-rw-r--r--bdb/btree/bt_open.c425
-rw-r--r--bdb/btree/bt_put.c165
-rw-r--r--bdb/btree/bt_rec.c494
-rw-r--r--bdb/btree/bt_reclaim.c45
-rw-r--r--bdb/btree/bt_recno.c430
-rw-r--r--bdb/btree/bt_rsearch.c85
-rw-r--r--bdb/btree/bt_search.c92
-rw-r--r--bdb/btree/bt_split.c323
-rw-r--r--bdb/btree/bt_stat.c203
-rw-r--r--bdb/btree/bt_upgrade.c24
-rw-r--r--bdb/btree/bt_verify.c526
-rw-r--r--bdb/btree/btree.src158
-rw-r--r--bdb/build_vxworks/BerkeleyDB.wpj5378
-rw-r--r--bdb/build_vxworks/BerkeleyDB.wsp17
-rw-r--r--bdb/build_vxworks/BerkeleyDB/Makefile.custom51
-rwxr-xr-xbdb/build_vxworks/BerkeleyDB/component.cdf1220
-rwxr-xr-xbdb/build_vxworks/BerkeleyDB/component.wpj6764
-rw-r--r--bdb/build_vxworks/db_archive/db_archive.c195
-rwxr-xr-xbdb/build_vxworks/db_archive/db_archive.wpj160
-rw-r--r--bdb/build_vxworks/db_archive/db_archive/Makefile.custom51
-rwxr-xr-xbdb/build_vxworks/db_archive/db_archive/component.cdf30
-rwxr-xr-xbdb/build_vxworks/db_archive/db_archive/component.wpj475
-rw-r--r--bdb/build_vxworks/db_checkpoint/db_checkpoint.c258
-rwxr-xr-xbdb/build_vxworks/db_checkpoint/db_checkpoint.wpj160
-rw-r--r--bdb/build_vxworks/db_checkpoint/db_checkpoint/Makefile.custom51
-rwxr-xr-xbdb/build_vxworks/db_checkpoint/db_checkpoint/component.cdf30
-rwxr-xr-xbdb/build_vxworks/db_checkpoint/db_checkpoint/component.wpj475
-rw-r--r--bdb/build_vxworks/db_config.h388
-rw-r--r--bdb/build_vxworks/db_deadlock/db_deadlock.c249
-rwxr-xr-xbdb/build_vxworks/db_deadlock/db_deadlock.wpj160
-rw-r--r--bdb/build_vxworks/db_deadlock/db_deadlock/Makefile.custom51
-rwxr-xr-xbdb/build_vxworks/db_deadlock/db_deadlock/component.cdf30
-rwxr-xr-xbdb/build_vxworks/db_deadlock/db_deadlock/component.wpj475
-rw-r--r--bdb/build_vxworks/db_dump/db_dump.c626
-rwxr-xr-xbdb/build_vxworks/db_dump/db_dump.wpj160
-rw-r--r--bdb/build_vxworks/db_dump/db_dump/Makefile.custom51
-rwxr-xr-xbdb/build_vxworks/db_dump/db_dump/component.cdf30
-rwxr-xr-xbdb/build_vxworks/db_dump/db_dump/component.wpj475
-rw-r--r--bdb/build_vxworks/db_load/db_load.c1247
-rwxr-xr-xbdb/build_vxworks/db_load/db_load.wpj160
-rw-r--r--bdb/build_vxworks/db_load/db_load/Makefile.custom51
-rwxr-xr-xbdb/build_vxworks/db_load/db_load/component.cdf30
-rwxr-xr-xbdb/build_vxworks/db_load/db_load/component.wpj475
-rw-r--r--bdb/build_vxworks/db_printlog/db_printlog.c375
-rwxr-xr-xbdb/build_vxworks/db_printlog/db_printlog.wpj160
-rw-r--r--bdb/build_vxworks/db_printlog/db_printlog/Makefile.custom51
-rwxr-xr-xbdb/build_vxworks/db_printlog/db_printlog/component.cdf30
-rwxr-xr-xbdb/build_vxworks/db_printlog/db_printlog/component.wpj475
-rw-r--r--bdb/build_vxworks/db_recover/db_recover.c328
-rwxr-xr-xbdb/build_vxworks/db_recover/db_recover.wpj160
-rw-r--r--bdb/build_vxworks/db_recover/db_recover/Makefile.custom51
-rwxr-xr-xbdb/build_vxworks/db_recover/db_recover/component.cdf30
-rwxr-xr-xbdb/build_vxworks/db_recover/db_recover/component.wpj475
-rw-r--r--bdb/build_vxworks/db_stat/db_stat.c1282
-rwxr-xr-xbdb/build_vxworks/db_stat/db_stat.wpj160
-rw-r--r--bdb/build_vxworks/db_stat/db_stat/Makefile.custom51
-rwxr-xr-xbdb/build_vxworks/db_stat/db_stat/component.cdf30
-rwxr-xr-xbdb/build_vxworks/db_stat/db_stat/component.wpj475
-rw-r--r--bdb/build_vxworks/db_upgrade/db_upgrade.c205
-rwxr-xr-xbdb/build_vxworks/db_upgrade/db_upgrade.wpj160
-rw-r--r--bdb/build_vxworks/db_upgrade/db_upgrade/Makefile.custom51
-rwxr-xr-xbdb/build_vxworks/db_upgrade/db_upgrade/component.cdf30
-rwxr-xr-xbdb/build_vxworks/db_upgrade/db_upgrade/component.wpj475
-rw-r--r--bdb/build_vxworks/db_verify/db_verify.c263
-rwxr-xr-xbdb/build_vxworks/db_verify/db_verify.wpj160
-rw-r--r--bdb/build_vxworks/db_verify/db_verify/Makefile.custom51
-rwxr-xr-xbdb/build_vxworks/db_verify/db_verify/component.cdf30
-rwxr-xr-xbdb/build_vxworks/db_verify/db_verify/component.wpj475
-rw-r--r--bdb/build_vxworks/dbdemo/README39
-rw-r--r--bdb/build_vxworks/dbdemo/dbdemo.c178
-rwxr-xr-xbdb/build_vxworks/dbdemo/dbdemo.wpj160
-rw-r--r--bdb/build_vxworks/dbdemo/dbdemo/Makefile.custom51
-rwxr-xr-xbdb/build_vxworks/dbdemo/dbdemo/component.cdf30
-rwxr-xr-xbdb/build_vxworks/dbdemo/dbdemo/component.wpj475
-rw-r--r--bdb/build_vxworks/ex_access/ex_access.wpj244
-rw-r--r--bdb/build_vxworks/ex_btrec/ex_btrec.wpj250
-rw-r--r--bdb/build_vxworks/ex_dbclient/ex_dbclient.wpj266
-rw-r--r--bdb/build_vxworks/ex_env/ex_env.wpj248
-rw-r--r--bdb/build_vxworks/ex_mpool/ex_mpool.wpj248
-rw-r--r--bdb/build_vxworks/ex_tpcb/ex_tpcb.wpj261
-rw-r--r--bdb/build_win32/Berkeley_DB.dsw219
-rw-r--r--bdb/build_win32/app_dsp.src27
-rw-r--r--bdb/build_win32/build_all.dsp96
-rw-r--r--bdb/build_win32/db_buildall.dsp128
-rw-r--r--bdb/build_win32/db_config.h411
-rw-r--r--bdb/build_win32/db_cxx.h796
-rw-r--r--bdb/build_win32/db_java_xa.dsp85
-rw-r--r--bdb/build_win32/db_java_xaj.mak21
-rw-r--r--bdb/build_win32/db_lib.dsp92
-rw-r--r--bdb/build_win32/db_perf.dsp216
-rw-r--r--bdb/build_win32/db_test.dsp19
-rw-r--r--bdb/build_win32/db_test.src97
-rw-r--r--bdb/build_win32/dbkill.cpp14
-rw-r--r--bdb/build_win32/dynamic_dsp.src83
-rw-r--r--bdb/build_win32/java_dsp.src60
-rw-r--r--bdb/build_win32/libdb.def275
-rw-r--r--bdb/build_win32/libdb_tcl.def10
-rw-r--r--bdb/build_win32/libdbrc.src2
-rw-r--r--bdb/build_win32/static_dsp.src84
-rw-r--r--bdb/build_win32/tcl_dsp.src19
-rw-r--r--bdb/clib/getcwd.c20
-rw-r--r--bdb/clib/getopt.c19
-rw-r--r--bdb/clib/memcmp.c4
-rw-r--r--bdb/clib/memmove.c4
-rw-r--r--bdb/clib/raise.c8
-rw-r--r--bdb/clib/snprintf.c35
-rw-r--r--bdb/clib/strcasecmp.c34
-rw-r--r--bdb/clib/strdup.c67
-rw-r--r--bdb/clib/strerror.c4
-rw-r--r--bdb/clib/vsnprintf.c6
-rw-r--r--bdb/common/db_byteorder.c50
-rw-r--r--bdb/common/db_err.c359
-rw-r--r--bdb/common/db_getlong.c73
-rw-r--r--bdb/common/db_idspace.c93
-rw-r--r--bdb/common/db_log2.c5
-rw-r--r--bdb/common/util_arg.c126
-rw-r--r--bdb/common/util_cache.c92
-rw-r--r--bdb/common/util_log.c9
-rw-r--r--bdb/common/util_sig.c7
-rw-r--r--bdb/cxx/cxx_app.cpp671
-rw-r--r--bdb/cxx/cxx_db.cpp605
-rw-r--r--bdb/cxx/cxx_dbc.cpp115
-rw-r--r--bdb/cxx/cxx_dbt.cpp61
-rw-r--r--bdb/cxx/cxx_env.cpp802
-rw-r--r--bdb/cxx/cxx_except.cpp254
-rw-r--r--bdb/cxx/cxx_lock.cpp86
-rw-r--r--bdb/cxx/cxx_log.cpp125
-rw-r--r--bdb/cxx/cxx_logc.cpp65
-rw-r--r--bdb/cxx/cxx_mpool.cpp208
-rw-r--r--bdb/cxx/cxx_table.cpp808
-rw-r--r--bdb/cxx/cxx_txn.cpp143
-rw-r--r--bdb/cxx/namemap.txt21
-rw-r--r--bdb/db/Design.fileop452
-rw-r--r--bdb/db/crdel.src85
-rw-r--r--bdb/db/crdel_rec.c577
-rw-r--r--bdb/db/db.c2087
-rw-r--r--bdb/db/db.src133
-rw-r--r--bdb/db/db_am.c926
-rw-r--r--bdb/db/db_cam.c1538
-rw-r--r--bdb/db/db_conv.c290
-rw-r--r--bdb/db/db_dispatch.c1305
-rw-r--r--bdb/db/db_dup.c118
-rw-r--r--bdb/db/db_iface.c504
-rw-r--r--bdb/db/db_join.c250
-rw-r--r--bdb/db/db_meta.c287
-rw-r--r--bdb/db/db_method.c288
-rw-r--r--bdb/db/db_open.c705
-rw-r--r--bdb/db/db_overflow.c213
-rw-r--r--bdb/db/db_pr.c444
-rw-r--r--bdb/db/db_rec.c456
-rw-r--r--bdb/db/db_reclaim.c228
-rw-r--r--bdb/db/db_remove.c318
-rw-r--r--bdb/db/db_rename.c297
-rw-r--r--bdb/db/db_ret.c36
-rw-r--r--bdb/db/db_truncate.c95
-rw-r--r--bdb/db/db_upg.c27
-rw-r--r--bdb/db/db_upg_opd.c79
-rw-r--r--bdb/db/db_vrfy.c704
-rw-r--r--bdb/db/db_vrfyutil.c118
-rw-r--r--bdb/db185/db185.c97
-rw-r--r--bdb/db185/db185_int.in (renamed from bdb/db185/db185_int.h)10
-rw-r--r--bdb/db_archive/db_archive.c74
-rw-r--r--bdb/db_checkpoint/db_checkpoint.c106
-rw-r--r--bdb/db_deadlock/db_deadlock.c118
-rw-r--r--bdb/db_dump/db_dump.c254
-rw-r--r--bdb/db_dump185/db_dump185.c28
-rw-r--r--bdb/db_load/db_load.c594
-rw-r--r--bdb/db_printlog/README11
-rw-r--r--bdb/db_printlog/db_printlog.c280
-rw-r--r--bdb/db_printlog/dbname.awk8
-rw-r--r--bdb/db_printlog/logstat.awk36
-rw-r--r--bdb/db_printlog/status.awk28
-rw-r--r--bdb/db_recover/db_recover.c115
-rw-r--r--bdb/db_stat/db_stat.c696
-rw-r--r--bdb/db_upgrade/db_upgrade.c79
-rw-r--r--bdb/db_verify/db_verify.c180
-rw-r--r--bdb/dbinc/btree.h (renamed from bdb/include/btree.h)25
-rw-r--r--bdb/dbinc/crypto.h78
-rw-r--r--bdb/dbinc/cxx_common.h45
-rw-r--r--bdb/dbinc/cxx_except.h141
-rw-r--r--bdb/dbinc/cxx_int.h (renamed from bdb/include/cxx_int.h)27
-rw-r--r--bdb/dbinc/db.in1883
-rw-r--r--bdb/dbinc/db_185.in (renamed from bdb/include/db_185.h)14
-rw-r--r--bdb/dbinc/db_am.h127
-rw-r--r--bdb/dbinc/db_cxx.in795
-rw-r--r--bdb/dbinc/db_dispatch.h (renamed from bdb/include/db_dispatch.h)44
-rw-r--r--bdb/dbinc/db_int.in (renamed from bdb/include/db_int.src)314
-rw-r--r--bdb/dbinc/db_join.h (renamed from bdb/include/db_join.h)5
-rw-r--r--bdb/dbinc/db_page.h (renamed from bdb/include/db_page.h)185
-rw-r--r--bdb/dbinc/db_server_int.h (renamed from bdb/include/db_server_int.h)69
-rw-r--r--bdb/dbinc/db_shash.h (renamed from bdb/include/db_shash.h)8
-rw-r--r--bdb/dbinc/db_swap.h (renamed from bdb/include/db_swap.h)25
-rw-r--r--bdb/dbinc/db_upgrade.h (renamed from bdb/include/db_upgrade.h)76
-rw-r--r--bdb/dbinc/db_verify.h (renamed from bdb/include/db_verify.h)40
-rw-r--r--bdb/dbinc/debug.h198
-rw-r--r--bdb/dbinc/fop.h16
-rw-r--r--bdb/dbinc/globals.h83
-rw-r--r--bdb/dbinc/hash.h (renamed from bdb/include/hash.h)21
-rw-r--r--bdb/dbinc/hmac.h32
-rw-r--r--bdb/dbinc/lock.h (renamed from bdb/include/lock.h)110
-rw-r--r--bdb/dbinc/log.h273
-rw-r--r--bdb/dbinc/mp.h293
-rw-r--r--bdb/dbinc/mutex.h (renamed from bdb/include/mutex.h)257
-rw-r--r--bdb/dbinc/os.h (renamed from bdb/include/os.h)22
-rw-r--r--bdb/dbinc/qam.h (renamed from bdb/include/qam.h)34
-rw-r--r--bdb/dbinc/queue.h (renamed from bdb/include/queue.h)0
-rw-r--r--bdb/dbinc/region.h (renamed from bdb/include/region.h)48
-rw-r--r--bdb/dbinc/rep.h184
-rw-r--r--bdb/dbinc/shqueue.h (renamed from bdb/include/shqueue.h)6
-rw-r--r--bdb/dbinc/tcl_db.h (renamed from bdb/include/tcl_db.h)74
-rw-r--r--bdb/dbinc/txn.h (renamed from bdb/include/txn.h)93
-rw-r--r--bdb/dbinc/xa.h (renamed from bdb/include/xa.h)4
-rw-r--r--bdb/dbinc_auto/btree_auto.h128
-rw-r--r--bdb/dbinc_auto/btree_ext.h132
-rw-r--r--bdb/dbinc_auto/clib_ext.h49
-rw-r--r--bdb/dbinc_auto/common_ext.h44
-rw-r--r--bdb/dbinc_auto/crdel_auto.h16
-rw-r--r--bdb/dbinc_auto/crypto_ext.h37
-rw-r--r--bdb/dbinc_auto/db_auto.h118
-rw-r--r--bdb/dbinc_auto/db_ext.h224
-rw-r--r--bdb/dbinc_auto/db_server.h1006
-rw-r--r--bdb/dbinc_auto/dbreg_auto.h19
-rw-r--r--bdb/dbinc_auto/dbreg_ext.h43
-rw-r--r--bdb/dbinc_auto/env_ext.h39
-rw-r--r--bdb/dbinc_auto/ext_185_def.in12
-rw-r--r--bdb/dbinc_auto/ext_185_prot.in19
-rw-r--r--bdb/dbinc_auto/ext_def.in61
-rw-r--r--bdb/dbinc_auto/ext_prot.in70
-rw-r--r--bdb/dbinc_auto/fileops_auto.h60
-rw-r--r--bdb/dbinc_auto/fileops_ext.h52
-rw-r--r--bdb/dbinc_auto/hash_auto.h132
-rw-r--r--bdb/dbinc_auto/hash_ext.h125
-rw-r--r--bdb/dbinc_auto/hmac_ext.h20
-rw-r--r--bdb/dbinc_auto/int_def.in1328
-rw-r--r--bdb/dbinc_auto/lock_ext.h41
-rw-r--r--bdb/dbinc_auto/log_ext.h32
-rw-r--r--bdb/dbinc_auto/mp_ext.h44
-rw-r--r--bdb/dbinc_auto/mutex_ext.h35
-rw-r--r--bdb/dbinc_auto/os_ext.h74
-rw-r--r--bdb/dbinc_auto/qam_auto.h70
-rw-r--r--bdb/dbinc_auto/qam_ext.h70
-rw-r--r--bdb/dbinc_auto/rep_ext.h30
-rw-r--r--bdb/dbinc_auto/rpc_client_ext.h167
-rw-r--r--bdb/dbinc_auto/rpc_defs.in4
-rw-r--r--bdb/dbinc_auto/rpc_server_ext.h126
-rw-r--r--bdb/dbinc_auto/tcl_ext.h82
-rw-r--r--bdb/dbinc_auto/txn_auto.h55
-rw-r--r--bdb/dbinc_auto/txn_ext.h70
-rw-r--r--bdb/dbinc_auto/xa_ext.h20
-rw-r--r--bdb/dbm/dbm.c50
-rw-r--r--bdb/dbreg/dbreg.c450
-rw-r--r--bdb/dbreg/dbreg.src49
-rw-r--r--bdb/dbreg/dbreg_auto.c358
-rw-r--r--bdb/dbreg/dbreg_rec.c362
-rw-r--r--bdb/dbreg/dbreg_util.c797
-rw-r--r--bdb/dist/Makefile.in1677
-rw-r--r--bdb/dist/RELEASE15
-rw-r--r--bdb/dist/acconfig.h109
-rw-r--r--bdb/dist/aclocal/config.ac51
-rw-r--r--bdb/dist/aclocal/cxx.ac17
-rw-r--r--bdb/dist/aclocal/gcc.ac36
-rw-r--r--bdb/dist/aclocal/libtool.ac3633
-rw-r--r--bdb/dist/aclocal/mutex.ac611
-rw-r--r--bdb/dist/aclocal/mutex.m4407
-rw-r--r--bdb/dist/aclocal/options.ac197
-rw-r--r--bdb/dist/aclocal/options.m4121
-rw-r--r--bdb/dist/aclocal/programs.ac80
-rw-r--r--bdb/dist/aclocal/programs.m448
-rw-r--r--bdb/dist/aclocal/sosuffix.ac69
-rw-r--r--bdb/dist/aclocal/tcl.ac136
-rw-r--r--bdb/dist/aclocal/tcl.m4126
-rw-r--r--bdb/dist/aclocal/types.ac146
-rw-r--r--bdb/dist/aclocal/types.m4139
-rw-r--r--bdb/dist/aclocal_java/ac_check_class.ac107
-rw-r--r--bdb/dist/aclocal_java/ac_check_classpath.ac23
-rw-r--r--bdb/dist/aclocal_java/ac_check_junit.ac54
-rw-r--r--bdb/dist/aclocal_java/ac_check_rqrd_class.ac26
-rw-r--r--bdb/dist/aclocal_java/ac_java_options.ac32
-rw-r--r--bdb/dist/aclocal_java/ac_jni_include_dirs.ac112
-rw-r--r--bdb/dist/aclocal_java/ac_prog_jar.ac36
-rw-r--r--bdb/dist/aclocal_java/ac_prog_java.ac77
-rw-r--r--bdb/dist/aclocal_java/ac_prog_java_works.ac97
-rw-r--r--bdb/dist/aclocal_java/ac_prog_javac.ac43
-rw-r--r--bdb/dist/aclocal_java/ac_prog_javac_works.ac35
-rw-r--r--bdb/dist/aclocal_java/ac_prog_javadoc.ac37
-rw-r--r--bdb/dist/aclocal_java/ac_prog_javah.ac26
-rw-r--r--bdb/dist/aclocal_java/ac_try_compile_java.ac39
-rw-r--r--bdb/dist/aclocal_java/ac_try_run_javac.ac40
-rw-r--r--bdb/dist/buildrel (renamed from bdb/dist/build/script)69
-rwxr-xr-xbdb/dist/config.guess1354
-rwxr-xr-xbdb/dist/config.sub1460
-rw-r--r--bdb/dist/configure.ac608
-rw-r--r--bdb/dist/configure.in591
-rw-r--r--bdb/dist/db.ecd.in64
-rw-r--r--bdb/dist/db.spec.in52
-rw-r--r--bdb/dist/gen_inc.awk73
-rw-r--r--bdb/dist/gen_rec.awk651
-rw-r--r--bdb/dist/gen_rpc.awk1172
-rwxr-xr-xbdb/dist/install-sh251
-rw-r--r--bdb/dist/ltconfig3136
-rw-r--r--bdb/dist/ltmain.sh2554
-rw-r--r--bdb/dist/pubdef.in350
-rw-r--r--bdb/dist/s_all11
-rwxr-xr-xbdb/dist/s_config36
-rw-r--r--bdb/dist/s_crypto57
-rwxr-xr-xbdb/dist/s_include181
-rwxr-xr-xbdb/dist/s_java280
-rwxr-xr-xbdb/dist/s_javah55
-rwxr-xr-xbdb/dist/s_perm60
-rwxr-xr-xbdb/dist/s_readme27
-rwxr-xr-xbdb/dist/s_recover75
-rw-r--r--bdb/dist/s_rpc137
-rwxr-xr-xbdb/dist/s_symlink51
-rwxr-xr-xbdb/dist/s_tags25
-rwxr-xr-xbdb/dist/s_tcl53
-rwxr-xr-xbdb/dist/s_test92
-rw-r--r--bdb/dist/s_vxworks306
-rwxr-xr-xbdb/dist/s_win3287
-rw-r--r--bdb/dist/s_win32_dsp25
-rw-r--r--bdb/dist/srcfiles.in581
-rw-r--r--bdb/dist/template/.IGNORE_ME3
-rw-r--r--bdb/dist/template/rec_ctemp (renamed from bdb/dist/rec_ctemp)18
-rw-r--r--bdb/dist/template/rec_dbreg75
-rw-r--r--bdb/dist/template/rec_fileops323
-rw-r--r--bdb/dist/vx_2.0/BerkeleyDB.wpj251
-rw-r--r--bdb/dist/vx_2.0/wpj.in160
-rw-r--r--bdb/dist/vx_3.1/Makefile.custom51
-rw-r--r--bdb/dist/vx_3.1/cdf.112
-rw-r--r--bdb/dist/vx_3.1/cdf.29
-rw-r--r--bdb/dist/vx_3.1/cdf.32
-rw-r--r--bdb/dist/vx_3.1/component.cdf30
-rw-r--r--bdb/dist/vx_3.1/component.wpj475
-rw-r--r--bdb/dist/vx_3.1/wpj.122
-rw-r--r--bdb/dist/vx_3.1/wpj.2130
-rw-r--r--bdb/dist/vx_3.1/wpj.3128
-rw-r--r--bdb/dist/vx_3.1/wpj.4135
-rw-r--r--bdb/dist/vx_3.1/wpj.522
-rwxr-xr-xbdb/dist/vx_buildcd119
-rw-r--r--bdb/dist/vx_config.in381
-rw-r--r--bdb/dist/vx_setup/CONFIG.in10
-rw-r--r--bdb/dist/vx_setup/LICENSE.TXT3
-rw-r--r--bdb/dist/vx_setup/MESSAGES.TCL651
-rw-r--r--bdb/dist/vx_setup/README.in7
-rw-r--r--bdb/dist/vx_setup/SETUP.BMP  bin  0 -> 187962 bytes
-rw-r--r--bdb/dist/vx_setup/vx_allfile.in5
-rw-r--r--bdb/dist/vx_setup/vx_demofile.in3
-rw-r--r--bdb/dist/vx_setup/vx_setup.in13
-rw-r--r--bdb/dist/win_config.in439
-rw-r--r--bdb/dist/win_exports.in134
-rw-r--r--bdb/env/db_salloc.c34
-rw-r--r--bdb/env/db_shash.c9
-rw-r--r--bdb/env/env_file.c166
-rw-r--r--bdb/env/env_method.c354
-rw-r--r--bdb/env/env_method.c.b643
-rw-r--r--bdb/env/env_open.c781
-rw-r--r--bdb/env/env_recover.c747
-rw-r--r--bdb/env/env_region.c247
-rw-r--r--bdb/examples_c/README8
-rw-r--r--bdb/examples_c/bench_001.c382
-rw-r--r--bdb/examples_c/ex_access.c95
-rw-r--r--bdb/examples_c/ex_apprec/auto_rebuild9
-rw-r--r--bdb/examples_c/ex_apprec/ex_apprec.c267
-rw-r--r--bdb/examples_c/ex_apprec/ex_apprec.h24
-rw-r--r--bdb/examples_c/ex_apprec/ex_apprec.src41
-rw-r--r--bdb/examples_c/ex_apprec/ex_apprec_auto.c188
-rw-r--r--bdb/examples_c/ex_apprec/ex_apprec_auto.h13
-rw-r--r--bdb/examples_c/ex_apprec/ex_apprec_rec.c115
-rw-r--r--bdb/examples_c/ex_apprec/ex_apprec_template75
-rw-r--r--bdb/examples_c/ex_btrec.c72
-rw-r--r--bdb/examples_c/ex_dbclient.c82
-rw-r--r--bdb/examples_c/ex_env.c77
-rw-r--r--bdb/examples_c/ex_lock.c60
-rw-r--r--bdb/examples_c/ex_mpool.c151
-rw-r--r--bdb/examples_c/ex_repquote/ex_repquote.h69
-rw-r--r--bdb/examples_c/ex_repquote/ex_rq_client.c250
-rw-r--r--bdb/examples_c/ex_repquote/ex_rq_main.c303
-rw-r--r--bdb/examples_c/ex_repquote/ex_rq_master.c165
-rw-r--r--bdb/examples_c/ex_repquote/ex_rq_net.c692
-rw-r--r--bdb/examples_c/ex_repquote/ex_rq_util.c412
-rw-r--r--bdb/examples_c/ex_thread.c143
-rw-r--r--bdb/examples_c/ex_tpcb.c317
-rw-r--r--bdb/examples_c/ex_tpcb.h4
-rw-r--r--bdb/examples_cxx/AccessExample.cpp41
-rw-r--r--bdb/examples_cxx/BtRecExample.cpp59
-rw-r--r--bdb/examples_cxx/EnvExample.cpp39
-rw-r--r--bdb/examples_cxx/LockExample.cpp48
-rw-r--r--bdb/examples_cxx/MpoolExample.cpp114
-rw-r--r--bdb/examples_cxx/TpcbExample.cpp213
-rw-r--r--bdb/fileops/fileops.src111
-rw-r--r--bdb/fileops/fileops_auto.c1371
-rw-r--r--bdb/fileops/fop_basic.c275
-rw-r--r--bdb/fileops/fop_rec.c308
-rw-r--r--bdb/fileops/fop_util.c928
-rw-r--r--bdb/hash/hash.c1386
-rw-r--r--bdb/hash/hash.src219
-rw-r--r--bdb/hash/hash_conv.c32
-rw-r--r--bdb/hash/hash_dup.c396
-rw-r--r--bdb/hash/hash_func.c11
-rw-r--r--bdb/hash/hash_meta.c56
-rw-r--r--bdb/hash/hash_method.c12
-rw-r--r--bdb/hash/hash_open.c558
-rw-r--r--bdb/hash/hash_page.c799
-rw-r--r--bdb/hash/hash_rec.c500
-rw-r--r--bdb/hash/hash_reclaim.c59
-rw-r--r--bdb/hash/hash_stat.c137
-rw-r--r--bdb/hash/hash_upgrade.c23
-rw-r--r--bdb/hash/hash_verify.c238
-rw-r--r--bdb/hmac/hmac.c207
-rw-r--r--bdb/hmac/sha1.c294
-rw-r--r--bdb/hsearch/hsearch.c24
-rw-r--r--bdb/include/db.src1383
-rw-r--r--bdb/include/db_am.h131
-rw-r--r--bdb/include/db_cxx.h652
-rw-r--r--bdb/include/debug.h104
-rw-r--r--bdb/include/log.h209
-rw-r--r--bdb/include/mp.h244
-rw-r--r--bdb/include/os_jump.h34
-rw-r--r--bdb/java/src/com/sleepycat/db/Db.java769
-rw-r--r--bdb/java/src/com/sleepycat/db/DbAppDispatch.java22
-rw-r--r--bdb/java/src/com/sleepycat/db/DbAppendRecno.java8
-rw-r--r--bdb/java/src/com/sleepycat/db/DbBtreeCompare.java8
-rw-r--r--bdb/java/src/com/sleepycat/db/DbBtreePrefix.java8
-rw-r--r--bdb/java/src/com/sleepycat/db/DbBtreeStat.java54
-rw-r--r--bdb/java/src/com/sleepycat/db/DbClient.java21
-rw-r--r--bdb/java/src/com/sleepycat/db/DbDeadlockException.java6
-rw-r--r--bdb/java/src/com/sleepycat/db/DbDupCompare.java8
-rw-r--r--bdb/java/src/com/sleepycat/db/DbEnv.java138
-rw-r--r--bdb/java/src/com/sleepycat/db/DbEnvFeedback.java6
-rw-r--r--bdb/java/src/com/sleepycat/db/DbErrcall.java6
-rw-r--r--bdb/java/src/com/sleepycat/db/DbException.java6
-rw-r--r--bdb/java/src/com/sleepycat/db/DbFeedback.java6
-rw-r--r--bdb/java/src/com/sleepycat/db/DbHash.java8
-rw-r--r--bdb/java/src/com/sleepycat/db/DbHashStat.java47
-rw-r--r--bdb/java/src/com/sleepycat/db/DbKeyRange.java6
-rw-r--r--bdb/java/src/com/sleepycat/db/DbLock.java11
-rw-r--r--bdb/java/src/com/sleepycat/db/DbLockNotGrantedException.java57
-rw-r--r--bdb/java/src/com/sleepycat/db/DbLockRequest.java67
-rw-r--r--bdb/java/src/com/sleepycat/db/DbLockStat.java48
-rw-r--r--bdb/java/src/com/sleepycat/db/DbLogStat.java50
-rw-r--r--bdb/java/src/com/sleepycat/db/DbLogc.java39
-rw-r--r--bdb/java/src/com/sleepycat/db/DbLsn.java6
-rw-r--r--bdb/java/src/com/sleepycat/db/DbMemoryException.java27
-rw-r--r--bdb/java/src/com/sleepycat/db/DbMpoolFStat.java30
-rw-r--r--bdb/java/src/com/sleepycat/db/DbMpoolStat.java10
-rw-r--r--bdb/java/src/com/sleepycat/db/DbMultipleDataIterator.java46
-rw-r--r--bdb/java/src/com/sleepycat/db/DbMultipleIterator.java51
-rw-r--r--bdb/java/src/com/sleepycat/db/DbMultipleKeyDataIterator.java56
-rw-r--r--bdb/java/src/com/sleepycat/db/DbMultipleRecnoDataIterator.java51
-rw-r--r--bdb/java/src/com/sleepycat/db/DbOutputStreamErrcall.java6
-rw-r--r--bdb/java/src/com/sleepycat/db/DbPreplist.java22
-rw-r--r--bdb/java/src/com/sleepycat/db/DbQueueStat.java39
-rw-r--r--bdb/java/src/com/sleepycat/db/DbRecoveryInit.java23
-rw-r--r--bdb/java/src/com/sleepycat/db/DbRepStat.java43
-rw-r--r--bdb/java/src/com/sleepycat/db/DbRepTransport.java19
-rw-r--r--bdb/java/src/com/sleepycat/db/DbRunRecoveryException.java6
-rw-r--r--bdb/java/src/com/sleepycat/db/DbSecondaryKeyCreate.java22
-rw-r--r--bdb/java/src/com/sleepycat/db/DbTxn.java42
-rw-r--r--bdb/java/src/com/sleepycat/db/DbTxnRecover.java22
-rw-r--r--bdb/java/src/com/sleepycat/db/DbTxnStat.java49
-rw-r--r--bdb/java/src/com/sleepycat/db/DbUtil.java98
-rw-r--r--bdb/java/src/com/sleepycat/db/Dbc.java10
-rw-r--r--bdb/java/src/com/sleepycat/db/Dbt.java210
-rw-r--r--bdb/java/src/com/sleepycat/db/xa/DbXAResource.java190
-rw-r--r--bdb/java/src/com/sleepycat/db/xa/DbXid.java49
-rw-r--r--bdb/java/src/com/sleepycat/examples/AccessExample.java19
-rw-r--r--bdb/java/src/com/sleepycat/examples/BtRecExample.java44
-rw-r--r--bdb/java/src/com/sleepycat/examples/BulkAccessExample.java198
-rw-r--r--bdb/java/src/com/sleepycat/examples/EnvExample.java4
-rw-r--r--bdb/java/src/com/sleepycat/examples/LockExample.java6
-rw-r--r--bdb/java/src/com/sleepycat/examples/TpcbExample.java48
-rw-r--r--bdb/libdb_java/checkapi.prl4
-rw-r--r--bdb/libdb_java/com_sleepycat_db_Db.h267
-rw-r--r--bdb/libdb_java/com_sleepycat_db_DbEnv.h238
-rw-r--r--bdb/libdb_java/com_sleepycat_db_DbLock.h10
-rw-r--r--bdb/libdb_java/com_sleepycat_db_DbLogc.h37
-rw-r--r--bdb/libdb_java/com_sleepycat_db_DbLsn.h2
-rw-r--r--bdb/libdb_java/com_sleepycat_db_DbTxn.h22
-rw-r--r--bdb/libdb_java/com_sleepycat_db_DbUtil.h22
-rw-r--r--bdb/libdb_java/com_sleepycat_db_Dbc.h10
-rw-r--r--bdb/libdb_java/com_sleepycat_db_Dbt.h134
-rw-r--r--bdb/libdb_java/com_sleepycat_db_xa_DbXAResource.h95
-rw-r--r--bdb/libdb_java/java_Db.c734
-rw-r--r--bdb/libdb_java/java_DbEnv.c1302
-rw-r--r--bdb/libdb_java/java_DbLock.c33
-rw-r--r--bdb/libdb_java/java_DbLogc.c110
-rw-r--r--bdb/libdb_java/java_DbLsn.c12
-rw-r--r--bdb/libdb_java/java_DbTxn.c71
-rw-r--r--bdb/libdb_java/java_DbUtil.c27
-rw-r--r--bdb/libdb_java/java_DbXAResource.c288
-rw-r--r--bdb/libdb_java/java_Dbc.c158
-rw-r--r--bdb/libdb_java/java_Dbt.c141
-rw-r--r--bdb/libdb_java/java_info.c816
-rw-r--r--bdb/libdb_java/java_info.h103
-rw-r--r--bdb/libdb_java/java_locked.c375
-rw-r--r--bdb/libdb_java/java_locked.h116
-rw-r--r--bdb/libdb_java/java_stat_auto.c207
-rw-r--r--bdb/libdb_java/java_stat_auto.h9
-rw-r--r--bdb/libdb_java/java_util.c584
-rw-r--r--bdb/libdb_java/java_util.h304
-rw-r--r--bdb/lock/Design10
-rw-r--r--bdb/lock/lock.c1085
-rw-r--r--bdb/lock/lock_conflict.c34
-rw-r--r--bdb/lock/lock_deadlock.c611
-rw-r--r--bdb/lock/lock_method.c177
-rw-r--r--bdb/lock/lock_region.c234
-rw-r--r--bdb/lock/lock_stat.c370
-rw-r--r--bdb/lock/lock_util.c12
-rw-r--r--bdb/log/log.c881
-rw-r--r--bdb/log/log.src46
-rw-r--r--bdb/log/log_archive.c263
-rw-r--r--bdb/log/log_compare.c6
-rw-r--r--bdb/log/log_findckp.c135
-rw-r--r--bdb/log/log_get.c1185
-rw-r--r--bdb/log/log_method.c113
-rw-r--r--bdb/log/log_put.c1038
-rw-r--r--bdb/log/log_rec.c647
-rw-r--r--bdb/log/log_register.c433
-rw-r--r--bdb/mp/Design52
-rw-r--r--bdb/mp/mp_alloc.c430
-rw-r--r--bdb/mp/mp_bh.c568
-rw-r--r--bdb/mp/mp_fget.c763
-rw-r--r--bdb/mp/mp_fopen.c1167
-rw-r--r--bdb/mp/mp_fput.c196
-rw-r--r--bdb/mp/mp_fset.c63
-rw-r--r--bdb/mp/mp_method.c109
-rw-r--r--bdb/mp/mp_region.c211
-rw-r--r--bdb/mp/mp_register.c33
-rw-r--r--bdb/mp/mp_stat.c325
-rw-r--r--bdb/mp/mp_sync.c909
-rw-r--r--bdb/mp/mp_trickle.c136
-rw-r--r--bdb/mutex/mut_fcntl.c48
-rw-r--r--bdb/mutex/mut_pthread.c145
-rw-r--r--bdb/mutex/mut_tas.c71
-rw-r--r--bdb/mutex/mut_win32.c257
-rw-r--r--bdb/mutex/mutex.c248
-rw-r--r--bdb/mutex/tm.c627
-rw-r--r--bdb/mutex/uts4_cc.s6
-rw-r--r--bdb/os/os_abs.c4
-rw-r--r--bdb/os/os_alloc.c308
-rw-r--r--bdb/os/os_clock.c92
-rw-r--r--bdb/os/os_config.c31
-rw-r--r--bdb/os/os_dir.c26
-rw-r--r--bdb/os/os_errno.c36
-rw-r--r--bdb/os/os_fid.c22
-rw-r--r--bdb/os/os_finit.c111
-rw-r--r--bdb/os/os_fsync.c15
-rw-r--r--bdb/os/os_handle.c60
-rw-r--r--bdb/os/os_id.c47
-rw-r--r--bdb/os/os_map.c45
-rw-r--r--bdb/os/os_method.c140
-rw-r--r--bdb/os/os_oflags.c20
-rw-r--r--bdb/os/os_open.c47
-rw-r--r--bdb/os/os_region.c17
-rw-r--r--bdb/os/os_rename.c27
-rw-r--r--bdb/os/os_root.c4
-rw-r--r--bdb/os/os_rpath.c4
-rw-r--r--bdb/os/os_rw.c23
-rw-r--r--bdb/os/os_seek.c15
-rw-r--r--bdb/os/os_sleep.c15
-rw-r--r--bdb/os/os_spin.c38
-rw-r--r--bdb/os/os_stat.c41
-rw-r--r--bdb/os/os_tmpdir.c10
-rw-r--r--bdb/os/os_unlink.c83
-rw-r--r--bdb/os_vxworks/os_finit.c57
-rw-r--r--bdb/os_vxworks/os_vx_abs.c (renamed from bdb/os_vxworks/os_abs.c)12
-rw-r--r--bdb/os_vxworks/os_vx_config.c31
-rw-r--r--bdb/os_vxworks/os_vx_map.c (renamed from bdb/os_vxworks/os_map.c)25
-rw-r--r--bdb/os_win32/os_abs.c4
-rw-r--r--bdb/os_win32/os_clock.c37
-rw-r--r--bdb/os_win32/os_config.c29
-rw-r--r--bdb/os_win32/os_dir.c30
-rw-r--r--bdb/os_win32/os_errno.c17
-rw-r--r--bdb/os_win32/os_fid.c20
-rw-r--r--bdb/os_win32/os_finit.c60
-rw-r--r--bdb/os_win32/os_fsync.c59
-rw-r--r--bdb/os_win32/os_handle.c126
-rw-r--r--bdb/os_win32/os_map.c118
-rw-r--r--bdb/os_win32/os_open.c98
-rw-r--r--bdb/os_win32/os_rename.c73
-rw-r--r--bdb/os_win32/os_rw.c182
-rw-r--r--bdb/os_win32/os_seek.c71
-rw-r--r--bdb/os_win32/os_sleep.c9
-rw-r--r--bdb/os_win32/os_spin.c20
-rw-r--r--bdb/os_win32/os_stat.c100
-rw-r--r--bdb/os_win32/os_type.c7
-rw-r--r--bdb/patches/log-corruption.patch62
-rw-r--r--bdb/perl.BerkeleyDB/mkconsts211
-rw-r--r--bdb/perl.BerkeleyDB/t/db-3.0.t128
-rw-r--r--bdb/perl.BerkeleyDB/t/db-3.1.t172
-rw-r--r--bdb/perl.BerkeleyDB/t/db-3.2.t90
-rw-r--r--bdb/perl.BerkeleyDB/t/mldbm.t166
-rw-r--r--bdb/perl.DB_File/Makefile.PL187
-rw-r--r--bdb/perl.DB_File/t/db-recno.t899
-rw-r--r--bdb/perl/BerkeleyDB/BerkeleyDB.pm (renamed from bdb/perl.BerkeleyDB/BerkeleyDB.pm)395
-rw-r--r--bdb/perl/BerkeleyDB/BerkeleyDB.pod (renamed from bdb/perl.BerkeleyDB/BerkeleyDB.pod)79
-rw-r--r--bdb/perl/BerkeleyDB/BerkeleyDB.pod.P (renamed from bdb/perl.BerkeleyDB/BerkeleyDB.pod.P)79
-rw-r--r--bdb/perl/BerkeleyDB/BerkeleyDB.xs (renamed from bdb/perl.BerkeleyDB/BerkeleyDB.xs)1912
-rw-r--r--bdb/perl/BerkeleyDB/BerkeleyDB/Btree.pm (renamed from bdb/perl.BerkeleyDB/BerkeleyDB/Btree.pm)0
-rw-r--r--bdb/perl/BerkeleyDB/BerkeleyDB/Hash.pm (renamed from bdb/perl.BerkeleyDB/BerkeleyDB/Hash.pm)0
-rw-r--r--bdb/perl/BerkeleyDB/Changes (renamed from bdb/perl.BerkeleyDB/Changes)199
-rw-r--r--bdb/perl/BerkeleyDB/MANIFEST (renamed from bdb/perl.BerkeleyDB/MANIFEST)7
-rw-r--r--bdb/perl/BerkeleyDB/Makefile.PL (renamed from bdb/perl.BerkeleyDB/Makefile.PL)15
-rw-r--r--bdb/perl/BerkeleyDB/README (renamed from bdb/perl.BerkeleyDB/README)92
-rw-r--r--bdb/perl/BerkeleyDB/Todo (renamed from bdb/perl.BerkeleyDB/Todo)0
-rw-r--r--bdb/perl/BerkeleyDB/config.in (renamed from bdb/perl.BerkeleyDB/config.in)12
-rw-r--r--bdb/perl/BerkeleyDB/constants.h4046
-rw-r--r--bdb/perl/BerkeleyDB/constants.xs87
-rwxr-xr-xbdb/perl/BerkeleyDB/dbinfo (renamed from bdb/perl.BerkeleyDB/dbinfo)11
-rw-r--r--bdb/perl/BerkeleyDB/hints/dec_osf.pl1
-rw-r--r--bdb/perl/BerkeleyDB/hints/irix_6_5.pl (renamed from bdb/perl.BerkeleyDB/hints/irix_6_5.pl)0
-rw-r--r--bdb/perl/BerkeleyDB/hints/solaris.pl (renamed from bdb/perl.BerkeleyDB/hints/solaris.pl)0
-rw-r--r--bdb/perl/BerkeleyDB/mkconsts770
-rwxr-xr-xbdb/perl/BerkeleyDB/mkpod (renamed from bdb/perl.BerkeleyDB/mkpod)0
-rw-r--r--bdb/perl/BerkeleyDB/patches/5.004 (renamed from bdb/perl.BerkeleyDB/patches/5.004)0
-rw-r--r--bdb/perl/BerkeleyDB/patches/5.004_01 (renamed from bdb/perl.BerkeleyDB/patches/5.004_01)0
-rw-r--r--bdb/perl/BerkeleyDB/patches/5.004_02 (renamed from bdb/perl.BerkeleyDB/patches/5.004_02)0
-rw-r--r--bdb/perl/BerkeleyDB/patches/5.004_03 (renamed from bdb/perl.BerkeleyDB/patches/5.004_03)0
-rw-r--r--bdb/perl/BerkeleyDB/patches/5.004_04 (renamed from bdb/perl.BerkeleyDB/patches/5.004_04)0
-rw-r--r--bdb/perl/BerkeleyDB/patches/5.004_05 (renamed from bdb/perl.BerkeleyDB/patches/5.004_05)0
-rw-r--r--bdb/perl/BerkeleyDB/patches/5.005 (renamed from bdb/perl.BerkeleyDB/patches/5.005)0
-rw-r--r--bdb/perl/BerkeleyDB/patches/5.005_01 (renamed from bdb/perl.BerkeleyDB/patches/5.005_01)0
-rw-r--r--bdb/perl/BerkeleyDB/patches/5.005_02 (renamed from bdb/perl.BerkeleyDB/patches/5.005_02)0
-rw-r--r--bdb/perl/BerkeleyDB/patches/5.005_03 (renamed from bdb/perl.BerkeleyDB/patches/5.005_03)0
-rw-r--r--bdb/perl/BerkeleyDB/patches/5.6.0 (renamed from bdb/perl.BerkeleyDB/patches/5.6.0)0
-rw-r--r--bdb/perl/BerkeleyDB/ppport.h329
-rw-r--r--bdb/perl/BerkeleyDB/scan229
-rw-r--r--bdb/perl/BerkeleyDB/t/btree.t (renamed from bdb/perl.BerkeleyDB/t/btree.t)191
-rw-r--r--bdb/perl/BerkeleyDB/t/destroy.t (renamed from bdb/perl.BerkeleyDB/t/destroy.t)70
-rw-r--r--bdb/perl/BerkeleyDB/t/env.t (renamed from bdb/perl.BerkeleyDB/t/env.t)118
-rw-r--r--bdb/perl/BerkeleyDB/t/examples.t (renamed from bdb/perl.BerkeleyDB/t/examples.t)83
-rw-r--r--bdb/perl/BerkeleyDB/t/examples.t.T (renamed from bdb/perl.BerkeleyDB/t/examples.t.T)83
-rw-r--r--bdb/perl/BerkeleyDB/t/examples3.t (renamed from bdb/perl.BerkeleyDB/t/examples3.t)83
-rw-r--r--bdb/perl/BerkeleyDB/t/examples3.t.T (renamed from bdb/perl.BerkeleyDB/t/examples3.t.T)83
-rw-r--r--bdb/perl/BerkeleyDB/t/filter.t (renamed from bdb/perl.BerkeleyDB/t/filter.t)29
-rw-r--r--bdb/perl/BerkeleyDB/t/hash.t (renamed from bdb/perl.BerkeleyDB/t/hash.t)169
-rw-r--r--bdb/perl/BerkeleyDB/t/join.t (renamed from bdb/perl.BerkeleyDB/t/join.t)51
-rw-r--r--bdb/perl/BerkeleyDB/t/mldbm.t161
-rw-r--r--bdb/perl/BerkeleyDB/t/queue.t (renamed from bdb/perl.BerkeleyDB/t/queue.t)196
-rw-r--r--bdb/perl/BerkeleyDB/t/recno.t (renamed from bdb/perl.BerkeleyDB/t/recno.t)256
-rw-r--r--bdb/perl/BerkeleyDB/t/strict.t (renamed from bdb/perl.BerkeleyDB/t/strict.t)62
-rw-r--r--bdb/perl/BerkeleyDB/t/subdb.t (renamed from bdb/perl.BerkeleyDB/t/subdb.t)57
-rw-r--r--bdb/perl/BerkeleyDB/t/txn.t (renamed from bdb/perl.BerkeleyDB/t/txn.t)154
-rw-r--r--bdb/perl/BerkeleyDB/t/unknown.t (renamed from bdb/perl.BerkeleyDB/t/unknown.t)38
-rw-r--r--bdb/perl/BerkeleyDB/t/util.pm220
-rw-r--r--bdb/perl/BerkeleyDB/typemap (renamed from bdb/perl.BerkeleyDB/typemap)40
-rw-r--r--bdb/perl/DB_File/Changes (renamed from bdb/perl.DB_File/Changes)489
-rw-r--r--bdb/perl/DB_File/DB_File.pm (renamed from bdb/perl.DB_File/DB_File.pm)415
-rw-r--r--bdb/perl/DB_File/DB_File.xs (renamed from bdb/perl.DB_File/DB_File.xs)663
-rw-r--r--bdb/perl/DB_File/DB_File_BS (renamed from bdb/perl.DB_File/DB_File_BS)0
-rw-r--r--bdb/perl/DB_File/MANIFEST (renamed from bdb/perl.DB_File/MANIFEST)21
-rw-r--r--bdb/perl/DB_File/Makefile.PL330
-rw-r--r--bdb/perl/DB_File/README (renamed from bdb/perl.DB_File/README)108
-rw-r--r--bdb/perl/DB_File/config.in (renamed from bdb/perl.DB_File/config.in)6
-rw-r--r--bdb/perl/DB_File/dbinfo (renamed from bdb/perl.DB_File/dbinfo)11
-rw-r--r--bdb/perl/DB_File/fallback.h455
-rw-r--r--bdb/perl/DB_File/fallback.xs88
-rw-r--r--bdb/perl/DB_File/hints/dynixptx.pl (renamed from bdb/perl.DB_File/hints/dynixptx.pl)0
-rw-r--r--bdb/perl/DB_File/hints/sco.pl (renamed from bdb/perl.DB_File/hints/sco.pl)0
-rw-r--r--bdb/perl/DB_File/patches/5.004 (renamed from bdb/perl.DB_File/patches/5.004)0
-rw-r--r--bdb/perl/DB_File/patches/5.004_01 (renamed from bdb/perl.DB_File/patches/5.004_01)0
-rw-r--r--bdb/perl/DB_File/patches/5.004_02 (renamed from bdb/perl.DB_File/patches/5.004_02)0
-rw-r--r--bdb/perl/DB_File/patches/5.004_03 (renamed from bdb/perl.DB_File/patches/5.004_03)0
-rw-r--r--bdb/perl/DB_File/patches/5.004_04 (renamed from bdb/perl.DB_File/patches/5.004_04)0
-rw-r--r--bdb/perl/DB_File/patches/5.004_05 (renamed from bdb/perl.DB_File/patches/5.004_05)0
-rw-r--r--bdb/perl/DB_File/patches/5.005 (renamed from bdb/perl.DB_File/patches/5.005)0
-rw-r--r--bdb/perl/DB_File/patches/5.005_01 (renamed from bdb/perl.DB_File/patches/5.005_01)0
-rw-r--r--bdb/perl/DB_File/patches/5.005_02 (renamed from bdb/perl.DB_File/patches/5.005_02)0
-rw-r--r--bdb/perl/DB_File/patches/5.005_03 (renamed from bdb/perl.DB_File/patches/5.005_03)0
-rw-r--r--bdb/perl/DB_File/patches/5.6.0 (renamed from bdb/perl.DB_File/patches/5.6.0)0
-rw-r--r--bdb/perl/DB_File/ppport.h329
-rw-r--r--bdb/perl/DB_File/t/db-btree.t (renamed from bdb/perl.DB_File/t/db-btree.t)531
-rw-r--r--bdb/perl/DB_File/t/db-hash.t (renamed from bdb/perl.DB_File/t/db-hash.t)344
-rw-r--r--bdb/perl/DB_File/t/db-recno.t1428
-rw-r--r--bdb/perl/DB_File/typemap (renamed from bdb/perl.DB_File/typemap)22
-rw-r--r--bdb/perl/DB_File/version.c (renamed from bdb/perl.DB_File/version.c)7
-rw-r--r--bdb/qam/qam.c1070
-rw-r--r--bdb/qam/qam.src71
-rw-r--r--bdb/qam/qam_conv.c15
-rw-r--r--bdb/qam/qam_files.c293
-rw-r--r--bdb/qam/qam_method.c381
-rw-r--r--bdb/qam/qam_open.c309
-rw-r--r--bdb/qam/qam_rec.c424
-rw-r--r--bdb/qam/qam_stat.c90
-rw-r--r--bdb/qam/qam_upgrade.c9
-rw-r--r--bdb/qam/qam_verify.c50
-rw-r--r--bdb/rep/rep_method.c1144
-rw-r--r--bdb/rep/rep_record.c1510
-rw-r--r--bdb/rep/rep_region.c187
-rw-r--r--bdb/rep/rep_util.c867
-rw-r--r--bdb/rpc_client/client.c321
-rw-r--r--bdb/rpc_client/gen_client_ret.c516
-rw-r--r--bdb/rpc_server/c/db_server_proc.c2500
-rw-r--r--bdb/rpc_server/c/db_server_proc.sed772
-rw-r--r--bdb/rpc_server/c/db_server_svc.c435
-rw-r--r--bdb/rpc_server/c/db_server_util.c (renamed from bdb/rpc_server/db_server_util.c)295
-rw-r--r--bdb/rpc_server/c/db_server_xdr.c1512
-rw-r--r--bdb/rpc_server/c/gen_db_server.c1169
-rw-r--r--bdb/rpc_server/clsrv.html436
-rw-r--r--bdb/rpc_server/cxx/db_server_cxxproc.cpp2200
-rw-r--r--bdb/rpc_server/cxx/db_server_cxxutil.cpp746
-rw-r--r--bdb/rpc_server/db_server.sed5
-rw-r--r--bdb/rpc_server/db_server_proc.c1546
-rw-r--r--bdb/rpc_server/db_server_svc.sed5
-rw-r--r--bdb/rpc_server/java/DbDispatcher.java590
-rw-r--r--bdb/rpc_server/java/DbServer.java301
-rw-r--r--bdb/rpc_server/java/FreeList.java102
-rw-r--r--bdb/rpc_server/java/LocalIterator.java23
-rw-r--r--bdb/rpc_server/java/README24
-rw-r--r--bdb/rpc_server/java/RpcDb.java694
-rw-r--r--bdb/rpc_server/java/RpcDbEnv.java269
-rw-r--r--bdb/rpc_server/java/RpcDbTxn.java123
-rw-r--r--bdb/rpc_server/java/RpcDbc.java238
-rw-r--r--bdb/rpc_server/java/Timer.java22
-rw-r--r--bdb/rpc_server/java/gen/DbServerStub.java495
-rw-r--r--bdb/rpc_server/java/gen/__db_associate_msg.java41
-rw-r--r--bdb/rpc_server/java/gen/__db_associate_reply.java32
-rw-r--r--bdb/rpc_server/java/gen/__db_bt_maxkey_msg.java35
-rw-r--r--bdb/rpc_server/java/gen/__db_bt_maxkey_reply.java32
-rw-r--r--bdb/rpc_server/java/gen/__db_bt_minkey_msg.java35
-rw-r--r--bdb/rpc_server/java/gen/__db_bt_minkey_reply.java32
-rw-r--r--bdb/rpc_server/java/gen/__db_close_msg.java35
-rw-r--r--bdb/rpc_server/java/gen/__db_close_reply.java32
-rw-r--r--bdb/rpc_server/java/gen/__db_create_msg.java35
-rw-r--r--bdb/rpc_server/java/gen/__db_create_reply.java35
-rw-r--r--bdb/rpc_server/java/gen/__db_cursor_msg.java38
-rw-r--r--bdb/rpc_server/java/gen/__db_cursor_reply.java35
-rw-r--r--bdb/rpc_server/java/gen/__db_del_msg.java53
-rw-r--r--bdb/rpc_server/java/gen/__db_del_reply.java32
-rw-r--r--bdb/rpc_server/java/gen/__db_encrypt_msg.java38
-rw-r--r--bdb/rpc_server/java/gen/__db_encrypt_reply.java32
-rw-r--r--bdb/rpc_server/java/gen/__db_extentsize_msg.java35
-rw-r--r--bdb/rpc_server/java/gen/__db_extentsize_reply.java32
-rw-r--r--bdb/rpc_server/java/gen/__db_flags_msg.java35
-rw-r--r--bdb/rpc_server/java/gen/__db_flags_reply.java32
-rw-r--r--bdb/rpc_server/java/gen/__db_get_msg.java68
-rw-r--r--bdb/rpc_server/java/gen/__db_get_reply.java38
-rw-r--r--bdb/rpc_server/java/gen/__db_h_ffactor_msg.java35
-rw-r--r--bdb/rpc_server/java/gen/__db_h_ffactor_reply.java32
-rw-r--r--bdb/rpc_server/java/gen/__db_h_nelem_msg.java35
-rw-r--r--bdb/rpc_server/java/gen/__db_h_nelem_reply.java32
-rw-r--r--bdb/rpc_server/java/gen/__db_join_msg.java38
-rw-r--r--bdb/rpc_server/java/gen/__db_join_reply.java35
-rw-r--r--bdb/rpc_server/java/gen/__db_key_range_msg.java53
-rw-r--r--bdb/rpc_server/java/gen/__db_key_range_reply.java41
-rw-r--r--bdb/rpc_server/java/gen/__db_lorder_msg.java35
-rw-r--r--bdb/rpc_server/java/gen/__db_lorder_reply.java32
-rw-r--r--bdb/rpc_server/java/gen/__db_open_msg.java50
-rw-r--r--bdb/rpc_server/java/gen/__db_open_reply.java44
-rw-r--r--bdb/rpc_server/java/gen/__db_pagesize_msg.java35
-rw-r--r--bdb/rpc_server/java/gen/__db_pagesize_reply.java32
-rw-r--r--bdb/rpc_server/java/gen/__db_pget_msg.java83
-rw-r--r--bdb/rpc_server/java/gen/__db_pget_reply.java41
-rw-r--r--bdb/rpc_server/java/gen/__db_put_msg.java68
-rw-r--r--bdb/rpc_server/java/gen/__db_put_reply.java35
-rw-r--r--bdb/rpc_server/java/gen/__db_re_delim_msg.java35
-rw-r--r--bdb/rpc_server/java/gen/__db_re_delim_reply.java32
-rw-r--r--bdb/rpc_server/java/gen/__db_re_len_msg.java35
-rw-r--r--bdb/rpc_server/java/gen/__db_re_len_reply.java32
-rw-r--r--bdb/rpc_server/java/gen/__db_re_pad_msg.java35
-rw-r--r--bdb/rpc_server/java/gen/__db_re_pad_reply.java32
-rw-r--r--bdb/rpc_server/java/gen/__db_remove_msg.java41
-rw-r--r--bdb/rpc_server/java/gen/__db_remove_reply.java32
-rw-r--r--bdb/rpc_server/java/gen/__db_rename_msg.java44
-rw-r--r--bdb/rpc_server/java/gen/__db_rename_reply.java32
-rw-r--r--bdb/rpc_server/java/gen/__db_stat_msg.java35
-rw-r--r--bdb/rpc_server/java/gen/__db_stat_reply.java35
-rw-r--r--bdb/rpc_server/java/gen/__db_sync_msg.java35
-rw-r--r--bdb/rpc_server/java/gen/__db_sync_reply.java32
-rw-r--r--bdb/rpc_server/java/gen/__db_truncate_msg.java38
-rw-r--r--bdb/rpc_server/java/gen/__db_truncate_reply.java35
-rw-r--r--bdb/rpc_server/java/gen/__dbc_close_msg.java32
-rw-r--r--bdb/rpc_server/java/gen/__dbc_close_reply.java32
-rw-r--r--bdb/rpc_server/java/gen/__dbc_count_msg.java35
-rw-r--r--bdb/rpc_server/java/gen/__dbc_count_reply.java35
-rw-r--r--bdb/rpc_server/java/gen/__dbc_del_msg.java35
-rw-r--r--bdb/rpc_server/java/gen/__dbc_del_reply.java32
-rw-r--r--bdb/rpc_server/java/gen/__dbc_dup_msg.java35
-rw-r--r--bdb/rpc_server/java/gen/__dbc_dup_reply.java35
-rw-r--r--bdb/rpc_server/java/gen/__dbc_get_msg.java65
-rw-r--r--bdb/rpc_server/java/gen/__dbc_get_reply.java38
-rw-r--r--bdb/rpc_server/java/gen/__dbc_pget_msg.java80
-rw-r--r--bdb/rpc_server/java/gen/__dbc_pget_reply.java41
-rw-r--r--bdb/rpc_server/java/gen/__dbc_put_msg.java65
-rw-r--r--bdb/rpc_server/java/gen/__dbc_put_reply.java35
-rw-r--r--bdb/rpc_server/java/gen/__env_cachesize_msg.java41
-rw-r--r--bdb/rpc_server/java/gen/__env_cachesize_reply.java32
-rw-r--r--bdb/rpc_server/java/gen/__env_close_msg.java35
-rw-r--r--bdb/rpc_server/java/gen/__env_close_reply.java32
-rw-r--r--bdb/rpc_server/java/gen/__env_create_msg.java32
-rw-r--r--bdb/rpc_server/java/gen/__env_create_reply.java35
-rw-r--r--bdb/rpc_server/java/gen/__env_dbremove_msg.java44
-rw-r--r--bdb/rpc_server/java/gen/__env_dbremove_reply.java32
-rw-r--r--bdb/rpc_server/java/gen/__env_dbrename_msg.java47
-rw-r--r--bdb/rpc_server/java/gen/__env_dbrename_reply.java32
-rw-r--r--bdb/rpc_server/java/gen/__env_encrypt_msg.java38
-rw-r--r--bdb/rpc_server/java/gen/__env_encrypt_reply.java32
-rw-r--r--bdb/rpc_server/java/gen/__env_flags_msg.java38
-rw-r--r--bdb/rpc_server/java/gen/__env_flags_reply.java32
-rw-r--r--bdb/rpc_server/java/gen/__env_open_msg.java41
-rw-r--r--bdb/rpc_server/java/gen/__env_open_reply.java35
-rw-r--r--bdb/rpc_server/java/gen/__env_remove_msg.java38
-rw-r--r--bdb/rpc_server/java/gen/__env_remove_reply.java32
-rw-r--r--bdb/rpc_server/java/gen/__txn_abort_msg.java32
-rw-r--r--bdb/rpc_server/java/gen/__txn_abort_reply.java32
-rw-r--r--bdb/rpc_server/java/gen/__txn_begin_msg.java38
-rw-r--r--bdb/rpc_server/java/gen/__txn_begin_reply.java35
-rw-r--r--bdb/rpc_server/java/gen/__txn_commit_msg.java35
-rw-r--r--bdb/rpc_server/java/gen/__txn_commit_reply.java32
-rw-r--r--bdb/rpc_server/java/gen/__txn_discard_msg.java35
-rw-r--r--bdb/rpc_server/java/gen/__txn_discard_reply.java32
-rw-r--r--bdb/rpc_server/java/gen/__txn_prepare_msg.java35
-rw-r--r--bdb/rpc_server/java/gen/__txn_prepare_reply.java32
-rw-r--r--bdb/rpc_server/java/gen/__txn_recover_msg.java38
-rw-r--r--bdb/rpc_server/java/gen/__txn_recover_reply.java41
-rw-r--r--bdb/rpc_server/java/gen/db_server.java67
-rw-r--r--bdb/rpc_server/java/jrpcgen.jar  bin  0 -> 57125 bytes
-rw-r--r--bdb/rpc_server/java/oncrpc.jar  bin  0 -> 84122 bytes
-rw-r--r--bdb/rpc_server/java/s_jrpcgen3
-rw-r--r--bdb/rpc_server/rpc.src477
-rw-r--r--bdb/tcl/docs/db.html403
-rw-r--r--bdb/tcl/docs/env.html607
-rw-r--r--bdb/tcl/docs/historic.html3
-rw-r--r--bdb/tcl/docs/index.html6
-rw-r--r--bdb/tcl/docs/library.html3
-rw-r--r--bdb/tcl/docs/lock.html308
-rw-r--r--bdb/tcl/docs/log.html24
-rw-r--r--bdb/tcl/docs/mpool.html3
-rw-r--r--bdb/tcl/docs/rep.html51
-rw-r--r--bdb/tcl/docs/test.html3
-rw-r--r--bdb/tcl/docs/txn.html93
-rw-r--r--bdb/tcl/tcl_compat.c467
-rw-r--r--bdb/tcl/tcl_db.c1180
-rw-r--r--bdb/tcl/tcl_db_pkg.c1739
-rw-r--r--bdb/tcl/tcl_dbcursor.c388
-rw-r--r--bdb/tcl/tcl_env.c882
-rw-r--r--bdb/tcl/tcl_internal.c367
-rw-r--r--bdb/tcl/tcl_lock.c258
-rw-r--r--bdb/tcl/tcl_log.c441
-rw-r--r--bdb/tcl/tcl_mp.c194
-rw-r--r--bdb/tcl/tcl_rep.c405
-rw-r--r--bdb/tcl/tcl_txn.c338
-rw-r--r--bdb/tcl/tcl_util.c381
-rw-r--r--bdb/test/TESTS1559
-rw-r--r--bdb/test/archive.tcl62
-rw-r--r--bdb/test/bigfile001.tcl85
-rw-r--r--bdb/test/bigfile002.tcl45
-rw-r--r--bdb/test/byteorder.tcl19
-rw-r--r--bdb/test/conscript.tcl20
-rw-r--r--bdb/test/dbm.tcl16
-rw-r--r--bdb/test/dbscript.tcl30
-rw-r--r--bdb/test/ddoyscript.tcl172
-rw-r--r--bdb/test/ddscript.tcl7
-rw-r--r--bdb/test/dead001.tcl66
-rw-r--r--bdb/test/dead002.tcl49
-rw-r--r--bdb/test/dead003.tcl34
-rw-r--r--bdb/test/dead004.tcl108
-rw-r--r--bdb/test/dead005.tcl87
-rw-r--r--bdb/test/dead006.tcl16
-rw-r--r--bdb/test/dead007.tcl34
-rw-r--r--bdb/test/env001.tcl37
-rw-r--r--bdb/test/env002.tcl32
-rw-r--r--bdb/test/env003.tcl100
-rw-r--r--bdb/test/env004.tcl16
-rw-r--r--bdb/test/env005.tcl26
-rw-r--r--bdb/test/env006.tcl12
-rw-r--r--bdb/test/env007.tcl151
-rw-r--r--bdb/test/env008.tcl10
-rw-r--r--bdb/test/env009.tcl57
-rw-r--r--bdb/test/env010.tcl49
-rw-r--r--bdb/test/env011.tcl39
-rw-r--r--bdb/test/hsearch.tcl4
-rw-r--r--bdb/test/join.tcl28
-rw-r--r--bdb/test/lock001.tcl100
-rw-r--r--bdb/test/lock002.tcl36
-rw-r--r--bdb/test/lock003.tcl87
-rw-r--r--bdb/test/lock004.tcl29
-rw-r--r--bdb/test/lock005.tcl177
-rw-r--r--bdb/test/lockscript.tcl51
-rw-r--r--bdb/test/log.tcl337
-rw-r--r--bdb/test/log001.tcl120
-rw-r--r--bdb/test/log002.tcl85
-rw-r--r--bdb/test/log003.tcl118
-rw-r--r--bdb/test/log004.tcl46
-rw-r--r--bdb/test/log005.tcl89
-rw-r--r--bdb/test/logtrack.tcl23
-rw-r--r--bdb/test/mdbscript.tcl33
-rw-r--r--bdb/test/memp001.tcl199
-rw-r--r--bdb/test/memp002.tcl62
-rw-r--r--bdb/test/memp003.tcl153
-rw-r--r--bdb/test/mpool.tcl420
-rw-r--r--bdb/test/mpoolscript.tcl11
-rw-r--r--bdb/test/mutex.tcl225
-rw-r--r--bdb/test/mutex001.tcl51
-rw-r--r--bdb/test/mutex002.tcl94
-rw-r--r--bdb/test/mutex003.tcl52
-rw-r--r--bdb/test/mutexscript.tcl10
-rw-r--r--bdb/test/ndbm.tcl17
-rw-r--r--bdb/test/parallel.tcl295
-rw-r--r--bdb/test/recd001.tcl104
-rw-r--r--bdb/test/recd002.tcl17
-rw-r--r--bdb/test/recd003.tcl24
-rw-r--r--bdb/test/recd004.tcl17
-rw-r--r--bdb/test/recd005.tcl29
-rw-r--r--bdb/test/recd006.tcl14
-rw-r--r--bdb/test/recd007.tcl375
-rw-r--r--bdb/test/recd008.tcl10
-rw-r--r--bdb/test/recd009.tcl13
-rw-r--r--bdb/test/recd010.tcl72
-rw-r--r--bdb/test/recd011.tcl23
-rw-r--r--bdb/test/recd012.tcl135
-rw-r--r--bdb/test/recd013.tcl99
-rw-r--r--bdb/test/recd014.tcl114
-rw-r--r--bdb/test/recd015.tcl160
-rw-r--r--bdb/test/recd016.tcl183
-rw-r--r--bdb/test/recd017.tcl151
-rw-r--r--bdb/test/recd018.tcl110
-rw-r--r--bdb/test/recd019.tcl121
-rw-r--r--bdb/test/recd020.tcl180
-rw-r--r--bdb/test/recd15scr.tcl74
-rw-r--r--bdb/test/recdscript.tcl37
-rw-r--r--bdb/test/rep001.tcl249
-rw-r--r--bdb/test/rep002.tcl278
-rw-r--r--bdb/test/rep003.tcl221
-rw-r--r--bdb/test/rep004.tcl198
-rw-r--r--bdb/test/rep005.tcl225
-rw-r--r--bdb/test/reputils.tcl659
-rw-r--r--bdb/test/rpc001.tcl47
-rw-r--r--bdb/test/rpc002.tcl51
-rw-r--r--bdb/test/rpc003.tcl166
-rw-r--r--bdb/test/rpc004.tcl76
-rw-r--r--bdb/test/rpc005.tcl137
-rw-r--r--bdb/test/rsrc001.tcl22
-rw-r--r--bdb/test/rsrc002.tcl11
-rw-r--r--bdb/test/rsrc003.tcl33
-rw-r--r--bdb/test/rsrc004.tcl52
-rw-r--r--bdb/test/scr001/chk.code37
-rw-r--r--bdb/test/scr002/chk.def64
-rw-r--r--bdb/test/scr003/chk.define77
-rw-r--r--bdb/test/scr004/chk.javafiles31
-rw-r--r--bdb/test/scr005/chk.nl112
-rw-r--r--bdb/test/scr006/chk.offt36
-rw-r--r--bdb/test/scr007/chk.proto45
-rw-r--r--bdb/test/scr008/chk.pubdef179
-rw-r--r--bdb/test/scr009/chk.srcfiles39
-rw-r--r--bdb/test/scr010/chk.str31
-rw-r--r--bdb/test/scr010/spell.ok825
-rw-r--r--bdb/test/scr011/chk.tags41
-rw-r--r--bdb/test/scr012/chk.vx_code68
-rw-r--r--bdb/test/scr013/chk.stats114
-rw-r--r--bdb/test/scr014/chk.err34
-rw-r--r--bdb/test/scr015/README36
-rw-r--r--bdb/test/scr015/TestConstruct01.cpp330
-rw-r--r--bdb/test/scr015/TestConstruct01.testerr4
-rw-r--r--bdb/test/scr015/TestConstruct01.testout27
-rw-r--r--bdb/test/scr015/TestExceptInclude.cpp27
-rw-r--r--bdb/test/scr015/TestGetSetMethods.cpp91
-rw-r--r--bdb/test/scr015/TestKeyRange.cpp171
-rw-r--r--bdb/test/scr015/TestKeyRange.testin8
-rw-r--r--bdb/test/scr015/TestKeyRange.testout19
-rw-r--r--bdb/test/scr015/TestLogc.cpp101
-rw-r--r--bdb/test/scr015/TestLogc.testout1
-rw-r--r--bdb/test/scr015/TestSimpleAccess.cpp67
-rw-r--r--bdb/test/scr015/TestSimpleAccess.testout3
-rw-r--r--bdb/test/scr015/TestTruncate.cpp84
-rw-r--r--bdb/test/scr015/TestTruncate.testout6
-rw-r--r--bdb/test/scr015/chk.cxxtests71
-rw-r--r--bdb/test/scr015/ignore4
-rw-r--r--bdb/test/scr015/testall32
-rw-r--r--bdb/test/scr015/testone122
-rw-r--r--bdb/test/scr016/CallbackTest.java83
-rw-r--r--bdb/test/scr016/CallbackTest.testout60
-rw-r--r--bdb/test/scr016/README37
-rw-r--r--bdb/test/scr016/TestAppendRecno.java258
-rw-r--r--bdb/test/scr016/TestAppendRecno.testout82
-rw-r--r--bdb/test/scr016/TestAssociate.java333
-rw-r--r--bdb/test/scr016/TestAssociate.testout30
-rw-r--r--bdb/test/scr016/TestClosedDb.java62
-rw-r--r--bdb/test/scr016/TestClosedDb.testout2
-rw-r--r--bdb/test/scr016/TestConstruct01.java474
-rw-r--r--bdb/test/scr016/TestConstruct01.testerr0
-rw-r--r--bdb/test/scr016/TestConstruct01.testout3
-rw-r--r--bdb/test/scr016/TestConstruct02.java326
-rw-r--r--bdb/test/scr016/TestConstruct02.testout3
-rw-r--r--bdb/test/scr016/TestDbtFlags.java241
-rw-r--r--bdb/test/scr016/TestDbtFlags.testerr54
-rw-r--r--bdb/test/scr016/TestDbtFlags.testout78
-rw-r--r--bdb/test/scr016/TestGetSetMethods.java99
-rw-r--r--bdb/test/scr016/TestKeyRange.java203
-rw-r--r--bdb/test/scr016/TestKeyRange.testout27
-rw-r--r--bdb/test/scr016/TestLockVec.java249
-rw-r--r--bdb/test/scr016/TestLockVec.testout8
-rw-r--r--bdb/test/scr016/TestLogc.java100
-rw-r--r--bdb/test/scr016/TestLogc.testout1
-rw-r--r--bdb/test/scr016/TestOpenEmpty.java189
-rw-r--r--bdb/test/scr016/TestOpenEmpty.testerr2
-rw-r--r--bdb/test/scr016/TestReplication.java289
-rw-r--r--bdb/test/scr016/TestRpcServer.java193
-rw-r--r--bdb/test/scr016/TestSameDbt.java56
-rw-r--r--bdb/test/scr016/TestSameDbt.testout2
-rw-r--r--bdb/test/scr016/TestSimpleAccess.java37
-rw-r--r--bdb/test/scr016/TestSimpleAccess.testout3
-rw-r--r--bdb/test/scr016/TestStat.java57
-rw-r--r--bdb/test/scr016/TestStat.testout11
-rw-r--r--bdb/test/scr016/TestTruncate.java87
-rw-r--r--bdb/test/scr016/TestTruncate.testout6
-rw-r--r--bdb/test/scr016/TestUtil.java57
-rw-r--r--bdb/test/scr016/TestXAServlet.java313
-rw-r--r--bdb/test/scr016/chk.javatests79
-rw-r--r--bdb/test/scr016/ignore22
-rw-r--r--bdb/test/scr016/testall32
-rw-r--r--bdb/test/scr016/testone122
-rw-r--r--bdb/test/scr017/O.BH196
-rw-r--r--bdb/test/scr017/O.R196
-rw-r--r--bdb/test/scr017/chk.db18526
-rw-r--r--bdb/test/scr017/t.c188
-rw-r--r--bdb/test/scr018/chk.comma30
-rw-r--r--bdb/test/scr018/t.c46
-rw-r--r--bdb/test/scr019/chk.include40
-rw-r--r--bdb/test/scr020/chk.inc43
-rw-r--r--bdb/test/scr021/chk.flags97
-rw-r--r--bdb/test/scr022/chk.rr22
-rw-r--r--bdb/test/sdb001.tcl51
-rw-r--r--bdb/test/sdb002.tcl78
-rw-r--r--bdb/test/sdb003.tcl66
-rw-r--r--bdb/test/sdb004.tcl88
-rw-r--r--bdb/test/sdb005.tcl59
-rw-r--r--bdb/test/sdb006.tcl129
-rw-r--r--bdb/test/sdb007.tcl197
-rw-r--r--bdb/test/sdb008.tcl234
-rw-r--r--bdb/test/sdb009.tcl59
-rw-r--r--bdb/test/sdb010.tcl142
-rw-r--r--bdb/test/sdb011.tcl143
-rw-r--r--bdb/test/sdb012.tcl428
-rw-r--r--bdb/test/sdbscript.tcl4
-rw-r--r--bdb/test/sdbtest001.tcl43
-rw-r--r--bdb/test/sdbtest002.tcl41
-rw-r--r--bdb/test/sdbutils.tcl50
-rw-r--r--bdb/test/sec001.tcl205
-rw-r--r--bdb/test/sec002.tcl143
-rw-r--r--bdb/test/shelltest.tcl88
-rw-r--r--bdb/test/si001.tcl116
-rw-r--r--bdb/test/si002.tcl167
-rw-r--r--bdb/test/si003.tcl142
-rw-r--r--bdb/test/si004.tcl194
-rw-r--r--bdb/test/si005.tcl179
-rw-r--r--bdb/test/si006.tcl129
-rw-r--r--bdb/test/sindex.tcl259
-rw-r--r--bdb/test/sysscript.tcl9
-rw-r--r--bdb/test/test.tcl1418
-rw-r--r--bdb/test/test001.tcl148
-rw-r--r--bdb/test/test002.tcl61
-rw-r--r--bdb/test/test003.tcl63
-rw-r--r--bdb/test/test004.tcl71
-rw-r--r--bdb/test/test005.tcl17
-rw-r--r--bdb/test/test006.tcl64
-rw-r--r--bdb/test/test007.tcl16
-rw-r--r--bdb/test/test008.tcl80
-rw-r--r--bdb/test/test009.tcl21
-rw-r--r--bdb/test/test010.tcl78
-rw-r--r--bdb/test/test011.tcl159
-rw-r--r--bdb/test/test012.tcl48
-rw-r--r--bdb/test/test013.tcl76
-rw-r--r--bdb/test/test014.tcl77
-rw-r--r--bdb/test/test015.tcl61
-rw-r--r--bdb/test/test016.tcl71
-rw-r--r--bdb/test/test017.tcl123
-rw-r--r--bdb/test/test018.tcl11
-rw-r--r--bdb/test/test019.tcl62
-rw-r--r--bdb/test/test020.tcl43
-rw-r--r--bdb/test/test021.tcl54
-rw-r--r--bdb/test/test022.tcl13
-rw-r--r--bdb/test/test023.tcl35
-rw-r--r--bdb/test/test024.tcl80
-rw-r--r--bdb/test/test025.tcl63
-rw-r--r--bdb/test/test026.tcl67
-rw-r--r--bdb/test/test027.tcl14
-rw-r--r--bdb/test/test028.tcl34
-rw-r--r--bdb/test/test029.tcl85
-rw-r--r--bdb/test/test030.tcl58
-rw-r--r--bdb/test/test031.tcl72
-rw-r--r--bdb/test/test032.tcl82
-rw-r--r--bdb/test/test033.tcl167
-rw-r--r--bdb/test/test034.tcl9
-rw-r--r--bdb/test/test035.tcl10
-rw-r--r--bdb/test/test036.tcl62
-rw-r--r--bdb/test/test037.tcl27
-rw-r--r--bdb/test/test038.tcl105
-rw-r--r--bdb/test/test039.tcl100
-rw-r--r--bdb/test/test040.tcl9
-rw-r--r--bdb/test/test041.tcl9
-rw-r--r--bdb/test/test042.tcl134
-rw-r--r--bdb/test/test043.tcl42
-rw-r--r--bdb/test/test044.tcl55
-rw-r--r--bdb/test/test045.tcl32
-rw-r--r--bdb/test/test046.tcl194
-rw-r--r--bdb/test/test047.tcl114
-rw-r--r--bdb/test/test048.tcl71
-rw-r--r--bdb/test/test049.tcl46
-rw-r--r--bdb/test/test050.tcl60
-rw-r--r--bdb/test/test051.tcl88
-rw-r--r--bdb/test/test052.tcl48
-rw-r--r--bdb/test/test053.tcl59
-rw-r--r--bdb/test/test054.tcl182
-rw-r--r--bdb/test/test055.tcl55
-rw-r--r--bdb/test/test056.tcl40
-rw-r--r--bdb/test/test057.tcl53
-rw-r--r--bdb/test/test058.tcl14
-rw-r--r--bdb/test/test059.tcl46
-rw-r--r--bdb/test/test060.tcl17
-rw-r--r--bdb/test/test061.tcl39
-rw-r--r--bdb/test/test062.tcl66
-rw-r--r--bdb/test/test063.tcl61
-rw-r--r--bdb/test/test064.tcl21
-rw-r--r--bdb/test/test065.tcl155
-rw-r--r--bdb/test/test066.tcl38
-rw-r--r--bdb/test/test067.tcl91
-rw-r--r--bdb/test/test068.tcl65
-rw-r--r--bdb/test/test069.tcl12
-rw-r--r--bdb/test/test070.tcl44
-rw-r--r--bdb/test/test071.tcl9
-rw-r--r--bdb/test/test072.tcl137
-rw-r--r--bdb/test/test073.tcl77
-rw-r--r--bdb/test/test074.tcl76
-rw-r--r--bdb/test/test075.tcl360
-rw-r--r--bdb/test/test076.tcl49
-rw-r--r--bdb/test/test077.tcl37
-rw-r--r--bdb/test/test078.tcl64
-rw-r--r--bdb/test/test079.tcl14
-rw-r--r--bdb/test/test080.tcl123
-rw-r--r--bdb/test/test081.tcl11
-rw-r--r--bdb/test/test082.tcl13
-rw-r--r--bdb/test/test083.tcl54
-rw-r--r--bdb/test/test084.tcl17
-rw-r--r--bdb/test/test085.tcl122
-rw-r--r--bdb/test/test086.tcl20
-rw-r--r--bdb/test/test087.tcl82
-rw-r--r--bdb/test/test088.tcl74
-rw-r--r--bdb/test/test089.tcl180
-rw-r--r--bdb/test/test090.tcl16
-rw-r--r--bdb/test/test091.tcl9
-rw-r--r--bdb/test/test092.tcl241
-rw-r--r--bdb/test/test093.tcl393
-rw-r--r--bdb/test/test094.tcl251
-rw-r--r--bdb/test/test095.tcl296
-rw-r--r--bdb/test/test096.tcl202
-rw-r--r--bdb/test/test097.tcl188
-rw-r--r--bdb/test/test098.tcl91
-rw-r--r--bdb/test/test099.tcl177
-rw-r--r--bdb/test/test100.tcl17
-rw-r--r--bdb/test/test101.tcl17
-rw-r--r--bdb/test/testparams.tcl113
-rw-r--r--bdb/test/testutils.tcl1139
-rw-r--r--bdb/test/txn.tcl181
-rw-r--r--bdb/test/txn001.tcl116
-rw-r--r--bdb/test/txn002.tcl91
-rw-r--r--bdb/test/txn003.tcl238
-rw-r--r--bdb/test/txn004.tcl62
-rw-r--r--bdb/test/txn005.tcl75
-rw-r--r--bdb/test/txn006.tcl47
-rw-r--r--bdb/test/txn007.tcl57
-rw-r--r--bdb/test/txn008.tcl32
-rw-r--r--bdb/test/txn009.tcl32
-rw-r--r--bdb/test/txnscript.tcl67
-rw-r--r--bdb/test/update.tcl5
-rw-r--r--bdb/test/upgrade.tcl43
-rw-r--r--bdb/test/upgrade/README85
-rw-r--r--bdb/test/upgrade/generate-2.X/pack-2.6.6.pl114
-rw-r--r--bdb/test/upgrade/generate-2.X/test-2.6.patch379
-rw-r--r--bdb/test/wrap.tcl27
-rw-r--r--bdb/txn/txn.c1149
-rw-r--r--bdb/txn/txn.src69
-rw-r--r--bdb/txn/txn_method.c105
-rw-r--r--bdb/txn/txn_rec.c265
-rw-r--r--bdb/txn/txn_recover.c306
-rw-r--r--bdb/txn/txn_region.c371
-rw-r--r--bdb/txn/txn_stat.c102
-rw-r--r--bdb/txn/txn_util.c234
-rw-r--r--bdb/xa/xa.c184
-rw-r--r--bdb/xa/xa_db.c28
-rw-r--r--bdb/xa/xa_map.c30
1187 files changed, 170239 insertions, 57257 deletions
diff --git a/bdb/LICENSE b/bdb/LICENSE
index 32cc483d68a..1cd727bfd98 100644
--- a/bdb/LICENSE
+++ b/bdb/LICENSE
@@ -1,5 +1,5 @@
/*-
- * $Id: LICENSE,v 11.7 2000/11/01 20:35:49 bostic Exp $
+ * $Id: LICENSE,v 11.9 2002/01/11 15:51:10 bostic Exp $
*/
The following is the license that applies to this copy of the Berkeley DB
@@ -10,7 +10,7 @@ Web at http://www.sleepycat.com.
=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
/*
- * Copyright (c) 1990-2000
+ * Copyright (c) 1990-2002
* Sleepycat Software. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/bdb/btree/bt_compare.c b/bdb/btree/bt_compare.c
index 91481c31366..cbe2a1a7170 100644
--- a/bdb/btree/bt_compare.c
+++ b/bdb/btree/bt_compare.c
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Copyright (c) 1996-2002
* Sleepycat Software. All rights reserved.
*/
/*
@@ -43,7 +43,7 @@
#include "db_config.h"
#ifndef lint
-static const char revid[] = "$Id: bt_compare.c,v 11.12 2000/10/26 19:00:28 krinsky Exp $";
+static const char revid[] = "$Id: bt_compare.c,v 11.17 2002/03/27 04:30:42 bostic Exp $";
#endif /* not lint */
#ifndef NO_SYSTEM_INCLUDES
@@ -51,8 +51,8 @@ static const char revid[] = "$Id: bt_compare.c,v 11.12 2000/10/26 19:00:28 krins
#endif
#include "db_int.h"
-#include "db_page.h"
-#include "btree.h"
+#include "dbinc/db_page.h"
+#include "dbinc/btree.h"
/*
* __bam_cmp --
@@ -92,7 +92,7 @@ __bam_cmp(dbp, dbt, h, indx, func, cmpp)
case P_LBTREE:
case P_LDUP:
case P_LRECNO:
- bk = GET_BKEYDATA(h, indx);
+ bk = GET_BKEYDATA(dbp, h, indx);
if (B_TYPE(bk->type) == B_OVERFLOW)
bo = (BOVERFLOW *)bk;
else {
@@ -125,7 +125,7 @@ __bam_cmp(dbp, dbt, h, indx, func, cmpp)
return (0);
}
- bi = GET_BINTERNAL(h, indx);
+ bi = GET_BINTERNAL(dbp, h, indx);
if (B_TYPE(bi->type) == B_OVERFLOW)
bo = (BOVERFLOW *)(bi->data);
else {
@@ -136,7 +136,7 @@ __bam_cmp(dbp, dbt, h, indx, func, cmpp)
}
break;
default:
- return (__db_pgfmt(dbp, PGNO(h)));
+ return (__db_pgfmt(dbp->dbenv, PGNO(h)));
}
/*
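
The bt_compare.c changes above track the new dbinc/ include layout and the extra DB handle argument taken by the page macros; the comparison logic itself is unchanged: __bam_cmp still hands each on-page or overflow key to the tree's comparison function, which is either Berkeley DB's default lexicographic compare or a callback the application registered with DB->set_bt_compare before opening the database. As a rough illustration of that callback contract only (a minimal sketch, not code from this commit; the comparator name and the assumption that every key is exactly four little-endian bytes are invented for the example):

#include <db.h>

/*
 * Hypothetical comparator: treat each key as a 4-byte little-endian
 * unsigned integer so the tree orders keys numerically instead of
 * byte by byte.  It would be registered with
 * dbp->set_bt_compare(dbp, compare_le_u32) before dbp->open();
 * __bam_cmp then calls it for every key comparison during a search.
 */
static int
compare_le_u32(DB *dbp, const DBT *a, const DBT *b)
{
	const u_int8_t *pa, *pb;
	u_int32_t va, vb;

	(void)dbp;			/* Unused in this sketch. */
	pa = a->data;
	pb = b->data;
	va = (u_int32_t)pa[0] | ((u_int32_t)pa[1] << 8) |
	    ((u_int32_t)pa[2] << 16) | ((u_int32_t)pa[3] << 24);
	vb = (u_int32_t)pb[0] | ((u_int32_t)pb[1] << 8) |
	    ((u_int32_t)pb[2] << 16) | ((u_int32_t)pb[3] << 24);
	return (va < vb ? -1 : (va > vb ? 1 : 0));
}

Whatever comparator is installed has to define a stable total order for the life of the database; changing it later leaves existing trees unsearchable.
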
diff --git a/bdb/btree/bt_conv.c b/bdb/btree/bt_conv.c
index fd30f375f7c..4264b62ffdd 100644
--- a/bdb/btree/bt_conv.c
+++ b/bdb/btree/bt_conv.c
@@ -1,14 +1,14 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Copyright (c) 1996-2002
* Sleepycat Software. All rights reserved.
*/
#include "db_config.h"
#ifndef lint
-static const char revid[] = "$Id: bt_conv.c,v 11.6 2000/03/31 00:30:26 ubell Exp $";
+static const char revid[] = "$Id: bt_conv.c,v 11.13 2002/08/06 06:11:12 bostic Exp $";
#endif /* not lint */
#ifndef NO_SYSTEM_INCLUDES
@@ -16,20 +16,21 @@ static const char revid[] = "$Id: bt_conv.c,v 11.6 2000/03/31 00:30:26 ubell Exp
#endif
#include "db_int.h"
-#include "db_page.h"
-#include "db_swap.h"
-#include "btree.h"
+#include "dbinc/db_page.h"
+#include "dbinc/db_swap.h"
+#include "dbinc/btree.h"
/*
* __bam_pgin --
* Convert host-specific page layout from the host-independent format
* stored on disk.
*
- * PUBLIC: int __bam_pgin __P((DB_ENV *, db_pgno_t, void *, DBT *));
+ * PUBLIC: int __bam_pgin __P((DB_ENV *, DB *, db_pgno_t, void *, DBT *));
*/
int
-__bam_pgin(dbenv, pg, pp, cookie)
+__bam_pgin(dbenv, dummydbp, pg, pp, cookie)
DB_ENV *dbenv;
+ DB *dummydbp;
db_pgno_t pg;
void *pp;
DBT *cookie;
@@ -38,12 +39,12 @@ __bam_pgin(dbenv, pg, pp, cookie)
PAGE *h;
pginfo = (DB_PGINFO *)cookie->data;
- if (!pginfo->needswap)
+ if (!F_ISSET(pginfo, DB_AM_SWAP))
return (0);
h = pp;
return (TYPE(h) == P_BTREEMETA ? __bam_mswap(pp) :
- __db_byteswap(dbenv, pg, pp, pginfo->db_pagesize, 1));
+ __db_byteswap(dbenv, dummydbp, pg, pp, pginfo->db_pagesize, 1));
}
/*
@@ -51,11 +52,12 @@ __bam_pgin(dbenv, pg, pp, cookie)
* Convert host-specific page layout to the host-independent format
* stored on disk.
*
- * PUBLIC: int __bam_pgout __P((DB_ENV *, db_pgno_t, void *, DBT *));
+ * PUBLIC: int __bam_pgout __P((DB_ENV *, DB *, db_pgno_t, void *, DBT *));
*/
int
-__bam_pgout(dbenv, pg, pp, cookie)
+__bam_pgout(dbenv, dummydbp, pg, pp, cookie)
DB_ENV *dbenv;
+ DB *dummydbp;
db_pgno_t pg;
void *pp;
DBT *cookie;
@@ -64,12 +66,12 @@ __bam_pgout(dbenv, pg, pp, cookie)
PAGE *h;
pginfo = (DB_PGINFO *)cookie->data;
- if (!pginfo->needswap)
+ if (!F_ISSET(pginfo, DB_AM_SWAP))
return (0);
h = pp;
return (TYPE(h) == P_BTREEMETA ? __bam_mswap(pp) :
- __db_byteswap(dbenv, pg, pp, pginfo->db_pagesize, 0));
+ __db_byteswap(dbenv, dummydbp, pg, pp, pginfo->db_pagesize, 0));
}
/*
@@ -93,6 +95,8 @@ __bam_mswap(pg)
SWAP32(p); /* re_len */
SWAP32(p); /* re_pad */
SWAP32(p); /* root */
+ p += 92 * sizeof(u_int32_t); /* unused */
+ SWAP32(p); /* crypto_magic */
return (0);
}
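
The pgin/pgout routines above exist so that pages written on one byte order can be read on the other: when the DB_PGINFO cookie indicates the database's byte order differs from the host's (now tested with F_ISSET(pginfo, DB_AM_SWAP)), every integral field in the page is byte-swapped on its way in and out of the cache, and the new hunk in __bam_mswap also swaps the metadata page's crypto_magic field after skipping the unused words in front of it. A standalone sketch of the underlying operation (not Berkeley DB's SWAP32 macro, just an illustration of what one field swap does):

#include <stddef.h>
#include <stdint.h>

/* Reverse the byte order of one 32-bit field in place. */
static void
swap32_inplace(uint8_t *p)
{
	uint8_t t;

	t = p[0]; p[0] = p[3]; p[3] = t;
	t = p[1]; p[1] = p[2]; p[2] = t;
}

/*
 * Swap n consecutive 32-bit fields starting at p, advancing the cursor the
 * way repeated SWAP32(p) calls do, and return the position just past the
 * last field swapped.
 */
static uint8_t *
swap32_run(uint8_t *p, size_t n)
{
	while (n-- > 0) {
		swap32_inplace(p);
		p += sizeof(uint32_t);
	}
	return (p);
}
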
diff --git a/bdb/btree/bt_curadj.c b/bdb/btree/bt_curadj.c
index 011acd2f4a1..50d3d422e49 100644
--- a/bdb/btree/bt_curadj.c
+++ b/bdb/btree/bt_curadj.c
@@ -1,14 +1,14 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Copyright (c) 1996-2002
* Sleepycat Software. All rights reserved.
*/
#include "db_config.h"
#ifndef lint
-static const char revid[] = "$Id: bt_curadj.c,v 11.20 2001/01/17 16:15:49 bostic Exp $";
+static const char revid[] = "$Id: bt_curadj.c,v 11.30 2002/07/03 19:03:48 bostic Exp $";
#endif /* not lint */
#ifndef NO_SYSTEM_INCLUDES
@@ -16,9 +16,8 @@ static const char revid[] = "$Id: bt_curadj.c,v 11.20 2001/01/17 16:15:49 bostic
#endif
#include "db_int.h"
-#include "db_page.h"
-#include "btree.h"
-#include "txn.h"
+#include "dbinc/db_page.h"
+#include "dbinc/btree.h"
static int __bam_opd_cursor __P((DB *, DBC *, db_pgno_t, u_int32_t, u_int32_t));
@@ -203,10 +202,9 @@ __bam_ca_di(my_dbc, pgno, indx, adjust)
}
MUTEX_THREAD_UNLOCK(dbenv, dbenv->dblist_mutexp);
- if (found != 0 && DB_LOGGING(my_dbc)) {
- if ((ret = __bam_curadj_log(dbenv,
- my_dbc->txn, &lsn, 0, dbp->log_fileid,
- DB_CA_DI, pgno, 0, 0, adjust, indx, 0)) != 0)
+ if (found != 0 && DBC_LOGGING(my_dbc)) {
+ if ((ret = __bam_curadj_log(dbp, my_dbc->txn,
+ &lsn, 0, DB_CA_DI, pgno, 0, 0, adjust, indx, 0)) != 0)
return (ret);
}
@@ -234,8 +232,13 @@ __bam_opd_cursor(dbp, dbc, first, tpgno, ti)
* Allocate a new cursor and create the stack. If duplicates
* are sorted, we've just created an off-page duplicate Btree.
* If duplicates aren't sorted, we've just created a Recno tree.
+ *
+ * Note that in order to get here at all, there shouldn't be
+	 * an old off-page dup cursor--to augment the checking that
+	 * db_c_newopd will do, assert this.
*/
- if ((ret = __db_c_newopd(dbc, tpgno, &dbc_nopd)) != 0)
+ DB_ASSERT(orig_cp->opd == NULL);
+ if ((ret = __db_c_newopd(dbc, tpgno, orig_cp->opd, &dbc_nopd)) != 0)
return (ret);
cp = (BTREE_CURSOR *)dbc_nopd->internal;
@@ -321,17 +324,16 @@ loop: MUTEX_THREAD_LOCK(dbenv, dbp->mutexp);
return (ret);
if (my_txn != NULL && dbc->txn != my_txn)
found = 1;
- /* We released the MUTEX to get a cursor, start over. */
+ /* We released the mutex to get a cursor, start over. */
goto loop;
}
MUTEX_THREAD_UNLOCK(dbenv, dbp->mutexp);
}
MUTEX_THREAD_UNLOCK(dbenv, dbenv->dblist_mutexp);
- if (found != 0 && DB_LOGGING(my_dbc)) {
- if ((ret = __bam_curadj_log(dbenv,
- my_dbc->txn, &lsn, 0, dbp->log_fileid,
- DB_CA_DUP, fpgno, tpgno, 0, first, fi, ti)) != 0)
+ if (found != 0 && DBC_LOGGING(my_dbc)) {
+ if ((ret = __bam_curadj_log(dbp, my_dbc->txn,
+ &lsn, 0, DB_CA_DUP, fpgno, tpgno, 0, first, fi, ti)) != 0)
return (ret);
}
return (0);
@@ -372,8 +374,16 @@ loop: MUTEX_THREAD_LOCK(dbenv, dbp->mutexp);
dbc != NULL; dbc = TAILQ_NEXT(dbc, links)) {
orig_cp = (BTREE_CURSOR *)dbc->internal;
+ /*
+ * A note on the orig_cp->opd != NULL requirement here:
+ * it's possible that there's a cursor that refers to
+ * the same duplicate set, but which has no opd cursor,
+ * because it refers to a different item and we took
+ * care of it while processing a previous record.
+ */
if (orig_cp->pgno != fpgno ||
orig_cp->indx != first ||
+ orig_cp->opd == NULL ||
((BTREE_CURSOR *)orig_cp->opd->internal)->indx
!= ti)
continue;
@@ -383,7 +393,7 @@ loop: MUTEX_THREAD_LOCK(dbenv, dbp->mutexp);
orig_cp->opd = NULL;
orig_cp->indx = fi;
/*
- * We released the MUTEX to free a cursor,
+ * We released the mutex to free a cursor,
* start over.
*/
goto loop;
@@ -440,10 +450,9 @@ __bam_ca_rsplit(my_dbc, fpgno, tpgno)
}
MUTEX_THREAD_UNLOCK(dbenv, dbenv->dblist_mutexp);
- if (found != 0 && DB_LOGGING(my_dbc)) {
- if ((ret = __bam_curadj_log(dbenv,
- my_dbc->txn, &lsn, 0, dbp->log_fileid,
- DB_CA_RSPLIT, fpgno, tpgno, 0, 0, 0, 0)) != 0)
+ if (found != 0 && DBC_LOGGING(my_dbc)) {
+ if ((ret = __bam_curadj_log(dbp, my_dbc->txn,
+ &lsn, 0, DB_CA_RSPLIT, fpgno, tpgno, 0, 0, 0, 0)) != 0)
return (ret);
}
return (0);
@@ -512,9 +521,9 @@ __bam_ca_split(my_dbc, ppgno, lpgno, rpgno, split_indx, cleft)
}
MUTEX_THREAD_UNLOCK(dbenv, dbenv->dblist_mutexp);
- if (found != 0 && DB_LOGGING(my_dbc)) {
- if ((ret = __bam_curadj_log(dbenv, my_dbc->txn,
- &lsn, 0, dbp->log_fileid, DB_CA_SPLIT, ppgno, rpgno,
+ if (found != 0 && DBC_LOGGING(my_dbc)) {
+ if ((ret = __bam_curadj_log(dbp,
+ my_dbc->txn, &lsn, 0, DB_CA_SPLIT, ppgno, rpgno,
cleft ? lpgno : PGNO_INVALID, 0, split_indx, 0)) != 0)
return (ret);
}
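
bt_curadj.c is the cursor-adjustment code: whenever one cursor deletes an item, splits a page, or moves duplicates off-page, every other open cursor on the same database is walked and repositioned, and (now gated on DBC_LOGGING rather than DB_LOGGING) a __bam_curadj record is written so recovery can replay the adjustment. The user-visible effect is easiest to see with two cursors positioned on the same item; a minimal sketch, with error handling omitted and the file name and key invented, assuming the documented behavior that a cursor left on a deleted item reports DB_KEYEMPTY:

#include <stdio.h>
#include <string.h>
#include <db.h>

int
main()
{
	DB *dbp;
	DBC *c1, *c2;
	DBT key, data;

	(void)db_create(&dbp, NULL, 0);
	(void)dbp->open(dbp, NULL, "curadj.db", NULL, DB_BTREE, DB_CREATE, 0664);

	memset(&key, 0, sizeof(key));
	memset(&data, 0, sizeof(data));
	key.data = "k";  key.size = 1;
	data.data = "v"; data.size = 1;
	(void)dbp->put(dbp, NULL, &key, &data, 0);

	(void)dbp->cursor(dbp, NULL, &c1, 0);
	(void)dbp->cursor(dbp, NULL, &c2, 0);
	(void)c1->c_get(c1, &key, &data, DB_FIRST);
	(void)c2->c_get(c2, &key, &data, DB_FIRST);

	(void)c1->c_del(c1, 0);	/* Delete the item both cursors reference. */

	/*
	 * c2 was adjusted rather than invalidated: DB_CURRENT now reports
	 * the deleted slot instead of returning stale data.
	 */
	if (c2->c_get(c2, &key, &data, DB_CURRENT) == DB_KEYEMPTY)
		printf("second cursor sees DB_KEYEMPTY, as expected\n");

	(void)c1->c_close(c1);
	(void)c2->c_close(c2);
	(void)dbp->close(dbp, 0);
	return (0);
}
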
diff --git a/bdb/btree/bt_cursor.c b/bdb/btree/bt_cursor.c
index 84ab7c80744..14d90e8873d 100644
--- a/bdb/btree/bt_cursor.c
+++ b/bdb/btree/bt_cursor.c
@@ -1,31 +1,29 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Copyright (c) 1996-2002
* Sleepycat Software. All rights reserved.
*/
#include "db_config.h"
#ifndef lint
-static const char revid[] = "$Id: bt_cursor.c,v 11.88 2001/01/11 18:19:49 bostic Exp $";
+static const char revid[] = "$Id: bt_cursor.c,v 11.147 2002/08/13 20:46:07 ubell Exp $";
#endif /* not lint */
#ifndef NO_SYSTEM_INCLUDES
#include <sys/types.h>
-#include <stdlib.h>
#include <string.h>
#endif
#include "db_int.h"
-#include "db_page.h"
-#include "db_shash.h"
-#include "btree.h"
-#include "lock.h"
-#include "qam.h"
-#include "common_ext.h"
+#include "dbinc/db_page.h"
+#include "dbinc/db_shash.h"
+#include "dbinc/btree.h"
+#include "dbinc/lock.h"
+static int __bam_bulk __P((DBC *, DBT *, u_int32_t));
static int __bam_c_close __P((DBC *, db_pgno_t, int *));
static int __bam_c_del __P((DBC *));
static int __bam_c_destroy __P((DBC *));
@@ -33,15 +31,16 @@ static int __bam_c_first __P((DBC *));
static int __bam_c_get __P((DBC *, DBT *, DBT *, u_int32_t, db_pgno_t *));
static int __bam_c_getstack __P((DBC *));
static int __bam_c_last __P((DBC *));
-static int __bam_c_next __P((DBC *, int));
+static int __bam_c_next __P((DBC *, int, int));
static int __bam_c_physdel __P((DBC *));
static int __bam_c_prev __P((DBC *));
static int __bam_c_put __P((DBC *, DBT *, DBT *, u_int32_t, db_pgno_t *));
-static void __bam_c_reset __P((BTREE_CURSOR *));
-static int __bam_c_search __P((DBC *, const DBT *, u_int32_t, int *));
+static int __bam_c_search __P((DBC *,
+ db_pgno_t, const DBT *, u_int32_t, int *));
static int __bam_c_writelock __P((DBC *));
-static int __bam_getboth_finddatum __P((DBC *, DBT *));
+static int __bam_getboth_finddatum __P((DBC *, DBT *, u_int32_t));
static int __bam_getbothc __P((DBC *, DBT *));
+static int __bam_get_prev __P((DBC *));
static int __bam_isopd __P((DBC *, db_pgno_t *));
/*
@@ -53,48 +52,60 @@ static int __bam_isopd __P((DBC *, db_pgno_t *));
* don't -- we don't duplicate locks when we duplicate cursors if we are
* running in a transaction environment as there's no point if locks are
* never discarded. This means that the cursor may or may not hold a lock.
+ * In the case where we are descending the tree we always want to
+ * unlock the held interior page so we use ACQUIRE_COUPLE.
*/
#undef ACQUIRE
-#define ACQUIRE(dbc, mode, lpgno, lock, fpgno, pagep, ret) {\
+#define ACQUIRE(dbc, mode, lpgno, lock, fpgno, pagep, ret) { \
+ DB_MPOOLFILE *__mpf = (dbc)->dbp->mpf; \
if ((pagep) != NULL) { \
- ret = memp_fput((dbc)->dbp->mpf, pagep, 0); \
+ ret = __mpf->put(__mpf, pagep, 0); \
+ pagep = NULL; \
+ } else \
+ ret = 0; \
+ if ((ret) == 0 && STD_LOCKING(dbc)) \
+ ret = __db_lget(dbc, LCK_COUPLE, lpgno, mode, 0, &(lock));\
+ if ((ret) == 0) \
+ ret = __mpf->get(__mpf, &(fpgno), 0, &(pagep)); \
+}
+
+#undef ACQUIRE_COUPLE
+#define ACQUIRE_COUPLE(dbc, mode, lpgno, lock, fpgno, pagep, ret) { \
+ DB_MPOOLFILE *__mpf = (dbc)->dbp->mpf; \
+ if ((pagep) != NULL) { \
+ ret = __mpf->put(__mpf, pagep, 0); \
pagep = NULL; \
} else \
ret = 0; \
if ((ret) == 0 && STD_LOCKING(dbc)) \
ret = __db_lget(dbc, \
- (lock).off == LOCK_INVALID ? 0 : LCK_COUPLE, \
- lpgno, mode, 0, &lock); \
- else \
- (lock).off = LOCK_INVALID; \
+ LCK_COUPLE_ALWAYS, lpgno, mode, 0, &(lock)); \
if ((ret) == 0) \
- ret = memp_fget((dbc)->dbp->mpf, &(fpgno), 0, &(pagep));\
+ ret = __mpf->get(__mpf, &(fpgno), 0, &(pagep)); \
}
/* Acquire a new page/lock for a cursor. */
#undef ACQUIRE_CUR
-#define ACQUIRE_CUR(dbc, mode, ret) { \
+#define ACQUIRE_CUR(dbc, mode, p, ret) { \
BTREE_CURSOR *__cp = (BTREE_CURSOR *)(dbc)->internal; \
- ACQUIRE(dbc, mode, \
- __cp->pgno, __cp->lock, __cp->pgno, __cp->page, ret); \
- if ((ret) == 0) \
+ ACQUIRE(dbc, mode, p, __cp->lock, p, __cp->page, ret); \
+ if ((ret) == 0) { \
+ __cp->pgno = p; \
__cp->lock_mode = (mode); \
+ } \
}
/*
- * Acquire a new page/lock for a cursor, and move the cursor on success.
- * The reason that this is a separate macro is because we don't want to
- * set the pgno/indx fields in the cursor until we actually have the lock,
- * otherwise the cursor adjust routines will adjust the cursor even though
- * we're not really on the page.
+ * Acquire a new page/lock for a cursor and release the previous.
+ * This is typically used when descending a tree and we do not
+ * want to hold the interior nodes locked.
*/
-#undef ACQUIRE_CUR_SET
-#define ACQUIRE_CUR_SET(dbc, mode, p, ret) { \
+#undef ACQUIRE_CUR_COUPLE
+#define ACQUIRE_CUR_COUPLE(dbc, mode, p, ret) { \
BTREE_CURSOR *__cp = (BTREE_CURSOR *)(dbc)->internal; \
- ACQUIRE(dbc, mode, p, __cp->lock, p, __cp->page, ret); \
+ ACQUIRE_COUPLE(dbc, mode, p, __cp->lock, p, __cp->page, ret); \
if ((ret) == 0) { \
- __cp->pgno = p; \
- __cp->indx = 0; \
+ __cp->pgno = p; \
__cp->lock_mode = (mode); \
} \
}
@@ -112,7 +123,7 @@ static int __bam_isopd __P((DBC *, db_pgno_t *));
if (STD_LOCKING(dbc) && \
__cp->lock_mode != DB_LOCK_WRITE && \
((ret) = __db_lget(dbc, \
- __cp->lock.off == LOCK_INVALID ? 0 : LCK_COUPLE, \
+ LOCK_ISSET(__cp->lock) ? LCK_COUPLE : 0, \
__cp->pgno, DB_LOCK_WRITE, 0, &__cp->lock)) == 0) \
__cp->lock_mode = DB_LOCK_WRITE; \
}
@@ -120,19 +131,19 @@ static int __bam_isopd __P((DBC *, db_pgno_t *));
/* Discard the current page/lock. */
#undef DISCARD
#define DISCARD(dbc, ldiscard, lock, pagep, ret) { \
+ DB_MPOOLFILE *__mpf = (dbc)->dbp->mpf; \
int __t_ret; \
if ((pagep) != NULL) { \
- ret = memp_fput((dbc)->dbp->mpf, pagep, 0); \
+ ret = __mpf->put(__mpf, pagep, 0); \
pagep = NULL; \
} else \
ret = 0; \
- if ((lock).off != LOCK_INVALID) { \
- __t_ret = ldiscard ? \
- __LPUT((dbc), lock): __TLPUT((dbc), lock); \
- if (__t_ret != 0 && (ret) == 0) \
- ret = __t_ret; \
- (lock).off = LOCK_INVALID; \
- } \
+ if (ldiscard) \
+ __t_ret = __LPUT((dbc), lock); \
+ else \
+ __t_ret = __TLPUT((dbc), lock); \
+ if (__t_ret != 0 && (ret) == 0) \
+ ret = __t_ret; \
}
/* Discard the current page/lock for a cursor. */
@@ -146,12 +157,12 @@ static int __bam_isopd __P((DBC *, db_pgno_t *));
/* If on-page item is a deleted record. */
#undef IS_DELETED
-#define IS_DELETED(page, indx) \
- B_DISSET(GET_BKEYDATA(page, \
+#define IS_DELETED(dbp, page, indx) \
+ B_DISSET(GET_BKEYDATA(dbp, page, \
(indx) + (TYPE(page) == P_LBTREE ? O_INDX : 0))->type)
#undef IS_CUR_DELETED
#define IS_CUR_DELETED(dbc) \
- IS_DELETED((dbc)->internal->page, (dbc)->internal->indx)
+ IS_DELETED((dbc)->dbp, (dbc)->internal->page, (dbc)->internal->indx)
/*
* Test to see if two cursors could point to duplicates of the same key.
@@ -163,8 +174,8 @@ static int __bam_isopd __P((DBC *, db_pgno_t *));
*/
#undef IS_DUPLICATE
#define IS_DUPLICATE(dbc, i1, i2) \
- (((PAGE *)(dbc)->internal->page)->inp[i1] == \
- ((PAGE *)(dbc)->internal->page)->inp[i2])
+ (P_INP((dbc)->dbp,((PAGE *)(dbc)->internal->page))[i1] == \
+ P_INP((dbc)->dbp,((PAGE *)(dbc)->internal->page))[i2])
#undef IS_CUR_DUPLICATE
#define IS_CUR_DUPLICATE(dbc, orig_pgno, orig_indx) \
(F_ISSET(dbc, DBC_OPD) || \
@@ -172,22 +183,6 @@ static int __bam_isopd __P((DBC *, db_pgno_t *));
IS_DUPLICATE(dbc, (dbc)->internal->indx, orig_indx)))
/*
- * __bam_c_reset --
- * Initialize internal cursor structure.
- */
-static void
-__bam_c_reset(cp)
- BTREE_CURSOR *cp;
-{
- cp->csp = cp->sp;
- cp->lock.off = LOCK_INVALID;
- cp->lock_mode = DB_LOCK_NG;
- cp->recno = RECNO_OOB;
- cp->order = INVALID_ORDER;
- cp->flags = 0;
-}
-
-/*
* __bam_c_init --
* Initialize the access private portion of a cursor
*
@@ -198,35 +193,26 @@ __bam_c_init(dbc, dbtype)
DBC *dbc;
DBTYPE dbtype;
{
- BTREE *t;
- BTREE_CURSOR *cp;
- DB *dbp;
+ DB_ENV *dbenv;
int ret;
- u_int32_t minkey;
- dbp = dbc->dbp;
+ dbenv = dbc->dbp->dbenv;
/* Allocate/initialize the internal structure. */
- if (dbc->internal == NULL) {
- if ((ret = __os_malloc(dbp->dbenv,
- sizeof(BTREE_CURSOR), NULL, &cp)) != 0)
- return (ret);
- dbc->internal = (DBC_INTERNAL *)cp;
-
- cp->sp = cp->csp = cp->stack;
- cp->esp = cp->stack + sizeof(cp->stack) / sizeof(cp->stack[0]);
- } else
- cp = (BTREE_CURSOR *)dbc->internal;
- __bam_c_reset(cp);
+ if (dbc->internal == NULL && (ret =
+ __os_malloc(dbenv, sizeof(BTREE_CURSOR), &dbc->internal)) != 0)
+ return (ret);
/* Initialize methods. */
dbc->c_close = __db_c_close;
dbc->c_count = __db_c_count;
dbc->c_del = __db_c_del;
dbc->c_dup = __db_c_dup;
- dbc->c_get = __db_c_get;
+ dbc->c_get = dbc->c_real_get = __db_c_get;
+ dbc->c_pget = __db_c_pget;
dbc->c_put = __db_c_put;
if (dbtype == DB_BTREE) {
+ dbc->c_am_bulk = __bam_bulk;
dbc->c_am_close = __bam_c_close;
dbc->c_am_del = __bam_c_del;
dbc->c_am_destroy = __bam_c_destroy;
@@ -234,6 +220,7 @@ __bam_c_init(dbc, dbtype)
dbc->c_am_put = __bam_c_put;
dbc->c_am_writelock = __bam_c_writelock;
} else {
+ dbc->c_am_bulk = __bam_bulk;
dbc->c_am_close = __bam_c_close;
dbc->c_am_del = __ram_c_del;
dbc->c_am_destroy = __bam_c_destroy;
@@ -242,18 +229,6 @@ __bam_c_init(dbc, dbtype)
dbc->c_am_writelock = __bam_c_writelock;
}
- /*
- * The btree leaf page data structures require that two key/data pairs
- * (or four items) fit on a page, but other than that there's no fixed
- * requirement. The btree off-page duplicates only require two items,
- * to be exact, but requiring four for them as well seems reasonable.
- *
- * Recno uses the btree bt_ovflsize value -- it's close enough.
- */
- t = dbp->bt_internal;
- minkey = F_ISSET(dbc, DBC_OPD) ? 2 : t->bt_minkey;
- cp->ovflsize = B_MINKEY_TO_OVFLSIZE(minkey, dbp->pgsize);
-
return (0);
}
@@ -267,12 +242,13 @@ int
__bam_c_refresh(dbc)
DBC *dbc;
{
+ BTREE *t;
BTREE_CURSOR *cp;
DB *dbp;
dbp = dbc->dbp;
+ t = dbp->bt_internal;
cp = (BTREE_CURSOR *)dbc->internal;
- __bam_c_reset(cp);
/*
* If our caller set the root page number, it's because the root was
@@ -280,11 +256,32 @@ __bam_c_refresh(dbc)
* pull it out of our internal information.
*/
if (cp->root == PGNO_INVALID)
- cp->root = ((BTREE *)dbp->bt_internal)->bt_root;
+ cp->root = t->bt_root;
+
+ LOCK_INIT(cp->lock);
+ cp->lock_mode = DB_LOCK_NG;
+
+ cp->sp = cp->csp = cp->stack;
+ cp->esp = cp->stack + sizeof(cp->stack) / sizeof(cp->stack[0]);
+
+ /*
+ * The btree leaf page data structures require that two key/data pairs
+ * (or four items) fit on a page, but other than that there's no fixed
+ * requirement. The btree off-page duplicates only require two items,
+ * to be exact, but requiring four for them as well seems reasonable.
+ *
+ * Recno uses the btree bt_ovflsize value -- it's close enough.
+ */
+ cp->ovflsize = B_MINKEY_TO_OVFLSIZE(
+ dbp, F_ISSET(dbc, DBC_OPD) ? 2 : t->bt_minkey, dbp->pgsize);
+
+ cp->recno = RECNO_OOB;
+ cp->order = INVALID_ORDER;
+ cp->flags = 0;
/* Initialize for record numbers. */
if (F_ISSET(dbc, DBC_OPD) ||
- dbc->dbtype == DB_RECNO || F_ISSET(dbp, DB_BT_RECNUM)) {
+ dbc->dbtype == DB_RECNO || F_ISSET(dbp, DB_AM_RECNUM)) {
F_SET(cp, C_RECNUM);
/*
@@ -293,7 +290,7 @@ __bam_c_refresh(dbc)
* mutable record numbers.
*/
if ((F_ISSET(dbc, DBC_OPD) && dbc->dbtype == DB_RECNO) ||
- F_ISSET(dbp, DB_BT_RECNUM | DB_RE_RENUMBER))
+ F_ISSET(dbp, DB_AM_RECNUM | DB_AM_RENUMBER))
F_SET(cp, C_RENUMBER);
}
@@ -313,11 +310,12 @@ __bam_c_close(dbc, root_pgno, rmroot)
BTREE_CURSOR *cp, *cp_opd, *cp_c;
DB *dbp;
DBC *dbc_opd, *dbc_c;
+ DB_MPOOLFILE *mpf;
PAGE *h;
- u_int32_t num;
int cdb_lock, ret, t_ret;
dbp = dbc->dbp;
+ mpf = dbp->mpf;
cp = (BTREE_CURSOR *)dbc->internal;
cp_opd = (dbc_opd = cp->opd) == NULL ?
NULL : (BTREE_CURSOR *)dbc_opd->internal;
@@ -408,10 +406,10 @@ __bam_c_close(dbc, root_pgno, rmroot)
* We will not have been provided a root page number. Acquire
* one from the primary database.
*/
- if ((ret = memp_fget(dbp->mpf, &cp->pgno, 0, &h)) != 0)
+ if ((ret = mpf->get(mpf, &cp->pgno, 0, &h)) != 0)
goto err;
- root_pgno = GET_BOVERFLOW(h, cp->indx + O_INDX)->pgno;
- if ((ret = memp_fput(dbp->mpf, h, 0)) != 0)
+ root_pgno = GET_BOVERFLOW(dbp, h, cp->indx + O_INDX)->pgno;
+ if ((ret = mpf->put(mpf, h, 0)) != 0)
goto err;
dbc_c = dbc_opd;
@@ -453,18 +451,14 @@ lock: cp_c = (BTREE_CURSOR *)dbc_c->internal;
* info in __db_c_get--the OPD is also a WRITEDUP.
*/
if (CDB_LOCKING(dbp->dbenv)) {
- DB_ASSERT(!F_ISSET(dbc, DBC_OPD) || F_ISSET(dbc, DBC_WRITEDUP));
- if (!F_ISSET(dbc, DBC_WRITER)) {
- if ((ret =
- lock_get(dbp->dbenv, dbc->locker, DB_LOCK_UPGRADE,
+ if (F_ISSET(dbc, DBC_WRITEDUP | DBC_WRITECURSOR)) {
+ if ((ret = dbp->dbenv->lock_get(
+ dbp->dbenv, dbc->locker, DB_LOCK_UPGRADE,
&dbc->lock_dbt, DB_LOCK_WRITE, &dbc->mylock)) != 0)
goto err;
cdb_lock = 1;
}
-
- cp_c->lock.off = LOCK_INVALID;
- if ((ret =
- memp_fget(dbp->mpf, &cp_c->pgno, 0, &cp_c->page)) != 0)
+ if ((ret = mpf->get(mpf, &cp_c->pgno, 0, &cp_c->page)) != 0)
goto err;
goto delete;
@@ -480,9 +474,7 @@ lock: cp_c = (BTREE_CURSOR *)dbc_c->internal;
* is responsible for acquiring any necessary locks before calling us.
*/
if (F_ISSET(dbc, DBC_OPD)) {
- cp_c->lock.off = LOCK_INVALID;
- if ((ret =
- memp_fget(dbp->mpf, &cp_c->pgno, 0, &cp_c->page)) != 0)
+ if ((ret = mpf->get(mpf, &cp_c->pgno, 0, &cp_c->page)) != 0)
goto err;
goto delete;
}
@@ -542,13 +534,13 @@ delete: /*
* in that case. So, if the off-page duplicate tree is empty at this
* point, we want to remove it.
*/
- if ((ret = memp_fget(dbp->mpf, &root_pgno, 0, &h)) != 0)
+ if ((ret = mpf->get(mpf, &root_pgno, 0, &h)) != 0)
goto err;
- if ((num = NUM_ENT(h)) == 0) {
+ if (NUM_ENT(h) == 0) {
if ((ret = __db_free(dbc, h)) != 0)
goto err;
} else {
- if ((ret = memp_fput(dbp->mpf, h, 0)) != 0)
+ if ((ret = mpf->put(mpf, h, 0)) != 0)
goto err;
goto done;
}
@@ -566,8 +558,7 @@ delete: /*
* the primary page.
*/
if (dbc_opd != NULL) {
- cp->lock.off = LOCK_INVALID;
- if ((ret = memp_fget(dbp->mpf, &cp->pgno, 0, &cp->page)) != 0)
+ if ((ret = mpf->get(mpf, &cp->pgno, 0, &cp->page)) != 0)
goto err;
if ((ret = __bam_c_physdel(dbc)) != 0)
goto err;
@@ -604,7 +595,7 @@ __bam_c_destroy(dbc)
DBC *dbc;
{
/* Discard the structures. */
- __os_free(dbc->internal, sizeof(BTREE_CURSOR));
+ __os_free(dbc->dbp->dbenv, dbc->internal);
return (0);
}
@@ -622,11 +613,13 @@ __bam_c_count(dbc, recnop)
{
BTREE_CURSOR *cp;
DB *dbp;
+ DB_MPOOLFILE *mpf;
db_indx_t indx, top;
db_recno_t recno;
int ret;
dbp = dbc->dbp;
+ mpf = dbp->mpf;
cp = (BTREE_CURSOR *)dbc->internal;
/*
@@ -637,7 +630,7 @@ __bam_c_count(dbc, recnop)
* new locks, we have to have a read lock to even get here.
*/
if (cp->opd == NULL) {
- if ((ret = memp_fget(dbp->mpf, &cp->pgno, 0, &cp->page)) != 0)
+ if ((ret = mpf->get(mpf, &cp->pgno, 0, &cp->page)) != 0)
return (ret);
/*
@@ -654,14 +647,14 @@ __bam_c_count(dbc, recnop)
break;
*recnop = recno;
} else {
- if ((ret = memp_fget(dbp->mpf,
- &cp->opd->internal->root, 0, &cp->page)) != 0)
+ if ((ret =
+ mpf->get(mpf, &cp->opd->internal->root, 0, &cp->page)) != 0)
return (ret);
*recnop = RE_NREC(cp->page);
}
- ret = memp_fput(dbp->mpf, cp->page, 0);
+ ret = mpf->put(mpf, cp->page, 0);
cp->page = NULL;
return (ret);
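
__bam_c_count above backs the public DBcursor->c_count call: for on-page duplicates it scans the leaf page, and for an off-page duplicate tree it simply reads RE_NREC from the duplicate root, which it now fetches through the handle's DB_MPOOLFILE methods. A minimal caller-side sketch (the helper name and string-key handling are invented; error paths are collapsed):

#include <string.h>
#include <db.h>

/*
 * Position a cursor on a key and ask how many duplicate data items
 * share it; internally this runs the __bam_c_count path shown above.
 */
int
count_dups(DB *dbp, char *k, db_recno_t *countp)
{
	DBC *dbc;
	DBT key, data;
	int ret, t_ret;

	memset(&key, 0, sizeof(key));
	memset(&data, 0, sizeof(data));
	key.data = k;
	key.size = (u_int32_t)strlen(k);

	if ((ret = dbp->cursor(dbp, NULL, &dbc, 0)) != 0)
		return (ret);
	if ((ret = dbc->c_get(dbc, &key, &data, DB_SET)) == 0)
		ret = dbc->c_count(dbc, countp, 0);
	if ((t_ret = dbc->c_close(dbc)) != 0 && ret == 0)
		ret = t_ret;
	return (ret);
}
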
@@ -677,9 +670,11 @@ __bam_c_del(dbc)
{
BTREE_CURSOR *cp;
DB *dbp;
+ DB_MPOOLFILE *mpf;
int ret, t_ret;
dbp = dbc->dbp;
+ mpf = dbp->mpf;
cp = (BTREE_CURSOR *)dbc->internal;
ret = 0;
@@ -706,25 +701,27 @@ __bam_c_del(dbc)
goto err;
cp->page = cp->csp->page;
} else {
- ACQUIRE_CUR(dbc, DB_LOCK_WRITE, ret);
+ ACQUIRE_CUR(dbc, DB_LOCK_WRITE, cp->pgno, ret);
if (ret != 0)
goto err;
}
/* Log the change. */
- if (DB_LOGGING(dbc) &&
- (ret = __bam_cdel_log(dbp->dbenv, dbc->txn, &LSN(cp->page), 0,
- dbp->log_fileid, PGNO(cp->page), &LSN(cp->page), cp->indx)) != 0)
- goto err;
+ if (DBC_LOGGING(dbc)) {
+ if ((ret = __bam_cdel_log(dbp, dbc->txn, &LSN(cp->page), 0,
+ PGNO(cp->page), &LSN(cp->page), cp->indx)) != 0)
+ goto err;
+ } else
+ LSN_NOT_LOGGED(LSN(cp->page));
/* Set the intent-to-delete flag on the page. */
if (TYPE(cp->page) == P_LBTREE)
- B_DSET(GET_BKEYDATA(cp->page, cp->indx + O_INDX)->type);
+ B_DSET(GET_BKEYDATA(dbp, cp->page, cp->indx + O_INDX)->type);
else
- B_DSET(GET_BKEYDATA(cp->page, cp->indx)->type);
+ B_DSET(GET_BKEYDATA(dbp, cp->page, cp->indx)->type);
/* Mark the page dirty. */
- ret = memp_fset(dbp->mpf, cp->page, DB_MPOOL_DIRTY);
+ ret = mpf->set(mpf, cp->page, DB_MPOOL_DIRTY);
err: /*
* If we've been successful so far and the tree has record numbers,
@@ -736,7 +733,7 @@ err: /*
(void)__bam_stkrel(dbc, 0);
} else
if (cp->page != NULL &&
- (t_ret = memp_fput(dbp->mpf, cp->page, 0)) != 0 && ret == 0)
+ (t_ret = mpf->put(mpf, cp->page, 0)) != 0 && ret == 0)
ret = t_ret;
cp->page = NULL;
@@ -771,7 +768,7 @@ __bam_c_dup(orig_dbc, new_dbc)
* holding inside a transaction because all the locks are retained
* until the transaction commits or aborts.
*/
- if (orig->lock.off != LOCK_INVALID && orig_dbc->txn == NULL) {
+ if (LOCK_ISSET(orig->lock) && orig_dbc->txn == NULL) {
if ((ret = __db_lget(new_dbc,
0, new->pgno, new->lock_mode, 0, &new->lock)) != 0)
return (ret);
@@ -796,11 +793,13 @@ __bam_c_get(dbc, key, data, flags, pgnop)
{
BTREE_CURSOR *cp;
DB *dbp;
+ DB_MPOOLFILE *mpf;
db_pgno_t orig_pgno;
db_indx_t orig_indx;
int exact, newopd, ret;
dbp = dbc->dbp;
+ mpf = dbp->mpf;
cp = (BTREE_CURSOR *)dbc->internal;
orig_pgno = cp->pgno;
orig_indx = cp->indx;
@@ -820,7 +819,7 @@ __bam_c_get(dbc, key, data, flags, pgnop)
* write lock, but upgrading to a write lock has no better
* chance of succeeding now instead of later, so don't try.
*/
- if ((ret = memp_fget(dbp->mpf, &cp->pgno, 0, &cp->page)) != 0)
+ if ((ret = mpf->get(mpf, &cp->pgno, 0, &cp->page)) != 0)
goto err;
break;
case DB_FIRST:
@@ -829,9 +828,10 @@ __bam_c_get(dbc, key, data, flags, pgnop)
goto err;
break;
case DB_GET_BOTH:
+ case DB_GET_BOTH_RANGE:
/*
* There are two ways to get here based on DBcursor->c_get
- * with the DB_GET_BOTH flag set:
+ * with the DB_GET_BOTH/DB_GET_BOTH_RANGE flags set:
*
* 1. Searching a sorted off-page duplicate tree: do a tree
* search.
@@ -839,20 +839,34 @@ __bam_c_get(dbc, key, data, flags, pgnop)
* 2. Searching btree: do a tree search. If it returns a
* reference to off-page duplicate tree, return immediately
* and let our caller deal with it. If the search doesn't
- * return a reference to off-page duplicate tree, start an
- * on-page search.
+ * return a reference to off-page duplicate tree, continue
+ * with an on-page search.
*/
if (F_ISSET(dbc, DBC_OPD)) {
if ((ret = __bam_c_search(
- dbc, data, DB_GET_BOTH, &exact)) != 0)
- goto err;
- if (!exact) {
- ret = DB_NOTFOUND;
+ dbc, PGNO_INVALID, data, flags, &exact)) != 0)
goto err;
+ if (flags == DB_GET_BOTH) {
+ if (!exact) {
+ ret = DB_NOTFOUND;
+ goto err;
+ }
+ break;
}
+
+ /*
+ * We didn't require an exact match, so the search may
+ * may have returned an entry past the end of the page,
+ * or we may be referencing a deleted record. If so,
+ * move to the next entry.
+ */
+ if ((cp->indx == NUM_ENT(cp->page) ||
+ IS_CUR_DELETED(dbc)) &&
+ (ret = __bam_c_next(dbc, 1, 0)) != 0)
+ goto err;
} else {
if ((ret = __bam_c_search(
- dbc, key, DB_GET_BOTH, &exact)) != 0)
+ dbc, PGNO_INVALID, key, flags, &exact)) != 0)
return (ret);
if (!exact) {
ret = DB_NOTFOUND;
@@ -863,7 +877,8 @@ __bam_c_get(dbc, key, data, flags, pgnop)
newopd = 1;
break;
}
- if ((ret = __bam_getboth_finddatum(dbc, data)) != 0)
+ if ((ret =
+ __bam_getboth_finddatum(dbc, data, flags)) != 0)
goto err;
}
break;
@@ -882,11 +897,11 @@ __bam_c_get(dbc, key, data, flags, pgnop)
if ((ret = __bam_c_first(dbc)) != 0)
goto err;
} else
- if ((ret = __bam_c_next(dbc, 1)) != 0)
+ if ((ret = __bam_c_next(dbc, 1, 0)) != 0)
goto err;
break;
case DB_NEXT_DUP:
- if ((ret = __bam_c_next(dbc, 1)) != 0)
+ if ((ret = __bam_c_next(dbc, 1, 0)) != 0)
goto err;
if (!IS_CUR_DUPLICATE(dbc, orig_pgno, orig_indx)) {
ret = DB_NOTFOUND;
@@ -900,7 +915,7 @@ __bam_c_get(dbc, key, data, flags, pgnop)
goto err;
} else
do {
- if ((ret = __bam_c_next(dbc, 1)) != 0)
+ if ((ret = __bam_c_next(dbc, 1, 0)) != 0)
goto err;
} while (IS_CUR_DUPLICATE(dbc, orig_pgno, orig_indx));
break;
@@ -927,12 +942,14 @@ __bam_c_get(dbc, key, data, flags, pgnop)
case DB_SET:
case DB_SET_RECNO:
newopd = 1;
- if ((ret = __bam_c_search(dbc, key, flags, &exact)) != 0)
+ if ((ret = __bam_c_search(dbc,
+ PGNO_INVALID, key, flags, &exact)) != 0)
goto err;
break;
case DB_SET_RANGE:
newopd = 1;
- if ((ret = __bam_c_search(dbc, key, flags, &exact)) != 0)
+ if ((ret = __bam_c_search(dbc,
+ PGNO_INVALID, key, flags, &exact)) != 0)
goto err;
/*
@@ -942,7 +959,7 @@ __bam_c_get(dbc, key, data, flags, pgnop)
* the next entry.
*/
if (cp->indx == NUM_ENT(cp->page) || IS_CUR_DELETED(dbc))
- if ((ret = __bam_c_next(dbc, 0)) != 0)
+ if ((ret = __bam_c_next(dbc, 0, 0)) != 0)
goto err;
break;
default:
@@ -957,8 +974,15 @@ __bam_c_get(dbc, key, data, flags, pgnop)
if (newopd && pgnop != NULL)
(void)__bam_isopd(dbc, pgnop);
- /* Don't return the key, it was passed to us */
- if (flags == DB_SET)
+ /*
+ * Don't return the key, it was passed to us (this is true even if the
+ * application defines a compare function returning equality for more
+ * than one key value, since in that case which actual value we store
+ * in the database is undefined -- and particularly true in the case of
+ * duplicates where we only store one key value).
+ */
+ if (flags == DB_GET_BOTH ||
+ flags == DB_GET_BOTH_RANGE || flags == DB_SET)
F_SET(key, DB_DBT_ISSET);
err: /*
@@ -966,13 +990,596 @@ err: /*
* moved, clear the delete flag, DBcursor->c_get never references
* a deleted key, if it moved at all.
*/
- if (F_ISSET(cp, C_DELETED)
- && (cp->pgno != orig_pgno || cp->indx != orig_indx))
+ if (F_ISSET(cp, C_DELETED) &&
+ (cp->pgno != orig_pgno || cp->indx != orig_indx))
F_CLR(cp, C_DELETED);
return (ret);
}
+static int
+__bam_get_prev(dbc)
+ DBC *dbc;
+{
+ BTREE_CURSOR *cp;
+ DBT key, data;
+ db_pgno_t pgno;
+ int ret;
+
+ if ((ret = __bam_c_prev(dbc)) != 0)
+ return (ret);
+
+ if (__bam_isopd(dbc, &pgno)) {
+ cp = (BTREE_CURSOR *)dbc->internal;
+ if ((ret = __db_c_newopd(dbc, pgno, cp->opd, &cp->opd)) != 0)
+ return (ret);
+ if ((ret = cp->opd->c_am_get(cp->opd,
+ &key, &data, DB_LAST, NULL)) != 0)
+ return (ret);
+ }
+
+ return (0);
+}
+
+/*
+ * __bam_bulk -- Return bulk data from a btree.
+ */
+static int
+__bam_bulk(dbc, data, flags)
+ DBC *dbc;
+ DBT *data;
+ u_int32_t flags;
+{
+ BKEYDATA *bk;
+ BOVERFLOW *bo;
+ BTREE_CURSOR *cp;
+ PAGE *pg;
+ db_indx_t *inp, indx, pg_keyoff;
+ int32_t *endp, key_off, *offp, *saveoffp;
+ u_int8_t *dbuf, *dp, *np;
+ u_int32_t key_size, size, space;
+ int adj, is_key, need_pg, next_key, no_dup;
+ int pagesize, rec_key, ret;
+
+ ret = 0;
+ key_off = 0;
+ size = 0;
+ pagesize = dbc->dbp->pgsize;
+ cp = (BTREE_CURSOR *)dbc->internal;
+
+ /*
+ * dp tracks the beginning of the page in the buffer.
+ * np is the next place to copy things into the buffer.
+ * dbuf always stays at the beginning of the buffer.
+ */
+ dbuf = data->data;
+ np = dp = dbuf;
+
+ /* Keep track of space that is left. There is a termination entry */
+ space = data->ulen;
+ space -= sizeof(*offp);
+
+ /* Build the offset/size table from the end up. */
+ endp = (int32_t *)((u_int8_t *)dbuf + data->ulen);
+ endp--;
+ offp = endp;
+
+ key_size = 0;
+
+ /*
+ * Distinguish between BTREE and RECNO.
+ * There are no keys in RECNO. If MULTIPLE_KEY is specified
+ * then we return the record numbers.
+ * is_key indicates that multiple btree keys are returned.
+ * rec_key is set if we are returning record numbers.
+ * next_key is set if we are going after the next key rather than dup.
+ */
+ if (dbc->dbtype == DB_BTREE) {
+ is_key = LF_ISSET(DB_MULTIPLE_KEY) ? 1: 0;
+ rec_key = 0;
+ next_key = is_key && LF_ISSET(DB_OPFLAGS_MASK) != DB_NEXT_DUP;
+ adj = 2;
+ } else {
+ is_key = 0;
+ rec_key = LF_ISSET(DB_MULTIPLE_KEY) ? 1 : 0;
+ next_key = LF_ISSET(DB_OPFLAGS_MASK) != DB_NEXT_DUP;
+ adj = 1;
+ }
+ no_dup = LF_ISSET(DB_OPFLAGS_MASK) == DB_NEXT_NODUP;
+
+next_pg:
+ indx = cp->indx;
+ pg = cp->page;
+
+ inp = P_INP(dbc->dbp, pg);
+ /* The current page is not yet in the buffer. */
+ need_pg = 1;
+
+ /*
+ * Keep track of the offset of the current key on the page.
+ * If we are returning keys, set it to 0 first so we force
+ * the copy of the key to the buffer.
+ */
+ pg_keyoff = 0;
+ if (is_key == 0)
+ pg_keyoff = inp[indx];
+
+ do {
+ if (IS_DELETED(dbc->dbp, pg, indx)) {
+ if (dbc->dbtype != DB_RECNO)
+ continue;
+
+ cp->recno++;
+ /*
+ * If we are not returning recnos then we
+ * need to fill in every slot so the user
+ * can calculate the record numbers.
+ */
+ if (rec_key != 0)
+ continue;
+
+ space -= 2 * sizeof(*offp);
+ /* Check if space has underflowed. */
+ if (space > data->ulen)
+ goto back_up;
+
+ /* Just mark the empty recno slots. */
+ *offp-- = 0;
+ *offp-- = 0;
+ continue;
+ }
+
+ /*
+ * Check to see if we have a new key.
+ * If so, then see if we need to put the
+ * key on the page. If its already there
+ * then we just point to it.
+ */
+ if (is_key && pg_keyoff != inp[indx]) {
+ bk = GET_BKEYDATA(dbc->dbp, pg, indx);
+ if (B_TYPE(bk->type) == B_OVERFLOW) {
+ bo = (BOVERFLOW *)bk;
+ size = key_size = bo->tlen;
+ if (key_size > space)
+ goto get_key_space;
+ if ((ret = __bam_bulk_overflow(dbc,
+ bo->tlen, bo->pgno, np)) != 0)
+ return (ret);
+ space -= key_size;
+ key_off = (int32_t)(np - dbuf);
+ np += key_size;
+ } else {
+ if (need_pg) {
+ dp = np;
+ size = pagesize - HOFFSET(pg);
+ if (space < size) {
+get_key_space:
+ /* Nothing added, then error. */
+ if (offp == endp) {
+ data->size =
+ ALIGN(size +
+ pagesize,
+ sizeof(u_int32_t));
+ return (ENOMEM);
+ }
+ /*
+ * We need to back up to the
+ * last record put into the
+ * buffer so that it is
+ * CURRENT.
+ */
+ if (indx != 0)
+ indx -= P_INDX;
+ else {
+ if ((ret =
+ __bam_get_prev(
+ dbc)) != 0)
+ return (ret);
+ indx = cp->indx;
+ pg = cp->page;
+ }
+ break;
+ }
+ /*
+ * Move the data part of the page
+ * to the buffer.
+ */
+ memcpy(dp,
+ (u_int8_t *)pg + HOFFSET(pg), size);
+ need_pg = 0;
+ space -= size;
+ np += size;
+ }
+ key_size = bk->len;
+ key_off = (int32_t)(inp[indx] - HOFFSET(pg)
+ + dp - dbuf + SSZA(BKEYDATA, data));
+ pg_keyoff = inp[indx];
+ }
+ }
+
+ /*
+ * Reserve space for the pointers and sizes.
+ * Either key/data pair or just for a data item.
+ */
+ space -= (is_key ? 4 : 2) * sizeof(*offp);
+ if (rec_key)
+ space -= sizeof(*offp);
+
+ /* Check to see if space has underflowed. */
+ if (space > data->ulen)
+ goto back_up;
+
+ /*
+ * Determine if the next record is in the
+ * buffer already or if it needs to be copied in.
+ * If we have an off page dup, then copy as many
+ * as will fit into the buffer.
+ */
+ bk = GET_BKEYDATA(dbc->dbp, pg, indx + adj - 1);
+ if (B_TYPE(bk->type) == B_DUPLICATE) {
+ bo = (BOVERFLOW *)bk;
+ if (is_key) {
+ *offp-- = key_off;
+ *offp-- = key_size;
+ }
+ /*
+ * We pass the offset of the current key.
+ * On return we check to see if offp has
+ * moved to see if any data fit.
+ */
+ saveoffp = offp;
+ if ((ret = __bam_bulk_duplicates(dbc, bo->pgno,
+ dbuf, is_key ? offp + P_INDX : NULL,
+ &offp, &np, &space, no_dup)) != 0) {
+ if (ret == ENOMEM) {
+ size = space;
+ /* If nothing was added, then error. */
+ if (offp == saveoffp) {
+ offp += 2;
+ goto back_up;
+ }
+ goto get_space;
+ }
+ return (ret);
+ }
+ } else if (B_TYPE(bk->type) == B_OVERFLOW) {
+ bo = (BOVERFLOW *)bk;
+ size = bo->tlen;
+ if (size > space)
+ goto back_up;
+ if ((ret =
+ __bam_bulk_overflow(dbc,
+ bo->tlen, bo->pgno, np)) != 0)
+ return (ret);
+ space -= size;
+ if (is_key) {
+ *offp-- = key_off;
+ *offp-- = key_size;
+ } else if (rec_key)
+ *offp-- = cp->recno;
+ *offp-- = (int32_t)(np - dbuf);
+ np += size;
+ *offp-- = size;
+ } else {
+ if (need_pg) {
+ dp = np;
+ size = pagesize - HOFFSET(pg);
+ if (space < size) {
+back_up:
+ /*
+ * Back up the index so that the
+ * last record in the buffer is CURRENT
+ */
+ if (indx >= adj)
+ indx -= adj;
+ else {
+ if ((ret =
+ __bam_get_prev(dbc)) != 0 &&
+ ret != DB_NOTFOUND)
+ return (ret);
+ indx = cp->indx;
+ pg = cp->page;
+ }
+ if (dbc->dbtype == DB_RECNO)
+ cp->recno--;
+get_space:
+ /*
+ * See if we put anything in the
+ * buffer, or, if we are doing a DBP->get,
+ * whether we got all of the data.
+ */
+ if (offp >=
+ (is_key ? &endp[-1] : endp) ||
+ F_ISSET(dbc, DBC_TRANSIENT)) {
+ data->size = ALIGN(size +
+ data->ulen - space,
+ sizeof(u_int32_t));
+ return (ENOMEM);
+ }
+ break;
+ }
+ memcpy(dp, (u_int8_t *)pg + HOFFSET(pg), size);
+ need_pg = 0;
+ space -= size;
+ np += size;
+ }
+ /*
+ * Add the offsets and sizes to the end of the buffer.
+ * First add the key info then the data info.
+ */
+ if (is_key) {
+ *offp-- = key_off;
+ *offp-- = key_size;
+ } else if (rec_key)
+ *offp-- = cp->recno;
+ *offp-- = (int32_t)(inp[indx + adj - 1] - HOFFSET(pg)
+ + dp - dbuf + SSZA(BKEYDATA, data));
+ *offp-- = bk->len;
+ }
+ if (dbc->dbtype == DB_RECNO)
+ cp->recno++;
+ else if (no_dup) {
+ while (indx + adj < NUM_ENT(pg) &&
+ pg_keyoff == inp[indx + adj])
+ indx += adj;
+ }
+ /*
+ * Stop when we either run off the page or we
+ * move to the next key and we are not returning multiple keys.
+ */
+ } while ((indx += adj) < NUM_ENT(pg) &&
+ (next_key || pg_keyoff == inp[indx]));
+
+ /* If we are off the page then try the next page. */
+ if (ret == 0 && next_key && indx >= NUM_ENT(pg)) {
+ cp->indx = indx;
+ ret = __bam_c_next(dbc, 0, 1);
+ if (ret == 0)
+ goto next_pg;
+ if (ret != DB_NOTFOUND)
+ return (ret);
+ }
+
+ /*
+ * If we did a DBP->get we must error if we did not return
+ * all the data for the current key because there is
+ * no way to know if we did not get it all, nor any
+ * interface to fetch the balance.
+ */
+
+ if (ret == 0 &&
+ F_ISSET(dbc, DBC_TRANSIENT) && pg_keyoff == inp[indx]) {
+ data->size = (data->ulen - space) + size;
+ return (ENOMEM);
+ }
+ /*
+ * Must leave the index pointing at the last record fetched.
+ * If we are not fetching keys, we may have stepped to the
+ * next key.
+ */
+ if (next_key || pg_keyoff == inp[indx])
+ cp->indx = indx;
+ else
+ cp->indx = indx - P_INDX;
+
+ if (rec_key == 1)
+ *offp = (u_int32_t) RECNO_OOB;
+ else
+ *offp = (u_int32_t) -1;
+ return (0);
+}
+
+/*
+ * __bam_bulk_overflow --
+ * Dump overflow record into the buffer.
+ * The space requirements have already been checked.
+ * PUBLIC: int __bam_bulk_overflow
+ * PUBLIC: __P((DBC *, u_int32_t, db_pgno_t, u_int8_t *));
+ */
+int
+__bam_bulk_overflow(dbc, len, pgno, dp)
+ DBC *dbc;
+ u_int32_t len;
+ db_pgno_t pgno;
+ u_int8_t *dp;
+{
+ DBT dbt;
+
+ memset(&dbt, 0, sizeof(dbt));
+ F_SET(&dbt, DB_DBT_USERMEM);
+ dbt.ulen = len;
+ dbt.data = (void *)dp;
+ return (__db_goff(dbc->dbp, &dbt, len, pgno, NULL, NULL));
+}
+
+/*
+ * __bam_bulk_duplicates --
+ * Put as many off page duplicates as will fit into the buffer.
+ * This routine will adjust the cursor to reflect the position in
+ * the overflow tree.
+ * PUBLIC: int __bam_bulk_duplicates __P((DBC *,
+ * PUBLIC: db_pgno_t, u_int8_t *, int32_t *,
+ * PUBLIC: int32_t **, u_int8_t **, u_int32_t *, int));
+ */
+int
+__bam_bulk_duplicates(dbc, pgno, dbuf, keyoff, offpp, dpp, spacep, no_dup)
+ DBC *dbc;
+ db_pgno_t pgno;
+ u_int8_t *dbuf;
+ int32_t *keyoff, **offpp;
+ u_int8_t **dpp;
+ u_int32_t *spacep;
+ int no_dup;
+{
+ DB *dbp;
+ BKEYDATA *bk;
+ BOVERFLOW *bo;
+ BTREE_CURSOR *cp;
+ DBC *opd;
+ DBT key, data;
+ PAGE *pg;
+ db_indx_t indx, *inp;
+ int32_t *offp;
+ u_int32_t size, space;
+ u_int8_t *dp, *np;
+ int first, need_pg, pagesize, ret, t_ret;
+
+ ret = 0;
+
+ dbp = dbc->dbp;
+ cp = (BTREE_CURSOR *)dbc->internal;
+ opd = cp->opd;
+
+ if (opd == NULL) {
+ if ((ret = __db_c_newopd(dbc, pgno, NULL, &opd)) != 0)
+ return (ret);
+ cp->opd = opd;
+ if ((ret = opd->c_am_get(opd,
+ &key, &data, DB_FIRST, NULL)) != 0)
+ return (ret);
+ }
+
+ pagesize = opd->dbp->pgsize;
+ cp = (BTREE_CURSOR *)opd->internal;
+ space = *spacep;
+ /* Get current offset slot. */
+ offp = *offpp;
+
+ /*
+ * np is the next place to put data.
+ * dp is the beginning of the current page in the buffer.
+ */
+ np = dp = *dpp;
+ first = 1;
+ indx = cp->indx;
+
+ do {
+ /* Fetch the current record. No initial move. */
+ if ((ret = __bam_c_next(opd, 0, 0)) != 0)
+ break;
+ pg = cp->page;
+ indx = cp->indx;
+ inp = P_INP(dbp, pg);
+ /* We need to copy the page to the buffer. */
+ need_pg = 1;
+
+ do {
+ if (IS_DELETED(dbp, pg, indx))
+ goto contin;
+ bk = GET_BKEYDATA(dbp, pg, indx);
+ space -= 2 * sizeof(*offp);
+ /* Allocate space for key if needed. */
+ if (first == 0 && keyoff != NULL)
+ space -= 2 * sizeof(*offp);
+
+ /* Did space underflow? */
+ if (space > *spacep) {
+ ret = ENOMEM;
+ if (first == 1) {
+ space = *spacep + -(int32_t)space;
+ if (need_pg)
+ space += pagesize - HOFFSET(pg);
+ }
+ break;
+ }
+ if (B_TYPE(bk->type) == B_OVERFLOW) {
+ bo = (BOVERFLOW *)bk;
+ size = bo->tlen;
+ if (size > space) {
+ ret = ENOMEM;
+ if (first == 1) {
+ space = *spacep + size;
+ }
+ break;
+ }
+ if (first == 0 && keyoff != NULL) {
+ *offp-- = keyoff[0];
+ *offp-- = keyoff[-1];
+ }
+ if ((ret = __bam_bulk_overflow(dbc,
+ bo->tlen, bo->pgno, np)) != 0)
+ return (ret);
+ space -= size;
+ *offp-- = (int32_t)(np - dbuf);
+ np += size;
+ } else {
+ if (need_pg) {
+ dp = np;
+ size = pagesize - HOFFSET(pg);
+ if (space < size) {
+ ret = ENOMEM;
+ /* Return space required. */
+ if (first == 1) {
+ space = *spacep + size;
+ }
+ break;
+ }
+ memcpy(dp,
+ (u_int8_t *)pg + HOFFSET(pg), size);
+ need_pg = 0;
+ space -= size;
+ np += size;
+ }
+ if (first == 0 && keyoff != NULL) {
+ *offp-- = keyoff[0];
+ *offp-- = keyoff[-1];
+ }
+ size = bk->len;
+ *offp-- = (int32_t)(inp[indx] - HOFFSET(pg)
+ + dp - dbuf + SSZA(BKEYDATA, data));
+ }
+ *offp-- = size;
+ first = 0;
+ if (no_dup)
+ break;
+contin:
+ indx++;
+ if (opd->dbtype == DB_RECNO)
+ cp->recno++;
+ } while (indx < NUM_ENT(pg));
+ if (no_dup)
+ break;
+ cp->indx = indx;
+
+ } while (ret == 0);
+
+ /* Return the updated information. */
+ *spacep = space;
+ *offpp = offp;
+ *dpp = np;
+
+ /*
+ * If we ran out of space back up the pointer.
+ * If we did not return any dups or reached the end, close the opd.
+ */
+ if (ret == ENOMEM) {
+ if (opd->dbtype == DB_RECNO) {
+ if (--cp->recno == 0)
+ goto close_opd;
+ } else if (indx != 0)
+ cp->indx--;
+ else {
+ t_ret = __bam_c_prev(opd);
+ if (t_ret == DB_NOTFOUND)
+ goto close_opd;
+ if (t_ret != 0)
+ ret = t_ret;
+ }
+ } else if (keyoff == NULL && ret == DB_NOTFOUND) {
+ cp->indx--;
+ if (opd->dbtype == DB_RECNO)
+ --cp->recno;
+ } else if (indx == 0 || ret == DB_NOTFOUND) {
+close_opd:
+ opd->c_close(opd);
+ ((BTREE_CURSOR *)dbc->internal)->opd = NULL;
+ }
+ if (ret == DB_NOTFOUND)
+ ret = 0;
+
+ return (ret);
+}
+
/*
* __bam_getbothc --
* Search for a matching data item on a join.
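
The bulk support added above (__bam_bulk, __bam_bulk_overflow, __bam_bulk_duplicates) fills a caller-supplied buffer with copies of page data and builds a table of offset/length pairs backwards from the end of that buffer, terminated by a -1 slot. On the application side that buffer is consumed with the DB_MULTIPLE macros after a DBcursor->c_get call that ORs in DB_MULTIPLE_KEY; a sketch of that pattern, assuming an already-open Btree handle and a buffer comfortably larger than the page size (the function name and the fixed 1MB buffer are invented for the example):

#include <errno.h>
#include <stdlib.h>
#include <string.h>
#include <db.h>

#define	BULK_LEN	(1024 * 1024)	/* Assumed to exceed the page size. */

/*
 * Hypothetical bulk scan: each c_get fills the buffer with as many
 * key/data pairs as fit, and DB_MULTIPLE_KEY_NEXT walks the offset
 * table written backwards from the end of the buffer.
 */
int
dump_all(DB *dbp)
{
	DBC *dbc;
	DBT key, data;
	void *p, *kptr, *dptr;
	u_int32_t klen, dlen;
	int ret, t_ret;

	memset(&key, 0, sizeof(key));
	memset(&data, 0, sizeof(data));
	if ((data.data = malloc(BULK_LEN)) == NULL)
		return (ENOMEM);
	data.ulen = BULK_LEN;
	data.flags = DB_DBT_USERMEM;

	if ((ret = dbp->cursor(dbp, NULL, &dbc, 0)) != 0) {
		free(data.data);
		return (ret);
	}
	while ((ret = dbc->c_get(dbc,
	    &key, &data, DB_NEXT | DB_MULTIPLE_KEY)) == 0)
		for (DB_MULTIPLE_INIT(p, &data);;) {
			DB_MULTIPLE_KEY_NEXT(p, &data, kptr, klen, dptr, dlen);
			if (p == NULL)
				break;
			/* kptr/klen and dptr/dlen point into data.data. */
		}
	if (ret == DB_NOTFOUND)
		ret = 0;
	if ((t_ret = dbc->c_close(dbc)) != 0 && ret == 0)
		ret = t_ret;
	free(data.data);
	return (ret);
}
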
@@ -984,9 +1591,11 @@ __bam_getbothc(dbc, data)
{
BTREE_CURSOR *cp;
DB *dbp;
+ DB_MPOOLFILE *mpf;
int cmp, exact, ret;
dbp = dbc->dbp;
+ mpf = dbp->mpf;
cp = (BTREE_CURSOR *)dbc->internal;
/*
@@ -995,7 +1604,7 @@ __bam_getbothc(dbc, data)
* write lock, but upgrading to a write lock has no better
* chance of succeeding now instead of later, so don't try.
*/
- if ((ret = memp_fget(dbp->mpf, &cp->pgno, 0, &cp->page)) != 0)
+ if ((ret = mpf->get(mpf, &cp->pgno, 0, &cp->page)) != 0)
return (ret);
/*
@@ -1017,11 +1626,12 @@ __bam_getbothc(dbc, data)
return (DB_NOTFOUND);
/* Discard the current page, we're going to do a full search. */
- if ((ret = memp_fput(dbp->mpf, cp->page, 0)) != 0)
+ if ((ret = mpf->put(mpf, cp->page, 0)) != 0)
return (ret);
cp->page = NULL;
- return (__bam_c_search(dbc, data, DB_GET_BOTH, &exact));
+ return (__bam_c_search(dbc,
+ PGNO_INVALID, data, DB_GET_BOTH, &exact));
}
/*
@@ -1038,7 +1648,7 @@ __bam_getbothc(dbc, data)
return (DB_NOTFOUND);
cp->indx += P_INDX;
- return (__bam_getboth_finddatum(dbc, data));
+ return (__bam_getboth_finddatum(dbc, data, DB_GET_BOTH));
}
/*
@@ -1046,9 +1656,10 @@ __bam_getbothc(dbc, data)
* Find a matching on-page data item.
*/
static int
-__bam_getboth_finddatum(dbc, data)
+__bam_getboth_finddatum(dbc, data, flags)
DBC *dbc;
DBT *data;
+ u_int32_t flags;
{
BTREE_CURSOR *cp;
DB *dbp;
@@ -1060,17 +1671,14 @@ __bam_getboth_finddatum(dbc, data)
/*
* Called (sometimes indirectly) from DBC->get to search on-page data
- * item(s) for a matching value. If the original flag was DB_GET_BOTH,
- * the cursor argument is set to the first data item for the key. If
- * the original flag was DB_GET_BOTHC, the cursor argument is set to
- * the first data item that we can potentially return. In both cases,
- * there may or may not be additional duplicate data items to search.
+ * item(s) for a matching value. If the original flag was DB_GET_BOTH
+ * or DB_GET_BOTH_RANGE, the cursor is set to the first undeleted data
+ * item for the key. If the original flag was DB_GET_BOTHC, the cursor
+ * argument is set to the first data item we can potentially return.
+ * In both cases, there may or may not be additional duplicate data
+ * items to search.
*
* If the duplicates are not sorted, do a linear search.
- *
- * If the duplicates are sorted, do a binary search. The reason for
- * this is that large pages and small key/data pairs result in large
- * numbers of on-page duplicates before they get pushed off-page.
*/
if (dbp->dup_compare == NULL) {
for (;; cp->indx += P_INDX) {
@@ -1085,41 +1693,62 @@ __bam_getboth_finddatum(dbc, data)
!IS_DUPLICATE(dbc, cp->indx, cp->indx + P_INDX))
break;
}
- } else {
- /*
- * Find the top and bottom of the duplicate set. Binary search
- * requires at least two items, don't loop if there's only one.
- */
- for (base = top = cp->indx;
- top < NUM_ENT(cp->page); top += P_INDX)
- if (!IS_DUPLICATE(dbc, cp->indx, top))
- break;
- if (base == (top - P_INDX)) {
- if ((ret = __bam_cmp(dbp, data,
- cp->page, cp->indx + O_INDX,
- dbp->dup_compare, &cmp)) != 0)
- return (ret);
- return (cmp == 0 ? 0 : DB_NOTFOUND);
- }
+ return (DB_NOTFOUND);
+ }
- for (lim =
- (top - base) / (db_indx_t)P_INDX; lim != 0; lim >>= 1) {
- cp->indx = base + ((lim >> 1) * P_INDX);
- if ((ret = __bam_cmp(dbp, data, cp->page,
- cp->indx + O_INDX, dbp->dup_compare, &cmp)) != 0)
- return (ret);
- if (cmp == 0) {
- if (!IS_CUR_DELETED(dbc))
- return (0);
- break;
- }
- if (cmp > 0) {
- base = cp->indx + P_INDX;
- --lim;
- }
+ /*
+ * If the duplicates are sorted, do a binary search. The reason for
+ * this is that large pages and small key/data pairs result in large
+ * numbers of on-page duplicates before they get pushed off-page.
+ *
+ * Find the top and bottom of the duplicate set. Binary search
+ * requires at least two items, don't loop if there's only one.
+ */
+ for (base = top = cp->indx; top < NUM_ENT(cp->page); top += P_INDX)
+ if (!IS_DUPLICATE(dbc, cp->indx, top))
+ break;
+ if (base == (top - P_INDX)) {
+ if ((ret = __bam_cmp(dbp, data,
+ cp->page, cp->indx + O_INDX, dbp->dup_compare, &cmp)) != 0)
+ return (ret);
+ return (cmp == 0 ||
+ (cmp < 0 && flags == DB_GET_BOTH_RANGE) ? 0 : DB_NOTFOUND);
+ }
+
+ for (lim = (top - base) / (db_indx_t)P_INDX; lim != 0; lim >>= 1) {
+ cp->indx = base + ((lim >> 1) * P_INDX);
+ if ((ret = __bam_cmp(dbp, data, cp->page,
+ cp->indx + O_INDX, dbp->dup_compare, &cmp)) != 0)
+ return (ret);
+ if (cmp == 0) {
+ /*
+ * XXX
+ * No duplicate duplicates in sorted duplicate sets,
+ * so there can be only one.
+ */
+ if (!IS_CUR_DELETED(dbc))
+ return (0);
+ break;
+ }
+ if (cmp > 0) {
+ base = cp->indx + P_INDX;
+ --lim;
}
}
- return (DB_NOTFOUND);
+
+ /* No match found; if we're looking for an exact match, we're done. */
+ if (flags == DB_GET_BOTH)
+ return (DB_NOTFOUND);
+
+ /*
+ * Base is the smallest index greater than the data item, may be zero
+ * or a last + O_INDX index, and may be deleted. Find an undeleted
+ * item.
+ */
+ cp->indx = base;
+ while (cp->indx < top && IS_CUR_DELETED(dbc))
+ cp->indx += P_INDX;
+ return (cp->indx < top ? 0 : DB_NOTFOUND);
}
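
The lim-halving loop above is a uniform binary search run over index slots spaced P_INDX apart; when it falls through without a match, base is the smallest searched slot greater than the item. A minimal standalone sketch of that stride search over a plain int array with a caller-supplied comparator (the names and types here are illustrative only, not the Berkeley DB API):

#include <stddef.h>

#define STRIDE 2			/* Stands in for P_INDX. */

/*
 * Search slots 0, STRIDE, 2*STRIDE, ... of "slots" for "target".
 * Returns 0 and sets *foundp on a match; otherwise returns -1 and
 * sets *foundp to the smallest searched slot greater than target.
 */
static int
find_dup_slot(const int *slots, size_t nslots, int target,
    int (*cmp_fn)(int, int), size_t *foundp)
{
	size_t base, indx, lim;
	int cmp;

	for (base = 0, lim = nslots / STRIDE; lim != 0; lim >>= 1) {
		indx = base + (lim >> 1) * STRIDE;
		cmp = cmp_fn(target, slots[indx]);
		if (cmp == 0) {
			*foundp = indx;
			return (0);
		}
		if (cmp > 0) {		/* Target sorts after this slot. */
			base = indx + STRIDE;
			--lim;
		}
	}
	*foundp = base;
	return (-1);
}
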
/*
@@ -1136,19 +1765,22 @@ __bam_c_put(dbc, key, data, flags, pgnop)
BTREE_CURSOR *cp;
DB *dbp;
DBT dbt;
+ DB_MPOOLFILE *mpf;
+ db_pgno_t root_pgno;
u_int32_t iiop;
- int cmp, exact, needkey, ret, stack;
+ int cmp, exact, ret, stack;
void *arg;
dbp = dbc->dbp;
+ mpf = dbp->mpf;
cp = (BTREE_CURSOR *)dbc->internal;
+ root_pgno = cp->root;
-split: needkey = ret = stack = 0;
+split: ret = stack = 0;
switch (flags) {
case DB_AFTER:
case DB_BEFORE:
case DB_CURRENT:
- needkey = 1;
iiop = flags;
/*
@@ -1182,7 +1814,7 @@ split: needkey = ret = stack = 0;
ACQUIRE_WRITE_LOCK(dbc, ret);
if (ret != 0)
goto err;
- if ((ret = memp_fget(dbp->mpf, &cp->pgno, 0, &cp->page)) != 0)
+ if ((ret = mpf->get(mpf, &cp->pgno, 0, &cp->page)) != 0)
goto err;
break;
case DB_KEYFIRST:
@@ -1192,15 +1824,22 @@ split: needkey = ret = stack = 0;
* Searching off-page, sorted duplicate tree: do a tree search
* for the correct item; __bam_c_search returns the smallest
* slot greater than the key, use it.
+ *
+ * See comment below regarding where we can start the search.
*/
if (F_ISSET(dbc, DBC_OPD)) {
- if ((ret =
- __bam_c_search(dbc, data, flags, &exact)) != 0)
+ if ((ret = __bam_c_search(dbc,
+ F_ISSET(cp, C_RECNUM) ? cp->root : root_pgno,
+ data, flags, &exact)) != 0)
goto err;
stack = 1;
/* Disallow "sorted" duplicate duplicates. */
if (exact) {
+ if (IS_DELETED(dbp, cp->page, cp->indx)) {
+ iiop = DB_CURRENT;
+ break;
+ }
ret = __db_duperr(dbp, flags);
goto err;
}
@@ -1208,8 +1847,17 @@ split: needkey = ret = stack = 0;
break;
}
- /* Searching a btree. */
- if ((ret = __bam_c_search(dbc, key,
+ /*
+ * Searching a btree.
+ *
+ * If we've done a split, we can start the search from the
+ * parent of the split page, which __bam_split returned
+ * for us in root_pgno, unless we're in a Btree with record
+ * numbering. In that case, we'll need the true root page
+ * in order to adjust the record count.
+ */
+ if ((ret = __bam_c_search(dbc,
+ F_ISSET(cp, C_RECNUM) ? cp->root : root_pgno, key,
flags == DB_KEYFIRST || dbp->dup_compare != NULL ?
DB_KEYFIRST : DB_KEYLAST, &exact)) != 0)
goto err;
@@ -1264,8 +1912,8 @@ split: needkey = ret = stack = 0;
*/
for (;; cp->indx += P_INDX) {
if ((ret = __bam_cmp(dbp, data, cp->page,
- cp->indx + O_INDX, dbp->dup_compare, &cmp)) !=0)
- return (ret);
+ cp->indx + O_INDX, dbp->dup_compare, &cmp)) != 0)
+ goto err;
if (cmp < 0) {
iiop = DB_BEFORE;
break;
@@ -1273,7 +1921,7 @@ split: needkey = ret = stack = 0;
/* Disallow "sorted" duplicate duplicates. */
if (cmp == 0) {
- if (IS_DELETED(cp->page, cp->indx)) {
+ if (IS_DELETED(dbp, cp->page, cp->indx)) {
iiop = DB_CURRENT;
break;
}
@@ -1282,8 +1930,8 @@ split: needkey = ret = stack = 0;
}
if (cp->indx + P_INDX >= NUM_ENT(cp->page) ||
- ((PAGE *)cp->page)->inp[cp->indx] !=
- ((PAGE *)cp->page)->inp[cp->indx + P_INDX]) {
+ P_INP(dbp, ((PAGE *)cp->page))[cp->indx] !=
+ P_INP(dbp, ((PAGE *)cp->page))[cp->indx + P_INDX]) {
iiop = DB_AFTER;
break;
}
@@ -1306,7 +1954,7 @@ split: needkey = ret = stack = 0;
flags == DB_BEFORE || flags == DB_CURRENT) {
memset(&dbt, 0, sizeof(DBT));
if ((ret = __db_ret(dbp, cp->page, 0, &dbt,
- &dbc->rkey.data, &dbc->rkey.ulen)) != 0)
+ &dbc->rkey->data, &dbc->rkey->ulen)) != 0)
goto err;
arg = &dbt;
} else
@@ -1327,7 +1975,7 @@ split: needkey = ret = stack = 0;
goto err;
/* Split the tree. */
- if ((ret = __bam_split(dbc, arg)) != 0)
+ if ((ret = __bam_split(dbc, arg, &root_pgno)) != 0)
return (ret);
goto split;
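
The "goto split" just above is a retry loop: the insert is attempted, and when a leaf reports it needs splitting, __bam_split performs the split, hands back the parent of the split page in root_pgno, and the whole operation restarts from there (or from the true root when record numbers must be maintained). The shape of that loop, with placeholder callbacks rather than the real BDB routines, is roughly:

enum put_status { PUT_OK, PUT_NEEDSPLIT, PUT_ERR };

/*
 * Sketch only: retry an insert until it either succeeds or fails hard,
 * splitting on demand.  try_insert and do_split are hypothetical hooks;
 * *rootp lets the split report where the retry may safely restart.
 */
static int
put_with_retry(void *tree, const void *key,
    enum put_status (*try_insert)(void *, const void *, long *),
    int (*do_split)(void *, const void *, long *))
{
	long root_pgno = 0;		/* 0: descend from the true root. */

	for (;;)
		switch (try_insert(tree, key, &root_pgno)) {
		case PUT_OK:
			return (0);
		case PUT_NEEDSPLIT:
			if (do_split(tree, key, &root_pgno) != 0)
				return (-1);
			break;		/* Retry from root_pgno. */
		default:
			return (-1);
		}
}
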
@@ -1361,22 +2009,22 @@ done: /*
* __bam_c_rget --
* Return the record number for a cursor.
*
- * PUBLIC: int __bam_c_rget __P((DBC *, DBT *, u_int32_t));
+ * PUBLIC: int __bam_c_rget __P((DBC *, DBT *));
*/
int
-__bam_c_rget(dbc, data, flags)
+__bam_c_rget(dbc, data)
DBC *dbc;
DBT *data;
- u_int32_t flags;
{
BTREE_CURSOR *cp;
DB *dbp;
DBT dbt;
+ DB_MPOOLFILE *mpf;
db_recno_t recno;
int exact, ret;
- COMPQUIET(flags, 0);
dbp = dbc->dbp;
+ mpf = dbp->mpf;
cp = (BTREE_CURSOR *)dbc->internal;
/*
@@ -1384,24 +2032,24 @@ __bam_c_rget(dbc, data, flags)
* Get a copy of the key.
* Release the page, making sure we don't release it twice.
*/
- if ((ret = memp_fget(dbp->mpf, &cp->pgno, 0, &cp->page)) != 0)
+ if ((ret = mpf->get(mpf, &cp->pgno, 0, &cp->page)) != 0)
return (ret);
memset(&dbt, 0, sizeof(DBT));
if ((ret = __db_ret(dbp, cp->page,
- cp->indx, &dbt, &dbc->rkey.data, &dbc->rkey.ulen)) != 0)
+ cp->indx, &dbt, &dbc->rkey->data, &dbc->rkey->ulen)) != 0)
goto err;
- ret = memp_fput(dbp->mpf, cp->page, 0);
+ ret = mpf->put(mpf, cp->page, 0);
cp->page = NULL;
if (ret != 0)
return (ret);
- if ((ret = __bam_search(dbc, &dbt,
+ if ((ret = __bam_search(dbc, PGNO_INVALID, &dbt,
F_ISSET(dbc, DBC_RMW) ? S_FIND_WR : S_FIND,
1, &recno, &exact)) != 0)
goto err;
- ret = __db_retcopy(dbp, data,
- &recno, sizeof(recno), &dbc->rdata.data, &dbc->rdata.ulen);
+ ret = __db_retcopy(dbp->dbenv, data,
+ &recno, sizeof(recno), &dbc->rdata->data, &dbc->rdata->ulen);
/* Release the stack. */
err: __bam_stkrel(dbc, 0);
@@ -1444,17 +2092,15 @@ __bam_c_first(dbc)
DBC *dbc;
{
BTREE_CURSOR *cp;
- DB *dbp;
db_pgno_t pgno;
int ret;
- dbp = dbc->dbp;
cp = (BTREE_CURSOR *)dbc->internal;
ret = 0;
/* Walk down the left-hand side of the tree. */
for (pgno = cp->root;;) {
- ACQUIRE_CUR_SET(dbc, DB_LOCK_READ, pgno, ret);
+ ACQUIRE_CUR_COUPLE(dbc, DB_LOCK_READ, pgno, ret);
if (ret != 0)
return (ret);
@@ -1462,7 +2108,7 @@ __bam_c_first(dbc)
if (ISLEAF(cp->page))
break;
- pgno = GET_BINTERNAL(cp->page, 0)->pgno;
+ pgno = GET_BINTERNAL(dbc->dbp, cp->page, 0)->pgno;
}
/* If we want a write lock instead of a read lock, get it now. */
@@ -1472,9 +2118,11 @@ __bam_c_first(dbc)
return (ret);
}
+ cp->indx = 0;
+
/* If on an empty page or a deleted record, move to the next one. */
if (NUM_ENT(cp->page) == 0 || IS_CUR_DELETED(dbc))
- if ((ret = __bam_c_next(dbc, 0)) != 0)
+ if ((ret = __bam_c_next(dbc, 0, 0)) != 0)
return (ret);
return (0);
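
The positioning loop above (and its mirror in __bam_c_last below) is a straight descent along one edge of the tree: lock-couple down to the first, or last, child until a leaf is reached. On a toy in-memory node type, not the BDB page layout, the left-edge walk is simply:

struct toy_node {
	int is_leaf;
	int nchildren;
	struct toy_node **children;	/* valid when !is_leaf */
};

/* Follow children[0] until a leaf; returns NULL on a malformed tree. */
static struct toy_node *
leftmost_leaf(struct toy_node *root)
{
	struct toy_node *n;

	for (n = root; n != NULL && !n->is_leaf;)
		n = n->nchildren > 0 ? n->children[0] : NULL;
	return (n);
}
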
@@ -1489,17 +2137,15 @@ __bam_c_last(dbc)
DBC *dbc;
{
BTREE_CURSOR *cp;
- DB *dbp;
db_pgno_t pgno;
int ret;
- dbp = dbc->dbp;
cp = (BTREE_CURSOR *)dbc->internal;
ret = 0;
/* Walk down the right-hand side of the tree. */
for (pgno = cp->root;;) {
- ACQUIRE_CUR_SET(dbc, DB_LOCK_READ, pgno, ret);
+ ACQUIRE_CUR_COUPLE(dbc, DB_LOCK_READ, pgno, ret);
if (ret != 0)
return (ret);
@@ -1507,8 +2153,8 @@ __bam_c_last(dbc)
if (ISLEAF(cp->page))
break;
- pgno =
- GET_BINTERNAL(cp->page, NUM_ENT(cp->page) - O_INDX)->pgno;
+ pgno = GET_BINTERNAL(dbc->dbp, cp->page,
+ NUM_ENT(cp->page) - O_INDX)->pgno;
}
/* If we want a write lock instead of a read lock, get it now. */
@@ -1535,18 +2181,16 @@ __bam_c_last(dbc)
* Move to the next record.
*/
static int
-__bam_c_next(dbc, initial_move)
+__bam_c_next(dbc, initial_move, deleted_okay)
DBC *dbc;
- int initial_move;
+ int initial_move, deleted_okay;
{
BTREE_CURSOR *cp;
- DB *dbp;
db_indx_t adjust;
db_lockmode_t lock_mode;
db_pgno_t pgno;
int ret;
- dbp = dbc->dbp;
cp = (BTREE_CURSOR *)dbc->internal;
ret = 0;
@@ -1566,7 +2210,7 @@ __bam_c_next(dbc, initial_move)
F_ISSET(dbc, DBC_RMW) ? DB_LOCK_WRITE : DB_LOCK_READ;
}
if (cp->page == NULL) {
- ACQUIRE_CUR(dbc, lock_mode, ret);
+ ACQUIRE_CUR(dbc, lock_mode, cp->pgno, ret);
if (ret != 0)
return (ret);
}
@@ -1587,12 +2231,13 @@ __bam_c_next(dbc, initial_move)
= NEXT_PGNO(cp->page)) == PGNO_INVALID)
return (DB_NOTFOUND);
- ACQUIRE_CUR_SET(dbc, lock_mode, pgno, ret);
+ ACQUIRE_CUR(dbc, lock_mode, pgno, ret);
if (ret != 0)
return (ret);
+ cp->indx = 0;
continue;
}
- if (IS_CUR_DELETED(dbc)) {
+ if (!deleted_okay && IS_CUR_DELETED(dbc)) {
cp->indx += adjust;
continue;
}
@@ -1610,13 +2255,11 @@ __bam_c_prev(dbc)
DBC *dbc;
{
BTREE_CURSOR *cp;
- DB *dbp;
db_indx_t adjust;
db_lockmode_t lock_mode;
db_pgno_t pgno;
int ret;
- dbp = dbc->dbp;
cp = (BTREE_CURSOR *)dbc->internal;
ret = 0;
@@ -1636,7 +2279,7 @@ __bam_c_prev(dbc)
F_ISSET(dbc, DBC_RMW) ? DB_LOCK_WRITE : DB_LOCK_READ;
}
if (cp->page == NULL) {
- ACQUIRE_CUR(dbc, lock_mode, ret);
+ ACQUIRE_CUR(dbc, lock_mode, cp->pgno, ret);
if (ret != 0)
return (ret);
}
@@ -1648,7 +2291,7 @@ __bam_c_prev(dbc)
PREV_PGNO(cp->page)) == PGNO_INVALID)
return (DB_NOTFOUND);
- ACQUIRE_CUR_SET(dbc, lock_mode, pgno, ret);
+ ACQUIRE_CUR(dbc, lock_mode, pgno, ret);
if (ret != 0)
return (ret);
@@ -1671,8 +2314,9 @@ __bam_c_prev(dbc)
* Move to a specified record.
*/
static int
-__bam_c_search(dbc, key, flags, exactp)
+__bam_c_search(dbc, root_pgno, key, flags, exactp)
DBC *dbc;
+ db_pgno_t root_pgno;
const DBT *key;
u_int32_t flags;
int *exactp;
@@ -1681,7 +2325,7 @@ __bam_c_search(dbc, key, flags, exactp)
BTREE_CURSOR *cp;
DB *dbp;
PAGE *h;
- db_indx_t indx;
+ db_indx_t indx, *inp;
db_pgno_t bt_lpgno;
db_recno_t recno;
u_int32_t sflags;
@@ -1712,6 +2356,9 @@ __bam_c_search(dbc, key, flags, exactp)
case DB_GET_BOTH:
sflags = (F_ISSET(dbc, DBC_RMW) ? S_FIND_WR : S_FIND) | S_EXACT;
goto search;
+ case DB_GET_BOTH_RANGE:
+ sflags = (F_ISSET(dbc, DBC_RMW) ? S_FIND_WR : S_FIND);
+ goto search;
case DB_SET_RANGE:
sflags =
(F_ISSET(dbc, DBC_RMW) ? S_WRITE : S_READ) | S_DUPFIRST;
@@ -1758,6 +2405,7 @@ fast_search: /*
if (ret != 0)
goto fast_miss;
+ inp = P_INP(dbp, h);
/*
* It's okay if the page type isn't right or it's empty, it
* just means that the world changed.
@@ -1796,7 +2444,7 @@ fast_search: /*
if (flags == DB_KEYLAST)
goto fast_hit;
for (;
- indx > 0 && h->inp[indx - P_INDX] == h->inp[indx];
+ indx > 0 && inp[indx - P_INDX] == inp[indx];
indx -= P_INDX)
;
goto fast_hit;
@@ -1823,7 +2471,7 @@ try_begin: if (h->prev_pgno == PGNO_INVALID) {
goto fast_hit;
for (;
indx < (db_indx_t)(NUM_ENT(h) - P_INDX) &&
- h->inp[indx] == h->inp[indx + P_INDX];
+ inp[indx] == inp[indx + P_INDX];
indx += P_INDX)
;
goto fast_hit;
@@ -1852,8 +2500,8 @@ fast_miss: /*
if (ret != 0)
return (ret);
-search: if ((ret =
- __bam_search(dbc, key, sflags, 1, NULL, exactp)) != 0)
+search: if ((ret = __bam_search(dbc, root_pgno,
+ key, sflags, 1, NULL, exactp)) != 0)
return (ret);
break;
default:
@@ -1870,12 +2518,15 @@ search: if ((ret =
/*
* If we inserted a key into the first or last slot of the tree,
* remember where it was so we can do it more quickly next time.
+ * If there are duplicates and we are inserting into the last slot,
+ * the cursor will point _to_ the last item, not after it, which
+ * is why we subtract P_INDX below.
*/
if (TYPE(cp->page) == P_LBTREE &&
(flags == DB_KEYFIRST || flags == DB_KEYLAST))
t->bt_lpgno =
(NEXT_PGNO(cp->page) == PGNO_INVALID &&
- cp->indx >= NUM_ENT(cp->page)) ||
+ cp->indx >= NUM_ENT(cp->page) - P_INDX) ||
(PREV_PGNO(cp->page) == PGNO_INVALID &&
cp->indx == 0) ? cp->pgno : PGNO_INVALID;
return (0);
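
The bookkeeping above is the "last page inserted" shortcut: when an insert lands in an edge slot of an edge leaf (with duplicates, the last useful slot is NUM_ENT - P_INDX, hence the subtraction), the leaf's page number is cached so the next append or prepend can try that page before doing a full tree descent. A hedged sketch of the caching rule, with illustrative field names only:

struct edge_cache {
	unsigned long lpgno;		/* 0 means "no cached page". */
};

struct leaf_info {
	unsigned long pgno;
	unsigned int  nslots;		/* index slots in use */
	int           has_next;		/* right sibling exists */
	int           has_prev;		/* left sibling exists */
};

/* Remember the page only if the insert hit an edge slot of an edge leaf. */
static void
remember_edge_insert(struct edge_cache *c,
    const struct leaf_info *h, unsigned int indx, unsigned int pair_size)
{
	if ((!h->has_next && indx + pair_size >= h->nslots) ||
	    (!h->has_prev && indx == 0))
		c->lpgno = h->pgno;
	else
		c->lpgno = 0;
}
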
@@ -1893,11 +2544,13 @@ __bam_c_physdel(dbc)
DB *dbp;
DBT key;
DB_LOCK lock;
+ DB_MPOOLFILE *mpf;
PAGE *h;
db_pgno_t pgno;
int delete_page, empty_page, exact, level, ret;
dbp = dbc->dbp;
+ mpf = dbp->mpf;
cp = (BTREE_CURSOR *)dbc->internal;
delete_page = empty_page = ret = 0;
@@ -1911,7 +2564,7 @@ __bam_c_physdel(dbc)
* space will never be reused unless the exact same key is specified.
*/
if (delete_page &&
- !F_ISSET(dbc, DBC_OPD) && F_ISSET(dbp, DB_BT_REVSPLIT))
+ !F_ISSET(dbc, DBC_OPD) && F_ISSET(dbp, DB_AM_REVSPLITOFF))
delete_page = 0;
/*
@@ -1926,11 +2579,17 @@ __bam_c_physdel(dbc)
* To delete a leaf page other than an empty root page, we need a
* copy of a key from the page. Use the 0th page index since it's
* the last key the page held.
+ *
+ * !!!
+ * Note that because __bam_c_physdel is always called from a cursor
+ * close, it should be safe to use the cursor's own "my_rkey" memory
+ * to temporarily hold this key. We shouldn't own any returned-data
+ * memory of interest--if we do, we're in trouble anyway.
*/
if (delete_page) {
memset(&key, 0, sizeof(DBT));
if ((ret = __db_ret(dbp, cp->page,
- 0, &key, &dbc->rkey.data, &dbc->rkey.ulen)) != 0)
+ 0, &key, &dbc->my_rkey.data, &dbc->my_rkey.ulen)) != 0)
return (ret);
}
@@ -1940,7 +2599,7 @@ __bam_c_physdel(dbc)
* !!!
* The following operations to delete a page may deadlock. The easy
* scenario is if we're deleting an item because we're closing cursors
- * because we've already deadlocked and want to call txn_abort(). If
+ * because we've already deadlocked and want to call txn->abort. If
* we fail due to deadlock, we'll leave a locked, possibly empty page
* in the tree, which won't be empty long because we'll undo the delete
* when we undo the transaction's modifications.
@@ -1977,8 +2636,8 @@ __bam_c_physdel(dbc)
*/
for (level = LEAFLEVEL;; ++level) {
/* Acquire a page and its parent, locked. */
- if ((ret = __bam_search(
- dbc, &key, S_WRPAIR, level, NULL, &exact)) != 0)
+ if ((ret = __bam_search(dbc, PGNO_INVALID,
+ &key, S_WRPAIR, level, NULL, &exact)) != 0)
return (ret);
/*
@@ -2031,19 +2690,19 @@ __bam_c_physdel(dbc)
*/
switch (TYPE(h)) {
case P_IBTREE:
- pgno = GET_BINTERNAL(h, 0)->pgno;
+ pgno = GET_BINTERNAL(dbp, h, 0)->pgno;
break;
case P_IRECNO:
- pgno = GET_RINTERNAL(h, 0)->pgno;
+ pgno = GET_RINTERNAL(dbp, h, 0)->pgno;
break;
default:
- return (__db_pgfmt(dbp, PGNO(h)));
+ return (__db_pgfmt(dbp->dbenv, PGNO(h)));
}
if ((ret =
__db_lget(dbc, 0, pgno, DB_LOCK_WRITE, 0, &lock)) != 0)
break;
- if ((ret = memp_fget(dbp->mpf, &pgno, 0, &h)) != 0)
+ if ((ret = mpf->get(mpf, &pgno, 0, &h)) != 0)
break;
BT_STK_PUSH(dbp->dbenv, cp, h, 0, lock, DB_LOCK_WRITE, ret);
if (ret != 0)
@@ -2076,10 +2735,12 @@ __bam_c_getstack(dbc)
BTREE_CURSOR *cp;
DB *dbp;
DBT dbt;
+ DB_MPOOLFILE *mpf;
PAGE *h;
int exact, ret, t_ret;
dbp = dbc->dbp;
+ mpf = dbp->mpf;
cp = (BTREE_CURSOR *)dbc->internal;
/*
@@ -2087,21 +2748,22 @@ __bam_c_getstack(dbc)
* routine has to already hold a read lock on the page, so there
* is no additional lock to acquire.
*/
- if ((ret = memp_fget(dbp->mpf, &cp->pgno, 0, &h)) != 0)
+ if ((ret = mpf->get(mpf, &cp->pgno, 0, &h)) != 0)
return (ret);
/* Get a copy of a key from the page. */
memset(&dbt, 0, sizeof(DBT));
if ((ret = __db_ret(dbp,
- h, 0, &dbt, &dbc->rkey.data, &dbc->rkey.ulen)) != 0)
+ h, 0, &dbt, &dbc->rkey->data, &dbc->rkey->ulen)) != 0)
goto err;
/* Get a write-locked stack for the page. */
exact = 0;
- ret = __bam_search(dbc, &dbt, S_KEYFIRST, 1, NULL, &exact);
+ ret = __bam_search(dbc, PGNO_INVALID,
+ &dbt, S_KEYFIRST, 1, NULL, &exact);
err: /* Discard the key and the page. */
- if ((t_ret = memp_fput(dbp->mpf, h, 0)) != 0 && ret == 0)
+ if ((t_ret = mpf->put(mpf, h, 0)) != 0 && ret == 0)
ret = t_ret;
return (ret);
@@ -2122,7 +2784,8 @@ __bam_isopd(dbc, pgnop)
if (TYPE(dbc->internal->page) != P_LBTREE)
return (0);
- bo = GET_BOVERFLOW(dbc->internal->page, dbc->internal->indx + O_INDX);
+ bo = GET_BOVERFLOW(dbc->dbp,
+ dbc->internal->page, dbc->internal->indx + O_INDX);
if (B_TYPE(bo->type) == B_DUPLICATE) {
*pgnop = bo->pgno;
return (1);
diff --git a/bdb/btree/bt_delete.c b/bdb/btree/bt_delete.c
index 9725887882a..8c76ead2922 100644
--- a/bdb/btree/bt_delete.c
+++ b/bdb/btree/bt_delete.c
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Copyright (c) 1996-2002
* Sleepycat Software. All rights reserved.
*/
/*
@@ -43,7 +43,7 @@
#include "db_config.h"
#ifndef lint
-static const char revid[] = "$Id: bt_delete.c,v 11.31 2001/01/17 18:48:46 bostic Exp $";
+static const char revid[] = "$Id: bt_delete.c,v 11.44 2002/07/03 19:03:49 bostic Exp $";
#endif /* not lint */
#ifndef NO_SYSTEM_INCLUDES
@@ -53,88 +53,10 @@ static const char revid[] = "$Id: bt_delete.c,v 11.31 2001/01/17 18:48:46 bostic
#endif
#include "db_int.h"
-#include "db_page.h"
-#include "db_shash.h"
-#include "btree.h"
-#include "lock.h"
-
-/*
- * __bam_delete --
- * Delete the items referenced by a key.
- *
- * PUBLIC: int __bam_delete __P((DB *, DB_TXN *, DBT *, u_int32_t));
- */
-int
-__bam_delete(dbp, txn, key, flags)
- DB *dbp;
- DB_TXN *txn;
- DBT *key;
- u_int32_t flags;
-{
- DBC *dbc;
- DBT lkey;
- DBT data;
- u_int32_t f_init, f_next;
- int ret, t_ret;
-
- PANIC_CHECK(dbp->dbenv);
- DB_ILLEGAL_BEFORE_OPEN(dbp, "DB->del");
- DB_CHECK_TXN(dbp, txn);
-
- /* Check for invalid flags. */
- if ((ret =
- __db_delchk(dbp, key, flags, F_ISSET(dbp, DB_AM_RDONLY))) != 0)
- return (ret);
-
- /* Allocate a cursor. */
- if ((ret = dbp->cursor(dbp, txn, &dbc, DB_WRITELOCK)) != 0)
- return (ret);
-
- DEBUG_LWRITE(dbc, txn, "bam_delete", key, NULL, flags);
-
- /*
- * Walk a cursor through the key/data pairs, deleting as we go. Set
- * the DB_DBT_USERMEM flag, as this might be a threaded application
- * and the flags checking will catch us. We don't actually want the
- * keys or data, so request a partial of length 0.
- */
- memset(&lkey, 0, sizeof(lkey));
- F_SET(&lkey, DB_DBT_USERMEM | DB_DBT_PARTIAL);
- memset(&data, 0, sizeof(data));
- F_SET(&data, DB_DBT_USERMEM | DB_DBT_PARTIAL);
-
- /*
- * If locking (and we haven't already acquired CDB locks), set the
- * read-modify-write flag.
- */
- f_init = DB_SET;
- f_next = DB_NEXT_DUP;
- if (STD_LOCKING(dbc)) {
- f_init |= DB_RMW;
- f_next |= DB_RMW;
- }
-
- /* Walk through the set of key/data pairs, deleting as we go. */
- if ((ret = dbc->c_get(dbc, key, &data, f_init)) != 0)
- goto err;
- for (;;) {
- if ((ret = dbc->c_del(dbc, 0)) != 0)
- goto err;
- if ((ret = dbc->c_get(dbc, &lkey, &data, f_next)) != 0) {
- if (ret == DB_NOTFOUND) {
- ret = 0;
- break;
- }
- goto err;
- }
- }
-
-err: /* Discard the cursor. */
- if ((t_ret = dbc->c_close(dbc)) != 0 && ret == 0)
- ret = t_ret;
-
- return (ret);
-}
+#include "dbinc/db_page.h"
+#include "dbinc/db_shash.h"
+#include "dbinc/btree.h"
+#include "dbinc/lock.h"
/*
* __bam_ditem --
@@ -151,14 +73,18 @@ __bam_ditem(dbc, h, indx)
BINTERNAL *bi;
BKEYDATA *bk;
DB *dbp;
+ DB_MPOOLFILE *mpf;
u_int32_t nbytes;
int ret;
+ db_indx_t *inp;
dbp = dbc->dbp;
+ mpf = dbp->mpf;
+ inp = P_INP(dbp, h);
switch (TYPE(h)) {
case P_IBTREE:
- bi = GET_BINTERNAL(h, indx);
+ bi = GET_BINTERNAL(dbp, h, indx);
switch (B_TYPE(bi->type)) {
case B_DUPLICATE:
case B_KEYDATA:
@@ -171,7 +97,7 @@ __bam_ditem(dbc, h, indx)
return (ret);
break;
default:
- return (__db_pgfmt(dbp, PGNO(h)));
+ return (__db_pgfmt(dbp->dbenv, PGNO(h)));
}
break;
case P_IRECNO:
@@ -195,7 +121,7 @@ __bam_ditem(dbc, h, indx)
* won't work!
*/
if (indx + P_INDX < (u_int32_t)NUM_ENT(h) &&
- h->inp[indx] == h->inp[indx + P_INDX])
+ inp[indx] == inp[indx + P_INDX])
return (__bam_adjindx(dbc,
h, indx, indx + O_INDX, 0));
/*
@@ -203,14 +129,14 @@ __bam_ditem(dbc, h, indx)
* doesn't matter if we delete the key item before or
* after the data item for the purposes of this one.
*/
- if (indx > 0 && h->inp[indx] == h->inp[indx - P_INDX])
+ if (indx > 0 && inp[indx] == inp[indx - P_INDX])
return (__bam_adjindx(dbc,
h, indx, indx - P_INDX, 0));
}
/* FALLTHROUGH */
case P_LDUP:
case P_LRECNO:
- bk = GET_BKEYDATA(h, indx);
+ bk = GET_BKEYDATA(dbp, h, indx);
switch (B_TYPE(bk->type)) {
case B_DUPLICATE:
nbytes = BOVERFLOW_SIZE;
@@ -218,24 +144,24 @@ __bam_ditem(dbc, h, indx)
case B_OVERFLOW:
nbytes = BOVERFLOW_SIZE;
if ((ret = __db_doff(
- dbc, (GET_BOVERFLOW(h, indx))->pgno)) != 0)
+ dbc, (GET_BOVERFLOW(dbp, h, indx))->pgno)) != 0)
return (ret);
break;
case B_KEYDATA:
nbytes = BKEYDATA_SIZE(bk->len);
break;
default:
- return (__db_pgfmt(dbp, PGNO(h)));
+ return (__db_pgfmt(dbp->dbenv, PGNO(h)));
}
break;
default:
- return (__db_pgfmt(dbp, PGNO(h)));
+ return (__db_pgfmt(dbp->dbenv, PGNO(h)));
}
/* Delete the item and mark the page dirty. */
if ((ret = __db_ditem(dbc, h, indx, nbytes)) != 0)
return (ret);
- if ((ret = memp_fset(dbp->mpf, h, DB_MPOOL_DIRTY)) != 0)
+ if ((ret = mpf->set(mpf, h, DB_MPOOL_DIRTY)) != 0)
return (ret);
return (0);
@@ -255,33 +181,37 @@ __bam_adjindx(dbc, h, indx, indx_copy, is_insert)
int is_insert;
{
DB *dbp;
- db_indx_t copy;
+ DB_MPOOLFILE *mpf;
+ db_indx_t copy, *inp;
int ret;
dbp = dbc->dbp;
+ mpf = dbp->mpf;
+ inp = P_INP(dbp, h);
/* Log the change. */
- if (DB_LOGGING(dbc) &&
- (ret = __bam_adj_log(dbp->dbenv, dbc->txn, &LSN(h),
- 0, dbp->log_fileid, PGNO(h), &LSN(h), indx, indx_copy,
- (u_int32_t)is_insert)) != 0)
- return (ret);
+ if (DBC_LOGGING(dbc)) {
+ if ((ret = __bam_adj_log(dbp, dbc->txn, &LSN(h), 0,
+ PGNO(h), &LSN(h), indx, indx_copy, (u_int32_t)is_insert)) != 0)
+ return (ret);
+ } else
+ LSN_NOT_LOGGED(LSN(h));
/* Shuffle the indices and mark the page dirty. */
if (is_insert) {
- copy = h->inp[indx_copy];
+ copy = inp[indx_copy];
if (indx != NUM_ENT(h))
- memmove(&h->inp[indx + O_INDX], &h->inp[indx],
+ memmove(&inp[indx + O_INDX], &inp[indx],
sizeof(db_indx_t) * (NUM_ENT(h) - indx));
- h->inp[indx] = copy;
+ inp[indx] = copy;
++NUM_ENT(h);
} else {
--NUM_ENT(h);
if (indx != NUM_ENT(h))
- memmove(&h->inp[indx], &h->inp[indx + O_INDX],
+ memmove(&inp[indx], &inp[indx + O_INDX],
sizeof(db_indx_t) * (NUM_ENT(h) - indx));
}
- if ((ret = memp_fset(dbp->mpf, h, DB_MPOOL_DIRTY)) != 0)
+ if ((ret = mpf->set(mpf, h, DB_MPOOL_DIRTY)) != 0)
return (ret);
return (0);
@@ -303,6 +233,7 @@ __bam_dpages(dbc, stack_epg)
DB *dbp;
DBT a, b;
DB_LOCK c_lock, p_lock;
+ DB_MPOOLFILE *mpf;
EPG *epg;
PAGE *child, *parent;
db_indx_t nitems;
@@ -311,6 +242,7 @@ __bam_dpages(dbc, stack_epg)
int done, ret, t_ret;
dbp = dbc->dbp;
+ mpf = dbp->mpf;
cp = (BTREE_CURSOR *)dbc->internal;
/*
@@ -328,8 +260,7 @@ __bam_dpages(dbc, stack_epg)
*/
ret = 0;
for (epg = cp->sp; epg < stack_epg; ++epg) {
- if ((t_ret =
- memp_fput(dbp->mpf, epg->page, 0)) != 0 && ret == 0)
+ if ((t_ret = mpf->put(mpf, epg->page, 0)) != 0 && ret == 0)
ret = t_ret;
(void)__TLPUT(dbc, epg->lock);
}
@@ -364,7 +295,7 @@ __bam_dpages(dbc, stack_epg)
pgno = PGNO(epg->page);
nitems = NUM_ENT(epg->page);
- if ((ret = memp_fput(dbp->mpf, epg->page, 0)) != 0)
+ if ((ret = mpf->put(mpf, epg->page, 0)) != 0)
goto err_inc;
(void)__TLPUT(dbc, epg->lock);
@@ -394,7 +325,7 @@ __bam_dpages(dbc, stack_epg)
err_inc: ++epg;
err: for (; epg <= cp->csp; ++epg) {
if (epg->page != NULL)
- (void)memp_fput(dbp->mpf, epg->page, 0);
+ (void)mpf->put(mpf, epg->page, 0);
(void)__TLPUT(dbc, epg->lock);
}
BT_STK_CLR(cp);
@@ -415,14 +346,15 @@ err: for (; epg <= cp->csp; ++epg) {
for (done = 0; !done;) {
/* Initialize. */
parent = child = NULL;
- p_lock.off = c_lock.off = LOCK_INVALID;
+ LOCK_INIT(p_lock);
+ LOCK_INIT(c_lock);
/* Lock the root. */
pgno = root_pgno;
if ((ret =
__db_lget(dbc, 0, pgno, DB_LOCK_WRITE, 0, &p_lock)) != 0)
goto stop;
- if ((ret = memp_fget(dbp->mpf, &pgno, 0, &parent)) != 0)
+ if ((ret = mpf->get(mpf, &pgno, 0, &parent)) != 0)
goto stop;
if (NUM_ENT(parent) != 1)
@@ -434,7 +366,7 @@ err: for (; epg <= cp->csp; ++epg) {
* If this is overflow, then try to delete it.
* The child may or may not still point at it.
*/
- bi = GET_BINTERNAL(parent, 0);
+ bi = GET_BINTERNAL(dbp, parent, 0);
if (B_TYPE(bi->type) == B_OVERFLOW)
if ((ret = __db_doff(dbc,
((BOVERFLOW *)bi->data)->pgno)) != 0)
@@ -442,7 +374,7 @@ err: for (; epg <= cp->csp; ++epg) {
pgno = bi->pgno;
break;
case P_IRECNO:
- pgno = GET_RINTERNAL(parent, 0)->pgno;
+ pgno = GET_RINTERNAL(dbp, parent, 0)->pgno;
break;
default:
goto stop;
@@ -452,24 +384,24 @@ err: for (; epg <= cp->csp; ++epg) {
if ((ret =
__db_lget(dbc, 0, pgno, DB_LOCK_WRITE, 0, &c_lock)) != 0)
goto stop;
- if ((ret = memp_fget(dbp->mpf, &pgno, 0, &child)) != 0)
+ if ((ret = mpf->get(mpf, &pgno, 0, &child)) != 0)
goto stop;
/* Log the change. */
- if (DB_LOGGING(dbc)) {
+ if (DBC_LOGGING(dbc)) {
memset(&a, 0, sizeof(a));
a.data = child;
a.size = dbp->pgsize;
memset(&b, 0, sizeof(b));
- b.data = P_ENTRY(parent, 0);
+ b.data = P_ENTRY(dbp, parent, 0);
b.size = TYPE(parent) == P_IRECNO ? RINTERNAL_SIZE :
BINTERNAL_SIZE(((BINTERNAL *)b.data)->len);
- if ((ret =
- __bam_rsplit_log(dbp->dbenv, dbc->txn, &child->lsn,
- 0, dbp->log_fileid, PGNO(child), &a, PGNO(parent),
- RE_NREC(parent), &b, &parent->lsn)) != 0)
+ if ((ret = __bam_rsplit_log(dbp, dbc->txn,
+ &child->lsn, 0, PGNO(child), &a, PGNO(parent),
+ RE_NREC(parent), &b, &parent->lsn)) != 0)
goto stop;
- }
+ } else
+ LSN_NOT_LOGGED(child->lsn);
/*
* Make the switch.
@@ -491,9 +423,9 @@ err: for (; epg <= cp->csp; ++epg) {
RE_NREC_SET(parent, rcnt);
/* Mark the pages dirty. */
- if ((ret = memp_fset(dbp->mpf, parent, DB_MPOOL_DIRTY)) != 0)
+ if ((ret = mpf->set(mpf, parent, DB_MPOOL_DIRTY)) != 0)
goto stop;
- if ((ret = memp_fset(dbp->mpf, child, DB_MPOOL_DIRTY)) != 0)
+ if ((ret = mpf->set(mpf, child, DB_MPOOL_DIRTY)) != 0)
goto stop;
/* Adjust the cursors. */
@@ -514,15 +446,13 @@ err: for (; epg <= cp->csp; ++epg) {
if (0) {
stop: done = 1;
}
- if (p_lock.off != LOCK_INVALID)
- (void)__TLPUT(dbc, p_lock);
+ (void)__TLPUT(dbc, p_lock);
if (parent != NULL &&
- (t_ret = memp_fput(dbp->mpf, parent, 0)) != 0 && ret == 0)
+ (t_ret = mpf->put(mpf, parent, 0)) != 0 && ret == 0)
ret = t_ret;
- if (c_lock.off != LOCK_INVALID)
- (void)__TLPUT(dbc, c_lock);
+ (void)__TLPUT(dbc, c_lock);
if (child != NULL &&
- (t_ret = memp_fput(dbp->mpf, child, 0)) != 0 && ret == 0)
+ (t_ret = mpf->put(mpf, child, 0)) != 0 && ret == 0)
ret = t_ret;
}
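
The loop that follows the page deletes in __bam_dpages is the reverse split: whenever the (sub)tree root is an internal page left with a single child, the child's contents are copied over the root and the child page is freed, shortening the tree by one level. On a toy node type, with none of the locking or logging shown above, the core move is:

#include <stdlib.h>

struct toy_page {
	int is_leaf;
	int nentries;
	struct toy_page *only_child;	/* meaningful when nentries == 1 */
};

/* Collapse single-child internal roots until the root has >1 entry. */
static void
collapse_root(struct toy_page *root)
{
	struct toy_page *child;

	while (!root->is_leaf && root->nentries == 1) {
		child = root->only_child;
		*root = *child;		/* "Make the switch": copy up. */
		free(child);		/* The old child page is redundant. */
	}
}
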
diff --git a/bdb/btree/bt_method.c b/bdb/btree/bt_method.c
index 5e3af27d033..aa27ed6bab9 100644
--- a/bdb/btree/bt_method.c
+++ b/bdb/btree/bt_method.c
@@ -1,14 +1,14 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1999, 2000
+ * Copyright (c) 1999-2002
* Sleepycat Software. All rights reserved.
*/
#include "db_config.h"
#ifndef lint
-static const char revid[] = "$Id: bt_method.c,v 11.20 2000/11/30 00:58:28 ubell Exp $";
+static const char revid[] = "$Id: bt_method.c,v 11.29 2002/04/21 13:17:04 margo Exp $";
#endif /* not lint */
#ifndef NO_SYSTEM_INCLUDES
@@ -16,9 +16,9 @@ static const char revid[] = "$Id: bt_method.c,v 11.20 2000/11/30 00:58:28 ubell
#endif
#include "db_int.h"
-#include "db_page.h"
-#include "btree.h"
-#include "qam.h"
+#include "dbinc/db_page.h"
+#include "dbinc/btree.h"
+#include "dbinc/qam.h"
static int __bam_set_bt_compare
__P((DB *, int (*)(DB *, const DBT *, const DBT *)));
@@ -82,7 +82,8 @@ __bam_db_close(dbp)
{
BTREE *t;
- t = dbp->bt_internal;
+ if ((t = dbp->bt_internal) == NULL)
+ return (0);
/* Recno */
/* Close any backing source file descriptor. */
if (t->re_fp != NULL)
@@ -90,9 +91,9 @@ __bam_db_close(dbp)
/* Free any backing source file name. */
if (t->re_source != NULL)
- __os_freestr(t->re_source);
+ __os_free(dbp->dbenv, t->re_source);
- __os_free(t, sizeof(BTREE));
+ __os_free(dbp->dbenv, t);
dbp->bt_internal = NULL;
return (0);
@@ -127,7 +128,7 @@ __bam_set_flags(dbp, flagsp)
if (LF_ISSET(DB_DUP | DB_DUPSORT)) {
/* DB_DUP/DB_DUPSORT is incompatible with DB_RECNUM. */
- if (F_ISSET(dbp, DB_BT_RECNUM))
+ if (F_ISSET(dbp, DB_AM_RECNUM))
goto incompat;
if (LF_ISSET(DB_DUPSORT)) {
@@ -145,12 +146,12 @@ __bam_set_flags(dbp, flagsp)
if (F_ISSET(dbp, DB_AM_DUP))
goto incompat;
- F_SET(dbp, DB_BT_RECNUM);
+ F_SET(dbp, DB_AM_RECNUM);
LF_CLR(DB_RECNUM);
}
if (LF_ISSET(DB_REVSPLITOFF)) {
- F_SET(dbp, DB_BT_REVSPLIT);
+ F_SET(dbp, DB_AM_REVSPLITOFF);
LF_CLR(DB_REVSPLITOFF);
}
@@ -279,12 +280,12 @@ __ram_set_flags(dbp, flagsp)
DB_ILLEGAL_METHOD(dbp, DB_OK_RECNO);
if (LF_ISSET(DB_RENUMBER)) {
- F_SET(dbp, DB_RE_RENUMBER);
+ F_SET(dbp, DB_AM_RENUMBER);
LF_CLR(DB_RENUMBER);
}
if (LF_ISSET(DB_SNAPSHOT)) {
- F_SET(dbp, DB_RE_SNAPSHOT);
+ F_SET(dbp, DB_AM_SNAPSHOT);
LF_CLR(DB_SNAPSHOT);
}
@@ -310,7 +311,7 @@ __ram_set_re_delim(dbp, re_delim)
t = dbp->bt_internal;
t->re_delim = re_delim;
- F_SET(dbp, DB_RE_DELIMITER);
+ F_SET(dbp, DB_AM_DELIMITER);
return (0);
}
@@ -336,7 +337,7 @@ __ram_set_re_len(dbp, re_len)
q = dbp->q_internal;
q->re_len = re_len;
- F_SET(dbp, DB_RE_FIXEDLEN);
+ F_SET(dbp, DB_AM_FIXEDLEN);
return (0);
}
@@ -362,7 +363,7 @@ __ram_set_re_pad(dbp, re_pad)
q = dbp->q_internal;
q->re_pad = re_pad;
- F_SET(dbp, DB_RE_PAD);
+ F_SET(dbp, DB_AM_PAD);
return (0);
}
diff --git a/bdb/btree/bt_open.c b/bdb/btree/bt_open.c
index 405c1880f5e..0b72391c267 100644
--- a/bdb/btree/bt_open.c
+++ b/bdb/btree/bt_open.c
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Copyright (c) 1996-2002
* Sleepycat Software. All rights reserved.
*/
/*
@@ -43,7 +43,7 @@
#include "db_config.h"
#ifndef lint
-static const char revid[] = "$Id: bt_open.c,v 11.42 2000/11/30 00:58:28 ubell Exp $";
+static const char revid[] = "$Id: bt_open.c,v 11.76 2002/09/04 19:06:42 margo Exp $";
#endif /* not lint */
#ifndef NO_SYSTEM_INCLUDES
@@ -54,33 +54,38 @@ static const char revid[] = "$Id: bt_open.c,v 11.42 2000/11/30 00:58:28 ubell Ex
#endif
#include "db_int.h"
-#include "db_page.h"
-#include "db_swap.h"
-#include "btree.h"
-#include "db_shash.h"
-#include "lock.h"
-#include "log.h"
-#include "mp.h"
+#include "dbinc/crypto.h"
+#include "dbinc/db_page.h"
+#include "dbinc/db_swap.h"
+#include "dbinc/btree.h"
+#include "dbinc/db_shash.h"
+#include "dbinc/lock.h"
+#include "dbinc/log.h"
+#include "dbinc/fop.h"
+
+static void __bam_init_meta __P((DB *, BTMETA *, db_pgno_t, DB_LSN *));
/*
* __bam_open --
* Open a btree.
*
- * PUBLIC: int __bam_open __P((DB *, const char *, db_pgno_t, u_int32_t));
+ * PUBLIC: int __bam_open __P((DB *,
+ * PUBLIC: DB_TXN *, const char *, db_pgno_t, u_int32_t));
*/
int
-__bam_open(dbp, name, base_pgno, flags)
+__bam_open(dbp, txn, name, base_pgno, flags)
DB *dbp;
+ DB_TXN *txn;
const char *name;
db_pgno_t base_pgno;
u_int32_t flags;
{
BTREE *t;
+ COMPQUIET(name, NULL);
t = dbp->bt_internal;
/* Initialize the remaining fields/methods of the DB. */
- dbp->del = __bam_delete;
dbp->key_range = __bam_key_range;
dbp->stat = __bam_stat;
@@ -99,8 +104,8 @@ __bam_open(dbp, name, base_pgno, flags)
* Verify that the bt_minkey value specified won't cause the
* calculation of ovflsize to underflow [#2406] for this pagesize.
*/
- if (B_MINKEY_TO_OVFLSIZE(t->bt_minkey, dbp->pgsize) >
- B_MINKEY_TO_OVFLSIZE(DEFMINKEYPAGE, dbp->pgsize)) {
+ if (B_MINKEY_TO_OVFLSIZE(dbp, t->bt_minkey, dbp->pgsize) >
+ B_MINKEY_TO_OVFLSIZE(dbp, DEFMINKEYPAGE, dbp->pgsize)) {
__db_err(dbp->dbenv,
"bt_minkey value of %lu too high for page size of %lu",
(u_long)t->bt_minkey, (u_long)dbp->pgsize);
@@ -108,7 +113,7 @@ __bam_open(dbp, name, base_pgno, flags)
}
/* Start up the tree. */
- return (__bam_read_root(dbp, name, base_pgno, flags));
+ return (__bam_read_root(dbp, txn, base_pgno, flags));
}
/*
@@ -143,6 +148,7 @@ __bam_metachk(dbp, name, btm)
name, (u_long)vers);
return (DB_OLD_VERSION);
case 8:
+ case 9:
break;
default:
__db_err(dbenv,
@@ -187,13 +193,13 @@ __bam_metachk(dbp, name, btm)
if (F_ISSET(&btm->dbmeta, BTM_RECNUM)) {
if (dbp->type != DB_BTREE)
goto wrong_type;
- F_SET(dbp, DB_BT_RECNUM);
+ F_SET(dbp, DB_AM_RECNUM);
if ((ret = __db_fcchk(dbenv,
- "DB->open", dbp->flags, DB_AM_DUP, DB_BT_RECNUM)) != 0)
+ "DB->open", dbp->flags, DB_AM_DUP, DB_AM_RECNUM)) != 0)
return (ret);
} else
- if (F_ISSET(dbp, DB_BT_RECNUM)) {
+ if (F_ISSET(dbp, DB_AM_RECNUM)) {
__db_err(dbenv,
"%s: DB_RECNUM specified to open method but not set in database",
name);
@@ -203,9 +209,9 @@ __bam_metachk(dbp, name, btm)
if (F_ISSET(&btm->dbmeta, BTM_FIXEDLEN)) {
if (dbp->type != DB_RECNO)
goto wrong_type;
- F_SET(dbp, DB_RE_FIXEDLEN);
+ F_SET(dbp, DB_AM_FIXEDLEN);
} else
- if (F_ISSET(dbp, DB_RE_FIXEDLEN)) {
+ if (F_ISSET(dbp, DB_AM_FIXEDLEN)) {
__db_err(dbenv,
"%s: DB_FIXEDLEN specified to open method but not set in database",
name);
@@ -215,9 +221,9 @@ __bam_metachk(dbp, name, btm)
if (F_ISSET(&btm->dbmeta, BTM_RENUMBER)) {
if (dbp->type != DB_RECNO)
goto wrong_type;
- F_SET(dbp, DB_RE_RENUMBER);
+ F_SET(dbp, DB_AM_RENUMBER);
} else
- if (F_ISSET(dbp, DB_RE_RENUMBER)) {
+ if (F_ISSET(dbp, DB_AM_RENUMBER)) {
__db_err(dbenv,
"%s: DB_RENUMBER specified to open method but not set in database",
name);
@@ -266,116 +272,129 @@ wrong_type:
/*
* __bam_read_root --
- * Check (and optionally create) a tree.
+ * Read the root page and check a tree.
*
- * PUBLIC: int __bam_read_root __P((DB *, const char *, db_pgno_t, u_int32_t));
+ * PUBLIC: int __bam_read_root __P((DB *, DB_TXN *, db_pgno_t, u_int32_t));
*/
int
-__bam_read_root(dbp, name, base_pgno, flags)
+__bam_read_root(dbp, txn, base_pgno, flags)
DB *dbp;
- const char *name;
+ DB_TXN *txn;
db_pgno_t base_pgno;
u_int32_t flags;
{
BTMETA *meta;
BTREE *t;
DBC *dbc;
- DB_LSN orig_lsn;
DB_LOCK metalock;
- PAGE *root;
- int locked, ret, t_ret;
+ DB_MPOOLFILE *mpf;
+ int ret, t_ret;
- ret = 0;
- t = dbp->bt_internal;
meta = NULL;
- root = NULL;
- locked = 0;
+ t = dbp->bt_internal;
+ LOCK_INIT(metalock);
+ mpf = dbp->mpf;
+ ret = 0;
- /*
- * Get a cursor. If DB_CREATE is specified, we may be creating
- * the root page, and to do that safely in CDB we need a write
- * cursor. In STD_LOCKING mode, we'll synchronize using the
- * meta page lock instead.
- */
- if ((ret = dbp->cursor(dbp, dbp->open_txn,
- &dbc, LF_ISSET(DB_CREATE) && CDB_LOCKING(dbp->dbenv) ?
- DB_WRITECURSOR : 0)) != 0)
+ /* Get a cursor. */
+ if ((ret = dbp->cursor(dbp, txn, &dbc, 0)) != 0)
return (ret);
- /* Get, and optionally create the metadata page. */
+ /* Get the metadata page. */
if ((ret =
__db_lget(dbc, 0, base_pgno, DB_LOCK_READ, 0, &metalock)) != 0)
goto err;
- if ((ret = memp_fget(
- dbp->mpf, &base_pgno, DB_MPOOL_CREATE, (PAGE **)&meta)) != 0)
+ if ((ret = mpf->get(mpf, &base_pgno, 0, (PAGE **)&meta)) != 0)
goto err;
/*
- * If the magic number is correct, we're not creating the tree.
- * Correct any fields that may not be right. Note, all of the
- * local flags were set by DB->open.
+ * If the magic number is set, the tree has been created. Correct
+ * any fields that may not be right. Note, all of the local flags
+ * were set by DB->open.
+ *
+ * Otherwise, we'd better be in recovery or abort, in which case the
+ * metadata page will be created/initialized elsewhere.
*/
-again: if (meta->dbmeta.magic != 0) {
- t->bt_maxkey = meta->maxkey;
- t->bt_minkey = meta->minkey;
- t->re_pad = meta->re_pad;
- t->re_len = meta->re_len;
-
- t->bt_meta = base_pgno;
- t->bt_root = meta->root;
-
- (void)memp_fput(dbp->mpf, meta, 0);
- meta = NULL;
- goto done;
- }
+ DB_ASSERT(meta->dbmeta.magic != 0 ||
+ IS_RECOVERING(dbp->dbenv) || F_ISSET(dbp, DB_AM_RECOVER));
- /* In recovery if it's not there it will be created elsewhere.*/
- if (IS_RECOVERING(dbp->dbenv))
- goto done;
-
- /* If we're doing CDB; we now have to get the write lock. */
- if (CDB_LOCKING(dbp->dbenv)) {
- /*
- * We'd better have DB_CREATE set if we're actually doing
- * the create.
- */
- DB_ASSERT(LF_ISSET(DB_CREATE));
- if ((ret = lock_get(dbp->dbenv, dbc->locker, DB_LOCK_UPGRADE,
- &dbc->lock_dbt, DB_LOCK_WRITE, &dbc->mylock)) != 0)
- goto err;
- }
+ t->bt_maxkey = meta->maxkey;
+ t->bt_minkey = meta->minkey;
+ t->re_pad = meta->re_pad;
+ t->re_len = meta->re_len;
+
+ t->bt_meta = base_pgno;
+ t->bt_root = meta->root;
/*
- * If we are doing locking, relase the read lock and get a write lock.
- * We want to avoid deadlock.
+ * !!!
+ * If creating a subdatabase, we've already done an insert when
+ * we put the subdatabase's entry into the master database, so
+ * our last-page-inserted value is wrongly initialized for the
+ * master database, not the subdatabase we're creating. I'm not
+ * sure where the *right* place to clear this value is, it's not
+ * intuitively obvious that it belongs here.
*/
- if (locked == 0 && STD_LOCKING(dbc)) {
- if ((ret = __LPUT(dbc, metalock)) != 0)
- goto err;
- if ((ret = __db_lget(dbc,
- 0, base_pgno, DB_LOCK_WRITE, 0, &metalock)) != 0)
- goto err;
- locked = 1;
- goto again;
- }
+ t->bt_lpgno = PGNO_INVALID;
+
+ /* We must initialize last_pgno, it could be stale. */
+ if (!LF_ISSET(DB_RDONLY) && dbp->meta_pgno == PGNO_BASE_MD) {
+ mpf->last_pgno(mpf, &meta->dbmeta.last_pgno);
+ ret = mpf->put(mpf, meta, DB_MPOOL_DIRTY);
+ } else
+ ret = mpf->put(mpf, meta, 0);
+ meta = NULL;
+
+err: /* Put the metadata page back. */
+ if (meta != NULL && (t_ret = mpf->put(mpf, meta, 0)) != 0 && ret == 0)
+ ret = t_ret;
+ if ((t_ret = __LPUT(dbc, metalock)) != 0 && ret == 0)
+ ret = t_ret;
+
+ if ((t_ret = dbc->c_close(dbc)) != 0 && ret == 0)
+ ret = t_ret;
+ return (ret);
+}
+
+/*
+ * __bam_init_meta --
+ *
+ * Initialize a btree meta-data page. The following fields may need
+ * to be updated later: last_pgno, root.
+ */
+static void
+__bam_init_meta(dbp, meta, pgno, lsnp)
+ DB *dbp;
+ BTMETA *meta;
+ db_pgno_t pgno;
+ DB_LSN *lsnp;
+{
+ BTREE *t;
- /* Initialize the tree structure metadata information. */
- orig_lsn = meta->dbmeta.lsn;
memset(meta, 0, sizeof(BTMETA));
- meta->dbmeta.lsn = orig_lsn;
- meta->dbmeta.pgno = base_pgno;
+ meta->dbmeta.lsn = *lsnp;
+ meta->dbmeta.pgno = pgno;
meta->dbmeta.magic = DB_BTREEMAGIC;
meta->dbmeta.version = DB_BTREEVERSION;
meta->dbmeta.pagesize = dbp->pgsize;
+ if (F_ISSET(dbp, DB_AM_CHKSUM))
+ FLD_SET(meta->dbmeta.metaflags, DBMETA_CHKSUM);
+ if (F_ISSET(dbp, DB_AM_ENCRYPT)) {
+ meta->dbmeta.encrypt_alg =
+ ((DB_CIPHER *)dbp->dbenv->crypto_handle)->alg;
+ DB_ASSERT(meta->dbmeta.encrypt_alg != 0);
+ meta->crypto_magic = meta->dbmeta.magic;
+ }
meta->dbmeta.type = P_BTREEMETA;
meta->dbmeta.free = PGNO_INVALID;
+ meta->dbmeta.last_pgno = pgno;
if (F_ISSET(dbp, DB_AM_DUP))
F_SET(&meta->dbmeta, BTM_DUP);
- if (F_ISSET(dbp, DB_RE_FIXEDLEN))
+ if (F_ISSET(dbp, DB_AM_FIXEDLEN))
F_SET(&meta->dbmeta, BTM_FIXEDLEN);
- if (F_ISSET(dbp, DB_BT_RECNUM))
+ if (F_ISSET(dbp, DB_AM_RECNUM))
F_SET(&meta->dbmeta, BTM_RECNUM);
- if (F_ISSET(dbp, DB_RE_RENUMBER))
+ if (F_ISSET(dbp, DB_AM_RENUMBER))
F_SET(&meta->dbmeta, BTM_RENUMBER);
if (F_ISSET(dbp, DB_AM_SUBDB))
F_SET(&meta->dbmeta, BTM_SUBDB);
@@ -385,14 +404,165 @@ again: if (meta->dbmeta.magic != 0) {
F_SET(&meta->dbmeta, BTM_RECNO);
memcpy(meta->dbmeta.uid, dbp->fileid, DB_FILE_ID_LEN);
+ t = dbp->bt_internal;
meta->maxkey = t->bt_maxkey;
meta->minkey = t->bt_minkey;
meta->re_len = t->re_len;
meta->re_pad = t->re_pad;
+}
- /* If necessary, log the meta-data and root page creates. */
- if ((ret = __db_log_page(dbp,
- name, &orig_lsn, base_pgno, (PAGE *)meta)) != 0)
+/*
+ * __bam_new_file --
+ * Create the necessary pages to begin a new database file.
+ *
+ * This code appears more complex than it is because of the two cases (named
+ * and unnamed). The way to read the code is that for each page being created,
+ * there are three parts: 1) a "get page" chunk (which either uses malloc'd
+ * memory or calls mpf->get), 2) the initialization, and 3) the "put page"
+ * chunk which either does a fop write or an mpf->put.
+ *
+ * PUBLIC: int __bam_new_file __P((DB *, DB_TXN *, DB_FH *, const char *));
+ */
+int
+__bam_new_file(dbp, txn, fhp, name)
+ DB *dbp;
+ DB_TXN *txn;
+ DB_FH *fhp;
+ const char *name;
+{
+ BTMETA *meta;
+ DB_ENV *dbenv;
+ DB_LSN lsn;
+ DB_MPOOLFILE *mpf;
+ DB_PGINFO pginfo;
+ DBT pdbt;
+ PAGE *root;
+ db_pgno_t pgno;
+ int ret;
+ void *buf;
+
+ dbenv = dbp->dbenv;
+ mpf = dbp->mpf;
+ root = NULL;
+ meta = NULL;
+ memset(&pdbt, 0, sizeof(pdbt));
+
+ /* Build meta-data page. */
+
+ if (name == NULL) {
+ pgno = PGNO_BASE_MD;
+ ret = mpf->get(mpf, &pgno, DB_MPOOL_CREATE, &meta);
+ } else {
+ pginfo.db_pagesize = dbp->pgsize;
+ pginfo.flags =
+ F_ISSET(dbp, (DB_AM_CHKSUM | DB_AM_ENCRYPT | DB_AM_SWAP));
+ pginfo.type = dbp->type;
+ pdbt.data = &pginfo;
+ pdbt.size = sizeof(pginfo);
+ ret = __os_calloc(dbp->dbenv, 1, dbp->pgsize, &buf);
+ meta = (BTMETA *)buf;
+ }
+ if (ret != 0)
+ return (ret);
+
+ LSN_NOT_LOGGED(lsn);
+ __bam_init_meta(dbp, meta, PGNO_BASE_MD, &lsn);
+ meta->root = 1;
+ meta->dbmeta.last_pgno = 1;
+
+ if (name == NULL)
+ ret = mpf->put(mpf, meta, DB_MPOOL_DIRTY);
+ else {
+ if ((ret = __db_pgout(dbenv, PGNO_BASE_MD, meta, &pdbt)) != 0)
+ goto err;
+ ret = __fop_write(dbenv,
+ txn, name, DB_APP_DATA, fhp, 0, buf, dbp->pgsize, 1);
+ }
+ if (ret != 0)
+ goto err;
+ meta = NULL;
+
+ /* Now build root page. */
+ if (name == NULL) {
+ pgno = 1;
+ if ((ret = mpf->get(mpf, &pgno, DB_MPOOL_CREATE, &root)) != 0)
+ goto err;
+ } else {
+#ifdef DIAGNOSTIC
+ memset(buf, 0, dbp->pgsize);
+#endif
+ root = (PAGE *)buf;
+ }
+
+ P_INIT(root, dbp->pgsize, 1, PGNO_INVALID, PGNO_INVALID,
+ LEAFLEVEL, dbp->type == DB_RECNO ? P_LRECNO : P_LBTREE);
+ LSN_NOT_LOGGED(root->lsn);
+
+ if (name == NULL)
+ ret = mpf->put(mpf, root, DB_MPOOL_DIRTY);
+ else {
+ if ((ret = __db_pgout(dbenv, root->pgno, root, &pdbt)) != 0)
+ goto err;
+ ret = __fop_write(dbenv, txn,
+ name, DB_APP_DATA, fhp, dbp->pgsize, buf, dbp->pgsize, 1);
+ }
+ if (ret != 0)
+ goto err;
+ root = NULL;
+
+err: if (name != NULL)
+ __os_free(dbenv, buf);
+ else {
+ if (meta != NULL)
+ (void)mpf->put(mpf, meta, 0);
+ if (root != NULL)
+ (void)mpf->put(mpf, root, 0);
+ }
+ return (ret);
+}
+
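
__bam_new_file follows the get/initialize/put pattern its comment describes: for a named file the two pages are built in a calloc'd buffer and written straight to the file, while the unnamed (in-memory) case goes through the buffer pool instead. A stripped-down sketch of the named path only, using plain stdio and fake page contents rather than anything Berkeley DB specific:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define PGSIZE 4096

/* Write one PGSIZE page at page number "pgno". */
static int
write_page(FILE *fp, long pgno, const void *buf)
{
	if (fseek(fp, pgno * PGSIZE, SEEK_SET) != 0)
		return (-1);
	return (fwrite(buf, PGSIZE, 1, fp) == 1 ? 0 : -1);
}

/* Create a new file holding a "meta" page 0 and an empty "root" page 1. */
static int
new_file_skeleton(const char *name)
{
	unsigned char *buf;
	FILE *fp;
	int ret;

	if ((fp = fopen(name, "wb")) == NULL)
		return (-1);
	if ((buf = calloc(1, PGSIZE)) == NULL) {
		(void)fclose(fp);
		return (-1);
	}

	buf[0] = 0x53;				/* fake meta marker */
	if ((ret = write_page(fp, 0, buf)) != 0)
		goto err;

	memset(buf, 0, PGSIZE);
	buf[0] = 0x05;				/* fake leaf marker */
	ret = write_page(fp, 1, buf);

err:	free(buf);
	if (fclose(fp) != 0 && ret == 0)
		ret = -1;
	return (ret);
}
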
+/*
+ * __bam_new_subdb --
+ * Create a metadata page and a root page for a new btree.
+ *
+ * PUBLIC: int __bam_new_subdb __P((DB *, DB *, DB_TXN *));
+ */
+int
+__bam_new_subdb(mdbp, dbp, txn)
+ DB *mdbp, *dbp;
+ DB_TXN *txn;
+{
+ BTMETA *meta;
+ DBC *dbc;
+ DB_ENV *dbenv;
+ DB_LOCK metalock;
+ DB_LSN lsn;
+ DB_MPOOLFILE *mpf;
+ PAGE *root;
+ int ret, t_ret;
+
+ dbenv = mdbp->dbenv;
+ mpf = mdbp->mpf;
+ dbc = NULL;
+ meta = NULL;
+ root = NULL;
+
+ if ((ret = mdbp->cursor(mdbp, txn,
+ &dbc, CDB_LOCKING(dbenv) ? DB_WRITECURSOR : 0)) != 0)
+ return (ret);
+
+ /* Get, and optionally create the metadata page. */
+ if ((ret = __db_lget(dbc,
+ 0, dbp->meta_pgno, DB_LOCK_WRITE, 0, &metalock)) != 0)
+ goto err;
+ if ((ret = mpf->get(mpf, &dbp->meta_pgno, DB_MPOOL_CREATE, &meta)) != 0)
+ goto err;
+
+ /* Build meta-data page. */
+ lsn = meta->dbmeta.lsn;
+ __bam_init_meta(dbp, meta, dbp->meta_pgno, &lsn);
+ if ((ret = __db_log_page(mdbp,
+ txn, &meta->dbmeta.lsn, dbp->meta_pgno, (PAGE *)meta)) != 0)
goto err;
/* Create and initialize a root page. */
@@ -401,68 +571,35 @@ again: if (meta->dbmeta.magic != 0) {
goto err;
root->level = LEAFLEVEL;
- if (dbp->open_txn != NULL && (ret = __bam_root_log(dbp->dbenv,
- dbp->open_txn, &meta->dbmeta.lsn, 0, dbp->log_fileid,
+ if (DBENV_LOGGING(dbenv) &&
+ (ret = __bam_root_log(mdbp, txn, &meta->dbmeta.lsn, 0,
meta->dbmeta.pgno, root->pgno, &meta->dbmeta.lsn)) != 0)
goto err;
meta->root = root->pgno;
-
- DB_TEST_RECOVERY(dbp, DB_TEST_POSTLOGMETA, ret, name);
- if ((ret = __db_log_page(dbp,
- name, &root->lsn, root->pgno, root)) != 0)
+ if ((ret =
+ __db_log_page(mdbp, txn, &root->lsn, root->pgno, root)) != 0)
goto err;
- DB_TEST_RECOVERY(dbp, DB_TEST_POSTLOG, ret, name);
-
- t->bt_meta = base_pgno;
- t->bt_root = root->pgno;
/* Release the metadata and root pages. */
- if ((ret = memp_fput(dbp->mpf, meta, DB_MPOOL_DIRTY)) != 0)
+ if ((ret = mpf->put(mpf, meta, DB_MPOOL_DIRTY)) != 0)
goto err;
meta = NULL;
- if ((ret = memp_fput(dbp->mpf, root, DB_MPOOL_DIRTY)) != 0)
+ if ((ret = mpf->put(mpf, root, DB_MPOOL_DIRTY)) != 0)
goto err;
root = NULL;
-
- /*
- * Flush the metadata and root pages to disk.
- *
- * !!!
- * It's not useful to return not-yet-flushed here -- convert it to
- * an error.
- */
- if ((ret = memp_fsync(dbp->mpf)) == DB_INCOMPLETE) {
- __db_err(dbp->dbenv, "Metapage flush failed");
- ret = EINVAL;
- }
- DB_TEST_RECOVERY(dbp, DB_TEST_POSTSYNC, ret, name);
-
-done: /*
- * !!!
- * We already did an insert and so the last-page-inserted has been
- * set. I'm not sure where the *right* place to clear this value
- * is, it's not intuitively obvious that it belongs here.
- */
- t->bt_lpgno = PGNO_INVALID;
-
err:
-DB_TEST_RECOVERY_LABEL
- /* Put any remaining pages back. */
if (meta != NULL)
- if ((t_ret = memp_fput(dbp->mpf, meta, 0)) != 0 &&
- ret == 0)
+ if ((t_ret = mpf->put(mpf, meta, 0)) != 0 && ret == 0)
ret = t_ret;
if (root != NULL)
- if ((t_ret = memp_fput(dbp->mpf, root, 0)) != 0 &&
- ret == 0)
+ if ((t_ret = mpf->put(mpf, root, 0)) != 0 && ret == 0)
+ ret = t_ret;
+ if (LOCK_ISSET(metalock))
+ if ((t_ret = __LPUT(dbc, metalock)) != 0 && ret == 0)
+ ret = t_ret;
+ if (dbc != NULL)
+ if ((t_ret = dbc->c_close(dbc)) != 0 && ret == 0)
ret = t_ret;
-
- /* We can release the metapage lock when we are done. */
- if ((t_ret = __LPUT(dbc, metalock)) != 0 && ret == 0)
- ret = t_ret;
-
- if ((t_ret = dbc->c_close(dbc)) != 0 && ret == 0)
- ret = t_ret;
return (ret);
}
diff --git a/bdb/btree/bt_put.c b/bdb/btree/bt_put.c
index 19a04526d1b..39bd2024e76 100644
--- a/bdb/btree/bt_put.c
+++ b/bdb/btree/bt_put.c
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Copyright (c) 1996-2002
* Sleepycat Software. All rights reserved.
*/
/*
@@ -43,7 +43,7 @@
#include "db_config.h"
#ifndef lint
-static const char revid[] = "$Id: bt_put.c,v 11.46 2001/01/17 18:48:46 bostic Exp $";
+static const char revid[] = "$Id: bt_put.c,v 11.69 2002/08/06 06:11:12 bostic Exp $";
#endif /* not lint */
#ifndef NO_SYSTEM_INCLUDES
@@ -53,12 +53,16 @@ static const char revid[] = "$Id: bt_put.c,v 11.46 2001/01/17 18:48:46 bostic Ex
#endif
#include "db_int.h"
-#include "db_page.h"
-#include "btree.h"
+#include "dbinc/db_page.h"
+#include "dbinc/btree.h"
+static int __bam_build
+ __P((DBC *, u_int32_t, DBT *, PAGE *, u_int32_t, u_int32_t));
static int __bam_dup_convert __P((DBC *, PAGE *, u_int32_t));
static int __bam_ovput
__P((DBC *, u_int32_t, db_pgno_t, PAGE *, u_int32_t, DBT *));
+static u_int32_t
+ __bam_partsize __P((DB *, u_int32_t, DBT *, PAGE *, u_int32_t));
/*
* __bam_iitem --
@@ -77,6 +81,7 @@ __bam_iitem(dbc, key, data, op, flags)
BTREE_CURSOR *cp;
DB *dbp;
DBT bk_hdr, tdbt;
+ DB_MPOOLFILE *mpf;
PAGE *h;
db_indx_t indx;
u_int32_t data_size, have_bytes, need_bytes, needed;
@@ -85,6 +90,7 @@ __bam_iitem(dbc, key, data, op, flags)
COMPQUIET(bk, NULL);
dbp = dbc->dbp;
+ mpf = dbp->mpf;
cp = (BTREE_CURSOR *)dbc->internal;
t = dbp->bt_internal;
h = cp->page;
@@ -95,7 +101,7 @@ __bam_iitem(dbc, key, data, op, flags)
* Fixed-length records with partial puts: it's an error to specify
 * anything other than a simple overwrite.
*/
- if (F_ISSET(dbp, DB_RE_FIXEDLEN) &&
+ if (F_ISSET(dbp, DB_AM_FIXEDLEN) &&
F_ISSET(data, DB_DBT_PARTIAL) && data->dlen != data->size) {
data_size = data->size;
goto len_err;
@@ -110,16 +116,18 @@ __bam_iitem(dbc, key, data, op, flags)
* the fixed-length record size.
*/
data_size = F_ISSET(data, DB_DBT_PARTIAL) ?
- __bam_partsize(op, data, h, indx) : data->size;
+ __bam_partsize(dbp, op, data, h, indx) : data->size;
padrec = 0;
- if (F_ISSET(dbp, DB_RE_FIXEDLEN)) {
+ if (F_ISSET(dbp, DB_AM_FIXEDLEN)) {
if (data_size > t->re_len) {
len_err: __db_err(dbp->dbenv,
"Length improper for fixed length record %lu",
(u_long)data_size);
return (EINVAL);
}
- if (data_size < t->re_len) {
+
+ /* Records that are deleted anyway needn't be padded out. */
+ if (!LF_ISSET(BI_DELETED) && data_size < t->re_len) {
padrec = 1;
data_size = t->re_len;
}
@@ -146,8 +154,8 @@ len_err: __db_err(dbp->dbenv,
*/
if (op == DB_CURRENT && dbp->dup_compare != NULL) {
if ((ret = __bam_cmp(dbp, data, h,
- indx + (TYPE(h) == P_LBTREE ? O_INDX : 0),
- dbp->dup_compare, &cmp)) != 0)
+ indx + (TYPE(h) == P_LBTREE ? O_INDX : 0),
+ dbp->dup_compare, &cmp)) != 0)
return (ret);
if (cmp != 0) {
__db_err(dbp->dbenv,
@@ -190,7 +198,7 @@ len_err: __db_err(dbp->dbenv,
*/
bigkey = 0;
if (op == DB_CURRENT) {
- bk = GET_BKEYDATA(h,
+ bk = GET_BKEYDATA(dbp, h,
indx + (TYPE(h) == P_LBTREE ? O_INDX : 0));
if (B_TYPE(bk->type) == B_KEYDATA)
have_bytes = BKEYDATA_PSIZE(bk->len);
@@ -221,7 +229,7 @@ len_err: __db_err(dbp->dbenv,
* The t->bt_maxkey test here may be insufficient -- do we have to
* check in the btree split code, so we don't undo it there!?!?
*/
- if (P_FREESPACE(h) < needed ||
+ if (P_FREESPACE(dbp, h) < needed ||
(t->bt_maxkey != 0 && NUM_ENT(h) > t->bt_maxkey))
return (DB_NEEDSPLIT);
@@ -328,6 +336,11 @@ len_err: __db_err(dbp->dbenv,
/* Add the data. */
if (bigdata) {
+ /*
+ * We do not have to handle deleted (BI_DELETED) records
+ * in this case; the actual records should never be created.
+ */
+ DB_ASSERT(!LF_ISSET(BI_DELETED));
if ((ret = __bam_ovput(dbc,
B_OVERFLOW, PGNO_INVALID, h, indx, data)) != 0)
return (ret);
@@ -347,7 +360,7 @@ len_err: __db_err(dbp->dbenv,
if (ret != 0)
return (ret);
}
- if ((ret = memp_fset(dbp->mpf, h, DB_MPOOL_DIRTY)) != 0)
+ if ((ret = mpf->set(mpf, h, DB_MPOOL_DIRTY)) != 0)
return (ret);
/*
@@ -375,7 +388,7 @@ len_err: __db_err(dbp->dbenv,
* up at least 25% of the space on the page. If it does, move it onto
* its own page.
*/
- if (dupadjust && P_FREESPACE(h) <= dbp->pgsize / 2) {
+ if (dupadjust && P_FREESPACE(dbp, h) <= dbp->pgsize / 2) {
if ((ret = __bam_dup_convert(dbc, h, indx - O_INDX)) != 0)
return (ret);
}
@@ -390,11 +403,10 @@ len_err: __db_err(dbp->dbenv,
/*
* __bam_partsize --
* Figure out how much space a partial data item is in total.
- *
- * PUBLIC: u_int32_t __bam_partsize __P((u_int32_t, DBT *, PAGE *, u_int32_t));
*/
-u_int32_t
-__bam_partsize(op, data, h, indx)
+static u_int32_t
+__bam_partsize(dbp, op, data, h, indx)
+ DB *dbp;
u_int32_t op, indx;
DBT *data;
PAGE *h;
@@ -413,38 +425,18 @@ __bam_partsize(op, data, h, indx)
* Otherwise, it's the data provided plus any already existing data
* that we're not replacing.
*/
- bk = GET_BKEYDATA(h, indx + (TYPE(h) == P_LBTREE ? O_INDX : 0));
+ bk = GET_BKEYDATA(dbp, h, indx + (TYPE(h) == P_LBTREE ? O_INDX : 0));
nbytes =
B_TYPE(bk->type) == B_OVERFLOW ? ((BOVERFLOW *)bk)->tlen : bk->len;
- /*
- * There are really two cases here:
- *
- * Case 1: We are replacing some bytes that do not exist (i.e., they
- * are past the end of the record). In this case the number of bytes
- * we are replacing is irrelevant and all we care about is how many
- * bytes we are going to add from offset. So, the new record length
- * is going to be the size of the new bytes (size) plus wherever those
- * new bytes begin (doff).
- *
- * Case 2: All the bytes we are replacing exist. Therefore, the new
- * size is the oldsize (nbytes) minus the bytes we are replacing (dlen)
- * plus the bytes we are adding (size).
- */
- if (nbytes < data->doff + data->dlen) /* Case 1 */
- return (data->doff + data->size);
-
- return (nbytes + data->size - data->dlen); /* Case 2 */
+ return (__db_partsize(nbytes, data));
}
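
The call to __db_partsize preserves the two cases spelled out in the comment being removed: if the bytes being replaced run past the end of the existing record, the new length is doff + size; otherwise it is the old length minus the replaced bytes plus the new ones. As plain arithmetic, with no DB types and illustrative names:

/* New record length after a partial put over an existing record. */
static unsigned int
partial_size(unsigned int oldsize,
    unsigned int doff, unsigned int dlen, unsigned int size)
{
	if (oldsize < doff + dlen)	/* Case 1: replacing past the end. */
		return (doff + size);
	return (oldsize - dlen + size);	/* Case 2: replacing inside. */
}
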
/*
* __bam_build --
* Build the real record for a partial put, or short fixed-length record.
- *
- * PUBLIC: int __bam_build __P((DBC *, u_int32_t,
- * PUBLIC: DBT *, PAGE *, u_int32_t, u_int32_t));
*/
-int
+static int
__bam_build(dbc, op, dbt, h, indx, nbytes)
DBC *dbc;
u_int32_t op, indx, nbytes;
@@ -454,9 +446,8 @@ __bam_build(dbc, op, dbt, h, indx, nbytes)
BKEYDATA *bk, tbk;
BOVERFLOW *bo;
BTREE *t;
- BTREE_CURSOR *cp;
DB *dbp;
- DBT copy;
+ DBT copy, *rdata;
u_int32_t len, tlen;
u_int8_t *p;
int ret;
@@ -464,26 +455,26 @@ __bam_build(dbc, op, dbt, h, indx, nbytes)
COMPQUIET(bo, NULL);
dbp = dbc->dbp;
- cp = (BTREE_CURSOR *) dbc->internal;
t = dbp->bt_internal;
/* We use the record data return memory, it's only a short-term use. */
- if (dbc->rdata.ulen < nbytes) {
+ rdata = &dbc->my_rdata;
+ if (rdata->ulen < nbytes) {
if ((ret = __os_realloc(dbp->dbenv,
- nbytes, NULL, &dbc->rdata.data)) != 0) {
- dbc->rdata.ulen = 0;
- dbc->rdata.data = NULL;
+ nbytes, &rdata->data)) != 0) {
+ rdata->ulen = 0;
+ rdata->data = NULL;
return (ret);
}
- dbc->rdata.ulen = nbytes;
+ rdata->ulen = nbytes;
}
/*
* We use nul or pad bytes for any part of the record that isn't
* specified; get it over with.
*/
- memset(dbc->rdata.data,
- F_ISSET(dbp, DB_RE_FIXEDLEN) ? t->re_pad : 0, nbytes);
+ memset(rdata->data,
+ F_ISSET(dbp, DB_AM_FIXEDLEN) ? t->re_pad : 0, nbytes);
/*
* In the next clauses, we need to do three things: a) set p to point
@@ -495,14 +486,15 @@ __bam_build(dbc, op, dbt, h, indx, nbytes)
* the chase.
*/
if (!F_ISSET(dbt, DB_DBT_PARTIAL) || op != DB_CURRENT) {
- p = (u_int8_t *)dbc->rdata.data + dbt->doff;
+ p = (u_int8_t *)rdata->data + dbt->doff;
tlen = dbt->doff;
goto user_copy;
}
/* Find the current record. */
if (indx < NUM_ENT(h)) {
- bk = GET_BKEYDATA(h, indx + (TYPE(h) == P_LBTREE ? O_INDX : 0));
+ bk = GET_BKEYDATA(dbp, h, indx + (TYPE(h) == P_LBTREE ?
+ O_INDX : 0));
bo = (BOVERFLOW *)bk;
} else {
bk = &tbk;
@@ -516,12 +508,12 @@ __bam_build(dbc, op, dbt, h, indx, nbytes)
*/
memset(&copy, 0, sizeof(copy));
if ((ret = __db_goff(dbp, &copy, bo->tlen,
- bo->pgno, &dbc->rdata.data, &dbc->rdata.ulen)) != 0)
+ bo->pgno, &rdata->data, &rdata->ulen)) != 0)
return (ret);
/* Skip any leading data from the original record. */
tlen = dbt->doff;
- p = (u_int8_t *)dbc->rdata.data + dbt->doff;
+ p = (u_int8_t *)rdata->data + dbt->doff;
/*
* Copy in any trailing data from the original record.
@@ -542,10 +534,10 @@ __bam_build(dbc, op, dbt, h, indx, nbytes)
}
} else {
/* Copy in any leading data from the original record. */
- memcpy(dbc->rdata.data,
+ memcpy(rdata->data,
bk->data, dbt->doff > bk->len ? bk->len : dbt->doff);
tlen = dbt->doff;
- p = (u_int8_t *)dbc->rdata.data + dbt->doff;
+ p = (u_int8_t *)rdata->data + dbt->doff;
/* Copy in any trailing data from the original record. */
len = dbt->doff + dbt->dlen;
@@ -564,11 +556,11 @@ user_copy:
tlen += dbt->size;
/* Set the DBT to reference our new record. */
- dbc->rdata.size = F_ISSET(dbp, DB_RE_FIXEDLEN) ? t->re_len : tlen;
- dbc->rdata.dlen = 0;
- dbc->rdata.doff = 0;
- dbc->rdata.flags = 0;
- *dbt = dbc->rdata;
+ rdata->size = F_ISSET(dbp, DB_AM_FIXEDLEN) ? t->re_len : tlen;
+ rdata->dlen = 0;
+ rdata->doff = 0;
+ rdata->flags = 0;
+ *dbt = *rdata;
return (0);
}
@@ -591,6 +583,7 @@ __bam_ritem(dbc, h, indx, data)
db_indx_t cnt, lo, ln, min, off, prefix, suffix;
int32_t nbytes;
int ret;
+ db_indx_t *inp;
u_int8_t *p, *t;
dbp = dbc->dbp;
@@ -600,10 +593,10 @@ __bam_ritem(dbc, h, indx, data)
* to insert and whether it fits is handled in the caller. All we do
* here is manage the page shuffling.
*/
- bk = GET_BKEYDATA(h, indx);
+ bk = GET_BKEYDATA(dbp, h, indx);
/* Log the change. */
- if (DB_LOGGING(dbc)) {
+ if (DBC_LOGGING(dbc)) {
/*
* We might as well check to see if the two data items share
* a common prefix and suffix -- it can save us a lot of log
@@ -627,17 +620,18 @@ __bam_ritem(dbc, h, indx, data)
orig.size = bk->len - (prefix + suffix);
repl.data = (u_int8_t *)data->data + prefix;
repl.size = data->size - (prefix + suffix);
- if ((ret = __bam_repl_log(dbp->dbenv, dbc->txn,
- &LSN(h), 0, dbp->log_fileid, PGNO(h), &LSN(h),
- (u_int32_t)indx, (u_int32_t)B_DISSET(bk->type),
+ if ((ret = __bam_repl_log(dbp, dbc->txn, &LSN(h), 0, PGNO(h),
+ &LSN(h), (u_int32_t)indx, (u_int32_t)B_DISSET(bk->type),
&orig, &repl, (u_int32_t)prefix, (u_int32_t)suffix)) != 0)
return (ret);
- }
+ } else
+ LSN_NOT_LOGGED(LSN(h));
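
The prefix/suffix check above keeps replace-log records small: only the bytes that actually differ between the old and new data items need to be logged. Measuring the shared ends is straightforward; a standalone version (not the BDB routine) might look like:

#include <stddef.h>

/* Report the longest common prefix and (non-overlapping) suffix of a, b. */
static void
common_affixes(const unsigned char *a, size_t alen,
    const unsigned char *b, size_t blen,
    size_t *prefixp, size_t *suffixp)
{
	size_t max, prefix, suffix;

	max = alen < blen ? alen : blen;
	for (prefix = 0; prefix < max && a[prefix] == b[prefix]; ++prefix)
		;
	max -= prefix;			/* Don't let the ends overlap. */
	for (suffix = 0; suffix < max &&
	    a[alen - 1 - suffix] == b[blen - 1 - suffix]; ++suffix)
		;
	*prefixp = prefix;
	*suffixp = suffix;
}
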
/*
* Set references to the first in-use byte on the page and the
* first byte of the item being replaced.
*/
+ inp = P_INP(dbp, h);
p = (u_int8_t *)h + HOFFSET(h);
t = (u_int8_t *)bk;
@@ -648,19 +642,19 @@ __bam_ritem(dbc, h, indx, data)
* the regions overlap.
*/
lo = BKEYDATA_SIZE(bk->len);
- ln = BKEYDATA_SIZE(data->size);
+ ln = (db_indx_t)BKEYDATA_SIZE(data->size);
if (lo != ln) {
nbytes = lo - ln; /* Signed difference. */
if (p == t) /* First index is fast. */
- h->inp[indx] += nbytes;
+ inp[indx] += nbytes;
else { /* Else, shift the page. */
memmove(p + nbytes, p, t - p);
/* Adjust the indices' offsets. */
- off = h->inp[indx];
+ off = inp[indx];
for (cnt = 0; cnt < NUM_ENT(h); ++cnt)
- if (h->inp[cnt] <= off)
- h->inp[cnt] += nbytes;
+ if (inp[cnt] <= off)
+ inp[cnt] += nbytes;
}
/* Clean up the page and adjust the item's reference. */
@@ -688,30 +682,31 @@ __bam_dup_convert(dbc, h, indx)
PAGE *h;
u_int32_t indx;
{
- BTREE_CURSOR *cp;
BKEYDATA *bk;
DB *dbp;
DBT hdr;
+ DB_MPOOLFILE *mpf;
PAGE *dp;
- db_indx_t cnt, cpindx, dindx, first, sz;
+ db_indx_t cnt, cpindx, dindx, first, *inp, sz;
int ret;
dbp = dbc->dbp;
- cp = (BTREE_CURSOR *)dbc->internal;
+ mpf = dbp->mpf;
+ inp = P_INP(dbp, h);
/*
* Count the duplicate records and calculate how much room they're
* using on the page.
*/
- while (indx > 0 && h->inp[indx] == h->inp[indx - P_INDX])
+ while (indx > 0 && inp[indx] == inp[indx - P_INDX])
indx -= P_INDX;
for (cnt = 0, sz = 0, first = indx;; ++cnt, indx += P_INDX) {
- if (indx >= NUM_ENT(h) || h->inp[first] != h->inp[indx])
+ if (indx >= NUM_ENT(h) || inp[first] != inp[indx])
break;
- bk = GET_BKEYDATA(h, indx);
+ bk = GET_BKEYDATA(dbp, h, indx);
sz += B_TYPE(bk->type) == B_KEYDATA ?
BKEYDATA_PSIZE(bk->len) : BOVERFLOW_PSIZE;
- bk = GET_BKEYDATA(h, indx + O_INDX);
+ bk = GET_BKEYDATA(dbp, h, indx + O_INDX);
sz += B_TYPE(bk->type) == B_KEYDATA ?
BKEYDATA_PSIZE(bk->len) : BOVERFLOW_PSIZE;
}
@@ -766,7 +761,7 @@ __bam_dup_convert(dbc, h, indx)
* deleted entries are discarded (if the deleted entry is
* overflow, then free up those pages).
*/
- bk = GET_BKEYDATA(h, dindx + 1);
+ bk = GET_BKEYDATA(dbp, h, dindx + 1);
hdr.data = bk;
hdr.size = B_TYPE(bk->type) == B_KEYDATA ?
BKEYDATA_SIZE(bk->len) : BOVERFLOW_SIZE;
@@ -778,7 +773,7 @@ __bam_dup_convert(dbc, h, indx)
*/
if (B_TYPE(bk->type) == B_OVERFLOW &&
(ret = __db_doff(dbc,
- (GET_BOVERFLOW(h, dindx + 1))->pgno)) != 0)
+ (GET_BOVERFLOW(dbp, h, dindx + 1))->pgno)) != 0)
goto err;
} else {
if ((ret = __db_pitem(
@@ -802,7 +797,7 @@ __bam_dup_convert(dbc, h, indx)
/* Put in a new data item that points to the duplicates page. */
if ((ret = __bam_ovput(dbc,
- B_DUPLICATE, dp->pgno, h, first + 1, NULL)) != 0)
+ B_DUPLICATE, dp->pgno, h, first + 1, NULL)) != 0)
goto err;
/* Adjust cursors for all the above movements. */
@@ -810,9 +805,9 @@ __bam_dup_convert(dbc, h, indx)
PGNO(h), first + P_INDX, first + P_INDX - indx)) != 0)
goto err;
- return (memp_fput(dbp->mpf, dp, DB_MPOOL_DIRTY));
+ return (mpf->put(mpf, dp, DB_MPOOL_DIRTY));
-err: (void)__db_free(dbc, dp);
+err: (void)mpf->put(mpf, dp, 0);
return (ret);
}
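From here on the patch also replaces the old free-function memory-pool calls (memp_fget, memp_fput, memp_fset) with methods on the DB_MPOOLFILE handle (mpf->get, mpf->put, mpf->set). The shape of the change is only function-call style versus a per-handle method table; a toy illustration of that idiom, with invented names and nothing resembling the real DB_MPOOLFILE layout, is:

    #include <stdio.h>
    #include <stdlib.h>

    /* Toy handle with a method table, in the style the new mpool API uses. */
    typedef struct toy_file {
        FILE *fp;
        int (*get)(struct toy_file *, long off, char *buf, size_t len);
        int (*put)(struct toy_file *, long off, const char *buf, size_t len);
    } TOY_FILE;

    static int
    toy_get(TOY_FILE *tf, long off, char *buf, size_t len)
    {
        if (fseek(tf->fp, off, SEEK_SET) != 0)
            return (-1);
        return (fread(buf, 1, len, tf->fp) == len ? 0 : -1);
    }

    static int
    toy_put(TOY_FILE *tf, long off, const char *buf, size_t len)
    {
        if (fseek(tf->fp, off, SEEK_SET) != 0)
            return (-1);
        return (fwrite(buf, 1, len, tf->fp) == len ? 0 : -1);
    }

    int
    toy_open(const char *path, TOY_FILE **tfp)
    {
        TOY_FILE *tf;

        if ((tf = malloc(sizeof(*tf))) == NULL)
            return (-1);
        if ((tf->fp = fopen(path, "r+b")) == NULL) {
            free(tf);
            return (-1);
        }
        tf->get = toy_get;        /* Wire up the method table. */
        tf->put = toy_put;
        *tfp = tf;
        return (0);
    }

Callers then write tf->get(tf, ...) instead of a free function that takes the handle as its first argument, which is exactly the mechanical rewrite visible throughout bt_rec.c below.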
diff --git a/bdb/btree/bt_rec.c b/bdb/btree/bt_rec.c
index 24dc9bc6a6e..b6443547aa5 100644
--- a/bdb/btree/bt_rec.c
+++ b/bdb/btree/bt_rec.c
@@ -1,14 +1,14 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Copyright (c) 1996-2002
* Sleepycat Software. All rights reserved.
*/
#include "db_config.h"
#ifndef lint
-static const char revid[] = "$Id: bt_rec.c,v 11.35 2001/01/10 16:24:47 ubell Exp $";
+static const char revid[] = "$Id: bt_rec.c,v 11.57 2002/08/06 16:53:53 ubell Exp $";
#endif /* not lint */
#ifndef NO_SYSTEM_INCLUDES
@@ -18,287 +18,17 @@ static const char revid[] = "$Id: bt_rec.c,v 11.35 2001/01/10 16:24:47 ubell Exp
#endif
#include "db_int.h"
-#include "db_page.h"
-#include "hash.h"
-#include "btree.h"
-#include "log.h"
+#include "dbinc/db_page.h"
+#include "dbinc/db_shash.h"
+#include "dbinc/btree.h"
+#include "dbinc/lock.h"
+#include "dbinc/log.h"
#define IS_BTREE_PAGE(pagep) \
(TYPE(pagep) == P_IBTREE || \
TYPE(pagep) == P_LBTREE || TYPE(pagep) == P_LDUP)
/*
- * __bam_pg_alloc_recover --
- * Recovery function for pg_alloc.
- *
- * PUBLIC: int __bam_pg_alloc_recover
- * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
- */
-int
-__bam_pg_alloc_recover(dbenv, dbtp, lsnp, op, info)
- DB_ENV *dbenv;
- DBT *dbtp;
- DB_LSN *lsnp;
- db_recops op;
- void *info;
-{
- __bam_pg_alloc_args *argp;
- DB *file_dbp;
- DBC *dbc;
- DBMETA *meta;
- DB_MPOOLFILE *mpf;
- PAGE *pagep;
- db_pgno_t pgno;
- int cmp_n, cmp_p, level, modified, ret;
-
- REC_PRINT(__bam_pg_alloc_print);
- REC_INTRO(__bam_pg_alloc_read, 0);
-
- /*
- * Fix up the allocated page. If we're redoing the operation, we have
- * to get the page (creating it if it doesn't exist), and update its
- * LSN. If we're undoing the operation, we have to reset the page's
- * LSN and put it on the free list.
- *
- * Fix up the metadata page. If we're redoing the operation, we have
- * to get the metadata page and update its LSN and its free pointer.
- * If we're undoing the operation and the page was ever created, we put
- * it on the freelist.
- */
- pgno = PGNO_BASE_MD;
- meta = NULL;
- if ((ret = memp_fget(mpf, &pgno, 0, &meta)) != 0) {
- /* The metadata page must always exist on redo. */
- if (DB_REDO(op)) {
- (void)__db_pgerr(file_dbp, pgno);
- goto out;
- } else
- goto done;
- }
- if ((ret = memp_fget(mpf, &argp->pgno, DB_MPOOL_CREATE, &pagep)) != 0) {
- /*
- * We specify creation and check for it later, because this
- * operation was supposed to create the page, and even in
- * the undo case it's going to get linked onto the freelist
- * which we're also fixing up.
- */
- (void)__db_pgerr(file_dbp, argp->pgno);
- goto err;
- }
-
- /* Fix up the allocated page. */
- modified = 0;
- cmp_n = log_compare(lsnp, &LSN(pagep));
- cmp_p = log_compare(&LSN(pagep), &argp->page_lsn);
-
- /*
- * If an initial allocation is aborted and then reallocated
- * during an archival restore the log record will have
- * an LSN for the page but the page will be empty.
- */
- if (IS_ZERO_LSN(LSN(pagep)))
- cmp_p = 0;
- CHECK_LSN(op, cmp_p, &LSN(pagep), &argp->page_lsn);
- /*
- * If we rolled back this allocation previously during an
- * archive restore, the page may have the LSN of the meta page
- * at the point of the roll back. This will be no more
- * than the LSN of the metadata page at the time of this allocation.
- */
- if (DB_REDO(op) &&
- (cmp_p == 0 ||
- (IS_ZERO_LSN(argp->page_lsn) &&
- log_compare(&LSN(pagep), &argp->meta_lsn) <= 0))) {
- /* Need to redo update described. */
- switch (argp->ptype) {
- case P_LBTREE:
- case P_LRECNO:
- case P_LDUP:
- level = LEAFLEVEL;
- break;
- default:
- level = 0;
- break;
- }
- P_INIT(pagep, file_dbp->pgsize,
- argp->pgno, PGNO_INVALID, PGNO_INVALID, level, argp->ptype);
-
- pagep->lsn = *lsnp;
- modified = 1;
- } else if (cmp_n == 0 && DB_UNDO(op)) {
- /*
- * Undo the allocation, reinitialize the page and
- * link its next pointer to the free list.
- */
- P_INIT(pagep, file_dbp->pgsize,
- argp->pgno, PGNO_INVALID, argp->next, 0, P_INVALID);
-
- pagep->lsn = argp->page_lsn;
- modified = 1;
- }
-
- if ((ret = memp_fput(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0)) != 0) {
- goto err;
- }
-
- /*
- * If the page was newly created, put it on the limbo list.
- */
- if (IS_ZERO_LSN(LSN(pagep)) &&
- IS_ZERO_LSN(argp->page_lsn) && DB_UNDO(op)) {
- /* Put the page in limbo.*/
- if ((ret = __db_add_limbo(dbenv,
- info, argp->fileid, argp->pgno, 1)) != 0)
- goto err;
- }
-
- /* Fix up the metadata page. */
- modified = 0;
- cmp_n = log_compare(lsnp, &LSN(meta));
- cmp_p = log_compare(&LSN(meta), &argp->meta_lsn);
- CHECK_LSN(op, cmp_p, &LSN(meta), &argp->meta_lsn);
- if (cmp_p == 0 && DB_REDO(op)) {
- /* Need to redo update described. */
- LSN(meta) = *lsnp;
- meta->free = argp->next;
- modified = 1;
- } else if (cmp_n == 0 && DB_UNDO(op)) {
- /* Need to undo update described. */
- LSN(meta) = argp->meta_lsn;
-
- /*
- * If the page has a zero LSN then it's newly created
- * and will go into limbo rather than directly on the
- * free list.
- */
- if (!IS_ZERO_LSN(argp->page_lsn))
- meta->free = argp->pgno;
- modified = 1;
- }
- if ((ret = memp_fput(mpf, meta, modified ? DB_MPOOL_DIRTY : 0)) != 0)
- goto out;
- /*
- * This could be the metapage from a subdb which is read from disk
- * to recover its creation.
- */
- if (F_ISSET(file_dbp, DB_AM_SUBDB))
- switch (argp->type) {
- case P_BTREEMETA:
- case P_HASHMETA:
- case P_QAMMETA:
- file_dbp->sync(file_dbp, 0);
- break;
- }
-
-done: *lsnp = argp->prev_lsn;
- ret = 0;
-
- if (0) {
-err:
- if (meta != NULL)
- (void)memp_fput(mpf, meta, 0);
- }
-out: REC_CLOSE;
-}
-
-/*
- * __bam_pg_free_recover --
- * Recovery function for pg_free.
- *
- * PUBLIC: int __bam_pg_free_recover
- * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
- */
-int
-__bam_pg_free_recover(dbenv, dbtp, lsnp, op, info)
- DB_ENV *dbenv;
- DBT *dbtp;
- DB_LSN *lsnp;
- db_recops op;
- void *info;
-{
- __bam_pg_free_args *argp;
- DB *file_dbp;
- DBC *dbc;
- DBMETA *meta;
- DB_LSN copy_lsn;
- DB_MPOOLFILE *mpf;
- PAGE *pagep;
- db_pgno_t pgno;
- int cmp_n, cmp_p, modified, ret;
-
- COMPQUIET(info, NULL);
- REC_PRINT(__bam_pg_free_print);
- REC_INTRO(__bam_pg_free_read, 1);
-
- /*
- * Fix up the freed page. If we're redoing the operation we get the
- * page and explicitly discard its contents, then update its LSN. If
- * we're undoing the operation, we get the page and restore its header.
- * Create the page if necessary, we may be freeing an aborted
- * create.
- */
- if ((ret = memp_fget(mpf, &argp->pgno, DB_MPOOL_CREATE, &pagep)) != 0)
- goto out;
- modified = 0;
- __ua_memcpy(&copy_lsn, &LSN(argp->header.data), sizeof(DB_LSN));
- cmp_n = log_compare(lsnp, &LSN(pagep));
- cmp_p = log_compare(&LSN(pagep), &copy_lsn);
- CHECK_LSN(op, cmp_p, &LSN(pagep), &copy_lsn);
- if (DB_REDO(op) &&
- (cmp_p == 0 ||
- (IS_ZERO_LSN(copy_lsn) &&
- log_compare(&LSN(pagep), &argp->meta_lsn) <= 0))) {
- /* Need to redo update described. */
- P_INIT(pagep, file_dbp->pgsize,
- argp->pgno, PGNO_INVALID, argp->next, 0, P_INVALID);
- pagep->lsn = *lsnp;
-
- modified = 1;
- } else if (cmp_n == 0 && DB_UNDO(op)) {
- /* Need to undo update described. */
- memcpy(pagep, argp->header.data, argp->header.size);
-
- modified = 1;
- }
- if ((ret = memp_fput(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0)) != 0)
- goto out;
-
- /*
- * Fix up the metadata page. If we're redoing or undoing the operation
- * we get the page and update its LSN and free pointer.
- */
- pgno = PGNO_BASE_MD;
- if ((ret = memp_fget(mpf, &pgno, 0, &meta)) != 0) {
- /* The metadata page must always exist. */
- (void)__db_pgerr(file_dbp, pgno);
- goto out;
- }
-
- modified = 0;
- cmp_n = log_compare(lsnp, &LSN(meta));
- cmp_p = log_compare(&LSN(meta), &argp->meta_lsn);
- CHECK_LSN(op, cmp_p, &LSN(meta), &argp->meta_lsn);
- if (cmp_p == 0 && DB_REDO(op)) {
- /* Need to redo the deallocation. */
- meta->free = argp->pgno;
- LSN(meta) = *lsnp;
- modified = 1;
- } else if (cmp_n == 0 && DB_UNDO(op)) {
- /* Need to undo the deallocation. */
- meta->free = argp->next;
- LSN(meta) = argp->meta_lsn;
- modified = 1;
- }
- if ((ret = memp_fput(mpf, meta, modified ? DB_MPOOL_DIRTY : 0)) != 0)
- goto out;
-
-done: *lsnp = argp->prev_lsn;
- ret = 0;
-
-out: REC_CLOSE;
-}
-
-/*
* __bam_split_recover --
* Recovery function for split.
*
@@ -320,7 +50,7 @@ __bam_split_recover(dbenv, dbtp, lsnp, op, info)
PAGE *_lp, *lp, *np, *pp, *_rp, *rp, *sp;
db_pgno_t pgno, root_pgno;
u_int32_t ptype;
- int cmp, l_update, p_update, r_update, rc, ret, rootsplit, t_ret;
+ int cmp, l_update, p_update, r_update, rc, ret, ret_l, rootsplit, t_ret;
COMPQUIET(info, NULL);
REC_PRINT(__bam_split_print);
@@ -345,16 +75,16 @@ __bam_split_recover(dbenv, dbtp, lsnp, op, info)
* so it's got to be aligned. Copying it into allocated memory is
* the only way to guarantee this.
*/
- if ((ret = __os_malloc(dbenv, argp->pg.size, NULL, &sp)) != 0)
+ if ((ret = __os_malloc(dbenv, argp->pg.size, &sp)) != 0)
goto out;
memcpy(sp, argp->pg.data, argp->pg.size);
pgno = PGNO(sp);
root_pgno = argp->root_pgno;
- rootsplit = pgno == root_pgno;
- if (memp_fget(mpf, &argp->left, 0, &lp) != 0)
+ rootsplit = root_pgno != PGNO_INVALID;
+ if ((ret_l = mpf->get(mpf, &argp->left, 0, &lp)) != 0)
lp = NULL;
- if (memp_fget(mpf, &argp->right, 0, &rp) != 0)
+ if (mpf->get(mpf, &argp->right, 0, &rp) != 0)
rp = NULL;
if (DB_REDO(op)) {
@@ -368,8 +98,8 @@ __bam_split_recover(dbenv, dbtp, lsnp, op, info)
* same reason.
*/
if (rootsplit) {
- if ((ret = memp_fget(mpf, &pgno, 0, &pp)) != 0) {
- (void)__db_pgerr(file_dbp, pgno);
+ if ((ret = mpf->get(mpf, &pgno, 0, &pp)) != 0) {
+ __db_pgerr(file_dbp, pgno, ret);
pp = NULL;
goto out;
}
@@ -377,7 +107,7 @@ __bam_split_recover(dbenv, dbtp, lsnp, op, info)
CHECK_LSN(op, cmp, &LSN(pp), &LSN(argp->pg.data));
p_update = cmp == 0;
} else if (lp == NULL) {
- (void)__db_pgerr(file_dbp, argp->left);
+ __db_pgerr(file_dbp, argp->left, ret_l);
goto out;
}
@@ -400,10 +130,8 @@ __bam_split_recover(dbenv, dbtp, lsnp, op, info)
goto check_next;
/* Allocate and initialize new left/right child pages. */
- if ((ret =
- __os_malloc(dbenv, file_dbp->pgsize, NULL, &_lp)) != 0
- || (ret =
- __os_malloc(dbenv, file_dbp->pgsize, NULL, &_rp)) != 0)
+ if ((ret = __os_malloc(dbenv, file_dbp->pgsize, &_lp)) != 0 ||
+ (ret = __os_malloc(dbenv, file_dbp->pgsize, &_rp)) != 0)
goto out;
if (rootsplit) {
P_INIT(_lp, file_dbp->pgsize, argp->left,
@@ -431,31 +159,31 @@ __bam_split_recover(dbenv, dbtp, lsnp, op, info)
goto out;
/* If the left child is wrong, update it. */
- if (lp == NULL && (ret =
- memp_fget(mpf, &argp->left, DB_MPOOL_CREATE, &lp)) != 0) {
- (void)__db_pgerr(file_dbp, argp->left);
+ if (lp == NULL && (ret = mpf->get(
+ mpf, &argp->left, DB_MPOOL_CREATE, &lp)) != 0) {
+ __db_pgerr(file_dbp, argp->left, ret);
lp = NULL;
goto out;
}
if (l_update) {
memcpy(lp, _lp, file_dbp->pgsize);
lp->lsn = *lsnp;
- if ((ret = memp_fput(mpf, lp, DB_MPOOL_DIRTY)) != 0)
+ if ((ret = mpf->put(mpf, lp, DB_MPOOL_DIRTY)) != 0)
goto out;
lp = NULL;
}
/* If the right child is wrong, update it. */
- if (rp == NULL && (ret = memp_fget(mpf,
- &argp->right, DB_MPOOL_CREATE, &rp)) != 0) {
- (void)__db_pgerr(file_dbp, argp->right);
+ if (rp == NULL && (ret = mpf->get(
+ mpf, &argp->right, DB_MPOOL_CREATE, &rp)) != 0) {
+ __db_pgerr(file_dbp, argp->right, ret);
rp = NULL;
goto out;
}
if (r_update) {
memcpy(rp, _rp, file_dbp->pgsize);
rp->lsn = *lsnp;
- if ((ret = memp_fput(mpf, rp, DB_MPOOL_DIRTY)) != 0)
+ if ((ret = mpf->put(mpf, rp, DB_MPOOL_DIRTY)) != 0)
goto out;
rp = NULL;
}
@@ -477,11 +205,11 @@ __bam_split_recover(dbenv, dbtp, lsnp, op, info)
P_INIT(pp, file_dbp->pgsize, root_pgno,
PGNO_INVALID, PGNO_INVALID, _lp->level + 1, ptype);
- RE_NREC_SET(pp,
- rc ? __bam_total(_lp) + __bam_total(_rp) : 0);
+ RE_NREC_SET(pp, rc ? __bam_total(file_dbp, _lp) +
+ __bam_total(file_dbp, _rp) : 0);
pp->lsn = *lsnp;
- if ((ret = memp_fput(mpf, pp, DB_MPOOL_DIRTY)) != 0)
+ if ((ret = mpf->put(mpf, pp, DB_MPOOL_DIRTY)) != 0)
goto out;
pp = NULL;
}
@@ -494,8 +222,8 @@ check_next: /*
* page must exist because we're redoing the operation.
*/
if (!rootsplit && !IS_ZERO_LSN(argp->nlsn)) {
- if ((ret = memp_fget(mpf, &argp->npgno, 0, &np)) != 0) {
- (void)__db_pgerr(file_dbp, argp->npgno);
+ if ((ret = mpf->get(mpf, &argp->npgno, 0, &np)) != 0) {
+ __db_pgerr(file_dbp, argp->npgno, ret);
np = NULL;
goto out;
}
@@ -505,7 +233,7 @@ check_next: /*
PREV_PGNO(np) = argp->right;
np->lsn = *lsnp;
if ((ret =
- memp_fput(mpf, np, DB_MPOOL_DIRTY)) != 0)
+ mpf->put(mpf, np, DB_MPOOL_DIRTY)) != 0)
goto out;
np = NULL;
}
@@ -518,13 +246,13 @@ check_next: /*
* the adds onto the page that caused the split, and there's
* really no undo-ing to be done.
*/
- if ((ret = memp_fget(mpf, &pgno, 0, &pp)) != 0) {
+ if ((ret = mpf->get(mpf, &pgno, 0, &pp)) != 0) {
pp = NULL;
goto lrundo;
}
if (log_compare(lsnp, &LSN(pp)) == 0) {
memcpy(pp, argp->pg.data, argp->pg.size);
- if ((ret = memp_fput(mpf, pp, DB_MPOOL_DIRTY)) != 0)
+ if ((ret = mpf->put(mpf, pp, DB_MPOOL_DIRTY)) != 0)
goto out;
pp = NULL;
}
@@ -542,7 +270,7 @@ lrundo: if ((rootsplit && lp != NULL) || rp != NULL) {
log_compare(lsnp, &LSN(lp)) == 0) {
lp->lsn = argp->llsn;
if ((ret =
- memp_fput(mpf, lp, DB_MPOOL_DIRTY)) != 0)
+ mpf->put(mpf, lp, DB_MPOOL_DIRTY)) != 0)
goto out;
lp = NULL;
}
@@ -550,7 +278,7 @@ lrundo: if ((rootsplit && lp != NULL) || rp != NULL) {
log_compare(lsnp, &LSN(rp)) == 0) {
rp->lsn = argp->rlsn;
if ((ret =
- memp_fput(mpf, rp, DB_MPOOL_DIRTY)) != 0)
+ mpf->put(mpf, rp, DB_MPOOL_DIRTY)) != 0)
goto out;
rp = NULL;
}
@@ -565,14 +293,14 @@ lrundo: if ((rootsplit && lp != NULL) || rp != NULL) {
* if there's nothing to undo.
*/
if (!rootsplit && !IS_ZERO_LSN(argp->nlsn)) {
- if ((ret = memp_fget(mpf, &argp->npgno, 0, &np)) != 0) {
+ if ((ret = mpf->get(mpf, &argp->npgno, 0, &np)) != 0) {
np = NULL;
goto done;
}
if (log_compare(lsnp, &LSN(np)) == 0) {
PREV_PGNO(np) = argp->left;
np->lsn = argp->nlsn;
- if (memp_fput(mpf, np, DB_MPOOL_DIRTY))
+ if (mpf->put(mpf, np, DB_MPOOL_DIRTY))
goto out;
np = NULL;
}
@@ -583,22 +311,22 @@ done: *lsnp = argp->prev_lsn;
ret = 0;
out: /* Free any pages that weren't dirtied. */
- if (pp != NULL && (t_ret = memp_fput(mpf, pp, 0)) != 0 && ret == 0)
+ if (pp != NULL && (t_ret = mpf->put(mpf, pp, 0)) != 0 && ret == 0)
ret = t_ret;
- if (lp != NULL && (t_ret = memp_fput(mpf, lp, 0)) != 0 && ret == 0)
+ if (lp != NULL && (t_ret = mpf->put(mpf, lp, 0)) != 0 && ret == 0)
ret = t_ret;
- if (np != NULL && (t_ret = memp_fput(mpf, np, 0)) != 0 && ret == 0)
+ if (np != NULL && (t_ret = mpf->put(mpf, np, 0)) != 0 && ret == 0)
ret = t_ret;
- if (rp != NULL && (t_ret = memp_fput(mpf, rp, 0)) != 0 && ret == 0)
+ if (rp != NULL && (t_ret = mpf->put(mpf, rp, 0)) != 0 && ret == 0)
ret = t_ret;
/* Free any allocated space. */
if (_lp != NULL)
- __os_free(_lp, file_dbp->pgsize);
+ __os_free(dbenv, _lp);
if (_rp != NULL)
- __os_free(_rp, file_dbp->pgsize);
+ __os_free(dbenv, _rp);
if (sp != NULL)
- __os_free(sp, argp->pg.size);
+ __os_free(dbenv, sp);
REC_CLOSE;
}
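All of the recovery functions in bt_rec.c follow the same LSN protocol: compare the page's on-disk LSN with the "before" LSN stored in the log record to decide whether a redo is needed, compare it with the log record's own LSN to decide whether an undo is needed, and dirty the page only if something changed. A stripped-down model of that decision, with invented types standing in for DB_LSN and db_recops and no Berkeley DB calls, is:

    #include <stdint.h>

    /* Simplified stand-ins for DB_LSN and db_recops; not the real types. */
    struct lsn { uint32_t file, offset; };
    enum recop { OP_REDO, OP_UNDO };

    static int
    lsn_cmp(const struct lsn *a, const struct lsn *b)
    {
        if (a->file != b->file)
            return (a->file < b->file ? -1 : 1);
        if (a->offset != b->offset)
            return (a->offset < b->offset ? -1 : 1);
        return (0);
    }

    /*
     * recover_decide --
     *    Decide what a recovery function should do with one page: returns
     *    1 if the caller must apply the redo, -1 if it must apply the
     *    undo, 0 if the page is already in the right state.  *page_lsn is
     *    updated to the value the page should carry afterwards.
     */
    int
    recover_decide(enum recop op, struct lsn *page_lsn,
        const struct lsn *rec_lsn, const struct lsn *before_lsn)
    {
        int cmp_n, cmp_p;

        cmp_n = lsn_cmp(rec_lsn, page_lsn);    /* Record LSN vs. page LSN. */
        cmp_p = lsn_cmp(page_lsn, before_lsn); /* Page LSN vs. before-image. */

        if (op == OP_REDO && cmp_p == 0) {
            *page_lsn = *rec_lsn;        /* Roll the page forward. */
            return (1);
        }
        if (op == OP_UNDO && cmp_n == 0) {
            *page_lsn = *before_lsn;     /* Roll the page back. */
            return (-1);
        }
        return (0);                      /* Nothing to do. */
    }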
@@ -627,23 +355,24 @@ __bam_rsplit_recover(dbenv, dbtp, lsnp, op, info)
db_pgno_t pgno, root_pgno;
int cmp_n, cmp_p, modified, ret;
+ pagep = NULL;
COMPQUIET(info, NULL);
REC_PRINT(__bam_rsplit_print);
REC_INTRO(__bam_rsplit_read, 1);
/* Fix the root page. */
pgno = root_pgno = argp->root_pgno;
- if ((ret = memp_fget(mpf, &pgno, 0, &pagep)) != 0) {
+ if ((ret = mpf->get(mpf, &pgno, 0, &pagep)) != 0) {
/* The root page must always exist if we are going forward. */
if (DB_REDO(op)) {
- __db_pgerr(file_dbp, pgno);
+ __db_pgerr(file_dbp, pgno, ret);
goto out;
}
/* This must be the root of an OPD tree. */
DB_ASSERT(root_pgno !=
((BTREE *)file_dbp->bt_internal)->bt_root);
ret = 0;
- goto done;
+ goto do_page;
}
modified = 0;
cmp_n = log_compare(lsnp, &LSN(pagep));
@@ -666,22 +395,23 @@ __bam_rsplit_recover(dbenv, dbtp, lsnp, op, info)
pagep->lsn = argp->rootlsn;
modified = 1;
}
- if ((ret = memp_fput(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0)) != 0)
+ if ((ret = mpf->put(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0)) != 0)
goto out;
+do_page:
/*
* Fix the page copied over the root page. It's possible that the
* page never made it to disk, so if we're undo-ing and the page
* doesn't exist, it's okay and there's nothing further to do.
*/
- if ((ret = memp_fget(mpf, &argp->pgno, 0, &pagep)) != 0) {
+ if ((ret = mpf->get(mpf, &argp->pgno, 0, &pagep)) != 0) {
if (DB_UNDO(op))
goto done;
- (void)__db_pgerr(file_dbp, argp->pgno);
+ __db_pgerr(file_dbp, argp->pgno, ret);
goto out;
}
modified = 0;
- __ua_memcpy(&copy_lsn, &LSN(argp->pgdbt.data), sizeof(DB_LSN));
+ (void)__ua_memcpy(&copy_lsn, &LSN(argp->pgdbt.data), sizeof(DB_LSN));
cmp_n = log_compare(lsnp, &LSN(pagep));
cmp_p = log_compare(&LSN(pagep), &copy_lsn);
CHECK_LSN(op, cmp_p, &LSN(pagep), &copy_lsn);
@@ -694,13 +424,16 @@ __bam_rsplit_recover(dbenv, dbtp, lsnp, op, info)
memcpy(pagep, argp->pgdbt.data, argp->pgdbt.size);
modified = 1;
}
- if ((ret = memp_fput(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0)) != 0)
+ if ((ret = mpf->put(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0)) != 0)
goto out;
+ pagep = NULL;
done: *lsnp = argp->prev_lsn;
ret = 0;
-out: REC_CLOSE;
+out: if (pagep != NULL)
+ (void)mpf->put(mpf, pagep, 0);
+ REC_CLOSE;
}
/*
@@ -725,15 +458,16 @@ __bam_adj_recover(dbenv, dbtp, lsnp, op, info)
PAGE *pagep;
int cmp_n, cmp_p, modified, ret;
+ pagep = NULL;
COMPQUIET(info, NULL);
REC_PRINT(__bam_adj_print);
REC_INTRO(__bam_adj_read, 1);
/* Get the page; if it never existed and we're undoing, we're done. */
- if ((ret = memp_fget(mpf, &argp->pgno, 0, &pagep)) != 0) {
+ if ((ret = mpf->get(mpf, &argp->pgno, 0, &pagep)) != 0) {
if (DB_UNDO(op))
goto done;
- (void)__db_pgerr(file_dbp, argp->pgno);
+ __db_pgerr(file_dbp, argp->pgno, ret);
goto out;
}
@@ -745,7 +479,7 @@ __bam_adj_recover(dbenv, dbtp, lsnp, op, info)
/* Need to redo update described. */
if ((ret = __bam_adjindx(dbc,
pagep, argp->indx, argp->indx_copy, argp->is_insert)) != 0)
- goto err;
+ goto out;
LSN(pagep) = *lsnp;
modified = 1;
@@ -753,21 +487,21 @@ __bam_adj_recover(dbenv, dbtp, lsnp, op, info)
/* Need to undo update described. */
if ((ret = __bam_adjindx(dbc,
pagep, argp->indx, argp->indx_copy, !argp->is_insert)) != 0)
- goto err;
+ goto out;
LSN(pagep) = argp->lsn;
modified = 1;
}
- if ((ret = memp_fput(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0)) != 0)
+ if ((ret = mpf->put(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0)) != 0)
goto out;
+ pagep = NULL;
done: *lsnp = argp->prev_lsn;
ret = 0;
- if (0) {
-err: (void)memp_fput(mpf, pagep, 0);
- }
-out: REC_CLOSE;
+out: if (pagep != NULL)
+ (void)mpf->put(mpf, pagep, 0);
+ REC_CLOSE;
}
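The hunk above also shows the cleanup convention this patch adopts across the recovery functions: the page pointer is initialized to NULL, every failure jumps to the single out label, and out releases the page only if it is still held, replacing the old "if (0) { err: ... }" construction. The same single-exit idiom in a self-contained form, using malloc/free in place of the memory-pool get/put pair, is:

    #include <stdlib.h>
    #include <string.h>

    /*
     * copy_twice --
     *    Acquire two resources, fail cleanly at any point, and release
     *    whatever is still held at a single exit.  Hypothetical example,
     *    not library code.
     */
    int
    copy_twice(const char *src, size_t len, char **out1, char **out2)
    {
        char *a, *b;
        int ret;

        a = b = NULL;            /* Nothing held yet. */
        ret = 0;

        if ((a = malloc(len)) == NULL) {
            ret = -1;
            goto out;
        }
        memcpy(a, src, len);

        if ((b = malloc(len)) == NULL) {
            ret = -1;
            goto out;
        }
        memcpy(b, src, len);

        *out1 = a;
        *out2 = b;
        return (0);

    out:    /* Release only what is still held; a may be NULL here too. */
        if (b != NULL)
            free(b);
        if (a != NULL)
            free(a);
        return (ret);
    }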
/*
@@ -793,15 +527,16 @@ __bam_cadjust_recover(dbenv, dbtp, lsnp, op, info)
PAGE *pagep;
int cmp_n, cmp_p, modified, ret;
+ pagep = NULL;
COMPQUIET(info, NULL);
REC_PRINT(__bam_cadjust_print);
REC_INTRO(__bam_cadjust_read, 1);
/* Get the page; if it never existed and we're undoing, we're done. */
- if ((ret = memp_fget(mpf, &argp->pgno, 0, &pagep)) != 0) {
+ if ((ret = mpf->get(mpf, &argp->pgno, 0, &pagep)) != 0) {
if (DB_UNDO(op))
goto done;
- (void)__db_pgerr(file_dbp, argp->pgno);
+ __db_pgerr(file_dbp, argp->pgno, ret);
goto out;
}
@@ -812,11 +547,13 @@ __bam_cadjust_recover(dbenv, dbtp, lsnp, op, info)
if (cmp_p == 0 && DB_REDO(op)) {
/* Need to redo update described. */
if (IS_BTREE_PAGE(pagep)) {
- GET_BINTERNAL(pagep, argp->indx)->nrecs += argp->adjust;
+ GET_BINTERNAL(file_dbp, pagep, argp->indx)->nrecs +=
+ argp->adjust;
if (argp->opflags & CAD_UPDATEROOT)
RE_NREC_ADJ(pagep, argp->adjust);
} else {
- GET_RINTERNAL(pagep, argp->indx)->nrecs += argp->adjust;
+ GET_RINTERNAL(file_dbp, pagep, argp->indx)->nrecs +=
+ argp->adjust;
if (argp->opflags & CAD_UPDATEROOT)
RE_NREC_ADJ(pagep, argp->adjust);
}
@@ -826,24 +563,29 @@ __bam_cadjust_recover(dbenv, dbtp, lsnp, op, info)
} else if (cmp_n == 0 && DB_UNDO(op)) {
/* Need to undo update described. */
if (IS_BTREE_PAGE(pagep)) {
- GET_BINTERNAL(pagep, argp->indx)->nrecs -= argp->adjust;
+ GET_BINTERNAL(file_dbp, pagep, argp->indx)->nrecs -=
+ argp->adjust;
if (argp->opflags & CAD_UPDATEROOT)
RE_NREC_ADJ(pagep, -(argp->adjust));
} else {
- GET_RINTERNAL(pagep, argp->indx)->nrecs -= argp->adjust;
+ GET_RINTERNAL(file_dbp, pagep, argp->indx)->nrecs -=
+ argp->adjust;
if (argp->opflags & CAD_UPDATEROOT)
RE_NREC_ADJ(pagep, -(argp->adjust));
}
LSN(pagep) = argp->lsn;
modified = 1;
}
- if ((ret = memp_fput(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0)) != 0)
+ if ((ret = mpf->put(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0)) != 0)
goto out;
+ pagep = NULL;
done: *lsnp = argp->prev_lsn;
ret = 0;
-out: REC_CLOSE;
+out: if (pagep != NULL)
+ (void)mpf->put(mpf, pagep, 0);
+ REC_CLOSE;
}
/*
@@ -869,15 +611,16 @@ __bam_cdel_recover(dbenv, dbtp, lsnp, op, info)
u_int32_t indx;
int cmp_n, cmp_p, modified, ret;
+ pagep = NULL;
COMPQUIET(info, NULL);
REC_PRINT(__bam_cdel_print);
REC_INTRO(__bam_cdel_read, 1);
/* Get the page; if it never existed and we're undoing, we're done. */
- if ((ret = memp_fget(mpf, &argp->pgno, 0, &pagep)) != 0) {
+ if ((ret = mpf->get(mpf, &argp->pgno, 0, &pagep)) != 0) {
if (DB_UNDO(op))
goto done;
- (void)__db_pgerr(file_dbp, argp->pgno);
+ __db_pgerr(file_dbp, argp->pgno, ret);
goto out;
}
@@ -888,27 +631,30 @@ __bam_cdel_recover(dbenv, dbtp, lsnp, op, info)
if (cmp_p == 0 && DB_REDO(op)) {
/* Need to redo update described. */
indx = argp->indx + (TYPE(pagep) == P_LBTREE ? O_INDX : 0);
- B_DSET(GET_BKEYDATA(pagep, indx)->type);
+ B_DSET(GET_BKEYDATA(file_dbp, pagep, indx)->type);
LSN(pagep) = *lsnp;
modified = 1;
} else if (cmp_n == 0 && DB_UNDO(op)) {
/* Need to undo update described. */
indx = argp->indx + (TYPE(pagep) == P_LBTREE ? O_INDX : 0);
- B_DCLR(GET_BKEYDATA(pagep, indx)->type);
+ B_DCLR(GET_BKEYDATA(file_dbp, pagep, indx)->type);
(void)__bam_ca_delete(file_dbp, argp->pgno, argp->indx, 0);
LSN(pagep) = argp->lsn;
modified = 1;
}
- if ((ret = memp_fput(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0)) != 0)
+ if ((ret = mpf->put(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0)) != 0)
goto out;
+ pagep = NULL;
done: *lsnp = argp->prev_lsn;
ret = 0;
-out: REC_CLOSE;
+out: if (pagep != NULL)
+ (void)mpf->put(mpf, pagep, 0);
+ REC_CLOSE;
}
/*
@@ -936,18 +682,19 @@ __bam_repl_recover(dbenv, dbtp, lsnp, op, info)
int cmp_n, cmp_p, modified, ret;
u_int8_t *p;
+ pagep = NULL;
COMPQUIET(info, NULL);
REC_PRINT(__bam_repl_print);
REC_INTRO(__bam_repl_read, 1);
/* Get the page; if it never existed and we're undoing, we're done. */
- if ((ret = memp_fget(mpf, &argp->pgno, 0, &pagep)) != 0) {
+ if ((ret = mpf->get(mpf, &argp->pgno, 0, &pagep)) != 0) {
if (DB_UNDO(op))
goto done;
- (void)__db_pgerr(file_dbp, argp->pgno);
+ __db_pgerr(file_dbp, argp->pgno, ret);
goto out;
}
- bk = GET_BKEYDATA(pagep, argp->indx);
+ bk = GET_BKEYDATA(file_dbp, pagep, argp->indx);
modified = 0;
cmp_n = log_compare(lsnp, &LSN(pagep));
@@ -961,8 +708,8 @@ __bam_repl_recover(dbenv, dbtp, lsnp, op, info)
*/
memset(&dbt, 0, sizeof(dbt));
dbt.size = argp->prefix + argp->suffix + argp->repl.size;
- if ((ret = __os_malloc(dbenv, dbt.size, NULL, &dbt.data)) != 0)
- goto err;
+ if ((ret = __os_malloc(dbenv, dbt.size, &dbt.data)) != 0)
+ goto out;
p = dbt.data;
memcpy(p, bk->data, argp->prefix);
p += argp->prefix;
@@ -971,9 +718,9 @@ __bam_repl_recover(dbenv, dbtp, lsnp, op, info)
memcpy(p, bk->data + (bk->len - argp->suffix), argp->suffix);
ret = __bam_ritem(dbc, pagep, argp->indx, &dbt);
- __os_free(dbt.data, dbt.size);
+ __os_free(dbenv, dbt.data);
if (ret != 0)
- goto err;
+ goto out;
LSN(pagep) = *lsnp;
modified = 1;
@@ -985,8 +732,8 @@ __bam_repl_recover(dbenv, dbtp, lsnp, op, info)
*/
memset(&dbt, 0, sizeof(dbt));
dbt.size = argp->prefix + argp->suffix + argp->orig.size;
- if ((ret = __os_malloc(dbenv, dbt.size, NULL, &dbt.data)) != 0)
- goto err;
+ if ((ret = __os_malloc(dbenv, dbt.size, &dbt.data)) != 0)
+ goto out;
p = dbt.data;
memcpy(p, bk->data, argp->prefix);
p += argp->prefix;
@@ -995,27 +742,27 @@ __bam_repl_recover(dbenv, dbtp, lsnp, op, info)
memcpy(p, bk->data + (bk->len - argp->suffix), argp->suffix);
ret = __bam_ritem(dbc, pagep, argp->indx, &dbt);
- __os_free(dbt.data, dbt.size);
+ __os_free(dbenv, dbt.data);
if (ret != 0)
- goto err;
+ goto out;
/* Reset the deleted flag, if necessary. */
if (argp->isdeleted)
- B_DSET(GET_BKEYDATA(pagep, argp->indx)->type);
+ B_DSET(GET_BKEYDATA(file_dbp, pagep, argp->indx)->type);
LSN(pagep) = argp->lsn;
modified = 1;
}
- if ((ret = memp_fput(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0)) != 0)
+ if ((ret = mpf->put(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0)) != 0)
goto out;
+ pagep = NULL;
done: *lsnp = argp->prev_lsn;
ret = 0;
- if (0) {
-err: (void)memp_fput(mpf, pagep, 0);
- }
-out: REC_CLOSE;
+out: if (pagep != NULL)
+ (void)mpf->put(mpf, pagep, 0);
+ REC_CLOSE;
}
/*
@@ -1040,14 +787,15 @@ __bam_root_recover(dbenv, dbtp, lsnp, op, info)
DB_MPOOLFILE *mpf;
int cmp_n, cmp_p, modified, ret;
+ meta = NULL;
COMPQUIET(info, NULL);
REC_PRINT(__bam_root_print);
REC_INTRO(__bam_root_read, 0);
- if ((ret = memp_fget(mpf, &argp->meta_pgno, 0, &meta)) != 0) {
+ if ((ret = mpf->get(mpf, &argp->meta_pgno, 0, &meta)) != 0) {
/* The metadata page must always exist on redo. */
if (DB_REDO(op)) {
- (void)__db_pgerr(file_dbp, argp->meta_pgno);
+ __db_pgerr(file_dbp, argp->meta_pgno, ret);
goto out;
} else
goto done;
@@ -1068,13 +816,16 @@ __bam_root_recover(dbenv, dbtp, lsnp, op, info)
meta->dbmeta.lsn = argp->meta_lsn;
modified = 1;
}
- if ((ret = memp_fput(mpf, meta, modified ? DB_MPOOL_DIRTY : 0)) != 0)
+ if ((ret = mpf->put(mpf, meta, modified ? DB_MPOOL_DIRTY : 0)) != 0)
goto out;
+ meta = NULL;
done: *lsnp = argp->prev_lsn;
ret = 0;
-out: REC_CLOSE;
+out: if (meta != NULL)
+ (void)mpf->put(mpf, meta, 0);
+ REC_CLOSE;
}
/*
@@ -1116,7 +867,7 @@ __bam_curadj_recover(dbenv, dbtp, lsnp, op, info)
break;
case DB_CA_DUP:
if ((ret = __bam_ca_undodup(file_dbp, argp->first_indx,
- argp->from_pgno, argp->from_indx, argp->to_indx)) != 0)
+ argp->from_pgno, argp->from_indx, argp->to_indx)) != 0)
goto out;
break;
@@ -1181,7 +932,8 @@ __bam_rcuradj_recover(dbenv, dbtp, lsnp, op, info)
* this function know anything about how offpage dups work.
*/
if ((ret =
- __db_icursor(file_dbp, NULL, DB_RECNO, argp->root, 0, &rdbc)) != 0)
+ __db_icursor(file_dbp,
+ NULL, DB_RECNO, argp->root, 0, DB_LOCK_INVALIDID, &rdbc)) != 0)
goto out;
cp = (BTREE_CURSOR *)rdbc->internal;
diff --git a/bdb/btree/bt_reclaim.c b/bdb/btree/bt_reclaim.c
index 538d837c2d2..ae4554ea7d6 100644
--- a/bdb/btree/bt_reclaim.c
+++ b/bdb/btree/bt_reclaim.c
@@ -1,14 +1,14 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1998, 1999, 2000
+ * Copyright (c) 1998-2002
* Sleepycat Software. All rights reserved.
*/
#include "db_config.h"
#ifndef lint
-static const char revid[] = "$Id: bt_reclaim.c,v 11.5 2000/03/22 04:21:01 ubell Exp $";
+static const char revid[] = "$Id: bt_reclaim.c,v 11.11 2002/03/29 20:46:26 bostic Exp $";
#endif /* not lint */
#ifndef NO_SYSTEM_INCLUDES
@@ -18,10 +18,8 @@ static const char revid[] = "$Id: bt_reclaim.c,v 11.5 2000/03/22 04:21:01 ubell
#endif
#include "db_int.h"
-#include "db_page.h"
-#include "db_shash.h"
-#include "lock.h"
-#include "btree.h"
+#include "dbinc/db_page.h"
+#include "dbinc/btree.h"
/*
* __bam_reclaim --
@@ -51,3 +49,38 @@ __bam_reclaim(dbp, txn)
return (ret);
}
+
+/*
+ * __bam_truncate --
+ * Truncate a database.
+ *
+ * PUBLIC: int __bam_truncate __P((DB *, DB_TXN *, u_int32_t *));
+ */
+int
+__bam_truncate(dbp, txn, countp)
+ DB *dbp;
+ DB_TXN *txn;
+ u_int32_t *countp;
+{
+ DBC *dbc;
+ db_trunc_param trunc;
+ int ret, t_ret;
+
+ /* Acquire a cursor. */
+ if ((ret = dbp->cursor(dbp, txn, &dbc, 0)) != 0)
+ return (ret);
+
+ trunc.count = 0;
+ trunc.dbc = dbc;
+ /* Walk the tree, freeing pages. */
+ ret = __bam_traverse(dbc,
+ DB_LOCK_WRITE, dbc->internal->root, __db_truncate_callback, &trunc);
+
+ /* Discard the cursor. */
+ if ((t_ret = dbc->c_close(dbc)) != 0 && ret == 0)
+ ret = t_ret;
+
+ *countp = trunc.count;
+
+ return (ret);
+}
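The new __bam_truncate above wraps the existing traversal machinery: it opens a cursor, walks the tree with a callback that discards pages and counts what it visited, and reports the count back through the out parameter (carried in db_trunc_param). The same walk-with-callback-and-accumulator pattern, demonstrated on a plain singly linked list with invented names rather than the btree code, is:

    #include <stdlib.h>

    struct node {
        struct node *next;
        int value;
    };

    /* Per-traversal state handed to the callback, like db_trunc_param. */
    struct trunc_param {
        unsigned long count;
    };

    /* Callback applied to every node; returns nonzero to abort the walk. */
    typedef int (*visit_fn)(struct node *, void *);

    static int
    traverse(struct node *head, visit_fn cb, void *arg)
    {
        struct node *np, *next;
        int ret;

        for (np = head; np != NULL; np = next) {
            next = np->next;    /* Read before the callback frees it. */
            if ((ret = cb(np, arg)) != 0)
                return (ret);
        }
        return (0);
    }

    static int
    free_and_count(struct node *np, void *arg)
    {
        struct trunc_param *param = arg;

        ++param->count;
        free(np);
        return (0);
    }

    /* Empty the list and report how many elements it held. */
    int
    truncate_list(struct node **headp, unsigned long *countp)
    {
        struct trunc_param param;
        int ret;

        param.count = 0;
        ret = traverse(*headp, free_and_count, &param);
        *headp = NULL;
        *countp = param.count;
        return (ret);
    }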
diff --git a/bdb/btree/bt_recno.c b/bdb/btree/bt_recno.c
index 6ac0cac350d..fab684f3a5f 100644
--- a/bdb/btree/bt_recno.c
+++ b/bdb/btree/bt_recno.c
@@ -1,36 +1,31 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1997, 1998, 1999, 2000
+ * Copyright (c) 1997-2002
* Sleepycat Software. All rights reserved.
*/
#include "db_config.h"
#ifndef lint
-static const char revid[] = "$Id: bt_recno.c,v 11.65 2001/01/18 14:33:22 bostic Exp $";
+static const char revid[] = "$Id: bt_recno.c,v 11.106 2002/08/16 04:56:30 ubell Exp $";
#endif /* not lint */
#ifndef NO_SYSTEM_INCLUDES
#include <sys/types.h>
#include <limits.h>
+#include <stdio.h>
#include <string.h>
#endif
#include "db_int.h"
-#include "db_page.h"
-#include "btree.h"
-#include "db_ext.h"
-#include "db_shash.h"
-#include "lock.h"
-#include "lock_ext.h"
-#include "qam.h"
-#include "txn.h"
+#include "dbinc/db_page.h"
+#include "dbinc/btree.h"
+#include "dbinc/db_shash.h"
+#include "dbinc/lock.h"
static int __ram_add __P((DBC *, db_recno_t *, DBT *, u_int32_t, u_int32_t));
-static int __ram_delete __P((DB *, DB_TXN *, DBT *, u_int32_t));
-static int __ram_put __P((DB *, DB_TXN *, DBT *, DBT *, u_int32_t));
static int __ram_source __P((DB *));
static int __ram_sread __P((DBC *, db_recno_t));
static int __ram_update __P((DBC *, db_recno_t, int));
@@ -90,17 +85,32 @@ static int __ram_update __P((DBC *, db_recno_t, int));
* Do we need to log the current cursor adjustment?
*/
#define CURADJ_LOG(dbc) \
- (DB_LOGGING((dbc)) && (dbc)->txn != NULL && (dbc)->txn->parent != NULL)
+ (DBC_LOGGING((dbc)) && (dbc)->txn != NULL && (dbc)->txn->parent != NULL)
+
+/*
+ * After a search, copy the found page into the cursor, discarding any
+ * currently held lock.
+ */
+#define STACK_TO_CURSOR(cp) { \
+ (cp)->page = (cp)->csp->page; \
+ (cp)->pgno = (cp)->csp->page->pgno; \
+ (cp)->indx = (cp)->csp->indx; \
+ (void)__TLPUT(dbc, (cp)->lock); \
+ (cp)->lock = (cp)->csp->lock; \
+ (cp)->lock_mode = (cp)->csp->lock_mode; \
+}
/*
* __ram_open --
* Recno open function.
*
- * PUBLIC: int __ram_open __P((DB *, const char *, db_pgno_t, u_int32_t));
+ * PUBLIC: int __ram_open __P((DB *,
+ * PUBLIC: DB_TXN *, const char *, db_pgno_t, u_int32_t));
*/
int
-__ram_open(dbp, name, base_pgno, flags)
+__ram_open(dbp, txn, name, base_pgno, flags)
DB *dbp;
+ DB_TXN *txn;
const char *name;
db_pgno_t base_pgno;
u_int32_t flags;
@@ -109,15 +119,14 @@ __ram_open(dbp, name, base_pgno, flags)
DBC *dbc;
int ret, t_ret;
+ COMPQUIET(name, NULL);
t = dbp->bt_internal;
/* Initialize the remaining fields/methods of the DB. */
- dbp->del = __ram_delete;
- dbp->put = __ram_put;
dbp->stat = __bam_stat;
/* Start up the tree. */
- if ((ret = __bam_read_root(dbp, name, base_pgno, flags)) != 0)
+ if ((ret = __bam_read_root(dbp, txn, base_pgno, flags)) != 0)
return (ret);
/*
@@ -132,7 +141,7 @@ __ram_open(dbp, name, base_pgno, flags)
return (ret);
/* If we're snapshotting an underlying source file, do it now. */
- if (F_ISSET(dbp, DB_RE_SNAPSHOT)) {
+ if (F_ISSET(dbp, DB_AM_SNAPSHOT)) {
/* Allocate a cursor. */
if ((ret = dbp->cursor(dbp, NULL, &dbc, 0)) != 0)
return (ret);
@@ -147,104 +156,38 @@ __ram_open(dbp, name, base_pgno, flags)
ret = t_ret;
}
- return (0);
-}
-
-/*
- * __ram_delete --
- * Recno db->del function.
- */
-static int
-__ram_delete(dbp, txn, key, flags)
- DB *dbp;
- DB_TXN *txn;
- DBT *key;
- u_int32_t flags;
-{
- BTREE_CURSOR *cp;
- DBC *dbc;
- db_recno_t recno;
- int ret, t_ret;
-
- PANIC_CHECK(dbp->dbenv);
-
- /* Check for invalid flags. */
- if ((ret = __db_delchk(dbp,
- key, flags, F_ISSET(dbp, DB_AM_RDONLY))) != 0)
- return (ret);
-
- /* Acquire a cursor. */
- if ((ret = dbp->cursor(dbp, txn, &dbc, DB_WRITELOCK)) != 0)
- return (ret);
-
- DEBUG_LWRITE(dbc, txn, "ram_delete", key, NULL, flags);
-
- /* Check the user's record number and fill in as necessary. */
- if ((ret = __ram_getno(dbc, key, &recno, 0)) != 0)
- goto err;
-
- /* Do the delete. */
- cp = (BTREE_CURSOR *)dbc->internal;
- cp->recno = recno;
-
- ret = __ram_c_del(dbc);
-
- /* Release the cursor. */
-err: if ((t_ret = dbc->c_close(dbc)) != 0 && ret == 0)
- ret = t_ret;
-
return (ret);
}
/*
- * __ram_put --
- * Recno db->put function.
+ * __ram_append --
+ * Recno append function.
+ *
+ * PUBLIC: int __ram_append __P((DBC *, DBT *, DBT *));
*/
-static int
-__ram_put(dbp, txn, key, data, flags)
- DB *dbp;
- DB_TXN *txn;
+int
+__ram_append(dbc, key, data)
+ DBC *dbc;
DBT *key, *data;
- u_int32_t flags;
{
- DBC *dbc;
- db_recno_t recno;
- int ret, t_ret;
-
- PANIC_CHECK(dbp->dbenv);
-
- /* Check for invalid flags. */
- if ((ret = __db_putchk(dbp,
- key, data, flags, F_ISSET(dbp, DB_AM_RDONLY), 0)) != 0)
- return (ret);
-
- /* Allocate a cursor. */
- if ((ret = dbp->cursor(dbp, txn, &dbc, DB_WRITELOCK)) != 0)
- return (ret);
+ BTREE_CURSOR *cp;
+ int ret;
- DEBUG_LWRITE(dbc, txn, "ram_put", key, data, flags);
+ cp = (BTREE_CURSOR *)dbc->internal;
/*
- * If we're appending to the tree, make sure we've read in all of
- * the backing source file. Otherwise, check the user's record
- * number and fill in as necessary. If we found the record or it
- * simply didn't exist, add the user's record.
+ * Make sure we've read in all of the backing source file. If
+ * we found the record or it simply didn't exist, add the
+ * user's record.
*/
- if (flags == DB_APPEND)
- ret = __ram_update(dbc, DB_MAX_RECORDS, 0);
- else
- ret = __ram_getno(dbc, key, &recno, 1);
+ ret = __ram_update(dbc, DB_MAX_RECORDS, 0);
if (ret == 0 || ret == DB_NOTFOUND)
- ret = __ram_add(dbc, &recno, data, flags, 0);
-
- /* Discard the cursor. */
- if ((t_ret = dbc->c_close(dbc)) != 0 && ret == 0)
- ret = t_ret;
+ ret = __ram_add(dbc, &cp->recno, data, DB_APPEND, 0);
- /* Return the record number if we're appending to the tree. */
- if (ret == 0 && flags == DB_APPEND)
- ret = __db_retcopy(dbp, key, &recno, sizeof(recno),
- &dbc->rkey.data, &dbc->rkey.ulen);
+ /* Return the record number. */
+ if (ret == 0)
+ ret = __db_retcopy(dbc->dbp->dbenv, key, &cp->recno,
+ sizeof(cp->recno), &dbc->rkey->data, &dbc->rkey->ulen);
return (ret);
}
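__ram_append ends by handing the allocated record number back through the caller's key DBT via __db_retcopy, which reuses (and grows on demand) the per-cursor return buffer referenced by dbc->rkey->data and dbc->rkey->ulen. A bare-bones version of that copy-out-with-reusable-buffer helper, with hypothetical names, is:

    #include <stdlib.h>
    #include <string.h>

    /*
     * retcopy --
     *    Copy a result into a caller-owned buffer, growing the buffer if
     *    it is too small.  *bufp/*ulenp describe the reusable buffer;
     *    *sizep is set to the number of bytes returned.  Hypothetical
     *    helper, not the real __db_retcopy.
     */
    int
    retcopy(void **bufp, size_t *ulenp, size_t *sizep,
        const void *data, size_t len)
    {
        void *p;

        if (*ulenp < len) {
            if ((p = realloc(*bufp, len)) == NULL)
                return (-1);    /* Old buffer is left intact. */
            *bufp = p;
            *ulenp = len;
        }
        memcpy(*bufp, data, len);
        *sizep = len;
        return (0);
    }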
@@ -295,9 +238,9 @@ __ram_c_del(dbc)
goto err;
}
stack = 1;
- cp->page = cp->csp->page;
- cp->pgno = cp->csp->page->pgno;
- cp->indx = cp->csp->indx;
+
+ /* Copy the page into the cursor. */
+ STACK_TO_CURSOR(cp);
/*
* If re-numbering records, the on-page deleted flag can only mean
@@ -310,7 +253,7 @@ __ram_c_del(dbc)
* delete records they never created, the latter is an error because
* if the record was "deleted", we could never have found it.
*/
- if (B_DISSET(GET_BKEYDATA(cp->page, cp->indx)->type)) {
+ if (B_DISSET(GET_BKEYDATA(dbp, cp->page, cp->indx)->type)) {
ret = DB_KEYEMPTY;
goto err;
}
@@ -321,9 +264,8 @@ __ram_c_del(dbc)
goto err;
__bam_adjust(dbc, -1);
if (__ram_ca(dbc, CA_DELETE) > 0 &&
- CURADJ_LOG(dbc) && (ret = __bam_rcuradj_log(dbp->dbenv,
- dbc->txn, &lsn, 0, dbp->log_fileid, CA_DELETE,
- cp->root, cp->recno, cp->order)) != 0)
+ CURADJ_LOG(dbc) && (ret = __bam_rcuradj_log(dbp, dbc->txn,
+ &lsn, 0, CA_DELETE, cp->root, cp->recno, cp->order)) != 0)
goto err;
/*
@@ -346,15 +288,15 @@ __ram_c_del(dbc)
* going to be emptied by removing the single reference
* to the emptied page (or one of its parents).
*/
- for (epg = cp->sp; epg <= cp->csp; ++epg)
- if (NUM_ENT(epg->page) <= 1)
+ for (epg = cp->csp; epg >= cp->sp; --epg)
+ if (NUM_ENT(epg->page) > 1)
break;
/*
* We want to delete a single item out of the last page
- * that we're not deleting, back up to that page.
+ * that we're not deleting.
*/
- ret = __bam_dpages(dbc, --epg);
+ ret = __bam_dpages(dbc, epg);
/*
* Regardless of the return from __bam_dpages, it will
@@ -412,6 +354,7 @@ __ram_c_get(dbc, key, data, flags, pgnop)
dbp = dbc->dbp;
cp = (BTREE_CURSOR *)dbc->internal;
+ LF_CLR(DB_MULTIPLE|DB_MULTIPLE_KEY);
retry: switch (flags) {
case DB_CURRENT:
/*
@@ -504,6 +447,7 @@ retry: switch (flags) {
goto err;
/* NOTREACHED */
case DB_GET_BOTH:
+ case DB_GET_BOTH_RANGE:
/*
* If we're searching a set of off-page dups, we start
* a new linear search from the first record. Otherwise,
@@ -531,6 +475,8 @@ retry: switch (flags) {
* read from the backing source file. Do it now for DB_CURRENT (if
* the current record was deleted we may need more records from the
* backing file for a DB_CURRENT operation), DB_FIRST and DB_NEXT.
+ * (We don't have to test for flags == DB_FIRST, because the switch
+ * statement above re-set flags to DB_NEXT in that case.)
*/
if ((flags == DB_NEXT || flags == DB_CURRENT) && ((ret =
__ram_update(dbc, cp->recno, 0)) != 0) && ret != DB_NOTFOUND)
@@ -547,16 +493,8 @@ retry: switch (flags) {
goto err;
}
- /*
- * Copy the page into the cursor, discarding any lock we
- * are currently holding.
- */
- cp->page = cp->csp->page;
- cp->pgno = cp->csp->page->pgno;
- cp->indx = cp->csp->indx;
- (void)__TLPUT(dbc, cp->lock);
- cp->lock = cp->csp->lock;
- cp->lock_mode = cp->csp->lock_mode;
+ /* Copy the page into the cursor. */
+ STACK_TO_CURSOR(cp);
/*
* If re-numbering records, the on-page deleted flag means this
@@ -567,21 +505,34 @@ retry: switch (flags) {
* walking through off-page duplicates, and fail if they were
* requested explicitly by the application.
*/
- if (B_DISSET(GET_BKEYDATA(cp->page, cp->indx)->type))
+ if (B_DISSET(GET_BKEYDATA(dbp, cp->page, cp->indx)->type))
switch (flags) {
case DB_NEXT:
case DB_PREV:
(void)__bam_stkrel(dbc, STK_CLRDBC);
goto retry;
case DB_GET_BOTH:
- (void)__bam_stkrel(dbc, STK_CLRDBC);
- continue;
+ case DB_GET_BOTH_RANGE:
+ /*
+ * If we're an OPD tree, we don't care about
+ * matching a record number on a DB_GET_BOTH
+ * -- everything belongs to the same tree. A
+ * normal recno should give up and return
+ * DB_NOTFOUND if the matching recno is deleted.
+ */
+ if (F_ISSET(dbc, DBC_OPD)) {
+ (void)__bam_stkrel(dbc, STK_CLRDBC);
+ continue;
+ }
+ ret = DB_NOTFOUND;
+ goto err;
default:
ret = DB_KEYEMPTY;
goto err;
}
- if (flags == DB_GET_BOTH || flags == DB_GET_BOTHC) {
+ if (flags == DB_GET_BOTH ||
+ flags == DB_GET_BOTHC || flags == DB_GET_BOTH_RANGE) {
if ((ret = __bam_cmp(dbp, data,
cp->page, cp->indx, __bam_defcmp, &cmp)) != 0)
return (ret);
@@ -598,10 +549,11 @@ retry: switch (flags) {
/* Return the key if the user didn't give us one. */
if (!F_ISSET(dbc, DBC_OPD)) {
- if (flags != DB_SET && flags != DB_SET_RANGE)
- ret = __db_retcopy(dbp,
- key, &cp->recno, sizeof(cp->recno),
- &dbc->rkey.data, &dbc->rkey.ulen);
+ if (flags != DB_GET_BOTH && flags != DB_GET_BOTH_RANGE &&
+ flags != DB_SET && flags != DB_SET_RANGE)
+ ret = __db_retcopy(dbp->dbenv,
+ key, &cp->recno, sizeof(cp->recno),
+ &dbc->rkey->data, &dbc->rkey->ulen);
F_SET(key, DB_DBT_ISSET);
}
@@ -637,23 +589,43 @@ __ram_c_put(dbc, key, data, flags, pgnop)
cp = (BTREE_CURSOR *)dbc->internal;
/*
- * DB_KEYFIRST and DB_KEYLAST will only be set if we're dealing with
- * an off-page duplicate tree, they can't be specified at user level.
- * Translate them into something else.
+ * DB_KEYFIRST and DB_KEYLAST mean different things if they're
+ * used in an off-page duplicate tree. If we're an off-page
+ * duplicate tree, they really mean "put at the beginning of the
+ * tree" and "put at the end of the tree" respectively, so translate
+ * them to something else.
*/
- switch (flags) {
- case DB_KEYFIRST:
- cp->recno = 1;
- flags = DB_BEFORE;
- break;
- case DB_KEYLAST:
- if ((ret = __ram_add(dbc, &cp->recno, data, DB_APPEND, 0)) != 0)
- return (ret);
- if (CURADJ_LOG(dbc) && (ret = __bam_rcuradj_log(dbp->dbenv,
- dbc->txn, &lsn, 0, dbp->log_fileid, CA_ICURRENT,
- cp->root, cp->recno, cp->order)))
- return (ret);
- return (0);
+ if (F_ISSET(dbc, DBC_OPD))
+ switch (flags) {
+ case DB_KEYFIRST:
+ cp->recno = 1;
+ flags = DB_BEFORE;
+ break;
+ case DB_KEYLAST:
+ if ((ret = __ram_add(dbc,
+ &cp->recno, data, DB_APPEND, 0)) != 0)
+ return (ret);
+ if (CURADJ_LOG(dbc) &&
+ (ret = __bam_rcuradj_log(dbp, dbc->txn, &lsn, 0,
+ CA_ICURRENT, cp->root, cp->recno, cp->order)))
+ return (ret);
+ return (0);
+ }
+
+ /*
+ * Handle normal DB_KEYFIRST/DB_KEYLAST; for a recno, which has
+ * no duplicates, these are identical and mean "put the given
+ * datum at the given recno".
+ *
+ * Note that the code here used to be in __ram_put; now, we
+ * go through the access-method-common __db_put function, which
+ * handles DB_NOOVERWRITE, so we and __ram_add don't have to.
+ */
+ if (flags == DB_KEYFIRST || flags == DB_KEYLAST) {
+ ret = __ram_getno(dbc, key, &cp->recno, 1);
+ if (ret == 0 || ret == DB_NOTFOUND)
+ ret = __ram_add(dbc, &cp->recno, data, 0, 0);
+ return (ret);
}
/*
@@ -677,9 +649,8 @@ split: if ((ret = __bam_rsearch(dbc, &cp->recno, S_INSERT, 1, &exact)) != 0)
*/
DB_ASSERT(exact || CD_ISSET(cp));
- cp->page = cp->csp->page;
- cp->pgno = cp->csp->page->pgno;
- cp->indx = cp->csp->indx;
+ /* Copy the page into the cursor. */
+ STACK_TO_CURSOR(cp);
ret = __bam_iitem(dbc, key, data, iiflags, 0);
t_ret = __bam_stkrel(dbc, STK_CLRDBC);
@@ -688,7 +659,7 @@ split: if ((ret = __bam_rsearch(dbc, &cp->recno, S_INSERT, 1, &exact)) != 0)
ret = t_ret;
else if (ret == DB_NEEDSPLIT) {
arg = &cp->recno;
- if ((ret = __bam_split(dbc, arg)) != 0)
+ if ((ret = __bam_split(dbc, arg, NULL)) != 0)
goto err;
goto split;
}
@@ -709,8 +680,7 @@ split: if ((ret = __bam_rsearch(dbc, &cp->recno, S_INSERT, 1, &exact)) != 0)
/* Only log if __ram_ca found any relevant cursors. */
if (nc > 0 && CURADJ_LOG(dbc) &&
- (ret = __bam_rcuradj_log(dbp->dbenv,
- dbc->txn, &lsn, 0, dbp->log_fileid, CA_IAFTER,
+ (ret = __bam_rcuradj_log(dbp, dbc->txn, &lsn, 0, CA_IAFTER,
cp->root, cp->recno, cp->order)) != 0)
goto err;
break;
@@ -720,8 +690,7 @@ split: if ((ret = __bam_rsearch(dbc, &cp->recno, S_INSERT, 1, &exact)) != 0)
/* Only log if __ram_ca found any relevant cursors. */
if (nc > 0 && CURADJ_LOG(dbc) &&
- (ret = __bam_rcuradj_log(dbp->dbenv,
- dbc->txn, &lsn, 0, dbp->log_fileid, CA_IBEFORE,
+ (ret = __bam_rcuradj_log(dbp, dbc->txn, &lsn, 0, CA_IBEFORE,
cp->root, cp->recno, cp->order)) != 0)
goto err;
break;
@@ -734,8 +703,8 @@ split: if ((ret = __bam_rsearch(dbc, &cp->recno, S_INSERT, 1, &exact)) != 0)
* Only log if __ram_ca found any relevant cursors.
*/
if (CD_ISSET(cp) && __ram_ca(dbc, CA_ICURRENT) > 0 &&
- CURADJ_LOG(dbc) && (ret = __bam_rcuradj_log(
- dbp->dbenv, dbc->txn, &lsn, 0, dbp->log_fileid,
+ CURADJ_LOG(dbc) &&
+ (ret = __bam_rcuradj_log(dbp, dbc->txn, &lsn, 0,
CA_ICURRENT, cp->root, cp->recno, cp->order)) != 0)
goto err;
break;
@@ -743,8 +712,8 @@ split: if ((ret = __bam_rsearch(dbc, &cp->recno, S_INSERT, 1, &exact)) != 0)
/* Return the key if we've created a new record. */
if (!F_ISSET(dbc, DBC_OPD) && (flags == DB_AFTER || flags == DB_BEFORE))
- ret = __db_retcopy(dbp, key, &cp->recno,
- sizeof(cp->recno), &dbc->rkey.data, &dbc->rkey.ulen);
+ ret = __db_retcopy(dbp->dbenv, key, &cp->recno,
+ sizeof(cp->recno), &dbc->rkey->data, &dbc->rkey->ulen);
/* The cursor was reset, no further delete adjustment is necessary. */
err: CD_CLR(cp);
@@ -940,13 +909,12 @@ __ram_update(dbc, recno, can_create)
int can_create;
{
BTREE *t;
- BTREE_CURSOR *cp;
DB *dbp;
+ DBT *rdata;
db_recno_t nrecs;
int ret;
dbp = dbc->dbp;
- cp = (BTREE_CURSOR *)dbc->internal;
t = dbp->bt_internal;
/*
@@ -976,27 +944,13 @@ __ram_update(dbc, recno, can_create)
if (!can_create || recno <= nrecs + 1)
return (0);
- dbc->rdata.dlen = 0;
- dbc->rdata.doff = 0;
- dbc->rdata.flags = 0;
- if (F_ISSET(dbp, DB_RE_FIXEDLEN)) {
- if (dbc->rdata.ulen < t->re_len) {
- if ((ret = __os_realloc(dbp->dbenv,
- t->re_len, NULL, &dbc->rdata.data)) != 0) {
- dbc->rdata.ulen = 0;
- dbc->rdata.data = NULL;
- return (ret);
- }
- dbc->rdata.ulen = t->re_len;
- }
- dbc->rdata.size = t->re_len;
- memset(dbc->rdata.data, t->re_pad, t->re_len);
- } else
- dbc->rdata.size = 0;
+ rdata = &dbc->my_rdata;
+ rdata->flags = 0;
+ rdata->size = 0;
while (recno > ++nrecs)
if ((ret = __ram_add(dbc,
- &nrecs, &dbc->rdata, 0, BI_DELETED)) != 0)
+ &nrecs, rdata, 0, BI_DELETED)) != 0)
return (ret);
return (0);
}
@@ -1017,9 +971,9 @@ __ram_source(dbp)
/* Find the real name, and swap out the one we had before. */
if ((ret = __db_appname(dbp->dbenv,
- DB_APP_DATA, NULL, t->re_source, 0, NULL, &source)) != 0)
+ DB_APP_DATA, t->re_source, 0, NULL, &source)) != 0)
return (ret);
- __os_freestr(t->re_source);
+ __os_free(dbp->dbenv, t->re_source);
t->re_source = source;
/*
@@ -1060,6 +1014,7 @@ __ram_writeback(dbp)
t = dbp->bt_internal;
dbenv = dbp->dbenv;
fp = NULL;
+ pad = NULL;
/* If the file wasn't modified, we're done. */
if (!t->re_modified)
@@ -1119,40 +1074,45 @@ __ram_writeback(dbp)
/*
* We step through the records, writing each one out. Use the record
* number and the dbp->get() function, instead of a cursor, so we find
- * and write out "deleted" or non-existent records.
+ * and write out "deleted" or non-existent records. The DB handle may
+ * be threaded, so allocate memory as we go.
*/
memset(&key, 0, sizeof(key));
- memset(&data, 0, sizeof(data));
key.size = sizeof(db_recno_t);
key.data = &keyno;
+ memset(&data, 0, sizeof(data));
+ F_SET(&data, DB_DBT_REALLOC);
/*
* We'll need the delimiter if we're doing variable-length records,
* and the pad character if we're doing fixed-length records.
*/
delim = t->re_delim;
- if (F_ISSET(dbp, DB_RE_FIXEDLEN)) {
- if ((ret = __os_malloc(dbenv, t->re_len, NULL, &pad)) != 0)
+ if (F_ISSET(dbp, DB_AM_FIXEDLEN)) {
+ if ((ret = __os_malloc(dbenv, t->re_len, &pad)) != 0)
goto err;
memset(pad, t->re_pad, t->re_len);
- } else
- COMPQUIET(pad, NULL);
+ }
for (keyno = 1;; ++keyno) {
switch (ret = dbp->get(dbp, NULL, &key, &data, 0)) {
case 0:
- if (fwrite(data.data, 1, data.size, fp) != data.size)
+ if (data.size != 0 && (u_int32_t)fwrite(
+ data.data, 1, data.size, fp) != data.size)
goto write_err;
break;
case DB_KEYEMPTY:
- if (F_ISSET(dbp, DB_RE_FIXEDLEN) &&
- fwrite(pad, 1, t->re_len, fp) != t->re_len)
+ if (F_ISSET(dbp, DB_AM_FIXEDLEN) &&
+ (u_int32_t)fwrite(pad, 1, t->re_len, fp) !=
+ t->re_len)
goto write_err;
break;
case DB_NOTFOUND:
ret = 0;
goto done;
+ default:
+ goto err;
}
- if (!F_ISSET(dbp, DB_RE_FIXEDLEN) &&
+ if (!F_ISSET(dbp, DB_AM_FIXEDLEN) &&
fwrite(&delim, 1, 1, fp) != 1) {
write_err: ret = errno;
__db_err(dbp->dbenv,
@@ -1174,6 +1134,12 @@ done: /* Close the file descriptor. */
if ((t_ret = dbc->c_close(dbc)) != 0 && ret == 0)
ret = t_ret;
+ /* Discard memory allocated to hold the data items. */
+ if (data.data != NULL)
+ __os_ufree(dbenv, data.data);
+ if (pad != NULL)
+ __os_free(dbenv, pad);
+
if (ret == 0)
t->re_modified = 0;
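The writeback loop above steps through record numbers with dbp->get, writing real data as-is, writing a pre-built buffer of pad bytes for empty slots in fixed-length databases, and appending the delimiter byte after each variable-length record. The same flush logic over an in-memory array of optional records (NULL meaning a deleted or never-written slot) can be sketched as follows; all names are invented:

    #include <stdio.h>
    #include <string.h>

    /*
     * write_records --
     *    Flush records to a flat file the way the recno backing-source
     *    writeback does: empty fixed-length slots get a buffer of pad
     *    bytes, variable-length records get a trailing delimiter.
     */
    int
    write_records(FILE *fp, char **recs, size_t *lens, size_t nrecs,
        int fixedlen, size_t re_len, int re_pad, int re_delim)
    {
        char pad[256];
        size_t i;

        if (fixedlen) {
            if (re_len > sizeof(pad))
                return (-1);    /* Keep the sketch simple. */
            memset(pad, re_pad, re_len);
        }

        for (i = 0; i < nrecs; ++i) {
            if (recs[i] != NULL) {
                if (fwrite(recs[i], 1, lens[i], fp) != lens[i])
                    return (-1);
            } else if (fixedlen &&
                fwrite(pad, 1, re_len, fp) != re_len)
                return (-1);
            if (!fixedlen && fputc(re_delim, fp) == EOF)
                return (-1);
        }
        return (0);
    }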
@@ -1191,7 +1157,7 @@ __ram_sread(dbc, top)
{
BTREE *t;
DB *dbp;
- DBT data;
+ DBT data, *rdata;
db_recno_t recno;
size_t len;
int ch, ret, was_modified;
@@ -1203,45 +1169,56 @@ __ram_sread(dbc, top)
if ((ret = __bam_nrecs(dbc, &recno)) != 0)
return (ret);
- /* Use the record data return memory, it's only a short-term use. */
- len = F_ISSET(dbp, DB_RE_FIXEDLEN) ? t->re_len : 256;
- if (dbc->rdata.ulen < len) {
+ /*
+ * Use the record key return memory, it's only a short-term use.
+ * The record data return memory is used by __bam_iitem, which
+ * we'll indirectly call, so use the key so as not to collide.
+ */
+ len = F_ISSET(dbp, DB_AM_FIXEDLEN) ? t->re_len : 256;
+ rdata = &dbc->my_rkey;
+ if (rdata->ulen < len) {
if ((ret = __os_realloc(
- dbp->dbenv, len, NULL, &dbc->rdata.data)) != 0) {
- dbc->rdata.ulen = 0;
- dbc->rdata.data = NULL;
+ dbp->dbenv, len, &rdata->data)) != 0) {
+ rdata->ulen = 0;
+ rdata->data = NULL;
return (ret);
}
- dbc->rdata.ulen = len;
+ rdata->ulen = (u_int32_t)len;
}
memset(&data, 0, sizeof(data));
while (recno < top) {
- data.data = dbc->rdata.data;
+ data.data = rdata->data;
data.size = 0;
- if (F_ISSET(dbp, DB_RE_FIXEDLEN))
+ if (F_ISSET(dbp, DB_AM_FIXEDLEN))
for (len = t->re_len; len > 0; --len) {
- if ((ch = getc(t->re_fp)) == EOF)
- goto eof;
+ if ((ch = getc(t->re_fp)) == EOF) {
+ if (data.size == 0)
+ goto eof;
+ break;
+ }
((u_int8_t *)data.data)[data.size++] = ch;
}
else
for (;;) {
- if ((ch = getc(t->re_fp)) == EOF)
- goto eof;
+ if ((ch = getc(t->re_fp)) == EOF) {
+ if (data.size == 0)
+ goto eof;
+ break;
+ }
if (ch == t->re_delim)
break;
((u_int8_t *)data.data)[data.size++] = ch;
- if (data.size == dbc->rdata.ulen) {
+ if (data.size == rdata->ulen) {
if ((ret = __os_realloc(dbp->dbenv,
- dbc->rdata.ulen *= 2,
- NULL, &dbc->rdata.data)) != 0) {
- dbc->rdata.ulen = 0;
- dbc->rdata.data = NULL;
+ rdata->ulen *= 2,
+ &rdata->data)) != 0) {
+ rdata->ulen = 0;
+ rdata->data = NULL;
return (ret);
} else
- data.data = dbc->rdata.data;
+ data.data = rdata->data;
}
}
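__ram_sread now accumulates each record into the cursor's key return buffer, doubling the allocation whenever the record outgrows it, and treats a final record with no trailing delimiter as valid data rather than as end-of-file. A standalone version of that read loop, against a plain FILE * and a caller-supplied growable buffer (names are hypothetical), is:

    #include <stdio.h>
    #include <stdlib.h>

    /*
     * read_record --
     *    Read one delimiter-terminated record, growing *bufp by doubling
     *    as needed.  Returns 1 when a record was read, 0 at end of file
     *    with no pending data, -1 on allocation failure.
     */
    int
    read_record(FILE *fp, int delim, unsigned char **bufp, size_t *ulenp,
        size_t *sizep)
    {
        unsigned char *p;
        size_t size;
        int ch;

        if (*ulenp == 0) {
            if ((*bufp = malloc(256)) == NULL)
                return (-1);
            *ulenp = 256;
        }

        for (size = 0;;) {
            if ((ch = getc(fp)) == EOF) {
                if (size == 0)
                    return (0);    /* Clean end of file. */
                break;             /* Last record, no delimiter. */
            }
            if (ch == delim)
                break;
            (*bufp)[size++] = (unsigned char)ch;
            if (size == *ulenp) {      /* Grow by doubling. */
                if ((p = realloc(*bufp, *ulenp * 2)) == NULL)
                    return (-1);
                *bufp = p;
                *ulenp *= 2;
            }
        }
        *sizep = size;
        return (1);
    }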
@@ -1281,7 +1258,6 @@ __ram_add(dbc, recnop, data, flags, bi_flags)
DBT *data;
u_int32_t flags, bi_flags;
{
- BKEYDATA *bk;
BTREE_CURSOR *cp;
int exact, ret, stack;
@@ -1292,9 +1268,9 @@ retry: /* Find the slot for insertion. */
S_INSERT | (flags == DB_APPEND ? S_APPEND : 0), 1, &exact)) != 0)
return (ret);
stack = 1;
- cp->page = cp->csp->page;
- cp->pgno = cp->csp->page->pgno;
- cp->indx = cp->csp->indx;
+
+ /* Copy the page into the cursor. */
+ STACK_TO_CURSOR(cp);
/*
* The application may modify the data based on the selected record
@@ -1305,24 +1281,6 @@ retry: /* Find the slot for insertion. */
goto err;
/*
- * If re-numbering records, the on-page deleted flag means this record
- * was implicitly created. If not re-numbering records, the on-page
- * deleted flag means this record was implicitly created, or, it was
- * deleted at some time.
- *
- * If DB_NOOVERWRITE is set and the item already exists in the tree,
- * return an error unless the item was either marked for deletion or
- * only implicitly created.
- */
- if (exact) {
- bk = GET_BKEYDATA(cp->page, cp->indx);
- if (!B_DISSET(bk->type) && flags == DB_NOOVERWRITE) {
- ret = DB_KEYEXIST;
- goto err;
- }
- }
-
- /*
* Select the arguments for __bam_iitem() and do the insert. If the
* key is an exact match, or we're replacing the data item with a
* new data item, replace the current item. If the key isn't an exact
@@ -1353,7 +1311,7 @@ retry: /* Find the slot for insertion. */
(void)__bam_stkrel(dbc, STK_CLRDBC);
stack = 0;
- if ((ret = __bam_split(dbc, recnop)) != 0)
+ if ((ret = __bam_split(dbc, recnop, NULL)) != 0)
goto err;
goto retry;
diff --git a/bdb/btree/bt_rsearch.c b/bdb/btree/bt_rsearch.c
index 7102cd715aa..a75181b44e2 100644
--- a/bdb/btree/bt_rsearch.c
+++ b/bdb/btree/bt_rsearch.c
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Copyright (c) 1996-2002
* Sleepycat Software. All rights reserved.
*/
/*
@@ -40,7 +40,7 @@
#include "db_config.h"
#ifndef lint
-static const char revid[] = "$Id: bt_rsearch.c,v 11.21 2000/03/28 21:50:04 ubell Exp $";
+static const char revid[] = "$Id: bt_rsearch.c,v 11.34 2002/07/03 19:03:50 bostic Exp $";
#endif /* not lint */
#ifndef NO_SYSTEM_INCLUDES
@@ -48,10 +48,10 @@ static const char revid[] = "$Id: bt_rsearch.c,v 11.21 2000/03/28 21:50:04 ubell
#endif
#include "db_int.h"
-#include "db_page.h"
-#include "btree.h"
-#include "db_shash.h"
-#include "lock.h"
+#include "dbinc/db_page.h"
+#include "dbinc/btree.h"
+#include "dbinc/db_shash.h"
+#include "dbinc/lock.h"
/*
* __bam_rsearch --
@@ -70,6 +70,7 @@ __bam_rsearch(dbc, recnop, flags, stop, exactp)
BTREE_CURSOR *cp;
DB *dbp;
DB_LOCK lock;
+ DB_MPOOLFILE *mpf;
PAGE *h;
RINTERNAL *ri;
db_indx_t adjust, deloffset, indx, top;
@@ -79,6 +80,7 @@ __bam_rsearch(dbc, recnop, flags, stop, exactp)
int ret, stack;
dbp = dbc->dbp;
+ mpf = dbp->mpf;
cp = (BTREE_CURSOR *)dbc->internal;
BT_STK_CLR(cp);
@@ -99,11 +101,11 @@ __bam_rsearch(dbc, recnop, flags, stop, exactp)
* Retrieve the root page.
*/
pg = cp->root;
- stack = LF_ISSET(S_STACK);
+ stack = LF_ISSET(S_STACK) ? 1 : 0;
lock_mode = stack ? DB_LOCK_WRITE : DB_LOCK_READ;
if ((ret = __db_lget(dbc, 0, pg, lock_mode, 0, &lock)) != 0)
return (ret);
- if ((ret = memp_fget(dbp->mpf, &pg, 0, &h)) != 0) {
+ if ((ret = mpf->get(mpf, &pg, 0, &h)) != 0) {
/* Did not read it, so we can release the lock */
(void)__LPUT(dbc, lock);
return (ret);
@@ -120,12 +122,12 @@ __bam_rsearch(dbc, recnop, flags, stop, exactp)
if (!stack &&
((LF_ISSET(S_PARENT) && (u_int8_t)(stop + 1) >= h->level) ||
(LF_ISSET(S_WRITE) && h->level == LEAFLEVEL))) {
- (void)memp_fput(dbp->mpf, h, 0);
+ (void)mpf->put(mpf, h, 0);
(void)__LPUT(dbc, lock);
lock_mode = DB_LOCK_WRITE;
if ((ret = __db_lget(dbc, 0, pg, lock_mode, 0, &lock)) != 0)
return (ret);
- if ((ret = memp_fget(dbp->mpf, &pg, 0, &h)) != 0) {
+ if ((ret = mpf->get(mpf, &pg, 0, &h)) != 0) {
/* Did not read it, so we can release the lock */
(void)__LPUT(dbc, lock);
return (ret);
@@ -164,7 +166,7 @@ __bam_rsearch(dbc, recnop, flags, stop, exactp)
* eliminate any concurrency. A possible fix
* would be to lock the last leaf page instead.
*/
- (void)memp_fput(dbp->mpf, h, 0);
+ (void)mpf->put(mpf, h, 0);
(void)__TLPUT(dbc, lock);
return (DB_NOTFOUND);
}
@@ -202,8 +204,8 @@ __bam_rsearch(dbc, recnop, flags, stop, exactp)
goto err;
}
}
- if (!B_DISSET(
- GET_BKEYDATA(h, indx + deloffset)->type) &&
+ if (!B_DISSET(GET_BKEYDATA(dbp, h,
+ indx + deloffset)->type) &&
++t_recno == recno)
break;
}
@@ -216,7 +218,7 @@ __bam_rsearch(dbc, recnop, flags, stop, exactp)
return (0);
case P_IBTREE:
for (indx = 0, top = NUM_ENT(h);;) {
- bi = GET_BINTERNAL(h, indx);
+ bi = GET_BINTERNAL(dbp, h, indx);
if (++indx == top || total + bi->nrecs >= recno)
break;
total += bi->nrecs;
@@ -235,7 +237,7 @@ __bam_rsearch(dbc, recnop, flags, stop, exactp)
return (0);
case P_IRECNO:
for (indx = 0, top = NUM_ENT(h);;) {
- ri = GET_RINTERNAL(h, indx);
+ ri = GET_RINTERNAL(dbp, h, indx);
if (++indx == top || total + ri->nrecs >= recno)
break;
total += ri->nrecs;
@@ -243,7 +245,7 @@ __bam_rsearch(dbc, recnop, flags, stop, exactp)
pg = ri->pgno;
break;
default:
- return (__db_pgfmt(dbp, h->pgno));
+ return (__db_pgfmt(dbp->dbenv, h->pgno));
}
--indx;
@@ -276,12 +278,12 @@ __bam_rsearch(dbc, recnop, flags, stop, exactp)
(h->level - 1) == LEAFLEVEL)
stack = 1;
- (void)memp_fput(dbp->mpf, h, 0);
+ (void)mpf->put(mpf, h, 0);
lock_mode = stack &&
LF_ISSET(S_WRITE) ? DB_LOCK_WRITE : DB_LOCK_READ;
if ((ret = __db_lget(dbc,
- LCK_COUPLE, pg, lock_mode, 0, &lock)) != 0) {
+ LCK_COUPLE_ALWAYS, pg, lock_mode, 0, &lock)) != 0) {
/*
* If we fail, discard the lock we held. This
* is OK because this only happens when we are
@@ -292,7 +294,7 @@ __bam_rsearch(dbc, recnop, flags, stop, exactp)
}
}
- if ((ret = memp_fget(dbp->mpf, &pg, 0, &h)) != 0)
+ if ((ret = mpf->get(mpf, &pg, 0, &h)) != 0)
goto err;
}
/* NOTREACHED */
@@ -315,12 +317,14 @@ __bam_adjust(dbc, adjust)
{
BTREE_CURSOR *cp;
DB *dbp;
+ DB_MPOOLFILE *mpf;
EPG *epg;
PAGE *h;
db_pgno_t root_pgno;
int ret;
dbp = dbc->dbp;
+ mpf = dbp->mpf;
cp = (BTREE_CURSOR *)dbc->internal;
root_pgno = cp->root;
@@ -328,22 +332,27 @@ __bam_adjust(dbc, adjust)
for (epg = cp->sp; epg <= cp->csp; ++epg) {
h = epg->page;
if (TYPE(h) == P_IBTREE || TYPE(h) == P_IRECNO) {
- if (DB_LOGGING(dbc) &&
- (ret = __bam_cadjust_log(dbp->dbenv,
- dbc->txn, &LSN(h), 0, dbp->log_fileid,
- PGNO(h), &LSN(h), (u_int32_t)epg->indx, adjust,
- PGNO(h) == root_pgno ? CAD_UPDATEROOT : 0)) != 0)
- return (ret);
+ if (DBC_LOGGING(dbc)) {
+ if ((ret = __bam_cadjust_log(dbp, dbc->txn,
+ &LSN(h), 0, PGNO(h), &LSN(h),
+ (u_int32_t)epg->indx, adjust,
+ PGNO(h) == root_pgno ?
+ CAD_UPDATEROOT : 0)) != 0)
+ return (ret);
+ } else
+ LSN_NOT_LOGGED(LSN(h));
if (TYPE(h) == P_IBTREE)
- GET_BINTERNAL(h, epg->indx)->nrecs += adjust;
+ GET_BINTERNAL(dbp, h, epg->indx)->nrecs +=
+ adjust;
else
- GET_RINTERNAL(h, epg->indx)->nrecs += adjust;
+ GET_RINTERNAL(dbp, h, epg->indx)->nrecs +=
+ adjust;
if (PGNO(h) == root_pgno)
RE_NREC_ADJ(h, adjust);
- if ((ret = memp_fset(dbp->mpf, h, DB_MPOOL_DIRTY)) != 0)
+ if ((ret = mpf->set(mpf, h, DB_MPOOL_DIRTY)) != 0)
return (ret);
}
}
@@ -363,21 +372,23 @@ __bam_nrecs(dbc, rep)
{
DB *dbp;
DB_LOCK lock;
+ DB_MPOOLFILE *mpf;
PAGE *h;
db_pgno_t pgno;
int ret;
dbp = dbc->dbp;
+ mpf = dbp->mpf;
pgno = dbc->internal->root;
if ((ret = __db_lget(dbc, 0, pgno, DB_LOCK_READ, 0, &lock)) != 0)
return (ret);
- if ((ret = memp_fget(dbp->mpf, &pgno, 0, &h)) != 0)
+ if ((ret = mpf->get(mpf, &pgno, 0, &h)) != 0)
return (ret);
*rep = RE_NREC(h);
- (void)memp_fput(dbp->mpf, h, 0);
+ (void)mpf->put(mpf, h, 0);
(void)__TLPUT(dbc, lock);
return (0);
@@ -387,10 +398,11 @@ __bam_nrecs(dbc, rep)
* __bam_total --
* Return the number of records below a page.
*
- * PUBLIC: db_recno_t __bam_total __P((PAGE *));
+ * PUBLIC: db_recno_t __bam_total __P((DB *, PAGE *));
*/
db_recno_t
-__bam_total(h)
+__bam_total(dbp, h)
+ DB *dbp;
PAGE *h;
{
db_recno_t nrecs;
@@ -403,25 +415,26 @@ __bam_total(h)
case P_LBTREE:
/* Check for logically deleted records. */
for (indx = 0; indx < top; indx += P_INDX)
- if (!B_DISSET(GET_BKEYDATA(h, indx + O_INDX)->type))
+ if (!B_DISSET(
+ GET_BKEYDATA(dbp, h, indx + O_INDX)->type))
++nrecs;
break;
case P_LDUP:
/* Check for logically deleted records. */
for (indx = 0; indx < top; indx += O_INDX)
- if (!B_DISSET(GET_BKEYDATA(h, indx)->type))
+ if (!B_DISSET(GET_BKEYDATA(dbp, h, indx)->type))
++nrecs;
break;
case P_IBTREE:
for (indx = 0; indx < top; indx += O_INDX)
- nrecs += GET_BINTERNAL(h, indx)->nrecs;
+ nrecs += GET_BINTERNAL(dbp, h, indx)->nrecs;
break;
case P_LRECNO:
nrecs = NUM_ENT(h);
break;
case P_IRECNO:
for (indx = 0; indx < top; indx += O_INDX)
- nrecs += GET_RINTERNAL(h, indx)->nrecs;
+ nrecs += GET_RINTERNAL(dbp, h, indx)->nrecs;
break;
}
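The bt_rsearch.c hunks above replace the function-style buffer-pool calls (memp_fget/memp_fput/memp_fset) with the per-handle DB_MPOOLFILE methods and pass the DB handle to the page accessor macros. A minimal sketch of the resulting fetch/release pattern, for illustration only -- it is not part of the patch, the helper name is invented, and error handling is abbreviated:

#include "db_int.h"
#include "dbinc/db_page.h"
#include "dbinc/btree.h"

/*
 * example_fetch_root --
 *	Illustration of the 4.1-style mpool access used above: cache the
 *	DB_MPOOLFILE handle once, then use its get/put methods in place of
 *	memp_fget/memp_fput.  Not part of the patch.
 */
static int
example_fetch_root(dbc, pg, lock, hp)
	DBC *dbc;
	db_pgno_t pg;
	DB_LOCK lock;
	PAGE **hp;
{
	DB *dbp;
	DB_MPOOLFILE *mpf;
	int ret;

	dbp = dbc->dbp;
	mpf = dbp->mpf;
	if ((ret = mpf->get(mpf, &pg, 0, hp)) != 0) {
		/* Did not read it, so we can release the lock. */
		(void)__LPUT(dbc, lock);
		return (ret);
	}
	/* Page accessors now take the DB handle, e.g. GET_RINTERNAL(dbp, *hp, 0). */
	return (mpf->put(mpf, *hp, 0));
}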
diff --git a/bdb/btree/bt_search.c b/bdb/btree/bt_search.c
index d822198f243..92b2106311d 100644
--- a/bdb/btree/bt_search.c
+++ b/bdb/btree/bt_search.c
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Copyright (c) 1996-2002
* Sleepycat Software. All rights reserved.
*/
/*
@@ -43,7 +43,7 @@
#include "db_config.h"
#ifndef lint
-static const char revid[] = "$Id: bt_search.c,v 11.32 2001/01/17 20:19:46 bostic Exp $";
+static const char revid[] = "$Id: bt_search.c,v 11.43 2002/07/03 19:03:50 bostic Exp $";
#endif /* not lint */
#ifndef NO_SYSTEM_INCLUDES
@@ -53,21 +53,22 @@ static const char revid[] = "$Id: bt_search.c,v 11.32 2001/01/17 20:19:46 bostic
#endif
#include "db_int.h"
-#include "db_page.h"
-#include "db_shash.h"
-#include "btree.h"
-#include "lock.h"
+#include "dbinc/db_page.h"
+#include "dbinc/db_shash.h"
+#include "dbinc/btree.h"
+#include "dbinc/lock.h"
/*
* __bam_search --
* Search a btree for a key.
*
- * PUBLIC: int __bam_search __P((DBC *,
+ * PUBLIC: int __bam_search __P((DBC *, db_pgno_t,
* PUBLIC: const DBT *, u_int32_t, int, db_recno_t *, int *));
*/
int
-__bam_search(dbc, key, flags, stop, recnop, exactp)
+__bam_search(dbc, root_pgno, key, flags, stop, recnop, exactp)
DBC *dbc;
+ db_pgno_t root_pgno;
const DBT *key;
u_int32_t flags;
int stop, *exactp;
@@ -77,8 +78,9 @@ __bam_search(dbc, key, flags, stop, recnop, exactp)
BTREE_CURSOR *cp;
DB *dbp;
DB_LOCK lock;
+ DB_MPOOLFILE *mpf;
PAGE *h;
- db_indx_t base, i, indx, lim;
+ db_indx_t base, i, indx, *inp, lim;
db_lockmode_t lock_mode;
db_pgno_t pg;
db_recno_t recno;
@@ -86,6 +88,7 @@ __bam_search(dbc, key, flags, stop, recnop, exactp)
int (*func) __P((DB *, const DBT *, const DBT *));
dbp = dbc->dbp;
+ mpf = dbp->mpf;
cp = (BTREE_CURSOR *)dbc->internal;
t = dbp->bt_internal;
recno = 0;
@@ -109,12 +112,12 @@ __bam_search(dbc, key, flags, stop, recnop, exactp)
* Retrieve the root page.
*/
try_again:
- pg = cp->root;
+ pg = root_pgno == PGNO_INVALID ? cp->root : root_pgno;
stack = LF_ISSET(S_STACK) && F_ISSET(cp, C_RECNUM);
lock_mode = stack ? DB_LOCK_WRITE : DB_LOCK_READ;
if ((ret = __db_lget(dbc, 0, pg, lock_mode, 0, &lock)) != 0)
return (ret);
- if ((ret = memp_fget(dbp->mpf, &pg, 0, &h)) != 0) {
+ if ((ret = mpf->get(mpf, &pg, 0, &h)) != 0) {
/* Did not read it, so we can release the lock */
(void)__LPUT(dbc, lock);
return (ret);
@@ -131,21 +134,21 @@ try_again:
if (!stack &&
((LF_ISSET(S_PARENT) && (u_int8_t)(stop + 1) >= h->level) ||
(LF_ISSET(S_WRITE) && h->level == LEAFLEVEL))) {
- (void)memp_fput(dbp->mpf, h, 0);
+ (void)mpf->put(mpf, h, 0);
(void)__LPUT(dbc, lock);
lock_mode = DB_LOCK_WRITE;
if ((ret = __db_lget(dbc, 0, pg, lock_mode, 0, &lock)) != 0)
return (ret);
- if ((ret = memp_fget(dbp->mpf, &pg, 0, &h)) != 0) {
+ if ((ret = mpf->get(mpf, &pg, 0, &h)) != 0) {
/* Did not read it, so we can release the lock */
(void)__LPUT(dbc, lock);
return (ret);
}
- if (!((LF_ISSET(S_PARENT)
- && (u_int8_t)(stop + 1) >= h->level) ||
+ if (!((LF_ISSET(S_PARENT) &&
+ (u_int8_t)(stop + 1) >= h->level) ||
(LF_ISSET(S_WRITE) && h->level == LEAFLEVEL))) {
/* Someone else split the root, start over. */
- (void)memp_fput(dbp->mpf, h, 0);
+ (void)mpf->put(mpf, h, 0);
(void)__LPUT(dbc, lock);
goto try_again;
}
@@ -158,6 +161,7 @@ try_again:
t->bt_compare;
for (;;) {
+ inp = P_INP(dbp, h);
/*
* Do a binary search on the current page. If we're searching
* a Btree leaf page, we have to walk the indices in groups of
@@ -199,7 +203,7 @@ try_again:
if (LF_ISSET(S_STK_ONLY)) {
BT_STK_NUM(dbp->dbenv, cp, h, base, ret);
__LPUT(dbc, lock);
- (void)memp_fput(dbp->mpf, h, 0);
+ (void)mpf->put(mpf, h, 0);
return (ret);
}
@@ -232,21 +236,21 @@ try_again:
*/
next: if (recnop != NULL)
for (i = 0; i < indx; ++i)
- recno += GET_BINTERNAL(h, i)->nrecs;
+ recno += GET_BINTERNAL(dbp, h, i)->nrecs;
- pg = GET_BINTERNAL(h, indx)->pgno;
+ pg = GET_BINTERNAL(dbp, h, indx)->pgno;
if (LF_ISSET(S_STK_ONLY)) {
if (stop == h->level) {
BT_STK_NUM(dbp->dbenv, cp, h, indx, ret);
__LPUT(dbc, lock);
- (void)memp_fput(dbp->mpf, h, 0);
+ (void)mpf->put(mpf, h, 0);
return (ret);
}
BT_STK_NUMPUSH(dbp->dbenv, cp, h, indx, ret);
- (void)memp_fput(dbp->mpf, h, 0);
+ (void)mpf->put(mpf, h, 0);
if ((ret = __db_lget(dbc,
- LCK_COUPLE, pg, lock_mode, 0, &lock)) != 0) {
+ LCK_COUPLE_ALWAYS, pg, lock_mode, 0, &lock)) != 0) {
/*
* Discard our lock and return on failure. This
* is OK because it only happens when descending
@@ -284,12 +288,12 @@ next: if (recnop != NULL)
(h->level - 1) == LEAFLEVEL)
stack = 1;
- (void)memp_fput(dbp->mpf, h, 0);
+ (void)mpf->put(mpf, h, 0);
lock_mode = stack &&
LF_ISSET(S_WRITE) ? DB_LOCK_WRITE : DB_LOCK_READ;
if ((ret = __db_lget(dbc,
- LCK_COUPLE, pg, lock_mode, 0, &lock)) != 0) {
+ LCK_COUPLE_ALWAYS, pg, lock_mode, 0, &lock)) != 0) {
/*
* If we fail, discard the lock we held. This
* is OK because this only happens when we are
@@ -299,7 +303,7 @@ next: if (recnop != NULL)
goto err;
}
}
- if ((ret = memp_fget(dbp->mpf, &pg, 0, &h)) != 0)
+ if ((ret = mpf->get(mpf, &pg, 0, &h)) != 0)
goto err;
}
/* NOTREACHED */
@@ -327,11 +331,11 @@ found: *exactp = 1;
if (TYPE(h) == P_LBTREE) {
if (LF_ISSET(S_DUPLAST))
while (indx < (db_indx_t)(NUM_ENT(h) - P_INDX) &&
- h->inp[indx] == h->inp[indx + P_INDX])
+ inp[indx] == inp[indx + P_INDX])
indx += P_INDX;
else
while (indx > 0 &&
- h->inp[indx] == h->inp[indx - P_INDX])
+ inp[indx] == inp[indx - P_INDX])
indx -= P_INDX;
}
@@ -344,29 +348,29 @@ found: *exactp = 1;
if (LF_ISSET(S_DELNO)) {
deloffset = TYPE(h) == P_LBTREE ? O_INDX : 0;
if (LF_ISSET(S_DUPLAST))
- while (B_DISSET(GET_BKEYDATA(
+ while (B_DISSET(GET_BKEYDATA(dbp,
h, indx + deloffset)->type) && indx > 0 &&
- h->inp[indx] == h->inp[indx - adjust])
+ inp[indx] == inp[indx - adjust])
indx -= adjust;
else
- while (B_DISSET(GET_BKEYDATA(
+ while (B_DISSET(GET_BKEYDATA(dbp,
h, indx + deloffset)->type) &&
indx < (db_indx_t)(NUM_ENT(h) - adjust) &&
- h->inp[indx] == h->inp[indx + adjust])
+ inp[indx] == inp[indx + adjust])
indx += adjust;
/*
* If we weren't able to find a non-deleted duplicate, return
* DB_NOTFOUND.
*/
- if (B_DISSET(GET_BKEYDATA(h, indx + deloffset)->type))
+ if (B_DISSET(GET_BKEYDATA(dbp, h, indx + deloffset)->type))
goto notfound;
}
if (LF_ISSET(S_STK_ONLY)) {
BT_STK_NUM(dbp->dbenv, cp, h, indx, ret);
__LPUT(dbc, lock);
- (void)memp_fput(dbp->mpf, h, 0);
+ (void)mpf->put(mpf, h, 0);
} else {
BT_STK_ENTER(dbp->dbenv, cp, h, indx, lock, lock_mode, ret);
if (ret != 0)
@@ -376,7 +380,7 @@ found: *exactp = 1;
notfound:
/* Keep the page locked for serializability. */
- (void)memp_fput(dbp->mpf, h, 0);
+ (void)mpf->put(mpf, h, 0);
(void)__TLPUT(dbc, lock);
ret = DB_NOTFOUND;
@@ -398,10 +402,12 @@ __bam_stkrel(dbc, flags)
{
BTREE_CURSOR *cp;
DB *dbp;
+ DB_MPOOLFILE *mpf;
EPG *epg;
int ret, t_ret;
dbp = dbc->dbp;
+ mpf = dbp->mpf;
cp = (BTREE_CURSOR *)dbc->internal;
/*
@@ -414,10 +420,10 @@ __bam_stkrel(dbc, flags)
if (epg->page != NULL) {
if (LF_ISSET(STK_CLRDBC) && cp->page == epg->page) {
cp->page = NULL;
- cp->lock.off = LOCK_INVALID;
+ LOCK_INIT(cp->lock);
}
- if ((t_ret = memp_fput(
- dbp->mpf, epg->page, 0)) != 0 && ret == 0)
+ if ((t_ret =
+ mpf->put(mpf, epg->page, 0)) != 0 && ret == 0)
ret = t_ret;
/*
* XXX
@@ -428,12 +434,10 @@ __bam_stkrel(dbc, flags)
*/
epg->page = NULL;
}
- if (epg->lock.off != LOCK_INVALID) {
- if (LF_ISSET(STK_NOLOCK))
- (void)__LPUT(dbc, epg->lock);
- else
- (void)__TLPUT(dbc, epg->lock);
- }
+ if (LF_ISSET(STK_NOLOCK))
+ (void)__LPUT(dbc, epg->lock);
+ else
+ (void)__TLPUT(dbc, epg->lock);
}
/* Clear the stack, all pages have been released. */
@@ -463,7 +467,7 @@ __bam_stkgrow(dbenv, cp)
return (ret);
memcpy(p, cp->sp, entries * sizeof(EPG));
if (cp->sp != cp->stack)
- __os_free(cp->sp, entries * sizeof(EPG));
+ __os_free(dbenv, cp->sp);
cp->sp = p;
cp->csp = p + entries;
cp->esp = p + entries * 2;
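In bt_search.c the search entry point gains an explicit root page argument and the per-page index array is fetched once through P_INP(dbp, h) instead of touching h->inp directly. A hedged call-site sketch (the wrapper name is invented; passing PGNO_INVALID asks the search to start from the cursor's own root, as the updated __bam_key_range call in bt_stat.c does):

#include "db_int.h"
#include "dbinc/db_page.h"
#include "dbinc/btree.h"

/*
 * example_search --
 *	Illustration of the new __bam_search() calling convention; not part
 *	of the patch.  Stack and page release handling is elided.
 */
static int
example_search(dbc, key)
	DBC *dbc;
	const DBT *key;
{
	int exact, ret;

	/*
	 * The second argument names the subtree root; PGNO_INVALID means
	 * "use the cursor's root".
	 */
	ret = __bam_search(dbc,
	    PGNO_INVALID, key, S_STK_ONLY, 1, NULL, &exact);
	return (ret);
}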
diff --git a/bdb/btree/bt_split.c b/bdb/btree/bt_split.c
index f76337b1944..f3302a6905f 100644
--- a/bdb/btree/bt_split.c
+++ b/bdb/btree/bt_split.c
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Copyright (c) 1996-2002
* Sleepycat Software. All rights reserved.
*/
/*
@@ -40,7 +40,7 @@
#include "db_config.h"
#ifndef lint
-static const char revid[] = "$Id: bt_split.c,v 11.31 2000/12/22 19:08:27 bostic Exp $";
+static const char revid[] = "$Id: bt_split.c,v 11.58 2002/07/03 19:03:50 bostic Exp $";
#endif /* not lint */
#ifndef NO_SYSTEM_INCLUDES
@@ -51,10 +51,10 @@ static const char revid[] = "$Id: bt_split.c,v 11.31 2000/12/22 19:08:27 bostic
#endif
#include "db_int.h"
-#include "db_page.h"
-#include "db_shash.h"
-#include "lock.h"
-#include "btree.h"
+#include "dbinc/db_page.h"
+#include "dbinc/db_shash.h"
+#include "dbinc/lock.h"
+#include "dbinc/btree.h"
static int __bam_broot __P((DBC *, PAGE *, PAGE *, PAGE *));
static int __bam_page __P((DBC *, EPG *, EPG *));
@@ -67,21 +67,19 @@ static int __ram_root __P((DBC *, PAGE *, PAGE *, PAGE *));
* __bam_split --
* Split a page.
*
- * PUBLIC: int __bam_split __P((DBC *, void *));
+ * PUBLIC: int __bam_split __P((DBC *, void *, db_pgno_t *));
*/
int
-__bam_split(dbc, arg)
+__bam_split(dbc, arg, root_pgnop)
DBC *dbc;
void *arg;
+ db_pgno_t *root_pgnop;
{
- BTREE *t;
BTREE_CURSOR *cp;
- DB *dbp;
enum { UP, DOWN } dir;
db_pgno_t root_pgno;
int exact, level, ret;
- dbp = dbc->dbp;
cp = (BTREE_CURSOR *)dbc->internal;
root_pgno = cp->root;
@@ -112,17 +110,20 @@ __bam_split(dbc, arg)
* split. This would be an easy change for this code, but I have no
* numbers that indicate it's worthwhile.
*/
- t = dbp->bt_internal;
for (dir = UP, level = LEAFLEVEL;; dir == UP ? ++level : --level) {
/*
* Acquire a page and its parent, locked.
*/
if ((ret = (dbc->dbtype == DB_BTREE ?
- __bam_search(dbc, arg, S_WRPAIR, level, NULL, &exact) :
+ __bam_search(dbc, PGNO_INVALID,
+ arg, S_WRPAIR, level, NULL, &exact) :
__bam_rsearch(dbc,
(db_recno_t *)arg, S_WRPAIR, level, &exact))) != 0)
return (ret);
+ if (root_pgnop != NULL)
+ *root_pgnop = cp->csp[0].page->pgno == root_pgno ?
+ root_pgno : cp->csp[-1].page->pgno;
/*
* Split the page if it still needs it (it's possible another
* thread of control has already split the page). If we are
@@ -130,7 +131,7 @@ __bam_split(dbc, arg)
* is no longer necessary.
*/
if (2 * B_MAXSIZEONPAGE(cp->ovflsize)
- <= (db_indx_t)P_FREESPACE(cp->csp[0].page)) {
+ <= (db_indx_t)P_FREESPACE(dbc->dbp, cp->csp[0].page)) {
__bam_stkrel(dbc, STK_NOLOCK);
return (0);
}
@@ -178,12 +179,14 @@ __bam_root(dbc, cp)
DB *dbp;
DBT log_dbt;
DB_LSN log_lsn;
+ DB_MPOOLFILE *mpf;
PAGE *lp, *rp;
db_indx_t split;
u_int32_t opflags;
int ret;
dbp = dbc->dbp;
+ mpf = dbp->mpf;
/* Yeah, right. */
if (cp->page->level >= MAXBTREELEVEL) {
@@ -210,21 +213,22 @@ __bam_root(dbc, cp)
goto err;
/* Log the change. */
- if (DB_LOGGING(dbc)) {
+ if (DBC_LOGGING(dbc)) {
memset(&log_dbt, 0, sizeof(log_dbt));
log_dbt.data = cp->page;
log_dbt.size = dbp->pgsize;
ZERO_LSN(log_lsn);
opflags = F_ISSET(
(BTREE_CURSOR *)dbc->internal, C_RECNUM) ? SPL_NRECS : 0;
- if ((ret = __bam_split_log(dbp->dbenv, dbc->txn,
- &LSN(cp->page), 0, dbp->log_fileid, PGNO(lp), &LSN(lp),
- PGNO(rp), &LSN(rp), (u_int32_t)NUM_ENT(lp), 0, &log_lsn,
+ if ((ret = __bam_split_log(dbp,
+ dbc->txn, &LSN(cp->page), 0, PGNO(lp), &LSN(lp), PGNO(rp),
+ &LSN(rp), (u_int32_t)NUM_ENT(lp), 0, &log_lsn,
dbc->internal->root, &log_dbt, opflags)) != 0)
goto err;
- LSN(lp) = LSN(cp->page);
- LSN(rp) = LSN(cp->page);
- }
+ } else
+ LSN_NOT_LOGGED(LSN(cp->page));
+ LSN(lp) = LSN(cp->page);
+ LSN(rp) = LSN(cp->page);
/* Clean up the new root page. */
if ((ret = (dbc->dbtype == DB_RECNO ?
@@ -238,18 +242,18 @@ __bam_root(dbc, cp)
goto err;
/* Success -- write the real pages back to the store. */
- (void)memp_fput(dbp->mpf, cp->page, DB_MPOOL_DIRTY);
+ (void)mpf->put(mpf, cp->page, DB_MPOOL_DIRTY);
(void)__TLPUT(dbc, cp->lock);
- (void)memp_fput(dbp->mpf, lp, DB_MPOOL_DIRTY);
- (void)memp_fput(dbp->mpf, rp, DB_MPOOL_DIRTY);
+ (void)mpf->put(mpf, lp, DB_MPOOL_DIRTY);
+ (void)mpf->put(mpf, rp, DB_MPOOL_DIRTY);
return (0);
err: if (lp != NULL)
- (void)__db_free(dbc, lp);
+ (void)mpf->put(mpf, lp, 0);
if (rp != NULL)
- (void)__db_free(dbc, rp);
- (void)memp_fput(dbp->mpf, cp->page, 0);
+ (void)mpf->put(mpf, rp, 0);
+ (void)mpf->put(mpf, cp->page, 0);
(void)__TLPUT(dbc, cp->lock);
return (ret);
}
@@ -267,7 +271,8 @@ __bam_page(dbc, pp, cp)
DBT log_dbt;
DB_LSN log_lsn;
DB *dbp;
- DB_LOCK tplock;
+ DB_LOCK rplock, tplock;
+ DB_MPOOLFILE *mpf;
DB_LSN save_lsn;
PAGE *lp, *rp, *alloc_rp, *tp;
db_indx_t split;
@@ -275,8 +280,10 @@ __bam_page(dbc, pp, cp)
int ret, t_ret;
dbp = dbc->dbp;
+ mpf = dbp->mpf;
alloc_rp = lp = rp = tp = NULL;
- tplock.off = LOCK_INVALID;
+ LOCK_INIT(rplock);
+ LOCK_INIT(tplock);
ret = -1;
/*
@@ -296,7 +303,7 @@ __bam_page(dbc, pp, cp)
* up the tree badly, because we've violated the rule of always locking
* down the tree, and never up.
*/
- if ((ret = __os_malloc(dbp->dbenv, dbp->pgsize, NULL, &rp)) != 0)
+ if ((ret = __os_malloc(dbp->dbenv, dbp->pgsize, &rp)) != 0)
goto err;
P_INIT(rp, dbp->pgsize, 0,
ISINTERNAL(cp->page) ? PGNO_INVALID : PGNO(cp->page),
@@ -307,7 +314,7 @@ __bam_page(dbc, pp, cp)
* Create new left page for the split, and fill in everything
* except its LSN and next-page page number.
*/
- if ((ret = __os_malloc(dbp->dbenv, dbp->pgsize, NULL, &lp)) != 0)
+ if ((ret = __os_malloc(dbp->dbenv, dbp->pgsize, &lp)) != 0)
goto err;
P_INIT(lp, dbp->pgsize, PGNO(cp->page),
ISINTERNAL(cp->page) ? PGNO_INVALID : PREV_PGNO(cp->page),
@@ -351,8 +358,7 @@ __bam_page(dbc, pp, cp)
if ((ret = __db_lget(dbc,
0, NEXT_PGNO(cp->page), DB_LOCK_WRITE, 0, &tplock)) != 0)
goto err;
- if ((ret =
- memp_fget(dbp->mpf, &NEXT_PGNO(cp->page), 0, &tp)) != 0)
+ if ((ret = mpf->get(mpf, &NEXT_PGNO(cp->page), 0, &tp)) != 0)
goto err;
}
@@ -364,6 +370,15 @@ __bam_page(dbc, pp, cp)
goto err;
/*
+ * Lock the new page. We need to do this because someone
+ * could get here through bt_lpgno if this page was recently
+ * deallocated. They can't look at it before we commit.
+ */
+ if ((ret = __db_lget(dbc,
+ 0, PGNO(alloc_rp), DB_LOCK_WRITE, 0, &rplock)) != 0)
+ goto err;
+
+ /*
* Fix up the page numbers we didn't have before. We have to do this
* before calling __bam_pinsert because it may copy a page number onto
* the parent page and it takes the page number from its page argument.
@@ -376,29 +391,30 @@ __bam_page(dbc, pp, cp)
bc = (BTREE_CURSOR *)dbc->internal;
/* Log the change. */
- if (DB_LOGGING(dbc)) {
+ if (DBC_LOGGING(dbc)) {
memset(&log_dbt, 0, sizeof(log_dbt));
log_dbt.data = cp->page;
log_dbt.size = dbp->pgsize;
if (tp == NULL)
ZERO_LSN(log_lsn);
opflags = F_ISSET(bc, C_RECNUM) ? SPL_NRECS : 0;
- if ((ret = __bam_split_log(dbp->dbenv, dbc->txn,
- &LSN(cp->page), 0, dbp->log_fileid, PGNO(cp->page),
- &LSN(cp->page), PGNO(alloc_rp), &LSN(alloc_rp),
- (u_int32_t)NUM_ENT(lp),
+ if ((ret = __bam_split_log(dbp, dbc->txn, &LSN(cp->page), 0,
+ PGNO(cp->page), &LSN(cp->page), PGNO(alloc_rp),
+ &LSN(alloc_rp), (u_int32_t)NUM_ENT(lp),
tp == NULL ? 0 : PGNO(tp),
tp == NULL ? &log_lsn : &LSN(tp),
- bc->root, &log_dbt, opflags)) != 0)
+ PGNO_INVALID, &log_dbt, opflags)) != 0)
goto err;
- /* Update the LSNs for all involved pages. */
- LSN(alloc_rp) = LSN(cp->page);
- LSN(lp) = LSN(cp->page);
- LSN(rp) = LSN(cp->page);
- if (tp != NULL)
- LSN(tp) = LSN(cp->page);
- }
+ } else
+ LSN_NOT_LOGGED(LSN(cp->page));
+
+ /* Update the LSNs for all involved pages. */
+ LSN(alloc_rp) = LSN(cp->page);
+ LSN(lp) = LSN(cp->page);
+ LSN(rp) = LSN(cp->page);
+ if (tp != NULL)
+ LSN(tp) = LSN(cp->page);
/*
* Copy the left and right pages into place. There are two paths
@@ -411,13 +427,13 @@ __bam_page(dbc, pp, cp)
* do the copy.
*/
save_lsn = alloc_rp->lsn;
- memcpy(alloc_rp, rp, LOFFSET(rp));
+ memcpy(alloc_rp, rp, LOFFSET(dbp, rp));
memcpy((u_int8_t *)alloc_rp + HOFFSET(rp),
(u_int8_t *)rp + HOFFSET(rp), dbp->pgsize - HOFFSET(rp));
alloc_rp->lsn = save_lsn;
save_lsn = cp->page->lsn;
- memcpy(cp->page, lp, LOFFSET(lp));
+ memcpy(cp->page, lp, LOFFSET(dbp, lp));
memcpy((u_int8_t *)cp->page + HOFFSET(lp),
(u_int8_t *)lp + HOFFSET(lp), dbp->pgsize - HOFFSET(lp));
cp->page->lsn = save_lsn;
@@ -431,8 +447,8 @@ __bam_page(dbc, pp, cp)
PGNO(cp->page), PGNO(cp->page), PGNO(rp), split, 0)) != 0)
goto err;
- __os_free(lp, dbp->pgsize);
- __os_free(rp, dbp->pgsize);
+ __os_free(dbp->dbenv, lp);
+ __os_free(dbp->dbenv, rp);
/*
* Success -- write the real pages back to the store. As we never
@@ -440,45 +456,43 @@ __bam_page(dbc, pp, cp)
* releasing locks on the pages that reference it. We're finished
* modifying the page so it's not really necessary, but it's neater.
*/
- if ((t_ret =
- memp_fput(dbp->mpf, alloc_rp, DB_MPOOL_DIRTY)) != 0 && ret == 0)
+ if ((t_ret = mpf->put(mpf, alloc_rp, DB_MPOOL_DIRTY)) != 0 && ret == 0)
ret = t_ret;
- if ((t_ret =
- memp_fput(dbp->mpf, pp->page, DB_MPOOL_DIRTY)) != 0 && ret == 0)
+ (void)__TLPUT(dbc, rplock);
+ if ((t_ret = mpf->put(mpf, pp->page, DB_MPOOL_DIRTY)) != 0 && ret == 0)
ret = t_ret;
(void)__TLPUT(dbc, pp->lock);
- if ((t_ret =
- memp_fput(dbp->mpf, cp->page, DB_MPOOL_DIRTY)) != 0 && ret == 0)
+ if ((t_ret = mpf->put(mpf, cp->page, DB_MPOOL_DIRTY)) != 0 && ret == 0)
ret = t_ret;
(void)__TLPUT(dbc, cp->lock);
if (tp != NULL) {
if ((t_ret =
- memp_fput(dbp->mpf, tp, DB_MPOOL_DIRTY)) != 0 && ret == 0)
+ mpf->put(mpf, tp, DB_MPOOL_DIRTY)) != 0 && ret == 0)
ret = t_ret;
(void)__TLPUT(dbc, tplock);
}
return (ret);
err: if (lp != NULL)
- __os_free(lp, dbp->pgsize);
+ __os_free(dbp->dbenv, lp);
if (rp != NULL)
- __os_free(rp, dbp->pgsize);
+ __os_free(dbp->dbenv, rp);
if (alloc_rp != NULL)
- (void)__db_free(dbc, alloc_rp);
-
+ (void)mpf->put(mpf, alloc_rp, 0);
if (tp != NULL)
- (void)memp_fput(dbp->mpf, tp, 0);
- if (tplock.off != LOCK_INVALID)
- /* We never updated the next page, we can release it. */
- (void)__LPUT(dbc, tplock);
+ (void)mpf->put(mpf, tp, 0);
+
+ /* We never updated the new or next pages, so we can release them. */
+ (void)__LPUT(dbc, rplock);
+ (void)__LPUT(dbc, tplock);
- (void)memp_fput(dbp->mpf, pp->page, 0);
+ (void)mpf->put(mpf, pp->page, 0);
if (ret == DB_NEEDSPLIT)
(void)__LPUT(dbc, pp->lock);
else
(void)__TLPUT(dbc, pp->lock);
- (void)memp_fput(dbp->mpf, cp->page, 0);
+ (void)mpf->put(mpf, cp->page, 0);
if (ret == DB_NEEDSPLIT)
(void)__LPUT(dbc, cp->lock);
else
@@ -529,7 +543,7 @@ __bam_broot(dbc, rootp, lp, rp)
B_TSET(bi.type, B_KEYDATA, 0);
bi.pgno = lp->pgno;
if (F_ISSET(cp, C_RECNUM)) {
- bi.nrecs = __bam_total(lp);
+ bi.nrecs = __bam_total(dbp, lp);
RE_NREC_SET(rootp, bi.nrecs);
}
hdr.data = &bi;
@@ -541,13 +555,13 @@ __bam_broot(dbc, rootp, lp, rp)
switch (TYPE(rp)) {
case P_IBTREE:
/* Copy the first key of the child page onto the root page. */
- child_bi = GET_BINTERNAL(rp, 0);
+ child_bi = GET_BINTERNAL(dbp, rp, 0);
bi.len = child_bi->len;
B_TSET(bi.type, child_bi->type, 0);
bi.pgno = rp->pgno;
if (F_ISSET(cp, C_RECNUM)) {
- bi.nrecs = __bam_total(rp);
+ bi.nrecs = __bam_total(dbp, rp);
RE_NREC_ADJ(rootp, bi.nrecs);
}
hdr.data = &bi;
@@ -567,14 +581,14 @@ __bam_broot(dbc, rootp, lp, rp)
case P_LDUP:
case P_LBTREE:
/* Copy the first key of the child page onto the root page. */
- child_bk = GET_BKEYDATA(rp, 0);
+ child_bk = GET_BKEYDATA(dbp, rp, 0);
switch (B_TYPE(child_bk->type)) {
case B_KEYDATA:
bi.len = child_bk->len;
B_TSET(bi.type, child_bk->type, 0);
bi.pgno = rp->pgno;
if (F_ISSET(cp, C_RECNUM)) {
- bi.nrecs = __bam_total(rp);
+ bi.nrecs = __bam_total(dbp, rp);
RE_NREC_ADJ(rootp, bi.nrecs);
}
hdr.data = &bi;
@@ -591,7 +605,7 @@ __bam_broot(dbc, rootp, lp, rp)
B_TSET(bi.type, child_bk->type, 0);
bi.pgno = rp->pgno;
if (F_ISSET(cp, C_RECNUM)) {
- bi.nrecs = __bam_total(rp);
+ bi.nrecs = __bam_total(dbp, rp);
RE_NREC_ADJ(rootp, bi.nrecs);
}
hdr.data = &bi;
@@ -609,11 +623,11 @@ __bam_broot(dbc, rootp, lp, rp)
return (ret);
break;
default:
- return (__db_pgfmt(dbp, rp->pgno));
+ return (__db_pgfmt(dbp->dbenv, rp->pgno));
}
break;
default:
- return (__db_pgfmt(dbp, rp->pgno));
+ return (__db_pgfmt(dbp->dbenv, rp->pgno));
}
return (0);
}
@@ -647,12 +661,12 @@ __ram_root(dbc, rootp, lp, rp)
/* Insert the left and right keys, set the header information. */
ri.pgno = lp->pgno;
- ri.nrecs = __bam_total(lp);
+ ri.nrecs = __bam_total(dbp, lp);
if ((ret = __db_pitem(dbc, rootp, 0, RINTERNAL_SIZE, &hdr, NULL)) != 0)
return (ret);
RE_NREC_SET(rootp, ri.nrecs);
ri.pgno = rp->pgno;
- ri.nrecs = __bam_total(rp);
+ ri.nrecs = __bam_total(dbp, rp);
if ((ret = __db_pitem(dbc, rootp, 1, RINTERNAL_SIZE, &hdr, NULL)) != 0)
return (ret);
RE_NREC_ADJ(rootp, ri.nrecs);
@@ -690,7 +704,8 @@ __bam_pinsert(dbc, parent, lchild, rchild, space_check)
ppage = parent->page;
/* If handling record numbers, count records split to the right page. */
- nrecs = F_ISSET(cp, C_RECNUM) && !space_check ? __bam_total(rchild) : 0;
+ nrecs = F_ISSET(cp, C_RECNUM) &&
+ !space_check ? __bam_total(dbp, rchild) : 0;
/*
* Now we insert the new page's first key into the parent page, which
@@ -721,10 +736,10 @@ __bam_pinsert(dbc, parent, lchild, rchild, space_check)
*/
switch (TYPE(rchild)) {
case P_IBTREE:
- child_bi = GET_BINTERNAL(rchild, 0);
+ child_bi = GET_BINTERNAL(dbp, rchild, 0);
nbytes = BINTERNAL_PSIZE(child_bi->len);
- if (P_FREESPACE(ppage) < nbytes)
+ if (P_FREESPACE(dbp, ppage) < nbytes)
return (DB_NEEDSPLIT);
if (space_check)
return (0);
@@ -753,7 +768,7 @@ __bam_pinsert(dbc, parent, lchild, rchild, space_check)
break;
case P_LDUP:
case P_LBTREE:
- child_bk = GET_BKEYDATA(rchild, 0);
+ child_bk = GET_BKEYDATA(dbp, rchild, 0);
switch (B_TYPE(child_bk->type)) {
case B_KEYDATA:
/*
@@ -783,7 +798,7 @@ __bam_pinsert(dbc, parent, lchild, rchild, space_check)
goto noprefix;
if (ppage->prev_pgno == PGNO_INVALID && off <= 1)
goto noprefix;
- tmp_bk = GET_BKEYDATA(lchild, NUM_ENT(lchild) -
+ tmp_bk = GET_BKEYDATA(dbp, lchild, NUM_ENT(lchild) -
(TYPE(lchild) == P_LDUP ? O_INDX : P_INDX));
if (B_TYPE(tmp_bk->type) != B_KEYDATA)
goto noprefix;
@@ -793,13 +808,13 @@ __bam_pinsert(dbc, parent, lchild, rchild, space_check)
memset(&b, 0, sizeof(b));
b.size = child_bk->len;
b.data = child_bk->data;
- nksize = func(dbp, &a, &b);
+ nksize = (u_int32_t)func(dbp, &a, &b);
if ((n = BINTERNAL_PSIZE(nksize)) < nbytes)
nbytes = n;
else
noprefix: nksize = child_bk->len;
- if (P_FREESPACE(ppage) < nbytes)
+ if (P_FREESPACE(dbp, ppage) < nbytes)
return (DB_NEEDSPLIT);
if (space_check)
return (0);
@@ -823,7 +838,7 @@ noprefix: nksize = child_bk->len;
case B_OVERFLOW:
nbytes = BINTERNAL_PSIZE(BOVERFLOW_SIZE);
- if (P_FREESPACE(ppage) < nbytes)
+ if (P_FREESPACE(dbp, ppage) < nbytes)
return (DB_NEEDSPLIT);
if (space_check)
return (0);
@@ -850,14 +865,14 @@ noprefix: nksize = child_bk->len;
return (ret);
break;
default:
- return (__db_pgfmt(dbp, rchild->pgno));
+ return (__db_pgfmt(dbp->dbenv, rchild->pgno));
}
break;
case P_IRECNO:
case P_LRECNO:
nbytes = RINTERNAL_PSIZE;
- if (P_FREESPACE(ppage) < nbytes)
+ if (P_FREESPACE(dbp, ppage) < nbytes)
return (DB_NEEDSPLIT);
if (space_check)
return (0);
@@ -873,7 +888,7 @@ noprefix: nksize = child_bk->len;
return (ret);
break;
default:
- return (__db_pgfmt(dbp, rchild->pgno));
+ return (__db_pgfmt(dbp->dbenv, rchild->pgno));
}
/*
@@ -882,17 +897,19 @@ noprefix: nksize = child_bk->len;
*/
if (F_ISSET(cp, C_RECNUM)) {
/* Log the change. */
- if (DB_LOGGING(dbc) &&
- (ret = __bam_cadjust_log(dbp->dbenv, dbc->txn,
- &LSN(ppage), 0, dbp->log_fileid, PGNO(ppage),
+ if (DBC_LOGGING(dbc)) {
+ if ((ret = __bam_cadjust_log(dbp, dbc->txn,
+ &LSN(ppage), 0, PGNO(ppage),
&LSN(ppage), parent->indx, -(int32_t)nrecs, 0)) != 0)
return (ret);
+ } else
+ LSN_NOT_LOGGED(LSN(ppage));
/* Update the left page count. */
if (dbc->dbtype == DB_RECNO)
- GET_RINTERNAL(ppage, parent->indx)->nrecs -= nrecs;
+ GET_RINTERNAL(dbp, ppage, parent->indx)->nrecs -= nrecs;
else
- GET_BINTERNAL(ppage, parent->indx)->nrecs -= nrecs;
+ GET_BINTERNAL(dbp, ppage, parent->indx)->nrecs -= nrecs;
}
return (0);
@@ -911,28 +928,52 @@ __bam_psplit(dbc, cp, lp, rp, splitret)
{
DB *dbp;
PAGE *pp;
- db_indx_t half, nbytes, off, splitp, top;
+ db_indx_t half, *inp, nbytes, off, splitp, top;
int adjust, cnt, iflag, isbigkey, ret;
dbp = dbc->dbp;
pp = cp->page;
+ inp = P_INP(dbp, pp);
adjust = TYPE(pp) == P_LBTREE ? P_INDX : O_INDX;
/*
* If we're splitting the first (last) page on a level because we're
* inserting (appending) a key to it, it's likely that the data is
* sorted. Moving a single item to the new page is less work and can
- * push the fill factor higher than normal. If we're wrong it's not
- * a big deal, we'll just do the split the right way next time.
+ * push the fill factor higher than normal. This is trivial when we
+ * are splitting a new page before the beginning of the tree; all of
+ * the interesting tests are against values of 0.
+ *
+ * Catching appends to the tree is harder. In a simple append, we're
+ * inserting an item that sorts past the end of the tree; the cursor
+ * will point past the last element on the page. But, in trees with
+ * duplicates, the cursor may point to the last entry on the page --
+ * in this case, the entry will also be the last element of a duplicate
+ * set (the last because the search call specified the S_DUPLAST flag).
+ * The only way to differentiate between an insert immediately before
+ * the last item in a tree or an append after a duplicate set which is
+ * also the last item in the tree is to call the comparison function.
+ * When splitting internal pages during an append, the search code
+ * guarantees the cursor always points to the largest page item less
+ * than the new internal entry. To summarize, we want to catch three
+ * possible index values:
+ *
+ * NUM_ENT(page) Btree/Recno leaf insert past end-of-tree
+ * NUM_ENT(page) - O_INDX Btree or Recno internal insert past EOT
+ * NUM_ENT(page) - P_INDX Btree leaf insert past EOT after a set
+ * of duplicates
+ *
+ * two of which (NUM_ENT(page) - O_INDX or P_INDX) might be an insert
+ * near the end of the tree, and not after the end of the tree at all.
+ * Do a simple test which might be wrong because calling the comparison
+ * functions is expensive. Regardless, it's not a big deal if we're
+ * wrong, we'll do the split the right way next time.
*/
off = 0;
- if (NEXT_PGNO(pp) == PGNO_INVALID &&
- ((ISINTERNAL(pp) && cp->indx == NUM_ENT(cp->page) - 1) ||
- (!ISINTERNAL(pp) && cp->indx == NUM_ENT(cp->page))))
- off = NUM_ENT(cp->page) - adjust;
+ if (NEXT_PGNO(pp) == PGNO_INVALID && cp->indx >= NUM_ENT(pp) - adjust)
+ off = NUM_ENT(pp) - adjust;
else if (PREV_PGNO(pp) == PGNO_INVALID && cp->indx == 0)
off = adjust;
-
if (off != 0)
goto sort;
@@ -962,16 +1003,18 @@ __bam_psplit(dbc, cp, lp, rp, splitret)
for (nbytes = 0, off = 0; off < top && nbytes < half; ++off)
switch (TYPE(pp)) {
case P_IBTREE:
- if (B_TYPE(GET_BINTERNAL(pp, off)->type) == B_KEYDATA)
- nbytes +=
- BINTERNAL_SIZE(GET_BINTERNAL(pp, off)->len);
+ if (B_TYPE(
+ GET_BINTERNAL(dbp, pp, off)->type) == B_KEYDATA)
+ nbytes += BINTERNAL_SIZE(
+ GET_BINTERNAL(dbp, pp, off)->len);
else
nbytes += BINTERNAL_SIZE(BOVERFLOW_SIZE);
break;
case P_LBTREE:
- if (B_TYPE(GET_BKEYDATA(pp, off)->type) == B_KEYDATA)
- nbytes +=
- BKEYDATA_SIZE(GET_BKEYDATA(pp, off)->len);
+ if (B_TYPE(GET_BKEYDATA(dbp, pp, off)->type) ==
+ B_KEYDATA)
+ nbytes += BKEYDATA_SIZE(GET_BKEYDATA(dbp,
+ pp, off)->len);
else
nbytes += BOVERFLOW_SIZE;
@@ -979,9 +1022,10 @@ __bam_psplit(dbc, cp, lp, rp, splitret)
/* FALLTHROUGH */
case P_LDUP:
case P_LRECNO:
- if (B_TYPE(GET_BKEYDATA(pp, off)->type) == B_KEYDATA)
- nbytes +=
- BKEYDATA_SIZE(GET_BKEYDATA(pp, off)->len);
+ if (B_TYPE(GET_BKEYDATA(dbp, pp, off)->type) ==
+ B_KEYDATA)
+ nbytes += BKEYDATA_SIZE(GET_BKEYDATA(dbp,
+ pp, off)->len);
else
nbytes += BOVERFLOW_SIZE;
break;
@@ -989,7 +1033,7 @@ __bam_psplit(dbc, cp, lp, rp, splitret)
nbytes += RINTERNAL_SIZE;
break;
default:
- return (__db_pgfmt(dbp, pp->pgno));
+ return (__db_pgfmt(dbp->dbenv, pp->pgno));
}
sort: splitp = off;
@@ -1002,12 +1046,14 @@ sort: splitp = off;
switch (TYPE(pp)) {
case P_IBTREE:
iflag = 1;
- isbigkey = B_TYPE(GET_BINTERNAL(pp, off)->type) != B_KEYDATA;
+ isbigkey =
+ B_TYPE(GET_BINTERNAL(dbp, pp, off)->type) != B_KEYDATA;
break;
case P_LBTREE:
case P_LDUP:
iflag = 0;
- isbigkey = B_TYPE(GET_BKEYDATA(pp, off)->type) != B_KEYDATA;
+ isbigkey = B_TYPE(GET_BKEYDATA(dbp, pp, off)->type) !=
+ B_KEYDATA;
break;
default:
iflag = isbigkey = 0;
@@ -1016,18 +1062,20 @@ sort: splitp = off;
for (cnt = 1; cnt <= 3; ++cnt) {
off = splitp + cnt * adjust;
if (off < (db_indx_t)NUM_ENT(pp) &&
- ((iflag &&
- B_TYPE(GET_BINTERNAL(pp,off)->type) == B_KEYDATA) ||
- B_TYPE(GET_BKEYDATA(pp, off)->type) == B_KEYDATA)) {
+ ((iflag && B_TYPE(
+ GET_BINTERNAL(dbp, pp,off)->type) == B_KEYDATA) ||
+ B_TYPE(GET_BKEYDATA(dbp, pp, off)->type) ==
+ B_KEYDATA)) {
splitp = off;
break;
}
if (splitp <= (db_indx_t)(cnt * adjust))
continue;
off = splitp - cnt * adjust;
- if (iflag ?
- B_TYPE(GET_BINTERNAL(pp, off)->type) == B_KEYDATA :
- B_TYPE(GET_BKEYDATA(pp, off)->type) == B_KEYDATA) {
+ if (iflag ? B_TYPE(
+ GET_BINTERNAL(dbp, pp, off)->type) == B_KEYDATA :
+ B_TYPE(GET_BKEYDATA(dbp, pp, off)->type) ==
+ B_KEYDATA) {
splitp = off;
break;
}
@@ -1040,18 +1088,18 @@ sort: splitp = off;
* page set. So, this loop can't be unbounded.
*/
if (TYPE(pp) == P_LBTREE &&
- pp->inp[splitp] == pp->inp[splitp - adjust])
+ inp[splitp] == inp[splitp - adjust])
for (cnt = 1;; ++cnt) {
off = splitp + cnt * adjust;
if (off < NUM_ENT(pp) &&
- pp->inp[splitp] != pp->inp[off]) {
+ inp[splitp] != inp[off]) {
splitp = off;
break;
}
if (splitp <= (db_indx_t)(cnt * adjust))
continue;
off = splitp - cnt * adjust;
- if (pp->inp[splitp] != pp->inp[off]) {
+ if (inp[splitp] != inp[off]) {
splitp = off + adjust;
break;
}
@@ -1079,18 +1127,20 @@ __bam_copy(dbp, pp, cp, nxt, stop)
PAGE *pp, *cp;
u_int32_t nxt, stop;
{
- db_indx_t nbytes, off;
+ db_indx_t *cinp, nbytes, off, *pinp;
+ cinp = P_INP(dbp, cp);
+ pinp = P_INP(dbp, pp);
/*
- * Copy the rest of the data to the right page. Nxt is the next
- * offset placed on the target page.
+ * Nxt is the offset of the next record to be placed on the target page.
*/
for (off = 0; nxt < stop; ++nxt, ++NUM_ENT(cp), ++off) {
switch (TYPE(pp)) {
case P_IBTREE:
- if (B_TYPE(GET_BINTERNAL(pp, nxt)->type) == B_KEYDATA)
- nbytes =
- BINTERNAL_SIZE(GET_BINTERNAL(pp, nxt)->len);
+ if (B_TYPE(
+ GET_BINTERNAL(dbp, pp, nxt)->type) == B_KEYDATA)
+ nbytes = BINTERNAL_SIZE(
+ GET_BINTERNAL(dbp, pp, nxt)->len);
else
nbytes = BINTERNAL_SIZE(BOVERFLOW_SIZE);
break;
@@ -1100,16 +1150,17 @@ __bam_copy(dbp, pp, cp, nxt, stop)
* the offset.
*/
if (off != 0 && (nxt % P_INDX) == 0 &&
- pp->inp[nxt] == pp->inp[nxt - P_INDX]) {
- cp->inp[off] = cp->inp[off - P_INDX];
+ pinp[nxt] == pinp[nxt - P_INDX]) {
+ cinp[off] = cinp[off - P_INDX];
continue;
}
/* FALLTHROUGH */
case P_LDUP:
case P_LRECNO:
- if (B_TYPE(GET_BKEYDATA(pp, nxt)->type) == B_KEYDATA)
- nbytes =
- BKEYDATA_SIZE(GET_BKEYDATA(pp, nxt)->len);
+ if (B_TYPE(GET_BKEYDATA(dbp, pp, nxt)->type) ==
+ B_KEYDATA)
+ nbytes = BKEYDATA_SIZE(GET_BKEYDATA(dbp,
+ pp, nxt)->len);
else
nbytes = BOVERFLOW_SIZE;
break;
@@ -1117,10 +1168,10 @@ __bam_copy(dbp, pp, cp, nxt, stop)
nbytes = RINTERNAL_SIZE;
break;
default:
- return (__db_pgfmt(dbp, pp->pgno));
+ return (__db_pgfmt(dbp->dbenv, pp->pgno));
}
- cp->inp[off] = HOFFSET(cp) -= nbytes;
- memcpy(P_ENTRY(cp, off), P_ENTRY(pp, nxt), nbytes);
+ cinp[off] = HOFFSET(cp) -= nbytes;
+ memcpy(P_ENTRY(dbp, cp, off), P_ENTRY(dbp, pp, nxt), nbytes);
}
return (0);
}
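The bt_split.c hunks also switch the logging test from DB_LOGGING to DBC_LOGGING and stamp pages with LSN_NOT_LOGGED when no log record is written, after which the sibling LSNs are copied unconditionally. A sketch of that pattern, for illustration only (the log call itself is elided and the helper name is invented):

#include "db_int.h"
#include "dbinc/db_page.h"

/*
 * example_stamp_lsns --
 *	Illustration of the DBC_LOGGING/LSN_NOT_LOGGED convention used in
 *	__bam_root and __bam_page above; not part of the patch.
 */
static int
example_stamp_lsns(dbc, pp, lp, rp)
	DBC *dbc;
	PAGE *pp, *lp, *rp;
{
	int ret;

	ret = 0;
	if (DBC_LOGGING(dbc)) {
		/* Real code writes the log record (__bam_split_log) here. */
	} else
		LSN_NOT_LOGGED(LSN(pp));

	/* LSNs of the new pages are updated whether or not we logged. */
	LSN(lp) = LSN(pp);
	LSN(rp) = LSN(pp);
	return (ret);
}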
diff --git a/bdb/btree/bt_stat.c b/bdb/btree/bt_stat.c
index 349bb40cf8b..4428de98294 100644
--- a/bdb/btree/bt_stat.c
+++ b/bdb/btree/bt_stat.c
@@ -1,14 +1,14 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Copyright (c) 1996-2002
* Sleepycat Software. All rights reserved.
*/
#include "db_config.h"
#ifndef lint
-static const char revid[] = "$Id: bt_stat.c,v 11.29 2000/11/28 21:42:27 bostic Exp $";
+static const char revid[] = "$Id: bt_stat.c,v 11.52 2002/05/30 15:40:27 krinsky Exp $";
#endif /* not lint */
#ifndef NO_SYSTEM_INCLUDES
@@ -18,22 +18,22 @@ static const char revid[] = "$Id: bt_stat.c,v 11.29 2000/11/28 21:42:27 bostic E
#endif
#include "db_int.h"
-#include "db_page.h"
-#include "db_shash.h"
-#include "lock.h"
-#include "btree.h"
+#include "dbinc/db_page.h"
+#include "dbinc/db_shash.h"
+#include "dbinc/btree.h"
+#include "dbinc/lock.h"
+#include "dbinc/log.h"
/*
* __bam_stat --
* Gather/print the btree statistics
*
- * PUBLIC: int __bam_stat __P((DB *, void *, void *(*)(size_t), u_int32_t));
+ * PUBLIC: int __bam_stat __P((DB *, void *, u_int32_t));
*/
int
-__bam_stat(dbp, spp, db_malloc, flags)
+__bam_stat(dbp, spp, flags)
DB *dbp;
void *spp;
- void *(*db_malloc) __P((size_t));
u_int32_t flags;
{
BTMETA *meta;
@@ -42,9 +42,10 @@ __bam_stat(dbp, spp, db_malloc, flags)
DBC *dbc;
DB_BTREE_STAT *sp;
DB_LOCK lock, metalock;
+ DB_MPOOLFILE *mpf;
PAGE *h;
db_pgno_t pgno;
- int ret, t_ret;
+ int ret, t_ret, write_meta;
PANIC_CHECK(dbp->dbenv);
DB_ILLEGAL_BEFORE_OPEN(dbp, "DB->stat");
@@ -52,9 +53,12 @@ __bam_stat(dbp, spp, db_malloc, flags)
meta = NULL;
t = dbp->bt_internal;
sp = NULL;
- metalock.off = lock.off = LOCK_INVALID;
+ LOCK_INIT(metalock);
+ LOCK_INIT(lock);
+ mpf = dbp->mpf;
h = NULL;
ret = 0;
+ write_meta = 0;
/* Check for invalid flags. */
if ((ret = __db_statchk(dbp, flags)) != 0)
@@ -68,52 +72,31 @@ __bam_stat(dbp, spp, db_malloc, flags)
DEBUG_LWRITE(dbc, NULL, "bam_stat", NULL, NULL, flags);
/* Allocate and clear the structure. */
- if ((ret = __os_malloc(dbp->dbenv, sizeof(*sp), db_malloc, &sp)) != 0)
+ if ((ret = __os_umalloc(dbp->dbenv, sizeof(*sp), &sp)) != 0)
goto err;
memset(sp, 0, sizeof(*sp));
- /* If the app just wants the record count, make it fast. */
- if (flags == DB_RECORDCOUNT) {
- if ((ret = __db_lget(dbc, 0,
- cp->root, DB_LOCK_READ, 0, &lock)) != 0)
- goto err;
- if ((ret = memp_fget(dbp->mpf,
- &cp->root, 0, (PAGE **)&h)) != 0)
- goto err;
-
- sp->bt_nkeys = RE_NREC(h);
-
- goto done;
- }
- if (flags == DB_CACHED_COUNTS) {
- if ((ret = __db_lget(dbc,
- 0, t->bt_meta, DB_LOCK_READ, 0, &lock)) != 0)
- goto err;
- if ((ret =
- memp_fget(dbp->mpf, &t->bt_meta, 0, (PAGE **)&meta)) != 0)
- goto err;
- sp->bt_nkeys = meta->dbmeta.key_count;
- sp->bt_ndata = meta->dbmeta.record_count;
-
- goto done;
- }
-
/* Get the metadata page for the entire database. */
pgno = PGNO_BASE_MD;
if ((ret = __db_lget(dbc, 0, pgno, DB_LOCK_READ, 0, &metalock)) != 0)
goto err;
- if ((ret = memp_fget(dbp->mpf, &pgno, 0, (PAGE **)&meta)) != 0)
+ if ((ret = mpf->get(mpf, &pgno, 0, (PAGE **)&meta)) != 0)
goto err;
+ if (flags == DB_RECORDCOUNT || flags == DB_CACHED_COUNTS)
+ flags = DB_FAST_STAT;
+ if (flags == DB_FAST_STAT)
+ goto meta_only;
+
/* Walk the metadata free list, counting pages. */
for (sp->bt_free = 0, pgno = meta->dbmeta.free; pgno != PGNO_INVALID;) {
++sp->bt_free;
- if ((ret = memp_fget(dbp->mpf, &pgno, 0, &h)) != 0)
+ if ((ret = mpf->get(mpf, &pgno, 0, &h)) != 0)
goto err;
pgno = h->next_pgno;
- if ((ret = memp_fput(dbp->mpf, h, 0)) != 0)
+ if ((ret = mpf->put(mpf, h, 0)) != 0)
goto err;
h = NULL;
}
@@ -122,14 +105,14 @@ __bam_stat(dbp, spp, db_malloc, flags)
pgno = cp->root;
if ((ret = __db_lget(dbc, 0, pgno, DB_LOCK_READ, 0, &lock)) != 0)
goto err;
- if ((ret = memp_fget(dbp->mpf, &pgno, 0, &h)) != 0)
+ if ((ret = mpf->get(mpf, &pgno, 0, &h)) != 0)
goto err;
/* Get the levels from the root page. */
sp->bt_levels = h->level;
/* Discard the root page. */
- if ((ret = memp_fput(dbp->mpf, h, 0)) != 0)
+ if ((ret = mpf->put(mpf, h, 0)) != 0)
goto err;
h = NULL;
__LPUT(dbc, lock);
@@ -143,20 +126,36 @@ __bam_stat(dbp, spp, db_malloc, flags)
* Get the subdatabase metadata page if it's not the same as the
* one we already have.
*/
- if (t->bt_meta != PGNO_BASE_MD || !F_ISSET(dbp, DB_AM_RDONLY)) {
- if ((ret = memp_fput(dbp->mpf, meta, 0)) != 0)
+ write_meta = !F_ISSET(dbp, DB_AM_RDONLY);
+meta_only:
+ if (t->bt_meta != PGNO_BASE_MD || write_meta != 0) {
+ if ((ret = mpf->put(mpf, meta, 0)) != 0)
goto err;
meta = NULL;
__LPUT(dbc, metalock);
if ((ret = __db_lget(dbc,
- 0, t->bt_meta, F_ISSET(dbp, DB_AM_RDONLY) ?
+ 0, t->bt_meta, write_meta == 0 ?
DB_LOCK_READ : DB_LOCK_WRITE, 0, &metalock)) != 0)
goto err;
- if ((ret =
- memp_fget(dbp->mpf, &t->bt_meta, 0, (PAGE **)&meta)) != 0)
+ if ((ret = mpf->get(mpf, &t->bt_meta, 0, (PAGE **)&meta)) != 0)
goto err;
}
+ if (flags == DB_FAST_STAT) {
+ if (dbp->type == DB_RECNO ||
+ (dbp->type == DB_BTREE && F_ISSET(dbp, DB_AM_RECNUM))) {
+ if ((ret = __db_lget(dbc, 0,
+ cp->root, DB_LOCK_READ, 0, &lock)) != 0)
+ goto err;
+ if ((ret =
+ mpf->get(mpf, &cp->root, 0, (PAGE **)&h)) != 0)
+ goto err;
+
+ sp->bt_nkeys = RE_NREC(h);
+ } else
+ sp->bt_nkeys = meta->dbmeta.key_count;
+ sp->bt_ndata = meta->dbmeta.record_count;
+ }
/* Get metadata page statistics. */
sp->bt_metaflags = meta->dbmeta.flags;
@@ -167,39 +166,33 @@ __bam_stat(dbp, spp, db_malloc, flags)
sp->bt_pagesize = meta->dbmeta.pagesize;
sp->bt_magic = meta->dbmeta.magic;
sp->bt_version = meta->dbmeta.version;
- if (!F_ISSET(dbp, DB_AM_RDONLY)) {
+
+ if (write_meta != 0) {
meta->dbmeta.key_count = sp->bt_nkeys;
meta->dbmeta.record_count = sp->bt_ndata;
}
- /* Discard the metadata page. */
- if ((ret = memp_fput(dbp->mpf,
- meta, F_ISSET(dbp, DB_AM_RDONLY) ? 0 : DB_MPOOL_DIRTY)) != 0)
- goto err;
- meta = NULL;
- __LPUT(dbc, metalock);
-
-done: *(DB_BTREE_STAT **)spp = sp;
-
- if (0) {
-err: if (sp != NULL)
- __os_free(sp, sizeof(*sp));
- }
+ *(DB_BTREE_STAT **)spp = sp;
- if (h != NULL &&
- (t_ret = memp_fput(dbp->mpf, h, 0)) != 0 && ret == 0)
+err: /* Discard the second page. */
+ __LPUT(dbc, lock);
+ if (h != NULL && (t_ret = mpf->put(mpf, h, 0)) != 0 && ret == 0)
ret = t_ret;
- if (meta != NULL &&
- (t_ret = memp_fput(dbp->mpf, meta, 0)) != 0 && ret == 0)
+ /* Discard the metadata page. */
+ __LPUT(dbc, metalock);
+ if (meta != NULL && (t_ret = mpf->put(
+ mpf, meta, write_meta == 0 ? 0 : DB_MPOOL_DIRTY)) != 0 && ret == 0)
ret = t_ret;
- if (lock.off != LOCK_INVALID)
- __LPUT(dbc, lock);
-
if ((t_ret = dbc->c_close(dbc)) != 0 && ret == 0)
ret = t_ret;
+ if (ret != 0 && sp != NULL) {
+ __os_ufree(dbp->dbenv, sp);
+ *(DB_BTREE_STAT **)spp = NULL;
+ }
+
return (ret);
}
@@ -222,22 +215,27 @@ __bam_traverse(dbc, mode, root_pgno, callback, cookie)
BKEYDATA *bk;
DB *dbp;
DB_LOCK lock;
+ DB_MPOOLFILE *mpf;
PAGE *h;
RINTERNAL *ri;
db_indx_t indx;
int already_put, ret, t_ret;
dbp = dbc->dbp;
+ mpf = dbp->mpf;
+ already_put = 0;
if ((ret = __db_lget(dbc, 0, root_pgno, mode, 0, &lock)) != 0)
return (ret);
- if ((ret = memp_fget(dbp->mpf, &root_pgno, 0, &h)) != 0)
- goto err;
+ if ((ret = mpf->get(mpf, &root_pgno, 0, &h)) != 0) {
+ __LPUT(dbc, lock);
+ return (ret);
+ }
switch (TYPE(h)) {
case P_IBTREE:
for (indx = 0; indx < NUM_ENT(h); indx += O_INDX) {
- bi = GET_BINTERNAL(h, indx);
+ bi = GET_BINTERNAL(dbp, h, indx);
if (B_TYPE(bi->type) == B_OVERFLOW &&
(ret = __db_traverse_big(dbp,
((BOVERFLOW *)bi->data)->pgno,
@@ -245,34 +243,34 @@ __bam_traverse(dbc, mode, root_pgno, callback, cookie)
goto err;
if ((ret = __bam_traverse(
dbc, mode, bi->pgno, callback, cookie)) != 0)
- break;
+ goto err;
}
break;
case P_IRECNO:
for (indx = 0; indx < NUM_ENT(h); indx += O_INDX) {
- ri = GET_RINTERNAL(h, indx);
+ ri = GET_RINTERNAL(dbp, h, indx);
if ((ret = __bam_traverse(
dbc, mode, ri->pgno, callback, cookie)) != 0)
- break;
+ goto err;
}
break;
case P_LBTREE:
for (indx = 0; indx < NUM_ENT(h); indx += P_INDX) {
- bk = GET_BKEYDATA(h, indx);
+ bk = GET_BKEYDATA(dbp, h, indx);
if (B_TYPE(bk->type) == B_OVERFLOW &&
(ret = __db_traverse_big(dbp,
- GET_BOVERFLOW(h, indx)->pgno,
+ GET_BOVERFLOW(dbp, h, indx)->pgno,
callback, cookie)) != 0)
goto err;
- bk = GET_BKEYDATA(h, indx + O_INDX);
+ bk = GET_BKEYDATA(dbp, h, indx + O_INDX);
if (B_TYPE(bk->type) == B_DUPLICATE &&
(ret = __bam_traverse(dbc, mode,
- GET_BOVERFLOW(h, indx + O_INDX)->pgno,
+ GET_BOVERFLOW(dbp, h, indx + O_INDX)->pgno,
callback, cookie)) != 0)
goto err;
if (B_TYPE(bk->type) == B_OVERFLOW &&
(ret = __db_traverse_big(dbp,
- GET_BOVERFLOW(h, indx + O_INDX)->pgno,
+ GET_BOVERFLOW(dbp, h, indx + O_INDX)->pgno,
callback, cookie)) != 0)
goto err;
}
@@ -280,22 +278,19 @@ __bam_traverse(dbc, mode, root_pgno, callback, cookie)
case P_LDUP:
case P_LRECNO:
for (indx = 0; indx < NUM_ENT(h); indx += O_INDX) {
- bk = GET_BKEYDATA(h, indx);
+ bk = GET_BKEYDATA(dbp, h, indx);
if (B_TYPE(bk->type) == B_OVERFLOW &&
(ret = __db_traverse_big(dbp,
- GET_BOVERFLOW(h, indx)->pgno,
+ GET_BOVERFLOW(dbp, h, indx)->pgno,
callback, cookie)) != 0)
goto err;
}
break;
}
- already_put = 0;
- if ((ret = callback(dbp, h, cookie, &already_put)) != 0)
- goto err;
+ ret = callback(dbp, h, cookie, &already_put);
-err: if (!already_put &&
- (t_ret = memp_fput(dbp->mpf, h, 0)) != 0 && ret != 0)
+err: if (!already_put && (t_ret = mpf->put(mpf, h, 0)) != 0 && ret != 0)
ret = t_ret;
__LPUT(dbc, lock);
@@ -316,33 +311,34 @@ __bam_stat_callback(dbp, h, cookie, putp)
int *putp;
{
DB_BTREE_STAT *sp;
- db_indx_t indx, top;
+ db_indx_t indx, *inp, top;
u_int8_t type;
sp = cookie;
*putp = 0;
top = NUM_ENT(h);
+ inp = P_INP(dbp, h);
switch (TYPE(h)) {
case P_IBTREE:
case P_IRECNO:
++sp->bt_int_pg;
- sp->bt_int_pgfree += P_FREESPACE(h);
+ sp->bt_int_pgfree += P_FREESPACE(dbp, h);
break;
case P_LBTREE:
/* Correct for on-page duplicates and deleted items. */
for (indx = 0; indx < top; indx += P_INDX) {
if (indx + P_INDX >= top ||
- h->inp[indx] != h->inp[indx + P_INDX])
+ inp[indx] != inp[indx + P_INDX])
++sp->bt_nkeys;
- type = GET_BKEYDATA(h, indx + O_INDX)->type;
+ type = GET_BKEYDATA(dbp, h, indx + O_INDX)->type;
if (!B_DISSET(type) && B_TYPE(type) != B_DUPLICATE)
++sp->bt_ndata;
}
++sp->bt_leaf_pg;
- sp->bt_leaf_pgfree += P_FREESPACE(h);
+ sp->bt_leaf_pgfree += P_FREESPACE(dbp, h);
break;
case P_LRECNO:
/*
@@ -356,39 +352,39 @@ __bam_stat_callback(dbp, h, cookie, putp)
* Correct for deleted items in non-renumbering
* Recno databases.
*/
- if (F_ISSET(dbp, DB_RE_RENUMBER))
+ if (F_ISSET(dbp, DB_AM_RENUMBER))
sp->bt_ndata += top;
else
for (indx = 0; indx < top; indx += O_INDX) {
- type = GET_BKEYDATA(h, indx)->type;
+ type = GET_BKEYDATA(dbp, h, indx)->type;
if (!B_DISSET(type))
++sp->bt_ndata;
}
++sp->bt_leaf_pg;
- sp->bt_leaf_pgfree += P_FREESPACE(h);
+ sp->bt_leaf_pgfree += P_FREESPACE(dbp, h);
} else {
sp->bt_ndata += top;
++sp->bt_dup_pg;
- sp->bt_dup_pgfree += P_FREESPACE(h);
+ sp->bt_dup_pgfree += P_FREESPACE(dbp, h);
}
break;
case P_LDUP:
/* Correct for deleted items. */
for (indx = 0; indx < top; indx += O_INDX)
- if (!B_DISSET(GET_BKEYDATA(h, indx)->type))
+ if (!B_DISSET(GET_BKEYDATA(dbp, h, indx)->type))
++sp->bt_ndata;
++sp->bt_dup_pg;
- sp->bt_dup_pgfree += P_FREESPACE(h);
+ sp->bt_dup_pgfree += P_FREESPACE(dbp, h);
break;
case P_OVERFLOW:
++sp->bt_over_pg;
- sp->bt_over_pgfree += P_OVFLSPACE(dbp->pgsize, h);
+ sp->bt_over_pgfree += P_OVFLSPACE(dbp, dbp->pgsize, h);
break;
default:
- return (__db_pgfmt(dbp, h->pgno));
+ return (__db_pgfmt(dbp->dbenv, h->pgno));
}
return (0);
}
@@ -421,13 +417,18 @@ __bam_key_range(dbp, txn, dbt, kp, flags)
if (flags != 0)
return (__db_ferr(dbp->dbenv, "DB->key_range", 0));
+ /* Check for consistent transaction usage. */
+ if ((ret = __db_check_txn(dbp, txn, DB_LOCK_INVALIDID, 1)) != 0)
+ return (ret);
+
/* Acquire a cursor. */
if ((ret = dbp->cursor(dbp, txn, &dbc, 0)) != 0)
return (ret);
DEBUG_LWRITE(dbc, NULL, "bam_key_range", NULL, NULL, 0);
- if ((ret = __bam_search(dbc, dbt, S_STK_ONLY, 1, NULL, &exact)) != 0)
+ if ((ret = __bam_search(dbc, PGNO_INVALID,
+ dbt, S_STK_ONLY, 1, NULL, &exact)) != 0)
goto err;
cp = (BTREE_CURSOR *)dbc->internal;
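The bt_stat.c hunks drop the caller-supplied allocator from __bam_stat, fold DB_RECORDCOUNT and DB_CACHED_COUNTS into DB_FAST_STAT, and return memory obtained with __os_umalloc. A hedged caller sketch (not part of the patch; the function name is invented):

#include <stdio.h>

#include "db_int.h"
#include "dbinc/btree.h"

/*
 * example_fast_stat --
 *	Illustration of the new __bam_stat() interface; not part of the
 *	patch.  The stat structure is allocated by the library and freed
 *	with __os_ufree by the caller.
 */
static int
example_fast_stat(dbp)
	DB *dbp;
{
	DB_BTREE_STAT *sp;
	int ret;

	if ((ret = __bam_stat(dbp, &sp, DB_FAST_STAT)) != 0)
		return (ret);
	printf("keys %lu, data items %lu\n",
	    (u_long)sp->bt_nkeys, (u_long)sp->bt_ndata);
	__os_ufree(dbp->dbenv, sp);
	return (0);
}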
diff --git a/bdb/btree/bt_upgrade.c b/bdb/btree/bt_upgrade.c
index 4032dba3b36..9f92648d739 100644
--- a/bdb/btree/bt_upgrade.c
+++ b/bdb/btree/bt_upgrade.c
@@ -1,13 +1,13 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Copyright (c) 1996-2002
* Sleepycat Software. All rights reserved.
*/
#include "db_config.h"
#ifndef lint
-static const char revid[] = "$Id: bt_upgrade.c,v 11.19 2000/11/30 00:58:29 ubell Exp $";
+static const char revid[] = "$Id: bt_upgrade.c,v 11.25 2002/08/06 06:11:13 bostic Exp $";
#endif /* not lint */
#ifndef NO_SYSTEM_INCLUDES
@@ -18,11 +18,9 @@ static const char revid[] = "$Id: bt_upgrade.c,v 11.19 2000/11/30 00:58:29 ubell
#endif
#include "db_int.h"
-#include "db_page.h"
-#include "db_swap.h"
-#include "btree.h"
-#include "db_am.h"
-#include "db_upgrade.h"
+#include "dbinc/db_page.h"
+#include "dbinc/db_am.h"
+#include "dbinc/db_upgrade.h"
/*
* __bam_30_btreemeta --
@@ -107,7 +105,7 @@ __bam_31_btreemeta(dbp, real_name, flags, fhp, h, dirtyp)
newmeta->minkey = oldmeta->minkey;
newmeta->maxkey = oldmeta->maxkey;
memmove(newmeta->dbmeta.uid,
- oldmeta->dbmeta.uid, sizeof(oldmeta->dbmeta.uid));
+ oldmeta->dbmeta.uid, sizeof(oldmeta->dbmeta.uid));
newmeta->dbmeta.flags = oldmeta->dbmeta.flags;
newmeta->dbmeta.record_count = 0;
newmeta->dbmeta.key_count = 0;
@@ -126,7 +124,7 @@ __bam_31_btreemeta(dbp, real_name, flags, fhp, h, dirtyp)
/*
* __bam_31_lbtree --
- * Upgrade the database btree leaf pages.
+ * Upgrade the database btree leaf pages.
*
* PUBLIC: int __bam_31_lbtree
* PUBLIC: __P((DB *, char *, u_int32_t, DB_FH *, PAGE *, int *));
@@ -147,15 +145,15 @@ __bam_31_lbtree(dbp, real_name, flags, fhp, h, dirtyp)
ret = 0;
for (indx = O_INDX; indx < NUM_ENT(h); indx += P_INDX) {
- bk = GET_BKEYDATA(h, indx);
+ bk = GET_BKEYDATA(dbp, h, indx);
if (B_TYPE(bk->type) == B_DUPLICATE) {
- pgno = GET_BOVERFLOW(h, indx)->pgno;
+ pgno = GET_BOVERFLOW(dbp, h, indx)->pgno;
if ((ret = __db_31_offdup(dbp, real_name, fhp,
LF_ISSET(DB_DUPSORT) ? 1 : 0, &pgno)) != 0)
break;
- if (pgno != GET_BOVERFLOW(h, indx)->pgno) {
+ if (pgno != GET_BOVERFLOW(dbp, h, indx)->pgno) {
*dirtyp = 1;
- GET_BOVERFLOW(h, indx)->pgno = pgno;
+ GET_BOVERFLOW(dbp, h, indx)->pgno = pgno;
}
}
}
diff --git a/bdb/btree/bt_verify.c b/bdb/btree/bt_verify.c
index 9f8647e7e2a..0cf8a47e476 100644
--- a/bdb/btree/bt_verify.c
+++ b/bdb/btree/bt_verify.c
@@ -1,16 +1,16 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1999, 2000
+ * Copyright (c) 1999-2002
* Sleepycat Software. All rights reserved.
*
- * $Id: bt_verify.c,v 1.44 2000/12/06 19:55:44 ubell Exp $
+ * $Id: bt_verify.c,v 1.76 2002/07/03 19:03:51 bostic Exp $
*/
#include "db_config.h"
#ifndef lint
-static const char revid[] = "$Id: bt_verify.c,v 1.44 2000/12/06 19:55:44 ubell Exp $";
+static const char revid[] = "$Id: bt_verify.c,v 1.76 2002/07/03 19:03:51 bostic Exp $";
#endif /* not lint */
#ifndef NO_SYSTEM_INCLUDES
@@ -20,9 +20,9 @@ static const char revid[] = "$Id: bt_verify.c,v 1.44 2000/12/06 19:55:44 ubell E
#endif
#include "db_int.h"
-#include "db_page.h"
-#include "db_verify.h"
-#include "btree.h"
+#include "dbinc/db_page.h"
+#include "dbinc/db_verify.h"
+#include "dbinc/btree.h"
static int __bam_safe_getdata __P((DB *, PAGE *, u_int32_t, int, DBT *, int *));
static int __bam_vrfy_inp __P((DB *, VRFY_DBINFO *, PAGE *, db_pgno_t,
@@ -79,15 +79,15 @@ __bam_vrfy_meta(dbp, vdp, meta, pgno, flags)
/* avoid division by zero */
ovflsize = meta->minkey > 0 ?
- B_MINKEY_TO_OVFLSIZE(meta->minkey, dbp->pgsize) : 0;
+ B_MINKEY_TO_OVFLSIZE(dbp, meta->minkey, dbp->pgsize) : 0;
if (meta->minkey < 2 ||
- ovflsize > B_MINKEY_TO_OVFLSIZE(DEFMINKEYPAGE, dbp->pgsize)) {
+ ovflsize > B_MINKEY_TO_OVFLSIZE(dbp, DEFMINKEYPAGE, dbp->pgsize)) {
pip->bt_minkey = 0;
isbad = 1;
EPRINT((dbp->dbenv,
- "Nonsensical bt_minkey value %lu on metadata page %lu",
- (u_long)meta->minkey, (u_long)pgno));
+ "Page %lu: nonsensical bt_minkey value %lu on metadata page",
+ (u_long)pgno, (u_long)meta->minkey));
} else
pip->bt_minkey = meta->minkey;
@@ -103,13 +103,13 @@ __bam_vrfy_meta(dbp, vdp, meta, pgno, flags)
* of the file, then the root page had better be page 1.
*/
pip->root = 0;
- if (meta->root == PGNO_INVALID
- || meta->root == pgno || !IS_VALID_PGNO(meta->root) ||
+ if (meta->root == PGNO_INVALID ||
+ meta->root == pgno || !IS_VALID_PGNO(meta->root) ||
(pgno == PGNO_BASE_MD && meta->root != 1)) {
isbad = 1;
EPRINT((dbp->dbenv,
- "Nonsensical root page %lu on metadata page %lu",
- (u_long)meta->root, (u_long)vdp->last_pgno));
+ "Page %lu: nonsensical root page %lu on metadata page",
+ (u_long)pgno, (u_long)meta->root));
} else
pip->root = meta->root;
@@ -125,7 +125,7 @@ __bam_vrfy_meta(dbp, vdp, meta, pgno, flags)
if (F_ISSET(&meta->dbmeta, BTM_DUP) && pgno == PGNO_BASE_MD) {
isbad = 1;
EPRINT((dbp->dbenv,
- "Btree metadata page %lu has both duplicates and multiple databases",
+"Page %lu: Btree metadata page has both duplicates and multiple databases",
(u_long)pgno));
}
F_SET(pip, VRFY_HAS_SUBDBS);
@@ -139,7 +139,7 @@ __bam_vrfy_meta(dbp, vdp, meta, pgno, flags)
F_SET(pip, VRFY_HAS_RECNUMS);
if (F_ISSET(pip, VRFY_HAS_RECNUMS) && F_ISSET(pip, VRFY_HAS_DUPS)) {
EPRINT((dbp->dbenv,
- "Btree metadata page %lu illegally has both recnums and dups",
+ "Page %lu: Btree metadata page illegally has both recnums and dups",
(u_long)pgno));
isbad = 1;
}
@@ -150,13 +150,13 @@ __bam_vrfy_meta(dbp, vdp, meta, pgno, flags)
} else if (F_ISSET(pip, VRFY_IS_RRECNO)) {
isbad = 1;
EPRINT((dbp->dbenv,
- "Metadata page %lu has renumber flag set but is not recno",
+ "Page %lu: metadata page has renumber flag set but is not recno",
(u_long)pgno));
}
if (F_ISSET(pip, VRFY_IS_RECNO) && F_ISSET(pip, VRFY_HAS_DUPS)) {
EPRINT((dbp->dbenv,
- "Recno metadata page %lu specifies duplicates",
+ "Page %lu: recno metadata page specifies duplicates",
(u_long)pgno));
isbad = 1;
}
@@ -170,8 +170,8 @@ __bam_vrfy_meta(dbp, vdp, meta, pgno, flags)
*/
isbad = 1;
EPRINT((dbp->dbenv,
- "re_len of %lu in non-fixed-length database",
- (u_long)pip->re_len));
+ "Page %lu: re_len of %lu in non-fixed-length database",
+ (u_long)pgno, (u_long)pip->re_len));
}
/*
@@ -179,7 +179,8 @@ __bam_vrfy_meta(dbp, vdp, meta, pgno, flags)
* not be and may still be correct.
*/
-err: if ((t_ret = __db_vrfy_putpageinfo(vdp, pip)) != 0 && ret == 0)
+err: if ((t_ret =
+ __db_vrfy_putpageinfo(dbp->dbenv, vdp, pip)) != 0 && ret == 0)
ret = t_ret;
return ((ret == 0 && isbad == 1) ? DB_VERIFY_BAD : ret);
}
@@ -242,7 +243,7 @@ __ram_vrfy_leaf(dbp, vdp, h, pgno, flags)
if (F_ISSET(pip, VRFY_HAS_DUPS)) {
EPRINT((dbp->dbenv,
- "Recno database has dups on page %lu", (u_long)pgno));
+ "Page %lu: Recno database has dups", (u_long)pgno));
ret = DB_VERIFY_BAD;
goto err;
}
@@ -255,7 +256,7 @@ __ram_vrfy_leaf(dbp, vdp, h, pgno, flags)
*/
re_len_guess = 0;
for (i = 0; i < NUM_ENT(h); i++) {
- bk = GET_BKEYDATA(h, i);
+ bk = GET_BKEYDATA(dbp, h, i);
/* KEYEMPTY. Go on. */
if (B_DISSET(bk->type))
continue;
@@ -266,8 +267,8 @@ __ram_vrfy_leaf(dbp, vdp, h, pgno, flags)
else {
isbad = 1;
EPRINT((dbp->dbenv,
- "Nonsensical type for item %lu, page %lu",
- (u_long)i, (u_long)pgno));
+ "Page %lu: nonsensical type for item %lu",
+ (u_long)pgno, (u_long)i));
continue;
}
if (re_len_guess == 0)
@@ -288,9 +289,10 @@ __ram_vrfy_leaf(dbp, vdp, h, pgno, flags)
/* Save off record count. */
pip->rec_cnt = NUM_ENT(h);
-err: if ((t_ret = __db_vrfy_putpageinfo(vdp, pip)) != 0 && ret == 0)
+err: if ((t_ret =
+ __db_vrfy_putpageinfo(dbp->dbenv, vdp, pip)) != 0 && ret == 0)
ret = t_ret;
- return ((ret == 0 && isbad == 1) ? DB_VERIFY_BAD : 0);
+ return ((ret == 0 && isbad == 1) ? DB_VERIFY_BAD : ret);
}
/*
@@ -362,7 +364,7 @@ __bam_vrfy(dbp, vdp, h, pgno, flags)
else
goto err;
EPRINT((dbp->dbenv,
- "item order check on page %lu unsafe: skipping",
+ "Page %lu: item order check unsafe: skipping",
(u_long)pgno));
} else if (!LF_ISSET(DB_NOORDERCHK) && (ret =
__bam_vrfy_itemorder(dbp, vdp, h, pgno, 0, 0, 0, flags)) != 0) {
@@ -377,9 +379,10 @@ __bam_vrfy(dbp, vdp, h, pgno, flags)
goto err;
}
-err: if ((t_ret = __db_vrfy_putpageinfo(vdp, pip)) != 0 && ret == 0)
+err: if ((t_ret =
+ __db_vrfy_putpageinfo(dbp->dbenv, vdp, pip)) != 0 && ret == 0)
ret = t_ret;
- return ((ret == 0 && isbad == 1) ? DB_VERIFY_BAD : 0);
+ return ((ret == 0 && isbad == 1) ? DB_VERIFY_BAD : ret);
}
/*
@@ -403,6 +406,7 @@ __ram_vrfy_inp(dbp, vdp, h, pgno, nentriesp, flags)
VRFY_PAGEINFO *pip;
int ret, t_ret, isbad;
u_int32_t himark, i, offset, nentries;
+ db_indx_t *inp;
u_int8_t *pagelayout, *p;
isbad = 0;
@@ -422,30 +426,31 @@ __ram_vrfy_inp(dbp, vdp, h, pgno, nentriesp, flags)
himark = dbp->pgsize;
if ((ret =
- __os_malloc(dbp->dbenv, dbp->pgsize, NULL, &pagelayout)) != 0)
+ __os_malloc(dbp->dbenv, dbp->pgsize, &pagelayout)) != 0)
goto err;
memset(pagelayout, 0, dbp->pgsize);
+ inp = P_INP(dbp, h);
for (i = 0; i < NUM_ENT(h); i++) {
- if ((u_int8_t *)h->inp + i >= (u_int8_t *)h + himark) {
+ if ((u_int8_t *)inp + i >= (u_int8_t *)h + himark) {
EPRINT((dbp->dbenv,
- "Page %lu entries listing %lu overlaps data",
+ "Page %lu: entries listing %lu overlaps data",
(u_long)pgno, (u_long)i));
ret = DB_VERIFY_BAD;
goto err;
}
- offset = h->inp[i];
+ offset = inp[i];
/*
* Check that the item offset is reasonable: it points
* somewhere after the inp array and before the end of the
* page.
*/
- if (offset <= (u_int32_t)((u_int8_t *)h->inp + i -
+ if (offset <= (u_int32_t)((u_int8_t *)inp + i -
(u_int8_t *)h) ||
offset > (u_int32_t)(dbp->pgsize - RINTERNAL_SIZE)) {
isbad = 1;
EPRINT((dbp->dbenv,
- "Bad offset %lu at page %lu index %lu",
- (u_long)offset, (u_long)pgno, (u_long)i));
+ "Page %lu: bad offset %lu at index %lu",
+ (u_long)pgno, (u_long)offset, (u_long)i));
continue;
}
@@ -456,7 +461,7 @@ __ram_vrfy_inp(dbp, vdp, h, pgno, nentriesp, flags)
nentries++;
/* Make sure this RINTERNAL is not multiply referenced. */
- ri = GET_RINTERNAL(h, i);
+ ri = GET_RINTERNAL(dbp, h, i);
if (pagelayout[offset] == 0) {
pagelayout[offset] = 1;
child.pgno = ri->pgno;
@@ -466,8 +471,8 @@ __ram_vrfy_inp(dbp, vdp, h, pgno, nentriesp, flags)
goto err;
} else {
EPRINT((dbp->dbenv,
- "RINTERNAL structure at offset %lu, page %lu referenced twice",
- (u_long)offset, (u_long)pgno));
+ "Page %lu: RINTERNAL structure at offset %lu referenced twice",
+ (u_long)pgno, (u_long)offset));
isbad = 1;
}
}
@@ -477,23 +482,25 @@ __ram_vrfy_inp(dbp, vdp, h, pgno, nentriesp, flags)
p += RINTERNAL_SIZE)
if (*p != 1) {
EPRINT((dbp->dbenv,
- "Gap between items at offset %lu, page %lu",
- (u_long)(p - pagelayout), (u_long)pgno));
+ "Page %lu: gap between items at offset %lu",
+ (u_long)pgno, (u_long)(p - pagelayout)));
isbad = 1;
}
if ((db_indx_t)himark != HOFFSET(h)) {
- EPRINT((dbp->dbenv, "Bad HOFFSET %lu, appears to be %lu",
- (u_long)(HOFFSET(h)), (u_long)himark));
+ EPRINT((dbp->dbenv,
+ "Page %lu: bad HOFFSET %lu, appears to be %lu",
+ (u_long)pgno, (u_long)(HOFFSET(h)), (u_long)himark));
isbad = 1;
}
*nentriesp = nentries;
-err: if ((t_ret = __db_vrfy_putpageinfo(vdp, pip)) != 0 && ret == 0)
+err: if ((t_ret =
+ __db_vrfy_putpageinfo(dbp->dbenv, vdp, pip)) != 0 && ret == 0)
ret = t_ret;
if (pagelayout != NULL)
- __os_free(pagelayout, dbp->pgsize);
+ __os_free(dbp->dbenv, pagelayout);
return ((ret == 0 && isbad == 1) ? DB_VERIFY_BAD : ret);
}
@@ -558,22 +565,24 @@ __bam_vrfy_inp(dbp, vdp, h, pgno, nentriesp, flags)
* it and the region immediately after it.
*/
himark = dbp->pgsize;
- if ((ret = __os_malloc(dbp->dbenv,
- dbp->pgsize, NULL, &pagelayout)) != 0)
+ if ((ret = __os_malloc(dbp->dbenv, dbp->pgsize, &pagelayout)) != 0)
goto err;
memset(pagelayout, 0, dbp->pgsize);
for (i = 0; i < NUM_ENT(h); i++) {
-
- ret = __db_vrfy_inpitem(dbp,
- h, pgno, i, 1, flags, &himark, &offset);
- if (ret == DB_VERIFY_BAD) {
+ switch (ret = __db_vrfy_inpitem(dbp,
+ h, pgno, i, 1, flags, &himark, &offset)) {
+ case 0:
+ break;
+ case DB_VERIFY_BAD:
isbad = 1;
continue;
- } else if (ret == DB_VERIFY_FATAL) {
+ case DB_VERIFY_FATAL:
isbad = 1;
goto err;
- } else if (ret != 0)
- DB_ASSERT(0);
+ default:
+ DB_ASSERT(ret != 0);
+ break;
+ }
/*
* We now have a plausible beginning for the item, and we know
@@ -582,7 +591,7 @@ __bam_vrfy_inp(dbp, vdp, h, pgno, nentriesp, flags)
* Mark the beginning and end in pagelayout so we can make sure
* items have no overlaps or gaps.
*/
- bk = GET_BKEYDATA(h, i);
+ bk = GET_BKEYDATA(dbp, h, i);
#define ITEM_BEGIN 1
#define ITEM_END 2
if (pagelayout[offset] == 0)
@@ -609,8 +618,8 @@ __bam_vrfy_inp(dbp, vdp, h, pgno, nentriesp, flags)
} else {
isbad = 1;
EPRINT((dbp->dbenv,
- "Duplicated item %lu on page %lu",
- (u_long)i, (u_long)pgno));
+ "Page %lu: duplicated item %lu",
+ (u_long)pgno, (u_long)i));
}
}
@@ -662,8 +671,8 @@ __bam_vrfy_inp(dbp, vdp, h, pgno, nentriesp, flags)
*/
if (isdupitem && pagelayout[endoff] != ITEM_END) {
EPRINT((dbp->dbenv,
- "Duplicated item %lu on page %lu",
- (u_long)i, (u_long)pgno));
+ "Page %lu: duplicated item %lu",
+ (u_long)pgno, (u_long)i));
isbad = 1;
} else if (pagelayout[endoff] == 0)
pagelayout[endoff] = ITEM_END;
@@ -676,8 +685,8 @@ __bam_vrfy_inp(dbp, vdp, h, pgno, nentriesp, flags)
if (B_DISSET(bk->type) && TYPE(h) != P_LRECNO) {
isbad = 1;
EPRINT((dbp->dbenv,
- "Item %lu on page %lu marked deleted",
- (u_long)i, (u_long)pgno));
+ "Page %lu: item %lu marked deleted",
+ (u_long)pgno, (u_long)i));
}
/*
@@ -696,13 +705,13 @@ __bam_vrfy_inp(dbp, vdp, h, pgno, nentriesp, flags)
if (TYPE(h) == P_IBTREE) {
isbad = 1;
EPRINT((dbp->dbenv,
- "Duplicate page referenced by internal btree page %lu at item %lu",
+ "Page %lu: duplicate page referenced by internal btree page at item %lu",
(u_long)pgno, (u_long)i));
break;
} else if (TYPE(h) == P_LRECNO) {
isbad = 1;
EPRINT((dbp->dbenv,
- "Duplicate page referenced by recno page %lu at item %lu",
+ "Page %lu: duplicate page referenced by recno page at item %lu",
(u_long)pgno, (u_long)i));
break;
}
@@ -717,9 +726,9 @@ __bam_vrfy_inp(dbp, vdp, h, pgno, nentriesp, flags)
if (bo->tlen > dbp->pgsize * vdp->last_pgno) {
isbad = 1;
EPRINT((dbp->dbenv,
- "Impossible tlen %lu, item %lu, page %lu",
- (u_long)bo->tlen, (u_long)i,
- (u_long)pgno));
+ "Page %lu: impossible tlen %lu, item %lu",
+ (u_long)pgno,
+ (u_long)bo->tlen, (u_long)i));
/* Don't save as a child. */
break;
}
@@ -728,8 +737,8 @@ __bam_vrfy_inp(dbp, vdp, h, pgno, nentriesp, flags)
bo->pgno == PGNO_INVALID) {
isbad = 1;
EPRINT((dbp->dbenv,
- "Offpage item %lu, page %lu has bad pgno",
- (u_long)i, (u_long)pgno));
+ "Page %lu: offpage item %lu has bad pgno %lu",
+ (u_long)pgno, (u_long)i, (u_long)bo->pgno));
/* Don't save as a child. */
break;
}
@@ -744,8 +753,8 @@ __bam_vrfy_inp(dbp, vdp, h, pgno, nentriesp, flags)
default:
isbad = 1;
EPRINT((dbp->dbenv,
- "Item %lu on page %lu of invalid type %lu",
- (u_long)i, (u_long)pgno));
+ "Page %lu: item %lu of invalid type %lu",
+ (u_long)pgno, (u_long)i));
break;
}
}
@@ -765,7 +774,7 @@ __bam_vrfy_inp(dbp, vdp, h, pgno, nentriesp, flags)
isbad = 1;
EPRINT((dbp->dbenv,
- "Gap between items, page %lu offset %lu",
+ "Page %lu: gap between items at offset %lu",
(u_long)pgno, (u_long)i));
/* Find the end of the gap */
for ( ; pagelayout[i + 1] == 0 &&
@@ -777,8 +786,8 @@ __bam_vrfy_inp(dbp, vdp, h, pgno, nentriesp, flags)
if (i != ALIGN(i, sizeof(u_int32_t))) {
isbad = 1;
EPRINT((dbp->dbenv,
- "Offset %lu page %lu unaligned",
- (u_long)i, (u_long)pgno));
+ "Page %lu: offset %lu unaligned",
+ (u_long)pgno, (u_long)i));
}
initem = 1;
nentries++;
@@ -791,7 +800,7 @@ __bam_vrfy_inp(dbp, vdp, h, pgno, nentriesp, flags)
*/
isbad = 1;
EPRINT((dbp->dbenv,
- "Overlapping items, page %lu offset %lu",
+ "Page %lu: overlapping items at offset %lu",
(u_long)pgno, (u_long)i));
break;
default:
@@ -816,24 +825,26 @@ __bam_vrfy_inp(dbp, vdp, h, pgno, nentriesp, flags)
*/
isbad = 1;
EPRINT((dbp->dbenv,
- "Overlapping items, page %lu offset %lu",
+ "Page %lu: overlapping items at offset %lu",
(u_long)pgno, (u_long)i));
break;
}
- (void)__os_free(pagelayout, dbp->pgsize);
+ (void)__os_free(dbp->dbenv, pagelayout);
/* Verify HOFFSET. */
if ((db_indx_t)himark != HOFFSET(h)) {
- EPRINT((dbp->dbenv, "Bad HOFFSET %lu, appears to be %lu",
- (u_long)HOFFSET(h), (u_long)himark));
+ EPRINT((dbp->dbenv,
+ "Page %lu: bad HOFFSET %lu, appears to be %lu",
+ (u_long)pgno, (u_long)HOFFSET(h), (u_long)himark));
isbad = 1;
}
err: if (nentriesp != NULL)
*nentriesp = nentries;
- if ((t_ret = __db_vrfy_putpageinfo(vdp, pip)) != 0 && ret == 0)
+ if ((t_ret =
+ __db_vrfy_putpageinfo(dbp->dbenv, vdp, pip)) != 0 && ret == 0)
ret = t_ret;
return ((isbad == 1 && ret == 0) ? DB_VERIFY_BAD : ret);
@@ -865,14 +876,14 @@ __bam_vrfy_itemorder(dbp, vdp, h, pgno, nentries, ovflok, hasdups, flags)
int ovflok, hasdups;
u_int32_t flags;
{
- DBT dbta, dbtb, dup1, dup2, *p1, *p2, *tmp;
+ DBT dbta, dbtb, dup_1, dup_2, *p1, *p2, *tmp;
BTREE *bt;
BINTERNAL *bi;
BKEYDATA *bk;
BOVERFLOW *bo;
VRFY_PAGEINFO *pip;
db_indx_t i;
- int cmp, freedup1, freedup2, isbad, ret, t_ret;
+ int cmp, freedup_1, freedup_2, isbad, ret, t_ret;
int (*dupfunc) __P((DB *, const DBT *, const DBT *));
int (*func) __P((DB *, const DBT *, const DBT *));
void *buf1, *buf2, *tmpbuf;
@@ -949,7 +960,7 @@ __bam_vrfy_itemorder(dbp, vdp, h, pgno, nentries, ovflok, hasdups, flags)
*/
switch (TYPE(h)) {
case P_IBTREE:
- bi = GET_BINTERNAL(h, i);
+ bi = GET_BINTERNAL(dbp, h, i);
if (B_TYPE(bi->type) == B_OVERFLOW) {
bo = (BOVERFLOW *)(bi->data);
goto overflow;
@@ -972,14 +983,14 @@ __bam_vrfy_itemorder(dbp, vdp, h, pgno, nentries, ovflok, hasdups, flags)
if (i == 0 && bi->len != 0) {
isbad = 1;
EPRINT((dbp->dbenv,
- "Lowest key on internal page %lu of nonzero length",
+ "Page %lu: lowest key on internal page of nonzero length",
(u_long)pgno));
}
#endif
break;
case P_LBTREE:
case P_LDUP:
- bk = GET_BKEYDATA(h, i);
+ bk = GET_BKEYDATA(dbp, h, i);
if (B_TYPE(bk->type) == B_OVERFLOW) {
bo = (BOVERFLOW *)bk;
goto overflow;
@@ -1030,8 +1041,8 @@ overflow: if (!ovflok) {
p2, bo->tlen, bo->pgno, NULL, NULL)) != 0) {
isbad = 1;
EPRINT((dbp->dbenv,
- "Error %lu in fetching overflow item %lu, page %lu",
- (u_long)ret, (u_long)i, (u_long)pgno));
+ "Page %lu: error %lu in fetching overflow item %lu",
+ (u_long)pgno, (u_long)ret, (u_long)i));
}
/* In case it got realloc'ed and thus changed. */
buf2 = p2->data;
@@ -1045,7 +1056,7 @@ overflow: if (!ovflok) {
if (cmp > 0) {
isbad = 1;
EPRINT((dbp->dbenv,
- "Out-of-order key, page %lu item %lu",
+ "Page %lu: out-of-order key at entry %lu",
(u_long)pgno, (u_long)i));
/* proceed */
} else if (cmp == 0) {
@@ -1060,7 +1071,7 @@ overflow: if (!ovflok) {
else if (hasdups == 0) {
isbad = 1;
EPRINT((dbp->dbenv,
- "Database with no duplicates has duplicated keys on page %lu",
+ "Page %lu: database with no duplicates has duplicated keys",
(u_long)pgno));
}
@@ -1092,11 +1103,11 @@ overflow: if (!ovflok) {
* dups are probably (?) rare.
*/
if (((ret = __bam_safe_getdata(dbp,
- h, i - 1, ovflok, &dup1,
- &freedup1)) != 0) ||
+ h, i - 1, ovflok, &dup_1,
+ &freedup_1)) != 0) ||
((ret = __bam_safe_getdata(dbp,
- h, i + 1, ovflok, &dup2,
- &freedup2)) != 0))
+ h, i + 1, ovflok, &dup_2,
+ &freedup_2)) != 0))
goto err;
/*
@@ -1105,8 +1116,8 @@ overflow: if (!ovflok) {
* it's not safe to chase them now.
* Mark an incomplete and return.
*/
- if (dup1.data == NULL ||
- dup2.data == NULL) {
+ if (dup_1.data == NULL ||
+ dup_2.data == NULL) {
DB_ASSERT(!ovflok);
F_SET(pip, VRFY_INCOMPLETE);
goto err;
@@ -1118,26 +1129,28 @@ overflow: if (!ovflok) {
* until we do the structure check
* and see whether DUPSORT is set.
*/
- if (dupfunc(dbp, &dup1, &dup2) > 0)
+ if (dupfunc(dbp, &dup_1, &dup_2) > 0)
F_SET(pip, VRFY_DUPS_UNSORTED);
- if (freedup1)
- __os_free(dup1.data, 0);
- if (freedup2)
- __os_free(dup2.data, 0);
+ if (freedup_1)
+ __os_ufree(dbp->dbenv,
+ dup_1.data);
+ if (freedup_2)
+ __os_ufree(dbp->dbenv,
+ dup_2.data);
}
}
}
}
-err: if (pip != NULL &&
- ((t_ret = __db_vrfy_putpageinfo(vdp, pip)) != 0) && ret == 0)
+err: if (pip != NULL && ((t_ret =
+ __db_vrfy_putpageinfo(dbp->dbenv, vdp, pip)) != 0) && ret == 0)
ret = t_ret;
if (buf1 != NULL)
- __os_free(buf1, 0);
+ __os_ufree(dbp->dbenv, buf1);
if (buf2 != NULL)
- __os_free(buf2, 0);
+ __os_ufree(dbp->dbenv, buf2);
return ((ret == 0 && isbad == 1) ? DB_VERIFY_BAD : ret);
}
@@ -1173,7 +1186,7 @@ __bam_vrfy_structure(dbp, vdp, meta_pgno, flags)
goto err;
if (p != 0) {
EPRINT((dbp->dbenv,
- "Btree metadata page number %lu observed twice",
+ "Page %lu: btree metadata page observed twice",
(u_long)meta_pgno));
ret = DB_VERIFY_BAD;
goto err;
@@ -1185,7 +1198,8 @@ __bam_vrfy_structure(dbp, vdp, meta_pgno, flags)
if (root == 0) {
EPRINT((dbp->dbenv,
- "Btree metadata page %lu has no root", (u_long)meta_pgno));
+ "Page %lu: btree metadata page has no root",
+ (u_long)meta_pgno));
ret = DB_VERIFY_BAD;
goto err;
}
@@ -1222,7 +1236,7 @@ __bam_vrfy_structure(dbp, vdp, meta_pgno, flags)
*/
if (mip->re_len > 0 && relen > 0 && mip->re_len != relen) {
EPRINT((dbp->dbenv,
- "Recno database with meta page %lu has bad re_len %lu",
+ "Page %lu: recno database has bad re_len %lu",
(u_long)meta_pgno, (u_long)relen));
ret = DB_VERIFY_BAD;
goto err;
@@ -1231,24 +1245,24 @@ __bam_vrfy_structure(dbp, vdp, meta_pgno, flags)
break;
case P_LDUP:
EPRINT((dbp->dbenv,
- "Duplicate tree referenced from metadata page %lu",
+ "Page %lu: duplicate tree referenced from metadata page",
(u_long)meta_pgno));
ret = DB_VERIFY_BAD;
break;
default:
EPRINT((dbp->dbenv,
- "Btree root of incorrect type %lu on meta page %lu",
- (u_long)rip->type, (u_long)meta_pgno));
+ "Page %lu: btree root of incorrect type %lu on metadata page",
+ (u_long)meta_pgno, (u_long)rip->type));
ret = DB_VERIFY_BAD;
break;
}
-err: if (mip != NULL &&
- ((t_ret = __db_vrfy_putpageinfo(vdp, mip)) != 0) && ret == 0)
- t_ret = ret;
- if (rip != NULL &&
- ((t_ret = __db_vrfy_putpageinfo(vdp, rip)) != 0) && ret == 0)
- t_ret = ret;
+err: if (mip != NULL && ((t_ret =
+ __db_vrfy_putpageinfo(dbp->dbenv, vdp, mip)) != 0) && ret == 0)
+ ret = t_ret;
+ if (rip != NULL && ((t_ret =
+ __db_vrfy_putpageinfo(dbp->dbenv, vdp, rip)) != 0) && ret == 0)
+ ret = t_ret;
return (ret);
}
@@ -1273,20 +1287,26 @@ __bam_vrfy_subtree(dbp,
{
BINTERNAL *li, *ri, *lp, *rp;
DB *pgset;
+ DB_MPOOLFILE *mpf;
DBC *cc;
PAGE *h;
VRFY_CHILDINFO *child;
VRFY_PAGEINFO *pip;
- db_recno_t nrecs, child_nrecs;
db_indx_t i;
- int ret, t_ret, isbad, toplevel, p;
+ db_pgno_t next_pgno, prev_pgno;
+ db_recno_t child_nrecs, nrecs;
+ u_int32_t child_level, child_relen, level, relen, stflags;
+ u_int8_t leaf_type;
int (*func) __P((DB *, const DBT *, const DBT *));
- u_int32_t level, child_level, stflags, child_relen, relen;
+ int isbad, p, ret, t_ret, toplevel;
+ mpf = dbp->mpf;
ret = isbad = 0;
nrecs = 0;
h = NULL;
relen = 0;
+ leaf_type = P_INVALID;
+ next_pgno = prev_pgno = PGNO_INVALID;
rp = (BINTERNAL *)r;
lp = (BINTERNAL *)l;
@@ -1300,10 +1320,33 @@ __bam_vrfy_subtree(dbp,
cc = NULL;
level = pip->bt_level;
- toplevel = LF_ISSET(ST_TOPLEVEL);
+ toplevel = LF_ISSET(ST_TOPLEVEL) ? 1 : 0;
LF_CLR(ST_TOPLEVEL);
/*
+ * If this is the root, initialize the vdp's prev- and next-pgno
+ * accounting.
+ *
+ * For each leaf page we hit, we'll want to make sure that
+ * vdp->prev_pgno is the same as pip->prev_pgno and vdp->next_pgno is
+ * our page number. Then, we'll set vdp->next_pgno to pip->next_pgno
+ * and vdp->prev_pgno to our page number, and the next leaf page in
+ * line should be able to do the same verification.
+ */
+ if (toplevel) {
+ /*
+ * Cache the values stored in the vdp so that if we're an
+ * auxiliary tree such as an off-page duplicate set, our
+ * caller's leaf page chain doesn't get lost.
+ */
+ prev_pgno = vdp->prev_pgno;
+ next_pgno = vdp->next_pgno;
+ leaf_type = vdp->leaf_type;
+ vdp->next_pgno = vdp->prev_pgno = PGNO_INVALID;
+ vdp->leaf_type = P_INVALID;
+ }
+
+ /*
* We are recursively descending a btree, starting from the root
* and working our way out to the leaves.
*
@@ -1333,8 +1376,53 @@ __bam_vrfy_subtree(dbp,
case P_LDUP:
case P_LBTREE:
/*
- * Cases 1, 2 and 3 (overflow pages are common to all three);
- * traverse child list, looking for overflows.
+ * Cases 1, 2 and 3.
+ *
+ * We're some sort of leaf page; verify
+ * that our linked list of leaves is consistent.
+ */
+ if (vdp->leaf_type == P_INVALID) {
+ /*
+ * First leaf page. Set the type that all its
+ * successors should be, and verify that our prev_pgno
+ * is PGNO_INVALID.
+ */
+ vdp->leaf_type = pip->type;
+ if (pip->prev_pgno != PGNO_INVALID)
+ goto bad_prev;
+ } else {
+ /*
+ * Successor leaf page. Check our type, the previous
+ * page's next_pgno, and our prev_pgno.
+ */
+ if (pip->type != vdp->leaf_type) {
+ EPRINT((dbp->dbenv,
+ "Page %lu: unexpected page type %lu found in leaf chain (expected %lu)",
+ (u_long)pip->pgno, (u_long)pip->type,
+ (u_long)vdp->leaf_type));
+ isbad = 1;
+ }
+ if (pip->pgno != vdp->next_pgno) {
+ EPRINT((dbp->dbenv,
+ "Page %lu: incorrect next_pgno %lu found in leaf chain (should be %lu)",
+ (u_long)vdp->prev_pgno,
+ (u_long)vdp->next_pgno, (u_long)pip->pgno));
+ isbad = 1;
+ }
+ if (pip->prev_pgno != vdp->prev_pgno) {
+bad_prev: EPRINT((dbp->dbenv,
+ "Page %lu: incorrect prev_pgno %lu found in leaf chain (should be %lu)",
+ (u_long)pip->pgno, (u_long)pip->prev_pgno,
+ (u_long)vdp->prev_pgno));
+ isbad = 1;
+ }
+ }
+ vdp->prev_pgno = pip->pgno;
+ vdp->next_pgno = pip->next_pgno;
+
+ /*
+ * Overflow pages are common to all three leaf types;
+ * traverse the child list, looking for overflows.
*/
if ((ret = __db_vrfy_childcursor(vdp, &cc)) != 0)
goto err;
@@ -1360,7 +1448,7 @@ __bam_vrfy_subtree(dbp,
!(LF_ISSET(ST_DUPOK) && !LF_ISSET(ST_DUPSORT))) {
isbad = 1;
EPRINT((dbp->dbenv,
- "Recno leaf page %lu in non-recno tree",
+					    "Page %lu: recno leaf page in non-recno tree",
(u_long)pgno));
goto done;
}
@@ -1372,7 +1460,7 @@ __bam_vrfy_subtree(dbp,
*/
isbad = 1;
EPRINT((dbp->dbenv,
- "Non-recno leaf page %lu in recno tree",
+ "Page %lu: non-recno leaf page in recno tree",
(u_long)pgno));
goto done;
}
@@ -1389,7 +1477,7 @@ __bam_vrfy_subtree(dbp,
if (!LF_ISSET(ST_DUPOK)) {
isbad = 1;
EPRINT((dbp->dbenv,
- "Duplicates on page %lu in non-dup btree",
+ "Page %lu: duplicates in non-dup btree",
(u_long)pgno));
} else {
/*
@@ -1414,8 +1502,8 @@ __bam_vrfy_subtree(dbp,
}
if ((ret = __bam_vrfy_subtree(
dbp, vdp, child->pgno, NULL,
- NULL, stflags, NULL, NULL,
- NULL)) != 0) {
+ NULL, stflags | ST_TOPLEVEL,
+ NULL, NULL, NULL)) != 0) {
if (ret !=
DB_VERIFY_BAD)
goto err;
@@ -1436,14 +1524,13 @@ __bam_vrfy_subtree(dbp,
if (F_ISSET(pip, VRFY_DUPS_UNSORTED) &&
LF_ISSET(ST_DUPSORT)) {
EPRINT((dbp->dbenv,
- "Unsorted duplicate set at page %lu in sorted-dup database",
+ "Page %lu: unsorted duplicate set in sorted-dup database",
(u_long)pgno));
isbad = 1;
}
}
}
goto leaf;
- break;
case P_IBTREE:
case P_IRECNO:
/* We handle these below. */
@@ -1455,10 +1542,18 @@ __bam_vrfy_subtree(dbp,
* Note that the code at the "done" label assumes that the
* current page is a btree/recno one of some sort; this
* is not the case here, so we goto err.
+ *
+ * If the page is entirely zeroed, its pip->type will be a lie
+ * (we assumed it was a hash page, as they're allowed to be
+ * zeroed); handle this case specially.
*/
- EPRINT((dbp->dbenv,
- "Page %lu is of inappropriate type %lu",
- (u_long)pgno, (u_long)pip->type));
+ if (F_ISSET(pip, VRFY_IS_ALLZEROES))
+ ZEROPG_ERR_PRINT(dbp->dbenv,
+ pgno, "btree or recno page");
+ else
+ EPRINT((dbp->dbenv,
+ "Page %lu: btree or recno page is of inappropriate type %lu",
+ (u_long)pgno, (u_long)pip->type));
ret = DB_VERIFY_BAD;
goto err;
}
@@ -1500,8 +1595,9 @@ __bam_vrfy_subtree(dbp,
relen != child_relen) {
isbad = 1;
EPRINT((dbp->dbenv,
- "Recno page %lu returned bad re_len",
- (u_long)child->pgno));
+ "Page %lu: recno page returned bad re_len %lu",
+ (u_long)child->pgno,
+ (u_long)child_relen));
}
if (relenp)
*relenp = relen;
@@ -1510,10 +1606,8 @@ __bam_vrfy_subtree(dbp,
nrecs += child_nrecs;
if (level != child_level + 1) {
isbad = 1;
- EPRINT((dbp->dbenv, "%s%lu%s%lu%s%lu",
- "Recno level incorrect on page ",
- (u_long)child->pgno, ": got ",
- (u_long)child_level, ", expected ",
+ EPRINT((dbp->dbenv, "Page %lu: recno level incorrect: got %lu, expected %lu",
+ (u_long)child->pgno, (u_long)child_level,
(u_long)(level - 1)));
}
} else if (child->type == V_OVERFLOW &&
@@ -1543,12 +1637,12 @@ __bam_vrfy_subtree(dbp,
* itself, which must sort lower than all entries on its child;
* ri will be the key to its right, which must sort greater.
*/
- if (h == NULL && (ret = memp_fget(dbp->mpf, &pgno, 0, &h)) != 0)
+ if (h == NULL && (ret = mpf->get(mpf, &pgno, 0, &h)) != 0)
goto err;
for (i = 0; i < pip->entries; i += O_INDX) {
- li = GET_BINTERNAL(h, i);
+ li = GET_BINTERNAL(dbp, h, i);
ri = (i + O_INDX < pip->entries) ?
- GET_BINTERNAL(h, i + O_INDX) : NULL;
+ GET_BINTERNAL(dbp, h, i + O_INDX) : NULL;
/*
* The leftmost key is forcibly sorted less than all entries,
@@ -1578,18 +1672,18 @@ __bam_vrfy_subtree(dbp,
if (li->nrecs != child_nrecs) {
isbad = 1;
EPRINT((dbp->dbenv,
- "Item %lu page %lu has incorrect record count of %lu, should be %lu",
- (u_long)i, (u_long)pgno, (u_long)li->nrecs,
+ "Page %lu: item %lu has incorrect record count of %lu, should be %lu",
+ (u_long)pgno, (u_long)i, (u_long)li->nrecs,
(u_long)child_nrecs));
}
}
if (level != child_level + 1) {
isbad = 1;
- EPRINT((dbp->dbenv, "%s%lu%s%lu%s%lu",
- "Btree level incorrect on page ", (u_long)li->pgno,
- ": got ", (u_long)child_level, ", expected ",
- (u_long)(level - 1)));
+ EPRINT((dbp->dbenv,
+ "Page %lu: Btree level incorrect: got %lu, expected %lu",
+ (u_long)li->pgno,
+ (u_long)child_level, (u_long)(level - 1)));
}
}
@@ -1616,7 +1710,7 @@ done: if (F_ISSET(pip, VRFY_INCOMPLETE) && isbad == 0 && ret == 0) {
* isbad == 0, though, it's now safe to do so, as we've
* traversed any child overflow pages. Do it.
*/
- if (h == NULL && (ret = memp_fget(dbp->mpf, &pgno, 0, &h)) != 0)
+ if (h == NULL && (ret = mpf->get(mpf, &pgno, 0, &h)) != 0)
goto err;
if ((ret = __bam_vrfy_itemorder(dbp,
vdp, h, pgno, 0, 1, 0, flags)) != 0)
@@ -1625,12 +1719,35 @@ done: if (F_ISSET(pip, VRFY_INCOMPLETE) && isbad == 0 && ret == 0) {
}
/*
+ * It's possible to get to this point with a page that has no
+ * items, but without having detected any sort of failure yet.
+ * Having zero items is legal if it's a leaf--it may be the
+ * root page in an empty tree, or the tree may have been
+ * modified with the DB_REVSPLITOFF flag set (there's no way
+ * to tell from what's on disk). For an internal page,
+ * though, having no items is a problem (all internal pages
+ * must have children).
+ */
+ if (isbad == 0 && ret == 0) {
+ if (h == NULL && (ret = mpf->get(mpf, &pgno, 0, &h)) != 0)
+ goto err;
+
+ if (NUM_ENT(h) == 0 && ISINTERNAL(h)) {
+ EPRINT((dbp->dbenv,
+ "Page %lu: internal page is empty and should not be",
+ (u_long)pgno));
+ isbad = 1;
+ goto err;
+ }
+ }
+
+ /*
* Our parent has sent us BINTERNAL pointers to parent records
* so that we can verify our place with respect to them. If it's
* appropriate--we have a default sort function--verify this.
*/
if (isbad == 0 && ret == 0 && !LF_ISSET(DB_NOORDERCHK) && lp != NULL) {
- if (h == NULL && (ret = memp_fget(dbp->mpf, &pgno, 0, &h)) != 0)
+ if (h == NULL && (ret = mpf->get(mpf, &pgno, 0, &h)) != 0)
goto err;
/*
@@ -1662,7 +1779,7 @@ done: if (F_ISSET(pip, VRFY_INCOMPLETE) && isbad == 0 && ret == 0) {
if (LF_ISSET(ST_RECNUM) && nrecs != pip->rec_cnt && toplevel) {
isbad = 1;
EPRINT((dbp->dbenv,
- "Bad record count on page %lu: got %lu, expected %lu",
+ "Page %lu: bad record count: has %lu records, claims %lu",
(u_long)pgno, (u_long)nrecs, (u_long)pip->rec_cnt));
}
@@ -1676,13 +1793,32 @@ done: if (F_ISSET(pip, VRFY_INCOMPLETE) && isbad == 0 && ret == 0) {
goto err;
if (p != 0) {
isbad = 1;
- EPRINT((dbp->dbenv, "Page %lu linked twice", (u_long)pgno));
+ EPRINT((dbp->dbenv, "Page %lu: linked twice", (u_long)pgno));
} else if ((ret = __db_vrfy_pgset_inc(pgset, pgno)) != 0)
goto err;
-err: if (h != NULL && (t_ret = memp_fput(dbp->mpf, h, 0)) != 0 && ret == 0)
+ if (toplevel)
+ /*
+ * The last page's next_pgno in the leaf chain should have been
+ * PGNO_INVALID.
+ */
+ if (vdp->next_pgno != PGNO_INVALID) {
+ EPRINT((dbp->dbenv, "Page %lu: unterminated leaf chain",
+ (u_long)vdp->prev_pgno));
+ isbad = 1;
+ }
+
+err: if (toplevel) {
+ /* Restore our caller's settings. */
+ vdp->next_pgno = next_pgno;
+ vdp->prev_pgno = prev_pgno;
+ vdp->leaf_type = leaf_type;
+ }
+
+ if (h != NULL && (t_ret = mpf->put(mpf, h, 0)) != 0 && ret == 0)
ret = t_ret;
- if ((t_ret = __db_vrfy_putpageinfo(vdp, pip)) != 0 && ret == 0)
+ if ((t_ret =
+ __db_vrfy_putpageinfo(dbp->dbenv, vdp, pip)) != 0 && ret == 0)
ret = t_ret;
if (cc != NULL && ((t_ret = __db_vrfy_ccclose(cc)) != 0) && ret == 0)
ret = t_ret;
@@ -1720,6 +1856,14 @@ __bam_vrfy_treeorder(dbp, pgno, h, lp, rp, func, flags)
F_SET(&dbt, DB_DBT_MALLOC);
ret = 0;
+ /*
+ * Empty pages are sorted correctly by definition. We check
+ * to see whether they ought to be empty elsewhere; leaf
+ * pages legally may be.
+ */
+ if (NUM_ENT(h) == 0)
+ return (0);
+
switch (TYPE(h)) {
case P_IBTREE:
case P_LDUP:
@@ -1760,7 +1904,8 @@ __bam_vrfy_treeorder(dbp, pgno, h, lp, rp, func, flags)
} else {
DB_ASSERT(0);
EPRINT((dbp->dbenv,
- "Unknown type for internal record"));
+ "Page %lu: unknown type for internal record",
+ (u_long)PGNO(h)));
return (EINVAL);
}
@@ -1768,17 +1913,17 @@ __bam_vrfy_treeorder(dbp, pgno, h, lp, rp, func, flags)
if ((ret = __bam_cmp(dbp, &dbt, h, 0, func, &cmp)) == 0) {
if (cmp > 0) {
EPRINT((dbp->dbenv,
- "First item on page %lu sorted greater than parent entry",
+ "Page %lu: first item on page sorted greater than parent entry",
(u_long)PGNO(h)));
ret = DB_VERIFY_BAD;
}
} else
EPRINT((dbp->dbenv,
- "First item on page %lu had comparison error",
+ "Page %lu: first item on page had comparison error",
(u_long)PGNO(h)));
if (dbt.data != lp->data)
- __os_free(dbt.data, 0);
+ __os_ufree(dbp->dbenv, dbt.data);
if (ret != 0)
return (ret);
}
@@ -1795,7 +1940,8 @@ __bam_vrfy_treeorder(dbp, pgno, h, lp, rp, func, flags)
} else {
DB_ASSERT(0);
EPRINT((dbp->dbenv,
- "Unknown type for internal record"));
+ "Page %lu: unknown type for internal record",
+ (u_long)PGNO(h)));
return (EINVAL);
}
@@ -1803,17 +1949,17 @@ __bam_vrfy_treeorder(dbp, pgno, h, lp, rp, func, flags)
if ((ret = __bam_cmp(dbp, &dbt, h, last, func, &cmp)) == 0) {
if (cmp < 0) {
EPRINT((dbp->dbenv,
- "Last item on page %lu sorted greater than parent entry",
+ "Page %lu: last item on page sorted greater than parent entry",
(u_long)PGNO(h)));
ret = DB_VERIFY_BAD;
}
} else
EPRINT((dbp->dbenv,
- "Last item on page %lu had comparison error",
+ "Page %lu: last item on page had comparison error",
(u_long)PGNO(h)));
if (dbt.data != rp->data)
- __os_free(dbt.data, 0);
+ __os_ufree(dbp->dbenv, dbt.data);
}
return (ret);
@@ -1843,7 +1989,7 @@ __bam_salvage(dbp, vdp, pgno, pgtype, h, handle, callback, key, flags)
DBT dbt, unkdbt;
BKEYDATA *bk;
BOVERFLOW *bo;
- db_indx_t i, beg, end;
+ db_indx_t i, beg, end, *inp;
u_int32_t himark;
u_int8_t *pgmap;
void *ovflbuf;
@@ -1854,24 +2000,25 @@ __bam_salvage(dbp, vdp, pgno, pgtype, h, handle, callback, key, flags)
ovflbuf = pgmap = NULL;
err_ret = ret = 0;
+ inp = P_INP(dbp, h);
memset(&dbt, 0, sizeof(DBT));
dbt.flags = DB_DBT_REALLOC;
memset(&unkdbt, 0, sizeof(DBT));
- unkdbt.size = strlen("UNKNOWN") + 1;
+ unkdbt.size = (u_int32_t)(strlen("UNKNOWN") + 1);
unkdbt.data = "UNKNOWN";
/*
* Allocate a buffer for overflow items. Start at one page;
* __db_safe_goff will realloc as needed.
*/
- if ((ret = __os_malloc(dbp->dbenv, dbp->pgsize, NULL, &ovflbuf)) != 0)
+ if ((ret = __os_malloc(dbp->dbenv, dbp->pgsize, &ovflbuf)) != 0)
return (ret);
if (LF_ISSET(DB_AGGRESSIVE)) {
if ((ret =
- __os_malloc(dbp->dbenv, dbp->pgsize, NULL, &pgmap)) != 0)
+ __os_malloc(dbp->dbenv, dbp->pgsize, &pgmap)) != 0)
goto err;
memset(pgmap, 0, dbp->pgsize);
}
@@ -1914,7 +2061,7 @@ __bam_salvage(dbp, vdp, pgno, pgtype, h, handle, callback, key, flags)
* We only want to print deleted items if
* DB_AGGRESSIVE is set.
*/
- bk = GET_BKEYDATA(h, i);
+ bk = GET_BKEYDATA(dbp, h, i);
if (!LF_ISSET(DB_AGGRESSIVE) && B_DISSET(bk->type))
continue;
@@ -1927,10 +2074,10 @@ __bam_salvage(dbp, vdp, pgno, pgtype, h, handle, callback, key, flags)
if (key != NULL &&
(i != 0 || !LF_ISSET(SA_SKIPFIRSTKEY)))
if ((ret = __db_prdbt(key,
- 0, " ", handle, callback, 0, NULL)) != 0)
+ 0, " ", handle, callback, 0, vdp)) != 0)
err_ret = ret;
- beg = h->inp[i];
+ beg = inp[i];
switch (B_TYPE(bk->type)) {
case B_DUPLICATE:
end = beg + BOVERFLOW_SIZE - 1;
@@ -1958,23 +2105,24 @@ __bam_salvage(dbp, vdp, pgno, pgtype, h, handle, callback, key, flags)
(i % P_INDX == 0)) {
/* Not much to do on failure. */
if ((ret = __db_prdbt(&unkdbt, 0, " ",
- handle, callback, 0, NULL)) != 0)
+ handle, callback, 0, vdp)) != 0)
err_ret = ret;
break;
}
if ((ret = __db_salvage_duptree(dbp,
vdp, bo->pgno, &dbt, handle, callback,
- flags | SA_SKIPFIRSTKEY)) != 0)
+ flags | SA_SKIPFIRSTKEY)) != 0)
err_ret = ret;
break;
case B_KEYDATA:
- end = ALIGN(beg + bk->len, sizeof(u_int32_t)) - 1;
+ end =
+ ALIGN(beg + bk->len, sizeof(u_int32_t)) - 1;
dbt.data = bk->data;
dbt.size = bk->len;
if ((ret = __db_prdbt(&dbt,
- 0, " ", handle, callback, 0, NULL)) != 0)
+ 0, " ", handle, callback, 0, vdp)) != 0)
err_ret = ret;
break;
case B_OVERFLOW:
@@ -1985,11 +2133,11 @@ __bam_salvage(dbp, vdp, pgno, pgtype, h, handle, callback, key, flags)
err_ret = ret;
/* We care about err_ret more. */
(void)__db_prdbt(&unkdbt, 0, " ",
- handle, callback, 0, NULL);
+ handle, callback, 0, vdp);
break;
}
if ((ret = __db_prdbt(&dbt,
- 0, " ", handle, callback, 0, NULL)) != 0)
+ 0, " ", handle, callback, 0, vdp)) != 0)
err_ret = ret;
break;
default:
@@ -2020,12 +2168,12 @@ __bam_salvage(dbp, vdp, pgno, pgtype, h, handle, callback, key, flags)
* a datum; fix this imbalance by printing an "UNKNOWN".
*/
if (pgtype == P_LBTREE && (i % P_INDX == 1) && ((ret =
- __db_prdbt(&unkdbt, 0, " ", handle, callback, 0, NULL)) != 0))
+ __db_prdbt(&unkdbt, 0, " ", handle, callback, 0, vdp)) != 0))
err_ret = ret;
err: if (pgmap != NULL)
- __os_free(pgmap, 0);
- __os_free(ovflbuf, 0);
+ __os_free(dbp->dbenv, pgmap);
+ __os_free(dbp->dbenv, ovflbuf);
/* Mark this page as done. */
if ((t_ret = __db_salvage_markdone(vdp, pgno)) != 0)
@@ -2061,12 +2209,13 @@ __bam_salvage_walkdupint(dbp, vdp, h, key, handle, callback, flags)
for (i = 0; i < NUM_ENT(h); i++) {
switch (TYPE(h)) {
case P_IBTREE:
- bi = GET_BINTERNAL(h, i);
+ bi = GET_BINTERNAL(dbp, h, i);
if ((t_ret = __db_salvage_duptree(dbp,
vdp, bi->pgno, key, handle, callback, flags)) != 0)
ret = t_ret;
+ break;
case P_IRECNO:
- ri = GET_RINTERNAL(h, i);
+ ri = GET_RINTERNAL(dbp, h, i);
if ((t_ret = __db_salvage_duptree(dbp,
vdp, ri->pgno, key, handle, callback, flags)) != 0)
ret = t_ret;
@@ -2110,11 +2259,13 @@ __bam_meta2pgset(dbp, vdp, btmeta, flags, pgset)
DB *pgset;
{
BINTERNAL *bi;
+ DB_MPOOLFILE *mpf;
PAGE *h;
RINTERNAL *ri;
db_pgno_t current, p;
int err_ret, ret;
+ mpf = dbp->mpf;
h = NULL;
ret = err_ret = 0;
DB_ASSERT(pgset != NULL);
@@ -2123,7 +2274,7 @@ __bam_meta2pgset(dbp, vdp, btmeta, flags, pgset)
err_ret = DB_VERIFY_BAD;
goto err;
}
- if ((ret = memp_fget(dbp->mpf, &current, 0, &h)) != 0) {
+ if ((ret = mpf->get(mpf, &current, 0, &h)) != 0) {
err_ret = ret;
goto err;
}
@@ -2137,10 +2288,10 @@ __bam_meta2pgset(dbp, vdp, btmeta, flags, pgset)
goto err;
}
if (TYPE(h) == P_IBTREE) {
- bi = GET_BINTERNAL(h, 0);
+ bi = GET_BINTERNAL(dbp, h, 0);
current = bi->pgno;
} else { /* P_IRECNO */
- ri = GET_RINTERNAL(h, 0);
+ ri = GET_RINTERNAL(dbp, h, 0);
current = ri->pgno;
}
break;
@@ -2152,7 +2303,7 @@ __bam_meta2pgset(dbp, vdp, btmeta, flags, pgset)
goto err;
}
- if ((ret = memp_fput(dbp->mpf, h, 0)) != 0)
+ if ((ret = mpf->put(mpf, h, 0)) != 0)
err_ret = ret;
h = NULL;
}
@@ -2163,8 +2314,7 @@ __bam_meta2pgset(dbp, vdp, btmeta, flags, pgset)
*/
traverse:
while (IS_VALID_PGNO(current) && current != PGNO_INVALID) {
- if (h == NULL &&
- (ret = memp_fget(dbp->mpf, &current, 0, &h) != 0)) {
+ if (h == NULL && (ret = mpf->get(mpf, &current, 0, &h)) != 0) {
err_ret = ret;
break;
}
@@ -2184,13 +2334,13 @@ traverse:
goto err;
current = NEXT_PGNO(h);
- if ((ret = memp_fput(dbp->mpf, h, 0)) != 0)
+ if ((ret = mpf->put(mpf, h, 0)) != 0)
err_ret = ret;
h = NULL;
}
err: if (h != NULL)
- (void)memp_fput(dbp->mpf, h, 0);
+ (void)mpf->put(mpf, h, 0);
return (ret == 0 ? err_ret : ret);
}
@@ -2218,7 +2368,7 @@ __bam_safe_getdata(dbp, h, i, ovflok, dbt, freedbtp)
memset(dbt, 0, sizeof(DBT));
*freedbtp = 0;
- bk = GET_BKEYDATA(h, i);
+ bk = GET_BKEYDATA(dbp, h, i);
if (B_TYPE(bk->type) == B_OVERFLOW) {
if (!ovflok)
return (0);
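
The bt_verify.c hunks above add sibling-chain accounting to __bam_vrfy_subtree() through the new vdp->prev_pgno, vdp->next_pgno and vdp->leaf_type fields: the first leaf fixes the expected page type and must have no left sibling, each later leaf must match the type and the prev/next pointers recorded from its predecessor, and the last leaf must terminate the chain. The standalone C sketch below models that check in isolation; every name in it (struct leaf, check_leaf_chain, the page numbers and the type constant) is invented for illustration and none of it is Berkeley DB source.

/*
 * Illustrative sketch only -- NOT the Berkeley DB sources.  It models the
 * leaf-chain verification this patch adds: remember the previous leaf's
 * page number and next_pgno, and require each new leaf to agree with them.
 */
#include <stdio.h>

#define	PGNO_INVALID	0	/* "No such page" sentinel, as in the patch. */
#define	P_INVALID	0	/* "No type seen yet" sentinel. */

struct leaf {			/* What a leaf page header contributes. */
	unsigned int pgno;	/* This page's number. */
	unsigned int prev_pgno;	/* Left sibling (PGNO_INVALID if none). */
	unsigned int next_pgno;	/* Right sibling (PGNO_INVALID if none). */
	int type;		/* Leaf page type (arbitrary constant here). */
};

/* Return 0 if the leaves, visited left to right, form a consistent chain. */
static int
check_leaf_chain(const struct leaf *leaves, int n)
{
	unsigned int next_pgno, prev_pgno;
	int i, isbad, leaf_type;

	isbad = 0;
	prev_pgno = next_pgno = PGNO_INVALID;
	leaf_type = P_INVALID;

	for (i = 0; i < n; i++) {
		const struct leaf *p = &leaves[i];

		if (leaf_type == P_INVALID) {
			/* First leaf: fix the type, no left sibling allowed. */
			leaf_type = p->type;
			if (p->prev_pgno != PGNO_INVALID)
				isbad = 1;
		} else {
			/*
			 * Successor leaf: its type, its own page number and
			 * its prev_pgno must all match what the previous
			 * leaf led us to expect.
			 */
			if (p->type != leaf_type ||
			    p->pgno != next_pgno || p->prev_pgno != prev_pgno)
				isbad = 1;
		}
		prev_pgno = p->pgno;
		next_pgno = p->next_pgno;
	}

	/* The rightmost leaf must terminate the chain. */
	if (next_pgno != PGNO_INVALID)
		isbad = 1;
	return (isbad);
}

int
main(void)
{
	/* Pages 2 <-> 3 <-> 4, all of the same leaf type. */
	struct leaf ok[] = {
		{2, PGNO_INVALID, 3, 5}, {3, 2, 4, 5}, {4, 3, PGNO_INVALID, 5},
	};
	/* Same pages, but page 4 claims the wrong left sibling. */
	struct leaf bad[] = {
		{2, PGNO_INVALID, 3, 5}, {3, 2, 4, 5}, {4, 2, PGNO_INVALID, 5},
	};

	printf("ok chain:  %s\n", check_leaf_chain(ok, 3) ? "BAD" : "good");
	printf("bad chain: %s\n", check_leaf_chain(bad, 3) ? "BAD" : "good");
	return (0);
}

Run as written, the first chain passes and the second is flagged, which is the class of corruption (a leaf whose prev_pgno does not point at its left sibling) that the patch's new bad_prev path reports.
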
diff --git a/bdb/btree/btree.src b/bdb/btree/btree.src
index a1eba7d7fc7..73f4abac874 100644
--- a/bdb/btree/btree.src
+++ b/bdb/btree/btree.src
@@ -1,13 +1,14 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Copyright (c) 1996-2002
* Sleepycat Software. All rights reserved.
*
- * $Id: btree.src,v 10.26 2000/12/12 17:40:23 bostic Exp $
+ * $Id: btree.src,v 10.35 2002/04/17 19:02:56 krinsky Exp $
*/
-PREFIX bam
+PREFIX __bam
+DBPRIVATE
INCLUDE #include "db_config.h"
INCLUDE
@@ -15,69 +16,23 @@ INCLUDE #ifndef NO_SYSTEM_INCLUDES
INCLUDE #include <sys/types.h>
INCLUDE
INCLUDE #include <ctype.h>
-INCLUDE #include <errno.h>
INCLUDE #include <string.h>
INCLUDE #endif
INCLUDE
INCLUDE #include "db_int.h"
-INCLUDE #include "db_page.h"
-INCLUDE #include "db_dispatch.h"
-INCLUDE #include "db_am.h"
-INCLUDE #include "btree.h"
-INCLUDE #include "txn.h"
+INCLUDE #include "dbinc/crypto.h"
+INCLUDE #include "dbinc/db_page.h"
+INCLUDE #include "dbinc/db_dispatch.h"
+INCLUDE #include "dbinc/db_am.h"
+INCLUDE #include "dbinc/btree.h"
+INCLUDE #include "dbinc/log.h"
+INCLUDE #include "dbinc/rep.h"
+INCLUDE #include "dbinc/txn.h"
INCLUDE
/*
- * BTREE-pg_alloc: used to record allocating a new page.
- *
- * meta_lsn: the meta-data page's original lsn.
- * page_lsn: the allocated page's original lsn.
- * pgno: the page allocated.
- * next: the next page on the free list.
+ * NOTE: pg_alloc and pg_free have been moved to db.src, where they belong.
*/
-BEGIN pg_alloc 51
-ARG fileid int32_t ld
-POINTER meta_lsn DB_LSN * lu
-POINTER page_lsn DB_LSN * lu
-ARG pgno db_pgno_t lu
-ARG ptype u_int32_t lu
-ARG next db_pgno_t lu
-END
-
-DEPRECATED pg_alloc1 60
-ARG fileid int32_t ld
-POINTER meta_lsn DB_LSN * lu
-POINTER alloc_lsn DB_LSN * lu
-POINTER page_lsn DB_LSN * lu
-ARG pgno db_pgno_t lu
-ARG ptype u_int32_t lu
-ARG next db_pgno_t lu
-END
-
-/*
- * BTREE-pg_free: used to record freeing a page.
- *
- * pgno: the page being freed.
- * meta_lsn: the meta-data page's original lsn.
- * header: the header from the free'd page.
- * next: the previous next pointer on the metadata page.
- */
-BEGIN pg_free 52
-ARG fileid int32_t ld
-ARG pgno db_pgno_t lu
-POINTER meta_lsn DB_LSN * lu
-DBT header DBT s
-ARG next db_pgno_t lu
-END
-
-DEPRECATED pg_free1 61
-ARG fileid int32_t ld
-ARG pgno db_pgno_t lu
-POINTER meta_lsn DB_LSN * lu
-POINTER alloc_lsn DB_LSN * lu
-DBT header DBT s
-ARG next db_pgno_t lu
-END
/*
* BTREE-split: used to log a page split.
@@ -89,46 +44,21 @@ END
* indx: the number of entries that went to the left page.
* npgno: the next page number
* nlsn: the next page's original LSN (or 0 if no next page).
- * pg: the split page's contents before the split.
- */
-DEPRECATED split1 53
-ARG fileid int32_t ld
-ARG left db_pgno_t lu
-POINTER llsn DB_LSN * lu
-ARG right db_pgno_t lu
-POINTER rlsn DB_LSN * lu
-ARG indx u_int32_t lu
-ARG npgno db_pgno_t lu
-POINTER nlsn DB_LSN * lu
-DBT pg DBT s
-END
-
-/*
- * BTREE-split: used to log a page split.
- *
- * left: the page number for the low-order contents.
- * llsn: the left page's original LSN.
- * right: the page number for the high-order contents.
- * rlsn: the right page's original LSN.
- * indx: the number of entries that went to the left page.
- * npgno: the next page number
- * npgno: the next page number
- * nlsn: the next page's original LSN (or 0 if no next page).
* root_pgno: the root page number
* pg: the split page's contents before the split.
* opflags: SPL_NRECS: if splitting a tree that maintains a record count.
*/
BEGIN split 62
-ARG fileid int32_t ld
-ARG left db_pgno_t lu
+DB fileid int32_t ld
+WRLOCK left db_pgno_t lu
POINTER llsn DB_LSN * lu
-ARG right db_pgno_t lu
+WRLOCK right db_pgno_t lu
POINTER rlsn DB_LSN * lu
ARG indx u_int32_t lu
ARG npgno db_pgno_t lu
POINTER nlsn DB_LSN * lu
-ARG root_pgno db_pgno_t lu
-DBT pg DBT s
+WRLOCKNZ root_pgno db_pgno_t lu
+PGDBT pg DBT s
ARG opflags u_int32_t lu
END
@@ -137,34 +67,16 @@ END
*
* pgno: the page number of the page copied over the root.
* pgdbt: the page being copied on the root page.
- * nrec: the tree's record count.
- * rootent: last entry on the root page.
- * rootlsn: the root page's original lsn.
- */
-DEPRECATED rsplit1 54
-ARG fileid int32_t ld
-ARG pgno db_pgno_t lu
-DBT pgdbt DBT s
-ARG nrec db_pgno_t lu
-DBT rootent DBT s
-POINTER rootlsn DB_LSN * lu
-END
-
-/*
- * BTREE-rsplit: used to log a reverse-split
- *
- * pgno: the page number of the page copied over the root.
- * pgdbt: the page being copied on the root page.
* root_pgno: the root page number.
* nrec: the tree's record count.
* rootent: last entry on the root page.
* rootlsn: the root page's original lsn.
*/
BEGIN rsplit 63
-ARG fileid int32_t ld
-ARG pgno db_pgno_t lu
-DBT pgdbt DBT s
-ARG root_pgno db_pgno_t lu
+DB fileid int32_t ld
+WRLOCK pgno db_pgno_t lu
+PGDBT pgdbt DBT s
+WRLOCK root_pgno db_pgno_t lu
ARG nrec db_pgno_t lu
DBT rootent DBT s
POINTER rootlsn DB_LSN * lu
@@ -180,8 +92,8 @@ END
* is_insert: 0 if a delete, 1 if an insert.
*/
BEGIN adj 55
-ARG fileid int32_t ld
-ARG pgno db_pgno_t lu
+DB fileid int32_t ld
+WRLOCK pgno db_pgno_t lu
POINTER lsn DB_LSN * lu
ARG indx u_int32_t lu
ARG indx_copy u_int32_t lu
@@ -198,8 +110,8 @@ END
* opflags: CAD_UPDATEROOT: if root page count was adjusted.
*/
BEGIN cadjust 56
-ARG fileid int32_t ld
-ARG pgno db_pgno_t lu
+DB fileid int32_t ld
+WRLOCK pgno db_pgno_t lu
POINTER lsn DB_LSN * lu
ARG indx u_int32_t lu
ARG adjust int32_t ld
@@ -214,8 +126,8 @@ END
* indx: the index to be deleted.
*/
BEGIN cdel 57
-ARG fileid int32_t ld
-ARG pgno db_pgno_t lu
+DB fileid int32_t ld
+WRLOCK pgno db_pgno_t lu
POINTER lsn DB_LSN * lu
ARG indx u_int32_t lu
END
@@ -230,8 +142,8 @@ END
* duplicate: the prefix of the replacement that matches the original.
*/
BEGIN repl 58
-ARG fileid int32_t ld
-ARG pgno db_pgno_t lu
+DB fileid int32_t ld
+WRLOCK pgno db_pgno_t lu
POINTER lsn DB_LSN * lu
ARG indx u_int32_t lu
ARG isdeleted u_int32_t lu
@@ -245,9 +157,9 @@ END
* BTREE-root: log the assignment of a root btree page.
*/
BEGIN root 59
-ARG fileid int32_t ld
-ARG meta_pgno db_pgno_t lu
-ARG root_pgno db_pgno_t lu
+DB fileid int32_t ld
+WRLOCK meta_pgno db_pgno_t lu
+WRLOCK root_pgno db_pgno_t lu
POINTER meta_lsn DB_LSN * lu
END
@@ -260,7 +172,7 @@ END
*/
BEGIN curadj 64
/* Fileid of db affected. */
-ARG fileid int32_t ld
+DB fileid int32_t ld
/* Which adjustment. */
ARG mode db_ca_mode ld
/* Page entry is from. */
@@ -284,7 +196,7 @@ END
*/
BEGIN rcuradj 65
/* Fileid of db affected. */
-ARG fileid int32_t ld
+DB fileid int32_t ld
/* Which adjustment. */
ARG mode ca_recno_arg ld
/* Root page number. */
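
btree.src is the input to Berkeley DB's log-record generator, and the hunks above migrate its records to the new keyword set: PREFIX __bam plus DBPRIVATE at the top, DB in place of ARG for the file id, WRLOCK/WRLOCKNZ for page-number fields (presumably so recovery can resolve the database handle and lock those pages itself), and PGDBT for page-image DBTs. The block below is an invented record, written only with keywords that already appear in this file, to show the shape of one definition; the record name, the number 99 and the field list are made up for illustration and are not part of the patch.

/*
 * BTREE-example: hypothetical record, for illustration only.
 *
 * pgno: the page being modified.
 * lsn:  that page's LSN before the change.
 * indx: the index of the affected entry.
 * pg:   the page image.
 */
BEGIN example 99
DB fileid int32_t ld
WRLOCK pgno db_pgno_t lu
POINTER lsn DB_LSN * lu
ARG indx u_int32_t lu
PGDBT pg DBT s
END
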
diff --git a/bdb/build_vxworks/BerkeleyDB.wpj b/bdb/build_vxworks/BerkeleyDB.wpj
index fa8aa61c14e..45b15a62687 100644
--- a/bdb/build_vxworks/BerkeleyDB.wpj
+++ b/bdb/build_vxworks/BerkeleyDB.wpj
@@ -1,26 +1,26 @@
Document file - DO NOT EDIT
-<BEGIN> BUILD_PENTIUM_RPCdebug_BUILDRULE
+<BEGIN> BUILD_PENTIUM_debug_BUILDRULE
BerkeleyDB.out
<END>
-<BEGIN> BUILD_PENTIUM_RPCdebug_MACRO_AR
+<BEGIN> BUILD_PENTIUM_debug_MACRO_AR
ar386
<END>
-<BEGIN> BUILD_PENTIUM_RPCdebug_MACRO_ARCHIVE
+<BEGIN> BUILD_PENTIUM_debug_MACRO_ARCHIVE
$(PRJ_DIR)/PENTIUMgnu/BerkeleyDB_sim.a
<END>
-<BEGIN> BUILD_PENTIUM_RPCdebug_MACRO_AS
+<BEGIN> BUILD_PENTIUM_debug_MACRO_AS
cc386
<END>
-<BEGIN> BUILD_PENTIUM_RPCdebug_MACRO_CC
+<BEGIN> BUILD_PENTIUM_debug_MACRO_CC
cc386
<END>
-<BEGIN> BUILD_PENTIUM_RPCdebug_MACRO_CFLAGS
+<BEGIN> BUILD_PENTIUM_debug_MACRO_CFLAGS
-g \
-mpentium \
-ansi \
@@ -36,13 +36,12 @@ cc386
-DCPU=PENTIUM \
-O0 \
-I$(PRJ_DIR) \
- -I/export/home/db/include \
+ -I$(PRJ_DIR)/.. \
-DDIAGNOSTIC \
- -DDEBUG \
- -DHAVE_RPC
+ -DDEBUG
<END>
-<BEGIN> BUILD_PENTIUM_RPCdebug_MACRO_CFLAGS_AS
+<BEGIN> BUILD_PENTIUM_debug_MACRO_CFLAGS_AS
-g \
-mpentium \
-ansi \
@@ -59,77 +58,77 @@ cc386
-DCPU=PENTIUM
<END>
-<BEGIN> BUILD_PENTIUM_RPCdebug_MACRO_CPP
+<BEGIN> BUILD_PENTIUM_debug_MACRO_CPP
cc386 -E -P -xc
<END>
-<BEGIN> BUILD_PENTIUM_RPCdebug_MACRO_LD
+<BEGIN> BUILD_PENTIUM_debug_MACRO_LD
ld386
<END>
-<BEGIN> BUILD_PENTIUM_RPCdebug_MACRO_LDFLAGS
+<BEGIN> BUILD_PENTIUM_debug_MACRO_LDFLAGS
-X -N
<END>
-<BEGIN> BUILD_PENTIUM_RPCdebug_MACRO_LD_PARTIAL_FLAGS
+<BEGIN> BUILD_PENTIUM_debug_MACRO_LD_PARTIAL_FLAGS
-X -r
<END>
-<BEGIN> BUILD_PENTIUM_RPCdebug_MACRO_NM
+<BEGIN> BUILD_PENTIUM_debug_MACRO_NM
nm386 -g
<END>
-<BEGIN> BUILD_PENTIUM_RPCdebug_MACRO_OPTION_DEFINE_MACRO
+<BEGIN> BUILD_PENTIUM_debug_MACRO_OPTION_DEFINE_MACRO
-D
<END>
-<BEGIN> BUILD_PENTIUM_RPCdebug_MACRO_OPTION_INCLUDE_DIR
+<BEGIN> BUILD_PENTIUM_debug_MACRO_OPTION_INCLUDE_DIR
-I
<END>
-<BEGIN> BUILD_PENTIUM_RPCdebug_MACRO_POST_BUILD_RULE
+<BEGIN> BUILD_PENTIUM_debug_MACRO_POST_BUILD_RULE
<END>
-<BEGIN> BUILD_PENTIUM_RPCdebug_MACRO_PRJ_LIBS
+<BEGIN> BUILD_PENTIUM_debug_MACRO_PRJ_LIBS
<END>
-<BEGIN> BUILD_PENTIUM_RPCdebug_MACRO_SIZE
+<BEGIN> BUILD_PENTIUM_debug_MACRO_SIZE
size386
<END>
-<BEGIN> BUILD_PENTIUM_RPCdebug_RO_DEPEND_PATH
+<BEGIN> BUILD_PENTIUM_debug_RO_DEPEND_PATH
{$(WIND_BASE)/target/h/} \
{$(WIND_BASE)/target/src/} \
{$(WIND_BASE)/target/config/}
<END>
-<BEGIN> BUILD_PENTIUM_RPCdebug_TC
+<BEGIN> BUILD_PENTIUM_debug_TC
::tc_PENTIUMgnu
<END>
-<BEGIN> BUILD_PENTIUM_RPCnodebug_BUILDRULE
+<BEGIN> BUILD_PENTIUM_release_BUILDRULE
BerkeleyDB.out
<END>
-<BEGIN> BUILD_PENTIUM_RPCnodebug_MACRO_AR
+<BEGIN> BUILD_PENTIUM_release_MACRO_AR
ar386
<END>
-<BEGIN> BUILD_PENTIUM_RPCnodebug_MACRO_ARCHIVE
+<BEGIN> BUILD_PENTIUM_release_MACRO_ARCHIVE
$(PRJ_DIR)/PENTIUMgnu/BerkeleyDB_sim.a
<END>
-<BEGIN> BUILD_PENTIUM_RPCnodebug_MACRO_AS
+<BEGIN> BUILD_PENTIUM_release_MACRO_AS
cc386
<END>
-<BEGIN> BUILD_PENTIUM_RPCnodebug_MACRO_CC
+<BEGIN> BUILD_PENTIUM_release_MACRO_CC
cc386
<END>
-<BEGIN> BUILD_PENTIUM_RPCnodebug_MACRO_CFLAGS
+<BEGIN> BUILD_PENTIUM_release_MACRO_CFLAGS
-mpentium \
-ansi \
-nostdinc \
@@ -144,11 +143,10 @@ cc386
-DCPU=PENTIUM \
-O2 \
-I$(PRJ_DIR) \
- -I/export/home/db/include \
- -DHAVE_RPC
+ -I$(PRJ_DIR)/..
<END>
-<BEGIN> BUILD_PENTIUM_RPCnodebug_MACRO_CFLAGS_AS
+<BEGIN> BUILD_PENTIUM_release_MACRO_CFLAGS_AS
-g \
-mpentium \
-ansi \
@@ -165,5902 +163,3344 @@ cc386
-DCPU=PENTIUM
<END>
-<BEGIN> BUILD_PENTIUM_RPCnodebug_MACRO_CPP
+<BEGIN> BUILD_PENTIUM_release_MACRO_CPP
cc386 -E -P -xc
<END>
-<BEGIN> BUILD_PENTIUM_RPCnodebug_MACRO_LD
+<BEGIN> BUILD_PENTIUM_release_MACRO_LD
ld386
<END>
-<BEGIN> BUILD_PENTIUM_RPCnodebug_MACRO_LDDEPS
+<BEGIN> BUILD_PENTIUM_release_MACRO_LDDEPS
<END>
-<BEGIN> BUILD_PENTIUM_RPCnodebug_MACRO_LDFLAGS
+<BEGIN> BUILD_PENTIUM_release_MACRO_LDFLAGS
-X -N
<END>
-<BEGIN> BUILD_PENTIUM_RPCnodebug_MACRO_LD_PARTIAL_FLAGS
+<BEGIN> BUILD_PENTIUM_release_MACRO_LD_PARTIAL_FLAGS
-X -r
<END>
-<BEGIN> BUILD_PENTIUM_RPCnodebug_MACRO_NM
+<BEGIN> BUILD_PENTIUM_release_MACRO_NM
nm386 -g
<END>
-<BEGIN> BUILD_PENTIUM_RPCnodebug_MACRO_OPTION_DEFINE_MACRO
+<BEGIN> BUILD_PENTIUM_release_MACRO_OPTION_DEFINE_MACRO
-D
<END>
-<BEGIN> BUILD_PENTIUM_RPCnodebug_MACRO_OPTION_INCLUDE_DIR
+<BEGIN> BUILD_PENTIUM_release_MACRO_OPTION_INCLUDE_DIR
-I
<END>
-<BEGIN> BUILD_PENTIUM_RPCnodebug_MACRO_POST_BUILD_RULE
+<BEGIN> BUILD_PENTIUM_release_MACRO_POST_BUILD_RULE
<END>
-<BEGIN> BUILD_PENTIUM_RPCnodebug_MACRO_PRJ_LIBS
+<BEGIN> BUILD_PENTIUM_release_MACRO_PRJ_LIBS
<END>
-<BEGIN> BUILD_PENTIUM_RPCnodebug_MACRO_SIZE
+<BEGIN> BUILD_PENTIUM_release_MACRO_SIZE
size386
<END>
-<BEGIN> BUILD_PENTIUM_RPCnodebug_RO_DEPEND_PATH
+<BEGIN> BUILD_PENTIUM_release_RO_DEPEND_PATH
{$(WIND_BASE)/target/h/} \
{$(WIND_BASE)/target/src/} \
{$(WIND_BASE)/target/config/}
<END>
-<BEGIN> BUILD_PENTIUM_RPCnodebug_TC
+<BEGIN> BUILD_PENTIUM_release_TC
::tc_PENTIUMgnu
<END>
-<BEGIN> BUILD_PENTIUM_debug_BUILDRULE
-BerkeleyDB.out
-<END>
+<BEGIN> BUILD_RULE_BerkeleyDB.out
-<BEGIN> BUILD_PENTIUM_debug_MACRO_AR
-ar386
<END>
-<BEGIN> BUILD_PENTIUM_debug_MACRO_ARCHIVE
-$(PRJ_DIR)/PENTIUMgnu/BerkeleyDB_sim.a
-<END>
+<BEGIN> BUILD_RULE_BerkeleyDB_sim.out
-<BEGIN> BUILD_PENTIUM_debug_MACRO_AS
-cc386
<END>
-<BEGIN> BUILD_PENTIUM_debug_MACRO_CC
-cc386
+<BEGIN> BUILD_RULE_archive
+
<END>
-<BEGIN> BUILD_PENTIUM_debug_MACRO_CFLAGS
--g \
- -mpentium \
- -ansi \
- -nostdinc \
- -DRW_MULTI_THREAD \
- -D_REENTRANT \
- -fvolatile \
- -nostdlib \
- -fno-builtin \
- -fno-defer-pop \
- -I. \
- -I$(WIND_BASE)/target/h \
- -DCPU=PENTIUM \
- -O0 \
- -I$(PRJ_DIR) \
- -I/export/home/db/include \
- -DDIAGNOSTIC \
- -DDEBUG
+<BEGIN> BUILD_RULE_objects
+
<END>
-<BEGIN> BUILD_PENTIUM_debug_MACRO_CFLAGS_AS
--g \
- -mpentium \
- -ansi \
- -nostdinc \
- -fvolatile \
- -nostdlib \
- -fno-builtin \
- -fno-defer-pop \
- -P \
- -x \
- assembler-with-cpp \
- -I. \
- -I$(WIND_BASE)/target/h \
- -DCPU=PENTIUM
+<BEGIN> BUILD__CURRENT
+PENTIUM_debug
<END>
-<BEGIN> BUILD_PENTIUM_debug_MACRO_CPP
-cc386 -E -P -xc
+<BEGIN> BUILD__LIST
+PENTIUM_release PENTIUM_debug
<END>
-<BEGIN> BUILD_PENTIUM_debug_MACRO_LD
-ld386
+<BEGIN> CORE_INFO_TYPE
+::prj_vxApp
<END>
-<BEGIN> BUILD_PENTIUM_debug_MACRO_LDFLAGS
--X -N
+<BEGIN> CORE_INFO_VERSION
+2.0
<END>
-<BEGIN> BUILD_PENTIUM_debug_MACRO_LD_PARTIAL_FLAGS
--X -r
+<BEGIN> FILE_$(PRJ_DIR)/../btree/bt_compare.c_dependDone
+TRUE
<END>
-<BEGIN> BUILD_PENTIUM_debug_MACRO_NM
-nm386 -g
+<BEGIN> FILE_$(PRJ_DIR)/../btree/bt_compare.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h
<END>
-<BEGIN> BUILD_PENTIUM_debug_MACRO_OPTION_DEFINE_MACRO
--D
+<BEGIN> FILE_$(PRJ_DIR)/../btree/bt_compare.c_objects
+bt_compare.o
<END>
-<BEGIN> BUILD_PENTIUM_debug_MACRO_OPTION_INCLUDE_DIR
--I
+<BEGIN> FILE_$(PRJ_DIR)/../btree/bt_compare.c_tool
+C/C++ compiler
<END>
-<BEGIN> BUILD_PENTIUM_debug_MACRO_POST_BUILD_RULE
+<BEGIN> FILE_$(PRJ_DIR)/../btree/bt_conv.c_dependDone
+TRUE
+<END>
+<BEGIN> FILE_$(PRJ_DIR)/../btree/bt_conv.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h
<END>
-<BEGIN> BUILD_PENTIUM_debug_MACRO_PRJ_LIBS
+<BEGIN> FILE_$(PRJ_DIR)/../btree/bt_conv.c_objects
+bt_conv.o
+<END>
+<BEGIN> FILE_$(PRJ_DIR)/../btree/bt_conv.c_tool
+C/C++ compiler
<END>
-<BEGIN> BUILD_PENTIUM_debug_MACRO_SIZE
-size386
+<BEGIN> FILE_$(PRJ_DIR)/../btree/bt_curadj.c_dependDone
+TRUE
<END>
-<BEGIN> BUILD_PENTIUM_debug_RO_DEPEND_PATH
-{$(WIND_BASE)/target/h/} \
- {$(WIND_BASE)/target/src/} \
- {$(WIND_BASE)/target/config/}
+<BEGIN> FILE_$(PRJ_DIR)/../btree/bt_curadj.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h
<END>
-<BEGIN> BUILD_PENTIUM_debug_TC
-::tc_PENTIUMgnu
+<BEGIN> FILE_$(PRJ_DIR)/../btree/bt_curadj.c_objects
+bt_curadj.o
<END>
-<BEGIN> BUILD_PENTIUM_nodebug_BUILDRULE
-BerkeleyDB.out
+<BEGIN> FILE_$(PRJ_DIR)/../btree/bt_curadj.c_tool
+C/C++ compiler
<END>
-<BEGIN> BUILD_PENTIUM_nodebug_MACRO_AR
-ar386
+<BEGIN> FILE_$(PRJ_DIR)/../btree/bt_cursor.c_dependDone
+TRUE
<END>
-<BEGIN> BUILD_PENTIUM_nodebug_MACRO_ARCHIVE
-$(PRJ_DIR)/PENTIUMgnu/BerkeleyDB_sim.a
+<BEGIN> FILE_$(PRJ_DIR)/../btree/bt_cursor.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h
<END>
-<BEGIN> BUILD_PENTIUM_nodebug_MACRO_AS
-cc386
+<BEGIN> FILE_$(PRJ_DIR)/../btree/bt_cursor.c_objects
+bt_cursor.o
<END>
-<BEGIN> BUILD_PENTIUM_nodebug_MACRO_CC
-cc386
+<BEGIN> FILE_$(PRJ_DIR)/../btree/bt_cursor.c_tool
+C/C++ compiler
<END>
-<BEGIN> BUILD_PENTIUM_nodebug_MACRO_CFLAGS
--mpentium \
- -ansi \
- -nostdinc \
- -DRW_MULTI_THREAD \
- -D_REENTRANT \
- -fvolatile \
- -nostdlib \
- -fno-builtin \
- -fno-defer-pop \
- -I. \
- -I$(WIND_BASE)/target/h \
- -DCPU=PENTIUM \
- -O2 \
- -I$(PRJ_DIR) \
- -I/export/home/db/include
+<BEGIN> FILE_$(PRJ_DIR)/../btree/bt_delete.c_dependDone
+TRUE
<END>
-<BEGIN> BUILD_PENTIUM_nodebug_MACRO_CFLAGS_AS
--g \
- -mpentium \
- -ansi \
- -nostdinc \
- -fvolatile \
- -nostdlib \
- -fno-builtin \
- -fno-defer-pop \
- -P \
- -x \
- assembler-with-cpp \
- -I. \
- -I$(WIND_BASE)/target/h \
- -DCPU=PENTIUM
+<BEGIN> FILE_$(PRJ_DIR)/../btree/bt_delete.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h
<END>
-<BEGIN> BUILD_PENTIUM_nodebug_MACRO_CPP
-cc386 -E -P -xc
+<BEGIN> FILE_$(PRJ_DIR)/../btree/bt_delete.c_objects
+bt_delete.o
<END>
-<BEGIN> BUILD_PENTIUM_nodebug_MACRO_LD
-ld386
+<BEGIN> FILE_$(PRJ_DIR)/../btree/bt_delete.c_tool
+C/C++ compiler
<END>
-<BEGIN> BUILD_PENTIUM_nodebug_MACRO_LDFLAGS
--X -N
+<BEGIN> FILE_$(PRJ_DIR)/../btree/bt_method.c_dependDone
+TRUE
<END>
-<BEGIN> BUILD_PENTIUM_nodebug_MACRO_LD_PARTIAL_FLAGS
--X -r
+<BEGIN> FILE_$(PRJ_DIR)/../btree/bt_method.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h
<END>
-<BEGIN> BUILD_PENTIUM_nodebug_MACRO_NM
-nm386 -g
+<BEGIN> FILE_$(PRJ_DIR)/../btree/bt_method.c_objects
+bt_method.o
<END>
-<BEGIN> BUILD_PENTIUM_nodebug_MACRO_OPTION_DEFINE_MACRO
--D
+<BEGIN> FILE_$(PRJ_DIR)/../btree/bt_method.c_tool
+C/C++ compiler
<END>
-<BEGIN> BUILD_PENTIUM_nodebug_MACRO_OPTION_INCLUDE_DIR
--I
+<BEGIN> FILE_$(PRJ_DIR)/../btree/bt_open.c_dependDone
+TRUE
<END>
-<BEGIN> BUILD_PENTIUM_nodebug_MACRO_POST_BUILD_RULE
+<BEGIN> FILE_$(PRJ_DIR)/../btree/bt_open.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h
+<END>
+<BEGIN> FILE_$(PRJ_DIR)/../btree/bt_open.c_objects
+bt_open.o
<END>
-<BEGIN> BUILD_PENTIUM_nodebug_MACRO_PRJ_LIBS
+<BEGIN> FILE_$(PRJ_DIR)/../btree/bt_open.c_tool
+C/C++ compiler
+<END>
+<BEGIN> FILE_$(PRJ_DIR)/../btree/bt_put.c_dependDone
+TRUE
<END>
-<BEGIN> BUILD_PENTIUM_nodebug_MACRO_SIZE
-size386
+<BEGIN> FILE_$(PRJ_DIR)/../btree/bt_put.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h
<END>
-<BEGIN> BUILD_PENTIUM_nodebug_RO_DEPEND_PATH
-{$(WIND_BASE)/target/h/} \
- {$(WIND_BASE)/target/src/} \
- {$(WIND_BASE)/target/config/}
+<BEGIN> FILE_$(PRJ_DIR)/../btree/bt_put.c_objects
+bt_put.o
<END>
-<BEGIN> BUILD_PENTIUM_nodebug_TC
-::tc_PENTIUMgnu
+<BEGIN> FILE_$(PRJ_DIR)/../btree/bt_put.c_tool
+C/C++ compiler
<END>
-<BEGIN> BUILD_RULE_BerkeleyDB.out
+<BEGIN> FILE_$(PRJ_DIR)/../btree/bt_rec.c_dependDone
+TRUE
+<END>
+<BEGIN> FILE_$(PRJ_DIR)/../btree/bt_rec.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h
<END>
-<BEGIN> BUILD_RULE_BerkeleyDB_sim.out
+<BEGIN> FILE_$(PRJ_DIR)/../btree/bt_rec.c_objects
+bt_rec.o
+<END>
+<BEGIN> FILE_$(PRJ_DIR)/../btree/bt_rec.c_tool
+C/C++ compiler
<END>
-<BEGIN> BUILD_RULE_archive
+<BEGIN> FILE_$(PRJ_DIR)/../btree/bt_reclaim.c_dependDone
+TRUE
+<END>
+<BEGIN> FILE_$(PRJ_DIR)/../btree/bt_reclaim.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h
<END>
-<BEGIN> BUILD_RULE_objects
+<BEGIN> FILE_$(PRJ_DIR)/../btree/bt_reclaim.c_objects
+bt_reclaim.o
+<END>
+<BEGIN> FILE_$(PRJ_DIR)/../btree/bt_reclaim.c_tool
+C/C++ compiler
<END>
-<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_AR
-arsimso
+<BEGIN> FILE_$(PRJ_DIR)/../btree/bt_recno.c_dependDone
+TRUE
<END>
-<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_ARCHIVE
-$(PRJ_DIR)/SIMSPARCSOLARISgnu/BerkeleyDB.a
+<BEGIN> FILE_$(PRJ_DIR)/../btree/bt_recno.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h
<END>
-<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_AS
-ccsimso
+<BEGIN> FILE_$(PRJ_DIR)/../btree/bt_recno.c_objects
+bt_recno.o
<END>
-<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_CC
-ccsimso
+<BEGIN> FILE_$(PRJ_DIR)/../btree/bt_recno.c_tool
+C/C++ compiler
<END>
-<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_CFLAGS
--g \
- -ansi \
- -nostdinc \
- -DRW_MULTI_THREAD \
- -D_REENTRANT \
- -fvolatile \
- -fno-builtin \
- -I. \
- -I$(WIND_BASE)/target/h \
- -DCPU=SIMSPARCSOLARIS \
- -O2 \
- -I$(PRJ_DIR) \
- -I/export/home/db/include
+<BEGIN> FILE_$(PRJ_DIR)/../btree/bt_rsearch.c_dependDone
+TRUE
<END>
-<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_CFLAGS_AS
--g \
- -ansi \
- -nostdinc \
- -fvolatile \
- -fno-builtin \
- -P \
- -x \
- assembler-with-cpp \
- -I. \
- -I$(WIND_BASE)/target/h \
- -DCPU=SIMSPARCSOLARIS
+<BEGIN> FILE_$(PRJ_DIR)/../btree/bt_rsearch.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h
<END>
-<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_CPP
-ccsimso -E -P -xc
+<BEGIN> FILE_$(PRJ_DIR)/../btree/bt_rsearch.c_objects
+bt_rsearch.o
<END>
-<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_LD
-ccsimso
+<BEGIN> FILE_$(PRJ_DIR)/../btree/bt_rsearch.c_tool
+C/C++ compiler
<END>
-<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_LDFLAGS
--N
+<BEGIN> FILE_$(PRJ_DIR)/../btree/bt_search.c_dependDone
+TRUE
<END>
-<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_LD_PARTIAL_FLAGS
--nostdlib -r
+<BEGIN> FILE_$(PRJ_DIR)/../btree/bt_search.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h
<END>
-<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_NM
-nmsimso
+<BEGIN> FILE_$(PRJ_DIR)/../btree/bt_search.c_objects
+bt_search.o
<END>
-<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_OPTION_DEFINE_MACRO
--D
+<BEGIN> FILE_$(PRJ_DIR)/../btree/bt_search.c_tool
+C/C++ compiler
<END>
-<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_OPTION_INCLUDE_DIR
--I
+<BEGIN> FILE_$(PRJ_DIR)/../btree/bt_split.c_dependDone
+TRUE
<END>
-<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_SIZE
-sizesimso
+<BEGIN> FILE_$(PRJ_DIR)/../btree/bt_split.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h
<END>
-<BEGIN> BUILD_SIMSPARCSOLARISgnu_RO_DEPEND_PATH
-{$(WIND_BASE)/target/h/} \
- {$(WIND_BASE)/target/src/} \
- {$(WIND_BASE)/target/config/}
+<BEGIN> FILE_$(PRJ_DIR)/../btree/bt_split.c_objects
+bt_split.o
<END>
-<BEGIN> BUILD_SIMSPARCSOLARISgnu_TC
-::tc_SIMSPARCSOLARISgnu
+<BEGIN> FILE_$(PRJ_DIR)/../btree/bt_split.c_tool
+C/C++ compiler
<END>
-<BEGIN> BUILD__CURRENT
-PENTIUM_RPCdebug
+<BEGIN> FILE_$(PRJ_DIR)/../btree/bt_stat.c_dependDone
+TRUE
<END>
-<BEGIN> BUILD__LIST
-PENTIUM_nodebug \
- PENTIUM_RPCdebug \
- PENTIUM_RPCnodebug \
- PENTIUM_debug \
- SIMSPARCSOLARISgnu
+<BEGIN> FILE_$(PRJ_DIR)/../btree/bt_stat.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h
<END>
-<BEGIN> CORE_INFO_TYPE
-::prj_vxApp
+<BEGIN> FILE_$(PRJ_DIR)/../btree/bt_stat.c_objects
+bt_stat.o
<END>
-<BEGIN> CORE_INFO_VERSION
-2.0
+<BEGIN> FILE_$(PRJ_DIR)/../btree/bt_stat.c_tool
+C/C++ compiler
<END>
-<BEGIN> FILE_/export/home/db/btree/bt_compare.c_dependDone
+<BEGIN> FILE_$(PRJ_DIR)/../btree/bt_upgrade.c_dependDone
TRUE
<END>
-<BEGIN> FILE_/export/home/db/btree/bt_compare.c_dependencies
+<BEGIN> FILE_$(PRJ_DIR)/../btree/bt_upgrade.c_dependencies
$(PRJ_DIR)/db_config.h \
$(PRJ_DIR)/db_int.h \
- $(PRJ_DIR)/db.h \
- /export/home/db/include/queue.h \
- /export/home/db/include/shqueue.h \
- /export/home/db/include/debug.h \
- /export/home/db/include/mutex.h \
- /export/home/db/include/region.h \
- /export/home/db/include/mutex_ext.h \
- /export/home/db/include/env_ext.h \
- /export/home/db/include/os.h \
- /export/home/db/include/os_ext.h \
- /export/home/db/include/common_ext.h \
- /export/home/db/include/db_page.h \
- /export/home/db/include/btree.h \
- /export/home/db/include/btree_auto.h \
- /export/home/db/include/btree_ext.h \
- /export/home/db/include/db_am.h \
- /export/home/db/include/db_dispatch.h \
- /export/home/db/include/db_auto.h \
- /export/home/db/include/crdel_auto.h \
- /export/home/db/include/db_ext.h
+ $(PRJ_DIR)/db.h
<END>
-<BEGIN> FILE_/export/home/db/btree/bt_compare.c_objects
-bt_compare.o
+<BEGIN> FILE_$(PRJ_DIR)/../btree/bt_upgrade.c_objects
+bt_upgrade.o
<END>
-<BEGIN> FILE_/export/home/db/btree/bt_compare.c_tool
+<BEGIN> FILE_$(PRJ_DIR)/../btree/bt_upgrade.c_tool
C/C++ compiler
<END>
-<BEGIN> FILE_/export/home/db/btree/bt_conv.c_dependDone
+<BEGIN> FILE_$(PRJ_DIR)/../btree/bt_verify.c_dependDone
TRUE
<END>
-<BEGIN> FILE_/export/home/db/btree/bt_conv.c_dependencies
+<BEGIN> FILE_$(PRJ_DIR)/../btree/bt_verify.c_dependencies
$(PRJ_DIR)/db_config.h \
$(PRJ_DIR)/db_int.h \
- $(PRJ_DIR)/db.h \
- /export/home/db/include/queue.h \
- /export/home/db/include/shqueue.h \
- /export/home/db/include/debug.h \
- /export/home/db/include/mutex.h \
- /export/home/db/include/region.h \
- /export/home/db/include/mutex_ext.h \
- /export/home/db/include/env_ext.h \
- /export/home/db/include/os.h \
- /export/home/db/include/os_ext.h \
- /export/home/db/include/common_ext.h \
- /export/home/db/include/db_page.h \
- /export/home/db/include/db_swap.h \
- /export/home/db/include/btree.h \
- /export/home/db/include/btree_auto.h \
- /export/home/db/include/btree_ext.h \
- /export/home/db/include/db_am.h \
- /export/home/db/include/db_dispatch.h \
- /export/home/db/include/db_auto.h \
- /export/home/db/include/crdel_auto.h \
- /export/home/db/include/db_ext.h
+ $(PRJ_DIR)/db.h
<END>
-<BEGIN> FILE_/export/home/db/btree/bt_conv.c_objects
-bt_conv.o
+<BEGIN> FILE_$(PRJ_DIR)/../btree/bt_verify.c_objects
+bt_verify.o
<END>
-<BEGIN> FILE_/export/home/db/btree/bt_conv.c_tool
+<BEGIN> FILE_$(PRJ_DIR)/../btree/bt_verify.c_tool
C/C++ compiler
<END>
-<BEGIN> FILE_/export/home/db/btree/bt_curadj.c_dependDone
+<BEGIN> FILE_$(PRJ_DIR)/../btree/btree_auto.c_dependDone
TRUE
<END>
-<BEGIN> FILE_/export/home/db/btree/bt_curadj.c_dependencies
+<BEGIN> FILE_$(PRJ_DIR)/../btree/btree_auto.c_dependencies
$(PRJ_DIR)/db_config.h \
$(PRJ_DIR)/db_int.h \
- $(PRJ_DIR)/db.h \
- /export/home/db/include/queue.h \
- /export/home/db/include/shqueue.h \
- /export/home/db/include/debug.h \
- /export/home/db/include/mutex.h \
- /export/home/db/include/region.h \
- /export/home/db/include/mutex_ext.h \
- /export/home/db/include/env_ext.h \
- /export/home/db/include/os.h \
- /export/home/db/include/os_ext.h \
- /export/home/db/include/common_ext.h \
- /export/home/db/include/db_page.h \
- /export/home/db/include/btree.h \
- /export/home/db/include/btree_auto.h \
- /export/home/db/include/btree_ext.h \
- /export/home/db/include/db_am.h \
- /export/home/db/include/db_dispatch.h \
- /export/home/db/include/db_auto.h \
- /export/home/db/include/crdel_auto.h \
- /export/home/db/include/db_ext.h \
- /export/home/db/include/txn.h \
- /export/home/db/include/xa.h \
- /export/home/db/include/txn_auto.h \
- /export/home/db/include/txn_ext.h \
- /export/home/db/include/xa_ext.h
+ $(PRJ_DIR)/db.h
<END>
-<BEGIN> FILE_/export/home/db/btree/bt_curadj.c_objects
-bt_curadj.o
+<BEGIN> FILE_$(PRJ_DIR)/../btree/btree_auto.c_objects
+btree_auto.o
<END>
-<BEGIN> FILE_/export/home/db/btree/bt_curadj.c_tool
+<BEGIN> FILE_$(PRJ_DIR)/../btree/btree_auto.c_tool
C/C++ compiler
<END>
-<BEGIN> FILE_/export/home/db/btree/bt_cursor.c_dependDone
+<BEGIN> FILE_$(PRJ_DIR)/../clib/getopt.c_dependDone
TRUE
<END>
-<BEGIN> FILE_/export/home/db/btree/bt_cursor.c_dependencies
+<BEGIN> FILE_$(PRJ_DIR)/../clib/getopt.c_dependencies
$(PRJ_DIR)/db_config.h \
$(PRJ_DIR)/db_int.h \
- $(PRJ_DIR)/db.h \
- /export/home/db/include/queue.h \
- /export/home/db/include/shqueue.h \
- /export/home/db/include/debug.h \
- /export/home/db/include/mutex.h \
- /export/home/db/include/region.h \
- /export/home/db/include/mutex_ext.h \
- /export/home/db/include/env_ext.h \
- /export/home/db/include/os.h \
- /export/home/db/include/os_ext.h \
- /export/home/db/include/common_ext.h \
- /export/home/db/include/db_page.h \
- /export/home/db/include/db_shash.h \
- /export/home/db/include/btree.h \
- /export/home/db/include/btree_auto.h \
- /export/home/db/include/btree_ext.h \
- /export/home/db/include/db_am.h \
- /export/home/db/include/db_dispatch.h \
- /export/home/db/include/db_auto.h \
- /export/home/db/include/crdel_auto.h \
- /export/home/db/include/db_ext.h \
- /export/home/db/include/lock.h \
- /export/home/db/include/lock_ext.h \
- /export/home/db/include/qam.h \
- /export/home/db/include/qam_auto.h \
- /export/home/db/include/qam_ext.h
+ $(PRJ_DIR)/db.h
<END>
-<BEGIN> FILE_/export/home/db/btree/bt_cursor.c_objects
-bt_cursor.o
+<BEGIN> FILE_$(PRJ_DIR)/../clib/getopt.c_objects
+getopt.o
<END>
-<BEGIN> FILE_/export/home/db/btree/bt_cursor.c_tool
+<BEGIN> FILE_$(PRJ_DIR)/../clib/getopt.c_tool
C/C++ compiler
<END>
-<BEGIN> FILE_/export/home/db/btree/bt_delete.c_dependDone
+<BEGIN> FILE_$(PRJ_DIR)/../clib/snprintf.c_dependDone
TRUE
<END>
-<BEGIN> FILE_/export/home/db/btree/bt_delete.c_dependencies
+<BEGIN> FILE_$(PRJ_DIR)/../clib/snprintf.c_dependencies
$(PRJ_DIR)/db_config.h \
$(PRJ_DIR)/db_int.h \
- $(PRJ_DIR)/db.h \
- /export/home/db/include/queue.h \
- /export/home/db/include/shqueue.h \
- /export/home/db/include/debug.h \
- /export/home/db/include/mutex.h \
- /export/home/db/include/region.h \
- /export/home/db/include/mutex_ext.h \
- /export/home/db/include/env_ext.h \
- /export/home/db/include/os.h \
- /export/home/db/include/os_ext.h \
- /export/home/db/include/common_ext.h \
- /export/home/db/include/db_page.h \
- /export/home/db/include/db_shash.h \
- /export/home/db/include/btree.h \
- /export/home/db/include/btree_auto.h \
- /export/home/db/include/btree_ext.h \
- /export/home/db/include/db_am.h \
- /export/home/db/include/db_dispatch.h \
- /export/home/db/include/db_auto.h \
- /export/home/db/include/crdel_auto.h \
- /export/home/db/include/db_ext.h \
- /export/home/db/include/lock.h \
- /export/home/db/include/lock_ext.h
+ $(PRJ_DIR)/db.h
<END>
-<BEGIN> FILE_/export/home/db/btree/bt_delete.c_objects
-bt_delete.o
+<BEGIN> FILE_$(PRJ_DIR)/../clib/snprintf.c_objects
+snprintf.o
<END>
-<BEGIN> FILE_/export/home/db/btree/bt_delete.c_tool
+<BEGIN> FILE_$(PRJ_DIR)/../clib/snprintf.c_tool
C/C++ compiler
<END>
-<BEGIN> FILE_/export/home/db/btree/bt_method.c_dependDone
+<BEGIN> FILE_$(PRJ_DIR)/../clib/strcasecmp.c_dependDone
TRUE
<END>
-<BEGIN> FILE_/export/home/db/btree/bt_method.c_dependencies
+<BEGIN> FILE_$(PRJ_DIR)/../clib/strcasecmp.c_dependencies
$(PRJ_DIR)/db_config.h \
$(PRJ_DIR)/db_int.h \
- $(PRJ_DIR)/db.h \
- /export/home/db/include/queue.h \
- /export/home/db/include/shqueue.h \
- /export/home/db/include/debug.h \
- /export/home/db/include/mutex.h \
- /export/home/db/include/region.h \
- /export/home/db/include/mutex_ext.h \
- /export/home/db/include/env_ext.h \
- /export/home/db/include/os.h \
- /export/home/db/include/os_ext.h \
- /export/home/db/include/common_ext.h \
- /export/home/db/include/db_page.h \
- /export/home/db/include/btree.h \
- /export/home/db/include/btree_auto.h \
- /export/home/db/include/btree_ext.h \
- /export/home/db/include/db_am.h \
- /export/home/db/include/db_dispatch.h \
- /export/home/db/include/db_auto.h \
- /export/home/db/include/crdel_auto.h \
- /export/home/db/include/db_ext.h \
- /export/home/db/include/qam.h \
- /export/home/db/include/qam_auto.h \
- /export/home/db/include/qam_ext.h
+ $(PRJ_DIR)/db.h
<END>
-<BEGIN> FILE_/export/home/db/btree/bt_method.c_objects
-bt_method.o
+<BEGIN> FILE_$(PRJ_DIR)/../clib/strcasecmp.c_objects
+strcasecmp.o
<END>
-<BEGIN> FILE_/export/home/db/btree/bt_method.c_tool
+<BEGIN> FILE_$(PRJ_DIR)/../clib/strcasecmp.c_tool
C/C++ compiler
<END>
-<BEGIN> FILE_/export/home/db/btree/bt_open.c_dependDone
+<BEGIN> FILE_$(PRJ_DIR)/../clib/strdup.c_dependDone
TRUE
<END>
-<BEGIN> FILE_/export/home/db/btree/bt_open.c_dependencies
+<BEGIN> FILE_$(PRJ_DIR)/../clib/strdup.c_dependencies
$(PRJ_DIR)/db_config.h \
$(PRJ_DIR)/db_int.h \
- $(PRJ_DIR)/db.h \
- /export/home/db/include/queue.h \
- /export/home/db/include/shqueue.h \
- /export/home/db/include/debug.h \
- /export/home/db/include/mutex.h \
- /export/home/db/include/region.h \
- /export/home/db/include/mutex_ext.h \
- /export/home/db/include/env_ext.h \
- /export/home/db/include/os.h \
- /export/home/db/include/os_ext.h \
- /export/home/db/include/common_ext.h \
- /export/home/db/include/db_page.h \
- /export/home/db/include/db_swap.h \
- /export/home/db/include/btree.h \
- /export/home/db/include/btree_auto.h \
- /export/home/db/include/btree_ext.h \
- /export/home/db/include/db_am.h \
- /export/home/db/include/db_dispatch.h \
- /export/home/db/include/db_auto.h \
- /export/home/db/include/crdel_auto.h \
- /export/home/db/include/db_ext.h \
- /export/home/db/include/db_shash.h \
- /export/home/db/include/lock.h \
- /export/home/db/include/lock_ext.h \
- /export/home/db/include/log.h \
- /export/home/db/include/log_auto.h \
- /export/home/db/include/log_ext.h \
- /export/home/db/include/mp.h \
- /export/home/db/include/mp_ext.h
+ $(PRJ_DIR)/db.h
<END>
-<BEGIN> FILE_/export/home/db/btree/bt_open.c_objects
-bt_open.o
+<BEGIN> FILE_$(PRJ_DIR)/../clib/strdup.c_objects
+strdup.o
<END>
-<BEGIN> FILE_/export/home/db/btree/bt_open.c_tool
+<BEGIN> FILE_$(PRJ_DIR)/../clib/strdup.c_tool
C/C++ compiler
<END>
-<BEGIN> FILE_/export/home/db/btree/bt_put.c_dependDone
+<BEGIN> FILE_$(PRJ_DIR)/../clib/vsnprintf.c_dependDone
TRUE
<END>
-<BEGIN> FILE_/export/home/db/btree/bt_put.c_dependencies
+<BEGIN> FILE_$(PRJ_DIR)/../clib/vsnprintf.c_dependencies
$(PRJ_DIR)/db_config.h \
$(PRJ_DIR)/db_int.h \
- $(PRJ_DIR)/db.h \
- /export/home/db/include/queue.h \
- /export/home/db/include/shqueue.h \
- /export/home/db/include/debug.h \
- /export/home/db/include/mutex.h \
- /export/home/db/include/region.h \
- /export/home/db/include/mutex_ext.h \
- /export/home/db/include/env_ext.h \
- /export/home/db/include/os.h \
- /export/home/db/include/os_ext.h \
- /export/home/db/include/common_ext.h \
- /export/home/db/include/db_page.h \
- /export/home/db/include/btree.h \
- /export/home/db/include/btree_auto.h \
- /export/home/db/include/btree_ext.h \
- /export/home/db/include/db_am.h \
- /export/home/db/include/db_dispatch.h \
- /export/home/db/include/db_auto.h \
- /export/home/db/include/crdel_auto.h \
- /export/home/db/include/db_ext.h
+ $(PRJ_DIR)/db.h
<END>
-<BEGIN> FILE_/export/home/db/btree/bt_put.c_objects
-bt_put.o
+<BEGIN> FILE_$(PRJ_DIR)/../clib/vsnprintf.c_objects
+vsnprintf.o
<END>
-<BEGIN> FILE_/export/home/db/btree/bt_put.c_tool
+<BEGIN> FILE_$(PRJ_DIR)/../clib/vsnprintf.c_tool
C/C++ compiler
<END>
-<BEGIN> FILE_/export/home/db/btree/bt_rec.c_dependDone
+<BEGIN> FILE_$(PRJ_DIR)/../common/db_byteorder.c_dependDone
TRUE
<END>
-<BEGIN> FILE_/export/home/db/btree/bt_rec.c_dependencies
+<BEGIN> FILE_$(PRJ_DIR)/../common/db_byteorder.c_dependencies
$(PRJ_DIR)/db_config.h \
$(PRJ_DIR)/db_int.h \
- $(PRJ_DIR)/db.h \
- /export/home/db/include/queue.h \
- /export/home/db/include/shqueue.h \
- /export/home/db/include/debug.h \
- /export/home/db/include/mutex.h \
- /export/home/db/include/region.h \
- /export/home/db/include/mutex_ext.h \
- /export/home/db/include/env_ext.h \
- /export/home/db/include/os.h \
- /export/home/db/include/os_ext.h \
- /export/home/db/include/common_ext.h \
- /export/home/db/include/db_page.h \
- /export/home/db/include/hash.h \
- /export/home/db/include/hash_auto.h \
- /export/home/db/include/hash_ext.h \
- /export/home/db/include/db_am.h \
- /export/home/db/include/db_dispatch.h \
- /export/home/db/include/db_auto.h \
- /export/home/db/include/crdel_auto.h \
- /export/home/db/include/db_ext.h \
- /export/home/db/include/btree.h \
- /export/home/db/include/btree_auto.h \
- /export/home/db/include/btree_ext.h \
- /export/home/db/include/log.h \
- /export/home/db/include/log_auto.h \
- /export/home/db/include/log_ext.h
+ $(PRJ_DIR)/db.h
<END>
-<BEGIN> FILE_/export/home/db/btree/bt_rec.c_objects
-bt_rec.o
+<BEGIN> FILE_$(PRJ_DIR)/../common/db_byteorder.c_objects
+db_byteorder.o
<END>
-<BEGIN> FILE_/export/home/db/btree/bt_rec.c_tool
+<BEGIN> FILE_$(PRJ_DIR)/../common/db_byteorder.c_tool
C/C++ compiler
<END>
-<BEGIN> FILE_/export/home/db/btree/bt_reclaim.c_dependDone
+<BEGIN> FILE_$(PRJ_DIR)/../common/db_err.c_dependDone
TRUE
<END>
-<BEGIN> FILE_/export/home/db/btree/bt_reclaim.c_dependencies
+<BEGIN> FILE_$(PRJ_DIR)/../common/db_err.c_dependencies
$(PRJ_DIR)/db_config.h \
$(PRJ_DIR)/db_int.h \
- $(PRJ_DIR)/db.h \
- /export/home/db/include/queue.h \
- /export/home/db/include/shqueue.h \
- /export/home/db/include/debug.h \
- /export/home/db/include/mutex.h \
- /export/home/db/include/region.h \
- /export/home/db/include/mutex_ext.h \
- /export/home/db/include/env_ext.h \
- /export/home/db/include/os.h \
- /export/home/db/include/os_ext.h \
- /export/home/db/include/common_ext.h \
- /export/home/db/include/db_page.h \
- /export/home/db/include/db_shash.h \
- /export/home/db/include/lock.h \
- /export/home/db/include/lock_ext.h \
- /export/home/db/include/btree.h \
- /export/home/db/include/btree_auto.h \
- /export/home/db/include/btree_ext.h \
- /export/home/db/include/db_am.h \
- /export/home/db/include/db_dispatch.h \
- /export/home/db/include/db_auto.h \
- /export/home/db/include/crdel_auto.h \
- /export/home/db/include/db_ext.h
+ $(PRJ_DIR)/db.h
<END>
-<BEGIN> FILE_/export/home/db/btree/bt_reclaim.c_objects
-bt_reclaim.o
+<BEGIN> FILE_$(PRJ_DIR)/../common/db_err.c_objects
+db_err.o
<END>
-<BEGIN> FILE_/export/home/db/btree/bt_reclaim.c_tool
+<BEGIN> FILE_$(PRJ_DIR)/../common/db_err.c_tool
C/C++ compiler
<END>
-<BEGIN> FILE_/export/home/db/btree/bt_recno.c_dependDone
+<BEGIN> FILE_$(PRJ_DIR)/../common/db_getlong.c_dependDone
TRUE
<END>
-<BEGIN> FILE_/export/home/db/btree/bt_recno.c_dependencies
+<BEGIN> FILE_$(PRJ_DIR)/../common/db_getlong.c_dependencies
$(PRJ_DIR)/db_config.h \
$(PRJ_DIR)/db_int.h \
- $(PRJ_DIR)/db.h \
- /export/home/db/include/queue.h \
- /export/home/db/include/shqueue.h \
- /export/home/db/include/debug.h \
- /export/home/db/include/mutex.h \
- /export/home/db/include/region.h \
- /export/home/db/include/mutex_ext.h \
- /export/home/db/include/env_ext.h \
- /export/home/db/include/os.h \
- /export/home/db/include/os_ext.h \
- /export/home/db/include/common_ext.h \
- /export/home/db/include/db_page.h \
- /export/home/db/include/btree.h \
- /export/home/db/include/btree_auto.h \
- /export/home/db/include/btree_ext.h \
- /export/home/db/include/db_am.h \
- /export/home/db/include/db_dispatch.h \
- /export/home/db/include/db_auto.h \
- /export/home/db/include/crdel_auto.h \
- /export/home/db/include/db_ext.h \
- /export/home/db/include/db_shash.h \
- /export/home/db/include/lock.h \
- /export/home/db/include/lock_ext.h \
- /export/home/db/include/qam.h \
- /export/home/db/include/qam_auto.h \
- /export/home/db/include/qam_ext.h \
- /export/home/db/include/txn.h \
- /export/home/db/include/xa.h \
- /export/home/db/include/txn_auto.h \
- /export/home/db/include/txn_ext.h \
- /export/home/db/include/xa_ext.h
+ $(PRJ_DIR)/db.h
<END>
-<BEGIN> FILE_/export/home/db/btree/bt_recno.c_objects
-bt_recno.o
+<BEGIN> FILE_$(PRJ_DIR)/../common/db_getlong.c_objects
+db_getlong.o
<END>
-<BEGIN> FILE_/export/home/db/btree/bt_recno.c_tool
+<BEGIN> FILE_$(PRJ_DIR)/../common/db_getlong.c_tool
C/C++ compiler
<END>
-<BEGIN> FILE_/export/home/db/btree/bt_rsearch.c_dependDone
+<BEGIN> FILE_$(PRJ_DIR)/../common/db_idspace.c_dependDone
TRUE
<END>
-<BEGIN> FILE_/export/home/db/btree/bt_rsearch.c_dependencies
+<BEGIN> FILE_$(PRJ_DIR)/../common/db_idspace.c_dependencies
$(PRJ_DIR)/db_config.h \
$(PRJ_DIR)/db_int.h \
- $(PRJ_DIR)/db.h \
- /export/home/db/include/queue.h \
- /export/home/db/include/shqueue.h \
- /export/home/db/include/debug.h \
- /export/home/db/include/mutex.h \
- /export/home/db/include/region.h \
- /export/home/db/include/mutex_ext.h \
- /export/home/db/include/env_ext.h \
- /export/home/db/include/os.h \
- /export/home/db/include/os_ext.h \
- /export/home/db/include/common_ext.h \
- /export/home/db/include/db_page.h \
- /export/home/db/include/btree.h \
- /export/home/db/include/btree_auto.h \
- /export/home/db/include/btree_ext.h \
- /export/home/db/include/db_am.h \
- /export/home/db/include/db_dispatch.h \
- /export/home/db/include/db_auto.h \
- /export/home/db/include/crdel_auto.h \
- /export/home/db/include/db_ext.h \
- /export/home/db/include/db_shash.h \
- /export/home/db/include/lock.h \
- /export/home/db/include/lock_ext.h
+ $(PRJ_DIR)/db.h
<END>
-<BEGIN> FILE_/export/home/db/btree/bt_rsearch.c_objects
-bt_rsearch.o
+<BEGIN> FILE_$(PRJ_DIR)/../common/db_idspace.c_objects
+db_idspace.o
<END>
-<BEGIN> FILE_/export/home/db/btree/bt_rsearch.c_tool
+<BEGIN> FILE_$(PRJ_DIR)/../common/db_idspace.c_tool
C/C++ compiler
<END>
-<BEGIN> FILE_/export/home/db/btree/bt_search.c_dependDone
+<BEGIN> FILE_$(PRJ_DIR)/../common/db_log2.c_dependDone
TRUE
<END>
-<BEGIN> FILE_/export/home/db/btree/bt_search.c_dependencies
+<BEGIN> FILE_$(PRJ_DIR)/../common/db_log2.c_dependencies
$(PRJ_DIR)/db_config.h \
$(PRJ_DIR)/db_int.h \
- $(PRJ_DIR)/db.h \
- /export/home/db/include/queue.h \
- /export/home/db/include/shqueue.h \
- /export/home/db/include/debug.h \
- /export/home/db/include/mutex.h \
- /export/home/db/include/region.h \
- /export/home/db/include/mutex_ext.h \
- /export/home/db/include/env_ext.h \
- /export/home/db/include/os.h \
- /export/home/db/include/os_ext.h \
- /export/home/db/include/common_ext.h \
- /export/home/db/include/db_page.h \
- /export/home/db/include/db_shash.h \
- /export/home/db/include/btree.h \
- /export/home/db/include/btree_auto.h \
- /export/home/db/include/btree_ext.h \
- /export/home/db/include/db_am.h \
- /export/home/db/include/db_dispatch.h \
- /export/home/db/include/db_auto.h \
- /export/home/db/include/crdel_auto.h \
- /export/home/db/include/db_ext.h \
- /export/home/db/include/lock.h \
- /export/home/db/include/lock_ext.h
+ $(PRJ_DIR)/db.h
<END>
-<BEGIN> FILE_/export/home/db/btree/bt_search.c_objects
-bt_search.o
+<BEGIN> FILE_$(PRJ_DIR)/../common/db_log2.c_objects
+db_log2.o
<END>
-<BEGIN> FILE_/export/home/db/btree/bt_search.c_tool
+<BEGIN> FILE_$(PRJ_DIR)/../common/db_log2.c_tool
C/C++ compiler
<END>
-<BEGIN> FILE_/export/home/db/btree/bt_split.c_dependDone
+<BEGIN> FILE_$(PRJ_DIR)/../common/util_arg.c_dependDone
TRUE
<END>
-<BEGIN> FILE_/export/home/db/btree/bt_split.c_dependencies
+<BEGIN> FILE_$(PRJ_DIR)/../common/util_arg.c_dependencies
$(PRJ_DIR)/db_config.h \
$(PRJ_DIR)/db_int.h \
- $(PRJ_DIR)/db.h \
- /export/home/db/include/queue.h \
- /export/home/db/include/shqueue.h \
- /export/home/db/include/debug.h \
- /export/home/db/include/mutex.h \
- /export/home/db/include/region.h \
- /export/home/db/include/mutex_ext.h \
- /export/home/db/include/env_ext.h \
- /export/home/db/include/os.h \
- /export/home/db/include/os_ext.h \
- /export/home/db/include/common_ext.h \
- /export/home/db/include/db_page.h \
- /export/home/db/include/db_shash.h \
- /export/home/db/include/lock.h \
- /export/home/db/include/lock_ext.h \
- /export/home/db/include/btree.h \
- /export/home/db/include/btree_auto.h \
- /export/home/db/include/btree_ext.h \
- /export/home/db/include/db_am.h \
- /export/home/db/include/db_dispatch.h \
- /export/home/db/include/db_auto.h \
- /export/home/db/include/crdel_auto.h \
- /export/home/db/include/db_ext.h
+ $(PRJ_DIR)/db.h
<END>
-<BEGIN> FILE_/export/home/db/btree/bt_split.c_objects
-bt_split.o
+<BEGIN> FILE_$(PRJ_DIR)/../common/util_arg.c_objects
+util_arg.o
<END>
-<BEGIN> FILE_/export/home/db/btree/bt_split.c_tool
+<BEGIN> FILE_$(PRJ_DIR)/../common/util_arg.c_tool
C/C++ compiler
<END>
-<BEGIN> FILE_/export/home/db/btree/bt_stat.c_dependDone
+<BEGIN> FILE_$(PRJ_DIR)/../common/util_cache.c_dependDone
TRUE
<END>
-<BEGIN> FILE_/export/home/db/btree/bt_stat.c_dependencies
+<BEGIN> FILE_$(PRJ_DIR)/../common/util_cache.c_dependencies
$(PRJ_DIR)/db_config.h \
$(PRJ_DIR)/db_int.h \
- $(PRJ_DIR)/db.h \
- /export/home/db/include/queue.h \
- /export/home/db/include/shqueue.h \
- /export/home/db/include/debug.h \
- /export/home/db/include/mutex.h \
- /export/home/db/include/region.h \
- /export/home/db/include/mutex_ext.h \
- /export/home/db/include/env_ext.h \
- /export/home/db/include/os.h \
- /export/home/db/include/os_ext.h \
- /export/home/db/include/common_ext.h \
- /export/home/db/include/db_page.h \
- /export/home/db/include/db_shash.h \
- /export/home/db/include/lock.h \
- /export/home/db/include/lock_ext.h \
- /export/home/db/include/btree.h \
- /export/home/db/include/btree_auto.h \
- /export/home/db/include/btree_ext.h \
- /export/home/db/include/db_am.h \
- /export/home/db/include/db_dispatch.h \
- /export/home/db/include/db_auto.h \
- /export/home/db/include/crdel_auto.h \
- /export/home/db/include/db_ext.h
+ $(PRJ_DIR)/db.h
<END>
-<BEGIN> FILE_/export/home/db/btree/bt_stat.c_objects
-bt_stat.o
+<BEGIN> FILE_$(PRJ_DIR)/../common/util_cache.c_objects
+util_cache.o
<END>
-<BEGIN> FILE_/export/home/db/btree/bt_stat.c_tool
+<BEGIN> FILE_$(PRJ_DIR)/../common/util_cache.c_tool
C/C++ compiler
<END>
-<BEGIN> FILE_/export/home/db/btree/bt_upgrade.c_dependDone
+<BEGIN> FILE_$(PRJ_DIR)/../common/util_log.c_dependDone
TRUE
<END>
-<BEGIN> FILE_/export/home/db/btree/bt_upgrade.c_dependencies
+<BEGIN> FILE_$(PRJ_DIR)/../common/util_log.c_dependencies
$(PRJ_DIR)/db_config.h \
$(PRJ_DIR)/db_int.h \
- $(PRJ_DIR)/db.h \
- /export/home/db/include/queue.h \
- /export/home/db/include/shqueue.h \
- /export/home/db/include/debug.h \
- /export/home/db/include/mutex.h \
- /export/home/db/include/region.h \
- /export/home/db/include/mutex_ext.h \
- /export/home/db/include/env_ext.h \
- /export/home/db/include/os.h \
- /export/home/db/include/os_ext.h \
- /export/home/db/include/common_ext.h \
- /export/home/db/include/db_page.h \
- /export/home/db/include/db_swap.h \
- /export/home/db/include/btree.h \
- /export/home/db/include/btree_auto.h \
- /export/home/db/include/btree_ext.h \
- /export/home/db/include/db_am.h \
- /export/home/db/include/db_dispatch.h \
- /export/home/db/include/db_auto.h \
- /export/home/db/include/crdel_auto.h \
- /export/home/db/include/db_ext.h \
- /export/home/db/include/db_upgrade.h
+ $(PRJ_DIR)/db.h
<END>
-<BEGIN> FILE_/export/home/db/btree/bt_upgrade.c_objects
-bt_upgrade.o
+<BEGIN> FILE_$(PRJ_DIR)/../common/util_log.c_objects
+util_log.o
<END>
-<BEGIN> FILE_/export/home/db/btree/bt_upgrade.c_tool
+<BEGIN> FILE_$(PRJ_DIR)/../common/util_log.c_tool
C/C++ compiler
<END>
-<BEGIN> FILE_/export/home/db/btree/bt_verify.c_dependDone
+<BEGIN> FILE_$(PRJ_DIR)/../common/util_sig.c_dependDone
TRUE
<END>
-<BEGIN> FILE_/export/home/db/btree/bt_verify.c_dependencies
+<BEGIN> FILE_$(PRJ_DIR)/../common/util_sig.c_dependencies
$(PRJ_DIR)/db_config.h \
$(PRJ_DIR)/db_int.h \
- $(PRJ_DIR)/db.h \
- /export/home/db/include/queue.h \
- /export/home/db/include/shqueue.h \
- /export/home/db/include/debug.h \
- /export/home/db/include/mutex.h \
- /export/home/db/include/region.h \
- /export/home/db/include/mutex_ext.h \
- /export/home/db/include/env_ext.h \
- /export/home/db/include/os.h \
- /export/home/db/include/os_ext.h \
- /export/home/db/include/common_ext.h \
- /export/home/db/include/db_page.h \
- /export/home/db/include/db_verify.h \
- /export/home/db/include/btree.h \
- /export/home/db/include/btree_auto.h \
- /export/home/db/include/btree_ext.h \
- /export/home/db/include/db_am.h \
- /export/home/db/include/db_dispatch.h \
- /export/home/db/include/db_auto.h \
- /export/home/db/include/crdel_auto.h \
- /export/home/db/include/db_ext.h
+ $(PRJ_DIR)/db.h
<END>
-<BEGIN> FILE_/export/home/db/btree/bt_verify.c_objects
-bt_verify.o
+<BEGIN> FILE_$(PRJ_DIR)/../common/util_sig.c_objects
+util_sig.o
<END>
-<BEGIN> FILE_/export/home/db/btree/bt_verify.c_tool
+<BEGIN> FILE_$(PRJ_DIR)/../common/util_sig.c_tool
C/C++ compiler
<END>
-<BEGIN> FILE_/export/home/db/btree/btree_auto.c_dependDone
+<BEGIN> FILE_$(PRJ_DIR)/../db/crdel_auto.c_dependDone
TRUE
<END>
-<BEGIN> FILE_/export/home/db/btree/btree_auto.c_dependencies
+<BEGIN> FILE_$(PRJ_DIR)/../db/crdel_auto.c_dependencies
$(PRJ_DIR)/db_config.h \
$(PRJ_DIR)/db_int.h \
- $(PRJ_DIR)/db.h \
- /export/home/db/include/queue.h \
- /export/home/db/include/shqueue.h \
- /export/home/db/include/debug.h \
- /export/home/db/include/mutex.h \
- /export/home/db/include/region.h \
- /export/home/db/include/mutex_ext.h \
- /export/home/db/include/env_ext.h \
- /export/home/db/include/os.h \
- /export/home/db/include/os_ext.h \
- /export/home/db/include/common_ext.h \
- /export/home/db/include/db_page.h \
- /export/home/db/include/db_dispatch.h \
- /export/home/db/include/db_am.h \
- /export/home/db/include/db_auto.h \
- /export/home/db/include/crdel_auto.h \
- /export/home/db/include/db_ext.h \
- /export/home/db/include/btree.h \
- /export/home/db/include/btree_auto.h \
- /export/home/db/include/btree_ext.h \
- /export/home/db/include/txn.h \
- /export/home/db/include/xa.h \
- /export/home/db/include/txn_auto.h \
- /export/home/db/include/txn_ext.h \
- /export/home/db/include/xa_ext.h
+ $(PRJ_DIR)/db.h
<END>
-<BEGIN> FILE_/export/home/db/btree/btree_auto.c_objects
-btree_auto.o
+<BEGIN> FILE_$(PRJ_DIR)/../db/crdel_auto.c_objects
+crdel_auto.o
<END>
-<BEGIN> FILE_/export/home/db/btree/btree_auto.c_tool
+<BEGIN> FILE_$(PRJ_DIR)/../db/crdel_auto.c_tool
C/C++ compiler
<END>
-<BEGIN> FILE_/export/home/db/clib/getopt.c_dependDone
+<BEGIN> FILE_$(PRJ_DIR)/../db/crdel_rec.c_dependDone
TRUE
<END>
-<BEGIN> FILE_/export/home/db/clib/getopt.c_dependencies
+<BEGIN> FILE_$(PRJ_DIR)/../db/crdel_rec.c_dependencies
$(PRJ_DIR)/db_config.h \
$(PRJ_DIR)/db_int.h \
- $(PRJ_DIR)/db.h \
- /export/home/db/include/queue.h \
- /export/home/db/include/shqueue.h \
- /export/home/db/include/debug.h \
- /export/home/db/include/mutex.h \
- /export/home/db/include/region.h \
- /export/home/db/include/mutex_ext.h \
- /export/home/db/include/env_ext.h \
- /export/home/db/include/os.h \
- /export/home/db/include/os_ext.h \
- /export/home/db/include/common_ext.h
+ $(PRJ_DIR)/db.h
<END>
-<BEGIN> FILE_/export/home/db/clib/getopt.c_objects
-getopt.o
+<BEGIN> FILE_$(PRJ_DIR)/../db/crdel_rec.c_objects
+crdel_rec.o
<END>
-<BEGIN> FILE_/export/home/db/clib/getopt.c_tool
+<BEGIN> FILE_$(PRJ_DIR)/../db/crdel_rec.c_tool
C/C++ compiler
<END>
-<BEGIN> FILE_/export/home/db/clib/snprintf.c_dependDone
+<BEGIN> FILE_$(PRJ_DIR)/../db/db.c_dependDone
TRUE
<END>
-<BEGIN> FILE_/export/home/db/clib/snprintf.c_dependencies
+<BEGIN> FILE_$(PRJ_DIR)/../db/db.c_dependencies
$(PRJ_DIR)/db_config.h \
$(PRJ_DIR)/db_int.h \
- $(PRJ_DIR)/db.h \
- /export/home/db/include/queue.h \
- /export/home/db/include/shqueue.h \
- /export/home/db/include/debug.h \
- /export/home/db/include/mutex.h \
- /export/home/db/include/region.h \
- /export/home/db/include/mutex_ext.h \
- /export/home/db/include/env_ext.h \
- /export/home/db/include/os.h \
- /export/home/db/include/os_ext.h \
- /export/home/db/include/common_ext.h
+ $(PRJ_DIR)/db.h
<END>
-<BEGIN> FILE_/export/home/db/clib/snprintf.c_objects
-snprintf.o
+<BEGIN> FILE_$(PRJ_DIR)/../db/db.c_objects
+db.o
<END>
-<BEGIN> FILE_/export/home/db/clib/snprintf.c_tool
+<BEGIN> FILE_$(PRJ_DIR)/../db/db.c_tool
C/C++ compiler
<END>
-<BEGIN> FILE_/export/home/db/clib/strcasecmp.c_dependDone
+<BEGIN> FILE_$(PRJ_DIR)/../db/db_am.c_dependDone
TRUE
<END>
-<BEGIN> FILE_/export/home/db/clib/strcasecmp.c_dependencies
-$(PRJ_DIR)/db_config.h
+<BEGIN> FILE_$(PRJ_DIR)/../db/db_am.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h
<END>
-<BEGIN> FILE_/export/home/db/clib/strcasecmp.c_objects
-strcasecmp.o
+<BEGIN> FILE_$(PRJ_DIR)/../db/db_am.c_objects
+db_am.o
<END>
-<BEGIN> FILE_/export/home/db/clib/strcasecmp.c_tool
+<BEGIN> FILE_$(PRJ_DIR)/../db/db_am.c_tool
C/C++ compiler
<END>
-<BEGIN> FILE_/export/home/db/clib/vsnprintf.c_dependDone
+<BEGIN> FILE_$(PRJ_DIR)/../db/db_auto.c_dependDone
TRUE
<END>
-<BEGIN> FILE_/export/home/db/clib/vsnprintf.c_dependencies
+<BEGIN> FILE_$(PRJ_DIR)/../db/db_auto.c_dependencies
$(PRJ_DIR)/db_config.h \
$(PRJ_DIR)/db_int.h \
- $(PRJ_DIR)/db.h \
- /export/home/db/include/queue.h \
- /export/home/db/include/shqueue.h \
- /export/home/db/include/debug.h \
- /export/home/db/include/mutex.h \
- /export/home/db/include/region.h \
- /export/home/db/include/mutex_ext.h \
- /export/home/db/include/env_ext.h \
- /export/home/db/include/os.h \
- /export/home/db/include/os_ext.h \
- /export/home/db/include/common_ext.h
+ $(PRJ_DIR)/db.h
<END>
-<BEGIN> FILE_/export/home/db/clib/vsnprintf.c_objects
-vsnprintf.o
+<BEGIN> FILE_$(PRJ_DIR)/../db/db_auto.c_objects
+db_auto.o
<END>
-<BEGIN> FILE_/export/home/db/clib/vsnprintf.c_tool
+<BEGIN> FILE_$(PRJ_DIR)/../db/db_auto.c_tool
C/C++ compiler
<END>
-<BEGIN> FILE_/export/home/db/common/db_byteorder.c_dependDone
+<BEGIN> FILE_$(PRJ_DIR)/../db/db_cam.c_dependDone
TRUE
<END>
-<BEGIN> FILE_/export/home/db/common/db_byteorder.c_dependencies
+<BEGIN> FILE_$(PRJ_DIR)/../db/db_cam.c_dependencies
$(PRJ_DIR)/db_config.h \
$(PRJ_DIR)/db_int.h \
- $(PRJ_DIR)/db.h \
- /export/home/db/include/queue.h \
- /export/home/db/include/shqueue.h \
- /export/home/db/include/debug.h \
- /export/home/db/include/mutex.h \
- /export/home/db/include/region.h \
- /export/home/db/include/mutex_ext.h \
- /export/home/db/include/env_ext.h \
- /export/home/db/include/os.h \
- /export/home/db/include/os_ext.h \
- /export/home/db/include/common_ext.h
+ $(PRJ_DIR)/db.h
<END>
-<BEGIN> FILE_/export/home/db/common/db_byteorder.c_objects
-db_byteorder.o
+<BEGIN> FILE_$(PRJ_DIR)/../db/db_cam.c_objects
+db_cam.o
<END>
-<BEGIN> FILE_/export/home/db/common/db_byteorder.c_tool
+<BEGIN> FILE_$(PRJ_DIR)/../db/db_cam.c_tool
C/C++ compiler
<END>
-<BEGIN> FILE_/export/home/db/common/db_err.c_dependDone
+<BEGIN> FILE_$(PRJ_DIR)/../db/db_conv.c_dependDone
TRUE
<END>
-<BEGIN> FILE_/export/home/db/common/db_err.c_dependencies
+<BEGIN> FILE_$(PRJ_DIR)/../db/db_conv.c_dependencies
$(PRJ_DIR)/db_config.h \
$(PRJ_DIR)/db_int.h \
- $(PRJ_DIR)/db.h \
- /export/home/db/include/queue.h \
- /export/home/db/include/shqueue.h \
- /export/home/db/include/debug.h \
- /export/home/db/include/mutex.h \
- /export/home/db/include/region.h \
- /export/home/db/include/mutex_ext.h \
- /export/home/db/include/env_ext.h \
- /export/home/db/include/os.h \
- /export/home/db/include/os_ext.h \
- /export/home/db/include/common_ext.h \
- /export/home/db/include/db_shash.h \
- /export/home/db/include/lock.h \
- /export/home/db/include/lock_ext.h \
- /export/home/db/include/log.h \
- /export/home/db/include/log_auto.h \
- /export/home/db/include/log_ext.h \
- /export/home/db/include/mp.h \
- /export/home/db/include/mp_ext.h \
- /export/home/db/include/txn.h \
- /export/home/db/include/xa.h \
- /export/home/db/include/txn_auto.h \
- /export/home/db/include/txn_ext.h \
- /export/home/db/include/xa_ext.h \
- /export/home/db/include/clib_ext.h \
- /export/home/db/include/db_auto.h
+ $(PRJ_DIR)/db.h
<END>
-<BEGIN> FILE_/export/home/db/common/db_err.c_objects
-db_err.o
+<BEGIN> FILE_$(PRJ_DIR)/../db/db_conv.c_objects
+db_conv.o
<END>
-<BEGIN> FILE_/export/home/db/common/db_err.c_tool
+<BEGIN> FILE_$(PRJ_DIR)/../db/db_conv.c_tool
C/C++ compiler
<END>
-<BEGIN> FILE_/export/home/db/common/db_getlong.c_dependDone
+<BEGIN> FILE_$(PRJ_DIR)/../db/db_dispatch.c_dependDone
TRUE
<END>
-<BEGIN> FILE_/export/home/db/common/db_getlong.c_dependencies
+<BEGIN> FILE_$(PRJ_DIR)/../db/db_dispatch.c_dependencies
$(PRJ_DIR)/db_config.h \
$(PRJ_DIR)/db_int.h \
- $(PRJ_DIR)/db.h \
- /export/home/db/include/queue.h \
- /export/home/db/include/shqueue.h \
- /export/home/db/include/debug.h \
- /export/home/db/include/mutex.h \
- /export/home/db/include/region.h \
- /export/home/db/include/mutex_ext.h \
- /export/home/db/include/env_ext.h \
- /export/home/db/include/os.h \
- /export/home/db/include/os_ext.h \
- /export/home/db/include/common_ext.h \
- /export/home/db/include/clib_ext.h
+ $(PRJ_DIR)/db.h
<END>
-<BEGIN> FILE_/export/home/db/common/db_getlong.c_objects
-db_getlong.o
+<BEGIN> FILE_$(PRJ_DIR)/../db/db_dispatch.c_objects
+db_dispatch.o
<END>
-<BEGIN> FILE_/export/home/db/common/db_getlong.c_tool
+<BEGIN> FILE_$(PRJ_DIR)/../db/db_dispatch.c_tool
C/C++ compiler
<END>
-<BEGIN> FILE_/export/home/db/common/db_log2.c_dependDone
+<BEGIN> FILE_$(PRJ_DIR)/../db/db_dup.c_dependDone
TRUE
<END>
-<BEGIN> FILE_/export/home/db/common/db_log2.c_dependencies
+<BEGIN> FILE_$(PRJ_DIR)/../db/db_dup.c_dependencies
$(PRJ_DIR)/db_config.h \
$(PRJ_DIR)/db_int.h \
- $(PRJ_DIR)/db.h \
- /export/home/db/include/queue.h \
- /export/home/db/include/shqueue.h \
- /export/home/db/include/debug.h \
- /export/home/db/include/mutex.h \
- /export/home/db/include/region.h \
- /export/home/db/include/mutex_ext.h \
- /export/home/db/include/env_ext.h \
- /export/home/db/include/os.h \
- /export/home/db/include/os_ext.h \
- /export/home/db/include/common_ext.h
+ $(PRJ_DIR)/db.h
<END>
-<BEGIN> FILE_/export/home/db/common/db_log2.c_objects
-db_log2.o
+<BEGIN> FILE_$(PRJ_DIR)/../db/db_dup.c_objects
+db_dup.o
<END>
-<BEGIN> FILE_/export/home/db/common/db_log2.c_tool
+<BEGIN> FILE_$(PRJ_DIR)/../db/db_dup.c_tool
C/C++ compiler
<END>
-<BEGIN> FILE_/export/home/db/common/util_log.c_dependDone
+<BEGIN> FILE_$(PRJ_DIR)/../db/db_iface.c_dependDone
TRUE
<END>
-<BEGIN> FILE_/export/home/db/common/util_log.c_dependencies
+<BEGIN> FILE_$(PRJ_DIR)/../db/db_iface.c_dependencies
$(PRJ_DIR)/db_config.h \
$(PRJ_DIR)/db_int.h \
- $(PRJ_DIR)/db.h \
- /export/home/db/include/queue.h \
- /export/home/db/include/shqueue.h \
- /export/home/db/include/debug.h \
- /export/home/db/include/mutex.h \
- /export/home/db/include/region.h \
- /export/home/db/include/mutex_ext.h \
- /export/home/db/include/env_ext.h \
- /export/home/db/include/os.h \
- /export/home/db/include/os_ext.h \
- /export/home/db/include/common_ext.h
+ $(PRJ_DIR)/db.h
<END>
-<BEGIN> FILE_/export/home/db/common/util_log.c_objects
-util_log.o
+<BEGIN> FILE_$(PRJ_DIR)/../db/db_iface.c_objects
+db_iface.o
<END>
-<BEGIN> FILE_/export/home/db/common/util_log.c_tool
+<BEGIN> FILE_$(PRJ_DIR)/../db/db_iface.c_tool
C/C++ compiler
<END>
-<BEGIN> FILE_/export/home/db/common/util_sig.c_dependDone
+<BEGIN> FILE_$(PRJ_DIR)/../db/db_join.c_dependDone
TRUE
<END>
-<BEGIN> FILE_/export/home/db/common/util_sig.c_dependencies
+<BEGIN> FILE_$(PRJ_DIR)/../db/db_join.c_dependencies
$(PRJ_DIR)/db_config.h \
$(PRJ_DIR)/db_int.h \
- $(PRJ_DIR)/db.h \
- /export/home/db/include/queue.h \
- /export/home/db/include/shqueue.h \
- /export/home/db/include/debug.h \
- /export/home/db/include/mutex.h \
- /export/home/db/include/region.h \
- /export/home/db/include/mutex_ext.h \
- /export/home/db/include/env_ext.h \
- /export/home/db/include/os.h \
- /export/home/db/include/os_ext.h \
- /export/home/db/include/common_ext.h
+ $(PRJ_DIR)/db.h
<END>
-<BEGIN> FILE_/export/home/db/common/util_sig.c_objects
-util_sig.o
+<BEGIN> FILE_$(PRJ_DIR)/../db/db_join.c_objects
+db_join.o
<END>
-<BEGIN> FILE_/export/home/db/common/util_sig.c_tool
+<BEGIN> FILE_$(PRJ_DIR)/../db/db_join.c_tool
C/C++ compiler
<END>
-<BEGIN> FILE_/export/home/db/db/crdel_auto.c_dependDone
+<BEGIN> FILE_$(PRJ_DIR)/../db/db_meta.c_dependDone
TRUE
<END>
-<BEGIN> FILE_/export/home/db/db/crdel_auto.c_dependencies
+<BEGIN> FILE_$(PRJ_DIR)/../db/db_meta.c_dependencies
$(PRJ_DIR)/db_config.h \
$(PRJ_DIR)/db_int.h \
- $(PRJ_DIR)/db.h \
- /export/home/db/include/queue.h \
- /export/home/db/include/shqueue.h \
- /export/home/db/include/debug.h \
- /export/home/db/include/mutex.h \
- /export/home/db/include/region.h \
- /export/home/db/include/mutex_ext.h \
- /export/home/db/include/env_ext.h \
- /export/home/db/include/os.h \
- /export/home/db/include/os_ext.h \
- /export/home/db/include/common_ext.h \
- /export/home/db/include/db_page.h \
- /export/home/db/include/db_dispatch.h \
- /export/home/db/include/db_am.h \
- /export/home/db/include/db_auto.h \
- /export/home/db/include/crdel_auto.h \
- /export/home/db/include/db_ext.h \
- /export/home/db/include/txn.h \
- /export/home/db/include/xa.h \
- /export/home/db/include/txn_auto.h \
- /export/home/db/include/txn_ext.h \
- /export/home/db/include/xa_ext.h
+ $(PRJ_DIR)/db.h
<END>
-<BEGIN> FILE_/export/home/db/db/crdel_auto.c_objects
-crdel_auto.o
+<BEGIN> FILE_$(PRJ_DIR)/../db/db_meta.c_objects
+db_meta.o
<END>
-<BEGIN> FILE_/export/home/db/db/crdel_auto.c_tool
+<BEGIN> FILE_$(PRJ_DIR)/../db/db_meta.c_tool
C/C++ compiler
<END>
-<BEGIN> FILE_/export/home/db/db/crdel_rec.c_dependDone
+<BEGIN> FILE_$(PRJ_DIR)/../db/db_method.c_dependDone
TRUE
<END>
-<BEGIN> FILE_/export/home/db/db/crdel_rec.c_dependencies
+<BEGIN> FILE_$(PRJ_DIR)/../db/db_method.c_dependencies
$(PRJ_DIR)/db_config.h \
$(PRJ_DIR)/db_int.h \
- $(PRJ_DIR)/db.h \
- /export/home/db/include/queue.h \
- /export/home/db/include/shqueue.h \
- /export/home/db/include/debug.h \
- /export/home/db/include/mutex.h \
- /export/home/db/include/region.h \
- /export/home/db/include/mutex_ext.h \
- /export/home/db/include/env_ext.h \
- /export/home/db/include/os.h \
- /export/home/db/include/os_ext.h \
- /export/home/db/include/common_ext.h \
- /export/home/db/include/db_page.h \
- /export/home/db/include/log.h \
- /export/home/db/include/log_auto.h \
- /export/home/db/include/log_ext.h \
- /export/home/db/include/hash.h \
- /export/home/db/include/hash_auto.h \
- /export/home/db/include/hash_ext.h \
- /export/home/db/include/db_am.h \
- /export/home/db/include/db_dispatch.h \
- /export/home/db/include/db_auto.h \
- /export/home/db/include/crdel_auto.h \
- /export/home/db/include/db_ext.h \
- /export/home/db/include/mp.h \
- /export/home/db/include/mp_ext.h
+ $(PRJ_DIR)/db.h
<END>
-<BEGIN> FILE_/export/home/db/db/crdel_rec.c_objects
-crdel_rec.o
+<BEGIN> FILE_$(PRJ_DIR)/../db/db_method.c_objects
+db_method.o
<END>
-<BEGIN> FILE_/export/home/db/db/crdel_rec.c_tool
+<BEGIN> FILE_$(PRJ_DIR)/../db/db_method.c_tool
C/C++ compiler
<END>
-<BEGIN> FILE_/export/home/db/db/db.c_dependDone
+<BEGIN> FILE_$(PRJ_DIR)/../db/db_open.c_dependDone
TRUE
<END>
-<BEGIN> FILE_/export/home/db/db/db.c_dependencies
+<BEGIN> FILE_$(PRJ_DIR)/../db/db_open.c_dependencies
$(PRJ_DIR)/db_config.h \
$(PRJ_DIR)/db_int.h \
- $(PRJ_DIR)/db.h \
- /export/home/db/include/queue.h \
- /export/home/db/include/shqueue.h \
- /export/home/db/include/debug.h \
- /export/home/db/include/mutex.h \
- /export/home/db/include/region.h \
- /export/home/db/include/mutex_ext.h \
- /export/home/db/include/env_ext.h \
- /export/home/db/include/os.h \
- /export/home/db/include/os_ext.h \
- /export/home/db/include/common_ext.h \
- /export/home/db/include/db_page.h \
- /export/home/db/include/db_shash.h \
- /export/home/db/include/db_swap.h \
- /export/home/db/include/btree.h \
- /export/home/db/include/btree_auto.h \
- /export/home/db/include/btree_ext.h \
- /export/home/db/include/db_am.h \
- /export/home/db/include/db_dispatch.h \
- /export/home/db/include/db_auto.h \
- /export/home/db/include/crdel_auto.h \
- /export/home/db/include/db_ext.h \
- /export/home/db/include/hash.h \
- /export/home/db/include/hash_auto.h \
- /export/home/db/include/hash_ext.h \
- /export/home/db/include/lock.h \
- /export/home/db/include/lock_ext.h \
- /export/home/db/include/log.h \
- /export/home/db/include/log_auto.h \
- /export/home/db/include/log_ext.h \
- /export/home/db/include/mp.h \
- /export/home/db/include/mp_ext.h \
- /export/home/db/include/qam.h \
- /export/home/db/include/qam_auto.h \
- /export/home/db/include/qam_ext.h
-<END>
-
-<BEGIN> FILE_/export/home/db/db/db.c_objects
-db.o
+ $(PRJ_DIR)/db.h
<END>
-<BEGIN> FILE_/export/home/db/db/db.c_tool
-C/C++ compiler
-<END>
-
-<BEGIN> FILE_/export/home/db/db/db_am.c_dependDone
-TRUE
-<END>
-
-<BEGIN> FILE_/export/home/db/db/db_am.c_dependencies
-$(PRJ_DIR)/db_config.h \
- $(PRJ_DIR)/db_int.h \
- $(PRJ_DIR)/db.h \
- /export/home/db/include/queue.h \
- /export/home/db/include/shqueue.h \
- /export/home/db/include/debug.h \
- /export/home/db/include/mutex.h \
- /export/home/db/include/region.h \
- /export/home/db/include/mutex_ext.h \
- /export/home/db/include/env_ext.h \
- /export/home/db/include/os.h \
- /export/home/db/include/os_ext.h \
- /export/home/db/include/common_ext.h \
- /export/home/db/include/db_page.h \
- /export/home/db/include/db_shash.h \
- /export/home/db/include/btree.h \
- /export/home/db/include/btree_auto.h \
- /export/home/db/include/btree_ext.h \
- /export/home/db/include/db_am.h \
- /export/home/db/include/db_dispatch.h \
- /export/home/db/include/db_auto.h \
- /export/home/db/include/crdel_auto.h \
- /export/home/db/include/db_ext.h \
- /export/home/db/include/hash.h \
- /export/home/db/include/hash_auto.h \
- /export/home/db/include/hash_ext.h \
- /export/home/db/include/qam.h \
- /export/home/db/include/qam_auto.h \
- /export/home/db/include/qam_ext.h \
- /export/home/db/include/lock.h \
- /export/home/db/include/lock_ext.h \
- /export/home/db/include/mp.h \
- /export/home/db/include/mp_ext.h \
- /export/home/db/include/txn.h \
- /export/home/db/include/xa.h \
- /export/home/db/include/txn_auto.h \
- /export/home/db/include/txn_ext.h \
- /export/home/db/include/xa_ext.h
-<END>
-
-<BEGIN> FILE_/export/home/db/db/db_am.c_objects
-db_am.o
+<BEGIN> FILE_$(PRJ_DIR)/../db/db_open.c_objects
+db_open.o
<END>
-<BEGIN> FILE_/export/home/db/db/db_am.c_tool
+<BEGIN> FILE_$(PRJ_DIR)/../db/db_open.c_tool
C/C++ compiler
<END>
-<BEGIN> FILE_/export/home/db/db/db_auto.c_dependDone
+<BEGIN> FILE_$(PRJ_DIR)/../db/db_overflow.c_dependDone
TRUE
<END>
-<BEGIN> FILE_/export/home/db/db/db_auto.c_dependencies
+<BEGIN> FILE_$(PRJ_DIR)/../db/db_overflow.c_dependencies
$(PRJ_DIR)/db_config.h \
$(PRJ_DIR)/db_int.h \
- $(PRJ_DIR)/db.h \
- /export/home/db/include/queue.h \
- /export/home/db/include/shqueue.h \
- /export/home/db/include/debug.h \
- /export/home/db/include/mutex.h \
- /export/home/db/include/region.h \
- /export/home/db/include/mutex_ext.h \
- /export/home/db/include/env_ext.h \
- /export/home/db/include/os.h \
- /export/home/db/include/os_ext.h \
- /export/home/db/include/common_ext.h \
- /export/home/db/include/db_page.h \
- /export/home/db/include/db_dispatch.h \
- /export/home/db/include/db_am.h \
- /export/home/db/include/db_auto.h \
- /export/home/db/include/crdel_auto.h \
- /export/home/db/include/db_ext.h \
- /export/home/db/include/txn.h \
- /export/home/db/include/xa.h \
- /export/home/db/include/txn_auto.h \
- /export/home/db/include/txn_ext.h \
- /export/home/db/include/xa_ext.h
+ $(PRJ_DIR)/db.h
<END>
-<BEGIN> FILE_/export/home/db/db/db_auto.c_objects
-db_auto.o
+<BEGIN> FILE_$(PRJ_DIR)/../db/db_overflow.c_objects
+db_overflow.o
<END>
-<BEGIN> FILE_/export/home/db/db/db_auto.c_tool
+<BEGIN> FILE_$(PRJ_DIR)/../db/db_overflow.c_tool
C/C++ compiler
<END>
-<BEGIN> FILE_/export/home/db/db/db_cam.c_dependDone
+<BEGIN> FILE_$(PRJ_DIR)/../db/db_pr.c_dependDone
TRUE
<END>
-<BEGIN> FILE_/export/home/db/db/db_cam.c_dependencies
+<BEGIN> FILE_$(PRJ_DIR)/../db/db_pr.c_dependencies
$(PRJ_DIR)/db_config.h \
$(PRJ_DIR)/db_int.h \
- $(PRJ_DIR)/db.h \
- /export/home/db/include/queue.h \
- /export/home/db/include/shqueue.h \
- /export/home/db/include/debug.h \
- /export/home/db/include/mutex.h \
- /export/home/db/include/region.h \
- /export/home/db/include/mutex_ext.h \
- /export/home/db/include/env_ext.h \
- /export/home/db/include/os.h \
- /export/home/db/include/os_ext.h \
- /export/home/db/include/common_ext.h \
- /export/home/db/include/db_page.h \
- /export/home/db/include/db_shash.h \
- /export/home/db/include/lock.h \
- /export/home/db/include/lock_ext.h \
- /export/home/db/include/btree.h \
- /export/home/db/include/btree_auto.h \
- /export/home/db/include/btree_ext.h \
- /export/home/db/include/db_am.h \
- /export/home/db/include/db_dispatch.h \
- /export/home/db/include/db_auto.h \
- /export/home/db/include/crdel_auto.h \
- /export/home/db/include/db_ext.h \
- /export/home/db/include/hash.h \
- /export/home/db/include/hash_auto.h \
- /export/home/db/include/hash_ext.h \
- /export/home/db/include/qam.h \
- /export/home/db/include/qam_auto.h \
- /export/home/db/include/qam_ext.h \
- /export/home/db/include/txn.h \
- /export/home/db/include/xa.h \
- /export/home/db/include/txn_auto.h \
- /export/home/db/include/txn_ext.h \
- /export/home/db/include/xa_ext.h
+ $(PRJ_DIR)/db.h
<END>
-<BEGIN> FILE_/export/home/db/db/db_cam.c_objects
-db_cam.o
+<BEGIN> FILE_$(PRJ_DIR)/../db/db_pr.c_objects
+db_pr.o
<END>
-<BEGIN> FILE_/export/home/db/db/db_cam.c_tool
+<BEGIN> FILE_$(PRJ_DIR)/../db/db_pr.c_tool
C/C++ compiler
<END>
-<BEGIN> FILE_/export/home/db/db/db_conv.c_dependDone
+<BEGIN> FILE_$(PRJ_DIR)/../db/db_rec.c_dependDone
TRUE
<END>
-<BEGIN> FILE_/export/home/db/db/db_conv.c_dependencies
+<BEGIN> FILE_$(PRJ_DIR)/../db/db_rec.c_dependencies
$(PRJ_DIR)/db_config.h \
$(PRJ_DIR)/db_int.h \
- $(PRJ_DIR)/db.h \
- /export/home/db/include/queue.h \
- /export/home/db/include/shqueue.h \
- /export/home/db/include/debug.h \
- /export/home/db/include/mutex.h \
- /export/home/db/include/region.h \
- /export/home/db/include/mutex_ext.h \
- /export/home/db/include/env_ext.h \
- /export/home/db/include/os.h \
- /export/home/db/include/os_ext.h \
- /export/home/db/include/common_ext.h \
- /export/home/db/include/db_page.h \
- /export/home/db/include/db_swap.h \
- /export/home/db/include/db_am.h \
- /export/home/db/include/db_dispatch.h \
- /export/home/db/include/db_auto.h \
- /export/home/db/include/crdel_auto.h \
- /export/home/db/include/db_ext.h \
- /export/home/db/include/btree.h \
- /export/home/db/include/btree_auto.h \
- /export/home/db/include/btree_ext.h \
- /export/home/db/include/hash.h \
- /export/home/db/include/hash_auto.h \
- /export/home/db/include/hash_ext.h \
- /export/home/db/include/qam.h \
- /export/home/db/include/qam_auto.h \
- /export/home/db/include/qam_ext.h
+ $(PRJ_DIR)/db.h
<END>
-<BEGIN> FILE_/export/home/db/db/db_conv.c_objects
-db_conv.o
+<BEGIN> FILE_$(PRJ_DIR)/../db/db_rec.c_objects
+db_rec.o
<END>
-<BEGIN> FILE_/export/home/db/db/db_conv.c_tool
+<BEGIN> FILE_$(PRJ_DIR)/../db/db_rec.c_tool
C/C++ compiler
<END>
-<BEGIN> FILE_/export/home/db/db/db_dispatch.c_dependDone
+<BEGIN> FILE_$(PRJ_DIR)/../db/db_reclaim.c_dependDone
TRUE
<END>
-<BEGIN> FILE_/export/home/db/db/db_dispatch.c_dependencies
+<BEGIN> FILE_$(PRJ_DIR)/../db/db_reclaim.c_dependencies
$(PRJ_DIR)/db_config.h \
$(PRJ_DIR)/db_int.h \
- $(PRJ_DIR)/db.h \
- /export/home/db/include/queue.h \
- /export/home/db/include/shqueue.h \
- /export/home/db/include/debug.h \
- /export/home/db/include/mutex.h \
- /export/home/db/include/region.h \
- /export/home/db/include/mutex_ext.h \
- /export/home/db/include/env_ext.h \
- /export/home/db/include/os.h \
- /export/home/db/include/os_ext.h \
- /export/home/db/include/common_ext.h \
- /export/home/db/include/db_page.h \
- /export/home/db/include/db_dispatch.h \
- /export/home/db/include/db_am.h \
- /export/home/db/include/db_auto.h \
- /export/home/db/include/crdel_auto.h \
- /export/home/db/include/db_ext.h \
- /export/home/db/include/log_auto.h \
- /export/home/db/include/txn.h \
- /export/home/db/include/xa.h \
- /export/home/db/include/txn_auto.h \
- /export/home/db/include/txn_ext.h \
- /export/home/db/include/xa_ext.h \
- /export/home/db/include/log.h \
- /export/home/db/include/log_ext.h
+ $(PRJ_DIR)/db.h
<END>
-<BEGIN> FILE_/export/home/db/db/db_dispatch.c_objects
-db_dispatch.o
+<BEGIN> FILE_$(PRJ_DIR)/../db/db_reclaim.c_objects
+db_reclaim.o
<END>
-<BEGIN> FILE_/export/home/db/db/db_dispatch.c_tool
+<BEGIN> FILE_$(PRJ_DIR)/../db/db_reclaim.c_tool
C/C++ compiler
<END>
-<BEGIN> FILE_/export/home/db/db/db_dup.c_dependDone
+<BEGIN> FILE_$(PRJ_DIR)/../db/db_remove.c_dependDone
TRUE
<END>
-<BEGIN> FILE_/export/home/db/db/db_dup.c_dependencies
+<BEGIN> FILE_$(PRJ_DIR)/../db/db_remove.c_dependencies
$(PRJ_DIR)/db_config.h \
$(PRJ_DIR)/db_int.h \
- $(PRJ_DIR)/db.h \
- /export/home/db/include/queue.h \
- /export/home/db/include/shqueue.h \
- /export/home/db/include/debug.h \
- /export/home/db/include/mutex.h \
- /export/home/db/include/region.h \
- /export/home/db/include/mutex_ext.h \
- /export/home/db/include/env_ext.h \
- /export/home/db/include/os.h \
- /export/home/db/include/os_ext.h \
- /export/home/db/include/common_ext.h \
- /export/home/db/include/db_page.h \
- /export/home/db/include/db_shash.h \
- /export/home/db/include/btree.h \
- /export/home/db/include/btree_auto.h \
- /export/home/db/include/btree_ext.h \
- /export/home/db/include/db_am.h \
- /export/home/db/include/db_dispatch.h \
- /export/home/db/include/db_auto.h \
- /export/home/db/include/crdel_auto.h \
- /export/home/db/include/db_ext.h \
- /export/home/db/include/hash.h \
- /export/home/db/include/hash_auto.h \
- /export/home/db/include/hash_ext.h \
- /export/home/db/include/lock.h \
- /export/home/db/include/lock_ext.h
+ $(PRJ_DIR)/db.h
<END>
-<BEGIN> FILE_/export/home/db/db/db_dup.c_objects
-db_dup.o
+<BEGIN> FILE_$(PRJ_DIR)/../db/db_remove.c_objects
+db_remove.o
<END>
-<BEGIN> FILE_/export/home/db/db/db_dup.c_tool
+<BEGIN> FILE_$(PRJ_DIR)/../db/db_remove.c_tool
C/C++ compiler
<END>
-<BEGIN> FILE_/export/home/db/db/db_iface.c_dependDone
+<BEGIN> FILE_$(PRJ_DIR)/../db/db_rename.c_dependDone
TRUE
<END>
-<BEGIN> FILE_/export/home/db/db/db_iface.c_dependencies
+<BEGIN> FILE_$(PRJ_DIR)/../db/db_rename.c_dependencies
$(PRJ_DIR)/db_config.h \
$(PRJ_DIR)/db_int.h \
- $(PRJ_DIR)/db.h \
- /export/home/db/include/queue.h \
- /export/home/db/include/shqueue.h \
- /export/home/db/include/debug.h \
- /export/home/db/include/mutex.h \
- /export/home/db/include/region.h \
- /export/home/db/include/mutex_ext.h \
- /export/home/db/include/env_ext.h \
- /export/home/db/include/os.h \
- /export/home/db/include/os_ext.h \
- /export/home/db/include/common_ext.h \
- /export/home/db/include/db_page.h \
- /export/home/db/include/db_am.h \
- /export/home/db/include/db_dispatch.h \
- /export/home/db/include/db_auto.h \
- /export/home/db/include/crdel_auto.h \
- /export/home/db/include/db_ext.h \
- /export/home/db/include/btree.h \
- /export/home/db/include/btree_auto.h \
- /export/home/db/include/btree_ext.h
+ $(PRJ_DIR)/db.h
<END>
-<BEGIN> FILE_/export/home/db/db/db_iface.c_objects
-db_iface.o
+<BEGIN> FILE_$(PRJ_DIR)/../db/db_rename.c_objects
+db_rename.o
<END>
-<BEGIN> FILE_/export/home/db/db/db_iface.c_tool
+<BEGIN> FILE_$(PRJ_DIR)/../db/db_rename.c_tool
C/C++ compiler
<END>
-<BEGIN> FILE_/export/home/db/db/db_join.c_dependDone
+<BEGIN> FILE_$(PRJ_DIR)/../db/db_ret.c_dependDone
TRUE
<END>
-<BEGIN> FILE_/export/home/db/db/db_join.c_dependencies
+<BEGIN> FILE_$(PRJ_DIR)/../db/db_ret.c_dependencies
$(PRJ_DIR)/db_config.h \
$(PRJ_DIR)/db_int.h \
- $(PRJ_DIR)/db.h \
- /export/home/db/include/queue.h \
- /export/home/db/include/shqueue.h \
- /export/home/db/include/debug.h \
- /export/home/db/include/mutex.h \
- /export/home/db/include/region.h \
- /export/home/db/include/mutex_ext.h \
- /export/home/db/include/env_ext.h \
- /export/home/db/include/os.h \
- /export/home/db/include/os_ext.h \
- /export/home/db/include/common_ext.h \
- /export/home/db/include/db_page.h \
- /export/home/db/include/db_join.h \
- /export/home/db/include/db_am.h \
- /export/home/db/include/db_dispatch.h \
- /export/home/db/include/db_auto.h \
- /export/home/db/include/crdel_auto.h \
- /export/home/db/include/db_ext.h \
- /export/home/db/include/btree.h \
- /export/home/db/include/btree_auto.h \
- /export/home/db/include/btree_ext.h
+ $(PRJ_DIR)/db.h
<END>
-<BEGIN> FILE_/export/home/db/db/db_join.c_objects
-db_join.o
+<BEGIN> FILE_$(PRJ_DIR)/../db/db_ret.c_objects
+db_ret.o
<END>
-<BEGIN> FILE_/export/home/db/db/db_join.c_tool
+<BEGIN> FILE_$(PRJ_DIR)/../db/db_ret.c_tool
C/C++ compiler
<END>
-<BEGIN> FILE_/export/home/db/db/db_meta.c_dependDone
+<BEGIN> FILE_$(PRJ_DIR)/../db/db_truncate.c_dependDone
TRUE
<END>
-<BEGIN> FILE_/export/home/db/db/db_meta.c_dependencies
+<BEGIN> FILE_$(PRJ_DIR)/../db/db_truncate.c_dependencies
$(PRJ_DIR)/db_config.h \
$(PRJ_DIR)/db_int.h \
- $(PRJ_DIR)/db.h \
- /export/home/db/include/queue.h \
- /export/home/db/include/shqueue.h \
- /export/home/db/include/debug.h \
- /export/home/db/include/mutex.h \
- /export/home/db/include/region.h \
- /export/home/db/include/mutex_ext.h \
- /export/home/db/include/env_ext.h \
- /export/home/db/include/os.h \
- /export/home/db/include/os_ext.h \
- /export/home/db/include/common_ext.h \
- /export/home/db/include/db_page.h \
- /export/home/db/include/db_shash.h \
- /export/home/db/include/lock.h \
- /export/home/db/include/lock_ext.h \
- /export/home/db/include/txn.h \
- /export/home/db/include/xa.h \
- /export/home/db/include/txn_auto.h \
- /export/home/db/include/txn_ext.h \
- /export/home/db/include/xa_ext.h \
- /export/home/db/include/db_am.h \
- /export/home/db/include/db_dispatch.h \
- /export/home/db/include/db_auto.h \
- /export/home/db/include/crdel_auto.h \
- /export/home/db/include/db_ext.h \
- /export/home/db/include/btree.h \
- /export/home/db/include/btree_auto.h \
- /export/home/db/include/btree_ext.h
+ $(PRJ_DIR)/db.h
<END>
-<BEGIN> FILE_/export/home/db/db/db_meta.c_objects
-db_meta.o
+<BEGIN> FILE_$(PRJ_DIR)/../db/db_truncate.c_objects
+db_truncate.o
<END>
-<BEGIN> FILE_/export/home/db/db/db_meta.c_tool
+<BEGIN> FILE_$(PRJ_DIR)/../db/db_truncate.c_tool
C/C++ compiler
<END>
-<BEGIN> FILE_/export/home/db/db/db_method.c_dependDone
+<BEGIN> FILE_$(PRJ_DIR)/../db/db_upg.c_dependDone
TRUE
<END>
-<BEGIN> FILE_/export/home/db/db/db_method.c_dependencies
+<BEGIN> FILE_$(PRJ_DIR)/../db/db_upg.c_dependencies
$(PRJ_DIR)/db_config.h \
- /export/home/db/include/db_server.h \
$(PRJ_DIR)/db_int.h \
- $(PRJ_DIR)/db.h \
- /export/home/db/include/queue.h \
- /export/home/db/include/shqueue.h \
- /export/home/db/include/debug.h \
- /export/home/db/include/mutex.h \
- /export/home/db/include/region.h \
- /export/home/db/include/mutex_ext.h \
- /export/home/db/include/env_ext.h \
- /export/home/db/include/os.h \
- /export/home/db/include/os_ext.h \
- /export/home/db/include/common_ext.h \
- /export/home/db/include/db_page.h \
- /export/home/db/include/db_am.h \
- /export/home/db/include/db_dispatch.h \
- /export/home/db/include/db_auto.h \
- /export/home/db/include/crdel_auto.h \
- /export/home/db/include/db_ext.h \
- /export/home/db/include/btree.h \
- /export/home/db/include/btree_auto.h \
- /export/home/db/include/btree_ext.h \
- /export/home/db/include/hash.h \
- /export/home/db/include/hash_auto.h \
- /export/home/db/include/hash_ext.h \
- /export/home/db/include/qam.h \
- /export/home/db/include/qam_auto.h \
- /export/home/db/include/qam_ext.h \
- /export/home/db/include/xa.h \
- /export/home/db/include/xa_ext.h \
- /export/home/db/include/gen_client_ext.h \
- /export/home/db/include/rpc_client_ext.h
+ $(PRJ_DIR)/db.h
<END>
-<BEGIN> FILE_/export/home/db/db/db_method.c_objects
-db_method.o
+<BEGIN> FILE_$(PRJ_DIR)/../db/db_upg.c_objects
+db_upg.o
<END>
-<BEGIN> FILE_/export/home/db/db/db_method.c_tool
+<BEGIN> FILE_$(PRJ_DIR)/../db/db_upg.c_tool
C/C++ compiler
<END>
-<BEGIN> FILE_/export/home/db/db/db_overflow.c_dependDone
+<BEGIN> FILE_$(PRJ_DIR)/../db/db_upg_opd.c_dependDone
TRUE
<END>
-<BEGIN> FILE_/export/home/db/db/db_overflow.c_dependencies
+<BEGIN> FILE_$(PRJ_DIR)/../db/db_upg_opd.c_dependencies
$(PRJ_DIR)/db_config.h \
$(PRJ_DIR)/db_int.h \
- $(PRJ_DIR)/db.h \
- /export/home/db/include/queue.h \
- /export/home/db/include/shqueue.h \
- /export/home/db/include/debug.h \
- /export/home/db/include/mutex.h \
- /export/home/db/include/region.h \
- /export/home/db/include/mutex_ext.h \
- /export/home/db/include/env_ext.h \
- /export/home/db/include/os.h \
- /export/home/db/include/os_ext.h \
- /export/home/db/include/common_ext.h \
- /export/home/db/include/db_page.h \
- /export/home/db/include/db_am.h \
- /export/home/db/include/db_dispatch.h \
- /export/home/db/include/db_auto.h \
- /export/home/db/include/crdel_auto.h \
- /export/home/db/include/db_ext.h \
- /export/home/db/include/db_verify.h
+ $(PRJ_DIR)/db.h
<END>
-<BEGIN> FILE_/export/home/db/db/db_overflow.c_objects
-db_overflow.o
+<BEGIN> FILE_$(PRJ_DIR)/../db/db_upg_opd.c_objects
+db_upg_opd.o
<END>
-<BEGIN> FILE_/export/home/db/db/db_overflow.c_tool
+<BEGIN> FILE_$(PRJ_DIR)/../db/db_upg_opd.c_tool
C/C++ compiler
<END>
-<BEGIN> FILE_/export/home/db/db/db_pr.c_dependDone
+<BEGIN> FILE_$(PRJ_DIR)/../db/db_vrfy.c_dependDone
TRUE
<END>
-<BEGIN> FILE_/export/home/db/db/db_pr.c_dependencies
+<BEGIN> FILE_$(PRJ_DIR)/../db/db_vrfy.c_dependencies
$(PRJ_DIR)/db_config.h \
$(PRJ_DIR)/db_int.h \
- $(PRJ_DIR)/db.h \
- /export/home/db/include/queue.h \
- /export/home/db/include/shqueue.h \
- /export/home/db/include/debug.h \
- /export/home/db/include/mutex.h \
- /export/home/db/include/region.h \
- /export/home/db/include/mutex_ext.h \
- /export/home/db/include/env_ext.h \
- /export/home/db/include/os.h \
- /export/home/db/include/os_ext.h \
- /export/home/db/include/common_ext.h \
- /export/home/db/include/db_page.h \
- /export/home/db/include/btree.h \
- /export/home/db/include/btree_auto.h \
- /export/home/db/include/btree_ext.h \
- /export/home/db/include/db_am.h \
- /export/home/db/include/db_dispatch.h \
- /export/home/db/include/db_auto.h \
- /export/home/db/include/crdel_auto.h \
- /export/home/db/include/db_ext.h \
- /export/home/db/include/hash.h \
- /export/home/db/include/hash_auto.h \
- /export/home/db/include/hash_ext.h \
- /export/home/db/include/qam.h \
- /export/home/db/include/qam_auto.h \
- /export/home/db/include/qam_ext.h \
- /export/home/db/include/db_verify.h
+ $(PRJ_DIR)/db.h
<END>
-<BEGIN> FILE_/export/home/db/db/db_pr.c_objects
-db_pr.o
+<BEGIN> FILE_$(PRJ_DIR)/../db/db_vrfy.c_objects
+db_vrfy.o
<END>
-<BEGIN> FILE_/export/home/db/db/db_pr.c_tool
+<BEGIN> FILE_$(PRJ_DIR)/../db/db_vrfy.c_tool
C/C++ compiler
<END>
-<BEGIN> FILE_/export/home/db/db/db_rec.c_dependDone
+<BEGIN> FILE_$(PRJ_DIR)/../db/db_vrfyutil.c_dependDone
TRUE
<END>
-<BEGIN> FILE_/export/home/db/db/db_rec.c_dependencies
+<BEGIN> FILE_$(PRJ_DIR)/../db/db_vrfyutil.c_dependencies
$(PRJ_DIR)/db_config.h \
$(PRJ_DIR)/db_int.h \
- $(PRJ_DIR)/db.h \
- /export/home/db/include/queue.h \
- /export/home/db/include/shqueue.h \
- /export/home/db/include/debug.h \
- /export/home/db/include/mutex.h \
- /export/home/db/include/region.h \
- /export/home/db/include/mutex_ext.h \
- /export/home/db/include/env_ext.h \
- /export/home/db/include/os.h \
- /export/home/db/include/os_ext.h \
- /export/home/db/include/common_ext.h \
- /export/home/db/include/db_page.h \
- /export/home/db/include/log.h \
- /export/home/db/include/log_auto.h \
- /export/home/db/include/log_ext.h \
- /export/home/db/include/hash.h \
- /export/home/db/include/hash_auto.h \
- /export/home/db/include/hash_ext.h \
- /export/home/db/include/db_am.h \
- /export/home/db/include/db_dispatch.h \
- /export/home/db/include/db_auto.h \
- /export/home/db/include/crdel_auto.h \
- /export/home/db/include/db_ext.h
+ $(PRJ_DIR)/db.h
<END>
-<BEGIN> FILE_/export/home/db/db/db_rec.c_objects
-db_rec.o
+<BEGIN> FILE_$(PRJ_DIR)/../db/db_vrfyutil.c_objects
+db_vrfyutil.o
<END>
-<BEGIN> FILE_/export/home/db/db/db_rec.c_tool
+<BEGIN> FILE_$(PRJ_DIR)/../db/db_vrfyutil.c_tool
C/C++ compiler
<END>
-<BEGIN> FILE_/export/home/db/db/db_reclaim.c_dependDone
+<BEGIN> FILE_$(PRJ_DIR)/../dbreg/dbreg.c_dependDone
TRUE
<END>
-<BEGIN> FILE_/export/home/db/db/db_reclaim.c_dependencies
+<BEGIN> FILE_$(PRJ_DIR)/../dbreg/dbreg.c_dependencies
$(PRJ_DIR)/db_config.h \
$(PRJ_DIR)/db_int.h \
- $(PRJ_DIR)/db.h \
- /export/home/db/include/queue.h \
- /export/home/db/include/shqueue.h \
- /export/home/db/include/debug.h \
- /export/home/db/include/mutex.h \
- /export/home/db/include/region.h \
- /export/home/db/include/mutex_ext.h \
- /export/home/db/include/env_ext.h \
- /export/home/db/include/os.h \
- /export/home/db/include/os_ext.h \
- /export/home/db/include/common_ext.h \
- /export/home/db/include/db_page.h \
- /export/home/db/include/db_am.h \
- /export/home/db/include/db_dispatch.h \
- /export/home/db/include/db_auto.h \
- /export/home/db/include/crdel_auto.h \
- /export/home/db/include/db_ext.h
+ $(PRJ_DIR)/db.h
<END>
-<BEGIN> FILE_/export/home/db/db/db_reclaim.c_objects
-db_reclaim.o
+<BEGIN> FILE_$(PRJ_DIR)/../dbreg/dbreg.c_objects
+dbreg.o
<END>
-<BEGIN> FILE_/export/home/db/db/db_reclaim.c_tool
+<BEGIN> FILE_$(PRJ_DIR)/../dbreg/dbreg.c_tool
C/C++ compiler
<END>
-<BEGIN> FILE_/export/home/db/db/db_ret.c_dependDone
+<BEGIN> FILE_$(PRJ_DIR)/../dbreg/dbreg_auto.c_dependDone
TRUE
<END>
-<BEGIN> FILE_/export/home/db/db/db_ret.c_dependencies
+<BEGIN> FILE_$(PRJ_DIR)/../dbreg/dbreg_auto.c_dependencies
$(PRJ_DIR)/db_config.h \
$(PRJ_DIR)/db_int.h \
- $(PRJ_DIR)/db.h \
- /export/home/db/include/queue.h \
- /export/home/db/include/shqueue.h \
- /export/home/db/include/debug.h \
- /export/home/db/include/mutex.h \
- /export/home/db/include/region.h \
- /export/home/db/include/mutex_ext.h \
- /export/home/db/include/env_ext.h \
- /export/home/db/include/os.h \
- /export/home/db/include/os_ext.h \
- /export/home/db/include/common_ext.h \
- /export/home/db/include/db_page.h \
- /export/home/db/include/btree.h \
- /export/home/db/include/btree_auto.h \
- /export/home/db/include/btree_ext.h \
- /export/home/db/include/db_am.h \
- /export/home/db/include/db_dispatch.h \
- /export/home/db/include/db_auto.h \
- /export/home/db/include/crdel_auto.h \
- /export/home/db/include/db_ext.h
+ $(PRJ_DIR)/db.h
<END>
-<BEGIN> FILE_/export/home/db/db/db_ret.c_objects
-db_ret.o
+<BEGIN> FILE_$(PRJ_DIR)/../dbreg/dbreg_auto.c_objects
+dbreg_auto.o
<END>
-<BEGIN> FILE_/export/home/db/db/db_ret.c_tool
+<BEGIN> FILE_$(PRJ_DIR)/../dbreg/dbreg_auto.c_tool
C/C++ compiler
<END>
-<BEGIN> FILE_/export/home/db/db/db_upg.c_dependDone
+<BEGIN> FILE_$(PRJ_DIR)/../dbreg/dbreg_rec.c_dependDone
TRUE
<END>
-<BEGIN> FILE_/export/home/db/db/db_upg.c_dependencies
+<BEGIN> FILE_$(PRJ_DIR)/../dbreg/dbreg_rec.c_dependencies
$(PRJ_DIR)/db_config.h \
$(PRJ_DIR)/db_int.h \
- $(PRJ_DIR)/db.h \
- /export/home/db/include/queue.h \
- /export/home/db/include/shqueue.h \
- /export/home/db/include/debug.h \
- /export/home/db/include/mutex.h \
- /export/home/db/include/region.h \
- /export/home/db/include/mutex_ext.h \
- /export/home/db/include/env_ext.h \
- /export/home/db/include/os.h \
- /export/home/db/include/os_ext.h \
- /export/home/db/include/common_ext.h \
- /export/home/db/include/db_page.h \
- /export/home/db/include/db_swap.h \
- /export/home/db/include/btree.h \
- /export/home/db/include/btree_auto.h \
- /export/home/db/include/btree_ext.h \
- /export/home/db/include/db_am.h \
- /export/home/db/include/db_dispatch.h \
- /export/home/db/include/db_auto.h \
- /export/home/db/include/crdel_auto.h \
- /export/home/db/include/db_ext.h \
- /export/home/db/include/hash.h \
- /export/home/db/include/hash_auto.h \
- /export/home/db/include/hash_ext.h \
- /export/home/db/include/qam.h \
- /export/home/db/include/qam_auto.h \
- /export/home/db/include/qam_ext.h
+ $(PRJ_DIR)/db.h
<END>
-<BEGIN> FILE_/export/home/db/db/db_upg.c_objects
-db_upg.o
+<BEGIN> FILE_$(PRJ_DIR)/../dbreg/dbreg_rec.c_objects
+dbreg_rec.o
<END>
-<BEGIN> FILE_/export/home/db/db/db_upg.c_tool
+<BEGIN> FILE_$(PRJ_DIR)/../dbreg/dbreg_rec.c_tool
C/C++ compiler
<END>
-<BEGIN> FILE_/export/home/db/db/db_upg_opd.c_dependDone
+<BEGIN> FILE_$(PRJ_DIR)/../dbreg/dbreg_util.c_dependDone
TRUE
<END>
-<BEGIN> FILE_/export/home/db/db/db_upg_opd.c_dependencies
+<BEGIN> FILE_$(PRJ_DIR)/../dbreg/dbreg_util.c_dependencies
$(PRJ_DIR)/db_config.h \
$(PRJ_DIR)/db_int.h \
- $(PRJ_DIR)/db.h \
- /export/home/db/include/queue.h \
- /export/home/db/include/shqueue.h \
- /export/home/db/include/debug.h \
- /export/home/db/include/mutex.h \
- /export/home/db/include/region.h \
- /export/home/db/include/mutex_ext.h \
- /export/home/db/include/env_ext.h \
- /export/home/db/include/os.h \
- /export/home/db/include/os_ext.h \
- /export/home/db/include/common_ext.h \
- /export/home/db/include/db_page.h \
- /export/home/db/include/db_swap.h \
- /export/home/db/include/btree.h \
- /export/home/db/include/btree_auto.h \
- /export/home/db/include/btree_ext.h \
- /export/home/db/include/db_am.h \
- /export/home/db/include/db_dispatch.h \
- /export/home/db/include/db_auto.h \
- /export/home/db/include/crdel_auto.h \
- /export/home/db/include/db_ext.h \
- /export/home/db/include/hash.h \
- /export/home/db/include/hash_auto.h \
- /export/home/db/include/hash_ext.h \
- /export/home/db/include/qam.h \
- /export/home/db/include/qam_auto.h \
- /export/home/db/include/qam_ext.h
+ $(PRJ_DIR)/db.h
<END>
-<BEGIN> FILE_/export/home/db/db/db_upg_opd.c_objects
-db_upg_opd.o
+<BEGIN> FILE_$(PRJ_DIR)/../dbreg/dbreg_util.c_objects
+dbreg_util.o
<END>
-<BEGIN> FILE_/export/home/db/db/db_upg_opd.c_tool
+<BEGIN> FILE_$(PRJ_DIR)/../dbreg/dbreg_util.c_tool
C/C++ compiler
<END>
-<BEGIN> FILE_/export/home/db/db/db_vrfy.c_dependDone
+<BEGIN> FILE_$(PRJ_DIR)/../env/db_salloc.c_dependDone
TRUE
<END>
-<BEGIN> FILE_/export/home/db/db/db_vrfy.c_dependencies
+<BEGIN> FILE_$(PRJ_DIR)/../env/db_salloc.c_dependencies
$(PRJ_DIR)/db_config.h \
$(PRJ_DIR)/db_int.h \
- $(PRJ_DIR)/db.h \
- /export/home/db/include/queue.h \
- /export/home/db/include/shqueue.h \
- /export/home/db/include/debug.h \
- /export/home/db/include/mutex.h \
- /export/home/db/include/region.h \
- /export/home/db/include/mutex_ext.h \
- /export/home/db/include/env_ext.h \
- /export/home/db/include/os.h \
- /export/home/db/include/os_ext.h \
- /export/home/db/include/common_ext.h \
- /export/home/db/include/db_page.h \
- /export/home/db/include/db_swap.h \
- /export/home/db/include/db_verify.h \
- /export/home/db/include/db_ext.h \
- /export/home/db/include/btree.h \
- /export/home/db/include/btree_auto.h \
- /export/home/db/include/btree_ext.h \
- /export/home/db/include/db_am.h \
- /export/home/db/include/db_dispatch.h \
- /export/home/db/include/db_auto.h \
- /export/home/db/include/crdel_auto.h \
- /export/home/db/include/hash.h \
- /export/home/db/include/hash_auto.h \
- /export/home/db/include/hash_ext.h \
- /export/home/db/include/qam.h \
- /export/home/db/include/qam_auto.h \
- /export/home/db/include/qam_ext.h
+ $(PRJ_DIR)/db.h
<END>
-<BEGIN> FILE_/export/home/db/db/db_vrfy.c_objects
-db_vrfy.o
+<BEGIN> FILE_$(PRJ_DIR)/../env/db_salloc.c_objects
+db_salloc.o
<END>
-<BEGIN> FILE_/export/home/db/db/db_vrfy.c_tool
+<BEGIN> FILE_$(PRJ_DIR)/../env/db_salloc.c_tool
C/C++ compiler
<END>
-<BEGIN> FILE_/export/home/db/db/db_vrfyutil.c_dependDone
+<BEGIN> FILE_$(PRJ_DIR)/../env/db_shash.c_dependDone
TRUE
<END>
-<BEGIN> FILE_/export/home/db/db/db_vrfyutil.c_dependencies
+<BEGIN> FILE_$(PRJ_DIR)/../env/db_shash.c_dependencies
$(PRJ_DIR)/db_config.h \
$(PRJ_DIR)/db_int.h \
- $(PRJ_DIR)/db.h \
- /export/home/db/include/queue.h \
- /export/home/db/include/shqueue.h \
- /export/home/db/include/debug.h \
- /export/home/db/include/mutex.h \
- /export/home/db/include/region.h \
- /export/home/db/include/mutex_ext.h \
- /export/home/db/include/env_ext.h \
- /export/home/db/include/os.h \
- /export/home/db/include/os_ext.h \
- /export/home/db/include/common_ext.h \
- /export/home/db/include/db_page.h \
- /export/home/db/include/db_verify.h \
- /export/home/db/include/db_ext.h
+ $(PRJ_DIR)/db.h
<END>
-<BEGIN> FILE_/export/home/db/db/db_vrfyutil.c_objects
-db_vrfyutil.o
+<BEGIN> FILE_$(PRJ_DIR)/../env/db_shash.c_objects
+db_shash.o
<END>
-<BEGIN> FILE_/export/home/db/db/db_vrfyutil.c_tool
+<BEGIN> FILE_$(PRJ_DIR)/../env/db_shash.c_tool
C/C++ compiler
<END>
-<BEGIN> FILE_/export/home/db/env/db_salloc.c_dependDone
+<BEGIN> FILE_$(PRJ_DIR)/../env/env_file.c_dependDone
TRUE
<END>
-<BEGIN> FILE_/export/home/db/env/db_salloc.c_dependencies
+<BEGIN> FILE_$(PRJ_DIR)/../env/env_file.c_dependencies
$(PRJ_DIR)/db_config.h \
$(PRJ_DIR)/db_int.h \
- $(PRJ_DIR)/db.h \
- /export/home/db/include/queue.h \
- /export/home/db/include/shqueue.h \
- /export/home/db/include/debug.h \
- /export/home/db/include/mutex.h \
- /export/home/db/include/region.h \
- /export/home/db/include/mutex_ext.h \
- /export/home/db/include/env_ext.h \
- /export/home/db/include/os.h \
- /export/home/db/include/os_ext.h \
- /export/home/db/include/common_ext.h
+ $(PRJ_DIR)/db.h
<END>
-<BEGIN> FILE_/export/home/db/env/db_salloc.c_objects
-db_salloc.o
+<BEGIN> FILE_$(PRJ_DIR)/../env/env_file.c_objects
+env_file.o
<END>
-<BEGIN> FILE_/export/home/db/env/db_salloc.c_tool
+<BEGIN> FILE_$(PRJ_DIR)/../env/env_file.c_tool
C/C++ compiler
<END>
-<BEGIN> FILE_/export/home/db/env/db_shash.c_dependDone
+<BEGIN> FILE_$(PRJ_DIR)/../env/env_method.c_dependDone
TRUE
<END>
-<BEGIN> FILE_/export/home/db/env/db_shash.c_dependencies
+<BEGIN> FILE_$(PRJ_DIR)/../env/env_method.c_dependencies
$(PRJ_DIR)/db_config.h \
$(PRJ_DIR)/db_int.h \
- $(PRJ_DIR)/db.h \
- /export/home/db/include/queue.h \
- /export/home/db/include/shqueue.h \
- /export/home/db/include/debug.h \
- /export/home/db/include/mutex.h \
- /export/home/db/include/region.h \
- /export/home/db/include/mutex_ext.h \
- /export/home/db/include/env_ext.h \
- /export/home/db/include/os.h \
- /export/home/db/include/os_ext.h \
- /export/home/db/include/common_ext.h
+ $(PRJ_DIR)/db.h
<END>
-<BEGIN> FILE_/export/home/db/env/db_shash.c_objects
-db_shash.o
+<BEGIN> FILE_$(PRJ_DIR)/../env/env_method.c_objects
+env_method.o
<END>
-<BEGIN> FILE_/export/home/db/env/db_shash.c_tool
+<BEGIN> FILE_$(PRJ_DIR)/../env/env_method.c_tool
C/C++ compiler
<END>
-<BEGIN> FILE_/export/home/db/env/env_method.c_dependDone
+<BEGIN> FILE_$(PRJ_DIR)/../env/env_open.c_dependDone
TRUE
<END>
-<BEGIN> FILE_/export/home/db/env/env_method.c_dependencies
+<BEGIN> FILE_$(PRJ_DIR)/../env/env_open.c_dependencies
$(PRJ_DIR)/db_config.h \
- /export/home/db/include/db_server.h \
$(PRJ_DIR)/db_int.h \
- $(PRJ_DIR)/db.h \
- /export/home/db/include/queue.h \
- /export/home/db/include/shqueue.h \
- /export/home/db/include/debug.h \
- /export/home/db/include/mutex.h \
- /export/home/db/include/region.h \
- /export/home/db/include/mutex_ext.h \
- /export/home/db/include/env_ext.h \
- /export/home/db/include/os.h \
- /export/home/db/include/os_ext.h \
- /export/home/db/include/common_ext.h \
- /export/home/db/include/db_shash.h \
- /export/home/db/include/db_page.h \
- /export/home/db/include/db_am.h \
- /export/home/db/include/db_dispatch.h \
- /export/home/db/include/db_auto.h \
- /export/home/db/include/crdel_auto.h \
- /export/home/db/include/db_ext.h \
- /export/home/db/include/lock.h \
- /export/home/db/include/lock_ext.h \
- /export/home/db/include/log.h \
- /export/home/db/include/log_auto.h \
- /export/home/db/include/log_ext.h \
- /export/home/db/include/mp.h \
- /export/home/db/include/mp_ext.h \
- /export/home/db/include/txn.h \
- /export/home/db/include/xa.h \
- /export/home/db/include/txn_auto.h \
- /export/home/db/include/txn_ext.h \
- /export/home/db/include/xa_ext.h \
- /export/home/db/include/gen_client_ext.h \
- /export/home/db/include/rpc_client_ext.h
+ $(PRJ_DIR)/db.h
<END>
-<BEGIN> FILE_/export/home/db/env/env_method.c_objects
-env_method.o
-<END>
-
-<BEGIN> FILE_/export/home/db/env/env_method.c_tool
-C/C++ compiler
-<END>
-
-<BEGIN> FILE_/export/home/db/env/env_open.c_dependDone
-TRUE
-<END>
-
-<BEGIN> FILE_/export/home/db/env/env_open.c_dependencies
-$(PRJ_DIR)/db_config.h \
- $(PRJ_DIR)/db_int.h \
- $(PRJ_DIR)/db.h \
- /export/home/db/include/queue.h \
- /export/home/db/include/shqueue.h \
- /export/home/db/include/debug.h \
- /export/home/db/include/mutex.h \
- /export/home/db/include/region.h \
- /export/home/db/include/mutex_ext.h \
- /export/home/db/include/env_ext.h \
- /export/home/db/include/os.h \
- /export/home/db/include/os_ext.h \
- /export/home/db/include/common_ext.h \
- /export/home/db/include/db_page.h \
- /export/home/db/include/db_shash.h \
- /export/home/db/include/btree.h \
- /export/home/db/include/btree_auto.h \
- /export/home/db/include/btree_ext.h \
- /export/home/db/include/db_am.h \
- /export/home/db/include/db_dispatch.h \
- /export/home/db/include/db_auto.h \
- /export/home/db/include/crdel_auto.h \
- /export/home/db/include/db_ext.h \
- /export/home/db/include/hash.h \
- /export/home/db/include/hash_auto.h \
- /export/home/db/include/hash_ext.h \
- /export/home/db/include/qam.h \
- /export/home/db/include/qam_auto.h \
- /export/home/db/include/qam_ext.h \
- /export/home/db/include/lock.h \
- /export/home/db/include/lock_ext.h \
- /export/home/db/include/log.h \
- /export/home/db/include/log_auto.h \
- /export/home/db/include/log_ext.h \
- /export/home/db/include/mp.h \
- /export/home/db/include/mp_ext.h \
- /export/home/db/include/txn.h \
- /export/home/db/include/xa.h \
- /export/home/db/include/txn_auto.h \
- /export/home/db/include/txn_ext.h \
- /export/home/db/include/xa_ext.h \
- /export/home/db/include/clib_ext.h
-<END>
-
-<BEGIN> FILE_/export/home/db/env/env_open.c_objects
+<BEGIN> FILE_$(PRJ_DIR)/../env/env_open.c_objects
env_open.o
<END>
-<BEGIN> FILE_/export/home/db/env/env_open.c_tool
+<BEGIN> FILE_$(PRJ_DIR)/../env/env_open.c_tool
C/C++ compiler
<END>
-<BEGIN> FILE_/export/home/db/env/env_recover.c_dependDone
+<BEGIN> FILE_$(PRJ_DIR)/../env/env_recover.c_dependDone
TRUE
<END>
-<BEGIN> FILE_/export/home/db/env/env_recover.c_dependencies
+<BEGIN> FILE_$(PRJ_DIR)/../env/env_recover.c_dependencies
$(PRJ_DIR)/db_config.h \
$(PRJ_DIR)/db_int.h \
- $(PRJ_DIR)/db.h \
- /export/home/db/include/queue.h \
- /export/home/db/include/shqueue.h \
- /export/home/db/include/debug.h \
- /export/home/db/include/mutex.h \
- /export/home/db/include/region.h \
- /export/home/db/include/mutex_ext.h \
- /export/home/db/include/env_ext.h \
- /export/home/db/include/os.h \
- /export/home/db/include/os_ext.h \
- /export/home/db/include/common_ext.h \
- /export/home/db/include/db_page.h \
- /export/home/db/include/db_dispatch.h \
- /export/home/db/include/db_am.h \
- /export/home/db/include/db_auto.h \
- /export/home/db/include/crdel_auto.h \
- /export/home/db/include/db_ext.h \
- /export/home/db/include/log.h \
- /export/home/db/include/log_auto.h \
- /export/home/db/include/log_ext.h \
- /export/home/db/include/txn.h \
- /export/home/db/include/xa.h \
- /export/home/db/include/txn_auto.h \
- /export/home/db/include/txn_ext.h \
- /export/home/db/include/xa_ext.h
+ $(PRJ_DIR)/db.h
<END>
-<BEGIN> FILE_/export/home/db/env/env_recover.c_objects
+<BEGIN> FILE_$(PRJ_DIR)/../env/env_recover.c_objects
env_recover.o
<END>
-<BEGIN> FILE_/export/home/db/env/env_recover.c_tool
+<BEGIN> FILE_$(PRJ_DIR)/../env/env_recover.c_tool
C/C++ compiler
<END>
-<BEGIN> FILE_/export/home/db/env/env_region.c_dependDone
+<BEGIN> FILE_$(PRJ_DIR)/../env/env_region.c_dependDone
TRUE
<END>
-<BEGIN> FILE_/export/home/db/env/env_region.c_dependencies
+<BEGIN> FILE_$(PRJ_DIR)/../env/env_region.c_dependencies
$(PRJ_DIR)/db_config.h \
$(PRJ_DIR)/db_int.h \
- $(PRJ_DIR)/db.h \
- /export/home/db/include/queue.h \
- /export/home/db/include/shqueue.h \
- /export/home/db/include/debug.h \
- /export/home/db/include/mutex.h \
- /export/home/db/include/region.h \
- /export/home/db/include/mutex_ext.h \
- /export/home/db/include/env_ext.h \
- /export/home/db/include/os.h \
- /export/home/db/include/os_ext.h \
- /export/home/db/include/common_ext.h \
- /export/home/db/include/db_shash.h \
- /export/home/db/include/lock.h \
- /export/home/db/include/lock_ext.h \
- /export/home/db/include/log.h \
- /export/home/db/include/log_auto.h \
- /export/home/db/include/log_ext.h \
- /export/home/db/include/mp.h \
- /export/home/db/include/mp_ext.h \
- /export/home/db/include/txn.h \
- /export/home/db/include/xa.h \
- /export/home/db/include/txn_auto.h \
- /export/home/db/include/txn_ext.h \
- /export/home/db/include/xa_ext.h
+ $(PRJ_DIR)/db.h
<END>
-<BEGIN> FILE_/export/home/db/env/env_region.c_objects
+<BEGIN> FILE_$(PRJ_DIR)/../env/env_region.c_objects
env_region.o
<END>
-<BEGIN> FILE_/export/home/db/env/env_region.c_tool
+<BEGIN> FILE_$(PRJ_DIR)/../env/env_region.c_tool
C/C++ compiler
<END>
-<BEGIN> FILE_/export/home/db/hash/hash.c_dependDone
+<BEGIN> FILE_$(PRJ_DIR)/../fileops/fileops_auto.c_dependDone
TRUE
<END>
-<BEGIN> FILE_/export/home/db/hash/hash.c_dependencies
+<BEGIN> FILE_$(PRJ_DIR)/../fileops/fileops_auto.c_dependencies
$(PRJ_DIR)/db_config.h \
$(PRJ_DIR)/db_int.h \
- $(PRJ_DIR)/db.h \
- /export/home/db/include/queue.h \
- /export/home/db/include/shqueue.h \
- /export/home/db/include/debug.h \
- /export/home/db/include/mutex.h \
- /export/home/db/include/region.h \
- /export/home/db/include/mutex_ext.h \
- /export/home/db/include/env_ext.h \
- /export/home/db/include/os.h \
- /export/home/db/include/os_ext.h \
- /export/home/db/include/common_ext.h \
- /export/home/db/include/db_page.h \
- /export/home/db/include/db_am.h \
- /export/home/db/include/db_dispatch.h \
- /export/home/db/include/db_auto.h \
- /export/home/db/include/crdel_auto.h \
- /export/home/db/include/db_ext.h \
- /export/home/db/include/db_shash.h \
- /export/home/db/include/db_swap.h \
- /export/home/db/include/hash.h \
- /export/home/db/include/hash_auto.h \
- /export/home/db/include/hash_ext.h \
- /export/home/db/include/btree.h \
- /export/home/db/include/btree_auto.h \
- /export/home/db/include/btree_ext.h \
- /export/home/db/include/log.h \
- /export/home/db/include/log_auto.h \
- /export/home/db/include/log_ext.h \
- /export/home/db/include/lock.h \
- /export/home/db/include/lock_ext.h \
- /export/home/db/include/txn.h \
- /export/home/db/include/xa.h \
- /export/home/db/include/txn_auto.h \
- /export/home/db/include/txn_ext.h \
- /export/home/db/include/xa_ext.h
+ $(PRJ_DIR)/db.h
<END>
-<BEGIN> FILE_/export/home/db/hash/hash.c_objects
-hash.o
+<BEGIN> FILE_$(PRJ_DIR)/../fileops/fileops_auto.c_objects
+fileops_auto.o
<END>
-<BEGIN> FILE_/export/home/db/hash/hash.c_tool
+<BEGIN> FILE_$(PRJ_DIR)/../fileops/fileops_auto.c_tool
C/C++ compiler
<END>
-<BEGIN> FILE_/export/home/db/hash/hash_auto.c_dependDone
+<BEGIN> FILE_$(PRJ_DIR)/../fileops/fop_basic.c_dependDone
TRUE
<END>
-<BEGIN> FILE_/export/home/db/hash/hash_auto.c_dependencies
+<BEGIN> FILE_$(PRJ_DIR)/../fileops/fop_basic.c_dependencies
$(PRJ_DIR)/db_config.h \
$(PRJ_DIR)/db_int.h \
- $(PRJ_DIR)/db.h \
- /export/home/db/include/queue.h \
- /export/home/db/include/shqueue.h \
- /export/home/db/include/debug.h \
- /export/home/db/include/mutex.h \
- /export/home/db/include/region.h \
- /export/home/db/include/mutex_ext.h \
- /export/home/db/include/env_ext.h \
- /export/home/db/include/os.h \
- /export/home/db/include/os_ext.h \
- /export/home/db/include/common_ext.h \
- /export/home/db/include/db_page.h \
- /export/home/db/include/db_dispatch.h \
- /export/home/db/include/db_am.h \
- /export/home/db/include/db_auto.h \
- /export/home/db/include/crdel_auto.h \
- /export/home/db/include/db_ext.h \
- /export/home/db/include/hash.h \
- /export/home/db/include/hash_auto.h \
- /export/home/db/include/hash_ext.h \
- /export/home/db/include/txn.h \
- /export/home/db/include/xa.h \
- /export/home/db/include/txn_auto.h \
- /export/home/db/include/txn_ext.h \
- /export/home/db/include/xa_ext.h
+ $(PRJ_DIR)/db.h
<END>
-<BEGIN> FILE_/export/home/db/hash/hash_auto.c_objects
-hash_auto.o
+<BEGIN> FILE_$(PRJ_DIR)/../fileops/fop_basic.c_objects
+fop_basic.o
<END>
-<BEGIN> FILE_/export/home/db/hash/hash_auto.c_tool
+<BEGIN> FILE_$(PRJ_DIR)/../fileops/fop_basic.c_tool
C/C++ compiler
<END>
-<BEGIN> FILE_/export/home/db/hash/hash_conv.c_dependDone
+<BEGIN> FILE_$(PRJ_DIR)/../fileops/fop_rec.c_dependDone
TRUE
<END>
-<BEGIN> FILE_/export/home/db/hash/hash_conv.c_dependencies
+<BEGIN> FILE_$(PRJ_DIR)/../fileops/fop_rec.c_dependencies
$(PRJ_DIR)/db_config.h \
$(PRJ_DIR)/db_int.h \
- $(PRJ_DIR)/db.h \
- /export/home/db/include/queue.h \
- /export/home/db/include/shqueue.h \
- /export/home/db/include/debug.h \
- /export/home/db/include/mutex.h \
- /export/home/db/include/region.h \
- /export/home/db/include/mutex_ext.h \
- /export/home/db/include/env_ext.h \
- /export/home/db/include/os.h \
- /export/home/db/include/os_ext.h \
- /export/home/db/include/common_ext.h \
- /export/home/db/include/db_page.h \
- /export/home/db/include/db_swap.h \
- /export/home/db/include/hash.h \
- /export/home/db/include/hash_auto.h \
- /export/home/db/include/hash_ext.h \
- /export/home/db/include/db_am.h \
- /export/home/db/include/db_dispatch.h \
- /export/home/db/include/db_auto.h \
- /export/home/db/include/crdel_auto.h \
- /export/home/db/include/db_ext.h
+ $(PRJ_DIR)/db.h
<END>
-<BEGIN> FILE_/export/home/db/hash/hash_conv.c_objects
-hash_conv.o
+<BEGIN> FILE_$(PRJ_DIR)/../fileops/fop_rec.c_objects
+fop_rec.o
<END>
-<BEGIN> FILE_/export/home/db/hash/hash_conv.c_tool
+<BEGIN> FILE_$(PRJ_DIR)/../fileops/fop_rec.c_tool
C/C++ compiler
<END>
-<BEGIN> FILE_/export/home/db/hash/hash_dup.c_dependDone
+<BEGIN> FILE_$(PRJ_DIR)/../fileops/fop_util.c_dependDone
TRUE
<END>
-<BEGIN> FILE_/export/home/db/hash/hash_dup.c_dependencies
+<BEGIN> FILE_$(PRJ_DIR)/../fileops/fop_util.c_dependencies
$(PRJ_DIR)/db_config.h \
$(PRJ_DIR)/db_int.h \
- $(PRJ_DIR)/db.h \
- /export/home/db/include/queue.h \
- /export/home/db/include/shqueue.h \
- /export/home/db/include/debug.h \
- /export/home/db/include/mutex.h \
- /export/home/db/include/region.h \
- /export/home/db/include/mutex_ext.h \
- /export/home/db/include/env_ext.h \
- /export/home/db/include/os.h \
- /export/home/db/include/os_ext.h \
- /export/home/db/include/common_ext.h \
- /export/home/db/include/db_page.h \
- /export/home/db/include/hash.h \
- /export/home/db/include/hash_auto.h \
- /export/home/db/include/hash_ext.h \
- /export/home/db/include/db_am.h \
- /export/home/db/include/db_dispatch.h \
- /export/home/db/include/db_auto.h \
- /export/home/db/include/crdel_auto.h \
- /export/home/db/include/db_ext.h \
- /export/home/db/include/btree.h \
- /export/home/db/include/btree_auto.h \
- /export/home/db/include/btree_ext.h \
- /export/home/db/include/txn.h \
- /export/home/db/include/xa.h \
- /export/home/db/include/txn_auto.h \
- /export/home/db/include/txn_ext.h \
- /export/home/db/include/xa_ext.h
+ $(PRJ_DIR)/db.h
<END>
-<BEGIN> FILE_/export/home/db/hash/hash_dup.c_objects
-hash_dup.o
+<BEGIN> FILE_$(PRJ_DIR)/../fileops/fop_util.c_objects
+fop_util.o
<END>
-<BEGIN> FILE_/export/home/db/hash/hash_dup.c_tool
+<BEGIN> FILE_$(PRJ_DIR)/../fileops/fop_util.c_tool
C/C++ compiler
<END>
-<BEGIN> FILE_/export/home/db/hash/hash_func.c_dependDone
+<BEGIN> FILE_$(PRJ_DIR)/../hash/hash.c_dependDone
TRUE
<END>
-<BEGIN> FILE_/export/home/db/hash/hash_func.c_dependencies
+<BEGIN> FILE_$(PRJ_DIR)/../hash/hash.c_dependencies
$(PRJ_DIR)/db_config.h \
$(PRJ_DIR)/db_int.h \
- $(PRJ_DIR)/db.h \
- /export/home/db/include/queue.h \
- /export/home/db/include/shqueue.h \
- /export/home/db/include/debug.h \
- /export/home/db/include/mutex.h \
- /export/home/db/include/region.h \
- /export/home/db/include/mutex_ext.h \
- /export/home/db/include/env_ext.h \
- /export/home/db/include/os.h \
- /export/home/db/include/os_ext.h \
- /export/home/db/include/common_ext.h \
- /export/home/db/include/db_page.h \
- /export/home/db/include/hash.h \
- /export/home/db/include/hash_auto.h \
- /export/home/db/include/hash_ext.h \
- /export/home/db/include/db_am.h \
- /export/home/db/include/db_dispatch.h \
- /export/home/db/include/db_auto.h \
- /export/home/db/include/crdel_auto.h \
- /export/home/db/include/db_ext.h
+ $(PRJ_DIR)/db.h
<END>
-<BEGIN> FILE_/export/home/db/hash/hash_func.c_objects
-hash_func.o
+<BEGIN> FILE_$(PRJ_DIR)/../hash/hash.c_objects
+hash.o
<END>
-<BEGIN> FILE_/export/home/db/hash/hash_func.c_tool
+<BEGIN> FILE_$(PRJ_DIR)/../hash/hash.c_tool
C/C++ compiler
<END>
-<BEGIN> FILE_/export/home/db/hash/hash_meta.c_dependDone
+<BEGIN> FILE_$(PRJ_DIR)/../hash/hash_auto.c_dependDone
TRUE
<END>
-<BEGIN> FILE_/export/home/db/hash/hash_meta.c_dependencies
+<BEGIN> FILE_$(PRJ_DIR)/../hash/hash_auto.c_dependencies
$(PRJ_DIR)/db_config.h \
$(PRJ_DIR)/db_int.h \
- $(PRJ_DIR)/db.h \
- /export/home/db/include/queue.h \
- /export/home/db/include/shqueue.h \
- /export/home/db/include/debug.h \
- /export/home/db/include/mutex.h \
- /export/home/db/include/region.h \
- /export/home/db/include/mutex_ext.h \
- /export/home/db/include/env_ext.h \
- /export/home/db/include/os.h \
- /export/home/db/include/os_ext.h \
- /export/home/db/include/common_ext.h \
- /export/home/db/include/db_page.h \
- /export/home/db/include/hash.h \
- /export/home/db/include/hash_auto.h \
- /export/home/db/include/hash_ext.h \
- /export/home/db/include/db_am.h \
- /export/home/db/include/db_dispatch.h \
- /export/home/db/include/db_auto.h \
- /export/home/db/include/crdel_auto.h \
- /export/home/db/include/db_ext.h \
- /export/home/db/include/db_shash.h \
- /export/home/db/include/lock.h \
- /export/home/db/include/lock_ext.h \
- /export/home/db/include/txn.h \
- /export/home/db/include/xa.h \
- /export/home/db/include/txn_auto.h \
- /export/home/db/include/txn_ext.h \
- /export/home/db/include/xa_ext.h
+ $(PRJ_DIR)/db.h
<END>
-<BEGIN> FILE_/export/home/db/hash/hash_meta.c_objects
-hash_meta.o
+<BEGIN> FILE_$(PRJ_DIR)/../hash/hash_auto.c_objects
+hash_auto.o
<END>
-<BEGIN> FILE_/export/home/db/hash/hash_meta.c_tool
+<BEGIN> FILE_$(PRJ_DIR)/../hash/hash_auto.c_tool
C/C++ compiler
<END>
-<BEGIN> FILE_/export/home/db/hash/hash_method.c_dependDone
+<BEGIN> FILE_$(PRJ_DIR)/../hash/hash_conv.c_dependDone
TRUE
<END>
-<BEGIN> FILE_/export/home/db/hash/hash_method.c_dependencies
+<BEGIN> FILE_$(PRJ_DIR)/../hash/hash_conv.c_dependencies
$(PRJ_DIR)/db_config.h \
$(PRJ_DIR)/db_int.h \
- $(PRJ_DIR)/db.h \
- /export/home/db/include/queue.h \
- /export/home/db/include/shqueue.h \
- /export/home/db/include/debug.h \
- /export/home/db/include/mutex.h \
- /export/home/db/include/region.h \
- /export/home/db/include/mutex_ext.h \
- /export/home/db/include/env_ext.h \
- /export/home/db/include/os.h \
- /export/home/db/include/os_ext.h \
- /export/home/db/include/common_ext.h \
- /export/home/db/include/db_page.h \
- /export/home/db/include/hash.h \
- /export/home/db/include/hash_auto.h \
- /export/home/db/include/hash_ext.h \
- /export/home/db/include/db_am.h \
- /export/home/db/include/db_dispatch.h \
- /export/home/db/include/db_auto.h \
- /export/home/db/include/crdel_auto.h \
- /export/home/db/include/db_ext.h
+ $(PRJ_DIR)/db.h
<END>
-<BEGIN> FILE_/export/home/db/hash/hash_method.c_objects
-hash_method.o
+<BEGIN> FILE_$(PRJ_DIR)/../hash/hash_conv.c_objects
+hash_conv.o
<END>
-<BEGIN> FILE_/export/home/db/hash/hash_method.c_tool
+<BEGIN> FILE_$(PRJ_DIR)/../hash/hash_conv.c_tool
C/C++ compiler
<END>
-<BEGIN> FILE_/export/home/db/hash/hash_page.c_dependDone
+<BEGIN> FILE_$(PRJ_DIR)/../hash/hash_dup.c_dependDone
TRUE
<END>
-<BEGIN> FILE_/export/home/db/hash/hash_page.c_dependencies
+<BEGIN> FILE_$(PRJ_DIR)/../hash/hash_dup.c_dependencies
$(PRJ_DIR)/db_config.h \
$(PRJ_DIR)/db_int.h \
- $(PRJ_DIR)/db.h \
- /export/home/db/include/queue.h \
- /export/home/db/include/shqueue.h \
- /export/home/db/include/debug.h \
- /export/home/db/include/mutex.h \
- /export/home/db/include/region.h \
- /export/home/db/include/mutex_ext.h \
- /export/home/db/include/env_ext.h \
- /export/home/db/include/os.h \
- /export/home/db/include/os_ext.h \
- /export/home/db/include/common_ext.h \
- /export/home/db/include/db_page.h \
- /export/home/db/include/db_shash.h \
- /export/home/db/include/hash.h \
- /export/home/db/include/hash_auto.h \
- /export/home/db/include/hash_ext.h \
- /export/home/db/include/db_am.h \
- /export/home/db/include/db_dispatch.h \
- /export/home/db/include/db_auto.h \
- /export/home/db/include/crdel_auto.h \
- /export/home/db/include/db_ext.h \
- /export/home/db/include/lock.h \
- /export/home/db/include/lock_ext.h \
- /export/home/db/include/txn.h \
- /export/home/db/include/xa.h \
- /export/home/db/include/txn_auto.h \
- /export/home/db/include/txn_ext.h \
- /export/home/db/include/xa_ext.h
+ $(PRJ_DIR)/db.h
<END>
-<BEGIN> FILE_/export/home/db/hash/hash_page.c_objects
-hash_page.o
+<BEGIN> FILE_$(PRJ_DIR)/../hash/hash_dup.c_objects
+hash_dup.o
<END>
-<BEGIN> FILE_/export/home/db/hash/hash_page.c_tool
+<BEGIN> FILE_$(PRJ_DIR)/../hash/hash_dup.c_tool
C/C++ compiler
<END>
-<BEGIN> FILE_/export/home/db/hash/hash_rec.c_dependDone
+<BEGIN> FILE_$(PRJ_DIR)/../hash/hash_func.c_dependDone
TRUE
<END>
-<BEGIN> FILE_/export/home/db/hash/hash_rec.c_dependencies
+<BEGIN> FILE_$(PRJ_DIR)/../hash/hash_func.c_dependencies
$(PRJ_DIR)/db_config.h \
$(PRJ_DIR)/db_int.h \
- $(PRJ_DIR)/db.h \
- /export/home/db/include/queue.h \
- /export/home/db/include/shqueue.h \
- /export/home/db/include/debug.h \
- /export/home/db/include/mutex.h \
- /export/home/db/include/region.h \
- /export/home/db/include/mutex_ext.h \
- /export/home/db/include/env_ext.h \
- /export/home/db/include/os.h \
- /export/home/db/include/os_ext.h \
- /export/home/db/include/common_ext.h \
- /export/home/db/include/db_page.h \
- /export/home/db/include/db_shash.h \
- /export/home/db/include/btree.h \
- /export/home/db/include/btree_auto.h \
- /export/home/db/include/btree_ext.h \
- /export/home/db/include/db_am.h \
- /export/home/db/include/db_dispatch.h \
- /export/home/db/include/db_auto.h \
- /export/home/db/include/crdel_auto.h \
- /export/home/db/include/db_ext.h \
- /export/home/db/include/hash.h \
- /export/home/db/include/hash_auto.h \
- /export/home/db/include/hash_ext.h \
- /export/home/db/include/lock.h \
- /export/home/db/include/lock_ext.h \
- /export/home/db/include/log.h \
- /export/home/db/include/log_auto.h \
- /export/home/db/include/log_ext.h \
- /export/home/db/include/mp.h \
- /export/home/db/include/mp_ext.h
+ $(PRJ_DIR)/db.h
<END>
-<BEGIN> FILE_/export/home/db/hash/hash_rec.c_objects
-hash_rec.o
+<BEGIN> FILE_$(PRJ_DIR)/../hash/hash_func.c_objects
+hash_func.o
<END>
-<BEGIN> FILE_/export/home/db/hash/hash_rec.c_tool
+<BEGIN> FILE_$(PRJ_DIR)/../hash/hash_func.c_tool
C/C++ compiler
<END>
-<BEGIN> FILE_/export/home/db/hash/hash_reclaim.c_dependDone
+<BEGIN> FILE_$(PRJ_DIR)/../hash/hash_meta.c_dependDone
TRUE
<END>
-<BEGIN> FILE_/export/home/db/hash/hash_reclaim.c_dependencies
+<BEGIN> FILE_$(PRJ_DIR)/../hash/hash_meta.c_dependencies
$(PRJ_DIR)/db_config.h \
$(PRJ_DIR)/db_int.h \
- $(PRJ_DIR)/db.h \
- /export/home/db/include/queue.h \
- /export/home/db/include/shqueue.h \
- /export/home/db/include/debug.h \
- /export/home/db/include/mutex.h \
- /export/home/db/include/region.h \
- /export/home/db/include/mutex_ext.h \
- /export/home/db/include/env_ext.h \
- /export/home/db/include/os.h \
- /export/home/db/include/os_ext.h \
- /export/home/db/include/common_ext.h \
- /export/home/db/include/db_page.h \
- /export/home/db/include/db_shash.h \
- /export/home/db/include/hash.h \
- /export/home/db/include/hash_auto.h \
- /export/home/db/include/hash_ext.h \
- /export/home/db/include/db_am.h \
- /export/home/db/include/db_dispatch.h \
- /export/home/db/include/db_auto.h \
- /export/home/db/include/crdel_auto.h \
- /export/home/db/include/db_ext.h \
- /export/home/db/include/lock.h \
- /export/home/db/include/lock_ext.h
+ $(PRJ_DIR)/db.h
<END>
-<BEGIN> FILE_/export/home/db/hash/hash_reclaim.c_objects
-hash_reclaim.o
+<BEGIN> FILE_$(PRJ_DIR)/../hash/hash_meta.c_objects
+hash_meta.o
<END>
-<BEGIN> FILE_/export/home/db/hash/hash_reclaim.c_tool
+<BEGIN> FILE_$(PRJ_DIR)/../hash/hash_meta.c_tool
C/C++ compiler
<END>
-<BEGIN> FILE_/export/home/db/hash/hash_stat.c_dependDone
+<BEGIN> FILE_$(PRJ_DIR)/../hash/hash_method.c_dependDone
TRUE
<END>
-<BEGIN> FILE_/export/home/db/hash/hash_stat.c_dependencies
+<BEGIN> FILE_$(PRJ_DIR)/../hash/hash_method.c_dependencies
$(PRJ_DIR)/db_config.h \
$(PRJ_DIR)/db_int.h \
- $(PRJ_DIR)/db.h \
- /export/home/db/include/queue.h \
- /export/home/db/include/shqueue.h \
- /export/home/db/include/debug.h \
- /export/home/db/include/mutex.h \
- /export/home/db/include/region.h \
- /export/home/db/include/mutex_ext.h \
- /export/home/db/include/env_ext.h \
- /export/home/db/include/os.h \
- /export/home/db/include/os_ext.h \
- /export/home/db/include/common_ext.h \
- /export/home/db/include/db_page.h \
- /export/home/db/include/db_shash.h \
- /export/home/db/include/btree.h \
- /export/home/db/include/btree_auto.h \
- /export/home/db/include/btree_ext.h \
- /export/home/db/include/db_am.h \
- /export/home/db/include/db_dispatch.h \
- /export/home/db/include/db_auto.h \
- /export/home/db/include/crdel_auto.h \
- /export/home/db/include/db_ext.h \
- /export/home/db/include/hash.h \
- /export/home/db/include/hash_auto.h \
- /export/home/db/include/hash_ext.h \
- /export/home/db/include/lock.h \
- /export/home/db/include/lock_ext.h
+ $(PRJ_DIR)/db.h
<END>
-<BEGIN> FILE_/export/home/db/hash/hash_stat.c_objects
-hash_stat.o
+<BEGIN> FILE_$(PRJ_DIR)/../hash/hash_method.c_objects
+hash_method.o
<END>
-<BEGIN> FILE_/export/home/db/hash/hash_stat.c_tool
+<BEGIN> FILE_$(PRJ_DIR)/../hash/hash_method.c_tool
C/C++ compiler
<END>
-<BEGIN> FILE_/export/home/db/hash/hash_upgrade.c_dependDone
+<BEGIN> FILE_$(PRJ_DIR)/../hash/hash_open.c_dependDone
TRUE
<END>
-<BEGIN> FILE_/export/home/db/hash/hash_upgrade.c_dependencies
+<BEGIN> FILE_$(PRJ_DIR)/../hash/hash_open.c_dependencies
$(PRJ_DIR)/db_config.h \
$(PRJ_DIR)/db_int.h \
- $(PRJ_DIR)/db.h \
- /export/home/db/include/queue.h \
- /export/home/db/include/shqueue.h \
- /export/home/db/include/debug.h \
- /export/home/db/include/mutex.h \
- /export/home/db/include/region.h \
- /export/home/db/include/mutex_ext.h \
- /export/home/db/include/env_ext.h \
- /export/home/db/include/os.h \
- /export/home/db/include/os_ext.h \
- /export/home/db/include/common_ext.h \
- /export/home/db/include/db_page.h \
- /export/home/db/include/db_swap.h \
- /export/home/db/include/hash.h \
- /export/home/db/include/hash_auto.h \
- /export/home/db/include/hash_ext.h \
- /export/home/db/include/db_am.h \
- /export/home/db/include/db_dispatch.h \
- /export/home/db/include/db_auto.h \
- /export/home/db/include/crdel_auto.h \
- /export/home/db/include/db_ext.h \
- /export/home/db/include/db_upgrade.h
+ $(PRJ_DIR)/db.h
<END>
-<BEGIN> FILE_/export/home/db/hash/hash_upgrade.c_objects
-hash_upgrade.o
+<BEGIN> FILE_$(PRJ_DIR)/../hash/hash_open.c_objects
+hash_open.o
<END>
-<BEGIN> FILE_/export/home/db/hash/hash_upgrade.c_tool
+<BEGIN> FILE_$(PRJ_DIR)/../hash/hash_open.c_tool
C/C++ compiler
<END>
-<BEGIN> FILE_/export/home/db/hash/hash_verify.c_dependDone
+<BEGIN> FILE_$(PRJ_DIR)/../hash/hash_page.c_dependDone
TRUE
<END>
-<BEGIN> FILE_/export/home/db/hash/hash_verify.c_dependencies
+<BEGIN> FILE_$(PRJ_DIR)/../hash/hash_page.c_dependencies
$(PRJ_DIR)/db_config.h \
$(PRJ_DIR)/db_int.h \
- $(PRJ_DIR)/db.h \
- /export/home/db/include/queue.h \
- /export/home/db/include/shqueue.h \
- /export/home/db/include/debug.h \
- /export/home/db/include/mutex.h \
- /export/home/db/include/region.h \
- /export/home/db/include/mutex_ext.h \
- /export/home/db/include/env_ext.h \
- /export/home/db/include/os.h \
- /export/home/db/include/os_ext.h \
- /export/home/db/include/common_ext.h \
- /export/home/db/include/db_page.h \
- /export/home/db/include/db_verify.h \
- /export/home/db/include/btree.h \
- /export/home/db/include/btree_auto.h \
- /export/home/db/include/btree_ext.h \
- /export/home/db/include/db_am.h \
- /export/home/db/include/db_dispatch.h \
- /export/home/db/include/db_auto.h \
- /export/home/db/include/crdel_auto.h \
- /export/home/db/include/db_ext.h \
- /export/home/db/include/hash.h \
- /export/home/db/include/hash_auto.h \
- /export/home/db/include/hash_ext.h
+ $(PRJ_DIR)/db.h
<END>
-<BEGIN> FILE_/export/home/db/hash/hash_verify.c_objects
-hash_verify.o
+<BEGIN> FILE_$(PRJ_DIR)/../hash/hash_page.c_objects
+hash_page.o
<END>
-<BEGIN> FILE_/export/home/db/hash/hash_verify.c_tool
+<BEGIN> FILE_$(PRJ_DIR)/../hash/hash_page.c_tool
C/C++ compiler
<END>
-<BEGIN> FILE_/export/home/db/hsearch/hsearch.c_dependDone
+<BEGIN> FILE_$(PRJ_DIR)/../hash/hash_rec.c_dependDone
TRUE
<END>
-<BEGIN> FILE_/export/home/db/hsearch/hsearch.c_dependencies
+<BEGIN> FILE_$(PRJ_DIR)/../hash/hash_rec.c_dependencies
$(PRJ_DIR)/db_config.h \
$(PRJ_DIR)/db_int.h \
- $(PRJ_DIR)/db.h \
- /export/home/db/include/queue.h \
- /export/home/db/include/shqueue.h \
- /export/home/db/include/debug.h \
- /export/home/db/include/mutex.h \
- /export/home/db/include/region.h \
- /export/home/db/include/mutex_ext.h \
- /export/home/db/include/env_ext.h \
- /export/home/db/include/os.h \
- /export/home/db/include/os_ext.h \
- /export/home/db/include/common_ext.h
+ $(PRJ_DIR)/db.h
<END>
-<BEGIN> FILE_/export/home/db/hsearch/hsearch.c_objects
-hsearch.o
+<BEGIN> FILE_$(PRJ_DIR)/../hash/hash_rec.c_objects
+hash_rec.o
<END>
-<BEGIN> FILE_/export/home/db/hsearch/hsearch.c_tool
+<BEGIN> FILE_$(PRJ_DIR)/../hash/hash_rec.c_tool
C/C++ compiler
<END>
-<BEGIN> FILE_/export/home/db/include/tcl_db.h_dependDone
+<BEGIN> FILE_$(PRJ_DIR)/../hash/hash_reclaim.c_dependDone
TRUE
<END>
-<BEGIN> FILE_/export/home/db/lock/lock.c_dependDone
+<BEGIN> FILE_$(PRJ_DIR)/../hash/hash_reclaim.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../hash/hash_reclaim.c_objects
+hash_reclaim.o
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../hash/hash_reclaim.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../hash/hash_stat.c_dependDone
TRUE
<END>
-<BEGIN> FILE_/export/home/db/lock/lock.c_dependencies
+<BEGIN> FILE_$(PRJ_DIR)/../hash/hash_stat.c_dependencies
$(PRJ_DIR)/db_config.h \
- /export/home/db/include/db_server.h \
$(PRJ_DIR)/db_int.h \
- $(PRJ_DIR)/db.h \
- /export/home/db/include/queue.h \
- /export/home/db/include/shqueue.h \
- /export/home/db/include/debug.h \
- /export/home/db/include/mutex.h \
- /export/home/db/include/region.h \
- /export/home/db/include/mutex_ext.h \
- /export/home/db/include/env_ext.h \
- /export/home/db/include/os.h \
- /export/home/db/include/os_ext.h \
- /export/home/db/include/common_ext.h \
- /export/home/db/include/db_page.h \
- /export/home/db/include/db_shash.h \
- /export/home/db/include/lock.h \
- /export/home/db/include/lock_ext.h \
- /export/home/db/include/log.h \
- /export/home/db/include/log_auto.h \
- /export/home/db/include/log_ext.h \
- /export/home/db/include/db_am.h \
- /export/home/db/include/db_dispatch.h \
- /export/home/db/include/db_auto.h \
- /export/home/db/include/crdel_auto.h \
- /export/home/db/include/db_ext.h \
- /export/home/db/include/txn.h \
- /export/home/db/include/xa.h \
- /export/home/db/include/txn_auto.h \
- /export/home/db/include/txn_ext.h \
- /export/home/db/include/xa_ext.h \
- /export/home/db/include/gen_client_ext.h \
- /export/home/db/include/rpc_client_ext.h
+ $(PRJ_DIR)/db.h
<END>
-<BEGIN> FILE_/export/home/db/lock/lock.c_objects
-lock.o
+<BEGIN> FILE_$(PRJ_DIR)/../hash/hash_stat.c_objects
+hash_stat.o
<END>
-<BEGIN> FILE_/export/home/db/lock/lock.c_tool
+<BEGIN> FILE_$(PRJ_DIR)/../hash/hash_stat.c_tool
C/C++ compiler
<END>
-<BEGIN> FILE_/export/home/db/lock/lock_conflict.c_dependDone
+<BEGIN> FILE_$(PRJ_DIR)/../hash/hash_upgrade.c_dependDone
TRUE
<END>
-<BEGIN> FILE_/export/home/db/lock/lock_conflict.c_dependencies
+<BEGIN> FILE_$(PRJ_DIR)/../hash/hash_upgrade.c_dependencies
$(PRJ_DIR)/db_config.h \
$(PRJ_DIR)/db_int.h \
- $(PRJ_DIR)/db.h \
- /export/home/db/include/queue.h \
- /export/home/db/include/shqueue.h \
- /export/home/db/include/debug.h \
- /export/home/db/include/mutex.h \
- /export/home/db/include/region.h \
- /export/home/db/include/mutex_ext.h \
- /export/home/db/include/env_ext.h \
- /export/home/db/include/os.h \
- /export/home/db/include/os_ext.h \
- /export/home/db/include/common_ext.h
+ $(PRJ_DIR)/db.h
<END>
-<BEGIN> FILE_/export/home/db/lock/lock_conflict.c_objects
-lock_conflict.o
+<BEGIN> FILE_$(PRJ_DIR)/../hash/hash_upgrade.c_objects
+hash_upgrade.o
<END>
-<BEGIN> FILE_/export/home/db/lock/lock_conflict.c_tool
+<BEGIN> FILE_$(PRJ_DIR)/../hash/hash_upgrade.c_tool
C/C++ compiler
<END>
-<BEGIN> FILE_/export/home/db/lock/lock_deadlock.c_dependDone
+<BEGIN> FILE_$(PRJ_DIR)/../hash/hash_verify.c_dependDone
TRUE
<END>
-<BEGIN> FILE_/export/home/db/lock/lock_deadlock.c_dependencies
+<BEGIN> FILE_$(PRJ_DIR)/../hash/hash_verify.c_dependencies
$(PRJ_DIR)/db_config.h \
- /export/home/db/include/db_server.h \
$(PRJ_DIR)/db_int.h \
- $(PRJ_DIR)/db.h \
- /export/home/db/include/queue.h \
- /export/home/db/include/shqueue.h \
- /export/home/db/include/debug.h \
- /export/home/db/include/mutex.h \
- /export/home/db/include/region.h \
- /export/home/db/include/mutex_ext.h \
- /export/home/db/include/env_ext.h \
- /export/home/db/include/os.h \
- /export/home/db/include/os_ext.h \
- /export/home/db/include/common_ext.h \
- /export/home/db/include/db_shash.h \
- /export/home/db/include/lock.h \
- /export/home/db/include/lock_ext.h \
- /export/home/db/include/txn.h \
- /export/home/db/include/xa.h \
- /export/home/db/include/txn_auto.h \
- /export/home/db/include/txn_ext.h \
- /export/home/db/include/xa_ext.h \
- /export/home/db/include/gen_client_ext.h \
- /export/home/db/include/rpc_client_ext.h
+ $(PRJ_DIR)/db.h
<END>
-<BEGIN> FILE_/export/home/db/lock/lock_deadlock.c_objects
-lock_deadlock.o
+<BEGIN> FILE_$(PRJ_DIR)/../hash/hash_verify.c_objects
+hash_verify.o
<END>
-<BEGIN> FILE_/export/home/db/lock/lock_deadlock.c_tool
+<BEGIN> FILE_$(PRJ_DIR)/../hash/hash_verify.c_tool
C/C++ compiler
<END>
-<BEGIN> FILE_/export/home/db/lock/lock_method.c_dependDone
+<BEGIN> FILE_$(PRJ_DIR)/../hmac/hmac.c_dependDone
TRUE
<END>
-<BEGIN> FILE_/export/home/db/lock/lock_method.c_dependencies
+<BEGIN> FILE_$(PRJ_DIR)/../hmac/hmac.c_dependencies
$(PRJ_DIR)/db_config.h \
$(PRJ_DIR)/db_int.h \
- $(PRJ_DIR)/db.h \
- /export/home/db/include/queue.h \
- /export/home/db/include/shqueue.h \
- /export/home/db/include/debug.h \
- /export/home/db/include/mutex.h \
- /export/home/db/include/region.h \
- /export/home/db/include/mutex_ext.h \
- /export/home/db/include/env_ext.h \
- /export/home/db/include/os.h \
- /export/home/db/include/os_ext.h \
- /export/home/db/include/common_ext.h \
- /export/home/db/include/db_shash.h \
- /export/home/db/include/lock.h \
- /export/home/db/include/lock_ext.h
+ $(PRJ_DIR)/db.h
<END>
-<BEGIN> FILE_/export/home/db/lock/lock_method.c_objects
-lock_method.o
+<BEGIN> FILE_$(PRJ_DIR)/../hmac/hmac.c_objects
+hmac.o
<END>
-<BEGIN> FILE_/export/home/db/lock/lock_method.c_tool
+<BEGIN> FILE_$(PRJ_DIR)/../hmac/hmac.c_tool
C/C++ compiler
<END>
-<BEGIN> FILE_/export/home/db/lock/lock_region.c_dependDone
+<BEGIN> FILE_$(PRJ_DIR)/../hmac/sha1.c_dependDone
TRUE
<END>
-<BEGIN> FILE_/export/home/db/lock/lock_region.c_dependencies
+<BEGIN> FILE_$(PRJ_DIR)/../hmac/sha1.c_dependencies
$(PRJ_DIR)/db_config.h \
- /export/home/db/include/db_server.h \
$(PRJ_DIR)/db_int.h \
- $(PRJ_DIR)/db.h \
- /export/home/db/include/queue.h \
- /export/home/db/include/shqueue.h \
- /export/home/db/include/debug.h \
- /export/home/db/include/mutex.h \
- /export/home/db/include/region.h \
- /export/home/db/include/mutex_ext.h \
- /export/home/db/include/env_ext.h \
- /export/home/db/include/os.h \
- /export/home/db/include/os_ext.h \
- /export/home/db/include/common_ext.h \
- /export/home/db/include/db_shash.h \
- /export/home/db/include/lock.h \
- /export/home/db/include/lock_ext.h \
- /export/home/db/include/gen_client_ext.h \
- /export/home/db/include/rpc_client_ext.h
+ $(PRJ_DIR)/db.h
<END>
-<BEGIN> FILE_/export/home/db/lock/lock_region.c_objects
-lock_region.o
+<BEGIN> FILE_$(PRJ_DIR)/../hmac/sha1.c_objects
+sha1.o
<END>
-<BEGIN> FILE_/export/home/db/lock/lock_region.c_tool
+<BEGIN> FILE_$(PRJ_DIR)/../hmac/sha1.c_tool
C/C++ compiler
<END>
-<BEGIN> FILE_/export/home/db/lock/lock_stat.c_dependDone
+<BEGIN> FILE_$(PRJ_DIR)/../hsearch/hsearch.c_dependDone
TRUE
<END>
-<BEGIN> FILE_/export/home/db/lock/lock_stat.c_dependencies
+<BEGIN> FILE_$(PRJ_DIR)/../hsearch/hsearch.c_dependencies
$(PRJ_DIR)/db_config.h \
- /export/home/db/include/db_server.h \
$(PRJ_DIR)/db_int.h \
- $(PRJ_DIR)/db.h \
- /export/home/db/include/queue.h \
- /export/home/db/include/shqueue.h \
- /export/home/db/include/debug.h \
- /export/home/db/include/mutex.h \
- /export/home/db/include/region.h \
- /export/home/db/include/mutex_ext.h \
- /export/home/db/include/env_ext.h \
- /export/home/db/include/os.h \
- /export/home/db/include/os_ext.h \
- /export/home/db/include/common_ext.h \
- /export/home/db/include/db_shash.h \
- /export/home/db/include/lock.h \
- /export/home/db/include/lock_ext.h \
- /export/home/db/include/gen_client_ext.h \
- /export/home/db/include/rpc_client_ext.h
+ $(PRJ_DIR)/db.h
<END>
-<BEGIN> FILE_/export/home/db/lock/lock_stat.c_objects
-lock_stat.o
+<BEGIN> FILE_$(PRJ_DIR)/../hsearch/hsearch.c_objects
+hsearch.o
<END>
-<BEGIN> FILE_/export/home/db/lock/lock_stat.c_tool
+<BEGIN> FILE_$(PRJ_DIR)/../hsearch/hsearch.c_tool
C/C++ compiler
<END>
-<BEGIN> FILE_/export/home/db/lock/lock_util.c_dependDone
+<BEGIN> FILE_$(PRJ_DIR)/../lock/lock.c_dependDone
TRUE
<END>
-<BEGIN> FILE_/export/home/db/lock/lock_util.c_dependencies
+<BEGIN> FILE_$(PRJ_DIR)/../lock/lock.c_dependencies
$(PRJ_DIR)/db_config.h \
$(PRJ_DIR)/db_int.h \
- $(PRJ_DIR)/db.h \
- /export/home/db/include/queue.h \
- /export/home/db/include/shqueue.h \
- /export/home/db/include/debug.h \
- /export/home/db/include/mutex.h \
- /export/home/db/include/region.h \
- /export/home/db/include/mutex_ext.h \
- /export/home/db/include/env_ext.h \
- /export/home/db/include/os.h \
- /export/home/db/include/os_ext.h \
- /export/home/db/include/common_ext.h \
- /export/home/db/include/db_page.h \
- /export/home/db/include/db_shash.h \
- /export/home/db/include/hash.h \
- /export/home/db/include/hash_auto.h \
- /export/home/db/include/hash_ext.h \
- /export/home/db/include/db_am.h \
- /export/home/db/include/db_dispatch.h \
- /export/home/db/include/db_auto.h \
- /export/home/db/include/crdel_auto.h \
- /export/home/db/include/db_ext.h \
- /export/home/db/include/lock.h \
- /export/home/db/include/lock_ext.h
+ $(PRJ_DIR)/db.h
<END>
-<BEGIN> FILE_/export/home/db/lock/lock_util.c_objects
-lock_util.o
+<BEGIN> FILE_$(PRJ_DIR)/../lock/lock.c_objects
+lock.o
<END>
-<BEGIN> FILE_/export/home/db/lock/lock_util.c_tool
+<BEGIN> FILE_$(PRJ_DIR)/../lock/lock.c_tool
C/C++ compiler
<END>
-<BEGIN> FILE_/export/home/db/log/log.c_dependDone
+<BEGIN> FILE_$(PRJ_DIR)/../lock/lock_deadlock.c_dependDone
TRUE
<END>
-<BEGIN> FILE_/export/home/db/log/log.c_dependencies
+<BEGIN> FILE_$(PRJ_DIR)/../lock/lock_deadlock.c_dependencies
$(PRJ_DIR)/db_config.h \
- /export/home/db/include/db_server.h \
$(PRJ_DIR)/db_int.h \
- $(PRJ_DIR)/db.h \
- /export/home/db/include/queue.h \
- /export/home/db/include/shqueue.h \
- /export/home/db/include/debug.h \
- /export/home/db/include/mutex.h \
- /export/home/db/include/region.h \
- /export/home/db/include/mutex_ext.h \
- /export/home/db/include/env_ext.h \
- /export/home/db/include/os.h \
- /export/home/db/include/os_ext.h \
- /export/home/db/include/common_ext.h \
- /export/home/db/include/log.h \
- /export/home/db/include/log_auto.h \
- /export/home/db/include/log_ext.h \
- /export/home/db/include/db_dispatch.h \
- /export/home/db/include/txn.h \
- /export/home/db/include/xa.h \
- /export/home/db/include/txn_auto.h \
- /export/home/db/include/txn_ext.h \
- /export/home/db/include/xa_ext.h \
- /export/home/db/include/gen_client_ext.h \
- /export/home/db/include/rpc_client_ext.h
+ $(PRJ_DIR)/db.h
<END>
-<BEGIN> FILE_/export/home/db/log/log.c_objects
-log.o
+<BEGIN> FILE_$(PRJ_DIR)/../lock/lock_deadlock.c_objects
+lock_deadlock.o
<END>
-<BEGIN> FILE_/export/home/db/log/log.c_tool
+<BEGIN> FILE_$(PRJ_DIR)/../lock/lock_deadlock.c_tool
C/C++ compiler
<END>
-<BEGIN> FILE_/export/home/db/log/log_archive.c_dependDone
+<BEGIN> FILE_$(PRJ_DIR)/../lock/lock_method.c_dependDone
TRUE
<END>
-<BEGIN> FILE_/export/home/db/log/log_archive.c_dependencies
+<BEGIN> FILE_$(PRJ_DIR)/../lock/lock_method.c_dependencies
$(PRJ_DIR)/db_config.h \
- /export/home/db/include/db_server.h \
$(PRJ_DIR)/db_int.h \
- $(PRJ_DIR)/db.h \
- /export/home/db/include/queue.h \
- /export/home/db/include/shqueue.h \
- /export/home/db/include/debug.h \
- /export/home/db/include/mutex.h \
- /export/home/db/include/region.h \
- /export/home/db/include/mutex_ext.h \
- /export/home/db/include/env_ext.h \
- /export/home/db/include/os.h \
- /export/home/db/include/os_ext.h \
- /export/home/db/include/common_ext.h \
- /export/home/db/include/db_dispatch.h \
- /export/home/db/include/log.h \
- /export/home/db/include/log_auto.h \
- /export/home/db/include/log_ext.h \
- /export/home/db/include/clib_ext.h \
- /export/home/db/include/gen_client_ext.h \
- /export/home/db/include/rpc_client_ext.h
+ $(PRJ_DIR)/db.h
<END>
-<BEGIN> FILE_/export/home/db/log/log_archive.c_objects
-log_archive.o
+<BEGIN> FILE_$(PRJ_DIR)/../lock/lock_method.c_objects
+lock_method.o
<END>
-<BEGIN> FILE_/export/home/db/log/log_archive.c_tool
+<BEGIN> FILE_$(PRJ_DIR)/../lock/lock_method.c_tool
C/C++ compiler
<END>
-<BEGIN> FILE_/export/home/db/log/log_auto.c_dependDone
+<BEGIN> FILE_$(PRJ_DIR)/../lock/lock_region.c_dependDone
TRUE
<END>
-<BEGIN> FILE_/export/home/db/log/log_auto.c_dependencies
+<BEGIN> FILE_$(PRJ_DIR)/../lock/lock_region.c_dependencies
$(PRJ_DIR)/db_config.h \
$(PRJ_DIR)/db_int.h \
- $(PRJ_DIR)/db.h \
- /export/home/db/include/queue.h \
- /export/home/db/include/shqueue.h \
- /export/home/db/include/debug.h \
- /export/home/db/include/mutex.h \
- /export/home/db/include/region.h \
- /export/home/db/include/mutex_ext.h \
- /export/home/db/include/env_ext.h \
- /export/home/db/include/os.h \
- /export/home/db/include/os_ext.h \
- /export/home/db/include/common_ext.h \
- /export/home/db/include/db_page.h \
- /export/home/db/include/db_dispatch.h \
- /export/home/db/include/db_am.h \
- /export/home/db/include/db_auto.h \
- /export/home/db/include/crdel_auto.h \
- /export/home/db/include/db_ext.h \
- /export/home/db/include/log.h \
- /export/home/db/include/log_auto.h \
- /export/home/db/include/log_ext.h \
- /export/home/db/include/txn.h \
- /export/home/db/include/xa.h \
- /export/home/db/include/txn_auto.h \
- /export/home/db/include/txn_ext.h \
- /export/home/db/include/xa_ext.h
+ $(PRJ_DIR)/db.h
<END>
-<BEGIN> FILE_/export/home/db/log/log_auto.c_objects
-log_auto.o
+<BEGIN> FILE_$(PRJ_DIR)/../lock/lock_region.c_objects
+lock_region.o
<END>
-<BEGIN> FILE_/export/home/db/log/log_auto.c_tool
+<BEGIN> FILE_$(PRJ_DIR)/../lock/lock_region.c_tool
C/C++ compiler
<END>
-<BEGIN> FILE_/export/home/db/log/log_compare.c_dependDone
+<BEGIN> FILE_$(PRJ_DIR)/../lock/lock_stat.c_dependDone
TRUE
<END>
-<BEGIN> FILE_/export/home/db/log/log_compare.c_dependencies
+<BEGIN> FILE_$(PRJ_DIR)/../lock/lock_stat.c_dependencies
$(PRJ_DIR)/db_config.h \
$(PRJ_DIR)/db_int.h \
- $(PRJ_DIR)/db.h \
- /export/home/db/include/queue.h \
- /export/home/db/include/shqueue.h \
- /export/home/db/include/debug.h \
- /export/home/db/include/mutex.h \
- /export/home/db/include/region.h \
- /export/home/db/include/mutex_ext.h \
- /export/home/db/include/env_ext.h \
- /export/home/db/include/os.h \
- /export/home/db/include/os_ext.h \
- /export/home/db/include/common_ext.h
+ $(PRJ_DIR)/db.h
<END>
-<BEGIN> FILE_/export/home/db/log/log_compare.c_objects
-log_compare.o
+<BEGIN> FILE_$(PRJ_DIR)/../lock/lock_stat.c_objects
+lock_stat.o
<END>
-<BEGIN> FILE_/export/home/db/log/log_compare.c_tool
+<BEGIN> FILE_$(PRJ_DIR)/../lock/lock_stat.c_tool
C/C++ compiler
<END>
-<BEGIN> FILE_/export/home/db/log/log_findckp.c_dependDone
+<BEGIN> FILE_$(PRJ_DIR)/../lock/lock_util.c_dependDone
TRUE
<END>
-<BEGIN> FILE_/export/home/db/log/log_findckp.c_dependencies
+<BEGIN> FILE_$(PRJ_DIR)/../lock/lock_util.c_dependencies
$(PRJ_DIR)/db_config.h \
$(PRJ_DIR)/db_int.h \
- $(PRJ_DIR)/db.h \
- /export/home/db/include/queue.h \
- /export/home/db/include/shqueue.h \
- /export/home/db/include/debug.h \
- /export/home/db/include/mutex.h \
- /export/home/db/include/region.h \
- /export/home/db/include/mutex_ext.h \
- /export/home/db/include/env_ext.h \
- /export/home/db/include/os.h \
- /export/home/db/include/os_ext.h \
- /export/home/db/include/common_ext.h \
- /export/home/db/include/log.h \
- /export/home/db/include/log_auto.h \
- /export/home/db/include/log_ext.h \
- /export/home/db/include/txn.h \
- /export/home/db/include/xa.h \
- /export/home/db/include/txn_auto.h \
- /export/home/db/include/txn_ext.h \
- /export/home/db/include/xa_ext.h
+ $(PRJ_DIR)/db.h
<END>
-<BEGIN> FILE_/export/home/db/log/log_findckp.c_objects
-log_findckp.o
+<BEGIN> FILE_$(PRJ_DIR)/../lock/lock_util.c_objects
+lock_util.o
<END>
-<BEGIN> FILE_/export/home/db/log/log_findckp.c_tool
+<BEGIN> FILE_$(PRJ_DIR)/../lock/lock_util.c_tool
C/C++ compiler
<END>
-<BEGIN> FILE_/export/home/db/log/log_get.c_dependDone
+<BEGIN> FILE_$(PRJ_DIR)/../log/log.c_dependDone
TRUE
<END>
-<BEGIN> FILE_/export/home/db/log/log_get.c_dependencies
+<BEGIN> FILE_$(PRJ_DIR)/../log/log.c_dependencies
$(PRJ_DIR)/db_config.h \
- /export/home/db/include/db_server.h \
$(PRJ_DIR)/db_int.h \
- $(PRJ_DIR)/db.h \
- /export/home/db/include/queue.h \
- /export/home/db/include/shqueue.h \
- /export/home/db/include/debug.h \
- /export/home/db/include/mutex.h \
- /export/home/db/include/region.h \
- /export/home/db/include/mutex_ext.h \
- /export/home/db/include/env_ext.h \
- /export/home/db/include/os.h \
- /export/home/db/include/os_ext.h \
- /export/home/db/include/common_ext.h \
- /export/home/db/include/db_page.h \
- /export/home/db/include/log.h \
- /export/home/db/include/log_auto.h \
- /export/home/db/include/log_ext.h \
- /export/home/db/include/hash.h \
- /export/home/db/include/hash_auto.h \
- /export/home/db/include/hash_ext.h \
- /export/home/db/include/db_am.h \
- /export/home/db/include/db_dispatch.h \
- /export/home/db/include/db_auto.h \
- /export/home/db/include/crdel_auto.h \
- /export/home/db/include/db_ext.h \
- /export/home/db/include/gen_client_ext.h \
- /export/home/db/include/rpc_client_ext.h
+ $(PRJ_DIR)/db.h
<END>
-<BEGIN> FILE_/export/home/db/log/log_get.c_objects
-log_get.o
+<BEGIN> FILE_$(PRJ_DIR)/../log/log.c_objects
+log.o
<END>
-<BEGIN> FILE_/export/home/db/log/log_get.c_tool
+<BEGIN> FILE_$(PRJ_DIR)/../log/log.c_tool
C/C++ compiler
<END>
-<BEGIN> FILE_/export/home/db/log/log_method.c_dependDone
+<BEGIN> FILE_$(PRJ_DIR)/../log/log_archive.c_dependDone
TRUE
<END>
-<BEGIN> FILE_/export/home/db/log/log_method.c_dependencies
+<BEGIN> FILE_$(PRJ_DIR)/../log/log_archive.c_dependencies
$(PRJ_DIR)/db_config.h \
- /export/home/db/include/db_server.h \
$(PRJ_DIR)/db_int.h \
- $(PRJ_DIR)/db.h \
- /export/home/db/include/queue.h \
- /export/home/db/include/shqueue.h \
- /export/home/db/include/debug.h \
- /export/home/db/include/mutex.h \
- /export/home/db/include/region.h \
- /export/home/db/include/mutex_ext.h \
- /export/home/db/include/env_ext.h \
- /export/home/db/include/os.h \
- /export/home/db/include/os_ext.h \
- /export/home/db/include/common_ext.h \
- /export/home/db/include/log.h \
- /export/home/db/include/log_auto.h \
- /export/home/db/include/log_ext.h \
- /export/home/db/include/gen_client_ext.h \
- /export/home/db/include/rpc_client_ext.h
+ $(PRJ_DIR)/db.h
<END>
-<BEGIN> FILE_/export/home/db/log/log_method.c_objects
-log_method.o
+<BEGIN> FILE_$(PRJ_DIR)/../log/log_archive.c_objects
+log_archive.o
<END>
-<BEGIN> FILE_/export/home/db/log/log_method.c_tool
+<BEGIN> FILE_$(PRJ_DIR)/../log/log_archive.c_tool
C/C++ compiler
<END>
-<BEGIN> FILE_/export/home/db/log/log_put.c_dependDone
+<BEGIN> FILE_$(PRJ_DIR)/../log/log_compare.c_dependDone
TRUE
<END>
-<BEGIN> FILE_/export/home/db/log/log_put.c_dependencies
+<BEGIN> FILE_$(PRJ_DIR)/../log/log_compare.c_dependencies
$(PRJ_DIR)/db_config.h \
- /export/home/db/include/db_server.h \
$(PRJ_DIR)/db_int.h \
- $(PRJ_DIR)/db.h \
- /export/home/db/include/queue.h \
- /export/home/db/include/shqueue.h \
- /export/home/db/include/debug.h \
- /export/home/db/include/mutex.h \
- /export/home/db/include/region.h \
- /export/home/db/include/mutex_ext.h \
- /export/home/db/include/env_ext.h \
- /export/home/db/include/os.h \
- /export/home/db/include/os_ext.h \
- /export/home/db/include/common_ext.h \
- /export/home/db/include/db_page.h \
- /export/home/db/include/log.h \
- /export/home/db/include/log_auto.h \
- /export/home/db/include/log_ext.h \
- /export/home/db/include/hash.h \
- /export/home/db/include/hash_auto.h \
- /export/home/db/include/hash_ext.h \
- /export/home/db/include/db_am.h \
- /export/home/db/include/db_dispatch.h \
- /export/home/db/include/db_auto.h \
- /export/home/db/include/crdel_auto.h \
- /export/home/db/include/db_ext.h \
- /export/home/db/include/clib_ext.h \
- /export/home/db/include/gen_client_ext.h \
- /export/home/db/include/rpc_client_ext.h
+ $(PRJ_DIR)/db.h
<END>
-<BEGIN> FILE_/export/home/db/log/log_put.c_objects
-log_put.o
+<BEGIN> FILE_$(PRJ_DIR)/../log/log_compare.c_objects
+log_compare.o
<END>
-<BEGIN> FILE_/export/home/db/log/log_put.c_tool
+<BEGIN> FILE_$(PRJ_DIR)/../log/log_compare.c_tool
C/C++ compiler
<END>
-<BEGIN> FILE_/export/home/db/log/log_rec.c_dependDone
+<BEGIN> FILE_$(PRJ_DIR)/../log/log_get.c_dependDone
TRUE
<END>
-<BEGIN> FILE_/export/home/db/log/log_rec.c_dependencies
+<BEGIN> FILE_$(PRJ_DIR)/../log/log_get.c_dependencies
$(PRJ_DIR)/db_config.h \
$(PRJ_DIR)/db_int.h \
- $(PRJ_DIR)/db.h \
- /export/home/db/include/queue.h \
- /export/home/db/include/shqueue.h \
- /export/home/db/include/debug.h \
- /export/home/db/include/mutex.h \
- /export/home/db/include/region.h \
- /export/home/db/include/mutex_ext.h \
- /export/home/db/include/env_ext.h \
- /export/home/db/include/os.h \
- /export/home/db/include/os_ext.h \
- /export/home/db/include/common_ext.h \
- /export/home/db/include/db_page.h \
- /export/home/db/include/db_am.h \
- /export/home/db/include/db_dispatch.h \
- /export/home/db/include/db_auto.h \
- /export/home/db/include/crdel_auto.h \
- /export/home/db/include/db_ext.h \
- /export/home/db/include/log.h \
- /export/home/db/include/log_auto.h \
- /export/home/db/include/log_ext.h
+ $(PRJ_DIR)/db.h
<END>
-<BEGIN> FILE_/export/home/db/log/log_rec.c_objects
-log_rec.o
+<BEGIN> FILE_$(PRJ_DIR)/../log/log_get.c_objects
+log_get.o
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../log/log_get.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../log/log_method.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../log/log_method.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../log/log_method.c_objects
+log_method.o
<END>
-<BEGIN> FILE_/export/home/db/log/log_rec.c_tool
+<BEGIN> FILE_$(PRJ_DIR)/../log/log_method.c_tool
C/C++ compiler
<END>
-<BEGIN> FILE_/export/home/db/log/log_register.c_dependDone
+<BEGIN> FILE_$(PRJ_DIR)/../log/log_put.c_dependDone
TRUE
<END>
-<BEGIN> FILE_/export/home/db/log/log_register.c_dependencies
+<BEGIN> FILE_$(PRJ_DIR)/../log/log_put.c_dependencies
$(PRJ_DIR)/db_config.h \
- /export/home/db/include/db_server.h \
$(PRJ_DIR)/db_int.h \
- $(PRJ_DIR)/db.h \
- /export/home/db/include/queue.h \
- /export/home/db/include/shqueue.h \
- /export/home/db/include/debug.h \
- /export/home/db/include/mutex.h \
- /export/home/db/include/region.h \
- /export/home/db/include/mutex_ext.h \
- /export/home/db/include/env_ext.h \
- /export/home/db/include/os.h \
- /export/home/db/include/os_ext.h \
- /export/home/db/include/common_ext.h \
- /export/home/db/include/log.h \
- /export/home/db/include/log_auto.h \
- /export/home/db/include/log_ext.h \
- /export/home/db/include/gen_client_ext.h \
- /export/home/db/include/rpc_client_ext.h
+ $(PRJ_DIR)/db.h
<END>
-<BEGIN> FILE_/export/home/db/log/log_register.c_objects
-log_register.o
+<BEGIN> FILE_$(PRJ_DIR)/../log/log_put.c_objects
+log_put.o
<END>
-<BEGIN> FILE_/export/home/db/log/log_register.c_tool
+<BEGIN> FILE_$(PRJ_DIR)/../log/log_put.c_tool
C/C++ compiler
<END>
-<BEGIN> FILE_/export/home/db/mp/mp_alloc.c_dependDone
+<BEGIN> FILE_$(PRJ_DIR)/../mp/mp_alloc.c_dependDone
TRUE
<END>
-<BEGIN> FILE_/export/home/db/mp/mp_alloc.c_dependencies
+<BEGIN> FILE_$(PRJ_DIR)/../mp/mp_alloc.c_dependencies
$(PRJ_DIR)/db_config.h \
$(PRJ_DIR)/db_int.h \
- $(PRJ_DIR)/db.h \
- /export/home/db/include/queue.h \
- /export/home/db/include/shqueue.h \
- /export/home/db/include/debug.h \
- /export/home/db/include/mutex.h \
- /export/home/db/include/region.h \
- /export/home/db/include/mutex_ext.h \
- /export/home/db/include/env_ext.h \
- /export/home/db/include/os.h \
- /export/home/db/include/os_ext.h \
- /export/home/db/include/common_ext.h \
- /export/home/db/include/db_shash.h \
- /export/home/db/include/mp.h \
- /export/home/db/include/mp_ext.h
+ $(PRJ_DIR)/db.h
<END>
-<BEGIN> FILE_/export/home/db/mp/mp_alloc.c_objects
+<BEGIN> FILE_$(PRJ_DIR)/../mp/mp_alloc.c_objects
mp_alloc.o
<END>
-<BEGIN> FILE_/export/home/db/mp/mp_alloc.c_tool
+<BEGIN> FILE_$(PRJ_DIR)/../mp/mp_alloc.c_tool
C/C++ compiler
<END>
-<BEGIN> FILE_/export/home/db/mp/mp_bh.c_dependDone
+<BEGIN> FILE_$(PRJ_DIR)/../mp/mp_bh.c_dependDone
TRUE
<END>
-<BEGIN> FILE_/export/home/db/mp/mp_bh.c_dependencies
+<BEGIN> FILE_$(PRJ_DIR)/../mp/mp_bh.c_dependencies
$(PRJ_DIR)/db_config.h \
$(PRJ_DIR)/db_int.h \
- $(PRJ_DIR)/db.h \
- /export/home/db/include/queue.h \
- /export/home/db/include/shqueue.h \
- /export/home/db/include/debug.h \
- /export/home/db/include/mutex.h \
- /export/home/db/include/region.h \
- /export/home/db/include/mutex_ext.h \
- /export/home/db/include/env_ext.h \
- /export/home/db/include/os.h \
- /export/home/db/include/os_ext.h \
- /export/home/db/include/common_ext.h \
- /export/home/db/include/db_shash.h \
- /export/home/db/include/mp.h \
- /export/home/db/include/mp_ext.h \
- /export/home/db/include/log.h \
- /export/home/db/include/log_auto.h \
- /export/home/db/include/log_ext.h \
- /export/home/db/include/db_page.h
+ $(PRJ_DIR)/db.h
<END>
-<BEGIN> FILE_/export/home/db/mp/mp_bh.c_objects
+<BEGIN> FILE_$(PRJ_DIR)/../mp/mp_bh.c_objects
mp_bh.o
<END>
-<BEGIN> FILE_/export/home/db/mp/mp_bh.c_tool
+<BEGIN> FILE_$(PRJ_DIR)/../mp/mp_bh.c_tool
C/C++ compiler
<END>
-<BEGIN> FILE_/export/home/db/mp/mp_fget.c_dependDone
+<BEGIN> FILE_$(PRJ_DIR)/../mp/mp_fget.c_dependDone
TRUE
<END>
-<BEGIN> FILE_/export/home/db/mp/mp_fget.c_dependencies
+<BEGIN> FILE_$(PRJ_DIR)/../mp/mp_fget.c_dependencies
$(PRJ_DIR)/db_config.h \
- /export/home/db/include/db_server.h \
$(PRJ_DIR)/db_int.h \
- $(PRJ_DIR)/db.h \
- /export/home/db/include/queue.h \
- /export/home/db/include/shqueue.h \
- /export/home/db/include/debug.h \
- /export/home/db/include/mutex.h \
- /export/home/db/include/region.h \
- /export/home/db/include/mutex_ext.h \
- /export/home/db/include/env_ext.h \
- /export/home/db/include/os.h \
- /export/home/db/include/os_ext.h \
- /export/home/db/include/common_ext.h \
- /export/home/db/include/db_shash.h \
- /export/home/db/include/mp.h \
- /export/home/db/include/mp_ext.h \
- /export/home/db/include/gen_client_ext.h \
- /export/home/db/include/rpc_client_ext.h
+ $(PRJ_DIR)/db.h
<END>
-<BEGIN> FILE_/export/home/db/mp/mp_fget.c_objects
+<BEGIN> FILE_$(PRJ_DIR)/../mp/mp_fget.c_objects
mp_fget.o
<END>
-<BEGIN> FILE_/export/home/db/mp/mp_fget.c_tool
+<BEGIN> FILE_$(PRJ_DIR)/../mp/mp_fget.c_tool
C/C++ compiler
<END>
-<BEGIN> FILE_/export/home/db/mp/mp_fopen.c_dependDone
+<BEGIN> FILE_$(PRJ_DIR)/../mp/mp_fopen.c_dependDone
TRUE
<END>
-<BEGIN> FILE_/export/home/db/mp/mp_fopen.c_dependencies
+<BEGIN> FILE_$(PRJ_DIR)/../mp/mp_fopen.c_dependencies
$(PRJ_DIR)/db_config.h \
- /export/home/db/include/db_server.h \
$(PRJ_DIR)/db_int.h \
- $(PRJ_DIR)/db.h \
- /export/home/db/include/queue.h \
- /export/home/db/include/shqueue.h \
- /export/home/db/include/debug.h \
- /export/home/db/include/mutex.h \
- /export/home/db/include/region.h \
- /export/home/db/include/mutex_ext.h \
- /export/home/db/include/env_ext.h \
- /export/home/db/include/os.h \
- /export/home/db/include/os_ext.h \
- /export/home/db/include/common_ext.h \
- /export/home/db/include/db_shash.h \
- /export/home/db/include/mp.h \
- /export/home/db/include/mp_ext.h \
- /export/home/db/include/gen_client_ext.h \
- /export/home/db/include/rpc_client_ext.h
+ $(PRJ_DIR)/db.h
<END>
-<BEGIN> FILE_/export/home/db/mp/mp_fopen.c_objects
+<BEGIN> FILE_$(PRJ_DIR)/../mp/mp_fopen.c_objects
mp_fopen.o
<END>
-<BEGIN> FILE_/export/home/db/mp/mp_fopen.c_tool
+<BEGIN> FILE_$(PRJ_DIR)/../mp/mp_fopen.c_tool
C/C++ compiler
<END>
-<BEGIN> FILE_/export/home/db/mp/mp_fput.c_dependDone
+<BEGIN> FILE_$(PRJ_DIR)/../mp/mp_fput.c_dependDone
TRUE
<END>
-<BEGIN> FILE_/export/home/db/mp/mp_fput.c_dependencies
+<BEGIN> FILE_$(PRJ_DIR)/../mp/mp_fput.c_dependencies
$(PRJ_DIR)/db_config.h \
- /export/home/db/include/db_server.h \
$(PRJ_DIR)/db_int.h \
- $(PRJ_DIR)/db.h \
- /export/home/db/include/queue.h \
- /export/home/db/include/shqueue.h \
- /export/home/db/include/debug.h \
- /export/home/db/include/mutex.h \
- /export/home/db/include/region.h \
- /export/home/db/include/mutex_ext.h \
- /export/home/db/include/env_ext.h \
- /export/home/db/include/os.h \
- /export/home/db/include/os_ext.h \
- /export/home/db/include/common_ext.h \
- /export/home/db/include/db_shash.h \
- /export/home/db/include/mp.h \
- /export/home/db/include/mp_ext.h \
- /export/home/db/include/gen_client_ext.h \
- /export/home/db/include/rpc_client_ext.h
+ $(PRJ_DIR)/db.h
<END>
-<BEGIN> FILE_/export/home/db/mp/mp_fput.c_objects
+<BEGIN> FILE_$(PRJ_DIR)/../mp/mp_fput.c_objects
mp_fput.o
<END>
-<BEGIN> FILE_/export/home/db/mp/mp_fput.c_tool
+<BEGIN> FILE_$(PRJ_DIR)/../mp/mp_fput.c_tool
C/C++ compiler
<END>
-<BEGIN> FILE_/export/home/db/mp/mp_fset.c_dependDone
+<BEGIN> FILE_$(PRJ_DIR)/../mp/mp_fset.c_dependDone
TRUE
<END>
-<BEGIN> FILE_/export/home/db/mp/mp_fset.c_dependencies
+<BEGIN> FILE_$(PRJ_DIR)/../mp/mp_fset.c_dependencies
$(PRJ_DIR)/db_config.h \
- /export/home/db/include/db_server.h \
$(PRJ_DIR)/db_int.h \
- $(PRJ_DIR)/db.h \
- /export/home/db/include/queue.h \
- /export/home/db/include/shqueue.h \
- /export/home/db/include/debug.h \
- /export/home/db/include/mutex.h \
- /export/home/db/include/region.h \
- /export/home/db/include/mutex_ext.h \
- /export/home/db/include/env_ext.h \
- /export/home/db/include/os.h \
- /export/home/db/include/os_ext.h \
- /export/home/db/include/common_ext.h \
- /export/home/db/include/db_shash.h \
- /export/home/db/include/mp.h \
- /export/home/db/include/mp_ext.h \
- /export/home/db/include/gen_client_ext.h \
- /export/home/db/include/rpc_client_ext.h
+ $(PRJ_DIR)/db.h
<END>
-<BEGIN> FILE_/export/home/db/mp/mp_fset.c_objects
+<BEGIN> FILE_$(PRJ_DIR)/../mp/mp_fset.c_objects
mp_fset.o
<END>
-<BEGIN> FILE_/export/home/db/mp/mp_fset.c_tool
+<BEGIN> FILE_$(PRJ_DIR)/../mp/mp_fset.c_tool
C/C++ compiler
<END>
-<BEGIN> FILE_/export/home/db/mp/mp_method.c_dependDone
+<BEGIN> FILE_$(PRJ_DIR)/../mp/mp_method.c_dependDone
TRUE
<END>
-<BEGIN> FILE_/export/home/db/mp/mp_method.c_dependencies
+<BEGIN> FILE_$(PRJ_DIR)/../mp/mp_method.c_dependencies
$(PRJ_DIR)/db_config.h \
- /export/home/db/include/db_server.h \
$(PRJ_DIR)/db_int.h \
- $(PRJ_DIR)/db.h \
- /export/home/db/include/queue.h \
- /export/home/db/include/shqueue.h \
- /export/home/db/include/debug.h \
- /export/home/db/include/mutex.h \
- /export/home/db/include/region.h \
- /export/home/db/include/mutex_ext.h \
- /export/home/db/include/env_ext.h \
- /export/home/db/include/os.h \
- /export/home/db/include/os_ext.h \
- /export/home/db/include/common_ext.h \
- /export/home/db/include/db_shash.h \
- /export/home/db/include/mp.h \
- /export/home/db/include/mp_ext.h \
- /export/home/db/include/gen_client_ext.h \
- /export/home/db/include/rpc_client_ext.h
+ $(PRJ_DIR)/db.h
<END>
-<BEGIN> FILE_/export/home/db/mp/mp_method.c_objects
+<BEGIN> FILE_$(PRJ_DIR)/../mp/mp_method.c_objects
mp_method.o
<END>
-<BEGIN> FILE_/export/home/db/mp/mp_method.c_tool
+<BEGIN> FILE_$(PRJ_DIR)/../mp/mp_method.c_tool
C/C++ compiler
<END>
-<BEGIN> FILE_/export/home/db/mp/mp_region.c_dependDone
+<BEGIN> FILE_$(PRJ_DIR)/../mp/mp_region.c_dependDone
TRUE
<END>
-<BEGIN> FILE_/export/home/db/mp/mp_region.c_dependencies
+<BEGIN> FILE_$(PRJ_DIR)/../mp/mp_region.c_dependencies
$(PRJ_DIR)/db_config.h \
$(PRJ_DIR)/db_int.h \
- $(PRJ_DIR)/db.h \
- /export/home/db/include/queue.h \
- /export/home/db/include/shqueue.h \
- /export/home/db/include/debug.h \
- /export/home/db/include/mutex.h \
- /export/home/db/include/region.h \
- /export/home/db/include/mutex_ext.h \
- /export/home/db/include/env_ext.h \
- /export/home/db/include/os.h \
- /export/home/db/include/os_ext.h \
- /export/home/db/include/common_ext.h \
- /export/home/db/include/db_shash.h \
- /export/home/db/include/mp.h \
- /export/home/db/include/mp_ext.h
+ $(PRJ_DIR)/db.h
<END>
-<BEGIN> FILE_/export/home/db/mp/mp_region.c_objects
+<BEGIN> FILE_$(PRJ_DIR)/../mp/mp_region.c_objects
mp_region.o
<END>
-<BEGIN> FILE_/export/home/db/mp/mp_region.c_tool
+<BEGIN> FILE_$(PRJ_DIR)/../mp/mp_region.c_tool
C/C++ compiler
<END>
-<BEGIN> FILE_/export/home/db/mp/mp_register.c_dependDone
+<BEGIN> FILE_$(PRJ_DIR)/../mp/mp_register.c_dependDone
TRUE
<END>
-<BEGIN> FILE_/export/home/db/mp/mp_register.c_dependencies
+<BEGIN> FILE_$(PRJ_DIR)/../mp/mp_register.c_dependencies
$(PRJ_DIR)/db_config.h \
- /export/home/db/include/db_server.h \
$(PRJ_DIR)/db_int.h \
- $(PRJ_DIR)/db.h \
- /export/home/db/include/queue.h \
- /export/home/db/include/shqueue.h \
- /export/home/db/include/debug.h \
- /export/home/db/include/mutex.h \
- /export/home/db/include/region.h \
- /export/home/db/include/mutex_ext.h \
- /export/home/db/include/env_ext.h \
- /export/home/db/include/os.h \
- /export/home/db/include/os_ext.h \
- /export/home/db/include/common_ext.h \
- /export/home/db/include/db_shash.h \
- /export/home/db/include/mp.h \
- /export/home/db/include/mp_ext.h \
- /export/home/db/include/gen_client_ext.h \
- /export/home/db/include/rpc_client_ext.h
+ $(PRJ_DIR)/db.h
<END>
-<BEGIN> FILE_/export/home/db/mp/mp_register.c_objects
+<BEGIN> FILE_$(PRJ_DIR)/../mp/mp_register.c_objects
mp_register.o
<END>
-<BEGIN> FILE_/export/home/db/mp/mp_register.c_tool
+<BEGIN> FILE_$(PRJ_DIR)/../mp/mp_register.c_tool
C/C++ compiler
<END>
-<BEGIN> FILE_/export/home/db/mp/mp_stat.c_dependDone
+<BEGIN> FILE_$(PRJ_DIR)/../mp/mp_stat.c_dependDone
TRUE
<END>
-<BEGIN> FILE_/export/home/db/mp/mp_stat.c_dependencies
+<BEGIN> FILE_$(PRJ_DIR)/../mp/mp_stat.c_dependencies
$(PRJ_DIR)/db_config.h \
- /export/home/db/include/db_server.h \
$(PRJ_DIR)/db_int.h \
- $(PRJ_DIR)/db.h \
- /export/home/db/include/queue.h \
- /export/home/db/include/shqueue.h \
- /export/home/db/include/debug.h \
- /export/home/db/include/mutex.h \
- /export/home/db/include/region.h \
- /export/home/db/include/mutex_ext.h \
- /export/home/db/include/env_ext.h \
- /export/home/db/include/os.h \
- /export/home/db/include/os_ext.h \
- /export/home/db/include/common_ext.h \
- /export/home/db/include/db_page.h \
- /export/home/db/include/db_shash.h \
- /export/home/db/include/db_am.h \
- /export/home/db/include/db_dispatch.h \
- /export/home/db/include/db_auto.h \
- /export/home/db/include/crdel_auto.h \
- /export/home/db/include/db_ext.h \
- /export/home/db/include/mp.h \
- /export/home/db/include/mp_ext.h \
- /export/home/db/include/gen_client_ext.h \
- /export/home/db/include/rpc_client_ext.h
+ $(PRJ_DIR)/db.h
<END>
-<BEGIN> FILE_/export/home/db/mp/mp_stat.c_objects
+<BEGIN> FILE_$(PRJ_DIR)/../mp/mp_stat.c_objects
mp_stat.o
<END>
-<BEGIN> FILE_/export/home/db/mp/mp_stat.c_tool
+<BEGIN> FILE_$(PRJ_DIR)/../mp/mp_stat.c_tool
C/C++ compiler
<END>
-<BEGIN> FILE_/export/home/db/mp/mp_sync.c_dependDone
+<BEGIN> FILE_$(PRJ_DIR)/../mp/mp_sync.c_dependDone
TRUE
<END>
-<BEGIN> FILE_/export/home/db/mp/mp_sync.c_dependencies
+<BEGIN> FILE_$(PRJ_DIR)/../mp/mp_sync.c_dependencies
$(PRJ_DIR)/db_config.h \
- /export/home/db/include/db_server.h \
$(PRJ_DIR)/db_int.h \
- $(PRJ_DIR)/db.h \
- /export/home/db/include/queue.h \
- /export/home/db/include/shqueue.h \
- /export/home/db/include/debug.h \
- /export/home/db/include/mutex.h \
- /export/home/db/include/region.h \
- /export/home/db/include/mutex_ext.h \
- /export/home/db/include/env_ext.h \
- /export/home/db/include/os.h \
- /export/home/db/include/os_ext.h \
- /export/home/db/include/common_ext.h \
- /export/home/db/include/db_shash.h \
- /export/home/db/include/mp.h \
- /export/home/db/include/mp_ext.h \
- /export/home/db/include/gen_client_ext.h \
- /export/home/db/include/rpc_client_ext.h
+ $(PRJ_DIR)/db.h
<END>
-<BEGIN> FILE_/export/home/db/mp/mp_sync.c_objects
+<BEGIN> FILE_$(PRJ_DIR)/../mp/mp_sync.c_objects
mp_sync.o
<END>
-<BEGIN> FILE_/export/home/db/mp/mp_sync.c_tool
+<BEGIN> FILE_$(PRJ_DIR)/../mp/mp_sync.c_tool
C/C++ compiler
<END>
-<BEGIN> FILE_/export/home/db/mp/mp_trickle.c_dependDone
+<BEGIN> FILE_$(PRJ_DIR)/../mp/mp_trickle.c_dependDone
TRUE
<END>
-<BEGIN> FILE_/export/home/db/mp/mp_trickle.c_dependencies
+<BEGIN> FILE_$(PRJ_DIR)/../mp/mp_trickle.c_dependencies
$(PRJ_DIR)/db_config.h \
- /export/home/db/include/db_server.h \
$(PRJ_DIR)/db_int.h \
- $(PRJ_DIR)/db.h \
- /export/home/db/include/queue.h \
- /export/home/db/include/shqueue.h \
- /export/home/db/include/debug.h \
- /export/home/db/include/mutex.h \
- /export/home/db/include/region.h \
- /export/home/db/include/mutex_ext.h \
- /export/home/db/include/env_ext.h \
- /export/home/db/include/os.h \
- /export/home/db/include/os_ext.h \
- /export/home/db/include/common_ext.h \
- /export/home/db/include/db_shash.h \
- /export/home/db/include/mp.h \
- /export/home/db/include/mp_ext.h \
- /export/home/db/include/gen_client_ext.h \
- /export/home/db/include/rpc_client_ext.h
+ $(PRJ_DIR)/db.h
<END>
-<BEGIN> FILE_/export/home/db/mp/mp_trickle.c_objects
+<BEGIN> FILE_$(PRJ_DIR)/../mp/mp_trickle.c_objects
mp_trickle.o
<END>
-<BEGIN> FILE_/export/home/db/mp/mp_trickle.c_tool
+<BEGIN> FILE_$(PRJ_DIR)/../mp/mp_trickle.c_tool
C/C++ compiler
<END>
-<BEGIN> FILE_/export/home/db/mutex/mut_tas.c_dependDone
+<BEGIN> FILE_$(PRJ_DIR)/../mutex/mut_tas.c_dependDone
TRUE
<END>
-<BEGIN> FILE_/export/home/db/mutex/mut_tas.c_dependencies
+<BEGIN> FILE_$(PRJ_DIR)/../mutex/mut_tas.c_dependencies
$(PRJ_DIR)/db_config.h \
$(PRJ_DIR)/db_int.h \
- $(PRJ_DIR)/db.h \
- /export/home/db/include/queue.h \
- /export/home/db/include/shqueue.h \
- /export/home/db/include/debug.h \
- /export/home/db/include/mutex.h \
- /export/home/db/include/region.h \
- /export/home/db/include/mutex_ext.h \
- /export/home/db/include/env_ext.h \
- /export/home/db/include/os.h \
- /export/home/db/include/os_ext.h \
- /export/home/db/include/common_ext.h
+ $(PRJ_DIR)/db.h
<END>
-<BEGIN> FILE_/export/home/db/mutex/mut_tas.c_objects
+<BEGIN> FILE_$(PRJ_DIR)/../mutex/mut_tas.c_objects
mut_tas.o
<END>
-<BEGIN> FILE_/export/home/db/mutex/mut_tas.c_tool
+<BEGIN> FILE_$(PRJ_DIR)/../mutex/mut_tas.c_tool
C/C++ compiler
<END>
-<BEGIN> FILE_/export/home/db/mutex/mutex.c_dependDone
+<BEGIN> FILE_$(PRJ_DIR)/../mutex/mutex.c_dependDone
TRUE
<END>
-<BEGIN> FILE_/export/home/db/mutex/mutex.c_dependencies
+<BEGIN> FILE_$(PRJ_DIR)/../mutex/mutex.c_dependencies
$(PRJ_DIR)/db_config.h \
$(PRJ_DIR)/db_int.h \
- $(PRJ_DIR)/db.h \
- /export/home/db/include/queue.h \
- /export/home/db/include/shqueue.h \
- /export/home/db/include/debug.h \
- /export/home/db/include/mutex.h \
- /export/home/db/include/region.h \
- /export/home/db/include/mutex_ext.h \
- /export/home/db/include/env_ext.h \
- /export/home/db/include/os.h \
- /export/home/db/include/os_ext.h \
- /export/home/db/include/common_ext.h
+ $(PRJ_DIR)/db.h
<END>
-<BEGIN> FILE_/export/home/db/mutex/mutex.c_objects
+<BEGIN> FILE_$(PRJ_DIR)/../mutex/mutex.c_objects
mutex.o
<END>
-<BEGIN> FILE_/export/home/db/mutex/mutex.c_tool
+<BEGIN> FILE_$(PRJ_DIR)/../mutex/mutex.c_tool
C/C++ compiler
<END>
-<BEGIN> FILE_/export/home/db/os/os_alloc.c_dependDone
+<BEGIN> FILE_$(PRJ_DIR)/../os/os_alloc.c_dependDone
TRUE
<END>
-<BEGIN> FILE_/export/home/db/os/os_alloc.c_dependencies
+<BEGIN> FILE_$(PRJ_DIR)/../os/os_alloc.c_dependencies
$(PRJ_DIR)/db_config.h \
$(PRJ_DIR)/db_int.h \
- $(PRJ_DIR)/db.h \
- /export/home/db/include/queue.h \
- /export/home/db/include/shqueue.h \
- /export/home/db/include/debug.h \
- /export/home/db/include/mutex.h \
- /export/home/db/include/region.h \
- /export/home/db/include/mutex_ext.h \
- /export/home/db/include/env_ext.h \
- /export/home/db/include/os.h \
- /export/home/db/include/os_ext.h \
- /export/home/db/include/common_ext.h \
- /export/home/db/include/os_jump.h
+ $(PRJ_DIR)/db.h
<END>
-<BEGIN> FILE_/export/home/db/os/os_alloc.c_objects
+<BEGIN> FILE_$(PRJ_DIR)/../os/os_alloc.c_objects
os_alloc.o
<END>
-<BEGIN> FILE_/export/home/db/os/os_alloc.c_tool
+<BEGIN> FILE_$(PRJ_DIR)/../os/os_alloc.c_tool
C/C++ compiler
<END>
-<BEGIN> FILE_/export/home/db/os/os_dir.c_dependDone
+<BEGIN> FILE_$(PRJ_DIR)/../os/os_clock.c_dependDone
TRUE
<END>
-<BEGIN> FILE_/export/home/db/os/os_dir.c_dependencies
+<BEGIN> FILE_$(PRJ_DIR)/../os/os_clock.c_dependencies
$(PRJ_DIR)/db_config.h \
$(PRJ_DIR)/db_int.h \
- $(PRJ_DIR)/db.h \
- /export/home/db/include/queue.h \
- /export/home/db/include/shqueue.h \
- /export/home/db/include/debug.h \
- /export/home/db/include/mutex.h \
- /export/home/db/include/region.h \
- /export/home/db/include/mutex_ext.h \
- /export/home/db/include/env_ext.h \
- /export/home/db/include/os.h \
- /export/home/db/include/os_ext.h \
- /export/home/db/include/common_ext.h \
- /export/home/db/include/os_jump.h
+ $(PRJ_DIR)/db.h
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../os/os_clock.c_objects
+os_clock.o
<END>
-<BEGIN> FILE_/export/home/db/os/os_dir.c_objects
+<BEGIN> FILE_$(PRJ_DIR)/../os/os_clock.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../os/os_dir.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../os/os_dir.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../os/os_dir.c_objects
os_dir.o
<END>
-<BEGIN> FILE_/export/home/db/os/os_dir.c_tool
+<BEGIN> FILE_$(PRJ_DIR)/../os/os_dir.c_tool
C/C++ compiler
<END>
-<BEGIN> FILE_/export/home/db/os/os_errno.c_dependDone
+<BEGIN> FILE_$(PRJ_DIR)/../os/os_errno.c_dependDone
TRUE
<END>
-<BEGIN> FILE_/export/home/db/os/os_errno.c_dependencies
+<BEGIN> FILE_$(PRJ_DIR)/../os/os_errno.c_dependencies
$(PRJ_DIR)/db_config.h \
$(PRJ_DIR)/db_int.h \
- $(PRJ_DIR)/db.h \
- /export/home/db/include/queue.h \
- /export/home/db/include/shqueue.h \
- /export/home/db/include/debug.h \
- /export/home/db/include/mutex.h \
- /export/home/db/include/region.h \
- /export/home/db/include/mutex_ext.h \
- /export/home/db/include/env_ext.h \
- /export/home/db/include/os.h \
- /export/home/db/include/os_ext.h \
- /export/home/db/include/common_ext.h
+ $(PRJ_DIR)/db.h
<END>
-<BEGIN> FILE_/export/home/db/os/os_errno.c_objects
+<BEGIN> FILE_$(PRJ_DIR)/../os/os_errno.c_objects
os_errno.o
<END>
-<BEGIN> FILE_/export/home/db/os/os_errno.c_tool
+<BEGIN> FILE_$(PRJ_DIR)/../os/os_errno.c_tool
C/C++ compiler
<END>
-<BEGIN> FILE_/export/home/db/os/os_fid.c_dependDone
+<BEGIN> FILE_$(PRJ_DIR)/../os/os_fid.c_dependDone
TRUE
<END>
-<BEGIN> FILE_/export/home/db/os/os_fid.c_dependencies
+<BEGIN> FILE_$(PRJ_DIR)/../os/os_fid.c_dependencies
$(PRJ_DIR)/db_config.h \
$(PRJ_DIR)/db_int.h \
- $(PRJ_DIR)/db.h \
- /export/home/db/include/queue.h \
- /export/home/db/include/shqueue.h \
- /export/home/db/include/debug.h \
- /export/home/db/include/mutex.h \
- /export/home/db/include/region.h \
- /export/home/db/include/mutex_ext.h \
- /export/home/db/include/env_ext.h \
- /export/home/db/include/os.h \
- /export/home/db/include/os_ext.h \
- /export/home/db/include/common_ext.h
+ $(PRJ_DIR)/db.h
<END>
-<BEGIN> FILE_/export/home/db/os/os_fid.c_objects
+<BEGIN> FILE_$(PRJ_DIR)/../os/os_fid.c_objects
os_fid.o
<END>
-<BEGIN> FILE_/export/home/db/os/os_fid.c_tool
+<BEGIN> FILE_$(PRJ_DIR)/../os/os_fid.c_tool
C/C++ compiler
<END>
-<BEGIN> FILE_/export/home/db/os/os_fsync.c_dependDone
+<BEGIN> FILE_$(PRJ_DIR)/../os/os_fsync.c_dependDone
TRUE
<END>
-<BEGIN> FILE_/export/home/db/os/os_fsync.c_dependencies
+<BEGIN> FILE_$(PRJ_DIR)/../os/os_fsync.c_dependencies
$(PRJ_DIR)/db_config.h \
$(PRJ_DIR)/db_int.h \
- $(PRJ_DIR)/db.h \
- /export/home/db/include/queue.h \
- /export/home/db/include/shqueue.h \
- /export/home/db/include/debug.h \
- /export/home/db/include/mutex.h \
- /export/home/db/include/region.h \
- /export/home/db/include/mutex_ext.h \
- /export/home/db/include/env_ext.h \
- /export/home/db/include/os.h \
- /export/home/db/include/os_ext.h \
- /export/home/db/include/common_ext.h \
- /export/home/db/include/os_jump.h
+ $(PRJ_DIR)/db.h
<END>
-<BEGIN> FILE_/export/home/db/os/os_fsync.c_objects
+<BEGIN> FILE_$(PRJ_DIR)/../os/os_fsync.c_objects
os_fsync.o
<END>
-<BEGIN> FILE_/export/home/db/os/os_fsync.c_tool
+<BEGIN> FILE_$(PRJ_DIR)/../os/os_fsync.c_tool
C/C++ compiler
<END>
-<BEGIN> FILE_/export/home/db/os/os_handle.c_dependDone
+<BEGIN> FILE_$(PRJ_DIR)/../os/os_handle.c_dependDone
TRUE
<END>
-<BEGIN> FILE_/export/home/db/os/os_handle.c_dependencies
+<BEGIN> FILE_$(PRJ_DIR)/../os/os_handle.c_dependencies
$(PRJ_DIR)/db_config.h \
$(PRJ_DIR)/db_int.h \
- $(PRJ_DIR)/db.h \
- /export/home/db/include/queue.h \
- /export/home/db/include/shqueue.h \
- /export/home/db/include/debug.h \
- /export/home/db/include/mutex.h \
- /export/home/db/include/region.h \
- /export/home/db/include/mutex_ext.h \
- /export/home/db/include/env_ext.h \
- /export/home/db/include/os.h \
- /export/home/db/include/os_ext.h \
- /export/home/db/include/common_ext.h \
- /export/home/db/include/os_jump.h
+ $(PRJ_DIR)/db.h
<END>
-<BEGIN> FILE_/export/home/db/os/os_handle.c_objects
+<BEGIN> FILE_$(PRJ_DIR)/../os/os_handle.c_objects
os_handle.o
<END>
-<BEGIN> FILE_/export/home/db/os/os_handle.c_tool
+<BEGIN> FILE_$(PRJ_DIR)/../os/os_handle.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../os/os_id.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../os/os_id.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../os/os_id.c_objects
+os_id.o
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../os/os_id.c_tool
C/C++ compiler
<END>
-<BEGIN> FILE_/export/home/db/os/os_method.c_dependDone
+<BEGIN> FILE_$(PRJ_DIR)/../os/os_method.c_dependDone
TRUE
<END>
-<BEGIN> FILE_/export/home/db/os/os_method.c_dependencies
+<BEGIN> FILE_$(PRJ_DIR)/../os/os_method.c_dependencies
$(PRJ_DIR)/db_config.h \
$(PRJ_DIR)/db_int.h \
- $(PRJ_DIR)/db.h \
- /export/home/db/include/queue.h \
- /export/home/db/include/shqueue.h \
- /export/home/db/include/debug.h \
- /export/home/db/include/mutex.h \
- /export/home/db/include/region.h \
- /export/home/db/include/mutex_ext.h \
- /export/home/db/include/env_ext.h \
- /export/home/db/include/os.h \
- /export/home/db/include/os_ext.h \
- /export/home/db/include/common_ext.h \
- /export/home/db/include/os_jump.h
+ $(PRJ_DIR)/db.h
<END>
-<BEGIN> FILE_/export/home/db/os/os_method.c_objects
+<BEGIN> FILE_$(PRJ_DIR)/../os/os_method.c_objects
os_method.o
<END>
-<BEGIN> FILE_/export/home/db/os/os_method.c_tool
+<BEGIN> FILE_$(PRJ_DIR)/../os/os_method.c_tool
C/C++ compiler
<END>
-<BEGIN> FILE_/export/home/db/os/os_oflags.c_dependDone
+<BEGIN> FILE_$(PRJ_DIR)/../os/os_oflags.c_dependDone
TRUE
<END>
-<BEGIN> FILE_/export/home/db/os/os_oflags.c_dependencies
+<BEGIN> FILE_$(PRJ_DIR)/../os/os_oflags.c_dependencies
$(PRJ_DIR)/db_config.h \
$(PRJ_DIR)/db_int.h \
- $(PRJ_DIR)/db.h \
- /export/home/db/include/queue.h \
- /export/home/db/include/shqueue.h \
- /export/home/db/include/debug.h \
- /export/home/db/include/mutex.h \
- /export/home/db/include/region.h \
- /export/home/db/include/mutex_ext.h \
- /export/home/db/include/env_ext.h \
- /export/home/db/include/os.h \
- /export/home/db/include/os_ext.h \
- /export/home/db/include/common_ext.h
+ $(PRJ_DIR)/db.h
<END>
-<BEGIN> FILE_/export/home/db/os/os_oflags.c_objects
+<BEGIN> FILE_$(PRJ_DIR)/../os/os_oflags.c_objects
os_oflags.o
<END>
-<BEGIN> FILE_/export/home/db/os/os_oflags.c_tool
+<BEGIN> FILE_$(PRJ_DIR)/../os/os_oflags.c_tool
C/C++ compiler
<END>
-<BEGIN> FILE_/export/home/db/os/os_open.c_dependDone
+<BEGIN> FILE_$(PRJ_DIR)/../os/os_open.c_dependDone
TRUE
<END>
-<BEGIN> FILE_/export/home/db/os/os_open.c_dependencies
+<BEGIN> FILE_$(PRJ_DIR)/../os/os_open.c_dependencies
$(PRJ_DIR)/db_config.h \
$(PRJ_DIR)/db_int.h \
- $(PRJ_DIR)/db.h \
- /export/home/db/include/queue.h \
- /export/home/db/include/shqueue.h \
- /export/home/db/include/debug.h \
- /export/home/db/include/mutex.h \
- /export/home/db/include/region.h \
- /export/home/db/include/mutex_ext.h \
- /export/home/db/include/env_ext.h \
- /export/home/db/include/os.h \
- /export/home/db/include/os_ext.h \
- /export/home/db/include/common_ext.h
+ $(PRJ_DIR)/db.h
<END>
-<BEGIN> FILE_/export/home/db/os/os_open.c_objects
+<BEGIN> FILE_$(PRJ_DIR)/../os/os_open.c_objects
os_open.o
<END>
-<BEGIN> FILE_/export/home/db/os/os_open.c_tool
+<BEGIN> FILE_$(PRJ_DIR)/../os/os_open.c_tool
C/C++ compiler
<END>
-<BEGIN> FILE_/export/home/db/os/os_region.c_dependDone
+<BEGIN> FILE_$(PRJ_DIR)/../os/os_region.c_dependDone
TRUE
<END>
-<BEGIN> FILE_/export/home/db/os/os_region.c_dependencies
+<BEGIN> FILE_$(PRJ_DIR)/../os/os_region.c_dependencies
$(PRJ_DIR)/db_config.h \
$(PRJ_DIR)/db_int.h \
- $(PRJ_DIR)/db.h \
- /export/home/db/include/queue.h \
- /export/home/db/include/shqueue.h \
- /export/home/db/include/debug.h \
- /export/home/db/include/mutex.h \
- /export/home/db/include/region.h \
- /export/home/db/include/mutex_ext.h \
- /export/home/db/include/env_ext.h \
- /export/home/db/include/os.h \
- /export/home/db/include/os_ext.h \
- /export/home/db/include/common_ext.h \
- /export/home/db/include/os_jump.h
+ $(PRJ_DIR)/db.h
<END>
-<BEGIN> FILE_/export/home/db/os/os_region.c_objects
+<BEGIN> FILE_$(PRJ_DIR)/../os/os_region.c_objects
os_region.o
<END>
-<BEGIN> FILE_/export/home/db/os/os_region.c_tool
+<BEGIN> FILE_$(PRJ_DIR)/../os/os_region.c_tool
C/C++ compiler
<END>
-<BEGIN> FILE_/export/home/db/os/os_rename.c_dependDone
+<BEGIN> FILE_$(PRJ_DIR)/../os/os_rename.c_dependDone
TRUE
<END>
-<BEGIN> FILE_/export/home/db/os/os_rename.c_dependencies
+<BEGIN> FILE_$(PRJ_DIR)/../os/os_rename.c_dependencies
$(PRJ_DIR)/db_config.h \
$(PRJ_DIR)/db_int.h \
- $(PRJ_DIR)/db.h \
- /export/home/db/include/queue.h \
- /export/home/db/include/shqueue.h \
- /export/home/db/include/debug.h \
- /export/home/db/include/mutex.h \
- /export/home/db/include/region.h \
- /export/home/db/include/mutex_ext.h \
- /export/home/db/include/env_ext.h \
- /export/home/db/include/os.h \
- /export/home/db/include/os_ext.h \
- /export/home/db/include/common_ext.h \
- /export/home/db/include/os_jump.h
+ $(PRJ_DIR)/db.h
<END>
-<BEGIN> FILE_/export/home/db/os/os_rename.c_objects
+<BEGIN> FILE_$(PRJ_DIR)/../os/os_rename.c_objects
os_rename.o
<END>
-<BEGIN> FILE_/export/home/db/os/os_rename.c_tool
+<BEGIN> FILE_$(PRJ_DIR)/../os/os_rename.c_tool
C/C++ compiler
<END>
-<BEGIN> FILE_/export/home/db/os/os_root.c_dependDone
+<BEGIN> FILE_$(PRJ_DIR)/../os/os_root.c_dependDone
TRUE
<END>
-<BEGIN> FILE_/export/home/db/os/os_root.c_dependencies
+<BEGIN> FILE_$(PRJ_DIR)/../os/os_root.c_dependencies
$(PRJ_DIR)/db_config.h \
$(PRJ_DIR)/db_int.h \
- $(PRJ_DIR)/db.h \
- /export/home/db/include/queue.h \
- /export/home/db/include/shqueue.h \
- /export/home/db/include/debug.h \
- /export/home/db/include/mutex.h \
- /export/home/db/include/region.h \
- /export/home/db/include/mutex_ext.h \
- /export/home/db/include/env_ext.h \
- /export/home/db/include/os.h \
- /export/home/db/include/os_ext.h \
- /export/home/db/include/common_ext.h
+ $(PRJ_DIR)/db.h
<END>
-<BEGIN> FILE_/export/home/db/os/os_root.c_objects
+<BEGIN> FILE_$(PRJ_DIR)/../os/os_root.c_objects
os_root.o
<END>
-<BEGIN> FILE_/export/home/db/os/os_root.c_tool
+<BEGIN> FILE_$(PRJ_DIR)/../os/os_root.c_tool
C/C++ compiler
<END>
-<BEGIN> FILE_/export/home/db/os/os_rpath.c_dependDone
+<BEGIN> FILE_$(PRJ_DIR)/../os/os_rpath.c_dependDone
TRUE
<END>
-<BEGIN> FILE_/export/home/db/os/os_rpath.c_dependencies
+<BEGIN> FILE_$(PRJ_DIR)/../os/os_rpath.c_dependencies
$(PRJ_DIR)/db_config.h \
$(PRJ_DIR)/db_int.h \
- $(PRJ_DIR)/db.h \
- /export/home/db/include/queue.h \
- /export/home/db/include/shqueue.h \
- /export/home/db/include/debug.h \
- /export/home/db/include/mutex.h \
- /export/home/db/include/region.h \
- /export/home/db/include/mutex_ext.h \
- /export/home/db/include/env_ext.h \
- /export/home/db/include/os.h \
- /export/home/db/include/os_ext.h \
- /export/home/db/include/common_ext.h
+ $(PRJ_DIR)/db.h
<END>
-<BEGIN> FILE_/export/home/db/os/os_rpath.c_objects
+<BEGIN> FILE_$(PRJ_DIR)/../os/os_rpath.c_objects
os_rpath.o
<END>
-<BEGIN> FILE_/export/home/db/os/os_rpath.c_tool
+<BEGIN> FILE_$(PRJ_DIR)/../os/os_rpath.c_tool
C/C++ compiler
<END>
-<BEGIN> FILE_/export/home/db/os/os_rw.c_dependDone
+<BEGIN> FILE_$(PRJ_DIR)/../os/os_rw.c_dependDone
TRUE
<END>
-<BEGIN> FILE_/export/home/db/os/os_rw.c_dependencies
+<BEGIN> FILE_$(PRJ_DIR)/../os/os_rw.c_dependencies
$(PRJ_DIR)/db_config.h \
$(PRJ_DIR)/db_int.h \
- $(PRJ_DIR)/db.h \
- /export/home/db/include/queue.h \
- /export/home/db/include/shqueue.h \
- /export/home/db/include/debug.h \
- /export/home/db/include/mutex.h \
- /export/home/db/include/region.h \
- /export/home/db/include/mutex_ext.h \
- /export/home/db/include/env_ext.h \
- /export/home/db/include/os.h \
- /export/home/db/include/os_ext.h \
- /export/home/db/include/common_ext.h \
- /export/home/db/include/os_jump.h
+ $(PRJ_DIR)/db.h
<END>
-<BEGIN> FILE_/export/home/db/os/os_rw.c_objects
+<BEGIN> FILE_$(PRJ_DIR)/../os/os_rw.c_objects
os_rw.o
<END>
-<BEGIN> FILE_/export/home/db/os/os_rw.c_tool
+<BEGIN> FILE_$(PRJ_DIR)/../os/os_rw.c_tool
C/C++ compiler
<END>
-<BEGIN> FILE_/export/home/db/os/os_seek.c_dependDone
+<BEGIN> FILE_$(PRJ_DIR)/../os/os_seek.c_dependDone
TRUE
<END>
-<BEGIN> FILE_/export/home/db/os/os_seek.c_dependencies
+<BEGIN> FILE_$(PRJ_DIR)/../os/os_seek.c_dependencies
$(PRJ_DIR)/db_config.h \
$(PRJ_DIR)/db_int.h \
- $(PRJ_DIR)/db.h \
- /export/home/db/include/queue.h \
- /export/home/db/include/shqueue.h \
- /export/home/db/include/debug.h \
- /export/home/db/include/mutex.h \
- /export/home/db/include/region.h \
- /export/home/db/include/mutex_ext.h \
- /export/home/db/include/env_ext.h \
- /export/home/db/include/os.h \
- /export/home/db/include/os_ext.h \
- /export/home/db/include/common_ext.h \
- /export/home/db/include/os_jump.h
+ $(PRJ_DIR)/db.h
<END>
-<BEGIN> FILE_/export/home/db/os/os_seek.c_objects
+<BEGIN> FILE_$(PRJ_DIR)/../os/os_seek.c_objects
os_seek.o
<END>
-<BEGIN> FILE_/export/home/db/os/os_seek.c_tool
+<BEGIN> FILE_$(PRJ_DIR)/../os/os_seek.c_tool
C/C++ compiler
<END>
-<BEGIN> FILE_/export/home/db/os/os_sleep.c_dependDone
+<BEGIN> FILE_$(PRJ_DIR)/../os/os_sleep.c_dependDone
TRUE
<END>
-<BEGIN> FILE_/export/home/db/os/os_sleep.c_dependencies
+<BEGIN> FILE_$(PRJ_DIR)/../os/os_sleep.c_dependencies
$(PRJ_DIR)/db_config.h \
$(PRJ_DIR)/db_int.h \
- $(PRJ_DIR)/db.h \
- /export/home/db/include/queue.h \
- /export/home/db/include/shqueue.h \
- /export/home/db/include/debug.h \
- /export/home/db/include/mutex.h \
- /export/home/db/include/region.h \
- /export/home/db/include/mutex_ext.h \
- /export/home/db/include/env_ext.h \
- /export/home/db/include/os.h \
- /export/home/db/include/os_ext.h \
- /export/home/db/include/common_ext.h \
- /export/home/db/include/os_jump.h
+ $(PRJ_DIR)/db.h
<END>
-<BEGIN> FILE_/export/home/db/os/os_sleep.c_objects
+<BEGIN> FILE_$(PRJ_DIR)/../os/os_sleep.c_objects
os_sleep.o
<END>
-<BEGIN> FILE_/export/home/db/os/os_sleep.c_tool
+<BEGIN> FILE_$(PRJ_DIR)/../os/os_sleep.c_tool
C/C++ compiler
<END>
-<BEGIN> FILE_/export/home/db/os/os_spin.c_dependDone
+<BEGIN> FILE_$(PRJ_DIR)/../os/os_spin.c_dependDone
TRUE
<END>
-<BEGIN> FILE_/export/home/db/os/os_spin.c_dependencies
+<BEGIN> FILE_$(PRJ_DIR)/../os/os_spin.c_dependencies
$(PRJ_DIR)/db_config.h \
$(PRJ_DIR)/db_int.h \
- $(PRJ_DIR)/db.h \
- /export/home/db/include/queue.h \
- /export/home/db/include/shqueue.h \
- /export/home/db/include/debug.h \
- /export/home/db/include/mutex.h \
- /export/home/db/include/region.h \
- /export/home/db/include/mutex_ext.h \
- /export/home/db/include/env_ext.h \
- /export/home/db/include/os.h \
- /export/home/db/include/os_ext.h \
- /export/home/db/include/common_ext.h \
- /export/home/db/include/os_jump.h
+ $(PRJ_DIR)/db.h
<END>
-<BEGIN> FILE_/export/home/db/os/os_spin.c_objects
+<BEGIN> FILE_$(PRJ_DIR)/../os/os_spin.c_objects
os_spin.o
<END>
-<BEGIN> FILE_/export/home/db/os/os_spin.c_tool
+<BEGIN> FILE_$(PRJ_DIR)/../os/os_spin.c_tool
C/C++ compiler
<END>
-<BEGIN> FILE_/export/home/db/os/os_stat.c_dependDone
+<BEGIN> FILE_$(PRJ_DIR)/../os/os_stat.c_dependDone
TRUE
<END>
-<BEGIN> FILE_/export/home/db/os/os_stat.c_dependencies
+<BEGIN> FILE_$(PRJ_DIR)/../os/os_stat.c_dependencies
$(PRJ_DIR)/db_config.h \
$(PRJ_DIR)/db_int.h \
- $(PRJ_DIR)/db.h \
- /export/home/db/include/queue.h \
- /export/home/db/include/shqueue.h \
- /export/home/db/include/debug.h \
- /export/home/db/include/mutex.h \
- /export/home/db/include/region.h \
- /export/home/db/include/mutex_ext.h \
- /export/home/db/include/env_ext.h \
- /export/home/db/include/os.h \
- /export/home/db/include/os_ext.h \
- /export/home/db/include/common_ext.h \
- /export/home/db/include/os_jump.h
+ $(PRJ_DIR)/db.h
<END>
-<BEGIN> FILE_/export/home/db/os/os_stat.c_objects
+<BEGIN> FILE_$(PRJ_DIR)/../os/os_stat.c_objects
os_stat.o
<END>
-<BEGIN> FILE_/export/home/db/os/os_stat.c_tool
+<BEGIN> FILE_$(PRJ_DIR)/../os/os_stat.c_tool
C/C++ compiler
<END>
-<BEGIN> FILE_/export/home/db/os/os_tmpdir.c_dependDone
+<BEGIN> FILE_$(PRJ_DIR)/../os/os_tmpdir.c_dependDone
TRUE
<END>
-<BEGIN> FILE_/export/home/db/os/os_tmpdir.c_dependencies
+<BEGIN> FILE_$(PRJ_DIR)/../os/os_tmpdir.c_dependencies
$(PRJ_DIR)/db_config.h \
$(PRJ_DIR)/db_int.h \
- $(PRJ_DIR)/db.h \
- /export/home/db/include/queue.h \
- /export/home/db/include/shqueue.h \
- /export/home/db/include/debug.h \
- /export/home/db/include/mutex.h \
- /export/home/db/include/region.h \
- /export/home/db/include/mutex_ext.h \
- /export/home/db/include/env_ext.h \
- /export/home/db/include/os.h \
- /export/home/db/include/os_ext.h \
- /export/home/db/include/common_ext.h
+ $(PRJ_DIR)/db.h
<END>
-<BEGIN> FILE_/export/home/db/os/os_tmpdir.c_objects
+<BEGIN> FILE_$(PRJ_DIR)/../os/os_tmpdir.c_objects
os_tmpdir.o
<END>
-<BEGIN> FILE_/export/home/db/os/os_tmpdir.c_tool
+<BEGIN> FILE_$(PRJ_DIR)/../os/os_tmpdir.c_tool
C/C++ compiler
<END>
-<BEGIN> FILE_/export/home/db/os/os_unlink.c_dependDone
+<BEGIN> FILE_$(PRJ_DIR)/../os/os_unlink.c_dependDone
TRUE
<END>
-<BEGIN> FILE_/export/home/db/os/os_unlink.c_dependencies
+<BEGIN> FILE_$(PRJ_DIR)/../os/os_unlink.c_dependencies
$(PRJ_DIR)/db_config.h \
$(PRJ_DIR)/db_int.h \
- $(PRJ_DIR)/db.h \
- /export/home/db/include/queue.h \
- /export/home/db/include/shqueue.h \
- /export/home/db/include/debug.h \
- /export/home/db/include/mutex.h \
- /export/home/db/include/region.h \
- /export/home/db/include/mutex_ext.h \
- /export/home/db/include/env_ext.h \
- /export/home/db/include/os.h \
- /export/home/db/include/os_ext.h \
- /export/home/db/include/common_ext.h \
- /export/home/db/include/os_jump.h
+ $(PRJ_DIR)/db.h
<END>
-<BEGIN> FILE_/export/home/db/os/os_unlink.c_objects
+<BEGIN> FILE_$(PRJ_DIR)/../os/os_unlink.c_objects
os_unlink.o
<END>
-<BEGIN> FILE_/export/home/db/os/os_unlink.c_tool
+<BEGIN> FILE_$(PRJ_DIR)/../os/os_unlink.c_tool
C/C++ compiler
<END>
-<BEGIN> FILE_/export/home/db/os_vxworks/os_abs.c_dependDone
+<BEGIN> FILE_$(PRJ_DIR)/../os_vxworks/os_vx_abs.c_dependDone
TRUE
<END>
-<BEGIN> FILE_/export/home/db/os_vxworks/os_abs.c_dependencies
+<BEGIN> FILE_$(PRJ_DIR)/../os_vxworks/os_vx_abs.c_dependencies
$(PRJ_DIR)/db_config.h \
$(PRJ_DIR)/db_int.h \
- $(PRJ_DIR)/db.h \
- /export/home/db/include/queue.h \
- /export/home/db/include/shqueue.h \
- /export/home/db/include/debug.h \
- /export/home/db/include/mutex.h \
- /export/home/db/include/region.h \
- /export/home/db/include/mutex_ext.h \
- /export/home/db/include/env_ext.h \
- /export/home/db/include/os.h \
- /export/home/db/include/os_ext.h \
- /export/home/db/include/common_ext.h
+ $(PRJ_DIR)/db.h
<END>
-<BEGIN> FILE_/export/home/db/os_vxworks/os_abs.c_objects
-os_abs.o
+<BEGIN> FILE_$(PRJ_DIR)/../os_vxworks/os_vx_abs.c_objects
+os_vx_abs.o
<END>
-<BEGIN> FILE_/export/home/db/os_vxworks/os_abs.c_tool
+<BEGIN> FILE_$(PRJ_DIR)/../os_vxworks/os_vx_abs.c_tool
C/C++ compiler
<END>
-<BEGIN> FILE_/export/home/db/os_vxworks/os_finit.c_dependDone
+<BEGIN> FILE_$(PRJ_DIR)/../os_vxworks/os_vx_config.c_dependDone
TRUE
<END>
-<BEGIN> FILE_/export/home/db/os_vxworks/os_finit.c_dependencies
+<BEGIN> FILE_$(PRJ_DIR)/../os_vxworks/os_vx_config.c_dependencies
$(PRJ_DIR)/db_config.h \
$(PRJ_DIR)/db_int.h \
- $(PRJ_DIR)/db.h \
- /export/home/db/include/queue.h \
- /export/home/db/include/shqueue.h \
- /export/home/db/include/debug.h \
- /export/home/db/include/mutex.h \
- /export/home/db/include/region.h \
- /export/home/db/include/mutex_ext.h \
- /export/home/db/include/env_ext.h \
- /export/home/db/include/os.h \
- /export/home/db/include/os_ext.h \
- /export/home/db/include/common_ext.h
+ $(PRJ_DIR)/db.h
<END>
-<BEGIN> FILE_/export/home/db/os_vxworks/os_finit.c_objects
-os_finit.o
+<BEGIN> FILE_$(PRJ_DIR)/../os_vxworks/os_vx_config.c_objects
+os_vx_config.o
<END>
-<BEGIN> FILE_/export/home/db/os_vxworks/os_finit.c_tool
+<BEGIN> FILE_$(PRJ_DIR)/../os_vxworks/os_vx_config.c_tool
C/C++ compiler
<END>
-<BEGIN> FILE_/export/home/db/os_vxworks/os_map.c_dependDone
+<BEGIN> FILE_$(PRJ_DIR)/../os_vxworks/os_vx_map.c_dependDone
TRUE
<END>
-<BEGIN> FILE_/export/home/db/os_vxworks/os_map.c_dependencies
+<BEGIN> FILE_$(PRJ_DIR)/../os_vxworks/os_vx_map.c_dependencies
$(PRJ_DIR)/db_config.h \
$(PRJ_DIR)/db_int.h \
- $(PRJ_DIR)/db.h \
- /export/home/db/include/queue.h \
- /export/home/db/include/shqueue.h \
- /export/home/db/include/debug.h \
- /export/home/db/include/mutex.h \
- /export/home/db/include/region.h \
- /export/home/db/include/mutex_ext.h \
- /export/home/db/include/env_ext.h \
- /export/home/db/include/os.h \
- /export/home/db/include/os_ext.h \
- /export/home/db/include/common_ext.h
+ $(PRJ_DIR)/db.h
<END>
-<BEGIN> FILE_/export/home/db/os_vxworks/os_map.c_objects
-os_map.o
+<BEGIN> FILE_$(PRJ_DIR)/../os_vxworks/os_vx_map.c_objects
+os_vx_map.o
<END>
-<BEGIN> FILE_/export/home/db/os_vxworks/os_map.c_tool
+<BEGIN> FILE_$(PRJ_DIR)/../os_vxworks/os_vx_map.c_tool
C/C++ compiler
<END>
-<BEGIN> FILE_/export/home/db/qam/qam.c_dependDone
+<BEGIN> FILE_$(PRJ_DIR)/../qam/qam.c_dependDone
TRUE
<END>
-<BEGIN> FILE_/export/home/db/qam/qam.c_dependencies
+<BEGIN> FILE_$(PRJ_DIR)/../qam/qam.c_dependencies
$(PRJ_DIR)/db_config.h \
$(PRJ_DIR)/db_int.h \
- $(PRJ_DIR)/db.h \
- /export/home/db/include/queue.h \
- /export/home/db/include/shqueue.h \
- /export/home/db/include/debug.h \
- /export/home/db/include/mutex.h \
- /export/home/db/include/region.h \
- /export/home/db/include/mutex_ext.h \
- /export/home/db/include/env_ext.h \
- /export/home/db/include/os.h \
- /export/home/db/include/os_ext.h \
- /export/home/db/include/common_ext.h \
- /export/home/db/include/db_page.h \
- /export/home/db/include/db_shash.h \
- /export/home/db/include/db_am.h \
- /export/home/db/include/db_dispatch.h \
- /export/home/db/include/db_auto.h \
- /export/home/db/include/crdel_auto.h \
- /export/home/db/include/db_ext.h \
- /export/home/db/include/mp.h \
- /export/home/db/include/mp_ext.h \
- /export/home/db/include/lock.h \
- /export/home/db/include/lock_ext.h \
- /export/home/db/include/log.h \
- /export/home/db/include/log_auto.h \
- /export/home/db/include/log_ext.h \
- /export/home/db/include/btree.h \
- /export/home/db/include/btree_auto.h \
- /export/home/db/include/btree_ext.h \
- /export/home/db/include/qam.h \
- /export/home/db/include/qam_auto.h \
- /export/home/db/include/qam_ext.h
+ $(PRJ_DIR)/db.h
<END>
-<BEGIN> FILE_/export/home/db/qam/qam.c_objects
+<BEGIN> FILE_$(PRJ_DIR)/../qam/qam.c_objects
qam.o
<END>
-<BEGIN> FILE_/export/home/db/qam/qam.c_tool
+<BEGIN> FILE_$(PRJ_DIR)/../qam/qam.c_tool
C/C++ compiler
<END>
-<BEGIN> FILE_/export/home/db/qam/qam_auto.c_dependDone
+<BEGIN> FILE_$(PRJ_DIR)/../qam/qam_auto.c_dependDone
TRUE
<END>
-<BEGIN> FILE_/export/home/db/qam/qam_auto.c_dependencies
+<BEGIN> FILE_$(PRJ_DIR)/../qam/qam_auto.c_dependencies
$(PRJ_DIR)/db_config.h \
$(PRJ_DIR)/db_int.h \
- $(PRJ_DIR)/db.h \
- /export/home/db/include/queue.h \
- /export/home/db/include/shqueue.h \
- /export/home/db/include/debug.h \
- /export/home/db/include/mutex.h \
- /export/home/db/include/region.h \
- /export/home/db/include/mutex_ext.h \
- /export/home/db/include/env_ext.h \
- /export/home/db/include/os.h \
- /export/home/db/include/os_ext.h \
- /export/home/db/include/common_ext.h \
- /export/home/db/include/db_page.h \
- /export/home/db/include/db_dispatch.h \
- /export/home/db/include/db_am.h \
- /export/home/db/include/db_auto.h \
- /export/home/db/include/crdel_auto.h \
- /export/home/db/include/db_ext.h \
- /export/home/db/include/qam.h \
- /export/home/db/include/qam_auto.h \
- /export/home/db/include/qam_ext.h \
- /export/home/db/include/txn.h \
- /export/home/db/include/xa.h \
- /export/home/db/include/txn_auto.h \
- /export/home/db/include/txn_ext.h \
- /export/home/db/include/xa_ext.h
+ $(PRJ_DIR)/db.h
<END>
-<BEGIN> FILE_/export/home/db/qam/qam_auto.c_objects
+<BEGIN> FILE_$(PRJ_DIR)/../qam/qam_auto.c_objects
qam_auto.o
<END>
-<BEGIN> FILE_/export/home/db/qam/qam_auto.c_tool
+<BEGIN> FILE_$(PRJ_DIR)/../qam/qam_auto.c_tool
C/C++ compiler
<END>
-<BEGIN> FILE_/export/home/db/qam/qam_conv.c_dependDone
+<BEGIN> FILE_$(PRJ_DIR)/../qam/qam_conv.c_dependDone
TRUE
<END>
-<BEGIN> FILE_/export/home/db/qam/qam_conv.c_dependencies
+<BEGIN> FILE_$(PRJ_DIR)/../qam/qam_conv.c_dependencies
$(PRJ_DIR)/db_config.h \
$(PRJ_DIR)/db_int.h \
- $(PRJ_DIR)/db.h \
- /export/home/db/include/queue.h \
- /export/home/db/include/shqueue.h \
- /export/home/db/include/debug.h \
- /export/home/db/include/mutex.h \
- /export/home/db/include/region.h \
- /export/home/db/include/mutex_ext.h \
- /export/home/db/include/env_ext.h \
- /export/home/db/include/os.h \
- /export/home/db/include/os_ext.h \
- /export/home/db/include/common_ext.h \
- /export/home/db/include/db_page.h \
- /export/home/db/include/qam.h \
- /export/home/db/include/qam_auto.h \
- /export/home/db/include/qam_ext.h \
- /export/home/db/include/db_swap.h \
- /export/home/db/include/db_am.h \
- /export/home/db/include/db_dispatch.h \
- /export/home/db/include/db_auto.h \
- /export/home/db/include/crdel_auto.h \
- /export/home/db/include/db_ext.h
+ $(PRJ_DIR)/db.h
<END>
-<BEGIN> FILE_/export/home/db/qam/qam_conv.c_objects
+<BEGIN> FILE_$(PRJ_DIR)/../qam/qam_conv.c_objects
qam_conv.o
<END>
-<BEGIN> FILE_/export/home/db/qam/qam_conv.c_tool
+<BEGIN> FILE_$(PRJ_DIR)/../qam/qam_conv.c_tool
C/C++ compiler
<END>
-<BEGIN> FILE_/export/home/db/qam/qam_files.c_dependDone
+<BEGIN> FILE_$(PRJ_DIR)/../qam/qam_files.c_dependDone
TRUE
<END>
-<BEGIN> FILE_/export/home/db/qam/qam_files.c_dependencies
+<BEGIN> FILE_$(PRJ_DIR)/../qam/qam_files.c_dependencies
$(PRJ_DIR)/db_config.h \
$(PRJ_DIR)/db_int.h \
- $(PRJ_DIR)/db.h \
- /export/home/db/include/queue.h \
- /export/home/db/include/shqueue.h \
- /export/home/db/include/debug.h \
- /export/home/db/include/mutex.h \
- /export/home/db/include/region.h \
- /export/home/db/include/mutex_ext.h \
- /export/home/db/include/env_ext.h \
- /export/home/db/include/os.h \
- /export/home/db/include/os_ext.h \
- /export/home/db/include/common_ext.h \
- /export/home/db/include/db_page.h \
- /export/home/db/include/db_shash.h \
- /export/home/db/include/db_am.h \
- /export/home/db/include/db_dispatch.h \
- /export/home/db/include/db_auto.h \
- /export/home/db/include/crdel_auto.h \
- /export/home/db/include/db_ext.h \
- /export/home/db/include/lock.h \
- /export/home/db/include/lock_ext.h \
- /export/home/db/include/btree.h \
- /export/home/db/include/btree_auto.h \
- /export/home/db/include/btree_ext.h \
- /export/home/db/include/qam.h \
- /export/home/db/include/qam_auto.h \
- /export/home/db/include/qam_ext.h \
- /export/home/db/include/mp.h \
- /export/home/db/include/mp_ext.h
+ $(PRJ_DIR)/db.h
<END>
-<BEGIN> FILE_/export/home/db/qam/qam_files.c_objects
+<BEGIN> FILE_$(PRJ_DIR)/../qam/qam_files.c_objects
qam_files.o
<END>
-<BEGIN> FILE_/export/home/db/qam/qam_files.c_tool
+<BEGIN> FILE_$(PRJ_DIR)/../qam/qam_files.c_tool
C/C++ compiler
<END>
-<BEGIN> FILE_/export/home/db/qam/qam_method.c_dependDone
+<BEGIN> FILE_$(PRJ_DIR)/../qam/qam_method.c_dependDone
TRUE
<END>
-<BEGIN> FILE_/export/home/db/qam/qam_method.c_dependencies
+<BEGIN> FILE_$(PRJ_DIR)/../qam/qam_method.c_dependencies
$(PRJ_DIR)/db_config.h \
$(PRJ_DIR)/db_int.h \
- $(PRJ_DIR)/db.h \
- /export/home/db/include/queue.h \
- /export/home/db/include/shqueue.h \
- /export/home/db/include/debug.h \
- /export/home/db/include/mutex.h \
- /export/home/db/include/region.h \
- /export/home/db/include/mutex_ext.h \
- /export/home/db/include/env_ext.h \
- /export/home/db/include/os.h \
- /export/home/db/include/os_ext.h \
- /export/home/db/include/common_ext.h \
- /export/home/db/include/db_page.h \
- /export/home/db/include/db_shash.h \
- /export/home/db/include/db_am.h \
- /export/home/db/include/db_dispatch.h \
- /export/home/db/include/db_auto.h \
- /export/home/db/include/crdel_auto.h \
- /export/home/db/include/db_ext.h \
- /export/home/db/include/qam.h \
- /export/home/db/include/qam_auto.h \
- /export/home/db/include/qam_ext.h \
- /export/home/db/include/mp.h \
- /export/home/db/include/mp_ext.h \
- /export/home/db/include/lock.h \
- /export/home/db/include/lock_ext.h \
- /export/home/db/include/log.h \
- /export/home/db/include/log_auto.h \
- /export/home/db/include/log_ext.h
+ $(PRJ_DIR)/db.h
<END>
-<BEGIN> FILE_/export/home/db/qam/qam_method.c_objects
+<BEGIN> FILE_$(PRJ_DIR)/../qam/qam_method.c_objects
qam_method.o
<END>
-<BEGIN> FILE_/export/home/db/qam/qam_method.c_tool
+<BEGIN> FILE_$(PRJ_DIR)/../qam/qam_method.c_tool
C/C++ compiler
<END>
-<BEGIN> FILE_/export/home/db/qam/qam_open.c_dependDone
+<BEGIN> FILE_$(PRJ_DIR)/../qam/qam_open.c_dependDone
TRUE
<END>
-<BEGIN> FILE_/export/home/db/qam/qam_open.c_dependencies
+<BEGIN> FILE_$(PRJ_DIR)/../qam/qam_open.c_dependencies
$(PRJ_DIR)/db_config.h \
$(PRJ_DIR)/db_int.h \
- $(PRJ_DIR)/db.h \
- /export/home/db/include/queue.h \
- /export/home/db/include/shqueue.h \
- /export/home/db/include/debug.h \
- /export/home/db/include/mutex.h \
- /export/home/db/include/region.h \
- /export/home/db/include/mutex_ext.h \
- /export/home/db/include/env_ext.h \
- /export/home/db/include/os.h \
- /export/home/db/include/os_ext.h \
- /export/home/db/include/common_ext.h \
- /export/home/db/include/db_page.h \
- /export/home/db/include/db_shash.h \
- /export/home/db/include/db_swap.h \
- /export/home/db/include/db_am.h \
- /export/home/db/include/db_dispatch.h \
- /export/home/db/include/db_auto.h \
- /export/home/db/include/crdel_auto.h \
- /export/home/db/include/db_ext.h \
- /export/home/db/include/lock.h \
- /export/home/db/include/lock_ext.h \
- /export/home/db/include/qam.h \
- /export/home/db/include/qam_auto.h \
- /export/home/db/include/qam_ext.h
+ $(PRJ_DIR)/db.h
<END>
-<BEGIN> FILE_/export/home/db/qam/qam_open.c_objects
+<BEGIN> FILE_$(PRJ_DIR)/../qam/qam_open.c_objects
qam_open.o
<END>
-<BEGIN> FILE_/export/home/db/qam/qam_open.c_tool
+<BEGIN> FILE_$(PRJ_DIR)/../qam/qam_open.c_tool
C/C++ compiler
<END>
-<BEGIN> FILE_/export/home/db/qam/qam_rec.c_dependDone
+<BEGIN> FILE_$(PRJ_DIR)/../qam/qam_rec.c_dependDone
TRUE
<END>
-<BEGIN> FILE_/export/home/db/qam/qam_rec.c_dependencies
+<BEGIN> FILE_$(PRJ_DIR)/../qam/qam_rec.c_dependencies
$(PRJ_DIR)/db_config.h \
$(PRJ_DIR)/db_int.h \
- $(PRJ_DIR)/db.h \
- /export/home/db/include/queue.h \
- /export/home/db/include/shqueue.h \
- /export/home/db/include/debug.h \
- /export/home/db/include/mutex.h \
- /export/home/db/include/region.h \
- /export/home/db/include/mutex_ext.h \
- /export/home/db/include/env_ext.h \
- /export/home/db/include/os.h \
- /export/home/db/include/os_ext.h \
- /export/home/db/include/common_ext.h \
- /export/home/db/include/db_page.h \
- /export/home/db/include/db_shash.h \
- /export/home/db/include/lock.h \
- /export/home/db/include/lock_ext.h \
- /export/home/db/include/db_am.h \
- /export/home/db/include/db_dispatch.h \
- /export/home/db/include/db_auto.h \
- /export/home/db/include/crdel_auto.h \
- /export/home/db/include/db_ext.h \
- /export/home/db/include/qam.h \
- /export/home/db/include/qam_auto.h \
- /export/home/db/include/qam_ext.h \
- /export/home/db/include/log.h \
- /export/home/db/include/log_auto.h \
- /export/home/db/include/log_ext.h
+ $(PRJ_DIR)/db.h
<END>
-<BEGIN> FILE_/export/home/db/qam/qam_rec.c_objects
+<BEGIN> FILE_$(PRJ_DIR)/../qam/qam_rec.c_objects
qam_rec.o
<END>
-<BEGIN> FILE_/export/home/db/qam/qam_rec.c_tool
+<BEGIN> FILE_$(PRJ_DIR)/../qam/qam_rec.c_tool
C/C++ compiler
<END>
-<BEGIN> FILE_/export/home/db/qam/qam_stat.c_dependDone
+<BEGIN> FILE_$(PRJ_DIR)/../qam/qam_stat.c_dependDone
TRUE
<END>
-<BEGIN> FILE_/export/home/db/qam/qam_stat.c_dependencies
+<BEGIN> FILE_$(PRJ_DIR)/../qam/qam_stat.c_dependencies
$(PRJ_DIR)/db_config.h \
$(PRJ_DIR)/db_int.h \
- $(PRJ_DIR)/db.h \
- /export/home/db/include/queue.h \
- /export/home/db/include/shqueue.h \
- /export/home/db/include/debug.h \
- /export/home/db/include/mutex.h \
- /export/home/db/include/region.h \
- /export/home/db/include/mutex_ext.h \
- /export/home/db/include/env_ext.h \
- /export/home/db/include/os.h \
- /export/home/db/include/os_ext.h \
- /export/home/db/include/common_ext.h \
- /export/home/db/include/db_page.h \
- /export/home/db/include/db_shash.h \
- /export/home/db/include/db_am.h \
- /export/home/db/include/db_dispatch.h \
- /export/home/db/include/db_auto.h \
- /export/home/db/include/crdel_auto.h \
- /export/home/db/include/db_ext.h \
- /export/home/db/include/lock.h \
- /export/home/db/include/lock_ext.h \
- /export/home/db/include/qam.h \
- /export/home/db/include/qam_auto.h \
- /export/home/db/include/qam_ext.h
+ $(PRJ_DIR)/db.h
<END>
-<BEGIN> FILE_/export/home/db/qam/qam_stat.c_objects
+<BEGIN> FILE_$(PRJ_DIR)/../qam/qam_stat.c_objects
qam_stat.o
<END>
-<BEGIN> FILE_/export/home/db/qam/qam_stat.c_tool
+<BEGIN> FILE_$(PRJ_DIR)/../qam/qam_stat.c_tool
C/C++ compiler
<END>
-<BEGIN> FILE_/export/home/db/qam/qam_upgrade.c_dependDone
+<BEGIN> FILE_$(PRJ_DIR)/../qam/qam_upgrade.c_dependDone
TRUE
<END>
-<BEGIN> FILE_/export/home/db/qam/qam_upgrade.c_dependencies
+<BEGIN> FILE_$(PRJ_DIR)/../qam/qam_upgrade.c_dependencies
$(PRJ_DIR)/db_config.h \
$(PRJ_DIR)/db_int.h \
- $(PRJ_DIR)/db.h \
- /export/home/db/include/queue.h \
- /export/home/db/include/shqueue.h \
- /export/home/db/include/debug.h \
- /export/home/db/include/mutex.h \
- /export/home/db/include/region.h \
- /export/home/db/include/mutex_ext.h \
- /export/home/db/include/env_ext.h \
- /export/home/db/include/os.h \
- /export/home/db/include/os_ext.h \
- /export/home/db/include/common_ext.h \
- /export/home/db/include/db_page.h \
- /export/home/db/include/db_swap.h \
- /export/home/db/include/db_am.h \
- /export/home/db/include/db_dispatch.h \
- /export/home/db/include/db_auto.h \
- /export/home/db/include/crdel_auto.h \
- /export/home/db/include/db_ext.h \
- /export/home/db/include/db_upgrade.h
+ $(PRJ_DIR)/db.h
<END>
-<BEGIN> FILE_/export/home/db/qam/qam_upgrade.c_objects
+<BEGIN> FILE_$(PRJ_DIR)/../qam/qam_upgrade.c_objects
qam_upgrade.o
<END>
-<BEGIN> FILE_/export/home/db/qam/qam_upgrade.c_tool
+<BEGIN> FILE_$(PRJ_DIR)/../qam/qam_upgrade.c_tool
C/C++ compiler
<END>
-<BEGIN> FILE_/export/home/db/qam/qam_verify.c_dependDone
+<BEGIN> FILE_$(PRJ_DIR)/../qam/qam_verify.c_dependDone
TRUE
<END>
-<BEGIN> FILE_/export/home/db/qam/qam_verify.c_dependencies
+<BEGIN> FILE_$(PRJ_DIR)/../qam/qam_verify.c_dependencies
$(PRJ_DIR)/db_config.h \
$(PRJ_DIR)/db_int.h \
- $(PRJ_DIR)/db.h \
- /export/home/db/include/queue.h \
- /export/home/db/include/shqueue.h \
- /export/home/db/include/debug.h \
- /export/home/db/include/mutex.h \
- /export/home/db/include/region.h \
- /export/home/db/include/mutex_ext.h \
- /export/home/db/include/env_ext.h \
- /export/home/db/include/os.h \
- /export/home/db/include/os_ext.h \
- /export/home/db/include/common_ext.h \
- /export/home/db/include/db_page.h \
- /export/home/db/include/db_verify.h \
- /export/home/db/include/qam.h \
- /export/home/db/include/qam_auto.h \
- /export/home/db/include/qam_ext.h \
- /export/home/db/include/db_ext.h
+ $(PRJ_DIR)/db.h
<END>
-<BEGIN> FILE_/export/home/db/qam/qam_verify.c_objects
+<BEGIN> FILE_$(PRJ_DIR)/../qam/qam_verify.c_objects
qam_verify.o
<END>
-<BEGIN> FILE_/export/home/db/qam/qam_verify.c_tool
+<BEGIN> FILE_$(PRJ_DIR)/../qam/qam_verify.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../rep/rep_method.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../rep/rep_method.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../rep/rep_method.c_objects
+rep_method.o
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../rep/rep_method.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../rep/rep_record.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../rep/rep_record.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../rep/rep_record.c_objects
+rep_record.o
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../rep/rep_record.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../rep/rep_region.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../rep/rep_region.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../rep/rep_region.c_objects
+rep_region.o
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../rep/rep_region.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../rep/rep_util.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../rep/rep_util.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../rep/rep_util.c_objects
+rep_util.o
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../rep/rep_util.c_tool
C/C++ compiler
<END>
-<BEGIN> FILE_/export/home/db/rpc_client/client.c_dependDone
+<BEGIN> FILE_$(PRJ_DIR)/../rpc_client/client.c_dependDone
TRUE
<END>
-<BEGIN> FILE_/export/home/db/rpc_client/client.c_dependencies
+<BEGIN> FILE_$(PRJ_DIR)/../rpc_client/client.c_dependencies
$(PRJ_DIR)/db_config.h \
- /export/home/db/include/db_server.h \
$(PRJ_DIR)/db_int.h \
- $(PRJ_DIR)/db.h \
- /export/home/db/include/queue.h \
- /export/home/db/include/shqueue.h \
- /export/home/db/include/debug.h \
- /export/home/db/include/mutex.h \
- /export/home/db/include/region.h \
- /export/home/db/include/mutex_ext.h \
- /export/home/db/include/env_ext.h \
- /export/home/db/include/os.h \
- /export/home/db/include/os_ext.h \
- /export/home/db/include/common_ext.h \
- /export/home/db/include/txn.h \
- /export/home/db/include/xa.h \
- /export/home/db/include/txn_auto.h \
- /export/home/db/include/txn_ext.h \
- /export/home/db/include/xa_ext.h \
- /export/home/db/include/gen_client_ext.h \
- /export/home/db/include/rpc_client_ext.h
+ $(PRJ_DIR)/db.h
<END>
-<BEGIN> FILE_/export/home/db/rpc_client/client.c_objects
+<BEGIN> FILE_$(PRJ_DIR)/../rpc_client/client.c_objects
client.o
<END>
-<BEGIN> FILE_/export/home/db/rpc_client/client.c_tool
+<BEGIN> FILE_$(PRJ_DIR)/../rpc_client/client.c_tool
C/C++ compiler
<END>
-<BEGIN> FILE_/export/home/db/rpc_client/db_server_clnt.c_dependDone
+<BEGIN> FILE_$(PRJ_DIR)/../rpc_client/db_server_clnt.c_dependDone
TRUE
<END>
-<BEGIN> FILE_/export/home/db/rpc_client/db_server_clnt.c_dependencies
-$(PRJ_DIR)/db_config.h /export/home/db/include/db_server.h
+<BEGIN> FILE_$(PRJ_DIR)/../rpc_client/db_server_clnt.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h
<END>
-<BEGIN> FILE_/export/home/db/rpc_client/db_server_clnt.c_objects
+<BEGIN> FILE_$(PRJ_DIR)/../rpc_client/db_server_clnt.c_objects
db_server_clnt.o
<END>
-<BEGIN> FILE_/export/home/db/rpc_client/db_server_clnt.c_tool
+<BEGIN> FILE_$(PRJ_DIR)/../rpc_client/db_server_clnt.c_tool
C/C++ compiler
<END>
-<BEGIN> FILE_/export/home/db/rpc_client/gen_client.c_dependDone
+<BEGIN> FILE_$(PRJ_DIR)/../rpc_client/gen_client.c_dependDone
TRUE
<END>
-<BEGIN> FILE_/export/home/db/rpc_client/gen_client.c_dependencies
+<BEGIN> FILE_$(PRJ_DIR)/../rpc_client/gen_client.c_dependencies
$(PRJ_DIR)/db_config.h \
- /export/home/db/include/db_server.h \
$(PRJ_DIR)/db_int.h \
- $(PRJ_DIR)/db.h \
- /export/home/db/include/queue.h \
- /export/home/db/include/shqueue.h \
- /export/home/db/include/debug.h \
- /export/home/db/include/mutex.h \
- /export/home/db/include/region.h \
- /export/home/db/include/mutex_ext.h \
- /export/home/db/include/env_ext.h \
- /export/home/db/include/os.h \
- /export/home/db/include/os_ext.h \
- /export/home/db/include/common_ext.h \
- /export/home/db/include/db_page.h \
- /export/home/db/include/db_ext.h \
- /export/home/db/include/mp.h \
- /export/home/db/include/mp_ext.h \
- /export/home/db/include/rpc_client_ext.h \
- /export/home/db/include/txn.h \
- /export/home/db/include/xa.h \
- /export/home/db/include/txn_auto.h \
- /export/home/db/include/txn_ext.h \
- /export/home/db/include/xa_ext.h \
- /export/home/db/include/gen_client_ext.h
+ $(PRJ_DIR)/db.h
<END>
-<BEGIN> FILE_/export/home/db/rpc_client/gen_client.c_objects
+<BEGIN> FILE_$(PRJ_DIR)/../rpc_client/gen_client.c_objects
gen_client.o
<END>
-<BEGIN> FILE_/export/home/db/rpc_client/gen_client.c_tool
+<BEGIN> FILE_$(PRJ_DIR)/../rpc_client/gen_client.c_tool
C/C++ compiler
<END>
-<BEGIN> FILE_/export/home/db/rpc_client/gen_client_ret.c_dependDone
+<BEGIN> FILE_$(PRJ_DIR)/../rpc_client/gen_client_ret.c_dependDone
TRUE
<END>
-<BEGIN> FILE_/export/home/db/rpc_client/gen_client_ret.c_dependencies
+<BEGIN> FILE_$(PRJ_DIR)/../rpc_client/gen_client_ret.c_dependencies
$(PRJ_DIR)/db_config.h \
- /export/home/db/include/db_server.h \
$(PRJ_DIR)/db_int.h \
- $(PRJ_DIR)/db.h \
- /export/home/db/include/queue.h \
- /export/home/db/include/shqueue.h \
- /export/home/db/include/debug.h \
- /export/home/db/include/mutex.h \
- /export/home/db/include/region.h \
- /export/home/db/include/mutex_ext.h \
- /export/home/db/include/env_ext.h \
- /export/home/db/include/os.h \
- /export/home/db/include/os_ext.h \
- /export/home/db/include/common_ext.h \
- /export/home/db/include/db_page.h \
- /export/home/db/include/txn.h \
- /export/home/db/include/xa.h \
- /export/home/db/include/txn_auto.h \
- /export/home/db/include/txn_ext.h \
- /export/home/db/include/xa_ext.h \
- /export/home/db/include/db_ext.h \
- /export/home/db/include/rpc_client_ext.h
+ $(PRJ_DIR)/db.h
<END>
-<BEGIN> FILE_/export/home/db/rpc_client/gen_client_ret.c_objects
+<BEGIN> FILE_$(PRJ_DIR)/../rpc_client/gen_client_ret.c_objects
gen_client_ret.o
<END>
-<BEGIN> FILE_/export/home/db/rpc_client/gen_client_ret.c_tool
+<BEGIN> FILE_$(PRJ_DIR)/../rpc_client/gen_client_ret.c_tool
C/C++ compiler
<END>
-<BEGIN> FILE_/export/home/db/rpc_server/db_server_xdr.c_dependDone
+<BEGIN> FILE_$(PRJ_DIR)/../rpc_server/c/db_server_xdr.c_dependDone
TRUE
<END>
-<BEGIN> FILE_/export/home/db/rpc_server/db_server_xdr.c_dependencies
-$(PRJ_DIR)/db_config.h /export/home/db/include/db_server.h
+<BEGIN> FILE_$(PRJ_DIR)/../rpc_server/c/db_server_xdr.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h
<END>
-<BEGIN> FILE_/export/home/db/rpc_server/db_server_xdr.c_objects
+<BEGIN> FILE_$(PRJ_DIR)/../rpc_server/c/db_server_xdr.c_objects
db_server_xdr.o
<END>
-<BEGIN> FILE_/export/home/db/rpc_server/db_server_xdr.c_tool
+<BEGIN> FILE_$(PRJ_DIR)/../rpc_server/c/db_server_xdr.c_tool
C/C++ compiler
<END>
-<BEGIN> FILE_/export/home/db/txn/txn.c_dependDone
+<BEGIN> FILE_$(PRJ_DIR)/../txn/txn.c_dependDone
TRUE
<END>
-<BEGIN> FILE_/export/home/db/txn/txn.c_dependencies
+<BEGIN> FILE_$(PRJ_DIR)/../txn/txn.c_dependencies
$(PRJ_DIR)/db_config.h \
- /export/home/db/include/db_server.h \
$(PRJ_DIR)/db_int.h \
- $(PRJ_DIR)/db.h \
- /export/home/db/include/queue.h \
- /export/home/db/include/shqueue.h \
- /export/home/db/include/debug.h \
- /export/home/db/include/mutex.h \
- /export/home/db/include/region.h \
- /export/home/db/include/mutex_ext.h \
- /export/home/db/include/env_ext.h \
- /export/home/db/include/os.h \
- /export/home/db/include/os_ext.h \
- /export/home/db/include/common_ext.h \
- /export/home/db/include/db_shash.h \
- /export/home/db/include/txn.h \
- /export/home/db/include/xa.h \
- /export/home/db/include/txn_auto.h \
- /export/home/db/include/txn_ext.h \
- /export/home/db/include/xa_ext.h \
- /export/home/db/include/lock.h \
- /export/home/db/include/lock_ext.h \
- /export/home/db/include/log.h \
- /export/home/db/include/log_auto.h \
- /export/home/db/include/log_ext.h \
- /export/home/db/include/db_dispatch.h \
- /export/home/db/include/db_page.h \
- /export/home/db/include/db_ext.h \
- /export/home/db/include/gen_client_ext.h \
- /export/home/db/include/rpc_client_ext.h
+ $(PRJ_DIR)/db.h
<END>
-<BEGIN> FILE_/export/home/db/txn/txn.c_objects
+<BEGIN> FILE_$(PRJ_DIR)/../txn/txn.c_objects
txn.o
<END>
-<BEGIN> FILE_/export/home/db/txn/txn.c_tool
+<BEGIN> FILE_$(PRJ_DIR)/../txn/txn.c_tool
C/C++ compiler
<END>
-<BEGIN> FILE_/export/home/db/txn/txn_auto.c_dependDone
+<BEGIN> FILE_$(PRJ_DIR)/../txn/txn_auto.c_dependDone
TRUE
<END>
-<BEGIN> FILE_/export/home/db/txn/txn_auto.c_dependencies
+<BEGIN> FILE_$(PRJ_DIR)/../txn/txn_auto.c_dependencies
$(PRJ_DIR)/db_config.h \
$(PRJ_DIR)/db_int.h \
- $(PRJ_DIR)/db.h \
- /export/home/db/include/queue.h \
- /export/home/db/include/shqueue.h \
- /export/home/db/include/debug.h \
- /export/home/db/include/mutex.h \
- /export/home/db/include/region.h \
- /export/home/db/include/mutex_ext.h \
- /export/home/db/include/env_ext.h \
- /export/home/db/include/os.h \
- /export/home/db/include/os_ext.h \
- /export/home/db/include/common_ext.h \
- /export/home/db/include/db_page.h \
- /export/home/db/include/db_dispatch.h \
- /export/home/db/include/db_am.h \
- /export/home/db/include/db_auto.h \
- /export/home/db/include/crdel_auto.h \
- /export/home/db/include/db_ext.h \
- /export/home/db/include/txn.h \
- /export/home/db/include/xa.h \
- /export/home/db/include/txn_auto.h \
- /export/home/db/include/txn_ext.h \
- /export/home/db/include/xa_ext.h
+ $(PRJ_DIR)/db.h
<END>
-<BEGIN> FILE_/export/home/db/txn/txn_auto.c_objects
+<BEGIN> FILE_$(PRJ_DIR)/../txn/txn_auto.c_objects
txn_auto.o
<END>
-<BEGIN> FILE_/export/home/db/txn/txn_auto.c_tool
+<BEGIN> FILE_$(PRJ_DIR)/../txn/txn_auto.c_tool
C/C++ compiler
<END>
-<BEGIN> FILE_/export/home/db/txn/txn_rec.c_dependDone
+<BEGIN> FILE_$(PRJ_DIR)/../txn/txn_method.c_dependDone
TRUE
<END>
-<BEGIN> FILE_/export/home/db/txn/txn_rec.c_dependencies
+<BEGIN> FILE_$(PRJ_DIR)/../txn/txn_method.c_dependencies
$(PRJ_DIR)/db_config.h \
$(PRJ_DIR)/db_int.h \
- $(PRJ_DIR)/db.h \
- /export/home/db/include/queue.h \
- /export/home/db/include/shqueue.h \
- /export/home/db/include/debug.h \
- /export/home/db/include/mutex.h \
- /export/home/db/include/region.h \
- /export/home/db/include/mutex_ext.h \
- /export/home/db/include/env_ext.h \
- /export/home/db/include/os.h \
- /export/home/db/include/os_ext.h \
- /export/home/db/include/common_ext.h \
- /export/home/db/include/db_page.h \
- /export/home/db/include/txn.h \
- /export/home/db/include/xa.h \
- /export/home/db/include/txn_auto.h \
- /export/home/db/include/txn_ext.h \
- /export/home/db/include/xa_ext.h \
- /export/home/db/include/db_am.h \
- /export/home/db/include/db_dispatch.h \
- /export/home/db/include/db_auto.h \
- /export/home/db/include/crdel_auto.h \
- /export/home/db/include/db_ext.h
+ $(PRJ_DIR)/db.h
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../txn/txn_method.c_objects
+txn_method.o
<END>
-<BEGIN> FILE_/export/home/db/txn/txn_rec.c_objects
+<BEGIN> FILE_$(PRJ_DIR)/../txn/txn_method.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../txn/txn_rec.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../txn/txn_rec.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../txn/txn_rec.c_objects
txn_rec.o
<END>
-<BEGIN> FILE_/export/home/db/txn/txn_rec.c_tool
+<BEGIN> FILE_$(PRJ_DIR)/../txn/txn_rec.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../txn/txn_recover.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../txn/txn_recover.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../txn/txn_recover.c_objects
+txn_recover.o
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../txn/txn_recover.c_tool
C/C++ compiler
<END>
-<BEGIN> FILE_/export/home/db/txn/txn_region.c_dependDone
+<BEGIN> FILE_$(PRJ_DIR)/../txn/txn_region.c_dependDone
TRUE
<END>
-<BEGIN> FILE_/export/home/db/txn/txn_region.c_dependencies
+<BEGIN> FILE_$(PRJ_DIR)/../txn/txn_region.c_dependencies
$(PRJ_DIR)/db_config.h \
- /export/home/db/include/db_server.h \
$(PRJ_DIR)/db_int.h \
- $(PRJ_DIR)/db.h \
- /export/home/db/include/queue.h \
- /export/home/db/include/shqueue.h \
- /export/home/db/include/debug.h \
- /export/home/db/include/mutex.h \
- /export/home/db/include/region.h \
- /export/home/db/include/mutex_ext.h \
- /export/home/db/include/env_ext.h \
- /export/home/db/include/os.h \
- /export/home/db/include/os_ext.h \
- /export/home/db/include/common_ext.h \
- /export/home/db/include/db_page.h \
- /export/home/db/include/txn.h \
- /export/home/db/include/xa.h \
- /export/home/db/include/txn_auto.h \
- /export/home/db/include/txn_ext.h \
- /export/home/db/include/xa_ext.h \
- /export/home/db/include/db_am.h \
- /export/home/db/include/db_dispatch.h \
- /export/home/db/include/db_auto.h \
- /export/home/db/include/crdel_auto.h \
- /export/home/db/include/db_ext.h \
- /export/home/db/include/gen_client_ext.h \
- /export/home/db/include/rpc_client_ext.h
+ $(PRJ_DIR)/db.h
<END>
-<BEGIN> FILE_/export/home/db/txn/txn_region.c_objects
+<BEGIN> FILE_$(PRJ_DIR)/../txn/txn_region.c_objects
txn_region.o
<END>
-<BEGIN> FILE_/export/home/db/txn/txn_region.c_tool
+<BEGIN> FILE_$(PRJ_DIR)/../txn/txn_region.c_tool
C/C++ compiler
<END>
-<BEGIN> FILE_/export/home/db/xa/xa.c_dependDone
+<BEGIN> FILE_$(PRJ_DIR)/../txn/txn_stat.c_dependDone
TRUE
<END>
-<BEGIN> FILE_/export/home/db/xa/xa.c_dependencies
+<BEGIN> FILE_$(PRJ_DIR)/../txn/txn_stat.c_dependencies
$(PRJ_DIR)/db_config.h \
$(PRJ_DIR)/db_int.h \
- $(PRJ_DIR)/db.h \
- /export/home/db/include/queue.h \
- /export/home/db/include/shqueue.h \
- /export/home/db/include/debug.h \
- /export/home/db/include/mutex.h \
- /export/home/db/include/region.h \
- /export/home/db/include/mutex_ext.h \
- /export/home/db/include/env_ext.h \
- /export/home/db/include/os.h \
- /export/home/db/include/os_ext.h \
- /export/home/db/include/common_ext.h \
- /export/home/db/include/db_page.h \
- /export/home/db/include/log.h \
- /export/home/db/include/log_auto.h \
- /export/home/db/include/log_ext.h \
- /export/home/db/include/txn.h \
- /export/home/db/include/xa.h \
- /export/home/db/include/txn_auto.h \
- /export/home/db/include/txn_ext.h \
- /export/home/db/include/xa_ext.h \
- /export/home/db/include/db_am.h \
- /export/home/db/include/db_dispatch.h \
- /export/home/db/include/db_auto.h \
- /export/home/db/include/crdel_auto.h \
- /export/home/db/include/db_ext.h
+ $(PRJ_DIR)/db.h
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../txn/txn_stat.c_objects
+txn_stat.o
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../txn/txn_stat.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../txn/txn_util.c_dependDone
+TRUE
<END>
-<BEGIN> FILE_/export/home/db/xa/xa.c_objects
+<BEGIN> FILE_$(PRJ_DIR)/../txn/txn_util.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../txn/txn_util.c_objects
+txn_util.o
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../txn/txn_util.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../xa/xa.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../xa/xa.c_dependencies
+$(PRJ_DIR)/db_config.h \
+ $(PRJ_DIR)/db_int.h \
+ $(PRJ_DIR)/db.h
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/../xa/xa.c_objects
xa.o
<END>
-<BEGIN> FILE_/export/home/db/xa/xa.c_tool
+<BEGIN> FILE_$(PRJ_DIR)/../xa/xa.c_tool
C/C++ compiler
<END>
-<BEGIN> FILE_/export/home/db/xa/xa_db.c_dependDone
+<BEGIN> FILE_$(PRJ_DIR)/../xa/xa_db.c_dependDone
TRUE
<END>
-<BEGIN> FILE_/export/home/db/xa/xa_db.c_dependencies
+<BEGIN> FILE_$(PRJ_DIR)/../xa/xa_db.c_dependencies
$(PRJ_DIR)/db_config.h \
$(PRJ_DIR)/db_int.h \
- $(PRJ_DIR)/db.h \
- /export/home/db/include/queue.h \
- /export/home/db/include/shqueue.h \
- /export/home/db/include/debug.h \
- /export/home/db/include/mutex.h \
- /export/home/db/include/region.h \
- /export/home/db/include/mutex_ext.h \
- /export/home/db/include/env_ext.h \
- /export/home/db/include/os.h \
- /export/home/db/include/os_ext.h \
- /export/home/db/include/common_ext.h \
- /export/home/db/include/xa.h \
- /export/home/db/include/xa_ext.h \
- /export/home/db/include/txn.h \
- /export/home/db/include/txn_auto.h \
- /export/home/db/include/txn_ext.h
+ $(PRJ_DIR)/db.h
<END>
-<BEGIN> FILE_/export/home/db/xa/xa_db.c_objects
+<BEGIN> FILE_$(PRJ_DIR)/../xa/xa_db.c_objects
xa_db.o
<END>
-<BEGIN> FILE_/export/home/db/xa/xa_db.c_tool
+<BEGIN> FILE_$(PRJ_DIR)/../xa/xa_db.c_tool
C/C++ compiler
<END>
-<BEGIN> FILE_/export/home/db/xa/xa_map.c_dependDone
+<BEGIN> FILE_$(PRJ_DIR)/../xa/xa_map.c_dependDone
TRUE
<END>
-<BEGIN> FILE_/export/home/db/xa/xa_map.c_dependencies
+<BEGIN> FILE_$(PRJ_DIR)/../xa/xa_map.c_dependencies
$(PRJ_DIR)/db_config.h \
$(PRJ_DIR)/db_int.h \
- $(PRJ_DIR)/db.h \
- /export/home/db/include/queue.h \
- /export/home/db/include/shqueue.h \
- /export/home/db/include/debug.h \
- /export/home/db/include/mutex.h \
- /export/home/db/include/region.h \
- /export/home/db/include/mutex_ext.h \
- /export/home/db/include/env_ext.h \
- /export/home/db/include/os.h \
- /export/home/db/include/os_ext.h \
- /export/home/db/include/common_ext.h \
- /export/home/db/include/txn.h \
- /export/home/db/include/xa.h \
- /export/home/db/include/txn_auto.h \
- /export/home/db/include/txn_ext.h \
- /export/home/db/include/xa_ext.h
+ $(PRJ_DIR)/db.h
<END>
-<BEGIN> FILE_/export/home/db/xa/xa_map.c_objects
+<BEGIN> FILE_$(PRJ_DIR)/../xa/xa_map.c_objects
xa_map.o
<END>
-<BEGIN> FILE_/export/home/db/xa/xa_map.c_tool
+<BEGIN> FILE_$(PRJ_DIR)/../xa/xa_map.c_tool
C/C++ compiler
<END>
<BEGIN> PROJECT_FILES
-/export/home/db/hsearch/hsearch.c \
- /export/home/db/mp/mp_trickle.c \
- /export/home/db/mp/mp_bh.c \
- /export/home/db/mp/mp_fget.c \
- /export/home/db/mp/mp_fopen.c \
- /export/home/db/mp/mp_fput.c \
- /export/home/db/mp/mp_fset.c \
- /export/home/db/mp/mp_method.c \
- /export/home/db/mp/mp_region.c \
- /export/home/db/mp/mp_register.c \
- /export/home/db/mp/mp_stat.c \
- /export/home/db/mp/mp_sync.c \
- /export/home/db/mp/mp_alloc.c \
- /export/home/db/db/crdel_rec.c \
- /export/home/db/db/db.c \
- /export/home/db/db/db_am.c \
- /export/home/db/db/db_auto.c \
- /export/home/db/db/db_cam.c \
- /export/home/db/db/db_conv.c \
- /export/home/db/db/db_dispatch.c \
- /export/home/db/db/db_dup.c \
- /export/home/db/db/db_iface.c \
- /export/home/db/db/db_join.c \
- /export/home/db/db/db_meta.c \
- /export/home/db/db/db_method.c \
- /export/home/db/db/db_overflow.c \
- /export/home/db/db/db_pr.c \
- /export/home/db/db/db_rec.c \
- /export/home/db/db/db_reclaim.c \
- /export/home/db/db/db_ret.c \
- /export/home/db/db/crdel_auto.c \
- /export/home/db/clib/getopt.c \
- /export/home/db/clib/snprintf.c \
- /export/home/db/clib/strcasecmp.c \
- /export/home/db/os/os_unlink.c \
- /export/home/db/os/os_alloc.c \
- /export/home/db/os/os_dir.c \
- /export/home/db/os/os_errno.c \
- /export/home/db/os/os_fid.c \
- /export/home/db/os/os_fsync.c \
- /export/home/db/os/os_handle.c \
- /export/home/db/os/os_method.c \
- /export/home/db/os/os_oflags.c \
- /export/home/db/os/os_open.c \
- /export/home/db/os/os_region.c \
- /export/home/db/os/os_rename.c \
- /export/home/db/os/os_root.c \
- /export/home/db/os/os_rpath.c \
- /export/home/db/os/os_rw.c \
- /export/home/db/os/os_seek.c \
- /export/home/db/os/os_spin.c \
- /export/home/db/os/os_stat.c \
- /export/home/db/os/os_tmpdir.c \
- /export/home/db/qam/qam_upgrade.c \
- /export/home/db/qam/qam_auto.c \
- /export/home/db/qam/qam_conv.c \
- /export/home/db/qam/qam_method.c \
- /export/home/db/qam/qam_open.c \
- /export/home/db/qam/qam_rec.c \
- /export/home/db/qam/qam_stat.c \
- /export/home/db/qam/qam.c \
- /export/home/db/hash/hash_upgrade.c \
- /export/home/db/hash/hash_auto.c \
- /export/home/db/hash/hash_conv.c \
- /export/home/db/hash/hash_dup.c \
- /export/home/db/hash/hash_func.c \
- /export/home/db/hash/hash_meta.c \
- /export/home/db/hash/hash_method.c \
- /export/home/db/hash/hash_page.c \
- /export/home/db/hash/hash_rec.c \
- /export/home/db/hash/hash_reclaim.c \
- /export/home/db/hash/hash_stat.c \
- /export/home/db/hash/hash.c \
- /export/home/db/xa/xa_map.c \
- /export/home/db/xa/xa_db.c \
- /export/home/db/xa/xa.c \
- /export/home/db/btree/btree_auto.c \
- /export/home/db/btree/bt_conv.c \
- /export/home/db/btree/bt_curadj.c \
- /export/home/db/btree/bt_cursor.c \
- /export/home/db/btree/bt_delete.c \
- /export/home/db/btree/bt_method.c \
- /export/home/db/btree/bt_open.c \
- /export/home/db/btree/bt_put.c \
- /export/home/db/btree/bt_rec.c \
- /export/home/db/btree/bt_reclaim.c \
- /export/home/db/btree/bt_recno.c \
- /export/home/db/btree/bt_rsearch.c \
- /export/home/db/btree/bt_search.c \
- /export/home/db/btree/bt_split.c \
- /export/home/db/btree/bt_stat.c \
- /export/home/db/btree/bt_upgrade.c \
- /export/home/db/btree/bt_compare.c \
- /export/home/db/common/db_log2.c \
- /export/home/db/common/db_err.c \
- /export/home/db/common/db_getlong.c \
- /export/home/db/common/db_byteorder.c \
- /export/home/db/env/env_region.c \
- /export/home/db/env/db_shash.c \
- /export/home/db/env/env_method.c \
- /export/home/db/env/env_open.c \
- /export/home/db/env/env_recover.c \
- /export/home/db/env/db_salloc.c \
- /export/home/db/lock/lock_util.c \
- /export/home/db/lock/lock_conflict.c \
- /export/home/db/lock/lock_deadlock.c \
- /export/home/db/lock/lock_region.c \
- /export/home/db/lock/lock.c \
- /export/home/db/txn/txn_region.c \
- /export/home/db/txn/txn_auto.c \
- /export/home/db/txn/txn_rec.c \
- /export/home/db/txn/txn.c \
- /export/home/db/log/log_register.c \
- /export/home/db/log/log_archive.c \
- /export/home/db/log/log_auto.c \
- /export/home/db/log/log_compare.c \
- /export/home/db/log/log_findckp.c \
- /export/home/db/log/log_get.c \
- /export/home/db/log/log_method.c \
- /export/home/db/log/log_put.c \
- /export/home/db/log/log_rec.c \
- /export/home/db/log/log.c \
- /export/home/db/mutex/mut_tas.c \
- /export/home/db/mutex/mutex.c \
- /export/home/db/clib/vsnprintf.c \
- /export/home/db/common/util_log.c \
- /export/home/db/common/util_sig.c \
- /export/home/db/os/os_sleep.c \
- /export/home/db/btree/bt_verify.c \
- /export/home/db/hash/hash_verify.c \
- /export/home/db/qam/qam_verify.c \
- /export/home/db/db/db_upg_opd.c \
- /export/home/db/rpc_client/gen_client_ret.c \
- /export/home/db/rpc_client/db_server_clnt.c \
- /export/home/db/rpc_client/gen_client.c \
- /export/home/db/rpc_client/client.c \
- /export/home/db/include/tcl_db.h \
- /export/home/db/rpc_server/db_server_xdr.c \
- /export/home/db/os_vxworks/os_map.c \
- /export/home/db/db/db_vrfy.c \
- /export/home/db/db/db_upg.c \
- /export/home/db/db/db_vrfyutil.c \
- /export/home/db/os_vxworks/os_finit.c \
- /export/home/db/os_vxworks/os_abs.c \
- /export/home/db/lock/lock_method.c \
- /export/home/db/lock/lock_stat.c \
- /export/home/db/qam/qam_files.c
+$(PRJ_DIR)/../btree/bt_compare.c \
+ $(PRJ_DIR)/../btree/bt_conv.c \
+ $(PRJ_DIR)/../btree/bt_curadj.c \
+ $(PRJ_DIR)/../btree/bt_cursor.c \
+ $(PRJ_DIR)/../btree/bt_delete.c \
+ $(PRJ_DIR)/../btree/bt_method.c \
+ $(PRJ_DIR)/../btree/bt_open.c \
+ $(PRJ_DIR)/../btree/bt_put.c \
+ $(PRJ_DIR)/../btree/bt_rec.c \
+ $(PRJ_DIR)/../btree/bt_reclaim.c \
+ $(PRJ_DIR)/../btree/bt_recno.c \
+ $(PRJ_DIR)/../btree/bt_rsearch.c \
+ $(PRJ_DIR)/../btree/bt_search.c \
+ $(PRJ_DIR)/../btree/bt_split.c \
+ $(PRJ_DIR)/../btree/bt_stat.c \
+ $(PRJ_DIR)/../btree/bt_upgrade.c \
+ $(PRJ_DIR)/../btree/bt_verify.c \
+ $(PRJ_DIR)/../btree/btree_auto.c \
+ $(PRJ_DIR)/../clib/getopt.c \
+ $(PRJ_DIR)/../clib/snprintf.c \
+ $(PRJ_DIR)/../clib/strcasecmp.c \
+ $(PRJ_DIR)/../clib/strdup.c \
+ $(PRJ_DIR)/../clib/vsnprintf.c \
+ $(PRJ_DIR)/../common/db_byteorder.c \
+ $(PRJ_DIR)/../common/db_err.c \
+ $(PRJ_DIR)/../common/db_getlong.c \
+ $(PRJ_DIR)/../common/db_idspace.c \
+ $(PRJ_DIR)/../common/db_log2.c \
+ $(PRJ_DIR)/../common/util_arg.c \
+ $(PRJ_DIR)/../common/util_cache.c \
+ $(PRJ_DIR)/../common/util_log.c \
+ $(PRJ_DIR)/../common/util_sig.c \
+ $(PRJ_DIR)/../db/crdel_auto.c \
+ $(PRJ_DIR)/../db/crdel_rec.c \
+ $(PRJ_DIR)/../db/db.c \
+ $(PRJ_DIR)/../db/db_am.c \
+ $(PRJ_DIR)/../db/db_auto.c \
+ $(PRJ_DIR)/../db/db_cam.c \
+ $(PRJ_DIR)/../db/db_conv.c \
+ $(PRJ_DIR)/../db/db_dispatch.c \
+ $(PRJ_DIR)/../db/db_dup.c \
+ $(PRJ_DIR)/../db/db_iface.c \
+ $(PRJ_DIR)/../db/db_join.c \
+ $(PRJ_DIR)/../db/db_meta.c \
+ $(PRJ_DIR)/../db/db_method.c \
+ $(PRJ_DIR)/../db/db_open.c \
+ $(PRJ_DIR)/../db/db_overflow.c \
+ $(PRJ_DIR)/../db/db_pr.c \
+ $(PRJ_DIR)/../db/db_rec.c \
+ $(PRJ_DIR)/../db/db_reclaim.c \
+ $(PRJ_DIR)/../db/db_remove.c \
+ $(PRJ_DIR)/../db/db_rename.c \
+ $(PRJ_DIR)/../db/db_ret.c \
+ $(PRJ_DIR)/../db/db_truncate.c \
+ $(PRJ_DIR)/../db/db_upg.c \
+ $(PRJ_DIR)/../db/db_upg_opd.c \
+ $(PRJ_DIR)/../db/db_vrfy.c \
+ $(PRJ_DIR)/../db/db_vrfyutil.c \
+ $(PRJ_DIR)/../dbreg/dbreg.c \
+ $(PRJ_DIR)/../dbreg/dbreg_auto.c \
+ $(PRJ_DIR)/../dbreg/dbreg_rec.c \
+ $(PRJ_DIR)/../dbreg/dbreg_util.c \
+ $(PRJ_DIR)/../env/db_salloc.c \
+ $(PRJ_DIR)/../env/db_shash.c \
+ $(PRJ_DIR)/../env/env_file.c \
+ $(PRJ_DIR)/../env/env_method.c \
+ $(PRJ_DIR)/../env/env_open.c \
+ $(PRJ_DIR)/../env/env_recover.c \
+ $(PRJ_DIR)/../env/env_region.c \
+ $(PRJ_DIR)/../fileops/fileops_auto.c \
+ $(PRJ_DIR)/../fileops/fop_basic.c \
+ $(PRJ_DIR)/../fileops/fop_rec.c \
+ $(PRJ_DIR)/../fileops/fop_util.c \
+ $(PRJ_DIR)/../hash/hash.c \
+ $(PRJ_DIR)/../hash/hash_auto.c \
+ $(PRJ_DIR)/../hash/hash_conv.c \
+ $(PRJ_DIR)/../hash/hash_dup.c \
+ $(PRJ_DIR)/../hash/hash_func.c \
+ $(PRJ_DIR)/../hash/hash_meta.c \
+ $(PRJ_DIR)/../hash/hash_method.c \
+ $(PRJ_DIR)/../hash/hash_open.c \
+ $(PRJ_DIR)/../hash/hash_page.c \
+ $(PRJ_DIR)/../hash/hash_rec.c \
+ $(PRJ_DIR)/../hash/hash_reclaim.c \
+ $(PRJ_DIR)/../hash/hash_stat.c \
+ $(PRJ_DIR)/../hash/hash_upgrade.c \
+ $(PRJ_DIR)/../hash/hash_verify.c \
+ $(PRJ_DIR)/../hmac/hmac.c \
+ $(PRJ_DIR)/../hmac/sha1.c \
+ $(PRJ_DIR)/../hsearch/hsearch.c \
+ $(PRJ_DIR)/../lock/lock.c \
+ $(PRJ_DIR)/../lock/lock_deadlock.c \
+ $(PRJ_DIR)/../lock/lock_method.c \
+ $(PRJ_DIR)/../lock/lock_region.c \
+ $(PRJ_DIR)/../lock/lock_stat.c \
+ $(PRJ_DIR)/../lock/lock_util.c \
+ $(PRJ_DIR)/../log/log.c \
+ $(PRJ_DIR)/../log/log_archive.c \
+ $(PRJ_DIR)/../log/log_compare.c \
+ $(PRJ_DIR)/../log/log_get.c \
+ $(PRJ_DIR)/../log/log_method.c \
+ $(PRJ_DIR)/../log/log_put.c \
+ $(PRJ_DIR)/../mp/mp_alloc.c \
+ $(PRJ_DIR)/../mp/mp_bh.c \
+ $(PRJ_DIR)/../mp/mp_fget.c \
+ $(PRJ_DIR)/../mp/mp_fopen.c \
+ $(PRJ_DIR)/../mp/mp_fput.c \
+ $(PRJ_DIR)/../mp/mp_fset.c \
+ $(PRJ_DIR)/../mp/mp_method.c \
+ $(PRJ_DIR)/../mp/mp_region.c \
+ $(PRJ_DIR)/../mp/mp_register.c \
+ $(PRJ_DIR)/../mp/mp_stat.c \
+ $(PRJ_DIR)/../mp/mp_sync.c \
+ $(PRJ_DIR)/../mp/mp_trickle.c \
+ $(PRJ_DIR)/../mutex/mut_tas.c \
+ $(PRJ_DIR)/../mutex/mutex.c \
+ $(PRJ_DIR)/../os/os_alloc.c \
+ $(PRJ_DIR)/../os/os_clock.c \
+ $(PRJ_DIR)/../os/os_dir.c \
+ $(PRJ_DIR)/../os/os_errno.c \
+ $(PRJ_DIR)/../os/os_fid.c \
+ $(PRJ_DIR)/../os/os_fsync.c \
+ $(PRJ_DIR)/../os/os_handle.c \
+ $(PRJ_DIR)/../os/os_id.c \
+ $(PRJ_DIR)/../os/os_method.c \
+ $(PRJ_DIR)/../os/os_oflags.c \
+ $(PRJ_DIR)/../os/os_open.c \
+ $(PRJ_DIR)/../os/os_region.c \
+ $(PRJ_DIR)/../os/os_rename.c \
+ $(PRJ_DIR)/../os/os_root.c \
+ $(PRJ_DIR)/../os/os_rpath.c \
+ $(PRJ_DIR)/../os/os_rw.c \
+ $(PRJ_DIR)/../os/os_seek.c \
+ $(PRJ_DIR)/../os/os_sleep.c \
+ $(PRJ_DIR)/../os/os_spin.c \
+ $(PRJ_DIR)/../os/os_stat.c \
+ $(PRJ_DIR)/../os/os_tmpdir.c \
+ $(PRJ_DIR)/../os/os_unlink.c \
+ $(PRJ_DIR)/../os_vxworks/os_vx_abs.c \
+ $(PRJ_DIR)/../os_vxworks/os_vx_config.c \
+ $(PRJ_DIR)/../os_vxworks/os_vx_map.c \
+ $(PRJ_DIR)/../qam/qam.c \
+ $(PRJ_DIR)/../qam/qam_auto.c \
+ $(PRJ_DIR)/../qam/qam_conv.c \
+ $(PRJ_DIR)/../qam/qam_files.c \
+ $(PRJ_DIR)/../qam/qam_method.c \
+ $(PRJ_DIR)/../qam/qam_open.c \
+ $(PRJ_DIR)/../qam/qam_rec.c \
+ $(PRJ_DIR)/../qam/qam_stat.c \
+ $(PRJ_DIR)/../qam/qam_upgrade.c \
+ $(PRJ_DIR)/../qam/qam_verify.c \
+ $(PRJ_DIR)/../rep/rep_method.c \
+ $(PRJ_DIR)/../rep/rep_record.c \
+ $(PRJ_DIR)/../rep/rep_region.c \
+ $(PRJ_DIR)/../rep/rep_util.c \
+ $(PRJ_DIR)/../rpc_client/client.c \
+ $(PRJ_DIR)/../rpc_client/db_server_clnt.c \
+ $(PRJ_DIR)/../rpc_client/gen_client.c \
+ $(PRJ_DIR)/../rpc_client/gen_client_ret.c \
+ $(PRJ_DIR)/../rpc_server/c/db_server_xdr.c \
+ $(PRJ_DIR)/../txn/txn.c \
+ $(PRJ_DIR)/../txn/txn_auto.c \
+ $(PRJ_DIR)/../txn/txn_method.c \
+ $(PRJ_DIR)/../txn/txn_rec.c \
+ $(PRJ_DIR)/../txn/txn_recover.c \
+ $(PRJ_DIR)/../txn/txn_region.c \
+ $(PRJ_DIR)/../txn/txn_stat.c \
+ $(PRJ_DIR)/../txn/txn_util.c \
+ $(PRJ_DIR)/../xa/xa.c \
+ $(PRJ_DIR)/../xa/xa_db.c \
+ $(PRJ_DIR)/../xa/xa_map.c
<END>
<BEGIN> userComments
BerkeleyDB
<END>
-
diff --git a/bdb/build_vxworks/BerkeleyDB.wsp b/bdb/build_vxworks/BerkeleyDB.wsp
index cffcf00dec9..ce2e71b0eb3 100644
--- a/bdb/build_vxworks/BerkeleyDB.wsp
+++ b/bdb/build_vxworks/BerkeleyDB.wsp
@@ -10,12 +10,17 @@ Workspace
<BEGIN> projectList
$(PRJ_DIR)/BerkeleyDB.wpj \
- $(PRJ_DIR)/ex_access/ex_access.wpj \
- $(PRJ_DIR)/ex_btrec/ex_btrec.wpj \
- $(PRJ_DIR)/ex_env/ex_env.wpj \
- $(PRJ_DIR)/ex_mpool/ex_mpool.wpj \
- $(PRJ_DIR)/ex_tpcb/ex_tpcb.wpj \
- $(PRJ_DIR)/ex_dbclient/ex_dbclient.wpj
+ $(PRJ_DIR)/db_archive/db_archive.wpj \
+ $(PRJ_DIR)/db_checkpoint/db_checkpoint.wpj \
+ $(PRJ_DIR)/db_deadlock/db_deadlock.wpj \
+ $(PRJ_DIR)/db_dump/db_dump.wpj \
+ $(PRJ_DIR)/db_load/db_load.wpj \
+ $(PRJ_DIR)/db_printlog/db_printlog.wpj \
+ $(PRJ_DIR)/db_recover/db_recover.wpj \
+ $(PRJ_DIR)/db_stat/db_stat.wpj \
+ $(PRJ_DIR)/db_upgrade/db_upgrade.wpj \
+ $(PRJ_DIR)/db_verify/db_verify.wpj \
+ $(PRJ_DIR)/dbdemo/dbdemo.wpj
<END>
<BEGIN> userComments
diff --git a/bdb/build_vxworks/BerkeleyDB/Makefile.custom b/bdb/build_vxworks/BerkeleyDB/Makefile.custom
new file mode 100644
index 00000000000..ca781f7b251
--- /dev/null
+++ b/bdb/build_vxworks/BerkeleyDB/Makefile.custom
@@ -0,0 +1,51 @@
+#
+# Custom Makefile shell
+#
+# This file may be edited freely, since it will not be regenerated
+# by the project manager.
+#
+# Use this makefile to define rules to make external binaries
+# and deposit them in the $(EXTERNAL_BINARIES_DIR) directory.
+#
+# If you have specified external modules during your component
+# creation, you will find make rules already in place below.
+# You will likely have to edit these to suit your individual
+# build setup.
+#
+# You may wish to use the CPU, BUILD_SPEC or TOOL make variables in
+# your Makefile to support builds for different architectures. Use
+# the FORCE_EXTERNAL_MAKE phony target to ensure that your external
+# make always runs.
+#
+# The example below assumes that your custom makefile is in the
+# mySourceTree directory, and that the binary file it produces
+# is placed into the $(BUILD_SPEC) sub-directory.
+#
+# EXTERNAL_SOURCE_BASE = /folk/me/mySourceTree
+# EXTERNAL_MODULE = myLibrary.o
+# EXTERNAL_MAKE = make
+#
+# $(EXTERNAL_BINARIES_DIR)/$(EXTERNAL_MODULE) : FORCE_EXTERNAL_MAKE
+# $(EXTERNAL_MAKE) -C $(EXTERNAL_SOURCE_BASE) \
+# -f $(EXTERNAL_SOURCE_BASE)/Makefile \
+# CPU=$(CPU) BUILD_SPEC=$(BUILD_SPEC) $(@F)
+# $(CP) $(subst /,$(DIRCHAR),$(EXTERNAL_SOURCE_BASE)/$(BUILD_SPEC)/$(@F) $@)
+#
+# If you are not adding your external modules from the component wizard,
+# you will have to include them in your component yourself:
+#
+# From the GUI, you can do this with the Component's 'Add external module'
+# dialog.
+#
+# If you are using the command line, add the module(s) by editing the
+# MODULES line in component.cdf file, e.g.
+#
+# Component INCLUDE_MYCOMPONENT {
+#
+# MODULES foo.o goo.o \
+# myLibrary.o
+#
+
+
+# rules to build custom libraries
+
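For reference, the commented template in the Makefile.custom added above can be filled in roughly as follows. This is only an illustrative sketch, not part of the generated project: EXTERNAL_SOURCE_BASE and myLibrary.o are placeholder names, and EXTERNAL_BINARIES_DIR, CP and DIRCHAR are assumed to be supplied by the Tornado project build, as the comments describe. The point being shown is the FORCE_EXTERNAL_MAKE pattern, which forces the external sub-make to run on every build before the resulting object is copied where the project manager expects it.

    # Illustrative sketch only -- placeholder source tree and module name.
    # Recipe lines must begin with a tab when pasted into a real Makefile.
    EXTERNAL_SOURCE_BASE = /folk/me/mySourceTree
    EXTERNAL_MODULE      = myLibrary.o
    EXTERNAL_MAKE        = make

    # Always re-run the external make (FORCE_EXTERNAL_MAKE is phony),
    # then copy the built object into the external binaries directory.
    $(EXTERNAL_BINARIES_DIR)/$(EXTERNAL_MODULE) : FORCE_EXTERNAL_MAKE
            $(EXTERNAL_MAKE) -C $(EXTERNAL_SOURCE_BASE) \
                -f $(EXTERNAL_SOURCE_BASE)/Makefile \
                CPU=$(CPU) BUILD_SPEC=$(BUILD_SPEC) $(@F)
            $(CP) $(subst /,$(DIRCHAR),$(EXTERNAL_SOURCE_BASE)/$(BUILD_SPEC)/$(@F)) $@

The module built this way still has to be registered with the component, either through the GUI's 'Add external module' dialog or by listing it on the MODULES line of component.cdf, as noted in the comments above.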
diff --git a/bdb/build_vxworks/BerkeleyDB/component.cdf b/bdb/build_vxworks/BerkeleyDB/component.cdf
new file mode 100755
index 00000000000..4b3e6f101c3
--- /dev/null
+++ b/bdb/build_vxworks/BerkeleyDB/component.cdf
@@ -0,0 +1,1220 @@
+/* component.cdf - dynamically updated configuration */
+
+/*
+ * NOTE: you may edit this file to alter the configuration
+ * But all non-configuration information, including comments,
+ * will be lost upon rebuilding this project.
+ */
+
+/* Component information */
+
+Component INCLUDE_BERKELEYDB {
+ ENTRY_POINTS ALL_GLOBAL_SYMBOLS
+ MODULES bt_compare.o \
+ bt_conv.o \
+ bt_curadj.o \
+ bt_cursor.o \
+ bt_delete.o \
+ bt_method.o \
+ bt_open.o \
+ bt_put.o \
+ bt_rec.o \
+ bt_reclaim.o \
+ bt_recno.o \
+ bt_rsearch.o \
+ bt_search.o \
+ bt_split.o \
+ bt_stat.o \
+ bt_upgrade.o \
+ bt_verify.o \
+ btree_auto.o \
+ client.o \
+ crdel_auto.o \
+ crdel_rec.o \
+ db.o \
+ db_am.o \
+ db_auto.o \
+ db_byteorder.o \
+ db_cam.o \
+ db_conv.o \
+ db_dispatch.o \
+ db_dup.o \
+ db_err.o \
+ db_getlong.o \
+ db_idspace.o \
+ db_iface.o \
+ db_join.o \
+ db_log2.o \
+ db_meta.o \
+ db_method.o \
+ db_open.o \
+ db_overflow.o \
+ db_pr.o \
+ db_rec.o \
+ db_reclaim.o \
+ db_remove.o \
+ db_rename.o \
+ db_ret.o \
+ db_salloc.o \
+ db_server_clnt.o \
+ db_server_xdr.o \
+ db_shash.o \
+ db_truncate.o \
+ db_upg.o \
+ db_upg_opd.o \
+ db_vrfy.o \
+ db_vrfyutil.o \
+ dbreg.o \
+ dbreg_auto.o \
+ dbreg_rec.o \
+ dbreg_util.o \
+ env_file.o \
+ env_method.o \
+ env_open.o \
+ env_recover.o \
+ env_region.o \
+ fileops_auto.o \
+ fop_basic.o \
+ fop_rec.o \
+ fop_util.o \
+ gen_client.o \
+ gen_client_ret.o \
+ getopt.o \
+ hash.o \
+ hash_auto.o \
+ hash_conv.o \
+ hash_dup.o \
+ hash_func.o \
+ hash_meta.o \
+ hash_method.o \
+ hash_open.o \
+ hash_page.o \
+ hash_rec.o \
+ hash_reclaim.o \
+ hash_stat.o \
+ hash_upgrade.o \
+ hash_verify.o \
+ hmac.o \
+ hsearch.o \
+ lock.o \
+ lock_deadlock.o \
+ lock_method.o \
+ lock_region.o \
+ lock_stat.o \
+ lock_util.o \
+ log.o \
+ log_archive.o \
+ log_compare.o \
+ log_get.o \
+ log_method.o \
+ log_put.o \
+ mp_alloc.o \
+ mp_bh.o \
+ mp_fget.o \
+ mp_fopen.o \
+ mp_fput.o \
+ mp_fset.o \
+ mp_method.o \
+ mp_region.o \
+ mp_register.o \
+ mp_stat.o \
+ mp_sync.o \
+ mp_trickle.o \
+ mut_tas.o \
+ mutex.o \
+ os_alloc.o \
+ os_clock.o \
+ os_dir.o \
+ os_errno.o \
+ os_fid.o \
+ os_fsync.o \
+ os_handle.o \
+ os_id.o \
+ os_method.o \
+ os_oflags.o \
+ os_open.o \
+ os_region.o \
+ os_rename.o \
+ os_root.o \
+ os_rpath.o \
+ os_rw.o \
+ os_seek.o \
+ os_sleep.o \
+ os_spin.o \
+ os_stat.o \
+ os_tmpdir.o \
+ os_unlink.o \
+ os_vx_abs.o \
+ os_vx_config.o \
+ os_vx_map.o \
+ qam.o \
+ qam_auto.o \
+ qam_conv.o \
+ qam_files.o \
+ qam_method.o \
+ qam_open.o \
+ qam_rec.o \
+ qam_stat.o \
+ qam_upgrade.o \
+ qam_verify.o \
+ rep_method.o \
+ rep_record.o \
+ rep_region.o \
+ rep_util.o \
+ sha1.o \
+ snprintf.o \
+ strcasecmp.o \
+ strdup.o \
+ txn.o \
+ txn_auto.o \
+ txn_method.o \
+ txn_rec.o \
+ txn_recover.o \
+ txn_region.o \
+ txn_stat.o \
+ txn_util.o \
+ util_arg.o \
+ util_cache.o \
+ util_log.o \
+ util_sig.o \
+ vsnprintf.o \
+ xa.o \
+ xa_db.o \
+ xa_map.o
+ NAME BerkeleyDB
+ PREF_DOMAIN ANY
+ _INIT_ORDER usrComponentsInit
+}
+
+/* EntryPoint information */
+
+/* Module information */
+
+Module bt_compare.o {
+
+ NAME bt_compare.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../btree/bt_compare.c
+}
+
+Module bt_conv.o {
+
+ NAME bt_conv.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../btree/bt_conv.c
+}
+
+Module bt_curadj.o {
+
+ NAME bt_curadj.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../btree/bt_curadj.c
+}
+
+Module bt_cursor.o {
+
+ NAME bt_cursor.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../btree/bt_cursor.c
+}
+
+Module bt_delete.o {
+
+ NAME bt_delete.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../btree/bt_delete.c
+}
+
+Module bt_method.o {
+
+ NAME bt_method.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../btree/bt_method.c
+}
+
+Module bt_open.o {
+
+ NAME bt_open.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../btree/bt_open.c
+}
+
+Module bt_put.o {
+
+ NAME bt_put.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../btree/bt_put.c
+}
+
+Module bt_rec.o {
+
+ NAME bt_rec.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../btree/bt_rec.c
+}
+
+Module bt_reclaim.o {
+
+ NAME bt_reclaim.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../btree/bt_reclaim.c
+}
+
+Module bt_recno.o {
+
+ NAME bt_recno.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../btree/bt_recno.c
+}
+
+Module bt_rsearch.o {
+
+ NAME bt_rsearch.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../btree/bt_rsearch.c
+}
+
+Module bt_search.o {
+
+ NAME bt_search.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../btree/bt_search.c
+}
+
+Module bt_split.o {
+
+ NAME bt_split.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../btree/bt_split.c
+}
+
+Module bt_stat.o {
+
+ NAME bt_stat.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../btree/bt_stat.c
+}
+
+Module bt_upgrade.o {
+
+ NAME bt_upgrade.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../btree/bt_upgrade.c
+}
+
+Module bt_verify.o {
+
+ NAME bt_verify.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../btree/bt_verify.c
+}
+
+Module btree_auto.o {
+
+ NAME btree_auto.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../btree/btree_auto.c
+}
+
+Module getopt.o {
+
+ NAME getopt.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../clib/getopt.c
+}
+
+Module snprintf.o {
+
+ NAME snprintf.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../clib/snprintf.c
+}
+
+Module strcasecmp.o {
+
+ NAME strcasecmp.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../clib/strcasecmp.c
+}
+
+Module strdup.o {
+
+ NAME strdup.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../clib/strdup.c
+}
+
+Module vsnprintf.o {
+
+ NAME vsnprintf.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../clib/vsnprintf.c
+}
+
+Module db_byteorder.o {
+
+ NAME db_byteorder.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../common/db_byteorder.c
+}
+
+Module db_err.o {
+
+ NAME db_err.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../common/db_err.c
+}
+
+Module db_getlong.o {
+
+ NAME db_getlong.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../common/db_getlong.c
+}
+
+Module db_idspace.o {
+
+ NAME db_idspace.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../common/db_idspace.c
+}
+
+Module db_log2.o {
+
+ NAME db_log2.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../common/db_log2.c
+}
+
+Module util_arg.o {
+
+ NAME util_arg.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../common/util_arg.c
+}
+
+Module util_cache.o {
+
+ NAME util_cache.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../common/util_cache.c
+}
+
+Module util_log.o {
+
+ NAME util_log.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../common/util_log.c
+}
+
+Module util_sig.o {
+
+ NAME util_sig.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../common/util_sig.c
+}
+
+Module crdel_auto.o {
+
+ NAME crdel_auto.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../db/crdel_auto.c
+}
+
+Module crdel_rec.o {
+
+ NAME crdel_rec.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../db/crdel_rec.c
+}
+
+Module db.o {
+
+ NAME db.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../db/db.c
+}
+
+Module db_am.o {
+
+ NAME db_am.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../db/db_am.c
+}
+
+Module db_auto.o {
+
+ NAME db_auto.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../db/db_auto.c
+}
+
+Module db_cam.o {
+
+ NAME db_cam.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../db/db_cam.c
+}
+
+Module db_conv.o {
+
+ NAME db_conv.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../db/db_conv.c
+}
+
+Module db_dispatch.o {
+
+ NAME db_dispatch.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../db/db_dispatch.c
+}
+
+Module db_dup.o {
+
+ NAME db_dup.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../db/db_dup.c
+}
+
+Module db_iface.o {
+
+ NAME db_iface.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../db/db_iface.c
+}
+
+Module db_join.o {
+
+ NAME db_join.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../db/db_join.c
+}
+
+Module db_meta.o {
+
+ NAME db_meta.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../db/db_meta.c
+}
+
+Module db_method.o {
+
+ NAME db_method.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../db/db_method.c
+}
+
+Module db_open.o {
+
+ NAME db_open.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../db/db_open.c
+}
+
+Module db_overflow.o {
+
+ NAME db_overflow.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../db/db_overflow.c
+}
+
+Module db_pr.o {
+
+ NAME db_pr.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../db/db_pr.c
+}
+
+Module db_rec.o {
+
+ NAME db_rec.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../db/db_rec.c
+}
+
+Module db_reclaim.o {
+
+ NAME db_reclaim.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../db/db_reclaim.c
+}
+
+Module db_remove.o {
+
+ NAME db_remove.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../db/db_remove.c
+}
+
+Module db_rename.o {
+
+ NAME db_rename.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../db/db_rename.c
+}
+
+Module db_ret.o {
+
+ NAME db_ret.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../db/db_ret.c
+}
+
+Module db_truncate.o {
+
+ NAME db_truncate.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../db/db_truncate.c
+}
+
+Module db_upg.o {
+
+ NAME db_upg.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../db/db_upg.c
+}
+
+Module db_upg_opd.o {
+
+ NAME db_upg_opd.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../db/db_upg_opd.c
+}
+
+Module db_vrfy.o {
+
+ NAME db_vrfy.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../db/db_vrfy.c
+}
+
+Module db_vrfyutil.o {
+
+ NAME db_vrfyutil.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../db/db_vrfyutil.c
+}
+
+Module dbreg.o {
+
+ NAME dbreg.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../dbreg/dbreg.c
+}
+
+Module dbreg_auto.o {
+
+ NAME dbreg_auto.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../dbreg/dbreg_auto.c
+}
+
+Module dbreg_rec.o {
+
+ NAME dbreg_rec.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../dbreg/dbreg_rec.c
+}
+
+Module dbreg_util.o {
+
+ NAME dbreg_util.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../dbreg/dbreg_util.c
+}
+
+Module db_salloc.o {
+
+ NAME db_salloc.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../env/db_salloc.c
+}
+
+Module db_shash.o {
+
+ NAME db_shash.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../env/db_shash.c
+}
+
+Module env_file.o {
+
+ NAME env_file.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../env/env_file.c
+}
+
+Module env_method.o {
+
+ NAME env_method.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../env/env_method.c
+}
+
+Module env_open.o {
+
+ NAME env_open.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../env/env_open.c
+}
+
+Module env_recover.o {
+
+ NAME env_recover.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../env/env_recover.c
+}
+
+Module env_region.o {
+
+ NAME env_region.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../env/env_region.c
+}
+
+Module fileops_auto.o {
+
+ NAME fileops_auto.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../fileops/fileops_auto.c
+}
+
+Module fop_basic.o {
+
+ NAME fop_basic.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../fileops/fop_basic.c
+}
+
+Module fop_rec.o {
+
+ NAME fop_rec.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../fileops/fop_rec.c
+}
+
+Module fop_util.o {
+
+ NAME fop_util.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../fileops/fop_util.c
+}
+
+Module hash.o {
+
+ NAME hash.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../hash/hash.c
+}
+
+Module hash_auto.o {
+
+ NAME hash_auto.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../hash/hash_auto.c
+}
+
+Module hash_conv.o {
+
+ NAME hash_conv.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../hash/hash_conv.c
+}
+
+Module hash_dup.o {
+
+ NAME hash_dup.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../hash/hash_dup.c
+}
+
+Module hash_func.o {
+
+ NAME hash_func.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../hash/hash_func.c
+}
+
+Module hash_meta.o {
+
+ NAME hash_meta.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../hash/hash_meta.c
+}
+
+Module hash_method.o {
+
+ NAME hash_method.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../hash/hash_method.c
+}
+
+Module hash_open.o {
+
+ NAME hash_open.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../hash/hash_open.c
+}
+
+Module hash_page.o {
+
+ NAME hash_page.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../hash/hash_page.c
+}
+
+Module hash_rec.o {
+
+ NAME hash_rec.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../hash/hash_rec.c
+}
+
+Module hash_reclaim.o {
+
+ NAME hash_reclaim.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../hash/hash_reclaim.c
+}
+
+Module hash_stat.o {
+
+ NAME hash_stat.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../hash/hash_stat.c
+}
+
+Module hash_upgrade.o {
+
+ NAME hash_upgrade.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../hash/hash_upgrade.c
+}
+
+Module hash_verify.o {
+
+ NAME hash_verify.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../hash/hash_verify.c
+}
+
+Module hmac.o {
+
+ NAME hmac.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../hmac/hmac.c
+}
+
+Module sha1.o {
+
+ NAME sha1.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../hmac/sha1.c
+}
+
+Module hsearch.o {
+
+ NAME hsearch.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../hsearch/hsearch.c
+}
+
+Module lock.o {
+
+ NAME lock.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../lock/lock.c
+}
+
+Module lock_deadlock.o {
+
+ NAME lock_deadlock.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../lock/lock_deadlock.c
+}
+
+Module lock_method.o {
+
+ NAME lock_method.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../lock/lock_method.c
+}
+
+Module lock_region.o {
+
+ NAME lock_region.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../lock/lock_region.c
+}
+
+Module lock_stat.o {
+
+ NAME lock_stat.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../lock/lock_stat.c
+}
+
+Module lock_util.o {
+
+ NAME lock_util.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../lock/lock_util.c
+}
+
+Module log.o {
+
+ NAME log.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../log/log.c
+}
+
+Module log_archive.o {
+
+ NAME log_archive.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../log/log_archive.c
+}
+
+Module log_compare.o {
+
+ NAME log_compare.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../log/log_compare.c
+}
+
+Module log_get.o {
+
+ NAME log_get.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../log/log_get.c
+}
+
+Module log_method.o {
+
+ NAME log_method.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../log/log_method.c
+}
+
+Module log_put.o {
+
+ NAME log_put.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../log/log_put.c
+}
+
+Module mp_alloc.o {
+
+ NAME mp_alloc.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../mp/mp_alloc.c
+}
+
+Module mp_bh.o {
+
+ NAME mp_bh.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../mp/mp_bh.c
+}
+
+Module mp_fget.o {
+
+ NAME mp_fget.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../mp/mp_fget.c
+}
+
+Module mp_fopen.o {
+
+ NAME mp_fopen.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../mp/mp_fopen.c
+}
+
+Module mp_fput.o {
+
+ NAME mp_fput.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../mp/mp_fput.c
+}
+
+Module mp_fset.o {
+
+ NAME mp_fset.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../mp/mp_fset.c
+}
+
+Module mp_method.o {
+
+ NAME mp_method.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../mp/mp_method.c
+}
+
+Module mp_region.o {
+
+ NAME mp_region.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../mp/mp_region.c
+}
+
+Module mp_register.o {
+
+ NAME mp_register.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../mp/mp_register.c
+}
+
+Module mp_stat.o {
+
+ NAME mp_stat.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../mp/mp_stat.c
+}
+
+Module mp_sync.o {
+
+ NAME mp_sync.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../mp/mp_sync.c
+}
+
+Module mp_trickle.o {
+
+ NAME mp_trickle.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../mp/mp_trickle.c
+}
+
+Module mut_tas.o {
+
+ NAME mut_tas.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../mutex/mut_tas.c
+}
+
+Module mutex.o {
+
+ NAME mutex.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../mutex/mutex.c
+}
+
+Module os_alloc.o {
+
+ NAME os_alloc.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../os/os_alloc.c
+}
+
+Module os_clock.o {
+
+ NAME os_clock.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../os/os_clock.c
+}
+
+Module os_dir.o {
+
+ NAME os_dir.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../os/os_dir.c
+}
+
+Module os_errno.o {
+
+ NAME os_errno.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../os/os_errno.c
+}
+
+Module os_fid.o {
+
+ NAME os_fid.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../os/os_fid.c
+}
+
+Module os_fsync.o {
+
+ NAME os_fsync.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../os/os_fsync.c
+}
+
+Module os_handle.o {
+
+ NAME os_handle.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../os/os_handle.c
+}
+
+Module os_id.o {
+
+ NAME os_id.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../os/os_id.c
+}
+
+Module os_method.o {
+
+ NAME os_method.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../os/os_method.c
+}
+
+Module os_oflags.o {
+
+ NAME os_oflags.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../os/os_oflags.c
+}
+
+Module os_open.o {
+
+ NAME os_open.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../os/os_open.c
+}
+
+Module os_region.o {
+
+ NAME os_region.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../os/os_region.c
+}
+
+Module os_rename.o {
+
+ NAME os_rename.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../os/os_rename.c
+}
+
+Module os_root.o {
+
+ NAME os_root.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../os/os_root.c
+}
+
+Module os_rpath.o {
+
+ NAME os_rpath.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../os/os_rpath.c
+}
+
+Module os_rw.o {
+
+ NAME os_rw.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../os/os_rw.c
+}
+
+Module os_seek.o {
+
+ NAME os_seek.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../os/os_seek.c
+}
+
+Module os_sleep.o {
+
+ NAME os_sleep.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../os/os_sleep.c
+}
+
+Module os_spin.o {
+
+ NAME os_spin.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../os/os_spin.c
+}
+
+Module os_stat.o {
+
+ NAME os_stat.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../os/os_stat.c
+}
+
+Module os_tmpdir.o {
+
+ NAME os_tmpdir.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../os/os_tmpdir.c
+}
+
+Module os_unlink.o {
+
+ NAME os_unlink.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../os/os_unlink.c
+}
+
+Module os_vx_abs.o {
+
+ NAME os_vx_abs.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../os_vxworks/os_vx_abs.c
+}
+
+Module os_vx_config.o {
+
+ NAME os_vx_config.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../os_vxworks/os_vx_config.c
+}
+
+Module os_vx_map.o {
+
+ NAME os_vx_map.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../os_vxworks/os_vx_map.c
+}
+
+Module qam.o {
+
+ NAME qam.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../qam/qam.c
+}
+
+Module qam_auto.o {
+
+ NAME qam_auto.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../qam/qam_auto.c
+}
+
+Module qam_conv.o {
+
+ NAME qam_conv.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../qam/qam_conv.c
+}
+
+Module qam_files.o {
+
+ NAME qam_files.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../qam/qam_files.c
+}
+
+Module qam_method.o {
+
+ NAME qam_method.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../qam/qam_method.c
+}
+
+Module qam_open.o {
+
+ NAME qam_open.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../qam/qam_open.c
+}
+
+Module qam_rec.o {
+
+ NAME qam_rec.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../qam/qam_rec.c
+}
+
+Module qam_stat.o {
+
+ NAME qam_stat.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../qam/qam_stat.c
+}
+
+Module qam_upgrade.o {
+
+ NAME qam_upgrade.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../qam/qam_upgrade.c
+}
+
+Module qam_verify.o {
+
+ NAME qam_verify.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../qam/qam_verify.c
+}
+
+Module rep_method.o {
+
+ NAME rep_method.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../rep/rep_method.c
+}
+
+Module rep_record.o {
+
+ NAME rep_record.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../rep/rep_record.c
+}
+
+Module rep_region.o {
+
+ NAME rep_region.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../rep/rep_region.c
+}
+
+Module rep_util.o {
+
+ NAME rep_util.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../rep/rep_util.c
+}
+
+Module client.o {
+
+ NAME client.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../rpc_client/client.c
+}
+
+Module db_server_clnt.o {
+
+ NAME db_server_clnt.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../rpc_client/db_server_clnt.c
+}
+
+Module gen_client.o {
+
+ NAME gen_client.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../rpc_client/gen_client.c
+}
+
+Module gen_client_ret.o {
+
+ NAME gen_client_ret.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../rpc_client/gen_client_ret.c
+}
+
+Module db_server_xdr.o {
+
+ NAME db_server_xdr.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../rpc_server/c/db_server_xdr.c
+}
+
+Module txn.o {
+
+ NAME txn.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../txn/txn.c
+}
+
+Module txn_auto.o {
+
+ NAME txn_auto.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../txn/txn_auto.c
+}
+
+Module txn_method.o {
+
+ NAME txn_method.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../txn/txn_method.c
+}
+
+Module txn_rec.o {
+
+ NAME txn_rec.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../txn/txn_rec.c
+}
+
+Module txn_recover.o {
+
+ NAME txn_recover.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../txn/txn_recover.c
+}
+
+Module txn_region.o {
+
+ NAME txn_region.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../txn/txn_region.c
+}
+
+Module txn_stat.o {
+
+ NAME txn_stat.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../txn/txn_stat.c
+}
+
+Module txn_util.o {
+
+ NAME txn_util.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../txn/txn_util.c
+}
+
+Module xa.o {
+
+ NAME xa.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../xa/xa.c
+}
+
+Module xa_db.o {
+
+ NAME xa_db.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../xa/xa_db.c
+}
+
+Module xa_map.o {
+
+ NAME xa_map.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../xa/xa_map.c
+}
+
+/* Parameter information */
+
diff --git a/bdb/build_vxworks/BerkeleyDB/component.wpj b/bdb/build_vxworks/BerkeleyDB/component.wpj
new file mode 100755
index 00000000000..3207bb293e8
--- /dev/null
+++ b/bdb/build_vxworks/BerkeleyDB/component.wpj
@@ -0,0 +1,6764 @@
+Document file - DO NOT EDIT
+
+<BEGIN> CORE_INFO_TYPE
+::prj_component
+<END>
+
+<BEGIN> CORE_INFO_VERSION
+AE1.0
+<END>
+
+<BEGIN> BUILD__CURRENT
+PENTIUM2gnu.debug
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_CURRENT_TARGET
+default
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_DEFAULTFORCPU
+0
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_compare.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_compare.c_objects
+bt_compare.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_compare.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_conv.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_conv.c_objects
+bt_conv.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_conv.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_curadj.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_curadj.c_objects
+bt_curadj.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_curadj.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_cursor.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_cursor.c_objects
+bt_cursor.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_cursor.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_delete.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_delete.c_objects
+bt_delete.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_delete.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_method.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_method.c_objects
+bt_method.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_method.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_open.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_open.c_objects
+bt_open.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_open.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_put.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_put.c_objects
+bt_put.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_put.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_rec.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_rec.c_objects
+bt_rec.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_rec.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_reclaim.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_reclaim.c_objects
+bt_reclaim.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_reclaim.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_recno.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_recno.c_objects
+bt_recno.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_recno.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_rsearch.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_rsearch.c_objects
+bt_rsearch.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_rsearch.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_search.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_search.c_objects
+bt_search.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_search.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_split.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_split.c_objects
+bt_split.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_split.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_stat.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_stat.c_objects
+bt_stat.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_stat.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_upgrade.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_upgrade.c_objects
+bt_upgrade.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_upgrade.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_verify.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_verify.c_objects
+bt_verify.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_verify.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../btree/btree_auto.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../btree/btree_auto.c_objects
+btree_auto.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../btree/btree_auto.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../clib/getopt.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../clib/getopt.c_objects
+getopt.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../clib/getopt.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../clib/snprintf.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../clib/snprintf.c_objects
+snprintf.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../clib/snprintf.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../clib/strcasecmp.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../clib/strcasecmp.c_objects
+strcasecmp.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../clib/strcasecmp.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../clib/strdup.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../clib/strdup.c_objects
+strdup.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../clib/strdup.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../clib/vsnprintf.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../clib/vsnprintf.c_objects
+vsnprintf.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../clib/vsnprintf.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../common/db_byteorder.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../common/db_byteorder.c_objects
+db_byteorder.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../common/db_byteorder.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../common/db_err.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../common/db_err.c_objects
+db_err.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../common/db_err.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../common/db_getlong.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../common/db_getlong.c_objects
+db_getlong.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../common/db_getlong.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../common/db_idspace.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../common/db_idspace.c_objects
+db_idspace.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../common/db_idspace.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../common/db_log2.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../common/db_log2.c_objects
+db_log2.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../common/db_log2.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../common/util_arg.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../common/util_arg.c_objects
+util_arg.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../common/util_arg.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../common/util_cache.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../common/util_cache.c_objects
+util_cache.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../common/util_cache.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../common/util_log.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../common/util_log.c_objects
+util_log.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../common/util_log.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../common/util_sig.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../common/util_sig.c_objects
+util_sig.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../common/util_sig.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/crdel_auto.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/crdel_auto.c_objects
+crdel_auto.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/crdel_auto.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/crdel_rec.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/crdel_rec.c_objects
+crdel_rec.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/crdel_rec.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/db.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/db.c_objects
+db.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/db.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/db_am.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/db_am.c_objects
+db_am.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/db_am.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/db_auto.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/db_auto.c_objects
+db_auto.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/db_auto.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/db_cam.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/db_cam.c_objects
+db_cam.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/db_cam.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/db_conv.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/db_conv.c_objects
+db_conv.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/db_conv.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/db_dispatch.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/db_dispatch.c_objects
+db_dispatch.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/db_dispatch.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/db_dup.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/db_dup.c_objects
+db_dup.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/db_dup.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/db_iface.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/db_iface.c_objects
+db_iface.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/db_iface.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/db_join.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/db_join.c_objects
+db_join.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/db_join.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/db_meta.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/db_meta.c_objects
+db_meta.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/db_meta.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/db_method.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/db_method.c_objects
+db_method.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/db_method.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/db_open.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/db_open.c_objects
+db_open.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/db_open.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/db_overflow.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/db_overflow.c_objects
+db_overflow.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/db_overflow.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/db_pr.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/db_pr.c_objects
+db_pr.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/db_pr.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/db_rec.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/db_rec.c_objects
+db_rec.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/db_rec.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/db_reclaim.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/db_reclaim.c_objects
+db_reclaim.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/db_reclaim.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/db_remove.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/db_remove.c_objects
+db_remove.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/db_remove.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/db_rename.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/db_rename.c_objects
+db_rename.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/db_rename.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/db_ret.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/db_ret.c_objects
+db_ret.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/db_ret.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/db_truncate.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/db_truncate.c_objects
+db_truncate.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/db_truncate.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/db_upg.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/db_upg.c_objects
+db_upg.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/db_upg.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/db_upg_opd.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/db_upg_opd.c_objects
+db_upg_opd.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/db_upg_opd.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/db_vrfy.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/db_vrfy.c_objects
+db_vrfy.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/db_vrfy.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/db_vrfyutil.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/db_vrfyutil.c_objects
+db_vrfyutil.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/db_vrfyutil.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../dbreg/dbreg.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../dbreg/dbreg.c_objects
+dbreg.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../dbreg/dbreg.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../dbreg/dbreg_auto.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../dbreg/dbreg_auto.c_objects
+dbreg_auto.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../dbreg/dbreg_auto.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../dbreg/dbreg_rec.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../dbreg/dbreg_rec.c_objects
+dbreg_rec.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../dbreg/dbreg_rec.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../dbreg/dbreg_util.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../dbreg/dbreg_util.c_objects
+dbreg_util.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../dbreg/dbreg_util.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../env/db_salloc.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../env/db_salloc.c_objects
+db_salloc.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../env/db_salloc.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../env/db_shash.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../env/db_shash.c_objects
+db_shash.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../env/db_shash.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../env/env_file.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../env/env_file.c_objects
+env_file.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../env/env_file.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../env/env_method.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../env/env_method.c_objects
+env_method.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../env/env_method.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../env/env_open.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../env/env_open.c_objects
+env_open.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../env/env_open.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../env/env_recover.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../env/env_recover.c_objects
+env_recover.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../env/env_recover.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../env/env_region.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../env/env_region.c_objects
+env_region.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../env/env_region.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../fileops/fileops_auto.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../fileops/fileops_auto.c_objects
+fileops_auto.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../fileops/fileops_auto.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../fileops/fop_basic.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../fileops/fop_basic.c_objects
+fop_basic.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../fileops/fop_basic.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../fileops/fop_rec.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../fileops/fop_rec.c_objects
+fop_rec.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../fileops/fop_rec.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../fileops/fop_util.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../fileops/fop_util.c_objects
+fop_util.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../fileops/fop_util.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../hash/hash.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../hash/hash.c_objects
+hash.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../hash/hash.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_auto.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_auto.c_objects
+hash_auto.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_auto.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_conv.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_conv.c_objects
+hash_conv.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_conv.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_dup.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_dup.c_objects
+hash_dup.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_dup.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_func.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_func.c_objects
+hash_func.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_func.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_meta.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_meta.c_objects
+hash_meta.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_meta.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_method.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_method.c_objects
+hash_method.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_method.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_open.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_open.c_objects
+hash_open.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_open.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_page.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_page.c_objects
+hash_page.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_page.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_rec.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_rec.c_objects
+hash_rec.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_rec.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_reclaim.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_reclaim.c_objects
+hash_reclaim.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_reclaim.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_stat.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_stat.c_objects
+hash_stat.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_stat.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_upgrade.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_upgrade.c_objects
+hash_upgrade.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_upgrade.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_verify.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_verify.c_objects
+hash_verify.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_verify.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../hmac/hmac.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../hmac/hmac.c_objects
+hmac.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../hmac/hmac.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../hmac/sha1.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../hmac/sha1.c_objects
+sha1.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../hmac/sha1.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../hsearch/hsearch.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../hsearch/hsearch.c_objects
+hsearch.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../hsearch/hsearch.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../lock/lock.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../lock/lock.c_objects
+lock.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../lock/lock.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../lock/lock_deadlock.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../lock/lock_deadlock.c_objects
+lock_deadlock.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../lock/lock_deadlock.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../lock/lock_method.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../lock/lock_method.c_objects
+lock_method.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../lock/lock_method.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../lock/lock_region.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../lock/lock_region.c_objects
+lock_region.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../lock/lock_region.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../lock/lock_stat.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../lock/lock_stat.c_objects
+lock_stat.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../lock/lock_stat.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../lock/lock_util.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../lock/lock_util.c_objects
+lock_util.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../lock/lock_util.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../log/log.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../log/log.c_objects
+log.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../log/log.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../log/log_archive.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../log/log_archive.c_objects
+log_archive.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../log/log_archive.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../log/log_compare.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../log/log_compare.c_objects
+log_compare.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../log/log_compare.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../log/log_get.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../log/log_get.c_objects
+log_get.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../log/log_get.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../log/log_method.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../log/log_method.c_objects
+log_method.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../log/log_method.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../log/log_put.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../log/log_put.c_objects
+log_put.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../log/log_put.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_alloc.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_alloc.c_objects
+mp_alloc.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_alloc.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_bh.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_bh.c_objects
+mp_bh.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_bh.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_fget.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_fget.c_objects
+mp_fget.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_fget.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_fopen.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_fopen.c_objects
+mp_fopen.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_fopen.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_fput.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_fput.c_objects
+mp_fput.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_fput.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_fset.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_fset.c_objects
+mp_fset.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_fset.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_method.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_method.c_objects
+mp_method.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_method.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_region.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_region.c_objects
+mp_region.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_region.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_register.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_register.c_objects
+mp_register.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_register.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_stat.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_stat.c_objects
+mp_stat.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_stat.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_sync.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_sync.c_objects
+mp_sync.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_sync.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_trickle.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_trickle.c_objects
+mp_trickle.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_trickle.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../mutex/mut_tas.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../mutex/mut_tas.c_objects
+mut_tas.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../mutex/mut_tas.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../mutex/mutex.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../mutex/mutex.c_objects
+mutex.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../mutex/mutex.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os/os_alloc.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os/os_alloc.c_objects
+os_alloc.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os/os_alloc.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os/os_clock.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os/os_clock.c_objects
+os_clock.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os/os_clock.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os/os_dir.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os/os_dir.c_objects
+os_dir.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os/os_dir.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os/os_errno.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os/os_errno.c_objects
+os_errno.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os/os_errno.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os/os_fid.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os/os_fid.c_objects
+os_fid.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os/os_fid.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os/os_fsync.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os/os_fsync.c_objects
+os_fsync.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os/os_fsync.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os/os_handle.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os/os_handle.c_objects
+os_handle.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os/os_handle.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os/os_id.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os/os_id.c_objects
+os_id.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os/os_id.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os/os_method.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os/os_method.c_objects
+os_method.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os/os_method.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os/os_oflags.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os/os_oflags.c_objects
+os_oflags.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os/os_oflags.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os/os_open.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os/os_open.c_objects
+os_open.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os/os_open.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os/os_region.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os/os_region.c_objects
+os_region.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os/os_region.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os/os_rename.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os/os_rename.c_objects
+os_rename.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os/os_rename.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os/os_root.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os/os_root.c_objects
+os_root.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os/os_root.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os/os_rpath.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os/os_rpath.c_objects
+os_rpath.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os/os_rpath.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os/os_rw.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os/os_rw.c_objects
+os_rw.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os/os_rw.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os/os_seek.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os/os_seek.c_objects
+os_seek.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os/os_seek.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os/os_sleep.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os/os_sleep.c_objects
+os_sleep.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os/os_sleep.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os/os_spin.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os/os_spin.c_objects
+os_spin.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os/os_spin.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os/os_stat.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os/os_stat.c_objects
+os_stat.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os/os_stat.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os/os_tmpdir.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os/os_tmpdir.c_objects
+os_tmpdir.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os/os_tmpdir.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os/os_unlink.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os/os_unlink.c_objects
+os_unlink.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os/os_unlink.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os_vxworks/os_vx_abs.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os_vxworks/os_vx_abs.c_objects
+os_vx_abs.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os_vxworks/os_vx_abs.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os_vxworks/os_vx_config.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os_vxworks/os_vx_config.c_objects
+os_vx_config.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os_vxworks/os_vx_config.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os_vxworks/os_vx_map.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os_vxworks/os_vx_map.c_objects
+os_vx_map.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os_vxworks/os_vx_map.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../qam/qam.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../qam/qam.c_objects
+qam.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../qam/qam.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../qam/qam_auto.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../qam/qam_auto.c_objects
+qam_auto.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../qam/qam_auto.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../qam/qam_conv.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../qam/qam_conv.c_objects
+qam_conv.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../qam/qam_conv.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../qam/qam_files.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../qam/qam_files.c_objects
+qam_files.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../qam/qam_files.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../qam/qam_method.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../qam/qam_method.c_objects
+qam_method.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../qam/qam_method.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../qam/qam_open.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../qam/qam_open.c_objects
+qam_open.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../qam/qam_open.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../qam/qam_rec.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../qam/qam_rec.c_objects
+qam_rec.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../qam/qam_rec.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../qam/qam_stat.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../qam/qam_stat.c_objects
+qam_stat.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../qam/qam_stat.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../qam/qam_upgrade.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../qam/qam_upgrade.c_objects
+qam_upgrade.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../qam/qam_upgrade.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../qam/qam_verify.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../qam/qam_verify.c_objects
+qam_verify.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../qam/qam_verify.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../rep/rep_method.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../rep/rep_method.c_objects
+rep_method.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../rep/rep_method.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../rep/rep_record.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../rep/rep_record.c_objects
+rep_record.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../rep/rep_record.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../rep/rep_region.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../rep/rep_region.c_objects
+rep_region.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../rep/rep_region.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../rep/rep_util.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../rep/rep_util.c_objects
+rep_util.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../rep/rep_util.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../rpc_client/client.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../rpc_client/client.c_objects
+client.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../rpc_client/client.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../rpc_client/db_server_clnt.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../rpc_client/db_server_clnt.c_objects
+db_server_clnt.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../rpc_client/db_server_clnt.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../rpc_client/gen_client.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../rpc_client/gen_client.c_objects
+gen_client.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../rpc_client/gen_client.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../rpc_client/gen_client_ret.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../rpc_client/gen_client_ret.c_objects
+gen_client_ret.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../rpc_client/gen_client_ret.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../rpc_server/c/db_server_xdr.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../rpc_server/c/db_server_xdr.c_objects
+db_server_xdr.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../rpc_server/c/db_server_xdr.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../txn/txn.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../txn/txn.c_objects
+txn.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../txn/txn.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../txn/txn_auto.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../txn/txn_auto.c_objects
+txn_auto.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../txn/txn_auto.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../txn/txn_method.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../txn/txn_method.c_objects
+txn_method.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../txn/txn_method.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../txn/txn_rec.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../txn/txn_rec.c_objects
+txn_rec.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../txn/txn_rec.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../txn/txn_recover.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../txn/txn_recover.c_objects
+txn_recover.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../txn/txn_recover.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../txn/txn_region.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../txn/txn_region.c_objects
+txn_region.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../txn/txn_region.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../txn/txn_stat.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../txn/txn_stat.c_objects
+txn_stat.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../txn/txn_stat.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../txn/txn_util.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../txn/txn_util.c_objects
+txn_util.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../txn/txn_util.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../xa/xa.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../xa/xa.c_objects
+xa.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../xa/xa.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../xa/xa_db.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../xa/xa_db.c_objects
+xa_db.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../xa/xa_db.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../xa/xa_map.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../xa/xa_map.c_objects
+xa_map.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../xa/xa_map.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/compConfig.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/compConfig.c_objects
+compConfig.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/compConfig.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_AR
+arpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_AS
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_CC
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_CFLAGS
+-mcpu=pentiumpro \
+ -march=pentiumpro \
+ -ansi \
+ -DRW_MULTI_THREAD \
+ -D_REENTRANT \
+ -g \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -MD \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM2 \
+ -I$(PRJ_DIR)/.. \
+ -I$(PRJ_DIR)/../.. \
+ -DDEBUG \
+ -DDIAGNOSTIC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_CFLAGS_AS
+-mcpu=pentiumpro \
+ -march=pentiumpro \
+ -ansi \
+ -g \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -P \
+ -x \
+ assembler-with-cpp \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM2
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_CPP
+ccpentium -E -P
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_CPPFILT
+c++filtpentium --strip-underscores
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_LD
+ldpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_LDFLAGS
+-X
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_LDPARTIAL
+ccpentium \
+ -B$(WIND_BASE)/host/$(WIND_HOST_TYPE)/lib/gcc-lib/ \
+ -nostdlib \
+ -r \
+ -Wl,-X
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_LD_PARTIAL_FLAGS
+-X -r
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_NM
+nmpentium -g
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_OPTION_DEFINE_MACRO
+-D
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_OPTION_GENERATE_DEPENDENCY_FILE
+-MD
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_OPTION_INCLUDE_DIR
+-I
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_RELEASE
+0
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_SIZE
+sizepentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_RELEASE
+0
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_RO_DEPEND_PATH
+$(WIND_BASE)/target/h/
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_TC
+::tc_PENTIUM2gnu.debug
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_DEFAULTFORCPU
+0
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../btree/bt_compare.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../btree/bt_compare.c_objects
+bt_compare.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../btree/bt_compare.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../btree/bt_conv.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../btree/bt_conv.c_objects
+bt_conv.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../btree/bt_conv.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../btree/bt_curadj.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../btree/bt_curadj.c_objects
+bt_curadj.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../btree/bt_curadj.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../btree/bt_cursor.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../btree/bt_cursor.c_objects
+bt_cursor.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../btree/bt_cursor.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../btree/bt_delete.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../btree/bt_delete.c_objects
+bt_delete.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../btree/bt_delete.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../btree/bt_method.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../btree/bt_method.c_objects
+bt_method.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../btree/bt_method.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../btree/bt_open.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../btree/bt_open.c_objects
+bt_open.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../btree/bt_open.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../btree/bt_put.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../btree/bt_put.c_objects
+bt_put.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../btree/bt_put.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../btree/bt_rec.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../btree/bt_rec.c_objects
+bt_rec.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../btree/bt_rec.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../btree/bt_reclaim.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../btree/bt_reclaim.c_objects
+bt_reclaim.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../btree/bt_reclaim.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../btree/bt_recno.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../btree/bt_recno.c_objects
+bt_recno.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../btree/bt_recno.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../btree/bt_rsearch.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../btree/bt_rsearch.c_objects
+bt_rsearch.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../btree/bt_rsearch.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../btree/bt_search.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../btree/bt_search.c_objects
+bt_search.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../btree/bt_search.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../btree/bt_split.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../btree/bt_split.c_objects
+bt_split.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../btree/bt_split.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../btree/bt_stat.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../btree/bt_stat.c_objects
+bt_stat.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../btree/bt_stat.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../btree/bt_upgrade.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../btree/bt_upgrade.c_objects
+bt_upgrade.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../btree/bt_upgrade.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../btree/bt_verify.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../btree/bt_verify.c_objects
+bt_verify.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../btree/bt_verify.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../btree/btree_auto.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../btree/btree_auto.c_objects
+btree_auto.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../btree/btree_auto.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../clib/getopt.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../clib/getopt.c_objects
+getopt.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../clib/getopt.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../clib/snprintf.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../clib/snprintf.c_objects
+snprintf.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../clib/snprintf.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../clib/strcasecmp.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../clib/strcasecmp.c_objects
+strcasecmp.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../clib/strcasecmp.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../clib/strdup.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../clib/strdup.c_objects
+strdup.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../clib/strdup.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../clib/vsnprintf.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../clib/vsnprintf.c_objects
+vsnprintf.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../clib/vsnprintf.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../common/db_byteorder.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../common/db_byteorder.c_objects
+db_byteorder.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../common/db_byteorder.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../common/db_err.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../common/db_err.c_objects
+db_err.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../common/db_err.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../common/db_getlong.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../common/db_getlong.c_objects
+db_getlong.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../common/db_getlong.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../common/db_idspace.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../common/db_idspace.c_objects
+db_idspace.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../common/db_idspace.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../common/db_log2.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../common/db_log2.c_objects
+db_log2.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../common/db_log2.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../common/util_arg.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../common/util_arg.c_objects
+util_arg.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../common/util_arg.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../common/util_cache.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../common/util_cache.c_objects
+util_cache.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../common/util_cache.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../common/util_log.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../common/util_log.c_objects
+util_log.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../common/util_log.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../common/util_sig.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../common/util_sig.c_objects
+util_sig.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../common/util_sig.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/crdel_auto.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/crdel_auto.c_objects
+crdel_auto.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/crdel_auto.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/crdel_rec.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/crdel_rec.c_objects
+crdel_rec.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/crdel_rec.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/db.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/db.c_objects
+db.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/db.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/db_am.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/db_am.c_objects
+db_am.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/db_am.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/db_auto.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/db_auto.c_objects
+db_auto.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/db_auto.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/db_cam.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/db_cam.c_objects
+db_cam.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/db_cam.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/db_conv.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/db_conv.c_objects
+db_conv.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/db_conv.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/db_dispatch.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/db_dispatch.c_objects
+db_dispatch.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/db_dispatch.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/db_dup.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/db_dup.c_objects
+db_dup.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/db_dup.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/db_iface.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/db_iface.c_objects
+db_iface.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/db_iface.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/db_join.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/db_join.c_objects
+db_join.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/db_join.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/db_meta.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/db_meta.c_objects
+db_meta.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/db_meta.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/db_method.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/db_method.c_objects
+db_method.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/db_method.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/db_open.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/db_open.c_objects
+db_open.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/db_open.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/db_overflow.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/db_overflow.c_objects
+db_overflow.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/db_overflow.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/db_pr.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/db_pr.c_objects
+db_pr.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/db_pr.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/db_rec.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/db_rec.c_objects
+db_rec.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/db_rec.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/db_reclaim.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/db_reclaim.c_objects
+db_reclaim.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/db_reclaim.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/db_remove.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/db_remove.c_objects
+db_remove.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/db_remove.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/db_rename.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/db_rename.c_objects
+db_rename.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/db_rename.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/db_ret.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/db_ret.c_objects
+db_ret.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/db_ret.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/db_truncate.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/db_truncate.c_objects
+db_truncate.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/db_truncate.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/db_upg.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/db_upg.c_objects
+db_upg.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/db_upg.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/db_upg_opd.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/db_upg_opd.c_objects
+db_upg_opd.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/db_upg_opd.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/db_vrfy.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/db_vrfy.c_objects
+db_vrfy.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/db_vrfy.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/db_vrfyutil.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/db_vrfyutil.c_objects
+db_vrfyutil.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/db_vrfyutil.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../dbreg/dbreg.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../dbreg/dbreg.c_objects
+dbreg.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../dbreg/dbreg.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../dbreg/dbreg_auto.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../dbreg/dbreg_auto.c_objects
+dbreg_auto.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../dbreg/dbreg_auto.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../dbreg/dbreg_rec.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../dbreg/dbreg_rec.c_objects
+dbreg_rec.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../dbreg/dbreg_rec.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../dbreg/dbreg_util.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../dbreg/dbreg_util.c_objects
+dbreg_util.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../dbreg/dbreg_util.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../env/db_salloc.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../env/db_salloc.c_objects
+db_salloc.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../env/db_salloc.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../env/db_shash.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../env/db_shash.c_objects
+db_shash.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../env/db_shash.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../env/env_file.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../env/env_file.c_objects
+env_file.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../env/env_file.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../env/env_method.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../env/env_method.c_objects
+env_method.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../env/env_method.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../env/env_open.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../env/env_open.c_objects
+env_open.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../env/env_open.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../env/env_recover.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../env/env_recover.c_objects
+env_recover.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../env/env_recover.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../env/env_region.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../env/env_region.c_objects
+env_region.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../env/env_region.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../fileops/fileops_auto.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../fileops/fileops_auto.c_objects
+fileops_auto.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../fileops/fileops_auto.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../fileops/fop_basic.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../fileops/fop_basic.c_objects
+fop_basic.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../fileops/fop_basic.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../fileops/fop_rec.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../fileops/fop_rec.c_objects
+fop_rec.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../fileops/fop_rec.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../fileops/fop_util.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../fileops/fop_util.c_objects
+fop_util.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../fileops/fop_util.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../hash/hash.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../hash/hash.c_objects
+hash.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../hash/hash.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../hash/hash_auto.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../hash/hash_auto.c_objects
+hash_auto.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../hash/hash_auto.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../hash/hash_conv.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../hash/hash_conv.c_objects
+hash_conv.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../hash/hash_conv.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../hash/hash_dup.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../hash/hash_dup.c_objects
+hash_dup.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../hash/hash_dup.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../hash/hash_func.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../hash/hash_func.c_objects
+hash_func.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../hash/hash_func.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../hash/hash_meta.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../hash/hash_meta.c_objects
+hash_meta.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../hash/hash_meta.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../hash/hash_method.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../hash/hash_method.c_objects
+hash_method.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../hash/hash_method.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../hash/hash_open.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../hash/hash_open.c_objects
+hash_open.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../hash/hash_open.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../hash/hash_page.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../hash/hash_page.c_objects
+hash_page.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../hash/hash_page.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../hash/hash_rec.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../hash/hash_rec.c_objects
+hash_rec.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../hash/hash_rec.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../hash/hash_reclaim.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../hash/hash_reclaim.c_objects
+hash_reclaim.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../hash/hash_reclaim.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../hash/hash_stat.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../hash/hash_stat.c_objects
+hash_stat.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../hash/hash_stat.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../hash/hash_upgrade.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../hash/hash_upgrade.c_objects
+hash_upgrade.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../hash/hash_upgrade.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../hash/hash_verify.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../hash/hash_verify.c_objects
+hash_verify.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../hash/hash_verify.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../hmac/hmac.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../hmac/hmac.c_objects
+hmac.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../hmac/hmac.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../hmac/sha1.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../hmac/sha1.c_objects
+sha1.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../hmac/sha1.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../hsearch/hsearch.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../hsearch/hsearch.c_objects
+hsearch.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../hsearch/hsearch.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../lock/lock.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../lock/lock.c_objects
+lock.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../lock/lock.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../lock/lock_deadlock.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../lock/lock_deadlock.c_objects
+lock_deadlock.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../lock/lock_deadlock.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../lock/lock_method.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../lock/lock_method.c_objects
+lock_method.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../lock/lock_method.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../lock/lock_region.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../lock/lock_region.c_objects
+lock_region.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../lock/lock_region.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../lock/lock_stat.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../lock/lock_stat.c_objects
+lock_stat.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../lock/lock_stat.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../lock/lock_util.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../lock/lock_util.c_objects
+lock_util.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../lock/lock_util.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../log/log.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../log/log.c_objects
+log.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../log/log.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../log/log_archive.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../log/log_archive.c_objects
+log_archive.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../log/log_archive.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../log/log_compare.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../log/log_compare.c_objects
+log_compare.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../log/log_compare.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../log/log_get.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../log/log_get.c_objects
+log_get.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../log/log_get.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../log/log_method.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../log/log_method.c_objects
+log_method.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../log/log_method.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../log/log_put.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../log/log_put.c_objects
+log_put.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../log/log_put.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../mp/mp_alloc.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../mp/mp_alloc.c_objects
+mp_alloc.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../mp/mp_alloc.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../mp/mp_bh.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../mp/mp_bh.c_objects
+mp_bh.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../mp/mp_bh.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../mp/mp_fget.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../mp/mp_fget.c_objects
+mp_fget.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../mp/mp_fget.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../mp/mp_fopen.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../mp/mp_fopen.c_objects
+mp_fopen.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../mp/mp_fopen.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../mp/mp_fput.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../mp/mp_fput.c_objects
+mp_fput.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../mp/mp_fput.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../mp/mp_fset.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../mp/mp_fset.c_objects
+mp_fset.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../mp/mp_fset.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../mp/mp_method.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../mp/mp_method.c_objects
+mp_method.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../mp/mp_method.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../mp/mp_region.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../mp/mp_region.c_objects
+mp_region.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../mp/mp_region.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../mp/mp_register.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../mp/mp_register.c_objects
+mp_register.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../mp/mp_register.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../mp/mp_stat.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../mp/mp_stat.c_objects
+mp_stat.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../mp/mp_stat.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../mp/mp_sync.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../mp/mp_sync.c_objects
+mp_sync.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../mp/mp_sync.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../mp/mp_trickle.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../mp/mp_trickle.c_objects
+mp_trickle.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../mp/mp_trickle.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../mutex/mut_tas.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../mutex/mut_tas.c_objects
+mut_tas.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../mutex/mut_tas.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../mutex/mutex.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../mutex/mutex.c_objects
+mutex.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../mutex/mutex.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os/os_alloc.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os/os_alloc.c_objects
+os_alloc.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os/os_alloc.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os/os_clock.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os/os_clock.c_objects
+os_clock.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os/os_clock.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os/os_dir.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os/os_dir.c_objects
+os_dir.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os/os_dir.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os/os_errno.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os/os_errno.c_objects
+os_errno.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os/os_errno.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os/os_fid.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os/os_fid.c_objects
+os_fid.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os/os_fid.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os/os_fsync.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os/os_fsync.c_objects
+os_fsync.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os/os_fsync.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os/os_handle.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os/os_handle.c_objects
+os_handle.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os/os_handle.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os/os_id.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os/os_id.c_objects
+os_id.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os/os_id.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os/os_method.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os/os_method.c_objects
+os_method.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os/os_method.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os/os_oflags.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os/os_oflags.c_objects
+os_oflags.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os/os_oflags.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os/os_open.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os/os_open.c_objects
+os_open.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os/os_open.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os/os_region.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os/os_region.c_objects
+os_region.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os/os_region.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os/os_rename.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os/os_rename.c_objects
+os_rename.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os/os_rename.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os/os_root.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os/os_root.c_objects
+os_root.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os/os_root.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os/os_rpath.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os/os_rpath.c_objects
+os_rpath.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os/os_rpath.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os/os_rw.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os/os_rw.c_objects
+os_rw.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os/os_rw.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os/os_seek.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os/os_seek.c_objects
+os_seek.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os/os_seek.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os/os_sleep.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os/os_sleep.c_objects
+os_sleep.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os/os_sleep.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os/os_spin.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os/os_spin.c_objects
+os_spin.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os/os_spin.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os/os_stat.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os/os_stat.c_objects
+os_stat.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os/os_stat.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os/os_tmpdir.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os/os_tmpdir.c_objects
+os_tmpdir.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os/os_tmpdir.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os/os_unlink.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os/os_unlink.c_objects
+os_unlink.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os/os_unlink.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os_vxworks/os_vx_abs.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os_vxworks/os_vx_abs.c_objects
+os_vx_abs.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os_vxworks/os_vx_abs.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os_vxworks/os_vx_config.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os_vxworks/os_vx_config.c_objects
+os_vx_config.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os_vxworks/os_vx_config.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os_vxworks/os_vx_map.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os_vxworks/os_vx_map.c_objects
+os_vx_map.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os_vxworks/os_vx_map.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../qam/qam.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../qam/qam.c_objects
+qam.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../qam/qam.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../qam/qam_auto.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../qam/qam_auto.c_objects
+qam_auto.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../qam/qam_auto.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../qam/qam_conv.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../qam/qam_conv.c_objects
+qam_conv.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../qam/qam_conv.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../qam/qam_files.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../qam/qam_files.c_objects
+qam_files.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../qam/qam_files.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../qam/qam_method.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../qam/qam_method.c_objects
+qam_method.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../qam/qam_method.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../qam/qam_open.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../qam/qam_open.c_objects
+qam_open.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../qam/qam_open.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../qam/qam_rec.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../qam/qam_rec.c_objects
+qam_rec.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../qam/qam_rec.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../qam/qam_stat.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../qam/qam_stat.c_objects
+qam_stat.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../qam/qam_stat.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../qam/qam_upgrade.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../qam/qam_upgrade.c_objects
+qam_upgrade.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../qam/qam_upgrade.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../qam/qam_verify.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../qam/qam_verify.c_objects
+qam_verify.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../qam/qam_verify.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../rep/rep_method.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../rep/rep_method.c_objects
+rep_method.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../rep/rep_method.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../rep/rep_record.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../rep/rep_record.c_objects
+rep_record.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../rep/rep_record.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../rep/rep_region.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../rep/rep_region.c_objects
+rep_region.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../rep/rep_region.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../rep/rep_util.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../rep/rep_util.c_objects
+rep_util.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../rep/rep_util.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../rpc_client/client.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../rpc_client/client.c_objects
+client.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../rpc_client/client.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../rpc_client/db_server_clnt.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../rpc_client/db_server_clnt.c_objects
+db_server_clnt.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../rpc_client/db_server_clnt.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../rpc_client/gen_client.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../rpc_client/gen_client.c_objects
+gen_client.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../rpc_client/gen_client.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../rpc_client/gen_client_ret.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../rpc_client/gen_client_ret.c_objects
+gen_client_ret.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../rpc_client/gen_client_ret.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../rpc_server/c/db_server_xdr.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../rpc_server/c/db_server_xdr.c_objects
+db_server_xdr.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../rpc_server/c/db_server_xdr.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../txn/txn.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../txn/txn.c_objects
+txn.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../txn/txn.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../txn/txn_auto.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../txn/txn_auto.c_objects
+txn_auto.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../txn/txn_auto.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../txn/txn_method.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../txn/txn_method.c_objects
+txn_method.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../txn/txn_method.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../txn/txn_rec.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../txn/txn_rec.c_objects
+txn_rec.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../txn/txn_rec.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../txn/txn_recover.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../txn/txn_recover.c_objects
+txn_recover.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../txn/txn_recover.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../txn/txn_region.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../txn/txn_region.c_objects
+txn_region.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../txn/txn_region.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../txn/txn_stat.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../txn/txn_stat.c_objects
+txn_stat.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../txn/txn_stat.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../txn/txn_util.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../txn/txn_util.c_objects
+txn_util.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../txn/txn_util.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../xa/xa.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../xa/xa.c_objects
+xa.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../xa/xa.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../xa/xa_db.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../xa/xa_db.c_objects
+xa_db.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../xa/xa_db.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../xa/xa_map.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../xa/xa_map.c_objects
+xa_map.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../xa/xa_map.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/compConfig.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/compConfig.c_objects
+compConfig.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/compConfig.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_AR
+arpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_AS
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_CC
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_CFLAGS
+-mcpu=pentiumpro \
+ -march=pentiumpro \
+ -ansi \
+ -DRW_MULTI_THREAD \
+ -D_REENTRANT \
+ -O2 \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -MD \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM2 \
+ -I$(PRJ_DIR)/.. \
+ -I$(PRJ_DIR)/../..
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_CFLAGS_AS
+-mcpu=pentiumpro \
+ -march=pentiumpro \
+ -ansi \
+ -O2 \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -P \
+ -x \
+ assembler-with-cpp \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM2
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_CPP
+ccpentium -E -P
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_CPPFILT
+c++filtpentium --strip-underscores
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_LD
+ldpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_LDFLAGS
+-X
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_LDPARTIAL
+ccpentium \
+ -B$(WIND_BASE)/host/$(WIND_HOST_TYPE)/lib/gcc-lib/ \
+ -nostdlib \
+ -r \
+ -Wl,-X
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_LD_PARTIAL_FLAGS
+-X -r
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_NM
+nmpentium -g
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_OPTION_DEFINE_MACRO
+-D
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_OPTION_GENERATE_DEPENDENCY_FILE
+-MD
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_OPTION_INCLUDE_DIR
+-I
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_RELEASE
+1
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_SIZE
+sizepentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_RELEASE
+1
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_RO_DEPEND_PATH
+$(WIND_BASE)/target/h/
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_TC
+::tc_PENTIUM2gnu.release
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_DEFAULTFORCPU
+1
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_compare.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_compare.c_objects
+bt_compare.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_compare.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_conv.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_conv.c_objects
+bt_conv.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_conv.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_curadj.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_curadj.c_objects
+bt_curadj.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_curadj.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_cursor.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_cursor.c_objects
+bt_cursor.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_cursor.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_delete.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_delete.c_objects
+bt_delete.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_delete.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_method.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_method.c_objects
+bt_method.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_method.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_open.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_open.c_objects
+bt_open.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_open.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_put.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_put.c_objects
+bt_put.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_put.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_rec.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_rec.c_objects
+bt_rec.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_rec.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_reclaim.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_reclaim.c_objects
+bt_reclaim.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_reclaim.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_recno.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_recno.c_objects
+bt_recno.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_recno.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_rsearch.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_rsearch.c_objects
+bt_rsearch.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_rsearch.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_search.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_search.c_objects
+bt_search.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_search.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_split.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_split.c_objects
+bt_split.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_split.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_stat.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_stat.c_objects
+bt_stat.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_stat.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_upgrade.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_upgrade.c_objects
+bt_upgrade.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_upgrade.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_verify.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_verify.c_objects
+bt_verify.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_verify.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../btree/btree_auto.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../btree/btree_auto.c_objects
+btree_auto.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../btree/btree_auto.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../clib/getopt.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../clib/getopt.c_objects
+getopt.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../clib/getopt.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../clib/snprintf.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../clib/snprintf.c_objects
+snprintf.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../clib/snprintf.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../clib/strcasecmp.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../clib/strcasecmp.c_objects
+strcasecmp.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../clib/strcasecmp.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../clib/strdup.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../clib/strdup.c_objects
+strdup.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../clib/strdup.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../clib/vsnprintf.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../clib/vsnprintf.c_objects
+vsnprintf.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../clib/vsnprintf.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../common/db_byteorder.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../common/db_byteorder.c_objects
+db_byteorder.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../common/db_byteorder.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../common/db_err.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../common/db_err.c_objects
+db_err.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../common/db_err.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../common/db_getlong.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../common/db_getlong.c_objects
+db_getlong.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../common/db_getlong.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../common/db_idspace.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../common/db_idspace.c_objects
+db_idspace.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../common/db_idspace.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../common/db_log2.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../common/db_log2.c_objects
+db_log2.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../common/db_log2.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../common/util_arg.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../common/util_arg.c_objects
+util_arg.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../common/util_arg.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../common/util_cache.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../common/util_cache.c_objects
+util_cache.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../common/util_cache.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../common/util_log.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../common/util_log.c_objects
+util_log.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../common/util_log.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../common/util_sig.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../common/util_sig.c_objects
+util_sig.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../common/util_sig.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/crdel_auto.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/crdel_auto.c_objects
+crdel_auto.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/crdel_auto.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/crdel_rec.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/crdel_rec.c_objects
+crdel_rec.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/crdel_rec.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/db.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/db.c_objects
+db.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/db.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/db_am.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/db_am.c_objects
+db_am.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/db_am.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/db_auto.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/db_auto.c_objects
+db_auto.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/db_auto.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/db_cam.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/db_cam.c_objects
+db_cam.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/db_cam.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/db_conv.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/db_conv.c_objects
+db_conv.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/db_conv.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/db_dispatch.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/db_dispatch.c_objects
+db_dispatch.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/db_dispatch.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/db_dup.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/db_dup.c_objects
+db_dup.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/db_dup.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/db_iface.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/db_iface.c_objects
+db_iface.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/db_iface.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/db_join.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/db_join.c_objects
+db_join.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/db_join.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/db_meta.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/db_meta.c_objects
+db_meta.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/db_meta.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/db_method.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/db_method.c_objects
+db_method.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/db_method.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/db_open.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/db_open.c_objects
+db_open.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/db_open.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/db_overflow.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/db_overflow.c_objects
+db_overflow.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/db_overflow.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/db_pr.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/db_pr.c_objects
+db_pr.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/db_pr.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/db_rec.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/db_rec.c_objects
+db_rec.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/db_rec.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/db_reclaim.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/db_reclaim.c_objects
+db_reclaim.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/db_reclaim.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/db_remove.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/db_remove.c_objects
+db_remove.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/db_remove.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/db_rename.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/db_rename.c_objects
+db_rename.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/db_rename.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/db_ret.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/db_ret.c_objects
+db_ret.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/db_ret.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/db_truncate.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/db_truncate.c_objects
+db_truncate.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/db_truncate.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/db_upg.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/db_upg.c_objects
+db_upg.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/db_upg.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/db_upg_opd.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/db_upg_opd.c_objects
+db_upg_opd.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/db_upg_opd.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/db_vrfy.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/db_vrfy.c_objects
+db_vrfy.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/db_vrfy.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/db_vrfyutil.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/db_vrfyutil.c_objects
+db_vrfyutil.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/db_vrfyutil.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../dbreg/dbreg.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../dbreg/dbreg.c_objects
+dbreg.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../dbreg/dbreg.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../dbreg/dbreg_auto.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../dbreg/dbreg_auto.c_objects
+dbreg_auto.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../dbreg/dbreg_auto.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../dbreg/dbreg_rec.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../dbreg/dbreg_rec.c_objects
+dbreg_rec.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../dbreg/dbreg_rec.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../dbreg/dbreg_util.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../dbreg/dbreg_util.c_objects
+dbreg_util.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../dbreg/dbreg_util.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../env/db_salloc.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../env/db_salloc.c_objects
+db_salloc.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../env/db_salloc.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../env/db_shash.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../env/db_shash.c_objects
+db_shash.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../env/db_shash.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../env/env_file.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../env/env_file.c_objects
+env_file.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../env/env_file.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../env/env_method.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../env/env_method.c_objects
+env_method.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../env/env_method.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../env/env_open.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../env/env_open.c_objects
+env_open.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../env/env_open.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../env/env_recover.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../env/env_recover.c_objects
+env_recover.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../env/env_recover.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../env/env_region.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../env/env_region.c_objects
+env_region.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../env/env_region.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../fileops/fileops_auto.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../fileops/fileops_auto.c_objects
+fileops_auto.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../fileops/fileops_auto.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../fileops/fop_basic.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../fileops/fop_basic.c_objects
+fop_basic.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../fileops/fop_basic.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../fileops/fop_rec.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../fileops/fop_rec.c_objects
+fop_rec.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../fileops/fop_rec.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../fileops/fop_util.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../fileops/fop_util.c_objects
+fop_util.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../fileops/fop_util.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../hash/hash.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../hash/hash.c_objects
+hash.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../hash/hash.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_auto.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_auto.c_objects
+hash_auto.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_auto.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_conv.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_conv.c_objects
+hash_conv.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_conv.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_dup.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_dup.c_objects
+hash_dup.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_dup.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_func.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_func.c_objects
+hash_func.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_func.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_meta.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_meta.c_objects
+hash_meta.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_meta.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_method.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_method.c_objects
+hash_method.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_method.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_open.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_open.c_objects
+hash_open.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_open.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_page.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_page.c_objects
+hash_page.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_page.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_rec.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_rec.c_objects
+hash_rec.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_rec.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_reclaim.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_reclaim.c_objects
+hash_reclaim.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_reclaim.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_stat.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_stat.c_objects
+hash_stat.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_stat.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_upgrade.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_upgrade.c_objects
+hash_upgrade.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_upgrade.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_verify.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_verify.c_objects
+hash_verify.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_verify.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../hmac/hmac.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../hmac/hmac.c_objects
+hmac.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../hmac/hmac.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../hmac/sha1.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../hmac/sha1.c_objects
+sha1.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../hmac/sha1.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../hsearch/hsearch.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../hsearch/hsearch.c_objects
+hsearch.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../hsearch/hsearch.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../lock/lock.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../lock/lock.c_objects
+lock.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../lock/lock.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../lock/lock_deadlock.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../lock/lock_deadlock.c_objects
+lock_deadlock.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../lock/lock_deadlock.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../lock/lock_method.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../lock/lock_method.c_objects
+lock_method.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../lock/lock_method.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../lock/lock_region.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../lock/lock_region.c_objects
+lock_region.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../lock/lock_region.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../lock/lock_stat.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../lock/lock_stat.c_objects
+lock_stat.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../lock/lock_stat.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../lock/lock_util.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../lock/lock_util.c_objects
+lock_util.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../lock/lock_util.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../log/log.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../log/log.c_objects
+log.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../log/log.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../log/log_archive.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../log/log_archive.c_objects
+log_archive.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../log/log_archive.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../log/log_compare.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../log/log_compare.c_objects
+log_compare.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../log/log_compare.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../log/log_get.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../log/log_get.c_objects
+log_get.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../log/log_get.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../log/log_method.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../log/log_method.c_objects
+log_method.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../log/log_method.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../log/log_put.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../log/log_put.c_objects
+log_put.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../log/log_put.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_alloc.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_alloc.c_objects
+mp_alloc.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_alloc.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_bh.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_bh.c_objects
+mp_bh.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_bh.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_fget.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_fget.c_objects
+mp_fget.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_fget.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_fopen.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_fopen.c_objects
+mp_fopen.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_fopen.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_fput.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_fput.c_objects
+mp_fput.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_fput.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_fset.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_fset.c_objects
+mp_fset.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_fset.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_method.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_method.c_objects
+mp_method.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_method.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_region.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_region.c_objects
+mp_region.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_region.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_register.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_register.c_objects
+mp_register.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_register.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_stat.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_stat.c_objects
+mp_stat.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_stat.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_sync.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_sync.c_objects
+mp_sync.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_sync.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_trickle.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_trickle.c_objects
+mp_trickle.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_trickle.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../mutex/mut_tas.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../mutex/mut_tas.c_objects
+mut_tas.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../mutex/mut_tas.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../mutex/mutex.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../mutex/mutex.c_objects
+mutex.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../mutex/mutex.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os/os_alloc.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os/os_alloc.c_objects
+os_alloc.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os/os_alloc.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os/os_clock.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os/os_clock.c_objects
+os_clock.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os/os_clock.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os/os_dir.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os/os_dir.c_objects
+os_dir.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os/os_dir.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os/os_errno.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os/os_errno.c_objects
+os_errno.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os/os_errno.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os/os_fid.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os/os_fid.c_objects
+os_fid.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os/os_fid.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os/os_fsync.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os/os_fsync.c_objects
+os_fsync.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os/os_fsync.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os/os_handle.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os/os_handle.c_objects
+os_handle.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os/os_handle.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os/os_id.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os/os_id.c_objects
+os_id.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os/os_id.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os/os_method.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os/os_method.c_objects
+os_method.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os/os_method.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os/os_oflags.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os/os_oflags.c_objects
+os_oflags.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os/os_oflags.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os/os_open.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os/os_open.c_objects
+os_open.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os/os_open.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os/os_region.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os/os_region.c_objects
+os_region.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os/os_region.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os/os_rename.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os/os_rename.c_objects
+os_rename.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os/os_rename.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os/os_root.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os/os_root.c_objects
+os_root.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os/os_root.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os/os_rpath.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os/os_rpath.c_objects
+os_rpath.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os/os_rpath.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os/os_rw.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os/os_rw.c_objects
+os_rw.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os/os_rw.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os/os_seek.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os/os_seek.c_objects
+os_seek.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os/os_seek.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os/os_sleep.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os/os_sleep.c_objects
+os_sleep.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os/os_sleep.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os/os_spin.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os/os_spin.c_objects
+os_spin.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os/os_spin.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os/os_stat.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os/os_stat.c_objects
+os_stat.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os/os_stat.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os/os_tmpdir.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os/os_tmpdir.c_objects
+os_tmpdir.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os/os_tmpdir.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os/os_unlink.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os/os_unlink.c_objects
+os_unlink.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os/os_unlink.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os_vxworks/os_vx_abs.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os_vxworks/os_vx_abs.c_objects
+os_vx_abs.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os_vxworks/os_vx_abs.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os_vxworks/os_vx_config.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os_vxworks/os_vx_config.c_objects
+os_vx_config.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os_vxworks/os_vx_config.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os_vxworks/os_vx_map.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os_vxworks/os_vx_map.c_objects
+os_vx_map.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os_vxworks/os_vx_map.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../qam/qam.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../qam/qam.c_objects
+qam.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../qam/qam.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../qam/qam_auto.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../qam/qam_auto.c_objects
+qam_auto.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../qam/qam_auto.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../qam/qam_conv.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../qam/qam_conv.c_objects
+qam_conv.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../qam/qam_conv.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../qam/qam_files.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../qam/qam_files.c_objects
+qam_files.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../qam/qam_files.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../qam/qam_method.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../qam/qam_method.c_objects
+qam_method.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../qam/qam_method.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../qam/qam_open.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../qam/qam_open.c_objects
+qam_open.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../qam/qam_open.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../qam/qam_rec.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../qam/qam_rec.c_objects
+qam_rec.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../qam/qam_rec.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../qam/qam_stat.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../qam/qam_stat.c_objects
+qam_stat.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../qam/qam_stat.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../qam/qam_upgrade.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../qam/qam_upgrade.c_objects
+qam_upgrade.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../qam/qam_upgrade.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../qam/qam_verify.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../qam/qam_verify.c_objects
+qam_verify.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../qam/qam_verify.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../rep/rep_method.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../rep/rep_method.c_objects
+rep_method.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../rep/rep_method.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../rep/rep_record.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../rep/rep_record.c_objects
+rep_record.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../rep/rep_record.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../rep/rep_region.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../rep/rep_region.c_objects
+rep_region.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../rep/rep_region.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../rep/rep_util.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../rep/rep_util.c_objects
+rep_util.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../rep/rep_util.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../rpc_client/client.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../rpc_client/client.c_objects
+client.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../rpc_client/client.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../rpc_client/db_server_clnt.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../rpc_client/db_server_clnt.c_objects
+db_server_clnt.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../rpc_client/db_server_clnt.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../rpc_client/gen_client.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../rpc_client/gen_client.c_objects
+gen_client.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../rpc_client/gen_client.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../rpc_client/gen_client_ret.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../rpc_client/gen_client_ret.c_objects
+gen_client_ret.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../rpc_client/gen_client_ret.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../rpc_server/c/db_server_xdr.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../rpc_server/c/db_server_xdr.c_objects
+db_server_xdr.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../rpc_server/c/db_server_xdr.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../txn/txn.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../txn/txn.c_objects
+txn.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../txn/txn.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../txn/txn_auto.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../txn/txn_auto.c_objects
+txn_auto.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../txn/txn_auto.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../txn/txn_method.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../txn/txn_method.c_objects
+txn_method.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../txn/txn_method.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../txn/txn_rec.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../txn/txn_rec.c_objects
+txn_rec.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../txn/txn_rec.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../txn/txn_recover.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../txn/txn_recover.c_objects
+txn_recover.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../txn/txn_recover.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../txn/txn_region.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../txn/txn_region.c_objects
+txn_region.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../txn/txn_region.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../txn/txn_stat.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../txn/txn_stat.c_objects
+txn_stat.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../txn/txn_stat.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../txn/txn_util.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../txn/txn_util.c_objects
+txn_util.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../txn/txn_util.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../xa/xa.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../xa/xa.c_objects
+xa.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../xa/xa.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../xa/xa_db.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../xa/xa_db.c_objects
+xa_db.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../xa/xa_db.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../xa/xa_map.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../xa/xa_map.c_objects
+xa_map.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../xa/xa_map.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/compConfig.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/compConfig.c_objects
+compConfig.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/compConfig.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_AR
+arpentium
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_AS
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_CC
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_CFLAGS
+-mcpu=pentium \
+ -march=pentium \
+ -ansi \
+ -DRW_MULTI_THREAD \
+ -D_REENTRANT \
+ -g \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -MD \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM \
+ -I$(PRJ_DIR)/.. \
+ -I$(PRJ_DIR)/../.. \
+ -DDEBUG \
+ -DDIAGNOSTIC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_CFLAGS_AS
+-mcpu=pentium \
+ -march=pentium \
+ -ansi \
+ -g \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -P \
+ -x \
+ assembler-with-cpp \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_CPP
+ccpentium -E -P
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_CPPFILT
+c++filtpentium --strip-underscores
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_LD
+ldpentium
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_LDFLAGS
+-X
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_LDPARTIAL
+ccpentium \
+ -B$(WIND_BASE)/host/$(WIND_HOST_TYPE)/lib/gcc-lib/ \
+ -nostdlib \
+ -r \
+ -Wl,-X
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_LD_PARTIAL_FLAGS
+-X -r
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_NM
+nmpentium -g
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_OPTION_DEFINE_MACRO
+-D
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_OPTION_GENERATE_DEPENDENCY_FILE
+-MD
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_OPTION_INCLUDE_DIR
+-I
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_RELEASE
+0
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_SIZE
+sizepentium
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_RELEASE
+0
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_RO_DEPEND_PATH
+$(WIND_BASE)/target/h/
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_TC
+::tc_PENTIUMgnu.debug
+<END>
+
+<BEGIN> BUILD__LIST
+PENTIUMgnu.debug PENTIUM2gnu.debug PENTIUM2gnu.release
+<END>
+
+<BEGIN> COMPONENT_COM_TYPE
+
+<END>
+
+<BEGIN> PROJECT_FILES
+$(PRJ_DIR)/../../btree/bt_compare.c \
+ $(PRJ_DIR)/../../btree/bt_conv.c \
+ $(PRJ_DIR)/../../btree/bt_curadj.c \
+ $(PRJ_DIR)/../../btree/bt_cursor.c \
+ $(PRJ_DIR)/../../btree/bt_delete.c \
+ $(PRJ_DIR)/../../btree/bt_method.c \
+ $(PRJ_DIR)/../../btree/bt_open.c \
+ $(PRJ_DIR)/../../btree/bt_put.c \
+ $(PRJ_DIR)/../../btree/bt_rec.c \
+ $(PRJ_DIR)/../../btree/bt_reclaim.c \
+ $(PRJ_DIR)/../../btree/bt_recno.c \
+ $(PRJ_DIR)/../../btree/bt_rsearch.c \
+ $(PRJ_DIR)/../../btree/bt_search.c \
+ $(PRJ_DIR)/../../btree/bt_split.c \
+ $(PRJ_DIR)/../../btree/bt_stat.c \
+ $(PRJ_DIR)/../../btree/bt_upgrade.c \
+ $(PRJ_DIR)/../../btree/bt_verify.c \
+ $(PRJ_DIR)/../../btree/btree_auto.c \
+ $(PRJ_DIR)/../../clib/getopt.c \
+ $(PRJ_DIR)/../../clib/snprintf.c \
+ $(PRJ_DIR)/../../clib/strcasecmp.c \
+ $(PRJ_DIR)/../../clib/strdup.c \
+ $(PRJ_DIR)/../../clib/vsnprintf.c \
+ $(PRJ_DIR)/../../common/db_byteorder.c \
+ $(PRJ_DIR)/../../common/db_err.c \
+ $(PRJ_DIR)/../../common/db_getlong.c \
+ $(PRJ_DIR)/../../common/db_idspace.c \
+ $(PRJ_DIR)/../../common/db_log2.c \
+ $(PRJ_DIR)/../../common/util_arg.c \
+ $(PRJ_DIR)/../../common/util_cache.c \
+ $(PRJ_DIR)/../../common/util_log.c \
+ $(PRJ_DIR)/../../common/util_sig.c \
+ $(PRJ_DIR)/../../db/crdel_auto.c \
+ $(PRJ_DIR)/../../db/crdel_rec.c \
+ $(PRJ_DIR)/../../db/db.c \
+ $(PRJ_DIR)/../../db/db_am.c \
+ $(PRJ_DIR)/../../db/db_auto.c \
+ $(PRJ_DIR)/../../db/db_cam.c \
+ $(PRJ_DIR)/../../db/db_conv.c \
+ $(PRJ_DIR)/../../db/db_dispatch.c \
+ $(PRJ_DIR)/../../db/db_dup.c \
+ $(PRJ_DIR)/../../db/db_iface.c \
+ $(PRJ_DIR)/../../db/db_join.c \
+ $(PRJ_DIR)/../../db/db_meta.c \
+ $(PRJ_DIR)/../../db/db_method.c \
+ $(PRJ_DIR)/../../db/db_open.c \
+ $(PRJ_DIR)/../../db/db_overflow.c \
+ $(PRJ_DIR)/../../db/db_pr.c \
+ $(PRJ_DIR)/../../db/db_rec.c \
+ $(PRJ_DIR)/../../db/db_reclaim.c \
+ $(PRJ_DIR)/../../db/db_remove.c \
+ $(PRJ_DIR)/../../db/db_rename.c \
+ $(PRJ_DIR)/../../db/db_ret.c \
+ $(PRJ_DIR)/../../db/db_truncate.c \
+ $(PRJ_DIR)/../../db/db_upg.c \
+ $(PRJ_DIR)/../../db/db_upg_opd.c \
+ $(PRJ_DIR)/../../db/db_vrfy.c \
+ $(PRJ_DIR)/../../db/db_vrfyutil.c \
+ $(PRJ_DIR)/../../dbreg/dbreg.c \
+ $(PRJ_DIR)/../../dbreg/dbreg_auto.c \
+ $(PRJ_DIR)/../../dbreg/dbreg_rec.c \
+ $(PRJ_DIR)/../../dbreg/dbreg_util.c \
+ $(PRJ_DIR)/../../env/db_salloc.c \
+ $(PRJ_DIR)/../../env/db_shash.c \
+ $(PRJ_DIR)/../../env/env_file.c \
+ $(PRJ_DIR)/../../env/env_method.c \
+ $(PRJ_DIR)/../../env/env_open.c \
+ $(PRJ_DIR)/../../env/env_recover.c \
+ $(PRJ_DIR)/../../env/env_region.c \
+ $(PRJ_DIR)/../../fileops/fileops_auto.c \
+ $(PRJ_DIR)/../../fileops/fop_basic.c \
+ $(PRJ_DIR)/../../fileops/fop_rec.c \
+ $(PRJ_DIR)/../../fileops/fop_util.c \
+ $(PRJ_DIR)/../../hash/hash.c \
+ $(PRJ_DIR)/../../hash/hash_auto.c \
+ $(PRJ_DIR)/../../hash/hash_conv.c \
+ $(PRJ_DIR)/../../hash/hash_dup.c \
+ $(PRJ_DIR)/../../hash/hash_func.c \
+ $(PRJ_DIR)/../../hash/hash_meta.c \
+ $(PRJ_DIR)/../../hash/hash_method.c \
+ $(PRJ_DIR)/../../hash/hash_open.c \
+ $(PRJ_DIR)/../../hash/hash_page.c \
+ $(PRJ_DIR)/../../hash/hash_rec.c \
+ $(PRJ_DIR)/../../hash/hash_reclaim.c \
+ $(PRJ_DIR)/../../hash/hash_stat.c \
+ $(PRJ_DIR)/../../hash/hash_upgrade.c \
+ $(PRJ_DIR)/../../hash/hash_verify.c \
+ $(PRJ_DIR)/../../hmac/hmac.c \
+ $(PRJ_DIR)/../../hmac/sha1.c \
+ $(PRJ_DIR)/../../hsearch/hsearch.c \
+ $(PRJ_DIR)/../../lock/lock.c \
+ $(PRJ_DIR)/../../lock/lock_deadlock.c \
+ $(PRJ_DIR)/../../lock/lock_method.c \
+ $(PRJ_DIR)/../../lock/lock_region.c \
+ $(PRJ_DIR)/../../lock/lock_stat.c \
+ $(PRJ_DIR)/../../lock/lock_util.c \
+ $(PRJ_DIR)/../../log/log.c \
+ $(PRJ_DIR)/../../log/log_archive.c \
+ $(PRJ_DIR)/../../log/log_compare.c \
+ $(PRJ_DIR)/../../log/log_get.c \
+ $(PRJ_DIR)/../../log/log_method.c \
+ $(PRJ_DIR)/../../log/log_put.c \
+ $(PRJ_DIR)/../../mp/mp_alloc.c \
+ $(PRJ_DIR)/../../mp/mp_bh.c \
+ $(PRJ_DIR)/../../mp/mp_fget.c \
+ $(PRJ_DIR)/../../mp/mp_fopen.c \
+ $(PRJ_DIR)/../../mp/mp_fput.c \
+ $(PRJ_DIR)/../../mp/mp_fset.c \
+ $(PRJ_DIR)/../../mp/mp_method.c \
+ $(PRJ_DIR)/../../mp/mp_region.c \
+ $(PRJ_DIR)/../../mp/mp_register.c \
+ $(PRJ_DIR)/../../mp/mp_stat.c \
+ $(PRJ_DIR)/../../mp/mp_sync.c \
+ $(PRJ_DIR)/../../mp/mp_trickle.c \
+ $(PRJ_DIR)/../../mutex/mut_tas.c \
+ $(PRJ_DIR)/../../mutex/mutex.c \
+ $(PRJ_DIR)/../../os/os_alloc.c \
+ $(PRJ_DIR)/../../os/os_clock.c \
+ $(PRJ_DIR)/../../os/os_dir.c \
+ $(PRJ_DIR)/../../os/os_errno.c \
+ $(PRJ_DIR)/../../os/os_fid.c \
+ $(PRJ_DIR)/../../os/os_fsync.c \
+ $(PRJ_DIR)/../../os/os_handle.c \
+ $(PRJ_DIR)/../../os/os_id.c \
+ $(PRJ_DIR)/../../os/os_method.c \
+ $(PRJ_DIR)/../../os/os_oflags.c \
+ $(PRJ_DIR)/../../os/os_open.c \
+ $(PRJ_DIR)/../../os/os_region.c \
+ $(PRJ_DIR)/../../os/os_rename.c \
+ $(PRJ_DIR)/../../os/os_root.c \
+ $(PRJ_DIR)/../../os/os_rpath.c \
+ $(PRJ_DIR)/../../os/os_rw.c \
+ $(PRJ_DIR)/../../os/os_seek.c \
+ $(PRJ_DIR)/../../os/os_sleep.c \
+ $(PRJ_DIR)/../../os/os_spin.c \
+ $(PRJ_DIR)/../../os/os_stat.c \
+ $(PRJ_DIR)/../../os/os_tmpdir.c \
+ $(PRJ_DIR)/../../os/os_unlink.c \
+ $(PRJ_DIR)/../../os_vxworks/os_vx_abs.c \
+ $(PRJ_DIR)/../../os_vxworks/os_vx_config.c \
+ $(PRJ_DIR)/../../os_vxworks/os_vx_map.c \
+ $(PRJ_DIR)/../../qam/qam.c \
+ $(PRJ_DIR)/../../qam/qam_auto.c \
+ $(PRJ_DIR)/../../qam/qam_conv.c \
+ $(PRJ_DIR)/../../qam/qam_files.c \
+ $(PRJ_DIR)/../../qam/qam_method.c \
+ $(PRJ_DIR)/../../qam/qam_open.c \
+ $(PRJ_DIR)/../../qam/qam_rec.c \
+ $(PRJ_DIR)/../../qam/qam_stat.c \
+ $(PRJ_DIR)/../../qam/qam_upgrade.c \
+ $(PRJ_DIR)/../../qam/qam_verify.c \
+ $(PRJ_DIR)/../../rep/rep_method.c \
+ $(PRJ_DIR)/../../rep/rep_record.c \
+ $(PRJ_DIR)/../../rep/rep_region.c \
+ $(PRJ_DIR)/../../rep/rep_util.c \
+ $(PRJ_DIR)/../../rpc_client/client.c \
+ $(PRJ_DIR)/../../rpc_client/db_server_clnt.c \
+ $(PRJ_DIR)/../../rpc_client/gen_client.c \
+ $(PRJ_DIR)/../../rpc_client/gen_client_ret.c \
+ $(PRJ_DIR)/../../rpc_server/c/db_server_xdr.c \
+ $(PRJ_DIR)/../../txn/txn.c \
+ $(PRJ_DIR)/../../txn/txn_auto.c \
+ $(PRJ_DIR)/../../txn/txn_method.c \
+ $(PRJ_DIR)/../../txn/txn_rec.c \
+ $(PRJ_DIR)/../../txn/txn_recover.c \
+ $(PRJ_DIR)/../../txn/txn_region.c \
+ $(PRJ_DIR)/../../txn/txn_stat.c \
+ $(PRJ_DIR)/../../txn/txn_util.c \
+ $(PRJ_DIR)/../../xa/xa.c \
+ $(PRJ_DIR)/../../xa/xa_db.c \
+ $(PRJ_DIR)/../../xa/xa_map.c
+<END>
+
+<BEGIN> WCC__CDF_PATH
+$(PRJ_DIR)
+<END>
+
+<BEGIN> WCC__CURRENT
+PENTIUMgnu.debug
+<END>
+
+<BEGIN> WCC__LIST
+PENTIUMgnu.debug
+<END>
+
+<BEGIN> WCC__MXR_LIBS
+lib$(CPU)$(TOOL)vx.a
+<END>
+
+<BEGIN> WCC__OBJS_PATH
+$(WIND_BASE)/target/lib/obj$CPU$TOOLvx
+<END>
+
diff --git a/bdb/build_vxworks/db_archive/db_archive.c b/bdb/build_vxworks/db_archive/db_archive.c
new file mode 100644
index 00000000000..5e43f32cd8e
--- /dev/null
+++ b/bdb/build_vxworks/db_archive/db_archive.c
@@ -0,0 +1,195 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char copyright[] =
+ "Copyright (c) 1996-2002\nSleepycat Software Inc. All rights reserved.\n";
+static const char revid[] =
+ "$Id: db_archive.c,v 11.36 2002/03/28 20:13:34 bostic Exp $";
+#endif
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#endif
+
+#include "db_int.h"
+
+int db_archive_main __P((int, char *[]));
+int db_archive_usage __P((void));
+int db_archive_version_check __P((const char *));
+
+int
+db_archive(args)
+ char *args;
+{
+ int argc;
+ char **argv;
+
+ __db_util_arg("db_archive", args, &argc, &argv);
+ return (db_archive_main(argc, argv) ? EXIT_FAILURE : EXIT_SUCCESS);
+}
+
+#include <stdio.h>
+#define ERROR_RETURN ERROR
+
+int
+db_archive_main(argc, argv)
+ int argc;
+ char *argv[];
+{
+ extern char *optarg;
+ extern int optind, __db_getopt_reset;
+ const char *progname = "db_archive";
+ DB_ENV *dbenv;
+ u_int32_t flags;
+ int ch, e_close, exitval, ret, verbose;
+ char **file, *home, **list, *passwd;
+
+ if ((ret = db_archive_version_check(progname)) != 0)
+ return (ret);
+
+ flags = 0;
+ e_close = exitval = verbose = 0;
+ home = passwd = NULL;
+ __db_getopt_reset = 1;
+ while ((ch = getopt(argc, argv, "ah:lP:sVv")) != EOF)
+ switch (ch) {
+ case 'a':
+ LF_SET(DB_ARCH_ABS);
+ break;
+ case 'h':
+ home = optarg;
+ break;
+ case 'l':
+ LF_SET(DB_ARCH_LOG);
+ break;
+ case 'P':
+ passwd = strdup(optarg);
+ memset(optarg, 0, strlen(optarg));
+ if (passwd == NULL) {
+ fprintf(stderr, "%s: strdup: %s\n",
+ progname, strerror(errno));
+ return (EXIT_FAILURE);
+ }
+ break;
+ case 's':
+ LF_SET(DB_ARCH_DATA);
+ break;
+ case 'V':
+ printf("%s\n", db_version(NULL, NULL, NULL));
+ return (EXIT_SUCCESS);
+ case 'v':
+ verbose = 1;
+ break;
+ case '?':
+ default:
+ return (db_archive_usage());
+ }
+ argc -= optind;
+ argv += optind;
+
+ if (argc != 0)
+ return (db_archive_usage());
+
+ /* Handle possible interruptions. */
+ __db_util_siginit();
+
+ /*
+ * Create an environment object and initialize it for error
+ * reporting.
+ */
+ if ((ret = db_env_create(&dbenv, 0)) != 0) {
+ fprintf(stderr,
+ "%s: db_env_create: %s\n", progname, db_strerror(ret));
+ goto shutdown;
+ }
+ e_close = 1;
+
+ dbenv->set_errfile(dbenv, stderr);
+ dbenv->set_errpfx(dbenv, progname);
+
+ if (verbose)
+ (void)dbenv->set_verbose(dbenv, DB_VERB_CHKPOINT, 1);
+
+ if (passwd != NULL && (ret = dbenv->set_encrypt(dbenv,
+ passwd, DB_ENCRYPT_AES)) != 0) {
+ dbenv->err(dbenv, ret, "set_passwd");
+ goto shutdown;
+ }
+ /*
+ * If attaching to a pre-existing environment fails, create a
+ * private one and try again.
+ */
+ if ((ret = dbenv->open(dbenv,
+ home, DB_JOINENV | DB_USE_ENVIRON, 0)) != 0 &&
+ (ret = dbenv->open(dbenv, home, DB_CREATE |
+ DB_INIT_LOG | DB_INIT_TXN | DB_PRIVATE | DB_USE_ENVIRON, 0)) != 0) {
+ dbenv->err(dbenv, ret, "open");
+ goto shutdown;
+ }
+
+ /* Get the list of names. */
+ if ((ret = dbenv->log_archive(dbenv, &list, flags)) != 0) {
+ dbenv->err(dbenv, ret, "DB_ENV->log_archive");
+ goto shutdown;
+ }
+
+ /* Print the list of names. */
+ if (list != NULL) {
+ for (file = list; *file != NULL; ++file)
+ printf("%s\n", *file);
+ free(list);
+ }
+
+ if (0) {
+shutdown: exitval = 1;
+ }
+ if (e_close && (ret = dbenv->close(dbenv, 0)) != 0) {
+ exitval = 1;
+ fprintf(stderr,
+ "%s: dbenv->close: %s\n", progname, db_strerror(ret));
+ }
+
+ /* Resend any caught signal. */
+ __db_util_sigresend();
+
+ return (exitval == 0 ? EXIT_SUCCESS : EXIT_FAILURE);
+}
+
+int
+db_archive_usage()
+{
+ (void)fprintf(stderr,
+ "usage: db_archive [-alsVv] [-h home] [-P password]\n");
+ return (EXIT_FAILURE);
+}
+
+int
+db_archive_version_check(progname)
+ const char *progname;
+{
+ int v_major, v_minor, v_patch;
+
+ /* Make sure we're loaded with the right version of the DB library. */
+ (void)db_version(&v_major, &v_minor, &v_patch);
+ if (v_major != DB_VERSION_MAJOR ||
+ v_minor != DB_VERSION_MINOR || v_patch != DB_VERSION_PATCH) {
+ fprintf(stderr,
+ "%s: version %d.%d.%d doesn't match library version %d.%d.%d\n",
+ progname, DB_VERSION_MAJOR, DB_VERSION_MINOR,
+ DB_VERSION_PATCH, v_major, v_minor, v_patch);
+ return (EXIT_FAILURE);
+ }
+ return (0);
+}
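
For illustration only (not part of the imported BDB sources): the VxWorks port above wraps the utility's command-line main() in db_archive(), which takes a single option string, hands it to __db_util_arg() for tokenizing, and dispatches to db_archive_main(). A minimal, hedged sketch of driving that wrapper from application code follows; the environment home path is hypothetical, and it assumes __db_util_arg() supplies "db_archive" as argv[0] and splits the string into the remaining arguments, as the wrapper code suggests.

    /*
     * Hedged usage sketch -- added for illustration in this review,
     * not shipped code.  It calls the db_archive() wrapper defined
     * in db_archive.c above.
     */
    #include <stdio.h>
    #include <stdlib.h>

    int db_archive(char *);     /* wrapper from db_archive.c */

    int
    archive_logs_example(void)
    {
        /*
         * The option string is parsed like a command line:
         * -h names the (hypothetical) environment home,
         * -a requests absolute pathnames, -l lists log file names.
         */
        if (db_archive("-h /vxdb/testenv -a -l") != EXIT_SUCCESS) {
            printf("db_archive example failed\n");
            return (-1);
        }
        return (0);
    }
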
diff --git a/bdb/build_vxworks/db_archive/db_archive.wpj b/bdb/build_vxworks/db_archive/db_archive.wpj
new file mode 100755
index 00000000000..06091bb6b5f
--- /dev/null
+++ b/bdb/build_vxworks/db_archive/db_archive.wpj
@@ -0,0 +1,160 @@
+Document file - DO NOT EDIT
+
+<BEGIN> BUILD_PENTIUMgnu_BUILDRULE
+db_archive.out
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_AR
+ar386
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_ARCHIVE
+$(PRJ_DIR)/PENTIUMgnu/db_archive.a
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_AS
+cc386
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_CC
+cc386
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_CFLAGS
+-g \
+ -mpentium \
+ -ansi \
+ -nostdinc \
+ -DRW_MULTI_THREAD \
+ -D_REENTRANT \
+ -fvolatile \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -I$(PRJ_DIR)/.. \
+ -I$(PRJ_DIR)/../.. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_CFLAGS_AS
+-g \
+ -mpentium \
+ -ansi \
+ -nostdinc \
+ -fvolatile \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -P \
+ -x \
+ assembler-with-cpp \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_CPP
+cc386 -E -P -xc
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_LD
+ld386
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_LDDEPS
+
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_LDFLAGS
+-X -N
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_LD_PARTIAL_FLAGS
+-X -r
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_NM
+nm386 -g
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_OPTION_DEFINE_MACRO
+-D
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_OPTION_INCLUDE_DIR
+-I
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_POST_BUILD_RULE
+
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_PRJ_LIBS
+
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_SIZE
+size386
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_RO_DEPEND_PATH
+{$(WIND_BASE)/target/h/} \
+ {$(WIND_BASE)/target/src/} \
+ {$(WIND_BASE)/target/config/}
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_TC
+::tc_PENTIUMgnu
+<END>
+
+<BEGIN> BUILD_RULE_archive
+
+<END>
+
+<BEGIN> BUILD_RULE_db_archive.out
+
+<END>
+
+<BEGIN> BUILD_RULE_objects
+
+<END>
+
+<BEGIN> BUILD__CURRENT
+PENTIUMgnu
+<END>
+
+<BEGIN> BUILD__LIST
+PENTIUMgnu
+<END>
+
+<BEGIN> CORE_INFO_TYPE
+::prj_vxApp
+<END>
+
+<BEGIN> CORE_INFO_VERSION
+2.0
+<END>
+
+<BEGIN> FILE_db_archive.c_dependDone
+FALSE
+<END>
+
+<BEGIN> FILE_db_archive.c_dependencies
+
+<END>
+
+<BEGIN> FILE_db_archive.c_objects
+db_archive.o
+<END>
+
+<BEGIN> FILE_db_archive.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> PROJECT_FILES
+$(PRJ_DIR)/db_archive.c
+<END>
+
+<BEGIN> userComments
+db_archive
+<END>
diff --git a/bdb/build_vxworks/db_archive/db_archive/Makefile.custom b/bdb/build_vxworks/db_archive/db_archive/Makefile.custom
new file mode 100644
index 00000000000..ca781f7b251
--- /dev/null
+++ b/bdb/build_vxworks/db_archive/db_archive/Makefile.custom
@@ -0,0 +1,51 @@
+#
+# Custom Makefile shell
+#
+# This file may be edited freely, since it will not be regenerated
+# by the project manager.
+#
+# Use this makefile to define rules to make external binaries
+# and deposit them in the $(EXTERNAL_BINARIES_DIR) directory.
+#
+# If you have specified external modules during your component
+# creation, you will find make rules already in place below.
+# You will likely have to edit these to suit your individual
+# build setup.
+#
+# You may wish to use the CPU, BUILD_SPEC or TOOL make variables in
+# your Makefile to support builds for different architectures. Use
+# the FORCE_EXTERNAL_MAKE phony target to ensure that your external
+# make always runs.
+#
+# The example below assumes that your custom makefile is in the
+# mySourceTree directory, and that the binary file it produces
+# is placed into the $(BUILD_SPEC) sub-directory.
+#
+# EXTERNAL_SOURCE_BASE = /folk/me/mySourceTree
+# EXTERNAL_MODULE = myLibrary.o
+# EXTERNAL_MAKE = make
+#
+# $(EXTERNAL_BINARIES_DIR)/$(EXTERNAL_MODULE) : FORCE_EXTERNAL_MAKE
+# $(EXTERNAL_MAKE) -C $(EXTERNAL_SOURCE_BASE) \
+# -f $(EXTERNAL_SOURCE_BASE)/Makefile \
+# CPU=$(CPU) BUILD_SPEC=$(BUILD_SPEC) $(@F)
+# $(CP) $(subst /,$(DIRCHAR),$(EXTERNAL_SOURCE_BASE)/$(BUILD_SPEC)/$(@F) $@)
+#
+# If you are not adding your external modules from the component wizard,
+# you will have to include them in your component yourself:
+#
+# From the GUI, you can do this with the Component's 'Add external module'
+# dialog.
+#
+# If you are using the command line, add the module(s) by editing the
+# MODULES line in component.cdf file, e.g.
+#
+# Component INCLUDE_MYCOMPONENT {
+#
+# MODULES foo.o goo.o \
+# myLibrary.o
+#
+
+
+# rules to build custom libraries
+
diff --git a/bdb/build_vxworks/db_archive/db_archive/component.cdf b/bdb/build_vxworks/db_archive/db_archive/component.cdf
new file mode 100755
index 00000000000..cf88762cbc5
--- /dev/null
+++ b/bdb/build_vxworks/db_archive/db_archive/component.cdf
@@ -0,0 +1,30 @@
+/* component.cdf - dynamically updated configuration */
+
+/*
+ * NOTE: you may edit this file to alter the configuration
+ * But all non-configuration information, including comments,
+ * will be lost upon rebuilding this project.
+ */
+
+/* Component information */
+
+Component INCLUDE_DB_ARCHIVE {
+ ENTRY_POINTS ALL_GLOBAL_SYMBOLS
+ MODULES db_archive.o
+ NAME db_archive
+ PREF_DOMAIN ANY
+ _INIT_ORDER usrComponentsInit
+}
+
+/* EntryPoint information */
+
+/* Module information */
+
+Module db_archive.o {
+
+ NAME db_archive.o
+ SRC_PATH_NAME $PRJ_DIR/../db_archive.c
+}
+
+/* Parameter information */
+
diff --git a/bdb/build_vxworks/db_archive/db_archive/component.wpj b/bdb/build_vxworks/db_archive/db_archive/component.wpj
new file mode 100755
index 00000000000..e50d91592e6
--- /dev/null
+++ b/bdb/build_vxworks/db_archive/db_archive/component.wpj
@@ -0,0 +1,475 @@
+Document file - DO NOT EDIT
+
+<BEGIN> CORE_INFO_TYPE
+::prj_component
+<END>
+
+<BEGIN> CORE_INFO_VERSION
+AE1.1
+<END>
+
+<BEGIN> BUILD__CURRENT
+PENTIUM2gnu.debug
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_CURRENT_TARGET
+default
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_DEFAULTFORCPU
+1
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../db_archive.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../db_archive.c_objects
+db_archive.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../db_archive.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../db_archive.c_objects
+db_archive.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../db_archive.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/compConfig.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/compConfig.c_objects
+compConfig.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/compConfig.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_AR
+arpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_AS
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_CC
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_CFLAGS
+-mcpu=pentiumpro \
+ -march=pentiumpro \
+ -ansi \
+ -DRW_MULTI_THREAD \
+ -D_REENTRANT \
+ -g \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -MD \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -I$(PRJ_DIR)/../.. \
+ -I$(PRJ_DIR)/../../.. \
+ -DCPU=PENTIUM2
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_CFLAGS_AS
+-mcpu=pentiumpro \
+ -march=pentiumpro \
+ -ansi \
+ -g \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -P \
+ -x \
+ assembler-with-cpp \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM2
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_CPP
+ccpentium -E -P
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_CPPFILT
+c++filtpentium --strip-underscores
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_LD
+ldpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_LDFLAGS
+-X
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_LDPARTIAL
+ccpentium \
+ -B$(WIND_BASE)/host/$(WIND_HOST_TYPE)/lib/gcc-lib/ \
+ -nostdlib \
+ -r \
+ -Wl,-X
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_LD_PARTIAL_FLAGS
+-X -r
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_NM
+nmpentium -g
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_OPTION_DEFINE_MACRO
+-D
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_OPTION_GENERATE_DEPENDENCY_FILE
+-MD
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_OPTION_INCLUDE_DIR
+-I
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_RELEASE
+0
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_SIZE
+sizepentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_RELEASE
+0
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_RO_DEPEND_PATH
+$(WIND_BASE)/target/h/
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_TC
+::tc_PENTIUM2gnu.debug
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_DEFAULTFORCPU
+0
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../db_archive.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../db_archive.c_objects
+db_archive.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../db_archive.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/compConfig.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/compConfig.c_objects
+compConfig.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/compConfig.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_AR
+arpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_AS
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_CC
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_CFLAGS
+-mcpu=pentiumpro \
+ -march=pentiumpro \
+ -ansi \
+ -DRW_MULTI_THREAD \
+ -D_REENTRANT \
+ -O2 \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -MD \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -I$(PRJ_DIR)/../.. \
+ -I$(PRJ_DIR)/../../.. \
+ -DCPU=PENTIUM2
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_CFLAGS_AS
+-mcpu=pentiumpro \
+ -march=pentiumpro \
+ -ansi \
+ -O2 \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -P \
+ -x \
+ assembler-with-cpp \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM2
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_CPP
+ccpentium -E -P
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_CPPFILT
+c++filtpentium --strip-underscores
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_LD
+ldpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_LDFLAGS
+-X
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_LDPARTIAL
+ccpentium \
+ -B$(WIND_BASE)/host/$(WIND_HOST_TYPE)/lib/gcc-lib/ \
+ -nostdlib \
+ -r \
+ -Wl,-X
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_LD_PARTIAL_FLAGS
+-X -r
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_NM
+nmpentium -g
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_OPTION_DEFINE_MACRO
+-D
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_OPTION_GENERATE_DEPENDENCY_FILE
+-MD
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_OPTION_INCLUDE_DIR
+-I
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_RELEASE
+1
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_SIZE
+sizepentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_RELEASE
+1
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_RO_DEPEND_PATH
+$(WIND_BASE)/target/h/
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_TC
+::tc_PENTIUM2gnu.release
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_DEFAULTFORCPU
+1
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../db_archive.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../db_archive.c_objects
+db_archive.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../db_archive.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/compConfig.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/compConfig.c_objects
+compConfig.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/compConfig.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_AR
+arpentium
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_AS
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_CC
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_CFLAGS
+-mcpu=pentium \
+ -march=pentium \
+ -ansi \
+ -DRW_MULTI_THREAD \
+ -D_REENTRANT \
+ -g \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -MD \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -I$(PRJ_DIR)/../.. \
+ -I$(PRJ_DIR)/../../.. \
+ -DCPU=PENTIUM
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_CFLAGS_AS
+-mcpu=pentium \
+ -march=pentium \
+ -ansi \
+ -g \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -P \
+ -x \
+ assembler-with-cpp \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_CPP
+ccpentium -E -P
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_CPPFILT
+c++filtpentium --strip-underscores
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_LD
+ldpentium
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_LDFLAGS
+-X
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_LDPARTIAL
+ccpentium \
+ -B$(WIND_BASE)/host/$(WIND_HOST_TYPE)/lib/gcc-lib/ \
+ -nostdlib \
+ -r \
+ -Wl,-X
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_LD_PARTIAL_FLAGS
+-X -r
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_NM
+nmpentium -g
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_OPTION_DEFINE_MACRO
+-D
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_OPTION_GENERATE_DEPENDENCY_FILE
+-MD
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_OPTION_INCLUDE_DIR
+-I
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_RELEASE
+0
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_SIZE
+sizepentium
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_RELEASE
+0
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_RO_DEPEND_PATH
+$(WIND_BASE)/target/h/
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_TC
+::tc_PENTIUMgnu.debug
+<END>
+
+<BEGIN> BUILD__LIST
+PENTIUM2gnu.debug PENTIUM2gnu.release PENTIUMgnu.debug
+<END>
+
+<BEGIN> PROJECT_FILES
+$(PRJ_DIR)/../db_archive.c \
+ $(PRJ_DIR)/compConfig.c
+<END>
+
+<BEGIN> WCC__CDF_PATH
+$(PRJ_DIR)
+<END>
+
+<BEGIN> WCC__CURRENT
+PENTIUM2gnu.debug
+<END>
+
+<BEGIN> WCC__LIST
+PENTIUM2gnu.debug
+<END>
+
+<BEGIN> WCC__MXR_LIBS
+lib$(CPU)$(TOOL)vx.a
+<END>
+
+<BEGIN> WCC__OBJS_PATH
+$(WIND_BASE)/target/lib/obj$CPU$TOOLvx
+<END>
+
diff --git a/bdb/build_vxworks/db_checkpoint/db_checkpoint.c b/bdb/build_vxworks/db_checkpoint/db_checkpoint.c
new file mode 100644
index 00000000000..1e5a45a6fe5
--- /dev/null
+++ b/bdb/build_vxworks/db_checkpoint/db_checkpoint.c
@@ -0,0 +1,258 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char copyright[] =
+ "Copyright (c) 1996-2002\nSleepycat Software Inc. All rights reserved.\n";
+static const char revid[] =
+ "$Id: db_checkpoint.c,v 11.46 2002/08/08 03:50:31 bostic Exp $";
+#endif
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#if TIME_WITH_SYS_TIME
+#include <sys/time.h>
+#include <time.h>
+#else
+#if HAVE_SYS_TIME_H
+#include <sys/time.h>
+#else
+#include <time.h>
+#endif
+#endif
+
+#include <limits.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_page.h"
+#include "dbinc/db_am.h"
+
+int db_checkpoint_main __P((int, char *[]));
+int db_checkpoint_usage __P((void));
+int db_checkpoint_version_check __P((const char *));
+
+int
+db_checkpoint(args)
+ char *args;
+{
+ int argc;
+ char **argv;
+
+ __db_util_arg("db_checkpoint", args, &argc, &argv);
+ return (db_checkpoint_main(argc, argv) ? EXIT_FAILURE : EXIT_SUCCESS);
+}
+
+#include <stdio.h>
+#define ERROR_RETURN ERROR
+
+int
+db_checkpoint_main(argc, argv)
+ int argc;
+ char *argv[];
+{
+ extern char *optarg;
+ extern int optind, __db_getopt_reset;
+ DB_ENV *dbenv;
+ const char *progname = "db_checkpoint";
+ time_t now;
+ long argval;
+ u_int32_t flags, kbytes, minutes, seconds;
+ int ch, e_close, exitval, once, ret, verbose;
+ char *home, *logfile, *passwd;
+
+ if ((ret = db_checkpoint_version_check(progname)) != 0)
+ return (ret);
+
+ /*
+ * !!!
+ * Don't allow a fully unsigned 32-bit number, some compilers get
+ * upset and require it to be specified in hexadecimal and so on.
+ */
+#define MAX_UINT32_T 2147483647
+
+ kbytes = minutes = 0;
+ e_close = exitval = once = verbose = 0;
+ flags = 0;
+ home = logfile = passwd = NULL;
+ __db_getopt_reset = 1;
+ while ((ch = getopt(argc, argv, "1h:k:L:P:p:Vv")) != EOF)
+ switch (ch) {
+ case '1':
+ once = 1;
+ flags = DB_FORCE;
+ break;
+ case 'h':
+ home = optarg;
+ break;
+ case 'k':
+ if (__db_getlong(NULL, progname,
+ optarg, 1, (long)MAX_UINT32_T, &argval))
+ return (EXIT_FAILURE);
+ kbytes = argval;
+ break;
+ case 'L':
+ logfile = optarg;
+ break;
+ case 'P':
+ passwd = strdup(optarg);
+ memset(optarg, 0, strlen(optarg));
+ if (passwd == NULL) {
+ fprintf(stderr, "%s: strdup: %s\n",
+ progname, strerror(errno));
+ return (EXIT_FAILURE);
+ }
+ break;
+ case 'p':
+ if (__db_getlong(NULL, progname,
+ optarg, 1, (long)MAX_UINT32_T, &argval))
+ return (EXIT_FAILURE);
+ minutes = argval;
+ break;
+ case 'V':
+ printf("%s\n", db_version(NULL, NULL, NULL));
+ return (EXIT_SUCCESS);
+ case 'v':
+ verbose = 1;
+ break;
+ case '?':
+ default:
+ return (db_checkpoint_usage());
+ }
+ argc -= optind;
+ argv += optind;
+
+ if (argc != 0)
+ return (db_checkpoint_usage());
+
+ if (once == 0 && kbytes == 0 && minutes == 0) {
+ (void)fprintf(stderr,
+ "%s: at least one of -1, -k and -p must be specified\n",
+ progname);
+ return (EXIT_FAILURE);
+ }
+
+ /* Handle possible interruptions. */
+ __db_util_siginit();
+
+ /* Log our process ID. */
+ if (logfile != NULL && __db_util_logset(progname, logfile))
+ goto shutdown;
+
+ /*
+ * Create an environment object and initialize it for error
+ * reporting.
+ */
+ if ((ret = db_env_create(&dbenv, 0)) != 0) {
+ fprintf(stderr,
+ "%s: db_env_create: %s\n", progname, db_strerror(ret));
+ goto shutdown;
+ }
+ e_close = 1;
+
+ dbenv->set_errfile(dbenv, stderr);
+ dbenv->set_errpfx(dbenv, progname);
+
+ if (passwd != NULL && (ret = dbenv->set_encrypt(dbenv,
+ passwd, DB_ENCRYPT_AES)) != 0) {
+ dbenv->err(dbenv, ret, "set_passwd");
+ goto shutdown;
+ }
+ /* Initialize the environment. */
+ if ((ret = dbenv->open(dbenv,
+ home, DB_JOINENV | DB_USE_ENVIRON, 0)) != 0) {
+ dbenv->err(dbenv, ret, "open");
+ goto shutdown;
+ }
+
+ /* Register the standard pgin/pgout functions, in case we do I/O. */
+ if ((ret = dbenv->memp_register(
+ dbenv, DB_FTYPE_SET, __db_pgin, __db_pgout)) != 0) {
+ dbenv->err(dbenv, ret,
+ "DB_ENV->memp_register: failed to register access method functions");
+ goto shutdown;
+ }
+
+ /*
+ * If we have only a time delay, then we'll sleep the right amount
+ * to wake up when a checkpoint is necessary. If we have a "kbytes"
+ * field set, then we'll check every 30 seconds.
+ */
+ seconds = kbytes != 0 ? 30 : minutes * 60;
+ while (!__db_util_interrupted()) {
+ if (verbose) {
+ (void)time(&now);
+ dbenv->errx(dbenv, "checkpoint: %s", ctime(&now));
+ }
+
+ if ((ret = dbenv->txn_checkpoint(dbenv,
+ kbytes, minutes, flags)) != 0) {
+ dbenv->err(dbenv, ret, "txn_checkpoint");
+ goto shutdown;
+ }
+
+ if (once)
+ break;
+
+ (void)__os_sleep(dbenv, seconds, 0);
+ }
+
+ if (0) {
+shutdown: exitval = 1;
+ }
+
+ /* Clean up the logfile. */
+ if (logfile != NULL)
+ remove(logfile);
+
+ /* Clean up the environment. */
+ if (e_close && (ret = dbenv->close(dbenv, 0)) != 0) {
+ exitval = 1;
+ fprintf(stderr,
+ "%s: dbenv->close: %s\n", progname, db_strerror(ret));
+ }
+
+ /* Resend any caught signal. */
+ __db_util_sigresend();
+
+ return (exitval == 0 ? EXIT_SUCCESS : EXIT_FAILURE);
+}
+
+int
+db_checkpoint_usage()
+{
+ (void)fprintf(stderr, "%s\n\t%s\n",
+ "usage: db_checkpoint [-1Vv]",
+ "[-h home] [-k kbytes] [-L file] [-P password] [-p min]");
+ return (EXIT_FAILURE);
+}
+
+int
+db_checkpoint_version_check(progname)
+ const char *progname;
+{
+ int v_major, v_minor, v_patch;
+
+ /* Make sure we're loaded with the right version of the DB library. */
+ (void)db_version(&v_major, &v_minor, &v_patch);
+ if (v_major != DB_VERSION_MAJOR ||
+ v_minor != DB_VERSION_MINOR || v_patch != DB_VERSION_PATCH) {
+ fprintf(stderr,
+ "%s: version %d.%d.%d doesn't match library version %d.%d.%d\n",
+ progname, DB_VERSION_MAJOR, DB_VERSION_MINOR,
+ DB_VERSION_PATCH, v_major, v_minor, v_patch);
+ return (EXIT_FAILURE);
+ }
+ return (0);
+}
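
On VxWorks this utility is entered through db_checkpoint(), which hands a single argument string to __db_util_arg() for splitting and then runs db_checkpoint_main() exactly as the command-line tool would. Below is a hedged sketch of driving it from application code; the environment home path is illustrative and must name an existing environment, since the utility joins it with DB_JOINENV.

#include <stdio.h>
#include <stdlib.h>

/*
 * Sketch: force one checkpoint of an existing environment from a
 * VxWorks task.  "/tmp/dbenv" is an illustrative home directory.
 */
extern int db_checkpoint(char *);

void
checkpoint_once(void)
{
	/*
	 * The argument string may be modified while it is split into
	 * argc/argv, so use a writable buffer, not a string literal.
	 */
	char args[] = "-1 -h /tmp/dbenv";

	if (db_checkpoint(args) != EXIT_SUCCESS)
		printf("db_checkpoint failed\n");
}
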
diff --git a/bdb/build_vxworks/db_checkpoint/db_checkpoint.wpj b/bdb/build_vxworks/db_checkpoint/db_checkpoint.wpj
new file mode 100755
index 00000000000..cae4317821b
--- /dev/null
+++ b/bdb/build_vxworks/db_checkpoint/db_checkpoint.wpj
@@ -0,0 +1,160 @@
+Document file - DO NOT EDIT
+
+<BEGIN> BUILD_PENTIUMgnu_BUILDRULE
+db_checkpoint.out
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_AR
+ar386
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_ARCHIVE
+$(PRJ_DIR)/PENTIUMgnu/db_checkpoint.a
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_AS
+cc386
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_CC
+cc386
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_CFLAGS
+-g \
+ -mpentium \
+ -ansi \
+ -nostdinc \
+ -DRW_MULTI_THREAD \
+ -D_REENTRANT \
+ -fvolatile \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -I$(PRJ_DIR)/.. \
+ -I$(PRJ_DIR)/../.. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_CFLAGS_AS
+-g \
+ -mpentium \
+ -ansi \
+ -nostdinc \
+ -fvolatile \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -P \
+ -x \
+ assembler-with-cpp \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_CPP
+cc386 -E -P -xc
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_LD
+ld386
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_LDDEPS
+
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_LDFLAGS
+-X -N
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_LD_PARTIAL_FLAGS
+-X -r
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_NM
+nm386 -g
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_OPTION_DEFINE_MACRO
+-D
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_OPTION_INCLUDE_DIR
+-I
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_POST_BUILD_RULE
+
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_PRJ_LIBS
+
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_SIZE
+size386
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_RO_DEPEND_PATH
+{$(WIND_BASE)/target/h/} \
+ {$(WIND_BASE)/target/src/} \
+ {$(WIND_BASE)/target/config/}
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_TC
+::tc_PENTIUMgnu
+<END>
+
+<BEGIN> BUILD_RULE_archive
+
+<END>
+
+<BEGIN> BUILD_RULE_db_checkpoint.out
+
+<END>
+
+<BEGIN> BUILD_RULE_objects
+
+<END>
+
+<BEGIN> BUILD__CURRENT
+PENTIUMgnu
+<END>
+
+<BEGIN> BUILD__LIST
+PENTIUMgnu
+<END>
+
+<BEGIN> CORE_INFO_TYPE
+::prj_vxApp
+<END>
+
+<BEGIN> CORE_INFO_VERSION
+2.0
+<END>
+
+<BEGIN> FILE_db_checkpoint.c_dependDone
+FALSE
+<END>
+
+<BEGIN> FILE_db_checkpoint.c_dependencies
+
+<END>
+
+<BEGIN> FILE_db_checkpoint.c_objects
+db_checkpoint.o
+<END>
+
+<BEGIN> FILE_db_checkpoint.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> PROJECT_FILES
+$(PRJ_DIR)/db_checkpoint.c
+<END>
+
+<BEGIN> userComments
+db_checkpoint
+<END>
diff --git a/bdb/build_vxworks/db_checkpoint/db_checkpoint/Makefile.custom b/bdb/build_vxworks/db_checkpoint/db_checkpoint/Makefile.custom
new file mode 100644
index 00000000000..ca781f7b251
--- /dev/null
+++ b/bdb/build_vxworks/db_checkpoint/db_checkpoint/Makefile.custom
@@ -0,0 +1,51 @@
+#
+# Custom Makefile shell
+#
+# This file may be edited freely, since it will not be regenerated
+# by the project manager.
+#
+# Use this makefile to define rules to make external binaries
+# and deposit them in the $(EXTERNAL_BINARIES_DIR) directory.
+#
+# If you have specified external modules during your component
+# creation, you will find make rules already in place below.
+# You will likely have to edit these to suit your individual
+# build setup.
+#
+# You may wish to use the CPU, BUILD_SPEC or TOOL make variables in
+# your Makefile to support builds for different architectures. Use
+# the FORCE_EXTERNAL_MAKE phony target to ensure that your external
+# make always runs.
+#
+# The example below assumes that your custom makefile is in the
+# mySourceTree directory, and that the binary file it produces
+# is placed into the $(BUILD_SPEC) sub-directory.
+#
+# EXTERNAL_SOURCE_BASE = /folk/me/mySourceTree
+# EXTERNAL_MODULE = myLibrary.o
+# EXTERNAL_MAKE = make
+#
+# $(EXTERNAL_BINARIES_DIR)/$(EXTERNAL_MODULE) : FORCE_EXTERNAL_MAKE
+# $(EXTERNAL_MAKE) -C $(EXTERNAL_SOURCE_BASE) \
+# -f $(EXTERNAL_SOURCE_BASE)/Makefile \
+# CPU=$(CPU) BUILD_SPEC=$(BUILD_SPEC) $(@F)
+# $(CP) $(subst /,$(DIRCHAR),$(EXTERNAL_SOURCE_BASE)/$(BUILD_SPEC)/$(@F) $@)
+#
+# If you are not adding your external modules from the component wizard,
+# you will have to include them in your component yourself:
+#
+# From the GUI, you can do this with the Component's 'Add external module'
+# dialog.
+#
+# If you are using the command line, add the module(s) by editing the
+# MODULES line in component.cdf file, e.g.
+#
+# Component INCLUDE_MYCOMPONENT {
+#
+# MODULES foo.o goo.o \
+# myLibrary.o
+#
+
+
+# rules to build custom libraries
+
diff --git a/bdb/build_vxworks/db_checkpoint/db_checkpoint/component.cdf b/bdb/build_vxworks/db_checkpoint/db_checkpoint/component.cdf
new file mode 100755
index 00000000000..ea05c3a6182
--- /dev/null
+++ b/bdb/build_vxworks/db_checkpoint/db_checkpoint/component.cdf
@@ -0,0 +1,30 @@
+/* component.cdf - dynamically updated configuration */
+
+/*
+ * NOTE: you may edit this file to alter the configuration
+ * But all non-configuration information, including comments,
+ * will be lost upon rebuilding this project.
+ */
+
+/* Component information */
+
+Component INCLUDE_DB_CHECKPOINT {
+ ENTRY_POINTS ALL_GLOBAL_SYMBOLS
+ MODULES db_checkpoint.o
+ NAME db_checkpoint
+ PREF_DOMAIN ANY
+ _INIT_ORDER usrComponentsInit
+}
+
+/* EntryPoint information */
+
+/* Module information */
+
+Module db_checkpoint.o {
+
+ NAME db_checkpoint.o
+ SRC_PATH_NAME $PRJ_DIR/../db_checkpoint.c
+}
+
+/* Parameter information */
+
diff --git a/bdb/build_vxworks/db_checkpoint/db_checkpoint/component.wpj b/bdb/build_vxworks/db_checkpoint/db_checkpoint/component.wpj
new file mode 100755
index 00000000000..3b5daa113e1
--- /dev/null
+++ b/bdb/build_vxworks/db_checkpoint/db_checkpoint/component.wpj
@@ -0,0 +1,475 @@
+Document file - DO NOT EDIT
+
+<BEGIN> CORE_INFO_TYPE
+::prj_component
+<END>
+
+<BEGIN> CORE_INFO_VERSION
+AE1.1
+<END>
+
+<BEGIN> BUILD__CURRENT
+PENTIUM2gnu.debug
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_CURRENT_TARGET
+default
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_DEFAULTFORCPU
+1
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../db_checkpoint.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../db_checkpoint.c_objects
+db_checkpoint.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../db_checkpoint.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../db_checkpoint.c_objects
+db_checkpoint.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../db_checkpoint.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/compConfig.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/compConfig.c_objects
+compConfig.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/compConfig.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_AR
+arpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_AS
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_CC
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_CFLAGS
+-mcpu=pentiumpro \
+ -march=pentiumpro \
+ -ansi \
+ -DRW_MULTI_THREAD \
+ -D_REENTRANT \
+ -g \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -MD \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -I$(PRJ_DIR)/../.. \
+ -I$(PRJ_DIR)/../../.. \
+ -DCPU=PENTIUM2
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_CFLAGS_AS
+-mcpu=pentiumpro \
+ -march=pentiumpro \
+ -ansi \
+ -g \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -P \
+ -x \
+ assembler-with-cpp \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM2
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_CPP
+ccpentium -E -P
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_CPPFILT
+c++filtpentium --strip-underscores
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_LD
+ldpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_LDFLAGS
+-X
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_LDPARTIAL
+ccpentium \
+ -B$(WIND_BASE)/host/$(WIND_HOST_TYPE)/lib/gcc-lib/ \
+ -nostdlib \
+ -r \
+ -Wl,-X
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_LD_PARTIAL_FLAGS
+-X -r
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_NM
+nmpentium -g
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_OPTION_DEFINE_MACRO
+-D
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_OPTION_GENERATE_DEPENDENCY_FILE
+-MD
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_OPTION_INCLUDE_DIR
+-I
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_RELEASE
+0
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_SIZE
+sizepentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_RELEASE
+0
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_RO_DEPEND_PATH
+$(WIND_BASE)/target/h/
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_TC
+::tc_PENTIUM2gnu.debug
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_DEFAULTFORCPU
+0
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../db_checkpoint.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../db_checkpoint.c_objects
+db_checkpoint.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../db_checkpoint.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/compConfig.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/compConfig.c_objects
+compConfig.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/compConfig.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_AR
+arpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_AS
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_CC
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_CFLAGS
+-mcpu=pentiumpro \
+ -march=pentiumpro \
+ -ansi \
+ -DRW_MULTI_THREAD \
+ -D_REENTRANT \
+ -O2 \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -MD \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -I$(PRJ_DIR)/../.. \
+ -I$(PRJ_DIR)/../../.. \
+ -DCPU=PENTIUM2
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_CFLAGS_AS
+-mcpu=pentiumpro \
+ -march=pentiumpro \
+ -ansi \
+ -O2 \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -P \
+ -x \
+ assembler-with-cpp \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM2
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_CPP
+ccpentium -E -P
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_CPPFILT
+c++filtpentium --strip-underscores
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_LD
+ldpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_LDFLAGS
+-X
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_LDPARTIAL
+ccpentium \
+ -B$(WIND_BASE)/host/$(WIND_HOST_TYPE)/lib/gcc-lib/ \
+ -nostdlib \
+ -r \
+ -Wl,-X
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_LD_PARTIAL_FLAGS
+-X -r
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_NM
+nmpentium -g
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_OPTION_DEFINE_MACRO
+-D
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_OPTION_GENERATE_DEPENDENCY_FILE
+-MD
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_OPTION_INCLUDE_DIR
+-I
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_RELEASE
+1
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_SIZE
+sizepentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_RELEASE
+1
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_RO_DEPEND_PATH
+$(WIND_BASE)/target/h/
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_TC
+::tc_PENTIUM2gnu.release
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_DEFAULTFORCPU
+1
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../db_checkpoint.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../db_checkpoint.c_objects
+db_checkpoint.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../db_checkpoint.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/compConfig.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/compConfig.c_objects
+compConfig.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/compConfig.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_AR
+arpentium
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_AS
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_CC
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_CFLAGS
+-mcpu=pentium \
+ -march=pentium \
+ -ansi \
+ -DRW_MULTI_THREAD \
+ -D_REENTRANT \
+ -g \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -MD \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -I$(PRJ_DIR)/../.. \
+ -I$(PRJ_DIR)/../../.. \
+ -DCPU=PENTIUM
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_CFLAGS_AS
+-mcpu=pentium \
+ -march=pentium \
+ -ansi \
+ -g \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -P \
+ -x \
+ assembler-with-cpp \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_CPP
+ccpentium -E -P
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_CPPFILT
+c++filtpentium --strip-underscores
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_LD
+ldpentium
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_LDFLAGS
+-X
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_LDPARTIAL
+ccpentium \
+ -B$(WIND_BASE)/host/$(WIND_HOST_TYPE)/lib/gcc-lib/ \
+ -nostdlib \
+ -r \
+ -Wl,-X
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_LD_PARTIAL_FLAGS
+-X -r
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_NM
+nmpentium -g
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_OPTION_DEFINE_MACRO
+-D
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_OPTION_GENERATE_DEPENDENCY_FILE
+-MD
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_OPTION_INCLUDE_DIR
+-I
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_RELEASE
+0
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_SIZE
+sizepentium
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_RELEASE
+0
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_RO_DEPEND_PATH
+$(WIND_BASE)/target/h/
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_TC
+::tc_PENTIUMgnu.debug
+<END>
+
+<BEGIN> BUILD__LIST
+PENTIUM2gnu.debug PENTIUM2gnu.release PENTIUMgnu.debug
+<END>
+
+<BEGIN> PROJECT_FILES
+$(PRJ_DIR)/../db_checkpoint.c \
+ $(PRJ_DIR)/compConfig.c
+<END>
+
+<BEGIN> WCC__CDF_PATH
+$(PRJ_DIR)
+<END>
+
+<BEGIN> WCC__CURRENT
+PENTIUM2gnu.debug
+<END>
+
+<BEGIN> WCC__LIST
+PENTIUM2gnu.debug
+<END>
+
+<BEGIN> WCC__MXR_LIBS
+lib$(CPU)$(TOOL)vx.a
+<END>
+
+<BEGIN> WCC__OBJS_PATH
+$(WIND_BASE)/target/lib/obj$CPU$TOOLvx
+<END>
+
diff --git a/bdb/build_vxworks/db_config.h b/bdb/build_vxworks/db_config.h
index cccfc4a7cf6..642d9927f67 100644
--- a/bdb/build_vxworks/db_config.h
+++ b/bdb/build_vxworks/db_config.h
@@ -1,231 +1,337 @@
-/*
- * $Id: db_config.h,v 1.4 2000/12/12 18:39:26 bostic Exp $
- */
-
-/* Define if building VxWorks */
-#define HAVE_VXWORKS 1
-
-/* Define to empty if the keyword does not work. */
-/* #undef const */
-
-/* Define if your struct stat has st_blksize. */
-#define HAVE_ST_BLKSIZE 1
-
-/* Define to `int' if <sys/types.h> doesn't define. */
-/* #undef mode_t */
-
-/* Define to `long' if <sys/types.h> doesn't define. */
-/* #undef off_t */
-
-/* Define to `int' if <sys/types.h> doesn't define. */
-/* #undef pid_t */
-
-/* Define to `unsigned' if <sys/types.h> doesn't define. */
-/* #undef size_t */
-
-/* Define if the `S_IS*' macros in <sys/stat.h> do not work properly. */
-/* #undef STAT_MACROS_BROKEN */
-
-/* Define if you have the ANSI C header files. */
-#define STDC_HEADERS 1
-
-/* Define if you can safely include both <sys/time.h> and <time.h>. */
-/* #undef TIME_WITH_SYS_TIME */
-
-/* !!!
- * WORDS_BIGENDIAN is the ONLY option in this file that may be edited
- * for VxWorks.
- *
- * The user must set this according to VxWork's target arch. We use an
- * x86 (little-endian) target.
- */
-/* Define if your processor stores words with the most significant
- byte first (like Motorola and SPARC, unlike Intel and VAX). */
-/* #undef WORDS_BIGENDIAN */
-
+/* DO NOT EDIT: automatically built by dist/s_vxworks. */
/* !!!
* The CONFIG_TEST option may be added using the Tornado project build.
* DO NOT modify it here.
*/
-/* Define if you are building a version for running the test suite. */
+/* Define to 1 if you want to build a version for running the test suite. */
/* #undef CONFIG_TEST */
/* !!!
* The DEBUG option may be added using the Tornado project build.
* DO NOT modify it here.
*/
-/* Define if you want a debugging version. */
+/* Define to 1 if you want a debugging version. */
/* #undef DEBUG */
-/* Define if you want a version that logs read operations. */
+/* Define to 1 if you want a version that logs read operations. */
/* #undef DEBUG_ROP */
-/* Define if you want a version that logs write operations. */
+/* Define to 1 if you want a version that logs write operations. */
/* #undef DEBUG_WOP */
/* !!!
* The DIAGNOSTIC option may be added using the Tornado project build.
* DO NOT modify it here.
*/
-/* Define if you want a version with run-time diagnostic checking. */
+/* Define to 1 if you want a version with run-time diagnostic checking. */
/* #undef DIAGNOSTIC */
-/* Define if you want to mask harmless unitialized memory read/writes. */
-/* #undef UMRW */
+/* Define to 1 if you have the `clock_gettime' function. */
+#define HAVE_CLOCK_GETTIME 1
+
+/* Define to 1 if Berkeley DB release includes strong cryptography. */
+/* #undef HAVE_CRYPTO */
+
+/* Define to 1 if you have the `directio' function. */
+/* #undef HAVE_DIRECTIO */
+
+/* Define to 1 if you have the <dirent.h> header file, and it defines `DIR'.
+ */
+#define HAVE_DIRENT_H 1
+
+/* Define to 1 if you have the <dlfcn.h> header file. */
+/* #undef HAVE_DLFCN_H */
+
+/* Define to 1 if you have EXIT_SUCCESS/EXIT_FAILURE #defines. */
+#define HAVE_EXIT_SUCCESS 1
-/* Define if fcntl/F_SETFD denies child access to file descriptors. */
+/* Define to 1 if fcntl/F_SETFD denies child access to file descriptors. */
/* #undef HAVE_FCNTL_F_SETFD */
-/* Define if building big-file environment (e.g., AIX, HP/UX, Solaris). */
-/* #undef HAVE_FILE_OFFSET_BITS */
+/* Define to 1 if allocated filesystem blocks are not zeroed. */
+#define HAVE_FILESYSTEM_NOTZERO 1
+
+/* Define to 1 if you have the `getcwd' function. */
+#define HAVE_GETCWD 1
-/* Mutex possibilities. */
+/* Define to 1 if you have the `getopt' function. */
+/* #undef HAVE_GETOPT */
+
+/* Define to 1 if you have the `gettimeofday' function. */
+/* #undef HAVE_GETTIMEOFDAY */
+
+/* Define to 1 if you have the `getuid' function. */
+/* #undef HAVE_GETUID */
+
+/* Define to 1 if you have the <inttypes.h> header file. */
+/* #undef HAVE_INTTYPES_H */
+
+/* Define to 1 if you have the `nsl' library (-lnsl). */
+/* #undef HAVE_LIBNSL */
+
+/* Define to 1 if you have the `memcmp' function. */
+#define HAVE_MEMCMP 1
+
+/* Define to 1 if you have the `memcpy' function. */
+#define HAVE_MEMCPY 1
+
+/* Define to 1 if you have the `memmove' function. */
+#define HAVE_MEMMOVE 1
+
+/* Define to 1 if you have the <memory.h> header file. */
+#define HAVE_MEMORY_H 1
+
+/* Define to 1 if you have the `mlock' function. */
+/* #undef HAVE_MLOCK */
+
+/* Define to 1 if you have the `mmap' function. */
+/* #undef HAVE_MMAP */
+
+/* Define to 1 if you have the `munlock' function. */
+/* #undef HAVE_MUNLOCK */
+
+/* Define to 1 if you have the `munmap' function. */
+/* #undef HAVE_MUNMAP */
+
+/* Define to 1 to use the GCC compiler and 68K assembly language mutexes. */
/* #undef HAVE_MUTEX_68K_GCC_ASSEMBLY */
+
+/* Define to 1 to use the AIX _check_lock mutexes. */
/* #undef HAVE_MUTEX_AIX_CHECK_LOCK */
+
+/* Define to 1 to use the GCC compiler and Alpha assembly language mutexes. */
/* #undef HAVE_MUTEX_ALPHA_GCC_ASSEMBLY */
+
+/* Define to 1 to use the GCC compiler and ARM assembly language mutexes. */
+/* #undef HAVE_MUTEX_ARM_GCC_ASSEMBLY */
+
+/* Define to 1 to use the UNIX fcntl system call mutexes. */
+/* #undef HAVE_MUTEX_FCNTL */
+
+/* Define to 1 to use the GCC compiler and PaRisc assembly language mutexes.
+ */
/* #undef HAVE_MUTEX_HPPA_GCC_ASSEMBLY */
+
+/* Define to 1 to use the msem_XXX mutexes on HP-UX. */
/* #undef HAVE_MUTEX_HPPA_MSEM_INIT */
+
+/* Define to 1 to use the GCC compiler and IA64 assembly language mutexes. */
/* #undef HAVE_MUTEX_IA64_GCC_ASSEMBLY */
-/* #undef HAVE_MUTEX_MACOS */
+
+/* Define to 1 to use the msem_XXX mutexes on systems other than HP-UX. */
/* #undef HAVE_MUTEX_MSEM_INIT */
-/* #undef HAVE_MUTEX_PPC_GCC_ASSEMBLY */
+
+/* Define to 1 to use the GCC compiler and Apple PowerPC assembly language. */
+/* #undef HAVE_MUTEX_PPC_APPLE_GCC_ASSEMBLY */
+
+/* Define to 1 to use the GCC compiler and generic PowerPC assembly language.
+ */
+/* #undef HAVE_MUTEX_PPC_GENERIC_GCC_ASSEMBLY */
+
+/* Define to 1 to use POSIX 1003.1 pthread_XXX mutexes. */
/* #undef HAVE_MUTEX_PTHREADS */
+
+/* Define to 1 to use Reliant UNIX initspin mutexes. */
/* #undef HAVE_MUTEX_RELIANTUNIX_INITSPIN */
+
+/* Define to 1 to use the GCC compiler and S/390 assembly language mutexes. */
+/* #undef HAVE_MUTEX_S390_GCC_ASSEMBLY */
+
+/* Define to 1 to use the SCO compiler and x86 assembly language mutexes. */
/* #undef HAVE_MUTEX_SCO_X86_CC_ASSEMBLY */
+
+/* Define to 1 to use the obsolete POSIX 1003.1 sema_XXX mutexes. */
/* #undef HAVE_MUTEX_SEMA_INIT */
+
+/* Define to 1 to use the SGI XXX_lock mutexes. */
/* #undef HAVE_MUTEX_SGI_INIT_LOCK */
+
+/* Define to 1 to use the Solaris _lock_XXX mutexes. */
/* #undef HAVE_MUTEX_SOLARIS_LOCK_TRY */
-/* #undef HAVE_MUTEX_SOLARIS_LWP */
-/* #undef HAVE_MUTEX_SPARC_GCC_ASSEMBLY */
-#define HAVE_MUTEX_THREADS 1
-/* #undef HAVE_MUTEX_UI_THREADS */
-/* #undef HAVE_MUTEX_UTS_CC_ASSEMBLY */
-/* #undef HAVE_MUTEX_VMS */
-#define HAVE_MUTEX_VXWORKS 1
-/* #undef HAVE_MUTEX_WIN16 */
-/* #undef HAVE_MUTEX_WIN32 */
-/* #undef HAVE_MUTEX_X86_GCC_ASSEMBLY */
-/* Define if building on QNX. */
-/* #undef HAVE_QNX */
+/* Define to 1 to use the Solaris lwp threads mutexes. */
+/* #undef HAVE_MUTEX_SOLARIS_LWP */
-/* !!!
- * The HAVE_RPC option may be added using the Tornado project build.
- * DO NOT modify it here.
- */
-/* Define if building RPC client/server. */
-/* #undef HAVE_RPC */
+/* Define to 1 to use the GCC compiler and Sparc assembly language mutexes. */
+/* #undef HAVE_MUTEX_SPARC_GCC_ASSEMBLY */
-/* Define if your sprintf returns a pointer, not a length. */
-/* #undef SPRINTF_RET_CHARPNT */
+/* Define to 1 if mutexes hold system resources. */
+#define HAVE_MUTEX_SYSTEM_RESOURCES 1
-/* Define if you have the getcwd function. */
-#define HAVE_GETCWD 1
+/* Define to 1 if fast mutexes are available. */
+#define HAVE_MUTEX_THREADS 1
-/* Define if you have the getopt function. */
-/* #undef HAVE_GETOPT */
+/* Define to 1 to configure mutexes intra-process only. */
+/* #undef HAVE_MUTEX_THREAD_ONLY */
-/* Define if you have the getuid function. */
-/* #undef HAVE_GETUID */
+/* Define to 1 to use the UNIX International mutexes. */
+/* #undef HAVE_MUTEX_UI_THREADS */
-/* Define if you have the memcmp function. */
-#define HAVE_MEMCMP 1
+/* Define to 1 to use the UTS compiler and assembly language mutexes. */
+/* #undef HAVE_MUTEX_UTS_CC_ASSEMBLY */
-/* Define if you have the memcpy function. */
-#define HAVE_MEMCPY 1
+/* Define to 1 to use VMS mutexes. */
+/* #undef HAVE_MUTEX_VMS */
-/* Define if you have the memmove function. */
-#define HAVE_MEMMOVE 1
+/* Define to 1 to use VxWorks mutexes. */
+#define HAVE_MUTEX_VXWORKS 1
-/* Define if you have the mlock function. */
-#define HAVE_MLOCK 1
+/* Define to 1 to use Windows mutexes. */
+/* #undef HAVE_MUTEX_WIN32 */
-/* Define if you have the mmap function. */
-/* #undef HAVE_MMAP */
+/* Define to 1 to use the GCC compiler and x86 assembly language mutexes. */
+/* #undef HAVE_MUTEX_X86_GCC_ASSEMBLY */
-/* Define if you have the munlock function. */
-#define HAVE_MUNLOCK 1
+/* Define to 1 if you have the <ndir.h> header file, and it defines `DIR'. */
+/* #undef HAVE_NDIR_H */
-/* Define if you have the munmap function. */
-/* #undef HAVE_MUNMAP */
+/* Define to 1 if you have the O_DIRECT flag. */
+/* #undef HAVE_O_DIRECT */
-/* Define if you have the pread function. */
+/* Define to 1 if you have the `pread' function. */
/* #undef HAVE_PREAD */
-/* Define if you have the pstat_getdynamic function. */
+/* Define to 1 if you have the `pstat_getdynamic' function. */
/* #undef HAVE_PSTAT_GETDYNAMIC */
-/* Define if you have the pwrite function. */
+/* Define to 1 if you have the `pwrite' function. */
/* #undef HAVE_PWRITE */
-/* Define if you have the qsort function. */
+/* Define to 1 if building on QNX. */
+/* #undef HAVE_QNX */
+
+/* Define to 1 if you have the `qsort' function. */
#define HAVE_QSORT 1
-/* Define if you have the raise function. */
+/* Define to 1 if you have the `raise' function. */
#define HAVE_RAISE 1
-/* Define if you have the sched_yield function. */
+/* Define to 1 if building RPC client/server. */
+/* #undef HAVE_RPC */
+
+/* Define to 1 if you have the `sched_yield' function. */
#define HAVE_SCHED_YIELD 1
-/* Define if you have the select function. */
+/* Define to 1 if you have the `select' function. */
#define HAVE_SELECT 1
-/* Define if you have the shmget function. */
+/* Define to 1 if you have the `shmget' function. */
/* #undef HAVE_SHMGET */
-/* Define if you have the snprintf function. */
+/* Define to 1 if you have the `snprintf' function. */
/* #undef HAVE_SNPRINTF */
-/* Define if you have the strcasecmp function. */
+/* Define to 1 if you have the <stdint.h> header file. */
+/* #undef HAVE_STDINT_H */
+
+/* Define to 1 if you have the <stdlib.h> header file. */
+#define HAVE_STDLIB_H 1
+
+/* Define to 1 if you have the `strcasecmp' function. */
/* #undef HAVE_STRCASECMP */
-/* Define if you have the strerror function. */
+/* Define to 1 if you have the `strdup' function. */
+/* #undef HAVE_STRDUP */
+
+/* Define to 1 if you have the `strerror' function. */
#define HAVE_STRERROR 1
-/* Define if you have the strtoul function. */
+/* Define to 1 if you have the <strings.h> header file. */
+#define HAVE_STRINGS_H 1
+
+/* Define to 1 if you have the <string.h> header file. */
+#define HAVE_STRING_H 1
+
+/* Define to 1 if you have the `strtoul' function. */
#define HAVE_STRTOUL 1
-/* Define if you have the sysconf function. */
+/* Define to 1 if `st_blksize' is member of `struct stat'. */
+#define HAVE_STRUCT_STAT_ST_BLKSIZE 1
+
+/* Define to 1 if you have the `sysconf' function. */
/* #undef HAVE_SYSCONF */
-/* Define if you have the vsnprintf function. */
+/* Define to 1 if you have the <sys/dir.h> header file, and it defines `DIR'.
+ */
+/* #undef HAVE_SYS_DIR_H */
+
+/* Define to 1 if you have the <sys/ndir.h> header file, and it defines `DIR'.
+ */
+/* #undef HAVE_SYS_NDIR_H */
+
+/* Define to 1 if you have the <sys/select.h> header file. */
+/* #undef HAVE_SYS_SELECT_H */
+
+/* Define to 1 if you have the <sys/stat.h> header file. */
+/* #undef HAVE_SYS_STAT_H */
+
+/* Define to 1 if you have the <sys/time.h> header file. */
+/* #undef HAVE_SYS_TIME_H */
+
+/* Define to 1 if you have the <sys/types.h> header file. */
+/* #undef HAVE_SYS_TYPES_H */
+
+/* Define to 1 if you have the <unistd.h> header file. */
+#define HAVE_UNISTD_H 1
+
+/* Define to 1 if unlink of file with open file descriptors will fail. */
+#define HAVE_UNLINK_WITH_OPEN_FAILURE 1
+
+/* Define to 1 if you have the `vsnprintf' function. */
/* #undef HAVE_VSNPRINTF */
-/* Define if you have the yield function. */
+/* Define to 1 if building VxWorks. */
+#define HAVE_VXWORKS 1
+
+/* Define to 1 if you have the `yield' function. */
/* #undef HAVE_YIELD */
-/* Define if you have the <dirent.h> header file. */
-#define HAVE_DIRENT_H 1
+/* Define to 1 if you have the `_fstati64' function. */
+/* #undef HAVE__FSTATI64 */
-/* Define if you have the <ndir.h> header file. */
-/* #undef HAVE_NDIR_H */
+/* Define to the address where bug reports for this package should be sent. */
+#define PACKAGE_BUGREPORT "support@sleepycat.com"
-/* Define if you have the <sys/dir.h> header file. */
-/* #undef HAVE_SYS_DIR_H */
+/* Define to the full name of this package. */
+#define PACKAGE_NAME "Berkeley DB"
-/* Define if you have the <sys/ndir.h> header file. */
-/* #undef HAVE_SYS_NDIR_H */
+/* Define to the full name and version of this package. */
+#define PACKAGE_STRING "Berkeley DB 4.1.24"
-/* Define if you have the <sys/select.h> header file. */
-/* #undef HAVE_SYS_SELECT_H */
+/* Define to the one symbol short name of this package. */
+#define PACKAGE_TARNAME "db-4.1.24"
-/* Define if you have the <sys/time.h> header file. */
-/* #undef HAVE_SYS_TIME_H */
+/* Define to the version of this package. */
+#define PACKAGE_VERSION "4.1.24"
-/* Define if you have the nsl library (-lnsl). */
-/* #undef HAVE_LIBNSL */
+/* Define to 1 if the `S_IS*' macros in <sys/stat.h> do not work properly. */
+/* #undef STAT_MACROS_BROKEN */
-/*
- * !!!
- * The following is not part of the automatic configuration setup, but
- * provides necessary VxWorks information.
- */
-#include "vxWorks.h"
+/* Define to 1 if you have the ANSI C header files. */
+#define STDC_HEADERS 1
+
+/* Define to 1 if you can safely include both <sys/time.h> and <time.h>. */
+/* #undef TIME_WITH_SYS_TIME */
+
+/* Define to 1 to mask harmless uninitialized memory read/writes. */
+/* #undef UMRW */
+
+/* Number of bits in a file offset, on hosts where this is settable. */
+/* #undef _FILE_OFFSET_BITS */
+
+/* Define for large files, on AIX-style hosts. */
+/* #undef _LARGE_FILES */
+
+/* Define to empty if `const' does not conform to ANSI C. */
+/* #undef const */
/*
- * VxWorks does not have getpid().
+ * Exit success/failure macros.
*/
-#define getpid() taskIdSelf()
+#ifndef HAVE_EXIT_SUCCESS
+#define EXIT_FAILURE 1
+#define EXIT_SUCCESS 0
+#endif
/*
* Don't step on the namespace. Other libraries may have their own
@@ -237,6 +343,10 @@
#endif
#ifndef HAVE_GETOPT
#define getopt __db_Cgetopt
+#define optarg __db_Coptarg
+#define opterr __db_Copterr
+#define optind __db_Coptind
+#define optopt __db_Coptopt
#endif
#ifndef HAVE_MEMCMP
#define memcmp __db_Cmemcmp
@@ -255,6 +365,7 @@
#endif
#ifndef HAVE_STRCASECMP
#define strcasecmp __db_Cstrcasecmp
+#define strncasecmp __db_Cstrncasecmp
#endif
#ifndef HAVE_STRERROR
#define strerror __db_Cstrerror
@@ -262,3 +373,10 @@
#ifndef HAVE_VSNPRINTF
#define vsnprintf __db_Cvsnprintf
#endif
+
+/*
+ * !!!
+ * The following is not part of the automatic configuration setup, but
+ * provides the information necessary to build Berkeley DB on VxWorks.
+ */
+#include "vxWorks.h"
diff --git a/bdb/build_vxworks/db_deadlock/db_deadlock.c b/bdb/build_vxworks/db_deadlock/db_deadlock.c
new file mode 100644
index 00000000000..3e9f4ba7b02
--- /dev/null
+++ b/bdb/build_vxworks/db_deadlock/db_deadlock.c
@@ -0,0 +1,249 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char copyright[] =
+ "Copyright (c) 1996-2002\nSleepycat Software Inc. All rights reserved.\n";
+static const char revid[] =
+ "$Id: db_deadlock.c,v 11.38 2002/08/08 03:50:32 bostic Exp $";
+#endif
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#if TIME_WITH_SYS_TIME
+#include <sys/time.h>
+#include <time.h>
+#else
+#if HAVE_SYS_TIME_H
+#include <sys/time.h>
+#else
+#include <time.h>
+#endif
+#endif
+
+#include <limits.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#endif
+
+#include "db_int.h"
+
+int db_deadlock_main __P((int, char *[]));
+int db_deadlock_usage __P((void));
+int db_deadlock_version_check __P((const char *));
+
+int
+db_deadlock(args)
+ char *args;
+{
+ int argc;
+ char **argv;
+
+ __db_util_arg("db_deadlock", args, &argc, &argv);
+ return (db_deadlock_main(argc, argv) ? EXIT_FAILURE : EXIT_SUCCESS);
+}
+
+#include <stdio.h>
+#define ERROR_RETURN ERROR
+
+int
+db_deadlock_main(argc, argv)
+ int argc;
+ char *argv[];
+{
+ extern char *optarg;
+ extern int optind, __db_getopt_reset;
+ const char *progname = "db_deadlock";
+ DB_ENV *dbenv;
+ u_int32_t atype;
+ time_t now;
+ long secs, usecs;
+ int ch, e_close, exitval, ret, verbose;
+ char *home, *logfile, *str;
+
+ if ((ret = db_deadlock_version_check(progname)) != 0)
+ return (ret);
+
+ atype = DB_LOCK_DEFAULT;
+ home = logfile = NULL;
+ secs = usecs = 0;
+ e_close = exitval = verbose = 0;
+ __db_getopt_reset = 1;
+ while ((ch = getopt(argc, argv, "a:h:L:t:Vvw")) != EOF)
+ switch (ch) {
+ case 'a':
+ switch (optarg[0]) {
+ case 'e':
+ atype = DB_LOCK_EXPIRE;
+ break;
+ case 'm':
+ atype = DB_LOCK_MAXLOCKS;
+ break;
+ case 'n':
+ atype = DB_LOCK_MINLOCKS;
+ break;
+ case 'o':
+ atype = DB_LOCK_OLDEST;
+ break;
+ case 'w':
+ atype = DB_LOCK_MINWRITE;
+ break;
+ case 'y':
+ atype = DB_LOCK_YOUNGEST;
+ break;
+ default:
+ return (db_deadlock_usage());
+ /* NOTREACHED */
+ }
+ if (optarg[1] != '\0')
+ return (db_deadlock_usage());
+ break;
+ case 'h':
+ home = optarg;
+ break;
+ case 'L':
+ logfile = optarg;
+ break;
+ case 't':
+ if ((str = strchr(optarg, '.')) != NULL) {
+ *str++ = '\0';
+ if (*str != '\0' && __db_getlong(
+ NULL, progname, str, 0, LONG_MAX, &usecs))
+ return (EXIT_FAILURE);
+ }
+ if (*optarg != '\0' && __db_getlong(
+ NULL, progname, optarg, 0, LONG_MAX, &secs))
+ return (EXIT_FAILURE);
+ if (secs == 0 && usecs == 0)
+ return (db_deadlock_usage());
+
+ break;
+
+ case 'V':
+ printf("%s\n", db_version(NULL, NULL, NULL));
+ return (EXIT_SUCCESS);
+ case 'v':
+ verbose = 1;
+ break;
+ case 'w': /* Undocumented. */
+ /* Detect every 100ms (100000 us) when polling. */
+ secs = 0;
+ usecs = 100000;
+ break;
+ case '?':
+ default:
+ return (db_deadlock_usage());
+ }
+ argc -= optind;
+ argv += optind;
+
+ if (argc != 0)
+ return (db_deadlock_usage());
+
+ /* Handle possible interruptions. */
+ __db_util_siginit();
+
+ /* Log our process ID. */
+ if (logfile != NULL && __db_util_logset(progname, logfile))
+ goto shutdown;
+
+ /*
+ * Create an environment object and initialize it for error
+ * reporting.
+ */
+ if ((ret = db_env_create(&dbenv, 0)) != 0) {
+ fprintf(stderr,
+ "%s: db_env_create: %s\n", progname, db_strerror(ret));
+ goto shutdown;
+ }
+ e_close = 1;
+
+ dbenv->set_errfile(dbenv, stderr);
+ dbenv->set_errpfx(dbenv, progname);
+
+ if (verbose) {
+ (void)dbenv->set_verbose(dbenv, DB_VERB_DEADLOCK, 1);
+ (void)dbenv->set_verbose(dbenv, DB_VERB_WAITSFOR, 1);
+ }
+
+ /* An environment is required. */
+ if ((ret = dbenv->open(dbenv, home,
+ DB_JOINENV | DB_USE_ENVIRON, 0)) != 0) {
+ dbenv->err(dbenv, ret, "open");
+ goto shutdown;
+ }
+
+ while (!__db_util_interrupted()) {
+ if (verbose) {
+ (void)time(&now);
+ dbenv->errx(dbenv, "running at %.24s", ctime(&now));
+ }
+
+ if ((ret = dbenv->lock_detect(dbenv, 0, atype, NULL)) != 0) {
+ dbenv->err(dbenv, ret, "DB_ENV->lock_detect");
+ goto shutdown;
+ }
+
+ /* Make a pass every "secs" secs and "usecs" usecs. */
+ if (secs == 0 && usecs == 0)
+ break;
+ (void)__os_sleep(dbenv, secs, usecs);
+ }
+
+ if (0) {
+shutdown: exitval = 1;
+ }
+
+ /* Clean up the logfile. */
+ if (logfile != NULL)
+ remove(logfile);
+
+ /* Clean up the environment. */
+ if (e_close && (ret = dbenv->close(dbenv, 0)) != 0) {
+ exitval = 1;
+ fprintf(stderr,
+ "%s: dbenv->close: %s\n", progname, db_strerror(ret));
+ }
+
+ /* Resend any caught signal. */
+ __db_util_sigresend();
+
+ return (exitval == 0 ? EXIT_SUCCESS : EXIT_FAILURE);
+}
+
+int
+db_deadlock_usage()
+{
+ (void)fprintf(stderr, "%s\n\t%s\n",
+ "usage: db_deadlock [-Vv]",
+ "[-a e | m | n | o | w | y] [-h home] [-L file] [-t sec.usec]");
+ return (EXIT_FAILURE);
+}
+
+int
+db_deadlock_version_check(progname)
+ const char *progname;
+{
+ int v_major, v_minor, v_patch;
+
+ /* Make sure we're loaded with the right version of the DB library. */
+ (void)db_version(&v_major, &v_minor, &v_patch);
+ if (v_major != DB_VERSION_MAJOR ||
+ v_minor != DB_VERSION_MINOR || v_patch != DB_VERSION_PATCH) {
+ fprintf(stderr,
+ "%s: version %d.%d.%d doesn't match library version %d.%d.%d\n",
+ progname, DB_VERSION_MAJOR, DB_VERSION_MINOR,
+ DB_VERSION_PATCH, v_major, v_minor, v_patch);
+ return (EXIT_FAILURE);
+ }
+ return (0);
+}
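
Like the other utilities in this tree, db_deadlock() takes one argument string, so the detector can be spawned as its own task and left to poll. A hedged sketch follows; the abort policy and interval map to the -a and -t flags shown in the usage message above, and the home path is illustrative.

/*
 * Sketch: run the deadlock detector against an existing environment,
 * aborting the youngest locker and re-checking every two seconds.
 * "/tmp/dbenv" is an illustrative home directory.
 */
extern int db_deadlock(char *);

void
deadlock_detector_task(void)
{
	char args[] = "-a y -t 2 -h /tmp/dbenv";

	(void)db_deadlock(args);	/* runs until interrupted */
}
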
diff --git a/bdb/build_vxworks/db_deadlock/db_deadlock.wpj b/bdb/build_vxworks/db_deadlock/db_deadlock.wpj
new file mode 100755
index 00000000000..10cc2dc6cb6
--- /dev/null
+++ b/bdb/build_vxworks/db_deadlock/db_deadlock.wpj
@@ -0,0 +1,160 @@
+Document file - DO NOT EDIT
+
+<BEGIN> BUILD_PENTIUMgnu_BUILDRULE
+db_deadlock.out
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_AR
+ar386
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_ARCHIVE
+$(PRJ_DIR)/PENTIUMgnu/db_deadlock.a
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_AS
+cc386
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_CC
+cc386
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_CFLAGS
+-g \
+ -mpentium \
+ -ansi \
+ -nostdinc \
+ -DRW_MULTI_THREAD \
+ -D_REENTRANT \
+ -fvolatile \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -I$(PRJ_DIR)/.. \
+ -I$(PRJ_DIR)/../.. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_CFLAGS_AS
+-g \
+ -mpentium \
+ -ansi \
+ -nostdinc \
+ -fvolatile \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -P \
+ -x \
+ assembler-with-cpp \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_CPP
+cc386 -E -P -xc
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_LD
+ld386
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_LDDEPS
+
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_LDFLAGS
+-X -N
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_LD_PARTIAL_FLAGS
+-X -r
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_NM
+nm386 -g
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_OPTION_DEFINE_MACRO
+-D
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_OPTION_INCLUDE_DIR
+-I
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_POST_BUILD_RULE
+
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_PRJ_LIBS
+
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_SIZE
+size386
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_RO_DEPEND_PATH
+{$(WIND_BASE)/target/h/} \
+ {$(WIND_BASE)/target/src/} \
+ {$(WIND_BASE)/target/config/}
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_TC
+::tc_PENTIUMgnu
+<END>
+
+<BEGIN> BUILD_RULE_archive
+
+<END>
+
+<BEGIN> BUILD_RULE_db_deadlock.out
+
+<END>
+
+<BEGIN> BUILD_RULE_objects
+
+<END>
+
+<BEGIN> BUILD__CURRENT
+PENTIUMgnu
+<END>
+
+<BEGIN> BUILD__LIST
+PENTIUMgnu
+<END>
+
+<BEGIN> CORE_INFO_TYPE
+::prj_vxApp
+<END>
+
+<BEGIN> CORE_INFO_VERSION
+2.0
+<END>
+
+<BEGIN> FILE_db_deadlock.c_dependDone
+FALSE
+<END>
+
+<BEGIN> FILE_db_deadlock.c_dependencies
+
+<END>
+
+<BEGIN> FILE_db_deadlock.c_objects
+db_deadlock.o
+<END>
+
+<BEGIN> FILE_db_deadlock.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> PROJECT_FILES
+$(PRJ_DIR)/db_deadlock.c
+<END>
+
+<BEGIN> userComments
+db_deadlock
+<END>
diff --git a/bdb/build_vxworks/db_deadlock/db_deadlock/Makefile.custom b/bdb/build_vxworks/db_deadlock/db_deadlock/Makefile.custom
new file mode 100644
index 00000000000..ca781f7b251
--- /dev/null
+++ b/bdb/build_vxworks/db_deadlock/db_deadlock/Makefile.custom
@@ -0,0 +1,51 @@
+#
+# Custom Makefile shell
+#
+# This file may be edited freely, since it will not be regenerated
+# by the project manager.
+#
+# Use this makefile to define rules to make external binaries
+# and deposit them in the $(EXTERNAL_BINARIES_DIR) directory.
+#
+# If you have specified external modules during your component
+# creation, you will find make rules already in place below.
+# You will likely have to edit these to suit your individual
+# build setup.
+#
+# You may wish to use the CPU, BUILD_SPEC or TOOL make variables in
+# your Makefile to support builds for different architectures. Use
+# the FORCE_EXTERNAL_MAKE phony target to ensure that your external
+# make always runs.
+#
+# The example below assumes that your custom makefile is in the
+# mySourceTree directory, and that the binary file it produces
+# is placed into the $(BUILD_SPEC) sub-directory.
+#
+# EXTERNAL_SOURCE_BASE = /folk/me/mySourceTree
+# EXTERNAL_MODULE = myLibrary.o
+# EXTERNAL_MAKE = make
+#
+# $(EXTERNAL_BINARIES_DIR)/$(EXTERNAL_MODULE) : FORCE_EXTERNAL_MAKE
+# $(EXTERNAL_MAKE) -C $(EXTERNAL_SOURCE_BASE) \
+# -f $(EXTERNAL_SOURCE_BASE)/Makefile \
+# CPU=$(CPU) BUILD_SPEC=$(BUILD_SPEC) $(@F)
+# $(CP) $(subst /,$(DIRCHAR),$(EXTERNAL_SOURCE_BASE)/$(BUILD_SPEC)/$(@F) $@)
+#
+# If you are not adding your external modules from the component wizard,
+# you will have to include them in your component yourself:
+#
+# From the GUI, you can do this with the Component's 'Add external module'
+# dialog.
+#
+# If you are using the command line, add the module(s) by editing the
+# MODULES line in component.cdf file, e.g.
+#
+# Component INCLUDE_MYCOMPONENT {
+#
+# MODULES foo.o goo.o \
+# myLibrary.o
+#
+
+
+# rules to build custom libraries
+
diff --git a/bdb/build_vxworks/db_deadlock/db_deadlock/component.cdf b/bdb/build_vxworks/db_deadlock/db_deadlock/component.cdf
new file mode 100755
index 00000000000..efc498475bf
--- /dev/null
+++ b/bdb/build_vxworks/db_deadlock/db_deadlock/component.cdf
@@ -0,0 +1,30 @@
+/* component.cdf - dynamically updated configuration */
+
+/*
+ * NOTE: you may edit this file to alter the configuration
+ * But all non-configuration information, including comments,
+ * will be lost upon rebuilding this project.
+ */
+
+/* Component information */
+
+Component INCLUDE_DB_DEADLOCK {
+ ENTRY_POINTS ALL_GLOBAL_SYMBOLS
+ MODULES db_deadlock.o
+ NAME db_deadlock
+ PREF_DOMAIN ANY
+ _INIT_ORDER usrComponentsInit
+}
+
+/* EntryPoint information */
+
+/* Module information */
+
+Module db_deadlock.o {
+
+ NAME db_deadlock.o
+ SRC_PATH_NAME $PRJ_DIR/../db_deadlock.c
+}
+
+/* Parameter information */
+
diff --git a/bdb/build_vxworks/db_deadlock/db_deadlock/component.wpj b/bdb/build_vxworks/db_deadlock/db_deadlock/component.wpj
new file mode 100755
index 00000000000..f9a1b82cd9c
--- /dev/null
+++ b/bdb/build_vxworks/db_deadlock/db_deadlock/component.wpj
@@ -0,0 +1,475 @@
+Document file - DO NOT EDIT
+
+<BEGIN> CORE_INFO_TYPE
+::prj_component
+<END>
+
+<BEGIN> CORE_INFO_VERSION
+AE1.1
+<END>
+
+<BEGIN> BUILD__CURRENT
+PENTIUM2gnu.debug
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_CURRENT_TARGET
+default
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_DEFAULTFORCPU
+1
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../db_deadlock.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../db_deadlock.c_objects
+db_deadlock.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../db_deadlock.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../db_deadlock.c_objects
+db_deadlock.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../db_deadlock.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/compConfig.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/compConfig.c_objects
+compConfig.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/compConfig.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_AR
+arpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_AS
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_CC
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_CFLAGS
+-mcpu=pentiumpro \
+ -march=pentiumpro \
+ -ansi \
+ -DRW_MULTI_THREAD \
+ -D_REENTRANT \
+ -g \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -MD \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -I$(PRJ_DIR)/../.. \
+ -I$(PRJ_DIR)/../../.. \
+ -DCPU=PENTIUM2
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_CFLAGS_AS
+-mcpu=pentiumpro \
+ -march=pentiumpro \
+ -ansi \
+ -g \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -P \
+ -x \
+ assembler-with-cpp \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM2
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_CPP
+ccpentium -E -P
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_CPPFILT
+c++filtpentium --strip-underscores
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_LD
+ldpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_LDFLAGS
+-X
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_LDPARTIAL
+ccpentium \
+ -B$(WIND_BASE)/host/$(WIND_HOST_TYPE)/lib/gcc-lib/ \
+ -nostdlib \
+ -r \
+ -Wl,-X
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_LD_PARTIAL_FLAGS
+-X -r
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_NM
+nmpentium -g
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_OPTION_DEFINE_MACRO
+-D
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_OPTION_GENERATE_DEPENDENCY_FILE
+-MD
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_OPTION_INCLUDE_DIR
+-I
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_RELEASE
+0
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_SIZE
+sizepentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_RELEASE
+0
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_RO_DEPEND_PATH
+$(WIND_BASE)/target/h/
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_TC
+::tc_PENTIUM2gnu.debug
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_DEFAULTFORCPU
+0
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../db_deadlock.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../db_deadlock.c_objects
+db_deadlock.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../db_deadlock.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/compConfig.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/compConfig.c_objects
+compConfig.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/compConfig.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_AR
+arpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_AS
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_CC
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_CFLAGS
+-mcpu=pentiumpro \
+ -march=pentiumpro \
+ -ansi \
+ -DRW_MULTI_THREAD \
+ -D_REENTRANT \
+ -O2 \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -MD \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -I$(PRJ_DIR)/../.. \
+ -I$(PRJ_DIR)/../../.. \
+ -DCPU=PENTIUM2
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_CFLAGS_AS
+-mcpu=pentiumpro \
+ -march=pentiumpro \
+ -ansi \
+ -O2 \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -P \
+ -x \
+ assembler-with-cpp \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM2
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_CPP
+ccpentium -E -P
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_CPPFILT
+c++filtpentium --strip-underscores
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_LD
+ldpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_LDFLAGS
+-X
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_LDPARTIAL
+ccpentium \
+ -B$(WIND_BASE)/host/$(WIND_HOST_TYPE)/lib/gcc-lib/ \
+ -nostdlib \
+ -r \
+ -Wl,-X
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_LD_PARTIAL_FLAGS
+-X -r
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_NM
+nmpentium -g
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_OPTION_DEFINE_MACRO
+-D
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_OPTION_GENERATE_DEPENDENCY_FILE
+-MD
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_OPTION_INCLUDE_DIR
+-I
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_RELEASE
+1
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_SIZE
+sizepentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_RELEASE
+1
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_RO_DEPEND_PATH
+$(WIND_BASE)/target/h/
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_TC
+::tc_PENTIUM2gnu.release
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_DEFAULTFORCPU
+1
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../db_deadlock.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../db_deadlock.c_objects
+db_deadlock.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../db_deadlock.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/compConfig.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/compConfig.c_objects
+compConfig.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/compConfig.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_AR
+arpentium
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_AS
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_CC
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_CFLAGS
+-mcpu=pentium \
+ -march=pentium \
+ -ansi \
+ -DRW_MULTI_THREAD \
+ -D_REENTRANT \
+ -g \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -MD \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -I$(PRJ_DIR)/../.. \
+ -I$(PRJ_DIR)/../../.. \
+ -DCPU=PENTIUM
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_CFLAGS_AS
+-mcpu=pentium \
+ -march=pentium \
+ -ansi \
+ -g \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -P \
+ -x \
+ assembler-with-cpp \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_CPP
+ccpentium -E -P
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_CPPFILT
+c++filtpentium --strip-underscores
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_LD
+ldpentium
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_LDFLAGS
+-X
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_LDPARTIAL
+ccpentium \
+ -B$(WIND_BASE)/host/$(WIND_HOST_TYPE)/lib/gcc-lib/ \
+ -nostdlib \
+ -r \
+ -Wl,-X
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_LD_PARTIAL_FLAGS
+-X -r
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_NM
+nmpentium -g
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_OPTION_DEFINE_MACRO
+-D
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_OPTION_GENERATE_DEPENDENCY_FILE
+-MD
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_OPTION_INCLUDE_DIR
+-I
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_RELEASE
+0
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_SIZE
+sizepentium
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_RELEASE
+0
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_RO_DEPEND_PATH
+$(WIND_BASE)/target/h/
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_TC
+::tc_PENTIUMgnu.debug
+<END>
+
+<BEGIN> BUILD__LIST
+PENTIUM2gnu.debug PENTIUM2gnu.release PENTIUMgnu.debug
+<END>
+
+<BEGIN> PROJECT_FILES
+$(PRJ_DIR)/../db_deadlock.c \
+ $(PRJ_DIR)/compConfig.c
+<END>
+
+<BEGIN> WCC__CDF_PATH
+$(PRJ_DIR)
+<END>
+
+<BEGIN> WCC__CURRENT
+PENTIUM2gnu.debug
+<END>
+
+<BEGIN> WCC__LIST
+PENTIUM2gnu.debug
+<END>
+
+<BEGIN> WCC__MXR_LIBS
+lib$(CPU)$(TOOL)vx.a
+<END>
+
+<BEGIN> WCC__OBJS_PATH
+$(WIND_BASE)/target/lib/obj$CPU$TOOLvx
+<END>
+
diff --git a/bdb/build_vxworks/db_dump/db_dump.c b/bdb/build_vxworks/db_dump/db_dump.c
new file mode 100644
index 00000000000..60e987c48b9
--- /dev/null
+++ b/bdb/build_vxworks/db_dump/db_dump.c
@@ -0,0 +1,626 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char copyright[] =
+ "Copyright (c) 1996-2002\nSleepycat Software Inc. All rights reserved.\n";
+static const char revid[] =
+ "$Id: db_dump.c,v 11.80 2002/08/08 03:50:34 bostic Exp $";
+#endif
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_page.h"
+#include "dbinc/db_am.h"
+
+int db_dump_db_init __P((DB_ENV *, char *, int, u_int32_t, int *));
+int db_dump_dump __P((DB *, int, int));
+int db_dump_dump_sub __P((DB_ENV *, DB *, char *, int, int));
+int db_dump_is_sub __P((DB *, int *));
+int db_dump_main __P((int, char *[]));
+int db_dump_show_subs __P((DB *));
+int db_dump_usage __P((void));
+int db_dump_version_check __P((const char *));
+
+int
+db_dump(args)
+ char *args;
+{
+ int argc;
+ char **argv;
+
+ __db_util_arg("db_dump", args, &argc, &argv);
+ return (db_dump_main(argc, argv) ? EXIT_FAILURE : EXIT_SUCCESS);
+}
+
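+/*
+ * The wrapper above replaces a conventional main() on VxWorks:
+ * __db_util_arg() splits the single argument string into an argc/argv
+ * vector before the normal option parsing runs.  A hypothetical
+ * target-shell invocation (the flags and paths here are illustrative
+ * only) might be:
+ *
+ *	-> db_dump("-p -h /bdb_env mydb.db")
+ */
+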
+#include <stdio.h>
+#define ERROR_RETURN ERROR
+
+int
+db_dump_main(argc, argv)
+ int argc;
+ char *argv[];
+{
+ extern char *optarg;
+ extern int optind, __db_getopt_reset;
+ const char *progname = "db_dump";
+ DB_ENV *dbenv;
+ DB *dbp;
+ u_int32_t cache;
+ int ch, d_close;
+ int e_close, exitval, keyflag, lflag, nflag, pflag, private;
+ int ret, Rflag, rflag, resize, subs;
+ char *dopt, *home, *passwd, *subname;
+
+ if ((ret = db_dump_version_check(progname)) != 0)
+ return (ret);
+
+ dbp = NULL;
+ d_close = e_close = exitval = lflag = nflag = pflag = rflag = Rflag = 0;
+ keyflag = 0;
+ cache = MEGABYTE;
+ private = 0;
+ dopt = home = passwd = subname = NULL;
+ __db_getopt_reset = 1;
+ while ((ch = getopt(argc, argv, "d:f:h:klNpP:rRs:V")) != EOF)
+ switch (ch) {
+ case 'd':
+ dopt = optarg;
+ break;
+ case 'f':
+ if (freopen(optarg, "w", stdout) == NULL) {
+ fprintf(stderr, "%s: %s: reopen: %s\n",
+ progname, optarg, strerror(errno));
+ return (EXIT_FAILURE);
+ }
+ break;
+ case 'h':
+ home = optarg;
+ break;
+ case 'k':
+ keyflag = 1;
+ break;
+ case 'l':
+ lflag = 1;
+ break;
+ case 'N':
+ nflag = 1;
+ break;
+ case 'P':
+ passwd = strdup(optarg);
+ memset(optarg, 0, strlen(optarg));
+ if (passwd == NULL) {
+ fprintf(stderr, "%s: strdup: %s\n",
+ progname, strerror(errno));
+ return (EXIT_FAILURE);
+ }
+ break;
+ case 'p':
+ pflag = 1;
+ break;
+ case 's':
+ subname = optarg;
+ break;
+ case 'R':
+ Rflag = 1;
+ /* DB_AGGRESSIVE requires DB_SALVAGE */
+ /* FALLTHROUGH */
+ case 'r':
+ rflag = 1;
+ break;
+ case 'V':
+ printf("%s\n", db_version(NULL, NULL, NULL));
+ return (EXIT_SUCCESS);
+ case '?':
+ default:
+ return (db_dump_usage());
+ }
+ argc -= optind;
+ argv += optind;
+
+ if (argc != 1)
+ return (db_dump_usage());
+
+ if (dopt != NULL && pflag) {
+ fprintf(stderr,
+ "%s: the -d and -p options may not both be specified\n",
+ progname);
+ return (EXIT_FAILURE);
+ }
+ if (lflag && subname != NULL) {
+ fprintf(stderr,
+ "%s: the -l and -s options may not both be specified\n",
+ progname);
+ return (EXIT_FAILURE);
+ }
+
+ if (keyflag && rflag) {
+ fprintf(stderr, "%s: %s", progname,
+ "the -k and -r or -R options may not both be specified\n");
+ return (EXIT_FAILURE);
+ }
+
+ if (subname != NULL && rflag) {
+ fprintf(stderr, "%s: %s", progname,
+ "the -s and -r or -R options may not both be specified\n");
+ return (EXIT_FAILURE);
+ }
+
+ /* Handle possible interruptions. */
+ __db_util_siginit();
+
+ /*
+ * Create an environment object and initialize it for error
+ * reporting.
+ */
+retry: if ((ret = db_env_create(&dbenv, 0)) != 0) {
+ fprintf(stderr,
+ "%s: db_env_create: %s\n", progname, db_strerror(ret));
+ goto err;
+ }
+ e_close = 1;
+
+ dbenv->set_errfile(dbenv, stderr);
+ dbenv->set_errpfx(dbenv, progname);
+ if (nflag) {
+ if ((ret = dbenv->set_flags(dbenv, DB_NOLOCKING, 1)) != 0) {
+ dbenv->err(dbenv, ret, "set_flags: DB_NOLOCKING");
+ goto err;
+ }
+ if ((ret = dbenv->set_flags(dbenv, DB_NOPANIC, 1)) != 0) {
+ dbenv->err(dbenv, ret, "set_flags: DB_NOPANIC");
+ goto err;
+ }
+ }
+ if (passwd != NULL && (ret = dbenv->set_encrypt(dbenv,
+ passwd, DB_ENCRYPT_AES)) != 0) {
+ dbenv->err(dbenv, ret, "set_passwd");
+ goto err;
+ }
+
+ /* Initialize the environment. */
+ if (db_dump_db_init(dbenv, home, rflag, cache, &private) != 0)
+ goto err;
+
+ /* Create the DB object and open the file. */
+ if ((ret = db_create(&dbp, dbenv, 0)) != 0) {
+ dbenv->err(dbenv, ret, "db_create");
+ goto err;
+ }
+ d_close = 1;
+
+ /*
+ * If we're salvaging, don't do an open; it might not be safe.
+ * Dispatch now into the salvager.
+ */
+ if (rflag) {
+ if ((ret = dbp->verify(dbp, argv[0], NULL, stdout,
+ DB_SALVAGE |
+ (Rflag ? DB_AGGRESSIVE : 0) |
+ (pflag ? DB_PRINTABLE : 0))) != 0)
+ goto err;
+ exitval = 0;
+ goto done;
+ }
+
+ if ((ret = dbp->open(dbp, NULL,
+ argv[0], subname, DB_UNKNOWN, DB_RDONLY, 0)) != 0) {
+ dbp->err(dbp, ret, "open: %s", argv[0]);
+ goto err;
+ }
+ if (private != 0) {
+ if ((ret = __db_util_cache(dbenv, dbp, &cache, &resize)) != 0)
+ goto err;
+ if (resize) {
+ (void)dbp->close(dbp, 0);
+ d_close = 0;
+
+ (void)dbenv->close(dbenv, 0);
+ e_close = 0;
+ goto retry;
+ }
+ }
+
+ if (dopt != NULL) {
+ if (__db_dump(dbp, dopt, NULL)) {
+ dbp->err(dbp, ret, "__db_dump: %s", argv[0]);
+ goto err;
+ }
+ } else if (lflag) {
+ if (db_dump_is_sub(dbp, &subs))
+ goto err;
+ if (subs == 0) {
+ dbp->errx(dbp,
+ "%s: does not contain multiple databases", argv[0]);
+ goto err;
+ }
+ if (db_dump_show_subs(dbp))
+ goto err;
+ } else {
+ subs = 0;
+ if (subname == NULL && db_dump_is_sub(dbp, &subs))
+ goto err;
+ if (subs) {
+ if (db_dump_dump_sub(dbenv, dbp, argv[0], pflag, keyflag))
+ goto err;
+ } else
+ if (__db_prheader(dbp, NULL, pflag, keyflag, stdout,
+ __db_verify_callback, NULL, 0) ||
+ db_dump_dump(dbp, pflag, keyflag))
+ goto err;
+ }
+
+ if (0) {
+err: exitval = 1;
+ }
+done: if (d_close && (ret = dbp->close(dbp, 0)) != 0) {
+ exitval = 1;
+ dbenv->err(dbenv, ret, "close");
+ }
+ if (e_close && (ret = dbenv->close(dbenv, 0)) != 0) {
+ exitval = 1;
+ fprintf(stderr,
+ "%s: dbenv->close: %s\n", progname, db_strerror(ret));
+ }
+
+ /* Resend any caught signal. */
+ __db_util_sigresend();
+
+ return (exitval == 0 ? EXIT_SUCCESS : EXIT_FAILURE);
+}
+
+/*
+ * db_init --
+ * Initialize the environment.
+ */
+int
+db_dump_db_init(dbenv, home, is_salvage, cache, is_privatep)
+ DB_ENV *dbenv;
+ char *home;
+ int is_salvage;
+ u_int32_t cache;
+ int *is_privatep;
+{
+ int ret;
+
+ /*
+ * Try and use the underlying environment when opening a database.
+ * We wish to use the buffer pool so our information is as up-to-date
+ * as possible, even if the mpool cache hasn't been flushed.
+ *
+ * If we are not doing a salvage, we wish to use the DB_JOINENV flag;
+ * if a locking system is present, this will let us use it and be
+ * safe to run concurrently with other threads of control. (We never
+ * need to use transactions explicitly, as we're read-only.) Note
+ * that in CDB, too, this will configure our environment
+ * appropriately, and our cursors will (correctly) do locking as CDB
+ * read cursors.
+ *
+ * If we are doing a salvage, the verification code will protest
+ * if we initialize transactions, logging, or locking; do an
+ * explicit DB_INIT_MPOOL to try to join any existing environment
+ * before we create our own.
+ */
+ *is_privatep = 0;
+ if (dbenv->open(dbenv, home,
+ DB_USE_ENVIRON | (is_salvage ? DB_INIT_MPOOL : DB_JOINENV), 0) == 0)
+ return (0);
+
+ /*
+ * An environment is required because we may be trying to look at
+ * databases in directories other than the current one. We could
+ * avoid using an environment iff the -h option wasn't specified,
+ * but that seems like more work than it's worth.
+ *
+ * No environment exists (or, at least no environment that includes
+ * an mpool region exists). Create one, but make it private so that
+ * no files are actually created.
+ */
+ *is_privatep = 1;
+ if ((ret = dbenv->set_cachesize(dbenv, 0, cache, 1)) == 0 &&
+ (ret = dbenv->open(dbenv, home,
+ DB_CREATE | DB_INIT_MPOOL | DB_PRIVATE | DB_USE_ENVIRON, 0)) == 0)
+ return (0);
+
+ /* An environment is required. */
+ dbenv->err(dbenv, ret, "open");
+ return (1);
+}
+
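+/*
+ * In short, the logic above attempts one of
+ *
+ *	dbenv->open(dbenv, home, DB_USE_ENVIRON | DB_JOINENV, 0)
+ *	dbenv->open(dbenv, home, DB_USE_ENVIRON | DB_INIT_MPOOL, 0)
+ *
+ * (the second when salvaging), and only if that fails falls back to
+ *
+ *	dbenv->set_cachesize(dbenv, 0, cache, 1);
+ *	dbenv->open(dbenv, home,
+ *	    DB_CREATE | DB_INIT_MPOOL | DB_PRIVATE | DB_USE_ENVIRON, 0);
+ *
+ * which creates a throwaway private environment with its own cache.
+ */
+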
+/*
+ * is_sub --
+ * Return if the database contains subdatabases.
+ */
+int
+db_dump_is_sub(dbp, yesno)
+ DB *dbp;
+ int *yesno;
+{
+ DB_BTREE_STAT *btsp;
+ DB_HASH_STAT *hsp;
+ int ret;
+
+ switch (dbp->type) {
+ case DB_BTREE:
+ case DB_RECNO:
+ if ((ret = dbp->stat(dbp, &btsp, DB_FAST_STAT)) != 0) {
+ dbp->err(dbp, ret, "DB->stat");
+ return (ret);
+ }
+ *yesno = btsp->bt_metaflags & BTM_SUBDB ? 1 : 0;
+ free(btsp);
+ break;
+ case DB_HASH:
+ if ((ret = dbp->stat(dbp, &hsp, DB_FAST_STAT)) != 0) {
+ dbp->err(dbp, ret, "DB->stat");
+ return (ret);
+ }
+ *yesno = hsp->hash_metaflags & DB_HASH_SUBDB ? 1 : 0;
+ free(hsp);
+ break;
+ case DB_QUEUE:
+ break;
+ default:
+ dbp->errx(dbp, "unknown database type");
+ return (1);
+ }
+ return (0);
+}
+
+/*
+ * dump_sub --
+ * Dump out the records for a DB containing subdatabases.
+ */
+int
+db_dump_dump_sub(dbenv, parent_dbp, parent_name, pflag, keyflag)
+ DB_ENV *dbenv;
+ DB *parent_dbp;
+ char *parent_name;
+ int pflag, keyflag;
+{
+ DB *dbp;
+ DBC *dbcp;
+ DBT key, data;
+ int ret;
+ char *subdb;
+
+ /*
+ * Get a cursor and step through the database, dumping out each
+ * subdatabase.
+ */
+ if ((ret = parent_dbp->cursor(parent_dbp, NULL, &dbcp, 0)) != 0) {
+ dbenv->err(dbenv, ret, "DB->cursor");
+ return (1);
+ }
+
+ memset(&key, 0, sizeof(key));
+ memset(&data, 0, sizeof(data));
+ while ((ret = dbcp->c_get(dbcp, &key, &data, DB_NEXT)) == 0) {
+ /* Nul terminate the subdatabase name. */
+ if ((subdb = malloc(key.size + 1)) == NULL) {
+ dbenv->err(dbenv, ENOMEM, NULL);
+ return (1);
+ }
+ memcpy(subdb, key.data, key.size);
+ subdb[key.size] = '\0';
+
+ /* Create the DB object and open the file. */
+ if ((ret = db_create(&dbp, dbenv, 0)) != 0) {
+ dbenv->err(dbenv, ret, "db_create");
+ free(subdb);
+ return (1);
+ }
+ if ((ret = dbp->open(dbp, NULL,
+ parent_name, subdb, DB_UNKNOWN, DB_RDONLY, 0)) != 0)
+ dbp->err(dbp, ret,
+ "DB->open: %s:%s", parent_name, subdb);
+ if (ret == 0 &&
+ (__db_prheader(dbp, subdb, pflag, keyflag, stdout,
+ __db_verify_callback, NULL, 0) ||
+ db_dump_dump(dbp, pflag, keyflag)))
+ ret = 1;
+ (void)dbp->close(dbp, 0);
+ free(subdb);
+ if (ret != 0)
+ return (1);
+ }
+ if (ret != DB_NOTFOUND) {
+ dbp->err(dbp, ret, "DBcursor->get");
+ return (1);
+ }
+
+ if ((ret = dbcp->c_close(dbcp)) != 0) {
+ dbp->err(dbp, ret, "DBcursor->close");
+ return (1);
+ }
+
+ return (0);
+}
+
+/*
+ * show_subs --
+ * Display the subdatabases for a database.
+ */
+int
+db_dump_show_subs(dbp)
+ DB *dbp;
+{
+ DBC *dbcp;
+ DBT key, data;
+ int ret;
+
+ /*
+ * Get a cursor and step through the database, printing out the key
+ * of each key/data pair.
+ */
+ if ((ret = dbp->cursor(dbp, NULL, &dbcp, 0)) != 0) {
+ dbp->err(dbp, ret, "DB->cursor");
+ return (1);
+ }
+
+ memset(&key, 0, sizeof(key));
+ memset(&data, 0, sizeof(data));
+ while ((ret = dbcp->c_get(dbcp, &key, &data, DB_NEXT)) == 0) {
+ if ((ret = __db_prdbt(&key, 1, NULL, stdout,
+ __db_verify_callback, 0, NULL)) != 0) {
+ dbp->errx(dbp, NULL);
+ return (1);
+ }
+ }
+ if (ret != DB_NOTFOUND) {
+ dbp->err(dbp, ret, "DBcursor->get");
+ return (1);
+ }
+
+ if ((ret = dbcp->c_close(dbcp)) != 0) {
+ dbp->err(dbp, ret, "DBcursor->close");
+ return (1);
+ }
+ return (0);
+}
+
+/*
+ * dump --
+ * Dump out the records for a DB.
+ */
+int
+db_dump_dump(dbp, pflag, keyflag)
+ DB *dbp;
+ int pflag, keyflag;
+{
+ DBC *dbcp;
+ DBT key, data;
+ DBT keyret, dataret;
+ db_recno_t recno;
+ int is_recno, failed, ret;
+ void *pointer;
+
+ /*
+ * Get a cursor and step through the database, printing out each
+ * key/data pair.
+ */
+ if ((ret = dbp->cursor(dbp, NULL, &dbcp, 0)) != 0) {
+ dbp->err(dbp, ret, "DB->cursor");
+ return (1);
+ }
+
+ failed = 0;
+ memset(&key, 0, sizeof(key));
+ memset(&data, 0, sizeof(data));
+ data.data = malloc(1024 * 1024);
+ if (data.data == NULL) {
+ dbp->err(dbp, ENOMEM, "bulk get buffer");
+ failed = 1;
+ goto err;
+ }
+ data.ulen = 1024 * 1024;
+ data.flags = DB_DBT_USERMEM;
+ is_recno = (dbp->type == DB_RECNO || dbp->type == DB_QUEUE);
+ keyflag = is_recno ? keyflag : 1;
+ if (is_recno) {
+ keyret.data = &recno;
+ keyret.size = sizeof(recno);
+ }
+
+retry:
+ while ((ret =
+ dbcp->c_get(dbcp, &key, &data, DB_NEXT | DB_MULTIPLE_KEY)) == 0) {
+ DB_MULTIPLE_INIT(pointer, &data);
+ for (;;) {
+ if (is_recno)
+ DB_MULTIPLE_RECNO_NEXT(pointer, &data,
+ recno, dataret.data, dataret.size);
+ else
+ DB_MULTIPLE_KEY_NEXT(pointer,
+ &data, keyret.data,
+ keyret.size, dataret.data, dataret.size);
+
+ if (dataret.data == NULL)
+ break;
+
+ if ((keyflag && (ret = __db_prdbt(&keyret,
+ pflag, " ", stdout, __db_verify_callback,
+ is_recno, NULL)) != 0) || (ret =
+ __db_prdbt(&dataret, pflag, " ", stdout,
+ __db_verify_callback, 0, NULL)) != 0) {
+ dbp->errx(dbp, NULL);
+ failed = 1;
+ goto err;
+ }
+ }
+ }
+ if (ret == ENOMEM) {
+ data.data = realloc(data.data, data.size);
+ if (data.data == NULL) {
+ dbp->err(dbp, ENOMEM, "bulk get buffer");
+ failed = 1;
+ goto err;
+ }
+ data.ulen = data.size;
+ goto retry;
+ }
+
+ if (ret != DB_NOTFOUND) {
+ dbp->err(dbp, ret, "DBcursor->get");
+ failed = 1;
+ }
+
+err: if (data.data != NULL)
+ free(data.data);
+
+ if ((ret = dbcp->c_close(dbcp)) != 0) {
+ dbp->err(dbp, ret, "DBcursor->close");
+ failed = 1;
+ }
+
+ (void)__db_prfooter(stdout, __db_verify_callback);
+ return (failed);
+}
+
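+/*
+ * The retrieval loop in dump() above uses the bulk-get interface; its
+ * essential shape, stripped of the ENOMEM/realloc handling and of the
+ * recno special case, is roughly:
+ *
+ *	data.ulen = 1024 * 1024;		(user-supplied buffer)
+ *	data.data = malloc(data.ulen);
+ *	data.flags = DB_DBT_USERMEM;
+ *	while (dbcp->c_get(dbcp, &key, &data,
+ *	    DB_NEXT | DB_MULTIPLE_KEY) == 0) {
+ *		DB_MULTIPLE_INIT(pointer, &data);
+ *		for (;;) {
+ *			DB_MULTIPLE_KEY_NEXT(pointer, &data,
+ *			    keyret.data, keyret.size,
+ *			    dataret.data, dataret.size);
+ *			if (dataret.data == NULL)
+ *				break;
+ *			... print one key/data pair ...
+ *		}
+ *	}
+ *
+ * Each c_get() fills the buffer with as many pairs as fit, and the
+ * DB_MULTIPLE_* macros walk them without further cursor calls.
+ */
+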
+/*
+ * usage --
+ * Display the usage message.
+ */
+int
+db_dump_usage()
+{
+ (void)fprintf(stderr, "%s\n\t%s\n",
+ "usage: db_dump [-klNprRV]",
+ "[-d ahr] [-f output] [-h home] [-P password] [-s database] db_file");
+ return (EXIT_FAILURE);
+}
+
+int
+db_dump_version_check(progname)
+ const char *progname;
+{
+ int v_major, v_minor, v_patch;
+
+ /* Make sure we're loaded with the right version of the DB library. */
+ (void)db_version(&v_major, &v_minor, &v_patch);
+ if (v_major != DB_VERSION_MAJOR ||
+ v_minor != DB_VERSION_MINOR || v_patch != DB_VERSION_PATCH) {
+ fprintf(stderr,
+ "%s: version %d.%d.%d doesn't match library version %d.%d.%d\n",
+ progname, DB_VERSION_MAJOR, DB_VERSION_MINOR,
+ DB_VERSION_PATCH, v_major, v_minor, v_patch);
+ return (EXIT_FAILURE);
+ }
+ return (0);
+}
diff --git a/bdb/build_vxworks/db_dump/db_dump.wpj b/bdb/build_vxworks/db_dump/db_dump.wpj
new file mode 100755
index 00000000000..6813766e5f1
--- /dev/null
+++ b/bdb/build_vxworks/db_dump/db_dump.wpj
@@ -0,0 +1,160 @@
+Document file - DO NOT EDIT
+
+<BEGIN> BUILD_PENTIUMgnu_BUILDRULE
+db_dump.out
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_AR
+ar386
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_ARCHIVE
+$(PRJ_DIR)/PENTIUMgnu/db_dump.a
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_AS
+cc386
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_CC
+cc386
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_CFLAGS
+-g \
+ -mpentium \
+ -ansi \
+ -nostdinc \
+ -DRW_MULTI_THREAD \
+ -D_REENTRANT \
+ -fvolatile \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -I$(PRJ_DIR)/.. \
+ -I$(PRJ_DIR)/../.. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_CFLAGS_AS
+-g \
+ -mpentium \
+ -ansi \
+ -nostdinc \
+ -fvolatile \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -P \
+ -x \
+ assembler-with-cpp \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_CPP
+cc386 -E -P -xc
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_LD
+ld386
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_LDDEPS
+
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_LDFLAGS
+-X -N
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_LD_PARTIAL_FLAGS
+-X -r
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_NM
+nm386 -g
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_OPTION_DEFINE_MACRO
+-D
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_OPTION_INCLUDE_DIR
+-I
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_POST_BUILD_RULE
+
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_PRJ_LIBS
+
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_SIZE
+size386
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_RO_DEPEND_PATH
+{$(WIND_BASE)/target/h/} \
+ {$(WIND_BASE)/target/src/} \
+ {$(WIND_BASE)/target/config/}
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_TC
+::tc_PENTIUMgnu
+<END>
+
+<BEGIN> BUILD_RULE_archive
+
+<END>
+
+<BEGIN> BUILD_RULE_db_dump.out
+
+<END>
+
+<BEGIN> BUILD_RULE_objects
+
+<END>
+
+<BEGIN> BUILD__CURRENT
+PENTIUMgnu
+<END>
+
+<BEGIN> BUILD__LIST
+PENTIUMgnu
+<END>
+
+<BEGIN> CORE_INFO_TYPE
+::prj_vxApp
+<END>
+
+<BEGIN> CORE_INFO_VERSION
+2.0
+<END>
+
+<BEGIN> FILE_db_dump.c_dependDone
+FALSE
+<END>
+
+<BEGIN> FILE_db_dump.c_dependencies
+
+<END>
+
+<BEGIN> FILE_db_dump.c_objects
+db_dump.o
+<END>
+
+<BEGIN> FILE_db_dump.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> PROJECT_FILES
+$(PRJ_DIR)/db_dump.c
+<END>
+
+<BEGIN> userComments
+db_dump
+<END>
diff --git a/bdb/build_vxworks/db_dump/db_dump/Makefile.custom b/bdb/build_vxworks/db_dump/db_dump/Makefile.custom
new file mode 100644
index 00000000000..ca781f7b251
--- /dev/null
+++ b/bdb/build_vxworks/db_dump/db_dump/Makefile.custom
@@ -0,0 +1,51 @@
+#
+# Custom Makefile shell
+#
+# This file may be edited freely, since it will not be regenerated
+# by the project manager.
+#
+# Use this makefile to define rules to make external binaries
+# and deposit them in the $(EXTERNAL_BINARIES_DIR) directory.
+#
+# If you have specified external modules during your component
+# creation, you will find make rules already in place below.
+# You will likely have to edit these to suit your individual
+# build setup.
+#
+# You may wish to use the CPU, BUILD_SPEC or TOOL make variables in
+# your Makefile to support builds for different architectures. Use
+# the FORCE_EXTERNAL_MAKE phony target to ensure that your external
+# make always runs.
+#
+# The example below assumes that your custom makefile is in the
+# mySourceTree directory, and that the binary file it produces
+# is placed into the $(BUILD_SPEC) sub-directory.
+#
+# EXTERNAL_SOURCE_BASE = /folk/me/mySourceTree
+# EXTERNAL_MODULE = myLibrary.o
+# EXTERNAL_MAKE = make
+#
+# $(EXTERNAL_BINARIES_DIR)/$(EXTERNAL_MODULE) : FORCE_EXTERNAL_MAKE
+# $(EXTERNAL_MAKE) -C $(EXTERNAL_SOURCE_BASE) \
+# -f $(EXTERNAL_SOURCE_BASE)/Makefile \
+# CPU=$(CPU) BUILD_SPEC=$(BUILD_SPEC) $(@F)
+# $(CP) $(subst /,$(DIRCHAR),$(EXTERNAL_SOURCE_BASE)/$(BUILD_SPEC)/$(@F) $@)
+#
+# If you are not adding your external modules from the component wizard,
+# you will have to include them in your component yourself:
+#
+# From the GUI, you can do this with the Component's 'Add external module'
+# dialog.
+#
+# If you are using the command line, add the module(s) by editing the
+# MODULES line in component.cdf file, e.g.
+#
+# Component INCLUDE_MYCOMPONENT {
+#
+# MODULES foo.o goo.o \
+# myLibrary.o
+#
+
+
+# rules to build custom libraries
+
diff --git a/bdb/build_vxworks/db_dump/db_dump/component.cdf b/bdb/build_vxworks/db_dump/db_dump/component.cdf
new file mode 100755
index 00000000000..5c1d4ccf308
--- /dev/null
+++ b/bdb/build_vxworks/db_dump/db_dump/component.cdf
@@ -0,0 +1,30 @@
+/* component.cdf - dynamically updated configuration */
+
+/*
+ * NOTE: you may edit this file to alter the configuration
+ * But all non-configuration information, including comments,
+ * will be lost upon rebuilding this project.
+ */
+
+/* Component information */
+
+Component INCLUDE_DB_DUMP {
+ ENTRY_POINTS ALL_GLOBAL_SYMBOLS
+ MODULES db_dump.o
+ NAME db_dump
+ PREF_DOMAIN ANY
+ _INIT_ORDER usrComponentsInit
+}
+
+/* EntryPoint information */
+
+/* Module information */
+
+Module db_dump.o {
+
+ NAME db_dump.o
+ SRC_PATH_NAME $PRJ_DIR/../db_dump.c
+}
+
+/* Parameter information */
+
diff --git a/bdb/build_vxworks/db_dump/db_dump/component.wpj b/bdb/build_vxworks/db_dump/db_dump/component.wpj
new file mode 100755
index 00000000000..e234641f498
--- /dev/null
+++ b/bdb/build_vxworks/db_dump/db_dump/component.wpj
@@ -0,0 +1,475 @@
+Document file - DO NOT EDIT
+
+<BEGIN> CORE_INFO_TYPE
+::prj_component
+<END>
+
+<BEGIN> CORE_INFO_VERSION
+AE1.1
+<END>
+
+<BEGIN> BUILD__CURRENT
+PENTIUM2gnu.debug
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_CURRENT_TARGET
+default
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_DEFAULTFORCPU
+1
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../db_dump.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../db_dump.c_objects
+db_dump.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../db_dump.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../db_dump.c_objects
+db_dump.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../db_dump.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/compConfig.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/compConfig.c_objects
+compConfig.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/compConfig.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_AR
+arpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_AS
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_CC
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_CFLAGS
+-mcpu=pentiumpro \
+ -march=pentiumpro \
+ -ansi \
+ -DRW_MULTI_THREAD \
+ -D_REENTRANT \
+ -g \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -MD \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -I$(PRJ_DIR)/../.. \
+ -I$(PRJ_DIR)/../../.. \
+ -DCPU=PENTIUM2
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_CFLAGS_AS
+-mcpu=pentiumpro \
+ -march=pentiumpro \
+ -ansi \
+ -g \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -P \
+ -x \
+ assembler-with-cpp \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM2
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_CPP
+ccpentium -E -P
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_CPPFILT
+c++filtpentium --strip-underscores
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_LD
+ldpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_LDFLAGS
+-X
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_LDPARTIAL
+ccpentium \
+ -B$(WIND_BASE)/host/$(WIND_HOST_TYPE)/lib/gcc-lib/ \
+ -nostdlib \
+ -r \
+ -Wl,-X
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_LD_PARTIAL_FLAGS
+-X -r
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_NM
+nmpentium -g
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_OPTION_DEFINE_MACRO
+-D
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_OPTION_GENERATE_DEPENDENCY_FILE
+-MD
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_OPTION_INCLUDE_DIR
+-I
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_RELEASE
+0
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_SIZE
+sizepentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_RELEASE
+0
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_RO_DEPEND_PATH
+$(WIND_BASE)/target/h/
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_TC
+::tc_PENTIUM2gnu.debug
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_DEFAULTFORCPU
+0
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../db_dump.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../db_dump.c_objects
+db_dump.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../db_dump.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/compConfig.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/compConfig.c_objects
+compConfig.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/compConfig.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_AR
+arpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_AS
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_CC
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_CFLAGS
+-mcpu=pentiumpro \
+ -march=pentiumpro \
+ -ansi \
+ -DRW_MULTI_THREAD \
+ -D_REENTRANT \
+ -O2 \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -MD \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -I$(PRJ_DIR)/../.. \
+ -I$(PRJ_DIR)/../../.. \
+ -DCPU=PENTIUM2
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_CFLAGS_AS
+-mcpu=pentiumpro \
+ -march=pentiumpro \
+ -ansi \
+ -O2 \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -P \
+ -x \
+ assembler-with-cpp \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM2
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_CPP
+ccpentium -E -P
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_CPPFILT
+c++filtpentium --strip-underscores
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_LD
+ldpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_LDFLAGS
+-X
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_LDPARTIAL
+ccpentium \
+ -B$(WIND_BASE)/host/$(WIND_HOST_TYPE)/lib/gcc-lib/ \
+ -nostdlib \
+ -r \
+ -Wl,-X
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_LD_PARTIAL_FLAGS
+-X -r
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_NM
+nmpentium -g
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_OPTION_DEFINE_MACRO
+-D
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_OPTION_GENERATE_DEPENDENCY_FILE
+-MD
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_OPTION_INCLUDE_DIR
+-I
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_RELEASE
+1
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_SIZE
+sizepentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_RELEASE
+1
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_RO_DEPEND_PATH
+$(WIND_BASE)/target/h/
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_TC
+::tc_PENTIUM2gnu.release
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_DEFAULTFORCPU
+1
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../db_dump.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../db_dump.c_objects
+db_dump.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../db_dump.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/compConfig.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/compConfig.c_objects
+compConfig.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/compConfig.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_AR
+arpentium
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_AS
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_CC
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_CFLAGS
+-mcpu=pentium \
+ -march=pentium \
+ -ansi \
+ -DRW_MULTI_THREAD \
+ -D_REENTRANT \
+ -g \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -MD \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -I$(PRJ_DIR)/../.. \
+ -I$(PRJ_DIR)/../../.. \
+ -DCPU=PENTIUM
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_CFLAGS_AS
+-mcpu=pentium \
+ -march=pentium \
+ -ansi \
+ -g \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -P \
+ -x \
+ assembler-with-cpp \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_CPP
+ccpentium -E -P
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_CPPFILT
+c++filtpentium --strip-underscores
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_LD
+ldpentium
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_LDFLAGS
+-X
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_LDPARTIAL
+ccpentium \
+ -B$(WIND_BASE)/host/$(WIND_HOST_TYPE)/lib/gcc-lib/ \
+ -nostdlib \
+ -r \
+ -Wl,-X
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_LD_PARTIAL_FLAGS
+-X -r
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_NM
+nmpentium -g
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_OPTION_DEFINE_MACRO
+-D
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_OPTION_GENERATE_DEPENDENCY_FILE
+-MD
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_OPTION_INCLUDE_DIR
+-I
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_RELEASE
+0
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_SIZE
+sizepentium
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_RELEASE
+0
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_RO_DEPEND_PATH
+$(WIND_BASE)/target/h/
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_TC
+::tc_PENTIUMgnu.debug
+<END>
+
+<BEGIN> BUILD__LIST
+PENTIUM2gnu.debug PENTIUM2gnu.release PENTIUMgnu.debug
+<END>
+
+<BEGIN> PROJECT_FILES
+$(PRJ_DIR)/../db_dump.c \
+ $(PRJ_DIR)/compConfig.c
+<END>
+
+<BEGIN> WCC__CDF_PATH
+$(PRJ_DIR)
+<END>
+
+<BEGIN> WCC__CURRENT
+PENTIUM2gnu.debug
+<END>
+
+<BEGIN> WCC__LIST
+PENTIUM2gnu.debug
+<END>
+
+<BEGIN> WCC__MXR_LIBS
+lib$(CPU)$(TOOL)vx.a
+<END>
+
+<BEGIN> WCC__OBJS_PATH
+$(WIND_BASE)/target/lib/obj$CPU$TOOLvx
+<END>
+
diff --git a/bdb/build_vxworks/db_load/db_load.c b/bdb/build_vxworks/db_load/db_load.c
new file mode 100644
index 00000000000..6eedae590b1
--- /dev/null
+++ b/bdb/build_vxworks/db_load/db_load.c
@@ -0,0 +1,1247 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char copyright[] =
+ "Copyright (c) 1996-2002\nSleepycat Software Inc. All rights reserved.\n";
+static const char revid[] =
+ "$Id: db_load.c,v 11.71 2002/08/08 03:50:36 bostic Exp $";
+#endif
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <limits.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_page.h"
+#include "dbinc/db_am.h"
+
+typedef struct { /* XXX: Globals. */
+ const char *progname; /* Program name. */
+ char *hdrbuf; /* Input file header. */
+ u_long lineno; /* Input file line number. */
+ u_long origline; /* Original file line number. */
+ int endodata; /* Reached the end of a database. */
+ int endofile; /* Reached the end of the input. */
+ int version; /* Input version. */
+ char *home; /* Env home. */
+ char *passwd; /* Env passwd. */
+ int private; /* Private env. */
+ u_int32_t cache; /* Env cache size. */
+} LDG;
+
+void db_load_badend __P((DB_ENV *));
+void db_load_badnum __P((DB_ENV *));
+int db_load_configure __P((DB_ENV *, DB *, char **, char **, int *));
+int db_load_convprintable __P((DB_ENV *, char *, char **));
+int db_load_db_init __P((DB_ENV *, char *, u_int32_t, int *));
+int db_load_dbt_rdump __P((DB_ENV *, DBT *));
+int db_load_dbt_rprint __P((DB_ENV *, DBT *));
+int db_load_dbt_rrecno __P((DB_ENV *, DBT *, int));
+int db_load_digitize __P((DB_ENV *, int, int *));
+int db_load_env_create __P((DB_ENV **, LDG *));
+int db_load_load __P((DB_ENV *, char *, DBTYPE, char **, u_int, LDG *, int *));
+int db_load_main __P((int, char *[]));
+int db_load_rheader __P((DB_ENV *, DB *, DBTYPE *, char **, int *, int *));
+int db_load_usage __P((void));
+int db_load_version_check __P((const char *));
+
+#define G(f) ((LDG *)dbenv->app_private)->f
+
+ /* Flags to the load function. */
+#define LDF_NOHEADER 0x01 /* No dump header. */
+#define LDF_NOOVERWRITE 0x02 /* Don't overwrite existing rows. */
+#define LDF_PASSWORD 0x04 /* Encrypt created databases. */
+
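+/*
+ * The G() macro reaches the per-run globals through the environment:
+ * G(lineno), for example, expands to
+ *
+ *	((LDG *)dbenv->app_private)->lineno
+ *
+ * so the LDG filled in by main() is presumably attached to the DB_ENV
+ * as app_private when the environment is created.  The LDF_* flags map
+ * to the -n (LDF_NOOVERWRITE), -P (LDF_PASSWORD) and -T (LDF_NOHEADER)
+ * command-line switches parsed below.
+ */
+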
+int
+db_load(args)
+ char *args;
+{
+ int argc;
+ char **argv;
+
+ __db_util_arg("db_load", args, &argc, &argv);
+ return (db_load_main(argc, argv) ? EXIT_FAILURE : EXIT_SUCCESS);
+}
+
+#include <stdio.h>
+#define ERROR_RETURN ERROR
+
+int
+db_load_main(argc, argv)
+ int argc;
+ char *argv[];
+{
+ extern char *optarg;
+ extern int optind, __db_getopt_reset;
+ DBTYPE dbtype;
+ DB_ENV *dbenv;
+ LDG ldg;
+ u_int32_t ldf;
+ int ch, existed, exitval, ret;
+ char **clist, **clp;
+
+ ldg.progname = "db_load";
+ ldg.lineno = 0;
+ ldg.endodata = ldg.endofile = 0;
+ ldg.version = 1;
+ ldg.cache = MEGABYTE;
+ ldg.hdrbuf = NULL;
+ ldg.home = NULL;
+ ldg.passwd = NULL;
+
+ if ((ret = db_load_version_check(ldg.progname)) != 0)
+ return (ret);
+
+ ldf = 0;
+ exitval = 0;
+ dbtype = DB_UNKNOWN;
+
+ /* Allocate enough room for configuration arguments. */
+ if ((clp = clist = (char **)calloc(argc + 1, sizeof(char *))) == NULL) {
+ fprintf(stderr, "%s: %s\n", ldg.progname, strerror(ENOMEM));
+ return (EXIT_FAILURE);
+ }
+
+ __db_getopt_reset = 1;
+ while ((ch = getopt(argc, argv, "c:f:h:nP:Tt:V")) != EOF)
+ switch (ch) {
+ case 'c':
+ *clp++ = optarg;
+ break;
+ case 'f':
+ if (freopen(optarg, "r", stdin) == NULL) {
+ fprintf(stderr, "%s: %s: reopen: %s\n",
+ ldg.progname, optarg, strerror(errno));
+ return (EXIT_FAILURE);
+ }
+ break;
+ case 'h':
+ ldg.home = optarg;
+ break;
+ case 'n':
+ ldf |= LDF_NOOVERWRITE;
+ break;
+ case 'P':
+ ldg.passwd = strdup(optarg);
+ memset(optarg, 0, strlen(optarg));
+ if (ldg.passwd == NULL) {
+ fprintf(stderr, "%s: strdup: %s\n",
+ ldg.progname, strerror(errno));
+ return (EXIT_FAILURE);
+ }
+ ldf |= LDF_PASSWORD;
+ break;
+ case 'T':
+ ldf |= LDF_NOHEADER;
+ break;
+ case 't':
+ if (strcmp(optarg, "btree") == 0) {
+ dbtype = DB_BTREE;
+ break;
+ }
+ if (strcmp(optarg, "hash") == 0) {
+ dbtype = DB_HASH;
+ break;
+ }
+ if (strcmp(optarg, "recno") == 0) {
+ dbtype = DB_RECNO;
+ break;
+ }
+ if (strcmp(optarg, "queue") == 0) {
+ dbtype = DB_QUEUE;
+ break;
+ }
+ return (db_load_usage());
+ case 'V':
+ printf("%s\n", db_version(NULL, NULL, NULL));
+ return (EXIT_SUCCESS);
+ case '?':
+ default:
+ return (db_load_usage());
+ }
+ argc -= optind;
+ argv += optind;
+
+ if (argc != 1)
+ return (db_load_usage());
+
+ /* Handle possible interruptions. */
+ __db_util_siginit();
+
+ /*
+ * Create an environment object initialized for error reporting, and
+ * then open it.
+ */
+ if (db_load_env_create(&dbenv, &ldg) != 0)
+ goto shutdown;
+
+ while (!ldg.endofile)
+ if (db_load_load(dbenv, argv[0], dbtype, clist, ldf,
+ &ldg, &existed) != 0)
+ goto shutdown;
+
+ if (0) {
+shutdown: exitval = 1;
+ }
+ if ((ret = dbenv->close(dbenv, 0)) != 0) {
+ exitval = 1;
+ fprintf(stderr,
+ "%s: dbenv->close: %s\n", ldg.progname, db_strerror(ret));
+ }
+
+ /* Resend any caught signal. */
+ __db_util_sigresend();
+ free(clist);
+
+ /*
+ * Return 0 on success, 1 if keys existed already, and 2 on failure.
+ *
+ * Technically, this is wrong, because exit of anything other than
+ * 0 is implementation-defined by the ANSI C standard. I don't see
+ * any good solutions that don't involve API changes.
+ */
+ return (exitval == 0 ? (existed == 0 ? 0 : 1) : 2);
+}
+
+/*
+ * load --
+ * Load a database.
+ */
+int
+db_load_load(dbenv, name, argtype, clist, flags, ldg, existedp)
+ DB_ENV *dbenv;
+ char *name, **clist;
+ DBTYPE argtype;
+ u_int flags;
+ LDG *ldg;
+ int *existedp;
+{
+ DB *dbp;
+ DBT key, rkey, data, *readp, *writep;
+ DBTYPE dbtype;
+ DB_TXN *ctxn, *txn;
+ db_recno_t recno, datarecno;
+ u_int32_t put_flags;
+ int ascii_recno, checkprint, hexkeys, keyflag, keys, resize, ret, rval;
+ char *subdb;
+
+ *existedp = 0;
+
+ put_flags = LF_ISSET(LDF_NOOVERWRITE) ? DB_NOOVERWRITE : 0;
+ G(endodata) = 0;
+
+ subdb = NULL;
+ ctxn = txn = NULL;
+ memset(&key, 0, sizeof(DBT));
+ memset(&data, 0, sizeof(DBT));
+ memset(&rkey, 0, sizeof(DBT));
+
+retry_db:
+ /* Create the DB object. */
+ if ((ret = db_create(&dbp, dbenv, 0)) != 0) {
+ dbenv->err(dbenv, ret, "db_create");
+ goto err;
+ }
+
+ dbtype = DB_UNKNOWN;
+ keys = -1;
+ hexkeys = -1;
+ keyflag = -1;
+ /* Read the header -- if there's no header, we expect flat text. */
+ if (LF_ISSET(LDF_NOHEADER)) {
+ checkprint = 1;
+ dbtype = argtype;
+ } else {
+ if (db_load_rheader(dbenv,
+ dbp, &dbtype, &subdb, &checkprint, &keys) != 0)
+ goto err;
+ if (G(endofile))
+ goto done;
+ }
+
+ /*
+ * Apply command-line configuration changes. (We apply command-line
+ * configuration changes to all databases that are loaded, e.g., all
+ * subdatabases.)
+ */
+ if (db_load_configure(dbenv, dbp, clist, &subdb, &keyflag))
+ goto err;
+
+ if (keys != 1) {
+ if (keyflag == 1) {
+ dbp->err(dbp, EINVAL, "No keys specified in file");
+ goto err;
+ }
+ } else if (keyflag == 0) {
+ dbp->err(dbp, EINVAL, "Keys specified in file");
+ goto err;
+ } else
+ keyflag = 1;
+
+ if (dbtype == DB_BTREE || dbtype == DB_HASH) {
+ if (keyflag == 0)
+ dbp->err(dbp,
+ EINVAL, "Btree and Hash must specify keys");
+ else
+ keyflag = 1;
+ }
+
+ if (argtype != DB_UNKNOWN) {
+
+ if (dbtype == DB_RECNO || dbtype == DB_QUEUE)
+ if (keyflag != 1 && argtype != DB_RECNO &&
+ argtype != DB_QUEUE) {
+ dbenv->errx(dbenv,
+ "improper database type conversion specified");
+ goto err;
+ }
+ dbtype = argtype;
+ }
+
+ if (dbtype == DB_UNKNOWN) {
+ dbenv->errx(dbenv, "no database type specified");
+ goto err;
+ }
+
+ if (keyflag == -1)
+ keyflag = 0;
+
+ /*
+ * Recno keys have only been printed in hexadecimal starting
+ * with db_dump format version 3 (DB 3.2).
+ *
+ * !!!
+ * Note that version is set in db_load_rheader(), which must be called before
+ * this assignment.
+ */
+ hexkeys = (G(version) >= 3 && keyflag == 1 && checkprint == 0);
+
+ if (keyflag == 1 && (dbtype == DB_RECNO || dbtype == DB_QUEUE))
+ ascii_recno = 1;
+ else
+ ascii_recno = 0;
+
+ /* If configured with a password, encrypt databases we create. */
+ if (LF_ISSET(LDF_PASSWORD) &&
+ (ret = dbp->set_flags(dbp, DB_ENCRYPT)) != 0) {
+ dbp->err(dbp, ret, "DB->set_flags: DB_ENCRYPT");
+ goto err;
+ }
+
+ /* Open the DB file. */
+ if ((ret = dbp->open(dbp, NULL, name, subdb, dbtype,
+ DB_CREATE | (TXN_ON(dbenv) ? DB_AUTO_COMMIT : 0),
+ __db_omode("rwrwrw"))) != 0) {
+ dbp->err(dbp, ret, "DB->open: %s", name);
+ goto err;
+ }
+ if (ldg->private != 0) {
+ if ((ret =
+ __db_util_cache(dbenv, dbp, &ldg->cache, &resize)) != 0)
+ goto err;
+ if (resize) {
+ dbp->close(dbp, 0);
+ dbp = NULL;
+ dbenv->close(dbenv, 0);
+ if ((ret = db_load_env_create(&dbenv, ldg)) != 0)
+ goto err;
+ goto retry_db;
+ }
+ }
+
+ /* Initialize the key/data pair. */
+ readp = &key;
+ writep = &key;
+ if (dbtype == DB_RECNO || dbtype == DB_QUEUE) {
+ key.size = sizeof(recno);
+ if (keyflag) {
+ key.data = &datarecno;
+ if (checkprint) {
+ readp = &rkey;
+ goto key_data;
+ }
+ }
+ else
+ key.data = &recno;
+ } else
+key_data: if ((readp->data =
+ (void *)malloc(readp->ulen = 1024)) == NULL) {
+ dbenv->err(dbenv, ENOMEM, NULL);
+ goto err;
+ }
+ if ((data.data = (void *)malloc(data.ulen = 1024)) == NULL) {
+ dbenv->err(dbenv, ENOMEM, NULL);
+ goto err;
+ }
+
+ if (TXN_ON(dbenv) &&
+ (ret = dbenv->txn_begin(dbenv, NULL, &txn, 0)) != 0)
+ goto err;
+
+ /* Get each key/data pair and add them to the database. */
+ for (recno = 1; !__db_util_interrupted(); ++recno) {
+ if (!keyflag)
+ if (checkprint) {
+ if (db_load_dbt_rprint(dbenv, &data))
+ goto err;
+ } else {
+ if (db_load_dbt_rdump(dbenv, &data))
+ goto err;
+ }
+ else
+ if (checkprint) {
+ if (db_load_dbt_rprint(dbenv, readp))
+ goto err;
+ if (!G(endodata) && db_load_dbt_rprint(dbenv, &data))
+ goto fmt;
+ } else {
+ if (ascii_recno) {
+ if (db_load_dbt_rrecno(dbenv, readp, hexkeys))
+ goto err;
+ } else
+ if (db_load_dbt_rdump(dbenv, readp))
+ goto err;
+ if (!G(endodata) && db_load_dbt_rdump(dbenv, &data)) {
+fmt: dbenv->errx(dbenv,
+ "odd number of key/data pairs");
+ goto err;
+ }
+ }
+ if (G(endodata))
+ break;
+ if (readp != writep) {
+ if (sscanf(readp->data, "%ud", &datarecno) != 1)
+ dbenv->errx(dbenv,
+ "%s: non-integer key at line: %d",
+ name, !keyflag ? recno : recno * 2 - 1);
+ if (datarecno == 0)
+ dbenv->errx(dbenv, "%s: zero key at line: %d",
+ name,
+ !keyflag ? recno : recno * 2 - 1);
+ }
+retry: if (txn != NULL)
+ if ((ret = dbenv->txn_begin(dbenv, txn, &ctxn, 0)) != 0)
+ goto err;
+ switch (ret = dbp->put(dbp, ctxn, writep, &data, put_flags)) {
+ case 0:
+ if (ctxn != NULL) {
+ if ((ret =
+ ctxn->commit(ctxn, DB_TXN_NOSYNC)) != 0)
+ goto err;
+ ctxn = NULL;
+ }
+ break;
+ case DB_KEYEXIST:
+ *existedp = 1;
+ dbenv->errx(dbenv,
+ "%s: line %d: key already exists, not loaded:",
+ name,
+ !keyflag ? recno : recno * 2 - 1);
+
+ (void)__db_prdbt(&key, checkprint, 0, stderr,
+ __db_verify_callback, 0, NULL);
+ break;
+ case DB_LOCK_DEADLOCK:
+ /* If we have a child txn, retry--else it's fatal. */
+ if (ctxn != NULL) {
+ if ((ret = ctxn->abort(ctxn)) != 0)
+ goto err;
+ ctxn = NULL;
+ goto retry;
+ }
+ /* FALLTHROUGH */
+ default:
+ dbenv->err(dbenv, ret, NULL);
+ if (ctxn != NULL) {
+ (void)ctxn->abort(ctxn);
+ ctxn = NULL;
+ }
+ goto err;
+ }
+ if (ctxn != NULL) {
+ if ((ret = ctxn->abort(ctxn)) != 0)
+ goto err;
+ ctxn = NULL;
+ }
+ }
+done: rval = 0;
+ DB_ASSERT(ctxn == NULL);
+ if (txn != NULL && (ret = txn->commit(txn, 0)) != 0) {
+ txn = NULL;
+ goto err;
+ }
+
+ if (0) {
+err: rval = 1;
+ DB_ASSERT(ctxn == NULL);
+ if (txn != NULL)
+ (void)txn->abort(txn);
+ }
+
+ /* Close the database. */
+ if (dbp != NULL && (ret = dbp->close(dbp, 0)) != 0) {
+ dbenv->err(dbenv, ret, "DB->close");
+ rval = 1;
+ }
+
+ if (G(hdrbuf) != NULL)
+ free(G(hdrbuf));
+ G(hdrbuf) = NULL;
+ /* Free allocated memory. */
+ if (subdb != NULL)
+ free(subdb);
+ if (dbtype != DB_RECNO && dbtype != DB_QUEUE)
+ free(key.data);
+ if (rkey.data != NULL)
+ free(rkey.data);
+ free(data.data);
+
+ return (rval);
+}
+
+/*
+ * db_init --
+ * Initialize the environment.
+ */
+int
+db_load_db_init(dbenv, home, cache, is_private)
+ DB_ENV *dbenv;
+ char *home;
+ u_int32_t cache;
+ int *is_private;
+{
+ u_int32_t flags;
+ int ret;
+
+ *is_private = 0;
+ /* We may be loading into a live environment. Try and join. */
+ flags = DB_USE_ENVIRON |
+ DB_INIT_LOCK | DB_INIT_LOG | DB_INIT_MPOOL | DB_INIT_TXN;
+ if (dbenv->open(dbenv, home, flags, 0) == 0)
+ return (0);
+
+ /*
+ * We're trying to load a database.
+ *
+ * An environment is required because we may be trying to look at
+ * databases in directories other than the current one. We could
+ * avoid using an environment iff the -h option wasn't specified,
+ * but that seems like more work than it's worth.
+ *
+ * No environment exists (or, at least no environment that includes
+ * an mpool region exists). Create one, but make it private so that
+ * no files are actually created.
+ */
+ LF_CLR(DB_INIT_LOCK | DB_INIT_LOG | DB_INIT_TXN);
+ LF_SET(DB_CREATE | DB_PRIVATE);
+ *is_private = 1;
+ if ((ret = dbenv->set_cachesize(dbenv, 0, cache, 1)) != 0) {
+ dbenv->err(dbenv, ret, "set_cachesize");
+ return (1);
+ }
+ if ((ret = dbenv->open(dbenv, home, flags, 0)) == 0)
+ return (0);
+
+ /* An environment is required. */
+ dbenv->err(dbenv, ret, "DB_ENV->open");
+ return (1);
+}
+
+#define FLAG(name, value, keyword, flag) \
+ if (strcmp(name, keyword) == 0) { \
+ switch (*value) { \
+ case '1': \
+ if ((ret = dbp->set_flags(dbp, flag)) != 0) { \
+ dbp->err(dbp, ret, "%s: set_flags: %s", \
+ G(progname), name); \
+ return (1); \
+ } \
+ break; \
+ case '0': \
+ break; \
+ default: \
+ db_load_badnum(dbenv); \
+ return (1); \
+ } \
+ continue; \
+ }
+#define NUMBER(name, value, keyword, func) \
+ if (strcmp(name, keyword) == 0) { \
+ if (__db_getlong(dbp, \
+ NULL, value, 1, LONG_MAX, &val) != 0) \
+ return (1); \
+ if ((ret = dbp->func(dbp, val)) != 0) \
+ goto nameerr; \
+ continue; \
+ }
+#define STRING(name, value, keyword, func) \
+ if (strcmp(name, keyword) == 0) { \
+ if ((ret = dbp->func(dbp, value[0])) != 0) \
+ goto nameerr; \
+ continue; \
+ }
+
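+/*
+ * For illustration, a command-line pair such as "db_pagesize=4096"
+ * (given to db_load with -c) reaches the NUMBER() macro above and,
+ * after __db_getlong() validates the value, turns into
+ *
+ *	dbp->set_pagesize(dbp, 4096);
+ *
+ * FLAG() keywords work the same way but accept only "0" or "1" and
+ * call dbp->set_flags(), e.g. "duplicates=1" becomes
+ * dbp->set_flags(dbp, DB_DUP).
+ */
+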
+/*
+ * configure --
+ * Handle command-line configuration options.
+ */
+int
+db_load_configure(dbenv, dbp, clp, subdbp, keysp)
+ DB_ENV *dbenv;
+ DB *dbp;
+ char **clp, **subdbp;
+ int *keysp;
+{
+ long val;
+ int ret, savech;
+ char *name, *value;
+
+ for (; (name = *clp) != NULL; *--value = savech, ++clp) {
+ if ((value = strchr(name, '=')) == NULL) {
+ dbp->errx(dbp,
+ "command-line configuration uses name=value format");
+ return (1);
+ }
+ savech = *value;
+ *value++ = '\0';
+
+ if (strcmp(name, "database") == 0 ||
+ strcmp(name, "subdatabase") == 0) {
+ if (*subdbp != NULL)
+ free(*subdbp);
+ if ((*subdbp = strdup(value)) == NULL) {
+ dbp->err(dbp, ENOMEM, NULL);
+ return (1);
+ }
+ continue;
+ }
+ if (strcmp(name, "keys") == 0) {
+ if (strcmp(value, "1") == 0)
+ *keysp = 1;
+ else if (strcmp(value, "0") == 0)
+ *keysp = 0;
+ else {
+ db_load_badnum(dbenv);
+ return (1);
+ }
+ continue;
+ }
+
+#ifdef notyet
+ NUMBER(name, value, "bt_maxkey", set_bt_maxkey);
+#endif
+ NUMBER(name, value, "bt_minkey", set_bt_minkey);
+ NUMBER(name, value, "db_lorder", set_lorder);
+ NUMBER(name, value, "db_pagesize", set_pagesize);
+ FLAG(name, value, "chksum", DB_CHKSUM_SHA1);
+ FLAG(name, value, "duplicates", DB_DUP);
+ FLAG(name, value, "dupsort", DB_DUPSORT);
+ NUMBER(name, value, "h_ffactor", set_h_ffactor);
+ NUMBER(name, value, "h_nelem", set_h_nelem);
+ NUMBER(name, value, "re_len", set_re_len);
+ STRING(name, value, "re_pad", set_re_pad);
+ FLAG(name, value, "recnum", DB_RECNUM);
+ FLAG(name, value, "renumber", DB_RENUMBER);
+
+ dbp->errx(dbp,
+ "unknown command-line configuration keyword \"%s\"", name);
+ return (1);
+ }
+ return (0);
+
+nameerr:
+ dbp->err(dbp, ret, "%s: %s=%s", G(progname), name, value);
+ return (1);
+}
+
+/*
+ * rheader --
+ * Read the header message.
+ */
+int
+db_load_rheader(dbenv, dbp, dbtypep, subdbp, checkprintp, keysp)
+ DB_ENV *dbenv;
+ DB *dbp;
+ DBTYPE *dbtypep;
+ char **subdbp;
+ int *checkprintp, *keysp;
+{
+ long val;
+ int ch, first, hdr, linelen, buflen, ret, start;
+ char *buf, *name, *p, *value;
+
+ *dbtypep = DB_UNKNOWN;
+ *checkprintp = 0;
+ name = p = NULL;
+
+ /*
+ * We start with a smallish buffer; most headers are small.
+ * We may need to realloc it for a large subdatabase name.
+ */
+ buflen = 4096;
+ if (G(hdrbuf) == NULL) {
+ hdr = 0;
+ if ((buf = (char *)malloc(buflen)) == NULL) {
+memerr: dbp->errx(dbp, "could not allocate buffer %d", buflen);
+ return (1);
+ }
+ G(hdrbuf) = buf;
+ G(origline) = G(lineno);
+ } else {
+ hdr = 1;
+ buf = G(hdrbuf);
+ G(lineno) = G(origline);
+ }
+
+ start = 0;
+ for (first = 1;; first = 0) {
+ ++G(lineno);
+
+ /* Read a line, which may be of arbitrary length, into buf. */
+ linelen = 0;
+ buf = &G(hdrbuf)[start];
+ if (hdr == 0) {
+ for (;;) {
+ if ((ch = getchar()) == EOF) {
+ if (!first || ferror(stdin))
+ goto badfmt;
+ G(endofile) = 1;
+ break;
+ }
+
+ if (ch == '\n')
+ break;
+
+ buf[linelen++] = ch;
+
+ /* If the buffer is too small, double it. */
+ if (linelen + start == buflen) {
+ G(hdrbuf) = (char *)realloc(G(hdrbuf),
+ buflen *= 2);
+ if (G(hdrbuf) == NULL)
+ goto memerr;
+ buf = &G(hdrbuf)[start];
+ }
+ }
+ if (G(endofile) == 1)
+ break;
+ buf[linelen++] = '\0';
+ } else
+ linelen = strlen(buf) + 1;
+ start += linelen;
+
+ if (name != NULL) {
+ *p = '=';
+ free(name);
+ name = NULL;
+ }
+ /* If we don't see the expected information, it's an error. */
+ if ((name = strdup(buf)) == NULL)
+ goto memerr;
+ if ((p = strchr(name, '=')) == NULL)
+ goto badfmt;
+ *p++ = '\0';
+
+ value = p--;
+
+ if (name[0] == '\0' || value[0] == '\0')
+ goto badfmt;
+
+ if (strcmp(name, "HEADER") == 0)
+ break;
+ if (strcmp(name, "VERSION") == 0) {
+ /*
+ * Version 1 didn't have a "VERSION" header line. We
+ * only support versions 1, 2, and 3 of the dump format.
+ */
+ G(version) = atoi(value);
+
+ if (G(version) > 3) {
+ dbp->errx(dbp,
+ "line %lu: VERSION %d is unsupported",
+ G(lineno), G(version));
+ goto err;
+ }
+ continue;
+ }
+ if (strcmp(name, "format") == 0) {
+ if (strcmp(value, "bytevalue") == 0) {
+ *checkprintp = 0;
+ continue;
+ }
+ if (strcmp(value, "print") == 0) {
+ *checkprintp = 1;
+ continue;
+ }
+ goto badfmt;
+ }
+ if (strcmp(name, "type") == 0) {
+ if (strcmp(value, "btree") == 0) {
+ *dbtypep = DB_BTREE;
+ continue;
+ }
+ if (strcmp(value, "hash") == 0) {
+ *dbtypep = DB_HASH;
+ continue;
+ }
+ if (strcmp(value, "recno") == 0) {
+ *dbtypep = DB_RECNO;
+ continue;
+ }
+ if (strcmp(value, "queue") == 0) {
+ *dbtypep = DB_QUEUE;
+ continue;
+ }
+ dbp->errx(dbp, "line %lu: unknown type", G(lineno));
+ goto err;
+ }
+ if (strcmp(name, "database") == 0 ||
+ strcmp(name, "subdatabase") == 0) {
+ if ((ret = db_load_convprintable(dbenv, value, subdbp)) != 0) {
+ dbp->err(dbp, ret, "error reading db name");
+ goto err;
+ }
+ continue;
+ }
+ if (strcmp(name, "keys") == 0) {
+ if (strcmp(value, "1") == 0)
+ *keysp = 1;
+ else if (strcmp(value, "0") == 0)
+ *keysp = 0;
+ else {
+ db_load_badnum(dbenv);
+ goto err;
+ }
+ continue;
+ }
+
+#ifdef notyet
+ NUMBER(name, value, "bt_maxkey", set_bt_maxkey);
+#endif
+ NUMBER(name, value, "bt_minkey", set_bt_minkey);
+ NUMBER(name, value, "db_lorder", set_lorder);
+ NUMBER(name, value, "db_pagesize", set_pagesize);
+ NUMBER(name, value, "extentsize", set_q_extentsize);
+ FLAG(name, value, "chksum", DB_CHKSUM_SHA1);
+ FLAG(name, value, "duplicates", DB_DUP);
+ FLAG(name, value, "dupsort", DB_DUPSORT);
+ NUMBER(name, value, "h_ffactor", set_h_ffactor);
+ NUMBER(name, value, "h_nelem", set_h_nelem);
+ NUMBER(name, value, "re_len", set_re_len);
+ STRING(name, value, "re_pad", set_re_pad);
+ FLAG(name, value, "recnum", DB_RECNUM);
+ FLAG(name, value, "renumber", DB_RENUMBER);
+
+ dbp->errx(dbp,
+ "unknown input-file header configuration keyword \"%s\"",
+ name);
+ goto err;
+ }
+ ret = 0;
+ if (0) {
+nameerr:
+ dbp->err(dbp, ret, "%s: %s=%s", G(progname), name, value);
+ ret = 1;
+ }
+ if (0)
+err: ret = 1;
+ if (0) {
+badfmt:
+ dbp->errx(dbp, "line %lu: unexpected format", G(lineno));
+ ret = 1;
+ }
+ if (name != NULL) {
+ *p = '=';
+ free(name);
+ }
+ return (ret);
+}
+
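For reference, the header this loop parses is the name=value block that db_dump writes at the top of its output; a typical btree dump header looks roughly like this (the exact set of lines varies with the access method and dump options):

    VERSION=3
    format=bytevalue
    type=btree
    db_pagesize=4096
    HEADER=END

Keywords such as db_pagesize fall through to the NUMBER/FLAG/STRING dispatch near the end of the loop, and the HEADER line terminates it.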
+/*
+ * convprintable --
+ * Convert a printable-encoded string into a newly allocated string.
+ *
+ * In an ideal world, this would probably share code with dbt_rprint, but
+ * that's set up to read character-by-character (to avoid large memory
+ * allocations that aren't likely to be a problem here), and this has fewer
+ * special cases to deal with.
+ *
+ * Note that despite the printable encoding, the char * interface to this
+ * function (which is, not coincidentally, also used for database naming)
+ * means that outstr cannot contain any nuls.
+ */
+int
+db_load_convprintable(dbenv, instr, outstrp)
+ DB_ENV *dbenv;
+ char *instr, **outstrp;
+{
+ char c, *outstr;
+ int e1, e2;
+
+ /*
+ * Just malloc a buffer big enough for the whole input string plus
+ * a terminating nul; the decoded output can only be of equal or
+ * shorter length.
+ */
+ if ((outstr = (char *)malloc(strlen(instr) + 1)) == NULL)
+ return (ENOMEM);
+
+ *outstrp = outstr;
+
+ e1 = e2 = 0;
+ for ( ; *instr != '\0'; instr++)
+ if (*instr == '\\') {
+ if (*++instr == '\\') {
+ *outstr++ = '\\';
+ continue;
+ }
+ c = db_load_digitize(dbenv, *instr, &e1) << 4;
+ c |= db_load_digitize(dbenv, *++instr, &e2);
+ if (e1 || e2) {
+ db_load_badend(dbenv);
+ return (EINVAL);
+ }
+
+ *outstr++ = c;
+ } else
+ *outstr++ = *instr;
+
+ *outstr = '\0';
+
+ return (0);
+}
+
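As a concrete example of the encoding handled above: the five-character input a\7eb decodes to the three bytes 'a', 0x7e ('~') and 'b'. A literal backslash is encoded as a doubled backslash, and any other escaped byte as a backslash followed by exactly two lowercase hexadecimal digits, which db_load_digitize (below) converts, flagging anything else (including uppercase hex digits) as an error.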
+/*
+ * dbt_rprint --
+ * Read a printable line into a DBT structure.
+ */
+int
+db_load_dbt_rprint(dbenv, dbtp)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+{
+ u_int32_t len;
+ u_int8_t *p;
+ int c1, c2, e, escape, first;
+ char buf[32];
+
+ ++G(lineno);
+
+ first = 1;
+ e = escape = 0;
+ for (p = dbtp->data, len = 0; (c1 = getchar()) != '\n';) {
+ if (c1 == EOF) {
+ if (len == 0) {
+ G(endofile) = G(endodata) = 1;
+ return (0);
+ }
+ db_load_badend(dbenv);
+ return (1);
+ }
+ if (first) {
+ first = 0;
+ if (G(version) > 1) {
+ if (c1 != ' ') {
+ buf[0] = c1;
+ if (fgets(buf + 1,
+ sizeof(buf) - 1, stdin) == NULL ||
+ strcmp(buf, "DATA=END\n") != 0) {
+ db_load_badend(dbenv);
+ return (1);
+ }
+ G(endodata) = 1;
+ return (0);
+ }
+ continue;
+ }
+ }
+ if (escape) {
+ if (c1 != '\\') {
+ if ((c2 = getchar()) == EOF) {
+ db_load_badend(dbenv);
+ return (1);
+ }
+ c1 = db_load_digitize(dbenv,
+ c1, &e) << 4 | db_load_digitize(dbenv, c2, &e);
+ if (e)
+ return (1);
+ }
+ escape = 0;
+ } else
+ if (c1 == '\\') {
+ escape = 1;
+ continue;
+ }
+ if (len >= dbtp->ulen - 10) {
+ dbtp->ulen *= 2;
+ if ((dbtp->data =
+ (void *)realloc(dbtp->data, dbtp->ulen)) == NULL) {
+ dbenv->err(dbenv, ENOMEM, NULL);
+ return (1);
+ }
+ p = (u_int8_t *)dbtp->data + len;
+ }
+ ++len;
+ *p++ = c1;
+ }
+ dbtp->size = len;
+
+ return (0);
+}
+
+/*
+ * dbt_rdump --
+ * Read a byte dump line into a DBT structure.
+ */
+int
+db_load_dbt_rdump(dbenv, dbtp)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+{
+ u_int32_t len;
+ u_int8_t *p;
+ int c1, c2, e, first;
+ char buf[32];
+
+ ++G(lineno);
+
+ first = 1;
+ e = 0;
+ for (p = dbtp->data, len = 0; (c1 = getchar()) != '\n';) {
+ if (c1 == EOF) {
+ if (len == 0) {
+ G(endofile) = G(endodata) = 1;
+ return (0);
+ }
+ db_load_badend(dbenv);
+ return (1);
+ }
+ if (first) {
+ first = 0;
+ if (G(version) > 1) {
+ if (c1 != ' ') {
+ buf[0] = c1;
+ if (fgets(buf + 1,
+ sizeof(buf) - 1, stdin) == NULL ||
+ strcmp(buf, "DATA=END\n") != 0) {
+ db_load_badend(dbenv);
+ return (1);
+ }
+ G(endodata) = 1;
+ return (0);
+ }
+ continue;
+ }
+ }
+ if ((c2 = getchar()) == EOF) {
+ db_load_badend(dbenv);
+ return (1);
+ }
+ if (len >= dbtp->ulen - 10) {
+ dbtp->ulen *= 2;
+ if ((dbtp->data =
+ (void *)realloc(dbtp->data, dbtp->ulen)) == NULL) {
+ dbenv->err(dbenv, ENOMEM, NULL);
+ return (1);
+ }
+ p = (u_int8_t *)dbtp->data + len;
+ }
+ ++len;
+ *p++ = db_load_digitize(dbenv, c1, &e) << 4 | db_load_digitize(dbenv, c2, &e);
+ if (e)
+ return (1);
+ }
+ dbtp->size = len;
+
+ return (0);
+}
+
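For reference, the lines this routine consumes are the version-2-and-later byte-value lines written by db_dump: each key or data item is one line beginning with a single space followed by an even number of lowercase hex digits, and the key/data section is terminated by a DATA=END line. A record with key "key" and data "value" would appear roughly as (note the leading space on the item lines):

     6b6579
     76616c7565
    DATA=END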
+/*
+ * dbt_rrecno --
+ * Read a record number dump line into a DBT structure.
+ */
+int
+db_load_dbt_rrecno(dbenv, dbtp, ishex)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ int ishex;
+{
+ char buf[32], *p, *q;
+
+ ++G(lineno);
+
+ if (fgets(buf, sizeof(buf), stdin) == NULL) {
+ G(endofile) = G(endodata) = 1;
+ return (0);
+ }
+
+ if (strcmp(buf, "DATA=END\n") == 0) {
+ G(endodata) = 1;
+ return (0);
+ }
+
+ if (buf[0] != ' ')
+ goto bad;
+
+ /*
+ * If we're expecting a hex key, do an in-place conversion
+ * of hex to straight ASCII before calling __db_getulong().
+ */
+ if (ishex) {
+ for (p = q = buf + 1; *q != '\0' && *q != '\n';) {
+ /*
+ * The ASCII digits 0-9 are 0x30-0x39 in hex, so the encoded
+ * key simply alternates '3' characters with the original
+ * digits. If a digit position holds something unexpected,
+ * __db_getulong will fail on it below, so we only need to
+ * catch end-of-string conditions here.
+ */
+ if (*q++ != '3')
+ goto bad;
+ if (*q == '\n' || *q == '\0')
+ goto bad;
+ *p++ = *q++;
+ }
+ *p = '\0';
+ }
+
+ if (__db_getulong(NULL,
+ G(progname), buf + 1, 0, 0, (u_long *)dbtp->data)) {
+bad: db_load_badend(dbenv);
+ return (1);
+ }
+
+ dbtp->size = sizeof(db_recno_t);
+ return (0);
+}
+
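As a worked example of the in-place conversion above: with ishex set, record number 12 arrives as the line " 3132" (each decimal digit hex-encoded, so '1' becomes 31 and '2' becomes 32); the loop drops the alternating '3' bytes, leaving "12" for __db_getulong to parse. Without ishex, the line already holds the plain decimal number after the leading space.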
+/*
+ * digitize --
+ * Convert a character to an integer.
+ */
+int
+db_load_digitize(dbenv, c, errorp)
+ DB_ENV *dbenv;
+ int c, *errorp;
+{
+ switch (c) { /* Don't depend on ASCII ordering. */
+ case '0': return (0);
+ case '1': return (1);
+ case '2': return (2);
+ case '3': return (3);
+ case '4': return (4);
+ case '5': return (5);
+ case '6': return (6);
+ case '7': return (7);
+ case '8': return (8);
+ case '9': return (9);
+ case 'a': return (10);
+ case 'b': return (11);
+ case 'c': return (12);
+ case 'd': return (13);
+ case 'e': return (14);
+ case 'f': return (15);
+ }
+
+ dbenv->errx(dbenv, "unexpected hexadecimal value");
+ *errorp = 1;
+
+ return (0);
+}
+
+/*
+ * badnum --
+ * Display the bad number message.
+ */
+void
+db_load_badnum(dbenv)
+ DB_ENV *dbenv;
+{
+ dbenv->errx(dbenv,
+ "boolean name=value pairs require a value of 0 or 1");
+}
+
+/*
+ * badend --
+ * Display the bad end-of-input message.
+ */
+void
+db_load_badend(dbenv)
+ DB_ENV *dbenv;
+{
+ dbenv->errx(dbenv, "unexpected end of input data or key/data pair");
+}
+
+/*
+ * usage --
+ * Display the usage message.
+ */
+int
+db_load_usage()
+{
+ (void)fprintf(stderr, "%s\n\t%s\n",
+ "usage: db_load [-nTV] [-c name=value] [-f file]",
+ "[-h home] [-P password] [-t btree | hash | recno | queue] db_file");
+ return (EXIT_FAILURE);
+}
+
+int
+db_load_version_check(progname)
+ const char *progname;
+{
+ int v_major, v_minor, v_patch;
+
+ /* Make sure we're loaded with the right version of the DB library. */
+ (void)db_version(&v_major, &v_minor, &v_patch);
+ if (v_major != DB_VERSION_MAJOR ||
+ v_minor != DB_VERSION_MINOR || v_patch != DB_VERSION_PATCH) {
+ fprintf(stderr,
+ "%s: version %d.%d.%d doesn't match library version %d.%d.%d\n",
+ progname, DB_VERSION_MAJOR, DB_VERSION_MINOR,
+ DB_VERSION_PATCH, v_major, v_minor, v_patch);
+ return (EXIT_FAILURE);
+ }
+ return (0);
+}
+
+int
+db_load_env_create(dbenvp, ldg)
+ DB_ENV **dbenvp;
+ LDG *ldg;
+{
+ DB_ENV *dbenv;
+ int ret;
+
+ if ((ret = db_env_create(dbenvp, 0)) != 0) {
+ fprintf(stderr,
+ "%s: db_env_create: %s\n", ldg->progname, db_strerror(ret));
+ return (ret);
+ }
+ dbenv = *dbenvp;
+ dbenv->set_errfile(dbenv, stderr);
+ dbenv->set_errpfx(dbenv, ldg->progname);
+ if (ldg->passwd != NULL && (ret = dbenv->set_encrypt(dbenv,
+ ldg->passwd, DB_ENCRYPT_AES)) != 0) {
+ dbenv->err(dbenv, ret, "set_passwd");
+ return (ret);
+ }
+ if ((ret = db_load_db_init(dbenv, ldg->home, ldg->cache, &ldg->private)) != 0)
+ return (ret);
+ dbenv->app_private = ldg;
+
+ return (0);
+}
diff --git a/bdb/build_vxworks/db_load/db_load.wpj b/bdb/build_vxworks/db_load/db_load.wpj
new file mode 100755
index 00000000000..59e194ae386
--- /dev/null
+++ b/bdb/build_vxworks/db_load/db_load.wpj
@@ -0,0 +1,160 @@
+Document file - DO NOT EDIT
+
+<BEGIN> BUILD_PENTIUMgnu_BUILDRULE
+db_load.out
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_AR
+ar386
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_ARCHIVE
+$(PRJ_DIR)/PENTIUMgnu/db_load.a
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_AS
+cc386
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_CC
+cc386
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_CFLAGS
+-g \
+ -mpentium \
+ -ansi \
+ -nostdinc \
+ -DRW_MULTI_THREAD \
+ -D_REENTRANT \
+ -fvolatile \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -I$(PRJ_DIR)/.. \
+ -I$(PRJ_DIR)/../.. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_CFLAGS_AS
+-g \
+ -mpentium \
+ -ansi \
+ -nostdinc \
+ -fvolatile \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -P \
+ -x \
+ assembler-with-cpp \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_CPP
+cc386 -E -P -xc
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_LD
+ld386
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_LDDEPS
+
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_LDFLAGS
+-X -N
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_LD_PARTIAL_FLAGS
+-X -r
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_NM
+nm386 -g
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_OPTION_DEFINE_MACRO
+-D
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_OPTION_INCLUDE_DIR
+-I
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_POST_BUILD_RULE
+
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_PRJ_LIBS
+
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_SIZE
+size386
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_RO_DEPEND_PATH
+{$(WIND_BASE)/target/h/} \
+ {$(WIND_BASE)/target/src/} \
+ {$(WIND_BASE)/target/config/}
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_TC
+::tc_PENTIUMgnu
+<END>
+
+<BEGIN> BUILD_RULE_archive
+
+<END>
+
+<BEGIN> BUILD_RULE_db_load.out
+
+<END>
+
+<BEGIN> BUILD_RULE_objects
+
+<END>
+
+<BEGIN> BUILD__CURRENT
+PENTIUMgnu
+<END>
+
+<BEGIN> BUILD__LIST
+PENTIUMgnu
+<END>
+
+<BEGIN> CORE_INFO_TYPE
+::prj_vxApp
+<END>
+
+<BEGIN> CORE_INFO_VERSION
+2.0
+<END>
+
+<BEGIN> FILE_db_load.c_dependDone
+FALSE
+<END>
+
+<BEGIN> FILE_db_load.c_dependencies
+
+<END>
+
+<BEGIN> FILE_db_load.c_objects
+db_load.o
+<END>
+
+<BEGIN> FILE_db_load.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> PROJECT_FILES
+$(PRJ_DIR)/db_load.c
+<END>
+
+<BEGIN> userComments
+db_load
+<END>
diff --git a/bdb/build_vxworks/db_load/db_load/Makefile.custom b/bdb/build_vxworks/db_load/db_load/Makefile.custom
new file mode 100644
index 00000000000..ca781f7b251
--- /dev/null
+++ b/bdb/build_vxworks/db_load/db_load/Makefile.custom
@@ -0,0 +1,51 @@
+#
+# Custom Makefile shell
+#
+# This file may be edited freely, since it will not be regenerated
+# by the project manager.
+#
+# Use this makefile to define rules to make external binaries
+# and deposit them in the $(EXTERNAL_BINARIES_DIR) directory.
+#
+# If you have specified external modules during your component
+# creation, you will find make rules already in place below.
+# You will likely have to edit these to suit your individual
+# build setup.
+#
+# You may wish to use the CPU, BUILD_SPEC or TOOL make variables in
+# your Makefile to support builds for different architectures. Use
+# the FORCE_EXTERNAL_MAKE phony target to ensure that your external
+# make always runs.
+#
+# The example below assumes that your custom makefile is in the
+# mySourceTree directory, and that the binary file it produces
+# is placed into the $(BUILD_SPEC) sub-directory.
+#
+# EXTERNAL_SOURCE_BASE = /folk/me/mySourceTree
+# EXTERNAL_MODULE = myLibrary.o
+# EXTERNAL_MAKE = make
+#
+# $(EXTERNAL_BINARIES_DIR)/$(EXTERNAL_MODULE) : FORCE_EXTERNAL_MAKE
+# $(EXTERNAL_MAKE) -C $(EXTERNAL_SOURCE_BASE) \
+# -f $(EXTERNAL_SOURCE_BASE)/Makefile \
+# CPU=$(CPU) BUILD_SPEC=$(BUILD_SPEC) $(@F)
+# $(CP) $(subst /,$(DIRCHAR),$(EXTERNAL_SOURCE_BASE)/$(BUILD_SPEC)/$(@F) $@)
+#
+# If you are not adding your external modules from the component wizard,
+# you will have to include them in your component yourself:
+#
+# From the GUI, you can do this with the Component's 'Add external module'
+# dialog.
+#
+# If you are using the command line, add the module(s) by editing the
+# MODULES line in component.cdf file, e.g.
+#
+# Component INCLUDE_MYCOMPONENT {
+#
+# MODULES foo.o goo.o \
+# myLibrary.o
+#
+
+
+# rules to build custom libraries
+
diff --git a/bdb/build_vxworks/db_load/db_load/component.cdf b/bdb/build_vxworks/db_load/db_load/component.cdf
new file mode 100755
index 00000000000..7d1d2bc9586
--- /dev/null
+++ b/bdb/build_vxworks/db_load/db_load/component.cdf
@@ -0,0 +1,30 @@
+/* component.cdf - dynamically updated configuration */
+
+/*
+ * NOTE: you may edit this file to alter the configuration
+ * But all non-configuration information, including comments,
+ * will be lost upon rebuilding this project.
+ */
+
+/* Component information */
+
+Component INCLUDE_DB_LOAD {
+ ENTRY_POINTS ALL_GLOBAL_SYMBOLS
+ MODULES db_load.o
+ NAME db_load
+ PREF_DOMAIN ANY
+ _INIT_ORDER usrComponentsInit
+}
+
+/* EntryPoint information */
+
+/* Module information */
+
+Module db_load.o {
+
+ NAME db_load.o
+ SRC_PATH_NAME $PRJ_DIR/../db_load.c
+}
+
+/* Parameter information */
+
diff --git a/bdb/build_vxworks/db_load/db_load/component.wpj b/bdb/build_vxworks/db_load/db_load/component.wpj
new file mode 100755
index 00000000000..216e7d9786c
--- /dev/null
+++ b/bdb/build_vxworks/db_load/db_load/component.wpj
@@ -0,0 +1,475 @@
+Document file - DO NOT EDIT
+
+<BEGIN> CORE_INFO_TYPE
+::prj_component
+<END>
+
+<BEGIN> CORE_INFO_VERSION
+AE1.1
+<END>
+
+<BEGIN> BUILD__CURRENT
+PENTIUM2gnu.debug
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_CURRENT_TARGET
+default
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_DEFAULTFORCPU
+1
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../db_load.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../db_load.c_objects
+db_load.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../db_load.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../db_load.c_objects
+db_load.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../db_load.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/compConfig.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/compConfig.c_objects
+compConfig.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/compConfig.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_AR
+arpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_AS
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_CC
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_CFLAGS
+-mcpu=pentiumpro \
+ -march=pentiumpro \
+ -ansi \
+ -DRW_MULTI_THREAD \
+ -D_REENTRANT \
+ -g \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -MD \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -I$(PRJ_DIR)/../.. \
+ -I$(PRJ_DIR)/../../.. \
+ -DCPU=PENTIUM2
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_CFLAGS_AS
+-mcpu=pentiumpro \
+ -march=pentiumpro \
+ -ansi \
+ -g \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -P \
+ -x \
+ assembler-with-cpp \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM2
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_CPP
+ccpentium -E -P
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_CPPFILT
+c++filtpentium --strip-underscores
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_LD
+ldpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_LDFLAGS
+-X
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_LDPARTIAL
+ccpentium \
+ -B$(WIND_BASE)/host/$(WIND_HOST_TYPE)/lib/gcc-lib/ \
+ -nostdlib \
+ -r \
+ -Wl,-X
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_LD_PARTIAL_FLAGS
+-X -r
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_NM
+nmpentium -g
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_OPTION_DEFINE_MACRO
+-D
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_OPTION_GENERATE_DEPENDENCY_FILE
+-MD
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_OPTION_INCLUDE_DIR
+-I
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_RELEASE
+0
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_SIZE
+sizepentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_RELEASE
+0
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_RO_DEPEND_PATH
+$(WIND_BASE)/target/h/
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_TC
+::tc_PENTIUM2gnu.debug
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_DEFAULTFORCPU
+0
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../db_load.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../db_load.c_objects
+db_load.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../db_load.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/compConfig.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/compConfig.c_objects
+compConfig.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/compConfig.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_AR
+arpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_AS
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_CC
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_CFLAGS
+-mcpu=pentiumpro \
+ -march=pentiumpro \
+ -ansi \
+ -DRW_MULTI_THREAD \
+ -D_REENTRANT \
+ -O2 \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -MD \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -I$(PRJ_DIR)/../.. \
+ -I$(PRJ_DIR)/../../.. \
+ -DCPU=PENTIUM2
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_CFLAGS_AS
+-mcpu=pentiumpro \
+ -march=pentiumpro \
+ -ansi \
+ -O2 \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -P \
+ -x \
+ assembler-with-cpp \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM2
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_CPP
+ccpentium -E -P
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_CPPFILT
+c++filtpentium --strip-underscores
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_LD
+ldpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_LDFLAGS
+-X
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_LDPARTIAL
+ccpentium \
+ -B$(WIND_BASE)/host/$(WIND_HOST_TYPE)/lib/gcc-lib/ \
+ -nostdlib \
+ -r \
+ -Wl,-X
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_LD_PARTIAL_FLAGS
+-X -r
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_NM
+nmpentium -g
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_OPTION_DEFINE_MACRO
+-D
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_OPTION_GENERATE_DEPENDENCY_FILE
+-MD
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_OPTION_INCLUDE_DIR
+-I
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_RELEASE
+1
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_SIZE
+sizepentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_RELEASE
+1
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_RO_DEPEND_PATH
+$(WIND_BASE)/target/h/
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_TC
+::tc_PENTIUM2gnu.release
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_DEFAULTFORCPU
+1
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../db_load.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../db_load.c_objects
+db_load.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../db_load.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/compConfig.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/compConfig.c_objects
+compConfig.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/compConfig.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_AR
+arpentium
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_AS
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_CC
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_CFLAGS
+-mcpu=pentium \
+ -march=pentium \
+ -ansi \
+ -DRW_MULTI_THREAD \
+ -D_REENTRANT \
+ -g \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -MD \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -I$(PRJ_DIR)/../.. \
+ -I$(PRJ_DIR)/../../.. \
+ -DCPU=PENTIUM
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_CFLAGS_AS
+-mcpu=pentium \
+ -march=pentium \
+ -ansi \
+ -g \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -P \
+ -x \
+ assembler-with-cpp \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_CPP
+ccpentium -E -P
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_CPPFILT
+c++filtpentium --strip-underscores
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_LD
+ldpentium
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_LDFLAGS
+-X
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_LDPARTIAL
+ccpentium \
+ -B$(WIND_BASE)/host/$(WIND_HOST_TYPE)/lib/gcc-lib/ \
+ -nostdlib \
+ -r \
+ -Wl,-X
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_LD_PARTIAL_FLAGS
+-X -r
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_NM
+nmpentium -g
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_OPTION_DEFINE_MACRO
+-D
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_OPTION_GENERATE_DEPENDENCY_FILE
+-MD
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_OPTION_INCLUDE_DIR
+-I
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_RELEASE
+0
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_SIZE
+sizepentium
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_RELEASE
+0
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_RO_DEPEND_PATH
+$(WIND_BASE)/target/h/
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_TC
+::tc_PENTIUMgnu.debug
+<END>
+
+<BEGIN> BUILD__LIST
+PENTIUM2gnu.debug PENTIUM2gnu.release PENTIUMgnu.debug
+<END>
+
+<BEGIN> PROJECT_FILES
+$(PRJ_DIR)/../db_load.c \
+ $(PRJ_DIR)/compConfig.c
+<END>
+
+<BEGIN> WCC__CDF_PATH
+$(PRJ_DIR)
+<END>
+
+<BEGIN> WCC__CURRENT
+PENTIUM2gnu.debug
+<END>
+
+<BEGIN> WCC__LIST
+PENTIUM2gnu.debug
+<END>
+
+<BEGIN> WCC__MXR_LIBS
+lib$(CPU)$(TOOL)vx.a
+<END>
+
+<BEGIN> WCC__OBJS_PATH
+$(WIND_BASE)/target/lib/obj$CPU$TOOLvx
+<END>
+
diff --git a/bdb/build_vxworks/db_printlog/db_printlog.c b/bdb/build_vxworks/db_printlog/db_printlog.c
new file mode 100644
index 00000000000..380e29021f5
--- /dev/null
+++ b/bdb/build_vxworks/db_printlog/db_printlog.c
@@ -0,0 +1,375 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char copyright[] =
+ "Copyright (c) 1996-2002\nSleepycat Software Inc. All rights reserved.\n";
+static const char revid[] =
+ "$Id: db_printlog.c,v 11.52 2002/08/08 03:50:38 bostic Exp $";
+#endif
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <ctype.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_page.h"
+#include "dbinc/btree.h"
+#include "dbinc/fop.h"
+#include "dbinc/hash.h"
+#include "dbinc/log.h"
+#include "dbinc/qam.h"
+#include "dbinc/rep.h"
+#include "dbinc/txn.h"
+
+int db_printlog_main __P((int, char *[]));
+int db_printlog_usage __P((void));
+int db_printlog_version_check __P((const char *));
+int db_printlog_print_app_record __P((DB_ENV *, DBT *, DB_LSN *, db_recops));
+int db_printlog_open_rep_db __P((DB_ENV *, DB **, DBC **));
+
+int
+db_printlog(args)
+ char *args;
+{
+ int argc;
+ char **argv;
+
+ __db_util_arg("db_printlog", args, &argc, &argv);
+ return (db_printlog_main(argc, argv) ? EXIT_FAILURE : EXIT_SUCCESS);
+}
+
+#include <stdio.h>
+#define ERROR_RETURN ERROR
+
+int
+db_printlog_main(argc, argv)
+ int argc;
+ char *argv[];
+{
+ extern char *optarg;
+ extern int optind, __db_getopt_reset;
+ const char *progname = "db_printlog";
+ DB *dbp;
+ DBC *dbc;
+ DB_ENV *dbenv;
+ DB_LOGC *logc;
+ int (**dtab) __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ size_t dtabsize;
+ DBT data, keydbt;
+ DB_LSN key;
+ int ch, e_close, exitval, nflag, rflag, ret, repflag;
+ char *home, *passwd;
+
+ if ((ret = db_printlog_version_check(progname)) != 0)
+ return (ret);
+
+ dbp = NULL;
+ dbc = NULL;
+ logc = NULL;
+ e_close = exitval = nflag = rflag = repflag = 0;
+ home = passwd = NULL;
+ dtabsize = 0;
+ dtab = NULL;
+ __db_getopt_reset = 1;
+ while ((ch = getopt(argc, argv, "h:NP:rRV")) != EOF)
+ switch (ch) {
+ case 'h':
+ home = optarg;
+ break;
+ case 'N':
+ nflag = 1;
+ break;
+ case 'P':
+ passwd = strdup(optarg);
+ memset(optarg, 0, strlen(optarg));
+ if (passwd == NULL) {
+ fprintf(stderr, "%s: strdup: %s\n",
+ progname, strerror(errno));
+ return (EXIT_FAILURE);
+ }
+ break;
+ case 'r':
+ rflag = 1;
+ break;
+ case 'R':
+ repflag = 1;
+ break;
+ case 'V':
+ printf("%s\n", db_version(NULL, NULL, NULL));
+ return (EXIT_SUCCESS);
+ case '?':
+ default:
+ return (db_printlog_usage());
+ }
+ argc -= optind;
+ argv += optind;
+
+ if (argc > 0)
+ return (db_printlog_usage());
+
+ /* Handle possible interruptions. */
+ __db_util_siginit();
+
+ /*
+ * Create an environment object and initialize it for error
+ * reporting.
+ */
+ if ((ret = db_env_create(&dbenv, 0)) != 0) {
+ fprintf(stderr,
+ "%s: db_env_create: %s\n", progname, db_strerror(ret));
+ goto shutdown;
+ }
+ e_close = 1;
+
+ dbenv->set_errfile(dbenv, stderr);
+ dbenv->set_errpfx(dbenv, progname);
+
+ if (nflag) {
+ if ((ret = dbenv->set_flags(dbenv, DB_NOLOCKING, 1)) != 0) {
+ dbenv->err(dbenv, ret, "set_flags: DB_NOLOCKING");
+ goto shutdown;
+ }
+ if ((ret = dbenv->set_flags(dbenv, DB_NOPANIC, 1)) != 0) {
+ dbenv->err(dbenv, ret, "set_flags: DB_NOPANIC");
+ goto shutdown;
+ }
+ }
+
+ if (passwd != NULL && (ret = dbenv->set_encrypt(dbenv,
+ passwd, DB_ENCRYPT_AES)) != 0) {
+ dbenv->err(dbenv, ret, "set_passwd");
+ goto shutdown;
+ }
+
+ /*
+ * Set up an app-specific dispatch function so that we can gracefully
+ * handle app-specific log records.
+ */
+ if ((ret = dbenv->set_app_dispatch(dbenv, db_printlog_print_app_record)) != 0) {
+ dbenv->err(dbenv, ret, "app_dispatch");
+ goto shutdown;
+ }
+
+ /*
+ * An environment is required, but as all we're doing is reading log
+ * files, we create one if it doesn't already exist. If we create
+ * it, create it private so it automatically goes away when we're done.
+ * If we are reading the replication database, do not open the env
+ * with logging, because we don't want to log the opens.
+ */
+ if (repflag) {
+ if ((ret = dbenv->open(dbenv, home,
+ DB_INIT_MPOOL | DB_USE_ENVIRON, 0)) != 0 &&
+ (ret = dbenv->open(dbenv, home,
+ DB_CREATE | DB_INIT_MPOOL | DB_PRIVATE | DB_USE_ENVIRON, 0))
+ != 0) {
+ dbenv->err(dbenv, ret, "open");
+ goto shutdown;
+ }
+ } else if ((ret = dbenv->open(dbenv, home,
+ DB_JOINENV | DB_USE_ENVIRON, 0)) != 0 &&
+ (ret = dbenv->open(dbenv, home,
+ DB_CREATE | DB_INIT_LOG | DB_PRIVATE | DB_USE_ENVIRON, 0)) != 0) {
+ dbenv->err(dbenv, ret, "open");
+ goto shutdown;
+ }
+
+ /* Initialize print callbacks. */
+ if ((ret = __bam_init_print(dbenv, &dtab, &dtabsize)) != 0 ||
+ (ret = __dbreg_init_print(dbenv, &dtab, &dtabsize)) != 0 ||
+ (ret = __crdel_init_print(dbenv, &dtab, &dtabsize)) != 0 ||
+ (ret = __db_init_print(dbenv, &dtab, &dtabsize)) != 0 ||
+ (ret = __fop_init_print(dbenv, &dtab, &dtabsize)) != 0 ||
+ (ret = __qam_init_print(dbenv, &dtab, &dtabsize)) != 0 ||
+ (ret = __ham_init_print(dbenv, &dtab, &dtabsize)) != 0 ||
+ (ret = __txn_init_print(dbenv, &dtab, &dtabsize)) != 0) {
+ dbenv->err(dbenv, ret, "callback: initialization");
+ goto shutdown;
+ }
+
+ /* Allocate a log cursor. */
+ if (repflag) {
+ if ((ret = db_printlog_open_rep_db(dbenv, &dbp, &dbc)) != 0)
+ goto shutdown;
+ } else if ((ret = dbenv->log_cursor(dbenv, &logc, 0)) != 0) {
+ dbenv->err(dbenv, ret, "DB_ENV->log_cursor");
+ goto shutdown;
+ }
+
+ memset(&data, 0, sizeof(data));
+ memset(&keydbt, 0, sizeof(keydbt));
+ while (!__db_util_interrupted()) {
+ if (repflag) {
+ ret = dbc->c_get(dbc,
+ &keydbt, &data, rflag ? DB_PREV : DB_NEXT);
+ if (ret == 0)
+ key = ((REP_CONTROL *)keydbt.data)->lsn;
+ } else
+ ret = logc->get(logc,
+ &key, &data, rflag ? DB_PREV : DB_NEXT);
+ if (ret != 0) {
+ if (ret == DB_NOTFOUND)
+ break;
+ dbenv->err(dbenv,
+ ret, repflag ? "DB_LOGC->get" : "DBC->get");
+ goto shutdown;
+ }
+
+ ret = __db_dispatch(dbenv,
+ dtab, dtabsize, &data, &key, DB_TXN_PRINT, NULL);
+
+ /*
+ * XXX
+ * Just in case the underlying routines don't flush.
+ */
+ (void)fflush(stdout);
+
+ if (ret != 0) {
+ dbenv->err(dbenv, ret, "tx: dispatch");
+ goto shutdown;
+ }
+ }
+
+ if (0) {
+shutdown: exitval = 1;
+ }
+ if (logc != NULL && (ret = logc->close(logc, 0)) != 0)
+ exitval = 1;
+
+ if (dbc != NULL && (ret = dbc->c_close(dbc)) != 0)
+ exitval = 1;
+
+ if (dbp != NULL && (ret = dbp->close(dbp, 0)) != 0)
+ exitval = 1;
+
+ /*
+ * The dtab is allocated by __db_add_recovery (called by *_init_print)
+ * using the library malloc function (__os_malloc). It thus needs to be
+ * freed using the corresponding free (__os_free).
+ */
+ if (dtab != NULL)
+ __os_free(dbenv, dtab);
+ if (e_close && (ret = dbenv->close(dbenv, 0)) != 0) {
+ exitval = 1;
+ fprintf(stderr,
+ "%s: dbenv->close: %s\n", progname, db_strerror(ret));
+ }
+
+ /* Resend any caught signal. */
+ __db_util_sigresend();
+
+ return (exitval == 0 ? EXIT_SUCCESS : EXIT_FAILURE);
+}
+
+int
+db_printlog_usage()
+{
+ fprintf(stderr, "%s\n",
+ "usage: db_printlog [-NrV] [-h home] [-P password]");
+ return (EXIT_FAILURE);
+}
+
+int
+db_printlog_version_check(progname)
+ const char *progname;
+{
+ int v_major, v_minor, v_patch;
+
+ /* Make sure we're loaded with the right version of the DB library. */
+ (void)db_version(&v_major, &v_minor, &v_patch);
+ if (v_major != DB_VERSION_MAJOR ||
+ v_minor != DB_VERSION_MINOR || v_patch != DB_VERSION_PATCH) {
+ fprintf(stderr,
+ "%s: version %d.%d.%d doesn't match library version %d.%d.%d\n",
+ progname, DB_VERSION_MAJOR, DB_VERSION_MINOR,
+ DB_VERSION_PATCH, v_major, v_minor, v_patch);
+ return (EXIT_FAILURE);
+ }
+ return (0);
+}
+
+/* Print an unknown, application-specific log record as best we can. */
+int
+db_printlog_print_app_record(dbenv, dbt, lsnp, op)
+ DB_ENV *dbenv;
+ DBT *dbt;
+ DB_LSN *lsnp;
+ db_recops op;
+{
+ int ch;
+ u_int32_t i, rectype;
+
+ DB_ASSERT(op == DB_TXN_PRINT);
+ COMPQUIET(dbenv, NULL);
+
+ /*
+ * Fetch the rectype, which must always be at the beginning of the
+ * record (if dispatching is to work at all).
+ */
+ memcpy(&rectype, dbt->data, sizeof(rectype));
+
+ /*
+ * Applications may wish to customize the output here based on the
+ * rectype. We just print the entire log record in the generic
+ * mixed-hex-and-printable format we use for binary data.
+ */
+ printf("[%lu][%lu]application specific record: rec: %lu\n",
+ (u_long)lsnp->file, (u_long)lsnp->offset, (u_long)rectype);
+ printf("\tdata: ");
+ for (i = 0; i < dbt->size; i++) {
+ ch = ((u_int8_t *)dbt->data)[i];
+ printf(isprint(ch) || ch == 0x0a ? "%c" : "%#x ", ch);
+ }
+ printf("\n\n");
+
+ return (0);
+}
+
+int
+db_printlog_open_rep_db(dbenv, dbpp, dbcp)
+ DB_ENV *dbenv;
+ DB **dbpp;
+ DBC **dbcp;
+{
+ int ret;
+
+ DB *dbp;
+ *dbpp = NULL;
+ *dbcp = NULL;
+
+ if ((ret = db_create(dbpp, dbenv, 0)) != 0) {
+ dbenv->err(dbenv, ret, "db_create");
+ return (ret);
+ }
+
+ dbp = *dbpp;
+ if ((ret =
+ dbp->open(dbp, NULL, "__db.rep.db", NULL, DB_BTREE, 0, 0)) != 0) {
+ dbenv->err(dbenv, ret, "DB->open");
+ goto err;
+ }
+
+ if ((ret = dbp->cursor(dbp, NULL, dbcp, 0)) != 0) {
+ dbenv->err(dbenv, ret, "DB->cursor");
+ goto err;
+ }
+
+ return (0);
+
+err: if (*dbpp != NULL)
+ (void)(*dbpp)->close(*dbpp, 0);
+ return (ret);
+}
diff --git a/bdb/build_vxworks/db_printlog/db_printlog.wpj b/bdb/build_vxworks/db_printlog/db_printlog.wpj
new file mode 100755
index 00000000000..514122e6125
--- /dev/null
+++ b/bdb/build_vxworks/db_printlog/db_printlog.wpj
@@ -0,0 +1,160 @@
+Document file - DO NOT EDIT
+
+<BEGIN> BUILD_PENTIUMgnu_BUILDRULE
+db_printlog.out
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_AR
+ar386
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_ARCHIVE
+$(PRJ_DIR)/PENTIUMgnu/db_printlog.a
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_AS
+cc386
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_CC
+cc386
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_CFLAGS
+-g \
+ -mpentium \
+ -ansi \
+ -nostdinc \
+ -DRW_MULTI_THREAD \
+ -D_REENTRANT \
+ -fvolatile \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -I$(PRJ_DIR)/.. \
+ -I$(PRJ_DIR)/../.. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_CFLAGS_AS
+-g \
+ -mpentium \
+ -ansi \
+ -nostdinc \
+ -fvolatile \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -P \
+ -x \
+ assembler-with-cpp \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_CPP
+cc386 -E -P -xc
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_LD
+ld386
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_LDDEPS
+
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_LDFLAGS
+-X -N
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_LD_PARTIAL_FLAGS
+-X -r
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_NM
+nm386 -g
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_OPTION_DEFINE_MACRO
+-D
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_OPTION_INCLUDE_DIR
+-I
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_POST_BUILD_RULE
+
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_PRJ_LIBS
+
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_SIZE
+size386
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_RO_DEPEND_PATH
+{$(WIND_BASE)/target/h/} \
+ {$(WIND_BASE)/target/src/} \
+ {$(WIND_BASE)/target/config/}
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_TC
+::tc_PENTIUMgnu
+<END>
+
+<BEGIN> BUILD_RULE_archive
+
+<END>
+
+<BEGIN> BUILD_RULE_db_printlog.out
+
+<END>
+
+<BEGIN> BUILD_RULE_objects
+
+<END>
+
+<BEGIN> BUILD__CURRENT
+PENTIUMgnu
+<END>
+
+<BEGIN> BUILD__LIST
+PENTIUMgnu
+<END>
+
+<BEGIN> CORE_INFO_TYPE
+::prj_vxApp
+<END>
+
+<BEGIN> CORE_INFO_VERSION
+2.0
+<END>
+
+<BEGIN> FILE_db_printlog.c_dependDone
+FALSE
+<END>
+
+<BEGIN> FILE_db_printlog.c_dependencies
+
+<END>
+
+<BEGIN> FILE_db_printlog.c_objects
+db_printlog.o
+<END>
+
+<BEGIN> FILE_db_printlog.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> PROJECT_FILES
+$(PRJ_DIR)/db_printlog.c
+<END>
+
+<BEGIN> userComments
+db_printlog
+<END>
diff --git a/bdb/build_vxworks/db_printlog/db_printlog/Makefile.custom b/bdb/build_vxworks/db_printlog/db_printlog/Makefile.custom
new file mode 100644
index 00000000000..ca781f7b251
--- /dev/null
+++ b/bdb/build_vxworks/db_printlog/db_printlog/Makefile.custom
@@ -0,0 +1,51 @@
+#
+# Custom Makefile shell
+#
+# This file may be edited freely, since it will not be regenerated
+# by the project manager.
+#
+# Use this makefile to define rules to make external binaries
+# and deposit them in the $(EXTERNAL_BINARIES_DIR) directory.
+#
+# If you have specified external modules during your component
+# creation, you will find make rules already in place below.
+# You will likely have to edit these to suit your individual
+# build setup.
+#
+# You may wish to use the CPU, BUILD_SPEC or TOOL make variables in
+# your Makefile to support builds for different architectures. Use
+# the FORCE_EXTERNAL_MAKE phony target to ensure that your external
+# make always runs.
+#
+# The example below assumes that your custom makefile is in the
+# mySourceTree directory, and that the binary file it produces
+# is placed into the $(BUILD_SPEC) sub-directory.
+#
+# EXTERNAL_SOURCE_BASE = /folk/me/mySourceTree
+# EXTERNAL_MODULE = myLibrary.o
+# EXTERNAL_MAKE = make
+#
+# $(EXTERNAL_BINARIES_DIR)/$(EXTERNAL_MODULE) : FORCE_EXTERNAL_MAKE
+# $(EXTERNAL_MAKE) -C $(EXTERNAL_SOURCE_BASE) \
+# -f $(EXTERNAL_SOURCE_BASE)/Makefile \
+# CPU=$(CPU) BUILD_SPEC=$(BUILD_SPEC) $(@F)
+# $(CP) $(subst /,$(DIRCHAR),$(EXTERNAL_SOURCE_BASE)/$(BUILD_SPEC)/$(@F) $@)
+#
+# If you are not adding your external modules from the component wizard,
+# you will have to include them in your component yourself:
+#
+# From the GUI, you can do this with the Component's 'Add external module'
+# dialog.
+#
+# If you are using the command line, add the module(s) by editing the
+# MODULES line in component.cdf file, e.g.
+#
+# Component INCLUDE_MYCOMPONENT {
+#
+# MODULES foo.o goo.o \
+# myLibrary.o
+#
+
+
+# rules to build custom libraries
+
diff --git a/bdb/build_vxworks/db_printlog/db_printlog/component.cdf b/bdb/build_vxworks/db_printlog/db_printlog/component.cdf
new file mode 100755
index 00000000000..57c645259a4
--- /dev/null
+++ b/bdb/build_vxworks/db_printlog/db_printlog/component.cdf
@@ -0,0 +1,30 @@
+/* component.cdf - dynamically updated configuration */
+
+/*
+ * NOTE: you may edit this file to alter the configuration
+ * But all non-configuration information, including comments,
+ * will be lost upon rebuilding this project.
+ */
+
+/* Component information */
+
+Component INCLUDE_DB_PRINTLOG {
+ ENTRY_POINTS ALL_GLOBAL_SYMBOLS
+ MODULES db_printlog.o
+ NAME db_printlog
+ PREF_DOMAIN ANY
+ _INIT_ORDER usrComponentsInit
+}
+
+/* EntryPoint information */
+
+/* Module information */
+
+Module db_printlog.o {
+
+ NAME db_printlog.o
+ SRC_PATH_NAME $PRJ_DIR/../db_printlog.c
+}
+
+/* Parameter information */
+
diff --git a/bdb/build_vxworks/db_printlog/db_printlog/component.wpj b/bdb/build_vxworks/db_printlog/db_printlog/component.wpj
new file mode 100755
index 00000000000..81d2447459d
--- /dev/null
+++ b/bdb/build_vxworks/db_printlog/db_printlog/component.wpj
@@ -0,0 +1,475 @@
+Document file - DO NOT EDIT
+
+<BEGIN> CORE_INFO_TYPE
+::prj_component
+<END>
+
+<BEGIN> CORE_INFO_VERSION
+AE1.1
+<END>
+
+<BEGIN> BUILD__CURRENT
+PENTIUM2gnu.debug
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_CURRENT_TARGET
+default
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_DEFAULTFORCPU
+1
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../db_printlog.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../db_printlog.c_objects
+db_printlog.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../db_printlog.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../db_printlog.c_objects
+db_printlog.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../db_printlog.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/compConfig.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/compConfig.c_objects
+compConfig.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/compConfig.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_AR
+arpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_AS
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_CC
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_CFLAGS
+-mcpu=pentiumpro \
+ -march=pentiumpro \
+ -ansi \
+ -DRW_MULTI_THREAD \
+ -D_REENTRANT \
+ -g \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -MD \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -I$(PRJ_DIR)/../.. \
+ -I$(PRJ_DIR)/../../.. \
+ -DCPU=PENTIUM2
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_CFLAGS_AS
+-mcpu=pentiumpro \
+ -march=pentiumpro \
+ -ansi \
+ -g \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -P \
+ -x \
+ assembler-with-cpp \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM2
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_CPP
+ccpentium -E -P
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_CPPFILT
+c++filtpentium --strip-underscores
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_LD
+ldpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_LDFLAGS
+-X
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_LDPARTIAL
+ccpentium \
+ -B$(WIND_BASE)/host/$(WIND_HOST_TYPE)/lib/gcc-lib/ \
+ -nostdlib \
+ -r \
+ -Wl,-X
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_LD_PARTIAL_FLAGS
+-X -r
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_NM
+nmpentium -g
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_OPTION_DEFINE_MACRO
+-D
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_OPTION_GENERATE_DEPENDENCY_FILE
+-MD
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_OPTION_INCLUDE_DIR
+-I
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_RELEASE
+0
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_SIZE
+sizepentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_RELEASE
+0
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_RO_DEPEND_PATH
+$(WIND_BASE)/target/h/
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_TC
+::tc_PENTIUM2gnu.debug
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_DEFAULTFORCPU
+0
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../db_printlog.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../db_printlog.c_objects
+db_printlog.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../db_printlog.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/compConfig.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/compConfig.c_objects
+compConfig.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/compConfig.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_AR
+arpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_AS
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_CC
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_CFLAGS
+-mcpu=pentiumpro \
+ -march=pentiumpro \
+ -ansi \
+ -DRW_MULTI_THREAD \
+ -D_REENTRANT \
+ -O2 \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -MD \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -I$(PRJ_DIR)/../.. \
+ -I$(PRJ_DIR)/../../.. \
+ -DCPU=PENTIUM2
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_CFLAGS_AS
+-mcpu=pentiumpro \
+ -march=pentiumpro \
+ -ansi \
+ -O2 \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -P \
+ -x \
+ assembler-with-cpp \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM2
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_CPP
+ccpentium -E -P
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_CPPFILT
+c++filtpentium --strip-underscores
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_LD
+ldpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_LDFLAGS
+-X
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_LDPARTIAL
+ccpentium \
+ -B$(WIND_BASE)/host/$(WIND_HOST_TYPE)/lib/gcc-lib/ \
+ -nostdlib \
+ -r \
+ -Wl,-X
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_LD_PARTIAL_FLAGS
+-X -r
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_NM
+nmpentium -g
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_OPTION_DEFINE_MACRO
+-D
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_OPTION_GENERATE_DEPENDENCY_FILE
+-MD
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_OPTION_INCLUDE_DIR
+-I
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_RELEASE
+1
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_SIZE
+sizepentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_RELEASE
+1
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_RO_DEPEND_PATH
+$(WIND_BASE)/target/h/
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_TC
+::tc_PENTIUM2gnu.release
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_DEFAULTFORCPU
+1
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../db_printlog.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../db_printlog.c_objects
+db_printlog.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../db_printlog.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/compConfig.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/compConfig.c_objects
+compConfig.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/compConfig.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_AR
+arpentium
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_AS
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_CC
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_CFLAGS
+-mcpu=pentium \
+ -march=pentium \
+ -ansi \
+ -DRW_MULTI_THREAD \
+ -D_REENTRANT \
+ -g \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -MD \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -I$(PRJ_DIR)/../.. \
+ -I$(PRJ_DIR)/../../.. \
+ -DCPU=PENTIUM
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_CFLAGS_AS
+-mcpu=pentium \
+ -march=pentium \
+ -ansi \
+ -g \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -P \
+ -x \
+ assembler-with-cpp \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_CPP
+ccpentium -E -P
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_CPPFILT
+c++filtpentium --strip-underscores
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_LD
+ldpentium
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_LDFLAGS
+-X
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_LDPARTIAL
+ccpentium \
+ -B$(WIND_BASE)/host/$(WIND_HOST_TYPE)/lib/gcc-lib/ \
+ -nostdlib \
+ -r \
+ -Wl,-X
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_LD_PARTIAL_FLAGS
+-X -r
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_NM
+nmpentium -g
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_OPTION_DEFINE_MACRO
+-D
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_OPTION_GENERATE_DEPENDENCY_FILE
+-MD
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_OPTION_INCLUDE_DIR
+-I
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_RELEASE
+0
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_SIZE
+sizepentium
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_RELEASE
+0
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_RO_DEPEND_PATH
+$(WIND_BASE)/target/h/
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_TC
+::tc_PENTIUMgnu.debug
+<END>
+
+<BEGIN> BUILD__LIST
+PENTIUM2gnu.debug PENTIUM2gnu.release PENTIUMgnu.debug
+<END>
+
+<BEGIN> PROJECT_FILES
+$(PRJ_DIR)/../db_printlog.c \
+ $(PRJ_DIR)/compConfig.c
+<END>
+
+<BEGIN> WCC__CDF_PATH
+$(PRJ_DIR)
+<END>
+
+<BEGIN> WCC__CURRENT
+PENTIUM2gnu.debug
+<END>
+
+<BEGIN> WCC__LIST
+PENTIUM2gnu.debug
+<END>
+
+<BEGIN> WCC__MXR_LIBS
+lib$(CPU)$(TOOL)vx.a
+<END>
+
+<BEGIN> WCC__OBJS_PATH
+$(WIND_BASE)/target/lib/obj$CPU$TOOLvx
+<END>
+
diff --git a/bdb/build_vxworks/db_recover/db_recover.c b/bdb/build_vxworks/db_recover/db_recover.c
new file mode 100644
index 00000000000..055964c8508
--- /dev/null
+++ b/bdb/build_vxworks/db_recover/db_recover.c
@@ -0,0 +1,328 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char copyright[] =
+ "Copyright (c) 1996-2002\nSleepycat Software Inc. All rights reserved.\n";
+static const char revid[] =
+ "$Id: db_recover.c,v 11.33 2002/03/28 20:13:42 bostic Exp $";
+#endif
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#if TIME_WITH_SYS_TIME
+#include <sys/time.h>
+#include <time.h>
+#else
+#if HAVE_SYS_TIME_H
+#include <sys/time.h>
+#else
+#include <time.h>
+#endif
+#endif
+
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/txn.h"
+
+int db_recover_main __P((int, char *[]));
+int db_recover_read_timestamp __P((const char *, char *, time_t *));
+int db_recover_usage __P((void));
+int db_recover_version_check __P((const char *));
+
+int
+db_recover(args)
+ char *args;
+{
+ int argc;
+ char **argv;
+
+ __db_util_arg("db_recover", args, &argc, &argv);
+ return (db_recover_main(argc, argv) ? EXIT_FAILURE : EXIT_SUCCESS);
+}
+
+#include <stdio.h>
+#define ERROR_RETURN ERROR
+
+int
+db_recover_main(argc, argv)
+ int argc;
+ char *argv[];
+{
+ extern char *optarg;
+ extern int optind, __db_getopt_reset;
+ const char *progname = "db_recover";
+ DB_ENV *dbenv;
+ DB_TXNREGION *region;
+ time_t now, timestamp;
+ u_int32_t flags;
+ int ch, exitval, fatal_recover, ret, retain_env, verbose;
+ char *home, *passwd;
+
+ if ((ret = db_recover_version_check(progname)) != 0)
+ return (ret);
+
+ home = passwd = NULL;
+ timestamp = 0;
+ exitval = fatal_recover = retain_env = verbose = 0;
+ __db_getopt_reset = 1;
+ while ((ch = getopt(argc, argv, "ceh:P:t:Vv")) != EOF)
+ switch (ch) {
+ case 'c':
+ fatal_recover = 1;
+ break;
+ case 'e':
+ retain_env = 1;
+ break;
+ case 'h':
+ home = optarg;
+ break;
+ case 'P':
+ passwd = strdup(optarg);
+ memset(optarg, 0, strlen(optarg));
+ if (passwd == NULL) {
+ fprintf(stderr, "%s: strdup: %s\n",
+ progname, strerror(errno));
+ return (EXIT_FAILURE);
+ }
+ break;
+ case 't':
+ if ((ret =
+ db_recover_read_timestamp(progname, optarg, &timestamp)) != 0)
+ return (ret);
+ break;
+ case 'V':
+ printf("%s\n", db_version(NULL, NULL, NULL));
+ return (EXIT_SUCCESS);
+ case 'v':
+ verbose = 1;
+ break;
+ case '?':
+ default:
+ return (db_recover_usage());
+ }
+ argc -= optind;
+ argv += optind;
+
+ if (argc != 0)
+ return (db_recover_usage());
+
+ /* Handle possible interruptions. */
+ __db_util_siginit();
+
+ /*
+ * Create an environment object and initialize it for error
+ * reporting.
+ */
+ if ((ret = db_env_create(&dbenv, 0)) != 0) {
+ fprintf(stderr,
+ "%s: db_env_create: %s\n", progname, db_strerror(ret));
+ return (EXIT_FAILURE);
+ }
+ dbenv->set_errfile(dbenv, stderr);
+ dbenv->set_errpfx(dbenv, progname);
+ if (verbose) {
+ (void)dbenv->set_verbose(dbenv, DB_VERB_RECOVERY, 1);
+ (void)dbenv->set_verbose(dbenv, DB_VERB_CHKPOINT, 1);
+ }
+ if (timestamp &&
+ (ret = dbenv->set_tx_timestamp(dbenv, &timestamp)) != 0) {
+ dbenv->err(dbenv, ret, "DB_ENV->set_timestamp");
+ goto shutdown;
+ }
+
+ if (passwd != NULL && (ret = dbenv->set_encrypt(dbenv,
+ passwd, DB_ENCRYPT_AES)) != 0) {
+ dbenv->err(dbenv, ret, "set_passwd");
+ goto shutdown;
+ }
+
+ /*
+ * Initialize the environment -- we don't actually do anything
+ * else; that's all that's needed to run recovery.
+ *
+ * Note that unless the caller specified the -e option, we use a
+ * private environment, as we're about to create a region, and we
+ * don't want to leave it around. If we leave the region around,
+ * the application that should create it will simply join it instead,
+ * and will then be running with incorrectly sized (and probably
+ * terribly small) caches. Applications that use -e should almost
+ * certainly use DB_CONFIG files in the directory.
+ */
+ flags = 0;
+ LF_SET(DB_CREATE | DB_INIT_LOCK | DB_INIT_LOG |
+ DB_INIT_MPOOL | DB_INIT_TXN | DB_USE_ENVIRON);
+ LF_SET(fatal_recover ? DB_RECOVER_FATAL : DB_RECOVER);
+ LF_SET(retain_env ? 0 : DB_PRIVATE);
+ if ((ret = dbenv->open(dbenv, home, flags, 0)) != 0) {
+ dbenv->err(dbenv, ret, "DB_ENV->open");
+ goto shutdown;
+ }
+
+ if (verbose) {
+ (void)time(&now);
+ region = ((DB_TXNMGR *)dbenv->tx_handle)->reginfo.primary;
+ dbenv->errx(dbenv, "Recovery complete at %.24s", ctime(&now));
+ dbenv->errx(dbenv, "%s %lx %s [%lu][%lu]",
+ "Maximum transaction id", (u_long)region->last_txnid,
+ "Recovery checkpoint", (u_long)region->last_ckp.file,
+ (u_long)region->last_ckp.offset);
+ }
+
+ if (0) {
+shutdown: exitval = 1;
+ }
+
+ /* Clean up the environment. */
+ if ((ret = dbenv->close(dbenv, 0)) != 0) {
+ exitval = 1;
+ fprintf(stderr,
+ "%s: dbenv->close: %s\n", progname, db_strerror(ret));
+ }
+
+ /* Resend any caught signal. */
+ __db_util_sigresend();
+
+ return (exitval == 0 ? EXIT_SUCCESS : EXIT_FAILURE);
+}
+
+#define ATOI2(ar) ((ar)[0] - '0') * 10 + ((ar)[1] - '0'); (ar) += 2;
+
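A note on the ATOI2 helper above: its body is two statements (the two-digit conversion and the pointer advance), so it is only safe when used as a complete statement of the form x = ATOI2(p);. For instance, with p pointing at "0210...", the assignment yields 2 and leaves p at "10...". read_timestamp below uses it exactly that way.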
+/*
+ * read_timestamp --
+ * Convert a time argument to Epoch seconds.
+ *
+ * Copyright (c) 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+int
+db_recover_read_timestamp(progname, arg, timep)
+ const char *progname;
+ char *arg;
+ time_t *timep;
+{
+ struct tm *t;
+ time_t now;
+ int yearset;
+ char *p;
+ /* Start with the current time. */
+ (void)time(&now);
+ if ((t = localtime(&now)) == NULL) {
+ fprintf(stderr,
+ "%s: localtime: %s\n", progname, strerror(errno));
+ return (EXIT_FAILURE);
+ }
+ /* [[CC]YY]MMDDhhmm[.SS] */
+ if ((p = strchr(arg, '.')) == NULL)
+ t->tm_sec = 0; /* Seconds default to 0. */
+ else {
+ if (strlen(p + 1) != 2)
+ goto terr;
+ *p++ = '\0';
+ t->tm_sec = ATOI2(p);
+ }
+
+ yearset = 0;
+ switch(strlen(arg)) {
+ case 12: /* CCYYMMDDhhmm */
+ t->tm_year = ATOI2(arg);
+ t->tm_year *= 100;
+ yearset = 1;
+ /* FALLTHROUGH */
+ case 10: /* YYMMDDhhmm */
+ if (yearset) {
+ yearset = ATOI2(arg);
+ t->tm_year += yearset;
+ } else {
+ yearset = ATOI2(arg);
+ if (yearset < 69)
+ t->tm_year = yearset + 2000;
+ else
+ t->tm_year = yearset + 1900;
+ }
+ t->tm_year -= 1900; /* Convert to UNIX time. */
+ /* FALLTHROUGH */
+ case 8: /* MMDDhhmm */
+ t->tm_mon = ATOI2(arg);
+ --t->tm_mon; /* Convert from 01-12 to 00-11 */
+ t->tm_mday = ATOI2(arg);
+ t->tm_hour = ATOI2(arg);
+ t->tm_min = ATOI2(arg);
+ break;
+ default:
+ goto terr;
+ }
+
+ t->tm_isdst = -1; /* Figure out DST. */
+
+ *timep = mktime(t);
+ if (*timep == -1) {
+terr: fprintf(stderr,
+ "%s: out of range or illegal time specification: [[CC]YY]MMDDhhmm[.SS]",
+ progname);
+ return (EXIT_FAILURE);
+ }
+ return (0);
+}
+
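As a worked example of the parsing above: -t 200210301557.05 is first split at the '.', setting tm_sec to 5; the remaining 12 digits take the CCYYMMDDhhmm branch, yielding year 2002, month October (tm_mon becomes 9 after the 01-12 to 00-11 shift), day 30, 15:57 local time. mktime then converts that to Epoch seconds, with tm_isdst = -1 letting the C library decide whether DST was in effect.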
+int
+db_recover_usage()
+{
+ (void)fprintf(stderr, "%s\n",
+"usage: db_recover [-ceVv] [-h home] [-P password] [-t [[CC]YY]MMDDhhmm[.SS]]");
+ return (EXIT_FAILURE);
+}
+
+int
+db_recover_version_check(progname)
+ const char *progname;
+{
+ int v_major, v_minor, v_patch;
+
+ /* Make sure we're loaded with the right version of the DB library. */
+ (void)db_version(&v_major, &v_minor, &v_patch);
+ if (v_major != DB_VERSION_MAJOR ||
+ v_minor != DB_VERSION_MINOR || v_patch != DB_VERSION_PATCH) {
+ fprintf(stderr,
+ "%s: version %d.%d.%d doesn't match library version %d.%d.%d\n",
+ progname, DB_VERSION_MAJOR, DB_VERSION_MINOR,
+ DB_VERSION_PATCH, v_major, v_minor, v_patch);
+ return (EXIT_FAILURE);
+ }
+ return (0);
+}
diff --git a/bdb/build_vxworks/db_recover/db_recover.wpj b/bdb/build_vxworks/db_recover/db_recover.wpj
new file mode 100755
index 00000000000..2df7234233a
--- /dev/null
+++ b/bdb/build_vxworks/db_recover/db_recover.wpj
@@ -0,0 +1,160 @@
+Document file - DO NOT EDIT
+
+<BEGIN> BUILD_PENTIUMgnu_BUILDRULE
+db_recover.out
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_AR
+ar386
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_ARCHIVE
+$(PRJ_DIR)/PENTIUMgnu/db_recover.a
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_AS
+cc386
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_CC
+cc386
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_CFLAGS
+-g \
+ -mpentium \
+ -ansi \
+ -nostdinc \
+ -DRW_MULTI_THREAD \
+ -D_REENTRANT \
+ -fvolatile \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -I$(PRJ_DIR)/.. \
+ -I$(PRJ_DIR)/../.. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_CFLAGS_AS
+-g \
+ -mpentium \
+ -ansi \
+ -nostdinc \
+ -fvolatile \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -P \
+ -x \
+ assembler-with-cpp \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_CPP
+cc386 -E -P -xc
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_LD
+ld386
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_LDDEPS
+
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_LDFLAGS
+-X -N
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_LD_PARTIAL_FLAGS
+-X -r
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_NM
+nm386 -g
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_OPTION_DEFINE_MACRO
+-D
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_OPTION_INCLUDE_DIR
+-I
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_POST_BUILD_RULE
+
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_PRJ_LIBS
+
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_SIZE
+size386
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_RO_DEPEND_PATH
+{$(WIND_BASE)/target/h/} \
+ {$(WIND_BASE)/target/src/} \
+ {$(WIND_BASE)/target/config/}
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_TC
+::tc_PENTIUMgnu
+<END>
+
+<BEGIN> BUILD_RULE_archive
+
+<END>
+
+<BEGIN> BUILD_RULE_db_recover.out
+
+<END>
+
+<BEGIN> BUILD_RULE_objects
+
+<END>
+
+<BEGIN> BUILD__CURRENT
+PENTIUMgnu
+<END>
+
+<BEGIN> BUILD__LIST
+PENTIUMgnu
+<END>
+
+<BEGIN> CORE_INFO_TYPE
+::prj_vxApp
+<END>
+
+<BEGIN> CORE_INFO_VERSION
+2.0
+<END>
+
+<BEGIN> FILE_db_recover.c_dependDone
+FALSE
+<END>
+
+<BEGIN> FILE_db_recover.c_dependencies
+
+<END>
+
+<BEGIN> FILE_db_recover.c_objects
+db_recover.o
+<END>
+
+<BEGIN> FILE_db_recover.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> PROJECT_FILES
+$(PRJ_DIR)/db_recover.c
+<END>
+
+<BEGIN> userComments
+db_recover
+<END>
diff --git a/bdb/build_vxworks/db_recover/db_recover/Makefile.custom b/bdb/build_vxworks/db_recover/db_recover/Makefile.custom
new file mode 100644
index 00000000000..ca781f7b251
--- /dev/null
+++ b/bdb/build_vxworks/db_recover/db_recover/Makefile.custom
@@ -0,0 +1,51 @@
+#
+# Custom Makefile shell
+#
+# This file may be edited freely, since it will not be regenerated
+# by the project manager.
+#
+# Use this makefile to define rules to make external binaries
+# and deposit them in the $(EXTERNAL_BINARIES_DIR) directory.
+#
+# If you have specified external modules during your component
+# creation, you will find make rules already in place below.
+# You will likely have to edit these to suit your individual
+# build setup.
+#
+# You may wish to use the CPU, BUILD_SPEC or TOOL make variables in
+# your Makefile to support builds for different architectures. Use
+# the FORCE_EXTERNAL_MAKE phony target to ensure that your external
+# make always runs.
+#
+# The example below assumes that your custom makefile is in the
+# mySourceTree directory, and that the binary file it produces
+# is placed into the $(BUILD_SPEC) sub-directory.
+#
+# EXTERNAL_SOURCE_BASE = /folk/me/mySourceTree
+# EXTERNAL_MODULE = myLibrary.o
+# EXTERNAL_MAKE = make
+#
+# $(EXTERNAL_BINARIES_DIR)/$(EXTERNAL_MODULE) : FORCE_EXTERNAL_MAKE
+# $(EXTERNAL_MAKE) -C $(EXTERNAL_SOURCE_BASE) \
+# -f $(EXTERNAL_SOURCE_BASE)/Makefile \
+# CPU=$(CPU) BUILD_SPEC=$(BUILD_SPEC) $(@F)
+# $(CP) $(subst /,$(DIRCHAR),$(EXTERNAL_SOURCE_BASE)/$(BUILD_SPEC)/$(@F) $@)
+#
+# If you are not adding your external modules from the component wizard,
+# you will have to include them in your component yourself:
+#
+# From the GUI, you can do this with the Component's 'Add external module'
+# dialog.
+#
+# If you are using the command line, add the module(s) by editing the
+# MODULES line in component.cdf file, e.g.
+#
+# Component INCLUDE_MYCOMPONENT {
+#
+# MODULES foo.o goo.o \
+# myLibrary.o
+#
+
+
+# rules to build custom libraries
+
diff --git a/bdb/build_vxworks/db_recover/db_recover/component.cdf b/bdb/build_vxworks/db_recover/db_recover/component.cdf
new file mode 100755
index 00000000000..d322bf4a8fd
--- /dev/null
+++ b/bdb/build_vxworks/db_recover/db_recover/component.cdf
@@ -0,0 +1,30 @@
+/* component.cdf - dynamically updated configuration */
+
+/*
+ * NOTE: you may edit this file to alter the configuration
+ * But all non-configuration information, including comments,
+ * will be lost upon rebuilding this project.
+ */
+
+/* Component information */
+
+Component INCLUDE_DB_RECOVER {
+ ENTRY_POINTS ALL_GLOBAL_SYMBOLS
+ MODULES db_recover.o
+ NAME db_recover
+ PREF_DOMAIN ANY
+ _INIT_ORDER usrComponentsInit
+}
+
+/* EntryPoint information */
+
+/* Module information */
+
+Module db_recover.o {
+
+ NAME db_recover.o
+ SRC_PATH_NAME $PRJ_DIR/../db_recover.c
+}
+
+/* Parameter information */
+
diff --git a/bdb/build_vxworks/db_recover/db_recover/component.wpj b/bdb/build_vxworks/db_recover/db_recover/component.wpj
new file mode 100755
index 00000000000..0daf9f6ca1e
--- /dev/null
+++ b/bdb/build_vxworks/db_recover/db_recover/component.wpj
@@ -0,0 +1,475 @@
+Document file - DO NOT EDIT
+
+<BEGIN> CORE_INFO_TYPE
+::prj_component
+<END>
+
+<BEGIN> CORE_INFO_VERSION
+AE1.1
+<END>
+
+<BEGIN> BUILD__CURRENT
+PENTIUM2gnu.debug
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_CURRENT_TARGET
+default
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_DEFAULTFORCPU
+1
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../db_recover.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../db_recover.c_objects
+db_recover.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../db_recover.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../db_recover.c_objects
+db_recover.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../db_recover.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/compConfig.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/compConfig.c_objects
+compConfig.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/compConfig.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_AR
+arpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_AS
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_CC
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_CFLAGS
+-mcpu=pentiumpro \
+ -march=pentiumpro \
+ -ansi \
+ -DRW_MULTI_THREAD \
+ -D_REENTRANT \
+ -g \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -MD \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -I$(PRJ_DIR)/../.. \
+ -I$(PRJ_DIR)/../../.. \
+ -DCPU=PENTIUM2
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_CFLAGS_AS
+-mcpu=pentiumpro \
+ -march=pentiumpro \
+ -ansi \
+ -g \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -P \
+ -x \
+ assembler-with-cpp \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM2
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_CPP
+ccpentium -E -P
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_CPPFILT
+c++filtpentium --strip-underscores
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_LD
+ldpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_LDFLAGS
+-X
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_LDPARTIAL
+ccpentium \
+ -B$(WIND_BASE)/host/$(WIND_HOST_TYPE)/lib/gcc-lib/ \
+ -nostdlib \
+ -r \
+ -Wl,-X
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_LD_PARTIAL_FLAGS
+-X -r
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_NM
+nmpentium -g
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_OPTION_DEFINE_MACRO
+-D
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_OPTION_GENERATE_DEPENDENCY_FILE
+-MD
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_OPTION_INCLUDE_DIR
+-I
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_RELEASE
+0
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_SIZE
+sizepentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_RELEASE
+0
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_RO_DEPEND_PATH
+$(WIND_BASE)/target/h/
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_TC
+::tc_PENTIUM2gnu.debug
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_DEFAULTFORCPU
+0
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../db_recover.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../db_recover.c_objects
+db_recover.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../db_recover.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/compConfig.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/compConfig.c_objects
+compConfig.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/compConfig.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_AR
+arpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_AS
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_CC
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_CFLAGS
+-mcpu=pentiumpro \
+ -march=pentiumpro \
+ -ansi \
+ -DRW_MULTI_THREAD \
+ -D_REENTRANT \
+ -O2 \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -MD \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -I$(PRJ_DIR)/../.. \
+ -I$(PRJ_DIR)/../../.. \
+ -DCPU=PENTIUM2
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_CFLAGS_AS
+-mcpu=pentiumpro \
+ -march=pentiumpro \
+ -ansi \
+ -O2 \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -P \
+ -x \
+ assembler-with-cpp \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM2
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_CPP
+ccpentium -E -P
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_CPPFILT
+c++filtpentium --strip-underscores
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_LD
+ldpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_LDFLAGS
+-X
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_LDPARTIAL
+ccpentium \
+ -B$(WIND_BASE)/host/$(WIND_HOST_TYPE)/lib/gcc-lib/ \
+ -nostdlib \
+ -r \
+ -Wl,-X
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_LD_PARTIAL_FLAGS
+-X -r
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_NM
+nmpentium -g
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_OPTION_DEFINE_MACRO
+-D
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_OPTION_GENERATE_DEPENDENCY_FILE
+-MD
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_OPTION_INCLUDE_DIR
+-I
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_RELEASE
+1
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_SIZE
+sizepentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_RELEASE
+1
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_RO_DEPEND_PATH
+$(WIND_BASE)/target/h/
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_TC
+::tc_PENTIUM2gnu.release
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_DEFAULTFORCPU
+1
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../db_recover.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../db_recover.c_objects
+db_recover.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../db_recover.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/compConfig.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/compConfig.c_objects
+compConfig.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/compConfig.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_AR
+arpentium
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_AS
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_CC
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_CFLAGS
+-mcpu=pentium \
+ -march=pentium \
+ -ansi \
+ -DRW_MULTI_THREAD \
+ -D_REENTRANT \
+ -g \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -MD \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -I$(PRJ_DIR)/../.. \
+ -I$(PRJ_DIR)/../../.. \
+ -DCPU=PENTIUM
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_CFLAGS_AS
+-mcpu=pentium \
+ -march=pentium \
+ -ansi \
+ -g \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -P \
+ -x \
+ assembler-with-cpp \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_CPP
+ccpentium -E -P
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_CPPFILT
+c++filtpentium --strip-underscores
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_LD
+ldpentium
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_LDFLAGS
+-X
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_LDPARTIAL
+ccpentium \
+ -B$(WIND_BASE)/host/$(WIND_HOST_TYPE)/lib/gcc-lib/ \
+ -nostdlib \
+ -r \
+ -Wl,-X
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_LD_PARTIAL_FLAGS
+-X -r
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_NM
+nmpentium -g
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_OPTION_DEFINE_MACRO
+-D
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_OPTION_GENERATE_DEPENDENCY_FILE
+-MD
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_OPTION_INCLUDE_DIR
+-I
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_RELEASE
+0
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_SIZE
+sizepentium
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_RELEASE
+0
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_RO_DEPEND_PATH
+$(WIND_BASE)/target/h/
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_TC
+::tc_PENTIUMgnu.debug
+<END>
+
+<BEGIN> BUILD__LIST
+PENTIUM2gnu.debug PENTIUM2gnu.release PENTIUMgnu.debug
+<END>
+
+<BEGIN> PROJECT_FILES
+$(PRJ_DIR)/../db_recover.c \
+ $(PRJ_DIR)/compConfig.c
+<END>
+
+<BEGIN> WCC__CDF_PATH
+$(PRJ_DIR)
+<END>
+
+<BEGIN> WCC__CURRENT
+PENTIUM2gnu.debug
+<END>
+
+<BEGIN> WCC__LIST
+PENTIUM2gnu.debug
+<END>
+
+<BEGIN> WCC__MXR_LIBS
+lib$(CPU)$(TOOL)vx.a
+<END>
+
+<BEGIN> WCC__OBJS_PATH
+$(WIND_BASE)/target/lib/obj$CPU$TOOLvx
+<END>
+
diff --git a/bdb/build_vxworks/db_stat/db_stat.c b/bdb/build_vxworks/db_stat/db_stat.c
new file mode 100644
index 00000000000..5e9348fa04a
--- /dev/null
+++ b/bdb/build_vxworks/db_stat/db_stat.c
@@ -0,0 +1,1282 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char copyright[] =
+ "Copyright (c) 1996-2002\nSleepycat Software Inc. All rights reserved.\n";
+static const char revid[] =
+ "$Id: db_stat.c,v 11.125 2002/08/08 15:26:15 bostic Exp $";
+#endif
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#if TIME_WITH_SYS_TIME
+#include <sys/time.h>
+#include <time.h>
+#else
+#if HAVE_SYS_TIME_H
+#include <sys/time.h>
+#else
+#include <time.h>
+#endif
+#endif
+
+#include <ctype.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_page.h"
+
+#define PCT(f, t, pgsize) \
+ ((t) == 0 ? 0 : \
+ (((double)(((t) * (pgsize)) - (f)) / ((t) * (pgsize))) * 100))
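PCT() is the fill-factor helper used by the per-database statistics below: given f free bytes spread across t pages of pgsize bytes each, it reports the percentage of that space actually in use, short-circuiting to 0 when t is 0 so an empty tree never divides by zero. For example, 10 leaf pages of 4096 bytes carrying 8192 free bytes give ((10 * 4096 - 8192) / (10 * 4096)) * 100 = 80, which the btree report prints as "(80% ff)".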
+
+typedef enum { T_NOTSET,
+ T_DB, T_ENV, T_LOCK, T_LOG, T_MPOOL, T_REP, T_TXN } test_t;
+
+int db_stat_argcheck __P((char *, const char *));
+int db_stat_btree_stats __P((DB_ENV *, DB *, DB_BTREE_STAT *, int));
+int db_stat_db_init __P((DB_ENV *, char *, test_t, u_int32_t, int *));
+void db_stat_dl __P((const char *, u_long));
+void db_stat_dl_bytes __P((const char *, u_long, u_long, u_long));
+int db_stat_env_stats __P((DB_ENV *, u_int32_t));
+int db_stat_hash_stats __P((DB_ENV *, DB *, int));
+int db_stat_lock_stats __P((DB_ENV *, char *, u_int32_t));
+int db_stat_log_stats __P((DB_ENV *, u_int32_t));
+int db_stat_main __P((int, char *[]));
+int db_stat_mpool_stats __P((DB_ENV *, char *, u_int32_t));
+void db_stat_prflags __P((u_int32_t, const FN *));
+int db_stat_queue_stats __P((DB_ENV *, DB *, int));
+int db_stat_rep_stats __P((DB_ENV *, u_int32_t));
+int db_stat_txn_compare __P((const void *, const void *));
+int db_stat_txn_stats __P((DB_ENV *, u_int32_t));
+int db_stat_usage __P((void));
+int db_stat_version_check __P((const char *));
+
+int
+db_stat(args)
+ char *args;
+{
+ int argc;
+ char **argv;
+
+ __db_util_arg("db_stat", args, &argc, &argv);
+ return (db_stat_main(argc, argv) ? EXIT_FAILURE : EXIT_SUCCESS);
+}
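As with the other utilities under build_vxworks, db_stat() is the VxWorks shell entry point: __db_util_arg() splits the single argument string into an argc/argv vector and db_stat_main() then behaves like an ordinary main(). From the target shell this would presumably be run along the lines of db_stat "-m -h /path/to/env" (the path and options here are only an example), with the result mapped to EXIT_SUCCESS or EXIT_FAILURE.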
+
+#include <stdio.h>
+#define ERROR_RETURN ERROR
+
+int
+db_stat_main(argc, argv)
+ int argc;
+ char *argv[];
+{
+ extern char *optarg;
+ extern int optind, __db_getopt_reset;
+ const char *progname = "db_stat";
+ DB_ENV *dbenv;
+ DB_BTREE_STAT *sp;
+ DB *alt_dbp, *dbp;
+ test_t ttype;
+ u_int32_t cache;
+ int ch, checked, d_close, e_close, exitval, fast, flags;
+ int nflag, private, resize, ret;
+ char *db, *home, *internal, *passwd, *subdb;
+
+ if ((ret = db_stat_version_check(progname)) != 0)
+ return (ret);
+
+ dbp = NULL;
+ ttype = T_NOTSET;
+ cache = MEGABYTE;
+ d_close = e_close = exitval = fast = flags = nflag = private = 0;
+ db = home = internal = passwd = subdb = NULL;
+
+ __db_getopt_reset = 1;
+ while ((ch = getopt(argc, argv, "C:cd:efh:lM:mNP:rs:tVZ")) != EOF)
+ switch (ch) {
+ case 'C':
+ if (ttype != T_NOTSET)
+ goto argcombo;
+ ttype = T_LOCK;
+ if (!db_stat_argcheck(internal = optarg, "Aclmop"))
+ return (db_stat_usage());
+ break;
+ case 'c':
+ if (ttype != T_NOTSET)
+ goto argcombo;
+ ttype = T_LOCK;
+ break;
+ case 'd':
+ if (ttype != T_DB && ttype != T_NOTSET)
+ goto argcombo;
+ ttype = T_DB;
+ db = optarg;
+ break;
+ case 'e':
+ if (ttype != T_NOTSET)
+ goto argcombo;
+ ttype = T_ENV;
+ break;
+ case 'f':
+ fast = DB_FAST_STAT;
+ break;
+ case 'h':
+ home = optarg;
+ break;
+ case 'l':
+ if (ttype != T_NOTSET)
+ goto argcombo;
+ ttype = T_LOG;
+ break;
+ case 'M':
+ if (ttype != T_NOTSET)
+ goto argcombo;
+ ttype = T_MPOOL;
+ if (!db_stat_argcheck(internal = optarg, "Ahm"))
+ return (db_stat_usage());
+ break;
+ case 'm':
+ if (ttype != T_NOTSET)
+ goto argcombo;
+ ttype = T_MPOOL;
+ break;
+ case 'N':
+ nflag = 1;
+ break;
+ case 'P':
+ passwd = strdup(optarg);
+ memset(optarg, 0, strlen(optarg));
+ if (passwd == NULL) {
+ fprintf(stderr, "%s: strdup: %s\n",
+ progname, strerror(errno));
+ return (EXIT_FAILURE);
+ }
+ break;
+ case 'r':
+ if (ttype != T_NOTSET)
+ goto argcombo;
+ ttype = T_REP;
+ break;
+ case 's':
+ if (ttype != T_DB && ttype != T_NOTSET)
+ goto argcombo;
+ ttype = T_DB;
+ subdb = optarg;
+ break;
+ case 't':
+ if (ttype != T_NOTSET) {
+argcombo: fprintf(stderr,
+ "%s: illegal option combination\n",
+ progname);
+ return (EXIT_FAILURE);
+ }
+ ttype = T_TXN;
+ break;
+ case 'V':
+ printf("%s\n", db_version(NULL, NULL, NULL));
+ return (EXIT_SUCCESS);
+ case 'Z':
+ flags |= DB_STAT_CLEAR;
+ break;
+ case '?':
+ default:
+ return (db_stat_usage());
+ }
+ argc -= optind;
+ argv += optind;
+
+ switch (ttype) {
+ case T_DB:
+ if (db == NULL)
+ return (db_stat_usage());
+ break;
+ case T_NOTSET:
+ return (db_stat_usage());
+ /* NOTREACHED */
+ default:
+ if (fast != 0)
+ return (db_stat_usage());
+ break;
+ }
+
+ /* Handle possible interruptions. */
+ __db_util_siginit();
+
+ /*
+ * Create an environment object and initialize it for error
+ * reporting.
+ */
+retry: if ((ret = db_env_create(&dbenv, 0)) != 0) {
+ fprintf(stderr,
+ "%s: db_env_create: %s\n", progname, db_strerror(ret));
+ goto shutdown;
+ }
+ e_close = 1;
+
+ dbenv->set_errfile(dbenv, stderr);
+ dbenv->set_errpfx(dbenv, progname);
+
+ if (nflag) {
+ if ((ret = dbenv->set_flags(dbenv, DB_NOLOCKING, 1)) != 0) {
+ dbenv->err(dbenv, ret, "set_flags: DB_NOLOCKING");
+ goto shutdown;
+ }
+ if ((ret = dbenv->set_flags(dbenv, DB_NOPANIC, 1)) != 0) {
+ dbenv->err(dbenv, ret, "set_flags: DB_NOPANIC");
+ goto shutdown;
+ }
+ }
+
+ if (passwd != NULL &&
+ (ret = dbenv->set_encrypt(dbenv, passwd, DB_ENCRYPT_AES)) != 0) {
+ dbenv->err(dbenv, ret, "set_passwd");
+ goto shutdown;
+ }
+
+ /* Initialize the environment. */
+ if (db_stat_db_init(dbenv, home, ttype, cache, &private) != 0)
+ goto shutdown;
+
+ switch (ttype) {
+ case T_DB:
+ /* Create the DB object and open the file. */
+ if (flags != 0)
+ return (db_stat_usage());
+ if ((ret = db_create(&dbp, dbenv, 0)) != 0) {
+ dbenv->err(dbenv, ret, "db_create");
+ goto shutdown;
+ }
+ d_close = 1;
+
+ if ((ret = dbp->open(dbp,
+ NULL, db, subdb, DB_UNKNOWN, DB_RDONLY, 0)) != 0) {
+ dbp->err(dbp, ret, "DB->open: %s", db);
+ goto shutdown;
+ }
+
+ /* Check if cache is too small for this DB's pagesize. */
+ if (private) {
+ if ((ret =
+ __db_util_cache(dbenv, dbp, &cache, &resize)) != 0)
+ goto shutdown;
+ if (resize) {
+ (void)dbp->close(dbp, 0);
+ d_close = 0;
+
+ (void)dbenv->close(dbenv, 0);
+ e_close = 0;
+ goto retry;
+ }
+ }
+
+ /*
+ * See if we can open this db read/write to update counts.
+ * If it is a master database we cannot, so check: if it is a
+ * btree it might be one.
+ */
+ checked = 0;
+ if (subdb == NULL && dbp->type == DB_BTREE) {
+ if ((ret = dbp->stat(dbp, &sp, DB_FAST_STAT)) != 0) {
+ dbp->err(dbp, ret, "DB->stat");
+ goto shutdown;
+ }
+ checked = 1;
+ }
+
+ if (subdb != NULL ||
+ dbp->type != DB_BTREE ||
+ (sp->bt_metaflags & BTM_SUBDB) == 0) {
+ if ((ret = db_create(&alt_dbp, dbenv, 0)) != 0) {
+ dbenv->err(dbenv, ret, "db_create");
+ goto shutdown;
+ }
+ if ((ret = dbp->open(alt_dbp, NULL,
+ db, subdb, DB_UNKNOWN, 0, 0)) != 0) {
+ dbenv->err(dbenv,
+ ret, "DB->open: %s:%s", db, subdb);
+ (void)alt_dbp->close(alt_dbp, 0);
+ goto shutdown;
+ }
+
+ (void)dbp->close(dbp, 0);
+ dbp = alt_dbp;
+
+ /* Need to run again to update counts */
+ checked = 0;
+ }
+
+ switch (dbp->type) {
+ case DB_BTREE:
+ case DB_RECNO:
+ if (db_stat_btree_stats(
+ dbenv, dbp, checked == 1 ? sp : NULL, fast))
+ goto shutdown;
+ break;
+ case DB_HASH:
+ if (db_stat_hash_stats(dbenv, dbp, fast))
+ goto shutdown;
+ break;
+ case DB_QUEUE:
+ if (db_stat_queue_stats(dbenv, dbp, fast))
+ goto shutdown;
+ break;
+ case DB_UNKNOWN:
+ dbenv->errx(dbenv, "Unknown database type.");
+ goto shutdown;
+ }
+ break;
+ case T_ENV:
+ if (db_stat_env_stats(dbenv, flags))
+ goto shutdown;
+ break;
+ case T_LOCK:
+ if (db_stat_lock_stats(dbenv, internal, flags))
+ goto shutdown;
+ break;
+ case T_LOG:
+ if (db_stat_log_stats(dbenv, flags))
+ goto shutdown;
+ break;
+ case T_MPOOL:
+ if (db_stat_mpool_stats(dbenv, internal, flags))
+ goto shutdown;
+ break;
+ case T_REP:
+ if (db_stat_rep_stats(dbenv, flags))
+ goto shutdown;
+ break;
+ case T_TXN:
+ if (db_stat_txn_stats(dbenv, flags))
+ goto shutdown;
+ break;
+ case T_NOTSET:
+ dbenv->errx(dbenv, "Unknown statistics flag.");
+ goto shutdown;
+ }
+
+ if (0) {
+shutdown: exitval = 1;
+ }
+ if (d_close && (ret = dbp->close(dbp, 0)) != 0) {
+ exitval = 1;
+ dbenv->err(dbenv, ret, "close");
+ }
+ if (e_close && (ret = dbenv->close(dbenv, 0)) != 0) {
+ exitval = 1;
+ fprintf(stderr,
+ "%s: dbenv->close: %s\n", progname, db_strerror(ret));
+ }
+
+ /* Resend any caught signal. */
+ __db_util_sigresend();
+
+ return (exitval == 0 ? EXIT_SUCCESS : EXIT_FAILURE);
+}
+
+/*
+ * env_stats --
+ * Display environment statistics.
+ */
+int
+db_stat_env_stats(dbenv, flags)
+ DB_ENV *dbenv;
+ u_int32_t flags;
+{
+ REGENV renv;
+ REGION *rp, regs[1024];
+ int n, ret;
+ const char *label;
+
+ n = sizeof(regs) / sizeof(regs[0]);
+ if ((ret = __db_e_stat(dbenv, &renv, regs, &n, flags)) != 0) {
+ dbenv->err(dbenv, ret, "__db_e_stat");
+ return (1);
+ }
+
+ printf("%d.%d.%d\tEnvironment version.\n",
+ renv.majver, renv.minver, renv.patch);
+ printf("%lx\tMagic number.\n", (u_long)renv.magic);
+ printf("%d\tPanic value.\n", renv.envpanic);
+
+ /* Adjust the reference count for us... */
+ printf("%d\tReferences.\n", renv.refcnt - 1);
+
+ db_stat_dl("Locks granted without waiting.\n",
+ (u_long)renv.mutex.mutex_set_nowait);
+ db_stat_dl("Locks granted after waiting.\n",
+ (u_long)renv.mutex.mutex_set_wait);
+
+ while (n > 0) {
+ printf("%s\n", DB_LINE);
+ rp = &regs[--n];
+ switch (rp->type) {
+ case REGION_TYPE_ENV:
+ lable = "Environment";
+ break;
+ case REGION_TYPE_LOCK:
+ lable = "Lock";
+ break;
+ case REGION_TYPE_LOG:
+ lable = "Log";
+ break;
+ case REGION_TYPE_MPOOL:
+ lable = "Mpool";
+ break;
+ case REGION_TYPE_MUTEX:
+ lable = "Mutex";
+ break;
+ case REGION_TYPE_TXN:
+ lable = "Txn";
+ break;
+ case INVALID_REGION_TYPE:
+ default:
+ lable = "Invalid";
+ break;
+ }
+ printf("%s Region: %d.\n", lable, rp->id);
+ db_stat_dl_bytes("Size", (u_long)0, (u_long)0, (u_long)rp->size);
+ printf("%ld\tSegment ID.\n", rp->segid);
+ db_stat_dl("Locks granted without waiting.\n",
+ (u_long)rp->mutex.mutex_set_nowait);
+ db_stat_dl("Locks granted after waiting.\n",
+ (u_long)rp->mutex.mutex_set_wait);
+ }
+
+ return (0);
+}
+
+/*
+ * btree_stats --
+ * Display btree/recno statistics.
+ */
+int
+db_stat_btree_stats(dbenv, dbp, msp, fast)
+ DB_ENV *dbenv;
+ DB *dbp;
+ DB_BTREE_STAT *msp;
+ int fast;
+{
+ static const FN fn[] = {
+ { BTM_DUP, "duplicates" },
+ { BTM_FIXEDLEN, "fixed-length" },
+ { BTM_RECNO, "recno" },
+ { BTM_RECNUM, "record-numbers" },
+ { BTM_RENUMBER, "renumber" },
+ { BTM_SUBDB, "multiple-databases" },
+ { 0, NULL }
+ };
+ DB_BTREE_STAT *sp;
+ int ret;
+
+ COMPQUIET(dbenv, NULL);
+
+ if (msp != NULL)
+ sp = msp;
+ else if ((ret = dbp->stat(dbp, &sp, fast)) != 0) {
+ dbp->err(dbp, ret, "DB->stat");
+ return (1);
+ }
+
+ printf("%lx\tBtree magic number.\n", (u_long)sp->bt_magic);
+ printf("%lu\tBtree version number.\n", (u_long)sp->bt_version);
+ db_stat_prflags(sp->bt_metaflags, fn);
+ if (dbp->type == DB_BTREE) {
+#ifdef NOT_IMPLEMENTED
+ db_stat_dl("Maximum keys per-page.\n", (u_long)sp->bt_maxkey);
+#endif
+ db_stat_dl("Minimum keys per-page.\n", (u_long)sp->bt_minkey);
+ }
+ if (dbp->type == DB_RECNO) {
+ db_stat_dl("Fixed-length record size.\n", (u_long)sp->bt_re_len);
+ if (isprint(sp->bt_re_pad) && !isspace(sp->bt_re_pad))
+ printf("%c\tFixed-length record pad.\n",
+ (int)sp->bt_re_pad);
+ else
+ printf("0x%x\tFixed-length record pad.\n",
+ (int)sp->bt_re_pad);
+ }
+ db_stat_dl("Underlying database page size.\n", (u_long)sp->bt_pagesize);
+ db_stat_dl("Number of levels in the tree.\n", (u_long)sp->bt_levels);
+ db_stat_dl(dbp->type == DB_BTREE ?
+ "Number of unique keys in the tree.\n" :
+ "Number of records in the tree.\n", (u_long)sp->bt_nkeys);
+ db_stat_dl("Number of data items in the tree.\n", (u_long)sp->bt_ndata);
+
+ db_stat_dl("Number of tree internal pages.\n", (u_long)sp->bt_int_pg);
+ db_stat_dl("Number of bytes free in tree internal pages",
+ (u_long)sp->bt_int_pgfree);
+ printf(" (%.0f%% ff).\n",
+ PCT(sp->bt_int_pgfree, sp->bt_int_pg, sp->bt_pagesize));
+
+ db_stat_dl("Number of tree leaf pages.\n", (u_long)sp->bt_leaf_pg);
+ db_stat_dl("Number of bytes free in tree leaf pages",
+ (u_long)sp->bt_leaf_pgfree);
+ printf(" (%.0f%% ff).\n",
+ PCT(sp->bt_leaf_pgfree, sp->bt_leaf_pg, sp->bt_pagesize));
+
+ db_stat_dl("Number of tree duplicate pages.\n", (u_long)sp->bt_dup_pg);
+ db_stat_dl("Number of bytes free in tree duplicate pages",
+ (u_long)sp->bt_dup_pgfree);
+ printf(" (%.0f%% ff).\n",
+ PCT(sp->bt_dup_pgfree, sp->bt_dup_pg, sp->bt_pagesize));
+
+ db_stat_dl("Number of tree overflow pages.\n", (u_long)sp->bt_over_pg);
+ db_stat_dl("Number of bytes free in tree overflow pages",
+ (u_long)sp->bt_over_pgfree);
+ printf(" (%.0f%% ff).\n",
+ PCT(sp->bt_over_pgfree, sp->bt_over_pg, sp->bt_pagesize));
+
+ db_stat_dl("Number of pages on the free list.\n", (u_long)sp->bt_free);
+
+ free(sp);
+
+ return (0);
+}
+
+/*
+ * hash_stats --
+ * Display hash statistics.
+ */
+int
+db_stat_hash_stats(dbenv, dbp, fast)
+ DB_ENV *dbenv;
+ DB *dbp;
+ int fast;
+{
+ static const FN fn[] = {
+ { DB_HASH_DUP, "duplicates" },
+ { DB_HASH_SUBDB,"multiple-databases" },
+ { 0, NULL }
+ };
+ DB_HASH_STAT *sp;
+ int ret;
+
+ COMPQUIET(dbenv, NULL);
+
+ if ((ret = dbp->stat(dbp, &sp, fast)) != 0) {
+ dbp->err(dbp, ret, "DB->stat");
+ return (1);
+ }
+
+ printf("%lx\tHash magic number.\n", (u_long)sp->hash_magic);
+ printf("%lu\tHash version number.\n", (u_long)sp->hash_version);
+ db_stat_prflags(sp->hash_metaflags, fn);
+ db_stat_dl("Underlying database page size.\n", (u_long)sp->hash_pagesize);
+ db_stat_dl("Specified fill factor.\n", (u_long)sp->hash_ffactor);
+ db_stat_dl("Number of keys in the database.\n", (u_long)sp->hash_nkeys);
+ db_stat_dl("Number of data items in the database.\n", (u_long)sp->hash_ndata);
+
+ db_stat_dl("Number of hash buckets.\n", (u_long)sp->hash_buckets);
+ db_stat_dl("Number of bytes free on bucket pages", (u_long)sp->hash_bfree);
+ printf(" (%.0f%% ff).\n",
+ PCT(sp->hash_bfree, sp->hash_buckets, sp->hash_pagesize));
+
+ db_stat_dl("Number of overflow pages.\n", (u_long)sp->hash_bigpages);
+ db_stat_dl("Number of bytes free in overflow pages",
+ (u_long)sp->hash_big_bfree);
+ printf(" (%.0f%% ff).\n",
+ PCT(sp->hash_big_bfree, sp->hash_bigpages, sp->hash_pagesize));
+
+ db_stat_dl("Number of bucket overflow pages.\n", (u_long)sp->hash_overflows);
+ db_stat_dl("Number of bytes free in bucket overflow pages",
+ (u_long)sp->hash_ovfl_free);
+ printf(" (%.0f%% ff).\n",
+ PCT(sp->hash_ovfl_free, sp->hash_overflows, sp->hash_pagesize));
+
+ db_stat_dl("Number of duplicate pages.\n", (u_long)sp->hash_dup);
+ db_stat_dl("Number of bytes free in duplicate pages",
+ (u_long)sp->hash_dup_free);
+ printf(" (%.0f%% ff).\n",
+ PCT(sp->hash_dup_free, sp->hash_dup, sp->hash_pagesize));
+
+ db_stat_dl("Number of pages on the free list.\n", (u_long)sp->hash_free);
+
+ free(sp);
+
+ return (0);
+}
+
+/*
+ * queue_stats --
+ * Display queue statistics.
+ */
+int
+db_stat_queue_stats(dbenv, dbp, fast)
+ DB_ENV *dbenv;
+ DB *dbp;
+ int fast;
+{
+ DB_QUEUE_STAT *sp;
+ int ret;
+
+ COMPQUIET(dbenv, NULL);
+
+ if ((ret = dbp->stat(dbp, &sp, fast)) != 0) {
+ dbp->err(dbp, ret, "DB->stat");
+ return (1);
+ }
+
+ printf("%lx\tQueue magic number.\n", (u_long)sp->qs_magic);
+ printf("%lu\tQueue version number.\n", (u_long)sp->qs_version);
+ db_stat_dl("Fixed-length record size.\n", (u_long)sp->qs_re_len);
+ if (isprint(sp->qs_re_pad) && !isspace(sp->qs_re_pad))
+ printf("%c\tFixed-length record pad.\n", (int)sp->qs_re_pad);
+ else
+ printf("0x%x\tFixed-length record pad.\n", (int)sp->qs_re_pad);
+ db_stat_dl("Underlying database page size.\n", (u_long)sp->qs_pagesize);
+ if (sp->qs_extentsize != 0)
+ db_stat_dl("Underlying database extent size.\n",
+ (u_long)sp->qs_extentsize);
+ db_stat_dl("Number of records in the database.\n", (u_long)sp->qs_nkeys);
+ db_stat_dl("Number of database pages.\n", (u_long)sp->qs_pages);
+ db_stat_dl("Number of bytes free in database pages", (u_long)sp->qs_pgfree);
+ printf(" (%.0f%% ff).\n",
+ PCT(sp->qs_pgfree, sp->qs_pages, sp->qs_pagesize));
+ printf("%lu\tFirst undeleted record.\n", (u_long)sp->qs_first_recno);
+ printf(
+ "%lu\tNext available record number.\n", (u_long)sp->qs_cur_recno);
+
+ free(sp);
+
+ return (0);
+}
+
+/*
+ * lock_stats --
+ * Display lock statistics.
+ */
+int
+db_stat_lock_stats(dbenv, internal, flags)
+ DB_ENV *dbenv;
+ char *internal;
+ u_int32_t flags;
+{
+ DB_LOCK_STAT *sp;
+ int ret;
+
+ if (internal != NULL) {
+ if ((ret =
+ dbenv->lock_dump_region(dbenv, internal, stdout)) != 0) {
+ dbenv->err(dbenv, ret, NULL);
+ return (1);
+ }
+ return (0);
+ }
+
+ if ((ret = dbenv->lock_stat(dbenv, &sp, flags)) != 0) {
+ dbenv->err(dbenv, ret, NULL);
+ return (1);
+ }
+
+ db_stat_dl("Last allocated locker ID.\n", (u_long)sp->st_id);
+ db_stat_dl("Current maximum unused locker ID.\n", (u_long)sp->st_cur_maxid);
+ db_stat_dl("Number of lock modes.\n", (u_long)sp->st_nmodes);
+ db_stat_dl("Maximum number of locks possible.\n", (u_long)sp->st_maxlocks);
+ db_stat_dl("Maximum number of lockers possible.\n", (u_long)sp->st_maxlockers);
+ db_stat_dl("Maximum number of lock objects possible.\n",
+ (u_long)sp->st_maxobjects);
+ db_stat_dl("Number of current locks.\n", (u_long)sp->st_nlocks);
+ db_stat_dl("Maximum number of locks at any one time.\n",
+ (u_long)sp->st_maxnlocks);
+ db_stat_dl("Number of current lockers.\n", (u_long)sp->st_nlockers);
+ db_stat_dl("Maximum number of lockers at any one time.\n",
+ (u_long)sp->st_maxnlockers);
+ db_stat_dl("Number of current lock objects.\n", (u_long)sp->st_nobjects);
+ db_stat_dl("Maximum number of lock objects at any one time.\n",
+ (u_long)sp->st_maxnobjects);
+ db_stat_dl("Total number of locks requested.\n", (u_long)sp->st_nrequests);
+ db_stat_dl("Total number of locks released.\n", (u_long)sp->st_nreleases);
+ db_stat_dl(
+ "Total number of lock requests failing because DB_LOCK_NOWAIT was set.\n",
+ (u_long)sp->st_nnowaits);
+ db_stat_dl(
+ "Total number of locks not immediately available due to conflicts.\n",
+ (u_long)sp->st_nconflicts);
+ db_stat_dl("Number of deadlocks.\n", (u_long)sp->st_ndeadlocks);
+ db_stat_dl("Lock timeout value.\n", (u_long)sp->st_locktimeout);
+ db_stat_dl("Number of locks that have timed out.\n",
+ (u_long)sp->st_nlocktimeouts);
+ db_stat_dl("Transaction timeout value.\n", (u_long)sp->st_txntimeout);
+ db_stat_dl("Number of transactions that have timed out.\n",
+ (u_long)sp->st_ntxntimeouts);
+
+ db_stat_dl_bytes("The size of the lock region.",
+ (u_long)0, (u_long)0, (u_long)sp->st_regsize);
+ db_stat_dl("The number of region locks granted after waiting.\n",
+ (u_long)sp->st_region_wait);
+ db_stat_dl("The number of region locks granted without waiting.\n",
+ (u_long)sp->st_region_nowait);
+
+ free(sp);
+
+ return (0);
+}
+
+/*
+ * log_stats --
+ * Display log statistics.
+ */
+int
+db_stat_log_stats(dbenv, flags)
+ DB_ENV *dbenv;
+ u_int32_t flags;
+{
+ DB_LOG_STAT *sp;
+ int ret;
+
+ if ((ret = dbenv->log_stat(dbenv, &sp, flags)) != 0) {
+ dbenv->err(dbenv, ret, NULL);
+ return (1);
+ }
+
+ printf("%lx\tLog magic number.\n", (u_long)sp->st_magic);
+ printf("%lu\tLog version number.\n", (u_long)sp->st_version);
+ db_stat_dl_bytes("Log record cache size",
+ (u_long)0, (u_long)0, (u_long)sp->st_lg_bsize);
+ printf("%#o\tLog file mode.\n", sp->st_mode);
+ if (sp->st_lg_size % MEGABYTE == 0)
+ printf("%luMb\tCurrent log file size.\n",
+ (u_long)sp->st_lg_size / MEGABYTE);
+ else if (sp->st_lg_size % 1024 == 0)
+ printf("%luKb\tCurrent log file size.\n",
+ (u_long)sp->st_lg_size / 1024);
+ else
+ printf("%lu\tCurrent log file size.\n",
+ (u_long)sp->st_lg_size);
+ db_stat_dl_bytes("Log bytes written",
+ (u_long)0, (u_long)sp->st_w_mbytes, (u_long)sp->st_w_bytes);
+ db_stat_dl_bytes("Log bytes written since last checkpoint",
+ (u_long)0, (u_long)sp->st_wc_mbytes, (u_long)sp->st_wc_bytes);
+ db_stat_dl("Total log file writes.\n", (u_long)sp->st_wcount);
+ db_stat_dl("Total log file write due to overflow.\n",
+ (u_long)sp->st_wcount_fill);
+ db_stat_dl("Total log file flushes.\n", (u_long)sp->st_scount);
+ printf("%lu\tCurrent log file number.\n", (u_long)sp->st_cur_file);
+ printf("%lu\tCurrent log file offset.\n", (u_long)sp->st_cur_offset);
+ printf("%lu\tOn-disk log file number.\n", (u_long)sp->st_disk_file);
+ printf("%lu\tOn-disk log file offset.\n", (u_long)sp->st_disk_offset);
+
+ db_stat_dl("Max commits in a log flush.\n", (u_long)sp->st_maxcommitperflush);
+ db_stat_dl("Min commits in a log flush.\n", (u_long)sp->st_mincommitperflush);
+
+ db_stat_dl_bytes("Log region size",
+ (u_long)0, (u_long)0, (u_long)sp->st_regsize);
+ db_stat_dl("The number of region locks granted after waiting.\n",
+ (u_long)sp->st_region_wait);
+ db_stat_dl("The number of region locks granted without waiting.\n",
+ (u_long)sp->st_region_nowait);
+
+ free(sp);
+
+ return (0);
+}
+
+/*
+ * mpool_stats --
+ * Display mpool statistics.
+ */
+int
+db_stat_mpool_stats(dbenv, internal, flags)
+ DB_ENV *dbenv;
+ char *internal;
+ u_int32_t flags;
+{
+ DB_MPOOL_FSTAT **fsp;
+ DB_MPOOL_STAT *gsp;
+ int ret;
+
+ if (internal != NULL) {
+ if ((ret =
+ dbenv->memp_dump_region(dbenv, internal, stdout)) != 0) {
+ dbenv->err(dbenv, ret, NULL);
+ return (1);
+ }
+ return (0);
+ }
+
+ if ((ret = dbenv->memp_stat(dbenv, &gsp, &fsp, flags)) != 0) {
+ dbenv->err(dbenv, ret, NULL);
+ return (1);
+ }
+
+ db_stat_dl_bytes("Total cache size",
+ (u_long)gsp->st_gbytes, (u_long)0, (u_long)gsp->st_bytes);
+ db_stat_dl("Number of caches.\n", (u_long)gsp->st_ncache);
+ db_stat_dl_bytes("Pool individual cache size",
+ (u_long)0, (u_long)0, (u_long)gsp->st_regsize);
+ db_stat_dl("Requested pages mapped into the process' address space.\n",
+ (u_long)gsp->st_map);
+ db_stat_dl("Requested pages found in the cache", (u_long)gsp->st_cache_hit);
+ if (gsp->st_cache_hit + gsp->st_cache_miss != 0)
+ printf(" (%.0f%%)", ((double)gsp->st_cache_hit /
+ (gsp->st_cache_hit + gsp->st_cache_miss)) * 100);
+ printf(".\n");
+ db_stat_dl("Requested pages not found in the cache.\n",
+ (u_long)gsp->st_cache_miss);
+ db_stat_dl("Pages created in the cache.\n", (u_long)gsp->st_page_create);
+ db_stat_dl("Pages read into the cache.\n", (u_long)gsp->st_page_in);
+ db_stat_dl("Pages written from the cache to the backing file.\n",
+ (u_long)gsp->st_page_out);
+ db_stat_dl("Clean pages forced from the cache.\n",
+ (u_long)gsp->st_ro_evict);
+ db_stat_dl("Dirty pages forced from the cache.\n",
+ (u_long)gsp->st_rw_evict);
+ db_stat_dl("Dirty pages written by trickle-sync thread.\n",
+ (u_long)gsp->st_page_trickle);
+ db_stat_dl("Current total page count.\n",
+ (u_long)gsp->st_pages);
+ db_stat_dl("Current clean page count.\n",
+ (u_long)gsp->st_page_clean);
+ db_stat_dl("Current dirty page count.\n",
+ (u_long)gsp->st_page_dirty);
+ db_stat_dl("Number of hash buckets used for page location.\n",
+ (u_long)gsp->st_hash_buckets);
+ db_stat_dl("Total number of times hash chains searched for a page.\n",
+ (u_long)gsp->st_hash_searches);
+ db_stat_dl("The longest hash chain searched for a page.\n",
+ (u_long)gsp->st_hash_longest);
+ db_stat_dl("Total number of hash buckets examined for page location.\n",
+ (u_long)gsp->st_hash_examined);
+ db_stat_dl("The number of hash bucket locks granted without waiting.\n",
+ (u_long)gsp->st_hash_nowait);
+ db_stat_dl("The number of hash bucket locks granted after waiting.\n",
+ (u_long)gsp->st_hash_wait);
+ db_stat_dl("The maximum number of times any hash bucket lock was waited for.\n",
+ (u_long)gsp->st_hash_max_wait);
+ db_stat_dl("The number of region locks granted without waiting.\n",
+ (u_long)gsp->st_region_nowait);
+ db_stat_dl("The number of region locks granted after waiting.\n",
+ (u_long)gsp->st_region_wait);
+ db_stat_dl("The number of page allocations.\n",
+ (u_long)gsp->st_alloc);
+ db_stat_dl("The number of hash buckets examined during allocations\n",
+ (u_long)gsp->st_alloc_buckets);
+ db_stat_dl("The max number of hash buckets examined for an allocation\n",
+ (u_long)gsp->st_alloc_max_buckets);
+ db_stat_dl("The number of pages examined during allocations\n",
+ (u_long)gsp->st_alloc_pages);
+ db_stat_dl("The max number of pages examined for an allocation\n",
+ (u_long)gsp->st_alloc_max_pages);
+
+ for (; fsp != NULL && *fsp != NULL; ++fsp) {
+ printf("%s\n", DB_LINE);
+ printf("Pool File: %s\n", (*fsp)->file_name);
+ db_stat_dl("Page size.\n", (u_long)(*fsp)->st_pagesize);
+ db_stat_dl("Requested pages mapped into the process' address space.\n",
+ (u_long)(*fsp)->st_map);
+ db_stat_dl("Requested pages found in the cache",
+ (u_long)(*fsp)->st_cache_hit);
+ if ((*fsp)->st_cache_hit + (*fsp)->st_cache_miss != 0)
+ printf(" (%.0f%%)", ((double)(*fsp)->st_cache_hit /
+ ((*fsp)->st_cache_hit + (*fsp)->st_cache_miss)) *
+ 100);
+ printf(".\n");
+ db_stat_dl("Requested pages not found in the cache.\n",
+ (u_long)(*fsp)->st_cache_miss);
+ db_stat_dl("Pages created in the cache.\n",
+ (u_long)(*fsp)->st_page_create);
+ db_stat_dl("Pages read into the cache.\n",
+ (u_long)(*fsp)->st_page_in);
+ db_stat_dl("Pages written from the cache to the backing file.\n",
+ (u_long)(*fsp)->st_page_out);
+ }
+
+ free(gsp);
+
+ return (0);
+}
+
+/*
+ * rep_stats --
+ * Display replication statistics.
+ */
+int
+db_stat_rep_stats(dbenv, flags)
+ DB_ENV *dbenv;
+ u_int32_t flags;
+{
+ DB_REP_STAT *sp;
+ int is_client, ret;
+ const char *p;
+
+ if ((ret = dbenv->rep_stat(dbenv, &sp, flags)) != 0) {
+ dbenv->err(dbenv, ret, NULL);
+ return (1);
+ }
+
+ is_client = 0;
+ switch (sp->st_status) {
+ case DB_REP_MASTER:
+ printf("Environment configured as a replication master.\n");
+ break;
+ case DB_REP_CLIENT:
+ printf("Environment configured as a replication client.\n");
+ is_client = 1;
+ break;
+ case DB_REP_LOGSONLY:
+ printf("Environment configured as a logs-only replica.\n");
+ is_client = 1;
+ break;
+ default:
+ printf("Environment not configured for replication.\n");
+ break;
+ }
+
+ printf("%lu/%lu\t%s\n",
+ (u_long)sp->st_next_lsn.file, (u_long)sp->st_next_lsn.offset,
+ is_client ? "Next LSN expected." : "Next LSN to be used.");
+ p = sp->st_waiting_lsn.file == 0 ?
+ "Not waiting for any missed log records." :
+ "LSN of first missed log record being waited for.";
+ printf("%lu/%lu\t%s\n",
+ (u_long)sp->st_waiting_lsn.file, (u_long)sp->st_waiting_lsn.offset,
+ p);
+
+ db_stat_dl("Number of duplicate master conditions detected.\n",
+ (u_long)sp->st_dupmasters);
+ if (sp->st_env_id != DB_EID_INVALID)
+ db_stat_dl("Current environment ID.\n", (u_long)sp->st_env_id);
+ else
+ printf("No current environment ID.\n");
+ db_stat_dl("Current environment priority.\n", (u_long)sp->st_env_priority);
+ db_stat_dl("Current generation number.\n", (u_long)sp->st_gen);
+ db_stat_dl("Number of duplicate log records received.\n",
+ (u_long)sp->st_log_duplicated);
+ db_stat_dl("Number of log records currently queued.\n",
+ (u_long)sp->st_log_queued);
+ db_stat_dl("Maximum number of log records ever queued at once.\n",
+ (u_long)sp->st_log_queued_max);
+ db_stat_dl("Total number of log records queued.\n",
+ (u_long)sp->st_log_queued_total);
+ db_stat_dl("Number of log records received and appended to the log.\n",
+ (u_long)sp->st_log_records);
+ db_stat_dl("Number of log records missed and requested.\n",
+ (u_long)sp->st_log_requested);
+ if (sp->st_master != DB_EID_INVALID)
+ db_stat_dl("Current master ID.\n", (u_long)sp->st_master);
+ else
+ printf("No current master ID.\n");
+ db_stat_dl("Number of times the master has changed.\n",
+ (u_long)sp->st_master_changes);
+ db_stat_dl("Number of messages received with a bad generation number.\n",
+ (u_long)sp->st_msgs_badgen);
+ db_stat_dl("Number of messages received and processed.\n",
+ (u_long)sp->st_msgs_processed);
+ db_stat_dl("Number of messages ignored due to pending recovery.\n",
+ (u_long)sp->st_msgs_recover);
+ db_stat_dl("Number of failed message sends.\n",
+ (u_long)sp->st_msgs_send_failures);
+ db_stat_dl("Number of messages sent.\n", (u_long)sp->st_msgs_sent);
+ db_stat_dl("Number of new site messages received.\n", (u_long)sp->st_newsites);
+ db_stat_dl("Transmission limited.\n", (u_long)sp->st_nthrottles);
+ db_stat_dl("Number of outdated conditions detected.\n",
+ (u_long)sp->st_outdated);
+ db_stat_dl("Number of transactions applied.\n", (u_long)sp->st_txns_applied);
+
+ db_stat_dl("Number of elections held.\n", (u_long)sp->st_elections);
+ db_stat_dl("Number of elections won.\n", (u_long)sp->st_elections_won);
+
+ if (sp->st_election_status == 0)
+ printf("No election in progress.\n");
+ else {
+ db_stat_dl("Current election phase.\n", (u_long)sp->st_election_status);
+ db_stat_dl("Election winner.\n",
+ (u_long)sp->st_election_cur_winner);
+ db_stat_dl("Election generation number.\n",
+ (u_long)sp->st_election_gen);
+ printf("%lu/%lu\tMaximum LSN of election winner.\n",
+ (u_long)sp->st_election_lsn.file,
+ (u_long)sp->st_election_lsn.offset);
+ db_stat_dl("Number of sites expected to participate in elections.\n",
+ (u_long)sp->st_election_nsites);
+ db_stat_dl("Election priority.\n", (u_long)sp->st_election_priority);
+ db_stat_dl("Election tiebreaker value.\n",
+ (u_long)sp->st_election_tiebreaker);
+ db_stat_dl("Votes received this election round.\n",
+ (u_long)sp->st_election_votes);
+ }
+
+ free(sp);
+
+ return (0);
+}
+
+/*
+ * txn_stats --
+ * Display transaction statistics.
+ */
+int
+db_stat_txn_stats(dbenv, flags)
+ DB_ENV *dbenv;
+ u_int32_t flags;
+{
+ DB_TXN_STAT *sp;
+ u_int32_t i;
+ int ret;
+ const char *p;
+
+ if ((ret = dbenv->txn_stat(dbenv, &sp, flags)) != 0) {
+ dbenv->err(dbenv, ret, NULL);
+ return (1);
+ }
+
+ p = sp->st_last_ckp.file == 0 ?
+ "No checkpoint LSN." : "File/offset for last checkpoint LSN.";
+ printf("%lu/%lu\t%s\n",
+ (u_long)sp->st_last_ckp.file, (u_long)sp->st_last_ckp.offset, p);
+ if (sp->st_time_ckp == 0)
+ printf("0\tNo checkpoint timestamp.\n");
+ else
+ printf("%.24s\tCheckpoint timestamp.\n",
+ ctime(&sp->st_time_ckp));
+ printf("%lx\tLast transaction ID allocated.\n",
+ (u_long)sp->st_last_txnid);
+ db_stat_dl("Maximum number of active transactions possible.\n",
+ (u_long)sp->st_maxtxns);
+ db_stat_dl("Active transactions.\n", (u_long)sp->st_nactive);
+ db_stat_dl("Maximum active transactions.\n", (u_long)sp->st_maxnactive);
+ db_stat_dl("Number of transactions begun.\n", (u_long)sp->st_nbegins);
+ db_stat_dl("Number of transactions aborted.\n", (u_long)sp->st_naborts);
+ db_stat_dl("Number of transactions committed.\n", (u_long)sp->st_ncommits);
+ db_stat_dl("Number of transactions restored.\n", (u_long)sp->st_nrestores);
+
+ db_stat_dl_bytes("Transaction region size",
+ (u_long)0, (u_long)0, (u_long)sp->st_regsize);
+ db_stat_dl("The number of region locks granted after waiting.\n",
+ (u_long)sp->st_region_wait);
+ db_stat_dl("The number of region locks granted without waiting.\n",
+ (u_long)sp->st_region_nowait);
+
+ qsort(sp->st_txnarray,
+ sp->st_nactive, sizeof(sp->st_txnarray[0]), db_stat_txn_compare);
+ for (i = 0; i < sp->st_nactive; ++i) {
+ printf("\tid: %lx; begin LSN: file/offset %lu/%lu",
+ (u_long)sp->st_txnarray[i].txnid,
+ (u_long)sp->st_txnarray[i].lsn.file,
+ (u_long)sp->st_txnarray[i].lsn.offset);
+ if (sp->st_txnarray[i].parentid == 0)
+ printf("\n");
+ else
+ printf(" parent: %lx\n",
+ (u_long)sp->st_txnarray[i].parentid);
+ }
+
+ free(sp);
+
+ return (0);
+}
+
+int
+db_stat_txn_compare(a1, b1)
+ const void *a1, *b1;
+{
+ const DB_TXN_ACTIVE *a, *b;
+
+ a = a1;
+ b = b1;
+
+ if (a->txnid > b->txnid)
+ return (1);
+ if (a->txnid < b->txnid)
+ return (-1);
+ return (0);
+}
+
+/*
+ * dl --
+ * Display a big value.
+ */
+void
+db_stat_dl(msg, value)
+ const char *msg;
+ u_long value;
+{
+ /*
+ * Two formats: values below 10 million are displayed as the number
+ * itself; anything at or above that is displayed as ###M.
+ */
+ if (value < 10000000)
+ printf("%lu\t%s", value, msg);
+ else
+ printf("%luM\t%s", value / 1000000, msg);
+}
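For example, db_stat_dl("Total log file flushes.\n", 1234) prints "1234", a tab and the message, while a value of 42000000 is abbreviated to "42M".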
+
+/*
+ * dl_bytes --
+ * Display a big number of bytes.
+ */
+void
+db_stat_dl_bytes(msg, gbytes, mbytes, bytes)
+ const char *msg;
+ u_long gbytes, mbytes, bytes;
+{
+ const char *sep;
+
+ /* Normalize the values. */
+ while (bytes >= MEGABYTE) {
+ ++mbytes;
+ bytes -= MEGABYTE;
+ }
+ while (mbytes >= GIGABYTE / MEGABYTE) {
+ ++gbytes;
+ mbytes -= GIGABYTE / MEGABYTE;
+ }
+
+ sep = "";
+ if (gbytes > 0) {
+ printf("%luGB", gbytes);
+ sep = " ";
+ }
+ if (mbytes > 0) {
+ printf("%s%luMB", sep, mbytes);
+ sep = " ";
+ }
+ if (bytes >= 1024) {
+ printf("%s%luKB", sep, bytes / 1024);
+ bytes %= 1024;
+ sep = " ";
+ }
+ if (bytes > 0)
+ printf("%s%luB", sep, bytes);
+
+ printf("\t%s.\n", msg);
+}
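Assuming the usual db_int.h definitions of MEGABYTE (2^20) and GIGABYTE (2^30), a call such as db_stat_dl_bytes("Total cache size", 0, 0, 2684354560) normalizes the raw count and prints "2GB 512MB", a tab and the message; components that normalize to zero are simply omitted.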
+
+/*
+ * prflags --
+ * Print out flag values.
+ */
+void
+db_stat_prflags(flags, fnp)
+ u_int32_t flags;
+ const FN *fnp;
+{
+ const char *sep;
+
+ sep = "\t";
+ printf("Flags:");
+ for (; fnp->mask != 0; ++fnp)
+ if (fnp->mask & flags) {
+ printf("%s%s", sep, fnp->name);
+ sep = ", ";
+ }
+ printf("\n");
+}
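For example, a btree metadata page with BTM_DUP and BTM_SUBDB set comes out of db_stat_btree_stats() as "Flags:" followed by a tab and "duplicates, multiple-databases".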
+
+/*
+ * db_init --
+ * Initialize the environment.
+ */
+int
+db_stat_db_init(dbenv, home, ttype, cache, is_private)
+ DB_ENV *dbenv;
+ char *home;
+ test_t ttype;
+ u_int32_t cache;
+ int *is_private;
+{
+ u_int32_t oflags;
+ int ret;
+
+ /*
+ * If our environment open fails, and we're trying to look at a
+ * shared region, it's a hard failure.
+ *
+ * We will probably just drop core if the environment we join does
+ * not include a memory pool. This is probably acceptable; trying
+ * to use an existing environment that does not contain a memory
+ * pool to look at a database can be safely construed as operator
+ * error, I think.
+ */
+ *is_private = 0;
+ if ((ret =
+ dbenv->open(dbenv, home, DB_JOINENV | DB_USE_ENVIRON, 0)) == 0)
+ return (0);
+ if (ttype != T_DB && ttype != T_LOG) {
+ dbenv->err(dbenv, ret, "DB_ENV->open%s%s",
+ home == NULL ? "" : ": ", home == NULL ? "" : home);
+ return (1);
+ }
+
+ /*
+ * We're looking at a database or set of log files and no environment
+ * exists. Create one, but make it private so no files are actually
+ * created. Declare a reasonably large cache so that we don't fail
+ * when reporting statistics on large databases.
+ *
+ * An environment is required to look at databases because we may be
+ * trying to look at databases in directories other than the current
+ * one.
+ */
+ if ((ret = dbenv->set_cachesize(dbenv, 0, cache, 1)) != 0) {
+ dbenv->err(dbenv, ret, "set_cachesize");
+ return (1);
+ }
+ *is_private = 1;
+ oflags = DB_CREATE | DB_PRIVATE | DB_USE_ENVIRON;
+ if (ttype == T_DB)
+ oflags |= DB_INIT_MPOOL;
+ if (ttype == T_LOG)
+ oflags |= DB_INIT_LOG;
+ if ((ret = dbenv->open(dbenv, home, oflags, 0)) == 0)
+ return (0);
+
+ /* An environment is required. */
+ dbenv->err(dbenv, ret, "open");
+ return (1);
+}
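The join-then-fall-back pattern above is how these utilities cope with databases that live outside any existing environment. A minimal sketch of the same pattern in isolation (the function name, the caller-supplied home and the fixed 1MB cache are placeholders, not part of this file):

#include <db.h>

/*
 * Join an existing environment if one is present; otherwise fall back to
 * a private, in-memory environment with a cache big enough to inspect a
 * database.  Error reporting is left to the caller.
 */
int
open_env_for_stats(DB_ENV *dbenv, const char *home)
{
	int ret;

	if (dbenv->open(dbenv, home, DB_JOINENV | DB_USE_ENVIRON, 0) == 0)
		return (0);

	/* No shared region: create a private environment with a mpool. */
	if ((ret = dbenv->set_cachesize(dbenv, 0, 1024 * 1024, 1)) != 0)
		return (ret);
	return (dbenv->open(dbenv, home,
	    DB_CREATE | DB_PRIVATE | DB_USE_ENVIRON | DB_INIT_MPOOL, 0));
}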
+
+/*
+ * argcheck --
+ * Return if argument flags are okay.
+ */
+int
+db_stat_argcheck(arg, ok_args)
+ char *arg;
+ const char *ok_args;
+{
+ for (; *arg != '\0'; ++arg)
+ if (strchr(ok_args, *arg) == NULL)
+ return (0);
+ return (1);
+}
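For example, db_stat_argcheck("lo", "Aclmop") returns 1, so "-C lo" is accepted, while db_stat_argcheck("x", "Aclmop") returns 0 and the caller falls through to db_stat_usage().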
+
+int
+db_stat_usage()
+{
+ fprintf(stderr, "%s\n\t%s\n",
+ "usage: db_stat [-celmNrtVZ] [-C Aclmop]",
+ "[-d file [-f] [-s database]] [-h home] [-M Ahlm] [-P password]");
+ return (EXIT_FAILURE);
+}
+
+int
+db_stat_version_check(progname)
+ const char *progname;
+{
+ int v_major, v_minor, v_patch;
+
+ /* Make sure we're loaded with the right version of the DB library. */
+ (void)db_version(&v_major, &v_minor, &v_patch);
+ if (v_major != DB_VERSION_MAJOR ||
+ v_minor != DB_VERSION_MINOR || v_patch != DB_VERSION_PATCH) {
+ fprintf(stderr,
+ "%s: version %d.%d.%d doesn't match library version %d.%d.%d\n",
+ progname, DB_VERSION_MAJOR, DB_VERSION_MINOR,
+ DB_VERSION_PATCH, v_major, v_minor, v_patch);
+ return (EXIT_FAILURE);
+ }
+ return (0);
+}
diff --git a/bdb/build_vxworks/db_stat/db_stat.wpj b/bdb/build_vxworks/db_stat/db_stat.wpj
new file mode 100755
index 00000000000..ba78c4cc3fd
--- /dev/null
+++ b/bdb/build_vxworks/db_stat/db_stat.wpj
@@ -0,0 +1,160 @@
+Document file - DO NOT EDIT
+
+<BEGIN> BUILD_PENTIUMgnu_BUILDRULE
+db_stat.out
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_AR
+ar386
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_ARCHIVE
+$(PRJ_DIR)/PENTIUMgnu/db_stat.a
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_AS
+cc386
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_CC
+cc386
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_CFLAGS
+-g \
+ -mpentium \
+ -ansi \
+ -nostdinc \
+ -DRW_MULTI_THREAD \
+ -D_REENTRANT \
+ -fvolatile \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -I$(PRJ_DIR)/.. \
+ -I$(PRJ_DIR)/../.. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_CFLAGS_AS
+-g \
+ -mpentium \
+ -ansi \
+ -nostdinc \
+ -fvolatile \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -P \
+ -x \
+ assembler-with-cpp \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_CPP
+cc386 -E -P -xc
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_LD
+ld386
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_LDDEPS
+
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_LDFLAGS
+-X -N
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_LD_PARTIAL_FLAGS
+-X -r
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_NM
+nm386 -g
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_OPTION_DEFINE_MACRO
+-D
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_OPTION_INCLUDE_DIR
+-I
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_POST_BUILD_RULE
+
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_PRJ_LIBS
+
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_SIZE
+size386
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_RO_DEPEND_PATH
+{$(WIND_BASE)/target/h/} \
+ {$(WIND_BASE)/target/src/} \
+ {$(WIND_BASE)/target/config/}
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_TC
+::tc_PENTIUMgnu
+<END>
+
+<BEGIN> BUILD_RULE_archive
+
+<END>
+
+<BEGIN> BUILD_RULE_db_stat.out
+
+<END>
+
+<BEGIN> BUILD_RULE_objects
+
+<END>
+
+<BEGIN> BUILD__CURRENT
+PENTIUMgnu
+<END>
+
+<BEGIN> BUILD__LIST
+PENTIUMgnu
+<END>
+
+<BEGIN> CORE_INFO_TYPE
+::prj_vxApp
+<END>
+
+<BEGIN> CORE_INFO_VERSION
+2.0
+<END>
+
+<BEGIN> FILE_db_stat.c_dependDone
+FALSE
+<END>
+
+<BEGIN> FILE_db_stat.c_dependencies
+
+<END>
+
+<BEGIN> FILE_db_stat.c_objects
+db_stat.o
+<END>
+
+<BEGIN> FILE_db_stat.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> PROJECT_FILES
+$(PRJ_DIR)/db_stat.c
+<END>
+
+<BEGIN> userComments
+db_stat
+<END>
diff --git a/bdb/build_vxworks/db_stat/db_stat/Makefile.custom b/bdb/build_vxworks/db_stat/db_stat/Makefile.custom
new file mode 100644
index 00000000000..ca781f7b251
--- /dev/null
+++ b/bdb/build_vxworks/db_stat/db_stat/Makefile.custom
@@ -0,0 +1,51 @@
+#
+# Custom Makefile shell
+#
+# This file may be edited freely, since it will not be regenerated
+# by the project manager.
+#
+# Use this makefile to define rules to make external binaries
+# and deposit them in the $(EXTERNAL_BINARIES_DIR) directory.
+#
+# If you have specified external modules during your component
+# creation, you will find make rules already in place below.
+# You will likely have to edit these to suit your individual
+# build setup.
+#
+# You may wish to use the CPU, BUILD_SPEC or TOOL make variables in
+# your Makefile to support builds for different architectures. Use
+# the FORCE_EXTERNAL_MAKE phony target to ensure that your external
+# make always runs.
+#
+# The example below assumes that your custom makefile is in the
+# mySourceTree directory, and that the binary file it produces
+# is placed into the $(BUILD_SPEC) sub-directory.
+#
+# EXTERNAL_SOURCE_BASE = /folk/me/mySourceTree
+# EXTERNAL_MODULE = myLibrary.o
+# EXTERNAL_MAKE = make
+#
+# $(EXTERNAL_BINARIES_DIR)/$(EXTERNAL_MODULE) : FORCE_EXTERNAL_MAKE
+# $(EXTERNAL_MAKE) -C $(EXTERNAL_SOURCE_BASE) \
+# -f $(EXTERNAL_SOURCE_BASE)/Makefile \
+# CPU=$(CPU) BUILD_SPEC=$(BUILD_SPEC) $(@F)
+# $(CP) $(subst /,$(DIRCHAR),$(EXTERNAL_SOURCE_BASE)/$(BUILD_SPEC)/$(@F) $@)
+#
+# If you are not adding your external modules from the component wizard,
+# you will have to include them in your component yourself:
+#
+# From the GUI, you can do this with the Component's 'Add external module'
+# dialog.
+#
+# If you are using the command line, add the module(s) by editing the
+# MODULES line in component.cdf file, e.g.
+#
+# Component INCLUDE_MYCOMPONENT {
+#
+# MODULES foo.o goo.o \
+# myLibrary.o
+#
+
+
+# rules to build custom libraries
+
diff --git a/bdb/build_vxworks/db_stat/db_stat/component.cdf b/bdb/build_vxworks/db_stat/db_stat/component.cdf
new file mode 100755
index 00000000000..728544eabff
--- /dev/null
+++ b/bdb/build_vxworks/db_stat/db_stat/component.cdf
@@ -0,0 +1,30 @@
+/* component.cdf - dynamically updated configuration */
+
+/*
+ * NOTE: you may edit this file to alter the configuration
+ * But all non-configuration information, including comments,
+ * will be lost upon rebuilding this project.
+ */
+
+/* Component information */
+
+Component INCLUDE_DB_STAT {
+ ENTRY_POINTS ALL_GLOBAL_SYMBOLS
+ MODULES db_stat.o
+ NAME db_stat
+ PREF_DOMAIN ANY
+ _INIT_ORDER usrComponentsInit
+}
+
+/* EntryPoint information */
+
+/* Module information */
+
+Module db_stat.o {
+
+ NAME db_stat.o
+ SRC_PATH_NAME $PRJ_DIR/../db_stat.c
+}
+
+/* Parameter information */
+
diff --git a/bdb/build_vxworks/db_stat/db_stat/component.wpj b/bdb/build_vxworks/db_stat/db_stat/component.wpj
new file mode 100755
index 00000000000..2020d712dee
--- /dev/null
+++ b/bdb/build_vxworks/db_stat/db_stat/component.wpj
@@ -0,0 +1,475 @@
+Document file - DO NOT EDIT
+
+<BEGIN> CORE_INFO_TYPE
+::prj_component
+<END>
+
+<BEGIN> CORE_INFO_VERSION
+AE1.1
+<END>
+
+<BEGIN> BUILD__CURRENT
+PENTIUM2gnu.debug
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_CURRENT_TARGET
+default
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_DEFAULTFORCPU
+1
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../db_stat.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../db_stat.c_objects
+db_stat.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../db_stat.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../db_stat.c_objects
+db_stat.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../db_stat.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/compConfig.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/compConfig.c_objects
+compConfig.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/compConfig.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_AR
+arpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_AS
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_CC
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_CFLAGS
+-mcpu=pentiumpro \
+ -march=pentiumpro \
+ -ansi \
+ -DRW_MULTI_THREAD \
+ -D_REENTRANT \
+ -g \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -MD \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -I$(PRJ_DIR)/../.. \
+ -I$(PRJ_DIR)/../../.. \
+ -DCPU=PENTIUM2
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_CFLAGS_AS
+-mcpu=pentiumpro \
+ -march=pentiumpro \
+ -ansi \
+ -g \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -P \
+ -x \
+ assembler-with-cpp \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM2
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_CPP
+ccpentium -E -P
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_CPPFILT
+c++filtpentium --strip-underscores
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_LD
+ldpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_LDFLAGS
+-X
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_LDPARTIAL
+ccpentium \
+ -B$(WIND_BASE)/host/$(WIND_HOST_TYPE)/lib/gcc-lib/ \
+ -nostdlib \
+ -r \
+ -Wl,-X
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_LD_PARTIAL_FLAGS
+-X -r
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_NM
+nmpentium -g
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_OPTION_DEFINE_MACRO
+-D
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_OPTION_GENERATE_DEPENDENCY_FILE
+-MD
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_OPTION_INCLUDE_DIR
+-I
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_RELEASE
+0
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_SIZE
+sizepentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_RELEASE
+0
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_RO_DEPEND_PATH
+$(WIND_BASE)/target/h/
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_TC
+::tc_PENTIUM2gnu.debug
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_DEFAULTFORCPU
+0
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../db_stat.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../db_stat.c_objects
+db_stat.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../db_stat.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/compConfig.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/compConfig.c_objects
+compConfig.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/compConfig.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_AR
+arpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_AS
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_CC
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_CFLAGS
+-mcpu=pentiumpro \
+ -march=pentiumpro \
+ -ansi \
+ -DRW_MULTI_THREAD \
+ -D_REENTRANT \
+ -O2 \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -MD \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -I$(PRJ_DIR)/../.. \
+ -I$(PRJ_DIR)/../../.. \
+ -DCPU=PENTIUM2
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_CFLAGS_AS
+-mcpu=pentiumpro \
+ -march=pentiumpro \
+ -ansi \
+ -O2 \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -P \
+ -x \
+ assembler-with-cpp \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM2
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_CPP
+ccpentium -E -P
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_CPPFILT
+c++filtpentium --strip-underscores
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_LD
+ldpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_LDFLAGS
+-X
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_LDPARTIAL
+ccpentium \
+ -B$(WIND_BASE)/host/$(WIND_HOST_TYPE)/lib/gcc-lib/ \
+ -nostdlib \
+ -r \
+ -Wl,-X
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_LD_PARTIAL_FLAGS
+-X -r
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_NM
+nmpentium -g
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_OPTION_DEFINE_MACRO
+-D
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_OPTION_GENERATE_DEPENDENCY_FILE
+-MD
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_OPTION_INCLUDE_DIR
+-I
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_RELEASE
+1
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_SIZE
+sizepentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_RELEASE
+1
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_RO_DEPEND_PATH
+$(WIND_BASE)/target/h/
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_TC
+::tc_PENTIUM2gnu.release
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_DEFAULTFORCPU
+1
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../db_stat.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../db_stat.c_objects
+db_stat.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../db_stat.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/compConfig.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/compConfig.c_objects
+compConfig.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/compConfig.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_AR
+arpentium
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_AS
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_CC
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_CFLAGS
+-mcpu=pentium \
+ -march=pentium \
+ -ansi \
+ -DRW_MULTI_THREAD \
+ -D_REENTRANT \
+ -g \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -MD \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -I$(PRJ_DIR)/../.. \
+ -I$(PRJ_DIR)/../../.. \
+ -DCPU=PENTIUM
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_CFLAGS_AS
+-mcpu=pentium \
+ -march=pentium \
+ -ansi \
+ -g \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -P \
+ -x \
+ assembler-with-cpp \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_CPP
+ccpentium -E -P
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_CPPFILT
+c++filtpentium --strip-underscores
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_LD
+ldpentium
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_LDFLAGS
+-X
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_LDPARTIAL
+ccpentium \
+ -B$(WIND_BASE)/host/$(WIND_HOST_TYPE)/lib/gcc-lib/ \
+ -nostdlib \
+ -r \
+ -Wl,-X
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_LD_PARTIAL_FLAGS
+-X -r
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_NM
+nmpentium -g
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_OPTION_DEFINE_MACRO
+-D
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_OPTION_GENERATE_DEPENDENCY_FILE
+-MD
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_OPTION_INCLUDE_DIR
+-I
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_RELEASE
+0
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_SIZE
+sizepentium
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_RELEASE
+0
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_RO_DEPEND_PATH
+$(WIND_BASE)/target/h/
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_TC
+::tc_PENTIUMgnu.debug
+<END>
+
+<BEGIN> BUILD__LIST
+PENTIUM2gnu.debug PENTIUM2gnu.release PENTIUMgnu.debug
+<END>
+
+<BEGIN> PROJECT_FILES
+$(PRJ_DIR)/../db_stat.c \
+ $(PRJ_DIR)/compConfig.c
+<END>
+
+<BEGIN> WCC__CDF_PATH
+$(PRJ_DIR)
+<END>
+
+<BEGIN> WCC__CURRENT
+PENTIUM2gnu.debug
+<END>
+
+<BEGIN> WCC__LIST
+PENTIUM2gnu.debug
+<END>
+
+<BEGIN> WCC__MXR_LIBS
+lib$(CPU)$(TOOL)vx.a
+<END>
+
+<BEGIN> WCC__OBJS_PATH
+$(WIND_BASE)/target/lib/obj$CPU$TOOLvx
+<END>
+
diff --git a/bdb/build_vxworks/db_upgrade/db_upgrade.c b/bdb/build_vxworks/db_upgrade/db_upgrade.c
new file mode 100644
index 00000000000..6f9138b59b9
--- /dev/null
+++ b/bdb/build_vxworks/db_upgrade/db_upgrade.c
@@ -0,0 +1,205 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char copyright[] =
+ "Copyright (c) 1996-2002\nSleepycat Software Inc. All rights reserved.\n";
+static const char revid[] =
+ "$Id: db_upgrade.c,v 1.31 2002/03/28 20:13:47 bostic Exp $";
+#endif
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#endif
+
+#include "db_int.h"
+
+int db_upgrade_main __P((int, char *[]));
+int db_upgrade_usage __P((void));
+int db_upgrade_version_check __P((const char *));
+
+int
+db_upgrade(args)
+ char *args;
+{
+ int argc;
+ char **argv;
+
+ __db_util_arg("db_upgrade", args, &argc, &argv);
+ return (db_upgrade_main(argc, argv) ? EXIT_FAILURE : EXIT_SUCCESS);
+}
+
+#include <stdio.h>
+#define ERROR_RETURN ERROR
+
+int
+db_upgrade_main(argc, argv)
+ int argc;
+ char *argv[];
+{
+ extern char *optarg;
+ extern int optind, __db_getopt_reset;
+ const char *progname = "db_upgrade";
+ DB *dbp;
+ DB_ENV *dbenv;
+ u_int32_t flags;
+ int ch, e_close, exitval, nflag, ret, t_ret;
+ char *home, *passwd;
+
+ if ((ret = db_upgrade_version_check(progname)) != 0)
+ return (ret);
+
+ dbenv = NULL;
+ flags = nflag = 0;
+ e_close = exitval = 0;
+ home = passwd = NULL;
+ __db_getopt_reset = 1;
+ while ((ch = getopt(argc, argv, "h:NP:sV")) != EOF)
+ switch (ch) {
+ case 'h':
+ home = optarg;
+ break;
+ case 'N':
+ nflag = 1;
+ break;
+ case 'P':
+ passwd = strdup(optarg);
+ memset(optarg, 0, strlen(optarg));
+ if (passwd == NULL) {
+ fprintf(stderr, "%s: strdup: %s\n",
+ progname, strerror(errno));
+ return (EXIT_FAILURE);
+ }
+ break;
+ case 's':
+ LF_SET(DB_DUPSORT);
+ break;
+ case 'V':
+ printf("%s\n", db_version(NULL, NULL, NULL));
+ return (EXIT_SUCCESS);
+ case '?':
+ default:
+ return (db_upgrade_usage());
+ }
+ argc -= optind;
+ argv += optind;
+
+ if (argc <= 0)
+ return (db_upgrade_usage());
+
+ /* Handle possible interruptions. */
+ __db_util_siginit();
+
+ /*
+ * Create an environment object and initialize it for error
+ * reporting.
+ */
+ if ((ret = db_env_create(&dbenv, 0)) != 0) {
+ fprintf(stderr, "%s: db_env_create: %s\n",
+ progname, db_strerror(ret));
+ goto shutdown;
+ }
+ e_close = 1;
+
+ dbenv->set_errfile(dbenv, stderr);
+ dbenv->set_errpfx(dbenv, progname);
+
+ if (nflag) {
+ if ((ret = dbenv->set_flags(dbenv, DB_NOLOCKING, 1)) != 0) {
+ dbenv->err(dbenv, ret, "set_flags: DB_NOLOCKING");
+ goto shutdown;
+ }
+ if ((ret = dbenv->set_flags(dbenv, DB_NOPANIC, 1)) != 0) {
+ dbenv->err(dbenv, ret, "set_flags: DB_NOPANIC");
+ goto shutdown;
+ }
+ }
+
+ if (passwd != NULL && (ret = dbenv->set_encrypt(dbenv,
+ passwd, DB_ENCRYPT_AES)) != 0) {
+ dbenv->err(dbenv, ret, "set_passwd");
+ goto shutdown;
+ }
+
+ /*
+ * If attaching to a pre-existing environment fails, create a
+ * private one and try again.
+ */
+ if ((ret = dbenv->open(dbenv,
+ home, DB_JOINENV | DB_USE_ENVIRON, 0)) != 0 &&
+ (ret = dbenv->open(dbenv, home,
+ DB_CREATE | DB_INIT_MPOOL | DB_PRIVATE | DB_USE_ENVIRON, 0)) != 0) {
+ dbenv->err(dbenv, ret, "open");
+ goto shutdown;
+ }
+
+ for (; !__db_util_interrupted() && argv[0] != NULL; ++argv) {
+ if ((ret = db_create(&dbp, dbenv, 0)) != 0) {
+ fprintf(stderr,
+ "%s: db_create: %s\n", progname, db_strerror(ret));
+ goto shutdown;
+ }
+ dbp->set_errfile(dbp, stderr);
+ dbp->set_errpfx(dbp, progname);
+ if ((ret = dbp->upgrade(dbp, argv[0], flags)) != 0)
+ dbp->err(dbp, ret, "DB->upgrade: %s", argv[0]);
+ if ((t_ret = dbp->close(dbp, 0)) != 0 && ret == 0) {
+			dbenv->err(dbenv, t_ret, "DB->close: %s", argv[0]);
+ ret = t_ret;
+ }
+ if (ret != 0)
+ goto shutdown;
+ }
+
+ if (0) {
+shutdown: exitval = 1;
+ }
+ if (e_close && (ret = dbenv->close(dbenv, 0)) != 0) {
+ exitval = 1;
+ fprintf(stderr,
+ "%s: dbenv->close: %s\n", progname, db_strerror(ret));
+ }
+
+ /* Resend any caught signal. */
+ __db_util_sigresend();
+
+ return (exitval == 0 ? EXIT_SUCCESS : EXIT_FAILURE);
+}
+
+int
+db_upgrade_usage()
+{
+ fprintf(stderr, "%s\n",
+ "usage: db_upgrade [-NsV] [-h home] [-P password] db_file ...");
+ return (EXIT_FAILURE);
+}
+
+int
+db_upgrade_version_check(progname)
+ const char *progname;
+{
+ int v_major, v_minor, v_patch;
+
+ /* Make sure we're loaded with the right version of the DB library. */
+ (void)db_version(&v_major, &v_minor, &v_patch);
+ if (v_major != DB_VERSION_MAJOR ||
+ v_minor != DB_VERSION_MINOR || v_patch != DB_VERSION_PATCH) {
+ fprintf(stderr,
+ "%s: version %d.%d.%d doesn't match library version %d.%d.%d\n",
+ progname, DB_VERSION_MAJOR, DB_VERSION_MINOR,
+ DB_VERSION_PATCH, v_major, v_minor, v_patch);
+ return (EXIT_FAILURE);
+ }
+ return (0);
+}
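
Like the other VxWorks wrappers in this import, db_upgrade() takes its
whole command line as one string, splits it with __db_util_arg(), and
passes the result to db_upgrade_main().  Assuming the Tornado/windsh
setup described in the dbdemo README later in this change, an
invocation might look like the following sketch (the home directory
and database file name are purely illustrative):

   -> db_upgrade "-h /tmp/dbhome mydb.db"

The accepted flags are the ones in db_upgrade_main()'s getopt string
("h:NP:sV"), matching the usage message above.
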
diff --git a/bdb/build_vxworks/db_upgrade/db_upgrade.wpj b/bdb/build_vxworks/db_upgrade/db_upgrade.wpj
new file mode 100755
index 00000000000..65f834d62d7
--- /dev/null
+++ b/bdb/build_vxworks/db_upgrade/db_upgrade.wpj
@@ -0,0 +1,160 @@
+Document file - DO NOT EDIT
+
+<BEGIN> BUILD_PENTIUMgnu_BUILDRULE
+db_upgrade.out
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_AR
+ar386
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_ARCHIVE
+$(PRJ_DIR)/PENTIUMgnu/db_upgrade.a
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_AS
+cc386
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_CC
+cc386
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_CFLAGS
+-g \
+ -mpentium \
+ -ansi \
+ -nostdinc \
+ -DRW_MULTI_THREAD \
+ -D_REENTRANT \
+ -fvolatile \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -I$(PRJ_DIR)/.. \
+ -I$(PRJ_DIR)/../.. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_CFLAGS_AS
+-g \
+ -mpentium \
+ -ansi \
+ -nostdinc \
+ -fvolatile \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -P \
+ -x \
+ assembler-with-cpp \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_CPP
+cc386 -E -P -xc
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_LD
+ld386
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_LDDEPS
+
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_LDFLAGS
+-X -N
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_LD_PARTIAL_FLAGS
+-X -r
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_NM
+nm386 -g
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_OPTION_DEFINE_MACRO
+-D
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_OPTION_INCLUDE_DIR
+-I
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_POST_BUILD_RULE
+
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_PRJ_LIBS
+
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_SIZE
+size386
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_RO_DEPEND_PATH
+{$(WIND_BASE)/target/h/} \
+ {$(WIND_BASE)/target/src/} \
+ {$(WIND_BASE)/target/config/}
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_TC
+::tc_PENTIUMgnu
+<END>
+
+<BEGIN> BUILD_RULE_archive
+
+<END>
+
+<BEGIN> BUILD_RULE_db_upgrade.out
+
+<END>
+
+<BEGIN> BUILD_RULE_objects
+
+<END>
+
+<BEGIN> BUILD__CURRENT
+PENTIUMgnu
+<END>
+
+<BEGIN> BUILD__LIST
+PENTIUMgnu
+<END>
+
+<BEGIN> CORE_INFO_TYPE
+::prj_vxApp
+<END>
+
+<BEGIN> CORE_INFO_VERSION
+2.0
+<END>
+
+<BEGIN> FILE_db_upgrade.c_dependDone
+FALSE
+<END>
+
+<BEGIN> FILE_db_upgrade.c_dependencies
+
+<END>
+
+<BEGIN> FILE_db_upgrade.c_objects
+db_upgrade.o
+<END>
+
+<BEGIN> FILE_db_upgrade.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> PROJECT_FILES
+$(PRJ_DIR)/db_upgrade.c
+<END>
+
+<BEGIN> userComments
+db_upgrade
+<END>
diff --git a/bdb/build_vxworks/db_upgrade/db_upgrade/Makefile.custom b/bdb/build_vxworks/db_upgrade/db_upgrade/Makefile.custom
new file mode 100644
index 00000000000..ca781f7b251
--- /dev/null
+++ b/bdb/build_vxworks/db_upgrade/db_upgrade/Makefile.custom
@@ -0,0 +1,51 @@
+#
+# Custom Makefile shell
+#
+# This file may be edited freely, since it will not be regenerated
+# by the project manager.
+#
+# Use this makefile to define rules to make external binaries
+# and deposit them in the $(EXTERNAL_BINARIES_DIR) directory.
+#
+# If you have specified external modules during your component
+# creation, you will find make rules already in place below.
+# You will likely have to edit these to suit your individual
+# build setup.
+#
+# You may wish to use the CPU, BUILD_SPEC or TOOL make variables in
+# your Makefile to support builds for different architectures. Use
+# the FORCE_EXTERNAL_MAKE phony target to ensure that your external
+# make always runs.
+#
+# The example below assumes that your custom makefile is in the
+# mySourceTree directory, and that the binary file it produces
+# is placed into the $(BUILD_SPEC) sub-directory.
+#
+# EXTERNAL_SOURCE_BASE = /folk/me/mySourceTree
+# EXTERNAL_MODULE = myLibrary.o
+# EXTERNAL_MAKE = make
+#
+# $(EXTERNAL_BINARIES_DIR)/$(EXTERNAL_MODULE) : FORCE_EXTERNAL_MAKE
+# $(EXTERNAL_MAKE) -C $(EXTERNAL_SOURCE_BASE) \
+# -f $(EXTERNAL_SOURCE_BASE)/Makefile \
+# CPU=$(CPU) BUILD_SPEC=$(BUILD_SPEC) $(@F)
+# $(CP) $(subst /,$(DIRCHAR),$(EXTERNAL_SOURCE_BASE)/$(BUILD_SPEC)/$(@F) $@)
+#
+# If you are not adding your external modules from the component wizard,
+# you will have to include them in your component yourself:
+#
+# From the GUI, you can do this with the Component's 'Add external module'
+# dialog.
+#
+# If you are using the command line, add the module(s) by editing the
+# MODULES line in component.cdf file, e.g.
+#
+# Component INCLUDE_MYCOMPONENT {
+#
+# MODULES foo.o goo.o \
+# myLibrary.o
+#
+
+
+# rules to build custom libraries
+
diff --git a/bdb/build_vxworks/db_upgrade/db_upgrade/component.cdf b/bdb/build_vxworks/db_upgrade/db_upgrade/component.cdf
new file mode 100755
index 00000000000..7bbdebd4999
--- /dev/null
+++ b/bdb/build_vxworks/db_upgrade/db_upgrade/component.cdf
@@ -0,0 +1,30 @@
+/* component.cdf - dynamically updated configuration */
+
+/*
+ * NOTE: you may edit this file to alter the configuration.
+ * But all non-configuration information, including comments,
+ * will be lost upon rebuilding this project.
+ */
+
+/* Component information */
+
+Component INCLUDE_DB_UPGRADE {
+ ENTRY_POINTS ALL_GLOBAL_SYMBOLS
+ MODULES db_upgrade.o
+ NAME db_upgrade
+ PREF_DOMAIN ANY
+ _INIT_ORDER usrComponentsInit
+}
+
+/* EntryPoint information */
+
+/* Module information */
+
+Module db_upgrade.o {
+
+ NAME db_upgrade.o
+ SRC_PATH_NAME $PRJ_DIR/../db_upgrade.c
+}
+
+/* Parameter information */
+
diff --git a/bdb/build_vxworks/db_upgrade/db_upgrade/component.wpj b/bdb/build_vxworks/db_upgrade/db_upgrade/component.wpj
new file mode 100755
index 00000000000..1cc5f303e5d
--- /dev/null
+++ b/bdb/build_vxworks/db_upgrade/db_upgrade/component.wpj
@@ -0,0 +1,475 @@
+Document file - DO NOT EDIT
+
+<BEGIN> CORE_INFO_TYPE
+::prj_component
+<END>
+
+<BEGIN> CORE_INFO_VERSION
+AE1.1
+<END>
+
+<BEGIN> BUILD__CURRENT
+PENTIUM2gnu.debug
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_CURRENT_TARGET
+default
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_DEFAULTFORCPU
+1
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../db_upgrade.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../db_upgrade.c_objects
+db_upgrade.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../db_upgrade.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../db_upgrade.c_objects
+db_upgrade.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../db_upgrade.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/compConfig.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/compConfig.c_objects
+compConfig.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/compConfig.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_AR
+arpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_AS
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_CC
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_CFLAGS
+-mcpu=pentiumpro \
+ -march=pentiumpro \
+ -ansi \
+ -DRW_MULTI_THREAD \
+ -D_REENTRANT \
+ -g \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -MD \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -I$(PRJ_DIR)/../.. \
+ -I$(PRJ_DIR)/../../.. \
+ -DCPU=PENTIUM2
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_CFLAGS_AS
+-mcpu=pentiumpro \
+ -march=pentiumpro \
+ -ansi \
+ -g \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -P \
+ -x \
+ assembler-with-cpp \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM2
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_CPP
+ccpentium -E -P
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_CPPFILT
+c++filtpentium --strip-underscores
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_LD
+ldpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_LDFLAGS
+-X
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_LDPARTIAL
+ccpentium \
+ -B$(WIND_BASE)/host/$(WIND_HOST_TYPE)/lib/gcc-lib/ \
+ -nostdlib \
+ -r \
+ -Wl,-X
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_LD_PARTIAL_FLAGS
+-X -r
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_NM
+nmpentium -g
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_OPTION_DEFINE_MACRO
+-D
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_OPTION_GENERATE_DEPENDENCY_FILE
+-MD
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_OPTION_INCLUDE_DIR
+-I
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_RELEASE
+0
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_SIZE
+sizepentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_RELEASE
+0
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_RO_DEPEND_PATH
+$(WIND_BASE)/target/h/
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_TC
+::tc_PENTIUM2gnu.debug
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_DEFAULTFORCPU
+0
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../db_upgrade.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../db_upgrade.c_objects
+db_upgrade.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../db_upgrade.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/compConfig.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/compConfig.c_objects
+compConfig.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/compConfig.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_AR
+arpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_AS
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_CC
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_CFLAGS
+-mcpu=pentiumpro \
+ -march=pentiumpro \
+ -ansi \
+ -DRW_MULTI_THREAD \
+ -D_REENTRANT \
+ -O2 \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -MD \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -I$(PRJ_DIR)/../.. \
+ -I$(PRJ_DIR)/../../.. \
+ -DCPU=PENTIUM2
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_CFLAGS_AS
+-mcpu=pentiumpro \
+ -march=pentiumpro \
+ -ansi \
+ -O2 \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -P \
+ -x \
+ assembler-with-cpp \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM2
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_CPP
+ccpentium -E -P
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_CPPFILT
+c++filtpentium --strip-underscores
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_LD
+ldpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_LDFLAGS
+-X
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_LDPARTIAL
+ccpentium \
+ -B$(WIND_BASE)/host/$(WIND_HOST_TYPE)/lib/gcc-lib/ \
+ -nostdlib \
+ -r \
+ -Wl,-X
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_LD_PARTIAL_FLAGS
+-X -r
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_NM
+nmpentium -g
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_OPTION_DEFINE_MACRO
+-D
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_OPTION_GENERATE_DEPENDENCY_FILE
+-MD
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_OPTION_INCLUDE_DIR
+-I
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_RELEASE
+1
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_SIZE
+sizepentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_RELEASE
+1
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_RO_DEPEND_PATH
+$(WIND_BASE)/target/h/
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_TC
+::tc_PENTIUM2gnu.release
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_DEFAULTFORCPU
+1
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../db_upgrade.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../db_upgrade.c_objects
+db_upgrade.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../db_upgrade.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/compConfig.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/compConfig.c_objects
+compConfig.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/compConfig.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_AR
+arpentium
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_AS
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_CC
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_CFLAGS
+-mcpu=pentium \
+ -march=pentium \
+ -ansi \
+ -DRW_MULTI_THREAD \
+ -D_REENTRANT \
+ -g \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -MD \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -I$(PRJ_DIR)/../.. \
+ -I$(PRJ_DIR)/../../.. \
+ -DCPU=PENTIUM
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_CFLAGS_AS
+-mcpu=pentium \
+ -march=pentium \
+ -ansi \
+ -g \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -P \
+ -x \
+ assembler-with-cpp \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_CPP
+ccpentium -E -P
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_CPPFILT
+c++filtpentium --strip-underscores
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_LD
+ldpentium
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_LDFLAGS
+-X
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_LDPARTIAL
+ccpentium \
+ -B$(WIND_BASE)/host/$(WIND_HOST_TYPE)/lib/gcc-lib/ \
+ -nostdlib \
+ -r \
+ -Wl,-X
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_LD_PARTIAL_FLAGS
+-X -r
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_NM
+nmpentium -g
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_OPTION_DEFINE_MACRO
+-D
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_OPTION_GENERATE_DEPENDENCY_FILE
+-MD
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_OPTION_INCLUDE_DIR
+-I
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_RELEASE
+0
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_SIZE
+sizepentium
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_RELEASE
+0
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_RO_DEPEND_PATH
+$(WIND_BASE)/target/h/
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_TC
+::tc_PENTIUMgnu.debug
+<END>
+
+<BEGIN> BUILD__LIST
+PENTIUM2gnu.debug PENTIUM2gnu.release PENTIUMgnu.debug
+<END>
+
+<BEGIN> PROJECT_FILES
+$(PRJ_DIR)/../db_upgrade.c \
+ $(PRJ_DIR)/compConfig.c
+<END>
+
+<BEGIN> WCC__CDF_PATH
+$(PRJ_DIR)
+<END>
+
+<BEGIN> WCC__CURRENT
+PENTIUM2gnu.debug
+<END>
+
+<BEGIN> WCC__LIST
+PENTIUM2gnu.debug
+<END>
+
+<BEGIN> WCC__MXR_LIBS
+lib$(CPU)$(TOOL)vx.a
+<END>
+
+<BEGIN> WCC__OBJS_PATH
+$(WIND_BASE)/target/lib/obj$CPU$TOOLvx
+<END>
+
diff --git a/bdb/build_vxworks/db_verify/db_verify.c b/bdb/build_vxworks/db_verify/db_verify.c
new file mode 100644
index 00000000000..cfa31195a85
--- /dev/null
+++ b/bdb/build_vxworks/db_verify/db_verify.c
@@ -0,0 +1,263 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char copyright[] =
+ "Copyright (c) 1996-2002\nSleepycat Software Inc. All rights reserved.\n";
+static const char revid[] =
+ "$Id: db_verify.c,v 1.38 2002/08/08 03:51:38 bostic Exp $";
+#endif
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#endif
+
+#include "db_int.h"
+
+int db_verify_main __P((int, char *[]));
+int db_verify_usage __P((void));
+int db_verify_version_check __P((const char *));
+
+int
+db_verify(args)
+ char *args;
+{
+ int argc;
+ char **argv;
+
+ __db_util_arg("db_verify", args, &argc, &argv);
+ return (db_verify_main(argc, argv) ? EXIT_FAILURE : EXIT_SUCCESS);
+}
+
+#include <stdio.h>
+#define ERROR_RETURN ERROR
+
+int
+db_verify_main(argc, argv)
+ int argc;
+ char *argv[];
+{
+ extern char *optarg;
+ extern int optind, __db_getopt_reset;
+ const char *progname = "db_verify";
+ DB *dbp, *dbp1;
+ DB_ENV *dbenv;
+ u_int32_t cache;
+ int ch, d_close, e_close, exitval, nflag, oflag, private;
+ int quiet, resize, ret, t_ret;
+ char *home, *passwd;
+
+ if ((ret = db_verify_version_check(progname)) != 0)
+ return (ret);
+
+ dbenv = NULL;
+ cache = MEGABYTE;
+ d_close = e_close = exitval = nflag = oflag = quiet = 0;
+ home = passwd = NULL;
+ __db_getopt_reset = 1;
+ while ((ch = getopt(argc, argv, "h:NoP:qV")) != EOF)
+ switch (ch) {
+ case 'h':
+ home = optarg;
+ break;
+ case 'N':
+ nflag = 1;
+ break;
+ case 'P':
+ passwd = strdup(optarg);
+ memset(optarg, 0, strlen(optarg));
+ if (passwd == NULL) {
+ fprintf(stderr, "%s: strdup: %s\n",
+ progname, strerror(errno));
+ return (EXIT_FAILURE);
+ }
+ break;
+ case 'o':
+ oflag = 1;
+ break;
+ case 'q':
+ quiet = 1;
+ break;
+ case 'V':
+ printf("%s\n", db_version(NULL, NULL, NULL));
+ return (EXIT_SUCCESS);
+ case '?':
+ default:
+ return (db_verify_usage());
+ }
+ argc -= optind;
+ argv += optind;
+
+ if (argc <= 0)
+ return (db_verify_usage());
+
+ /* Handle possible interruptions. */
+ __db_util_siginit();
+
+ /*
+ * Create an environment object and initialize it for error
+ * reporting.
+ */
+retry: if ((ret = db_env_create(&dbenv, 0)) != 0) {
+ fprintf(stderr,
+ "%s: db_env_create: %s\n", progname, db_strerror(ret));
+ goto shutdown;
+ }
+ e_close = 1;
+
+ if (!quiet) {
+ dbenv->set_errfile(dbenv, stderr);
+ dbenv->set_errpfx(dbenv, progname);
+ }
+
+ if (nflag) {
+ if ((ret = dbenv->set_flags(dbenv, DB_NOLOCKING, 1)) != 0) {
+ dbenv->err(dbenv, ret, "set_flags: DB_NOLOCKING");
+ goto shutdown;
+ }
+ if ((ret = dbenv->set_flags(dbenv, DB_NOPANIC, 1)) != 0) {
+ dbenv->err(dbenv, ret, "set_flags: DB_NOPANIC");
+ goto shutdown;
+ }
+ }
+
+ if (passwd != NULL &&
+ (ret = dbenv->set_encrypt(dbenv, passwd, DB_ENCRYPT_AES)) != 0) {
+ dbenv->err(dbenv, ret, "set_passwd");
+ goto shutdown;
+ }
+ /*
+ * Attach to an mpool if it exists, but if that fails, attach to a
+ * private region. In the latter case, declare a reasonably large
+ * cache so that we don't fail when verifying large databases.
+ */
+ private = 0;
+ if ((ret =
+ dbenv->open(dbenv, home, DB_INIT_MPOOL | DB_USE_ENVIRON, 0)) != 0) {
+ if ((ret = dbenv->set_cachesize(dbenv, 0, cache, 1)) != 0) {
+ dbenv->err(dbenv, ret, "set_cachesize");
+ goto shutdown;
+ }
+ private = 1;
+ if ((ret = dbenv->open(dbenv, home,
+ DB_CREATE | DB_INIT_MPOOL | DB_PRIVATE | DB_USE_ENVIRON, 0)) != 0) {
+ dbenv->err(dbenv, ret, "open");
+ goto shutdown;
+ }
+ }
+
+ for (; !__db_util_interrupted() && argv[0] != NULL; ++argv) {
+ if ((ret = db_create(&dbp, dbenv, 0)) != 0) {
+ dbenv->err(dbenv, ret, "%s: db_create", progname);
+ goto shutdown;
+ }
+ d_close = 1;
+
+ /*
+ * We create a 2nd dbp to this database to get its pagesize
+ * because the dbp we're using for verify cannot be opened.
+ */
+ if (private) {
+ if ((ret = db_create(&dbp1, dbenv, 0)) != 0) {
+ dbenv->err(
+ dbenv, ret, "%s: db_create", progname);
+ goto shutdown;
+ }
+
+ if ((ret = dbp1->open(dbp1, NULL,
+ argv[0], NULL, DB_UNKNOWN, DB_RDONLY, 0)) != 0) {
+ dbenv->err(dbenv, ret, "DB->open: %s", argv[0]);
+ (void)dbp1->close(dbp1, 0);
+ goto shutdown;
+ }
+ /*
+ * If we get here, we can check the cache/page.
+ * !!!
+ * If we have to retry with an env with a larger
+ * cache, we jump out of this loop. However, we
+ * will still be working on the same argv when we
+ * get back into the for-loop.
+ */
+ ret = __db_util_cache(dbenv, dbp1, &cache, &resize);
+ (void)dbp1->close(dbp1, 0);
+ if (ret != 0)
+ goto shutdown;
+
+ if (resize) {
+ (void)dbp->close(dbp, 0);
+ d_close = 0;
+
+ (void)dbenv->close(dbenv, 0);
+ e_close = 0;
+ goto retry;
+ }
+ }
+ if ((ret = dbp->verify(dbp,
+ argv[0], NULL, NULL, oflag ? DB_NOORDERCHK : 0)) != 0)
+ dbp->err(dbp, ret, "DB->verify: %s", argv[0]);
+ if ((t_ret = dbp->close(dbp, 0)) != 0 && ret == 0) {
+			dbenv->err(dbenv, t_ret, "DB->close: %s", argv[0]);
+ ret = t_ret;
+ }
+ d_close = 0;
+ if (ret != 0)
+ goto shutdown;
+ }
+
+ if (0) {
+shutdown: exitval = 1;
+ }
+
+ if (d_close && (ret = dbp->close(dbp, 0)) != 0) {
+ exitval = 1;
+ dbenv->err(dbenv, ret, "close");
+ }
+ if (e_close && (ret = dbenv->close(dbenv, 0)) != 0) {
+ exitval = 1;
+ fprintf(stderr,
+ "%s: dbenv->close: %s\n", progname, db_strerror(ret));
+ }
+
+ /* Resend any caught signal. */
+ __db_util_sigresend();
+
+ return (exitval == 0 ? EXIT_SUCCESS : EXIT_FAILURE);
+}
+
+int
+db_verify_usage()
+{
+ fprintf(stderr, "%s\n",
+ "usage: db_verify [-NoqV] [-h home] [-P password] db_file ...");
+ return (EXIT_FAILURE);
+}
+
+int
+db_verify_version_check(progname)
+ const char *progname;
+{
+ int v_major, v_minor, v_patch;
+
+ /* Make sure we're loaded with the right version of the DB library. */
+ (void)db_version(&v_major, &v_minor, &v_patch);
+ if (v_major != DB_VERSION_MAJOR ||
+ v_minor != DB_VERSION_MINOR || v_patch != DB_VERSION_PATCH) {
+ fprintf(stderr,
+ "%s: version %d.%d.%d doesn't match library version %d.%d.%d\n",
+ progname, DB_VERSION_MAJOR, DB_VERSION_MINOR,
+ DB_VERSION_PATCH, v_major, v_minor, v_patch);
+ return (EXIT_FAILURE);
+ }
+ return (0);
+}
diff --git a/bdb/build_vxworks/db_verify/db_verify.wpj b/bdb/build_vxworks/db_verify/db_verify.wpj
new file mode 100755
index 00000000000..d807c9853bf
--- /dev/null
+++ b/bdb/build_vxworks/db_verify/db_verify.wpj
@@ -0,0 +1,160 @@
+Document file - DO NOT EDIT
+
+<BEGIN> BUILD_PENTIUMgnu_BUILDRULE
+db_verify.out
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_AR
+ar386
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_ARCHIVE
+$(PRJ_DIR)/PENTIUMgnu/db_verify.a
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_AS
+cc386
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_CC
+cc386
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_CFLAGS
+-g \
+ -mpentium \
+ -ansi \
+ -nostdinc \
+ -DRW_MULTI_THREAD \
+ -D_REENTRANT \
+ -fvolatile \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -I$(PRJ_DIR)/.. \
+ -I$(PRJ_DIR)/../.. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_CFLAGS_AS
+-g \
+ -mpentium \
+ -ansi \
+ -nostdinc \
+ -fvolatile \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -P \
+ -x \
+ assembler-with-cpp \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_CPP
+cc386 -E -P -xc
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_LD
+ld386
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_LDDEPS
+
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_LDFLAGS
+-X -N
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_LD_PARTIAL_FLAGS
+-X -r
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_NM
+nm386 -g
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_OPTION_DEFINE_MACRO
+-D
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_OPTION_INCLUDE_DIR
+-I
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_POST_BUILD_RULE
+
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_PRJ_LIBS
+
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_SIZE
+size386
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_RO_DEPEND_PATH
+{$(WIND_BASE)/target/h/} \
+ {$(WIND_BASE)/target/src/} \
+ {$(WIND_BASE)/target/config/}
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_TC
+::tc_PENTIUMgnu
+<END>
+
+<BEGIN> BUILD_RULE_archive
+
+<END>
+
+<BEGIN> BUILD_RULE_db_verify.out
+
+<END>
+
+<BEGIN> BUILD_RULE_objects
+
+<END>
+
+<BEGIN> BUILD__CURRENT
+PENTIUMgnu
+<END>
+
+<BEGIN> BUILD__LIST
+PENTIUMgnu
+<END>
+
+<BEGIN> CORE_INFO_TYPE
+::prj_vxApp
+<END>
+
+<BEGIN> CORE_INFO_VERSION
+2.0
+<END>
+
+<BEGIN> FILE_db_verify.c_dependDone
+FALSE
+<END>
+
+<BEGIN> FILE_db_verify.c_dependencies
+
+<END>
+
+<BEGIN> FILE_db_verify.c_objects
+db_verify.o
+<END>
+
+<BEGIN> FILE_db_verify.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> PROJECT_FILES
+$(PRJ_DIR)/db_verify.c
+<END>
+
+<BEGIN> userComments
+db_verify
+<END>
diff --git a/bdb/build_vxworks/db_verify/db_verify/Makefile.custom b/bdb/build_vxworks/db_verify/db_verify/Makefile.custom
new file mode 100644
index 00000000000..ca781f7b251
--- /dev/null
+++ b/bdb/build_vxworks/db_verify/db_verify/Makefile.custom
@@ -0,0 +1,51 @@
+#
+# Custom Makefile shell
+#
+# This file may be edited freely, since it will not be regenerated
+# by the project manager.
+#
+# Use this makefile to define rules to make external binaries
+# and deposit them in the $(EXTERNAL_BINARIES_DIR) directory.
+#
+# If you have specified external modules during your component
+# creation, you will find make rules already in place below.
+# You will likely have to edit these to suit your individual
+# build setup.
+#
+# You may wish to use the CPU, BUILD_SPEC or TOOL make variables in
+# your Makefile to support builds for different architectures. Use
+# the FORCE_EXTERNAL_MAKE phony target to ensure that your external
+# make always runs.
+#
+# The example below assumes that your custom makefile is in the
+# mySourceTree directory, and that the binary file it produces
+# is placed into the $(BUILD_SPEC) sub-directory.
+#
+# EXTERNAL_SOURCE_BASE = /folk/me/mySourceTree
+# EXTERNAL_MODULE = myLibrary.o
+# EXTERNAL_MAKE = make
+#
+# $(EXTERNAL_BINARIES_DIR)/$(EXTERNAL_MODULE) : FORCE_EXTERNAL_MAKE
+# $(EXTERNAL_MAKE) -C $(EXTERNAL_SOURCE_BASE) \
+# -f $(EXTERNAL_SOURCE_BASE)/Makefile \
+# CPU=$(CPU) BUILD_SPEC=$(BUILD_SPEC) $(@F)
+# $(CP) $(subst /,$(DIRCHAR),$(EXTERNAL_SOURCE_BASE)/$(BUILD_SPEC)/$(@F) $@)
+#
+# If you are not adding your external modules from the component wizard,
+# you will have to include them in your component yourself:
+#
+# From the GUI, you can do this with the Component's 'Add external module'
+# dialog.
+#
+# If you are using the command line, add the module(s) by editing the
+# MODULES line in component.cdf file, e.g.
+#
+# Component INCLUDE_MYCOMPONENT {
+#
+# MODULES foo.o goo.o \
+# myLibrary.o
+#
+
+
+# rules to build custom libraries
+
diff --git a/bdb/build_vxworks/db_verify/db_verify/component.cdf b/bdb/build_vxworks/db_verify/db_verify/component.cdf
new file mode 100755
index 00000000000..f29f8246b57
--- /dev/null
+++ b/bdb/build_vxworks/db_verify/db_verify/component.cdf
@@ -0,0 +1,30 @@
+/* component.cdf - dynamically updated configuration */
+
+/*
+ * NOTE: you may edit this file to alter the configuration.
+ * But all non-configuration information, including comments,
+ * will be lost upon rebuilding this project.
+ */
+
+/* Component information */
+
+Component INCLUDE_DB_VERIFY {
+ ENTRY_POINTS ALL_GLOBAL_SYMBOLS
+ MODULES db_verify.o
+ NAME db_verify
+ PREF_DOMAIN ANY
+ _INIT_ORDER usrComponentsInit
+}
+
+/* EntryPoint information */
+
+/* Module information */
+
+Module db_verify.o {
+
+ NAME db_verify.o
+ SRC_PATH_NAME $PRJ_DIR/../db_verify.c
+}
+
+/* Parameter information */
+
diff --git a/bdb/build_vxworks/db_verify/db_verify/component.wpj b/bdb/build_vxworks/db_verify/db_verify/component.wpj
new file mode 100755
index 00000000000..aca3ae8cb75
--- /dev/null
+++ b/bdb/build_vxworks/db_verify/db_verify/component.wpj
@@ -0,0 +1,475 @@
+Document file - DO NOT EDIT
+
+<BEGIN> CORE_INFO_TYPE
+::prj_component
+<END>
+
+<BEGIN> CORE_INFO_VERSION
+AE1.1
+<END>
+
+<BEGIN> BUILD__CURRENT
+PENTIUM2gnu.debug
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_CURRENT_TARGET
+default
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_DEFAULTFORCPU
+1
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../db_verify.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../db_verify.c_objects
+db_verify.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../db_verify.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../db_verify.c_objects
+db_verify.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../db_verify.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/compConfig.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/compConfig.c_objects
+compConfig.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/compConfig.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_AR
+arpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_AS
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_CC
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_CFLAGS
+-mcpu=pentiumpro \
+ -march=pentiumpro \
+ -ansi \
+ -DRW_MULTI_THREAD \
+ -D_REENTRANT \
+ -g \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -MD \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -I$(PRJ_DIR)/../.. \
+ -I$(PRJ_DIR)/../../.. \
+ -DCPU=PENTIUM2
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_CFLAGS_AS
+-mcpu=pentiumpro \
+ -march=pentiumpro \
+ -ansi \
+ -g \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -P \
+ -x \
+ assembler-with-cpp \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM2
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_CPP
+ccpentium -E -P
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_CPPFILT
+c++filtpentium --strip-underscores
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_LD
+ldpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_LDFLAGS
+-X
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_LDPARTIAL
+ccpentium \
+ -B$(WIND_BASE)/host/$(WIND_HOST_TYPE)/lib/gcc-lib/ \
+ -nostdlib \
+ -r \
+ -Wl,-X
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_LD_PARTIAL_FLAGS
+-X -r
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_NM
+nmpentium -g
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_OPTION_DEFINE_MACRO
+-D
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_OPTION_GENERATE_DEPENDENCY_FILE
+-MD
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_OPTION_INCLUDE_DIR
+-I
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_RELEASE
+0
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_SIZE
+sizepentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_RELEASE
+0
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_RO_DEPEND_PATH
+$(WIND_BASE)/target/h/
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_TC
+::tc_PENTIUM2gnu.debug
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_DEFAULTFORCPU
+0
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../db_verify.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../db_verify.c_objects
+db_verify.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../db_verify.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/compConfig.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/compConfig.c_objects
+compConfig.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/compConfig.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_AR
+arpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_AS
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_CC
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_CFLAGS
+-mcpu=pentiumpro \
+ -march=pentiumpro \
+ -ansi \
+ -DRW_MULTI_THREAD \
+ -D_REENTRANT \
+ -O2 \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -MD \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -I$(PRJ_DIR)/../.. \
+ -I$(PRJ_DIR)/../../.. \
+ -DCPU=PENTIUM2
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_CFLAGS_AS
+-mcpu=pentiumpro \
+ -march=pentiumpro \
+ -ansi \
+ -O2 \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -P \
+ -x \
+ assembler-with-cpp \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM2
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_CPP
+ccpentium -E -P
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_CPPFILT
+c++filtpentium --strip-underscores
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_LD
+ldpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_LDFLAGS
+-X
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_LDPARTIAL
+ccpentium \
+ -B$(WIND_BASE)/host/$(WIND_HOST_TYPE)/lib/gcc-lib/ \
+ -nostdlib \
+ -r \
+ -Wl,-X
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_LD_PARTIAL_FLAGS
+-X -r
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_NM
+nmpentium -g
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_OPTION_DEFINE_MACRO
+-D
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_OPTION_GENERATE_DEPENDENCY_FILE
+-MD
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_OPTION_INCLUDE_DIR
+-I
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_RELEASE
+1
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_SIZE
+sizepentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_RELEASE
+1
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_RO_DEPEND_PATH
+$(WIND_BASE)/target/h/
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_TC
+::tc_PENTIUM2gnu.release
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_DEFAULTFORCPU
+1
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../db_verify.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../db_verify.c_objects
+db_verify.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../db_verify.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/compConfig.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/compConfig.c_objects
+compConfig.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/compConfig.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_AR
+arpentium
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_AS
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_CC
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_CFLAGS
+-mcpu=pentium \
+ -march=pentium \
+ -ansi \
+ -DRW_MULTI_THREAD \
+ -D_REENTRANT \
+ -g \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -MD \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -I$(PRJ_DIR)/../.. \
+ -I$(PRJ_DIR)/../../.. \
+ -DCPU=PENTIUM
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_CFLAGS_AS
+-mcpu=pentium \
+ -march=pentium \
+ -ansi \
+ -g \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -P \
+ -x \
+ assembler-with-cpp \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_CPP
+ccpentium -E -P
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_CPPFILT
+c++filtpentium --strip-underscores
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_LD
+ldpentium
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_LDFLAGS
+-X
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_LDPARTIAL
+ccpentium \
+ -B$(WIND_BASE)/host/$(WIND_HOST_TYPE)/lib/gcc-lib/ \
+ -nostdlib \
+ -r \
+ -Wl,-X
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_LD_PARTIAL_FLAGS
+-X -r
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_NM
+nmpentium -g
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_OPTION_DEFINE_MACRO
+-D
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_OPTION_GENERATE_DEPENDENCY_FILE
+-MD
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_OPTION_INCLUDE_DIR
+-I
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_RELEASE
+0
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_SIZE
+sizepentium
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_RELEASE
+0
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_RO_DEPEND_PATH
+$(WIND_BASE)/target/h/
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_TC
+::tc_PENTIUMgnu.debug
+<END>
+
+<BEGIN> BUILD__LIST
+PENTIUM2gnu.debug PENTIUM2gnu.release PENTIUMgnu.debug
+<END>
+
+<BEGIN> PROJECT_FILES
+$(PRJ_DIR)/../db_verify.c \
+ $(PRJ_DIR)/compConfig.c
+<END>
+
+<BEGIN> WCC__CDF_PATH
+$(PRJ_DIR)
+<END>
+
+<BEGIN> WCC__CURRENT
+PENTIUM2gnu.debug
+<END>
+
+<BEGIN> WCC__LIST
+PENTIUM2gnu.debug
+<END>
+
+<BEGIN> WCC__MXR_LIBS
+lib$(CPU)$(TOOL)vx.a
+<END>
+
+<BEGIN> WCC__OBJS_PATH
+$(WIND_BASE)/target/lib/obj$CPU$TOOLvx
+<END>
+
diff --git a/bdb/build_vxworks/dbdemo/README b/bdb/build_vxworks/dbdemo/README
new file mode 100644
index 00000000000..1a2c7c7d073
--- /dev/null
+++ b/bdb/build_vxworks/dbdemo/README
@@ -0,0 +1,39 @@
+This README describes the steps needed to run a demo example of BerkeleyDB.
+
+1. Read the pages in the Reference Guide that describe building
+ BerkeleyDB on VxWorks:
+
+ $(WIND_BASE)/target/src/BerkeleyDB/docs/ref/build_vxworks/intro.html
+ $(WIND_BASE)/target/src/BerkeleyDB/docs/ref/build_vxworks/notes.html
+ $(WIND_BASE)/target/src/BerkeleyDB/docs/ref/build_vxworks/faq.html
+
+2. Launch Tornado 2.0 and open up the BerkeleyDB project.
+
+3. Add the demo project to that workspace:
+
+   $(WIND_BASE)/target/src/BerkeleyDB/build_vxworks/dbdemo/dbdemo.wpj
+
+4. Build BerkeleyDB as described in the Reference Guide.
+
+5. Build the dbdemo project.
+
+6. Download BerkeleyDB onto the target.
+
+7. Download the dbdemo project onto the target.
+
+8. Open a windsh to the target and run the demo:
+
+ -> dbdemo "<pathname>/<dbname>"
+
+   Where pathname is the path of a directory in which the demo can
+   create its database (the directory must already exist), and dbname
+   is the name of the database file.  For example:
+
+ -> dbdemo "/tmp/demo.db"
+
+9. The demo program will prompt for input.  You can type in any string.
+   For each line you enter, the program adds a record to the database
+   with that string as the key and the reverse of the string as the
+   data item.  It keeps prompting until you hit ^D or enter "quit" or
+   "exit".  It then displays every key you entered along with its
+   data item.  A sample session is sketched below.
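
A session (from the windsh prompt of step 8) might look something like
the following sketch; the input strings are arbitrary, and the prompt
and the "key : data" output format come from dbdemo.c, added just
below:

   -> dbdemo "/tmp/demo.db"
   input> abc
   input> hello
   input> quit

   abc : cba
   hello : olleh
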
diff --git a/bdb/build_vxworks/dbdemo/dbdemo.c b/bdb/build_vxworks/dbdemo/dbdemo.c
new file mode 100644
index 00000000000..6dd2a25c54e
--- /dev/null
+++ b/bdb/build_vxworks/dbdemo/dbdemo.c
@@ -0,0 +1,178 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: ex_access.c,v 11.22 2002/09/03 12:54:26 bostic Exp $
+ */
+
+#include <sys/types.h>
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#ifdef _WIN32
+extern int getopt(int, char * const *, const char *);
+#else
+#include <unistd.h>
+#endif
+
+#include <db_config.h>
+#include <db_int.h>
+
+#define DATABASE "access.db"
+int dbdemo_main __P((int, char *[]));
+int dbdemo_usage __P((void));
+
+int
+dbdemo(args)
+ char *args;
+{
+ int argc;
+ char **argv;
+
+ __db_util_arg("dbdemo", args, &argc, &argv);
+ return (dbdemo_main(argc, argv) ? EXIT_FAILURE : EXIT_SUCCESS);
+}
+
+#include <stdio.h>
+#define ERROR_RETURN ERROR
+
+int
+dbdemo_main(argc, argv)
+ int argc;
+ char *argv[];
+{
+ extern int optind, __db_getopt_reset;
+ DB *dbp;
+ DBC *dbcp;
+ DBT key, data;
+ u_int32_t len;
+ int ch, ret, rflag;
+ char *database, *p, *t, buf[1024], rbuf[1024];
+ const char *progname = "dbdemo"; /* Program name. */
+
+ rflag = 0;
+ __db_getopt_reset = 1;
+ while ((ch = getopt(argc, argv, "r")) != EOF)
+ switch (ch) {
+ case 'r':
+ rflag = 1;
+ break;
+ case '?':
+ default:
+ return (dbdemo_usage());
+ }
+ argc -= optind;
+ argv += optind;
+
+ /* Accept optional database name. */
+ database = *argv == NULL ? DATABASE : argv[0];
+
+ /* Optionally discard the database. */
+ if (rflag)
+ (void)remove(database);
+
+ /* Create and initialize database object, open the database. */
+ if ((ret = db_create(&dbp, NULL, 0)) != 0) {
+ fprintf(stderr,
+ "%s: db_create: %s\n", progname, db_strerror(ret));
+ return (EXIT_FAILURE);
+ }
+ dbp->set_errfile(dbp, stderr);
+ dbp->set_errpfx(dbp, progname);
+ if ((ret = dbp->set_pagesize(dbp, 1024)) != 0) {
+ dbp->err(dbp, ret, "set_pagesize");
+ goto err1;
+ }
+ if ((ret = dbp->set_cachesize(dbp, 0, 32 * 1024, 0)) != 0) {
+ dbp->err(dbp, ret, "set_cachesize");
+ goto err1;
+ }
+ if ((ret = dbp->open(dbp,
+ NULL, database, NULL, DB_BTREE, DB_CREATE, 0664)) != 0) {
+ dbp->err(dbp, ret, "%s: open", database);
+ goto err1;
+ }
+
+ /*
+ * Insert records into the database, where the key is the user
+ * input and the data is the user input in reverse order.
+ */
+ memset(&key, 0, sizeof(DBT));
+ memset(&data, 0, sizeof(DBT));
+ for (;;) {
+ printf("input> ");
+ fflush(stdout);
+ if (fgets(buf, sizeof(buf), stdin) == NULL)
+ break;
+ if (strcmp(buf, "exit\n") == 0 || strcmp(buf, "quit\n") == 0)
+ break;
+ if ((len = strlen(buf)) <= 1)
+ continue;
+ for (t = rbuf, p = buf + (len - 2); p >= buf;)
+ *t++ = *p--;
+ *t++ = '\0';
+
+ key.data = buf;
+ data.data = rbuf;
+ data.size = key.size = len - 1;
+
+ switch (ret =
+ dbp->put(dbp, NULL, &key, &data, DB_NOOVERWRITE)) {
+ case 0:
+ break;
+ default:
+ dbp->err(dbp, ret, "DB->put");
+ if (ret != DB_KEYEXIST)
+ goto err1;
+ break;
+ }
+ }
+ printf("\n");
+
+ /* Acquire a cursor for the database. */
+ if ((ret = dbp->cursor(dbp, NULL, &dbcp, 0)) != 0) {
+ dbp->err(dbp, ret, "DB->cursor");
+ goto err1;
+ }
+
+ /* Initialize the key/data pair so the flags aren't set. */
+ memset(&key, 0, sizeof(key));
+ memset(&data, 0, sizeof(data));
+
+ /* Walk through the database and print out the key/data pairs. */
+ while ((ret = dbcp->c_get(dbcp, &key, &data, DB_NEXT)) == 0)
+ printf("%.*s : %.*s\n",
+ (int)key.size, (char *)key.data,
+ (int)data.size, (char *)data.data);
+ if (ret != DB_NOTFOUND) {
+ dbp->err(dbp, ret, "DBcursor->get");
+ goto err2;
+ }
+
+ /* Close everything down. */
+ if ((ret = dbcp->c_close(dbcp)) != 0) {
+ dbp->err(dbp, ret, "DBcursor->close");
+ goto err1;
+ }
+ if ((ret = dbp->close(dbp, 0)) != 0) {
+ fprintf(stderr,
+ "%s: DB->close: %s\n", progname, db_strerror(ret));
+ return (EXIT_FAILURE);
+ }
+ return (EXIT_SUCCESS);
+
+err2: (void)dbcp->c_close(dbcp);
+err1: (void)dbp->close(dbp, 0);
+ return (EXIT_FAILURE);
+}
+
+int
+dbdemo_usage()
+{
+	(void)fprintf(stderr, "usage: dbdemo [-r] [database]\n");
+ return (EXIT_FAILURE);
+}
diff --git a/bdb/build_vxworks/dbdemo/dbdemo.wpj b/bdb/build_vxworks/dbdemo/dbdemo.wpj
new file mode 100755
index 00000000000..52eec5ed945
--- /dev/null
+++ b/bdb/build_vxworks/dbdemo/dbdemo.wpj
@@ -0,0 +1,160 @@
+Document file - DO NOT EDIT
+
+<BEGIN> BUILD_PENTIUMgnu_BUILDRULE
+dbdemo.out
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_AR
+ar386
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_ARCHIVE
+$(PRJ_DIR)/PENTIUMgnu/dbdemo.a
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_AS
+cc386
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_CC
+cc386
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_CFLAGS
+-g \
+ -mpentium \
+ -ansi \
+ -nostdinc \
+ -DRW_MULTI_THREAD \
+ -D_REENTRANT \
+ -fvolatile \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -I$(PRJ_DIR)/.. \
+ -I$(PRJ_DIR)/../.. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_CFLAGS_AS
+-g \
+ -mpentium \
+ -ansi \
+ -nostdinc \
+ -fvolatile \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -P \
+ -x \
+ assembler-with-cpp \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_CPP
+cc386 -E -P -xc
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_LD
+ld386
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_LDDEPS
+
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_LDFLAGS
+-X -N
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_LD_PARTIAL_FLAGS
+-X -r
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_NM
+nm386 -g
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_OPTION_DEFINE_MACRO
+-D
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_OPTION_INCLUDE_DIR
+-I
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_POST_BUILD_RULE
+
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_PRJ_LIBS
+
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_SIZE
+size386
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_RO_DEPEND_PATH
+{$(WIND_BASE)/target/h/} \
+ {$(WIND_BASE)/target/src/} \
+ {$(WIND_BASE)/target/config/}
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_TC
+::tc_PENTIUMgnu
+<END>
+
+<BEGIN> BUILD_RULE_archive
+
+<END>
+
+<BEGIN> BUILD_RULE_dbdemo.out
+
+<END>
+
+<BEGIN> BUILD_RULE_objects
+
+<END>
+
+<BEGIN> BUILD__CURRENT
+PENTIUMgnu
+<END>
+
+<BEGIN> BUILD__LIST
+PENTIUMgnu
+<END>
+
+<BEGIN> CORE_INFO_TYPE
+::prj_vxApp
+<END>
+
+<BEGIN> CORE_INFO_VERSION
+2.0
+<END>
+
+<BEGIN> FILE_dbdemo.c_dependDone
+FALSE
+<END>
+
+<BEGIN> FILE_dbdemo.c_dependencies
+
+<END>
+
+<BEGIN> FILE_dbdemo.c_objects
+dbdemo.o
+<END>
+
+<BEGIN> FILE_dbdemo.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> PROJECT_FILES
+$(PRJ_DIR)/dbdemo.c
+<END>
+
+<BEGIN> userComments
+dbdemo
+<END>
diff --git a/bdb/build_vxworks/dbdemo/dbdemo/Makefile.custom b/bdb/build_vxworks/dbdemo/dbdemo/Makefile.custom
new file mode 100644
index 00000000000..ca781f7b251
--- /dev/null
+++ b/bdb/build_vxworks/dbdemo/dbdemo/Makefile.custom
@@ -0,0 +1,51 @@
+#
+# Custom Makefile shell
+#
+# This file may be edited freely, since it will not be regenerated
+# by the project manager.
+#
+# Use this makefile to define rules to make external binaries
+# and deposit them in the $(EXTERNAL_BINARIES_DIR) directory.
+#
+# If you have specified external modules during your component
+# creation, you will find make rules already in place below.
+# You will likely have to edit these to suit your individual
+# build setup.
+#
+# You may wish to use the CPU, BUILD_SPEC or TOOL make variables in
+# your Makefile to support builds for different architectures. Use
+# the FORCE_EXTERNAL_MAKE phony target to ensure that your external
+# make always runs.
+#
+# The example below assumes that your custom makefile is in the
+# mySourceTree directory, and that the binary file it produces
+# is placed into the $(BUILD_SPEC) sub-directory.
+#
+# EXTERNAL_SOURCE_BASE = /folk/me/mySourceTree
+# EXTERNAL_MODULE = myLibrary.o
+# EXTERNAL_MAKE = make
+#
+# $(EXTERNAL_BINARIES_DIR)/$(EXTERNAL_MODULE) : FORCE_EXTERNAL_MAKE
+# $(EXTERNAL_MAKE) -C $(EXTERNAL_SOURCE_BASE) \
+# -f $(EXTERNAL_SOURCE_BASE)/Makefile \
+# CPU=$(CPU) BUILD_SPEC=$(BUILD_SPEC) $(@F)
+# $(CP) $(subst /,$(DIRCHAR),$(EXTERNAL_SOURCE_BASE)/$(BUILD_SPEC)/$(@F) $@)
+#
+# If you are not adding your external modules from the component wizard,
+# you will have to include them in your component yourself:
+#
+# From the GUI, you can do this with the Component's 'Add external module'
+# dialog.
+#
+# If you are using the command line, add the module(s) by editing the
+# MODULES line in component.cdf file, e.g.
+#
+# Component INCLUDE_MYCOMPONENT {
+#
+# MODULES foo.o goo.o \
+# myLibrary.o
+#
+
+
+# rules to build custom libraries
+
diff --git a/bdb/build_vxworks/dbdemo/dbdemo/component.cdf b/bdb/build_vxworks/dbdemo/dbdemo/component.cdf
new file mode 100755
index 00000000000..188b63bfa4a
--- /dev/null
+++ b/bdb/build_vxworks/dbdemo/dbdemo/component.cdf
@@ -0,0 +1,30 @@
+/* component.cdf - dynamically updated configuration */
+
+/*
+ * NOTE: you may edit this file to alter the configuration
+ * But all non-configuration information, including comments,
+ * will be lost upon rebuilding this project.
+ */
+
+/* Component information */
+
+Component INCLUDE_DBDEMO {
+ ENTRY_POINTS ALL_GLOBAL_SYMBOLS
+ MODULES dbdemo.o
+ NAME dbdemo
+ PREF_DOMAIN ANY
+ _INIT_ORDER usrComponentsInit
+}
+
+/* EntryPoint information */
+
+/* Module information */
+
+Module dbdemo.o {
+
+ NAME dbdemo.o
+ SRC_PATH_NAME $PRJ_DIR/../dbdemo.c
+}
+
+/* Parameter information */
+
diff --git a/bdb/build_vxworks/dbdemo/dbdemo/component.wpj b/bdb/build_vxworks/dbdemo/dbdemo/component.wpj
new file mode 100755
index 00000000000..b51ebce106e
--- /dev/null
+++ b/bdb/build_vxworks/dbdemo/dbdemo/component.wpj
@@ -0,0 +1,475 @@
+Document file - DO NOT EDIT
+
+<BEGIN> CORE_INFO_TYPE
+::prj_component
+<END>
+
+<BEGIN> CORE_INFO_VERSION
+AE1.1
+<END>
+
+<BEGIN> BUILD__CURRENT
+PENTIUM2gnu.debug
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_CURRENT_TARGET
+default
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_DEFAULTFORCPU
+1
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../dbdemo.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../dbdemo.c_objects
+dbdemo.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../dbdemo.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../dbdemo.c_objects
+dbdemo.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../dbdemo.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/compConfig.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/compConfig.c_objects
+compConfig.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/compConfig.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_AR
+arpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_AS
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_CC
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_CFLAGS
+-mcpu=pentiumpro \
+ -march=pentiumpro \
+ -ansi \
+ -DRW_MULTI_THREAD \
+ -D_REENTRANT \
+ -g \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -MD \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -I$(PRJ_DIR)/../.. \
+ -I$(PRJ_DIR)/../../.. \
+ -DCPU=PENTIUM2
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_CFLAGS_AS
+-mcpu=pentiumpro \
+ -march=pentiumpro \
+ -ansi \
+ -g \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -P \
+ -x \
+ assembler-with-cpp \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM2
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_CPP
+ccpentium -E -P
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_CPPFILT
+c++filtpentium --strip-underscores
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_LD
+ldpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_LDFLAGS
+-X
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_LDPARTIAL
+ccpentium \
+ -B$(WIND_BASE)/host/$(WIND_HOST_TYPE)/lib/gcc-lib/ \
+ -nostdlib \
+ -r \
+ -Wl,-X
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_LD_PARTIAL_FLAGS
+-X -r
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_NM
+nmpentium -g
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_OPTION_DEFINE_MACRO
+-D
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_OPTION_GENERATE_DEPENDENCY_FILE
+-MD
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_OPTION_INCLUDE_DIR
+-I
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_RELEASE
+0
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_SIZE
+sizepentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_RELEASE
+0
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_RO_DEPEND_PATH
+$(WIND_BASE)/target/h/
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_TC
+::tc_PENTIUM2gnu.debug
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_DEFAULTFORCPU
+0
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../dbdemo.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../dbdemo.c_objects
+dbdemo.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../dbdemo.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/compConfig.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/compConfig.c_objects
+compConfig.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/compConfig.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_AR
+arpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_AS
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_CC
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_CFLAGS
+-mcpu=pentiumpro \
+ -march=pentiumpro \
+ -ansi \
+ -DRW_MULTI_THREAD \
+ -D_REENTRANT \
+ -O2 \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -MD \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -I$(PRJ_DIR)/../.. \
+ -I$(PRJ_DIR)/../../.. \
+ -DCPU=PENTIUM2
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_CFLAGS_AS
+-mcpu=pentiumpro \
+ -march=pentiumpro \
+ -ansi \
+ -O2 \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -P \
+ -x \
+ assembler-with-cpp \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM2
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_CPP
+ccpentium -E -P
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_CPPFILT
+c++filtpentium --strip-underscores
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_LD
+ldpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_LDFLAGS
+-X
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_LDPARTIAL
+ccpentium \
+ -B$(WIND_BASE)/host/$(WIND_HOST_TYPE)/lib/gcc-lib/ \
+ -nostdlib \
+ -r \
+ -Wl,-X
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_LD_PARTIAL_FLAGS
+-X -r
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_NM
+nmpentium -g
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_OPTION_DEFINE_MACRO
+-D
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_OPTION_GENERATE_DEPENDENCY_FILE
+-MD
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_OPTION_INCLUDE_DIR
+-I
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_RELEASE
+1
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_SIZE
+sizepentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_RELEASE
+1
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_RO_DEPEND_PATH
+$(WIND_BASE)/target/h/
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_TC
+::tc_PENTIUM2gnu.release
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_DEFAULTFORCPU
+1
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../dbdemo.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../dbdemo.c_objects
+dbdemo.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../dbdemo.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/compConfig.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/compConfig.c_objects
+compConfig.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/compConfig.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_AR
+arpentium
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_AS
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_CC
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_CFLAGS
+-mcpu=pentium \
+ -march=pentium \
+ -ansi \
+ -DRW_MULTI_THREAD \
+ -D_REENTRANT \
+ -g \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -MD \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -I$(PRJ_DIR)/../.. \
+ -I$(PRJ_DIR)/../../.. \
+ -DCPU=PENTIUM
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_CFLAGS_AS
+-mcpu=pentium \
+ -march=pentium \
+ -ansi \
+ -g \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -P \
+ -x \
+ assembler-with-cpp \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_CPP
+ccpentium -E -P
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_CPPFILT
+c++filtpentium --strip-underscores
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_LD
+ldpentium
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_LDFLAGS
+-X
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_LDPARTIAL
+ccpentium \
+ -B$(WIND_BASE)/host/$(WIND_HOST_TYPE)/lib/gcc-lib/ \
+ -nostdlib \
+ -r \
+ -Wl,-X
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_LD_PARTIAL_FLAGS
+-X -r
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_NM
+nmpentium -g
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_OPTION_DEFINE_MACRO
+-D
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_OPTION_GENERATE_DEPENDENCY_FILE
+-MD
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_OPTION_INCLUDE_DIR
+-I
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_RELEASE
+0
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_SIZE
+sizepentium
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_RELEASE
+0
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_RO_DEPEND_PATH
+$(WIND_BASE)/target/h/
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_TC
+::tc_PENTIUMgnu.debug
+<END>
+
+<BEGIN> BUILD__LIST
+PENTIUM2gnu.debug PENTIUM2gnu.release PENTIUMgnu.debug
+<END>
+
+<BEGIN> PROJECT_FILES
+$(PRJ_DIR)/../dbdemo.c \
+ $(PRJ_DIR)/compConfig.c
+<END>
+
+<BEGIN> WCC__CDF_PATH
+$(PRJ_DIR)
+<END>
+
+<BEGIN> WCC__CURRENT
+PENTIUM2gnu.debug
+<END>
+
+<BEGIN> WCC__LIST
+PENTIUM2gnu.debug
+<END>
+
+<BEGIN> WCC__MXR_LIBS
+lib$(CPU)$(TOOL)vx.a
+<END>
+
+<BEGIN> WCC__OBJS_PATH
+$(WIND_BASE)/target/lib/obj$CPU$TOOLvx
+<END>
+
diff --git a/bdb/build_vxworks/ex_access/ex_access.wpj b/bdb/build_vxworks/ex_access/ex_access.wpj
deleted file mode 100644
index bbbad47a253..00000000000
--- a/bdb/build_vxworks/ex_access/ex_access.wpj
+++ /dev/null
@@ -1,244 +0,0 @@
-Document file - DO NOT EDIT
-
-<BEGIN> BUILD_PENTIUMgnu_MACRO_AR
-ar386
-<END>
-
-<BEGIN> BUILD_PENTIUMgnu_MACRO_ARCHIVE
-$(PRJ_DIR)/PENTIUMgnu/ex_access.a
-<END>
-
-<BEGIN> BUILD_PENTIUMgnu_MACRO_AS
-cc386
-<END>
-
-<BEGIN> BUILD_PENTIUMgnu_MACRO_CC
-cc386
-<END>
-
-<BEGIN> BUILD_PENTIUMgnu_MACRO_CFLAGS
--g \
- -mpentium \
- -ansi \
- -nostdinc \
- -DRW_MULTI_THREAD \
- -D_REENTRANT \
- -fvolatile \
- -nostdlib \
- -fno-builtin \
- -fno-defer-pop \
- -I. \
- -I$(WIND_BASE)/target/h \
- -DCPU=PENTIUM \
- -I/export/home/db/build_vxworks
-<END>
-
-<BEGIN> BUILD_PENTIUMgnu_MACRO_CFLAGS_AS
--g \
- -mpentium \
- -ansi \
- -nostdinc \
- -fvolatile \
- -nostdlib \
- -fno-builtin \
- -fno-defer-pop \
- -P \
- -x \
- assembler-with-cpp \
- -I. \
- -I$(WIND_BASE)/target/h \
- -DCPU=PENTIUM
-<END>
-
-<BEGIN> BUILD_PENTIUMgnu_MACRO_CPP
-cc386 -E -P -xc
-<END>
-
-<BEGIN> BUILD_PENTIUMgnu_MACRO_LD
-ld386
-<END>
-
-<BEGIN> BUILD_PENTIUMgnu_MACRO_LDFLAGS
--X -N
-<END>
-
-<BEGIN> BUILD_PENTIUMgnu_MACRO_LD_PARTIAL_FLAGS
--X -r
-<END>
-
-<BEGIN> BUILD_PENTIUMgnu_MACRO_NM
-nm386 -g
-<END>
-
-<BEGIN> BUILD_PENTIUMgnu_MACRO_OPTION_DEFINE_MACRO
--D
-<END>
-
-<BEGIN> BUILD_PENTIUMgnu_MACRO_OPTION_INCLUDE_DIR
--I
-<END>
-
-<BEGIN> BUILD_PENTIUMgnu_MACRO_SIZE
-size386
-<END>
-
-<BEGIN> BUILD_PENTIUMgnu_RO_DEPEND_PATH
-{$(WIND_BASE)/target/h/} \
- {$(WIND_BASE)/target/src/} \
- {$(WIND_BASE)/target/config/}
-<END>
-
-<BEGIN> BUILD_PENTIUMgnu_TC
-::tc_PENTIUMgnu
-<END>
-
-<BEGIN> BUILD_RULE_archive
-
-<END>
-
-<BEGIN> BUILD_RULE_ex_access.out
-
-<END>
-
-<BEGIN> BUILD_RULE_objects
-
-<END>
-
-<BEGIN> BUILD_SIMSPARCSOLARISgnu_BUILDRULE
-ex_access.out
-<END>
-
-<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_AR
-arsimso
-<END>
-
-<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_ARCHIVE
-$(PRJ_DIR)/SIMSPARCSOLARISgnu/ex_access.a
-<END>
-
-<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_AS
-ccsimso
-<END>
-
-<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_CC
-ccsimso
-<END>
-
-<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_CFLAGS
--g \
- -ansi \
- -nostdinc \
- -DRW_MULTI_THREAD \
- -D_REENTRANT \
- -fvolatile \
- -fno-builtin \
- -I/export/home/db/build_vxworks \
- -I. \
- -I$(WIND_BASE)/target/h \
- -DCPU=SIMSPARCSOLARIS
-<END>
-
-<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_CFLAGS_AS
--g \
- -ansi \
- -nostdinc \
- -fvolatile \
- -fno-builtin \
- -P \
- -x \
- assembler-with-cpp \
- -I. \
- -I$(WIND_BASE)/target/h \
- -DCPU=SIMSPARCSOLARIS
-<END>
-
-<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_CPP
-ccsimso -E -P -xc
-<END>
-
-<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_LD
-ccsimso
-<END>
-
-<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_LDFLAGS
--N
-<END>
-
-<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_LD_PARTIAL_FLAGS
--nostdlib -r
-<END>
-
-<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_NM
-nmsimso
-<END>
-
-<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_OPTION_DEFINE_MACRO
--D
-<END>
-
-<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_OPTION_INCLUDE_DIR
--I
-<END>
-
-<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_POST_BUILD_RULE
-
-<END>
-
-<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_PRJ_LIBS
-
-<END>
-
-<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_SIZE
-sizesimso
-<END>
-
-<BEGIN> BUILD_SIMSPARCSOLARISgnu_RO_DEPEND_PATH
-{$(WIND_BASE)/target/h/} \
- {$(WIND_BASE)/target/src/} \
- {$(WIND_BASE)/target/config/}
-<END>
-
-<BEGIN> BUILD_SIMSPARCSOLARISgnu_TC
-::tc_SIMSPARCSOLARISgnu
-<END>
-
-<BEGIN> BUILD__CURRENT
-PENTIUMgnu
-<END>
-
-<BEGIN> BUILD__LIST
-SIMSPARCSOLARISgnu PENTIUMgnu
-<END>
-
-<BEGIN> CORE_INFO_TYPE
-::prj_vxApp
-<END>
-
-<BEGIN> CORE_INFO_VERSION
-2.0
-<END>
-
-<BEGIN> FILE_/export/home/db/examples_c/ex_access.c_dependDone
-TRUE
-<END>
-
-<BEGIN> FILE_/export/home/db/examples_c/ex_access.c_dependencies
-/export/home/db/build_vxworks/db_config.h /export/home/db/build_vxworks/db.h
-<END>
-
-<BEGIN> FILE_/export/home/db/examples_c/ex_access.c_objects
-ex_access.o
-<END>
-
-<BEGIN> FILE_/export/home/db/examples_c/ex_access.c_tool
-C/C++ compiler
-<END>
-
-<BEGIN> PROJECT_FILES
-/export/home/db/examples_c/ex_access.c
-<END>
-
-<BEGIN> userComments
-ex_access
-<END>
-
diff --git a/bdb/build_vxworks/ex_btrec/ex_btrec.wpj b/bdb/build_vxworks/ex_btrec/ex_btrec.wpj
deleted file mode 100644
index 801ca6808e2..00000000000
--- a/bdb/build_vxworks/ex_btrec/ex_btrec.wpj
+++ /dev/null
@@ -1,250 +0,0 @@
-Document file - DO NOT EDIT
-
-<BEGIN> BUILD_PENTIUMgnu_MACRO_AR
-ar386
-<END>
-
-<BEGIN> BUILD_PENTIUMgnu_MACRO_ARCHIVE
-$(PRJ_DIR)/PENTIUMgnu/ex_btrec.a
-<END>
-
-<BEGIN> BUILD_PENTIUMgnu_MACRO_AS
-cc386
-<END>
-
-<BEGIN> BUILD_PENTIUMgnu_MACRO_CC
-cc386
-<END>
-
-<BEGIN> BUILD_PENTIUMgnu_MACRO_CFLAGS
--g \
- -mpentium \
- -ansi \
- -nostdinc \
- -DRW_MULTI_THREAD \
- -D_REENTRANT \
- -fvolatile \
- -nostdlib \
- -fno-builtin \
- -fno-defer-pop \
- -I. \
- -I$(WIND_BASE)/target/h \
- -DCPU=PENTIUM \
- -I/export/home/db/build_vxworks
-<END>
-
-<BEGIN> BUILD_PENTIUMgnu_MACRO_CFLAGS_AS
--g \
- -mpentium \
- -ansi \
- -nostdinc \
- -fvolatile \
- -nostdlib \
- -fno-builtin \
- -fno-defer-pop \
- -P \
- -x \
- assembler-with-cpp \
- -I. \
- -I$(WIND_BASE)/target/h \
- -DCPU=PENTIUM
-<END>
-
-<BEGIN> BUILD_PENTIUMgnu_MACRO_CPP
-cc386 -E -P -xc
-<END>
-
-<BEGIN> BUILD_PENTIUMgnu_MACRO_LD
-ld386
-<END>
-
-<BEGIN> BUILD_PENTIUMgnu_MACRO_LDFLAGS
--X -N
-<END>
-
-<BEGIN> BUILD_PENTIUMgnu_MACRO_LD_PARTIAL_FLAGS
--X -r
-<END>
-
-<BEGIN> BUILD_PENTIUMgnu_MACRO_NM
-nm386 -g
-<END>
-
-<BEGIN> BUILD_PENTIUMgnu_MACRO_OPTION_DEFINE_MACRO
--D
-<END>
-
-<BEGIN> BUILD_PENTIUMgnu_MACRO_OPTION_INCLUDE_DIR
--I
-<END>
-
-<BEGIN> BUILD_PENTIUMgnu_MACRO_SIZE
-size386
-<END>
-
-<BEGIN> BUILD_PENTIUMgnu_RO_DEPEND_PATH
-{$(WIND_BASE)/target/h/} \
- {$(WIND_BASE)/target/src/} \
- {$(WIND_BASE)/target/config/}
-<END>
-
-<BEGIN> BUILD_PENTIUMgnu_TC
-::tc_PENTIUMgnu
-<END>
-
-<BEGIN> BUILD_RULE_archive
-
-<END>
-
-<BEGIN> BUILD_RULE_ex_btrec.out
-
-<END>
-
-<BEGIN> BUILD_RULE_objects
-
-<END>
-
-<BEGIN> BUILD_SIMSPARCSOLARISgnu_BUILDRULE
-ex_btrec.out
-<END>
-
-<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_AR
-arsimso
-<END>
-
-<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_ARCHIVE
-$(PRJ_DIR)/SIMSPARCSOLARISgnu/ex_btrec.a
-<END>
-
-<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_AS
-ccsimso
-<END>
-
-<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_CC
-ccsimso
-<END>
-
-<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_CFLAGS
--g \
- -ansi \
- -nostdinc \
- -DRW_MULTI_THREAD \
- -D_REENTRANT \
- -fvolatile \
- -fno-builtin \
- -I/export/home/db/build_vxworks \
- -I/export/home/db/build_vxworks \
- -I/export/home/db/include \
- -I. \
- -I$(WIND_BASE)/target/h \
- -DCPU=SIMSPARCSOLARIS
-<END>
-
-<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_CFLAGS_AS
--g \
- -ansi \
- -nostdinc \
- -fvolatile \
- -fno-builtin \
- -P \
- -x \
- assembler-with-cpp \
- -I. \
- -I$(WIND_BASE)/target/h \
- -DCPU=SIMSPARCSOLARIS
-<END>
-
-<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_CPP
-ccsimso -E -P -xc
-<END>
-
-<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_LD
-ccsimso
-<END>
-
-<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_LDDEPS
-
-<END>
-
-<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_LDFLAGS
--N
-<END>
-
-<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_LD_PARTIAL_FLAGS
--nostdlib -r
-<END>
-
-<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_NM
-nmsimso
-<END>
-
-<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_OPTION_DEFINE_MACRO
--D
-<END>
-
-<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_OPTION_INCLUDE_DIR
--I
-<END>
-
-<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_POST_BUILD_RULE
-
-<END>
-
-<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_PRJ_LIBS
-
-<END>
-
-<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_SIZE
-sizesimso
-<END>
-
-<BEGIN> BUILD_SIMSPARCSOLARISgnu_RO_DEPEND_PATH
-{$(WIND_BASE)/target/h/} \
- {$(WIND_BASE)/target/src/} \
- {$(WIND_BASE)/target/config/}
-<END>
-
-<BEGIN> BUILD_SIMSPARCSOLARISgnu_TC
-::tc_SIMSPARCSOLARISgnu
-<END>
-
-<BEGIN> BUILD__CURRENT
-PENTIUMgnu
-<END>
-
-<BEGIN> BUILD__LIST
-SIMSPARCSOLARISgnu PENTIUMgnu
-<END>
-
-<BEGIN> CORE_INFO_TYPE
-::prj_vxApp
-<END>
-
-<BEGIN> CORE_INFO_VERSION
-2.0
-<END>
-
-<BEGIN> FILE_/export/home/db/examples_c/ex_btrec.c_dependDone
-TRUE
-<END>
-
-<BEGIN> FILE_/export/home/db/examples_c/ex_btrec.c_dependencies
-/export/home/db/build_vxworks/db_config.h /export/home/db/build_vxworks/db.h
-<END>
-
-<BEGIN> FILE_/export/home/db/examples_c/ex_btrec.c_objects
-ex_btrec.o
-<END>
-
-<BEGIN> FILE_/export/home/db/examples_c/ex_btrec.c_tool
-C/C++ compiler
-<END>
-
-<BEGIN> PROJECT_FILES
-/export/home/db/examples_c/ex_btrec.c
-<END>
-
-<BEGIN> userComments
-ex_btrec
-<END>
-
diff --git a/bdb/build_vxworks/ex_dbclient/ex_dbclient.wpj b/bdb/build_vxworks/ex_dbclient/ex_dbclient.wpj
deleted file mode 100644
index fdb721406ad..00000000000
--- a/bdb/build_vxworks/ex_dbclient/ex_dbclient.wpj
+++ /dev/null
@@ -1,266 +0,0 @@
-Document file - DO NOT EDIT
-
-<BEGIN> BUILD_PENTIUMgnu_BUILDRULE
-ex_dbclient.out
-<END>
-
-<BEGIN> BUILD_PENTIUMgnu_MACRO_AR
-ar386
-<END>
-
-<BEGIN> BUILD_PENTIUMgnu_MACRO_ARCHIVE
-$(PRJ_DIR)/PENTIUMgnu/ex_dbclient.a
-<END>
-
-<BEGIN> BUILD_PENTIUMgnu_MACRO_AS
-cc386
-<END>
-
-<BEGIN> BUILD_PENTIUMgnu_MACRO_CC
-cc386
-<END>
-
-<BEGIN> BUILD_PENTIUMgnu_MACRO_CFLAGS
--g \
- -mpentium \
- -ansi \
- -nostdinc \
- -DRW_MULTI_THREAD \
- -D_REENTRANT \
- -fvolatile \
- -nostdlib \
- -fno-builtin \
- -fno-defer-pop \
- -I/export/home/db/build_vxworks \
- -I/export/home/db/include \
- -I. \
- -I$(WIND_BASE)/target/h \
- -DCPU=PENTIUM
-<END>
-
-<BEGIN> BUILD_PENTIUMgnu_MACRO_CFLAGS_AS
--g \
- -mpentium \
- -ansi \
- -nostdinc \
- -fvolatile \
- -nostdlib \
- -fno-builtin \
- -fno-defer-pop \
- -P \
- -x \
- assembler-with-cpp \
- -I. \
- -I$(WIND_BASE)/target/h \
- -DCPU=PENTIUM
-<END>
-
-<BEGIN> BUILD_PENTIUMgnu_MACRO_CPP
-cc386 -E -P -xc
-<END>
-
-<BEGIN> BUILD_PENTIUMgnu_MACRO_LD
-ld386
-<END>
-
-<BEGIN> BUILD_PENTIUMgnu_MACRO_LDDEPS
-
-<END>
-
-<BEGIN> BUILD_PENTIUMgnu_MACRO_LDFLAGS
--X -N
-<END>
-
-<BEGIN> BUILD_PENTIUMgnu_MACRO_LD_PARTIAL_FLAGS
--X -r
-<END>
-
-<BEGIN> BUILD_PENTIUMgnu_MACRO_NM
-nm386 -g
-<END>
-
-<BEGIN> BUILD_PENTIUMgnu_MACRO_OPTION_DEFINE_MACRO
--D
-<END>
-
-<BEGIN> BUILD_PENTIUMgnu_MACRO_OPTION_INCLUDE_DIR
--I
-<END>
-
-<BEGIN> BUILD_PENTIUMgnu_MACRO_POST_BUILD_RULE
-
-<END>
-
-<BEGIN> BUILD_PENTIUMgnu_MACRO_PRJ_LIBS
-
-<END>
-
-<BEGIN> BUILD_PENTIUMgnu_MACRO_SIZE
-size386
-<END>
-
-<BEGIN> BUILD_PENTIUMgnu_RO_DEPEND_PATH
-{$(WIND_BASE)/target/h/} \
- {$(WIND_BASE)/target/src/} \
- {$(WIND_BASE)/target/config/}
-<END>
-
-<BEGIN> BUILD_PENTIUMgnu_TC
-::tc_PENTIUMgnu
-<END>
-
-<BEGIN> BUILD_RULE_archive
-
-<END>
-
-<BEGIN> BUILD_RULE_ex_dbclient.out
-
-<END>
-
-<BEGIN> BUILD_RULE_objects
-
-<END>
-
-<BEGIN> BUILD_SIMSPARCSOLARISgnu_BUILDRULE
-ex_dbclient.out
-<END>
-
-<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_AR
-arsimso
-<END>
-
-<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_ARCHIVE
-$(PRJ_DIR)/SIMSPARCSOLARISgnu/ex_dbclient.a
-<END>
-
-<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_AS
-ccsimso
-<END>
-
-<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_CC
-ccsimso
-<END>
-
-<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_CFLAGS
--g \
- -ansi \
- -nostdinc \
- -DRW_MULTI_THREAD \
- -D_REENTRANT \
- -fvolatile \
- -fno-builtin \
- -I/export/home/db/build_vxworks \
- -I/export/home/db/include \
- -I. \
- -I$(WIND_BASE)/target/h \
- -DCPU=SIMSPARCSOLARIS
-<END>
-
-<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_CFLAGS_AS
--g \
- -ansi \
- -nostdinc \
- -fvolatile \
- -fno-builtin \
- -P \
- -x \
- assembler-with-cpp \
- -I. \
- -I$(WIND_BASE)/target/h \
- -DCPU=SIMSPARCSOLARIS
-<END>
-
-<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_CPP
-ccsimso -E -P -xc
-<END>
-
-<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_LD
-ccsimso
-<END>
-
-<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_LDDEPS
-
-<END>
-
-<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_LDFLAGS
--N
-<END>
-
-<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_LD_PARTIAL_FLAGS
--nostdlib -r
-<END>
-
-<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_NM
-nmsimso
-<END>
-
-<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_OPTION_DEFINE_MACRO
--D
-<END>
-
-<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_OPTION_INCLUDE_DIR
--I
-<END>
-
-<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_POST_BUILD_RULE
-
-<END>
-
-<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_PRJ_LIBS
-
-<END>
-
-<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_SIZE
-sizesimso
-<END>
-
-<BEGIN> BUILD_SIMSPARCSOLARISgnu_RO_DEPEND_PATH
-{$(WIND_BASE)/target/h/} \
- {$(WIND_BASE)/target/src/} \
- {$(WIND_BASE)/target/config/}
-<END>
-
-<BEGIN> BUILD_SIMSPARCSOLARISgnu_TC
-::tc_SIMSPARCSOLARISgnu
-<END>
-
-<BEGIN> BUILD__CURRENT
-PENTIUMgnu
-<END>
-
-<BEGIN> BUILD__LIST
-SIMSPARCSOLARISgnu PENTIUMgnu
-<END>
-
-<BEGIN> CORE_INFO_TYPE
-::prj_vxApp
-<END>
-
-<BEGIN> CORE_INFO_VERSION
-2.0
-<END>
-
-<BEGIN> FILE_/export/home/db/examples_c/ex_dbclient.c_dependDone
-TRUE
-<END>
-
-<BEGIN> FILE_/export/home/db/examples_c/ex_dbclient.c_dependencies
-/export/home/db/build_vxworks/db_config.h /export/home/db/build_vxworks/db.h
-<END>
-
-<BEGIN> FILE_/export/home/db/examples_c/ex_dbclient.c_objects
-ex_dbclient.o
-<END>
-
-<BEGIN> FILE_/export/home/db/examples_c/ex_dbclient.c_tool
-C/C++ compiler
-<END>
-
-<BEGIN> PROJECT_FILES
-/export/home/db/examples_c/ex_dbclient.c
-<END>
-
-<BEGIN> userComments
-RPC Client example
-<END>
-
diff --git a/bdb/build_vxworks/ex_env/ex_env.wpj b/bdb/build_vxworks/ex_env/ex_env.wpj
deleted file mode 100644
index 7229ffa1309..00000000000
--- a/bdb/build_vxworks/ex_env/ex_env.wpj
+++ /dev/null
@@ -1,248 +0,0 @@
-Document file - DO NOT EDIT
-
-<BEGIN> BUILD_PENTIUMgnu_MACRO_AR
-ar386
-<END>
-
-<BEGIN> BUILD_PENTIUMgnu_MACRO_ARCHIVE
-$(PRJ_DIR)/PENTIUMgnu/ex_env.a
-<END>
-
-<BEGIN> BUILD_PENTIUMgnu_MACRO_AS
-cc386
-<END>
-
-<BEGIN> BUILD_PENTIUMgnu_MACRO_CC
-cc386
-<END>
-
-<BEGIN> BUILD_PENTIUMgnu_MACRO_CFLAGS
--g \
- -mpentium \
- -ansi \
- -nostdinc \
- -DRW_MULTI_THREAD \
- -D_REENTRANT \
- -fvolatile \
- -nostdlib \
- -fno-builtin \
- -fno-defer-pop \
- -I. \
- -I$(WIND_BASE)/target/h \
- -DCPU=PENTIUM \
- -I/export/home/db/build_vxworks
-<END>
-
-<BEGIN> BUILD_PENTIUMgnu_MACRO_CFLAGS_AS
--g \
- -mpentium \
- -ansi \
- -nostdinc \
- -fvolatile \
- -nostdlib \
- -fno-builtin \
- -fno-defer-pop \
- -P \
- -x \
- assembler-with-cpp \
- -I. \
- -I$(WIND_BASE)/target/h \
- -DCPU=PENTIUM
-<END>
-
-<BEGIN> BUILD_PENTIUMgnu_MACRO_CPP
-cc386 -E -P -xc
-<END>
-
-<BEGIN> BUILD_PENTIUMgnu_MACRO_LD
-ld386
-<END>
-
-<BEGIN> BUILD_PENTIUMgnu_MACRO_LDFLAGS
--X -N
-<END>
-
-<BEGIN> BUILD_PENTIUMgnu_MACRO_LD_PARTIAL_FLAGS
--X -r
-<END>
-
-<BEGIN> BUILD_PENTIUMgnu_MACRO_NM
-nm386 -g
-<END>
-
-<BEGIN> BUILD_PENTIUMgnu_MACRO_OPTION_DEFINE_MACRO
--D
-<END>
-
-<BEGIN> BUILD_PENTIUMgnu_MACRO_OPTION_INCLUDE_DIR
--I
-<END>
-
-<BEGIN> BUILD_PENTIUMgnu_MACRO_SIZE
-size386
-<END>
-
-<BEGIN> BUILD_PENTIUMgnu_RO_DEPEND_PATH
-{$(WIND_BASE)/target/h/} \
- {$(WIND_BASE)/target/src/} \
- {$(WIND_BASE)/target/config/}
-<END>
-
-<BEGIN> BUILD_PENTIUMgnu_TC
-::tc_PENTIUMgnu
-<END>
-
-<BEGIN> BUILD_RULE_archive
-
-<END>
-
-<BEGIN> BUILD_RULE_ex_env.out
-
-<END>
-
-<BEGIN> BUILD_RULE_objects
-
-<END>
-
-<BEGIN> BUILD_SIMSPARCSOLARISgnu_BUILDRULE
-ex_env.out
-<END>
-
-<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_AR
-arsimso
-<END>
-
-<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_ARCHIVE
-$(PRJ_DIR)/SIMSPARCSOLARISgnu/ex_env.a
-<END>
-
-<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_AS
-ccsimso
-<END>
-
-<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_CC
-ccsimso
-<END>
-
-<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_CFLAGS
--g \
- -ansi \
- -nostdinc \
- -DRW_MULTI_THREAD \
- -D_REENTRANT \
- -fvolatile \
- -fno-builtin \
- -I/export/home/db/build_vxworks \
- -I. \
- -I$(WIND_BASE)/target/h \
- -DCPU=SIMSPARCSOLARIS
-<END>
-
-<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_CFLAGS_AS
--g \
- -ansi \
- -nostdinc \
- -fvolatile \
- -fno-builtin \
- -P \
- -x \
- assembler-with-cpp \
- -I. \
- -I$(WIND_BASE)/target/h \
- -DCPU=SIMSPARCSOLARIS
-<END>
-
-<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_CPP
-ccsimso -E -P -xc
-<END>
-
-<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_LD
-ccsimso
-<END>
-
-<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_LDDEPS
-
-<END>
-
-<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_LDFLAGS
--N
-<END>
-
-<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_LD_PARTIAL_FLAGS
--nostdlib -r
-<END>
-
-<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_NM
-nmsimso
-<END>
-
-<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_OPTION_DEFINE_MACRO
--D
-<END>
-
-<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_OPTION_INCLUDE_DIR
--I
-<END>
-
-<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_POST_BUILD_RULE
-
-<END>
-
-<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_PRJ_LIBS
-
-<END>
-
-<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_SIZE
-sizesimso
-<END>
-
-<BEGIN> BUILD_SIMSPARCSOLARISgnu_RO_DEPEND_PATH
-{$(WIND_BASE)/target/h/} \
- {$(WIND_BASE)/target/src/} \
- {$(WIND_BASE)/target/config/}
-<END>
-
-<BEGIN> BUILD_SIMSPARCSOLARISgnu_TC
-::tc_SIMSPARCSOLARISgnu
-<END>
-
-<BEGIN> BUILD__CURRENT
-PENTIUMgnu
-<END>
-
-<BEGIN> BUILD__LIST
-SIMSPARCSOLARISgnu PENTIUMgnu
-<END>
-
-<BEGIN> CORE_INFO_TYPE
-::prj_vxApp
-<END>
-
-<BEGIN> CORE_INFO_VERSION
-2.0
-<END>
-
-<BEGIN> FILE_/export/home/db/examples_c/ex_env.c_dependDone
-TRUE
-<END>
-
-<BEGIN> FILE_/export/home/db/examples_c/ex_env.c_dependencies
-/export/home/db/build_vxworks/db_config.h /export/home/db/build_vxworks/db.h
-<END>
-
-<BEGIN> FILE_/export/home/db/examples_c/ex_env.c_objects
-ex_env.o
-<END>
-
-<BEGIN> FILE_/export/home/db/examples_c/ex_env.c_tool
-C/C++ compiler
-<END>
-
-<BEGIN> PROJECT_FILES
-/export/home/db/examples_c/ex_env.c
-<END>
-
-<BEGIN> userComments
-ex_env
-<END>
-
diff --git a/bdb/build_vxworks/ex_mpool/ex_mpool.wpj b/bdb/build_vxworks/ex_mpool/ex_mpool.wpj
deleted file mode 100644
index 6dd9ed4db27..00000000000
--- a/bdb/build_vxworks/ex_mpool/ex_mpool.wpj
+++ /dev/null
@@ -1,248 +0,0 @@
-Document file - DO NOT EDIT
-
-<BEGIN> BUILD_PENTIUMgnu_MACRO_AR
-ar386
-<END>
-
-<BEGIN> BUILD_PENTIUMgnu_MACRO_ARCHIVE
-$(PRJ_DIR)/PENTIUMgnu/ex_mpool.a
-<END>
-
-<BEGIN> BUILD_PENTIUMgnu_MACRO_AS
-cc386
-<END>
-
-<BEGIN> BUILD_PENTIUMgnu_MACRO_CC
-cc386
-<END>
-
-<BEGIN> BUILD_PENTIUMgnu_MACRO_CFLAGS
--g \
- -mpentium \
- -ansi \
- -nostdinc \
- -DRW_MULTI_THREAD \
- -D_REENTRANT \
- -fvolatile \
- -nostdlib \
- -fno-builtin \
- -fno-defer-pop \
- -I. \
- -I$(WIND_BASE)/target/h \
- -DCPU=PENTIUM \
- -I/export/home/db/build_vxworks
-<END>
-
-<BEGIN> BUILD_PENTIUMgnu_MACRO_CFLAGS_AS
--g \
- -mpentium \
- -ansi \
- -nostdinc \
- -fvolatile \
- -nostdlib \
- -fno-builtin \
- -fno-defer-pop \
- -P \
- -x \
- assembler-with-cpp \
- -I. \
- -I$(WIND_BASE)/target/h \
- -DCPU=PENTIUM
-<END>
-
-<BEGIN> BUILD_PENTIUMgnu_MACRO_CPP
-cc386 -E -P -xc
-<END>
-
-<BEGIN> BUILD_PENTIUMgnu_MACRO_LD
-ld386
-<END>
-
-<BEGIN> BUILD_PENTIUMgnu_MACRO_LDFLAGS
--X -N
-<END>
-
-<BEGIN> BUILD_PENTIUMgnu_MACRO_LD_PARTIAL_FLAGS
--X -r
-<END>
-
-<BEGIN> BUILD_PENTIUMgnu_MACRO_NM
-nm386 -g
-<END>
-
-<BEGIN> BUILD_PENTIUMgnu_MACRO_OPTION_DEFINE_MACRO
--D
-<END>
-
-<BEGIN> BUILD_PENTIUMgnu_MACRO_OPTION_INCLUDE_DIR
--I
-<END>
-
-<BEGIN> BUILD_PENTIUMgnu_MACRO_SIZE
-size386
-<END>
-
-<BEGIN> BUILD_PENTIUMgnu_RO_DEPEND_PATH
-{$(WIND_BASE)/target/h/} \
- {$(WIND_BASE)/target/src/} \
- {$(WIND_BASE)/target/config/}
-<END>
-
-<BEGIN> BUILD_PENTIUMgnu_TC
-::tc_PENTIUMgnu
-<END>
-
-<BEGIN> BUILD_RULE_archive
-
-<END>
-
-<BEGIN> BUILD_RULE_ex_mpool.out
-
-<END>
-
-<BEGIN> BUILD_RULE_objects
-
-<END>
-
-<BEGIN> BUILD_SIMSPARCSOLARISgnu_BUILDRULE
-ex_mpool.out
-<END>
-
-<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_AR
-arsimso
-<END>
-
-<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_ARCHIVE
-$(PRJ_DIR)/SIMSPARCSOLARISgnu/ex_mpool.a
-<END>
-
-<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_AS
-ccsimso
-<END>
-
-<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_CC
-ccsimso
-<END>
-
-<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_CFLAGS
--g \
- -ansi \
- -nostdinc \
- -DRW_MULTI_THREAD \
- -D_REENTRANT \
- -fvolatile \
- -fno-builtin \
- -I/export/home/db/build_vxworks \
- -I. \
- -I$(WIND_BASE)/target/h \
- -DCPU=SIMSPARCSOLARIS
-<END>
-
-<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_CFLAGS_AS
--g \
- -ansi \
- -nostdinc \
- -fvolatile \
- -fno-builtin \
- -P \
- -x \
- assembler-with-cpp \
- -I. \
- -I$(WIND_BASE)/target/h \
- -DCPU=SIMSPARCSOLARIS
-<END>
-
-<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_CPP
-ccsimso -E -P -xc
-<END>
-
-<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_LD
-ccsimso
-<END>
-
-<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_LDDEPS
-
-<END>
-
-<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_LDFLAGS
--N
-<END>
-
-<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_LD_PARTIAL_FLAGS
--nostdlib -r
-<END>
-
-<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_NM
-nmsimso
-<END>
-
-<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_OPTION_DEFINE_MACRO
--D
-<END>
-
-<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_OPTION_INCLUDE_DIR
--I
-<END>
-
-<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_POST_BUILD_RULE
-
-<END>
-
-<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_PRJ_LIBS
-
-<END>
-
-<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_SIZE
-sizesimso
-<END>
-
-<BEGIN> BUILD_SIMSPARCSOLARISgnu_RO_DEPEND_PATH
-{$(WIND_BASE)/target/h/} \
- {$(WIND_BASE)/target/src/} \
- {$(WIND_BASE)/target/config/}
-<END>
-
-<BEGIN> BUILD_SIMSPARCSOLARISgnu_TC
-::tc_SIMSPARCSOLARISgnu
-<END>
-
-<BEGIN> BUILD__CURRENT
-PENTIUMgnu
-<END>
-
-<BEGIN> BUILD__LIST
-SIMSPARCSOLARISgnu PENTIUMgnu
-<END>
-
-<BEGIN> CORE_INFO_TYPE
-::prj_vxApp
-<END>
-
-<BEGIN> CORE_INFO_VERSION
-2.0
-<END>
-
-<BEGIN> FILE_/export/home/db/examples_c/ex_mpool.c_dependDone
-TRUE
-<END>
-
-<BEGIN> FILE_/export/home/db/examples_c/ex_mpool.c_dependencies
-/export/home/db/build_vxworks/db_config.h /export/home/db/build_vxworks/db.h
-<END>
-
-<BEGIN> FILE_/export/home/db/examples_c/ex_mpool.c_objects
-ex_mpool.o
-<END>
-
-<BEGIN> FILE_/export/home/db/examples_c/ex_mpool.c_tool
-C/C++ compiler
-<END>
-
-<BEGIN> PROJECT_FILES
-/export/home/db/examples_c/ex_mpool.c
-<END>
-
-<BEGIN> userComments
-ex_mpool
-<END>
-
diff --git a/bdb/build_vxworks/ex_tpcb/ex_tpcb.wpj b/bdb/build_vxworks/ex_tpcb/ex_tpcb.wpj
deleted file mode 100644
index 91de499dcf5..00000000000
--- a/bdb/build_vxworks/ex_tpcb/ex_tpcb.wpj
+++ /dev/null
@@ -1,261 +0,0 @@
-Document file - DO NOT EDIT
-
-<BEGIN> BUILD_PENTIUMgnu_BUILDRULE
-ex_tpcb.out
-<END>
-
-<BEGIN> BUILD_PENTIUMgnu_MACRO_AR
-ar386
-<END>
-
-<BEGIN> BUILD_PENTIUMgnu_MACRO_ARCHIVE
-$(PRJ_DIR)/PENTIUMgnu/ex_tpcb.a
-<END>
-
-<BEGIN> BUILD_PENTIUMgnu_MACRO_AS
-cc386
-<END>
-
-<BEGIN> BUILD_PENTIUMgnu_MACRO_CC
-cc386
-<END>
-
-<BEGIN> BUILD_PENTIUMgnu_MACRO_CFLAGS
--g \
- -mpentium \
- -ansi \
- -nostdinc \
- -DRW_MULTI_THREAD \
- -D_REENTRANT \
- -fvolatile \
- -nostdlib \
- -fno-builtin \
- -fno-defer-pop \
- -I. \
- -I$(WIND_BASE)/target/h \
- -DCPU=PENTIUM \
- -I/export/home/db/build_vxworks \
- -DVERY_TINY
-<END>
-
-<BEGIN> BUILD_PENTIUMgnu_MACRO_CFLAGS_AS
--g \
- -mpentium \
- -ansi \
- -nostdinc \
- -fvolatile \
- -nostdlib \
- -fno-builtin \
- -fno-defer-pop \
- -P \
- -x \
- assembler-with-cpp \
- -I. \
- -I$(WIND_BASE)/target/h \
- -DCPU=PENTIUM
-<END>
-
-<BEGIN> BUILD_PENTIUMgnu_MACRO_CPP
-cc386 -E -P -xc
-<END>
-
-<BEGIN> BUILD_PENTIUMgnu_MACRO_LD
-ld386
-<END>
-
-<BEGIN> BUILD_PENTIUMgnu_MACRO_LDFLAGS
--X -N
-<END>
-
-<BEGIN> BUILD_PENTIUMgnu_MACRO_LD_PARTIAL_FLAGS
--X -r
-<END>
-
-<BEGIN> BUILD_PENTIUMgnu_MACRO_NM
-nm386 -g
-<END>
-
-<BEGIN> BUILD_PENTIUMgnu_MACRO_OPTION_DEFINE_MACRO
--D
-<END>
-
-<BEGIN> BUILD_PENTIUMgnu_MACRO_OPTION_INCLUDE_DIR
--I
-<END>
-
-<BEGIN> BUILD_PENTIUMgnu_MACRO_POST_BUILD_RULE
-
-<END>
-
-<BEGIN> BUILD_PENTIUMgnu_MACRO_PRJ_LIBS
-
-<END>
-
-<BEGIN> BUILD_PENTIUMgnu_MACRO_SIZE
-size386
-<END>
-
-<BEGIN> BUILD_PENTIUMgnu_RO_DEPEND_PATH
-{$(WIND_BASE)/target/h/} \
- {$(WIND_BASE)/target/src/} \
- {$(WIND_BASE)/target/config/}
-<END>
-
-<BEGIN> BUILD_PENTIUMgnu_TC
-::tc_PENTIUMgnu
-<END>
-
-<BEGIN> BUILD_RULE_archive
-
-<END>
-
-<BEGIN> BUILD_RULE_ex_tpcb.out
-
-<END>
-
-<BEGIN> BUILD_RULE_objects
-
-<END>
-
-<BEGIN> BUILD_SIMSPARCSOLARISgnu_BUILDRULE
-ex_tpcb.out
-<END>
-
-<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_AR
-arsimso
-<END>
-
-<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_ARCHIVE
-$(PRJ_DIR)/SIMSPARCSOLARISgnu/ex_tpcb.a
-<END>
-
-<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_AS
-ccsimso
-<END>
-
-<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_CC
-ccsimso
-<END>
-
-<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_CFLAGS
--g \
- -ansi \
- -nostdinc \
- -DRW_MULTI_THREAD \
- -D_REENTRANT \
- -fvolatile \
- -fno-builtin \
- -I/export/home/db/build_vxworks \
- -I. \
- -I$(WIND_BASE)/target/h \
- -DCPU=SIMSPARCSOLARIS
-<END>
-
-<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_CFLAGS_AS
--g \
- -ansi \
- -nostdinc \
- -fvolatile \
- -fno-builtin \
- -P \
- -x \
- assembler-with-cpp \
- -I. \
- -I$(WIND_BASE)/target/h \
- -DCPU=SIMSPARCSOLARIS
-<END>
-
-<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_CPP
-ccsimso -E -P -xc
-<END>
-
-<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_LD
-ccsimso
-<END>
-
-<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_LDDEPS
-
-<END>
-
-<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_LDFLAGS
--N
-<END>
-
-<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_LD_PARTIAL_FLAGS
--nostdlib -r
-<END>
-
-<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_NM
-nmsimso
-<END>
-
-<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_OPTION_DEFINE_MACRO
--D
-<END>
-
-<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_OPTION_INCLUDE_DIR
--I
-<END>
-
-<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_POST_BUILD_RULE
-
-<END>
-
-<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_PRJ_LIBS
-
-<END>
-
-<BEGIN> BUILD_SIMSPARCSOLARISgnu_MACRO_SIZE
-sizesimso
-<END>
-
-<BEGIN> BUILD_SIMSPARCSOLARISgnu_RO_DEPEND_PATH
-{$(WIND_BASE)/target/h/} \
- {$(WIND_BASE)/target/src/} \
- {$(WIND_BASE)/target/config/}
-<END>
-
-<BEGIN> BUILD_SIMSPARCSOLARISgnu_TC
-::tc_SIMSPARCSOLARISgnu
-<END>
-
-<BEGIN> BUILD__CURRENT
-PENTIUMgnu
-<END>
-
-<BEGIN> BUILD__LIST
-SIMSPARCSOLARISgnu PENTIUMgnu
-<END>
-
-<BEGIN> CORE_INFO_TYPE
-::prj_vxApp
-<END>
-
-<BEGIN> CORE_INFO_VERSION
-2.0
-<END>
-
-<BEGIN> FILE_/export/home/db/examples_c/ex_tpcb.c_dependDone
-TRUE
-<END>
-
-<BEGIN> FILE_/export/home/db/examples_c/ex_tpcb.c_dependencies
-/export/home/db/build_vxworks/db_config.h /export/home/db/build_vxworks/db.h
-<END>
-
-<BEGIN> FILE_/export/home/db/examples_c/ex_tpcb.c_objects
-ex_tpcb.o
-<END>
-
-<BEGIN> FILE_/export/home/db/examples_c/ex_tpcb.c_tool
-C/C++ compiler
-<END>
-
-<BEGIN> PROJECT_FILES
-/export/home/db/examples_c/ex_tpcb.c
-<END>
-
-<BEGIN> userComments
-ex_tpcb
-<END>
-
diff --git a/bdb/build_win32/Berkeley_DB.dsw b/bdb/build_win32/Berkeley_DB.dsw
index 482ac7537f0..a413ff983c2 100644
--- a/bdb/build_win32/Berkeley_DB.dsw
+++ b/bdb/build_win32/Berkeley_DB.dsw
@@ -1,51 +1,9 @@
-Microsoft Developer Studio Workspace File, Format Version 5.00
+Microsoft Developer Studio Workspace File, Format Version 6.00
# WARNING: DO NOT EDIT OR DELETE THIS WORKSPACE FILE!
###############################################################################
-Project: "DB_DLL"=.\db_dll.dsp - Package Owner=<4>
-
-Package=<5>
-{{{
-}}}
-
-Package=<4>
-{{{
-}}}
-
-###############################################################################
-
-Project: "DB_Static"=.\db_static.dsp - Package Owner=<4>
-
-Package=<5>
-{{{
-}}}
-
-Package=<4>
-{{{
-}}}
-
-###############################################################################
-
-Project: "db_archive"=.\db_archive.dsp - Package Owner=<4>
-
-Package=<5>
-{{{
-}}}
-
-Package=<4>
-{{{
- Begin Project Dependency
- Project_Dep_Name DB_DLL
- End Project Dependency
- Begin Project Dependency
- Project_Dep_Name DB_Static
- End Project Dependency
-}}}
-
-###############################################################################
-
-Project: "db_buildall"=.\db_buildall.dsp - Package Owner=<4>
+Project: "build_all"=.\build_all.dsp - Package Owner=<4>
Package=<5>
{{{
@@ -125,11 +83,15 @@ Package=<4>
Begin Project Dependency
Project_Dep_Name excxx_tpcb
End Project Dependency
+ Begin Project Dependency
+ Project_Dep_Name db_lib
+ End Project Dependency
+ Begin Project Dependency
}}}
###############################################################################
-Project: "db_checkpoint"=.\db_checkpoint.dsp - Package Owner=<4>
+Project: "db_archive"=.\db_archive.dsp - Package Owner=<4>
Package=<5>
{{{
@@ -138,10 +100,22 @@ Package=<5>
Package=<4>
{{{
Begin Project Dependency
- Project_Dep_Name DB_DLL
+ Project_Dep_Name db_lib
End Project Dependency
+}}}
+
+###############################################################################
+
+Project: "db_checkpoint"=.\db_checkpoint.dsp - Package Owner=<4>
+
+Package=<5>
+{{{
+}}}
+
+Package=<4>
+{{{
Begin Project Dependency
- Project_Dep_Name DB_Static
+ Project_Dep_Name db_lib
End Project Dependency
}}}
@@ -158,9 +132,18 @@ Package=<4>
Begin Project Dependency
Project_Dep_Name DB_DLL
End Project Dependency
- Begin Project Dependency
- Project_Dep_Name DB_Static
- End Project Dependency
+}}}
+
+###############################################################################
+
+Project: "db_dll"=.\db_dll.dsp - Package Owner=<4>
+
+Package=<5>
+{{{
+}}}
+
+Package=<4>
+{{{
}}}
###############################################################################
@@ -174,10 +157,7 @@ Package=<5>
Package=<4>
{{{
Begin Project Dependency
- Project_Dep_Name DB_DLL
- End Project Dependency
- Begin Project Dependency
- Project_Dep_Name DB_Static
+ Project_Dep_Name db_lib
End Project Dependency
}}}
@@ -198,7 +178,7 @@ Package=<4>
###############################################################################
-Project: "db_load"=.\db_load.dsp - Package Owner=<4>
+Project: "db_lib"=.\db_lib.dsp - Package Owner=<4>
Package=<5>
{{{
@@ -207,16 +187,16 @@ Package=<5>
Package=<4>
{{{
Begin Project Dependency
- Project_Dep_Name DB_DLL
+ Project_Dep_Name db_dll
End Project Dependency
Begin Project Dependency
- Project_Dep_Name DB_Static
+ Project_Dep_Name db_static
End Project Dependency
}}}
###############################################################################
-Project: "db_printlog"=.\db_printlog.dsp - Package Owner=<4>
+Project: "db_load"=.\db_load.dsp - Package Owner=<4>
Package=<5>
{{{
@@ -225,16 +205,28 @@ Package=<5>
Package=<4>
{{{
Begin Project Dependency
- Project_Dep_Name DB_DLL
+ Project_Dep_Name db_lib
End Project Dependency
+}}}
+
+###############################################################################
+
+Project: "db_perf"=.\db_perf.dsp - Package Owner=<4>
+
+Package=<5>
+{{{
+}}}
+
+Package=<4>
+{{{
Begin Project Dependency
- Project_Dep_Name DB_Static
+ Project_Dep_Name db_lib
End Project Dependency
}}}
###############################################################################
-Project: "db_recover"=.\db_recover.dsp - Package Owner=<4>
+Project: "db_printlog"=.\db_printlog.dsp - Package Owner=<4>
Package=<5>
{{{
@@ -243,10 +235,22 @@ Package=<5>
Package=<4>
{{{
Begin Project Dependency
- Project_Dep_Name DB_DLL
+ Project_Dep_Name db_lib
End Project Dependency
+}}}
+
+###############################################################################
+
+Project: "db_recover"=.\db_recover.dsp - Package Owner=<4>
+
+Package=<5>
+{{{
+}}}
+
+Package=<4>
+{{{
Begin Project Dependency
- Project_Dep_Name DB_Static
+ Project_Dep_Name db_lib
End Project Dependency
}}}
@@ -261,15 +265,24 @@ Package=<5>
Package=<4>
{{{
Begin Project Dependency
- Project_Dep_Name DB_DLL
- End Project Dependency
- Begin Project Dependency
- Project_Dep_Name DB_Static
+ Project_Dep_Name db_lib
End Project Dependency
}}}
###############################################################################
+Project: "db_static"=.\db_static.dsp - Package Owner=<4>
+
+Package=<5>
+{{{
+}}}
+
+Package=<4>
+{{{
+}}}
+
+###############################################################################
+
Project: "db_tcl"=.\db_tcl.dsp - Package Owner=<4>
Package=<5>
@@ -294,7 +307,7 @@ Package=<5>
Package=<4>
{{{
Begin Project Dependency
- Project_Dep_Name db_buildall
+ Project_Dep_Name build_all
End Project Dependency
Begin Project Dependency
Project_Dep_Name db_tcl
@@ -312,10 +325,7 @@ Package=<5>
Package=<4>
{{{
Begin Project Dependency
- Project_Dep_Name DB_DLL
- End Project Dependency
- Begin Project Dependency
- Project_Dep_Name DB_Static
+ Project_Dep_Name db_lib
End Project Dependency
}}}
@@ -330,10 +340,7 @@ Package=<5>
Package=<4>
{{{
Begin Project Dependency
- Project_Dep_Name DB_DLL
- End Project Dependency
- Begin Project Dependency
- Project_Dep_Name DB_Static
+ Project_Dep_Name db_lib
End Project Dependency
}}}
@@ -348,10 +355,7 @@ Package=<5>
Package=<4>
{{{
Begin Project Dependency
- Project_Dep_Name DB_DLL
- End Project Dependency
- Begin Project Dependency
- Project_Dep_Name DB_Static
+ Project_Dep_Name db_lib
End Project Dependency
}}}
@@ -366,10 +370,7 @@ Package=<5>
Package=<4>
{{{
Begin Project Dependency
- Project_Dep_Name DB_Static
- End Project Dependency
- Begin Project Dependency
- Project_Dep_Name DB_DLL
+ Project_Dep_Name db_lib
End Project Dependency
}}}
@@ -384,10 +385,7 @@ Package=<5>
Package=<4>
{{{
Begin Project Dependency
- Project_Dep_Name DB_DLL
- End Project Dependency
- Begin Project Dependency
- Project_Dep_Name DB_Static
+ Project_Dep_Name db_lib
End Project Dependency
}}}
@@ -402,10 +400,7 @@ Package=<5>
Package=<4>
{{{
Begin Project Dependency
- Project_Dep_Name DB_DLL
- End Project Dependency
- Begin Project Dependency
- Project_Dep_Name DB_Static
+ Project_Dep_Name db_lib
End Project Dependency
}}}
@@ -420,10 +415,7 @@ Package=<5>
Package=<4>
{{{
Begin Project Dependency
- Project_Dep_Name DB_DLL
- End Project Dependency
- Begin Project Dependency
- Project_Dep_Name DB_Static
+ Project_Dep_Name db_lib
End Project Dependency
}}}
@@ -438,10 +430,7 @@ Package=<5>
Package=<4>
{{{
Begin Project Dependency
- Project_Dep_Name DB_DLL
- End Project Dependency
- Begin Project Dependency
- Project_Dep_Name DB_Static
+ Project_Dep_Name db_lib
End Project Dependency
}}}
@@ -456,10 +445,7 @@ Package=<5>
Package=<4>
{{{
Begin Project Dependency
- Project_Dep_Name DB_DLL
- End Project Dependency
- Begin Project Dependency
- Project_Dep_Name DB_Static
+ Project_Dep_Name db_lib
End Project Dependency
}}}
@@ -474,10 +460,7 @@ Package=<5>
Package=<4>
{{{
Begin Project Dependency
- Project_Dep_Name DB_DLL
- End Project Dependency
- Begin Project Dependency
- Project_Dep_Name DB_Static
+ Project_Dep_Name db_lib
End Project Dependency
}}}
@@ -492,10 +475,7 @@ Package=<5>
Package=<4>
{{{
Begin Project Dependency
- Project_Dep_Name DB_DLL
- End Project Dependency
- Begin Project Dependency
- Project_Dep_Name DB_Static
+ Project_Dep_Name db_lib
End Project Dependency
}}}
@@ -510,10 +490,7 @@ Package=<5>
Package=<4>
{{{
Begin Project Dependency
- Project_Dep_Name DB_DLL
- End Project Dependency
- Begin Project Dependency
- Project_Dep_Name DB_Static
+ Project_Dep_Name db_lib
End Project Dependency
}}}
@@ -528,10 +505,7 @@ Package=<5>
Package=<4>
{{{
Begin Project Dependency
- Project_Dep_Name DB_DLL
- End Project Dependency
- Begin Project Dependency
- Project_Dep_Name DB_Static
+ Project_Dep_Name db_lib
End Project Dependency
}}}
@@ -546,10 +520,7 @@ Package=<5>
Package=<4>
{{{
Begin Project Dependency
- Project_Dep_Name DB_DLL
- End Project Dependency
- Begin Project Dependency
- Project_Dep_Name DB_Static
+ Project_Dep_Name db_lib
End Project Dependency
}}}
diff --git a/bdb/build_win32/app_dsp.src b/bdb/build_win32/app_dsp.src
index 064ea7ef51a..ff98d39ec79 100644
--- a/bdb/build_win32/app_dsp.src
+++ b/bdb/build_win32/app_dsp.src
@@ -1,5 +1,5 @@
# Microsoft Developer Studio Project File - Name="@project_name@" - Package Owner=<4>
-# Microsoft Developer Studio Generated Build File, Format Version 5.00
+# Microsoft Developer Studio Generated Build File, Format Version 6.00
# ** DO NOT EDIT **
# TARGTYPE "Win32 (x86) Console Application" 0x0103
@@ -17,17 +17,14 @@ CFG=@project_name@ - Win32 Debug Static
!MESSAGE
!MESSAGE Possible choices for configuration are:
!MESSAGE
-!MESSAGE "@project_name@ - Win32 Release" (based on\
- "Win32 (x86) Console Application")
-!MESSAGE "@project_name@ - Win32 Debug" (based on\
- "Win32 (x86) Console Application")
-!MESSAGE "@project_name@ - Win32 Release Static" (based on\
- "Win32 (x86) Console Application")
-!MESSAGE "@project_name@ - Win32 Debug Static" (based on\
- "Win32 (x86) Console Application")
+!MESSAGE "@project_name@ - Win32 Release" (based on "Win32 (x86) Console Application")
+!MESSAGE "@project_name@ - Win32 Debug" (based on "Win32 (x86) Console Application")
+!MESSAGE "@project_name@ - Win32 Release Static" (based on "Win32 (x86) Console Application")
+!MESSAGE "@project_name@ - Win32 Debug Static" (based on "Win32 (x86) Console Application")
!MESSAGE
# Begin Project
+# PROP AllowPerConfigDependencies 0
# PROP Scc_ProjName ""
# PROP Scc_LocalPath ""
CPP=cl.exe
@@ -47,7 +44,7 @@ RSC=rc.exe
# PROP Ignore_Export_Lib 0
# PROP Target_Dir ""
# ADD BASE CPP /nologo /W3 /GX /O2 /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
-# ADD CPP /nologo /MD /W3 /GX /O2 /I "." /I "../include" /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD CPP /nologo /MD /W3 /GX /O2 /I "." /I ".." /I "../dbinc" /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
# ADD BASE RSC /l 0x409 /d "NDEBUG"
# ADD RSC /l 0x409 /d "NDEBUG"
BSC32=bscmake.exe
@@ -71,7 +68,7 @@ LINK32=link.exe
# PROP Ignore_Export_Lib 0
# PROP Target_Dir ""
# ADD BASE CPP /nologo /W3 /Gm /GX /Zi /Od /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
-# ADD CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I "../include" /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I ".." /I "../dbinc" /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
# ADD BASE RSC /l 0x409 /d "_DEBUG"
# ADD RSC /l 0x409 /d "_DEBUG"
BSC32=bscmake.exe
@@ -95,8 +92,8 @@ LINK32=link.exe
# PROP Intermediate_Dir "Release_static"
# PROP Ignore_Export_Lib 0
# PROP Target_Dir ""
-# ADD BASE CPP /nologo /MD /W3 /GX /O2 /I "." /I "../include" /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
-# ADD CPP /nologo /MT /W3 /GX /O2 /I "." /I "../include" /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD BASE CPP /nologo /MD /W3 /GX /O2 /I "." /I ".." /I "../dbinc" /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD CPP /nologo /MT /W3 /GX /O2 /I "." /I ".." /I "../dbinc" /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
# ADD BASE RSC /l 0x409 /d "NDEBUG"
# ADD RSC /l 0x409 /d "NDEBUG"
BSC32=bscmake.exe
@@ -120,8 +117,8 @@ LINK32=link.exe
# PROP Intermediate_Dir "Debug_static"
# PROP Ignore_Export_Lib 0
# PROP Target_Dir ""
-# ADD BASE CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I "../include" /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
-# ADD CPP /nologo /MTd /W3 /GX /Z7 /Od /I "." /I "../include" /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD BASE CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I ".." /I "../dbinc" /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD CPP /nologo /MTd /W3 /GX /Z7 /Od /I "." /I ".." /I "../dbinc" /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
# ADD BASE RSC /l 0x409 /d "_DEBUG"
# ADD RSC /l 0x409 /d "_DEBUG"
BSC32=bscmake.exe
diff --git a/bdb/build_win32/build_all.dsp b/bdb/build_win32/build_all.dsp
new file mode 100644
index 00000000000..7ae1f9bb031
--- /dev/null
+++ b/bdb/build_win32/build_all.dsp
@@ -0,0 +1,96 @@
+# Microsoft Developer Studio Project File - Name="build_all" - Package Owner=<4>
+# Microsoft Developer Studio Generated Build File, Format Version 6.00
+# ** DO NOT EDIT **
+
+# TARGTYPE "Win32 (x86) Generic Project" 0x010a
+
+CFG=build_all - Win32 Debug
+!MESSAGE This is not a valid makefile. To build this project using NMAKE,
+!MESSAGE use the Export Makefile command and run
+!MESSAGE
+!MESSAGE NMAKE /f "build_all.mak".
+!MESSAGE
+!MESSAGE You can specify a configuration when running NMAKE
+!MESSAGE by defining the macro CFG on the command line. For example:
+!MESSAGE
+!MESSAGE NMAKE /f "build_all.mak" CFG="build_all - Win32 Debug"
+!MESSAGE
+!MESSAGE Possible choices for configuration are:
+!MESSAGE
+!MESSAGE "build_all - Win32 Release" (based on "Win32 (x86) External Target")
+!MESSAGE "build_all - Win32 Debug" (based on "Win32 (x86) External Target")
+!MESSAGE "build_all - Win32 Release Static" (based on "Win32 (x86) External Target")
+!MESSAGE "build_all - Win32 Debug Static" (based on "Win32 (x86) External Target")
+!MESSAGE
+
+# Begin Project
+# PROP AllowPerConfigDependencies 0
+# PROP Scc_ProjName ""
+# PROP Scc_LocalPath ""
+
+!IF "$(CFG)" == "build_all - Win32 Release"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "Release"
+# PROP BASE Intermediate_Dir "Release"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release"
+# PROP Intermediate_Dir "Release"
+# PROP Cmd_Line "echo DB Release version built."
+# PROP Target_Dir ""
+
+!ELSEIF "$(CFG)" == "build_all - Win32 Debug"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "Debug"
+# PROP BASE Intermediate_Dir "Debug"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug"
+# PROP Intermediate_Dir "Debug"
+# PROP Cmd_Line "echo DB Debug version built."
+# PROP Target_Dir ""
+
+!ELSEIF "$(CFG)" == "build_all - Win32 Release Static"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "Release_static"
+# PROP BASE Intermediate_Dir "Release_static"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release_static"
+# PROP Intermediate_Dir "Release_static"
+# PROP Cmd_Line "echo DB Release Static version built."
+# PROP Target_Dir ""
+
+!ELSEIF "$(CFG)" == "build_all - Win32 Debug Static"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "Debug_static"
+# PROP BASE Intermediate_Dir "Debug_Static"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug_static"
+# PROP Intermediate_Dir "Debug_Static"
+# PROP Cmd_Line "echo DB Debug Static version built."
+# PROP Target_Dir ""
+
+!ENDIF
+
+# Begin Target
+
+# Name "build_all - Win32 Release"
+# Name "build_all - Win32 Debug"
+# Name "build_all - Win32 Release Static"
+# Name "build_all - Win32 Debug Static"
+# End Target
+# End Project
diff --git a/bdb/build_win32/db_buildall.dsp b/bdb/build_win32/db_buildall.dsp
deleted file mode 100644
index 58990dbb867..00000000000
--- a/bdb/build_win32/db_buildall.dsp
+++ /dev/null
@@ -1,128 +0,0 @@
-# Microsoft Developer Studio Project File - Name="db_buildall" - Package Owner=<4>
-# Microsoft Developer Studio Generated Build File, Format Version 5.00
-# ** DO NOT EDIT **
-
-# TARGTYPE "Win32 (x86) External Target" 0x0106
-
-CFG=db_buildall - Win32 Debug
-!MESSAGE This is not a valid makefile. To build this project using NMAKE,
-!MESSAGE use the Export Makefile command and run
-!MESSAGE
-!MESSAGE NMAKE /f "db_buildall.mak".
-!MESSAGE
-!MESSAGE You can specify a configuration when running NMAKE
-!MESSAGE by defining the macro CFG on the command line. For example:
-!MESSAGE
-!MESSAGE NMAKE /f "db_buildall.mak" CFG="db_buildall - Win32 Debug Static"
-!MESSAGE
-!MESSAGE Possible choices for configuration are:
-!MESSAGE
-!MESSAGE "db_buildall - Win32 Release" (based on "Win32 (x86) External Target")
-!MESSAGE "db_buildall - Win32 Debug" (based on "Win32 (x86) External Target")
-!MESSAGE "db_buildall - Win32 Release Static" (based on\
- "Win32 (x86) External Target")
-!MESSAGE "db_buildall - Win32 Debug Static" (based on\
- "Win32 (x86) External Target")
-!MESSAGE
-
-# Begin Project
-# PROP Scc_ProjName ""
-# PROP Scc_LocalPath ""
-
-!IF "$(CFG)" == "db_buildall - Win32 Release"
-
-# PROP BASE Use_Debug_Libraries 0
-# PROP BASE Output_Dir "Release"
-# PROP BASE Intermediate_Dir "Release"
-# PROP BASE Cmd_Line "NMAKE /f db_buildall.mak"
-# PROP BASE Rebuild_Opt "/a"
-# PROP BASE Target_File "db_buildall.exe"
-# PROP BASE Bsc_Name "db_buildall.bsc"
-# PROP BASE Target_Dir ""
-# PROP Use_Debug_Libraries 0
-# PROP Output_Dir "Release"
-# PROP Intermediate_Dir "Release"
-# PROP Cmd_Line "echo DB release version built."
-# PROP Rebuild_Opt ""
-# PROP Target_File "db_buildall.exe"
-# PROP Bsc_Name "db_buildall.bsc"
-# PROP Target_Dir ""
-
-!ELSEIF "$(CFG)" == "db_buildall - Win32 Debug"
-
-# PROP BASE Use_Debug_Libraries 1
-# PROP BASE Output_Dir "Debug"
-# PROP BASE Intermediate_Dir "Debug"
-# PROP BASE Cmd_Line "NMAKE /f db_buildall.mak"
-# PROP BASE Rebuild_Opt "/a"
-# PROP BASE Target_File "db_buildall.exe"
-# PROP BASE Bsc_Name "db_buildall.bsc"
-# PROP BASE Target_Dir ""
-# PROP Use_Debug_Libraries 1
-# PROP Output_Dir "Debug"
-# PROP Intermediate_Dir "Debug"
-# PROP Cmd_Line "echo DB debug version built."
-# PROP Rebuild_Opt ""
-# PROP Target_File "db_buildall.exe"
-# PROP Bsc_Name "db_buildall.bsc"
-# PROP Target_Dir ""
-
-!ELSEIF "$(CFG)" == "db_buildall - Win32 Release Static"
-
-# PROP BASE Use_Debug_Libraries 0
-# PROP BASE Output_Dir "Release_static"
-# PROP BASE Intermediate_Dir "Release_static"
-# PROP BASE Cmd_Line "echo DB release version built."
-# PROP BASE Rebuild_Opt ""
-# PROP BASE Target_File "db_buildall.exe"
-# PROP BASE Bsc_Name "db_buildall.bsc"
-# PROP BASE Target_Dir ""
-# PROP Use_Debug_Libraries 0
-# PROP Output_Dir "Release_static"
-# PROP Intermediate_Dir "Release_static"
-# PROP Cmd_Line "echo DB release version built."
-# PROP Rebuild_Opt ""
-# PROP Target_File "db_buildall.exe"
-# PROP Bsc_Name "db_buildall.bsc"
-# PROP Target_Dir ""
-
-!ELSEIF "$(CFG)" == "db_buildall - Win32 Debug Static"
-
-# PROP BASE Use_Debug_Libraries 1
-# PROP BASE Output_Dir "Debug_static"
-# PROP BASE Intermediate_Dir "Debug_static"
-# PROP BASE Cmd_Line "echo DB debug version built."
-# PROP BASE Rebuild_Opt ""
-# PROP BASE Target_File "db_buildall.exe"
-# PROP BASE Bsc_Name "db_buildall.bsc"
-# PROP BASE Target_Dir ""
-# PROP Use_Debug_Libraries 1
-# PROP Output_Dir "Debug_static"
-# PROP Intermediate_Dir "Debug_static"
-# PROP Cmd_Line "echo DB debug version built."
-# PROP Rebuild_Opt ""
-# PROP Target_File "db_buildall.exe"
-# PROP Bsc_Name "db_buildall.bsc"
-# PROP Target_Dir ""
-
-!ENDIF
-
-# Begin Target
-
-# Name "db_buildall - Win32 Release"
-# Name "db_buildall - Win32 Debug"
-# Name "db_buildall - Win32 Release Static"
-# Name "db_buildall - Win32 Debug Static"
-
-!IF "$(CFG)" == "db_buildall - Win32 Release"
-
-!ELSEIF "$(CFG)" == "db_buildall - Win32 Debug"
-
-!ELSEIF "$(CFG)" == "db_buildall - Win32 Release Static"
-
-!ELSEIF "$(CFG)" == "db_buildall - Win32 Debug Static"
-
-!ENDIF
-
-# End Target
-# End Project
diff --git a/bdb/build_win32/db_config.h b/bdb/build_win32/db_config.h
index 8b12d64c3c9..76ce0b5095f 100644
--- a/bdb/build_win32/db_config.h
+++ b/bdb/build_win32/db_config.h
@@ -1,42 +1,8 @@
-/*
- * $Id: db_config.h,v 11.24 2000/12/12 18:39:26 bostic Exp $
- */
-
-/* Define to empty if the keyword does not work. */
-/* #undef const */
-
-/* Define if your struct stat has st_blksize. */
-/* #undef HAVE_ST_BLKSIZE */
-
-/* Define to `int' if <sys/types.h> doesn't define. */
-/* #undef mode_t */
-
-/* Define to `long' if <sys/types.h> doesn't define. */
-/* #undef off_t */
-
-/* Define to `int' if <sys/types.h> doesn't define. */
-/* #undef pid_t */
-
-/* Define to `unsigned' if <sys/types.h> doesn't define. */
-/* #undef size_t */
-
-/* Define if the `S_IS*' macros in <sys/stat.h> do not work properly. */
-/* #undef STAT_MACROS_BROKEN */
-
-/* Define if you have the ANSI C header files. */
-#define STDC_HEADERS 1
-
-/* Define if you can safely include both <sys/time.h> and <time.h>. */
-/* #undef TIME_WITH_SYS_TIME */
-
-/* Define if your processor stores words with the most significant
- byte first (like Motorola and SPARC, unlike Intel and VAX). */
-/* #undef WORDS_BIGENDIAN */
-
-/* Define if you are building a version for running the test suite. */
+/* DO NOT EDIT: automatically built by dist/s_win32. */
+/* Define to 1 if you want to build a version for running the test suite. */
/* #undef CONFIG_TEST */
-/* Define if you want a debugging version. */
+/* Define to 1 if you want a debugging version. */
/* #undef DEBUG */
#if defined(_DEBUG)
#if !defined(DEBUG)
@@ -44,160 +10,360 @@
#endif
#endif
-/* Define if you want a version that logs read operations. */
+/* Define to 1 if you want a version that logs read operations. */
/* #undef DEBUG_ROP */
-/* Define if you want a version that logs write operations. */
+/* Define to 1 if you want a version that logs write operations. */
/* #undef DEBUG_WOP */
-/* Define if you want a version with run-time diagnostic checking. */
+/* Define to 1 if you want a version with run-time diagnostic checking. */
/* #undef DIAGNOSTIC */
-/* Define if you want to mask harmless unitialized memory read/writes. */
-/* #undef UMRW */
+/* Define to 1 if you have the `clock_gettime' function. */
+/* #undef HAVE_CLOCK_GETTIME */
+
+/* Define to 1 if Berkeley DB release includes strong cryptography. */
+/* #undef HAVE_CRYPTO */
-/* Define if fcntl/F_SETFD denies child access to file descriptors. */
+/* Define to 1 if you have the `directio' function. */
+/* #undef HAVE_DIRECTIO */
+
+/* Define to 1 if you have the <dirent.h> header file, and it defines `DIR'.
+ */
+/* #undef HAVE_DIRENT_H */
+
+/* Define to 1 if you have the <dlfcn.h> header file. */
+/* #undef HAVE_DLFCN_H */
+
+/* Define to 1 if you have EXIT_SUCCESS/EXIT_FAILURE #defines. */
+#define HAVE_EXIT_SUCCESS 1
+
+/* Define to 1 if fcntl/F_SETFD denies child access to file descriptors. */
/* #undef HAVE_FCNTL_F_SETFD */
-/* Define if building big-file environment (e.g., AIX, HP/UX, Solaris). */
-/* #undef HAVE_FILE_OFFSET_BITS */
+/* Define to 1 if allocated filesystem blocks are not zeroed. */
+#define HAVE_FILESYSTEM_NOTZERO 1
+
+/* Define to 1 if you have the `getcwd' function. */
+#define HAVE_GETCWD 1
+
+/* Define to 1 if you have the `getopt' function. */
+/* #undef HAVE_GETOPT */
+
+/* Define to 1 if you have the `gettimeofday' function. */
+/* #undef HAVE_GETTIMEOFDAY */
+
+/* Define to 1 if you have the `getuid' function. */
+/* #undef HAVE_GETUID */
+
+/* Define to 1 if you have the <inttypes.h> header file. */
+/* #undef HAVE_INTTYPES_H */
+
+/* Define to 1 if you have the `nsl' library (-lnsl). */
+/* #undef HAVE_LIBNSL */
+
+/* Define to 1 if you have the `memcmp' function. */
+#define HAVE_MEMCMP 1
+
+/* Define to 1 if you have the `memcpy' function. */
+#define HAVE_MEMCPY 1
+
+/* Define to 1 if you have the `memmove' function. */
+#define HAVE_MEMMOVE 1
+
+/* Define to 1 if you have the <memory.h> header file. */
+#define HAVE_MEMORY_H 1
-/* Mutex possibilities. */
+/* Define to 1 if you have the `mlock' function. */
+/* #undef HAVE_MLOCK */
+
+/* Define to 1 if you have the `mmap' function. */
+/* #undef HAVE_MMAP */
+
+/* Define to 1 if you have the `munlock' function. */
+/* #undef HAVE_MUNLOCK */
+
+/* Define to 1 if you have the `munmap' function. */
+/* #undef HAVE_MUNMAP */
+
+/* Define to 1 to use the GCC compiler and 68K assembly language mutexes. */
/* #undef HAVE_MUTEX_68K_GCC_ASSEMBLY */
+
+/* Define to 1 to use the AIX _check_lock mutexes. */
/* #undef HAVE_MUTEX_AIX_CHECK_LOCK */
+
+/* Define to 1 to use the GCC compiler and Alpha assembly language mutexes. */
/* #undef HAVE_MUTEX_ALPHA_GCC_ASSEMBLY */
+
+/* Define to 1 to use the GCC compiler and ARM assembly language mutexes. */
+/* #undef HAVE_MUTEX_ARM_GCC_ASSEMBLY */
+
+/* Define to 1 to use the UNIX fcntl system call mutexes. */
+/* #undef HAVE_MUTEX_FCNTL */
+
+/* Define to 1 to use the GCC compiler and PaRisc assembly language mutexes.
+ */
/* #undef HAVE_MUTEX_HPPA_GCC_ASSEMBLY */
+
+/* Define to 1 to use the msem_XXX mutexes on HP-UX. */
/* #undef HAVE_MUTEX_HPPA_MSEM_INIT */
+
+/* Define to 1 to use the GCC compiler and IA64 assembly language mutexes. */
/* #undef HAVE_MUTEX_IA64_GCC_ASSEMBLY */
-/* #undef HAVE_MUTEX_MACOS */
+
+/* Define to 1 to use the msem_XXX mutexes on systems other than HP-UX. */
/* #undef HAVE_MUTEX_MSEM_INIT */
-/* #undef HAVE_MUTEX_PPC_GCC_ASSEMBLY */
+
+/* Define to 1 to use the GCC compiler and Apple PowerPC assembly language. */
+/* #undef HAVE_MUTEX_PPC_APPLE_GCC_ASSEMBLY */
+
+/* Define to 1 to use the GCC compiler and generic PowerPC assembly language.
+ */
+/* #undef HAVE_MUTEX_PPC_GENERIC_GCC_ASSEMBLY */
+
+/* Define to 1 to use POSIX 1003.1 pthread_XXX mutexes. */
/* #undef HAVE_MUTEX_PTHREADS */
+
+/* Define to 1 to use Reliant UNIX initspin mutexes. */
/* #undef HAVE_MUTEX_RELIANTUNIX_INITSPIN */
+
+/* Define to 1 to use the GCC compiler and S/390 assembly language mutexes. */
+/* #undef HAVE_MUTEX_S390_GCC_ASSEMBLY */
+
+/* Define to 1 to use the SCO compiler and x86 assembly language mutexes. */
/* #undef HAVE_MUTEX_SCO_X86_CC_ASSEMBLY */
+
+/* Define to 1 to use the obsolete POSIX 1003.1 sema_XXX mutexes. */
/* #undef HAVE_MUTEX_SEMA_INIT */
+
+/* Define to 1 to use the SGI XXX_lock mutexes. */
/* #undef HAVE_MUTEX_SGI_INIT_LOCK */
+
+/* Define to 1 to use the Solaris _lock_XXX mutexes. */
/* #undef HAVE_MUTEX_SOLARIS_LOCK_TRY */
-/* #undef HAVE_MUTEX_SOLARIS_LWP */
-/* #undef HAVE_MUTEX_SPARC_GCC_ASSEMBLY */
-#define HAVE_MUTEX_THREADS 1
-/* #undef HAVE_MUTEX_UI_THREADS */
-/* #undef HAVE_MUTEX_UTS_CC_ASSEMBLY */
-/* #undef HAVE_MUTEX_VMS */
-/* #undef HAVE_MUTEX_VXWORKS */
-/* #undef HAVE_MUTEX_WIN16 */
-#define HAVE_MUTEX_WIN32 1
-/* #undef HAVE_MUTEX_X86_GCC_ASSEMBLY */
-/* Define if building on QNX. */
-/* #undef HAVE_QNX */
+/* Define to 1 to use the Solaris lwp threads mutexes. */
+/* #undef HAVE_MUTEX_SOLARIS_LWP */
-/* Define if building RPC client/server. */
-/* #undef HAVE_RPC */
+/* Define to 1 to use the GCC compiler and Sparc assembly language mutexes. */
+/* #undef HAVE_MUTEX_SPARC_GCC_ASSEMBLY */
-/* Define if your sprintf returns a pointer, not a length. */
-/* #undef SPRINTF_RET_CHARPNT */
+/* Define to 1 if mutexes hold system resources. */
+/* #undef HAVE_MUTEX_SYSTEM_RESOURCES */
-/* Define if you have the getcwd function. */
-#define HAVE_GETCWD 1
+/* Define to 1 if fast mutexes are available. */
+#define HAVE_MUTEX_THREADS 1
-/* Define if you have the getopt function. */
-/* #undef HAVE_GETOPT */
+/* Define to 1 to configure mutexes intra-process only. */
+/* #undef HAVE_MUTEX_THREAD_ONLY */
-/* Define if you have the getuid function. */
-/* #undef HAVE_GETUID */
+/* Define to 1 to use the UNIX International mutexes. */
+/* #undef HAVE_MUTEX_UI_THREADS */
-/* Define if you have the memcmp function. */
-#define HAVE_MEMCMP 1
+/* Define to 1 to use the UTS compiler and assembly language mutexes. */
+/* #undef HAVE_MUTEX_UTS_CC_ASSEMBLY */
-/* Define if you have the memcpy function. */
-#define HAVE_MEMCPY 1
+/* Define to 1 to use VMS mutexes. */
+/* #undef HAVE_MUTEX_VMS */
-/* Define if you have the memmove function. */
-#define HAVE_MEMMOVE 1
+/* Define to 1 to use VxWorks mutexes. */
+/* #undef HAVE_MUTEX_VXWORKS */
-/* Define if you have the mlock function. */
-/* #undef HAVE_MLOCK */
+/* Define to 1 to use Windows mutexes. */
+#define HAVE_MUTEX_WIN32 1
-/* Define if you have the mmap function. */
-/* #undef HAVE_MMAP */
+/* Define to 1 to use the GCC compiler and x86 assembly language mutexes. */
+/* #undef HAVE_MUTEX_X86_GCC_ASSEMBLY */
-/* Define if you have the munlock function. */
-/* #undef HAVE_MUNLOCK */
+/* Define to 1 if you have the <ndir.h> header file, and it defines `DIR'. */
+/* #undef HAVE_NDIR_H */
-/* Define if you have the munmap function. */
-/* #undef HAVE_MUNMAP */
+/* Define to 1 if you have the O_DIRECT flag. */
+/* #undef HAVE_O_DIRECT */
-/* Define if you have the pread function. */
+/* Define to 1 if you have the `pread' function. */
/* #undef HAVE_PREAD */
-/* Define if you have the pstat_getdynamic function. */
+/* Define to 1 if you have the `pstat_getdynamic' function. */
/* #undef HAVE_PSTAT_GETDYNAMIC */
-/* Define if you have the pwrite function. */
+/* Define to 1 if you have the `pwrite' function. */
/* #undef HAVE_PWRITE */
-/* Define if you have the qsort function. */
+/* Define to 1 if building on QNX. */
+/* #undef HAVE_QNX */
+
+/* Define to 1 if you have the `qsort' function. */
#define HAVE_QSORT 1
-/* Define if you have the raise function. */
+/* Define to 1 if you have the `raise' function. */
#define HAVE_RAISE 1
-/* Define if you have the sched_yield function. */
+/* Define to 1 if building RPC client/server. */
+/* #undef HAVE_RPC */
+
+/* Define to 1 if you have the `sched_yield' function. */
/* #undef HAVE_SCHED_YIELD */
-/* Define if you have the select function. */
+/* Define to 1 if you have the `select' function. */
/* #undef HAVE_SELECT */
-/* Define if you have the shmget function. */
+/* Define to 1 if you have the `shmget' function. */
/* #undef HAVE_SHMGET */
-/* Define if you have the snprintf function. */
+/* Define to 1 if you have the `snprintf' function. */
#define HAVE_SNPRINTF 1
-/* Define if you have the strcasecmp function. */
+/* Define to 1 if you have the <stdint.h> header file. */
+/* #undef HAVE_STDINT_H */
+
+/* Define to 1 if you have the <stdlib.h> header file. */
+#define HAVE_STDLIB_H 1
+
+/* Define to 1 if you have the `strcasecmp' function. */
/* #undef HAVE_STRCASECMP */
-/* Define if you have the strerror function. */
+/* Define to 1 if you have the `strdup' function. */
+#define HAVE_STRDUP 1
+
+/* Define to 1 if you have the `strerror' function. */
#define HAVE_STRERROR 1
-/* Define if you have the strtoul function. */
+/* Define to 1 if you have the <strings.h> header file. */
+#define HAVE_STRINGS_H 1
+
+/* Define to 1 if you have the <string.h> header file. */
+#define HAVE_STRING_H 1
+
+/* Define to 1 if you have the `strtoul' function. */
#define HAVE_STRTOUL 1
-/* Define if you have the sysconf function. */
+/* Define to 1 if `st_blksize' is member of `struct stat'. */
+/* #undef HAVE_STRUCT_STAT_ST_BLKSIZE */
+
+/* Define to 1 if you have the `sysconf' function. */
/* #undef HAVE_SYSCONF */
-/* Define if you have the vsnprintf function. */
+/* Define to 1 if you have the <sys/dir.h> header file, and it defines `DIR'.
+ */
+/* #undef HAVE_SYS_DIR_H */
+
+/* Define to 1 if you have the <sys/ndir.h> header file, and it defines `DIR'.
+ */
+/* #undef HAVE_SYS_NDIR_H */
+
+/* Define to 1 if you have the <sys/select.h> header file. */
+/* #undef HAVE_SYS_SELECT_H */
+
+/* Define to 1 if you have the <sys/stat.h> header file. */
+#define HAVE_SYS_STAT_H 1
+
+/* Define to 1 if you have the <sys/time.h> header file. */
+/* #undef HAVE_SYS_TIME_H */
+
+/* Define to 1 if you have the <sys/types.h> header file. */
+#define HAVE_SYS_TYPES_H 1
+
+/* Define to 1 if you have the <unistd.h> header file. */
+/* #undef HAVE_UNISTD_H */
+
+/* Define to 1 if unlink of file with open file descriptors will fail. */
+/* #undef HAVE_UNLINK_WITH_OPEN_FAILURE */
+
+/* Define to 1 if you have the `vsnprintf' function. */
#define HAVE_VSNPRINTF 1
-/* Define if you have the yield function. */
+/* Define to 1 if building VxWorks. */
+/* #undef HAVE_VXWORKS */
+
+/* Define to 1 if you have the `yield' function. */
/* #undef HAVE_YIELD */
-/* Define if you have the <dirent.h> header file. */
-/* #undef HAVE_DIRENT_H */
+/* Define to 1 if you have the `_fstati64' function. */
+#define HAVE__FSTATI64 1
-/* Define if you have the <ndir.h> header file. */
-/* #undef HAVE_NDIR_H */
+/* Define to the address where bug reports for this package should be sent. */
+#define PACKAGE_BUGREPORT "support@sleepycat.com"
-/* Define if you have the <sys/dir.h> header file. */
-/* #undef HAVE_SYS_DIR_H */
+/* Define to the full name of this package. */
+#define PACKAGE_NAME "Berkeley DB"
-/* Define if you have the <sys/ndir.h> header file. */
-/* #undef HAVE_SYS_NDIR_H */
+/* Define to the full name and version of this package. */
+#define PACKAGE_STRING "Berkeley DB 4.1.24"
-/* Define if you have the <sys/select.h> header file. */
-/* #undef HAVE_SYS_SELECT_H */
+/* Define to the one symbol short name of this package. */
+#define PACKAGE_TARNAME "db-4.1.24"
-/* Define if you have the <sys/time.h> header file. */
-/* #undef HAVE_SYS_TIME_H */
+/* Define to the version of this package. */
+#define PACKAGE_VERSION "4.1.24"
-/* Define if you have the nsl library (-lnsl). */
-/* #undef HAVE_LIBNSL */
+/* Define to 1 if the `S_IS*' macros in <sys/stat.h> do not work properly. */
+/* #undef STAT_MACROS_BROKEN */
+
+/* Define to 1 if you have the ANSI C header files. */
+#define STDC_HEADERS 1
+
+/* Define to 1 if you can safely include both <sys/time.h> and <time.h>. */
+/* #undef TIME_WITH_SYS_TIME */
+
+/* Define to 1 to mask harmless uninitialized memory read/writes. */
+/* #undef UMRW */
+
+/* Number of bits in a file offset, on hosts where this is settable. */
+/* #undef _FILE_OFFSET_BITS */
+
+/* Define for large files, on AIX-style hosts. */
+/* #undef _LARGE_FILES */
+
+/* Define to empty if `const' does not conform to ANSI C. */
+/* #undef const */
+
+/*
+ * Exit success/failure macros.
+ */
+#ifndef HAVE_EXIT_SUCCESS
+#define EXIT_FAILURE 1
+#define EXIT_SUCCESS 0
+#endif
+
+/*
+ * Don't step on the namespace. Other libraries may have their own
+ * implementations of these functions; we don't want to use their
+ * implementations or force them to use ours based on the load order.
+ */
+#ifndef HAVE_GETCWD
+#define getcwd __db_Cgetcwd
+#endif
+#ifndef HAVE_MEMCMP
+#define memcmp __db_Cmemcmp
+#endif
+#ifndef HAVE_MEMCPY
+#define memcpy __db_Cmemcpy
+#endif
+#ifndef HAVE_MEMMOVE
+#define memmove __db_Cmemmove
+#endif
+#ifndef HAVE_RAISE
+#define raise __db_Craise
+#endif
+#ifndef HAVE_SNPRINTF
+#define snprintf __db_Csnprintf
+#endif
+#ifndef HAVE_STRCASECMP
+#define strcasecmp __db_Cstrcasecmp
+#define strncasecmp __db_Cstrncasecmp
+#endif
+#ifndef HAVE_STRERROR
+#define strerror __db_Cstrerror
+#endif
+#ifndef HAVE_VSNPRINTF
+#define vsnprintf __db_Cvsnprintf
+#endif
/*
* XXX
- * The following is not part of the automatic configuration setup,
- * but provides the information necessary to build DB on Windows.
+ * The following is not part of the automatic configuration setup, but
+ * provides the information necessary to build Berkeley DB on Windows.
*/
#include <sys/types.h>
#include <sys/stat.h>
@@ -214,10 +380,6 @@
#include <time.h>
#include <errno.h>
-#if defined(__cplusplus)
-#include <iostream.h>
-#endif
-
/*
* To build Tcl interface libraries, the include path must be configured to
* use the directory containing <tcl.h>, usually the include directory in
@@ -231,9 +393,14 @@
#include <windows.h>
/*
- * Win32 has fsync, getcwd, snprintf and vsnprintf, but under different names.
+ * All of the necessary includes have been included; ignore the #includes
+ * in the Berkeley DB source files.
+ */
+#define NO_SYSTEM_INCLUDES
+
+/*
+ * Win32 has getcwd, snprintf and vsnprintf, but under different names.
*/
-#define fsync(fd) _commit(fd)
#define getcwd(buf, size) _getcwd(buf, size)
#define snprintf _snprintf
#define vsnprintf _vsnprintf
@@ -251,8 +418,6 @@ extern int getopt(int, char * const *, const char *);
}
#endif
-#define NO_SYSTEM_INCLUDES
-
/*
* We use DB_WIN32 much as one would use _WIN32, to determine that we're
* using an operating system environment that supports Win32 calls
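
The configuration header above drives a simple portability shim: routines the platform lacks are remapped to private __db_C* replacements, and the Win32-specific section renames getcwd, snprintf and vsnprintf to their underscore-prefixed CRT equivalents. A minimal sketch of how calling code picks this up; the format_version() helper below is a hypothetical illustration, not part of Berkeley DB:

    /*
     * Hypothetical caller.  With the Win32 settings above, the snprintf()
     * call below expands to _snprintf(); on a platform where HAVE_SNPRINTF
     * is not set it would expand to the private __db_Csnprintf()
     * replacement instead.
     */
    #include "db_config.h"

    #include <stdio.h>

    static int
    format_version(char *buf, size_t len, int major, int minor, int patch)
    {
        return (snprintf(buf, len, "%d.%d.%d", major, minor, patch));
    }
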
diff --git a/bdb/build_win32/db_cxx.h b/bdb/build_win32/db_cxx.h
new file mode 100644
index 00000000000..1b72f310f82
--- /dev/null
+++ b/bdb/build_win32/db_cxx.h
@@ -0,0 +1,796 @@
+/* DO NOT EDIT: automatically built by dist/s_win32. */
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: db_cxx.in,v 11.113 2002/08/23 13:02:27 mjc Exp $
+ */
+
+#ifndef _DB_CXX_H_
+#define _DB_CXX_H_
+//
+// C++ assumptions:
+//
+// To ensure portability to many platforms, both new and old, we make
+// few assumptions about the C++ compiler and library. For example,
+// we do not expect STL, templates or namespaces to be available. The
+// "newest" C++ feature used is exceptions, which are used liberally
+// to transmit error information. Even the use of exceptions can be
+// disabled at runtime; to do so, use the DB_CXX_NO_EXCEPTIONS flag
+// with the DbEnv or Db constructor.
+//
+// C++ naming conventions:
+//
+// - All top level class names start with Db.
+// - All class members start with lower case letter.
+// - All private data members are suffixed with underscore.
+// - Use underscores to divide names into multiple words.
+// - Simple data accessors are named with get_ or set_ prefix.
+// - All method names are taken from names of functions in the C
+// layer of db (usually by dropping a prefix like "db_").
+// These methods have the same argument types and order,
+// other than dropping the explicit arg that acts as "this".
+//
+// As a rule, each DbFoo object has exactly one underlying DB_FOO struct
+// (defined in db.h) associated with it. In some cases, we inherit directly
+// from the DB_FOO structure to make this relationship explicit. Often,
+// the underlying C layer allocates and deallocates these structures, so
+// there is no easy way to add any data to the DbFoo class. When you see
+// a comment about whether data is permitted to be added, this is what
+// is going on. Of course, if we need to add data to such C++ classes
+// in the future, we will arrange to have an indirect pointer to the
+// DB_FOO struct (as some of the classes already have).
+//
+
+////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////
+//
+// Forward declarations
+//
+
+#include <stdarg.h>
+
+#define HAVE_CXX_STDHEADERS 1
+#ifdef HAVE_CXX_STDHEADERS
+#include <iostream>
+#define __DB_OSTREAMCLASS std::ostream
+#else
+#include <iostream.h>
+#define __DB_OSTREAMCLASS ostream
+#endif
+
+#include "db.h"
+#include "cxx_common.h"
+#include "cxx_except.h"
+
+class Db; // forward
+class Dbc; // forward
+class DbEnv; // forward
+class DbInfo; // forward
+class DbLock; // forward
+class DbLogc; // forward
+class DbLsn; // forward
+class DbMpoolFile; // forward
+class DbPreplist; // forward
+class Dbt; // forward
+class DbTxn; // forward
+
+// These classes are not defined here and should be invisible
+// to the user, but some compilers require forward references.
+// There is one for each use of the DEFINE_DB_CLASS macro.
+
+class DbImp;
+class DbEnvImp;
+class DbMpoolFileImp;
+class DbTxnImp;
+
+// DEFINE_DB_CLASS defines an imp_ data member and imp() accessor.
+// The underlying type is a pointer to an opaque *Imp class, that
+// gets converted to the correct implementation class by the implementation.
+//
+// Since these defines use "private/public" labels, and leave the access
+// level at "private", we always use these by convention before any data
+// members in the private section of a class. Keeping them in the
+// private section also emphasizes that they are off limits to user code.
+//
+#define DEFINE_DB_CLASS(name) \
+ public: class name##Imp* imp() { return (imp_); } \
+ public: const class name##Imp* constimp() const { return (imp_); } \
+ private: class name##Imp* imp_
+
+////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////
+//
+// Turn off inappropriate compiler warnings
+//
+
+#ifdef _MSC_VER
+
+// These are level 4 warnings that are explicitly disabled.
+// With Visual C++, by default you do not see above level 3 unless
+// you use /W4. But we like to compile with the highest level
+// warnings to catch other errors.
+//
+// 4201: nameless struct/union
+// triggered by standard include file <winnt.h>
+//
+// 4514: unreferenced inline function has been removed
+// certain include files in MSVC define methods that are not called
+//
+#pragma warning(disable: 4201 4514)
+
+#endif
+
+// Some interfaces can be customized by allowing users to define
+// callback functions. For performance and logistical reasons, some
+// callback functions must be declared in extern "C" blocks. For others,
+// we allow you to declare the callbacks in C++ or C (or an extern "C"
+// block) as you wish. See the set methods for the callbacks for
+// the choices.
+//
+extern "C" {
+ typedef void * (*db_malloc_fcn_type)
+ (size_t);
+ typedef void * (*db_realloc_fcn_type)
+ (void *, size_t);
+ typedef void (*db_free_fcn_type)
+ (void *);
+ typedef int (*bt_compare_fcn_type) /*C++ version available*/
+ (DB *, const DBT *, const DBT *);
+ typedef size_t (*bt_prefix_fcn_type) /*C++ version available*/
+ (DB *, const DBT *, const DBT *);
+ typedef int (*dup_compare_fcn_type) /*C++ version available*/
+ (DB *, const DBT *, const DBT *);
+ typedef u_int32_t (*h_hash_fcn_type) /*C++ version available*/
+ (DB *, const void *, u_int32_t);
+ typedef int (*pgin_fcn_type)
+ (DB_ENV *dbenv, db_pgno_t pgno, void *pgaddr, DBT *pgcookie);
+ typedef int (*pgout_fcn_type)
+ (DB_ENV *dbenv, db_pgno_t pgno, void *pgaddr, DBT *pgcookie);
+};
+
+////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////
+//
+// Lock classes
+//
+
+class _exported DbLock
+{
+ friend class DbEnv;
+
+public:
+ DbLock();
+ DbLock(const DbLock &);
+ DbLock &operator = (const DbLock &);
+
+protected:
+ // We can add data to this class if needed
+ // since its contained class is not allocated by db.
+ // (see comment at top)
+
+ DbLock(DB_LOCK);
+ DB_LOCK lock_;
+};
+
+////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////
+//
+// Log classes
+//
+
+class _exported DbLsn : protected DB_LSN
+{
+ friend class DbEnv; // friendship needed to cast to base class
+ friend class DbLogc; // friendship needed to cast to base class
+};
+
+////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////
+//
+// Memory pool classes
+//
+
+class _exported DbMpoolFile
+{
+ friend class DbEnv;
+
+private:
+ // Put this first to allow inlining with some C++ compilers (g++-2.95)
+ DEFINE_DB_CLASS(DbMpoolFile);
+
+public:
+ int close(u_int32_t flags);
+ int get(db_pgno_t *pgnoaddr, u_int32_t flags, void *pagep);
+ void last_pgno(db_pgno_t *pgnoaddr);
+ int open(const char *file, u_int32_t flags, int mode, size_t pagesize);
+ int put(void *pgaddr, u_int32_t flags);
+ void refcnt(db_pgno_t *pgnoaddr);
+ int set(void *pgaddr, u_int32_t flags);
+ int set_clear_len(u_int32_t len);
+ int set_fileid(u_int8_t *fileid);
+ int set_ftype(int ftype);
+ int set_lsn_offset(int32_t offset);
+ int set_pgcookie(DBT *dbt);
+ void set_unlink(int);
+ int sync();
+
+ virtual DB_MPOOLFILE *get_DB_MPOOLFILE()
+ {
+ return (DB_MPOOLFILE *)imp();
+ }
+
+ virtual const DB_MPOOLFILE *get_const_DB_MPOOLFILE() const
+ {
+ return (const DB_MPOOLFILE *)constimp();
+ }
+
+private:
+ // We can add data to this class if needed
+ // since it is implemented via a pointer.
+ // (see comment at top)
+
+ // Note: use DbEnv::memp_fcreate() to get pointers to a DbMpoolFile,
+ // and call DbMpoolFile::close() rather than delete to release them.
+ //
+ DbMpoolFile();
+
+ // Shut g++ up.
+protected:
+ virtual ~DbMpoolFile();
+
+private:
+ // no copying
+ DbMpoolFile(const DbMpoolFile &);
+ void operator = (const DbMpoolFile &);
+};
+
+////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////
+//
+// This is filled in and returned by the DbEnv::txn_recover() method.
+//
+
+class _exported DbPreplist
+{
+public:
+ DbTxn *txn;
+ u_int8_t gid[DB_XIDDATASIZE];
+};
+
+////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////
+//
+// Transaction classes
+//
+
+class _exported DbTxn
+{
+ friend class DbEnv;
+
+private:
+ // Put this first to allow inlining with some C++ compilers (g++-2.95)
+ DEFINE_DB_CLASS(DbTxn);
+
+public:
+ int abort();
+ int commit(u_int32_t flags);
+ int discard(u_int32_t flags);
+ u_int32_t id();
+ int prepare(u_int8_t *gid);
+ int set_timeout(db_timeout_t timeout, u_int32_t flags);
+
+ virtual DB_TXN *get_DB_TXN()
+ {
+ return (DB_TXN *)imp();
+ }
+
+ virtual const DB_TXN *get_const_DB_TXN() const
+ {
+ return (const DB_TXN *)constimp();
+ }
+
+ static DbTxn* get_DbTxn(DB_TXN *txn)
+ {
+ return (DbTxn *)txn->api_internal;
+ }
+
+ static const DbTxn* get_const_DbTxn(const DB_TXN *txn)
+ {
+ return (const DbTxn *)txn->api_internal;
+ }
+
+ // For internal use only.
+ static DbTxn* wrap_DB_TXN(DB_TXN *txn);
+
+private:
+ // We can add data to this class if needed
+ // since it is implemented via a pointer.
+ // (see comment at top)
+
+ // Note: use DbEnv::txn_begin() to get pointers to a DbTxn,
+	// and call DbTxn::abort() or DbTxn::commit() rather than
+ // delete to release them.
+ //
+ DbTxn();
+ // For internal use only.
+ DbTxn(DB_TXN *txn);
+ virtual ~DbTxn();
+
+ // no copying
+ DbTxn(const DbTxn &);
+ void operator = (const DbTxn &);
+};
+
+//
+// Berkeley DB environment class. Provides functions for opening databases.
+// Users of this library can use this class as a starting point for
+// developing a DB application: derive an application class from
+// this one and add application control logic.
+//
+// Note that if you use the default constructor, you must explicitly
+// call appinit() before any other db activity (e.g. opening files)
+//
+class _exported DbEnv
+{
+ friend class Db;
+ friend class DbLock;
+ friend class DbMpoolFile;
+
+private:
+ // Put this first to allow inlining with some C++ compilers (g++-2.95)
+ DEFINE_DB_CLASS(DbEnv);
+
+public:
+ // After using this constructor, you can set any needed
+ // parameters for the environment using the set_* methods.
+ // Then call open() to finish initializing the environment
+ // and attaching it to underlying files.
+ //
+ DbEnv(u_int32_t flags);
+
+ virtual ~DbEnv();
+
+ // These methods match those in the C interface.
+ //
+ virtual int close(u_int32_t);
+ virtual int dbremove(DbTxn *txn, const char *name, const char *subdb,
+ u_int32_t flags);
+ virtual int dbrename(DbTxn *txn, const char *name, const char *subdb,
+ const char *newname, u_int32_t flags);
+ virtual void err(int, const char *, ...);
+ virtual void errx(const char *, ...);
+ virtual void *get_app_private() const;
+ virtual int open(const char *, u_int32_t, int);
+ virtual int remove(const char *, u_int32_t);
+ virtual int set_alloc(db_malloc_fcn_type, db_realloc_fcn_type,
+ db_free_fcn_type);
+ virtual void set_app_private(void *);
+ virtual int set_cachesize(u_int32_t, u_int32_t, int);
+ virtual int set_data_dir(const char *);
+ virtual int set_encrypt(const char *, int);
+ virtual void set_errcall(void (*)(const char *, char *));
+ virtual void set_errfile(FILE *);
+ virtual void set_errpfx(const char *);
+ virtual int set_flags(u_int32_t, int);
+ virtual int set_feedback(void (*)(DbEnv *, int, int));
+ virtual int set_lg_bsize(u_int32_t);
+ virtual int set_lg_dir(const char *);
+ virtual int set_lg_max(u_int32_t);
+ virtual int set_lg_regionmax(u_int32_t);
+ virtual int set_lk_conflicts(u_int8_t *, int);
+ virtual int set_lk_detect(u_int32_t);
+ virtual int set_lk_max(u_int32_t);
+ virtual int set_lk_max_lockers(u_int32_t);
+ virtual int set_lk_max_locks(u_int32_t);
+ virtual int set_lk_max_objects(u_int32_t);
+ virtual int set_mp_mmapsize(size_t);
+ virtual int set_paniccall(void (*)(DbEnv *, int));
+ virtual int set_rpc_server(void *, char *, long, long, u_int32_t);
+ virtual int set_shm_key(long);
+ virtual int set_timeout(db_timeout_t timeout, u_int32_t flags);
+ virtual int set_tmp_dir(const char *);
+ virtual int set_tas_spins(u_int32_t);
+ virtual int set_tx_max(u_int32_t);
+ virtual int set_app_dispatch(int (*)(DbEnv *,
+ Dbt *, DbLsn *, db_recops));
+ virtual int set_tx_timestamp(time_t *);
+ virtual int set_verbose(u_int32_t which, int onoff);
+
+ // Version information. A static method so it can be obtained anytime.
+ //
+ static char *version(int *major, int *minor, int *patch);
+
+ // Convert DB errors to strings
+ static char *strerror(int);
+
+ // If an error is detected and the error call function
+ // or stream is set, a message is dispatched or printed.
+ // If a prefix is set, each message is prefixed.
+ //
+ // You can use set_errcall() or set_errfile() above to control
+ // error functionality. Alternatively, you can call
+ // set_error_stream() to force all errors to a C++ stream.
+ // It is unwise to mix these approaches.
+ //
+ virtual void set_error_stream(__DB_OSTREAMCLASS *);
+
+ // used internally
+ static void runtime_error(const char *caller, int err,
+ int error_policy);
+ static void runtime_error_dbt(const char *caller, Dbt *dbt,
+ int error_policy);
+ static void runtime_error_lock_get(const char *caller, int err,
+ db_lockop_t op, db_lockmode_t mode,
+ const Dbt *obj, DbLock lock, int index,
+ int error_policy);
+
+ // Lock functions
+ //
+ virtual int lock_detect(u_int32_t flags, u_int32_t atype, int *aborted);
+ virtual int lock_get(u_int32_t locker, u_int32_t flags, const Dbt *obj,
+ db_lockmode_t lock_mode, DbLock *lock);
+ virtual int lock_id(u_int32_t *idp);
+ virtual int lock_id_free(u_int32_t id);
+ virtual int lock_put(DbLock *lock);
+ virtual int lock_stat(DB_LOCK_STAT **statp, u_int32_t flags);
+ virtual int lock_vec(u_int32_t locker, u_int32_t flags, DB_LOCKREQ list[],
+ int nlist, DB_LOCKREQ **elistp);
+
+ // Log functions
+ //
+ virtual int log_archive(char **list[], u_int32_t flags);
+ static int log_compare(const DbLsn *lsn0, const DbLsn *lsn1);
+ virtual int log_cursor(DbLogc **cursorp, u_int32_t flags);
+ virtual int log_file(DbLsn *lsn, char *namep, size_t len);
+ virtual int log_flush(const DbLsn *lsn);
+ virtual int log_put(DbLsn *lsn, const Dbt *data, u_int32_t flags);
+
+ virtual int log_stat(DB_LOG_STAT **spp, u_int32_t flags);
+
+ // Mpool functions
+ //
+ virtual int memp_fcreate(DbMpoolFile **dbmfp, u_int32_t flags);
+ virtual int memp_register(int ftype,
+ pgin_fcn_type pgin_fcn,
+ pgout_fcn_type pgout_fcn);
+ virtual int memp_stat(DB_MPOOL_STAT
+ **gsp, DB_MPOOL_FSTAT ***fsp, u_int32_t flags);
+ virtual int memp_sync(DbLsn *lsn);
+ virtual int memp_trickle(int pct, int *nwrotep);
+
+ // Transaction functions
+ //
+ virtual int txn_begin(DbTxn *pid, DbTxn **tid, u_int32_t flags);
+ virtual int txn_checkpoint(u_int32_t kbyte, u_int32_t min, u_int32_t flags);
+ virtual int txn_recover(DbPreplist *preplist, long count,
+ long *retp, u_int32_t flags);
+ virtual int txn_stat(DB_TXN_STAT **statp, u_int32_t flags);
+
+ // Replication functions
+ //
+ virtual int rep_elect(int, int, u_int32_t, int *);
+ virtual int rep_process_message(Dbt *, Dbt *, int *);
+ virtual int rep_start(Dbt *, u_int32_t);
+ virtual int rep_stat(DB_REP_STAT **statp, u_int32_t flags);
+ virtual int set_rep_limit(u_int32_t, u_int32_t);
+ virtual int set_rep_transport(u_int32_t,
+ int (*)(DbEnv *, const Dbt *, const Dbt *, int, u_int32_t));
+
+ // Conversion functions
+ //
+ virtual DB_ENV *get_DB_ENV()
+ {
+ return (DB_ENV *)imp();
+ }
+
+ virtual const DB_ENV *get_const_DB_ENV() const
+ {
+ return (const DB_ENV *)constimp();
+ }
+
+ static DbEnv* get_DbEnv(DB_ENV *dbenv)
+ {
+ return (DbEnv *)dbenv->api1_internal;
+ }
+
+ static const DbEnv* get_const_DbEnv(const DB_ENV *dbenv)
+ {
+ return (const DbEnv *)dbenv->api1_internal;
+ }
+
+ // For internal use only.
+ static DbEnv* wrap_DB_ENV(DB_ENV *dbenv);
+
+ // These are public only because they need to be called
+ // via C functions. They should never be called by users
+ // of this class.
+ //
+ static void _stream_error_function(const char *, char *);
+ static int _app_dispatch_intercept(DB_ENV *env, DBT *dbt, DB_LSN *lsn,
+ db_recops op);
+ static void _paniccall_intercept(DB_ENV *env, int errval);
+ static void _feedback_intercept(DB_ENV *env, int opcode, int pct);
+ static int _rep_send_intercept(DB_ENV *env,
+ const DBT *cntrl, const DBT *data,
+ int id, u_int32_t flags);
+
+private:
+ void cleanup();
+ int initialize(DB_ENV *env);
+ int error_policy();
+
+ // For internal use only.
+ DbEnv(DB_ENV *, u_int32_t flags);
+
+ // no copying
+ DbEnv(const DbEnv &);
+ void operator = (const DbEnv &);
+
+ // instance data
+ int construct_error_;
+ u_int32_t construct_flags_;
+ int (*app_dispatch_callback_)(DbEnv *, Dbt *, DbLsn *, db_recops);
+ void (*feedback_callback_)(DbEnv *, int, int);
+ void (*paniccall_callback_)(DbEnv *, int);
+ int (*pgin_callback_)(DbEnv *dbenv, db_pgno_t pgno,
+ void *pgaddr, Dbt *pgcookie);
+ int (*pgout_callback_)(DbEnv *dbenv, db_pgno_t pgno,
+ void *pgaddr, Dbt *pgcookie);
+ int (*rep_send_callback_)(DbEnv *,
+ const Dbt *, const Dbt *, int, u_int32_t);
+
+ // class data
+ static __DB_OSTREAMCLASS *error_stream_;
+};
+
+////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////
+//
+// Table access classes
+//
+
+//
+// Represents a database table = a set of keys with associated values.
+//
+class _exported Db
+{
+ friend class DbEnv;
+
+private:
+ // Put this first to allow inlining with some C++ compilers (g++-2.95)
+ DEFINE_DB_CLASS(Db);
+
+public:
+ Db(DbEnv*, u_int32_t); // create a Db object, then call open()
+ virtual ~Db(); // does *not* call close.
+
+ // These methods exactly match those in the C interface.
+ //
+ virtual int associate(DbTxn *txn, Db *secondary,
+ int (*callback)(Db *, const Dbt *, const Dbt *, Dbt *),
+ u_int32_t flags);
+ virtual int close(u_int32_t flags);
+ virtual int cursor(DbTxn *txnid, Dbc **cursorp, u_int32_t flags);
+ virtual int del(DbTxn *txnid, Dbt *key, u_int32_t flags);
+ virtual void err(int, const char *, ...);
+ virtual void errx(const char *, ...);
+ virtual int fd(int *fdp);
+ virtual int get(DbTxn *txnid, Dbt *key, Dbt *data, u_int32_t flags);
+ virtual void *get_app_private() const;
+ virtual int get_byteswapped(int *);
+ virtual int get_type(DBTYPE *);
+ virtual int join(Dbc **curslist, Dbc **dbcp, u_int32_t flags);
+ virtual int key_range(DbTxn *, Dbt *, DB_KEY_RANGE *, u_int32_t);
+ virtual int open(DbTxn *txnid,
+ const char *, const char *subname, DBTYPE, u_int32_t, int);
+ virtual int pget(DbTxn *txnid, Dbt *key, Dbt *pkey, Dbt *data,
+ u_int32_t flags);
+ virtual int put(DbTxn *, Dbt *, Dbt *, u_int32_t);
+ virtual int remove(const char *, const char *, u_int32_t);
+ virtual int rename(const char *, const char *, const char *, u_int32_t);
+ virtual int set_alloc(db_malloc_fcn_type, db_realloc_fcn_type,
+ db_free_fcn_type);
+ virtual void set_app_private(void *);
+ virtual int set_append_recno(int (*)(Db *, Dbt *, db_recno_t));
+ virtual int set_bt_compare(bt_compare_fcn_type); /*deprecated*/
+ virtual int set_bt_compare(int (*)(Db *, const Dbt *, const Dbt *));
+ virtual int set_bt_maxkey(u_int32_t);
+ virtual int set_bt_minkey(u_int32_t);
+ virtual int set_bt_prefix(bt_prefix_fcn_type); /*deprecated*/
+ virtual int set_bt_prefix(size_t (*)(Db *, const Dbt *, const Dbt *));
+ virtual int set_cachesize(u_int32_t, u_int32_t, int);
+ virtual int set_cache_priority(DB_CACHE_PRIORITY);
+ virtual int set_dup_compare(dup_compare_fcn_type); /*deprecated*/
+ virtual int set_dup_compare(int (*)(Db *, const Dbt *, const Dbt *));
+ virtual int set_encrypt(const char *, int);
+ virtual void set_errcall(void (*)(const char *, char *));
+ virtual void set_errfile(FILE *);
+ virtual void set_errpfx(const char *);
+ virtual int set_feedback(void (*)(Db *, int, int));
+ virtual int set_flags(u_int32_t);
+ virtual int set_h_ffactor(u_int32_t);
+ virtual int set_h_hash(h_hash_fcn_type); /*deprecated*/
+ virtual int set_h_hash(u_int32_t (*)(Db *, const void *, u_int32_t));
+ virtual int set_h_nelem(u_int32_t);
+ virtual int set_lorder(int);
+ virtual int set_pagesize(u_int32_t);
+ virtual int set_paniccall(void (*)(DbEnv *, int));
+ virtual int set_re_delim(int);
+ virtual int set_re_len(u_int32_t);
+ virtual int set_re_pad(int);
+ virtual int set_re_source(char *);
+ virtual int set_q_extentsize(u_int32_t);
+ virtual int stat(void *sp, u_int32_t flags);
+ virtual int sync(u_int32_t flags);
+ virtual int truncate(DbTxn *, u_int32_t *, u_int32_t);
+ virtual int upgrade(const char *name, u_int32_t flags);
+ virtual int verify(const char *, const char *, __DB_OSTREAMCLASS *, u_int32_t);
+
+ // These additional methods are not in the C interface, and
+ // are only available for C++.
+ //
+ virtual void set_error_stream(__DB_OSTREAMCLASS *);
+
+ virtual DB *get_DB()
+ {
+ return (DB *)imp();
+ }
+
+ virtual const DB *get_const_DB() const
+ {
+ return (const DB *)constimp();
+ }
+
+ static Db* get_Db(DB *db)
+ {
+ return (Db *)db->api_internal;
+ }
+
+ static const Db* get_const_Db(const DB *db)
+ {
+ return (const Db *)db->api_internal;
+ }
+
+private:
+ // no copying
+ Db(const Db &);
+ Db &operator = (const Db &);
+
+ void cleanup();
+ int initialize();
+ int error_policy();
+
+ // instance data
+ DbEnv *env_;
+ int construct_error_;
+ u_int32_t flags_;
+ u_int32_t construct_flags_;
+
+public:
+ // These are public only because they need to be called
+ // via C callback functions. They should never be used by
+ // external users of this class.
+ //
+ int (*append_recno_callback_)(Db *, Dbt *, db_recno_t);
+ int (*associate_callback_)(Db *, const Dbt *, const Dbt *, Dbt *);
+ int (*bt_compare_callback_)(Db *, const Dbt *, const Dbt *);
+ size_t (*bt_prefix_callback_)(Db *, const Dbt *, const Dbt *);
+ int (*dup_compare_callback_)(Db *, const Dbt *, const Dbt *);
+ void (*feedback_callback_)(Db *, int, int);
+ u_int32_t (*h_hash_callback_)(Db *, const void *, u_int32_t);
+};
+
+//
+// A chunk of data, maybe a key or value.
+//
+class _exported Dbt : private DBT
+{
+ friend class Dbc;
+ friend class Db;
+ friend class DbEnv;
+ friend class DbLogc;
+
+public:
+
+ // key/data
+ void *get_data() const { return data; }
+ void set_data(void *value) { data = value; }
+
+ // key/data length
+ u_int32_t get_size() const { return size; }
+ void set_size(u_int32_t value) { size = value; }
+
+ // RO: length of user buffer.
+ u_int32_t get_ulen() const { return ulen; }
+ void set_ulen(u_int32_t value) { ulen = value; }
+
+ // RO: get/put record length.
+ u_int32_t get_dlen() const { return dlen; }
+ void set_dlen(u_int32_t value) { dlen = value; }
+
+ // RO: get/put record offset.
+ u_int32_t get_doff() const { return doff; }
+ void set_doff(u_int32_t value) { doff = value; }
+
+ // flags
+ u_int32_t get_flags() const { return flags; }
+ void set_flags(u_int32_t value) { flags = value; }
+
+ // Conversion functions
+ DBT *get_DBT() { return (DBT *)this; }
+ const DBT *get_const_DBT() const { return (const DBT *)this; }
+
+ static Dbt* get_Dbt(DBT *dbt) { return (Dbt *)dbt; }
+ static const Dbt* get_const_Dbt(const DBT *dbt)
+ { return (const Dbt *)dbt; }
+
+ Dbt(void *data, u_int32_t size);
+ Dbt();
+ ~Dbt();
+ Dbt(const Dbt &);
+ Dbt &operator = (const Dbt &);
+
+private:
+ // Note: no extra data appears in this class (other than
+ // inherited from DBT) since we need DBT and Dbt objects
+	// to have interchangeable pointers.
+ //
+ // When subclassing this class, remember that callback
+ // methods like bt_compare, bt_prefix, dup_compare may
+ // internally manufacture DBT objects (which later are
+ // cast to Dbt), so such callbacks might receive objects
+ // not of your subclassed type.
+};
+
+class _exported Dbc : protected DBC
+{
+ friend class Db;
+
+public:
+ int close();
+ int count(db_recno_t *countp, u_int32_t flags);
+ int del(u_int32_t flags);
+ int dup(Dbc** cursorp, u_int32_t flags);
+ int get(Dbt* key, Dbt *data, u_int32_t flags);
+ int pget(Dbt* key, Dbt* pkey, Dbt *data, u_int32_t flags);
+ int put(Dbt* key, Dbt *data, u_int32_t flags);
+
+private:
+ // No data is permitted in this class (see comment at top)
+
+ // Note: use Db::cursor() to get pointers to a Dbc,
+ // and call Dbc::close() rather than delete to release them.
+ //
+ Dbc();
+ ~Dbc();
+
+ // no copying
+ Dbc(const Dbc &);
+ Dbc &operator = (const Dbc &);
+};
+
+class _exported DbLogc : protected DB_LOGC
+{
+ friend class DbEnv;
+
+public:
+ int close(u_int32_t _flags);
+ int get(DbLsn *lsn, Dbt *data, u_int32_t _flags);
+
+private:
+ // No data is permitted in this class (see comment at top)
+
+	// Note: use DbEnv::log_cursor() to get pointers to a DbLogc,
+	// and call DbLogc::close() rather than delete to release them.
+ //
+ DbLogc();
+ ~DbLogc();
+
+ // no copying
+	DbLogc(const DbLogc &);
+	DbLogc &operator = (const DbLogc &);
+};
+#endif /* !_DB_CXX_H_ */
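
The header above only declares the wrapper classes; for orientation, here is a minimal usage sketch of the Db/DbEnv/Dbt interfaces it exposes. It is illustrative only: the database name, environment home and comparison function are made up for the example, and DbException-based error handling is omitted.

    #include <cstring>
    #include "db_cxx.h"

    // Length-based key comparison, matching the C++-style overload
    // Db::set_bt_compare(int (*)(Db *, const Dbt *, const Dbt *)) declared above.
    static int
    compare_by_length(Db *db, const Dbt *a, const Dbt *b)
    {
        // The Db handle is unused in this trivial comparator.
        return ((int)a->get_size() - (int)b->get_size());
    }

    int
    store_one_record(const char *home)
    {
        // Environment and database open/close follow the method signatures
        // declared above; DB_CREATE, DB_INIT_MPOOL and DB_BTREE come from db.h.
        DbEnv env(0);
        env.open(home, DB_CREATE | DB_INIT_MPOOL, 0);

        Db db(&env, 0);
        db.set_bt_compare(compare_by_length);
        db.open(NULL, "example.db", NULL, DB_BTREE, DB_CREATE, 0644);

        char key_buf[] = "fruit", data_buf[] = "apple";
        Dbt key(key_buf, sizeof(key_buf));
        Dbt data(data_buf, sizeof(data_buf));
        db.put(NULL, &key, &data, 0);

        db.close(0);
        return (env.close(0));
    }

As the header's comments note, cursor-style objects such as Dbc and DbLogc are released with their close() methods rather than with delete.
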
diff --git a/bdb/build_win32/db_java_xa.dsp b/bdb/build_win32/db_java_xa.dsp
new file mode 100644
index 00000000000..9c700ffeed4
--- /dev/null
+++ b/bdb/build_win32/db_java_xa.dsp
@@ -0,0 +1,85 @@
+# Microsoft Developer Studio Project File - Name="db_java_xa" - Package Owner=<4>
+# Microsoft Developer Studio Generated Build File, Format Version 6.00
+# ** DO NOT EDIT **
+
+# TARGTYPE "Win32 (x86) External Target" 0x0106
+
+CFG=db_java_xa - Win32 Debug
+!MESSAGE This is not a valid makefile. To build this project using NMAKE,
+!MESSAGE use the Export Makefile command and run
+!MESSAGE
+!MESSAGE NMAKE /f "db_java_xa.mak".
+!MESSAGE
+!MESSAGE You can specify a configuration when running NMAKE
+!MESSAGE by defining the macro CFG on the command line. For example:
+!MESSAGE
+!MESSAGE NMAKE /f "db_java_xa.mak" CFG="db_java_xa - Win32 Debug"
+!MESSAGE
+!MESSAGE Possible choices for configuration are:
+!MESSAGE
+!MESSAGE "db_java_xa - Win32 Release" (based on "Win32 (x86) External Target")
+!MESSAGE "db_java_xa - Win32 Debug" (based on "Win32 (x86) External Target")
+!MESSAGE
+
+# Begin Project
+# PROP AllowPerConfigDependencies 0
+# PROP Scc_ProjName ""
+# PROP Scc_LocalPath ""
+
+!IF "$(CFG)" == "db_java_xa - Win32 Release"
+
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "Release"
+# PROP BASE Intermediate_Dir "Release"
+# PROP BASE Cmd_Line "NMAKE /f db_java_xaj.mak"
+# PROP BASE Rebuild_Opt "/a"
+# PROP BASE Target_File "db_java_xaj.exe"
+# PROP BASE Bsc_Name "db_java_xaj.bsc"
+# PROP BASE Target_Dir ""
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release"
+# PROP Intermediate_Dir "Release"
+# PROP Cmd_Line "NMAKE /f db_java_xaj.mak Release/dbxa.jar"
+# PROP Rebuild_Opt "/a"
+# PROP Target_File "Release/dbxa.jar"
+# PROP Bsc_Name ""
+# PROP Target_Dir ""
+
+!ELSEIF "$(CFG)" == "db_java_xa - Win32 Debug"
+
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "Debug"
+# PROP BASE Intermediate_Dir "Debug"
+# PROP BASE Cmd_Line "NMAKE /f db_java_xaj.mak"
+# PROP BASE Rebuild_Opt "/a"
+# PROP BASE Target_File "db_java_xaj.exe"
+# PROP BASE Bsc_Name "db_java_xaj.bsc"
+# PROP BASE Target_Dir ""
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug"
+# PROP Intermediate_Dir "Debug"
+# PROP Cmd_Line "NMAKE /f db_java_xaj.mak Debug/dbxa.jar"
+# PROP Rebuild_Opt "/a"
+# PROP Target_File "Debug/dbxa.jar"
+# PROP Bsc_Name ""
+# PROP Target_Dir ""
+
+!ENDIF
+
+# Begin Target
+
+# Name "db_java_xa - Win32 Release"
+# Name "db_java_xa - Win32 Debug"
+
+!IF "$(CFG)" == "db_java_xa - Win32 Release"
+
+!ELSEIF "$(CFG)" == "db_java_xa - Win32 Debug"
+
+!ENDIF
+
+# Begin Source File
+
+SOURCE=.\db_java_xaj.mak
+# End Source File
+# End Target
+# End Project
diff --git a/bdb/build_win32/db_java_xaj.mak b/bdb/build_win32/db_java_xaj.mak
new file mode 100644
index 00000000000..c2dbc920d17
--- /dev/null
+++ b/bdb/build_win32/db_java_xaj.mak
@@ -0,0 +1,21 @@
+JAVA_XADIR=../java/src/com/sleepycat/db/xa
+
+JAVA_XASRCS=\
+ $(JAVA_XADIR)/DbXAResource.java \
+ $(JAVA_XADIR)/DbXid.java
+
+Release/dbxa.jar : $(JAVA_XASRCS)
+ @echo compiling Berkeley DB XA classes
+ @javac -g -d ./Release/classes -classpath "$(CLASSPATH);./Release/classes" $(JAVA_XASRCS)
+ @echo creating jar file
+ @cd .\Release\classes
+ @jar cf ../dbxa.jar com\sleepycat\db\xa\*.class
+ @echo Java XA build finished
+
+Debug/dbxa.jar : $(JAVA_XASRCS)
+ @echo compiling Berkeley DB XA classes
+ @javac -g -d ./Debug/classes -classpath "$(CLASSPATH);./Debug/classes" $(JAVA_XASRCS)
+ @echo creating jar file
+ @cd .\Debug\classes
+ @jar cf ../dbxa.jar com\sleepycat\db\xa\*.class
+ @echo Java XA build finished
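
These are the targets that the db_java_xa.dsp project above invokes through its Cmd_Line settings (NMAKE /f db_java_xaj.mak Release/dbxa.jar for the Release configuration and NMAKE /f db_java_xaj.mak Debug/dbxa.jar for Debug): each rule compiles DbXAResource.java and DbXid.java into the per-configuration classes directory and then packages them into dbxa.jar.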
diff --git a/bdb/build_win32/db_lib.dsp b/bdb/build_win32/db_lib.dsp
new file mode 100644
index 00000000000..a7fb4157909
--- /dev/null
+++ b/bdb/build_win32/db_lib.dsp
@@ -0,0 +1,92 @@
+# Microsoft Developer Studio Project File - Name="db_lib" - Package Owner=<4>
+# Microsoft Developer Studio Generated Build File, Format Version 6.00
+# ** DO NOT EDIT **
+
+# TARGTYPE "Win32 (x86) Generic Project" 0x010a
+
+CFG=db_lib - Win32 Debug Static
+!MESSAGE This is not a valid makefile. To build this project using NMAKE,
+!MESSAGE use the Export Makefile command and run
+!MESSAGE
+!MESSAGE NMAKE /f "db_lib.mak".
+!MESSAGE
+!MESSAGE You can specify a configuration when running NMAKE
+!MESSAGE by defining the macro CFG on the command line. For example:
+!MESSAGE
+!MESSAGE NMAKE /f "db_lib.mak" CFG="db_lib - Win32 Debug Static"
+!MESSAGE
+!MESSAGE Possible choices for configuration are:
+!MESSAGE
+!MESSAGE "db_lib - Win32 Release" (based on "Win32 (x86) Generic Project")
+!MESSAGE "db_lib - Win32 Debug" (based on "Win32 (x86) Generic Project")
+!MESSAGE "db_lib - Win32 Release Static" (based on "Win32 (x86) Generic Project")
+!MESSAGE "db_lib - Win32 Debug Static" (based on "Win32 (x86) Generic Project")
+!MESSAGE
+
+# Begin Project
+# PROP AllowPerConfigDependencies 0
+# PROP Scc_ProjName ""
+# PROP Scc_LocalPath ""
+
+!IF "$(CFG)" == "db_lib - Win32 Release"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "Release"
+# PROP BASE Intermediate_Dir "Release"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release"
+# PROP Intermediate_Dir "Release"
+# PROP Target_Dir ""
+
+!ELSEIF "$(CFG)" == "db_lib - Win32 Debug"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "Debug"
+# PROP BASE Intermediate_Dir "Debug"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug"
+# PROP Intermediate_Dir "Debug"
+# PROP Target_Dir ""
+
+!ELSEIF "$(CFG)" == "db_lib - Win32 Release Static"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "Release_static"
+# PROP BASE Intermediate_Dir "Release_static"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release_static"
+# PROP Intermediate_Dir "Release_static"
+# PROP Target_Dir ""
+
+!ELSEIF "$(CFG)" == "db_lib - Win32 Debug Static"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "Debug_static"
+# PROP BASE Intermediate_Dir "Debug_Static"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug_static"
+# PROP Intermediate_Dir "Debug_Static"
+# PROP Target_Dir ""
+
+!ENDIF
+
+# Begin Target
+
+# Name "db_lib - Win32 Release"
+# Name "db_lib - Win32 Debug"
+# Name "db_lib - Win32 Release Static"
+# Name "db_lib - Win32 Debug Static"
+# End Target
+# End Project
diff --git a/bdb/build_win32/db_perf.dsp b/bdb/build_win32/db_perf.dsp
new file mode 100644
index 00000000000..21b79ed9e19
--- /dev/null
+++ b/bdb/build_win32/db_perf.dsp
@@ -0,0 +1,216 @@
+# Microsoft Developer Studio Project File - Name="db_perf" - Package Owner=<4>
+# Microsoft Developer Studio Generated Build File, Format Version 6.00
+# ** DO NOT EDIT **
+
+# TARGTYPE "Win32 (x86) Console Application" 0x0103
+
+CFG=db_perf - Win32 Debug Static
+!MESSAGE This is not a valid makefile. To build this project using NMAKE,
+!MESSAGE use the Export Makefile command and run
+!MESSAGE
+!MESSAGE NMAKE /f "db_perf.mak".
+!MESSAGE
+!MESSAGE You can specify a configuration when running NMAKE
+!MESSAGE by defining the macro CFG on the command line. For example:
+!MESSAGE
+!MESSAGE NMAKE /f "db_perf.mak" CFG="db_perf - Win32 Debug Static"
+!MESSAGE
+!MESSAGE Possible choices for configuration are:
+!MESSAGE
+!MESSAGE "db_perf - Win32 Release" (based on "Win32 (x86) Console Application")
+!MESSAGE "db_perf - Win32 Debug" (based on "Win32 (x86) Console Application")
+!MESSAGE "db_perf - Win32 Release Static" (based on "Win32 (x86) Console Application")
+!MESSAGE "db_perf - Win32 Debug Static" (based on "Win32 (x86) Console Application")
+!MESSAGE
+
+# Begin Project
+# PROP AllowPerConfigDependencies 0
+# PROP Scc_ProjName ""
+# PROP Scc_LocalPath ""
+CPP=cl.exe
+RSC=rc.exe
+
+!IF "$(CFG)" == "db_perf - Win32 Release"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "Release"
+# PROP BASE Intermediate_Dir "Release"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release"
+# PROP Intermediate_Dir "Release"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /W3 /GX /O2 /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD CPP /nologo /MD /W3 /GX /O2 /I "." /I ".." /I "../dbinc" /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD BASE RSC /l 0x409 /d "NDEBUG"
+# ADD RSC /l 0x409 /d "NDEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /machine:I386
+# ADD LINK32 Release/libdb41.lib /nologo /subsystem:console /machine:I386 /nodefaultlib:"libcmt"
+
+!ELSEIF "$(CFG)" == "db_perf - Win32 Debug"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "Debug"
+# PROP BASE Intermediate_Dir "Debug"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug"
+# PROP Intermediate_Dir "Debug"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /W3 /Gm /GX /Zi /Od /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I ".." /I "../dbinc" /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD BASE RSC /l 0x409 /d "_DEBUG"
+# ADD RSC /l 0x409 /d "_DEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /debug /machine:I386 /pdbtype:sept
+# ADD LINK32 Debug/libdb41d.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /nodefaultlib:"libcmtd" /fixed:no
+
+!ELSEIF "$(CFG)" == "db_perf - Win32 Release Static"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "Release"
+# PROP BASE Intermediate_Dir "Release"
+# PROP BASE Ignore_Export_Lib 0
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release_static"
+# PROP Intermediate_Dir "Release_static"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MD /W3 /GX /O2 /I "." /I ".." /I "../dbinc" /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD CPP /nologo /MT /W3 /GX /O2 /I "." /I ".." /I "../dbinc" /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD BASE RSC /l 0x409 /d "NDEBUG"
+# ADD RSC /l 0x409 /d "NDEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 Release_static/libdb41.lib /nologo /subsystem:console /machine:I386
+# ADD LINK32 Release_static/libdb41s.lib /nologo /subsystem:console /machine:I386
+
+!ELSEIF "$(CFG)" == "db_perf - Win32 Debug Static"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "Debug"
+# PROP BASE Intermediate_Dir "Debug"
+# PROP BASE Ignore_Export_Lib 0
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug_static"
+# PROP Intermediate_Dir "Debug_static"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I ".." /I "../dbinc" /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD CPP /nologo /MTd /W3 /GX /Z7 /Od /I "." /I ".." /I "../dbinc" /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD BASE RSC /l 0x409 /d "_DEBUG"
+# ADD RSC /l 0x409 /d "_DEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 Debug_static/libdb41d.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /fixed:no
+# ADD LINK32 Debug_static/libdb41sd.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /fixed:no
+
+!ENDIF
+
+# Begin Target
+
+# Name "db_perf - Win32 Release"
+# Name "db_perf - Win32 Debug"
+# Name "db_perf - Win32 Release Static"
+# Name "db_perf - Win32 Debug Static"
+# Begin Source File
+
+SOURCE=..\test_perf\db_perf.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\test_perf\perf_cache_check.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\test_perf\perf_checkpoint.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\test_perf\perf_config.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\test_perf\perf_dbs.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\test_perf\perf_debug.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\test_perf\perf_file.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\test_perf\perf_key.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\test_perf\perf_log.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\test_perf\perf_misc.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\test_perf\perf_op.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\test_perf\perf_parse.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\test_perf\perf_rand.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\test_perf\perf_spawn.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\test_perf\perf_thread.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\test_perf\perf_trickle.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\test_perf\perf_txn.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\test_perf\perf_util.c
+# End Source File
+# Begin Source File
+
+SOURCE=..\clib\getopt.c
+# End Source File
+# End Target
+# End Project
diff --git a/bdb/build_win32/db_test.dsp b/bdb/build_win32/db_test.dsp
index e1bb9056824..f014aa95bcf 100644
--- a/bdb/build_win32/db_test.dsp
+++ b/bdb/build_win32/db_test.dsp
@@ -1,5 +1,5 @@
# Microsoft Developer Studio Project File - Name="db_test" - Package Owner=<4>
-# Microsoft Developer Studio Generated Build File, Format Version 5.00
+# Microsoft Developer Studio Generated Build File, Format Version 6.00
# ** DO NOT EDIT **
# TARGTYPE "Win32 (x86) Console Application" 0x0103
@@ -22,6 +22,7 @@ CFG=db_test - Win32 Debug
!MESSAGE
# Begin Project
+# PROP AllowPerConfigDependencies 0
# PROP Scc_ProjName ""
# PROP Scc_LocalPath ""
CPP=cl.exe
@@ -41,7 +42,7 @@ RSC=rc.exe
# PROP Ignore_Export_Lib 0
# PROP Target_Dir ""
# ADD BASE CPP /nologo /W3 /GX /O2 /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
-# ADD CPP /nologo /MD /W3 /GX /O2 /I "." /I "../include" /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD CPP /nologo /MD /W3 /GX /O2 /I "." /I ".." /I "../dbinc" /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
# ADD BASE RSC /l 0x409 /d "NDEBUG"
# ADD RSC /l 0x409 /d "NDEBUG"
BSC32=bscmake.exe
@@ -49,9 +50,9 @@ BSC32=bscmake.exe
# ADD BSC32 /nologo
LINK32=link.exe
# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /machine:I386
-# ADD LINK32 Release/libdb32.lib /nologo /subsystem:console /machine:I386
+# ADD LINK32 Release/libdb41.lib /nologo /subsystem:console /machine:I386
# Begin Special Build Tool
-SOURCE=$(InputPath)
+SOURCE="$(InputPath)"
PostBuild_Desc=Copy built executable files.
PostBuild_Cmds=copy Release\*.exe .
# End Special Build Tool
@@ -60,8 +61,8 @@ PostBuild_Cmds=copy Release\*.exe .
# PROP BASE Use_MFC 0
# PROP BASE Use_Debug_Libraries 1
-# PROP BASE Output_Dir "db_recov"
-# PROP BASE Intermediate_Dir "db_recov"
+# PROP BASE Output_Dir "Debug"
+# PROP BASE Intermediate_Dir "Debug"
# PROP BASE Target_Dir ""
# PROP Use_MFC 0
# PROP Use_Debug_Libraries 1
@@ -70,7 +71,7 @@ PostBuild_Cmds=copy Release\*.exe .
# PROP Ignore_Export_Lib 0
# PROP Target_Dir ""
# ADD BASE CPP /nologo /W3 /Gm /GX /Zi /Od /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
-# ADD CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I "../include" /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I ".." /I "../dbinc" /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
# ADD BASE RSC /l 0x409 /d "_DEBUG"
# ADD RSC /l 0x409 /d "_DEBUG"
BSC32=bscmake.exe
@@ -78,9 +79,9 @@ BSC32=bscmake.exe
# ADD BSC32 /nologo
LINK32=link.exe
# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /debug /machine:I386 /pdbtype:sept
-# ADD LINK32 Debug/libdb32d.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /out:"Debug/dbkill.exe" /fixed:no
+# ADD LINK32 Debug/libdb41d.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /out:"Debug/dbkill.exe" /fixed:no
# Begin Special Build Tool
-SOURCE=$(InputPath)
+SOURCE="$(InputPath)"
PostBuild_Desc=Copy built executable files.
PostBuild_Cmds=copy Debug\*.exe .
# End Special Build Tool
diff --git a/bdb/build_win32/db_test.src b/bdb/build_win32/db_test.src
new file mode 100644
index 00000000000..73479d3856a
--- /dev/null
+++ b/bdb/build_win32/db_test.src
@@ -0,0 +1,97 @@
+# Microsoft Developer Studio Project File - Name="@project_name@" - Package Owner=<4>
+# Microsoft Developer Studio Generated Build File, Format Version 6.00
+# ** DO NOT EDIT **
+
+# TARGTYPE "Win32 (x86) Console Application" 0x0103
+
+CFG=@project_name@ - Win32 Debug
+!MESSAGE This is not a valid makefile. To build this project using NMAKE,
+!MESSAGE use the Export Makefile command and run
+!MESSAGE
+!MESSAGE NMAKE /f "@project_name@.mak".
+!MESSAGE
+!MESSAGE You can specify a configuration when running NMAKE
+!MESSAGE by defining the macro CFG on the command line. For example:
+!MESSAGE
+!MESSAGE NMAKE /f "@project_name@.mak" CFG="@project_name@ - Win32 Debug"
+!MESSAGE
+!MESSAGE Possible choices for configuration are:
+!MESSAGE
+!MESSAGE "@project_name@ - Win32 Release" (based on "Win32 (x86) Console Application")
+!MESSAGE "@project_name@ - Win32 Debug" (based on "Win32 (x86) Console Application")
+!MESSAGE
+
+# Begin Project
+# PROP AllowPerConfigDependencies 0
+# PROP Scc_ProjName ""
+# PROP Scc_LocalPath ""
+CPP=cl.exe
+RSC=rc.exe
+
+!IF "$(CFG)" == "@project_name@ - Win32 Release"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "Release"
+# PROP BASE Intermediate_Dir "Release"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release"
+# PROP Intermediate_Dir "Release"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /W3 /GX /O2 /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD CPP /nologo /MD /W3 /GX /O2 /I "." /I ".." /I "../dbinc" /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD BASE RSC /l 0x409 /d "NDEBUG"
+# ADD RSC /l 0x409 /d "NDEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /machine:I386
+# ADD LINK32 Release/libdb@DB_VERSION_MAJOR@@DB_VERSION_MINOR@.lib /nologo /subsystem:console /machine:I386
+# Begin Special Build Tool
+SOURCE="$(InputPath)"
+PostBuild_Desc=Copy built executable files.
+PostBuild_Cmds=copy Release\*.exe .
+# End Special Build Tool
+
+!ELSEIF "$(CFG)" == "@project_name@ - Win32 Debug"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "Debug"
+# PROP BASE Intermediate_Dir "Debug"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug"
+# PROP Intermediate_Dir "Debug"
+# PROP Ignore_Export_Lib 0
+# PROP Target_Dir ""
+# ADD BASE CPP /nologo /W3 /Gm /GX /Zi /Od /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I ".." /I "../dbinc" /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
+# ADD BASE RSC /l 0x409 /d "_DEBUG"
+# ADD RSC /l 0x409 /d "_DEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /debug /machine:I386 /pdbtype:sept
+# ADD LINK32 Debug/libdb@DB_VERSION_MAJOR@@DB_VERSION_MINOR@d.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /pdb:none /debug /machine:I386 /out:"Debug/dbkill.exe" /fixed:no
+# Begin Special Build Tool
+SOURCE="$(InputPath)"
+PostBuild_Desc=Copy built executable files.
+PostBuild_Cmds=copy Debug\*.exe .
+# End Special Build Tool
+
+!ENDIF
+
+# Begin Target
+
+# Name "@project_name@ - Win32 Release"
+# Name "@project_name@ - Win32 Debug"
+@SOURCE_FILES@
+# End Target
+# End Project
diff --git a/bdb/build_win32/dbkill.cpp b/bdb/build_win32/dbkill.cpp
index 24709f37201..23dc87b0e85 100644
--- a/bdb/build_win32/dbkill.cpp
+++ b/bdb/build_win32/dbkill.cpp
@@ -1,10 +1,10 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1999, 2000
+ * Copyright (c) 1999-2002
* Sleepycat Software. All rights reserved.
*
- * $Id: dbkill.cpp,v 11.4 2000/05/02 17:08:31 dda Exp $
+ * $Id: dbkill.cpp,v 11.7 2002/01/11 15:51:27 bostic Exp $
*/
/*
* Kill -
@@ -74,7 +74,7 @@ usage_exit()
{
fprintf(stderr, "Usage: kill [ -sig ] pid\n");
fprintf(stderr, " for win32, sig must be or 0, 15 (TERM)\n");
- exit(1);
+ exit(EXIT_FAILURE);
}
int
@@ -117,15 +117,15 @@ main(int argc, char **argv)
hProcess = OpenProcess(accessflag, FALSE, pid);
if (hProcess == NULL) {
fprintf(stderr, "dbkill: %s: no such process\n", argv[1]);
- exit(1);
+ exit(EXIT_FAILURE);
}
if (sig == 0)
- exit(0);
+ exit(EXIT_SUCCESS);
if (!TerminateProcess(hProcess, 99)) {
DWORD err = GetLastError();
fprintf(stderr,
"dbkill: cannot kill process: error %d (0x%lx)\n", err, err);
- exit(1);
+ exit(EXIT_FAILURE);
}
- return 0;
+ return EXIT_SUCCESS;
}
diff --git a/bdb/build_win32/dynamic_dsp.src b/bdb/build_win32/dynamic_dsp.src
index d9881eda331..a92906a51f4 100644
--- a/bdb/build_win32/dynamic_dsp.src
+++ b/bdb/build_win32/dynamic_dsp.src
@@ -1,5 +1,5 @@
# Microsoft Developer Studio Project File - Name="@project_name@" - Package Owner=<4>
-# Microsoft Developer Studio Generated Build File, Format Version 5.00
+# Microsoft Developer Studio Generated Build File, Format Version 6.00
# ** DO NOT EDIT **
# TARGTYPE "Win32 (x86) Dynamic-Link Library" 0x0102
@@ -13,19 +13,16 @@ CFG=@project_name@ - Win32 Debug
!MESSAGE You can specify a configuration when running NMAKE
!MESSAGE by defining the macro CFG on the command line. For example:
!MESSAGE
-!MESSAGE NMAKE /f "@project_name@.mak" CFG="@project_name@ - Win32 Debug Static"
+!MESSAGE NMAKE /f "@project_name@.mak" CFG="@project_name@ - Win32 Debug"
!MESSAGE
!MESSAGE Possible choices for configuration are:
!MESSAGE
!MESSAGE "@project_name@ - Win32 Release" (based on "Win32 (x86) Dynamic-Link Library")
!MESSAGE "@project_name@ - Win32 Debug" (based on "Win32 (x86) Dynamic-Link Library")
-!MESSAGE "@project_name@ - Win32 Release Static" (based on\
- "Win32 (x86) Dynamic-Link Library")
-!MESSAGE "@project_name@ - Win32 Debug Static" (based on\
- "Win32 (x86) Dynamic-Link Library")
!MESSAGE
# Begin Project
+# PROP AllowPerConfigDependencies 0
# PROP Scc_ProjName ""
# PROP Scc_LocalPath ""
CPP=cl.exe
@@ -46,9 +43,9 @@ RSC=rc.exe
# PROP Ignore_Export_Lib 0
# PROP Target_Dir ""
# ADD BASE CPP /nologo /MT /W3 /GX /O2 /D "WIN32" /D "NDEBUG" /D "_WINDOWS" /YX /FD /c
-# ADD CPP /nologo /MD /W3 /GX /O2 /Ob2 /I "." /I "../include" /D "DB_CREATE_DLL" /D "WIN32" /D "NDEBUG" /D "_WINDOWS" /D "_MBCS" /YX /FD /c
-# ADD BASE MTL /nologo /D "NDEBUG" /mktyplib203 /o NUL /win32
-# ADD MTL /nologo /D "NDEBUG" /mktyplib203 /o NUL /win32
+# ADD CPP /nologo /MD /W3 /GX /O2 /Ob2 /I "." /I ".." /I "../dbinc" /D "DB_CREATE_DLL" /D "WIN32" /D "NDEBUG" /D "_WINDOWS" /D "_MBCS" /YX /FD /c
+# ADD BASE MTL /nologo /D "NDEBUG" /mktyplib203 /o "NUL" /win32
+# ADD MTL /nologo /D "NDEBUG" /mktyplib203 /o "NUL" /win32
# ADD BASE RSC /l 0x409 /d "NDEBUG"
# ADD RSC /l 0x409 /d "NDEBUG"
BSC32=bscmake.exe
@@ -72,74 +69,18 @@ LINK32=link.exe
# PROP Ignore_Export_Lib 0
# PROP Target_Dir ""
# ADD BASE CPP /nologo /MTd /W3 /Gm /GX /Zi /Od /D "WIN32" /D "_DEBUG" /D "_WINDOWS" /YX /FD /c
-# ADD CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I "../include" /D "DB_CREATE_DLL" /D "CONFIG_TEST" /D "WIN32" /D "_DEBUG" /D "_WINDOWS" /D "_WINDLL" /D "_AFXDLL" /D "_MBCS" /YX"config.h" /FD /c
+# ADD CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I ".." /I "../dbinc" /D "DB_CREATE_DLL" /D "CONFIG_TEST" /D "WIN32" /D "_DEBUG" /D "_WINDOWS" /D "_MBCS" /YX"config.h" /FD /c
# SUBTRACT CPP /Fr
-# ADD BASE MTL /nologo /D "_DEBUG" /mktyplib203 /o NUL /win32
-# ADD MTL /nologo /D "_DEBUG" /mktyplib203 /o NUL /win32
+# ADD BASE MTL /nologo /D "_DEBUG" /mktyplib203 /o "NUL" /win32
+# ADD MTL /nologo /D "_DEBUG" /mktyplib203 /o "NUL" /win32
# ADD BASE RSC /l 0x409 /d "_DEBUG"
-# ADD RSC /l 0x409 /d "_DEBUG" /d "_AFXDLL"
+# ADD RSC /l 0x409 /d "_DEBUG"
BSC32=bscmake.exe
# ADD BASE BSC32 /nologo
# ADD BSC32 /nologo
LINK32=link.exe
# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:windows /dll /debug /machine:I386 /pdbtype:sept
-# ADD LINK32 /nologo /base:"0x13000000" /subsystem:windows /dll /pdb:none /debug /machine:I386 /out:"Debug/libdb@DB_VERSION_MAJOR@@DB_VERSION_MINOR@d.dll" /fixed:no
-
-!ELSEIF "$(CFG)" == "@project_name@ - Win32 Release Static"
-
-# PROP BASE Use_MFC 0
-# PROP BASE Use_Debug_Libraries 0
-# PROP BASE Output_Dir "DB_DLL__"
-# PROP BASE Intermediate_Dir "DB_DLL__"
-# PROP BASE Ignore_Export_Lib 0
-# PROP BASE Target_Dir ""
-# PROP Use_MFC 0
-# PROP Use_Debug_Libraries 0
-# PROP Output_Dir "Release"
-# PROP Intermediate_Dir "Release"
-# PROP Ignore_Export_Lib 0
-# PROP Target_Dir ""
-# ADD BASE CPP /nologo /MD /W3 /GX /O2 /Ob2 /I "." /I "../include" /D "DB_CREATE_DLL" /D "WIN32" /D "NDEBUG" /D "_WINDOWS" /YX /FD /c
-# ADD CPP /nologo /MD /W3 /GX /O2 /Ob2 /I "." /I "../include" /D "DB_CREATE_DLL" /D "WIN32" /D "NDEBUG" /D "_WINDOWS" /D "_MBCS" /YX /FD /c
-# ADD BASE MTL /nologo /D "NDEBUG" /mktyplib203 /o NUL /win32
-# ADD MTL /nologo /D "NDEBUG" /mktyplib203 /o NUL /win32
-# ADD BASE RSC /l 0x409 /d "NDEBUG"
-# ADD RSC /l 0x409 /d "NDEBUG"
-BSC32=bscmake.exe
-# ADD BASE BSC32 /nologo
-# ADD BSC32 /nologo
-LINK32=link.exe
-# ADD BASE LINK32 /nologo /base:"0x13000000" /subsystem:windows /dll /machine:I386 /out:"Release/libdb@DB_VERSION_MAJOR@@DB_VERSION_MINOR@.dll"
-# ADD LINK32 /nologo /base:"0x13000000" /subsystem:windows /dll /machine:I386 /out:"Release/libdb@DB_VERSION_MAJOR@@DB_VERSION_MINOR@.dll"
-
-!ELSEIF "$(CFG)" == "@project_name@ - Win32 Debug Static"
-
-# PROP BASE Use_MFC 2
-# PROP BASE Use_Debug_Libraries 1
-# PROP BASE Output_Dir "DB_DLL_0"
-# PROP BASE Intermediate_Dir "DB_DLL_0"
-# PROP BASE Ignore_Export_Lib 0
-# PROP BASE Target_Dir ""
-# PROP Use_MFC 2
-# PROP Use_Debug_Libraries 1
-# PROP Output_Dir "Debug"
-# PROP Intermediate_Dir "Debug"
-# PROP Ignore_Export_Lib 0
-# PROP Target_Dir ""
-# ADD BASE CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I "../include" /D "DB_CREATE_DLL" /D "CONFIG_TEST" /D "WIN32" /D "_DEBUG" /D "_WINDOWS" /D "_WINDLL" /D "_AFXDLL" /YX"config.h" /FD /c
-# SUBTRACT BASE CPP /Fr
-# ADD CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I "../include" /D "DB_CREATE_DLL" /D "CONFIG_TEST" /D "WIN32" /D "_DEBUG" /D "_WINDOWS" /D "_WINDLL" /D "_AFXDLL" /D "_MBCS" /YX"config.h" /FD /c
-# SUBTRACT CPP /Fr
-# ADD BASE MTL /nologo /D "_DEBUG" /mktyplib203 /o NUL /win32
-# ADD MTL /nologo /D "_DEBUG" /mktyplib203 /o NUL /win32
-# ADD BASE RSC /l 0x409 /d "_DEBUG" /d "_AFXDLL"
-# ADD RSC /l 0x409 /d "_DEBUG" /d "_AFXDLL"
-BSC32=bscmake.exe
-# ADD BASE BSC32 /nologo
-# ADD BSC32 /nologo
-LINK32=link.exe
-# ADD BASE LINK32 /nologo /base:"0x13000000" /subsystem:windows /dll /pdb:none /debug /machine:I386 /out:"Debug/libdb@DB_VERSION_MAJOR@@DB_VERSION_MINOR@d.dll" /fixed:no
-# ADD LINK32 /nologo /base:"0x13000000" /subsystem:windows /dll /pdb:none /debug /machine:I386 /out:"Debug/libdb@DB_VERSION_MAJOR@@DB_VERSION_MINOR@d.dll" /fixed:no
+# ADD LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /base:"0x13000000" /subsystem:windows /dll /pdb:none /debug /machine:I386 /out:"Debug/libdb@DB_VERSION_MAJOR@@DB_VERSION_MINOR@d.dll" /fixed:no
!ENDIF
@@ -147,8 +88,6 @@ LINK32=link.exe
# Name "@project_name@ - Win32 Release"
# Name "@project_name@ - Win32 Debug"
-# Name "@project_name@ - Win32 Release Static"
-# Name "@project_name@ - Win32 Debug Static"
@SOURCE_FILES@
# End Target
# End Project
diff --git a/bdb/build_win32/java_dsp.src b/bdb/build_win32/java_dsp.src
index eff251a44f4..15941bcab67 100644
--- a/bdb/build_win32/java_dsp.src
+++ b/bdb/build_win32/java_dsp.src
@@ -1,5 +1,5 @@
# Microsoft Developer Studio Project File - Name="@project_name@" - Package Owner=<4>
-# Microsoft Developer Studio Generated Build File, Format Version 5.00
+# Microsoft Developer Studio Generated Build File, Format Version 6.00
# ** DO NOT EDIT **
# TARGTYPE "Win32 (x86) Dynamic-Link Library" 0x0102
@@ -17,12 +17,12 @@ CFG=@project_name@ - Win32 Debug
!MESSAGE
!MESSAGE Possible choices for configuration are:
!MESSAGE
-!MESSAGE "@project_name@ - Win32 Release" (based on\
- "Win32 (x86) Dynamic-Link Library")
+!MESSAGE "@project_name@ - Win32 Release" (based on "Win32 (x86) Dynamic-Link Library")
!MESSAGE "@project_name@ - Win32 Debug" (based on "Win32 (x86) Dynamic-Link Library")
!MESSAGE
# Begin Project
+# PROP AllowPerConfigDependencies 0
# PROP Scc_ProjName ""
# PROP Scc_LocalPath ""
CPP=cl.exe
@@ -43,9 +43,9 @@ RSC=rc.exe
# PROP Ignore_Export_Lib 0
# PROP Target_Dir ""
# ADD BASE CPP /nologo /MT /W3 /GX /O2 /D "WIN32" /D "NDEBUG" /D "_WINDOWS" /YX /FD /c
-# ADD CPP /nologo /MD /W3 /GX /O2 /Ob2 /I "." /I "../include" /D "WIN32" /D "NDEBUG" /D "_WINDOWS" /D "DB_CREATE_DLL" /YX /FD /c
-# ADD BASE MTL /nologo /D "NDEBUG" /mktyplib203 /o NUL /win32
-# ADD MTL /nologo /D "NDEBUG" /mktyplib203 /o NUL /win32
+# ADD CPP /nologo /MD /W3 /GX /O2 /Ob2 /I "." /I ".." /I "../dbinc" /D "WIN32" /D "NDEBUG" /D "_WINDOWS" /D "DB_CREATE_DLL" /YX /FD /c
+# ADD BASE MTL /nologo /D "NDEBUG" /mktyplib203 /o "NUL" /win32
+# ADD MTL /nologo /D "NDEBUG" /mktyplib203 /o "NUL" /win32
# ADD BASE RSC /l 0x409 /d "NDEBUG"
# ADD RSC /l 0x409 /d "NDEBUG"
BSC32=bscmake.exe
@@ -53,25 +53,22 @@ BSC32=bscmake.exe
# ADD BSC32 /nologo
LINK32=link.exe
# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:windows /dll /machine:I386
-# ADD LINK32 Release/libdb32.lib /nologo /base:"0x13000000" /subsystem:windows /dll /machine:I386 /out:"Release/libdb_java@DB_VERSION_MAJOR@@DB_VERSION_MINOR@.dll"
+# ADD LINK32 Release/libdb@DB_VERSION_MAJOR@@DB_VERSION_MINOR@.lib /nologo /base:"0x13000000" /subsystem:windows /dll /machine:I386 /out:"Release/libdb_java@DB_VERSION_MAJOR@@DB_VERSION_MINOR@.dll"
# Begin Custom Build - Compiling java files using javac
ProjDir=.
InputPath=.\Release\libdb_java@DB_VERSION_MAJOR@@DB_VERSION_MINOR@.dll
-SOURCE=$(InputPath)
+SOURCE="$(InputPath)"
"force_compilation.txt" : $(SOURCE) "$(INTDIR)" "$(OUTDIR)"
- cd $(ProjDir)\..\java\src\com\sleepycat\db
- mkdir ..\..\..\..\classes
+ mkdir $(ProjDir)\Release\classes
echo compiling Berkeley DB classes
- javac -d ../../../../classes -classpath "$(CLASSPATH);../../../../classes"\
- *.java
+ javac -g -d $(ProjDir)/Release/classes -classpath "$(CLASSPATH);$(ProjDir)/Release/classes" ..\java\src\com\sleepycat\db\*.java
echo compiling examples
- cd ..\examples
- javac -d ../../../../classes -classpath "$(CLASSPATH);../../../../classes"\
- *.java
- echo creating jar file
- cd ..\..\..\..\classes
- jar cf db.jar com\sleepycat\db\*.class
+ javac -g -d $(ProjDir)/Release/classes -classpath "$(CLASSPATH);$(ProjDir)/Release/classes" ..\java\src\com\sleepycat\examples\*.java
+ echo creating jar files
+ cd $(ProjDir)\Release\classes
+ jar cf ../db.jar com\sleepycat\db\*.class
+ jar cf ../dbexamples.jar com\sleepycat\examples\*.class
echo Java build finished
# End Custom Build
@@ -90,10 +87,10 @@ SOURCE=$(InputPath)
# PROP Ignore_Export_Lib 0
# PROP Target_Dir ""
# ADD BASE CPP /nologo /MTd /W3 /Gm /GX /Zi /Od /D "WIN32" /D "_DEBUG" /D "_WINDOWS" /YX /FD /c
-# ADD CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I "../include" /D "WIN32" /D "_DEBUG" /D "_WINDOWS" /D "DB_CREATE_DLL" /D "_WINDLL" /D "_AFXDLL" /YX"config.h" /FD /c
+# ADD CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I ".." /I "../dbinc" /D "WIN32" /D "_DEBUG" /D "_WINDOWS" /D "DB_CREATE_DLL" /D "_WINDLL" /D "_AFXDLL" /YX"config.h" /FD /c
# SUBTRACT CPP /Fr
-# ADD BASE MTL /nologo /D "_DEBUG" /mktyplib203 /o NUL /win32
-# ADD MTL /nologo /D "_DEBUG" /mktyplib203 /o NUL /win32
+# ADD BASE MTL /nologo /D "_DEBUG" /mktyplib203 /o "NUL" /win32
+# ADD MTL /nologo /D "_DEBUG" /mktyplib203 /o "NUL" /win32
# ADD BASE RSC /l 0x409 /d "_DEBUG"
# ADD RSC /l 0x409 /d "_DEBUG" /d "_AFXDLL"
BSC32=bscmake.exe
@@ -101,25 +98,22 @@ BSC32=bscmake.exe
# ADD BSC32 /nologo
LINK32=link.exe
# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:windows /dll /debug /machine:I386 /pdbtype:sept
-# ADD LINK32 Debug/libdb32d.lib /nologo /base:"0x13000000" /subsystem:windows /dll /pdb:none /debug /machine:I386 /out:"Debug/libdb_java@DB_VERSION_MAJOR@@DB_VERSION_MINOR@d.dll" /fixed:no
+# ADD LINK32 Debug/libdb@DB_VERSION_MAJOR@@DB_VERSION_MINOR@d.lib /nologo /base:"0x13000000" /subsystem:windows /dll /pdb:none /debug /machine:I386 /out:"Debug/libdb_java@DB_VERSION_MAJOR@@DB_VERSION_MINOR@d.dll" /fixed:no
# Begin Custom Build - Compiling java files using javac
ProjDir=.
InputPath=.\Debug\libdb_java@DB_VERSION_MAJOR@@DB_VERSION_MINOR@d.dll
-SOURCE=$(InputPath)
+SOURCE="$(InputPath)"
"force_compilation.txt" : $(SOURCE) "$(INTDIR)" "$(OUTDIR)"
- cd $(ProjDir)\..\java\src\com\sleepycat\db
- mkdir ..\..\..\..\classes
+ mkdir $(ProjDir)\Debug\classes
echo compiling Berkeley DB classes
- javac -g -d ../../../../classes -classpath "$(CLASSPATH);../../../../classes"\
- *.java
+ javac -g -d $(ProjDir)/Debug/classes -classpath "$(CLASSPATH);$(ProjDir)/Debug/classes" ..\java\src\com\sleepycat\db\*.java
echo compiling examples
- javac -g -d ../../../../classes -classpath "$(CLASSPATH);../../../../classes"\
- *.java
- cd ..\examples
- echo creating jar file
- cd ..\..\..\..\classes
- jar cf db.jar com\sleepycat\db\*.class
+ javac -g -d $(ProjDir)/Debug/classes -classpath "$(CLASSPATH);$(ProjDir)/Debug/classes" ..\java\src\com\sleepycat\examples\*.java
+ echo creating jar files
+ cd $(ProjDir)\Debug\classes
+ jar cf ../db.jar com\sleepycat\db\*.class
+ jar cf ../dbexamples.jar com\sleepycat\examples\*.class
echo Java build finished
# End Custom Build
diff --git a/bdb/build_win32/libdb.def b/bdb/build_win32/libdb.def
index a3b4cb3b26b..afcb092382e 100644
--- a/bdb/build_win32/libdb.def
+++ b/bdb/build_win32/libdb.def
@@ -1,151 +1,128 @@
-; $Id: libdb.def,v 11.21 2001/01/04 15:07:33 dda Exp $
-DESCRIPTION 'Berkeley DB 3.2 Library'
+; DO NOT EDIT: automatically built by dist/s_win32.
+
+DESCRIPTION 'Berkeley DB 4.1 Library'
EXPORTS
- lock_get @1
- lock_id @2
- lock_put @3
- lock_vec @4
- log_compare @5
- log_file @6
- log_flush @7
- log_get @8
- log_put @9
- log_register @10
- log_unregister @11
- memp_fclose @12
- memp_fget @13
- memp_fopen @14
- memp_fput @15
- memp_fset @16
- memp_fsync @17
- memp_register @18
- memp_sync @19
- txn_abort @20
- txn_begin @21
- txn_checkpoint @22
- txn_commit @23
- txn_prepare @24
- db_version @25
- memp_stat @26
- log_archive @27
- lock_detect @28
- txn_id @29
- txn_stat @30
- memp_trickle @31
- log_stat @32
- lock_stat @33
- db_create @34
- db_env_create @35
- db_strerror @36
- db_xa_switch @37
- db_env_set_func_close @38
- db_env_set_func_dirfree @39
- db_env_set_func_dirlist @40
- db_env_set_func_exists @41
- db_env_set_func_free @42
- db_env_set_func_fsync @43
- db_env_set_func_ioinfo @44
- db_env_set_func_malloc @45
- db_env_set_func_map @46
- db_env_set_func_open @47
- db_env_set_func_read @48
- db_env_set_func_realloc @49
- db_env_set_func_rename @50
- db_env_set_func_sleep @51
- db_env_set_func_unlink @52
- db_env_set_func_unmap @53
- db_env_set_func_write @54
- db_env_set_func_yield @55
-; FREE @56
- db_env_set_pageyield @57
- db_env_set_panicstate @58
- db_env_set_region_init @59
- db_env_set_tas_spins @60
-; these are only for testing
- __db_loadme @201
- __ham_func2 @202
- __ham_func3 @203
- __ham_func4 @204
- __ham_func5 @205
- __db_hcreate @206
- __db_hsearch @207
- __db_hdestroy @208
- __db_dbm_init @209
- __db_dbm_delete @210
- __db_dbm_fetch @211
- __db_dbm_store @212
- __db_dbm_firstkey @213
- __db_dbm_nextkey @214
- __db_dbm_close @215
- __db_ndbm_open @216
- __db_ndbm_store @217
- __db_ndbm_rdonly @218
- __db_ndbm_pagfno @219
- __db_ndbm_nextkey @220
- __db_ndbm_firstkey @221
- __db_ndbm_fetch @222
- __db_ndbm_error @223
- __db_ndbm_dirfno @224
- __db_ndbm_delete @225
- __db_ndbm_close @226
- __db_ndbm_clearerr @227
- __lock_dump_region @228
- __memp_dump_region @229
- __os_closehandle @230
- __os_openhandle @231
- __os_strdup @232
- __db_r_attach @233
- __db_r_detach @234
- __db_tas_mutex_init @235
- __db_tas_mutex_lock @236
- __db_tas_mutex_unlock @237
- __os_read @238
- __os_write @239
- __os_open @240
- __os_ioinfo @241
- __os_free @242
- __os_malloc @243
- __os_freestr @244
- __os_calloc @245
- __ham_test @246
-; these are needed for linking tools
- __db_dump @401
- __db_rpath @402
- __db_dispatch @403
- __db_err @404
- __db_init_print @405
- __txn_init_print @406
- __log_init_print @407
- __ham_init_print @408
- __bam_init_print @409
- __db_jump @410
- __ham_pgin @411
- __ham_pgout @412
- __bam_pgin @413
- __bam_pgout @414
- __db_omode @415
- __db_prdbt @416
- __os_sleep @417
- __db_e_stat @420
- __db_getlong @421
- __os_get_errno @422
- __os_set_errno @423
- __ham_get_meta @424
- __ham_release_meta @425
- __qam_init_print @426
- __crdel_init_print @427
- __qam_pgin_out @428
- __db_pgin @429
- __db_pgout @430
- __db_getulong @431
- __db_util_sigresend @432
- __db_util_siginit @433
- __db_util_interrupted @434
- __db_util_logset @435
- __db_prheader @436
- __db_prfooter @437
- __db_verify_callback @438
- __db_verify_internal @439
- __os_yield @440
- __db_global_values @441
+ db_create @1
+ db_env_create @2
+ db_strerror @3
+ db_version @4
+ db_xa_switch @5
+ log_compare @6
+ txn_abort @7
+ txn_begin @8
+ txn_commit @9
+ db_env_set_func_close @10
+ db_env_set_func_dirfree @11
+ db_env_set_func_dirlist @12
+ db_env_set_func_exists @13
+ db_env_set_func_free @14
+ db_env_set_func_fsync @15
+ db_env_set_func_ioinfo @16
+ db_env_set_func_malloc @17
+ db_env_set_func_map @18
+ db_env_set_func_open @19
+ db_env_set_func_read @20
+ db_env_set_func_realloc @21
+ db_env_set_func_rename @22
+ db_env_set_func_seek @23
+ db_env_set_func_sleep @24
+ db_env_set_func_unlink @25
+ db_env_set_func_unmap @26
+ db_env_set_func_write @27
+ db_env_set_func_yield @28
+ __db_add_recovery @29
+ __db_dbm_close @30
+ __db_dbm_delete @31
+ __db_dbm_fetch @32
+ __db_dbm_firstkey @33
+ __db_dbm_init @34
+ __db_dbm_nextkey @35
+ __db_dbm_store @36
+ __db_hcreate @37
+ __db_hdestroy @38
+ __db_hsearch @39
+ __db_loadme @40
+ __db_ndbm_clearerr @41
+ __db_ndbm_close @42
+ __db_ndbm_delete @43
+ __db_ndbm_dirfno @44
+ __db_ndbm_error @45
+ __db_ndbm_fetch @46
+ __db_ndbm_firstkey @47
+ __db_ndbm_nextkey @48
+ __db_ndbm_open @49
+ __db_ndbm_pagfno @50
+ __db_ndbm_rdonly @51
+ __db_ndbm_store @52
+ __db_panic @53
+ __db_r_attach @54
+ __db_r_detach @55
+ __db_win32_mutex_init @56
+ __db_win32_mutex_lock @57
+ __db_win32_mutex_unlock @58
+ __ham_func2 @59
+ __ham_func3 @60
+ __ham_func4 @61
+ __ham_func5 @62
+ __ham_test @63
+ __lock_dump_region @64
+ __memp_dump_region @65
+ __os_calloc @66
+ __os_closehandle @67
+ __os_free @68
+ __os_ioinfo @69
+ __os_malloc @70
+ __os_open @71
+ __os_openhandle @72
+ __os_read @73
+ __os_realloc @74
+ __os_strdup @75
+ __os_umalloc @76
+ __os_write @77
+ __bam_init_print @78
+ __bam_pgin @79
+ __bam_pgout @80
+ __crdel_init_print @81
+ __db_dispatch @82
+ __db_dump @83
+ __db_e_stat @84
+ __db_err @85
+ __db_getlong @86
+ __db_getulong @87
+ __db_global_values @88
+ __db_init_print @89
+ __db_inmemdbflags @90
+ __db_isbigendian @91
+ __db_omode @92
+ __db_overwrite @93
+ __db_pgin @94
+ __db_pgout @95
+ __db_prdbt @96
+ __db_prfooter @97
+ __db_prheader @98
+ __db_rpath @99
+ __db_util_cache @100
+ __db_util_interrupted @101
+ __db_util_logset @102
+ __db_util_siginit @103
+ __db_util_sigresend @104
+ __db_verify_callback @105
+ __db_verify_internal @106
+ __dbreg_init_print @107
+ __fop_init_print @108
+ __ham_get_meta @109
+ __ham_init_print @110
+ __ham_pgin @111
+ __ham_pgout @112
+ __ham_release_meta @113
+ __os_clock @114
+ __os_get_errno @115
+ __os_id @116
+ __os_set_errno @117
+ __os_sleep @118
+ __os_ufree @119
+ __os_yield @120
+ __qam_init_print @121
+ __qam_pgin_out @122
+ __txn_init_print @123
diff --git a/bdb/build_win32/libdb_tcl.def b/bdb/build_win32/libdb_tcl.def
index a18459beaba..b6323c66bc6 100644
--- a/bdb/build_win32/libdb_tcl.def
+++ b/bdb/build_win32/libdb_tcl.def
@@ -1,16 +1,11 @@
-; $Id: libdb_tcl.def,v 11.2 1999/11/21 23:10:00 bostic Exp $
+; $Id: libdb_tcl.def,v 11.5 2002/04/03 12:01:27 mjc Exp $
DESCRIPTION 'Berkeley DB TCL interface Library'
EXPORTS
Db_tcl_Init
- bdb_DbmCommand
- bdb_HCommand
- bdb_NdbmOpen
- bdb_RandCommand
db_Cmd
dbc_Cmd
env_Cmd
- ndbm_Cmd
tcl_EnvRemove
tcl_LockDetect
tcl_LockGet
@@ -22,9 +17,7 @@ EXPORTS
tcl_LogFlush
tcl_LogGet
tcl_LogPut
- tcl_LogRegister
tcl_LogStat
- tcl_LogUnregister
tcl_Mp
tcl_MpStat
tcl_MpSync
@@ -32,4 +25,3 @@ EXPORTS
tcl_Txn
tcl_TxnCheckpoint
tcl_TxnStat
- txn_Cmd
diff --git a/bdb/build_win32/libdbrc.src b/bdb/build_win32/libdbrc.src
index 82a93068c8b..3e5d8deec6f 100644
--- a/bdb/build_win32/libdbrc.src
+++ b/bdb/build_win32/libdbrc.src
@@ -20,7 +20,7 @@ BEGIN
VALUE "FileDescription", "Berkeley DB 3.0 DLL\0"
VALUE "FileVersion", "%MAJOR%.%MINOR%.%PATCH%\0"
VALUE "InternalName", "libdb.dll\0"
- VALUE "LegalCopyright", "Copyright © Sleepycat Software Inc. 1997, 1998, 1999, 2000\0"
+ VALUE "LegalCopyright", "Copyright © Sleepycat Software Inc. 1997-2002\0"
VALUE "OriginalFilename", "libdb.dll\0"
VALUE "ProductName", "Sleepycat Software libdb\0"
VALUE "ProductVersion", "%MAJOR%.%MINOR%.%PATCH%\0"
diff --git a/bdb/build_win32/static_dsp.src b/bdb/build_win32/static_dsp.src
index 99d00f14291..0c66c851025 100644
--- a/bdb/build_win32/static_dsp.src
+++ b/bdb/build_win32/static_dsp.src
@@ -1,10 +1,10 @@
# Microsoft Developer Studio Project File - Name="@project_name@" - Package Owner=<4>
-# Microsoft Developer Studio Generated Build File, Format Version 5.00
+# Microsoft Developer Studio Generated Build File, Format Version 6.00
# ** DO NOT EDIT **
# TARGTYPE "Win32 (x86) Static Library" 0x0104
-CFG=@project_name@ - Win32 Debug
+CFG=@project_name@ - Win32 Debug Static
!MESSAGE This is not a valid makefile. To build this project using NMAKE,
!MESSAGE use the Export Makefile command and run
!MESSAGE
@@ -17,75 +17,33 @@ CFG=@project_name@ - Win32 Debug
!MESSAGE
!MESSAGE Possible choices for configuration are:
!MESSAGE
-!MESSAGE "@project_name@ - Win32 Release" (based on "Win32 (x86) Static Library")
-!MESSAGE "@project_name@ - Win32 Debug" (based on "Win32 (x86) Static Library")
-!MESSAGE "@project_name@ - Win32 Release Static" (based on\
- "Win32 (x86) Static Library")
-!MESSAGE "@project_name@ - Win32 Debug Static" (based on\
- "Win32 (x86) Static Library")
+!MESSAGE "@project_name@ - Win32 Release Static" (based on "Win32 (x86) Static Library")
+!MESSAGE "@project_name@ - Win32 Debug Static" (based on "Win32 (x86) Static Library")
!MESSAGE
# Begin Project
+# PROP AllowPerConfigDependencies 0
# PROP Scc_ProjName ""
# PROP Scc_LocalPath ""
CPP=cl.exe
+RSC=rc.exe
-!IF "$(CFG)" == "@project_name@ - Win32 Release"
+!IF "$(CFG)" == "@project_name@ - Win32 Release Static"
# PROP BASE Use_MFC 0
# PROP BASE Use_Debug_Libraries 0
-# PROP BASE Output_Dir "DB_Stati"
-# PROP BASE Intermediate_Dir "DB_Stati"
+# PROP BASE Output_Dir "Release_static"
+# PROP BASE Intermediate_Dir "Release_static"
# PROP BASE Target_Dir ""
# PROP Use_MFC 0
# PROP Use_Debug_Libraries 0
# PROP Output_Dir "Release_static"
# PROP Intermediate_Dir "Release_static"
# PROP Target_Dir ""
-# ADD BASE CPP /nologo /W3 /GX /O2 /D "WIN32" /D "NDEBUG" /D "_WINDOWS" /YX /FD /c
-# ADD CPP /nologo /MT /W3 /GX /O2 /I "." /I "../include" /D "WIN32" /D "NDEBUG" /D "_WINDOWS" /D "_MBCS" /YX"config.h" /FD /c
-BSC32=bscmake.exe
-# ADD BASE BSC32 /nologo
-# ADD BSC32 /nologo
-LIB32=link.exe -lib
-# ADD BASE LIB32 /nologo
-# ADD LIB32 /nologo /out:"Release_static/libdb@DB_VERSION_MAJOR@@DB_VERSION_MINOR@s.lib"
-
-!ELSEIF "$(CFG)" == "@project_name@ - Win32 Debug"
-
-# PROP BASE Use_MFC 0
-# PROP BASE Use_Debug_Libraries 1
-# PROP BASE Output_Dir "DB_Stat0"
-# PROP BASE Intermediate_Dir "DB_Stat0"
-# PROP BASE Target_Dir ""
-# PROP Use_MFC 0
-# PROP Use_Debug_Libraries 1
-# PROP Output_Dir "Debug_static"
-# PROP Intermediate_Dir "Debug_static"
-# PROP Target_Dir ""
-# ADD BASE CPP /nologo /W3 /GX /Z7 /Od /D "WIN32" /D "_DEBUG" /D "_WINDOWS" /YX /FD /c
-# ADD CPP /nologo /MTd /W3 /GX /Z7 /Od /I "." /I "../include" /D "CONFIG_TEST" /D "WIN32" /D "_DEBUG" /D "_WINDOWS" /D "_MBCS" /YX"config.h" /FD /c
-BSC32=bscmake.exe
-# ADD BASE BSC32 /nologo
-# ADD BSC32 /nologo
-LIB32=link.exe -lib
-# ADD BASE LIB32 /nologo
-# ADD LIB32 /nologo /out:"Debug_static/libdb@DB_VERSION_MAJOR@@DB_VERSION_MINOR@sd.lib"
-
-!ELSEIF "$(CFG)" == "@project_name@ - Win32 Release Static"
-
-# PROP BASE Use_MFC 0
-# PROP BASE Use_Debug_Libraries 0
-# PROP BASE Output_Dir "DB_Stati"
-# PROP BASE Intermediate_Dir "DB_Stati"
-# PROP BASE Target_Dir ""
-# PROP Use_MFC 0
-# PROP Use_Debug_Libraries 0
-# PROP Output_Dir "Release_static"
-# PROP Intermediate_Dir "Release_static"
-# PROP Target_Dir ""
-# ADD BASE CPP /nologo /MT /W3 /GX /O2 /I "." /I "../include" /D "WIN32" /D "NDEBUG" /D "_WINDOWS" /YX"config.h" /FD /c
-# ADD CPP /nologo /MT /W3 /GX /O2 /I "." /I "../include" /D "WIN32" /D "NDEBUG" /D "_WINDOWS" /D "_MBCS" /YX"config.h" /FD /c
+# ADD BASE CPP /nologo /MT /W3 /GX /O2 /I "." /I ".." /I "../dbinc" /D "WIN32" /D "NDEBUG" /D "_WINDOWS" /YX"config.h" /FD /c
+# ADD CPP /nologo /MT /W3 /GX /O2 /I "." /I ".." /I "../dbinc" /D "WIN32" /D "NDEBUG" /D "_WINDOWS" /D "_MBCS" /YX"config.h" /FD /c
+# ADD BASE RSC /l 0xc09
+# ADD RSC /l 0xc09
BSC32=bscmake.exe
# ADD BASE BSC32 /nologo
# ADD BSC32 /nologo
@@ -95,18 +53,20 @@ LIB32=link.exe -lib
!ELSEIF "$(CFG)" == "@project_name@ - Win32 Debug Static"
-# PROP BASE Use_MFC 0
+# PROP BASE Use_MFC 1
# PROP BASE Use_Debug_Libraries 1
-# PROP BASE Output_Dir "DB_Stat0"
-# PROP BASE Intermediate_Dir "DB_Stat0"
+# PROP BASE Output_Dir "Debug_static"
+# PROP BASE Intermediate_Dir "Debug_static"
# PROP BASE Target_Dir ""
-# PROP Use_MFC 0
+# PROP Use_MFC 1
# PROP Use_Debug_Libraries 1
# PROP Output_Dir "Debug_static"
# PROP Intermediate_Dir "Debug_static"
# PROP Target_Dir ""
-# ADD BASE CPP /nologo /MTd /W3 /GX /Z7 /Od /I "." /I "../include" /D "CONFIG_TEST" /D "WIN32" /D "_DEBUG" /D "_WINDOWS" /YX"config.h" /FD /c
-# ADD CPP /nologo /MTd /W3 /GX /Z7 /Od /I "." /I "../include" /D "CONFIG_TEST" /D "WIN32" /D "_DEBUG" /D "_WINDOWS" /D "_MBCS" /YX"config.h" /FD /c
+# ADD BASE CPP /nologo /MTd /W3 /GX /Z7 /Od /I "." /I ".." /I "../dbinc" /D "CONFIG_TEST" /D "WIN32" /D "_DEBUG" /D "_WINDOWS" /YX"config.h" /FD /c
+# ADD CPP /nologo /MTd /W3 /GX /Z7 /Od /I "." /I ".." /I "../dbinc" /D "CONFIG_TEST" /D "WIN32" /D "_DEBUG" /D "_WINDOWS" /D "_MBCS" /YX"config.h" /FD /c
+# ADD BASE RSC /l 0xc09
+# ADD RSC /l 0xc09
BSC32=bscmake.exe
# ADD BASE BSC32 /nologo
# ADD BSC32 /nologo
@@ -118,8 +78,6 @@ LIB32=link.exe -lib
# Begin Target
-# Name "@project_name@ - Win32 Release"
-# Name "@project_name@ - Win32 Debug"
# Name "@project_name@ - Win32 Release Static"
# Name "@project_name@ - Win32 Debug Static"
@SOURCE_FILES@
diff --git a/bdb/build_win32/tcl_dsp.src b/bdb/build_win32/tcl_dsp.src
index 11a36606e37..4de41e6934e 100644
--- a/bdb/build_win32/tcl_dsp.src
+++ b/bdb/build_win32/tcl_dsp.src
@@ -1,5 +1,5 @@
# Microsoft Developer Studio Project File - Name="@project_name@" - Package Owner=<4>
-# Microsoft Developer Studio Generated Build File, Format Version 5.00
+# Microsoft Developer Studio Generated Build File, Format Version 6.00
# ** DO NOT EDIT **
# TARGTYPE "Win32 (x86) Dynamic-Link Library" 0x0102
@@ -22,6 +22,7 @@ CFG=@project_name@ - Win32 Debug
!MESSAGE
# Begin Project
+# PROP AllowPerConfigDependencies 0
# PROP Scc_ProjName ""
# PROP Scc_LocalPath ""
CPP=cl.exe
@@ -42,9 +43,9 @@ RSC=rc.exe
# PROP Ignore_Export_Lib 0
# PROP Target_Dir ""
# ADD BASE CPP /nologo /MT /W3 /GX /O2 /D "WIN32" /D "NDEBUG" /D "_WINDOWS" /YX /FD /c
-# ADD CPP /nologo /MD /W3 /GX /O2 /Ob2 /I "." /I "../include" /D "DB_TCL_SUPPORT" /D "WIN32" /D "NDEBUG" /D "_WINDOWS" /D "DB_CREATE_DLL" /YX /FD /c
-# ADD BASE MTL /nologo /D "NDEBUG" /mktyplib203 /o NUL /win32
-# ADD MTL /nologo /D "NDEBUG" /mktyplib203 /o NUL /win32
+# ADD CPP /nologo /MD /W3 /GX /O2 /Ob2 /I "." /I ".." /I "../dbinc" /D "DB_TCL_SUPPORT" /D "WIN32" /D "NDEBUG" /D "_WINDOWS" /D "DB_CREATE_DLL" /YX /FD /c
+# ADD BASE MTL /nologo /D "NDEBUG" /mktyplib203 /o "NUL" /win32
+# ADD MTL /nologo /D "NDEBUG" /mktyplib203 /o "NUL" /win32
# ADD BASE RSC /l 0x409 /d "NDEBUG"
# ADD RSC /l 0x409 /d "NDEBUG"
BSC32=bscmake.exe
@@ -52,7 +53,7 @@ BSC32=bscmake.exe
# ADD BSC32 /nologo
LINK32=link.exe
# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:windows /dll /machine:I386
-# ADD LINK32 Release/libdb32.lib tcl83.lib /nologo /base:"0x13000000" /subsystem:windows /dll /machine:I386 /out:"Release/libdb_tcl@DB_VERSION_MAJOR@@DB_VERSION_MINOR@.dll"
+# ADD LINK32 Release/libdb@DB_VERSION_MAJOR@@DB_VERSION_MINOR@.lib tcl83.lib /nologo /base:"0x13000000" /subsystem:windows /dll /machine:I386 /out:"Release/libdb_tcl@DB_VERSION_MAJOR@@DB_VERSION_MINOR@.dll"
!ELSEIF "$(CFG)" == "@project_name@ - Win32 Debug"
@@ -68,10 +69,10 @@ LINK32=link.exe
# PROP Ignore_Export_Lib 0
# PROP Target_Dir ""
# ADD BASE CPP /nologo /MTd /W3 /Gm /GX /Zi /Od /D "WIN32" /D "_DEBUG" /D "_WINDOWS" /YX /FD /c
-# ADD CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I "../include" /D "DB_TCL_SUPPORT" /D "CONFIG_TEST" /D "WIN32" /D "_DEBUG" /D "_WINDOWS" /D "DB_CREATE_DLL" /D "_WINDLL" /D "_AFXDLL" /YX"config.h" /FD /c
+# ADD CPP /nologo /MDd /W3 /GX /Z7 /Od /I "." /I ".." /I "../dbinc" /D "DB_TCL_SUPPORT" /D "CONFIG_TEST" /D "WIN32" /D "_DEBUG" /D "_WINDOWS" /D "DB_CREATE_DLL" /D "_WINDLL" /D "_AFXDLL" /YX"config.h" /FD /c
# SUBTRACT CPP /Fr
-# ADD BASE MTL /nologo /D "_DEBUG" /mktyplib203 /o NUL /win32
-# ADD MTL /nologo /D "_DEBUG" /mktyplib203 /o NUL /win32
+# ADD BASE MTL /nologo /D "_DEBUG" /mktyplib203 /o "NUL" /win32
+# ADD MTL /nologo /D "_DEBUG" /mktyplib203 /o "NUL" /win32
# ADD BASE RSC /l 0x409 /d "_DEBUG"
# ADD RSC /l 0x409 /d "_DEBUG" /d "_AFXDLL"
BSC32=bscmake.exe
@@ -79,7 +80,7 @@ BSC32=bscmake.exe
# ADD BSC32 /nologo
LINK32=link.exe
# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:windows /dll /debug /machine:I386 /pdbtype:sept
-# ADD LINK32 Debug/libdb32d.lib tcl83d.lib /nologo /base:"0x13000000" /subsystem:windows /dll /pdb:none /debug /machine:I386 /out:"Debug/libdb_tcl@DB_VERSION_MAJOR@@DB_VERSION_MINOR@d.dll" /fixed:no
+# ADD LINK32 Debug/libdb@DB_VERSION_MAJOR@@DB_VERSION_MINOR@d.lib tcl83d.lib /nologo /base:"0x13000000" /subsystem:windows /dll /pdb:none /debug /machine:I386 /out:"Debug/libdb_tcl@DB_VERSION_MAJOR@@DB_VERSION_MINOR@d.dll" /fixed:no
!ENDIF
diff --git a/bdb/clib/getcwd.c b/bdb/clib/getcwd.c
index 630facb4fdb..bae50dfe90c 100644
--- a/bdb/clib/getcwd.c
+++ b/bdb/clib/getcwd.c
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Copyright (c) 1996-2002
* Sleepycat Software. All rights reserved.
*/
/*
@@ -36,7 +36,7 @@
#include "db_config.h"
#ifndef lint
-static const char revid[] = "$Id: getcwd.c,v 11.7 2000/11/30 00:58:30 ubell Exp $";
+static const char revid[] = "$Id: getcwd.c,v 11.13 2002/02/28 21:27:18 ubell Exp $";
#endif /* not lint */
#ifndef NO_SYSTEM_INCLUDES
@@ -120,7 +120,7 @@ getcwd(pt, size)
ept = pt + size;
} else {
if ((ret =
- __os_malloc(NULL, ptsize = 1024 - 4, NULL, &pt)) != 0) {
+ __os_malloc(NULL, ptsize = 1024 - 4, &pt)) != 0) {
__os_set_errno(ret);
return (NULL);
}
@@ -134,7 +134,7 @@ getcwd(pt, size)
* Should always be enough (it's 340 levels). If it's not, allocate
* as necessary. Special case the first stat, it's ".", not "..".
*/
- if ((ret = __os_malloc(NULL, upsize = 1024 - 4, NULL, &up)) != 0)
+ if ((ret = __os_malloc(NULL, upsize = 1024 - 4, &up)) != 0)
goto err;
eup = up + 1024;
bup = up;
@@ -167,7 +167,7 @@ getcwd(pt, size)
* been that way and stuff would probably break.
*/
bcopy(bpt, pt, ept - bpt);
- __os_free(up, upsize);
+ __os_free(NULL, up);
return (pt);
}
@@ -177,7 +177,7 @@ getcwd(pt, size)
* possible component name, plus a trailing NULL.
*/
if (bup + 3 + MAXNAMLEN + 1 >= eup) {
- if (__os_realloc(NULL, upsize *= 2, NULL, &up) != 0)
+ if (__os_realloc(NULL, upsize *= 2, &up) != 0)
goto err;
bup = up;
eup = up + upsize;
@@ -238,7 +238,7 @@ getcwd(pt, size)
}
off = bpt - pt;
len = ept - bpt;
- if (__os_realloc(NULL, ptsize *= 2, NULL, &pt) != 0)
+ if (__os_realloc(NULL, ptsize *= 2, &pt) != 0)
goto err;
bpt = pt + off;
ept = pt + ptsize;
@@ -261,12 +261,12 @@ notfound:
* didn't find the current directory in its parent directory, set
* errno to ENOENT.
*/
- if (__os_get_errno() == 0)
+ if (__os_get_errno_ret_zero() == 0)
__os_set_errno(save_errno == 0 ? ENOENT : save_errno);
/* FALLTHROUGH */
err:
if (ptsize)
- __os_free(pt, ptsize);
- __os_free(up, upsize);
+ __os_free(NULL, pt);
+ __os_free(NULL, up);
return (NULL);
}
diff --git a/bdb/clib/getopt.c b/bdb/clib/getopt.c
index 667fca1d78c..3f6659ea6e6 100644
--- a/bdb/clib/getopt.c
+++ b/bdb/clib/getopt.c
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Copyright (c) 1996-2002
* Sleepycat Software. All rights reserved.
*/
/*
@@ -36,7 +36,7 @@
#include "db_config.h"
#ifndef lint
-static const char revid[] = "$Id: getopt.c,v 11.4 2000/02/14 02:59:40 bostic Exp $";
+static const char revid[] = "$Id: getopt.c,v 11.7 2002/01/11 15:51:28 bostic Exp $";
#endif /* not lint */
#ifndef NO_SYSTEM_INCLUDES
@@ -47,6 +47,8 @@ static const char revid[] = "$Id: getopt.c,v 11.4 2000/02/14 02:59:40 bostic Exp
#include "db_int.h"
+int __db_getopt_reset; /* global reset for VxWorks. */
+
int opterr = 1, /* if error message should be printed */
optind = 1, /* index into parent argv vector */
optopt, /* character checked for validity */
@@ -78,6 +80,19 @@ getopt(nargc, nargv, ostr)
static char *place = EMSG; /* option letter processing */
char *oli; /* option letter list index */
+ /*
+ * VxWorks needs to be able to repeatedly call getopt from multiple
+ * programs within its global name space.
+ */
+ if (__db_getopt_reset) {
+ __db_getopt_reset = 0;
+
+ opterr = optind = 1;
+ optopt = optreset = 0;
+ optarg = NULL;
+ progname = NULL;
+ place = EMSG;
+ }
if (!progname) {
if ((progname = __db_rpath(*nargv)) == NULL)
progname = *nargv;
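
Editorial aside, not part of the patch: the __db_getopt_reset flag added above lets a long-lived process (such as a VxWorks task set) restart option parsing from scratch. A minimal usage sketch, assuming the bundled clib/getopt.c is compiled and linked into the program; the option letters, the parse_args name, and the handler bodies are illustrative only:

    #include <stdio.h>
    #include <unistd.h>

    extern int __db_getopt_reset;       /* defined in clib/getopt.c above */

    /* Parse a fresh argv from the beginning, even if getopt() ran before. */
    static void
    parse_args(int argc, char *argv[])
    {
            int ch;

            __db_getopt_reset = 1;      /* return getopt() to its initial state */
            while ((ch = getopt(argc, argv, "ho:")) != EOF)
                    switch (ch) {
                    case 'o':
                            printf("output file: %s\n", optarg);
                            break;
                    case 'h':
                    default:
                            printf("usage: prog [-h] [-o file]\n");
                            break;
                    }
    }
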
diff --git a/bdb/clib/memcmp.c b/bdb/clib/memcmp.c
index 2aedc3fa6b8..979badaef30 100644
--- a/bdb/clib/memcmp.c
+++ b/bdb/clib/memcmp.c
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Copyright (c) 1996-2002
* Sleepycat Software. All rights reserved.
*/
/*
@@ -36,7 +36,7 @@
#include "db_config.h"
#ifndef lint
-static const char revid[] = "$Id: memcmp.c,v 11.5 2000/02/24 21:58:12 bostic Exp $";
+static const char revid[] = "$Id: memcmp.c,v 11.7 2002/01/11 15:51:28 bostic Exp $";
#endif /* not lint */
#ifndef NO_SYSTEM_INCLUDES
diff --git a/bdb/clib/memmove.c b/bdb/clib/memmove.c
index da6bcfe8b13..632d50788da 100644
--- a/bdb/clib/memmove.c
+++ b/bdb/clib/memmove.c
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Copyright (c) 1996-2002
* Sleepycat Software. All rights reserved.
*/
/*
@@ -36,7 +36,7 @@
#include "db_config.h"
#ifndef lint
-static const char revid[] = "$Id: memmove.c,v 11.4 2000/02/14 02:59:40 bostic Exp $";
+static const char revid[] = "$Id: memmove.c,v 11.6 2002/01/11 15:51:28 bostic Exp $";
#endif /* not lint */
#ifndef NO_SYSTEM_INCLUDES
diff --git a/bdb/clib/raise.c b/bdb/clib/raise.c
index acec86cd63a..fcf3bbcbd7f 100644
--- a/bdb/clib/raise.c
+++ b/bdb/clib/raise.c
@@ -1,14 +1,14 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1997, 1998, 1999, 2000
+ * Copyright (c) 1997-2002
* Sleepycat Software. All rights reserved.
*/
#include "db_config.h"
#ifndef lint
-static const char revid[] = "$Id: raise.c,v 11.3 2000/02/14 02:59:41 bostic Exp $";
+static const char revid[] = "$Id: raise.c,v 11.6 2002/01/11 15:51:28 bostic Exp $";
#endif /* not lint */
#ifndef NO_SYSTEM_INCLUDES
@@ -28,5 +28,9 @@ int
raise(s)
int s;
{
+ /*
+ * Do not use __os_id(), as it may not return the process ID -- any
+ * system with kill(3) probably has getpid(3).
+ */
return (kill(getpid(), s));
}
diff --git a/bdb/clib/snprintf.c b/bdb/clib/snprintf.c
index 6aa9e3ae66c..fa1a63425e8 100644
--- a/bdb/clib/snprintf.c
+++ b/bdb/clib/snprintf.c
@@ -1,14 +1,14 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Copyright (c) 1996-2002
* Sleepycat Software. All rights reserved.
*/
#include "db_config.h"
#ifndef lint
-static const char revid[] = "$Id: snprintf.c,v 11.5 2000/12/22 19:38:37 bostic Exp $";
+static const char revid[] = "$Id: snprintf.c,v 11.10 2002/01/11 15:51:28 bostic Exp $";
#endif /* not lint */
#ifndef NO_SYSTEM_INCLUDES
@@ -39,23 +39,36 @@ snprintf(str, n, fmt, va_alist)
va_dcl
#endif
{
+ static int ret_charpnt = -1;
va_list ap;
- int rval;
+ int len;
COMPQUIET(n, 0);
+
+ /*
+ * Some old versions of sprintf return a pointer to the first argument
+ * instead of a character count. Assume the return value of snprintf,
+ * vsprintf, etc. will be the same as sprintf, and check the easy one.
+ *
+ * We do this test at run-time because it's not a test we can do in a
+ * cross-compilation environment.
+ */
+ if (ret_charpnt == -1) {
+ char buf[10];
+
+ ret_charpnt =
+ sprintf(buf, "123") != 3 ||
+ sprintf(buf, "123456789") != 9 ||
+ sprintf(buf, "1234") != 4;
+ }
+
#ifdef __STDC__
va_start(ap, fmt);
#else
va_start(ap);
#endif
-#ifdef SPRINTF_RET_CHARPNT
- (void)vsprintf(str, fmt, ap);
+ len = vsprintf(str, fmt, ap);
va_end(ap);
- return (strlen(str));
-#else
- rval = vsprintf(str, fmt, ap);
- va_end(ap);
- return (rval);
-#endif
+ return (ret_charpnt ? (int)strlen(str) : len);
}
#endif
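
Editorial aside, not part of the patch: the hunk above drops the compile-time SPRINTF_RET_CHARPNT test in favor of a one-time run-time probe, so cross-compiled builds no longer need to know in advance whether sprintf() returns a character count or a pointer. A minimal standalone sketch of the same probe, assuming a hosted C environment; the helper name is illustrative:

    #include <stdio.h>

    /*
     * Return 1 if sprintf() returns something other than the character count
     * (i.e., an old SVR3-style pointer return), 0 otherwise.
     */
    static int
    sprintf_returns_pointer(void)
    {
            char buf[16];

            return (sprintf(buf, "123") != 3 ||
                sprintf(buf, "123456789") != 9 ||
                sprintf(buf, "1234") != 4);
    }

    int
    main(void)
    {
            printf("sprintf returns %s\n",
                sprintf_returns_pointer() ? "a pointer" : "a count");
            return (0);
    }
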
diff --git a/bdb/clib/strcasecmp.c b/bdb/clib/strcasecmp.c
index 6633197bc8c..d5ce6d76d5f 100644
--- a/bdb/clib/strcasecmp.c
+++ b/bdb/clib/strcasecmp.c
@@ -34,7 +34,7 @@
#include "db_config.h"
#ifndef lint
-static const char revid[] = "$Id: strcasecmp.c,v 1.4 2000/03/24 22:31:31 bostic Exp $";
+static const char revid[] = "$Id: strcasecmp.c,v 1.7 2001/11/15 17:51:38 bostic Exp $";
#endif /* not lint */
#ifndef NO_SYSTEM_INCLUDES
@@ -78,14 +78,16 @@ static const unsigned char charmap[] = {
'\340', '\341', '\342', '\343', '\344', '\345', '\346', '\347',
'\350', '\351', '\352', '\353', '\354', '\355', '\356', '\357',
'\360', '\361', '\362', '\363', '\364', '\365', '\366', '\367',
- '\370', '\371', '\372', '\373', '\374', '\375', '\376', '\377',
+ '\370', '\371', '\372', '\373', '\374', '\375', '\376', '\377'
};
/*
* strcasecmp --
* Do strcmp(3) in a case-insensitive manner.
*
+ * PUBLIC: #ifndef HAVE_STRCASECMP
* PUBLIC: int strcasecmp __P((const char *, const char *));
+ * PUBLIC: #endif
*/
int
strcasecmp(s1, s2)
@@ -100,3 +102,31 @@ strcasecmp(s1, s2)
return (0);
return (cm[*us1] - cm[*--us2]);
}
+
+/*
+ * strncasecmp --
+ * Do strncmp(3) in a case-insensitive manner.
+ *
+ * PUBLIC: #ifndef HAVE_STRCASECMP
+ * PUBLIC: int strncasecmp __P((const char *, const char *, size_t));
+ * PUBLIC: #endif
+ */
+int
+strncasecmp(s1, s2, n)
+ const char *s1, *s2;
+ register size_t n;
+{
+ if (n != 0) {
+ register const unsigned char *cm = charmap,
+ *us1 = (const unsigned char *)s1,
+ *us2 = (const unsigned char *)s2;
+
+ do {
+ if (cm[*us1] != cm[*us2++])
+ return (cm[*us1] - cm[*--us2]);
+ if (*us1++ == '\0')
+ break;
+ } while (--n != 0);
+ }
+ return (0);
+}
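
Editorial aside, not part of the patch: the strncasecmp() fallback added above mirrors the POSIX routine of the same name, so callers behave identically whether or not HAVE_STRCASECMP is defined. A small usage sketch, assuming a POSIX <strings.h> (or the fallback above) supplies the declaration:

    #include <stdio.h>
    #include <strings.h>

    int
    main(void)
    {
            /* Case-insensitive prefix comparison: prints "match". */
            if (strncasecmp("Berkeley", "BERKELEY DB", 8) == 0)
                    printf("match\n");
            return (0);
    }
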
diff --git a/bdb/clib/strdup.c b/bdb/clib/strdup.c
new file mode 100644
index 00000000000..e68623f1407
--- /dev/null
+++ b/bdb/clib/strdup.c
@@ -0,0 +1,67 @@
+/*
+ * Copyright (c) 1988, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: strdup.c,v 1.5 2002/05/01 18:40:05 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <stddef.h>
+#include <stdlib.h>
+#include <string.h>
+#endif
+
+/*
+ * strdup --
+ *
+ * PUBLIC: #ifndef HAVE_STRDUP
+ * PUBLIC: char *strdup __P((const char *));
+ * PUBLIC: #endif
+ */
+char *
+strdup(str)
+ const char *str;
+{
+ size_t len;
+ char *copy;
+
+ len = strlen(str) + 1;
+ if (!(copy = malloc((u_int)len)))
+ return (NULL);
+ memcpy(copy, str, len);
+ return (copy);
+}
diff --git a/bdb/clib/strerror.c b/bdb/clib/strerror.c
index 0f7447b0419..06c28946b88 100644
--- a/bdb/clib/strerror.c
+++ b/bdb/clib/strerror.c
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1997, 1998, 1999, 2000
+ * Copyright (c) 1997-2002
* Sleepycat Software. All rights reserved.
*/
/*
@@ -36,7 +36,7 @@
#include "db_config.h"
#ifndef lint
-static const char revid[] = "$Id: strerror.c,v 11.4 2000/02/14 02:59:41 bostic Exp $";
+static const char revid[] = "$Id: strerror.c,v 11.6 2002/01/11 15:51:29 bostic Exp $";
#endif /* not lint */
/*
diff --git a/bdb/clib/vsnprintf.c b/bdb/clib/vsnprintf.c
index 3d27bc0d2f8..4ffea8cb0ad 100644
--- a/bdb/clib/vsnprintf.c
+++ b/bdb/clib/vsnprintf.c
@@ -1,14 +1,14 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Copyright (c) 1996-2002
* Sleepycat Software. All rights reserved.
*/
#include "db_config.h"
#ifndef lint
-static const char revid[] = "$Id: vsnprintf.c,v 11.4 2000/05/18 19:24:59 bostic Exp $";
+static const char revid[] = "$Id: vsnprintf.c,v 11.7 2002/01/11 15:51:29 bostic Exp $";
#endif /* not lint */
#ifndef NO_SYSTEM_INCLUDES
@@ -24,7 +24,7 @@ static const char revid[] = "$Id: vsnprintf.c,v 11.4 2000/05/18 19:24:59 bostic
* Bounded version of vsprintf.
*
* PUBLIC: #ifndef HAVE_VSNPRINTF
- * PUBLIC: int vsnprintf();
+ * PUBLIC: int vsnprintf __P((char *, size_t, const char *, va_list));
* PUBLIC: #endif
*/
#ifndef HAVE_VSNPRINTF
diff --git a/bdb/common/db_byteorder.c b/bdb/common/db_byteorder.c
index d089cfe4c99..d42d8e6a958 100644
--- a/bdb/common/db_byteorder.c
+++ b/bdb/common/db_byteorder.c
@@ -1,30 +1,42 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Copyright (c) 1996-2002
* Sleepycat Software. All rights reserved.
*/
#include "db_config.h"
#ifndef lint
-static const char revid[] = "$Id: db_byteorder.c,v 11.4 2000/11/30 00:58:31 ubell Exp $";
+static const char revid[] = "$Id: db_byteorder.c,v 11.8 2002/02/01 18:15:29 bostic Exp $";
#endif /* not lint */
#ifndef NO_SYSTEM_INCLUDES
#include <sys/types.h>
-
-#ifdef HAVE_ENDIAN_H
-#include <endian.h>
-#if BYTE_ORDER == BIG_ENDIAN
-#define WORDS_BIGENDIAN 1
-#endif
-#endif
-
#endif
#include "db_int.h"
-#include "common_ext.h"
+
+/*
+ * __db_isbigendian --
+ * Return 1 if big-endian (Motorola and Sparc), not little-endian
+ * (Intel and Vax). We do this work at run-time, rather than at
+ * configuration time so cross-compilation and general embedded
+ * system support is simpler.
+ *
+ * PUBLIC: int __db_isbigendian __P((void));
+ */
+int
+__db_isbigendian()
+{
+ union { /* From Harbison & Steele. */
+ long l;
+ char c[sizeof(long)];
+ } u;
+
+ u.l = 1;
+ return (u.c[sizeof(long) - 1] == 1);
+}
/*
* __db_byteorder --
@@ -38,21 +50,21 @@ __db_byteorder(dbenv, lorder)
DB_ENV *dbenv;
int lorder;
{
+ int is_bigendian;
+
+ is_bigendian = __db_isbigendian();
+
switch (lorder) {
case 0:
break;
case 1234:
-#if defined(WORDS_BIGENDIAN)
- return (DB_SWAPBYTES);
-#else
+ if (is_bigendian)
+ return (DB_SWAPBYTES);
break;
-#endif
case 4321:
-#if defined(WORDS_BIGENDIAN)
+ if (!is_bigendian)
+ return (DB_SWAPBYTES);
break;
-#else
- return (DB_SWAPBYTES);
-#endif
default:
__db_err(dbenv,
"unsupported byte order, only big and little-endian supported");
diff --git a/bdb/common/db_err.c b/bdb/common/db_err.c
index d69bd023dfd..7c9ee3c4fde 100644
--- a/bdb/common/db_err.c
+++ b/bdb/common/db_err.c
@@ -1,14 +1,14 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Copyright (c) 1996-2002
* Sleepycat Software. All rights reserved.
*/
#include "db_config.h"
#ifndef lint
-static const char revid[] = "$Id: db_err.c,v 11.38 2001/01/22 21:50:25 sue Exp $";
+static const char revid[] = "$Id: db_err.c,v 11.80 2002/07/30 01:21:53 bostic Exp $";
#endif /* not lint */
#ifndef NO_SYSTEM_INCLUDES
@@ -20,21 +20,12 @@ static const char revid[] = "$Id: db_err.c,v 11.38 2001/01/22 21:50:25 sue Exp $
#endif
#include "db_int.h"
-#include "db_shash.h"
-#include "lock.h"
-#include "lock_ext.h"
-#include "log.h"
-#include "log_ext.h"
-#include "mp.h"
-#include "mp_ext.h"
-#include "txn.h"
-#include "txn_ext.h"
-#include "clib_ext.h"
-#include "common_ext.h"
-#include "db_auto.h"
-
-static void __db_errcall __P((const DB_ENV *, int, int, const char *, va_list));
-static void __db_errfile __P((const DB_ENV *, int, int, const char *, va_list));
+#include "dbinc/db_page.h"
+#include "dbinc/db_am.h"
+#include "dbinc/db_shash.h"
+#include "dbinc/lock.h"
+#include "dbinc/log.h"
+#include "dbinc/txn.h"
/*
* __db_fchk --
@@ -89,12 +80,13 @@ __db_ferr(dbenv, name, iscombo)
* __db_pgerr --
* Error when unable to retrieve a specified page.
*
- * PUBLIC: int __db_pgerr __P((DB *, db_pgno_t));
+ * PUBLIC: void __db_pgerr __P((DB *, db_pgno_t, int));
*/
-int
-__db_pgerr(dbp, pgno)
+void
+__db_pgerr(dbp, pgno, errval)
DB *dbp;
db_pgno_t pgno;
+ int errval;
{
/*
* Three things are certain:
@@ -103,23 +95,22 @@ __db_pgerr(dbp, pgno)
*/
__db_err(dbp->dbenv,
"unable to create/retrieve page %lu", (u_long)pgno);
- return (__db_panic(dbp->dbenv, EIO));
+ (void)__db_panic(dbp->dbenv, errval);
}
/*
* __db_pgfmt --
* Error when a page has the wrong format.
*
- * PUBLIC: int __db_pgfmt __P((DB *, db_pgno_t));
+ * PUBLIC: int __db_pgfmt __P((DB_ENV *, db_pgno_t));
*/
int
-__db_pgfmt(dbp, pgno)
- DB *dbp;
+__db_pgfmt(dbenv, pgno)
+ DB_ENV *dbenv;
db_pgno_t pgno;
{
- __db_err(dbp->dbenv,
- "page %lu: illegal page type or format", (u_long)pgno);
- return (__db_panic(dbp->dbenv, EINVAL));
+ __db_err(dbenv, "page %lu: illegal page type or format", (u_long)pgno);
+ return (__db_panic(dbenv, EINVAL));
}
/*
@@ -157,7 +148,7 @@ __db_assert(failedexpr, file, line)
(void)fprintf(stderr,
"__db_assert: \"%s\" failed: file \"%s\", line %d\n",
failedexpr, file, line);
- fflush(stderr);
+ (void)fflush(stderr);
/* We want a stack trace of how this could possibly happen. */
abort();
@@ -176,7 +167,7 @@ int
__db_panic_msg(dbenv)
DB_ENV *dbenv;
{
- __db_err(dbenv, "region error detected; run recovery.");
+ __db_err(dbenv, "fatal region error detected; run recovery");
return (DB_RUNRECOVERY);
}
@@ -191,11 +182,10 @@ __db_panic(dbenv, errval)
DB_ENV *dbenv;
int errval;
{
-
if (dbenv != NULL) {
- ((REGENV *)((REGINFO *)dbenv->reginfo)->primary)->panic = 1;
+ PANIC_SET(dbenv, 1);
- dbenv->db_panic = errval;
+ dbenv->panic_errval = errval;
__db_err(dbenv, "PANIC: %s", db_strerror(errval));
@@ -203,6 +193,17 @@ __db_panic(dbenv, errval)
dbenv->db_paniccall(dbenv, errval);
}
+#if defined(DIAGNOSTIC) && !defined(CONFIG_TEST)
+ /*
+ * We want a stack trace of how this could possibly happen.
+ *
+ * Don't drop core if it's the test suite -- it's reasonable for the
+ * test suite to check to make sure that DB_RUNRECOVERY is returned
+ * under certain conditions.
+ */
+ abort();
+#endif
+
/*
* Chaos reigns within.
* Reflect, repent, and reboot.
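A note on the hunk above: __db_panic() now sets the panic state through PANIC_SET(), records the error in panic_errval, and still calls the application's paniccall hook before the new DIAGNOSTIC abort(). As a hedged sketch (not part of this patch) of how an application hooks that path through the public C API, with the handle name and function names chosen for illustration:

#include <stdio.h>

#include <db.h>

/* Called by __db_panic() once the panic flag has been set. */
static void
panic_cb(DB_ENV *dbenv, int errval)
{
	(void)dbenv;
	fprintf(stderr, "environment panic: %s\n", db_strerror(errval));
	/* From here on, most calls fail with DB_RUNRECOVERY until recovery is run. */
}

/* Register the hook; dbenv is an already-created environment handle. */
int
install_panic_hook(DB_ENV *dbenv)
{
	return (dbenv->set_paniccall(dbenv, panic_cb));
}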
@@ -214,6 +215,8 @@ __db_panic(dbenv, errval)
/*
* db_strerror --
* ANSI C strerror(3) for DB.
+ *
+ * EXTERN: char *db_strerror __P((int));
*/
char *
db_strerror(error)
@@ -232,8 +235,8 @@ db_strerror(error)
* altered.
*/
switch (error) {
- case DB_INCOMPLETE:
- return ("DB_INCOMPLETE: Cache flush was unable to complete");
+ case DB_DONOTINDEX:
+ return ("DB_DONOTINDEX: Secondary index callback returns null");
case DB_KEYEMPTY:
return ("DB_KEYEMPTY: Non-existent key/data pair");
case DB_KEYEXIST:
@@ -253,8 +256,26 @@ db_strerror(error)
return ("DB_NOTFOUND: No matching key/data pair found");
case DB_OLD_VERSION:
return ("DB_OLDVERSION: Database requires a version upgrade");
+ case DB_PAGE_NOTFOUND:
+ return ("DB_PAGE_NOTFOUND: Requested page not found");
+ case DB_REP_DUPMASTER:
+ return ("DB_REP_DUPMASTER: A second master site appeared");
+ case DB_REP_HOLDELECTION:
+ return ("DB_REP_HOLDELECTION: Need to hold an election");
+ case DB_REP_NEWMASTER:
+ return ("DB_REP_NEWMASTER: A new master has declared itself");
+ case DB_REP_NEWSITE:
+ return ("DB_REP_NEWSITE: A new site has entered the system");
+ case DB_REP_OUTDATED:
+ return
+ ("DB_REP_OUTDATED: Insufficient logs on master to recover");
+ case DB_REP_UNAVAIL:
+ return ("DB_REP_UNAVAIL: Unable to elect a master");
case DB_RUNRECOVERY:
return ("DB_RUNRECOVERY: Fatal error, run database recovery");
+ case DB_SECONDARY_BAD:
+ return
+ ("DB_SECONDARY_BAD: Secondary index item missing from primary");
case DB_VERIFY_BAD:
return ("DB_VERIFY_BAD: Database verification failed");
default: {
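db_strerror() above now covers both the DB-private return codes added in this release (DB_PAGE_NOTFOUND, the DB_REP_* family, DB_SECONDARY_BAD) and plain errno values through the default case. A minimal sketch of the usual calling pattern; the handle dbp and the key contents are assumptions for illustration:

#include <stdio.h>
#include <string.h>

#include <db.h>

/* Look up one record; report anything other than a plain miss via db_strerror(). */
int
lookup_fruit(DB *dbp)
{
	DBT key, data;
	int ret;

	memset(&key, 0, sizeof(key));
	memset(&data, 0, sizeof(data));
	key.data = "fruit";
	key.size = 5;

	switch (ret = dbp->get(dbp, NULL, &key, &data, 0)) {
	case 0:
	case DB_NOTFOUND:		/* an expected miss, not an error */
		return (0);
	default:
		fprintf(stderr, "DB->get: %s\n", db_strerror(ret));
		return (ret);
	}
}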
@@ -274,8 +295,8 @@ db_strerror(error)
/*
* __db_err --
- * Standard DB error routine. The same as db_errx, except that we
- * don't write to stderr if no output mechanism was specified.
+ * Standard DB error routine. The same as errx, except we don't write
+ * to stderr if no output mechanism was specified.
*
* PUBLIC: void __db_err __P((const DB_ENV *, const char *, ...));
*/
@@ -289,81 +310,17 @@ __db_err(dbenv, fmt, va_alist)
va_dcl
#endif
{
- va_list ap;
-
-/*
- XXX
- Log the message.
-
- It would be nice to automatically log the error into the log files
- if the application is configured for logging. The problem is that
- if we currently hold the log region mutex, we will self-deadlock.
- Leave all the structure in place, but turned off. I'd like to fix
- this in the future by detecting if we have the log region already
- locked (e.g., a flag in the environment handle), or perhaps even
- have a finer granularity so that the only calls to __db_err we
- can't log are those made while we have the current log buffer
- locked, or perhaps have a separate buffer into which we log error
- messages.
-
-#ifdef __STDC__
- va_start(ap, fmt);
-#else
- va_start(ap);
-#endif
- __db_real_log(dbenv, NULL, "db_err", 0, fmt, ap);
-
- va_end(ap);
-#endif
-*/
-
- /* Tell the application. */
-#ifdef __STDC__
- va_start(ap, fmt);
-#else
- va_start(ap);
-#endif
- __db_real_err(dbenv, 0, 0, 0, fmt, ap);
-
- va_end(ap);
-}
-
-/*
- * __db_real_err --
- * All the DB error routines end up here.
- *
- * PUBLIC: void __db_real_err
- * PUBLIC: __P((const DB_ENV *, int, int, int, const char *, va_list));
- */
-void
-__db_real_err(dbenv, error, error_set, stderr_default, fmt, ap)
- const DB_ENV *dbenv;
- int error, error_set, stderr_default;
- const char *fmt;
- va_list ap;
-{
- /* Call the user's callback function, if specified. */
- if (dbenv != NULL && dbenv->db_errcall != NULL)
- __db_errcall(dbenv, error, error_set, fmt, ap);
-
- /* Write to the user's file descriptor, if specified. */
- if (dbenv != NULL && dbenv->db_errfile != NULL)
- __db_errfile(dbenv, error, error_set, fmt, ap);
-
- /*
- * If we have a default and we didn't do either of the above, write
- * to the default.
- */
- if (stderr_default && (dbenv == NULL ||
- (dbenv->db_errcall == NULL && dbenv->db_errfile == NULL)))
- __db_errfile(dbenv, error, error_set, fmt, ap);
+ DB_REAL_ERR(dbenv, 0, 0, 0, fmt);
}
/*
* __db_errcall --
* Do the error message work for callback functions.
+ *
+ * PUBLIC: void __db_errcall
+ * PUBLIC: __P((const DB_ENV *, int, int, const char *, va_list));
*/
-static void
+void
__db_errcall(dbenv, error, error_set, fmt, ap)
const DB_ENV *dbenv;
int error, error_set;
@@ -371,27 +328,44 @@ __db_errcall(dbenv, error, error_set, fmt, ap)
va_list ap;
{
char *p;
- char __errbuf[2048]; /* !!!: END OF THE STACK DON'T TRUST SPRINTF. */
+ char errbuf[2048]; /* !!!: END OF THE STACK DON'T TRUST SPRINTF. */
- p = __errbuf;
- if (fmt != NULL) {
- p += vsnprintf(__errbuf, sizeof(__errbuf), fmt, ap);
- if (error_set) {
- *p++ = ':';
- *p++ = ' ';
- }
- }
+ p = errbuf;
+ if (fmt != NULL)
+ p += vsnprintf(errbuf, sizeof(errbuf), fmt, ap);
if (error_set)
- (void)strcpy(p, db_strerror(error));
+ p += snprintf(p,
+ sizeof(errbuf) - (p - errbuf), ": %s", db_strerror(error));
+ /*
+ * !!!
+ * We're potentially manipulating strings handed us by the application,
+ * and on systems without a real snprintf() the sprintf() calls could
+ * have overflowed the buffer. We can't do anything about it now, but
+ * we don't want to return control to the application, we might have
+ * overwritten the stack with a Trojan horse. We're not trying to do
+ * anything recoverable here because systems without snprintf support
+ * are pretty rare anymore.
+ */
+ if ((size_t)(p - errbuf) > sizeof(errbuf)) {
+ (void)fprintf(stderr,
+ "Berkeley DB: error callback interface buffer overflow\n");
+ (void)fflush(stderr);
+
+ abort();
+ /* NOTREACHED */
+ }
- dbenv->db_errcall(dbenv->db_errpfx, __errbuf);
+ dbenv->db_errcall(dbenv->db_errpfx, errbuf);
}
/*
* __db_errfile --
* Do the error message work for FILE *s.
+ *
+ * PUBLIC: void __db_errfile
+ * PUBLIC: __P((const DB_ENV *, int, int, const char *, va_list));
*/
-static void
+void
__db_errfile(dbenv, error, error_set, fmt, ap)
const DB_ENV *dbenv;
int error, error_set;
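The rewritten __db_errcall() above drops the old strcpy() tail in favor of length-checked vsnprintf()/snprintf() calls plus a hard overflow check. A standalone sketch of that bounded-append idiom outside the DB_ENV plumbing; the function name is hypothetical:

#include <stdarg.h>
#include <stdio.h>
#include <string.h>

/* Format "message: reason" into buf without ever writing past len bytes. */
static void
format_error(char *buf, size_t len, int error, const char *fmt, ...)
{
	va_list ap;
	char *p;

	p = buf;
	va_start(ap, fmt);
	if (fmt != NULL)
		p += vsnprintf(buf, len, fmt, ap);
	va_end(ap);

	/* vsnprintf() reports the length it wanted, so p may point past the end. */
	if (error != 0 && (size_t)(p - buf) < len)
		(void)snprintf(p, len - (size_t)(p - buf), ": %s", strerror(error));
}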
@@ -436,48 +410,22 @@ __db_logmsg(dbenv, txnid, opname, flags, fmt, va_alist)
va_dcl
#endif
{
- va_list ap;
-
-#ifdef __STDC__
- va_start(ap, fmt);
-#else
- va_start(ap);
-#endif
- __db_real_log(dbenv, txnid, opname, flags, fmt, ap);
-
- va_end(ap);
-}
-
-/*
- * __db_real_log --
- * Write information into the DB log.
- *
- * PUBLIC: void __db_real_log __P((const DB_ENV *,
- * PUBLIC: DB_TXN *, const char *, u_int32_t, const char *, va_list ap));
- */
-void
-#ifdef __STDC__
-__db_real_log(const DB_ENV *dbenv, DB_TXN *txnid,
- const char *opname, u_int32_t flags, const char *fmt, va_list ap)
-#else
-__db_real_log(dbenv, txnid, opname, flags, fmt, ap)
- const DB_ENV *dbenv;
- DB_TXN *txnid;
- const char *opname, *fmt;
- u_int32_t flags;
- va_list ap;
-#endif
-{
DBT opdbt, msgdbt;
DB_LSN lsn;
+ va_list ap;
char __logbuf[2048]; /* !!!: END OF THE STACK DON'T TRUST SPRINTF. */
if (!LOGGING_ON(dbenv))
return;
+#ifdef __STDC__
+ va_start(ap, fmt);
+#else
+ va_start(ap);
+#endif
memset(&opdbt, 0, sizeof(opdbt));
opdbt.data = (void *)opname;
- opdbt.size = strlen(opname) + 1;
+ opdbt.size = (u_int32_t)(strlen(opname) + 1);
memset(&msgdbt, 0, sizeof(msgdbt));
msgdbt.data = __logbuf;
@@ -490,6 +438,8 @@ __db_real_log(dbenv, txnid, opname, flags, fmt, ap)
*/
__db_debug_log(
(DB_ENV *)dbenv, txnid, &lsn, flags, &opdbt, -1, &msgdbt, NULL, 0);
+
+ va_end(ap);
}
/*
@@ -511,34 +461,119 @@ __db_unknown_flag(dbenv, routine, flag)
/*
* __db_unknown_type -- report internal error
*
- * PUBLIC: int __db_unknown_type __P((DB_ENV *, char *, u_int32_t));
+ * PUBLIC: int __db_unknown_type __P((DB_ENV *, char *, DBTYPE));
*/
int
__db_unknown_type(dbenv, routine, type)
DB_ENV *dbenv;
char *routine;
- u_int32_t type;
+ DBTYPE type;
{
__db_err(dbenv, "%s: Unknown db type: 0x%x", routine, type);
DB_ASSERT(0);
return (EINVAL);
}
-#ifdef DIAGNOSTIC
/*
- * __db_missing_txn_err --
- * Cannot combine operations with and without transactions.
+ * __db_check_txn --
+ * Check for common transaction errors.
*
- * PUBLIC: #ifdef DIAGNOSTIC
- * PUBLIC: int __db_missing_txn_err __P((DB_ENV *));
- * PUBLIC: #endif
+ * PUBLIC: int __db_check_txn __P((DB *, DB_TXN *, u_int32_t, int));
*/
int
-__db_missing_txn_err(dbenv)
- DB_ENV *dbenv;
+__db_check_txn(dbp, txn, assoc_lid, read_op)
+ DB *dbp;
+ DB_TXN *txn;
+ u_int32_t assoc_lid;
+ int read_op;
{
+ DB_ENV *dbenv;
+
+ dbenv = dbp->dbenv;
+
+ /*
+ * If we are in recovery or aborting a transaction, then we
+ * don't need to enforce the rules about dbp's not allowing
+ * transactional operations in non-transactional dbps and
+ * vice versa. This happens all the time as the dbp during
+ * an abort may be transactional, but we undo operations
+ * outside a transaction since we're aborting.
+ */
+ if (IS_RECOVERING(dbenv) || F_ISSET(dbp, DB_AM_RECOVER))
+ return (0);
+
+ /*
+ * Check for common transaction errors:
+ * Failure to pass a transaction handle to a DB operation
+ * Failure to configure the DB handle in a proper environment
+ * Operation on a handle whose open commit hasn't completed.
+ *
+ * Read operations don't require a txn even if we've used one before
+ * with this handle, although if they do have a txn, we'd better be
+ * prepared for it.
+ */
+ if (txn == NULL) {
+ if (!read_op && F_ISSET(dbp, DB_AM_TXN)) {
+ __db_err(dbenv,
+ "DB handle previously used in transaction, missing transaction handle");
+ return (EINVAL);
+ }
+
+ if (dbp->cur_lid >= TXN_MINIMUM)
+ goto open_err;
+ } else {
+ if (dbp->cur_lid >= TXN_MINIMUM && dbp->cur_lid != txn->txnid)
+ goto open_err;
+
+ if (!TXN_ON(dbenv))
+ return (__db_not_txn_env(dbenv));
+
+ if (!F_ISSET(dbp, DB_AM_TXN)) {
+ __db_err(dbenv,
+ "Transaction specified for a DB handle opened outside a transaction");
+ return (EINVAL);
+ }
+ }
+
+ /*
+ * If dbp->associate_lid is not DB_LOCK_INVALIDID, that means we're in
+ * the middle of a DB->associate with DB_CREATE (i.e., a secondary index
+ * creation).
+ *
+ * In addition to the usual transaction rules, we need to lock out
+ * non-transactional updates that aren't part of the associate (and
+ * thus are using some other locker ID).
+ *
+ * Transactional updates should simply block; from the time we
+ * decide to build the secondary until commit, we'll hold a write
+ * lock on all of its pages, so it should be safe to attempt to update
+ * the secondary in another transaction (presumably by updating the
+ * primary).
+ */
+ if (!read_op && dbp->associate_lid != DB_LOCK_INVALIDID &&
+ txn != NULL && dbp->associate_lid != assoc_lid) {
+ __db_err(dbenv,
+ "Operation forbidden while secondary index is being created");
+ return (EINVAL);
+ }
+
+ return (0);
+open_err:
__db_err(dbenv,
- "DB handle previously used in transaction, missing transaction handle.");
+ "Transaction that opened the DB handle is still active");
+ return (EINVAL);
+}
+
+/*
+ * __db_not_txn_env --
+ * DB handle must be in an environment that supports transactions.
+ *
+ * PUBLIC: int __db_not_txn_env __P((DB_ENV *));
+ */
+int
+__db_not_txn_env(dbenv)
+ DB_ENV *dbenv;
+{
+ __db_err(dbenv, "DB environment not configured for transactions");
return (EINVAL);
}
-#endif
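The new __db_check_txn() enforces the handle-level transaction rules at run time. Seen from the application side, the most common mistake it catches looks roughly like the following sketch, which assumes an already-created transactional environment dbenv, a created database handle dbp, and initialized key/data DBTs (the file name and function name are placeholders):

#include <db.h>

/* Open dbp under a transaction, then (incorrectly) update it without one. */
int
open_then_misuse(DB_ENV *dbenv, DB *dbp, DBT *key, DBT *data)
{
	DB_TXN *txn;
	int ret;

	if ((ret = dbenv->txn_begin(dbenv, NULL, &txn, 0)) != 0)
		return (ret);
	if ((ret = dbp->open(dbp,
	    txn, "a.db", NULL, DB_BTREE, DB_CREATE, 0644)) != 0) {
		(void)txn->abort(txn);
		return (ret);
	}
	if ((ret = txn->commit(txn, 0)) != 0)
		return (ret);

	/*
	 * The handle is now transaction-protected; a write with txn == NULL
	 * is rejected by __db_check_txn() with EINVAL and the message
	 * "DB handle previously used in transaction, missing transaction
	 * handle".  Read operations are still allowed without a transaction.
	 */
	return (dbp->put(dbp, NULL, key, data, 0));
}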
diff --git a/bdb/common/db_getlong.c b/bdb/common/db_getlong.c
index bead530cd94..6ba8ebfcdaa 100644
--- a/bdb/common/db_getlong.c
+++ b/bdb/common/db_getlong.c
@@ -1,14 +1,14 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Copyright (c) 1996-2002
* Sleepycat Software. All rights reserved.
*/
#include "db_config.h"
#ifndef lint
-static const char revid[] = "$Id: db_getlong.c,v 11.11 2000/12/22 19:16:04 bostic Exp $";
+static const char revid[] = "$Id: db_getlong.c,v 11.18 2002/03/28 20:13:33 bostic Exp $";
#endif /* not lint */
#ifndef NO_SYSTEM_INCLUDES
@@ -20,7 +20,6 @@ static const char revid[] = "$Id: db_getlong.c,v 11.11 2000/12/22 19:16:04 bosti
#endif
#include "db_int.h"
-#include "clib_ext.h"
/*
* __db_getlong --
@@ -43,42 +42,40 @@ __db_getlong(dbp, progname, p, min, max, storep)
val = strtol(p, &end, 10);
if ((val == LONG_MIN || val == LONG_MAX) &&
__os_get_errno() == ERANGE) {
- if (dbp == NULL) {
+ if (dbp == NULL)
fprintf(stderr,
"%s: %s: %s\n", progname, p, strerror(ERANGE));
- exit(1);
- }
- dbp->err(dbp, ERANGE, "%s", p);
+ else
+ dbp->err(dbp, ERANGE, "%s", p);
return (1);
}
if (p[0] == '\0' || (end[0] != '\0' && end[0] != '\n')) {
- if (dbp == NULL) {
+ if (dbp == NULL)
fprintf(stderr,
"%s: %s: Invalid numeric argument\n", progname, p);
- exit(1);
- }
- dbp->errx(dbp, "%s: Invalid numeric argument", p);
+ else
+ dbp->errx(dbp, "%s: Invalid numeric argument", p);
return (1);
}
if (val < min) {
- if (dbp == NULL) {
+ if (dbp == NULL)
fprintf(stderr,
"%s: %s: Less than minimum value (%ld)\n",
progname, p, min);
- exit(1);
- }
- dbp->errx(dbp, "%s: Less than minimum value (%ld)", p, min);
+ else
+ dbp->errx(dbp,
+ "%s: Less than minimum value (%ld)", p, min);
return (1);
}
if (val > max) {
- if (dbp == NULL) {
+ if (dbp == NULL)
fprintf(stderr,
"%s: %s: Greater than maximum value (%ld)\n",
progname, p, max);
- exit(1);
- }
- dbp->errx(dbp, "%s: Greater than maximum value (%ld)", p, max);
- exit(1);
+ else
+ dbp->errx(dbp,
+ "%s: Greater than maximum value (%ld)", p, max);
+ return (1);
}
*storep = val;
return (0);
@@ -109,31 +106,29 @@ __db_getulong(dbp, progname, p, min, max, storep)
__os_set_errno(0);
val = strtoul(p, &end, 10);
if (val == ULONG_MAX && __os_get_errno() == ERANGE) {
- if (dbp == NULL) {
+ if (dbp == NULL)
fprintf(stderr,
"%s: %s: %s\n", progname, p, strerror(ERANGE));
- exit(1);
- }
- dbp->err(dbp, ERANGE, "%s", p);
+ else
+ dbp->err(dbp, ERANGE, "%s", p);
return (1);
}
if (p[0] == '\0' || (end[0] != '\0' && end[0] != '\n')) {
- if (dbp == NULL) {
+ if (dbp == NULL)
fprintf(stderr,
"%s: %s: Invalid numeric argument\n", progname, p);
- exit(1);
- }
- dbp->errx(dbp, "%s: Invalid numeric argument", p);
+ else
+ dbp->errx(dbp, "%s: Invalid numeric argument", p);
return (1);
}
if (val < min) {
- if (dbp == NULL) {
+ if (dbp == NULL)
fprintf(stderr,
- "%s: %s: Less than minimum value (%ld)\n",
+ "%s: %s: Less than minimum value (%lu)\n",
progname, p, min);
- exit(1);
- }
- dbp->errx(dbp, "%s: Less than minimum value (%ld)", p, min);
+ else
+ dbp->errx(dbp,
+ "%s: Less than minimum value (%lu)", p, min);
return (1);
}
@@ -144,14 +139,14 @@ __db_getulong(dbp, progname, p, min, max, storep)
* may not exist on all platforms.
*/
if (max != 0 && val > max) {
- if (dbp == NULL) {
+ if (dbp == NULL)
fprintf(stderr,
- "%s: %s: Greater than maximum value (%ld)\n",
+ "%s: %s: Greater than maximum value (%lu)\n",
progname, p, max);
- exit(1);
- }
- dbp->errx(dbp, "%s: Greater than maximum value (%ld)", p, max);
- exit(1);
+ else
+ dbp->errx(dbp,
+ "%s: Greater than maximum value (%lu)", p, max);
+ return (1);
}
*storep = val;
return (0);
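The __db_getlong()/__db_getulong() changes above replace the hard exit(1) calls with error returns while keeping the same strtol(3) validation steps: an ERANGE check, a trailing-garbage check, and range limits. A self-contained sketch of the same validation, with a hypothetical helper name and merged range message:

#include <errno.h>
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Parse a decimal argument into *storep; return 0 on success, 1 on error. */
static int
get_long_arg(const char *progname, const char *p, long min, long max, long *storep)
{
	char *end;
	long val;

	errno = 0;
	val = strtol(p, &end, 10);
	if ((val == LONG_MIN || val == LONG_MAX) && errno == ERANGE) {
		fprintf(stderr, "%s: %s: %s\n", progname, p, strerror(ERANGE));
		return (1);
	}
	if (p[0] == '\0' || (end[0] != '\0' && end[0] != '\n')) {
		fprintf(stderr,
		    "%s: %s: Invalid numeric argument\n", progname, p);
		return (1);
	}
	if (val < min || val > max) {
		fprintf(stderr, "%s: %s: Out of range (%ld to %ld)\n",
		    progname, p, min, max);
		return (1);
	}
	*storep = val;
	return (0);
}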
diff --git a/bdb/common/db_idspace.c b/bdb/common/db_idspace.c
new file mode 100644
index 00000000000..588ffd9fca9
--- /dev/null
+++ b/bdb/common/db_idspace.c
@@ -0,0 +1,93 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2001-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: db_idspace.c,v 1.5 2002/02/01 18:15:29 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <stdlib.h>
+#endif
+
+#include "db_int.h"
+
+static int __db_idcmp __P((const void *, const void *));
+
+static int
+__db_idcmp(a, b)
+ const void *a;
+ const void *b;
+{
+ u_int32_t i, j;
+
+ i = *(u_int32_t *)a;
+ j = *(u_int32_t *)b;
+
+ if (i < j)
+ return (-1);
+ else if (i > j)
+ return (1);
+ else
+ return (0);
+}
+
+/*
+ * __db_idspace --
+ *
+ * On input, minp and maxp contain the minimum and maximum valid values for
+ * the name space and on return, they contain the minimum and maximum ids
+ * available (by finding the biggest gap).
+ *
+ * PUBLIC: void __db_idspace __P((u_int32_t *, int, u_int32_t *, u_int32_t *));
+ */
+void
+__db_idspace(inuse, n, minp, maxp)
+ u_int32_t *inuse;
+ int n;
+ u_int32_t *minp, *maxp;
+{
+ int i, low;
+ u_int32_t gap, t;
+
+ /* A single locker ID is a special case. */
+ if (n == 1) {
+ /*
+ * If the single item in use is the last one in the range,
+ * then we've got to perform wrap which means that we set
+ * the min to the minimum ID, which is what we came in with,
+ * so we don't do anything.
+ */
+ if (inuse[0] != *maxp)
+ *minp = inuse[0];
+ *maxp = inuse[0] - 1;
+ return;
+ }
+
+ gap = 0;
+ low = 0;
+ qsort(inuse, n, sizeof(u_int32_t), __db_idcmp);
+ for (i = 0; i < n - 1; i++)
+ if ((t = (inuse[i + 1] - inuse[i])) > gap) {
+ gap = t;
+ low = i;
+ }
+
+ /* Check for largest gap at the end of the space. */
+ if ((*maxp - inuse[n - 1]) + (inuse[0] - *minp) > gap) {
+ /* Do same check as we do in the n == 1 case. */
+ if (inuse[n - 1] != *maxp)
+ *minp = inuse[n - 1];
+ *maxp = inuse[0];
+ } else {
+ *minp = inuse[low];
+ *maxp = inuse[low + 1];
+ }
+}
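To make the gap search in __db_idspace() above concrete, here is a small standalone run of the same idea (sort the in-use IDs, scan adjacent differences, compare the best interior gap against the wrap-around gap), using hypothetical locker IDs and plain unsigned long in place of u_int32_t:

#include <stdio.h>
#include <stdlib.h>

static int
idcmp(const void *a, const void *b)
{
	unsigned long i, j;

	i = *(const unsigned long *)a;
	j = *(const unsigned long *)b;
	return (i < j ? -1 : (i > j ? 1 : 0));
}

int
main(void)
{
	unsigned long inuse[] = { 90, 7, 9, 41, 40 };	/* hypothetical locker IDs */
	unsigned long minid = 1, maxid = 100, gap = 0, t;
	int i, low = 0, n = 5;

	qsort(inuse, n, sizeof(inuse[0]), idcmp);
	for (i = 0; i < n - 1; i++)
		if ((t = inuse[i + 1] - inuse[i]) > gap) {
			gap = t;
			low = i;
		}

	/* The wrap-around gap (90..100 plus 1..7) is smaller than 41..90 here. */
	if ((maxid - inuse[n - 1]) + (inuse[0] - minid) > gap)
		printf("reuse IDs %lu through %lu (wrapping)\n",
		    inuse[n - 1], inuse[0]);
	else
		printf("reuse IDs %lu through %lu\n", inuse[low], inuse[low + 1]);
	return (0);
}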
diff --git a/bdb/common/db_log2.c b/bdb/common/db_log2.c
index 95bc69499c6..cdd87dda11d 100644
--- a/bdb/common/db_log2.c
+++ b/bdb/common/db_log2.c
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Copyright (c) 1996-2002
* Sleepycat Software. All rights reserved.
*/
/*
@@ -39,7 +39,7 @@
#include "db_config.h"
#ifndef lint
-static const char revid[] = "$Id: db_log2.c,v 11.4 2000/02/14 02:59:41 bostic Exp $";
+static const char revid[] = "$Id: db_log2.c,v 11.7 2002/02/01 18:15:30 bostic Exp $";
#endif /* not lint */
#ifndef NO_SYSTEM_INCLUDES
@@ -47,7 +47,6 @@ static const char revid[] = "$Id: db_log2.c,v 11.4 2000/02/14 02:59:41 bostic Ex
#endif
#include "db_int.h"
-#include "common_ext.h"
/*
* PUBLIC: u_int32_t __db_log2 __P((u_int32_t));
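Only the header block of db_log2.c changes here; the function body is outside the hunk. Assuming the ceiling-log2 semantics suggested by the name and prototype (an assumption, not confirmed by this diff), a minimal standalone sketch of such a helper:

#include <stdio.h>

/*
 * Assumed semantics (the body is outside this hunk): return the smallest
 * exponent e such that 2^e >= num, i.e. a ceiling log base 2.
 */
static unsigned int
ceil_log2(unsigned int num)
{
	unsigned int e, limit;

	for (e = 0, limit = 1; limit < num; ++e, limit <<= 1)
		;
	return (e);
}

int
main(void)
{
	printf("%u %u %u\n", ceil_log2(1), ceil_log2(1024), ceil_log2(1025));
	/* Prints: 0 10 11 */
	return (0);
}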
diff --git a/bdb/common/util_arg.c b/bdb/common/util_arg.c
new file mode 100644
index 00000000000..e034e3bd194
--- /dev/null
+++ b/bdb/common/util_arg.c
@@ -0,0 +1,126 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2001-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: util_arg.c,v 1.4 2002/02/01 18:15:30 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+#endif
+
+#include "db_int.h"
+
+static char *__db_strsep __P((char **, const char *));
+
+/*
+ * __db_util_arg --
+ * Convert a string into an argc/argv pair.
+ *
+ * PUBLIC: int __db_util_arg __P((char *, char *, int *, char ***));
+ */
+int
+__db_util_arg(arg0, str, argcp, argvp)
+ char *arg0, *str, ***argvp;
+ int *argcp;
+{
+ int n, ret;
+ char **ap, **argv;
+
+#define MAXARGS 25
+ if ((ret =
+ __os_malloc(NULL, (MAXARGS + 1) * sizeof(char **), &argv)) != 0)
+ return (ret);
+
+ ap = argv;
+ *ap++ = arg0;
+ for (n = 1; (*ap = __db_strsep(&str, " \t")) != NULL;)
+ if (**ap != '\0') {
+ ++ap;
+ if (++n == MAXARGS)
+ break;
+ }
+ *ap = NULL;
+
+ *argcp = ap - argv;
+ *argvp = argv;
+
+ return (0);
+}
+
+/*-
+ * Copyright (c) 1990, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+/*
+ * Get next token from string *stringp, where tokens are possibly-empty
+ * strings separated by characters from delim.
+ *
+ * Writes NULs into the string at *stringp to end tokens.
+ * delim need not remain constant from call to call.
+ * On return, *stringp points past the last NUL written (if there might
+ * be further tokens), or is NULL (if there are definitely no more tokens).
+ *
+ * If *stringp is NULL, strsep returns NULL.
+ */
+static char *
+__db_strsep(stringp, delim)
+ char **stringp;
+ const char *delim;
+{
+ const char *spanp;
+ int c, sc;
+ char *s, *tok;
+
+ if ((s = *stringp) == NULL)
+ return (NULL);
+ for (tok = s;;) {
+ c = *s++;
+ spanp = delim;
+ do {
+ if ((sc = *spanp++) == c) {
+ if (c == 0)
+ s = NULL;
+ else
+ s[-1] = 0;
+ *stringp = s;
+ return (tok);
+ }
+ } while (sc != 0);
+ }
+ /* NOTREACHED */
+}
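__db_util_arg() above turns a command string into an argc/argv pair, bundling its own __db_strsep() precisely because strsep(3) is not available on every platform. A short sketch of the same splitting behaviour using the system strsep(3) where it exists; the command string is hypothetical:

#include <stdio.h>
#include <string.h>

#define MAXARGS	25

int
main(void)
{
	char line[] = "db_archive  -a  -h /tmp/env";	/* hypothetical command string */
	char *argv[MAXARGS + 1], *str, *tok;
	int argc, i;

	str = line;
	argc = 0;
	while (argc < MAXARGS && (tok = strsep(&str, " \t")) != NULL)
		if (*tok != '\0')	/* doubled blanks yield empty tokens; skip them */
			argv[argc++] = tok;
	argv[argc] = NULL;

	for (i = 0; i < argc; i++)
		printf("argv[%d] = %s\n", i, argv[i]);
	return (0);
}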
diff --git a/bdb/common/util_cache.c b/bdb/common/util_cache.c
new file mode 100644
index 00000000000..5ca88665cc7
--- /dev/null
+++ b/bdb/common/util_cache.c
@@ -0,0 +1,92 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: util_cache.c,v 1.3 2002/04/04 18:50:10 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <stdlib.h>
+
+#include <string.h>
+#include <unistd.h>
+#endif
+
+#include "db_int.h"
+
+/*
+ * __db_util_cache --
+ * Compute if we have enough cache.
+ *
+ * PUBLIC: int __db_util_cache __P((DB_ENV *, DB *, u_int32_t *, int *));
+ */
+int
+__db_util_cache(dbenv, dbp, cachep, resizep)
+ DB_ENV *dbenv;
+ DB *dbp;
+ u_int32_t *cachep;
+ int *resizep;
+{
+ DBTYPE type;
+ DB_BTREE_STAT *bsp;
+ DB_HASH_STAT *hsp;
+ DB_QUEUE_STAT *qsp;
+ u_int32_t pgsize;
+ int ret;
+ void *sp;
+
+ /*
+ * The current cache size is in cachep. If it's insufficient, set
+ * the memory referenced by resizep to 1 and set cachep to the minimum
+ * size needed.
+ */
+ if ((ret = dbp->get_type(dbp, &type)) != 0) {
+ dbenv->err(dbenv, ret, "DB->get_type");
+ return (ret);
+ }
+
+ if ((ret = dbp->stat(dbp, &sp, DB_FAST_STAT)) != 0) {
+ dbenv->err(dbenv, ret, "DB->stat");
+ return (ret);
+ }
+
+ switch (type) {
+ case DB_QUEUE:
+ qsp = (DB_QUEUE_STAT *)sp;
+ pgsize = qsp->qs_pagesize;
+ break;
+ case DB_HASH:
+ hsp = (DB_HASH_STAT *)sp;
+ pgsize = hsp->hash_pagesize;
+ break;
+ case DB_BTREE:
+ case DB_RECNO:
+ bsp = (DB_BTREE_STAT *)sp;
+ pgsize = bsp->bt_pagesize;
+ break;
+ default:
+ dbenv->err(dbenv, ret, "unknown database type: %d", type);
+ return (EINVAL);
+ }
+ free(sp);
+
+ /*
+ * Make sure our current cache is big enough. We want at least
+ * DB_MINPAGECACHE pages in the cache.
+ */
+ if ((*cachep / pgsize) < DB_MINPAGECACHE) {
+ *resizep = 1;
+ *cachep = pgsize * DB_MINPAGECACHE;
+ } else
+ *resizep = 0;
+
+ return (0);
+}
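The sizing rule in __db_util_cache() above is simply "keep at least DB_MINPAGECACHE pages in the cache". A stripped-down sketch of that rule; DB_MINPAGECACHE is internal to db_int.h, so the 32 below is only a stand-in value:

#include <stdint.h>

#define MIN_PAGECACHE	32	/* stand-in for the internal DB_MINPAGECACHE */

/* If *cachep holds fewer than MIN_PAGECACHE pages, bump it and flag a resize. */
static void
size_cache(uint32_t pgsize, uint32_t *cachep, int *resizep)
{
	if (*cachep / pgsize < MIN_PAGECACHE) {
		*resizep = 1;
		*cachep = pgsize * MIN_PAGECACHE;
	} else
		*resizep = 0;
}

A utility that sees resizep set would then typically hand the new size to DB_ENV->set_cachesize() before opening its private environment.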
diff --git a/bdb/common/util_log.c b/bdb/common/util_log.c
index a4743cc2cee..ae215fca64a 100644
--- a/bdb/common/util_log.c
+++ b/bdb/common/util_log.c
@@ -1,14 +1,14 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2000
+ * Copyright (c) 2000-2002
* Sleepycat Software. All rights reserved.
*/
#include "db_config.h"
#ifndef lint
-static const char revid[] = "$Id: util_log.c,v 1.7 2000/11/30 00:58:31 ubell Exp $";
+static const char revid[] = "$Id: util_log.c,v 1.11 2002/02/01 18:15:30 bostic Exp $";
#endif /* not lint */
#ifndef NO_SYSTEM_INCLUDES
@@ -31,7 +31,6 @@ static const char revid[] = "$Id: util_log.c,v 1.7 2000/11/30 00:58:31 ubell Exp
#endif
#include "db_int.h"
-#include "common_ext.h"
/*
* __db_util_logset --
@@ -46,12 +45,14 @@ __db_util_logset(progname, fname)
{
FILE *fp;
time_t now;
+ u_int32_t id;
if ((fp = fopen(fname, "w")) == NULL)
goto err;
(void)time(&now);
- fprintf(fp, "%s: %lu %s", progname, (u_long)getpid(), ctime(&now));
+ __os_id(&id);
+ fprintf(fp, "%s: %lu %s", progname, (u_long)id, ctime(&now));
if (fclose(fp) == EOF)
goto err;
diff --git a/bdb/common/util_sig.c b/bdb/common/util_sig.c
index 6fe0166fe64..9714427ad33 100644
--- a/bdb/common/util_sig.c
+++ b/bdb/common/util_sig.c
@@ -1,14 +1,14 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2000
+ * Copyright (c) 2000-2002
* Sleepycat Software. All rights reserved.
*/
#include "db_config.h"
#ifndef lint
-static const char revid[] = "$Id: util_sig.c,v 1.3 2000/04/28 19:32:00 bostic Exp $";
+static const char revid[] = "$Id: util_sig.c,v 1.7 2002/02/02 17:04:42 bostic Exp $";
#endif /* not lint */
#ifndef NO_SYSTEM_INCLUDES
@@ -18,7 +18,6 @@ static const char revid[] = "$Id: util_sig.c,v 1.3 2000/04/28 19:32:00 bostic Ex
#endif
#include "db_int.h"
-#include "common_ext.h"
static int interrupt;
static void onint __P((int));
@@ -79,7 +78,7 @@ void
__db_util_sigresend()
{
/* Resend any caught signal. */
- if (__db_util_interrupted != 0) {
+ if (interrupt != 0) {
(void)signal(interrupt, SIG_DFL);
(void)raise(interrupt);
/* NOTREACHED */
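The util_sig.c fix above makes __db_util_sigresend() test the static interrupt variable rather than the function name. The catch-and-resend idiom itself, as a standalone sketch with a trivial handler:

#include <signal.h>
#include <stdio.h>

static volatile sig_atomic_t interrupt;

static void
onint(int signo)
{
	interrupt = signo;		/* remember which signal arrived */
}

int
main(void)
{
	(void)signal(SIGINT, onint);

	/* ... interruptible work, checking `interrupt' at safe points ... */

	if (interrupt != 0) {
		/* Clean up, then die from the original signal, as the utilities do. */
		(void)signal(interrupt, SIG_DFL);
		(void)raise(interrupt);
		/* NOTREACHED */
	}
	return (0);
}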
diff --git a/bdb/cxx/cxx_app.cpp b/bdb/cxx/cxx_app.cpp
deleted file mode 100644
index 1fcf04b5c43..00000000000
--- a/bdb/cxx/cxx_app.cpp
+++ /dev/null
@@ -1,671 +0,0 @@
-/*-
- * See the file LICENSE for redistribution information.
- *
- * Copyright (c) 1997, 1998, 1999, 2000
- * Sleepycat Software. All rights reserved.
- */
-
-#include "db_config.h"
-
-#ifndef lint
-static const char revid[] = "$Id: cxx_app.cpp,v 11.38 2000/12/21 20:30:18 dda Exp $";
-#endif /* not lint */
-
-#include <errno.h>
-#include <stdio.h> // needed for set_error_stream
-#include <string.h>
-
-#include "db_cxx.h"
-#include "cxx_int.h"
-
-#include "db_int.h"
-#include "common_ext.h"
-
-// The reason for a static variable is that some structures
-// (like Dbts) have no connection to any Db or DbEnv, so when
-// errors occur in their methods, we must have some reasonable
-// way to determine whether to throw or return errors.
-//
-// This variable is taken from flags whenever a DbEnv is constructed.
-// Normally there is only one DbEnv per program, and even if not,
-// there is typically a single policy of throwing or returning.
-//
-static int last_known_error_policy = ON_ERROR_UNKNOWN;
-
-////////////////////////////////////////////////////////////////////////
-// //
-// DbEnv //
-// //
-////////////////////////////////////////////////////////////////////////
-
-ostream *DbEnv::error_stream_ = 0;
-
-// _destroy_check is called when there is a user error in a
-// destructor, specifically when close has not been called for an
-// object (even if it was never opened). If the DbEnv is being
-// destroyed we cannot always use DbEnv::error_stream_, so we'll
-// use cerr in that case.
-//
-void DbEnv::_destroy_check(const char *str, int isDbEnv)
-{
- ostream *out;
-
- out = error_stream_;
- if (out == NULL || isDbEnv == 1)
- out = &cerr;
-
- (*out) << "DbEnv::_destroy_check: open " << str << " object destroyed\n";
-}
-
-// A truism for the DbEnv object is that there is a valid
-// DB_ENV handle from the constructor until close().
-// After the close, the DB_ENV handle is invalid and
-// no operations are permitted on the DbEnv (other than
-// destructor). Leaving the DbEnv handle open and not
-// doing a close is generally considered an error.
-//
-// We used to allow DbEnv objects to be closed and reopened.
-// This implied always keeping a valid DB_ENV object, and
-// coordinating the open objects between Db/DbEnv turned
-// out to be overly complicated. Now we do not allow this.
-
-DbEnv::DbEnv(u_int32_t flags)
-: imp_(0)
-, construct_error_(0)
-, construct_flags_(flags)
-, tx_recover_callback_(0)
-, paniccall_callback_(0)
-{
- int err;
-
- COMPQUIET(err, 0);
- if ((err = initialize(0)) != 0)
- DB_ERROR("DbEnv::DbEnv", err, error_policy());
-}
-
-DbEnv::DbEnv(DB_ENV *env, u_int32_t flags)
-: imp_(0)
-, construct_error_(0)
-, construct_flags_(flags)
-, tx_recover_callback_(0)
-, paniccall_callback_(0)
-{
- int err;
-
- COMPQUIET(err, 0);
- if ((err = initialize(env)) != 0)
- DB_ERROR("DbEnv::DbEnv", err, error_policy());
-}
-
-// Note: if the user has not closed, we call _destroy_check
-// to warn against this non-safe programming practice,
-// and call close anyway.
-//
-DbEnv::~DbEnv()
-{
- DB_ENV *env = unwrap(this);
-
- if (env != NULL) {
- _destroy_check("DbEnv", 1);
- (void)env->close(env, 0);
-
- // extra safety
- cleanup();
- }
-}
-
-// called by Db destructor when the DbEnv is owned by DB.
-void DbEnv::cleanup()
-{
- DB_ENV *env = unwrap(this);
-
- if (env != NULL) {
- env->cj_internal = 0;
- imp_ = 0;
- }
-}
-
-int DbEnv::close(u_int32_t flags)
-{
- DB_ENV *env = unwrap(this);
- int err, init_err;
-
- COMPQUIET(init_err, 0);
-
- // after a close (no matter if success or failure),
- // the underlying DB_ENV object must not be accessed,
- // so we clean up in advance.
- //
- cleanup();
-
- // It's safe to throw an error after the close,
- // since our error mechanism does not peer into
- // the DB* structures.
- //
- if ((err = env->close(env, flags)) != 0) {
- DB_ERROR("DbEnv::close", err, error_policy());
- }
- return (err);
-}
-
-void DbEnv::err(int error, const char *format, ...)
-{
- va_list args;
- DB_ENV *env = unwrap(this);
-
- va_start(args, format);
- __db_real_err(env, error, 1, 1, format, args);
- va_end(args);
-}
-
-void DbEnv::errx(const char *format, ...)
-{
- va_list args;
- DB_ENV *env = unwrap(this);
-
- va_start(args, format);
- __db_real_err(env, 0, 0, 1, format, args);
- va_end(args);
-}
-
-// used internally during constructor
-// to associate an existing DB_ENV with this DbEnv,
-// or create a new one. If there is an error,
-// construct_error_ is set; this is examined during open.
-//
-int DbEnv::initialize(DB_ENV *env)
-{
- int err;
-
- last_known_error_policy = error_policy();
-
- if (env == 0) {
- // Create a new DB_ENV environment.
- if ((err = ::db_env_create(&env,
- construct_flags_ & ~DB_CXX_NO_EXCEPTIONS)) != 0) {
- construct_error_ = err;
- return (err);
- }
- }
- imp_ = wrap(env);
- env->cj_internal = this; // for DB_ENV* to DbEnv* conversion
- return (0);
-}
-
-// Return a tristate value corresponding to whether we should
-// throw exceptions on errors:
-// ON_ERROR_RETURN
-// ON_ERROR_THROW
-// ON_ERROR_UNKNOWN
-//
-int DbEnv::error_policy()
-{
- if ((construct_flags_ & DB_CXX_NO_EXCEPTIONS) != 0) {
- return (ON_ERROR_RETURN);
- }
- else {
- return (ON_ERROR_THROW);
- }
-}
-
-// If an error occurred during the constructor, report it now.
-// Otherwise, call the underlying DB->open method.
-//
-int DbEnv::open(const char *db_home, u_int32_t flags, int mode)
-{
- DB_ENV *env = unwrap(this);
- int err;
-
- if ((err = construct_error_) != 0)
- DB_ERROR("Db::open", err, error_policy());
- else if ((err = env->open(env, db_home, flags, mode)) != 0)
- DB_ERROR("DbEnv::open", err, error_policy());
-
- return (err);
-}
-
-int DbEnv::remove(const char *db_home, u_int32_t flags)
-{
- DB_ENV *env;
- int ret;
-
- env = unwrap(this);
-
- // after a remove (no matter if success or failure),
- // the underlying DB_ENV object must not be accessed,
- // so we clean up in advance.
- //
- cleanup();
-
- if ((ret = env->remove(env, db_home, flags)) != 0)
- DB_ERROR("DbEnv::remove", ret, error_policy());
-
- return (ret);
-}
-
-// Report an error associated with the DbEnv.
-// error_policy is one of:
-// ON_ERROR_THROW throw an error
-// ON_ERROR_RETURN do nothing here, the caller will return an error
-// ON_ERROR_UNKNOWN defer the policy to policy saved in DbEnv::DbEnv
-//
-void DbEnv::runtime_error(const char *caller, int error, int error_policy)
-{
- if (error_policy == ON_ERROR_UNKNOWN)
- error_policy = last_known_error_policy;
- if (error_policy == ON_ERROR_THROW) {
- // Creating and throwing the object in two separate
- // statements seems to be necessary for HP compilers.
- DbException except(caller, error);
- throw except;
- }
-}
-
-// static method
-char *DbEnv::strerror(int error)
-{
- return (db_strerror(error));
-}
-
-// This is a 'glue' function declared as extern "C" so it will
-// be compatible with picky compilers that do not allow mixing
-// of function pointers to 'C' functions with function pointers
-// to C++ functions.
-//
-extern "C"
-void _stream_error_function_c(const char *prefix, char *message)
-{
- DbEnv::_stream_error_function(prefix, message);
-}
-
-void DbEnv::_stream_error_function(const char *prefix, char *message)
-{
- // HP compilers need the extra casts, we don't know why.
- if (error_stream_) {
- if (prefix) {
- (*error_stream_) << prefix << (const char *)": ";
- }
- if (message) {
- (*error_stream_) << (const char *)message;
- }
- (*error_stream_) << (const char *)"\n";
- }
-}
-
-// Note: This actually behaves a bit like a static function,
-// since DB_ENV.db_errcall has no information about which
-// db_env triggered the call. A user that has multiple DB_ENVs
-// will simply not be able to have different streams for each one.
-//
-void DbEnv::set_error_stream(ostream *stream)
-{
- DB_ENV *dbenv = unwrap(this);
-
- error_stream_ = stream;
- dbenv->set_errcall(dbenv, (stream == 0) ? 0 :
- _stream_error_function_c);
-}
-
-// static method
-char *DbEnv::version(int *major, int *minor, int *patch)
-{
- return (db_version(major, minor, patch));
-}
-
-// This is a variant of the DB_WO_ACCESS macro to define a simple set_
-// method calling the underlying C method, but unlike a simple
-// set method, it may return an error or raise an exception.
-// Note this macro expects that input _argspec is an argument
-// list element (e.g. "char *arg") defined in terms of "arg".
-//
-#define DB_DBENV_ACCESS(_name, _argspec) \
- \
-int DbEnv::set_##_name(_argspec) \
-{ \
- int ret; \
- DB_ENV *dbenv = unwrap(this); \
- \
- if ((ret = (*(dbenv->set_##_name))(dbenv, arg)) != 0) {\
- DB_ERROR("DbEnv::set_" # _name, ret, error_policy()); \
- } \
- return (ret); \
-}
-
-#define DB_DBENV_ACCESS_NORET(_name, _argspec) \
- \
-void DbEnv::set_##_name(_argspec) \
-{ \
- DB_ENV *dbenv = unwrap(this); \
- \
- (*(dbenv->set_##_name))(dbenv, arg); \
- return; \
-}
-
-DB_DBENV_ACCESS_NORET(errfile, FILE *arg)
-DB_DBENV_ACCESS_NORET(errpfx, const char *arg)
-
-// We keep these alphabetical by field name,
-// for comparison with Java's list.
-//
-DB_DBENV_ACCESS(data_dir, const char *arg)
-DB_DBENV_ACCESS(lg_bsize, u_int32_t arg)
-DB_DBENV_ACCESS(lg_dir, const char *arg)
-DB_DBENV_ACCESS(lg_max, u_int32_t arg)
-DB_DBENV_ACCESS(lk_detect, u_int32_t arg)
-DB_DBENV_ACCESS(lk_max, u_int32_t arg)
-DB_DBENV_ACCESS(lk_max_lockers, u_int32_t arg)
-DB_DBENV_ACCESS(lk_max_locks, u_int32_t arg)
-DB_DBENV_ACCESS(lk_max_objects, u_int32_t arg)
-DB_DBENV_ACCESS(mp_mmapsize, size_t arg)
-DB_DBENV_ACCESS(mutexlocks, int arg)
-DB_DBENV_ACCESS(tmp_dir, const char *arg)
-DB_DBENV_ACCESS(tx_max, u_int32_t arg)
-
-// Here are the set methods that don't fit the above mold.
-//
-extern "C" {
- typedef void (*db_errcall_fcn_type)
- (const char *, char *);
-};
-
-void DbEnv::set_errcall(void (*arg)(const char *, char *))
-{
- DB_ENV *dbenv = unwrap(this);
-
- // XXX
- // We are casting from a function ptr declared with C++
- // linkage to one (same arg types) declared with C
- // linkage. It's hard to imagine a pair of C/C++
- // compilers from the same vendor for which this
- // won't work. Unfortunately, we can't use a
- // intercept function like the others since the
- // function does not have a (DbEnv*) as one of
- // the args. If this causes trouble, we can pull
- // the same trick we use in Java, namely stuffing
- // a (DbEnv*) pointer into the prefix. We're
- // avoiding this for the moment because it obfuscates.
- //
- (*(dbenv->set_errcall))(dbenv, (db_errcall_fcn_type)arg);
-}
-
-int DbEnv::set_cachesize(u_int32_t gbytes, u_int32_t bytes, int ncache)
-{
- int ret;
- DB_ENV *dbenv = unwrap(this);
-
- if ((ret =
- (*(dbenv->set_cachesize))(dbenv, gbytes, bytes, ncache)) != 0)
- DB_ERROR("DbEnv::set_cachesize", ret, error_policy());
-
- return (ret);
-}
-
-int DbEnv::set_flags(u_int32_t flags, int onoff)
-{
- int ret;
- DB_ENV *dbenv = unwrap(this);
-
- if ((ret = (dbenv->set_flags)(dbenv, flags, onoff)) != 0)
- DB_ERROR("DbEnv::set_flags", ret, error_policy());
-
- return (ret);
-}
-
-int DbEnv::set_lk_conflicts(u_int8_t *lk_conflicts, int lk_max)
-{
- int ret;
- DB_ENV *dbenv = unwrap(this);
-
- if ((ret = (*(dbenv->set_lk_conflicts))
- (dbenv, lk_conflicts, lk_max)) != 0)
- DB_ERROR("DbEnv::set_lk_conflicts", ret, error_policy());
-
- return (ret);
-}
-
-// static method
-int DbEnv::set_pageyield(int arg)
-{
- int ret;
-
- if ((ret = db_env_set_pageyield(arg)) != 0)
- DB_ERROR("DbEnv::set_pageyield", ret, last_known_error_policy);
-
- return (ret);
-}
-
-// static method
-int DbEnv::set_panicstate(int arg)
-{
- int ret;
-
- if ((ret = db_env_set_panicstate(arg)) != 0)
- DB_ERROR("DbEnv::set_panicstate", ret, last_known_error_policy);
-
- return (ret);
-}
-
-// static method
-int DbEnv::set_region_init(int arg)
-{
- int ret;
-
- if ((ret = db_env_set_region_init(arg)) != 0)
- DB_ERROR("DbEnv::set_region_init", ret, last_known_error_policy);
-
- return (ret);
-}
-
-int DbEnv::set_server(char *host, long tsec, long ssec, u_int32_t flags)
-{
- int ret;
- DB_ENV *dbenv = unwrap(this);
-
- if ((ret = dbenv->set_server(dbenv, host, tsec, ssec, flags)) != 0)
- DB_ERROR("DbEnv::set_server", ret, error_policy());
-
- return (ret);
-}
-
-int DbEnv::set_shm_key(long shm_key)
-{
- int ret;
- DB_ENV *dbenv = unwrap(this);
-
- if ((ret = dbenv->set_shm_key(dbenv, shm_key)) != 0)
- DB_ERROR("DbEnv::set_shm_key", ret, error_policy());
-
- return (ret);
-}
-
-// static method
-int DbEnv::set_tas_spins(u_int32_t arg)
-{
- int ret;
-
- if ((ret = db_env_set_tas_spins(arg)) != 0)
- DB_ERROR("DbEnv::set_tas_spins", ret, last_known_error_policy);
-
- return (ret);
-}
-
-int DbEnv::set_verbose(u_int32_t which, int onoff)
-{
- int ret;
- DB_ENV *dbenv = unwrap(this);
-
- if ((ret = (*(dbenv->set_verbose))(dbenv, which, onoff)) != 0)
- DB_ERROR("DbEnv::set_verbose", ret, error_policy());
-
- return (ret);
-}
-
-// This is a 'glue' function declared as extern "C" so it will
-// be compatible with picky compilers that do not allow mixing
-// of function pointers to 'C' functions with function pointers
-// to C++ functions.
-//
-extern "C"
-int _tx_recover_intercept_c(DB_ENV *env, DBT *dbt,
- DB_LSN *lsn, db_recops op)
-{
- return (DbEnv::_tx_recover_intercept(env, dbt, lsn, op));
-}
-
-int DbEnv::_tx_recover_intercept(DB_ENV *env, DBT *dbt,
- DB_LSN *lsn, db_recops op)
-{
- if (env == 0) {
- DB_ERROR("DbEnv::tx_recover_callback", EINVAL, ON_ERROR_UNKNOWN);
- return (EINVAL);
- }
- DbEnv *cxxenv = (DbEnv *)env->cj_internal;
- if (cxxenv == 0) {
- DB_ERROR("DbEnv::tx_recover_callback", EINVAL, ON_ERROR_UNKNOWN);
- return (EINVAL);
- }
- if (cxxenv->tx_recover_callback_ == 0) {
- DB_ERROR("DbEnv::tx_recover_callback", EINVAL, cxxenv->error_policy());
- return (EINVAL);
- }
- Dbt *cxxdbt = (Dbt *)dbt;
- DbLsn *cxxlsn = (DbLsn *)lsn;
- return ((*cxxenv->tx_recover_callback_)(cxxenv, cxxdbt, cxxlsn, op));
-}
-
-int DbEnv::set_tx_recover
- (int (*arg)(DbEnv *, Dbt *, DbLsn *, db_recops))
-{
- int ret;
- DB_ENV *dbenv = unwrap(this);
-
- tx_recover_callback_ = arg;
- if ((ret =
- (*(dbenv->set_tx_recover))(dbenv, _tx_recover_intercept_c)) != 0)
- DB_ERROR("DbEnv::set_tx_recover", ret, error_policy());
-
- return (ret);
-}
-
-int DbEnv::set_tx_timestamp(time_t *timestamp)
-{
- int ret;
- DB_ENV *dbenv = unwrap(this);
-
- if ((ret = dbenv->set_tx_timestamp(dbenv, timestamp)) != 0)
- DB_ERROR("DbEnv::set_tx_timestamp", ret, error_policy());
-
- return (ret);
-}
-
-// This is a 'glue' function declared as extern "C" so it will
-// be compatible with picky compilers that do not allow mixing
-// of function pointers to 'C' functions with function pointers
-// to C++ functions.
-//
-extern "C"
-void _paniccall_intercept_c(DB_ENV *env, int errval)
-{
- DbEnv::_paniccall_intercept(env, errval);
-}
-
-void DbEnv::_paniccall_intercept(DB_ENV *env, int errval)
-{
- if (env == 0) {
- DB_ERROR("DbEnv::paniccall_callback", EINVAL, ON_ERROR_UNKNOWN);
- }
- DbEnv *cxxenv = (DbEnv *)env->cj_internal;
- if (cxxenv == 0) {
- DB_ERROR("DbEnv::paniccall_callback", EINVAL, ON_ERROR_UNKNOWN);
- }
- if (cxxenv->paniccall_callback_ == 0) {
- DB_ERROR("DbEnv::paniccall_callback", EINVAL, cxxenv->error_policy());
- }
- (*cxxenv->paniccall_callback_)(cxxenv, errval);
-}
-
-int DbEnv::set_paniccall(void (*arg)(DbEnv *, int))
-{
- DB_ENV *dbenv = unwrap(this);
-
- paniccall_callback_ = arg;
-
- return ((*(dbenv->set_paniccall))(dbenv, _paniccall_intercept_c));
-}
-
-// This is a 'glue' function declared as extern "C" so it will
-// be compatible with picky compilers that do not allow mixing
-// of function pointers to 'C' functions with function pointers
-// to C++ functions.
-//
-extern "C"
-int _recovery_init_intercept_c(DB_ENV *env)
-{
- return (DbEnv::_recovery_init_intercept(env));
-}
-
-int DbEnv::_recovery_init_intercept(DB_ENV *env)
-{
- if (env == 0) {
- DB_ERROR("DbEnv::recovery_init_callback", EINVAL,
- ON_ERROR_UNKNOWN);
- }
- DbEnv *cxxenv = (DbEnv *)env->cj_internal;
- if (cxxenv == 0) {
- DB_ERROR("DbEnv::recovery_init_callback", EINVAL,
- ON_ERROR_UNKNOWN);
- }
- if (cxxenv->recovery_init_callback_ == 0) {
- DB_ERROR("DbEnv::recovery_init_callback", EINVAL,
- cxxenv->error_policy());
- }
- return ((*cxxenv->recovery_init_callback_)(cxxenv));
-}
-
-int DbEnv::set_recovery_init(int (*arg)(DbEnv *))
-{
- DB_ENV *dbenv = unwrap(this);
-
- recovery_init_callback_ = arg;
-
- return ((*(dbenv->set_recovery_init))(dbenv, _recovery_init_intercept_c));
-}
-
-// This is a 'glue' function declared as extern "C" so it will
-// be compatible with picky compilers that do not allow mixing
-// of function pointers to 'C' functions with function pointers
-// to C++ functions.
-//
-extern "C"
-void _feedback_intercept_c(DB_ENV *env, int opcode, int pct)
-{
- DbEnv::_feedback_intercept(env, opcode, pct);
-}
-
-void DbEnv::_feedback_intercept(DB_ENV *env, int opcode, int pct)
-{
- if (env == 0) {
- DB_ERROR("DbEnv::feedback_callback", EINVAL, ON_ERROR_UNKNOWN);
- return;
- }
- DbEnv *cxxenv = (DbEnv *)env->cj_internal;
- if (cxxenv == 0) {
- DB_ERROR("DbEnv::feedback_callback", EINVAL, ON_ERROR_UNKNOWN);
- return;
- }
- if (cxxenv->feedback_callback_ == 0) {
- DB_ERROR("DbEnv::feedback_callback", EINVAL,
- cxxenv->error_policy());
- return;
- }
- (*cxxenv->feedback_callback_)(cxxenv, opcode, pct);
-}
-
-int DbEnv::set_feedback(void (*arg)(DbEnv *, int, int))
-{
- DB_ENV *dbenv = unwrap(this);
-
- feedback_callback_ = arg;
-
- return ((*(dbenv->set_feedback))(dbenv, _feedback_intercept_c));
-}
diff --git a/bdb/cxx/cxx_db.cpp b/bdb/cxx/cxx_db.cpp
new file mode 100644
index 00000000000..7e50a9b3f27
--- /dev/null
+++ b/bdb/cxx/cxx_db.cpp
@@ -0,0 +1,605 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: cxx_db.cpp,v 11.71 2002/08/26 22:13:36 mjc Exp $";
+#endif /* not lint */
+
+#include <errno.h>
+#include <string.h>
+
+#include "db_cxx.h"
+#include "dbinc/cxx_int.h"
+
+#include "db_int.h"
+#include "dbinc/db_page.h"
+#include "dbinc_auto/db_auto.h"
+#include "dbinc_auto/crdel_auto.h"
+#include "dbinc/db_dispatch.h"
+#include "dbinc_auto/db_ext.h"
+#include "dbinc_auto/common_ext.h"
+
+// Helper macros for simple methods that pass through to the
+// underlying C method. It may return an error or raise an exception.
+// Note this macro expects that input _argspec is an argument
+// list element (e.g., "char *arg") and that _arglist is the arguments
+// that should be passed through to the C method (e.g., "(db, arg)")
+//
+#define DB_METHOD(_name, _argspec, _arglist, _retok) \
+int Db::_name _argspec \
+{ \
+ int ret; \
+ DB *db = unwrap(this); \
+ \
+ ret = db->_name _arglist; \
+ if (!_retok(ret)) \
+ DB_ERROR("Db::" # _name, ret, error_policy()); \
+ return (ret); \
+}
+
+#define DB_METHOD_CHECKED(_name, _cleanup, _argspec, _arglist, _retok) \
+int Db::_name _argspec \
+{ \
+ int ret; \
+ DB *db = unwrap(this); \
+ \
+ if (!db) { \
+ DB_ERROR("Db::" # _name, EINVAL, error_policy()); \
+ return (EINVAL); \
+ } \
+ if (_cleanup) \
+ cleanup(); \
+ ret = db->_name _arglist; \
+ if (!_retok(ret)) \
+ DB_ERROR("Db::" # _name, ret, error_policy()); \
+ return (ret); \
+}
+
+#define DB_METHOD_QUIET(_name, _argspec, _arglist) \
+int Db::_name _argspec \
+{ \
+ DB *db = unwrap(this); \
+ \
+ return (db->_name _arglist); \
+}
+
+#define DB_METHOD_VOID(_name, _argspec, _arglist) \
+void Db::_name _argspec \
+{ \
+ DB *db = unwrap(this); \
+ \
+ db->_name _arglist; \
+}
+
+// A truism for the Db object is that there is a valid
+// DB handle from the constructor until close().
+// After the close, the DB handle is invalid and
+// no operations are permitted on the Db (other than
+// destructor). Leaving the Db handle open and not
+// doing a close is generally considered an error.
+//
+// We used to allow Db objects to be closed and reopened.
+// This implied always keeping a valid DB object, and
+// coordinating the open objects between Db/DbEnv turned
+// out to be overly complicated. Now we do not allow this.
+
+Db::Db(DbEnv *env, u_int32_t flags)
+: imp_(0)
+, env_(env)
+, construct_error_(0)
+, flags_(0)
+, construct_flags_(flags)
+, append_recno_callback_(0)
+, associate_callback_(0)
+, bt_compare_callback_(0)
+, bt_prefix_callback_(0)
+, dup_compare_callback_(0)
+, feedback_callback_(0)
+, h_hash_callback_(0)
+{
+ if (env_ == 0)
+ flags_ |= DB_CXX_PRIVATE_ENV;
+
+ if ((construct_error_ = initialize()) != 0)
+ DB_ERROR("Db::Db", construct_error_, error_policy());
+}
+
+// If the DB handle is still open, we close it. This is to make stack
+// allocation of Db objects easier so that they are cleaned up in the error
+// path. If the environment was closed prior to this, it may cause a trap, but
+// an error message is generated during the environment close. Applications
+// should call close explicitly in normal (non-exceptional) cases to check the
+// return value.
+//
+Db::~Db()
+{
+ DB *db;
+
+ db = unwrap(this);
+ if (db != NULL) {
+ cleanup();
+ (void)db->close(db, 0);
+ }
+}
+
+// private method to initialize during constructor.
+// initialize must create a backing DB object,
+// and if that creates a new DB_ENV, it must be tied to a new DbEnv.
+//
+int Db::initialize()
+{
+ DB *db;
+ DB_ENV *cenv = unwrap(env_);
+ int ret;
+ u_int32_t cxx_flags;
+
+ cxx_flags = construct_flags_ & DB_CXX_NO_EXCEPTIONS;
+
+ // Create a new underlying DB object.
+ // We rely on the fact that if a NULL DB_ENV* is given,
+ // one is allocated by DB.
+ //
+ if ((ret = db_create(&db, cenv,
+ construct_flags_ & ~cxx_flags)) != 0)
+ return (ret);
+
+ // Associate the DB with this object
+ imp_ = wrap(db);
+ db->api_internal = this;
+
+ // Create a new DbEnv from a DB_ENV* if it was created locally.
+ // It is deleted in Db::close().
+ //
+ if ((flags_ & DB_CXX_PRIVATE_ENV) != 0)
+ env_ = new DbEnv(db->dbenv, cxx_flags);
+
+ return (0);
+}
+
+// private method to cleanup after destructor or during close.
+// If the environment was created by this Db object, we optionally
+// delete it, or return it so the caller can delete it after
+// last use.
+//
+void Db::cleanup()
+{
+ DB *db = unwrap(this);
+
+ if (db != NULL) {
+ // extra safety
+ db->api_internal = 0;
+ imp_ = 0;
+
+ // we must dispose of the DbEnv object if
+ // we created it. This will be the case
+ // if a NULL DbEnv was passed into the constructor.
+ // The underlying DB_ENV object will be inaccessible
+ // after the close, so we must clean it up now.
+ //
+ if ((flags_ & DB_CXX_PRIVATE_ENV) != 0) {
+ env_->cleanup();
+ delete env_;
+ env_ = 0;
+ }
+ }
+}
+
+// Return a tristate value corresponding to whether we should
+// throw exceptions on errors:
+// ON_ERROR_RETURN
+// ON_ERROR_THROW
+// ON_ERROR_UNKNOWN
+//
+int Db::error_policy()
+{
+ if (env_ != NULL)
+ return (env_->error_policy());
+ else {
+ // If the env_ is null, that means that the user
+ // did not attach an environment, so the correct error
+ // policy can be deduced from constructor flags
+ // for this Db.
+ //
+ if ((construct_flags_ & DB_CXX_NO_EXCEPTIONS) != 0) {
+ return (ON_ERROR_RETURN);
+ }
+ else {
+ return (ON_ERROR_THROW);
+ }
+ }
+}
+
+int Db::close(u_int32_t flags)
+{
+ DB *db = unwrap(this);
+ int ret;
+
+ // after a DB->close (no matter if success or failure),
+ // the underlying DB object must not be accessed,
+ // so we clean up in advance.
+ //
+ cleanup();
+
+ // It's safe to throw an error after the close,
+ // since our error mechanism does not peer into
+ // the DB* structures.
+ //
+ if ((ret = db->close(db, flags)) != 0)
+ DB_ERROR("Db::close", ret, error_policy());
+
+ return (ret);
+}
+
+// The following cast implies that Dbc can be no larger than DBC
+DB_METHOD(cursor, (DbTxn *txnid, Dbc **cursorp, u_int32_t flags),
+ (db, unwrap(txnid), (DBC **)cursorp, flags),
+ DB_RETOK_STD)
+
+DB_METHOD(del, (DbTxn *txnid, Dbt *key, u_int32_t flags),
+ (db, unwrap(txnid), key, flags),
+ DB_RETOK_DBDEL)
+
+void Db::err(int error, const char *format, ...)
+{
+ DB *db = unwrap(this);
+
+ DB_REAL_ERR(db->dbenv, error, 1, 1, format);
+}
+
+void Db::errx(const char *format, ...)
+{
+ DB *db = unwrap(this);
+
+ DB_REAL_ERR(db->dbenv, 0, 0, 1, format);
+}
+
+DB_METHOD(fd, (int *fdp),
+ (db, fdp),
+ DB_RETOK_STD)
+
+int Db::get(DbTxn *txnid, Dbt *key, Dbt *value, u_int32_t flags)
+{
+ DB *db = unwrap(this);
+ int ret;
+
+ ret = db->get(db, unwrap(txnid), key, value, flags);
+
+ if (!DB_RETOK_DBGET(ret)) {
+ if (ret == ENOMEM && DB_OVERFLOWED_DBT(value))
+ DB_ERROR_DBT("Db::get", value, error_policy());
+ else
+ DB_ERROR("Db::get", ret, error_policy());
+ }
+
+ return (ret);
+}
+
+int Db::get_byteswapped(int *isswapped)
+{
+ DB *db = (DB *)unwrapConst(this);
+ return (db->get_byteswapped(db, isswapped));
+}
+
+int Db::get_type(DBTYPE *dbtype)
+{
+ DB *db = (DB *)unwrapConst(this);
+ return (db->get_type(db, dbtype));
+}
+
+// Dbc is a "compatible" subclass of DBC - that is, no virtual functions
+// or even extra data members, so these casts, although technically
+// non-portable, "should" always be okay.
+DB_METHOD(join, (Dbc **curslist, Dbc **cursorp, u_int32_t flags),
+ (db, (DBC **)curslist, (DBC **)cursorp, flags),
+ DB_RETOK_STD)
+
+DB_METHOD(key_range,
+ (DbTxn *txnid, Dbt *key, DB_KEY_RANGE *results, u_int32_t flags),
+ (db, unwrap(txnid), key, results, flags),
+ DB_RETOK_STD)
+
+// If an error occurred during the constructor, report it now.
+// Otherwise, call the underlying DB->open method.
+//
+int Db::open(DbTxn *txnid, const char *file, const char *database,
+ DBTYPE type, u_int32_t flags, int mode)
+{
+ int ret;
+ DB *db = unwrap(this);
+
+ if (construct_error_ != 0)
+ ret = construct_error_;
+ else
+ ret = db->open(db, unwrap(txnid), file, database, type, flags,
+ mode);
+
+ if (!DB_RETOK_STD(ret))
+ DB_ERROR("Db::open", ret, error_policy());
+
+ return (ret);
+}
+
+int Db::pget(DbTxn *txnid, Dbt *key, Dbt *pkey, Dbt *value, u_int32_t flags)
+{
+ DB *db = unwrap(this);
+ int ret;
+
+ ret = db->pget(db, unwrap(txnid), key, pkey, value, flags);
+
+ /* The logic here is identical to Db::get - reuse the macro. */
+ if (!DB_RETOK_DBGET(ret)) {
+ if (ret == ENOMEM && DB_OVERFLOWED_DBT(value))
+ DB_ERROR_DBT("Db::pget", value, error_policy());
+ else
+ DB_ERROR("Db::pget", ret, error_policy());
+ }
+
+ return (ret);
+}
+
+DB_METHOD(put,
+ (DbTxn *txnid, Dbt *key, Dbt *value, u_int32_t flags),
+ (db, unwrap(txnid), key, value, flags),
+ DB_RETOK_DBPUT)
+
+DB_METHOD_CHECKED(rename, 1,
+ (const char *file, const char *database, const char *newname,
+ u_int32_t flags),
+ (db, file, database, newname, flags), DB_RETOK_STD)
+
+DB_METHOD_CHECKED(remove, 1,
+ (const char *file, const char *database, u_int32_t flags),
+ (db, file, database, flags), DB_RETOK_STD)
+
+DB_METHOD_CHECKED(truncate, 0,
+ (DbTxn *txnid, u_int32_t *countp, u_int32_t flags),
+ (db, unwrap(txnid), countp, flags), DB_RETOK_STD)
+
+DB_METHOD_CHECKED(stat, 0,
+ (void *sp, u_int32_t flags), (db, sp, flags), DB_RETOK_STD)
+
+DB_METHOD_CHECKED(sync, 0,
+ (u_int32_t flags), (db, flags), DB_RETOK_STD)
+
+DB_METHOD_CHECKED(upgrade, 0,
+ (const char *name, u_int32_t flags), (db, name, flags), DB_RETOK_STD)
+
+////////////////////////////////////////////////////////////////////////
+//
+// callbacks
+//
+// *_intercept_c are 'glue' functions that must be declared
+// as extern "C" so to be typesafe. Using a C++ method, even
+// a static class method with 'correct' arguments, will not pass
+// the test; some picky compilers do not allow mixing of function
+// pointers to 'C' functions with function pointers to C++ functions.
+//
+// One wart with this scheme is that the *_callback_ method pointer
+// must be declared public to be accessible by the C intercept.
+// It's possible to accomplish the goal without this, and with
+// another public transfer method, but it's just too much overhead.
+// These callbacks are supposed to be *fast*.
+//
+// The DBTs we receive in these callbacks from the C layer may be
+// manufactured there, but we want to treat them as Dbts.
+// Technically speaking, these DBTs were not constructed as Dbts,
+// but it should be safe to cast them as such given that Dbt is a
+// *very* thin extension of the DBT. That is, Dbt has no additional
+// data elements, does not use virtual functions, virtual inheritance,
+// multiple inheritance, RTI, or any other language feature that
+// causes the structure to grow or be displaced. Although this may
+// sound risky, a design goal of C++ is complete structure
+// compatibility with C, and has the philosophy 'if you don't use it,
+// you shouldn't incur the overhead'. If the C/C++ compilers you're
+// using on a given machine do not have matching struct layouts, then
+// a lot more things will be broken than just this.
+//
+// The alternative, creating a Dbt here in the callback, and populating
+// it from the DBT, is just too slow and cumbersome to be very useful.
+
+// These macros avoid a lot of boilerplate code for callbacks
+
+#define DB_CALLBACK_C_INTERCEPT(_name, _rettype, _cargspec, \
+ _return, _cxxargs) \
+extern "C" _rettype _db_##_name##_intercept_c _cargspec \
+{ \
+ Db *cxxthis; \
+ \
+ DB_ASSERT(cthis != NULL); \
+ cxxthis = (Db *)cthis->api_internal; \
+ DB_ASSERT(cxxthis != NULL); \
+ DB_ASSERT(cxxthis->_name##_callback_ != 0); \
+ \
+ _return (*cxxthis->_name##_callback_) _cxxargs; \
+}
+
+#define DB_SET_CALLBACK(_cxxname, _name, _cxxargspec, _cb) \
+int Db::_cxxname _cxxargspec \
+{ \
+ DB *cthis = unwrap(this); \
+ \
+ _name##_callback_ = _cb; \
+ return ((*(cthis->_cxxname))(cthis, \
+ (_cb) ? _db_##_name##_intercept_c : NULL)); \
+}
+
+/* associate callback - doesn't quite fit the pattern because of the flags */
+DB_CALLBACK_C_INTERCEPT(associate,
+ int, (DB *cthis, const DBT *key, const DBT *data, DBT *retval),
+ return, (cxxthis, Dbt::get_const_Dbt(key), Dbt::get_const_Dbt(data),
+ Dbt::get_Dbt(retval)))
+
+int Db::associate(DbTxn *txn, Db *secondary, int (*callback)(Db *, const Dbt *,
+ const Dbt *, Dbt *), u_int32_t flags)
+{
+ DB *cthis = unwrap(this);
+
+ /* Since the secondary Db is used as the first argument
+ * to the callback, we store the C++ callback on it
+ * rather than on 'this'.
+ */
+ secondary->associate_callback_ = callback;
+ return ((*(cthis->associate))(cthis, unwrap(txn), unwrap(secondary),
+ (callback) ? _db_associate_intercept_c : NULL, flags));
+}
+
+DB_CALLBACK_C_INTERCEPT(feedback,
+ void, (DB *cthis, int opcode, int pct),
+ /* no return */ (void), (cxxthis, opcode, pct))
+
+DB_SET_CALLBACK(set_feedback, feedback,
+ (void (*arg)(Db *cxxthis, int opcode, int pct)), arg)
+
+DB_CALLBACK_C_INTERCEPT(append_recno,
+ int, (DB *cthis, DBT *data, db_recno_t recno),
+ return, (cxxthis, Dbt::get_Dbt(data), recno))
+
+DB_SET_CALLBACK(set_append_recno, append_recno,
+ (int (*arg)(Db *cxxthis, Dbt *data, db_recno_t recno)), arg)
+
+DB_CALLBACK_C_INTERCEPT(bt_compare,
+ int, (DB *cthis, const DBT *data1, const DBT *data2),
+ return,
+ (cxxthis, Dbt::get_const_Dbt(data1), Dbt::get_const_Dbt(data2)))
+
+DB_SET_CALLBACK(set_bt_compare, bt_compare,
+ (int (*arg)(Db *cxxthis, const Dbt *data1, const Dbt *data2)), arg)
+
+DB_CALLBACK_C_INTERCEPT(bt_prefix,
+ size_t, (DB *cthis, const DBT *data1, const DBT *data2),
+ return,
+ (cxxthis, Dbt::get_const_Dbt(data1), Dbt::get_const_Dbt(data2)))
+
+DB_SET_CALLBACK(set_bt_prefix, bt_prefix,
+ (size_t (*arg)(Db *cxxthis, const Dbt *data1, const Dbt *data2)), arg)
+
+DB_CALLBACK_C_INTERCEPT(dup_compare,
+ int, (DB *cthis, const DBT *data1, const DBT *data2),
+ return,
+ (cxxthis, Dbt::get_const_Dbt(data1), Dbt::get_const_Dbt(data2)))
+
+DB_SET_CALLBACK(set_dup_compare, dup_compare,
+ (int (*arg)(Db *cxxthis, const Dbt *data1, const Dbt *data2)), arg)
+
+DB_CALLBACK_C_INTERCEPT(h_hash,
+ u_int32_t, (DB *cthis, const void *data, u_int32_t len),
+ return, (cxxthis, data, len))
+
+DB_SET_CALLBACK(set_h_hash, h_hash,
+ (u_int32_t (*arg)(Db *cxxthis, const void *data, u_int32_t len)), arg)
+
+// This is a 'glue' function declared as extern "C" so it will
+// be compatible with picky compilers that do not allow mixing
+// of function pointers to 'C' functions with function pointers
+// to C++ functions.
+//
+extern "C"
+int _verify_callback_c(void *handle, const void *str_arg)
+{
+ char *str;
+ __DB_OSTREAMCLASS *out;
+
+ str = (char *)str_arg;
+ out = (__DB_OSTREAMCLASS *)handle;
+
+ (*out) << str;
+ if (out->fail())
+ return (EIO);
+
+ return (0);
+}
+
+int Db::verify(const char *name, const char *subdb,
+ __DB_OSTREAMCLASS *ostr, u_int32_t flags)
+{
+ DB *db = unwrap(this);
+ int ret;
+
+ if (!db)
+ ret = EINVAL;
+ else
+ ret = __db_verify_internal(db, name, subdb, ostr,
+ _verify_callback_c, flags);
+
+ if (!DB_RETOK_STD(ret))
+ DB_ERROR("Db::verify", ret, error_policy());
+
+ return (ret);
+}
+
+DB_METHOD(set_bt_compare, (bt_compare_fcn_type func),
+ (db, func), DB_RETOK_STD)
+DB_METHOD(set_bt_maxkey, (u_int32_t bt_maxkey),
+ (db, bt_maxkey), DB_RETOK_STD)
+DB_METHOD(set_bt_minkey, (u_int32_t bt_minkey),
+ (db, bt_minkey), DB_RETOK_STD)
+DB_METHOD(set_bt_prefix, (bt_prefix_fcn_type func),
+ (db, func), DB_RETOK_STD)
+DB_METHOD(set_dup_compare, (dup_compare_fcn_type func),
+ (db, func), DB_RETOK_STD)
+DB_METHOD(set_encrypt, (const char *passwd, int flags),
+ (db, passwd, flags), DB_RETOK_STD)
+DB_METHOD_VOID(set_errfile, (FILE *errfile), (db, errfile))
+DB_METHOD_VOID(set_errpfx, (const char *errpfx), (db, errpfx))
+DB_METHOD(set_flags, (u_int32_t flags), (db, flags),
+ DB_RETOK_STD)
+DB_METHOD(set_h_ffactor, (u_int32_t h_ffactor),
+ (db, h_ffactor), DB_RETOK_STD)
+DB_METHOD(set_h_hash, (h_hash_fcn_type func),
+ (db, func), DB_RETOK_STD)
+DB_METHOD(set_h_nelem, (u_int32_t h_nelem),
+ (db, h_nelem), DB_RETOK_STD)
+DB_METHOD(set_lorder, (int db_lorder), (db, db_lorder),
+ DB_RETOK_STD)
+DB_METHOD(set_pagesize, (u_int32_t db_pagesize),
+ (db, db_pagesize), DB_RETOK_STD)
+DB_METHOD(set_re_delim, (int re_delim),
+ (db, re_delim), DB_RETOK_STD)
+DB_METHOD(set_re_len, (u_int32_t re_len),
+ (db, re_len), DB_RETOK_STD)
+DB_METHOD(set_re_pad, (int re_pad),
+ (db, re_pad), DB_RETOK_STD)
+DB_METHOD(set_re_source, (char *re_source),
+ (db, re_source), DB_RETOK_STD)
+DB_METHOD(set_q_extentsize, (u_int32_t extentsize),
+ (db, extentsize), DB_RETOK_STD)
+
+DB_METHOD_QUIET(set_alloc, (db_malloc_fcn_type malloc_fcn,
+ db_realloc_fcn_type realloc_fcn, db_free_fcn_type free_fcn),
+ (db, malloc_fcn, realloc_fcn, free_fcn))
+
+void Db::set_errcall(void (*arg)(const char *, char *))
+{
+ env_->set_errcall(arg);
+}
+
+void *Db::get_app_private() const
+{
+ return unwrapConst(this)->app_private;
+}
+
+void Db::set_app_private(void *value)
+{
+ unwrap(this)->app_private = value;
+}
+
+DB_METHOD(set_cachesize, (u_int32_t gbytes, u_int32_t bytes, int ncache),
+ (db, gbytes, bytes, ncache), DB_RETOK_STD)
+DB_METHOD(set_cache_priority, (DB_CACHE_PRIORITY priority),
+ (db, priority), DB_RETOK_STD)
+
+int Db::set_paniccall(void (*callback)(DbEnv *, int))
+{
+ return (env_->set_paniccall(callback));
+}
+
+void Db::set_error_stream(__DB_OSTREAMCLASS *error_stream)
+{
+ env_->set_error_stream(error_stream);
+}
diff --git a/bdb/cxx/cxx_dbc.cpp b/bdb/cxx/cxx_dbc.cpp
new file mode 100644
index 00000000000..4d5844f922f
--- /dev/null
+++ b/bdb/cxx/cxx_dbc.cpp
@@ -0,0 +1,115 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: cxx_dbc.cpp,v 11.55 2002/07/03 21:03:52 bostic Exp $";
+#endif /* not lint */
+
+#include <errno.h>
+#include <string.h>
+
+#include "db_cxx.h"
+#include "dbinc/cxx_int.h"
+
+#include "db_int.h"
+#include "dbinc/db_page.h"
+#include "dbinc_auto/db_auto.h"
+#include "dbinc_auto/crdel_auto.h"
+#include "dbinc/db_dispatch.h"
+#include "dbinc_auto/db_ext.h"
+#include "dbinc_auto/common_ext.h"
+
+// Helper macro for simple methods that pass through to the
+// underlying C method. It may return an error or raise an exception.
+// Note this macro expects that input _argspec is an argument
+// list element (e.g., "char *arg") and that _arglist is the arguments
+// that should be passed through to the C method (e.g., "(db, arg)")
+//
+#define DBC_METHOD(_name, _argspec, _arglist, _retok) \
+int Dbc::_name _argspec \
+{ \
+ int ret; \
+ DBC *dbc = this; \
+ \
+ ret = dbc->c_##_name _arglist; \
+ if (!_retok(ret)) \
+ DB_ERROR("Dbc::" # _name, ret, ON_ERROR_UNKNOWN); \
+ return (ret); \
+}
+
+// It's private, and should never be called, but VC4.0 needs it resolved
+//
+Dbc::~Dbc()
+{
+}
+
+DBC_METHOD(close, (void), (dbc), DB_RETOK_STD)
+DBC_METHOD(count, (db_recno_t *countp, u_int32_t _flags),
+ (dbc, countp, _flags), DB_RETOK_STD)
+DBC_METHOD(del, (u_int32_t _flags),
+ (dbc, _flags), DB_RETOK_DBCDEL)
+
+int Dbc::dup(Dbc** cursorp, u_int32_t _flags)
+{
+ int ret;
+ DBC *dbc = this;
+ DBC *new_cursor = 0;
+
+ ret = dbc->c_dup(dbc, &new_cursor, _flags);
+
+ if (DB_RETOK_STD(ret))
+ // The following cast implies that Dbc can be no larger than DBC
+ *cursorp = (Dbc*)new_cursor;
+ else
+ DB_ERROR("Dbc::dup", ret, ON_ERROR_UNKNOWN);
+
+ return (ret);
+}
+
+int Dbc::get(Dbt* key, Dbt *data, u_int32_t _flags)
+{
+ int ret;
+ DBC *dbc = this;
+
+ ret = dbc->c_get(dbc, key, data, _flags);
+
+ if (!DB_RETOK_DBCGET(ret)) {
+ if (ret == ENOMEM && DB_OVERFLOWED_DBT(key))
+ DB_ERROR_DBT("Dbc::get", key, ON_ERROR_UNKNOWN);
+ else if (ret == ENOMEM && DB_OVERFLOWED_DBT(data))
+ DB_ERROR_DBT("Dbc::get", data, ON_ERROR_UNKNOWN);
+ else
+ DB_ERROR("Dbc::get", ret, ON_ERROR_UNKNOWN);
+ }
+
+ return (ret);
+}
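As a usage note, the cursor wrappers above are typically driven like this.
This is an illustrative sketch only; db stands for an already-opened Db
handle and error handling is abbreviated:

	Dbc *dbcp;
	Dbt key, data;
	int ret;

	db->cursor(NULL, &dbcp, 0);
	while ((ret = dbcp->get(&key, &data, DB_NEXT)) == 0) {
		/* key.get_data()/key.get_size() describe the current pair */
	}
	if (ret != DB_NOTFOUND) {
		/* a real error; DB_NOTFOUND is the normal end-of-cursor return */
	}
	dbcp->close();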
+
+int Dbc::pget(Dbt* key, Dbt *pkey, Dbt *data, u_int32_t _flags)
+{
+ int ret;
+ DBC *dbc = this;
+
+ ret = dbc->c_pget(dbc, key, pkey, data, _flags);
+
+	/* The error handling logic is the same as for Dbc::get. */
+ if (!DB_RETOK_DBCGET(ret)) {
+ if (ret == ENOMEM && DB_OVERFLOWED_DBT(key))
+ DB_ERROR_DBT("Dbc::pget", key, ON_ERROR_UNKNOWN);
+ else if (ret == ENOMEM && DB_OVERFLOWED_DBT(data))
+ DB_ERROR_DBT("Dbc::pget", data, ON_ERROR_UNKNOWN);
+ else
+ DB_ERROR("Dbc::pget", ret, ON_ERROR_UNKNOWN);
+ }
+
+ return (ret);
+}
+
+DBC_METHOD(put, (Dbt* key, Dbt *data, u_int32_t _flags),
+ (dbc, key, data, _flags), DB_RETOK_DBCPUT)
diff --git a/bdb/cxx/cxx_dbt.cpp b/bdb/cxx/cxx_dbt.cpp
new file mode 100644
index 00000000000..7a4224503ee
--- /dev/null
+++ b/bdb/cxx/cxx_dbt.cpp
@@ -0,0 +1,61 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: cxx_dbt.cpp,v 11.53 2002/03/27 04:31:14 bostic Exp $";
+#endif /* not lint */
+
+#include <errno.h>
+#include <string.h>
+
+#include "db_cxx.h"
+#include "dbinc/cxx_int.h"
+
+#include "db_int.h"
+#include "dbinc/db_page.h"
+#include "dbinc_auto/db_auto.h"
+#include "dbinc_auto/crdel_auto.h"
+#include "dbinc/db_dispatch.h"
+#include "dbinc_auto/db_ext.h"
+#include "dbinc_auto/common_ext.h"
+
+Dbt::Dbt()
+{
+ DBT *dbt = this;
+ memset(dbt, 0, sizeof(DBT));
+}
+
+Dbt::Dbt(void *data_arg, u_int32_t size_arg)
+{
+ DBT *dbt = this;
+ memset(dbt, 0, sizeof(DBT));
+ set_data(data_arg);
+ set_size(size_arg);
+}
+
+Dbt::~Dbt()
+{
+}
+
+Dbt::Dbt(const Dbt &that)
+{
+ const DBT *from = &that;
+ DBT *to = this;
+ memcpy(to, from, sizeof(DBT));
+}
+
+Dbt &Dbt::operator = (const Dbt &that)
+{
+ if (this != &that) {
+ const DBT *from = &that;
+ DBT *to = this;
+ memcpy(to, from, sizeof(DBT));
+ }
+ return (*this);
+}
diff --git a/bdb/cxx/cxx_env.cpp b/bdb/cxx/cxx_env.cpp
new file mode 100644
index 00000000000..c78c6e9fa47
--- /dev/null
+++ b/bdb/cxx/cxx_env.cpp
@@ -0,0 +1,802 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: cxx_env.cpp,v 11.88 2002/08/26 22:13:36 mjc Exp $";
+#endif /* not lint */
+
+#include <errno.h>
+#include <stdio.h> // needed for set_error_stream
+#include <string.h>
+
+#include "db_cxx.h"
+#include "dbinc/cxx_int.h"
+
+#include "db_int.h"
+#include "dbinc_auto/common_ext.h"
+
+#ifdef HAVE_CXX_STDHEADERS
+using std::cerr;
+#endif
+
+// Helper macros for simple methods that pass through to the
+// underlying C method. They may return an error or raise an exception.
+// These macros expect that input _argspec is an argument
+// list element (e.g., "char *arg") and that _arglist is the arguments
+// that should be passed through to the C method (e.g., "(dbenv, arg)")
+//
+#define DBENV_METHOD_ERR(_name, _argspec, _arglist, _on_err) \
+int DbEnv::_name _argspec \
+{ \
+ DB_ENV *dbenv = unwrap(this); \
+ int ret; \
+ \
+ if ((ret = dbenv->_name _arglist) != 0) { \
+ _on_err; \
+ } \
+ return (ret); \
+}
+
+#define DBENV_METHOD(_name, _argspec, _arglist) \
+ DBENV_METHOD_ERR(_name, _argspec, _arglist, \
+ DB_ERROR("DbEnv::" # _name, ret, error_policy()))
+
+#define DBENV_METHOD_QUIET(_name, _argspec, _arglist) \
+int DbEnv::_name _argspec \
+{ \
+ DB_ENV *dbenv = unwrap(this); \
+ \
+ return (dbenv->_name _arglist); \
+}
+
+#define DBENV_METHOD_VOID(_name, _argspec, _arglist) \
+void DbEnv::_name _argspec \
+{ \
+ DB_ENV *dbenv = unwrap(this); \
+ \
+ dbenv->_name _arglist; \
+}
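For example, the DBENV_METHOD(set_data_dir, ...) invocation that appears
later in this file expands (hand-expanded here for illustration, after
string concatenation) to roughly:

int DbEnv::set_data_dir (const char *dir)
{
	DB_ENV *dbenv = unwrap(this);
	int ret;

	if ((ret = dbenv->set_data_dir (dbenv, dir)) != 0) {
		DB_ERROR("DbEnv::set_data_dir", ret, error_policy());
	}
	return (ret);
}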
+
+// This datatype is needed for picky compilers.
+//
+extern "C" {
+ typedef void (*db_errcall_fcn_type)
+ (const char *, char *);
+};
+
+// The reason for a static variable is that some structures
+// (like Dbts) have no connection to any Db or DbEnv, so when
+// errors occur in their methods, we must have some reasonable
+// way to determine whether to throw or return errors.
+//
+// This variable is taken from flags whenever a DbEnv is constructed.
+// Normally there is only one DbEnv per program, and even if not,
+// there is typically a single policy of throwing or returning.
+//
+static int last_known_error_policy = ON_ERROR_UNKNOWN;
+
+__DB_OSTREAMCLASS *DbEnv::error_stream_ = 0;
+
+// These 'glue' functions are declared as extern "C" so they will
+// be compatible with picky compilers that do not allow mixing
+// of function pointers to 'C' functions with function pointers
+// to C++ functions.
+//
+extern "C"
+void _feedback_intercept_c(DB_ENV *env, int opcode, int pct)
+{
+ DbEnv::_feedback_intercept(env, opcode, pct);
+}
+
+extern "C"
+void _paniccall_intercept_c(DB_ENV *env, int errval)
+{
+ DbEnv::_paniccall_intercept(env, errval);
+}
+
+extern "C"
+void _stream_error_function_c(const char *prefix, char *message)
+{
+ DbEnv::_stream_error_function(prefix, message);
+}
+
+extern "C"
+int _app_dispatch_intercept_c(DB_ENV *env, DBT *dbt,
+ DB_LSN *lsn, db_recops op)
+{
+ return (DbEnv::_app_dispatch_intercept(env, dbt, lsn, op));
+}
+
+extern "C"
+int _rep_send_intercept_c(DB_ENV *env, const DBT *cntrl,
+ const DBT *data, int id, u_int32_t flags)
+{
+ return (DbEnv::_rep_send_intercept(env,
+ cntrl, data, id, flags));
+}
+
+void DbEnv::_feedback_intercept(DB_ENV *env, int opcode, int pct)
+{
+ if (env == 0) {
+ DB_ERROR("DbEnv::feedback_callback", EINVAL, ON_ERROR_UNKNOWN);
+ return;
+ }
+ DbEnv *cxxenv = (DbEnv *)env->api1_internal;
+ if (cxxenv == 0) {
+ DB_ERROR("DbEnv::feedback_callback", EINVAL, ON_ERROR_UNKNOWN);
+ return;
+ }
+ if (cxxenv->feedback_callback_ == 0) {
+ DB_ERROR("DbEnv::feedback_callback", EINVAL,
+ cxxenv->error_policy());
+ return;
+ }
+ (*cxxenv->feedback_callback_)(cxxenv, opcode, pct);
+}
+
+void DbEnv::_paniccall_intercept(DB_ENV *env, int errval)
+{
+ if (env == 0) {
+ DB_ERROR("DbEnv::paniccall_callback", EINVAL,
+ ON_ERROR_UNKNOWN);
+ }
+ DbEnv *cxxenv = (DbEnv *)env->api1_internal;
+ if (cxxenv == 0) {
+ DB_ERROR("DbEnv::paniccall_callback", EINVAL,
+ ON_ERROR_UNKNOWN);
+ }
+ if (cxxenv->paniccall_callback_ == 0) {
+ DB_ERROR("DbEnv::paniccall_callback", EINVAL,
+ cxxenv->error_policy());
+ }
+ (*cxxenv->paniccall_callback_)(cxxenv, errval);
+}
+
+int DbEnv::_app_dispatch_intercept(DB_ENV *env, DBT *dbt,
+ DB_LSN *lsn, db_recops op)
+{
+ if (env == 0) {
+ DB_ERROR("DbEnv::app_dispatch_callback",
+ EINVAL, ON_ERROR_UNKNOWN);
+ return (EINVAL);
+ }
+ DbEnv *cxxenv = (DbEnv *)env->api1_internal;
+ if (cxxenv == 0) {
+ DB_ERROR("DbEnv::app_dispatch_callback",
+ EINVAL, ON_ERROR_UNKNOWN);
+ return (EINVAL);
+ }
+ if (cxxenv->app_dispatch_callback_ == 0) {
+ DB_ERROR("DbEnv::app_dispatch_callback",
+ EINVAL, cxxenv->error_policy());
+ return (EINVAL);
+ }
+ Dbt *cxxdbt = (Dbt *)dbt;
+ DbLsn *cxxlsn = (DbLsn *)lsn;
+ return ((*cxxenv->app_dispatch_callback_)(cxxenv, cxxdbt, cxxlsn, op));
+}
+
+int DbEnv::_rep_send_intercept(DB_ENV *env, const DBT *cntrl,
+ const DBT *data, int id, u_int32_t flags)
+{
+
+ if (env == 0) {
+ DB_ERROR("DbEnv::rep_send_callback", EINVAL, ON_ERROR_UNKNOWN);
+ return (EINVAL);
+ }
+ DbEnv *cxxenv = (DbEnv *)env->api1_internal;
+ if (cxxenv == 0) {
+ DB_ERROR("DbEnv::rep_send_callback", EINVAL, ON_ERROR_UNKNOWN);
+ return (EINVAL);
+ }
+ const Dbt *cxxcntrl = (const Dbt *)cntrl;
+ Dbt *cxxdata = (Dbt *)data;
+ return ((*cxxenv->rep_send_callback_)(cxxenv,
+ cxxcntrl, cxxdata, id, flags));
+}
+
+// A truism for the DbEnv object is that there is a valid
+// DB_ENV handle from the constructor until close().
+// After the close, the DB_ENV handle is invalid and
+// no operations are permitted on the DbEnv (other than
+// destructor). Leaving the DbEnv handle open and not
+// doing a close is generally considered an error.
+//
+// We used to allow DbEnv objects to be closed and reopened.
+// This implied always keeping a valid DB_ENV object, and
+// coordinating the open objects between Db/DbEnv turned
+// out to be overly complicated. Now we do not allow this.
+
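In application terms, the lifecycle described above looks like the following
sketch. The environment path and the flag set are placeholders, and error
handling is elided:

	DbEnv env(0);			// 0 => default construction flags
	env.open("/path/to/env-home",
	    DB_CREATE | DB_INIT_MPOOL | DB_INIT_LOCK |
	    DB_INIT_LOG | DB_INIT_TXN, 0);
	/* ... open Db handles, run transactions ... */
	env.close(0);			// the DB_ENV handle is now invalid;
					// only the destructor may follow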
+DbEnv::DbEnv(u_int32_t flags)
+: imp_(0)
+, construct_error_(0)
+, construct_flags_(flags)
+, app_dispatch_callback_(0)
+, feedback_callback_(0)
+, paniccall_callback_(0)
+, pgin_callback_(0)
+, pgout_callback_(0)
+, rep_send_callback_(0)
+{
+ if ((construct_error_ = initialize(0)) != 0)
+ DB_ERROR("DbEnv::DbEnv", construct_error_, error_policy());
+}
+
+DbEnv::DbEnv(DB_ENV *env, u_int32_t flags)
+: imp_(0)
+, construct_error_(0)
+, construct_flags_(flags)
+, app_dispatch_callback_(0)
+, feedback_callback_(0)
+, paniccall_callback_(0)
+, pgin_callback_(0)
+, pgout_callback_(0)
+, rep_send_callback_(0)
+{
+ if ((construct_error_ = initialize(env)) != 0)
+ DB_ERROR("DbEnv::DbEnv", construct_error_, error_policy());
+}
+
+// If the DB_ENV handle is still open, we close it. This is to make stack
+// allocation of DbEnv objects easier so that they are cleaned up in the error
+// path. Note that the C layer catches cases where handles are open in the
+// environment at close time and reports an error. Applications should call
+// close explicitly in normal (non-exceptional) cases to check the return
+// value.
+//
+DbEnv::~DbEnv()
+{
+ DB_ENV *env = unwrap(this);
+
+ if (env != NULL) {
+ cleanup();
+ (void)env->close(env, 0);
+ }
+}
+
+// called by destructors before the DB_ENV is destroyed.
+void DbEnv::cleanup()
+{
+ DB_ENV *env = unwrap(this);
+
+ if (env != NULL) {
+ env->api1_internal = 0;
+ imp_ = 0;
+ }
+}
+
+int DbEnv::close(u_int32_t flags)
+{
+ int ret;
+ DB_ENV *env = unwrap(this);
+
+ // after a close (no matter if success or failure),
+ // the underlying DB_ENV object must not be accessed,
+ // so we clean up in advance.
+ //
+ cleanup();
+
+ // It's safe to throw an error after the close,
+ // since our error mechanism does not peer into
+ // the DB* structures.
+ //
+ if ((ret = env->close(env, flags)) != 0)
+ DB_ERROR("DbEnv::close", ret, error_policy());
+
+ return (ret);
+}
+
+DBENV_METHOD(dbremove,
+ (DbTxn *txn, const char *name, const char *subdb, u_int32_t flags),
+ (dbenv, unwrap(txn), name, subdb, flags))
+DBENV_METHOD(dbrename, (DbTxn *txn, const char *name, const char *subdb,
+ const char *newname, u_int32_t flags),
+ (dbenv, unwrap(txn), name, subdb, newname, flags))
+
+void DbEnv::err(int error, const char *format, ...)
+{
+ DB_ENV *env = unwrap(this);
+
+ DB_REAL_ERR(env, error, 1, 1, format);
+}
+
+// Return a tristate value corresponding to whether we should
+// throw exceptions on errors:
+// ON_ERROR_RETURN
+// ON_ERROR_THROW
+// ON_ERROR_UNKNOWN
+//
+int DbEnv::error_policy()
+{
+ if ((construct_flags_ & DB_CXX_NO_EXCEPTIONS) != 0) {
+ return (ON_ERROR_RETURN);
+ }
+ else {
+ return (ON_ERROR_THROW);
+ }
+}
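Since the policy is latched from the construction flags, application code
selects it once, for example:

	DbEnv quiet(DB_CXX_NO_EXCEPTIONS);	// errors come back as return codes
	DbEnv loud(0);				// errors are thrown as DbException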
+
+void DbEnv::errx(const char *format, ...)
+{
+ DB_ENV *env = unwrap(this);
+
+ DB_REAL_ERR(env, 0, 0, 1, format);
+}
+
+void *DbEnv::get_app_private() const
+{
+ return unwrapConst(this)->app_private;
+}
+
+// used internally during constructor
+// to associate an existing DB_ENV with this DbEnv,
+// or create a new one.
+//
+int DbEnv::initialize(DB_ENV *env)
+{
+ int ret;
+
+ last_known_error_policy = error_policy();
+
+ if (env == 0) {
+ // Create a new DB_ENV environment.
+ if ((ret = ::db_env_create(&env,
+ construct_flags_ & ~DB_CXX_NO_EXCEPTIONS)) != 0)
+ return (ret);
+ }
+ imp_ = wrap(env);
+ env->api1_internal = this; // for DB_ENV* to DbEnv* conversion
+ return (0);
+}
+
+// lock methods
+DBENV_METHOD(lock_detect, (u_int32_t flags, u_int32_t atype, int *aborted),
+ (dbenv, flags, atype, aborted))
+DBENV_METHOD_ERR(lock_get,
+ (u_int32_t locker, u_int32_t flags, const Dbt *obj,
+ db_lockmode_t lock_mode, DbLock *lock),
+ (dbenv, locker, flags, obj, lock_mode, &lock->lock_),
+ DbEnv::runtime_error_lock_get("DbEnv::lock_get", ret,
+ DB_LOCK_GET, lock_mode, obj, *lock,
+ -1, error_policy()))
+DBENV_METHOD(lock_id, (u_int32_t *idp), (dbenv, idp))
+DBENV_METHOD(lock_id_free, (u_int32_t id), (dbenv, id))
+DBENV_METHOD(lock_put, (DbLock *lock), (dbenv, &lock->lock_))
+DBENV_METHOD(lock_stat, (DB_LOCK_STAT **statp, u_int32_t flags),
+ (dbenv, statp, flags))
+DBENV_METHOD_ERR(lock_vec,
+ (u_int32_t locker, u_int32_t flags, DB_LOCKREQ list[],
+ int nlist, DB_LOCKREQ **elist_returned),
+ (dbenv, locker, flags, list, nlist, elist_returned),
+ DbEnv::runtime_error_lock_get("DbEnv::lock_vec", ret,
+ (*elist_returned)->op, (*elist_returned)->mode,
+ Dbt::get_Dbt((*elist_returned)->obj), DbLock((*elist_returned)->lock),
+ (*elist_returned) - list, error_policy()))
+// log methods
+DBENV_METHOD(log_archive, (char **list[], u_int32_t flags),
+ (dbenv, list, flags))
+
+int DbEnv::log_compare(const DbLsn *lsn0, const DbLsn *lsn1)
+{
+ return (::log_compare(lsn0, lsn1));
+}
+
+// The following cast implies that DbLogc can be no larger than DB_LOGC
+DBENV_METHOD(log_cursor, (DbLogc **cursorp, u_int32_t flags),
+ (dbenv, (DB_LOGC **)cursorp, flags))
+DBENV_METHOD(log_file, (DbLsn *lsn, char *namep, size_t len),
+ (dbenv, lsn, namep, len))
+DBENV_METHOD(log_flush, (const DbLsn *lsn), (dbenv, lsn))
+DBENV_METHOD(log_put, (DbLsn *lsn, const Dbt *data, u_int32_t flags),
+ (dbenv, lsn, data, flags))
+DBENV_METHOD(log_stat, (DB_LOG_STAT **spp, u_int32_t flags),
+ (dbenv, spp, flags))
+
+int DbEnv::memp_fcreate(DbMpoolFile **dbmfp, u_int32_t flags)
+{
+ DB_ENV *env = unwrap(this);
+ int ret;
+ DB_MPOOLFILE *mpf;
+
+ if (env == NULL)
+ ret = EINVAL;
+ else
+ ret = env->memp_fcreate(env, &mpf, flags);
+
+ if (DB_RETOK_STD(ret)) {
+ *dbmfp = new DbMpoolFile();
+ (*dbmfp)->imp_ = wrap(mpf);
+ } else
+ DB_ERROR("DbMpoolFile::f_create", ret, ON_ERROR_UNKNOWN);
+
+ return (ret);
+}
+
+DBENV_METHOD(memp_register,
+ (int ftype, pgin_fcn_type pgin_fcn, pgout_fcn_type pgout_fcn),
+ (dbenv, ftype, pgin_fcn, pgout_fcn))
+
+// memory pool methods
+DBENV_METHOD(memp_stat,
+ (DB_MPOOL_STAT **gsp, DB_MPOOL_FSTAT ***fsp, u_int32_t flags),
+ (dbenv, gsp, fsp, flags))
+
+DBENV_METHOD(memp_sync, (DbLsn *sn), (dbenv, sn))
+
+DBENV_METHOD(memp_trickle, (int pct, int *nwrotep), (dbenv, pct, nwrotep))
+
+// If an error occurred during the constructor, report it now.
+// Otherwise, call the underlying DB_ENV->open method.
+//
+int DbEnv::open(const char *db_home, u_int32_t flags, int mode)
+{
+ int ret;
+ DB_ENV *env = unwrap(this);
+
+ if (construct_error_ != 0)
+ ret = construct_error_;
+ else
+ ret = env->open(env, db_home, flags, mode);
+
+ if (!DB_RETOK_STD(ret))
+ DB_ERROR("DbEnv::open", ret, error_policy());
+
+ return (ret);
+}
+
+int DbEnv::remove(const char *db_home, u_int32_t flags)
+{
+ int ret;
+ DB_ENV *env = unwrap(this);
+
+ // after a remove (no matter if success or failure),
+ // the underlying DB_ENV object must not be accessed,
+ // so we clean up in advance.
+ //
+ cleanup();
+
+ if ((ret = env->remove(env, db_home, flags)) != 0)
+ DB_ERROR("DbEnv::remove", ret, error_policy());
+
+ return (ret);
+}
+
+// Report an error associated with the DbEnv.
+// error_policy is one of:
+// ON_ERROR_THROW throw an error
+// ON_ERROR_RETURN do nothing here, the caller will return an error
+// ON_ERROR_UNKNOWN defer the policy to policy saved in DbEnv::DbEnv
+//
+void DbEnv::runtime_error(const char *caller, int error, int error_policy)
+{
+ if (error_policy == ON_ERROR_UNKNOWN)
+ error_policy = last_known_error_policy;
+ if (error_policy == ON_ERROR_THROW) {
+ // Creating and throwing the object in two separate
+ // statements seems to be necessary for HP compilers.
+ switch (error) {
+ case DB_LOCK_DEADLOCK:
+ {
+ DbDeadlockException dl_except(caller);
+ throw dl_except;
+ }
+ break;
+ case DB_RUNRECOVERY:
+ {
+ DbRunRecoveryException rr_except(caller);
+ throw rr_except;
+ }
+ break;
+ default:
+ {
+ DbException except(caller, error);
+ throw except;
+ }
+ break;
+ }
+ }
+}
+
+// Like DbEnv::runtime_error, but issue a DbMemoryException
+// based on the fact that this Dbt is not large enough.
+void DbEnv::runtime_error_dbt(const char *caller, Dbt *dbt, int error_policy)
+{
+ if (error_policy == ON_ERROR_UNKNOWN)
+ error_policy = last_known_error_policy;
+ if (error_policy == ON_ERROR_THROW) {
+ // Creating and throwing the object in two separate
+ // statements seems to be necessary for HP compilers.
+ DbMemoryException except(caller, dbt);
+ throw except;
+ }
+}
+
+// Like DbEnv::runtime_error, but issue a DbLockNotGrantedException,
+// or a regular runtime error.
+// The regular runtime_error is used when the error is not DB_LOCK_NOTGRANTED.
+void DbEnv::runtime_error_lock_get(const char *caller, int error,
+ db_lockop_t op, db_lockmode_t mode, const Dbt *obj,
+ DbLock lock, int index, int error_policy)
+{
+ if (error != DB_LOCK_NOTGRANTED) {
+ runtime_error(caller, error, error_policy);
+ return;
+ }
+
+ if (error_policy == ON_ERROR_UNKNOWN)
+ error_policy = last_known_error_policy;
+ if (error_policy == ON_ERROR_THROW) {
+ // Creating and throwing the object in two separate
+ // statements seems to be necessary for HP compilers.
+ DbLockNotGrantedException except(caller, op, mode,
+ obj, lock, index);
+ throw except;
+ }
+}
+
+// static method
+char *DbEnv::strerror(int error)
+{
+ return (db_strerror(error));
+}
+
+void DbEnv::_stream_error_function(const char *prefix, char *message)
+{
+ // HP compilers need the extra casts, we don't know why.
+ if (error_stream_) {
+ if (prefix) {
+ (*error_stream_) << prefix << (const char *)": ";
+ }
+ if (message) {
+ (*error_stream_) << (const char *)message;
+ }
+ (*error_stream_) << (const char *)"\n";
+ }
+}
+
+// set methods
+
+DBENV_METHOD_VOID(set_errfile, (FILE *errfile), (dbenv, errfile))
+DBENV_METHOD_VOID(set_errpfx, (const char *errpfx), (dbenv, errpfx))
+
+// We keep these alphabetical by field name,
+// for comparison with Java's list.
+//
+DBENV_METHOD(set_data_dir, (const char *dir), (dbenv, dir))
+DBENV_METHOD(set_encrypt, (const char *passwd, int flags),
+ (dbenv, passwd, flags))
+DBENV_METHOD(set_lg_bsize, (u_int32_t bsize), (dbenv, bsize))
+DBENV_METHOD(set_lg_dir, (const char *dir), (dbenv, dir))
+DBENV_METHOD(set_lg_max, (u_int32_t max), (dbenv, max))
+DBENV_METHOD(set_lg_regionmax, (u_int32_t regionmax), (dbenv, regionmax))
+DBENV_METHOD(set_lk_detect, (u_int32_t detect), (dbenv, detect))
+DBENV_METHOD(set_lk_max, (u_int32_t max), (dbenv, max))
+DBENV_METHOD(set_lk_max_lockers, (u_int32_t max_lockers), (dbenv, max_lockers))
+DBENV_METHOD(set_lk_max_locks, (u_int32_t max_locks), (dbenv, max_locks))
+DBENV_METHOD(set_lk_max_objects, (u_int32_t max_objects), (dbenv, max_objects))
+DBENV_METHOD(set_mp_mmapsize, (size_t mmapsize), (dbenv, mmapsize))
+DBENV_METHOD(set_tmp_dir, (const char *tmp_dir), (dbenv, tmp_dir))
+DBENV_METHOD(set_tx_max, (u_int32_t tx_max), (dbenv, tx_max))
+
+DBENV_METHOD_QUIET(set_alloc,
+ (db_malloc_fcn_type malloc_fcn, db_realloc_fcn_type realloc_fcn,
+ db_free_fcn_type free_fcn),
+ (dbenv, malloc_fcn, realloc_fcn, free_fcn))
+
+void DbEnv::set_app_private(void *value)
+{
+ unwrap(this)->app_private = value;
+}
+
+DBENV_METHOD(set_cachesize,
+ (u_int32_t gbytes, u_int32_t bytes, int ncache),
+ (dbenv, gbytes, bytes, ncache))
+
+void DbEnv::set_errcall(void (*arg)(const char *, char *))
+{
+ DB_ENV *dbenv = unwrap(this);
+
+ // XXX
+ // We are casting from a function ptr declared with C++
+ // linkage to one (same arg types) declared with C
+ // linkage. It's hard to imagine a pair of C/C++
+ // compilers from the same vendor for which this
+ // won't work. Unfortunately, we can't use a
+	// won't work. Unfortunately, we can't use an
+ // function does not have a (DbEnv*) as one of
+ // the args. If this causes trouble, we can pull
+ // the same trick we use in Java, namely stuffing
+ // a (DbEnv*) pointer into the prefix. We're
+ // avoiding this for the moment because it obfuscates.
+ //
+ (*(dbenv->set_errcall))(dbenv, (db_errcall_fcn_type)arg);
+}
+
+// Note: This actually behaves a bit like a static function,
+// since DB_ENV.db_errcall has no information about which
+// db_env triggered the call. A user that has multiple DB_ENVs
+// will simply not be able to have different streams for each one.
+//
+void DbEnv::set_error_stream(__DB_OSTREAMCLASS *stream)
+{
+ DB_ENV *dbenv = unwrap(this);
+
+ error_stream_ = stream;
+ dbenv->set_errcall(dbenv, (stream == 0) ? 0 :
+ _stream_error_function_c);
+}
+
+int DbEnv::set_feedback(void (*arg)(DbEnv *, int, int))
+{
+ DB_ENV *dbenv = unwrap(this);
+
+ feedback_callback_ = arg;
+
+ return ((*(dbenv->set_feedback))(dbenv, _feedback_intercept_c));
+}
+
+DBENV_METHOD(set_flags, (u_int32_t flags, int onoff), (dbenv, flags, onoff))
+DBENV_METHOD(set_lk_conflicts, (u_int8_t *lk_conflicts, int lk_max),
+ (dbenv, lk_conflicts, lk_max))
+
+int DbEnv::set_paniccall(void (*arg)(DbEnv *, int))
+{
+ DB_ENV *dbenv = unwrap(this);
+
+ paniccall_callback_ = arg;
+
+ return ((*(dbenv->set_paniccall))(dbenv, _paniccall_intercept_c));
+}
+
+DBENV_METHOD(set_rpc_server,
+ (void *cl, char *host, long tsec, long ssec, u_int32_t flags),
+ (dbenv, cl, host, tsec, ssec, flags))
+DBENV_METHOD(set_shm_key, (long shm_key), (dbenv, shm_key))
+// Note: this changes from last_known_error_policy to error_policy()
+DBENV_METHOD(set_tas_spins, (u_int32_t arg), (dbenv, arg))
+
+int DbEnv::set_app_dispatch
+ (int (*arg)(DbEnv *, Dbt *, DbLsn *, db_recops))
+{
+ DB_ENV *dbenv = unwrap(this);
+ int ret;
+
+ app_dispatch_callback_ = arg;
+ if ((ret = (*(dbenv->set_app_dispatch))(dbenv,
+ _app_dispatch_intercept_c)) != 0)
+ DB_ERROR("DbEnv::set_app_dispatch", ret, error_policy());
+
+ return (ret);
+}
+
+DBENV_METHOD(set_tx_timestamp, (time_t *timestamp), (dbenv, timestamp))
+DBENV_METHOD(set_verbose, (u_int32_t which, int onoff), (dbenv, which, onoff))
+
+int DbEnv::txn_begin(DbTxn *pid, DbTxn **tid, u_int32_t flags)
+{
+ DB_ENV *env = unwrap(this);
+ DB_TXN *txn;
+ int ret;
+
+ ret = env->txn_begin(env, unwrap(pid), &txn, flags);
+ if (DB_RETOK_STD(ret))
+ *tid = new DbTxn(txn);
+ else
+ DB_ERROR("DbEnv::txn_begin", ret, error_policy());
+
+ return (ret);
+}
+
+DBENV_METHOD(txn_checkpoint, (u_int32_t kbyte, u_int32_t min, u_int32_t flags),
+ (dbenv, kbyte, min, flags))
+
+int DbEnv::txn_recover(DbPreplist *preplist, long count,
+ long *retp, u_int32_t flags)
+{
+ DB_ENV *dbenv = unwrap(this);
+ DB_PREPLIST *c_preplist;
+ long i;
+ int ret;
+
+ /*
+ * We need to allocate some local storage for the
+ * returned preplist, and that requires us to do
+ * our own argument validation.
+ */
+ if (count <= 0)
+ ret = EINVAL;
+ else
+ ret = __os_malloc(dbenv, sizeof(DB_PREPLIST) * count,
+ &c_preplist);
+
+ if (ret != 0) {
+ DB_ERROR("DbEnv::txn_recover", ret, error_policy());
+ return (ret);
+ }
+
+ if ((ret =
+ dbenv->txn_recover(dbenv, c_preplist, count, retp, flags)) != 0) {
+ __os_free(dbenv, c_preplist);
+ DB_ERROR("DbEnv::txn_recover", ret, error_policy());
+ return (ret);
+ }
+
+ for (i = 0; i < *retp; i++) {
+ preplist[i].txn = new DbTxn();
+ preplist[i].txn->imp_ = wrap(c_preplist[i].txn);
+ memcpy(preplist[i].gid, c_preplist[i].gid,
+ sizeof(preplist[i].gid));
+ }
+
+ __os_free(dbenv, c_preplist);
+
+ return (0);
+}
+
+DBENV_METHOD(txn_stat, (DB_TXN_STAT **statp, u_int32_t flags),
+ (dbenv, statp, flags))
+
+int DbEnv::set_rep_transport(u_int32_t myid,
+ int (*f_send)(DbEnv *, const Dbt *, const Dbt *, int, u_int32_t))
+{
+ DB_ENV *dbenv = unwrap(this);
+ int ret;
+
+ rep_send_callback_ = f_send;
+ if ((ret = dbenv->set_rep_transport(dbenv,
+ myid, _rep_send_intercept_c)) != 0)
+ DB_ERROR("DbEnv::set_rep_transport", ret, error_policy());
+
+ return (ret);
+}
+
+DBENV_METHOD(rep_elect,
+ (int nsites, int pri, u_int32_t timeout, int *idp),
+ (dbenv, nsites, pri, timeout, idp))
+
+int DbEnv::rep_process_message(Dbt *control, Dbt *rec, int *idp)
+{
+ DB_ENV *dbenv = unwrap(this);
+ int ret;
+
+ ret = dbenv->rep_process_message(dbenv, control, rec, idp);
+ if (!DB_RETOK_REPPMSG(ret))
+ DB_ERROR("DbEnv::rep_process_message", ret, error_policy());
+
+ return (ret);
+}
+
+DBENV_METHOD(rep_start,
+ (Dbt *cookie, u_int32_t flags),
+ (dbenv, (DBT *)cookie, flags))
+
+DBENV_METHOD(rep_stat, (DB_REP_STAT **statp, u_int32_t flags),
+ (dbenv, statp, flags))
+
+DBENV_METHOD(set_rep_limit, (u_int32_t gbytes, u_int32_t bytes),
+ (dbenv, gbytes, bytes))
+
+DBENV_METHOD(set_timeout,
+ (db_timeout_t timeout, u_int32_t flags),
+ (dbenv, timeout, flags))
+
+// static method
+char *DbEnv::version(int *major, int *minor, int *patch)
+{
+ return (db_version(major, minor, patch));
+}
+
+// static method
+DbEnv *DbEnv::wrap_DB_ENV(DB_ENV *dbenv)
+{
+ DbEnv *wrapped_env = get_DbEnv(dbenv);
+ if (wrapped_env == NULL)
+ wrapped_env = new DbEnv(dbenv, 0);
+ return wrapped_env;
+}
diff --git a/bdb/cxx/cxx_except.cpp b/bdb/cxx/cxx_except.cpp
index a62e21a767d..40fdeae69d6 100644
--- a/bdb/cxx/cxx_except.cpp
+++ b/bdb/cxx/cxx_except.cpp
@@ -1,20 +1,21 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1997, 1998, 1999, 2000
+ * Copyright (c) 1997-2002
* Sleepycat Software. All rights reserved.
*/
#include "db_config.h"
#ifndef lint
-static const char revid[] = "$Id: cxx_except.cpp,v 11.7 2000/09/21 15:05:45 dda Exp $";
+static const char revid[] = "$Id: cxx_except.cpp,v 11.17 2002/08/23 01:07:27 mjc Exp $";
#endif /* not lint */
#include <string.h>
+#include <errno.h>
#include "db_cxx.h"
-#include "cxx_int.h"
+#include "dbinc/cxx_int.h"
// tmpString is used to create strings on the stack
//
@@ -25,30 +26,7 @@ public:
const char *str2 = 0,
const char *str3 = 0,
const char *str4 = 0,
- const char *str5 = 0)
- {
- int len = strlen(str1);
- if (str2)
- len += strlen(str2);
- if (str3)
- len += strlen(str3);
- if (str4)
- len += strlen(str4);
- if (str5)
- len += strlen(str5);
-
- s_ = new char[len+1];
-
- strcpy(s_, str1);
- if (str2)
- strcat(s_, str2);
- if (str3)
- strcat(s_, str3);
- if (str4)
- strcat(s_, str4);
- if (str5)
- strcat(s_, str5);
- }
+ const char *str5 = 0);
~tmpString() { delete [] s_; }
operator const char *() { return (s_); }
@@ -56,6 +34,35 @@ private:
char *s_;
};
+tmpString::tmpString(const char *str1,
+ const char *str2,
+ const char *str3,
+ const char *str4,
+ const char *str5)
+{
+ size_t len = strlen(str1);
+ if (str2)
+ len += strlen(str2);
+ if (str3)
+ len += strlen(str3);
+ if (str4)
+ len += strlen(str4);
+ if (str5)
+ len += strlen(str5);
+
+ s_ = new char[len+1];
+
+ strcpy(s_, str1);
+ if (str2)
+ strcat(s_, str2);
+ if (str3)
+ strcat(s_, str3);
+ if (str4)
+ strcat(s_, str4);
+ if (str5)
+ strcat(s_, str5);
+}
+
// Note: would not be needed if we can inherit from exception
// It does not appear to be possible to inherit from exception
// with the current Microsoft library (VC5.0).
@@ -100,7 +107,8 @@ DbException::DbException(const char *prefix, int err)
DbException::DbException(const char *prefix1, const char *prefix2, int err)
: err_(err)
{
- what_ = dupString(tmpString(prefix1, ": ", prefix2, ": ", db_strerror(err)));
+ what_ = dupString(tmpString(prefix1, ": ", prefix2, ": ",
+ db_strerror(err)));
}
DbException::DbException(const DbException &that)
@@ -130,3 +138,193 @@ const char *DbException::what() const
{
return (what_);
}
+
+////////////////////////////////////////////////////////////////////////
+// //
+// DbMemoryException //
+// //
+////////////////////////////////////////////////////////////////////////
+
+static const char *memory_err_desc = "Dbt not large enough for available data";
+DbMemoryException::~DbMemoryException()
+{
+}
+
+DbMemoryException::DbMemoryException(Dbt *dbt)
+: DbException(memory_err_desc, ENOMEM)
+, dbt_(dbt)
+{
+}
+
+DbMemoryException::DbMemoryException(const char *description)
+: DbException(description, ENOMEM)
+, dbt_(0)
+{
+}
+
+DbMemoryException::DbMemoryException(const char *prefix, Dbt *dbt)
+: DbException(prefix, memory_err_desc, ENOMEM)
+, dbt_(dbt)
+{
+}
+
+DbMemoryException::DbMemoryException(const char *prefix1, const char *prefix2,
+ Dbt *dbt)
+: DbException(prefix1, prefix2, ENOMEM)
+, dbt_(dbt)
+{
+}
+
+DbMemoryException::DbMemoryException(const DbMemoryException &that)
+: DbException(that)
+, dbt_(that.dbt_)
+{
+}
+
+DbMemoryException
+&DbMemoryException::operator =(const DbMemoryException &that)
+{
+ if (this != &that) {
+ DbException::operator=(that);
+ dbt_ = that.dbt_;
+ }
+ return (*this);
+}
+
+Dbt *DbMemoryException::get_dbt() const
+{
+ return (dbt_);
+}
+
+////////////////////////////////////////////////////////////////////////
+// //
+// DbDeadlockException //
+// //
+////////////////////////////////////////////////////////////////////////
+
+DbDeadlockException::~DbDeadlockException()
+{
+}
+
+DbDeadlockException::DbDeadlockException(const char *description)
+: DbException(description, DB_LOCK_DEADLOCK)
+{
+}
+
+DbDeadlockException::DbDeadlockException(const DbDeadlockException &that)
+: DbException(that)
+{
+}
+
+DbDeadlockException
+&DbDeadlockException::operator =(const DbDeadlockException &that)
+{
+ if (this != &that)
+ DbException::operator=(that);
+ return (*this);
+}
+
+////////////////////////////////////////////////////////////////////////
+// //
+// DbLockNotGrantedException //
+// //
+////////////////////////////////////////////////////////////////////////
+
+DbLockNotGrantedException::~DbLockNotGrantedException()
+{
+ delete lock_;
+}
+
+DbLockNotGrantedException::DbLockNotGrantedException(const char *prefix,
+ db_lockop_t op, db_lockmode_t mode, const Dbt *obj, const DbLock lock,
+ int index)
+: DbException(prefix, DbEnv::strerror(DB_LOCK_NOTGRANTED),
+ DB_LOCK_NOTGRANTED)
+, op_(op)
+, mode_(mode)
+, obj_(obj)
+, index_(index)
+{
+ lock_ = new DbLock(lock);
+}
+
+DbLockNotGrantedException::DbLockNotGrantedException
+ (const DbLockNotGrantedException &that)
+: DbException(that)
+{
+ op_ = that.op_;
+ mode_ = that.mode_;
+ obj_ = that.obj_;
+ lock_ = new DbLock(*that.lock_);
+ index_ = that.index_;
+}
+
+DbLockNotGrantedException
+&DbLockNotGrantedException::operator =(const DbLockNotGrantedException &that)
+{
+ if (this != &that) {
+ DbException::operator=(that);
+ op_ = that.op_;
+ mode_ = that.mode_;
+ obj_ = that.obj_;
+ lock_ = new DbLock(*that.lock_);
+ index_ = that.index_;
+ }
+ return (*this);
+}
+
+db_lockop_t DbLockNotGrantedException::get_op() const
+{
+ return op_;
+}
+
+db_lockmode_t DbLockNotGrantedException::get_mode() const
+{
+ return mode_;
+}
+
+const Dbt* DbLockNotGrantedException::get_obj() const
+{
+ return obj_;
+}
+
+DbLock* DbLockNotGrantedException::get_lock() const
+{
+ return lock_;
+}
+
+int DbLockNotGrantedException::get_index() const
+{
+ return index_;
+}
+
+
+////////////////////////////////////////////////////////////////////////
+// //
+// DbRunRecoveryException //
+// //
+////////////////////////////////////////////////////////////////////////
+
+DbRunRecoveryException::~DbRunRecoveryException()
+{
+}
+
+DbRunRecoveryException::DbRunRecoveryException(const char *description)
+: DbException(description, DB_RUNRECOVERY)
+{
+}
+
+DbRunRecoveryException::DbRunRecoveryException
+ (const DbRunRecoveryException &that)
+: DbException(that)
+{
+}
+
+DbRunRecoveryException
+&DbRunRecoveryException::operator =(const DbRunRecoveryException &that)
+{
+ if (this != &that)
+ DbException::operator=(that);
+ return (*this);
+}
diff --git a/bdb/cxx/cxx_lock.cpp b/bdb/cxx/cxx_lock.cpp
index e8ce2aa9d30..446eba49e27 100644
--- a/bdb/cxx/cxx_lock.cpp
+++ b/bdb/cxx/cxx_lock.cpp
@@ -1,86 +1,21 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1997, 1998, 1999, 2000
+ * Copyright (c) 1997-2002
* Sleepycat Software. All rights reserved.
*/
#include "db_config.h"
#ifndef lint
-static const char revid[] = "$Id: cxx_lock.cpp,v 11.9 2000/09/21 15:05:45 dda Exp $";
+static const char revid[] = "$Id: cxx_lock.cpp,v 11.17 2002/03/27 04:31:16 bostic Exp $";
#endif /* not lint */
#include <errno.h>
#include <string.h>
#include "db_cxx.h"
-#include "cxx_int.h"
-
-int DbEnv::lock_detect(u_int32_t flags, u_int32_t atype, int *aborted)
-{
- DB_ENV *env = unwrap(this);
- int err;
-
- if ((err = ::lock_detect(env, flags, atype, aborted)) != 0) {
- DB_ERROR("DbEnv::lock_detect", err, error_policy());
- return (err);
- }
- return (err);
-}
-
-int DbEnv::lock_get(u_int32_t locker, u_int32_t flags, const Dbt *obj,
- db_lockmode_t lock_mode, DbLock *lock)
-{
- DB_ENV *env = unwrap(this);
- int err;
-
- if ((err = ::lock_get(env, locker, flags, obj,
- lock_mode, &lock->lock_)) != 0) {
- DB_ERROR("DbEnv::lock_get", err, error_policy());
- return (err);
- }
- return (err);
-}
-
-int DbEnv::lock_id(u_int32_t *idp)
-{
- DB_ENV *env = unwrap(this);
- int err;
-
- if ((err = ::lock_id(env, idp)) != 0) {
- DB_ERROR("DbEnv::lock_id", err, error_policy());
- }
- return (err);
-}
-
-int DbEnv::lock_stat(DB_LOCK_STAT **statp,
- db_malloc_fcn_type db_malloc_fcn)
-{
- DB_ENV *env = unwrap(this);
- int err;
-
- if ((err = ::lock_stat(env, statp, db_malloc_fcn)) != 0) {
- DB_ERROR("DbEnv::lock_stat", err, error_policy());
- return (err);
- }
- return (0);
-}
-
-int DbEnv::lock_vec(u_int32_t locker, u_int32_t flags,
- DB_LOCKREQ list[],
- int nlist, DB_LOCKREQ **elist_returned)
-{
- DB_ENV *env = unwrap(this);
- int err;
-
- if ((err = ::lock_vec(env, locker, flags, list,
- nlist, elist_returned)) != 0) {
- DB_ERROR("DbEnv::lock_vec", err, error_policy());
- return (err);
- }
- return (err);
-}
+#include "dbinc/cxx_int.h"
////////////////////////////////////////////////////////////////////////
// //
@@ -108,18 +43,3 @@ DbLock &DbLock::operator = (const DbLock &that)
lock_ = that.lock_;
return (*this);
}
-
-int DbLock::put(DbEnv *env)
-{
- DB_ENV *envp = unwrap(env);
-
- if (!env) {
- return (EINVAL); // handle never assigned
- }
-
- int err;
- if ((err = lock_put(envp, &lock_)) != 0) {
- DB_ERROR("DbLock::put", err, env->error_policy());
- }
- return (err);
-}
diff --git a/bdb/cxx/cxx_log.cpp b/bdb/cxx/cxx_log.cpp
deleted file mode 100644
index 336b9d337f0..00000000000
--- a/bdb/cxx/cxx_log.cpp
+++ /dev/null
@@ -1,125 +0,0 @@
-/*-
- * See the file LICENSE for redistribution information.
- *
- * Copyright (c) 1997, 1998, 1999, 2000
- * Sleepycat Software. All rights reserved.
- */
-
-#include "db_config.h"
-
-#ifndef lint
-static const char revid[] = "$Id: cxx_log.cpp,v 11.9 2000/09/21 15:05:45 dda Exp $";
-#endif /* not lint */
-
-#include <errno.h>
-
-#include "db_cxx.h"
-#include "cxx_int.h"
-
-////////////////////////////////////////////////////////////////////////
-// //
-// DbLog //
-// //
-////////////////////////////////////////////////////////////////////////
-
-int DbEnv::log_archive(char **list[], u_int32_t flags,
- db_malloc_fcn_type db_malloc_fcn)
-{
- int err;
- DB_ENV *env = unwrap(this);
-
- if ((err = ::log_archive(env, list, flags, db_malloc_fcn)) != 0) {
- DB_ERROR("DbEnv::log_archive", err, error_policy());
- return (err);
- }
- return (0);
-}
-
-int DbEnv::log_compare(const DbLsn *lsn0, const DbLsn *lsn1)
-{
- return (::log_compare(lsn0, lsn1));
-}
-
-int DbEnv::log_file(DbLsn *lsn, char *namep, size_t len)
-{
- int err;
- DB_ENV *env = unwrap(this);
-
- if ((err = ::log_file(env, lsn, namep, len)) != 0) {
- DB_ERROR("DbEnv::log_file", err, error_policy());
- return (err);
- }
- return (0);
-}
-
-int DbEnv::log_flush(const DbLsn *lsn)
-{
- int err;
- DB_ENV *env = unwrap(this);
-
- if ((err = ::log_flush(env, lsn)) != 0) {
- DB_ERROR("DbEnv::log_flush", err, error_policy());
- return (err);
- }
- return (0);
-}
-
-int DbEnv::log_get(DbLsn *lsn, Dbt *data, u_int32_t flags)
-{
- int err;
- DB_ENV *env = unwrap(this);
-
- if ((err = ::log_get(env, lsn, data, flags)) != 0) {
- DB_ERROR("DbEnv::log_get", err, error_policy());
- return (err);
- }
- return (0);
-}
-
-int DbEnv::log_put(DbLsn *lsn, const Dbt *data, u_int32_t flags)
-{
- int err = 0;
- DB_ENV *env = unwrap(this);
-
- if ((err = ::log_put(env, lsn, data, flags)) != 0) {
- DB_ERROR("DbEnv::log_put", err, error_policy());
- return (err);
- }
- return (0);
-}
-
-int DbEnv::log_register(Db *dbp, const char *name)
-{
- int err = 0;
- DB_ENV *env = unwrap(this);
-
- if ((err = ::log_register(env, unwrap(dbp), name)) != 0) {
- DB_ERROR("DbEnv::log_register", err, error_policy());
- return (err);
- }
- return (0);
-}
-
-int DbEnv::log_stat(DB_LOG_STAT **spp, db_malloc_fcn_type db_malloc_fcn)
-{
- int err = 0;
- DB_ENV *env = unwrap(this);
-
- if ((err = ::log_stat(env, spp, db_malloc_fcn)) != 0) {
- DB_ERROR("DbEnv::log_stat", err, error_policy());
- return (err);
- }
- return (0);
-}
-
-int DbEnv::log_unregister(Db *dbp)
-{
- int err;
- DB_ENV *env = unwrap(this);
-
- if ((err = ::log_unregister(env, unwrap(dbp))) != 0) {
- DB_ERROR("DbEnv::log_unregister", err, error_policy());
- return (err);
- }
- return (0);
-}
diff --git a/bdb/cxx/cxx_logc.cpp b/bdb/cxx/cxx_logc.cpp
new file mode 100644
index 00000000000..d1fe83dd58b
--- /dev/null
+++ b/bdb/cxx/cxx_logc.cpp
@@ -0,0 +1,65 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: cxx_logc.cpp,v 11.8 2002/07/03 21:03:53 bostic Exp $";
+#endif /* not lint */
+
+#include <errno.h>
+#include <string.h>
+
+#include "db_cxx.h"
+#include "dbinc/cxx_int.h"
+
+#include "db_int.h"
+#include "dbinc/db_page.h"
+#include "dbinc_auto/db_auto.h"
+#include "dbinc_auto/crdel_auto.h"
+#include "dbinc/db_dispatch.h"
+#include "dbinc_auto/db_ext.h"
+#include "dbinc_auto/common_ext.h"
+
+// It's private, and should never be called,
+// but some compilers need it resolved
+//
+DbLogc::~DbLogc()
+{
+}
+
+// The name _flags prevents a name clash with __db_log_cursor::flags
+int DbLogc::close(u_int32_t _flags)
+{
+ DB_LOGC *logc = this;
+ int ret;
+
+ ret = logc->close(logc, _flags);
+
+ if (!DB_RETOK_STD(ret))
+ DB_ERROR("DbLogc::close", ret, ON_ERROR_UNKNOWN);
+
+ return (ret);
+}
+
+// The name _flags prevents a name clash with __db_log_cursor::flags
+int DbLogc::get(DbLsn *lsn, Dbt *data, u_int32_t _flags)
+{
+ DB_LOGC *logc = this;
+ int ret;
+
+ ret = logc->get(logc, lsn, data, _flags);
+
+ if (!DB_RETOK_LGGET(ret)) {
+ if (ret == ENOMEM && DB_OVERFLOWED_DBT(data))
+ DB_ERROR_DBT("DbLogc::get", data, ON_ERROR_UNKNOWN);
+ else
+ DB_ERROR("DbLogc::get", ret, ON_ERROR_UNKNOWN);
+ }
+
+ return (ret);
+}
diff --git a/bdb/cxx/cxx_mpool.cpp b/bdb/cxx/cxx_mpool.cpp
index 22f4735e333..3eb78d03ff4 100644
--- a/bdb/cxx/cxx_mpool.cpp
+++ b/bdb/cxx/cxx_mpool.cpp
@@ -1,20 +1,51 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1997, 1998, 1999, 2000
+ * Copyright (c) 1997-2002
* Sleepycat Software. All rights reserved.
*/
#include "db_config.h"
#ifndef lint
-static const char revid[] = "$Id: cxx_mpool.cpp,v 11.11 2000/09/21 15:05:45 dda Exp $";
+static const char revid[] = "$Id: cxx_mpool.cpp,v 11.20 2002/07/03 21:03:53 bostic Exp $";
#endif /* not lint */
#include <errno.h>
#include "db_cxx.h"
-#include "cxx_int.h"
+#include "dbinc/cxx_int.h"
+
+#include "db_int.h"
+
+// Helper macros for simple methods that pass through to the
+// underlying C method. It may return an error or raise an exception.
+// Note this macro expects that input _argspec is an argument
+// list element (e.g., "char *arg") and that _arglist is the arguments
+// that should be passed through to the C method (e.g., "(mpf, arg)")
+//
+#define DB_MPOOLFILE_METHOD(_name, _argspec, _arglist, _retok) \
+int DbMpoolFile::_name _argspec \
+{ \
+ int ret; \
+ DB_MPOOLFILE *mpf = unwrap(this); \
+ \
+ if (mpf == NULL) \
+ ret = EINVAL; \
+ else \
+ ret = mpf->_name _arglist; \
+ if (!_retok(ret)) \
+ DB_ERROR("DbMpoolFile::"#_name, ret, ON_ERROR_UNKNOWN); \
+ return (ret); \
+}
+
+#define DB_MPOOLFILE_METHOD_VOID(_name, _argspec, _arglist) \
+void DbMpoolFile::_name _argspec \
+{ \
+ DB_MPOOLFILE *mpf = unwrap(this); \
+ \
+ mpf->_name _arglist; \
+}
////////////////////////////////////////////////////////////////////////
// //
@@ -31,150 +62,49 @@ DbMpoolFile::~DbMpoolFile()
{
}
-int DbMpoolFile::open(DbEnv *envp, const char *file,
- u_int32_t flags, int mode, size_t pagesize,
- DB_MPOOL_FINFO *finfop, DbMpoolFile **result)
-{
- int err;
-
- DB_MPOOLFILE *mpf;
- DB_ENV *env = unwrap(envp);
-
- if ((err = ::memp_fopen(env, file, flags, mode, pagesize,
- finfop, &mpf)) != 0) {
- DB_ERROR("DbMpoolFile::open", err, envp->error_policy());
- return (err);
- }
- *result = new DbMpoolFile();
- (*result)->imp_ = wrap(mpf);
- return (0);
-}
-
-int DbMpoolFile::close()
+int DbMpoolFile::close(u_int32_t flags)
{
DB_MPOOLFILE *mpf = unwrap(this);
- int err = 0;
- if (!mpf) {
- err = EINVAL;
- }
- else if ((err = ::memp_fclose(mpf)) != 0) {
- DB_ERROR("DbMpoolFile::close", err, ON_ERROR_UNKNOWN);
- return (err);
- }
+ int ret;
+
+ if (mpf == NULL)
+ ret = EINVAL;
+ else
+ ret = mpf->close(mpf, flags);
+
imp_ = 0; // extra safety
// This may seem weird, but is legal as long as we don't access
// any data before returning.
- //
delete this;
- return (0);
-}
-
-int DbMpoolFile::get(db_pgno_t *pgnoaddr, u_int32_t flags, void *pagep)
-{
- DB_MPOOLFILE *mpf = unwrap(this);
- int err = 0;
- if (!mpf) {
- err = EINVAL;
- }
- else if ((err = ::memp_fget(mpf, pgnoaddr, flags, pagep)) != 0) {
- DB_ERROR("DbMpoolFile::get", err, ON_ERROR_UNKNOWN);
- }
- return (err);
-}
-
-int DbMpoolFile::put(void *pgaddr, u_int32_t flags)
-{
- DB_MPOOLFILE *mpf = unwrap(this);
- int err = 0;
- if (!mpf) {
- err = EINVAL;
- }
- else if ((err = ::memp_fput(mpf, pgaddr, flags)) != 0) {
- DB_ERROR("DbMpoolFile::put", err, ON_ERROR_UNKNOWN);
- }
- return (err);
-}
-
-int DbMpoolFile::set(void *pgaddr, u_int32_t flags)
-{
- DB_MPOOLFILE *mpf = unwrap(this);
- int err = 0;
- if (!mpf) {
- err = EINVAL;
- }
- else if ((err = ::memp_fset(mpf, pgaddr, flags)) != 0) {
- DB_ERROR("DbMpoolFile::set", err, ON_ERROR_UNKNOWN);
- }
- return (err);
-}
-
-int DbMpoolFile::sync()
-{
- DB_MPOOLFILE *mpf = unwrap(this);
- int err = 0;
- if (!mpf) {
- err = EINVAL;
- }
- else if ((err = ::memp_fsync(mpf)) != 0 && err != DB_INCOMPLETE) {
- DB_ERROR("DbMpoolFile::sync", err, ON_ERROR_UNKNOWN);
- }
- return (err);
-}
-////////////////////////////////////////////////////////////////////////
-// //
-// DbMpool //
-// //
-////////////////////////////////////////////////////////////////////////
+ if (!DB_RETOK_STD(ret))
+ DB_ERROR("DbMpoolFile::close", ret, ON_ERROR_UNKNOWN);
-int DbEnv::memp_register(int ftype,
- pgin_fcn_type pgin_fcn,
- pgout_fcn_type pgout_fcn)
-{
- DB_ENV *env = unwrap(this);
- int err = 0;
-
- if ((err = ::memp_register(env, ftype, pgin_fcn, pgout_fcn)) != 0) {
- DB_ERROR("DbEnv::memp_register", err, error_policy());
- return (err);
- }
- return (err);
+ return (ret);
}
-int DbEnv::memp_stat(DB_MPOOL_STAT **gsp, DB_MPOOL_FSTAT ***fsp,
- db_malloc_fcn_type db_malloc_fcn)
-{
- DB_ENV *env = unwrap(this);
- int err = 0;
-
- if ((err = ::memp_stat(env, gsp, fsp, db_malloc_fcn)) != 0) {
- DB_ERROR("DbEnv::memp_stat", err, error_policy());
- return (err);
- }
- return (err);
-}
-
-int DbEnv::memp_sync(DbLsn *sn)
-{
- DB_ENV *env = unwrap(this);
- int err = 0;
-
- if ((err = ::memp_sync(env, sn)) != 0 && err != DB_INCOMPLETE) {
- DB_ERROR("DbEnv::memp_sync", err, error_policy());
- return (err);
- }
- return (err);
-}
-
-int DbEnv::memp_trickle(int pct, int *nwrotep)
-{
- DB_ENV *env = unwrap(this);
- int err = 0;
-
- if ((err = ::memp_trickle(env, pct, nwrotep)) != 0) {
- DB_ERROR("DbEnv::memp_trickle", err, error_policy());
- return (err);
- }
- return (err);
-}
+DB_MPOOLFILE_METHOD(get, (db_pgno_t *pgnoaddr, u_int32_t flags, void *pagep),
+ (mpf, pgnoaddr, flags, pagep), DB_RETOK_MPGET)
+DB_MPOOLFILE_METHOD_VOID(last_pgno, (db_pgno_t *pgnoaddr), (mpf, pgnoaddr))
+DB_MPOOLFILE_METHOD(open,
+ (const char *file, u_int32_t flags, int mode, size_t pagesize),
+ (mpf, file, flags, mode, pagesize), DB_RETOK_STD)
+DB_MPOOLFILE_METHOD(put, (void *pgaddr, u_int32_t flags),
+ (mpf, pgaddr, flags), DB_RETOK_STD)
+DB_MPOOLFILE_METHOD_VOID(refcnt, (db_pgno_t *pgnoaddr), (mpf, pgnoaddr))
+DB_MPOOLFILE_METHOD(set, (void *pgaddr, u_int32_t flags),
+ (mpf, pgaddr, flags), DB_RETOK_STD)
+DB_MPOOLFILE_METHOD(set_clear_len, (u_int32_t len),
+ (mpf, len), DB_RETOK_STD)
+DB_MPOOLFILE_METHOD(set_fileid, (u_int8_t *fileid),
+ (mpf, fileid), DB_RETOK_STD)
+DB_MPOOLFILE_METHOD(set_ftype, (int ftype),
+ (mpf, ftype), DB_RETOK_STD)
+DB_MPOOLFILE_METHOD(set_lsn_offset, (int32_t offset),
+ (mpf, offset), DB_RETOK_STD)
+DB_MPOOLFILE_METHOD(set_pgcookie, (DBT *dbt),
+ (mpf, dbt), DB_RETOK_STD)
+DB_MPOOLFILE_METHOD_VOID(set_unlink, (int ul), (mpf, ul))
+DB_MPOOLFILE_METHOD(sync, (),
+ (mpf), DB_RETOK_STD)
diff --git a/bdb/cxx/cxx_table.cpp b/bdb/cxx/cxx_table.cpp
deleted file mode 100644
index b7b335d26e9..00000000000
--- a/bdb/cxx/cxx_table.cpp
+++ /dev/null
@@ -1,808 +0,0 @@
-/*-
- * See the file LICENSE for redistribution information.
- *
- * Copyright (c) 1997, 1998, 1999, 2000
- * Sleepycat Software. All rights reserved.
- */
-
-#include "db_config.h"
-
-#ifndef lint
-static const char revid[] = "$Id: cxx_table.cpp,v 11.35 2001/01/11 18:19:49 bostic Exp $";
-#endif /* not lint */
-
-#include <errno.h>
-#include <string.h>
-
-#include "db_cxx.h"
-#include "cxx_int.h"
-
-#include "db_int.h"
-#include "db_page.h"
-#include "db_ext.h"
-#include "common_ext.h"
-
-////////////////////////////////////////////////////////////////////////
-// //
-// Db //
-// //
-////////////////////////////////////////////////////////////////////////
-
-// A truism for the DbEnv object is that there is a valid
-// DB_ENV handle from the constructor until close().
-// After the close, the DB handle is invalid and
-// no operations are permitted on the Db (other than
-// destructor). Leaving the Db handle open and not
-// doing a close is generally considered an error.
-//
-// We used to allow Db objects to be closed and reopened.
-// This implied always keeping a valid DB object, and
-// coordinating the open objects between Db/DbEnv turned
-// out to be overly complicated. Now we do not allow this.
-
-Db::Db(DbEnv *env, u_int32_t flags)
-: imp_(0)
-, env_(env)
-, construct_error_(0)
-, flags_(0)
-, construct_flags_(flags)
-{
- if (env_ == 0)
- flags_ |= DB_CXX_PRIVATE_ENV;
- initialize();
-}
-
-// Note: if the user has not closed, we call _destroy_check
-// to warn against this non-safe programming practice.
-// We can't close, because the environment may already
-// be closed/destroyed.
-//
-Db::~Db()
-{
- DB *db;
-
- db = unwrap(this);
- if (db != NULL) {
- DbEnv::_destroy_check("Db", 0);
- cleanup();
- }
-}
-
-// private method to initialize during constructor.
-// initialize must create a backing DB object,
-// and if that creates a new DB_ENV, it must be tied to a new DbEnv.
-// If there is an error, construct_error_ is set; this is examined
-// during open.
-//
-int Db::initialize()
-{
- u_int32_t cxx_flags;
- DB *db;
- int err;
- DB_ENV *cenv = unwrap(env_);
-
- cxx_flags = construct_flags_ & DB_CXX_NO_EXCEPTIONS;
-
- // Create a new underlying DB object.
- // We rely on the fact that if a NULL DB_ENV* is given,
- // one is allocated by DB.
- //
- if ((err = db_create(&db, cenv,
- construct_flags_ & ~cxx_flags)) != 0) {
- construct_error_ = err;
- return (err);
- }
-
- // Associate the DB with this object
- imp_ = wrap(db);
- db->cj_internal = this;
-
- // Create a new DbEnv from a DB_ENV* if it was created locally.
- // It is deleted in Db::close().
- //
- if ((flags_ & DB_CXX_PRIVATE_ENV) != 0)
- env_ = new DbEnv(db->dbenv, cxx_flags);
-
- return (0);
-}
-
-// private method to cleanup after destructor or during close.
-// If the environment was created by this Db object, we optionally
-// delete it, or return it so the caller can delete it after
-// last use.
-//
-void Db::cleanup()
-{
- DB *db = unwrap(this);
-
- if (db != NULL) {
- // extra safety
- db->cj_internal = 0;
- imp_ = 0;
-
- // we must dispose of the DbEnv object if
- // we created it. This will be the case
- // if a NULL DbEnv was passed into the constructor.
- // The underlying DB_ENV object will be inaccessible
- // after the close, so we must clean it up now.
- //
- if ((flags_ & DB_CXX_PRIVATE_ENV) != 0) {
- env_->cleanup();
- delete env_;
- env_ = 0;
- }
- }
- construct_error_ = 0;
-}
-
-// Return a tristate value corresponding to whether we should
-// throw exceptions on errors:
-// ON_ERROR_RETURN
-// ON_ERROR_THROW
-// ON_ERROR_UNKNOWN
-//
-int Db::error_policy()
-{
- if (env_ != NULL)
- return (env_->error_policy());
- else {
- // If the env_ is null, that means that the user
- // did not attach an environment, so the correct error
- // policy can be deduced from constructor flags
- // for this Db.
- //
- if ((construct_flags_ & DB_CXX_NO_EXCEPTIONS) != 0) {
- return (ON_ERROR_RETURN);
- }
- else {
- return (ON_ERROR_THROW);
- }
- }
-}
-
-int Db::close(u_int32_t flags)
-{
- DB *db = unwrap(this);
- int err;
-
- // after a DB->close (no matter if success or failure),
- // the underlying DB object must not be accessed,
- // so we clean up in advance.
- //
- cleanup();
-
- // It's safe to throw an error after the close,
- // since our error mechanism does not peer into
- // the DB* structures.
- //
- if ((err = db->close(db, flags)) != 0 && err != DB_INCOMPLETE)
- DB_ERROR("Db::close", err, error_policy());
-
- return (err);
-}
-
-int Db::cursor(DbTxn *txnid, Dbc **cursorp, u_int32_t flags)
-{
- DB *db = unwrap(this);
- DBC *dbc = 0;
- int err;
-
- if ((err = db->cursor(db, unwrap(txnid), &dbc, flags)) != 0) {
- DB_ERROR("Db::cursor", err, error_policy());
- return (err);
- }
-
- // The following cast implies that Dbc can be no larger than DBC
- *cursorp = (Dbc*)dbc;
- return (0);
-}
-
-int Db::del(DbTxn *txnid, Dbt *key, u_int32_t flags)
-{
- DB *db = unwrap(this);
- int err;
-
- if ((err = db->del(db, unwrap(txnid), key, flags)) != 0) {
- // DB_NOTFOUND is a "normal" return, so should not be
- // thrown as an error
- //
- if (err != DB_NOTFOUND) {
- DB_ERROR("Db::del", err, error_policy());
- return (err);
- }
- }
- return (err);
-}
-
-void Db::err(int error, const char *format, ...)
-{
- va_list args;
- DB *db = unwrap(this);
-
- va_start(args, format);
- __db_real_err(db->dbenv, error, 1, 1, format, args);
- va_end(args);
-}
-
-void Db::errx(const char *format, ...)
-{
- va_list args;
- DB *db = unwrap(this);
-
- va_start(args, format);
- __db_real_err(db->dbenv, 0, 0, 1, format, args);
- va_end(args);
-}
-
-int Db::fd(int *fdp)
-{
- DB *db = unwrap(this);
- int err;
-
- if ((err = db->fd(db, fdp)) != 0) {
- DB_ERROR("Db::fd", err, error_policy());
- return (err);
- }
- return (0);
-}
-
-// This is a 'glue' function declared as extern "C" so it will
-// be compatible with picky compilers that do not allow mixing
-// of function pointers to 'C' functions with function pointers
-// to C++ functions.
-//
-extern "C"
-void _db_feedback_intercept_c(DB *db, int opcode, int pct)
-{
- Db::_feedback_intercept(db, opcode, pct);
-}
-
-//static
-void Db::_feedback_intercept(DB *db, int opcode, int pct)
-{
- if (db == 0) {
- DB_ERROR("Db::feedback_callback", EINVAL, ON_ERROR_UNKNOWN);
- return;
- }
- Db *cxxdb = (Db *)db->cj_internal;
- if (cxxdb == 0) {
- DB_ERROR("Db::feedback_callback", EINVAL, ON_ERROR_UNKNOWN);
- return;
- }
- if (cxxdb->feedback_callback_ == 0) {
- DB_ERROR("Db::feedback_callback", EINVAL, cxxdb->error_policy());
- return;
- }
- (*cxxdb->feedback_callback_)(cxxdb, opcode, pct);
-}
-
-int Db::set_feedback(void (*arg)(Db *, int, int))
-{
- DB *db = unwrap(this);
-
- feedback_callback_ = arg;
-
- return ((*(db->set_feedback))(db, _db_feedback_intercept_c));
-}
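// Hypothetical registration example (a sketch, not from the library
// source): the caller supplies an ordinary C++ function, and the
// extern "C" trampoline above forwards the C library's callback to it.
// The helper name is made up for illustration.
//
static void show_progress(Db *dbp, int opcode, int pct)
{
	// opcode identifies the operation in progress; pct reports
	// progress as a percentage.
	(void)dbp;
	(void)opcode;
	(void)pct;
}
//
// Registered on an open handle with:  db->set_feedback(show_progress);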
-
-// This is a 'glue' function declared as extern "C" so it will
-// be compatible with picky compilers that do not allow mixing
-// of function pointers to 'C' functions with function pointers
-// to C++ functions.
-//
-extern "C"
-int _db_append_recno_intercept_c(DB *db, DBT *data, db_recno_t recno)
-{
- return (Db::_append_recno_intercept(db, data, recno));
-}
-
-//static
-int Db::_append_recno_intercept(DB *db, DBT *data, db_recno_t recno)
-{
- int err;
-
- if (db == 0) {
- DB_ERROR("Db::append_recno_callback", EINVAL, ON_ERROR_UNKNOWN);
- return (EINVAL);
- }
- Db *cxxdb = (Db *)db->cj_internal;
- if (cxxdb == 0) {
- DB_ERROR("Db::append_recno_callback", EINVAL, ON_ERROR_UNKNOWN);
- return (EINVAL);
- }
- if (cxxdb->append_recno_callback_ == 0) {
- DB_ERROR("Db::append_recno_callback", EINVAL, cxxdb->error_policy());
- return (EINVAL);
- }
-
- // making these copies is slow but portable.
- // Another alternative is to cast the DBT* manufactured
- // by the C layer to a Dbt*. It 'should be' safe since
- // Dbt is a thin shell over DBT, adding no extra data,
- // but is nonportable, and could lead to errors if anything
- // were added to the Dbt class.
- //
- Dbt cxxdbt;
- memcpy((DBT *)&cxxdbt, data, sizeof(DBT));
- err = (*cxxdb->append_recno_callback_)(cxxdb, &cxxdbt, recno);
- memcpy(data, (DBT *)&cxxdbt, sizeof(DBT));
- return (err);
-}
-
-int Db::set_append_recno(int (*arg)(Db *, Dbt *, db_recno_t))
-{
- DB *db = unwrap(this);
-
- append_recno_callback_ = arg;
-
- return ((*(db->set_append_recno))(db, _db_append_recno_intercept_c));
-}
-
-int Db::get(DbTxn *txnid, Dbt *key, Dbt *value, u_int32_t flags)
-{
- DB *db = unwrap(this);
- int err;
-
- if ((err = db->get(db, unwrap(txnid), key, value, flags)) != 0) {
- // DB_NOTFOUND and DB_KEYEMPTY are "normal" returns,
- // so should not be thrown as an error
- //
- if (err != DB_NOTFOUND && err != DB_KEYEMPTY) {
- DB_ERROR("Db::get", err, error_policy());
- return (err);
- }
- }
- return (err);
-}
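// Illustrative lookup sketch (not part of the original file): because
// DB_NOTFOUND comes back as a return value rather than an exception,
// callers typically test for it explicitly. The key contents and the
// helper name are hypothetical.
//
static int lookup_item(Db *dbp, DbTxn *txn)
{
	Dbt key((void *)"fruit", 5);	// made-up key
	Dbt value;
	int ret = dbp->get(txn, &key, &value, 0);

	if (ret == DB_NOTFOUND)
		return (0);	// absent: a normal outcome, nothing thrown
	return (ret);		// 0 on success, otherwise a real error
}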
-
-int Db::get_byteswapped() const
-{
- DB *db = (DB *)unwrapConst(this);
- return (db->get_byteswapped(db));
-}
-
-DBTYPE Db::get_type() const
-{
- DB *db = (DB *)unwrapConst(this);
- return ((DBTYPE)db->get_type(db));
-}
-
-int Db::join(Dbc **curslist, Dbc **cursorp, u_int32_t flags)
-{
- // Dbc is a "compatible" subclass of DBC -
- // that is, no virtual functions or even extra data members,
- // so this cast, although technically non-portable,
- // "should" always be okay.
- //
- DBC **list = (DBC **)(curslist);
- DB *db = unwrap(this);
- DBC *dbc = 0;
- int err;
-
- if ((err = db->join(db, list, &dbc, flags)) != 0) {
- DB_ERROR("Db::join_cursor", err, error_policy());
- return (err);
- }
- *cursorp = (Dbc*)dbc;
- return (0);
-}
-
-int Db::key_range(DbTxn *txnid, Dbt *key,
- DB_KEY_RANGE *results, u_int32_t flags)
-{
- DB *db = unwrap(this);
- int err;
-
- if ((err = db->key_range(db, unwrap(txnid), key,
- results, flags)) != 0) {
- DB_ERROR("Db::key_range", err, error_policy());
- return (err);
- }
- return (0);
-}
-
-// If an error occurred during the constructor, report it now.
-// Otherwise, call the underlying DB->open method.
-//
-int Db::open(const char *file, const char *database,
- DBTYPE type, u_int32_t flags, int mode)
-{
- int err;
- DB *db = unwrap(this);
-
- if ((err = construct_error_) != 0)
- DB_ERROR("Db::open", construct_error_, error_policy());
- else if ((err = db->open(db, file, database, type, flags, mode)) != 0)
- DB_ERROR("Db::open", err, error_policy());
-
- return (err);
-}
-
-int Db::put(DbTxn *txnid, Dbt *key, Dbt *value, u_int32_t flags)
-{
- int err;
- DB *db = unwrap(this);
-
- if ((err = db->put(db, unwrap(txnid), key, value, flags)) != 0) {
-
- // DB_KEYEXIST is a "normal" return, so should not be
- // thrown as an error
- //
- if (err != DB_KEYEXIST) {
- DB_ERROR("Db::put", err, error_policy());
- return (err);
- }
- }
- return (err);
-}
-
-int Db::rename(const char *file, const char *database,
- const char *newname, u_int32_t flags)
-{
- int err = 0;
- DB *db = unwrap(this);
-
- if (!db) {
- DB_ERROR("Db::rename", EINVAL, error_policy());
- return (EINVAL);
- }
-
- // after a DB->rename (no matter if success or failure),
- // the underlying DB object must not be accessed,
- // so we clean up in advance.
- //
- cleanup();
-
- if ((err = db->rename(db, file, database, newname, flags)) != 0) {
- DB_ERROR("Db::rename", err, error_policy());
- return (err);
- }
- return (0);
-}
-
-int Db::remove(const char *file, const char *database, u_int32_t flags)
-{
- int err = 0;
- DB *db = unwrap(this);
-
- if (!db) {
- DB_ERROR("Db::remove", EINVAL, error_policy());
- return (EINVAL);
- }
-
- // after a DB->remove (no matter if success or failure),
- // the underlying DB object must not be accessed,
- // so we clean up in advance.
- //
- cleanup();
-
- if ((err = db->remove(db, file, database, flags)) != 0)
- DB_ERROR("Db::remove", err, error_policy());
-
- return (err);
-}
-
-int Db::stat(void *sp, db_malloc_fcn_type db_malloc_fcn, u_int32_t flags)
-{
- int err;
- DB *db = unwrap(this);
-
- if (!db) {
- DB_ERROR("Db::stat", EINVAL, error_policy());
- return (EINVAL);
- }
- if ((err = db->stat(db, sp, db_malloc_fcn, flags)) != 0) {
- DB_ERROR("Db::stat", err, error_policy());
- return (err);
- }
- return (0);
-}
-
-int Db::sync(u_int32_t flags)
-{
- int err;
- DB *db = unwrap(this);
-
- if (!db) {
- DB_ERROR("Db::sync", EINVAL, error_policy());
- return (EINVAL);
- }
- if ((err = db->sync(db, flags)) != 0 && err != DB_INCOMPLETE) {
- DB_ERROR("Db::sync", err, error_policy());
- return (err);
- }
- return (err);
-}
-
-int Db::upgrade(const char *name, u_int32_t flags)
-{
- int err;
- DB *db = unwrap(this);
-
- if (!db) {
- DB_ERROR("Db::upgrade", EINVAL, error_policy());
- return (EINVAL);
- }
- if ((err = db->upgrade(db, name, flags)) != 0) {
- DB_ERROR("Db::upgrade", err, error_policy());
- return (err);
- }
- return (0);
-}
-
-static int _verify_callback_cxx(void *handle, const void *str_arg)
-{
- char *str;
- ostream *out;
-
- str = (char *)str_arg;
- out = (ostream *)handle;
-
- (*out) << str;
- if (out->fail())
- return (EIO);
-
- return (0);
-}
-
-// This is a 'glue' function declared as extern "C" so it will
-// be compatible with picky compilers that do not allow mixing
-// of function pointers to 'C' functions with function pointers
-// to C++ functions.
-//
-extern "C"
-int _verify_callback_c(void *handle, const void *str_arg)
-{
- return (_verify_callback_cxx(handle, str_arg));
-}
-
-int Db::verify(const char *name, const char *subdb,
- ostream *ostr, u_int32_t flags)
-{
- int err;
- DB *db = unwrap(this);
-
- if (!db) {
- DB_ERROR("Db::verify", EINVAL, error_policy());
- return (EINVAL);
- }
- if ((err = __db_verify_internal(db, name, subdb, ostr,
- _verify_callback_c, flags)) != 0) {
- DB_ERROR("Db::verify", err, error_policy());
- return (err);
- }
- return (0);
-}
-
-// This is a variant of the DB_WO_ACCESS macro to define a simple set_
-// method calling the underlying C method, but unlike a simple
-// set method, it may return an error or raise an exception.
-// Note this macro expects that input _argspec is an argument
-// list element (e.g. "char *arg") defined in terms of "arg".
-//
-#define DB_DB_ACCESS(_name, _argspec) \
-\
-int Db::set_##_name(_argspec) \
-{ \
- int ret; \
- DB *db = unwrap(this); \
- \
- if ((ret = (*(db->set_##_name))(db, arg)) != 0) { \
- DB_ERROR("Db::set_" # _name, ret, error_policy()); \
- } \
- return (ret); \
-}
-
-#define DB_DB_ACCESS_NORET(_name, _argspec) \
- \
-void Db::set_##_name(_argspec) \
-{ \
- DB *db = unwrap(this); \
- \
- (*(db->set_##_name))(db, arg); \
- return; \
-}
-
-DB_DB_ACCESS(bt_compare, bt_compare_fcn_type arg)
-DB_DB_ACCESS(bt_maxkey, u_int32_t arg)
-DB_DB_ACCESS(bt_minkey, u_int32_t arg)
-DB_DB_ACCESS(bt_prefix, bt_prefix_fcn_type arg)
-DB_DB_ACCESS(dup_compare, dup_compare_fcn_type arg)
-DB_DB_ACCESS_NORET(errfile, FILE *arg)
-DB_DB_ACCESS_NORET(errpfx, const char *arg)
-DB_DB_ACCESS(flags, u_int32_t arg)
-DB_DB_ACCESS(h_ffactor, u_int32_t arg)
-DB_DB_ACCESS(h_hash, h_hash_fcn_type arg)
-DB_DB_ACCESS(h_nelem, u_int32_t arg)
-DB_DB_ACCESS(lorder, int arg)
-DB_DB_ACCESS(malloc, db_malloc_fcn_type arg)
-DB_DB_ACCESS(pagesize, u_int32_t arg)
-DB_DB_ACCESS(realloc, db_realloc_fcn_type arg)
-DB_DB_ACCESS(re_delim, int arg)
-DB_DB_ACCESS(re_len, u_int32_t arg)
-DB_DB_ACCESS(re_pad, int arg)
-DB_DB_ACCESS(re_source, char *arg)
-DB_DB_ACCESS(q_extentsize, u_int32_t arg)
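// For reference, expanding one of the lines above by hand shows the
// shape of the generated method; DB_DB_ACCESS(bt_minkey, u_int32_t arg)
// produces (roughly):
//
//	int Db::set_bt_minkey(u_int32_t arg)
//	{
//		int ret;
//		DB *db = unwrap(this);
//
//		if ((ret = (*(db->set_bt_minkey))(db, arg)) != 0) {
//			DB_ERROR("Db::set_bt_minkey", ret, error_policy());
//		}
//		return (ret);
//	}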
-
-// Here are the set methods that don't fit the above mold.
-//
-
-void Db::set_errcall(void (*arg)(const char *, char *))
-{
- env_->set_errcall(arg);
-}
-
-int Db::set_cachesize(u_int32_t gbytes, u_int32_t bytes, int ncache)
-{
- int ret;
- DB *db = unwrap(this);
-
- if ((ret = (*(db->set_cachesize))(db, gbytes, bytes, ncache)) != 0) {
- DB_ERROR("Db::set_cachesize", ret, error_policy());
- }
- return (ret);
-}
-
-int Db::set_paniccall(void (*callback)(DbEnv *, int))
-{
- return (env_->set_paniccall(callback));
-}
-
-void Db::set_error_stream(ostream *error_stream)
-{
- env_->set_error_stream(error_stream);
-}
-
-////////////////////////////////////////////////////////////////////////
-// //
-// Dbc //
-// //
-////////////////////////////////////////////////////////////////////////
-
-// It's private, and should never be called, but VC4.0 needs it resolved
-//
-Dbc::~Dbc()
-{
-}
-
-int Dbc::close()
-{
- DBC *cursor = this;
- int err;
-
- if ((err = cursor->c_close(cursor)) != 0) {
- DB_ERROR("Db::close", err, ON_ERROR_UNKNOWN);
- return (err);
- }
- return (0);
-}
-
-int Dbc::count(db_recno_t *countp, u_int32_t flags_arg)
-{
- DBC *cursor = this;
- int err;
-
- if ((err = cursor->c_count(cursor, countp, flags_arg)) != 0) {
- DB_ERROR("Db::count", err, ON_ERROR_UNKNOWN);
- return (err);
- }
- return (0);
-}
-
-int Dbc::del(u_int32_t flags_arg)
-{
- DBC *cursor = this;
- int err;
-
- if ((err = cursor->c_del(cursor, flags_arg)) != 0) {
-
- // DB_KEYEMPTY is a "normal" return, so should not be
- // thrown as an error
- //
- if (err != DB_KEYEMPTY) {
- DB_ERROR("Db::del", err, ON_ERROR_UNKNOWN);
- return (err);
- }
- }
- return (err);
-}
-
-int Dbc::dup(Dbc** cursorp, u_int32_t flags_arg)
-{
- DBC *cursor = this;
- DBC *new_cursor = 0;
- int err;
-
- if ((err = cursor->c_dup(cursor, &new_cursor, flags_arg)) != 0) {
- DB_ERROR("Db::dup", err, ON_ERROR_UNKNOWN);
- return (err);
- }
-
- // The following cast implies that Dbc can be no larger than DBC
- *cursorp = (Dbc*)new_cursor;
- return (0);
-}
-
-int Dbc::get(Dbt* key, Dbt *data, u_int32_t flags_arg)
-{
- DBC *cursor = this;
- int err;
-
- if ((err = cursor->c_get(cursor, key, data, flags_arg)) != 0) {
-
- // DB_NOTFOUND and DB_KEYEMPTY are "normal" returns,
- // so should not be thrown as an error
- //
- if (err != DB_NOTFOUND && err != DB_KEYEMPTY) {
- DB_ERROR("Db::get", err, ON_ERROR_UNKNOWN);
- return (err);
- }
- }
- return (err);
-}
-
-int Dbc::put(Dbt* key, Dbt *data, u_int32_t flags_arg)
-{
- DBC *cursor = this;
- int err;
-
- if ((err = cursor->c_put(cursor, key, data, flags_arg)) != 0) {
-
- // DB_KEYEXIST is a "normal" return, so should not be
- // thrown as an error
- //
- if (err != DB_KEYEXIST) {
- DB_ERROR("Db::put", err, ON_ERROR_UNKNOWN);
- return (err);
- }
- }
- return (err);
-}
-
-////////////////////////////////////////////////////////////////////////
-// //
-// Dbt //
-// //
-////////////////////////////////////////////////////////////////////////
-
-Dbt::Dbt()
-{
- DBT *dbt = this;
- memset(dbt, 0, sizeof(DBT));
-}
-
-Dbt::Dbt(void *data_arg, size_t size_arg)
-{
- DBT *dbt = this;
- memset(dbt, 0, sizeof(DBT));
- set_data(data_arg);
- set_size(size_arg);
-}
-
-Dbt::~Dbt()
-{
-}
-
-Dbt::Dbt(const Dbt &that)
-{
- const DBT *from = &that;
- DBT *to = this;
- memcpy(to, from, sizeof(DBT));
-}
-
-Dbt &Dbt::operator = (const Dbt &that)
-{
- if (this != &that) {
- const DBT *from = &that;
- DBT *to = this;
- memcpy(to, from, sizeof(DBT));
- }
- return (*this);
-}
-
-DB_RW_ACCESS(Dbt, void *, data, data)
-DB_RW_ACCESS(Dbt, u_int32_t, size, size)
-DB_RW_ACCESS(Dbt, u_int32_t, ulen, ulen)
-DB_RW_ACCESS(Dbt, u_int32_t, dlen, dlen)
-DB_RW_ACCESS(Dbt, u_int32_t, doff, doff)
-DB_RW_ACCESS(Dbt, u_int32_t, flags, flags)
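// DB_RW_ACCESS itself is defined in cxx_int.h and does not appear in
// this diff; judging from the invocations above it presumably emits a
// get_/set_ pair per DBT field, along the lines of (an assumption, not
// the actual macro body):
//
//	u_int32_t Dbt::get_size() const		{ return (size); }
//	void Dbt::set_size(u_int32_t value)	{ size = value; }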
diff --git a/bdb/cxx/cxx_txn.cpp b/bdb/cxx/cxx_txn.cpp
index 0abae982644..b04077c0f5b 100644
--- a/bdb/cxx/cxx_txn.cpp
+++ b/bdb/cxx/cxx_txn.cpp
@@ -1,136 +1,81 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1997, 1998, 1999, 2000
+ * Copyright (c) 1997-2002
* Sleepycat Software. All rights reserved.
*/
#include "db_config.h"
#ifndef lint
-static const char revid[] = "$Id: cxx_txn.cpp,v 11.13 2000/12/21 16:24:33 dda Exp $";
+static const char revid[] = "$Id: cxx_txn.cpp,v 11.27 2002/07/20 13:50:11 dda Exp $";
#endif /* not lint */
#include <errno.h>
#include "db_cxx.h"
-#include "cxx_int.h"
-
-////////////////////////////////////////////////////////////////////////
-// //
-// DbTxnMgr //
-// //
-////////////////////////////////////////////////////////////////////////
-
-int DbEnv::txn_begin(DbTxn *pid, DbTxn **tid, u_int32_t flags)
-{
- int err;
- DB_ENV *env = unwrap(this);
- DB_TXN *txn;
-
- if ((err = ::txn_begin(env, unwrap(pid), &txn, flags)) != 0) {
- DB_ERROR("DbEnv::txn_begin", err, error_policy());
- return (err);
- }
- DbTxn *result = new DbTxn();
- result->imp_ = wrap(txn);
- *tid = result;
- return (err);
-}
-
-int DbEnv::txn_checkpoint(u_int32_t kbyte, u_int32_t min, u_int32_t flags)
-{
- int err;
- DB_ENV *env = unwrap(this);
- if ((err = ::txn_checkpoint(env, kbyte, min, flags)) != 0 &&
- err != DB_INCOMPLETE) {
- DB_ERROR("DbEnv::txn_checkpoint", err, error_policy());
- return (err);
- }
- return (err);
+#include "dbinc/cxx_int.h"
+
+#include "db_int.h"
+
+// Helper macro for simple methods that pass through to the
+// underlying C method. It may return an error or raise an exception.
+// Note this macro expects that input _argspec is an argument
+// list element (e.g., "char *arg") and that _arglist is the arguments
+// that should be passed through to the C method (e.g., "(db, arg)")
+//
+#define DBTXN_METHOD(_name, _delete, _argspec, _arglist) \
+int DbTxn::_name _argspec \
+{ \
+ int ret; \
+ DB_TXN *txn = unwrap(this); \
+ \
+ ret = txn->_name _arglist; \
+ /* Weird, but safe if we don't access this again. */ \
+ if (_delete) \
+ delete this; \
+ if (!DB_RETOK_STD(ret)) \
+ DB_ERROR("DbTxn::" # _name, ret, ON_ERROR_UNKNOWN); \
+ return (ret); \
}
-int DbEnv::txn_stat(DB_TXN_STAT **statp, db_malloc_fcn_type db_malloc_fcn)
-{
- int err;
- DB_ENV *env = unwrap(this);
- if ((err = ::txn_stat(env, statp, db_malloc_fcn)) != 0) {
- DB_ERROR("DbEnv::txn_stat", err, error_policy());
- return (err);
- }
- return (err);
-}
-
-////////////////////////////////////////////////////////////////////////
-// //
-// DbTxn //
-// //
-////////////////////////////////////////////////////////////////////////
-
+// private constructor, never called but needed by some C++ linkers
DbTxn::DbTxn()
: imp_(0)
{
}
-DbTxn::~DbTxn()
+DbTxn::DbTxn(DB_TXN *txn)
+: imp_(wrap(txn))
{
+ txn->api_internal = this;
}
-int DbTxn::abort()
+DbTxn::~DbTxn()
{
- int err;
- DB_TXN *txn;
-
- txn = unwrap(this);
- err = txn_abort(txn);
-
- // It may seem weird to delete this, but is legal as long
- // as we don't access any of its data before returning.
- //
- delete this;
-
- if (err != 0)
- DB_ERROR("DbTxn::abort", err, ON_ERROR_UNKNOWN);
-
- return (err);
}
-int DbTxn::commit(u_int32_t flags)
-{
- int err;
- DB_TXN *txn;
-
- txn = unwrap(this);
- err = txn_commit(txn, flags);
-
- // It may seem weird to delete this, but is legal as long
- // as we don't access any of its data before returning.
- //
- delete this;
-
- if (err != 0)
- DB_ERROR("DbTxn::commit", err, ON_ERROR_UNKNOWN);
-
- return (err);
-}
+DBTXN_METHOD(abort, 1, (), (txn))
+DBTXN_METHOD(commit, 1, (u_int32_t flags), (txn, flags))
+DBTXN_METHOD(discard, 1, (u_int32_t flags), (txn, flags))
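// Expanding one invocation by hand shows what the DBTXN_METHOD macro
// above produces; commit, with _delete set to 1, becomes (roughly):
//
//	int DbTxn::commit(u_int32_t flags)
//	{
//		int ret;
//		DB_TXN *txn = unwrap(this);
//
//		ret = txn->commit(txn, flags);
//		if (1)			// _delete
//			delete this;
//		if (!DB_RETOK_STD(ret))
//			DB_ERROR("DbTxn::commit", ret, ON_ERROR_UNKNOWN);
//		return (ret);
//	}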
u_int32_t DbTxn::id()
{
DB_TXN *txn;
txn = unwrap(this);
- return (txn_id(txn)); // no error
+ return (txn->id(txn)); // no error
}
-int DbTxn::prepare()
-{
- int err;
- DB_TXN *txn;
+DBTXN_METHOD(prepare, 0, (u_int8_t *gid), (txn, gid))
+DBTXN_METHOD(set_timeout, 0, (db_timeout_t timeout, u_int32_t flags),
+ (txn, timeout, flags))
- txn = unwrap(this);
- if ((err = txn_prepare(txn)) != 0) {
- DB_ERROR("DbTxn::prepare", err, ON_ERROR_UNKNOWN);
- return (err);
- }
- return (0);
+// static method
+DbTxn *DbTxn::wrap_DB_TXN(DB_TXN *txn)
+{
+ DbTxn *wrapped_txn = get_DbTxn(txn);
+ if (wrapped_txn == NULL)
+ wrapped_txn = new DbTxn(txn);
+ return wrapped_txn;
}
diff --git a/bdb/cxx/namemap.txt b/bdb/cxx/namemap.txt
deleted file mode 100644
index 75207718577..00000000000
--- a/bdb/cxx/namemap.txt
+++ /dev/null
@@ -1,21 +0,0 @@
-$Id: namemap.txt,v 10.4 2000/02/19 20:57:54 bostic Exp $
-
-The bulk of DB provides for wrapper classes and appropriately named methods
-that call into DB. For the most part, there is a straightforward mapping of
-names. For the purposes of referencing documentation, this chart shows the
-underlying C structure name for each C++ class. In some cases, using the
-given C prefix with a C++ method name gives the underlying C function name.
-For example, DbMpoolFile::close() is implemented by memp_fclose().
-
-C++             C               C prefix
-
-Db              DB
-DbEnv           DB_ENV
-Dbc             DBC
-DbException     none
-DbInfo          DB_INFO
-DbLock          DB_LOCK         lock_
-DbLsn           DB_LSN
-DbMpoolFile     DB_MPOOL_FILE   memp_
-Dbt             DBT
-DbTxn           DB_TXN          txn_
diff --git a/bdb/db/Design.fileop b/bdb/db/Design.fileop
deleted file mode 100644
index 187f1ffaf22..00000000000
--- a/bdb/db/Design.fileop
+++ /dev/null
@@ -1,452 +0,0 @@
-# $Id: Design.fileop,v 11.4 2000/02/19 20:57:54 bostic Exp $
-
-The design of file operation recovery.
-
-Keith has asked me to write up notes on our current status of database
-create and delete recovery, why it's so hard, and how we've violated
-all the cornerstone assumptions on which our recovery framework is based.
-
-I am including two documents at the end of this one. The first is the
-initial design of the recoverability of file create and delete (there is
-no talk of subdatabases there, because we didn't think we'd have to do
-anything special there). I will annotate this document on where things
-changed.
-
-The second is the design of recd007 which is supposed to test our ability
-to recover these operations regardless of where one crashes. This test
-is fundamentally different from our other recovery tests in the following
-manner. Normally, the application controls transaction boundaries.
-Therefore, we can perform an operation and then decide whether to commit
-or abort it. In the normal recovery tests, we force the database into
-each of the four possible states from a recovery perspective:
-
- database is pre-op, undo (do nothing)
- database is pre-op, redo
- database is post-op, undo
- database is post-op, redo (do nothing)
-
-By copying databases at various points and initiating txn_commit and abort
-appropriately, we can make all these things happen. Notice that the one
-case we don't handle is where page A is in one state (e.g., pre-op) and
-page B is in another state (e.g., post-op). I will argue that these don't
-matter because each page is recovered independently. If anyone can poke
-holes in this, I'm interested.
-
-The problem with create/delete recovery testing is that the transaction
-is begun and ended all inside the library. Therefore, there is never any
-point (outside the library) where we can copy files and or initiate
-abort/commit. In order to still put the recovery code through its paces,
-Sue designed an infrastructure that lets you tell the library where to
-make copies of things and where to suddenly inject errors so that the
-transaction gets aborted. This level of detail allows us to push the
-create/delete recovery code through just about every recovery path
-possible (although I'm sure Mike will tell me I'm wrong when he starts to
-run code coverage tools).
-
-OK, so that's all preamble and a brief discussion of the documents I'm
-enclosing.
-
-Why was this so hard and painful and why is the code so Q@#$!% complicated?
-The following is a discussion/explanation, but to the best of my knowledge,
-the structure we have in place now works. The key question we need to be
-asking is, "Does this need to have to be so complex or should we redesign
-portions to simplify it?" At this point, there is no obvious way to simplify
-it in my book, but I may be having difficulty seeing this because my mind is
-too polluted at this point.
-
-Our overall strategy for recovery is that we do write-ahead logging,
-that is, we log an operation and make sure the log record is on disk
-before any of the data it describes is on disk.
-Typically we use log sequence numbers (LSNs) to mark the data so that
-during recovery, we can look at the data and determine if it is in a
-state before a particular log record or after a particular log record.
-
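As a concrete illustration of that LSN check (a sketch only; the real
recovery functions in this tree go through log_compare() and their
redo/undo macros, and the variable names here are made up):

	cmp = log_compare(&LSN(pagep), &record_lsn);
	if (cmp < 0) {
		/* The page predates the record: redo must reapply it. */
	} else {
		/* The page already reflects it: undo may back it out. */
	}
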
-In the good old days, opens were not transaction protected, so we could
-do regular old opens during recovery and if the file existed, we opened
-it and if it didn't (or appeared corrupt), we didn't and treated it like
-a missing file. As will be discussed below in detail, our states are much
-more complicated and recovery can't make such simplistic assumptions.
-
-Also, since we are now dealing with file system operations, we have less
-control about when they actually happen and what the state of the system
-can be. That is, we have to write create log records synchronously, because
-the create/open system call may force a newly created (0-length) file to
-disk. This file has to now be identified as being in the "being-created"
-state.
-
-A. We used to make a number of assumptions during recovery:
-
-1. We could call db_open at any time and one of three things would happen:
- a) the file would be opened cleanly
- b) the file would not exist
- c) we would encounter an error while opening the file
-
-Case a posed no difficulty.
-In Case b, we simply spit out a warning that a file was missing and then
- ignored all subsequent operations to that file.
-In Case c, we reported a fatal error.
-
-2. We can always generate a warning if a file is missing.
-
-3. We never encounter NULL file names in the log.
-
-B. We also made some assumptions in the main-line library:
-
-1. If you try to open a file and it exists but is 0-length, then
-someone else is trying to open it.
-
-2. You can write pages anywhere in a file and any non-existent pages
-are 0-filled. [This breaks on Windows.]
-
-3. If you have proper permissions then you can always evict pages from
-the buffer pool.
-
-4. During open, we can close the master database handle as soon as
-we're done with it since all the rest of the activity will take place
-on the subdatabase handle.
-
-In our brave new world, most of these assumptions are no longer valid.
-Let's address them one at a time.
-
-A.1 We could call db_open at any time and one of three things would happen:
- a) the file would be opened cleanly
- b) the file would not exist
- c) we would encounter an error while opening the file
-There are now additional states. Since we are trying to make file
-operations recoverable, you can now die in the middle of such an
-operation and we have to be able to pick up the pieces. What this
-now means is that:
-
- * a 0-length file can be an indication of a create in-progress
- * you can have a meta-data page but no root page (of a btree)
- * if a file doesn't exist, it could mean that it was just about
- to be created and needs to be rolled forward.
- * if you encounter an error in a file (e.g., the meta-data page
- is all 0's) you could still be in mid-open.
-
-I have now made this all work, but it required significant changes to the
-db_open code and error handling and this is the sort of change that makes
-everyone nervous.
-
-A.2. We can always generate a warning if a file is missing.
-
-Now that we have a delete file method in the API, we need to make sure
-that we do not generate warning messages for files that don't exist if
-we see that they were explicitly deleted.
-
-This means that we need to save state during recovery, determine which
-files were missing and were not being recreated and were not deleted and
-only complain about those.
-
-A.3. We never encounter NULL file names in the log.
-
-Now that we allow transaction protection on memory-resident files, we write
-log messages for files with NULL file names. This means that our assumption
-of always being able to call "db_open" on any log_register OPEN message found
-in the log is no longer valid.
-
-B.1. If you try to open a file and it exists but is 0-length, then
-someone else is trying to open it.
-
-As discussed for A.1, this is no longer true. It may be instead that you
-are in the process of recovering a create.
-
-B.2. You can write pages anywhere in a file and any non-existent pages
-are 0-filled.
-
-It turns out that this is not true on Windows. This means that places
-we do group allocation (hash) must explicitly allocate each page, because
-we can't count on recognizing the uninitialized pages later.
-
-B.3. If you have proper permissions then you can always evict pages from
-the buffer pool.
-
-In the brave new world though, files can be deleted and they may
-have pages in the mpool. If you later try to evict these, you
-discover that the file doesn't exist. We'd get here when we had
-to dirty pages during a remove operation.
-
-B.4. You can close files any time you want.
-
-However, if the file takes part in the open/remove transaction,
-then we had better not close it until after the transaction
-commits/aborts, because we need to be able to get our hands on the
-dbp and the open happened in a different transaction.
-
-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
-Design for recovering file create and delete in the presence of subdatabases.
-
-Assumptions:
- Remove the O_TRUNCATE flag.
- Single-thread all open/create/delete operations.
- (Well, almost all; we'll optimize opens without DB_CREATE set.)
- The reasoning for this is that with two simultaneous
- open/creators, during recovery, we cannot identify which
- transaction successfully created files and therefore cannot
- recover correctly.
- File system creates/deletes are synchronous
- Once the file is open, subdatabase creates look like regular
- get/put operations and a metadata page creation.
-
-There are 4 cases to deal with:
- 1. Open/create file
- 2. Open/create subdatabase
- 3. Delete
- 4. Recovery records
-
- __db_fileopen_recover
- __db_metapage_recover
- __db_delete_recover
- existing c_put and c_get routines for subdatabase creation
-
- Note that the open/create of the file and the open/create of the
- subdatabase need to be in the same transaction.
-
-1. Open/create (full file and subdb version)
-
-If create
- LOCK_FILEOP
- txn_begin
- log create message (open message below)
- do file system open/create
- if we did not create
- abort transaction (before going to open_only)
- if (!subdb)
- set dbp->open_txn = NULL
- else
- txn_begin a new transaction for the subdb open
-
- construct meta-data page
- log meta-data page (see metapage)
- write the meta-data page
- * It may be the case that btrees need to log both meta-data pages
- and root pages. If that is the case, I believe that we can use
- this same record and recovery routines for both
-
- txn_commit
- UNLOCK_FILEOP
-
-2. Delete
- LOCK_FILEOP
- txn_begin
- log delete message (delete message below)
- mv file __db.file.lsn
- txn_commit
- unlink __db.file.lsn
- UNLOCK_FILEOP
-
-3. Recovery Routines
-
-__db_fileopen_recover
- if (argp->name.size == 0)
- done;
-
- if (redo) /* Commit */
- __os_open(argp->name, DB_OSO_CREATE, argp->mode, &fh)
- __os_closehandle(fh)
- if (undo) /* Abort */
- if (argp->name exists)
- unlink(argp->name);
-
-__db_metapage_recover
- if (redo)
- __os_open(argp->name, 0, 0, &fh)
- __os_lseek(meta data page)
- __os_write(meta data page)
- __os_closehandle(fh);
- if (undo)
- done = 0;
- if (argp->name exists)
- if (length of argp->name != 0)
- __os_open(argp->name, 0, 0, &fh)
- __os_lseek(meta data page)
- __os_read(meta data page)
- if (read succeeds && page lsn != current_lsn)
- done = 1
- __os_closehandle(fh);
- if (!done)
- unlink(argp->name)
-
-__db_delete_recover
- if (redo)
- Check if the backup file still exists and if so, delete it.
-
- if (undo)
- if (__db_appname(__db.file.lsn exists))
- mv __db_appname(__db.file.lsn) __db_appname(file)
-
-__db_metasub_recover
- /* This is like a normal recovery routine */
- Get the metadata page
- if (cmp_n && redo)
- copy the log page onto the page
- update the lsn
- make sure page gets put dirty
- else if (cmp_p && undo)
- update the lsn to the lsn in the log record
- make sure page gets put dirty
-
- if the page was modified, put it back dirty
-
-In db.src
-
-# name: filename (before call to __db_appname)
-# mode: file system mode
-BEGIN open
-DBT name DBT s
-ARG mode u_int32_t o
-END
-
-# opcode: indicate if it is a create/delete and if it is a subdatabase
-# pgsize: page size on which we're going to write the meta-data page
-# pgno: page number on which to write this meta-data page
-# page: the actual meta-data page
-# lsn: LSN of the meta-data page -- 0 for new databases, may be non-0
-# for subdatabases.
-
-BEGIN metapage
-ARG opcode u_int32_t x
-DBT name DBT s
-ARG pgno db_pgno_t d
-DBT page DBT s
-POINTER lsn DB_LSN * lu
-END
-
-# We do not need a subdatabase name here because removing a subdatabase
-# name is simply a regular bt_delete operation from the master database.
-# It will get logged normally.
-# name: filename
-BEGIN delete
-DBT name DBT s
-END
-
-# We also need to reclaim pages, but we can use the existing
-# bt_pg_alloc routines.
-
-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
-Testing recoverability of create/delete.
-
-These tests are unlike other tests in that they are going to
-require hooks in the library. The reason is that the create
-and delete calls are internally wrapped in a transaction, so
-that if the call returns, the transaction has already either
-committed or aborted. Using only that interface limits what
-kind of testing we can do. To match our other recovery testing
-efforts, we need to add hooks to trigger aborts at particular
-times in the create/delete path.
-
-The general recovery testing strategy is that we wish to
-execute every path through every recovery routine. That
-means that we try to:
- catch each operation in its pre-operation state
- call the recovery function with redo
- call the recovery function with undo
- catch each operation in its post-operation state
- call the recovery function with redo
- call the recovery function with undo
-
-In addition, there are a few critical points in the create and
-delete path that we want to make sure we capture.
-
-1. Test Structure
-
-The test structure should be similar to the existing recovery
-tests. We will want to have a structure in place where we
-can execute different commands:
- create a file/database
- create a file that will contain subdatabases.
- create a subdatabase
- remove a subdatabase (that contains valid data)
- remove a subdatabase (that does not contain any data)
- remove a file that used to contain subdatabases
- remove a file that contains a database
-
-The tricky part is capturing the state of the world at the
-various points in the create/delete process.
-
-The critical points in the create process are:
-
- 1. After we've logged the create, but before we've done anything.
- in db/db.c
- after the open_retry
- after the __crdel_fileopen_log call (and before we've
- called __os_open).
-
- 2. Immediately after the __os_open
-
- 3. Immediately after each __db_log_page call
- in bt_open.c
- log meta-data page
- log root page
- in hash.c
- log meta-data page
-
- 4. With respect to the log records above, shortly after each
- log write is a memp_fput. We need to do a sync after
- each memp_fput and trigger a point after that sync.
-
-The critical points in the remove process are:
-
- 1. Right after the crdel_delete_log in db/db.c
-
- 2. Right after the __os_rename call (below the crdel_delete_log)
-
- 3. After the __db_remove_callback call.
-
-I believe these are the places where we'll need some sort of hook.
-
-2. Adding hooks to the library.
-
-The hooks need two components. One component is to capture the state of
-the database at the hook point and the other is to trigger a txn_abort at
-the hook point. The second part is fairly trivial.
-
-The first part requires more thought. Let me explain what we do in a
-"normal" recovery test. In a normal recovery test, we save an intial
-copy of the database (this copy is called init). Then we execute one
-or more operations. Then, right before the commit/abort, we sync the
-file, and save another copy (the afterop copy). Finally, we call txn_commit
-or txn_abort, sync the file again, and save the database one last time (the
-final copy).
-
-Then we run recovery. The first time, this should be a no-op, because
-we've either committed the transaction and are checking to redo it or
-we aborted the transaction, undid it on the abort and are checking to
-undo it again.
-
-We then run recovery again on whatever database will force us through
-the path that requires work. In the commit case, this means we start
-with the init copy of the database and run recovery. This pushes us
-through all the redo paths. In the abort case, we start with the afterop
-copy which pushes us through all the undo cases.
-
-In some sense, we're asking the create/delete test to be more exhaustive
-by defining all the trigger points, but I think that's the correct thing
-to do, since the create/delete is not initiated by a user transaction.
-
-So, what do we have to do at the hook points?
- 1. sync the file to disk.
- 2. save the file itself
- 3. save any files named __db_backup_name(name, &backup_name, lsn)
- Since we may not know the right lsns, I think we should save
- every file of the form __db.name.0xNNNNNNNN.0xNNNNNNNN into
- some temporary files from which we can restore it to run
- recovery.
-
-3. Putting it all together
-
-So, the three pieces are writing the test structure, putting in the hooks
-and then writing the recovery portions so that we restore the right thing
-that the hooks saved in order to initiate recovery.
-
-Some of the technical issues that need to be solved are:
- How does the hook code become active (i.e., we don't
- want it in there normally, but it's got to be
- there when you configure for testing)?
- How do you (the test) tell the library that you want a
- particular hook to abort?
- How do you (the test) tell the library that you want the
- hook code doing its copies (do we really want
- *every* test doing these copies during testing?
- Maybe it's not a big deal, but maybe it is; we
- should at least think about it).
diff --git a/bdb/db/crdel.src b/bdb/db/crdel.src
index 17c061d6887..d89fa7a0382 100644
--- a/bdb/db/crdel.src
+++ b/bdb/db/crdel.src
@@ -1,13 +1,14 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Copyright (c) 1996-2002
* Sleepycat Software. All rights reserved.
*
- * $Id: crdel.src,v 11.12 2000/12/12 17:41:48 bostic Exp $
+ * $Id: crdel.src,v 11.24 2002/04/17 19:02:57 krinsky Exp $
*/
-PREFIX crdel
+PREFIX __crdel
+DBPRIVATE
INCLUDE #include "db_config.h"
INCLUDE
@@ -15,30 +16,20 @@ INCLUDE #ifndef NO_SYSTEM_INCLUDES
INCLUDE #include <sys/types.h>
INCLUDE
INCLUDE #include <ctype.h>
-INCLUDE #include <errno.h>
INCLUDE #include <string.h>
INCLUDE #endif
INCLUDE
INCLUDE #include "db_int.h"
-INCLUDE #include "db_page.h"
-INCLUDE #include "db_dispatch.h"
-INCLUDE #include "db_am.h"
-INCLUDE #include "txn.h"
+INCLUDE #include "dbinc/crypto.h"
+INCLUDE #include "dbinc/db_page.h"
+INCLUDE #include "dbinc/db_dispatch.h"
+INCLUDE #include "dbinc/db_am.h"
+INCLUDE #include "dbinc/log.h"
+INCLUDE #include "dbinc/rep.h"
+INCLUDE #include "dbinc/txn.h"
INCLUDE
/*
- * Fileopen -- log a potential file create operation
- *
- * name: filename
- * subname: sub database name
- * mode: file system mode
- */
-BEGIN fileopen 141
-DBT name DBT s
-ARG mode u_int32_t o
-END
-
-/*
* Metasub: log the creation of a subdatabase meta data page.
*
* fileid: identifies the file being acted upon.
@@ -47,57 +38,9 @@ END
* lsn: lsn of the page.
*/
BEGIN metasub 142
-ARG fileid int32_t ld
-ARG pgno db_pgno_t d
-DBT page DBT s
+DB fileid int32_t ld
+WRLOCK pgno db_pgno_t lu
+PGDBT page DBT s
POINTER lsn DB_LSN * lu
END
-/*
- * Metapage: log the creation of a meta data page for a new file.
- *
- * fileid: identifies the file being acted upon.
- * name: file containing the page.
- * pgno: page number on which to write this meta-data page
- * page: the actual meta-data page
- */
-BEGIN metapage 143
-ARG fileid int32_t ld
-DBT name DBT s
-ARG pgno db_pgno_t d
-DBT page DBT s
-END
-
-/*
- * Delete: remove a file.
- * Note that we don't need a special log record for subdatabase
- * removes, because we use normal btree operations to remove them.
- *
- * name: name of the file being removed (relative to DBHOME).
- */
-DEPRECATED old_delete 144
-DBT name DBT s
-END
-
-/*
- * Rename: rename a file
- * We do not need this for subdatabases
- *
- * name: name of the file being removed (relative to DBHOME).
- */
-BEGIN rename 145
-ARG fileid int32_t ld
-DBT name DBT s
-DBT newname DBT s
-END
-/*
- * Delete: remove a file.
- * Note that we don't need a special log record for subdatabase
- * removes, because we use normal btree operations to remove them.
- *
- * name: name of the file being removed (relative to DBHOME).
- */
-BEGIN delete 146
-ARG fileid int32_t ld
-DBT name DBT s
-END
diff --git a/bdb/db/crdel_rec.c b/bdb/db/crdel_rec.c
index 495b92a0ad7..542a0c358dd 100644
--- a/bdb/db/crdel_rec.c
+++ b/bdb/db/crdel_rec.c
@@ -1,14 +1,14 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Copyright (c) 1996-2002
* Sleepycat Software. All rights reserved.
*/
#include "db_config.h"
#ifndef lint
-static const char revid[] = "$Id: crdel_rec.c,v 11.43 2000/12/13 08:06:34 krinsky Exp $";
+static const char revid[] = "$Id: crdel_rec.c,v 11.64 2002/08/14 20:27:34 bostic Exp $";
#endif /* not lint */
#ifndef NO_SYSTEM_INCLUDES
@@ -18,112 +18,9 @@ static const char revid[] = "$Id: crdel_rec.c,v 11.43 2000/12/13 08:06:34 krinsk
#endif
#include "db_int.h"
-#include "db_page.h"
-#include "log.h"
-#include "hash.h"
-#include "mp.h"
-#include "db_dispatch.h"
-
-/*
- * __crdel_fileopen_recover --
- * Recovery function for fileopen.
- *
- * PUBLIC: int __crdel_fileopen_recover
- * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
- */
-int
-__crdel_fileopen_recover(dbenv, dbtp, lsnp, op, info)
- DB_ENV *dbenv;
- DBT *dbtp;
- DB_LSN *lsnp;
- db_recops op;
- void *info;
-{
- __crdel_fileopen_args *argp;
- DBMETA ondisk;
- DB_FH fh;
- size_t nr;
- int do_unlink, ret;
- u_int32_t b, mb, io;
- char *real_name;
-
- COMPQUIET(info, NULL);
-
- real_name = NULL;
- REC_PRINT(__crdel_fileopen_print);
-
- if ((ret = __crdel_fileopen_read(dbenv, dbtp->data, &argp)) != 0)
- goto out;
- /*
- * If this is an in-memory database, then the name is going to
- * be NULL, which looks like a 0-length name in recovery.
- */
- if (argp->name.size == 0)
- goto done;
-
- if ((ret = __db_appname(dbenv, DB_APP_DATA,
- NULL, argp->name.data, 0, NULL, &real_name)) != 0)
- goto out;
- if (DB_REDO(op)) {
- /*
- * The create commited, so we need to make sure that the file
- * exists. A simple open should suffice.
- */
- if ((ret = __os_open(dbenv, real_name,
- DB_OSO_CREATE, argp->mode, &fh)) != 0)
- goto out;
- if ((ret = __os_closehandle(&fh)) != 0)
- goto out;
- } else if (DB_UNDO(op)) {
- /*
- * If the file is 0-length then it was in the process of being
- * created, so we should unlink it. If it is non-0 length, then
- * either someone else created it and we need to leave it
- * untouched or we were in the process of creating it, allocated
- * the first page on a system that requires you to actually
- * write pages as you allocate them, but never got any data
- * on it.
- * If the file doesn't exist, we never got around to creating
- * it, so that's fine.
- */
- if (__os_exists(real_name, NULL) != 0)
- goto done;
-
- if ((ret = __os_open(dbenv, real_name, 0, 0, &fh)) != 0)
- goto out;
- if ((ret = __os_ioinfo(dbenv,
- real_name, &fh, &mb, &b, &io)) != 0)
- goto out;
- do_unlink = 0;
- if (mb != 0 || b != 0) {
- /*
- * We need to read the first page
- * to see if its got valid data on it.
- */
- if ((ret = __os_read(dbenv, &fh,
- &ondisk, sizeof(ondisk), &nr)) != 0 ||
- nr != sizeof(ondisk))
- goto out;
- if (ondisk.magic == 0)
- do_unlink = 1;
- }
- if ((ret = __os_closehandle(&fh)) != 0)
- goto out;
- /* Check for 0-length and if it is, delete it. */
- if (do_unlink || (mb == 0 && b == 0))
- if ((ret = __os_unlink(dbenv, real_name)) != 0)
- goto out;
- }
-
-done: *lsnp = argp->prev_lsn;
- ret = 0;
-
-out: if (argp != NULL)
- __os_free(argp, 0);
- if (real_name != NULL)
- __os_freestr(real_name);
- return (ret);
-}
+#include "dbinc/db_page.h"
+#include "dbinc/hash.h"
+#include "dbinc/log.h"
/*
* __crdel_metasub_recover --
@@ -145,16 +42,16 @@ __crdel_metasub_recover(dbenv, dbtp, lsnp, op, info)
DBC *dbc;
DB_MPOOLFILE *mpf;
PAGE *pagep;
- u_int8_t *file_uid, ptype;
- int cmp_p, modified, reopen, ret;
+ int cmp_p, modified, ret;
+ pagep = NULL;
COMPQUIET(info, NULL);
REC_PRINT(__crdel_metasub_print);
REC_INTRO(__crdel_metasub_read, 0);
- if ((ret = memp_fget(mpf, &argp->pgno, 0, &pagep)) != 0) {
+ if ((ret = mpf->get(mpf, &argp->pgno, 0, &pagep)) != 0) {
if (DB_REDO(op)) {
- if ((ret = memp_fget(mpf,
+ if ((ret = mpf->get(mpf,
&argp->pgno, DB_MPOOL_CREATE, &pagep)) != 0)
goto out;
} else {
@@ -165,7 +62,6 @@ __crdel_metasub_recover(dbenv, dbtp, lsnp, op, info)
}
modified = 0;
- reopen = 0;
cmp_p = log_compare(&LSN(pagep), &argp->lsn);
CHECK_LSN(op, cmp_p, &LSN(pagep), &argp->lsn);
@@ -173,14 +69,6 @@ __crdel_metasub_recover(dbenv, dbtp, lsnp, op, info)
memcpy(pagep, argp->page.data, argp->page.size);
LSN(pagep) = *lsnp;
modified = 1;
- /*
- * If this is a meta-data page, then we must reopen;
- * if it was a root page, then we do not.
- */
- ptype = ((DBMETA *)argp->page.data)->type;
- if (ptype == P_HASHMETA || ptype == P_BTREEMETA ||
- ptype == P_QAMMETA)
- reopen = 1;
} else if (DB_UNDO(op)) {
/*
* We want to undo this page creation. The page creation
@@ -196,451 +84,14 @@ __crdel_metasub_recover(dbenv, dbtp, lsnp, op, info)
LSN(pagep) = argp->lsn;
modified = 1;
}
- if ((ret = memp_fput(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0)) != 0)
+ if ((ret = mpf->put(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0)) != 0)
goto out;
-
- /*
- * If we are redoing a subdatabase create, we must close and reopen the
- * file to be sure that we have the proper meta information in the
- * in-memory structures
- */
- if (reopen) {
- /* Close cursor if it's open. */
- if (dbc != NULL) {
- dbc->c_close(dbc);
- dbc = NULL;
- }
-
- if ((ret = __os_malloc(dbenv,
- DB_FILE_ID_LEN, NULL, &file_uid)) != 0)
- goto out;
- memcpy(file_uid, &file_dbp->fileid[0], DB_FILE_ID_LEN);
- ret = __log_reopen_file(dbenv,
- NULL, argp->fileid, file_uid, argp->pgno);
- (void)__os_free(file_uid, DB_FILE_ID_LEN);
- if (ret != 0)
- goto out;
- }
-
-done: *lsnp = argp->prev_lsn;
- ret = 0;
-
-out: REC_CLOSE;
-}
-
-/*
- * __crdel_metapage_recover --
- * Recovery function for metapage.
- *
- * PUBLIC: int __crdel_metapage_recover
- * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
- */
-int
-__crdel_metapage_recover(dbenv, dbtp, lsnp, op, info)
- DB_ENV *dbenv;
- DBT *dbtp;
- DB_LSN *lsnp;
- db_recops op;
- void *info;
-{
- __crdel_metapage_args *argp;
- DB *dbp;
- DBMETA *meta, ondisk;
- DB_FH fh;
- size_t nr;
- u_int32_t b, io, mb, pagesize;
- int is_done, ret;
- char *real_name;
-
- COMPQUIET(info, NULL);
-
- real_name = NULL;
- memset(&fh, 0, sizeof(fh));
- REC_PRINT(__crdel_metapage_print);
-
- if ((ret = __crdel_metapage_read(dbenv, dbtp->data, &argp)) != 0)
- goto out;
-
- /*
- * If this is an in-memory database, then the name is going to
- * be NULL, which looks like a 0-length name in recovery.
- */
- if (argp->name.size == 0)
- goto done;
-
- meta = (DBMETA *)argp->page.data;
- __ua_memcpy(&pagesize, &meta->pagesize, sizeof(pagesize));
-
- if ((ret = __db_appname(dbenv, DB_APP_DATA,
- NULL, argp->name.data, 0, NULL, &real_name)) != 0)
- goto out;
- if (DB_REDO(op)) {
- if ((ret = __db_fileid_to_db(dbenv,
- &dbp, argp->fileid, 0)) != 0) {
- if (ret == DB_DELETED)
- goto done;
- else
- goto out;
- }
-
- /*
- * We simply read the first page and if the LSN is 0, we
- * write the meta-data page.
- */
- if ((ret = __os_open(dbenv, real_name, 0, 0, &fh)) != 0)
- goto out;
- if ((ret = __os_seek(dbenv, &fh,
- pagesize, argp->pgno, 0, 0, DB_OS_SEEK_SET)) != 0)
- goto out;
- /*
- * If the read succeeds then the page exists, and we need
- * to verify that the page has actually been written, because
- * on some systems (e.g., Windows) we preallocate pages because
- * files aren't allowed to have holes in them. If the page
- * looks good then we're done.
- */
- if ((ret = __os_read(dbenv, &fh, &ondisk,
- sizeof(ondisk), &nr)) == 0 && nr == sizeof(ondisk)) {
- if (ondisk.magic != 0)
- goto done;
- if ((ret = __os_seek(dbenv, &fh,
- pagesize, argp->pgno, 0, 0, DB_OS_SEEK_SET)) != 0)
- goto out;
- }
-
- /*
- * Page didn't exist, update the LSN and write a new one.
- * (seek pointer shouldn't have moved)
- */
- __ua_memcpy(&meta->lsn, lsnp, sizeof(DB_LSN));
- if ((ret = __os_write(dbp->dbenv, &fh,
- argp->page.data, argp->page.size, &nr)) != 0)
- goto out;
- if (nr != (size_t)argp->page.size) {
- __db_err(dbenv, "Write failed during recovery");
- ret = EIO;
- goto out;
- }
-
- /*
- * We must close and reopen the file to be sure
- * that we have the proper meta information
- * in the in memory structures
- */
-
- if ((ret = __log_reopen_file(dbenv,
- argp->name.data, argp->fileid,
- meta->uid, argp->pgno)) != 0)
- goto out;
-
- /* Handle will be closed on exit. */
- } else if (DB_UNDO(op)) {
- is_done = 0;
-
- /* If file does not exist, there is nothing to undo. */
- if (__os_exists(real_name, NULL) != 0)
- goto done;
-
- /*
- * Before we can look at anything on disk, we have to check
- * if there is a valid dbp for this, and if there is, we'd
- * better flush it.
- */
- dbp = NULL;
- if ((ret =
- __db_fileid_to_db(dbenv, &dbp, argp->fileid, 0)) == 0)
- (void)dbp->sync(dbp, 0);
-
- /*
- * We need to make sure that we do not remove a file that
- * someone else created. If the file is 0-length, then we
- * can assume that we created it and remove it. If it is
- * not 0-length, then we need to check the LSN and make
- * sure that it's the file we created.
- */
- if ((ret = __os_open(dbenv, real_name, 0, 0, &fh)) != 0)
- goto out;
- if ((ret = __os_ioinfo(dbenv,
- real_name, &fh, &mb, &b, &io)) != 0)
- goto out;
- if (mb != 0 || b != 0) {
- /* The file has something in it. */
- if ((ret = __os_seek(dbenv, &fh,
- pagesize, argp->pgno, 0, 0, DB_OS_SEEK_SET)) != 0)
- goto out;
- if ((ret = __os_read(dbenv, &fh,
- &ondisk, sizeof(ondisk), &nr)) != 0)
- goto out;
- if (log_compare(&ondisk.lsn, lsnp) != 0)
- is_done = 1;
- }
-
- /*
- * Must close here, because unlink with the file open fails
- * on some systems.
- */
- if ((ret = __os_closehandle(&fh)) != 0)
- goto out;
-
- if (!is_done) {
- /*
- * On some systems, you cannot unlink an open file so
- * we close the fd in the dbp here and make sure we
- * don't try to close it again. First, check for a
- * saved_open_fhp, then close down the mpool.
- */
- if (dbp != NULL && dbp->saved_open_fhp != NULL &&
- F_ISSET(dbp->saved_open_fhp, DB_FH_VALID) &&
- (ret = __os_closehandle(dbp->saved_open_fhp)) != 0)
- goto out;
- if (dbp != NULL && dbp->mpf != NULL) {
- (void)__memp_fremove(dbp->mpf);
- if ((ret = memp_fclose(dbp->mpf)) != 0)
- goto out;
- F_SET(dbp, DB_AM_DISCARD);
- dbp->mpf = NULL;
- }
- if ((ret = __os_unlink(dbenv, real_name)) != 0)
- goto out;
- }
- }
+ pagep = NULL;
done: *lsnp = argp->prev_lsn;
ret = 0;
-out: if (argp != NULL)
- __os_free(argp, 0);
- if (real_name != NULL)
- __os_freestr(real_name);
- if (F_ISSET(&fh, DB_FH_VALID))
- (void)__os_closehandle(&fh);
- return (ret);
-}
-
-/*
- * __crdel_delete_recover --
- * Recovery function for delete.
- *
- * PUBLIC: int __crdel_delete_recover
- * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
- */
-int
-__crdel_delete_recover(dbenv, dbtp, lsnp, op, info)
- DB_ENV *dbenv;
- DBT *dbtp;
- DB_LSN *lsnp;
- db_recops op;
- void *info;
-{
- DB *dbp;
- __crdel_delete_args *argp;
- int ret;
- char *backup, *real_back, *real_name;
-
- REC_PRINT(__crdel_delete_print);
-
- backup = real_back = real_name = NULL;
- if ((ret = __crdel_delete_read(dbenv, dbtp->data, &argp)) != 0)
- goto out;
-
- if (DB_REDO(op)) {
- /*
- * On a recovery, as we recreate what was going on, we
- * recreate the creation of the file. And so, even though
- * it committed, we need to delete it. Try to delete it,
- * but it is not an error if that delete fails.
- */
- if ((ret = __db_appname(dbenv, DB_APP_DATA,
- NULL, argp->name.data, 0, NULL, &real_name)) != 0)
- goto out;
- if (__os_exists(real_name, NULL) == 0) {
- /*
- * If a file is deleted and then recreated, it's
- * possible for the __os_exists call above to
- * return success and for us to get here, but for
- * the fileid we're looking for to be marked
- * deleted. In that case, we needn't redo the
- * unlink even though the file exists, and it's
- * not an error.
- */
- ret = __db_fileid_to_db(dbenv, &dbp, argp->fileid, 0);
- if (ret == 0) {
- /*
- * On Windows, the underlying file must be
- * closed to perform a remove.
- */
- (void)__memp_fremove(dbp->mpf);
- if ((ret = memp_fclose(dbp->mpf)) != 0)
- goto out;
- dbp->mpf = NULL;
- if ((ret = __os_unlink(dbenv, real_name)) != 0)
- goto out;
- } else if (ret != DB_DELETED)
- goto out;
- }
- /*
- * The transaction committed, so the only thing that might
- * be true is that the backup file is still around. Try
- * to delete it, but it's not an error if that delete fails.
- */
- if ((ret = __db_backup_name(dbenv, argp->name.data,
- &backup, lsnp)) != 0)
- goto out;
- if ((ret = __db_appname(dbenv,
- DB_APP_DATA, NULL, backup, 0, NULL, &real_back)) != 0)
- goto out;
- if (__os_exists(real_back, NULL) == 0)
- if ((ret = __os_unlink(dbenv, real_back)) != 0)
- goto out;
- if ((ret = __db_txnlist_delete(dbenv, info,
- argp->name.data, TXNLIST_INVALID_ID, 1)) != 0)
- goto out;
- } else if (DB_UNDO(op)) {
- /*
- * Trying to undo. File may or may not have been deleted.
- * Try to move the backup to the original. If the backup
- * exists, then this is right. If it doesn't exist, then
- * nothing will happen and that's OK.
- */
- if ((ret = __db_backup_name(dbenv, argp->name.data,
- &backup, lsnp)) != 0)
- goto out;
- if ((ret = __db_appname(dbenv,
- DB_APP_DATA, NULL, backup, 0, NULL, &real_back)) != 0)
- goto out;
- if ((ret = __db_appname(dbenv, DB_APP_DATA,
- NULL, argp->name.data, 0, NULL, &real_name)) != 0)
- goto out;
- if (__os_exists(real_back, NULL) == 0)
- if ((ret =
- __os_rename(dbenv, real_back, real_name)) != 0)
- goto out;
- }
-
- *lsnp = argp->prev_lsn;
- ret = 0;
-
-out: if (argp != NULL)
- __os_free(argp, 0);
- if (backup != NULL)
- __os_freestr(backup);
- if (real_back != NULL)
- __os_freestr(real_back);
- if (real_name != NULL)
- __os_freestr(real_name);
- return (ret);
-}
-/*
- * __crdel_rename_recover --
- * Recovery function for rename.
- *
- * PUBLIC: int __crdel_rename_recover
- * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
- */
-int
-__crdel_rename_recover(dbenv, dbtp, lsnp, op, info)
- DB_ENV *dbenv;
- DBT *dbtp;
- DB_LSN *lsnp;
- db_recops op;
- void *info;
-{
- DB *dbp;
- __crdel_rename_args *argp;
- char *new_name, *real_name;
- int ret, set;
-
- COMPQUIET(info, NULL);
-
- REC_PRINT(__crdel_rename_print);
-
- new_name = real_name = NULL;
-
- if ((ret = __crdel_rename_read(dbenv, dbtp->data, &argp)) != 0)
- goto out;
-
- if ((ret = __db_fileid_to_db(dbenv, &dbp, argp->fileid, 0)) != 0)
- goto out;
- if (DB_REDO(op)) {
- /*
- * We don't use the dbp parameter to __log_filelist_update
- * in the rename case, so passing NULL for it is OK.
- */
- if ((ret = __log_filelist_update(dbenv, NULL,
- argp->fileid, argp->newname.data, &set)) != 0)
- goto out;
- if (set != 0) {
- if ((ret = __db_appname(dbenv, DB_APP_DATA,
- NULL, argp->name.data, 0, NULL, &real_name)) != 0)
- goto out;
- if (__os_exists(real_name, NULL) == 0) {
- if ((ret = __db_appname(dbenv,
- DB_APP_DATA, NULL, argp->newname.data,
- 0, NULL, &new_name)) != 0)
- goto out;
- /*
- * On Windows, the underlying file
- * must be closed to perform a remove.
- * The db will be closed by a
- * log_register record. Rename
- * has exclusive access to the db.
- */
- (void)__memp_fremove(dbp->mpf);
- if ((ret = memp_fclose(dbp->mpf)) != 0)
- goto out;
- dbp->mpf = NULL;
- if ((ret = __os_rename(dbenv,
- real_name, new_name)) != 0)
- goto out;
- }
- }
- } else {
- /*
- * We don't use the dbp parameter to __log_filelist_update
- * in the rename case, so passing NULL for it is OK.
- */
- if ((ret = __log_filelist_update(dbenv, NULL,
- argp->fileid, argp->name.data, &set)) != 0)
- goto out;
- if (set != 0) {
- if ((ret = __db_appname(dbenv, DB_APP_DATA,
- NULL, argp->newname.data, 0, NULL, &new_name)) != 0)
- goto out;
- if (__os_exists(new_name, NULL) == 0) {
- if ((ret = __db_appname(dbenv,
- DB_APP_DATA, NULL, argp->name.data,
- 0, NULL, &real_name)) != 0)
- goto out;
- /*
- * On Windows, the underlying file
- * must be closed to perform a remove.
- * The file may have already been closed
- * if we are aborting the transaction.
- */
- if (dbp->mpf != NULL) {
- (void)__memp_fremove(dbp->mpf);
- if ((ret = memp_fclose(dbp->mpf)) != 0)
- goto out;
- dbp->mpf = NULL;
- }
- if ((ret = __os_rename(dbenv,
- new_name, real_name)) != 0)
- goto out;
- }
- }
- }
-
- *lsnp = argp->prev_lsn;
- ret = 0;
-
-out: if (argp != NULL)
- __os_free(argp, 0);
-
- if (new_name != NULL)
- __os_free(new_name, 0);
-
- if (real_name != NULL)
- __os_free(real_name, 0);
-
- return (ret);
+out: if (pagep != NULL)
+ (void)mpf->put(mpf, pagep, 0);
+ REC_CLOSE;
}
diff --git a/bdb/db/db.c b/bdb/db/db.c
index 6e74b4b21bd..986167d5ade 100644
--- a/bdb/db/db.c
+++ b/bdb/db/db.c
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Copyright (c) 1996-2002
* Sleepycat Software. All rights reserved.
*/
/*
@@ -40,7 +40,7 @@
#include "db_config.h"
#ifndef lint
-static const char revid[] = "$Id: db.c,v 11.117 2001/01/11 18:19:50 bostic Exp $";
+static const char revid[] = "$Id: db.c,v 11.246 2002/08/20 14:40:00 margo Exp $";
#endif /* not lint */
#ifndef NO_SYSTEM_INCLUDES
@@ -52,352 +52,41 @@ static const char revid[] = "$Id: db.c,v 11.117 2001/01/11 18:19:50 bostic Exp $
#endif
#include "db_int.h"
-#include "db_page.h"
-#include "db_shash.h"
-#include "db_swap.h"
-#include "btree.h"
-#include "db_am.h"
-#include "hash.h"
-#include "lock.h"
-#include "log.h"
-#include "mp.h"
-#include "qam.h"
-#include "common_ext.h"
-
-/* Actions that __db_master_update can take. */
-typedef enum { MU_REMOVE, MU_RENAME, MU_OPEN } mu_action;
-
-/* Flag values that __db_file_setup can return. */
-#define DB_FILE_SETUP_CREATE 0x01
-#define DB_FILE_SETUP_ZERO 0x02
-
-static int __db_file_setup __P((DB *,
- const char *, u_int32_t, int, db_pgno_t, int *));
-static int __db_master_update __P((DB *,
- const char *, u_int32_t,
- db_pgno_t *, mu_action, const char *, u_int32_t));
-static int __db_refresh __P((DB *));
-static int __db_remove_callback __P((DB *, void *));
-static int __db_set_pgsize __P((DB *, DB_FH *, char *));
-static int __db_subdb_remove __P((DB *, const char *, const char *));
-static int __db_subdb_rename __P(( DB *,
- const char *, const char *, const char *));
-#if CONFIG_TEST
+#include "dbinc/db_page.h"
+#include "dbinc/db_shash.h"
+#include "dbinc/db_swap.h"
+#include "dbinc/btree.h"
+#include "dbinc/hash.h"
+#include "dbinc/lock.h"
+#include "dbinc/log.h"
+#include "dbinc/log.h"
+#include "dbinc/mp.h"
+#include "dbinc/qam.h"
+#include "dbinc/txn.h"
+
+static int __db_disassociate __P((DB *));
+#if CONFIG_TEST
static void __db_makecopy __P((const char *, const char *));
-static int __db_testdocopy __P((DB *, const char *));
-static int __qam_testdocopy __P((DB *, const char *));
+static int __db_testdocopy __P((DB_ENV *, const char *));
+static int __qam_testdocopy __P((DB *, const char *));
#endif
/*
- * __db_open --
- * Main library interface to the DB access methods.
- *
- * PUBLIC: int __db_open __P((DB *,
- * PUBLIC: const char *, const char *, DBTYPE, u_int32_t, int));
+ * DB.C --
+ * This file contains the utility functions for the DBP layer.
*/
-int
-__db_open(dbp, name, subdb, type, flags, mode)
- DB *dbp;
- const char *name, *subdb;
- DBTYPE type;
- u_int32_t flags;
- int mode;
-{
- DB_ENV *dbenv;
- DB_LOCK open_lock;
- DB *mdbp;
- db_pgno_t meta_pgno;
- u_int32_t ok_flags;
- int ret, t_ret;
-
- dbenv = dbp->dbenv;
- mdbp = NULL;
-
- /* Validate arguments. */
-#define OKFLAGS \
- (DB_CREATE | DB_EXCL | DB_FCNTL_LOCKING | \
- DB_NOMMAP | DB_RDONLY | DB_RDWRMASTER | DB_THREAD | DB_TRUNCATE)
- if ((ret = __db_fchk(dbenv, "DB->open", flags, OKFLAGS)) != 0)
- return (ret);
- if (LF_ISSET(DB_EXCL) && !LF_ISSET(DB_CREATE))
- return (__db_ferr(dbenv, "DB->open", 1));
- if (LF_ISSET(DB_RDONLY) && LF_ISSET(DB_CREATE))
- return (__db_ferr(dbenv, "DB->open", 1));
-#ifdef HAVE_VXWORKS
- if (LF_ISSET(DB_TRUNCATE)) {
- __db_err(dbenv, "DB_TRUNCATE unsupported in VxWorks");
- return (__db_eopnotsup(dbenv));
- }
-#endif
- switch (type) {
- case DB_UNKNOWN:
- if (LF_ISSET(DB_CREATE|DB_TRUNCATE)) {
- __db_err(dbenv,
- "%s: DB_UNKNOWN type specified with DB_CREATE or DB_TRUNCATE",
- name);
- return (EINVAL);
- }
- ok_flags = 0;
- break;
- case DB_BTREE:
- ok_flags = DB_OK_BTREE;
- break;
- case DB_HASH:
- ok_flags = DB_OK_HASH;
- break;
- case DB_QUEUE:
- ok_flags = DB_OK_QUEUE;
- break;
- case DB_RECNO:
- ok_flags = DB_OK_RECNO;
- break;
- default:
- __db_err(dbenv, "unknown type: %lu", (u_long)type);
- return (EINVAL);
- }
- if (ok_flags)
- DB_ILLEGAL_METHOD(dbp, ok_flags);
-
- /* The environment may have been created, but never opened. */
- if (!F_ISSET(dbenv, DB_ENV_DBLOCAL | DB_ENV_OPEN_CALLED)) {
- __db_err(dbenv, "environment not yet opened");
- return (EINVAL);
- }
-
- /*
- * Historically, you could pass in an environment that didn't have a
- * mpool, and DB would create a private one behind the scenes. This
- * no longer works.
- */
- if (!F_ISSET(dbenv, DB_ENV_DBLOCAL) && !MPOOL_ON(dbenv)) {
- __db_err(dbenv, "environment did not include a memory pool.");
- return (EINVAL);
- }
-
- /*
- * You can't specify threads during DB->open if subsystems in the
- * environment weren't configured with them.
- */
- if (LF_ISSET(DB_THREAD) &&
- !F_ISSET(dbenv, DB_ENV_DBLOCAL | DB_ENV_THREAD)) {
- __db_err(dbenv, "environment not created using DB_THREAD");
- return (EINVAL);
- }
-
- /*
- * If the environment was configured with threads, the DB handle
- * must also be free-threaded, so we force the DB_THREAD flag on.
- * (See SR #2033 for why this is a requirement--recovery needs
- * to be able to grab a dbp using __db_fileid_to_dbp, and it has
- * no way of knowing which dbp goes with which thread, so whichever
- * one it finds has to be usable in any of them.)
- */
- if (F_ISSET(dbenv, DB_ENV_THREAD))
- LF_SET(DB_THREAD);
-
- /* DB_TRUNCATE is not transaction recoverable. */
- if (LF_ISSET(DB_TRUNCATE) && TXN_ON(dbenv)) {
- __db_err(dbenv,
- "DB_TRUNCATE illegal in a transaction protected environment");
- return (EINVAL);
- }
-
- /* Subdatabase checks. */
- if (subdb != NULL) {
- /* Subdatabases must be created in named files. */
- if (name == NULL) {
- __db_err(dbenv,
- "multiple databases cannot be created in temporary files");
- return (EINVAL);
- }
-
- /* QAM can't be done as a subdatabase. */
- if (type == DB_QUEUE) {
- __db_err(dbenv, "Queue databases must be one-per-file");
- return (EINVAL);
- }
- }
-
- /* Convert any DB->open flags. */
- if (LF_ISSET(DB_RDONLY))
- F_SET(dbp, DB_AM_RDONLY);
-
- /* Fill in the type. */
- dbp->type = type;
-
- /*
- * If we're potentially creating a database, wrap the open inside of
- * a transaction.
- */
- if (TXN_ON(dbenv) && LF_ISSET(DB_CREATE))
- if ((ret = __db_metabegin(dbp, &open_lock)) != 0)
- return (ret);
-
- /*
- * If we're opening a subdatabase, we have to open (and potentially
- * create) the main database, and then get (and potentially store)
- * our base page number in that database. Then, we can finally open
- * the subdatabase.
- */
- if (subdb == NULL)
- meta_pgno = PGNO_BASE_MD;
- else {
- /*
- * Open the master database, optionally creating or updating
- * it, and retrieve the metadata page number.
- */
- if ((ret =
- __db_master_open(dbp, name, flags, mode, &mdbp)) != 0)
- goto err;
-
- /* Copy the page size and file id from the master. */
- dbp->pgsize = mdbp->pgsize;
- F_SET(dbp, DB_AM_SUBDB);
- memcpy(dbp->fileid, mdbp->fileid, DB_FILE_ID_LEN);
-
- if ((ret = __db_master_update(mdbp,
- subdb, type, &meta_pgno, MU_OPEN, NULL, flags)) != 0)
- goto err;
-
- /*
- * Clear the exclusive open and truncation flags, they only
- * apply to the open of the master database.
- */
- LF_CLR(DB_EXCL | DB_TRUNCATE);
- }
-
- ret = __db_dbopen(dbp, name, flags, mode, meta_pgno);
-
- /*
- * You can open the database that describes the subdatabases in the
- * rest of the file read-only. The content of each key's data is
- * unspecified and applications should never be adding new records
- * or updating existing records. However, during recovery, we need
- * to open these databases R/W so we can redo/undo changes in them.
- * Likewise, we need to open master databases read/write during
- * rename and remove so we can be sure they're fully sync'ed, so
- * we provide an override flag for the purpose.
- */
- if (subdb == NULL && !IS_RECOVERING(dbenv) && !LF_ISSET(DB_RDONLY) &&
- !LF_ISSET(DB_RDWRMASTER) && F_ISSET(dbp, DB_AM_SUBDB)) {
- __db_err(dbenv,
- "files containing multiple databases may only be opened read-only");
- ret = EINVAL;
- goto err;
- }
-
-err: /*
- * End any transaction, committing if we were successful, aborting
- * otherwise.
- */
- if (TXN_ON(dbenv) && LF_ISSET(DB_CREATE))
- if ((t_ret = __db_metaend(dbp,
- &open_lock, ret == 0, NULL, NULL)) != 0 && ret == 0)
- ret = t_ret;
-
- /* If we were successful, don't discard the file on close. */
- if (ret == 0)
- F_CLR(dbp, DB_AM_DISCARD);
-
- /* If we were unsuccessful, destroy the DB handle. */
- if (ret != 0) {
- /* In recovery we set log_fileid early. */
- if (IS_RECOVERING(dbenv))
- dbp->log_fileid = DB_LOGFILEID_INVALID;
- __db_refresh(dbp);
- }
-
- if (mdbp != NULL) {
- /* If we were successful, don't discard the file on close. */
- if (ret == 0)
- F_CLR(mdbp, DB_AM_DISCARD);
- if ((t_ret = mdbp->close(mdbp, 0)) != 0 && ret == 0)
- ret = t_ret;
- }
-
- return (ret);
-}
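
The removed __db_open above wraps a potentially creating open inside a metadata transaction, committing on success and aborting on failure while preserving the first error seen. The sketch below shows only that shape, with hypothetical begin/commit/abort callbacks standing in for the real transaction API.

static int
with_meta_txn(void *env, int (*begin)(void *, void **),
    int (*commit)(void *), int (*abort_fn)(void *),
    int (*body)(void *, void *), void *arg)
{
    void *txn;
    int ret, t_ret;

    if ((ret = begin(env, &txn)) != 0)
        return (ret);
    ret = body(env, arg);

    /* Commit if the body succeeded, abort otherwise; keep the first error. */
    t_ret = ret == 0 ? commit(txn) : abort_fn(txn);
    if (t_ret != 0 && ret == 0)
        ret = t_ret;
    return (ret);
}
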
-
-/*
- * __db_dbopen --
- * Open a database.
- * PUBLIC: int __db_dbopen __P((DB *, const char *, u_int32_t, int, db_pgno_t));
- */
-int
-__db_dbopen(dbp, name, flags, mode, meta_pgno)
- DB *dbp;
- const char *name;
- u_int32_t flags;
- int mode;
- db_pgno_t meta_pgno;
-{
- DB_ENV *dbenv;
- int ret, retinfo;
-
- dbenv = dbp->dbenv;
-
- /* Set up the underlying file. */
- if ((ret = __db_file_setup(dbp,
- name, flags, mode, meta_pgno, &retinfo)) != 0)
- return (ret);
-
- /*
- * If we created the file, set the truncate flag for the mpool. This
- * isn't for anything we've done, it's protection against stupid user
- * tricks: if the user deleted a file behind Berkeley DB's back, we
- * may still have pages in the mpool that match the file's "unique" ID.
- */
- if (retinfo & DB_FILE_SETUP_CREATE)
- flags |= DB_TRUNCATE;
-
- /* Set up the underlying environment. */
- if ((ret = __db_dbenv_setup(dbp, name, flags)) != 0)
- return (ret);
-
- /*
- * Do access method specific initialization.
- *
- * !!!
- * Set the open flag. (The underlying access method open functions
- * may want to do things like acquire cursors, so the open flag has
- * to be set before calling them.)
- */
- F_SET(dbp, DB_OPEN_CALLED);
-
- if (retinfo & DB_FILE_SETUP_ZERO)
- return (0);
-
- switch (dbp->type) {
- case DB_BTREE:
- ret = __bam_open(dbp, name, meta_pgno, flags);
- break;
- case DB_HASH:
- ret = __ham_open(dbp, name, meta_pgno, flags);
- break;
- case DB_RECNO:
- ret = __ram_open(dbp, name, meta_pgno, flags);
- break;
- case DB_QUEUE:
- ret = __qam_open(dbp, name, meta_pgno, mode, flags);
- break;
- case DB_UNKNOWN:
- return (__db_unknown_type(dbp->dbenv,
- "__db_dbopen", dbp->type));
- break;
- }
- return (ret);
-}
/*
* __db_master_open --
* Open up a handle on a master database.
*
* PUBLIC: int __db_master_open __P((DB *,
- * PUBLIC: const char *, u_int32_t, int, DB **));
+ * PUBLIC: DB_TXN *, const char *, u_int32_t, int, DB **));
*/
int
-__db_master_open(subdbp, name, flags, mode, dbpp)
+__db_master_open(subdbp, txn, name, flags, mode, dbpp)
DB *subdbp;
+ DB_TXN *txn;
const char *name;
u_int32_t flags;
int mode;
@@ -417,30 +106,62 @@ __db_master_open(subdbp, name, flags, mode, dbpp)
* Flag that we're creating a database with subdatabases.
*/
dbp->type = DB_BTREE;
- dbp->open_txn = subdbp->open_txn;
dbp->pgsize = subdbp->pgsize;
F_SET(dbp, DB_AM_SUBDB);
+ F_SET(dbp, F_ISSET(subdbp,
+ DB_AM_RECOVER | DB_AM_SWAP | DB_AM_ENCRYPT | DB_AM_CHKSUM));
- if ((ret = __db_dbopen(dbp, name, flags, mode, PGNO_BASE_MD)) != 0) {
- if (!F_ISSET(dbp, DB_AM_DISCARD))
- dbp->close(dbp, 0);
- return (ret);
- }
+ /*
+ * If there was a subdb specified, then we only want to apply
+ * DB_EXCL to the subdb, not the actual file. We only got here
+ * because there was a subdb specified.
+ */
+ LF_CLR(DB_EXCL);
+ LF_SET(DB_RDWRMASTER);
+ if ((ret = __db_dbopen(dbp, txn, name, NULL, flags, mode, PGNO_BASE_MD))
+ != 0)
+ goto err;
- *dbpp = dbp;
- return (0);
+ /*
+ * Verify that the pagesize is the same on both handles.
+ * The fields in dbp have now been initialized from the
+ * meta page; they were set in __db_dbopen when we either
+ * read or created the master file.
+ * Other items such as checksum and encryption are
+ * checked when we read the meta-page. So we do not
+ * check those here. However, if the meta-page caused
+ * chksumming to be turned on and it wasn't already, set
+ * it here.
+ */
+ if (F_ISSET(dbp, DB_AM_CHKSUM))
+ F_SET(subdbp, DB_AM_CHKSUM);
+ if (subdbp->pgsize != 0 && dbp->pgsize != subdbp->pgsize) {
+ ret = EINVAL;
+ __db_err(dbp->dbenv,
+ "Different pagesize specified on existent file");
+ goto err;
+ }
+err:
+ if (ret != 0 && !F_ISSET(dbp, DB_AM_DISCARD))
+ __db_close_i(dbp, txn, 0);
+ else
+ *dbpp = dbp;
+ return (ret);
}
/*
* __db_master_update --
- * Add/Remove a subdatabase from a master database.
+ * Add/Open/Remove a subdatabase from a master database.
+ *
+ * PUBLIC: int __db_master_update __P((DB *, DB *, DB_TXN *, const char *,
+ * PUBLIC: DBTYPE, mu_action, const char *, u_int32_t));
*/
-static int
-__db_master_update(mdbp, subdb, type, meta_pgnop, action, newname, flags)
- DB *mdbp;
+int
+__db_master_update(mdbp, sdbp, txn, subdb, type, action, newname, flags)
+ DB *mdbp, *sdbp;
+ DB_TXN *txn;
const char *subdb;
- u_int32_t type;
- db_pgno_t *meta_pgnop; /* may be NULL on MU_RENAME */
+ DBTYPE type;
mu_action action;
const char *newname;
u_int32_t flags;
@@ -456,33 +177,37 @@ __db_master_update(mdbp, subdb, type, meta_pgnop, action, newname, flags)
dbc = ndbc = NULL;
p = NULL;
- /* Might we modify the master database? If so, we'll need to lock. */
- modify = (action != MU_OPEN || LF_ISSET(DB_CREATE)) ? 1 : 0;
-
memset(&key, 0, sizeof(key));
memset(&data, 0, sizeof(data));
+ /* Might we modify the master database? If so, we'll need to lock. */
+ modify = (action != MU_OPEN || LF_ISSET(DB_CREATE)) ? 1 : 0;
+
/*
* Open up a cursor. If this is CDB and we're creating the database,
* make it an update cursor.
*/
- if ((ret = mdbp->cursor(mdbp, mdbp->open_txn, &dbc,
+ if ((ret = mdbp->cursor(mdbp, txn, &dbc,
(CDB_LOCKING(dbenv) && modify) ? DB_WRITECURSOR : 0)) != 0)
goto err;
/*
- * Try to point the cursor at the record.
+ * Point the cursor at the record.
*
* If we're removing or potentially creating an entry, lock the page
* with DB_RMW.
*
+ * In some cases we do multiple operations with this cursor and
+ * subsequently access the data DBT information. Set DB_DBT_MALLOC so
+ * we don't risk modification of the data between our uses of it.
+ *
* !!!
* We don't include the name's nul termination in the database.
*/
- key.data = (char *)subdb;
- key.size = strlen(subdb);
- /* In the rename case, we do multiple cursor ops, so MALLOC is safer. */
+ key.data = (void *)subdb;
+ key.size = (u_int32_t)strlen(subdb);
F_SET(&data, DB_DBT_MALLOC);
+
ret = dbc->c_get(dbc, &key, &data,
DB_SET | ((STD_LOCKING(dbc) && modify) ? DB_RMW : 0));
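
The DB_DBT_MALLOC idiom used above can be exercised in isolation roughly as follows. This is a hedged sketch against the public Berkeley DB 4.1 cursor API, with error handling abbreviated; lookup_subdb_entry is a made-up helper name.

#include <stdlib.h>
#include <string.h>
#include <db.h>

static int
lookup_subdb_entry(DB *mdbp, DB_TXN *txn, const char *subdb)
{
    DBC *dbc;
    DBT key, data;
    int ret, t_ret;

    memset(&key, 0, sizeof(key));
    memset(&data, 0, sizeof(data));
    key.data = (void *)subdb;
    key.size = (u_int32_t)strlen(subdb);

    /*
     * Have the library malloc the returned data so it stays valid
     * across later operations on the same cursor.
     */
    data.flags = DB_DBT_MALLOC;

    if ((ret = mdbp->cursor(mdbp, txn, &dbc, 0)) != 0)
        return (ret);
    ret = dbc->c_get(dbc, &key, &data, DB_SET);

    if (data.data != NULL)
        free(data.data);
    if ((t_ret = dbc->c_close(dbc)) != 0 && ret == 0)
        ret = t_ret;
    return (ret);
}
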
@@ -514,9 +239,10 @@ __db_master_update(mdbp, subdb, type, meta_pgnop, action, newname, flags)
* so it hasn't been converted to/from opposite
* endian architectures. Do it explicitly, now.
*/
- memcpy(meta_pgnop, data.data, sizeof(db_pgno_t));
- DB_NTOHL(meta_pgnop);
- if ((ret = memp_fget(mdbp->mpf, meta_pgnop, 0, &p)) != 0)
+ memcpy(&sdbp->meta_pgno, data.data, sizeof(db_pgno_t));
+ DB_NTOHL(&sdbp->meta_pgno);
+ if ((ret =
+ mdbp->mpf->get(mdbp->mpf, &sdbp->meta_pgno, 0, &p)) != 0)
goto err;
/* Free and put the page. */
@@ -538,11 +264,11 @@ __db_master_update(mdbp, subdb, type, meta_pgnop, action, newname, flags)
* for the existence of newname; it shouldn't appear under
* us since we hold the metadata lock.
*/
- if ((ret = mdbp->cursor(mdbp, mdbp->open_txn, &ndbc, 0)) != 0)
+ if ((ret = mdbp->cursor(mdbp, txn, &ndbc, 0)) != 0)
goto err;
DB_ASSERT(newname != NULL);
- key.data = (void *) newname;
- key.size = strlen(newname);
+ key.data = (void *)newname;
+ key.size = (u_int32_t)strlen(newname);
/*
* We don't actually care what the meta page of the potentially-
@@ -583,8 +309,12 @@ __db_master_update(mdbp, subdb, type, meta_pgnop, action, newname, flags)
*/
switch (ret) {
case 0:
- memcpy(meta_pgnop, data.data, sizeof(db_pgno_t));
- DB_NTOHL(meta_pgnop);
+ if (LF_ISSET(DB_CREATE) && LF_ISSET(DB_EXCL)) {
+ ret = EEXIST;
+ goto err;
+ }
+ memcpy(&sdbp->meta_pgno, data.data, sizeof(db_pgno_t));
+ DB_NTOHL(&sdbp->meta_pgno);
goto done;
case DB_NOTFOUND:
if (LF_ISSET(DB_CREATE))
@@ -599,10 +329,22 @@ __db_master_update(mdbp, subdb, type, meta_pgnop, action, newname, flags)
goto err;
}
+ /*
+ * We need to check against the master lorder here because
+ * we only want to check this if we are creating. In the
+ * case where we don't create we just want to inherit.
+ */
+ if (F_ISSET(mdbp, DB_AM_SWAP) != F_ISSET(sdbp, DB_AM_SWAP)) {
+ ret = EINVAL;
+ __db_err(mdbp->dbenv,
+ "Different lorder specified on existent file");
+ goto err;
+ }
+ /* Create a subdatabase. */
if ((ret = __db_new(dbc,
type == DB_HASH ? P_HASHMETA : P_BTREEMETA, &p)) != 0)
goto err;
- *meta_pgnop = PGNO(p);
+ sdbp->meta_pgno = PGNO(p);
/*
* XXX
@@ -617,6 +359,7 @@ __db_master_update(mdbp, subdb, type, meta_pgnop, action, newname, flags)
ndata.size = sizeof(db_pgno_t);
if ((ret = dbc->c_put(dbc, &key, &ndata, DB_KEYLAST)) != 0)
goto err;
+ F_SET(sdbp, DB_AM_CREATED);
break;
}
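
The subdatabase record stores its meta page number in network byte order, so the code above converts it explicitly with DB_NTOHL after the memcpy. A small stand-alone sketch of the same conversion, using POSIX htonl()/ntohl() instead of the library's internal macro:

#include <arpa/inet.h>
#include <stdint.h>
#include <string.h>

static void
store_pgno(void *rec, uint32_t pgno)
{
    uint32_t net = htonl(pgno);     /* write in network order */
    memcpy(rec, &net, sizeof(net));
}

static uint32_t
load_pgno(const void *rec)
{
    uint32_t net;
    memcpy(&net, rec, sizeof(net)); /* unaligned-safe copy */
    return (ntohl(net));            /* convert back to host order */
}
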
@@ -628,7 +371,7 @@ done: /*
if (p != NULL) {
if (ret == 0) {
if ((t_ret =
- memp_fput(mdbp->mpf, p, DB_MPOOL_DIRTY)) != 0)
+ mdbp->mpf->put(mdbp->mpf, p, DB_MPOOL_DIRTY)) != 0)
ret = t_ret;
/*
* Since we cannot close this file until after
@@ -639,12 +382,12 @@ done: /*
if ((t_ret = mdbp->sync(mdbp, 0)) != 0 && ret == 0)
ret = t_ret;
} else
- (void)__db_free(dbc, p);
+ (void)mdbp->mpf->put(mdbp->mpf, p, 0);
}
/* Discard the cursor(s) and data. */
if (data.data != NULL)
- __os_free(data.data, data.size);
+ __os_ufree(dbenv, data.data);
if (dbc != NULL && (t_ret = dbc->c_close(dbc)) != 0 && ret == 0)
ret = t_ret;
if (ndbc != NULL && (t_ret = ndbc->c_close(ndbc)) != 0 && ret == 0)
@@ -657,21 +400,25 @@ done: /*
* __db_dbenv_setup --
* Set up the underlying environment during a db_open.
*
- * PUBLIC: int __db_dbenv_setup __P((DB *, const char *, u_int32_t));
+ * PUBLIC: int __db_dbenv_setup __P((DB *,
+ * PUBLIC: DB_TXN *, const char *, u_int32_t, u_int32_t));
*/
int
-__db_dbenv_setup(dbp, name, flags)
+__db_dbenv_setup(dbp, txn, name, id, flags)
DB *dbp;
+ DB_TXN *txn;
const char *name;
+ u_int32_t id;
u_int32_t flags;
{
DB *ldbp;
- DB_ENV *dbenv;
DBT pgcookie;
- DB_MPOOL_FINFO finfo;
+ DB_ENV *dbenv;
+ DB_MPOOL *dbmp;
+ DB_MPOOLFILE *mpf;
DB_PGINFO pginfo;
- int ret;
u_int32_t maxid;
+ int ftype, ret;
dbenv = dbp->dbenv;
@@ -690,8 +437,18 @@ __db_dbenv_setup(dbp, name, flags)
}
/* Register DB's pgin/pgout functions. */
- if ((ret =
- memp_register(dbenv, DB_FTYPE_SET, __db_pgin, __db_pgout)) != 0)
+ if ((ret = dbenv->memp_register(
+ dbenv, DB_FTYPE_SET, __db_pgin, __db_pgout)) != 0)
+ return (ret);
+
+ /* Create the DB_MPOOLFILE structure. */
+ if ((ret = dbenv->memp_fcreate(dbenv, &dbp->mpf, 0)) != 0)
+ return (ret);
+ mpf = dbp->mpf;
+
+ /* Set the database's cache priority if we've been given one. */
+ if (dbp->priority != 0 &&
+ (ret = mpf->set_priority(mpf, dbp->priority)) != 0)
return (ret);
/*
@@ -704,22 +461,26 @@ __db_dbenv_setup(dbp, name, flags)
* need to page the file in and out. This has to be right -- we can't
* mmap files that are being paged in and out.
*/
- memset(&finfo, 0, sizeof(finfo));
switch (dbp->type) {
case DB_BTREE:
case DB_RECNO:
- finfo.ftype =
- F_ISSET(dbp, DB_AM_SWAP) ? DB_FTYPE_SET : DB_FTYPE_NOTSET;
- finfo.clear_len = DB_PAGE_DB_LEN;
+ ftype = F_ISSET(dbp, DB_AM_SWAP | DB_AM_ENCRYPT | DB_AM_CHKSUM)
+ ? DB_FTYPE_SET : DB_FTYPE_NOTSET;
+ (void)mpf->set_ftype(mpf, ftype);
+ (void)mpf->set_clear_len(mpf, (CRYPTO_ON(dbenv) ?
+ dbp->pgsize : DB_PAGE_DB_LEN));
break;
case DB_HASH:
- finfo.ftype = DB_FTYPE_SET;
- finfo.clear_len = DB_PAGE_DB_LEN;
+ (void)mpf->set_ftype(mpf, DB_FTYPE_SET);
+ (void)mpf->set_clear_len(mpf, (CRYPTO_ON(dbenv) ?
+ dbp->pgsize : DB_PAGE_DB_LEN));
break;
case DB_QUEUE:
- finfo.ftype =
- F_ISSET(dbp, DB_AM_SWAP) ? DB_FTYPE_SET : DB_FTYPE_NOTSET;
- finfo.clear_len = DB_PAGE_QUEUE_LEN;
+ ftype = F_ISSET(dbp, DB_AM_SWAP | DB_AM_ENCRYPT | DB_AM_CHKSUM)
+ ? DB_FTYPE_SET : DB_FTYPE_NOTSET;
+ (void)mpf->set_ftype(mpf, ftype);
+ (void)mpf->set_clear_len(mpf, (CRYPTO_ON(dbenv) ?
+ dbp->pgsize : DB_PAGE_QUEUE_LEN));
break;
case DB_UNKNOWN:
/*
@@ -735,48 +496,63 @@ __db_dbenv_setup(dbp, name, flags)
* to salvage some data even with no metadata page.
*/
if (F_ISSET(dbp, DB_AM_VERIFYING)) {
- finfo.ftype = DB_FTYPE_NOTSET;
- finfo.clear_len = DB_PAGE_DB_LEN;
+ (void)mpf->set_ftype(mpf, DB_FTYPE_NOTSET);
+ (void)mpf->set_clear_len(mpf, DB_PAGE_DB_LEN);
break;
}
- return (__db_unknown_type(dbp->dbenv,
- "__db_dbenv_setup", dbp->type));
+ /* FALLTHROUGH */
+ default:
+ return (
+ __db_unknown_type(dbenv, "__db_dbenv_setup", dbp->type));
}
- finfo.pgcookie = &pgcookie;
- finfo.fileid = dbp->fileid;
- finfo.lsn_offset = 0;
+
+ (void)mpf->set_fileid(mpf, dbp->fileid);
+ (void)mpf->set_lsn_offset(mpf, 0);
pginfo.db_pagesize = dbp->pgsize;
- pginfo.needswap = F_ISSET(dbp, DB_AM_SWAP);
+ pginfo.flags =
+ F_ISSET(dbp, (DB_AM_CHKSUM | DB_AM_ENCRYPT | DB_AM_SWAP));
+ pginfo.type = dbp->type;
pgcookie.data = &pginfo;
pgcookie.size = sizeof(DB_PGINFO);
+ (void)mpf->set_pgcookie(mpf, &pgcookie);
- if ((ret = memp_fopen(dbenv, name,
- LF_ISSET(DB_RDONLY | DB_NOMMAP | DB_ODDFILESIZE | DB_TRUNCATE),
- 0, dbp->pgsize, &finfo, &dbp->mpf)) != 0)
+ if ((ret = mpf->open(mpf, name,
+ LF_ISSET(DB_RDONLY | DB_NOMMAP | DB_ODDFILESIZE | DB_TRUNCATE) |
+ (F_ISSET(dbenv, DB_ENV_DIRECT_DB) ? DB_DIRECT : 0),
+ 0, dbp->pgsize)) != 0)
return (ret);
/*
- * We may need a per-thread mutex. Allocate it from the environment
+ * We may need a per-thread mutex. Allocate it from the mpool
* region, there's supposed to be extra space there for that purpose.
*/
if (LF_ISSET(DB_THREAD)) {
- if ((ret = __db_mutex_alloc(
- dbenv, dbenv->reginfo, (MUTEX **)&dbp->mutexp)) != 0)
+ dbmp = dbenv->mp_handle;
+ if ((ret = __db_mutex_setup(dbenv, dbmp->reginfo, &dbp->mutexp,
+ MUTEX_ALLOC | MUTEX_THREAD)) != 0)
return (ret);
- if ((ret = __db_mutex_init(
- dbenv, dbp->mutexp, 0, MUTEX_THREAD)) != 0) {
- __db_mutex_free(dbenv, dbenv->reginfo, dbp->mutexp);
- return (ret);
- }
}
- /* Get a log file id. */
- if (LOGGING_ON(dbenv) && !IS_RECOVERING(dbenv) &&
+ /*
+ * Set up a bookkeeping entry for this database in the log region,
+ * if such a region exists. Note that even if we're in recovery
+ * or a replication client, where we won't log registries, we'll
+ * still need an FNAME struct, so LOGGING_ON is the correct macro.
+ */
+ if (LOGGING_ON(dbenv) &&
+ (ret = __dbreg_setup(dbp, name, id)) != 0)
+ return (ret);
+
+ /*
+ * If we're actively logging and our caller isn't a recovery function
+ * that already did so, assign this dbp a log fileid.
+ */
+ if (DBENV_LOGGING(dbenv) && !F_ISSET(dbp, DB_AM_RECOVER) &&
#if !defined(DEBUG_ROP)
!F_ISSET(dbp, DB_AM_RDONLY) &&
#endif
- (ret = log_register(dbenv, dbp, name)) != 0)
+ (ret = __dbreg_new_id(dbp, txn)) != 0)
return (ret);
/*
@@ -822,541 +598,69 @@ __db_dbenv_setup(dbp, name, flags)
}
/*
- * __db_file_setup --
- * Setup the file or in-memory data.
- * Read the database metadata and resolve it with our arguments.
+ * __db_close --
+ * DB destructor.
+ *
+ * PUBLIC: int __db_close __P((DB *, u_int32_t));
*/
-static int
-__db_file_setup(dbp, name, flags, mode, meta_pgno, retflags)
+int
+__db_close(dbp, flags)
DB *dbp;
- const char *name;
u_int32_t flags;
- int mode;
- db_pgno_t meta_pgno;
- int *retflags;
-{
- DB *mdb;
- DBT namedbt;
- DB_ENV *dbenv;
- DB_FH *fhp, fh;
- DB_LSN lsn;
- DB_TXN *txn;
- size_t nr;
- u_int32_t magic, oflags;
- int ret, retry_cnt, t_ret;
- char *real_name, mbuf[DBMETASIZE];
-
-#define IS_SUBDB_SETUP (meta_pgno != PGNO_BASE_MD)
-
- dbenv = dbp->dbenv;
- dbp->meta_pgno = meta_pgno;
- txn = NULL;
- *retflags = 0;
-
- /*
- * If we open a file handle and our caller is doing fcntl(2) locking,
- * we can't close it because that would discard the caller's lock.
- * Save it until we close the DB handle.
- */
- if (LF_ISSET(DB_FCNTL_LOCKING)) {
- if ((ret = __os_malloc(dbenv, sizeof(*fhp), NULL, &fhp)) != 0)
- return (ret);
- } else
- fhp = &fh;
- memset(fhp, 0, sizeof(*fhp));
-
- /*
- * If the file is in-memory, set up is simple. Otherwise, do the
- * hard work of opening and reading the file.
- *
- * If we have a file name, try and read the first page, figure out
- * what type of file it is, and initialize everything we can based
- * on that file's meta-data page.
- *
- * !!!
- * There's a reason we don't push this code down into the buffer cache.
- * The problem is that there's no information external to the file that
- * we can use as a unique ID. UNIX has dev/inode pairs, but they are
- * not necessarily unique after reboot, if the file was mounted via NFS.
- * Windows has similar problems, as the FAT filesystem doesn't maintain
- * dev/inode numbers across reboot. So, we must get something from the
- * file we can use to ensure that, even after a reboot, the file we're
- * joining in the cache is the right file for us to join. The solution
- * we use is to maintain a file ID that's stored in the database, and
- * that's why we have to open and read the file before calling into the
- * buffer cache.
- *
- * The secondary reason is that there's additional information that
- * we want to have before instantiating a file in the buffer cache:
- * the page size, file type (btree/hash), if swapping is required,
- * and flags (DB_RDONLY, DB_CREATE, DB_TRUNCATE). We could handle
- * needing this information by allowing it to be set for a file in
- * the buffer cache even after the file has been opened, and, of
- * course, supporting the ability to flush a file from the cache as
- * necessary, e.g., if we guessed wrongly about the page size. Given
- * that we have to read the file anyway to get the file ID, we might
- * as well get the rest, too.
- *
- * Get the real file name.
- */
- if (name == NULL) {
- F_SET(dbp, DB_AM_INMEM);
-
- if (dbp->type == DB_UNKNOWN) {
- __db_err(dbenv,
- "DBTYPE of unknown without existing file");
- return (EINVAL);
- }
- real_name = NULL;
-
- /* Set the page size if we don't have one yet. */
- if (dbp->pgsize == 0)
- dbp->pgsize = DB_DEF_IOSIZE;
-
- /*
- * If the file is a temporary file and we're doing locking,
- * then we have to create a unique file ID. We can't use our
- * normal dev/inode pair (or whatever this OS uses in place of
- * dev/inode pairs) because no backing file will be created
- * until the mpool cache is filled forcing the buffers to disk.
- * Grab a random locker ID to use as a file ID. The created
- * ID must never match a potential real file ID -- we know it
- * won't because real file IDs contain a time stamp after the
- * dev/inode pair, and we're simply storing a 4-byte value.
- *
- * !!!
- * Store the locker in the file id structure -- we can get it
- * from there as necessary, and it saves having two copies.
- */
- if (LOCKING_ON(dbenv) &&
- (ret = lock_id(dbenv, (u_int32_t *)dbp->fileid)) != 0)
- return (ret);
-
- return (0);
- }
-
- /* Get the real backing file name. */
- if ((ret = __db_appname(dbenv,
- DB_APP_DATA, NULL, name, 0, NULL, &real_name)) != 0)
- return (ret);
-
- /*
- * Open the backing file. We need to make sure that multiple processes
- * attempting to create the file at the same time are properly ordered
- * so that only one of them creates the "unique" file ID, so we open it
- * O_EXCL and O_CREAT so two simultaneous attempts to create the region
- * will return failure in one of the attempts. If we're the one that
- * fails, simply retry without the O_CREAT flag, which will require the
- * meta-data page exist.
- */
-
- /* Fill in the default file mode. */
- if (mode == 0)
- mode = __db_omode("rwrw--");
-
- oflags = 0;
- if (LF_ISSET(DB_RDONLY))
- oflags |= DB_OSO_RDONLY;
- if (LF_ISSET(DB_TRUNCATE))
- oflags |= DB_OSO_TRUNC;
-
- retry_cnt = 0;
-open_retry:
- *retflags = 0;
- ret = 0;
- if (!IS_SUBDB_SETUP && LF_ISSET(DB_CREATE)) {
- if (dbp->open_txn != NULL) {
- /*
- * Start a child transaction to wrap this individual
- * create.
- */
- if ((ret =
- txn_begin(dbenv, dbp->open_txn, &txn, 0)) != 0)
- goto err_msg;
-
- memset(&namedbt, 0, sizeof(namedbt));
- namedbt.data = (char *)name;
- namedbt.size = strlen(name) + 1;
- if ((ret = __crdel_fileopen_log(dbenv, txn,
- &lsn, DB_FLUSH, &namedbt, mode)) != 0)
- goto err_msg;
- }
- DB_TEST_RECOVERY(dbp, DB_TEST_PREOPEN, ret, name);
- if ((ret = __os_open(dbenv, real_name,
- oflags | DB_OSO_CREATE | DB_OSO_EXCL, mode, fhp)) == 0) {
- DB_TEST_RECOVERY(dbp, DB_TEST_POSTOPEN, ret, name);
-
- /* Commit the file create. */
- if (dbp->open_txn != NULL) {
- if ((ret = txn_commit(txn, DB_TXN_SYNC)) != 0)
- goto err_msg;
- txn = NULL;
- }
-
- /*
- * We created the file. This means that if we later
- * fail, we need to delete the file and if we're going
- * to do that, we need to trash any pages in the
- * memory pool. Since we only know here that we
- * created the file, we're going to set the flag here
- * and clear it later if we commit successfully.
- */
- F_SET(dbp, DB_AM_DISCARD);
- *retflags |= DB_FILE_SETUP_CREATE;
- } else {
- /*
- * Abort the file create. If the abort fails, report
- * the error returned by txn_abort(), rather than the
- * open error, for no particular reason.
- */
- if (dbp->open_txn != NULL) {
- if ((t_ret = txn_abort(txn)) != 0) {
- ret = t_ret;
- goto err_msg;
- }
- txn = NULL;
- }
-
- /*
- * If we were not doing an exclusive open, try again
- * without the create flag.
- */
- if (ret == EEXIST && !LF_ISSET(DB_EXCL)) {
- LF_CLR(DB_CREATE);
- DB_TEST_RECOVERY(dbp,
- DB_TEST_POSTOPEN, ret, name);
- goto open_retry;
- }
- }
- } else
- ret = __os_open(dbenv, real_name, oflags, mode, fhp);
-
- /*
- * Be quiet if we couldn't open the file because it didn't exist
- * or we did not have permission; customers don't like those
- * messages appearing in the logs.
- * Otherwise, complain loudly.
- */
- if (ret != 0) {
- if (ret == EACCES || ret == ENOENT)
- goto err;
- goto err_msg;
- }
-
- /* Set the page size if we don't have one yet. */
- if (dbp->pgsize == 0) {
- if (IS_SUBDB_SETUP) {
- if ((ret = __db_master_open(dbp,
- name, flags, mode, &mdb)) != 0)
- goto err;
- dbp->pgsize = mdb->pgsize;
- (void)mdb->close(mdb, 0);
- } else if ((ret = __db_set_pgsize(dbp, fhp, real_name)) != 0)
- goto err;
- }
-
- /*
- * Seek to the metadata offset; if it's a master database open or a
- * database without subdatabases, we're seeking to 0, but that's OK.
- */
- if ((ret = __os_seek(dbenv, fhp,
- dbp->pgsize, meta_pgno, 0, 0, DB_OS_SEEK_SET)) != 0)
- goto err_msg;
-
- /*
- * Read the metadata page. We read DBMETASIZE bytes, which is larger
- * than any access method's metadata page and smaller than any disk
- * sector.
- */
- if ((ret = __os_read(dbenv, fhp, mbuf, sizeof(mbuf), &nr)) != 0)
- goto err_msg;
-
- if (nr == sizeof(mbuf)) {
- /*
- * Figure out what access method we're dealing with, and then
- * call access method specific code to check error conditions
- * based on conflicts between the found file and application
- * arguments. A found file overrides some user information --
- * we don't consider it an error, for example, if the user set
- * an expected byte order and the found file doesn't match it.
- */
- F_CLR(dbp, DB_AM_SWAP);
- magic = ((DBMETA *)mbuf)->magic;
-
-swap_retry: switch (magic) {
- case DB_BTREEMAGIC:
- if ((ret =
- __bam_metachk(dbp, name, (BTMETA *)mbuf)) != 0)
- goto err;
- break;
- case DB_HASHMAGIC:
- if ((ret =
- __ham_metachk(dbp, name, (HMETA *)mbuf)) != 0)
- goto err;
- break;
- case DB_QAMMAGIC:
- if ((ret =
- __qam_metachk(dbp, name, (QMETA *)mbuf)) != 0)
- goto err;
- break;
- case 0:
- /*
- * There are two ways we can get a 0 magic number.
- * If we're creating a subdatabase, then the magic
- * number will be 0. We allocate a page as part of
- * finding out what the base page number will be for
- * the new subdatabase, but it's not initialized in
- * any way.
- *
- * The second case happens if we are in recovery
- * and we are going to recreate a database, it's
- * possible that it's page was created (on systems
- * where pages must be created explicitly to avoid
- * holes in files) but is still 0.
- */
- if (IS_SUBDB_SETUP) { /* Case 1 */
- if ((IS_RECOVERING(dbenv)
- && F_ISSET((DB_LOG *)
- dbenv->lg_handle, DBLOG_FORCE_OPEN))
- || ((DBMETA *)mbuf)->pgno != PGNO_INVALID)
- goto empty;
-
- ret = EINVAL;
- goto err;
- }
- /* Case 2 */
- if (IS_RECOVERING(dbenv)) {
- *retflags |= DB_FILE_SETUP_ZERO;
- goto empty;
- }
- goto bad_format;
- default:
- if (F_ISSET(dbp, DB_AM_SWAP))
- goto bad_format;
-
- M_32_SWAP(magic);
- F_SET(dbp, DB_AM_SWAP);
- goto swap_retry;
- }
- } else {
- /*
- * Only newly created files are permitted to fail magic
- * number tests.
- */
- if (nr != 0 || (!IS_RECOVERING(dbenv) && IS_SUBDB_SETUP))
- goto bad_format;
-
- /* Let the caller know that we had a 0-length file. */
- if (!LF_ISSET(DB_CREATE | DB_TRUNCATE))
- *retflags |= DB_FILE_SETUP_ZERO;
-
- /*
- * The only way we can reach here with the DB_CREATE flag set
- * is if we created the file. If that's not the case, then
- * either (a) someone else created the file but has not yet
- * written out the metadata page, or (b) we truncated the file
- * (DB_TRUNCATE) leaving it zero-length. In the case of (a),
- * we want to sleep and give the file creator time to write
- * the metadata page. In the case of (b), we want to continue.
- *
- * !!!
- * There's a race in the case of two processes opening the file
- * with the DB_TRUNCATE flag set at roughly the same time, and
- * they could theoretically hurt each other. Sure hope that's
- * unlikely.
- */
- if (!LF_ISSET(DB_CREATE | DB_TRUNCATE) &&
- !IS_RECOVERING(dbenv)) {
- if (retry_cnt++ < 3) {
- __os_sleep(dbenv, 1, 0);
- goto open_retry;
- }
-bad_format: if (!IS_RECOVERING(dbenv))
- __db_err(dbenv,
- "%s: unexpected file type or format", name);
- ret = EINVAL;
- goto err;
- }
-
- DB_ASSERT (dbp->type != DB_UNKNOWN);
-
-empty: /*
- * The file is empty, and that's OK. If it's not a subdatabase,
- * though, we do need to generate a unique file ID for it. The
- * unique file ID includes a timestamp so that we can't collide
- * with any other files, even when the file IDs (dev/inode pair)
- * are reused.
- */
- if (!IS_SUBDB_SETUP) {
- if (*retflags & DB_FILE_SETUP_ZERO)
- memset(dbp->fileid, 0, DB_FILE_ID_LEN);
- else if ((ret = __os_fileid(dbenv,
- real_name, 1, dbp->fileid)) != 0)
- goto err_msg;
- }
- }
-
- if (0) {
-err_msg: __db_err(dbenv, "%s: %s", name, db_strerror(ret));
- }
-
- /*
- * Abort any running transaction -- it can only exist if something
- * went wrong.
- */
-err:
-DB_TEST_RECOVERY_LABEL
-
- /*
- * If we opened a file handle and our caller is doing fcntl(2) locking,
- * then we can't close it because that would discard the caller's lock.
- * Otherwise, close the handle.
- */
- if (F_ISSET(fhp, DB_FH_VALID)) {
- if (ret == 0 && LF_ISSET(DB_FCNTL_LOCKING))
- dbp->saved_open_fhp = fhp;
- else
- if ((t_ret = __os_closehandle(fhp)) != 0 && ret == 0)
- ret = t_ret;
- }
-
- /*
- * This must be done after the file is closed, since
- * txn_abort() may remove the file, and an open file
- * cannot be removed on a Windows platforms.
- */
- if (txn != NULL)
- (void)txn_abort(txn);
-
- if (real_name != NULL)
- __os_freestr(real_name);
-
- return (ret);
-}
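
The removed __db_file_setup above orders racing creators by opening with create-plus-exclusive and retrying without the create flag on EEXIST. The same idea in plain POSIX terms, as a hedged sketch rather than the library's __os_open path:

#include <errno.h>
#include <fcntl.h>

static int
open_or_create(const char *path, int mode)
{
    int fd;

    /* Exactly one of several racing creators wins O_CREAT|O_EXCL. */
    if ((fd = open(path, O_RDWR | O_CREAT | O_EXCL, mode)) >= 0)
        return (fd);
    if (errno != EEXIST)
        return (-1);

    /* Lost the race: the file now exists, open it normally. */
    return (open(path, O_RDWR));
}
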
-
-/*
- * __db_set_pgsize --
- * Set the page size based on file information.
- */
-static int
-__db_set_pgsize(dbp, fhp, name)
- DB *dbp;
- DB_FH *fhp;
- char *name;
{
DB_ENV *dbenv;
- u_int32_t iopsize;
- int ret;
dbenv = dbp->dbenv;
- /*
- * Use the filesystem's optimum I/O size as the pagesize if a pagesize
- * not specified. Some filesystems have 64K as their optimum I/O size,
- * but as that results in fairly large default caches, we limit the
- * default pagesize to 16K.
- */
- if ((ret = __os_ioinfo(dbenv, name, fhp, NULL, NULL, &iopsize)) != 0) {
- __db_err(dbenv, "%s: %s", name, db_strerror(ret));
- return (ret);
- }
- if (iopsize < 512)
- iopsize = 512;
- if (iopsize > 16 * 1024)
- iopsize = 16 * 1024;
-
- /*
- * Sheer paranoia, but we don't want anything that's not a power-of-2
- * (we rely on that for alignment of various types on the pages), and
- * we want a multiple of the sector size as well.
- */
- OS_ROUNDOFF(iopsize, 512);
+ PANIC_CHECK(dbenv);
- dbp->pgsize = iopsize;
- F_SET(dbp, DB_AM_PGDEF);
+ /* Validate arguments, but as a DB handle destructor, we can't fail. */
+ if (flags != 0 && flags != DB_NOSYNC)
+ (void)__db_ferr(dbenv, "DB->close", 0);
- return (0);
+ return (__db_close_i(dbp, NULL, flags));
}
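
The removed __db_set_pgsize above derives a default page size from the filesystem's preferred I/O size, clamped to the 512-byte to 16KB range and kept a power of two. A rough equivalent using stat(2) st_blksize in place of __os_ioinfo, offered only as an illustration of the heuristic:

#include <sys/stat.h>
#include <stdint.h>

static uint32_t
default_pgsize(const char *path)
{
    struct stat sb;
    uint32_t io, pg;

    io = (stat(path, &sb) == 0 && sb.st_blksize > 0) ?
        (uint32_t)sb.st_blksize : 512;

    if (io < 512)
        io = 512;
    if (io > 16 * 1024)
        io = 16 * 1024;

    /* Round down to the largest power of two <= io. */
    for (pg = 512; pg * 2 <= io; pg *= 2)
        ;
    return (pg);
}
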
/*
- * __db_close --
- * DB destructor.
+ * __db_close_i --
+ * Internal DB destructor.
*
- * PUBLIC: int __db_close __P((DB *, u_int32_t));
+ * PUBLIC: int __db_close_i __P((DB *, DB_TXN *, u_int32_t));
*/
int
-__db_close(dbp, flags)
+__db_close_i(dbp, txn, flags)
DB *dbp;
+ DB_TXN *txn;
u_int32_t flags;
{
DB_ENV *dbenv;
- DBC *dbc;
int ret, t_ret;
- ret = 0;
-
dbenv = dbp->dbenv;
- PANIC_CHECK(dbenv);
-
- /* Validate arguments. */
- if ((ret = __db_closechk(dbp, flags)) != 0)
- goto err;
-
- /* If never opened, or not currently open, it's easy. */
- if (!F_ISSET(dbp, DB_OPEN_CALLED))
- goto never_opened;
-
- /* Sync the underlying access method. */
- if (!LF_ISSET(DB_NOSYNC) && !F_ISSET(dbp, DB_AM_DISCARD) &&
- (t_ret = dbp->sync(dbp, 0)) != 0 && ret == 0)
- ret = t_ret;
-
- /*
- * Go through the active cursors and call the cursor recycle routine,
- * which resolves pending operations and moves the cursors onto the
- * free list. Then, walk the free list and call the cursor destroy
- * routine.
- */
- while ((dbc = TAILQ_FIRST(&dbp->active_queue)) != NULL)
- if ((t_ret = dbc->c_close(dbc)) != 0 && ret == 0)
- ret = t_ret;
- while ((dbc = TAILQ_FIRST(&dbp->free_queue)) != NULL)
- if ((t_ret = __db_c_destroy(dbc)) != 0 && ret == 0)
- ret = t_ret;
+ ret = 0;
/*
- * Close any outstanding join cursors. Join cursors destroy
- * themselves on close and have no separate destroy routine.
+ * Validate arguments, but as a DB handle destructor, we can't fail.
+ *
+ * Check for consistent transaction usage -- ignore errors. Only
+ * internal callers specify transactions, so it's a serious problem
+ * if we get error messages.
*/
- while ((dbc = TAILQ_FIRST(&dbp->join_queue)) != NULL)
- if ((t_ret = dbc->c_close(dbc)) != 0 && ret == 0)
- ret = t_ret;
-
- /* Remove this DB handle from the DB_ENV's dblist. */
- MUTEX_THREAD_LOCK(dbenv, dbenv->dblist_mutexp);
- LIST_REMOVE(dbp, dblistlinks);
- MUTEX_THREAD_UNLOCK(dbenv, dbenv->dblist_mutexp);
-
- /* Sync the memory pool. */
- if (!LF_ISSET(DB_NOSYNC) && !F_ISSET(dbp, DB_AM_DISCARD) &&
- (t_ret = memp_fsync(dbp->mpf)) != 0 &&
- t_ret != DB_INCOMPLETE && ret == 0)
- ret = t_ret;
+ if (txn != NULL)
+ (void)__db_check_txn(dbp, txn, DB_LOCK_INVALIDID, 0);
- /* Close any handle we've been holding since the open. */
- if (dbp->saved_open_fhp != NULL &&
- F_ISSET(dbp->saved_open_fhp, DB_FH_VALID) &&
- (t_ret = __os_closehandle(dbp->saved_open_fhp)) != 0 && ret == 0)
+ /* Refresh the structure and close any local environment. */
+ if ((t_ret = __db_refresh(dbp, txn, flags)) != 0 && ret == 0)
ret = t_ret;
-never_opened:
/*
* Call the access specific close function.
*
* !!!
- * Because of where the function is called in the close process,
- * these routines can't do anything that would dirty pages or
- * otherwise affect closing down the database.
+ * Because of where these functions are called in the DB handle close
+ * process, these routines can't do anything that would dirty pages or
+ * otherwise affect closing down the database. Specifically, we can't
+ * abort and recover any of the information they control.
*/
if ((t_ret = __ham_db_close(dbp)) != 0 && ret == 0)
ret = t_ret;
@@ -1365,17 +669,14 @@ never_opened:
if ((t_ret = __qam_db_close(dbp)) != 0 && ret == 0)
ret = t_ret;
-err:
- /* Refresh the structure and close any local environment. */
- if ((t_ret = __db_refresh(dbp)) != 0 && ret == 0)
- ret = t_ret;
- if (F_ISSET(dbenv, DB_ENV_DBLOCAL) &&
- --dbenv->dblocal_ref == 0 &&
+ --dbenv->db_ref;
+ if (F_ISSET(dbenv, DB_ENV_DBLOCAL) && dbenv->db_ref == 0 &&
(t_ret = dbenv->close(dbenv, 0)) != 0 && ret == 0)
ret = t_ret;
+ /* Free the database handle. */
memset(dbp, CLEAR_BYTE, sizeof(*dbp));
- __os_free(dbp, sizeof(*dbp));
+ __os_free(dbenv, dbp);
return (ret);
}
@@ -1383,653 +684,257 @@ err:
/*
* __db_refresh --
* Refresh the DB structure, releasing any allocated resources.
+ * This now does most of the work of closing files, because refresh
+ * is what abort processing uses (we can't destroy the actual
+ * handle), and at that point we may have a fully opened handle.
+ *
+ * PUBLIC: int __db_refresh __P((DB *, DB_TXN *, u_int32_t));
*/
-static int
-__db_refresh(dbp)
+int
+__db_refresh(dbp, txn, flags)
DB *dbp;
+ DB_TXN *txn;
+ u_int32_t flags;
{
- DB_ENV *dbenv;
+ DB *sdbp;
DBC *dbc;
+ DB_ENV *dbenv;
+ DB_LOCKREQ lreq;
+ DB_MPOOL *dbmp;
int ret, t_ret;
ret = 0;
dbenv = dbp->dbenv;
+ /* If never opened, or not currently open, it's easy. */
+ if (!F_ISSET(dbp, DB_AM_OPEN_CALLED))
+ goto never_opened;
+
/*
- * Go through the active cursors and call the cursor recycle routine,
- * which resolves pending operations and moves the cursors onto the
- * free list. Then, walk the free list and call the cursor destroy
- * routine.
+ * If we have any secondary indices, disassociate them from us.
+ * We don't bother with the mutex here; it only protects some
+ * of the ops that will make us core-dump mid-close anyway, and
+ * if you're trying to do something with a secondary *while* you're
+ * closing the primary, you deserve what you get. The disassociation
+ * is mostly done just so we can close primaries and secondaries in
+ * any order--but within one thread of control.
*/
- while ((dbc = TAILQ_FIRST(&dbp->active_queue)) != NULL)
- if ((t_ret = dbc->c_close(dbc)) != 0 && ret == 0)
- ret = t_ret;
- while ((dbc = TAILQ_FIRST(&dbp->free_queue)) != NULL)
- if ((t_ret = __db_c_destroy(dbc)) != 0 && ret == 0)
+ for (sdbp = LIST_FIRST(&dbp->s_secondaries);
+ sdbp != NULL; sdbp = LIST_NEXT(sdbp, s_links)) {
+ LIST_REMOVE(sdbp, s_links);
+ if ((t_ret = __db_disassociate(sdbp)) != 0 && ret == 0)
ret = t_ret;
-
- dbp->type = 0;
-
- /* Close the memory pool file handle. */
- if (dbp->mpf != NULL) {
- if (F_ISSET(dbp, DB_AM_DISCARD))
- (void)__memp_fremove(dbp->mpf);
- if ((t_ret = memp_fclose(dbp->mpf)) != 0 && ret == 0)
- ret = t_ret;
- dbp->mpf = NULL;
}
- /* Discard the thread mutex. */
- if (dbp->mutexp != NULL) {
- __db_mutex_free(dbenv, dbenv->reginfo, dbp->mutexp);
- dbp->mutexp = NULL;
- }
-
- /* Discard the log file id. */
- if (!IS_RECOVERING(dbenv)
- && dbp->log_fileid != DB_LOGFILEID_INVALID)
- (void)log_unregister(dbenv, dbp);
-
- F_CLR(dbp, DB_AM_DISCARD);
- F_CLR(dbp, DB_AM_INMEM);
- F_CLR(dbp, DB_AM_RDONLY);
- F_CLR(dbp, DB_AM_SWAP);
- F_CLR(dbp, DB_DBM_ERROR);
- F_CLR(dbp, DB_OPEN_CALLED);
-
- return (ret);
-}
-
-/*
- * __db_remove
- * Remove method for DB.
- *
- * PUBLIC: int __db_remove __P((DB *, const char *, const char *, u_int32_t));
- */
-int
-__db_remove(dbp, name, subdb, flags)
- DB *dbp;
- const char *name, *subdb;
- u_int32_t flags;
-{
- DBT namedbt;
- DB_ENV *dbenv;
- DB_LOCK remove_lock;
- DB_LSN newlsn;
- int ret, t_ret, (*callback_func) __P((DB *, void *));
- char *backup, *real_back, *real_name;
- void *cookie;
-
- dbenv = dbp->dbenv;
- ret = 0;
- backup = real_back = real_name = NULL;
-
- PANIC_CHECK(dbenv);
/*
- * Cannot use DB_ILLEGAL_AFTER_OPEN here because that returns
- * and we cannot return, but must deal with the error and destroy
- * the handle anyway.
+ * Sync the underlying access method. Do before closing the cursors
+ * because DB->sync allocates cursors in order to write Recno backing
+ * source text files.
*/
- if (F_ISSET(dbp, DB_OPEN_CALLED)) {
- ret = __db_mi_open(dbp->dbenv, "remove", 1);
- goto err_close;
- }
-
- /* Validate arguments. */
- if ((ret = __db_removechk(dbp, flags)) != 0)
- goto err_close;
+ if (!LF_ISSET(DB_NOSYNC) && !F_ISSET(dbp, DB_AM_DISCARD) &&
+ (t_ret = dbp->sync(dbp, 0)) != 0 && ret == 0)
+ ret = t_ret;
/*
- * Subdatabases.
+ * Go through the active cursors and call the cursor recycle routine,
+ * which resolves pending operations and moves the cursors onto the
+ * free list. Then, walk the free list and call the cursor destroy
+ * routine. Note that any failure on a close is considered "really
+ * bad" and we just break out of the loop and force forward.
*/
- if (subdb != NULL) {
- /* Subdatabases must be created in named files. */
- if (name == NULL) {
- __db_err(dbenv,
- "multiple databases cannot be created in temporary files");
- goto err_close;
+ while ((dbc = TAILQ_FIRST(&dbp->active_queue)) != NULL)
+ if ((t_ret = dbc->c_close(dbc)) != 0) {
+ if (ret == 0)
+ ret = t_ret;
+ break;
}
- return (__db_subdb_remove(dbp, name, subdb));
- }
-
- if ((ret = dbp->open(dbp,
- name, NULL, DB_UNKNOWN, DB_RDWRMASTER, 0)) != 0)
- goto err_close;
-
- if (LOGGING_ON(dbenv) && (ret = __log_file_lock(dbp)) != 0)
- goto err_close;
- if ((ret = dbp->sync(dbp, 0)) != 0)
- goto err_close;
-
- /* Start the transaction and log the delete. */
- if (TXN_ON(dbenv) && (ret = __db_metabegin(dbp, &remove_lock)) != 0)
- goto err_close;
-
- if (LOGGING_ON(dbenv)) {
- memset(&namedbt, 0, sizeof(namedbt));
- namedbt.data = (char *)name;
- namedbt.size = strlen(name) + 1;
-
- if ((ret = __crdel_delete_log(dbenv,
- dbp->open_txn, &newlsn, DB_FLUSH,
- dbp->log_fileid, &namedbt)) != 0) {
- __db_err(dbenv,
- "%s: %s", name, db_strerror(ret));
- goto err;
+ while ((dbc = TAILQ_FIRST(&dbp->free_queue)) != NULL)
+ if ((t_ret = __db_c_destroy(dbc)) != 0) {
+ if (ret == 0)
+ ret = t_ret;
+ break;
}
- }
-
- /* Find the real name of the file. */
- if ((ret = __db_appname(dbenv,
- DB_APP_DATA, NULL, name, 0, NULL, &real_name)) != 0)
- goto err;
/*
- * XXX
- * We don't bother to open the file and call __memp_fremove on the mpf.
- * There is a potential race here. It is at least possible that, if
- * the unique filesystem ID (dev/inode pair on UNIX) is reallocated
- * within a second (the granularity of the fileID timestamp), a new
- * file open will get the same fileID as the file being "removed".
- * We may actually want to open the file and call __memp_fremove on
- * the mpf to get around this.
- */
-
- /* Create name for backup file. */
- if (TXN_ON(dbenv)) {
- if ((ret =
- __db_backup_name(dbenv, name, &backup, &newlsn)) != 0)
- goto err;
- if ((ret = __db_appname(dbenv,
- DB_APP_DATA, NULL, backup, 0, NULL, &real_back)) != 0)
- goto err;
- }
-
- callback_func = __db_remove_callback;
- cookie = real_back;
- DB_TEST_RECOVERY(dbp, DB_TEST_PRERENAME, ret, name);
- if (dbp->db_am_remove != NULL &&
- (ret = dbp->db_am_remove(dbp,
- name, subdb, &newlsn, &callback_func, &cookie)) != 0)
- goto err;
- /*
- * On Windows, the underlying file must be closed to perform a remove.
- * Nothing later in __db_remove requires that it be open, and the
- * dbp->close closes it anyway, so we just close it early.
+ * Close any outstanding join cursors. Join cursors destroy
+ * themselves on close and have no separate destroy routine.
*/
- (void)__memp_fremove(dbp->mpf);
- if ((ret = memp_fclose(dbp->mpf)) != 0)
- goto err;
- dbp->mpf = NULL;
-
- if (TXN_ON(dbenv))
- ret = __os_rename(dbenv, real_name, real_back);
- else
- ret = __os_unlink(dbenv, real_name);
-
- DB_TEST_RECOVERY(dbp, DB_TEST_POSTRENAME, ret, name);
+ while ((dbc = TAILQ_FIRST(&dbp->join_queue)) != NULL)
+ if ((t_ret = dbc->c_close(dbc)) != 0) {
+ if (ret == 0)
+ ret = t_ret;
+ break;
+ }
-err:
-DB_TEST_RECOVERY_LABEL
/*
- * End the transaction, committing the transaction if we were
- * successful, aborting otherwise.
+ * Sync the memory pool, even though we've already called DB->sync,
+ * because closing cursors can dirty pages by deleting items they
+ * referenced.
*/
- if (dbp->open_txn != NULL && (t_ret = __db_metaend(dbp, &remove_lock,
- ret == 0, callback_func, cookie)) != 0 && ret == 0)
+ if (!LF_ISSET(DB_NOSYNC) && !F_ISSET(dbp, DB_AM_DISCARD) &&
+ (t_ret = dbp->mpf->sync(dbp->mpf)) != 0 && ret == 0)
ret = t_ret;
- /* FALLTHROUGH */
-
-err_close:
- if (real_back != NULL)
- __os_freestr(real_back);
- if (real_name != NULL)
- __os_freestr(real_name);
- if (backup != NULL)
- __os_freestr(backup);
-
- /* We no longer have an mpool, so syncing would be disastrous. */
- if ((t_ret = dbp->close(dbp, DB_NOSYNC)) != 0 && ret == 0)
+ /* Close any handle we've been holding since the open. */
+ if (dbp->saved_open_fhp != NULL &&
+ F_ISSET(dbp->saved_open_fhp, DB_FH_VALID) &&
+ (t_ret = __os_closehandle(dbenv, dbp->saved_open_fhp)) != 0 &&
+ ret == 0)
ret = t_ret;
- return (ret);
-}
-
-/*
- * __db_subdb_remove --
- * Remove a subdatabase.
- */
-static int
-__db_subdb_remove(dbp, name, subdb)
- DB *dbp;
- const char *name, *subdb;
-{
- DB *mdbp;
- DBC *dbc;
- DB_ENV *dbenv;
- DB_LOCK remove_lock;
- db_pgno_t meta_pgno;
- int ret, t_ret;
-
- mdbp = NULL;
- dbc = NULL;
- dbenv = dbp->dbenv;
-
- /* Start the transaction. */
- if (TXN_ON(dbenv) && (ret = __db_metabegin(dbp, &remove_lock)) != 0)
- goto err_close;
-
+never_opened:
/*
- * Open the subdatabase. We can use the user's DB handle for this
- * purpose, I think.
+ * We are not releasing the handle lock here because we're about
+ * to release all locks held by dbp->lid below. There are two
+ * ways that we can get in here with a handle_lock, but not a
+ * dbp->lid. The first is when our lid has been hijacked by a
+ * subdb. The second is when we are a Queue database in the midst
+ * of a rename. If the queue file hasn't actually been opened, we
+ * hijack the main dbp's locker id to do the open so we can get the
+ * extent files. In both cases, we needn't free the handle lock
+ * because it will be freed when the hijacked locker-id is freed.
*/
- if ((ret = __db_open(dbp, name, subdb, DB_UNKNOWN, 0, 0)) != 0)
- goto err;
+ DB_ASSERT(!LOCK_ISSET(dbp->handle_lock) ||
+ dbp->lid != DB_LOCK_INVALIDID ||
+ dbp->type == DB_QUEUE ||
+ F_ISSET(dbp, DB_AM_SUBDB));
+
+ if (dbp->lid != DB_LOCK_INVALIDID) {
+ /* We may have pending trade operations on this dbp. */
+ if (txn != NULL)
+ __txn_remlock(dbenv, txn, &dbp->handle_lock, dbp->lid);
+
+ /* We may be holding the handle lock; release it. */
+ lreq.op = DB_LOCK_PUT_ALL;
+ if ((t_ret = __lock_vec(dbenv,
+ dbp->lid, 0, &lreq, 1, NULL)) != 0 && ret == 0)
+ ret = t_ret;
- /* Free up the pages in the subdatabase. */
- switch (dbp->type) {
- case DB_BTREE:
- case DB_RECNO:
- if ((ret = __bam_reclaim(dbp, dbp->open_txn)) != 0)
- goto err;
- break;
- case DB_HASH:
- if ((ret = __ham_reclaim(dbp, dbp->open_txn)) != 0)
- goto err;
- break;
- default:
- ret = __db_unknown_type(dbp->dbenv,
- "__db_subdb_remove", dbp->type);
- goto err;
+ if ((t_ret =
+ dbenv->lock_id_free(dbenv, dbp->lid)) != 0 && ret == 0)
+ ret = t_ret;
+ dbp->lid = DB_LOCK_INVALIDID;
+ LOCK_INIT(dbp->handle_lock);
}
- /*
- * Remove the entry from the main database and free the subdatabase
- * metadata page.
- */
- if ((ret = __db_master_open(dbp, name, 0, 0, &mdbp)) != 0)
- goto err;
-
- if ((ret = __db_master_update(mdbp,
- subdb, dbp->type, &meta_pgno, MU_REMOVE, NULL, 0)) != 0)
- goto err;
-
-err: /*
- * End the transaction, committing the transaction if we were
- * successful, aborting otherwise.
- */
- if (dbp->open_txn != NULL && (t_ret = __db_metaend(dbp,
- &remove_lock, ret == 0, NULL, NULL)) != 0 && ret == 0)
+ /* Discard the locker ID allocated as the fileid. */
+ if (F_ISSET(dbp, DB_AM_INMEM) &&
+ LOCKING_ON(dbenv) && (t_ret = dbenv->lock_id_free(
+ dbenv, *(u_int32_t *)dbp->fileid)) != 0 && ret == 0)
ret = t_ret;
-err_close:
- /*
- * Close the user's DB handle -- do this LAST to avoid smashing
- * the transaction information.
- */
- if ((t_ret = dbp->close(dbp, 0)) != 0 && ret == 0)
- ret = t_ret;
-
- if (mdbp != NULL && (t_ret = mdbp->close(mdbp, 0)) != 0 && ret == 0)
- ret = t_ret;
+ dbp->type = DB_UNKNOWN;
- return (ret);
-}
-
-/*
- * __db_rename
- * Rename method for DB.
- *
- * PUBLIC: int __db_rename __P((DB *,
- * PUBLIC: const char *, const char *, const char *, u_int32_t));
- */
-int
-__db_rename(dbp, filename, subdb, newname, flags)
- DB *dbp;
- const char *filename, *subdb, *newname;
- u_int32_t flags;
-{
- DBT namedbt, newnamedbt;
- DB_ENV *dbenv;
- DB_LOCK remove_lock;
- DB_LSN newlsn;
- char *real_name, *real_newname;
- int ret, t_ret;
-
- dbenv = dbp->dbenv;
- ret = 0;
- real_name = real_newname = NULL;
-
- PANIC_CHECK(dbenv);
- /*
- * Cannot use DB_ILLEGAL_AFTER_OPEN here because that returns
- * and we cannot return, but must deal with the error and destroy
- * the handle anyway.
- */
- if (F_ISSET(dbp, DB_OPEN_CALLED)) {
- ret = __db_mi_open(dbp->dbenv, "rename", 1);
- goto err_close;
+ /* Discard the thread mutex. */
+ if (dbp->mutexp != NULL) {
+ dbmp = dbenv->mp_handle;
+ __db_mutex_free(dbenv, dbmp->reginfo, dbp->mutexp);
+ dbp->mutexp = NULL;
}
- /* Validate arguments -- has same rules as remove. */
- if ((ret = __db_removechk(dbp, flags)) != 0)
- goto err_close;
+ /* Discard any memory used to store returned data. */
+ if (dbp->my_rskey.data != NULL)
+ __os_free(dbp->dbenv, dbp->my_rskey.data);
+ if (dbp->my_rkey.data != NULL)
+ __os_free(dbp->dbenv, dbp->my_rkey.data);
+ if (dbp->my_rdata.data != NULL)
+ __os_free(dbp->dbenv, dbp->my_rdata.data);
+
+ /* For safety's sake; we may refresh twice. */
+ memset(&dbp->my_rskey, 0, sizeof(DBT));
+ memset(&dbp->my_rkey, 0, sizeof(DBT));
+ memset(&dbp->my_rdata, 0, sizeof(DBT));
/*
- * Subdatabases.
+ * Remove this DB handle from the DB_ENV's dblist, if it's been added.
*/
- if (subdb != NULL) {
- if (filename == NULL) {
- __db_err(dbenv,
- "multiple databases cannot be created in temporary files");
- goto err_close;
- }
- return (__db_subdb_rename(dbp, filename, subdb, newname));
- }
-
- if ((ret = dbp->open(dbp,
- filename, NULL, DB_UNKNOWN, DB_RDWRMASTER, 0)) != 0)
- goto err_close;
-
- if (LOGGING_ON(dbenv) && (ret = __log_file_lock(dbp)) != 0)
- goto err_close;
-
- if ((ret = dbp->sync(dbp, 0)) != 0)
- goto err_close;
-
- /* Start the transaction and log the rename. */
- if (TXN_ON(dbenv) && (ret = __db_metabegin(dbp, &remove_lock)) != 0)
- goto err_close;
-
- if (LOGGING_ON(dbenv)) {
- memset(&namedbt, 0, sizeof(namedbt));
- namedbt.data = (char *)filename;
- namedbt.size = strlen(filename) + 1;
-
- memset(&newnamedbt, 0, sizeof(namedbt));
- newnamedbt.data = (char *)newname;
- newnamedbt.size = strlen(newname) + 1;
-
- if ((ret = __crdel_rename_log(dbenv, dbp->open_txn,
- &newlsn, 0, dbp->log_fileid, &namedbt, &newnamedbt)) != 0) {
- __db_err(dbenv, "%s: %s", filename, db_strerror(ret));
- goto err;
- }
+ MUTEX_THREAD_LOCK(dbenv, dbenv->dblist_mutexp);
+ if (dbp->dblistlinks.le_prev != NULL)
+ LIST_REMOVE(dbp, dblistlinks);
+ MUTEX_THREAD_UNLOCK(dbenv, dbenv->dblist_mutexp);
+ dbp->dblistlinks.le_prev = NULL;
- if ((ret = __log_filelist_update(dbenv, dbp,
- dbp->log_fileid, newname, NULL)) != 0)
- goto err;
+ /* Close the memory pool file handle. */
+ if (dbp->mpf != NULL) {
+ if ((t_ret = dbp->mpf->close(dbp->mpf,
+ F_ISSET(dbp, DB_AM_DISCARD) ? DB_MPOOL_DISCARD : 0)) != 0 &&
+ ret == 0)
+ ret = t_ret;
+ dbp->mpf = NULL;
}
- /* Find the real name of the file. */
- if ((ret = __db_appname(dbenv,
- DB_APP_DATA, NULL, filename, 0, NULL, &real_name)) != 0)
- goto err;
-
- /* Find the real newname of the file. */
- if ((ret = __db_appname(dbenv,
- DB_APP_DATA, NULL, newname, 0, NULL, &real_newname)) != 0)
- goto err;
+ if (LOGGING_ON(dbp->dbenv)) {
+ /*
+ * Discard the log file id, if any. We want to log the close
+ * if and only if this is not a recovery dbp.
+ */
+ if (F_ISSET(dbp, DB_AM_RECOVER))
+ (void)__dbreg_revoke_id(dbp, 0);
+ else
+ (void)__dbreg_close_id(dbp, txn);
- /*
- * It is an error to rename a file over one that already exists,
- * as that wouldn't be transaction-safe.
- */
- if (__os_exists(real_newname, NULL) == 0) {
- ret = EEXIST;
- __db_err(dbenv, "rename: file %s exists", real_newname);
- goto err;
+ /* Discard the log FNAME. */
+ (void)__dbreg_teardown(dbp);
}
- DB_TEST_RECOVERY(dbp, DB_TEST_PRERENAME, ret, filename);
- if (dbp->db_am_rename != NULL &&
- (ret = dbp->db_am_rename(dbp, filename, subdb, newname)) != 0)
- goto err;
- /*
- * We have to flush the cache for a couple of reasons. First, the
- * underlying MPOOLFILE maintains a "name" that unrelated processes
- * can use to open the file in order to flush pages, and that name
- * is about to be wrong. Second, on Windows the unique file ID is
- * generated from the file's name, not other file information as is
- * the case on UNIX, and so a subsequent open of the old file name
- * could conceivably result in a matching "unique" file ID.
- */
- if ((ret = __memp_fremove(dbp->mpf)) != 0)
- goto err;
-
- /*
- * On Windows, the underlying file must be closed to perform a rename.
- * Nothing later in __db_rename requires that it be open, and the call
- * to dbp->close closes it anyway, so we just close it early.
- */
- if ((ret = memp_fclose(dbp->mpf)) != 0)
- goto err;
- dbp->mpf = NULL;
-
- ret = __os_rename(dbenv, real_name, real_newname);
- DB_TEST_RECOVERY(dbp, DB_TEST_POSTRENAME, ret, newname);
-
-DB_TEST_RECOVERY_LABEL
-err: if (dbp->open_txn != NULL && (t_ret = __db_metaend(dbp,
- &remove_lock, ret == 0, NULL, NULL)) != 0 && ret == 0)
- ret = t_ret;
-
-err_close:
- /* We no longer have an mpool, so syncing would be disastrous. */
- dbp->close(dbp, DB_NOSYNC);
- if (real_name != NULL)
- __os_freestr(real_name);
- if (real_newname != NULL)
- __os_freestr(real_newname);
-
- return (ret);
-}
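
The removed __db_rename above refuses to rename over an existing file, because the overwritten file could not be restored if the operation were rolled back. A plain-POSIX sketch of that guard; it shares the original's check-then-rename race and is illustrative only:

#include <errno.h>
#include <stdio.h>
#include <unistd.h>

static int
rename_noreplace(const char *oldpath, const char *newpath)
{
    /* Fail with EEXIST if the destination is already there. */
    if (access(newpath, F_OK) == 0) {
        errno = EEXIST;
        return (-1);
    }
    if (errno != ENOENT)
        return (-1);

    return (rename(oldpath, newpath));
}
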
-
-/*
- * __db_subdb_rename --
- * Rename a subdatabase.
- */
-static int
-__db_subdb_rename(dbp, name, subdb, newname)
- DB *dbp;
- const char *name, *subdb, *newname;
-{
- DB *mdbp;
- DBC *dbc;
- DB_ENV *dbenv;
- DB_LOCK remove_lock;
- int ret, t_ret;
-
- mdbp = NULL;
- dbc = NULL;
- dbenv = dbp->dbenv;
-
- /* Start the transaction. */
- if (TXN_ON(dbenv) && (ret = __db_metabegin(dbp, &remove_lock)) != 0)
- goto err_close;
-
- /*
- * Open the subdatabase. We can use the user's DB handle for this
- * purpose, I think.
- */
- if ((ret = __db_open(dbp, name, subdb, DB_UNKNOWN, 0, 0)) != 0)
- goto err;
-
- /*
- * Rename the entry in the main database.
- */
- if ((ret = __db_master_open(dbp, name, 0, 0, &mdbp)) != 0)
- goto err;
-
- if ((ret = __db_master_update(mdbp,
- subdb, dbp->type, NULL, MU_RENAME, newname, 0)) != 0)
- goto err;
-
-err: /*
- * End the transaction, committing the transaction if we were
- * successful, aborting otherwise.
- */
- if (dbp->open_txn != NULL && (t_ret = __db_metaend(dbp,
- &remove_lock, ret == 0, NULL, NULL)) != 0 && ret == 0)
- ret = t_ret;
-
-err_close:
- /*
- * Close the user's DB handle -- do this LAST to avoid smashing the
- * transaction information.
- */
- if ((t_ret = dbp->close(dbp, 0)) != 0 && ret == 0)
- ret = t_ret;
-
- if (mdbp != NULL && (t_ret = mdbp->close(mdbp, 0)) != 0 && ret == 0)
- ret = t_ret;
-
- return (ret);
-}
-
-/*
- * __db_metabegin --
- *
- * Begin a meta-data operation. This involves doing any required locking,
- * potentially beginning a transaction and then telling the caller if you
- * did or did not begin the transaction.
- *
- * The writing flag indicates if the caller is actually allowing creates
- * or doing deletes (i.e., if the caller is opening and not creating, then
- * we don't need to do any of this).
- * PUBLIC: int __db_metabegin __P((DB *, DB_LOCK *));
- */
-int
-__db_metabegin(dbp, lockp)
- DB *dbp;
- DB_LOCK *lockp;
-{
- DB_ENV *dbenv;
- DBT dbplock;
- u_int32_t locker, lockval;
- int ret;
-
- dbenv = dbp->dbenv;
-
- lockp->off = LOCK_INVALID;
+ /* Clear out fields that normally get set during open. */
+ memset(dbp->fileid, 0, sizeof(dbp->fileid));
+ dbp->adj_fileid = 0;
+ dbp->meta_pgno = 0;
+ dbp->cur_lid = DB_LOCK_INVALIDID;
+ dbp->associate_lid = DB_LOCK_INVALIDID;
+ dbp->cl_id = 0;
/*
- * There is no single place where we can know that we are or are not
- * going to be creating any files and/or subdatabases, so we will
- * always begin a transaction when we start creating one. If we later
- * discover that this was unnecessary, we will abort the transaction.
- * Recovery is written so that if we log a file create, but then
- * discover that we didn't have to do it, we recover correctly. The
- * file recovery design document has details.
- *
- * We need to single thread all create and delete operations, so if we
- * are running with locking, we must obtain a lock. We use lock_id to
- * generate a unique locker id and use a handcrafted DBT as the object
- * on which we are locking.
+ * If we are being refreshed with a txn specified, then we need
+ * to make sure that we clear out the lock handle field, because
+ * releasing all the locks for this transaction will release this
+ * lock and we don't want close to stumble upon this handle and
+ * try to close it.
*/
- if (LOCKING_ON(dbenv)) {
- if ((ret = lock_id(dbenv, &locker)) != 0)
- return (ret);
- lockval = 0;
- dbplock.data = &lockval;
- dbplock.size = sizeof(lockval);
- if ((ret = lock_get(dbenv,
- locker, 0, &dbplock, DB_LOCK_WRITE, lockp)) != 0)
- return (ret);
- }
-
- return (txn_begin(dbenv, NULL, &dbp->open_txn, 0));
-}
-
-/*
- * __db_metaend --
- * End a meta-data operation.
- * PUBLIC: int __db_metaend __P((DB *,
- * PUBLIC: DB_LOCK *, int, int (*)(DB *, void *), void *));
- */
-int
-__db_metaend(dbp, lockp, commit, callback, cookie)
- DB *dbp;
- DB_LOCK *lockp;
- int commit, (*callback) __P((DB *, void *));
- void *cookie;
-{
- DB_ENV *dbenv;
- int ret, t_ret;
-
- ret = 0;
- dbenv = dbp->dbenv;
-
- /* End the transaction. */
- if (commit) {
- if ((ret = txn_commit(dbp->open_txn, DB_TXN_SYNC)) == 0) {
- /*
- * Unlink any underlying file, we've committed the
- * transaction.
- */
- if (callback != NULL)
- ret = callback(dbp, cookie);
- }
- } else if ((t_ret = txn_abort(dbp->open_txn)) && ret == 0)
- ret = t_ret;
+ if (txn != NULL)
+ LOCK_INIT(dbp->handle_lock);
- /* Release our lock. */
- if (lockp->off != LOCK_INVALID &&
- (t_ret = lock_put(dbenv, lockp)) != 0 && ret == 0)
- ret = t_ret;
+ F_CLR(dbp, DB_AM_DBM_ERROR);
+ F_CLR(dbp, DB_AM_DISCARD);
+ F_CLR(dbp, DB_AM_INMEM);
+ F_CLR(dbp, DB_AM_RECOVER);
+ F_CLR(dbp, DB_AM_OPEN_CALLED);
+ F_CLR(dbp, DB_AM_RDONLY);
+ F_CLR(dbp, DB_AM_SWAP);
return (ret);
}
/*
* __db_log_page
- * Log a meta-data or root page during a create operation.
+ * Log a meta-data or root page during a subdatabase create operation.
*
- * PUBLIC: int __db_log_page __P((DB *,
- * PUBLIC: const char *, DB_LSN *, db_pgno_t, PAGE *));
+ * PUBLIC: int __db_log_page __P((DB *, DB_TXN *, DB_LSN *, db_pgno_t, PAGE *));
*/
int
-__db_log_page(dbp, name, lsn, pgno, page)
+__db_log_page(dbp, txn, lsn, pgno, page)
DB *dbp;
- const char *name;
+ DB_TXN *txn;
DB_LSN *lsn;
db_pgno_t pgno;
PAGE *page;
{
- DBT name_dbt, page_dbt;
+ DBT page_dbt;
DB_LSN new_lsn;
int ret;
- if (dbp->open_txn == NULL)
+ if (!LOGGING_ON(dbp->dbenv) || txn == NULL)
return (0);
memset(&page_dbt, 0, sizeof(page_dbt));
page_dbt.size = dbp->pgsize;
page_dbt.data = page;
- if (pgno == PGNO_BASE_MD) {
- /*
- * !!!
- * Make sure that we properly handle a null name. The old
- * Tcl sent us pathnames of the form ""; it may be the case
- * that the new Tcl doesn't do that, so we can get rid of
- * the second check here.
- */
- memset(&name_dbt, 0, sizeof(name_dbt));
- name_dbt.data = (char *)name;
- if (name == NULL || *name == '\0')
- name_dbt.size = 0;
- else
- name_dbt.size = strlen(name) + 1;
- ret = __crdel_metapage_log(dbp->dbenv,
- dbp->open_txn, &new_lsn, DB_FLUSH,
- dbp->log_fileid, &name_dbt, pgno, &page_dbt);
- } else
- ret = __crdel_metasub_log(dbp->dbenv, dbp->open_txn,
- &new_lsn, 0, dbp->log_fileid, pgno, &page_dbt, lsn);
+ ret = __crdel_metasub_log(dbp, txn, &new_lsn, 0, pgno, &page_dbt, lsn);
if (ret == 0)
page->lsn = new_lsn;
@@ -2041,50 +946,89 @@ __db_log_page(dbp, name, lsn, pgno, page)
* Create the backup file name for a given file.
*
* PUBLIC: int __db_backup_name __P((DB_ENV *,
- * PUBLIC: const char *, char **, DB_LSN *));
+ * PUBLIC: const char *, DB_TXN *, char **));
*/
#undef BACKUP_PREFIX
#define BACKUP_PREFIX "__db."
#undef MAX_LSN_TO_TEXT
-#define MAX_LSN_TO_TEXT 21
+#define MAX_LSN_TO_TEXT 17
+
int
-__db_backup_name(dbenv, name, backup, lsn)
+__db_backup_name(dbenv, name, txn, backup)
DB_ENV *dbenv;
const char *name;
+ DB_TXN *txn;
char **backup;
- DB_LSN *lsn;
{
+ DB_LSN lsn;
size_t len;
int plen, ret;
char *p, *retp;
- len = strlen(name) + strlen(BACKUP_PREFIX) + MAX_LSN_TO_TEXT + 1;
-
- if ((ret = __os_malloc(dbenv, len, NULL, &retp)) != 0)
- return (ret);
-
/*
- * Create the name. Backup file names are of the form:
+ * Create the name. Backup file names are in one of two forms:
*
- * __db.name.0x[lsn-file].0x[lsn-offset]
+ * In a transactional env: __db.LSN(8).LSN(8)
+ * and
+ * in a non-transactional env: __db.FILENAME.
*
- * which guarantees uniqueness.
+ * If the transaction doesn't have a current LSN, we write
+ * a dummy log record to force it, so that we ensure that
+ * all tmp names are unique.
*
- * However, name may contain an env-relative path in it.
- * In that case, put the __db. after the last portion of
- * the pathname.
+ * In addition, the name passed may contain an env-relative path.
+ * In that case, put the __db. in the right place (in the last
+ * component of the pathname).
*/
- if ((p = __db_rpath(name)) == NULL)
- snprintf(retp, len,
- "%s%s.0x%x0x%x", BACKUP_PREFIX, name,
- lsn->file, lsn->offset);
- else {
- plen = p - name + 1;
+ if (txn != NULL) {
+ if (IS_ZERO_LSN(txn->last_lsn)) {
+ /*
+ * Write dummy log record. The two choices for
+ * dummy log records are __db_noop_log and
+ * __db_debug_log; unfortunately __db_noop_log requires
+ * a valid dbp, and we aren't guaranteed to be able
+ * to pass one in here.
+ */
+ if ((ret = __db_debug_log(dbenv, txn, &lsn, 0,
+ NULL, 0, NULL, NULL, 0)) != 0)
+ return (ret);
+ } else
+ lsn = txn->last_lsn;
+ }
+
+ /*
+ * Part of the name may be a full path, so we need to make sure that
+ * we allocate enough space for it, even in the case where we don't
+ * use the entire filename for the backup name.
+ */
+ len = strlen(name) + strlen(BACKUP_PREFIX) + MAX_LSN_TO_TEXT;
+
+ if ((ret = __os_malloc(dbenv, len, &retp)) != 0)
+ return (ret);
+
+ /*
+ * There are four cases here:
+ * 1. simple path w/out transaction
+ * 2. simple path + transaction
+ * 3. multi-component path w/out transaction
+ * 4. multi-component path + transaction
+ */
+ if ((p = __db_rpath(name)) == NULL) {
+ if (txn == NULL) /* case 1 */
+ snprintf(retp, len, "%s%s.", BACKUP_PREFIX, name);
+ else /* case 2 */
+ snprintf(retp, len,
+ "%s%x.%x", BACKUP_PREFIX, lsn.file, lsn.offset);
+ } else {
+ plen = (int)(p - name) + 1;
p++;
- snprintf(retp, len,
- "%.*s%s%s.0x%x0x%x", plen, name, BACKUP_PREFIX, p,
- lsn->file, lsn->offset);
+ if (txn == NULL) /* case 3 */
+ snprintf(retp, len,
+ "%.*s%s%s.", plen, name, BACKUP_PREFIX, p);
+ else /* case 4 */
+ snprintf(retp, len,
+ "%.*s%x.%x.", plen, name, lsn.file, lsn.offset);
}
*backup = retp;
@@ -2092,19 +1036,6 @@ __db_backup_name(dbenv, name, backup, lsn)
}
/*
- * __db_remove_callback --
- * Callback function -- on file remove commit, it unlinks the backing
- * file.
- */
-static int
-__db_remove_callback(dbp, cookie)
- DB *dbp;
- void *cookie;
-{
- return (__os_unlink(dbp->dbenv, cookie));
-}
-
-/*
* __dblist_get --
* Get the first element of dbenv->dblist with
* dbp->adj_fileid matching adjid.
@@ -2126,22 +1057,73 @@ __dblist_get(dbenv, adjid)
return (dbp);
}
-#if CONFIG_TEST
+/*
+ * __db_disassociate --
+ * Destroy the association between a given secondary and its primary.
+ */
+static int
+__db_disassociate(sdbp)
+ DB *sdbp;
+{
+ DBC *dbc;
+ int ret, t_ret;
+
+ ret = 0;
+
+ sdbp->s_callback = NULL;
+ sdbp->s_primary = NULL;
+ sdbp->get = sdbp->stored_get;
+ sdbp->close = sdbp->stored_close;
+
+ /*
+ * Complain, but proceed, if we have any active cursors. (We're in
+ * the middle of a close, so there's really no turning back.)
+ */
+ if (sdbp->s_refcnt != 1 ||
+ TAILQ_FIRST(&sdbp->active_queue) != NULL ||
+ TAILQ_FIRST(&sdbp->join_queue) != NULL) {
+ __db_err(sdbp->dbenv,
+ "Closing a primary DB while a secondary DB has active cursors is unsafe");
+ ret = EINVAL;
+ }
+ sdbp->s_refcnt = 0;
+
+ while ((dbc = TAILQ_FIRST(&sdbp->free_queue)) != NULL)
+ if ((t_ret = __db_c_destroy(dbc)) != 0 && ret == 0)
+ ret = t_ret;
+
+ F_CLR(sdbp, DB_AM_SECONDARY);
+ return (ret);
+}
+
+#if CONFIG_TEST
/*
* __db_testcopy
* Create a copy of all backup files and our "main" DB.
*
- * PUBLIC: int __db_testcopy __P((DB *, const char *));
+ * PUBLIC: #if CONFIG_TEST
+ * PUBLIC: int __db_testcopy __P((DB_ENV *, DB *, const char *));
+ * PUBLIC: #endif
*/
int
-__db_testcopy(dbp, name)
+__db_testcopy(dbenv, dbp, name)
+ DB_ENV *dbenv;
DB *dbp;
const char *name;
{
- if (dbp->type == DB_QUEUE)
+ DB_MPOOLFILE *mpf;
+
+ DB_ASSERT(dbp != NULL || name != NULL);
+
+ if (name == NULL) {
+ mpf = dbp->mpf;
+ name = R_ADDR(mpf->dbmp->reginfo, mpf->mfp->path_off);
+ }
+
+ if (dbp != NULL && dbp->type == DB_QUEUE)
return (__qam_testdocopy(dbp, name));
else
- return (__db_testdocopy(dbp, name));
+ return (__db_testdocopy(dbenv, name));
}
static int
@@ -2154,7 +1136,7 @@ __qam_testdocopy(dbp, name)
int ret;
filelist = NULL;
- if ((ret = __db_testdocopy(dbp, name)) != 0)
+ if ((ret = __db_testdocopy(dbp->dbenv, name)) != 0)
return (ret);
if (dbp->mpf != NULL &&
(ret = __qam_gen_filelist(dbp, &filelist)) != 0)
@@ -2164,12 +1146,13 @@ __qam_testdocopy(dbp, name)
return (0);
dir = ((QUEUE *)dbp->q_internal)->dir;
for (fp = filelist; fp->mpf != NULL; fp++) {
- snprintf(buf, sizeof(buf), QUEUE_EXTENT, dir, name, fp->id);
- if ((ret = __db_testdocopy(dbp, buf)) != 0)
+ snprintf(buf, sizeof(buf),
+ QUEUE_EXTENT, dir, PATH_SEPARATOR[0], name, fp->id);
+ if ((ret = __db_testdocopy(dbp->dbenv, buf)) != 0)
return (ret);
}
- __os_free(filelist, 0);
+ __os_free(dbp->dbenv, filelist);
return (0);
}
@@ -2179,8 +1162,8 @@ __qam_testdocopy(dbp, name)
*
*/
static int
-__db_testdocopy(dbp, name)
- DB *dbp;
+__db_testdocopy(dbenv, name)
+ DB_ENV *dbenv;
const char *name;
{
size_t len;
@@ -2188,8 +1171,8 @@ __db_testdocopy(dbp, name)
char **namesp, *backup, *copy, *dir, *p, *real_name;
real_name = NULL;
/* Get the real backing file name. */
- if ((ret = __db_appname(dbp->dbenv,
- DB_APP_DATA, NULL, name, 0, NULL, &real_name)) != 0)
+ if ((ret = __db_appname(dbenv,
+ DB_APP_DATA, name, 0, NULL, &real_name)) != 0)
return (ret);
copy = backup = NULL;
@@ -2200,10 +1183,10 @@ __db_testdocopy(dbp, name)
*/
len = strlen(real_name) + strlen(BACKUP_PREFIX) + MAX_LSN_TO_TEXT + 9;
- if ((ret = __os_malloc(dbp->dbenv, len, NULL, &copy)) != 0)
+ if ((ret = __os_malloc(dbenv, len, &copy)) != 0)
goto out;
- if ((ret = __os_malloc(dbp->dbenv, len, NULL, &backup)) != 0)
+ if ((ret = __os_malloc(dbenv, len, &backup)) != 0)
goto out;
/*
@@ -2212,9 +1195,9 @@ __db_testdocopy(dbp, name)
snprintf(copy, len, "%s.afterop", real_name);
__db_makecopy(real_name, copy);
- if ((ret = __os_strdup(dbp->dbenv, real_name, &dir)) != 0)
+ if ((ret = __os_strdup(dbenv, real_name, &dir)) != 0)
goto out;
- __os_freestr(real_name);
+ __os_free(dbenv, real_name);
real_name = NULL;
/*
* Create the name. Backup file names are of the form:
@@ -2234,7 +1217,7 @@ __db_testdocopy(dbp, name)
p = __db_rpath(dir);
if (p != NULL)
*p = '\0';
- ret = __os_dirlist(dbp->dbenv, dir, &namesp, &dircnt);
+ ret = __os_dirlist(dbenv, dir, &namesp, &dircnt);
#if DIAGNOSTIC
/*
* XXX
@@ -2245,7 +1228,7 @@ __db_testdocopy(dbp, name)
*/
*p = '/';
#endif
- __os_freestr(dir);
+ __os_free(dbenv, dir);
if (ret != 0)
goto out;
for (i = 0; i < dircnt; i++) {
@@ -2258,8 +1241,8 @@ __db_testdocopy(dbp, name)
* know its LSN's.
*/
if (strncmp(namesp[i], backup, strlen(backup)) == 0) {
- if ((ret = __db_appname(dbp->dbenv, DB_APP_DATA,
- NULL, namesp[i], 0, NULL, &real_name)) != 0)
+ if ((ret = __db_appname(dbenv, DB_APP_DATA,
+ namesp[i], 0, NULL, &real_name)) != 0)
goto out;
/*
@@ -2268,25 +1251,25 @@ __db_testdocopy(dbp, name)
* If so, just move on.
*/
if (strstr(real_name, ".afterop") != NULL) {
- __os_freestr(real_name);
+ __os_free(dbenv, real_name);
real_name = NULL;
continue;
}
snprintf(copy, len, "%s.afterop", real_name);
__db_makecopy(real_name, copy);
- __os_freestr(real_name);
+ __os_free(dbenv, real_name);
real_name = NULL;
}
}
out:
if (backup != NULL)
- __os_freestr(backup);
+ __os_free(dbenv, backup);
if (copy != NULL)
- __os_freestr(copy);
+ __os_free(dbenv, copy);
if (namesp != NULL)
- __os_dirfree(namesp, dircnt);
+ __os_dirfree(dbenv, namesp, dircnt);
if (real_name != NULL)
- __os_freestr(real_name);
+ __os_free(dbenv, real_name);
return (ret);
}
@@ -2301,7 +1284,7 @@ __db_makecopy(src, dest)
memset(&rfh, 0, sizeof(rfh));
memset(&wfh, 0, sizeof(wfh));
- if (__os_malloc(NULL, 1024, NULL, &buf) != 0)
+ if (__os_malloc(NULL, 1024, &buf) != 0)
return;
if (__os_open(NULL,
@@ -2313,13 +1296,13 @@ __db_makecopy(src, dest)
for (;;)
if (__os_read(NULL, &rfh, buf, 1024, &rcnt) < 0 || rcnt == 0 ||
- __os_write(NULL, &wfh, buf, rcnt, &wcnt) < 0 || wcnt != rcnt)
+ __os_write(NULL, &wfh, buf, rcnt, &wcnt) < 0)
break;
-err: __os_free(buf, 1024);
+err: __os_free(NULL, buf);
if (F_ISSET(&rfh, DB_FH_VALID))
- __os_closehandle(&rfh);
+ __os_closehandle(NULL, &rfh);
if (F_ISSET(&wfh, DB_FH_VALID))
- __os_closehandle(&wfh);
+ __os_closehandle(NULL, &wfh);
}
#endif
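The four-way naming scheme in __db_backup_name above is easier to follow with concrete values. The sketch below is illustrative only and is not part of the patch: it mirrors the four snprintf cases, substitutes a plain strrchr('/') for __db_rpath, and uses a made-up LSN of file 3, offset 0x82, so the resulting names can be printed side by side.

#include <stdio.h>
#include <string.h>

#define	BACKUP_PREFIX	"__db."

/* Mirror of the four cases in __db_backup_name, for illustration only. */
static void
backup_name(const char *name, int have_txn,
    unsigned int lsn_file, unsigned int lsn_offset, char *buf, size_t len)
{
	const char *p;
	int plen;

	if ((p = strrchr(name, '/')) == NULL) {
		if (!have_txn)			/* case 1: __db.foo.db. */
			snprintf(buf, len, "%s%s.", BACKUP_PREFIX, name);
		else				/* case 2: __db.3.82 */
			snprintf(buf, len,
			    "%s%x.%x", BACKUP_PREFIX, lsn_file, lsn_offset);
	} else {
		plen = (int)(p - name) + 1;
		p++;
		if (!have_txn)			/* case 3: dir/__db.foo.db. */
			snprintf(buf, len,
			    "%.*s%s%s.", plen, name, BACKUP_PREFIX, p);
		else				/* case 4: dir/3.82. */
			snprintf(buf, len,
			    "%.*s%x.%x.", plen, name, lsn_file, lsn_offset);
	}
}

int
main()
{
	char buf[128];

	backup_name("foo.db", 0, 0, 0, buf, sizeof(buf));
	printf("%s\n", buf);
	backup_name("foo.db", 1, 3, 0x82, buf, sizeof(buf));
	printf("%s\n", buf);
	backup_name("dir/foo.db", 0, 0, 0, buf, sizeof(buf));
	printf("%s\n", buf);
	backup_name("dir/foo.db", 1, 3, 0x82, buf, sizeof(buf));
	printf("%s\n", buf);
	return (0);
}

Note that the transactional forms drop the file name entirely, which is why the dummy log record written above is needed: it guarantees the transaction has an LSN, and the LSN alone is what keeps the temporary names unique.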
diff --git a/bdb/db/db.src b/bdb/db/db.src
index b695e1360c5..414321fcbbd 100644
--- a/bdb/db/db.src
+++ b/bdb/db/db.src
@@ -1,13 +1,14 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Copyright (c) 1996-2002
* Sleepycat Software. All rights reserved.
*
- * $Id: db.src,v 11.8 2000/02/17 20:24:07 bostic Exp $
+ * $Id: db.src,v 11.18 2002/04/17 19:02:58 krinsky Exp $
*/
-PREFIX db
+PREFIX __db
+DBPRIVATE
INCLUDE #include "db_config.h"
INCLUDE
@@ -15,15 +16,17 @@ INCLUDE #ifndef NO_SYSTEM_INCLUDES
INCLUDE #include <sys/types.h>
INCLUDE
INCLUDE #include <ctype.h>
-INCLUDE #include <errno.h>
INCLUDE #include <string.h>
INCLUDE #endif
INCLUDE
INCLUDE #include "db_int.h"
-INCLUDE #include "db_page.h"
-INCLUDE #include "db_dispatch.h"
-INCLUDE #include "db_am.h"
-INCLUDE #include "txn.h"
+INCLUDE #include "dbinc/crypto.h"
+INCLUDE #include "dbinc/db_page.h"
+INCLUDE #include "dbinc/db_dispatch.h"
+INCLUDE #include "dbinc/db_am.h"
+INCLUDE #include "dbinc/log.h"
+INCLUDE #include "dbinc/rep.h"
+INCLUDE #include "dbinc/txn.h"
INCLUDE
/*
@@ -44,33 +47,16 @@ INCLUDE
*/
BEGIN addrem 41
ARG opcode u_int32_t lu
-ARG fileid int32_t ld
-ARG pgno db_pgno_t lu
+DB fileid int32_t ld
+WRLOCK pgno db_pgno_t lu
ARG indx u_int32_t lu
-ARG nbytes size_t lu
-DBT hdr DBT s
+ARG nbytes u_int32_t lu
+PGDBT hdr DBT s
DBT dbt DBT s
POINTER pagelsn DB_LSN * lu
END
/*
- * split -- Handles the split of a duplicate page.
- *
- * opcode: defines whether we are splitting from or splitting onto
- * fileid: file identifier of the file being modified.
- * pgno: page number being split.
- * pageimage: entire page contents.
- * pagelsn: former lsn of the page.
- */
-DEPRECATED split 42
-ARG opcode u_int32_t lu
-ARG fileid int32_t ld
-ARG pgno db_pgno_t lu
-DBT pageimage DBT s
-POINTER pagelsn DB_LSN * lu
-END
-
-/*
* big -- Handles addition and deletion of big key/data items.
*
* opcode: identifies get/put.
@@ -87,10 +73,10 @@ END
*/
BEGIN big 43
ARG opcode u_int32_t lu
-ARG fileid int32_t ld
-ARG pgno db_pgno_t lu
-ARG prev_pgno db_pgno_t lu
-ARG next_pgno db_pgno_t lu
+DB fileid int32_t ld
+WRLOCK pgno db_pgno_t lu
+WRLOCKNZ prev_pgno db_pgno_t lu
+WRLOCKNZ next_pgno db_pgno_t lu
DBT dbt DBT s
POINTER pagelsn DB_LSN * lu
POINTER prevlsn DB_LSN * lu
@@ -106,8 +92,8 @@ END
* lsn: the page's original lsn.
*/
BEGIN ovref 44
-ARG fileid int32_t ld
-ARG pgno db_pgno_t lu
+DB fileid int32_t ld
+WRLOCK pgno db_pgno_t lu
ARG adjust int32_t ld
POINTER lsn DB_LSN * lu
END
@@ -125,33 +111,16 @@ END
*/
BEGIN relink 45
ARG opcode u_int32_t lu
-ARG fileid int32_t ld
-ARG pgno db_pgno_t lu
+DB fileid int32_t ld
+WRLOCK pgno db_pgno_t lu
POINTER lsn DB_LSN * lu
-ARG prev db_pgno_t lu
+WRLOCKNZ prev db_pgno_t lu
POINTER lsn_prev DB_LSN * lu
-ARG next db_pgno_t lu
+WRLOCKNZ next db_pgno_t lu
POINTER lsn_next DB_LSN * lu
END
/*
- * Addpage -- Handles adding a new duplicate page onto the end of
- * an existing duplicate page.
- * fileid: identifies the file being changed.
- * pgno: page number to which a new page is being added.
- * lsn: lsn of pgno
- * nextpgno: new page number being added.
- * nextlsn: lsn of nextpgno;
- */
-DEPRECATED addpage 46
-ARG fileid int32_t ld
-ARG pgno db_pgno_t lu
-POINTER lsn DB_LSN * lu
-ARG nextpgno db_pgno_t lu
-POINTER nextlsn DB_LSN * lu
-END
-
-/*
* Debug -- log an operation upon entering an access method.
* op: Operation (cursor, c_close, c_get, c_put, c_del,
* get, put, delete).
@@ -172,7 +141,55 @@ END
* noop -- do nothing, but get an LSN.
*/
BEGIN noop 48
-ARG fileid int32_t ld
-ARG pgno db_pgno_t lu
+DB fileid int32_t ld
+WRLOCK pgno db_pgno_t lu
POINTER prevlsn DB_LSN * lu
END
+
+/*
+ * pg_alloc: used to record allocating a new page.
+ *
+ * meta_lsn: the meta-data page's original lsn.
+ * meta_pgno the meta-data page number.
+ * page_lsn: the allocated page's original lsn.
+ * pgno: the page allocated.
+ * ptype: the type of the page allocated.
+ * next: the next page on the free list.
+ */
+BEGIN pg_alloc 49
+DB fileid int32_t ld
+POINTER meta_lsn DB_LSN * lu
+WRLOCK meta_pgno db_pgno_t lu
+POINTER page_lsn DB_LSN * lu
+WRLOCK pgno db_pgno_t lu
+ARG ptype u_int32_t lu
+ARG next db_pgno_t lu
+END
+
+/*
+ * pg_free: used to record freeing a page.
+ *
+ * pgno: the page being freed.
+ * meta_lsn: the meta-data page's original lsn.
+ * meta_pgno: the meta-data page number.
+ * header: the header from the free'd page.
+ * next: the previous next pointer on the metadata page.
+ */
+BEGIN pg_free 50
+DB fileid int32_t ld
+WRLOCK pgno db_pgno_t lu
+POINTER meta_lsn DB_LSN * lu
+WRLOCK meta_pgno db_pgno_t lu
+PGDBT header DBT s
+ARG next db_pgno_t lu
+END
+
+/*
+ * cksum --
+ * This log record is written when we're unable to checksum a page,
+ * before returning DB_RUNRECOVERY. This log record causes normal
+ * recovery to itself return DB_RUNRECOVERY, as only catastrophic
+ * recovery can fix things.
+ */
+BEGIN cksum 51
+END
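For readers unfamiliar with the db.src record-description language: each BEGIN ... END block above is fed to dist/gen_rec.awk, which generates a logging routine plus the matching read, print and recovery plumbing. The fragment below is a sketch only, not code from the patch; the __db_pg_alloc_log() name follows from the new PREFIX __db plus the record name, and its argument order is an assumption based on the pattern visible in the __crdel_metasub_log() call earlier in this diff (dbp, txn, returned LSN, flags, then one argument per remaining field in declaration order). The caller, field names and includes are hypothetical.

/*
 * Hypothetical caller; assumes db_config.h, db_int.h and dbinc/db_page.h
 * are included and the generated __db_pg_alloc_log() prototype is visible.
 */
static int
log_pg_alloc(DB *dbp, DB_TXN *txn, DBMETA *meta, PAGE *h,
    u_int32_t ptype, db_pgno_t next)
{
	DB_LSN new_lsn;
	int ret;

	if (!LOGGING_ON(dbp->dbenv) || txn == NULL)
		return (0);

	/* DB field -> dbp handle; WRLOCK/POINTER/ARG fields in order. */
	if ((ret = __db_pg_alloc_log(dbp, txn, &new_lsn, 0,
	    &meta->lsn, meta->pgno, &h->lsn, h->pgno, ptype, next)) != 0)
		return (ret);

	/* Both modified pages pick up the new record's LSN. */
	meta->lsn = h->lsn = new_lsn;
	return (0);
}

The new WRLOCK/WRLOCKNZ annotations appear to tell the generator which page-number fields must be write-locked when the record is applied (WRLOCKNZ presumably only when the value is not PGNO_INVALID), and PGDBT marks a DBT carrying a full page image.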
diff --git a/bdb/db/db_am.c b/bdb/db/db_am.c
index 2d224566904..cf6ef18549b 100644
--- a/bdb/db/db_am.c
+++ b/bdb/db/db_am.c
@@ -1,14 +1,14 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1998, 1999, 2000
+ * Copyright (c) 1998-2002
* Sleepycat Software. All rights reserved.
*/
#include "db_config.h"
#ifndef lint
-static const char revid[] = "$Id: db_am.c,v 11.42 2001/01/11 18:19:50 bostic Exp $";
+static const char revid[] = "$Id: db_am.c,v 11.96 2002/08/27 15:17:32 bostic Exp $";
#endif /* not lint */
#ifndef NO_SYSTEM_INCLUDES
@@ -18,16 +18,22 @@ static const char revid[] = "$Id: db_am.c,v 11.42 2001/01/11 18:19:50 bostic Exp
#endif
#include "db_int.h"
-#include "db_page.h"
-#include "db_shash.h"
-#include "btree.h"
-#include "hash.h"
-#include "qam.h"
-#include "lock.h"
-#include "mp.h"
-#include "txn.h"
-#include "db_am.h"
-#include "db_ext.h"
+#include "dbinc/db_page.h"
+#include "dbinc/db_shash.h"
+#include "dbinc/btree.h"
+#include "dbinc/hash.h"
+#include "dbinc/lock.h"
+#include "dbinc/log.h"
+#include "dbinc/mp.h"
+#include "dbinc/qam.h"
+
+static int __db_append_primary __P((DBC *, DBT *, DBT *));
+static int __db_secondary_get __P((DB *, DB_TXN *, DBT *, DBT *, u_int32_t));
+static int __db_secondary_close __P((DB *, u_int32_t));
+
+#ifdef DEBUG
+static int __db_cprint_item __P((DBC *));
+#endif
/*
* __db_cursor --
@@ -53,12 +59,22 @@ __db_cursor(dbp, txn, dbcp, flags)
PANIC_CHECK(dbenv);
DB_ILLEGAL_BEFORE_OPEN(dbp, "DB->cursor");
- /* Check for invalid flags. */
- if ((ret = __db_cursorchk(dbp, flags, F_ISSET(dbp, DB_AM_RDONLY))) != 0)
+ /* Validate arguments. */
+ if ((ret = __db_cursorchk(dbp, flags)) != 0)
return (ret);
- if ((ret =
- __db_icursor(dbp, txn, dbp->type, PGNO_INVALID, 0, dbcp)) != 0)
+ /*
+ * Check for consistent transaction usage. For now, assume that
+ * this cursor might be used for read operations only (in which
+ * case it may not require a txn). We'll check more stringently
+ * in c_del and c_put. (Note that this all means that the
+ * read-op txn tests have to be a subset of the write-op ones.)
+ */
+ if ((ret = __db_check_txn(dbp, txn, DB_LOCK_INVALIDID, 1)) != 0)
+ return (ret);
+
+ if ((ret = __db_icursor(dbp,
+ txn, dbp->type, PGNO_INVALID, 0, DB_LOCK_INVALIDID, dbcp)) != 0)
return (ret);
dbc = *dbcp;
@@ -70,7 +86,7 @@ __db_cursor(dbp, txn, dbcp, flags)
op = LF_ISSET(DB_OPFLAGS_MASK);
mode = (op == DB_WRITELOCK) ? DB_LOCK_WRITE :
((op == DB_WRITECURSOR) ? DB_LOCK_IWRITE : DB_LOCK_READ);
- if ((ret = lock_get(dbenv, dbc->locker, 0,
+ if ((ret = dbenv->lock_get(dbenv, dbc->locker, 0,
&dbc->lock_dbt, mode, &dbc->mylock)) != 0) {
(void)__db_c_close(dbc);
return (ret);
@@ -81,6 +97,9 @@ __db_cursor(dbp, txn, dbcp, flags)
F_SET(dbc, DBC_WRITER);
}
+ if (LF_ISSET(DB_DIRTY_READ) ||
+ (txn != NULL && F_ISSET(txn, TXN_DIRTY_READ)))
+ F_SET(dbc, DBC_DIRTY_READ);
return (0);
}
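The DBC_DIRTY_READ handling added above is driven by a flag the application passes to DB->cursor. A rough usage sketch follows; it is not from the patch and assumes the database handle was itself opened with DB_DIRTY_READ, since dirty reads are only honored on such handles.

#include <stdio.h>
#include <string.h>
#include <db.h>

/* Walk a database without blocking on other transactions' write locks. */
int
dump_uncommitted(DB *dbp)
{
	DBC *dbc;
	DBT key, data;
	int ret, t_ret;

	memset(&key, 0, sizeof(key));
	memset(&data, 0, sizeof(data));

	/* DB_DIRTY_READ here is what makes __db_cursor set DBC_DIRTY_READ. */
	if ((ret = dbp->cursor(dbp, NULL, &dbc, DB_DIRTY_READ)) != 0)
		return (ret);

	while ((ret = dbc->c_get(dbc, &key, &data, DB_NEXT)) == 0)
		printf("%.*s\n", (int)key.size, (char *)key.data);
	if (ret == DB_NOTFOUND)
		ret = 0;

	if ((t_ret = dbc->c_close(dbc)) != 0 && ret == 0)
		ret = t_ret;
	return (ret);
}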
@@ -91,15 +110,16 @@ __db_cursor(dbp, txn, dbcp, flags)
* initialize as a cursor.
*
* PUBLIC: int __db_icursor
- * PUBLIC: __P((DB *, DB_TXN *, DBTYPE, db_pgno_t, int, DBC **));
+ * PUBLIC: __P((DB *, DB_TXN *, DBTYPE, db_pgno_t, int, u_int32_t, DBC **));
*/
int
-__db_icursor(dbp, txn, dbtype, root, is_opd, dbcp)
+__db_icursor(dbp, txn, dbtype, root, is_opd, lockerid, dbcp)
DB *dbp;
DB_TXN *txn;
DBTYPE dbtype;
db_pgno_t root;
int is_opd;
+ u_int32_t lockerid;
DBC **dbcp;
{
DBC *dbc, *adbc;
@@ -120,7 +140,7 @@ __db_icursor(dbp, txn, dbtype, root, is_opd, dbcp)
dbc != NULL; dbc = TAILQ_NEXT(dbc, links))
if (dbtype == dbc->dbtype) {
TAILQ_REMOVE(&dbp->free_queue, dbc, links);
- dbc->flags = 0;
+ F_CLR(dbc, ~DBC_OWN_LID);
break;
}
MUTEX_THREAD_UNLOCK(dbenv, dbp->mutexp);
@@ -144,11 +164,35 @@ __db_icursor(dbp, txn, dbtype, root, is_opd, dbcp)
if (!DB_IS_THREADED(dbp) &&
(adbc = TAILQ_FIRST(&dbp->active_queue)) != NULL)
dbc->lid = adbc->lid;
- else
- if ((ret = lock_id(dbenv, &dbc->lid)) != 0)
+ else {
+ if ((ret =
+ dbenv->lock_id(dbenv, &dbc->lid)) != 0)
goto err;
+ F_SET(dbc, DBC_OWN_LID);
+ }
+
+ /*
+ * In CDB, secondary indices should share a lock file
+ * ID with the primary; otherwise we're susceptible to
+ * deadlocks. We also use __db_icursor rather
+ * than sdbp->cursor to create secondary update
+ * cursors in c_put and c_del; these won't
+ * acquire a new lock.
+ *
+ * !!!
+ * Since this is in the one-time cursor allocation
+ * code, we need to be sure to destroy, not just
+ * close, all cursors in the secondary when we
+ * associate.
+ */
+ if (CDB_LOCKING(dbp->dbenv) &&
+ F_ISSET(dbp, DB_AM_SECONDARY))
+ memcpy(dbc->lock.fileid,
+ dbp->s_primary->fileid, DB_FILE_ID_LEN);
+ else
+ memcpy(dbc->lock.fileid,
+ dbp->fileid, DB_FILE_ID_LEN);
- memcpy(dbc->lock.fileid, dbp->fileid, DB_FILE_ID_LEN);
if (CDB_LOCKING(dbenv)) {
if (F_ISSET(dbenv, DB_ENV_CDB_ALLDB)) {
/*
@@ -198,18 +242,55 @@ __db_icursor(dbp, txn, dbtype, root, is_opd, dbcp)
/* Refresh the DBC structure. */
dbc->dbtype = dbtype;
+ RESET_RET_MEM(dbc);
- if ((dbc->txn = txn) == NULL)
- dbc->locker = dbc->lid;
- else {
+ if ((dbc->txn = txn) == NULL) {
+ /*
+ * There are certain cases in which we want to create a
+ * new cursor with a particular locker ID that is known
+ * to be the same as (and thus not conflict with) an
+ * open cursor.
+ *
+ * The most obvious case is cursor duplication; when we
+ * call DBC->c_dup or __db_c_idup, we want to use the original
+ * cursor's locker ID.
+ *
+ * Another case is when updating secondary indices. Standard
+ * CDB locking would mean that we might block ourself: we need
+ * to open an update cursor in the secondary while an update
+ * cursor in the primary is open, and when the secondary and
+ * primary are subdatabases or we're using env-wide locking,
+ * this is disastrous.
+ *
+ * In these cases, our caller will pass a nonzero locker ID
+ * into this function. Use this locker ID instead of dbc->lid
+ * as the locker ID for our new cursor.
+ */
+ if (lockerid != DB_LOCK_INVALIDID)
+ dbc->locker = lockerid;
+ else
+ dbc->locker = dbc->lid;
+ } else {
dbc->locker = txn->txnid;
txn->cursors++;
}
+ /*
+ * These fields change when we are used as a secondary index, so
+ * if the DB is a secondary, make sure they're set properly just
+ * in case we opened some cursors before we were associated.
+ *
+ * __db_c_get is used by all access methods, so this should be safe.
+ */
+ if (F_ISSET(dbp, DB_AM_SECONDARY))
+ dbc->c_get = __db_c_secondary_get;
+
if (is_opd)
F_SET(dbc, DBC_OPD);
if (F_ISSET(dbp, DB_AM_RECOVER))
F_SET(dbc, DBC_RECOVER);
+ if (F_ISSET(dbp, DB_AM_COMPENSATE))
+ F_SET(dbc, DBC_COMPENSATE);
/* Refresh the DBC internal structure. */
cp = dbc->internal;
@@ -243,14 +324,14 @@ __db_icursor(dbp, txn, dbtype, root, is_opd, dbcp)
return (0);
err: if (allocated)
- __os_free(dbc, sizeof(*dbc));
+ __os_free(dbp->dbenv, dbc);
return (ret);
}
#ifdef DEBUG
/*
* __db_cprint --
- * Display the current cursor list.
+ * Display the cursor active and free queues.
*
* PUBLIC: int __db_cprint __P((DB *));
*/
@@ -258,60 +339,76 @@ int
__db_cprint(dbp)
DB *dbp;
{
+ DBC *dbc;
+ int ret, t_ret;
+
+ ret = 0;
+ MUTEX_THREAD_LOCK(dbp->dbenv, dbp->mutexp);
+ fprintf(stderr, "Active queue:\n");
+ for (dbc = TAILQ_FIRST(&dbp->active_queue);
+ dbc != NULL; dbc = TAILQ_NEXT(dbc, links))
+ if ((t_ret = __db_cprint_item(dbc)) != 0 && ret == 0)
+ ret = t_ret;
+ fprintf(stderr, "Free queue:\n");
+ for (dbc = TAILQ_FIRST(&dbp->free_queue);
+ dbc != NULL; dbc = TAILQ_NEXT(dbc, links))
+ if ((t_ret = __db_cprint_item(dbc)) != 0 && ret == 0)
+ ret = t_ret;
+ MUTEX_THREAD_UNLOCK(dbp->dbenv, dbp->mutexp);
+
+ return (ret);
+}
+
+static int
+__db_cprint_item(dbc)
+ DBC *dbc;
+{
static const FN fn[] = {
{ DBC_ACTIVE, "active" },
+ { DBC_COMPENSATE, "compensate" },
{ DBC_OPD, "off-page-dup" },
{ DBC_RECOVER, "recover" },
{ DBC_RMW, "read-modify-write" },
+ { DBC_TRANSIENT, "transient" },
{ DBC_WRITECURSOR, "write cursor" },
{ DBC_WRITEDUP, "internally dup'ed write cursor" },
{ DBC_WRITER, "short-term write cursor" },
{ 0, NULL }
};
- DBC *dbc;
+ DB *dbp;
DBC_INTERNAL *cp;
- char *s;
+ const char *s;
- MUTEX_THREAD_LOCK(dbp->dbenv, dbp->mutexp);
- for (dbc = TAILQ_FIRST(&dbp->active_queue);
- dbc != NULL; dbc = TAILQ_NEXT(dbc, links)) {
- switch (dbc->dbtype) {
- case DB_BTREE:
- s = "btree";
- break;
- case DB_HASH:
- s = "hash";
- break;
- case DB_RECNO:
- s = "recno";
- break;
- case DB_QUEUE:
- s = "queue";
- break;
- default:
- DB_ASSERT(0);
- return (1);
- }
- cp = dbc->internal;
- fprintf(stderr, "%s/%#0lx: opd: %#0lx\n",
- s, P_TO_ULONG(dbc), P_TO_ULONG(cp->opd));
- fprintf(stderr, "\ttxn: %#0lx lid: %lu locker: %lu\n",
- P_TO_ULONG(dbc->txn),
- (u_long)dbc->lid, (u_long)dbc->locker);
- fprintf(stderr, "\troot: %lu page/index: %lu/%lu",
- (u_long)cp->root, (u_long)cp->pgno, (u_long)cp->indx);
- __db_prflags(dbc->flags, fn, stderr);
- fprintf(stderr, "\n");
-
- if (dbp->type == DB_BTREE)
- __bam_cprint(dbc);
+ dbp = dbc->dbp;
+ cp = dbc->internal;
+
+ s = __db_dbtype_to_string(dbc->dbtype);
+ if (strcmp(s, "UNKNOWN TYPE") == 0) {
+ DB_ASSERT(0);
+ return (1);
}
- for (dbc = TAILQ_FIRST(&dbp->free_queue);
- dbc != NULL; dbc = TAILQ_NEXT(dbc, links))
- fprintf(stderr, "free: %#0lx ", P_TO_ULONG(dbc));
+ fprintf(stderr, "%s/%#0lx: opd: %#0lx\n",
+ s, P_TO_ULONG(dbc), P_TO_ULONG(cp->opd));
+
+ fprintf(stderr, "\ttxn: %#0lx lid: %lu locker: %lu\n",
+ P_TO_ULONG(dbc->txn), (u_long)dbc->lid, (u_long)dbc->locker);
+
+ fprintf(stderr, "\troot: %lu page/index: %lu/%lu",
+ (u_long)cp->root, (u_long)cp->pgno, (u_long)cp->indx);
+
+ __db_prflags(dbc->flags, fn, stderr);
fprintf(stderr, "\n");
- MUTEX_THREAD_UNLOCK(dbp->dbenv, dbp->mutexp);
+ switch (dbp->type) {
+ case DB_BTREE:
+ __bam_cprint(dbc);
+ break;
+ case DB_HASH:
+ __ham_cprint(dbc);
+ break;
+ default:
+ break;
+ }
return (0);
}
#endif /* DEBUG */
@@ -345,7 +442,7 @@ __db_fd(dbp, fdp)
return (0);
} else {
*fdp = -1;
- __db_err(dbp->dbenv, "DB does not have a valid file handle.");
+ __db_err(dbp->dbenv, "DB does not have a valid file handle");
return (ENOENT);
}
}
@@ -372,8 +469,16 @@ __db_get(dbp, txn, key, data, flags)
if ((ret = __db_getchk(dbp, key, data, flags)) != 0)
return (ret);
+ /* Check for consistent transaction usage. */
+ if ((ret = __db_check_txn(dbp, txn, DB_LOCK_INVALIDID, 1)) != 0)
+ return (ret);
+
mode = 0;
- if (flags == DB_CONSUME || flags == DB_CONSUME_WAIT)
+ if (LF_ISSET(DB_DIRTY_READ)) {
+ mode = DB_DIRTY_READ;
+ LF_CLR(DB_DIRTY_READ);
+ }
+ else if (flags == DB_CONSUME || flags == DB_CONSUME_WAIT)
mode = DB_WRITELOCK;
if ((ret = dbp->cursor(dbp, txn, &dbc, mode)) != 0)
return (ret);
@@ -387,11 +492,17 @@ __db_get(dbp, txn, key, data, flags)
* going to close it right away. Thus, we can perform the get
* without duplicating the cursor, saving some cycles in this
* common case.
+ *
+ * SET_RET_MEM indicates that if key and/or data have no DBT
+ * flags set and DB manages the returned-data memory, that memory
+ * will belong to this handle, not to the underlying cursor.
*/
F_SET(dbc, DBC_TRANSIENT);
+ SET_RET_MEM(dbc, dbp);
- ret = dbc->c_get(dbc, key, data,
- flags == 0 || flags == DB_RMW ? flags | DB_SET : flags);
+ if (LF_ISSET(~(DB_RMW | DB_MULTIPLE)) == 0)
+ LF_SET(DB_SET);
+ ret = dbc->c_get(dbc, key, data, flags);
if ((t_ret = __db_c_close(dbc)) != 0 && ret == 0)
ret = t_ret;
@@ -414,20 +525,39 @@ __db_put(dbp, txn, key, data, flags)
{
DBC *dbc;
DBT tdata;
- int ret, t_ret;
+ DB_ENV *dbenv;
+ int ret, t_ret, txn_local;
- PANIC_CHECK(dbp->dbenv);
+ dbc = NULL;
+ dbenv = dbp->dbenv;
+ txn_local = 0;
+
+ PANIC_CHECK(dbenv);
DB_ILLEGAL_BEFORE_OPEN(dbp, "DB->put");
+ /* Validate arguments. */
if ((ret = __db_putchk(dbp, key, data,
- flags, F_ISSET(dbp, DB_AM_RDONLY),
- F_ISSET(dbp, DB_AM_DUP) || F_ISSET(key, DB_DBT_DUPOK))) != 0)
+ flags, F_ISSET(dbp, DB_AM_DUP) || F_ISSET(key, DB_DBT_DUPOK))) != 0)
return (ret);
- DB_CHECK_TXN(dbp, txn);
+ /* Create local transaction as necessary. */
+ if (IS_AUTO_COMMIT(dbenv, txn, flags)) {
+ if ((ret = __db_txn_auto(dbp, &txn)) != 0)
+ return (ret);
+ txn_local = 1;
+ LF_CLR(DB_AUTO_COMMIT);
+ }
+
+ /* Check for consistent transaction usage. */
+ if ((ret = __db_check_txn(dbp, txn, DB_LOCK_INVALIDID, 0)) != 0)
+ goto err;
if ((ret = dbp->cursor(dbp, txn, &dbc, DB_WRITELOCK)) != 0)
- return (ret);
+ goto err;
+
+ DEBUG_LWRITE(dbc, txn, "db_put", key, data, flags);
+
+ SET_RET_MEM(dbc, dbp);
/*
* See the comment in __db_get().
@@ -441,9 +571,58 @@ __db_put(dbp, txn, key, data, flags)
*/
F_SET(dbc, DBC_TRANSIENT);
- DEBUG_LWRITE(dbc, txn, "__db_put", key, data, flags);
+ switch (flags) {
+ case DB_APPEND:
+ /*
+ * If there is an append callback, the value stored in
+ * data->data may be replaced and then freed. To avoid
+ * passing a freed pointer back to the user, just operate
+ * on a copy of the data DBT.
+ */
+ tdata = *data;
- if (flags == DB_NOOVERWRITE) {
+ /*
+ * Append isn't a normal put operation; call the appropriate
+ * access method's append function.
+ */
+ switch (dbp->type) {
+ case DB_QUEUE:
+ if ((ret = __qam_append(dbc, key, &tdata)) != 0)
+ goto err;
+ break;
+ case DB_RECNO:
+ if ((ret = __ram_append(dbc, key, &tdata)) != 0)
+ goto err;
+ break;
+ default:
+ /* The interface should prevent this. */
+ DB_ASSERT(0);
+ ret = __db_ferr(dbenv, "__db_put", flags);
+ goto err;
+ }
+
+ /*
+ * Secondary indices: since we've returned zero from
+ * an append function, we've just put a record, and done
+ * so outside __db_c_put. We know we're not a secondary--
+ * the interface prevents puts on them--but we may be a
+ * primary. If so, update our secondary indices
+ * appropriately.
+ */
+ DB_ASSERT(!F_ISSET(dbp, DB_AM_SECONDARY));
+
+ if (LIST_FIRST(&dbp->s_secondaries) != NULL)
+ ret = __db_append_primary(dbc, key, &tdata);
+
+ /*
+ * The append callback, if one exists, may have allocated
+ * a new tdata.data buffer. If so, free it.
+ */
+ FREE_IF_NEEDED(dbp, &tdata);
+
+ /* No need for a cursor put; we're done. */
+ goto err;
+ case DB_NOOVERWRITE:
flags = 0;
/*
* Set DB_DBT_USERMEM, this might be a threaded application and
@@ -460,16 +639,161 @@ __db_put(dbp, txn, key, data, flags)
if ((ret = dbc->c_get(dbc, key, &tdata,
DB_SET | (STD_LOCKING(dbc) ? DB_RMW : 0))) == 0)
ret = DB_KEYEXIST;
- else if (ret == DB_NOTFOUND)
+ else if (ret == DB_NOTFOUND || ret == DB_KEYEMPTY)
ret = 0;
+ break;
+ default:
+ /* Fall through to normal cursor put. */
+ break;
}
if (ret == 0)
ret = dbc->c_put(dbc,
- key, data, flags == 0 ? DB_KEYLAST : flags);
+ key, data, flags == 0 ? DB_KEYLAST : flags);
- if ((t_ret = __db_c_close(dbc)) != 0 && ret == 0)
+err: /* Close the cursor. */
+ if (dbc != NULL && (t_ret = __db_c_close(dbc)) != 0 && ret == 0)
+ ret = t_ret;
+
+ /* Commit for DB_AUTO_COMMIT. */
+ if (txn_local) {
+ if (ret == 0)
+ ret = txn->commit(txn, 0);
+ else
+ if ((t_ret = txn->abort(txn)) != 0)
+ ret = __db_panic(dbenv, t_ret);
+ }
+
+ return (ret);
+}
+
+/*
+ * __db_delete --
+ * Delete the items referenced by a key.
+ *
+ * PUBLIC: int __db_delete __P((DB *, DB_TXN *, DBT *, u_int32_t));
+ */
+int
+__db_delete(dbp, txn, key, flags)
+ DB *dbp;
+ DB_TXN *txn;
+ DBT *key;
+ u_int32_t flags;
+{
+ DBC *dbc;
+ DBT data, lkey;
+ DB_ENV *dbenv;
+ u_int32_t f_init, f_next;
+ int ret, t_ret, txn_local;
+
+ dbc = NULL;
+ dbenv = dbp->dbenv;
+ txn_local = 0;
+
+ PANIC_CHECK(dbenv);
+ DB_ILLEGAL_BEFORE_OPEN(dbp, "DB->del");
+
+ /* Check for invalid flags. */
+ if ((ret = __db_delchk(dbp, key, flags)) != 0)
+ return (ret);
+
+ /* Create local transaction as necessary. */
+ if (IS_AUTO_COMMIT(dbenv, txn, flags)) {
+ if ((ret = __db_txn_auto(dbp, &txn)) != 0)
+ return (ret);
+ txn_local = 1;
+ LF_CLR(DB_AUTO_COMMIT);
+ }
+
+ /* Check for consistent transaction usage. */
+ if ((ret = __db_check_txn(dbp, txn, DB_LOCK_INVALIDID, 0)) != 0)
+ goto err;
+
+ /* Allocate a cursor. */
+ if ((ret = dbp->cursor(dbp, txn, &dbc, DB_WRITELOCK)) != 0)
+ goto err;
+
+ DEBUG_LWRITE(dbc, txn, "db_delete", key, NULL, flags);
+
+ /*
+ * Walk a cursor through the key/data pairs, deleting as we go. Set
+ * the DB_DBT_USERMEM flag, as this might be a threaded application
+ * and the flags checking will catch us. We don't actually want the
+ * keys or data, so request a partial of length 0.
+ */
+ memset(&lkey, 0, sizeof(lkey));
+ F_SET(&lkey, DB_DBT_USERMEM | DB_DBT_PARTIAL);
+ memset(&data, 0, sizeof(data));
+ F_SET(&data, DB_DBT_USERMEM | DB_DBT_PARTIAL);
+
+ /*
+ * If locking (and we haven't already acquired CDB locks), set the
+ * read-modify-write flag.
+ */
+ f_init = DB_SET;
+ f_next = DB_NEXT_DUP;
+ if (STD_LOCKING(dbc)) {
+ f_init |= DB_RMW;
+ f_next |= DB_RMW;
+ }
+
+ /* Walk through the set of key/data pairs, deleting as we go. */
+ if ((ret = dbc->c_get(dbc, key, &data, f_init)) != 0)
+ goto err;
+
+ /*
+ * Hash permits an optimization in DB->del: since on-page
+ * duplicates are stored in a single HKEYDATA structure, it's
+ * possible to delete an entire set of them at once, and as
+ * the HKEYDATA has to be rebuilt and re-put each time it
+ * changes, this is much faster than deleting the duplicates
+ * one by one. Thus, if we're not pointing at an off-page
+ * duplicate set, and we're not using secondary indices (in
+ * which case we'd have to examine the items one by one anyway),
+ * let hash do this "quick delete".
+ *
+ * !!!
+ * Note that this is the only application-executed delete call in
+ * Berkeley DB that does not go through the __db_c_del function.
+ * If anything other than the delete itself (like a secondary index
+ * update) has to happen there in a particular situation, the
+ * conditions here should be modified not to call __ham_quick_delete.
+ * The ordinary AM-independent alternative will work just fine with
+ * a hash; it'll just be slower.
+ */
+ if (dbp->type == DB_HASH) {
+ if (LIST_FIRST(&dbp->s_secondaries) == NULL &&
+ !F_ISSET(dbp, DB_AM_SECONDARY) &&
+ dbc->internal->opd == NULL) {
+ ret = __ham_quick_delete(dbc);
+ goto err;
+ }
+ }
+
+ for (;;) {
+ if ((ret = dbc->c_del(dbc, 0)) != 0)
+ goto err;
+ if ((ret = dbc->c_get(dbc, &lkey, &data, f_next)) != 0) {
+ if (ret == DB_NOTFOUND) {
+ ret = 0;
+ break;
+ }
+ goto err;
+ }
+ }
+
+err: /* Discard the cursor. */
+ if (dbc != NULL && (t_ret = dbc->c_close(dbc)) != 0 && ret == 0)
ret = t_ret;
+ /* Commit for DB_AUTO_COMMIT. */
+ if (txn_local) {
+ if (ret == 0)
+ ret = txn->commit(txn, 0);
+ else
+ if ((t_ret = txn->abort(txn)) != 0)
+ ret = __db_panic(dbenv, t_ret);
+ }
+
return (ret);
}
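Both __db_put and the new __db_delete above create and commit a transaction of their own when the caller passes DB_AUTO_COMMIT with a NULL txn. A minimal application-side sketch, not from the patch, assuming a transactional environment and a handle opened for transactional use:

#include <string.h>
#include <db.h>

/* One self-committing put followed by a self-committing delete. */
int
store_then_remove(DB *dbp, const char *k, const char *v)
{
	DBT key, data;
	int ret;

	memset(&key, 0, sizeof(key));
	memset(&data, 0, sizeof(data));
	key.data = (void *)k;
	key.size = (u_int32_t)strlen(k) + 1;
	data.data = (void *)v;
	data.size = (u_int32_t)strlen(v) + 1;

	/* NULL txn + DB_AUTO_COMMIT: __db_put wraps the call in its own txn. */
	if ((ret = dbp->put(dbp, NULL, &key, &data, DB_AUTO_COMMIT)) != 0)
		return (ret);

	/* Same local-transaction path through __db_delete. */
	return (dbp->del(dbp, NULL, &key, DB_AUTO_COMMIT));
}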
@@ -505,7 +829,443 @@ __db_sync(dbp, flags)
return (0);
/* Flush any dirty pages from the cache to the backing file. */
- if ((t_ret = memp_fsync(dbp->mpf)) != 0 && ret == 0)
+ if ((t_ret = dbp->mpf->sync(dbp->mpf)) != 0 && ret == 0)
+ ret = t_ret;
+ return (ret);
+}
+
+/*
+ * __db_associate --
+ * Associate another database as a secondary index to this one.
+ *
+ * PUBLIC: int __db_associate __P((DB *, DB_TXN *, DB *,
+ * PUBLIC: int (*)(DB *, const DBT *, const DBT *, DBT *), u_int32_t));
+ */
+int
+__db_associate(dbp, txn, sdbp, callback, flags)
+ DB *dbp, *sdbp;
+ DB_TXN *txn;
+ int (*callback) __P((DB *, const DBT *, const DBT *, DBT *));
+ u_int32_t flags;
+{
+ DB_ENV *dbenv;
+ DBC *pdbc, *sdbc;
+ DBT skey, key, data;
+ int build, ret, t_ret, txn_local;
+
+ dbenv = dbp->dbenv;
+
+ PANIC_CHECK(dbenv);
+
+ txn_local = 0;
+ pdbc = NULL;
+
+ memset(&key, 0, sizeof(DBT));
+ memset(&data, 0, sizeof(DBT));
+ memset(&skey, 0, sizeof(DBT));
+
+ if ((ret = __db_associatechk(dbp, sdbp, callback, flags)) != 0)
+ return (ret);
+
+ /*
+ * Create a local transaction as necessary, check for consistent
+ * transaction usage, and, if we have no transaction but do have
+ * locking on, acquire a locker id for the handle lock acquisition.
+ */
+ if (IS_AUTO_COMMIT(dbenv, txn, flags)) {
+ if ((ret = __db_txn_auto(dbp, &txn)) != 0)
+ return (ret);
+ txn_local = 1;
+ } else if (txn != NULL && !TXN_ON(dbenv))
+ return (__db_not_txn_env(dbenv));
+
+ /*
+ * Check that if an open transaction is in progress, we're in it,
+ * for other common transaction errors, and for concurrent associates.
+ */
+ if ((ret = __db_check_txn(dbp, txn, DB_LOCK_INVALIDID, 0)) != 0)
+ return (ret);
+
+ sdbp->s_callback = callback;
+ sdbp->s_primary = dbp;
+
+ sdbp->stored_get = sdbp->get;
+ sdbp->get = __db_secondary_get;
+
+ sdbp->stored_close = sdbp->close;
+ sdbp->close = __db_secondary_close;
+
+ /*
+ * Secondary cursors may have the primary's lock file ID, so we
+ * need to make sure that no older cursors are lying around
+ * when we make the transition.
+ */
+ if (TAILQ_FIRST(&sdbp->active_queue) != NULL ||
+ TAILQ_FIRST(&sdbp->join_queue) != NULL) {
+ __db_err(dbenv,
+ "Databases may not become secondary indices while cursors are open");
+ ret = EINVAL;
+ goto err;
+ }
+ while ((sdbc = TAILQ_FIRST(&sdbp->free_queue)) != NULL)
+ if ((ret = __db_c_destroy(sdbc)) != 0)
+ goto err;
+
+ F_SET(sdbp, DB_AM_SECONDARY);
+
+ /*
+ * Check to see if the secondary is empty--and thus if we should
+ * build it--before we link it in and risk making it show up in
+ * other threads.
+ */
+ build = 0;
+ if (LF_ISSET(DB_CREATE)) {
+ if ((ret = sdbp->cursor(sdbp, txn, &sdbc, 0)) != 0)
+ goto err;
+
+ memset(&key, 0, sizeof(DBT));
+ memset(&data, 0, sizeof(DBT));
+
+ /*
+ * We don't care about key or data; we're just doing
+ * an existence check.
+ */
+ F_SET(&key, DB_DBT_PARTIAL | DB_DBT_USERMEM);
+ F_SET(&data, DB_DBT_PARTIAL | DB_DBT_USERMEM);
+ if ((ret = sdbc->c_real_get(sdbc, &key, &data,
+ (STD_LOCKING(sdbc) ? DB_RMW : 0) |
+ DB_FIRST)) == DB_NOTFOUND) {
+ build = 1;
+ ret = 0;
+ }
+
+ /*
+ * Secondary cursors have special refcounting close
+ * methods. Be careful.
+ */
+ if ((t_ret = __db_c_close(sdbc)) != 0)
+ ret = t_ret;
+ if (ret != 0)
+ goto err;
+ }
+
+ /*
+ * Add the secondary to the list on the primary. Do it here
+ * so that we see any updates that occur while we're walking
+ * the primary.
+ */
+ MUTEX_THREAD_LOCK(dbenv, dbp->mutexp);
+
+ /* See __db_s_next for an explanation of secondary refcounting. */
+ DB_ASSERT(sdbp->s_refcnt == 0);
+ sdbp->s_refcnt = 1;
+ LIST_INSERT_HEAD(&dbp->s_secondaries, sdbp, s_links);
+ MUTEX_THREAD_UNLOCK(dbenv, dbp->mutexp);
+
+ if (build) {
+ /*
+ * We loop through the primary, putting each item we
+ * find into the new secondary.
+ *
+ * If we're using CDB, opening these two cursors puts us
+ * in a bit of a locking tangle: CDB locks are done on the
+ * primary, so that we stay deadlock-free, but that means
+ * that updating the secondary while we have a read cursor
+ * open on the primary will self-block. To get around this,
+ * we force the primary cursor to use the same locker ID
+ * as the secondary, so they won't conflict. This should
+ * be harmless even if we're not using CDB.
+ */
+ if ((ret = sdbp->cursor(sdbp, txn, &sdbc,
+ CDB_LOCKING(sdbp->dbenv) ? DB_WRITECURSOR : 0)) != 0)
+ goto err;
+ if ((ret = __db_icursor(dbp,
+ txn, dbp->type, PGNO_INVALID, 0, sdbc->locker, &pdbc)) != 0)
+ goto err;
+
+ /* Lock out other threads, now that we have a locker ID. */
+ dbp->associate_lid = sdbc->locker;
+
+ memset(&key, 0, sizeof(DBT));
+ memset(&data, 0, sizeof(DBT));
+ while ((ret = pdbc->c_get(pdbc, &key, &data, DB_NEXT)) == 0) {
+ memset(&skey, 0, sizeof(DBT));
+ if ((ret = callback(sdbp, &key, &data, &skey)) != 0) {
+ if (ret == DB_DONOTINDEX)
+ continue;
+ else
+ goto err;
+ }
+ if ((ret = sdbc->c_put(sdbc,
+ &skey, &key, DB_UPDATE_SECONDARY)) != 0) {
+ FREE_IF_NEEDED(sdbp, &skey);
+ goto err;
+ }
+
+ FREE_IF_NEEDED(sdbp, &skey);
+ }
+ if (ret == DB_NOTFOUND)
+ ret = 0;
+
+ if ((ret = sdbc->c_close(sdbc)) != 0)
+ goto err;
+ }
+
+err: if (pdbc != NULL && (t_ret = pdbc->c_close(pdbc)) != 0 && ret == 0)
+ ret = t_ret;
+
+ dbp->associate_lid = DB_LOCK_INVALIDID;
+
+ if (txn_local) {
+ if (ret == 0)
+ ret = txn->commit(txn, 0);
+ else
+ if ((t_ret = txn->abort(txn)) != 0)
+ ret = __db_panic(dbenv, t_ret);
+ }
+
+ return (ret);
+}
+
+/*
+ * __db_pget --
+ * Return a primary key/data pair given a secondary key.
+ *
+ * PUBLIC: int __db_pget __P((DB *, DB_TXN *, DBT *, DBT *, DBT *, u_int32_t));
+ */
+int
+__db_pget(dbp, txn, skey, pkey, data, flags)
+ DB *dbp;
+ DB_TXN *txn;
+ DBT *skey, *pkey, *data;
+ u_int32_t flags;
+{
+ DBC *dbc;
+ int ret, t_ret;
+
+ PANIC_CHECK(dbp->dbenv);
+ DB_ILLEGAL_BEFORE_OPEN(dbp, "DB->pget");
+
+ if ((ret = __db_pgetchk(dbp, skey, pkey, data, flags)) != 0)
+ return (ret);
+
+ if ((ret = dbp->cursor(dbp, txn, &dbc, 0)) != 0)
+ return (ret);
+ SET_RET_MEM(dbc, dbp);
+
+ /*
+ * The underlying cursor pget will fill in a default DBT for null
+ * pkeys, and use the cursor's returned-key memory internally to
+ * store any intermediate primary keys. However, we've just set
+ * the returned-key memory to the DB handle's key memory, which
+ * is unsafe to use if the DB handle is threaded. If the pkey
+ * argument is NULL, use the DBC-owned returned-key memory
+ * instead; it'll go away when we close the cursor before we
+ * return, but in this case that's just fine, as we're not
+ * returning the primary key.
+ */
+ if (pkey == NULL)
+ dbc->rkey = &dbc->my_rkey;
+
+ DEBUG_LREAD(dbc, txn, "__db_pget", skey, NULL, flags);
+
+ /*
+ * The cursor is just a perfectly ordinary secondary database
+ * cursor. Call its c_pget() method to do the dirty work.
+ */
+ if (flags == 0 || flags == DB_RMW)
+ flags |= DB_SET;
+ ret = dbc->c_pget(dbc, skey, pkey, data, flags);
+
+ if ((t_ret = __db_c_close(dbc)) != 0 && ret == 0)
+ ret = t_ret;
+ return (ret);
+}
+
+/*
+ * __db_secondary_get --
+ * This wrapper function for DB->pget() is the DB->get() function
+ * on a database which has been made into a secondary index.
+ */
+static int
+__db_secondary_get(sdbp, txn, skey, data, flags)
+ DB *sdbp;
+ DB_TXN *txn;
+ DBT *skey, *data;
+ u_int32_t flags;
+{
+
+ DB_ASSERT(F_ISSET(sdbp, DB_AM_SECONDARY));
+ return (sdbp->pget(sdbp, txn, skey, NULL, data, flags));
+}
+
+/*
+ * __db_secondary_close --
+ * Wrapper function for DB->close() which we use on secondaries to
+ * manage refcounting and make sure we don't close them underneath
+ * a primary that is updating.
+ */
+static int
+__db_secondary_close(sdbp, flags)
+ DB *sdbp;
+ u_int32_t flags;
+{
+ DB *primary;
+ int doclose;
+
+ doclose = 0;
+ primary = sdbp->s_primary;
+
+ MUTEX_THREAD_LOCK(primary->dbenv, primary->mutexp);
+ /*
+ * Check the refcount--if it was at 1 when we were called, no
+ * thread is currently updating this secondary through the primary,
+ * so it's safe to close it for real.
+ *
+ * If it's not safe to do the close now, we do nothing; the
+ * database will actually be closed when the refcount is decremented,
+ * which can happen in either __db_s_next or __db_s_done.
+ */
+ DB_ASSERT(sdbp->s_refcnt != 0);
+ if (--sdbp->s_refcnt == 0) {
+ LIST_REMOVE(sdbp, s_links);
+ /* We don't want to call close while the mutex is held. */
+ doclose = 1;
+ }
+ MUTEX_THREAD_UNLOCK(primary->dbenv, primary->mutexp);
+
+ /*
+ * sdbp->close is this function; call the real one explicitly if
+ * need be.
+ */
+ return (doclose ? __db_close(sdbp, flags) : 0);
+}
+
+/*
+ * __db_append_primary --
+ * Perform the secondary index updates necessary to put(DB_APPEND)
+ * a record to a primary database.
+ */
+static int
+__db_append_primary(dbc, key, data)
+ DBC *dbc;
+ DBT *key, *data;
+{
+ DB *dbp, *sdbp;
+ DBC *sdbc, *pdbc;
+ DBT oldpkey, pkey, pdata, skey;
+ int cmp, ret, t_ret;
+
+ dbp = dbc->dbp;
+ sdbp = NULL;
+ ret = 0;
+
+ /*
+ * Worrying about partial appends seems a little like worrying
+ * about Linear A character encodings. But we support those
+ * too if your application understands them.
+ */
+ pdbc = NULL;
+ if (F_ISSET(data, DB_DBT_PARTIAL) || F_ISSET(key, DB_DBT_PARTIAL)) {
+ /*
+ * The dbc we were passed is all set to pass things
+ * back to the user; we can't safely do a call on it.
+ * Dup the cursor, grab the real data item (we don't
+ * care what the key is--we've been passed it directly),
+ * and use that instead of the data DBT we were passed.
+ *
+ * Note that we can get away with this simple get because
+ * an appended item is by definition new, and the
+ * correctly-constructed full data item from this partial
+ * put is on the page waiting for us.
+ */
+ if ((ret = __db_c_idup(dbc, &pdbc, DB_POSITIONI)) != 0)
+ return (ret);
+ memset(&pkey, 0, sizeof(DBT));
+ memset(&pdata, 0, sizeof(DBT));
+
+ if ((ret = pdbc->c_get(pdbc, &pkey, &pdata, DB_CURRENT)) != 0)
+ goto err;
+
+ key = &pkey;
+ data = &pdata;
+ }
+
+ /*
+ * Loop through the secondary indices, putting a new item in
+ * each that points to the appended item.
+ *
+ * This is much like the loop in "step 3" in __db_c_put, so
+ * I'm not commenting heavily here; it was unclean to excerpt
+ * just that section into a common function, but the basic
+ * overview is the same here.
+ */
+ for (sdbp = __db_s_first(dbp);
+ sdbp != NULL && ret == 0; ret = __db_s_next(&sdbp)) {
+ memset(&skey, 0, sizeof(DBT));
+ if ((ret = sdbp->s_callback(sdbp, key, data, &skey)) != 0) {
+ if (ret == DB_DONOTINDEX)
+ continue;
+ else
+ goto err;
+ }
+
+ if ((ret = __db_icursor(sdbp, dbc->txn, sdbp->type,
+ PGNO_INVALID, 0, dbc->locker, &sdbc)) != 0) {
+ FREE_IF_NEEDED(sdbp, &skey);
+ goto err;
+ }
+ if (CDB_LOCKING(sdbp->dbenv)) {
+ DB_ASSERT(sdbc->mylock.off == LOCK_INVALID);
+ F_SET(sdbc, DBC_WRITER);
+ }
+
+ /*
+ * Since we know we have a new primary key, it can't be a
+ * duplicate duplicate in the secondary. It can be a
+ * duplicate in a secondary that doesn't support duplicates,
+ * however, so we need to be careful to avoid an overwrite
+ * (which would corrupt our index).
+ */
+ if (!F_ISSET(sdbp, DB_AM_DUP)) {
+ memset(&oldpkey, 0, sizeof(DBT));
+ F_SET(&oldpkey, DB_DBT_MALLOC);
+ ret = sdbc->c_real_get(sdbc, &skey, &oldpkey,
+ DB_SET | (STD_LOCKING(dbc) ? DB_RMW : 0));
+ if (ret == 0) {
+ cmp = __bam_defcmp(sdbp, &oldpkey, key);
+ /*
+ * XXX
+ * This needs to use the right free function
+ * as soon as this is possible.
+ */
+ __os_ufree(sdbp->dbenv,
+ oldpkey.data);
+ if (cmp != 0) {
+ __db_err(sdbp->dbenv, "%s%s",
+ "Append results in a non-unique secondary key in",
+ " an index not configured to support duplicates");
+ ret = EINVAL;
+ goto err1;
+ }
+ } else if (ret != DB_NOTFOUND && ret != DB_KEYEMPTY)
+ goto err1;
+ }
+
+ ret = sdbc->c_put(sdbc, &skey, key, DB_UPDATE_SECONDARY);
+
+err1: FREE_IF_NEEDED(sdbp, &skey);
+
+ if ((t_ret = sdbc->c_close(sdbc)) != 0 && ret == 0)
+ ret = t_ret;
+
+ if (ret != 0)
+ goto err;
+ }
+
+err: if (pdbc != NULL && (t_ret = pdbc->c_close(pdbc)) != 0 && ret == 0)
+ ret = t_ret;
+ if (sdbp != NULL && (t_ret = __db_s_done(sdbp)) != 0 && ret == 0)
ret = t_ret;
return (ret);
}
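The secondary-index machinery added above (__db_associate, __db_pget, the secondary get/close wrappers) is normally reached through DB->associate and DB->pget. The sketch below is illustrative only and not part of the patch: the struct, field names and error handling are made up, and transaction handling is elided by passing a NULL txn.

#include <string.h>
#include <db.h>

struct person {
	char last[32];
	char first[32];
};

/* Secondary-key callback: index person records by last name. */
static int
getlastname(DB *sdbp, const DBT *pkey, const DBT *pdata, DBT *skey)
{
	struct person *p = (struct person *)pdata->data;

	(void)sdbp;
	(void)pkey;
	memset(skey, 0, sizeof(DBT));
	skey->data = p->last;
	skey->size = (u_int32_t)strlen(p->last) + 1;
	return (0);		/* DB_DONOTINDEX would skip this record. */
}

int
index_and_lookup(DB *primary, DB *secondary, const char *last)
{
	DBT skey, pkey, pdata;
	int ret;

	/* Tie the secondary to the primary; DB_CREATE builds it if empty. */
	if ((ret = primary->associate(primary,
	    NULL, secondary, getlastname, DB_CREATE)) != 0)
		return (ret);

	memset(&skey, 0, sizeof(skey));
	memset(&pkey, 0, sizeof(pkey));
	memset(&pdata, 0, sizeof(pdata));
	skey.data = (void *)last;
	skey.size = (u_int32_t)strlen(last) + 1;

	/* DB->pget hands back both the primary key and the primary data. */
	return (secondary->pget(secondary, NULL, &skey, &pkey, &pdata, 0));
}

Once associated, updates made through the primary keep the secondary in step via the __db_append_primary and cursor put/del paths above, so the application never writes to the secondary directly.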
diff --git a/bdb/db/db_cam.c b/bdb/db/db_cam.c
index 708d4cbda4d..4de3467d4aa 100644
--- a/bdb/db/db_cam.c
+++ b/bdb/db/db_cam.c
@@ -1,14 +1,14 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2000
+ * Copyright (c) 2000-2002
* Sleepycat Software. All rights reserved.
*/
#include "db_config.h"
#ifndef lint
-static const char revid[] = "$Id: db_cam.c,v 11.52 2001/01/18 15:11:16 bostic Exp $";
+static const char revid[] = "$Id: db_cam.c,v 11.114 2002/09/03 15:44:46 krinsky Exp $";
#endif /* not lint */
#ifndef NO_SYSTEM_INCLUDES
@@ -18,17 +18,18 @@ static const char revid[] = "$Id: db_cam.c,v 11.52 2001/01/18 15:11:16 bostic Ex
#endif
#include "db_int.h"
-#include "db_page.h"
-#include "db_shash.h"
-#include "lock.h"
-#include "btree.h"
-#include "hash.h"
-#include "qam.h"
-#include "txn.h"
-#include "db_ext.h"
-
+#include "dbinc/db_page.h"
+#include "dbinc/db_shash.h"
+#include "dbinc/btree.h"
+#include "dbinc/hash.h"
+#include "dbinc/lock.h"
+#include "dbinc/log.h"
+#include "dbinc/qam.h"
+
+static int __db_buildpartial __P((DB *, DBT *, DBT *, DBT *));
static int __db_c_cleanup __P((DBC *, DBC *, int));
-static int __db_c_idup __P((DBC *, DBC **, u_int32_t));
+static int __db_c_del_secondary __P((DBC *));
+static int __db_c_pget_recno __P((DBC *, DBT *, DBT *, u_int32_t));
static int __db_wrlock_err __P((DB_ENV *));
#define CDB_LOCKING_INIT(dbp, dbc) \
@@ -43,9 +44,9 @@ static int __db_wrlock_err __P((DB_ENV *));
return (__db_wrlock_err(dbp->dbenv)); \
\
if (F_ISSET(dbc, DBC_WRITECURSOR) && \
- (ret = lock_get((dbp)->dbenv, (dbc)->locker, \
- DB_LOCK_UPGRADE, &(dbc)->lock_dbt, DB_LOCK_WRITE, \
- &(dbc)->mylock)) != 0) \
+ (ret = (dbp)->dbenv->lock_get((dbp)->dbenv, \
+ (dbc)->locker, DB_LOCK_UPGRADE, &(dbc)->lock_dbt, \
+ DB_LOCK_WRITE, &(dbc)->mylock)) != 0) \
return (ret); \
}
#define CDB_LOCKING_DONE(dbp, dbc) \
@@ -63,9 +64,8 @@ static int __db_wrlock_err __P((DB_ENV *));
F_ISSET((dbc_o), DBC_WRITECURSOR | DBC_WRITEDUP)) { \
memcpy(&(dbc_n)->mylock, &(dbc_o)->mylock, \
sizeof((dbc_o)->mylock)); \
- (dbc_n)->locker = (dbc_o)->locker; \
- /* This lock isn't ours to put--just discard it on close. */ \
- F_SET((dbc_n), DBC_WRITEDUP); \
+ /* This lock isn't ours to put--just discard it on close. */ \
+ F_SET((dbc_n), DBC_WRITEDUP); \
}
/*
@@ -81,12 +81,14 @@ __db_c_close(dbc)
DB *dbp;
DBC *opd;
DBC_INTERNAL *cp;
+ DB_ENV *dbenv;
int ret, t_ret;
dbp = dbc->dbp;
+ dbenv = dbp->dbenv;
ret = 0;
- PANIC_CHECK(dbp->dbenv);
+ PANIC_CHECK(dbenv);
/*
* If the cursor is already closed we have a serious problem, and we
@@ -95,7 +97,7 @@ __db_c_close(dbc)
*/
if (!F_ISSET(dbc, DBC_ACTIVE)) {
if (dbp != NULL)
- __db_err(dbp->dbenv, "Closing closed cursor");
+ __db_err(dbenv, "Closing already-closed cursor");
DB_ASSERT(0);
return (EINVAL);
@@ -113,11 +115,9 @@ __db_c_close(dbc)
* !!!
* Cursors must be removed from the active queue before calling the
* access specific cursor close routine, btree depends on having that
- * order of operations. It must also happen before any action that
- * can fail and cause __db_c_close to return an error, or else calls
- * here from __db_close may loop indefinitely.
+ * order of operations.
*/
- MUTEX_THREAD_LOCK(dbp->dbenv, dbp->mutexp);
+ MUTEX_THREAD_LOCK(dbenv, dbp->mutexp);
if (opd != NULL) {
F_CLR(opd, DBC_ACTIVE);
@@ -126,7 +126,7 @@ __db_c_close(dbc)
F_CLR(dbc, DBC_ACTIVE);
TAILQ_REMOVE(&dbp->active_queue, dbc, links);
- MUTEX_THREAD_UNLOCK(dbp->dbenv, dbp->mutexp);
+ MUTEX_THREAD_UNLOCK(dbenv, dbp->mutexp);
/* Call the access specific cursor close routine. */
if ((t_ret =
@@ -137,17 +137,20 @@ __db_c_close(dbc)
* Release the lock after calling the access method specific close
* routine, a Btree cursor may have had pending deletes.
*/
- if (CDB_LOCKING(dbc->dbp->dbenv)) {
+ if (CDB_LOCKING(dbenv)) {
/*
* If DBC_WRITEDUP is set, the cursor is an internally
* duplicated write cursor and the lock isn't ours to put.
+ *
+ * Also, be sure not to free anything if mylock.off is
+ * INVALID; in some cases, such as idup'ed read cursors
+ * and secondary update cursors, a cursor in a CDB
+ * environment may not have a lock at all.
*/
- if (!F_ISSET(dbc, DBC_WRITEDUP) &&
- dbc->mylock.off != LOCK_INVALID) {
- if ((t_ret = lock_put(dbc->dbp->dbenv,
- &dbc->mylock)) != 0 && ret == 0)
+ if (!F_ISSET(dbc, DBC_WRITEDUP) && LOCK_ISSET(dbc->mylock)) {
+ if ((t_ret = dbenv->lock_put(
+ dbenv, &dbc->mylock)) != 0 && ret == 0)
ret = t_ret;
- dbc->mylock.off = LOCK_INVALID;
}
/* For safety's sake, since this is going on the free queue. */
@@ -159,7 +162,7 @@ __db_c_close(dbc)
dbc->txn->cursors--;
/* Move the cursor(s) to the free queue. */
- MUTEX_THREAD_LOCK(dbp->dbenv, dbp->mutexp);
+ MUTEX_THREAD_LOCK(dbenv, dbp->mutexp);
if (opd != NULL) {
if (dbc->txn != NULL)
dbc->txn->cursors--;
@@ -167,7 +170,7 @@ __db_c_close(dbc)
opd = NULL;
}
TAILQ_INSERT_TAIL(&dbp->free_queue, dbc, links);
- MUTEX_THREAD_UNLOCK(dbp->dbenv, dbp->mutexp);
+ MUTEX_THREAD_UNLOCK(dbenv, dbp->mutexp);
return (ret);
}
@@ -183,27 +186,37 @@ __db_c_destroy(dbc)
DBC *dbc;
{
DB *dbp;
- DBC_INTERNAL *cp;
- int ret;
+ DB_ENV *dbenv;
+ int ret, t_ret;
dbp = dbc->dbp;
- cp = dbc->internal;
+ dbenv = dbp->dbenv;
/* Remove the cursor from the free queue. */
- MUTEX_THREAD_LOCK(dbp->dbenv, dbp->mutexp);
+ MUTEX_THREAD_LOCK(dbenv, dbp->mutexp);
TAILQ_REMOVE(&dbp->free_queue, dbc, links);
- MUTEX_THREAD_UNLOCK(dbp->dbenv, dbp->mutexp);
+ MUTEX_THREAD_UNLOCK(dbenv, dbp->mutexp);
/* Free up allocated memory. */
- if (dbc->rkey.data != NULL)
- __os_free(dbc->rkey.data, dbc->rkey.ulen);
- if (dbc->rdata.data != NULL)
- __os_free(dbc->rdata.data, dbc->rdata.ulen);
+ if (dbc->my_rskey.data != NULL)
+ __os_free(dbenv, dbc->my_rskey.data);
+ if (dbc->my_rkey.data != NULL)
+ __os_free(dbenv, dbc->my_rkey.data);
+ if (dbc->my_rdata.data != NULL)
+ __os_free(dbenv, dbc->my_rdata.data);
/* Call the access specific cursor destroy routine. */
ret = dbc->c_am_destroy == NULL ? 0 : dbc->c_am_destroy(dbc);
- __os_free(dbc, sizeof(*dbc));
+ /*
+ * Release the lock id for this cursor.
+ */
+ if (LOCKING_ON(dbenv) &&
+ F_ISSET(dbc, DBC_OWN_LID) &&
+ (t_ret = dbenv->lock_id_free(dbenv, dbc->lid)) != 0 && ret == 0)
+ ret = t_ret;
+
+ __os_free(dbenv, dbc);
return (ret);
}
@@ -256,7 +269,7 @@ __db_c_count(dbc, recnop, flags)
break;
default:
return (__db_unknown_type(dbp->dbenv,
- "__db_c_count", dbp->type));
+ "__db_c_count", dbp->type));
}
return (0);
}
@@ -286,11 +299,13 @@ __db_c_del(dbc, flags)
dbp = dbc->dbp;
PANIC_CHECK(dbp->dbenv);
- DB_CHECK_TXN(dbp, dbc->txn);
/* Check for invalid flags. */
- if ((ret = __db_cdelchk(dbp, flags,
- F_ISSET(dbp, DB_AM_RDONLY), IS_INITIALIZED(dbc))) != 0)
+ if ((ret = __db_cdelchk(dbp, flags, IS_INITIALIZED(dbc))) != 0)
+ return (ret);
+
+ /* Check for consistent transaction usage. */
+ if ((ret = __db_check_txn(dbp, dbc->txn, dbc->locker, 0)) != 0)
return (ret);
DEBUG_LWRITE(dbc, dbc->txn, "db_c_del", NULL, NULL, flags);
@@ -298,6 +313,27 @@ __db_c_del(dbc, flags)
CDB_LOCKING_INIT(dbp, dbc);
/*
+ * If we're a secondary index, and DB_UPDATE_SECONDARY isn't set
+ * (which it only is if we're being called from a primary update),
+ * then we need to call through to the primary and delete the item.
+ *
+ * Note that this will delete the current item; we don't need to
+ * delete it ourselves as well, so we can just goto done.
+ */
+ if (flags != DB_UPDATE_SECONDARY && F_ISSET(dbp, DB_AM_SECONDARY)) {
+ ret = __db_c_del_secondary(dbc);
+ goto done;
+ }
+
+ /*
+ * If we are a primary and have secondary indices, go through
+ * and delete any secondary keys that point at the current record.
+ */
+ if (LIST_FIRST(&dbp->s_secondaries) != NULL &&
+ (ret = __db_c_del_primary(dbc)) != 0)
+ goto done;
+
+ /*
* Off-page duplicate trees are locked in the primary tree, that is,
* we acquire a write lock in the primary tree and no locks in the
* off-page dup tree. If the del operation is done in an off-page
@@ -310,7 +346,7 @@ __db_c_del(dbc, flags)
if ((ret = dbc->c_am_writelock(dbc)) == 0)
ret = opd->c_am_del(opd);
- CDB_LOCKING_DONE(dbp, dbc);
+done: CDB_LOCKING_DONE(dbp, dbc);
return (ret);
}
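The two new branches above are the cursor-delete half of secondary index maintenance: a c_del on a secondary cursor is routed through the primary (__db_c_del_secondary), and a delete on a primary that has associated secondaries removes the matching secondary entries (__db_c_del_primary). A rough application-level view, assuming a primary/secondary pair already wired together with DB->associate(); the handle setup and key values are illustrative only.

#include <string.h>
#include <db.h>

/* Deleting through either side keeps both databases in sync. */
int
delete_example(DB *primary, DB *secondary)
{
    DBC *sdbc;
    DBT skey, data, pkey;
    int ret, t_ret;

    /* Delete via a secondary cursor: removes the primary record,
     * which in turn removes the entries in every secondary. */
    if ((ret = secondary->cursor(secondary, NULL, &sdbc, 0)) != 0)
        return (ret);
    memset(&skey, 0, sizeof(skey));
    memset(&data, 0, sizeof(data));
    skey.data = "smith";
    skey.size = sizeof("smith");
    if ((ret = sdbc->c_get(sdbc, &skey, &data, DB_SET)) == 0)
        ret = sdbc->c_del(sdbc, 0);
    if ((t_ret = sdbc->c_close(sdbc)) != 0 && ret == 0)
        ret = t_ret;
    if (ret != 0 && ret != DB_NOTFOUND)
        return (ret);

    /* The other direction: a primary delete walks the associated
     * secondaries and removes the keys that point at the record. */
    memset(&pkey, 0, sizeof(pkey));
    pkey.data = "1001";
    pkey.size = sizeof("1001");
    return (primary->del(primary, NULL, &pkey, 0));
}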
@@ -362,7 +398,7 @@ __db_c_dup(dbc_orig, dbcp, flags)
if (CDB_LOCKING(dbenv) && flags != DB_POSITIONI) {
DB_ASSERT(!F_ISSET(dbc_orig, DBC_WRITER | DBC_WRITECURSOR));
- if ((ret = lock_get(dbenv, dbc_n->locker, 0,
+ if ((ret = dbenv->lock_get(dbenv, dbc_n->locker, 0,
&dbc_n->lock_dbt, DB_LOCK_READ, &dbc_n->mylock)) != 0) {
(void)__db_c_close(dbc_n);
return (ret);
@@ -380,6 +416,8 @@ __db_c_dup(dbc_orig, dbcp, flags)
dbc_n->internal->opd = dbc_nopd;
}
+ /* Copy the dirty read flag to the new cursor. */
+ F_SET(dbc_n, F_ISSET(dbc_orig, DBC_DIRTY_READ));
return (0);
err: if (dbc_n != NULL)
@@ -393,8 +431,10 @@ err: if (dbc_n != NULL)
/*
* __db_c_idup --
* Internal version of __db_c_dup.
+ *
+ * PUBLIC: int __db_c_idup __P((DBC *, DBC **, u_int32_t));
*/
-static int
+int
__db_c_idup(dbc_orig, dbcp, flags)
DBC *dbc_orig, **dbcp;
u_int32_t flags;
@@ -408,17 +448,16 @@ __db_c_idup(dbc_orig, dbcp, flags)
dbc_n = *dbcp;
if ((ret = __db_icursor(dbp, dbc_orig->txn, dbc_orig->dbtype,
- dbc_orig->internal->root, F_ISSET(dbc_orig, DBC_OPD), &dbc_n)) != 0)
+ dbc_orig->internal->root, F_ISSET(dbc_orig, DBC_OPD),
+ dbc_orig->locker, &dbc_n)) != 0)
return (ret);
- dbc_n->locker = dbc_orig->locker;
-
/* If the user wants the cursor positioned, do it here. */
if (flags == DB_POSITION || flags == DB_POSITIONI) {
int_n = dbc_n->internal;
int_orig = dbc_orig->internal;
- dbc_n->flags = dbc_orig->flags;
+ dbc_n->flags |= dbc_orig->flags & ~DBC_OWN_LID;
int_n->indx = int_orig->indx;
int_n->pgno = int_orig->pgno;
@@ -449,6 +488,9 @@ __db_c_idup(dbc_orig, dbcp, flags)
/* Now take care of duping the CDB information. */
CDB_LOCKING_COPY(dbp, dbc_orig, dbc_n);
+ /* Copy the dirty read flag to the new cursor. */
+ F_SET(dbc_n, F_ISSET(dbc_orig, DBC_DIRTY_READ));
+
*dbcp = dbc_n;
return (0);
@@ -460,12 +502,13 @@ err: (void)dbc_n->c_close(dbc_n);
* __db_c_newopd --
* Create a new off-page duplicate cursor.
*
- * PUBLIC: int __db_c_newopd __P((DBC *, db_pgno_t, DBC **));
+ * PUBLIC: int __db_c_newopd __P((DBC *, db_pgno_t, DBC *, DBC **));
*/
int
-__db_c_newopd(dbc_parent, root, dbcp)
+__db_c_newopd(dbc_parent, root, oldopd, dbcp)
DBC *dbc_parent;
db_pgno_t root;
+ DBC *oldopd;
DBC **dbcp;
{
DB *dbp;
@@ -476,14 +519,44 @@ __db_c_newopd(dbc_parent, root, dbcp)
dbp = dbc_parent->dbp;
dbtype = (dbp->dup_compare == NULL) ? DB_RECNO : DB_BTREE;
+ /*
+ * On failure, we want to default to returning the old off-page dup
+ * cursor, if any; our caller can't be left with a dangling pointer
+ * to a freed cursor. On error the only allowable behavior is to
+ * close the cursor (and the old OPD cursor it in turn points to), so
+ * this should be safe.
+ */
+ *dbcp = oldopd;
+
if ((ret = __db_icursor(dbp,
- dbc_parent->txn, dbtype, root, 1, &opd)) != 0)
+ dbc_parent->txn, dbtype, root, 1, dbc_parent->locker, &opd)) != 0)
return (ret);
+ /* !!!
+ * If the parent is a DBC_WRITER, this won't copy anything. That's
+ * not actually a problem--we only need lock information in an
+ * off-page dup cursor in order to upgrade at cursor close time
+ * if we've done a delete, but WRITERs don't need to upgrade.
+ */
CDB_LOCKING_COPY(dbp, dbc_parent, opd);
*dbcp = opd;
+ /*
+ * Check to see if we already have an off-page dup cursor that we've
+ * passed in. If we do, close it. It'd be nice to use it again
+ * if it's a cursor belonging to the right tree, but if we're doing
+ * a cursor-relative operation this might not be safe, so for now
+ * we'll take the easy way out and always close and reopen.
+ *
+ * Note that under no circumstances do we want to close the old
+ * cursor without returning a valid new one; we don't want to
+ * leave the main cursor in our caller with a non-NULL pointer
+ * to a freed off-page dup cursor.
+ */
+ if (oldopd != NULL && (ret = oldopd->c_close(oldopd)) != 0)
+ return (ret);
+
return (0);
}
@@ -502,8 +575,9 @@ __db_c_get(dbc_arg, key, data, flags)
DB *dbp;
DBC *dbc, *dbc_n, *opd;
DBC_INTERNAL *cp, *cp_n;
+ DB_MPOOLFILE *mpf;
db_pgno_t pgno;
- u_int32_t tmp_flags, tmp_rmw;
+ u_int32_t multi, tmp_dirty, tmp_flags, tmp_rmw;
u_int8_t type;
int ret, t_ret;
@@ -517,6 +591,7 @@ __db_c_get(dbc_arg, key, data, flags)
* functions.
*/
dbp = dbc_arg->dbp;
+ mpf = dbp->mpf;
dbc_n = NULL;
opd = NULL;
@@ -531,6 +606,12 @@ __db_c_get(dbc_arg, key, data, flags)
tmp_rmw = LF_ISSET(DB_RMW);
LF_CLR(DB_RMW);
+ tmp_dirty = LF_ISSET(DB_DIRTY_READ);
+ LF_CLR(DB_DIRTY_READ);
+
+ multi = LF_ISSET(DB_MULTIPLE|DB_MULTIPLE_KEY);
+ LF_CLR(DB_MULTIPLE|DB_MULTIPLE_KEY);
+
DEBUG_LREAD(dbc_arg, dbc_arg->txn, "db_c_get",
flags == DB_SET || flags == DB_SET_RANGE ? key : NULL, NULL, flags);
@@ -538,8 +619,18 @@ __db_c_get(dbc_arg, key, data, flags)
* Return a cursor's record number. It has nothing to do with the
* cursor get code except that it was put into the interface.
*/
- if (flags == DB_GET_RECNO)
- return (__bam_c_rget(dbc_arg, data, flags | tmp_rmw));
+ if (flags == DB_GET_RECNO) {
+ if (tmp_rmw)
+ F_SET(dbc_arg, DBC_RMW);
+ if (tmp_dirty)
+ F_SET(dbc_arg, DBC_DIRTY_READ);
+ ret = __bam_c_rget(dbc_arg, data);
+ if (tmp_rmw)
+ F_CLR(dbc_arg, DBC_RMW);
+ if (tmp_dirty)
+ F_CLR(dbc_arg, DBC_DIRTY_READ);
+ return (ret);
+ }
if (flags == DB_CONSUME || flags == DB_CONSUME_WAIT)
CDB_LOCKING_INIT(dbp, dbc_arg);
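The hunk above peels DB_RMW, the new DB_DIRTY_READ bit, and the DB_MULTIPLE flags off the flags word and re-applies them as per-cursor DBC flags around the access-method call. On the application side they are simply OR'd into an ordinary cursor get; a hedged fragment follows (the database must itself have been opened with DB_DIRTY_READ for the second call to be legal).

#include <db.h>

/* DB_RMW: acquire a write lock on read, so a later update of the same
 * record cannot deadlock against another reader-turned-writer. */
int
get_for_update(DBC *dbc, DBT *key, DBT *data)
{
    return (dbc->c_get(dbc, key, data, DB_SET | DB_RMW));
}

/* DB_DIRTY_READ: read without blocking on other transactions' write
 * locks; internally this sets the DBC_DIRTY_READ flag seen above. */
int
get_dirty(DBC *dbc, DBT *key, DBT *data)
{
    return (dbc->c_get(dbc, key, data, DB_SET | DB_DIRTY_READ));
}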
@@ -564,8 +655,8 @@ __db_c_get(dbc_arg, key, data, flags)
if ((ret = __db_c_idup(cp->opd, &opd, DB_POSITIONI)) != 0)
return (ret);
- switch (ret = opd->c_am_get(
- opd, key, data, flags, NULL)) {
+ switch (ret =
+ opd->c_am_get(opd, key, data, flags, NULL)) {
case 0:
goto done;
case DB_NOTFOUND:
@@ -605,21 +696,49 @@ __db_c_get(dbc_arg, key, data, flags)
break;
}
+ if (tmp_dirty)
+ F_SET(dbc_arg, DBC_DIRTY_READ);
+
/*
* If this cursor is going to be closed immediately, we don't
* need to take precautions to clean it up on error.
*/
if (F_ISSET(dbc_arg, DBC_TRANSIENT))
dbc_n = dbc_arg;
- else if ((ret = __db_c_idup(dbc_arg, &dbc_n, tmp_flags)) != 0)
- goto err;
+ else {
+ ret = __db_c_idup(dbc_arg, &dbc_n, tmp_flags);
+ if (tmp_dirty)
+ F_CLR(dbc_arg, DBC_DIRTY_READ);
+
+ if (ret != 0)
+ goto err;
+ COPY_RET_MEM(dbc_arg, dbc_n);
+ }
if (tmp_rmw)
F_SET(dbc_n, DBC_RMW);
+
+ switch (multi) {
+ case DB_MULTIPLE:
+ F_SET(dbc_n, DBC_MULTIPLE);
+ break;
+ case DB_MULTIPLE_KEY:
+ F_SET(dbc_n, DBC_MULTIPLE_KEY);
+ break;
+ case DB_MULTIPLE | DB_MULTIPLE_KEY:
+ F_SET(dbc_n, DBC_MULTIPLE|DBC_MULTIPLE_KEY);
+ break;
+ case 0:
+ break;
+ }
+
pgno = PGNO_INVALID;
ret = dbc_n->c_am_get(dbc_n, key, data, flags, &pgno);
if (tmp_rmw)
F_CLR(dbc_n, DBC_RMW);
+ if (tmp_dirty)
+ F_CLR(dbc_arg, DBC_DIRTY_READ);
+ F_CLR(dbc_n, DBC_MULTIPLE|DBC_MULTIPLE_KEY);
if (ret != 0)
goto err;
@@ -630,7 +749,8 @@ __db_c_get(dbc_arg, key, data, flags)
* a new cursor and call the underlying function.
*/
if (pgno != PGNO_INVALID) {
- if ((ret = __db_c_newopd(dbc_arg, pgno, &cp_n->opd)) != 0)
+ if ((ret = __db_c_newopd(dbc_arg,
+ pgno, cp_n->opd, &cp_n->opd)) != 0)
goto err;
switch (flags) {
@@ -648,10 +768,9 @@ __db_c_get(dbc_arg, key, data, flags)
tmp_flags = DB_LAST;
break;
case DB_GET_BOTH:
- tmp_flags = DB_GET_BOTH;
- break;
case DB_GET_BOTHC:
- tmp_flags = DB_GET_BOTHC;
+ case DB_GET_BOTH_RANGE:
+ tmp_flags = flags;
break;
default:
ret =
@@ -680,19 +799,66 @@ done: /*
cp_n = dbc_n == NULL ? dbc_arg->internal : dbc_n->internal;
if (!F_ISSET(key, DB_DBT_ISSET)) {
if (cp_n->page == NULL && (ret =
- memp_fget(dbp->mpf, &cp_n->pgno, 0, &cp_n->page)) != 0)
+ mpf->get(mpf, &cp_n->pgno, 0, &cp_n->page)) != 0)
goto err;
if ((ret = __db_ret(dbp, cp_n->page, cp_n->indx,
- key, &dbc_arg->rkey.data, &dbc_arg->rkey.ulen)) != 0)
+ key, &dbc_arg->rkey->data, &dbc_arg->rkey->ulen)) != 0)
goto err;
}
- dbc = opd != NULL ? opd : cp_n->opd != NULL ? cp_n->opd : dbc_n;
- if (!F_ISSET(data, DB_DBT_ISSET)) {
+ if (multi != 0) {
+ /*
+ * Even if fetching from the OPD cursor we need a duplicate
+ * primary cursor if we are going after multiple keys.
+ */
+ if (dbc_n == NULL) {
+ /*
+ * Non-"_KEY" DB_MULTIPLE doesn't move the main cursor,
+ * so it's safe to just use dbc_arg, unless dbc_arg
+ * has an open OPD cursor whose state might need to
+ * be preserved.
+ */
+ if ((!(multi & DB_MULTIPLE_KEY) &&
+ dbc_arg->internal->opd == NULL) ||
+ F_ISSET(dbc_arg, DBC_TRANSIENT))
+ dbc_n = dbc_arg;
+ else {
+ if ((ret = __db_c_idup(dbc_arg,
+ &dbc_n, DB_POSITIONI)) != 0)
+ goto err;
+ if ((ret = dbc_n->c_am_get(dbc_n,
+ key, data, DB_CURRENT, &pgno)) != 0)
+ goto err;
+ }
+ cp_n = dbc_n->internal;
+ }
+
+ /*
+ * If opd is set then we dupped the opd that we came in with.
+ * When we return we may have a new opd if we went to another
+ * key.
+ */
+ if (opd != NULL) {
+ DB_ASSERT(cp_n->opd == NULL);
+ cp_n->opd = opd;
+ opd = NULL;
+ }
+
+ /*
+ * Bulk get doesn't use __db_retcopy, so data.size won't
+ * get set up unless there is an error. Assume success
+ * here. This is the only call to c_am_bulk, and it avoids
+ * setting it exactly the same everywhere. If we have an
+ * ENOMEM error, it'll get overwritten with the needed value.
+ */
+ data->size = data->ulen;
+ ret = dbc_n->c_am_bulk(dbc_n, data, flags | multi);
+ } else if (!F_ISSET(data, DB_DBT_ISSET)) {
+ dbc = opd != NULL ? opd : cp_n->opd != NULL ? cp_n->opd : dbc_n;
type = TYPE(dbc->internal->page);
ret = __db_ret(dbp, dbc->internal->page, dbc->internal->indx +
(type == P_LBTREE || type == P_HASH ? O_INDX : 0),
- data, &dbc_arg->rdata.data, &dbc_arg->rdata.ulen);
+ data, &dbc_arg->rdata->data, &dbc_arg->rdata->ulen);
}
err: /* Don't pass DB_DBT_ISSET back to application level, error or no. */
@@ -701,9 +867,8 @@ err: /* Don't pass DB_DBT_ISSET back to application level, error or no. */
/* Cleanup and cursor resolution. */
if (opd != NULL) {
- if ((t_ret =
- __db_c_cleanup(dbc_arg->internal->opd,
- opd, ret)) != 0 && ret == 0)
+ if ((t_ret = __db_c_cleanup(
+ dbc_arg->internal->opd, opd, ret)) != 0 && ret == 0)
ret = t_ret;
}
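The DB_MULTIPLE/DB_MULTIPLE_KEY handling and the c_am_bulk call above (including the "data->size = data->ulen" note) service the new bulk-get interface: the caller hands in one large DB_DBT_USERMEM buffer and walks the packed result with the DB_MULTIPLE_* macros from db.h. A minimal sketch under those assumptions; the buffer size and the per-item processing are illustrative.

#include <errno.h>
#include <stdlib.h>
#include <string.h>
#include <db.h>

int
bulk_scan(DB *dbp)
{
    DBC *dbc;
    DBT key, data;
    void *p, *retkey, *retdata;
    u_int32_t retklen, retdlen;
    int ret, t_ret;

    if ((ret = dbp->cursor(dbp, NULL, &dbc, 0)) != 0)
        return (ret);

    memset(&key, 0, sizeof(key));
    memset(&data, 0, sizeof(data));
    data.ulen = 1024 * 1024;        /* Must hold at least one page. */
    data.flags = DB_DBT_USERMEM;
    if ((data.data = malloc(data.ulen)) == NULL) {
        (void)dbc->c_close(dbc);
        return (ENOMEM);
    }

    while ((ret = dbc->c_get(dbc,
        &key, &data, DB_MULTIPLE_KEY | DB_NEXT)) == 0)
        for (DB_MULTIPLE_INIT(p, &data);;) {
            DB_MULTIPLE_KEY_NEXT(p,
                &data, retkey, retklen, retdata, retdlen);
            if (p == NULL)
                break;
            /* retkey/retklen and retdata/retdlen point into
             * data.data; process the pair here. */
        }
    if (ret == DB_NOTFOUND)
        ret = 0;

    free(data.data);
    if ((t_ret = dbc->c_close(dbc)) != 0 && ret == 0)
        ret = t_ret;
    return (ret);
}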
@@ -728,11 +893,12 @@ __db_c_put(dbc_arg, key, data, flags)
DBT *key, *data;
u_int32_t flags;
{
- DB *dbp;
- DBC *dbc_n, *opd;
+ DB *dbp, *sdbp;
+ DBC *dbc_n, *oldopd, *opd, *sdbc, *pdbc;
+ DBT olddata, oldpkey, oldskey, newdata, pkey, save_skey, skey, temp;
db_pgno_t pgno;
- u_int32_t tmp_flags;
- int ret, t_ret;
+ int cmp, have_oldrec, ispartial, nodel, re_pad, ret, rmw, t_ret;
+ u_int32_t re_len, size, tmp_flags;
/*
* Cursor Cleanup Note:
@@ -744,16 +910,30 @@ __db_c_put(dbc_arg, key, data, flags)
* functions.
*/
dbp = dbc_arg->dbp;
- dbc_n = NULL;
+ sdbp = NULL;
+ pdbc = dbc_n = NULL;
+ memset(&newdata, 0, sizeof(DBT));
PANIC_CHECK(dbp->dbenv);
- DB_CHECK_TXN(dbp, dbc_arg->txn);
/* Check for invalid flags. */
- if ((ret = __db_cputchk(dbp, key, data, flags,
- F_ISSET(dbp, DB_AM_RDONLY), IS_INITIALIZED(dbc_arg))) != 0)
+ if ((ret = __db_cputchk(dbp,
+ key, data, flags, IS_INITIALIZED(dbc_arg))) != 0)
+ return (ret);
+
+ /* Check for consistent transaction usage. */
+ if ((ret = __db_check_txn(dbp, dbc_arg->txn, dbc_arg->locker, 0)) != 0)
return (ret);
+ /*
+ * Putting to secondary indices is forbidden; when we need
+ * to internally update one, we'll call this with a private
+ * synonym for DB_KEYLAST, DB_UPDATE_SECONDARY, which does
+ * the right thing but won't return an error from cputchk().
+ */
+ if (flags == DB_UPDATE_SECONDARY)
+ flags = DB_KEYLAST;
+
DEBUG_LWRITE(dbc_arg, dbc_arg->txn, "db_c_put",
flags == DB_KEYFIRST || flags == DB_KEYLAST ||
flags == DB_NODUPDATA ? key : NULL, data, flags);
@@ -761,6 +941,439 @@ __db_c_put(dbc_arg, key, data, flags)
CDB_LOCKING_INIT(dbp, dbc_arg);
/*
+ * Check to see if we are a primary and have secondary indices.
+ * If we are not, we save ourselves a good bit of trouble and
+ * just skip to the "normal" put.
+ */
+ if (LIST_FIRST(&dbp->s_secondaries) == NULL)
+ goto skip_s_update;
+
+ /*
+ * We have at least one secondary which we may need to update.
+ *
+ * There is a rather vile locking issue here. Secondary gets
+ * will always involve acquiring a read lock in the secondary,
+ * then acquiring a read lock in the primary. Ideally, we
+ * would likewise perform puts by updating all the secondaries
+ * first, then doing the actual put in the primary, to avoid
+ * deadlock (since having multiple threads doing secondary
+ * gets and puts simultaneously is probably a common case).
+ *
+ * However, if this put is a put-overwrite--and we have no way to
+ * tell in advance whether it will be--we may need to delete
+ * an outdated secondary key. In order to find that old
+ * secondary key, we need to get the record we're overwriting,
+ * before we overwrite it.
+ *
+ * (XXX: It would be nice to avoid this extra get, and have the
+ * underlying put routines somehow pass us the old record
+ * since they need to traverse the tree anyway. I'm saving
+ * this optimization for later, as it's a lot of work, and it
+ * would be hard to fit into this locking paradigm anyway.)
+ *
+ * The simple thing to do would be to go get the old record before
+ * we do anything else. Unfortunately, though, doing so would
+ * violate our "secondary, then primary" lock acquisition
+ * ordering--even in the common case where no old primary record
+ * exists, we'll still acquire and keep a lock on the page where
+ * we're about to do the primary insert.
+ *
+ * To get around this, we do the following gyrations, which
+ * hopefully solve this problem in the common case:
+ *
+ * 1) If this is a c_put(DB_CURRENT), go ahead and get the
+ * old record. We already hold the lock on this page in
+ * the primary, so no harm done, and we'll need the primary
+ * key (which we weren't passed in this case) to do any
+ * secondary puts anyway.
+ *
+ * 2) If we're doing a partial put, we need to perform the
+ * get on the primary key right away, since we don't have
+ * the whole datum that the secondary key is based on.
+ * We may also need to pad out the record if the primary
+ * has a fixed record length.
+ *
+ * 3) Loop through the secondary indices, putting into each a
+ * new secondary key that corresponds to the new record.
+ *
+ * 4) If we haven't done so in (1) or (2), get the old primary
+ * key/data pair. If one does not exist--the common case--we're
+ * done with secondary indices, and can go straight on to the
+ * primary put.
+ *
+ * 5) If we do have an old primary key/data pair, however, we need
+ * to loop through all the secondaries a second time and delete
+ * the old secondary in each.
+ */
+ memset(&pkey, 0, sizeof(DBT));
+ memset(&olddata, 0, sizeof(DBT));
+ have_oldrec = nodel = 0;
+
+ /*
+ * Primary indices can't have duplicates, so only DB_CURRENT,
+ * DB_KEYFIRST, and DB_KEYLAST make any sense. Other flags
+ * should have been caught by the checking routine, but
+ * add a sprinkling of paranoia.
+ */
+ DB_ASSERT(flags == DB_CURRENT ||
+ flags == DB_KEYFIRST || flags == DB_KEYLAST);
+
+ /*
+ * We'll want to use DB_RMW in a few places, but it's only legal
+ * when locking is on.
+ */
+ rmw = STD_LOCKING(dbc_arg) ? DB_RMW : 0;
+
+ if (flags == DB_CURRENT) { /* Step 1. */
+ /*
+ * This is safe to do on the cursor we already have;
+ * error or no, it won't move.
+ *
+ * We use DB_RMW for all of these gets because we'll be
+ * writing soon enough in the "normal" put code. In
+ * transactional databases we'll hold those write locks
+ * even if we close the cursor we're reading with.
+ */
+ ret = dbc_arg->c_get(dbc_arg,
+ &pkey, &olddata, rmw | DB_CURRENT);
+ if (ret == DB_KEYEMPTY) {
+ nodel = 1; /*
+ * We know we don't need a delete
+ * in the secondary.
+ */
+ have_oldrec = 1; /* We've looked for the old record. */
+ } else if (ret != 0)
+ goto err;
+ else
+ have_oldrec = 1;
+
+ } else {
+ /* So we can just use &pkey everywhere instead of key. */
+ pkey.data = key->data;
+ pkey.size = key->size;
+ }
+
+ /*
+ * Check for partial puts (step 2).
+ */
+ if (F_ISSET(data, DB_DBT_PARTIAL)) {
+ if (!have_oldrec && !nodel) {
+ /*
+ * We're going to have to search the tree for the
+ * specified key. Dup a cursor (so we have the same
+ * locking info) and do a c_get.
+ */
+ if ((ret = __db_c_idup(dbc_arg, &pdbc, 0)) != 0)
+ goto err;
+
+ /* We should have gotten DB_CURRENT in step 1. */
+ DB_ASSERT(flags != DB_CURRENT);
+
+ ret = pdbc->c_get(pdbc,
+ &pkey, &olddata, rmw | DB_SET);
+ if (ret == DB_KEYEMPTY || ret == DB_NOTFOUND) {
+ nodel = 1;
+ ret = 0;
+ }
+ if ((t_ret = pdbc->c_close(pdbc)) != 0)
+ ret = t_ret;
+ if (ret != 0)
+ goto err;
+
+ have_oldrec = 1;
+ }
+
+ /*
+ * Now build the new datum from olddata and the partial
+ * data we were given.
+ */
+ if ((ret =
+ __db_buildpartial(dbp, &olddata, data, &newdata)) != 0)
+ goto err;
+ ispartial = 1;
+ } else
+ ispartial = 0;
+
+ /*
+ * Handle fixed-length records. If the primary database has
+ * fixed-length records, we need to pad out the datum before
+ * we pass it into the callback function; we always index the
+ * "real" record.
+ */
+ if ((dbp->type == DB_RECNO && F_ISSET(dbp, DB_AM_FIXEDLEN)) ||
+ (dbp->type == DB_QUEUE)) {
+ if (dbp->type == DB_QUEUE) {
+ re_len = ((QUEUE *)dbp->q_internal)->re_len;
+ re_pad = ((QUEUE *)dbp->q_internal)->re_pad;
+ } else {
+ re_len = ((BTREE *)dbp->bt_internal)->re_len;
+ re_pad = ((BTREE *)dbp->bt_internal)->re_pad;
+ }
+
+ size = ispartial ? newdata.size : data->size;
+ if (size > re_len) {
+ __db_err(dbp->dbenv,
+ "Length improper for fixed length record %lu",
+ (u_long)size);
+ ret = EINVAL;
+ goto err;
+ } else if (size < re_len) {
+ /*
+ * If we're not doing a partial put, copy
+ * data->data into newdata.data, then pad out
+ * newdata.data.
+ *
+ * If we're doing a partial put, the data
+ * we want are already in newdata.data; we
+ * just need to pad.
+ *
+ * Either way, realloc is safe.
+ */
+ if ((ret = __os_realloc(dbp->dbenv, re_len,
+ &newdata.data)) != 0)
+ goto err;
+ if (!ispartial)
+ memcpy(newdata.data, data->data, size);
+ memset((u_int8_t *)newdata.data + size, re_pad,
+ re_len - size);
+ newdata.size = re_len;
+ ispartial = 1;
+ }
+ }
+
+ /*
+ * Loop through the secondaries. (Step 3.)
+ *
+ * Note that __db_s_first and __db_s_next will take care of
+ * thread-locking and refcounting issues.
+ */
+ for (sdbp = __db_s_first(dbp);
+ sdbp != NULL && ret == 0; ret = __db_s_next(&sdbp)) {
+ /*
+ * Call the callback for this secondary, to get the
+ * appropriate secondary key.
+ */
+ memset(&skey, 0, sizeof(DBT));
+ if ((ret = sdbp->s_callback(sdbp,
+ &pkey, ispartial ? &newdata : data, &skey)) != 0) {
+ if (ret == DB_DONOTINDEX)
+ /*
+ * The callback returned a null value--don't
+ * put this key in the secondary. Just
+ * move on to the next one--we'll handle
+ * any necessary deletes in step 5.
+ */
+ continue;
+ else
+ goto err;
+ }
+
+ /*
+ * Save the DBT we just got back from the callback function
+ * off; we want to pass its value into c_get functions
+ * that may stomp on a buffer the callback function
+ * allocated.
+ */
+ memset(&save_skey, 0, sizeof(DBT)); /* Paranoia. */
+ save_skey = skey;
+
+ /*
+ * Open a cursor in this secondary.
+ *
+ * Use the same locker ID as our primary cursor, so that
+ * we're guaranteed that the locks don't conflict (e.g. in CDB
+ * or if we're subdatabases that share and want to lock a
+ * metadata page).
+ */
+ if ((ret = __db_icursor(sdbp, dbc_arg->txn, sdbp->type,
+ PGNO_INVALID, 0, dbc_arg->locker, &sdbc)) != 0)
+ goto err;
+
+ /*
+ * If we're in CDB, updates will fail since the new cursor
+ * isn't a writer. However, we hold the WRITE lock in the
+ * primary and will for as long as our new cursor lasts,
+ * and the primary and secondary share a lock file ID,
+ * so it's safe to consider this a WRITER. The close
+ * routine won't try to put anything because we don't
+ * really have a lock.
+ */
+ if (CDB_LOCKING(sdbp->dbenv)) {
+ DB_ASSERT(sdbc->mylock.off == LOCK_INVALID);
+ F_SET(sdbc, DBC_WRITER);
+ }
+
+ /*
+ * There are three cases here--
+ * 1) The secondary supports sorted duplicates.
+ * If we attempt to put a secondary/primary pair
+ * that already exists, that's a duplicate duplicate,
+ * and c_put will return DB_KEYEXIST (see __db_duperr).
+ * This will leave us with exactly one copy of the
+ * secondary/primary pair, and this is just right--we'll
+ * avoid deleting it later, as the old and new secondaries
+ * will match (since the old secondary is the dup dup
+ * that's already there).
+ * 2) The secondary supports duplicates, but they're not
+ * sorted. We need to avoid putting a duplicate
+ * duplicate, because the matching old and new secondaries
+ * will prevent us from deleting anything and we'll
+ * wind up with two secondary records that point to the
+ * same primary key. Do a c_get(DB_GET_BOTH); if
+ * that returns 0, skip the put.
+ * 3) The secondary doesn't support duplicates at all.
+ * In this case, secondary keys must be unique; if
+ * another primary key already exists for this
+ * secondary key, we have to either overwrite it or
+ * not put this one, and in either case we've
+ * corrupted the secondary index. Do a c_get(DB_SET).
+ * If the secondary/primary pair already exists, do
+ * nothing; if the secondary exists with a different
+ * primary, return an error; and if the secondary
+ * does not exist, put it.
+ */
+ if (!F_ISSET(sdbp, DB_AM_DUP)) {
+ /* Case 3. */
+ memset(&oldpkey, 0, sizeof(DBT));
+ F_SET(&oldpkey, DB_DBT_MALLOC);
+ ret = sdbc->c_real_get(sdbc,
+ &skey, &oldpkey, rmw | DB_SET);
+ if (ret == 0) {
+ cmp = __bam_defcmp(sdbp, &oldpkey, &pkey);
+ __os_ufree(sdbp->dbenv, oldpkey.data);
+ if (cmp != 0) {
+ __db_err(sdbp->dbenv, "%s%s",
+ "Put results in a non-unique secondary key in an ",
+ "index not configured to support duplicates");
+ ret = EINVAL;
+ goto skipput;
+ }
+ } else if (ret != DB_NOTFOUND && ret != DB_KEYEMPTY)
+ goto skipput;
+ } else if (!F_ISSET(sdbp, DB_AM_DUPSORT))
+ /* Case 2. */
+ if ((ret = sdbc->c_real_get(sdbc,
+ &skey, &pkey, rmw | DB_GET_BOTH)) == 0)
+ goto skipput;
+
+ ret = sdbc->c_put(sdbc, &skey, &pkey, DB_UPDATE_SECONDARY);
+
+ /*
+ * We don't know yet whether this was a put-overwrite that
+ * in fact changed nothing. If it was, we may get DB_KEYEXIST.
+ * This is not an error.
+ */
+ if (ret == DB_KEYEXIST)
+ ret = 0;
+
+skipput: FREE_IF_NEEDED(sdbp, &save_skey)
+
+ if ((t_ret = sdbc->c_close(sdbc)) != 0)
+ ret = t_ret;
+
+ if (ret != 0)
+ goto err;
+ }
+ if (ret != 0)
+ goto err;
+
+ /* If still necessary, go get the old primary key/data. (Step 4.) */
+ if (!have_oldrec) {
+ /* See the comments in step 2. This is real familiar. */
+ if ((ret = __db_c_idup(dbc_arg, &pdbc, 0)) != 0)
+ goto err;
+ DB_ASSERT(flags != DB_CURRENT);
+ pkey.data = key->data;
+ pkey.size = key->size;
+ ret = pdbc->c_get(pdbc, &pkey, &olddata, rmw | DB_SET);
+ if (ret == DB_KEYEMPTY || ret == DB_NOTFOUND) {
+ nodel = 1;
+ ret = 0;
+ }
+ if ((t_ret = pdbc->c_close(pdbc)) != 0)
+ ret = t_ret;
+ if (ret != 0)
+ goto err;
+ have_oldrec = 1;
+ }
+
+ /*
+ * If we don't follow this goto, we do in fact have an old record
+ * we may need to go delete. (Step 5).
+ */
+ if (nodel)
+ goto skip_s_update;
+
+ for (sdbp = __db_s_first(dbp);
+ sdbp != NULL && ret == 0; ret = __db_s_next(&sdbp)) {
+ /*
+ * Call the callback for this secondary to get the
+ * old secondary key.
+ */
+ memset(&oldskey, 0, sizeof(DBT));
+ if ((ret = sdbp->s_callback(sdbp,
+ &pkey, &olddata, &oldskey)) != 0) {
+ if (ret == DB_DONOTINDEX)
+ /*
+ * The callback returned a null value--there's
+ * nothing to delete. Go on to the next
+ * secondary.
+ */
+ continue;
+ else
+ goto err;
+ }
+ if ((ret = sdbp->s_callback(sdbp,
+ &pkey, ispartial ? &newdata : data, &skey)) != 0 &&
+ ret != DB_DONOTINDEX)
+ goto err;
+
+ /*
+ * If there is no new secondary key, or if the old secondary
+ * key is different from the new secondary key, then
+ * we need to delete the old one.
+ *
+ * Note that bt_compare is (and must be) set no matter
+ * what access method we're in.
+ */
+ sdbc = NULL;
+ if (ret == DB_DONOTINDEX ||
+ ((BTREE *)sdbp->bt_internal)->bt_compare(sdbp,
+ &oldskey, &skey) != 0) {
+ if ((ret = __db_icursor(sdbp, dbc_arg->txn, sdbp->type,
+ PGNO_INVALID, 0, dbc_arg->locker, &sdbc)) != 0)
+ goto err;
+ if (CDB_LOCKING(sdbp->dbenv)) {
+ DB_ASSERT(sdbc->mylock.off == LOCK_INVALID);
+ F_SET(sdbc, DBC_WRITER);
+ }
+
+ /*
+ * Don't let c_get(DB_GET_BOTH) stomp on
+ * any secondary key value that the callback
+ * function may have allocated. Use a temp
+ * DBT instead.
+ */
+ memset(&temp, 0, sizeof(DBT));
+ temp.data = oldskey.data;
+ temp.size = oldskey.size;
+ if ((ret = sdbc->c_real_get(sdbc,
+ &temp, &pkey, rmw | DB_GET_BOTH)) == 0)
+ ret = sdbc->c_del(sdbc, DB_UPDATE_SECONDARY);
+ }
+
+ FREE_IF_NEEDED(sdbp, &skey);
+ FREE_IF_NEEDED(sdbp, &oldskey);
+ if (sdbc != NULL && (t_ret = sdbc->c_close(sdbc)) != 0)
+ ret = t_ret;
+ if (ret != 0)
+ goto err;
+ }
+
+ /* Secondary index updates are now done. On to the "real" stuff. */
+
+skip_s_update:
+ /*
* If we have an off-page duplicates cursor, and the operation applies
* to it, perform the operation. Duplicate the cursor and call the
* underlying function.
@@ -826,8 +1439,12 @@ __db_c_put(dbc_arg, key, data, flags)
* a new cursor and call the underlying function.
*/
if (pgno != PGNO_INVALID) {
- if ((ret = __db_c_newopd(dbc_arg, pgno, &opd)) != 0)
+ oldopd = dbc_n->internal->opd;
+ if ((ret = __db_c_newopd(dbc_arg, pgno, oldopd, &opd)) != 0) {
+ dbc_n->internal->opd = opd;
goto err;
+ }
+
dbc_n->internal->opd = opd;
if ((ret = opd->c_am_put(
@@ -840,8 +1457,15 @@ err: /* Cleanup and cursor resolution. */
if ((t_ret = __db_c_cleanup(dbc_arg, dbc_n, ret)) != 0 && ret == 0)
ret = t_ret;
+ /* If newdata was used, free its buffer. */
+ if (newdata.data != NULL)
+ __os_free(dbp->dbenv, newdata.data);
+
CDB_LOCKING_DONE(dbp, dbc_arg);
+ if (sdbp != NULL && (t_ret = __db_s_done(sdbp)) != 0)
+ return (t_ret);
+
return (ret);
}
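The several hundred new lines above implement steps 1 through 5 of keeping secondary indices in sync on a cursor put (including the partial-put reconstruction and fixed-length padding). From the application's point of view all of it is driven by DB->associate() and a key-extraction callback; a sketch of that setup follows, assuming the 4.1 associate() signature (which takes a transaction handle). The record layout, file names, and callback policy are made up for illustration.

#include <string.h>
#include <db.h>

struct customer {
    char lastname[32];
    char firstname[32];
};

static int
lastname_callback(DB *sdbp, const DBT *pkey, const DBT *pdata, DBT *skey)
{
    const struct customer *c = pdata->data;

    (void)sdbp;
    (void)pkey;

    if (c->lastname[0] == '\0')
        return (DB_DONOTINDEX);    /* Don't index this record. */

    /* Point into the primary datum; no allocation needed. */
    memset(skey, 0, sizeof(DBT));
    skey->data = (void *)c->lastname;
    skey->size = (u_int32_t)strlen(c->lastname) + 1;
    return (0);
}

int
open_with_secondary(DB_ENV *dbenv, DB **primaryp, DB **secondaryp)
{
    DB *pdbp, *sdbp;
    int ret;

    if ((ret = db_create(&pdbp, dbenv, 0)) != 0)
        return (ret);
    if ((ret = pdbp->open(pdbp,
        NULL, "customers.db", NULL, DB_BTREE, DB_CREATE, 0664)) != 0)
        return (ret);

    if ((ret = db_create(&sdbp, dbenv, 0)) != 0)
        return (ret);
    /* Secondaries normally allow duplicates: many customers may
     * share a last name. */
    if ((ret = sdbp->set_flags(sdbp, DB_DUP | DB_DUPSORT)) != 0)
        return (ret);
    if ((ret = sdbp->open(sdbp,
        NULL, "lastname.db", NULL, DB_BTREE, DB_CREATE, 0664)) != 0)
        return (ret);

    /* From here on, every put or delete on the primary runs the
     * step 1-5 logic above to keep lastname.db in sync. */
    if ((ret = pdbp->associate(pdbp, NULL, sdbp,
        lastname_callback, 0)) != 0)
        return (ret);

    *primaryp = pdbp;
    *secondaryp = sdbp;
    return (0);
}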
@@ -855,7 +1479,20 @@ __db_duperr(dbp, flags)
DB *dbp;
u_int32_t flags;
{
- if (flags != DB_NODUPDATA)
+
+ /*
+ * If we run into this error while updating a secondary index,
+ * don't yell--there's no clean way to pass DB_NODUPDATA in along
+ * with DB_UPDATE_SECONDARY, but we may run into this problem
+ * in a normal, non-error course of events.
+ *
+ * !!!
+ * If and when we ever permit duplicate duplicates in sorted-dup
+ * databases, we need to either change the secondary index code
+ * to check for dup dups, or we need to maintain the implicit
+ * "DB_NODUPDATA" behavior for databases with DB_AM_SECONDARY set.
+ */
+ if (flags != DB_NODUPDATA && !F_ISSET(dbp, DB_AM_SECONDARY))
__db_err(dbp->dbenv,
"Duplicate data items are not supported with sorted data");
return (DB_KEYEXIST);
@@ -873,60 +1510,55 @@ __db_c_cleanup(dbc, dbc_n, failed)
DB *dbp;
DBC *opd;
DBC_INTERNAL *internal;
+ DB_MPOOLFILE *mpf;
int ret, t_ret;
dbp = dbc->dbp;
+ mpf = dbp->mpf;
internal = dbc->internal;
ret = 0;
/* Discard any pages we're holding. */
if (internal->page != NULL) {
- if ((t_ret =
- memp_fput(dbp->mpf, internal->page, 0)) != 0 && ret == 0)
+ if ((t_ret = mpf->put(mpf, internal->page, 0)) != 0 && ret == 0)
ret = t_ret;
internal->page = NULL;
}
opd = internal->opd;
if (opd != NULL && opd->internal->page != NULL) {
- if ((t_ret = memp_fput(dbp->mpf,
- opd->internal->page, 0)) != 0 && ret == 0)
+ if ((t_ret =
+ mpf->put(mpf, opd->internal->page, 0)) != 0 && ret == 0)
ret = t_ret;
opd->internal->page = NULL;
}
/*
- * If dbc_n is NULL, there's no internal cursor swapping to be
- * done and no dbc_n to close--we probably did the entire
- * operation on an offpage duplicate cursor. Just return.
- */
- if (dbc_n == NULL)
- return (ret);
-
- /*
- * If dbc is marked DBC_TRANSIENT, we're inside a DB->{put/get}
+ * If dbc_n is NULL, there's no internal cursor swapping to be done
+ * and no dbc_n to close--we probably did the entire operation on an
+ * offpage duplicate cursor. Just return.
+ *
+ * If dbc and dbc_n are the same, we're either inside a DB->{put/get}
* operation, and as an optimization we performed the operation on
- * the main cursor rather than on a duplicated one. Assert
- * that dbc_n == dbc (i.e., that we really did skip the
- * duplication). Then just do nothing--even if there was
- * an error, we're about to close the cursor, and the fact that we
- * moved it isn't a user-visible violation of our "cursor
- * stays put on error" rule.
- */
- if (F_ISSET(dbc, DBC_TRANSIENT)) {
- DB_ASSERT(dbc == dbc_n);
+ * the main cursor rather than on a duplicated one, or we're in a
+ * bulk get that can't have moved the cursor (DB_MULTIPLE with the
+ * initial c_get operation on an off-page dup cursor). Just
+ * return--either we know we didn't move the cursor, or we're going
+ * to close it before we return to application code, so we're sure
+ * not to visibly violate the "cursor stays put on error" rule.
+ */
+ if (dbc_n == NULL || dbc == dbc_n)
return (ret);
- }
if (dbc_n->internal->page != NULL) {
- if ((t_ret = memp_fput(dbp->mpf,
- dbc_n->internal->page, 0)) != 0 && ret == 0)
+ if ((t_ret =
+ mpf->put(mpf, dbc_n->internal->page, 0)) != 0 && ret == 0)
ret = t_ret;
dbc_n->internal->page = NULL;
}
opd = dbc_n->internal->opd;
if (opd != NULL && opd->internal->page != NULL) {
- if ((t_ret = memp_fput(dbp->mpf,
- opd->internal->page, 0)) != 0 && ret == 0)
+ if ((t_ret =
+ mpf->put(mpf, opd->internal->page, 0)) != 0 && ret == 0)
ret = t_ret;
opd->internal->page = NULL;
}
@@ -963,6 +1595,316 @@ __db_c_cleanup(dbc, dbc_n, failed)
}
/*
+ * __db_c_secondary_get --
+ * This wrapper function for DBC->c_pget() is the DBC->c_get() function
+ * for a secondary index cursor.
+ *
+ * PUBLIC: int __db_c_secondary_get __P((DBC *, DBT *, DBT *, u_int32_t));
+ */
+int
+__db_c_secondary_get(dbc, skey, data, flags)
+ DBC *dbc;
+ DBT *skey, *data;
+ u_int32_t flags;
+{
+
+ DB_ASSERT(F_ISSET(dbc->dbp, DB_AM_SECONDARY));
+ return (dbc->c_pget(dbc, skey, NULL, data, flags));
+}
+
+/*
+ * __db_c_pget --
+ * Get a primary key/data pair through a secondary index.
+ *
+ * PUBLIC: int __db_c_pget __P((DBC *, DBT *, DBT *, DBT *, u_int32_t));
+ */
+int
+__db_c_pget(dbc, skey, pkey, data, flags)
+ DBC *dbc;
+ DBT *skey, *pkey, *data;
+ u_int32_t flags;
+{
+ DB *pdbp, *sdbp;
+ DBC *pdbc;
+ DBT *save_rdata, nullpkey;
+ int pkeymalloc, ret, save_pkey_flags, t_ret;
+
+ sdbp = dbc->dbp;
+ pdbp = sdbp->s_primary;
+ pkeymalloc = t_ret = 0;
+
+ PANIC_CHECK(sdbp->dbenv);
+ if ((ret = __db_cpgetchk(sdbp,
+ skey, pkey, data, flags, IS_INITIALIZED(dbc))) != 0)
+ return (ret);
+
+ /*
+ * The challenging part of this function is getting the behavior
+ * right for all the various permutations of DBT flags. The
+ * next several blocks handle the various cases we need to
+ * deal with specially.
+ */
+
+ /*
+ * We may be called with a NULL pkey argument, if we've been
+ * wrapped by a 2-DBT get call. If so, we need to use our
+ * own DBT.
+ */
+ if (pkey == NULL) {
+ memset(&nullpkey, 0, sizeof(DBT));
+ pkey = &nullpkey;
+ }
+
+ /*
+ * DB_GET_RECNO is a special case, because we're interested not in
+ * the primary key/data pair, but rather in the primary's record
+ * number.
+ */
+ if ((flags & DB_OPFLAGS_MASK) == DB_GET_RECNO)
+ return (__db_c_pget_recno(dbc, pkey, data, flags));
+
+ /*
+ * If the DBTs we've been passed don't have any of the
+ * user-specified memory management flags set, we want to make sure
+ * we return values using the DBTs dbc->rskey, dbc->rkey, and
+ * dbc->rdata, respectively.
+ *
+ * There are two tricky aspects to this: first, we need to pass
+ * skey and pkey *in* to the initial c_get on the secondary key,
+ * since either or both may be looked at by it (depending on the
+ * get flag). Second, we must not use a normal DB->get call
+ * on the secondary, even though that's what we want to accomplish,
+ * because the DB handle may be free-threaded. Instead,
+ * we open a cursor, then take steps to ensure that we actually use
+ * the rkey/rdata from the *secondary* cursor.
+ *
+ * We accomplish all this by passing in the DBTs we started out
+ * with to the c_get, but having swapped the contents of rskey and
+ * rkey, respectively, into rkey and rdata; __db_ret will treat
+ * them like the normal key/data pair in a c_get call, and will
+ * realloc them as need be (this is "step 1"). Then, for "step 2",
+ * we swap back rskey/rkey/rdata to normal, and do a get on the primary
+ * with the secondary dbc appointed as the owner of the returned-data
+ * memory.
+ *
+ * Note that in step 2, we copy the flags field in case we need to
+ * pass down a DB_DBT_PARTIAL or other flag that is compatible with
+ * letting DB do the memory management.
+ */
+ /* Step 1. */
+ save_rdata = dbc->rdata;
+ dbc->rdata = dbc->rkey;
+ dbc->rkey = dbc->rskey;
+
+ /*
+ * It is correct, though slightly sick, to attempt a partial get
+ * of a primary key. However, if we do so here, we'll never find the
+ * primary record; clear the DB_DBT_PARTIAL field of pkey just
+ * for the duration of the next call.
+ */
+ save_pkey_flags = pkey->flags;
+ F_CLR(pkey, DB_DBT_PARTIAL);
+
+ /*
+ * Now we can go ahead with the meat of this call. First, get the
+ * primary key from the secondary index. (What exactly we get depends
+ * on the flags, but the underlying cursor get will take care of the
+ * dirty work.)
+ */
+ if ((ret = dbc->c_real_get(dbc, skey, pkey, flags)) != 0) {
+ /* Restore rskey/rkey/rdata and return. */
+ pkey->flags = save_pkey_flags;
+ dbc->rskey = dbc->rkey;
+ dbc->rkey = dbc->rdata;
+ dbc->rdata = save_rdata;
+ goto err;
+ }
+
+ /* Restore pkey's flags in case we stomped the PARTIAL flag. */
+ pkey->flags = save_pkey_flags;
+
+ /*
+ * Restore the cursor's rskey, rkey, and rdata DBTs. If DB
+ * is handling the memory management, we now have newly
+ * reallocated buffers and ulens in rkey and rdata which we want
+ * to put in rskey and rkey. save_rdata contains the old value
+ * of dbc->rdata.
+ */
+ dbc->rskey = dbc->rkey;
+ dbc->rkey = dbc->rdata;
+ dbc->rdata = save_rdata;
+
+ /*
+ * Now we're ready for "step 2". If either or both of pkey and
+ * data do not have memory management flags set--that is, if DB is
+ * managing their memory--we need to swap around the rkey/rdata
+ * structures so that we don't wind up trying to use memory managed
+ * by the primary database cursor, which we'll close before we return.
+ *
+ * !!!
+ * If you're carefully following the bouncing ball, you'll note
+ * that in the DB-managed case, the buffer hanging off of pkey is
+ * the same as dbc->rkey->data. This is just fine; we may well
+ * realloc and stomp on it when we return, if we're doing a
+ * DB_GET_BOTH and need to return a different partial or key
+ * (depending on the comparison function), but this is safe.
+ *
+ * !!!
+ * We need to use __db_icursor here rather than simply calling
+ * pdbp->cursor, because otherwise, if we're in CDB, we'll
+ * allocate a new locker ID and leave ourselves open to deadlocks.
+ * (Even though we're only acquiring read locks, we'll still block
+ * if there are any waiters.)
+ */
+ if ((ret = __db_icursor(pdbp,
+ dbc->txn, pdbp->type, PGNO_INVALID, 0, dbc->locker, &pdbc)) != 0)
+ goto err;
+
+ /*
+ * We're about to use pkey a second time. If DB_DBT_MALLOC
+ * is set on it, we'll leak the memory we allocated the first time.
+ * Thus, set DB_DBT_REALLOC instead so that we reuse that memory
+ * instead of leaking it.
+ *
+ * !!!
+ * This assumes that the user must always specify a compatible
+ * realloc function if a malloc function is specified. I think
+ * this is a reasonable requirement.
+ */
+ if (F_ISSET(pkey, DB_DBT_MALLOC)) {
+ F_CLR(pkey, DB_DBT_MALLOC);
+ F_SET(pkey, DB_DBT_REALLOC);
+ pkeymalloc = 1;
+ }
+
+ /*
+ * Do the actual get. Set DBC_TRANSIENT since we don't care
+ * about preserving the position on error, and it's faster.
+ * SET_RET_MEM so that the secondary DBC owns any returned-data
+ * memory.
+ */
+ F_SET(pdbc, DBC_TRANSIENT);
+ SET_RET_MEM(pdbc, dbc);
+ ret = pdbc->c_get(pdbc, pkey, data, DB_SET);
+
+ /*
+ * If the item wasn't found in the primary, this is a bug;
+ * our secondary has somehow gotten corrupted, and contains
+ * elements that don't correspond to anything in the primary.
+ * Complain.
+ */
+ if (ret == DB_NOTFOUND)
+ ret = __db_secondary_corrupt(pdbp);
+
+ /* Now close the primary cursor. */
+ t_ret = pdbc->c_close(pdbc);
+
+err: if (pkeymalloc) {
+ /*
+ * If pkey had a MALLOC flag, we need to restore it;
+ * otherwise, if the user frees the buffer but reuses
+ * the DBT without NULL'ing its data field or changing
+ * the flags, we may drop core.
+ */
+ F_CLR(pkey, DB_DBT_REALLOC);
+ F_SET(pkey, DB_DBT_MALLOC);
+ }
+ return (t_ret == 0 ? ret : t_ret);
+}
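__db_c_pget above juggles the rskey/rkey/rdata buffers so that a single call on a secondary cursor can return the secondary key, the primary key, and the primary datum. Application usage is just the three-DBT cursor call; a short fragment, with the secondary key value purely illustrative:

#include <string.h>
#include <db.h>

int
lookup_by_lastname(DB *secondary, const char *lastname)
{
    DBC *sdbc;
    DBT skey, pkey, data;
    int ret, t_ret;

    if ((ret = secondary->cursor(secondary, NULL, &sdbc, 0)) != 0)
        return (ret);

    memset(&skey, 0, sizeof(skey));
    memset(&pkey, 0, sizeof(pkey));
    memset(&data, 0, sizeof(data));
    skey.data = (void *)lastname;
    skey.size = (u_int32_t)strlen(lastname) + 1;

    /* On success, pkey holds the primary key and data the primary
     * datum; both point at cursor-owned memory until the next call. */
    ret = sdbc->c_pget(sdbc, &skey, &pkey, &data, DB_SET);

    if ((t_ret = sdbc->c_close(sdbc)) != 0 && ret == 0)
        ret = t_ret;
    return (ret);
}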
+
+/*
+ * __db_c_pget_recno --
+ * Perform a DB_GET_RECNO c_pget on a secondary index. Returns
+ * the secondary's record number in the pkey field and the primary's
+ * in the data field.
+ */
+static int
+__db_c_pget_recno(sdbc, pkey, data, flags)
+ DBC *sdbc;
+ DBT *pkey, *data;
+ u_int32_t flags;
+{
+ DB *pdbp, *sdbp;
+ DB_ENV *dbenv;
+ DBC *pdbc;
+ DBT discardme, primary_key;
+ db_recno_t oob;
+ u_int32_t rmw;
+ int ret, t_ret;
+
+ sdbp = sdbc->dbp;
+ pdbp = sdbp->s_primary;
+ dbenv = sdbp->dbenv;
+ pdbc = NULL;
+ ret = t_ret = 0;
+
+ rmw = LF_ISSET(DB_RMW);
+
+ memset(&discardme, 0, sizeof(DBT));
+ F_SET(&discardme, DB_DBT_USERMEM | DB_DBT_PARTIAL);
+
+ oob = RECNO_OOB;
+
+ /*
+ * If the primary is an rbtree, we want its record number, whether
+ * or not the secondary is one too. Fetch the recno into "data".
+ *
+ * If it's not an rbtree, return RECNO_OOB in "data".
+ */
+ if (F_ISSET(pdbp, DB_AM_RECNUM)) {
+ /*
+ * Get the primary key, so we can find the record number
+ * in the primary. (We're uninterested in the secondary key.)
+ */
+ memset(&primary_key, 0, sizeof(DBT));
+ F_SET(&primary_key, DB_DBT_MALLOC);
+ if ((ret = sdbc->c_real_get(sdbc,
+ &discardme, &primary_key, rmw | DB_CURRENT)) != 0)
+ return (ret);
+
+ /*
+ * Open a cursor on the primary, set it to the right record,
+ * and fetch its recno into "data".
+ *
+ * (See __db_c_pget for a comment on the use of __db_icursor.)
+ *
+ * SET_RET_MEM so that the secondary DBC owns any returned-data
+ * memory.
+ */
+ if ((ret = __db_icursor(pdbp, sdbc->txn,
+ pdbp->type, PGNO_INVALID, 0, sdbc->locker, &pdbc)) != 0)
+ goto perr;
+ SET_RET_MEM(pdbc, sdbc);
+ if ((ret = pdbc->c_get(pdbc,
+ &primary_key, &discardme, rmw | DB_SET)) != 0)
+ goto perr;
+
+ ret = pdbc->c_get(pdbc, &discardme, data, rmw | DB_GET_RECNO);
+
+perr: __os_ufree(sdbp->dbenv, primary_key.data);
+ if (pdbc != NULL &&
+ (t_ret = pdbc->c_close(pdbc)) != 0 && ret == 0)
+ ret = t_ret;
+ if (ret != 0)
+ return (ret);
+ } else if ((ret = __db_retcopy(dbenv, data, &oob,
+ sizeof(oob), &sdbc->rkey->data, &sdbc->rkey->ulen)) != 0)
+ return (ret);
+
+ /*
+ * If the secondary is an rbtree, we want its record number, whether
+ * or not the primary is one too. Fetch the recno into "pkey".
+ *
+ * If it's not an rbtree, return RECNO_OOB in "pkey".
+ */
+ if (F_ISSET(sdbp, DB_AM_RECNUM))
+ return (sdbc->c_real_get(sdbc, &discardme, pkey, flags));
+ else
+ return (__db_retcopy(dbenv, pkey, &oob,
+ sizeof(oob), &sdbc->rdata->data, &sdbc->rdata->ulen));
+}
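As the comments above note, __db_c_pget_recno only hands back a meaningful record number when the underlying database maintains them, returning RECNO_OOB otherwise. On the application side that means configuring the btree with DB_RECNUM and asking for DB_GET_RECNO; a hedged fragment:

#include <string.h>
#include <db.h>

int
recno_of_key(DB *dbp, DBT *key, db_recno_t *recnop)
{
    DBC *dbc;
    DBT data;
    int ret, t_ret;

    /* The database must have been created after
     *     dbp->set_flags(dbp, DB_RECNUM);
     * for record numbers to be maintained at all. */
    if ((ret = dbp->cursor(dbp, NULL, &dbc, 0)) != 0)
        return (ret);

    memset(&data, 0, sizeof(data));
    if ((ret = dbc->c_get(dbc, key, &data, DB_SET)) == 0 &&
        (ret = dbc->c_get(dbc, key, &data, DB_GET_RECNO)) == 0)
        memcpy(recnop, data.data, sizeof(db_recno_t));

    if ((t_ret = dbc->c_close(dbc)) != 0 && ret == 0)
        ret = t_ret;
    return (ret);
}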
+
+/*
* __db_wrlock_err -- do not have a write lock.
*/
static int
@@ -972,3 +1914,373 @@ __db_wrlock_err(dbenv)
__db_err(dbenv, "Write attempted on read-only cursor");
return (EPERM);
}
+
+/*
+ * __db_c_del_secondary --
+ * Perform a delete operation on a secondary index: call through
+ * to the primary and delete the primary record that this record
+ * points to.
+ *
+ * Note that deleting the primary record will call c_del on all
+ * the secondaries, including this one; thus, it is not necessary
+ * to execute both this function and an actual delete.
+ *
+ */
+static int
+__db_c_del_secondary(dbc)
+ DBC *dbc;
+{
+ DB *pdbp;
+ DBC *pdbc;
+ DBT skey, pkey;
+ int ret, t_ret;
+
+ memset(&skey, 0, sizeof(DBT));
+ memset(&pkey, 0, sizeof(DBT));
+
+ /*
+ * Get the current item that we're pointing at.
+ * We don't actually care about the secondary key, just
+ * the primary.
+ */
+ F_SET(&skey, DB_DBT_PARTIAL | DB_DBT_USERMEM);
+ if ((ret = dbc->c_real_get(dbc,
+ &skey, &pkey, DB_CURRENT)) != 0)
+ return (ret);
+
+ /*
+ * Create a cursor on the primary with our locker ID,
+ * so that when it calls back, we don't conflict.
+ *
+ * We create a cursor explicitly because there's no
+ * way to specify the same locker ID if we're using
+ * locking but not transactions if we use the DB->del
+ * interface. This shouldn't be any less efficient
+ * anyway.
+ */
+ pdbp = dbc->dbp->s_primary;
+ if ((ret = __db_icursor(pdbp, dbc->txn,
+ pdbp->type, PGNO_INVALID, 0, dbc->locker, &pdbc)) != 0)
+ return (ret);
+
+ /*
+ * See comment in __db_c_put--if we're in CDB,
+ * we already hold the locks we need, and we need to flag
+ * the cursor as a WRITER so we don't run into errors
+ * when we try to delete.
+ */
+ if (CDB_LOCKING(pdbp->dbenv)) {
+ DB_ASSERT(pdbc->mylock.off == LOCK_INVALID);
+ F_SET(pdbc, DBC_WRITER);
+ }
+
+ /*
+ * Set the new cursor to the correct primary key. Then
+ * delete it. We don't really care about the datum;
+ * just reuse our skey DBT.
+ *
+ * If the primary get returns DB_NOTFOUND, something is amiss--
+ * every record in the secondary should correspond to some record
+ * in the primary.
+ */
+ if ((ret = pdbc->c_get(pdbc, &pkey, &skey,
+ (STD_LOCKING(dbc) ? DB_RMW : 0) | DB_SET)) == 0)
+ ret = pdbc->c_del(pdbc, 0);
+ else if (ret == DB_NOTFOUND)
+ ret = __db_secondary_corrupt(pdbp);
+
+ if ((t_ret = pdbc->c_close(pdbc)) != 0 && ret == 0)
+ ret = t_ret;
+
+ return (ret);
+}
+
+/*
+ * __db_c_del_primary --
+ * Perform a delete operation on a primary index. Loop through
+ * all the secondary indices which correspond to this primary
+ * database, and delete any secondary keys that point at the current
+ * record.
+ *
+ * PUBLIC: int __db_c_del_primary __P((DBC *));
+ */
+int
+__db_c_del_primary(dbc)
+ DBC *dbc;
+{
+ DB *dbp, *sdbp;
+ DBC *sdbc;
+ DBT data, pkey, skey, temp;
+ int ret, t_ret;
+
+ dbp = dbc->dbp;
+
+ /*
+ * If we're called at all, we have at least one secondary.
+ * (Unfortunately, we can't assert this without grabbing the mutex.)
+ * Get the current record so that we can construct appropriate
+ * secondary keys as needed.
+ */
+ memset(&pkey, 0, sizeof(DBT));
+ memset(&data, 0, sizeof(DBT));
+ if ((ret = dbc->c_get(dbc, &pkey, &data, DB_CURRENT)) != 0)
+ return (ret);
+
+ for (sdbp = __db_s_first(dbp);
+ sdbp != NULL && ret == 0; ret = __db_s_next(&sdbp)) {
+ /*
+ * Get the secondary key for this secondary and the current
+ * item.
+ */
+ memset(&skey, 0, sizeof(DBT));
+ if ((ret = sdbp->s_callback(sdbp, &pkey, &data, &skey)) != 0) {
+ /*
+ * If the current item isn't in this index, we
+ * have no work to do. Proceed.
+ */
+ if (ret == DB_DONOTINDEX)
+ continue;
+
+ /* We had a substantive error. Bail. */
+ FREE_IF_NEEDED(sdbp, &skey);
+ goto done;
+ }
+
+ /* Open a secondary cursor. */
+ if ((ret = __db_icursor(sdbp, dbc->txn, sdbp->type,
+ PGNO_INVALID, 0, dbc->locker, &sdbc)) != 0)
+ goto done;
+ /* See comment above and in __db_c_put. */
+ if (CDB_LOCKING(sdbp->dbenv)) {
+ DB_ASSERT(sdbc->mylock.off == LOCK_INVALID);
+ F_SET(sdbc, DBC_WRITER);
+ }
+
+ /*
+ * Set the secondary cursor to the appropriate item.
+ * Delete it.
+ *
+ * We want to use DB_RMW if locking is on; it's only
+ * legal then, though.
+ *
+ * !!!
+ * Don't stomp on any callback-allocated buffer in skey
+ * when we do a c_get(DB_GET_BOTH); use a temp DBT instead.
+ */
+ memset(&temp, 0, sizeof(DBT));
+ temp.data = skey.data;
+ temp.size = skey.size;
+ if ((ret = sdbc->c_real_get(sdbc, &temp, &pkey,
+ (STD_LOCKING(dbc) ? DB_RMW : 0) | DB_GET_BOTH)) == 0)
+ ret = sdbc->c_del(sdbc, DB_UPDATE_SECONDARY);
+
+ FREE_IF_NEEDED(sdbp, &skey);
+
+ if ((t_ret = sdbc->c_close(sdbc)) != 0 || ret != 0) {
+ if (ret == 0)
+ ret = t_ret;
+ goto done;
+ }
+ }
+
+done: if (sdbp != NULL && (t_ret = __db_s_done(sdbp)) != 0 && ret == 0)
+ return (t_ret);
+ return (ret);
+}
+
+/*
+ * __db_s_first --
+ * Get the first secondary, if any are present, from the primary.
+ *
+ * PUBLIC: DB *__db_s_first __P((DB *));
+ */
+DB *
+__db_s_first(pdbp)
+ DB *pdbp;
+{
+ DB *sdbp;
+
+ MUTEX_THREAD_LOCK(pdbp->dbenv, pdbp->mutexp);
+ sdbp = LIST_FIRST(&pdbp->s_secondaries);
+
+ /* See __db_s_next. */
+ if (sdbp != NULL)
+ sdbp->s_refcnt++;
+ MUTEX_THREAD_UNLOCK(pdbp->dbenv, pdbp->mutexp);
+
+ return (sdbp);
+}
+
+/*
+ * __db_s_next --
+ * Get the next secondary in the list.
+ *
+ * PUBLIC: int __db_s_next __P((DB **));
+ */
+int
+__db_s_next(sdbpp)
+ DB **sdbpp;
+{
+ DB *sdbp, *pdbp, *closeme;
+ int ret;
+
+ /*
+ * Secondary indices are kept in a linked list, s_secondaries,
+ * off each primary DB handle. If a primary is free-threaded,
+ * this list may only be traversed or modified while the primary's
+ * thread mutex is held.
+ *
+ * The tricky part is that we don't want to hold the thread mutex
+ * across the full set of secondary puts necessary for each primary
+ * put, or we'll wind up essentially single-threading all the puts
+ * to the handle; the secondary puts will each take about as
+ * long as the primary does, and may require I/O. So we instead
+ * hold the thread mutex only long enough to follow one link to the
+ * next secondary, and then we release it before performing the
+ * actual secondary put.
+ *
+ * The only danger here is that we might legitimately close a
+ * secondary index in one thread while another thread is performing
+ * a put and trying to update that same secondary index. To
+ * prevent this from happening, we refcount the secondary handles.
+ * If close is called on a secondary index handle while we're putting
+ * to it, it won't really be closed--the refcount will simply drop,
+ * and we'll be responsible for closing it here.
+ */
+ sdbp = *sdbpp;
+ pdbp = sdbp->s_primary;
+ closeme = NULL;
+
+ MUTEX_THREAD_LOCK(pdbp->dbenv, pdbp->mutexp);
+ DB_ASSERT(sdbp->s_refcnt != 0);
+ if (--sdbp->s_refcnt == 0) {
+ LIST_REMOVE(sdbp, s_links);
+ closeme = sdbp;
+ }
+ sdbp = LIST_NEXT(sdbp, s_links);
+ if (sdbp != NULL)
+ sdbp->s_refcnt++;
+ MUTEX_THREAD_UNLOCK(pdbp->dbenv, pdbp->mutexp);
+
+ *sdbpp = sdbp;
+
+ /*
+ * closeme->close() is a wrapper; call __db_close explicitly.
+ */
+ ret = closeme != NULL ? __db_close(closeme, 0) : 0;
+ return (ret);
+}
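The long comment in __db_s_next describes a general protocol: hold the list mutex only long enough to step to the next element and bump its refcount, and let whoever drops a count to zero perform the deferred close. The stand-alone sketch below illustrates that scheme in isolation with pthreads; it is not BDB code, and the structure names are invented.

#include <pthread.h>
#include <stdlib.h>

struct handle {
    struct handle *next;
    int refcnt;
    int closed;         /* A close was requested while in use. */
};

struct handle_list {
    pthread_mutex_t mtx;
    struct handle *first;
};

static void
handle_destroy(struct handle *h)
{
    free(h);
}

/* Release `h' and return the next live handle, already referenced.
 * The mutex is held only across the pointer step and refcount edits;
 * the deferred destroy happens outside it. */
struct handle *
handle_list_step(struct handle_list *l, struct handle *h)
{
    struct handle *next, *closeme = NULL;

    pthread_mutex_lock(&l->mtx);
    if (--h->refcnt == 0 && h->closed)
        closeme = h;        /* We inherit the deferred close. */
    next = h->next;
    if (next != NULL)
        next->refcnt++;
    pthread_mutex_unlock(&l->mtx);

    if (closeme != NULL)
        handle_destroy(closeme);
    return (next);
}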
+
+/*
+ * __db_s_done --
+ * Properly decrement the refcount on a secondary database handle we're
+ * using, without calling __db_s_next.
+ *
+ * PUBLIC: int __db_s_done __P((DB *));
+ */
+int
+__db_s_done(sdbp)
+ DB *sdbp;
+{
+ DB *pdbp;
+ int doclose;
+
+ pdbp = sdbp->s_primary;
+ doclose = 0;
+
+ MUTEX_THREAD_LOCK(pdbp->dbenv, pdbp->mutexp);
+ DB_ASSERT(sdbp->s_refcnt != 0);
+ if (--sdbp->s_refcnt == 0) {
+ LIST_REMOVE(sdbp, s_links);
+ doclose = 1;
+ }
+ MUTEX_THREAD_UNLOCK(pdbp->dbenv, pdbp->mutexp);
+
+ return (doclose ? __db_close(sdbp, 0) : 0);
+}
+
+/*
+ * __db_buildpartial --
+ * Build the record that will result after a partial put is applied to
+ * an existing record.
+ *
+ * This should probably be merged with __bam_build, but that requires
+ * a little trickery if we plan to keep the overflow-record optimization
+ * in that function.
+ */
+static int
+__db_buildpartial(dbp, oldrec, partial, newrec)
+ DB *dbp;
+ DBT *oldrec, *partial, *newrec;
+{
+ int ret;
+ u_int8_t *buf;
+ u_int32_t len, nbytes;
+
+ DB_ASSERT(F_ISSET(partial, DB_DBT_PARTIAL));
+
+ memset(newrec, 0, sizeof(DBT));
+
+ nbytes = __db_partsize(oldrec->size, partial);
+ newrec->size = nbytes;
+
+ if ((ret = __os_malloc(dbp->dbenv, nbytes, &buf)) != 0)
+ return (ret);
+ newrec->data = buf;
+
+ /* Nul or pad out the buffer, for any part that isn't specified. */
+ memset(buf,
+ F_ISSET(dbp, DB_AM_FIXEDLEN) ? ((BTREE *)dbp->bt_internal)->re_pad :
+ 0, nbytes);
+
+ /* Copy in any leading data from the original record. */
+ memcpy(buf, oldrec->data,
+ partial->doff > oldrec->size ? oldrec->size : partial->doff);
+
+ /* Copy the data from partial. */
+ memcpy(buf + partial->doff, partial->data, partial->size);
+
+ /* Copy any trailing data from the original record. */
+ len = partial->doff + partial->dlen;
+ if (oldrec->size > len)
+ memcpy(buf + partial->doff + partial->size,
+ (u_int8_t *)oldrec->data + len, oldrec->size - len);
+
+ return (0);
+}
+
+/*
+ * __db_partsize --
+ * Given the number of bytes in an existing record and a DBT that
+ * is about to be partial-put, calculate the size of the record
+ * after the put.
+ *
+ * This code is called from __bam_partsize.
+ *
+ * PUBLIC: u_int32_t __db_partsize __P((u_int32_t, DBT *));
+ */
+u_int32_t
+__db_partsize(nbytes, data)
+ u_int32_t nbytes;
+ DBT *data;
+{
+
+ /*
+ * There are really two cases here:
+ *
+ * Case 1: We are replacing some bytes that do not exist (i.e., they
+ * are past the end of the record). In this case the number of bytes
+ * we are replacing is irrelevant and all we care about is how many
+ * bytes we are going to add from offset. So, the new record length
+ * is going to be the size of the new bytes (size) plus wherever those
+ * new bytes begin (doff).
+ *
+ * Case 2: All the bytes we are replacing exist. Therefore, the new
+ * size is the oldsize (nbytes) minus the bytes we are replacing (dlen)
+ * plus the bytes we are adding (size).
+ */
+ if (nbytes < data->doff + data->dlen) /* Case 1 */
+ return (data->doff + data->size);
+
+ return (nbytes + data->size - data->dlen); /* Case 2 */
+}
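The two cases in __db_partsize fall straight out of the DB_DBT_PARTIAL contract. The fragment below shows the partial put that __db_buildpartial and __db_partsize service, with the resulting sizes worked out in the comment; the key and replacement bytes are illustrative. Suppose the existing record is 10 bytes: replacing dlen = 2 bytes at doff = 4 with size = 5 new bytes yields 10 - 2 + 5 = 13 bytes (case 2), while doff = 20 would yield 20 + 5 = 25 bytes (case 1), with the gap between the old end and the new data filled with nul or pad bytes.

#include <string.h>
#include <db.h>

int
patch_record(DB *dbp, DB_TXN *txn, DBT *key)
{
    DBT data;

    memset(&data, 0, sizeof(data));
    data.data = "PATCH";
    data.size = 5;      /* Bytes of replacement data. */
    data.doff = 4;      /* Offset at which to replace. */
    data.dlen = 2;      /* Bytes of existing data replaced. */
    data.flags = DB_DBT_PARTIAL;

    return (dbp->put(dbp, txn, key, &data, 0));
}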
diff --git a/bdb/db/db_conv.c b/bdb/db/db_conv.c
index df60be06790..f731c82d85e 100644
--- a/bdb/db/db_conv.c
+++ b/bdb/db/db_conv.c
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Copyright (c) 1996-2002
* Sleepycat Software. All rights reserved.
*/
/*
@@ -40,7 +40,7 @@
#include "db_config.h"
#ifndef lint
-static const char revid[] = "$Id: db_conv.c,v 11.11 2000/11/30 00:58:31 ubell Exp $";
+static const char revid[] = "$Id: db_conv.c,v 11.38 2002/08/15 03:00:13 bostic Exp $";
#endif /* not lint */
#ifndef NO_SYSTEM_INCLUDES
@@ -50,12 +50,14 @@ static const char revid[] = "$Id: db_conv.c,v 11.11 2000/11/30 00:58:31 ubell Ex
#endif
#include "db_int.h"
-#include "db_page.h"
-#include "db_swap.h"
-#include "db_am.h"
-#include "btree.h"
-#include "hash.h"
-#include "qam.h"
+#include "dbinc/crypto.h"
+#include "dbinc/hmac.h"
+#include "dbinc/db_page.h"
+#include "dbinc/db_swap.h"
+#include "dbinc/btree.h"
+#include "dbinc/hash.h"
+#include "dbinc/log.h"
+#include "dbinc/qam.h"
/*
* __db_pgin --
@@ -70,15 +72,135 @@ __db_pgin(dbenv, pg, pp, cookie)
void *pp;
DBT *cookie;
{
+ DB dummydb, *dbp;
DB_PGINFO *pginfo;
+ DB_CIPHER *db_cipher;
+ DB_LSN not_used;
+ PAGE *pagep;
+ size_t pg_off, pg_len, sum_len;
+ int is_hmac, ret;
+ u_int8_t *chksum, *iv;
pginfo = (DB_PGINFO *)cookie->data;
+ pagep = (PAGE *)pp;
- switch (((PAGE *)pp)->type) {
- case P_HASH:
+ ret = is_hmac = 0;
+ chksum = iv = NULL;
+ memset(&dummydb, 0, sizeof(DB));
+ dbp = &dummydb;
+ dummydb.flags = pginfo->flags;
+ db_cipher = (DB_CIPHER *)dbenv->crypto_handle;
+ switch (pagep->type) {
case P_HASHMETA:
+ case P_BTREEMETA:
+ case P_QAMMETA:
+ /*
+ * If checksumming is set on the meta-page, we must set
+ * it in the dbp.
+ */
+ if (FLD_ISSET(((DBMETA *)pp)->metaflags, DBMETA_CHKSUM))
+ F_SET(dbp, DB_AM_CHKSUM);
+ if (((DBMETA *)pp)->encrypt_alg != 0 ||
+ F_ISSET(dbp, DB_AM_ENCRYPT))
+ is_hmac = 1;
+ /*
+ * !!!
+ * For all meta pages it is required that the chksum
+ * be at the same location. Use BTMETA to get to it
+ * for any meta type.
+ */
+ chksum = ((BTMETA *)pp)->chksum;
+ sum_len = DBMETASIZE;
+ break;
+ case P_INVALID:
+ /*
+ * We assume that we've read a file hole if we have
+ * a zero LSN, zero page number and P_INVALID. Otherwise
+ * we have an invalid page that might contain real data.
+ */
+ if (IS_ZERO_LSN(LSN(pagep)) && pagep->pgno == PGNO_INVALID) {
+ sum_len = 0;
+ break;
+ }
+ /* FALLTHROUGH */
+ default:
+ chksum = P_CHKSUM(dbp, pagep);
+ sum_len = pginfo->db_pagesize;
+ /*
+ * If we are reading in a non-meta page, then if we have
+ * a db_cipher then we are using hmac.
+ */
+ is_hmac = CRYPTO_ON(dbenv) ? 1 : 0;
+ break;
+ }
+
+ /*
+ * We expect a checksum error if there was a configuration problem.
+ * If there is no configuration problem and we don't get a match,
+ * it's fatal: panic the system.
+ */
+ if (F_ISSET(dbp, DB_AM_CHKSUM) && sum_len != 0)
+ switch (ret = __db_check_chksum(
+ dbenv, db_cipher, chksum, pp, sum_len, is_hmac)) {
+ case 0:
+ break;
+ case -1:
+ if (DBENV_LOGGING(dbenv))
+ __db_cksum_log(
+ dbenv, NULL, &not_used, DB_FLUSH);
+ __db_err(dbenv,
+ "checksum error: catastrophic recovery required");
+ return (__db_panic(dbenv, DB_RUNRECOVERY));
+ default:
+ return (ret);
+ }
+
+ if (F_ISSET(dbp, DB_AM_ENCRYPT)) {
+ DB_ASSERT(db_cipher != NULL);
+ DB_ASSERT(F_ISSET(dbp, DB_AM_CHKSUM));
+
+ pg_off = P_OVERHEAD(dbp);
+ DB_ASSERT(db_cipher->adj_size(pg_off) == 0);
+
+ switch (pagep->type) {
+ case P_HASHMETA:
+ case P_BTREEMETA:
+ case P_QAMMETA:
+ /*
+ * !!!
+ * For all meta pages it is required that the iv
+ * be at the same location. Use BTMETA to get to it
+ * for any meta type.
+ */
+ iv = ((BTMETA *)pp)->iv;
+ pg_len = DBMETASIZE;
+ break;
+ case P_INVALID:
+ if (IS_ZERO_LSN(LSN(pagep)) &&
+ pagep->pgno == PGNO_INVALID) {
+ pg_len = 0;
+ break;
+ }
+ /* FALLTHROUGH */
+ default:
+ iv = P_IV(dbp, pagep);
+ pg_len = pginfo->db_pagesize;
+ break;
+ }
+ if (pg_len != 0 && (ret = db_cipher->decrypt(dbenv,
+ db_cipher->data, iv, ((u_int8_t *)pagep) + pg_off,
+ pg_len - pg_off)) != 0)
+ return (ret);
+ }
+ switch (pagep->type) {
case P_INVALID:
- return (__ham_pgin(dbenv, pg, pp, cookie));
+ if (pginfo->type == DB_QUEUE)
+ return (__qam_pgin_out(dbenv, pg, pp, cookie));
+ else
+ return (__ham_pgin(dbenv, dbp, pg, pp, cookie));
+ case P_HASH:
+ case P_HASHMETA:
+ return (__ham_pgin(dbenv, dbp, pg, pp, cookie));
case P_BTREEMETA:
case P_IBTREE:
case P_IRECNO:
@@ -86,14 +208,14 @@ __db_pgin(dbenv, pg, pp, cookie)
case P_LDUP:
case P_LRECNO:
case P_OVERFLOW:
- return (__bam_pgin(dbenv, pg, pp, cookie));
+ return (__bam_pgin(dbenv, dbp, pg, pp, cookie));
case P_QAMMETA:
case P_QAMDATA:
return (__qam_pgin_out(dbenv, pg, pp, cookie));
default:
break;
}
- return (__db_unknown_type(dbenv, "__db_pgin", ((PAGE *)pp)->type));
+ return (__db_pgfmt(dbenv, pg));
}
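The rewritten __db_pgin verifies a per-page checksum (an HMAC when encryption is on) and decrypts the page body past P_OVERHEAD whenever the database was created with those options. From the application those options are just configuration calls; a sketch follows, assuming the 4.1 flag names DB_CHKSUM_SHA1, DB_ENCRYPT, and DB_ENCRYPT_AES, with the password, home directory, and file name made up.

#include <db.h>

int
open_encrypted(const char *home, DB_ENV **dbenvp, DB **dbpp)
{
    DB_ENV *dbenv;
    DB *dbp;
    int ret;

    if ((ret = db_env_create(&dbenv, 0)) != 0)
        return (ret);
    /* Must be configured before the environment is opened. */
    if ((ret = dbenv->set_encrypt(dbenv, "secret", DB_ENCRYPT_AES)) != 0)
        goto err;
    if ((ret = dbenv->open(dbenv,
        home, DB_CREATE | DB_INIT_MPOOL, 0)) != 0)
        goto err;

    if ((ret = db_create(&dbp, dbenv, 0)) != 0)
        goto err;
    /* Checksum every page; with encryption the checksum is an HMAC and
     * the page body is encrypted, which is what the new __db_pgin
     * branches verify and decrypt on every read. */
    if ((ret = dbp->set_flags(dbp, DB_CHKSUM_SHA1 | DB_ENCRYPT)) != 0 ||
        (ret = dbp->open(dbp,
        NULL, "secure.db", NULL, DB_BTREE, DB_CREATE, 0600)) != 0) {
        (void)dbp->close(dbp, 0);
        goto err;
    }

    *dbenvp = dbenv;
    *dbpp = dbp;
    return (0);

err: (void)dbenv->close(dbenv, 0);
    return (ret);
}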
/*
@@ -109,15 +231,33 @@ __db_pgout(dbenv, pg, pp, cookie)
void *pp;
DBT *cookie;
{
+ DB dummydb, *dbp;
+ DB_CIPHER *db_cipher;
DB_PGINFO *pginfo;
+ PAGE *pagep;
+ size_t pg_off, pg_len, sum_len;
+ int ret;
+ u_int8_t *chksum, *iv, *key;
pginfo = (DB_PGINFO *)cookie->data;
+ pagep = (PAGE *)pp;
- switch (((PAGE *)pp)->type) {
+ chksum = iv = key = NULL;
+ memset(&dummydb, 0, sizeof(DB));
+ dbp = &dummydb;
+ dummydb.flags = pginfo->flags;
+ ret = 0;
+ switch (pagep->type) {
+ case P_INVALID:
+ if (pginfo->type == DB_QUEUE)
+ ret = __qam_pgin_out(dbenv, pg, pp, cookie);
+ else
+ ret = __ham_pgout(dbenv, dbp, pg, pp, cookie);
+ break;
case P_HASH:
case P_HASHMETA:
- case P_INVALID:
- return (__ham_pgout(dbenv, pg, pp, cookie));
+ ret = __ham_pgout(dbenv, dbp, pg, pp, cookie);
+ break;
case P_BTREEMETA:
case P_IBTREE:
case P_IRECNO:
@@ -125,14 +265,73 @@ __db_pgout(dbenv, pg, pp, cookie)
case P_LDUP:
case P_LRECNO:
case P_OVERFLOW:
- return (__bam_pgout(dbenv, pg, pp, cookie));
+ ret = __bam_pgout(dbenv, dbp, pg, pp, cookie);
+ break;
case P_QAMMETA:
case P_QAMDATA:
- return (__qam_pgin_out(dbenv, pg, pp, cookie));
- default:
+ ret = __qam_pgin_out(dbenv, pg, pp, cookie);
break;
+ default:
+ return (__db_pgfmt(dbenv, pg));
+ }
+ if (ret)
+ return (ret);
+
+ db_cipher = (DB_CIPHER *)dbenv->crypto_handle;
+ if (F_ISSET(dbp, DB_AM_ENCRYPT)) {
+
+ DB_ASSERT(db_cipher != NULL);
+ DB_ASSERT(F_ISSET(dbp, DB_AM_CHKSUM));
+
+ pg_off = P_OVERHEAD(dbp);
+ DB_ASSERT(db_cipher->adj_size(pg_off) == 0);
+
+ key = db_cipher->mac_key;
+
+ switch (pagep->type) {
+ case P_HASHMETA:
+ case P_BTREEMETA:
+ case P_QAMMETA:
+ /*
+ * !!!
+ * For all meta pages it is required that the iv
+ * be at the same location. Use BTMETA to get to it
+ * for any meta type.
+ */
+ iv = ((BTMETA *)pp)->iv;
+ pg_len = DBMETASIZE;
+ break;
+ default:
+ iv = P_IV(dbp, pagep);
+ pg_len = pginfo->db_pagesize;
+ break;
+ }
+ if ((ret = db_cipher->encrypt(dbenv, db_cipher->data,
+ iv, ((u_int8_t *)pagep) + pg_off, pg_len - pg_off)) != 0)
+ return (ret);
+ }
+ if (F_ISSET(dbp, DB_AM_CHKSUM)) {
+ switch (pagep->type) {
+ case P_HASHMETA:
+ case P_BTREEMETA:
+ case P_QAMMETA:
+ /*
+ * !!!
+ * For all meta pages it is required that the chksum
+ * be at the same location. Use BTMETA to get to it
+ * for any meta type.
+ */
+ chksum = ((BTMETA *)pp)->chksum;
+ sum_len = DBMETASIZE;
+ break;
+ default:
+ chksum = P_CHKSUM(dbp, pagep);
+ sum_len = pginfo->db_pagesize;
+ break;
+ }
+ __db_chksum(pp, sum_len, key, chksum);
}
- return (__db_unknown_type(dbenv, "__db_pgout", ((PAGE *)pp)->type));
+ return (0);
}
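The write side in __db_pgout reverses that order: the access-method pgout runs first, the body is encrypted next, and the checksum is computed last so it covers exactly the bytes that reach disk. A compressed sketch, reusing the toy helpers from the previous sketch (the XOR "cipher" is its own inverse, so toy_decrypt() also encrypts):

/*
 * pgout_sketch --
 *    Inverse of pgin_sketch: convert, then encrypt, then checksum.
 *    Storing the checksum into the page is left to the caller.
 */
static int pgout_sketch(uint8_t *page, size_t pagesize,
    size_t overhead, uint8_t key, uint32_t *chksum_out)
{
    /* Access-method pgout (byte-order conversion) happens first. */
    toy_decrypt(page + overhead, pagesize - overhead, key);
    *chksum_out = toy_chksum(page, pagesize);
    return (0);
}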
/*
@@ -169,11 +368,13 @@ __db_metaswap(pg)
* __db_byteswap --
* Byteswap a page.
*
- * PUBLIC: int __db_byteswap __P((DB_ENV *, db_pgno_t, PAGE *, size_t, int));
+ * PUBLIC: int __db_byteswap
+ * PUBLIC: __P((DB_ENV *, DB *, db_pgno_t, PAGE *, size_t, int));
*/
int
-__db_byteswap(dbenv, pg, h, pagesize, pgin)
+__db_byteswap(dbenv, dbp, pg, h, pagesize, pgin)
DB_ENV *dbenv;
+ DB *dbp;
db_pgno_t pg;
PAGE *h;
size_t pagesize;
@@ -183,11 +384,12 @@ __db_byteswap(dbenv, pg, h, pagesize, pgin)
BKEYDATA *bk;
BOVERFLOW *bo;
RINTERNAL *ri;
- db_indx_t i, len, tmp;
+ db_indx_t i, *inp, len, tmp;
u_int8_t *p, *end;
COMPQUIET(pg, 0);
+ inp = P_INP(dbp, h);
if (pgin) {
M_32_SWAP(h->lsn.file);
M_32_SWAP(h->lsn.offset);
@@ -202,14 +404,14 @@ __db_byteswap(dbenv, pg, h, pagesize, pgin)
case P_HASH:
for (i = 0; i < NUM_ENT(h); i++) {
if (pgin)
- M_16_SWAP(h->inp[i]);
+ M_16_SWAP(inp[i]);
- switch (HPAGE_TYPE(h, i)) {
+ switch (HPAGE_TYPE(dbp, h, i)) {
case H_KEYDATA:
break;
case H_DUPLICATE:
- len = LEN_HKEYDATA(h, pagesize, i);
- p = HKEYDATA_DATA(P_ENTRY(h, i));
+ len = LEN_HKEYDATA(dbp, h, pagesize, i);
+ p = HKEYDATA_DATA(P_ENTRY(dbp, h, i));
for (end = p + len; p < end;) {
if (pgin) {
P_16_SWAP(p);
@@ -226,11 +428,11 @@ __db_byteswap(dbenv, pg, h, pagesize, pgin)
}
break;
case H_OFFDUP:
- p = HOFFPAGE_PGNO(P_ENTRY(h, i));
+ p = HOFFPAGE_PGNO(P_ENTRY(dbp, h, i));
SWAP32(p); /* pgno */
break;
case H_OFFPAGE:
- p = HOFFPAGE_PGNO(P_ENTRY(h, i));
+ p = HOFFPAGE_PGNO(P_ENTRY(dbp, h, i));
SWAP32(p); /* pgno */
SWAP32(p); /* tlen */
break;
@@ -246,14 +448,14 @@ __db_byteswap(dbenv, pg, h, pagesize, pgin)
*/
if (!pgin)
for (i = 0; i < NUM_ENT(h); i++)
- M_16_SWAP(h->inp[i]);
+ M_16_SWAP(inp[i]);
break;
case P_LBTREE:
case P_LDUP:
case P_LRECNO:
for (i = 0; i < NUM_ENT(h); i++) {
if (pgin)
- M_16_SWAP(h->inp[i]);
+ M_16_SWAP(inp[i]);
/*
* In the case of on-page duplicates, key information
@@ -261,17 +463,17 @@ __db_byteswap(dbenv, pg, h, pagesize, pgin)
*/
if (h->type == P_LBTREE && i > 1) {
if (pgin) {
- if (h->inp[i] == h->inp[i - 2])
+ if (inp[i] == inp[i - 2])
continue;
} else {
- M_16_SWAP(h->inp[i]);
- if (h->inp[i] == h->inp[i - 2])
+ M_16_SWAP(inp[i]);
+ if (inp[i] == inp[i - 2])
continue;
- M_16_SWAP(h->inp[i]);
+ M_16_SWAP(inp[i]);
}
}
- bk = GET_BKEYDATA(h, i);
+ bk = GET_BKEYDATA(dbp, h, i);
switch (B_TYPE(bk->type)) {
case B_KEYDATA:
M_16_SWAP(bk->len);
@@ -285,15 +487,15 @@ __db_byteswap(dbenv, pg, h, pagesize, pgin)
}
if (!pgin)
- M_16_SWAP(h->inp[i]);
+ M_16_SWAP(inp[i]);
}
break;
case P_IBTREE:
for (i = 0; i < NUM_ENT(h); i++) {
if (pgin)
- M_16_SWAP(h->inp[i]);
+ M_16_SWAP(inp[i]);
- bi = GET_BINTERNAL(h, i);
+ bi = GET_BINTERNAL(dbp, h, i);
M_16_SWAP(bi->len);
M_32_SWAP(bi->pgno);
M_32_SWAP(bi->nrecs);
@@ -310,20 +512,20 @@ __db_byteswap(dbenv, pg, h, pagesize, pgin)
}
if (!pgin)
- M_16_SWAP(h->inp[i]);
+ M_16_SWAP(inp[i]);
}
break;
case P_IRECNO:
for (i = 0; i < NUM_ENT(h); i++) {
if (pgin)
- M_16_SWAP(h->inp[i]);
+ M_16_SWAP(inp[i]);
- ri = GET_RINTERNAL(h, i);
+ ri = GET_RINTERNAL(dbp, h, i);
M_32_SWAP(ri->pgno);
M_32_SWAP(ri->nrecs);
if (!pgin)
- M_16_SWAP(h->inp[i]);
+ M_16_SWAP(inp[i]);
}
break;
case P_OVERFLOW:
@@ -331,7 +533,7 @@ __db_byteswap(dbenv, pg, h, pagesize, pgin)
/* Nothing to do. */
break;
default:
- return (__db_unknown_type(dbenv, "__db_byteswap", h->type));
+ return (__db_pgfmt(dbenv, pg));
}
if (!pgin) {
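The rest of this conversion routine keeps swapping the 16-bit entries of the page's slot index, now reached through P_INP(dbp, h) instead of the old h->inp member. The swap itself is symmetric, which is why the same code can serve both the pgin and pgout directions; a self-contained illustration:

#include <stddef.h>
#include <stdint.h>

static uint16_t swap16(uint16_t v)
{
    return ((uint16_t)((v << 8) | (v >> 8)));
}

/*
 * Swap every slot-index entry in place.  Running it twice restores the
 * original values, so one routine covers reading and writing pages.
 */
static void swap_index_array(uint16_t *inp, size_t nent)
{
    size_t i;

    for (i = 0; i < nent; i++)
        inp[i] = swap16(inp[i]);
}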
diff --git a/bdb/db/db_dispatch.c b/bdb/db/db_dispatch.c
index c9beac401a7..2cf29ec2f33 100644
--- a/bdb/db/db_dispatch.c
+++ b/bdb/db/db_dispatch.c
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Copyright (c) 1996-2002
* Sleepycat Software. All rights reserved.
*/
/*
@@ -39,7 +39,7 @@
#include "db_config.h"
#ifndef lint
-static const char revid[] = "$Id: db_dispatch.c,v 11.41 2001/01/11 18:19:50 bostic Exp $";
+static const char revid[] = "$Id: db_dispatch.c,v 11.121 2002/09/07 17:36:31 ubell Exp $";
#endif /* not lint */
#ifndef NO_SYSTEM_INCLUDES
@@ -51,16 +51,24 @@ static const char revid[] = "$Id: db_dispatch.c,v 11.41 2001/01/11 18:19:50 bost
#endif
#include "db_int.h"
-#include "db_page.h"
-#include "db_dispatch.h"
-#include "db_am.h"
-#include "log_auto.h"
-#include "txn.h"
-#include "txn_auto.h"
-#include "log.h"
-
-static int __db_txnlist_find_internal __P((void *, db_txnlist_type,
- u_int32_t, u_int8_t [DB_FILE_ID_LEN], DB_TXNLIST **, int));
+#include "dbinc/db_page.h"
+#include "dbinc/hash.h"
+#include "dbinc/log.h"
+#include "dbinc/fop.h"
+#include "dbinc/rep.h"
+#include "dbinc/txn.h"
+
+static int __db_limbo_fix __P((DB *,
+ DB_TXN *, DB_TXNLIST *, db_pgno_t *, DBMETA *));
+static int __db_limbo_bucket __P((DB_ENV *, DB_TXN *, DB_TXNLIST *));
+static int __db_limbo_move __P((DB_ENV *, DB_TXN *, DB_TXN *, DB_TXNLIST *));
+static int __db_lock_move __P((DB_ENV *,
+ u_int8_t *, db_pgno_t, db_lockmode_t, DB_TXN *, DB_TXN *));
+static int __db_default_getpgnos __P((DB_ENV *, DB_LSN *lsnp, void *));
+static int __db_txnlist_find_internal __P((DB_ENV *, void *, db_txnlist_type,
+ u_int32_t, u_int8_t [DB_FILE_ID_LEN], DB_TXNLIST **, int));
+static int __db_txnlist_pgnoadd __P((DB_ENV *, DB_TXNHEAD *,
+ int32_t, u_int8_t [DB_FILE_ID_LEN], char *, db_pgno_t));
/*
* __db_dispatch --
@@ -71,16 +79,21 @@ static int __db_txnlist_find_internal __P((void *, db_txnlist_type,
* scripts in the tools directory). An application using a different
* recovery paradigm will supply a different dispatch function to txn_open.
*
- * PUBLIC: int __db_dispatch __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ * PUBLIC: int __db_dispatch __P((DB_ENV *,
+ * PUBLIC: int (**)__P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)),
+ * PUBLIC: size_t, DBT *, DB_LSN *, db_recops, void *));
*/
int
-__db_dispatch(dbenv, db, lsnp, redo, info)
+__db_dispatch(dbenv, dtab, dtabsize, db, lsnp, redo, info)
DB_ENV *dbenv; /* The environment. */
+ int (**dtab)__P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ size_t dtabsize; /* Size of the dtab. */
DBT *db; /* The log record upon which to dispatch. */
DB_LSN *lsnp; /* The lsn of the record being dispatched. */
db_recops redo; /* Redo this op (or undo it). */
void *info;
{
+ DB_LSN prev_lsn;
u_int32_t rectype, txnid;
int make_call, ret;
@@ -88,6 +101,9 @@ __db_dispatch(dbenv, db, lsnp, redo, info)
memcpy(&txnid, (u_int8_t *)db->data + sizeof(rectype), sizeof(txnid));
make_call = ret = 0;
+ /* If we don't have a dispatch table, it's hard to dispatch. */
+ DB_ASSERT(dtab != NULL);
+
/*
* If we find a record that is in the user's number space and they
* have specified a recovery routine, let them handle it. If they
@@ -96,17 +112,29 @@ __db_dispatch(dbenv, db, lsnp, redo, info)
*/
switch (redo) {
case DB_TXN_ABORT:
- /*
- * XXX
- * db_printlog depends on DB_TXN_ABORT not examining the TXN
- * list. If that ever changes, fix db_printlog too.
- */
+ case DB_TXN_APPLY:
+ case DB_TXN_PRINT:
make_call = 1;
break;
case DB_TXN_OPENFILES:
- if (rectype == DB_log_register)
- return (dbenv->dtab[rectype](dbenv,
- db, lsnp, redo, info));
+ /*
+ * We collect all the transactions that have
+ * "begin" records, those with no previous LSN,
+ * so that we do not abort partial transactions.
+ * These are known to be undone, otherwise the
+ * log would not have been freeable.
+ */
+ memcpy(&prev_lsn, (u_int8_t *)db->data +
+ sizeof(rectype) + sizeof(txnid), sizeof(prev_lsn));
+ if (txnid != 0 && prev_lsn.file == 0 && (ret =
+ __db_txnlist_add(dbenv, info, txnid, TXN_OK, NULL)) != 0)
+ return (ret);
+
+ /* FALLTHROUGH */
+ case DB_TXN_POPENFILES:
+ if (rectype == DB___dbreg_register ||
+ rectype == DB___txn_ckp || rectype == DB___txn_recycle)
+ return (dtab[rectype](dbenv, db, lsnp, redo, info));
break;
case DB_TXN_BACKWARD_ROLL:
/*
@@ -117,43 +145,146 @@ __db_dispatch(dbenv, db, lsnp, redo, info)
* we've never seen it, then we call the appropriate recovery
* routine.
*
- * We need to always undo DB_db_noop records, so that we
+ * We need to always undo DB___db_noop records, so that we
* properly handle any aborts before the file was closed.
*/
- if (rectype == DB_log_register ||
- rectype == DB_txn_ckp || rectype == DB_db_noop
- || rectype == DB_txn_child || (txnid != 0 &&
- (ret = __db_txnlist_find(info, txnid)) != 0)) {
+ switch(rectype) {
+ case DB___txn_regop:
+ case DB___txn_recycle:
+ case DB___txn_ckp:
+ case DB___db_noop:
+ case DB___fop_file_remove:
+ case DB___txn_child:
make_call = 1;
- if (ret == DB_NOTFOUND && rectype != DB_txn_regop &&
- rectype != DB_txn_xa_regop && (ret =
- __db_txnlist_add(dbenv, info, txnid, 1)) != 0)
- return (ret);
+ break;
+
+ case DB___dbreg_register:
+ if (txnid == 0) {
+ make_call = 1;
+ break;
+ }
+ /* FALLTHROUGH */
+ default:
+ if (txnid != 0 && (ret =
+ __db_txnlist_find(dbenv,
+ info, txnid)) != TXN_COMMIT && ret != TXN_IGNORE) {
+ /*
+ * If not found, then this is an incomplete
+ * abort.
+ */
+ if (ret == TXN_NOTFOUND)
+ return (__db_txnlist_add(dbenv,
+ info, txnid, TXN_IGNORE, lsnp));
+ make_call = 1;
+ if (ret == TXN_OK &&
+ (ret = __db_txnlist_update(dbenv,
+ info, txnid,
+ rectype == DB___txn_xa_regop ?
+ TXN_PREPARE : TXN_ABORT, NULL)) != 0)
+ return (ret);
+ }
}
break;
case DB_TXN_FORWARD_ROLL:
/*
* In the forward pass, if we haven't seen the transaction,
- * do nothing, else recovery it.
+ * do nothing, else recover it.
*
- * We need to always redo DB_db_noop records, so that we
+ * We need to always redo DB___db_noop records, so that we
* properly handle any commits after the file was closed.
*/
- if (rectype == DB_log_register ||
- rectype == DB_txn_ckp ||
- rectype == DB_db_noop ||
- __db_txnlist_find(info, txnid) == 0)
+ switch(rectype) {
+ case DB___txn_recycle:
+ case DB___txn_ckp:
+ case DB___db_noop:
make_call = 1;
+ break;
+
+ default:
+ if (txnid != 0 && (ret = __db_txnlist_find(dbenv,
+ info, txnid)) == TXN_COMMIT)
+ make_call = 1;
+ else if (ret != TXN_IGNORE &&
+ (rectype == DB___ham_metagroup ||
+ rectype == DB___ham_groupalloc ||
+ rectype == DB___db_pg_alloc)) {
+ /*
+ * Because we cannot undo file extensions
+ * all allocation records must be reprocessed
+ * during rollforward in case the file was
+ * just created. It may not have been
+ * present during the backward pass.
+ */
+ make_call = 1;
+ redo = DB_TXN_BACKWARD_ALLOC;
+ } else if (rectype == DB___dbreg_register) {
+ /*
+ * This may be a transaction dbreg_register.
+ * If it is, we only make the call on a COMMIT,
+ * which we checked above. If it's not, then we
+ * should always make the call, because we need
+ * the file open information.
+ */
+ if (txnid == 0)
+ make_call = 1;
+ }
+ }
break;
+ case DB_TXN_GETPGNOS:
+ /*
+ * If this is one of DB's own log records, we simply
+ * dispatch.
+ */
+ if (rectype < DB_user_BEGIN) {
+ make_call = 1;
+ break;
+ }
+
+ /*
+ * If we're still here, this is a custom record in an
+ * application that's doing app-specific logging. Such a
+ * record doesn't have a getpgnos function for the user
+ * dispatch function to call--the getpgnos functions return
+ * which pages replication needs to lock using the TXN_RECS
+ * structure, which is private and not something we want to
+ * document.
+ *
+ * Thus, we leave any necessary locking for the app's
+ * recovery function to do during the upcoming
+ * DB_TXN_APPLY. Fill in default getpgnos info (we need
+ * a stub entry for every log record that will get
+ * DB_TXN_APPLY'd) and return success.
+ */
+ return (__db_default_getpgnos(dbenv, lsnp, info));
default:
return (__db_unknown_flag(dbenv, "__db_dispatch", redo));
}
+ /*
+ * The switch statement uses ret to receive the return value of
+ * __db_txnlist_find, which returns a large number of different
+ * statuses, none of which we will be returning. For safety,
+ * let's reset this here in case we ever do a "return(ret)"
+ * below in the future.
+ */
+ ret = 0;
if (make_call) {
- if (rectype >= DB_user_BEGIN && dbenv->tx_recover != NULL)
- return (dbenv->tx_recover(dbenv, db, lsnp, redo));
- else
- return (dbenv->dtab[rectype](dbenv, db, lsnp, redo, info));
+ if (rectype >= DB_user_BEGIN && dbenv->app_dispatch != NULL)
+ return (dbenv->app_dispatch(dbenv, db, lsnp, redo));
+ else {
+ /*
+ * The size of the dtab table argument is the same as
+ * the standard table, use the standard table's size
+ * as our sanity check.
+ */
+ if (rectype > dtabsize || dtab[rectype] == NULL) {
+ __db_err(dbenv,
+ "Illegal record type %lu in log",
+ (u_long)rectype);
+ return (EINVAL);
+ }
+ return (dtab[rectype](dbenv, db, lsnp, redo, info));
+ }
}
return (0);
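Stripped of the recovery-pass bookkeeping, the tail of __db_dispatch is a bounds-checked indirect call: user records go to app_dispatch, everything else through the dtab array, and NULL or out-of-range slots are rejected. A minimal model of that lookup; the record-type boundary and the function-pointer signature are simplified stand-ins, not the real DB types:

#include <errno.h>
#include <stddef.h>
#include <stdio.h>

typedef int (*recover_fn)(int rectype, void *info);

#define USER_BEGIN    10000    /* stand-in for DB_user_BEGIN */

static int dispatch_sketch(recover_fn *dtab, size_t dtabsize,
    recover_fn app_dispatch, int rectype, void *info)
{
    /* Application records are handed to the application's dispatcher. */
    if (rectype >= USER_BEGIN && app_dispatch != NULL)
        return (app_dispatch(rectype, info));
    /* Everything else must have a slot in the dispatch table. */
    if ((size_t)rectype >= dtabsize || dtab[rectype] == NULL) {
        fprintf(stderr, "Illegal record type %d in log\n", rectype);
        return (EINVAL);
    }
    return (dtab[rectype](rectype, info));
}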
@@ -163,75 +294,100 @@ __db_dispatch(dbenv, db, lsnp, redo, info)
* __db_add_recovery --
*
* PUBLIC: int __db_add_recovery __P((DB_ENV *,
- * PUBLIC: int (*)(DB_ENV *, DBT *, DB_LSN *, db_recops, void *), u_int32_t));
+ * PUBLIC: int (***)(DB_ENV *, DBT *, DB_LSN *, db_recops, void *), size_t *,
+ * PUBLIC: int (*)(DB_ENV *, DBT *, DB_LSN *, db_recops, void *), u_int32_t));
*/
int
-__db_add_recovery(dbenv, func, ndx)
+__db_add_recovery(dbenv, dtab, dtabsize, func, ndx)
DB_ENV *dbenv;
+ int (***dtab) __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ size_t *dtabsize;
int (*func) __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
u_int32_t ndx;
{
- u_int32_t i, nsize;
+ size_t i, nsize;
int ret;
/* Check if we have to grow the table. */
- if (ndx >= dbenv->dtab_size) {
+ if (ndx >= *dtabsize) {
nsize = ndx + 40;
- if ((ret = __os_realloc(dbenv,
- nsize * sizeof(dbenv->dtab[0]), NULL, &dbenv->dtab)) != 0)
+ if ((ret =
+ __os_realloc(dbenv, nsize * sizeof((*dtab)[0]), dtab)) != 0)
return (ret);
- for (i = dbenv->dtab_size; i < nsize; ++i)
- dbenv->dtab[i] = NULL;
- dbenv->dtab_size = nsize;
+ for (i = *dtabsize; i < nsize; ++i)
+ (*dtab)[i] = NULL;
+ *dtabsize = nsize;
}
- dbenv->dtab[ndx] = func;
+ (*dtab)[ndx] = func;
return (0);
}
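__db_add_recovery grows the caller-owned table in chunks of 40 entries and NULL-fills the new slots, which is what keeps the dispatch-time NULL check above meaningful. The same pattern reduced to standard C, with realloc() standing in for __os_realloc():

#include <errno.h>
#include <stdlib.h>

typedef int (*recover_fn)(int rectype, void *info);

static int add_recovery_sketch(recover_fn **dtab, size_t *dtabsize,
    recover_fn func, size_t ndx)
{
    recover_fn *ntab;
    size_t i, nsize;

    if (ndx >= *dtabsize) {
        nsize = ndx + 40;
        if ((ntab = realloc(*dtab, nsize * sizeof(**dtab))) == NULL)
            return (ENOMEM);
        /* New slots must be NULL so an unregistered type is detectable. */
        for (i = *dtabsize; i < nsize; ++i)
            ntab[i] = NULL;
        *dtab = ntab;
        *dtabsize = nsize;
    }
    (*dtab)[ndx] = func;
    return (0);
}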
/*
- * __deprecated_recover --
- * Stub routine for deprecated recovery functions.
- *
- * PUBLIC: int __deprecated_recover
- * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
- */
-int
-__deprecated_recover(dbenv, dbtp, lsnp, op, info)
- DB_ENV *dbenv;
- DBT *dbtp;
- DB_LSN *lsnp;
- db_recops op;
- void *info;
-{
- COMPQUIET(dbenv, NULL);
- COMPQUIET(dbtp, NULL);
- COMPQUIET(lsnp, NULL);
- COMPQUIET(op, 0);
- COMPQUIET(info, NULL);
- return (EINVAL);
-}
-
-/*
* __db_txnlist_init --
* Initialize transaction linked list.
*
- * PUBLIC: int __db_txnlist_init __P((DB_ENV *, void *));
+ * PUBLIC: int __db_txnlist_init __P((DB_ENV *,
+ * PUBLIC: u_int32_t, u_int32_t, DB_LSN *, void *));
*/
int
-__db_txnlist_init(dbenv, retp)
+__db_txnlist_init(dbenv, low_txn, hi_txn, trunc_lsn, retp)
DB_ENV *dbenv;
+ u_int32_t low_txn, hi_txn;
+ DB_LSN *trunc_lsn;
void *retp;
{
DB_TXNHEAD *headp;
- int ret;
+ u_int32_t tmp;
+ int ret, size;
- if ((ret = __os_malloc(dbenv, sizeof(DB_TXNHEAD), NULL, &headp)) != 0)
+ /*
+ * Size a hash table.
+ * If low is zero then we are being called during rollback
+ * and we need only one slot.
+ * Hi may be lower than low if we have recycled txnids.
+ * The numbers here are guesses about txn density; we can afford
+ * to look at a few entries in each slot.
+ */
+ if (low_txn == 0)
+ size = 1;
+ else {
+ if (hi_txn < low_txn) {
+ tmp = hi_txn;
+ hi_txn = low_txn;
+ low_txn = tmp;
+ }
+ tmp = hi_txn - low_txn;
+ /* See if we wrapped around. */
+ if (tmp > (TXN_MAXIMUM - TXN_MINIMUM) / 2)
+ tmp = (low_txn - TXN_MINIMUM) + (TXN_MAXIMUM - hi_txn);
+ size = tmp / 5;
+ if (size < 100)
+ size = 100;
+ }
+ if ((ret = __os_malloc(dbenv,
+ sizeof(DB_TXNHEAD) + size * sizeof(headp->head), &headp)) != 0)
return (ret);
- LIST_INIT(&headp->head);
- headp->maxid = 0;
- headp->generation = 1;
+ memset(headp, 0, sizeof(DB_TXNHEAD) + size * sizeof(headp->head));
+ headp->maxid = hi_txn;
+ headp->generation = 0;
+ headp->nslots = size;
+ headp->gen_alloc = 8;
+ if ((ret = __os_malloc(dbenv, headp->gen_alloc *
+ sizeof(headp->gen_array[0]), &headp->gen_array)) != 0) {
+ __os_free(dbenv, headp);
+ return (ret);
+ }
+ headp->gen_array[0].generation = 0;
+ headp->gen_array[0].txn_min = TXN_MINIMUM;
+ headp->gen_array[0].txn_max = TXN_MAXIMUM;
+ if (trunc_lsn != NULL)
+ headp->trunc_lsn = *trunc_lsn;
+ else
+ ZERO_LSN(headp->trunc_lsn);
+ ZERO_LSN(headp->maxlsn);
+ ZERO_LSN(headp->ckplsn);
*(void **)retp = headp;
return (0);
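The sizing heuristic in __db_txnlist_init is easier to read on its own: one slot when called for an abort, otherwise roughly one slot per five live transactions, with the span corrected for wrapped (recycled) transaction IDs and floored at 100. A self-contained restatement; TXN_MIN/TXN_MAX here are illustrative stand-ins for TXN_MINIMUM/TXN_MAXIMUM:

#include <stdint.h>

#define TXN_MIN    0x80000000U    /* stand-ins, not the real constants */
#define TXN_MAX    0xffffffffU

static uint32_t txnlist_size_sketch(uint32_t low_txn, uint32_t hi_txn)
{
    uint32_t size, tmp;

    if (low_txn == 0)
        return (1);            /* rollback: a single slot suffices */
    if (hi_txn < low_txn) {        /* recycled IDs: normalize the pair */
        tmp = hi_txn;
        hi_txn = low_txn;
        low_txn = tmp;
    }
    tmp = hi_txn - low_txn;
    /* If the span covers more than half the ID space, it wrapped. */
    if (tmp > (TXN_MAX - TXN_MIN) / 2)
        tmp = (low_txn - TXN_MIN) + (TXN_MAX - hi_txn);
    size = tmp / 5;
    return (size < 100 ? 100 : size);
}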
@@ -241,132 +397,86 @@ __db_txnlist_init(dbenv, retp)
* __db_txnlist_add --
* Add an element to our transaction linked list.
*
- * PUBLIC: int __db_txnlist_add __P((DB_ENV *, void *, u_int32_t, int32_t));
+ * PUBLIC: int __db_txnlist_add __P((DB_ENV *,
+ * PUBLIC: void *, u_int32_t, int32_t, DB_LSN *));
*/
int
-__db_txnlist_add(dbenv, listp, txnid, aborted)
+__db_txnlist_add(dbenv, listp, txnid, status, lsn)
DB_ENV *dbenv;
void *listp;
u_int32_t txnid;
- int32_t aborted;
+ int32_t status;
+ DB_LSN *lsn;
{
DB_TXNHEAD *hp;
DB_TXNLIST *elp;
int ret;
- if ((ret = __os_malloc(dbenv, sizeof(DB_TXNLIST), NULL, &elp)) != 0)
+ if ((ret = __os_malloc(dbenv, sizeof(DB_TXNLIST), &elp)) != 0)
return (ret);
hp = (DB_TXNHEAD *)listp;
- LIST_INSERT_HEAD(&hp->head, elp, links);
+ LIST_INSERT_HEAD(&hp->head[DB_TXNLIST_MASK(hp, txnid)], elp, links);
elp->type = TXNLIST_TXNID;
elp->u.t.txnid = txnid;
- elp->u.t.aborted = aborted;
+ elp->u.t.status = status;
+ elp->u.t.generation = hp->generation;
if (txnid > hp->maxid)
hp->maxid = txnid;
- elp->u.t.generation = hp->generation;
+ if (lsn != NULL && IS_ZERO_LSN(hp->maxlsn) && status == TXN_COMMIT)
+ hp->maxlsn = *lsn;
+
+ DB_ASSERT(lsn == NULL ||
+ status != TXN_COMMIT || log_compare(&hp->maxlsn, lsn) >= 0);
return (0);
}
+
/*
* __db_txnlist_remove --
* Remove an element from our transaction linked list.
*
- * PUBLIC: int __db_txnlist_remove __P((void *, u_int32_t));
+ * PUBLIC: int __db_txnlist_remove __P((DB_ENV *, void *, u_int32_t));
*/
int
-__db_txnlist_remove(listp, txnid)
+__db_txnlist_remove(dbenv, listp, txnid)
+ DB_ENV *dbenv;
void *listp;
u_int32_t txnid;
{
DB_TXNLIST *entry;
- return (__db_txnlist_find_internal(listp,
- TXNLIST_TXNID, txnid, NULL, &entry, 1));
-}
-
-/* __db_txnlist_close --
- *
- * Call this when we close a file. It allows us to reconcile whether
- * we have done any operations on this file with whether the file appears
- * to have been deleted. If you never do any operations on a file, then
- * we assume it's OK to appear deleted.
- *
- * PUBLIC: int __db_txnlist_close __P((void *, int32_t, u_int32_t));
- */
-
-int
-__db_txnlist_close(listp, lid, count)
- void *listp;
- int32_t lid;
- u_int32_t count;
-{
- DB_TXNHEAD *hp;
- DB_TXNLIST *p;
-
- hp = (DB_TXNHEAD *)listp;
- for (p = LIST_FIRST(&hp->head); p != NULL; p = LIST_NEXT(p, links)) {
- if (p->type == TXNLIST_DELETE)
- if (lid == p->u.d.fileid &&
- !F_ISSET(&p->u.d, TXNLIST_FLAG_CLOSED)) {
- p->u.d.count += count;
- return (0);
- }
- }
-
- return (0);
+ return (__db_txnlist_find_internal(dbenv,
+ listp, TXNLIST_TXNID, txnid,
+ NULL, &entry, 1) == TXN_NOTFOUND ? TXN_NOTFOUND : TXN_OK);
}
/*
- * __db_txnlist_delete --
- *
- * Record that a file was missing or deleted. If the deleted
- * flag is set, then we've encountered a delete of a file, else we've
- * just encountered a file that is missing. The lid is the log fileid
- * and is only meaningful if deleted is not equal to 0.
+ * __db_txnlist_ckp --
+ * Used to record the maximum checkpoint that will be retained
+ * after recovery. Typically this is simply the max checkpoint, but
+ * if we are doing client replication recovery or timestamp-based
+ * recovery, we are going to virtually truncate the log and we need
+ * to retain the last checkpoint before the truncation point.
*
- * PUBLIC: int __db_txnlist_delete __P((DB_ENV *,
- * PUBLIC: void *, char *, u_int32_t, int));
+ * PUBLIC: void __db_txnlist_ckp __P((DB_ENV *, void *, DB_LSN *));
*/
-int
-__db_txnlist_delete(dbenv, listp, name, lid, deleted)
+void
+__db_txnlist_ckp(dbenv, listp, ckp_lsn)
DB_ENV *dbenv;
void *listp;
- char *name;
- u_int32_t lid;
- int deleted;
+ DB_LSN *ckp_lsn;
{
DB_TXNHEAD *hp;
- DB_TXNLIST *p;
- int ret;
- hp = (DB_TXNHEAD *)listp;
- for (p = LIST_FIRST(&hp->head); p != NULL; p = LIST_NEXT(p, links)) {
- if (p->type == TXNLIST_DELETE)
- if (strcmp(name, p->u.d.fname) == 0) {
- if (deleted)
- F_SET(&p->u.d, TXNLIST_FLAG_DELETED);
- else
- F_CLR(&p->u.d, TXNLIST_FLAG_CLOSED);
- return (0);
- }
- }
-
- /* Need to add it. */
- if ((ret = __os_malloc(dbenv, sizeof(DB_TXNLIST), NULL, &p)) != 0)
- return (ret);
- LIST_INSERT_HEAD(&hp->head, p, links);
+ COMPQUIET(dbenv, NULL);
- p->type = TXNLIST_DELETE;
- p->u.d.flags = 0;
- if (deleted)
- F_SET(&p->u.d, TXNLIST_FLAG_DELETED);
- p->u.d.fileid = lid;
- p->u.d.count = 0;
- ret = __os_strdup(dbenv, name, &p->u.d.fname);
+ hp = (DB_TXNHEAD *)listp;
- return (ret);
+ if (IS_ZERO_LSN(hp->ckplsn) && !IS_ZERO_LSN(hp->maxlsn) &&
+ log_compare(&hp->maxlsn, ckp_lsn) >= 0)
+ hp->ckplsn = *ckp_lsn;
}
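__db_txnlist_ckp latches only the first checkpoint LSN that does not exceed maxlsn, which is exactly the "last checkpoint before the truncation point" the comment describes. A small sketch of the latch, assuming a file/offset pair shaped like DB_LSN:

#include <stdint.h>

struct lsn { uint32_t file, offset; };

static int lsn_cmp(const struct lsn *a, const struct lsn *b)
{
    if (a->file != b->file)
        return (a->file < b->file ? -1 : 1);
    if (a->offset != b->offset)
        return (a->offset < b->offset ? -1 : 1);
    return (0);
}

#define IS_ZERO(l)    ((l).file == 0 && (l).offset == 0)

/* Record the checkpoint only once, and only if it precedes maxlsn. */
static void txnlist_ckp_sketch(struct lsn *ckplsn,
    const struct lsn *maxlsn, const struct lsn *ckp_lsn)
{
    if (IS_ZERO(*ckplsn) && !IS_ZERO(*maxlsn) &&
        lsn_cmp(maxlsn, ckp_lsn) >= 0)
        *ckplsn = *ckp_lsn;
}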
/*
@@ -383,99 +493,156 @@ __db_txnlist_end(dbenv, listp)
{
DB_TXNHEAD *hp;
DB_TXNLIST *p;
- DB_LOG *lp;
+ int i;
- hp = (DB_TXNHEAD *)listp;
- lp = (DB_LOG *)dbenv->lg_handle;
- while (hp != NULL && (p = LIST_FIRST(&hp->head)) != NULL) {
- LIST_REMOVE(p, links);
- switch (p->type) {
- case TXNLIST_DELETE:
- /*
- * If we have a file that is not deleted and has
- * some operations, we flag the warning. Since
- * the file could still be open, we need to check
- * the actual log table as well.
- */
- if ((!F_ISSET(&p->u.d, TXNLIST_FLAG_DELETED) &&
- p->u.d.count != 0) ||
- (!F_ISSET(&p->u.d, TXNLIST_FLAG_CLOSED) &&
- p->u.d.fileid != (int32_t) TXNLIST_INVALID_ID &&
- p->u.d.fileid < lp->dbentry_cnt &&
- lp->dbentry[p->u.d.fileid].count != 0))
- __db_err(dbenv, "warning: %s: %s",
- p->u.d.fname, db_strerror(ENOENT));
- __os_freestr(p->u.d.fname);
- break;
- case TXNLIST_LSN:
- __os_free(p->u.l.lsn_array,
- p->u.l.maxn * sizeof(DB_LSN));
- break;
- default:
- /* Possibly an incomplete DB_TXNLIST; just free it. */
- break;
+ if ((hp = (DB_TXNHEAD *)listp) == NULL)
+ return;
+
+ for (i = 0; i < hp->nslots; i++)
+ while (hp != NULL && (p = LIST_FIRST(&hp->head[i])) != NULL) {
+ LIST_REMOVE(p, links);
+ switch (p->type) {
+ case TXNLIST_LSN:
+ __os_free(dbenv, p->u.l.lsn_array);
+ break;
+ default:
+ /*
+ * Possibly an incomplete DB_TXNLIST; just
+ * free it.
+ */
+ break;
+ }
+ __os_free(dbenv, p);
}
- __os_free(p, sizeof(DB_TXNLIST));
- }
- __os_free(listp, sizeof(DB_TXNHEAD));
+
+ if (hp->gen_array != NULL)
+ __os_free(dbenv, hp->gen_array);
+ __os_free(dbenv, listp);
}
/*
* __db_txnlist_find --
* Checks to see if a txnid with the current generation is in the
- * txnid list. This returns DB_NOTFOUND if the item isn't in the
- * list otherwise it returns (like __db_txnlist_find_internal) a
- * 1 or 0 indicating if the transaction is aborted or not. A txnid
- * of 0 means the record was generated while not in a transaction.
+ * txnid list. This returns TXN_NOTFOUND if the item isn't in the
+ * list otherwise it returns (like __db_txnlist_find_internal)
+ * the status of the transaction. A txnid of 0 means the record
+ * was generated while not in a transaction.
*
- * PUBLIC: int __db_txnlist_find __P((void *, u_int32_t));
+ * PUBLIC: int __db_txnlist_find __P((DB_ENV *, void *, u_int32_t));
*/
int
-__db_txnlist_find(listp, txnid)
+__db_txnlist_find(dbenv, listp, txnid)
+ DB_ENV *dbenv;
void *listp;
u_int32_t txnid;
{
DB_TXNLIST *entry;
if (txnid == 0)
- return (DB_NOTFOUND);
- return (__db_txnlist_find_internal(listp,
- TXNLIST_TXNID, txnid, NULL, &entry, 0));
+ return (TXN_NOTFOUND);
+ return (__db_txnlist_find_internal(dbenv, listp,
+ TXNLIST_TXNID, txnid, NULL, &entry, 0));
+}
+
+/*
+ * __db_txnlist_update --
+ * Change the status of an existing transaction entry.
+ * Returns TXN_NOTFOUND if no such entry exists.
+ *
+ * PUBLIC: int __db_txnlist_update __P((DB_ENV *,
+ * PUBLIC: void *, u_int32_t, u_int32_t, DB_LSN *));
+ */
+int
+__db_txnlist_update(dbenv, listp, txnid, status, lsn)
+ DB_ENV *dbenv;
+ void *listp;
+ u_int32_t txnid;
+ u_int32_t status;
+ DB_LSN *lsn;
+{
+ DB_TXNHEAD *hp;
+ DB_TXNLIST *elp;
+ int ret;
+
+ if (txnid == 0)
+ return (TXN_NOTFOUND);
+ hp = (DB_TXNHEAD *)listp;
+ ret = __db_txnlist_find_internal(dbenv,
+ listp, TXNLIST_TXNID, txnid, NULL, &elp, 0);
+
+ if (ret == TXN_NOTFOUND)
+ return (ret);
+ elp->u.t.status = status;
+
+ if (lsn != NULL && IS_ZERO_LSN(hp->maxlsn) && status == TXN_COMMIT)
+ hp->maxlsn = *lsn;
+
+ return (ret);
}
/*
* __db_txnlist_find_internal --
- * Find an entry on the transaction list.
- * If the entry is not there or the list pointeris not initialized
- * we return DB_NOTFOUND. If the item is found, we return the aborted
- * status (1 for aborted, 0 for not aborted). Currently we always call
- * this with an initialized list pointer but checking for NULL keeps it general.
+ * Find an entry on the transaction list. If the entry is not there or
+ * the list pointer is not initialized we return TXN_NOTFOUND. If the
+ * item is found, we return the status. Currently we always call this
+ * with an initialized list pointer but checking for NULL keeps it general.
*/
static int
-__db_txnlist_find_internal(listp, type, txnid, uid, txnlistp, delete)
+__db_txnlist_find_internal(dbenv, listp, type, txnid, uid, txnlistp, delete)
+ DB_ENV *dbenv;
void *listp;
db_txnlist_type type;
- u_int32_t txnid;
+ u_int32_t txnid;
u_int8_t uid[DB_FILE_ID_LEN];
DB_TXNLIST **txnlistp;
int delete;
{
DB_TXNHEAD *hp;
DB_TXNLIST *p;
- int ret;
+ int32_t generation;
+ u_int32_t hash;
+ struct __db_headlink *head;
+ int i, ret;
if ((hp = (DB_TXNHEAD *)listp) == NULL)
- return (DB_NOTFOUND);
+ return (TXN_NOTFOUND);
+
+ switch (type) {
+ case TXNLIST_TXNID:
+ hash = txnid;
+ /* Find the most recent generation containing this ID */
+ for (i = 0; i <= hp->generation; i++)
+ /* The range may wrap around the end. */
+ if (hp->gen_array[i].txn_min <
+ hp->gen_array[i].txn_max ?
+ (txnid >= hp->gen_array[i].txn_min &&
+ txnid <= hp->gen_array[i].txn_max) :
+ (txnid >= hp->gen_array[i].txn_min ||
+ txnid <= hp->gen_array[i].txn_max))
+ break;
+ DB_ASSERT(i <= hp->generation);
+ generation = hp->gen_array[i].generation;
+ break;
+ case TXNLIST_PGNO:
+ memcpy(&hash, uid, sizeof(hash));
+ generation = 0;
+ break;
+ default:
+ DB_ASSERT(0);
+ return (EINVAL);
+ }
+
+ head = &hp->head[DB_TXNLIST_MASK(hp, hash)];
- for (p = LIST_FIRST(&hp->head); p != NULL; p = LIST_NEXT(p, links)) {
+ for (p = LIST_FIRST(head); p != NULL; p = LIST_NEXT(p, links)) {
if (p->type != type)
continue;
switch (type) {
case TXNLIST_TXNID:
if (p->u.t.txnid != txnid ||
- hp->generation != p->u.t.generation)
+ generation != p->u.t.generation)
continue;
- ret = p->u.t.aborted;
+ ret = p->u.t.status;
break;
case TXNLIST_PGNO:
@@ -490,42 +657,67 @@ __db_txnlist_find_internal(listp, type, txnid, uid, txnlistp, delete)
}
if (delete == 1) {
LIST_REMOVE(p, links);
- __os_free(p, sizeof(DB_TXNLIST));
- } else if (p != LIST_FIRST(&hp->head)) {
+ __os_free(dbenv, p);
+ } else if (p != LIST_FIRST(head)) {
/* Move it to head of list. */
LIST_REMOVE(p, links);
- LIST_INSERT_HEAD(&hp->head, p, links);
+ LIST_INSERT_HEAD(head, p, links);
}
*txnlistp = p;
return (ret);
}
- return (DB_NOTFOUND);
+ return (TXN_NOTFOUND);
}
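The generation lookup above has to treat each [txn_min, txn_max] range as possibly wrapping around the end of the ID space, which is why the membership test switches between an AND and an OR. The predicate in isolation:

#include <stdint.h>

/* Is txnid inside [min, max], where the range may wrap around? */
static int txn_in_range(uint32_t txnid, uint32_t min, uint32_t max)
{
    return (min < max ?
        (txnid >= min && txnid <= max) :
        (txnid >= min || txnid <= max));
}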
/*
* __db_txnlist_gen --
* Change the current generation number.
*
- * PUBLIC: void __db_txnlist_gen __P((void *, int));
+ * PUBLIC: int __db_txnlist_gen __P((DB_ENV *,
+ * PUBLIC: void *, int, u_int32_t, u_int32_t));
*/
-void
-__db_txnlist_gen(listp, incr)
+int
+__db_txnlist_gen(dbenv, listp, incr, min, max)
+ DB_ENV *dbenv;
void *listp;
int incr;
+ u_int32_t min, max;
{
DB_TXNHEAD *hp;
+ int ret;
/*
- * During recovery generation numbers keep track of how many "restart"
- * checkpoints we've seen. Restart checkpoints occur whenever we take
- * a checkpoint and there are no outstanding transactions. When that
- * happens, we can reset transaction IDs back to 1. It always happens
- * at recovery and it prevents us from exhausting the transaction IDs
- * name space.
+ * During recovery generation numbers keep track of "restart"
+ * checkpoints and recycle records. Restart checkpoints occur
+ * whenever we take a checkpoint and there are no outstanding
+ * transactions. When that happens, we can reset transaction IDs
+ * back to TXNID_MINIMUM. Currently we only do the reset
+ * at the end of recovery. Recycle records occur when txnids
+ * are exhausted during runtime. A free range of ids is identified
+ * and logged. This code maintains a stack of ranges. A txnid
+ * is given the generation number of the first range it falls into
+ * in the stack.
*/
hp = (DB_TXNHEAD *)listp;
hp->generation += incr;
+ if (incr < 0)
+ memmove(hp->gen_array, &hp->gen_array[1],
+ (hp->generation + 1) * sizeof(hp->gen_array[0]));
+ else {
+ if (hp->generation >= hp->gen_alloc) {
+ hp->gen_alloc *= 2;
+ if ((ret = __os_realloc(dbenv, hp->gen_alloc *
+ sizeof(hp->gen_array[0]), &hp->gen_array)) != 0)
+ return (ret);
+ }
+ memmove(&hp->gen_array[1], &hp->gen_array[0],
+ hp->generation * sizeof(hp->gen_array[0]));
+ hp->gen_array[0].generation = hp->generation;
+ hp->gen_array[0].txn_min = min;
+ hp->gen_array[0].txn_max = max;
+ }
+ return (0);
}
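__db_txnlist_gen maintains the ranges as a stack in gen_array[]: a recycle record pushes a new range to the front, undoing it pops the front off again, and a transaction ID gets the generation of the first range it falls into. A self-contained sketch of the push/pop bookkeeping, with a fixed-size array in place of the realloc'd one:

#include <stdint.h>
#include <string.h>

struct gen_range { int32_t generation; uint32_t txn_min, txn_max; };

struct gen_stack {
    struct gen_range a[32];    /* fixed size here; the real code reallocs */
    int32_t generation;        /* index of the newest range */
};

/* Push a new range to the front (assumes generation + 1 < 32 here). */
static void gen_push(struct gen_stack *s, uint32_t min, uint32_t max)
{
    s->generation++;
    memmove(&s->a[1], &s->a[0], s->generation * sizeof(s->a[0]));
    s->a[0].generation = s->generation;
    s->a[0].txn_min = min;
    s->a[0].txn_max = max;
}

/* Pop the front range off again when the recycle record is undone. */
static void gen_pop(struct gen_stack *s)
{
    s->generation--;
    memmove(&s->a[0], &s->a[1], (s->generation + 1) * sizeof(s->a[0]));
}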
#define TXN_BUBBLE(AP, MAX) { \
@@ -542,10 +734,10 @@ __db_txnlist_gen(listp, incr)
/*
* __db_txnlist_lsnadd --
- * Add to or re-sort the transaction list lsn entry.
- * Note that since this is used during an abort, the __txn_undo
- * code calls into the "recovery" subsystem explicitly, and there
- * is only a single TXNLIST_LSN entry on the list.
+ * Add to or re-sort the transaction list lsn entry. Note that since this
+ * is used during an abort, the __txn_undo code calls into the "recovery"
+ * subsystem explicitly, and there is only a single TXNLIST_LSN entry on
+ * the list.
*
* PUBLIC: int __db_txnlist_lsnadd __P((DB_ENV *, void *, DB_LSN *, u_int32_t));
*/
@@ -562,19 +754,19 @@ __db_txnlist_lsnadd(dbenv, listp, lsnp, flags)
hp = (DB_TXNHEAD *)listp;
- for (elp = LIST_FIRST(&hp->head);
+ for (elp = LIST_FIRST(&hp->head[0]);
elp != NULL; elp = LIST_NEXT(elp, links))
if (elp->type == TXNLIST_LSN)
break;
if (elp == NULL)
- return (EINVAL);
+ return (DB_SURPRISE_KID);
if (LF_ISSET(TXNLIST_NEW)) {
if (elp->u.l.ntxns >= elp->u.l.maxn) {
if ((ret = __os_realloc(dbenv,
2 * elp->u.l.maxn * sizeof(DB_LSN),
- NULL, &elp->u.l.lsn_array)) != 0)
+ &elp->u.l.lsn_array)) != 0)
return (ret);
elp->u.l.maxn *= 2;
}
@@ -584,9 +776,9 @@ __db_txnlist_lsnadd(dbenv, listp, lsnp, flags)
elp->u.l.lsn_array[0] = *lsnp;
/*
- * If we just added a new entry and there may be NULL
- * entries, so we have to do a complete bubble sort,
- * not just trickle a changed entry around.
+ * If we just added a new entry, there may be NULL entries, so we
+ * have to do a complete bubble sort, not just trickle a changed entry
+ * around.
*/
for (i = 0; i < (!LF_ISSET(TXNLIST_NEW) ? 1 : elp->u.l.ntxns); i++)
TXN_BUBBLE(elp->u.l.lsn_array, elp->u.l.ntxns);
@@ -597,35 +789,6 @@ __db_txnlist_lsnadd(dbenv, listp, lsnp, flags)
}
/*
- * __db_txnlist_lsnhead --
- * Return a pointer to the beginning of the lsn_array.
- *
- * PUBLIC: int __db_txnlist_lsnhead __P((void *, DB_LSN **));
- */
-int
-__db_txnlist_lsnhead(listp, lsnpp)
- void *listp;
- DB_LSN **lsnpp;
-{
- DB_TXNHEAD *hp;
- DB_TXNLIST *elp;
-
- hp = (DB_TXNHEAD *)listp;
-
- for (elp = LIST_FIRST(&hp->head);
- elp != NULL; elp = LIST_NEXT(elp, links))
- if (elp->type == TXNLIST_LSN)
- break;
-
- if (elp == NULL)
- return (EINVAL);
-
- *lsnpp = &elp->u.l.lsn_array[0];
-
- return (0);
-}
-
-/*
* __db_txnlist_lsninit --
* Initialize a transaction list with an lsn array entry.
*
@@ -642,12 +805,12 @@ __db_txnlist_lsninit(dbenv, hp, lsnp)
elp = NULL;
- if ((ret = __os_malloc(dbenv, sizeof(DB_TXNLIST), NULL, &elp)) != 0)
+ if ((ret = __os_malloc(dbenv, sizeof(DB_TXNLIST), &elp)) != 0)
goto err;
- LIST_INSERT_HEAD(&hp->head, elp, links);
+ LIST_INSERT_HEAD(&hp->head[0], elp, links);
if ((ret = __os_malloc(dbenv,
- 12 * sizeof(DB_LSN), NULL, &elp->u.l.lsn_array)) != 0)
+ 12 * sizeof(DB_LSN), &elp->u.l.lsn_array)) != 0)
goto err;
elp->type = TXNLIST_LSN;
elp->u.l.maxn = 12;
@@ -662,8 +825,7 @@ err: __db_txnlist_end(dbenv, hp);
/*
* __db_add_limbo -- add pages to the limbo list.
- * Get the file information and call pgnoadd
- * for each page.
+ * Get the file information and call pgnoadd for each page.
*
* PUBLIC: int __db_add_limbo __P((DB_ENV *,
* PUBLIC: void *, int32_t, db_pgno_t, int32_t));
@@ -681,7 +843,7 @@ __db_add_limbo(dbenv, info, fileid, pgno, count)
int ret;
dblp = dbenv->lg_handle;
- if ((ret = __log_lid_to_fname(dblp, fileid, &fnp)) != 0)
+ if ((ret = __dbreg_id_to_fname(dblp, fileid, 0, &fnp)) != 0)
return (ret);
do {
@@ -698,201 +860,429 @@ __db_add_limbo(dbenv, info, fileid, pgno, count)
/*
* __db_do_the_limbo -- move pages from limbo to free.
*
- * If we are in recovery we add things to the free list without
- * logging becasue we want to incrementaly apply logs that
- * may be generated on another copy of this environment.
- * Otherwise we just call __db_free to put the pages on
- * the free list and log the activity.
+ * Limbo processing is what ensures that we correctly handle and
+ * recover from page allocations. During recovery, for each database,
+ * we process each in-question allocation, link them into the free list
+ * and then write out the new meta-data page that contains the pointer
+ * to the new beginning of the free list. On an abort, we use our
+ * standard __db_free mechanism in a compensating transaction which logs
+ * the specific modifications to the free list.
+ *
+ * If we run out of log space during an abort, then we can't write the
+ * compensating transaction, so we abandon the idea of a compensating
+ * transaction, and go back to processing how we do during recovery.
+ * The reason that this is not the norm is that it's expensive: it requires
+ * that we flush any database with an in-question allocation. Thus if
+ * a compensating transaction fails, we never try to restart it.
+ *
+ * Since files may be open and closed within transactions (in particular,
+ * the master database for subdatabases), we must be prepared to open
+ * files during this process. If there is a compensating transaction, we
+ * can open the files in that transaction. If this was an abort and there
+ * is no compensating transaction, then we've got to perform these opens
+ * in the context of the aborting transaction so that we do not deadlock.
+ * During recovery, there's no locking, so this isn't an issue.
*
- * PUBLIC: int __db_do_the_limbo __P((DB_ENV *, DB_TXNHEAD *));
+ * What you want to keep in mind when reading this is that there are two
+ * algorithms going on here: ctxn == NULL, then we're either in recovery
+ * or our compensating transaction has failed and we're doing the
+ * "create list and write meta-data page" algorithm. Otherwise, we're in
+ * an abort and doing the "use compensating transaction" algorithm.
+ *
+ * PUBLIC: int __db_do_the_limbo __P((DB_ENV *,
+ * PUBLIC: DB_TXN *, DB_TXN *, DB_TXNHEAD *));
*/
int
-__db_do_the_limbo(dbenv, hp)
+__db_do_the_limbo(dbenv, ptxn, txn, hp)
DB_ENV *dbenv;
+ DB_TXN *ptxn, *txn;
DB_TXNHEAD *hp;
{
- DB *dbp;
- DBC *dbc;
- DBMETA *meta;
- DB_TXN *txn;
DB_TXNLIST *elp;
- PAGE *pagep;
- db_pgno_t last_pgno, pgno;
- int i, in_recover, put_page, ret, t_ret;
+ int h, ret;
- dbp = NULL;
- dbc = NULL;
- txn = NULL;
ret = 0;
+ /*
+ * The slots correspond to hash buckets. We've hashed the
+ * fileids into hash buckets and need to pick up all affected
+ * files. (There will only be a single slot for an abort.)
+ */
+ for (h = 0; h < hp->nslots; h++) {
+ if ((elp = LIST_FIRST(&hp->head[h])) == NULL)
+ continue;
+ if (ptxn != NULL) {
+ if ((ret =
+ __db_limbo_move(dbenv, ptxn, txn, elp)) != 0)
+ goto err;
+ } else if ((ret = __db_limbo_bucket(dbenv, txn, elp)) != 0)
+ goto err;
+ }
+
+err: if (ret != 0) {
+ __db_err(dbenv, "Fatal error in abort of an allocation");
+ ret = __db_panic(dbenv, ret);
+ }
- /* Are we in recovery? */
- in_recover = F_ISSET((DB_LOG *)dbenv->lg_handle, DBLOG_RECOVER);
+ return (ret);
+}
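The control flow the comment block above describes reduces to two modes: attempt the compensating-transaction algorithm once, and if it fails (assumed here to be out of log space) fall over permanently to the unlogged build-the-free-list-and-sync algorithm. A sketch of that policy; try_compensated() and flush_free_list() are hypothetical placeholders, not Berkeley DB functions:

/* Hypothetical helpers standing in for the two algorithms described above. */
static int try_compensated(void *elp) { (void)elp; return (-1); /* e.g. no log space */ }
static int flush_free_list(void *elp) { (void)elp; return (0);  /* unlogged, then sync */ }

static int limbo_bucket_sketch(void *elp)
{
    if (try_compensated(elp) == 0)
        return (0);
    /* Never retry the compensating transaction; switch modes for good. */
    return (flush_free_list(elp));
}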
- for (elp = LIST_FIRST(&hp->head);
- elp != NULL; elp = LIST_NEXT(elp, links)) {
+/* Limbo support routines. */
+
+/*
+ * __db_lock_move --
+ * Move a lock from child to parent.
+ */
+static int
+__db_lock_move(dbenv, fileid, pgno, mode, ptxn, txn)
+ DB_ENV *dbenv;
+ u_int8_t *fileid;
+ db_pgno_t pgno;
+ db_lockmode_t mode;
+ DB_TXN *ptxn, *txn;
+{
+ DBT lock_dbt;
+ DB_LOCK lock;
+ DB_LOCK_ILOCK lock_obj;
+ DB_LOCKREQ req;
+ int ret;
+
+ lock_obj.pgno = pgno;
+ memcpy(lock_obj.fileid, fileid, DB_FILE_ID_LEN);
+ lock_obj.type = DB_PAGE_LOCK;
+
+ memset(&lock_dbt, 0, sizeof(lock_dbt));
+ lock_dbt.data = &lock_obj;
+ lock_dbt.size = sizeof(lock_obj);
+
+ if ((ret = dbenv->lock_get(dbenv,
+ txn->txnid, 0, &lock_dbt, mode, &lock)) == 0) {
+ memset(&req, 0, sizeof(req));
+ req.lock = lock;
+ req.op = DB_LOCK_TRADE;
+
+ ret = dbenv->lock_vec(dbenv, ptxn->txnid, 0, &req, 1, NULL);
+ }
+ return (ret);
+}
+
+/*
+ * __db_limbo_move
+ * Move just the metapage lock to the parent.
+ */
+static int
+__db_limbo_move(dbenv, ptxn, txn, elp)
+ DB_ENV *dbenv;
+ DB_TXN *ptxn, *txn;
+ DB_TXNLIST *elp;
+{
+ int ret;
+
+ for (; elp != NULL; elp = LIST_NEXT(elp, links)) {
+ if (elp->type != TXNLIST_PGNO || elp->u.p.locked == 1)
+ continue;
+ if ((ret = __db_lock_move(dbenv, elp->u.p.uid,
+ PGNO_BASE_MD, DB_LOCK_WRITE, ptxn, txn)) != 0)
+ return (ret);
+ elp->u.p.locked = 1;
+ }
+
+ return (0);
+}
+/*
+ * __db_limbo_bucket
+ * Perform limbo processing for a single hash bucket in the txnlist.
+ * txn is the transaction aborting in the case of an abort and ctxn is the
+ * compensating transaction.
+ */
+
+#define T_RESTORED(txn) ((txn) != NULL && F_ISSET(txn, TXN_RESTORED))
+static int
+__db_limbo_bucket(dbenv, txn, elp)
+ DB_ENV *dbenv;
+ DB_TXN *txn;
+ DB_TXNLIST *elp;
+{
+ DB *dbp;
+ DB_MPOOLFILE *mpf;
+ DBMETA *meta;
+ DB_TXN *ctxn, *t;
+ db_pgno_t last_pgno, pgno;
+ int dbp_created, in_retry, ret, t_ret;
+
+ ctxn = NULL;
+ in_retry = 0;
+ meta = NULL;
+ mpf = NULL;
+ ret = 0;
+ for (; elp != NULL; elp = LIST_NEXT(elp, links)) {
if (elp->type != TXNLIST_PGNO)
continue;
+retry: dbp_created = 0;
+
+ /*
+ * Pick the transaction in which to potentially
+ * log compensations.
+ */
+ if (!in_retry && !IS_RECOVERING(dbenv) && !T_RESTORED(txn)
+ && (ret = __txn_compensate_begin(dbenv, &ctxn)) != 0)
+ return (ret);
+
+ /*
+ * Either use the compensating transaction or
+ * the one passed in, which will be null if recovering.
+ */
+ t = ctxn == NULL ? txn : ctxn;
+
+ /* First try to get a dbp by fileid. */
+ ret = __dbreg_id_to_db(dbenv, t, &dbp, elp->u.p.fileid, 0);
+
+ /*
+ * File is being destroyed. No need to worry about
+ * dealing with recovery of allocations.
+ */
+ if (ret == DB_DELETED ||
+ (ret == 0 && F_ISSET(dbp, DB_AM_DISCARD)))
+ goto next;
- if (in_recover) {
+ if (ret != 0) {
if ((ret = db_create(&dbp, dbenv, 0)) != 0)
goto err;
/*
- * It is ok if the file is nolonger there.
+ * This tells the system not to lock, which is always
+ * OK, whether this is an abort or recovery.
*/
+ F_SET(dbp, DB_AM_COMPENSATE);
+ dbp_created = 1;
+
+ /* It is OK if the file is no longer there. */
dbp->type = DB_UNKNOWN;
- ret = __db_dbopen(dbp,
- elp->u.p.fname, 0, __db_omode("rw----"), 0);
+ ret = __db_dbopen(dbp, t, elp->u.p.fname, NULL,
+ DB_ODDFILESIZE, __db_omode("rw----"), PGNO_BASE_MD);
+ if (ret == ENOENT)
+ goto next;
+ }
+
+ /*
+ * Verify that we are opening the same file that we were
+ * referring to when we wrote this log record.
+ */
+ if (memcmp(elp->u.p.uid, dbp->fileid, DB_FILE_ID_LEN) != 0)
+ goto next;
+
+ mpf = dbp->mpf;
+ last_pgno = PGNO_INVALID;
+
+ if (ctxn == NULL) {
+ pgno = PGNO_BASE_MD;
+ if ((ret =
+ mpf->get(mpf, &pgno, 0, (PAGE **)&meta)) != 0)
+ goto err;
+ last_pgno = meta->free;
+ }
+
+ ret = __db_limbo_fix(dbp, ctxn, elp, &last_pgno, meta);
+ /*
+ * If we were doing compensating transactions, then we are
+ * going to hope this error was due to running out of space.
+ * We'll change modes (into the sync the file mode) and keep
+ * trying. If we weren't doing compensating transactions,
+ * then this is a real error and we're sunk.
+ */
+ if (ret != 0) {
+ if (ret == DB_RUNRECOVERY || ctxn == NULL)
+ goto err;
+ in_retry = 1;
+ goto retry;
+ }
+
+ if (ctxn != NULL) {
+ ret = ctxn->commit(ctxn, DB_TXN_NOSYNC);
+ ctxn = NULL;
+ if (ret != 0)
+ goto retry;
+ goto next;
+ }
+
+ /*
+ * This is where we handle the case where we're explicitly
+ * putting together a free list. We need to decide whether
+ * we have to write the meta-data page, and if we do, then
+ * we need to sync it as well.
+ */
+ if (last_pgno == meta->free) {
+ /* No change to page; just put the page back. */
+ if ((ret = mpf->put(mpf, meta, 0)) != 0)
+ goto err;
+ meta = NULL;
} else {
/*
- * If we are in transaction undo, then we know
- * the fileid is still correct.
+ * These changes are unlogged so we cannot have the
+ * metapage pointing at pages that are not on disk.
+ * Therefore, we flush the new free list, then update
+ * the metapage. We have to put the meta-data page
+ * first so that it isn't pinned when we try to sync.
*/
+ if (!IS_RECOVERING(dbenv) && !T_RESTORED(txn))
+ __db_err(dbenv, "Flushing free list to disk");
+ if ((ret = mpf->put(mpf, meta, 0)) != 0)
+ goto err;
+ meta = NULL;
+ dbp->sync(dbp, 0);
+ pgno = PGNO_BASE_MD;
if ((ret =
- __db_fileid_to_db(dbenv, &dbp,
- elp->u.p.fileid, 0)) != 0 && ret != DB_DELETED)
+ mpf->get(mpf, &pgno, 0, (PAGE **)&meta)) != 0)
+ goto err;
+ meta->free = last_pgno;
+ if ((ret = mpf->put(mpf, meta, DB_MPOOL_DIRTY)) != 0)
goto err;
- /* File is being destroyed. */
- if (F_ISSET(dbp, DB_AM_DISCARD))
- ret = DB_DELETED;
+ meta = NULL;
}
+
+next:
/*
- * Verify that we are opening the same file that we were
- * referring to when we wrote this log record.
+ * If we get here, either we have processed the list
+ * or the db file has been deleted or could not be opened.
*/
- if (ret == 0 &&
- memcmp(elp->u.p.uid, dbp->fileid, DB_FILE_ID_LEN) == 0) {
- last_pgno = PGNO_INVALID;
- if (in_recover) {
- pgno = PGNO_BASE_MD;
- if ((ret = memp_fget(dbp->mpf,
- &pgno, 0, (PAGE **)&meta)) != 0)
- goto err;
- last_pgno = meta->free;
- /*
- * Check to see if the head of the free
- * list is any of the pages we are about
- * to link in. We could have crashed
- * after linking them in and before writing
- * a checkpoint.
- * It may not be the last one since
- * any page may get reallocated before here.
- */
- if (last_pgno != PGNO_INVALID)
- for (i = 0; i < elp->u.p.nentries; i++)
- if (last_pgno
- == elp->u.p.pgno_array[i])
- goto done_it;
- }
+ if (ctxn != NULL &&
+ (t_ret = ctxn->abort(ctxn)) != 0 && ret == 0)
+ ret = t_ret;
- for (i = 0; i < elp->u.p.nentries; i++) {
- pgno = elp->u.p.pgno_array[i];
- if ((ret = memp_fget(dbp->mpf,
- &pgno, DB_MPOOL_CREATE, &pagep)) != 0)
- goto err;
+ if (dbp_created &&
+ (t_ret = __db_close_i(dbp, txn, 0)) != 0 && ret == 0)
+ ret = t_ret;
+ dbp = NULL;
+ __os_free(dbenv, elp->u.p.fname);
+ __os_free(dbenv, elp->u.p.pgno_array);
+ if (ret == ENOENT)
+ ret = 0;
+ else if (ret != 0)
+ goto err;
+ }
- put_page = 1;
- if (IS_ZERO_LSN(LSN(pagep))) {
- P_INIT(pagep, dbp->pgsize,
- pgno, PGNO_INVALID,
- last_pgno, 0, P_INVALID);
-
- if (in_recover) {
- LSN(pagep) = LSN(meta);
- last_pgno = pgno;
- } else {
- /*
- * Starting the transaction
- * is postponed until we know
- * we have something to do.
- */
- if (txn == NULL &&
- (ret = txn_begin(dbenv,
- NULL, &txn, 0)) != 0)
- goto err;
-
- if (dbc == NULL &&
- (ret = dbp->cursor(dbp,
- txn, &dbc, 0)) != 0)
- goto err;
- /* Turn off locking. */
- F_SET(dbc, DBC_COMPENSATE);
-
- /* __db_free puts the page. */
- if ((ret =
- __db_free(dbc, pagep)) != 0)
- goto err;
- put_page = 0;
- }
- }
+err: if (meta != NULL)
+ (void)mpf->put(mpf, meta, 0);
+ return (ret);
+}
- if (put_page == 1 &&
- (ret = memp_fput(dbp->mpf,
- pagep, DB_MPOOL_DIRTY)) != 0)
- goto err;
- }
- if (in_recover) {
- if (last_pgno == meta->free) {
-done_it:
+/*
+ * __db_limbo_fix --
+ * Process a single limbo entry which describes all the page allocations
+ * for a single file.
+ */
+static int
+__db_limbo_fix(dbp, ctxn, elp, lastp, meta)
+ DB *dbp;
+ DB_TXN *ctxn;
+ DB_TXNLIST *elp;
+ db_pgno_t *lastp;
+ DBMETA *meta;
+{
+ DBC *dbc;
+ DB_MPOOLFILE *mpf;
+ PAGE *freep, *pagep;
+ db_pgno_t next, pgno;
+ int i, put_page, ret, t_ret;
+
+ /*
+ * Loop through the entries for this txnlist element and
+ * either link them into the free list or write a compensating
+ * record for each.
+ */
+ put_page = 0;
+ ret = 0;
+ mpf = dbp->mpf;
+ dbc = NULL;
+
+ for (i = 0; i < elp->u.p.nentries; i++) {
+ pgno = elp->u.p.pgno_array[i];
+
+ if ((ret = mpf->get(mpf, &pgno, DB_MPOOL_CREATE, &pagep)) != 0)
+ goto err;
+ put_page = 1;
+
+ if (IS_ZERO_LSN(LSN(pagep))) {
+ if (ctxn == NULL) {
+ /*
+ * If this is a fatal recovery which
+ * spans a previous crash, this page may
+ * be on the free list already.
+ */
+ for (next = *lastp; next != 0; ) {
+ if (next == pgno)
+ break;
+ if ((ret = mpf->get(mpf,
+ &next, 0, &freep)) != 0)
+ goto err;
+ next = NEXT_PGNO(freep);
if ((ret =
- memp_fput(dbp->mpf, meta, 0)) != 0)
+ mpf->put(mpf, freep, 0)) != 0)
goto err;
- } else {
- /*
- * Flush the new free list then
- * update the metapage. This is
- * unlogged so we cannot have the
- * metapage pointing at pages that
- * are not on disk.
- */
- dbp->sync(dbp, 0);
- meta->free = last_pgno;
- if ((ret = memp_fput(dbp->mpf,
- meta, DB_MPOOL_DIRTY)) != 0)
+ }
+
+ if (next != pgno) {
+ P_INIT(pagep, dbp->pgsize, pgno,
+ PGNO_INVALID, *lastp, 0, P_INVALID);
+ LSN(pagep) = LSN(meta);
+ *lastp = pgno;
+ }
+ } else {
+ P_INIT(pagep, dbp->pgsize, pgno,
+ PGNO_INVALID, *lastp, 0, P_INVALID);
+ if (dbc == NULL && (ret =
+ dbp->cursor(dbp, ctxn, &dbc, 0)) != 0)
goto err;
+ /*
+ * If the dbp is compensating (because we
+ * opened it), the dbc will automatically be
+ * marked compensating, but in case we didn't
+ * do the open, we have to mark it explicitly.
+ */
+ F_SET(dbc, DBC_COMPENSATE);
+ ret = __db_free(dbc, pagep);
+ put_page = 0;
+ /*
+ * On any error, we hope that the error was
+ * caused by running out of space, and we
+ * switch modes, doing the processing where we
+ * sync out files instead of doing compensating
+ * transactions. If this was a real error and
+ * not out of space, we assume that some other
+ * call will fail real soon.
+ */
+ if (ret != 0) {
+ /* Assume that this is out of space. */
+ (void)dbc->c_close(dbc);
+ dbc = NULL;
+ goto err;
}
}
- if (dbc != NULL && (ret = dbc->c_close(dbc)) != 0)
- goto err;
- dbc = NULL;
}
- if (in_recover && (t_ret = dbp->close(dbp, 0)) != 0 && ret == 0)
- ret = t_ret;
- dbp = NULL;
- __os_free(elp->u.p.fname, 0);
- __os_free(elp->u.p.pgno_array, 0);
- if (ret == ENOENT)
- ret = 0;
- else if (ret != 0)
+
+ if (put_page == 1) {
+ ret = mpf->put(mpf, pagep, DB_MPOOL_DIRTY);
+ put_page = 0;
+ }
+ if (ret != 0)
goto err;
}
- if (txn != NULL) {
- ret = txn_commit(txn, 0);
- txn = NULL;
- }
-err:
- if (dbc != NULL)
- (void)dbc->c_close(dbc);
- if (in_recover && dbp != NULL)
- (void)dbp->close(dbp, 0);
- if (txn != NULL)
- (void)txn_abort(txn);
+err: if (put_page &&
+ (t_ret = mpf->put(mpf, pagep, DB_MPOOL_DIRTY)) != 0 && ret == 0)
+ ret = t_ret;
+ if (dbc != NULL && (t_ret = dbc->c_close(dbc)) != 0 && ret == 0)
+ ret = t_ret;
return (ret);
-
}
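In the recovery-mode branch of __db_limbo_fix each never-written page (zero LSN) is simply threaded onto the front of the free list and the new head is handed back for the caller to store in meta->free. The list threading over a simplified in-memory page array, as an illustration rather than the real PAGE/mpool machinery:

#include <stddef.h>
#include <stdint.h>

struct page_hdr {
    uint32_t pgno;
    uint32_t next_free;
    int      never_written;    /* models the zero-LSN test */
};

/* Thread every never-written page onto the free list; return the new head. */
static uint32_t limbo_fix_sketch(struct page_hdr *pages, size_t npages,
    uint32_t free_head)
{
    size_t i;

    for (i = 0; i < npages; i++) {
        if (!pages[i].never_written)
            continue;
        pages[i].next_free = free_head;    /* old head becomes our next */
        free_head = pages[i].pgno;         /* we become the new head */
    }
    return (free_head);
}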
-#define DB_TXNLIST_MAX_PGNO 8 /* A nice even number. */
+#define DB_TXNLIST_MAX_PGNO 8 /* A nice even number. */
/*
* __db_txnlist_pgnoadd --
- * Find the txnlist entry for a file and add this pgno,
- * or add the list entry for the file and then add the pgno.
- *
- * PUBLIC: int __db_txnlist_pgnoadd __P((DB_ENV *, DB_TXNHEAD *,
- * PUBLIC: int32_t, u_int8_t [DB_FILE_ID_LEN], char *, db_pgno_t));
+ * Find the txnlist entry for a file and add this pgno, or add the list
+ * entry for the file and then add the pgno.
*/
-int
+static int
__db_txnlist_pgnoadd(dbenv, hp, fileid, uid, fname, pgno)
DB_ENV *dbenv;
DB_TXNHEAD *hp;
@@ -902,34 +1292,39 @@ __db_txnlist_pgnoadd(dbenv, hp, fileid, uid, fname, pgno)
db_pgno_t pgno;
{
DB_TXNLIST *elp;
+ u_int32_t hash;
int len, ret;
elp = NULL;
- if (__db_txnlist_find_internal(hp, TXNLIST_PGNO, 0, uid, &elp, 0) != 0) {
+ if (__db_txnlist_find_internal(dbenv, hp,
+ TXNLIST_PGNO, 0, uid, &elp, 0) != 0) {
if ((ret =
- __os_malloc(dbenv, sizeof(DB_TXNLIST), NULL, &elp)) != 0)
+ __os_malloc(dbenv, sizeof(DB_TXNLIST), &elp)) != 0)
goto err;
- LIST_INSERT_HEAD(&hp->head, elp, links);
+ memcpy(&hash, uid, sizeof(hash));
+ LIST_INSERT_HEAD(
+ &hp->head[DB_TXNLIST_MASK(hp, hash)], elp, links);
elp->u.p.fileid = fileid;
memcpy(elp->u.p.uid, uid, DB_FILE_ID_LEN);
- len = strlen(fname) + 1;
- if ((ret = __os_malloc(dbenv, len, NULL, &elp->u.p.fname)) != 0)
+ len = (int)strlen(fname) + 1;
+ if ((ret = __os_malloc(dbenv, len, &elp->u.p.fname)) != 0)
goto err;
memcpy(elp->u.p.fname, fname, len);
elp->u.p.maxentry = 0;
+ elp->u.p.locked = 0;
elp->type = TXNLIST_PGNO;
if ((ret = __os_malloc(dbenv,
- 8 * sizeof(db_pgno_t), NULL, &elp->u.p.pgno_array)) != 0)
+ 8 * sizeof(db_pgno_t), &elp->u.p.pgno_array)) != 0)
goto err;
elp->u.p.maxentry = DB_TXNLIST_MAX_PGNO;
elp->u.p.nentries = 0;
} else if (elp->u.p.nentries == elp->u.p.maxentry) {
elp->u.p.maxentry <<= 1;
if ((ret = __os_realloc(dbenv, elp->u.p.maxentry *
- sizeof(db_pgno_t), NULL, &elp->u.p.pgno_array)) != 0)
+ sizeof(db_pgno_t), &elp->u.p.pgno_array)) != 0)
goto err;
}
@@ -941,6 +1336,36 @@ err: __db_txnlist_end(dbenv, hp);
return (ret);
}
+/*
+ * __db_default_getpgnos --
+ * Fill in default getpgnos information for an application-specific
+ * log record.
+ */
+static int
+__db_default_getpgnos(dbenv, lsnp, summary)
+ DB_ENV *dbenv;
+ DB_LSN *lsnp;
+ void *summary;
+{
+ TXN_RECS *t;
+ int ret;
+
+ t = (TXN_RECS *)summary;
+
+ if ((ret = __rep_check_alloc(dbenv, t, 1)) != 0)
+ return (ret);
+
+ t->array[t->npages].flags = LSN_PAGE_NOLOCK;
+ t->array[t->npages].lsn = *lsnp;
+ t->array[t->npages].fid = DB_LOGFILEID_INVALID;
+ memset(&t->array[t->npages].pgdesc, 0,
+ sizeof(t->array[t->npages].pgdesc));
+
+ t->npages++;
+
+ return (0);
+}
+
#ifdef DEBUG
/*
* __db_txnlist_print --
@@ -954,25 +1379,21 @@ __db_txnlist_print(listp)
{
DB_TXNHEAD *hp;
DB_TXNLIST *p;
+ int i;
+ char *stats[] = { "ok", "commit", "prepare", "abort", "notfound",
+ "ignore", "expected", "unexpected" };
hp = (DB_TXNHEAD *)listp;
printf("Maxid: %lu Generation: %lu\n",
(u_long)hp->maxid, (u_long)hp->generation);
- for (p = LIST_FIRST(&hp->head); p != NULL; p = LIST_NEXT(p, links)) {
+ for (i = 0; i < hp->nslots; i++)
+ for (p = LIST_FIRST(&hp->head[i]); p != NULL; p = LIST_NEXT(p, links)) {
switch (p->type) {
case TXNLIST_TXNID:
- printf("TXNID: %lu(%lu)\n",
- (u_long)p->u.t.txnid, (u_long)p->u.t.generation);
- break;
- case TXNLIST_DELETE:
- printf("FILE: %s id=%d ops=%d %s %s\n",
- p->u.d.fname, p->u.d.fileid, p->u.d.count,
- F_ISSET(&p->u.d, TXNLIST_FLAG_DELETED) ?
- "(deleted)" : "(missing)",
- F_ISSET(&p->u.d, TXNLIST_FLAG_CLOSED) ?
- "(closed)" : "(open)");
-
+ printf("TXNID: %lx(%lu): %s\n",
+ (u_long)p->u.t.txnid, (u_long)p->u.t.generation,
+ stats[p->u.t.status]);
break;
default:
printf("Unrecognized type: %d\n", p->type);
diff --git a/bdb/db/db_dup.c b/bdb/db/db_dup.c
index 6d8b2df9518..2d33d79153f 100644
--- a/bdb/db/db_dup.c
+++ b/bdb/db/db_dup.c
@@ -1,14 +1,14 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Copyright (c) 1996-2002
* Sleepycat Software. All rights reserved.
*/
#include "db_config.h"
#ifndef lint
-static const char revid[] = "$Id: db_dup.c,v 11.18 2000/11/30 00:58:32 ubell Exp $";
+static const char revid[] = "$Id: db_dup.c,v 11.32 2002/08/08 03:57:47 bostic Exp $";
#endif /* not lint */
#ifndef NO_SYSTEM_INCLUDES
@@ -18,12 +18,10 @@ static const char revid[] = "$Id: db_dup.c,v 11.18 2000/11/30 00:58:32 ubell Exp
#endif
#include "db_int.h"
-#include "db_page.h"
-#include "db_shash.h"
-#include "btree.h"
-#include "hash.h"
-#include "lock.h"
-#include "db_am.h"
+#include "dbinc/db_page.h"
+#include "dbinc/db_shash.h"
+#include "dbinc/lock.h"
+#include "dbinc/db_am.h"
/*
* __db_ditem --
@@ -39,19 +37,20 @@ __db_ditem(dbc, pagep, indx, nbytes)
{
DB *dbp;
DBT ldbt;
- db_indx_t cnt, offset;
+ db_indx_t cnt, *inp, offset;
int ret;
u_int8_t *from;
dbp = dbc->dbp;
- if (DB_LOGGING(dbc)) {
- ldbt.data = P_ENTRY(pagep, indx);
+ if (DBC_LOGGING(dbc)) {
+ ldbt.data = P_ENTRY(dbp, pagep, indx);
ldbt.size = nbytes;
- if ((ret = __db_addrem_log(dbp->dbenv, dbc->txn,
- &LSN(pagep), 0, DB_REM_DUP, dbp->log_fileid, PGNO(pagep),
+ if ((ret = __db_addrem_log(dbp, dbc->txn,
+ &LSN(pagep), 0, DB_REM_DUP, PGNO(pagep),
(u_int32_t)indx, nbytes, &ldbt, NULL, &LSN(pagep))) != 0)
return (ret);
- }
+ } else
+ LSN_NOT_LOGGED(LSN(pagep));
/*
* If there's only a single item on the page, we don't have to
@@ -63,24 +62,26 @@ __db_ditem(dbc, pagep, indx, nbytes)
return (0);
}
+ inp = P_INP(dbp, pagep);
/*
* Pack the remaining key/data items at the end of the page. Use
* memmove(3), the regions may overlap.
*/
from = (u_int8_t *)pagep + HOFFSET(pagep);
- memmove(from + nbytes, from, pagep->inp[indx] - HOFFSET(pagep));
+ DB_ASSERT((int)inp[indx] - HOFFSET(pagep) >= 0);
+ memmove(from + nbytes, from, inp[indx] - HOFFSET(pagep));
HOFFSET(pagep) += nbytes;
/* Adjust the indices' offsets. */
- offset = pagep->inp[indx];
+ offset = inp[indx];
for (cnt = 0; cnt < NUM_ENT(pagep); ++cnt)
- if (pagep->inp[cnt] < offset)
- pagep->inp[cnt] += nbytes;
+ if (inp[cnt] < offset)
+ inp[cnt] += nbytes;
/* Shift the indices down. */
--NUM_ENT(pagep);
if (indx != NUM_ENT(pagep))
- memmove(&pagep->inp[indx], &pagep->inp[indx + 1],
+ memmove(&inp[indx], &inp[indx + 1],
sizeof(db_indx_t) * (NUM_ENT(pagep) - indx));
return (0);
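__db_ditem is the classic slotted-page delete: one memmove closes the hole, every slot offset that pointed below the deleted item is bumped by its size, and the slot array is shifted down. The same steps over a simplified in-memory page, which may make the offset arithmetic easier to follow (the layout here is an illustration, not the real PAGE structure):

#include <stdint.h>
#include <string.h>

struct slot_page {
    uint8_t  data[4096];    /* raw page image; items packed at the end */
    uint16_t hoffset;       /* low boundary of the packed item region */
    uint16_t nent;          /* number of slots in use */
    uint16_t inp[256];      /* slot offsets into data[] */
};

static void ditem_sketch(struct slot_page *p, uint16_t indx, uint16_t nbytes)
{
    uint16_t cnt, offset;

    /* Pack the remaining items toward the end of the page. */
    memmove(p->data + p->hoffset + nbytes, p->data + p->hoffset,
        p->inp[indx] - p->hoffset);
    p->hoffset += nbytes;

    /* Any slot that pointed below the deleted item moved up by nbytes. */
    offset = p->inp[indx];
    for (cnt = 0; cnt < p->nent; ++cnt)
        if (p->inp[cnt] < offset)
            p->inp[cnt] += nbytes;

    /* Close the gap in the slot array itself. */
    --p->nent;
    if (indx != p->nent)
        memmove(&p->inp[indx], &p->inp[indx + 1],
            sizeof(p->inp[0]) * (p->nent - indx));
}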
@@ -104,11 +105,13 @@ __db_pitem(dbc, pagep, indx, nbytes, hdr, data)
DB *dbp;
BKEYDATA bk;
DBT thdr;
+ db_indx_t *inp;
int ret;
u_int8_t *p;
- if (nbytes > P_FREESPACE(pagep)) {
- DB_ASSERT(nbytes <= P_FREESPACE(pagep));
+ dbp = dbc->dbp;
+ if (nbytes > P_FREESPACE(dbp, pagep)) {
+ DB_ASSERT(nbytes <= P_FREESPACE(dbp, pagep));
return (EINVAL);
}
/*
@@ -128,12 +131,13 @@ __db_pitem(dbc, pagep, indx, nbytes, hdr, data)
* the passed in header sizes must be adjusted for the structure's
* placeholder for the trailing variable-length data field.
*/
- dbp = dbc->dbp;
- if (DB_LOGGING(dbc))
- if ((ret = __db_addrem_log(dbp->dbenv, dbc->txn,
- &LSN(pagep), 0, DB_ADD_DUP, dbp->log_fileid, PGNO(pagep),
+ if (DBC_LOGGING(dbc)) {
+ if ((ret = __db_addrem_log(dbp, dbc->txn,
+ &LSN(pagep), 0, DB_ADD_DUP, PGNO(pagep),
(u_int32_t)indx, nbytes, hdr, data, &LSN(pagep))) != 0)
return (ret);
+ } else
+ LSN_NOT_LOGGED(LSN(pagep));
if (hdr == NULL) {
B_TSET(bk.type, B_KEYDATA, 0);
@@ -143,16 +147,17 @@ __db_pitem(dbc, pagep, indx, nbytes, hdr, data)
thdr.size = SSZA(BKEYDATA, data);
hdr = &thdr;
}
+ inp = P_INP(dbp, pagep);
/* Adjust the index table, then put the item on the page. */
if (indx != NUM_ENT(pagep))
- memmove(&pagep->inp[indx + 1], &pagep->inp[indx],
+ memmove(&inp[indx + 1], &inp[indx],
sizeof(db_indx_t) * (NUM_ENT(pagep) - indx));
HOFFSET(pagep) -= nbytes;
- pagep->inp[indx] = HOFFSET(pagep);
+ inp[indx] = HOFFSET(pagep);
++NUM_ENT(pagep);
- p = P_ENTRY(pagep, indx);
+ p = P_ENTRY(dbp, pagep, indx);
memcpy(p, hdr->data, hdr->size);
if (data != NULL)
memcpy(p + hdr->size, data->data, data->size);
@@ -177,13 +182,16 @@ __db_relink(dbc, add_rem, pagep, new_next, needlock)
PAGE *np, *pp;
DB_LOCK npl, ppl;
DB_LSN *nlsnp, *plsnp, ret_lsn;
+ DB_MPOOLFILE *mpf;
int ret;
- ret = 0;
+ dbp = dbc->dbp;
np = pp = NULL;
- npl.off = ppl.off = LOCK_INVALID;
+ LOCK_INIT(npl);
+ LOCK_INIT(ppl);
nlsnp = plsnp = NULL;
- dbp = dbc->dbp;
+ mpf = dbp->mpf;
+ ret = 0;
/*
* Retrieve and lock the one/two pages. For a remove, we may need
@@ -194,9 +202,8 @@ __db_relink(dbc, add_rem, pagep, new_next, needlock)
if (needlock && (ret = __db_lget(dbc,
0, pagep->next_pgno, DB_LOCK_WRITE, 0, &npl)) != 0)
goto err;
- if ((ret = memp_fget(dbp->mpf,
- &pagep->next_pgno, 0, &np)) != 0) {
- (void)__db_pgerr(dbp, pagep->next_pgno);
+ if ((ret = mpf->get(mpf, &pagep->next_pgno, 0, &np)) != 0) {
+ __db_pgerr(dbp, pagep->next_pgno, ret);
goto err;
}
nlsnp = &np->lsn;
@@ -205,28 +212,27 @@ __db_relink(dbc, add_rem, pagep, new_next, needlock)
if (needlock && (ret = __db_lget(dbc,
0, pagep->prev_pgno, DB_LOCK_WRITE, 0, &ppl)) != 0)
goto err;
- if ((ret = memp_fget(dbp->mpf,
- &pagep->prev_pgno, 0, &pp)) != 0) {
- (void)__db_pgerr(dbp, pagep->next_pgno);
+ if ((ret = mpf->get(mpf, &pagep->prev_pgno, 0, &pp)) != 0) {
+ __db_pgerr(dbp, pagep->next_pgno, ret);
goto err;
}
plsnp = &pp->lsn;
}
/* Log the change. */
- if (DB_LOGGING(dbc)) {
- if ((ret = __db_relink_log(dbp->dbenv, dbc->txn,
- &ret_lsn, 0, add_rem, dbp->log_fileid,
- pagep->pgno, &pagep->lsn,
- pagep->prev_pgno, plsnp, pagep->next_pgno, nlsnp)) != 0)
+ if (DBC_LOGGING(dbc)) {
+ if ((ret = __db_relink_log(dbp, dbc->txn, &ret_lsn, 0, add_rem,
+ pagep->pgno, &pagep->lsn, pagep->prev_pgno, plsnp,
+ pagep->next_pgno, nlsnp)) != 0)
goto err;
- if (np != NULL)
- np->lsn = ret_lsn;
- if (pp != NULL)
- pp->lsn = ret_lsn;
- if (add_rem == DB_REM_PAGE)
- pagep->lsn = ret_lsn;
- }
+ } else
+ LSN_NOT_LOGGED(ret_lsn);
+ if (np != NULL)
+ np->lsn = ret_lsn;
+ if (pp != NULL)
+ pp->lsn = ret_lsn;
+ if (add_rem == DB_REM_PAGE)
+ pagep->lsn = ret_lsn;
/*
* Modify and release the two pages.
@@ -242,10 +248,10 @@ __db_relink(dbc, add_rem, pagep, new_next, needlock)
else
np->prev_pgno = pagep->prev_pgno;
if (new_next == NULL)
- ret = memp_fput(dbp->mpf, np, DB_MPOOL_DIRTY);
+ ret = mpf->put(mpf, np, DB_MPOOL_DIRTY);
else {
*new_next = np;
- ret = memp_fset(dbp->mpf, np, DB_MPOOL_DIRTY);
+ ret = mpf->set(mpf, np, DB_MPOOL_DIRTY);
}
if (ret != 0)
goto err;
@@ -256,7 +262,7 @@ __db_relink(dbc, add_rem, pagep, new_next, needlock)
if (pp != NULL) {
pp->next_pgno = pagep->next_pgno;
- if ((ret = memp_fput(dbp->mpf, pp, DB_MPOOL_DIRTY)) != 0)
+ if ((ret = mpf->put(mpf, pp, DB_MPOOL_DIRTY)) != 0)
goto err;
if (needlock)
(void)__TLPUT(dbc, ppl);
@@ -264,12 +270,12 @@ __db_relink(dbc, add_rem, pagep, new_next, needlock)
return (0);
err: if (np != NULL)
- (void)memp_fput(dbp->mpf, np, 0);
- if (needlock && npl.off != LOCK_INVALID)
+ (void)mpf->put(mpf, np, 0);
+ if (needlock)
(void)__TLPUT(dbc, npl);
if (pp != NULL)
- (void)memp_fput(dbp->mpf, pp, 0);
- if (needlock && ppl.off != LOCK_INVALID)
+ (void)mpf->put(mpf, pp, 0);
+ if (needlock)
(void)__TLPUT(dbc, ppl);
return (ret);
}
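
The db_dup.c hunks above replace the old memp_fget/memp_fput entry points with the DB_MPOOLFILE method table (mpf->get/mpf->put) and the DB_LOGGING test with DBC_LOGGING plus an LSN_NOT_LOGGED fallback. A minimal sketch of the new mpool calling convention follows; the helper name and the empty "modify" step are invented for illustration and are not part of the patch.

#include <db.h>

/*
 * fetch_and_dirty_page --
 *	Hypothetical helper, not code from the patch: shows the DB_MPOOLFILE
 *	method calls the hunks above substitute for memp_fget/memp_fput.
 */
static int
fetch_and_dirty_page(dbp, pgno)
	DB *dbp;
	db_pgno_t pgno;
{
	DB_MPOOLFILE *mpf;
	void *h;
	int ret;

	mpf = dbp->mpf;

	/* mpf->get replaces memp_fget(dbp->mpf, ...). */
	if ((ret = mpf->get(mpf, &pgno, 0, &h)) != 0)
		return (ret);

	/* ... modify the page in place here ... */

	/* mpf->put replaces memp_fput; DB_MPOOL_DIRTY schedules the write. */
	return (mpf->put(mpf, h, DB_MPOOL_DIRTY));
}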
diff --git a/bdb/db/db_iface.c b/bdb/db/db_iface.c
index 3548a2527bb..b518c3b14b2 100644
--- a/bdb/db/db_iface.c
+++ b/bdb/db/db_iface.c
@@ -1,55 +1,69 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Copyright (c) 1996-2002
* Sleepycat Software. All rights reserved.
*/
#include "db_config.h"
#ifndef lint
-static const char revid[] = "$Id: db_iface.c,v 11.34 2001/01/11 18:19:51 bostic Exp $";
+static const char revid[] = "$Id: db_iface.c,v 11.77 2002/08/08 03:57:47 bostic Exp $";
#endif /* not lint */
#ifndef NO_SYSTEM_INCLUDES
#include <sys/types.h>
-
-#include <errno.h>
#endif
#include "db_int.h"
-#include "db_page.h"
-#include "db_am.h"
-#include "btree.h"
+#include "dbinc/db_page.h"
+#include "dbinc/db_am.h"
static int __db_curinval __P((const DB_ENV *));
+static int __db_fnl __P((const DB_ENV *, const char *));
static int __db_rdonly __P((const DB_ENV *, const char *));
static int __dbt_ferr __P((const DB *, const char *, const DBT *, int));
/*
+ * A database should be required to be readonly if it's been explicitly
+ * specified as such or if we're a client in a replicated environment and
+ * we don't have the special "client-writer" designation.
+ */
+#define IS_READONLY(dbp) \
+ (F_ISSET(dbp, DB_AM_RDONLY) || \
+ (F_ISSET((dbp)->dbenv, DB_ENV_REP_CLIENT) && \
+ !F_ISSET((dbp), DB_AM_CL_WRITER)))
+
+/*
* __db_cursorchk --
* Common cursor argument checking routine.
*
- * PUBLIC: int __db_cursorchk __P((const DB *, u_int32_t, int));
+ * PUBLIC: int __db_cursorchk __P((const DB *, u_int32_t));
*/
int
-__db_cursorchk(dbp, flags, isrdonly)
+__db_cursorchk(dbp, flags)
const DB *dbp;
u_int32_t flags;
- int isrdonly;
{
+ /* DB_DIRTY_READ is the only valid bit-flag and requires locking. */
+ if (LF_ISSET(DB_DIRTY_READ)) {
+ if (!LOCKING_ON(dbp->dbenv))
+ return (__db_fnl(dbp->dbenv, "DB->cursor"));
+ LF_CLR(DB_DIRTY_READ);
+ }
+
/* Check for invalid function flags. */
switch (flags) {
case 0:
break;
case DB_WRITECURSOR:
- if (isrdonly)
+ if (IS_READONLY(dbp))
return (__db_rdonly(dbp->dbenv, "DB->cursor"));
if (!CDB_LOCKING(dbp->dbenv))
return (__db_ferr(dbp->dbenv, "DB->cursor", 0));
break;
case DB_WRITELOCK:
- if (isrdonly)
+ if (IS_READONLY(dbp))
return (__db_rdonly(dbp->dbenv, "DB->cursor"));
break;
default:
@@ -90,22 +104,25 @@ __db_ccountchk(dbp, flags, isvalid)
* __db_cdelchk --
* Common cursor delete argument checking routine.
*
- * PUBLIC: int __db_cdelchk __P((const DB *, u_int32_t, int, int));
+ * PUBLIC: int __db_cdelchk __P((const DB *, u_int32_t, int));
*/
int
-__db_cdelchk(dbp, flags, isrdonly, isvalid)
+__db_cdelchk(dbp, flags, isvalid)
const DB *dbp;
u_int32_t flags;
- int isrdonly, isvalid;
+ int isvalid;
{
/* Check for changes to a read-only tree. */
- if (isrdonly)
+ if (IS_READONLY(dbp))
return (__db_rdonly(dbp->dbenv, "c_del"));
/* Check for invalid function flags. */
switch (flags) {
case 0:
break;
+ case DB_UPDATE_SECONDARY:
+ DB_ASSERT(F_ISSET(dbp, DB_AM_SECONDARY));
+ break;
default:
return (__db_ferr(dbp->dbenv, "DBcursor->c_del", 0));
}
@@ -130,7 +147,7 @@ __db_cgetchk(dbp, key, data, flags, isvalid)
u_int32_t flags;
int isvalid;
{
- int ret;
+ int dirty, multi, ret;
/*
* Check for read-modify-write validity. DB_RMW doesn't make sense
@@ -140,44 +157,68 @@ __db_cgetchk(dbp, key, data, flags, isvalid)
* If this changes, confirm that DB does not itself set the DB_RMW
* flag in a path where CDB may have been configured.
*/
- if (LF_ISSET(DB_RMW)) {
- if (!LOCKING_ON(dbp->dbenv)) {
- __db_err(dbp->dbenv,
- "the DB_RMW flag requires locking");
- return (EINVAL);
- }
- LF_CLR(DB_RMW);
+ dirty = 0;
+ if (LF_ISSET(DB_DIRTY_READ | DB_RMW)) {
+ if (!LOCKING_ON(dbp->dbenv))
+ return (__db_fnl(dbp->dbenv, "DBcursor->c_get"));
+ if (LF_ISSET(DB_DIRTY_READ))
+ dirty = 1;
+ LF_CLR(DB_DIRTY_READ | DB_RMW);
+ }
+
+ multi = 0;
+ if (LF_ISSET(DB_MULTIPLE | DB_MULTIPLE_KEY)) {
+ multi = 1;
+ if (LF_ISSET(DB_MULTIPLE) && LF_ISSET(DB_MULTIPLE_KEY))
+ goto multi_err;
+ LF_CLR(DB_MULTIPLE | DB_MULTIPLE_KEY);
}
/* Check for invalid function flags. */
switch (flags) {
case DB_CONSUME:
case DB_CONSUME_WAIT:
+ if (dirty) {
+ __db_err(dbp->dbenv,
+ "DB_DIRTY_READ is not supported with DB_CONSUME or DB_CONSUME_WAIT");
+ return (EINVAL);
+ }
if (dbp->type != DB_QUEUE)
goto err;
break;
case DB_CURRENT:
case DB_FIRST:
case DB_GET_BOTH:
- case DB_LAST:
+ case DB_GET_BOTH_RANGE:
case DB_NEXT:
case DB_NEXT_DUP:
case DB_NEXT_NODUP:
- case DB_PREV:
- case DB_PREV_NODUP:
case DB_SET:
case DB_SET_RANGE:
break;
+ case DB_LAST:
+ case DB_PREV:
+ case DB_PREV_NODUP:
+ if (multi)
+multi_err: return (__db_ferr(dbp->dbenv, "DBcursor->c_get", 1));
+ break;
case DB_GET_BOTHC:
if (dbp->type == DB_QUEUE)
goto err;
break;
case DB_GET_RECNO:
- if (!F_ISSET(dbp, DB_BT_RECNUM))
+ /*
+ * The one situation in which this might be legal with a
+ * non-RECNUM dbp is if dbp is a secondary and its primary is
+ * DB_AM_RECNUM.
+ */
+ if (!F_ISSET(dbp, DB_AM_RECNUM) &&
+ (!F_ISSET(dbp, DB_AM_SECONDARY) ||
+ !F_ISSET(dbp->s_primary, DB_AM_RECNUM)))
goto err;
break;
case DB_SET_RECNO:
- if (!F_ISSET(dbp, DB_BT_RECNUM))
+ if (!F_ISSET(dbp, DB_AM_RECNUM))
goto err;
break;
default:
@@ -190,11 +231,24 @@ err: return (__db_ferr(dbp->dbenv, "DBcursor->c_get", 0));
if ((ret = __dbt_ferr(dbp, "data", data, 0)) != 0)
return (ret);
+ if (multi && !F_ISSET(data, DB_DBT_USERMEM)) {
+ __db_err(dbp->dbenv,
+ "DB_MULTIPLE(_KEY) requires that DB_DBT_USERMEM be set");
+ return (EINVAL);
+ }
+ if (multi &&
+ (F_ISSET(key, DB_DBT_PARTIAL) || F_ISSET(data, DB_DBT_PARTIAL))) {
+ __db_err(dbp->dbenv,
+ "DB_DBT_PARTIAL forbidden with DB_MULTIPLE(_KEY)");
+ return (EINVAL);
+ }
+
/*
- * The cursor must be initialized for DB_CURRENT or DB_NEXT_DUP,
- * return EINVAL for an invalid cursor, otherwise 0.
+ * The cursor must be initialized for DB_CURRENT, DB_GET_RECNO and
+ * DB_NEXT_DUP. Return EINVAL for an invalid cursor, otherwise 0.
*/
- if (isvalid || (flags != DB_CURRENT && flags != DB_NEXT_DUP))
+ if (isvalid || (flags != DB_CURRENT &&
+ flags != DB_GET_RECNO && flags != DB_NEXT_DUP))
return (0);
return (__db_curinval(dbp->dbenv));
@@ -205,24 +259,35 @@ err: return (__db_ferr(dbp->dbenv, "DBcursor->c_get", 0));
* Common cursor put argument checking routine.
*
* PUBLIC: int __db_cputchk __P((const DB *,
- * PUBLIC: const DBT *, DBT *, u_int32_t, int, int));
+ * PUBLIC: const DBT *, DBT *, u_int32_t, int));
*/
int
-__db_cputchk(dbp, key, data, flags, isrdonly, isvalid)
+__db_cputchk(dbp, key, data, flags, isvalid)
const DB *dbp;
const DBT *key;
DBT *data;
u_int32_t flags;
- int isrdonly, isvalid;
+ int isvalid;
{
int key_flags, ret;
key_flags = 0;
/* Check for changes to a read-only tree. */
- if (isrdonly)
+ if (IS_READONLY(dbp))
return (__db_rdonly(dbp->dbenv, "c_put"));
+ /* Check for puts on a secondary. */
+ if (F_ISSET(dbp, DB_AM_SECONDARY)) {
+ if (flags == DB_UPDATE_SECONDARY)
+ flags = DB_KEYLAST;
+ else {
+ __db_err(dbp->dbenv,
+ "DBcursor->c_put forbidden on secondary indices");
+ return (EINVAL);
+ }
+ }
+
/* Check for invalid function flags. */
switch (flags) {
case DB_AFTER:
@@ -238,7 +303,7 @@ __db_cputchk(dbp, key, data, flags, isrdonly, isvalid)
case DB_QUEUE: /* Not permitted. */
goto err;
case DB_RECNO: /* Only with mutable record numbers. */
- if (!F_ISSET(dbp, DB_RE_RENUMBER))
+ if (!F_ISSET(dbp, DB_AM_RENUMBER))
goto err;
key_flags = 1;
break;
@@ -259,8 +324,6 @@ __db_cputchk(dbp, key, data, flags, isrdonly, isvalid)
/* FALLTHROUGH */
case DB_KEYFIRST:
case DB_KEYLAST:
- if (dbp->type == DB_QUEUE || dbp->type == DB_RECNO)
- goto err;
key_flags = 1;
break;
default:
@@ -285,48 +348,153 @@ err: return (__db_ferr(dbp->dbenv, "DBcursor->c_put", 0));
}
/*
- * __db_closechk --
- * DB->close flag check.
+ * __db_pgetchk --
+ * DB->pget flag check.
*
- * PUBLIC: int __db_closechk __P((const DB *, u_int32_t));
+ * PUBLIC: int __db_pgetchk __P((const DB *, const DBT *, DBT *, DBT *,
+ * PUBLIC: u_int32_t));
*/
int
-__db_closechk(dbp, flags)
+__db_pgetchk(dbp, skey, pkey, data, flags)
const DB *dbp;
+ const DBT *skey;
+ DBT *pkey, *data;
u_int32_t flags;
{
- /* Check for invalid function flags. */
+ int ret;
+ u_int32_t save_flags;
+
+ save_flags = flags;
+
+ if (!F_ISSET(dbp, DB_AM_SECONDARY)) {
+ __db_err(dbp->dbenv,
+ "DB->pget may only be used on secondary indices");
+ return (EINVAL);
+ }
+
+ if (LF_ISSET(DB_MULTIPLE | DB_MULTIPLE_KEY)) {
+ __db_err(dbp->dbenv,
+ "DB_MULTIPLE and DB_MULTIPLE_KEY may not be used on secondary indices");
+ return (EINVAL);
+ }
+
+ /* DB_CONSUME makes no sense on a secondary index. */
+ LF_CLR(DB_RMW);
switch (flags) {
- case 0:
- case DB_NOSYNC:
+ case DB_CONSUME:
+ case DB_CONSUME_WAIT:
+ return (__db_ferr(dbp->dbenv, "DB->pget", 0));
+ default:
+ /* __db_getchk will catch the rest. */
+ break;
+ }
+
+ /*
+ * We allow the pkey field to be NULL, so that we can make the
+ * two-DBT get calls into wrappers for the three-DBT ones.
+ */
+ if (pkey != NULL &&
+ (ret = __dbt_ferr(dbp, "primary key", pkey, 1)) != 0)
+ return (ret);
+
+ /* But the pkey field can't be NULL if we're doing a DB_GET_BOTH. */
+ if (pkey == NULL && flags == DB_GET_BOTH) {
+ __db_err(dbp->dbenv,
+ "DB_GET_BOTH on a secondary index requires a primary key");
+ return (EINVAL);
+ }
+
+ return (__db_getchk(dbp, skey, data, save_flags));
+}
+
+/*
+ * __db_cpgetchk --
+ * Secondary-index cursor get argument checking routine.
+ *
+ * PUBLIC: int __db_cpgetchk __P((const DB *,
+ * PUBLIC: DBT *, DBT *, DBT *, u_int32_t, int));
+ */
+int
+__db_cpgetchk(dbp, skey, pkey, data, flags, isvalid)
+ const DB *dbp;
+ DBT *skey, *pkey, *data;
+ u_int32_t flags;
+ int isvalid;
+{
+ int ret;
+ u_int32_t save_flags;
+
+ save_flags = flags;
+
+ if (!F_ISSET(dbp, DB_AM_SECONDARY)) {
+ __db_err(dbp->dbenv,
+ "DBcursor->c_pget may only be used on secondary indices");
+ return (EINVAL);
+ }
+
+ if (LF_ISSET(DB_MULTIPLE | DB_MULTIPLE_KEY)) {
+ __db_err(dbp->dbenv,
+ "DB_MULTIPLE and DB_MULTIPLE_KEY may not be used on secondary indices");
+ return (EINVAL);
+ }
+
+ LF_CLR(DB_RMW);
+ switch (flags) {
+ case DB_CONSUME:
+ case DB_CONSUME_WAIT:
+ /* DB_CONSUME makes no sense on a secondary index. */
+ return (__db_ferr(dbp->dbenv, "DBcursor->c_pget", 0));
+ case DB_GET_BOTH:
+ /* DB_GET_BOTH is "get both the primary and the secondary". */
+ if (pkey == NULL) {
+ __db_err(dbp->dbenv,
+ "DB_GET_BOTH requires both a secondary and a primary key");
+ return (EINVAL);
+ }
break;
default:
- return (__db_ferr(dbp->dbenv, "DB->close", 0));
+ /* __db_cgetchk will catch the rest. */
+ break;
}
- return (0);
+ /*
+ * We allow the pkey field to be NULL, so that we can make the
+ * two-DBT get calls into wrappers for the three-DBT ones.
+ */
+ if (pkey != NULL &&
+ (ret = __dbt_ferr(dbp, "primary key", pkey, 0)) != 0)
+ return (ret);
+
+ /* But the pkey field can't be NULL if we're doing a DB_GET_BOTH. */
+ if (pkey == NULL && flags == DB_GET_BOTH) {
+ __db_err(dbp->dbenv,
+ "DB_GET_BOTH on a secondary index requires a primary key");
+ return (EINVAL);
+ }
+
+ return (__db_cgetchk(dbp, skey, data, save_flags, isvalid));
}
/*
* __db_delchk --
* Common delete argument checking routine.
*
- * PUBLIC: int __db_delchk __P((const DB *, DBT *, u_int32_t, int));
+ * PUBLIC: int __db_delchk __P((const DB *, DBT *, u_int32_t));
*/
int
-__db_delchk(dbp, key, flags, isrdonly)
+__db_delchk(dbp, key, flags)
const DB *dbp;
DBT *key;
u_int32_t flags;
- int isrdonly;
{
COMPQUIET(key, NULL);
/* Check for changes to a read-only tree. */
- if (isrdonly)
+ if (IS_READONLY(dbp))
return (__db_rdonly(dbp->dbenv, "delete"));
/* Check for invalid function flags. */
+ LF_CLR(DB_AUTO_COMMIT);
switch (flags) {
case 0:
break;
@@ -350,7 +518,7 @@ __db_getchk(dbp, key, data, flags)
DBT *data;
u_int32_t flags;
{
- int ret;
+ int dirty, multi, ret;
/*
* Check for read-modify-write validity. DB_RMW doesn't make sense
@@ -360,13 +528,21 @@ __db_getchk(dbp, key, data, flags)
* If this changes, confirm that DB does not itself set the DB_RMW
* flag in a path where CDB may have been configured.
*/
- if (LF_ISSET(DB_RMW)) {
- if (!LOCKING_ON(dbp->dbenv)) {
- __db_err(dbp->dbenv,
- "the DB_RMW flag requires locking");
- return (EINVAL);
- }
- LF_CLR(DB_RMW);
+ dirty = 0;
+ if (LF_ISSET(DB_DIRTY_READ | DB_RMW)) {
+ if (!LOCKING_ON(dbp->dbenv))
+ return (__db_fnl(dbp->dbenv, "DB->get"));
+ if (LF_ISSET(DB_DIRTY_READ))
+ dirty = 1;
+ LF_CLR(DB_DIRTY_READ | DB_RMW);
+ }
+
+ multi = 0;
+ if (LF_ISSET(DB_MULTIPLE | DB_MULTIPLE_KEY)) {
+ if (LF_ISSET(DB_MULTIPLE_KEY))
+ goto multi_err;
+ multi = LF_ISSET(DB_MULTIPLE) ? 1 : 0;
+ LF_CLR(DB_MULTIPLE);
}
/* Check for invalid function flags. */
@@ -375,24 +551,48 @@ __db_getchk(dbp, key, data, flags)
case DB_GET_BOTH:
break;
case DB_SET_RECNO:
- if (!F_ISSET(dbp, DB_BT_RECNUM))
+ if (!F_ISSET(dbp, DB_AM_RECNUM))
goto err;
break;
case DB_CONSUME:
case DB_CONSUME_WAIT:
+ if (dirty) {
+ __db_err(dbp->dbenv,
+ "DB_DIRTY_READ is not supported with DB_CONSUME or DB_CONSUME_WAIT");
+ return (EINVAL);
+ }
+ if (multi)
+multi_err: return (__db_ferr(dbp->dbenv, "DB->get", 1));
if (dbp->type == DB_QUEUE)
break;
- /* Fall through */
+ /* FALLTHROUGH */
default:
err: return (__db_ferr(dbp->dbenv, "DB->get", 0));
}
- /* Check for invalid key/data flags. */
+ /*
+ * Check for invalid key/data flags.
+ *
+ * XXX: Dave Krinsky
+ * Remember to modify this when we fix the flag-returning problem.
+ */
if ((ret = __dbt_ferr(dbp, "key", key, flags == DB_SET_RECNO)) != 0)
return (ret);
if ((ret = __dbt_ferr(dbp, "data", data, 1)) != 0)
return (ret);
+ if (multi && !F_ISSET(data, DB_DBT_USERMEM)) {
+ __db_err(dbp->dbenv,
+ "DB_MULTIPLE requires that DB_DBT_USERMEM be set");
+ return (EINVAL);
+ }
+ if (multi &&
+ (F_ISSET(key, DB_DBT_PARTIAL) || F_ISSET(data, DB_DBT_PARTIAL))) {
+ __db_err(dbp->dbenv,
+ "DB_DBT_PARTIAL forbidden with DB_MULTIPLE(_KEY)");
+ return (EINVAL);
+ }
+
return (0);
}
@@ -449,13 +649,11 @@ __db_joingetchk(dbp, key, flags)
u_int32_t flags;
{
- if (LF_ISSET(DB_RMW)) {
- if (!LOCKING_ON(dbp->dbenv)) {
- __db_err(dbp->dbenv,
- "the DB_RMW flag requires locking");
- return (EINVAL);
- }
- LF_CLR(DB_RMW);
+ if (LF_ISSET(DB_DIRTY_READ | DB_RMW)) {
+ if (!LOCKING_ON(dbp->dbenv))
+ return (__db_fnl(dbp->dbenv, "DBcursor->c_get"));
+
+ LF_CLR(DB_DIRTY_READ | DB_RMW);
}
switch (flags) {
@@ -491,23 +689,32 @@ __db_joingetchk(dbp, key, flags)
* Common put argument checking routine.
*
* PUBLIC: int __db_putchk
- * PUBLIC: __P((const DB *, DBT *, const DBT *, u_int32_t, int, int));
+ * PUBLIC: __P((const DB *, DBT *, const DBT *, u_int32_t, int));
*/
int
-__db_putchk(dbp, key, data, flags, isrdonly, isdup)
+__db_putchk(dbp, key, data, flags, isdup)
const DB *dbp;
DBT *key;
const DBT *data;
u_int32_t flags;
- int isrdonly, isdup;
+ int isdup;
{
- int ret;
+ int ret, returnkey;
+
+ returnkey = 0;
/* Check for changes to a read-only tree. */
- if (isrdonly)
+ if (IS_READONLY(dbp))
return (__db_rdonly(dbp->dbenv, "put"));
+ /* Check for puts on a secondary. */
+ if (F_ISSET(dbp, DB_AM_SECONDARY)) {
+ __db_err(dbp->dbenv, "DB->put forbidden on secondary indices");
+ return (EINVAL);
+ }
+
/* Check for invalid function flags. */
+ LF_CLR(DB_AUTO_COMMIT);
switch (flags) {
case 0:
case DB_NOOVERWRITE:
@@ -515,6 +722,7 @@ __db_putchk(dbp, key, data, flags, isrdonly, isdup)
case DB_APPEND:
if (dbp->type != DB_RECNO && dbp->type != DB_QUEUE)
goto err;
+ returnkey = 1;
break;
case DB_NODUPDATA:
if (F_ISSET(dbp, DB_AM_DUPSORT))
@@ -525,7 +733,7 @@ err: return (__db_ferr(dbp->dbenv, "DB->put", 0));
}
/* Check for invalid key/data flags. */
- if ((ret = __dbt_ferr(dbp, "key", key, 0)) != 0)
+ if ((ret = __dbt_ferr(dbp, "key", key, returnkey)) != 0)
return (ret);
if ((ret = __dbt_ferr(dbp, "data", data, 0)) != 0)
return (ret);
@@ -541,28 +749,6 @@ err: return (__db_ferr(dbp->dbenv, "DB->put", 0));
}
/*
- * __db_removechk --
- * DB->remove flag check.
- *
- * PUBLIC: int __db_removechk __P((const DB *, u_int32_t));
- */
-int
-__db_removechk(dbp, flags)
- const DB *dbp;
- u_int32_t flags;
-{
- /* Check for invalid function flags. */
- switch (flags) {
- case 0:
- break;
- default:
- return (__db_ferr(dbp->dbenv, "DB->remove", 0));
- }
-
- return (0);
-}
-
-/*
* __db_statchk --
* Common stat argument checking routine.
*
@@ -576,12 +762,13 @@ __db_statchk(dbp, flags)
/* Check for invalid function flags. */
switch (flags) {
case 0:
- case DB_CACHED_COUNTS:
+ case DB_FAST_STAT:
+ case DB_CACHED_COUNTS: /* Deprecated and undocumented. */
break;
- case DB_RECORDCOUNT:
+ case DB_RECORDCOUNT: /* Deprecated and undocumented. */
if (dbp->type == DB_RECNO)
break;
- if (dbp->type == DB_BTREE && F_ISSET(dbp, DB_BT_RECNUM))
+ if (dbp->type == DB_BTREE && F_ISSET(dbp, DB_AM_RECNUM))
break;
goto err;
default:
@@ -636,9 +823,9 @@ __dbt_ferr(dbp, name, dbt, check_thread)
* database and then specify that same DBT as a key to a primary
* database, without having to clear flags.
*/
- if ((ret = __db_fchk(dbenv, name, dbt->flags,
- DB_DBT_MALLOC | DB_DBT_DUPOK |
- DB_DBT_REALLOC | DB_DBT_USERMEM | DB_DBT_PARTIAL)) != 0)
+ if ((ret = __db_fchk(dbenv, name, dbt->flags, DB_DBT_APPMALLOC |
+ DB_DBT_MALLOC | DB_DBT_DUPOK | DB_DBT_REALLOC | DB_DBT_USERMEM |
+ DB_DBT_PARTIAL)) != 0)
return (ret);
switch (F_ISSET(dbt, DB_DBT_MALLOC | DB_DBT_REALLOC | DB_DBT_USERMEM)) {
case 0:
@@ -674,6 +861,20 @@ __db_rdonly(dbenv, name)
}
/*
+ * __db_fnl --
+ * Common flag-needs-locking message.
+ */
+static int
+__db_fnl(dbenv, name)
+ const DB_ENV *dbenv;
+ const char *name;
+{
+ __db_err(dbenv,
+ "%s: the DB_DIRTY_READ and DB_RMW flags require locking", name);
+ return (EINVAL);
+}
+
+/*
* __db_curinval
* Report that a cursor is in an invalid state.
*/
@@ -685,3 +886,98 @@ __db_curinval(dbenv)
"Cursor position must be set before performing this operation");
return (EINVAL);
}
+
+/*
+ * __db_secondary_corrupt --
+ * Report that a secondary index appears corrupt, as it has a record
+ * that does not correspond to a record in the primary.
+ *
+ * PUBLIC: int __db_secondary_corrupt __P((DB *));
+ */
+int
+__db_secondary_corrupt(dbp)
+ DB *dbp;
+{
+
+ __db_err(dbp->dbenv,
+ "Secondary index corrupt: item in secondary not found in primary");
+ return (DB_SECONDARY_BAD);
+}
+
+/*
+ * __db_associatechk --
+ * Argument checking routine for DB->associate().
+ *
+ * PUBLIC: int __db_associatechk __P((DB *, DB *,
+ * PUBLIC: int (*)(DB *, const DBT *, const DBT *, DBT *), u_int32_t));
+ */
+int
+__db_associatechk(dbp, sdbp, callback, flags)
+ DB *dbp, *sdbp;
+ int (*callback) __P((DB *, const DBT *, const DBT *, DBT *));
+ u_int32_t flags;
+{
+ DB_ENV *dbenv;
+
+ dbenv = dbp->dbenv;
+
+ if (F_ISSET(sdbp, DB_AM_SECONDARY)) {
+ __db_err(dbenv,
+ "Secondary index handles may not be re-associated");
+ return (EINVAL);
+ }
+ if (F_ISSET(dbp, DB_AM_SECONDARY)) {
+ __db_err(dbenv,
+ "Secondary indices may not be used as primary databases");
+ return (EINVAL);
+ }
+ if (F_ISSET(dbp, DB_AM_DUP)) {
+ __db_err(dbenv,
+ "Primary databases may not be configured with duplicates");
+ return (EINVAL);
+ }
+ if (F_ISSET(dbp, DB_AM_RENUMBER)) {
+ __db_err(dbenv,
+ "Renumbering recno databases may not be used as primary databases");
+ return (EINVAL);
+ }
+ if (callback == NULL &&
+ (!F_ISSET(dbp, DB_AM_RDONLY) || !F_ISSET(sdbp, DB_AM_RDONLY))) {
+ __db_err(dbenv,
+ "Callback function may be NULL only when database handles are read-only");
+ return (EINVAL);
+ }
+
+ return (__db_fchk(dbenv,
+ "DB->associate", flags, DB_CREATE | DB_AUTO_COMMIT));
+}
+
+/*
+ * __db_txn_auto --
+ * Handle DB_AUTO_COMMIT initialization.
+ *
+ * PUBLIC: int __db_txn_auto __P((DB *, DB_TXN **));
+ */
+int
+__db_txn_auto(dbp, txnidp)
+ DB *dbp;
+ DB_TXN **txnidp;
+{
+ DB_ENV *dbenv;
+
+ dbenv = dbp->dbenv;
+
+ if (*txnidp != NULL) {
+ __db_err(dbenv,
+ "DB_AUTO_COMMIT may not be specified along with a transaction handle");
+ return (EINVAL);
+ }
+
+ if (!TXN_ON(dbenv)) {
+ __db_err(dbenv,
+ "DB_AUTO_COMMIT may not be specified in non-transactional environment");
+ return (EINVAL);
+ }
+
+ return (dbenv->txn_begin(dbenv, NULL, txnidp, 0));
+}
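
The db_iface.c changes centralize the secondary-index and DB_AUTO_COMMIT argument checks (__db_associatechk, __db_pgetchk, __db_txn_auto). A hedged usage sketch of the application calls those checks guard is below, assuming the 4.1 txn-taking DB->associate and DB->pget signatures; the record layout, key values and function names are invented for illustration only.

#include <string.h>

#include <db.h>

/*
 * last_name_callback --
 *	Invented secondary-key extractor; the argument order matches the
 *	callback type checked by __db_associatechk above.
 */
static int
last_name_callback(sdbp, pkey, pdata, skey)
	DB *sdbp;
	const DBT *pkey, *pdata;
	DBT *skey;
{
	memset(skey, 0, sizeof(DBT));
	skey->data = (u_int8_t *)pdata->data + 4;	/* invented offset */
	skey->size = 16;				/* invented length */
	return (0);
}

static int
open_and_query_secondary(primary, secondary)
	DB *primary, *secondary;
{
	DBT skey, pkey, data;
	int ret;

	/*
	 * DB_AUTO_COMMIT makes __db_txn_auto wrap the associate in its own
	 * transaction, so no DB_TXN handle may be passed alongside it.
	 */
	if ((ret = primary->associate(primary,
	    NULL, secondary, last_name_callback, DB_AUTO_COMMIT)) != 0)
		return (ret);

	/* DB->pget returns the primary key along with the primary data. */
	memset(&skey, 0, sizeof(skey));
	memset(&pkey, 0, sizeof(pkey));
	memset(&data, 0, sizeof(data));
	skey.data = "Smith";				/* invented key */
	skey.size = 5;
	return (secondary->pget(secondary, NULL, &skey, &pkey, &data, 0));
}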
diff --git a/bdb/db/db_join.c b/bdb/db/db_join.c
index 881dedde0fc..6281b1a8383 100644
--- a/bdb/db/db_join.c
+++ b/bdb/db/db_join.c
@@ -1,14 +1,14 @@
-/*-
+/*
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1998, 1999, 2000
+ * Copyright (c) 1998-2002
* Sleepycat Software. All rights reserved.
*/
#include "db_config.h"
#ifndef lint
-static const char revid[] = "$Id: db_join.c,v 11.31 2000/12/20 22:41:54 krinsky Exp $";
+static const char revid[] = "$Id: db_join.c,v 11.55 2002/08/08 03:57:47 bostic Exp $";
#endif /* not lint */
#ifndef NO_SYSTEM_INCLUDES
@@ -19,16 +19,17 @@ static const char revid[] = "$Id: db_join.c,v 11.31 2000/12/20 22:41:54 krinsky
#endif
#include "db_int.h"
-#include "db_page.h"
-#include "db_join.h"
-#include "db_am.h"
-#include "btree.h"
+#include "dbinc/db_page.h"
+#include "dbinc/db_join.h"
+#include "dbinc/btree.h"
static int __db_join_close __P((DBC *));
static int __db_join_cmp __P((const void *, const void *));
static int __db_join_del __P((DBC *, u_int32_t));
static int __db_join_get __P((DBC *, DBT *, DBT *, u_int32_t));
-static int __db_join_getnext __P((DBC *, DBT *, DBT *, u_int32_t));
+static int __db_join_getnext __P((DBC *, DBT *, DBT *, u_int32_t, u_int32_t));
+static int __db_join_primget __P((DB *,
+ DB_TXN *, u_int32_t, DBT *, DBT *, u_int32_t));
static int __db_join_put __P((DBC *, DBT *, DBT *, u_int32_t));
/*
@@ -84,7 +85,8 @@ __db_join(primary, curslist, dbcp, flags)
DBC *dbc;
JOIN_CURSOR *jc;
int ret;
- u_int32_t i, ncurs, nslots;
+ u_int32_t i;
+ size_t ncurs, nslots;
COMPQUIET(nslots, 0);
@@ -104,11 +106,13 @@ __db_join(primary, curslist, dbcp, flags)
1, sizeof(JOIN_CURSOR), &jc)) != 0)
goto err;
- if ((ret = __os_malloc(dbenv, 256, NULL, &jc->j_key.data)) != 0)
+ if ((ret = __os_malloc(dbenv, 256, &jc->j_key.data)) != 0)
goto err;
jc->j_key.ulen = 256;
F_SET(&jc->j_key, DB_DBT_USERMEM);
+ F_SET(&jc->j_rdata, DB_DBT_REALLOC);
+
for (jc->j_curslist = curslist;
*jc->j_curslist != NULL; jc->j_curslist++)
;
@@ -184,7 +188,7 @@ __db_join(primary, curslist, dbcp, flags)
jc->j_fdupcurs[i] = NULL;
jc->j_exhausted[i] = 0;
}
- jc->j_ncurs = ncurs;
+ jc->j_ncurs = (u_int32_t)ncurs;
/*
* If DB_JOIN_NOSORT is not set, optimize secondary cursors by
@@ -226,20 +230,20 @@ __db_join(primary, curslist, dbcp, flags)
err: if (jc != NULL) {
if (jc->j_curslist != NULL)
- __os_free(jc->j_curslist, nslots * sizeof(DBC *));
+ __os_free(dbenv, jc->j_curslist);
if (jc->j_workcurs != NULL) {
if (jc->j_workcurs[0] != NULL)
- __os_free(jc->j_workcurs[0], sizeof(DBC));
- __os_free(jc->j_workcurs, nslots * sizeof(DBC *));
+ __os_free(dbenv, jc->j_workcurs[0]);
+ __os_free(dbenv, jc->j_workcurs);
}
if (jc->j_fdupcurs != NULL)
- __os_free(jc->j_fdupcurs, nslots * sizeof(DBC *));
+ __os_free(dbenv, jc->j_fdupcurs);
if (jc->j_exhausted != NULL)
- __os_free(jc->j_exhausted, nslots * sizeof(u_int8_t));
- __os_free(jc, sizeof(JOIN_CURSOR));
+ __os_free(dbenv, jc->j_exhausted);
+ __os_free(dbenv, jc);
}
if (dbc != NULL)
- __os_free(dbc, sizeof(DBC));
+ __os_free(dbenv, dbc);
return (ret);
}
@@ -279,8 +283,8 @@ __db_join_get(dbc, key_arg, data_arg, flags)
DB *dbp;
DBC *cp;
JOIN_CURSOR *jc;
- int ret;
- u_int32_t i, j, operation;
+ int db_manage_data, ret;
+ u_int32_t i, j, operation, opmods;
dbp = dbc->dbp;
jc = (JOIN_CURSOR *)dbc->internal;
@@ -289,6 +293,12 @@ __db_join_get(dbc, key_arg, data_arg, flags)
operation = LF_ISSET(DB_OPFLAGS_MASK);
+ /* !!!
+ * If the set of flags here changes, check that __db_join_primget
+ * is updated to handle them properly.
+ */
+ opmods = LF_ISSET(DB_RMW | DB_DIRTY_READ);
+
if ((ret = __db_joingetchk(dbp, key_arg, flags)) != 0)
return (ret);
@@ -319,13 +329,14 @@ __db_join_get(dbc, key_arg, data_arg, flags)
goto samekey;
F_CLR(jc, JOIN_RETRY);
-retry: ret = jc->j_workcurs[0]->c_get(jc->j_workcurs[0],
- &jc->j_key, key_n, jc->j_exhausted[0] ? DB_NEXT_DUP : DB_CURRENT);
+retry: ret = jc->j_workcurs[0]->c_real_get(jc->j_workcurs[0],
+ &jc->j_key, key_n,
+ opmods | (jc->j_exhausted[0] ? DB_NEXT_DUP : DB_CURRENT));
if (ret == ENOMEM) {
jc->j_key.ulen <<= 1;
if ((ret = __os_realloc(dbp->dbenv,
- jc->j_key.ulen, NULL, &jc->j_key.data)) != 0)
+ jc->j_key.ulen, &jc->j_key.data)) != 0)
goto mem_err;
goto retry;
}
@@ -379,7 +390,7 @@ retry: ret = jc->j_workcurs[0]->c_get(jc->j_workcurs[0],
retry2: cp = jc->j_workcurs[i];
if ((ret = __db_join_getnext(cp, &jc->j_key, key_n,
- jc->j_exhausted[i])) == DB_NOTFOUND) {
+ jc->j_exhausted[i], opmods)) == DB_NOTFOUND) {
/*
* jc->j_workcurs[i] has no more of the datum we're
* interested in. Go back one cursor and get
@@ -475,7 +486,7 @@ retry2: cp = jc->j_workcurs[i];
if (ret == ENOMEM) {
jc->j_key.ulen <<= 1;
if ((ret = __os_realloc(dbp->dbenv, jc->j_key.ulen,
- NULL, &jc->j_key.data)) != 0) {
+ &jc->j_key.data)) != 0) {
mem_err: __db_err(dbp->dbenv,
"Allocation failed for join key, len = %lu",
(u_long)jc->j_key.ulen);
@@ -523,8 +534,8 @@ samekey: /*
* Get the key we tried and failed to return last time;
* it should be the current datum of all the secondary cursors.
*/
- if ((ret = jc->j_workcurs[0]->c_get(jc->j_workcurs[0],
- &jc->j_key, key_n, DB_CURRENT)) != 0)
+ if ((ret = jc->j_workcurs[0]->c_real_get(jc->j_workcurs[0],
+ &jc->j_key, key_n, DB_CURRENT | opmods)) != 0)
return (ret);
F_CLR(jc, JOIN_RETRY);
}
@@ -532,36 +543,28 @@ samekey: /*
/*
* ret == 0; we have a key to return.
*
- * If DB_DBT_USERMEM or DB_DBT_MALLOC is set, we need to
- * copy it back into the dbt we were given for the key;
- * call __db_retcopy.
- *
- * Otherwise, assert that we do not in fact need to copy anything
- * and simply proceed.
+ * If DB_DBT_USERMEM or DB_DBT_MALLOC is set, we need to copy the key
+ * back into the dbt we were given for the key; call __db_retcopy.
+ * Otherwise, assert that we do not need to copy anything and proceed.
*/
- if (F_ISSET(key_arg, DB_DBT_USERMEM) ||
- F_ISSET(key_arg, DB_DBT_MALLOC)) {
+ DB_ASSERT(F_ISSET(
+ key_arg, DB_DBT_USERMEM | DB_DBT_MALLOC) || key_n == key_arg);
+
+ if (F_ISSET(key_arg, DB_DBT_USERMEM | DB_DBT_MALLOC) &&
+ (ret = __db_retcopy(dbp->dbenv,
+ key_arg, key_n->data, key_n->size, NULL, NULL)) != 0) {
/*
- * We need to copy the key back into our original
- * datum. Do so.
+ * The retcopy failed, most commonly because we have a user
+ * buffer for the key which is too small. Set things up to
+ * retry next time, and return.
*/
- if ((ret = __db_retcopy(dbp,
- key_arg, key_n->data, key_n->size, NULL, NULL)) != 0) {
- /*
- * The retcopy failed, most commonly because we
- * have a user buffer for the key which is too small.
- * Set things up to retry next time, and return.
- */
- F_SET(jc, JOIN_RETRY);
- return (ret);
- }
- } else
- DB_ASSERT(key_n == key_arg);
+ F_SET(jc, JOIN_RETRY);
+ return (ret);
+ }
/*
- * If DB_JOIN_ITEM is
- * set, we return it; otherwise we do the lookup in the
- * primary and then return.
+ * If DB_JOIN_ITEM is set, we return it; otherwise we do the lookup
+ * in the primary and then return.
*
* Note that we use key_arg here; it is safe (and appropriate)
* to do so.
@@ -569,14 +572,45 @@ samekey: /*
if (operation == DB_JOIN_ITEM)
return (0);
- if ((ret = jc->j_primary->get(jc->j_primary,
- jc->j_curslist[0]->txn, key_arg, data_arg, 0)) != 0)
- /*
- * The get on the primary failed, most commonly because we're
- * using a user buffer that's not big enough. Flag our
- * failure so we can return the same key next time.
- */
- F_SET(jc, JOIN_RETRY);
+ /*
+ * If data_arg->flags == 0--that is, if DB is managing the
+ * data DBT's memory--it's not safe to just pass the DBT
+ * through to the primary get call, since we don't want that
+ * memory to belong to the primary DB handle (and if the primary
+ * is free-threaded, it can't anyway).
+ *
+ * Instead, use memory that is managed by the join cursor, in
+ * jc->j_rdata.
+ */
+ if (!F_ISSET(data_arg, DB_DBT_MALLOC | DB_DBT_REALLOC | DB_DBT_USERMEM))
+ db_manage_data = 1;
+ else
+ db_manage_data = 0;
+ if ((ret = __db_join_primget(jc->j_primary,
+ jc->j_curslist[0]->txn, jc->j_curslist[0]->locker, key_arg,
+ db_manage_data ? &jc->j_rdata : data_arg, opmods)) != 0) {
+ if (ret == DB_NOTFOUND)
+ /*
+ * If ret == DB_NOTFOUND, the primary and secondary
+ * are out of sync; every item in each secondary
+ * should correspond to something in the primary,
+ * or we shouldn't have done the join this way.
+ * Wail.
+ */
+ ret = __db_secondary_corrupt(jc->j_primary);
+ else
+ /*
+ * The get on the primary failed for some other
+ * reason, most commonly because we're using a user
+ * buffer that's not big enough. Flag our failure
+ * so we can return the same key next time.
+ */
+ F_SET(jc, JOIN_RETRY);
+ }
+ if (db_manage_data && ret == 0) {
+ data_arg->data = jc->j_rdata.data;
+ data_arg->size = jc->j_rdata.size;
+ }
return (ret);
}
@@ -586,12 +620,14 @@ __db_join_close(dbc)
DBC *dbc;
{
DB *dbp;
+ DB_ENV *dbenv;
JOIN_CURSOR *jc;
int ret, t_ret;
u_int32_t i;
jc = (JOIN_CURSOR *)dbc->internal;
dbp = dbc->dbp;
+ dbenv = dbp->dbenv;
ret = t_ret = 0;
/*
@@ -599,11 +635,11 @@ __db_join_close(dbc)
* must happen before any action that can fail and return, or else
* __db_close may loop indefinitely.
*/
- MUTEX_THREAD_LOCK(dbp->dbenv, dbp->mutexp);
+ MUTEX_THREAD_LOCK(dbenv, dbp->mutexp);
TAILQ_REMOVE(&dbp->join_queue, dbc, links);
- MUTEX_THREAD_UNLOCK(dbp->dbenv, dbp->mutexp);
+ MUTEX_THREAD_UNLOCK(dbenv, dbp->mutexp);
- PANIC_CHECK(dbc->dbp->dbenv);
+ PANIC_CHECK(dbenv);
/*
* Close any open scratch cursors. In each case, there may
@@ -625,13 +661,15 @@ __db_join_close(dbc)
ret = t_ret;
}
- __os_free(jc->j_exhausted, 0);
- __os_free(jc->j_curslist, 0);
- __os_free(jc->j_workcurs, 0);
- __os_free(jc->j_fdupcurs, 0);
- __os_free(jc->j_key.data, jc->j_key.ulen);
- __os_free(jc, sizeof(JOIN_CURSOR));
- __os_free(dbc, sizeof(DBC));
+ __os_free(dbenv, jc->j_exhausted);
+ __os_free(dbenv, jc->j_curslist);
+ __os_free(dbenv, jc->j_workcurs);
+ __os_free(dbenv, jc->j_fdupcurs);
+ __os_free(dbenv, jc->j_key.data);
+ if (jc->j_rdata.data != NULL)
+ __os_ufree(dbenv, jc->j_rdata.data);
+ __os_free(dbenv, jc);
+ __os_free(dbenv, dbc);
return (ret);
}
@@ -652,10 +690,10 @@ __db_join_close(dbc)
* If no matching datum exists, returns DB_NOTFOUND, else 0.
*/
static int
-__db_join_getnext(dbc, key, data, exhausted)
+__db_join_getnext(dbc, key, data, exhausted, opmods)
DBC *dbc;
DBT *key, *data;
- u_int32_t exhausted;
+ u_int32_t exhausted, opmods;
{
int ret, cmp;
DB *dbp;
@@ -667,10 +705,14 @@ __db_join_getnext(dbc, key, data, exhausted)
switch (exhausted) {
case 0:
+ /*
+ * We don't want to step on data->data; use a new
+ * DBT and malloc so we don't step on dbc's rdata memory.
+ */
memset(&ldata, 0, sizeof(DBT));
- /* We don't want to step on data->data; malloc. */
F_SET(&ldata, DB_DBT_MALLOC);
- if ((ret = dbc->c_get(dbc, key, &ldata, DB_CURRENT)) != 0)
+ if ((ret = dbc->c_real_get(dbc,
+ key, &ldata, opmods | DB_CURRENT)) != 0)
break;
cmp = func(dbp, data, &ldata);
if (cmp == 0) {
@@ -679,10 +721,10 @@ __db_join_getnext(dbc, key, data, exhausted)
* it into data, then free the buffer we malloc'ed
* above.
*/
- if ((ret = __db_retcopy(dbp, data, ldata.data,
+ if ((ret = __db_retcopy(dbp->dbenv, data, ldata.data,
ldata.size, &data->data, &data->size)) != 0)
return (ret);
- __os_free(ldata.data, 0);
+ __os_ufree(dbp->dbenv, ldata.data);
return (0);
}
@@ -691,10 +733,10 @@ __db_join_getnext(dbc, key, data, exhausted)
* dups. We just forget about ldata and free
* its buffer--data contains the value we're searching for.
*/
- __os_free(ldata.data, 0);
+ __os_ufree(dbp->dbenv, ldata.data);
/* FALLTHROUGH */
case 1:
- ret = dbc->c_get(dbc, key, data, DB_GET_BOTHC);
+ ret = dbc->c_real_get(dbc, key, data, opmods | DB_GET_BOTHC);
break;
default:
ret = EINVAL;
@@ -708,7 +750,6 @@ __db_join_getnext(dbc, key, data, exhausted)
* __db_join_cmp --
* Comparison function for sorting DBCs in cardinality order.
*/
-
static int
__db_join_cmp(a, b)
const void *a, *b;
@@ -728,3 +769,54 @@ __db_join_cmp(a, b)
return (counta - countb);
}
+
+/*
+ * __db_join_primget --
+ * Perform a DB->get in the primary, being careful not to use a new
+ * locker ID if we're doing CDB locking.
+ */
+static int
+__db_join_primget(dbp, txn, lockerid, key, data, flags)
+ DB *dbp;
+ DB_TXN *txn;
+ u_int32_t lockerid;
+ DBT *key, *data;
+ u_int32_t flags;
+{
+ DBC *dbc;
+ int dirty, ret, rmw, t_ret;
+
+ /*
+ * The only allowable flags here are the two flags copied into
+ * "opmods" in __db_join_get, DB_RMW and DB_DIRTY_READ. The former
+ * is an op on the c_get call, the latter on the cursor call.
+ * It's a DB bug if we allow any other flags down in here.
+ */
+ rmw = LF_ISSET(DB_RMW);
+ dirty = LF_ISSET(DB_DIRTY_READ);
+ LF_CLR(DB_RMW | DB_DIRTY_READ);
+ DB_ASSERT(flags == 0);
+
+ if ((ret = __db_icursor(dbp,
+ txn, dbp->type, PGNO_INVALID, 0, lockerid, &dbc)) != 0)
+ return (ret);
+
+ if (dirty ||
+ (txn != NULL && F_ISSET(txn, TXN_DIRTY_READ)))
+ F_SET(dbc, DBC_DIRTY_READ);
+ F_SET(dbc, DBC_TRANSIENT);
+
+ /*
+ * This shouldn't be necessary, thanks to the fact that join cursors
+ * swap in their own DB_DBT_REALLOC'ed buffers, but just for form's
+ * sake, we mirror what __db_get does.
+ */
+ SET_RET_MEM(dbc, dbp);
+
+ ret = dbc->c_get(dbc, key, data, DB_SET | rmw);
+
+ if ((t_ret = __db_c_close(dbc)) != 0 && ret == 0)
+ ret = t_ret;
+
+ return (ret);
+}
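
For reference, the interface __db_join and __db_join_get implement is driven from DB->join on the primary with a NULL-terminated list of secondary cursors. A minimal usage sketch, assuming the two secondary cursors have already been positioned (e.g. with c_get and DB_SET); the function name is hypothetical and not taken from the patch.

#include <string.h>

#include <db.h>

/*
 * join_example --
 *	Usage sketch only: iterate a join cursor over two positioned
 *	secondary cursors.  __db_join expects curslist to end with NULL.
 */
static int
join_example(primary, sc0, sc1)
	DB *primary;
	DBC *sc0, *sc1;
{
	DBC *curslist[3], *join_curs;
	DBT key, data;
	int ret, t_ret;

	curslist[0] = sc0;
	curslist[1] = sc1;
	curslist[2] = NULL;

	if ((ret = primary->join(primary, curslist, &join_curs, 0)) != 0)
		return (ret);

	memset(&key, 0, sizeof(key));
	memset(&data, 0, sizeof(data));

	/* Each returned key/data pair satisfies every secondary cursor. */
	while ((ret = join_curs->c_get(join_curs, &key, &data, 0)) == 0)
		;
	if (ret == DB_NOTFOUND)
		ret = 0;

	if ((t_ret = join_curs->c_close(join_curs)) != 0 && ret == 0)
		ret = t_ret;
	return (ret);
}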
diff --git a/bdb/db/db_meta.c b/bdb/db/db_meta.c
index 5b57c369454..015ef5c8fc7 100644
--- a/bdb/db/db_meta.c
+++ b/bdb/db/db_meta.c
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Copyright (c) 1996-2002
* Sleepycat Software. All rights reserved.
*/
/*
@@ -43,7 +43,7 @@
#include "db_config.h"
#ifndef lint
-static const char revid[] = "$Id: db_meta.c,v 11.26 2001/01/16 21:57:19 ubell Exp $";
+static const char revid[] = "$Id: db_meta.c,v 11.61 2002/08/08 03:57:48 bostic Exp $";
#endif /* not lint */
#ifndef NO_SYSTEM_INCLUDES
@@ -53,12 +53,37 @@ static const char revid[] = "$Id: db_meta.c,v 11.26 2001/01/16 21:57:19 ubell Ex
#endif
#include "db_int.h"
-#include "db_page.h"
-#include "db_shash.h"
-#include "lock.h"
-#include "txn.h"
-#include "db_am.h"
-#include "btree.h"
+#include "dbinc/db_page.h"
+#include "dbinc/db_shash.h"
+#include "dbinc/lock.h"
+#include "dbinc/db_am.h"
+
+static void __db_init_meta __P((void *, u_int32_t, db_pgno_t, u_int32_t));
+
+/*
+ * __db_init_meta --
+ * Helper function for __db_new that initializes the important fields in
+ * a meta-data page (used instead of P_INIT). We need to make sure that we
+ * retain the page number and LSN of the existing page.
+ */
+static void
+__db_init_meta(p, pgsize, pgno, pgtype)
+ void *p;
+ u_int32_t pgsize;
+ db_pgno_t pgno;
+ u_int32_t pgtype;
+{
+ DB_LSN save_lsn;
+ DBMETA *meta;
+
+ meta = (DBMETA *)p;
+ save_lsn = meta->lsn;
+ memset(meta, 0, sizeof(DBMETA));
+ meta->lsn = save_lsn;
+ meta->pagesize = pgsize;
+ meta->pgno = pgno;
+ meta->type = (u_int8_t)pgtype;
+}
/*
* __db_new --
@@ -75,60 +100,110 @@ __db_new(dbc, type, pagepp)
DBMETA *meta;
DB *dbp;
DB_LOCK metalock;
+ DB_LSN lsn;
+ DB_MPOOLFILE *mpf;
PAGE *h;
- db_pgno_t pgno;
- int ret;
+ db_pgno_t pgno, newnext;
+ int meta_flags, extend, ret;
- dbp = dbc->dbp;
meta = NULL;
+ meta_flags = 0;
+ dbp = dbc->dbp;
+ mpf = dbp->mpf;
h = NULL;
+ newnext = PGNO_INVALID;
pgno = PGNO_BASE_MD;
if ((ret = __db_lget(dbc,
LCK_ALWAYS, pgno, DB_LOCK_WRITE, 0, &metalock)) != 0)
goto err;
- if ((ret = memp_fget(dbp->mpf, &pgno, 0, (PAGE **)&meta)) != 0)
+ if ((ret = mpf->get(mpf, &pgno, 0, (PAGE **)&meta)) != 0)
goto err;
-
if (meta->free == PGNO_INVALID) {
- if ((ret = memp_fget(dbp->mpf, &pgno, DB_MPOOL_NEW, &h)) != 0)
- goto err;
- ZERO_LSN(h->lsn);
- h->pgno = pgno;
+ pgno = meta->last_pgno + 1;
+ ZERO_LSN(lsn);
+ extend = 1;
} else {
pgno = meta->free;
- if ((ret = memp_fget(dbp->mpf, &pgno, 0, &h)) != 0)
+ if ((ret = mpf->get(mpf, &pgno, 0, &h)) != 0)
goto err;
- meta->free = h->next_pgno;
- (void)memp_fset(dbp->mpf, (PAGE *)meta, DB_MPOOL_DIRTY);
+
+ /*
+ * We want to take the first page off the free list and
+ * then set meta->free to that page's next_pgno, but

+ * we need to log the change first.
+ */
+ newnext = h->next_pgno;
+ lsn = h->lsn;
+ extend = 0;
}
- DB_ASSERT(TYPE(h) == P_INVALID);
+ /*
+ * Log the allocation before fetching the new page. If we
+ * don't have room in the log then we don't want to tell
+ * mpool to extend the file.
+ */
+ if (DBC_LOGGING(dbc)) {
+ if ((ret = __db_pg_alloc_log(dbp, dbc->txn, &LSN(meta), 0,
+ &LSN(meta), PGNO_BASE_MD, &lsn, pgno,
+ (u_int32_t)type, newnext)) != 0)
+ goto err;
+ } else
+ LSN_NOT_LOGGED(LSN(meta));
- if (TYPE(h) != P_INVALID)
- return (__db_panic(dbp->dbenv, EINVAL));
+ meta_flags = DB_MPOOL_DIRTY;
+ meta->free = newnext;
- /* Log the change. */
- if (DB_LOGGING(dbc)) {
- if ((ret = __db_pg_alloc_log(dbp->dbenv,
- dbc->txn, &LSN(meta), 0, dbp->log_fileid,
- &LSN(meta), &h->lsn, h->pgno,
- (u_int32_t)type, meta->free)) != 0)
+ if (extend == 1) {
+ meta->last_pgno++;
+ if ((ret = mpf->get(mpf, &pgno, DB_MPOOL_NEW, &h)) != 0)
goto err;
- LSN(h) = LSN(meta);
+ ZERO_LSN(h->lsn);
+ h->pgno = pgno;
+ DB_ASSERT(pgno == meta->last_pgno);
}
+ LSN(h) = LSN(meta);
+
+ DB_ASSERT(TYPE(h) == P_INVALID);
+
+ if (TYPE(h) != P_INVALID)
+ return (__db_panic(dbp->dbenv, EINVAL));
- (void)memp_fput(dbp->mpf, (PAGE *)meta, DB_MPOOL_DIRTY);
+ (void)mpf->put(mpf, (PAGE *)meta, DB_MPOOL_DIRTY);
(void)__TLPUT(dbc, metalock);
- P_INIT(h, dbp->pgsize, h->pgno, PGNO_INVALID, PGNO_INVALID, 0, type);
+ switch (type) {
+ case P_BTREEMETA:
+ case P_HASHMETA:
+ case P_QAMMETA:
+ __db_init_meta(h, dbp->pgsize, h->pgno, type);
+ break;
+ default:
+ P_INIT(h, dbp->pgsize,
+ h->pgno, PGNO_INVALID, PGNO_INVALID, 0, type);
+ break;
+ }
+
+ /*
+ * If dirty reads are enabled and we are in a transaction, we could
+ * abort this allocation after the page(s) pointing to this
+ * one have their locks downgraded. This would permit dirty readers
+ * to access this page which is ok, but they must be off the
+ * page when we abort. This will also prevent updates happening
+ * to this page until we commit.
+ */
+ if (F_ISSET(dbc->dbp, DB_AM_DIRTY) && dbc->txn != NULL) {
+ if ((ret = __db_lget(dbc, 0,
+ h->pgno, DB_LOCK_WWRITE, 0, &metalock)) != 0)
+ goto err;
+ }
*pagepp = h;
return (0);
err: if (h != NULL)
- (void)memp_fput(dbp->mpf, h, 0);
+ (void)mpf->put(mpf, h, 0);
if (meta != NULL)
- (void)memp_fput(dbp->mpf, meta, 0);
+ (void)mpf->put(mpf, meta, meta_flags);
(void)__TLPUT(dbc, metalock);
return (ret);
}
@@ -148,11 +223,13 @@ __db_free(dbc, h)
DB *dbp;
DBT ldbt;
DB_LOCK metalock;
+ DB_MPOOLFILE *mpf;
db_pgno_t pgno;
u_int32_t dirty_flag;
int ret, t_ret;
dbp = dbc->dbp;
+ mpf = dbp->mpf;
/*
* Retrieve the metadata page and insert the page at the head of
@@ -163,43 +240,44 @@ __db_free(dbc, h)
dirty_flag = 0;
pgno = PGNO_BASE_MD;
if ((ret = __db_lget(dbc,
- LCK_ALWAYS, pgno, DB_LOCK_WRITE, 0, &metalock)) != 0)
+ LCK_ALWAYS, pgno, DB_LOCK_WRITE, 0, &metalock)) != 0)
goto err;
- if ((ret = memp_fget(dbp->mpf, &pgno, 0, (PAGE **)&meta)) != 0) {
+ if ((ret = mpf->get(mpf, &pgno, 0, (PAGE **)&meta)) != 0) {
(void)__TLPUT(dbc, metalock);
goto err;
}
DB_ASSERT(h->pgno != meta->free);
/* Log the change. */
- if (DB_LOGGING(dbc)) {
+ if (DBC_LOGGING(dbc)) {
memset(&ldbt, 0, sizeof(ldbt));
ldbt.data = h;
- ldbt.size = P_OVERHEAD;
- if ((ret = __db_pg_free_log(dbp->dbenv,
- dbc->txn, &LSN(meta), 0, dbp->log_fileid, h->pgno,
- &LSN(meta), &ldbt, meta->free)) != 0) {
- (void)memp_fput(dbp->mpf, (PAGE *)meta, 0);
+ ldbt.size = P_OVERHEAD(dbp);
+ if ((ret = __db_pg_free_log(dbp,
+ dbc->txn, &LSN(meta), 0, h->pgno,
+ &LSN(meta), PGNO_BASE_MD, &ldbt, meta->free)) != 0) {
+ (void)mpf->put(mpf, (PAGE *)meta, 0);
(void)__TLPUT(dbc, metalock);
- return (ret);
+ goto err;
}
- LSN(h) = LSN(meta);
- }
+ } else
+ LSN_NOT_LOGGED(LSN(meta));
+ LSN(h) = LSN(meta);
P_INIT(h, dbp->pgsize, h->pgno, PGNO_INVALID, meta->free, 0, P_INVALID);
meta->free = h->pgno;
/* Discard the metadata page. */
- if ((t_ret = memp_fput(dbp->mpf,
- (PAGE *)meta, DB_MPOOL_DIRTY)) != 0 && ret == 0)
+ if ((t_ret =
+ mpf->put(mpf, (PAGE *)meta, DB_MPOOL_DIRTY)) != 0 && ret == 0)
ret = t_ret;
if ((t_ret = __TLPUT(dbc, metalock)) != 0 && ret == 0)
ret = t_ret;
/* Discard the caller's page reference. */
dirty_flag = DB_MPOOL_DIRTY;
-err: if ((t_ret = memp_fput(dbp->mpf, h, dirty_flag)) != 0 && ret == 0)
+err: if ((t_ret = mpf->put(mpf, h, dirty_flag)) != 0 && ret == 0)
ret = t_ret;
/*
@@ -227,44 +305,63 @@ __db_lprint(dbc)
if (LOCKING_ON(dbp->dbenv)) {
req.op = DB_LOCK_DUMP;
- lock_vec(dbp->dbenv, dbc->locker, 0, &req, 1, NULL);
+ dbp->dbenv->lock_vec(dbp->dbenv, dbc->locker, 0, &req, 1, NULL);
}
return (0);
}
#endif
/*
+ * Implement the rules for transactional locking. We can release the previous
+ * lock if we are not in a transaction or COUPLE_ALWAYS is specifed (used in
+ * record locking). If we are doing dirty reads then we can release read locks
+ * and down grade write locks.
+ */
+#define DB_PUT_ACTION(dbc, action, lockp) \
+ (((action == LCK_COUPLE || action == LCK_COUPLE_ALWAYS) && \
+ LOCK_ISSET(*lockp)) ? \
+ (dbc->txn == NULL || action == LCK_COUPLE_ALWAYS || \
+ (F_ISSET(dbc, DBC_DIRTY_READ) && \
+ (lockp)->mode == DB_LOCK_DIRTY)) ? LCK_COUPLE : \
+ (F_ISSET((dbc)->dbp, DB_AM_DIRTY) && \
+ (lockp)->mode == DB_LOCK_WRITE) ? LCK_DOWNGRADE : 0 : 0)
+
+/*
* __db_lget --
* The standard lock get call.
*
* PUBLIC: int __db_lget __P((DBC *,
- * PUBLIC: int, db_pgno_t, db_lockmode_t, int, DB_LOCK *));
+ * PUBLIC: int, db_pgno_t, db_lockmode_t, u_int32_t, DB_LOCK *));
*/
int
-__db_lget(dbc, flags, pgno, mode, lkflags, lockp)
+__db_lget(dbc, action, pgno, mode, lkflags, lockp)
DBC *dbc;
- int flags, lkflags;
+ int action;
db_pgno_t pgno;
db_lockmode_t mode;
+ u_int32_t lkflags;
DB_LOCK *lockp;
{
DB *dbp;
DB_ENV *dbenv;
DB_LOCKREQ couple[2], *reqp;
- int ret;
+ DB_TXN *txn;
+ int has_timeout, ret;
dbp = dbc->dbp;
dbenv = dbp->dbenv;
+ txn = dbc->txn;
/*
* We do not always check if we're configured for locking before
* calling __db_lget to acquire the lock.
*/
- if (CDB_LOCKING(dbenv)
- || !LOCKING_ON(dbenv) || F_ISSET(dbc, DBC_COMPENSATE)
- || (!LF_ISSET(LCK_ROLLBACK) && F_ISSET(dbc, DBC_RECOVER))
- || (!LF_ISSET(LCK_ALWAYS) && F_ISSET(dbc, DBC_OPD))) {
- lockp->off = LOCK_INVALID;
+ if (CDB_LOCKING(dbenv) ||
+ !LOCKING_ON(dbenv) || F_ISSET(dbc, DBC_COMPENSATE) ||
+ (F_ISSET(dbc, DBC_RECOVER) &&
+ (action != LCK_ROLLBACK || F_ISSET(dbenv, DB_ENV_REP_CLIENT))) ||
+ (action != LCK_ALWAYS && F_ISSET(dbc, DBC_OPD))) {
+ LOCK_INIT(*lockp);
return (0);
}
@@ -282,27 +379,73 @@ __db_lget(dbc, flags, pgno, mode, lkflags, lockp)
if (DB_NONBLOCK(dbc))
lkflags |= DB_LOCK_NOWAIT;
- /*
- * If the object not currently locked, acquire the lock and return,
- * otherwise, lock couple.
- */
- if (LF_ISSET(LCK_COUPLE)) {
- couple[0].op = DB_LOCK_GET;
+ if (F_ISSET(dbc, DBC_DIRTY_READ) && mode == DB_LOCK_READ)
+ mode = DB_LOCK_DIRTY;
+
+ has_timeout = txn != NULL && F_ISSET(txn, TXN_LOCKTIMEOUT);
+
+ switch (DB_PUT_ACTION(dbc, action, lockp)) {
+ case LCK_COUPLE:
+lck_couple: couple[0].op = has_timeout? DB_LOCK_GET_TIMEOUT : DB_LOCK_GET;
couple[0].obj = &dbc->lock_dbt;
couple[0].mode = mode;
- couple[1].op = DB_LOCK_PUT;
- couple[1].lock = *lockp;
+ if (action == LCK_COUPLE_ALWAYS)
+ action = LCK_COUPLE;
+ UMRW_SET(couple[0].timeout);
+ if (has_timeout)
+ couple[0].timeout = txn->lock_timeout;
+ if (action == LCK_COUPLE) {
+ couple[1].op = DB_LOCK_PUT;
+ couple[1].lock = *lockp;
+ }
- ret = lock_vec(dbenv,
- dbc->locker, lkflags, couple, 2, &reqp);
+ ret = dbenv->lock_vec(dbenv, dbc->locker,
+ lkflags, couple, action == LCK_COUPLE ? 2 : 1, &reqp);
if (ret == 0 || reqp == &couple[1])
*lockp = couple[0].lock;
- } else {
- ret = lock_get(dbenv,
+ break;
+ case LCK_DOWNGRADE:
+ if ((ret = dbenv->lock_downgrade(
+ dbenv, lockp, DB_LOCK_WWRITE, 0)) != 0)
+ return (ret);
+ /* FALL THROUGH */
+ default:
+ if (has_timeout)
+ goto lck_couple;
+ ret = dbenv->lock_get(dbenv,
dbc->locker, lkflags, &dbc->lock_dbt, mode, lockp);
+ break;
+ }
+
+ return (ret);
+}
+
+/*
+ * __db_lput --
+ * The standard lock put call.
+ *
+ * PUBLIC: int __db_lput __P((DBC *, DB_LOCK *));
+ */
+int
+__db_lput(dbc, lockp)
+ DBC *dbc;
+ DB_LOCK *lockp;
+{
+ DB_ENV *dbenv;
+ int ret;
- if (ret != 0)
- lockp->off = LOCK_INVALID;
+ dbenv = dbc->dbp->dbenv;
+
+ switch (DB_PUT_ACTION(dbc, LCK_COUPLE, lockp)) {
+ case LCK_COUPLE:
+ ret = dbenv->lock_put(dbenv, lockp);
+ break;
+ case LCK_DOWNGRADE:
+ ret = __lock_downgrade(dbenv, lockp, DB_LOCK_WWRITE, 0);
+ break;
+ default:
+ ret = 0;
+ break;
}
return (ret);
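
The DB_PUT_ACTION macro added in db_meta.c above packs the transactional lock-release rules into nested conditionals. The function below is an illustrative restatement of the same decision, not code from the patch; it assumes the same db_int.h/dbinc headers as db_meta.c.

/*
 * put_action --
 *	Readable restatement of DB_PUT_ACTION: given a cursor and the lock
 *	it already holds, decide whether a coupling operation should also
 *	release (LCK_COUPLE) or downgrade (LCK_DOWNGRADE) that lock, or
 *	leave it alone (0).
 */
static int
put_action(dbc, action, lockp)
	DBC *dbc;
	int action;
	DB_LOCK *lockp;
{
	/* Only coupling calls that actually hold a lock are affected. */
	if ((action != LCK_COUPLE && action != LCK_COUPLE_ALWAYS) ||
	    !LOCK_ISSET(*lockp))
		return (0);

	/* Outside a transaction, or for record locks, always release. */
	if (dbc->txn == NULL || action == LCK_COUPLE_ALWAYS)
		return (LCK_COUPLE);

	/* A dirty-read cursor may release its DB_LOCK_DIRTY read locks. */
	if (F_ISSET(dbc, DBC_DIRTY_READ) && lockp->mode == DB_LOCK_DIRTY)
		return (LCK_COUPLE);

	/* On a dirty-read database, write locks are downgraded instead. */
	if (F_ISSET(dbc->dbp, DB_AM_DIRTY) && lockp->mode == DB_LOCK_WRITE)
		return (LCK_DOWNGRADE);

	return (0);
}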
diff --git a/bdb/db/db_method.c b/bdb/db/db_method.c
index 01568a6e144..14712180df0 100644
--- a/bdb/db/db_method.c
+++ b/bdb/db/db_method.c
@@ -1,14 +1,14 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1999, 2000
+ * Copyright (c) 1999-2002
* Sleepycat Software. All rights reserved.
*/
#include "db_config.h"
#ifndef lint
-static const char revid[] = "$Id: db_method.c,v 11.36 2000/12/21 09:17:04 krinsky Exp $";
+static const char revid[] = "$Id: db_method.c,v 11.78 2002/07/02 19:26:55 sue Exp $";
#endif /* not lint */
#ifndef NO_SYSTEM_INCLUDES
@@ -21,50 +21,56 @@ static const char revid[] = "$Id: db_method.c,v 11.36 2000/12/21 09:17:04 krinsk
#include <string.h>
#endif
-#ifdef HAVE_RPC
-#include "db_server.h"
-#endif
-
#include "db_int.h"
-#include "db_page.h"
-#include "db_am.h"
-#include "btree.h"
-#include "hash.h"
-#include "qam.h"
-#include "xa.h"
-#include "xa_ext.h"
+#include "dbinc/crypto.h"
+#include "dbinc/db_page.h"
+#include "dbinc/db_shash.h"
+#include "dbinc/btree.h"
+#include "dbinc/hash.h"
+#include "dbinc/qam.h"
+#include "dbinc/xa.h"
+#include "dbinc_auto/xa_ext.h"
+#include "dbinc/db_shash.h"
+#include "dbinc/lock.h"
#ifdef HAVE_RPC
-#include "gen_client_ext.h"
-#include "rpc_client_ext.h"
+#include "dbinc_auto/db_server.h"
+#include "dbinc_auto/rpc_client_ext.h"
#endif
-static int __db_get_byteswapped __P((DB *));
-static DBTYPE
- __db_get_type __P((DB *));
+static int __db_get_byteswapped __P((DB *, int *));
+static int __db_get_type __P((DB *, DBTYPE *dbtype));
static int __db_init __P((DB *, u_int32_t));
static int __db_key_range
__P((DB *, DB_TXN *, DBT *, DB_KEY_RANGE *, u_int32_t));
+static int __db_set_alloc __P((DB *, void *(*)(size_t),
+ void *(*)(void *, size_t), void (*)(void *)));
static int __db_set_append_recno __P((DB *, int (*)(DB *, DBT *, db_recno_t)));
static int __db_set_cachesize __P((DB *, u_int32_t, u_int32_t, int));
+static int __db_set_cache_priority __P((DB *, DB_CACHE_PRIORITY));
static int __db_set_dup_compare
__P((DB *, int (*)(DB *, const DBT *, const DBT *)));
-static void __db_set_errcall __P((DB *, void (*)(const char *, char *)));
-static void __db_set_errfile __P((DB *, FILE *));
+static int __db_set_encrypt __P((DB *, const char *, u_int32_t));
static int __db_set_feedback __P((DB *, void (*)(DB *, int, int)));
static int __db_set_flags __P((DB *, u_int32_t));
-static int __db_set_lorder __P((DB *, int));
-static int __db_set_malloc __P((DB *, void *(*)(size_t)));
static int __db_set_pagesize __P((DB *, u_int32_t));
-static int __db_set_realloc __P((DB *, void *(*)(void *, size_t)));
-static void __db_set_errpfx __P((DB *, const char *));
static int __db_set_paniccall __P((DB *, void (*)(DB_ENV *, int)));
+static void __db_set_errcall __P((DB *, void (*)(const char *, char *)));
+static void __db_set_errfile __P((DB *, FILE *));
+static void __db_set_errpfx __P((DB *, const char *));
+static int __db_stat_fail __P((DB *, void *, u_int32_t));
static void __dbh_err __P((DB *, int, const char *, ...));
static void __dbh_errx __P((DB *, const char *, ...));
+#ifdef HAVE_RPC
+static int __dbcl_init __P((DB *, DB_ENV *, u_int32_t));
+#endif
+
/*
* db_create --
* DB constructor.
+ *
+ * EXTERN: int db_create __P((DB **, DB_ENV *, u_int32_t));
*/
int
db_create(dbpp, dbenv, flags)
@@ -102,27 +108,25 @@ db_create(dbpp, dbenv, flags)
if ((ret = __os_calloc(dbenv, 1, sizeof(*dbp), &dbp)) != 0)
return (ret);
#ifdef HAVE_RPC
- if (dbenv != NULL && dbenv->cl_handle != NULL)
+ if (dbenv != NULL && RPC_ON(dbenv))
ret = __dbcl_init(dbp, dbenv, flags);
else
#endif
ret = __db_init(dbp, flags);
if (ret != 0) {
- __os_free(dbp, sizeof(*dbp));
+ __os_free(dbenv, dbp);
return (ret);
}
/* If we don't have an environment yet, allocate a local one. */
if (dbenv == NULL) {
if ((ret = db_env_create(&dbenv, 0)) != 0) {
- __os_free(dbp, sizeof(*dbp));
+ __os_free(dbenv, dbp);
return (ret);
}
- dbenv->dblocal_ref = 0;
F_SET(dbenv, DB_ENV_DBLOCAL);
}
- if (F_ISSET(dbenv, DB_ENV_DBLOCAL))
- ++dbenv->dblocal_ref;
+ ++dbenv->db_ref;
dbp->dbenv = dbenv;
@@ -141,18 +145,21 @@ __db_init(dbp, flags)
{
int ret;
- dbp->log_fileid = DB_LOGFILEID_INVALID;
+ dbp->lid = DB_LOCK_INVALIDID;
+ LOCK_INIT(dbp->handle_lock);
TAILQ_INIT(&dbp->free_queue);
TAILQ_INIT(&dbp->active_queue);
TAILQ_INIT(&dbp->join_queue);
+ LIST_INIT(&dbp->s_secondaries);
FLD_SET(dbp->am_ok,
DB_OK_BTREE | DB_OK_HASH | DB_OK_QUEUE | DB_OK_RECNO);
+ dbp->associate = __db_associate;
dbp->close = __db_close;
dbp->cursor = __db_cursor;
- dbp->del = NULL; /* !!! Must be set by access method. */
+ dbp->del = __db_delete;
dbp->err = __dbh_err;
dbp->errx = __dbh_errx;
dbp->fd = __db_fd;
@@ -162,26 +169,30 @@ __db_init(dbp, flags)
dbp->join = __db_join;
dbp->key_range = __db_key_range;
dbp->open = __db_open;
+ dbp->pget = __db_pget;
dbp->put = __db_put;
dbp->remove = __db_remove;
dbp->rename = __db_rename;
+ dbp->truncate = __db_truncate;
+ dbp->set_alloc = __db_set_alloc;
dbp->set_append_recno = __db_set_append_recno;
dbp->set_cachesize = __db_set_cachesize;
+ dbp->set_cache_priority = __db_set_cache_priority;
dbp->set_dup_compare = __db_set_dup_compare;
+ dbp->set_encrypt = __db_set_encrypt;
dbp->set_errcall = __db_set_errcall;
dbp->set_errfile = __db_set_errfile;
dbp->set_errpfx = __db_set_errpfx;
dbp->set_feedback = __db_set_feedback;
dbp->set_flags = __db_set_flags;
dbp->set_lorder = __db_set_lorder;
- dbp->set_malloc = __db_set_malloc;
dbp->set_pagesize = __db_set_pagesize;
dbp->set_paniccall = __db_set_paniccall;
- dbp->set_realloc = __db_set_realloc;
- dbp->stat = NULL; /* !!! Must be set by access method. */
+ dbp->stat = __db_stat_fail;
dbp->sync = __db_sync;
dbp->upgrade = __db_upgrade;
dbp->verify = __db_verify;
+
/* Access method specific. */
if ((ret = __bam_db_create(dbp)) != 0)
return (ret);
@@ -244,16 +255,7 @@ __dbh_err(dbp, error, fmt, va_alist)
va_dcl
#endif
{
- va_list ap;
-
-#ifdef __STDC__
- va_start(ap, fmt);
-#else
- va_start(ap);
-#endif
- __db_real_err(dbp->dbenv, error, 1, 1, fmt, ap);
-
- va_end(ap);
+ DB_REAL_ERR(dbp->dbenv, error, 1, 1, fmt);
}
/*
@@ -270,16 +272,7 @@ __dbh_errx(dbp, fmt, va_alist)
va_dcl
#endif
{
- va_list ap;
-
-#ifdef __STDC__
- va_start(ap, fmt);
-#else
- va_start(ap);
-#endif
- __db_real_err(dbp->dbenv, 0, 0, 1, fmt, ap);
-
- va_end(ap);
+ DB_REAL_ERR(dbp->dbenv, 0, 0, 1, fmt);
}
/*
@@ -287,25 +280,29 @@ __dbh_errx(dbp, fmt, va_alist)
* Return if database requires byte swapping.
*/
static int
-__db_get_byteswapped(dbp)
+__db_get_byteswapped(dbp, isswapped)
DB *dbp;
+ int *isswapped;
{
DB_ILLEGAL_BEFORE_OPEN(dbp, "get_byteswapped");
- return (F_ISSET(dbp, DB_AM_SWAP) ? 1 : 0);
+ *isswapped = F_ISSET(dbp, DB_AM_SWAP) ? 1 : 0;
+ return (0);
}
/*
* __db_get_type --
* Return type of underlying database.
*/
-static DBTYPE
-__db_get_type(dbp)
+static int
+__db_get_type(dbp, dbtype)
DB *dbp;
+ DBTYPE *dbtype;
{
DB_ILLEGAL_BEFORE_OPEN(dbp, "get_type");
- return (dbp->type);
+ *dbtype = dbp->type;
+ return (0);
}
/*
@@ -366,6 +363,26 @@ __db_set_cachesize(dbp, cache_gbytes, cache_bytes, ncache)
}
/*
+ * __db_set_cache_priority --
+ * Set cache priority for pages from this file.
+ */
+static int
+__db_set_cache_priority(dbp, priority)
+ DB *dbp;
+ DB_CACHE_PRIORITY priority;
+{
+ /*
+ * If an underlying DB_MPOOLFILE exists, call it. Otherwise, save
+ * the information away until DB->open is called.
+ */
+ if (dbp->mpf == NULL) {
+ dbp->priority = priority;
+ return (0);
+ }
+ return (dbp->mpf->set_priority(dbp->mpf, priority));
+}
+
+/*
* __db_set_dup_compare --
* Set duplicate comparison routine.
*/
@@ -374,14 +391,50 @@ __db_set_dup_compare(dbp, func)
DB *dbp;
int (*func) __P((DB *, const DBT *, const DBT *));
{
+ int ret;
+
DB_ILLEGAL_AFTER_OPEN(dbp, "dup_compare");
DB_ILLEGAL_METHOD(dbp, DB_OK_BTREE | DB_OK_HASH);
+ if ((ret = dbp->set_flags(dbp, DB_DUPSORT)) != 0)
+ return (ret);
+
dbp->dup_compare = func;
return (0);
}
+/*
+ * __db_set_encrypt --
+ * Set database passwd.
+ */
+static int
+__db_set_encrypt(dbp, passwd, flags)
+ DB *dbp;
+ const char *passwd;
+ u_int32_t flags;
+{
+ DB_CIPHER *db_cipher;
+ int ret;
+
+ DB_ILLEGAL_IN_ENV(dbp, "set_encrypt");
+ DB_ILLEGAL_AFTER_OPEN(dbp, "set_encrypt");
+
+ if ((ret = dbp->dbenv->set_encrypt(dbp->dbenv, passwd, flags)) != 0)
+ return (ret);
+
+ /*
+ * In a real env, this gets initialized with the region. In a local
+ * env, we must do it here.
+ */
+ db_cipher = (DB_CIPHER *)dbp->dbenv->crypto_handle;
+ if (!F_ISSET(db_cipher, CIPHER_ANY) &&
+ (ret = db_cipher->init(dbp->dbenv, db_cipher)) != 0)
+ return (ret);
+
+ return (dbp->set_flags(dbp, DB_ENCRYPT));
+}
+
static void
__db_set_errcall(dbp, errcall)
DB *dbp;
@@ -430,6 +483,21 @@ __db_set_flags(dbp, flags)
*
* The queue access method takes no flags.
*/
+ if (LF_ISSET(DB_ENCRYPT)) {
+ if (!CRYPTO_ON(dbp->dbenv)) {
+ __db_err(dbp->dbenv,
+ "Database environment not configured for encryption");
+ return (EINVAL);
+ }
+ F_SET(dbp, DB_AM_ENCRYPT);
+ F_SET(dbp, DB_AM_CHKSUM);
+ LF_CLR(DB_ENCRYPT);
+ }
+ if (LF_ISSET(DB_CHKSUM_SHA1)) {
+ F_SET(dbp, DB_AM_CHKSUM);
+ LF_CLR(DB_CHKSUM_SHA1);
+ }
+
if ((ret = __bam_set_flags(dbp, &flags)) != 0)
return (ret);
if ((ret = __ram_set_flags(dbp, &flags)) != 0)
@@ -438,7 +506,13 @@ __db_set_flags(dbp, flags)
return (flags == 0 ? 0 : __db_ferr(dbp->dbenv, "DB->set_flags", 0));
}
-static int
+/*
+ * __db_set_lorder --
+ * Set whether lorder is swapped or not.
+ *
+ * PUBLIC: int __db_set_lorder __P((DB *, int));
+ */
+int
__db_set_lorder(dbp, db_lorder)
DB *dbp;
int db_lorder;
@@ -463,14 +537,17 @@ __db_set_lorder(dbp, db_lorder)
}
static int
-__db_set_malloc(dbp, func)
+__db_set_alloc(dbp, mal_func, real_func, free_func)
DB *dbp;
- void *(*func) __P((size_t));
+ void *(*mal_func) __P((size_t));
+ void *(*real_func) __P((void *, size_t));
+ void (*free_func) __P((void *));
{
- DB_ILLEGAL_AFTER_OPEN(dbp, "set_malloc");
+ DB_ILLEGAL_IN_ENV(dbp, "set_alloc");
+ DB_ILLEGAL_AFTER_OPEN(dbp, "set_alloc");
- dbp->db_malloc = func;
- return (0);
+ return (dbp->dbenv->set_alloc(dbp->dbenv,
+ mal_func, real_func, free_func));
}
static int
@@ -495,7 +572,7 @@ __db_set_pagesize(dbp, db_pagesize)
* We don't want anything that's not a power-of-2, as we rely on that
* for alignment of various types on the pages.
*/
- if ((u_int32_t)1 << __db_log2(db_pagesize) != db_pagesize) {
+ if (!POWER_OF_TWO(db_pagesize)) {
__db_err(dbp->dbenv, "page sizes must be a power-of-2");
return (EINVAL);
}
@@ -511,44 +588,44 @@ __db_set_pagesize(dbp, db_pagesize)
}
static int
-__db_set_realloc(dbp, func)
+__db_set_paniccall(dbp, paniccall)
DB *dbp;
- void *(*func) __P((void *, size_t));
+ void (*paniccall) __P((DB_ENV *, int));
{
- DB_ILLEGAL_AFTER_OPEN(dbp, "set_realloc");
-
- dbp->db_realloc = func;
- return (0);
+ return (dbp->dbenv->set_paniccall(dbp->dbenv, paniccall));
}
static int
-__db_set_paniccall(dbp, paniccall)
+__db_stat_fail(dbp, sp, flags)
DB *dbp;
- void (*paniccall) __P((DB_ENV *, int));
+ void *sp;
+ u_int32_t flags;
{
- return (dbp->dbenv->set_paniccall(dbp->dbenv, paniccall));
+ COMPQUIET(sp, NULL);
+ COMPQUIET(flags, 0);
+
+ /*
+ * DB->stat isn't initialized until the actual DB->open call,
+ * but we don't want to core dump.
+ */
+ PANIC_CHECK(dbp->dbenv);
+ DB_ILLEGAL_BEFORE_OPEN(dbp, "DB->stat");
+
+ /* NOTREACHED */
+ return (EINVAL);
}
#ifdef HAVE_RPC
/*
* __dbcl_init --
* Initialize a DB structure on the server.
- *
- * PUBLIC: #ifdef HAVE_RPC
- * PUBLIC: int __dbcl_init __P((DB *, DB_ENV *, u_int32_t));
- * PUBLIC: #endif
*/
-int
+static int
__dbcl_init(dbp, dbenv, flags)
DB *dbp;
DB_ENV *dbenv;
u_int32_t flags;
{
- CLIENT *cl;
- __db_create_reply *replyp;
- __db_create_msg req;
- int ret;
-
TAILQ_INIT(&dbp->free_queue);
TAILQ_INIT(&dbp->active_queue);
/* !!!
@@ -556,6 +633,7 @@ __dbcl_init(dbp, dbenv, flags)
* not used in RPC clients. See the comment in __dbcl_db_join_ret().
*/
+ dbp->associate = __dbcl_db_associate;
dbp->close = __dbcl_db_close;
dbp->cursor = __dbcl_db_cursor;
dbp->del = __dbcl_db_del;
@@ -563,31 +641,34 @@ __dbcl_init(dbp, dbenv, flags)
dbp->errx = __dbh_errx;
dbp->fd = __dbcl_db_fd;
dbp->get = __dbcl_db_get;
- dbp->get_byteswapped = __dbcl_db_swapped;
+ dbp->get_byteswapped = __db_get_byteswapped;
dbp->get_type = __db_get_type;
dbp->join = __dbcl_db_join;
dbp->key_range = __dbcl_db_key_range;
- dbp->open = __dbcl_db_open;
+ dbp->open = __dbcl_db_open_wrap;
+ dbp->pget = __dbcl_db_pget;
dbp->put = __dbcl_db_put;
dbp->remove = __dbcl_db_remove;
dbp->rename = __dbcl_db_rename;
+ dbp->set_alloc = __dbcl_db_alloc;
dbp->set_append_recno = __dbcl_db_set_append_recno;
dbp->set_cachesize = __dbcl_db_cachesize;
- dbp->set_dup_compare = NULL;
+ dbp->set_cache_priority = __dbcl_db_cache_priority;
+ dbp->set_dup_compare = __dbcl_db_dup_compare;
+ dbp->set_encrypt = __dbcl_db_encrypt;
dbp->set_errcall = __db_set_errcall;
dbp->set_errfile = __db_set_errfile;
dbp->set_errpfx = __db_set_errpfx;
dbp->set_feedback = __dbcl_db_feedback;
dbp->set_flags = __dbcl_db_flags;
dbp->set_lorder = __dbcl_db_lorder;
- dbp->set_malloc = __dbcl_db_malloc;
dbp->set_pagesize = __dbcl_db_pagesize;
dbp->set_paniccall = __dbcl_db_panic;
- dbp->set_q_extentsize = __dbcl_db_extentsize;
- dbp->set_realloc = __dbcl_db_realloc;
dbp->stat = __dbcl_db_stat;
dbp->sync = __dbcl_db_sync;
+ dbp->truncate = __dbcl_db_truncate;
dbp->upgrade = __dbcl_db_upgrade;
+ dbp->verify = __dbcl_db_verify;
/*
* Set all the method specific functions to client funcs as well.
@@ -599,31 +680,12 @@ __dbcl_init(dbp, dbenv, flags)
dbp->set_h_ffactor = __dbcl_db_h_ffactor;
dbp->set_h_hash = __dbcl_db_h_hash;
dbp->set_h_nelem = __dbcl_db_h_nelem;
+ dbp->set_q_extentsize = __dbcl_db_extentsize;
dbp->set_re_delim = __dbcl_db_re_delim;
dbp->set_re_len = __dbcl_db_re_len;
dbp->set_re_pad = __dbcl_db_re_pad;
dbp->set_re_source = __dbcl_db_re_source;
-/*
- dbp->set_q_extentsize = __dbcl_db_q_extentsize;
-*/
-
- cl = (CLIENT *)dbenv->cl_handle;
- req.flags = flags;
- req.envpcl_id = dbenv->cl_id;
-
- /*
- * CALL THE SERVER
- */
- replyp = __db_db_create_1(&req, cl);
- if (replyp == NULL) {
- __db_err(dbenv, clnt_sperror(cl, "Berkeley DB"));
- return (DB_NOSERVER);
- }
- if ((ret = replyp->status) != 0)
- return (ret);
-
- dbp->cl_id = replyp->dbpcl_id;
- return (0);
+ return (__dbcl_db_create(dbp, dbenv, flags));
}
#endif
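
The method-table changes above replace DB->set_malloc and DB->set_realloc with a single DB->set_alloc entry, and convert DB->get_byteswapped and DB->get_type from value-returning calls into error-returning calls that fill in an out-parameter. A minimal sketch of application code against these 4.1-era handles follows; it is illustrative only (the handle is assumed to be already open, and the function name is made up), not part of the patch.

    #include <stdio.h>
    #include <db.h>

    /*
     * Illustrative only: query an already-open DB handle through the
     * revised 4.1-style methods.  "report_handle" is a made-up name.
     */
    int
    report_handle(DB *dbp)
    {
    	DBTYPE type;
    	int isswapped, ret;

    	/* Both methods now return an error code and fill in an out-parameter. */
    	if ((ret = dbp->get_type(dbp, &type)) != 0)
    		return (ret);
    	if ((ret = dbp->get_byteswapped(dbp, &isswapped)) != 0)
    		return (ret);

    	printf("type %d, byteswapped %d\n", (int)type, isswapped);
    	return (0);
    }
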
diff --git a/bdb/db/db_open.c b/bdb/db/db_open.c
new file mode 100644
index 00000000000..f6f96cda547
--- /dev/null
+++ b/bdb/db/db_open.c
@@ -0,0 +1,705 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: db_open.c,v 11.215 2002/08/15 15:27:52 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <stddef.h>
+#include <stdlib.h>
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_page.h"
+#include "dbinc/db_shash.h"
+#include "dbinc/db_swap.h"
+#include "dbinc/btree.h"
+#include "dbinc/crypto.h"
+#include "dbinc/hmac.h"
+#include "dbinc/fop.h"
+#include "dbinc/hash.h"
+#include "dbinc/lock.h"
+#include "dbinc/log.h"
+#include "dbinc/qam.h"
+#include "dbinc/txn.h"
+
+static int __db_openchk __P((DB *,
+ DB_TXN *, const char *, const char *, DBTYPE, u_int32_t));
+
+/*
+ * __db_open --
+ * Main library interface to the DB access methods.
+ *
+ * PUBLIC: int __db_open __P((DB *, DB_TXN *,
+ * PUBLIC: const char *, const char *, DBTYPE, u_int32_t, int));
+ */
+int
+__db_open(dbp, txn, name, subdb, type, flags, mode)
+ DB *dbp;
+ DB_TXN *txn;
+ const char *name, *subdb;
+ DBTYPE type;
+ u_int32_t flags;
+ int mode;
+{
+ DB_ENV *dbenv;
+ int remove_master, remove_me, ret, t_ret, txn_local;
+
+ dbenv = dbp->dbenv;
+ remove_me = remove_master = txn_local = 0;
+
+ PANIC_CHECK(dbenv);
+
+ if ((ret = __db_openchk(dbp, txn, name, subdb, type, flags)) != 0)
+ return (ret);
+
+ /*
+ * Create local transaction as necessary, check for consistent
+ * transaction usage.
+ */
+ if (IS_AUTO_COMMIT(dbenv, txn, flags)) {
+ if ((ret = __db_txn_auto(dbp, &txn)) != 0)
+ return (ret);
+ txn_local = 1;
+ } else
+ if (txn != NULL && !TXN_ON(dbenv))
+ return (__db_not_txn_env(dbenv));
+
+ /*
+ * If the environment was configured with threads, the DB handle
+ * must also be free-threaded, so we force the DB_THREAD flag on.
+ * (See SR #2033 for why this is a requirement--recovery needs
+ * to be able to grab a dbp using __db_fileid_to_dbp, and it has
+ * no way of knowing which dbp goes with which thread, so whichever
+ * one it finds has to be usable in any of them.)
+ */
+ if (F_ISSET(dbenv, DB_ENV_THREAD))
+ LF_SET(DB_THREAD);
+
+ /* Convert any DB->open flags. */
+ if (LF_ISSET(DB_RDONLY))
+ F_SET(dbp, DB_AM_RDONLY);
+ if (LF_ISSET(DB_DIRTY_READ))
+ F_SET(dbp, DB_AM_DIRTY);
+
+ /* Fill in the type. */
+ dbp->type = type;
+
+ /*
+ * If we're opening a subdatabase, we have to open (and potentially
+ * create) the main database, and then get (and potentially store)
+ * our base page number in that database. Then, we can finally open
+ * the subdatabase.
+ */
+ if ((ret = __db_dbopen(
+ dbp, txn, name, subdb, flags, mode, PGNO_BASE_MD)) != 0)
+ goto err;
+
+ /*
+ * You can open the database that describes the subdatabases in the
+ * rest of the file read-only. The content of each key's data is
+ * unspecified and applications should never be adding new records
+ * or updating existing records. However, during recovery, we need
+ * to open these databases R/W so we can redo/undo changes in them.
+ * Likewise, we need to open master databases read/write during
+ * rename and remove so we can be sure they're fully sync'ed, so
+ * we provide an override flag for the purpose.
+ */
+ if (subdb == NULL && !IS_RECOVERING(dbenv) && !LF_ISSET(DB_RDONLY) &&
+ !LF_ISSET(DB_RDWRMASTER) && F_ISSET(dbp, DB_AM_SUBDB)) {
+ __db_err(dbenv,
+ "files containing multiple databases may only be opened read-only");
+ ret = EINVAL;
+ goto err;
+ }
+
+err: /* If we were successful, don't discard the file on close. */
+ if (ret == 0)
+ F_CLR(dbp, DB_AM_DISCARD | DB_AM_CREATED | DB_AM_CREATED_MSTR);
+ else {
+ /*
+ * If we are not transactional, we need to remove the
+ * databases/subdatabases. If we are transactional, then
+ * the abort of the child transaction should take care of
+ * cleaning them up.
+ */
+ remove_me = txn == NULL && F_ISSET(dbp, DB_AM_CREATED);
+ remove_master = txn == NULL && F_ISSET(dbp, DB_AM_CREATED_MSTR);
+
+ /*
+ * If we had an error, it may have happened before or after
+ * we actually logged the open. If it happened before, then
+ * abort won't know anything about it and won't close or
+ * refresh the dbp, so we need to do it explicitly.
+ */
+ (void)__db_refresh(dbp, txn, DB_NOSYNC);
+ }
+
+ /* Remove anyone we created. */
+ if (remove_master || (subdb == NULL && remove_me))
+ /* Remove file. */
+ (void)dbenv->dbremove(dbenv, txn, name, NULL, 0);
+ else if (remove_me)
+ /* Remove subdatabase. */
+ (void)dbenv->dbremove(dbenv, txn, name, subdb, 0);
+
+ /* Commit for DB_AUTO_COMMIT. */
+ if (txn_local) {
+ if (ret == 0)
+ ret = txn->commit(txn, 0);
+ else
+ if ((t_ret = txn->abort(txn)) != 0)
+ ret = __db_panic(dbenv, t_ret);
+ }
+
+ return (ret);
+}
+
+/*
+ * __db_dbopen --
+ * Open a database. This routine gets called in three different ways.
+ * 1. It can be called to open a file/database. In this case, subdb will
+ * be NULL and meta_pgno will be PGNO_BASE_MD.
+ * 2. It can be called to open a subdatabase during normal operation. In
+ * this case, name and subname will both be non-NULL and meta_pgno will
+ * be PGNO_BASE_MD (also PGNO_INVALID).
+ * 3. It can be called during recovery to open a subdatabase in which case
+ * name will be non-NULL, subname may be NULL and meta_pgno will be
+ * a valid pgno (i.e., not PGNO_BASE_MD).
+ *
+ * PUBLIC: int __db_dbopen __P((DB *, DB_TXN *,
+ * PUBLIC: const char *, const char *, u_int32_t, int, db_pgno_t));
+ */
+int
+__db_dbopen(dbp, txn, name, subdb, flags, mode, meta_pgno)
+ DB *dbp;
+ DB_TXN *txn;
+ const char *name, *subdb;
+ u_int32_t flags;
+ int mode;
+ db_pgno_t meta_pgno;
+{
+ DB_ENV *dbenv;
+ int ret;
+ u_int32_t id;
+
+ dbenv = dbp->dbenv;
+ id = TXN_INVALID;
+ if (txn != NULL)
+ F_SET(dbp, DB_AM_TXN);
+
+ DB_TEST_RECOVERY(dbp, DB_TEST_PREOPEN, ret, name);
+ /*
+ * If name is NULL, it's always a create, so make sure that we
+ * have a type specified. It would be nice if this checking
+ * were done in __db_open where most of the interface checking
+ * is done, but this interface (__db_dbopen) is used by the
+ * recovery and limbo system, so we need to safeguard this
+ * interface as well.
+ */
+ if (name == NULL) {
+ F_SET(dbp, DB_AM_INMEM);
+
+ if (dbp->type == DB_UNKNOWN) {
+ __db_err(dbenv,
+ "DBTYPE of unknown without existing file");
+ return (EINVAL);
+ }
+
+ if (dbp->pgsize == 0)
+ dbp->pgsize = DB_DEF_IOSIZE;
+
+ /*
+ * If the file is a temporary file and we're doing locking,
+ * then we have to create a unique file ID. We can't use our
+ * normal dev/inode pair (or whatever this OS uses in place of
+ * dev/inode pairs) because no backing file will be created
+ * until the mpool cache is filled forcing the buffers to disk.
+ * Grab a random locker ID to use as a file ID. The created
+ * ID must never match a potential real file ID -- we know it
+ * won't because real file IDs contain a time stamp after the
+ * dev/inode pair, and we're simply storing a 4-byte value.
+ *
+ * !!!
+ * Store the locker in the file id structure -- we can get it
+ * from there as necessary, and it saves having two copies.
+ */
+ if (LOCKING_ON(dbenv) && (ret = dbenv->lock_id(dbenv,
+ (u_int32_t *)dbp->fileid)) != 0)
+ return (ret);
+ } else if (subdb == NULL && meta_pgno == PGNO_BASE_MD) {
+ /* Open/create the underlying file. Acquire locks. */
+ if ((ret =
+ __fop_file_setup(dbp, txn, name, mode, flags, &id)) != 0)
+ return (ret);
+ } else {
+ if ((ret = __fop_subdb_setup(dbp,
+ txn, name, subdb, mode, flags)) != 0)
+ return (ret);
+ meta_pgno = dbp->meta_pgno;
+ }
+
+ /*
+ * If we created the file, set the truncate flag for the mpool. This
+ * isn't for anything we've done, it's protection against stupid user
+ * tricks: if the user deleted a file behind Berkeley DB's back, we
+ * may still have pages in the mpool that match the file's "unique" ID.
+ *
+ * Note that if we're opening a subdatabase, we don't want to set
+ * the TRUNCATE flag even if we just created the file--we already
+ * opened and updated the master using access method interfaces,
+ * so we don't want to get rid of any pages that are in the mpool.
+ * If we created the file when we opened the master, we already hit
+ * this check in a non-subdb context.
+ */
+ if (subdb == NULL && F_ISSET(dbp, DB_AM_CREATED))
+ LF_SET(DB_TRUNCATE);
+
+ /* Set up the underlying environment. */
+ if ((ret = __db_dbenv_setup(dbp, txn, name, id, flags)) != 0)
+ return (ret);
+
+ /*
+ * Set the open flag. We use it to mean that the dbp has gone
+ * through mpf setup, including dbreg_register. Also, below,
+ * the underlying access method open functions may want to do
+ * things like acquire cursors, so the open flag has to be set
+ * before calling them.
+ */
+ F_SET(dbp, DB_AM_OPEN_CALLED);
+
+ /*
+ * For unnamed files, we need to actually create the file now
+ * that the mpool is open.
+ */
+ if (name == NULL && (ret = __db_new_file(dbp, txn, NULL, NULL)) != 0)
+ return (ret);
+
+ switch (dbp->type) {
+ case DB_BTREE:
+ ret = __bam_open(dbp, txn, name, meta_pgno, flags);
+ break;
+ case DB_HASH:
+ ret = __ham_open(dbp, txn, name, meta_pgno, flags);
+ break;
+ case DB_RECNO:
+ ret = __ram_open(dbp, txn, name, meta_pgno, flags);
+ break;
+ case DB_QUEUE:
+ ret = __qam_open(dbp, txn, name, meta_pgno, mode, flags);
+ break;
+ case DB_UNKNOWN:
+ return (__db_unknown_type(dbenv, "__db_dbopen", dbp->type));
+ }
+ if (ret != 0)
+ goto err;
+
+ DB_TEST_RECOVERY(dbp, DB_TEST_POSTOPEN, ret, name);
+
+ /*
+ * Unnamed files don't need handle locks, so we only have to check
+ * for a handle lock downgrade or lockevent in the case of named
+ * files.
+ */
+ if (!F_ISSET(dbp, DB_AM_RECOVER) &&
+ name != NULL && LOCK_ISSET(dbp->handle_lock)) {
+ if (txn != NULL) {
+ ret = __txn_lockevent(dbenv,
+ txn, dbp, &dbp->handle_lock, dbp->lid);
+ } else if (LOCKING_ON(dbenv))
+ /* Trade write handle lock for read handle lock. */
+ ret = __lock_downgrade(dbenv,
+ &dbp->handle_lock, DB_LOCK_READ, 0);
+ }
+DB_TEST_RECOVERY_LABEL
+err:
+ return (ret);
+}
+
+/*
+ * __db_new_file --
+ * Create a new database file.
+ *
+ * PUBLIC: int __db_new_file __P((DB *, DB_TXN *, DB_FH *, const char *));
+ */
+int
+__db_new_file(dbp, txn, fhp, name)
+ DB *dbp;
+ DB_TXN *txn;
+ DB_FH *fhp;
+ const char *name;
+{
+ int ret;
+
+ switch (dbp->type) {
+ case DB_BTREE:
+ case DB_RECNO:
+ ret = __bam_new_file(dbp, txn, fhp, name);
+ break;
+ case DB_HASH:
+ ret = __ham_new_file(dbp, txn, fhp, name);
+ break;
+ case DB_QUEUE:
+ ret = __qam_new_file(dbp, txn, fhp, name);
+ break;
+ default:
+ __db_err(dbp->dbenv,
+ "%s: Invalid type %d specified", name, dbp->type);
+ ret = EINVAL;
+ break;
+ }
+
+ DB_TEST_RECOVERY(dbp, DB_TEST_POSTLOGMETA, ret, name);
+ /* Sync the file in preparation for moving it into place. */
+ if (ret == 0 && fhp != NULL)
+ ret = __os_fsync(dbp->dbenv, fhp);
+
+ DB_TEST_RECOVERY(dbp, DB_TEST_POSTSYNC, ret, name);
+
+DB_TEST_RECOVERY_LABEL
+ return (ret);
+}
+
+/*
+ * __db_init_subdb --
+ * Initialize the dbp for a subdb.
+ *
+ * PUBLIC: int __db_init_subdb __P((DB *, DB *, const char *, DB_TXN *));
+ */
+int
+__db_init_subdb(mdbp, dbp, name, txn)
+ DB *mdbp, *dbp;
+ const char *name;
+ DB_TXN *txn;
+{
+ DBMETA *meta;
+ DB_MPOOLFILE *mpf;
+ int ret, t_ret;
+
+ ret = 0;
+ if (!F_ISSET(dbp, DB_AM_CREATED)) {
+ /* Subdb exists; read meta-data page and initialize. */
+ mpf = mdbp->mpf;
+ if ((ret = mpf->get(mpf, &dbp->meta_pgno, 0, &meta)) != 0)
+ goto err;
+ ret = __db_meta_setup(mdbp->dbenv, dbp, name, meta, 0, 0);
+ if ((t_ret = mpf->put(mpf, meta, 0)) != 0 && ret == 0)
+ ret = t_ret;
+ /*
+ * If __db_meta_setup found that the meta-page hadn't
+ * been written out during recovery, we can just return.
+ */
+ if (ret == ENOENT)
+ ret = 0;
+ goto err;
+ }
+
+ /* Handle the create case here. */
+ switch (dbp->type) {
+ case DB_BTREE:
+ case DB_RECNO:
+ ret = __bam_new_subdb(mdbp, dbp, txn);
+ break;
+ case DB_HASH:
+ ret = __ham_new_subdb(mdbp, dbp, txn);
+ break;
+ case DB_QUEUE:
+ ret = EINVAL;
+ break;
+ default:
+ __db_err(dbp->dbenv,
+ "Invalid subdatabase type %d specified", dbp->type);
+ return (EINVAL);
+ }
+
+err: return (ret);
+}
+
+/*
+ * __db_chk_meta --
+ * Take a buffer containing a meta-data page and check it for a checksum
+ * (and verify the checksum if necessary) and possibly decrypt it.
+ *
+ * Return 0 on success, >0 (errno) on error, -1 on checksum mismatch.
+ *
+ * PUBLIC: int __db_chk_meta __P((DB_ENV *, DB *, DBMETA *, int));
+ */
+int
+__db_chk_meta(dbenv, dbp, meta, do_metachk)
+ DB_ENV *dbenv;
+ DB *dbp;
+ DBMETA *meta;
+ int do_metachk;
+{
+ int is_hmac, ret;
+ u_int8_t *chksum;
+
+ ret = 0;
+
+ if (FLD_ISSET(meta->metaflags, DBMETA_CHKSUM)) {
+ if (dbp != NULL)
+ F_SET(dbp, DB_AM_CHKSUM);
+
+ is_hmac = meta->encrypt_alg == 0 ? 0 : 1;
+ chksum = ((BTMETA *)meta)->chksum;
+ if (do_metachk && ((ret = __db_check_chksum(dbenv,
+ (DB_CIPHER *)dbenv->crypto_handle, chksum, meta,
+ DBMETASIZE, is_hmac)) != 0))
+ return (ret);
+ }
+
+#ifdef HAVE_CRYPTO
+ ret = __crypto_decrypt_meta(dbenv, dbp, (u_int8_t *)meta, do_metachk);
+#endif
+ return (ret);
+}
+
+/*
+ * __db_meta_setup --
+ *
+ * Take a buffer containing a meta-data page and figure out if it's
+ * valid, and if so, initialize the dbp from the meta-data page.
+ *
+ * PUBLIC: int __db_meta_setup __P((DB_ENV *,
+ * PUBLIC: DB *, const char *, DBMETA *, u_int32_t, int));
+ */
+int
+__db_meta_setup(dbenv, dbp, name, meta, oflags, do_metachk)
+ DB_ENV *dbenv;
+ DB *dbp;
+ const char *name;
+ DBMETA *meta;
+ u_int32_t oflags;
+ int do_metachk;
+{
+ u_int32_t flags, magic;
+ int ret;
+
+ ret = 0;
+
+ /*
+ * Figure out what access method we're dealing with, and then
+ * call access method specific code to check error conditions
+ * based on conflicts between the found file and application
+ * arguments. A found file overrides some user information --
+ * we don't consider it an error, for example, if the user set
+ * an expected byte order and the found file doesn't match it.
+ */
+ F_CLR(dbp, DB_AM_SWAP);
+ magic = meta->magic;
+
+swap_retry:
+ switch (magic) {
+ case DB_BTREEMAGIC:
+ case DB_HASHMAGIC:
+ case DB_QAMMAGIC:
+ case DB_RENAMEMAGIC:
+ break;
+ case 0:
+ /*
+ * The only time this should be 0 is if we're in the
+ * midst of opening a subdb during recovery and that
+ * subdatabase had its meta-data page allocated, but
+ * not yet initialized.
+ */
+ if (F_ISSET(dbp, DB_AM_SUBDB) && ((IS_RECOVERING(dbenv) &&
+ F_ISSET((DB_LOG *) dbenv->lg_handle, DBLOG_FORCE_OPEN)) ||
+ meta->pgno != PGNO_INVALID))
+ return (ENOENT);
+
+ goto bad_format;
+ default:
+ if (F_ISSET(dbp, DB_AM_SWAP))
+ goto bad_format;
+
+ M_32_SWAP(magic);
+ F_SET(dbp, DB_AM_SWAP);
+ goto swap_retry;
+ }
+
+ /*
+ * We can only check the meta page if we are sure we have a meta page.
+ * If it is random data, then this check can fail. So only now can we
+ * checksum and decrypt. Don't distinguish between configuration and
+ * checksum match errors here, because we haven't opened the database
+ * and even a checksum error isn't a reason to panic the environment.
+ */
+ if ((ret = __db_chk_meta(dbenv, dbp, meta, do_metachk)) != 0) {
+ if (ret == -1) {
+ __db_err(dbenv,
+ "%s: metadata page checksum error", name);
+ ret = EINVAL;
+ }
+ goto bad_format;
+ }
+
+ switch (magic) {
+ case DB_BTREEMAGIC:
+ flags = meta->flags;
+ if (F_ISSET(dbp, DB_AM_SWAP))
+ M_32_SWAP(flags);
+ if (LF_ISSET(BTM_RECNO))
+ dbp->type = DB_RECNO;
+ else
+ dbp->type = DB_BTREE;
+ if ((oflags & DB_TRUNCATE) == 0 && (ret =
+ __bam_metachk(dbp, name, (BTMETA *)meta)) != 0)
+ return (ret);
+ break;
+ case DB_HASHMAGIC:
+ dbp->type = DB_HASH;
+ if ((oflags & DB_TRUNCATE) == 0 && (ret =
+ __ham_metachk(dbp, name, (HMETA *)meta)) != 0)
+ return (ret);
+ break;
+ case DB_QAMMAGIC:
+ dbp->type = DB_QUEUE;
+ if ((oflags & DB_TRUNCATE) == 0 && (ret =
+ __qam_metachk(dbp, name, (QMETA *)meta)) != 0)
+ return (ret);
+ break;
+ case DB_RENAMEMAGIC:
+ F_SET(dbp, DB_AM_IN_RENAME);
+ break;
+ }
+ return (0);
+
+bad_format:
+ __db_err(dbenv, "%s: unexpected file type or format", name);
+ return (ret);
+}
+
+/*
+ * __db_openchk --
+ * Interface error checking for open calls.
+ */
+static int
+__db_openchk(dbp, txn, name, subdb, type, flags)
+ DB *dbp;
+ DB_TXN *txn;
+ const char *name, *subdb;
+ DBTYPE type;
+ u_int32_t flags;
+{
+ DB_ENV *dbenv;
+ int ret;
+ u_int32_t ok_flags;
+
+ dbenv = dbp->dbenv;
+
+ /* Validate arguments. */
+#define OKFLAGS \
+ (DB_AUTO_COMMIT | DB_CREATE | DB_DIRTY_READ | DB_EXCL | \
+ DB_FCNTL_LOCKING | DB_NOMMAP | DB_RDONLY | DB_RDWRMASTER | \
+ DB_THREAD | DB_TRUNCATE | DB_WRITEOPEN)
+ if ((ret = __db_fchk(dbenv, "DB->open", flags, OKFLAGS)) != 0)
+ return (ret);
+ if (LF_ISSET(DB_EXCL) && !LF_ISSET(DB_CREATE))
+ return (__db_ferr(dbenv, "DB->open", 1));
+ if (LF_ISSET(DB_RDONLY) && LF_ISSET(DB_CREATE))
+ return (__db_ferr(dbenv, "DB->open", 1));
+
+#ifdef HAVE_VXWORKS
+ if (LF_ISSET(DB_TRUNCATE)) {
+ __db_err(dbenv, "DB_TRUNCATE unsupported in VxWorks");
+ return (__db_eopnotsup(dbenv));
+ }
+#endif
+ switch (type) {
+ case DB_UNKNOWN:
+ if (LF_ISSET(DB_CREATE|DB_TRUNCATE)) {
+ __db_err(dbenv,
+ "%s: DB_UNKNOWN type specified with DB_CREATE or DB_TRUNCATE",
+ name);
+ return (EINVAL);
+ }
+ ok_flags = 0;
+ break;
+ case DB_BTREE:
+ ok_flags = DB_OK_BTREE;
+ break;
+ case DB_HASH:
+ ok_flags = DB_OK_HASH;
+ break;
+ case DB_QUEUE:
+ ok_flags = DB_OK_QUEUE;
+ break;
+ case DB_RECNO:
+ ok_flags = DB_OK_RECNO;
+ break;
+ default:
+ __db_err(dbenv, "unknown type: %lu", (u_long)type);
+ return (EINVAL);
+ }
+ if (ok_flags)
+ DB_ILLEGAL_METHOD(dbp, ok_flags);
+
+ /* The environment may have been created, but never opened. */
+ if (!F_ISSET(dbenv, DB_ENV_DBLOCAL | DB_ENV_OPEN_CALLED)) {
+ __db_err(dbenv, "environment not yet opened");
+ return (EINVAL);
+ }
+
+ /*
+ * Historically, you could pass in an environment that didn't have a
+ * mpool, and DB would create a private one behind the scenes. This
+ * no longer works.
+ */
+ if (!F_ISSET(dbenv, DB_ENV_DBLOCAL) && !MPOOL_ON(dbenv)) {
+ __db_err(dbenv, "environment did not include a memory pool");
+ return (EINVAL);
+ }
+
+ /*
+ * You can't specify threads during DB->open if subsystems in the
+ * environment weren't configured with them.
+ */
+ if (LF_ISSET(DB_THREAD) &&
+ !F_ISSET(dbenv, DB_ENV_DBLOCAL | DB_ENV_THREAD)) {
+ __db_err(dbenv, "environment not created using DB_THREAD");
+ return (EINVAL);
+ }
+
+ /* DB_TRUNCATE is not transaction recoverable. */
+ if (LF_ISSET(DB_TRUNCATE) && txn != NULL) {
+ __db_err(dbenv,
+ "DB_TRUNCATE illegal with transaction specified");
+ return (EINVAL);
+ }
+
+ /* Subdatabase checks. */
+ if (subdb != NULL) {
+ /* Subdatabases must be created in named files. */
+ if (name == NULL) {
+ __db_err(dbenv,
+ "multiple databases cannot be created in temporary files");
+ return (EINVAL);
+ }
+
+ /* Truncate is a physical file operation. */
+ if (LF_ISSET(DB_TRUNCATE)) {
+ __db_err(dbenv,
+ "DB_TRUNCATE illegal with multiple databases");
+ return (EINVAL);
+ }
+
+ /* QAM can't be done as a subdatabase. */
+ if (type == DB_QUEUE) {
+ __db_err(dbenv, "Queue databases must be one-per-file");
+ return (EINVAL);
+ }
+ }
+
+ return (0);
+}
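
The new db_open.c above routes DB->open through __db_open, which now takes a DB_TXN argument and, via __db_openchk, accepts DB_AUTO_COMMIT among the legal open flags. A hypothetical application-side sketch of such an open follows; the file name, mode, and error handling are assumptions for illustration, not taken from this patch.

    #include <db.h>

    /*
     * Sketch: create and open a btree, letting DB_AUTO_COMMIT wrap the
     * open in an internal transaction (see IS_AUTO_COMMIT in __db_open).
     */
    int
    open_example(DB_ENV *dbenv, DB **dbpp)
    {
    	DB *dbp;
    	int ret;

    	if ((ret = db_create(&dbp, dbenv, 0)) != 0)
    		return (ret);
    	if ((ret = dbp->open(dbp, NULL, "example.db", NULL,
    	    DB_BTREE, DB_CREATE | DB_AUTO_COMMIT, 0644)) != 0) {
    		(void)dbp->close(dbp, 0);
    		return (ret);
    	}
    	*dbpp = dbp;
    	return (0);
    }
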
diff --git a/bdb/db/db_overflow.c b/bdb/db/db_overflow.c
index 54f0a03aafe..27dcb41a2ff 100644
--- a/bdb/db/db_overflow.c
+++ b/bdb/db/db_overflow.c
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Copyright (c) 1996-2002
* Sleepycat Software. All rights reserved.
*/
/*
@@ -43,7 +43,7 @@
#include "db_config.h"
#ifndef lint
-static const char revid[] = "$Id: db_overflow.c,v 11.21 2000/11/30 00:58:32 ubell Exp $";
+static const char revid[] = "$Id: db_overflow.c,v 11.46 2002/08/08 03:57:48 bostic Exp $";
#endif /* not lint */
#ifndef NO_SYSTEM_INCLUDES
@@ -53,9 +53,9 @@ static const char revid[] = "$Id: db_overflow.c,v 11.21 2000/11/30 00:58:32 ubel
#endif
#include "db_int.h"
-#include "db_page.h"
-#include "db_am.h"
-#include "db_verify.h"
+#include "dbinc/db_page.h"
+#include "dbinc/db_am.h"
+#include "dbinc/db_verify.h"
/*
* Big key/data code.
@@ -83,6 +83,7 @@ __db_goff(dbp, dbt, tlen, pgno, bpp, bpsz)
u_int32_t *bpsz;
{
DB_ENV *dbenv;
+ DB_MPOOLFILE *mpf;
PAGE *h;
db_indx_t bytes;
u_int32_t curoff, needed, start;
@@ -90,6 +91,7 @@ __db_goff(dbp, dbt, tlen, pgno, bpp, bpsz)
int ret;
dbenv = dbp->dbenv;
+ mpf = dbp->mpf;
/*
* Check if the buffer is big enough; if it is not and we are
@@ -99,7 +101,12 @@ __db_goff(dbp, dbt, tlen, pgno, bpp, bpsz)
*/
if (F_ISSET(dbt, DB_DBT_PARTIAL)) {
start = dbt->doff;
- needed = dbt->dlen;
+ if (start > tlen)
+ needed = 0;
+ else if (dbt->dlen > tlen - start)
+ needed = tlen - start;
+ else
+ needed = dbt->dlen;
} else {
start = 0;
needed = tlen;
@@ -112,15 +119,13 @@ __db_goff(dbp, dbt, tlen, pgno, bpp, bpsz)
return (ENOMEM);
}
} else if (F_ISSET(dbt, DB_DBT_MALLOC)) {
- if ((ret = __os_malloc(dbenv,
- needed, dbp->db_malloc, &dbt->data)) != 0)
+ if ((ret = __os_umalloc(dbenv, needed, &dbt->data)) != 0)
return (ret);
} else if (F_ISSET(dbt, DB_DBT_REALLOC)) {
- if ((ret = __os_realloc(dbenv,
- needed, dbp->db_realloc, &dbt->data)) != 0)
+ if ((ret = __os_urealloc(dbenv, needed, &dbt->data)) != 0)
return (ret);
} else if (*bpsz == 0 || *bpsz < needed) {
- if ((ret = __os_realloc(dbenv, needed, NULL, bpp)) != 0)
+ if ((ret = __os_realloc(dbenv, needed, bpp)) != 0)
return (ret);
*bpsz = needed;
dbt->data = *bpp;
@@ -133,13 +138,12 @@ __db_goff(dbp, dbt, tlen, pgno, bpp, bpsz)
*/
dbt->size = needed;
for (curoff = 0, p = dbt->data; pgno != PGNO_INVALID && needed > 0;) {
- if ((ret = memp_fget(dbp->mpf, &pgno, 0, &h)) != 0) {
- (void)__db_pgerr(dbp, pgno);
+ if ((ret = mpf->get(mpf, &pgno, 0, &h)) != 0)
return (ret);
- }
+
/* Check if we need any bytes from this page. */
if (curoff + OV_LEN(h) >= start) {
- src = (u_int8_t *)h + P_OVERHEAD;
+ src = (u_int8_t *)h + P_OVERHEAD(dbp);
bytes = OV_LEN(h);
if (start > curoff) {
src += start - curoff;
@@ -153,7 +157,7 @@ __db_goff(dbp, dbt, tlen, pgno, bpp, bpsz)
}
curoff += OV_LEN(h);
pgno = h->next_pgno;
- memp_fput(dbp->mpf, h, 0);
+ (void)mpf->put(mpf, h, 0);
}
return (0);
}
@@ -171,13 +175,14 @@ __db_poff(dbc, dbt, pgnop)
db_pgno_t *pgnop;
{
DB *dbp;
- PAGE *pagep, *lastp;
- DB_LSN new_lsn, null_lsn;
DBT tmp_dbt;
+ DB_LSN new_lsn, null_lsn;
+ DB_MPOOLFILE *mpf;
+ PAGE *pagep, *lastp;
db_indx_t pagespace;
u_int32_t sz;
u_int8_t *p;
- int ret;
+ int ret, t_ret;
/*
* Allocate pages and copy the key/data item into them. Calculate the
@@ -185,8 +190,10 @@ __db_poff(dbc, dbt, pgnop)
* item.
*/
dbp = dbc->dbp;
- pagespace = P_MAXSPACE(dbp->pgsize);
+ mpf = dbp->mpf;
+ pagespace = P_MAXSPACE(dbp, dbp->pgsize);
+ ret = 0;
lastp = NULL;
for (p = dbt->data,
sz = dbt->size; sz > 0; p += pagespace, sz -= pagespace) {
@@ -203,30 +210,36 @@ __db_poff(dbc, dbt, pgnop)
* have a partial record.
*/
if ((ret = __db_new(dbc, P_OVERFLOW, &pagep)) != 0)
- return (ret);
- if (DB_LOGGING(dbc)) {
+ break;
+ if (DBC_LOGGING(dbc)) {
tmp_dbt.data = p;
tmp_dbt.size = pagespace;
ZERO_LSN(null_lsn);
- if ((ret = __db_big_log(dbp->dbenv, dbc->txn,
- &new_lsn, 0, DB_ADD_BIG, dbp->log_fileid,
- PGNO(pagep), lastp ? PGNO(lastp) : PGNO_INVALID,
+ if ((ret = __db_big_log(dbp, dbc->txn,
+ &new_lsn, 0, DB_ADD_BIG, PGNO(pagep),
+ lastp ? PGNO(lastp) : PGNO_INVALID,
PGNO_INVALID, &tmp_dbt, &LSN(pagep),
lastp == NULL ? &null_lsn : &LSN(lastp),
- &null_lsn)) != 0)
- return (ret);
+ &null_lsn)) != 0) {
+ if (lastp != NULL)
+ (void)mpf->put(mpf,
+ lastp, DB_MPOOL_DIRTY);
+ lastp = pagep;
+ break;
+ }
+ } else
+ LSN_NOT_LOGGED(new_lsn);
- /* Move lsn onto page. */
- if (lastp)
- LSN(lastp) = new_lsn;
- LSN(pagep) = new_lsn;
- }
+ /* Move LSN onto page. */
+ if (lastp != NULL)
+ LSN(lastp) = new_lsn;
+ LSN(pagep) = new_lsn;
P_INIT(pagep, dbp->pgsize,
PGNO(pagep), PGNO_INVALID, PGNO_INVALID, 0, P_OVERFLOW);
OV_LEN(pagep) = pagespace;
OV_REF(pagep) = 1;
- memcpy((u_int8_t *)pagep + P_OVERHEAD, p, pagespace);
+ memcpy((u_int8_t *)pagep + P_OVERHEAD(dbp), p, pagespace);
/*
* If this is the first entry, update the user's info.
@@ -238,12 +251,14 @@ __db_poff(dbc, dbt, pgnop)
else {
lastp->next_pgno = PGNO(pagep);
pagep->prev_pgno = PGNO(lastp);
- (void)memp_fput(dbp->mpf, lastp, DB_MPOOL_DIRTY);
+ (void)mpf->put(mpf, lastp, DB_MPOOL_DIRTY);
}
lastp = pagep;
}
- (void)memp_fput(dbp->mpf, lastp, DB_MPOOL_DIRTY);
- return (0);
+ if (lastp != NULL &&
+ (t_ret = mpf->put(mpf, lastp, DB_MPOOL_DIRTY)) != 0 && ret == 0)
+ ret = t_ret;
+ return (ret);
}
/*
@@ -259,23 +274,29 @@ __db_ovref(dbc, pgno, adjust)
int32_t adjust;
{
DB *dbp;
+ DB_MPOOLFILE *mpf;
PAGE *h;
int ret;
dbp = dbc->dbp;
- if ((ret = memp_fget(dbp->mpf, &pgno, 0, &h)) != 0) {
- (void)__db_pgerr(dbp, pgno);
+ mpf = dbp->mpf;
+
+ if ((ret = mpf->get(mpf, &pgno, 0, &h)) != 0) {
+ __db_pgerr(dbp, pgno, ret);
return (ret);
}
- if (DB_LOGGING(dbc))
- if ((ret = __db_ovref_log(dbp->dbenv, dbc->txn,
- &LSN(h), 0, dbp->log_fileid, h->pgno, adjust,
- &LSN(h))) != 0)
+ if (DBC_LOGGING(dbc)) {
+ if ((ret = __db_ovref_log(dbp,
+ dbc->txn, &LSN(h), 0, h->pgno, adjust, &LSN(h))) != 0) {
+ (void)mpf->put(mpf, h, 0);
return (ret);
+ }
+ } else
+ LSN_NOT_LOGGED(LSN(h));
OV_REF(h) += adjust;
- (void)memp_fput(dbp->mpf, h, DB_MPOOL_DIRTY);
+ (void)mpf->put(mpf, h, DB_MPOOL_DIRTY);
return (0);
}
@@ -293,13 +314,16 @@ __db_doff(dbc, pgno)
DB *dbp;
PAGE *pagep;
DB_LSN null_lsn;
+ DB_MPOOLFILE *mpf;
DBT tmp_dbt;
int ret;
dbp = dbc->dbp;
+ mpf = dbp->mpf;
+
do {
- if ((ret = memp_fget(dbp->mpf, &pgno, 0, &pagep)) != 0) {
- (void)__db_pgerr(dbp, pgno);
+ if ((ret = mpf->get(mpf, &pgno, 0, &pagep)) != 0) {
+ __db_pgerr(dbp, pgno, ret);
return (ret);
}
@@ -309,20 +333,24 @@ __db_doff(dbc, pgno)
* decrement the reference count and return.
*/
if (OV_REF(pagep) > 1) {
- (void)memp_fput(dbp->mpf, pagep, 0);
+ (void)mpf->put(mpf, pagep, 0);
return (__db_ovref(dbc, pgno, -1));
}
- if (DB_LOGGING(dbc)) {
- tmp_dbt.data = (u_int8_t *)pagep + P_OVERHEAD;
+ if (DBC_LOGGING(dbc)) {
+ tmp_dbt.data = (u_int8_t *)pagep + P_OVERHEAD(dbp);
tmp_dbt.size = OV_LEN(pagep);
ZERO_LSN(null_lsn);
- if ((ret = __db_big_log(dbp->dbenv, dbc->txn,
- &LSN(pagep), 0, DB_REM_BIG, dbp->log_fileid,
- PGNO(pagep), PREV_PGNO(pagep), NEXT_PGNO(pagep),
- &tmp_dbt, &LSN(pagep), &null_lsn, &null_lsn)) != 0)
+ if ((ret = __db_big_log(dbp, dbc->txn,
+ &LSN(pagep), 0, DB_REM_BIG,
+ PGNO(pagep), PREV_PGNO(pagep),
+ NEXT_PGNO(pagep), &tmp_dbt,
+ &LSN(pagep), &null_lsn, &null_lsn)) != 0) {
+ (void)mpf->put(mpf, pagep, 0);
return (ret);
- }
+ }
+ } else
+ LSN_NOT_LOGGED(LSN(pagep));
pgno = pagep->next_pgno;
if ((ret = __db_free(dbc, pagep)) != 0)
return (ret);
@@ -352,13 +380,16 @@ __db_moff(dbp, dbt, pgno, tlen, cmpfunc, cmpp)
u_int32_t tlen;
int (*cmpfunc) __P((DB *, const DBT *, const DBT *)), *cmpp;
{
- PAGE *pagep;
DBT local_dbt;
+ DB_MPOOLFILE *mpf;
+ PAGE *pagep;
void *buf;
u_int32_t bufsize, cmp_bytes, key_left;
u_int8_t *p1, *p2;
int ret;
+ mpf = dbp->mpf;
+
/*
* If there is a user-specified comparison function, build a
* contiguous copy of the key, and call it.
@@ -373,27 +404,27 @@ __db_moff(dbp, dbt, pgno, tlen, cmpfunc, cmpp)
return (ret);
/* Pass the key as the first argument */
*cmpp = cmpfunc(dbp, dbt, &local_dbt);
- __os_free(buf, bufsize);
+ __os_free(dbp->dbenv, buf);
return (0);
}
/* While there are both keys to compare. */
for (*cmpp = 0, p1 = dbt->data,
key_left = dbt->size; key_left > 0 && pgno != PGNO_INVALID;) {
- if ((ret = memp_fget(dbp->mpf, &pgno, 0, &pagep)) != 0)
+ if ((ret = mpf->get(mpf, &pgno, 0, &pagep)) != 0)
return (ret);
cmp_bytes = OV_LEN(pagep) < key_left ? OV_LEN(pagep) : key_left;
tlen -= cmp_bytes;
key_left -= cmp_bytes;
- for (p2 =
- (u_int8_t *)pagep + P_OVERHEAD; cmp_bytes-- > 0; ++p1, ++p2)
+ for (p2 = (u_int8_t *)pagep + P_OVERHEAD(dbp);
+ cmp_bytes-- > 0; ++p1, ++p2)
if (*p1 != *p2) {
*cmpp = (long)*p1 - (long)*p2;
break;
}
pgno = NEXT_PGNO(pagep);
- if ((ret = memp_fput(dbp->mpf, pagep, 0)) != 0)
+ if ((ret = mpf->put(mpf, pagep, 0)) != 0)
return (ret);
if (*cmpp != 0)
return (0);
@@ -440,7 +471,7 @@ __db_vrfy_overflow(dbp, vdp, h, pgno, flags)
pip->refcount = OV_REF(h);
if (pip->refcount < 1) {
EPRINT((dbp->dbenv,
- "Overflow page %lu has zero reference count",
+ "Page %lu: overflow page has zero reference count",
(u_long)pgno));
isbad = 1;
}
@@ -448,7 +479,7 @@ __db_vrfy_overflow(dbp, vdp, h, pgno, flags)
/* Just store for now. */
pip->olen = HOFFSET(h);
-err: if ((t_ret = __db_vrfy_putpageinfo(vdp, pip)) != 0)
+err: if ((t_ret = __db_vrfy_putpageinfo(dbp->dbenv, vdp, pip)) != 0)
ret = t_ret;
return ((ret == 0 && isbad == 1) ? DB_VERIFY_BAD : ret);
}
@@ -495,7 +526,7 @@ __db_vrfy_ovfl_structure(dbp, vdp, pgno, tlen, flags)
if (pip->type != P_OVERFLOW) {
EPRINT((dbp->dbenv,
- "Overflow page %lu of invalid type",
+ "Page %lu: overflow page of invalid type %lu",
(u_long)pgno, (u_long)pip->type));
ret = DB_VERIFY_BAD;
goto err; /* Unsafe to continue. */
@@ -504,7 +535,8 @@ __db_vrfy_ovfl_structure(dbp, vdp, pgno, tlen, flags)
prev = pip->prev_pgno;
if (prev != PGNO_INVALID) {
EPRINT((dbp->dbenv,
- "First overflow page %lu has a prev_pgno", (u_long)pgno));
+ "Page %lu: first page in overflow chain has a prev_pgno %lu",
+ (u_long)pgno, (u_long)prev));
isbad = 1;
}
@@ -543,7 +575,7 @@ __db_vrfy_ovfl_structure(dbp, vdp, pgno, tlen, flags)
*/
if ((u_int32_t)p > refcount) {
EPRINT((dbp->dbenv,
- "Page %lu encountered twice in overflow traversal",
+ "Page %lu: encountered twice in overflow traversal",
(u_long)pgno));
ret = DB_VERIFY_BAD;
goto err;
@@ -571,19 +603,20 @@ __db_vrfy_ovfl_structure(dbp, vdp, pgno, tlen, flags)
if (!IS_VALID_PGNO(next)) {
DB_ASSERT(0);
EPRINT((dbp->dbenv,
- "Overflow page %lu has bad next_pgno",
- (u_long)pgno));
+ "Page %lu: bad next_pgno %lu on overflow page",
+ (u_long)pgno, (u_long)next));
ret = DB_VERIFY_BAD;
goto err;
}
- if ((ret = __db_vrfy_putpageinfo(vdp, pip)) != 0 ||
+ if ((ret = __db_vrfy_putpageinfo(dbp->dbenv, vdp, pip)) != 0 ||
(ret = __db_vrfy_getpageinfo(vdp, next, &pip)) != 0)
return (ret);
if (pip->prev_pgno != pgno) {
EPRINT((dbp->dbenv,
- "Overflow page %lu has bogus prev_pgno value",
- (u_long)next));
+ "Page %lu: bad prev_pgno %lu on overflow page (should be %lu)",
+ (u_long)next, (u_long)pip->prev_pgno,
+ (u_long)pgno));
isbad = 1;
/*
* It's safe to continue because we have separate
@@ -597,10 +630,11 @@ __db_vrfy_ovfl_structure(dbp, vdp, pgno, tlen, flags)
if (tlen > 0) {
isbad = 1;
EPRINT((dbp->dbenv,
- "Overflow item incomplete on page %lu", (u_long)pgno));
+ "Page %lu: overflow item incomplete", (u_long)pgno));
}
-err: if ((t_ret = __db_vrfy_putpageinfo(vdp, pip)) != 0 && ret == 0)
+err: if ((t_ret =
+ __db_vrfy_putpageinfo(dbp->dbenv, vdp, pip)) != 0 && ret == 0)
ret = t_ret;
return ((ret == 0 && isbad == 1) ? DB_VERIFY_BAD : ret);
}
@@ -622,13 +656,15 @@ __db_safe_goff(dbp, vdp, pgno, dbt, buf, flags)
void **buf;
u_int32_t flags;
{
+ DB_MPOOLFILE *mpf;
PAGE *h;
- int ret, err_ret;
+ int ret, t_ret;
u_int32_t bytesgot, bytes;
u_int8_t *src, *dest;
- ret = DB_VERIFY_BAD;
- err_ret = 0;
+ mpf = dbp->mpf;
+ h = NULL;
+ ret = t_ret = 0;
bytesgot = bytes = 0;
while ((pgno != PGNO_INVALID) && (IS_VALID_PGNO(pgno))) {
@@ -639,7 +675,7 @@ __db_safe_goff(dbp, vdp, pgno, dbt, buf, flags)
if ((ret = __db_salvage_markdone(vdp, pgno)) != 0)
break;
- if ((ret = memp_fget(dbp->mpf, &pgno, 0, &h)) != 0)
+ if ((ret = mpf->get(mpf, &pgno, 0, &h)) != 0)
break;
/*
@@ -651,14 +687,14 @@ __db_safe_goff(dbp, vdp, pgno, dbt, buf, flags)
break;
}
- src = (u_int8_t *)h + P_OVERHEAD;
+ src = (u_int8_t *)h + P_OVERHEAD(dbp);
bytes = OV_LEN(h);
- if (bytes + P_OVERHEAD > dbp->pgsize)
- bytes = dbp->pgsize - P_OVERHEAD;
+ if (bytes + P_OVERHEAD(dbp) > dbp->pgsize)
+ bytes = dbp->pgsize - P_OVERHEAD(dbp);
if ((ret = __os_realloc(dbp->dbenv,
- bytesgot + bytes, 0, buf)) != 0)
+ bytesgot + bytes, buf)) != 0)
break;
dest = (u_int8_t *)*buf + bytesgot;
@@ -667,15 +703,24 @@ __db_safe_goff(dbp, vdp, pgno, dbt, buf, flags)
memcpy(dest, src, bytes);
pgno = NEXT_PGNO(h);
- /* Not much we can do here--we don't want to quit. */
- if ((ret = memp_fput(dbp->mpf, h, 0)) != 0)
- err_ret = ret;
+
+ if ((ret = mpf->put(mpf, h, 0)) != 0)
+ break;
+ h = NULL;
}
- if (ret == 0) {
+ /*
+ * If we're being aggressive, salvage a partial datum if there
+ * was an error somewhere along the way.
+ */
+ if (ret == 0 || LF_ISSET(DB_AGGRESSIVE)) {
dbt->size = bytesgot;
dbt->data = *buf;
}
- return ((err_ret != 0 && ret == 0) ? err_ret : ret);
+ /* If we broke out on error, don't leave pages pinned. */
+ if (h != NULL && (t_ret = mpf->put(mpf, h, 0)) != 0 && ret == 0)
+ ret = t_ret;
+
+ return (ret);
}
diff --git a/bdb/db/db_pr.c b/bdb/db/db_pr.c
index cb977cadfda..235e7187f7c 100644
--- a/bdb/db/db_pr.c
+++ b/bdb/db/db_pr.c
@@ -1,14 +1,14 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Copyright (c) 1996-2002
* Sleepycat Software. All rights reserved.
*/
#include "db_config.h"
#ifndef lint
-static const char revid[] = "$Id: db_pr.c,v 11.46 2001/01/22 17:25:06 krinsky Exp $";
+static const char revid[] = "$Id: db_pr.c,v 11.84 2002/09/10 02:45:20 bostic Exp $";
#endif /* not lint */
#ifndef NO_SYSTEM_INCLUDES
@@ -21,34 +21,24 @@ static const char revid[] = "$Id: db_pr.c,v 11.46 2001/01/22 17:25:06 krinsky Ex
#endif
#include "db_int.h"
-#include "db_page.h"
-#include "btree.h"
-#include "hash.h"
-#include "qam.h"
-#include "db_am.h"
-#include "db_verify.h"
+#include "dbinc/db_page.h"
+#include "dbinc/db_shash.h"
+#include "dbinc/btree.h"
+#include "dbinc/hash.h"
+#include "dbinc/mp.h"
+#include "dbinc/qam.h"
+#include "dbinc/db_verify.h"
static int __db_bmeta __P((DB *, FILE *, BTMETA *, u_int32_t));
static int __db_hmeta __P((DB *, FILE *, HMETA *, u_int32_t));
static void __db_meta __P((DB *, DBMETA *, FILE *, FN const *, u_int32_t));
-static const char *__db_dbtype_to_string __P((DB *));
-static void __db_prdb __P((DB *, FILE *, u_int32_t));
-static FILE *__db_prinit __P((FILE *));
-static void __db_proff __P((void *));
-static int __db_prtree __P((DB *, u_int32_t));
-static void __db_psize __P((DB *));
+static const char *__db_pagetype_to_string __P((u_int32_t));
+static void __db_prdb __P((DB *, FILE *));
+static void __db_proff __P((void *, FILE *));
+static int __db_prtree __P((DB *, FILE *, u_int32_t));
static int __db_qmeta __P((DB *, FILE *, QMETA *, u_int32_t));
/*
- * 64K is the maximum page size, so by default we check for offsets larger
- * than that, and, where possible, we refine the test.
- */
-#define PSIZE_BOUNDARY (64 * 1024 + 1)
-static size_t set_psize = PSIZE_BOUNDARY;
-
-static FILE *set_fp; /* Output file descriptor. */
-
-/*
* __db_loadme --
* A nice place to put a breakpoint.
*
@@ -57,7 +47,9 @@ static FILE *set_fp; /* Output file descriptor. */
void
__db_loadme()
{
- getpid();
+ u_int32_t id;
+
+ __os_id(&id);
}
/*
@@ -71,21 +63,9 @@ __db_dump(dbp, op, name)
DB *dbp;
char *op, *name;
{
- FILE *fp, *save_fp;
+ FILE *fp;
u_int32_t flags;
-
- COMPQUIET(save_fp, NULL);
-
- if (set_psize == PSIZE_BOUNDARY)
- __db_psize(dbp);
-
- if (name != NULL) {
- if ((fp = fopen(name, "w")) == NULL)
- return (__os_get_errno());
- save_fp = set_fp;
- set_fp = fp;
- } else
- fp = __db_prinit(NULL);
+ int ret;
for (flags = 0; *op != '\0'; ++op)
switch (*op) {
@@ -101,60 +81,93 @@ __db_dump(dbp, op, name)
return (EINVAL);
}
- __db_prdb(dbp, fp, flags);
+ if (name == NULL)
+ fp = stdout;
+ else {
+ if ((fp = fopen(name, "w")) == NULL)
+ return (__os_get_errno());
+ }
+
+ __db_prdb(dbp, fp);
fprintf(fp, "%s\n", DB_LINE);
- (void)__db_prtree(dbp, flags);
+ ret = __db_prtree(dbp, fp, flags);
fflush(fp);
-
- if (name != NULL) {
+ if (name != NULL)
fclose(fp);
- set_fp = save_fp;
- }
- return (0);
+
+ return (ret);
}
/*
- * __db_prdb --
- * Print out the DB structure information.
+ * __db_inmemdbflags --
+ * Call a callback for printing or other handling of strings associated
+ * with whatever in-memory DB structure flags are set.
+ *
+ * PUBLIC: void __db_inmemdbflags __P((u_int32_t, void *,
+ * PUBLIC: void (*)(u_int32_t, const FN *, void *)));
*/
-static void
-__db_prdb(dbp, fp, flags)
- DB *dbp;
- FILE *fp;
+void
+__db_inmemdbflags(flags, cookie, callback)
u_int32_t flags;
+ void *cookie;
+ void (*callback) __P((u_int32_t, const FN *, void *));
{
static const FN fn[] = {
+ { DB_AM_CHKSUM, "checksumming" },
+ { DB_AM_CL_WRITER, "client replica writer" },
+ { DB_AM_COMPENSATE, "created by compensating transaction" },
+ { DB_AM_CREATED, "database created" },
+ { DB_AM_CREATED_MSTR, "encompassing file created" },
+ { DB_AM_DBM_ERROR, "dbm/ndbm error" },
+ { DB_AM_DELIMITER, "variable length" },
+ { DB_AM_DIRTY, "dirty reads" },
{ DB_AM_DISCARD, "discard cached pages" },
{ DB_AM_DUP, "duplicates" },
+ { DB_AM_DUPSORT, "sorted duplicates" },
+ { DB_AM_ENCRYPT, "encrypted" },
+ { DB_AM_FIXEDLEN, "fixed-length records" },
{ DB_AM_INMEM, "in-memory" },
+ { DB_AM_IN_RENAME, "file is being renamed" },
+ { DB_AM_OPEN_CALLED, "DB->open called" },
+ { DB_AM_PAD, "pad value" },
{ DB_AM_PGDEF, "default page size" },
{ DB_AM_RDONLY, "read-only" },
- { DB_AM_SUBDB, "multiple-databases" },
+ { DB_AM_RECNUM, "Btree record numbers" },
+ { DB_AM_RECOVER, "opened for recovery" },
+ { DB_AM_RENUMBER, "renumber" },
+ { DB_AM_REVSPLITOFF, "no reverse splits" },
+ { DB_AM_SECONDARY, "secondary" },
+ { DB_AM_SNAPSHOT, "load on open" },
+ { DB_AM_SUBDB, "subdatabases" },
{ DB_AM_SWAP, "needswap" },
- { DB_BT_RECNUM, "btree:recnum" },
- { DB_BT_REVSPLIT, "btree:no reverse split" },
- { DB_DBM_ERROR, "dbm/ndbm error" },
- { DB_OPEN_CALLED, "DB->open called" },
- { DB_RE_DELIMITER, "recno:delimiter" },
- { DB_RE_FIXEDLEN, "recno:fixed-length" },
- { DB_RE_PAD, "recno:pad" },
- { DB_RE_RENUMBER, "recno:renumber" },
- { DB_RE_SNAPSHOT, "recno:snapshot" },
+ { DB_AM_TXN, "transactional" },
+ { DB_AM_VERIFYING, "verifier" },
{ 0, NULL }
};
+
+ callback(flags, fn, cookie);
+}
+
+/*
+ * __db_prdb --
+ * Print out the DB structure information.
+ */
+static void
+__db_prdb(dbp, fp)
+ DB *dbp;
+ FILE *fp;
+{
BTREE *bt;
HASH *h;
QUEUE *q;
- COMPQUIET(flags, 0);
-
fprintf(fp,
"In-memory DB structure:\n%s: %#lx",
- __db_dbtype_to_string(dbp), (u_long)dbp->flags);
- __db_prflags(dbp->flags, fn, fp);
+ __db_dbtype_to_string(dbp->type), (u_long)dbp->flags);
+ __db_inmemdbflags(dbp->flags, fp, __db_prflags);
fprintf(fp, "\n");
switch (dbp->type) {
@@ -166,7 +179,7 @@ __db_prdb(dbp, fp, flags)
fprintf(fp, "bt_maxkey: %lu bt_minkey: %lu\n",
(u_long)bt->bt_maxkey, (u_long)bt->bt_minkey);
fprintf(fp, "bt_compare: %#lx bt_prefix: %#lx\n",
- (u_long)bt->bt_compare, (u_long)bt->bt_prefix);
+ P_TO_ULONG(bt->bt_compare), P_TO_ULONG(bt->bt_prefix));
fprintf(fp, "bt_lpgno: %lu\n", (u_long)bt->bt_lpgno);
if (dbp->type == DB_RECNO) {
fprintf(fp,
@@ -183,7 +196,7 @@ __db_prdb(dbp, fp, flags)
fprintf(fp, "meta_pgno: %lu\n", (u_long)h->meta_pgno);
fprintf(fp, "h_ffactor: %lu\n", (u_long)h->h_ffactor);
fprintf(fp, "h_nelem: %lu\n", (u_long)h->h_nelem);
- fprintf(fp, "h_hash: %#lx\n", (u_long)h->h_hash);
+ fprintf(fp, "h_hash: %#lx\n", P_TO_ULONG(h->h_hash));
break;
case DB_QUEUE:
q = dbp->q_internal;
@@ -204,39 +217,34 @@ __db_prdb(dbp, fp, flags)
* Print out the entire tree.
*/
static int
-__db_prtree(dbp, flags)
+__db_prtree(dbp, fp, flags)
DB *dbp;
+ FILE *fp;
u_int32_t flags;
{
+ DB_MPOOLFILE *mpf;
PAGE *h;
db_pgno_t i, last;
int ret;
- if (set_psize == PSIZE_BOUNDARY)
- __db_psize(dbp);
+ mpf = dbp->mpf;
- if (dbp->type == DB_QUEUE) {
- ret = __db_prqueue(dbp, flags);
- goto done;
- }
-
- /* Find out the page number of the last page in the database. */
- if ((ret = memp_fget(dbp->mpf, &last, DB_MPOOL_LAST, &h)) != 0)
- return (ret);
- if ((ret = memp_fput(dbp->mpf, h, 0)) != 0)
- return (ret);
+ if (dbp->type == DB_QUEUE)
+ return (__db_prqueue(dbp, fp, flags));
- /* Dump each page. */
+ /*
+ * Find out the page number of the last page in the database, then
+ * dump each page.
+ */
+ mpf->last_pgno(mpf, &last);
for (i = 0; i <= last; ++i) {
- if ((ret = memp_fget(dbp->mpf, &i, 0, &h)) != 0)
+ if ((ret = mpf->get(mpf, &i, 0, &h)) != 0)
return (ret);
- (void)__db_prpage(dbp, h, flags);
- if ((ret = memp_fput(dbp->mpf, h, 0)) != 0)
+ (void)__db_prpage(dbp, h, fp, flags);
+ if ((ret = mpf->put(mpf, h, 0)) != 0)
return (ret);
}
-done:
- (void)fflush(__db_prinit(NULL));
return (0);
}
@@ -252,13 +260,15 @@ __db_meta(dbp, dbmeta, fp, fn, flags)
FN const *fn;
u_int32_t flags;
{
+ DB_MPOOLFILE *mpf;
PAGE *h;
- int cnt;
db_pgno_t pgno;
u_int8_t *p;
- int ret;
+ int cnt, ret;
const char *sep;
+ mpf = dbp->mpf;
+
fprintf(fp, "\tmagic: %#lx\n", (u_long)dbmeta->magic);
fprintf(fp, "\tversion: %lu\n", (u_long)dbmeta->version);
fprintf(fp, "\tpagesize: %lu\n", (u_long)dbmeta->pagesize);
@@ -275,14 +285,14 @@ __db_meta(dbp, dbmeta, fp, fn, flags)
fprintf(fp, "\tfree list: %lu", (u_long)dbmeta->free);
for (pgno = dbmeta->free,
cnt = 0, sep = ", "; pgno != PGNO_INVALID;) {
- if ((ret = memp_fget(dbp->mpf, &pgno, 0, &h)) != 0) {
+ if ((ret = mpf->get(mpf, &pgno, 0, &h)) != 0) {
fprintf(fp,
"Unable to retrieve free-list page: %lu: %s\n",
(u_long)pgno, db_strerror(ret));
break;
}
pgno = h->next_pgno;
- (void)memp_fput(dbp->mpf, h, 0);
+ (void)mpf->put(mpf, h, 0);
fprintf(fp, "%s%lu", sep, (u_long)pgno);
if (++cnt % 10 == 0) {
fprintf(fp, "\n");
@@ -292,6 +302,7 @@ __db_meta(dbp, dbmeta, fp, fn, flags)
sep = ", ";
}
fprintf(fp, "\n");
+ fprintf(fp, "\tlast_pgno: %lu\n", (u_long)dbmeta->last_pgno);
}
if (fn != NULL) {
@@ -404,26 +415,28 @@ __db_qmeta(dbp, fp, h, flags)
* __db_prnpage
* -- Print out a specific page.
*
- * PUBLIC: int __db_prnpage __P((DB *, db_pgno_t));
+ * PUBLIC: int __db_prnpage __P((DB *, db_pgno_t, FILE *));
*/
int
-__db_prnpage(dbp, pgno)
+__db_prnpage(dbp, pgno, fp)
DB *dbp;
db_pgno_t pgno;
+ FILE *fp;
{
+ DB_MPOOLFILE *mpf;
PAGE *h;
- int ret;
+ int ret, t_ret;
- if (set_psize == PSIZE_BOUNDARY)
- __db_psize(dbp);
+ mpf = dbp->mpf;
- if ((ret = memp_fget(dbp->mpf, &pgno, 0, &h)) != 0)
+ if ((ret = mpf->get(mpf, &pgno, 0, &h)) != 0)
return (ret);
- ret = __db_prpage(dbp, h, DB_PR_PAGE);
- (void)fflush(__db_prinit(NULL));
+ ret = __db_prpage(dbp, h, fp, DB_PR_PAGE);
+
+ if ((t_ret = mpf->put(mpf, h, 0)) != 0 && ret == 0)
+ ret = t_ret;
- (void)memp_fput(dbp->mpf, h, 0);
return (ret);
}
@@ -431,32 +444,29 @@ __db_prnpage(dbp, pgno)
* __db_prpage
* -- Print out a page.
*
- * PUBLIC: int __db_prpage __P((DB *, PAGE *, u_int32_t));
+ * PUBLIC: int __db_prpage __P((DB *, PAGE *, FILE *, u_int32_t));
*/
int
-__db_prpage(dbp, h, flags)
+__db_prpage(dbp, h, fp, flags)
DB *dbp;
PAGE *h;
+ FILE *fp;
u_int32_t flags;
{
BINTERNAL *bi;
BKEYDATA *bk;
- BTREE *t;
- FILE *fp;
HOFFPAGE a_hkd;
QAMDATA *qp, *qep;
RINTERNAL *ri;
- db_indx_t dlen, len, i;
+ db_indx_t dlen, len, i, *inp;
db_pgno_t pgno;
db_recno_t recno;
+ u_int32_t pagesize, qlen;
+ u_int8_t *ep, *hk, *p;
int deleted, ret;
const char *s;
- u_int32_t qlen;
- u_int8_t *ep, *hk, *p;
void *sp;
- fp = __db_prinit(NULL);
-
/*
* If we're doing recovery testing and this page is P_INVALID,
* assume it's a page that's on the free list, and don't display it.
@@ -471,6 +481,14 @@ __db_prpage(dbp, h, flags)
return (1);
}
+ /*
+ * !!!
+ * Find out the page size. We don't want to do it the "right" way,
+ * by reading the value from the meta-data page; that's going to be
+ * slow. Reach down into the mpool region.
+ */
+ pagesize = (u_int32_t)dbp->mpf->mfp->stat.st_pagesize;
+
/* Page number, page type. */
fprintf(fp, "page %lu: %s level: %lu",
(u_long)h->pgno, s, (u_long)h->level);
@@ -500,7 +518,7 @@ __db_prpage(dbp, h, flags)
qlen = ((QUEUE *)dbp->q_internal)->re_len;
recno = (h->pgno - 1) * QAM_RECNO_PER_PAGE(dbp) + 1;
i = 0;
- qep = (QAMDATA *)((u_int8_t *)h + set_psize - qlen);
+ qep = (QAMDATA *)((u_int8_t *)h + pagesize - qlen);
for (qp = QAM_GET_RECORD(dbp, h, i); qp < qep;
recno++, i++, qp = QAM_GET_RECORD(dbp, h, i)) {
if (!F_ISSET(qp, QAM_SET))
@@ -508,9 +526,9 @@ __db_prpage(dbp, h, flags)
fprintf(fp, "%s",
F_ISSET(qp, QAM_VALID) ? "\t" : " D");
- fprintf(fp, "[%03lu] %4lu ",
- (u_long)recno, (u_long)qp - (u_long)h);
- __db_pr(qp->data, qlen);
+ fprintf(fp, "[%03lu] %4lu ", (u_long)recno,
+ (u_long)((u_int8_t *)qp - (u_int8_t *)h));
+ __db_pr(qp->data, qlen, fp);
}
return (0);
}
@@ -520,8 +538,6 @@ __db_prpage(dbp, h, flags)
fprintf(fp, " (lsn.file: %lu lsn.offset: %lu)\n",
(u_long)LSN(h).file, (u_long)LSN(h).offset);
- t = dbp->bt_internal;
-
s = "\t";
if (TYPE(h) != P_IBTREE && TYPE(h) != P_IRECNO) {
fprintf(fp, "%sprev: %4lu next: %4lu",
@@ -530,7 +546,7 @@ __db_prpage(dbp, h, flags)
}
if (TYPE(h) == P_OVERFLOW) {
fprintf(fp, "%sref cnt: %4lu ", s, (u_long)OV_REF(h));
- __db_pr((u_int8_t *)h + P_OVERHEAD, OV_LEN(h));
+ __db_pr((u_int8_t *)h + P_OVERHEAD(dbp), OV_LEN(h), fp);
return (0);
}
fprintf(fp, "%sentries: %4lu", s, (u_long)NUM_ENT(h));
@@ -540,12 +556,14 @@ __db_prpage(dbp, h, flags)
return (0);
ret = 0;
+ inp = P_INP(dbp, h);
for (i = 0; i < NUM_ENT(h); i++) {
- if (P_ENTRY(h, i) - (u_int8_t *)h < P_OVERHEAD ||
- (size_t)(P_ENTRY(h, i) - (u_int8_t *)h) >= set_psize) {
+ if ((db_alignp_t)(P_ENTRY(dbp, h, i) - (u_int8_t *)h) <
+ (db_alignp_t)(P_OVERHEAD(dbp)) ||
+ (size_t)(P_ENTRY(dbp, h, i) - (u_int8_t *)h) >= pagesize) {
fprintf(fp,
"ILLEGAL PAGE OFFSET: indx: %lu of %lu\n",
- (u_long)i, (u_long)h->inp[i]);
+ (u_long)i, (u_long)inp[i]);
ret = EINVAL;
continue;
}
@@ -554,17 +572,17 @@ __db_prpage(dbp, h, flags)
case P_HASH:
case P_IBTREE:
case P_IRECNO:
- sp = P_ENTRY(h, i);
+ sp = P_ENTRY(dbp, h, i);
break;
case P_LBTREE:
- sp = P_ENTRY(h, i);
+ sp = P_ENTRY(dbp, h, i);
deleted = i % 2 == 0 &&
- B_DISSET(GET_BKEYDATA(h, i + O_INDX)->type);
+ B_DISSET(GET_BKEYDATA(dbp, h, i + O_INDX)->type);
break;
case P_LDUP:
case P_LRECNO:
- sp = P_ENTRY(h, i);
- deleted = B_DISSET(GET_BKEYDATA(h, i)->type);
+ sp = P_ENTRY(dbp, h, i);
+ deleted = B_DISSET(GET_BKEYDATA(dbp, h, i)->type);
break;
default:
fprintf(fp,
@@ -573,7 +591,7 @@ __db_prpage(dbp, h, flags)
continue;
}
fprintf(fp, "%s", deleted ? " D" : "\t");
- fprintf(fp, "[%03lu] %4lu ", (u_long)i, (u_long)h->inp[i]);
+ fprintf(fp, "[%03lu] %4lu ", (u_long)i, (u_long)inp[i]);
switch (TYPE(h)) {
case P_HASH:
hk = sp;
@@ -592,7 +610,7 @@ __db_prpage(dbp, h, flags)
* set.
*/
if (i != 0)
- len = LEN_HKEYDATA(h, 0, i);
+ len = LEN_HKEYDATA(dbp, h, 0, i);
else
len = 1;
@@ -602,13 +620,14 @@ __db_prpage(dbp, h, flags)
memcpy(&dlen, p, sizeof(db_indx_t));
p += sizeof(db_indx_t);
fprintf(fp, "\t\t");
- __db_pr(p, dlen);
+ __db_pr(p, dlen, fp);
p += sizeof(db_indx_t) + dlen;
}
break;
case H_KEYDATA:
__db_pr(HKEYDATA_DATA(hk),
- LEN_HKEYDATA(h, i == 0 ? set_psize : 0, i));
+ LEN_HKEYDATA(dbp, h, i == 0 ?
+ pagesize : 0, i), fp);
break;
case H_OFFPAGE:
memcpy(&a_hkd, hk, HOFFPAGE_SIZE);
@@ -625,11 +644,11 @@ __db_prpage(dbp, h, flags)
(u_long)bi->type);
switch (B_TYPE(bi->type)) {
case B_KEYDATA:
- __db_pr(bi->data, bi->len);
+ __db_pr(bi->data, bi->len, fp);
break;
case B_DUPLICATE:
case B_OVERFLOW:
- __db_proff(bi->data);
+ __db_proff(bi->data, fp);
break;
default:
fprintf(fp, "ILLEGAL BINTERNAL TYPE: %lu\n",
@@ -649,11 +668,11 @@ __db_prpage(dbp, h, flags)
bk = sp;
switch (B_TYPE(bk->type)) {
case B_KEYDATA:
- __db_pr(bk->data, bk->len);
+ __db_pr(bk->data, bk->len, fp);
break;
case B_DUPLICATE:
case B_OVERFLOW:
- __db_proff(bk);
+ __db_proff(bk, fp);
break;
default:
fprintf(fp,
@@ -673,19 +692,17 @@ __db_prpage(dbp, h, flags)
* __db_pr --
* Print out a data element.
*
- * PUBLIC: void __db_pr __P((u_int8_t *, u_int32_t));
+ * PUBLIC: void __db_pr __P((u_int8_t *, u_int32_t, FILE *));
*/
void
-__db_pr(p, len)
+__db_pr(p, len, fp)
u_int8_t *p;
u_int32_t len;
-{
FILE *fp;
+{
u_int lastch;
int i;
- fp = __db_prinit(NULL);
-
fprintf(fp, "len: %3lu", (u_long)len);
lastch = '.';
if (len != 0) {
@@ -744,6 +761,13 @@ __db_prdbt(dbtp, checkprint, prefix, handle, callback, is_recno, vdp)
handle, callback, vdp, 0);
F_CLR(vdp, SALVAGE_PRINTHEADER);
F_SET(vdp, SALVAGE_PRINTFOOTER);
+
+ /*
+ * Even if the printable flag wasn't set by our immediate
+ * caller, it may be set on a salvage-wide basis.
+ */
+ if (F_ISSET(vdp, SALVAGE_PRINTABLE))
+ checkprint = 1;
}
/*
@@ -760,12 +784,12 @@ __db_prdbt(dbtp, checkprint, prefix, handle, callback, is_recno, vdp)
* in a platform-independent way. So we use the numeral in
* straight ASCII.
*/
- __ua_memcpy(&recno, dbtp->data, sizeof(recno));
+ (void)__ua_memcpy(&recno, dbtp->data, sizeof(recno));
snprintf(buf, DBTBUFLEN, "%lu", (u_long)recno);
/* If we're printing data as hex, print keys as hex too. */
if (!checkprint) {
- for (len = strlen(buf), p = buf, hp = hbuf;
+ for (len = (u_int32_t)strlen(buf), p = buf, hp = hbuf;
len-- > 0; ++p) {
*hp++ = hex[(u_int8_t)(*p & 0xf0) >> 4];
*hp++ = hex[*p & 0x0f];
@@ -810,14 +834,12 @@ __db_prdbt(dbtp, checkprint, prefix, handle, callback, is_recno, vdp)
* Print out an off-page element.
*/
static void
-__db_proff(vp)
+__db_proff(vp, fp)
void *vp;
-{
FILE *fp;
+{
BOVERFLOW *bo;
- fp = __db_prinit(NULL);
-
bo = vp;
switch (B_TYPE(bo->type)) {
case B_OVERFLOW:
@@ -834,18 +856,25 @@ __db_proff(vp)
* __db_prflags --
* Print out flags values.
*
- * PUBLIC: void __db_prflags __P((u_int32_t, const FN *, FILE *));
+ * PUBLIC: void __db_prflags __P((u_int32_t, const FN *, void *));
*/
void
-__db_prflags(flags, fn, fp)
+__db_prflags(flags, fn, vfp)
u_int32_t flags;
FN const *fn;
- FILE *fp;
+ void *vfp;
{
+ FILE *fp;
const FN *fnp;
int found;
const char *sep;
+ /*
+ * We pass the FILE * through a void * so that we can use
+ * this function as a callback.
+ */
+ fp = (FILE *)vfp;
+
sep = " (";
for (found = 0, fnp = fn; fnp->mask != 0; ++fnp)
if (LF_ISSET(fnp->mask)) {
@@ -858,62 +887,21 @@ __db_prflags(flags, fn, fp)
}
/*
- * __db_prinit --
- * Initialize tree printing routines.
- */
-static FILE *
-__db_prinit(fp)
- FILE *fp;
-{
- if (set_fp == NULL)
- set_fp = fp == NULL ? stdout : fp;
- return (set_fp);
-}
-
-/*
- * __db_psize --
- * Get the page size.
- */
-static void
-__db_psize(dbp)
- DB *dbp;
-{
- DBMETA *mp;
- db_pgno_t pgno;
-
- set_psize = PSIZE_BOUNDARY - 1;
-
- pgno = PGNO_BASE_MD;
- if (memp_fget(dbp->mpf, &pgno, 0, &mp) != 0)
- return;
-
- switch (mp->magic) {
- case DB_BTREEMAGIC:
- case DB_HASHMAGIC:
- case DB_QAMMAGIC:
- set_psize = mp->pagesize;
- break;
- }
- (void)memp_fput(dbp->mpf, mp, 0);
-}
-
-/*
* __db_dbtype_to_string --
* Return the name of the database type.
+ * PUBLIC: const char * __db_dbtype_to_string __P((DBTYPE));
*/
-static const char *
-__db_dbtype_to_string(dbp)
- DB *dbp;
+const char *
+__db_dbtype_to_string(type)
+ DBTYPE type;
{
- switch (dbp->type) {
+ switch (type) {
case DB_BTREE:
return ("btree");
case DB_HASH:
return ("hash");
- break;
case DB_RECNO:
return ("recno");
- break;
case DB_QUEUE:
return ("queue");
default:
@@ -925,10 +913,8 @@ __db_dbtype_to_string(dbp)
/*
* __db_pagetype_to_string --
* Return the name of the specified page type.
- *
- * PUBLIC: const char *__db_pagetype_to_string __P((u_int32_t));
*/
-const char *
+static const char *
__db_pagetype_to_string(type)
u_int32_t type;
{
@@ -1000,6 +986,7 @@ __db_prheader(dbp, subname, pflag, keyflag, handle, callback, vdp, meta_pgno)
DB_ENV *dbenv;
DB_HASH_STAT *hsp;
DB_QUEUE_STAT *qsp;
+ DBT dbt;
VRFY_PAGEINFO *pip;
char *buf;
int buflen, ret, t_ret;
@@ -1021,10 +1008,16 @@ __db_prheader(dbp, subname, pflag, keyflag, handle, callback, vdp, meta_pgno)
* If we've been passed a verifier statistics object, use
* that; we're being called in a context where dbp->stat
* is unsafe.
+ *
+ * Also, the verifier may set the pflag on a per-salvage basis.
+ * If so, respect that.
*/
if (vdp != NULL) {
if ((ret = __db_vrfy_getpageinfo(vdp, meta_pgno, &pip)) != 0)
return (ret);
+
+ if (F_ISSET(vdp, SALVAGE_PRINTABLE))
+ pflag = 1;
} else
pip = NULL;
@@ -1071,16 +1064,22 @@ __db_prheader(dbp, subname, pflag, keyflag, handle, callback, vdp, meta_pgno)
/*
* 64 bytes is long enough, as a minimum bound, for any of the
- * fields besides subname. Subname can be anything, and so
- * 64 + subname is big enough for all the things we need to print here.
+ * fields besides subname. Subname uses __db_prdbt and therefore
+ * does not need buffer space here.
*/
- buflen = 64 + ((subname != NULL) ? strlen(subname) : 0);
- if ((ret = __os_malloc(dbenv, buflen, NULL, &buf)) != 0)
+ buflen = 64;
+ if ((ret = __os_malloc(dbenv, buflen, &buf)) != 0)
goto err;
if (subname != NULL) {
- snprintf(buf, buflen, "database=%s\n", subname);
+ snprintf(buf, buflen, "database=");
if ((ret = callback(handle, buf)) != 0)
goto err;
+ memset(&dbt, 0, sizeof(dbt));
+ dbt.data = subname;
+ dbt.size = (u_int32_t)strlen(subname);
+ if ((ret = __db_prdbt(&dbt,
+ 1, NULL, handle, callback, 0, NULL)) != 0)
+ goto err;
}
switch (dbtype) {
case DB_BTREE:
@@ -1106,11 +1105,11 @@ __db_prheader(dbp, subname, pflag, keyflag, handle, callback, vdp, meta_pgno)
}
break;
}
- if ((ret = dbp->stat(dbp, &btsp, NULL, 0)) != 0) {
+ if ((ret = dbp->stat(dbp, &btsp, 0)) != 0) {
dbp->err(dbp, ret, "DB->stat");
goto err;
}
- if (F_ISSET(dbp, DB_BT_RECNUM))
+ if (F_ISSET(dbp, DB_AM_RECNUM))
if ((ret = callback(handle, "recnum=1\n")) != 0)
goto err;
if (btsp->bt_maxkey != 0) {
@@ -1144,7 +1143,7 @@ __db_prheader(dbp, subname, pflag, keyflag, handle, callback, vdp, meta_pgno)
}
break;
}
- if ((ret = dbp->stat(dbp, &hsp, NULL, 0)) != 0) {
+ if ((ret = dbp->stat(dbp, &hsp, 0)) != 0) {
dbp->err(dbp, ret, "DB->stat");
goto err;
}
@@ -1154,10 +1153,9 @@ __db_prheader(dbp, subname, pflag, keyflag, handle, callback, vdp, meta_pgno)
if ((ret = callback(handle, buf)) != 0)
goto err;
}
- if (hsp->hash_nelem != 0 || hsp->hash_nkeys != 0) {
- snprintf(buf, buflen, "h_nelem=%lu\n",
- hsp->hash_nelem > hsp->hash_nkeys ?
- (u_long)hsp->hash_nelem : (u_long)hsp->hash_nkeys);
+ if (hsp->hash_nkeys != 0) {
+ snprintf(buf, buflen,
+ "h_nelem=%lu\n", (u_long)hsp->hash_nkeys);
if ((ret = callback(handle, buf)) != 0)
goto err;
}
@@ -1172,15 +1170,24 @@ __db_prheader(dbp, subname, pflag, keyflag, handle, callback, vdp, meta_pgno)
goto err;
break;
}
- if ((ret = dbp->stat(dbp, &qsp, NULL, 0)) != 0) {
+ if ((ret = dbp->stat(dbp, &qsp, 0)) != 0) {
dbp->err(dbp, ret, "DB->stat");
goto err;
}
snprintf(buf, buflen, "re_len=%lu\n", (u_long)qsp->qs_re_len);
- if (qsp->qs_re_pad != 0 && qsp->qs_re_pad != ' ')
- snprintf(buf, buflen, "re_pad=%#x\n", qsp->qs_re_pad);
if ((ret = callback(handle, buf)) != 0)
goto err;
+ if (qsp->qs_re_pad != 0 && qsp->qs_re_pad != ' ') {
+ snprintf(buf, buflen, "re_pad=%#x\n", qsp->qs_re_pad);
+ if ((ret = callback(handle, buf)) != 0)
+ goto err;
+ }
+ if (qsp->qs_extentsize != 0) {
+ snprintf(buf, buflen,
+ "extentsize=%lu\n", (u_long)qsp->qs_extentsize);
+ if ((ret = callback(handle, buf)) != 0)
+ goto err;
+ }
break;
case DB_RECNO:
if ((ret = callback(handle, "type=recno\n")) != 0)
@@ -1198,14 +1205,14 @@ __db_prheader(dbp, subname, pflag, keyflag, handle, callback, vdp, meta_pgno)
}
break;
}
- if ((ret = dbp->stat(dbp, &btsp, NULL, 0)) != 0) {
+ if ((ret = dbp->stat(dbp, &btsp, 0)) != 0) {
dbp->err(dbp, ret, "DB->stat");
goto err;
}
- if (F_ISSET(dbp, DB_RE_RENUMBER))
+ if (F_ISSET(dbp, DB_AM_RENUMBER))
if ((ret = callback(handle, "renumber=1\n")) != 0)
goto err;
- if (F_ISSET(dbp, DB_RE_FIXEDLEN)) {
+ if (F_ISSET(dbp, DB_AM_FIXEDLEN)) {
snprintf(buf, buflen,
"re_len=%lu\n", (u_long)btsp->bt_re_len);
if ((ret = callback(handle, buf)) != 0)
@@ -1233,6 +1240,9 @@ __db_prheader(dbp, subname, pflag, keyflag, handle, callback, vdp, meta_pgno)
goto err;
/* We should handle page size. XXX */
} else {
+ if (F_ISSET(dbp, DB_AM_CHKSUM))
+ if ((ret = callback(handle, "chksum=1\n")) != 0)
+ goto err;
if (F_ISSET(dbp, DB_AM_DUP))
if ((ret = callback(handle, "duplicates=1\n")) != 0)
goto err;
@@ -1253,16 +1263,16 @@ __db_prheader(dbp, subname, pflag, keyflag, handle, callback, vdp, meta_pgno)
ret = callback(handle, "HEADER=END\n");
err: if (pip != NULL &&
- (t_ret = __db_vrfy_putpageinfo(vdp, pip)) != 0 && ret == 0)
+ (t_ret = __db_vrfy_putpageinfo(dbenv, vdp, pip)) != 0 && ret == 0)
ret = t_ret;
if (btsp != NULL)
- __os_free(btsp, 0);
+ __os_ufree(dbenv, btsp);
if (hsp != NULL)
- __os_free(hsp, 0);
+ __os_ufree(dbenv, hsp);
if (qsp != NULL)
- __os_free(qsp, 0);
+ __os_ufree(dbenv, qsp);
if (buf != NULL)
- __os_free(buf, buflen);
+ __os_free(dbenv, buf);
return (ret);
}
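
Note: the __db_prheader changes above emit the key=value header block in dump format (the block that db_load reads back). An abridged example of what the keys touched in this hunk produce for a queue database (values are illustrative; earlier keys such as the version line are omitted):

    database=employees
    type=queue
    re_len=128
    extentsize=16
    HEADER=END
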
diff --git a/bdb/db/db_rec.c b/bdb/db/db_rec.c
index 998d074290d..303ab2fe1d4 100644
--- a/bdb/db/db_rec.c
+++ b/bdb/db/db_rec.c
@@ -1,14 +1,14 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Copyright (c) 1996-2002
* Sleepycat Software. All rights reserved.
*/
#include "db_config.h"
#ifndef lint
-static const char revid[] = "$Id: db_rec.c,v 11.10 2000/08/03 15:32:19 ubell Exp $";
+static const char revid[] = "$Id: db_rec.c,v 11.35 2002/08/08 03:57:49 bostic Exp $";
#endif /* not lint */
#ifndef NO_SYSTEM_INCLUDES
@@ -18,9 +18,9 @@ static const char revid[] = "$Id: db_rec.c,v 11.10 2000/08/03 15:32:19 ubell Exp
#endif
#include "db_int.h"
-#include "db_page.h"
-#include "log.h"
-#include "hash.h"
+#include "dbinc/db_page.h"
+#include "dbinc/log.h"
+#include "dbinc/hash.h"
/*
* PUBLIC: int __db_addrem_recover
@@ -45,11 +45,12 @@ __db_addrem_recover(dbenv, dbtp, lsnp, op, info)
u_int32_t change;
int cmp_n, cmp_p, ret;
+ pagep = NULL;
COMPQUIET(info, NULL);
REC_PRINT(__db_addrem_print);
REC_INTRO(__db_addrem_read, 1);
- if ((ret = memp_fget(mpf, &argp->pgno, 0, &pagep)) != 0) {
+ if ((ret = mpf->get(mpf, &argp->pgno, 0, &pagep)) != 0) {
if (DB_UNDO(op)) {
/*
* We are undoing and the page doesn't exist. That
@@ -59,7 +60,7 @@ __db_addrem_recover(dbenv, dbtp, lsnp, op, info)
*/
goto done;
} else
- if ((ret = memp_fget(mpf,
+ if ((ret = mpf->get(mpf,
&argp->pgno, DB_MPOOL_CREATE, &pagep)) != 0)
goto out;
}
@@ -95,13 +96,16 @@ __db_addrem_recover(dbenv, dbtp, lsnp, op, info)
LSN(pagep) = argp->pagelsn;
}
- if ((ret = memp_fput(mpf, pagep, change)) != 0)
+ if ((ret = mpf->put(mpf, pagep, change)) != 0)
goto out;
+ pagep = NULL;
done: *lsnp = argp->prev_lsn;
ret = 0;
-out: REC_CLOSE;
+out: if (pagep != NULL)
+ (void)mpf->put(mpf, pagep, 0);
+ REC_CLOSE;
}
/*
@@ -124,11 +128,12 @@ __db_big_recover(dbenv, dbtp, lsnp, op, info)
u_int32_t change;
int cmp_n, cmp_p, ret;
+ pagep = NULL;
COMPQUIET(info, NULL);
REC_PRINT(__db_big_print);
REC_INTRO(__db_big_read, 1);
- if ((ret = memp_fget(mpf, &argp->pgno, 0, &pagep)) != 0) {
+ if ((ret = mpf->get(mpf, &argp->pgno, 0, &pagep)) != 0) {
if (DB_UNDO(op)) {
/*
* We are undoing and the page doesn't exist. That
@@ -139,7 +144,7 @@ __db_big_recover(dbenv, dbtp, lsnp, op, info)
ret = 0;
goto ppage;
} else
- if ((ret = memp_fget(mpf,
+ if ((ret = mpf->get(mpf,
&argp->pgno, DB_MPOOL_CREATE, &pagep)) != 0)
goto out;
}
@@ -161,7 +166,7 @@ __db_big_recover(dbenv, dbtp, lsnp, op, info)
argp->next_pgno, 0, P_OVERFLOW);
OV_LEN(pagep) = argp->dbt.size;
OV_REF(pagep) = 1;
- memcpy((u_int8_t *)pagep + P_OVERHEAD, argp->dbt.data,
+ memcpy((u_int8_t *)pagep + P_OVERHEAD(file_dbp), argp->dbt.data,
argp->dbt.size);
PREV_PGNO(pagep) = argp->prev_pgno;
change = DB_MPOOL_DIRTY;
@@ -177,13 +182,21 @@ __db_big_recover(dbenv, dbtp, lsnp, op, info)
if (change)
LSN(pagep) = DB_REDO(op) ? *lsnp : argp->pagelsn;
- if ((ret = memp_fput(mpf, pagep, change)) != 0)
+ if ((ret = mpf->put(mpf, pagep, change)) != 0)
goto out;
+ pagep = NULL;
+
+ /*
+ * We only delete whole overflow chains; each page in the
+ * chain is handled individually.
+ */
+ if (argp->opcode == DB_REM_BIG)
+ goto done;
/* Now check the previous page. */
ppage: if (argp->prev_pgno != PGNO_INVALID) {
change = 0;
- if ((ret = memp_fget(mpf, &argp->prev_pgno, 0, &pagep)) != 0) {
+ if ((ret = mpf->get(mpf, &argp->prev_pgno, 0, &pagep)) != 0) {
if (DB_UNDO(op)) {
/*
* We are undoing and the page doesn't exist.
@@ -195,7 +208,7 @@ ppage: if (argp->prev_pgno != PGNO_INVALID) {
ret = 0;
goto npage;
} else
- if ((ret = memp_fget(mpf, &argp->prev_pgno,
+ if ((ret = mpf->get(mpf, &argp->prev_pgno,
DB_MPOOL_CREATE, &pagep)) != 0)
goto out;
}
@@ -204,28 +217,27 @@ ppage: if (argp->prev_pgno != PGNO_INVALID) {
cmp_p = log_compare(&LSN(pagep), &argp->prevlsn);
CHECK_LSN(op, cmp_p, &LSN(pagep), &argp->prevlsn);
- if ((cmp_p == 0 && DB_REDO(op) && argp->opcode == DB_ADD_BIG) ||
- (cmp_n == 0 && DB_UNDO(op) && argp->opcode == DB_REM_BIG)) {
+ if (cmp_p == 0 && DB_REDO(op) && argp->opcode == DB_ADD_BIG) {
/* Redo add, undo delete. */
NEXT_PGNO(pagep) = argp->pgno;
change = DB_MPOOL_DIRTY;
- } else if ((cmp_n == 0 &&
- DB_UNDO(op) && argp->opcode == DB_ADD_BIG) ||
- (cmp_p == 0 && DB_REDO(op) && argp->opcode == DB_REM_BIG)) {
+ } else if (cmp_n == 0 &&
+ DB_UNDO(op) && argp->opcode == DB_ADD_BIG) {
/* Redo delete, undo add. */
NEXT_PGNO(pagep) = argp->next_pgno;
change = DB_MPOOL_DIRTY;
}
if (change)
LSN(pagep) = DB_REDO(op) ? *lsnp : argp->prevlsn;
- if ((ret = memp_fput(mpf, pagep, change)) != 0)
+ if ((ret = mpf->put(mpf, pagep, change)) != 0)
goto out;
}
+ pagep = NULL;
/* Now check the next page. Can only be set on a delete. */
npage: if (argp->next_pgno != PGNO_INVALID) {
change = 0;
- if ((ret = memp_fget(mpf, &argp->next_pgno, 0, &pagep)) != 0) {
+ if ((ret = mpf->get(mpf, &argp->next_pgno, 0, &pagep)) != 0) {
if (DB_UNDO(op)) {
/*
* We are undoing and the page doesn't exist.
@@ -235,7 +247,7 @@ npage: if (argp->next_pgno != PGNO_INVALID) {
*/
goto done;
} else
- if ((ret = memp_fget(mpf, &argp->next_pgno,
+ if ((ret = mpf->get(mpf, &argp->next_pgno,
DB_MPOOL_CREATE, &pagep)) != 0)
goto out;
}
@@ -252,21 +264,25 @@ npage: if (argp->next_pgno != PGNO_INVALID) {
}
if (change)
LSN(pagep) = DB_REDO(op) ? *lsnp : argp->nextlsn;
- if ((ret = memp_fput(mpf, pagep, change)) != 0)
+ if ((ret = mpf->put(mpf, pagep, change)) != 0)
goto out;
}
+ pagep = NULL;
done: *lsnp = argp->prev_lsn;
ret = 0;
-out: REC_CLOSE;
+out: if (pagep != NULL)
+ (void)mpf->put(mpf, pagep, 0);
+ REC_CLOSE;
}
/*
* __db_ovref_recover --
* Recovery function for __db_ovref().
*
- * PUBLIC: int __db_ovref_recover __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ * PUBLIC: int __db_ovref_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
*/
int
__db_ovref_recover(dbenv, dbtp, lsnp, op, info)
@@ -283,14 +299,15 @@ __db_ovref_recover(dbenv, dbtp, lsnp, op, info)
PAGE *pagep;
int cmp, modified, ret;
+ pagep = NULL;
COMPQUIET(info, NULL);
REC_PRINT(__db_ovref_print);
REC_INTRO(__db_ovref_read, 1);
- if ((ret = memp_fget(mpf, &argp->pgno, 0, &pagep)) != 0) {
+ if ((ret = mpf->get(mpf, &argp->pgno, 0, &pagep)) != 0) {
if (DB_UNDO(op))
goto done;
- (void)__db_pgerr(file_dbp, argp->pgno);
+ __db_pgerr(file_dbp, argp->pgno, ret);
goto out;
}
@@ -310,13 +327,16 @@ __db_ovref_recover(dbenv, dbtp, lsnp, op, info)
pagep->lsn = argp->lsn;
modified = 1;
}
- if ((ret = memp_fput(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0)) != 0)
+ if ((ret = mpf->put(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0)) != 0)
goto out;
+ pagep = NULL;
done: *lsnp = argp->prev_lsn;
ret = 0;
-out: REC_CLOSE;
+out: if (pagep != NULL)
+ (void)mpf->put(mpf, pagep, 0);
+ REC_CLOSE;
}
/*
@@ -341,6 +361,7 @@ __db_relink_recover(dbenv, dbtp, lsnp, op, info)
PAGE *pagep;
int cmp_n, cmp_p, modified, ret;
+ pagep = NULL;
COMPQUIET(info, NULL);
REC_PRINT(__db_relink_print);
REC_INTRO(__db_relink_read, 1);
@@ -351,9 +372,9 @@ __db_relink_recover(dbenv, dbtp, lsnp, op, info)
* the current page is the result of a split and is being recovered
* elsewhere, so all we need do is recover the next page.
*/
- if ((ret = memp_fget(mpf, &argp->pgno, 0, &pagep)) != 0) {
+ if ((ret = mpf->get(mpf, &argp->pgno, 0, &pagep)) != 0) {
if (DB_REDO(op)) {
- (void)__db_pgerr(file_dbp, argp->pgno);
+ __db_pgerr(file_dbp, argp->pgno, ret);
goto out;
}
goto next2;
@@ -376,12 +397,13 @@ __db_relink_recover(dbenv, dbtp, lsnp, op, info)
pagep->lsn = argp->lsn;
modified = 1;
}
-next1: if ((ret = memp_fput(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0)) != 0)
+next1: if ((ret = mpf->put(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0)) != 0)
goto out;
+ pagep = NULL;
-next2: if ((ret = memp_fget(mpf, &argp->next, 0, &pagep)) != 0) {
+next2: if ((ret = mpf->get(mpf, &argp->next, 0, &pagep)) != 0) {
if (DB_REDO(op)) {
- (void)__db_pgerr(file_dbp, argp->next);
+ __db_pgerr(file_dbp, argp->next, ret);
goto out;
}
goto prev;
@@ -409,14 +431,15 @@ next2: if ((ret = memp_fget(mpf, &argp->next, 0, &pagep)) != 0) {
else
pagep->lsn = *lsnp;
}
- if ((ret = memp_fput(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0)) != 0)
+ if ((ret = mpf->put(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0)) != 0)
goto out;
+ pagep = NULL;
if (argp->opcode == DB_ADD_PAGE)
goto done;
-prev: if ((ret = memp_fget(mpf, &argp->prev, 0, &pagep)) != 0) {
+prev: if ((ret = mpf->get(mpf, &argp->prev, 0, &pagep)) != 0) {
if (DB_REDO(op)) {
- (void)__db_pgerr(file_dbp, argp->prev);
+ __db_pgerr(file_dbp, argp->prev, ret);
goto out;
}
goto done;
@@ -441,13 +464,16 @@ prev: if ((ret = memp_fget(mpf, &argp->prev, 0, &pagep)) != 0) {
else
pagep->lsn = *lsnp;
}
- if ((ret = memp_fput(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0)) != 0)
+ if ((ret = mpf->put(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0)) != 0)
goto out;
+ pagep = NULL;
done: *lsnp = argp->prev_lsn;
ret = 0;
-out: REC_CLOSE;
+out: if (pagep != NULL)
+ (void)mpf->put(mpf, pagep, 0);
+ REC_CLOSE;
}
/*
@@ -468,8 +494,8 @@ __db_debug_recover(dbenv, dbtp, lsnp, op, info)
__db_debug_args *argp;
int ret;
- COMPQUIET(op, 0);
COMPQUIET(dbenv, NULL);
+ COMPQUIET(op, DB_TXN_ABORT);
COMPQUIET(info, NULL);
REC_PRINT(__db_debug_print);
@@ -504,11 +530,12 @@ __db_noop_recover(dbenv, dbtp, lsnp, op, info)
u_int32_t change;
int cmp_n, cmp_p, ret;
+ pagep = NULL;
COMPQUIET(info, NULL);
REC_PRINT(__db_noop_print);
REC_INTRO(__db_noop_read, 0);
- if ((ret = memp_fget(mpf, &argp->pgno, 0, &pagep)) != 0)
+ if ((ret = mpf->get(mpf, &argp->pgno, 0, &pagep)) != 0)
goto out;
cmp_n = log_compare(lsnp, &LSN(pagep));
@@ -522,8 +549,349 @@ __db_noop_recover(dbenv, dbtp, lsnp, op, info)
LSN(pagep) = argp->prevlsn;
change = DB_MPOOL_DIRTY;
}
- ret = memp_fput(mpf, pagep, change);
+ ret = mpf->put(mpf, pagep, change);
+ pagep = NULL;
done: *lsnp = argp->prev_lsn;
-out: REC_CLOSE;
+out: if (pagep != NULL)
+ (void)mpf->put(mpf, pagep, 0);
+ REC_CLOSE;
+}
+
+/*
+ * __db_pg_alloc_recover --
+ * Recovery function for pg_alloc.
+ *
+ * PUBLIC: int __db_pg_alloc_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__db_pg_alloc_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __db_pg_alloc_args *argp;
+ DB *file_dbp;
+ DBC *dbc;
+ DBMETA *meta;
+ DB_MPOOLFILE *mpf;
+ PAGE *pagep;
+ db_pgno_t pgno;
+ int cmp_n, cmp_p, created, level, modified, ret;
+
+ meta = NULL;
+ pagep = NULL;
+ REC_PRINT(__db_pg_alloc_print);
+ REC_INTRO(__db_pg_alloc_read, 0);
+
+ /*
+ * Fix up the allocated page. If we're redoing the operation, we have
+ * to get the page (creating it if it doesn't exist), and update its
+ * LSN. If we're undoing the operation, we have to reset the page's
+ * LSN and put it on the free list.
+ *
+ * Fix up the metadata page. If we're redoing the operation, we have
+ * to get the metadata page and update its LSN and its free pointer.
+ * If we're undoing the operation and the page was ever created, we put
+ * it on the freelist.
+ */
+ pgno = PGNO_BASE_MD;
+ if ((ret = mpf->get(mpf, &pgno, 0, &meta)) != 0) {
+ /* The metadata page must always exist on redo. */
+ if (DB_REDO(op)) {
+ __db_pgerr(file_dbp, pgno, ret);
+ goto out;
+ } else
+ goto done;
+ }
+ created = modified = 0;
+ if ((ret = mpf->get(mpf, &argp->pgno, 0, &pagep)) != 0) {
+ /*
+ * We have to be able to identify if a page was newly
+ * created so we can recover it properly. We cannot simply
+ * look for an empty header, because hash uses a pgin
+ * function that will set the header. Instead, we explicitly
+ * try for the page without CREATE and if that fails, then
+ * create it.
+ */
+ if ((ret =
+ mpf->get(mpf, &argp->pgno, DB_MPOOL_CREATE, &pagep)) != 0) {
+ __db_pgerr(file_dbp, argp->pgno, ret);
+ goto out;
+ }
+ created = modified = 1;
+ }
+
+ /* Fix up the allocated page. */
+ cmp_n = log_compare(lsnp, &LSN(pagep));
+ cmp_p = log_compare(&LSN(pagep), &argp->page_lsn);
+
+ /*
+ * If an initial allocation is aborted and then reallocated
+ * during an archival restore, the log record will have
+ * an LSN for the page but the page will be empty.
+ */
+ if (IS_ZERO_LSN(LSN(pagep)))
+ cmp_p = 0;
+ CHECK_LSN(op, cmp_p, &LSN(pagep), &argp->page_lsn);
+ /*
+ * If we rolled back this allocation previously during an
+ * archive restore, the page may have the LSN of the meta page
+ * at the point of the roll back. This will be no more
+ * than the LSN of the metadata page at the time of this allocation.
+ * Another special case we have to handle is if we ended up with a
+ * page of all 0's which can happen if we abort between allocating a
+ * page in mpool and initializing it. In that case, even if we're
+ * undoing, we need to re-initialize the page.
+ */
+ if (DB_REDO(op) &&
+ (cmp_p == 0 ||
+ (IS_ZERO_LSN(argp->page_lsn) &&
+ log_compare(&LSN(pagep), &argp->meta_lsn) <= 0))) {
+ /* Need to redo update described. */
+ switch (argp->ptype) {
+ case P_LBTREE:
+ case P_LRECNO:
+ case P_LDUP:
+ level = LEAFLEVEL;
+ break;
+ default:
+ level = 0;
+ break;
+ }
+ P_INIT(pagep, file_dbp->pgsize,
+ argp->pgno, PGNO_INVALID, PGNO_INVALID, level, argp->ptype);
+
+ pagep->lsn = *lsnp;
+ modified = 1;
+ } else if (DB_UNDO(op) && (cmp_n == 0 || created)) {
+ /*
+ * This is where we handle the case of a 0'd page (pagep->pgno
+ * is equal to PGNO_INVALID).
+ * Undo the allocation, reinitialize the page and
+ * link its next pointer to the free list.
+ */
+ P_INIT(pagep, file_dbp->pgsize,
+ argp->pgno, PGNO_INVALID, argp->next, 0, P_INVALID);
+
+ pagep->lsn = argp->page_lsn;
+ modified = 1;
+ }
+
+ /*
+ * If the page was newly created, put it on the limbo list.
+ */
+ if (IS_ZERO_LSN(LSN(pagep)) &&
+ IS_ZERO_LSN(argp->page_lsn) && DB_UNDO(op)) {
+ /* Put the page in limbo.*/
+ if ((ret = __db_add_limbo(dbenv,
+ info, argp->fileid, argp->pgno, 1)) != 0)
+ goto out;
+ }
+
+ if ((ret = mpf->put(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0)) != 0)
+ goto out;
+ pagep = NULL;
+
+ /* Fix up the metadata page. */
+ modified = 0;
+ cmp_n = log_compare(lsnp, &LSN(meta));
+ cmp_p = log_compare(&LSN(meta), &argp->meta_lsn);
+ CHECK_LSN(op, cmp_p, &LSN(meta), &argp->meta_lsn);
+ if (cmp_p == 0 && DB_REDO(op)) {
+ /* Need to redo update described. */
+ LSN(meta) = *lsnp;
+ meta->free = argp->next;
+ modified = 1;
+ } else if (cmp_n == 0 && DB_UNDO(op)) {
+ /* Need to undo update described. */
+ LSN(meta) = argp->meta_lsn;
+
+ /*
+ * If the page has a zero LSN then it is newly created
+ * and will go into limbo rather than directly onto the
+ * free list.
+ */
+ if (!IS_ZERO_LSN(argp->page_lsn))
+ meta->free = argp->pgno;
+ modified = 1;
+ }
+ if ((ret = mpf->put(mpf, meta, modified ? DB_MPOOL_DIRTY : 0)) != 0)
+ goto out;
+ meta = NULL;
+ /*
+ * This could be the metapage from a subdb which is read from disk
+ * to recover its creation.
+ */
+ if (F_ISSET(file_dbp, DB_AM_SUBDB))
+ switch (argp->type) {
+ case P_BTREEMETA:
+ case P_HASHMETA:
+ case P_QAMMETA:
+ file_dbp->sync(file_dbp, 0);
+ break;
+ }
+
+done: *lsnp = argp->prev_lsn;
+ ret = 0;
+
+out: if (pagep != NULL)
+ (void)mpf->put(mpf, pagep, 0);
+ if (meta != NULL)
+ (void)mpf->put(mpf, meta, 0);
+ if (ret == ENOENT && op == DB_TXN_BACKWARD_ALLOC)
+ ret = 0;
+ REC_CLOSE;
+}
+
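
Note: every recovery function in this file follows the same LSN discipline: compare the page's LSN against the LSNs carried in the log record to decide whether the page still needs the redo or the undo. A reduced sketch of that decision, with the Berkeley DB types boiled down to essentials (lsn_cmp stands in for log_compare; none of these names are the library's):

    struct lsn { unsigned int file, offset; };

    static int
    lsn_cmp(const struct lsn *a, const struct lsn *b)
    {
            if (a->file != b->file)
                    return (a->file < b->file ? -1 : 1);
            if (a->offset != b->offset)
                    return (a->offset < b->offset ? -1 : 1);
            return (0);
    }

    /*
     * Decide whether a page must be touched.  rec_lsn is the LSN of the
     * log record being recovered, prev_lsn the page LSN the record saved
     * before the update.  Returns 1 and updates *page_lsn if the page
     * is modified.
     */
    static int
    apply_record(int redo, struct lsn *page_lsn,
        const struct lsn *rec_lsn, const struct lsn *prev_lsn)
    {
            if (redo && lsn_cmp(page_lsn, prev_lsn) == 0) {
                    /* Page still pre-dates the update: redo it. */
                    *page_lsn = *rec_lsn;
                    return (1);
            }
            if (!redo && lsn_cmp(page_lsn, rec_lsn) == 0) {
                    /* Page reflects the update: roll it back. */
                    *page_lsn = *prev_lsn;
                    return (1);
            }
            return (0);     /* Page already in the desired state. */
    }
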
+/*
+ * __db_pg_free_recover --
+ * Recovery function for pg_free.
+ *
+ * PUBLIC: int __db_pg_free_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__db_pg_free_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __db_pg_free_args *argp;
+ DB *file_dbp;
+ DBC *dbc;
+ DBMETA *meta;
+ DB_LSN copy_lsn;
+ DB_MPOOLFILE *mpf;
+ PAGE *pagep;
+ db_pgno_t pgno;
+ int cmp_n, cmp_p, modified, ret;
+
+ COMPQUIET(info, NULL);
+ meta = NULL;
+ pagep = NULL;
+ REC_PRINT(__db_pg_free_print);
+ REC_INTRO(__db_pg_free_read, 1);
+
+ /*
+ * Fix up the freed page. If we're redoing the operation we get the
+ * page and explicitly discard its contents, then update its LSN. If
+ * we're undoing the operation, we get the page and restore its header.
+ * Create the page if necessary, we may be freeing an aborted
+ * create.
+ */
+ if ((ret = mpf->get(mpf, &argp->pgno, DB_MPOOL_CREATE, &pagep)) != 0)
+ goto out;
+ modified = 0;
+ (void)__ua_memcpy(&copy_lsn, &LSN(argp->header.data), sizeof(DB_LSN));
+ cmp_n = log_compare(lsnp, &LSN(pagep));
+ cmp_p = log_compare(&LSN(pagep), &copy_lsn);
+ CHECK_LSN(op, cmp_p, &LSN(pagep), &copy_lsn);
+ if (DB_REDO(op) &&
+ (cmp_p == 0 ||
+ (IS_ZERO_LSN(copy_lsn) &&
+ log_compare(&LSN(pagep), &argp->meta_lsn) <= 0))) {
+ /* Need to redo update described. */
+ P_INIT(pagep, file_dbp->pgsize,
+ argp->pgno, PGNO_INVALID, argp->next, 0, P_INVALID);
+ pagep->lsn = *lsnp;
+
+ modified = 1;
+ } else if (cmp_n == 0 && DB_UNDO(op)) {
+ /* Need to undo update described. */
+ memcpy(pagep, argp->header.data, argp->header.size);
+
+ modified = 1;
+ }
+ if ((ret = mpf->put(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0)) != 0)
+ goto out;
+ pagep = NULL;
+
+ /*
+ * Fix up the metadata page. If we're redoing or undoing the operation
+ * we get the page and update its LSN and free pointer.
+ */
+ pgno = PGNO_BASE_MD;
+ if ((ret = mpf->get(mpf, &pgno, 0, &meta)) != 0) {
+ /* The metadata page must always exist. */
+ __db_pgerr(file_dbp, pgno, ret);
+ goto out;
+ }
+
+ modified = 0;
+ cmp_n = log_compare(lsnp, &LSN(meta));
+ cmp_p = log_compare(&LSN(meta), &argp->meta_lsn);
+ CHECK_LSN(op, cmp_p, &LSN(meta), &argp->meta_lsn);
+ if (cmp_p == 0 && DB_REDO(op)) {
+ /* Need to redo the deallocation. */
+ meta->free = argp->pgno;
+ LSN(meta) = *lsnp;
+ modified = 1;
+ } else if (cmp_n == 0 && DB_UNDO(op)) {
+ /* Need to undo the deallocation. */
+ meta->free = argp->next;
+ LSN(meta) = argp->meta_lsn;
+ modified = 1;
+ }
+ if ((ret = mpf->put(mpf, meta, modified ? DB_MPOOL_DIRTY : 0)) != 0)
+ goto out;
+ meta = NULL;
+
+done: *lsnp = argp->prev_lsn;
+ ret = 0;
+
+out: if (pagep != NULL)
+ (void)mpf->put(mpf, pagep, 0);
+ if (meta != NULL)
+ (void)mpf->put(mpf, meta, 0);
+ REC_CLOSE;
+}
+
+/*
+ * __db_cksum_recover --
+ * Recovery function for checksum failure log record.
+ *
+ * PUBLIC: int __db_cksum_recover __P((DB_ENV *,
+ * PUBLIC: DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__db_cksum_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __db_cksum_args *argp;
+
+ int ret;
+
+ COMPQUIET(info, NULL);
+ COMPQUIET(lsnp, NULL);
+ COMPQUIET(op, DB_TXN_ABORT);
+
+ REC_PRINT(__db_cksum_print);
+
+ if ((ret = __db_cksum_read(dbenv, dbtp->data, &argp)) != 0)
+ return (ret);
+
+ /*
+ * We had a checksum failure -- the only option is to run catastrophic
+ * recovery.
+ */
+ if (F_ISSET(dbenv, DB_ENV_FATAL))
+ ret = 0;
+ else {
+ __db_err(dbenv,
+ "Checksum failure requires catastrophic recovery");
+ ret = __db_panic(dbenv, DB_RUNRECOVERY);
+ }
+
+ __os_free(dbenv, argp);
+ return (ret);
}
diff --git a/bdb/db/db_reclaim.c b/bdb/db/db_reclaim.c
index 739f348407d..9aa39bcfa9b 100644
--- a/bdb/db/db_reclaim.c
+++ b/bdb/db/db_reclaim.c
@@ -1,74 +1,26 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Copyright (c) 1996-2002
* Sleepycat Software. All rights reserved.
*/
#include "db_config.h"
#ifndef lint
-static const char revid[] = "$Id: db_reclaim.c,v 11.5 2000/04/07 14:26:58 bostic Exp $";
+static const char revid[] = "$Id: db_reclaim.c,v 11.28 2002/08/06 06:11:17 bostic Exp $";
#endif /* not lint */
#ifndef NO_SYSTEM_INCLUDES
#include <sys/types.h>
+#include <string.h>
#endif
#include "db_int.h"
-#include "db_page.h"
-#include "db_am.h"
-
-/*
- * Assume that we enter with a valid pgno. We traverse a set of
- * duplicate pages. The format of the callback routine is:
- * callback(dbp, page, cookie, did_put). did_put is an output
- * value that will be set to 1 by the callback routine if it
- * already put the page back. Otherwise, this routine must
- * put the page.
- *
- * PUBLIC: int __db_traverse_dup __P((DB *,
- * PUBLIC: db_pgno_t, int (*)(DB *, PAGE *, void *, int *), void *));
- */
-int
-__db_traverse_dup(dbp, pgno, callback, cookie)
- DB *dbp;
- db_pgno_t pgno;
- int (*callback) __P((DB *, PAGE *, void *, int *));
- void *cookie;
-{
- PAGE *p;
- int did_put, i, opgno, ret;
-
- do {
- did_put = 0;
- if ((ret = memp_fget(dbp->mpf, &pgno, 0, &p)) != 0)
- return (ret);
- pgno = NEXT_PGNO(p);
-
- for (i = 0; i < NUM_ENT(p); i++) {
- if (B_TYPE(GET_BKEYDATA(p, i)->type) == B_OVERFLOW) {
- opgno = GET_BOVERFLOW(p, i)->pgno;
- if ((ret = __db_traverse_big(dbp,
- opgno, callback, cookie)) != 0)
- goto err;
- }
- }
-
- if ((ret = callback(dbp, p, cookie, &did_put)) != 0)
- goto err;
-
- if (!did_put)
- if ((ret = memp_fput(dbp->mpf, p, 0)) != 0)
- return (ret);
- } while (pgno != PGNO_INVALID);
-
- if (0) {
-err: if (did_put == 0)
- (void)memp_fput(dbp->mpf, p, 0);
- }
- return (ret);
-}
+#include "dbinc/db_page.h"
+#include "dbinc/db_shash.h"
+#include "dbinc/btree.h"
+#include "dbinc/lock.h"
/*
* __db_traverse_big
@@ -88,17 +40,20 @@ __db_traverse_big(dbp, pgno, callback, cookie)
int (*callback) __P((DB *, PAGE *, void *, int *));
void *cookie;
{
+ DB_MPOOLFILE *mpf;
PAGE *p;
int did_put, ret;
+ mpf = dbp->mpf;
+
do {
did_put = 0;
- if ((ret = memp_fget(dbp->mpf, &pgno, 0, &p)) != 0)
+ if ((ret = mpf->get(mpf, &pgno, 0, &p)) != 0)
return (ret);
pgno = NEXT_PGNO(p);
if ((ret = callback(dbp, p, cookie, &did_put)) == 0 &&
!did_put)
- ret = memp_fput(dbp->mpf, p, 0);
+ ret = mpf->put(mpf, p, 0);
} while (ret == 0 && pgno != PGNO_INVALID);
return (ret);
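
Note: callbacks passed to __db_traverse_big follow the did_put protocol visible above: a callback sets *did_put to 1 only if it has already returned the page to the memory pool, otherwise the traversal routine puts the page itself. A hedged sketch of a conforming callback (assumes the usual db_int.h and dbinc/db_page.h includes; the counter cookie is illustrative):

    /* Counts pages and lets the traversal code put each page back. */
    static int
    count_pages_callback(dbp, p, cookie, did_put)
            DB *dbp;
            PAGE *p;
            void *cookie;
            int *did_put;
    {
            u_int32_t *countp = cookie;     /* caller-supplied accumulator */

            COMPQUIET(dbp, NULL);           /* unused here */
            COMPQUIET(p, NULL);

            ++*countp;                      /* one more page visited */
            *did_put = 0;                   /* we did not put the page */
            return (0);
    }
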
@@ -132,3 +87,162 @@ __db_reclaim_callback(dbp, p, cookie, putp)
return (0);
}
+
+/*
+ * __db_truncate_callback
+ * This is the callback routine used during a truncate.
+ * we are traversing a btree or hash table and trying to free all the
+ * pages.
+ *
+ * PUBLIC: int __db_truncate_callback __P((DB *, PAGE *, void *, int *));
+ */
+int
+__db_truncate_callback(dbp, p, cookie, putp)
+ DB *dbp;
+ PAGE *p;
+ void *cookie;
+ int *putp;
+{
+ DBMETA *meta;
+ DBT ldbt;
+ DB_LOCK metalock;
+ DB_MPOOLFILE *mpf;
+ db_indx_t indx, len, off, tlen, top;
+ db_pgno_t pgno;
+ db_trunc_param *param;
+ u_int8_t *hk, type;
+ int ret;
+
+ top = NUM_ENT(p);
+ mpf = dbp->mpf;
+ param = cookie;
+ *putp = 1;
+
+ switch (TYPE(p)) {
+ case P_LBTREE:
+ /* Skip for off-page duplicates and deleted items. */
+ for (indx = 0; indx < top; indx += P_INDX) {
+ type = GET_BKEYDATA(dbp, p, indx + O_INDX)->type;
+ if (!B_DISSET(type) && B_TYPE(type) != B_DUPLICATE)
+ ++param->count;
+ }
+ /* FALLTHROUGH */
+ case P_IBTREE:
+ case P_IRECNO:
+ case P_INVALID:
+ if (dbp->type != DB_HASH &&
+ ((BTREE *)dbp->bt_internal)->bt_root == PGNO(p)) {
+ type = dbp->type == DB_RECNO ? P_LRECNO : P_LBTREE;
+ goto reinit;
+ }
+ break;
+ case P_OVERFLOW:
+ if (DBC_LOGGING(param->dbc)) {
+ if ((ret = __db_ovref_log(dbp, param->dbc->txn,
+ &LSN(p), 0, p->pgno, -1, &LSN(p))) != 0)
+ return (ret);
+ } else
+ LSN_NOT_LOGGED(LSN(p));
+ if (--OV_REF(p) != 0)
+ *putp = 0;
+ break;
+ case P_LRECNO:
+ param->count += top;
+ if (((BTREE *)dbp->bt_internal)->bt_root == PGNO(p)) {
+ type = P_LRECNO;
+ goto reinit;
+ }
+ break;
+ case P_LDUP:
+ /* Correct for deleted items. */
+ for (indx = 0; indx < top; indx += O_INDX)
+ if (!B_DISSET(GET_BKEYDATA(dbp, p, indx)->type))
+ ++param->count;
+
+ break;
+ case P_HASH:
+ /* Correct for on-page duplicates and deleted items. */
+ for (indx = 0; indx < top; indx += P_INDX) {
+ switch (*H_PAIRDATA(dbp, p, indx)) {
+ case H_OFFDUP:
+ case H_OFFPAGE:
+ break;
+ case H_KEYDATA:
+ ++param->count;
+ break;
+ case H_DUPLICATE:
+ tlen = LEN_HDATA(dbp, p, 0, indx);
+ hk = H_PAIRDATA(dbp, p, indx);
+ for (off = 0; off < tlen;
+ off += len + 2 * sizeof (db_indx_t)) {
+ ++param->count;
+ memcpy(&len,
+ HKEYDATA_DATA(hk)
+ + off, sizeof(db_indx_t));
+ }
+ }
+ }
+ /* Don't free the head of the bucket. */
+ if (PREV_PGNO(p) == PGNO_INVALID) {
+ type = P_HASH;
+
+reinit: *putp = 0;
+ if (DBC_LOGGING(param->dbc)) {
+ pgno = PGNO_BASE_MD;
+ if ((ret = __db_lget(param->dbc, LCK_ALWAYS,
+ pgno, DB_LOCK_WRITE, 0, &metalock)) != 0)
+ return (ret);
+ if ((ret = mpf->get(mpf,
+ &pgno, 0, (PAGE **)&meta)) != 0) {
+ goto err;
+ }
+ memset(&ldbt, 0, sizeof(ldbt));
+ ldbt.data = p;
+ ldbt.size = P_OVERHEAD(dbp);
+ if ((ret = __db_pg_free_log(dbp,
+ param->dbc->txn, &LSN(meta), 0,
+ p->pgno, &LSN(meta),
+ PGNO_BASE_MD, &ldbt, meta->free)) != 0)
+ goto err;
+ LSN(p) = LSN(meta);
+
+ if ((ret =
+ __db_pg_alloc_log(dbp,
+ param->dbc->txn, &LSN(meta), 0,
+ &LSN(meta), PGNO_BASE_MD,
+ &p->lsn, p->pgno, type, meta->free)) != 0) {
+err: (void)mpf->put(mpf, (PAGE *)meta, 0);
+ (void)__TLPUT(param->dbc, metalock);
+ return (ret);
+ }
+ LSN(p) = LSN(meta);
+
+ if ((ret = mpf->put(mpf,
+ (PAGE *)meta, DB_MPOOL_DIRTY)) != 0) {
+ (void)__TLPUT(param->dbc, metalock);
+ return (ret);
+ }
+ if ((ret = __TLPUT(param->dbc, metalock)) != 0)
+ return (ret);
+ } else
+ LSN_NOT_LOGGED(LSN(p));
+
+ P_INIT(p, dbp->pgsize, PGNO(p), PGNO_INVALID,
+ PGNO_INVALID, type == P_HASH ? 0 : 1, type);
+ }
+ break;
+ default:
+ return (__db_pgfmt(dbp->dbenv, p->pgno));
+ }
+
+ if (*putp == 1) {
+ if ((ret = __db_free(param->dbc, p)) != 0)
+ return (ret);
+ } else {
+ if ((ret = mpf->put(mpf, p, DB_MPOOL_DIRTY)) != 0)
+ return (ret);
+ *putp = 1;
+ }
+
+ return (0);
+}
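
Note: the H_DUPLICATE branch above walks an on-page duplicate set in which each element is stored as a leading length, the data bytes, and a trailing copy of the length. A standalone sketch of the same walk over an ordinary byte buffer (the layout is taken from the loop above; idx_t and the demo buffer are illustrative):

    #include <stdio.h>
    #include <string.h>

    typedef unsigned short idx_t;           /* stands in for db_indx_t */

    /* Count the elements in a [len][data][len] duplicate set. */
    static unsigned int
    count_dup_elements(const unsigned char *set, size_t tlen)
    {
            idx_t len;
            size_t off;
            unsigned int count;

            count = 0;
            for (off = 0; off < tlen; off += len + 2 * sizeof(idx_t)) {
                    memcpy(&len, set + off, sizeof(idx_t));
                    ++count;
            }
            return (count);
    }

    int
    main(void)
    {
            unsigned char buf[64];
            const char *items[] = { "ab", "xyz" };
            size_t off = 0;
            int i;

            /* Build a two-element set: len, data, len for each item. */
            for (i = 0; i < 2; i++) {
                    idx_t len = (idx_t)strlen(items[i]);

                    memcpy(buf + off, &len, sizeof(len)); off += sizeof(len);
                    memcpy(buf + off, items[i], len);     off += len;
                    memcpy(buf + off, &len, sizeof(len)); off += sizeof(len);
            }

            printf("%u elements\n", count_dup_elements(buf, off));  /* prints 2 */
            return (0);
    }
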
diff --git a/bdb/db/db_remove.c b/bdb/db/db_remove.c
new file mode 100644
index 00000000000..ef11c342555
--- /dev/null
+++ b/bdb/db/db_remove.c
@@ -0,0 +1,318 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2001-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: db_remove.c,v 11.203 2002/08/19 18:34:18 margo Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_page.h"
+#include "dbinc/fop.h"
+#include "dbinc/btree.h"
+#include "dbinc/hash.h"
+#include "dbinc/db_shash.h"
+#include "dbinc/lock.h"
+
+static int __db_subdb_remove __P((DB *, DB_TXN *, const char *, const char *));
+static int __db_dbtxn_remove __P((DB *, DB_TXN *, const char *));
+
+/*
+ * __dbenv_dbremove
+ * Remove method for DB_ENV.
+ *
+ * PUBLIC: int __dbenv_dbremove __P((DB_ENV *,
+ * PUBLIC: DB_TXN *, const char *, const char *, u_int32_t));
+ */
+int
+__dbenv_dbremove(dbenv, txn, name, subdb, flags)
+ DB_ENV *dbenv;
+ DB_TXN *txn;
+ const char *name, *subdb;
+ u_int32_t flags;
+{
+ DB *dbp;
+ int ret, t_ret, txn_local;
+
+ txn_local = 0;
+
+ PANIC_CHECK(dbenv);
+ ENV_ILLEGAL_BEFORE_OPEN(dbenv, "DB_ENV->dbremove");
+
+ /* Validate arguments. */
+ if ((ret = __db_fchk(dbenv, "DB->remove", flags, DB_AUTO_COMMIT)) != 0)
+ return (ret);
+
+ if ((ret = db_create(&dbp, dbenv, 0)) != 0)
+ return (ret);
+
+ /*
+ * Create local transaction as necessary, check for consistent
+ * transaction usage.
+ */
+ if (IS_AUTO_COMMIT(dbenv, txn, flags)) {
+ if ((ret = __db_txn_auto(dbp, &txn)) != 0)
+ return (ret);
+ txn_local = 1;
+ } else
+ if (txn != NULL && !TXN_ON(dbenv))
+ return (__db_not_txn_env(dbenv));
+
+ ret = __db_remove_i(dbp, txn, name, subdb);
+
+ /* Commit for DB_AUTO_COMMIT. */
+ if (txn_local) {
+ if (ret == 0)
+ ret = txn->commit(txn, 0);
+ else
+ if ((t_ret = txn->abort(txn)) != 0)
+ ret = __db_panic(dbenv, t_ret);
+ /*
+ * We created the DBP here and when we committed/aborted,
+ * we release all the transactional locks, which include
+ * the handle lock; mark the handle cleared explicitly.
+ */
+ LOCK_INIT(dbp->handle_lock);
+ dbp->lid = DB_LOCK_INVALIDID;
+ }
+
+ /*
+ * We never opened this dbp for real, so don't call the transactional
+ * version of DB->close, and use NOSYNC to avoid calling into mpool.
+ */
+ if ((t_ret = dbp->close(dbp, DB_NOSYNC)) != 0 && ret == 0)
+ ret = t_ret;
+
+ return (ret);
+}
+
+/*
+ * __db_remove
+ * Remove method for DB.
+ *
+ * PUBLIC: int __db_remove __P((DB *, const char *, const char *, u_int32_t));
+ */
+int
+__db_remove(dbp, name, subdb, flags)
+ DB *dbp;
+ const char *name, *subdb;
+ u_int32_t flags;
+{
+ DB_ENV *dbenv;
+ int ret, t_ret;
+
+ dbenv = dbp->dbenv;
+
+ PANIC_CHECK(dbenv);
+
+ /*
+ * Validate arguments, continuing to destroy the handle on failure.
+ *
+ * Cannot use DB_ILLEGAL_AFTER_OPEN directly because it returns.
+ *
+ * !!!
+ * We have a serious problem if we're here with a handle used to open
+ * a database -- we'll destroy the handle, and the application won't
+ * ever be able to close the database.
+ */
+ if (F_ISSET(dbp, DB_AM_OPEN_CALLED)) {
+ ret = __db_mi_open(dbenv, "DB->remove", 1);
+ goto err;
+ }
+
+ /* Validate arguments. */
+ if ((ret = __db_fchk(dbenv, "DB->remove", flags, 0)) != 0)
+ goto err;
+
+ /* Check for consistent transaction usage. */
+ if ((ret = __db_check_txn(dbp, NULL, DB_LOCK_INVALIDID, 0)) != 0)
+ goto err;
+
+ /* Remove the file. */
+ ret = __db_remove_i(dbp, NULL, name, subdb);
+
+ /*
+ * We never opened this dbp for real, use NOSYNC to avoid calling into
+ * mpool.
+ */
+err: if ((t_ret = dbp->close(dbp, DB_NOSYNC)) != 0 && ret == 0)
+ ret = t_ret;
+
+ return (ret);
+}
+
+/*
+ * __db_remove_i
+ * Internal remove method for DB.
+ *
+ * PUBLIC: int __db_remove_i __P((DB *, DB_TXN *, const char *, const char *));
+ */
+int
+__db_remove_i(dbp, txn, name, subdb)
+ DB *dbp;
+ DB_TXN *txn;
+ const char *name, *subdb;
+{
+ DB_ENV *dbenv;
+ DB_LSN newlsn;
+ int ret;
+ char *real_name;
+
+ dbenv = dbp->dbenv;
+ real_name = NULL;
+
+ /* Handle subdatabase removes separately. */
+ if (subdb != NULL)
+ return (__db_subdb_remove(dbp, txn, name, subdb));
+
+ /* Handle transactional file removes separately. */
+ if (txn != NULL)
+ return (__db_dbtxn_remove(dbp, txn, name));
+
+ /*
+ * The remaining case is a non-transactional file remove.
+ *
+ * Find the real name of the file.
+ */
+ if ((ret = __db_appname(dbenv,
+ DB_APP_DATA, name, 0, NULL, &real_name)) != 0)
+ return (ret);
+
+ if ((ret = __fop_remove_setup(dbp, NULL, real_name, 0)) != 0)
+ goto err;
+
+ if (dbp->db_am_remove != NULL &&
+ (ret = dbp->db_am_remove(dbp, NULL, name, subdb, &newlsn)) != 0)
+ goto err;
+
+ ret = __fop_remove(dbenv, NULL, dbp->fileid, name, DB_APP_DATA);
+
+err:
+ if (real_name != NULL)
+ __os_free(dbenv, real_name);
+
+ return (ret);
+}
+
+/*
+ * __db_subdb_remove --
+ * Remove a subdatabase.
+ */
+static int
+__db_subdb_remove(dbp, txn, name, subdb)
+ DB *dbp;
+ DB_TXN *txn;
+ const char *name, *subdb;
+{
+ DB *mdbp, *sdbp;
+ int ret, t_ret;
+
+ mdbp = sdbp = NULL;
+
+ /* Open the subdatabase. */
+ if ((ret = db_create(&sdbp, dbp->dbenv, 0)) != 0)
+ goto err;
+ if ((ret = __db_open(sdbp,
+ txn, name, subdb, DB_UNKNOWN, DB_WRITEOPEN, 0)) != 0)
+ goto err;
+
+ DB_TEST_RECOVERY(sdbp, DB_TEST_PREDESTROY, ret, name);
+
+ /* Free up the pages in the subdatabase. */
+ switch (sdbp->type) {
+ case DB_BTREE:
+ case DB_RECNO:
+ if ((ret = __bam_reclaim(sdbp, txn)) != 0)
+ goto err;
+ break;
+ case DB_HASH:
+ if ((ret = __ham_reclaim(sdbp, txn)) != 0)
+ goto err;
+ break;
+ default:
+ ret = __db_unknown_type(
+ sdbp->dbenv, "__db_subdb_remove", sdbp->type);
+ goto err;
+ }
+
+ /*
+ * Remove the entry from the main database and free the subdatabase
+ * metadata page.
+ */
+ if ((ret = __db_master_open(sdbp, txn, name, 0, 0, &mdbp)) != 0)
+ goto err;
+
+ if ((ret = __db_master_update(
+ mdbp, sdbp, txn, subdb, sdbp->type, MU_REMOVE, NULL, 0)) != 0)
+ goto err;
+
+ DB_TEST_RECOVERY(sdbp, DB_TEST_POSTDESTROY, ret, name);
+
+DB_TEST_RECOVERY_LABEL
+err:
+ /* Close the main and subdatabases. */
+ if ((t_ret = __db_close_i(sdbp, txn, 0)) != 0 && ret == 0)
+ ret = t_ret;
+
+ if (mdbp != NULL &&
+ (t_ret = __db_close_i(mdbp, txn, 0)) != 0 && ret == 0)
+ ret = t_ret;
+
+ return (ret);
+}
+
+static int
+__db_dbtxn_remove(dbp, txn, name)
+ DB *dbp;
+ DB_TXN *txn;
+ const char *name;
+{
+ DB_ENV *dbenv;
+ DB_LSN newlsn;
+ int ret;
+ char *tmpname;
+
+ dbenv = dbp->dbenv;
+ tmpname = NULL;
+
+ /*
+ * This is a transactional remove, so we have to keep the name
+ * of the file locked until the transaction commits. As a result,
+ * we implement remove by renaming the file to some other name
+ * (which creates a dummy named file as a placeholder for the
+ * file being renamed/removed) and then deleting that file as
+ * a delayed remove at commit.
+ */
+ if ((ret = __db_backup_name(dbenv, name, txn, &tmpname)) != 0)
+ return (ret);
+
+ DB_TEST_RECOVERY(dbp, DB_TEST_PREDESTROY, ret, name);
+
+ if ((ret = __db_rename_i(dbp, txn, name, NULL, tmpname)) != 0)
+ goto err;
+
+ /* The internal removes will also translate into delayed removes. */
+ if (dbp->db_am_remove != NULL &&
+ (ret = dbp->db_am_remove(dbp, txn, tmpname, NULL, &newlsn)) != 0)
+ goto err;
+
+ ret = __fop_remove(dbenv, txn, dbp->fileid, tmpname, DB_APP_DATA);
+
+ DB_TEST_RECOVERY(dbp, DB_TEST_POSTDESTROY, ret, name);
+
+err:
+DB_TEST_RECOVERY_LABEL
+ if (tmpname != NULL)
+ __os_free(dbenv, tmpname);
+
+ return (ret);
+}
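
Note: the comment in __db_dbtxn_remove above describes the underlying technique: under a transaction, a destructive file operation becomes a rename to a placeholder name plus a delayed delete at commit, so the original name stays locked and an abort can roll everything back. A sketch of that idea in C (fs_rename and tx_delete_at_commit are hypothetical stand-ins; the real code uses __db_backup_name, __db_rename_i and __fop_remove):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct txn;                             /* opaque transaction handle */

    /* Hypothetical primitives standing in for the fop layer. */
    extern int fs_rename(const char *oldname, const char *newname);
    extern int tx_delete_at_commit(struct txn *, const char *name);

    static int
    txn_remove(struct txn *txn, const char *name)
    {
            char *tmpname;
            size_t len;
            int ret;

            /* 1. Pick a placeholder name nobody else will open. */
            len = strlen(name) + sizeof(".__del");
            if ((tmpname = malloc(len)) == NULL)
                    return (-1);
            (void)snprintf(tmpname, len, "%s.__del", name);

            /* 2. Rename now; the original name stays locked by the txn. */
            if ((ret = fs_rename(name, tmpname)) == 0)
                    /*
                     * 3. Delete the placeholder only when the transaction
                     *    commits; on abort the rename is rolled back.
                     */
                    ret = tx_delete_at_commit(txn, tmpname);

            free(tmpname);
            return (ret);
    }
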
diff --git a/bdb/db/db_rename.c b/bdb/db/db_rename.c
new file mode 100644
index 00000000000..87f88232cda
--- /dev/null
+++ b/bdb/db/db_rename.c
@@ -0,0 +1,297 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2001-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: db_rename.c,v 11.203 2002/08/07 16:16:47 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_page.h"
+#include "dbinc/db_shash.h"
+#include "dbinc/db_am.h"
+#include "dbinc/fop.h"
+#include "dbinc/lock.h"
+#include "dbinc/log.h"
+
+static int __db_subdb_rename __P(( DB *, DB_TXN *,
+ const char *, const char *, const char *));
+
+/*
+ * __dbenv_dbrename
+ * Rename method for DB_ENV.
+ *
+ * PUBLIC: int __dbenv_dbrename __P((DB_ENV *, DB_TXN *,
+ * PUBLIC: const char *, const char *, const char *, u_int32_t));
+ */
+int
+__dbenv_dbrename(dbenv, txn, name, subdb, newname, flags)
+ DB_ENV *dbenv;
+ DB_TXN *txn;
+ const char *name, *subdb, *newname;
+ u_int32_t flags;
+{
+ DB *dbp;
+ int ret, t_ret, txn_local;
+
+ txn_local = 0;
+
+ PANIC_CHECK(dbenv);
+ ENV_ILLEGAL_BEFORE_OPEN(dbenv, "DB_ENV->dbrename");
+
+ /* Validate arguments. */
+ if ((ret = __db_fchk(dbenv, "DB->rename", flags, DB_AUTO_COMMIT)) != 0)
+ return (ret);
+
+ if ((ret = db_create(&dbp, dbenv, 0)) != 0)
+ return (ret);
+
+ /*
+ * Create local transaction as necessary, check for consistent
+ * transaction usage.
+ */
+ if (IS_AUTO_COMMIT(dbenv, txn, flags)) {
+ if ((ret = __db_txn_auto(dbp, &txn)) != 0)
+ return (ret);
+ txn_local = 1;
+ } else
+ if (txn != NULL && !TXN_ON(dbenv))
+ return (__db_not_txn_env(dbenv));
+
+ ret = __db_rename_i(dbp, txn, name, subdb, newname);
+
+ /* Commit for DB_AUTO_COMMIT. */
+ if (txn_local) {
+ if (ret == 0)
+ ret = txn->commit(txn, 0);
+ else
+ if ((t_ret = txn->abort(txn)) != 0)
+ ret = __db_panic(dbenv, t_ret);
+
+ /*
+ * We created the DBP here and when we committed/aborted,
+ * we release all the transactional locks, which include
+ * the handle lock; mark the handle cleared explicitly.
+ */
+ LOCK_INIT(dbp->handle_lock);
+ dbp->lid = DB_LOCK_INVALIDID;
+ }
+
+ /*
+ * We never opened this dbp for real, so don't call the transactional
+ * version of DB->close, and use NOSYNC to avoid calling into mpool.
+ */
+ if ((t_ret = dbp->close(dbp, DB_NOSYNC)) != 0 && ret == 0)
+ ret = t_ret;
+
+ return (ret);
+}
+
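
Note: __dbenv_dbremove and __dbenv_dbrename back the DB_ENV->dbremove and DB_ENV->dbrename methods. A minimal application-level sketch of the rename method under DB_AUTO_COMMIT (the environment path, flag set and file names are examples only):

    #include <stdio.h>
    #include <stdlib.h>
    #include "db.h"

    int
    main(void)
    {
            DB_ENV *dbenv;
            int ret;

            if ((ret = db_env_create(&dbenv, 0)) != 0) {
                    fprintf(stderr, "db_env_create: %s\n", db_strerror(ret));
                    return (EXIT_FAILURE);
            }
            if ((ret = dbenv->open(dbenv, "/tmp/dbhome",
                DB_CREATE | DB_INIT_LOCK | DB_INIT_LOG |
                DB_INIT_MPOOL | DB_INIT_TXN, 0)) != 0)
                    goto err;

            /*
             * Rename the file a.db to b.db inside an implicit transaction;
             * the NULL argument means no subdatabase is involved.
             */
            if ((ret = dbenv->dbrename(dbenv,
                NULL, "a.db", NULL, "b.db", DB_AUTO_COMMIT)) != 0)
                    goto err;

    err:    if (ret != 0)
                    fprintf(stderr, "DB_ENV: %s\n", db_strerror(ret));
            (void)dbenv->close(dbenv, 0);
            return (ret == 0 ? EXIT_SUCCESS : EXIT_FAILURE);
    }
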
+/*
+ * __db_rename
+ * Rename method for DB.
+ *
+ * PUBLIC: int __db_rename __P((DB *,
+ * PUBLIC: const char *, const char *, const char *, u_int32_t));
+ */
+int
+__db_rename(dbp, name, subdb, newname, flags)
+ DB *dbp;
+ const char *name, *subdb, *newname;
+ u_int32_t flags;
+{
+ DB_ENV *dbenv;
+ int ret, t_ret;
+
+ dbenv = dbp->dbenv;
+
+ PANIC_CHECK(dbenv);
+
+ /*
+ * Validate arguments, continuing to destroy the handle on failure.
+ *
+ * Cannot use DB_ILLEGAL_AFTER_OPEN directly because it returns.
+ *
+ * !!!
+ * We have a serious problem if we're here with a handle used to open
+ * a database -- we'll destroy the handle, and the application won't
+ * ever be able to close the database.
+ */
+ if (F_ISSET(dbp, DB_AM_OPEN_CALLED)) {
+ ret = __db_mi_open(dbenv, "DB->rename", 1);
+ goto err;
+ }
+
+ /* Validate arguments. */
+ if ((ret = __db_fchk(dbenv, "DB->rename", flags, 0)) != 0)
+ goto err;
+
+ /* Check for consistent transaction usage. */
+ if ((ret = __db_check_txn(dbp, NULL, DB_LOCK_INVALIDID, 0)) != 0)
+ goto err;
+
+ /* Rename the file. */
+ ret = __db_rename_i(dbp, NULL, name, subdb, newname);
+
+ /*
+ * We never opened this dbp for real, use NOSYNC to avoid calling into
+ * mpool.
+ */
+err: if ((t_ret = dbp->close(dbp, DB_NOSYNC)) != 0 && ret == 0)
+ ret = t_ret;
+
+ return (ret);
+}
+
+/*
+ * __db_rename_i
+ * Internal rename method for DB.
+ *
+ * PUBLIC: int __db_rename_i __P((DB *,
+ * PUBLIC: DB_TXN *, const char *, const char *, const char *));
+ */
+int
+__db_rename_i(dbp, txn, name, subdb, newname)
+ DB *dbp;
+ DB_TXN *txn;
+ const char *name, *subdb, *newname;
+{
+ DB_ENV *dbenv;
+ int ret;
+ char *real_name;
+
+ dbenv = dbp->dbenv;
+ real_name = NULL;
+
+ DB_TEST_RECOVERY(dbp, DB_TEST_PREDESTROY, ret, name);
+
+ if (subdb != NULL) {
+ ret = __db_subdb_rename(dbp, txn, name, subdb, newname);
+ goto err;
+ }
+
+ /* From here on down, this pertains to files. */
+
+ /* Find the real name of the file. */
+ if ((ret = __db_appname(dbenv,
+ DB_APP_DATA, name, 0, NULL, &real_name)) != 0)
+ goto err;
+
+ if ((ret = __fop_remove_setup(dbp, txn, real_name, 0)) != 0)
+ goto err;
+
+ if (dbp->db_am_rename != NULL &&
+ (ret = dbp->db_am_rename(dbp, txn, name, subdb, newname)) != 0)
+ goto err;
+
+ /*
+ * The transactional case and non-transactional case are
+ * quite different. In the non-transactional case, we simply
+ * do the rename. In the transactional case, since we need
+ * the ability to back out and maintain locking, we have to
+ * create a temporary object as a placeholder. This is all
+ * taken care of in the fop layer.
+ */
+ if (txn != NULL) {
+ if ((ret = __fop_dummy(dbp, txn, name, newname, 0)) != 0)
+ goto err;
+ } else {
+ if ((ret = __fop_dbrename(dbp, name, newname)) != 0)
+ goto err;
+ }
+
+ /*
+ * I am pretty sure that we haven't gotten a dbreg id, so calling
+ * dbreg_filelist_update is not necessary.
+ */
+ DB_ASSERT(dbp->log_filename == NULL ||
+ dbp->log_filename->id == DB_LOGFILEID_INVALID);
+
+ DB_TEST_RECOVERY(dbp, DB_TEST_POSTDESTROY, ret, newname);
+
+DB_TEST_RECOVERY_LABEL
+err:
+ if (real_name != NULL)
+ __os_free(dbenv, real_name);
+
+ return (ret);
+}
+
+/*
+ * __db_subdb_rename --
+ * Rename a subdatabase.
+ */
+static int
+__db_subdb_rename(dbp, txn, name, subdb, newname)
+ DB *dbp;
+ DB_TXN *txn;
+ const char *name, *subdb, *newname;
+{
+ DB *mdbp;
+ DB_ENV *dbenv;
+ PAGE *meta;
+ int ret, t_ret;
+
+ mdbp = NULL;
+ meta = NULL;
+ dbenv = dbp->dbenv;
+
+ /*
+ * We have not opened this dbp so it isn't marked as a subdb,
+ * but it ought to be.
+ */
+ F_SET(dbp, DB_AM_SUBDB);
+
+ /*
+ * Rename the entry in the main database. We need to first
+ * get the meta-data page number (via MU_OPEN) so that we can
+ * read the meta-data page and obtain a handle lock. Once we've
+ * done that, we can proceed to do the rename in the master.
+ */
+ if ((ret = __db_master_open(dbp, txn, name, 0, 0, &mdbp)) != 0)
+ goto err;
+
+ if ((ret = __db_master_update(mdbp, dbp, txn, subdb, dbp->type,
+ MU_OPEN, NULL, 0)) != 0)
+ goto err;
+
+ if ((ret = mdbp->mpf->get(mdbp->mpf, &dbp->meta_pgno, 0, &meta)) != 0)
+ goto err;
+ memcpy(&dbp->fileid, ((DBMETA *)meta)->uid, DB_FILE_ID_LEN);
+ if ((ret = __fop_lock_handle(dbenv,
+ dbp, mdbp->lid, DB_LOCK_WRITE, NULL, 0)) != 0)
+ goto err;
+
+ ret = mdbp->mpf->put(mdbp->mpf, meta, 0);
+ meta = NULL;
+ if (ret != 0)
+ goto err;
+
+ if ((ret = __db_master_update(mdbp, dbp, txn,
+ subdb, dbp->type, MU_RENAME, newname, 0)) != 0)
+ goto err;
+
+ DB_TEST_RECOVERY(dbp, DB_TEST_POSTDESTROY, ret, name);
+
+DB_TEST_RECOVERY_LABEL
+err:
+ if (meta != NULL &&
+ (t_ret = mdbp->mpf->put(mdbp->mpf, meta, 0)) != 0 && ret == 0)
+ ret = t_ret;
+
+ if (mdbp != NULL &&
+ (t_ret = __db_close_i(mdbp, txn, 0)) != 0 && ret == 0)
+ ret = t_ret;
+
+ return (ret);
+}
diff --git a/bdb/db/db_ret.c b/bdb/db/db_ret.c
index 0782de3e450..b1af7b4ffeb 100644
--- a/bdb/db/db_ret.c
+++ b/bdb/db/db_ret.c
@@ -1,14 +1,14 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Copyright (c) 1996-2002
* Sleepycat Software. All rights reserved.
*/
#include "db_config.h"
#ifndef lint
-static const char revid[] = "$Id: db_ret.c,v 11.12 2000/11/30 00:58:33 ubell Exp $";
+static const char revid[] = "$Id: db_ret.c,v 11.21 2002/03/28 19:21:47 bostic Exp $";
#endif /* not lint */
#ifndef NO_SYSTEM_INCLUDES
@@ -18,9 +18,8 @@ static const char revid[] = "$Id: db_ret.c,v 11.12 2000/11/30 00:58:33 ubell Exp
#endif
#include "db_int.h"
-#include "db_page.h"
-#include "btree.h"
-#include "db_am.h"
+#include "dbinc/db_page.h"
+#include "dbinc/db_am.h"
/*
* __db_ret --
@@ -47,19 +46,19 @@ __db_ret(dbp, h, indx, dbt, memp, memsize)
switch (TYPE(h)) {
case P_HASH:
- hk = P_ENTRY(h, indx);
+ hk = P_ENTRY(dbp, h, indx);
if (HPAGE_PTYPE(hk) == H_OFFPAGE) {
memcpy(&ho, hk, sizeof(HOFFPAGE));
return (__db_goff(dbp, dbt,
ho.tlen, ho.pgno, memp, memsize));
}
- len = LEN_HKEYDATA(h, dbp->pgsize, indx);
+ len = LEN_HKEYDATA(dbp, h, dbp->pgsize, indx);
data = HKEYDATA_DATA(hk);
break;
case P_LBTREE:
case P_LDUP:
case P_LRECNO:
- bk = GET_BKEYDATA(h, indx);
+ bk = GET_BKEYDATA(dbp, h, indx);
if (B_TYPE(bk->type) == B_OVERFLOW) {
bo = (BOVERFLOW *)bk;
return (__db_goff(dbp, dbt,
@@ -69,33 +68,30 @@ __db_ret(dbp, h, indx, dbt, memp, memsize)
data = bk->data;
break;
default:
- return (__db_pgfmt(dbp, h->pgno));
+ return (__db_pgfmt(dbp->dbenv, h->pgno));
}
- return (__db_retcopy(dbp, dbt, data, len, memp, memsize));
+ return (__db_retcopy(dbp->dbenv, dbt, data, len, memp, memsize));
}
/*
* __db_retcopy --
* Copy the returned data into the user's DBT, handling special flags.
*
- * PUBLIC: int __db_retcopy __P((DB *, DBT *,
+ * PUBLIC: int __db_retcopy __P((DB_ENV *, DBT *,
* PUBLIC: void *, u_int32_t, void **, u_int32_t *));
*/
int
-__db_retcopy(dbp, dbt, data, len, memp, memsize)
- DB *dbp;
+__db_retcopy(dbenv, dbt, data, len, memp, memsize)
+ DB_ENV *dbenv;
DBT *dbt;
void *data;
u_int32_t len;
void **memp;
u_int32_t *memsize;
{
- DB_ENV *dbenv;
int ret;
- dbenv = dbp == NULL ? NULL : dbp->dbenv;
-
/* If returning a partial record, reset the length. */
if (F_ISSET(dbt, DB_DBT_PARTIAL)) {
data = (u_int8_t *)data + dbt->doff;
@@ -131,12 +127,10 @@ __db_retcopy(dbp, dbt, data, len, memp, memsize)
* memory pointer is allowed to be NULL.
*/
if (F_ISSET(dbt, DB_DBT_MALLOC)) {
- if ((ret = __os_malloc(dbenv, len,
- dbp == NULL ? NULL : dbp->db_malloc, &dbt->data)) != 0)
+ if ((ret = __os_umalloc(dbenv, len, &dbt->data)) != 0)
return (ret);
} else if (F_ISSET(dbt, DB_DBT_REALLOC)) {
- if ((ret = __os_realloc(dbenv, len,
- dbp == NULL ? NULL : dbp->db_realloc, &dbt->data)) != 0)
+ if ((ret = __os_urealloc(dbenv, len, &dbt->data)) != 0)
return (ret);
} else if (F_ISSET(dbt, DB_DBT_USERMEM)) {
if (len != 0 && (dbt->data == NULL || dbt->ulen < len))
@@ -145,7 +139,7 @@ __db_retcopy(dbp, dbt, data, len, memp, memsize)
return (EINVAL);
} else {
if (len != 0 && (*memsize == 0 || *memsize < len)) {
- if ((ret = __os_realloc(dbenv, len, NULL, memp)) != 0) {
+ if ((ret = __os_realloc(dbenv, len, memp)) != 0) {
*memsize = 0;
return (ret);
}
diff --git a/bdb/db/db_truncate.c b/bdb/db/db_truncate.c
new file mode 100644
index 00000000000..49546ae51b9
--- /dev/null
+++ b/bdb/db/db_truncate.c
@@ -0,0 +1,95 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2001-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: db_truncate.c,v 11.185 2002/08/07 16:16:48 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_page.h"
+#include "dbinc/btree.h"
+#include "dbinc/hash.h"
+#include "dbinc/qam.h"
+
+/*
+ * __db_truncate
+ * Truncate method for DB.
+ *
+ * PUBLIC: int __db_truncate __P((DB *, DB_TXN *, u_int32_t *, u_int32_t));
+ */
+int
+__db_truncate(dbp, txn, countp, flags)
+ DB *dbp;
+ DB_TXN *txn;
+ u_int32_t *countp, flags;
+{
+ DB_ENV *dbenv;
+ int ret, t_ret, txn_local;
+
+ dbenv = dbp->dbenv;
+ ret = txn_local = 0;
+
+ PANIC_CHECK(dbenv);
+
+ /* Check for invalid flags. */
+ if ((ret =
+ __db_fchk(dbenv, "DB->truncate", flags, DB_AUTO_COMMIT)) != 0)
+ return (ret);
+
+ /*
+ * Create local transaction as necessary, check for consistent
+ * transaction usage.
+ */
+ if (IS_AUTO_COMMIT(dbenv, txn, flags)) {
+ if ((ret = __db_txn_auto(dbp, &txn)) != 0)
+ return (ret);
+ txn_local = 1;
+ } else
+ if (txn != NULL && !TXN_ON(dbenv))
+ return (__db_not_txn_env(dbenv));
+
+ DB_TEST_RECOVERY(dbp, DB_TEST_PREDESTROY, ret, NULL);
+ switch (dbp->type) {
+ case DB_BTREE:
+ case DB_RECNO:
+ if ((ret = __bam_truncate(dbp, txn, countp)) != 0)
+ goto err;
+ break;
+ case DB_HASH:
+ if ((ret = __ham_truncate(dbp, txn, countp)) != 0)
+ goto err;
+ break;
+ case DB_QUEUE:
+ if ((ret = __qam_truncate(dbp, txn, countp)) != 0)
+ goto err;
+ break;
+ default:
+ ret = __db_unknown_type(
+ dbenv, "__db_truncate", dbp->type);
+ goto err;
+ }
+ DB_TEST_RECOVERY(dbp, DB_TEST_POSTDESTROY, ret, NULL);
+
+DB_TEST_RECOVERY_LABEL
+err:
+ /* Commit for DB_AUTO_COMMIT. */
+ if (txn_local) {
+ if (ret == 0)
+ ret = txn->commit(txn, 0);
+ else
+ if ((t_ret = txn->abort(txn)) != 0)
+ ret = __db_panic(dbenv, t_ret);
+ }
+
+ return (ret);
+}
diff --git a/bdb/db/db_upg.c b/bdb/db/db_upg.c
index d8573146ad6..c0eb72f3713 100644
--- a/bdb/db/db_upg.c
+++ b/bdb/db/db_upg.c
@@ -1,14 +1,14 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Copyright (c) 1996-2002
* Sleepycat Software. All rights reserved.
*/
#include "db_config.h"
#ifndef lint
-static const char revid[] = "$Id: db_upg.c,v 11.20 2000/12/12 17:35:30 bostic Exp $";
+static const char revid[] = "$Id: db_upg.c,v 11.29 2002/03/27 18:59:04 krinsky Exp $";
#endif /* not lint */
#ifndef NO_SYSTEM_INCLUDES
@@ -18,11 +18,11 @@ static const char revid[] = "$Id: db_upg.c,v 11.20 2000/12/12 17:35:30 bostic Ex
#endif
#include "db_int.h"
-#include "db_page.h"
-#include "db_swap.h"
-#include "btree.h"
-#include "hash.h"
-#include "qam.h"
+#include "dbinc/db_page.h"
+#include "dbinc/db_swap.h"
+#include "dbinc/btree.h"
+#include "dbinc/hash.h"
+#include "dbinc/qam.h"
static int (* const func_31_list[P_PAGETYPE_MAX])
__P((DB *, char *, u_int32_t, DB_FH *, PAGE *, int *)) = {
@@ -68,7 +68,7 @@ __db_upgrade(dbp, fname, flags)
/* Get the real backing file name. */
if ((ret = __db_appname(dbenv,
- DB_APP_DATA, NULL, fname, 0, NULL, &real_name)) != 0)
+ DB_APP_DATA, fname, 0, NULL, &real_name)) != 0)
return (ret);
/* Open the file. */
@@ -117,6 +117,7 @@ __db_upgrade(dbp, fname, flags)
goto err;
/* FALLTHROUGH */
case 8:
+ case 9:
break;
default:
__db_err(dbenv, "%s: unsupported btree version: %lu",
@@ -173,6 +174,7 @@ __db_upgrade(dbp, fname, flags)
goto err;
/* FALLTHROUGH */
case 7:
+ case 8:
break;
default:
__db_err(dbenv, "%s: unsupported hash version: %lu",
@@ -202,6 +204,7 @@ __db_upgrade(dbp, fname, flags)
goto err;
/* FALLTHROUGH */
case 3:
+ case 4:
break;
default:
__db_err(dbenv, "%s: unsupported queue version: %lu",
@@ -231,9 +234,9 @@ __db_upgrade(dbp, fname, flags)
ret = __os_fsync(dbenv, &fh);
-err: if ((t_ret = __os_closehandle(&fh)) != 0 && ret == 0)
+err: if ((t_ret = __os_closehandle(dbenv, &fh)) != 0 && ret == 0)
ret = t_ret;
- __os_freestr(real_name);
+ __os_free(dbenv, real_name);
/* We're done. */
if (dbp->db_feedback != NULL)
@@ -268,7 +271,7 @@ __db_page_pass(dbp, real_name, flags, fl, fhp)
return (ret);
/* Allocate memory for a single page. */
- if ((ret = __os_malloc(dbenv, dbp->pgsize, NULL, &page)) != 0)
+ if ((ret = __os_malloc(dbenv, dbp->pgsize, &page)) != 0)
return (ret);
/* Walk the file, calling the underlying conversion functions. */
@@ -294,7 +297,7 @@ __db_page_pass(dbp, real_name, flags, fl, fhp)
}
}
- __os_free(page, dbp->pgsize);
+ __os_free(dbp->dbenv, page);
return (ret);
}
diff --git a/bdb/db/db_upg_opd.c b/bdb/db/db_upg_opd.c
index a7be784afb8..f410b797bff 100644
--- a/bdb/db/db_upg_opd.c
+++ b/bdb/db/db_upg_opd.c
@@ -1,14 +1,14 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Copyright (c) 1996-2002
* Sleepycat Software. All rights reserved.
*/
#include "db_config.h"
#ifndef lint
-static const char revid[] = "$Id: db_upg_opd.c,v 11.9 2000/11/30 00:58:33 ubell Exp $";
+static const char revid[] = "$Id: db_upg_opd.c,v 11.18 2002/08/06 06:11:18 bostic Exp $";
#endif /* not lint */
#ifndef NO_SYSTEM_INCLUDES
@@ -18,11 +18,8 @@ static const char revid[] = "$Id: db_upg_opd.c,v 11.9 2000/11/30 00:58:33 ubell
#endif
#include "db_int.h"
-#include "db_page.h"
-#include "db_swap.h"
-#include "btree.h"
-#include "hash.h"
-#include "qam.h"
+#include "dbinc/db_page.h"
+#include "dbinc/btree.h"
static int __db_build_bi __P((DB *, DB_FH *, PAGE *, PAGE *, u_int32_t, int *));
static int __db_build_ri __P((DB *, DB_FH *, PAGE *, PAGE *, u_int32_t, int *));
@@ -71,7 +68,7 @@ __db_31_offdup(dbp, real_name, fhp, sorted, pgnop)
pgno_cur = pgno_next = NULL;
/* Allocate room to hold a page. */
- if ((ret = __os_malloc(dbp->dbenv, dbp->pgsize, NULL, &page)) != 0)
+ if ((ret = __os_malloc(dbp->dbenv, dbp->pgsize, &page)) != 0)
goto err;
/*
@@ -85,7 +82,7 @@ __db_31_offdup(dbp, real_name, fhp, sorted, pgnop)
if (pgno_max == cur_cnt) {
pgno_max += 20;
if ((ret = __os_realloc(dbp->dbenv, pgno_max *
- sizeof(db_pgno_t), NULL, &pgno_cur)) != 0)
+ sizeof(db_pgno_t), &pgno_cur)) != 0)
goto err;
}
pgno_cur[cur_cnt++] = pgno;
@@ -112,7 +109,7 @@ __db_31_offdup(dbp, real_name, fhp, sorted, pgnop)
* list while we do so.
*/
if ((ret = __os_malloc(dbp->dbenv,
- cur_cnt * sizeof(db_pgno_t), NULL, &pgno_next)) != 0)
+ cur_cnt * sizeof(db_pgno_t), &pgno_next)) != 0)
goto err;
/* Figure out where we can start allocating new pages. */
@@ -121,7 +118,7 @@ __db_31_offdup(dbp, real_name, fhp, sorted, pgnop)
/* Allocate room for an internal page. */
if ((ret = __os_malloc(dbp->dbenv,
- dbp->pgsize, NULL, &ipage)) != 0)
+ dbp->pgsize, &ipage)) != 0)
goto err;
PGNO(ipage) = PGNO_INVALID;
}
@@ -187,13 +184,13 @@ __db_31_offdup(dbp, real_name, fhp, sorted, pgnop)
*pgnop = pgno_cur[0];
err: if (pgno_cur != NULL)
- __os_free(pgno_cur, 0);
+ __os_free(dbp->dbenv, pgno_cur);
if (pgno_next != NULL)
- __os_free(pgno_next, 0);
+ __os_free(dbp->dbenv, pgno_next);
if (ipage != NULL)
- __os_free(ipage, dbp->pgsize);
+ __os_free(dbp->dbenv, ipage);
if (page != NULL)
- __os_free(page, dbp->pgsize);
+ __os_free(dbp->dbenv, page);
return (ret);
}
@@ -214,22 +211,24 @@ __db_build_bi(dbp, fhp, ipage, page, indx, nomemp)
BKEYDATA *child_bk;
u_int8_t *p;
int ret;
+ db_indx_t *inp;
+ inp = P_INP(dbp, ipage);
switch (TYPE(page)) {
case P_IBTREE:
- child_bi = GET_BINTERNAL(page, 0);
- if (P_FREESPACE(ipage) < BINTERNAL_PSIZE(child_bi->len)) {
+ child_bi = GET_BINTERNAL(dbp, page, 0);
+ if (P_FREESPACE(dbp, ipage) < BINTERNAL_PSIZE(child_bi->len)) {
*nomemp = 1;
return (0);
}
- ipage->inp[indx] =
- HOFFSET(ipage) -= BINTERNAL_SIZE(child_bi->len);
- p = P_ENTRY(ipage, indx);
+ inp[indx] =
+ HOFFSET(ipage) -= BINTERNAL_SIZE(child_bi->len);
+ p = P_ENTRY(dbp, ipage, indx);
bi.len = child_bi->len;
B_TSET(bi.type, child_bi->type, 0);
bi.pgno = PGNO(page);
- bi.nrecs = __bam_total(page);
+ bi.nrecs = __bam_total(dbp, page);
memcpy(p, &bi, SSZA(BINTERNAL, data));
p += SSZA(BINTERNAL, data);
memcpy(p, child_bi->data, child_bi->len);
@@ -241,40 +240,40 @@ __db_build_bi(dbp, fhp, ipage, page, indx, nomemp)
return (ret);
break;
case P_LDUP:
- child_bk = GET_BKEYDATA(page, 0);
+ child_bk = GET_BKEYDATA(dbp, page, 0);
switch (B_TYPE(child_bk->type)) {
case B_KEYDATA:
- if (P_FREESPACE(ipage) <
+ if (P_FREESPACE(dbp, ipage) <
BINTERNAL_PSIZE(child_bk->len)) {
*nomemp = 1;
return (0);
}
- ipage->inp[indx] =
+ inp[indx] =
HOFFSET(ipage) -= BINTERNAL_SIZE(child_bk->len);
- p = P_ENTRY(ipage, indx);
+ p = P_ENTRY(dbp, ipage, indx);
bi.len = child_bk->len;
B_TSET(bi.type, child_bk->type, 0);
bi.pgno = PGNO(page);
- bi.nrecs = __bam_total(page);
+ bi.nrecs = __bam_total(dbp, page);
memcpy(p, &bi, SSZA(BINTERNAL, data));
p += SSZA(BINTERNAL, data);
memcpy(p, child_bk->data, child_bk->len);
break;
case B_OVERFLOW:
- if (P_FREESPACE(ipage) <
+ if (P_FREESPACE(dbp, ipage) <
BINTERNAL_PSIZE(BOVERFLOW_SIZE)) {
*nomemp = 1;
return (0);
}
- ipage->inp[indx] =
+ inp[indx] =
HOFFSET(ipage) -= BINTERNAL_SIZE(BOVERFLOW_SIZE);
- p = P_ENTRY(ipage, indx);
+ p = P_ENTRY(dbp, ipage, indx);
bi.len = BOVERFLOW_SIZE;
B_TSET(bi.type, child_bk->type, 0);
bi.pgno = PGNO(page);
- bi.nrecs = __bam_total(page);
+ bi.nrecs = __bam_total(dbp, page);
memcpy(p, &bi, SSZA(BINTERNAL, data));
p += SSZA(BINTERNAL, data);
memcpy(p, child_bk, BOVERFLOW_SIZE);
@@ -285,11 +284,11 @@ __db_build_bi(dbp, fhp, ipage, page, indx, nomemp)
return (ret);
break;
default:
- return (__db_pgfmt(dbp, PGNO(page)));
+ return (__db_pgfmt(dbp->dbenv, PGNO(page)));
}
break;
default:
- return (__db_pgfmt(dbp, PGNO(page)));
+ return (__db_pgfmt(dbp->dbenv, PGNO(page)));
}
return (0);
@@ -308,19 +307,19 @@ __db_build_ri(dbp, fhp, ipage, page, indx, nomemp)
int *nomemp;
{
RINTERNAL ri;
+ db_indx_t *inp;
- COMPQUIET(dbp, NULL);
COMPQUIET(fhp, NULL);
-
- if (P_FREESPACE(ipage) < RINTERNAL_PSIZE) {
+ inp = P_INP(dbp, ipage);
+ if (P_FREESPACE(dbp, ipage) < RINTERNAL_PSIZE) {
*nomemp = 1;
return (0);
}
ri.pgno = PGNO(page);
- ri.nrecs = __bam_total(page);
- ipage->inp[indx] = HOFFSET(ipage) -= RINTERNAL_SIZE;
- memcpy(P_ENTRY(ipage, indx), &ri, RINTERNAL_SIZE);
+ ri.nrecs = __bam_total(dbp, page);
+ inp[indx] = HOFFSET(ipage) -= RINTERNAL_SIZE;
+ memcpy(P_ENTRY(dbp, ipage, indx), &ri, RINTERNAL_SIZE);
return (0);
}
@@ -340,14 +339,14 @@ __db_up_ovref(dbp, fhp, pgno)
int ret;
/* Allocate room to hold a page. */
- if ((ret = __os_malloc(dbp->dbenv, dbp->pgsize, NULL, &page)) != 0)
+ if ((ret = __os_malloc(dbp->dbenv, dbp->pgsize, &page)) != 0)
return (ret);
GET_PAGE(dbp, fhp, pgno, page);
++OV_REF(page);
PUT_PAGE(dbp, fhp, pgno, page);
-err: __os_free(page, dbp->pgsize);
+err: __os_free(dbp->dbenv, page);
return (ret);
}
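Another pattern repeated through db_upg_opd.c above and db_vrfy.c below: the page-access macros (P_INP, P_ENTRY, P_FREESPACE, GET_BINTERNAL, GET_BKEYDATA) now take the DB handle, and the per-page index array is reached through P_INP() rather than by dereferencing h->inp directly. A hedged sketch of the new idiom, limited to macros that appear in these hunks; the function is hypothetical:

#include "db_config.h"
#include "db_int.h"
#include "dbinc/db_page.h"
#include "dbinc/btree.h"

/* Illustrative sketch only -- not part of the committed diff. */
static void
page_item_demo(DB *dbp, PAGE *h)
{
	BKEYDATA *bk;
	db_indx_t *inp;

	/* 3.x form: h->inp[0] */
	inp = P_INP(dbp, h);

	/* 3.x form: GET_BKEYDATA(h, 0) */
	bk = GET_BKEYDATA(dbp, h, 0);

	(void)inp;
	(void)bk;
}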
diff --git a/bdb/db/db_vrfy.c b/bdb/db/db_vrfy.c
index 3509e05e91f..1bbecdbd87a 100644
--- a/bdb/db/db_vrfy.c
+++ b/bdb/db/db_vrfy.c
@@ -1,16 +1,16 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2000
+ * Copyright (c) 2000-2002
* Sleepycat Software. All rights reserved.
*
- * $Id: db_vrfy.c,v 1.53 2001/01/11 18:19:51 bostic Exp $
+ * $Id: db_vrfy.c,v 1.107 2002/09/03 17:27:15 bostic Exp $
*/
#include "db_config.h"
#ifndef lint
-static const char revid[] = "$Id: db_vrfy.c,v 1.53 2001/01/11 18:19:51 bostic Exp $";
+static const char revid[] = "$Id: db_vrfy.c,v 1.107 2002/09/03 17:27:15 bostic Exp $";
#endif /* not lint */
#ifndef NO_SYSTEM_INCLUDES
@@ -20,19 +20,25 @@ static const char revid[] = "$Id: db_vrfy.c,v 1.53 2001/01/11 18:19:51 bostic Ex
#endif
#include "db_int.h"
-#include "db_page.h"
-#include "db_swap.h"
-#include "db_verify.h"
-#include "db_ext.h"
-#include "btree.h"
-#include "hash.h"
-#include "qam.h"
+#include "dbinc/db_page.h"
+#include "dbinc/db_shash.h"
+#include "dbinc/db_swap.h"
+#include "dbinc/db_verify.h"
+#include "dbinc/btree.h"
+#include "dbinc/hash.h"
+#include "dbinc/lock.h"
+#include "dbinc/qam.h"
+#include "dbinc/txn.h"
static int __db_guesspgsize __P((DB_ENV *, DB_FH *));
static int __db_is_valid_magicno __P((u_int32_t, DBTYPE *));
static int __db_is_valid_pagetype __P((u_int32_t));
static int __db_meta2pgset
__P((DB *, VRFY_DBINFO *, db_pgno_t, u_int32_t, DB *));
+static int __db_salvage __P((DB *, VRFY_DBINFO *, db_pgno_t,
+ PAGE *, void *, int (*)(void *, const void *), u_int32_t));
+static int __db_salvage_subdbpg __P((DB *, VRFY_DBINFO *,
+ PAGE *, void *, int (*)(void *, const void *), u_int32_t));
static int __db_salvage_subdbs
__P((DB *, VRFY_DBINFO *, void *,
int(*)(void *, const void *), u_int32_t, int *));
@@ -136,9 +142,7 @@ __db_verify_internal(dbp_orig, name, subdb, handle, callback, flags)
DB *dbp;
DB_ENV *dbenv;
DB_FH fh, *fhp;
- PAGE *h;
VRFY_DBINFO *vdp;
- db_pgno_t last;
int has, ret, isbad;
char *real_name;
@@ -153,16 +157,22 @@ __db_verify_internal(dbp_orig, name, subdb, handle, callback, flags)
PANIC_CHECK(dbenv);
DB_ILLEGAL_AFTER_OPEN(dbp_orig, "verify");
-#define OKFLAGS (DB_AGGRESSIVE | DB_NOORDERCHK | DB_ORDERCHKONLY | DB_SALVAGE)
+#define OKFLAGS (DB_AGGRESSIVE | DB_NOORDERCHK | DB_ORDERCHKONLY | \
+ DB_PRINTABLE | DB_SALVAGE)
if ((ret = __db_fchk(dbenv, "DB->verify", flags, OKFLAGS)) != 0)
return (ret);
/*
* DB_SALVAGE is mutually exclusive with the other flags except
- * DB_AGGRESSIVE.
+ * DB_AGGRESSIVE and DB_PRINTABLE.
*/
if (LF_ISSET(DB_SALVAGE) &&
- (flags & ~DB_AGGRESSIVE) != DB_SALVAGE)
+ (flags & ~DB_AGGRESSIVE & ~DB_PRINTABLE) != DB_SALVAGE)
+ return (__db_ferr(dbenv, "__db_verify", 1));
+
+ /* DB_AGGRESSIVE and DB_PRINTABLE are only meaningful when salvaging. */
+ if ((LF_ISSET(DB_AGGRESSIVE) || LF_ISSET(DB_PRINTABLE)) &&
+ !LF_ISSET(DB_SALVAGE))
return (__db_ferr(dbenv, "__db_verify", 1));
if (LF_ISSET(DB_ORDERCHKONLY) && flags != DB_ORDERCHKONLY)
@@ -232,9 +242,17 @@ __db_verify_internal(dbp_orig, name, subdb, handle, callback, flags)
if ((ret = __db_vrfy_dbinfo_create(dbenv, 1024, &vdp)) != 0)
goto err;
+ /*
+ * Note whether the user has requested that we use printable
+ * chars where possible. We won't get here with this flag if
+ * we're not salvaging.
+ */
+ if (LF_ISSET(DB_PRINTABLE))
+ F_SET(vdp, SALVAGE_PRINTABLE);
+
/* Find the real name of the file. */
if ((ret = __db_appname(dbenv,
- DB_APP_DATA, NULL, name, 0, NULL, &real_name)) != 0)
+ DB_APP_DATA, name, 0, NULL, &real_name)) != 0)
goto err;
/*
@@ -271,25 +289,15 @@ __db_verify_internal(dbp_orig, name, subdb, handle, callback, flags)
* the [safe] part of __db_open that initializes the environment--
* and the mpool--manually.
*/
- if ((ret = __db_dbenv_setup(dbp,
- name, DB_ODDFILESIZE | DB_RDONLY)) != 0)
+ if ((ret = __db_dbenv_setup(dbp, NULL,
+ name, TXN_INVALID, DB_ODDFILESIZE | DB_RDONLY)) != 0)
return (ret);
/* Mark the dbp as opened, so that we correctly handle its close. */
- F_SET(dbp, DB_OPEN_CALLED);
-
- /*
- * Find out the page number of the last page in the database.
- *
- * XXX: This currently fails if the last page is of bad type,
- * because it calls __db_pgin and that pukes. This is bad.
- */
- if ((ret = memp_fget(dbp->mpf, &last, DB_MPOOL_LAST, &h)) != 0)
- goto err;
- if ((ret = memp_fput(dbp->mpf, h, 0)) != 0)
- goto err;
+ F_SET(dbp, DB_AM_OPEN_CALLED);
- vdp->last_pgno = last;
+ /* Find out the page number of the last page in the database. */
+ dbp->mpf->last_pgno(dbp->mpf, &vdp->last_pgno);
/*
* DB_ORDERCHKONLY is a special case; our file consists of
@@ -373,7 +381,10 @@ __db_verify_internal(dbp_orig, name, subdb, handle, callback, flags)
}
if (0) {
-err: (void)__db_err(dbenv, "%s: %s", name, db_strerror(ret));
+ /* Don't try to strerror() DB_VERIFY_FATAL; it's private. */
+err: if (ret == DB_VERIFY_FATAL)
+ ret = DB_VERIFY_BAD;
+ (void)__db_err(dbenv, "%s: %s", name, db_strerror(ret));
}
if (LF_ISSET(DB_SALVAGE) &&
@@ -385,13 +396,13 @@ done: if (!LF_ISSET(DB_SALVAGE) && dbp->db_feedback != NULL)
dbp->db_feedback(dbp, DB_VERIFY, 100);
if (F_ISSET(fhp, DB_FH_VALID))
- (void)__os_closehandle(fhp);
+ (void)__os_closehandle(dbenv, fhp);
if (dbp)
(void)dbp->close(dbp, 0);
if (vdp)
- (void)__db_vrfy_dbinfo_destroy(vdp);
+ (void)__db_vrfy_dbinfo_destroy(dbenv, vdp);
if (real_name)
- __os_freestr(real_name);
+ __os_free(dbenv, real_name);
if ((ret == 0 && isbad == 1) || ret == DB_VERIFY_FATAL)
ret = DB_VERIFY_BAD;
@@ -417,10 +428,11 @@ __db_vrfy_pagezero(dbp, vdp, fhp, flags)
DB_ENV *dbenv;
VRFY_PAGEINFO *pip;
db_pgno_t freelist;
- int t_ret, ret, nr, swapped;
+ size_t nr;
+ int isbad, ret, swapped;
u_int8_t mbuf[DBMETASIZE];
- swapped = ret = t_ret = 0;
+ isbad = ret = swapped = 0;
freelist = 0;
dbenv = dbp->dbenv;
meta = (DBMETA *)mbuf;
@@ -432,29 +444,43 @@ __db_vrfy_pagezero(dbp, vdp, fhp, flags)
* may be zero; this is okay, as we want page zero anyway and
* 0*0 == 0.
*/
- if ((ret = __os_seek(dbenv, fhp, 0, 0, 0, 0, DB_OS_SEEK_SET)) != 0)
- goto err;
-
- if ((ret = __os_read(dbenv, fhp, mbuf, DBMETASIZE, (size_t *)&nr)) != 0)
- goto err;
+ if ((ret = __os_seek(dbenv, fhp, 0, 0, 0, 0, DB_OS_SEEK_SET)) != 0 ||
+ (ret = __os_read(dbenv, fhp, mbuf, DBMETASIZE, &nr)) != 0) {
+ __db_err(dbenv,
+ "Metadata page %lu cannot be read: %s",
+ (u_long)PGNO_BASE_MD, db_strerror(ret));
+ return (ret);
+ }
if (nr != DBMETASIZE) {
- EPRINT((dbp->dbenv,
- "Incomplete metadata page %lu", (u_long)PGNO_BASE_MD));
- t_ret = DB_VERIFY_FATAL;
- goto err;
+ EPRINT((dbenv,
+ "Page %lu: Incomplete metadata page",
+ (u_long)PGNO_BASE_MD));
+ return (DB_VERIFY_FATAL);
+ }
+
+ if ((ret = __db_chk_meta(dbenv, dbp, meta, 1)) != 0) {
+ EPRINT((dbenv,
+ "Page %lu: metadata page corrupted, (u_long)PGNO_BASE_MD"));
+ isbad = 1;
+ if (ret != -1) {
+ EPRINT((dbenv,
+ "Page %lu: could not check metadata page",
+ (u_long)PGNO_BASE_MD));
+ return (DB_VERIFY_FATAL);
+ }
}
/*
* Check all of the fields that we can.
+ *
+ * 08-11: Current page number. Must == pgno.
+ * Note that endianness doesn't matter--it's zero.
*/
-
- /* 08-11: Current page number. Must == pgno. */
- /* Note that endianness doesn't matter--it's zero. */
if (meta->pgno != PGNO_BASE_MD) {
- EPRINT((dbp->dbenv, "Bad pgno: was %lu, should be %lu",
- (u_long)meta->pgno, (u_long)PGNO_BASE_MD));
- ret = DB_VERIFY_BAD;
+ isbad = 1;
+ EPRINT((dbenv, "Page %lu: pgno incorrectly set to %lu",
+ (u_long)PGNO_BASE_MD, (u_long)meta->pgno));
}
/* 12-15: Magic number. Must be one of valid set. */
@@ -466,9 +492,10 @@ __db_vrfy_pagezero(dbp, vdp, fhp, flags)
&dbp->type))
swapped = 1;
else {
- EPRINT((dbp->dbenv,
- "Bad magic number: %lu", (u_long)meta->magic));
- ret = DB_VERIFY_BAD;
+ isbad = 1;
+ EPRINT((dbenv,
+ "Page %lu: bad magic number %lu",
+ (u_long)PGNO_BASE_MD, (u_long)meta->magic));
}
}
@@ -478,12 +505,19 @@ __db_vrfy_pagezero(dbp, vdp, fhp, flags)
*/
if (swapped)
M_32_SWAP(meta->version);
- if ((dbp->type == DB_BTREE && meta->version != DB_BTREEVERSION) ||
- (dbp->type == DB_HASH && meta->version != DB_HASHVERSION) ||
- (dbp->type == DB_QUEUE && meta->version != DB_QAMVERSION)) {
- ret = DB_VERIFY_BAD;
- EPRINT((dbp->dbenv, "%s%s", "Old or incorrect DB ",
- "version; extraneous errors may result"));
+ if ((dbp->type == DB_BTREE &&
+ (meta->version > DB_BTREEVERSION ||
+ meta->version < DB_BTREEOLDVER)) ||
+ (dbp->type == DB_HASH &&
+ (meta->version > DB_HASHVERSION ||
+ meta->version < DB_HASHOLDVER)) ||
+ (dbp->type == DB_QUEUE &&
+ (meta->version > DB_QAMVERSION ||
+ meta->version < DB_QAMOLDVER))) {
+ isbad = 1;
+ EPRINT((dbenv,
+ "Page %lu: unsupported DB version %lu; extraneous errors may result",
+ (u_long)PGNO_BASE_MD, (u_long)meta->version));
}
/*
@@ -495,9 +529,9 @@ __db_vrfy_pagezero(dbp, vdp, fhp, flags)
if (IS_VALID_PAGESIZE(meta->pagesize))
dbp->pgsize = meta->pagesize;
else {
- EPRINT((dbp->dbenv,
- "Bad page size: %lu", (u_long)meta->pagesize));
- ret = DB_VERIFY_BAD;
+ isbad = 1;
+ EPRINT((dbenv, "Page %lu: bad page size %lu",
+ (u_long)PGNO_BASE_MD, (u_long)meta->pagesize));
/*
* Now try to settle on a pagesize to use.
@@ -516,8 +550,9 @@ __db_vrfy_pagezero(dbp, vdp, fhp, flags)
if ((dbp->type == DB_BTREE && meta->type != P_BTREEMETA) ||
(dbp->type == DB_HASH && meta->type != P_HASHMETA) ||
(dbp->type == DB_QUEUE && meta->type != P_QAMMETA)) {
- ret = DB_VERIFY_BAD;
- EPRINT((dbp->dbenv, "Bad page type: %lu", (u_long)meta->type));
+ isbad = 1;
+ EPRINT((dbenv, "Page %lu: bad page type %lu",
+ (u_long)PGNO_BASE_MD, (u_long)meta->type));
}
/*
@@ -547,21 +582,16 @@ __db_vrfy_pagezero(dbp, vdp, fhp, flags)
pip->free = freelist;
- if ((ret = __db_vrfy_putpageinfo(vdp, pip)) != 0)
+ if ((ret = __db_vrfy_putpageinfo(dbenv, vdp, pip)) != 0)
return (ret);
/* Set up the dbp's fileid. We don't use the regular open path. */
memcpy(dbp->fileid, meta->uid, DB_FILE_ID_LEN);
- if (0) {
-err: __db_err(dbenv, "%s", db_strerror(ret));
- }
-
if (swapped == 1)
F_SET(dbp, DB_AM_SWAP);
- if (t_ret != 0)
- ret = t_ret;
- return (ret);
+
+ return (isbad ? DB_VERIFY_BAD : 0);
}
/*
@@ -578,12 +608,14 @@ __db_vrfy_walkpages(dbp, vdp, handle, callback, flags)
u_int32_t flags;
{
DB_ENV *dbenv;
+ DB_MPOOLFILE *mpf;
PAGE *h;
db_pgno_t i;
int ret, t_ret, isbad;
- ret = isbad = t_ret = 0;
dbenv = dbp->dbenv;
+ mpf = dbp->mpf;
+ ret = isbad = t_ret = 0;
if ((ret = __db_fchk(dbenv,
"__db_vrfy_walkpages", flags, OKFLAGS)) != 0)
@@ -598,11 +630,17 @@ __db_vrfy_walkpages(dbp, vdp, handle, callback, flags)
if (LF_ISSET(DB_SALVAGE) && (__db_salvage_isdone(vdp, i) != 0))
continue;
- /* If an individual page get fails, keep going. */
- if ((t_ret = memp_fget(dbp->mpf, &i, 0, &h)) != 0) {
+ /*
+ * If an individual page get fails, keep going if and only
+ * if we're salvaging.
+ */
+ if ((t_ret = mpf->get(mpf, &i, 0, &h)) != 0) {
if (ret == 0)
ret = t_ret;
- continue;
+ if (LF_ISSET(DB_SALVAGE))
+ continue;
+ else
+ return (ret);
}
if (LF_ISSET(DB_SALVAGE)) {
@@ -619,63 +657,75 @@ __db_vrfy_walkpages(dbp, vdp, handle, callback, flags)
}
} else {
/*
+ * If we are not salvaging, and we get any error
+ * other than DB_VERIFY_BAD, return immediately;
+ * it may not be safe to proceed. If we get
+ * DB_VERIFY_BAD, keep going; listing more errors
+ * may make it easier to diagnose problems and
+ * determine the magnitude of the corruption.
+ */
+
+ /*
* Verify info common to all page
* types.
*/
- if (i != PGNO_BASE_MD)
- if ((t_ret = __db_vrfy_common(dbp,
- vdp, h, i, flags)) == DB_VERIFY_BAD)
+ if (i != PGNO_BASE_MD) {
+ ret = __db_vrfy_common(dbp, vdp, h, i, flags);
+ if (ret == DB_VERIFY_BAD)
isbad = 1;
+ else if (ret != 0)
+ goto err;
+ }
switch (TYPE(h)) {
case P_INVALID:
- t_ret = __db_vrfy_invalid(dbp,
- vdp, h, i, flags);
+ ret = __db_vrfy_invalid(dbp, vdp, h, i, flags);
break;
case __P_DUPLICATE:
isbad = 1;
- EPRINT((dbp->dbenv,
- "Old-style duplicate page: %lu",
+ EPRINT((dbenv,
+ "Page %lu: old-style duplicate page",
(u_long)i));
break;
case P_HASH:
- t_ret = __ham_vrfy(dbp,
+ ret = __ham_vrfy(dbp,
vdp, h, i, flags);
break;
case P_IBTREE:
case P_IRECNO:
case P_LBTREE:
case P_LDUP:
- t_ret = __bam_vrfy(dbp,
+ ret = __bam_vrfy(dbp,
vdp, h, i, flags);
break;
case P_LRECNO:
- t_ret = __ram_vrfy_leaf(dbp,
+ ret = __ram_vrfy_leaf(dbp,
vdp, h, i, flags);
break;
case P_OVERFLOW:
- t_ret = __db_vrfy_overflow(dbp,
+ ret = __db_vrfy_overflow(dbp,
vdp, h, i, flags);
break;
case P_HASHMETA:
- t_ret = __ham_vrfy_meta(dbp,
+ ret = __ham_vrfy_meta(dbp,
vdp, (HMETA *)h, i, flags);
break;
case P_BTREEMETA:
- t_ret = __bam_vrfy_meta(dbp,
+ ret = __bam_vrfy_meta(dbp,
vdp, (BTMETA *)h, i, flags);
break;
case P_QAMMETA:
- t_ret = __qam_vrfy_meta(dbp,
+ ret = __qam_vrfy_meta(dbp,
vdp, (QMETA *)h, i, flags);
break;
case P_QAMDATA:
- t_ret = __qam_vrfy_data(dbp,
+ ret = __qam_vrfy_data(dbp,
vdp, (QPAGE *)h, i, flags);
break;
default:
- EPRINT((dbp->dbenv,
- "Unknown page type: %lu", (u_long)TYPE(h)));
+ EPRINT((dbenv,
+ "Page %lu: unknown page type %lu",
+ (u_long)i, (u_long)TYPE(h)));
isbad = 1;
break;
}
@@ -683,12 +733,10 @@ __db_vrfy_walkpages(dbp, vdp, handle, callback, flags)
/*
* Set up error return.
*/
- if (t_ret == DB_VERIFY_BAD)
+ if (ret == DB_VERIFY_BAD)
isbad = 1;
- else if (t_ret == DB_VERIFY_FATAL)
+ else if (ret != 0)
goto err;
- else
- ret = t_ret;
/*
* Provide feedback to the application about our
@@ -701,14 +749,21 @@ __db_vrfy_walkpages(dbp, vdp, handle, callback, flags)
(i + 1) * 50 / (vdp->last_pgno + 1));
}
- if ((t_ret = memp_fput(dbp->mpf, h, 0)) != 0 && ret == 0)
- ret = t_ret;
+ /*
+ * Just as with the page get, bail if and only if we're
+ * not salvaging.
+ */
+ if ((t_ret = mpf->put(mpf, h, 0)) != 0) {
+ if (ret == 0)
+ ret = t_ret;
+ if (!LF_ISSET(DB_SALVAGE))
+ return (ret);
+ }
}
if (0) {
-err: if ((t_ret = memp_fput(dbp->mpf, h, 0)) != 0)
+err: if ((t_ret = mpf->put(mpf, h, 0)) != 0)
return (ret == 0 ? t_ret : ret);
- return (DB_VERIFY_BAD);
}
return ((isbad == 1 && ret == 0) ? DB_VERIFY_BAD : ret);
@@ -786,8 +841,8 @@ __db_vrfy_structure(dbp, vdp, dbname, meta_pgno, flags)
*/
if ((ret = __db_vrfy_getpageinfo(vdp, 0, &pip)) != 0)
goto err;
- hassubs = F_ISSET(pip, VRFY_HAS_SUBDBS);
- if ((ret = __db_vrfy_putpageinfo(vdp, pip)) != 0)
+ hassubs = F_ISSET(pip, VRFY_HAS_SUBDBS) ? 1 : 0;
+ if ((ret = __db_vrfy_putpageinfo(dbenv, vdp, pip)) != 0)
goto err;
if (isbad == 0 && hassubs)
@@ -855,23 +910,23 @@ __db_vrfy_structure(dbp, vdp, dbname, meta_pgno, flags)
if ((ret = __db_vrfy_pgset_get(pgset, i, &p)) != 0)
goto err;
if (p == 0) {
- EPRINT((dbp->dbenv,
- "Unreferenced page %lu", (u_long)i));
+ EPRINT((dbenv,
+ "Page %lu: unreferenced page", (u_long)i));
isbad = 1;
}
if (F_ISSET(pip, VRFY_IS_ALLZEROES)) {
- EPRINT((dbp->dbenv,
- "Totally zeroed page %lu", (u_long)i));
+ EPRINT((dbenv,
+ "Page %lu: totally zeroed page", (u_long)i));
isbad = 1;
}
- if ((ret = __db_vrfy_putpageinfo(vdp, pip)) != 0)
+ if ((ret = __db_vrfy_putpageinfo(dbenv, vdp, pip)) != 0)
goto err;
pip = NULL;
}
err: if (pip != NULL)
- (void)__db_vrfy_putpageinfo(vdp, pip);
+ (void)__db_vrfy_putpageinfo(dbenv, vdp, pip);
return ((isbad == 1 && ret == 0) ? DB_VERIFY_BAD : ret);
}
@@ -936,10 +991,13 @@ __db_vrfy_common(dbp, vdp, h, pgno, flags)
db_pgno_t pgno;
u_int32_t flags;
{
+ DB_ENV *dbenv;
VRFY_PAGEINFO *pip;
int ret, t_ret;
u_int8_t *p;
+ dbenv = dbp->dbenv;
+
if ((ret = __db_vrfy_getpageinfo(vdp, pgno, &pip)) != 0)
return (ret);
@@ -957,8 +1015,8 @@ __db_vrfy_common(dbp, vdp, h, pgno, flags)
if (pgno != 0 && PGNO(h) == 0) {
for (p = (u_int8_t *)h; p < (u_int8_t *)h + dbp->pgsize; p++)
if (*p != 0) {
- EPRINT((dbp->dbenv,
- "Page %lu should be zeroed and is not",
+ EPRINT((dbenv,
+ "Page %lu: partially zeroed page",
(u_long)pgno));
ret = DB_VERIFY_BAD;
goto err;
@@ -976,19 +1034,19 @@ __db_vrfy_common(dbp, vdp, h, pgno, flags)
}
if (PGNO(h) != pgno) {
- EPRINT((dbp->dbenv,
- "Bad page number: %lu should be %lu",
- (u_long)h->pgno, (u_long)pgno));
+ EPRINT((dbenv, "Page %lu: bad page number %lu",
+ (u_long)pgno, (u_long)h->pgno));
ret = DB_VERIFY_BAD;
}
if (!__db_is_valid_pagetype(h->type)) {
- EPRINT((dbp->dbenv, "Bad page type: %lu", (u_long)h->type));
+ EPRINT((dbenv, "Page %lu: bad page type %lu",
+ (u_long)pgno, (u_long)h->type));
ret = DB_VERIFY_BAD;
}
pip->type = h->type;
-err: if ((t_ret = __db_vrfy_putpageinfo(vdp, pip)) != 0 && ret == 0)
+err: if ((t_ret = __db_vrfy_putpageinfo(dbenv, vdp, pip)) != 0 && ret == 0)
ret = t_ret;
return (ret);
@@ -1007,22 +1065,24 @@ __db_vrfy_invalid(dbp, vdp, h, pgno, flags)
db_pgno_t pgno;
u_int32_t flags;
{
+ DB_ENV *dbenv;
VRFY_PAGEINFO *pip;
int ret, t_ret;
+ dbenv = dbp->dbenv;
+
if ((ret = __db_vrfy_getpageinfo(vdp, pgno, &pip)) != 0)
return (ret);
pip->next_pgno = pip->prev_pgno = 0;
if (!IS_VALID_PGNO(NEXT_PGNO(h))) {
- EPRINT((dbp->dbenv,
- "Invalid next_pgno %lu on page %lu",
- (u_long)NEXT_PGNO(h), (u_long)pgno));
+ EPRINT((dbenv, "Page %lu: invalid next_pgno %lu",
+ (u_long)pgno, (u_long)NEXT_PGNO(h)));
ret = DB_VERIFY_BAD;
} else
pip->next_pgno = NEXT_PGNO(h);
- if ((t_ret = __db_vrfy_putpageinfo(vdp, pip)) != 0 && ret == 0)
+ if ((t_ret = __db_vrfy_putpageinfo(dbenv, vdp, pip)) != 0 && ret == 0)
ret = t_ret;
return (ret);
}
@@ -1048,9 +1108,12 @@ __db_vrfy_datapage(dbp, vdp, h, pgno, flags)
db_pgno_t pgno;
u_int32_t flags;
{
+ DB_ENV *dbenv;
VRFY_PAGEINFO *pip;
int isbad, ret, t_ret;
+ dbenv = dbp->dbenv;
+
if ((ret = __db_vrfy_getpageinfo(vdp, pgno, &pip)) != 0)
return (ret);
isbad = 0;
@@ -1066,12 +1129,12 @@ __db_vrfy_datapage(dbp, vdp, h, pgno, flags)
if (TYPE(h) != P_IBTREE && TYPE(h) != P_IRECNO) {
if (!IS_VALID_PGNO(PREV_PGNO(h)) || PREV_PGNO(h) == pip->pgno) {
isbad = 1;
- EPRINT((dbp->dbenv, "Page %lu: Invalid prev_pgno %lu",
+ EPRINT((dbenv, "Page %lu: invalid prev_pgno %lu",
(u_long)pip->pgno, (u_long)PREV_PGNO(h)));
}
if (!IS_VALID_PGNO(NEXT_PGNO(h)) || NEXT_PGNO(h) == pip->pgno) {
isbad = 1;
- EPRINT((dbp->dbenv, "Page %lu: Invalid next_pgno %lu",
+ EPRINT((dbenv, "Page %lu: invalid next_pgno %lu",
(u_long)pip->pgno, (u_long)NEXT_PGNO(h)));
}
pip->prev_pgno = PREV_PGNO(h);
@@ -1089,8 +1152,7 @@ __db_vrfy_datapage(dbp, vdp, h, pgno, flags)
if (TYPE(h) != P_OVERFLOW) {
if (BKEYDATA_PSIZE(0) * NUM_ENT(h) > dbp->pgsize) {
isbad = 1;
- EPRINT((dbp->dbenv,
- "Page %lu: Too many entries: %lu",
+ EPRINT((dbenv, "Page %lu: too many entries: %lu",
(u_long)pgno, (u_long)NUM_ENT(h)));
}
pip->entries = NUM_ENT(h);
@@ -1106,8 +1168,8 @@ __db_vrfy_datapage(dbp, vdp, h, pgno, flags)
case P_IRECNO:
if (LEVEL(h) < LEAFLEVEL + 1 || LEVEL(h) > MAXBTREELEVEL) {
isbad = 1;
- EPRINT((dbp->dbenv, "Bad btree level %lu on page %lu",
- (u_long)LEVEL(h), (u_long)pgno));
+ EPRINT((dbenv, "Page %lu: bad btree level %lu",
+ (u_long)pgno, (u_long)LEVEL(h)));
}
pip->bt_level = LEVEL(h);
break;
@@ -1116,17 +1178,17 @@ __db_vrfy_datapage(dbp, vdp, h, pgno, flags)
case P_LRECNO:
if (LEVEL(h) != LEAFLEVEL) {
isbad = 1;
- EPRINT((dbp->dbenv,
- "Btree leaf page %lu has incorrect level %lu",
+ EPRINT((dbenv,
+ "Page %lu: btree leaf page has incorrect level %lu",
(u_long)pgno, (u_long)LEVEL(h)));
}
break;
default:
if (LEVEL(h) != 0) {
isbad = 1;
- EPRINT((dbp->dbenv,
- "Nonzero level %lu in non-btree database page %lu",
- (u_long)LEVEL(h), (u_long)pgno));
+ EPRINT((dbenv,
+ "Page %lu: nonzero level %lu in non-btree database",
+ (u_long)pgno, (u_long)LEVEL(h)));
}
break;
}
@@ -1139,7 +1201,7 @@ __db_vrfy_datapage(dbp, vdp, h, pgno, flags)
* by offset and length--cover the right part of the page
* without overlaps, gaps, or violations of the page boundary.
*/
- if ((t_ret = __db_vrfy_putpageinfo(vdp, pip)) != 0 && ret == 0)
+ if ((t_ret = __db_vrfy_putpageinfo(dbenv, vdp, pip)) != 0 && ret == 0)
ret = t_ret;
return ((ret == 0 && isbad == 1) ? DB_VERIFY_BAD : ret);
@@ -1161,11 +1223,14 @@ __db_vrfy_meta(dbp, vdp, meta, pgno, flags)
db_pgno_t pgno;
u_int32_t flags;
{
+ DB_ENV *dbenv;
DBTYPE dbtype, magtype;
VRFY_PAGEINFO *pip;
int isbad, ret, t_ret;
isbad = 0;
+ dbenv = dbp->dbenv;
+
if ((ret = __db_vrfy_getpageinfo(vdp, pgno, &pip)) != 0)
return (ret);
@@ -1190,31 +1255,37 @@ __db_vrfy_meta(dbp, vdp, meta, pgno, flags)
/* magic number valid */
if (!__db_is_valid_magicno(meta->magic, &magtype)) {
isbad = 1;
- EPRINT((dbp->dbenv,
- "Magic number invalid on page %lu", (u_long)pgno));
+ EPRINT((dbenv,
+ "Page %lu: invalid magic number", (u_long)pgno));
}
if (magtype != dbtype) {
isbad = 1;
- EPRINT((dbp->dbenv,
- "Magic number does not match type of page %lu",
+ EPRINT((dbenv,
+ "Page %lu: magic number does not match database type",
(u_long)pgno));
}
/* version */
- if ((dbtype == DB_BTREE && meta->version != DB_BTREEVERSION) ||
- (dbtype == DB_HASH && meta->version != DB_HASHVERSION) ||
- (dbtype == DB_QUEUE && meta->version != DB_QAMVERSION)) {
+ if ((dbtype == DB_BTREE &&
+ (meta->version > DB_BTREEVERSION ||
+ meta->version < DB_BTREEOLDVER)) ||
+ (dbtype == DB_HASH &&
+ (meta->version > DB_HASHVERSION ||
+ meta->version < DB_HASHOLDVER)) ||
+ (dbtype == DB_QUEUE &&
+ (meta->version > DB_QAMVERSION ||
+ meta->version < DB_QAMOLDVER))) {
isbad = 1;
- EPRINT((dbp->dbenv, "%s%s", "Old of incorrect DB ",
- "version; extraneous errors may result"));
+ EPRINT((dbenv,
+ "Page %lu: unsupported database version %lu; extraneous errors may result",
+ (u_long)pgno, (u_long)meta->version));
}
/* pagesize */
if (meta->pagesize != dbp->pgsize) {
isbad = 1;
- EPRINT((dbp->dbenv,
- "Invalid pagesize %lu on page %lu",
- (u_long)meta->pagesize, (u_long)pgno));
+ EPRINT((dbenv, "Page %lu: invalid pagesize %lu",
+ (u_long)pgno, (u_long)meta->pagesize));
}
/* free list */
@@ -1224,9 +1295,9 @@ __db_vrfy_meta(dbp, vdp, meta, pgno, flags)
*/
if (pgno != PGNO_BASE_MD && meta->free != PGNO_INVALID) {
isbad = 1;
- EPRINT((dbp->dbenv,
- "Nonempty free list on subdatabase metadata page %lu",
- pgno));
+ EPRINT((dbenv,
+ "Page %lu: nonempty free list on subdatabase metadata page",
+ (u_long)pgno));
}
/* Can correctly be PGNO_INVALID--that's just the end of the list. */
@@ -1234,9 +1305,9 @@ __db_vrfy_meta(dbp, vdp, meta, pgno, flags)
pip->free = meta->free;
else if (!IS_VALID_PGNO(meta->free)) {
isbad = 1;
- EPRINT((dbp->dbenv,
- "Nonsensical free list pgno %lu on page %lu",
- (u_long)meta->free, (u_long)pgno));
+ EPRINT((dbenv,
+ "Page %lu: nonsensical free list pgno %lu",
+ (u_long)pgno, (u_long)meta->free));
}
/*
@@ -1245,7 +1316,7 @@ __db_vrfy_meta(dbp, vdp, meta, pgno, flags)
*/
F_CLR(pip, VRFY_INCOMPLETE);
-err: if ((t_ret = __db_vrfy_putpageinfo(vdp, pip)) != 0 && ret == 0)
+err: if ((t_ret = __db_vrfy_putpageinfo(dbenv, vdp, pip)) != 0 && ret == 0)
ret = t_ret;
return ((ret == 0 && isbad == 1) ? DB_VERIFY_BAD : ret);
@@ -1264,51 +1335,56 @@ __db_vrfy_freelist(dbp, vdp, meta, flags)
u_int32_t flags;
{
DB *pgset;
+ DB_ENV *dbenv;
VRFY_PAGEINFO *pip;
- db_pgno_t pgno;
+ db_pgno_t cur_pgno, next_pgno;
int p, ret, t_ret;
pgset = vdp->pgset;
DB_ASSERT(pgset != NULL);
+ dbenv = dbp->dbenv;
if ((ret = __db_vrfy_getpageinfo(vdp, meta, &pip)) != 0)
return (ret);
- for (pgno = pip->free; pgno != PGNO_INVALID; pgno = pip->next_pgno) {
- if ((ret = __db_vrfy_putpageinfo(vdp, pip)) != 0)
+ for (next_pgno = pip->free;
+ next_pgno != PGNO_INVALID; next_pgno = pip->next_pgno) {
+ cur_pgno = pip->pgno;
+ if ((ret = __db_vrfy_putpageinfo(dbenv, vdp, pip)) != 0)
return (ret);
/* This shouldn't happen, but just in case. */
- if (!IS_VALID_PGNO(pgno)) {
- EPRINT((dbp->dbenv,
- "Invalid next_pgno on free list page %lu",
- (u_long)pgno));
+ if (!IS_VALID_PGNO(next_pgno)) {
+ EPRINT((dbenv,
+ "Page %lu: invalid next_pgno %lu on free list page",
+ (u_long)cur_pgno, (u_long)next_pgno));
return (DB_VERIFY_BAD);
}
/* Detect cycles. */
- if ((ret = __db_vrfy_pgset_get(pgset, pgno, &p)) != 0)
+ if ((ret = __db_vrfy_pgset_get(pgset, next_pgno, &p)) != 0)
return (ret);
if (p != 0) {
- EPRINT((dbp->dbenv,
- "Page %lu encountered a second time on free list",
- (u_long)pgno));
+ EPRINT((dbenv,
+ "Page %lu: page %lu encountered a second time on free list",
+ (u_long)cur_pgno, (u_long)next_pgno));
return (DB_VERIFY_BAD);
}
- if ((ret = __db_vrfy_pgset_inc(pgset, pgno)) != 0)
+ if ((ret = __db_vrfy_pgset_inc(pgset, next_pgno)) != 0)
return (ret);
- if ((ret = __db_vrfy_getpageinfo(vdp, pgno, &pip)) != 0)
+ if ((ret = __db_vrfy_getpageinfo(vdp, next_pgno, &pip)) != 0)
return (ret);
if (pip->type != P_INVALID) {
- EPRINT((dbp->dbenv,
- "Non-invalid page %lu on free list", (u_long)pgno));
+ EPRINT((dbenv,
+ "Page %lu: non-invalid page %lu on free list",
+ (u_long)cur_pgno, (u_long)next_pgno));
ret = DB_VERIFY_BAD; /* unsafe to continue */
break;
}
}
- if ((t_ret = __db_vrfy_putpageinfo(vdp, pip)) != 0)
+ if ((t_ret = __db_vrfy_putpageinfo(dbenv, vdp, pip)) != 0)
ret = t_ret;
return (ret);
}
@@ -1328,6 +1404,7 @@ __db_vrfy_subdbs(dbp, vdp, dbname, flags)
DB *mdbp;
DBC *dbc;
DBT key, data;
+ DB_ENV *dbenv;
VRFY_PAGEINFO *pip;
db_pgno_t meta_pgno;
int ret, t_ret, isbad;
@@ -1335,19 +1412,22 @@ __db_vrfy_subdbs(dbp, vdp, dbname, flags)
isbad = 0;
dbc = NULL;
+ dbenv = dbp->dbenv;
- if ((ret = __db_master_open(dbp, dbname, DB_RDONLY, 0, &mdbp)) != 0)
+ if ((ret =
+ __db_master_open(dbp, NULL, dbname, DB_RDONLY, 0, &mdbp)) != 0)
return (ret);
- if ((ret =
- __db_icursor(mdbp, NULL, DB_BTREE, PGNO_INVALID, 0, &dbc)) != 0)
+ if ((ret = __db_icursor(mdbp,
+ NULL, DB_BTREE, PGNO_INVALID, 0, DB_LOCK_INVALIDID, &dbc)) != 0)
goto err;
memset(&key, 0, sizeof(key));
memset(&data, 0, sizeof(data));
while ((ret = dbc->c_get(dbc, &key, &data, DB_NEXT)) == 0) {
if (data.size != sizeof(db_pgno_t)) {
- EPRINT((dbp->dbenv, "Database entry of invalid size"));
+ EPRINT((dbenv,
+ "Subdatabase entry not page-number size"));
isbad = 1;
goto err;
}
@@ -1358,8 +1438,8 @@ __db_vrfy_subdbs(dbp, vdp, dbname, flags)
*/
DB_NTOHL(&meta_pgno);
if (meta_pgno == PGNO_INVALID || meta_pgno > vdp->last_pgno) {
- EPRINT((dbp->dbenv,
- "Database entry references invalid page %lu",
+ EPRINT((dbenv,
+ "Subdatabase entry references invalid page %lu",
(u_long)meta_pgno));
isbad = 1;
goto err;
@@ -1367,7 +1447,7 @@ __db_vrfy_subdbs(dbp, vdp, dbname, flags)
if ((ret = __db_vrfy_getpageinfo(vdp, meta_pgno, &pip)) != 0)
goto err;
type = pip->type;
- if ((ret = __db_vrfy_putpageinfo(vdp, pip)) != 0)
+ if ((ret = __db_vrfy_putpageinfo(dbenv, vdp, pip)) != 0)
goto err;
switch (type) {
case P_BTREEMETA:
@@ -1390,8 +1470,8 @@ __db_vrfy_subdbs(dbp, vdp, dbname, flags)
break;
case P_QAMMETA:
default:
- EPRINT((dbp->dbenv,
- "Database entry references page %lu of invalid type %lu",
+ EPRINT((dbenv,
+ "Subdatabase entry references page %lu of invalid type %lu",
(u_long)meta_pgno, (u_long)type));
ret = DB_VERIFY_BAD;
goto err;
@@ -1416,9 +1496,9 @@ err: if (dbc != NULL && (t_ret = __db_c_close(dbc)) != 0 && ret == 0)
* Provide feedback during top-down database structure traversal.
* (See comment at the beginning of __db_vrfy_structure.)
*
- * PUBLIC: int __db_vrfy_struct_feedback __P((DB *, VRFY_DBINFO *));
+ * PUBLIC: void __db_vrfy_struct_feedback __P((DB *, VRFY_DBINFO *));
*/
-int
+void
__db_vrfy_struct_feedback(dbp, vdp)
DB *dbp;
VRFY_DBINFO *vdp;
@@ -1426,7 +1506,7 @@ __db_vrfy_struct_feedback(dbp, vdp)
int progress;
if (dbp->db_feedback == NULL)
- return (0);
+ return;
if (vdp->pgs_remaining > 0)
vdp->pgs_remaining--;
@@ -1434,8 +1514,6 @@ __db_vrfy_struct_feedback(dbp, vdp)
/* Don't allow a feedback call of 100 until we're really done. */
progress = 100 - (vdp->pgs_remaining * 50 / (vdp->last_pgno + 1));
dbp->db_feedback(dbp, DB_VERIFY, progress == 100 ? 99 : progress);
-
- return (0);
}
/*
@@ -1453,6 +1531,8 @@ __db_vrfy_orderchkonly(dbp, vdp, name, subdb, flags)
DB *mdbp, *pgset;
DBC *pgsc;
DBT key, data;
+ DB_ENV *dbenv;
+ DB_MPOOLFILE *mpf;
HASH *h_internal;
HMETA *hmeta;
PAGE *h, *currpg;
@@ -1460,36 +1540,45 @@ __db_vrfy_orderchkonly(dbp, vdp, name, subdb, flags)
u_int32_t bucket;
int t_ret, ret;
- currpg = h = NULL;
- pgsc = NULL;
pgset = NULL;
+ pgsc = NULL;
+ dbenv = dbp->dbenv;
+ mpf = dbp->mpf;
+ currpg = h = NULL;
LF_CLR(DB_NOORDERCHK);
/* Open the master database and get the meta_pgno for the subdb. */
if ((ret = db_create(&mdbp, NULL, 0)) != 0)
return (ret);
- if ((ret = __db_master_open(dbp, name, DB_RDONLY, 0, &mdbp)) != 0)
+ if ((ret = __db_master_open(dbp, NULL, name, DB_RDONLY, 0, &mdbp)) != 0)
goto err;
memset(&key, 0, sizeof(key));
key.data = (void *)subdb;
+ key.size = (u_int32_t)strlen(subdb);
memset(&data, 0, sizeof(data));
- if ((ret = dbp->get(dbp, NULL, &key, &data, 0)) != 0)
+ if ((ret = mdbp->get(mdbp, NULL, &key, &data, 0)) != 0)
goto err;
if (data.size != sizeof(db_pgno_t)) {
- EPRINT((dbp->dbenv, "Database entry of invalid size"));
+ EPRINT((dbenv, "Subdatabase entry of invalid size"));
ret = DB_VERIFY_BAD;
goto err;
}
memcpy(&meta_pgno, data.data, data.size);
- if ((ret = memp_fget(dbp->mpf, &meta_pgno, 0, &h)) != 0)
+ /*
+ * Subdatabase meta pgnos are stored in network byte
+ * order for cross-endian compatibility. Swap if appropriate.
+ */
+ DB_NTOHL(&meta_pgno);
+
+ if ((ret = mpf->get(mpf, &meta_pgno, 0, &h)) != 0)
goto err;
- if ((ret = __db_vrfy_pgset(dbp->dbenv, dbp->pgsize, &pgset)) != 0)
+ if ((ret = __db_vrfy_pgset(dbenv, dbp->pgsize, &pgset)) != 0)
goto err;
switch (TYPE(h)) {
@@ -1506,18 +1595,24 @@ __db_vrfy_orderchkonly(dbp, vdp, name, subdb, flags)
if ((ret = pgset->cursor(pgset, NULL, &pgsc, 0)) != 0)
goto err;
while ((ret = __db_vrfy_pgset_next(pgsc, &p)) == 0) {
- if ((ret = memp_fget(dbp->mpf, &p, 0, &currpg)) != 0)
+ if ((ret = mpf->get(mpf, &p, 0, &currpg)) != 0)
goto err;
if ((ret = __bam_vrfy_itemorder(dbp,
NULL, currpg, p, NUM_ENT(currpg), 1,
F_ISSET(&btmeta->dbmeta, BTM_DUP), flags)) != 0)
goto err;
- if ((ret = memp_fput(dbp->mpf, currpg, 0)) != 0)
+ if ((ret = mpf->put(mpf, currpg, 0)) != 0)
goto err;
currpg = NULL;
}
- if ((ret = pgsc->c_close(pgsc)) != 0)
- goto err;
+
+ /*
+ * The normal exit condition for the loop above is DB_NOTFOUND.
+ * If we see that, zero it and continue on to cleanup.
+ * Otherwise, it's a real error and will be returned.
+ */
+ if (ret == DB_NOTFOUND)
+ ret = 0;
break;
case P_HASHMETA:
hmeta = (HMETA *)h;
@@ -1525,16 +1620,21 @@ __db_vrfy_orderchkonly(dbp, vdp, name, subdb, flags)
/*
* Make sure h_charkey is right.
*/
- if (h_internal == NULL || h_internal->h_hash == NULL) {
- EPRINT((dbp->dbenv,
- "DB_ORDERCHKONLY requires that a hash function be set"));
+ if (h_internal == NULL) {
+ EPRINT((dbenv,
+ "Page %lu: DB->h_internal field is NULL",
+ (u_long)meta_pgno));
ret = DB_VERIFY_BAD;
goto err;
}
+ if (h_internal->h_hash == NULL)
+ h_internal->h_hash = hmeta->dbmeta.version < 5
+ ? __ham_func4 : __ham_func5;
if (hmeta->h_charkey !=
h_internal->h_hash(dbp, CHARKEY, sizeof(CHARKEY))) {
- EPRINT((dbp->dbenv,
- "Incorrect hash function for database"));
+ EPRINT((dbenv,
+ "Page %lu: incorrect hash function for database",
+ (u_long)meta_pgno));
ret = DB_VERIFY_BAD;
goto err;
}
@@ -1546,34 +1646,35 @@ __db_vrfy_orderchkonly(dbp, vdp, name, subdb, flags)
for (bucket = 0; bucket <= hmeta->max_bucket; bucket++) {
pgno = BS_TO_PAGE(bucket, hmeta->spares);
while (pgno != PGNO_INVALID) {
- if ((ret = memp_fget(dbp->mpf,
+ if ((ret = mpf->get(mpf,
&pgno, 0, &currpg)) != 0)
goto err;
if ((ret = __ham_vrfy_hashing(dbp,
- NUM_ENT(currpg),hmeta, bucket, pgno,
+ NUM_ENT(currpg), hmeta, bucket, pgno,
flags, h_internal->h_hash)) != 0)
goto err;
pgno = NEXT_PGNO(currpg);
- if ((ret = memp_fput(dbp->mpf, currpg, 0)) != 0)
+ if ((ret = mpf->put(mpf, currpg, 0)) != 0)
goto err;
currpg = NULL;
}
}
break;
default:
- EPRINT((dbp->dbenv, "Database meta page %lu of bad type %lu",
+ EPRINT((dbenv, "Page %lu: database metapage of bad type %lu",
(u_long)meta_pgno, (u_long)TYPE(h)));
ret = DB_VERIFY_BAD;
break;
}
-err: if (pgsc != NULL)
- (void)pgsc->c_close(pgsc);
- if (pgset != NULL)
- (void)pgset->close(pgset, 0);
- if (h != NULL && (t_ret = memp_fput(dbp->mpf, h, 0)) != 0)
+err: if (pgsc != NULL && (t_ret = pgsc->c_close(pgsc)) != 0 && ret == 0)
+ ret = t_ret;
+ if (pgset != NULL &&
+ (t_ret = pgset->close(pgset, 0)) != 0 && ret == 0)
ret = t_ret;
- if (currpg != NULL && (t_ret = memp_fput(dbp->mpf, currpg, 0)) != 0)
+ if (h != NULL && (t_ret = mpf->put(mpf, h, 0)) != 0)
+ ret = t_ret;
+ if (currpg != NULL && (t_ret = mpf->put(mpf, currpg, 0)) != 0)
ret = t_ret;
if ((t_ret = mdbp->close(mdbp, 0)) != 0)
ret = t_ret;
@@ -1584,11 +1685,8 @@ err: if (pgsc != NULL)
* __db_salvage --
* Walk through a page, salvaging all likely or plausible (w/
* DB_AGGRESSIVE) key/data pairs.
- *
- * PUBLIC: int __db_salvage __P((DB *, VRFY_DBINFO *, db_pgno_t, PAGE *,
- * PUBLIC: void *, int (*)(void *, const void *), u_int32_t));
*/
-int
+static int
__db_salvage(dbp, vdp, pgno, h, handle, callback, flags)
DB *dbp;
VRFY_DBINFO *vdp;
@@ -1659,24 +1757,29 @@ __db_salvage_unknowns(dbp, vdp, handle, callback, flags)
u_int32_t flags;
{
DBT unkdbt, key, *dbt;
+ DB_ENV *dbenv;
+ DB_MPOOLFILE *mpf;
PAGE *h;
db_pgno_t pgno;
u_int32_t pgtype;
int ret, err_ret;
void *ovflbuf;
+ dbenv = dbp->dbenv;
+ mpf = dbp->mpf;
+
memset(&unkdbt, 0, sizeof(DBT));
- unkdbt.size = strlen("UNKNOWN") + 1;
+ unkdbt.size = (u_int32_t)strlen("UNKNOWN") + 1;
unkdbt.data = "UNKNOWN";
- if ((ret = __os_malloc(dbp->dbenv, dbp->pgsize, 0, &ovflbuf)) != 0)
+ if ((ret = __os_malloc(dbenv, dbp->pgsize, &ovflbuf)) != 0)
return (ret);
err_ret = 0;
while ((ret = __db_salvage_getnext(vdp, &pgno, &pgtype)) == 0) {
dbt = NULL;
- if ((ret = memp_fget(dbp->mpf, &pgno, 0, &h)) != 0) {
+ if ((ret = mpf->get(mpf, &pgno, 0, &h)) != 0) {
err_ret = ret;
continue;
}
@@ -1699,17 +1802,11 @@ __db_salvage_unknowns(dbp, vdp, handle, callback, flags)
* a database with no dups. What to do?
*/
if ((ret = __db_safe_goff(dbp,
- vdp, pgno, &key, &ovflbuf, flags)) != 0) {
- err_ret = ret;
- continue;
- }
- if ((ret = __db_prdbt(&key,
- 0, " ", handle, callback, 0, NULL)) != 0) {
- err_ret = ret;
- continue;
- }
- if ((ret = __db_prdbt(&unkdbt,
- 0, " ", handle, callback, 0, NULL)) != 0)
+ vdp, pgno, &key, &ovflbuf, flags)) != 0 ||
+ (ret = __db_prdbt(&key,
+ 0, " ", handle, callback, 0, vdp)) != 0 ||
+ (ret = __db_prdbt(&unkdbt,
+ 0, " ", handle, callback, 0, vdp)) != 0)
err_ret = ret;
break;
case SALVAGE_HASH:
@@ -1727,11 +1824,11 @@ __db_salvage_unknowns(dbp, vdp, handle, callback, flags)
DB_ASSERT(0);
break;
}
- if ((ret = memp_fput(dbp->mpf, h, 0)) != 0)
+ if ((ret = mpf->put(mpf, h, 0)) != 0)
err_ret = ret;
}
- __os_free(ovflbuf, 0);
+ __os_free(dbenv, ovflbuf);
if (err_ret != 0 && ret == 0)
ret = err_ret;
@@ -1743,8 +1840,8 @@ __db_salvage_unknowns(dbp, vdp, handle, callback, flags)
* Offset of the ith inp array entry, which we can compare to the offset
* the entry stores.
*/
-#define INP_OFFSET(h, i) \
- ((db_indx_t)((u_int8_t *)(h)->inp + (i) - (u_int8_t *)(h)))
+#define INP_OFFSET(dbp, h, i) \
+ ((db_indx_t)((u_int8_t *)((P_INP(dbp,(h))) + (i)) - (u_int8_t *)(h)))
/*
* __db_vrfy_inpitem --
@@ -1770,33 +1867,35 @@ __db_vrfy_inpitem(dbp, h, pgno, i, is_btree, flags, himarkp, offsetp)
u_int32_t flags, *himarkp, *offsetp;
{
BKEYDATA *bk;
- db_indx_t offset, len;
+ DB_ENV *dbenv;
+ db_indx_t *inp, offset, len;
+
+ dbenv = dbp->dbenv;
DB_ASSERT(himarkp != NULL);
+ inp = P_INP(dbp, h);
/*
* Check that the inp array, which grows from the beginning of the
* page forward, has not collided with the data, which grow from the
* end of the page backward.
*/
- if (h->inp + i >= (db_indx_t *)((u_int8_t *)h + *himarkp)) {
+ if (inp + i >= (db_indx_t *)((u_int8_t *)h + *himarkp)) {
/* We've collided with the data. We need to bail. */
- EPRINT((dbp->dbenv,
- "Page %lu entries listing %lu overlaps data",
+ EPRINT((dbenv, "Page %lu: entries listing %lu overlaps data",
(u_long)pgno, (u_long)i));
return (DB_VERIFY_FATAL);
}
- offset = h->inp[i];
+ offset = inp[i];
/*
* Check that the item offset is reasonable: it points somewhere
* after the inp array and before the end of the page.
*/
- if (offset <= INP_OFFSET(h, i) || offset > dbp->pgsize) {
- EPRINT((dbp->dbenv,
- "Bad offset %lu at page %lu index %lu",
- (u_long)offset, (u_long)pgno, (u_long)i));
+ if (offset <= INP_OFFSET(dbp, h, i) || offset > dbp->pgsize) {
+ EPRINT((dbenv, "Page %lu: bad offset %lu at page index %lu",
+ (u_long)pgno, (u_long)offset, (u_long)i));
return (DB_VERIFY_BAD);
}
@@ -1808,7 +1907,7 @@ __db_vrfy_inpitem(dbp, h, pgno, i, is_btree, flags, himarkp, offsetp)
/*
* Check that the item length remains on-page.
*/
- bk = GET_BKEYDATA(h, i);
+ bk = GET_BKEYDATA(dbp, h, i);
/*
* We need to verify the type of the item here;
@@ -1826,16 +1925,16 @@ __db_vrfy_inpitem(dbp, h, pgno, i, is_btree, flags, himarkp, offsetp)
len = BOVERFLOW_SIZE;
break;
default:
- EPRINT((dbp->dbenv,
- "Item %lu on page %lu of unrecognizable type",
- i, pgno));
+ EPRINT((dbenv,
+ "Page %lu: item %lu of unrecognizable type",
+ (u_long)pgno, (u_long)i));
return (DB_VERIFY_BAD);
}
if ((size_t)(offset + len) > dbp->pgsize) {
- EPRINT((dbp->dbenv,
- "Item %lu on page %lu extends past page boundary",
- (u_long)i, (u_long)pgno));
+ EPRINT((dbenv,
+ "Page %lu: item %lu extends past page boundary",
+ (u_long)pgno, (u_long)i));
return (DB_VERIFY_BAD);
}
}
@@ -1861,9 +1960,11 @@ __db_vrfy_duptype(dbp, vdp, pgno, flags)
db_pgno_t pgno;
u_int32_t flags;
{
+ DB_ENV *dbenv;
VRFY_PAGEINFO *pip;
int ret, isbad;
+ dbenv = dbp->dbenv;
isbad = 0;
if ((ret = __db_vrfy_getpageinfo(vdp, pgno, &pip)) != 0)
@@ -1873,8 +1974,8 @@ __db_vrfy_duptype(dbp, vdp, pgno, flags)
case P_IBTREE:
case P_LDUP:
if (!LF_ISSET(ST_DUPSORT)) {
- EPRINT((dbp->dbenv,
- "Sorted duplicate set at page %lu in unsorted-dup database",
+ EPRINT((dbenv,
+ "Page %lu: sorted duplicate set in unsorted-dup database",
(u_long)pgno));
isbad = 1;
}
@@ -1882,21 +1983,29 @@ __db_vrfy_duptype(dbp, vdp, pgno, flags)
case P_IRECNO:
case P_LRECNO:
if (LF_ISSET(ST_DUPSORT)) {
- EPRINT((dbp->dbenv,
- "Unsorted duplicate set at page %lu in sorted-dup database",
+ EPRINT((dbenv,
+ "Page %lu: unsorted duplicate set in sorted-dup database",
(u_long)pgno));
isbad = 1;
}
break;
default:
- EPRINT((dbp->dbenv,
- "Duplicate page %lu of inappropriate type %lu",
- (u_long)pgno, (u_long)pip->type));
+ /*
+ * If the page is entirely zeroed, its pip->type will be a lie
+ * (we assumed it was a hash page, as they're allowed to be
+ * zeroed); handle this case specially.
+ */
+ if (F_ISSET(pip, VRFY_IS_ALLZEROES))
+ ZEROPG_ERR_PRINT(dbenv, pgno, "duplicate page");
+ else
+ EPRINT((dbenv,
+ "Page %lu: duplicate page of inappropriate type %lu",
+ (u_long)pgno, (u_long)pip->type));
isbad = 1;
break;
}
- if ((ret = __db_vrfy_putpageinfo(vdp, pip)) != 0)
+ if ((ret = __db_vrfy_putpageinfo(dbenv, vdp, pip)) != 0)
return (ret);
return (isbad == 1 ? DB_VERIFY_BAD : 0);
}
@@ -1934,14 +2043,17 @@ __db_salvage_duptree(dbp, vdp, pgno, key, handle, callback, flags)
int (*callback) __P((void *, const void *));
u_int32_t flags;
{
+ DB_MPOOLFILE *mpf;
PAGE *h;
int ret, t_ret;
+ mpf = dbp->mpf;
+
if (pgno == PGNO_INVALID || !IS_VALID_PGNO(pgno))
return (DB_VERIFY_BAD);
/* We have a plausible page. Try it. */
- if ((ret = memp_fget(dbp->mpf, &pgno, 0, &h)) != 0)
+ if ((ret = mpf->get(mpf, &pgno, 0, &h)) != 0)
return (ret);
switch (TYPE(h)) {
@@ -1972,7 +2084,7 @@ __db_salvage_duptree(dbp, vdp, pgno, key, handle, callback, flags)
/* NOTREACHED */
}
-err: if ((t_ret = memp_fput(dbp->mpf, h, 0)) != 0 && ret == 0)
+err: if ((t_ret = mpf->put(mpf, h, 0)) != 0 && ret == 0)
ret = t_ret;
return (ret);
}
@@ -1994,16 +2106,18 @@ __db_salvage_subdbs(dbp, vdp, handle, callback, flags, hassubsp)
BTMETA *btmeta;
DB *pgset;
DBC *pgsc;
+ DB_MPOOLFILE *mpf;
PAGE *h;
db_pgno_t p, meta_pgno;
int ret, err_ret;
- err_ret = 0;
- pgsc = NULL;
pgset = NULL;
+ pgsc = NULL;
+ mpf = dbp->mpf;
+ err_ret = 0;
meta_pgno = PGNO_BASE_MD;
- if ((ret = memp_fget(dbp->mpf, &meta_pgno, 0, &h)) != 0)
+ if ((ret = mpf->get(mpf, &meta_pgno, 0, &h)) != 0)
return (ret);
if (TYPE(h) == P_BTREEMETA)
@@ -2028,7 +2142,7 @@ __db_salvage_subdbs(dbp, vdp, handle, callback, flags, hassubsp)
/* We think we've got subdbs. Mark it so. */
*hassubsp = 1;
- if ((ret = memp_fput(dbp->mpf, h, 0)) != 0)
+ if ((ret = mpf->put(mpf, h, 0)) != 0)
return (ret);
/*
@@ -2048,7 +2162,7 @@ __db_salvage_subdbs(dbp, vdp, handle, callback, flags, hassubsp)
if ((ret = pgset->cursor(pgset, NULL, &pgsc, 0)) != 0)
goto err;
while ((ret = __db_vrfy_pgset_next(pgsc, &p)) == 0) {
- if ((ret = memp_fget(dbp->mpf, &p, 0, &h)) != 0) {
+ if ((ret = mpf->get(mpf, &p, 0, &h)) != 0) {
err_ret = ret;
continue;
}
@@ -2061,7 +2175,7 @@ __db_salvage_subdbs(dbp, vdp, handle, callback, flags, hassubsp)
else if ((ret = __db_salvage_subdbpg(
dbp, vdp, h, handle, callback, flags)) != 0)
err_ret = ret;
-nextpg: if ((ret = memp_fput(dbp->mpf, h, 0)) != 0)
+nextpg: if ((ret = mpf->put(mpf, h, 0)) != 0)
err_ret = ret;
}
@@ -2079,7 +2193,7 @@ err: if (pgsc != NULL)
(void)pgsc->c_close(pgsc);
if (pgset != NULL)
(void)pgset->close(pgset, 0);
- (void)memp_fput(dbp->mpf, h, 0);
+ (void)mpf->put(mpf, h, 0);
return (ret);
}
@@ -2087,12 +2201,8 @@ err: if (pgsc != NULL)
* __db_salvage_subdbpg --
* Given a known-good leaf page in the master database, salvage all
* leaf pages corresponding to each subdb.
- *
- * PUBLIC: int __db_salvage_subdbpg
- * PUBLIC: __P((DB *, VRFY_DBINFO *, PAGE *, void *,
- * PUBLIC: int (*)(void *, const void *), u_int32_t));
*/
-int
+static int
__db_salvage_subdbpg(dbp, vdp, master, handle, callback, flags)
DB *dbp;
VRFY_DBINFO *vdp;
@@ -2106,16 +2216,20 @@ __db_salvage_subdbpg(dbp, vdp, master, handle, callback, flags)
DB *pgset;
DBC *pgsc;
DBT key;
+ DB_ENV *dbenv;
+ DB_MPOOLFILE *mpf;
PAGE *subpg;
db_indx_t i;
db_pgno_t meta_pgno, p;
int ret, err_ret, t_ret;
char *subdbname;
+ dbenv = dbp->dbenv;
+ mpf = dbp->mpf;
ret = err_ret = 0;
subdbname = NULL;
- if ((ret = __db_vrfy_pgset(dbp->dbenv, dbp->pgsize, &pgset)) != 0)
+ if ((ret = __db_vrfy_pgset(dbenv, dbp->pgsize, &pgset)) != 0)
return (ret);
/*
@@ -2123,8 +2237,8 @@ __db_salvage_subdbpg(dbp, vdp, master, handle, callback, flags)
* corresponding to that entry.
*/
for (i = 0; i < NUM_ENT(master); i += P_INDX) {
- bkkey = GET_BKEYDATA(master, i);
- bkdata = GET_BKEYDATA(master, i + O_INDX);
+ bkkey = GET_BKEYDATA(dbp, master, i);
+ bkdata = GET_BKEYDATA(dbp, master, i + O_INDX);
/* Get the subdatabase name. */
if (B_TYPE(bkkey->type) == B_OVERFLOW) {
@@ -2140,13 +2254,13 @@ __db_salvage_subdbpg(dbp, vdp, master, handle, callback, flags)
}
/* Nul-terminate it. */
- if ((ret = __os_realloc(dbp->dbenv,
- key.size + 1, NULL, &subdbname)) != 0)
+ if ((ret = __os_realloc(dbenv,
+ key.size + 1, &subdbname)) != 0)
goto err;
subdbname[key.size] = '\0';
} else if (B_TYPE(bkkey->type == B_KEYDATA)) {
- if ((ret = __os_realloc(dbp->dbenv,
- bkkey->len + 1, NULL, &subdbname)) != 0)
+ if ((ret = __os_realloc(dbenv,
+ bkkey->len + 1, &subdbname)) != 0)
goto err;
memcpy(subdbname, bkkey->data, bkkey->len);
subdbname[bkkey->len] = '\0';
@@ -2159,9 +2273,15 @@ __db_salvage_subdbpg(dbp, vdp, master, handle, callback, flags)
}
memcpy(&meta_pgno, bkdata->data, sizeof(db_pgno_t));
+ /*
+ * Subdatabase meta pgnos are stored in network byte
+ * order for cross-endian compatibility. Swap if appropriate.
+ */
+ DB_NTOHL(&meta_pgno);
+
/* If we can't get the subdb meta page, just skip the subdb. */
if (!IS_VALID_PGNO(meta_pgno) ||
- (ret = memp_fget(dbp->mpf, &meta_pgno, 0, &subpg)) != 0) {
+ (ret = mpf->get(mpf, &meta_pgno, 0, &subpg)) != 0) {
err_ret = ret;
continue;
}
@@ -2177,7 +2297,7 @@ __db_salvage_subdbpg(dbp, vdp, master, handle, callback, flags)
if ((ret =
__db_vrfy_common(dbp, vdp, subpg, meta_pgno, flags)) != 0) {
err_ret = ret;
- (void)memp_fput(dbp->mpf, subpg, 0);
+ (void)mpf->put(mpf, subpg, 0);
continue;
}
switch (TYPE(subpg)) {
@@ -2185,7 +2305,7 @@ __db_salvage_subdbpg(dbp, vdp, master, handle, callback, flags)
if ((ret = __bam_vrfy_meta(dbp,
vdp, (BTMETA *)subpg, meta_pgno, flags)) != 0) {
err_ret = ret;
- (void)memp_fput(dbp->mpf, subpg, 0);
+ (void)mpf->put(mpf, subpg, 0);
continue;
}
break;
@@ -2193,7 +2313,7 @@ __db_salvage_subdbpg(dbp, vdp, master, handle, callback, flags)
if ((ret = __ham_vrfy_meta(dbp,
vdp, (HMETA *)subpg, meta_pgno, flags)) != 0) {
err_ret = ret;
- (void)memp_fput(dbp->mpf, subpg, 0);
+ (void)mpf->put(mpf, subpg, 0);
continue;
}
break;
@@ -2204,7 +2324,7 @@ __db_salvage_subdbpg(dbp, vdp, master, handle, callback, flags)
/* NOTREACHED */
}
- if ((ret = memp_fput(dbp->mpf, subpg, 0)) != 0) {
+ if ((ret = mpf->put(mpf, subpg, 0)) != 0) {
err_ret = ret;
continue;
}
@@ -2223,14 +2343,14 @@ __db_salvage_subdbpg(dbp, vdp, master, handle, callback, flags)
if ((ret = pgset->cursor(pgset, NULL, &pgsc, 0)) != 0)
goto err;
while ((ret = __db_vrfy_pgset_next(pgsc, &p)) == 0) {
- if ((ret = memp_fget(dbp->mpf, &p, 0, &subpg)) != 0) {
+ if ((ret = mpf->get(mpf, &p, 0, &subpg)) != 0) {
err_ret = ret;
continue;
}
if ((ret = __db_salvage(dbp, vdp, p, subpg,
handle, callback, flags)) != 0)
err_ret = ret;
- if ((ret = memp_fput(dbp->mpf, subpg, 0)) != 0)
+ if ((ret = mpf->put(mpf, subpg, 0)) != 0)
err_ret = ret;
}
@@ -2243,7 +2363,7 @@ __db_salvage_subdbpg(dbp, vdp, master, handle, callback, flags)
goto err;
}
err: if (subdbname)
- __os_free(subdbname, 0);
+ __os_free(dbenv, subdbname);
if ((t_ret = pgset->close(pgset, 0)) != 0)
ret = t_ret;
@@ -2268,10 +2388,13 @@ __db_meta2pgset(dbp, vdp, pgno, flags, pgset)
u_int32_t flags;
DB *pgset;
{
+ DB_MPOOLFILE *mpf;
PAGE *h;
int ret, t_ret;
- if ((ret = memp_fget(dbp->mpf, &pgno, 0, &h)) != 0)
+ mpf = dbp->mpf;
+
+ if ((ret = mpf->get(mpf, &pgno, 0, &h)) != 0)
return (ret);
switch (TYPE(h)) {
@@ -2286,7 +2409,7 @@ __db_meta2pgset(dbp, vdp, pgno, flags, pgset)
break;
}
- if ((t_ret = memp_fput(dbp->mpf, h, 0)) != 0)
+ if ((t_ret = mpf->put(mpf, h, 0)) != 0)
return (t_ret);
return (ret);
}
@@ -2305,7 +2428,6 @@ __db_guesspgsize(dbenv, fhp)
size_t nr;
u_int32_t guess;
u_int8_t type;
- int ret;
for (guess = DB_MAX_PGSIZE; guess >= DB_MIN_PGSIZE; guess >>= 1) {
/*
@@ -2321,11 +2443,11 @@ __db_guesspgsize(dbenv, fhp)
* our previous guess; that last one was probably the page size.
*/
for (i = 1; i <= 3; i++) {
- if ((ret = __os_seek(dbenv, fhp, guess,
- i, SSZ(DBMETA, type), 0, DB_OS_SEEK_SET)) != 0)
+ if (__os_seek(dbenv, fhp, guess,
+ i, SSZ(DBMETA, type), 0, DB_OS_SEEK_SET) != 0)
break;
- if ((ret = __os_read(dbenv,
- fhp, &type, 1, &nr)) != 0 || nr == 0)
+ if (__os_read(dbenv,
+ fhp, &type, 1, &nr) != 0 || nr == 0)
break;
if (type == P_INVALID || type >= P_PAGETYPE_MAX)
return (guess << 1);
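db_vrfy.c also converts every buffer-pool call from the old memp_fget()/memp_fput() functions to methods on the DB_MPOOLFILE handle (mpf->get, mpf->put, and mpf->last_pgno for the final page number). A minimal sketch of the new calling pattern under those assumptions; the walker function itself is hypothetical:

#include "db_config.h"
#include "db_int.h"
#include "dbinc/db_page.h"

/* Illustrative sketch only -- not part of the committed diff. */
static int
touch_page_demo(DB *dbp, db_pgno_t pgno)
{
	DB_MPOOLFILE *mpf;
	PAGE *h;
	int ret, t_ret;

	mpf = dbp->mpf;

	/* 3.x form: memp_fget(dbp->mpf, &pgno, 0, &h) */
	if ((ret = mpf->get(mpf, &pgno, 0, &h)) != 0)
		return (ret);

	/* ... examine the page ... */

	/* 3.x form: memp_fput(dbp->mpf, h, 0) */
	if ((t_ret = mpf->put(mpf, h, 0)) != 0 && ret == 0)
		ret = t_ret;
	return (ret);
}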
diff --git a/bdb/db/db_vrfyutil.c b/bdb/db/db_vrfyutil.c
index 89dccdcc760..44344ceed11 100644
--- a/bdb/db/db_vrfyutil.c
+++ b/bdb/db/db_vrfyutil.c
@@ -1,16 +1,16 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2000
+ * Copyright (c) 2000-2002
* Sleepycat Software. All rights reserved.
*
- * $Id: db_vrfyutil.c,v 11.11 2000/11/28 21:36:04 bostic Exp $
+ * $Id: db_vrfyutil.c,v 11.29 2002/08/08 03:57:50 bostic Exp $
*/
#include "db_config.h"
#ifndef lint
-static const char revid[] = "$Id: db_vrfyutil.c,v 11.11 2000/11/28 21:36:04 bostic Exp $";
+static const char revid[] = "$Id: db_vrfyutil.c,v 11.29 2002/08/08 03:57:50 bostic Exp $";
#endif /* not lint */
#ifndef NO_SYSTEM_INCLUDES
@@ -20,10 +20,11 @@ static const char revid[] = "$Id: db_vrfyutil.c,v 11.11 2000/11/28 21:36:04 bost
#endif
#include "db_int.h"
-#include "db_page.h"
-#include "db_verify.h"
-#include "db_ext.h"
+#include "dbinc/db_page.h"
+#include "dbinc/db_verify.h"
+#include "dbinc/db_am.h"
+static int __db_vrfy_pageinfo_create __P((DB_ENV *, VRFY_PAGEINFO **));
static int __db_vrfy_pgset_iinc __P((DB *, db_pgno_t, int));
/*
@@ -34,7 +35,7 @@ static int __db_vrfy_pgset_iinc __P((DB *, db_pgno_t, int));
* PUBLIC: __P((DB_ENV *, u_int32_t, VRFY_DBINFO **));
*/
int
-__db_vrfy_dbinfo_create (dbenv, pgsize, vdpp)
+__db_vrfy_dbinfo_create(dbenv, pgsize, vdpp)
DB_ENV *dbenv;
u_int32_t pgsize;
VRFY_DBINFO **vdpp;
@@ -53,14 +54,14 @@ __db_vrfy_dbinfo_create (dbenv, pgsize, vdpp)
if ((ret = db_create(&cdbp, dbenv, 0)) != 0)
goto err;
- if ((ret = cdbp->set_flags(cdbp, DB_DUP | DB_DUPSORT)) != 0)
+ if ((ret = cdbp->set_flags(cdbp, DB_DUP)) != 0)
goto err;
if ((ret = cdbp->set_pagesize(cdbp, pgsize)) != 0)
goto err;
if ((ret =
- cdbp->open(cdbp, NULL, NULL, DB_BTREE, DB_CREATE, 0600)) != 0)
+ cdbp->open(cdbp, NULL, NULL, NULL, DB_BTREE, DB_CREATE, 0600)) != 0)
goto err;
if ((ret = db_create(&pgdbp, dbenv, 0)) != 0)
@@ -69,8 +70,8 @@ __db_vrfy_dbinfo_create (dbenv, pgsize, vdpp)
if ((ret = pgdbp->set_pagesize(pgdbp, pgsize)) != 0)
goto err;
- if ((ret =
- pgdbp->open(pgdbp, NULL, NULL, DB_BTREE, DB_CREATE, 0600)) != 0)
+ if ((ret = pgdbp->open(pgdbp,
+ NULL, NULL, NULL, DB_BTREE, DB_CREATE, 0600)) != 0)
goto err;
if ((ret = __db_vrfy_pgset(dbenv, pgsize, &pgset)) != 0)
@@ -90,7 +91,7 @@ err: if (cdbp != NULL)
if (pgdbp != NULL)
(void)pgdbp->close(pgdbp, 0);
if (vdp != NULL)
- __os_free(vdp, sizeof(VRFY_DBINFO));
+ __os_free(dbenv, vdp);
return (ret);
}
@@ -99,10 +100,11 @@ err: if (cdbp != NULL)
* Destructor for VRFY_DBINFO. Destroys VRFY_PAGEINFOs and deallocates
* structure.
*
- * PUBLIC: int __db_vrfy_dbinfo_destroy __P((VRFY_DBINFO *));
+ * PUBLIC: int __db_vrfy_dbinfo_destroy __P((DB_ENV *, VRFY_DBINFO *));
*/
int
-__db_vrfy_dbinfo_destroy(vdp)
+__db_vrfy_dbinfo_destroy(dbenv, vdp)
+ DB_ENV *dbenv;
VRFY_DBINFO *vdp;
{
VRFY_CHILDINFO *c, *d;
@@ -112,7 +114,7 @@ __db_vrfy_dbinfo_destroy(vdp)
for (c = LIST_FIRST(&vdp->subdbs); c != NULL; c = d) {
d = LIST_NEXT(c, links);
- __os_free(c, 0);
+ __os_free(NULL, c);
}
if ((t_ret = vdp->pgdbp->close(vdp->pgdbp, 0)) != 0)
@@ -126,7 +128,7 @@ __db_vrfy_dbinfo_destroy(vdp)
DB_ASSERT(LIST_FIRST(&vdp->activepips) == NULL);
- __os_free(vdp, sizeof(VRFY_DBINFO));
+ __os_free(dbenv, vdp);
return (ret);
}
@@ -192,7 +194,7 @@ __db_vrfy_getpageinfo(vdp, pgno, pipp)
return (ret);
/* Case 3 */
- if ((ret = __db_vrfy_pageinfo_create(&pip)) != 0)
+ if ((ret = __db_vrfy_pageinfo_create(pgdbp->dbenv, &pip)) != 0)
return (ret);
LIST_INSERT_HEAD(&vdp->activepips, pip, links);
@@ -208,10 +210,12 @@ found: pip->pi_refcount++;
* __db_vrfy_putpageinfo --
* Put back a VRFY_PAGEINFO that we're done with.
*
- * PUBLIC: int __db_vrfy_putpageinfo __P((VRFY_DBINFO *, VRFY_PAGEINFO *));
+ * PUBLIC: int __db_vrfy_putpageinfo __P((DB_ENV *,
+ * PUBLIC: VRFY_DBINFO *, VRFY_PAGEINFO *));
*/
int
-__db_vrfy_putpageinfo(vdp, pip)
+__db_vrfy_putpageinfo(dbenv, vdp, pip)
+ DB_ENV *dbenv;
VRFY_DBINFO *vdp;
VRFY_PAGEINFO *pip;
{
@@ -255,7 +259,7 @@ __db_vrfy_putpageinfo(vdp, pip)
#endif
DB_ASSERT(pip->pi_refcount == 0);
- __os_free(pip, 0);
+ __os_ufree(dbenv, pip);
return (0);
}
@@ -280,7 +284,8 @@ __db_vrfy_pgset(dbenv, pgsize, dbpp)
return (ret);
if ((ret = dbp->set_pagesize(dbp, pgsize)) != 0)
goto err;
- if ((ret = dbp->open(dbp, NULL, NULL, DB_BTREE, DB_CREATE, 0600)) == 0)
+ if ((ret = dbp->open(dbp,
+ NULL, NULL, NULL, DB_BTREE, DB_CREATE, 0600)) == 0)
*dbpp = dbp;
else
err: (void)dbp->close(dbp, 0);
@@ -382,7 +387,7 @@ __db_vrfy_pgset_iinc(dbp, pgno, i)
F_SET(&data, DB_DBT_USERMEM);
if ((ret = dbp->get(dbp, NULL, &key, &data, 0)) == 0) {
- DB_ASSERT(data.size = sizeof(int));
+ DB_ASSERT(data.size == sizeof(int));
memcpy(&val, data.data, sizeof(int));
} else if (ret != DB_NOTFOUND)
return (ret);
@@ -463,8 +468,10 @@ __db_vrfy_childput(vdp, pgno, cip)
db_pgno_t pgno;
VRFY_CHILDINFO *cip;
{
- DBT key, data;
DB *cdbp;
+ DBC *cc;
+ DBT key, data;
+ VRFY_CHILDINFO *oldcip;
int ret;
cdbp = vdp->cdbp;
@@ -474,17 +481,44 @@ __db_vrfy_childput(vdp, pgno, cip)
key.data = &pgno;
key.size = sizeof(db_pgno_t);
+ /*
+ * We want to avoid adding multiple entries for a single child page;
+ * we only need to verify each child once, even if a child (such
+ * as an overflow key) is multiply referenced.
+ *
+ * However, we also need to make sure that when walking the list
+ * of children, we encounter them in the order they're referenced
+ * on a page. (This permits us, for example, to verify the
+ * prev_pgno/next_pgno chain of Btree leaf pages.)
+ *
+ * Check the child database to make sure that this page isn't
+ * already a child of the specified page number. If it's not,
+ * put it at the end of the duplicate set.
+ */
+ if ((ret = __db_vrfy_childcursor(vdp, &cc)) != 0)
+ return (ret);
+ for (ret = __db_vrfy_ccset(cc, pgno, &oldcip); ret == 0;
+ ret = __db_vrfy_ccnext(cc, &oldcip))
+ if (oldcip->pgno == cip->pgno) {
+ /*
+ * Found a matching child. Return without
+ * putting it again.
+ */
+ if ((ret = __db_vrfy_ccclose(cc)) != 0)
+ return (ret);
+ return (0);
+ }
+ if (ret != DB_NOTFOUND) {
+ (void)__db_vrfy_ccclose(cc);
+ return (ret);
+ }
+ if ((ret = __db_vrfy_ccclose(cc)) != 0)
+ return (ret);
+
data.data = cip;
data.size = sizeof(VRFY_CHILDINFO);
- /*
- * Don't add duplicate (data) entries for a given child, and accept
- * DB_KEYEXIST as a successful return; we only need to verify
- * each child once, even if a child (such as an overflow key) is
- * multiply referenced.
- */
- ret = cdbp->put(cdbp, NULL, &key, &data, DB_NODUPDATA);
- return (ret == DB_KEYEXIST ? 0 : ret);
+ return (cdbp->put(cdbp, NULL, &key, &data, 0));
}
/*
@@ -568,19 +602,26 @@ __db_vrfy_ccclose(dbc)
/*
* __db_vrfy_pageinfo_create --
* Constructor for VRFY_PAGEINFO; allocates and initializes.
- *
- * PUBLIC: int __db_vrfy_pageinfo_create __P((VRFY_PAGEINFO **));
*/
-int
-__db_vrfy_pageinfo_create(pgipp)
+static int
+__db_vrfy_pageinfo_create(dbenv, pgipp)
+ DB_ENV *dbenv;
VRFY_PAGEINFO **pgipp;
{
VRFY_PAGEINFO *pgip;
int ret;
- if ((ret = __os_calloc(NULL,
- 1, sizeof(VRFY_PAGEINFO), (void **)&pgip)) != 0)
+ /*
+ * pageinfo structs are sometimes allocated here and sometimes
+ * allocated by fetching them from a database with DB_DBT_MALLOC.
+ * There's no easy way for the destructor to tell which was
+ * used, and so we always allocate with __os_umalloc so we can free
+ * with __os_ufree.
+ */
+ if ((ret = __os_umalloc(dbenv,
+ sizeof(VRFY_PAGEINFO), (void **)&pgip)) != 0)
return (ret);
+ memset(pgip, 0, sizeof(VRFY_PAGEINFO));
DB_ASSERT(pgip->pi_refcount == 0);
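(Sketch, not part of the patch.) The same match-the-allocator rule shows up in the public API: a DBT returned under DB_DBT_MALLOC was allocated on the application's behalf and must be released with the corresponding free (free(), or whatever was installed via DB_ENV->set_alloc). A small illustrative sketch, assuming an open DB *dbp:

	DBT key, data;
	int ret;

	memset(&key, 0, sizeof(key));
	memset(&data, 0, sizeof(data));
	key.data = "some key";
	key.size = sizeof("some key") - 1;
	data.flags = DB_DBT_MALLOC;	/* library allocates the result buffer */
	if ((ret = dbp->get(dbp, NULL, &key, &data, 0)) == 0)
		free(data.data);	/* caller releases it with the matching allocator */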
@@ -607,7 +648,8 @@ __db_salvage_init(vdp)
if ((ret = dbp->set_pagesize(dbp, 1024)) != 0)
goto err;
- if ((ret = dbp->open(dbp, NULL, NULL, DB_BTREE, DB_CREATE, 0)) != 0)
+ if ((ret = dbp->open(dbp,
+ NULL, NULL, NULL, DB_BTREE, DB_CREATE, 0)) != 0)
goto err;
vdp->salvage_pages = dbp;
diff --git a/bdb/db185/db185.c b/bdb/db185/db185.c
index 84327542485..99d37bcf341 100644
--- a/bdb/db185/db185.c
+++ b/bdb/db185/db185.c
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Copyright (c) 1996-2002
* Sleepycat Software. All rights reserved.
*/
@@ -9,9 +9,9 @@
#ifndef lint
static const char copyright[] =
- "Copyright (c) 1996-2000\nSleepycat Software Inc. All rights reserved.\n";
+ "Copyright (c) 1996-2002\nSleepycat Software Inc. All rights reserved.\n";
static const char revid[] =
- "$Id: db185.c,v 11.15 2001/01/23 21:27:03 bostic Exp $";
+ "$Id: db185.c,v 11.28 2002/05/09 01:55:14 bostic Exp $";
#endif
#ifndef NO_SYSTEM_INCLUDES
@@ -38,6 +38,15 @@ static int db185_put __P((const DB185 *, DBT185 *, const DBT185 *, u_int));
static int db185_seq __P((const DB185 *, DBT185 *, DBT185 *, u_int));
static int db185_sync __P((const DB185 *, u_int));
+/*
+ * EXTERN: #ifdef _DB185_INT_H_
+ * EXTERN: DB185 *__db185_open
+ * EXTERN: __P((const char *, int, int, DBTYPE, const void *));
+ * EXTERN: #else
+ * EXTERN: DB *__db185_open
+ * EXTERN: __P((const char *, int, int, DBTYPE, const void *));
+ * EXTERN: #endif
+ */
DB185 *
__db185_open(file, oflags, mode, type, openinfo)
const char *file;
@@ -153,7 +162,7 @@ __db185_open(file, oflags, mode, type, openinfo)
if (oflags & O_CREAT && __os_exists(file, NULL) != 0)
if (__os_openhandle(NULL, file,
oflags, mode, &fh) == 0)
- (void)__os_closehandle(&fh);
+ (void)__os_closehandle(NULL, &fh);
(void)dbp->set_re_source(dbp, file);
if (O_RDONLY)
@@ -220,15 +229,12 @@ __db185_open(file, oflags, mode, type, openinfo)
* to the underlying DB structure, and vice-versa. This has to be
* done BEFORE the DB::open method call because the hash callback
* is exercised as part of hash database initialization.

- *
- * XXX
- * Overload the cj_internal field for this purpose.
*/
db185p->dbp = dbp;
- dbp->cj_internal = db185p;
+ dbp->api_internal = db185p;
/* Open the database. */
- if ((ret = dbp->open(dbp,
+ if ((ret = dbp->open(dbp, NULL,
file, NULL, type, __db_oflags(oflags), mode)) != 0)
goto err;
@@ -238,10 +244,10 @@ __db185_open(file, oflags, mode, type, openinfo)
return (db185p);
-einval: ret = EINVAL;
-
-err: if (db185p != NULL)
- __os_free(db185p, sizeof(DB185));
+err: if (ret < 0) /* DB 1.85 can't handle DB 2.0's errors. */
+einval: ret = EINVAL;
+ if (db185p != NULL)
+ __os_free(NULL, db185p);
if (dbp != NULL)
(void)dbp->close(dbp, 0);
@@ -260,11 +266,13 @@ db185_close(db185p)
ret = dbp->close(dbp, 0);
- __os_free(db185p, sizeof(DB185));
+ __os_free(NULL, db185p);
if (ret == 0)
return (0);
+ if (ret < 0) /* DB 1.85 can't handle DB 2.0's errors. */
+ ret = EINVAL;
__os_set_errno(ret);
return (-1);
}
@@ -299,11 +307,10 @@ db185_del(db185p, key185, flags)
return (1);
}
+ if (ret < 0) /* DB 1.85 can't handle DB 2.0's errors. */
+einval: ret = EINVAL;
__os_set_errno(ret);
return (-1);
-
-einval: __os_set_errno(EINVAL);
- return (-1);
}
static int
@@ -318,6 +325,8 @@ db185_fd(db185p)
if ((ret = dbp->fd(dbp, &fd)) == 0)
return (fd);
+ if (ret < 0) /* DB 1.85 can't handle DB 2.0's errors. */
+ ret = EINVAL;
__os_set_errno(ret);
return (-1);
}
@@ -354,11 +363,10 @@ db185_get(db185p, key185, data185, flags)
return (1);
}
+ if (ret < 0) /* DB 1.85 can't handle DB 2.0's errors. */
+einval: ret = EINVAL;
__os_set_errno(ret);
return (-1);
-
-einval: __os_set_errno(EINVAL);
- return (-1);
}
static int
@@ -371,7 +379,7 @@ db185_put(db185p, key185, data185, flags)
DB *dbp;
DBC *dbcp_put;
DBT key, data;
- int ret;
+ int ret, t_ret;
dbp = db185p->dbp;
@@ -394,23 +402,18 @@ db185_put(db185p, key185, data185, flags)
if (dbp->type != DB_RECNO)
goto einval;
- if ((ret = dbp->cursor(dbp, NULL, &dbcp_put, 0)) != 0) {
- __os_set_errno(ret);
- return (-1);
- }
+ if ((ret = dbp->cursor(dbp, NULL, &dbcp_put, 0)) != 0)
+ break;
if ((ret =
- dbcp_put->c_get(dbcp_put, &key, &data, DB_SET)) != 0) {
- (void)dbcp_put->c_close(dbcp_put);
- __os_set_errno(ret);
- return (-1);
+ dbcp_put->c_get(dbcp_put, &key, &data, DB_SET)) == 0) {
+ memset(&data, 0, sizeof(data));
+ data.data = data185->data;
+ data.size = data185->size;
+ ret = dbcp_put->c_put(dbcp_put, &key, &data,
+ flags == R_IAFTER ? DB_AFTER : DB_BEFORE);
}
- memset(&data, 0, sizeof(data));
- data.data = data185->data;
- data.size = data185->size;
- ret = dbcp_put->c_put(dbcp_put,
- &key, &data, flags == R_IAFTER ? DB_AFTER : DB_BEFORE);
- (void)dbcp_put->c_close(dbcp_put);
- __os_set_errno(ret);
+ if ((t_ret = dbcp_put->c_close(dbcp_put)) != 0 && ret == 0)
+ ret = t_ret;
break;
case R_NOOVERWRITE:
ret = dbp->put(dbp, NULL, &key, &data, DB_NOOVERWRITE);
@@ -436,10 +439,10 @@ db185_put(db185p, key185, data185, flags)
case DB_KEYEXIST:
return (1);
}
- __os_set_errno(ret);
- return (-1);
-einval: __os_set_errno(EINVAL);
+ if (ret < 0) /* DB 1.85 can't handle DB 2.0's errors. */
+einval: ret = EINVAL;
+ __os_set_errno(ret);
return (-1);
}
@@ -496,11 +499,10 @@ db185_seq(db185p, key185, data185, flags)
return (1);
}
+ if (ret < 0) /* DB 1.85 can't handle DB 2.0's errors. */
+einval: ret = EINVAL;
__os_set_errno(ret);
return (-1);
-
-einval: __os_set_errno(EINVAL);
- return (-1);
}
static int
@@ -534,11 +536,10 @@ db185_sync(db185p, flags)
if ((ret = dbp->sync(dbp, 0)) == 0)
return (0);
+ if (ret < 0) /* DB 1.85 can't handle DB 2.0's errors. */
+einval: ret = EINVAL;
__os_set_errno(ret);
return (-1);
-
-einval: __os_set_errno(EINVAL);
- return (-1);
}
static void
@@ -564,7 +565,7 @@ db185_compare(dbp, a, b)
DB *dbp;
const DBT *a, *b;
{
- return (((DB185 *)dbp->cj_internal)->compare(a, b));
+ return (((DB185 *)dbp->api_internal)->compare(a, b));
}
/*
@@ -576,7 +577,7 @@ db185_prefix(dbp, a, b)
DB *dbp;
const DBT *a, *b;
{
- return (((DB185 *)dbp->cj_internal)->prefix(a, b));
+ return (((DB185 *)dbp->api_internal)->prefix(a, b));
}
/*
@@ -589,5 +590,5 @@ db185_hash(dbp, key, len)
const void *key;
u_int32_t len;
{
- return (((DB185 *)dbp->cj_internal)->hash(key, (size_t)len));
+ return (((DB185 *)dbp->api_internal)->hash(key, (size_t)len));
}
diff --git a/bdb/db185/db185_int.h b/bdb/db185/db185_int.in
index 172019d3f00..a4a3ce19c17 100644
--- a/bdb/db185/db185_int.h
+++ b/bdb/db185/db185_int.in
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1997, 1998, 1999, 2000
+ * Copyright (c) 1997-2002
* Sleepycat Software. All rights reserved.
*/
/*
@@ -36,11 +36,11 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
- * $Id: db185_int.h,v 11.7 2001/01/22 22:22:46 krinsky Exp $
+ * $Id: db185_int.in,v 11.12 2002/01/11 15:51:51 bostic Exp $
*/
-#ifndef _DB185_H_
-#define _DB185_H_
+#ifndef _DB185_INT_H_
+#define _DB185_INT_H_
/* Routine flags. */
#define R_CURSOR 1 /* del, put, seq */
@@ -126,4 +126,4 @@ typedef struct {
u_char bval; /* delimiting byte (variable-length records) */
char *bfname; /* btree file name */
} RECNOINFO;
-#endif /* !_DB185_H_ */
+#endif /* !_DB185_INT_H_ */
diff --git a/bdb/db_archive/db_archive.c b/bdb/db_archive/db_archive.c
index 7c91e42f390..dc8718e4c03 100644
--- a/bdb/db_archive/db_archive.c
+++ b/bdb/db_archive/db_archive.c
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Copyright (c) 1996-2002
* Sleepycat Software. All rights reserved.
*/
@@ -9,29 +9,25 @@
#ifndef lint
static const char copyright[] =
- "Copyright (c) 1996-2000\nSleepycat Software Inc. All rights reserved.\n";
+ "Copyright (c) 1996-2002\nSleepycat Software Inc. All rights reserved.\n";
static const char revid[] =
- "$Id: db_archive.c,v 11.18 2001/01/18 18:36:56 bostic Exp $";
+ "$Id: db_archive.c,v 11.36 2002/03/28 20:13:34 bostic Exp $";
#endif
#ifndef NO_SYSTEM_INCLUDES
#include <sys/types.h>
-#include <stdlib.h>
#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
#include <unistd.h>
#endif
#include "db_int.h"
-#include "common_ext.h"
-int main __P((int, char *[]));
-void usage __P((void));
-void version_check __P((void));
-
-DB_ENV *dbenv;
-const char
- *progname = "db_archive"; /* Program name. */
+int main __P((int, char *[]));
+int usage __P((void));
+int version_check __P((const char *));
int
main(argc, argv)
@@ -40,16 +36,19 @@ main(argc, argv)
{
extern char *optarg;
extern int optind;
+ const char *progname = "db_archive";
+ DB_ENV *dbenv;
u_int32_t flags;
int ch, e_close, exitval, ret, verbose;
- char **file, *home, **list;
+ char **file, *home, **list, *passwd;
- version_check();
+ if ((ret = version_check(progname)) != 0)
+ return (ret);
flags = 0;
e_close = exitval = verbose = 0;
- home = NULL;
- while ((ch = getopt(argc, argv, "ah:lsVv")) != EOF)
+ home = passwd = NULL;
+ while ((ch = getopt(argc, argv, "ah:lP:sVv")) != EOF)
switch (ch) {
case 'a':
LF_SET(DB_ARCH_ABS);
@@ -60,24 +59,33 @@ main(argc, argv)
case 'l':
LF_SET(DB_ARCH_LOG);
break;
+ case 'P':
+ passwd = strdup(optarg);
+ memset(optarg, 0, strlen(optarg));
+ if (passwd == NULL) {
+ fprintf(stderr, "%s: strdup: %s\n",
+ progname, strerror(errno));
+ return (EXIT_FAILURE);
+ }
+ break;
case 's':
LF_SET(DB_ARCH_DATA);
break;
case 'V':
printf("%s\n", db_version(NULL, NULL, NULL));
- exit(0);
+ return (EXIT_SUCCESS);
case 'v':
verbose = 1;
break;
case '?':
default:
- usage();
+ return (usage());
}
argc -= optind;
argv += optind;
if (argc != 0)
- usage();
+ return (usage());
/* Handle possible interruptions. */
__db_util_siginit();
@@ -99,6 +107,11 @@ main(argc, argv)
if (verbose)
(void)dbenv->set_verbose(dbenv, DB_VERB_CHKPOINT, 1);
+ if (passwd != NULL && (ret = dbenv->set_encrypt(dbenv,
+ passwd, DB_ENCRYPT_AES)) != 0) {
+ dbenv->err(dbenv, ret, "set_passwd");
+ goto shutdown;
+ }
/*
* If attaching to a pre-existing environment fails, create a
* private one and try again.
@@ -112,8 +125,8 @@ main(argc, argv)
}
/* Get the list of names. */
- if ((ret = log_archive(dbenv, &list, flags, NULL)) != 0) {
- dbenv->err(dbenv, ret, "log_archive");
+ if ((ret = dbenv->log_archive(dbenv, &list, flags)) != 0) {
+ dbenv->err(dbenv, ret, "DB_ENV->log_archive");
goto shutdown;
}
@@ -121,7 +134,7 @@ main(argc, argv)
if (list != NULL) {
for (file = list; *file != NULL; ++file)
printf("%s\n", *file);
- __os_free(list, 0);
+ free(list);
}
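(Sketch, not part of the patch.) Putting the two hunks above together, the 4.1 calling pattern is roughly as follows; the returned list is a single allocation made with the application's malloc, so one free() releases it:

	char **file, **list;
	int ret;

	if ((ret = dbenv->log_archive(dbenv, &list, DB_ARCH_ABS)) != 0) {
		dbenv->err(dbenv, ret, "DB_ENV->log_archive");
		return (ret);
	}
	if (list != NULL) {		/* NULL when there is nothing to report */
		for (file = list; *file != NULL; ++file)
			printf("%s\n", *file);
		free(list);
	}
	return (0);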
if (0) {
@@ -136,18 +149,20 @@ shutdown: exitval = 1;
/* Resend any caught signal. */
__db_util_sigresend();
- return (exitval);
+ return (exitval == 0 ? EXIT_SUCCESS : EXIT_FAILURE);
}
-void
+int
usage()
{
- (void)fprintf(stderr, "usage: db_archive [-alsVv] [-h home]\n");
- exit (1);
+ (void)fprintf(stderr,
+ "usage: db_archive [-alsVv] [-h home] [-P password]\n");
+ return (EXIT_FAILURE);
}
-void
-version_check()
+int
+version_check(progname)
+ const char *progname;
{
int v_major, v_minor, v_patch;
@@ -159,6 +174,7 @@ version_check()
"%s: version %d.%d.%d doesn't match library version %d.%d.%d\n",
progname, DB_VERSION_MAJOR, DB_VERSION_MINOR,
DB_VERSION_PATCH, v_major, v_minor, v_patch);
- exit (1);
+ return (EXIT_FAILURE);
}
+ return (0);
}
diff --git a/bdb/db_checkpoint/db_checkpoint.c b/bdb/db_checkpoint/db_checkpoint.c
index c7d16e02334..a59572c5f76 100644
--- a/bdb/db_checkpoint/db_checkpoint.c
+++ b/bdb/db_checkpoint/db_checkpoint.c
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Copyright (c) 1996-2002
* Sleepycat Software. All rights reserved.
*/
@@ -9,9 +9,9 @@
#ifndef lint
static const char copyright[] =
- "Copyright (c) 1996-2000\nSleepycat Software Inc. All rights reserved.\n";
+ "Copyright (c) 1996-2002\nSleepycat Software Inc. All rights reserved.\n";
static const char revid[] =
- "$Id: db_checkpoint.c,v 11.25 2001/01/18 18:36:57 bostic Exp $";
+ "$Id: db_checkpoint.c,v 11.46 2002/08/08 03:50:31 bostic Exp $";
#endif
#ifndef NO_SYSTEM_INCLUDES
@@ -31,25 +31,17 @@ static const char revid[] =
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>
+#include <string.h>
#include <unistd.h>
#endif
#include "db_int.h"
-#include "db_page.h"
-#include "btree.h"
-#include "hash.h"
-#include "qam.h"
-#include "common_ext.h"
-#include "clib_ext.h"
-
-char *check __P((DB_ENV *, long, long));
-int main __P((int, char *[]));
-void usage __P((void));
-void version_check __P((void));
+#include "dbinc/db_page.h"
+#include "dbinc/db_am.h"
-DB_ENV *dbenv;
-const char
- *progname = "db_checkpoint"; /* Program name. */
+int main __P((int, char *[]));
+int usage __P((void));
+int version_check __P((const char *));
int
main(argc, argv)
@@ -58,13 +50,16 @@ main(argc, argv)
{
extern char *optarg;
extern int optind;
+ DB_ENV *dbenv;
+ const char *progname = "db_checkpoint";
time_t now;
long argval;
u_int32_t flags, kbytes, minutes, seconds;
int ch, e_close, exitval, once, ret, verbose;
- char *home, *logfile;
+ char *home, *logfile, *passwd;
- version_check();
+ if ((ret = version_check(progname)) != 0)
+ return (ret);
/*
* !!!
@@ -76,8 +71,8 @@ main(argc, argv)
kbytes = minutes = 0;
e_close = exitval = once = verbose = 0;
flags = 0;
- home = logfile = NULL;
- while ((ch = getopt(argc, argv, "1h:k:L:p:Vv")) != EOF)
+ home = logfile = passwd = NULL;
+ while ((ch = getopt(argc, argv, "1h:k:L:P:p:Vv")) != EOF)
switch (ch) {
case '1':
once = 1;
@@ -87,40 +82,50 @@ main(argc, argv)
home = optarg;
break;
case 'k':
- (void)__db_getlong(NULL, progname,
- optarg, 1, (long)MAX_UINT32_T, &argval);
+ if (__db_getlong(NULL, progname,
+ optarg, 1, (long)MAX_UINT32_T, &argval))
+ return (EXIT_FAILURE);
kbytes = argval;
break;
case 'L':
logfile = optarg;
break;
+ case 'P':
+ passwd = strdup(optarg);
+ memset(optarg, 0, strlen(optarg));
+ if (passwd == NULL) {
+ fprintf(stderr, "%s: strdup: %s\n",
+ progname, strerror(errno));
+ return (EXIT_FAILURE);
+ }
+ break;
case 'p':
- (void)__db_getlong(NULL, progname,
- optarg, 1, (long)MAX_UINT32_T, &argval);
+ if (__db_getlong(NULL, progname,
+ optarg, 1, (long)MAX_UINT32_T, &argval))
+ return (EXIT_FAILURE);
minutes = argval;
break;
case 'V':
printf("%s\n", db_version(NULL, NULL, NULL));
- exit(0);
+ return (EXIT_SUCCESS);
case 'v':
verbose = 1;
break;
case '?':
default:
- usage();
- goto shutdown;
+ return (usage());
}
argc -= optind;
argv += optind;
if (argc != 0)
- usage();
+ return (usage());
if (once == 0 && kbytes == 0 && minutes == 0) {
(void)fprintf(stderr,
"%s: at least one of -1, -k and -p must be specified\n",
progname);
- exit (1);
+ return (EXIT_FAILURE);
}
/* Handle possible interruptions. */
@@ -144,6 +149,11 @@ main(argc, argv)
dbenv->set_errfile(dbenv, stderr);
dbenv->set_errpfx(dbenv, progname);
+ if (passwd != NULL && (ret = dbenv->set_encrypt(dbenv,
+ passwd, DB_ENCRYPT_AES)) != 0) {
+ dbenv->err(dbenv, ret, "set_passwd");
+ goto shutdown;
+ }
/* Initialize the environment. */
if ((ret = dbenv->open(dbenv,
home, DB_JOINENV | DB_USE_ENVIRON, 0)) != 0) {
@@ -152,10 +162,10 @@ main(argc, argv)
}
/* Register the standard pgin/pgout functions, in case we do I/O. */
- if ((ret =
- memp_register(dbenv, DB_FTYPE_SET, __db_pgin, __db_pgout)) != 0) {
+ if ((ret = dbenv->memp_register(
+ dbenv, DB_FTYPE_SET, __db_pgin, __db_pgout)) != 0) {
dbenv->err(dbenv, ret,
- "failed to register access method functions");
+ "DB_ENV->memp_register: failed to register access method functions");
goto shutdown;
}
@@ -171,15 +181,8 @@ main(argc, argv)
dbenv->errx(dbenv, "checkpoint: %s", ctime(&now));
}
- ret = txn_checkpoint(dbenv, kbytes, minutes, flags);
- while (ret == DB_INCOMPLETE) {
- if (verbose)
- dbenv->errx(dbenv,
- "checkpoint did not finish, retrying\n");
- (void)__os_sleep(dbenv, 2, 0);
- ret = txn_checkpoint(dbenv, 0, 0, flags);
- }
- if (ret != 0) {
+ if ((ret = dbenv->txn_checkpoint(dbenv,
+ kbytes, minutes, flags)) != 0) {
dbenv->err(dbenv, ret, "txn_checkpoint");
goto shutdown;
}
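(Sketch, not part of the patch.) The retry loop that handled DB_INCOMPLETE is gone because, as the hunk suggests, 4.1 no longer returns that value from this call; kbytes and minutes are checkpoint-if-exceeded thresholds measured since the last checkpoint. A minimal sketch, assuming an open DB_ENV *dbenv:

	int ret;

	/* Checkpoint if >= 512KB of log or >= 5 minutes since the last one. */
	if ((ret = dbenv->txn_checkpoint(dbenv, 512, 5, 0)) != 0)
		dbenv->err(dbenv, ret, "DB_ENV->txn_checkpoint");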
@@ -208,19 +211,21 @@ shutdown: exitval = 1;
/* Resend any caught signal. */
__db_util_sigresend();
- return (exitval);
+ return (exitval == 0 ? EXIT_SUCCESS : EXIT_FAILURE);
}
-void
+int
usage()
{
- (void)fprintf(stderr,
- "usage: db_checkpoint [-1Vv] [-h home] [-k kbytes] [-L file] [-p min]\n");
- exit(1);
+ (void)fprintf(stderr, "%s\n\t%s\n",
+ "usage: db_checkpoint [-1Vv]",
+ "[-h home] [-k kbytes] [-L file] [-P password] [-p min]");
+ return (EXIT_FAILURE);
}
-void
-version_check()
+int
+version_check(progname)
+ const char *progname;
{
int v_major, v_minor, v_patch;
@@ -232,6 +237,7 @@ version_check()
"%s: version %d.%d.%d doesn't match library version %d.%d.%d\n",
progname, DB_VERSION_MAJOR, DB_VERSION_MINOR,
DB_VERSION_PATCH, v_major, v_minor, v_patch);
- exit (1);
+ return (EXIT_FAILURE);
}
+ return (0);
}
diff --git a/bdb/db_deadlock/db_deadlock.c b/bdb/db_deadlock/db_deadlock.c
index ac151db127a..523918b9ea4 100644
--- a/bdb/db_deadlock/db_deadlock.c
+++ b/bdb/db_deadlock/db_deadlock.c
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Copyright (c) 1996-2002
* Sleepycat Software. All rights reserved.
*/
@@ -9,9 +9,9 @@
#ifndef lint
static const char copyright[] =
- "Copyright (c) 1996-2000\nSleepycat Software Inc. All rights reserved.\n";
+ "Copyright (c) 1996-2002\nSleepycat Software Inc. All rights reserved.\n";
static const char revid[] =
- "$Id: db_deadlock.c,v 11.19 2001/01/18 18:36:57 bostic Exp $";
+ "$Id: db_deadlock.c,v 11.38 2002/08/08 03:50:32 bostic Exp $";
#endif
#ifndef NO_SYSTEM_INCLUDES
@@ -31,19 +31,15 @@ static const char revid[] =
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>
+#include <string.h>
#include <unistd.h>
#endif
#include "db_int.h"
-#include "clib_ext.h"
-int main __P((int, char *[]));
-void usage __P((void));
-void version_check __P((void));
-
-DB_ENV *dbenv;
-const char
- *progname = "db_deadlock"; /* Program name. */
+int main __P((int, char *[]));
+int usage __P((void));
+int version_check __P((const char *));
int
main(argc, argv)
@@ -52,36 +48,49 @@ main(argc, argv)
{
extern char *optarg;
extern int optind;
+ const char *progname = "db_deadlock";
+ DB_ENV *dbenv;
u_int32_t atype;
time_t now;
- long usecs;
- u_int32_t flags;
+ long secs, usecs;
int ch, e_close, exitval, ret, verbose;
- char *home, *logfile;
+ char *home, *logfile, *str;
- version_check();
+ if ((ret = version_check(progname)) != 0)
+ return (ret);
atype = DB_LOCK_DEFAULT;
home = logfile = NULL;
- usecs = 0;
- flags = 0;
+ secs = usecs = 0;
e_close = exitval = verbose = 0;
while ((ch = getopt(argc, argv, "a:h:L:t:Vvw")) != EOF)
switch (ch) {
case 'a':
switch (optarg[0]) {
+ case 'e':
+ atype = DB_LOCK_EXPIRE;
+ break;
+ case 'm':
+ atype = DB_LOCK_MAXLOCKS;
+ break;
+ case 'n':
+ atype = DB_LOCK_MINLOCKS;
+ break;
case 'o':
atype = DB_LOCK_OLDEST;
break;
+ case 'w':
+ atype = DB_LOCK_MINWRITE;
+ break;
case 'y':
atype = DB_LOCK_YOUNGEST;
break;
default:
- usage();
+ return (usage());
/* NOTREACHED */
}
if (optarg[1] != '\0')
- usage();
+ return (usage());
break;
case 'h':
home = optarg;
@@ -90,42 +99,40 @@ main(argc, argv)
logfile = optarg;
break;
case 't':
- (void)__db_getlong(NULL,
- progname, optarg, 1, LONG_MAX, &usecs);
- usecs *= 1000000;
+ if ((str = strchr(optarg, '.')) != NULL) {
+ *str++ = '\0';
+ if (*str != '\0' && __db_getlong(
+ NULL, progname, str, 0, LONG_MAX, &usecs))
+ return (EXIT_FAILURE);
+ }
+ if (*optarg != '\0' && __db_getlong(
+ NULL, progname, optarg, 0, LONG_MAX, &secs))
+ return (EXIT_FAILURE);
+ if (secs == 0 && usecs == 0)
+ return (usage());
+
break;
+
case 'V':
printf("%s\n", db_version(NULL, NULL, NULL));
- exit(0);
+ return (EXIT_SUCCESS);
case 'v':
verbose = 1;
break;
- case 'w':
- LF_SET(DB_LOCK_CONFLICT);
+ case 'w': /* Undocumented. */
+ /* Detect every 100ms (100000 us) when polling. */
+ secs = 0;
+ usecs = 100000;
break;
case '?':
default:
- usage();
+ return (usage());
}
argc -= optind;
argv += optind;
if (argc != 0)
- usage();
-
- if (usecs == 0 && !LF_ISSET(DB_LOCK_CONFLICT)) {
- fprintf(stderr,
- "%s: at least one of -t and -w must be specified\n",
- progname);
- exit(1);
- }
-
- /*
- * We detect every 100ms (100000 us) when we're running in
- * DB_LOCK_CONFLICT mode.
- */
- if (usecs == 0)
- usecs = 100000;
+ return (usage());
/* Handle possible interruptions. */
__db_util_siginit();
@@ -166,13 +173,15 @@ main(argc, argv)
dbenv->errx(dbenv, "running at %.24s", ctime(&now));
}
- if ((ret = lock_detect(dbenv, flags, atype, NULL)) != 0) {
- dbenv->err(dbenv, ret, "lock_detect");
+ if ((ret = dbenv->lock_detect(dbenv, 0, atype, NULL)) != 0) {
+ dbenv->err(dbenv, ret, "DB_ENV->lock_detect");
goto shutdown;
}
- /* Make a pass every "usecs" usecs. */
- (void)__os_sleep(dbenv, 0, usecs);
+ /* Make a pass every "secs" secs and "usecs" usecs. */
+ if (secs == 0 && usecs == 0)
+ break;
+ (void)__os_sleep(dbenv, secs, usecs);
}
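(Sketch, not part of the patch.) For reference, the 4.1 method form used above is DB_ENV->lock_detect(env, flags, atype, &rejected); the last argument, when non-NULL, receives the number of lock requests rejected by this pass (the utility passes NULL). A sketch with an illustrative abort policy, assuming an open DB_ENV *dbenv:

	int rejected, ret;

	if ((ret = dbenv->lock_detect(dbenv, 0, DB_LOCK_MINWRITE, &rejected)) != 0)
		dbenv->err(dbenv, ret, "DB_ENV->lock_detect");
	else if (rejected > 0)
		dbenv->errx(dbenv, "%d lock request(s) rejected", rejected);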
if (0) {
@@ -193,19 +202,21 @@ shutdown: exitval = 1;
/* Resend any caught signal. */
__db_util_sigresend();
- return (exitval);
+ return (exitval == 0 ? EXIT_SUCCESS : EXIT_FAILURE);
}
-void
+int
usage()
{
- (void)fprintf(stderr,
- "usage: db_deadlock [-Vvw] [-a o | y] [-h home] [-L file] [-t sec]\n");
- exit(1);
+ (void)fprintf(stderr, "%s\n\t%s\n",
+ "usage: db_deadlock [-Vv]",
+ "[-a e | m | n | o | w | y] [-h home] [-L file] [-t sec.usec]");
+ return (EXIT_FAILURE);
}
-void
-version_check()
+int
+version_check(progname)
+ const char *progname;
{
int v_major, v_minor, v_patch;
@@ -217,6 +228,7 @@ version_check()
"%s: version %d.%d.%d doesn't match library version %d.%d.%d\n",
progname, DB_VERSION_MAJOR, DB_VERSION_MINOR,
DB_VERSION_PATCH, v_major, v_minor, v_patch);
- exit (1);
+ return (EXIT_FAILURE);
}
+ return (0);
}
diff --git a/bdb/db_dump/db_dump.c b/bdb/db_dump/db_dump.c
index ba24dd6cc09..143884a3fa8 100644
--- a/bdb/db_dump/db_dump.c
+++ b/bdb/db_dump/db_dump.c
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Copyright (c) 1996-2002
* Sleepycat Software. All rights reserved.
*/
@@ -9,9 +9,9 @@
#ifndef lint
static const char copyright[] =
- "Copyright (c) 1996-2000\nSleepycat Software Inc. All rights reserved.\n";
+ "Copyright (c) 1996-2002\nSleepycat Software Inc. All rights reserved.\n";
static const char revid[] =
- "$Id: db_dump.c,v 11.41 2001/01/18 18:36:57 bostic Exp $";
+ "$Id: db_dump.c,v 11.80 2002/08/08 03:50:34 bostic Exp $";
#endif
#ifndef NO_SYSTEM_INCLUDES
@@ -24,25 +24,17 @@ static const char revid[] =
#endif
#include "db_int.h"
-#include "db_page.h"
-#include "db_shash.h"
-#include "btree.h"
-#include "hash.h"
-#include "lock.h"
-
-void configure __P((char *));
-int db_init __P((char *));
+#include "dbinc/db_page.h"
+#include "dbinc/db_am.h"
+
+int db_init __P((DB_ENV *, char *, int, u_int32_t, int *));
int dump __P((DB *, int, int));
-int dump_sub __P((DB *, char *, int, int));
+int dump_sub __P((DB_ENV *, DB *, char *, int, int));
int is_sub __P((DB *, int *));
int main __P((int, char *[]));
int show_subs __P((DB *));
-void usage __P((void));
-void version_check __P((void));
-
-DB_ENV *dbenv;
-const char
- *progname = "db_dump"; /* Program name. */
+int usage __P((void));
+int version_check __P((const char *));
int
main(argc, argv)
@@ -51,19 +43,25 @@ main(argc, argv)
{
extern char *optarg;
extern int optind;
+ const char *progname = "db_dump";
+ DB_ENV *dbenv;
DB *dbp;
+ u_int32_t cache;
int ch, d_close;
- int e_close, exitval;
- int lflag, nflag, pflag, ret, rflag, Rflag, subs, keyflag;
- char *dopt, *home, *subname;
+ int e_close, exitval, keyflag, lflag, nflag, pflag, private;
+ int ret, Rflag, rflag, resize, subs;
+ char *dopt, *home, *passwd, *subname;
- version_check();
+ if ((ret = version_check(progname)) != 0)
+ return (ret);
dbp = NULL;
d_close = e_close = exitval = lflag = nflag = pflag = rflag = Rflag = 0;
keyflag = 0;
- dopt = home = subname = NULL;
- while ((ch = getopt(argc, argv, "d:f:h:klNprRs:V")) != EOF)
+ cache = MEGABYTE;
+ private = 0;
+ dopt = home = passwd = subname = NULL;
+ while ((ch = getopt(argc, argv, "d:f:h:klNpP:rRs:V")) != EOF)
switch (ch) {
case 'd':
dopt = optarg;
@@ -72,7 +70,7 @@ main(argc, argv)
if (freopen(optarg, "w", stdout) == NULL) {
fprintf(stderr, "%s: %s: reopen: %s\n",
progname, optarg, strerror(errno));
- exit (1);
+ return (EXIT_FAILURE);
}
break;
case 'h':
@@ -86,11 +84,14 @@ main(argc, argv)
break;
case 'N':
nflag = 1;
- if ((ret = db_env_set_panicstate(0)) != 0) {
- fprintf(stderr,
- "%s: db_env_set_panicstate: %s\n",
- progname, db_strerror(ret));
- return (1);
+ break;
+ case 'P':
+ passwd = strdup(optarg);
+ memset(optarg, 0, strlen(optarg));
+ if (passwd == NULL) {
+ fprintf(stderr, "%s: strdup: %s\n",
+ progname, strerror(errno));
+ return (EXIT_FAILURE);
}
break;
case 'p':
@@ -108,42 +109,42 @@ main(argc, argv)
break;
case 'V':
printf("%s\n", db_version(NULL, NULL, NULL));
- exit(0);
+ return (EXIT_SUCCESS);
case '?':
default:
- usage();
+ return (usage());
}
argc -= optind;
argv += optind;
if (argc != 1)
- usage();
+ return (usage());
if (dopt != NULL && pflag) {
fprintf(stderr,
"%s: the -d and -p options may not both be specified\n",
progname);
- exit (1);
+ return (EXIT_FAILURE);
}
if (lflag && subname != NULL) {
fprintf(stderr,
"%s: the -l and -s options may not both be specified\n",
progname);
- exit (1);
+ return (EXIT_FAILURE);
}
if (keyflag && rflag) {
fprintf(stderr, "%s: %s",
"the -k and -r or -R options may not both be specified\n",
progname);
- exit(1);
+ return (EXIT_FAILURE);
}
if (subname != NULL && rflag) {
fprintf(stderr, "%s: %s",
"the -s and -r or R options may not both be specified\n",
progname);
- exit(1);
+ return (EXIT_FAILURE);
}
/* Handle possible interruptions. */
@@ -153,7 +154,7 @@ main(argc, argv)
* Create an environment object and initialize it for error
* reporting.
*/
- if ((ret = db_env_create(&dbenv, 0)) != 0) {
+retry: if ((ret = db_env_create(&dbenv, 0)) != 0) {
fprintf(stderr,
"%s: db_env_create: %s\n", progname, db_strerror(ret));
goto err;
@@ -162,14 +163,24 @@ main(argc, argv)
dbenv->set_errfile(dbenv, stderr);
dbenv->set_errpfx(dbenv, progname);
-
- if (nflag && (ret = dbenv->set_mutexlocks(dbenv, 0)) != 0) {
- dbenv->err(dbenv, ret, "set_mutexlocks");
+ if (nflag) {
+ if ((ret = dbenv->set_flags(dbenv, DB_NOLOCKING, 1)) != 0) {
+ dbenv->err(dbenv, ret, "set_flags: DB_NOLOCKING");
+ goto err;
+ }
+ if ((ret = dbenv->set_flags(dbenv, DB_NOPANIC, 1)) != 0) {
+ dbenv->err(dbenv, ret, "set_flags: DB_NOPANIC");
+ goto err;
+ }
+ }
+ if (passwd != NULL && (ret = dbenv->set_encrypt(dbenv,
+ passwd, DB_ENCRYPT_AES)) != 0) {
+ dbenv->err(dbenv, ret, "set_passwd");
goto err;
}
/* Initialize the environment. */
- if (db_init(home) != 0)
+ if (db_init(dbenv, home, rflag, cache, &private) != 0)
goto err;
/* Create the DB object and open the file. */
@@ -185,17 +196,31 @@ main(argc, argv)
*/
if (rflag) {
if ((ret = dbp->verify(dbp, argv[0], NULL, stdout,
- DB_SALVAGE | (Rflag ? DB_AGGRESSIVE : 0))) != 0)
+ DB_SALVAGE |
+ (Rflag ? DB_AGGRESSIVE : 0) |
+ (pflag ? DB_PRINTABLE : 0))) != 0)
goto err;
exitval = 0;
goto done;
}
- if ((ret = dbp->open(dbp,
+ if ((ret = dbp->open(dbp, NULL,
argv[0], subname, DB_UNKNOWN, DB_RDONLY, 0)) != 0) {
dbp->err(dbp, ret, "open: %s", argv[0]);
goto err;
}
+ if (private != 0) {
+ if ((ret = __db_util_cache(dbenv, dbp, &cache, &resize)) != 0)
+ goto err;
+ if (resize) {
+ (void)dbp->close(dbp, 0);
+ d_close = 0;
+
+ (void)dbenv->close(dbenv, 0);
+ e_close = 0;
+ goto retry;
+ }
+ }
if (dopt != NULL) {
if (__db_dump(dbp, dopt, NULL)) {
@@ -217,7 +242,7 @@ main(argc, argv)
if (subname == NULL && is_sub(dbp, &subs))
goto err;
if (subs) {
- if (dump_sub(dbp, argv[0], pflag, keyflag))
+ if (dump_sub(dbenv, dbp, argv[0], pflag, keyflag))
goto err;
} else
if (__db_prheader(dbp, NULL, pflag, keyflag, stdout,
@@ -231,7 +256,7 @@ err: exitval = 1;
}
done: if (d_close && (ret = dbp->close(dbp, 0)) != 0) {
exitval = 1;
- dbp->err(dbp, ret, "close");
+ dbenv->err(dbenv, ret, "close");
}
if (e_close && (ret = dbenv->close(dbenv, 0)) != 0) {
exitval = 1;
@@ -242,7 +267,7 @@ done: if (d_close && (ret = dbp->close(dbp, 0)) != 0) {
/* Resend any caught signal. */
__db_util_sigresend();
- return (exitval);
+ return (exitval == 0 ? EXIT_SUCCESS : EXIT_FAILURE);
}
/*
@@ -250,24 +275,36 @@ done: if (d_close && (ret = dbp->close(dbp, 0)) != 0) {
* Initialize the environment.
*/
int
-db_init(home)
+db_init(dbenv, home, is_salvage, cache, is_privatep)
+ DB_ENV *dbenv;
char *home;
+ int is_salvage;
+ u_int32_t cache;
+ int *is_privatep;
{
int ret;
/*
- * Try and use the underlying environment when opening a database. We
- * wish to use the buffer pool so our information is as up-to-date as
- * possible, even if the mpool cache hasn't been flushed; we wish to
- * use the locking system, if present, so that we are safe to use with
- * transactions. (We don't need to use transactions explicitly, as
- * we're read-only.)
+ * Try and use the underlying environment when opening a database.
+ * We wish to use the buffer pool so our information is as up-to-date
+ * as possible, even if the mpool cache hasn't been flushed.
*
- * Note that in CDB, too, this will configure our environment
+ * If we are not doing a salvage, we wish to use the DB_JOINENV flag;
+ * if a locking system is present, this will let us use it and be
+ * safe to run concurrently with other threads of control. (We never
+ * need to use transactions explicitly, as we're read-only.) Note
+ * that in CDB, too, this will configure our environment
* appropriately, and our cursors will (correctly) do locking as CDB
* read cursors.
+ *
+ * If we are doing a salvage, the verification code will protest
+ * if we initialize transactions, logging, or locking; do an
+ * explicit DB_INIT_MPOOL to try to join any existing environment
+ * before we create our own.
*/
- if (dbenv->open(dbenv, home, DB_JOINENV | DB_USE_ENVIRON, 0) == 0)
+ *is_privatep = 0;
+ if (dbenv->open(dbenv, home,
+ DB_USE_ENVIRON | (is_salvage ? DB_INIT_MPOOL : DB_JOINENV), 0) == 0)
return (0);
/*
@@ -280,7 +317,9 @@ db_init(home)
* an mpool region exists). Create one, but make it private so that
* no files are actually created.
*/
- if ((ret = dbenv->open(dbenv, home,
+ *is_privatep = 1;
+ if ((ret = dbenv->set_cachesize(dbenv, 0, cache, 1)) == 0 &&
+ (ret = dbenv->open(dbenv, home,
DB_CREATE | DB_INIT_MPOOL | DB_PRIVATE | DB_USE_ENVIRON, 0)) == 0)
return (0);
@@ -305,18 +344,20 @@ is_sub(dbp, yesno)
switch (dbp->type) {
case DB_BTREE:
case DB_RECNO:
- if ((ret = dbp->stat(dbp, &btsp, NULL, 0)) != 0) {
+ if ((ret = dbp->stat(dbp, &btsp, DB_FAST_STAT)) != 0) {
dbp->err(dbp, ret, "DB->stat");
return (ret);
}
*yesno = btsp->bt_metaflags & BTM_SUBDB ? 1 : 0;
+ free(btsp);
break;
case DB_HASH:
- if ((ret = dbp->stat(dbp, &hsp, NULL, 0)) != 0) {
+ if ((ret = dbp->stat(dbp, &hsp, DB_FAST_STAT)) != 0) {
dbp->err(dbp, ret, "DB->stat");
return (ret);
}
*yesno = hsp->hash_metaflags & DB_HASH_SUBDB ? 1 : 0;
+ free(hsp);
break;
case DB_QUEUE:
break;
@@ -332,7 +373,8 @@ is_sub(dbp, yesno)
* Dump out the records for a DB containing subdatabases.
*/
int
-dump_sub(parent_dbp, parent_name, pflag, keyflag)
+dump_sub(dbenv, parent_dbp, parent_name, pflag, keyflag)
+ DB_ENV *dbenv;
DB *parent_dbp;
char *parent_name;
int pflag, keyflag;
@@ -369,7 +411,7 @@ dump_sub(parent_dbp, parent_name, pflag, keyflag)
free(subdb);
return (1);
}
- if ((ret = dbp->open(dbp,
+ if ((ret = dbp->open(dbp, NULL,
parent_name, subdb, DB_UNKNOWN, DB_RDONLY, 0)) != 0)
dbp->err(dbp, ret,
"DB->open: %s:%s", parent_name, subdb);
@@ -449,7 +491,10 @@ dump(dbp, pflag, keyflag)
{
DBC *dbcp;
DBT key, data;
- int ret, is_recno;
+ DBT keyret, dataret;
+ db_recno_t recno;
+ int is_recno, failed, ret;
+ void *pointer;
/*
* Get a cursor and step through the database, printing out each
@@ -460,47 +505,95 @@ dump(dbp, pflag, keyflag)
return (1);
}
+ failed = 0;
memset(&key, 0, sizeof(key));
memset(&data, 0, sizeof(data));
+ data.data = malloc(1024 * 1024);
+ if (data.data == NULL) {
+ dbp->err(dbp, ENOMEM, "bulk get buffer");
+ failed = 1;
+ goto err;
+ }
+ data.ulen = 1024 * 1024;
+ data.flags = DB_DBT_USERMEM;
is_recno = (dbp->type == DB_RECNO || dbp->type == DB_QUEUE);
keyflag = is_recno ? keyflag : 1;
- while ((ret = dbcp->c_get(dbcp, &key, &data, DB_NEXT)) == 0)
- if ((keyflag && (ret = __db_prdbt(&key,
- pflag, " ", stdout, __db_verify_callback,
- is_recno, NULL)) != 0) || (ret =
- __db_prdbt(&data, pflag, " ", stdout,
- __db_verify_callback, 0, NULL)) != 0) {
- dbp->errx(dbp, NULL);
- return (1);
+ if (is_recno) {
+ keyret.data = &recno;
+ keyret.size = sizeof(recno);
+ }
+
+retry:
+ while ((ret =
+ dbcp->c_get(dbcp, &key, &data, DB_NEXT | DB_MULTIPLE_KEY)) == 0) {
+ DB_MULTIPLE_INIT(pointer, &data);
+ for (;;) {
+ if (is_recno)
+ DB_MULTIPLE_RECNO_NEXT(pointer, &data,
+ recno, dataret.data, dataret.size);
+ else
+ DB_MULTIPLE_KEY_NEXT(pointer,
+ &data, keyret.data,
+ keyret.size, dataret.data, dataret.size);
+
+ if (dataret.data == NULL)
+ break;
+
+ if ((keyflag && (ret = __db_prdbt(&keyret,
+ pflag, " ", stdout, __db_verify_callback,
+ is_recno, NULL)) != 0) || (ret =
+ __db_prdbt(&dataret, pflag, " ", stdout,
+ __db_verify_callback, 0, NULL)) != 0) {
+ dbp->errx(dbp, NULL);
+ failed = 1;
+ goto err;
+ }
}
+ }
+ if (ret == ENOMEM) {
+ data.data = realloc(data.data, data.size);
+ if (data.data == NULL) {
+ dbp->err(dbp, ENOMEM, "bulk get buffer");
+ failed = 1;
+ goto err;
+ }
+ data.ulen = data.size;
+ goto retry;
+ }
+
if (ret != DB_NOTFOUND) {
dbp->err(dbp, ret, "DBcursor->get");
- return (1);
+ failed = 1;
}
+err: if (data.data != NULL)
+ free(data.data);
+
if ((ret = dbcp->c_close(dbcp)) != 0) {
dbp->err(dbp, ret, "DBcursor->close");
- return (1);
+ failed = 1;
}
(void)__db_prfooter(stdout, __db_verify_callback);
- return (0);
+ return (failed);
}
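(Sketch, not part of the patch.) The rewritten dump() above switches to the 4.x bulk-retrieval interface; a condensed, self-contained sketch of that pattern for the btree/hash (keyed) case, assuming an open DB *dbp and a fixed buffer size:

	DBC *dbc;
	DBT key, data;
	void *p, *retkey, *retdata;
	u_int32_t retklen, retdlen;
	int ret;

	if ((ret = dbp->cursor(dbp, NULL, &dbc, 0)) != 0)
		return (ret);
	memset(&key, 0, sizeof(key));
	memset(&data, 0, sizeof(data));
	data.ulen = 1024 * 1024;		/* user-supplied bulk buffer */
	data.flags = DB_DBT_USERMEM;
	if ((data.data = malloc(data.ulen)) == NULL) {
		(void)dbc->c_close(dbc);
		return (ENOMEM);
	}

	/* Each c_get fills the buffer with as many key/data pairs as fit. */
	while ((ret = dbc->c_get(dbc, &key, &data, DB_MULTIPLE_KEY | DB_NEXT)) == 0)
		for (DB_MULTIPLE_INIT(p, &data);;) {
			DB_MULTIPLE_KEY_NEXT(p, &data,
			    retkey, retklen, retdata, retdlen);
			if (p == NULL)		/* buffer exhausted */
				break;
			/* retkey/retklen and retdata/retdlen describe one pair. */
		}
	free(data.data);
	(void)dbc->c_close(dbc);
	return (ret == DB_NOTFOUND ? 0 : ret);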
/*
* usage --
* Display the usage message.
*/
-void
+int
usage()
{
- (void)fprintf(stderr, "usage: %s\n",
-"db_dump [-klNprRV] [-d ahr] [-f output] [-h home] [-s database] db_file");
- exit(1);
+ (void)fprintf(stderr, "%s\n\t%s\n",
+ "usage: db_dump [-klNprRV]",
+ "[-d ahr] [-f output] [-h home] [-P password] [-s database] db_file");
+ return (EXIT_FAILURE);
}
-void
-version_check()
+int
+version_check(progname)
+ const char *progname;
{
int v_major, v_minor, v_patch;
@@ -512,6 +605,7 @@ version_check()
"%s: version %d.%d.%d doesn't match library version %d.%d.%d\n",
progname, DB_VERSION_MAJOR, DB_VERSION_MINOR,
DB_VERSION_PATCH, v_major, v_minor, v_patch);
- exit (1);
+ return (EXIT_FAILURE);
}
+ return (0);
}
diff --git a/bdb/db_dump185/db_dump185.c b/bdb/db_dump185/db_dump185.c
index 4b57fffd2dc..97164f34a9a 100644
--- a/bdb/db_dump185/db_dump185.c
+++ b/bdb/db_dump185/db_dump185.c
@@ -1,15 +1,15 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Copyright (c) 1996-2002
* Sleepycat Software. All rights reserved.
*/
#ifndef lint
static char copyright[] =
- "Copyright (c) 1996-2000\nSleepycat Software Inc. All rights reserved.\n";
+ "Copyright (c) 1996-2002\nSleepycat Software Inc. All rights reserved.\n";
static char revid[] =
- "$Id: db_dump185.c,v 11.8 2001/01/10 17:26:21 bostic Exp $";
+ "$Id: db_dump185.c,v 11.17 2002/08/08 03:50:35 bostic Exp $";
#endif
#include <sys/types.h>
@@ -65,9 +65,11 @@ typedef struct hashhdr186 { /* Disk resident portion */
int32_t h_charkey; /* value of hash(CHARKEY) */
#define NCACHED 32 /* number of bit maps and spare points */
int32_t spares[NCACHED];/* spare pages for overflow */
- u_int16_t bitmaps[NCACHED]; /* address of overflow page bitmaps */
+ /* address of overflow page bitmaps */
+ u_int16_t bitmaps[NCACHED];
} HASHHDR186;
typedef struct htab186 { /* Memory resident data structure */
+ void *unused[2];
HASHHDR186 hdr; /* Header */
} HTAB186;
@@ -171,7 +173,7 @@ void db_hash __P((DB *, int));
void dbt_dump __P((DBT *));
void dbt_print __P((DBT *));
int main __P((int, char *[]));
-void usage __P((void));
+int usage __P((void));
int
main(argc, argv)
@@ -191,7 +193,7 @@ main(argc, argv)
if (freopen(optarg, "w", stdout) == NULL) {
fprintf(stderr, "db_dump185: %s: %s\n",
optarg, strerror(errno));
- exit (1);
+ return (EXIT_FAILURE);
}
break;
case 'p':
@@ -199,20 +201,20 @@ main(argc, argv)
break;
case '?':
default:
- usage();
+ return (usage());
}
argc -= optind;
argv += optind;
if (argc != 1)
- usage();
+ return (usage());
if ((dbp = dbopen(argv[0], O_RDONLY, 0, DB_BTREE, NULL)) == NULL) {
if ((dbp =
dbopen(argv[0], O_RDONLY, 0, DB_HASH, NULL)) == NULL) {
fprintf(stderr,
"db_dump185: %s: %s\n", argv[0], strerror(errno));
- exit (1);
+ return (EXIT_FAILURE);
}
db_hash(dbp, pflag);
} else
@@ -236,9 +238,9 @@ main(argc, argv)
if (rval == -1) {
fprintf(stderr, "db_dump185: seq: %s\n", strerror(errno));
- exit (1);
+ return (EXIT_FAILURE);
}
- return (0);
+ return (EXIT_SUCCESS);
}
/*
@@ -345,9 +347,9 @@ dbt_print(dbtp)
* usage --
* Display the usage message.
*/
-void
+int
usage()
{
(void)fprintf(stderr, "usage: db_dump185 [-p] [-f file] db_file\n");
- exit(1);
+ return (EXIT_FAILURE);
}
diff --git a/bdb/db_load/db_load.c b/bdb/db_load/db_load.c
index 33e2eb5e02b..d27fca04ec0 100644
--- a/bdb/db_load/db_load.c
+++ b/bdb/db_load/db_load.c
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Copyright (c) 1996-2002
* Sleepycat Software. All rights reserved.
*/
@@ -9,9 +9,9 @@
#ifndef lint
static const char copyright[] =
- "Copyright (c) 1996-2000\nSleepycat Software Inc. All rights reserved.\n";
+ "Copyright (c) 1996-2002\nSleepycat Software Inc. All rights reserved.\n";
static const char revid[] =
- "$Id: db_load.c,v 11.33 2001/01/22 17:25:07 krinsky Exp $";
+ "$Id: db_load.c,v 11.71 2002/08/08 03:50:36 bostic Exp $";
#endif
#ifndef NO_SYSTEM_INCLUDES
@@ -25,33 +25,45 @@ static const char revid[] =
#endif
#include "db_int.h"
-#include "db_page.h"
-#include "db_am.h"
-#include "clib_ext.h"
-
-void badend __P((void));
-void badnum __P((void));
-int configure __P((DB *, char **, char **, int *));
-int db_init __P((char *));
-int dbt_rdump __P((DBT *));
-int dbt_rprint __P((DBT *));
-int dbt_rrecno __P((DBT *, int));
-int digitize __P((int, int *));
-int load __P((char *, DBTYPE, char **, int, u_int32_t));
+#include "dbinc/db_page.h"
+#include "dbinc/db_am.h"
+
+typedef struct { /* XXX: Globals. */
+ const char *progname; /* Program name. */
+ char *hdrbuf; /* Input file header. */
+ u_long lineno; /* Input file line number. */
+ u_long origline; /* Original file line number. */
+ int endodata; /* Reached the end of a database. */
+ int endofile; /* Reached the end of the input. */
+ int version; /* Input version. */
+ char *home; /* Env home. */
+ char *passwd; /* Env passwd. */
+ int private; /* Private env. */
+ u_int32_t cache; /* Env cache size. */
+} LDG;
+
+void badend __P((DB_ENV *));
+void badnum __P((DB_ENV *));
+int configure __P((DB_ENV *, DB *, char **, char **, int *));
+int convprintable __P((DB_ENV *, char *, char **));
+int db_init __P((DB_ENV *, char *, u_int32_t, int *));
+int dbt_rdump __P((DB_ENV *, DBT *));
+int dbt_rprint __P((DB_ENV *, DBT *));
+int dbt_rrecno __P((DB_ENV *, DBT *, int));
+int digitize __P((DB_ENV *, int, int *));
+int env_create __P((DB_ENV **, LDG *));
+int load __P((DB_ENV *, char *, DBTYPE, char **, u_int, LDG *, int *));
int main __P((int, char *[]));
-int rheader __P((DB *, DBTYPE *, char **, int *, int *));
-void usage __P((void));
-void version_check __P((void));
+int rheader __P((DB_ENV *, DB *, DBTYPE *, char **, int *, int *));
+int usage __P((void));
+int version_check __P((const char *));
-int endodata; /* Reached the end of a database. */
-int endofile; /* Reached the end of the input. */
-int existed; /* Tried to load existing key. */
-u_long lineno; /* Input file line number. */
-int version = 1; /* Input version. */
+#define G(f) ((LDG *)dbenv->app_private)->f
-DB_ENV *dbenv;
-const char
- *progname = "db_load"; /* Program name. */
+ /* Flags to the load function. */
+#define LDF_NOHEADER 0x01 /* No dump header. */
+#define LDF_NOOVERWRITE 0x02 /* Don't overwrite existing rows. */
+#define LDF_PASSWORD 0x04 /* Encrypt created databases. */
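(Sketch, not part of the patch.) The G() macro above reaches these per-run globals through DB_ENV->app_private; that field presumably gets pointed at the LDG in env_create(), which is not shown in this hunk. Roughly:

	/* Assumed setup, so G(lineno) expands to ((LDG *)dbenv->app_private)->lineno. */
	dbenv->app_private = &ldg;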
int
main(argc, argv)
@@ -61,24 +73,35 @@ main(argc, argv)
extern char *optarg;
extern int optind;
DBTYPE dbtype;
- u_int32_t db_nooverwrite;
- int ch, exitval, no_header, ret;
- char **clist, **clp, *home;
-
- version_check();
-
- home = NULL;
- db_nooverwrite = 0;
- exitval = no_header = 0;
+ DB_ENV *dbenv;
+ LDG ldg;
+ u_int32_t ldf;
+ int ch, existed, exitval, ret;
+ char **clist, **clp;
+
+ ldg.progname = "db_load";
+ ldg.lineno = 0;
+ ldg.endodata = ldg.endofile = 0;
+ ldg.version = 1;
+ ldg.cache = MEGABYTE;
+ ldg.hdrbuf = NULL;
+ ldg.home = NULL;
+ ldg.passwd = NULL;
+
+ if ((ret = version_check(ldg.progname)) != 0)
+ return (ret);
+
+ ldf = 0;
+ exitval = 0;
dbtype = DB_UNKNOWN;
/* Allocate enough room for configuration arguments. */
if ((clp = clist = (char **)calloc(argc + 1, sizeof(char *))) == NULL) {
- fprintf(stderr, "%s: %s\n", progname, strerror(ENOMEM));
- exit(1);
+ fprintf(stderr, "%s: %s\n", ldg.progname, strerror(ENOMEM));
+ return (EXIT_FAILURE);
}
- while ((ch = getopt(argc, argv, "c:f:h:nTt:V")) != EOF)
+ while ((ch = getopt(argc, argv, "c:f:h:nP:Tt:V")) != EOF)
switch (ch) {
case 'c':
*clp++ = optarg;
@@ -86,18 +109,28 @@ main(argc, argv)
case 'f':
if (freopen(optarg, "r", stdin) == NULL) {
fprintf(stderr, "%s: %s: reopen: %s\n",
- progname, optarg, strerror(errno));
- exit(1);
+ ldg.progname, optarg, strerror(errno));
+ return (EXIT_FAILURE);
}
break;
case 'h':
- home = optarg;
+ ldg.home = optarg;
break;
case 'n':
- db_nooverwrite = DB_NOOVERWRITE;
+ ldf |= LDF_NOOVERWRITE;
+ break;
+ case 'P':
+ ldg.passwd = strdup(optarg);
+ memset(optarg, 0, strlen(optarg));
+ if (ldg.passwd == NULL) {
+ fprintf(stderr, "%s: strdup: %s\n",
+ ldg.progname, strerror(errno));
+ return (EXIT_FAILURE);
+ }
+ ldf |= LDF_PASSWORD;
break;
case 'T':
- no_header = 1;
+ ldf |= LDF_NOHEADER;
break;
case 't':
if (strcmp(optarg, "btree") == 0) {
@@ -116,21 +149,19 @@ main(argc, argv)
dbtype = DB_QUEUE;
break;
}
- usage();
- /* NOTREACHED */
+ return (usage());
case 'V':
printf("%s\n", db_version(NULL, NULL, NULL));
- exit(0);
+ return (EXIT_SUCCESS);
case '?':
default:
- usage();
- /* NOTREACHED */
+ return (usage());
}
argc -= optind;
argv += optind;
if (argc != 1)
- usage();
+ return (usage());
/* Handle possible interruptions. */
__db_util_siginit();
@@ -139,19 +170,12 @@ main(argc, argv)
* Create an environment object initialized for error reporting, and
* then open it.
*/
- if ((ret = db_env_create(&dbenv, 0)) != 0) {
- fprintf(stderr,
- "%s: db_env_create: %s\n", progname, db_strerror(ret));
- goto shutdown;
- }
- dbenv->set_errfile(dbenv, stderr);
- dbenv->set_errpfx(dbenv, progname);
- if (db_init(home) != 0)
+ if (env_create(&dbenv, &ldg) != 0)
goto shutdown;
- while (!endofile)
- if (load(argv[0],
- dbtype, clist, no_header, db_nooverwrite) != 0)
+ while (!ldg.endofile)
+ if (load(dbenv, argv[0], dbtype, clist, ldf,
+ &ldg, &existed) != 0)
goto shutdown;
if (0) {
@@ -160,13 +184,20 @@ shutdown: exitval = 1;
if ((ret = dbenv->close(dbenv, 0)) != 0) {
exitval = 1;
fprintf(stderr,
- "%s: dbenv->close: %s\n", progname, db_strerror(ret));
+ "%s: dbenv->close: %s\n", ldg.progname, db_strerror(ret));
}
/* Resend any caught signal. */
__db_util_sigresend();
+ free(clist);
- /* Return 0 on success, 1 if keys existed already, and 2 on failure. */
+ /*
+ * Return 0 on success, 1 if keys existed already, and 2 on failure.
+ *
+ * Technically, this is wrong, because exit of anything other than
+ * 0 is implementation-defined by the ANSI C standard. I don't see
+ * any good solutions that don't involve API changes.
+ */
return (exitval == 0 ? (existed == 0 ? 0 : 1) : 2);
}
@@ -175,32 +206,39 @@ shutdown: exitval = 1;
* Load a database.
*/
int
-load(name, argtype, clist, no_header, db_nooverwrite)
+load(dbenv, name, argtype, clist, flags, ldg, existedp)
+ DB_ENV *dbenv;
char *name, **clist;
DBTYPE argtype;
- int no_header;
- u_int32_t db_nooverwrite;
+ u_int flags;
+ LDG *ldg;
+ int *existedp;
{
DB *dbp;
DBT key, rkey, data, *readp, *writep;
DBTYPE dbtype;
DB_TXN *ctxn, *txn;
db_recno_t recno, datarecno;
- int checkprint, hexkeys, keys, ret, rval;
- int keyflag, ascii_recno;
+ u_int32_t put_flags;
+ int ascii_recno, checkprint, hexkeys, keyflag, keys, resize, ret, rval;
char *subdb;
- endodata = 0;
+ *existedp = 0;
+
+ put_flags = LF_ISSET(LDF_NOOVERWRITE) ? DB_NOOVERWRITE : 0;
+ G(endodata) = 0;
+
subdb = NULL;
ctxn = txn = NULL;
memset(&key, 0, sizeof(DBT));
memset(&data, 0, sizeof(DBT));
memset(&rkey, 0, sizeof(DBT));
+retry_db:
/* Create the DB object. */
if ((ret = db_create(&dbp, dbenv, 0)) != 0) {
dbenv->err(dbenv, ret, "db_create");
- return (1);
+ goto err;
}
dbtype = DB_UNKNOWN;
@@ -208,13 +246,14 @@ load(name, argtype, clist, no_header, db_nooverwrite)
hexkeys = -1;
keyflag = -1;
/* Read the header -- if there's no header, we expect flat text. */
- if (no_header) {
+ if (LF_ISSET(LDF_NOHEADER)) {
checkprint = 1;
dbtype = argtype;
} else {
- if (rheader(dbp, &dbtype, &subdb, &checkprint, &keys) != 0)
+ if (rheader(dbenv,
+ dbp, &dbtype, &subdb, &checkprint, &keys) != 0)
goto err;
- if (endofile)
+ if (G(endofile))
goto done;
}
@@ -223,7 +262,7 @@ load(name, argtype, clist, no_header, db_nooverwrite)
* configuration changes to all databases that are loaded, e.g., all
* subdatabases.)
*/
- if (configure(dbp, clist, &subdb, &keyflag))
+ if (configure(dbenv, dbp, clist, &subdb, &keyflag))
goto err;
if (keys != 1) {
@@ -250,8 +289,8 @@ load(name, argtype, clist, no_header, db_nooverwrite)
if (argtype != DB_UNKNOWN) {
if (dbtype == DB_RECNO || dbtype == DB_QUEUE)
- if (keyflag != 1 && argtype != DB_RECNO
- && argtype != DB_QUEUE) {
+ if (keyflag != 1 && argtype != DB_RECNO &&
+ argtype != DB_QUEUE) {
dbenv->errx(dbenv,
"improper database type conversion specified");
goto err;
@@ -267,27 +306,48 @@ load(name, argtype, clist, no_header, db_nooverwrite)
if (keyflag == -1)
keyflag = 0;
- /*
+ /*
* Recno keys have only been printed in hexadecimal starting
- * with db_dump format version 3 (DB 3.2).
+ * with db_dump format version 3 (DB 3.2).
*
- * !!!
- * Note that version is set in rheader(), which must be called before
+ * !!!
+ * Note that version is set in rheader(), which must be called before
* this assignment.
*/
- hexkeys = (version >= 3 && keyflag == 1 && checkprint == 0);
+ hexkeys = (G(version) >= 3 && keyflag == 1 && checkprint == 0);
if (keyflag == 1 && (dbtype == DB_RECNO || dbtype == DB_QUEUE))
ascii_recno = 1;
else
ascii_recno = 0;
+ /* If configured with a password, encrypt databases we create. */
+ if (LF_ISSET(LDF_PASSWORD) &&
+ (ret = dbp->set_flags(dbp, DB_ENCRYPT)) != 0) {
+ dbp->err(dbp, ret, "DB->set_flags: DB_ENCRYPT");
+ goto err;
+ }
+
/* Open the DB file. */
- if ((ret = dbp->open(dbp,
- name, subdb, dbtype, DB_CREATE, __db_omode("rwrwrw"))) != 0) {
+ if ((ret = dbp->open(dbp, NULL, name, subdb, dbtype,
+ DB_CREATE | (TXN_ON(dbenv) ? DB_AUTO_COMMIT : 0),
+ __db_omode("rwrwrw"))) != 0) {
dbp->err(dbp, ret, "DB->open: %s", name);
goto err;
}
+ if (ldg->private != 0) {
+ if ((ret =
+ __db_util_cache(dbenv, dbp, &ldg->cache, &resize)) != 0)
+ goto err;
+ if (resize) {
+ dbp->close(dbp, 0);
+ dbp = NULL;
+ dbenv->close(dbenv, 0);
+ if ((ret = env_create(&dbenv, ldg)) != 0)
+ goto err;
+ goto retry_db;
+ }
+ }
/* Initialize the key/data pair. */
readp = &key;
@@ -314,39 +374,40 @@ key_data: if ((readp->data =
goto err;
}
- if (TXN_ON(dbenv) && (ret = txn_begin(dbenv, NULL, &txn, 0)) != 0)
+ if (TXN_ON(dbenv) &&
+ (ret = dbenv->txn_begin(dbenv, NULL, &txn, 0)) != 0)
goto err;
/* Get each key/data pair and add them to the database. */
for (recno = 1; !__db_util_interrupted(); ++recno) {
if (!keyflag)
if (checkprint) {
- if (dbt_rprint(&data))
+ if (dbt_rprint(dbenv, &data))
goto err;
} else {
- if (dbt_rdump(&data))
+ if (dbt_rdump(dbenv, &data))
goto err;
}
else
if (checkprint) {
- if (dbt_rprint(readp))
+ if (dbt_rprint(dbenv, readp))
goto err;
- if (!endodata && dbt_rprint(&data))
+ if (!G(endodata) && dbt_rprint(dbenv, &data))
goto fmt;
} else {
if (ascii_recno) {
- if (dbt_rrecno(readp, hexkeys))
+ if (dbt_rrecno(dbenv, readp, hexkeys))
goto err;
} else
- if (dbt_rdump(readp))
+ if (dbt_rdump(dbenv, readp))
goto err;
- if (!endodata && dbt_rdump(&data)) {
+ if (!G(endodata) && dbt_rdump(dbenv, &data)) {
fmt: dbenv->errx(dbenv,
"odd number of key/data pairs");
goto err;
}
}
- if (endodata)
+ if (G(endodata))
break;
if (readp != writep) {
if (sscanf(readp->data, "%ud", &datarecno) != 1)
@@ -359,20 +420,19 @@ fmt: dbenv->errx(dbenv,
!keyflag ? recno : recno * 2 - 1);
}
retry: if (txn != NULL)
- if ((ret = txn_begin(dbenv, txn, &ctxn, 0)) != 0)
+ if ((ret = dbenv->txn_begin(dbenv, txn, &ctxn, 0)) != 0)
goto err;
- switch (ret =
- dbp->put(dbp, txn, writep, &data, db_nooverwrite)) {
+ switch (ret = dbp->put(dbp, ctxn, writep, &data, put_flags)) {
case 0:
if (ctxn != NULL) {
if ((ret =
- txn_commit(ctxn, DB_TXN_NOSYNC)) != 0)
+ ctxn->commit(ctxn, DB_TXN_NOSYNC)) != 0)
goto err;
ctxn = NULL;
}
break;
case DB_KEYEXIST:
- existed = 1;
+ *existedp = 1;
dbenv->errx(dbenv,
"%s: line %d: key already exists, not loaded:",
name,
@@ -384,7 +444,7 @@ retry: if (txn != NULL)
case DB_LOCK_DEADLOCK:
/* If we have a child txn, retry--else it's fatal. */
if (ctxn != NULL) {
- if ((ret = txn_abort(ctxn)) != 0)
+ if ((ret = ctxn->abort(ctxn)) != 0)
goto err;
ctxn = NULL;
goto retry;
@@ -393,20 +453,20 @@ retry: if (txn != NULL)
default:
dbenv->err(dbenv, ret, NULL);
if (ctxn != NULL) {
- (void)txn_abort(ctxn);
+ (void)ctxn->abort(ctxn);
ctxn = NULL;
}
goto err;
}
if (ctxn != NULL) {
- if ((ret = txn_abort(ctxn)) != 0)
+ if ((ret = ctxn->abort(ctxn)) != 0)
goto err;
ctxn = NULL;
}
}
done: rval = 0;
DB_ASSERT(ctxn == NULL);
- if (txn != NULL && (ret = txn_commit(txn, 0)) != 0) {
+ if (txn != NULL && (ret = txn->commit(txn, 0)) != 0) {
txn = NULL;
goto err;
}
@@ -415,15 +475,18 @@ done: rval = 0;
err: rval = 1;
DB_ASSERT(ctxn == NULL);
if (txn != NULL)
- (void)txn_abort(txn);
+ (void)txn->abort(txn);
}
/* Close the database. */
- if ((ret = dbp->close(dbp, 0)) != 0) {
- dbp->err(dbp, ret, "DB->close");
+ if (dbp != NULL && (ret = dbp->close(dbp, 0)) != 0) {
+ dbenv->err(dbenv, ret, "DB->close");
rval = 1;
}
+ if (G(hdrbuf) != NULL)
+ free(G(hdrbuf));
+ G(hdrbuf) = NULL;
/* Free allocated memory. */
if (subdb != NULL)
free(subdb);
@@ -441,12 +504,16 @@ err: rval = 1;
* Initialize the environment.
*/
int
-db_init(home)
+db_init(dbenv, home, cache, is_private)
+ DB_ENV *dbenv;
char *home;
+ u_int32_t cache;
+ int *is_private;
{
u_int32_t flags;
int ret;
+ *is_private = 0;
/* We may be loading into a live environment. Try and join. */
flags = DB_USE_ENVIRON |
DB_INIT_LOCK | DB_INIT_LOG | DB_INIT_MPOOL | DB_INIT_TXN;
@@ -467,11 +534,16 @@ db_init(home)
*/
LF_CLR(DB_INIT_LOCK | DB_INIT_LOG | DB_INIT_TXN);
LF_SET(DB_CREATE | DB_PRIVATE);
+ *is_private = 1;
+ if ((ret = dbenv->set_cachesize(dbenv, 0, cache, 1)) != 0) {
+ dbenv->err(dbenv, ret, "set_cachesize");
+ return (1);
+ }
if ((ret = dbenv->open(dbenv, home, flags, 0)) == 0)
return (0);
/* An environment is required. */
- dbenv->err(dbenv, ret, "DBENV->open");
+ dbenv->err(dbenv, ret, "DB_ENV->open");
return (1);
}
@@ -481,14 +553,14 @@ db_init(home)
case '1': \
if ((ret = dbp->set_flags(dbp, flag)) != 0) { \
dbp->err(dbp, ret, "%s: set_flags: %s", \
- progname, name); \
+ G(progname), name); \
return (1); \
} \
break; \
case '0': \
break; \
default: \
- badnum(); \
+ badnum(dbenv); \
return (1); \
} \
continue; \
@@ -514,7 +586,8 @@ db_init(home)
* Handle command-line configuration options.
*/
int
-configure(dbp, clp, subdbp, keysp)
+configure(dbenv, dbp, clp, subdbp, keysp)
+ DB_ENV *dbenv;
DB *dbp;
char **clp, **subdbp;
int *keysp;
@@ -534,6 +607,8 @@ configure(dbp, clp, subdbp, keysp)
if (strcmp(name, "database") == 0 ||
strcmp(name, "subdatabase") == 0) {
+ if (*subdbp != NULL)
+ free(*subdbp);
if ((*subdbp = strdup(value)) == NULL) {
dbp->err(dbp, ENOMEM, NULL);
return (1);
@@ -546,7 +621,7 @@ configure(dbp, clp, subdbp, keysp)
else if (strcmp(value, "0") == 0)
*keysp = 0;
else {
- badnum();
+ badnum(dbenv);
return (1);
}
continue;
@@ -558,6 +633,7 @@ configure(dbp, clp, subdbp, keysp)
NUMBER(name, value, "bt_minkey", set_bt_minkey);
NUMBER(name, value, "db_lorder", set_lorder);
NUMBER(name, value, "db_pagesize", set_pagesize);
+ FLAG(name, value, "chksum", DB_CHKSUM_SHA1);
FLAG(name, value, "duplicates", DB_DUP);
FLAG(name, value, "dupsort", DB_DUPSORT);
NUMBER(name, value, "h_ffactor", set_h_ffactor);
@@ -568,13 +644,13 @@ configure(dbp, clp, subdbp, keysp)
FLAG(name, value, "renumber", DB_RENUMBER);
dbp->errx(dbp,
- "unknown command-line configuration keyword");
+ "unknown command-line configuration keyword \"%s\"", name);
return (1);
}
return (0);
nameerr:
- dbp->err(dbp, ret, "%s: %s=%s", progname, name, value);
+ dbp->err(dbp, ret, "%s: %s=%s", G(progname), name, value);
return (1);
}
@@ -583,35 +659,91 @@ nameerr:
* Read the header message.
*/
int
-rheader(dbp, dbtypep, subdbp, checkprintp, keysp)
+rheader(dbenv, dbp, dbtypep, subdbp, checkprintp, keysp)
+ DB_ENV *dbenv;
DB *dbp;
DBTYPE *dbtypep;
char **subdbp;
int *checkprintp, *keysp;
{
long val;
- int first, ret;
- char *name, *value, *p, buf[128];
+ int ch, first, hdr, linelen, buflen, ret, start;
+ char *buf, *name, *p, *value;
*dbtypep = DB_UNKNOWN;
*checkprintp = 0;
+ name = p = NULL;
+
+ /*
+ * We start with a smallish buffer; most headers are small.
+ * We may need to realloc it for a large subdatabase name.
+ */
+ buflen = 4096;
+ if (G(hdrbuf) == NULL) {
+ hdr = 0;
+ if ((buf = (char *)malloc(buflen)) == NULL) {
+memerr: dbp->errx(dbp, "could not allocate buffer %d", buflen);
+ return (1);
+ }
+ G(hdrbuf) = buf;
+ G(origline) = G(lineno);
+ } else {
+ hdr = 1;
+ buf = G(hdrbuf);
+ G(lineno) = G(origline);
+ }
+ start = 0;
for (first = 1;; first = 0) {
- ++lineno;
+ ++G(lineno);
+
+ /* Read a line, which may be of arbitrary length, into buf. */
+ linelen = 0;
+ buf = &G(hdrbuf)[start];
+ if (hdr == 0) {
+ for (;;) {
+ if ((ch = getchar()) == EOF) {
+ if (!first || ferror(stdin))
+ goto badfmt;
+ G(endofile) = 1;
+ break;
+ }
- /* If we don't see the expected information, it's an error. */
- if (fgets(buf, sizeof(buf), stdin) == NULL) {
- if (!first || ferror(stdin))
- goto badfmt;
- endofile = 1;
- break;
+ if (ch == '\n')
+ break;
+
+ buf[linelen++] = ch;
+
+ /* If the buffer is too small, double it. */
+ if (linelen + start == buflen) {
+ G(hdrbuf) = (char *)realloc(G(hdrbuf),
+ buflen *= 2);
+ if (G(hdrbuf) == NULL)
+ goto memerr;
+ buf = &G(hdrbuf)[start];
+ }
+ }
+ if (G(endofile) == 1)
+ break;
+ buf[linelen++] = '\0';
+ } else
+ linelen = strlen(buf) + 1;
+ start += linelen;
+
+ if (name != NULL) {
+ *p = '=';
+ free(name);
+ name = NULL;
}
- if ((p = strchr(name = buf, '=')) == NULL)
+ /* If we don't see the expected information, it's an error. */
+ if ((name = strdup(buf)) == NULL)
+ goto memerr;
+ if ((p = strchr(name, '=')) == NULL)
goto badfmt;
*p++ = '\0';
- if ((p = strchr(value = p, '\n')) == NULL)
- goto badfmt;
- *p = '\0';
+
+ value = p--;
+
if (name[0] == '\0' || value[0] == '\0')
goto badfmt;
@@ -622,13 +754,13 @@ rheader(dbp, dbtypep, subdbp, checkprintp, keysp)
* Version 1 didn't have a "VERSION" header line. We
* only support versions 1, 2, and 3 of the dump format.
*/
- version = atoi(value);
+ G(version) = atoi(value);
- if (version > 3) {
+ if (G(version) > 3) {
dbp->errx(dbp,
"line %lu: VERSION %d is unsupported",
- lineno, version);
- return (1);
+ G(lineno), G(version));
+ goto err;
}
continue;
}
@@ -660,14 +792,14 @@ rheader(dbp, dbtypep, subdbp, checkprintp, keysp)
*dbtypep = DB_QUEUE;
continue;
}
- dbp->errx(dbp, "line %lu: unknown type", lineno);
- return (1);
+ dbp->errx(dbp, "line %lu: unknown type", G(lineno));
+ goto err;
}
if (strcmp(name, "database") == 0 ||
strcmp(name, "subdatabase") == 0) {
- if ((*subdbp = strdup(value)) == NULL) {
- dbp->err(dbp, ENOMEM, NULL);
- return (1);
+ if ((ret = convprintable(dbenv, value, subdbp)) != 0) {
+ dbp->err(dbp, ret, "error reading db name");
+ goto err;
}
continue;
}
@@ -677,8 +809,8 @@ rheader(dbp, dbtypep, subdbp, checkprintp, keysp)
else if (strcmp(value, "0") == 0)
*keysp = 0;
else {
- badnum();
- return (1);
+ badnum(dbenv);
+ goto err;
}
continue;
}
@@ -689,6 +821,8 @@ rheader(dbp, dbtypep, subdbp, checkprintp, keysp)
NUMBER(name, value, "bt_minkey", set_bt_minkey);
NUMBER(name, value, "db_lorder", set_lorder);
NUMBER(name, value, "db_pagesize", set_pagesize);
+ NUMBER(name, value, "extentsize", set_q_extentsize);
+ FLAG(name, value, "chksum", DB_CHKSUM_SHA1);
FLAG(name, value, "duplicates", DB_DUP);
FLAG(name, value, "dupsort", DB_DUPSORT);
NUMBER(name, value, "h_ffactor", set_h_ffactor);
@@ -699,18 +833,81 @@ rheader(dbp, dbtypep, subdbp, checkprintp, keysp)
FLAG(name, value, "renumber", DB_RENUMBER);
dbp->errx(dbp,
- "unknown input-file header configuration keyword");
- return (1);
+ "unknown input-file header configuration keyword \"%s\"",
+ name);
+ goto err;
}
- return (0);
-
+ ret = 0;
+ if (0) {
nameerr:
- dbp->err(dbp, ret, "%s: %s=%s", progname, name, value);
- return (1);
-
+ dbp->err(dbp, ret, "%s: %s=%s", G(progname), name, value);
+ ret = 1;
+ }
+ if (0)
+err: ret = 1;
+ if (0) {
badfmt:
- dbp->errx(dbp, "line %lu: unexpected format", lineno);
- return (1);
+ dbp->errx(dbp, "line %lu: unexpected format", G(lineno));
+ ret = 1;
+ }
+ if (name != NULL) {
+ *p = '=';
+ free(name);
+ }
+ return (ret);
+}
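A minimal calling sketch for the reworked rheader() above (illustrative only, not part of the patch; it assumes dbenv and dbp have already been created by db_load's main loop, which is outside this hunk):

    DBTYPE dbtype;
    char *subdb = NULL;
    int checkprint = 0, keys = 0, ret;

    /* Parse the name=value header lines from stdin -- e.g. VERSION=3,
     * type=btree, db_pagesize=4096 -- into load parameters. */
    if ((ret = rheader(dbenv, dbp, &dbtype, &subdb, &checkprint, &keys)) != 0)
        goto err;    /* malformed header or allocation failure */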
+
+/*
+ * convprintable --
+ * Convert a printable-encoded string into a newly allocated string.
+ *
+ * In an ideal world, this would probably share code with dbt_rprint, but
+ * that's set up to read character-by-character (to avoid large memory
+ * allocations that aren't likely to be a problem here), and this has fewer
+ * special cases to deal with.
+ *
+ * Note that despite the printable encoding, the char * interface to this
+ * function (which is, not coincidentally, also used for database naming)
+ * means that outstr cannot contain any nuls.
+ */
+int
+convprintable(dbenv, instr, outstrp)
+ DB_ENV *dbenv;
+ char *instr, **outstrp;
+{
+ char c, *outstr;
+ int e1, e2;
+
+ /*
+ * Just malloc a string big enough for the whole input string;
+ * the output string will be smaller (or of equal length).
+ */
+ if ((outstr = (char *)malloc(strlen(instr))) == NULL)
+ return (ENOMEM);
+
+ *outstrp = outstr;
+
+ e1 = e2 = 0;
+ for ( ; *instr != '\0'; instr++)
+ if (*instr == '\\') {
+ if (*++instr == '\\') {
+ *outstr++ = '\\';
+ continue;
+ }
+ c = digitize(dbenv, *instr, &e1) << 4;
+ c |= digitize(dbenv, *++instr, &e2);
+ if (e1 || e2) {
+ badend(dbenv);
+ return (EINVAL);
+ }
+
+ *outstr++ = c;
+ } else
+ *outstr++ = *instr;
+
+ *outstr = '\0';
+
+ return (0);
}
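A small usage sketch for convprintable() (illustrative only; the input value is hypothetical):

    char *decoded;
    int ret;

    /* "\2f" decodes to the single byte 0x2f ('/'); a doubled backslash
     * decodes to one backslash; everything else is copied through. */
    if ((ret = convprintable(dbenv, "my\\2fsubdb", &decoded)) != 0)
        return (ret);        /* ENOMEM or EINVAL */
    /* decoded now holds "my/subdb"; it was malloc'd, so free() it when done. */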
/*
@@ -718,7 +915,8 @@ badfmt:
* Read a printable line into a DBT structure.
*/
int
-dbt_rprint(dbtp)
+dbt_rprint(dbenv, dbtp)
+ DB_ENV *dbenv;
DBT *dbtp;
{
u_int32_t len;
@@ -726,31 +924,31 @@ dbt_rprint(dbtp)
int c1, c2, e, escape, first;
char buf[32];
- ++lineno;
+ ++G(lineno);
first = 1;
e = escape = 0;
for (p = dbtp->data, len = 0; (c1 = getchar()) != '\n';) {
if (c1 == EOF) {
if (len == 0) {
- endofile = endodata = 1;
+ G(endofile) = G(endodata) = 1;
return (0);
}
- badend();
+ badend(dbenv);
return (1);
}
if (first) {
first = 0;
- if (version > 1) {
+ if (G(version) > 1) {
if (c1 != ' ') {
buf[0] = c1;
if (fgets(buf + 1,
sizeof(buf) - 1, stdin) == NULL ||
strcmp(buf, "DATA=END\n") != 0) {
- badend();
+ badend(dbenv);
return (1);
}
- endodata = 1;
+ G(endodata) = 1;
return (0);
}
continue;
@@ -759,10 +957,11 @@ dbt_rprint(dbtp)
if (escape) {
if (c1 != '\\') {
if ((c2 = getchar()) == EOF) {
- badend();
+ badend(dbenv);
return (1);
}
- c1 = digitize(c1, &e) << 4 | digitize(c2, &e);
+ c1 = digitize(dbenv,
+ c1, &e) << 4 | digitize(dbenv, c2, &e);
if (e)
return (1);
}
@@ -794,7 +993,8 @@ dbt_rprint(dbtp)
* Read a byte dump line into a DBT structure.
*/
int
-dbt_rdump(dbtp)
+dbt_rdump(dbenv, dbtp)
+ DB_ENV *dbenv;
DBT *dbtp;
{
u_int32_t len;
@@ -802,38 +1002,38 @@ dbt_rdump(dbtp)
int c1, c2, e, first;
char buf[32];
- ++lineno;
+ ++G(lineno);
first = 1;
e = 0;
for (p = dbtp->data, len = 0; (c1 = getchar()) != '\n';) {
if (c1 == EOF) {
if (len == 0) {
- endofile = endodata = 1;
+ G(endofile) = G(endodata) = 1;
return (0);
}
- badend();
+ badend(dbenv);
return (1);
}
if (first) {
first = 0;
- if (version > 1) {
+ if (G(version) > 1) {
if (c1 != ' ') {
buf[0] = c1;
if (fgets(buf + 1,
sizeof(buf) - 1, stdin) == NULL ||
strcmp(buf, "DATA=END\n") != 0) {
- badend();
+ badend(dbenv);
return (1);
}
- endodata = 1;
+ G(endodata) = 1;
return (0);
}
continue;
}
}
if ((c2 = getchar()) == EOF) {
- badend();
+ badend(dbenv);
return (1);
}
if (len >= dbtp->ulen - 10) {
@@ -846,7 +1046,7 @@ dbt_rdump(dbtp)
p = (u_int8_t *)dbtp->data + len;
}
++len;
- *p++ = digitize(c1, &e) << 4 | digitize(c2, &e);
+ *p++ = digitize(dbenv, c1, &e) << 4 | digitize(dbenv, c2, &e);
if (e)
return (1);
}
@@ -860,21 +1060,22 @@ dbt_rdump(dbtp)
* Read a record number dump line into a DBT structure.
*/
int
-dbt_rrecno(dbtp, ishex)
+dbt_rrecno(dbenv, dbtp, ishex)
+ DB_ENV *dbenv;
DBT *dbtp;
int ishex;
{
char buf[32], *p, *q;
- ++lineno;
+ ++G(lineno);
if (fgets(buf, sizeof(buf), stdin) == NULL) {
- endofile = endodata = 1;
+ G(endofile) = G(endodata) = 1;
return (0);
}
if (strcmp(buf, "DATA=END\n") == 0) {
- endodata = 1;
+ G(endodata) = 1;
return (0);
}
@@ -904,8 +1105,8 @@ dbt_rrecno(dbtp, ishex)
}
if (__db_getulong(NULL,
- progname, buf + 1, 0, 0, (u_long *)dbtp->data)) {
-bad: badend();
+ G(progname), buf + 1, 0, 0, (u_long *)dbtp->data)) {
+bad: badend(dbenv);
return (1);
}
@@ -918,7 +1119,8 @@ bad: badend();
* Convert a character to an integer.
*/
int
-digitize(c, errorp)
+digitize(dbenv, c, errorp)
+ DB_ENV *dbenv;
int c, *errorp;
{
switch (c) { /* Don't depend on ASCII ordering. */
@@ -951,7 +1153,8 @@ digitize(c, errorp)
* Display the bad number message.
*/
void
-badnum()
+badnum(dbenv)
+ DB_ENV *dbenv;
{
dbenv->errx(dbenv,
"boolean name=value pairs require a value of 0 or 1");
@@ -962,7 +1165,8 @@ badnum()
* Display the bad end to input message.
*/
void
-badend()
+badend(dbenv)
+ DB_ENV *dbenv;
{
dbenv->errx(dbenv, "unexpected end of input data or key/data pair");
}
@@ -971,17 +1175,18 @@ badend()
* usage --
* Display the usage message.
*/
-void
+int
usage()
{
(void)fprintf(stderr, "%s\n\t%s\n",
- "usage: db_load [-nTV]",
- "[-c name=value] [-f file] [-h home] [-t btree | hash | recno] db_file");
- exit(1);
+ "usage: db_load [-nTV] [-c name=value] [-f file]",
+ "[-h home] [-P password] [-t btree | hash | recno | queue] db_file");
+ return (EXIT_FAILURE);
}
-void
-version_check()
+int
+version_check(progname)
+ const char *progname;
{
int v_major, v_minor, v_patch;
@@ -993,6 +1198,35 @@ version_check()
"%s: version %d.%d.%d doesn't match library version %d.%d.%d\n",
progname, DB_VERSION_MAJOR, DB_VERSION_MINOR,
DB_VERSION_PATCH, v_major, v_minor, v_patch);
- exit (1);
+ return (EXIT_FAILURE);
}
+ return (0);
+}
+
+int
+env_create(dbenvp, ldg)
+ DB_ENV **dbenvp;
+ LDG *ldg;
+{
+ DB_ENV *dbenv;
+ int ret;
+
+ if ((ret = db_env_create(dbenvp, 0)) != 0) {
+ fprintf(stderr,
+ "%s: db_env_create: %s\n", ldg->progname, db_strerror(ret));
+ return (ret);
+ }
+ dbenv = *dbenvp;
+ dbenv->set_errfile(dbenv, stderr);
+ dbenv->set_errpfx(dbenv, ldg->progname);
+ if (ldg->passwd != NULL && (ret = dbenv->set_encrypt(dbenv,
+ ldg->passwd, DB_ENCRYPT_AES)) != 0) {
+ dbenv->err(dbenv, ret, "set_passwd");
+ return (ret);
+ }
+ if ((ret = db_init(dbenv, ldg->home, ldg->cache, &ldg->private)) != 0)
+ return (ret);
+ dbenv->app_private = ldg;
+
+ return (0);
}
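A usage sketch for env_create() (illustrative only; the LDG structure is defined elsewhere in db_load.c, so only the fields referenced above -- progname, home, passwd, cache and private -- are shown, and the default cache size is an assumption):

    LDG ldg;
    DB_ENV *dbenv;

    memset(&ldg, 0, sizeof(ldg));
    ldg.progname = "db_load";
    ldg.home = NULL;         /* or the -h argument */
    ldg.passwd = NULL;       /* or the -P argument */
    ldg.cache = MEGABYTE;    /* assumed default; see db_init() */

    if (env_create(&dbenv, &ldg) != 0)
        return (EXIT_FAILURE);
    /* ... load the file(s), then dbenv->close(dbenv, 0) ... */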
diff --git a/bdb/db_printlog/README b/bdb/db_printlog/README
index 7d8da505e49..d59f4c77f55 100644
--- a/bdb/db_printlog/README
+++ b/bdb/db_printlog/README
@@ -1,4 +1,4 @@
-# $Id: README,v 10.5 1999/11/21 23:08:01 bostic Exp $
+# $Id: README,v 10.6 2002/06/20 14:52:54 bostic Exp $
Berkeley DB log dump utility. This utility dumps out a DB log in human
readable form, a record at a time, to assist in recovery and transaction
@@ -10,14 +10,23 @@ commit.awk Output transaction ID of committed transactions.
count.awk Print out the number of log records for transactions
that we encountered.
+dbname.awk Take a comma-separated list of database names and spit
+ out all the log records that affect those databases.
+
fileid.awk Take a comma-separated list of file numbers and spit out
all the log records that affect those file numbers.
+logstat.awk Display log record count/size statistics.
+
pgno.awk Take a comma-separated list of page numbers and spit
out all the log records that affect those page numbers.
range.awk Print out a range of the log.
+rectype.awk Print out a range of the log -- command line should
+ set RECTYPE to a comma-separated list of the
+ rectypes (or partial strings of rectypes) sought.
+
status.awk Read through db_printlog output and list the transactions
encountered, and whether they committed or aborted.
diff --git a/bdb/db_printlog/db_printlog.c b/bdb/db_printlog/db_printlog.c
index 8b9fb74a6a9..af6d00d593a 100644
--- a/bdb/db_printlog/db_printlog.c
+++ b/bdb/db_printlog/db_printlog.c
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Copyright (c) 1996-2002
* Sleepycat Software. All rights reserved.
*/
@@ -9,14 +9,15 @@
#ifndef lint
static const char copyright[] =
- "Copyright (c) 1996-2000\nSleepycat Software Inc. All rights reserved.\n";
+ "Copyright (c) 1996-2002\nSleepycat Software Inc. All rights reserved.\n";
static const char revid[] =
- "$Id: db_printlog.c,v 11.23 2001/01/18 18:36:58 bostic Exp $";
+ "$Id: db_printlog.c,v 11.52 2002/08/08 03:50:38 bostic Exp $";
#endif
#ifndef NO_SYSTEM_INCLUDES
#include <sys/types.h>
+#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
@@ -24,21 +25,20 @@ static const char revid[] =
#endif
#include "db_int.h"
-#include "db_page.h"
-#include "btree.h"
-#include "db_am.h"
-#include "hash.h"
-#include "log.h"
-#include "qam.h"
-#include "txn.h"
-
-int main __P((int, char *[]));
-void usage __P((void));
-void version_check __P((void));
-
-DB_ENV *dbenv;
-const char
- *progname = "db_printlog"; /* Program name. */
+#include "dbinc/db_page.h"
+#include "dbinc/btree.h"
+#include "dbinc/fop.h"
+#include "dbinc/hash.h"
+#include "dbinc/log.h"
+#include "dbinc/qam.h"
+#include "dbinc/rep.h"
+#include "dbinc/txn.h"
+
+int main __P((int, char *[]));
+int usage __P((void));
+int version_check __P((const char *));
+int print_app_record __P((DB_ENV *, DBT *, DB_LSN *, db_recops));
+int open_rep_db __P((DB_ENV *, DB **, DBC **));
int
main(argc, argv)
@@ -47,42 +47,63 @@ main(argc, argv)
{
extern char *optarg;
extern int optind;
- DBT data;
+ const char *progname = "db_printlog";
+ DB *dbp;
+ DBC *dbc;
+ DB_ENV *dbenv;
+ DB_LOGC *logc;
+ int (**dtab) __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ size_t dtabsize;
+ DBT data, keydbt;
DB_LSN key;
- int ch, e_close, exitval, nflag, ret;
- char *home;
+ int ch, e_close, exitval, nflag, rflag, ret, repflag;
+ char *home, *passwd;
- version_check();
+ if ((ret = version_check(progname)) != 0)
+ return (ret);
- e_close = exitval = 0;
- nflag = 0;
- home = NULL;
- while ((ch = getopt(argc, argv, "h:NV")) != EOF)
+ dbp = NULL;
+ dbc = NULL;
+ logc = NULL;
+ e_close = exitval = nflag = rflag = repflag = 0;
+ home = passwd = NULL;
+ dtabsize = 0;
+ dtab = NULL;
+ while ((ch = getopt(argc, argv, "h:NP:rRV")) != EOF)
switch (ch) {
case 'h':
home = optarg;
break;
case 'N':
nflag = 1;
- if ((ret = db_env_set_panicstate(0)) != 0) {
- fprintf(stderr,
- "%s: db_env_set_panicstate: %s\n",
- progname, db_strerror(ret));
- return (1);
+ break;
+ case 'P':
+ passwd = strdup(optarg);
+ memset(optarg, 0, strlen(optarg));
+ if (passwd == NULL) {
+ fprintf(stderr, "%s: strdup: %s\n",
+ progname, strerror(errno));
+ return (EXIT_FAILURE);
}
break;
+ case 'r':
+ rflag = 1;
+ break;
+ case 'R':
+ repflag = 1;
+ break;
case 'V':
printf("%s\n", db_version(NULL, NULL, NULL));
- exit(0);
+ return (EXIT_SUCCESS);
case '?':
default:
- usage();
+ return (usage());
}
argc -= optind;
argv += optind;
if (argc > 0)
- usage();
+ return (usage());
/* Handle possible interruptions. */
__db_util_siginit();
@@ -101,8 +122,29 @@ main(argc, argv)
dbenv->set_errfile(dbenv, stderr);
dbenv->set_errpfx(dbenv, progname);
- if (nflag && (ret = dbenv->set_mutexlocks(dbenv, 0)) != 0) {
- dbenv->err(dbenv, ret, "set_mutexlocks");
+ if (nflag) {
+ if ((ret = dbenv->set_flags(dbenv, DB_NOLOCKING, 1)) != 0) {
+ dbenv->err(dbenv, ret, "set_flags: DB_NOLOCKING");
+ goto shutdown;
+ }
+ if ((ret = dbenv->set_flags(dbenv, DB_NOPANIC, 1)) != 0) {
+ dbenv->err(dbenv, ret, "set_flags: DB_NOPANIC");
+ goto shutdown;
+ }
+ }
+
+ if (passwd != NULL && (ret = dbenv->set_encrypt(dbenv,
+ passwd, DB_ENCRYPT_AES)) != 0) {
+ dbenv->err(dbenv, ret, "set_passwd");
+ goto shutdown;
+ }
+
+ /*
+ * Set up an app-specific dispatch function so that we can gracefully
+ * handle app-specific log records.
+ */
+ if ((ret = dbenv->set_app_dispatch(dbenv, print_app_record)) != 0) {
+ dbenv->err(dbenv, ret, "app_dispatch");
goto shutdown;
}
@@ -110,8 +152,19 @@ main(argc, argv)
* An environment is required, but as all we're doing is reading log
* files, we create one if it doesn't already exist. If we create
* it, create it private so it automatically goes away when we're done.
+ * If we are reading the replication database, do not open the env
+ * with logging, because we don't want to log the opens.
*/
- if ((ret = dbenv->open(dbenv, home,
+ if (repflag) {
+ if ((ret = dbenv->open(dbenv, home,
+ DB_INIT_MPOOL | DB_USE_ENVIRON, 0)) != 0 &&
+ (ret = dbenv->open(dbenv, home,
+ DB_CREATE | DB_INIT_MPOOL | DB_PRIVATE | DB_USE_ENVIRON, 0))
+ != 0) {
+ dbenv->err(dbenv, ret, "open");
+ goto shutdown;
+ }
+ } else if ((ret = dbenv->open(dbenv, home,
DB_JOINENV | DB_USE_ENVIRON, 0)) != 0 &&
(ret = dbenv->open(dbenv, home,
DB_CREATE | DB_INIT_LOG | DB_PRIVATE | DB_USE_ENVIRON, 0)) != 0) {
@@ -120,33 +173,48 @@ main(argc, argv)
}
/* Initialize print callbacks. */
- if ((ret = __bam_init_print(dbenv)) != 0 ||
- (ret = __crdel_init_print(dbenv)) != 0 ||
- (ret = __db_init_print(dbenv)) != 0 ||
- (ret = __qam_init_print(dbenv)) != 0 ||
- (ret = __ham_init_print(dbenv)) != 0 ||
- (ret = __log_init_print(dbenv)) != 0 ||
- (ret = __txn_init_print(dbenv)) != 0) {
+ if ((ret = __bam_init_print(dbenv, &dtab, &dtabsize)) != 0 ||
+ (ret = __dbreg_init_print(dbenv, &dtab, &dtabsize)) != 0 ||
+ (ret = __crdel_init_print(dbenv, &dtab, &dtabsize)) != 0 ||
+ (ret = __db_init_print(dbenv, &dtab, &dtabsize)) != 0 ||
+ (ret = __fop_init_print(dbenv, &dtab, &dtabsize)) != 0 ||
+ (ret = __qam_init_print(dbenv, &dtab, &dtabsize)) != 0 ||
+ (ret = __ham_init_print(dbenv, &dtab, &dtabsize)) != 0 ||
+ (ret = __txn_init_print(dbenv, &dtab, &dtabsize)) != 0) {
dbenv->err(dbenv, ret, "callback: initialization");
goto shutdown;
}
+ /* Allocate a log cursor. */
+ if (repflag) {
+ if ((ret = open_rep_db(dbenv, &dbp, &dbc)) != 0)
+ goto shutdown;
+ } else if ((ret = dbenv->log_cursor(dbenv, &logc, 0)) != 0) {
+ dbenv->err(dbenv, ret, "DB_ENV->log_cursor");
+ goto shutdown;
+ }
+
memset(&data, 0, sizeof(data));
+ memset(&keydbt, 0, sizeof(keydbt));
while (!__db_util_interrupted()) {
- if ((ret = log_get(dbenv, &key, &data, DB_NEXT)) != 0) {
+ if (repflag) {
+ ret = dbc->c_get(dbc,
+ &keydbt, &data, rflag ? DB_PREV : DB_NEXT);
+ if (ret == 0)
+ key = ((REP_CONTROL *)keydbt.data)->lsn;
+ } else
+ ret = logc->get(logc,
+ &key, &data, rflag ? DB_PREV : DB_NEXT);
+ if (ret != 0) {
if (ret == DB_NOTFOUND)
break;
- dbenv->err(dbenv, ret, "log_get");
+ dbenv->err(dbenv,
+ ret, repflag ? "DB_LOGC->get" : "DBC->get");
goto shutdown;
}
- /*
- * XXX
- * We use DB_TXN_ABORT as our op because that's the only op
- * that calls the underlying recovery function without any
- * consideration as to the contents of the transaction list.
- */
- ret = __db_dispatch(dbenv, &data, &key, DB_TXN_ABORT, NULL);
+ ret = __db_dispatch(dbenv,
+ dtab, dtabsize, &data, &key, DB_TXN_PRINT, NULL);
/*
* XXX
@@ -163,6 +231,22 @@ main(argc, argv)
if (0) {
shutdown: exitval = 1;
}
+ if (logc != NULL && (ret = logc->close(logc, 0)) != 0)
+ exitval = 1;
+
+ if (dbc != NULL && (ret = dbc->c_close(dbc)) != 0)
+ exitval = 1;
+
+ if (dbp != NULL && (ret = dbp->close(dbp, 0)) != 0)
+ exitval = 1;
+
+ /*
+ * The dtab is allocated by __db_add_recovery (called by *_init_print)
+ * using the library malloc function (__os_malloc). It thus needs to be
+ * freed using the corresponding free (__os_free).
+ */
+ if (dtab != NULL)
+ __os_free(dbenv, dtab);
if (e_close && (ret = dbenv->close(dbenv, 0)) != 0) {
exitval = 1;
fprintf(stderr,
@@ -172,18 +256,20 @@ shutdown: exitval = 1;
/* Resend any caught signal. */
__db_util_sigresend();
- return (exitval);
+ return (exitval == 0 ? EXIT_SUCCESS : EXIT_FAILURE);
}
-void
+int
usage()
{
- fprintf(stderr, "usage: db_printlog [-NV] [-h home]\n");
- exit (1);
+ fprintf(stderr, "%s\n",
+ "usage: db_printlog [-NrV] [-h home] [-P password]");
+ return (EXIT_FAILURE);
}
-void
-version_check()
+int
+version_check(progname)
+ const char *progname;
{
int v_major, v_minor, v_patch;
@@ -195,6 +281,80 @@ version_check()
"%s: version %d.%d.%d doesn't match library version %d.%d.%d\n",
progname, DB_VERSION_MAJOR, DB_VERSION_MINOR,
DB_VERSION_PATCH, v_major, v_minor, v_patch);
- exit (1);
+ return (EXIT_FAILURE);
+ }
+ return (0);
+}
+
+/* Print an unknown, application-specific log record as best we can. */
+int
+print_app_record(dbenv, dbt, lsnp, op)
+ DB_ENV *dbenv;
+ DBT *dbt;
+ DB_LSN *lsnp;
+ db_recops op;
+{
+ int ch;
+ u_int32_t i, rectype;
+
+ DB_ASSERT(op == DB_TXN_PRINT);
+ COMPQUIET(dbenv, NULL);
+
+ /*
+ * Fetch the rectype, which always must be at the beginning of the
+ * record (if dispatching is to work at all).
+ */
+ memcpy(&rectype, dbt->data, sizeof(rectype));
+
+ /*
+ * Applications may wish to customize the output here based on the
+ * rectype. We just print the entire log record in the generic
+ * mixed-hex-and-printable format we use for binary data.
+ */
+ printf("[%lu][%lu]application specific record: rec: %lu\n",
+ (u_long)lsnp->file, (u_long)lsnp->offset, (u_long)rectype);
+ printf("\tdata: ");
+ for (i = 0; i < dbt->size; i++) {
+ ch = ((u_int8_t *)dbt->data)[i];
+ printf(isprint(ch) || ch == 0x0a ? "%c" : "%#x ", ch);
}
+ printf("\n\n");
+
+ return (0);
+}
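To make the dispatching requirement above concrete: an application-specific log record only has to carry its record type in the first four bytes, and the rest of the layout is up to the application (the struct below is purely hypothetical). The callback itself is wired up earlier in this file via dbenv->set_app_dispatch(dbenv, print_app_record).

    struct my_app_record {            /* hypothetical layout */
        u_int32_t rectype;            /* must come first for dispatching */
        u_int32_t len;                /* application payload length */
        /* ... payload bytes follow ... */
    };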
+
+int
+open_rep_db(dbenv, dbpp, dbcp)
+ DB_ENV *dbenv;
+ DB **dbpp;
+ DBC **dbcp;
+{
+ int ret;
+
+ DB *dbp;
+ *dbpp = NULL;
+ *dbcp = NULL;
+
+ if ((ret = db_create(dbpp, dbenv, 0)) != 0) {
+ dbenv->err(dbenv, ret, "db_create");
+ return (ret);
+ }
+
+ dbp = *dbpp;
+ if ((ret =
+ dbp->open(dbp, NULL, "__db.rep.db", NULL, DB_BTREE, 0, 0)) != 0) {
+ dbenv->err(dbenv, ret, "DB->open");
+ goto err;
+ }
+
+ if ((ret = dbp->cursor(dbp, NULL, dbcp, 0)) != 0) {
+ dbenv->err(dbenv, ret, "DB->cursor");
+ goto err;
+ }
+
+ return (0);
+
+err: if (*dbpp != NULL)
+ (void)(*dbpp)->close(*dbpp, 0);
+ return (ret);
}
diff --git a/bdb/db_printlog/dbname.awk b/bdb/db_printlog/dbname.awk
index d070335127c..47955994579 100644
--- a/bdb/db_printlog/dbname.awk
+++ b/bdb/db_printlog/dbname.awk
@@ -1,4 +1,4 @@
-# $Id: dbname.awk,v 1.2 2000/08/03 15:06:39 ubell Exp $
+# $Id: dbname.awk,v 1.5 2002/05/07 05:45:51 ubell Exp $
#
# Take a comma-separated list of database names and spit out all the
# log records that affect those databases.
@@ -16,7 +16,7 @@ NR == 1 {
myfile = -1;
}
-/^\[.*log_register/ {
+/^\[.*dbreg_register/ {
register = 1;
}
/opcode:/ {
@@ -58,8 +58,6 @@ NR == 1 {
}
}
-
-
/^\[/{
if (printme == 1) {
printf("%s\n", rec);
@@ -69,6 +67,8 @@ NR == 1 {
rec = $0
}
+
+TXN == 1 && /txn_regop/ {printme = 1}
/^ /{
rec = sprintf("%s\n%s", rec, $0);
}
diff --git a/bdb/db_printlog/logstat.awk b/bdb/db_printlog/logstat.awk
new file mode 100644
index 00000000000..1009343eba4
--- /dev/null
+++ b/bdb/db_printlog/logstat.awk
@@ -0,0 +1,36 @@
+# $Id: logstat.awk,v 1.1 2002/05/10 15:19:13 bostic Exp $
+#
+# Output accumulated log record count/size statistics.
+BEGIN {
+ l_file = 0;
+ l_offset = 0;
+}
+
+/^\[/{
+ gsub("[][: ]", " ", $1)
+ split($1, a)
+
+ if (a[1] == l_file) {
+ l[a[3]] += a[2] - l_offset
+ ++n[a[3]]
+ } else
+ ++s[a[3]]
+
+ l_file = a[1]
+ l_offset = a[2]
+}
+
+END {
+ # We can't figure out the size of the first record in each log file, so
+ # use the average of the other records we found as an estimate.
+ for (i in s)
+ if (s[i] != 0 && n[i] != 0) {
+ l[i] += s[i] * (l[i]/n[i])
+ n[i] += s[i]
+ delete s[i]
+ }
+ for (i in l)
+ printf "%s: %d (n: %d, avg: %.2f)\n", i, l[i], n[i], l[i]/n[i]
+ for (i in s)
+ printf "%s: unknown (n: %d, unknown)\n", i, s[i]
+}
diff --git a/bdb/db_printlog/status.awk b/bdb/db_printlog/status.awk
index 42e24b078b9..13df0b6194a 100644
--- a/bdb/db_printlog/status.awk
+++ b/bdb/db_printlog/status.awk
@@ -1,14 +1,17 @@
-# $Id: status.awk,v 10.2 1999/11/21 18:01:43 bostic Exp $
+# $Id: status.awk,v 10.3 2002/04/11 01:35:24 margo Exp $
#
# Read through db_printlog output and list all the transactions encountered
# and whether they committed or aborted.
#
# 1 = started
# 2 = committed
+# 3 = explicitly aborted
+# 4 = other
BEGIN {
cur_txn = 0
}
/^\[/{
+ in_regop = 0
if (status[$5] == 0) {
status[$5] = 1;
txns[cur_txn] = $5;
@@ -16,11 +19,28 @@ BEGIN {
}
}
/txn_regop/ {
- status[$5] = 2
+ txnid = $5
+ in_regop = 1
+}
+/opcode:/ {
+ if (in_regop == 1) {
+ if ($2 == 1)
+ status[txnid] = 2
+ else if ($2 == 3)
+ status[txnid] = 3
+ else
+ status[txnid] = 4
+ }
}
END {
for (i = 0; i < cur_txn; i++) {
- printf("%s\t%s\n",
- txns[i], status[txns[i]] == 1 ? "ABORT" : "COMMIT");
+ if (status[txns[i]] == 1)
+ printf("%s\tABORT\n", txns[i]);
+ if (status[txns[i]] == 2)
+ printf("%s\tCOMMIT\n", txns[i]);
+ if (status[txns[i]] == 3)
+ printf("%s\tABORT\n", txns[i]);
+ if (status[txns[i]] == 4)
+ printf("%s\tOTHER\n", txns[i]);
}
}
diff --git a/bdb/db_recover/db_recover.c b/bdb/db_recover/db_recover.c
index 59ab8bcef15..b6414267f93 100644
--- a/bdb/db_recover/db_recover.c
+++ b/bdb/db_recover/db_recover.c
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Copyright (c) 1996-2002
* Sleepycat Software. All rights reserved.
*/
@@ -9,9 +9,9 @@
#ifndef lint
static const char copyright[] =
- "Copyright (c) 1996-2000\nSleepycat Software Inc. All rights reserved.\n";
+ "Copyright (c) 1996-2002\nSleepycat Software Inc. All rights reserved.\n";
static const char revid[] =
- "$Id: db_recover.c,v 11.17 2001/01/18 18:36:58 bostic Exp $";
+ "$Id: db_recover.c,v 11.33 2002/03/28 20:13:42 bostic Exp $";
#endif
#ifndef NO_SYSTEM_INCLUDES
@@ -34,17 +34,12 @@ static const char revid[] =
#endif
#include "db_int.h"
-#include "common_ext.h"
-#include "txn.h"
+#include "dbinc/txn.h"
-int main __P((int, char *[]));
-void read_timestamp __P((char *, time_t *));
-void usage __P((void));
-void version_check __P((void));
-
-DB_ENV *dbenv;
-const char
- *progname = "db_recover"; /* Program name. */
+int main __P((int, char *[]));
+int read_timestamp __P((const char *, char *, time_t *));
+int usage __P((void));
+int version_check __P((const char *));
int
main(argc, argv)
@@ -53,43 +48,60 @@ main(argc, argv)
{
extern char *optarg;
extern int optind;
+ const char *progname = "db_recover";
+ DB_ENV *dbenv;
DB_TXNREGION *region;
time_t now, timestamp;
u_int32_t flags;
- int ch, exitval, fatal_recover, ret, verbose;
- char *home;
+ int ch, exitval, fatal_recover, ret, retain_env, verbose;
+ char *home, *passwd;
- version_check();
+ if ((ret = version_check(progname)) != 0)
+ return (ret);
- home = NULL;
+ home = passwd = NULL;
timestamp = 0;
- exitval = fatal_recover = verbose = 0;
- while ((ch = getopt(argc, argv, "ch:t:Vv")) != EOF)
+ exitval = fatal_recover = retain_env = verbose = 0;
+ while ((ch = getopt(argc, argv, "ceh:P:t:Vv")) != EOF)
switch (ch) {
case 'c':
fatal_recover = 1;
break;
+ case 'e':
+ retain_env = 1;
+ break;
case 'h':
home = optarg;
break;
+ case 'P':
+ passwd = strdup(optarg);
+ memset(optarg, 0, strlen(optarg));
+ if (passwd == NULL) {
+ fprintf(stderr, "%s: strdup: %s\n",
+ progname, strerror(errno));
+ return (EXIT_FAILURE);
+ }
+ break;
case 't':
- read_timestamp(optarg, &timestamp);
+ if ((ret =
+ read_timestamp(progname, optarg, &timestamp)) != 0)
+ return (ret);
break;
case 'V':
printf("%s\n", db_version(NULL, NULL, NULL));
- exit(0);
+ return (EXIT_SUCCESS);
case 'v':
verbose = 1;
break;
case '?':
default:
- usage();
+ return (usage());
}
argc -= optind;
argv += optind;
if (argc != 0)
- usage();
+ return (usage());
/* Handle possible interruptions. */
__db_util_siginit();
@@ -101,7 +113,7 @@ main(argc, argv)
if ((ret = db_env_create(&dbenv, 0)) != 0) {
fprintf(stderr,
"%s: db_env_create: %s\n", progname, db_strerror(ret));
- exit (1);
+ return (EXIT_FAILURE);
}
dbenv->set_errfile(dbenv, stderr);
dbenv->set_errpfx(dbenv, progname);
@@ -111,7 +123,13 @@ main(argc, argv)
}
if (timestamp &&
(ret = dbenv->set_tx_timestamp(dbenv, &timestamp)) != 0) {
- dbenv->err(dbenv, ret, "DBENV->set_timestamp");
+ dbenv->err(dbenv, ret, "DB_ENV->set_timestamp");
+ goto shutdown;
+ }
+
+ if (passwd != NULL && (ret = dbenv->set_encrypt(dbenv,
+ passwd, DB_ENCRYPT_AES)) != 0) {
+ dbenv->err(dbenv, ret, "set_passwd");
goto shutdown;
}
@@ -119,18 +137,21 @@ main(argc, argv)
* Initialize the environment -- we don't actually do anything
* else, that's all that's needed to run recovery.
*
- * Note that we specify a private environment, as we're about to
- * create a region, and we don't want to to leave it around. If
- * we leave the region around, the application that should create
- * it will simply join it instead, and will then be running with
- * incorrectly sized (and probably terribly small) caches.
+ * Note that unless the caller specified the -e option, we use a
+ * private environment, as we're about to create a region, and we
+ * don't want to to leave it around. If we leave the region around,
+ * the application that should create it will simply join it instead,
+ * and will then be running with incorrectly sized (and probably
+ * terribly small) caches. Applications that use -e should almost
+ * certainly use DB_CONFIG files in the directory.
*/
flags = 0;
LF_SET(DB_CREATE | DB_INIT_LOCK | DB_INIT_LOG |
- DB_INIT_MPOOL | DB_INIT_TXN | DB_PRIVATE | DB_USE_ENVIRON);
+ DB_INIT_MPOOL | DB_INIT_TXN | DB_USE_ENVIRON);
LF_SET(fatal_recover ? DB_RECOVER_FATAL : DB_RECOVER);
+ LF_SET(retain_env ? 0 : DB_PRIVATE);
if ((ret = dbenv->open(dbenv, home, flags, 0)) != 0) {
- dbenv->err(dbenv, ret, "DBENV->open");
+ dbenv->err(dbenv, ret, "DB_ENV->open");
goto shutdown;
}
@@ -158,7 +179,7 @@ shutdown: exitval = 1;
/* Resend any caught signal. */
__db_util_sigresend();
- return (exitval);
+ return (exitval == 0 ? EXIT_SUCCESS : EXIT_FAILURE);
}
#define ATOI2(ar) ((ar)[0] - '0') * 10 + ((ar)[1] - '0'); (ar) += 2;
@@ -194,8 +215,9 @@ shutdown: exitval = 1;
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
-void
-read_timestamp(arg, timep)
+int
+read_timestamp(progname, arg, timep)
+ const char *progname;
char *arg;
time_t *timep;
{
@@ -208,7 +230,7 @@ read_timestamp(arg, timep)
if ((t = localtime(&now)) == NULL) {
fprintf(stderr,
"%s: localtime: %s\n", progname, strerror(errno));
- exit (1);
+ return (EXIT_FAILURE);
}
/* [[CC]YY]MMDDhhmm[.SS] */
if ((p = strchr(arg, '.')) == NULL)
@@ -226,7 +248,7 @@ read_timestamp(arg, timep)
t->tm_year = ATOI2(arg);
t->tm_year *= 100;
yearset = 1;
- /* FALLTHOUGH */
+ /* FALLTHROUGH */
case 10: /* YYMMDDhhmm */
if (yearset) {
yearset = ATOI2(arg);
@@ -258,20 +280,22 @@ read_timestamp(arg, timep)
terr: fprintf(stderr,
"%s: out of range or illegal time specification: [[CC]YY]MMDDhhmm[.SS]",
progname);
- exit (1);
+ return (EXIT_FAILURE);
}
+ return (0);
}
-void
+int
usage()
{
- (void)fprintf(stderr,
- "usage: db_recover [-cVv] [-h home] [-t [[CC]YY]MMDDhhmm[.SS]]\n");
- exit(1);
+ (void)fprintf(stderr, "%s\n",
+"usage: db_recover [-ceVv] [-h home] [-P password] [-t [[CC]YY]MMDDhhmm[.SS]]");
+ return (EXIT_FAILURE);
}
-void
-version_check()
+int
+version_check(progname)
+ const char *progname;
{
int v_major, v_minor, v_patch;
@@ -283,6 +307,7 @@ version_check()
"%s: version %d.%d.%d doesn't match library version %d.%d.%d\n",
progname, DB_VERSION_MAJOR, DB_VERSION_MINOR,
DB_VERSION_PATCH, v_major, v_minor, v_patch);
- exit (1);
+ return (EXIT_FAILURE);
}
+ return (0);
}
diff --git a/bdb/db_stat/db_stat.c b/bdb/db_stat/db_stat.c
index 9d80caa4889..a2b01b71e0a 100644
--- a/bdb/db_stat/db_stat.c
+++ b/bdb/db_stat/db_stat.c
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Copyright (c) 1996-2002
* Sleepycat Software. All rights reserved.
*/
@@ -9,9 +9,9 @@
#ifndef lint
static const char copyright[] =
- "Copyright (c) 1996-2000\nSleepycat Software Inc. All rights reserved.\n";
+ "Copyright (c) 1996-2002\nSleepycat Software Inc. All rights reserved.\n";
static const char revid[] =
- "$Id: db_stat.c,v 11.42 2001/01/18 18:36:59 bostic Exp $";
+ "$Id: db_stat.c,v 11.125 2002/08/08 15:26:15 bostic Exp $";
#endif
#ifndef NO_SYSTEM_INCLUDES
@@ -35,41 +35,33 @@ static const char revid[] =
#endif
#include "db_int.h"
-#include "db_page.h"
-#include "db_shash.h"
-#include "lock.h"
-#include "mp.h"
+#include "dbinc/db_page.h"
#define PCT(f, t, pgsize) \
((t) == 0 ? 0 : \
(((double)(((t) * (pgsize)) - (f)) / ((t) * (pgsize))) * 100))
-typedef enum { T_NOTSET, T_DB, T_ENV, T_LOCK, T_LOG, T_MPOOL, T_TXN } test_t;
+typedef enum { T_NOTSET,
+ T_DB, T_ENV, T_LOCK, T_LOG, T_MPOOL, T_REP, T_TXN } test_t;
int argcheck __P((char *, const char *));
-int btree_stats __P((DB_ENV *, DB *, DB_BTREE_STAT *));
-int db_init __P((char *, test_t));
+int btree_stats __P((DB_ENV *, DB *, DB_BTREE_STAT *, int));
+int db_init __P((DB_ENV *, char *, test_t, u_int32_t, int *));
void dl __P((const char *, u_long));
void dl_bytes __P((const char *, u_long, u_long, u_long));
-int env_stats __P((DB_ENV *));
-int hash_stats __P((DB_ENV *, DB *));
-int lock_ok __P((char *));
-int lock_stats __P((DB_ENV *));
-int log_stats __P((DB_ENV *));
+int env_stats __P((DB_ENV *, u_int32_t));
+int hash_stats __P((DB_ENV *, DB *, int));
+int lock_stats __P((DB_ENV *, char *, u_int32_t));
+int log_stats __P((DB_ENV *, u_int32_t));
int main __P((int, char *[]));
-int mpool_ok __P((char *));
-int mpool_stats __P((DB_ENV *));
+int mpool_stats __P((DB_ENV *, char *, u_int32_t));
void prflags __P((u_int32_t, const FN *));
-int queue_stats __P((DB_ENV *, DB *));
+int queue_stats __P((DB_ENV *, DB *, int));
+int rep_stats __P((DB_ENV *, u_int32_t));
int txn_compare __P((const void *, const void *));
-int txn_stats __P((DB_ENV *));
-void usage __P((void));
-void version_check __P((void));
-
-DB_ENV *dbenv;
-char *internal;
-const char
- *progname = "db_stat"; /* Program name. */
+int txn_stats __P((DB_ENV *, u_int32_t));
+int usage __P((void));
+int version_check __P((const char *));
int
main(argc, argv)
@@ -78,72 +70,114 @@ main(argc, argv)
{
extern char *optarg;
extern int optind;
+ const char *progname = "db_stat";
+ DB_ENV *dbenv;
DB_BTREE_STAT *sp;
DB *alt_dbp, *dbp;
test_t ttype;
- int ch, checked, d_close, e_close, exitval, nflag, ret;
- char *db, *home, *subdb;
+ u_int32_t cache;
+ int ch, checked, d_close, e_close, exitval, fast, flags;
+ int nflag, private, resize, ret;
+ char *db, *home, *internal, *passwd, *subdb;
- version_check();
+ if ((ret = version_check(progname)) != 0)
+ return (ret);
dbp = NULL;
ttype = T_NOTSET;
- nflag = 0;
- d_close = e_close = exitval = 0;
- db = home = subdb = NULL;
- while ((ch = getopt(argc, argv, "C:cd:eh:lM:mNs:tV")) != EOF)
+ cache = MEGABYTE;
+ d_close = e_close = exitval = fast = flags = nflag = private = 0;
+ db = home = internal = passwd = subdb = NULL;
+
+ while ((ch = getopt(argc, argv, "C:cd:efh:lM:mNP:rs:tVZ")) != EOF)
switch (ch) {
case 'C':
+ if (ttype != T_NOTSET)
+ goto argcombo;
ttype = T_LOCK;
- if (!argcheck(internal = optarg, "Acflmo"))
- usage();
+ if (!argcheck(internal = optarg, "Aclmop"))
+ return (usage());
break;
case 'c':
+ if (ttype != T_NOTSET)
+ goto argcombo;
ttype = T_LOCK;
break;
case 'd':
- db = optarg;
+ if (ttype != T_DB && ttype != T_NOTSET)
+ goto argcombo;
ttype = T_DB;
+ db = optarg;
break;
case 'e':
+ if (ttype != T_NOTSET)
+ goto argcombo;
ttype = T_ENV;
break;
+ case 'f':
+ fast = DB_FAST_STAT;
+ break;
case 'h':
home = optarg;
break;
case 'l':
+ if (ttype != T_NOTSET)
+ goto argcombo;
ttype = T_LOG;
break;
case 'M':
+ if (ttype != T_NOTSET)
+ goto argcombo;
ttype = T_MPOOL;
- if (!argcheck(internal = optarg, "Ahlm"))
- usage();
+ if (!argcheck(internal = optarg, "Ahm"))
+ return (usage());
break;
case 'm':
+ if (ttype != T_NOTSET)
+ goto argcombo;
ttype = T_MPOOL;
break;
case 'N':
nflag = 1;
- if ((ret = db_env_set_panicstate(0)) != 0) {
- fprintf(stderr,
- "%s: db_env_set_panicstate: %s\n",
- progname, db_strerror(ret));
- return (1);
+ break;
+ case 'P':
+ passwd = strdup(optarg);
+ memset(optarg, 0, strlen(optarg));
+ if (passwd == NULL) {
+ fprintf(stderr, "%s: strdup: %s\n",
+ progname, strerror(errno));
+ return (EXIT_FAILURE);
}
break;
+ case 'r':
+ if (ttype != T_NOTSET)
+ goto argcombo;
+ ttype = T_REP;
+ break;
case 's':
- subdb = optarg;
+ if (ttype != T_DB && ttype != T_NOTSET)
+ goto argcombo;
ttype = T_DB;
+ subdb = optarg;
break;
case 't':
+ if (ttype != T_NOTSET) {
+argcombo: fprintf(stderr,
+ "%s: illegal option combination\n",
+ progname);
+ return (EXIT_FAILURE);
+ }
ttype = T_TXN;
break;
case 'V':
printf("%s\n", db_version(NULL, NULL, NULL));
- exit(0);
+ return (EXIT_SUCCESS);
+ case 'Z':
+ flags |= DB_STAT_CLEAR;
+ break;
case '?':
default:
- usage();
+ return (usage());
}
argc -= optind;
argv += optind;
@@ -151,12 +185,14 @@ main(argc, argv)
switch (ttype) {
case T_DB:
if (db == NULL)
- usage();
+ return (usage());
break;
case T_NOTSET:
- usage();
+ return (usage());
/* NOTREACHED */
default:
+ if (fast != 0)
+ return (usage());
break;
}
@@ -167,7 +203,7 @@ main(argc, argv)
* Create an environment object and initialize it for error
* reporting.
*/
- if ((ret = db_env_create(&dbenv, 0)) != 0) {
+retry: if ((ret = db_env_create(&dbenv, 0)) != 0) {
fprintf(stderr,
"%s: db_env_create: %s\n", progname, db_strerror(ret));
goto shutdown;
@@ -177,29 +213,59 @@ main(argc, argv)
dbenv->set_errfile(dbenv, stderr);
dbenv->set_errpfx(dbenv, progname);
- if (nflag && (ret = dbenv->set_mutexlocks(dbenv, 0)) != 0) {
- dbenv->err(dbenv, ret, "set_mutexlocks");
+ if (nflag) {
+ if ((ret = dbenv->set_flags(dbenv, DB_NOLOCKING, 1)) != 0) {
+ dbenv->err(dbenv, ret, "set_flags: DB_NOLOCKING");
+ goto shutdown;
+ }
+ if ((ret = dbenv->set_flags(dbenv, DB_NOPANIC, 1)) != 0) {
+ dbenv->err(dbenv, ret, "set_flags: DB_NOPANIC");
+ goto shutdown;
+ }
+ }
+
+ if (passwd != NULL &&
+ (ret = dbenv->set_encrypt(dbenv, passwd, DB_ENCRYPT_AES)) != 0) {
+ dbenv->err(dbenv, ret, "set_passwd");
goto shutdown;
}
/* Initialize the environment. */
- if (db_init(home, ttype) != 0)
+ if (db_init(dbenv, home, ttype, cache, &private) != 0)
goto shutdown;
switch (ttype) {
case T_DB:
/* Create the DB object and open the file. */
+ if (flags != 0)
+ return (usage());
if ((ret = db_create(&dbp, dbenv, 0)) != 0) {
dbenv->err(dbenv, ret, "db_create");
goto shutdown;
}
+ d_close = 1;
- if ((ret =
- dbp->open(dbp, db, subdb, DB_UNKNOWN, DB_RDONLY, 0)) != 0) {
- dbp->err(dbp, ret, "open: %s", db);
+ if ((ret = dbp->open(dbp,
+ NULL, db, subdb, DB_UNKNOWN, DB_RDONLY, 0)) != 0) {
+ dbp->err(dbp, ret, "DB->open: %s", db);
goto shutdown;
}
+ /* Check if cache is too small for this DB's pagesize. */
+ if (private) {
+ if ((ret =
+ __db_util_cache(dbenv, dbp, &cache, &resize)) != 0)
+ goto shutdown;
+ if (resize) {
+ (void)dbp->close(dbp, 0);
+ d_close = 0;
+
+ (void)dbenv->close(dbenv, 0);
+ e_close = 0;
+ goto retry;
+ }
+ }
+
/*
* See if we can open this db read/write to update counts.
* If its a master-db then we cannot. So check to see,
@@ -207,9 +273,9 @@ main(argc, argv)
*/
checked = 0;
if (subdb == NULL && dbp->type == DB_BTREE) {
- if ((ret = dbp->stat(dbp, &sp, NULL, 0)) != 0) {
- dbp->err(dbp, ret, "dbp->stat");
- return (1);
+ if ((ret = dbp->stat(dbp, &sp, DB_FAST_STAT)) != 0) {
+ dbp->err(dbp, ret, "DB->stat");
+ goto shutdown;
}
checked = 1;
}
@@ -221,58 +287,68 @@ main(argc, argv)
dbenv->err(dbenv, ret, "db_create");
goto shutdown;
}
- if ((ret = dbp->open(alt_dbp,
- db, subdb, DB_UNKNOWN, 0, 0)) == 0) {
- (void)dbp->close(dbp, 0);
- dbp = alt_dbp;
+ if ((ret = dbp->open(alt_dbp, NULL,
+ db, subdb, DB_UNKNOWN, 0, 0)) != 0) {
+ dbenv->err(dbenv,
+ ret, "DB->open: %s:%s", db, subdb);
+ (void)alt_dbp->close(alt_dbp, 0);
+ goto shutdown;
}
+
+ (void)dbp->close(dbp, 0);
+ dbp = alt_dbp;
+
/* Need to run again to update counts */
checked = 0;
}
- d_close = 1;
switch (dbp->type) {
case DB_BTREE:
case DB_RECNO:
- if (btree_stats(dbenv, dbp, checked == 1 ? sp : NULL))
+ if (btree_stats(
+ dbenv, dbp, checked == 1 ? sp : NULL, fast))
goto shutdown;
break;
case DB_HASH:
- if (hash_stats(dbenv, dbp))
+ if (hash_stats(dbenv, dbp, fast))
goto shutdown;
break;
case DB_QUEUE:
- if (queue_stats(dbenv, dbp))
+ if (queue_stats(dbenv, dbp, fast))
goto shutdown;
break;
case DB_UNKNOWN:
- abort(); /* Impossible. */
- /* NOTREACHED */
+ dbenv->errx(dbenv, "Unknown database type.");
+ goto shutdown;
}
break;
case T_ENV:
- if (env_stats(dbenv))
- exitval = 1;
+ if (env_stats(dbenv, flags))
+ goto shutdown;
break;
case T_LOCK:
- if (lock_stats(dbenv))
- exitval = 1;
+ if (lock_stats(dbenv, internal, flags))
+ goto shutdown;
break;
case T_LOG:
- if (log_stats(dbenv))
- exitval = 1;
+ if (log_stats(dbenv, flags))
+ goto shutdown;
break;
case T_MPOOL:
- if (mpool_stats(dbenv))
- exitval = 1;
+ if (mpool_stats(dbenv, internal, flags))
+ goto shutdown;
+ break;
+ case T_REP:
+ if (rep_stats(dbenv, flags))
+ goto shutdown;
break;
case T_TXN:
- if (txn_stats(dbenv))
- exitval = 1;
+ if (txn_stats(dbenv, flags))
+ goto shutdown;
break;
case T_NOTSET:
- abort(); /* Impossible. */
- /* NOTREACHED */
+ dbenv->errx(dbenv, "Unknown statistics flag.");
+ goto shutdown;
}
if (0) {
@@ -280,7 +356,7 @@ shutdown: exitval = 1;
}
if (d_close && (ret = dbp->close(dbp, 0)) != 0) {
exitval = 1;
- dbp->err(dbp, ret, "close");
+ dbenv->err(dbenv, ret, "close");
}
if (e_close && (ret = dbenv->close(dbenv, 0)) != 0) {
exitval = 1;
@@ -291,7 +367,7 @@ shutdown: exitval = 1;
/* Resend any caught signal. */
__db_util_sigresend();
- return (exitval);
+ return (exitval == 0 ? EXIT_SUCCESS : EXIT_FAILURE);
}
/*
@@ -299,8 +375,9 @@ shutdown: exitval = 1;
* Display environment statistics.
*/
int
-env_stats(dbenvp)
- DB_ENV *dbenvp;
+env_stats(dbenv, flags)
+ DB_ENV *dbenv;
+ u_int32_t flags;
{
REGENV renv;
REGION *rp, regs[1024];
@@ -308,15 +385,15 @@ env_stats(dbenvp)
const char *lable;
n = sizeof(regs) / sizeof(regs[0]);
- if ((ret = __db_e_stat(dbenvp, &renv, regs, &n)) != 0) {
- dbenvp->err(dbenvp, ret, "__db_e_stat");
+ if ((ret = __db_e_stat(dbenv, &renv, regs, &n, flags)) != 0) {
+ dbenv->err(dbenv, ret, "__db_e_stat");
return (1);
}
printf("%d.%d.%d\tEnvironment version.\n",
renv.majver, renv.minver, renv.patch);
printf("%lx\tMagic number.\n", (u_long)renv.magic);
- printf("%d\tPanic value.\n", renv.panic);
+ printf("%d\tPanic value.\n", renv.envpanic);
/* Adjust the reference count for us... */
printf("%d\tReferences.\n", renv.refcnt - 1);
@@ -370,10 +447,11 @@ env_stats(dbenvp)
* Display btree/recno statistics.
*/
int
-btree_stats(dbenvp, dbp, msp)
- DB_ENV *dbenvp;
+btree_stats(dbenv, dbp, msp, fast)
+ DB_ENV *dbenv;
DB *dbp;
DB_BTREE_STAT *msp;
+ int fast;
{
static const FN fn[] = {
{ BTM_DUP, "duplicates" },
@@ -387,12 +465,12 @@ btree_stats(dbenvp, dbp, msp)
DB_BTREE_STAT *sp;
int ret;
- COMPQUIET(dbenvp, NULL);
+ COMPQUIET(dbenv, NULL);
if (msp != NULL)
sp = msp;
- else if ((ret = dbp->stat(dbp, &sp, NULL, 0)) != 0) {
- dbp->err(dbp, ret, "dbp->stat");
+ else if ((ret = dbp->stat(dbp, &sp, fast)) != 0) {
+ dbp->err(dbp, ret, "DB->stat");
return (1);
}
@@ -447,6 +525,8 @@ btree_stats(dbenvp, dbp, msp)
dl("Number of pages on the free list.\n", (u_long)sp->bt_free);
+ free(sp);
+
return (0);
}
@@ -455,9 +535,10 @@ btree_stats(dbenvp, dbp, msp)
* Display hash statistics.
*/
int
-hash_stats(dbenvp, dbp)
- DB_ENV *dbenvp;
+hash_stats(dbenv, dbp, fast)
+ DB_ENV *dbenv;
DB *dbp;
+ int fast;
{
static const FN fn[] = {
{ DB_HASH_DUP, "duplicates" },
@@ -467,10 +548,10 @@ hash_stats(dbenvp, dbp)
DB_HASH_STAT *sp;
int ret;
- COMPQUIET(dbenvp, NULL);
+ COMPQUIET(dbenv, NULL);
- if ((ret = dbp->stat(dbp, &sp, NULL, 0)) != 0) {
- dbp->err(dbp, ret, "dbp->stat");
+ if ((ret = dbp->stat(dbp, &sp, fast)) != 0) {
+ dbp->err(dbp, ret, "DB->stat");
return (1);
}
@@ -478,6 +559,7 @@ hash_stats(dbenvp, dbp)
printf("%lu\tHash version number.\n", (u_long)sp->hash_version);
prflags(sp->hash_metaflags, fn);
dl("Underlying database page size.\n", (u_long)sp->hash_pagesize);
+ dl("Specified fill factor.\n", (u_long)sp->hash_ffactor);
dl("Number of keys in the database.\n", (u_long)sp->hash_nkeys);
dl("Number of data items in the database.\n", (u_long)sp->hash_ndata);
@@ -506,6 +588,8 @@ hash_stats(dbenvp, dbp)
dl("Number of pages on the free list.\n", (u_long)sp->hash_free);
+ free(sp);
+
return (0);
}
@@ -514,17 +598,18 @@ hash_stats(dbenvp, dbp)
* Display queue statistics.
*/
int
-queue_stats(dbenvp, dbp)
- DB_ENV *dbenvp;
+queue_stats(dbenv, dbp, fast)
+ DB_ENV *dbenv;
DB *dbp;
+ int fast;
{
DB_QUEUE_STAT *sp;
int ret;
- COMPQUIET(dbenvp, NULL);
+ COMPQUIET(dbenv, NULL);
- if ((ret = dbp->stat(dbp, &sp, NULL, 0)) != 0) {
- dbp->err(dbp, ret, "dbp->stat");
+ if ((ret = dbp->stat(dbp, &sp, fast)) != 0) {
+ dbp->err(dbp, ret, "DB->stat");
return (1);
}
@@ -536,6 +621,9 @@ queue_stats(dbenvp, dbp)
else
printf("0x%x\tFixed-length record pad.\n", (int)sp->qs_re_pad);
dl("Underlying database page size.\n", (u_long)sp->qs_pagesize);
+ if (sp->qs_extentsize != 0)
+ dl("Underlying database extent size.\n",
+ (u_long)sp->qs_extentsize);
dl("Number of records in the database.\n", (u_long)sp->qs_nkeys);
dl("Number of database pages.\n", (u_long)sp->qs_pages);
dl("Number of bytes free in database pages", (u_long)sp->qs_pgfree);
@@ -543,7 +631,9 @@ queue_stats(dbenvp, dbp)
PCT(sp->qs_pgfree, sp->qs_pages, sp->qs_pagesize));
printf("%lu\tFirst undeleted record.\n", (u_long)sp->qs_first_recno);
printf(
- "%lu\tLast allocated record number.\n", (u_long)sp->qs_cur_recno);
+ "%lu\tNext available record number.\n", (u_long)sp->qs_cur_recno);
+
+ free(sp);
return (0);
}
@@ -553,46 +643,68 @@ queue_stats(dbenvp, dbp)
* Display lock statistics.
*/
int
-lock_stats(dbenvp)
- DB_ENV *dbenvp;
+lock_stats(dbenv, internal, flags)
+ DB_ENV *dbenv;
+ char *internal;
+ u_int32_t flags;
{
DB_LOCK_STAT *sp;
int ret;
if (internal != NULL) {
- __lock_dump_region(dbenvp, internal, stdout);
+ if ((ret =
+ dbenv->lock_dump_region(dbenv, internal, stdout)) != 0) {
+ dbenv->err(dbenv, ret, NULL);
+ return (1);
+ }
return (0);
}
- if ((ret = lock_stat(dbenvp, &sp, NULL)) != 0) {
- dbenvp->err(dbenvp, ret, NULL);
+ if ((ret = dbenv->lock_stat(dbenv, &sp, flags)) != 0) {
+ dbenv->err(dbenv, ret, NULL);
return (1);
}
- dl("Last allocated locker ID.\n", (u_long)sp->st_lastid);
+ dl("Last allocated locker ID.\n", (u_long)sp->st_id);
+ dl("Current maximum unused locker ID.\n", (u_long)sp->st_cur_maxid);
dl("Number of lock modes.\n", (u_long)sp->st_nmodes);
dl("Maximum number of locks possible.\n", (u_long)sp->st_maxlocks);
dl("Maximum number of lockers possible.\n", (u_long)sp->st_maxlockers);
- dl("Maximum number of objects possible.\n", (u_long)sp->st_maxobjects);
- dl("Current locks.\n", (u_long)sp->st_nlocks);
- dl("Maximum number of locks so far.\n", (u_long)sp->st_maxnlocks);
- dl("Current number of lockers.\n", (u_long)sp->st_nlockers);
- dl("Maximum number lockers so far.\n", (u_long)sp->st_maxnlockers);
- dl("Current number lock objects.\n", (u_long)sp->st_nobjects);
- dl("Maximum number of lock objects so far.\n",
+ dl("Maximum number of lock objects possible.\n",
+ (u_long)sp->st_maxobjects);
+ dl("Number of current locks.\n", (u_long)sp->st_nlocks);
+ dl("Maximum number of locks at any one time.\n",
+ (u_long)sp->st_maxnlocks);
+ dl("Number of current lockers.\n", (u_long)sp->st_nlockers);
+ dl("Maximum number of lockers at any one time.\n",
+ (u_long)sp->st_maxnlockers);
+ dl("Number of current lock objects.\n", (u_long)sp->st_nobjects);
+ dl("Maximum number of lock objects at any one time.\n",
(u_long)sp->st_maxnobjects);
- dl("Number of lock requests.\n", (u_long)sp->st_nrequests);
- dl("Number of lock releases.\n", (u_long)sp->st_nreleases);
- dl("Number of lock requests that would have waited.\n",
+ dl("Total number of locks requested.\n", (u_long)sp->st_nrequests);
+ dl("Total number of locks released.\n", (u_long)sp->st_nreleases);
+ dl(
+ "Total number of lock requests failing because DB_LOCK_NOWAIT was set.\n",
(u_long)sp->st_nnowaits);
- dl("Number of lock conflicts.\n", (u_long)sp->st_nconflicts);
+ dl(
+ "Total number of locks not immediately available due to conflicts.\n",
+ (u_long)sp->st_nconflicts);
dl("Number of deadlocks.\n", (u_long)sp->st_ndeadlocks);
- dl_bytes("Lock region size",
+ dl("Lock timeout value.\n", (u_long)sp->st_locktimeout);
+ dl("Number of locks that have timed out.\n",
+ (u_long)sp->st_nlocktimeouts);
+ dl("Transaction timeout value.\n", (u_long)sp->st_txntimeout);
+ dl("Number of transactions that have timed out.\n",
+ (u_long)sp->st_ntxntimeouts);
+
+ dl_bytes("The size of the lock region.",
(u_long)0, (u_long)0, (u_long)sp->st_regsize);
- dl("The number of region locks granted without waiting.\n",
- (u_long)sp->st_region_nowait);
dl("The number of region locks granted after waiting.\n",
(u_long)sp->st_region_wait);
+ dl("The number of region locks granted without waiting.\n",
+ (u_long)sp->st_region_nowait);
+
+ free(sp);
return (0);
}
@@ -602,31 +714,32 @@ lock_stats(dbenvp)
* Display log statistics.
*/
int
-log_stats(dbenvp)
- DB_ENV *dbenvp;
+log_stats(dbenv, flags)
+ DB_ENV *dbenv;
+ u_int32_t flags;
{
DB_LOG_STAT *sp;
int ret;
- if ((ret = log_stat(dbenvp, &sp, NULL)) != 0) {
- dbenvp->err(dbenvp, ret, NULL);
+ if ((ret = dbenv->log_stat(dbenv, &sp, flags)) != 0) {
+ dbenv->err(dbenv, ret, NULL);
return (1);
}
printf("%lx\tLog magic number.\n", (u_long)sp->st_magic);
printf("%lu\tLog version number.\n", (u_long)sp->st_version);
- dl_bytes("Log region size",
- (u_long)0, (u_long)0, (u_long)sp->st_regsize);
dl_bytes("Log record cache size",
(u_long)0, (u_long)0, (u_long)sp->st_lg_bsize);
printf("%#o\tLog file mode.\n", sp->st_mode);
- if (sp->st_lg_max % MEGABYTE == 0)
- printf("%luMb\tLog file size.\n",
- (u_long)sp->st_lg_max / MEGABYTE);
- else if (sp->st_lg_max % 1024 == 0)
- printf("%luKb\tLog file size.\n", (u_long)sp->st_lg_max / 1024);
+ if (sp->st_lg_size % MEGABYTE == 0)
+ printf("%luMb\tCurrent log file size.\n",
+ (u_long)sp->st_lg_size / MEGABYTE);
+ else if (sp->st_lg_size % 1024 == 0)
+ printf("%luKb\tCurrent log file size.\n",
+ (u_long)sp->st_lg_size / 1024);
else
- printf("%lu\tLog file size.\n", (u_long)sp->st_lg_max);
+ printf("%lu\tCurrent log file size.\n",
+ (u_long)sp->st_lg_size);
dl_bytes("Log bytes written",
(u_long)0, (u_long)sp->st_w_mbytes, (u_long)sp->st_w_bytes);
dl_bytes("Log bytes written since last checkpoint",
@@ -637,10 +750,20 @@ log_stats(dbenvp)
dl("Total log file flushes.\n", (u_long)sp->st_scount);
printf("%lu\tCurrent log file number.\n", (u_long)sp->st_cur_file);
printf("%lu\tCurrent log file offset.\n", (u_long)sp->st_cur_offset);
- dl("The number of region locks granted without waiting.\n",
- (u_long)sp->st_region_nowait);
+ printf("%lu\tOn-disk log file number.\n", (u_long)sp->st_disk_file);
+ printf("%lu\tOn-disk log file offset.\n", (u_long)sp->st_disk_offset);
+
+ dl("Max commits in a log flush.\n", (u_long)sp->st_maxcommitperflush);
+ dl("Min commits in a log flush.\n", (u_long)sp->st_mincommitperflush);
+
+ dl_bytes("Log region size",
+ (u_long)0, (u_long)0, (u_long)sp->st_regsize);
dl("The number of region locks granted after waiting.\n",
(u_long)sp->st_region_wait);
+ dl("The number of region locks granted without waiting.\n",
+ (u_long)sp->st_region_nowait);
+
+ free(sp);
return (0);
}
@@ -650,34 +773,41 @@ log_stats(dbenvp)
* Display mpool statistics.
*/
int
-mpool_stats(dbenvp)
- DB_ENV *dbenvp;
+mpool_stats(dbenv, internal, flags)
+ DB_ENV *dbenv;
+ char *internal;
+ u_int32_t flags;
{
DB_MPOOL_FSTAT **fsp;
DB_MPOOL_STAT *gsp;
int ret;
if (internal != NULL) {
- __memp_dump_region(dbenvp, internal, stdout);
- return (1);
+ if ((ret =
+ dbenv->memp_dump_region(dbenv, internal, stdout)) != 0) {
+ dbenv->err(dbenv, ret, NULL);
+ return (1);
+ }
+ return (0);
}
- if ((ret = memp_stat(dbenvp, &gsp, &fsp, NULL)) != 0) {
- dbenvp->err(dbenvp, ret, NULL);
+ if ((ret = dbenv->memp_stat(dbenv, &gsp, &fsp, flags)) != 0) {
+ dbenv->err(dbenv, ret, NULL);
return (1);
}
dl_bytes("Total cache size",
(u_long)gsp->st_gbytes, (u_long)0, (u_long)gsp->st_bytes);
dl("Number of caches.\n", (u_long)gsp->st_ncache);
- dl("Pool individual cache size.\n", (u_long)gsp->st_regsize);
+ dl_bytes("Pool individual cache size",
+ (u_long)0, (u_long)0, (u_long)gsp->st_regsize);
+ dl("Requested pages mapped into the process' address space.\n",
+ (u_long)gsp->st_map);
dl("Requested pages found in the cache", (u_long)gsp->st_cache_hit);
if (gsp->st_cache_hit + gsp->st_cache_miss != 0)
printf(" (%.0f%%)", ((double)gsp->st_cache_hit /
(gsp->st_cache_hit + gsp->st_cache_miss)) * 100);
printf(".\n");
- dl("Requested pages mapped into the process' address space.\n",
- (u_long)gsp->st_map);
dl("Requested pages not found in the cache.\n",
(u_long)gsp->st_cache_miss);
dl("Pages created in the cache.\n", (u_long)gsp->st_page_create);
@@ -688,11 +818,13 @@ mpool_stats(dbenvp)
(u_long)gsp->st_ro_evict);
dl("Dirty pages forced from the cache.\n",
(u_long)gsp->st_rw_evict);
- dl("Dirty buffers written by trickle-sync thread.\n",
+ dl("Dirty pages written by trickle-sync thread.\n",
(u_long)gsp->st_page_trickle);
- dl("Current clean buffer count.\n",
+ dl("Current total page count.\n",
+ (u_long)gsp->st_pages);
+ dl("Current clean page count.\n",
(u_long)gsp->st_page_clean);
- dl("Current dirty buffer count.\n",
+ dl("Current dirty page count.\n",
(u_long)gsp->st_page_dirty);
dl("Number of hash buckets used for page location.\n",
(u_long)gsp->st_hash_buckets);
@@ -702,15 +834,33 @@ mpool_stats(dbenvp)
(u_long)gsp->st_hash_longest);
dl("Total number of hash buckets examined for page location.\n",
(u_long)gsp->st_hash_examined);
+ dl("The number of hash bucket locks granted without waiting.\n",
+ (u_long)gsp->st_hash_nowait);
+ dl("The number of hash bucket locks granted after waiting.\n",
+ (u_long)gsp->st_hash_wait);
+ dl("The maximum number of times any hash bucket lock was waited for.\n",
+ (u_long)gsp->st_hash_max_wait);
dl("The number of region locks granted without waiting.\n",
(u_long)gsp->st_region_nowait);
dl("The number of region locks granted after waiting.\n",
(u_long)gsp->st_region_wait);
+ dl("The number of page allocations.\n",
+ (u_long)gsp->st_alloc);
+ dl("The number of hash buckets examined during allocations\n",
+ (u_long)gsp->st_alloc_buckets);
+ dl("The max number of hash buckets examined for an allocation\n",
+ (u_long)gsp->st_alloc_max_buckets);
+ dl("The number of pages examined during allocations\n",
+ (u_long)gsp->st_alloc_pages);
+ dl("The max number of pages examined for an allocation\n",
+ (u_long)gsp->st_alloc_max_pages);
for (; fsp != NULL && *fsp != NULL; ++fsp) {
printf("%s\n", DB_LINE);
printf("Pool File: %s\n", (*fsp)->file_name);
dl("Page size.\n", (u_long)(*fsp)->st_pagesize);
+ dl("Requested pages mapped into the process' address space.\n",
+ (u_long)(*fsp)->st_map);
dl("Requested pages found in the cache",
(u_long)(*fsp)->st_cache_hit);
if ((*fsp)->st_cache_hit + (*fsp)->st_cache_miss != 0)
@@ -718,8 +868,6 @@ mpool_stats(dbenvp)
((*fsp)->st_cache_hit + (*fsp)->st_cache_miss)) *
100);
printf(".\n");
- dl("Requested pages mapped into the process' address space.\n",
- (u_long)(*fsp)->st_map);
dl("Requested pages not found in the cache.\n",
(u_long)(*fsp)->st_cache_miss);
dl("Pages created in the cache.\n",
@@ -730,6 +878,123 @@ mpool_stats(dbenvp)
(u_long)(*fsp)->st_page_out);
}
+ free(gsp);
+
+ return (0);
+}
+
+/*
+ * rep_stats --
+ * Display replication statistics.
+ */
+int
+rep_stats(dbenv, flags)
+ DB_ENV *dbenv;
+ u_int32_t flags;
+{
+ DB_REP_STAT *sp;
+ int is_client, ret;
+ const char *p;
+
+ if ((ret = dbenv->rep_stat(dbenv, &sp, flags)) != 0) {
+ dbenv->err(dbenv, ret, NULL);
+ return (1);
+ }
+
+ is_client = 0;
+ switch (sp->st_status) {
+ case DB_REP_MASTER:
+ printf("Environment configured as a replication master.\n");
+ break;
+ case DB_REP_CLIENT:
+ printf("Environment configured as a replication client.\n");
+ is_client = 1;
+ break;
+ case DB_REP_LOGSONLY:
+ printf("Environment configured as a logs-only replica.\n");
+ is_client = 1;
+ break;
+ default:
+ printf("Environment not configured for replication.\n");
+ break;
+ }
+
+ printf("%lu/%lu\t%s\n",
+ (u_long)sp->st_next_lsn.file, (u_long)sp->st_next_lsn.offset,
+ is_client ? "Next LSN expected." : "Next LSN to be used.");
+ p = sp->st_waiting_lsn.file == 0 ?
+ "Not waiting for any missed log records." :
+ "LSN of first missed log record being waited for.";
+ printf("%lu/%lu\t%s\n",
+ (u_long)sp->st_waiting_lsn.file, (u_long)sp->st_waiting_lsn.offset,
+ p);
+
+ dl("Number of duplicate master conditions detected.\n",
+ (u_long)sp->st_dupmasters);
+ if (sp->st_env_id != DB_EID_INVALID)
+ dl("Current environment ID.\n", (u_long)sp->st_env_id);
+ else
+ printf("No current environment ID.\n");
+ dl("Current environment priority.\n", (u_long)sp->st_env_priority);
+ dl("Current generation number.\n", (u_long)sp->st_gen);
+ dl("Number of duplicate log records received.\n",
+ (u_long)sp->st_log_duplicated);
+ dl("Number of log records currently queued.\n",
+ (u_long)sp->st_log_queued);
+ dl("Maximum number of log records ever queued at once.\n",
+ (u_long)sp->st_log_queued_max);
+ dl("Total number of log records queued.\n",
+ (u_long)sp->st_log_queued_total);
+ dl("Number of log records received and appended to the log.\n",
+ (u_long)sp->st_log_records);
+ dl("Number of log records missed and requested.\n",
+ (u_long)sp->st_log_requested);
+ if (sp->st_master != DB_EID_INVALID)
+ dl("Current master ID.\n", (u_long)sp->st_master);
+ else
+ printf("No current master ID.\n");
+ dl("Number of times the master has changed.\n",
+ (u_long)sp->st_master_changes);
+ dl("Number of messages received with a bad generation number.\n",
+ (u_long)sp->st_msgs_badgen);
+ dl("Number of messages received and processed.\n",
+ (u_long)sp->st_msgs_processed);
+ dl("Number of messages ignored due to pending recovery.\n",
+ (u_long)sp->st_msgs_recover);
+ dl("Number of failed message sends.\n",
+ (u_long)sp->st_msgs_send_failures);
+ dl("Number of messages sent.\n", (u_long)sp->st_msgs_sent);
+ dl("Number of new site messages received.\n", (u_long)sp->st_newsites);
+ dl("Transmission limited.\n", (u_long)sp->st_nthrottles);
+ dl("Number of outdated conditions detected.\n",
+ (u_long)sp->st_outdated);
+ dl("Number of transactions applied.\n", (u_long)sp->st_txns_applied);
+
+ dl("Number of elections held.\n", (u_long)sp->st_elections);
+ dl("Number of elections won.\n", (u_long)sp->st_elections_won);
+
+ if (sp->st_election_status == 0)
+ printf("No election in progress.\n");
+ else {
+ dl("Current election phase.\n", (u_long)sp->st_election_status);
+ dl("Election winner.\n",
+ (u_long)sp->st_election_cur_winner);
+ dl("Election generation number.\n",
+ (u_long)sp->st_election_gen);
+ printf("%lu/%lu\tMaximum LSN of election winner.\n",
+ (u_long)sp->st_election_lsn.file,
+ (u_long)sp->st_election_lsn.offset);
+ dl("Number of sites expected to participate in elections.\n",
+ (u_long)sp->st_election_nsites);
+ dl("Election priority.\n", (u_long)sp->st_election_priority);
+ dl("Election tiebreaker value.\n",
+ (u_long)sp->st_election_tiebreaker);
+ dl("Votes received this election round.\n",
+ (u_long)sp->st_election_votes);
+ }
+
+ free(sp);
+
return (0);
}
@@ -738,16 +1003,17 @@ mpool_stats(dbenvp)
* Display transaction statistics.
*/
int
-txn_stats(dbenvp)
- DB_ENV *dbenvp;
+txn_stats(dbenv, flags)
+ DB_ENV *dbenv;
+ u_int32_t flags;
{
DB_TXN_STAT *sp;
u_int32_t i;
int ret;
const char *p;
- if ((ret = txn_stat(dbenvp, &sp, NULL)) != 0) {
- dbenvp->err(dbenvp, ret, NULL);
+ if ((ret = dbenv->txn_stat(dbenv, &sp, flags)) != 0) {
+ dbenv->err(dbenv, ret, NULL);
return (1);
}
@@ -755,12 +1021,6 @@ txn_stats(dbenvp)
"No checkpoint LSN." : "File/offset for last checkpoint LSN.";
printf("%lu/%lu\t%s\n",
(u_long)sp->st_last_ckp.file, (u_long)sp->st_last_ckp.offset, p);
- p = sp->st_pending_ckp.file == 0 ?
- "No pending checkpoint LSN." :
- "File/offset for last pending checkpoint LSN.";
- printf("%lu/%lu\t%s\n",
- (u_long)sp->st_pending_ckp.file,
- (u_long)sp->st_pending_ckp.offset, p);
if (sp->st_time_ckp == 0)
printf("0\tNo checkpoint timestamp.\n");
else
@@ -775,19 +1035,30 @@ txn_stats(dbenvp)
dl("Number of transactions begun.\n", (u_long)sp->st_nbegins);
dl("Number of transactions aborted.\n", (u_long)sp->st_naborts);
dl("Number of transactions committed.\n", (u_long)sp->st_ncommits);
+ dl("Number of transactions restored.\n", (u_long)sp->st_nrestores);
+
dl_bytes("Transaction region size",
(u_long)0, (u_long)0, (u_long)sp->st_regsize);
- dl("The number of region locks granted without waiting.\n",
- (u_long)sp->st_region_nowait);
dl("The number of region locks granted after waiting.\n",
(u_long)sp->st_region_wait);
+ dl("The number of region locks granted without waiting.\n",
+ (u_long)sp->st_region_nowait);
+
qsort(sp->st_txnarray,
sp->st_nactive, sizeof(sp->st_txnarray[0]), txn_compare);
- for (i = 0; i < sp->st_nactive; ++i)
- printf("\tid: %lx; initial LSN file/offest %lu/%lu\n",
+ for (i = 0; i < sp->st_nactive; ++i) {
+ printf("\tid: %lx; begin LSN: file/offset %lu/%lu",
(u_long)sp->st_txnarray[i].txnid,
(u_long)sp->st_txnarray[i].lsn.file,
(u_long)sp->st_txnarray[i].lsn.offset);
+ if (sp->st_txnarray[i].parentid == 0)
+ printf("\n");
+ else
+ printf(" parent: %lx\n",
+ (u_long)sp->st_txnarray[i].parentid);
+ }
+
+ free(sp);
return (0);
}
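
The rewritten txn_stats above replaces the old txn_stat() function call with the DB_ENV->txn_stat method, passes the command-line flags through, and frees the returned structure. The same pattern in isolation:

#include <stdlib.h>
#include <db.h>

/* DB_ENV->txn_stat() allocates the DB_TXN_STAT structure; the caller
 * reads what it needs and frees it, exactly as txn_stats() does above. */
static int
active_txn_count(DB_ENV *dbenv, u_int32_t *countp)
{
	DB_TXN_STAT *sp;
	int ret;

	if ((ret = dbenv->txn_stat(dbenv, &sp, 0)) != 0) {
		dbenv->err(dbenv, ret, "DB_ENV->txn_stat");
		return (ret);
	}
	*countp = sp->st_nactive;
	free(sp);
	return (0);
}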
@@ -837,43 +1108,35 @@ dl_bytes(msg, gbytes, mbytes, bytes)
u_long gbytes, mbytes, bytes;
{
const char *sep;
- u_long sbytes;
- int showbytes;
- sbytes = bytes;
- while (bytes > MEGABYTE) {
+ /* Normalize the values. */
+ while (bytes >= MEGABYTE) {
++mbytes;
bytes -= MEGABYTE;
}
- while (mbytes > GIGABYTE / MEGABYTE) {
+ while (mbytes >= GIGABYTE / MEGABYTE) {
++gbytes;
- --mbytes;
+ mbytes -= GIGABYTE / MEGABYTE;
}
sep = "";
- showbytes = 0;
if (gbytes > 0) {
printf("%luGB", gbytes);
sep = " ";
- showbytes = 1;
}
if (mbytes > 0) {
printf("%s%luMB", sep, mbytes);
sep = " ";
- showbytes = 1;
}
- if (bytes > 1024) {
+ if (bytes >= 1024) {
printf("%s%luKB", sep, bytes / 1024);
bytes %= 1024;
sep = " ";
- showbytes = 1;
}
if (bytes > 0)
printf("%s%luB", sep, bytes);
- printf("\t%s", msg);
- if (showbytes)
- printf(" (%lu bytes)", sbytes);
- printf(".\n");
+
+ printf("\t%s.\n", msg);
}
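
The dl_bytes() rewrite above normalizes the gbytes/mbytes/bytes triple, carrying bytes into megabytes and megabytes into gigabytes before printing, and drops the old raw-byte suffix. A standalone check of that arithmetic, assuming MEGABYTE and GIGABYTE are 2^20 and 2^30 as the GIGABYTE / MEGABYTE division implies:

#include <stdio.h>

#define	MEGABYTE	(1024 * 1024)
#define	GIGABYTE	(1024 * 1024 * 1024)

int
main(void)
{
	unsigned long gbytes, mbytes, bytes;

	gbytes = 0;
	mbytes = 1023;
	bytes = 3 * MEGABYTE + 1536;	/* Deliberately unnormalized. */

	while (bytes >= MEGABYTE) {
		++mbytes;
		bytes -= MEGABYTE;
	}
	while (mbytes >= GIGABYTE / MEGABYTE) {
		++gbytes;
		mbytes -= GIGABYTE / MEGABYTE;
	}

	/* 1023MB + 3MB normalizes to 1GB 2MB, with 1KB 512B left over. */
	printf("%luGB %luMB %luKB %luB\n",
	    gbytes, mbytes, bytes / 1024, bytes % 1024);
	return (0);
}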
/*
@@ -902,45 +1165,57 @@ prflags(flags, fnp)
* Initialize the environment.
*/
int
-db_init(home, ttype)
+db_init(dbenv, home, ttype, cache, is_private)
+ DB_ENV *dbenv;
char *home;
test_t ttype;
+ u_int32_t cache;
+ int *is_private;
{
+ u_int32_t oflags;
int ret;
/*
* If our environment open fails, and we're trying to look at a
* shared region, it's a hard failure.
+ *
+ * We will probably just drop core if the environment we join does
+ * not include a memory pool. This is probably acceptable; trying
+ * to use an existing environment that does not contain a memory
+ * pool to look at a database can be safely construed as operator
+ * error, I think.
*/
- if ((ret = dbenv->open(dbenv,
- home, DB_JOINENV | DB_USE_ENVIRON, 0)) == 0)
+ *is_private = 0;
+ if ((ret =
+ dbenv->open(dbenv, home, DB_JOINENV | DB_USE_ENVIRON, 0)) == 0)
return (0);
- if (ttype != T_DB) {
- dbenv->err(dbenv, ret, "DBENV->open%s%s",
+ if (ttype != T_DB && ttype != T_LOG) {
+ dbenv->err(dbenv, ret, "DB_ENV->open%s%s",
home == NULL ? "" : ": ", home == NULL ? "" : home);
return (1);
}
/*
- * We're trying to look at a database.
- *
- * An environment is required because we may be trying to look at
- * databases in directories other than the current one. We could
- * avoid using an environment iff the -h option wasn't specified,
- * but that seems like more work than it's worth.
- *
+ * We're looking at a database or set of log files and no environment
+ * exists. Create one, but make it private so no files are actually
+ * created. Declare a reasonably large cache so that we don't fail
+ * when reporting statistics on large databases.
*
- * No environment exists. Create one, but make it private so that
- * no files are actually created.
- *
- * Note that we will probably just drop core if the environment
- * we joined above does not include a memory pool. This is probably
- * acceptable; trying to use an existing shared environment that
- * does not contain a memory pool to look at a database can
- * be safely construed as operator error, I think.
+ * An environment is required to look at databases because we may be
+ * trying to look at databases in directories other than the current
+ * one.
*/
- if ((ret = dbenv->open(dbenv, home,
- DB_CREATE | DB_INIT_MPOOL | DB_PRIVATE | DB_USE_ENVIRON, 0)) == 0)
+ if ((ret = dbenv->set_cachesize(dbenv, 0, cache, 1)) != 0) {
+ dbenv->err(dbenv, ret, "set_cachesize");
+ return (1);
+ }
+ *is_private = 1;
+ oflags = DB_CREATE | DB_PRIVATE | DB_USE_ENVIRON;
+ if (ttype == T_DB)
+ oflags |= DB_INIT_MPOOL;
+ if (ttype == T_LOG)
+ oflags |= DB_INIT_LOG;
+ if ((ret = dbenv->open(dbenv, home, oflags, 0)) == 0)
return (0);
/* An environment is required. */
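
The reworked db_init() above first tries to join an existing environment and, failing that, creates a private one whose subsystems depend on whether databases or log files are being examined. A sketch of just that fallback path, with a 1MB cache standing in for the caller-supplied value:

#include <stdio.h>
#include <db.h>

/* Fallback path of db_init(): no environment could be joined, so build
 * a private one with only the subsystems the operation needs. */
static int
open_private_env(DB_ENV *dbenv, const char *home, int want_mpool, int want_log)
{
	u_int32_t oflags;
	int ret;

	if ((ret = dbenv->set_cachesize(dbenv, 0, 1024 * 1024, 1)) != 0) {
		dbenv->err(dbenv, ret, "set_cachesize");
		return (1);
	}
	oflags = DB_CREATE | DB_PRIVATE | DB_USE_ENVIRON;
	if (want_mpool)
		oflags |= DB_INIT_MPOOL;	/* Looking at databases. */
	if (want_log)
		oflags |= DB_INIT_LOG;		/* Looking at log files. */
	if ((ret = dbenv->open(dbenv, home, oflags, 0)) != 0) {
		dbenv->err(dbenv, ret, "DB_ENV->open: %s",
		    home == NULL ? "." : home);
		return (1);
	}
	return (0);
}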
@@ -963,16 +1238,18 @@ argcheck(arg, ok_args)
return (1);
}
-void
+int
usage()
{
- fprintf(stderr, "usage: db_stat %s\n",
- "[-celmNtV] [-C Acflmo] [-d file [-s file]] [-h home] [-M Ahlm]");
- exit (1);
+ fprintf(stderr, "%s\n\t%s\n",
+ "usage: db_stat [-celmNrtVZ] [-C Aclmop]",
+ "[-d file [-f] [-s database]] [-h home] [-M Ahlm] [-P password]");
+ return (EXIT_FAILURE);
}
-void
-version_check()
+int
+version_check(progname)
+ const char *progname;
{
int v_major, v_minor, v_patch;
@@ -984,6 +1261,7 @@ version_check()
"%s: version %d.%d.%d doesn't match library version %d.%d.%d\n",
progname, DB_VERSION_MAJOR, DB_VERSION_MINOR,
DB_VERSION_PATCH, v_major, v_minor, v_patch);
- exit (1);
+ return (EXIT_FAILURE);
}
+ return (0);
}
diff --git a/bdb/db_upgrade/db_upgrade.c b/bdb/db_upgrade/db_upgrade.c
index dc29b6c7e0c..f46b5eabc4e 100644
--- a/bdb/db_upgrade/db_upgrade.c
+++ b/bdb/db_upgrade/db_upgrade.c
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Copyright (c) 1996-2002
* Sleepycat Software. All rights reserved.
*/
@@ -9,9 +9,9 @@
#ifndef lint
static const char copyright[] =
- "Copyright (c) 1996-2000\nSleepycat Software Inc. All rights reserved.\n";
+ "Copyright (c) 1996-2002\nSleepycat Software Inc. All rights reserved.\n";
static const char revid[] =
- "$Id: db_upgrade.c,v 1.13 2001/01/18 18:36:59 bostic Exp $";
+ "$Id: db_upgrade.c,v 1.31 2002/03/28 20:13:47 bostic Exp $";
#endif
#ifndef NO_SYSTEM_INCLUDES
@@ -25,12 +25,9 @@ static const char revid[] =
#include "db_int.h"
-int main __P((int, char *[]));
-void usage __P((void));
-void version_check __P((void));
-
-const char
- *progname = "db_upgrade"; /* Program name. */
+int main __P((int, char *[]));
+int usage __P((void));
+int version_check __P((const char *));
int
main(argc, argv)
@@ -39,30 +36,35 @@ main(argc, argv)
{
extern char *optarg;
extern int optind;
+ const char *progname = "db_upgrade";
DB *dbp;
DB_ENV *dbenv;
u_int32_t flags;
int ch, e_close, exitval, nflag, ret, t_ret;
- char *home;
+ char *home, *passwd;
- version_check();
+ if ((ret = version_check(progname)) != 0)
+ return (ret);
dbenv = NULL;
flags = nflag = 0;
e_close = exitval = 0;
- home = NULL;
- while ((ch = getopt(argc, argv, "h:NsV")) != EOF)
+ home = passwd = NULL;
+ while ((ch = getopt(argc, argv, "h:NP:sV")) != EOF)
switch (ch) {
case 'h':
home = optarg;
break;
case 'N':
nflag = 1;
- if ((ret = db_env_set_panicstate(0)) != 0) {
- fprintf(stderr,
- "%s: db_env_set_panicstate: %s\n",
- progname, db_strerror(ret));
- exit (1);
+ break;
+ case 'P':
+ passwd = strdup(optarg);
+ memset(optarg, 0, strlen(optarg));
+ if (passwd == NULL) {
+ fprintf(stderr, "%s: strdup: %s\n",
+ progname, strerror(errno));
+ return (EXIT_FAILURE);
}
break;
case 's':
@@ -70,16 +72,16 @@ main(argc, argv)
break;
case 'V':
printf("%s\n", db_version(NULL, NULL, NULL));
- exit(0);
+ return (EXIT_SUCCESS);
case '?':
default:
- usage();
+ return (usage());
}
argc -= optind;
argv += optind;
if (argc <= 0)
- usage();
+ return (usage());
/* Handle possible interruptions. */
__db_util_siginit();
@@ -98,8 +100,20 @@ main(argc, argv)
dbenv->set_errfile(dbenv, stderr);
dbenv->set_errpfx(dbenv, progname);
- if (nflag && (ret = dbenv->set_mutexlocks(dbenv, 0)) != 0) {
- dbenv->err(dbenv, ret, "set_mutexlocks");
+ if (nflag) {
+ if ((ret = dbenv->set_flags(dbenv, DB_NOLOCKING, 1)) != 0) {
+ dbenv->err(dbenv, ret, "set_flags: DB_NOLOCKING");
+ goto shutdown;
+ }
+ if ((ret = dbenv->set_flags(dbenv, DB_NOPANIC, 1)) != 0) {
+ dbenv->err(dbenv, ret, "set_flags: DB_NOPANIC");
+ goto shutdown;
+ }
+ }
+
+ if (passwd != NULL && (ret = dbenv->set_encrypt(dbenv,
+ passwd, DB_ENCRYPT_AES)) != 0) {
+ dbenv->err(dbenv, ret, "set_passwd");
goto shutdown;
}
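
The new option handling above replaces set_mutexlocks with the DB_NOLOCKING/DB_NOPANIC flags and adds AES encryption support through DB_ENV->set_encrypt. The same configuration sequence, condensed (the password argument here is a placeholder):

#include <stdio.h>
#include <db.h>

/* Apply the -N and -P settings to a freshly created environment handle,
 * using only the calls that appear in the hunk above. */
static int
configure_env(DB_ENV *dbenv, int nflag, const char *passwd)
{
	int ret;

	if (nflag) {
		if ((ret = dbenv->set_flags(dbenv, DB_NOLOCKING, 1)) != 0 ||
		    (ret = dbenv->set_flags(dbenv, DB_NOPANIC, 1)) != 0) {
			dbenv->err(dbenv, ret, "set_flags");
			return (1);
		}
	}
	if (passwd != NULL && (ret =
	    dbenv->set_encrypt(dbenv, passwd, DB_ENCRYPT_AES)) != 0) {
		dbenv->err(dbenv, ret, "set_encrypt");
		return (1);
	}
	return (0);
}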
@@ -126,7 +140,7 @@ main(argc, argv)
if ((ret = dbp->upgrade(dbp, argv[0], flags)) != 0)
dbp->err(dbp, ret, "DB->upgrade: %s", argv[0]);
if ((t_ret = dbp->close(dbp, 0)) != 0 && ret == 0) {
- dbp->err(dbp, ret, "DB->close: %s", argv[0]);
+ dbenv->err(dbenv, t_ret, "DB->close: %s", argv[0]);
ret = t_ret;
}
if (ret != 0)
@@ -145,18 +159,20 @@ shutdown: exitval = 1;
/* Resend any caught signal. */
__db_util_sigresend();
- return (exitval);
+ return (exitval == 0 ? EXIT_SUCCESS : EXIT_FAILURE);
}
-void
+int
usage()
{
- fprintf(stderr, "usage: db_upgrade [-NsV] [-h home] db_file ...\n");
- exit (1);
+ fprintf(stderr, "%s\n",
+ "usage: db_upgrade [-NsV] [-h home] [-P password] db_file ...");
+ return (EXIT_FAILURE);
}
-void
-version_check()
+int
+version_check(progname)
+ const char *progname;
{
int v_major, v_minor, v_patch;
@@ -168,6 +184,7 @@ version_check()
"%s: version %d.%d.%d doesn't match library version %d.%d.%d\n",
progname, DB_VERSION_MAJOR, DB_VERSION_MINOR,
DB_VERSION_PATCH, v_major, v_minor, v_patch);
- exit (1);
+ return (EXIT_FAILURE);
}
+ return (0);
}
diff --git a/bdb/db_verify/db_verify.c b/bdb/db_verify/db_verify.c
index 3bbf14caac6..8d63a20e7bc 100644
--- a/bdb/db_verify/db_verify.c
+++ b/bdb/db_verify/db_verify.c
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Copyright (c) 1996-2002
* Sleepycat Software. All rights reserved.
*/
@@ -9,9 +9,9 @@
#ifndef lint
static const char copyright[] =
- "Copyright (c) 1996-2000\nSleepycat Software Inc. All rights reserved.\n";
+ "Copyright (c) 1996-2002\nSleepycat Software Inc. All rights reserved.\n";
static const char revid[] =
- "$Id: db_verify.c,v 1.15 2001/01/18 18:36:59 bostic Exp $";
+ "$Id: db_verify.c,v 1.38 2002/08/08 03:51:38 bostic Exp $";
#endif
#ifndef NO_SYSTEM_INCLUDES
@@ -25,12 +25,9 @@ static const char revid[] =
#include "db_int.h"
-int main __P((int, char *[]));
-void usage __P((void));
-void version_check __P((void));
-
-const char
- *progname = "db_verify"; /* Program name. */
+int main __P((int, char *[]));
+int usage __P((void));
+int version_check __P((const char *));
int
main(argc, argv)
@@ -39,46 +36,56 @@ main(argc, argv)
{
extern char *optarg;
extern int optind;
- DB *dbp;
+ const char *progname = "db_verify";
+ DB *dbp, *dbp1;
DB_ENV *dbenv;
- int ch, e_close, exitval, nflag, quiet, ret, t_ret;
- char *home;
+ u_int32_t cache;
+ int ch, d_close, e_close, exitval, nflag, oflag, private;
+ int quiet, resize, ret, t_ret;
+ char *home, *passwd;
- version_check();
+ if ((ret = version_check(progname)) != 0)
+ return (ret);
dbenv = NULL;
- e_close = exitval = nflag = quiet = 0;
- home = NULL;
- while ((ch = getopt(argc, argv, "h:NqV")) != EOF)
+ cache = MEGABYTE;
+ d_close = e_close = exitval = nflag = oflag = quiet = 0;
+ home = passwd = NULL;
+ while ((ch = getopt(argc, argv, "h:NoP:qV")) != EOF)
switch (ch) {
case 'h':
home = optarg;
break;
case 'N':
nflag = 1;
- if ((ret = db_env_set_panicstate(0)) != 0) {
- fprintf(stderr,
- "%s: db_env_set_panicstate: %s\n",
- progname, db_strerror(ret));
- exit (1);
+ break;
+ case 'P':
+ passwd = strdup(optarg);
+ memset(optarg, 0, strlen(optarg));
+ if (passwd == NULL) {
+ fprintf(stderr, "%s: strdup: %s\n",
+ progname, strerror(errno));
+ return (EXIT_FAILURE);
}
break;
+ case 'o':
+ oflag = 1;
break;
case 'q':
quiet = 1;
break;
case 'V':
printf("%s\n", db_version(NULL, NULL, NULL));
- exit(0);
+ return (EXIT_SUCCESS);
case '?':
default:
- usage();
+ return (usage());
}
argc -= optind;
argv += optind;
if (argc <= 0)
- usage();
+ return (usage());
/* Handle possible interruptions. */
__db_util_siginit();
@@ -87,57 +94,108 @@ main(argc, argv)
* Create an environment object and initialize it for error
* reporting.
*/
- if ((ret = db_env_create(&dbenv, 0)) != 0) {
- fprintf(stderr, "%s: db_env_create: %s\n",
- progname, db_strerror(ret));
+retry: if ((ret = db_env_create(&dbenv, 0)) != 0) {
+ fprintf(stderr,
+ "%s: db_env_create: %s\n", progname, db_strerror(ret));
goto shutdown;
}
e_close = 1;
- /*
- * XXX
- * We'd prefer to have error output configured while calling
- * db_env_create, but there's no way to turn it off once it's
- * turned on.
- */
if (!quiet) {
dbenv->set_errfile(dbenv, stderr);
dbenv->set_errpfx(dbenv, progname);
}
- if (nflag && (ret = dbenv->set_mutexlocks(dbenv, 0)) != 0) {
- dbenv->err(dbenv, ret, "set_mutexlocks");
- goto shutdown;
+ if (nflag) {
+ if ((ret = dbenv->set_flags(dbenv, DB_NOLOCKING, 1)) != 0) {
+ dbenv->err(dbenv, ret, "set_flags: DB_NOLOCKING");
+ goto shutdown;
+ }
+ if ((ret = dbenv->set_flags(dbenv, DB_NOPANIC, 1)) != 0) {
+ dbenv->err(dbenv, ret, "set_flags: DB_NOPANIC");
+ goto shutdown;
+ }
}
+ if (passwd != NULL &&
+ (ret = dbenv->set_encrypt(dbenv, passwd, DB_ENCRYPT_AES)) != 0) {
+ dbenv->err(dbenv, ret, "set_passwd");
+ goto shutdown;
+ }
/*
- * Attach to an mpool if it exists, but if that fails, attach
- * to a private region.
+ * Attach to an mpool if it exists, but if that fails, attach to a
+ * private region. In the latter case, declare a reasonably large
+ * cache so that we don't fail when verifying large databases.
*/
- if ((ret = dbenv->open(dbenv,
- home, DB_INIT_MPOOL | DB_USE_ENVIRON, 0)) != 0 &&
- (ret = dbenv->open(dbenv, home,
+ private = 0;
+ if ((ret =
+ dbenv->open(dbenv, home, DB_INIT_MPOOL | DB_USE_ENVIRON, 0)) != 0) {
+ if ((ret = dbenv->set_cachesize(dbenv, 0, cache, 1)) != 0) {
+ dbenv->err(dbenv, ret, "set_cachesize");
+ goto shutdown;
+ }
+ private = 1;
+ if ((ret = dbenv->open(dbenv, home,
DB_CREATE | DB_INIT_MPOOL | DB_PRIVATE | DB_USE_ENVIRON, 0)) != 0) {
- dbenv->err(dbenv, ret, "open");
- goto shutdown;
+ dbenv->err(dbenv, ret, "open");
+ goto shutdown;
+ }
}
for (; !__db_util_interrupted() && argv[0] != NULL; ++argv) {
if ((ret = db_create(&dbp, dbenv, 0)) != 0) {
- fprintf(stderr,
- "%s: db_create: %s\n", progname, db_strerror(ret));
+ dbenv->err(dbenv, ret, "%s: db_create", progname);
goto shutdown;
}
- if (!quiet) {
- dbp->set_errfile(dbp, stderr);
- dbp->set_errpfx(dbp, progname);
+ d_close = 1;
+
+ /*
+ * We create a 2nd dbp to this database to get its pagesize
+ * because the dbp we're using for verify cannot be opened.
+ */
+ if (private) {
+ if ((ret = db_create(&dbp1, dbenv, 0)) != 0) {
+ dbenv->err(
+ dbenv, ret, "%s: db_create", progname);
+ goto shutdown;
+ }
+
+ if ((ret = dbp1->open(dbp1, NULL,
+ argv[0], NULL, DB_UNKNOWN, DB_RDONLY, 0)) != 0) {
+ dbenv->err(dbenv, ret, "DB->open: %s", argv[0]);
+ (void)dbp1->close(dbp1, 0);
+ goto shutdown;
+ }
+ /*
+ * If we get here, we can check the cache/page.
+ * !!!
+ * If we have to retry with an env with a larger
+ * cache, we jump out of this loop. However, we
+ * will still be working on the same argv when we
+ * get back into the for-loop.
+ */
+ ret = __db_util_cache(dbenv, dbp1, &cache, &resize);
+ (void)dbp1->close(dbp1, 0);
+ if (ret != 0)
+ goto shutdown;
+
+ if (resize) {
+ (void)dbp->close(dbp, 0);
+ d_close = 0;
+
+ (void)dbenv->close(dbenv, 0);
+ e_close = 0;
+ goto retry;
+ }
}
- if ((ret = dbp->verify(dbp, argv[0], NULL, NULL, 0)) != 0)
+ if ((ret = dbp->verify(dbp,
+ argv[0], NULL, NULL, oflag ? DB_NOORDERCHK : 0)) != 0)
dbp->err(dbp, ret, "DB->verify: %s", argv[0]);
if ((t_ret = dbp->close(dbp, 0)) != 0 && ret == 0) {
- dbp->err(dbp, ret, "DB->close: %s", argv[0]);
+ dbenv->err(dbenv, t_ret, "DB->close: %s", argv[0]);
ret = t_ret;
}
+ d_close = 0;
if (ret != 0)
goto shutdown;
}
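
Each pass through the loop above creates a fresh DB handle, verifies one file (skipping the sort-order/hashing check when -o sets DB_NOORDERCHK), and closes the handle. One such pass in isolation:

#include <stdio.h>
#include <db.h>

/* Verify a single named file, as the patched db_verify loop does:
 * create the handle, verify, report, close. */
static int
verify_one(DB_ENV *dbenv, const char *file, int oflag)
{
	DB *dbp;
	int ret, t_ret;

	if ((ret = db_create(&dbp, dbenv, 0)) != 0) {
		dbenv->err(dbenv, ret, "db_create");
		return (1);
	}
	ret = dbp->verify(dbp, file, NULL, NULL, oflag ? DB_NOORDERCHK : 0);
	if (ret != 0)
		dbp->err(dbp, ret, "DB->verify: %s", file);
	if ((t_ret = dbp->close(dbp, 0)) != 0 && ret == 0)
		ret = t_ret;
	return (ret == 0 ? 0 : 1);
}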
@@ -145,6 +203,11 @@ main(argc, argv)
if (0) {
shutdown: exitval = 1;
}
+
+ if (d_close && (ret = dbp->close(dbp, 0)) != 0) {
+ exitval = 1;
+ dbenv->err(dbenv, ret, "close");
+ }
if (e_close && (ret = dbenv->close(dbenv, 0)) != 0) {
exitval = 1;
fprintf(stderr,
@@ -154,18 +217,20 @@ shutdown: exitval = 1;
/* Resend any caught signal. */
__db_util_sigresend();
- return (exitval);
+ return (exitval == 0 ? EXIT_SUCCESS : EXIT_FAILURE);
}
-void
+int
usage()
{
- fprintf(stderr, "usage: db_verify [-NqV] [-h home] db_file ...\n");
- exit (1);
+ fprintf(stderr, "%s\n",
+ "usage: db_verify [-NoqV] [-h home] [-P password] db_file ...");
+ return (EXIT_FAILURE);
}
-void
-version_check()
+int
+version_check(progname)
+ const char *progname;
{
int v_major, v_minor, v_patch;
@@ -177,6 +242,7 @@ version_check()
"%s: version %d.%d.%d doesn't match library version %d.%d.%d\n",
progname, DB_VERSION_MAJOR, DB_VERSION_MINOR,
DB_VERSION_PATCH, v_major, v_minor, v_patch);
- exit (1);
+ return (EXIT_FAILURE);
}
+ return (0);
}
diff --git a/bdb/include/btree.h b/bdb/dbinc/btree.h
index 395f645f03f..54da9c5b208 100644
--- a/bdb/include/btree.h
+++ b/bdb/dbinc/btree.h
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Copyright (c) 1996-2002
* Sleepycat Software. All rights reserved.
*/
/*
@@ -39,8 +39,10 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
- * $Id: btree.h,v 11.37 2001/01/17 17:09:52 bostic Exp $
+ * $Id: btree.h,v 11.45 2002/08/06 06:11:21 bostic Exp $
*/
+#ifndef _DB_BTREE_H_
+#define _DB_BTREE_H_
/* Forward structure declarations. */
struct __btree; typedef struct __btree BTREE;
@@ -144,7 +146,7 @@ struct __epg {
#define BT_STK_CLR(c) do { \
(c)->csp = (c)->sp; \
(c)->csp->page = NULL; \
- (c)->csp->lock.off = LOCK_INVALID; \
+ LOCK_INIT((c)->csp->lock); \
} while (0)
#define BT_STK_ENTER(dbenv, c, pagep, page_indx, l, mode, ret) do { \
@@ -159,7 +161,7 @@ struct __epg {
} while (0)
#define BT_STK_PUSH(dbenv, c, pagep, page_indx, lock, mode, ret) do { \
- BT_STK_ENTER(dbenv, c, pagep, page_indx, lock, mode, ret); \
+ BT_STK_ENTER(dbenv, c, pagep, page_indx, lock, mode, ret); \
++(c)->csp; \
} while (0)
@@ -169,12 +171,12 @@ struct __epg {
(c)->csp->page = NULL; \
(c)->csp->indx = page_indx; \
(c)->csp->entries = NUM_ENT(pagep); \
- (c)->csp->lock.off = LOCK_INVALID; \
+ LOCK_INIT((c)->csp->lock); \
(c)->csp->lock_mode = DB_LOCK_NG; \
} \
} while (0)
-#define BT_STK_NUMPUSH(dbenv, c, pagep, page_indx,ret) do { \
+#define BT_STK_NUMPUSH(dbenv, c, pagep, page_indx, ret) do { \
BT_STK_NUM(dbenv, cp, pagep, page_indx, ret); \
++(c)->csp; \
} while (0)
@@ -233,8 +235,8 @@ struct __cursor {
* page. Assume every item requires the maximum alignment for
* padding, out of sheer paranoia.
*/
-#define B_MINKEY_TO_OVFLSIZE(minkey, pgsize) \
- ((u_int16_t)(((pgsize) - P_OVERHEAD) / ((minkey) * P_INDX) - \
+#define B_MINKEY_TO_OVFLSIZE(dbp, minkey, pgsize) \
+ ((u_int16_t)(((pgsize) - P_OVERHEAD(dbp)) / ((minkey) * P_INDX) -\
(BKEYDATA_PSIZE(0) + ALIGN(1, sizeof(int32_t)))))
/*
@@ -312,6 +314,7 @@ typedef enum {
DB_CA_SPLIT = 4
} db_ca_mode;
-#include "btree_auto.h"
-#include "btree_ext.h"
-#include "db_am.h"
+#include "dbinc_auto/btree_auto.h"
+#include "dbinc_auto/btree_ext.h"
+#include "dbinc/db_am.h"
+#endif /* !_DB_BTREE_H_ */
diff --git a/bdb/dbinc/crypto.h b/bdb/dbinc/crypto.h
new file mode 100644
index 00000000000..92fad098a4a
--- /dev/null
+++ b/bdb/dbinc/crypto.h
@@ -0,0 +1,78 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: crypto.h,v 1.9 2002/08/06 06:37:07 bostic Exp $
+ */
+
+#ifndef _DB_CRYPTO_H_
+#define _DB_CRYPTO_H_
+
+/*
+ * !!!
+ * These are the internal representations of the algorithm flags.
+ * They are used in both the DB_CIPHER structure and the CIPHER
+ * structure so we can tell if users specified both passwd and alg
+ * correctly.
+ *
+ * CIPHER_ANY is used when an app joins an existing env but doesn't
+ * know the algorithm originally used. This is only valid in the
+ * DB_CIPHER structure until we open and can set the alg.
+ */
+/*
+ * We store the algorithm in an 8-bit field on the meta-page. So we
+ * use a numeric value, not bit fields.
+ * Using bit fields would limit us to 8 algorithms before we had to
+ * switch to numeric values anyway; that should be plenty. It is okay
+ * for the CIPHER_ANY flag to go beyond that since it is never stored
+ * on disk.
+ */
+
+/*
+ * This structure is per-process, not in shared memory.
+ */
+struct __db_cipher {
+ int (*adj_size) __P((size_t));
+ int (*close) __P((DB_ENV *, void *));
+ int (*decrypt) __P((DB_ENV *, void *, void *, u_int8_t *, size_t));
+ int (*encrypt) __P((DB_ENV *, void *, void *, u_int8_t *, size_t));
+ int (*init) __P((DB_ENV *, DB_CIPHER *));
+
+ u_int8_t mac_key[DB_MAC_KEY]; /* MAC key. */
+ void *data; /* Algorithm-specific information */
+
+#define CIPHER_AES 1 /* AES algorithm */
+ u_int8_t alg; /* Algorithm used - See above */
+ u_int8_t spare[3]; /* Spares */
+
+#define CIPHER_ANY 0x00000001 /* Only for DB_CIPHER */
+ u_int32_t flags; /* Other flags */
+};
+
+#ifdef HAVE_CRYPTO
+
+#include "crypto/rijndael/rijndael-api-fst.h"
+
+/*
+ * Shared ciphering structure
+ * No DB_MUTEX needed because all information is read-only after creation.
+ */
+typedef struct __cipher {
+ roff_t passwd; /* Offset to shared passwd */
+ size_t passwd_len; /* Length of passwd */
+ u_int32_t flags; /* Algorithm used - see above */
+} CIPHER;
+
+#define DB_AES_KEYLEN 128 /* AES key length */
+#define DB_AES_CHUNK 16 /* AES byte unit size */
+
+typedef struct __aes_cipher {
+ keyInstance decrypt_ki; /* Decryption key instance */
+ keyInstance encrypt_ki; /* Encryption key instance */
+ u_int32_t flags; /* AES-specific flags */
+} AES_CIPHER;
+
+#include "dbinc_auto/crypto_ext.h"
+#endif /* HAVE_CRYPTO */
+#endif /* !_DB_CRYPTO_H_ */
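
The new dbinc/crypto.h describes the per-process DB_CIPHER dispatch structure and the shared CIPHER region data; applications never touch these directly, they select CIPHER_AES indirectly by calling DB_ENV->set_encrypt with DB_ENCRYPT_AES, as the updated utilities above do. A hedged sketch of that call on a private environment (home directory and password are placeholders):

#include <stdio.h>
#include <db.h>

/* Request AES encryption on an environment before opening it; this is
 * the only public entry point into the crypto machinery above. */
static int
open_encrypted_env(DB_ENV **dbenvp, const char *home, const char *passwd)
{
	DB_ENV *dbenv;
	int ret;

	if ((ret = db_env_create(&dbenv, 0)) != 0)
		return (ret);
	if ((ret = dbenv->set_encrypt(dbenv, passwd, DB_ENCRYPT_AES)) != 0 ||
	    (ret = dbenv->open(dbenv, home,
	    DB_CREATE | DB_INIT_MPOOL | DB_PRIVATE, 0)) != 0) {
		dbenv->err(dbenv, ret, "encrypted environment");
		(void)dbenv->close(dbenv, 0);
		return (ret);
	}
	*dbenvp = dbenv;
	return (0);
}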
diff --git a/bdb/dbinc/cxx_common.h b/bdb/dbinc/cxx_common.h
new file mode 100644
index 00000000000..e5cb3a9aef4
--- /dev/null
+++ b/bdb/dbinc/cxx_common.h
@@ -0,0 +1,45 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: cxx_common.h,v 11.2 2002/01/11 15:52:23 bostic Exp $
+ */
+
+#ifndef _CXX_COMMON_H_
+#define _CXX_COMMON_H_
+
+//
+// Common definitions used by all of Berkeley DB's C++ include files.
+//
+
+////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////
+//
+// Mechanisms for declaring classes
+//
+
+//
+// Every class defined in this file has an _exported next to the class name.
+// This is needed for WinTel machines so that the class methods can
+// be exported or imported in a DLL as appropriate. Users of the DLL
+// use the define DB_USE_DLL. When the DLL is built, DB_CREATE_DLL
+// must be defined.
+//
+#if defined(_MSC_VER)
+
+# if defined(DB_CREATE_DLL)
+# define _exported __declspec(dllexport) // creator of dll
+# elif defined(DB_USE_DLL)
+# define _exported __declspec(dllimport) // user of dll
+# else
+# define _exported // static lib creator or user
+# endif
+
+#else /* _MSC_VER */
+
+# define _exported
+
+#endif /* _MSC_VER */
+#endif /* !_CXX_COMMON_H_ */
diff --git a/bdb/dbinc/cxx_except.h b/bdb/dbinc/cxx_except.h
new file mode 100644
index 00000000000..f9bf4f859f8
--- /dev/null
+++ b/bdb/dbinc/cxx_except.h
@@ -0,0 +1,141 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: cxx_except.h,v 11.5 2002/08/01 23:32:34 mjc Exp $
+ */
+
+#ifndef _CXX_EXCEPT_H_
+#define _CXX_EXCEPT_H_
+
+#include "cxx_common.h"
+
+////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////
+//
+// Forward declarations
+//
+
+class DbDeadlockException; // forward
+class DbException; // forward
+class DbLockNotGrantedException; // forward
+class DbLock; // forward
+class DbMemoryException; // forward
+class DbRunRecoveryException; // forward
+class Dbt; // forward
+
+////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////
+//
+// Exception classes
+//
+
+// Almost any error in the DB library throws a DbException.
+// Every exception should be considered an abnormality
+// (e.g. bug, misuse of DB, file system error).
+//
+// NOTE: We would like to inherit from class exception and
+// let it handle what(), but there are
+// MSVC++ problems when <exception> is included.
+//
+class _exported DbException
+{
+public:
+ virtual ~DbException();
+ DbException(int err);
+ DbException(const char *description);
+ DbException(const char *prefix, int err);
+ DbException(const char *prefix1, const char *prefix2, int err);
+ int get_errno() const;
+ virtual const char *what() const;
+
+ DbException(const DbException &);
+ DbException &operator = (const DbException &);
+
+private:
+ char *what_;
+ int err_; // errno
+};
+
+//
+// A specific sort of exception that occurs when
+// an operation is aborted to resolve a deadlock.
+//
+class _exported DbDeadlockException : public DbException
+{
+public:
+ virtual ~DbDeadlockException();
+ DbDeadlockException(const char *description);
+
+ DbDeadlockException(const DbDeadlockException &);
+ DbDeadlockException &operator = (const DbDeadlockException &);
+};
+
+//
+// A specific sort of exception that occurs when
+// a lock is not granted, e.g. by lock_get or lock_vec.
+// Note that the Dbt is only live as long as the Dbt used
+// in the offending call.
+//
+class _exported DbLockNotGrantedException : public DbException
+{
+public:
+ virtual ~DbLockNotGrantedException();
+ DbLockNotGrantedException(const char *prefix, db_lockop_t op,
+ db_lockmode_t mode, const Dbt *obj, const DbLock lock, int index);
+ DbLockNotGrantedException(const DbLockNotGrantedException &);
+ DbLockNotGrantedException &operator =
+ (const DbLockNotGrantedException &);
+
+ db_lockop_t get_op() const;
+ db_lockmode_t get_mode() const;
+ const Dbt* get_obj() const;
+ DbLock *get_lock() const;
+ int get_index() const;
+
+private:
+ db_lockop_t op_;
+ db_lockmode_t mode_;
+ const Dbt *obj_;
+ DbLock *lock_;
+ int index_;
+};
+
+//
+// A specific sort of exception that occurs when
+// user declared memory is insufficient in a Dbt.
+//
+class _exported DbMemoryException : public DbException
+{
+public:
+ virtual ~DbMemoryException();
+ DbMemoryException(Dbt *dbt);
+ DbMemoryException(const char *description);
+ DbMemoryException(const char *prefix, Dbt *dbt);
+ DbMemoryException(const char *prefix1, const char *prefix2, Dbt *dbt);
+ Dbt *get_dbt() const;
+
+ DbMemoryException(const DbMemoryException &);
+ DbMemoryException &operator = (const DbMemoryException &);
+
+private:
+ Dbt *dbt_;
+};
+
+//
+// A specific sort of exception that occurs when
+// recovery is required before continuing DB activity.
+//
+class _exported DbRunRecoveryException : public DbException
+{
+public:
+ virtual ~DbRunRecoveryException();
+ DbRunRecoveryException(const char *description);
+
+ DbRunRecoveryException(const DbRunRecoveryException &);
+ DbRunRecoveryException &operator = (const DbRunRecoveryException &);
+};
+
+#endif /* !_CXX_EXCEPT_H_ */
diff --git a/bdb/include/cxx_int.h b/bdb/dbinc/cxx_int.h
index 4a9a40ceba1..9af3979d9f1 100644
--- a/bdb/include/cxx_int.h
+++ b/bdb/dbinc/cxx_int.h
@@ -1,10 +1,10 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1997, 1998, 1999, 2000
+ * Copyright (c) 1997-2002
* Sleepycat Software. All rights reserved.
*
- * $Id: cxx_int.h,v 11.13 2000/11/21 22:56:36 dda Exp $
+ * $Id: cxx_int.h,v 11.20 2002/01/11 15:52:23 bostic Exp $
*/
#ifndef _CXX_INT_H_
@@ -69,26 +69,11 @@ WRAPPED_CLASS(DbTxn, DbTxnImp, DB_TXN*)
#define DB_ERROR(caller, ecode, policy) \
DbEnv::runtime_error(caller, ecode, policy)
-// These defines are for tedious field set/get access methods.
-//
-
-#define DB_RO_ACCESS(_class, _type, _cxx_name, _field) \
- \
-_type _class::get_##_cxx_name() const \
-{ \
- return (_field); \
-}
-
-#define DB_WO_ACCESS(_class, _type, _cxx_name, _field) \
- \
-void _class::set_##_cxx_name(_type value) \
-{ \
- _field = value; \
-} \
+#define DB_ERROR_DBT(caller, dbt, policy) \
+ DbEnv::runtime_error_dbt(caller, dbt, policy)
-#define DB_RW_ACCESS(_class, _type, _cxx_name, _field) \
- DB_RO_ACCESS(_class, _type, _cxx_name, _field) \
- DB_WO_ACCESS(_class, _type, _cxx_name, _field)
+#define DB_OVERFLOWED_DBT(dbt) \
+ (F_ISSET(dbt, DB_DBT_USERMEM) && dbt->size > dbt->ulen)
/* values for Db::flags_ */
#define DB_CXX_PRIVATE_ENV 0x00000001
diff --git a/bdb/dbinc/db.in b/bdb/dbinc/db.in
new file mode 100644
index 00000000000..208de3bd622
--- /dev/null
+++ b/bdb/dbinc/db.in
@@ -0,0 +1,1883 @@
+/*
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: db.in,v 11.323 2002/09/03 17:27:16 bostic Exp $
+ *
+ * db.h include file layout:
+ * General.
+ * Database Environment.
+ * Locking subsystem.
+ * Logging subsystem.
+ * Shared buffer cache (mpool) subsystem.
+ * Transaction subsystem.
+ * Access methods.
+ * Access method cursors.
+ * Dbm/Ndbm, Hsearch historic interfaces.
+ */
+
+#ifndef _DB_H_
+#define _DB_H_
+
+#ifndef __NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <stdio.h>
+#endif
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+/*
+ * XXX
+ * Handle function prototypes and the keyword "const". This steps on name
+ * space that DB doesn't control, but all of the other solutions are worse.
+ *
+ * XXX
+ * While Microsoft's compiler is ANSI C compliant, it doesn't have __STDC__
+ * defined by default, you specify a command line flag or #pragma to turn
+ * it on. Don't do that, however, because some of Microsoft's own header
+ * files won't compile.
+ */
+#undef __P
+#if defined(__STDC__) || defined(__cplusplus) || defined(_MSC_VER)
+#define __P(protos) protos /* ANSI C prototypes */
+#else
+#define const
+#define __P(protos) () /* K&R C preprocessor */
+#endif
+
+/*
+ * Berkeley DB version information.
+ */
+#define DB_VERSION_MAJOR @DB_VERSION_MAJOR@
+#define DB_VERSION_MINOR @DB_VERSION_MINOR@
+#define DB_VERSION_PATCH @DB_VERSION_PATCH@
+#define DB_VERSION_STRING @DB_VERSION_STRING@
+
+/*
+ * !!!
+ * Berkeley DB uses specifically sized types. If they're not provided by
+ * the system, typedef them here.
+ *
+ * We protect them against multiple inclusion using __BIT_TYPES_DEFINED__,
+ * as do BIND and Kerberos, since we don't know for sure what #include
+ * files the user is using.
+ *
+ * !!!
+ * We also provide the standard u_int, u_long etc., if they're not provided
+ * by the system.
+ */
+#ifndef __BIT_TYPES_DEFINED__
+#define __BIT_TYPES_DEFINED__
+@u_int8_decl@
+@int16_decl@
+@u_int16_decl@
+@int32_decl@
+@u_int32_decl@
+#endif
+
+@u_char_decl@
+@u_short_decl@
+@u_int_decl@
+@u_long_decl@
+@ssize_t_decl@
+
+/* Basic types that are exported or quasi-exported. */
+typedef u_int32_t db_pgno_t; /* Page number type. */
+typedef u_int16_t db_indx_t; /* Page offset type. */
+#define DB_MAX_PAGES 0xffffffff /* >= # of pages in a file */
+
+typedef u_int32_t db_recno_t; /* Record number type. */
+#define DB_MAX_RECORDS 0xffffffff /* >= # of records in a tree */
+
+typedef u_int32_t db_timeout_t; /* Type of a timeout. */
+
+/*
+ * Region offsets are currently limited to 32-bits. I expect that's going
+ * to have to be fixed in the not-too-distant future, since we won't want to
+ * split 100Gb memory pools into that many different regions.
+ */
+typedef u_int32_t roff_t;
+
+/*
+ * Forward structure declarations, so we can declare pointers and
+ * applications can get type checking.
+ */
+struct __db; typedef struct __db DB;
+struct __db_bt_stat; typedef struct __db_bt_stat DB_BTREE_STAT;
+struct __db_cipher; typedef struct __db_cipher DB_CIPHER;
+struct __db_dbt; typedef struct __db_dbt DBT;
+struct __db_env; typedef struct __db_env DB_ENV;
+struct __db_h_stat; typedef struct __db_h_stat DB_HASH_STAT;
+struct __db_ilock; typedef struct __db_ilock DB_LOCK_ILOCK;
+struct __db_lock_stat; typedef struct __db_lock_stat DB_LOCK_STAT;
+struct __db_lock_u; typedef struct __db_lock_u DB_LOCK;
+struct __db_lockreq; typedef struct __db_lockreq DB_LOCKREQ;
+struct __db_log_cursor; typedef struct __db_log_cursor DB_LOGC;
+struct __db_log_stat; typedef struct __db_log_stat DB_LOG_STAT;
+struct __db_lsn; typedef struct __db_lsn DB_LSN;
+struct __db_mpool; typedef struct __db_mpool DB_MPOOL;
+struct __db_mpool_fstat;typedef struct __db_mpool_fstat DB_MPOOL_FSTAT;
+struct __db_mpool_stat; typedef struct __db_mpool_stat DB_MPOOL_STAT;
+struct __db_mpoolfile; typedef struct __db_mpoolfile DB_MPOOLFILE;
+struct __db_preplist; typedef struct __db_preplist DB_PREPLIST;
+struct __db_qam_stat; typedef struct __db_qam_stat DB_QUEUE_STAT;
+struct __db_rep; typedef struct __db_rep DB_REP;
+struct __db_rep_stat; typedef struct __db_rep_stat DB_REP_STAT;
+struct __db_txn; typedef struct __db_txn DB_TXN;
+struct __db_txn_active; typedef struct __db_txn_active DB_TXN_ACTIVE;
+struct __db_txn_stat; typedef struct __db_txn_stat DB_TXN_STAT;
+struct __db_txnmgr; typedef struct __db_txnmgr DB_TXNMGR;
+struct __dbc; typedef struct __dbc DBC;
+struct __dbc_internal; typedef struct __dbc_internal DBC_INTERNAL;
+struct __fh_t; typedef struct __fh_t DB_FH;
+struct __fname; typedef struct __fname FNAME;
+struct __key_range; typedef struct __key_range DB_KEY_RANGE;
+struct __mpoolfile; typedef struct __mpoolfile MPOOLFILE;
+struct __mutex_t; typedef struct __mutex_t DB_MUTEX;
+
+/* Key/data structure -- a Data-Base Thang. */
+struct __db_dbt {
+ /*
+ * data/size must be fields 1 and 2 for DB 1.85 compatibility.
+ */
+ void *data; /* Key/data */
+ u_int32_t size; /* key/data length */
+
+ u_int32_t ulen; /* RO: length of user buffer. */
+ u_int32_t dlen; /* RO: get/put record length. */
+ u_int32_t doff; /* RO: get/put record offset. */
+
+ void *app_private; /* Application-private handle. */
+
+#define DB_DBT_APPMALLOC 0x001 /* Callback allocated memory. */
+#define DB_DBT_ISSET 0x002 /* Lower level calls set value. */
+#define DB_DBT_MALLOC 0x004 /* Return in malloc'd memory. */
+#define DB_DBT_PARTIAL 0x008 /* Partial put/get. */
+#define DB_DBT_REALLOC 0x010 /* Return in realloc'd memory. */
+#define DB_DBT_USERMEM 0x020 /* Return in user's memory. */
+#define DB_DBT_DUPOK 0x040 /* Insert if duplicate. */
+ u_int32_t flags;
+};
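/*
 * Sketch of the DB_DBT_USERMEM convention defined above: the caller
 * owns the return buffer (data/ulen) and the library fills in size, or
 * fails if the buffer is too small.  The DB handle is assumed to be
 * created and opened elsewhere.
 */
#include <string.h>
#include <db.h>

static int
fetch_into_buffer(DB *dbp, const char *keystr, char *buf, u_int32_t buflen)
{
	DBT key, data;

	memset(&key, 0, sizeof(key));
	memset(&data, 0, sizeof(data));

	key.data = (void *)keystr;
	key.size = (u_int32_t)strlen(keystr);

	data.data = buf;		/* Caller-owned buffer. */
	data.ulen = buflen;		/* Buffer length. */
	data.flags = DB_DBT_USERMEM;	/* Return the data in place. */

	/* On success, data.size is the stored length. */
	return (dbp->get(dbp, NULL, &key, &data, 0));
}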
+
+/*
+ * Common flags --
+ * Interfaces which use any of these common flags should never have
+ * interface specific flags in this range.
+ */
+#define DB_CREATE 0x000001 /* Create file as necessary. */
+#define DB_CXX_NO_EXCEPTIONS 0x000002 /* C++: return error values. */
+#define DB_FORCE 0x000004 /* Force (anything). */
+#define DB_NOMMAP 0x000008 /* Don't mmap underlying file. */
+#define DB_RDONLY 0x000010 /* Read-only (O_RDONLY). */
+#define DB_RECOVER 0x000020 /* Run normal recovery. */
+#define DB_THREAD 0x000040 /* Applications are threaded. */
+#define DB_TRUNCATE 0x000080 /* Discard existing DB (O_TRUNC). */
+#define DB_TXN_NOSYNC 0x000100 /* Do not sync log on commit. */
+#define DB_USE_ENVIRON 0x000200 /* Use the environment. */
+#define DB_USE_ENVIRON_ROOT 0x000400 /* Use the environment if root. */
+
+/*
+ * Common flags --
+ * Interfaces which use any of these common flags should never have
+ * interface specific flags in this range.
+ *
+ * DB_AUTO_COMMIT:
+ * DB_ENV->set_flags, DB->associate, DB->del, DB->put, DB->open,
+ * DB->remove, DB->rename, DB->truncate
+ * DB_DIRTY_READ:
+ * DB->cursor, DB->get, DB->join, DB->open, DBcursor->c_get,
+ * DB_ENV->txn_begin
+ *
+ * Shared flags up to 0x000400 */
+#define DB_AUTO_COMMIT 0x00800000 /* Implied transaction. */
+#define DB_DIRTY_READ 0x01000000 /* Dirty Read. */
+
+/*
+ * Flags private to db_env_create.
+ */
+#define DB_CLIENT 0x000001 /* Open for a client environment. */
+
+/*
+ * Flags private to db_create.
+ */
+#define DB_XA_CREATE 0x000001 /* Open in an XA environment. */
+
+/*
+ * Flags private to DB_ENV->open.
+ * Shared flags up to 0x000400 */
+#define DB_INIT_CDB 0x000800 /* Concurrent Access Methods. */
+#define DB_INIT_LOCK 0x001000 /* Initialize locking. */
+#define DB_INIT_LOG 0x002000 /* Initialize logging. */
+#define DB_INIT_MPOOL 0x004000 /* Initialize mpool. */
+#define DB_INIT_TXN 0x008000 /* Initialize transactions. */
+#define DB_JOINENV 0x010000 /* Initialize all subsystems present. */
+#define DB_LOCKDOWN 0x020000 /* Lock memory into physical core. */
+#define DB_PRIVATE 0x040000 /* DB_ENV is process local. */
+#define DB_RECOVER_FATAL 0x080000 /* Run catastrophic recovery. */
+#define DB_SYSTEM_MEM 0x100000 /* Use system-backed memory. */
+
+/*
+ * Flags private to DB->open.
+ * Shared flags up to 0x000400 */
+#define DB_EXCL 0x000800 /* Exclusive open (O_EXCL). */
+#define DB_FCNTL_LOCKING 0x001000 /* UNDOC: fcntl(2) locking. */
+#define DB_RDWRMASTER 0x002000 /* UNDOC: allow subdb master open R/W */
+#define DB_WRITEOPEN 0x004000 /* UNDOC: open with write lock. */
+
+/*
+ * Flags private to DB_ENV->txn_begin.
+ * Shared flags up to 0x000400 */
+#define DB_TXN_NOWAIT 0x000800 /* Do not wait for locks in this TXN. */
+#define DB_TXN_SYNC 0x001000 /* Always sync log on commit. */
+
+/*
+ * Flags private to DB_ENV->set_encrypt.
+ */
+#define DB_ENCRYPT_AES 0x000001 /* AES, assumes SHA1 checksum */
+
+/*
+ * Flags private to DB_ENV->set_flags.
+ * Shared flags up to 0x000400 */
+#define DB_CDB_ALLDB 0x000800 /* Set CDB locking per environment. */
+#define DB_DIRECT_DB 0x001000 /* Don't buffer databases in the OS. */
+#define DB_DIRECT_LOG 0x002000 /* Don't buffer log files in the OS. */
+#define DB_NOLOCKING 0x004000 /* Set locking/mutex behavior. */
+#define DB_NOPANIC 0x008000 /* Set panic state per DB_ENV. */
+#define DB_OVERWRITE 0x010000 /* Overwrite unlinked region files. */
+#define DB_PANIC_ENVIRONMENT 0x020000 /* Set panic state per environment. */
+#define DB_REGION_INIT 0x040000 /* Page-fault regions on open. */
+#define DB_TXN_WRITE_NOSYNC 0x080000 /* Write, don't sync, on txn commit. */
+#define DB_YIELDCPU 0x100000 /* Yield the CPU (a lot). */
+
+/*
+ * Flags private to DB->set_feedback's callback.
+ */
+#define DB_UPGRADE 0x000001 /* Upgrading. */
+#define DB_VERIFY 0x000002 /* Verifying. */
+
+/*
+ * Flags private to DB_MPOOLFILE->open.
+ * Shared flags up to 0x000400 */
+#define DB_DIRECT 0x000800 /* Don't buffer the file in the OS. */
+#define DB_EXTENT 0x001000 /* UNDOC: dealing with an extent. */
+#define DB_ODDFILESIZE 0x002000 /* Truncate file to N * pgsize. */
+
+/*
+ * Flags private to DB->set_flags.
+ */
+#define DB_CHKSUM_SHA1 0x000001 /* Use SHA1 checksumming */
+#define DB_DUP 0x000002 /* Btree, Hash: duplicate keys. */
+#define DB_DUPSORT 0x000004 /* Btree, Hash: duplicate keys. */
+#define DB_ENCRYPT 0x000008 /* Btree, Hash: use encryption. */
+#define DB_RECNUM 0x000010 /* Btree: record numbers. */
+#define DB_RENUMBER 0x000020 /* Recno: renumber on insert/delete. */
+#define DB_REVSPLITOFF 0x000040 /* Btree: turn off reverse splits. */
+#define DB_SNAPSHOT 0x000080 /* Recno: snapshot the input. */
+
+/*
+ * Flags private to the DB->stat methods.
+ */
+#define DB_STAT_CLEAR 0x000001 /* Clear stat after returning values. */
+
+/*
+ * Flags private to DB->join.
+ */
+#define DB_JOIN_NOSORT 0x000001 /* Don't try to optimize join. */
+
+/*
+ * Flags private to DB->verify.
+ */
+#define DB_AGGRESSIVE 0x000001 /* Salvage whatever could be data.*/
+#define DB_NOORDERCHK 0x000002 /* Skip sort order/hashing check. */
+#define DB_ORDERCHKONLY 0x000004 /* Only perform the order check. */
+#define DB_PR_PAGE 0x000008 /* Show page contents (-da). */
+#define DB_PR_RECOVERYTEST 0x000010 /* Recovery test (-dr). */
+#define DB_PRINTABLE 0x000020 /* Use printable format for salvage. */
+#define DB_SALVAGE 0x000040 /* Salvage what looks like data. */
+/*
+ * !!!
+ * These must not go over 0x8000, or they will collide with the flags
+ * used by __bam_vrfy_subtree.
+ */
+
+/*
+ * Flags private to DB->set_rep_transport's send callback.
+ */
+#define DB_REP_PERMANENT 0x0001 /* Important--app. may want to flush. */
+
+/*******************************************************
+ * Locking.
+ *******************************************************/
+#define DB_LOCKVERSION 1
+
+#define DB_FILE_ID_LEN 20 /* Unique file ID length. */
+
+/*
+ * Deadlock detector modes; used in the DB_ENV structure to configure the
+ * locking subsystem.
+ */
+#define DB_LOCK_NORUN 0
+#define DB_LOCK_DEFAULT 1 /* Default policy. */
+#define DB_LOCK_EXPIRE 2 /* Only expire locks, no detection. */
+#define DB_LOCK_MAXLOCKS 3 /* Abort txn with maximum # of locks. */
+#define DB_LOCK_MINLOCKS 4 /* Abort txn with minimum # of locks. */
+#define DB_LOCK_MINWRITE 5 /* Abort txn with minimum writelocks. */
+#define DB_LOCK_OLDEST 6 /* Abort oldest transaction. */
+#define DB_LOCK_RANDOM 7 /* Abort random transaction. */
+#define DB_LOCK_YOUNGEST 8 /* Abort youngest transaction. */
+
+/* Flag values for lock_vec(), lock_get(). */
+#define DB_LOCK_FREE_LOCKER 0x001 /* Internal: Free locker as well. */
+#define DB_LOCK_NOWAIT 0x002 /* Don't wait on unavailable lock. */
+#define DB_LOCK_RECORD 0x004 /* Internal: record lock. */
+#define DB_LOCK_REMOVE 0x008 /* Internal: flag object removed. */
+#define DB_LOCK_SET_TIMEOUT 0x010 /* Internal: set lock timeout. */
+#define DB_LOCK_SWITCH 0x020 /* Internal: switch existing lock. */
+#define DB_LOCK_UPGRADE 0x040 /* Internal: upgrade existing lock. */
+
+/*
+ * Simple R/W lock modes and for multi-granularity intention locking.
+ *
+ * !!!
+ * These values are NOT random, as they are used as an index into the lock
+ * conflicts arrays, i.e., DB_LOCK_IWRITE must be == 3, and DB_LOCK_IREAD
+ * must be == 4.
+ */
+typedef enum {
+ DB_LOCK_NG=0, /* Not granted. */
+ DB_LOCK_READ=1, /* Shared/read. */
+ DB_LOCK_WRITE=2, /* Exclusive/write. */
+ DB_LOCK_WAIT=3, /* Wait for event */
+ DB_LOCK_IWRITE=4, /* Intent exclusive/write. */
+ DB_LOCK_IREAD=5, /* Intent to share/read. */
+ DB_LOCK_IWR=6, /* Intent to read and write. */
+ DB_LOCK_DIRTY=7, /* Dirty Read. */
+ DB_LOCK_WWRITE=8 /* Was Written. */
+} db_lockmode_t;
+
+/*
+ * Request types.
+ */
+typedef enum {
+ DB_LOCK_DUMP=0, /* Display held locks. */
+ DB_LOCK_GET=1, /* Get the lock. */
+ DB_LOCK_GET_TIMEOUT=2, /* Get lock with a timeout. */
+ DB_LOCK_INHERIT=3, /* Pass locks to parent. */
+ DB_LOCK_PUT=4, /* Release the lock. */
+ DB_LOCK_PUT_ALL=5, /* Release locker's locks. */
+ DB_LOCK_PUT_OBJ=6, /* Release locker's locks on obj. */
+ DB_LOCK_PUT_READ=7, /* Release locker's read locks. */
+ DB_LOCK_TIMEOUT=8, /* Force a txn to timeout. */
+ DB_LOCK_TRADE=9, /* Trade locker ids on a lock. */
+ DB_LOCK_UPGRADE_WRITE=10 /* Upgrade writes for dirty reads. */
+} db_lockop_t;
+
+/*
+ * Status of a lock.
+ */
+typedef enum {
+ DB_LSTAT_ABORTED=1, /* Lock belongs to an aborted txn. */
+ DB_LSTAT_ERR=2, /* Lock is bad. */
+ DB_LSTAT_EXPIRED=3, /* Lock has expired. */
+ DB_LSTAT_FREE=4, /* Lock is unallocated. */
+ DB_LSTAT_HELD=5, /* Lock is currently held. */
+ DB_LSTAT_NOTEXIST=6, /* Object on which lock was waiting
+ * was removed */
+ DB_LSTAT_PENDING=7, /* Lock was waiting and has been
+ * promoted; waiting for the owner
+ * to run and upgrade it to held. */
+ DB_LSTAT_WAITING=8 /* Lock is on the wait queue. */
+}db_status_t;
+
+/* Lock statistics structure. */
+struct __db_lock_stat {
+ u_int32_t st_id; /* Last allocated locker ID. */
+ u_int32_t st_cur_maxid; /* Current maximum unused ID. */
+ u_int32_t st_maxlocks; /* Maximum number of locks in table. */
+ u_int32_t st_maxlockers; /* Maximum num of lockers in table. */
+ u_int32_t st_maxobjects; /* Maximum num of objects in table. */
+ u_int32_t st_nmodes; /* Number of lock modes. */
+ u_int32_t st_nlocks; /* Current number of locks. */
+ u_int32_t st_maxnlocks; /* Maximum number of locks so far. */
+ u_int32_t st_nlockers; /* Current number of lockers. */
+ u_int32_t st_maxnlockers; /* Maximum number of lockers so far. */
+ u_int32_t st_nobjects; /* Current number of objects. */
+ u_int32_t st_maxnobjects; /* Maximum number of objects so far. */
+ u_int32_t st_nconflicts; /* Number of lock conflicts. */
+ u_int32_t st_nrequests; /* Number of lock gets. */
+ u_int32_t st_nreleases; /* Number of lock puts. */
+ u_int32_t st_nnowaits; /* Number of requests that would have
+ waited, but NOWAIT was set. */
+ u_int32_t st_ndeadlocks; /* Number of lock deadlocks. */
+ db_timeout_t st_locktimeout; /* Lock timeout. */
+ u_int32_t st_nlocktimeouts; /* Number of lock timeouts. */
+ db_timeout_t st_txntimeout; /* Transaction timeout. */
+ u_int32_t st_ntxntimeouts; /* Number of transaction timeouts. */
+ u_int32_t st_region_wait; /* Region lock granted after wait. */
+ u_int32_t st_region_nowait; /* Region lock granted without wait. */
+ u_int32_t st_regsize; /* Region size. */
+};
+
+/*
+ * DB_LOCK_ILOCK --
+ * Internal DB access method lock.
+ */
+struct __db_ilock {
+ db_pgno_t pgno; /* Page being locked. */
+ u_int8_t fileid[DB_FILE_ID_LEN];/* File id. */
+#define DB_HANDLE_LOCK 1
+#define DB_RECORD_LOCK 2
+#define DB_PAGE_LOCK 3
+#define DB_TXN_LOCK 4
+ u_int32_t type; /* Type of lock. */
+};
+
+/*
+ * DB_LOCK --
+ * The structure is allocated by the caller and filled in during a
+ * lock_get request (or a lock_vec/DB_LOCK_GET).
+ */
+struct __db_lock_u {
+ size_t off; /* Offset of the lock in the region */
+ u_int32_t ndx; /* Index of the object referenced by
+ * this lock; used for locking. */
+ u_int32_t gen; /* Generation number of this lock. */
+ db_lockmode_t mode; /* mode of this lock. */
+};
+
+/* Lock request structure. */
+struct __db_lockreq {
+ db_lockop_t op; /* Operation. */
+ db_lockmode_t mode; /* Requested mode. */
+ db_timeout_t timeout; /* Time to expire lock. */
+ DBT *obj; /* Object being locked. */
+ DB_LOCK lock; /* Lock returned. */
+};
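/*
 * Sketch: one get/put cycle through the lock_vec() vector interface
 * using the DB_LOCKREQ structure above.  The DB_ENV->lock_id() and
 * DB_ENV->lock_vec() method names and argument order are assumptions
 * based on this release's locking API, not taken from this header;
 * freeing the locker ID is omitted for brevity.
 */
#include <string.h>
#include <db.h>

static int
lock_then_release(DB_ENV *dbenv, DBT *obj)
{
	DB_LOCKREQ req[2], *errp;
	u_int32_t locker;
	int ret;

	if ((ret = dbenv->lock_id(dbenv, &locker)) != 0)
		return (ret);

	memset(req, 0, sizeof(req));
	req[0].op = DB_LOCK_GET;	/* Acquire a read lock on obj... */
	req[0].mode = DB_LOCK_READ;
	req[0].obj = obj;
	req[1].op = DB_LOCK_PUT_ALL;	/* ...then release all our locks. */

	return (dbenv->lock_vec(dbenv, locker, 0, req, 2, &errp));
}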
+
+/*******************************************************
+ * Logging.
+ *******************************************************/
+#define DB_LOGVERSION 7 /* Current log version. */
+#define DB_LOGOLDVER 7 /* Oldest log version supported. */
+#define DB_LOGMAGIC 0x040988
+
+/* Flag values for log_archive(). */
+#define DB_ARCH_ABS 0x001 /* Absolute pathnames. */
+#define DB_ARCH_DATA 0x002 /* Data files. */
+#define DB_ARCH_LOG 0x004 /* Log files. */
+
+/*
+ * A DB_LSN has two parts, a fileid which identifies a specific file, and an
+ * offset within that file. The fileid is an unsigned 4-byte quantity that
+ * uniquely identifies a file within the log directory -- currently a simple
+ * counter inside the log. The offset is also an unsigned 4-byte value. The
+ * log manager guarantees the offset is never more than 4 bytes by switching
+ * to a new log file before the maximum length imposed by an unsigned 4-byte
+ * offset is reached.
+ */
+struct __db_lsn {
+ u_int32_t file; /* File ID. */
+ u_int32_t offset; /* File offset. */
+};
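/*
 * Sketch: printing an LSN in the file/offset form the comment above
 * describes, matching the "%lu/%lu" output used by db_stat.
 */
#include <stdio.h>
#include <db.h>

static void
print_lsn(const char *label, const DB_LSN *lsn)
{
	printf("%lu/%lu\t%s\n",
	    (u_long)lsn->file, (u_long)lsn->offset, label);
}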
+
+/*
+ * DB_LOGC --
+ * Log cursor.
+ */
+struct __db_log_cursor {
+ DB_ENV *dbenv; /* Enclosing dbenv. */
+
+ DB_FH *c_fh; /* File handle. */
+ DB_LSN c_lsn; /* Cursor: LSN */
+ u_int32_t c_len; /* Cursor: record length */
+ u_int32_t c_prev; /* Cursor: previous record's offset */
+
+ DBT c_dbt; /* Return DBT. */
+
+#define DB_LOGC_BUF_SIZE (32 * 1024)
+ u_int8_t *bp; /* Allocated read buffer. */
+ u_int32_t bp_size; /* Read buffer length in bytes. */
+ u_int32_t bp_rlen; /* Read buffer valid data length. */
+ DB_LSN bp_lsn; /* Read buffer first byte LSN. */
+
+ u_int32_t bp_maxrec; /* Max record length in the log file. */
+
+ /* Methods. */
+ int (*close) __P((DB_LOGC *, u_int32_t));
+ int (*get) __P((DB_LOGC *, DB_LSN *, DBT *, u_int32_t));
+
+#define DB_LOG_DISK 0x01 /* Log record came from disk. */
+#define DB_LOG_LOCKED 0x02 /* Log region already locked */
+#define DB_LOG_SILENT_ERR 0x04 /* Turn-off error messages. */
+ u_int32_t flags;
+};
+
+/* Log statistics structure. */
+struct __db_log_stat {
+ u_int32_t st_magic; /* Log file magic number. */
+ u_int32_t st_version; /* Log file version number. */
+ int st_mode; /* Log file mode. */
+ u_int32_t st_lg_bsize; /* Log buffer size. */
+ u_int32_t st_lg_size; /* Log file size. */
+ u_int32_t st_w_bytes; /* Bytes to log. */
+ u_int32_t st_w_mbytes; /* Megabytes to log. */
+ u_int32_t st_wc_bytes; /* Bytes to log since checkpoint. */
+ u_int32_t st_wc_mbytes; /* Megabytes to log since checkpoint. */
+ u_int32_t st_wcount; /* Total writes to the log. */
+ u_int32_t st_wcount_fill; /* Overflow writes to the log. */
+ u_int32_t st_scount; /* Total syncs to the log. */
+ u_int32_t st_region_wait; /* Region lock granted after wait. */
+ u_int32_t st_region_nowait; /* Region lock granted without wait. */
+ u_int32_t st_cur_file; /* Current log file number. */
+ u_int32_t st_cur_offset; /* Current log file offset. */
+ u_int32_t st_disk_file; /* Known on disk log file number. */
+ u_int32_t st_disk_offset; /* Known on disk log file offset. */
+ u_int32_t st_regsize; /* Region size. */
+ u_int32_t st_maxcommitperflush; /* Max number of commits in a flush. */
+ u_int32_t st_mincommitperflush; /* Min number of commits in a flush. */
+};
+
+/*******************************************************
+ * Shared buffer cache (mpool).
+ *******************************************************/
+/* Flag values for DB_MPOOLFILE->get. */
+#define DB_MPOOL_CREATE 0x001 /* Create a page. */
+#define DB_MPOOL_LAST 0x002 /* Return the last page. */
+#define DB_MPOOL_NEW 0x004 /* Create a new page. */
+
+/* Flag values for DB_MPOOLFILE->put, DB_MPOOLFILE->set. */
+#define DB_MPOOL_CLEAN 0x001 /* Page is not modified. */
+#define DB_MPOOL_DIRTY 0x002 /* Page is modified. */
+#define DB_MPOOL_DISCARD 0x004 /* Don't cache the page. */
+
+/* Priority values for DB_MPOOLFILE->set_priority. */
+typedef enum {
+ DB_PRIORITY_VERY_LOW=1,
+ DB_PRIORITY_LOW=2,
+ DB_PRIORITY_DEFAULT=3,
+ DB_PRIORITY_HIGH=4,
+ DB_PRIORITY_VERY_HIGH=5
+} DB_CACHE_PRIORITY;
+
+/* Per-process DB_MPOOLFILE information. */
+struct __db_mpoolfile {
+ /* These fields need to be protected for multi-threaded support. */
+ DB_MUTEX *mutexp; /* Structure thread lock. */
+
+ DB_FH *fhp; /* Underlying file handle. */
+
+ u_int32_t ref; /* Reference count. */
+
+ /*
+ * !!!
+ * The pinref and q fields are protected by the region lock, not the
+ * DB_MPOOLFILE structure mutex. We don't use the structure mutex
+ * because then I/O (which is done with the structure lock held because of
+ * the race between the seek and write of the file descriptor) would
+ * block any other put/get calls using this DB_MPOOLFILE structure.
+ */
+ u_int32_t pinref; /* Pinned block reference count. */
+
+ /*
+ * !!!
+ * Explicit representations of structures from queue.h.
+ * TAILQ_ENTRY(__db_mpoolfile) q;
+ */
+ struct {
+ struct __db_mpoolfile *tqe_next;
+ struct __db_mpoolfile **tqe_prev;
+ } q; /* Linked list of DB_MPOOLFILE's. */
+
+ /*
+ * These fields are not thread-protected because they are initialized
+ * when the file is opened and never modified.
+ */
+ int ftype; /* File type. */
+ DBT *pgcookie; /* Byte-string passed to pgin/pgout. */
+ u_int8_t *fileid; /* Unique file ID. */
+ int32_t lsn_offset; /* LSN offset in page. */
+ u_int32_t clear_len; /* Cleared length on created pages. */
+
+ DB_MPOOL *dbmp; /* Overlying DB_MPOOL. */
+ MPOOLFILE *mfp; /* Underlying MPOOLFILE. */
+
+ void *addr; /* Address of mmap'd region. */
+ size_t len; /* Length of mmap'd region. */
+
+ /* Methods. */
+ int (*close) __P((DB_MPOOLFILE *, u_int32_t));
+ int (*get) __P((DB_MPOOLFILE *, db_pgno_t *, u_int32_t, void *));
+ void (*get_fileid) __P((DB_MPOOLFILE *, u_int8_t *));
+ void (*last_pgno) __P((DB_MPOOLFILE *, db_pgno_t *));
+ int (*open)__P((DB_MPOOLFILE *, const char *, u_int32_t, int, size_t));
+ int (*put) __P((DB_MPOOLFILE *, void *, u_int32_t));
+ void (*refcnt) __P((DB_MPOOLFILE *, db_pgno_t *));
+ int (*set) __P((DB_MPOOLFILE *, void *, u_int32_t));
+ int (*set_clear_len) __P((DB_MPOOLFILE *, u_int32_t));
+ int (*set_fileid) __P((DB_MPOOLFILE *, u_int8_t *));
+ int (*set_ftype) __P((DB_MPOOLFILE *, int));
+ int (*set_lsn_offset) __P((DB_MPOOLFILE *, int32_t));
+ int (*set_pgcookie) __P((DB_MPOOLFILE *, DBT *));
+ int (*set_priority) __P((DB_MPOOLFILE *, DB_CACHE_PRIORITY));
+ void (*set_unlink) __P((DB_MPOOLFILE *, int));
+ int (*sync) __P((DB_MPOOLFILE *));
+
+ /*
+ * MP_OPEN_CALLED and MP_READONLY do not need to be thread protected
+ * because they are initialized when the file is opened, and never
+ * modified.
+ *
+ * MP_FLUSH, MP_UPGRADE and MP_UPGRADE_FAIL are thread protected
+ * because they are potentially read by multiple threads of control.
+ */
+#define MP_FLUSH 0x001 /* Was opened to flush a buffer. */
+#define MP_OPEN_CALLED 0x002 /* File opened. */
+#define MP_READONLY 0x004 /* File is readonly. */
+#define MP_UPGRADE 0x008 /* File descriptor is readwrite. */
+#define MP_UPGRADE_FAIL 0x010 /* Upgrade wasn't possible. */
+ u_int32_t flags;
+};
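/*
 * Sketch: one pin/unpin cycle through the DB_MPOOLFILE methods listed
 * above (open/get/put/close).  DB_ENV->memp_fcreate() as the handle
 * constructor and the 512-byte page size are assumptions made for this
 * example only.
 */
#include <db.h>

static int
touch_first_page(DB_ENV *dbenv, const char *file)
{
	DB_MPOOLFILE *mpf;
	db_pgno_t pgno;
	void *page;
	int ret, t_ret;

	if ((ret = dbenv->memp_fcreate(dbenv, &mpf, 0)) != 0)
		return (ret);
	if ((ret = mpf->open(mpf, file, 0, 0, 512)) != 0)
		goto err;

	pgno = 0;			/* First page of the file. */
	if ((ret = mpf->get(mpf, &pgno, 0, &page)) != 0)
		goto err;

	/* ... examine or modify the page here ... */

	ret = mpf->put(mpf, page, DB_MPOOL_DIRTY);	/* Unpin as dirty. */

err:	if ((t_ret = mpf->close(mpf, 0)) != 0 && ret == 0)
		ret = t_ret;
	return (ret);
}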
+
+/*
+ * Mpool statistics structure.
+ */
+struct __db_mpool_stat {
+ u_int32_t st_gbytes; /* Total cache size: GB. */
+ u_int32_t st_bytes; /* Total cache size: B. */
+ u_int32_t st_ncache; /* Number of caches. */
+ u_int32_t st_regsize; /* Cache size. */
+ u_int32_t st_map; /* Pages from mapped files. */
+ u_int32_t st_cache_hit; /* Pages found in the cache. */
+ u_int32_t st_cache_miss; /* Pages not found in the cache. */
+ u_int32_t st_page_create; /* Pages created in the cache. */
+ u_int32_t st_page_in; /* Pages read in. */
+ u_int32_t st_page_out; /* Pages written out. */
+ u_int32_t st_ro_evict; /* Clean pages forced from the cache. */
+ u_int32_t st_rw_evict; /* Dirty pages forced from the cache. */
+ u_int32_t st_page_trickle; /* Pages written by memp_trickle. */
+ u_int32_t st_pages; /* Total number of pages. */
+ u_int32_t st_page_clean; /* Clean pages. */
+ u_int32_t st_page_dirty; /* Dirty pages. */
+ u_int32_t st_hash_buckets; /* Number of hash buckets. */
+ u_int32_t st_hash_searches; /* Total hash chain searches. */
+ u_int32_t st_hash_longest; /* Longest hash chain searched. */
+ u_int32_t st_hash_examined; /* Total hash entries searched. */
+ u_int32_t st_hash_nowait; /* Hash lock granted with nowait. */
+ u_int32_t st_hash_wait; /* Hash lock granted after wait. */
+ u_int32_t st_hash_max_wait; /* Max hash lock granted after wait. */
+ u_int32_t st_region_nowait; /* Region lock granted with nowait. */
+ u_int32_t st_region_wait; /* Region lock granted after wait. */
+ u_int32_t st_alloc; /* Number of page allocations. */
+ u_int32_t st_alloc_buckets; /* Buckets checked during allocation. */
+ u_int32_t st_alloc_max_buckets; /* Max checked during allocation. */
+ u_int32_t st_alloc_pages; /* Pages checked during allocation. */
+ u_int32_t st_alloc_max_pages; /* Max checked during allocation. */
+};
+
+/* Mpool file statistics structure. */
+struct __db_mpool_fstat {
+ char *file_name; /* File name. */
+ size_t st_pagesize; /* Page size. */
+ u_int32_t st_map; /* Pages from mapped files. */
+ u_int32_t st_cache_hit; /* Pages found in the cache. */
+ u_int32_t st_cache_miss; /* Pages not found in the cache. */
+ u_int32_t st_page_create; /* Pages created in the cache. */
+ u_int32_t st_page_in; /* Pages read in. */
+ u_int32_t st_page_out; /* Pages written out. */
+};
+
+/*******************************************************
+ * Transactions and recovery.
+ *******************************************************/
+#define DB_TXNVERSION 1
+
+typedef enum {
+ DB_TXN_ABORT=0, /* Public. */
+ DB_TXN_APPLY=1, /* Public. */
+ DB_TXN_BACKWARD_ALLOC=2, /* Internal. */
+ DB_TXN_BACKWARD_ROLL=3, /* Public. */
+ DB_TXN_FORWARD_ROLL=4, /* Public. */
+ DB_TXN_GETPGNOS=5, /* Internal. */
+ DB_TXN_OPENFILES=6, /* Internal. */
+ DB_TXN_POPENFILES=7, /* Internal. */
+ DB_TXN_PRINT=8 /* Public. */
+} db_recops;
+
+/*
+ * BACKWARD_ALLOC is used during the forward pass to pick up any aborted
+ * allocations for files that were created during the forward pass.
+ * The main difference between _ALLOC and _ROLL is that the entry for
+ * the file may not exist during the rollforward pass.
+ */
+#define DB_UNDO(op) ((op) == DB_TXN_ABORT || \
+ (op) == DB_TXN_BACKWARD_ROLL || (op) == DB_TXN_BACKWARD_ALLOC)
+#define DB_REDO(op) ((op) == DB_TXN_FORWARD_ROLL || (op) == DB_TXN_APPLY)
+
+struct __db_txn {
+ DB_TXNMGR *mgrp; /* Pointer to transaction manager. */
+ DB_TXN *parent; /* Pointer to transaction's parent. */
+ DB_LSN last_lsn; /* Lsn of last log write. */
+ u_int32_t txnid; /* Unique transaction id. */
+ roff_t off; /* Detail structure within region. */
+ db_timeout_t lock_timeout; /* Timeout for locks for this txn. */
+ db_timeout_t expire; /* Time this txn expires. */
+ void *txn_list; /* Undo information for parent. */
+
+ /*
+ * !!!
+ * Explicit representations of structures from queue.h.
+ * TAILQ_ENTRY(__db_txn) links;
+ */
+ struct {
+ struct __db_txn *tqe_next;
+ struct __db_txn **tqe_prev;
+ } links; /* Links transactions off manager. */
+
+ /*
+ * !!!
+ * Explicit representations of structures from queue.h.
+ * TAILQ_HEAD(__events, __txn_event) events;
+ */
+ struct {
+ struct __txn_event *tqh_first;
+ struct __txn_event **tqh_last;
+ } events;
+
+ /*
+ * !!!
+ * Explicit representations of structures from queue.h.
+ * TAILQ_HEAD(__kids, __db_txn) kids;
+ */
+ struct __kids {
+ struct __db_txn *tqh_first;
+ struct __db_txn **tqh_last;
+ } kids;
+
+ /*
+ * !!!
+ * Explicit representations of structures from queue.h.
+ * TAILQ_ENTRY(__db_txn) klinks;
+ */
+ struct {
+ struct __db_txn *tqe_next;
+ struct __db_txn **tqe_prev;
+ } klinks;
+
+ /* API-private structure: used by C++ */
+ void *api_internal;
+
+ u_int32_t cursors; /* Number of cursors open for txn */
+
+ /* Methods. */
+ int (*abort) __P((DB_TXN *));
+ int (*commit) __P((DB_TXN *, u_int32_t));
+ int (*discard) __P((DB_TXN *, u_int32_t));
+ u_int32_t (*id) __P((DB_TXN *));
+ int (*prepare) __P((DB_TXN *, u_int8_t *));
+ int (*set_timeout) __P((DB_TXN *, db_timeout_t, u_int32_t));
+
+#define TXN_CHILDCOMMIT 0x01 /* Transaction that has committed. */
+#define TXN_COMPENSATE 0x02 /* Compensating transaction. */
+#define TXN_DIRTY_READ 0x04 /* Transaction does dirty reads. */
+#define TXN_LOCKTIMEOUT 0x08 /* Transaction has a lock timeout. */
+#define TXN_MALLOC 0x10 /* Structure allocated by TXN system. */
+#define TXN_NOSYNC 0x20 /* Do not sync on prepare and commit. */
+#define TXN_NOWAIT 0x40 /* Do not wait on locks. */
+#define TXN_SYNC 0x80 /* Sync on prepare and commit. */
+ u_int32_t flags;
+};
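The abort and commit methods above are normally reached through DB_ENV->txn_begin(), declared later in this header. A minimal transaction sketch, assuming open dbenv and dbp handles and initialized key/data DBTs (a fragment, not a complete program):

    DB_TXN *txn;
    int ret;

    if ((ret = dbenv->txn_begin(dbenv, NULL, &txn, 0)) != 0)
        return (ret);
    /* Do the transactional work, then commit; abort on failure. */
    if ((ret = dbp->put(dbp, txn, &key, &data, 0)) == 0)
        ret = txn->commit(txn, 0);
    else
        (void)txn->abort(txn);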
+
+/* Transaction statistics structure. */
+struct __db_txn_active {
+ u_int32_t txnid; /* Transaction ID */
+ u_int32_t parentid; /* Transaction ID of parent */
+ DB_LSN lsn; /* LSN when transaction began */
+};
+
+struct __db_txn_stat {
+ DB_LSN st_last_ckp; /* lsn of the last checkpoint */
+ time_t st_time_ckp; /* time of last checkpoint */
+ u_int32_t st_last_txnid; /* last transaction id given out */
+ u_int32_t st_maxtxns; /* maximum txns possible */
+ u_int32_t st_naborts; /* number of aborted transactions */
+ u_int32_t st_nbegins; /* number of begun transactions */
+ u_int32_t st_ncommits; /* number of committed transactions */
+ u_int32_t st_nactive; /* number of active transactions */
+ u_int32_t st_nrestores; /* number of restored transactions
+ after recovery. */
+ u_int32_t st_maxnactive; /* maximum active transactions */
+ DB_TXN_ACTIVE *st_txnarray; /* array of active transactions */
+ u_int32_t st_region_wait; /* Region lock granted after wait. */
+ u_int32_t st_region_nowait; /* Region lock granted without wait. */
+ u_int32_t st_regsize; /* Region size. */
+};
+
+/*
+ * Structure used for the two-phase commit interface. Berkeley DB support for
+ * two-phase commit is compatible with the X/Open XA interface. The XA #define
+ * XIDDATASIZE defines the size of a global transaction ID. We have our own
+ * version here, which must have the same value.
+ */
+#define DB_XIDDATASIZE 128
+struct __db_preplist {
+ DB_TXN *txn;
+ u_int8_t gid[DB_XIDDATASIZE];
+};
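DB_PREPLIST entries are filled in by DB_ENV->txn_recover(), declared later in this header, when an application acting as a transaction manager resolves transactions left prepared but unresolved by recovery. A minimal sketch, assuming an open dbenv; the array size is arbitrary and the commit-everything policy is only illustrative:

    DB_PREPLIST prep[64];
    long count, i;
    int ret;

    if ((ret = dbenv->txn_recover(dbenv,
        prep, sizeof(prep) / sizeof(prep[0]), &count, DB_FIRST)) != 0)
        return (ret);
    for (i = 0; i < count; i++)
        /* Resolve each transaction, matching gid against the TM's records. */
        (void)prep[i].txn->commit(prep[i].txn, 0);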
+
+/*******************************************************
+ * Replication.
+ *******************************************************/
+/* Special, out-of-band environment IDs. */
+#define DB_EID_BROADCAST -1
+#define DB_EID_INVALID -2
+
+/* rep_start flags values */
+#define DB_REP_CLIENT 0x001
+#define DB_REP_LOGSONLY 0x002
+#define DB_REP_MASTER 0x004
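These flags select the role passed to DB_ENV->rep_start(), declared later in this header. A minimal sketch, assuming an open dbenv; the cdata contents are application-defined connection information, and the address string used here is purely hypothetical:

    DBT cdata;
    char addr[] = "site1:7000";     /* Hypothetical, application-defined. */
    int ret;

    memset(&cdata, 0, sizeof(cdata));
    cdata.data = addr;
    cdata.size = sizeof(addr);
    ret = dbenv->rep_start(dbenv, &cdata, DB_REP_MASTER);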
+
+/* Replication statistics. */
+struct __db_rep_stat {
+ /* !!!
+ * Many replication statistics fields cannot be protected by a mutex
+ * without an unacceptable performance penalty, since most message
+ * processing is done without the need to hold a region-wide lock.
+ * Fields whose comments end with a '+' may be updated without holding
+ * the replication or log mutexes (as appropriate), and thus may be
+ * off somewhat (or, on unreasonable architectures under unlucky
+ * circumstances, garbaged).
+ */
+ u_int32_t st_status; /* Current replication status. */
+ DB_LSN st_next_lsn; /* Next LSN to use or expect. */
+ DB_LSN st_waiting_lsn; /* LSN we're awaiting, if any. */
+
+ u_int32_t st_dupmasters; /* # of times a duplicate master
+ condition was detected.+ */
+ int st_env_id; /* Current environment ID. */
+ int st_env_priority; /* Current environment priority. */
+ u_int32_t st_gen; /* Current generation number. */
+ u_int32_t st_log_duplicated; /* Log records received multiply.+ */
+ u_int32_t st_log_queued; /* Log records currently queued.+ */
+ u_int32_t st_log_queued_max; /* Max. log records queued at once.+ */
+ u_int32_t st_log_queued_total; /* Total # of log recs. ever queued.+ */
+ u_int32_t st_log_records; /* Log records received and put.+ */
+ u_int32_t st_log_requested; /* Log recs. missed and requested.+ */
+ int st_master; /* Env. ID of the current master. */
+ u_int32_t st_master_changes; /* # of times we've switched masters. */
+ u_int32_t st_msgs_badgen; /* Messages with a bad generation #.+ */
+ u_int32_t st_msgs_processed; /* Messages received and processed.+ */
+ u_int32_t st_msgs_recover; /* Messages ignored because this site
+ was a client in recovery.+ */
+ u_int32_t st_msgs_send_failures;/* # of failed message sends.+ */
+ u_int32_t st_msgs_sent; /* # of successful message sends.+ */
+ u_int32_t st_newsites; /* # of NEWSITE msgs. received.+ */
+ int st_nsites; /* Current number of sites we will
+ assume during elections. */
+ u_int32_t st_nthrottles; /* # of times we were throttled. */
+ u_int32_t st_outdated; /* # of times we detected and returned
+ an OUTDATED condition.+ */
+ u_int32_t st_txns_applied; /* # of transactions applied.+ */
+
+ /* Elections generally. */
+ u_int32_t st_elections; /* # of elections held.+ */
+ u_int32_t st_elections_won; /* # of elections won by this site.+ */
+
+ /* Statistics about an in-progress election. */
+ int st_election_cur_winner; /* Current front-runner. */
+ u_int32_t st_election_gen; /* Election generation number. */
+ DB_LSN st_election_lsn; /* Max. LSN of current winner. */
+ int st_election_nsites; /* # of "registered voters". */
+ int st_election_priority; /* Current election priority. */
+ int st_election_status; /* Current election status. */
+ int st_election_tiebreaker; /* Election tiebreaker value. */
+ int st_election_votes; /* Votes received in this round. */
+};
+
+/*******************************************************
+ * Access methods.
+ *******************************************************/
+typedef enum {
+ DB_BTREE=1,
+ DB_HASH=2,
+ DB_RECNO=3,
+ DB_QUEUE=4,
+ DB_UNKNOWN=5 /* Figure it out on open. */
+} DBTYPE;
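The access method is fixed when a handle is opened; DB_UNKNOWN is only meaningful when opening a database that already exists. A minimal open sketch, assuming a dbenv handle (which may be NULL for a standalone database); the file name is hypothetical:

    DB *dbp;
    int ret;

    if ((ret = db_create(&dbp, dbenv, 0)) != 0)
        return (ret);
    /* DB_BTREE here; DB_UNKNOWN would take the type from the file. */
    if ((ret = dbp->open(dbp,
        NULL, "access.db", NULL, DB_BTREE, DB_CREATE, 0664)) != 0) {
        (void)dbp->close(dbp, 0);
        return (ret);
    }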
+
+#define DB_RENAMEMAGIC 0x030800 /* File has been renamed. */
+
+#define DB_BTREEVERSION 9 /* Current btree version. */
+#define DB_BTREEOLDVER 8 /* Oldest btree version supported. */
+#define DB_BTREEMAGIC 0x053162
+
+#define DB_HASHVERSION 8 /* Current hash version. */
+#define DB_HASHOLDVER 7 /* Oldest hash version supported. */
+#define DB_HASHMAGIC 0x061561
+
+#define DB_QAMVERSION 4 /* Current queue version. */
+#define DB_QAMOLDVER 3 /* Oldest queue version supported. */
+#define DB_QAMMAGIC 0x042253
+
+/*
+ * DB access method and cursor operation values. Each value is an operation
+ * code to which additional bit flags are added.
+ */
+#define DB_AFTER 1 /* c_put() */
+#define DB_APPEND 2 /* put() */
+#define DB_BEFORE 3 /* c_put() */
+#define DB_CACHED_COUNTS 4 /* stat() */
+#define DB_COMMIT 5 /* log_put() (internal) */
+#define DB_CONSUME 6 /* get() */
+#define DB_CONSUME_WAIT 7 /* get() */
+#define DB_CURRENT 8 /* c_get(), c_put(), DB_LOGC->get() */
+#define DB_FAST_STAT 9 /* stat() */
+#define DB_FIRST 10 /* c_get(), DB_LOGC->get() */
+#define DB_GET_BOTH 11 /* get(), c_get() */
+#define DB_GET_BOTHC 12 /* c_get() (internal) */
+#define DB_GET_BOTH_RANGE 13 /* get(), c_get() */
+#define DB_GET_RECNO 14 /* c_get() */
+#define DB_JOIN_ITEM 15 /* c_get(); do not do primary lookup */
+#define DB_KEYFIRST 16 /* c_put() */
+#define DB_KEYLAST 17 /* c_put() */
+#define DB_LAST 18 /* c_get(), DB_LOGC->get() */
+#define DB_NEXT 19 /* c_get(), DB_LOGC->get() */
+#define DB_NEXT_DUP 20 /* c_get() */
+#define DB_NEXT_NODUP 21 /* c_get() */
+#define DB_NODUPDATA 22 /* put(), c_put() */
+#define DB_NOOVERWRITE 23 /* put() */
+#define DB_NOSYNC 24 /* close() */
+#define DB_POSITION 25 /* c_dup() */
+#define DB_POSITIONI 26 /* c_dup() (internal) */
+#define DB_PREV 27 /* c_get(), DB_LOGC->get() */
+#define DB_PREV_NODUP 28 /* c_get(), DB_LOGC->get() */
+#define DB_RECORDCOUNT 29 /* stat() */
+#define DB_SET 30 /* c_get(), DB_LOGC->get() */
+#define DB_SET_LOCK_TIMEOUT 31 /* set_timeout() */
+#define DB_SET_RANGE 32 /* c_get() */
+#define DB_SET_RECNO 33 /* get(), c_get() */
+#define DB_SET_TXN_NOW 34 /* set_timeout() (internal) */
+#define DB_SET_TXN_TIMEOUT 35 /* set_timeout() */
+#define DB_UPDATE_SECONDARY 36 /* c_get(), c_del() (internal) */
+#define DB_WRITECURSOR 37 /* cursor() */
+#define DB_WRITELOCK 38 /* cursor() (internal) */
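Exactly one operation code is passed per call, optionally OR'd with the bit flags that follow. As one example, DB_NOOVERWRITE turns an unconditional put into an insert-if-absent; a minimal sketch, assuming initialized dbp, key and data:

    switch (ret = dbp->put(dbp, NULL, &key, &data, DB_NOOVERWRITE)) {
    case 0:                     /* Newly inserted. */
        break;
    case DB_KEYEXIST:           /* The key was already present. */
        break;
    default:
        dbp->err(dbp, ret, "DB->put");
        break;
    }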
+
+/* This has to change when the max opcode hits 255. */
+#define DB_OPFLAGS_MASK 0x000000ff /* Mask for operations flags. */
+/* DB_DIRTY_READ 0x01000000 Dirty Read. */
+#define DB_FLUSH 0x02000000 /* Flush data to disk. */
+#define DB_MULTIPLE 0x04000000 /* Return multiple data values. */
+#define DB_MULTIPLE_KEY 0x08000000 /* Return multiple data/key pairs. */
+#define DB_NOCOPY 0x10000000 /* Don't copy data */
+#define DB_PERMANENT 0x20000000 /* Flag record with REP_PERMANENT. */
+#define DB_RMW 0x40000000 /* Acquire write flag immediately. */
+#define DB_WRNOSYNC 0x80000000 /* Private: write, don't sync log_put */
+
+/*
+ * DB (user visible) error return codes.
+ *
+ * !!!
+ * For source compatibility with DB 2.X deadlock return (EAGAIN), use the
+ * following:
+ * #include <errno.h>
+ * #define DB_LOCK_DEADLOCK EAGAIN
+ *
+ * !!!
+ * We don't want our error returns to conflict with other packages where
+ * possible, so pick a base error value that's hopefully not common. We
+ * document that we own the error name space from -30,800 to -30,999.
+ */
+/* DB (public) error return codes. */
+#define DB_DONOTINDEX (-30999)/* "Null" return from 2ndary callbk. */
+#define DB_KEYEMPTY (-30998)/* Key/data deleted or never created. */
+#define DB_KEYEXIST (-30997)/* The key/data pair already exists. */
+#define DB_LOCK_DEADLOCK (-30996)/* Deadlock. */
+#define DB_LOCK_NOTGRANTED (-30995)/* Lock unavailable. */
+#define DB_NOSERVER (-30994)/* Server panic return. */
+#define DB_NOSERVER_HOME (-30993)/* Bad home sent to server. */
+#define DB_NOSERVER_ID (-30992)/* Bad ID sent to server. */
+#define DB_NOTFOUND (-30991)/* Key/data pair not found (EOF). */
+#define DB_OLD_VERSION (-30990)/* Out-of-date version. */
+#define DB_PAGE_NOTFOUND (-30989)/* Requested page not found. */
+#define DB_REP_DUPMASTER (-30988)/* There are two masters. */
+#define DB_REP_HOLDELECTION (-30987)/* Time to hold an election. */
+#define DB_REP_NEWMASTER (-30986)/* We have learned of a new master. */
+#define DB_REP_NEWSITE (-30985)/* New site entered system. */
+#define DB_REP_OUTDATED (-30984)/* Site is too far behind master. */
+#define DB_REP_UNAVAIL (-30983)/* Site cannot currently be reached. */
+#define DB_RUNRECOVERY (-30982)/* Panic return. */
+#define DB_SECONDARY_BAD (-30981)/* Secondary index corrupt. */
+#define DB_VERIFY_BAD (-30980)/* Verify failed; bad format. */
+
+/* DB (private) error return codes. */
+#define DB_ALREADY_ABORTED (-30899)
+#define DB_DELETED (-30898)/* Recovery file marked deleted. */
+#define DB_JAVA_CALLBACK (-30897)/* Exception during a java callback. */
+#define DB_LOCK_NOTEXIST (-30896)/* Object to lock is gone. */
+#define DB_NEEDSPLIT (-30895)/* Page needs to be split. */
+#define DB_SURPRISE_KID (-30894)/* Child commit where parent
+ didn't know it was a parent. */
+#define DB_SWAPBYTES (-30893)/* Database needs byte swapping. */
+#define DB_TIMEOUT (-30892)/* Timed out waiting for election. */
+#define DB_TXN_CKP (-30891)/* Encountered ckp record in log. */
+#define DB_VERIFY_FATAL (-30890)/* DB->verify cannot proceed. */
+
+/* Database handle. */
+struct __db {
+ /*******************************************************
+ * Public: owned by the application.
+ *******************************************************/
+ u_int32_t pgsize; /* Database logical page size. */
+
+ /* Callbacks. */
+ int (*db_append_recno) __P((DB *, DBT *, db_recno_t));
+ void (*db_feedback) __P((DB *, int, int));
+ int (*dup_compare) __P((DB *, const DBT *, const DBT *));
+
+ void *app_private; /* Application-private handle. */
+
+ /*******************************************************
+ * Private: owned by DB.
+ *******************************************************/
+ DB_ENV *dbenv; /* Backing environment. */
+
+ DBTYPE type; /* DB access method type. */
+
+ DB_MPOOLFILE *mpf; /* Backing buffer pool. */
+ DB_CACHE_PRIORITY priority; /* Priority in the buffer pool. */
+
+ DB_MUTEX *mutexp; /* Synchronization for free threading */
+
+ u_int8_t fileid[DB_FILE_ID_LEN];/* File's unique ID for locking. */
+
+ u_int32_t adj_fileid; /* File's unique ID for curs. adj. */
+
+#define DB_LOGFILEID_INVALID -1
+ FNAME *log_filename; /* File's naming info for logging. */
+
+ db_pgno_t meta_pgno; /* Meta page number */
+ u_int32_t lid; /* Locker id for handle locking. */
+ u_int32_t cur_lid; /* Current handle lock holder. */
+ u_int32_t associate_lid; /* Locker id for DB->associate call. */
+ DB_LOCK handle_lock; /* Lock held on this handle. */
+
+ long cl_id; /* RPC: remote client id. */
+
+ /*
+ * Returned data memory for DB->get() and friends.
+ */
+ DBT my_rskey; /* Secondary key. */
+ DBT my_rkey; /* [Primary] key. */
+ DBT my_rdata; /* Data. */
+
+ /*
+ * !!!
+ * Some applications use DB but implement their own locking outside of
+ * DB. If they're using fcntl(2) locking on the underlying database
+ * file, and we open and close a file descriptor for that file, we will
+ * discard their locks. The DB_FCNTL_LOCKING flag to DB->open is an
+ * undocumented interface to support this usage which leaves any file
+ * descriptors we open until DB->close. This will only work with the
+ * DB->open interface and simple caches, e.g., creating a transaction
+ * thread may open/close file descriptors this flag doesn't protect.
+ * Locking with fcntl(2) on a file that you don't own is a very, very
+ * unsafe thing to do. 'Nuff said.
+ */
+ DB_FH *saved_open_fhp; /* Saved file handle. */
+
+ /*
+ * Linked list of DBP's, linked from the DB_ENV, used to keep track
+ * of all open db handles for cursor adjustment.
+ *
+ * !!!
+ * Explicit representations of structures from queue.h.
+ * LIST_ENTRY(__db) dblistlinks;
+ */
+ struct {
+ struct __db *le_next;
+ struct __db **le_prev;
+ } dblistlinks;
+
+ /*
+ * Cursor queues.
+ *
+ * !!!
+ * Explicit representations of structures from queue.h.
+ * TAILQ_HEAD(__cq_fq, __dbc) free_queue;
+ * TAILQ_HEAD(__cq_aq, __dbc) active_queue;
+ * TAILQ_HEAD(__cq_jq, __dbc) join_queue;
+ */
+ struct __cq_fq {
+ struct __dbc *tqh_first;
+ struct __dbc **tqh_last;
+ } free_queue;
+ struct __cq_aq {
+ struct __dbc *tqh_first;
+ struct __dbc **tqh_last;
+ } active_queue;
+ struct __cq_jq {
+ struct __dbc *tqh_first;
+ struct __dbc **tqh_last;
+ } join_queue;
+
+ /*
+ * Secondary index support.
+ *
+ * Linked list of secondary indices -- set in the primary.
+ *
+ * !!!
+ * Explicit representations of structures from queue.h.
+ * LIST_HEAD(s_secondaries, __db);
+ */
+ struct {
+ struct __db *lh_first;
+ } s_secondaries;
+
+ /*
+ * List entries for secondaries, and reference count of how
+ * many threads are updating this secondary (see __db_c_put).
+ *
+ * !!!
+ * Note that these are synchronized by the primary's mutex, but
+ * filled in in the secondaries.
+ *
+ * !!!
+ * Explicit representations of structures from queue.h.
+ * LIST_ENTRY(__db) s_links;
+ */
+ struct {
+ struct __db *le_next;
+ struct __db **le_prev;
+ } s_links;
+ u_int32_t s_refcnt;
+
+ /* Secondary callback and free functions -- set in the secondary. */
+ int (*s_callback) __P((DB *, const DBT *, const DBT *, DBT *));
+
+ /* Reference to primary -- set in the secondary. */
+ DB *s_primary;
+
+ /* API-private structure: used by DB 1.85, C++, Java, Perl and Tcl */
+ void *api_internal;
+
+ /* Subsystem-private structure. */
+ void *bt_internal; /* Btree/Recno access method. */
+ void *h_internal; /* Hash access method. */
+ void *q_internal; /* Queue access method. */
+ void *xa_internal; /* XA. */
+
+ /* Methods. */
+ int (*associate) __P((DB *, DB_TXN *, DB *, int (*)(DB *, const DBT *,
+ const DBT *, DBT *), u_int32_t));
+ int (*close) __P((DB *, u_int32_t));
+ int (*cursor) __P((DB *, DB_TXN *, DBC **, u_int32_t));
+ int (*del) __P((DB *, DB_TXN *, DBT *, u_int32_t));
+ void (*err) __P((DB *, int, const char *, ...));
+ void (*errx) __P((DB *, const char *, ...));
+ int (*fd) __P((DB *, int *));
+ int (*get) __P((DB *, DB_TXN *, DBT *, DBT *, u_int32_t));
+ int (*pget) __P((DB *, DB_TXN *, DBT *, DBT *, DBT *, u_int32_t));
+ int (*get_byteswapped) __P((DB *, int *));
+ int (*get_type) __P((DB *, DBTYPE *));
+ int (*join) __P((DB *, DBC **, DBC **, u_int32_t));
+ int (*key_range) __P((DB *,
+ DB_TXN *, DBT *, DB_KEY_RANGE *, u_int32_t));
+ int (*open) __P((DB *, DB_TXN *,
+ const char *, const char *, DBTYPE, u_int32_t, int));
+ int (*put) __P((DB *, DB_TXN *, DBT *, DBT *, u_int32_t));
+ int (*remove) __P((DB *, const char *, const char *, u_int32_t));
+ int (*rename) __P((DB *,
+ const char *, const char *, const char *, u_int32_t));
+ int (*truncate) __P((DB *, DB_TXN *, u_int32_t *, u_int32_t));
+ int (*set_append_recno) __P((DB *, int (*)(DB *, DBT *, db_recno_t)));
+ int (*set_alloc) __P((DB *, void *(*)(size_t),
+ void *(*)(void *, size_t), void (*)(void *)));
+ int (*set_cachesize) __P((DB *, u_int32_t, u_int32_t, int));
+ int (*set_cache_priority) __P((DB *, DB_CACHE_PRIORITY));
+ int (*set_dup_compare) __P((DB *,
+ int (*)(DB *, const DBT *, const DBT *)));
+ int (*set_encrypt) __P((DB *, const char *, u_int32_t));
+ void (*set_errcall) __P((DB *, void (*)(const char *, char *)));
+ void (*set_errfile) __P((DB *, FILE *));
+ void (*set_errpfx) __P((DB *, const char *));
+ int (*set_feedback) __P((DB *, void (*)(DB *, int, int)));
+ int (*set_flags) __P((DB *, u_int32_t));
+ int (*set_lorder) __P((DB *, int));
+ int (*set_pagesize) __P((DB *, u_int32_t));
+ int (*set_paniccall) __P((DB *, void (*)(DB_ENV *, int)));
+ int (*stat) __P((DB *, void *, u_int32_t));
+ int (*sync) __P((DB *, u_int32_t));
+ int (*upgrade) __P((DB *, const char *, u_int32_t));
+ int (*verify) __P((DB *,
+ const char *, const char *, FILE *, u_int32_t));
+
+ int (*set_bt_compare) __P((DB *,
+ int (*)(DB *, const DBT *, const DBT *)));
+ int (*set_bt_maxkey) __P((DB *, u_int32_t));
+ int (*set_bt_minkey) __P((DB *, u_int32_t));
+ int (*set_bt_prefix) __P((DB *,
+ size_t (*)(DB *, const DBT *, const DBT *)));
+
+ int (*set_h_ffactor) __P((DB *, u_int32_t));
+ int (*set_h_hash) __P((DB *,
+ u_int32_t (*)(DB *, const void *, u_int32_t)));
+ int (*set_h_nelem) __P((DB *, u_int32_t));
+
+ int (*set_re_delim) __P((DB *, int));
+ int (*set_re_len) __P((DB *, u_int32_t));
+ int (*set_re_pad) __P((DB *, int));
+ int (*set_re_source) __P((DB *, const char *));
+ int (*set_q_extentsize) __P((DB *, u_int32_t));
+
+ int (*db_am_remove) __P((DB *,
+ DB_TXN *, const char *, const char *, DB_LSN *));
+ int (*db_am_rename) __P((DB *, DB_TXN *,
+ const char *, const char *, const char *));
+
+ /*
+ * Never called; these are a place to save function pointers
+ * so that we can undo an associate.
+ */
+ int (*stored_get) __P((DB *, DB_TXN *, DBT *, DBT *, u_int32_t));
+ int (*stored_close) __P((DB *, u_int32_t));
+
+#define DB_OK_BTREE 0x01
+#define DB_OK_HASH 0x02
+#define DB_OK_QUEUE 0x04
+#define DB_OK_RECNO 0x08
+ u_int32_t am_ok; /* Legal AM choices. */
+
+#define DB_AM_CHKSUM 0x00000001 /* Checksumming. */
+#define DB_AM_CL_WRITER 0x00000002 /* Allow writes in client replica. */
+#define DB_AM_COMPENSATE 0x00000004 /* Created by compensating txn. */
+#define DB_AM_CREATED 0x00000008 /* Database was created upon open. */
+#define DB_AM_CREATED_MSTR 0x00000010 /* Encompassing file was created. */
+#define DB_AM_DBM_ERROR 0x00000020 /* Error in DBM/NDBM database. */
+#define DB_AM_DELIMITER 0x00000040 /* Variable length delimiter set. */
+#define DB_AM_DIRTY 0x00000080 /* Support Dirty Reads. */
+#define DB_AM_DISCARD 0x00000100 /* Discard any cached pages. */
+#define DB_AM_DUP 0x00000200 /* DB_DUP. */
+#define DB_AM_DUPSORT 0x00000400 /* DB_DUPSORT. */
+#define DB_AM_ENCRYPT 0x00000800 /* Encryption. */
+#define DB_AM_FIXEDLEN 0x00001000 /* Fixed-length records. */
+#define DB_AM_INMEM 0x00002000 /* In-memory; no sync on close. */
+#define DB_AM_IN_RENAME 0x00004000 /* File is being renamed. */
+#define DB_AM_OPEN_CALLED 0x00008000 /* DB->open called. */
+#define DB_AM_PAD 0x00010000 /* Fixed-length record pad. */
+#define DB_AM_PGDEF 0x00020000 /* Page size was defaulted. */
+#define DB_AM_RDONLY 0x00040000 /* Database is readonly. */
+#define DB_AM_RECNUM 0x00080000 /* DB_RECNUM. */
+#define DB_AM_RECOVER 0x00100000 /* DB opened by recovery routine. */
+#define DB_AM_RENUMBER 0x00200000 /* DB_RENUMBER. */
+#define DB_AM_REVSPLITOFF 0x00400000 /* DB_REVSPLITOFF. */
+#define DB_AM_SECONDARY 0x00800000 /* Database is a secondary index. */
+#define DB_AM_SNAPSHOT 0x01000000 /* DB_SNAPSHOT. */
+#define DB_AM_SUBDB 0x02000000 /* Subdatabases supported. */
+#define DB_AM_SWAP 0x04000000 /* Pages need to be byte-swapped. */
+#define DB_AM_TXN 0x08000000 /* Opened in a transaction. */
+#define DB_AM_VERIFYING 0x10000000 /* DB handle is in the verifier. */
+ u_int32_t flags;
+};
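The secondary-index fields above are wired up by DB->associate(). A minimal sketch of a secondary keyed on part of the primary's data; the record layout, the handle names pdbp and sdbp, and the key-extractor function are all hypothetical:

    /* Hypothetical layout: the first 5 bytes of the data are a zip code. */
    int
    getzip(DB *sdbp, const DBT *pkey, const DBT *pdata, DBT *skey)
    {
        memset(skey, 0, sizeof(DBT));
        skey->data = pdata->data;
        skey->size = 5;
        return (0);
    }

    /* Fragment: associate the open secondary with the open primary, then
     * look up a primary record through the secondary with DB->pget(). */
    if ((ret = pdbp->associate(pdbp, NULL, sdbp, getzip, 0)) != 0)
        return (ret);
    ret = sdbp->pget(sdbp, NULL, &skey, &pkey, &data, 0);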
+
+/*
+ * Macros for bulk get. Note that wherever we use a DBT *, we explicitly
+ * cast it; this allows the same macros to work with C++ Dbt *'s, as Dbt
+ * is a subclass of struct DBT in C++.
+ */
+#define DB_MULTIPLE_INIT(pointer, dbt) \
+ (pointer = (u_int8_t *)((DBT *)(dbt))->data + \
+ ((DBT *)(dbt))->ulen - sizeof(u_int32_t))
+#define DB_MULTIPLE_NEXT(pointer, dbt, retdata, retdlen) \
+ do { \
+ if (*((u_int32_t *)(pointer)) == (u_int32_t)-1) { \
+ retdata = NULL; \
+ pointer = NULL; \
+ break; \
+ } \
+ retdata = (u_int8_t *) \
+ ((DBT *)(dbt))->data + *(u_int32_t *)(pointer); \
+ (pointer) = (u_int32_t *)(pointer) - 1; \
+ retdlen = *(u_int32_t *)(pointer); \
+ (pointer) = (u_int32_t *)(pointer) - 1; \
+ if (retdlen == 0 && \
+ retdata == (u_int8_t *)((DBT *)(dbt))->data) \
+ retdata = NULL; \
+ } while (0)
+#define DB_MULTIPLE_KEY_NEXT(pointer, dbt, retkey, retklen, retdata, retdlen) \
+ do { \
+ if (*((u_int32_t *)(pointer)) == (u_int32_t)-1) { \
+ retdata = NULL; \
+ retkey = NULL; \
+ pointer = NULL; \
+ break; \
+ } \
+ retkey = (u_int8_t *) \
+ ((DBT *)(dbt))->data + *(u_int32_t *)(pointer); \
+ (pointer) = (u_int32_t *)(pointer) - 1; \
+ retklen = *(u_int32_t *)(pointer); \
+ (pointer) = (u_int32_t *)(pointer) - 1; \
+ retdata = (u_int8_t *) \
+ ((DBT *)(dbt))->data + *(u_int32_t *)(pointer); \
+ (pointer) = (u_int32_t *)(pointer) - 1; \
+ retdlen = *(u_int32_t *)(pointer); \
+ (pointer) = (u_int32_t *)(pointer) - 1; \
+ } while (0)
+
+#define DB_MULTIPLE_RECNO_NEXT(pointer, dbt, recno, retdata, retdlen) \
+ do { \
+ if (*((u_int32_t *)(pointer)) == (u_int32_t)0) { \
+ recno = 0; \
+ retdata = NULL; \
+ pointer = NULL; \
+ break; \
+ } \
+ recno = *(u_int32_t *)(pointer); \
+ (pointer) = (u_int32_t *)(pointer) - 1; \
+ retdata = (u_int8_t *) \
+ ((DBT *)(dbt))->data + *(u_int32_t *)(pointer); \
+ (pointer) = (u_int32_t *)(pointer) - 1; \
+ retdlen = *(u_int32_t *)(pointer); \
+ (pointer) = (u_int32_t *)(pointer) - 1; \
+ } while (0)
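These macros are normally paired with a DB_MULTIPLE get into a caller-owned buffer. A minimal sketch, assuming an open cursor dbc; the buffer size is arbitrary but must satisfy the bulk-retrieval sizing rules, and DB_DBT_USERMEM is declared earlier in this header:

    u_int8_t buf[64 * 1024];
    DBT key, data;
    void *p, *retdata;
    u_int32_t retdlen;
    int ret;

    memset(&key, 0, sizeof(key));
    memset(&data, 0, sizeof(data));
    data.data = buf;
    data.ulen = sizeof(buf);
    data.flags = DB_DBT_USERMEM;

    if ((ret = dbc->c_get(dbc, &key, &data, DB_MULTIPLE | DB_NEXT)) == 0)
        for (DB_MULTIPLE_INIT(p, &data);;) {
            DB_MULTIPLE_NEXT(p, &data, retdata, retdlen);
            if (p == NULL)
                break;
            /* retdata/retdlen describe one returned data item. */
        }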
+
+/*******************************************************
+ * Access method cursors.
+ *******************************************************/
+struct __dbc {
+ DB *dbp; /* Related DB access method. */
+ DB_TXN *txn; /* Associated transaction. */
+
+ /*
+ * Active/free cursor queues.
+ *
+ * !!!
+ * Explicit representations of structures from queue.h.
+ * TAILQ_ENTRY(__dbc) links;
+ */
+ struct {
+ DBC *tqe_next;
+ DBC **tqe_prev;
+ } links;
+
+ /*
+ * The DBT *'s below are used by the cursor routines to return
+ * data to the user when DBT flags indicate that DB should manage
+ * the returned memory. They point at a DBT containing the buffer
+ * and length that will be used, and "belonging" to the handle that
+ * should "own" this memory. This may be a "my_*" field of this
+ * cursor--the default--or it may be the corresponding field of
+ * another cursor, a DB handle, a join cursor, etc. In general, it
+ * will be whatever handle the user originally used for the current
+ * DB interface call.
+ */
+ DBT *rskey; /* Returned secondary key. */
+ DBT *rkey; /* Returned [primary] key. */
+ DBT *rdata; /* Returned data. */
+
+ DBT my_rskey; /* Space for returned secondary key. */
+ DBT my_rkey; /* Space for returned [primary] key. */
+ DBT my_rdata; /* Space for returned data. */
+
+ u_int32_t lid; /* Default process' locker id. */
+ u_int32_t locker; /* Locker for this operation. */
+ DBT lock_dbt; /* DBT referencing lock. */
+ DB_LOCK_ILOCK lock; /* Object to be locked. */
+ DB_LOCK mylock; /* Lock held on this cursor. */
+
+ long cl_id; /* Remote client id. */
+
+ DBTYPE dbtype; /* Cursor type. */
+
+ DBC_INTERNAL *internal; /* Access method private. */
+
+ int (*c_close) __P((DBC *)); /* Methods: public. */
+ int (*c_count) __P((DBC *, db_recno_t *, u_int32_t));
+ int (*c_del) __P((DBC *, u_int32_t));
+ int (*c_dup) __P((DBC *, DBC **, u_int32_t));
+ int (*c_get) __P((DBC *, DBT *, DBT *, u_int32_t));
+ int (*c_pget) __P((DBC *, DBT *, DBT *, DBT *, u_int32_t));
+ int (*c_put) __P((DBC *, DBT *, DBT *, u_int32_t));
+
+ /* Methods: private. */
+ int (*c_am_bulk) __P((DBC *, DBT *, u_int32_t));
+ int (*c_am_close) __P((DBC *, db_pgno_t, int *));
+ int (*c_am_del) __P((DBC *));
+ int (*c_am_destroy) __P((DBC *));
+ int (*c_am_get) __P((DBC *, DBT *, DBT *, u_int32_t, db_pgno_t *));
+ int (*c_am_put) __P((DBC *, DBT *, DBT *, u_int32_t, db_pgno_t *));
+ int (*c_am_writelock) __P((DBC *));
+
+ /* Private: for secondary indices. */
+ int (*c_real_get) __P((DBC *, DBT *, DBT *, u_int32_t));
+
+#define DBC_ACTIVE 0x0001 /* Cursor in use. */
+#define DBC_COMPENSATE 0x0002 /* Cursor compensating, don't lock. */
+#define DBC_DIRTY_READ 0x0004 /* Cursor supports dirty reads. */
+#define DBC_OPD 0x0008 /* Cursor references off-page dups. */
+#define DBC_RECOVER 0x0010 /* Recovery cursor; don't log/lock. */
+#define DBC_RMW 0x0020 /* Acquire write flag in read op. */
+#define DBC_TRANSIENT 0x0040 /* Cursor is transient. */
+#define DBC_WRITECURSOR 0x0080 /* Cursor may be used to write (CDB). */
+#define DBC_WRITEDUP 0x0100 /* idup'ed DBC_WRITECURSOR (CDB). */
+#define DBC_WRITER 0x0200 /* Cursor immediately writing (CDB). */
+#define DBC_MULTIPLE 0x0400 /* Return Multiple data. */
+#define DBC_MULTIPLE_KEY 0x0800 /* Return Multiple keys and data. */
+#define DBC_OWN_LID 0x1000 /* Free lock id on destroy. */
+ u_int32_t flags;
+};
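Cursors are created from a DB handle and must be closed before that handle. A minimal traversal sketch, assuming an open dbp:

    DBC *dbc;
    DBT key, data;
    int ret, t_ret;

    memset(&key, 0, sizeof(key));
    memset(&data, 0, sizeof(data));
    if ((ret = dbp->cursor(dbp, NULL, &dbc, 0)) != 0)
        return (ret);
    while ((ret = dbc->c_get(dbc, &key, &data, DB_NEXT)) == 0)
        ;                       /* Process one key/data pair. */
    if (ret == DB_NOTFOUND)     /* Expected end-of-database return. */
        ret = 0;
    if ((t_ret = dbc->c_close(dbc)) != 0 && ret == 0)
        ret = t_ret;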
+
+/* Key range statistics structure */
+struct __key_range {
+ double less;
+ double equal;
+ double greater;
+};
+
+/* Btree/Recno statistics structure. */
+struct __db_bt_stat {
+ u_int32_t bt_magic; /* Magic number. */
+ u_int32_t bt_version; /* Version number. */
+ u_int32_t bt_metaflags; /* Metadata flags. */
+ u_int32_t bt_nkeys; /* Number of unique keys. */
+ u_int32_t bt_ndata; /* Number of data items. */
+ u_int32_t bt_pagesize; /* Page size. */
+ u_int32_t bt_maxkey; /* Maxkey value. */
+ u_int32_t bt_minkey; /* Minkey value. */
+ u_int32_t bt_re_len; /* Fixed-length record length. */
+ u_int32_t bt_re_pad; /* Fixed-length record pad. */
+ u_int32_t bt_levels; /* Tree levels. */
+ u_int32_t bt_int_pg; /* Internal pages. */
+ u_int32_t bt_leaf_pg; /* Leaf pages. */
+ u_int32_t bt_dup_pg; /* Duplicate pages. */
+ u_int32_t bt_over_pg; /* Overflow pages. */
+ u_int32_t bt_free; /* Pages on the free list. */
+ u_int32_t bt_int_pgfree; /* Bytes free in internal pages. */
+ u_int32_t bt_leaf_pgfree; /* Bytes free in leaf pages. */
+ u_int32_t bt_dup_pgfree; /* Bytes free in duplicate pages. */
+ u_int32_t bt_over_pgfree; /* Bytes free in overflow pages. */
+};
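These fields are returned by DB->stat(); the DB_FAST_STAT flag fills in only the counters that can be obtained without traversing the database. A minimal sketch, assuming an open dbp and the DB_BTREE_STAT typedef for this struct declared earlier in the header:

    DB_BTREE_STAT *sp;
    int ret;

    if ((ret = dbp->stat(dbp, &sp, DB_FAST_STAT)) != 0)
        return (ret);
    printf("keys %lu, page size %lu\n",
        (u_long)sp->bt_nkeys, (u_long)sp->bt_pagesize);
    free(sp);                   /* Allocated by the library. */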
+
+/* Hash statistics structure. */
+struct __db_h_stat {
+ u_int32_t hash_magic; /* Magic number. */
+ u_int32_t hash_version; /* Version number. */
+ u_int32_t hash_metaflags; /* Metadata flags. */
+ u_int32_t hash_nkeys; /* Number of unique keys. */
+ u_int32_t hash_ndata; /* Number of data items. */
+ u_int32_t hash_pagesize; /* Page size. */
+ u_int32_t hash_ffactor; /* Fill factor specified at create. */
+ u_int32_t hash_buckets; /* Number of hash buckets. */
+ u_int32_t hash_free; /* Pages on the free list. */
+ u_int32_t hash_bfree; /* Bytes free on bucket pages. */
+ u_int32_t hash_bigpages; /* Number of big key/data pages. */
+ u_int32_t hash_big_bfree; /* Bytes free on big item pages. */
+ u_int32_t hash_overflows; /* Number of overflow pages. */
+ u_int32_t hash_ovfl_free; /* Bytes free on ovfl pages. */
+ u_int32_t hash_dup; /* Number of dup pages. */
+ u_int32_t hash_dup_free; /* Bytes free on duplicate pages. */
+};
+
+/* Queue statistics structure. */
+struct __db_qam_stat {
+ u_int32_t qs_magic; /* Magic number. */
+ u_int32_t qs_version; /* Version number. */
+ u_int32_t qs_metaflags; /* Metadata flags. */
+ u_int32_t qs_nkeys; /* Number of unique keys. */
+ u_int32_t qs_ndata; /* Number of data items. */
+ u_int32_t qs_pagesize; /* Page size. */
+ u_int32_t qs_extentsize; /* Pages per extent. */
+ u_int32_t qs_pages; /* Data pages. */
+ u_int32_t qs_re_len; /* Fixed-length record length. */
+ u_int32_t qs_re_pad; /* Fixed-length record pad. */
+ u_int32_t qs_pgfree; /* Bytes free in data pages. */
+ u_int32_t qs_first_recno; /* First not deleted record. */
+ u_int32_t qs_cur_recno; /* Next available record number. */
+};
+
+/*******************************************************
+ * Environment.
+ *******************************************************/
+#define DB_REGION_MAGIC 0x120897 /* Environment magic number. */
+
+typedef enum {
+ DB_NOTICE_LOGFILE_CHANGED
+} db_notices;
+
+/* Database Environment handle. */
+struct __db_env {
+ /*******************************************************
+ * Public: owned by the application.
+ *******************************************************/
+ FILE *db_errfile; /* Error message file stream. */
+ const char *db_errpfx; /* Error message prefix. */
+ /* Callbacks. */
+ void (*db_errcall) __P((const char *, char *));
+ void (*db_feedback) __P((DB_ENV *, int, int));
+ void (*db_paniccall) __P((DB_ENV *, int));
+ void (*db_noticecall) __P((DB_ENV *, db_notices));
+
+ /* App-specified alloc functions. */
+ void *(*db_malloc) __P((size_t));
+ void *(*db_realloc) __P((void *, size_t));
+ void (*db_free) __P((void *));
+
+ /*
+ * Currently, the verbose list is a bit field with room for 32
+ * entries. There's no reason that it needs to be limited; if
+ * there are ever more than 32 entries, convert to a bit array.
+ */
+#define DB_VERB_CHKPOINT 0x0001 /* List checkpoints. */
+#define DB_VERB_DEADLOCK 0x0002 /* Deadlock detection information. */
+#define DB_VERB_RECOVERY 0x0004 /* Recovery information. */
+#define DB_VERB_REPLICATION 0x0008 /* Replication information. */
+#define DB_VERB_WAITSFOR 0x0010 /* Dump waits-for table. */
+ u_int32_t verbose; /* Verbose output. */
+
+ void *app_private; /* Application-private handle. */
+
+ int (*app_dispatch) /* User-specified recovery dispatch. */
+ __P((DB_ENV *, DBT *, DB_LSN *, db_recops));
+
+ /* Locking. */
+ u_int8_t *lk_conflicts; /* Two dimensional conflict matrix. */
+ u_int32_t lk_modes; /* Number of lock modes in table. */
+ u_int32_t lk_max; /* Maximum number of locks. */
+ u_int32_t lk_max_lockers;/* Maximum number of lockers. */
+ u_int32_t lk_max_objects;/* Maximum number of locked objects. */
+ u_int32_t lk_detect; /* Deadlock detect on all conflicts. */
+ db_timeout_t lk_timeout; /* Lock timeout period. */
+
+ /* Logging. */
+ u_int32_t lg_bsize; /* Buffer size. */
+ u_int32_t lg_size; /* Log file size. */
+ u_int32_t lg_regionmax; /* Region size. */
+
+ /* Memory pool. */
+ u_int32_t mp_gbytes; /* Cachesize: GB. */
+ u_int32_t mp_bytes; /* Cachesize: Bytes. */
+ size_t mp_size; /* DEPRECATED: Cachesize: bytes. */
+ int mp_ncache; /* Number of cache regions. */
+ size_t mp_mmapsize; /* Maximum file size for mmap. */
+
+ int rep_eid; /* environment id. */
+
+ /* Transactions. */
+ u_int32_t tx_max; /* Maximum number of transactions. */
+ time_t tx_timestamp; /* Recover to specific timestamp. */
+ db_timeout_t tx_timeout; /* Timeout for transactions. */
+
+ /*******************************************************
+ * Private: owned by DB.
+ *******************************************************/
+ int panic_errval; /* Panic causing errno. */
+
+ /* User files, paths. */
+ char *db_home; /* Database home. */
+ char *db_log_dir; /* Database log file directory. */
+ char *db_tmp_dir; /* Database tmp file directory. */
+
+ char **db_data_dir; /* Database data file directories. */
+ int data_cnt; /* Database data file slots. */
+ int data_next; /* Next Database data file slot. */
+
+ int db_mode; /* Default open permissions. */
+
+ void *reginfo; /* REGINFO structure reference. */
+ DB_FH *lockfhp; /* fcntl(2) locking file handle. */
+
+ int (**recover_dtab) /* Dispatch table for recover funcs. */
+ __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ size_t recover_dtab_size;
+ /* Slots in the dispatch table. */
+
+ void *cl_handle; /* RPC: remote client handle. */
+ long cl_id; /* RPC: remote client env id. */
+
+ int db_ref; /* DB reference count. */
+
+ long shm_key; /* shmget(2) key. */
+ u_int32_t tas_spins; /* test-and-set spins. */
+
+ /*
+ * List of open DB handles for this DB_ENV, used for cursor
+ * adjustment. Must be protected for multi-threaded support.
+ *
+ * !!!
+ * As this structure is allocated in per-process memory, the
+ * mutex may need to be stored elsewhere on architectures unable
+ * to support mutexes in heap memory, e.g. HP/UX 9.
+ *
+ * !!!
+ * Explicit representation of structure in queue.h.
+ * LIST_HEAD(dblist, __db);
+ */
+ DB_MUTEX *dblist_mutexp; /* Mutex. */
+ struct {
+ struct __db *lh_first;
+ } dblist;
+
+ /*
+ * XA support.
+ *
+ * !!!
+ * Explicit representations of structures from queue.h.
+ * TAILQ_ENTRY(__db_env) links;
+ */
+ struct {
+ struct __db_env *tqe_next;
+ struct __db_env **tqe_prev;
+ } links;
+ int xa_rmid; /* XA Resource Manager ID. */
+ DB_TXN *xa_txn; /* XA Current transaction. */
+
+ /* API-private structure. */
+ void *api1_internal; /* C++, Perl API private */
+ void *api2_internal; /* Java API private */
+
+ char *passwd; /* Cryptography support. */
+ size_t passwd_len;
+ void *crypto_handle; /* Primary handle. */
+ DB_MUTEX *mt_mutexp; /* Mersenne Twister mutex. */
+ int mti; /* Mersenne Twister index. */
+ u_long *mt; /* Mersenne Twister state vector. */
+
+ /* DB_ENV Methods. */
+ int (*close) __P((DB_ENV *, u_int32_t));
+ int (*dbremove) __P((DB_ENV *,
+ DB_TXN *, const char *, const char *, u_int32_t));
+ int (*dbrename) __P((DB_ENV *, DB_TXN *,
+ const char *, const char *, const char *, u_int32_t));
+ void (*err) __P((const DB_ENV *, int, const char *, ...));
+ void (*errx) __P((const DB_ENV *, const char *, ...));
+ int (*open) __P((DB_ENV *, const char *, u_int32_t, int));
+ int (*remove) __P((DB_ENV *, const char *, u_int32_t));
+ int (*set_data_dir) __P((DB_ENV *, const char *));
+ int (*set_alloc) __P((DB_ENV *, void *(*)(size_t),
+ void *(*)(void *, size_t), void (*)(void *)));
+ int (*set_app_dispatch) __P((DB_ENV *,
+ int (*)(DB_ENV *, DBT *, DB_LSN *, db_recops)));
+ int (*set_encrypt) __P((DB_ENV *, const char *, u_int32_t));
+ void (*set_errcall) __P((DB_ENV *, void (*)(const char *, char *)));
+ void (*set_errfile) __P((DB_ENV *, FILE *));
+ void (*set_errpfx) __P((DB_ENV *, const char *));
+ int (*set_feedback) __P((DB_ENV *, void (*)(DB_ENV *, int, int)));
+ int (*set_flags) __P((DB_ENV *, u_int32_t, int));
+ void (*set_noticecall) __P((DB_ENV *, void (*)(DB_ENV *, db_notices)));
+ int (*set_paniccall) __P((DB_ENV *, void (*)(DB_ENV *, int)));
+ int (*set_rpc_server) __P((DB_ENV *,
+ void *, const char *, long, long, u_int32_t));
+ int (*set_shm_key) __P((DB_ENV *, long));
+ int (*set_tas_spins) __P((DB_ENV *, u_int32_t));
+ int (*set_tmp_dir) __P((DB_ENV *, const char *));
+ int (*set_verbose) __P((DB_ENV *, u_int32_t, int));
+
+ void *lg_handle; /* Log handle and methods. */
+ int (*set_lg_bsize) __P((DB_ENV *, u_int32_t));
+ int (*set_lg_dir) __P((DB_ENV *, const char *));
+ int (*set_lg_max) __P((DB_ENV *, u_int32_t));
+ int (*set_lg_regionmax) __P((DB_ENV *, u_int32_t));
+ int (*log_archive) __P((DB_ENV *, char **[], u_int32_t));
+ int (*log_cursor) __P((DB_ENV *, DB_LOGC **, u_int32_t));
+ int (*log_file) __P((DB_ENV *, const DB_LSN *, char *, size_t));
+ int (*log_flush) __P((DB_ENV *, const DB_LSN *));
+ int (*log_put) __P((DB_ENV *, DB_LSN *, const DBT *, u_int32_t));
+ int (*log_stat) __P((DB_ENV *, DB_LOG_STAT **, u_int32_t));
+
+ void *lk_handle; /* Lock handle and methods. */
+ int (*set_lk_conflicts) __P((DB_ENV *, u_int8_t *, int));
+ int (*set_lk_detect) __P((DB_ENV *, u_int32_t));
+ int (*set_lk_max) __P((DB_ENV *, u_int32_t));
+ int (*set_lk_max_locks) __P((DB_ENV *, u_int32_t));
+ int (*set_lk_max_lockers) __P((DB_ENV *, u_int32_t));
+ int (*set_lk_max_objects) __P((DB_ENV *, u_int32_t));
+ int (*lock_detect) __P((DB_ENV *, u_int32_t, u_int32_t, int *));
+ int (*lock_dump_region) __P((DB_ENV *, char *, FILE *));
+ int (*lock_get) __P((DB_ENV *,
+ u_int32_t, u_int32_t, const DBT *, db_lockmode_t, DB_LOCK *));
+ int (*lock_put) __P((DB_ENV *, DB_LOCK *));
+ int (*lock_id) __P((DB_ENV *, u_int32_t *));
+ int (*lock_id_free) __P((DB_ENV *, u_int32_t));
+ int (*lock_id_set) __P((DB_ENV *, u_int32_t, u_int32_t));
+ int (*lock_stat) __P((DB_ENV *, DB_LOCK_STAT **, u_int32_t));
+ int (*lock_vec) __P((DB_ENV *,
+ u_int32_t, u_int32_t, DB_LOCKREQ *, int, DB_LOCKREQ **));
+ int (*lock_downgrade) __P((DB_ENV *,
+ DB_LOCK *, db_lockmode_t, u_int32_t));
+
+ void *mp_handle; /* Mpool handle and methods. */
+ int (*set_mp_mmapsize) __P((DB_ENV *, size_t));
+ int (*set_cachesize) __P((DB_ENV *, u_int32_t, u_int32_t, int));
+ int (*memp_dump_region) __P((DB_ENV *, char *, FILE *));
+ int (*memp_fcreate) __P((DB_ENV *, DB_MPOOLFILE **, u_int32_t));
+ int (*memp_nameop) __P((DB_ENV *,
+ u_int8_t *, const char *, const char *, const char *));
+ int (*memp_register) __P((DB_ENV *, int,
+ int (*)(DB_ENV *, db_pgno_t, void *, DBT *),
+ int (*)(DB_ENV *, db_pgno_t, void *, DBT *)));
+ int (*memp_stat) __P((DB_ENV *,
+ DB_MPOOL_STAT **, DB_MPOOL_FSTAT ***, u_int32_t));
+ int (*memp_sync) __P((DB_ENV *, DB_LSN *));
+ int (*memp_trickle) __P((DB_ENV *, int, int *));
+
+ void *rep_handle; /* Replication handle and methods. */
+ int (*rep_elect) __P((DB_ENV *, int, int, u_int32_t, int *));
+ int (*rep_flush) __P((DB_ENV *));
+ int (*rep_process_message) __P((DB_ENV *, DBT *, DBT *, int *));
+ int (*rep_start) __P((DB_ENV *, DBT *, u_int32_t));
+ int (*rep_stat) __P((DB_ENV *, DB_REP_STAT **, u_int32_t));
+ int (*set_rep_election) __P((DB_ENV *,
+ u_int32_t, u_int32_t, u_int32_t, u_int32_t));
+ int (*set_rep_limit) __P((DB_ENV *, u_int32_t, u_int32_t));
+ int (*set_rep_request) __P((DB_ENV *, u_int32_t, u_int32_t));
+ int (*set_rep_timeout) __P((DB_ENV *, u_int32_t, u_int32_t));
+ int (*set_rep_transport) __P((DB_ENV *, int,
+ int (*) (DB_ENV *, const DBT *, const DBT *, int, u_int32_t)));
+
+ void *tx_handle; /* Txn handle and methods. */
+ int (*set_tx_max) __P((DB_ENV *, u_int32_t));
+ int (*set_tx_timestamp) __P((DB_ENV *, time_t *));
+ int (*txn_begin) __P((DB_ENV *, DB_TXN *, DB_TXN **, u_int32_t));
+ int (*txn_checkpoint) __P((DB_ENV *, u_int32_t, u_int32_t, u_int32_t));
+ int (*txn_id_set) __P((DB_ENV *, u_int32_t, u_int32_t));
+ int (*txn_recover) __P((DB_ENV *,
+ DB_PREPLIST *, long, long *, u_int32_t));
+ int (*txn_stat) __P((DB_ENV *, DB_TXN_STAT **, u_int32_t));
+ int (*set_timeout) __P((DB_ENV *, db_timeout_t, u_int32_t));
+
+#define DB_TEST_ELECTINIT 1 /* after __rep_elect_init */
+#define DB_TEST_ELECTSEND 2 /* after REP_ELECT msg init */
+#define DB_TEST_ELECTVOTE1 3 /* after __rep_send_vote 1 */
+#define DB_TEST_ELECTVOTE2 4 /* after __rep_wait */
+#define DB_TEST_ELECTWAIT1 5 /* after REP_VOTE2 */
+#define DB_TEST_ELECTWAIT2 6 /* after __rep_wait 2 */
+#define DB_TEST_PREDESTROY 7 /* before destroy op */
+#define DB_TEST_PREOPEN 8 /* before __os_open */
+#define DB_TEST_POSTDESTROY 9 /* after destroy op */
+#define DB_TEST_POSTLOG 10 /* after logging all pages */
+#define DB_TEST_POSTLOGMETA 11 /* after logging meta in btree */
+#define DB_TEST_POSTOPEN 12 /* after __os_open */
+#define DB_TEST_POSTSYNC 13 /* after syncing the log */
+#define DB_TEST_SUBDB_LOCKS 14 /* subdb locking tests */
+ int test_abort; /* Abort value for testing. */
+ int test_copy; /* Copy value for testing. */
+
+#define DB_ENV_AUTO_COMMIT 0x0000001 /* DB_AUTO_COMMIT. */
+#define DB_ENV_CDB 0x0000002 /* DB_INIT_CDB. */
+#define DB_ENV_CDB_ALLDB 0x0000004 /* CDB environment wide locking. */
+#define DB_ENV_CREATE 0x0000008 /* DB_CREATE set. */
+#define DB_ENV_DBLOCAL 0x0000010 /* DB_ENV allocated for private DB. */
+#define DB_ENV_DIRECT_DB 0x0000020 /* DB_DIRECT_DB set. */
+#define DB_ENV_DIRECT_LOG 0x0000040 /* DB_DIRECT_LOG set. */
+#define DB_ENV_FATAL 0x0000080 /* Doing fatal recovery in env. */
+#define DB_ENV_LOCKDOWN 0x0000100 /* DB_LOCKDOWN set. */
+#define DB_ENV_NOLOCKING 0x0000200 /* DB_NOLOCKING set. */
+#define DB_ENV_NOMMAP 0x0000400 /* DB_NOMMAP set. */
+#define DB_ENV_NOPANIC 0x0000800 /* Okay if panic set. */
+#define DB_ENV_OPEN_CALLED 0x0001000 /* DB_ENV->open called. */
+#define DB_ENV_OVERWRITE 0x0002000 /* DB_OVERWRITE set. */
+#define DB_ENV_PRIVATE 0x0004000 /* DB_PRIVATE set. */
+#define DB_ENV_REGION_INIT 0x0008000 /* DB_REGION_INIT set. */
+#define DB_ENV_REP_CLIENT 0x0010000 /* Replication client. */
+#define DB_ENV_REP_LOGSONLY 0x0020000 /* Log files only replication site. */
+#define DB_ENV_REP_MASTER 0x0040000 /* Replication master. */
+#define DB_ENV_RPCCLIENT 0x0080000 /* DB_CLIENT set. */
+#define DB_ENV_RPCCLIENT_GIVEN 0x0100000 /* User-supplied RPC client struct */
+#define DB_ENV_SYSTEM_MEM 0x0200000 /* DB_SYSTEM_MEM set. */
+#define DB_ENV_THREAD 0x0400000 /* DB_THREAD set. */
+#define DB_ENV_TXN_NOSYNC 0x0800000 /* DB_TXN_NOSYNC set. */
+#define DB_ENV_TXN_WRITE_NOSYNC 0x1000000 /* DB_TXN_WRITE_NOSYNC set. */
+#define DB_ENV_YIELDCPU 0x2000000 /* DB_YIELDCPU set. */
+ u_int32_t flags;
+};
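An environment handle is created with db_env_create() and configured through the methods above before DB_ENV->open(). A minimal sketch; the home directory is hypothetical and the DB_CREATE/DB_INIT_* flags are declared earlier in this header:

    DB_ENV *dbenv;
    int ret;

    if ((ret = db_env_create(&dbenv, 0)) != 0)
        return (ret);
    (void)dbenv->set_cachesize(dbenv, 0, 4 * 1024 * 1024, 1);
    if ((ret = dbenv->open(dbenv, "/var/dbhome",
        DB_CREATE | DB_INIT_LOCK | DB_INIT_LOG |
        DB_INIT_MPOOL | DB_INIT_TXN, 0)) != 0) {
        (void)dbenv->close(dbenv, 0);
        return (ret);
    }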
+
+#ifndef DB_DBM_HSEARCH
+#define DB_DBM_HSEARCH 0 /* No historic interfaces by default. */
+#endif
+#if DB_DBM_HSEARCH != 0
+/*******************************************************
+ * Dbm/Ndbm historic interfaces.
+ *******************************************************/
+typedef struct __db DBM;
+
+#define DBM_INSERT 0 /* Flags to dbm_store(). */
+#define DBM_REPLACE 1
+
+/*
+ * The DB support for ndbm(3) always appends this suffix to the
+ * file name to avoid overwriting the user's original database.
+ */
+#define DBM_SUFFIX ".db"
+
+#if defined(_XPG4_2)
+typedef struct {
+ char *dptr;
+ size_t dsize;
+} datum;
+#else
+typedef struct {
+ char *dptr;
+ int dsize;
+} datum;
+#endif
+
+/*
+ * Translate NDBM calls into DB calls so that DB doesn't step on the
+ * application's name space.
+ */
+#define dbm_clearerr(a) __db_ndbm_clearerr@DB_VERSION_UNIQUE_NAME@(a)
+#define dbm_close(a) __db_ndbm_close@DB_VERSION_UNIQUE_NAME@(a)
+#define dbm_delete(a, b) __db_ndbm_delete@DB_VERSION_UNIQUE_NAME@(a, b)
+#define dbm_dirfno(a) __db_ndbm_dirfno@DB_VERSION_UNIQUE_NAME@(a)
+#define dbm_error(a) __db_ndbm_error@DB_VERSION_UNIQUE_NAME@(a)
+#define dbm_fetch(a, b) __db_ndbm_fetch@DB_VERSION_UNIQUE_NAME@(a, b)
+#define dbm_firstkey(a) __db_ndbm_firstkey@DB_VERSION_UNIQUE_NAME@(a)
+#define dbm_nextkey(a) __db_ndbm_nextkey@DB_VERSION_UNIQUE_NAME@(a)
+#define dbm_open(a, b, c) __db_ndbm_open@DB_VERSION_UNIQUE_NAME@(a, b, c)
+#define dbm_pagfno(a) __db_ndbm_pagfno@DB_VERSION_UNIQUE_NAME@(a)
+#define dbm_rdonly(a) __db_ndbm_rdonly@DB_VERSION_UNIQUE_NAME@(a)
+#define dbm_store(a, b, c, d) \
+ __db_ndbm_store@DB_VERSION_UNIQUE_NAME@(a, b, c, d)
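With DB_DBM_HSEARCH enabled, applications keep using the standard ndbm(3) calls and the macros above quietly route them into DB. A minimal sketch; the file name is hypothetical and <fcntl.h> is assumed for the open flags:

    DBM *db;
    datum key, val;
    char k[] = "postmaster", v[] = "root";

    if ((db = dbm_open("mail_aliases", O_CREAT | O_RDWR, 0664)) == NULL)
        return (1);
    key.dptr = k;
    key.dsize = sizeof(k) - 1;
    val.dptr = v;
    val.dsize = sizeof(v) - 1;
    (void)dbm_store(db, key, val, DBM_REPLACE);
    val = dbm_fetch(db, key);   /* val.dptr is NULL if the key is absent. */
    dbm_close(db);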
+
+/*
+ * Translate DBM calls into DB calls so that DB doesn't step on the
+ * application's name space.
+ *
+ * The global variables dbrdonly, dirf and pagf were not retained when 4BSD
+ * replaced the dbm interface with ndbm, and are not supported here.
+ */
+#define dbminit(a) __db_dbm_init@DB_VERSION_UNIQUE_NAME@(a)
+#define dbmclose __db_dbm_close@DB_VERSION_UNIQUE_NAME@
+#if !defined(__cplusplus)
+#define delete(a) __db_dbm_delete@DB_VERSION_UNIQUE_NAME@(a)
+#endif
+#define fetch(a) __db_dbm_fetch@DB_VERSION_UNIQUE_NAME@(a)
+#define firstkey __db_dbm_firstkey@DB_VERSION_UNIQUE_NAME@
+#define nextkey(a) __db_dbm_nextkey@DB_VERSION_UNIQUE_NAME@(a)
+#define store(a, b) __db_dbm_store@DB_VERSION_UNIQUE_NAME@(a, b)
+
+/*******************************************************
+ * Hsearch historic interface.
+ *******************************************************/
+typedef enum {
+ FIND, ENTER
+} ACTION;
+
+typedef struct entry {
+ char *key;
+ char *data;
+} ENTRY;
+
+#define hcreate(a) __db_hcreate@DB_VERSION_UNIQUE_NAME@(a)
+#define hdestroy __db_hdestroy@DB_VERSION_UNIQUE_NAME@
+#define hsearch(a, b) __db_hsearch@DB_VERSION_UNIQUE_NAME@(a, b)
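The hsearch(3) emulation follows the historic API: create the table, ENTER items, FIND them, and destroy the table when finished. A minimal sketch with hypothetical key/data strings:

    ENTRY item, *found;
    char k[] = "fred", d[] = "flintstone";

    if (hcreate(128) == 0)      /* Zero return means failure. */
        return (1);
    item.key = k;
    item.data = d;
    (void)hsearch(item, ENTER);
    found = hsearch(item, FIND);    /* NULL if the key is not present. */
    hdestroy();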
+
+#endif /* DB_DBM_HSEARCH */
+
+#if defined(__cplusplus)
+}
+#endif
+#endif /* !_DB_H_ */
diff --git a/bdb/include/db_185.h b/bdb/dbinc/db_185.in
index e50ebb0adb8..86e2290c304 100644
--- a/bdb/include/db_185.h
+++ b/bdb/dbinc/db_185.in
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Copyright (c) 1996-2002
* Sleepycat Software. All rights reserved.
*/
/*
@@ -32,7 +32,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
- * $Id: db_185.h,v 11.4 2000/02/14 02:59:54 bostic Exp $
+ * $Id: db_185.in,v 11.8 2002/01/11 15:52:24 bostic Exp $
*/
#ifndef _DB_185_H_
@@ -163,13 +163,7 @@ typedef struct {
char *bfname; /* btree file name */
} RECNOINFO;
-#if defined(__cplusplus)
-extern "C" {
-#endif
-#define dbopen __db185_open
-DB *__db185_open __P((const char *, int, int, DBTYPE, const void *));
+/* Re-define the user's dbopen calls. */
+#define dbopen __db185_open@DB_VERSION_UNIQUE_NAME@
-#if defined(__cplusplus)
-}
-#endif
#endif /* !_DB_185_H_ */
diff --git a/bdb/dbinc/db_am.h b/bdb/dbinc/db_am.h
new file mode 100644
index 00000000000..c5aa424255d
--- /dev/null
+++ b/bdb/dbinc/db_am.h
@@ -0,0 +1,127 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: db_am.h,v 11.61 2002/08/08 03:20:46 bostic Exp $
+ */
+#ifndef _DB_AM_H_
+#define _DB_AM_H_
+
+/*
+ * IS_AUTO_COMMIT --
+ * Test for local auto-commit flag or global flag with no local DbTxn
+ * handle.
+ */
+#define IS_AUTO_COMMIT(dbenv, txn, flags) \
+ (LF_ISSET(DB_AUTO_COMMIT) || \
+ ((txn) == NULL && F_ISSET((dbenv), DB_ENV_AUTO_COMMIT)))
+
+/* DB recovery operation codes. */
+#define DB_ADD_DUP 1
+#define DB_REM_DUP 2
+#define DB_ADD_BIG 3
+#define DB_REM_BIG 4
+#define DB_ADD_PAGE 5
+#define DB_REM_PAGE 6
+
+/*
+ * Standard initialization and shutdown macros for all recovery functions.
+ */
+#define REC_INTRO(func, inc_count) { \
+ argp = NULL; \
+ dbc = NULL; \
+ file_dbp = NULL; \
+ mpf = NULL; \
+ if ((ret = func(dbenv, dbtp->data, &argp)) != 0) \
+ goto out; \
+ if ((ret = __dbreg_id_to_db(dbenv, argp->txnid, \
+ &file_dbp, argp->fileid, inc_count)) != 0) { \
+ if (ret == DB_DELETED) { \
+ ret = 0; \
+ goto done; \
+ } \
+ goto out; \
+ } \
+ if ((ret = file_dbp->cursor(file_dbp, NULL, &dbc, 0)) != 0) \
+ goto out; \
+ F_SET(dbc, DBC_RECOVER); \
+ mpf = file_dbp->mpf; \
+}
+
+#define REC_CLOSE { \
+ int __t_ret; \
+ if (argp != NULL) \
+ __os_free(dbenv, argp); \
+ if (dbc != NULL && \
+ (__t_ret = dbc->c_close(dbc)) != 0 && ret == 0) \
+ ret = __t_ret; \
+ return (ret); \
+}
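For orientation, these macros bracket every access-method recovery function; the body between them redoes or undoes the logged change, guided by DB_REDO()/DB_UNDO(). A heavily hedged skeleton: the __foo_* names and the args structure are hypothetical stand-ins for the per-log-record routines, not actual library symbols:

    int
    __foo_recover(dbenv, dbtp, lsnp, op, info)
        DB_ENV *dbenv;
        DBT *dbtp;
        DB_LSN *lsnp;
        db_recops op;
        void *info;
    {
        __foo_args *argp;       /* Hypothetical per-record args struct. */
        DB *file_dbp;
        DBC *dbc;
        DB_MPOOLFILE *mpf;
        int ret;

        REC_PRINT(__foo_print);
        REC_INTRO(__foo_read, 1);

        /* Redo or undo the logged operation here, guided by
         * DB_REDO(op) / DB_UNDO(op) and the page and record LSNs. */

    done:   *lsnp = argp->prev_lsn;
        ret = 0;
    out:    REC_CLOSE;
    }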
+
+/*
+ * No-op versions of the same macros.
+ */
+#define REC_NOOP_INTRO(func) { \
+ argp = NULL; \
+ if ((ret = func(dbenv, dbtp->data, &argp)) != 0) \
+ return (ret); \
+}
+#define REC_NOOP_CLOSE \
+ if (argp != NULL) \
+ __os_free(dbenv, argp); \
+ return (ret); \
+
+/*
+ * Standard debugging macro for all recovery functions.
+ */
+#ifdef DEBUG_RECOVER
+#define REC_PRINT(func) \
+ (void)func(dbenv, dbtp, lsnp, op, info);
+#else
+#define REC_PRINT(func)
+#endif
+
+/*
+ * Actions to __db_lget
+ */
+#define LCK_ALWAYS 1 /* Lock even for off page dup cursors */
+#define LCK_COUPLE 2 /* Lock Couple */
+#define LCK_COUPLE_ALWAYS 3 /* Lock Couple even in txn. */
+#define LCK_DOWNGRADE 4 /* Downgrade the lock. (internal) */
+#define LCK_ROLLBACK 5 /* Lock even if in rollback */
+
+/*
+ * If doing transactions we have to hold the locks associated with a data item
+ * from a page for the entire transaction. However, we don't have to hold the
+ * locks associated with walking the tree. Distinguish between the two so that
+ * we don't tie up the internal pages of the tree longer than necessary.
+ */
+#define __LPUT(dbc, lock) \
+ (LOCK_ISSET(lock) ? \
+ (dbc)->dbp->dbenv->lock_put((dbc)->dbp->dbenv, &(lock)) : 0)
+
+/*
+ * __TLPUT -- transactional lock put
+ * If the lock is valid then
+ * If we are not in a transaction put the lock.
+ * Else if the cursor is doing dirty reads and this was a read then
+ * put the lock.
+ * Else if the db is supporting dirty reads and this is a write then
+ * downgrade it.
+ * Else do nothing.
+ */
+#define __TLPUT(dbc, lock) \
+ (LOCK_ISSET(lock) ? __db_lput(dbc, &(lock)) : 0)
+
+typedef struct {
+ DBC *dbc;
+ int count;
+} db_trunc_param;
+
+#include "dbinc/db_dispatch.h"
+#include "dbinc_auto/db_auto.h"
+#include "dbinc_auto/crdel_auto.h"
+#include "dbinc_auto/db_ext.h"
+#endif /* !_DB_AM_H_ */
diff --git a/bdb/dbinc/db_cxx.in b/bdb/dbinc/db_cxx.in
new file mode 100644
index 00000000000..6752b36ec42
--- /dev/null
+++ b/bdb/dbinc/db_cxx.in
@@ -0,0 +1,795 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: db_cxx.in,v 11.113 2002/08/23 13:02:27 mjc Exp $
+ */
+
+#ifndef _DB_CXX_H_
+#define _DB_CXX_H_
+//
+// C++ assumptions:
+//
+// To ensure portability to many platforms, both new and old, we make
+// few assumptions about the C++ compiler and library. For example,
+// we do not expect STL, templates or namespaces to be available. The
+// "newest" C++ feature used is exceptions, which are used liberally
+// to transmit error information. Even the use of exceptions can be
+// disabled at runtime; to do so, use the DB_CXX_NO_EXCEPTIONS flag
+// with the DbEnv or Db constructor.
+//
+// C++ naming conventions:
+//
+// - All top level class names start with Db.
+// - All class members start with lower case letter.
+// - All private data members are suffixed with underscore.
+// - Use underscores to divide names into multiple words.
+// - Simple data accessors are named with get_ or set_ prefix.
+// - All method names are taken from names of functions in the C
+// layer of db (usually by dropping a prefix like "db_").
+// These methods have the same argument types and order,
+// other than dropping the explicit arg that acts as "this".
+//
+// As a rule, each DbFoo object has exactly one underlying DB_FOO struct
+// (defined in db.h) associated with it. In some cases, we inherit directly
+// from the DB_FOO structure to make this relationship explicit. Often,
+// the underlying C layer allocates and deallocates these structures, so
+// there is no easy way to add any data to the DbFoo class. When you see
+// a comment about whether data is permitted to be added, this is what
+// is going on. Of course, if we need to add data to such C++ classes
+// in the future, we will arrange to have an indirect pointer to the
+// DB_FOO struct (as some of the classes already have).
+//
+
+////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////
+//
+// Forward declarations
+//
+
+#include <stdarg.h>
+
+@cxx_have_stdheaders@
+#ifdef HAVE_CXX_STDHEADERS
+#include <iostream>
+#define __DB_OSTREAMCLASS std::ostream
+#else
+#include <iostream.h>
+#define __DB_OSTREAMCLASS ostream
+#endif
+
+#include "db.h"
+#include "cxx_common.h"
+#include "cxx_except.h"
+
+class Db; // forward
+class Dbc; // forward
+class DbEnv; // forward
+class DbInfo; // forward
+class DbLock; // forward
+class DbLogc; // forward
+class DbLsn; // forward
+class DbMpoolFile; // forward
+class DbPreplist; // forward
+class Dbt; // forward
+class DbTxn; // forward
+
+// These classes are not defined here and should be invisible
+// to the user, but some compilers require forward references.
+// There is one for each use of the DEFINE_DB_CLASS macro.
+
+class DbImp;
+class DbEnvImp;
+class DbMpoolFileImp;
+class DbTxnImp;
+
+// DEFINE_DB_CLASS defines an imp_ data member and imp() accessor.
+// The underlying type is a pointer to an opaque *Imp class that
+// gets converted to the correct implementation class by the implementation.
+//
+// Since these defines use "private/public" labels, and leave the access
+// level "private", we always use them by convention before any data
+// members in the private section of a class. Keeping them in the
+// private section also emphasizes that they are off limits to user code.
+//
+#define DEFINE_DB_CLASS(name) \
+ public: class name##Imp* imp() { return (imp_); } \
+ public: const class name##Imp* constimp() const { return (imp_); } \
+ private: class name##Imp* imp_
+
+////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////
+//
+// Turn off inappropriate compiler warnings
+//
+
+#ifdef _MSC_VER
+
+// These are level 4 warnings that are explicitly disabled.
+// With Visual C++, by default you do not see warnings above level 3 unless
+// you use /W4. But we like to compile with the highest level
+// warnings to catch other errors.
+//
+// 4201: nameless struct/union
+// triggered by standard include file <winnt.h>
+//
+// 4514: unreferenced inline function has been removed
+// certain include files in MSVC define methods that are not called
+//
+#pragma warning(disable: 4201 4514)
+
+#endif
+
+// Some interfaces can be customized by allowing users to define
+// callback functions. For performance and logistical reasons, some
+// callback functions must be declared in extern "C" blocks. For others,
+// we allow you to declare the callbacks in C++ or C (or an extern "C"
+// block) as you wish. See the set methods for the callbacks for
+// the choices.
+//
+extern "C" {
+ typedef void * (*db_malloc_fcn_type)
+ (size_t);
+ typedef void * (*db_realloc_fcn_type)
+ (void *, size_t);
+ typedef void (*db_free_fcn_type)
+ (void *);
+ typedef int (*bt_compare_fcn_type) /*C++ version available*/
+ (DB *, const DBT *, const DBT *);
+ typedef size_t (*bt_prefix_fcn_type) /*C++ version available*/
+ (DB *, const DBT *, const DBT *);
+ typedef int (*dup_compare_fcn_type) /*C++ version available*/
+ (DB *, const DBT *, const DBT *);
+ typedef u_int32_t (*h_hash_fcn_type) /*C++ version available*/
+ (DB *, const void *, u_int32_t);
+ typedef int (*pgin_fcn_type)
+ (DB_ENV *dbenv, db_pgno_t pgno, void *pgaddr, DBT *pgcookie);
+ typedef int (*pgout_fcn_type)
+ (DB_ENV *dbenv, db_pgno_t pgno, void *pgaddr, DBT *pgcookie);
+};
+
+////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////
+//
+// Lock classes
+//
+
+class _exported DbLock
+{
+ friend class DbEnv;
+
+public:
+ DbLock();
+ DbLock(const DbLock &);
+ DbLock &operator = (const DbLock &);
+
+protected:
+ // We can add data to this class if needed
+ // since its contained class is not allocated by db.
+ // (see comment at top)
+
+ DbLock(DB_LOCK);
+ DB_LOCK lock_;
+};
+
+////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////
+//
+// Log classes
+//
+
+class _exported DbLsn : protected DB_LSN
+{
+ friend class DbEnv; // friendship needed to cast to base class
+ friend class DbLogc; // friendship needed to cast to base class
+};
+
+////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////
+//
+// Memory pool classes
+//
+
+class _exported DbMpoolFile
+{
+ friend class DbEnv;
+
+private:
+ // Put this first to allow inlining with some C++ compilers (g++-2.95)
+ DEFINE_DB_CLASS(DbMpoolFile);
+
+public:
+ int close(u_int32_t flags);
+ int get(db_pgno_t *pgnoaddr, u_int32_t flags, void *pagep);
+ void last_pgno(db_pgno_t *pgnoaddr);
+ int open(const char *file, u_int32_t flags, int mode, size_t pagesize);
+ int put(void *pgaddr, u_int32_t flags);
+ void refcnt(db_pgno_t *pgnoaddr);
+ int set(void *pgaddr, u_int32_t flags);
+ int set_clear_len(u_int32_t len);
+ int set_fileid(u_int8_t *fileid);
+ int set_ftype(int ftype);
+ int set_lsn_offset(int32_t offset);
+ int set_pgcookie(DBT *dbt);
+ void set_unlink(int);
+ int sync();
+
+ virtual DB_MPOOLFILE *get_DB_MPOOLFILE()
+ {
+ return (DB_MPOOLFILE *)imp();
+ }
+
+ virtual const DB_MPOOLFILE *get_const_DB_MPOOLFILE() const
+ {
+ return (const DB_MPOOLFILE *)constimp();
+ }
+
+private:
+ // We can add data to this class if needed
+ // since it is implemented via a pointer.
+ // (see comment at top)
+
+ // Note: use DbEnv::memp_fcreate() to get pointers to a DbMpoolFile,
+ // and call DbMpoolFile::close() rather than delete to release them.
+ //
+ DbMpoolFile();
+
+ // Shut g++ up.
+protected:
+ virtual ~DbMpoolFile();
+
+private:
+ // no copying
+ DbMpoolFile(const DbMpoolFile &);
+ void operator = (const DbMpoolFile &);
+};
+
+////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////
+//
+// This is filled in and returned by the DbEnv::txn_recover() method.
+//
+
+class _exported DbPreplist
+{
+public:
+ DbTxn *txn;
+ u_int8_t gid[DB_XIDDATASIZE];
+};
+
+////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////
+//
+// Transaction classes
+//
+
+class _exported DbTxn
+{
+ friend class DbEnv;
+
+private:
+ // Put this first to allow inlining with some C++ compilers (g++-2.95)
+ DEFINE_DB_CLASS(DbTxn);
+
+public:
+ int abort();
+ int commit(u_int32_t flags);
+ int discard(u_int32_t flags);
+ u_int32_t id();
+ int prepare(u_int8_t *gid);
+ int set_timeout(db_timeout_t timeout, u_int32_t flags);
+
+ virtual DB_TXN *get_DB_TXN()
+ {
+ return (DB_TXN *)imp();
+ }
+
+ virtual const DB_TXN *get_const_DB_TXN() const
+ {
+ return (const DB_TXN *)constimp();
+ }
+
+ static DbTxn* get_DbTxn(DB_TXN *txn)
+ {
+ return (DbTxn *)txn->api_internal;
+ }
+
+ static const DbTxn* get_const_DbTxn(const DB_TXN *txn)
+ {
+ return (const DbTxn *)txn->api_internal;
+ }
+
+ // For internal use only.
+ static DbTxn* wrap_DB_TXN(DB_TXN *txn);
+
+private:
+ // We can add data to this class if needed
+ // since it is implemented via a pointer.
+ // (see comment at top)
+
+ // Note: use DbEnv::txn_begin() to get pointers to a DbTxn,
+ // and call DbTxn::abort() or DbTxn::commit() rather than
+ // delete to release them.
+ //
+ DbTxn();
+ // For internal use only.
+ DbTxn(DB_TXN *txn);
+ virtual ~DbTxn();
+
+ // no copying
+ DbTxn(const DbTxn &);
+ void operator = (const DbTxn &);
+};
+
+//
+// Berkeley DB environment class. Provides functions for opening databases.
+// Users of this library can use this class as a starting point for
+// developing a DB application: derive their application class from
+// this one and add application control logic.
+//
+// Note that after constructing a DbEnv you must call open() before
+// any other db activity (e.g. opening databases).
+//
+class _exported DbEnv
+{
+ friend class Db;
+ friend class DbLock;
+ friend class DbMpoolFile;
+
+private:
+ // Put this first to allow inlining with some C++ compilers (g++-2.95)
+ DEFINE_DB_CLASS(DbEnv);
+
+public:
+ // After using this constructor, you can set any needed
+ // parameters for the environment using the set_* methods.
+ // Then call open() to finish initializing the environment
+ // and attaching it to underlying files.
+ //
+ DbEnv(u_int32_t flags);
+
+ virtual ~DbEnv();
+
+ // These methods match those in the C interface.
+ //
+ virtual int close(u_int32_t);
+ virtual int dbremove(DbTxn *txn, const char *name, const char *subdb,
+ u_int32_t flags);
+ virtual int dbrename(DbTxn *txn, const char *name, const char *subdb,
+ const char *newname, u_int32_t flags);
+ virtual void err(int, const char *, ...);
+ virtual void errx(const char *, ...);
+ virtual void *get_app_private() const;
+ virtual int open(const char *, u_int32_t, int);
+ virtual int remove(const char *, u_int32_t);
+ virtual int set_alloc(db_malloc_fcn_type, db_realloc_fcn_type,
+ db_free_fcn_type);
+ virtual void set_app_private(void *);
+ virtual int set_cachesize(u_int32_t, u_int32_t, int);
+ virtual int set_data_dir(const char *);
+ virtual int set_encrypt(const char *, int);
+ virtual void set_errcall(void (*)(const char *, char *));
+ virtual void set_errfile(FILE *);
+ virtual void set_errpfx(const char *);
+ virtual int set_flags(u_int32_t, int);
+ virtual int set_feedback(void (*)(DbEnv *, int, int));
+ virtual int set_lg_bsize(u_int32_t);
+ virtual int set_lg_dir(const char *);
+ virtual int set_lg_max(u_int32_t);
+ virtual int set_lg_regionmax(u_int32_t);
+ virtual int set_lk_conflicts(u_int8_t *, int);
+ virtual int set_lk_detect(u_int32_t);
+ virtual int set_lk_max(u_int32_t);
+ virtual int set_lk_max_lockers(u_int32_t);
+ virtual int set_lk_max_locks(u_int32_t);
+ virtual int set_lk_max_objects(u_int32_t);
+ virtual int set_mp_mmapsize(size_t);
+ virtual int set_paniccall(void (*)(DbEnv *, int));
+ virtual int set_rpc_server(void *, char *, long, long, u_int32_t);
+ virtual int set_shm_key(long);
+ virtual int set_timeout(db_timeout_t timeout, u_int32_t flags);
+ virtual int set_tmp_dir(const char *);
+ virtual int set_tas_spins(u_int32_t);
+ virtual int set_tx_max(u_int32_t);
+ virtual int set_app_dispatch(int (*)(DbEnv *,
+ Dbt *, DbLsn *, db_recops));
+ virtual int set_tx_timestamp(time_t *);
+ virtual int set_verbose(u_int32_t which, int onoff);
+
+ // Version information. A static method so it can be obtained anytime.
+ //
+ static char *version(int *major, int *minor, int *patch);
+
+ // Convert DB errors to strings
+ static char *strerror(int);
+
+ // If an error is detected and the error call function
+ // or stream is set, a message is dispatched or printed.
+ // If a prefix is set, each message is prefixed.
+ //
+ // You can use set_errcall() or set_errfile() above to control
+ // error functionality. Alternatively, you can call
+ // set_error_stream() to force all errors to a C++ stream.
+ // It is unwise to mix these approaches.
+ //
+ virtual void set_error_stream(__DB_OSTREAMCLASS *);
+
+ // used internally
+ static void runtime_error(const char *caller, int err,
+ int error_policy);
+ static void runtime_error_dbt(const char *caller, Dbt *dbt,
+ int error_policy);
+ static void runtime_error_lock_get(const char *caller, int err,
+ db_lockop_t op, db_lockmode_t mode,
+ const Dbt *obj, DbLock lock, int index,
+ int error_policy);
+
+ // Lock functions
+ //
+ virtual int lock_detect(u_int32_t flags, u_int32_t atype, int *aborted);
+ virtual int lock_get(u_int32_t locker, u_int32_t flags, const Dbt *obj,
+ db_lockmode_t lock_mode, DbLock *lock);
+ virtual int lock_id(u_int32_t *idp);
+ virtual int lock_id_free(u_int32_t id);
+ virtual int lock_put(DbLock *lock);
+ virtual int lock_stat(DB_LOCK_STAT **statp, u_int32_t flags);
+ virtual int lock_vec(u_int32_t locker, u_int32_t flags, DB_LOCKREQ list[],
+ int nlist, DB_LOCKREQ **elistp);
+
+ // Log functions
+ //
+ virtual int log_archive(char **list[], u_int32_t flags);
+ static int log_compare(const DbLsn *lsn0, const DbLsn *lsn1);
+ virtual int log_cursor(DbLogc **cursorp, u_int32_t flags);
+ virtual int log_file(DbLsn *lsn, char *namep, size_t len);
+ virtual int log_flush(const DbLsn *lsn);
+ virtual int log_put(DbLsn *lsn, const Dbt *data, u_int32_t flags);
+
+ virtual int log_stat(DB_LOG_STAT **spp, u_int32_t flags);
+
+ // Mpool functions
+ //
+ virtual int memp_fcreate(DbMpoolFile **dbmfp, u_int32_t flags);
+ virtual int memp_register(int ftype,
+ pgin_fcn_type pgin_fcn,
+ pgout_fcn_type pgout_fcn);
+ virtual int memp_stat(DB_MPOOL_STAT
+ **gsp, DB_MPOOL_FSTAT ***fsp, u_int32_t flags);
+ virtual int memp_sync(DbLsn *lsn);
+ virtual int memp_trickle(int pct, int *nwrotep);
+
+ // Transaction functions
+ //
+ virtual int txn_begin(DbTxn *pid, DbTxn **tid, u_int32_t flags);
+ virtual int txn_checkpoint(u_int32_t kbyte, u_int32_t min, u_int32_t flags);
+ virtual int txn_recover(DbPreplist *preplist, long count,
+ long *retp, u_int32_t flags);
+ virtual int txn_stat(DB_TXN_STAT **statp, u_int32_t flags);
+
+ // Replication functions
+ //
+ virtual int rep_elect(int, int, u_int32_t, int *);
+ virtual int rep_process_message(Dbt *, Dbt *, int *);
+ virtual int rep_start(Dbt *, u_int32_t);
+ virtual int rep_stat(DB_REP_STAT **statp, u_int32_t flags);
+ virtual int set_rep_limit(u_int32_t, u_int32_t);
+ virtual int set_rep_transport(u_int32_t,
+ int (*)(DbEnv *, const Dbt *, const Dbt *, int, u_int32_t));
+
+ // Conversion functions
+ //
+ virtual DB_ENV *get_DB_ENV()
+ {
+ return (DB_ENV *)imp();
+ }
+
+ virtual const DB_ENV *get_const_DB_ENV() const
+ {
+ return (const DB_ENV *)constimp();
+ }
+
+ static DbEnv* get_DbEnv(DB_ENV *dbenv)
+ {
+ return (DbEnv *)dbenv->api1_internal;
+ }
+
+ static const DbEnv* get_const_DbEnv(const DB_ENV *dbenv)
+ {
+ return (const DbEnv *)dbenv->api1_internal;
+ }
+
+ // For internal use only.
+ static DbEnv* wrap_DB_ENV(DB_ENV *dbenv);
+
+ // These are public only because they need to be called
+ // via C functions. They should never be called by users
+ // of this class.
+ //
+ static void _stream_error_function(const char *, char *);
+ static int _app_dispatch_intercept(DB_ENV *env, DBT *dbt, DB_LSN *lsn,
+ db_recops op);
+ static void _paniccall_intercept(DB_ENV *env, int errval);
+ static void _feedback_intercept(DB_ENV *env, int opcode, int pct);
+ static int _rep_send_intercept(DB_ENV *env,
+ const DBT *cntrl, const DBT *data,
+ int id, u_int32_t flags);
+
+private:
+ void cleanup();
+ int initialize(DB_ENV *env);
+ int error_policy();
+
+ // For internal use only.
+ DbEnv(DB_ENV *, u_int32_t flags);
+
+ // no copying
+ DbEnv(const DbEnv &);
+ void operator = (const DbEnv &);
+
+ // instance data
+ int construct_error_;
+ u_int32_t construct_flags_;
+ int (*app_dispatch_callback_)(DbEnv *, Dbt *, DbLsn *, db_recops);
+ void (*feedback_callback_)(DbEnv *, int, int);
+ void (*paniccall_callback_)(DbEnv *, int);
+ int (*pgin_callback_)(DbEnv *dbenv, db_pgno_t pgno,
+ void *pgaddr, Dbt *pgcookie);
+ int (*pgout_callback_)(DbEnv *dbenv, db_pgno_t pgno,
+ void *pgaddr, Dbt *pgcookie);
+ int (*rep_send_callback_)(DbEnv *,
+ const Dbt *, const Dbt *, int, u_int32_t);
+
+ // class data
+ static __DB_OSTREAMCLASS *error_stream_;
+};
+
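+// Usage sketch for the environment class above; the home directory
+// argument, error-prefix string, and flag choices are illustrative, and
+// error handling is omitted (by default, failed calls throw DbException
+// rather than returning an error code).
+//
+static int example_env_txn(const char *home)
+{
+	DbEnv env(0);
+
+	env.set_errpfx("example");
+	env.open(home, DB_CREATE | DB_INIT_MPOOL | DB_INIT_LOCK |
+	    DB_INIT_LOG | DB_INIT_TXN, 0);
+
+	// Transactions come from txn_begin() and are released with
+	// commit() or abort(), never with delete.
+	DbTxn *txn;
+	env.txn_begin(NULL, &txn, 0);
+	txn->commit(0);
+
+	return (env.close(0));
+}
+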
+////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////
+//
+// Table access classes
+//
+
+//
+// Represents a database table = a set of keys with associated values.
+//
+class _exported Db
+{
+ friend class DbEnv;
+
+private:
+ // Put this first to allow inlining with some C++ compilers (g++-2.95)
+ DEFINE_DB_CLASS(Db);
+
+public:
+ Db(DbEnv*, u_int32_t); // create a Db object, then call open()
+ virtual ~Db(); // does *not* call close.
+
+ // These methods exactly match those in the C interface.
+ //
+ virtual int associate(DbTxn *txn, Db *secondary,
+ int (*callback)(Db *, const Dbt *, const Dbt *, Dbt *),
+ u_int32_t flags);
+ virtual int close(u_int32_t flags);
+ virtual int cursor(DbTxn *txnid, Dbc **cursorp, u_int32_t flags);
+ virtual int del(DbTxn *txnid, Dbt *key, u_int32_t flags);
+ virtual void err(int, const char *, ...);
+ virtual void errx(const char *, ...);
+ virtual int fd(int *fdp);
+ virtual int get(DbTxn *txnid, Dbt *key, Dbt *data, u_int32_t flags);
+ virtual void *get_app_private() const;
+ virtual int get_byteswapped(int *);
+ virtual int get_type(DBTYPE *);
+ virtual int join(Dbc **curslist, Dbc **dbcp, u_int32_t flags);
+ virtual int key_range(DbTxn *, Dbt *, DB_KEY_RANGE *, u_int32_t);
+ virtual int open(DbTxn *txnid,
+ const char *, const char *subname, DBTYPE, u_int32_t, int);
+ virtual int pget(DbTxn *txnid, Dbt *key, Dbt *pkey, Dbt *data,
+ u_int32_t flags);
+ virtual int put(DbTxn *, Dbt *, Dbt *, u_int32_t);
+ virtual int remove(const char *, const char *, u_int32_t);
+ virtual int rename(const char *, const char *, const char *, u_int32_t);
+ virtual int set_alloc(db_malloc_fcn_type, db_realloc_fcn_type,
+ db_free_fcn_type);
+ virtual void set_app_private(void *);
+ virtual int set_append_recno(int (*)(Db *, Dbt *, db_recno_t));
+ virtual int set_bt_compare(bt_compare_fcn_type); /*deprecated*/
+ virtual int set_bt_compare(int (*)(Db *, const Dbt *, const Dbt *));
+ virtual int set_bt_maxkey(u_int32_t);
+ virtual int set_bt_minkey(u_int32_t);
+ virtual int set_bt_prefix(bt_prefix_fcn_type); /*deprecated*/
+ virtual int set_bt_prefix(size_t (*)(Db *, const Dbt *, const Dbt *));
+ virtual int set_cachesize(u_int32_t, u_int32_t, int);
+ virtual int set_cache_priority(DB_CACHE_PRIORITY);
+ virtual int set_dup_compare(dup_compare_fcn_type); /*deprecated*/
+ virtual int set_dup_compare(int (*)(Db *, const Dbt *, const Dbt *));
+ virtual int set_encrypt(const char *, int);
+ virtual void set_errcall(void (*)(const char *, char *));
+ virtual void set_errfile(FILE *);
+ virtual void set_errpfx(const char *);
+ virtual int set_feedback(void (*)(Db *, int, int));
+ virtual int set_flags(u_int32_t);
+ virtual int set_h_ffactor(u_int32_t);
+ virtual int set_h_hash(h_hash_fcn_type); /*deprecated*/
+ virtual int set_h_hash(u_int32_t (*)(Db *, const void *, u_int32_t));
+ virtual int set_h_nelem(u_int32_t);
+ virtual int set_lorder(int);
+ virtual int set_pagesize(u_int32_t);
+ virtual int set_paniccall(void (*)(DbEnv *, int));
+ virtual int set_re_delim(int);
+ virtual int set_re_len(u_int32_t);
+ virtual int set_re_pad(int);
+ virtual int set_re_source(char *);
+ virtual int set_q_extentsize(u_int32_t);
+ virtual int stat(void *sp, u_int32_t flags);
+ virtual int sync(u_int32_t flags);
+ virtual int truncate(DbTxn *, u_int32_t *, u_int32_t);
+ virtual int upgrade(const char *name, u_int32_t flags);
+ virtual int verify(const char *, const char *, __DB_OSTREAMCLASS *, u_int32_t);
+
+ // These additional methods are not in the C interface, and
+ // are only available for C++.
+ //
+ virtual void set_error_stream(__DB_OSTREAMCLASS *);
+
+ virtual DB *get_DB()
+ {
+ return (DB *)imp();
+ }
+
+ virtual const DB *get_const_DB() const
+ {
+ return (const DB *)constimp();
+ }
+
+ static Db* get_Db(DB *db)
+ {
+ return (Db *)db->api_internal;
+ }
+
+ static const Db* get_const_Db(const DB *db)
+ {
+ return (const Db *)db->api_internal;
+ }
+
+private:
+ // no copying
+ Db(const Db &);
+ Db &operator = (const Db &);
+
+ void cleanup();
+ int initialize();
+ int error_policy();
+
+ // instance data
+ DbEnv *env_;
+ int construct_error_;
+ u_int32_t flags_;
+ u_int32_t construct_flags_;
+
+public:
+ // These are public only because they need to be called
+ // via C callback functions. They should never be used by
+ // external users of this class.
+ //
+ int (*append_recno_callback_)(Db *, Dbt *, db_recno_t);
+ int (*associate_callback_)(Db *, const Dbt *, const Dbt *, Dbt *);
+ int (*bt_compare_callback_)(Db *, const Dbt *, const Dbt *);
+ size_t (*bt_prefix_callback_)(Db *, const Dbt *, const Dbt *);
+ int (*dup_compare_callback_)(Db *, const Dbt *, const Dbt *);
+ void (*feedback_callback_)(Db *, int, int);
+ u_int32_t (*h_hash_callback_)(Db *, const void *, u_int32_t);
+};
+
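+// Conversion sketch (the function names are illustrative): code holding
+// the underlying C handle, for example inside a C callback, can recover
+// the C++ wrapper with the static get_Db() method above, and get_DB()
+// goes the other way.
+static Db *example_wrapper_of(DB *dbp)
+{
+	return (Db::get_Db(dbp));
+}
+
+static DB *example_handle_of(Db *db)
+{
+	return (db->get_DB());
+}
+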
+//
+// A chunk of data, maybe a key or value.
+//
+class _exported Dbt : private DBT
+{
+ friend class Dbc;
+ friend class Db;
+ friend class DbEnv;
+ friend class DbLogc;
+
+public:
+
+ // key/data
+ void *get_data() const { return data; }
+ void set_data(void *value) { data = value; }
+
+ // key/data length
+ u_int32_t get_size() const { return size; }
+ void set_size(u_int32_t value) { size = value; }
+
+ // RO: length of user buffer.
+ u_int32_t get_ulen() const { return ulen; }
+ void set_ulen(u_int32_t value) { ulen = value; }
+
+ // RO: get/put record length.
+ u_int32_t get_dlen() const { return dlen; }
+ void set_dlen(u_int32_t value) { dlen = value; }
+
+ // RO: get/put record offset.
+ u_int32_t get_doff() const { return doff; }
+ void set_doff(u_int32_t value) { doff = value; }
+
+ // flags
+ u_int32_t get_flags() const { return flags; }
+ void set_flags(u_int32_t value) { flags = value; }
+
+ // Conversion functions
+ DBT *get_DBT() { return (DBT *)this; }
+ const DBT *get_const_DBT() const { return (const DBT *)this; }
+
+ static Dbt* get_Dbt(DBT *dbt) { return (Dbt *)dbt; }
+ static const Dbt* get_const_Dbt(const DBT *dbt)
+ { return (const Dbt *)dbt; }
+
+ Dbt(void *data, u_int32_t size);
+ Dbt();
+ ~Dbt();
+ Dbt(const Dbt &);
+ Dbt &operator = (const Dbt &);
+
+private:
+ // Note: no extra data appears in this class (other than
+ // inherited from DBT) since we need DBT and Dbt objects
+ // to have interchangeable pointers.
+ //
+ // When subclassing this class, remember that callback
+ // methods like bt_compare, bt_prefix, dup_compare may
+ // internally manufacture DBT objects (which later are
+ // cast to Dbt), so such callbacks might receive objects
+ // not of your subclassed type.
+};
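+
+// A sketch of the subclassing pattern described in the note above: the
+// class name is illustrative, and it adds behavior only in its
+// constructor, so it introduces no extra data members and stays
+// pointer-compatible with DBT.
+class ExampleMallocDbt : public Dbt
+{
+public:
+	ExampleMallocDbt() { set_flags(DB_DBT_MALLOC); }
+};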
+
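+// Usage sketch tying Db and Dbt together; the database name, key and
+// data strings, and flag choices are illustrative assumptions, and error
+// handling is omitted (failed calls throw DbException by default, while
+// DB_NOTFOUND is returned, not thrown).
+//
+static int example_put_get(DbEnv *env)
+{
+	Db db(env, 0);
+	db.open(NULL, "example.db", NULL, DB_BTREE, DB_CREATE, 0);
+
+	// A Dbt simply describes a byte buffer and its length.
+	Dbt key((void *)"fruit", sizeof("fruit"));
+	Dbt data((void *)"apple", sizeof("apple"));
+	db.put(NULL, &key, &data, 0);
+
+	// With no special flags on the Dbt, get() points it at memory
+	// managed by the library.
+	Dbt result;
+	int ret = db.get(NULL, &key, &result, 0);
+	// On success, result.get_data()/get_size() describe the value.
+
+	db.close(0);
+	return (ret);
+}
+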
+class _exported Dbc : protected DBC
+{
+ friend class Db;
+
+public:
+ int close();
+ int count(db_recno_t *countp, u_int32_t flags);
+ int del(u_int32_t flags);
+ int dup(Dbc** cursorp, u_int32_t flags);
+ int get(Dbt* key, Dbt *data, u_int32_t flags);
+ int pget(Dbt* key, Dbt* pkey, Dbt *data, u_int32_t flags);
+ int put(Dbt* key, Dbt *data, u_int32_t flags);
+
+private:
+ // No data is permitted in this class (see comment at top)
+
+ // Note: use Db::cursor() to get pointers to a Dbc,
+ // and call Dbc::close() rather than delete to release them.
+ //
+ Dbc();
+ ~Dbc();
+
+ // no copying
+ Dbc(const Dbc &);
+ Dbc &operator = (const Dbc &);
+};
+
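+// Cursor usage sketch (illustrative): iterate over every key/data pair
+// in a database. Cursors come from Db::cursor() and are released with
+// close(), never with delete.
+static void example_iterate(Db *db)
+{
+	Dbc *cursor;
+	Dbt key, data;
+
+	db->cursor(NULL, &cursor, 0);
+	while (cursor->get(&key, &data, DB_NEXT) == 0) {
+		// key.get_data()/get_size() and data.get_data()/get_size()
+		// describe the current pair.
+	}
+	cursor->close();
+}
+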
+class _exported DbLogc : protected DB_LOGC
+{
+ friend class DbEnv;
+
+public:
+ int close(u_int32_t _flags);
+ int get(DbLsn *lsn, Dbt *data, u_int32_t _flags);
+
+private:
+ // No data is permitted in this class (see comment at top)
+
+ // Note: use DbEnv::log_cursor() to get pointers to a DbLogc,
+ // and call DbLogc::close() rather than delete to release them.
+ //
+ DbLogc();
+ ~DbLogc();
+
+ // no copying
+ DbLogc(const DbLogc &);
+ DbLogc &operator = (const DbLogc &);
+};
+#endif /* !_DB_CXX_H_ */
diff --git a/bdb/include/db_dispatch.h b/bdb/dbinc/db_dispatch.h
index 003acee6f65..283eb1e95de 100644
--- a/bdb/include/db_dispatch.h
+++ b/bdb/dbinc/db_dispatch.h
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Copyright (c) 1996-2002
* Sleepycat Software. All rights reserved.
*/
/*
@@ -32,7 +32,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
- * $Id: db_dispatch.h,v 11.17 2000/12/14 07:39:13 ubell Exp $
+ * $Id: db_dispatch.h,v 11.30 2002/06/20 19:34:03 margo Exp $
*/
#ifndef _DB_DISPATCH_H_
@@ -43,13 +43,31 @@
* recovery. This is a generic list used to pass along whatever information
* we need during recovery.
*/
+typedef enum {
+ TXNLIST_DELETE,
+ TXNLIST_LSN,
+ TXNLIST_PGNO,
+ TXNLIST_TXNID
+} db_txnlist_type;
+
+#define DB_TXNLIST_MASK(hp, n) (n % hp->nslots)
struct __db_txnhead {
- LIST_HEAD(__db_headlink, __db_txnlist) head;
- u_int32_t maxid;
- int32_t generation;
+ u_int32_t maxid; /* Maximum transaction id. */
+ DB_LSN maxlsn; /* Maximum commit lsn. */
+ DB_LSN ckplsn; /* LSN of last retained checkpoint. */
+ DB_LSN trunc_lsn; /* Lsn to which we are going to truncate;
+ * make sure we abort anyone after this. */
+ int32_t generation; /* Current generation number. */
+ int32_t gen_alloc; /* Number of generations allocated. */
+ struct {
+ int32_t generation;
+ u_int32_t txn_min;
+ u_int32_t txn_max;
+ } *gen_array; /* Array of txnids associated with a gen. */
+ int nslots;
+ LIST_HEAD(__db_headlink, __db_txnlist) head[1];
};
-#define TXNLIST_INVALID_ID 0xffffffff
struct __db_txnlist {
db_txnlist_type type;
LIST_ENTRY(__db_txnlist) links;
@@ -57,17 +75,9 @@ struct __db_txnlist {
struct {
u_int32_t txnid;
int32_t generation;
- int32_t aborted;
+ int32_t status;
} t;
struct {
-#define TXNLIST_FLAG_DELETED 0x1
-#define TXNLIST_FLAG_CLOSED 0x2
- u_int32_t flags;
- int32_t fileid;
- u_int32_t count;
- char *fname;
- } d;
- struct {
int32_t ntxns;
int32_t maxn;
DB_LSN *lsn_array;
@@ -75,6 +85,7 @@ struct __db_txnlist {
struct {
int32_t nentries;
int32_t maxentry;
+ int32_t locked;
char *fname;
int32_t fileid;
db_pgno_t *pgno_array;
@@ -87,9 +98,8 @@ struct __db_txnlist {
* Flag value for __db_txnlist_lsnadd. Distinguish whether we are replacing
* an entry in the transaction list or adding a new one.
*/
-
#define TXNLIST_NEW 0x1
#define DB_user_BEGIN 10000
-#endif
+#endif /* !_DB_DISPATCH_H_ */
diff --git a/bdb/include/db_int.src b/bdb/dbinc/db_int.in
index 347169ab5cd..2f46293a65d 100644
--- a/bdb/include/db_int.src
+++ b/bdb/dbinc/db_int.in
@@ -1,20 +1,19 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Copyright (c) 1996-2002
* Sleepycat Software. All rights reserved.
*
- * $Id: db_int.src,v 11.42 2001/01/11 17:49:17 krinsky Exp $
+ * $Id: db_int.in,v 11.106 2002/09/10 02:48:08 bostic Exp $
*/
#ifndef _DB_INTERNAL_H_
#define _DB_INTERNAL_H_
/*******************************************************
- * General includes.
+ * System includes, db.h, a few general DB includes. The DB includes are
+ * here because it's OK if db_int.h includes queue structure declarations.
*******************************************************/
-#include "db.h"
-
#ifndef NO_SYSTEM_INCLUDES
#if defined(__STDC__) || defined(__cplusplus)
#include <stdarg.h>
@@ -24,8 +23,10 @@
#include <errno.h>
#endif
-#include "queue.h"
-#include "shqueue.h"
+#include "db.h"
+
+#include "dbinc/queue.h"
+#include "dbinc/shqueue.h"
#if defined(__cplusplus)
extern "C" {
@@ -43,10 +44,19 @@ extern "C" {
#define MS_PER_SEC 1000 /* Milliseconds in a second. */
#define USEC_PER_MS 1000 /* Microseconds in a millisecond. */
+#define RECNO_OOB 0 /* Illegal record number. */
+
+/* Test for a power-of-two (tests true for zero, which doesn't matter here). */
+#define POWER_OF_TWO(x) (((x) & ((x) - 1)) == 0)
+
+/* Test for valid page sizes. */
#define DB_MIN_PGSIZE 0x000200 /* Minimum page size (512). */
#define DB_MAX_PGSIZE 0x010000 /* Maximum page size (65536). */
+#define IS_VALID_PAGESIZE(x) \
+ (POWER_OF_TWO(x) && (x) >= DB_MIN_PGSIZE && ((x) <= DB_MAX_PGSIZE))
-#define RECNO_OOB 0 /* Illegal record number. */
+/* Minimum number of pages cached, by default. */
+#define DB_MINPAGECACHE 16
/*
* If we are unable to determine the underlying filesystem block size, use
@@ -79,12 +89,25 @@ extern "C" {
/* Align an integer to a specific boundary. */
#undef ALIGN
-#define ALIGN(value, bound) \
- (((value) + (bound) - 1) & ~(((u_int)bound) - 1))
+#define ALIGN(v, bound) (((v) + (bound) - 1) & ~(((db_align_t)bound) - 1))
+
+/*
+ * Print an address as a u_long (a u_long is the largest type we can print
+ * portably). Most 64-bit systems have made longs 64-bits, so this should
+ * work.
+ */
+#define P_TO_ULONG(p) ((u_long)(db_alignp_t)(p))
-/* Align a pointer to a specific boundary. */
-#undef ALIGNP
-#define ALIGNP(value, bound) ALIGN((db_alignp_t)value, bound)
+/*
+ * Convert a pointer to a small integral value.
+ *
+ * The (u_int16_t)(db_alignp_t) cast avoids warnings: the (db_alignp_t) cast
+ * converts the value to an integral type, and the (u_int16_t) cast converts
+ * it to a small integral type so we don't get complaints when we assign the
+ * final result to an integral type smaller than db_alignp_t.
+ */
+#define P_TO_UINT32(p) ((u_int32_t)(db_alignp_t)(p))
+#define P_TO_UINT16(p) ((u_int16_t)(db_alignp_t)(p))
/*
* There are several on-page structures that are declared to have a number of
@@ -100,17 +123,10 @@ extern "C" {
* an array.
*/
#undef SSZ
-#define SSZ(name, field) ((int)&(((name *)0)->field))
+#define SSZ(name, field) P_TO_UINT16(&(((name *)0)->field))
#undef SSZA
-#define SSZA(name, field) ((int)&(((name *)0)->field[0]))
-
-/*
- * Print an address as a u_long (a u_long is the largest type we can print
- * portably). Most 64-bit systems have made longs 64-bits, so this should
- * work.
- */
-#define P_TO_ULONG(p) ((u_long)(db_alignp_t)(p))
+#define SSZA(name, field) P_TO_UINT16(&(((name *)0)->field[0]))
/* Structure used to print flag values. */
typedef struct __fn {
@@ -125,9 +141,9 @@ typedef struct __fn {
#define F_CLR(p, f) (p)->flags &= ~(f)
#define F_ISSET(p, f) ((p)->flags & (f))
#define F_SET(p, f) (p)->flags |= (f)
-#define LF_CLR(f) (flags &= ~(f))
-#define LF_ISSET(f) (flags & (f))
-#define LF_SET(f) (flags |= (f))
+#define LF_CLR(f) ((flags) &= ~(f))
+#define LF_ISSET(f) ((flags) & (f))
+#define LF_SET(f) ((flags) |= (f))
/* Display separator string. */
#undef DB_LINE
@@ -137,6 +153,29 @@ typedef struct __fn {
#define COMPQUIET(n, v) (n) = (v)
/*******************************************************
+ * API return values
+ *******************************************************/
+ /*
+ * Return values that are OK for each different call. Most calls have
+ * a standard 'a return of 0 is the only OK value' rule, but some, like
+ * db->get, can return DB_NOTFOUND, which really isn't an error.
+ */
+#define DB_RETOK_STD(ret) ((ret) == 0)
+#define DB_RETOK_DBCDEL(ret) ((ret) == 0 || (ret) == DB_KEYEMPTY || \
+ (ret) == DB_NOTFOUND)
+#define DB_RETOK_DBCGET(ret) DB_RETOK_DBGET(ret)
+#define DB_RETOK_DBCPUT(ret) ((ret) == 0 || (ret) == DB_KEYEXIST || \
+ (ret) == DB_NOTFOUND)
+#define DB_RETOK_DBDEL(ret) ((ret) == 0 || (ret) == DB_NOTFOUND)
+#define DB_RETOK_DBGET(ret) ((ret) == 0 || (ret) == DB_KEYEMPTY || \
+ (ret) == DB_NOTFOUND)
+#define DB_RETOK_DBPUT(ret) ((ret) == 0 || (ret) == DB_KEYEXIST)
+#define DB_RETOK_LGGET(ret) ((ret) == 0 || (ret) == DB_NOTFOUND)
+#define DB_RETOK_MPGET(ret) ((ret) == 0 || (ret) == DB_PAGE_NOTFOUND)
+#define DB_RETOK_REPPMSG(ret) ((ret) == 0 || (ret) == DB_REP_NEWMASTER || \
+ (ret) == DB_REP_NEWSITE)
+
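+/*
+ * Illustrative use (the variable names are placeholders): a caller that
+ * treats "key not found" from DB->get as a normal outcome can write
+ *
+ *	ret = dbp->get(dbp, txn, &key, &data, 0);
+ *	if (!DB_RETOK_DBGET(ret))
+ *		goto err;
+ */
+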
+/*******************************************************
* Files.
*******************************************************/
/*
@@ -148,19 +187,20 @@ typedef struct __fn {
#define MAXPATHLEN 1024
#define PATH_DOT "." /* Current working directory. */
-#define PATH_SEPARATOR "/" /* Path separator character. */
+#define PATH_SEPARATOR "/" /* Path separator character(s). */
/*
* Flags understood by __os_open.
*/
-#define DB_OSO_CREATE 0x001 /* POSIX: O_CREAT */
-#define DB_OSO_EXCL 0x002 /* POSIX: O_EXCL */
-#define DB_OSO_LOG 0x004 /* Opening a log file. */
-#define DB_OSO_RDONLY 0x008 /* POSIX: O_RDONLY */
-#define DB_OSO_REGION 0x010 /* Opening a region file. */
-#define DB_OSO_SEQ 0x020 /* Expected sequential access. */
-#define DB_OSO_TEMP 0x040 /* Remove after last close. */
-#define DB_OSO_TRUNC 0x080 /* POSIX: O_TRUNC */
+#define DB_OSO_CREATE 0x0001 /* POSIX: O_CREAT */
+#define DB_OSO_DIRECT 0x0002 /* Don't buffer the file in the OS. */
+#define DB_OSO_EXCL 0x0004 /* POSIX: O_EXCL */
+#define DB_OSO_LOG 0x0008 /* Opening a log file. */
+#define DB_OSO_RDONLY 0x0010 /* POSIX: O_RDONLY */
+#define DB_OSO_REGION 0x0020 /* Opening a region file. */
+#define DB_OSO_SEQ 0x0040 /* Expected sequential access. */
+#define DB_OSO_TEMP 0x0080 /* Remove after last close. */
+#define DB_OSO_TRUNC 0x0100 /* POSIX: O_TRUNC */
/*
* Seek options understood by __os_seek.
@@ -184,15 +224,19 @@ typedef enum {
/*
* CDB_LOCKING CDB product locking.
+ * CRYPTO_ON Security has been configured.
* LOCKING_ON Locking has been configured.
* LOGGING_ON Logging has been configured.
* MPOOL_ON Memory pool has been configured.
+ * RPC_ON RPC has been configured.
* TXN_ON Transactions have been configured.
*/
#define CDB_LOCKING(dbenv) F_ISSET(dbenv, DB_ENV_CDB)
+#define CRYPTO_ON(dbenv) ((dbenv)->crypto_handle != NULL)
#define LOCKING_ON(dbenv) ((dbenv)->lk_handle != NULL)
#define LOGGING_ON(dbenv) ((dbenv)->lg_handle != NULL)
#define MPOOL_ON(dbenv) ((dbenv)->mp_handle != NULL)
+#define RPC_ON(dbenv) ((dbenv)->cl_handle != NULL)
#define TXN_ON(dbenv) ((dbenv)->tx_handle != NULL)
/*
@@ -205,21 +249,24 @@ typedef enum {
!CDB_LOCKING((dbc)->dbp->dbenv) && LOCKING_ON((dbc)->dbp->dbenv))
/*
- * IS_RECOVERING The system is running recovery.
+ * IS_RECOVERING: The system is running recovery.
*/
#define IS_RECOVERING(dbenv) \
(LOGGING_ON(dbenv) && \
F_ISSET((DB_LOG *)(dbenv)->lg_handle, DBLOG_RECOVER))
-/* Most initialization methods cannot be called after open is called. */
+/* Initialization methods are often illegal before/after open is called. */
#define ENV_ILLEGAL_AFTER_OPEN(dbenv, name) \
if (F_ISSET((dbenv), DB_ENV_OPEN_CALLED)) \
return (__db_mi_open(dbenv, name, 1));
+#define ENV_ILLEGAL_BEFORE_OPEN(dbenv, name) \
+ if (!F_ISSET((dbenv), DB_ENV_OPEN_CALLED)) \
+ return (__db_mi_open(dbenv, name, 0));
/* We're not actually user hostile, honest. */
-#define ENV_REQUIRES_CONFIG(dbenv, handle, subsystem) \
+#define ENV_REQUIRES_CONFIG(dbenv, handle, i, flags) \
if (handle == NULL) \
- return (__db_env_config(dbenv, subsystem));
+ return (__db_env_config(dbenv, i, flags));
/*******************************************************
* Database Access Methods.
@@ -233,15 +280,15 @@ typedef enum {
/* Initialization methods are often illegal before/after open is called. */
#define DB_ILLEGAL_AFTER_OPEN(dbp, name) \
- if (F_ISSET((dbp), DB_OPEN_CALLED)) \
- return (__db_mi_open(dbp->dbenv, name, 1));
+ if (F_ISSET((dbp), DB_AM_OPEN_CALLED)) \
+ return (__db_mi_open((dbp)->dbenv, name, 1));
#define DB_ILLEGAL_BEFORE_OPEN(dbp, name) \
- if (!F_ISSET((dbp), DB_OPEN_CALLED)) \
- return (__db_mi_open(dbp->dbenv, name, 0));
+ if (!F_ISSET((dbp), DB_AM_OPEN_CALLED)) \
+ return (__db_mi_open((dbp)->dbenv, name, 0));
/* Some initialization methods are illegal if environment isn't local. */
#define DB_ILLEGAL_IN_ENV(dbp, name) \
- if (!F_ISSET(dbp->dbenv, DB_ENV_DBLOCAL)) \
- return (__db_mi_env(dbp->dbenv, name));
+ if (!F_ISSET((dbp)->dbenv, DB_ENV_DBLOCAL)) \
+ return (__db_mi_env((dbp)->dbenv, name));
#define DB_ILLEGAL_METHOD(dbp, flags) { \
int __ret; \
if ((__ret = __dbh_am_chk(dbp, flags)) != 0) \
@@ -267,12 +314,49 @@ struct __dbc_internal {
__DBC_INTERNAL
};
+/* Actions that __db_master_update can take. */
+typedef enum { MU_REMOVE, MU_RENAME, MU_OPEN } mu_action;
+
/*
* Access-method-common macro for determining whether a cursor
* has been initialized.
*/
#define IS_INITIALIZED(dbc) ((dbc)->internal->pgno != PGNO_INVALID)
+/* Free the callback-allocated buffer, if necessary, hanging off of a DBT. */
+#define FREE_IF_NEEDED(sdbp, dbt) \
+ if (F_ISSET((dbt), DB_DBT_APPMALLOC)) { \
+ __os_ufree((sdbp)->dbenv, (dbt)->data); \
+ F_CLR((dbt), DB_DBT_APPMALLOC); \
+ }
+
+/*
+ * Use memory belonging to object "owner" to return the results of
+ * any no-DBT-flag get ops on cursor "dbc".
+ */
+#define SET_RET_MEM(dbc, owner) \
+ do { \
+ (dbc)->rskey = &(owner)->my_rskey; \
+ (dbc)->rkey = &(owner)->my_rkey; \
+ (dbc)->rdata = &(owner)->my_rdata; \
+ } while (0)
+
+/* Use the return-data memory that src is currently set to use in dest as well. */
+#define COPY_RET_MEM(src, dest) \
+ do { \
+ (dest)->rskey = (src)->rskey; \
+ (dest)->rkey = (src)->rkey; \
+ (dest)->rdata = (src)->rdata; \
+ } while (0)
+
+/* Reset the returned-memory pointers to their defaults. */
+#define RESET_RET_MEM(dbc) \
+ do { \
+ (dbc)->rskey = &(dbc)->my_rskey; \
+ (dbc)->rkey = &(dbc)->my_rkey; \
+ (dbc)->rdata = &(dbc)->my_rdata; \
+ } while (0)
+
/*******************************************************
* Mpool.
*******************************************************/
@@ -285,7 +369,8 @@ struct __dbc_internal {
/* Structure used as the DB pgin/pgout pgcookie. */
typedef struct __dbpginfo {
size_t db_pagesize; /* Underlying page size. */
- int needswap; /* If swapping required. */
+ u_int32_t flags; /* Some DB_AM flags needed. */
+ DBTYPE type; /* DB type */
} DB_PGINFO;
/*******************************************************
@@ -296,102 +381,93 @@ typedef struct __dbpginfo {
(LSN).file = 0; \
(LSN).offset = 0; \
} while (0)
-
-/* Return 1 if LSN is a 'zero' lsn, otherwise return 0. */
#define IS_ZERO_LSN(LSN) ((LSN).file == 0)
-/* Test if we need to log a change. */
-#define DB_LOGGING(dbc) \
- (LOGGING_ON((dbc)->dbp->dbenv) && !F_ISSET(dbc, DBC_RECOVER))
+#define IS_INIT_LSN(LSN) ((LSN).file == 1 && (LSN).offset == 0)
+#define INIT_LSN(LSN) do { \
+ (LSN).file = 1; \
+ (LSN).offset = 0; \
+} while (0)
+
+#define MAX_LSN(LSN) do { \
+ (LSN).file = UINT32_T_MAX; \
+ (LSN).offset = UINT32_T_MAX; \
+} while (0)
+#define IS_MAX_LSN(LSN) \
+ ((LSN).file == UINT32_T_MAX && (LSN).offset == UINT32_T_MAX)
+
+/* If logging is turned off, smash the lsn. */
+#define LSN_NOT_LOGGED(LSN) do { \
+ (LSN).file = 0; \
+ (LSN).offset = 1; \
+} while (0)
+#define IS_NOT_LOGGED_LSN(LSN) \
+ ((LSN).file == 0 && (LSN).offset == 1)
+
+/*
+ * Test if the environment is currently logging changes. If we're in
+ * recovery or we're a replication client, we don't need to log changes
+ * because they're already in the log, even though we have a fully functional
+ * log system.
+ */
+#define DBENV_LOGGING(dbenv) \
+ (LOGGING_ON(dbenv) && !F_ISSET((dbenv), DB_ENV_REP_CLIENT) && \
+ (!IS_RECOVERING(dbenv)))
+
+/*
+ * Test if we need to log a change. Note that the DBC_RECOVER flag is set
+ * when we're in abort, as well as during recovery; thus DBC_LOGGING may be
+ * false for a particular dbc even when DBENV_LOGGING is true.
+ *
+ * We explicitly use LOGGING_ON/DB_ENV_REP_CLIENT here because we don't
+ * want to have to pull in the log headers, which IS_RECOVERING (and thus
+ * DBENV_LOGGING) rely on, and because DBC_RECOVER should be set anytime
+ * IS_RECOVERING would be true.
+ */
+#define DBC_LOGGING(dbc) \
+ (LOGGING_ON((dbc)->dbp->dbenv) && !F_ISSET((dbc), DBC_RECOVER) && \
+ !F_ISSET((dbc)->dbp->dbenv, DB_ENV_REP_CLIENT))
-/* Internal flag for use with internal __log_unregister. */
-#define DB_LOGONLY 0x01
/*******************************************************
* Txn.
*******************************************************/
#define DB_NONBLOCK(C) ((C)->txn != NULL && F_ISSET((C)->txn, TXN_NOWAIT))
-#define IS_SUBTRANSACTION(txn) \
+#define IS_SUBTRANSACTION(txn) \
((txn) != NULL && (txn)->parent != NULL)
/*******************************************************
- * Global variables.
+ * Crypto.
*******************************************************/
-#ifdef HAVE_VXWORKS
-#include "semLib.h"
-#endif
+#define DB_IV_BYTES 16 /* Bytes per IV */
+#define DB_MAC_KEY 20 /* Bytes per MAC checksum */
-/*
- * DB global variables. Done in a single structure to minimize the name-space
- * pollution.
- */
-typedef struct __db_globals {
- u_int32_t db_pageyield; /* db_set_pageyield */
- u_int32_t db_panic; /* db_set_panic */
- u_int32_t db_region_init; /* db_set_region_init */
- u_int32_t db_tas_spins; /* db_set_tas_spins */
-#ifdef HAVE_VXWORKS
- u_int32_t db_global_init; /* VxWorks: inited */
- SEM_ID db_global_lock; /* VxWorks: global semaphore */
-#endif
- /* XA: list of opened environments. */
- TAILQ_HEAD(__db_envq, __db_env) db_envq;
-} DB_GLOBALS;
-
-#ifdef DB_INITIALIZE_DB_GLOBALS
-DB_GLOBALS __db_global_values = {
- 0, /* db_set_pageyield */
- 1, /* db_set_panic */
- 0, /* db_set_region_init */
- 0, /* db_set_tas_spins */
-#ifdef HAVE_VXWORKS
- 0, /* db_global_init */
- NULL, /* db_global_lock */
-#endif
- /* XA environment queue */
- {NULL, &__db_global_values.db_envq.tqh_first}
-};
-#else
-extern DB_GLOBALS __db_global_values;
-#endif
-#define DB_GLOBAL(v) __db_global_values.v
-
-/* Forward structure declarations. */
+/*******************************************************
+ * Forward structure declarations.
+ *******************************************************/
struct __db_reginfo_t; typedef struct __db_reginfo_t REGINFO;
-struct __mutex_t; typedef struct __mutex_t MUTEX;
+struct __db_txnhead; typedef struct __db_txnhead DB_TXNHEAD;
+struct __db_txnlist; typedef struct __db_txnlist DB_TXNLIST;
struct __vrfy_childinfo; typedef struct __vrfy_childinfo VRFY_CHILDINFO;
struct __vrfy_dbinfo; typedef struct __vrfy_dbinfo VRFY_DBINFO;
struct __vrfy_pageinfo; typedef struct __vrfy_pageinfo VRFY_PAGEINFO;
-struct __db_txnlist; typedef struct __db_txnlist DB_TXNLIST;
-struct __db_txnhead; typedef struct __db_txnhead DB_TXNHEAD;
-typedef enum {
- TXNLIST_DELETE,
- TXNLIST_LSN,
- TXNLIST_TXNID,
- TXNLIST_PGNO
-} db_txnlist_type;
-
-/*
- * Currently, region offsets are limited to 32-bits. I expect that's going
- * to have to be fixed in the not-too-distant future, since we won't want to
- * split 100Gb memory pools into that many different regions. It's typedef'd
- * so it won't be too painful to upgrade.
- */
-typedef u_int32_t roff_t;
#if defined(__cplusplus)
}
#endif
/*******************************************************
- * More general includes.
+ * Remaining general DB includes.
*******************************************************/
-#include "debug.h"
-#include "mutex.h"
-#include "region.h"
-#include "mutex_ext.h"
-#include "env_ext.h"
-#include "os.h"
-#include "os_ext.h"
-#include "common_ext.h"
+@db_int_def@
+
+#include "dbinc/globals.h"
+#include "dbinc/debug.h"
+#include "dbinc/mutex.h"
+#include "dbinc/region.h"
+#include "dbinc_auto/mutex_ext.h" /* XXX: Include after region.h. */
+#include "dbinc_auto/env_ext.h"
+#include "dbinc/os.h"
+#include "dbinc_auto/clib_ext.h"
+#include "dbinc_auto/common_ext.h"
#endif /* !_DB_INTERNAL_H_ */
diff --git a/bdb/include/db_join.h b/bdb/dbinc/db_join.h
index d92887bb589..487ce3eebbb 100644
--- a/bdb/include/db_join.h
+++ b/bdb/dbinc/db_join.h
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1998, 1999, 2000
+ * Copyright (c) 1998-2002
* Sleepycat Software. All rights reserved.
*
* @(#)db_join.h 11.1 (Sleepycat) 7/25/99
@@ -22,9 +22,10 @@ typedef struct __join_cursor {
DBC **j_workcurs; /* Scratch cursor copies to muck with. */
DB *j_primary; /* Primary dbp. */
DBT j_key; /* Used to do lookups. */
+ DBT j_rdata; /* Memory used for data return. */
u_int32_t j_ncurs; /* How many cursors do we have? */
#define JOIN_RETRY 0x01 /* Error on primary get; re-return same key. */
u_int32_t flags;
} JOIN_CURSOR;
-#endif
+#endif /* !_DB_JOIN_H_ */
diff --git a/bdb/include/db_page.h b/bdb/dbinc/db_page.h
index 8066424143b..97497556fd9 100644
--- a/bdb/include/db_page.h
+++ b/bdb/dbinc/db_page.h
@@ -1,10 +1,10 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Copyright (c) 1996-2002
* Sleepycat Software. All rights reserved.
*
- * $Id: db_page.h,v 11.28 2000/12/06 19:55:45 ubell Exp $
+ * $Id: db_page.h,v 11.52 2002/09/13 21:24:04 bostic Exp $
*/
#ifndef _DB_PAGE_H_
@@ -65,28 +65,31 @@ extern "C" {
* The magic and version numbers have to be in the same place in all versions
* of the metadata page as the application may not have upgraded the database.
************************************************************************/
-typedef struct _dbmeta31 {
+typedef struct _dbmeta33 {
DB_LSN lsn; /* 00-07: LSN. */
db_pgno_t pgno; /* 08-11: Current page number. */
u_int32_t magic; /* 12-15: Magic number. */
u_int32_t version; /* 16-19: Version. */
u_int32_t pagesize; /* 20-23: Pagesize. */
- u_int8_t unused1[1]; /* 24: Unused. */
+ u_int8_t encrypt_alg; /* 24: Encryption algorithm. */
u_int8_t type; /* 25: Page type. */
- u_int8_t unused2[2]; /* 26-27: Unused. */
+#define DBMETA_CHKSUM 0x01
+ u_int8_t metaflags; /* 26: Meta-only flags */
+ u_int8_t unused1; /* 27: Unused. */
u_int32_t free; /* 28-31: Free list page number. */
- DB_LSN unused3; /* 32-39: former Lsn for allocation */
+ db_pgno_t last_pgno; /* 32-35: Page number of last page in db. */
+ u_int32_t unused3; /* 36-39: Unused. */
u_int32_t key_count; /* 40-43: Cached key count. */
u_int32_t record_count; /* 44-47: Cached record count. */
u_int32_t flags; /* 48-51: Flags: unique to each AM. */
/* 52-71: Unique file ID. */
u_int8_t uid[DB_FILE_ID_LEN];
-} DBMETA31, DBMETA;
+} DBMETA33, DBMETA;
/************************************************************************
BTREE METADATA PAGE LAYOUT
************************************************************************/
-typedef struct _btmeta31 {
+typedef struct _btmeta33 {
#define BTM_DUP 0x001 /* Duplicates. */
#define BTM_RECNO 0x002 /* Recno tree. */
#define BTM_RECNUM 0x004 /* Btree: maintain record count. */
@@ -101,17 +104,22 @@ typedef struct _btmeta31 {
u_int32_t minkey; /* 76-79: Btree: Minkey. */
u_int32_t re_len; /* 80-83: Recno: fixed-length record length. */
u_int32_t re_pad; /* 84-87: Recno: fixed-length record pad. */
- u_int32_t root; /* 88-92: Root page. */
+ u_int32_t root; /* 88-91: Root page. */
+ u_int32_t unused[92]; /* 92-459: Unused space */
+ u_int32_t crypto_magic; /* 460-463: Crypto magic number */
+ u_int32_t trash[3]; /* 464-475: Trash space - Do not use */
+ u_int8_t iv[DB_IV_BYTES]; /* 476-495: Crypto IV */
+ u_int8_t chksum[DB_MAC_KEY]; /* 496-511: Page chksum */
/*
- * Minimum page size is 128.
+ * Minimum page size is 512.
*/
-} BTMETA31, BTMETA;
+} BTMETA33, BTMETA;
/************************************************************************
HASH METADATA PAGE LAYOUT
************************************************************************/
-typedef struct _hashmeta31 {
+typedef struct _hashmeta33 {
#define DB_HASH_DUP 0x01 /* Duplicates. */
#define DB_HASH_SUBDB 0x02 /* Subdatabases. */
#define DB_HASH_DUPSORT 0x04 /* Duplicates are sorted. */
@@ -126,11 +134,16 @@ typedef struct _hashmeta31 {
#define NCACHED 32 /* number of spare points */
/* 96-223: Spare pages for overflow */
u_int32_t spares[NCACHED];
+ u_int32_t unused[59]; /* 224-459: Unused space */
+ u_int32_t crypto_magic; /* 460-463: Crypto magic number */
+ u_int32_t trash[3]; /* 464-475: Trash space - Do not use */
+ u_int8_t iv[DB_IV_BYTES]; /* 476-495: Crypto IV */
+ u_int8_t chksum[DB_MAC_KEY]; /* 496-511: Page chksum */
/*
- * Minimum page size is 256.
+ * Minimum page size is 512.
*/
-} HMETA31, HMETA;
+} HMETA33, HMETA;
/************************************************************************
QUEUE METADATA PAGE LAYOUT
@@ -139,27 +152,32 @@ typedef struct _hashmeta31 {
* QAM Meta data page structure
*
*/
-typedef struct _qmeta32 {
+typedef struct _qmeta33 {
DBMETA dbmeta; /* 00-71: Generic meta-data header. */
u_int32_t first_recno; /* 72-75: First not deleted record. */
- u_int32_t cur_recno; /* 76-79: Last recno allocated. */
+ u_int32_t cur_recno; /* 76-79: Next recno to be allocated. */
u_int32_t re_len; /* 80-83: Fixed-length record length. */
u_int32_t re_pad; /* 84-87: Fixed-length record pad. */
u_int32_t rec_page; /* 88-91: Records Per Page. */
u_int32_t page_ext; /* 92-95: Pages per extent */
+ u_int32_t unused[91]; /* 96-459: Unused space */
+ u_int32_t crypto_magic; /* 460-463: Crypto magic number */
+ u_int32_t trash[3]; /* 464-475: Trash space - Do not use */
+ u_int8_t iv[DB_IV_BYTES]; /* 476-495: Crypto IV */
+ u_int8_t chksum[DB_MAC_KEY]; /* 496-511: Page chksum */
/*
- * Minimum page size is 128.
+ * Minimum page size is 512.
*/
-} QMETA32, QMETA;
+} QMETA33, QMETA;
/*
* DBMETASIZE is a constant used by __db_file_setup and DB->verify
* as a buffer which is guaranteed to be larger than any possible
* metadata page size and smaller than any disk sector.
*/
-#define DBMETASIZE 256
+#define DBMETASIZE 512
/************************************************************************
BTREE/HASH MAIN PAGE LAYOUT
@@ -170,9 +188,9 @@ typedef struct _qmeta32 {
* +-----------------------------------+
* | next pgno | entries | hf offset |
* +-----------------------------------+
- * | level | type | index |
+ * | level | type | chksum |
* +-----------------------------------+
- * | index | free --> |
+ * | iv | index | free --> |
* +-----------+-----------------------+
* | F R E E A R E A |
* +-----------------------------------+
@@ -181,12 +199,28 @@ typedef struct _qmeta32 {
* | item | item | item |
* +-----------------------------------+
*
- * sizeof(PAGE) == 26 bytes, and the following indices are guaranteed to be
- * two-byte aligned.
+ * sizeof(PAGE) == 26 bytes + possibly 20 bytes of checksum and possibly
+ * 16 bytes of IV (+ 2 bytes for alignment), and the following indices
+ * are guaranteed to be two-byte aligned. If we aren't doing crypto or
+ * checksumming the bytes are reclaimed for data storage.
*
* For hash and btree leaf pages, index items are paired, e.g., inp[0] is the
* key for inp[1]'s data. All other types of pages only contain single items.
*/
+typedef struct __pg_chksum {
+ u_int8_t unused[2]; /* 26-27: For alignment */
+ u_int8_t chksum[4]; /* 28-31: Checksum */
+} PG_CHKSUM;
+
+typedef struct __pg_crypto {
+ u_int8_t unused[2]; /* 26-27: For alignment */
+ u_int8_t chksum[DB_MAC_KEY]; /* 28-47: Checksum */
+ u_int8_t iv[DB_IV_BYTES]; /* 48-63: IV */
+ /* !!!
+ * Must be 16-byte aligned for crypto
+ */
+} PG_CRYPTO;
+
typedef struct _db_page {
DB_LSN lsn; /* 00-07: Log sequence number. */
db_pgno_t pgno; /* 08-11: Current page number. */
@@ -207,9 +241,30 @@ typedef struct _db_page {
#define MAXBTREELEVEL 255
u_int8_t level; /* 24: Btree tree level. */
u_int8_t type; /* 25: Page type. */
- db_indx_t inp[1]; /* Variable length index of items. */
} PAGE;
+#define SIZEOF_PAGE 26
+/*
+ * !!!
+ * DB_AM_ENCRYPT always implies DB_AM_CHKSUM so that must come first.
+ */
+#define P_INP(dbp, pg) \
+ ((db_indx_t *)((u_int8_t *)(pg) + SIZEOF_PAGE + \
+ (F_ISSET((dbp), DB_AM_ENCRYPT) ? sizeof(PG_CRYPTO) : \
+ (F_ISSET((dbp), DB_AM_CHKSUM) ? sizeof(PG_CHKSUM) : 0))))
+
+#define P_IV(dbp, pg) \
+ (F_ISSET((dbp), DB_AM_ENCRYPT) ? ((u_int8_t *)(pg) + \
+ SIZEOF_PAGE + SSZA(PG_CRYPTO, iv)) \
+ : NULL)
+
+#define P_CHKSUM(dbp, pg) \
+ (F_ISSET((dbp), DB_AM_ENCRYPT) ? ((u_int8_t *)(pg) + \
+ SIZEOF_PAGE + SSZA(PG_CRYPTO, chksum)) : \
+ (F_ISSET((dbp), DB_AM_CHKSUM) ? ((u_int8_t *)(pg) + \
+ SIZEOF_PAGE + SSZA(PG_CHKSUM, chksum)) \
+ : NULL))
+
/* PAGE element macros. */
#define LSN(p) (((PAGE *)p)->lsn)
#define PGNO(p) (((PAGE *)p)->pgno)
@@ -223,6 +278,15 @@ typedef struct _db_page {
/************************************************************************
QUEUE MAIN PAGE LAYOUT
************************************************************************/
+/*
+ * Sizes of page below. Used to reclaim space if not doing
+ * crypto or checksumming. If you change the QPAGE below you
+ * MUST adjust this too.
+ */
+#define QPAGE_NORMAL 28
+#define QPAGE_CHKSUM 48
+#define QPAGE_SEC 64
+
typedef struct _qpage {
DB_LSN lsn; /* 00-07: Log sequence number. */
db_pgno_t pgno; /* 08-11: Current page number. */
@@ -230,8 +294,13 @@ typedef struct _qpage {
u_int8_t unused1[1]; /* 24: Unused. */
u_int8_t type; /* 25: Page type. */
u_int8_t unused2[2]; /* 26-27: Unused. */
+ u_int8_t chksum[DB_MAC_KEY]; /* 28-47: Checksum */
+ u_int8_t iv[DB_IV_BYTES]; /* 48-63: IV */
} QPAGE;
+#define QPAGE_SZ(dbp) \
+ (F_ISSET((dbp), DB_AM_ENCRYPT) ? QPAGE_SEC : \
+ F_ISSET((dbp), DB_AM_CHKSUM) ? QPAGE_CHKSUM : QPAGE_NORMAL)
/*
* !!!
* The next_pgno and prev_pgno fields are not maintained for btree and recno
@@ -248,8 +317,8 @@ typedef struct _qpage {
* the BINTERNAL fields on each access.) Overload the PREV_PGNO field.
*/
#define RE_NREC(p) \
- ((TYPE(p) == P_IBTREE || TYPE(p) == P_IRECNO) ? \
- PREV_PGNO(p) : (TYPE(p) == P_LBTREE ? NUM_ENT(p) / 2 : NUM_ENT(p)))
+ ((TYPE(p) == P_IBTREE || TYPE(p) == P_IRECNO) ? PREV_PGNO(p) : \
+ (db_pgno_t)(TYPE(p) == P_LBTREE ? NUM_ENT(p) / 2 : NUM_ENT(p)))
#define RE_NREC_ADJ(p, adj) \
PREV_PGNO(p) += adj;
#define RE_NREC_SET(p, num) \
@@ -273,16 +342,17 @@ typedef struct _qpage {
} while (0)
/* Page header length (offset to first index). */
-#define P_OVERHEAD (SSZA(PAGE, inp))
+#define P_OVERHEAD(dbp) P_TO_UINT16(P_INP(dbp, 0))
/* First free byte. */
-#define LOFFSET(pg) (P_OVERHEAD + NUM_ENT(pg) * sizeof(db_indx_t))
+#define LOFFSET(dbp, pg) \
+ (P_OVERHEAD(dbp) + NUM_ENT(pg) * sizeof(db_indx_t))
/* Free space on a regular page. */
-#define P_FREESPACE(pg) (HOFFSET(pg) - LOFFSET(pg))
+#define P_FREESPACE(dbp, pg) (HOFFSET(pg) - LOFFSET(dbp, pg))
/* Get a pointer to the bytes at a specific index. */
-#define P_ENTRY(pg, indx) ((u_int8_t *)pg + ((PAGE *)pg)->inp[indx])
+#define P_ENTRY(dbp, pg, indx) ((u_int8_t *)pg + P_INP(dbp, pg)[indx])
/************************************************************************
OVERFLOW PAGE LAYOUT
@@ -307,10 +377,10 @@ typedef struct _qpage {
#define OV_REF(p) (((PAGE *)p)->entries)
/* Maximum number of bytes that you can put on an overflow page. */
-#define P_MAXSPACE(psize) ((psize) - P_OVERHEAD)
+#define P_MAXSPACE(dbp, psize) ((psize) - P_OVERHEAD(dbp))
/* Free space on an overflow page. */
-#define P_OVFLSPACE(psize, pg) (P_MAXSPACE(psize) - HOFFSET(pg))
+#define P_OVFLSPACE(dbp, psize, pg) (P_MAXSPACE(dbp, psize) - HOFFSET(pg))
/************************************************************************
HASH PAGE LAYOUT
@@ -331,7 +401,7 @@ typedef struct _qpage {
* structures, there's a pair of macros.
*/
#define HPAGE_PTYPE(p) (*(u_int8_t *)p)
-#define HPAGE_TYPE(pg, indx) (*P_ENTRY(pg, indx))
+#define HPAGE_TYPE(dbp, pg, indx) (*P_ENTRY(dbp, pg, indx))
/*
* The first and second types are H_KEYDATA and H_DUPLICATE, represented
@@ -361,12 +431,12 @@ typedef struct _hkeydata {
* The length of any HKEYDATA item. Note that indx is an element index,
* not a PAIR index.
*/
-#define LEN_HITEM(pg, pgsize, indx) \
+#define LEN_HITEM(dbp, pg, pgsize, indx) \
(((indx) == 0 ? pgsize : \
- ((PAGE *)(pg))->inp[indx - 1]) - ((PAGE *)(pg))->inp[indx])
+ (P_INP(dbp, pg)[indx - 1])) - (P_INP(dbp, pg)[indx]))
-#define LEN_HKEYDATA(pg, psize, indx) \
- (LEN_HITEM(pg, psize, indx) - HKEYDATA_SIZE(0))
+#define LEN_HKEYDATA(dbp, pg, psize, indx) \
+ (db_indx_t)(LEN_HITEM(dbp, pg, psize, indx) - HKEYDATA_SIZE(0))
/*
* Page space required to add a new HKEYDATA item to the page, with and
@@ -389,13 +459,15 @@ typedef struct _hkeydata {
#define H_NUMPAIRS(pg) (NUM_ENT(pg) / 2)
#define H_KEYINDEX(indx) (indx)
#define H_DATAINDEX(indx) ((indx) + 1)
-#define H_PAIRKEY(pg, indx) P_ENTRY(pg, H_KEYINDEX(indx))
-#define H_PAIRDATA(pg, indx) P_ENTRY(pg, H_DATAINDEX(indx))
-#define H_PAIRSIZE(pg, psize, indx) \
- (LEN_HITEM(pg, psize, H_KEYINDEX(indx)) + \
- LEN_HITEM(pg, psize, H_DATAINDEX(indx)))
-#define LEN_HDATA(p, psize, indx) LEN_HKEYDATA(p, psize, H_DATAINDEX(indx))
-#define LEN_HKEY(p, psize, indx) LEN_HKEYDATA(p, psize, H_KEYINDEX(indx))
+#define H_PAIRKEY(dbp, pg, indx) P_ENTRY(dbp, pg, H_KEYINDEX(indx))
+#define H_PAIRDATA(dbp, pg, indx) P_ENTRY(dbp, pg, H_DATAINDEX(indx))
+#define H_PAIRSIZE(dbp, pg, psize, indx) \
+ (LEN_HITEM(dbp, pg, psize, H_KEYINDEX(indx)) + \
+ LEN_HITEM(dbp, pg, psize, H_DATAINDEX(indx)))
+#define LEN_HDATA(dbp, p, psize, indx) \
+ LEN_HKEYDATA(dbp, p, psize, H_DATAINDEX(indx))
+#define LEN_HKEY(dbp, p, psize, indx) \
+ LEN_HKEYDATA(dbp, p, psize, H_KEYINDEX(indx))
/*
* The third type is the H_OFFPAGE, represented by the HOFFPAGE structure:
@@ -470,8 +542,8 @@ typedef struct _bkeydata {
} BKEYDATA;
/* Get a BKEYDATA item for a specific index. */
-#define GET_BKEYDATA(pg, indx) \
- ((BKEYDATA *)P_ENTRY(pg, indx))
+#define GET_BKEYDATA(dbp, pg, indx) \
+ ((BKEYDATA *)P_ENTRY(dbp, pg, indx))
/*
* Page space required to add a new BKEYDATA item to the page, with and
@@ -495,15 +567,18 @@ typedef struct _boverflow {
} BOVERFLOW;
/* Get a BOVERFLOW item for a specific index. */
-#define GET_BOVERFLOW(pg, indx) \
- ((BOVERFLOW *)P_ENTRY(pg, indx))
+#define GET_BOVERFLOW(dbp, pg, indx) \
+ ((BOVERFLOW *)P_ENTRY(dbp, pg, indx))
/*
* Page space required to add a new BOVERFLOW item to the page, with and
- * without the index value.
+ * without the index value. The (u_int16_t) cast avoids warnings: ALIGN
+ * casts to db_align_t, the cast converts it to a small integral type so
+ * we don't get complaints when we assign the final result to an integral
+ * type smaller than db_align_t.
*/
#define BOVERFLOW_SIZE \
- ALIGN(sizeof(BOVERFLOW), sizeof(u_int32_t))
+ ((u_int16_t)ALIGN(sizeof(BOVERFLOW), sizeof(u_int32_t)))
#define BOVERFLOW_PSIZE \
(BOVERFLOW_SIZE + sizeof(db_indx_t))
@@ -532,8 +607,8 @@ typedef struct _binternal {
} BINTERNAL;
/* Get a BINTERNAL item for a specific index. */
-#define GET_BINTERNAL(pg, indx) \
- ((BINTERNAL *)P_ENTRY(pg, indx))
+#define GET_BINTERNAL(dbp, pg, indx) \
+ ((BINTERNAL *)P_ENTRY(dbp, pg, indx))
/*
* Page space required to add a new BINTERNAL item to the page, with and
@@ -557,8 +632,8 @@ typedef struct _rinternal {
} RINTERNAL;
/* Get a RINTERNAL item for a specific index. */
-#define GET_RINTERNAL(pg, indx) \
- ((RINTERNAL *)P_ENTRY(pg, indx))
+#define GET_RINTERNAL(dbp, pg, indx) \
+ ((RINTERNAL *)P_ENTRY(dbp, pg, indx))
/*
* Page space required to add a new RINTERNAL item to the page, with and
@@ -573,4 +648,4 @@ typedef struct _rinternal {
}
#endif
-#endif /* _DB_PAGE_H_ */
+#endif /* !_DB_PAGE_H_ */
diff --git a/bdb/include/db_server_int.h b/bdb/dbinc/db_server_int.h
index 69e88ea5aec..efec539b2f8 100644
--- a/bdb/include/db_server_int.h
+++ b/bdb/dbinc/db_server_int.h
@@ -1,10 +1,10 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2000
+ * Copyright (c) 2000-2002
* Sleepycat Software. All rights reserved.
*
- * $Id: db_server_int.h,v 1.13 2001/01/11 18:19:52 bostic Exp $
+ * $Id: db_server_int.h,v 1.23 2002/02/12 15:01:24 sue Exp $
*/
#ifndef _DB_SERVER_INT_H_
@@ -14,6 +14,21 @@
#define DB_SERVER_MAXTIMEOUT 1200 /* 20 minutes */
#define DB_SERVER_IDLETIMEOUT 86400 /* 1 day */
+/*
+ * Ignore/mask off the following env->open flags:
+ * Most are illegal for a client to specify as they would control
+ * server resource usage. We will just ignore them.
+ * DB_LOCKDOWN
+ * DB_PRIVATE
+ * DB_RECOVER
+ * DB_RECOVER_FATAL
+ * DB_SYSTEM_MEM
+ * DB_USE_ENVIRON, DB_USE_ENVIRON_ROOT - handled on client
+ */
+#define DB_SERVER_FLAGMASK ( \
+DB_LOCKDOWN | DB_PRIVATE | DB_RECOVER | DB_RECOVER_FATAL | \
+DB_SYSTEM_MEM | DB_USE_ENVIRON | DB_USE_ENVIRON_ROOT)
+
#define CT_CURSOR 0x001 /* Cursor */
#define CT_DB 0x002 /* Database */
#define CT_ENV 0x004 /* Env */
@@ -28,6 +43,39 @@ struct home_entry {
char *home;
char *dir;
char *name;
+ char *passwd;
+};
+
+/*
+ * Data needed for sharing handles.
+ * To share an env handle, on the open call, they must have matching
+ * env flags, and matching set_flags.
+ *
+ * To share a db handle on the open call, the db, subdb and flags must
+ * all be the same.
+ */
+#define DB_SERVER_ENVFLAGS ( \
+DB_INIT_CDB | DB_INIT_LOCK | DB_INIT_LOG | DB_INIT_MPOOL | \
+DB_INIT_TXN | DB_JOINENV)
+
+#define DB_SERVER_DBFLAGS (DB_DIRTY_READ | DB_NOMMAP | DB_RDONLY)
+#define DB_SERVER_DBNOSHARE (DB_EXCL | DB_TRUNCATE)
+
+typedef struct ct_envdata ct_envdata;
+typedef struct ct_dbdata ct_dbdata;
+struct ct_envdata {
+ u_int32_t envflags;
+ u_int32_t onflags;
+ u_int32_t offflags;
+ home_entry *home;
+};
+
+struct ct_dbdata {
+ u_int32_t dbflags;
+ u_int32_t setflags;
+ char *db;
+ char *subdb;
+ DBTYPE type;
};
/*
@@ -42,18 +90,30 @@ typedef struct ct_entry ct_entry;
struct ct_entry {
LIST_ENTRY(ct_entry) entries; /* List of entries */
union {
+#ifdef __cplusplus
+ DbEnv *envp; /* H_ENV */
+ DbTxn *txnp; /* H_TXN */
+ Db *dbp; /* H_DB */
+ Dbc *dbc; /* H_CURSOR */
+#else
DB_ENV *envp; /* H_ENV */
DB_TXN *txnp; /* H_TXN */
DB *dbp; /* H_DB */
DBC *dbc; /* H_CURSOR */
+#endif
void *anyp;
} handle_u;
+ union { /* Private data per type */
+ ct_envdata envdp; /* Env info */
+ ct_dbdata dbdp; /* Db info */
+ } private_u;
long ct_id; /* Client ID */
long *ct_activep; /* Activity timestamp pointer*/
long *ct_origp; /* Original timestamp pointer*/
long ct_active; /* Activity timestamp */
long ct_timeout; /* Resource timeout */
long ct_idle; /* Idle timeout */
+ u_int32_t ct_refcount; /* Ref count for sharing */
u_int32_t ct_type; /* This entry's type */
struct ct_entry *ct_parent; /* Its parent */
struct ct_entry *ct_envparent; /* Its environment */
@@ -65,6 +125,9 @@ struct ct_entry {
#define ct_dbc handle_u.dbc
#define ct_anyp handle_u.anyp
+#define ct_envdp private_u.envdp
+#define ct_dbdp private_u.dbdp
+
extern int __dbsrv_verbose;
/*
@@ -82,4 +145,4 @@ extern int __dbsrv_verbose;
__dbsrv_active(ctp); \
}
-#endif /* _DB_SERVER_INT_H_ */
+#endif /* !_DB_SERVER_INT_H_ */
diff --git a/bdb/include/db_shash.h b/bdb/dbinc/db_shash.h
index 0b9aac98f53..2c54d6145c5 100644
--- a/bdb/include/db_shash.h
+++ b/bdb/dbinc/db_shash.h
@@ -1,12 +1,15 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Copyright (c) 1996-2002
* Sleepycat Software. All rights reserved.
*
- * $Id: db_shash.h,v 11.7 2000/12/12 17:43:56 bostic Exp $
+ * $Id: db_shash.h,v 11.11 2002/01/11 15:52:26 bostic Exp $
*/
+#ifndef _DB_SHASH_H_
+#define _DB_SHASH_H_
+
/* Hash Headers */
typedef SH_TAILQ_HEAD(__hash_head) DB_HASHTAB;
@@ -75,3 +78,4 @@ typedef SH_TAILQ_HEAD(__hash_head) DB_HASHTAB;
__bucket = &begin[ndx]; \
SH_TAILQ_REMOVE(__bucket, obj, field, type); \
}
+#endif /* !_DB_SHASH_H_ */
diff --git a/bdb/include/db_swap.h b/bdb/dbinc/db_swap.h
index bc96afb7a10..d5aad65385e 100644
--- a/bdb/include/db_swap.h
+++ b/bdb/dbinc/db_swap.h
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Copyright (c) 1996-2002
* Sleepycat Software. All rights reserved.
*/
/*
@@ -32,7 +32,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
- * $Id: db_swap.h,v 11.5 2000/03/28 16:14:36 bostic Exp $
+ * $Id: db_swap.h,v 11.8 2002/01/11 15:52:26 bostic Exp $
*/
#ifndef _DB_SWAP_H_
@@ -100,16 +100,17 @@
}
/*
- * DB has local versions of htonl() and ntohl() that only operate on pointers
- * to the right size memory locations, the portability magic for finding the
- * real ones isn't worth the effort.
+ * Berkeley DB has local versions of htonl() and ntohl() that operate on
+ * pointers to the right size memory locations; the portability magic for
+ * finding the real system functions isn't worth the effort.
*/
-#if defined(WORDS_BIGENDIAN)
-#define DB_HTONL(p)
-#define DB_NTOHL(p)
-#else
-#define DB_HTONL(p) P_32_SWAP(p)
-#define DB_NTOHL(p) P_32_SWAP(p)
-#endif
+#define DB_HTONL(p) do { \
+ if (!__db_isbigendian()) \
+ P_32_SWAP(p); \
+} while (0)
+#define DB_NTOHL(p) do { \
+ if (!__db_isbigendian()) \
+ P_32_SWAP(p); \
+} while (0)
#endif /* !_DB_SWAP_H_ */
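
The hunk above replaces the compile-time WORDS_BIGENDIAN test with a run-time check, so DB_HTONL/DB_NTOHL swap bytes only on little-endian hosts. A standalone sketch of the same idea; the names here are illustrative, and P_32_SWAP itself is defined elsewhere in db_swap.h:

#include <stdint.h>
#include <stdio.h>

/* Run-time endianness test: on a big-endian host the first byte of 1u is 0. */
static int
is_bigendian(void)
{
    uint32_t u = 1;
    return (*(unsigned char *)&u == 0);
}

/* In-place 32-bit byte swap, the moral equivalent of P_32_SWAP. */
static void
swap32(void *p)
{
    unsigned char *b = p, t;
    t = b[0]; b[0] = b[3]; b[3] = t;
    t = b[1]; b[1] = b[2]; b[2] = t;
}

/* Host-to-network conversion in the style of the new DB_HTONL. */
#define MY_HTONL(p) do { if (!is_bigendian()) swap32(p); } while (0)

int
main(void)
{
    uint32_t v = 0x11223344;
    MY_HTONL(&v);
    printf("0x%08lx\n", (unsigned long)v);  /* 0x44332211 on little-endian hosts */
    return (0);
}
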
diff --git a/bdb/include/db_upgrade.h b/bdb/dbinc/db_upgrade.h
index d8d99645231..3ccba810889 100644
--- a/bdb/include/db_upgrade.h
+++ b/bdb/dbinc/db_upgrade.h
@@ -1,10 +1,10 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Copyright (c) 1996-2002
* Sleepycat Software. All rights reserved.
*
- * $Id: db_upgrade.h,v 1.5 2000/11/16 23:40:56 ubell Exp $
+ * $Id: db_upgrade.h,v 1.10 2002/01/11 15:52:26 bostic Exp $
*/
#ifndef _DB_UPGRADE_H_
@@ -16,12 +16,65 @@
*/
/* Structures from the 3.1 release */
+typedef struct _dbmeta31 {
+ DB_LSN lsn; /* 00-07: LSN. */
+ db_pgno_t pgno; /* 08-11: Current page number. */
+ u_int32_t magic; /* 12-15: Magic number. */
+ u_int32_t version; /* 16-19: Version. */
+ u_int32_t pagesize; /* 20-23: Pagesize. */
+ u_int8_t unused1[1]; /* 24: Unused. */
+ u_int8_t type; /* 25: Page type. */
+ u_int8_t unused2[2]; /* 26-27: Unused. */
+ u_int32_t free; /* 28-31: Free list page number. */
+ DB_LSN unused3; /* 36-39: Unused. */
+ u_int32_t key_count; /* 40-43: Cached key count. */
+ u_int32_t record_count; /* 44-47: Cached record count. */
+ u_int32_t flags; /* 48-51: Flags: unique to each AM. */
+ /* 52-71: Unique file ID. */
+ u_int8_t uid[DB_FILE_ID_LEN];
+} DBMETA31;
+
+typedef struct _btmeta31 {
+ DBMETA31 dbmeta; /* 00-71: Generic meta-data header. */
+
+ u_int32_t maxkey; /* 72-75: Btree: Maxkey. */
+ u_int32_t minkey; /* 76-79: Btree: Minkey. */
+ u_int32_t re_len; /* 80-83: Recno: fixed-length record length. */
+ u_int32_t re_pad; /* 84-87: Recno: fixed-length record pad. */
+ u_int32_t root; /* 88-92: Root page. */
+
+ /*
+ * Minimum page size is 128.
+ */
+} BTMETA31;
+
+/************************************************************************
+ HASH METADATA PAGE LAYOUT
+ ************************************************************************/
+typedef struct _hashmeta31 {
+ DBMETA31 dbmeta; /* 00-71: Generic meta-data page header. */
+
+ u_int32_t max_bucket; /* 72-75: ID of Maximum bucket in use */
+ u_int32_t high_mask; /* 76-79: Modulo mask into table */
+ u_int32_t low_mask; /* 80-83: Modulo mask into table lower half */
+ u_int32_t ffactor; /* 84-87: Fill factor */
+ u_int32_t nelem; /* 88-91: Number of keys in hash table */
+ u_int32_t h_charkey; /* 92-95: Value of hash(CHARKEY) */
+#define NCACHED 32 /* number of spare points */
+ /* 96-223: Spare pages for overflow */
+ u_int32_t spares[NCACHED];
+
+ /*
+ * Minimum page size is 256.
+ */
+} HMETA31;
+
/*
* QAM Meta data page structure
*
*/
typedef struct _qmeta31 {
- DBMETA dbmeta; /* 00-71: Generic meta-data header. */
+ DBMETA31 dbmeta; /* 00-71: Generic meta-data header. */
u_int32_t start; /* 72-75: Start offset. */
u_int32_t first_recno; /* 76-79: First not deleted record. */
@@ -34,6 +87,21 @@ typedef struct _qmeta31 {
* Minimum page size is 128.
*/
} QMETA31;
+/* Structures from the 3.2 release */
+typedef struct _qmeta32 {
+ DBMETA31 dbmeta; /* 00-71: Generic meta-data header. */
+
+ u_int32_t first_recno; /* 72-75: First not deleted record. */
+ u_int32_t cur_recno; /* 76-79: Last recno allocated. */
+ u_int32_t re_len; /* 80-83: Fixed-length record length. */
+ u_int32_t re_pad; /* 84-87: Fixed-length record pad. */
+ u_int32_t rec_page; /* 88-91: Records Per Page. */
+ u_int32_t page_ext; /* 92-95: Pages per extent */
+
+ /*
+ * Minimum page size is 128.
+ */
+} QMETA32;
/* Structures from the 3.0 release */
@@ -171,4 +239,4 @@ typedef struct hashhdr { /* Disk resident portion */
*/
} HASHHDR;
-#endif
+#endif /* !_DB_UPGRADE_H_ */
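
The *31/*32 structures added above describe the on-disk metadata layouts of older releases so the upgrade code can read them. As a rough illustration of how such a layout is consulted, here is a sketch that reads the start of a database file and prints the generic DBMETA31 header fields. It assumes the DBMETA31 declaration above (and the DB types it uses) are in scope, uses ordinary stdio rather than DB's own I/O layer, and ignores byte-swapping for files written on a different-endian host:

#include <stdio.h>

static int
dump_meta31(const char *path)
{
    DBMETA31 m;
    FILE *fp;

    if ((fp = fopen(path, "rb")) == NULL)
        return (-1);
    if (fread(&m, sizeof(m), 1, fp) != 1) {
        (void)fclose(fp);
        return (-1);
    }
    (void)fclose(fp);

    printf("magic %#lx version %lu pagesize %lu type %lu free %lu\n",
        (u_long)m.magic, (u_long)m.version,
        (u_long)m.pagesize, (u_long)m.type, (u_long)m.free);
    return (0);
}
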
diff --git a/bdb/include/db_verify.h b/bdb/dbinc/db_verify.h
index 2507f1f1082..949c9a2a6a1 100644
--- a/bdb/include/db_verify.h
+++ b/bdb/dbinc/db_verify.h
@@ -1,10 +1,10 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1999, 2000
+ * Copyright (c) 1999-2002
* Sleepycat Software. All rights reserved.
*
- * $Id: db_verify.h,v 1.18 2000/12/31 17:51:52 bostic Exp $
+ * $Id: db_verify.h,v 1.26 2002/08/06 06:37:08 bostic Exp $
*/
#ifndef _DB_VERIFY_H_
@@ -27,14 +27,17 @@
/* For fatal type errors--i.e., verifier bugs. */
#define TYPE_ERR_PRINT(dbenv, func, pgno, ptype) \
- EPRINT(((dbenv), "%s called on nonsensical page %lu of type %lu", \
- (func), (u_long)(pgno), (u_long)(ptype)));
-
-/* Is x a power of two? (Tests true for zero, which doesn't matter here.) */
-#define POWER_OF_TWO(x) (((x) & ((x) - 1)) == 0)
-
-#define IS_VALID_PAGESIZE(x) \
- (POWER_OF_TWO(x) && (x) >= DB_MIN_PGSIZE && ((x) <= DB_MAX_PGSIZE))
+ EPRINT(((dbenv), "Page %lu: %s called on nonsensical page of type %lu", \
+ (u_long)(pgno), (func), (u_long)(ptype)));
+
+/* Complain about a totally zeroed page where we don't expect one. */
+#define ZEROPG_ERR_PRINT(dbenv, pgno, str) \
+ do { \
+ EPRINT(((dbenv), "Page %lu: %s is of inappropriate type %lu", \
+ (u_long)(pgno), str, (u_long)P_INVALID)); \
+ EPRINT(((dbenv), "Page %lu: totally zeroed page", \
+ (u_long)(pgno))); \
+ } while (0)
/*
* Note that 0 is, in general, a valid pgno, despite equalling PGNO_INVALID;
@@ -111,12 +114,23 @@ struct __vrfy_dbinfo {
db_pgno_t last_pgno;
db_pgno_t pgs_remaining; /* For dbp->db_feedback(). */
+ /*
+ * These are used during __bam_vrfy_subtree to keep track, while
+ * walking up and down the Btree structure, of the prev- and next-page
+ * chain of leaf pages and verify that it's intact. Also, make sure
+ * that this chain contains pages of only one type.
+ */
+ db_pgno_t prev_pgno;
+ db_pgno_t next_pgno;
+ u_int8_t leaf_type;
+
/* Queue needs these to verify data pages in the first pass. */
u_int32_t re_len;
u_int32_t rec_page;
-#define SALVAGE_PRINTHEADER 0x01
-#define SALVAGE_PRINTFOOTER 0x02
+#define SALVAGE_PRINTABLE 0x01 /* Output printable chars literally. */
+#define SALVAGE_PRINTHEADER 0x02 /* Print the unknown-key header. */
+#define SALVAGE_PRINTFOOTER 0x04 /* Print the unknown-key footer. */
u_int32_t flags;
}; /* VRFY_DBINFO */
@@ -188,4 +202,4 @@ struct __vrfy_childinfo {
LIST_ENTRY(__vrfy_childinfo) links;
}; /* VRFY_CHILDINFO */
-#endif /* _DB_VERIFY_H_ */
+#endif /* !_DB_VERIFY_H_ */
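
The hunk above drops the POWER_OF_TWO/IS_VALID_PAGESIZE helpers from this header; the check itself is a classic bit trick worth spelling out. A minimal standalone version, taking DB_MIN_PGSIZE/DB_MAX_PGSIZE as the usual 512-byte and 64KB limits (treat those values as an assumption):

#include <stdint.h>

#define MY_MIN_PGSIZE   512     /* assumed lower bound */
#define MY_MAX_PGSIZE   65536   /* assumed upper bound */

/*
 * x & (x - 1) clears the lowest set bit; the result is 0 only for powers
 * of two (and for 0 itself, which the range check below rejects).
 */
static int
is_valid_pagesize(uint32_t x)
{
    return ((x & (x - 1)) == 0 &&
        x >= MY_MIN_PGSIZE && x <= MY_MAX_PGSIZE);
}
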
diff --git a/bdb/dbinc/debug.h b/bdb/dbinc/debug.h
new file mode 100644
index 00000000000..21f80387ccc
--- /dev/null
+++ b/bdb/dbinc/debug.h
@@ -0,0 +1,198 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1998-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: debug.h,v 11.31 2002/08/06 06:37:08 bostic Exp $
+ */
+
+#ifndef _DB_DEBUG_H_
+#define _DB_DEBUG_H_
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+/*
+ * When running with #DIAGNOSTIC defined, we smash memory and do memory
+ * guarding with a special byte value.
+ */
+#define CLEAR_BYTE 0xdb
+#define GUARD_BYTE 0xdc
+
+/*
+ * DB assertions.
+ */
+#if defined(DIAGNOSTIC) && defined(__STDC__)
+#define DB_ASSERT(e) ((e) ? (void)0 : __db_assert(#e, __FILE__, __LINE__))
+#else
+#define DB_ASSERT(e)
+#endif
+
+/*
+ * Purify and other run-time tools complain about uninitialized reads/writes
+ * of structure fields whose only purpose is padding, as well as when heap
+ * memory that was never initialized is written to disk.
+ */
+#ifdef UMRW
+#define UMRW_SET(v) (v) = 0
+#else
+#define UMRW_SET(v)
+#endif
+
+/*
+ * Error message handling. Use a macro instead of a function because va_list
+ * references to variadic arguments cannot be reset to the beginning of the
+ * variadic argument list (and then rescanned), by functions other than the
+ * original routine that took the variadic list of arguments.
+ */
+#if defined(__STDC__) || defined(__cplusplus)
+#define DB_REAL_ERR(env, error, error_set, stderr_default, fmt) { \
+ va_list ap; \
+ \
+ /* Call the user's callback function, if specified. */ \
+ va_start(ap, fmt); \
+ if ((env) != NULL && (env)->db_errcall != NULL) \
+ __db_errcall(env, error, error_set, fmt, ap); \
+ va_end(ap); \
+ \
+ /* Write to the user's file descriptor, if specified. */ \
+ va_start(ap, fmt); \
+ if ((env) != NULL && (env)->db_errfile != NULL) \
+ __db_errfile(env, error, error_set, fmt, ap); \
+ va_end(ap); \
+ \
+ /* \
+ * If we have a default and we didn't do either of the above, \
+ * write to the default. \
+ */ \
+ va_start(ap, fmt); \
+ if ((stderr_default) && ((env) == NULL || \
+ ((env)->db_errcall == NULL && (env)->db_errfile == NULL))) \
+ __db_errfile(env, error, error_set, fmt, ap); \
+ va_end(ap); \
+}
+#else
+#define DB_REAL_ERR(env, error, error_set, stderr_default, fmt) { \
+ va_list ap; \
+ \
+ /* Call the user's callback function, if specified. */ \
+ va_start(ap); \
+ if ((env) != NULL && (env)->db_errcall != NULL) \
+ __db_errcall(env, error, error_set, fmt, ap); \
+ va_end(ap); \
+ \
+ /* Write to the user's file descriptor, if specified. */ \
+ va_start(ap); \
+ if ((env) != NULL && (env)->db_errfile != NULL) \
+ __db_errfile(env, error, error_set, fmt, ap); \
+ va_end(ap); \
+ \
+ /* \
+ * If we have a default and we didn't do either of the above, \
+ * write to the default. \
+ */ \
+ va_start(ap); \
+ if ((stderr_default) && ((env) == NULL || \
+ ((env)->db_errcall == NULL && (env)->db_errfile == NULL))) \
+ __db_errfile(env, error, error_set, fmt, ap); \
+ va_end(ap); \
+}
+#endif
+
+/*
+ * Debugging macro to log operations.
+ * If DEBUG_WOP is defined, log operations that modify the database.
+ * If DEBUG_ROP is defined, log operations that read the database.
+ *
+ * D dbp
+ * T txn
+ * O operation (string)
+ * K key
+ * A data
+ * F flags
+ */
+#define LOG_OP(C, T, O, K, A, F) { \
+ DB_LSN __lsn; \
+ DBT __op; \
+ if (DBC_LOGGING((C))) { \
+ memset(&__op, 0, sizeof(__op)); \
+ __op.data = O; \
+ __op.size = strlen(O) + 1; \
+ (void)__db_debug_log((C)->dbp->dbenv, T, &__lsn, 0, \
+ &__op, (C)->dbp->log_filename->id, K, A, F); \
+ } \
+}
+#ifdef DEBUG_ROP
+#define DEBUG_LREAD(C, T, O, K, A, F) LOG_OP(C, T, O, K, A, F)
+#else
+#define DEBUG_LREAD(C, T, O, K, A, F)
+#endif
+#ifdef DEBUG_WOP
+#define DEBUG_LWRITE(C, T, O, K, A, F) LOG_OP(C, T, O, K, A, F)
+#else
+#define DEBUG_LWRITE(C, T, O, K, A, F)
+#endif
+
+/*
+ * Hook for testing recovery at various places in the create/delete paths.
+ * Hook for testing subdb locks.
+ */
+#if CONFIG_TEST
+#define DB_TEST_SUBLOCKS(env, flags) \
+do { \
+ if ((env)->test_abort == DB_TEST_SUBDB_LOCKS) \
+ (flags) |= DB_LOCK_NOWAIT; \
+} while (0)
+
+#define DB_ENV_TEST_RECOVERY(env, val, ret, name) \
+do { \
+ int __ret; \
+ PANIC_CHECK((env)); \
+ if ((env)->test_copy == (val)) { \
+ /* COPY the FILE */ \
+ if ((__ret = __db_testcopy((env), NULL, (name))) != 0) \
+ (ret) = __db_panic((env), __ret); \
+ } \
+ if ((env)->test_abort == (val)) { \
+ /* ABORT the TXN */ \
+ (env)->test_abort = 0; \
+ (ret) = EINVAL; \
+ goto db_tr_err; \
+ } \
+} while (0)
+
+#define DB_TEST_RECOVERY(dbp, val, ret, name) \
+do { \
+ int __ret; \
+ PANIC_CHECK((dbp)->dbenv); \
+ if ((dbp)->dbenv->test_copy == (val)) { \
+ /* Copy the file. */ \
+ if (F_ISSET((dbp), \
+ DB_AM_OPEN_CALLED) && (dbp)->mpf != NULL) \
+ (void)(dbp)->sync((dbp), 0); \
+ if ((__ret = \
+ __db_testcopy((dbp)->dbenv, (dbp), (name))) != 0) \
+ (ret) = __db_panic((dbp)->dbenv, __ret); \
+ } \
+ if ((dbp)->dbenv->test_abort == (val)) { \
+ /* Abort the transaction. */ \
+ (dbp)->dbenv->test_abort = 0; \
+ (ret) = EINVAL; \
+ goto db_tr_err; \
+ } \
+} while (0)
+
+#define DB_TEST_RECOVERY_LABEL db_tr_err:
+#else
+#define DB_TEST_SUBLOCKS(env, flags)
+#define DB_ENV_TEST_RECOVERY(env, val, ret, name)
+#define DB_TEST_RECOVERY(dbp, val, ret, name)
+#define DB_TEST_RECOVERY_LABEL
+#endif
+
+#if defined(__cplusplus)
+}
+#endif
+#endif /* !_DB_DEBUG_H_ */
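
DB_REAL_ERR above restarts the va_list with va_start before each output target because a va_list consumed by a callee cannot be rewound and reused. A small standalone function showing the same pattern with two sinks; the callback signature is made up for the example, and C99's va_copy is the other common way to do this:

#include <stdarg.h>
#include <stdio.h>

typedef void (*err_callback)(const char *fmt, va_list ap);

/*
 * Report the same message to an optional callback and to stderr,
 * re-initializing the va_list for each pass, as DB_REAL_ERR does.
 */
static void
report(err_callback cb, const char *fmt, ...)
{
    va_list ap;

    if (cb != NULL) {
        va_start(ap, fmt);
        cb(fmt, ap);
        va_end(ap);
    }

    va_start(ap, fmt);
    (void)vfprintf(stderr, fmt, ap);
    va_end(ap);
    (void)fputc('\n', stderr);
}
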
diff --git a/bdb/dbinc/fop.h b/bdb/dbinc/fop.h
new file mode 100644
index 00000000000..c438ef7ef40
--- /dev/null
+++ b/bdb/dbinc/fop.h
@@ -0,0 +1,16 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2001-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: fop.h,v 11.3 2002/03/27 04:34:54 bostic Exp $
+ */
+
+#ifndef _FOP_H_
+#define _FOP_H_
+
+#include "dbinc_auto/fileops_auto.h"
+#include "dbinc_auto/fileops_ext.h"
+
+#endif /* !_FOP_H_ */
diff --git a/bdb/dbinc/globals.h b/bdb/dbinc/globals.h
new file mode 100644
index 00000000000..3441ade2ea9
--- /dev/null
+++ b/bdb/dbinc/globals.h
@@ -0,0 +1,83 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: globals.h,v 11.1 2002/07/12 18:56:41 bostic Exp $
+ */
+
+/*******************************************************
+ * Global variables.
+ *
+ * Held in a single structure to minimize the name-space pollution.
+ *******************************************************/
+#ifdef HAVE_VXWORKS
+#include "semLib.h"
+#endif
+
+typedef struct __db_globals {
+ u_int32_t no_write_errors; /* write error testing disallowed */
+#ifdef HAVE_VXWORKS
+ u_int32_t db_global_init; /* VxWorks: inited */
+ SEM_ID db_global_lock; /* VxWorks: global semaphore */
+#endif
+ /* XA: list of opened environments. */
+ TAILQ_HEAD(__db_envq, __db_env) db_envq;
+
+ int (*j_close) __P((int)); /* Underlying OS interface jump table.*/
+ void (*j_dirfree) __P((char **, int));
+ int (*j_dirlist) __P((const char *, char ***, int *));
+ int (*j_exists) __P((const char *, int *));
+ void (*j_free) __P((void *));
+ int (*j_fsync) __P((int));
+ int (*j_ioinfo) __P((const char *,
+ int, u_int32_t *, u_int32_t *, u_int32_t *));
+ void *(*j_malloc) __P((size_t));
+ int (*j_map) __P((char *, size_t, int, int, void **));
+ int (*j_open) __P((const char *, int, ...));
+ ssize_t (*j_read) __P((int, void *, size_t));
+ void *(*j_realloc) __P((void *, size_t));
+ int (*j_rename) __P((const char *, const char *));
+ int (*j_seek) __P((int, size_t, db_pgno_t, u_int32_t, int, int));
+ int (*j_sleep) __P((u_long, u_long));
+ int (*j_unlink) __P((const char *));
+ int (*j_unmap) __P((void *, size_t));
+ ssize_t (*j_write) __P((int, const void *, size_t));
+ int (*j_yield) __P((void));
+} DB_GLOBALS;
+
+#ifdef DB_INITIALIZE_DB_GLOBALS
+DB_GLOBALS __db_global_values = {
+ 0, /* write error testing disallowed */
+#ifdef HAVE_VXWORKS
+ 0, /* VxWorks: initialized */
+ NULL, /* VxWorks: global semaphore */
+#endif
+ /* XA: list of opened environments. */
+ {NULL, &__db_global_values.db_envq.tqh_first},
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL
+};
+#else
+extern DB_GLOBALS __db_global_values;
+#endif
+
+#define DB_GLOBAL(v) __db_global_values.v
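
The j_* members above form a jump table: when a slot is non-NULL the library routes the corresponding OS call through it, otherwise it falls back to the native call. A one-slot sketch of that indirection pattern in isolation; the names here are illustrative, and in Berkeley DB the slots are normally filled through the db_env_set_func_* interfaces rather than assigned directly:

#include <stdlib.h>

static void *(*j_malloc_hook)(size_t);  /* NULL means "use the system call" */

/* The DB_GLOBAL(j_malloc)-style fallback, reduced to one function. */
static void *
os_malloc(size_t len)
{
    return (j_malloc_hook != NULL ? j_malloc_hook(len) : malloc(len));
}
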
diff --git a/bdb/include/hash.h b/bdb/dbinc/hash.h
index 14a88c80b9c..98289735fc4 100644
--- a/bdb/include/hash.h
+++ b/bdb/dbinc/hash.h
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Copyright (c) 1996-2002
* Sleepycat Software. All rights reserved.
*/
/*
@@ -39,9 +39,12 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
- * $Id: hash.h,v 11.19 2000/12/21 23:05:16 krinsky Exp $
+ * $Id: hash.h,v 11.26 2002/03/27 04:34:54 bostic Exp $
*/
+#ifndef _DB_HASH_H_
+#define _DB_HASH_H_
+
/* Hash internal structure. */
typedef struct hash_t {
db_pgno_t meta_pgno; /* Page number of the meta data page. */
@@ -131,10 +134,14 @@ typedef struct cursor_t {
typedef enum {
DB_HAM_CHGPG = 1,
- DB_HAM_SPLIT = 2,
- DB_HAM_DUP = 3
+ DB_HAM_DELFIRSTPG = 2,
+ DB_HAM_DELMIDPG = 3,
+ DB_HAM_DELLASTPG = 4,
+ DB_HAM_DUP = 5,
+ DB_HAM_SPLIT = 6
} db_ham_mode;
-#include "hash_auto.h"
-#include "hash_ext.h"
-#include "db_am.h"
+#include "dbinc_auto/hash_auto.h"
+#include "dbinc_auto/hash_ext.h"
+#include "dbinc/db_am.h"
+#endif /* !_DB_HASH_H_ */
diff --git a/bdb/dbinc/hmac.h b/bdb/dbinc/hmac.h
new file mode 100644
index 00000000000..16f61fb58ad
--- /dev/null
+++ b/bdb/dbinc/hmac.h
@@ -0,0 +1,32 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: hmac.h,v 1.3 2002/08/06 06:37:08 bostic Exp $
+ */
+
+#ifndef _DB_HMAC_H_
+#define _DB_HMAC_H_
+
+/*
+ * Algorithm specific information.
+ */
+/*
+ * SHA1 checksumming
+ */
+typedef struct {
+ u_int32_t state[5];
+ u_int32_t count[2];
+ unsigned char buffer[64];
+} SHA1_CTX;
+
+/*
+ * AES assumes the SHA1 checksumming (also called MAC)
+ */
+#define DB_MAC_MAGIC "mac derivation key magic value"
+#define DB_ENC_MAGIC "encryption and decryption key value magic"
+
+#include "dbinc_auto/hmac_ext.h"
+#endif /* !_DB_HMAC_H_ */
diff --git a/bdb/include/lock.h b/bdb/dbinc/lock.h
index e4a01ddf9c7..7ddc9ce9988 100644
--- a/bdb/include/lock.h
+++ b/bdb/dbinc/lock.h
@@ -1,33 +1,58 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Copyright (c) 1996-2002
* Sleepycat Software. All rights reserved.
*
- * $Id: lock.h,v 11.20 2000/12/12 17:43:56 bostic Exp $
+ * $Id: lock.h,v 11.42 2002/05/18 01:34:13 bostic Exp $
*/
+#ifndef _DB_LOCK_H_
+#define _DB_LOCK_H_
+
#define DB_LOCK_DEFAULT_N 1000 /* Default # of locks in region. */
/*
+ * The locker id space is divided between the transaction manager and the lock
+ * manager. Lock IDs start at 1 and go to DB_LOCK_MAXID. Txn IDs start at
+ * DB_LOCK_MAXID + 1 and go up to TXN_MAXIMUM.
+ */
+#define DB_LOCK_INVALIDID 0
+#define DB_LOCK_MAXID 0x7fffffff
+
+/*
* Out of band value for a lock. Locks contain an offset into a lock region,
* so we use an invalid region offset to indicate an invalid or unset lock.
*/
-#define LOCK_INVALID INVALID_ROFF
+#define LOCK_INVALID INVALID_ROFF
+#define LOCK_ISSET(lock) ((lock).off != LOCK_INVALID)
+#define LOCK_INIT(lock) ((lock).off = LOCK_INVALID)
/*
- * The locker id space is divided between the transaction manager and the lock
- * manager. Lock IDs start at 0 and go to DB_LOCK_MAXID. Txn IDs start at
- * DB_LOCK_MAXID + 1 and go up to TXN_INVALID.
+ * Macro to identify a write lock for the purpose of counting locks
+ * for the NUMWRITES option to deadlock detection.
*/
-#define DB_LOCK_MAXID 0x7fffffff
+#define IS_WRITELOCK(m) \
+ ((m) == DB_LOCK_WRITE || (m) == DB_LOCK_IWRITE || (m) == DB_LOCK_IWR)
+
+/*
+ * Lock timers.
+ */
+typedef struct {
+ u_int32_t tv_sec; /* Seconds. */
+ u_int32_t tv_usec; /* Microseconds. */
+} db_timeval_t;
+
+#define LOCK_TIME_ISVALID(time) ((time)->tv_sec != 0)
+#define LOCK_SET_TIME_INVALID(time) ((time)->tv_sec = 0)
+#define LOCK_TIME_EQUAL(t1, t2) \
+ ((t1)->tv_sec == (t2)->tv_sec && (t1)->tv_usec == (t2)->tv_usec)
/*
* DB_LOCKREGION --
* The lock shared region.
*/
typedef struct __db_lockregion {
- u_int32_t id; /* unique id generator */
u_int32_t need_dd; /* flag for deadlock detector */
u_int32_t detect; /* run dd on every conflict */
/* free lock header */
@@ -37,30 +62,23 @@ typedef struct __db_lockregion {
/* free locker header */
SH_TAILQ_HEAD(__flocker) free_lockers;
SH_TAILQ_HEAD(__dobj) dd_objs; /* objects with waiters */
- u_int32_t maxlocks; /* maximum number of locks in table */
- u_int32_t maxlockers; /* maximum number of lockers in table */
- u_int32_t maxobjects; /* maximum number of objects in table */
+ SH_TAILQ_HEAD(__lkrs) lockers; /* list of lockers */
+
+ db_timeout_t lk_timeout; /* timeout for locks. */
+ db_timeout_t tx_timeout; /* timeout for txns. */
+
u_int32_t locker_t_size; /* size of locker hash table */
u_int32_t object_t_size; /* size of object hash table */
- u_int32_t nmodes; /* number of lock modes */
- u_int32_t nlocks; /* current number of locks */
- u_int32_t maxnlocks; /* maximum number of locks so far*/
- u_int32_t nlockers; /* current number of lockers */
- u_int32_t maxnlockers; /* maximum number of lockers so far */
- u_int32_t nobjects; /* current number of objects */
- u_int32_t maxnobjects; /* maximum number of objects so far */
+
roff_t conf_off; /* offset of conflicts array */
roff_t obj_off; /* offset of object hash table */
roff_t osynch_off; /* offset of the object mutex table */
roff_t locker_off; /* offset of locker hash table */
roff_t lsynch_off; /* offset of the locker mutex table */
- u_int32_t nconflicts; /* number of lock conflicts */
- u_int32_t nrequests; /* number of lock gets */
- u_int32_t nreleases; /* number of lock puts */
- u_int32_t nnowaits; /* number of lock requests that would
- have waited without nowait */
- u_int32_t ndeadlocks; /* number of deadlocks */
-#ifdef MUTEX_SYSTEM_RESOURCES
+
+ DB_LOCK_STAT stat; /* stats about locking. */
+
+#ifdef HAVE_MUTEX_SYSTEM_RESOURCES
roff_t maint_off; /* offset of region maintenance info */
#endif
} DB_LOCKREGION;
@@ -98,6 +116,8 @@ typedef struct __db_lockobj {
typedef struct __db_locker {
u_int32_t id; /* Locker id. */
u_int32_t dd_id; /* Deadlock detector id. */
+ u_int32_t nlocks; /* Number of locks held. */
+ u_int32_t nwrites; /* Number of write locks held. */
size_t master_locker; /* Locker of master transaction. */
size_t parent_locker; /* Parent of this child. */
SH_LIST_HEAD(_child) child_locker; /* List of descendant txns;
@@ -106,23 +126,21 @@ typedef struct __db_locker {
SH_LIST_ENTRY child_link; /* Links transactions in the family;
elements of the child_locker
list. */
- SH_TAILQ_ENTRY links; /* Links for free list. */
+ SH_TAILQ_ENTRY links; /* Links for free and hash list. */
+ SH_TAILQ_ENTRY ulinks; /* Links in-use list. */
SH_LIST_HEAD(_held) heldby; /* Locks held by this locker. */
+ db_timeval_t lk_expire; /* When current lock expires. */
+ db_timeval_t tx_expire; /* When this txn expires. */
+ db_timeout_t lk_timeout; /* How long do we let locks live. */
#define DB_LOCKER_DELETED 0x0001
+#define DB_LOCKER_DIRTY 0x0002
+#define DB_LOCKER_INABORT 0x0004
+#define DB_LOCKER_TIMEOUT 0x0008
u_int32_t flags;
} DB_LOCKER;
/*
- * Lockers can be freed if they are not part of a transaction family.
- * Members of a family either point at the master transaction or are
- * the master transaction and have children lockers.
- */
-#define LOCKER_FREEABLE(lp) \
- ((lp)->master_locker == TXN_INVALID_ID && \
- SH_LIST_FIRST(&(lp)->child_locker, __db_locker) == NULL)
-
-/*
* DB_LOCKTAB --
* The primary library lock data structure (i.e., the one referenced
* by the environment, as opposed to the internal one laid out in the region.)
@@ -137,7 +155,7 @@ typedef struct __db_locktab {
/* Test for conflicts. */
#define CONFLICTS(T, R, HELD, WANTED) \
- (T)->conflicts[(HELD) * (R)->nmodes + (WANTED)]
+ (T)->conflicts[(HELD) * (R)->stat.st_nmodes + (WANTED)]
#define OBJ_LINKS_VALID(L) ((L)->links.stqe_prev != -1)
@@ -146,7 +164,7 @@ struct __db_lock {
* Wait on mutex to wait on lock. You reference your own mutex with
* ID 0 and others reference your mutex with ID 1.
*/
- MUTEX mutex;
+ DB_MUTEX mutex;
u_int32_t holder; /* Who holds this lock. */
u_int32_t gen; /* Generation count. */
@@ -167,13 +185,15 @@ struct __db_lock {
* DB_LOCK_NOPROMOTE: Don't bother running promotion when releasing locks
* (used by __lock_put_internal).
* DB_LOCK_UNLINK: Remove from the locker links (used in checklocker).
+ * Make sure that these do not conflict with the interface flags because
+ * we pass some of those around (i.e., DB_LOCK_REMOVE).
*/
-#define DB_LOCK_DOALL 0x001
-#define DB_LOCK_FREE 0x002
-#define DB_LOCK_IGNOREDEL 0x004
-#define DB_LOCK_NOPROMOTE 0x008
-#define DB_LOCK_UNLINK 0x010
-#define DB_LOCK_NOWAITERS 0x020
+#define DB_LOCK_DOALL 0x010000
+#define DB_LOCK_FREE 0x020000
+#define DB_LOCK_IGNOREDEL 0x040000
+#define DB_LOCK_NOPROMOTE 0x080000
+#define DB_LOCK_UNLINK 0x100000
+#define DB_LOCK_NOWAITERS 0x200000
/*
* Macros to get/release different types of mutexes.
@@ -187,4 +207,6 @@ struct __db_lock {
#define LOCKREGION(dbenv, lt) R_LOCK((dbenv), &(lt)->reginfo)
#define UNLOCKREGION(dbenv, lt) R_UNLOCK((dbenv), &(lt)->reginfo)
-#include "lock_ext.h"
+
+#include "dbinc_auto/lock_ext.h"
+#endif /* !_DB_LOCK_H_ */
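
The CONFLICTS macro now indexes the conflict matrix through stat.st_nmodes: the matrix is an nmodes-by-nmodes array, and entry [held][wanted] is nonzero when a held lock of one mode blocks a request for another. A toy three-mode example of that lookup; the modes and values are illustrative, not DB's real conflict table:

/* Toy lock modes: 0 = not granted, 1 = read, 2 = write. */
#define MY_NMODES   3

static const unsigned char my_conflicts[MY_NMODES * MY_NMODES] = {
    /*      N  R  W   (wanted) */
    /* N */ 0, 0, 0,
    /* R */ 0, 0, 1,
    /* W */ 0, 1, 1,
};

/* The same indexing scheme as CONFLICTS(T, R, HELD, WANTED). */
#define MY_CONFLICTS(held, wanted) \
    (my_conflicts[(held) * MY_NMODES + (wanted)])
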
diff --git a/bdb/dbinc/log.h b/bdb/dbinc/log.h
new file mode 100644
index 00000000000..434994528ea
--- /dev/null
+++ b/bdb/dbinc/log.h
@@ -0,0 +1,273 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: log.h,v 11.60 2002/08/06 06:37:08 bostic Exp $
+ */
+
+#ifndef _LOG_H_
+#define _LOG_H_
+
+struct __db_log; typedef struct __db_log DB_LOG;
+struct __hdr; typedef struct __hdr HDR;
+struct __log; typedef struct __log LOG;
+struct __log_persist; typedef struct __log_persist LOGP;
+
+#define LFPREFIX "log." /* Log file name prefix. */
+#define LFNAME "log.%010d" /* Log file name template. */
+#define LFNAME_V1 "log.%05d" /* Log file name template, rev 1. */
+
+#define LG_MAX_DEFAULT (10 * MEGABYTE) /* 10 MB. */
+#define LG_BSIZE_DEFAULT (32 * 1024) /* 32 KB. */
+#define LG_BASE_REGION_SIZE (60 * 1024) /* 60 KB. */
+
+/*
+ * The per-process table that maps log file-id's to DB structures.
+ */
+typedef struct __db_entry {
+ DB *dbp; /* Open dbp for this file id. */
+ int deleted; /* File was not found during open. */
+} DB_ENTRY;
+
+/*
+ * DB_LOG
+ * Per-process log structure.
+ */
+struct __db_log {
+/*
+ * These fields need to be protected for multi-threaded support.
+ *
+ * !!!
+ * As this structure is allocated in per-process memory, the mutex may need
+ * to be stored elsewhere on architectures unable to support mutexes in heap
+ * memory, e.g., HP/UX 9.
+ */
+ DB_MUTEX *mutexp; /* Mutex for thread protection. */
+
+ DB_ENTRY *dbentry; /* Recovery file-id mapping. */
+#define DB_GROW_SIZE 64
+ int32_t dbentry_cnt; /* Entries. Grows by DB_GROW_SIZE. */
+
+/*
+ * These fields are always accessed while the region lock is held, so they do
+ * not have to be protected by the thread lock as well, OR, they are only used
+ * when threads are not being used, i.e. most cursor operations are disallowed
+ * on threaded logs.
+ */
+ u_int32_t lfname; /* Log file "name". */
+ DB_FH lfh; /* Log file handle. */
+
+ u_int8_t *bufp; /* Region buffer. */
+
+/* These fields are not protected. */
+ DB_ENV *dbenv; /* Reference to error information. */
+ REGINFO reginfo; /* Region information. */
+
+#define DBLOG_RECOVER 0x01 /* We are in recovery. */
+#define DBLOG_FORCE_OPEN 0x02 /* Force the DB open even if it appears
+ * to be deleted.
+ */
+ u_int32_t flags;
+};
+
+/*
+ * HDR --
+ * Log record header.
+ */
+struct __hdr {
+ u_int32_t prev; /* Previous offset. */
+ u_int32_t len; /* Current length. */
+ u_int8_t chksum[DB_MAC_KEY]; /* Current checksum. */
+ u_int8_t iv[DB_IV_BYTES]; /* IV */
+ u_int32_t orig_size; /* Original size of log record */
+ /* !!! - 'size' is not written to log, must be last in hdr */
+ size_t size; /* Size of header to use */
+};
+
+/*
+ * We use HDR internally, and then when we write out, we write out
+ * prev, len, and then a 4-byte checksum if normal operation or
+ * a crypto-checksum and IV and original size if running in crypto
+ * mode. We must store the original size in case we pad. Set the
+ * size when we set up the header. We compute a DB_MAC_KEY size
+ * checksum regardless, but we can safely just use the first 4 bytes.
+ */
+#define HDR_NORMAL_SZ 12
+#define HDR_CRYPTO_SZ 12 + DB_MAC_KEY + DB_IV_BYTES
+
+struct __log_persist {
+ u_int32_t magic; /* DB_LOGMAGIC */
+ u_int32_t version; /* DB_LOGVERSION */
+
+ u_int32_t log_size; /* Log file size. */
+ u_int32_t mode; /* Log file mode. */
+};
+
+/*
+ * LOG --
+ * Shared log region. One of these is allocated in shared memory,
+ * and describes the log.
+ */
+struct __log {
+ /*
+ * Due to alignment constraints on some architectures (e.g. HP-UX),
+ * DB_MUTEXes must be the first element of shalloced structures,
+ * and as a corollary there can be only one per structure. Thus,
+ * flush_mutex_off points to a mutex in a separately-allocated chunk.
+ */
+ DB_MUTEX fq_mutex; /* Mutex guarding file name list. */
+
+ LOGP persist; /* Persistent information. */
+
+ SH_TAILQ_HEAD(__fq1) fq; /* List of file names. */
+ int32_t fid_max; /* Max fid allocated. */
+ roff_t free_fid_stack; /* Stack of free file ids. */
+ int free_fids; /* Height of free fid stack. */
+ int free_fids_alloced; /* Number of free fid slots alloc'ed. */
+
+ /*
+ * The lsn LSN is the file offset that we're about to write and which
+ * we will return to the user.
+ */
+ DB_LSN lsn; /* LSN at current file offset. */
+
+ /*
+ * The f_lsn LSN is the LSN (returned to the user) that "owns" the
+ * first byte of the buffer. If the record associated with the LSN
+ * spans buffers, it may not reflect the physical file location of
+ * the first byte of the buffer.
+ */
+ DB_LSN f_lsn; /* LSN of first byte in the buffer. */
+ size_t b_off; /* Current offset in the buffer. */
+ u_int32_t w_off; /* Current write offset in the file. */
+ u_int32_t len; /* Length of the last record. */
+
+ /*
+ * The s_lsn LSN is the last LSN that we know is on disk, not just
+ * written, but synced. This field is protected by the flush mutex
+ * rather than by the region mutex.
+ */
+ int in_flush; /* Log flush in progress. */
+ roff_t flush_mutex_off; /* Mutex guarding flushing. */
+ DB_LSN s_lsn; /* LSN of the last sync. */
+
+ DB_LOG_STAT stat; /* Log statistics. */
+
+ /*
+ * The waiting_lsn is used by the replication system. It is the
+ * first LSN that we are holding without putting in the log, because
+ * we received one or more log records out of order. Associated with
+ * the waiting_lsn is the number of log records that we still have to
+ * receive before we decide that we should request it again.
+ */
+ DB_LSN waiting_lsn; /* First log record after a gap. */
+ DB_LSN verify_lsn; /* LSN we are waiting to verify. */
+ u_int32_t wait_recs; /* Records to wait before requesting. */
+ u_int32_t rcvd_recs; /* Records received while waiting. */
+
+ /*
+ * The ready_lsn is also used by the replication system. It is the
+ * next LSN we expect to receive. It's normally equal to "lsn",
+ * except at the beginning of a log file, at which point it's set
+ * to the LSN of the first record of the new file (after the
+ * header), rather than to 0.
+ */
+ DB_LSN ready_lsn;
+
+ /*
+ * During initialization, the log system walks forward through the
+ * last log file to find its end. If it runs into a checkpoint
+ * while it's doing so, it caches it here so that the transaction
+ * system doesn't need to walk through the file again on its
+ * initialization.
+ */
+ DB_LSN cached_ckp_lsn;
+
+ roff_t buffer_off; /* Log buffer offset in the region. */
+ u_int32_t buffer_size; /* Log buffer size. */
+
+ u_int32_t log_size; /* Log file's size. */
+ u_int32_t log_nsize; /* Next log file's size. */
+
+ u_int32_t ncommit; /* Number of txns waiting to commit. */
+
+ DB_LSN t_lsn; /* LSN of first commit */
+ SH_TAILQ_HEAD(__commit) commits;/* list of txns waiting to commit. */
+ SH_TAILQ_HEAD(__free) free_commits;/* free list of commit structs. */
+
+#ifdef HAVE_MUTEX_SYSTEM_RESOURCES
+#define LG_MAINT_SIZE (sizeof(roff_t) * DB_MAX_HANDLES)
+
+ roff_t maint_off; /* offset of region maintenance info */
+#endif
+};
+
+/*
+ * __db_commit structure --
+ * One of these is allocated for each transaction waiting
+ * to commit.
+ */
+struct __db_commit {
+ DB_MUTEX mutex; /* Mutex for txn to wait on. */
+ DB_LSN lsn; /* LSN of commit record. */
+ SH_TAILQ_ENTRY links; /* Either on free or waiting list. */
+
+#define DB_COMMIT_FLUSH 0x0001 /* Flush the log when you wake up. */
+ u_int32_t flags;
+};
+
+/*
+ * FNAME --
+ * File name and id.
+ */
+struct __fname {
+ SH_TAILQ_ENTRY q; /* File name queue. */
+
+ int32_t id; /* Logging file id. */
+ DBTYPE s_type; /* Saved DB type. */
+
+ roff_t name_off; /* Name offset. */
+ db_pgno_t meta_pgno; /* Page number of the meta page. */
+ u_int8_t ufid[DB_FILE_ID_LEN]; /* Unique file id. */
+
+ u_int32_t create_txnid; /*
+ * Txn ID of the DB create, stored so
+ * we can log it at register time.
+ */
+};
+
+/* File open/close register log record opcodes. */
+#define LOG_CHECKPOINT 1 /* Checkpoint: file name/id dump. */
+#define LOG_CLOSE 2 /* File close. */
+#define LOG_OPEN 3 /* File open. */
+#define LOG_RCLOSE 4 /* File close after recovery. */
+
+#define CHECK_LSN(redo, cmp, lsn, prev) \
+ DB_ASSERT(!DB_REDO(redo) || \
+ (cmp) >= 0 || IS_NOT_LOGGED_LSN(*lsn)); \
+ if (DB_REDO(redo) && (cmp) < 0 && !IS_NOT_LOGGED_LSN(*(lsn))) { \
+ __db_err(dbenv, \
+ "Log sequence error: page LSN %lu %lu; previous LSN %lu %lu", \
+ (u_long)(lsn)->file, (u_long)(lsn)->offset, \
+ (u_long)(prev)->file, (u_long)(prev)->offset); \
+ goto out; \
+ }
+
+/*
+ * Status codes indicating the validity of a log file examined by
+ * __log_valid().
+ */
+typedef enum {
+ DB_LV_INCOMPLETE,
+ DB_LV_NONEXISTENT,
+ DB_LV_NORMAL,
+ DB_LV_OLD_READABLE,
+ DB_LV_OLD_UNREADABLE
+} logfile_validity;
+
+#include "dbinc_auto/dbreg_auto.h"
+#include "dbinc_auto/dbreg_ext.h"
+#include "dbinc_auto/log_ext.h"
+#endif /* !_LOG_H_ */
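
Log files are named with the LFNAME template, and each record is preceded by an HDR whose on-disk size depends on whether encryption is in use (HDR_NORMAL_SZ vs. HDR_CRYPTO_SZ). A small sketch of both, assuming the definitions above are in scope (DB_MAC_KEY and DB_IV_BYTES come from the crypto support elsewhere); the encrypted flag is just a stand-in for however the caller knows the environment runs with crypto:

#include <stdio.h>

static void
log_layout_example(int encrypted)
{
    char name[32];
    size_t hdrsize;

    (void)snprintf(name, sizeof(name), LFNAME, 7);
    /* name is now "log.0000000007" */

    hdrsize = encrypted ? HDR_CRYPTO_SZ : HDR_NORMAL_SZ;
    printf("%s: %lu header bytes per record\n", name, (u_long)hdrsize);
}
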
diff --git a/bdb/dbinc/mp.h b/bdb/dbinc/mp.h
new file mode 100644
index 00000000000..5c805b92364
--- /dev/null
+++ b/bdb/dbinc/mp.h
@@ -0,0 +1,293 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: mp.h,v 11.44 2002/08/06 06:11:21 bostic Exp $
+ */
+
+#ifndef _DB_MP_H_
+#define _DB_MP_H_
+
+struct __bh; typedef struct __bh BH;
+struct __db_mpool_hash; typedef struct __db_mpool_hash DB_MPOOL_HASH;
+struct __db_mpreg; typedef struct __db_mpreg DB_MPREG;
+struct __mpool; typedef struct __mpool MPOOL;
+
+ /* We require at least 20KB of cache. */
+#define DB_CACHESIZE_MIN (20 * 1024)
+
+typedef enum {
+ DB_SYNC_ALLOC, /* Flush for allocation. */
+ DB_SYNC_CACHE, /* Checkpoint or flush entire cache. */
+ DB_SYNC_FILE, /* Flush file. */
+ DB_SYNC_TRICKLE /* Trickle sync. */
+} db_sync_op;
+
+/*
+ * DB_MPOOL --
+ * Per-process memory pool structure.
+ */
+struct __db_mpool {
+ /* These fields need to be protected for multi-threaded support. */
+ DB_MUTEX *mutexp; /* Structure thread lock. */
+
+ /* List of pgin/pgout routines. */
+ LIST_HEAD(__db_mpregh, __db_mpreg) dbregq;
+
+ /* List of DB_MPOOLFILE's. */
+ TAILQ_HEAD(__db_mpoolfileh, __db_mpoolfile) dbmfq;
+
+ /*
+ * The dbenv, nreg and reginfo fields are not thread protected,
+ * as they are initialized during mpool creation, and not modified
+ * again.
+ */
+ DB_ENV *dbenv; /* Enclosing environment. */
+
+ u_int32_t nreg; /* N underlying cache regions. */
+ REGINFO *reginfo; /* Underlying cache regions. */
+};
+
+/*
+ * DB_MPREG --
+ * DB_MPOOL registry of pgin/pgout functions.
+ */
+struct __db_mpreg {
+ LIST_ENTRY(__db_mpreg) q; /* Linked list. */
+
+ int32_t ftype; /* File type. */
+ /* Pgin, pgout routines. */
+ int (*pgin) __P((DB_ENV *, db_pgno_t, void *, DBT *));
+ int (*pgout) __P((DB_ENV *, db_pgno_t, void *, DBT *));
+};
+
+/*
+ * NCACHE --
+ * Select a cache based on the file and the page number. Assumes accesses
+ * are uniform across pages, which is probably OK. What we really want to
+ * avoid is anything that puts all pages from any single file in the same
+ * cache, as we expect that file access will be bursty, and to avoid
+ * putting all page number N pages in the same cache as we expect access
+ * to the metapages (page 0) and the root of a btree (page 1) to be much
+ * more frequent than a random data page.
+ */
+#define NCACHE(mp, mf_offset, pgno) \
+ (((pgno) ^ ((mf_offset) >> 3)) % ((MPOOL *)mp)->nreg)
+
+/*
+ * NBUCKET --
+ * We make the assumption that early pages of the file are more likely
+ * to be retrieved than the later pages, which means the top bits will
+ * be more interesting for hashing as they're less likely to collide.
+ * That said, as 512 8K pages represents a 4MB file, so only reasonably
+ * large files will have page numbers with any other than the bottom 9
+ * bits set. We XOR in the MPOOL offset of the MPOOLFILE that backs the
+ * page, since that should also be unique for the page. We don't want
+ * to do anything very fancy -- speed is more important to us than using
+ * good hashing.
+ */
+#define NBUCKET(mc, mf_offset, pgno) \
+ (((pgno) ^ ((mf_offset) << 9)) % (mc)->htab_buckets)
+
+/*
+ * MPOOL --
+ * Shared memory pool region.
+ */
+struct __mpool {
+ /*
+ * The memory pool can be broken up into individual pieces/files.
+ * Not what we would have liked, but on Solaris you can allocate
+ * only a little more than 2GB of memory in a contiguous chunk,
+ * and I expect to see more systems with similar issues.
+ *
+ * While this structure is duplicated in each piece of the cache,
+ * the first of these pieces/files describes the entire pool, the
+ * second only describes a piece of the cache.
+ */
+
+ /*
+ * The lsn field and list of underlying MPOOLFILEs are thread protected
+ * by the region lock.
+ */
+ DB_LSN lsn; /* Maximum checkpoint LSN. */
+
+ SH_TAILQ_HEAD(__mpfq) mpfq; /* List of MPOOLFILEs. */
+
+ /*
+ * The nreg, regids and maint_off fields are not thread protected,
+ * as they are initialized during mpool creation, and not modified
+ * again.
+ */
+ u_int32_t nreg; /* Number of underlying REGIONS. */
+ roff_t regids; /* Array of underlying REGION Ids. */
+
+#ifdef HAVE_MUTEX_SYSTEM_RESOURCES
+ roff_t maint_off; /* Maintenance information offset */
+#endif
+
+ /*
+ * The following structure fields only describe the per-cache portion
+ * of the region.
+ *
+ * The htab and htab_buckets fields are not thread protected as they
+ * are initialized during mpool creation, and not modified again.
+ *
+ * The last_checked and lru_count fields are thread protected by
+ * the region lock.
+ */
+ int htab_buckets; /* Number of hash table entries. */
+ roff_t htab; /* Hash table offset. */
+ u_int32_t last_checked; /* Last bucket checked for free. */
+ u_int32_t lru_count; /* Counter for buffer LRU */
+
+ /*
+ * The stat fields are generally not thread protected, and cannot be
+ * trusted. Note that st_pages is an exception, and is always updated
+ * inside a region lock (although it is sometimes read outside of the
+ * region lock).
+ */
+ DB_MPOOL_STAT stat; /* Per-cache mpool statistics. */
+};
+
+struct __db_mpool_hash {
+ DB_MUTEX hash_mutex; /* Per-bucket mutex. */
+
+ DB_HASHTAB hash_bucket; /* Head of bucket. */
+
+ u_int32_t hash_page_dirty;/* Count of dirty pages. */
+ u_int32_t hash_priority; /* Minimum priority of bucket buffer. */
+};
+
+/*
+ * The base mpool priority is 1/4th of the name space, or just under 2^30.
+ * When the LRU counter wraps, we shift everybody down to a base-relative
+ * value.
+ */
+#define MPOOL_BASE_DECREMENT (UINT32_T_MAX - (UINT32_T_MAX / 4))
+
+/*
+ * Mpool priorities from low to high. Defined in terms of fractions of the
+ * buffers in the pool.
+ */
+#define MPOOL_PRI_VERY_LOW -1 /* Dead duck. Check and set to 0. */
+#define MPOOL_PRI_LOW -2 /* Low. */
+#define MPOOL_PRI_DEFAULT 0 /* No adjustment -- special case.*/
+#define MPOOL_PRI_HIGH 10 /* With the dirty buffers. */
+#define MPOOL_PRI_DIRTY 10 /* Dirty gets a 10% boost. */
+#define MPOOL_PRI_VERY_HIGH 1 /* Add number of buffers in pool. */
+
+/*
+ * MPOOLFILE_IGNORE --
+ * Discard an MPOOLFILE and any buffers it references: update the flags
+ * so we never try to write buffers associated with the file, nor can we
+ * find it when looking for files to join. In addition, clear the ftype
+ * field, there's no reason to post-process pages, they can be discarded
+ * by any thread.
+ *
+ * Expects the MPOOLFILE mutex to be held.
+ */
+#define MPOOLFILE_IGNORE(mfp) { \
+ (mfp)->ftype = 0; \
+ F_SET(mfp, MP_DEADFILE); \
+}
+
+/*
+ * MPOOLFILE --
+ * Shared DB_MPOOLFILE information.
+ */
+struct __mpoolfile {
+ DB_MUTEX mutex;
+
+ /* Protected by MPOOLFILE mutex. */
+ u_int32_t mpf_cnt; /* Ref count: DB_MPOOLFILEs. */
+ u_int32_t block_cnt; /* Ref count: blocks in cache. */
+
+ roff_t path_off; /* File name location. */
+
+ /* Protected by mpool cache 0 region lock. */
+ SH_TAILQ_ENTRY q; /* List of MPOOLFILEs */
+ db_pgno_t last_pgno; /* Last page in the file. */
+ db_pgno_t orig_last_pgno; /* Original last page in the file. */
+
+ /*
+ * None of the following fields are thread protected.
+ *
+ * There are potential races with the ftype field because it's read
+ * without holding a lock. However, it has to be set before adding
+ * any buffers to the cache that depend on it being set, so there
+ * would need to be incorrect operation ordering to have a problem.
+ *
+ * There are potential races with the priority field because it's read
+ * without holding a lock. However, a collision is unlikely and if it
+ * happens is of little consequence.
+ *
+ * We do not protect the statistics in "stat" because of the cost of
+ * the mutex in the get/put routines. There is a chance that a count
+ * will get lost.
+ *
+ * The remaining fields are initialized at open and never subsequently
+ * modified, except for the MP_DEADFILE, which is only set and never
+ * unset. (If there was more than one flag that was subsequently set,
+ * there might be a race, but with a single flag there can't be.)
+ */
+ int32_t ftype; /* File type. */
+
+ int32_t priority; /* Priority when unpinning buffer. */
+
+ DB_MPOOL_FSTAT stat; /* Per-file mpool statistics. */
+
+ int32_t lsn_off; /* Page's LSN offset. */
+ u_int32_t clear_len; /* Bytes to clear on page create. */
+
+ roff_t fileid_off; /* File ID string location. */
+
+ roff_t pgcookie_len; /* Pgin/pgout cookie length. */
+ roff_t pgcookie_off; /* Pgin/pgout cookie location. */
+
+#define MP_CAN_MMAP 0x01 /* If the file can be mmap'd. */
+#define MP_DEADFILE 0x02 /* Dirty pages can simply be trashed. */
+#define MP_DIRECT 0x04 /* No OS buffering. */
+#define MP_EXTENT 0x08 /* Extent file. */
+#define MP_TEMP 0x10 /* Backing file is a temporary. */
+#define MP_UNLINK 0x20 /* Unlink file on last close. */
+ u_int32_t flags;
+};
+
+/*
+ * BH --
+ * Buffer header.
+ */
+struct __bh {
+ DB_MUTEX mutex; /* Buffer thread/process lock. */
+
+ u_int16_t ref; /* Reference count. */
+ u_int16_t ref_sync; /* Sync wait-for reference count. */
+
+#define BH_CALLPGIN 0x001 /* Convert the page before use. */
+#define BH_DIRTY 0x002 /* Page was modified. */
+#define BH_DIRTY_CREATE 0x004 /* Page created, must be written. */
+#define BH_DISCARD 0x008 /* Page is useless. */
+#define BH_LOCKED 0x010 /* Page is locked (I/O in progress). */
+#define BH_TRASH 0x020 /* Page is garbage. */
+ u_int16_t flags;
+
+ u_int32_t priority; /* LRU priority. */
+ SH_TAILQ_ENTRY hq; /* MPOOL hash bucket queue. */
+
+ db_pgno_t pgno; /* Underlying MPOOLFILE page number. */
+ roff_t mf_offset; /* Associated MPOOLFILE offset. */
+
+ /*
+ * !!!
+ * This array must be at least size_t aligned -- the DB access methods
+ * put PAGE and other structures into it, and then access them directly.
+ * (We guarantee size_t alignment to applications in the documentation,
+ * too.)
+ */
+ u_int8_t buf[1]; /* Variable length data. */
+};
+
+#include "dbinc_auto/mp_ext.h"
+#endif /* !_DB_MP_H_ */
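
NCACHE and NBUCKET above turn a (file, page) pair into a cache region and a hash bucket with cheap XOR/modulo arithmetic. The same computation written out as plain functions, with illustrative parameter names (nreg and htab_buckets stand for the fields of the same name in MPOOL):

#include <stdint.h>

/* Which cache region holds this page (cf. NCACHE). */
static uint32_t
select_cache(uint32_t mf_offset, uint32_t pgno, uint32_t nreg)
{
    return ((pgno ^ (mf_offset >> 3)) % nreg);
}

/* Which hash bucket inside that region's table (cf. NBUCKET). */
static uint32_t
select_bucket(uint32_t mf_offset, uint32_t pgno, uint32_t htab_buckets)
{
    return ((pgno ^ (mf_offset << 9)) % htab_buckets);
}
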
diff --git a/bdb/include/mutex.h b/bdb/dbinc/mutex.h
index 4c1b265355d..41bb1b4bb59 100644
--- a/bdb/include/mutex.h
+++ b/bdb/dbinc/mutex.h
@@ -1,12 +1,15 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Copyright (c) 1996-2002
* Sleepycat Software. All rights reserved.
*
- * $Id: mutex.h,v 11.41 2000/12/22 19:28:15 bostic Exp $
+ * $Id: mutex.h,v 11.71 2002/09/10 01:36:48 bostic Exp $
*/
+#ifndef _DB_MUTEX_H_
+#define _DB_MUTEX_H_
+
/*
* Some of the Berkeley DB ports require single-threading at various
* places in the code. In those cases, these #defines will be set.
@@ -14,16 +17,6 @@
#define DB_BEGIN_SINGLE_THREAD
#define DB_END_SINGLE_THREAD
-/*
- * When the underlying system mutexes require system resources, we have
- * to clean up after application failure. This violates the rule that
- * we never look at a shared region after a failure, but there's no other
- * choice. In those cases, this #define is set.
- */
-#ifdef HAVE_QNX
-#define MUTEX_SYSTEM_RESOURCES
-#endif
-
/*********************************************************************
* POSIX.1 pthreads interface.
*********************************************************************/
@@ -239,15 +232,13 @@ typedef unsigned char tsl_t;
* when we're first looking for a DB environment.
*********************************************************************/
#ifdef HAVE_MUTEX_VXWORKS
-#define MUTEX_SYSTEM_RESOURCES
-
-#include "semLib.h"
+#include "taskLib.h"
typedef SEM_ID tsl_t;
#define MUTEX_ALIGN sizeof(unsigned int)
#ifdef LOAD_ACTUAL_MUTEX_CODE
#define MUTEX_SET(tsl) (semTake((*tsl), WAIT_FOREVER) == OK)
-#define MUTEX_UNSET(tsl) (semGive((*tsl)) == OK)
+#define MUTEX_UNSET(tsl) (semGive((*tsl)))
#define MUTEX_INIT(tsl) \
((*(tsl) = semBCreate(SEM_Q_FIFO, SEM_FULL)) == NULL)
#define MUTEX_DESTROY(tsl) semDelete(*tsl)
@@ -305,13 +296,18 @@ typedef unsigned int tsl_t;
* Win32
*********************************************************************/
#ifdef HAVE_MUTEX_WIN32
-typedef unsigned int tsl_t;
-#define MUTEX_ALIGN sizeof(unsigned int)
-
-#ifdef LOAD_ACTUAL_MUTEX_CODE
-#define MUTEX_INIT(x) 0
+#define MUTEX_FIELDS \
+ LONG tas; \
+ LONG nwaiters; \
+ union { \
+ HANDLE event; /* Windows event HANDLE for wakeups */ \
+ u_int32_t id; /* ID used for shared mutexes */ \
+ } /* anonymous */;
+
+#if defined(LOAD_ACTUAL_MUTEX_CODE)
#define MUTEX_SET(tsl) (!InterlockedExchange((PLONG)tsl, 1))
#define MUTEX_UNSET(tsl) (*(tsl) = 0)
+#define MUTEX_INIT(tsl) MUTEX_UNSET(tsl)
#endif
#endif
@@ -394,6 +390,31 @@ MUTEX_UNSET(tsl_t *tsl) {
#endif
/*********************************************************************
+ * ARM/gcc assembly.
+ *********************************************************************/
+#ifdef HAVE_MUTEX_ARM_GCC_ASSEMBLY
+typedef unsigned char tsl_t;
+
+#ifdef LOAD_ACTUAL_MUTEX_CODE
+/*
+ * For arm/gcc, 0 is clear, 1 is set.
+ */
+#define MUTEX_SET(tsl) ({ \
+ int __r; \
+ asm volatile("swpb %0, %1, [%2]" \
+ : "=r" (__r) \
+ : "0" (1), "r" (tsl) \
+ : "memory" \
+ ); \
+ __r & 1; \
+})
+
+#define MUTEX_UNSET(tsl) (*(volatile tsl_t *)(tsl) = 0)
+#define MUTEX_INIT(tsl) MUTEX_UNSET(tsl)
+#endif
+#endif
+
+/*********************************************************************
* HPPA/gcc assembly.
*********************************************************************/
#ifdef HAVE_MUTEX_HPPA_GCC_ASSEMBLY
@@ -414,7 +435,7 @@ typedef u_int32_t tsl_t;
})
#define MUTEX_UNSET(tsl) (*(tsl) = -1)
-#define MUTEX_INIT(tsl) MUTEX_UNSET(tsl)
+#define MUTEX_INIT(tsl) (MUTEX_UNSET(tsl), 0)
#endif
#endif
@@ -447,7 +468,8 @@ typedef unsigned char tsl_t;
/*********************************************************************
* PowerPC/gcc assembly.
*********************************************************************/
-#ifdef HAVE_MUTEX_PPC_GCC_ASSEMBLY
+#if defined(HAVE_MUTEX_PPC_GENERIC_GCC_ASSEMBLY) || \
+ (HAVE_MUTEX_PPC_APPLE_GCC_ASSEMBLY)
typedef u_int32_t tsl_t;
#ifdef LOAD_ACTUAL_MUTEX_CODE
@@ -480,8 +502,38 @@ typedef u_int32_t tsl_t;
* common case of a locked mutex without wasting cycles making a reservation.
*
* 'set' mutexes have the value 1, like on Intel; the returned value from
- * MUTEX_SET() is 1 if the mutex previously had its low bit set, 0 otherwise.
+ * MUTEX_SET() is 1 if the mutex previously had its low bit clear, 0 otherwise.
+ *
+ * Mutexes on Mac OS X work the same way as the standard PowerPC version, but
+ * the assembler syntax is subtly different -- the standard PowerPC version
+ * assembles but doesn't work correctly. This version makes (unnecessary?)
+ * use of a stupid linker trick: __db_mutex_tas_dummy is never called, but the
+ * ___db_mutex_set label is used as a function name.
*/
+#ifdef HAVE_MUTEX_PPC_APPLE_GCC_ASSEMBLY
+extern int __db_mutex_set __P((volatile tsl_t *));
+void
+__db_mutex_tas_dummy()
+{
+ __asm__ __volatile__(" \n\
+ .globl ___db_mutex_set \n\
+___db_mutex_set: \n\
+ lwarx r5,0,r3 \n\
+ cmpwi r5,0 \n\
+ bne fail \n\
+ addi r5,r5,1 \n\
+ stwcx. r5,0,r3 \n\
+ beq success \n\
+fail: \n\
+ li r3,0 \n\
+ blr \n\
+success: \n\
+ li r3,1 \n\
+ blr");
+}
+#define MUTEX_SET(tsl) __db_mutex_set(tsl)
+#endif
+#ifdef HAVE_MUTEX_PPC_GENERIC_GCC_ASSEMBLY
#define MUTEX_SET(tsl) ({ \
int __one = 1; \
int __r; \
@@ -496,8 +548,38 @@ typedef u_int32_t tsl_t;
1:" \
: "=&r" (__r) \
: "r" (__l), "r" (__one)); \
- __r & 1; \
+ !(__r & 1); \
})
+#endif
+#define MUTEX_UNSET(tsl) (*(tsl) = 0)
+#define MUTEX_INIT(tsl) MUTEX_UNSET(tsl)
+#endif
+#endif
+
+/*********************************************************************
+ * S/390 32-bit assembly.
+ *********************************************************************/
+#ifdef HAVE_MUTEX_S390_GCC_ASSEMBLY
+typedef int tsl_t;
+
+#ifdef LOAD_ACTUAL_MUTEX_CODE
+/*
+ * For gcc/S390, 0 is clear, 1 is set.
+ */
+static inline int
+MUTEX_SET(tsl_t *tsl) { \
+ register tsl_t *__l = (tsl); \
+ int __r; \
+ asm volatile( \
+ " la 1,%1\n" \
+ " lhi 0,1\n" \
+ " l %0,%1\n" \
+ "0: cs %0,0,0(1)\n" \
+ " jl 0b" \
+ : "=&d" (__r), "+m" (*__l) \
+ : : "0", "1", "cc"); \
+ return !__r; \
+}
#define MUTEX_UNSET(tsl) (*(tsl) = 0)
#define MUTEX_INIT(tsl) MUTEX_UNSET(tsl)
@@ -552,9 +634,9 @@ typedef unsigned char tsl_t;
* argument is never read, but only overwritten.)
*
* The stbar is needed for v8, and is implemented as membar #sync on v9,
- + so is functional there as well. For v7, stbar may generate an illegal
- + instruction and we have no way to tell what we're running on. Some
- + operating systems notice and skip this instruction in the fault handler.
+ * so is functional there as well. For v7, stbar may generate an illegal
+ * instruction and we have no way to tell what we're running on. Some
+ * operating systems notice and skip this instruction in the fault handler.
*
* For gcc/sparc, 0 is clear, 1 is set.
*/
@@ -611,28 +693,6 @@ typedef unsigned char tsl_t;
#endif
#endif
-#ifdef HAVE_MUTEX_X86_64_GCC_ASSEMBLY
-typedef unsigned char tsl_t;
-
-#ifdef LOAD_ACTUAL_MUTEX_CODE
-/*
- * For gcc/x86, 0 is clear, 1 is set.
- */
-#define MUTEX_SET(tsl) ({ \
- register tsl_t *__l = (tsl); \
- int __r; \
- asm volatile("mov $1,%%rax; lock; xchgb %1,%%al; xor $1,%%rax"\
- : "=&a" (__r), "=m" (*__l) \
- : "1" (*__l) \
- ); \
- __r & 1; \
-})
-
-#define MUTEX_UNSET(tsl) (*(tsl) = 0)
-#define MUTEX_INIT(tsl) MUTEX_UNSET(tsl)
-#endif
-#endif
-
/*
* Mutex alignment defaults to one byte.
*
@@ -655,10 +715,35 @@ typedef unsigned char tsl_t;
#endif
#endif
+/*
+ * !!!
+ * These defines are separated into the u_int8_t flags stored in the
+ * mutex below, and the 32 bit flags passed to __db_mutex_setup.
+ * But they must co-exist and not overlap. Flags to __db_mutex_setup are:
+ *
+ * MUTEX_ALLOC - Use when the mutex to initialize needs to be allocated.
+ * The 'ptr' arg to __db_mutex_setup should be a DB_MUTEX ** whenever
+ * you use this flag. If this flag is not set, the 'ptr' arg is
+ * a DB_MUTEX *.
+ * MUTEX_NO_RECORD - Explicitly do not record the mutex in the region.
+ * Otherwise the mutex will be recorded by default. If you set
+ * this you need to understand why you don't need it recorded. The
+ * *only* ones not recorded are those that are part of region structures
+ * that only get destroyed when the regions are destroyed.
+ * MUTEX_NO_RLOCK - Explicitly do not lock the given region otherwise
+ * the region will be locked by default.
+ * MUTEX_SELF_BLOCK - Set if self blocking mutex.
+ * MUTEX_THREAD - Set if mutex is a thread-only mutex.
+ */
#define MUTEX_IGNORE 0x001 /* Ignore, no lock required. */
#define MUTEX_INITED 0x002 /* Mutex is successfully initialized */
-#define MUTEX_SELF_BLOCK 0x004 /* Must block self. */
-#define MUTEX_THREAD 0x008 /* Thread-only mutex. */
+#define MUTEX_MPOOL 0x004 /* Allocated from mpool. */
+#define MUTEX_SELF_BLOCK 0x008 /* Must block self. */
+/* Flags only, may be larger than 0xff. */
+#define MUTEX_ALLOC 0x00000100 /* Allocate and init a mutex */
+#define MUTEX_NO_RECORD 0x00000200 /* Do not record lock */
+#define MUTEX_NO_RLOCK 0x00000400 /* Do not acquire region lock */
+#define MUTEX_THREAD 0x00000800 /* Thread-only mutex. */
/* Mutex. */
struct __mutex_t {
@@ -676,7 +761,9 @@ struct __mutex_t {
#endif
u_int32_t mutex_set_wait; /* Granted after wait. */
u_int32_t mutex_set_nowait; /* Granted without waiting. */
-#ifdef MUTEX_SYSTEM_RESOURCES
+ u_int32_t mutex_set_spin; /* Granted without spinning. */
+ u_int32_t mutex_set_spins; /* Total number of spins. */
+#ifdef HAVE_MUTEX_SYSTEM_RESOURCES
roff_t reg_off; /* Shared lock info offset. */
#endif
@@ -685,36 +772,43 @@ struct __mutex_t {
/* Redirect calls to the correct functions. */
#ifdef HAVE_MUTEX_THREADS
-#if defined(HAVE_MUTEX_PTHREADS) || defined(HAVE_MUTEX_SOLARIS_LWP) || defined(HAVE_MUTEX_UI_THREADS)
-#define __db_mutex_init(a, b, c, d) __db_pthread_mutex_init(a, b, d)
-#define __db_mutex_lock(a, b, c) __db_pthread_mutex_lock(a, b)
+#if defined(HAVE_MUTEX_PTHREADS) || \
+ defined(HAVE_MUTEX_SOLARIS_LWP) || \
+ defined(HAVE_MUTEX_UI_THREADS)
+#define __db_mutex_init_int(a, b, c, d) __db_pthread_mutex_init(a, b, d)
+#define __db_mutex_lock(a, b) __db_pthread_mutex_lock(a, b)
#define __db_mutex_unlock(a, b) __db_pthread_mutex_unlock(a, b)
#define __db_mutex_destroy(a) __db_pthread_mutex_destroy(a)
+#elif defined(HAVE_MUTEX_WIN32)
+#define __db_mutex_init_int(a, b, c, d) __db_win32_mutex_init(a, b, d)
+#define __db_mutex_lock(a, b) __db_win32_mutex_lock(a, b)
+#define __db_mutex_unlock(a, b) __db_win32_mutex_unlock(a, b)
+#define __db_mutex_destroy(a) __db_win32_mutex_destroy(a)
#else
-#define __db_mutex_init(a, b, c, d) __db_tas_mutex_init(a, b, d)
-#define __db_mutex_lock(a, b, c) __db_tas_mutex_lock(a, b)
+#define __db_mutex_init_int(a, b, c, d) __db_tas_mutex_init(a, b, d)
+#define __db_mutex_lock(a, b) __db_tas_mutex_lock(a, b)
#define __db_mutex_unlock(a, b) __db_tas_mutex_unlock(a, b)
#define __db_mutex_destroy(a) __db_tas_mutex_destroy(a)
#endif
#else
-#define __db_mutex_init(a, b, c, d) __db_fcntl_mutex_init(a, b, c)
-#define __db_mutex_lock(a, b, c) __db_fcntl_mutex_lock(a, b, c)
+#define __db_mutex_init_int(a, b, c, d) __db_fcntl_mutex_init(a, b, c)
+#define __db_mutex_lock(a, b) __db_fcntl_mutex_lock(a, b)
#define __db_mutex_unlock(a, b) __db_fcntl_mutex_unlock(a, b)
#define __db_mutex_destroy(a) __db_fcntl_mutex_destroy(a)
#endif
/* Redirect system resource calls to correct functions */
-#ifdef MUTEX_SYSTEM_RESOURCES
+#ifdef HAVE_MUTEX_SYSTEM_RESOURCES
#define __db_maintinit(a, b, c) __db_shreg_maintinit(a, b, c)
#define __db_shlocks_clear(a, b, c) __db_shreg_locks_clear(a, b, c)
#define __db_shlocks_destroy(a, b) __db_shreg_locks_destroy(a, b)
-#define __db_shmutex_init(a, b, c, d, e, f) \
+#define __db_mutex_init(a, b, c, d, e, f) \
__db_shreg_mutex_init(a, b, c, d, e, f)
#else
#define __db_maintinit(a, b, c)
#define __db_shlocks_clear(a, b, c)
#define __db_shlocks_destroy(a, b)
-#define __db_shmutex_init(a, b, c, d, e, f) __db_mutex_init(a, b, c, d)
+#define __db_mutex_init(a, b, c, d, e, f) __db_mutex_init_int(a, b, c, d)
#endif
/*
@@ -731,22 +825,22 @@ struct __mutex_t {
* We want to switch threads as often as possible. Yield every time
* we get a mutex to ensure contention.
*/
-#define MUTEX_LOCK(dbenv, mp, fh) \
- if (!F_ISSET((MUTEX *)(mp), MUTEX_IGNORE)) \
- (void)__db_mutex_lock(dbenv, mp, fh); \
- if (DB_GLOBAL(db_pageyield)) \
+#define MUTEX_LOCK(dbenv, mp) \
+ if (!F_ISSET((mp), MUTEX_IGNORE)) \
+ DB_ASSERT(__db_mutex_lock(dbenv, mp) == 0); \
+ if (F_ISSET(dbenv, DB_ENV_YIELDCPU)) \
__os_yield(NULL, 1);
#else
-#define MUTEX_LOCK(dbenv, mp, fh) \
- if (!F_ISSET((MUTEX *)(mp), MUTEX_IGNORE)) \
- (void)__db_mutex_lock(dbenv, mp, fh);
+#define MUTEX_LOCK(dbenv, mp) \
+ if (!F_ISSET((mp), MUTEX_IGNORE)) \
+ (void)__db_mutex_lock(dbenv, mp);
#endif
#define MUTEX_UNLOCK(dbenv, mp) \
- if (!F_ISSET((MUTEX *)(mp), MUTEX_IGNORE)) \
+ if (!F_ISSET((mp), MUTEX_IGNORE)) \
(void)__db_mutex_unlock(dbenv, mp);
#define MUTEX_THREAD_LOCK(dbenv, mp) \
if (mp != NULL) \
- MUTEX_LOCK(dbenv, mp, NULL)
+ MUTEX_LOCK(dbenv, mp)
#define MUTEX_THREAD_UNLOCK(dbenv, mp) \
if (mp != NULL) \
MUTEX_UNLOCK(dbenv, mp)
@@ -764,3 +858,22 @@ struct __mutex_t {
#define DB_FCNTL_OFF_GEN 0 /* Everything else. */
#define DB_FCNTL_OFF_LOCK 1 /* Lock subsystem offset. */
#define DB_FCNTL_OFF_MPOOL 2 /* Mpool subsystem offset. */
+
+#ifdef HAVE_MUTEX_SYSTEM_RESOURCES
+/*
+ * When the underlying mutexes require library (most likely heap) or system
+ * resources, we have to clean up when we discard mutexes (for the library
+ * resources) and both when discarding mutexes and after application failure
+ * (for the mutexes requiring system resources). This violates the rule that
+ * we never look at a shared region after application failure, but we've no
+ * other choice. In those cases, the #define HAVE_MUTEX_SYSTEM_RESOURCES is
+ * set.
+ *
+ * To support mutex release after application failure, allocate thread-handle
+ * mutexes in shared memory instead of in the heap. The number of slots we
+ * allocate for this purpose isn't configurable, but this tends to be an issue
+ * only on embedded systems where we don't expect large server applications.
+ */
+#define DB_MAX_HANDLES 100 /* Mutex slots for handles. */
+#endif
+#endif /* !_DB_MUTEX_H_ */
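The mutex.h hunks above show the two-layer pattern at work: the generic entry points (__db_mutex_init_int, __db_mutex_lock, ...) are bound at compile time to whichever back end the build selected (pthreads, Win32, test-and-set, or fcntl), and the MUTEX_LOCK/MUTEX_UNLOCK wrappers skip any mutex flagged MUTEX_IGNORE. Below is a minimal standalone sketch of the same pattern over a pthread back end; the my_* names are hypothetical and nothing in it is taken from this tree.

/*
 * Sketch: compile-time redirection of generic mutex entry points onto a
 * pthread back end, skipping mutexes flagged as "ignore".
 */
#include <pthread.h>
#include <stdio.h>

#define MY_MUTEX_IGNORE 0x001           /* No lock required. */

typedef struct {
        pthread_mutex_t m;
        unsigned int flags;
} my_mutex_t;

#define my_mutex_init(mp)    pthread_mutex_init(&(mp)->m, NULL)
#define my_mutex_destroy(mp) pthread_mutex_destroy(&(mp)->m)
#define MY_MUTEX_LOCK(mp) do {                                          \
        if (!((mp)->flags & MY_MUTEX_IGNORE))                           \
                (void)pthread_mutex_lock(&(mp)->m);                     \
} while (0)
#define MY_MUTEX_UNLOCK(mp) do {                                        \
        if (!((mp)->flags & MY_MUTEX_IGNORE))                           \
                (void)pthread_mutex_unlock(&(mp)->m);                   \
} while (0)

int
main(void)
{
        my_mutex_t mtx;

        mtx.flags = 0;                  /* Not an "ignore" mutex. */
        my_mutex_init(&mtx);
        MY_MUTEX_LOCK(&mtx);
        printf("critical section\n");
        MY_MUTEX_UNLOCK(&mtx);
        my_mutex_destroy(&mtx);
        return (0);
}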
diff --git a/bdb/include/os.h b/bdb/dbinc/os.h
index b5d469e88fa..01ca0ac470d 100644
--- a/bdb/include/os.h
+++ b/bdb/dbinc/os.h
@@ -1,28 +1,33 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1997, 1998, 1999, 2000
+ * Copyright (c) 1997-2002
* Sleepycat Software. All rights reserved.
*
- * $Id: os.h,v 11.5 2000/10/27 20:32:01 dda Exp $
+ * $Id: os.h,v 11.14 2002/03/27 04:34:55 bostic Exp $
*/
+#ifndef _DB_OS_H_
+#define _DB_OS_H_
+
#if defined(__cplusplus)
extern "C" {
#endif
-/*
- * Filehandle.
- */
+
+/* DB filehandle. */
struct __fh_t {
#if defined(DB_WIN32)
HANDLE handle; /* Windows/32 file handle. */
#endif
int fd; /* POSIX file descriptor. */
+ char *name; /* File name. */
u_int32_t log_size; /* XXX: Log file size. */
+ u_int32_t pagesize; /* XXX: Page size. */
#define DB_FH_NOSYNC 0x01 /* Handle doesn't need to be sync'd. */
-#define DB_FH_VALID 0x02 /* Handle is valid. */
+#define DB_FH_UNLINK 0x02 /* Unlink on close */
+#define DB_FH_VALID 0x04 /* Handle is valid. */
u_int8_t flags;
};
@@ -34,7 +39,7 @@ struct __fh_t {
#define DB_IO_WRITE 2
typedef struct __io_t {
DB_FH *fhp; /* I/O file handle. */
- MUTEX *mutexp; /* Mutex to lock. */
+ DB_MUTEX *mutexp; /* Mutex to lock. */
size_t pagesize; /* Page size. */
db_pgno_t pgno; /* Page number. */
u_int8_t *buf; /* Buffer. */
@@ -44,3 +49,6 @@ typedef struct __io_t {
#if defined(__cplusplus)
}
#endif
+
+#include "dbinc_auto/os_ext.h"
+#endif /* !_DB_OS_H_ */
diff --git a/bdb/include/qam.h b/bdb/dbinc/qam.h
index 88cd68776a8..0306ed07d2a 100644
--- a/bdb/include/qam.h
+++ b/bdb/dbinc/qam.h
@@ -1,12 +1,15 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1999, 2000
+ * Copyright (c) 1999-2002
* Sleepycat Software. All rights reserved.
*
- * $Id: qam.h,v 11.26 2001/01/11 18:19:52 bostic Exp $
+ * $Id: qam.h,v 11.38 2002/08/06 06:11:21 bostic Exp $
*/
+#ifndef _DB_QAM_H_
+#define _DB_QAM_H_
+
/*
* QAM data elements: a status field and the data.
*/
@@ -32,10 +35,6 @@ struct __qcursor {
u_int32_t flags;
};
-/*
- * The in-memory, per-tree queue data structure.
- */
-
typedef struct __mpfarray {
u_int32_t n_extent; /* Number of extents in table. */
u_int32_t low_extent; /* First extent open. */
@@ -46,6 +45,9 @@ typedef struct __mpfarray {
} *mpfarray; /* Array of open extents. */
} MPFARRAY;
+/*
+ * The in-memory, per-tree queue data structure.
+ */
struct __queue {
db_pgno_t q_meta; /* Database meta-data page. */
db_pgno_t q_root; /* Database root page. */
@@ -55,9 +57,11 @@ struct __queue {
u_int32_t rec_page; /* records per page */
u_int32_t page_ext; /* Pages per extent */
MPFARRAY array1, array2; /* File arrays. */
- DB_MPOOL_FINFO finfo; /* Initialized info struct. */
- DB_PGINFO pginfo; /* Initialized pginfo struct. */
+
+ /* Extent file configuration: */
DBT pgcookie; /* Initialized pgcookie. */
+ DB_PGINFO pginfo; /* Initialized pginfo struct. */
+
char *path; /* Space allocated to file pathname. */
char *name; /* The name of the file. */
char *dir; /* The dir of the file. */
@@ -65,7 +69,7 @@ struct __queue {
};
/* Format for queue extent names. */
-#define QUEUE_EXTENT "%s/__dbq.%s.%d"
+#define QUEUE_EXTENT "%s%c__dbq.%s.%d"
typedef struct __qam_filelist {
DB_MPOOLFILE *mpf;
@@ -81,14 +85,14 @@ typedef struct __qam_filelist {
* Page number for record =
* divide the physical record number by the records per page
* add the root page number
- * For now the root page will always be 1, but we might want to change
+ * For now the root page will always be 1, but we might want to change that
* in the future (e.g. multiple fixed len queues per file).
*
* Index of record on page =
* physical record number, less the logical pno times records/page
*/
#define CALC_QAM_RECNO_PER_PAGE(dbp) \
- (((dbp)->pgsize - sizeof(QPAGE)) / \
+ (((dbp)->pgsize - QPAGE_SZ(dbp)) / \
ALIGN(((QUEUE *)(dbp)->q_internal)->re_len + \
sizeof(QAMDATA) - SSZA(QAMDATA, data), sizeof(u_int32_t)))
@@ -104,7 +108,7 @@ typedef struct __qam_filelist {
#define QAM_GET_RECORD(dbp, page, index) \
((QAMDATA *)((u_int8_t *)(page) + \
- sizeof(QPAGE) + (ALIGN(sizeof(QAMDATA) - SSZA(QAMDATA, data) + \
+ QPAGE_SZ(dbp) + (ALIGN(sizeof(QAMDATA) - SSZA(QAMDATA, data) + \
((QUEUE *)(dbp)->q_internal)->re_len, sizeof(u_int32_t)) * index)))
#define QAM_AFTER_CURRENT(meta, recno) \
@@ -124,6 +128,7 @@ typedef struct __qam_filelist {
*/
#define QAM_SETFIRST 0x01
#define QAM_SETCUR 0x02
+#define QAM_TRUNCATE 0x04
/*
* Parameter to __qam_position.
@@ -146,5 +151,6 @@ typedef enum {
#define __qam_fput(dbp, pageno, addrp, flags) \
__qam_fprobe(dbp, pageno, addrp, QAM_PROBE_PUT, flags)
-#include "qam_auto.h"
-#include "qam_ext.h"
+#include "dbinc_auto/qam_auto.h"
+#include "dbinc_auto/qam_ext.h"
+#endif /* !_DB_QAM_H_ */
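The qam.h comment above gives the recno-to-page mapping in words; here is a small standalone sketch of that arithmetic with made-up sizes (record numbers assumed 1-based; the real macros derive records-per-page from QPAGE_SZ, ALIGN and the QUEUE handle, so none of the constants below come from this tree).

/* Worked example of the recno -> (page, index) arithmetic described above. */
#include <stdio.h>

int
main(void)
{
        unsigned int pgsize = 4096;     /* database page size */
        unsigned int qpage_hdr = 28;    /* assumed queue page header size */
        unsigned int rec_size = 64;     /* assumed aligned on-page record size */
        unsigned int root = 1;          /* root page number */
        unsigned int recno = 1000;      /* physical record number, 1-based */

        unsigned int rec_page = (pgsize - qpage_hdr) / rec_size;  /* recs/page */
        unsigned int pgno = root + (recno - 1) / rec_page;  /* page for recno */
        unsigned int indx = (recno - 1) % rec_page;          /* slot on page */

        printf("records/page=%u page=%u index=%u\n", rec_page, pgno, indx);
        return (0);
}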
diff --git a/bdb/include/queue.h b/bdb/dbinc/queue.h
index 8d4a771add6..8d4a771add6 100644
--- a/bdb/include/queue.h
+++ b/bdb/dbinc/queue.h
diff --git a/bdb/include/region.h b/bdb/dbinc/region.h
index c5882d09aad..9ee6c81062f 100644
--- a/bdb/include/region.h
+++ b/bdb/dbinc/region.h
@@ -1,12 +1,15 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1998, 1999, 2000
+ * Copyright (c) 1998-2002
* Sleepycat Software. All rights reserved.
*
- * $Id: region.h,v 11.13 2000/11/15 19:25:37 sue Exp $
+ * $Id: region.h,v 11.33 2002/08/06 06:11:22 bostic Exp $
*/
+#ifndef _DB_REGION_H_
+#define _DB_REGION_H_
+
/*
* The DB environment consists of some number of "regions", which are described
* by the following four structures:
@@ -87,7 +90,7 @@
* work. That all said, current versions of DB don't implement region grow
* because some systems don't support mutex copying, e.g., from OSF1 V4.0:
*
- * The address of an msemaphore structure may be significant. If the
+ * The address of an msemaphore structure may be significant. If the
* msemaphore structure contains any value copied from an msemaphore
* structure at a different address, the result is undefined.
*/
@@ -129,7 +132,7 @@ typedef enum {
/* Reference describing system memory version of REGENV. */
typedef struct __db_reg_env_ref {
roff_t size; /* Region size. */
- long segid; /* UNIX shmget(2) ID. */
+ long segid; /* UNIX shmget ID, VxWorks ID. */
} REGENV_REF;
/* Per-environment region information. */
@@ -139,7 +142,7 @@ typedef struct __db_reg_env {
* The mutex must be the first entry in the structure to guarantee
* correct alignment.
*/
- MUTEX mutex; /* Environment mutex. */
+ DB_MUTEX mutex; /* Environment mutex. */
/*
* !!!
@@ -154,19 +157,22 @@ typedef struct __db_reg_env {
*/
u_int32_t magic; /* Valid region magic number. */
- int panic; /* Environment is dead. */
+ int envpanic; /* Environment is dead. */
int majver; /* Major DB version number. */
int minver; /* Minor DB version number. */
int patch; /* Patch DB version number. */
u_int32_t init_flags; /* Flags the env was initialized with.*/
+ roff_t cipher_off; /* Offset of cipher area */
/* List of regions. */
SH_LIST_HEAD(__db_regionh) regionq;
u_int32_t refcnt; /* References to the environment. */
+ roff_t rep_off; /* Offset of the replication area. */
+
size_t pad; /* Guarantee that following memory is
* size_t aligned. This is necessary
* because we're going to store the
@@ -181,7 +187,7 @@ typedef struct __db_region {
* The mutex must be the first entry in the structure to guarantee
* correct alignment.
*/
- MUTEX mutex; /* Region mutex. */
+ DB_MUTEX mutex; /* Region mutex. */
/*
* !!!
@@ -265,28 +271,34 @@ typedef struct __db_regmaint_t {
* R_UNLOCK
*/
#define R_LOCK(dbenv, reginfo) \
- MUTEX_LOCK(dbenv, &(reginfo)->rp->mutex, (dbenv)->lockfhp)
+ MUTEX_LOCK(dbenv, &(reginfo)->rp->mutex)
#define R_UNLOCK(dbenv, reginfo) \
MUTEX_UNLOCK(dbenv, &(reginfo)->rp->mutex)
/* PANIC_CHECK: Check to see if the DB environment is dead. */
#define PANIC_CHECK(dbenv) \
- if (DB_GLOBAL(db_panic) && \
+ if (!F_ISSET((dbenv), DB_ENV_NOPANIC) && \
(dbenv)->reginfo != NULL && ((REGENV *) \
- ((REGINFO *)(dbenv)->reginfo)->primary)->panic != 0) \
- return (DB_RUNRECOVERY);
+ ((REGINFO *)(dbenv)->reginfo)->primary)->envpanic != 0) \
+ return (__db_panic_msg(dbenv));
+
+#define PANIC_SET(dbenv, onoff) \
+ ((REGENV *)((REGINFO *)(dbenv)->reginfo)->primary)->envpanic = (onoff);
/*
- * All regions are created on 8K boundaries out of sheer paranoia, so that
- * we don't make some underlying VM unhappy.
+ * All regions are created on 8K boundaries out of sheer paranoia, so we
+ * don't make some underlying VM unhappy. Make sure we don't overflow or
+ * underflow.
*/
-#define OS_ROUNDOFF(i, s) { \
- (i) += (s) - 1; \
- (i) -= (i) % (s); \
-}
#define OS_VMPAGESIZE (8 * 1024)
-#define OS_VMROUNDOFF(i) OS_ROUNDOFF(i, OS_VMPAGESIZE)
+#define OS_VMROUNDOFF(i) { \
+ if ((i) < \
+ (UINT32_T_MAX - OS_VMPAGESIZE) + 1 || (i) < OS_VMPAGESIZE) \
+ (i) += OS_VMPAGESIZE - 1; \
+ (i) -= (i) % OS_VMPAGESIZE; \
+}
#if defined(__cplusplus)
}
#endif
+#endif /* !_DB_REGION_H_ */
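The region.h hunk above replaces the old unconditional OS_ROUNDOFF with a round-up that first checks the addition cannot wrap a 32-bit size. A simplified standalone sketch of the same idea follows (the macro's exact guard condition differs slightly; this is not BDB code).

/* Round a 32-bit size up to the next 8K boundary without wrapping. */
#include <stdint.h>
#include <stdio.h>

#define VMPAGESIZE (8u * 1024u)

static uint32_t
vm_roundoff(uint32_t i)
{
        if (i <= UINT32_MAX - (VMPAGESIZE - 1)) /* adding the bias cannot wrap */
                i += VMPAGESIZE - 1;
        return (i - i % VMPAGESIZE);            /* multiple of VMPAGESIZE */
}

int
main(void)
{
        printf("%lu -> %lu\n", (unsigned long)20000u,
            (unsigned long)vm_roundoff(20000));         /* 24576 */
        printf("%lu -> %lu\n", (unsigned long)UINT32_MAX,
            (unsigned long)vm_roundoff(UINT32_MAX));    /* rounds down, no wrap */
        return (0);
}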
diff --git a/bdb/dbinc/rep.h b/bdb/dbinc/rep.h
new file mode 100644
index 00000000000..1e315494c87
--- /dev/null
+++ b/bdb/dbinc/rep.h
@@ -0,0 +1,184 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2001-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#ifndef _REP_H_
+#define _REP_H_
+
+#define REP_ALIVE 1 /* I am alive message. */
+#define REP_ALIVE_REQ 2 /* Request for alive messages. */
+#define REP_ALL_REQ 3 /* Request all log records greater than LSN. */
+#define REP_ELECT 4 /* Indicates that all listeners should */
+ /* begin master election */
+#define REP_FILE 6 /* Page of a database file. */
+#define REP_FILE_REQ 7 /* Request for a database file. */
+#define REP_LOG 8 /* Log record. */
+#define REP_LOG_MORE 9 /* There are more log records to request. */
+#define REP_LOG_REQ 10 /* Request for a log record. */
+#define REP_MASTER_REQ 11 /* Who is the master */
+#define REP_NEWCLIENT 12 /* Announces the presence of a new client. */
+#define REP_NEWFILE 13 /* Announce a log file change. */
+#define REP_NEWMASTER 14 /* Announces who the master is. */
+#define REP_NEWSITE 15 /* Announces that a site has heard from a new
+ * site; like NEWCLIENT, but indirect. A
+ * NEWCLIENT message comes directly from the new
+ * client while a NEWSITE comes indirectly from
+ * someone who heard about a NEWSITE.
+ */
+#define REP_PAGE 16 /* Database page. */
+#define REP_PAGE_REQ 17 /* Request for a database page. */
+#define REP_PLIST 18 /* Database page list. */
+#define REP_PLIST_REQ 19 /* Request for a page list. */
+#define REP_VERIFY 20 /* A log record for verification. */
+#define REP_VERIFY_FAIL 21 /* The client is outdated. */
+#define REP_VERIFY_REQ 22 /* Request for a log record to verify. */
+#define REP_VOTE1 23 /* Send out your information for an election. */
+#define REP_VOTE2 24 /* Send a "you are master" vote. */
+
+/* Used to consistently designate which messages ought to be received where. */
+#define MASTER_ONLY(dbenv) \
+ if (!F_ISSET(dbenv, DB_ENV_REP_MASTER)) return (EINVAL)
+
+#define CLIENT_ONLY(dbenv) \
+ if (!F_ISSET(dbenv, DB_ENV_REP_CLIENT)) return (EINVAL)
+
+#define ANYSITE(dbenv)
+
+/* Shared replication structure. */
+
+typedef struct __rep {
+ /*
+ * Due to alignment constraints on some architectures (e.g. HP-UX),
+ * DB_MUTEXes must be the first element of shalloced structures,
+ * and as a corollary there can be only one per structure. Thus,
+ * db_mutex_off points to a mutex in a separately-allocated chunk.
+ */
+ DB_MUTEX mutex; /* Region lock. */
+ roff_t db_mutex_off; /* Client database mutex. */
+ u_int32_t tally_off; /* Offset of the tally region. */
+ int eid; /* Environment id. */
+ int master_id; /* ID of the master site. */
+ u_int32_t gen; /* Replication generation number */
+ int asites; /* Space allocated for sites. */
+ int nsites; /* Number of sites in group. */
+ int priority; /* My priority in an election. */
+ u_int32_t gbytes; /* Limit on data sent in single... */
+ u_int32_t bytes; /* __rep_process_message call. */
+#define DB_REP_REQUEST_GAP 4
+#define DB_REP_MAX_GAP 128
+ u_int32_t request_gap; /* # of records to receive before we
+ * request a missing log record. */
+ u_int32_t max_gap; /* Maximum number of records before
+ * requesting a missing log record. */
+
+ /* Vote tallying information. */
+ int sites; /* Sites heard from. */
+ int winner; /* Current winner. */
+ int w_priority; /* Winner priority. */
+ u_int32_t w_gen; /* Winner generation. */
+ DB_LSN w_lsn; /* Winner LSN. */
+ int w_tiebreaker; /* Winner tiebreaking value. */
+ int votes; /* Number of votes for this site. */
+
+ /* Statistics. */
+ DB_REP_STAT stat;
+
+#define REP_F_EPHASE1 0x01 /* In phase 1 of election. */
+#define REP_F_EPHASE2 0x02 /* In phase 2 of election. */
+#define REP_F_LOGSONLY 0x04 /* Log-site only; cannot be upgraded. */
+#define REP_F_MASTER 0x08 /* Master replica. */
+#define REP_F_RECOVER 0x10
+#define REP_F_UPGRADE 0x20 /* Upgradeable replica. */
+#define REP_ISCLIENT (REP_F_UPGRADE | REP_F_LOGSONLY)
+ u_int32_t flags;
+} REP;
+
+#define IN_ELECTION(R) F_ISSET((R), REP_F_EPHASE1 | REP_F_EPHASE2)
+#define ELECTION_DONE(R) F_CLR((R), REP_F_EPHASE1 | REP_F_EPHASE2)
+
+/*
+ * Per-process replication structure.
+ */
+struct __db_rep {
+ DB_MUTEX *mutexp;
+
+ DB_MUTEX *db_mutexp; /* Mutex for bookkeeping database. */
+ DB *rep_db; /* Bookkeeping database. */
+
+ REP *region; /* In memory structure. */
+ int (*rep_send) /* Send function. */
+ __P((DB_ENV *,
+ const DBT *, const DBT *, int, u_int32_t));
+};
+
+/*
+ * Control structure for replication communication infrastructure.
+ *
+ * Note that the version information should be at the beginning of the
+ * structure, so that we can rearrange the rest of it while letting the
+ * version checks continue to work. DB_REPVERSION should be revved any time
+ * the rest of the structure changes.
+ */
+typedef struct __rep_control {
+#define DB_REPVERSION 1
+ u_int32_t rep_version; /* Replication version number. */
+ u_int32_t log_version; /* Log version number. */
+
+ DB_LSN lsn; /* Log sequence number. */
+ u_int32_t rectype; /* Message type. */
+ u_int32_t gen; /* Generation number. */
+ u_int32_t flags; /* log_put flag value. */
+} REP_CONTROL;
+
+/* Election vote information. */
+typedef struct __rep_vote {
+ int priority; /* My site's priority. */
+ int nsites; /* Number of sites I've been in
+ * communication with. */
+ int tiebreaker; /* Tie-breaking quasi-random int. */
+} REP_VOTE_INFO;
+
+/*
+ * This structure takes care of representing a transaction.
+ * It holds all the records, sorted by page number so that
+ * we can obtain locks and apply updates in a deadlock free
+ * order.
+ */
+typedef struct __lsn_page {
+ DB_LSN lsn;
+ u_int32_t fid;
+ DB_LOCK_ILOCK pgdesc;
+#define LSN_PAGE_NOLOCK 0x0001 /* No lock necessary for log rec. */
+ u_int32_t flags;
+} LSN_PAGE;
+
+typedef struct __txn_recs {
+ int npages;
+ int nalloc;
+ LSN_PAGE *array;
+ u_int32_t txnid;
+ u_int32_t lockid;
+} TXN_RECS;
+
+typedef struct __lsn_collection {
+ int nlsns;
+ int nalloc;
+ DB_LSN *array;
+} LSN_COLLECTION;
+
+/*
+ * This is used by the page-prep routines to do the lock_vec call to
+ * apply the updates for a single transaction or a collection of
+ * transactions.
+ */
+typedef struct _linfo {
+ int n;
+ DB_LOCKREQ *reqs;
+ DBT *objs;
+} linfo_t;
+
+#include "dbinc_auto/rep_ext.h"
+#endif /* !_REP_H_ */
diff --git a/bdb/include/shqueue.h b/bdb/dbinc/shqueue.h
index 115c5d39e88..47fdf12ac92 100644
--- a/bdb/include/shqueue.h
+++ b/bdb/dbinc/shqueue.h
@@ -1,11 +1,12 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Copyright (c) 1996-2002
* Sleepycat Software. All rights reserved.
*
- * $Id: shqueue.h,v 11.6 2000/11/14 20:20:28 bostic Exp $
+ * $Id: shqueue.h,v 11.9 2002/01/11 15:52:30 bostic Exp $
*/
+
#ifndef _SYS_SHQUEUE_H_
#define _SYS_SHQUEUE_H_
@@ -333,5 +334,4 @@ struct { \
#if defined(__cplusplus)
}
#endif
-
#endif /* !_SYS_SHQUEUE_H_ */
diff --git a/bdb/include/tcl_db.h b/bdb/dbinc/tcl_db.h
index 254006c2f6d..8c04d545295 100644
--- a/bdb/include/tcl_db.h
+++ b/bdb/dbinc/tcl_db.h
@@ -1,18 +1,22 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1999, 2000
+ * Copyright (c) 1999-2002
* Sleepycat Software. All rights reserved.
*
- * $Id: tcl_db.h,v 11.9 2000/12/12 17:43:56 bostic Exp $
+ * $Id: tcl_db.h,v 11.30 2002/08/06 06:11:22 bostic Exp $
*/
+#ifndef _DB_TCL_DB_H_
+#define _DB_TCL_DB_H_
+
#define MSG_SIZE 100 /* Message size */
enum INFOTYPE {
- I_ENV, I_DB, I_DBC, I_TXN, I_MP, I_PG, I_LOCK, I_NDBM, I_MUTEX };
+ I_ENV, I_DB, I_DBC, I_TXN, I_MP, I_PG, I_LOCK, I_LOGC, I_NDBM, I_MUTEX };
#define MAX_ID 8 /* Maximum number of sub-id's we need */
+#define DBTCL_PREP 64 /* Size of txn_recover preplist */
#define DBTCL_DBM 1
#define DBTCL_NDBM 2
@@ -20,7 +24,7 @@ enum INFOTYPE {
typedef struct _mutex_entry {
union {
struct {
- MUTEX real_m;
+ DB_MUTEX real_m;
u_int32_t real_val;
} r;
/*
@@ -87,9 +91,7 @@ typedef struct dbtcl_info {
DB_MPOOLFILE *mp;
DB_LOCK *lock;
_MUTEX_DATA *mutex;
-#if 0
- DBM *ndbmp; /* Compatibility */
-#endif
+ DB_LOGC *logc;
} un;
union data {
int anydata;
@@ -103,13 +105,21 @@ typedef struct dbtcl_info {
DBT i_lockobj;
FILE *i_err;
char *i_errpfx;
+
+ /* Callbacks--Tcl_Objs containing proc names */
+ Tcl_Obj *i_btcompare;
+ Tcl_Obj *i_dupcompare;
+ Tcl_Obj *i_hashproc;
+ Tcl_Obj *i_rep_send;
+ Tcl_Obj *i_second_call;
+
+ /* Environment ID for the i_rep_send callback. */
+ Tcl_Obj *i_rep_eid;
+
struct dbtcl_info *i_parent;
int i_otherid[MAX_ID];
} DBTCL_INFO;
-extern int __debug_on, __debug_print, __debug_stop, __debug_test;
-LIST_HEAD(infohead, dbtcl_info) __db_infohead;
-
#define i_anyp un.anyp
#define i_pagep un.anyp
#define i_envp un.envp
@@ -119,9 +129,7 @@ LIST_HEAD(infohead, dbtcl_info) __db_infohead;
#define i_mp un.mp
#define i_lock un.lock
#define i_mutex un.mutex
-#if 0
-#define i_ndbm un.ndbmp
-#endif
+#define i_logc un.logc
#define i_data und.anydata
#define i_pgno und.pgno
@@ -133,11 +141,21 @@ LIST_HEAD(infohead, dbtcl_info) __db_infohead;
#define i_envmpid i_otherid[1]
#define i_envlockid i_otherid[2]
#define i_envmutexid i_otherid[3]
+#define i_envlogcid i_otherid[4]
#define i_mppgid i_otherid[0]
#define i_dbdbcid i_otherid[0]
+extern int __debug_on, __debug_print, __debug_stop, __debug_test;
+
+typedef struct dbtcl_global {
+ LIST_HEAD(infohead, dbtcl_info) g_infohead;
+} DBTCL_GLOBAL;
+#define __db_infohead __dbtcl_global.g_infohead
+
+extern DBTCL_GLOBAL __dbtcl_global;
+
#define NAME_TO_ENV(name) (DB_ENV *)_NameToPtr((name))
#define NAME_TO_DB(name) (DB *)_NameToPtr((name))
#define NAME_TO_DBC(name) (DBC *)_NameToPtr((name))
@@ -161,6 +179,29 @@ do { \
} while (0)
/*
+ * MAKE_STAT_LSN appends a {name {LSNfile LSNoffset}} pair to a result list
+ * that MUST be called 'res' that is a Tcl_Obj * in the local
+ * function. This macro also assumes a label "error" to go to
+ * in the event of a Tcl error. For stat functions this will
+ * typically go before the "free" function to free the stat structure
+ * returned by DB.
+ */
+#define MAKE_STAT_LSN(s, lsn) \
+do { \
+ myobjc = 2; \
+ myobjv[0] = Tcl_NewLongObj((long)(lsn)->file); \
+ myobjv[1] = Tcl_NewLongObj((long)(lsn)->offset); \
+ lsnlist = Tcl_NewListObj(myobjc, myobjv); \
+ myobjc = 2; \
+ myobjv[0] = Tcl_NewStringObj((s), strlen(s)); \
+ myobjv[1] = lsnlist; \
+ thislist = Tcl_NewListObj(myobjc, myobjv); \
+ result = Tcl_ListObjAppendElement(interp, res, thislist); \
+ if (result != TCL_OK) \
+ goto error; \
+} while (0)
+
+/*
* MAKE_STAT_STRLIST appends a {name string} pair to a result list
* that MUST be called 'res' that is a Tcl_Obj * in the local
* function. This macro also assumes a label "error" to go to
@@ -170,7 +211,7 @@ do { \
*/
#define MAKE_STAT_STRLIST(s,s1) \
do { \
- result = _SetListElem(interp, res, (s), strlen(s), \
+ result = _SetListElem(interp, res, (s), strlen(s), \
(s1), strlen(s1)); \
if (result != TCL_OK) \
goto error; \
@@ -198,7 +239,7 @@ do { \
*/
#define FLAG_CHECK2(flag,val) \
do { \
- if ((flag) != 0 && (flag) != (val)) { \
+ if (((flag) & ~(val)) != 0) { \
Tcl_SetResult(interp, \
" Only 1 policy can be specified.\n", \
TCL_STATIC); \
@@ -216,4 +257,5 @@ do { \
#define IS_HELP(s) \
(strcmp(Tcl_GetStringFromObj(s,NULL), "-?") == 0) ? TCL_OK : TCL_ERROR
-#include "tcl_ext.h"
+#include "dbinc_auto/tcl_ext.h"
+#endif /* !_DB_TCL_DB_H_ */
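The MAKE_STAT_LSN comment above describes the {name {LSNfile LSNoffset}} pair the macro appends to the 'res' list; the sketch below builds the same list shape directly with the Tcl C API (the field name and LSN values are invented; this is not code from this tree).

/* Build a {name {file offset}} pair and append it to a Tcl result list. */
#include <stdio.h>
#include <tcl.h>

int
main(void)
{
        Tcl_Interp *interp = Tcl_CreateInterp();
        Tcl_Obj *res = Tcl_NewListObj(0, NULL);
        Tcl_Obj *lsnv[2], *pairv[2];

        lsnv[0] = Tcl_NewLongObj(12L);                  /* LSN file */
        lsnv[1] = Tcl_NewLongObj(4096L);                /* LSN offset */
        pairv[0] = Tcl_NewStringObj("example_lsn", -1); /* invented stat name */
        pairv[1] = Tcl_NewListObj(2, lsnv);

        if (Tcl_ListObjAppendElement(interp,
            res, Tcl_NewListObj(2, pairv)) != TCL_OK)
                return (1);

        printf("%s\n", Tcl_GetString(res));     /* {example_lsn {12 4096}} */
        Tcl_DeleteInterp(interp);
        return (0);
}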
diff --git a/bdb/include/txn.h b/bdb/dbinc/txn.h
index 009a1ca1589..31b00a6ba74 100644
--- a/bdb/include/txn.h
+++ b/bdb/dbinc/txn.h
@@ -1,18 +1,24 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Copyright (c) 1996-2002
* Sleepycat Software. All rights reserved.
*
- * $Id: txn.h,v 11.12 2001/01/02 17:23:39 margo Exp $
+ * $Id: txn.h,v 11.43 2002/08/29 14:22:19 margo Exp $
*/
#ifndef _TXN_H_
#define _TXN_H_
-#include "xa.h"
+#include "dbinc/xa.h"
+
+/* Operation parameters to the delayed commit processing code. */
+typedef enum {
+ TXN_REMOVE, /* Remove a file. */
+ TXN_TRADE, /* Trade lockers. */
+ TXN_TRADED /* Already traded; downgrade lock. */
+} TXN_EVENT_T;
-struct __db_txnmgr; typedef struct __db_txnmgr DB_TXNMGR;
struct __db_txnregion; typedef struct __db_txnregion DB_TXNREGION;
/*
@@ -20,36 +26,14 @@ struct __db_txnregion; typedef struct __db_txnregion DB_TXNREGION;
* TXN_MINIMUM = (DB_LOCK_MAXID + 1) but this makes compilers complain.
*/
#define TXN_MINIMUM 0x80000000
-#define TXN_INVALID 0xffffffff /* Maximum number of txn ids. */
-#define TXN_INVALID_ID 0 /* Invalid transaction ID. */
+#define TXN_MAXIMUM 0xffffffff /* Maximum number of txn ids. */
+#define TXN_INVALID 0 /* Invalid transaction ID. */
#define DEF_MAX_TXNS 20 /* Default max transactions. */
-/* The structure allocated for every transaction. */
-struct __db_txn {
- DB_TXNMGR *mgrp; /* Pointer to transaction manager. */
- DB_TXN *parent; /* Pointer to transaction's parent. */
- DB_LSN last_lsn; /* Lsn of last log write. */
- u_int32_t txnid; /* Unique transaction id. */
- roff_t off; /* Detail structure within region. */
- TAILQ_ENTRY(__db_txn) links; /* Links transactions off manager. */
- TAILQ_HEAD(__kids, __db_txn) kids; /* Child transactions. */
- TAILQ_ENTRY(__db_txn) klinks; /* Links child transactions. */
- u_int32_t cursors; /* Number of cursors open for txn */
-
-#define TXN_CHILDCOMMIT 0x01 /* Transaction that has committed. */
-#define TXN_MALLOC 0x02 /* Structure allocated by TXN system. */
-#define TXN_NOSYNC 0x04 /* Do not sync on prepare and commit. */
-#define TXN_NOWAIT 0x08 /* Do not wait on locks. */
-#define TXN_SYNC 0x10 /* Sync on prepare and commit. */
- u_int32_t flags;
-};
-
/*
* Internal data maintained in shared memory for each transaction.
*/
-typedef char DB_XID[XIDDATASIZE];
-
typedef struct __txn_detail {
u_int32_t txnid; /* current transaction id
used to link free list also */
@@ -62,6 +46,9 @@ typedef struct __txn_detail {
#define TXN_PREPARED 3
#define TXN_COMMITTED 4
u_int32_t status; /* status of the transaction */
+#define TXN_COLLECTED 0x1
+#define TXN_RESTORED 0x2
+ u_int32_t flags; /* collected during txn_recover */
SH_TAILQ_ENTRY links; /* free/active list */
@@ -77,7 +64,7 @@ typedef struct __txn_detail {
* XID (xid_t) structure: because these fields are logged, the
* sizes have to be explicit.
*/
- DB_XID xid; /* XA global transaction id */
+ u_int8_t xid[XIDDATASIZE]; /* XA global transaction id */
u_int32_t bqual; /* bqual_length from XID */
u_int32_t gtrid; /* gtrid_length from XID */
int32_t format; /* XA format */
@@ -96,12 +83,13 @@ struct __db_txnmgr {
* to be stored elsewhere on architectures unable to support mutexes in heap
* memory, e.g., HP/UX 9.
*/
- MUTEX *mutexp; /* Lock list of active transactions
+ DB_MUTEX *mutexp; /* Lock list of active transactions
* (including the content of each
* TXN_DETAIL structure on the list).
*/
/* List of active transactions. */
TAILQ_HEAD(_chain, __db_txn) txn_chain;
+ u_int32_t n_discards; /* Number of txns discarded. */
/* These fields are never updated after creation, and so not protected. */
DB_ENV *dbenv; /* Environment. */
@@ -115,36 +103,41 @@ struct __db_txnmgr {
struct __db_txnregion {
u_int32_t maxtxns; /* maximum number of active TXNs */
u_int32_t last_txnid; /* last transaction id given out */
- DB_LSN pending_ckp; /* last checkpoint did not finish */
+ u_int32_t cur_maxid; /* current max unused id. */
DB_LSN last_ckp; /* lsn of the last checkpoint */
time_t time_ckp; /* time of last checkpoint */
u_int32_t logtype; /* type of logging */
u_int32_t locktype; /* lock type */
- u_int32_t naborts; /* number of aborted TXNs */
- u_int32_t ncommits; /* number of committed TXNs */
- u_int32_t nbegins; /* number of begun TXNs */
- u_int32_t nactive; /* number of active TXNs */
- u_int32_t maxnactive; /* maximum number of active TXNs */
+ DB_TXN_STAT stat; /* Statistics for txns. */
+
+#define TXN_IN_RECOVERY 0x01 /* environment is being recovered */
+ u_int32_t flags;
/* active TXN list */
SH_TAILQ_HEAD(__active) active_txn;
-};
+#ifdef HAVE_MUTEX_SYSTEM_RESOURCES
+#define TXN_MAINT_SIZE (sizeof(roff_t) * DB_MAX_HANDLES)
-/*
- * Make the region large enough to hold N transaction detail structures
- * plus some space to hold thread handles and the beginning of the shalloc
- * region.
- */
-#define TXN_REGION_SIZE(N) \
- (sizeof(DB_TXNREGION) + N * sizeof(TXN_DETAIL) + 1000)
+ roff_t maint_off; /* offset of region maintenance info */
+#endif
+};
/*
- * Log record types.
+ * Log record types. Note that these are *not* alphabetical. This is
+ * intentional so that we don't change the meaning of values between
+ * software upgrades. EXPECTED, UNEXPECTED, IGNORE, NOTFOUND and OK
+ * are used in the
+ * txnlist functions.
*/
+#define TXN_OK 0
#define TXN_COMMIT 1
#define TXN_PREPARE 2
-
-#include "txn_auto.h"
-#include "txn_ext.h"
-
-#include "xa_ext.h"
+#define TXN_ABORT 3
+#define TXN_NOTFOUND 4
+#define TXN_IGNORE 5
+#define TXN_EXPECTED 6
+#define TXN_UNEXPECTED 7
+
+#include "dbinc_auto/txn_auto.h"
+#include "dbinc_auto/txn_ext.h"
+#include "dbinc_auto/xa_ext.h"
#endif /* !_TXN_H_ */
diff --git a/bdb/include/xa.h b/bdb/dbinc/xa.h
index ce46179263a..64bdac8c914 100644
--- a/bdb/include/xa.h
+++ b/bdb/dbinc/xa.h
@@ -1,10 +1,10 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1998, 1999, 2000
+ * Copyright (c) 1998-2002
* Sleepycat Software. All rights reserved.
*
- * $Id: xa.h,v 11.3 2000/02/14 02:59:55 bostic Exp $
+ * $Id: xa.h,v 11.5 2002/01/11 15:52:30 bostic Exp $
*/
/*
* Start of xa.h header
diff --git a/bdb/dbinc_auto/btree_auto.h b/bdb/dbinc_auto/btree_auto.h
new file mode 100644
index 00000000000..4feb07ad94c
--- /dev/null
+++ b/bdb/dbinc_auto/btree_auto.h
@@ -0,0 +1,128 @@
+/* Do not edit: automatically built by gen_rec.awk. */
+
+#ifndef __bam_AUTO_H
+#define __bam_AUTO_H
+#define DB___bam_split 62
+typedef struct ___bam_split_args {
+ u_int32_t type;
+ DB_TXN *txnid;
+ DB_LSN prev_lsn;
+ int32_t fileid;
+ db_pgno_t left;
+ DB_LSN llsn;
+ db_pgno_t right;
+ DB_LSN rlsn;
+ u_int32_t indx;
+ db_pgno_t npgno;
+ DB_LSN nlsn;
+ db_pgno_t root_pgno;
+ DBT pg;
+ u_int32_t opflags;
+} __bam_split_args;
+
+#define DB___bam_rsplit 63
+typedef struct ___bam_rsplit_args {
+ u_int32_t type;
+ DB_TXN *txnid;
+ DB_LSN prev_lsn;
+ int32_t fileid;
+ db_pgno_t pgno;
+ DBT pgdbt;
+ db_pgno_t root_pgno;
+ db_pgno_t nrec;
+ DBT rootent;
+ DB_LSN rootlsn;
+} __bam_rsplit_args;
+
+#define DB___bam_adj 55
+typedef struct ___bam_adj_args {
+ u_int32_t type;
+ DB_TXN *txnid;
+ DB_LSN prev_lsn;
+ int32_t fileid;
+ db_pgno_t pgno;
+ DB_LSN lsn;
+ u_int32_t indx;
+ u_int32_t indx_copy;
+ u_int32_t is_insert;
+} __bam_adj_args;
+
+#define DB___bam_cadjust 56
+typedef struct ___bam_cadjust_args {
+ u_int32_t type;
+ DB_TXN *txnid;
+ DB_LSN prev_lsn;
+ int32_t fileid;
+ db_pgno_t pgno;
+ DB_LSN lsn;
+ u_int32_t indx;
+ int32_t adjust;
+ u_int32_t opflags;
+} __bam_cadjust_args;
+
+#define DB___bam_cdel 57
+typedef struct ___bam_cdel_args {
+ u_int32_t type;
+ DB_TXN *txnid;
+ DB_LSN prev_lsn;
+ int32_t fileid;
+ db_pgno_t pgno;
+ DB_LSN lsn;
+ u_int32_t indx;
+} __bam_cdel_args;
+
+#define DB___bam_repl 58
+typedef struct ___bam_repl_args {
+ u_int32_t type;
+ DB_TXN *txnid;
+ DB_LSN prev_lsn;
+ int32_t fileid;
+ db_pgno_t pgno;
+ DB_LSN lsn;
+ u_int32_t indx;
+ u_int32_t isdeleted;
+ DBT orig;
+ DBT repl;
+ u_int32_t prefix;
+ u_int32_t suffix;
+} __bam_repl_args;
+
+#define DB___bam_root 59
+typedef struct ___bam_root_args {
+ u_int32_t type;
+ DB_TXN *txnid;
+ DB_LSN prev_lsn;
+ int32_t fileid;
+ db_pgno_t meta_pgno;
+ db_pgno_t root_pgno;
+ DB_LSN meta_lsn;
+} __bam_root_args;
+
+#define DB___bam_curadj 64
+typedef struct ___bam_curadj_args {
+ u_int32_t type;
+ DB_TXN *txnid;
+ DB_LSN prev_lsn;
+ int32_t fileid;
+ db_ca_mode mode;
+ db_pgno_t from_pgno;
+ db_pgno_t to_pgno;
+ db_pgno_t left_pgno;
+ u_int32_t first_indx;
+ u_int32_t from_indx;
+ u_int32_t to_indx;
+} __bam_curadj_args;
+
+#define DB___bam_rcuradj 65
+typedef struct ___bam_rcuradj_args {
+ u_int32_t type;
+ DB_TXN *txnid;
+ DB_LSN prev_lsn;
+ int32_t fileid;
+ ca_recno_arg mode;
+ db_pgno_t root;
+ db_recno_t recno;
+ u_int32_t order;
+} __bam_rcuradj_args;
+
+#endif
diff --git a/bdb/dbinc_auto/btree_ext.h b/bdb/dbinc_auto/btree_ext.h
new file mode 100644
index 00000000000..ec5468acf1c
--- /dev/null
+++ b/bdb/dbinc_auto/btree_ext.h
@@ -0,0 +1,132 @@
+/* DO NOT EDIT: automatically built by dist/s_include. */
+#ifndef _btree_ext_h_
+#define _btree_ext_h_
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+int __bam_cmp __P((DB *, const DBT *, PAGE *, u_int32_t, int (*)(DB *, const DBT *, const DBT *), int *));
+int __bam_defcmp __P((DB *, const DBT *, const DBT *));
+size_t __bam_defpfx __P((DB *, const DBT *, const DBT *));
+int __bam_pgin __P((DB_ENV *, DB *, db_pgno_t, void *, DBT *));
+int __bam_pgout __P((DB_ENV *, DB *, db_pgno_t, void *, DBT *));
+int __bam_mswap __P((PAGE *));
+void __bam_cprint __P((DBC *));
+int __bam_ca_delete __P((DB *, db_pgno_t, u_int32_t, int));
+int __ram_ca_delete __P((DB *, db_pgno_t));
+int __bam_ca_di __P((DBC *, db_pgno_t, u_int32_t, int));
+int __bam_ca_dup __P((DBC *, u_int32_t, db_pgno_t, u_int32_t, db_pgno_t, u_int32_t));
+int __bam_ca_undodup __P((DB *, u_int32_t, db_pgno_t, u_int32_t, u_int32_t));
+int __bam_ca_rsplit __P((DBC *, db_pgno_t, db_pgno_t));
+int __bam_ca_split __P((DBC *, db_pgno_t, db_pgno_t, db_pgno_t, u_int32_t, int));
+void __bam_ca_undosplit __P((DB *, db_pgno_t, db_pgno_t, db_pgno_t, u_int32_t));
+int __bam_c_init __P((DBC *, DBTYPE));
+int __bam_c_refresh __P((DBC *));
+int __bam_c_count __P((DBC *, db_recno_t *));
+int __bam_c_dup __P((DBC *, DBC *));
+int __bam_bulk_overflow __P((DBC *, u_int32_t, db_pgno_t, u_int8_t *));
+int __bam_bulk_duplicates __P((DBC *, db_pgno_t, u_int8_t *, int32_t *, int32_t **, u_int8_t **, u_int32_t *, int));
+int __bam_c_rget __P((DBC *, DBT *));
+int __bam_ditem __P((DBC *, PAGE *, u_int32_t));
+int __bam_adjindx __P((DBC *, PAGE *, u_int32_t, u_int32_t, int));
+int __bam_dpages __P((DBC *, EPG *));
+int __bam_db_create __P((DB *));
+int __bam_db_close __P((DB *));
+int __bam_set_flags __P((DB *, u_int32_t *flagsp));
+int __ram_set_flags __P((DB *, u_int32_t *flagsp));
+int __bam_open __P((DB *, DB_TXN *, const char *, db_pgno_t, u_int32_t));
+int __bam_metachk __P((DB *, const char *, BTMETA *));
+int __bam_read_root __P((DB *, DB_TXN *, db_pgno_t, u_int32_t));
+int __bam_new_file __P((DB *, DB_TXN *, DB_FH *, const char *));
+int __bam_new_subdb __P((DB *, DB *, DB_TXN *));
+int __bam_iitem __P((DBC *, DBT *, DBT *, u_int32_t, u_int32_t));
+int __bam_ritem __P((DBC *, PAGE *, u_int32_t, DBT *));
+int __bam_split_recover __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __bam_rsplit_recover __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __bam_adj_recover __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __bam_cadjust_recover __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __bam_cdel_recover __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __bam_repl_recover __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __bam_root_recover __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __bam_curadj_recover __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __bam_rcuradj_recover __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __bam_reclaim __P((DB *, DB_TXN *));
+int __bam_truncate __P((DB *, DB_TXN *, u_int32_t *));
+int __ram_open __P((DB *, DB_TXN *, const char *, db_pgno_t, u_int32_t));
+int __ram_append __P((DBC *, DBT *, DBT *));
+int __ram_c_del __P((DBC *));
+int __ram_c_get __P((DBC *, DBT *, DBT *, u_int32_t, db_pgno_t *));
+int __ram_c_put __P((DBC *, DBT *, DBT *, u_int32_t, db_pgno_t *));
+int __ram_ca __P((DBC *, ca_recno_arg));
+int __ram_getno __P((DBC *, const DBT *, db_recno_t *, int));
+int __ram_writeback __P((DB *));
+int __bam_rsearch __P((DBC *, db_recno_t *, u_int32_t, int, int *));
+int __bam_adjust __P((DBC *, int32_t));
+int __bam_nrecs __P((DBC *, db_recno_t *));
+db_recno_t __bam_total __P((DB *, PAGE *));
+int __bam_search __P((DBC *, db_pgno_t, const DBT *, u_int32_t, int, db_recno_t *, int *));
+int __bam_stkrel __P((DBC *, u_int32_t));
+int __bam_stkgrow __P((DB_ENV *, BTREE_CURSOR *));
+int __bam_split __P((DBC *, void *, db_pgno_t *));
+int __bam_copy __P((DB *, PAGE *, PAGE *, u_int32_t, u_int32_t));
+int __bam_stat __P((DB *, void *, u_int32_t));
+int __bam_traverse __P((DBC *, db_lockmode_t, db_pgno_t, int (*)(DB *, PAGE *, void *, int *), void *));
+int __bam_stat_callback __P((DB *, PAGE *, void *, int *));
+int __bam_key_range __P((DB *, DB_TXN *, DBT *, DB_KEY_RANGE *, u_int32_t));
+int __bam_30_btreemeta __P((DB *, char *, u_int8_t *));
+int __bam_31_btreemeta __P((DB *, char *, u_int32_t, DB_FH *, PAGE *, int *));
+int __bam_31_lbtree __P((DB *, char *, u_int32_t, DB_FH *, PAGE *, int *));
+int __bam_vrfy_meta __P((DB *, VRFY_DBINFO *, BTMETA *, db_pgno_t, u_int32_t));
+int __ram_vrfy_leaf __P((DB *, VRFY_DBINFO *, PAGE *, db_pgno_t, u_int32_t));
+int __bam_vrfy __P((DB *, VRFY_DBINFO *, PAGE *, db_pgno_t, u_int32_t));
+int __bam_vrfy_itemorder __P((DB *, VRFY_DBINFO *, PAGE *, db_pgno_t, u_int32_t, int, int, u_int32_t));
+int __bam_vrfy_structure __P((DB *, VRFY_DBINFO *, db_pgno_t, u_int32_t));
+int __bam_vrfy_subtree __P((DB *, VRFY_DBINFO *, db_pgno_t, void *, void *, u_int32_t, u_int32_t *, u_int32_t *, u_int32_t *));
+int __bam_salvage __P((DB *, VRFY_DBINFO *, db_pgno_t, u_int32_t, PAGE *, void *, int (*)(void *, const void *), DBT *, u_int32_t));
+int __bam_salvage_walkdupint __P((DB *, VRFY_DBINFO *, PAGE *, DBT *, void *, int (*)(void *, const void *), u_int32_t));
+int __bam_meta2pgset __P((DB *, VRFY_DBINFO *, BTMETA *, u_int32_t, DB *));
+int __bam_split_log __P((DB *, DB_TXN *, DB_LSN *, u_int32_t, db_pgno_t, DB_LSN *, db_pgno_t, DB_LSN *, u_int32_t, db_pgno_t, DB_LSN *, db_pgno_t, const DBT *, u_int32_t));
+int __bam_split_getpgnos __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __bam_split_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __bam_split_read __P((DB_ENV *, void *, __bam_split_args **));
+int __bam_rsplit_log __P((DB *, DB_TXN *, DB_LSN *, u_int32_t, db_pgno_t, const DBT *, db_pgno_t, db_pgno_t, const DBT *, DB_LSN *));
+int __bam_rsplit_getpgnos __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __bam_rsplit_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __bam_rsplit_read __P((DB_ENV *, void *, __bam_rsplit_args **));
+int __bam_adj_log __P((DB *, DB_TXN *, DB_LSN *, u_int32_t, db_pgno_t, DB_LSN *, u_int32_t, u_int32_t, u_int32_t));
+int __bam_adj_getpgnos __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __bam_adj_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __bam_adj_read __P((DB_ENV *, void *, __bam_adj_args **));
+int __bam_cadjust_log __P((DB *, DB_TXN *, DB_LSN *, u_int32_t, db_pgno_t, DB_LSN *, u_int32_t, int32_t, u_int32_t));
+int __bam_cadjust_getpgnos __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __bam_cadjust_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __bam_cadjust_read __P((DB_ENV *, void *, __bam_cadjust_args **));
+int __bam_cdel_log __P((DB *, DB_TXN *, DB_LSN *, u_int32_t, db_pgno_t, DB_LSN *, u_int32_t));
+int __bam_cdel_getpgnos __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __bam_cdel_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __bam_cdel_read __P((DB_ENV *, void *, __bam_cdel_args **));
+int __bam_repl_log __P((DB *, DB_TXN *, DB_LSN *, u_int32_t, db_pgno_t, DB_LSN *, u_int32_t, u_int32_t, const DBT *, const DBT *, u_int32_t, u_int32_t));
+int __bam_repl_getpgnos __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __bam_repl_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __bam_repl_read __P((DB_ENV *, void *, __bam_repl_args **));
+int __bam_root_log __P((DB *, DB_TXN *, DB_LSN *, u_int32_t, db_pgno_t, db_pgno_t, DB_LSN *));
+int __bam_root_getpgnos __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __bam_root_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __bam_root_read __P((DB_ENV *, void *, __bam_root_args **));
+int __bam_curadj_log __P((DB *, DB_TXN *, DB_LSN *, u_int32_t, db_ca_mode, db_pgno_t, db_pgno_t, db_pgno_t, u_int32_t, u_int32_t, u_int32_t));
+int __bam_curadj_getpgnos __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __bam_curadj_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __bam_curadj_read __P((DB_ENV *, void *, __bam_curadj_args **));
+int __bam_rcuradj_log __P((DB *, DB_TXN *, DB_LSN *, u_int32_t, ca_recno_arg, db_pgno_t, db_recno_t, u_int32_t));
+int __bam_rcuradj_getpgnos __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __bam_rcuradj_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __bam_rcuradj_read __P((DB_ENV *, void *, __bam_rcuradj_args **));
+int __bam_init_print __P((DB_ENV *, int (***)(DB_ENV *, DBT *, DB_LSN *, db_recops, void *), size_t *));
+int __bam_init_getpgnos __P((DB_ENV *, int (***)(DB_ENV *, DBT *, DB_LSN *, db_recops, void *), size_t *));
+int __bam_init_recover __P((DB_ENV *, int (***)(DB_ENV *, DBT *, DB_LSN *, db_recops, void *), size_t *));
+
+#if defined(__cplusplus)
+}
+#endif
+#endif /* !_btree_ext_h_ */
diff --git a/bdb/dbinc_auto/clib_ext.h b/bdb/dbinc_auto/clib_ext.h
new file mode 100644
index 00000000000..7e2817d620e
--- /dev/null
+++ b/bdb/dbinc_auto/clib_ext.h
@@ -0,0 +1,49 @@
+/* DO NOT EDIT: automatically built by dist/s_include. */
+#ifndef _clib_ext_h_
+#define _clib_ext_h_
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+#ifndef HAVE_GETCWD
+char *getcwd __P((char *, size_t));
+#endif
+#ifndef HAVE_GETOPT
+int getopt __P((int, char * const *, const char *));
+#endif
+#ifndef HAVE_MEMCMP
+int memcmp __P((const void *, const void *, size_t));
+#endif
+#ifndef HAVE_MEMCPY
+void *memcpy __P((void *, const void *, size_t));
+#endif
+#ifndef HAVE_MEMMOVE
+void *memmove __P((void *, const void *, size_t));
+#endif
+#ifndef HAVE_RAISE
+int raise __P((int));
+#endif
+#ifndef HAVE_SNPRINTF
+int snprintf __P((char *, size_t, const char *, ...));
+#endif
+#ifndef HAVE_STRCASECMP
+int strcasecmp __P((const char *, const char *));
+#endif
+#ifndef HAVE_STRCASECMP
+int strncasecmp __P((const char *, const char *, size_t));
+#endif
+#ifndef HAVE_STRDUP
+char *strdup __P((const char *));
+#endif
+#ifndef HAVE_STRERROR
+char *strerror __P((int));
+#endif
+#ifndef HAVE_VSNPRINTF
+int vsnprintf __P((char *, size_t, const char *, va_list));
+#endif
+
+#if defined(__cplusplus)
+}
+#endif
+#endif /* !_clib_ext_h_ */
diff --git a/bdb/dbinc_auto/common_ext.h b/bdb/dbinc_auto/common_ext.h
new file mode 100644
index 00000000000..7744982fe41
--- /dev/null
+++ b/bdb/dbinc_auto/common_ext.h
@@ -0,0 +1,44 @@
+/* DO NOT EDIT: automatically built by dist/s_include. */
+#ifndef _common_ext_h_
+#define _common_ext_h_
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+int __db_isbigendian __P((void));
+int __db_byteorder __P((DB_ENV *, int));
+int __db_fchk __P((DB_ENV *, const char *, u_int32_t, u_int32_t));
+int __db_fcchk __P((DB_ENV *, const char *, u_int32_t, u_int32_t, u_int32_t));
+int __db_ferr __P((const DB_ENV *, const char *, int));
+void __db_pgerr __P((DB *, db_pgno_t, int));
+int __db_pgfmt __P((DB_ENV *, db_pgno_t));
+int __db_eopnotsup __P((const DB_ENV *));
+#ifdef DIAGNOSTIC
+void __db_assert __P((const char *, const char *, int));
+#endif
+int __db_panic_msg __P((DB_ENV *));
+int __db_panic __P((DB_ENV *, int));
+void __db_err __P((const DB_ENV *, const char *, ...));
+void __db_errcall __P((const DB_ENV *, int, int, const char *, va_list));
+void __db_errfile __P((const DB_ENV *, int, int, const char *, va_list));
+void __db_logmsg __P((const DB_ENV *, DB_TXN *, const char *, u_int32_t, const char *, ...));
+int __db_unknown_flag __P((DB_ENV *, char *, u_int32_t));
+int __db_unknown_type __P((DB_ENV *, char *, DBTYPE));
+int __db_check_txn __P((DB *, DB_TXN *, u_int32_t, int));
+int __db_not_txn_env __P((DB_ENV *));
+int __db_getlong __P((DB *, const char *, char *, long, long, long *));
+int __db_getulong __P((DB *, const char *, char *, u_long, u_long, u_long *));
+void __db_idspace __P((u_int32_t *, int, u_int32_t *, u_int32_t *));
+u_int32_t __db_log2 __P((u_int32_t));
+int __db_util_arg __P((char *, char *, int *, char ***));
+int __db_util_cache __P((DB_ENV *, DB *, u_int32_t *, int *));
+int __db_util_logset __P((const char *, char *));
+void __db_util_siginit __P((void));
+int __db_util_interrupted __P((void));
+void __db_util_sigresend __P((void));
+
+#if defined(__cplusplus)
+}
+#endif
+#endif /* !_common_ext_h_ */
diff --git a/bdb/dbinc_auto/crdel_auto.h b/bdb/dbinc_auto/crdel_auto.h
new file mode 100644
index 00000000000..bdae193fac8
--- /dev/null
+++ b/bdb/dbinc_auto/crdel_auto.h
@@ -0,0 +1,16 @@
+/* Do not edit: automatically built by gen_rec.awk. */
+
+#ifndef __crdel_AUTO_H
+#define __crdel_AUTO_H
+#define DB___crdel_metasub 142
+typedef struct ___crdel_metasub_args {
+ u_int32_t type;
+ DB_TXN *txnid;
+ DB_LSN prev_lsn;
+ int32_t fileid;
+ db_pgno_t pgno;
+ DBT page;
+ DB_LSN lsn;
+} __crdel_metasub_args;
+
+#endif
diff --git a/bdb/dbinc_auto/crypto_ext.h b/bdb/dbinc_auto/crypto_ext.h
new file mode 100644
index 00000000000..e37a895d91a
--- /dev/null
+++ b/bdb/dbinc_auto/crypto_ext.h
@@ -0,0 +1,37 @@
+/* DO NOT EDIT: automatically built by dist/s_include. */
+#ifndef _crypto_ext_h_
+#define _crypto_ext_h_
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+int __aes_setup __P((DB_ENV *, DB_CIPHER *));
+int __aes_adj_size __P((size_t));
+int __aes_close __P((DB_ENV *, void *));
+int __aes_decrypt __P((DB_ENV *, void *, void *, u_int8_t *, size_t));
+int __aes_encrypt __P((DB_ENV *, void *, void *, u_int8_t *, size_t));
+int __aes_init __P((DB_ENV *, DB_CIPHER *));
+int __crypto_region_init __P((DB_ENV *));
+int __crypto_dbenv_close __P((DB_ENV *));
+int __crypto_algsetup __P((DB_ENV *, DB_CIPHER *, u_int32_t, int));
+int __crypto_decrypt_meta __P((DB_ENV *, DB *, u_int8_t *, int));
+int __db_generate_iv __P((DB_ENV *, u_int32_t *));
+int __db_rijndaelKeySetupEnc __P((u32 *, const u8 *, int));
+int __db_rijndaelKeySetupDec __P((u32 *, const u8 *, int));
+void __db_rijndaelEncrypt __P((u32 *, int, const u8 *, u8 *));
+void __db_rijndaelDecrypt __P((u32 *, int, const u8 *, u8 *));
+void __db_rijndaelEncryptRound __P((const u32 *, int, u8 *, int));
+void __db_rijndaelDecryptRound __P((const u32 *, int, u8 *, int));
+int __db_makeKey __P((keyInstance *, int, int, char *));
+int __db_cipherInit __P((cipherInstance *, int, char *));
+int __db_blockEncrypt __P((cipherInstance *, keyInstance *, BYTE *, size_t, BYTE *));
+int __db_padEncrypt __P((cipherInstance *, keyInstance *, BYTE *, int, BYTE *));
+int __db_blockDecrypt __P((cipherInstance *, keyInstance *, BYTE *, size_t, BYTE *));
+int __db_padDecrypt __P((cipherInstance *, keyInstance *, BYTE *, int, BYTE *));
+int __db_cipherUpdateRounds __P((cipherInstance *, keyInstance *, BYTE *, int, BYTE *, int));
+
+#if defined(__cplusplus)
+}
+#endif
+#endif /* !_crypto_ext_h_ */
diff --git a/bdb/dbinc_auto/db_auto.h b/bdb/dbinc_auto/db_auto.h
new file mode 100644
index 00000000000..e56f38b384b
--- /dev/null
+++ b/bdb/dbinc_auto/db_auto.h
@@ -0,0 +1,118 @@
+/* Do not edit: automatically built by gen_rec.awk. */
+
+#ifndef __db_AUTO_H
+#define __db_AUTO_H
+#define DB___db_addrem 41
+typedef struct ___db_addrem_args {
+ u_int32_t type;
+ DB_TXN *txnid;
+ DB_LSN prev_lsn;
+ u_int32_t opcode;
+ int32_t fileid;
+ db_pgno_t pgno;
+ u_int32_t indx;
+ u_int32_t nbytes;
+ DBT hdr;
+ DBT dbt;
+ DB_LSN pagelsn;
+} __db_addrem_args;
+
+#define DB___db_big 43
+typedef struct ___db_big_args {
+ u_int32_t type;
+ DB_TXN *txnid;
+ DB_LSN prev_lsn;
+ u_int32_t opcode;
+ int32_t fileid;
+ db_pgno_t pgno;
+ db_pgno_t prev_pgno;
+ db_pgno_t next_pgno;
+ DBT dbt;
+ DB_LSN pagelsn;
+ DB_LSN prevlsn;
+ DB_LSN nextlsn;
+} __db_big_args;
+
+#define DB___db_ovref 44
+typedef struct ___db_ovref_args {
+ u_int32_t type;
+ DB_TXN *txnid;
+ DB_LSN prev_lsn;
+ int32_t fileid;
+ db_pgno_t pgno;
+ int32_t adjust;
+ DB_LSN lsn;
+} __db_ovref_args;
+
+#define DB___db_relink 45
+typedef struct ___db_relink_args {
+ u_int32_t type;
+ DB_TXN *txnid;
+ DB_LSN prev_lsn;
+ u_int32_t opcode;
+ int32_t fileid;
+ db_pgno_t pgno;
+ DB_LSN lsn;
+ db_pgno_t prev;
+ DB_LSN lsn_prev;
+ db_pgno_t next;
+ DB_LSN lsn_next;
+} __db_relink_args;
+
+#define DB___db_debug 47
+typedef struct ___db_debug_args {
+ u_int32_t type;
+ DB_TXN *txnid;
+ DB_LSN prev_lsn;
+ DBT op;
+ int32_t fileid;
+ DBT key;
+ DBT data;
+ u_int32_t arg_flags;
+} __db_debug_args;
+
+#define DB___db_noop 48
+typedef struct ___db_noop_args {
+ u_int32_t type;
+ DB_TXN *txnid;
+ DB_LSN prev_lsn;
+ int32_t fileid;
+ db_pgno_t pgno;
+ DB_LSN prevlsn;
+} __db_noop_args;
+
+#define DB___db_pg_alloc 49
+typedef struct ___db_pg_alloc_args {
+ u_int32_t type;
+ DB_TXN *txnid;
+ DB_LSN prev_lsn;
+ int32_t fileid;
+ DB_LSN meta_lsn;
+ db_pgno_t meta_pgno;
+ DB_LSN page_lsn;
+ db_pgno_t pgno;
+ u_int32_t ptype;
+ db_pgno_t next;
+} __db_pg_alloc_args;
+
+#define DB___db_pg_free 50
+typedef struct ___db_pg_free_args {
+ u_int32_t type;
+ DB_TXN *txnid;
+ DB_LSN prev_lsn;
+ int32_t fileid;
+ db_pgno_t pgno;
+ DB_LSN meta_lsn;
+ db_pgno_t meta_pgno;
+ DBT header;
+ db_pgno_t next;
+} __db_pg_free_args;
+
+#define DB___db_cksum 51
+typedef struct ___db_cksum_args {
+ u_int32_t type;
+ DB_TXN *txnid;
+ DB_LSN prev_lsn;
+} __db_cksum_args;
+
+#endif
diff --git a/bdb/dbinc_auto/db_ext.h b/bdb/dbinc_auto/db_ext.h
new file mode 100644
index 00000000000..24a13975c89
--- /dev/null
+++ b/bdb/dbinc_auto/db_ext.h
@@ -0,0 +1,224 @@
+/* DO NOT EDIT: automatically built by dist/s_include. */
+#ifndef _db_ext_h_
+#define _db_ext_h_
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+int __crdel_metasub_log __P((DB *, DB_TXN *, DB_LSN *, u_int32_t, db_pgno_t, const DBT *, DB_LSN *));
+int __crdel_metasub_getpgnos __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __crdel_metasub_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __crdel_metasub_read __P((DB_ENV *, void *, __crdel_metasub_args **));
+int __crdel_init_print __P((DB_ENV *, int (***)(DB_ENV *, DBT *, DB_LSN *, db_recops, void *), size_t *));
+int __crdel_init_getpgnos __P((DB_ENV *, int (***)(DB_ENV *, DBT *, DB_LSN *, db_recops, void *), size_t *));
+int __crdel_init_recover __P((DB_ENV *, int (***)(DB_ENV *, DBT *, DB_LSN *, db_recops, void *), size_t *));
+int __crdel_metasub_recover __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __db_master_open __P((DB *, DB_TXN *, const char *, u_int32_t, int, DB **));
+int __db_master_update __P((DB *, DB *, DB_TXN *, const char *, DBTYPE, mu_action, const char *, u_int32_t));
+int __db_dbenv_setup __P((DB *, DB_TXN *, const char *, u_int32_t, u_int32_t));
+int __db_close __P((DB *, u_int32_t));
+int __db_close_i __P((DB *, DB_TXN *, u_int32_t));
+int __db_refresh __P((DB *, DB_TXN *, u_int32_t));
+int __db_log_page __P((DB *, DB_TXN *, DB_LSN *, db_pgno_t, PAGE *));
+int __db_backup_name __P((DB_ENV *, const char *, DB_TXN *, char **));
+DB *__dblist_get __P((DB_ENV *, u_int32_t));
+#if CONFIG_TEST
+int __db_testcopy __P((DB_ENV *, DB *, const char *));
+#endif
+int __db_cursor __P((DB *, DB_TXN *, DBC **, u_int32_t));
+int __db_icursor __P((DB *, DB_TXN *, DBTYPE, db_pgno_t, int, u_int32_t, DBC **));
+int __db_cprint __P((DB *));
+int __db_fd __P((DB *, int *));
+int __db_get __P((DB *, DB_TXN *, DBT *, DBT *, u_int32_t));
+int __db_put __P((DB *, DB_TXN *, DBT *, DBT *, u_int32_t));
+int __db_delete __P((DB *, DB_TXN *, DBT *, u_int32_t));
+int __db_sync __P((DB *, u_int32_t));
+int __db_associate __P((DB *, DB_TXN *, DB *, int (*)(DB *, const DBT *, const DBT *, DBT *), u_int32_t));
+int __db_pget __P((DB *, DB_TXN *, DBT *, DBT *, DBT *, u_int32_t));
+int __db_addrem_log __P((DB *, DB_TXN *, DB_LSN *, u_int32_t, u_int32_t, db_pgno_t, u_int32_t, u_int32_t, const DBT *, const DBT *, DB_LSN *));
+int __db_addrem_getpgnos __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __db_addrem_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __db_addrem_read __P((DB_ENV *, void *, __db_addrem_args **));
+int __db_big_log __P((DB *, DB_TXN *, DB_LSN *, u_int32_t, u_int32_t, db_pgno_t, db_pgno_t, db_pgno_t, const DBT *, DB_LSN *, DB_LSN *, DB_LSN *));
+int __db_big_getpgnos __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __db_big_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __db_big_read __P((DB_ENV *, void *, __db_big_args **));
+int __db_ovref_log __P((DB *, DB_TXN *, DB_LSN *, u_int32_t, db_pgno_t, int32_t, DB_LSN *));
+int __db_ovref_getpgnos __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __db_ovref_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __db_ovref_read __P((DB_ENV *, void *, __db_ovref_args **));
+int __db_relink_log __P((DB *, DB_TXN *, DB_LSN *, u_int32_t, u_int32_t, db_pgno_t, DB_LSN *, db_pgno_t, DB_LSN *, db_pgno_t, DB_LSN *));
+int __db_relink_getpgnos __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __db_relink_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __db_relink_read __P((DB_ENV *, void *, __db_relink_args **));
+int __db_debug_log __P((DB_ENV *, DB_TXN *, DB_LSN *, u_int32_t, const DBT *, int32_t, const DBT *, const DBT *, u_int32_t));
+int __db_debug_getpgnos __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __db_debug_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __db_debug_read __P((DB_ENV *, void *, __db_debug_args **));
+int __db_noop_log __P((DB *, DB_TXN *, DB_LSN *, u_int32_t, db_pgno_t, DB_LSN *));
+int __db_noop_getpgnos __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __db_noop_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __db_noop_read __P((DB_ENV *, void *, __db_noop_args **));
+int __db_pg_alloc_log __P((DB *, DB_TXN *, DB_LSN *, u_int32_t, DB_LSN *, db_pgno_t, DB_LSN *, db_pgno_t, u_int32_t, db_pgno_t));
+int __db_pg_alloc_getpgnos __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __db_pg_alloc_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __db_pg_alloc_read __P((DB_ENV *, void *, __db_pg_alloc_args **));
+int __db_pg_free_log __P((DB *, DB_TXN *, DB_LSN *, u_int32_t, db_pgno_t, DB_LSN *, db_pgno_t, const DBT *, db_pgno_t));
+int __db_pg_free_getpgnos __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __db_pg_free_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __db_pg_free_read __P((DB_ENV *, void *, __db_pg_free_args **));
+int __db_cksum_log __P((DB_ENV *, DB_TXN *, DB_LSN *, u_int32_t));
+int __db_cksum_getpgnos __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __db_cksum_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __db_cksum_read __P((DB_ENV *, void *, __db_cksum_args **));
+int __db_init_print __P((DB_ENV *, int (***)(DB_ENV *, DBT *, DB_LSN *, db_recops, void *), size_t *));
+int __db_init_getpgnos __P((DB_ENV *, int (***)(DB_ENV *, DBT *, DB_LSN *, db_recops, void *), size_t *));
+int __db_init_recover __P((DB_ENV *, int (***)(DB_ENV *, DBT *, DB_LSN *, db_recops, void *), size_t *));
+int __db_c_close __P((DBC *));
+int __db_c_destroy __P((DBC *));
+int __db_c_count __P((DBC *, db_recno_t *, u_int32_t));
+int __db_c_del __P((DBC *, u_int32_t));
+int __db_c_dup __P((DBC *, DBC **, u_int32_t));
+int __db_c_idup __P((DBC *, DBC **, u_int32_t));
+int __db_c_newopd __P((DBC *, db_pgno_t, DBC *, DBC **));
+int __db_c_get __P((DBC *, DBT *, DBT *, u_int32_t));
+int __db_c_put __P((DBC *, DBT *, DBT *, u_int32_t));
+int __db_duperr __P((DB *, u_int32_t));
+int __db_c_secondary_get __P((DBC *, DBT *, DBT *, u_int32_t));
+int __db_c_pget __P((DBC *, DBT *, DBT *, DBT *, u_int32_t));
+int __db_c_del_primary __P((DBC *));
+DB *__db_s_first __P((DB *));
+int __db_s_next __P((DB **));
+int __db_s_done __P((DB *));
+u_int32_t __db_partsize __P((u_int32_t, DBT *));
+int __db_pgin __P((DB_ENV *, db_pgno_t, void *, DBT *));
+int __db_pgout __P((DB_ENV *, db_pgno_t, void *, DBT *));
+void __db_metaswap __P((PAGE *));
+int __db_byteswap __P((DB_ENV *, DB *, db_pgno_t, PAGE *, size_t, int));
+int __db_dispatch __P((DB_ENV *, int (**)__P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *)), size_t, DBT *, DB_LSN *, db_recops, void *));
+int __db_add_recovery __P((DB_ENV *, int (***)(DB_ENV *, DBT *, DB_LSN *, db_recops, void *), size_t *, int (*)(DB_ENV *, DBT *, DB_LSN *, db_recops, void *), u_int32_t));
+int __db_txnlist_init __P((DB_ENV *, u_int32_t, u_int32_t, DB_LSN *, void *));
+int __db_txnlist_add __P((DB_ENV *, void *, u_int32_t, int32_t, DB_LSN *));
+int __db_txnlist_remove __P((DB_ENV *, void *, u_int32_t));
+void __db_txnlist_ckp __P((DB_ENV *, void *, DB_LSN *));
+void __db_txnlist_end __P((DB_ENV *, void *));
+int __db_txnlist_find __P((DB_ENV *, void *, u_int32_t));
+int __db_txnlist_update __P((DB_ENV *, void *, u_int32_t, u_int32_t, DB_LSN *));
+int __db_txnlist_gen __P((DB_ENV *, void *, int, u_int32_t, u_int32_t));
+int __db_txnlist_lsnadd __P((DB_ENV *, void *, DB_LSN *, u_int32_t));
+int __db_txnlist_lsninit __P((DB_ENV *, DB_TXNHEAD *, DB_LSN *));
+int __db_add_limbo __P((DB_ENV *, void *, int32_t, db_pgno_t, int32_t));
+int __db_do_the_limbo __P((DB_ENV *, DB_TXN *, DB_TXN *, DB_TXNHEAD *));
+void __db_txnlist_print __P((void *));
+int __db_ditem __P((DBC *, PAGE *, u_int32_t, u_int32_t));
+int __db_pitem __P((DBC *, PAGE *, u_int32_t, u_int32_t, DBT *, DBT *));
+int __db_relink __P((DBC *, u_int32_t, PAGE *, PAGE **, int));
+int __db_cursorchk __P((const DB *, u_int32_t));
+int __db_ccountchk __P((const DB *, u_int32_t, int));
+int __db_cdelchk __P((const DB *, u_int32_t, int));
+int __db_cgetchk __P((const DB *, DBT *, DBT *, u_int32_t, int));
+int __db_cputchk __P((const DB *, const DBT *, DBT *, u_int32_t, int));
+int __db_pgetchk __P((const DB *, const DBT *, DBT *, DBT *, u_int32_t));
+int __db_cpgetchk __P((const DB *, DBT *, DBT *, DBT *, u_int32_t, int));
+int __db_delchk __P((const DB *, DBT *, u_int32_t));
+int __db_getchk __P((const DB *, const DBT *, DBT *, u_int32_t));
+int __db_joinchk __P((const DB *, DBC * const *, u_int32_t));
+int __db_joingetchk __P((const DB *, DBT *, u_int32_t));
+int __db_putchk __P((const DB *, DBT *, const DBT *, u_int32_t, int));
+int __db_statchk __P((const DB *, u_int32_t));
+int __db_syncchk __P((const DB *, u_int32_t));
+int __db_secondary_corrupt __P((DB *));
+int __db_associatechk __P((DB *, DB *, int (*)(DB *, const DBT *, const DBT *, DBT *), u_int32_t));
+int __db_txn_auto __P((DB *, DB_TXN **));
+int __db_join __P((DB *, DBC **, DBC **, u_int32_t));
+int __db_new __P((DBC *, u_int32_t, PAGE **));
+int __db_free __P((DBC *, PAGE *));
+int __db_lprint __P((DBC *));
+int __db_lget __P((DBC *, int, db_pgno_t, db_lockmode_t, u_int32_t, DB_LOCK *));
+int __db_lput __P((DBC *, DB_LOCK *));
+int __dbh_am_chk __P((DB *, u_int32_t));
+int __db_set_lorder __P((DB *, int));
+int __db_open __P((DB *, DB_TXN *, const char *, const char *, DBTYPE, u_int32_t, int));
+int __db_dbopen __P((DB *, DB_TXN *, const char *, const char *, u_int32_t, int, db_pgno_t));
+int __db_new_file __P((DB *, DB_TXN *, DB_FH *, const char *));
+int __db_init_subdb __P((DB *, DB *, const char *, DB_TXN *));
+int __db_chk_meta __P((DB_ENV *, DB *, DBMETA *, int));
+int __db_meta_setup __P((DB_ENV *, DB *, const char *, DBMETA *, u_int32_t, int));
+int __db_goff __P((DB *, DBT *, u_int32_t, db_pgno_t, void **, u_int32_t *));
+int __db_poff __P((DBC *, const DBT *, db_pgno_t *));
+int __db_ovref __P((DBC *, db_pgno_t, int32_t));
+int __db_doff __P((DBC *, db_pgno_t));
+int __db_moff __P((DB *, const DBT *, db_pgno_t, u_int32_t, int (*)(DB *, const DBT *, const DBT *), int *));
+int __db_vrfy_overflow __P((DB *, VRFY_DBINFO *, PAGE *, db_pgno_t, u_int32_t));
+int __db_vrfy_ovfl_structure __P((DB *, VRFY_DBINFO *, db_pgno_t, u_int32_t, u_int32_t));
+int __db_safe_goff __P((DB *, VRFY_DBINFO *, db_pgno_t, DBT *, void **, u_int32_t));
+void __db_loadme __P((void));
+int __db_dump __P((DB *, char *, char *));
+void __db_inmemdbflags __P((u_int32_t, void *, void (*)(u_int32_t, const FN *, void *)));
+int __db_prnpage __P((DB *, db_pgno_t, FILE *));
+int __db_prpage __P((DB *, PAGE *, FILE *, u_int32_t));
+void __db_pr __P((u_int8_t *, u_int32_t, FILE *));
+int __db_prdbt __P((DBT *, int, const char *, void *, int (*)(void *, const void *), int, VRFY_DBINFO *));
+void __db_prflags __P((u_int32_t, const FN *, void *));
+const char * __db_dbtype_to_string __P((DBTYPE));
+int __db_prheader __P((DB *, char *, int, int, void *, int (*)(void *, const void *), VRFY_DBINFO *, db_pgno_t));
+int __db_prfooter __P((void *, int (*)(void *, const void *)));
+int __db_addrem_recover __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __db_big_recover __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __db_ovref_recover __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __db_relink_recover __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __db_debug_recover __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __db_noop_recover __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __db_pg_alloc_recover __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __db_pg_free_recover __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __db_cksum_recover __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __db_traverse_big __P((DB *, db_pgno_t, int (*)(DB *, PAGE *, void *, int *), void *));
+int __db_reclaim_callback __P((DB *, PAGE *, void *, int *));
+int __db_truncate_callback __P((DB *, PAGE *, void *, int *));
+int __dbenv_dbremove __P((DB_ENV *, DB_TXN *, const char *, const char *, u_int32_t));
+int __db_remove __P((DB *, const char *, const char *, u_int32_t));
+int __db_remove_i __P((DB *, DB_TXN *, const char *, const char *));
+int __dbenv_dbrename __P((DB_ENV *, DB_TXN *, const char *, const char *, const char *, u_int32_t));
+int __db_rename __P((DB *, const char *, const char *, const char *, u_int32_t));
+int __db_rename_i __P((DB *, DB_TXN *, const char *, const char *, const char *));
+int __db_ret __P((DB *, PAGE *, u_int32_t, DBT *, void **, u_int32_t *));
+int __db_retcopy __P((DB_ENV *, DBT *, void *, u_int32_t, void **, u_int32_t *));
+int __db_truncate __P((DB *, DB_TXN *, u_int32_t *, u_int32_t));
+int __db_upgrade __P((DB *, const char *, u_int32_t));
+int __db_lastpgno __P((DB *, char *, DB_FH *, db_pgno_t *));
+int __db_31_offdup __P((DB *, char *, DB_FH *, int, db_pgno_t *));
+int __db_verify __P((DB *, const char *, const char *, FILE *, u_int32_t));
+int __db_verify_callback __P((void *, const void *));
+int __db_verify_internal __P((DB *, const char *, const char *, void *, int (*)(void *, const void *), u_int32_t));
+int __db_vrfy_datapage __P((DB *, VRFY_DBINFO *, PAGE *, db_pgno_t, u_int32_t));
+int __db_vrfy_meta __P((DB *, VRFY_DBINFO *, DBMETA *, db_pgno_t, u_int32_t));
+void __db_vrfy_struct_feedback __P((DB *, VRFY_DBINFO *));
+int __db_vrfy_inpitem __P((DB *, PAGE *, db_pgno_t, u_int32_t, int, u_int32_t, u_int32_t *, u_int32_t *));
+int __db_vrfy_duptype __P((DB *, VRFY_DBINFO *, db_pgno_t, u_int32_t));
+int __db_salvage_duptree __P((DB *, VRFY_DBINFO *, db_pgno_t, DBT *, void *, int (*)(void *, const void *), u_int32_t));
+int __db_vrfy_dbinfo_create __P((DB_ENV *, u_int32_t, VRFY_DBINFO **));
+int __db_vrfy_dbinfo_destroy __P((DB_ENV *, VRFY_DBINFO *));
+int __db_vrfy_getpageinfo __P((VRFY_DBINFO *, db_pgno_t, VRFY_PAGEINFO **));
+int __db_vrfy_putpageinfo __P((DB_ENV *, VRFY_DBINFO *, VRFY_PAGEINFO *));
+int __db_vrfy_pgset __P((DB_ENV *, u_int32_t, DB **));
+int __db_vrfy_pgset_get __P((DB *, db_pgno_t, int *));
+int __db_vrfy_pgset_inc __P((DB *, db_pgno_t));
+int __db_vrfy_pgset_dec __P((DB *, db_pgno_t));
+int __db_vrfy_pgset_next __P((DBC *, db_pgno_t *));
+int __db_vrfy_childcursor __P((VRFY_DBINFO *, DBC **));
+int __db_vrfy_childput __P((VRFY_DBINFO *, db_pgno_t, VRFY_CHILDINFO *));
+int __db_vrfy_ccset __P((DBC *, db_pgno_t, VRFY_CHILDINFO **));
+int __db_vrfy_ccnext __P((DBC *, VRFY_CHILDINFO **));
+int __db_vrfy_ccclose __P((DBC *));
+int __db_salvage_init __P((VRFY_DBINFO *));
+void __db_salvage_destroy __P((VRFY_DBINFO *));
+int __db_salvage_getnext __P((VRFY_DBINFO *, db_pgno_t *, u_int32_t *));
+int __db_salvage_isdone __P((VRFY_DBINFO *, db_pgno_t));
+int __db_salvage_markdone __P((VRFY_DBINFO *, db_pgno_t));
+int __db_salvage_markneeded __P((VRFY_DBINFO *, db_pgno_t, u_int32_t));
+
+#if defined(__cplusplus)
+}
+#endif
+#endif /* !_db_ext_h_ */
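
Every prototype in these generated _ext headers is wrapped in the __P() macro so the same declarations compile under both ANSI and pre-ANSI compilers. The macro itself is defined elsewhere in the tree (db.h), not in this patch hunk; a sketch of the conventional definition is:

/* Conventional __P() definition (sketch; the tree's db.h is authoritative). */
#undef __P
#if defined(__STDC__) || defined(__cplusplus)
#define	__P(protos)	protos	/* ANSI C: keep the prototype as written. */
#else
#define	__P(protos)	()	/* K&R C: strip the argument list. */
#endif
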
diff --git a/bdb/dbinc_auto/db_server.h b/bdb/dbinc_auto/db_server.h
new file mode 100644
index 00000000000..3409eed1a9f
--- /dev/null
+++ b/bdb/dbinc_auto/db_server.h
@@ -0,0 +1,1006 @@
+/*
+ * Please do not edit this file.
+ * It was generated using rpcgen.
+ */
+
+#ifndef _DB_SERVER_H_RPCGEN
+#define _DB_SERVER_H_RPCGEN
+
+
+struct __env_cachesize_msg {
+ u_int dbenvcl_id;
+ u_int gbytes;
+ u_int bytes;
+ u_int ncache;
+};
+typedef struct __env_cachesize_msg __env_cachesize_msg;
+
+struct __env_cachesize_reply {
+ int status;
+};
+typedef struct __env_cachesize_reply __env_cachesize_reply;
+
+struct __env_close_msg {
+ u_int dbenvcl_id;
+ u_int flags;
+};
+typedef struct __env_close_msg __env_close_msg;
+
+struct __env_close_reply {
+ int status;
+};
+typedef struct __env_close_reply __env_close_reply;
+
+struct __env_create_msg {
+ u_int timeout;
+};
+typedef struct __env_create_msg __env_create_msg;
+
+struct __env_create_reply {
+ int status;
+ u_int envcl_id;
+};
+typedef struct __env_create_reply __env_create_reply;
+
+struct __env_dbremove_msg {
+ u_int dbenvcl_id;
+ u_int txnpcl_id;
+ char *name;
+ char *subdb;
+ u_int flags;
+};
+typedef struct __env_dbremove_msg __env_dbremove_msg;
+
+struct __env_dbremove_reply {
+ int status;
+};
+typedef struct __env_dbremove_reply __env_dbremove_reply;
+
+struct __env_dbrename_msg {
+ u_int dbenvcl_id;
+ u_int txnpcl_id;
+ char *name;
+ char *subdb;
+ char *newname;
+ u_int flags;
+};
+typedef struct __env_dbrename_msg __env_dbrename_msg;
+
+struct __env_dbrename_reply {
+ int status;
+};
+typedef struct __env_dbrename_reply __env_dbrename_reply;
+
+struct __env_encrypt_msg {
+ u_int dbenvcl_id;
+ char *passwd;
+ u_int flags;
+};
+typedef struct __env_encrypt_msg __env_encrypt_msg;
+
+struct __env_encrypt_reply {
+ int status;
+};
+typedef struct __env_encrypt_reply __env_encrypt_reply;
+
+struct __env_flags_msg {
+ u_int dbenvcl_id;
+ u_int flags;
+ u_int onoff;
+};
+typedef struct __env_flags_msg __env_flags_msg;
+
+struct __env_flags_reply {
+ int status;
+};
+typedef struct __env_flags_reply __env_flags_reply;
+
+struct __env_open_msg {
+ u_int dbenvcl_id;
+ char *home;
+ u_int flags;
+ u_int mode;
+};
+typedef struct __env_open_msg __env_open_msg;
+
+struct __env_open_reply {
+ int status;
+ u_int envcl_id;
+};
+typedef struct __env_open_reply __env_open_reply;
+
+struct __env_remove_msg {
+ u_int dbenvcl_id;
+ char *home;
+ u_int flags;
+};
+typedef struct __env_remove_msg __env_remove_msg;
+
+struct __env_remove_reply {
+ int status;
+};
+typedef struct __env_remove_reply __env_remove_reply;
+
+struct __txn_abort_msg {
+ u_int txnpcl_id;
+};
+typedef struct __txn_abort_msg __txn_abort_msg;
+
+struct __txn_abort_reply {
+ int status;
+};
+typedef struct __txn_abort_reply __txn_abort_reply;
+
+struct __txn_begin_msg {
+ u_int dbenvcl_id;
+ u_int parentcl_id;
+ u_int flags;
+};
+typedef struct __txn_begin_msg __txn_begin_msg;
+
+struct __txn_begin_reply {
+ int status;
+ u_int txnidcl_id;
+};
+typedef struct __txn_begin_reply __txn_begin_reply;
+
+struct __txn_commit_msg {
+ u_int txnpcl_id;
+ u_int flags;
+};
+typedef struct __txn_commit_msg __txn_commit_msg;
+
+struct __txn_commit_reply {
+ int status;
+};
+typedef struct __txn_commit_reply __txn_commit_reply;
+
+struct __txn_discard_msg {
+ u_int txnpcl_id;
+ u_int flags;
+};
+typedef struct __txn_discard_msg __txn_discard_msg;
+
+struct __txn_discard_reply {
+ int status;
+};
+typedef struct __txn_discard_reply __txn_discard_reply;
+
+struct __txn_prepare_msg {
+ u_int txnpcl_id;
+ char gid[128];
+};
+typedef struct __txn_prepare_msg __txn_prepare_msg;
+
+struct __txn_prepare_reply {
+ int status;
+};
+typedef struct __txn_prepare_reply __txn_prepare_reply;
+
+struct __txn_recover_msg {
+ u_int dbenvcl_id;
+ u_int count;
+ u_int flags;
+};
+typedef struct __txn_recover_msg __txn_recover_msg;
+
+struct __txn_recover_reply {
+ int status;
+ struct {
+ u_int txn_len;
+ u_int *txn_val;
+ } txn;
+ struct {
+ u_int gid_len;
+ char *gid_val;
+ } gid;
+ u_int retcount;
+};
+typedef struct __txn_recover_reply __txn_recover_reply;
+
+struct __db_associate_msg {
+ u_int dbpcl_id;
+ u_int txnpcl_id;
+ u_int sdbpcl_id;
+ u_int flags;
+};
+typedef struct __db_associate_msg __db_associate_msg;
+
+struct __db_associate_reply {
+ int status;
+};
+typedef struct __db_associate_reply __db_associate_reply;
+
+struct __db_bt_maxkey_msg {
+ u_int dbpcl_id;
+ u_int maxkey;
+};
+typedef struct __db_bt_maxkey_msg __db_bt_maxkey_msg;
+
+struct __db_bt_maxkey_reply {
+ int status;
+};
+typedef struct __db_bt_maxkey_reply __db_bt_maxkey_reply;
+
+struct __db_bt_minkey_msg {
+ u_int dbpcl_id;
+ u_int minkey;
+};
+typedef struct __db_bt_minkey_msg __db_bt_minkey_msg;
+
+struct __db_bt_minkey_reply {
+ int status;
+};
+typedef struct __db_bt_minkey_reply __db_bt_minkey_reply;
+
+struct __db_close_msg {
+ u_int dbpcl_id;
+ u_int flags;
+};
+typedef struct __db_close_msg __db_close_msg;
+
+struct __db_close_reply {
+ int status;
+};
+typedef struct __db_close_reply __db_close_reply;
+
+struct __db_create_msg {
+ u_int dbenvcl_id;
+ u_int flags;
+};
+typedef struct __db_create_msg __db_create_msg;
+
+struct __db_create_reply {
+ int status;
+ u_int dbcl_id;
+};
+typedef struct __db_create_reply __db_create_reply;
+
+struct __db_del_msg {
+ u_int dbpcl_id;
+ u_int txnpcl_id;
+ u_int keydlen;
+ u_int keydoff;
+ u_int keyulen;
+ u_int keyflags;
+ struct {
+ u_int keydata_len;
+ char *keydata_val;
+ } keydata;
+ u_int flags;
+};
+typedef struct __db_del_msg __db_del_msg;
+
+struct __db_del_reply {
+ int status;
+};
+typedef struct __db_del_reply __db_del_reply;
+
+struct __db_encrypt_msg {
+ u_int dbpcl_id;
+ char *passwd;
+ u_int flags;
+};
+typedef struct __db_encrypt_msg __db_encrypt_msg;
+
+struct __db_encrypt_reply {
+ int status;
+};
+typedef struct __db_encrypt_reply __db_encrypt_reply;
+
+struct __db_extentsize_msg {
+ u_int dbpcl_id;
+ u_int extentsize;
+};
+typedef struct __db_extentsize_msg __db_extentsize_msg;
+
+struct __db_extentsize_reply {
+ int status;
+};
+typedef struct __db_extentsize_reply __db_extentsize_reply;
+
+struct __db_flags_msg {
+ u_int dbpcl_id;
+ u_int flags;
+};
+typedef struct __db_flags_msg __db_flags_msg;
+
+struct __db_flags_reply {
+ int status;
+};
+typedef struct __db_flags_reply __db_flags_reply;
+
+struct __db_get_msg {
+ u_int dbpcl_id;
+ u_int txnpcl_id;
+ u_int keydlen;
+ u_int keydoff;
+ u_int keyulen;
+ u_int keyflags;
+ struct {
+ u_int keydata_len;
+ char *keydata_val;
+ } keydata;
+ u_int datadlen;
+ u_int datadoff;
+ u_int dataulen;
+ u_int dataflags;
+ struct {
+ u_int datadata_len;
+ char *datadata_val;
+ } datadata;
+ u_int flags;
+};
+typedef struct __db_get_msg __db_get_msg;
+
+struct __db_get_reply {
+ int status;
+ struct {
+ u_int keydata_len;
+ char *keydata_val;
+ } keydata;
+ struct {
+ u_int datadata_len;
+ char *datadata_val;
+ } datadata;
+};
+typedef struct __db_get_reply __db_get_reply;
+
+struct __db_h_ffactor_msg {
+ u_int dbpcl_id;
+ u_int ffactor;
+};
+typedef struct __db_h_ffactor_msg __db_h_ffactor_msg;
+
+struct __db_h_ffactor_reply {
+ int status;
+};
+typedef struct __db_h_ffactor_reply __db_h_ffactor_reply;
+
+struct __db_h_nelem_msg {
+ u_int dbpcl_id;
+ u_int nelem;
+};
+typedef struct __db_h_nelem_msg __db_h_nelem_msg;
+
+struct __db_h_nelem_reply {
+ int status;
+};
+typedef struct __db_h_nelem_reply __db_h_nelem_reply;
+
+struct __db_key_range_msg {
+ u_int dbpcl_id;
+ u_int txnpcl_id;
+ u_int keydlen;
+ u_int keydoff;
+ u_int keyulen;
+ u_int keyflags;
+ struct {
+ u_int keydata_len;
+ char *keydata_val;
+ } keydata;
+ u_int flags;
+};
+typedef struct __db_key_range_msg __db_key_range_msg;
+
+struct __db_key_range_reply {
+ int status;
+ double less;
+ double equal;
+ double greater;
+};
+typedef struct __db_key_range_reply __db_key_range_reply;
+
+struct __db_lorder_msg {
+ u_int dbpcl_id;
+ u_int lorder;
+};
+typedef struct __db_lorder_msg __db_lorder_msg;
+
+struct __db_lorder_reply {
+ int status;
+};
+typedef struct __db_lorder_reply __db_lorder_reply;
+
+struct __db_open_msg {
+ u_int dbpcl_id;
+ u_int txnpcl_id;
+ char *name;
+ char *subdb;
+ u_int type;
+ u_int flags;
+ u_int mode;
+};
+typedef struct __db_open_msg __db_open_msg;
+
+struct __db_open_reply {
+ int status;
+ u_int dbcl_id;
+ u_int type;
+ u_int dbflags;
+ u_int lorder;
+};
+typedef struct __db_open_reply __db_open_reply;
+
+struct __db_pagesize_msg {
+ u_int dbpcl_id;
+ u_int pagesize;
+};
+typedef struct __db_pagesize_msg __db_pagesize_msg;
+
+struct __db_pagesize_reply {
+ int status;
+};
+typedef struct __db_pagesize_reply __db_pagesize_reply;
+
+struct __db_pget_msg {
+ u_int dbpcl_id;
+ u_int txnpcl_id;
+ u_int skeydlen;
+ u_int skeydoff;
+ u_int skeyulen;
+ u_int skeyflags;
+ struct {
+ u_int skeydata_len;
+ char *skeydata_val;
+ } skeydata;
+ u_int pkeydlen;
+ u_int pkeydoff;
+ u_int pkeyulen;
+ u_int pkeyflags;
+ struct {
+ u_int pkeydata_len;
+ char *pkeydata_val;
+ } pkeydata;
+ u_int datadlen;
+ u_int datadoff;
+ u_int dataulen;
+ u_int dataflags;
+ struct {
+ u_int datadata_len;
+ char *datadata_val;
+ } datadata;
+ u_int flags;
+};
+typedef struct __db_pget_msg __db_pget_msg;
+
+struct __db_pget_reply {
+ int status;
+ struct {
+ u_int skeydata_len;
+ char *skeydata_val;
+ } skeydata;
+ struct {
+ u_int pkeydata_len;
+ char *pkeydata_val;
+ } pkeydata;
+ struct {
+ u_int datadata_len;
+ char *datadata_val;
+ } datadata;
+};
+typedef struct __db_pget_reply __db_pget_reply;
+
+struct __db_put_msg {
+ u_int dbpcl_id;
+ u_int txnpcl_id;
+ u_int keydlen;
+ u_int keydoff;
+ u_int keyulen;
+ u_int keyflags;
+ struct {
+ u_int keydata_len;
+ char *keydata_val;
+ } keydata;
+ u_int datadlen;
+ u_int datadoff;
+ u_int dataulen;
+ u_int dataflags;
+ struct {
+ u_int datadata_len;
+ char *datadata_val;
+ } datadata;
+ u_int flags;
+};
+typedef struct __db_put_msg __db_put_msg;
+
+struct __db_put_reply {
+ int status;
+ struct {
+ u_int keydata_len;
+ char *keydata_val;
+ } keydata;
+};
+typedef struct __db_put_reply __db_put_reply;
+
+struct __db_re_delim_msg {
+ u_int dbpcl_id;
+ u_int delim;
+};
+typedef struct __db_re_delim_msg __db_re_delim_msg;
+
+struct __db_re_delim_reply {
+ int status;
+};
+typedef struct __db_re_delim_reply __db_re_delim_reply;
+
+struct __db_re_len_msg {
+ u_int dbpcl_id;
+ u_int len;
+};
+typedef struct __db_re_len_msg __db_re_len_msg;
+
+struct __db_re_len_reply {
+ int status;
+};
+typedef struct __db_re_len_reply __db_re_len_reply;
+
+struct __db_re_pad_msg {
+ u_int dbpcl_id;
+ u_int pad;
+};
+typedef struct __db_re_pad_msg __db_re_pad_msg;
+
+struct __db_re_pad_reply {
+ int status;
+};
+typedef struct __db_re_pad_reply __db_re_pad_reply;
+
+struct __db_remove_msg {
+ u_int dbpcl_id;
+ char *name;
+ char *subdb;
+ u_int flags;
+};
+typedef struct __db_remove_msg __db_remove_msg;
+
+struct __db_remove_reply {
+ int status;
+};
+typedef struct __db_remove_reply __db_remove_reply;
+
+struct __db_rename_msg {
+ u_int dbpcl_id;
+ char *name;
+ char *subdb;
+ char *newname;
+ u_int flags;
+};
+typedef struct __db_rename_msg __db_rename_msg;
+
+struct __db_rename_reply {
+ int status;
+};
+typedef struct __db_rename_reply __db_rename_reply;
+
+struct __db_stat_msg {
+ u_int dbpcl_id;
+ u_int flags;
+};
+typedef struct __db_stat_msg __db_stat_msg;
+
+struct __db_stat_reply {
+ int status;
+ struct {
+ u_int stats_len;
+ u_int *stats_val;
+ } stats;
+};
+typedef struct __db_stat_reply __db_stat_reply;
+
+struct __db_sync_msg {
+ u_int dbpcl_id;
+ u_int flags;
+};
+typedef struct __db_sync_msg __db_sync_msg;
+
+struct __db_sync_reply {
+ int status;
+};
+typedef struct __db_sync_reply __db_sync_reply;
+
+struct __db_truncate_msg {
+ u_int dbpcl_id;
+ u_int txnpcl_id;
+ u_int flags;
+};
+typedef struct __db_truncate_msg __db_truncate_msg;
+
+struct __db_truncate_reply {
+ int status;
+ u_int count;
+};
+typedef struct __db_truncate_reply __db_truncate_reply;
+
+struct __db_cursor_msg {
+ u_int dbpcl_id;
+ u_int txnpcl_id;
+ u_int flags;
+};
+typedef struct __db_cursor_msg __db_cursor_msg;
+
+struct __db_cursor_reply {
+ int status;
+ u_int dbcidcl_id;
+};
+typedef struct __db_cursor_reply __db_cursor_reply;
+
+struct __db_join_msg {
+ u_int dbpcl_id;
+ struct {
+ u_int curs_len;
+ u_int *curs_val;
+ } curs;
+ u_int flags;
+};
+typedef struct __db_join_msg __db_join_msg;
+
+struct __db_join_reply {
+ int status;
+ u_int dbcidcl_id;
+};
+typedef struct __db_join_reply __db_join_reply;
+
+struct __dbc_close_msg {
+ u_int dbccl_id;
+};
+typedef struct __dbc_close_msg __dbc_close_msg;
+
+struct __dbc_close_reply {
+ int status;
+};
+typedef struct __dbc_close_reply __dbc_close_reply;
+
+struct __dbc_count_msg {
+ u_int dbccl_id;
+ u_int flags;
+};
+typedef struct __dbc_count_msg __dbc_count_msg;
+
+struct __dbc_count_reply {
+ int status;
+ u_int dupcount;
+};
+typedef struct __dbc_count_reply __dbc_count_reply;
+
+struct __dbc_del_msg {
+ u_int dbccl_id;
+ u_int flags;
+};
+typedef struct __dbc_del_msg __dbc_del_msg;
+
+struct __dbc_del_reply {
+ int status;
+};
+typedef struct __dbc_del_reply __dbc_del_reply;
+
+struct __dbc_dup_msg {
+ u_int dbccl_id;
+ u_int flags;
+};
+typedef struct __dbc_dup_msg __dbc_dup_msg;
+
+struct __dbc_dup_reply {
+ int status;
+ u_int dbcidcl_id;
+};
+typedef struct __dbc_dup_reply __dbc_dup_reply;
+
+struct __dbc_get_msg {
+ u_int dbccl_id;
+ u_int keydlen;
+ u_int keydoff;
+ u_int keyulen;
+ u_int keyflags;
+ struct {
+ u_int keydata_len;
+ char *keydata_val;
+ } keydata;
+ u_int datadlen;
+ u_int datadoff;
+ u_int dataulen;
+ u_int dataflags;
+ struct {
+ u_int datadata_len;
+ char *datadata_val;
+ } datadata;
+ u_int flags;
+};
+typedef struct __dbc_get_msg __dbc_get_msg;
+
+struct __dbc_get_reply {
+ int status;
+ struct {
+ u_int keydata_len;
+ char *keydata_val;
+ } keydata;
+ struct {
+ u_int datadata_len;
+ char *datadata_val;
+ } datadata;
+};
+typedef struct __dbc_get_reply __dbc_get_reply;
+
+struct __dbc_pget_msg {
+ u_int dbccl_id;
+ u_int skeydlen;
+ u_int skeydoff;
+ u_int skeyulen;
+ u_int skeyflags;
+ struct {
+ u_int skeydata_len;
+ char *skeydata_val;
+ } skeydata;
+ u_int pkeydlen;
+ u_int pkeydoff;
+ u_int pkeyulen;
+ u_int pkeyflags;
+ struct {
+ u_int pkeydata_len;
+ char *pkeydata_val;
+ } pkeydata;
+ u_int datadlen;
+ u_int datadoff;
+ u_int dataulen;
+ u_int dataflags;
+ struct {
+ u_int datadata_len;
+ char *datadata_val;
+ } datadata;
+ u_int flags;
+};
+typedef struct __dbc_pget_msg __dbc_pget_msg;
+
+struct __dbc_pget_reply {
+ int status;
+ struct {
+ u_int skeydata_len;
+ char *skeydata_val;
+ } skeydata;
+ struct {
+ u_int pkeydata_len;
+ char *pkeydata_val;
+ } pkeydata;
+ struct {
+ u_int datadata_len;
+ char *datadata_val;
+ } datadata;
+};
+typedef struct __dbc_pget_reply __dbc_pget_reply;
+
+struct __dbc_put_msg {
+ u_int dbccl_id;
+ u_int keydlen;
+ u_int keydoff;
+ u_int keyulen;
+ u_int keyflags;
+ struct {
+ u_int keydata_len;
+ char *keydata_val;
+ } keydata;
+ u_int datadlen;
+ u_int datadoff;
+ u_int dataulen;
+ u_int dataflags;
+ struct {
+ u_int datadata_len;
+ char *datadata_val;
+ } datadata;
+ u_int flags;
+};
+typedef struct __dbc_put_msg __dbc_put_msg;
+
+struct __dbc_put_reply {
+ int status;
+ struct {
+ u_int keydata_len;
+ char *keydata_val;
+ } keydata;
+};
+typedef struct __dbc_put_reply __dbc_put_reply;
+
+#define __DB_env_cachesize ((unsigned long)(1))
+extern __env_cachesize_reply * __db_env_cachesize_4001();
+#define __DB_env_close ((unsigned long)(2))
+extern __env_close_reply * __db_env_close_4001();
+#define __DB_env_create ((unsigned long)(3))
+extern __env_create_reply * __db_env_create_4001();
+#define __DB_env_dbremove ((unsigned long)(4))
+extern __env_dbremove_reply * __db_env_dbremove_4001();
+#define __DB_env_dbrename ((unsigned long)(5))
+extern __env_dbrename_reply * __db_env_dbrename_4001();
+#define __DB_env_encrypt ((unsigned long)(6))
+extern __env_encrypt_reply * __db_env_encrypt_4001();
+#define __DB_env_flags ((unsigned long)(7))
+extern __env_flags_reply * __db_env_flags_4001();
+#define __DB_env_open ((unsigned long)(8))
+extern __env_open_reply * __db_env_open_4001();
+#define __DB_env_remove ((unsigned long)(9))
+extern __env_remove_reply * __db_env_remove_4001();
+#define __DB_txn_abort ((unsigned long)(10))
+extern __txn_abort_reply * __db_txn_abort_4001();
+#define __DB_txn_begin ((unsigned long)(11))
+extern __txn_begin_reply * __db_txn_begin_4001();
+#define __DB_txn_commit ((unsigned long)(12))
+extern __txn_commit_reply * __db_txn_commit_4001();
+#define __DB_txn_discard ((unsigned long)(13))
+extern __txn_discard_reply * __db_txn_discard_4001();
+#define __DB_txn_prepare ((unsigned long)(14))
+extern __txn_prepare_reply * __db_txn_prepare_4001();
+#define __DB_txn_recover ((unsigned long)(15))
+extern __txn_recover_reply * __db_txn_recover_4001();
+#define __DB_db_associate ((unsigned long)(16))
+extern __db_associate_reply * __db_db_associate_4001();
+#define __DB_db_bt_maxkey ((unsigned long)(17))
+extern __db_bt_maxkey_reply * __db_db_bt_maxkey_4001();
+#define __DB_db_bt_minkey ((unsigned long)(18))
+extern __db_bt_minkey_reply * __db_db_bt_minkey_4001();
+#define __DB_db_close ((unsigned long)(19))
+extern __db_close_reply * __db_db_close_4001();
+#define __DB_db_create ((unsigned long)(20))
+extern __db_create_reply * __db_db_create_4001();
+#define __DB_db_del ((unsigned long)(21))
+extern __db_del_reply * __db_db_del_4001();
+#define __DB_db_encrypt ((unsigned long)(22))
+extern __db_encrypt_reply * __db_db_encrypt_4001();
+#define __DB_db_extentsize ((unsigned long)(23))
+extern __db_extentsize_reply * __db_db_extentsize_4001();
+#define __DB_db_flags ((unsigned long)(24))
+extern __db_flags_reply * __db_db_flags_4001();
+#define __DB_db_get ((unsigned long)(25))
+extern __db_get_reply * __db_db_get_4001();
+#define __DB_db_h_ffactor ((unsigned long)(26))
+extern __db_h_ffactor_reply * __db_db_h_ffactor_4001();
+#define __DB_db_h_nelem ((unsigned long)(27))
+extern __db_h_nelem_reply * __db_db_h_nelem_4001();
+#define __DB_db_key_range ((unsigned long)(28))
+extern __db_key_range_reply * __db_db_key_range_4001();
+#define __DB_db_lorder ((unsigned long)(29))
+extern __db_lorder_reply * __db_db_lorder_4001();
+#define __DB_db_open ((unsigned long)(30))
+extern __db_open_reply * __db_db_open_4001();
+#define __DB_db_pagesize ((unsigned long)(31))
+extern __db_pagesize_reply * __db_db_pagesize_4001();
+#define __DB_db_pget ((unsigned long)(32))
+extern __db_pget_reply * __db_db_pget_4001();
+#define __DB_db_put ((unsigned long)(33))
+extern __db_put_reply * __db_db_put_4001();
+#define __DB_db_re_delim ((unsigned long)(34))
+extern __db_re_delim_reply * __db_db_re_delim_4001();
+#define __DB_db_re_len ((unsigned long)(35))
+extern __db_re_len_reply * __db_db_re_len_4001();
+#define __DB_db_re_pad ((unsigned long)(36))
+extern __db_re_pad_reply * __db_db_re_pad_4001();
+#define __DB_db_remove ((unsigned long)(37))
+extern __db_remove_reply * __db_db_remove_4001();
+#define __DB_db_rename ((unsigned long)(38))
+extern __db_rename_reply * __db_db_rename_4001();
+#define __DB_db_stat ((unsigned long)(39))
+extern __db_stat_reply * __db_db_stat_4001();
+#define __DB_db_sync ((unsigned long)(40))
+extern __db_sync_reply * __db_db_sync_4001();
+#define __DB_db_truncate ((unsigned long)(41))
+extern __db_truncate_reply * __db_db_truncate_4001();
+#define __DB_db_cursor ((unsigned long)(42))
+extern __db_cursor_reply * __db_db_cursor_4001();
+#define __DB_db_join ((unsigned long)(43))
+extern __db_join_reply * __db_db_join_4001();
+#define __DB_dbc_close ((unsigned long)(44))
+extern __dbc_close_reply * __db_dbc_close_4001();
+#define __DB_dbc_count ((unsigned long)(45))
+extern __dbc_count_reply * __db_dbc_count_4001();
+#define __DB_dbc_del ((unsigned long)(46))
+extern __dbc_del_reply * __db_dbc_del_4001();
+#define __DB_dbc_dup ((unsigned long)(47))
+extern __dbc_dup_reply * __db_dbc_dup_4001();
+#define __DB_dbc_get ((unsigned long)(48))
+extern __dbc_get_reply * __db_dbc_get_4001();
+#define __DB_dbc_pget ((unsigned long)(49))
+extern __dbc_pget_reply * __db_dbc_pget_4001();
+#define __DB_dbc_put ((unsigned long)(50))
+extern __dbc_put_reply * __db_dbc_put_4001();
+extern int db_rpc_serverprog_4001_freeresult();
+
+/* the xdr functions */
+extern bool_t xdr___env_cachesize_msg();
+extern bool_t xdr___env_cachesize_reply();
+extern bool_t xdr___env_close_msg();
+extern bool_t xdr___env_close_reply();
+extern bool_t xdr___env_create_msg();
+extern bool_t xdr___env_create_reply();
+extern bool_t xdr___env_dbremove_msg();
+extern bool_t xdr___env_dbremove_reply();
+extern bool_t xdr___env_dbrename_msg();
+extern bool_t xdr___env_dbrename_reply();
+extern bool_t xdr___env_encrypt_msg();
+extern bool_t xdr___env_encrypt_reply();
+extern bool_t xdr___env_flags_msg();
+extern bool_t xdr___env_flags_reply();
+extern bool_t xdr___env_open_msg();
+extern bool_t xdr___env_open_reply();
+extern bool_t xdr___env_remove_msg();
+extern bool_t xdr___env_remove_reply();
+extern bool_t xdr___txn_abort_msg();
+extern bool_t xdr___txn_abort_reply();
+extern bool_t xdr___txn_begin_msg();
+extern bool_t xdr___txn_begin_reply();
+extern bool_t xdr___txn_commit_msg();
+extern bool_t xdr___txn_commit_reply();
+extern bool_t xdr___txn_discard_msg();
+extern bool_t xdr___txn_discard_reply();
+extern bool_t xdr___txn_prepare_msg();
+extern bool_t xdr___txn_prepare_reply();
+extern bool_t xdr___txn_recover_msg();
+extern bool_t xdr___txn_recover_reply();
+extern bool_t xdr___db_associate_msg();
+extern bool_t xdr___db_associate_reply();
+extern bool_t xdr___db_bt_maxkey_msg();
+extern bool_t xdr___db_bt_maxkey_reply();
+extern bool_t xdr___db_bt_minkey_msg();
+extern bool_t xdr___db_bt_minkey_reply();
+extern bool_t xdr___db_close_msg();
+extern bool_t xdr___db_close_reply();
+extern bool_t xdr___db_create_msg();
+extern bool_t xdr___db_create_reply();
+extern bool_t xdr___db_del_msg();
+extern bool_t xdr___db_del_reply();
+extern bool_t xdr___db_encrypt_msg();
+extern bool_t xdr___db_encrypt_reply();
+extern bool_t xdr___db_extentsize_msg();
+extern bool_t xdr___db_extentsize_reply();
+extern bool_t xdr___db_flags_msg();
+extern bool_t xdr___db_flags_reply();
+extern bool_t xdr___db_get_msg();
+extern bool_t xdr___db_get_reply();
+extern bool_t xdr___db_h_ffactor_msg();
+extern bool_t xdr___db_h_ffactor_reply();
+extern bool_t xdr___db_h_nelem_msg();
+extern bool_t xdr___db_h_nelem_reply();
+extern bool_t xdr___db_key_range_msg();
+extern bool_t xdr___db_key_range_reply();
+extern bool_t xdr___db_lorder_msg();
+extern bool_t xdr___db_lorder_reply();
+extern bool_t xdr___db_open_msg();
+extern bool_t xdr___db_open_reply();
+extern bool_t xdr___db_pagesize_msg();
+extern bool_t xdr___db_pagesize_reply();
+extern bool_t xdr___db_pget_msg();
+extern bool_t xdr___db_pget_reply();
+extern bool_t xdr___db_put_msg();
+extern bool_t xdr___db_put_reply();
+extern bool_t xdr___db_re_delim_msg();
+extern bool_t xdr___db_re_delim_reply();
+extern bool_t xdr___db_re_len_msg();
+extern bool_t xdr___db_re_len_reply();
+extern bool_t xdr___db_re_pad_msg();
+extern bool_t xdr___db_re_pad_reply();
+extern bool_t xdr___db_remove_msg();
+extern bool_t xdr___db_remove_reply();
+extern bool_t xdr___db_rename_msg();
+extern bool_t xdr___db_rename_reply();
+extern bool_t xdr___db_stat_msg();
+extern bool_t xdr___db_stat_reply();
+extern bool_t xdr___db_sync_msg();
+extern bool_t xdr___db_sync_reply();
+extern bool_t xdr___db_truncate_msg();
+extern bool_t xdr___db_truncate_reply();
+extern bool_t xdr___db_cursor_msg();
+extern bool_t xdr___db_cursor_reply();
+extern bool_t xdr___db_join_msg();
+extern bool_t xdr___db_join_reply();
+extern bool_t xdr___dbc_close_msg();
+extern bool_t xdr___dbc_close_reply();
+extern bool_t xdr___dbc_count_msg();
+extern bool_t xdr___dbc_count_reply();
+extern bool_t xdr___dbc_del_msg();
+extern bool_t xdr___dbc_del_reply();
+extern bool_t xdr___dbc_dup_msg();
+extern bool_t xdr___dbc_dup_reply();
+extern bool_t xdr___dbc_get_msg();
+extern bool_t xdr___dbc_get_reply();
+extern bool_t xdr___dbc_pget_msg();
+extern bool_t xdr___dbc_pget_reply();
+extern bool_t xdr___dbc_put_msg();
+extern bool_t xdr___dbc_put_reply();
+
+#endif /* !_DB_SERVER_H_RPCGEN */
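
In these rpcgen-built messages each DBT that crosses the RPC boundary is flattened into its dlen/doff/ulen/flags counters plus an opaque byte array (for example keydlen/keydoff/keyulen/keyflags and the keydata pair in __db_put_msg), and the caller copies the DBT members into those fields before the XDR encode. A minimal sketch of that copy follows; the helper name and include paths are hypothetical and error handling is omitted:

#include <db.h>			/* DBT */
#include "db_server.h"		/* this generated header; include path illustrative */

/* Hypothetical helper: flatten the key DBT of a put into a __db_put_msg. */
static void
put_msg_set_key(__db_put_msg *msg, const DBT *key)
{
	msg->keydlen = key->dlen;
	msg->keydoff = key->doff;
	msg->keyulen = key->ulen;
	msg->keyflags = key->flags;
	msg->keydata.keydata_len = key->size;
	msg->keydata.keydata_val = (char *)key->data;	/* XDR copies these bytes out */
}
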
diff --git a/bdb/dbinc_auto/dbreg_auto.h b/bdb/dbinc_auto/dbreg_auto.h
new file mode 100644
index 00000000000..4d7d4a91b45
--- /dev/null
+++ b/bdb/dbinc_auto/dbreg_auto.h
@@ -0,0 +1,19 @@
+/* Do not edit: automatically built by gen_rec.awk. */
+
+#ifndef __dbreg_AUTO_H
+#define __dbreg_AUTO_H
+#define DB___dbreg_register 2
+typedef struct ___dbreg_register_args {
+ u_int32_t type;
+ DB_TXN *txnid;
+ DB_LSN prev_lsn;
+ u_int32_t opcode;
+ DBT name;
+ DBT uid;
+ int32_t fileid;
+ DBTYPE ftype;
+ db_pgno_t meta_pgno;
+ u_int32_t id;
+} __dbreg_register_args;
+
+#endif
diff --git a/bdb/dbinc_auto/dbreg_ext.h b/bdb/dbinc_auto/dbreg_ext.h
new file mode 100644
index 00000000000..eda26206d86
--- /dev/null
+++ b/bdb/dbinc_auto/dbreg_ext.h
@@ -0,0 +1,43 @@
+/* DO NOT EDIT: automatically built by dist/s_include. */
+#ifndef _dbreg_ext_h_
+#define _dbreg_ext_h_
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+int __dbreg_setup __P((DB *, const char *, u_int32_t));
+int __dbreg_teardown __P((DB *));
+int __dbreg_new_id __P((DB *, DB_TXN *));
+int __dbreg_assign_id __P((DB *, int32_t));
+int __dbreg_revoke_id __P((DB *, int));
+int __dbreg_close_id __P((DB *, DB_TXN *));
+int __dbreg_register_log __P((DB_ENV *, DB_TXN *, DB_LSN *, u_int32_t, u_int32_t, const DBT *, const DBT *, int32_t, DBTYPE, db_pgno_t, u_int32_t));
+int __dbreg_register_getpgnos __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __dbreg_register_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __dbreg_register_read __P((DB_ENV *, void *, __dbreg_register_args **));
+int __dbreg_init_print __P((DB_ENV *, int (***)(DB_ENV *, DBT *, DB_LSN *, db_recops, void *), size_t *));
+int __dbreg_init_getpgnos __P((DB_ENV *, int (***)(DB_ENV *, DBT *, DB_LSN *, db_recops, void *), size_t *));
+int __dbreg_init_recover __P((DB_ENV *, int (***)(DB_ENV *, DBT *, DB_LSN *, db_recops, void *), size_t *));
+int __dbreg_register_recover __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __dbreg_add_dbentry __P((DB_ENV *, DB_LOG *, DB *, int32_t));
+void __dbreg_rem_dbentry __P((DB_LOG *, int32_t));
+int __dbreg_open_files __P((DB_ENV *));
+int __dbreg_close_files __P((DB_ENV *));
+int __dbreg_nofiles __P((DB_ENV *));
+int __dbreg_id_to_db __P((DB_ENV *, DB_TXN *, DB **, int32_t, int));
+int __dbreg_id_to_db_int __P((DB_ENV *, DB_TXN *, DB **, int32_t, int, int));
+int __dbreg_id_to_fname __P((DB_LOG *, int32_t, int, FNAME **));
+int __dbreg_fid_to_fname __P((DB_LOG *, u_int8_t *, int, FNAME **));
+int __dbreg_get_name __P((DB_ENV *, u_int8_t *, char **));
+int __dbreg_do_open __P((DB_ENV *, DB_TXN *, DB_LOG *, u_int8_t *, char *, DBTYPE, int32_t, db_pgno_t, void *, u_int32_t));
+int __dbreg_lazy_id __P((DB *));
+int __dbreg_push_id __P((DB_ENV *, int32_t));
+int __dbreg_pop_id __P((DB_ENV *, int32_t *));
+int __dbreg_pluck_id __P((DB_ENV *, int32_t));
+void __dbreg_print_dblist __P((DB_ENV *));
+
+#if defined(__cplusplus)
+}
+#endif
+#endif /* !_dbreg_ext_h_ */
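
The gen_rec.awk output follows one pattern for every record type: the *_read() routine unpacks a log record DBT into a freshly allocated *_args struct, and the _recover/_print routines consume its fields and release it. A simplified sketch of that pattern for __dbreg_register (not the real __dbreg_register_recover, which also applies the redo/undo logic; include paths are illustrative):

#include <stdlib.h>
#include <db.h>			/* DB_ENV, DBT, DB_LSN, db_recops */
#include "dbreg_auto.h"		/* __dbreg_register_args */
#include "dbreg_ext.h"		/* __dbreg_register_read */

/* Sketch: unpack a __dbreg_register record and inspect its fields. */
int
example_dbreg_record(DB_ENV *dbenv, DBT *dbtp, DB_LSN *lsnp, db_recops op, void *info)
{
	__dbreg_register_args *argp;
	int ret;

	(void)lsnp; (void)op; (void)info;	/* unused in this sketch */

	if ((ret = __dbreg_register_read(dbenv, dbtp->data, &argp)) != 0)
		return (ret);
	/* argp->opcode, argp->fileid, argp->name, argp->uid are now usable. */

	free(argp);	/* the real recovery code releases this with the internal __os_free() */
	return (0);
}
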
diff --git a/bdb/dbinc_auto/env_ext.h b/bdb/dbinc_auto/env_ext.h
new file mode 100644
index 00000000000..4bd0eee4a83
--- /dev/null
+++ b/bdb/dbinc_auto/env_ext.h
@@ -0,0 +1,39 @@
+/* DO NOT EDIT: automatically built by dist/s_include. */
+#ifndef _env_ext_h_
+#define _env_ext_h_
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+void __db_shalloc_init __P((void *, size_t));
+int __db_shalloc_size __P((size_t, size_t));
+int __db_shalloc __P((void *, size_t, size_t, void *));
+void __db_shalloc_free __P((void *, void *));
+size_t __db_shsizeof __P((void *));
+void __db_shalloc_dump __P((void *, FILE *));
+int __db_tablesize __P((u_int32_t));
+void __db_hashinit __P((void *, u_int32_t));
+int __db_fileinit __P((DB_ENV *, DB_FH *, size_t, int));
+int __db_overwrite __P((DB_ENV *, const char *));
+int __db_mi_env __P((DB_ENV *, const char *));
+int __db_mi_open __P((DB_ENV *, const char *, int));
+int __db_env_config __P((DB_ENV *, char *, u_int32_t));
+int __dbenv_open __P((DB_ENV *, const char *, u_int32_t, int));
+int __dbenv_remove __P((DB_ENV *, const char *, u_int32_t));
+int __dbenv_close __P((DB_ENV *, u_int32_t));
+int __db_appname __P((DB_ENV *, APPNAME, const char *, u_int32_t, DB_FH *, char **));
+int __db_home __P((DB_ENV *, const char *, u_int32_t));
+int __db_apprec __P((DB_ENV *, DB_LSN *, u_int32_t));
+int __env_openfiles __P((DB_ENV *, DB_LOGC *, void *, DBT *, DB_LSN *, DB_LSN *, double, int));
+int __db_e_attach __P((DB_ENV *, u_int32_t *));
+int __db_e_detach __P((DB_ENV *, int));
+int __db_e_remove __P((DB_ENV *, u_int32_t));
+int __db_e_stat __P((DB_ENV *, REGENV *, REGION *, int *, u_int32_t));
+int __db_r_attach __P((DB_ENV *, REGINFO *, size_t));
+int __db_r_detach __P((DB_ENV *, REGINFO *, int));
+
+#if defined(__cplusplus)
+}
+#endif
+#endif /* !_env_ext_h_ */
diff --git a/bdb/dbinc_auto/ext_185_def.in b/bdb/dbinc_auto/ext_185_def.in
new file mode 100644
index 00000000000..8da68a8df9d
--- /dev/null
+++ b/bdb/dbinc_auto/ext_185_def.in
@@ -0,0 +1,12 @@
+
+/* DO NOT EDIT: automatically built by dist/s_include. */
+#ifndef _DB_EXT_185_DEF_IN_
+#define _DB_EXT_185_DEF_IN_
+
+#ifdef _DB185_INT_H_
+#define __db185_open __db185_open@DB_VERSION_UNIQUE_NAME@
+#else
+#define __db185_open __db185_open@DB_VERSION_UNIQUE_NAME@
+#endif
+
+#endif /* !_DB_EXT_185_DEF_IN_ */
diff --git a/bdb/dbinc_auto/ext_185_prot.in b/bdb/dbinc_auto/ext_185_prot.in
new file mode 100644
index 00000000000..dfd8d3d476e
--- /dev/null
+++ b/bdb/dbinc_auto/ext_185_prot.in
@@ -0,0 +1,19 @@
+
+/* DO NOT EDIT: automatically built by dist/s_include. */
+#ifndef _DB_EXT_185_PROT_IN_
+#define _DB_EXT_185_PROT_IN_
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+#ifdef _DB185_INT_H_
+DB185 *__db185_open __P((const char *, int, int, DBTYPE, const void *));
+#else
+DB *__db185_open __P((const char *, int, int, DBTYPE, const void *));
+#endif
+
+#if defined(__cplusplus)
+}
+#endif
+#endif /* !_DB_EXT_185_PROT_IN_ */
diff --git a/bdb/dbinc_auto/ext_def.in b/bdb/dbinc_auto/ext_def.in
new file mode 100644
index 00000000000..7bef2465645
--- /dev/null
+++ b/bdb/dbinc_auto/ext_def.in
@@ -0,0 +1,61 @@
+
+/* DO NOT EDIT: automatically built by dist/s_include. */
+#ifndef _DB_EXT_DEF_IN_
+#define _DB_EXT_DEF_IN_
+
+#define db_create db_create@DB_VERSION_UNIQUE_NAME@
+#define db_strerror db_strerror@DB_VERSION_UNIQUE_NAME@
+#define db_env_create db_env_create@DB_VERSION_UNIQUE_NAME@
+#define db_version db_version@DB_VERSION_UNIQUE_NAME@
+#define log_compare log_compare@DB_VERSION_UNIQUE_NAME@
+#define db_env_set_func_close db_env_set_func_close@DB_VERSION_UNIQUE_NAME@
+#define db_env_set_func_dirfree db_env_set_func_dirfree@DB_VERSION_UNIQUE_NAME@
+#define db_env_set_func_dirlist db_env_set_func_dirlist@DB_VERSION_UNIQUE_NAME@
+#define db_env_set_func_exists db_env_set_func_exists@DB_VERSION_UNIQUE_NAME@
+#define db_env_set_func_free db_env_set_func_free@DB_VERSION_UNIQUE_NAME@
+#define db_env_set_func_fsync db_env_set_func_fsync@DB_VERSION_UNIQUE_NAME@
+#define db_env_set_func_ioinfo db_env_set_func_ioinfo@DB_VERSION_UNIQUE_NAME@
+#define db_env_set_func_malloc db_env_set_func_malloc@DB_VERSION_UNIQUE_NAME@
+#define db_env_set_func_map db_env_set_func_map@DB_VERSION_UNIQUE_NAME@
+#define db_env_set_func_open db_env_set_func_open@DB_VERSION_UNIQUE_NAME@
+#define db_env_set_func_read db_env_set_func_read@DB_VERSION_UNIQUE_NAME@
+#define db_env_set_func_realloc db_env_set_func_realloc@DB_VERSION_UNIQUE_NAME@
+#define db_env_set_func_rename db_env_set_func_rename@DB_VERSION_UNIQUE_NAME@
+#define db_env_set_func_seek db_env_set_func_seek@DB_VERSION_UNIQUE_NAME@
+#define db_env_set_func_sleep db_env_set_func_sleep@DB_VERSION_UNIQUE_NAME@
+#define db_env_set_func_unlink db_env_set_func_unlink@DB_VERSION_UNIQUE_NAME@
+#define db_env_set_func_unmap db_env_set_func_unmap@DB_VERSION_UNIQUE_NAME@
+#define db_env_set_func_write db_env_set_func_write@DB_VERSION_UNIQUE_NAME@
+#define db_env_set_func_yield db_env_set_func_yield@DB_VERSION_UNIQUE_NAME@
+#if DB_DBM_HSEARCH != 0
+#define __db_ndbm_clearerr __db_ndbm_clearerr@DB_VERSION_UNIQUE_NAME@
+#define __db_ndbm_close __db_ndbm_close@DB_VERSION_UNIQUE_NAME@
+#define __db_ndbm_delete __db_ndbm_delete@DB_VERSION_UNIQUE_NAME@
+#define __db_ndbm_dirfno __db_ndbm_dirfno@DB_VERSION_UNIQUE_NAME@
+#define __db_ndbm_error __db_ndbm_error@DB_VERSION_UNIQUE_NAME@
+#define __db_ndbm_fetch __db_ndbm_fetch@DB_VERSION_UNIQUE_NAME@
+#define __db_ndbm_firstkey __db_ndbm_firstkey@DB_VERSION_UNIQUE_NAME@
+#define __db_ndbm_nextkey __db_ndbm_nextkey@DB_VERSION_UNIQUE_NAME@
+#define __db_ndbm_open __db_ndbm_open@DB_VERSION_UNIQUE_NAME@
+#define __db_ndbm_pagfno __db_ndbm_pagfno@DB_VERSION_UNIQUE_NAME@
+#define __db_ndbm_rdonly __db_ndbm_rdonly@DB_VERSION_UNIQUE_NAME@
+#define __db_ndbm_store __db_ndbm_store@DB_VERSION_UNIQUE_NAME@
+#define __db_dbm_close __db_dbm_close@DB_VERSION_UNIQUE_NAME@
+#define __db_dbm_dbrdonly __db_dbm_dbrdonly@DB_VERSION_UNIQUE_NAME@
+#define __db_dbm_delete __db_dbm_delete@DB_VERSION_UNIQUE_NAME@
+#define __db_dbm_dirf __db_dbm_dirf@DB_VERSION_UNIQUE_NAME@
+#define __db_dbm_fetch __db_dbm_fetch@DB_VERSION_UNIQUE_NAME@
+#define __db_dbm_firstkey __db_dbm_firstkey@DB_VERSION_UNIQUE_NAME@
+#define __db_dbm_init __db_dbm_init@DB_VERSION_UNIQUE_NAME@
+#define __db_dbm_nextkey __db_dbm_nextkey@DB_VERSION_UNIQUE_NAME@
+#define __db_dbm_pagf __db_dbm_pagf@DB_VERSION_UNIQUE_NAME@
+#define __db_dbm_store __db_dbm_store@DB_VERSION_UNIQUE_NAME@
+#endif
+#if DB_DBM_HSEARCH != 0
+#define __db_hcreate __db_hcreate@DB_VERSION_UNIQUE_NAME@
+#define __db_hsearch __db_hsearch@DB_VERSION_UNIQUE_NAME@
+#define __db_hdestroy __db_hdestroy@DB_VERSION_UNIQUE_NAME@
+#endif
+#define db_xa_switch db_xa_switch@DB_VERSION_UNIQUE_NAME@
+
+#endif /* !_DB_EXT_DEF_IN_ */
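
The @DB_VERSION_UNIQUE_NAME@ placeholder is filled in by the dist/configure machinery: when the library is built with its unique-name option it becomes a per-version suffix (compare the _4001 suffix on the RPC stubs above), which lets two Berkeley DB releases be linked into one binary without symbol collisions; in a default build it expands to nothing and these defines are no-ops. Assuming a 4.1-style suffix, the first define would effectively become:

/* Illustrative expansion only; the actual suffix depends on configure options. */
#define db_create db_create_4001
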
diff --git a/bdb/dbinc_auto/ext_prot.in b/bdb/dbinc_auto/ext_prot.in
new file mode 100644
index 00000000000..42c77a1f763
--- /dev/null
+++ b/bdb/dbinc_auto/ext_prot.in
@@ -0,0 +1,70 @@
+
+/* DO NOT EDIT: automatically built by dist/s_include. */
+#ifndef _DB_EXT_PROT_IN_
+#define _DB_EXT_PROT_IN_
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+int db_create __P((DB **, DB_ENV *, u_int32_t));
+char *db_strerror __P((int));
+int db_env_create __P((DB_ENV **, u_int32_t));
+char *db_version __P((int *, int *, int *));
+int log_compare __P((const DB_LSN *, const DB_LSN *));
+int db_env_set_func_close __P((int (*)(int)));
+int db_env_set_func_dirfree __P((void (*)(char **, int)));
+int db_env_set_func_dirlist __P((int (*)(const char *, char ***, int *)));
+int db_env_set_func_exists __P((int (*)(const char *, int *)));
+int db_env_set_func_free __P((void (*)(void *)));
+int db_env_set_func_fsync __P((int (*)(int)));
+int db_env_set_func_ioinfo __P((int (*)(const char *, int, u_int32_t *, u_int32_t *, u_int32_t *)));
+int db_env_set_func_malloc __P((void *(*)(size_t)));
+int db_env_set_func_map __P((int (*)(char *, size_t, int, int, void **)));
+int db_env_set_func_open __P((int (*)(const char *, int, ...)));
+int db_env_set_func_read __P((ssize_t (*)(int, void *, size_t)));
+int db_env_set_func_realloc __P((void *(*)(void *, size_t)));
+int db_env_set_func_rename __P((int (*)(const char *, const char *)));
+int db_env_set_func_seek __P((int (*)(int, size_t, db_pgno_t, u_int32_t, int, int)));
+int db_env_set_func_sleep __P((int (*)(u_long, u_long)));
+int db_env_set_func_unlink __P((int (*)(const char *)));
+int db_env_set_func_unmap __P((int (*)(void *, size_t)));
+int db_env_set_func_write __P((ssize_t (*)(int, const void *, size_t)));
+int db_env_set_func_yield __P((int (*)(void)));
+int txn_abort __P((DB_TXN *));
+int txn_begin __P((DB_ENV *, DB_TXN *, DB_TXN **, u_int32_t));
+int txn_commit __P((DB_TXN *, u_int32_t));
+#if DB_DBM_HSEARCH != 0
+int __db_ndbm_clearerr __P((DBM *));
+void __db_ndbm_close __P((DBM *));
+int __db_ndbm_delete __P((DBM *, datum));
+int __db_ndbm_dirfno __P((DBM *));
+int __db_ndbm_error __P((DBM *));
+datum __db_ndbm_fetch __P((DBM *, datum));
+datum __db_ndbm_firstkey __P((DBM *));
+datum __db_ndbm_nextkey __P((DBM *));
+DBM *__db_ndbm_open __P((const char *, int, int));
+int __db_ndbm_pagfno __P((DBM *));
+int __db_ndbm_rdonly __P((DBM *));
+int __db_ndbm_store __P((DBM *, datum, datum, int));
+int __db_dbm_close __P((void));
+int __db_dbm_dbrdonly __P((void));
+int __db_dbm_delete __P((datum));
+int __db_dbm_dirf __P((void));
+datum __db_dbm_fetch __P((datum));
+datum __db_dbm_firstkey __P((void));
+int __db_dbm_init __P((char *));
+datum __db_dbm_nextkey __P((datum));
+int __db_dbm_pagf __P((void));
+int __db_dbm_store __P((datum, datum));
+#endif
+#if DB_DBM_HSEARCH != 0
+int __db_hcreate __P((size_t));
+ENTRY *__db_hsearch __P((ENTRY, ACTION));
+void __db_hdestroy __P((void));
+#endif
+
+#if defined(__cplusplus)
+}
+#endif
+#endif /* !_DB_EXT_PROT_IN_ */
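
These are the public, version-renamed entry points; the handle methods themselves (DB_ENV->open, DB->open, and so on) come from db.h rather than this file. A minimal usage sketch of the functions declared here, with error reporting shortened to db_strerror() and illustrative paths and flags, might look like:

#include <stdio.h>
#include <db.h>

int
main(void)
{
	DB_ENV *dbenv;
	DB *dbp;
	int ret;

	if ((ret = db_env_create(&dbenv, 0)) != 0 ||
	    (ret = dbenv->open(dbenv, "/tmp/bdb-env",	/* illustrative home */
	    DB_CREATE | DB_INIT_MPOOL | DB_INIT_LOCK | DB_INIT_LOG | DB_INIT_TXN,
	    0)) != 0) {
		fprintf(stderr, "env: %s\n", db_strerror(ret));
		return (1);
	}
	if ((ret = db_create(&dbp, dbenv, 0)) != 0 ||
	    (ret = dbp->open(dbp,
	    NULL, "test.db", NULL, DB_BTREE, DB_CREATE, 0664)) != 0) {
		fprintf(stderr, "db: %s\n", db_strerror(ret));
		return (1);
	}
	(void)dbp->close(dbp, 0);
	(void)dbenv->close(dbenv, 0);
	return (0);
}
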
diff --git a/bdb/dbinc_auto/fileops_auto.h b/bdb/dbinc_auto/fileops_auto.h
new file mode 100644
index 00000000000..ee1f58616ce
--- /dev/null
+++ b/bdb/dbinc_auto/fileops_auto.h
@@ -0,0 +1,60 @@
+/* Do not edit: automatically built by gen_rec.awk. */
+
+#ifndef __fop_AUTO_H
+#define __fop_AUTO_H
+#define DB___fop_create 143
+typedef struct ___fop_create_args {
+ u_int32_t type;
+ DB_TXN *txnid;
+ DB_LSN prev_lsn;
+ DBT name;
+ u_int32_t appname;
+ u_int32_t mode;
+} __fop_create_args;
+
+#define DB___fop_remove 144
+typedef struct ___fop_remove_args {
+ u_int32_t type;
+ DB_TXN *txnid;
+ DB_LSN prev_lsn;
+ DBT name;
+ DBT fid;
+ u_int32_t appname;
+} __fop_remove_args;
+
+#define DB___fop_write 145
+typedef struct ___fop_write_args {
+ u_int32_t type;
+ DB_TXN *txnid;
+ DB_LSN prev_lsn;
+ DBT name;
+ u_int32_t appname;
+ u_int32_t offset;
+ DBT page;
+ u_int32_t flag;
+} __fop_write_args;
+
+#define DB___fop_rename 146
+typedef struct ___fop_rename_args {
+ u_int32_t type;
+ DB_TXN *txnid;
+ DB_LSN prev_lsn;
+ DBT oldname;
+ DBT newname;
+ DBT fileid;
+ u_int32_t appname;
+} __fop_rename_args;
+
+#define DB___fop_file_remove 141
+typedef struct ___fop_file_remove_args {
+ u_int32_t type;
+ DB_TXN *txnid;
+ DB_LSN prev_lsn;
+ DBT real_fid;
+ DBT tmp_fid;
+ DBT name;
+ u_int32_t appname;
+ u_int32_t child;
+} __fop_file_remove_args;
+
+#endif
diff --git a/bdb/dbinc_auto/fileops_ext.h b/bdb/dbinc_auto/fileops_ext.h
new file mode 100644
index 00000000000..66d80e60ad9
--- /dev/null
+++ b/bdb/dbinc_auto/fileops_ext.h
@@ -0,0 +1,52 @@
+/* DO NOT EDIT: automatically built by dist/s_include. */
+#ifndef _fileops_ext_h_
+#define _fileops_ext_h_
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+int __fop_create_log __P((DB_ENV *, DB_TXN *, DB_LSN *, u_int32_t, const DBT *, u_int32_t, u_int32_t));
+int __fop_create_getpgnos __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __fop_create_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __fop_create_read __P((DB_ENV *, void *, __fop_create_args **));
+int __fop_remove_log __P((DB_ENV *, DB_TXN *, DB_LSN *, u_int32_t, const DBT *, const DBT *, u_int32_t));
+int __fop_remove_getpgnos __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __fop_remove_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __fop_remove_read __P((DB_ENV *, void *, __fop_remove_args **));
+int __fop_write_log __P((DB_ENV *, DB_TXN *, DB_LSN *, u_int32_t, const DBT *, u_int32_t, u_int32_t, const DBT *, u_int32_t));
+int __fop_write_getpgnos __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __fop_write_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __fop_write_read __P((DB_ENV *, void *, __fop_write_args **));
+int __fop_rename_log __P((DB_ENV *, DB_TXN *, DB_LSN *, u_int32_t, const DBT *, const DBT *, const DBT *, u_int32_t));
+int __fop_rename_getpgnos __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __fop_rename_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __fop_rename_read __P((DB_ENV *, void *, __fop_rename_args **));
+int __fop_file_remove_log __P((DB_ENV *, DB_TXN *, DB_LSN *, u_int32_t, const DBT *, const DBT *, const DBT *, u_int32_t, u_int32_t));
+int __fop_file_remove_getpgnos __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __fop_file_remove_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __fop_file_remove_read __P((DB_ENV *, void *, __fop_file_remove_args **));
+int __fop_init_print __P((DB_ENV *, int (***)(DB_ENV *, DBT *, DB_LSN *, db_recops, void *), size_t *));
+int __fop_init_getpgnos __P((DB_ENV *, int (***)(DB_ENV *, DBT *, DB_LSN *, db_recops, void *), size_t *));
+int __fop_init_recover __P((DB_ENV *, int (***)(DB_ENV *, DBT *, DB_LSN *, db_recops, void *), size_t *));
+int __fop_create __P((DB_ENV *, DB_TXN *, DB_FH *, const char *, APPNAME, int));
+int __fop_remove __P((DB_ENV *, DB_TXN *, u_int8_t *, const char *, APPNAME));
+int __fop_write __P((DB_ENV *, DB_TXN *, const char *, APPNAME, DB_FH *, u_int32_t, u_int8_t *, u_int32_t, u_int32_t));
+int __fop_rename __P((DB_ENV *, DB_TXN *, const char *, const char *, u_int8_t *, APPNAME));
+int __fop_create_recover __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __fop_remove_recover __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __fop_write_recover __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __fop_rename_recover __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __fop_file_remove_recover __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __fop_lock_handle __P((DB_ENV *, DB *, u_int32_t, db_lockmode_t, DB_LOCK *, u_int32_t));
+int __fop_file_setup __P((DB *, DB_TXN *, const char *, int, u_int32_t, u_int32_t *));
+int __fop_subdb_setup __P((DB *, DB_TXN *, const char *, const char *, int, u_int32_t));
+int __fop_remove_setup __P((DB *, DB_TXN *, const char *, u_int32_t));
+int __fop_read_meta __P((DB_ENV *, const char *, u_int8_t *, size_t, DB_FH *, int, u_int32_t));
+int __fop_dummy __P((DB *, DB_TXN *, const char *, const char *, u_int32_t));
+int __fop_dbrename __P((DB *, const char *, const char *));
+
+#if defined(__cplusplus)
+}
+#endif
+#endif /* !_fileops_ext_h_ */
diff --git a/bdb/dbinc_auto/hash_auto.h b/bdb/dbinc_auto/hash_auto.h
new file mode 100644
index 00000000000..7ec3fb7ef08
--- /dev/null
+++ b/bdb/dbinc_auto/hash_auto.h
@@ -0,0 +1,132 @@
+/* Do not edit: automatically built by gen_rec.awk. */
+
+#ifndef __ham_AUTO_H
+#define __ham_AUTO_H
+#define DB___ham_insdel 21
+typedef struct ___ham_insdel_args {
+ u_int32_t type;
+ DB_TXN *txnid;
+ DB_LSN prev_lsn;
+ u_int32_t opcode;
+ int32_t fileid;
+ db_pgno_t pgno;
+ u_int32_t ndx;
+ DB_LSN pagelsn;
+ DBT key;
+ DBT data;
+} __ham_insdel_args;
+
+#define DB___ham_newpage 22
+typedef struct ___ham_newpage_args {
+ u_int32_t type;
+ DB_TXN *txnid;
+ DB_LSN prev_lsn;
+ u_int32_t opcode;
+ int32_t fileid;
+ db_pgno_t prev_pgno;
+ DB_LSN prevlsn;
+ db_pgno_t new_pgno;
+ DB_LSN pagelsn;
+ db_pgno_t next_pgno;
+ DB_LSN nextlsn;
+} __ham_newpage_args;
+
+#define DB___ham_splitdata 24
+typedef struct ___ham_splitdata_args {
+ u_int32_t type;
+ DB_TXN *txnid;
+ DB_LSN prev_lsn;
+ int32_t fileid;
+ u_int32_t opcode;
+ db_pgno_t pgno;
+ DBT pageimage;
+ DB_LSN pagelsn;
+} __ham_splitdata_args;
+
+#define DB___ham_replace 25
+typedef struct ___ham_replace_args {
+ u_int32_t type;
+ DB_TXN *txnid;
+ DB_LSN prev_lsn;
+ int32_t fileid;
+ db_pgno_t pgno;
+ u_int32_t ndx;
+ DB_LSN pagelsn;
+ int32_t off;
+ DBT olditem;
+ DBT newitem;
+ u_int32_t makedup;
+} __ham_replace_args;
+
+#define DB___ham_copypage 28
+typedef struct ___ham_copypage_args {
+ u_int32_t type;
+ DB_TXN *txnid;
+ DB_LSN prev_lsn;
+ int32_t fileid;
+ db_pgno_t pgno;
+ DB_LSN pagelsn;
+ db_pgno_t next_pgno;
+ DB_LSN nextlsn;
+ db_pgno_t nnext_pgno;
+ DB_LSN nnextlsn;
+ DBT page;
+} __ham_copypage_args;
+
+#define DB___ham_metagroup 29
+typedef struct ___ham_metagroup_args {
+ u_int32_t type;
+ DB_TXN *txnid;
+ DB_LSN prev_lsn;
+ int32_t fileid;
+ u_int32_t bucket;
+ db_pgno_t mmpgno;
+ DB_LSN mmetalsn;
+ db_pgno_t mpgno;
+ DB_LSN metalsn;
+ db_pgno_t pgno;
+ DB_LSN pagelsn;
+ u_int32_t newalloc;
+} __ham_metagroup_args;
+
+#define DB___ham_groupalloc 32
+typedef struct ___ham_groupalloc_args {
+ u_int32_t type;
+ DB_TXN *txnid;
+ DB_LSN prev_lsn;
+ int32_t fileid;
+ DB_LSN meta_lsn;
+ db_pgno_t start_pgno;
+ u_int32_t num;
+ db_pgno_t free;
+} __ham_groupalloc_args;
+
+#define DB___ham_curadj 33
+typedef struct ___ham_curadj_args {
+ u_int32_t type;
+ DB_TXN *txnid;
+ DB_LSN prev_lsn;
+ int32_t fileid;
+ db_pgno_t pgno;
+ u_int32_t indx;
+ u_int32_t len;
+ u_int32_t dup_off;
+ int add;
+ int is_dup;
+ u_int32_t order;
+} __ham_curadj_args;
+
+#define DB___ham_chgpg 34
+typedef struct ___ham_chgpg_args {
+ u_int32_t type;
+ DB_TXN *txnid;
+ DB_LSN prev_lsn;
+ int32_t fileid;
+ db_ham_mode mode;
+ db_pgno_t old_pgno;
+ db_pgno_t new_pgno;
+ u_int32_t old_indx;
+ u_int32_t new_indx;
+} __ham_chgpg_args;
+
+#endif
diff --git a/bdb/dbinc_auto/hash_ext.h b/bdb/dbinc_auto/hash_ext.h
new file mode 100644
index 00000000000..1ee2398706f
--- /dev/null
+++ b/bdb/dbinc_auto/hash_ext.h
@@ -0,0 +1,125 @@
+/* DO NOT EDIT: automatically built by dist/s_include. */
+#ifndef _hash_ext_h_
+#define _hash_ext_h_
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+int __ham_quick_delete __P((DBC *));
+int __ham_c_init __P((DBC *));
+int __ham_c_count __P((DBC *, db_recno_t *));
+int __ham_c_dup __P((DBC *, DBC *));
+u_int32_t __ham_call_hash __P((DBC *, u_int8_t *, int32_t));
+int __ham_init_dbt __P((DB_ENV *, DBT *, u_int32_t, void **, u_int32_t *));
+int __ham_c_update __P((DBC *, u_int32_t, int, int));
+int __ham_get_clist __P((DB *, db_pgno_t, u_int32_t, DBC ***));
+int __ham_insdel_log __P((DB *, DB_TXN *, DB_LSN *, u_int32_t, u_int32_t, db_pgno_t, u_int32_t, DB_LSN *, const DBT *, const DBT *));
+int __ham_insdel_getpgnos __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __ham_insdel_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __ham_insdel_read __P((DB_ENV *, void *, __ham_insdel_args **));
+int __ham_newpage_log __P((DB *, DB_TXN *, DB_LSN *, u_int32_t, u_int32_t, db_pgno_t, DB_LSN *, db_pgno_t, DB_LSN *, db_pgno_t, DB_LSN *));
+int __ham_newpage_getpgnos __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __ham_newpage_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __ham_newpage_read __P((DB_ENV *, void *, __ham_newpage_args **));
+int __ham_splitdata_log __P((DB *, DB_TXN *, DB_LSN *, u_int32_t, u_int32_t, db_pgno_t, const DBT *, DB_LSN *));
+int __ham_splitdata_getpgnos __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __ham_splitdata_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __ham_splitdata_read __P((DB_ENV *, void *, __ham_splitdata_args **));
+int __ham_replace_log __P((DB *, DB_TXN *, DB_LSN *, u_int32_t, db_pgno_t, u_int32_t, DB_LSN *, int32_t, const DBT *, const DBT *, u_int32_t));
+int __ham_replace_getpgnos __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __ham_replace_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __ham_replace_read __P((DB_ENV *, void *, __ham_replace_args **));
+int __ham_copypage_log __P((DB *, DB_TXN *, DB_LSN *, u_int32_t, db_pgno_t, DB_LSN *, db_pgno_t, DB_LSN *, db_pgno_t, DB_LSN *, const DBT *));
+int __ham_copypage_getpgnos __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __ham_copypage_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __ham_copypage_read __P((DB_ENV *, void *, __ham_copypage_args **));
+int __ham_metagroup_log __P((DB *, DB_TXN *, DB_LSN *, u_int32_t, u_int32_t, db_pgno_t, DB_LSN *, db_pgno_t, DB_LSN *, db_pgno_t, DB_LSN *, u_int32_t));
+int __ham_metagroup_getpgnos __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __ham_metagroup_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __ham_metagroup_read __P((DB_ENV *, void *, __ham_metagroup_args **));
+int __ham_groupalloc_log __P((DB *, DB_TXN *, DB_LSN *, u_int32_t, DB_LSN *, db_pgno_t, u_int32_t, db_pgno_t));
+int __ham_groupalloc_getpgnos __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __ham_groupalloc_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __ham_groupalloc_read __P((DB_ENV *, void *, __ham_groupalloc_args **));
+int __ham_curadj_log __P((DB *, DB_TXN *, DB_LSN *, u_int32_t, db_pgno_t, u_int32_t, u_int32_t, u_int32_t, int, int, u_int32_t));
+int __ham_curadj_getpgnos __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __ham_curadj_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __ham_curadj_read __P((DB_ENV *, void *, __ham_curadj_args **));
+int __ham_chgpg_log __P((DB *, DB_TXN *, DB_LSN *, u_int32_t, db_ham_mode, db_pgno_t, db_pgno_t, u_int32_t, u_int32_t));
+int __ham_chgpg_getpgnos __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __ham_chgpg_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __ham_chgpg_read __P((DB_ENV *, void *, __ham_chgpg_args **));
+int __ham_init_print __P((DB_ENV *, int (***)(DB_ENV *, DBT *, DB_LSN *, db_recops, void *), size_t *));
+int __ham_init_getpgnos __P((DB_ENV *, int (***)(DB_ENV *, DBT *, DB_LSN *, db_recops, void *), size_t *));
+int __ham_init_recover __P((DB_ENV *, int (***)(DB_ENV *, DBT *, DB_LSN *, db_recops, void *), size_t *));
+int __ham_pgin __P((DB_ENV *, DB *, db_pgno_t, void *, DBT *));
+int __ham_pgout __P((DB_ENV *, DB *, db_pgno_t, void *, DBT *));
+int __ham_mswap __P((void *));
+int __ham_add_dup __P((DBC *, DBT *, u_int32_t, db_pgno_t *));
+int __ham_dup_convert __P((DBC *));
+int __ham_make_dup __P((DB_ENV *, const DBT *, DBT *d, void **, u_int32_t *));
+void __ham_dsearch __P((DBC *, DBT *, u_int32_t *, int *, u_int32_t));
+void __ham_cprint __P((DBC *));
+u_int32_t __ham_func2 __P((DB *, const void *, u_int32_t));
+u_int32_t __ham_func3 __P((DB *, const void *, u_int32_t));
+u_int32_t __ham_func4 __P((DB *, const void *, u_int32_t));
+u_int32_t __ham_func5 __P((DB *, const void *, u_int32_t));
+u_int32_t __ham_test __P((DB *, const void *, u_int32_t));
+int __ham_get_meta __P((DBC *));
+int __ham_release_meta __P((DBC *));
+int __ham_dirty_meta __P((DBC *));
+int __ham_db_create __P((DB *));
+int __ham_db_close __P((DB *));
+int __ham_open __P((DB *, DB_TXN *, const char * name, db_pgno_t, u_int32_t));
+int __ham_metachk __P((DB *, const char *, HMETA *));
+int __ham_new_file __P((DB *, DB_TXN *, DB_FH *, const char *));
+int __ham_new_subdb __P((DB *, DB *, DB_TXN *));
+int __ham_item __P((DBC *, db_lockmode_t, db_pgno_t *));
+int __ham_item_reset __P((DBC *));
+void __ham_item_init __P((DBC *));
+int __ham_item_last __P((DBC *, db_lockmode_t, db_pgno_t *));
+int __ham_item_first __P((DBC *, db_lockmode_t, db_pgno_t *));
+int __ham_item_prev __P((DBC *, db_lockmode_t, db_pgno_t *));
+int __ham_item_next __P((DBC *, db_lockmode_t, db_pgno_t *));
+void __ham_putitem __P((DB *, PAGE *p, const DBT *, int));
+void __ham_reputpair __P((DB *, PAGE *, u_int32_t, const DBT *, const DBT *));
+int __ham_del_pair __P((DBC *, int));
+int __ham_replpair __P((DBC *, DBT *, u_int32_t));
+void __ham_onpage_replace __P((DB *, PAGE *, u_int32_t, int32_t, int32_t, DBT *));
+int __ham_split_page __P((DBC *, u_int32_t, u_int32_t));
+int __ham_add_el __P((DBC *, const DBT *, const DBT *, int));
+void __ham_copy_item __P((DB *, PAGE *, u_int32_t, PAGE *));
+int __ham_add_ovflpage __P((DBC *, PAGE *, int, PAGE **));
+int __ham_get_cpage __P((DBC *, db_lockmode_t));
+int __ham_next_cpage __P((DBC *, db_pgno_t, int));
+int __ham_lock_bucket __P((DBC *, db_lockmode_t));
+void __ham_dpair __P((DB *, PAGE *, u_int32_t));
+int __ham_insdel_recover __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __ham_newpage_recover __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __ham_replace_recover __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __ham_splitdata_recover __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __ham_copypage_recover __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __ham_metagroup_recover __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __ham_groupalloc_recover __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __ham_curadj_recover __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __ham_chgpg_recover __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __ham_reclaim __P((DB *, DB_TXN *txn));
+int __ham_truncate __P((DB *, DB_TXN *txn, u_int32_t *));
+int __ham_stat __P((DB *, void *, u_int32_t));
+int __ham_traverse __P((DBC *, db_lockmode_t, int (*)(DB *, PAGE *, void *, int *), void *, int));
+int __ham_30_hashmeta __P((DB *, char *, u_int8_t *));
+int __ham_30_sizefix __P((DB *, DB_FH *, char *, u_int8_t *));
+int __ham_31_hashmeta __P((DB *, char *, u_int32_t, DB_FH *, PAGE *, int *));
+int __ham_31_hash __P((DB *, char *, u_int32_t, DB_FH *, PAGE *, int *));
+int __ham_vrfy_meta __P((DB *, VRFY_DBINFO *, HMETA *, db_pgno_t, u_int32_t));
+int __ham_vrfy __P((DB *, VRFY_DBINFO *, PAGE *, db_pgno_t, u_int32_t));
+int __ham_vrfy_structure __P((DB *, VRFY_DBINFO *, db_pgno_t, u_int32_t));
+int __ham_vrfy_hashing __P((DB *, u_int32_t, HMETA *, u_int32_t, db_pgno_t, u_int32_t, u_int32_t (*) __P((DB *, const void *, u_int32_t))));
+int __ham_salvage __P((DB *, VRFY_DBINFO *, db_pgno_t, PAGE *, void *, int (*)(void *, const void *), u_int32_t));
+int __ham_meta2pgset __P((DB *, VRFY_DBINFO *, HMETA *, u_int32_t, DB *));
+
+#if defined(__cplusplus)
+}
+#endif
+#endif /* !_hash_ext_h_ */
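
Note on the hash callbacks above: __ham_func2 through __ham_func5 all share the shape u_int32_t (DB *, const void *, u_int32_t), which is the same shape an application hash passed through the public DB->set_h_hash() method takes. The following is an illustrative sketch only, not part of this patch; the function name and FNV constants are example choices, and the set_h_hash() wiring is mentioned on the library's documented public API rather than on anything shown in this hunk.

/*
 * Illustrative sketch only -- not part of the BDB 4.1.24 import.
 * A user hash callback with the same signature as the internal
 * __ham_func* routines declared above: it receives the DB handle,
 * the key bytes and the key length, and returns a 32-bit hash.
 */
#include <db.h>

static u_int32_t
example_fnv_hash(DB *dbp, const void *key, u_int32_t len)
{
	const u_int8_t *p = key;
	u_int32_t h = 2166136261U;	/* FNV-1a offset basis (example constant) */

	(void)dbp;			/* handle unused in this sketch */
	while (len-- > 0) {
		h ^= *p++;
		h *= 16777619U;		/* FNV prime */
	}
	return (h);
}

/* Typically installed before DB->open(): dbp->set_h_hash(dbp, example_fnv_hash); */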
diff --git a/bdb/dbinc_auto/hmac_ext.h b/bdb/dbinc_auto/hmac_ext.h
new file mode 100644
index 00000000000..d161a7291f4
--- /dev/null
+++ b/bdb/dbinc_auto/hmac_ext.h
@@ -0,0 +1,20 @@
+/* DO NOT EDIT: automatically built by dist/s_include. */
+#ifndef _hmac_ext_h_
+#define _hmac_ext_h_
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+void __db_chksum __P((u_int8_t *, size_t, u_int8_t *, u_int8_t *));
+void __db_derive_mac __P((u_int8_t *, size_t, u_int8_t *));
+int __db_check_chksum __P((DB_ENV *, DB_CIPHER *, u_int8_t *, void *, size_t, int));
+void __db_SHA1Transform __P((u_int32_t *, unsigned char *));
+void __db_SHA1Init __P((SHA1_CTX *));
+void __db_SHA1Update __P((SHA1_CTX *, unsigned char *, size_t));
+void __db_SHA1Final __P((unsigned char *, SHA1_CTX *));
+
+#if defined(__cplusplus)
+}
+#endif
+#endif /* !_hmac_ext_h_ */
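
The hmac_ext.h prototypes above expose the checksum, MAC-derivation, and SHA-1 helpers used by the crypto/checksum code. A hedged sketch of how the three SHA-1 entry points compose in the usual init/update/final order; it assumes the conventional 20-byte SHA-1 digest and that SHA1_CTX is visible through the library's internal headers, and the wrapper name and buffer are made up for the example.

/*
 * Illustrative sketch only -- not part of the BDB 4.1.24 import.
 * Chains the three SHA-1 entry points declared in hmac_ext.h.
 * Assumes SHA1_CTX comes from the internal headers and that the
 * digest is the standard 20 bytes.
 */
static void
example_sha1(u_int8_t *data, size_t len, unsigned char digest_buf[20])
{
	SHA1_CTX ctx;

	__db_SHA1Init(&ctx);				/* reset context */
	__db_SHA1Update(&ctx, (unsigned char *)data, len);	/* absorb input */
	__db_SHA1Final(digest_buf, &ctx);		/* write 20-byte digest */
}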
diff --git a/bdb/dbinc_auto/int_def.in b/bdb/dbinc_auto/int_def.in
new file mode 100644
index 00000000000..003a861f4f5
--- /dev/null
+++ b/bdb/dbinc_auto/int_def.in
@@ -0,0 +1,1328 @@
+/* DO NOT EDIT: automatically built by dist/s_include. */
+#ifndef _DB_INT_DEF_IN_
+#define _DB_INT_DEF_IN_
+
+#define __crdel_metasub_log __crdel_metasub_log@DB_VERSION_UNIQUE_NAME@
+#define __crdel_metasub_getpgnos __crdel_metasub_getpgnos@DB_VERSION_UNIQUE_NAME@
+#define __crdel_metasub_print __crdel_metasub_print@DB_VERSION_UNIQUE_NAME@
+#define __crdel_metasub_read __crdel_metasub_read@DB_VERSION_UNIQUE_NAME@
+#define __crdel_init_print __crdel_init_print@DB_VERSION_UNIQUE_NAME@
+#define __crdel_init_getpgnos __crdel_init_getpgnos@DB_VERSION_UNIQUE_NAME@
+#define __crdel_init_recover __crdel_init_recover@DB_VERSION_UNIQUE_NAME@
+#define __crdel_metasub_recover __crdel_metasub_recover@DB_VERSION_UNIQUE_NAME@
+#define __db_master_open __db_master_open@DB_VERSION_UNIQUE_NAME@
+#define __db_master_update __db_master_update@DB_VERSION_UNIQUE_NAME@
+#define __db_dbenv_setup __db_dbenv_setup@DB_VERSION_UNIQUE_NAME@
+#define __db_close __db_close@DB_VERSION_UNIQUE_NAME@
+#define __db_close_i __db_close_i@DB_VERSION_UNIQUE_NAME@
+#define __db_refresh __db_refresh@DB_VERSION_UNIQUE_NAME@
+#define __db_log_page __db_log_page@DB_VERSION_UNIQUE_NAME@
+#define __db_backup_name __db_backup_name@DB_VERSION_UNIQUE_NAME@
+#define __dblist_get __dblist_get@DB_VERSION_UNIQUE_NAME@
+#if CONFIG_TEST
+#define __db_testcopy __db_testcopy@DB_VERSION_UNIQUE_NAME@
+#endif
+#define __db_cursor __db_cursor@DB_VERSION_UNIQUE_NAME@
+#define __db_icursor __db_icursor@DB_VERSION_UNIQUE_NAME@
+#define __db_cprint __db_cprint@DB_VERSION_UNIQUE_NAME@
+#define __db_fd __db_fd@DB_VERSION_UNIQUE_NAME@
+#define __db_get __db_get@DB_VERSION_UNIQUE_NAME@
+#define __db_put __db_put@DB_VERSION_UNIQUE_NAME@
+#define __db_delete __db_delete@DB_VERSION_UNIQUE_NAME@
+#define __db_sync __db_sync@DB_VERSION_UNIQUE_NAME@
+#define __db_associate __db_associate@DB_VERSION_UNIQUE_NAME@
+#define __db_pget __db_pget@DB_VERSION_UNIQUE_NAME@
+#define __db_addrem_log __db_addrem_log@DB_VERSION_UNIQUE_NAME@
+#define __db_addrem_getpgnos __db_addrem_getpgnos@DB_VERSION_UNIQUE_NAME@
+#define __db_addrem_print __db_addrem_print@DB_VERSION_UNIQUE_NAME@
+#define __db_addrem_read __db_addrem_read@DB_VERSION_UNIQUE_NAME@
+#define __db_big_log __db_big_log@DB_VERSION_UNIQUE_NAME@
+#define __db_big_getpgnos __db_big_getpgnos@DB_VERSION_UNIQUE_NAME@
+#define __db_big_print __db_big_print@DB_VERSION_UNIQUE_NAME@
+#define __db_big_read __db_big_read@DB_VERSION_UNIQUE_NAME@
+#define __db_ovref_log __db_ovref_log@DB_VERSION_UNIQUE_NAME@
+#define __db_ovref_getpgnos __db_ovref_getpgnos@DB_VERSION_UNIQUE_NAME@
+#define __db_ovref_print __db_ovref_print@DB_VERSION_UNIQUE_NAME@
+#define __db_ovref_read __db_ovref_read@DB_VERSION_UNIQUE_NAME@
+#define __db_relink_log __db_relink_log@DB_VERSION_UNIQUE_NAME@
+#define __db_relink_getpgnos __db_relink_getpgnos@DB_VERSION_UNIQUE_NAME@
+#define __db_relink_print __db_relink_print@DB_VERSION_UNIQUE_NAME@
+#define __db_relink_read __db_relink_read@DB_VERSION_UNIQUE_NAME@
+#define __db_debug_log __db_debug_log@DB_VERSION_UNIQUE_NAME@
+#define __db_debug_getpgnos __db_debug_getpgnos@DB_VERSION_UNIQUE_NAME@
+#define __db_debug_print __db_debug_print@DB_VERSION_UNIQUE_NAME@
+#define __db_debug_read __db_debug_read@DB_VERSION_UNIQUE_NAME@
+#define __db_noop_log __db_noop_log@DB_VERSION_UNIQUE_NAME@
+#define __db_noop_getpgnos __db_noop_getpgnos@DB_VERSION_UNIQUE_NAME@
+#define __db_noop_print __db_noop_print@DB_VERSION_UNIQUE_NAME@
+#define __db_noop_read __db_noop_read@DB_VERSION_UNIQUE_NAME@
+#define __db_pg_alloc_log __db_pg_alloc_log@DB_VERSION_UNIQUE_NAME@
+#define __db_pg_alloc_getpgnos __db_pg_alloc_getpgnos@DB_VERSION_UNIQUE_NAME@
+#define __db_pg_alloc_print __db_pg_alloc_print@DB_VERSION_UNIQUE_NAME@
+#define __db_pg_alloc_read __db_pg_alloc_read@DB_VERSION_UNIQUE_NAME@
+#define __db_pg_free_log __db_pg_free_log@DB_VERSION_UNIQUE_NAME@
+#define __db_pg_free_getpgnos __db_pg_free_getpgnos@DB_VERSION_UNIQUE_NAME@
+#define __db_pg_free_print __db_pg_free_print@DB_VERSION_UNIQUE_NAME@
+#define __db_pg_free_read __db_pg_free_read@DB_VERSION_UNIQUE_NAME@
+#define __db_cksum_log __db_cksum_log@DB_VERSION_UNIQUE_NAME@
+#define __db_cksum_getpgnos __db_cksum_getpgnos@DB_VERSION_UNIQUE_NAME@
+#define __db_cksum_print __db_cksum_print@DB_VERSION_UNIQUE_NAME@
+#define __db_cksum_read __db_cksum_read@DB_VERSION_UNIQUE_NAME@
+#define __db_init_print __db_init_print@DB_VERSION_UNIQUE_NAME@
+#define __db_init_getpgnos __db_init_getpgnos@DB_VERSION_UNIQUE_NAME@
+#define __db_init_recover __db_init_recover@DB_VERSION_UNIQUE_NAME@
+#define __db_c_close __db_c_close@DB_VERSION_UNIQUE_NAME@
+#define __db_c_destroy __db_c_destroy@DB_VERSION_UNIQUE_NAME@
+#define __db_c_count __db_c_count@DB_VERSION_UNIQUE_NAME@
+#define __db_c_del __db_c_del@DB_VERSION_UNIQUE_NAME@
+#define __db_c_dup __db_c_dup@DB_VERSION_UNIQUE_NAME@
+#define __db_c_idup __db_c_idup@DB_VERSION_UNIQUE_NAME@
+#define __db_c_newopd __db_c_newopd@DB_VERSION_UNIQUE_NAME@
+#define __db_c_get __db_c_get@DB_VERSION_UNIQUE_NAME@
+#define __db_c_put __db_c_put@DB_VERSION_UNIQUE_NAME@
+#define __db_duperr __db_duperr@DB_VERSION_UNIQUE_NAME@
+#define __db_c_secondary_get __db_c_secondary_get@DB_VERSION_UNIQUE_NAME@
+#define __db_c_pget __db_c_pget@DB_VERSION_UNIQUE_NAME@
+#define __db_c_del_primary __db_c_del_primary@DB_VERSION_UNIQUE_NAME@
+#define __db_s_first __db_s_first@DB_VERSION_UNIQUE_NAME@
+#define __db_s_next __db_s_next@DB_VERSION_UNIQUE_NAME@
+#define __db_s_done __db_s_done@DB_VERSION_UNIQUE_NAME@
+#define __db_partsize __db_partsize@DB_VERSION_UNIQUE_NAME@
+#define __db_pgin __db_pgin@DB_VERSION_UNIQUE_NAME@
+#define __db_pgout __db_pgout@DB_VERSION_UNIQUE_NAME@
+#define __db_metaswap __db_metaswap@DB_VERSION_UNIQUE_NAME@
+#define __db_byteswap __db_byteswap@DB_VERSION_UNIQUE_NAME@
+#define __db_dispatch __db_dispatch@DB_VERSION_UNIQUE_NAME@
+#define __db_add_recovery __db_add_recovery@DB_VERSION_UNIQUE_NAME@
+#define __db_txnlist_init __db_txnlist_init@DB_VERSION_UNIQUE_NAME@
+#define __db_txnlist_add __db_txnlist_add@DB_VERSION_UNIQUE_NAME@
+#define __db_txnlist_remove __db_txnlist_remove@DB_VERSION_UNIQUE_NAME@
+#define __db_txnlist_ckp __db_txnlist_ckp@DB_VERSION_UNIQUE_NAME@
+#define __db_txnlist_end __db_txnlist_end@DB_VERSION_UNIQUE_NAME@
+#define __db_txnlist_find __db_txnlist_find@DB_VERSION_UNIQUE_NAME@
+#define __db_txnlist_update __db_txnlist_update@DB_VERSION_UNIQUE_NAME@
+#define __db_txnlist_gen __db_txnlist_gen@DB_VERSION_UNIQUE_NAME@
+#define __db_txnlist_lsnadd __db_txnlist_lsnadd@DB_VERSION_UNIQUE_NAME@
+#define __db_txnlist_lsninit __db_txnlist_lsninit@DB_VERSION_UNIQUE_NAME@
+#define __db_add_limbo __db_add_limbo@DB_VERSION_UNIQUE_NAME@
+#define __db_do_the_limbo __db_do_the_limbo@DB_VERSION_UNIQUE_NAME@
+#define __db_txnlist_print __db_txnlist_print@DB_VERSION_UNIQUE_NAME@
+#define __db_ditem __db_ditem@DB_VERSION_UNIQUE_NAME@
+#define __db_pitem __db_pitem@DB_VERSION_UNIQUE_NAME@
+#define __db_relink __db_relink@DB_VERSION_UNIQUE_NAME@
+#define __db_cursorchk __db_cursorchk@DB_VERSION_UNIQUE_NAME@
+#define __db_ccountchk __db_ccountchk@DB_VERSION_UNIQUE_NAME@
+#define __db_cdelchk __db_cdelchk@DB_VERSION_UNIQUE_NAME@
+#define __db_cgetchk __db_cgetchk@DB_VERSION_UNIQUE_NAME@
+#define __db_cputchk __db_cputchk@DB_VERSION_UNIQUE_NAME@
+#define __db_pgetchk __db_pgetchk@DB_VERSION_UNIQUE_NAME@
+#define __db_cpgetchk __db_cpgetchk@DB_VERSION_UNIQUE_NAME@
+#define __db_delchk __db_delchk@DB_VERSION_UNIQUE_NAME@
+#define __db_getchk __db_getchk@DB_VERSION_UNIQUE_NAME@
+#define __db_joinchk __db_joinchk@DB_VERSION_UNIQUE_NAME@
+#define __db_joingetchk __db_joingetchk@DB_VERSION_UNIQUE_NAME@
+#define __db_putchk __db_putchk@DB_VERSION_UNIQUE_NAME@
+#define __db_statchk __db_statchk@DB_VERSION_UNIQUE_NAME@
+#define __db_syncchk __db_syncchk@DB_VERSION_UNIQUE_NAME@
+#define __db_secondary_corrupt __db_secondary_corrupt@DB_VERSION_UNIQUE_NAME@
+#define __db_associatechk __db_associatechk@DB_VERSION_UNIQUE_NAME@
+#define __db_txn_auto __db_txn_auto@DB_VERSION_UNIQUE_NAME@
+#define __db_join __db_join@DB_VERSION_UNIQUE_NAME@
+#define __db_new __db_new@DB_VERSION_UNIQUE_NAME@
+#define __db_free __db_free@DB_VERSION_UNIQUE_NAME@
+#define __db_lprint __db_lprint@DB_VERSION_UNIQUE_NAME@
+#define __db_lget __db_lget@DB_VERSION_UNIQUE_NAME@
+#define __db_lput __db_lput@DB_VERSION_UNIQUE_NAME@
+#define __dbh_am_chk __dbh_am_chk@DB_VERSION_UNIQUE_NAME@
+#define __db_set_lorder __db_set_lorder@DB_VERSION_UNIQUE_NAME@
+#define __db_open __db_open@DB_VERSION_UNIQUE_NAME@
+#define __db_dbopen __db_dbopen@DB_VERSION_UNIQUE_NAME@
+#define __db_new_file __db_new_file@DB_VERSION_UNIQUE_NAME@
+#define __db_init_subdb __db_init_subdb@DB_VERSION_UNIQUE_NAME@
+#define __db_chk_meta __db_chk_meta@DB_VERSION_UNIQUE_NAME@
+#define __db_meta_setup __db_meta_setup@DB_VERSION_UNIQUE_NAME@
+#define __db_goff __db_goff@DB_VERSION_UNIQUE_NAME@
+#define __db_poff __db_poff@DB_VERSION_UNIQUE_NAME@
+#define __db_ovref __db_ovref@DB_VERSION_UNIQUE_NAME@
+#define __db_doff __db_doff@DB_VERSION_UNIQUE_NAME@
+#define __db_moff __db_moff@DB_VERSION_UNIQUE_NAME@
+#define __db_vrfy_overflow __db_vrfy_overflow@DB_VERSION_UNIQUE_NAME@
+#define __db_vrfy_ovfl_structure __db_vrfy_ovfl_structure@DB_VERSION_UNIQUE_NAME@
+#define __db_safe_goff __db_safe_goff@DB_VERSION_UNIQUE_NAME@
+#define __db_loadme __db_loadme@DB_VERSION_UNIQUE_NAME@
+#define __db_dump __db_dump@DB_VERSION_UNIQUE_NAME@
+#define __db_inmemdbflags __db_inmemdbflags@DB_VERSION_UNIQUE_NAME@
+#define __db_prnpage __db_prnpage@DB_VERSION_UNIQUE_NAME@
+#define __db_prpage __db_prpage@DB_VERSION_UNIQUE_NAME@
+#define __db_pr __db_pr@DB_VERSION_UNIQUE_NAME@
+#define __db_prdbt __db_prdbt@DB_VERSION_UNIQUE_NAME@
+#define __db_prflags __db_prflags@DB_VERSION_UNIQUE_NAME@
+#define __db_dbtype_to_string __db_dbtype_to_string@DB_VERSION_UNIQUE_NAME@
+#define __db_prheader __db_prheader@DB_VERSION_UNIQUE_NAME@
+#define __db_prfooter __db_prfooter@DB_VERSION_UNIQUE_NAME@
+#define __db_addrem_recover __db_addrem_recover@DB_VERSION_UNIQUE_NAME@
+#define __db_big_recover __db_big_recover@DB_VERSION_UNIQUE_NAME@
+#define __db_ovref_recover __db_ovref_recover@DB_VERSION_UNIQUE_NAME@
+#define __db_relink_recover __db_relink_recover@DB_VERSION_UNIQUE_NAME@
+#define __db_debug_recover __db_debug_recover@DB_VERSION_UNIQUE_NAME@
+#define __db_noop_recover __db_noop_recover@DB_VERSION_UNIQUE_NAME@
+#define __db_pg_alloc_recover __db_pg_alloc_recover@DB_VERSION_UNIQUE_NAME@
+#define __db_pg_free_recover __db_pg_free_recover@DB_VERSION_UNIQUE_NAME@
+#define __db_cksum_recover __db_cksum_recover@DB_VERSION_UNIQUE_NAME@
+#define __db_traverse_big __db_traverse_big@DB_VERSION_UNIQUE_NAME@
+#define __db_reclaim_callback __db_reclaim_callback@DB_VERSION_UNIQUE_NAME@
+#define __db_truncate_callback __db_truncate_callback@DB_VERSION_UNIQUE_NAME@
+#define __dbenv_dbremove __dbenv_dbremove@DB_VERSION_UNIQUE_NAME@
+#define __db_remove __db_remove@DB_VERSION_UNIQUE_NAME@
+#define __db_remove_i __db_remove_i@DB_VERSION_UNIQUE_NAME@
+#define __dbenv_dbrename __dbenv_dbrename@DB_VERSION_UNIQUE_NAME@
+#define __db_rename __db_rename@DB_VERSION_UNIQUE_NAME@
+#define __db_rename_i __db_rename_i@DB_VERSION_UNIQUE_NAME@
+#define __db_ret __db_ret@DB_VERSION_UNIQUE_NAME@
+#define __db_retcopy __db_retcopy@DB_VERSION_UNIQUE_NAME@
+#define __db_truncate __db_truncate@DB_VERSION_UNIQUE_NAME@
+#define __db_upgrade __db_upgrade@DB_VERSION_UNIQUE_NAME@
+#define __db_lastpgno __db_lastpgno@DB_VERSION_UNIQUE_NAME@
+#define __db_31_offdup __db_31_offdup@DB_VERSION_UNIQUE_NAME@
+#define __db_verify __db_verify@DB_VERSION_UNIQUE_NAME@
+#define __db_verify_callback __db_verify_callback@DB_VERSION_UNIQUE_NAME@
+#define __db_verify_internal __db_verify_internal@DB_VERSION_UNIQUE_NAME@
+#define __db_vrfy_datapage __db_vrfy_datapage@DB_VERSION_UNIQUE_NAME@
+#define __db_vrfy_meta __db_vrfy_meta@DB_VERSION_UNIQUE_NAME@
+#define __db_vrfy_struct_feedback __db_vrfy_struct_feedback@DB_VERSION_UNIQUE_NAME@
+#define __db_vrfy_inpitem __db_vrfy_inpitem@DB_VERSION_UNIQUE_NAME@
+#define __db_vrfy_duptype __db_vrfy_duptype@DB_VERSION_UNIQUE_NAME@
+#define __db_salvage_duptree __db_salvage_duptree@DB_VERSION_UNIQUE_NAME@
+#define __db_vrfy_dbinfo_create __db_vrfy_dbinfo_create@DB_VERSION_UNIQUE_NAME@
+#define __db_vrfy_dbinfo_destroy __db_vrfy_dbinfo_destroy@DB_VERSION_UNIQUE_NAME@
+#define __db_vrfy_getpageinfo __db_vrfy_getpageinfo@DB_VERSION_UNIQUE_NAME@
+#define __db_vrfy_putpageinfo __db_vrfy_putpageinfo@DB_VERSION_UNIQUE_NAME@
+#define __db_vrfy_pgset __db_vrfy_pgset@DB_VERSION_UNIQUE_NAME@
+#define __db_vrfy_pgset_get __db_vrfy_pgset_get@DB_VERSION_UNIQUE_NAME@
+#define __db_vrfy_pgset_inc __db_vrfy_pgset_inc@DB_VERSION_UNIQUE_NAME@
+#define __db_vrfy_pgset_dec __db_vrfy_pgset_dec@DB_VERSION_UNIQUE_NAME@
+#define __db_vrfy_pgset_next __db_vrfy_pgset_next@DB_VERSION_UNIQUE_NAME@
+#define __db_vrfy_childcursor __db_vrfy_childcursor@DB_VERSION_UNIQUE_NAME@
+#define __db_vrfy_childput __db_vrfy_childput@DB_VERSION_UNIQUE_NAME@
+#define __db_vrfy_ccset __db_vrfy_ccset@DB_VERSION_UNIQUE_NAME@
+#define __db_vrfy_ccnext __db_vrfy_ccnext@DB_VERSION_UNIQUE_NAME@
+#define __db_vrfy_ccclose __db_vrfy_ccclose@DB_VERSION_UNIQUE_NAME@
+#define __db_salvage_init __db_salvage_init@DB_VERSION_UNIQUE_NAME@
+#define __db_salvage_destroy __db_salvage_destroy@DB_VERSION_UNIQUE_NAME@
+#define __db_salvage_getnext __db_salvage_getnext@DB_VERSION_UNIQUE_NAME@
+#define __db_salvage_isdone __db_salvage_isdone@DB_VERSION_UNIQUE_NAME@
+#define __db_salvage_markdone __db_salvage_markdone@DB_VERSION_UNIQUE_NAME@
+#define __db_salvage_markneeded __db_salvage_markneeded@DB_VERSION_UNIQUE_NAME@
+#define __bam_cmp __bam_cmp@DB_VERSION_UNIQUE_NAME@
+#define __bam_defcmp __bam_defcmp@DB_VERSION_UNIQUE_NAME@
+#define __bam_defpfx __bam_defpfx@DB_VERSION_UNIQUE_NAME@
+#define __bam_pgin __bam_pgin@DB_VERSION_UNIQUE_NAME@
+#define __bam_pgout __bam_pgout@DB_VERSION_UNIQUE_NAME@
+#define __bam_mswap __bam_mswap@DB_VERSION_UNIQUE_NAME@
+#define __bam_cprint __bam_cprint@DB_VERSION_UNIQUE_NAME@
+#define __bam_ca_delete __bam_ca_delete@DB_VERSION_UNIQUE_NAME@
+#define __ram_ca_delete __ram_ca_delete@DB_VERSION_UNIQUE_NAME@
+#define __bam_ca_di __bam_ca_di@DB_VERSION_UNIQUE_NAME@
+#define __bam_ca_dup __bam_ca_dup@DB_VERSION_UNIQUE_NAME@
+#define __bam_ca_undodup __bam_ca_undodup@DB_VERSION_UNIQUE_NAME@
+#define __bam_ca_rsplit __bam_ca_rsplit@DB_VERSION_UNIQUE_NAME@
+#define __bam_ca_split __bam_ca_split@DB_VERSION_UNIQUE_NAME@
+#define __bam_ca_undosplit __bam_ca_undosplit@DB_VERSION_UNIQUE_NAME@
+#define __bam_c_init __bam_c_init@DB_VERSION_UNIQUE_NAME@
+#define __bam_c_refresh __bam_c_refresh@DB_VERSION_UNIQUE_NAME@
+#define __bam_c_count __bam_c_count@DB_VERSION_UNIQUE_NAME@
+#define __bam_c_dup __bam_c_dup@DB_VERSION_UNIQUE_NAME@
+#define __bam_bulk_overflow __bam_bulk_overflow@DB_VERSION_UNIQUE_NAME@
+#define __bam_bulk_duplicates __bam_bulk_duplicates@DB_VERSION_UNIQUE_NAME@
+#define __bam_c_rget __bam_c_rget@DB_VERSION_UNIQUE_NAME@
+#define __bam_ditem __bam_ditem@DB_VERSION_UNIQUE_NAME@
+#define __bam_adjindx __bam_adjindx@DB_VERSION_UNIQUE_NAME@
+#define __bam_dpages __bam_dpages@DB_VERSION_UNIQUE_NAME@
+#define __bam_db_create __bam_db_create@DB_VERSION_UNIQUE_NAME@
+#define __bam_db_close __bam_db_close@DB_VERSION_UNIQUE_NAME@
+#define __bam_set_flags __bam_set_flags@DB_VERSION_UNIQUE_NAME@
+#define __ram_set_flags __ram_set_flags@DB_VERSION_UNIQUE_NAME@
+#define __bam_open __bam_open@DB_VERSION_UNIQUE_NAME@
+#define __bam_metachk __bam_metachk@DB_VERSION_UNIQUE_NAME@
+#define __bam_read_root __bam_read_root@DB_VERSION_UNIQUE_NAME@
+#define __bam_new_file __bam_new_file@DB_VERSION_UNIQUE_NAME@
+#define __bam_new_subdb __bam_new_subdb@DB_VERSION_UNIQUE_NAME@
+#define __bam_iitem __bam_iitem@DB_VERSION_UNIQUE_NAME@
+#define __bam_ritem __bam_ritem@DB_VERSION_UNIQUE_NAME@
+#define __bam_split_recover __bam_split_recover@DB_VERSION_UNIQUE_NAME@
+#define __bam_rsplit_recover __bam_rsplit_recover@DB_VERSION_UNIQUE_NAME@
+#define __bam_adj_recover __bam_adj_recover@DB_VERSION_UNIQUE_NAME@
+#define __bam_cadjust_recover __bam_cadjust_recover@DB_VERSION_UNIQUE_NAME@
+#define __bam_cdel_recover __bam_cdel_recover@DB_VERSION_UNIQUE_NAME@
+#define __bam_repl_recover __bam_repl_recover@DB_VERSION_UNIQUE_NAME@
+#define __bam_root_recover __bam_root_recover@DB_VERSION_UNIQUE_NAME@
+#define __bam_curadj_recover __bam_curadj_recover@DB_VERSION_UNIQUE_NAME@
+#define __bam_rcuradj_recover __bam_rcuradj_recover@DB_VERSION_UNIQUE_NAME@
+#define __bam_reclaim __bam_reclaim@DB_VERSION_UNIQUE_NAME@
+#define __bam_truncate __bam_truncate@DB_VERSION_UNIQUE_NAME@
+#define __ram_open __ram_open@DB_VERSION_UNIQUE_NAME@
+#define __ram_append __ram_append@DB_VERSION_UNIQUE_NAME@
+#define __ram_c_del __ram_c_del@DB_VERSION_UNIQUE_NAME@
+#define __ram_c_get __ram_c_get@DB_VERSION_UNIQUE_NAME@
+#define __ram_c_put __ram_c_put@DB_VERSION_UNIQUE_NAME@
+#define __ram_ca __ram_ca@DB_VERSION_UNIQUE_NAME@
+#define __ram_getno __ram_getno@DB_VERSION_UNIQUE_NAME@
+#define __ram_writeback __ram_writeback@DB_VERSION_UNIQUE_NAME@
+#define __bam_rsearch __bam_rsearch@DB_VERSION_UNIQUE_NAME@
+#define __bam_adjust __bam_adjust@DB_VERSION_UNIQUE_NAME@
+#define __bam_nrecs __bam_nrecs@DB_VERSION_UNIQUE_NAME@
+#define __bam_total __bam_total@DB_VERSION_UNIQUE_NAME@
+#define __bam_search __bam_search@DB_VERSION_UNIQUE_NAME@
+#define __bam_stkrel __bam_stkrel@DB_VERSION_UNIQUE_NAME@
+#define __bam_stkgrow __bam_stkgrow@DB_VERSION_UNIQUE_NAME@
+#define __bam_split __bam_split@DB_VERSION_UNIQUE_NAME@
+#define __bam_copy __bam_copy@DB_VERSION_UNIQUE_NAME@
+#define __bam_stat __bam_stat@DB_VERSION_UNIQUE_NAME@
+#define __bam_traverse __bam_traverse@DB_VERSION_UNIQUE_NAME@
+#define __bam_stat_callback __bam_stat_callback@DB_VERSION_UNIQUE_NAME@
+#define __bam_key_range __bam_key_range@DB_VERSION_UNIQUE_NAME@
+#define __bam_30_btreemeta __bam_30_btreemeta@DB_VERSION_UNIQUE_NAME@
+#define __bam_31_btreemeta __bam_31_btreemeta@DB_VERSION_UNIQUE_NAME@
+#define __bam_31_lbtree __bam_31_lbtree@DB_VERSION_UNIQUE_NAME@
+#define __bam_vrfy_meta __bam_vrfy_meta@DB_VERSION_UNIQUE_NAME@
+#define __ram_vrfy_leaf __ram_vrfy_leaf@DB_VERSION_UNIQUE_NAME@
+#define __bam_vrfy __bam_vrfy@DB_VERSION_UNIQUE_NAME@
+#define __bam_vrfy_itemorder __bam_vrfy_itemorder@DB_VERSION_UNIQUE_NAME@
+#define __bam_vrfy_structure __bam_vrfy_structure@DB_VERSION_UNIQUE_NAME@
+#define __bam_vrfy_subtree __bam_vrfy_subtree@DB_VERSION_UNIQUE_NAME@
+#define __bam_salvage __bam_salvage@DB_VERSION_UNIQUE_NAME@
+#define __bam_salvage_walkdupint __bam_salvage_walkdupint@DB_VERSION_UNIQUE_NAME@
+#define __bam_meta2pgset __bam_meta2pgset@DB_VERSION_UNIQUE_NAME@
+#define __bam_split_log __bam_split_log@DB_VERSION_UNIQUE_NAME@
+#define __bam_split_getpgnos __bam_split_getpgnos@DB_VERSION_UNIQUE_NAME@
+#define __bam_split_print __bam_split_print@DB_VERSION_UNIQUE_NAME@
+#define __bam_split_read __bam_split_read@DB_VERSION_UNIQUE_NAME@
+#define __bam_rsplit_log __bam_rsplit_log@DB_VERSION_UNIQUE_NAME@
+#define __bam_rsplit_getpgnos __bam_rsplit_getpgnos@DB_VERSION_UNIQUE_NAME@
+#define __bam_rsplit_print __bam_rsplit_print@DB_VERSION_UNIQUE_NAME@
+#define __bam_rsplit_read __bam_rsplit_read@DB_VERSION_UNIQUE_NAME@
+#define __bam_adj_log __bam_adj_log@DB_VERSION_UNIQUE_NAME@
+#define __bam_adj_getpgnos __bam_adj_getpgnos@DB_VERSION_UNIQUE_NAME@
+#define __bam_adj_print __bam_adj_print@DB_VERSION_UNIQUE_NAME@
+#define __bam_adj_read __bam_adj_read@DB_VERSION_UNIQUE_NAME@
+#define __bam_cadjust_log __bam_cadjust_log@DB_VERSION_UNIQUE_NAME@
+#define __bam_cadjust_getpgnos __bam_cadjust_getpgnos@DB_VERSION_UNIQUE_NAME@
+#define __bam_cadjust_print __bam_cadjust_print@DB_VERSION_UNIQUE_NAME@
+#define __bam_cadjust_read __bam_cadjust_read@DB_VERSION_UNIQUE_NAME@
+#define __bam_cdel_log __bam_cdel_log@DB_VERSION_UNIQUE_NAME@
+#define __bam_cdel_getpgnos __bam_cdel_getpgnos@DB_VERSION_UNIQUE_NAME@
+#define __bam_cdel_print __bam_cdel_print@DB_VERSION_UNIQUE_NAME@
+#define __bam_cdel_read __bam_cdel_read@DB_VERSION_UNIQUE_NAME@
+#define __bam_repl_log __bam_repl_log@DB_VERSION_UNIQUE_NAME@
+#define __bam_repl_getpgnos __bam_repl_getpgnos@DB_VERSION_UNIQUE_NAME@
+#define __bam_repl_print __bam_repl_print@DB_VERSION_UNIQUE_NAME@
+#define __bam_repl_read __bam_repl_read@DB_VERSION_UNIQUE_NAME@
+#define __bam_root_log __bam_root_log@DB_VERSION_UNIQUE_NAME@
+#define __bam_root_getpgnos __bam_root_getpgnos@DB_VERSION_UNIQUE_NAME@
+#define __bam_root_print __bam_root_print@DB_VERSION_UNIQUE_NAME@
+#define __bam_root_read __bam_root_read@DB_VERSION_UNIQUE_NAME@
+#define __bam_curadj_log __bam_curadj_log@DB_VERSION_UNIQUE_NAME@
+#define __bam_curadj_getpgnos __bam_curadj_getpgnos@DB_VERSION_UNIQUE_NAME@
+#define __bam_curadj_print __bam_curadj_print@DB_VERSION_UNIQUE_NAME@
+#define __bam_curadj_read __bam_curadj_read@DB_VERSION_UNIQUE_NAME@
+#define __bam_rcuradj_log __bam_rcuradj_log@DB_VERSION_UNIQUE_NAME@
+#define __bam_rcuradj_getpgnos __bam_rcuradj_getpgnos@DB_VERSION_UNIQUE_NAME@
+#define __bam_rcuradj_print __bam_rcuradj_print@DB_VERSION_UNIQUE_NAME@
+#define __bam_rcuradj_read __bam_rcuradj_read@DB_VERSION_UNIQUE_NAME@
+#define __bam_init_print __bam_init_print@DB_VERSION_UNIQUE_NAME@
+#define __bam_init_getpgnos __bam_init_getpgnos@DB_VERSION_UNIQUE_NAME@
+#define __bam_init_recover __bam_init_recover@DB_VERSION_UNIQUE_NAME@
+#ifndef HAVE_GETCWD
+#define getcwd getcwd@DB_VERSION_UNIQUE_NAME@
+#endif
+#ifndef HAVE_GETOPT
+#define getopt getopt@DB_VERSION_UNIQUE_NAME@
+#endif
+#ifndef HAVE_MEMCMP
+#define memcmp memcmp@DB_VERSION_UNIQUE_NAME@
+#endif
+#ifndef HAVE_MEMCPY
+#define memcpy memcpy@DB_VERSION_UNIQUE_NAME@
+#endif
+#ifndef HAVE_MEMMOVE
+#define memmove memmove@DB_VERSION_UNIQUE_NAME@
+#endif
+#ifndef HAVE_RAISE
+#define raise raise@DB_VERSION_UNIQUE_NAME@
+#endif
+#ifndef HAVE_SNPRINTF
+#define snprintf snprintf@DB_VERSION_UNIQUE_NAME@
+#endif
+#ifndef HAVE_STRCASECMP
+#define strcasecmp strcasecmp@DB_VERSION_UNIQUE_NAME@
+#endif
+#ifndef HAVE_STRCASECMP
+#define strncasecmp strncasecmp@DB_VERSION_UNIQUE_NAME@
+#endif
+#ifndef HAVE_STRDUP
+#define strdup strdup@DB_VERSION_UNIQUE_NAME@
+#endif
+#ifndef HAVE_STRERROR
+#define strerror strerror@DB_VERSION_UNIQUE_NAME@
+#endif
+#ifndef HAVE_VSNPRINTF
+#define vsnprintf vsnprintf@DB_VERSION_UNIQUE_NAME@
+#endif
+#define __db_isbigendian __db_isbigendian@DB_VERSION_UNIQUE_NAME@
+#define __db_byteorder __db_byteorder@DB_VERSION_UNIQUE_NAME@
+#define __db_fchk __db_fchk@DB_VERSION_UNIQUE_NAME@
+#define __db_fcchk __db_fcchk@DB_VERSION_UNIQUE_NAME@
+#define __db_ferr __db_ferr@DB_VERSION_UNIQUE_NAME@
+#define __db_pgerr __db_pgerr@DB_VERSION_UNIQUE_NAME@
+#define __db_pgfmt __db_pgfmt@DB_VERSION_UNIQUE_NAME@
+#define __db_eopnotsup __db_eopnotsup@DB_VERSION_UNIQUE_NAME@
+#ifdef DIAGNOSTIC
+#define __db_assert __db_assert@DB_VERSION_UNIQUE_NAME@
+#endif
+#define __db_panic_msg __db_panic_msg@DB_VERSION_UNIQUE_NAME@
+#define __db_panic __db_panic@DB_VERSION_UNIQUE_NAME@
+#define __db_err __db_err@DB_VERSION_UNIQUE_NAME@
+#define __db_errcall __db_errcall@DB_VERSION_UNIQUE_NAME@
+#define __db_errfile __db_errfile@DB_VERSION_UNIQUE_NAME@
+#define __db_logmsg __db_logmsg@DB_VERSION_UNIQUE_NAME@
+#define __db_unknown_flag __db_unknown_flag@DB_VERSION_UNIQUE_NAME@
+#define __db_unknown_type __db_unknown_type@DB_VERSION_UNIQUE_NAME@
+#define __db_check_txn __db_check_txn@DB_VERSION_UNIQUE_NAME@
+#define __db_not_txn_env __db_not_txn_env@DB_VERSION_UNIQUE_NAME@
+#define __db_getlong __db_getlong@DB_VERSION_UNIQUE_NAME@
+#define __db_getulong __db_getulong@DB_VERSION_UNIQUE_NAME@
+#define __db_idspace __db_idspace@DB_VERSION_UNIQUE_NAME@
+#define __db_log2 __db_log2@DB_VERSION_UNIQUE_NAME@
+#define __db_util_arg __db_util_arg@DB_VERSION_UNIQUE_NAME@
+#define __db_util_cache __db_util_cache@DB_VERSION_UNIQUE_NAME@
+#define __db_util_logset __db_util_logset@DB_VERSION_UNIQUE_NAME@
+#define __db_util_siginit __db_util_siginit@DB_VERSION_UNIQUE_NAME@
+#define __db_util_interrupted __db_util_interrupted@DB_VERSION_UNIQUE_NAME@
+#define __db_util_sigresend __db_util_sigresend@DB_VERSION_UNIQUE_NAME@
+#define __aes_setup __aes_setup@DB_VERSION_UNIQUE_NAME@
+#define __aes_adj_size __aes_adj_size@DB_VERSION_UNIQUE_NAME@
+#define __aes_close __aes_close@DB_VERSION_UNIQUE_NAME@
+#define __aes_decrypt __aes_decrypt@DB_VERSION_UNIQUE_NAME@
+#define __aes_encrypt __aes_encrypt@DB_VERSION_UNIQUE_NAME@
+#define __aes_init __aes_init@DB_VERSION_UNIQUE_NAME@
+#define __crypto_region_init __crypto_region_init@DB_VERSION_UNIQUE_NAME@
+#define __crypto_dbenv_close __crypto_dbenv_close@DB_VERSION_UNIQUE_NAME@
+#define __crypto_algsetup __crypto_algsetup@DB_VERSION_UNIQUE_NAME@
+#define __crypto_decrypt_meta __crypto_decrypt_meta@DB_VERSION_UNIQUE_NAME@
+#define __db_generate_iv __db_generate_iv@DB_VERSION_UNIQUE_NAME@
+#define __db_rijndaelKeySetupEnc __db_rijndaelKeySetupEnc@DB_VERSION_UNIQUE_NAME@
+#define __db_rijndaelKeySetupDec __db_rijndaelKeySetupDec@DB_VERSION_UNIQUE_NAME@
+#define __db_rijndaelEncrypt __db_rijndaelEncrypt@DB_VERSION_UNIQUE_NAME@
+#define __db_rijndaelDecrypt __db_rijndaelDecrypt@DB_VERSION_UNIQUE_NAME@
+#define __db_rijndaelEncryptRound __db_rijndaelEncryptRound@DB_VERSION_UNIQUE_NAME@
+#define __db_rijndaelDecryptRound __db_rijndaelDecryptRound@DB_VERSION_UNIQUE_NAME@
+#define __db_makeKey __db_makeKey@DB_VERSION_UNIQUE_NAME@
+#define __db_cipherInit __db_cipherInit@DB_VERSION_UNIQUE_NAME@
+#define __db_blockEncrypt __db_blockEncrypt@DB_VERSION_UNIQUE_NAME@
+#define __db_padEncrypt __db_padEncrypt@DB_VERSION_UNIQUE_NAME@
+#define __db_blockDecrypt __db_blockDecrypt@DB_VERSION_UNIQUE_NAME@
+#define __db_padDecrypt __db_padDecrypt@DB_VERSION_UNIQUE_NAME@
+#define __db_cipherUpdateRounds __db_cipherUpdateRounds@DB_VERSION_UNIQUE_NAME@
+#define __dbreg_setup __dbreg_setup@DB_VERSION_UNIQUE_NAME@
+#define __dbreg_teardown __dbreg_teardown@DB_VERSION_UNIQUE_NAME@
+#define __dbreg_new_id __dbreg_new_id@DB_VERSION_UNIQUE_NAME@
+#define __dbreg_assign_id __dbreg_assign_id@DB_VERSION_UNIQUE_NAME@
+#define __dbreg_revoke_id __dbreg_revoke_id@DB_VERSION_UNIQUE_NAME@
+#define __dbreg_close_id __dbreg_close_id@DB_VERSION_UNIQUE_NAME@
+#define __dbreg_register_log __dbreg_register_log@DB_VERSION_UNIQUE_NAME@
+#define __dbreg_register_getpgnos __dbreg_register_getpgnos@DB_VERSION_UNIQUE_NAME@
+#define __dbreg_register_print __dbreg_register_print@DB_VERSION_UNIQUE_NAME@
+#define __dbreg_register_read __dbreg_register_read@DB_VERSION_UNIQUE_NAME@
+#define __dbreg_init_print __dbreg_init_print@DB_VERSION_UNIQUE_NAME@
+#define __dbreg_init_getpgnos __dbreg_init_getpgnos@DB_VERSION_UNIQUE_NAME@
+#define __dbreg_init_recover __dbreg_init_recover@DB_VERSION_UNIQUE_NAME@
+#define __dbreg_register_recover __dbreg_register_recover@DB_VERSION_UNIQUE_NAME@
+#define __dbreg_add_dbentry __dbreg_add_dbentry@DB_VERSION_UNIQUE_NAME@
+#define __dbreg_rem_dbentry __dbreg_rem_dbentry@DB_VERSION_UNIQUE_NAME@
+#define __dbreg_open_files __dbreg_open_files@DB_VERSION_UNIQUE_NAME@
+#define __dbreg_close_files __dbreg_close_files@DB_VERSION_UNIQUE_NAME@
+#define __dbreg_nofiles __dbreg_nofiles@DB_VERSION_UNIQUE_NAME@
+#define __dbreg_id_to_db __dbreg_id_to_db@DB_VERSION_UNIQUE_NAME@
+#define __dbreg_id_to_db_int __dbreg_id_to_db_int@DB_VERSION_UNIQUE_NAME@
+#define __dbreg_id_to_fname __dbreg_id_to_fname@DB_VERSION_UNIQUE_NAME@
+#define __dbreg_fid_to_fname __dbreg_fid_to_fname@DB_VERSION_UNIQUE_NAME@
+#define __dbreg_get_name __dbreg_get_name@DB_VERSION_UNIQUE_NAME@
+#define __dbreg_do_open __dbreg_do_open@DB_VERSION_UNIQUE_NAME@
+#define __dbreg_lazy_id __dbreg_lazy_id@DB_VERSION_UNIQUE_NAME@
+#define __dbreg_push_id __dbreg_push_id@DB_VERSION_UNIQUE_NAME@
+#define __dbreg_pop_id __dbreg_pop_id@DB_VERSION_UNIQUE_NAME@
+#define __dbreg_pluck_id __dbreg_pluck_id@DB_VERSION_UNIQUE_NAME@
+#define __dbreg_print_dblist __dbreg_print_dblist@DB_VERSION_UNIQUE_NAME@
+#define __db_shalloc_init __db_shalloc_init@DB_VERSION_UNIQUE_NAME@
+#define __db_shalloc_size __db_shalloc_size@DB_VERSION_UNIQUE_NAME@
+#define __db_shalloc __db_shalloc@DB_VERSION_UNIQUE_NAME@
+#define __db_shalloc_free __db_shalloc_free@DB_VERSION_UNIQUE_NAME@
+#define __db_shsizeof __db_shsizeof@DB_VERSION_UNIQUE_NAME@
+#define __db_shalloc_dump __db_shalloc_dump@DB_VERSION_UNIQUE_NAME@
+#define __db_tablesize __db_tablesize@DB_VERSION_UNIQUE_NAME@
+#define __db_hashinit __db_hashinit@DB_VERSION_UNIQUE_NAME@
+#define __db_fileinit __db_fileinit@DB_VERSION_UNIQUE_NAME@
+#define __db_overwrite __db_overwrite@DB_VERSION_UNIQUE_NAME@
+#define __db_mi_env __db_mi_env@DB_VERSION_UNIQUE_NAME@
+#define __db_mi_open __db_mi_open@DB_VERSION_UNIQUE_NAME@
+#define __db_env_config __db_env_config@DB_VERSION_UNIQUE_NAME@
+#define __dbenv_open __dbenv_open@DB_VERSION_UNIQUE_NAME@
+#define __dbenv_remove __dbenv_remove@DB_VERSION_UNIQUE_NAME@
+#define __dbenv_close __dbenv_close@DB_VERSION_UNIQUE_NAME@
+#define __db_appname __db_appname@DB_VERSION_UNIQUE_NAME@
+#define __db_home __db_home@DB_VERSION_UNIQUE_NAME@
+#define __db_apprec __db_apprec@DB_VERSION_UNIQUE_NAME@
+#define __env_openfiles __env_openfiles@DB_VERSION_UNIQUE_NAME@
+#define __db_e_attach __db_e_attach@DB_VERSION_UNIQUE_NAME@
+#define __db_e_detach __db_e_detach@DB_VERSION_UNIQUE_NAME@
+#define __db_e_remove __db_e_remove@DB_VERSION_UNIQUE_NAME@
+#define __db_e_stat __db_e_stat@DB_VERSION_UNIQUE_NAME@
+#define __db_r_attach __db_r_attach@DB_VERSION_UNIQUE_NAME@
+#define __db_r_detach __db_r_detach@DB_VERSION_UNIQUE_NAME@
+#define __fop_create_log __fop_create_log@DB_VERSION_UNIQUE_NAME@
+#define __fop_create_getpgnos __fop_create_getpgnos@DB_VERSION_UNIQUE_NAME@
+#define __fop_create_print __fop_create_print@DB_VERSION_UNIQUE_NAME@
+#define __fop_create_read __fop_create_read@DB_VERSION_UNIQUE_NAME@
+#define __fop_remove_log __fop_remove_log@DB_VERSION_UNIQUE_NAME@
+#define __fop_remove_getpgnos __fop_remove_getpgnos@DB_VERSION_UNIQUE_NAME@
+#define __fop_remove_print __fop_remove_print@DB_VERSION_UNIQUE_NAME@
+#define __fop_remove_read __fop_remove_read@DB_VERSION_UNIQUE_NAME@
+#define __fop_write_log __fop_write_log@DB_VERSION_UNIQUE_NAME@
+#define __fop_write_getpgnos __fop_write_getpgnos@DB_VERSION_UNIQUE_NAME@
+#define __fop_write_print __fop_write_print@DB_VERSION_UNIQUE_NAME@
+#define __fop_write_read __fop_write_read@DB_VERSION_UNIQUE_NAME@
+#define __fop_rename_log __fop_rename_log@DB_VERSION_UNIQUE_NAME@
+#define __fop_rename_getpgnos __fop_rename_getpgnos@DB_VERSION_UNIQUE_NAME@
+#define __fop_rename_print __fop_rename_print@DB_VERSION_UNIQUE_NAME@
+#define __fop_rename_read __fop_rename_read@DB_VERSION_UNIQUE_NAME@
+#define __fop_file_remove_log __fop_file_remove_log@DB_VERSION_UNIQUE_NAME@
+#define __fop_file_remove_getpgnos __fop_file_remove_getpgnos@DB_VERSION_UNIQUE_NAME@
+#define __fop_file_remove_print __fop_file_remove_print@DB_VERSION_UNIQUE_NAME@
+#define __fop_file_remove_read __fop_file_remove_read@DB_VERSION_UNIQUE_NAME@
+#define __fop_init_print __fop_init_print@DB_VERSION_UNIQUE_NAME@
+#define __fop_init_getpgnos __fop_init_getpgnos@DB_VERSION_UNIQUE_NAME@
+#define __fop_init_recover __fop_init_recover@DB_VERSION_UNIQUE_NAME@
+#define __fop_create __fop_create@DB_VERSION_UNIQUE_NAME@
+#define __fop_remove __fop_remove@DB_VERSION_UNIQUE_NAME@
+#define __fop_write __fop_write@DB_VERSION_UNIQUE_NAME@
+#define __fop_rename __fop_rename@DB_VERSION_UNIQUE_NAME@
+#define __fop_create_recover __fop_create_recover@DB_VERSION_UNIQUE_NAME@
+#define __fop_remove_recover __fop_remove_recover@DB_VERSION_UNIQUE_NAME@
+#define __fop_write_recover __fop_write_recover@DB_VERSION_UNIQUE_NAME@
+#define __fop_rename_recover __fop_rename_recover@DB_VERSION_UNIQUE_NAME@
+#define __fop_file_remove_recover __fop_file_remove_recover@DB_VERSION_UNIQUE_NAME@
+#define __fop_lock_handle __fop_lock_handle@DB_VERSION_UNIQUE_NAME@
+#define __fop_file_setup __fop_file_setup@DB_VERSION_UNIQUE_NAME@
+#define __fop_subdb_setup __fop_subdb_setup@DB_VERSION_UNIQUE_NAME@
+#define __fop_remove_setup __fop_remove_setup@DB_VERSION_UNIQUE_NAME@
+#define __fop_read_meta __fop_read_meta@DB_VERSION_UNIQUE_NAME@
+#define __fop_dummy __fop_dummy@DB_VERSION_UNIQUE_NAME@
+#define __fop_dbrename __fop_dbrename@DB_VERSION_UNIQUE_NAME@
+#define __ham_quick_delete __ham_quick_delete@DB_VERSION_UNIQUE_NAME@
+#define __ham_c_init __ham_c_init@DB_VERSION_UNIQUE_NAME@
+#define __ham_c_count __ham_c_count@DB_VERSION_UNIQUE_NAME@
+#define __ham_c_dup __ham_c_dup@DB_VERSION_UNIQUE_NAME@
+#define __ham_call_hash __ham_call_hash@DB_VERSION_UNIQUE_NAME@
+#define __ham_init_dbt __ham_init_dbt@DB_VERSION_UNIQUE_NAME@
+#define __ham_c_update __ham_c_update@DB_VERSION_UNIQUE_NAME@
+#define __ham_get_clist __ham_get_clist@DB_VERSION_UNIQUE_NAME@
+#define __ham_insdel_log __ham_insdel_log@DB_VERSION_UNIQUE_NAME@
+#define __ham_insdel_getpgnos __ham_insdel_getpgnos@DB_VERSION_UNIQUE_NAME@
+#define __ham_insdel_print __ham_insdel_print@DB_VERSION_UNIQUE_NAME@
+#define __ham_insdel_read __ham_insdel_read@DB_VERSION_UNIQUE_NAME@
+#define __ham_newpage_log __ham_newpage_log@DB_VERSION_UNIQUE_NAME@
+#define __ham_newpage_getpgnos __ham_newpage_getpgnos@DB_VERSION_UNIQUE_NAME@
+#define __ham_newpage_print __ham_newpage_print@DB_VERSION_UNIQUE_NAME@
+#define __ham_newpage_read __ham_newpage_read@DB_VERSION_UNIQUE_NAME@
+#define __ham_splitdata_log __ham_splitdata_log@DB_VERSION_UNIQUE_NAME@
+#define __ham_splitdata_getpgnos __ham_splitdata_getpgnos@DB_VERSION_UNIQUE_NAME@
+#define __ham_splitdata_print __ham_splitdata_print@DB_VERSION_UNIQUE_NAME@
+#define __ham_splitdata_read __ham_splitdata_read@DB_VERSION_UNIQUE_NAME@
+#define __ham_replace_log __ham_replace_log@DB_VERSION_UNIQUE_NAME@
+#define __ham_replace_getpgnos __ham_replace_getpgnos@DB_VERSION_UNIQUE_NAME@
+#define __ham_replace_print __ham_replace_print@DB_VERSION_UNIQUE_NAME@
+#define __ham_replace_read __ham_replace_read@DB_VERSION_UNIQUE_NAME@
+#define __ham_copypage_log __ham_copypage_log@DB_VERSION_UNIQUE_NAME@
+#define __ham_copypage_getpgnos __ham_copypage_getpgnos@DB_VERSION_UNIQUE_NAME@
+#define __ham_copypage_print __ham_copypage_print@DB_VERSION_UNIQUE_NAME@
+#define __ham_copypage_read __ham_copypage_read@DB_VERSION_UNIQUE_NAME@
+#define __ham_metagroup_log __ham_metagroup_log@DB_VERSION_UNIQUE_NAME@
+#define __ham_metagroup_getpgnos __ham_metagroup_getpgnos@DB_VERSION_UNIQUE_NAME@
+#define __ham_metagroup_print __ham_metagroup_print@DB_VERSION_UNIQUE_NAME@
+#define __ham_metagroup_read __ham_metagroup_read@DB_VERSION_UNIQUE_NAME@
+#define __ham_groupalloc_log __ham_groupalloc_log@DB_VERSION_UNIQUE_NAME@
+#define __ham_groupalloc_getpgnos __ham_groupalloc_getpgnos@DB_VERSION_UNIQUE_NAME@
+#define __ham_groupalloc_print __ham_groupalloc_print@DB_VERSION_UNIQUE_NAME@
+#define __ham_groupalloc_read __ham_groupalloc_read@DB_VERSION_UNIQUE_NAME@
+#define __ham_curadj_log __ham_curadj_log@DB_VERSION_UNIQUE_NAME@
+#define __ham_curadj_getpgnos __ham_curadj_getpgnos@DB_VERSION_UNIQUE_NAME@
+#define __ham_curadj_print __ham_curadj_print@DB_VERSION_UNIQUE_NAME@
+#define __ham_curadj_read __ham_curadj_read@DB_VERSION_UNIQUE_NAME@
+#define __ham_chgpg_log __ham_chgpg_log@DB_VERSION_UNIQUE_NAME@
+#define __ham_chgpg_getpgnos __ham_chgpg_getpgnos@DB_VERSION_UNIQUE_NAME@
+#define __ham_chgpg_print __ham_chgpg_print@DB_VERSION_UNIQUE_NAME@
+#define __ham_chgpg_read __ham_chgpg_read@DB_VERSION_UNIQUE_NAME@
+#define __ham_init_print __ham_init_print@DB_VERSION_UNIQUE_NAME@
+#define __ham_init_getpgnos __ham_init_getpgnos@DB_VERSION_UNIQUE_NAME@
+#define __ham_init_recover __ham_init_recover@DB_VERSION_UNIQUE_NAME@
+#define __ham_pgin __ham_pgin@DB_VERSION_UNIQUE_NAME@
+#define __ham_pgout __ham_pgout@DB_VERSION_UNIQUE_NAME@
+#define __ham_mswap __ham_mswap@DB_VERSION_UNIQUE_NAME@
+#define __ham_add_dup __ham_add_dup@DB_VERSION_UNIQUE_NAME@
+#define __ham_dup_convert __ham_dup_convert@DB_VERSION_UNIQUE_NAME@
+#define __ham_make_dup __ham_make_dup@DB_VERSION_UNIQUE_NAME@
+#define __ham_dsearch __ham_dsearch@DB_VERSION_UNIQUE_NAME@
+#define __ham_cprint __ham_cprint@DB_VERSION_UNIQUE_NAME@
+#define __ham_func2 __ham_func2@DB_VERSION_UNIQUE_NAME@
+#define __ham_func3 __ham_func3@DB_VERSION_UNIQUE_NAME@
+#define __ham_func4 __ham_func4@DB_VERSION_UNIQUE_NAME@
+#define __ham_func5 __ham_func5@DB_VERSION_UNIQUE_NAME@
+#define __ham_test __ham_test@DB_VERSION_UNIQUE_NAME@
+#define __ham_get_meta __ham_get_meta@DB_VERSION_UNIQUE_NAME@
+#define __ham_release_meta __ham_release_meta@DB_VERSION_UNIQUE_NAME@
+#define __ham_dirty_meta __ham_dirty_meta@DB_VERSION_UNIQUE_NAME@
+#define __ham_db_create __ham_db_create@DB_VERSION_UNIQUE_NAME@
+#define __ham_db_close __ham_db_close@DB_VERSION_UNIQUE_NAME@
+#define __ham_open __ham_open@DB_VERSION_UNIQUE_NAME@
+#define __ham_metachk __ham_metachk@DB_VERSION_UNIQUE_NAME@
+#define __ham_new_file __ham_new_file@DB_VERSION_UNIQUE_NAME@
+#define __ham_new_subdb __ham_new_subdb@DB_VERSION_UNIQUE_NAME@
+#define __ham_item __ham_item@DB_VERSION_UNIQUE_NAME@
+#define __ham_item_reset __ham_item_reset@DB_VERSION_UNIQUE_NAME@
+#define __ham_item_init __ham_item_init@DB_VERSION_UNIQUE_NAME@
+#define __ham_item_last __ham_item_last@DB_VERSION_UNIQUE_NAME@
+#define __ham_item_first __ham_item_first@DB_VERSION_UNIQUE_NAME@
+#define __ham_item_prev __ham_item_prev@DB_VERSION_UNIQUE_NAME@
+#define __ham_item_next __ham_item_next@DB_VERSION_UNIQUE_NAME@
+#define __ham_putitem __ham_putitem@DB_VERSION_UNIQUE_NAME@
+#define __ham_reputpair __ham_reputpair@DB_VERSION_UNIQUE_NAME@
+#define __ham_del_pair __ham_del_pair@DB_VERSION_UNIQUE_NAME@
+#define __ham_replpair __ham_replpair@DB_VERSION_UNIQUE_NAME@
+#define __ham_onpage_replace __ham_onpage_replace@DB_VERSION_UNIQUE_NAME@
+#define __ham_split_page __ham_split_page@DB_VERSION_UNIQUE_NAME@
+#define __ham_add_el __ham_add_el@DB_VERSION_UNIQUE_NAME@
+#define __ham_copy_item __ham_copy_item@DB_VERSION_UNIQUE_NAME@
+#define __ham_add_ovflpage __ham_add_ovflpage@DB_VERSION_UNIQUE_NAME@
+#define __ham_get_cpage __ham_get_cpage@DB_VERSION_UNIQUE_NAME@
+#define __ham_next_cpage __ham_next_cpage@DB_VERSION_UNIQUE_NAME@
+#define __ham_lock_bucket __ham_lock_bucket@DB_VERSION_UNIQUE_NAME@
+#define __ham_dpair __ham_dpair@DB_VERSION_UNIQUE_NAME@
+#define __ham_insdel_recover __ham_insdel_recover@DB_VERSION_UNIQUE_NAME@
+#define __ham_newpage_recover __ham_newpage_recover@DB_VERSION_UNIQUE_NAME@
+#define __ham_replace_recover __ham_replace_recover@DB_VERSION_UNIQUE_NAME@
+#define __ham_splitdata_recover __ham_splitdata_recover@DB_VERSION_UNIQUE_NAME@
+#define __ham_copypage_recover __ham_copypage_recover@DB_VERSION_UNIQUE_NAME@
+#define __ham_metagroup_recover __ham_metagroup_recover@DB_VERSION_UNIQUE_NAME@
+#define __ham_groupalloc_recover __ham_groupalloc_recover@DB_VERSION_UNIQUE_NAME@
+#define __ham_curadj_recover __ham_curadj_recover@DB_VERSION_UNIQUE_NAME@
+#define __ham_chgpg_recover __ham_chgpg_recover@DB_VERSION_UNIQUE_NAME@
+#define __ham_reclaim __ham_reclaim@DB_VERSION_UNIQUE_NAME@
+#define __ham_truncate __ham_truncate@DB_VERSION_UNIQUE_NAME@
+#define __ham_stat __ham_stat@DB_VERSION_UNIQUE_NAME@
+#define __ham_traverse __ham_traverse@DB_VERSION_UNIQUE_NAME@
+#define __ham_30_hashmeta __ham_30_hashmeta@DB_VERSION_UNIQUE_NAME@
+#define __ham_30_sizefix __ham_30_sizefix@DB_VERSION_UNIQUE_NAME@
+#define __ham_31_hashmeta __ham_31_hashmeta@DB_VERSION_UNIQUE_NAME@
+#define __ham_31_hash __ham_31_hash@DB_VERSION_UNIQUE_NAME@
+#define __ham_vrfy_meta __ham_vrfy_meta@DB_VERSION_UNIQUE_NAME@
+#define __ham_vrfy __ham_vrfy@DB_VERSION_UNIQUE_NAME@
+#define __ham_vrfy_structure __ham_vrfy_structure@DB_VERSION_UNIQUE_NAME@
+#define __ham_vrfy_hashing __ham_vrfy_hashing@DB_VERSION_UNIQUE_NAME@
+#define __ham_salvage __ham_salvage@DB_VERSION_UNIQUE_NAME@
+#define __ham_meta2pgset __ham_meta2pgset@DB_VERSION_UNIQUE_NAME@
+#define __db_chksum __db_chksum@DB_VERSION_UNIQUE_NAME@
+#define __db_derive_mac __db_derive_mac@DB_VERSION_UNIQUE_NAME@
+#define __db_check_chksum __db_check_chksum@DB_VERSION_UNIQUE_NAME@
+#define __db_SHA1Transform __db_SHA1Transform@DB_VERSION_UNIQUE_NAME@
+#define __db_SHA1Init __db_SHA1Init@DB_VERSION_UNIQUE_NAME@
+#define __db_SHA1Update __db_SHA1Update@DB_VERSION_UNIQUE_NAME@
+#define __db_SHA1Final __db_SHA1Final@DB_VERSION_UNIQUE_NAME@
+#define __lock_id __lock_id@DB_VERSION_UNIQUE_NAME@
+#define __lock_id_free __lock_id_free@DB_VERSION_UNIQUE_NAME@
+#define __lock_vec __lock_vec@DB_VERSION_UNIQUE_NAME@
+#define __lock_get __lock_get@DB_VERSION_UNIQUE_NAME@
+#define __lock_put __lock_put@DB_VERSION_UNIQUE_NAME@
+#define __lock_downgrade __lock_downgrade@DB_VERSION_UNIQUE_NAME@
+#define __lock_addfamilylocker __lock_addfamilylocker@DB_VERSION_UNIQUE_NAME@
+#define __lock_freefamilylocker __lock_freefamilylocker@DB_VERSION_UNIQUE_NAME@
+#define __lock_set_timeout __lock_set_timeout@DB_VERSION_UNIQUE_NAME@
+#define __lock_inherit_timeout __lock_inherit_timeout@DB_VERSION_UNIQUE_NAME@
+#define __lock_getlocker __lock_getlocker@DB_VERSION_UNIQUE_NAME@
+#define __lock_promote __lock_promote@DB_VERSION_UNIQUE_NAME@
+#define __lock_expired __lock_expired@DB_VERSION_UNIQUE_NAME@
+#define __lock_detect __lock_detect@DB_VERSION_UNIQUE_NAME@
+#define __lock_dbenv_create __lock_dbenv_create@DB_VERSION_UNIQUE_NAME@
+#define __lock_dbenv_close __lock_dbenv_close@DB_VERSION_UNIQUE_NAME@
+#define __lock_open __lock_open@DB_VERSION_UNIQUE_NAME@
+#define __lock_dbenv_refresh __lock_dbenv_refresh@DB_VERSION_UNIQUE_NAME@
+#define __lock_region_destroy __lock_region_destroy@DB_VERSION_UNIQUE_NAME@
+#define __lock_id_set __lock_id_set@DB_VERSION_UNIQUE_NAME@
+#define __lock_stat __lock_stat@DB_VERSION_UNIQUE_NAME@
+#define __lock_dump_region __lock_dump_region@DB_VERSION_UNIQUE_NAME@
+#define __lock_printlock __lock_printlock@DB_VERSION_UNIQUE_NAME@
+#define __lock_cmp __lock_cmp@DB_VERSION_UNIQUE_NAME@
+#define __lock_locker_cmp __lock_locker_cmp@DB_VERSION_UNIQUE_NAME@
+#define __lock_ohash __lock_ohash@DB_VERSION_UNIQUE_NAME@
+#define __lock_lhash __lock_lhash@DB_VERSION_UNIQUE_NAME@
+#define __lock_locker_hash __lock_locker_hash@DB_VERSION_UNIQUE_NAME@
+#define __log_open __log_open@DB_VERSION_UNIQUE_NAME@
+#define __log_find __log_find@DB_VERSION_UNIQUE_NAME@
+#define __log_valid __log_valid@DB_VERSION_UNIQUE_NAME@
+#define __log_dbenv_refresh __log_dbenv_refresh@DB_VERSION_UNIQUE_NAME@
+#define __log_stat __log_stat@DB_VERSION_UNIQUE_NAME@
+#define __log_get_cached_ckp_lsn __log_get_cached_ckp_lsn@DB_VERSION_UNIQUE_NAME@
+#define __log_region_destroy __log_region_destroy@DB_VERSION_UNIQUE_NAME@
+#define __log_vtruncate __log_vtruncate@DB_VERSION_UNIQUE_NAME@
+#define __log_is_outdated __log_is_outdated@DB_VERSION_UNIQUE_NAME@
+#define __log_archive __log_archive@DB_VERSION_UNIQUE_NAME@
+#define __log_cursor __log_cursor@DB_VERSION_UNIQUE_NAME@
+#define __log_dbenv_create __log_dbenv_create@DB_VERSION_UNIQUE_NAME@
+#define __log_put __log_put@DB_VERSION_UNIQUE_NAME@
+#define __log_txn_lsn __log_txn_lsn@DB_VERSION_UNIQUE_NAME@
+#define __log_newfile __log_newfile@DB_VERSION_UNIQUE_NAME@
+#define __log_flush __log_flush@DB_VERSION_UNIQUE_NAME@
+#define __log_file __log_file@DB_VERSION_UNIQUE_NAME@
+#define __log_name __log_name@DB_VERSION_UNIQUE_NAME@
+#define __log_rep_put __log_rep_put@DB_VERSION_UNIQUE_NAME@
+#define __memp_alloc __memp_alloc@DB_VERSION_UNIQUE_NAME@
+#ifdef DIAGNOSTIC
+#define __memp_check_order __memp_check_order@DB_VERSION_UNIQUE_NAME@
+#endif
+#define __memp_bhwrite __memp_bhwrite@DB_VERSION_UNIQUE_NAME@
+#define __memp_pgread __memp_pgread@DB_VERSION_UNIQUE_NAME@
+#define __memp_pg __memp_pg@DB_VERSION_UNIQUE_NAME@
+#define __memp_bhfree __memp_bhfree@DB_VERSION_UNIQUE_NAME@
+#define __memp_fget __memp_fget@DB_VERSION_UNIQUE_NAME@
+#define __memp_fcreate __memp_fcreate@DB_VERSION_UNIQUE_NAME@
+#define __memp_fopen_int __memp_fopen_int@DB_VERSION_UNIQUE_NAME@
+#define __memp_fclose_int __memp_fclose_int@DB_VERSION_UNIQUE_NAME@
+#define __memp_mf_discard __memp_mf_discard@DB_VERSION_UNIQUE_NAME@
+#define __memp_fn __memp_fn@DB_VERSION_UNIQUE_NAME@
+#define __memp_fns __memp_fns@DB_VERSION_UNIQUE_NAME@
+#define __memp_fput __memp_fput@DB_VERSION_UNIQUE_NAME@
+#define __memp_fset __memp_fset@DB_VERSION_UNIQUE_NAME@
+#define __memp_dbenv_create __memp_dbenv_create@DB_VERSION_UNIQUE_NAME@
+#define __memp_open __memp_open@DB_VERSION_UNIQUE_NAME@
+#define __memp_dbenv_refresh __memp_dbenv_refresh@DB_VERSION_UNIQUE_NAME@
+#define __mpool_region_destroy __mpool_region_destroy@DB_VERSION_UNIQUE_NAME@
+#define __memp_nameop __memp_nameop@DB_VERSION_UNIQUE_NAME@
+#define __memp_register __memp_register@DB_VERSION_UNIQUE_NAME@
+#define __memp_stat __memp_stat@DB_VERSION_UNIQUE_NAME@
+#define __memp_dump_region __memp_dump_region@DB_VERSION_UNIQUE_NAME@
+#define __memp_stat_hash __memp_stat_hash@DB_VERSION_UNIQUE_NAME@
+#define __memp_sync __memp_sync@DB_VERSION_UNIQUE_NAME@
+#define __memp_fsync __memp_fsync@DB_VERSION_UNIQUE_NAME@
+#define __mp_xxx_fh __mp_xxx_fh@DB_VERSION_UNIQUE_NAME@
+#define __memp_sync_int __memp_sync_int@DB_VERSION_UNIQUE_NAME@
+#define __memp_trickle __memp_trickle@DB_VERSION_UNIQUE_NAME@
+#define __db_fcntl_mutex_init __db_fcntl_mutex_init@DB_VERSION_UNIQUE_NAME@
+#define __db_fcntl_mutex_lock __db_fcntl_mutex_lock@DB_VERSION_UNIQUE_NAME@
+#define __db_fcntl_mutex_unlock __db_fcntl_mutex_unlock@DB_VERSION_UNIQUE_NAME@
+#define __db_fcntl_mutex_destroy __db_fcntl_mutex_destroy@DB_VERSION_UNIQUE_NAME@
+#define __db_pthread_mutex_init __db_pthread_mutex_init@DB_VERSION_UNIQUE_NAME@
+#define __db_pthread_mutex_lock __db_pthread_mutex_lock@DB_VERSION_UNIQUE_NAME@
+#define __db_pthread_mutex_unlock __db_pthread_mutex_unlock@DB_VERSION_UNIQUE_NAME@
+#define __db_pthread_mutex_destroy __db_pthread_mutex_destroy@DB_VERSION_UNIQUE_NAME@
+#define __db_tas_mutex_init __db_tas_mutex_init@DB_VERSION_UNIQUE_NAME@
+#define __db_tas_mutex_lock __db_tas_mutex_lock@DB_VERSION_UNIQUE_NAME@
+#define __db_tas_mutex_unlock __db_tas_mutex_unlock@DB_VERSION_UNIQUE_NAME@
+#define __db_tas_mutex_destroy __db_tas_mutex_destroy@DB_VERSION_UNIQUE_NAME@
+#define __db_win32_mutex_init __db_win32_mutex_init@DB_VERSION_UNIQUE_NAME@
+#define __db_win32_mutex_lock __db_win32_mutex_lock@DB_VERSION_UNIQUE_NAME@
+#define __db_win32_mutex_unlock __db_win32_mutex_unlock@DB_VERSION_UNIQUE_NAME@
+#define __db_win32_mutex_destroy __db_win32_mutex_destroy@DB_VERSION_UNIQUE_NAME@
+#define __db_mutex_setup __db_mutex_setup@DB_VERSION_UNIQUE_NAME@
+#define __db_mutex_free __db_mutex_free@DB_VERSION_UNIQUE_NAME@
+#define __db_shreg_locks_clear __db_shreg_locks_clear@DB_VERSION_UNIQUE_NAME@
+#define __db_shreg_locks_destroy __db_shreg_locks_destroy@DB_VERSION_UNIQUE_NAME@
+#define __db_shreg_mutex_init __db_shreg_mutex_init@DB_VERSION_UNIQUE_NAME@
+#define __db_shreg_maintinit __db_shreg_maintinit@DB_VERSION_UNIQUE_NAME@
+#define __os_abspath __os_abspath@DB_VERSION_UNIQUE_NAME@
+#define __os_umalloc __os_umalloc@DB_VERSION_UNIQUE_NAME@
+#define __os_urealloc __os_urealloc@DB_VERSION_UNIQUE_NAME@
+#define __os_ufree __os_ufree@DB_VERSION_UNIQUE_NAME@
+#define __os_strdup __os_strdup@DB_VERSION_UNIQUE_NAME@
+#define __os_calloc __os_calloc@DB_VERSION_UNIQUE_NAME@
+#define __os_malloc __os_malloc@DB_VERSION_UNIQUE_NAME@
+#define __os_realloc __os_realloc@DB_VERSION_UNIQUE_NAME@
+#define __os_free __os_free@DB_VERSION_UNIQUE_NAME@
+#define __ua_memcpy __ua_memcpy@DB_VERSION_UNIQUE_NAME@
+#define __os_clock __os_clock@DB_VERSION_UNIQUE_NAME@
+#define __os_fs_notzero __os_fs_notzero@DB_VERSION_UNIQUE_NAME@
+#define __os_dirlist __os_dirlist@DB_VERSION_UNIQUE_NAME@
+#define __os_dirfree __os_dirfree@DB_VERSION_UNIQUE_NAME@
+#define __os_get_errno_ret_zero __os_get_errno_ret_zero@DB_VERSION_UNIQUE_NAME@
+#define __os_get_errno __os_get_errno@DB_VERSION_UNIQUE_NAME@
+#define __os_set_errno __os_set_errno@DB_VERSION_UNIQUE_NAME@
+#define __os_fileid __os_fileid@DB_VERSION_UNIQUE_NAME@
+#define __os_fsync __os_fsync@DB_VERSION_UNIQUE_NAME@
+#define __os_openhandle __os_openhandle@DB_VERSION_UNIQUE_NAME@
+#define __os_closehandle __os_closehandle@DB_VERSION_UNIQUE_NAME@
+#define __os_id __os_id@DB_VERSION_UNIQUE_NAME@
+#define __os_r_sysattach __os_r_sysattach@DB_VERSION_UNIQUE_NAME@
+#define __os_r_sysdetach __os_r_sysdetach@DB_VERSION_UNIQUE_NAME@
+#define __os_mapfile __os_mapfile@DB_VERSION_UNIQUE_NAME@
+#define __os_unmapfile __os_unmapfile@DB_VERSION_UNIQUE_NAME@
+#define __db_oflags __db_oflags@DB_VERSION_UNIQUE_NAME@
+#define __db_omode __db_omode@DB_VERSION_UNIQUE_NAME@
+#define __os_open __os_open@DB_VERSION_UNIQUE_NAME@
+#ifdef HAVE_QNX
+#define __os_shmname __os_shmname@DB_VERSION_UNIQUE_NAME@
+#endif
+#define __os_r_attach __os_r_attach@DB_VERSION_UNIQUE_NAME@
+#define __os_r_detach __os_r_detach@DB_VERSION_UNIQUE_NAME@
+#define __os_rename __os_rename@DB_VERSION_UNIQUE_NAME@
+#define __os_isroot __os_isroot@DB_VERSION_UNIQUE_NAME@
+#define __db_rpath __db_rpath@DB_VERSION_UNIQUE_NAME@
+#define __os_io __os_io@DB_VERSION_UNIQUE_NAME@
+#define __os_read __os_read@DB_VERSION_UNIQUE_NAME@
+#define __os_write __os_write@DB_VERSION_UNIQUE_NAME@
+#define __os_seek __os_seek@DB_VERSION_UNIQUE_NAME@
+#define __os_sleep __os_sleep@DB_VERSION_UNIQUE_NAME@
+#define __os_spin __os_spin@DB_VERSION_UNIQUE_NAME@
+#define __os_yield __os_yield@DB_VERSION_UNIQUE_NAME@
+#define __os_exists __os_exists@DB_VERSION_UNIQUE_NAME@
+#define __os_ioinfo __os_ioinfo@DB_VERSION_UNIQUE_NAME@
+#define __os_tmpdir __os_tmpdir@DB_VERSION_UNIQUE_NAME@
+#define __os_region_unlink __os_region_unlink@DB_VERSION_UNIQUE_NAME@
+#define __os_unlink __os_unlink@DB_VERSION_UNIQUE_NAME@
+#if defined(DB_WIN32)
+#define __os_win32_errno __os_win32_errno@DB_VERSION_UNIQUE_NAME@
+#endif
+#define __os_fsync __os_fsync@DB_VERSION_UNIQUE_NAME@
+#define __os_openhandle __os_openhandle@DB_VERSION_UNIQUE_NAME@
+#define __os_closehandle __os_closehandle@DB_VERSION_UNIQUE_NAME@
+#define __os_io __os_io@DB_VERSION_UNIQUE_NAME@
+#define __os_read __os_read@DB_VERSION_UNIQUE_NAME@
+#define __os_write __os_write@DB_VERSION_UNIQUE_NAME@
+#define __os_exists __os_exists@DB_VERSION_UNIQUE_NAME@
+#define __os_ioinfo __os_ioinfo@DB_VERSION_UNIQUE_NAME@
+#define __os_is_winnt __os_is_winnt@DB_VERSION_UNIQUE_NAME@
+#define __qam_position __qam_position@DB_VERSION_UNIQUE_NAME@
+#define __qam_pitem __qam_pitem@DB_VERSION_UNIQUE_NAME@
+#define __qam_append __qam_append@DB_VERSION_UNIQUE_NAME@
+#define __qam_c_dup __qam_c_dup@DB_VERSION_UNIQUE_NAME@
+#define __qam_c_init __qam_c_init@DB_VERSION_UNIQUE_NAME@
+#define __qam_truncate __qam_truncate@DB_VERSION_UNIQUE_NAME@
+#define __qam_incfirst_log __qam_incfirst_log@DB_VERSION_UNIQUE_NAME@
+#define __qam_incfirst_getpgnos __qam_incfirst_getpgnos@DB_VERSION_UNIQUE_NAME@
+#define __qam_incfirst_print __qam_incfirst_print@DB_VERSION_UNIQUE_NAME@
+#define __qam_incfirst_read __qam_incfirst_read@DB_VERSION_UNIQUE_NAME@
+#define __qam_mvptr_log __qam_mvptr_log@DB_VERSION_UNIQUE_NAME@
+#define __qam_mvptr_getpgnos __qam_mvptr_getpgnos@DB_VERSION_UNIQUE_NAME@
+#define __qam_mvptr_print __qam_mvptr_print@DB_VERSION_UNIQUE_NAME@
+#define __qam_mvptr_read __qam_mvptr_read@DB_VERSION_UNIQUE_NAME@
+#define __qam_del_log __qam_del_log@DB_VERSION_UNIQUE_NAME@
+#define __qam_del_getpgnos __qam_del_getpgnos@DB_VERSION_UNIQUE_NAME@
+#define __qam_del_print __qam_del_print@DB_VERSION_UNIQUE_NAME@
+#define __qam_del_read __qam_del_read@DB_VERSION_UNIQUE_NAME@
+#define __qam_add_log __qam_add_log@DB_VERSION_UNIQUE_NAME@
+#define __qam_add_getpgnos __qam_add_getpgnos@DB_VERSION_UNIQUE_NAME@
+#define __qam_add_print __qam_add_print@DB_VERSION_UNIQUE_NAME@
+#define __qam_add_read __qam_add_read@DB_VERSION_UNIQUE_NAME@
+#define __qam_delext_log __qam_delext_log@DB_VERSION_UNIQUE_NAME@
+#define __qam_delext_getpgnos __qam_delext_getpgnos@DB_VERSION_UNIQUE_NAME@
+#define __qam_delext_print __qam_delext_print@DB_VERSION_UNIQUE_NAME@
+#define __qam_delext_read __qam_delext_read@DB_VERSION_UNIQUE_NAME@
+#define __qam_init_print __qam_init_print@DB_VERSION_UNIQUE_NAME@
+#define __qam_init_getpgnos __qam_init_getpgnos@DB_VERSION_UNIQUE_NAME@
+#define __qam_init_recover __qam_init_recover@DB_VERSION_UNIQUE_NAME@
+#define __qam_mswap __qam_mswap@DB_VERSION_UNIQUE_NAME@
+#define __qam_pgin_out __qam_pgin_out@DB_VERSION_UNIQUE_NAME@
+#define __qam_fprobe __qam_fprobe@DB_VERSION_UNIQUE_NAME@
+#define __qam_fclose __qam_fclose@DB_VERSION_UNIQUE_NAME@
+#define __qam_fremove __qam_fremove@DB_VERSION_UNIQUE_NAME@
+#define __qam_sync __qam_sync@DB_VERSION_UNIQUE_NAME@
+#define __qam_gen_filelist __qam_gen_filelist@DB_VERSION_UNIQUE_NAME@
+#define __qam_extent_names __qam_extent_names@DB_VERSION_UNIQUE_NAME@
+#define __qam_exid __qam_exid@DB_VERSION_UNIQUE_NAME@
+#define __qam_db_create __qam_db_create@DB_VERSION_UNIQUE_NAME@
+#define __qam_db_close __qam_db_close@DB_VERSION_UNIQUE_NAME@
+#define __db_prqueue __db_prqueue@DB_VERSION_UNIQUE_NAME@
+#define __qam_remove __qam_remove@DB_VERSION_UNIQUE_NAME@
+#define __qam_rename __qam_rename@DB_VERSION_UNIQUE_NAME@
+#define __qam_open __qam_open@DB_VERSION_UNIQUE_NAME@
+#define __qam_metachk __qam_metachk@DB_VERSION_UNIQUE_NAME@
+#define __qam_new_file __qam_new_file@DB_VERSION_UNIQUE_NAME@
+#define __qam_incfirst_recover __qam_incfirst_recover@DB_VERSION_UNIQUE_NAME@
+#define __qam_mvptr_recover __qam_mvptr_recover@DB_VERSION_UNIQUE_NAME@
+#define __qam_del_recover __qam_del_recover@DB_VERSION_UNIQUE_NAME@
+#define __qam_delext_recover __qam_delext_recover@DB_VERSION_UNIQUE_NAME@
+#define __qam_add_recover __qam_add_recover@DB_VERSION_UNIQUE_NAME@
+#define __qam_stat __qam_stat@DB_VERSION_UNIQUE_NAME@
+#define __qam_31_qammeta __qam_31_qammeta@DB_VERSION_UNIQUE_NAME@
+#define __qam_32_qammeta __qam_32_qammeta@DB_VERSION_UNIQUE_NAME@
+#define __qam_vrfy_meta __qam_vrfy_meta@DB_VERSION_UNIQUE_NAME@
+#define __qam_vrfy_data __qam_vrfy_data@DB_VERSION_UNIQUE_NAME@
+#define __qam_vrfy_structure __qam_vrfy_structure@DB_VERSION_UNIQUE_NAME@
+#define __rep_dbenv_create __rep_dbenv_create@DB_VERSION_UNIQUE_NAME@
+#define __rep_process_message __rep_process_message@DB_VERSION_UNIQUE_NAME@
+#define __rep_process_txn __rep_process_txn@DB_VERSION_UNIQUE_NAME@
+#define __rep_region_init __rep_region_init@DB_VERSION_UNIQUE_NAME@
+#define __rep_region_destroy __rep_region_destroy@DB_VERSION_UNIQUE_NAME@
+#define __rep_dbenv_close __rep_dbenv_close@DB_VERSION_UNIQUE_NAME@
+#define __rep_preclose __rep_preclose@DB_VERSION_UNIQUE_NAME@
+#define __rep_check_alloc __rep_check_alloc@DB_VERSION_UNIQUE_NAME@
+#define __rep_send_message __rep_send_message@DB_VERSION_UNIQUE_NAME@
+#define __rep_new_master __rep_new_master@DB_VERSION_UNIQUE_NAME@
+#define __rep_lockpgno_init __rep_lockpgno_init@DB_VERSION_UNIQUE_NAME@
+#define __rep_unlockpages __rep_unlockpages@DB_VERSION_UNIQUE_NAME@
+#define __rep_lockpages __rep_lockpages@DB_VERSION_UNIQUE_NAME@
+#define __rep_is_client __rep_is_client@DB_VERSION_UNIQUE_NAME@
+#define __rep_send_vote __rep_send_vote@DB_VERSION_UNIQUE_NAME@
+#define __rep_grow_sites __rep_grow_sites@DB_VERSION_UNIQUE_NAME@
+#define __rep_print_message __rep_print_message@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_envrpcserver __dbcl_envrpcserver@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_env_open_wrap __dbcl_env_open_wrap@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_db_open_wrap __dbcl_db_open_wrap@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_refresh __dbcl_refresh@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_retcopy __dbcl_retcopy@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_txn_end __dbcl_txn_end@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_txn_setup __dbcl_txn_setup@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_c_refresh __dbcl_c_refresh@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_c_setup __dbcl_c_setup@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_dbclose_common __dbcl_dbclose_common@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_env_alloc __dbcl_env_alloc@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_set_app_dispatch __dbcl_set_app_dispatch@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_env_cachesize __dbcl_env_cachesize@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_env_close __dbcl_env_close@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_env_create __dbcl_env_create@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_set_data_dir __dbcl_set_data_dir@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_env_dbremove __dbcl_env_dbremove@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_env_dbrename __dbcl_env_dbrename@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_env_encrypt __dbcl_env_encrypt@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_env_set_feedback __dbcl_env_set_feedback@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_env_flags __dbcl_env_flags@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_set_lg_bsize __dbcl_set_lg_bsize@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_set_lg_dir __dbcl_set_lg_dir@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_set_lg_max __dbcl_set_lg_max@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_set_lg_regionmax __dbcl_set_lg_regionmax@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_set_lk_conflict __dbcl_set_lk_conflict@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_set_lk_detect __dbcl_set_lk_detect@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_set_lk_max __dbcl_set_lk_max@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_set_lk_max_locks __dbcl_set_lk_max_locks@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_set_lk_max_lockers __dbcl_set_lk_max_lockers@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_set_lk_max_objects __dbcl_set_lk_max_objects@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_set_mp_mmapsize __dbcl_set_mp_mmapsize@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_env_open __dbcl_env_open@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_env_paniccall __dbcl_env_paniccall@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_env_remove __dbcl_env_remove@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_set_shm_key __dbcl_set_shm_key@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_set_tas_spins __dbcl_set_tas_spins@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_set_timeout __dbcl_set_timeout@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_set_tmp_dir __dbcl_set_tmp_dir@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_set_tx_max __dbcl_set_tx_max@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_set_tx_timestamp __dbcl_set_tx_timestamp@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_set_verbose __dbcl_set_verbose@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_txn_abort __dbcl_txn_abort@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_txn_begin __dbcl_txn_begin@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_txn_checkpoint __dbcl_txn_checkpoint@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_txn_commit __dbcl_txn_commit@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_txn_discard __dbcl_txn_discard@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_txn_prepare __dbcl_txn_prepare@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_txn_recover __dbcl_txn_recover@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_txn_stat __dbcl_txn_stat@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_txn_timeout __dbcl_txn_timeout@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_rep_elect __dbcl_rep_elect@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_rep_flush __dbcl_rep_flush@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_rep_process_message __dbcl_rep_process_message@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_rep_set_limit __dbcl_rep_set_limit@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_rep_set_request __dbcl_rep_set_request@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_rep_set_rep_transport __dbcl_rep_set_rep_transport@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_rep_start __dbcl_rep_start@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_rep_stat __dbcl_rep_stat@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_db_alloc __dbcl_db_alloc@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_db_associate __dbcl_db_associate@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_db_bt_compare __dbcl_db_bt_compare@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_db_bt_maxkey __dbcl_db_bt_maxkey@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_db_bt_minkey __dbcl_db_bt_minkey@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_db_bt_prefix __dbcl_db_bt_prefix@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_db_set_append_recno __dbcl_db_set_append_recno@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_db_cache_priority __dbcl_db_cache_priority@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_db_cachesize __dbcl_db_cachesize@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_db_close __dbcl_db_close@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_db_create __dbcl_db_create@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_db_del __dbcl_db_del@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_db_dup_compare __dbcl_db_dup_compare@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_db_encrypt __dbcl_db_encrypt@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_db_extentsize __dbcl_db_extentsize@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_db_fd __dbcl_db_fd@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_db_feedback __dbcl_db_feedback@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_db_flags __dbcl_db_flags@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_db_get __dbcl_db_get@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_db_h_ffactor __dbcl_db_h_ffactor@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_db_h_hash __dbcl_db_h_hash@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_db_h_nelem __dbcl_db_h_nelem@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_db_key_range __dbcl_db_key_range@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_db_lorder __dbcl_db_lorder@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_db_open __dbcl_db_open@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_db_pagesize __dbcl_db_pagesize@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_db_panic __dbcl_db_panic@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_db_pget __dbcl_db_pget@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_db_put __dbcl_db_put@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_db_re_delim __dbcl_db_re_delim@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_db_re_len __dbcl_db_re_len@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_db_re_pad __dbcl_db_re_pad@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_db_re_source __dbcl_db_re_source@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_db_remove __dbcl_db_remove@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_db_rename __dbcl_db_rename@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_db_stat __dbcl_db_stat@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_db_sync __dbcl_db_sync@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_db_truncate __dbcl_db_truncate@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_db_upgrade __dbcl_db_upgrade@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_db_verify __dbcl_db_verify@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_db_cursor __dbcl_db_cursor@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_db_join __dbcl_db_join@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_dbc_close __dbcl_dbc_close@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_dbc_count __dbcl_dbc_count@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_dbc_del __dbcl_dbc_del@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_dbc_dup __dbcl_dbc_dup@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_dbc_get __dbcl_dbc_get@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_dbc_pget __dbcl_dbc_pget@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_dbc_put __dbcl_dbc_put@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_lock_detect __dbcl_lock_detect@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_lock_get __dbcl_lock_get@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_lock_id __dbcl_lock_id@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_lock_id_free __dbcl_lock_id_free@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_lock_put __dbcl_lock_put@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_lock_stat __dbcl_lock_stat@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_lock_vec __dbcl_lock_vec@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_log_archive __dbcl_log_archive@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_log_cursor __dbcl_log_cursor@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_log_file __dbcl_log_file@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_log_flush __dbcl_log_flush@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_log_put __dbcl_log_put@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_log_stat __dbcl_log_stat@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_memp_fcreate __dbcl_memp_fcreate@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_memp_register __dbcl_memp_register@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_memp_stat __dbcl_memp_stat@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_memp_sync __dbcl_memp_sync@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_memp_trickle __dbcl_memp_trickle@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_env_close_ret __dbcl_env_close_ret@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_env_create_ret __dbcl_env_create_ret@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_env_open_ret __dbcl_env_open_ret@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_env_remove_ret __dbcl_env_remove_ret@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_txn_abort_ret __dbcl_txn_abort_ret@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_txn_begin_ret __dbcl_txn_begin_ret@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_txn_commit_ret __dbcl_txn_commit_ret@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_txn_discard_ret __dbcl_txn_discard_ret@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_txn_recover_ret __dbcl_txn_recover_ret@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_db_close_ret __dbcl_db_close_ret@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_db_create_ret __dbcl_db_create_ret@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_db_get_ret __dbcl_db_get_ret@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_db_key_range_ret __dbcl_db_key_range_ret@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_db_open_ret __dbcl_db_open_ret@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_db_pget_ret __dbcl_db_pget_ret@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_db_put_ret __dbcl_db_put_ret@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_db_remove_ret __dbcl_db_remove_ret@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_db_rename_ret __dbcl_db_rename_ret@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_db_stat_ret __dbcl_db_stat_ret@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_db_truncate_ret __dbcl_db_truncate_ret@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_db_cursor_ret __dbcl_db_cursor_ret@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_db_join_ret __dbcl_db_join_ret@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_dbc_close_ret __dbcl_dbc_close_ret@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_dbc_count_ret __dbcl_dbc_count_ret@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_dbc_dup_ret __dbcl_dbc_dup_ret@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_dbc_get_ret __dbcl_dbc_get_ret@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_dbc_pget_ret __dbcl_dbc_pget_ret@DB_VERSION_UNIQUE_NAME@
+#define __dbcl_dbc_put_ret __dbcl_dbc_put_ret@DB_VERSION_UNIQUE_NAME@
+#define __env_cachesize_proc __env_cachesize_proc@DB_VERSION_UNIQUE_NAME@
+#define __env_close_proc __env_close_proc@DB_VERSION_UNIQUE_NAME@
+#define __env_create_proc __env_create_proc@DB_VERSION_UNIQUE_NAME@
+#define __env_dbremove_proc __env_dbremove_proc@DB_VERSION_UNIQUE_NAME@
+#define __env_dbrename_proc __env_dbrename_proc@DB_VERSION_UNIQUE_NAME@
+#define __env_encrypt_proc __env_encrypt_proc@DB_VERSION_UNIQUE_NAME@
+#define __env_flags_proc __env_flags_proc@DB_VERSION_UNIQUE_NAME@
+#define __env_open_proc __env_open_proc@DB_VERSION_UNIQUE_NAME@
+#define __env_remove_proc __env_remove_proc@DB_VERSION_UNIQUE_NAME@
+#define __txn_abort_proc __txn_abort_proc@DB_VERSION_UNIQUE_NAME@
+#define __txn_begin_proc __txn_begin_proc@DB_VERSION_UNIQUE_NAME@
+#define __txn_commit_proc __txn_commit_proc@DB_VERSION_UNIQUE_NAME@
+#define __txn_discard_proc __txn_discard_proc@DB_VERSION_UNIQUE_NAME@
+#define __txn_prepare_proc __txn_prepare_proc@DB_VERSION_UNIQUE_NAME@
+#define __txn_recover_proc __txn_recover_proc@DB_VERSION_UNIQUE_NAME@
+#define __db_bt_maxkey_proc __db_bt_maxkey_proc@DB_VERSION_UNIQUE_NAME@
+#define __db_associate_proc __db_associate_proc@DB_VERSION_UNIQUE_NAME@
+#define __db_bt_minkey_proc __db_bt_minkey_proc@DB_VERSION_UNIQUE_NAME@
+#define __db_close_proc __db_close_proc@DB_VERSION_UNIQUE_NAME@
+#define __db_create_proc __db_create_proc@DB_VERSION_UNIQUE_NAME@
+#define __db_del_proc __db_del_proc@DB_VERSION_UNIQUE_NAME@
+#define __db_encrypt_proc __db_encrypt_proc@DB_VERSION_UNIQUE_NAME@
+#define __db_extentsize_proc __db_extentsize_proc@DB_VERSION_UNIQUE_NAME@
+#define __db_flags_proc __db_flags_proc@DB_VERSION_UNIQUE_NAME@
+#define __db_get_proc __db_get_proc@DB_VERSION_UNIQUE_NAME@
+#define __db_h_ffactor_proc __db_h_ffactor_proc@DB_VERSION_UNIQUE_NAME@
+#define __db_h_nelem_proc __db_h_nelem_proc@DB_VERSION_UNIQUE_NAME@
+#define __db_key_range_proc __db_key_range_proc@DB_VERSION_UNIQUE_NAME@
+#define __db_lorder_proc __db_lorder_proc@DB_VERSION_UNIQUE_NAME@
+#define __db_open_proc __db_open_proc@DB_VERSION_UNIQUE_NAME@
+#define __db_pagesize_proc __db_pagesize_proc@DB_VERSION_UNIQUE_NAME@
+#define __db_pget_proc __db_pget_proc@DB_VERSION_UNIQUE_NAME@
+#define __db_put_proc __db_put_proc@DB_VERSION_UNIQUE_NAME@
+#define __db_re_delim_proc __db_re_delim_proc@DB_VERSION_UNIQUE_NAME@
+#define __db_re_len_proc __db_re_len_proc@DB_VERSION_UNIQUE_NAME@
+#define __db_re_pad_proc __db_re_pad_proc@DB_VERSION_UNIQUE_NAME@
+#define __db_remove_proc __db_remove_proc@DB_VERSION_UNIQUE_NAME@
+#define __db_rename_proc __db_rename_proc@DB_VERSION_UNIQUE_NAME@
+#define __db_stat_proc __db_stat_proc@DB_VERSION_UNIQUE_NAME@
+#define __db_sync_proc __db_sync_proc@DB_VERSION_UNIQUE_NAME@
+#define __db_truncate_proc __db_truncate_proc@DB_VERSION_UNIQUE_NAME@
+#define __db_cursor_proc __db_cursor_proc@DB_VERSION_UNIQUE_NAME@
+#define __db_join_proc __db_join_proc@DB_VERSION_UNIQUE_NAME@
+#define __dbc_close_proc __dbc_close_proc@DB_VERSION_UNIQUE_NAME@
+#define __dbc_count_proc __dbc_count_proc@DB_VERSION_UNIQUE_NAME@
+#define __dbc_del_proc __dbc_del_proc@DB_VERSION_UNIQUE_NAME@
+#define __dbc_dup_proc __dbc_dup_proc@DB_VERSION_UNIQUE_NAME@
+#define __dbc_get_proc __dbc_get_proc@DB_VERSION_UNIQUE_NAME@
+#define __dbc_pget_proc __dbc_pget_proc@DB_VERSION_UNIQUE_NAME@
+#define __dbc_put_proc __dbc_put_proc@DB_VERSION_UNIQUE_NAME@
+#define __dbsrv_settimeout __dbsrv_settimeout@DB_VERSION_UNIQUE_NAME@
+#define __dbsrv_timeout __dbsrv_timeout@DB_VERSION_UNIQUE_NAME@
+#define __dbclear_ctp __dbclear_ctp@DB_VERSION_UNIQUE_NAME@
+#define __dbdel_ctp __dbdel_ctp@DB_VERSION_UNIQUE_NAME@
+#define new_ct_ent new_ct_ent@DB_VERSION_UNIQUE_NAME@
+#define get_tableent get_tableent@DB_VERSION_UNIQUE_NAME@
+#define __dbsrv_sharedb __dbsrv_sharedb@DB_VERSION_UNIQUE_NAME@
+#define __dbsrv_shareenv __dbsrv_shareenv@DB_VERSION_UNIQUE_NAME@
+#define __dbsrv_active __dbsrv_active@DB_VERSION_UNIQUE_NAME@
+#define __db_close_int __db_close_int@DB_VERSION_UNIQUE_NAME@
+#define __dbc_close_int __dbc_close_int@DB_VERSION_UNIQUE_NAME@
+#define __dbenv_close_int __dbenv_close_int@DB_VERSION_UNIQUE_NAME@
+#define get_home get_home@DB_VERSION_UNIQUE_NAME@
+#define bdb_HCommand bdb_HCommand@DB_VERSION_UNIQUE_NAME@
+#if DB_DBM_HSEARCH != 0
+#define bdb_NdbmOpen bdb_NdbmOpen@DB_VERSION_UNIQUE_NAME@
+#endif
+#if DB_DBM_HSEARCH != 0
+#define bdb_DbmCommand bdb_DbmCommand@DB_VERSION_UNIQUE_NAME@
+#endif
+#define ndbm_Cmd ndbm_Cmd@DB_VERSION_UNIQUE_NAME@
+#define _DbInfoDelete _DbInfoDelete@DB_VERSION_UNIQUE_NAME@
+#define db_Cmd db_Cmd@DB_VERSION_UNIQUE_NAME@
+#define dbc_Cmd dbc_Cmd@DB_VERSION_UNIQUE_NAME@
+#define env_Cmd env_Cmd@DB_VERSION_UNIQUE_NAME@
+#define tcl_EnvRemove tcl_EnvRemove@DB_VERSION_UNIQUE_NAME@
+#define tcl_EnvVerbose tcl_EnvVerbose@DB_VERSION_UNIQUE_NAME@
+#define tcl_EnvAttr tcl_EnvAttr@DB_VERSION_UNIQUE_NAME@
+#define tcl_EnvTest tcl_EnvTest@DB_VERSION_UNIQUE_NAME@
+#define _NewInfo _NewInfo@DB_VERSION_UNIQUE_NAME@
+#define _NameToPtr _NameToPtr@DB_VERSION_UNIQUE_NAME@
+#define _PtrToInfo _PtrToInfo@DB_VERSION_UNIQUE_NAME@
+#define _NameToInfo _NameToInfo@DB_VERSION_UNIQUE_NAME@
+#define _SetInfoData _SetInfoData@DB_VERSION_UNIQUE_NAME@
+#define _DeleteInfo _DeleteInfo@DB_VERSION_UNIQUE_NAME@
+#define _SetListElem _SetListElem@DB_VERSION_UNIQUE_NAME@
+#define _SetListElemInt _SetListElemInt@DB_VERSION_UNIQUE_NAME@
+#define _SetListRecnoElem _SetListRecnoElem@DB_VERSION_UNIQUE_NAME@
+#define _Set3DBTList _Set3DBTList@DB_VERSION_UNIQUE_NAME@
+#define _SetMultiList _SetMultiList@DB_VERSION_UNIQUE_NAME@
+#define _GetGlobPrefix _GetGlobPrefix@DB_VERSION_UNIQUE_NAME@
+#define _ReturnSetup _ReturnSetup@DB_VERSION_UNIQUE_NAME@
+#define _ErrorSetup _ErrorSetup@DB_VERSION_UNIQUE_NAME@
+#define _ErrorFunc _ErrorFunc@DB_VERSION_UNIQUE_NAME@
+#define _GetLsn _GetLsn@DB_VERSION_UNIQUE_NAME@
+#define _GetUInt32 _GetUInt32@DB_VERSION_UNIQUE_NAME@
+#define _GetFlagsList _GetFlagsList@DB_VERSION_UNIQUE_NAME@
+#define _debug_check _debug_check@DB_VERSION_UNIQUE_NAME@
+#define _CopyObjBytes _CopyObjBytes@DB_VERSION_UNIQUE_NAME@
+#define tcl_LockDetect tcl_LockDetect@DB_VERSION_UNIQUE_NAME@
+#define tcl_LockGet tcl_LockGet@DB_VERSION_UNIQUE_NAME@
+#define tcl_LockStat tcl_LockStat@DB_VERSION_UNIQUE_NAME@
+#define tcl_LockTimeout tcl_LockTimeout@DB_VERSION_UNIQUE_NAME@
+#define tcl_LockVec tcl_LockVec@DB_VERSION_UNIQUE_NAME@
+#define tcl_LogArchive tcl_LogArchive@DB_VERSION_UNIQUE_NAME@
+#define tcl_LogCompare tcl_LogCompare@DB_VERSION_UNIQUE_NAME@
+#define tcl_LogFile tcl_LogFile@DB_VERSION_UNIQUE_NAME@
+#define tcl_LogFlush tcl_LogFlush@DB_VERSION_UNIQUE_NAME@
+#define tcl_LogGet tcl_LogGet@DB_VERSION_UNIQUE_NAME@
+#define tcl_LogPut tcl_LogPut@DB_VERSION_UNIQUE_NAME@
+#define tcl_LogStat tcl_LogStat@DB_VERSION_UNIQUE_NAME@
+#define logc_Cmd logc_Cmd@DB_VERSION_UNIQUE_NAME@
+#define _MpInfoDelete _MpInfoDelete@DB_VERSION_UNIQUE_NAME@
+#define tcl_MpSync tcl_MpSync@DB_VERSION_UNIQUE_NAME@
+#define tcl_MpTrickle tcl_MpTrickle@DB_VERSION_UNIQUE_NAME@
+#define tcl_Mp tcl_Mp@DB_VERSION_UNIQUE_NAME@
+#define tcl_MpStat tcl_MpStat@DB_VERSION_UNIQUE_NAME@
+#define tcl_RepElect tcl_RepElect@DB_VERSION_UNIQUE_NAME@
+#define tcl_RepFlush tcl_RepFlush@DB_VERSION_UNIQUE_NAME@
+#define tcl_RepLimit tcl_RepLimit@DB_VERSION_UNIQUE_NAME@
+#define tcl_RepRequest tcl_RepRequest@DB_VERSION_UNIQUE_NAME@
+#define tcl_RepStart tcl_RepStart@DB_VERSION_UNIQUE_NAME@
+#define tcl_RepProcessMessage tcl_RepProcessMessage@DB_VERSION_UNIQUE_NAME@
+#define tcl_RepStat tcl_RepStat@DB_VERSION_UNIQUE_NAME@
+#define _TxnInfoDelete _TxnInfoDelete@DB_VERSION_UNIQUE_NAME@
+#define tcl_TxnCheckpoint tcl_TxnCheckpoint@DB_VERSION_UNIQUE_NAME@
+#define tcl_Txn tcl_Txn@DB_VERSION_UNIQUE_NAME@
+#define tcl_TxnStat tcl_TxnStat@DB_VERSION_UNIQUE_NAME@
+#define tcl_TxnTimeout tcl_TxnTimeout@DB_VERSION_UNIQUE_NAME@
+#define tcl_TxnRecover tcl_TxnRecover@DB_VERSION_UNIQUE_NAME@
+#define bdb_RandCommand bdb_RandCommand@DB_VERSION_UNIQUE_NAME@
+#define tcl_Mutex tcl_Mutex@DB_VERSION_UNIQUE_NAME@
+#define __txn_begin __txn_begin@DB_VERSION_UNIQUE_NAME@
+#define __txn_xa_begin __txn_xa_begin@DB_VERSION_UNIQUE_NAME@
+#define __txn_compensate_begin __txn_compensate_begin@DB_VERSION_UNIQUE_NAME@
+#define __txn_commit __txn_commit@DB_VERSION_UNIQUE_NAME@
+#define __txn_abort __txn_abort@DB_VERSION_UNIQUE_NAME@
+#define __txn_discard __txn_discard@DB_VERSION_UNIQUE_NAME@
+#define __txn_prepare __txn_prepare@DB_VERSION_UNIQUE_NAME@
+#define __txn_id __txn_id@DB_VERSION_UNIQUE_NAME@
+#define __txn_checkpoint __txn_checkpoint@DB_VERSION_UNIQUE_NAME@
+#define __txn_getckp __txn_getckp@DB_VERSION_UNIQUE_NAME@
+#define __txn_activekids __txn_activekids@DB_VERSION_UNIQUE_NAME@
+#define __txn_force_abort __txn_force_abort@DB_VERSION_UNIQUE_NAME@
+#define __txn_preclose __txn_preclose@DB_VERSION_UNIQUE_NAME@
+#define __txn_reset __txn_reset@DB_VERSION_UNIQUE_NAME@
+#define __txn_regop_log __txn_regop_log@DB_VERSION_UNIQUE_NAME@
+#define __txn_regop_getpgnos __txn_regop_getpgnos@DB_VERSION_UNIQUE_NAME@
+#define __txn_regop_print __txn_regop_print@DB_VERSION_UNIQUE_NAME@
+#define __txn_regop_read __txn_regop_read@DB_VERSION_UNIQUE_NAME@
+#define __txn_ckp_log __txn_ckp_log@DB_VERSION_UNIQUE_NAME@
+#define __txn_ckp_getpgnos __txn_ckp_getpgnos@DB_VERSION_UNIQUE_NAME@
+#define __txn_ckp_print __txn_ckp_print@DB_VERSION_UNIQUE_NAME@
+#define __txn_ckp_read __txn_ckp_read@DB_VERSION_UNIQUE_NAME@
+#define __txn_child_log __txn_child_log@DB_VERSION_UNIQUE_NAME@
+#define __txn_child_getpgnos __txn_child_getpgnos@DB_VERSION_UNIQUE_NAME@
+#define __txn_child_print __txn_child_print@DB_VERSION_UNIQUE_NAME@
+#define __txn_child_read __txn_child_read@DB_VERSION_UNIQUE_NAME@
+#define __txn_xa_regop_log __txn_xa_regop_log@DB_VERSION_UNIQUE_NAME@
+#define __txn_xa_regop_getpgnos __txn_xa_regop_getpgnos@DB_VERSION_UNIQUE_NAME@
+#define __txn_xa_regop_print __txn_xa_regop_print@DB_VERSION_UNIQUE_NAME@
+#define __txn_xa_regop_read __txn_xa_regop_read@DB_VERSION_UNIQUE_NAME@
+#define __txn_recycle_log __txn_recycle_log@DB_VERSION_UNIQUE_NAME@
+#define __txn_recycle_getpgnos __txn_recycle_getpgnos@DB_VERSION_UNIQUE_NAME@
+#define __txn_recycle_print __txn_recycle_print@DB_VERSION_UNIQUE_NAME@
+#define __txn_recycle_read __txn_recycle_read@DB_VERSION_UNIQUE_NAME@
+#define __txn_init_print __txn_init_print@DB_VERSION_UNIQUE_NAME@
+#define __txn_init_getpgnos __txn_init_getpgnos@DB_VERSION_UNIQUE_NAME@
+#define __txn_init_recover __txn_init_recover@DB_VERSION_UNIQUE_NAME@
+#define __txn_dbenv_create __txn_dbenv_create@DB_VERSION_UNIQUE_NAME@
+#define __txn_regop_recover __txn_regop_recover@DB_VERSION_UNIQUE_NAME@
+#define __txn_xa_regop_recover __txn_xa_regop_recover@DB_VERSION_UNIQUE_NAME@
+#define __txn_ckp_recover __txn_ckp_recover@DB_VERSION_UNIQUE_NAME@
+#define __txn_child_recover __txn_child_recover@DB_VERSION_UNIQUE_NAME@
+#define __txn_restore_txn __txn_restore_txn@DB_VERSION_UNIQUE_NAME@
+#define __txn_recycle_recover __txn_recycle_recover@DB_VERSION_UNIQUE_NAME@
+#define __txn_continue __txn_continue@DB_VERSION_UNIQUE_NAME@
+#define __txn_map_gid __txn_map_gid@DB_VERSION_UNIQUE_NAME@
+#define __txn_recover __txn_recover@DB_VERSION_UNIQUE_NAME@
+#define __txn_get_prepared __txn_get_prepared@DB_VERSION_UNIQUE_NAME@
+#define __txn_open __txn_open@DB_VERSION_UNIQUE_NAME@
+#define __txn_dbenv_refresh __txn_dbenv_refresh@DB_VERSION_UNIQUE_NAME@
+#define __txn_region_destroy __txn_region_destroy@DB_VERSION_UNIQUE_NAME@
+#define __txn_id_set __txn_id_set@DB_VERSION_UNIQUE_NAME@
+#define __txn_stat __txn_stat@DB_VERSION_UNIQUE_NAME@
+#define __txn_remevent __txn_remevent@DB_VERSION_UNIQUE_NAME@
+#define __txn_lockevent __txn_lockevent@DB_VERSION_UNIQUE_NAME@
+#define __txn_remlock __txn_remlock@DB_VERSION_UNIQUE_NAME@
+#define __txn_doevents __txn_doevents@DB_VERSION_UNIQUE_NAME@
+#define __db_xa_create __db_xa_create@DB_VERSION_UNIQUE_NAME@
+#define __db_rmid_to_env __db_rmid_to_env@DB_VERSION_UNIQUE_NAME@
+#define __db_xid_to_txn __db_xid_to_txn@DB_VERSION_UNIQUE_NAME@
+#define __db_map_rmid __db_map_rmid@DB_VERSION_UNIQUE_NAME@
+#define __db_unmap_rmid __db_unmap_rmid@DB_VERSION_UNIQUE_NAME@
+#define __db_map_xid __db_map_xid@DB_VERSION_UNIQUE_NAME@
+#define __db_unmap_xid __db_unmap_xid@DB_VERSION_UNIQUE_NAME@
+#define xdr___env_cachesize_msg xdr___env_cachesize_msg@DB_VERSION_UNIQUE_NAME@
+#define xdr___env_cachesize_reply xdr___env_cachesize_reply@DB_VERSION_UNIQUE_NAME@
+#define xdr___env_close_msg xdr___env_close_msg@DB_VERSION_UNIQUE_NAME@
+#define xdr___env_close_reply xdr___env_close_reply@DB_VERSION_UNIQUE_NAME@
+#define xdr___env_create_msg xdr___env_create_msg@DB_VERSION_UNIQUE_NAME@
+#define xdr___env_create_reply xdr___env_create_reply@DB_VERSION_UNIQUE_NAME@
+#define xdr___env_dbremove_msg xdr___env_dbremove_msg@DB_VERSION_UNIQUE_NAME@
+#define xdr___env_dbremove_reply xdr___env_dbremove_reply@DB_VERSION_UNIQUE_NAME@
+#define xdr___env_dbrename_msg xdr___env_dbrename_msg@DB_VERSION_UNIQUE_NAME@
+#define xdr___env_dbrename_reply xdr___env_dbrename_reply@DB_VERSION_UNIQUE_NAME@
+#define xdr___env_encrypt_msg xdr___env_encrypt_msg@DB_VERSION_UNIQUE_NAME@
+#define xdr___env_encrypt_reply xdr___env_encrypt_reply@DB_VERSION_UNIQUE_NAME@
+#define xdr___env_flags_msg xdr___env_flags_msg@DB_VERSION_UNIQUE_NAME@
+#define xdr___env_flags_reply xdr___env_flags_reply@DB_VERSION_UNIQUE_NAME@
+#define xdr___env_open_msg xdr___env_open_msg@DB_VERSION_UNIQUE_NAME@
+#define xdr___env_open_reply xdr___env_open_reply@DB_VERSION_UNIQUE_NAME@
+#define xdr___env_remove_msg xdr___env_remove_msg@DB_VERSION_UNIQUE_NAME@
+#define xdr___env_remove_reply xdr___env_remove_reply@DB_VERSION_UNIQUE_NAME@
+#define xdr___txn_abort_msg xdr___txn_abort_msg@DB_VERSION_UNIQUE_NAME@
+#define xdr___txn_abort_reply xdr___txn_abort_reply@DB_VERSION_UNIQUE_NAME@
+#define xdr___txn_begin_msg xdr___txn_begin_msg@DB_VERSION_UNIQUE_NAME@
+#define xdr___txn_begin_reply xdr___txn_begin_reply@DB_VERSION_UNIQUE_NAME@
+#define xdr___txn_commit_msg xdr___txn_commit_msg@DB_VERSION_UNIQUE_NAME@
+#define xdr___txn_commit_reply xdr___txn_commit_reply@DB_VERSION_UNIQUE_NAME@
+#define xdr___txn_discard_msg xdr___txn_discard_msg@DB_VERSION_UNIQUE_NAME@
+#define xdr___txn_discard_reply xdr___txn_discard_reply@DB_VERSION_UNIQUE_NAME@
+#define xdr___txn_prepare_msg xdr___txn_prepare_msg@DB_VERSION_UNIQUE_NAME@
+#define xdr___txn_prepare_reply xdr___txn_prepare_reply@DB_VERSION_UNIQUE_NAME@
+#define xdr___txn_recover_msg xdr___txn_recover_msg@DB_VERSION_UNIQUE_NAME@
+#define xdr___txn_recover_reply xdr___txn_recover_reply@DB_VERSION_UNIQUE_NAME@
+#define xdr___db_associate_msg xdr___db_associate_msg@DB_VERSION_UNIQUE_NAME@
+#define xdr___db_associate_reply xdr___db_associate_reply@DB_VERSION_UNIQUE_NAME@
+#define xdr___db_bt_maxkey_msg xdr___db_bt_maxkey_msg@DB_VERSION_UNIQUE_NAME@
+#define xdr___db_bt_maxkey_reply xdr___db_bt_maxkey_reply@DB_VERSION_UNIQUE_NAME@
+#define xdr___db_bt_minkey_msg xdr___db_bt_minkey_msg@DB_VERSION_UNIQUE_NAME@
+#define xdr___db_bt_minkey_reply xdr___db_bt_minkey_reply@DB_VERSION_UNIQUE_NAME@
+#define xdr___db_close_msg xdr___db_close_msg@DB_VERSION_UNIQUE_NAME@
+#define xdr___db_close_reply xdr___db_close_reply@DB_VERSION_UNIQUE_NAME@
+#define xdr___db_create_msg xdr___db_create_msg@DB_VERSION_UNIQUE_NAME@
+#define xdr___db_create_reply xdr___db_create_reply@DB_VERSION_UNIQUE_NAME@
+#define xdr___db_del_msg xdr___db_del_msg@DB_VERSION_UNIQUE_NAME@
+#define xdr___db_del_reply xdr___db_del_reply@DB_VERSION_UNIQUE_NAME@
+#define xdr___db_encrypt_msg xdr___db_encrypt_msg@DB_VERSION_UNIQUE_NAME@
+#define xdr___db_encrypt_reply xdr___db_encrypt_reply@DB_VERSION_UNIQUE_NAME@
+#define xdr___db_extentsize_msg xdr___db_extentsize_msg@DB_VERSION_UNIQUE_NAME@
+#define xdr___db_extentsize_reply xdr___db_extentsize_reply@DB_VERSION_UNIQUE_NAME@
+#define xdr___db_flags_msg xdr___db_flags_msg@DB_VERSION_UNIQUE_NAME@
+#define xdr___db_flags_reply xdr___db_flags_reply@DB_VERSION_UNIQUE_NAME@
+#define xdr___db_get_msg xdr___db_get_msg@DB_VERSION_UNIQUE_NAME@
+#define xdr___db_get_reply xdr___db_get_reply@DB_VERSION_UNIQUE_NAME@
+#define xdr___db_h_ffactor_msg xdr___db_h_ffactor_msg@DB_VERSION_UNIQUE_NAME@
+#define xdr___db_h_ffactor_reply xdr___db_h_ffactor_reply@DB_VERSION_UNIQUE_NAME@
+#define xdr___db_h_nelem_msg xdr___db_h_nelem_msg@DB_VERSION_UNIQUE_NAME@
+#define xdr___db_h_nelem_reply xdr___db_h_nelem_reply@DB_VERSION_UNIQUE_NAME@
+#define xdr___db_key_range_msg xdr___db_key_range_msg@DB_VERSION_UNIQUE_NAME@
+#define xdr___db_key_range_reply xdr___db_key_range_reply@DB_VERSION_UNIQUE_NAME@
+#define xdr___db_lorder_msg xdr___db_lorder_msg@DB_VERSION_UNIQUE_NAME@
+#define xdr___db_lorder_reply xdr___db_lorder_reply@DB_VERSION_UNIQUE_NAME@
+#define xdr___db_open_msg xdr___db_open_msg@DB_VERSION_UNIQUE_NAME@
+#define xdr___db_open_reply xdr___db_open_reply@DB_VERSION_UNIQUE_NAME@
+#define xdr___db_pagesize_msg xdr___db_pagesize_msg@DB_VERSION_UNIQUE_NAME@
+#define xdr___db_pagesize_reply xdr___db_pagesize_reply@DB_VERSION_UNIQUE_NAME@
+#define xdr___db_pget_msg xdr___db_pget_msg@DB_VERSION_UNIQUE_NAME@
+#define xdr___db_pget_reply xdr___db_pget_reply@DB_VERSION_UNIQUE_NAME@
+#define xdr___db_put_msg xdr___db_put_msg@DB_VERSION_UNIQUE_NAME@
+#define xdr___db_put_reply xdr___db_put_reply@DB_VERSION_UNIQUE_NAME@
+#define xdr___db_re_delim_msg xdr___db_re_delim_msg@DB_VERSION_UNIQUE_NAME@
+#define xdr___db_re_delim_reply xdr___db_re_delim_reply@DB_VERSION_UNIQUE_NAME@
+#define xdr___db_re_len_msg xdr___db_re_len_msg@DB_VERSION_UNIQUE_NAME@
+#define xdr___db_re_len_reply xdr___db_re_len_reply@DB_VERSION_UNIQUE_NAME@
+#define xdr___db_re_pad_msg xdr___db_re_pad_msg@DB_VERSION_UNIQUE_NAME@
+#define xdr___db_re_pad_reply xdr___db_re_pad_reply@DB_VERSION_UNIQUE_NAME@
+#define xdr___db_remove_msg xdr___db_remove_msg@DB_VERSION_UNIQUE_NAME@
+#define xdr___db_remove_reply xdr___db_remove_reply@DB_VERSION_UNIQUE_NAME@
+#define xdr___db_rename_msg xdr___db_rename_msg@DB_VERSION_UNIQUE_NAME@
+#define xdr___db_rename_reply xdr___db_rename_reply@DB_VERSION_UNIQUE_NAME@
+#define xdr___db_stat_msg xdr___db_stat_msg@DB_VERSION_UNIQUE_NAME@
+#define xdr___db_stat_reply xdr___db_stat_reply@DB_VERSION_UNIQUE_NAME@
+#define xdr___db_sync_msg xdr___db_sync_msg@DB_VERSION_UNIQUE_NAME@
+#define xdr___db_sync_reply xdr___db_sync_reply@DB_VERSION_UNIQUE_NAME@
+#define xdr___db_truncate_msg xdr___db_truncate_msg@DB_VERSION_UNIQUE_NAME@
+#define xdr___db_truncate_reply xdr___db_truncate_reply@DB_VERSION_UNIQUE_NAME@
+#define xdr___db_cursor_msg xdr___db_cursor_msg@DB_VERSION_UNIQUE_NAME@
+#define xdr___db_cursor_reply xdr___db_cursor_reply@DB_VERSION_UNIQUE_NAME@
+#define xdr___db_join_msg xdr___db_join_msg@DB_VERSION_UNIQUE_NAME@
+#define xdr___db_join_reply xdr___db_join_reply@DB_VERSION_UNIQUE_NAME@
+#define xdr___dbc_close_msg xdr___dbc_close_msg@DB_VERSION_UNIQUE_NAME@
+#define xdr___dbc_close_reply xdr___dbc_close_reply@DB_VERSION_UNIQUE_NAME@
+#define xdr___dbc_count_msg xdr___dbc_count_msg@DB_VERSION_UNIQUE_NAME@
+#define xdr___dbc_count_reply xdr___dbc_count_reply@DB_VERSION_UNIQUE_NAME@
+#define xdr___dbc_del_msg xdr___dbc_del_msg@DB_VERSION_UNIQUE_NAME@
+#define xdr___dbc_del_reply xdr___dbc_del_reply@DB_VERSION_UNIQUE_NAME@
+#define xdr___dbc_dup_msg xdr___dbc_dup_msg@DB_VERSION_UNIQUE_NAME@
+#define xdr___dbc_dup_reply xdr___dbc_dup_reply@DB_VERSION_UNIQUE_NAME@
+#define xdr___dbc_get_msg xdr___dbc_get_msg@DB_VERSION_UNIQUE_NAME@
+#define xdr___dbc_get_reply xdr___dbc_get_reply@DB_VERSION_UNIQUE_NAME@
+#define xdr___dbc_pget_msg xdr___dbc_pget_msg@DB_VERSION_UNIQUE_NAME@
+#define xdr___dbc_pget_reply xdr___dbc_pget_reply@DB_VERSION_UNIQUE_NAME@
+#define xdr___dbc_put_msg xdr___dbc_put_msg@DB_VERSION_UNIQUE_NAME@
+#define xdr___dbc_put_reply xdr___dbc_put_reply@DB_VERSION_UNIQUE_NAME@
+#define __db_global_values __db_global_values@DB_VERSION_UNIQUE_NAME@
+#define __db_jump __db_jump@DB_VERSION_UNIQUE_NAME@
+
+#endif /* !_DB_INT_DEF_IN_ */
diff --git a/bdb/dbinc_auto/lock_ext.h b/bdb/dbinc_auto/lock_ext.h
new file mode 100644
index 00000000000..be6b1d06d1e
--- /dev/null
+++ b/bdb/dbinc_auto/lock_ext.h
@@ -0,0 +1,41 @@
+/* DO NOT EDIT: automatically built by dist/s_include. */
+#ifndef _lock_ext_h_
+#define _lock_ext_h_
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+int __lock_id __P((DB_ENV *, u_int32_t *));
+int __lock_id_free __P((DB_ENV *, u_int32_t));
+int __lock_vec __P((DB_ENV *, u_int32_t, u_int32_t, DB_LOCKREQ *, int, DB_LOCKREQ **));
+int __lock_get __P((DB_ENV *, u_int32_t, u_int32_t, const DBT *, db_lockmode_t, DB_LOCK *));
+int __lock_put __P((DB_ENV *, DB_LOCK *));
+int __lock_downgrade __P((DB_ENV *, DB_LOCK *, db_lockmode_t, u_int32_t));
+int __lock_addfamilylocker __P((DB_ENV *, u_int32_t, u_int32_t));
+int __lock_freefamilylocker __P((DB_LOCKTAB *, u_int32_t));
+int __lock_set_timeout __P(( DB_ENV *, u_int32_t, db_timeout_t, u_int32_t));
+int __lock_inherit_timeout __P(( DB_ENV *, u_int32_t, u_int32_t));
+int __lock_getlocker __P((DB_LOCKTAB *, u_int32_t, u_int32_t, int, DB_LOCKER **));
+int __lock_promote __P((DB_LOCKTAB *, DB_LOCKOBJ *, u_int32_t));
+int __lock_expired __P((DB_ENV *, db_timeval_t *, db_timeval_t *));
+int __lock_detect __P((DB_ENV *, u_int32_t, u_int32_t, int *));
+void __lock_dbenv_create __P((DB_ENV *));
+void __lock_dbenv_close __P((DB_ENV *));
+int __lock_open __P((DB_ENV *));
+int __lock_dbenv_refresh __P((DB_ENV *));
+void __lock_region_destroy __P((DB_ENV *, REGINFO *));
+int __lock_id_set __P((DB_ENV *, u_int32_t, u_int32_t));
+int __lock_stat __P((DB_ENV *, DB_LOCK_STAT **, u_int32_t));
+int __lock_dump_region __P((DB_ENV *, char *, FILE *));
+void __lock_printlock __P((DB_LOCKTAB *, struct __db_lock *, int));
+int __lock_cmp __P((const DBT *, DB_LOCKOBJ *));
+int __lock_locker_cmp __P((u_int32_t, DB_LOCKER *));
+u_int32_t __lock_ohash __P((const DBT *));
+u_int32_t __lock_lhash __P((DB_LOCKOBJ *));
+u_int32_t __lock_locker_hash __P((u_int32_t));
+
+#if defined(__cplusplus)
+}
+#endif
+#endif /* !_lock_ext_h_ */
diff --git a/bdb/dbinc_auto/log_ext.h b/bdb/dbinc_auto/log_ext.h
new file mode 100644
index 00000000000..6fc69afd2b4
--- /dev/null
+++ b/bdb/dbinc_auto/log_ext.h
@@ -0,0 +1,32 @@
+/* DO NOT EDIT: automatically built by dist/s_include. */
+#ifndef _log_ext_h_
+#define _log_ext_h_
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+int __log_open __P((DB_ENV *));
+int __log_find __P((DB_LOG *, int, u_int32_t *, logfile_validity *));
+int __log_valid __P((DB_LOG *, u_int32_t, int, logfile_validity *));
+int __log_dbenv_refresh __P((DB_ENV *));
+int __log_stat __P((DB_ENV *, DB_LOG_STAT **, u_int32_t));
+void __log_get_cached_ckp_lsn __P((DB_ENV *, DB_LSN *));
+void __log_region_destroy __P((DB_ENV *, REGINFO *));
+int __log_vtruncate __P((DB_ENV *, DB_LSN *, DB_LSN *));
+int __log_is_outdated __P((DB_ENV *dbenv, u_int32_t fnum, int *outdatedp));
+int __log_archive __P((DB_ENV *, char **[], u_int32_t));
+int __log_cursor __P((DB_ENV *, DB_LOGC **, u_int32_t));
+void __log_dbenv_create __P((DB_ENV *));
+int __log_put __P((DB_ENV *, DB_LSN *, const DBT *, u_int32_t));
+void __log_txn_lsn __P((DB_ENV *, DB_LSN *, u_int32_t *, u_int32_t *));
+int __log_newfile __P((DB_LOG *, DB_LSN *));
+int __log_flush __P((DB_ENV *, const DB_LSN *));
+int __log_file __P((DB_ENV *, const DB_LSN *, char *, size_t));
+int __log_name __P((DB_LOG *, u_int32_t, char **, DB_FH *, u_int32_t));
+int __log_rep_put __P((DB_ENV *, DB_LSN *, const DBT *));
+
+#if defined(__cplusplus)
+}
+#endif
+#endif /* !_log_ext_h_ */
diff --git a/bdb/dbinc_auto/mp_ext.h b/bdb/dbinc_auto/mp_ext.h
new file mode 100644
index 00000000000..ceadb3d9adc
--- /dev/null
+++ b/bdb/dbinc_auto/mp_ext.h
@@ -0,0 +1,44 @@
+/* DO NOT EDIT: automatically built by dist/s_include. */
+#ifndef _mp_ext_h_
+#define _mp_ext_h_
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+int __memp_alloc __P((DB_MPOOL *, REGINFO *, MPOOLFILE *, size_t, roff_t *, void *));
+#ifdef DIAGNOSTIC
+void __memp_check_order __P((DB_MPOOL_HASH *));
+#endif
+int __memp_bhwrite __P((DB_MPOOL *, DB_MPOOL_HASH *, MPOOLFILE *, BH *, int));
+int __memp_pgread __P((DB_MPOOLFILE *, DB_MUTEX *, BH *, int));
+int __memp_pg __P((DB_MPOOLFILE *, BH *, int));
+void __memp_bhfree __P((DB_MPOOL *, DB_MPOOL_HASH *, BH *, int));
+int __memp_fget __P((DB_MPOOLFILE *, db_pgno_t *, u_int32_t, void *));
+int __memp_fcreate __P((DB_ENV *, DB_MPOOLFILE **, u_int32_t));
+int __memp_fopen_int __P((DB_MPOOLFILE *, MPOOLFILE *, const char *, u_int32_t, int, size_t));
+int __memp_fclose_int __P((DB_MPOOLFILE *, u_int32_t));
+int __memp_mf_discard __P((DB_MPOOL *, MPOOLFILE *));
+char * __memp_fn __P((DB_MPOOLFILE *));
+char * __memp_fns __P((DB_MPOOL *, MPOOLFILE *));
+int __memp_fput __P((DB_MPOOLFILE *, void *, u_int32_t));
+int __memp_fset __P((DB_MPOOLFILE *, void *, u_int32_t));
+void __memp_dbenv_create __P((DB_ENV *));
+int __memp_open __P((DB_ENV *));
+int __memp_dbenv_refresh __P((DB_ENV *));
+void __mpool_region_destroy __P((DB_ENV *, REGINFO *));
+int __memp_nameop __P((DB_ENV *, u_int8_t *, const char *, const char *, const char *));
+int __memp_register __P((DB_ENV *, int, int (*)(DB_ENV *, db_pgno_t, void *, DBT *), int (*)(DB_ENV *, db_pgno_t, void *, DBT *)));
+int __memp_stat __P((DB_ENV *, DB_MPOOL_STAT **, DB_MPOOL_FSTAT ***, u_int32_t));
+int __memp_dump_region __P((DB_ENV *, char *, FILE *));
+void __memp_stat_hash __P((REGINFO *, MPOOL *, u_int32_t *));
+int __memp_sync __P((DB_ENV *, DB_LSN *));
+int __memp_fsync __P((DB_MPOOLFILE *));
+int __mp_xxx_fh __P((DB_MPOOLFILE *, DB_FH **));
+int __memp_sync_int __P((DB_ENV *, DB_MPOOLFILE *, int, db_sync_op, int *));
+int __memp_trickle __P((DB_ENV *, int, int *));
+
+#if defined(__cplusplus)
+}
+#endif
+#endif /* !_mp_ext_h_ */
diff --git a/bdb/dbinc_auto/mutex_ext.h b/bdb/dbinc_auto/mutex_ext.h
new file mode 100644
index 00000000000..a40f04d5578
--- /dev/null
+++ b/bdb/dbinc_auto/mutex_ext.h
@@ -0,0 +1,35 @@
+/* DO NOT EDIT: automatically built by dist/s_include. */
+#ifndef _mutex_ext_h_
+#define _mutex_ext_h_
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+int __db_fcntl_mutex_init __P((DB_ENV *, DB_MUTEX *, u_int32_t));
+int __db_fcntl_mutex_lock __P((DB_ENV *, DB_MUTEX *));
+int __db_fcntl_mutex_unlock __P((DB_ENV *, DB_MUTEX *));
+int __db_fcntl_mutex_destroy __P((DB_MUTEX *));
+int __db_pthread_mutex_init __P((DB_ENV *, DB_MUTEX *, u_int32_t));
+int __db_pthread_mutex_lock __P((DB_ENV *, DB_MUTEX *));
+int __db_pthread_mutex_unlock __P((DB_ENV *, DB_MUTEX *));
+int __db_pthread_mutex_destroy __P((DB_MUTEX *));
+int __db_tas_mutex_init __P((DB_ENV *, DB_MUTEX *, u_int32_t));
+int __db_tas_mutex_lock __P((DB_ENV *, DB_MUTEX *));
+int __db_tas_mutex_unlock __P((DB_ENV *, DB_MUTEX *));
+int __db_tas_mutex_destroy __P((DB_MUTEX *));
+int __db_win32_mutex_init __P((DB_ENV *, DB_MUTEX *, u_int32_t));
+int __db_win32_mutex_lock __P((DB_ENV *, DB_MUTEX *));
+int __db_win32_mutex_unlock __P((DB_ENV *, DB_MUTEX *));
+int __db_win32_mutex_destroy __P((DB_MUTEX *));
+int __db_mutex_setup __P((DB_ENV *, REGINFO *, void *, u_int32_t));
+void __db_mutex_free __P((DB_ENV *, REGINFO *, DB_MUTEX *));
+void __db_shreg_locks_clear __P((DB_MUTEX *, REGINFO *, REGMAINT *));
+void __db_shreg_locks_destroy __P((REGINFO *, REGMAINT *));
+int __db_shreg_mutex_init __P((DB_ENV *, DB_MUTEX *, u_int32_t, u_int32_t, REGINFO *, REGMAINT *));
+void __db_shreg_maintinit __P((REGINFO *, void *addr, size_t));
+
+#if defined(__cplusplus)
+}
+#endif
+#endif /* !_mutex_ext_h_ */
diff --git a/bdb/dbinc_auto/os_ext.h b/bdb/dbinc_auto/os_ext.h
new file mode 100644
index 00000000000..0a2e5ab2054
--- /dev/null
+++ b/bdb/dbinc_auto/os_ext.h
@@ -0,0 +1,74 @@
+/* DO NOT EDIT: automatically built by dist/s_include. */
+#ifndef _os_ext_h_
+#define _os_ext_h_
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+int __os_abspath __P((const char *));
+int __os_umalloc __P((DB_ENV *, size_t, void *));
+int __os_urealloc __P((DB_ENV *, size_t, void *));
+int __os_ufree __P((DB_ENV *, void *));
+int __os_strdup __P((DB_ENV *, const char *, void *));
+int __os_calloc __P((DB_ENV *, size_t, size_t, void *));
+int __os_malloc __P((DB_ENV *, size_t, void *));
+int __os_realloc __P((DB_ENV *, size_t, void *));
+void __os_free __P((DB_ENV *, void *));
+void *__ua_memcpy __P((void *, const void *, size_t));
+int __os_clock __P((DB_ENV *, u_int32_t *, u_int32_t *));
+int __os_fs_notzero __P((void));
+int __os_dirlist __P((DB_ENV *, const char *, char ***, int *));
+void __os_dirfree __P((DB_ENV *, char **, int));
+int __os_get_errno_ret_zero __P((void));
+int __os_get_errno __P((void));
+void __os_set_errno __P((int));
+int __os_fileid __P((DB_ENV *, const char *, int, u_int8_t *));
+int __os_fsync __P((DB_ENV *, DB_FH *));
+int __os_openhandle __P((DB_ENV *, const char *, int, int, DB_FH *));
+int __os_closehandle __P((DB_ENV *, DB_FH *));
+void __os_id __P((u_int32_t *));
+int __os_r_sysattach __P((DB_ENV *, REGINFO *, REGION *));
+int __os_r_sysdetach __P((DB_ENV *, REGINFO *, int));
+int __os_mapfile __P((DB_ENV *, char *, DB_FH *, size_t, int, void **));
+int __os_unmapfile __P((DB_ENV *, void *, size_t));
+u_int32_t __db_oflags __P((int));
+int __db_omode __P((const char *));
+int __os_open __P((DB_ENV *, const char *, u_int32_t, int, DB_FH *));
+#ifdef HAVE_QNX
+int __os_shmname __P((DB_ENV *, const char *, char **));
+#endif
+int __os_r_attach __P((DB_ENV *, REGINFO *, REGION *));
+int __os_r_detach __P((DB_ENV *, REGINFO *, int));
+int __os_rename __P((DB_ENV *, const char *, const char *, u_int32_t));
+int __os_isroot __P((void));
+char *__db_rpath __P((const char *));
+int __os_io __P((DB_ENV *, DB_IO *, int, size_t *));
+int __os_read __P((DB_ENV *, DB_FH *, void *, size_t, size_t *));
+int __os_write __P((DB_ENV *, DB_FH *, void *, size_t, size_t *));
+int __os_seek __P((DB_ENV *, DB_FH *, size_t, db_pgno_t, u_int32_t, int, DB_OS_SEEK));
+int __os_sleep __P((DB_ENV *, u_long, u_long));
+int __os_spin __P((DB_ENV *));
+void __os_yield __P((DB_ENV*, u_long));
+int __os_exists __P((const char *, int *));
+int __os_ioinfo __P((DB_ENV *, const char *, DB_FH *, u_int32_t *, u_int32_t *, u_int32_t *));
+int __os_tmpdir __P((DB_ENV *, u_int32_t));
+int __os_region_unlink __P((DB_ENV *, const char *));
+int __os_unlink __P((DB_ENV *, const char *));
+#if defined(DB_WIN32)
+int __os_win32_errno __P((void));
+#endif
+int __os_fsync __P((DB_ENV *, DB_FH *));
+int __os_openhandle __P((DB_ENV *, const char *, int, int, DB_FH *));
+int __os_closehandle __P((DB_ENV *, DB_FH *));
+int __os_io __P((DB_ENV *, DB_IO *, int, size_t *));
+int __os_read __P((DB_ENV *, DB_FH *, void *, size_t, size_t *));
+int __os_write __P((DB_ENV *, DB_FH *, void *, size_t, size_t *));
+int __os_exists __P((const char *, int *));
+int __os_ioinfo __P((DB_ENV *, const char *, DB_FH *, u_int32_t *, u_int32_t *, u_int32_t *));
+int __os_is_winnt __P((void));
+
+#if defined(__cplusplus)
+}
+#endif
+#endif /* !_os_ext_h_ */
diff --git a/bdb/dbinc_auto/qam_auto.h b/bdb/dbinc_auto/qam_auto.h
new file mode 100644
index 00000000000..655c6d0280f
--- /dev/null
+++ b/bdb/dbinc_auto/qam_auto.h
@@ -0,0 +1,70 @@
+/* Do not edit: automatically built by gen_rec.awk. */
+
+#ifndef __qam_AUTO_H
+#define __qam_AUTO_H
+#define DB___qam_incfirst 84
+typedef struct ___qam_incfirst_args {
+ u_int32_t type;
+ DB_TXN *txnid;
+ DB_LSN prev_lsn;
+ int32_t fileid;
+ db_recno_t recno;
+ db_pgno_t meta_pgno;
+} __qam_incfirst_args;
+
+#define DB___qam_mvptr 85
+typedef struct ___qam_mvptr_args {
+ u_int32_t type;
+ DB_TXN *txnid;
+ DB_LSN prev_lsn;
+ u_int32_t opcode;
+ int32_t fileid;
+ db_recno_t old_first;
+ db_recno_t new_first;
+ db_recno_t old_cur;
+ db_recno_t new_cur;
+ DB_LSN metalsn;
+ db_pgno_t meta_pgno;
+} __qam_mvptr_args;
+
+#define DB___qam_del 79
+typedef struct ___qam_del_args {
+ u_int32_t type;
+ DB_TXN *txnid;
+ DB_LSN prev_lsn;
+ int32_t fileid;
+ DB_LSN lsn;
+ db_pgno_t pgno;
+ u_int32_t indx;
+ db_recno_t recno;
+} __qam_del_args;
+
+#define DB___qam_add 80
+typedef struct ___qam_add_args {
+ u_int32_t type;
+ DB_TXN *txnid;
+ DB_LSN prev_lsn;
+ int32_t fileid;
+ DB_LSN lsn;
+ db_pgno_t pgno;
+ u_int32_t indx;
+ db_recno_t recno;
+ DBT data;
+ u_int32_t vflag;
+ DBT olddata;
+} __qam_add_args;
+
+#define DB___qam_delext 83
+typedef struct ___qam_delext_args {
+ u_int32_t type;
+ DB_TXN *txnid;
+ DB_LSN prev_lsn;
+ int32_t fileid;
+ DB_LSN lsn;
+ db_pgno_t pgno;
+ u_int32_t indx;
+ db_recno_t recno;
+ DBT data;
+} __qam_delext_args;
+
+#endif
diff --git a/bdb/dbinc_auto/qam_ext.h b/bdb/dbinc_auto/qam_ext.h
new file mode 100644
index 00000000000..16dbea79e4c
--- /dev/null
+++ b/bdb/dbinc_auto/qam_ext.h
@@ -0,0 +1,70 @@
+/* DO NOT EDIT: automatically built by dist/s_include. */
+#ifndef _qam_ext_h_
+#define _qam_ext_h_
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+int __qam_position __P((DBC *, db_recno_t *, qam_position_mode, int *));
+int __qam_pitem __P((DBC *, QPAGE *, u_int32_t, db_recno_t, DBT *));
+int __qam_append __P((DBC *, DBT *, DBT *));
+int __qam_c_dup __P((DBC *, DBC *));
+int __qam_c_init __P((DBC *));
+int __qam_truncate __P((DB *, DB_TXN *, u_int32_t *));
+int __qam_incfirst_log __P((DB *, DB_TXN *, DB_LSN *, u_int32_t, db_recno_t, db_pgno_t));
+int __qam_incfirst_getpgnos __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __qam_incfirst_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __qam_incfirst_read __P((DB_ENV *, void *, __qam_incfirst_args **));
+int __qam_mvptr_log __P((DB *, DB_TXN *, DB_LSN *, u_int32_t, u_int32_t, db_recno_t, db_recno_t, db_recno_t, db_recno_t, DB_LSN *, db_pgno_t));
+int __qam_mvptr_getpgnos __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __qam_mvptr_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __qam_mvptr_read __P((DB_ENV *, void *, __qam_mvptr_args **));
+int __qam_del_log __P((DB *, DB_TXN *, DB_LSN *, u_int32_t, DB_LSN *, db_pgno_t, u_int32_t, db_recno_t));
+int __qam_del_getpgnos __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __qam_del_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __qam_del_read __P((DB_ENV *, void *, __qam_del_args **));
+int __qam_add_log __P((DB *, DB_TXN *, DB_LSN *, u_int32_t, DB_LSN *, db_pgno_t, u_int32_t, db_recno_t, const DBT *, u_int32_t, const DBT *));
+int __qam_add_getpgnos __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __qam_add_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __qam_add_read __P((DB_ENV *, void *, __qam_add_args **));
+int __qam_delext_log __P((DB *, DB_TXN *, DB_LSN *, u_int32_t, DB_LSN *, db_pgno_t, u_int32_t, db_recno_t, const DBT *));
+int __qam_delext_getpgnos __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __qam_delext_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __qam_delext_read __P((DB_ENV *, void *, __qam_delext_args **));
+int __qam_init_print __P((DB_ENV *, int (***)(DB_ENV *, DBT *, DB_LSN *, db_recops, void *), size_t *));
+int __qam_init_getpgnos __P((DB_ENV *, int (***)(DB_ENV *, DBT *, DB_LSN *, db_recops, void *), size_t *));
+int __qam_init_recover __P((DB_ENV *, int (***)(DB_ENV *, DBT *, DB_LSN *, db_recops, void *), size_t *));
+int __qam_mswap __P((PAGE *));
+int __qam_pgin_out __P((DB_ENV *, db_pgno_t, void *, DBT *));
+int __qam_fprobe __P((DB *, db_pgno_t, void *, qam_probe_mode, u_int32_t));
+int __qam_fclose __P((DB *, db_pgno_t));
+int __qam_fremove __P((DB *, db_pgno_t));
+int __qam_sync __P((DB *, u_int32_t));
+int __qam_gen_filelist __P(( DB *, QUEUE_FILELIST **));
+int __qam_extent_names __P((DB_ENV *, char *, char ***));
+void __qam_exid __P((DB *, u_int8_t *, u_int32_t));
+int __qam_db_create __P((DB *));
+int __qam_db_close __P((DB *));
+int __db_prqueue __P((DB *, FILE *, u_int32_t));
+int __qam_remove __P((DB *, DB_TXN *, const char *, const char *, DB_LSN *));
+int __qam_rename __P((DB *, DB_TXN *, const char *, const char *, const char *));
+int __qam_open __P((DB *, DB_TXN *, const char *, db_pgno_t, int, u_int32_t));
+int __qam_metachk __P((DB *, const char *, QMETA *));
+int __qam_new_file __P((DB *, DB_TXN *, DB_FH *, const char *));
+int __qam_incfirst_recover __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __qam_mvptr_recover __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __qam_del_recover __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __qam_delext_recover __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __qam_add_recover __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __qam_stat __P((DB *, void *, u_int32_t));
+int __qam_31_qammeta __P((DB *, char *, u_int8_t *));
+int __qam_32_qammeta __P((DB *, char *, u_int8_t *));
+int __qam_vrfy_meta __P((DB *, VRFY_DBINFO *, QMETA *, db_pgno_t, u_int32_t));
+int __qam_vrfy_data __P((DB *, VRFY_DBINFO *, QPAGE *, db_pgno_t, u_int32_t));
+int __qam_vrfy_structure __P((DB *, VRFY_DBINFO *, u_int32_t));
+
+#if defined(__cplusplus)
+}
+#endif
+#endif /* !_qam_ext_h_ */
diff --git a/bdb/dbinc_auto/rep_ext.h b/bdb/dbinc_auto/rep_ext.h
new file mode 100644
index 00000000000..22e2d254fe8
--- /dev/null
+++ b/bdb/dbinc_auto/rep_ext.h
@@ -0,0 +1,30 @@
+/* DO NOT EDIT: automatically built by dist/s_include. */
+#ifndef _rep_ext_h_
+#define _rep_ext_h_
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+int __rep_dbenv_create __P((DB_ENV *));
+int __rep_process_message __P((DB_ENV *, DBT *, DBT *, int *));
+int __rep_process_txn __P((DB_ENV *, DBT *));
+int __rep_region_init __P((DB_ENV *));
+int __rep_region_destroy __P((DB_ENV *));
+int __rep_dbenv_close __P((DB_ENV *));
+int __rep_preclose __P((DB_ENV *, int));
+int __rep_check_alloc __P((DB_ENV *, TXN_RECS *, int));
+int __rep_send_message __P((DB_ENV *, int, u_int32_t, DB_LSN *, const DBT *, u_int32_t));
+int __rep_new_master __P((DB_ENV *, REP_CONTROL *, int));
+int __rep_lockpgno_init __P((DB_ENV *, int (***)(DB_ENV *, DBT *, DB_LSN *, db_recops, void *), size_t *));
+int __rep_unlockpages __P((DB_ENV *, u_int32_t));
+int __rep_lockpages __P((DB_ENV *, int (**)(DB_ENV *, DBT *, DB_LSN *, db_recops, void *), size_t, DB_LSN *, DB_LSN *, TXN_RECS *, u_int32_t));
+int __rep_is_client __P((DB_ENV *));
+int __rep_send_vote __P((DB_ENV *, DB_LSN *, int, int, int));
+int __rep_grow_sites __P((DB_ENV *dbenv, int nsites));
+void __rep_print_message __P((DB_ENV *, int, REP_CONTROL *, char *));
+
+#if defined(__cplusplus)
+}
+#endif
+#endif /* !_rep_ext_h_ */
diff --git a/bdb/dbinc_auto/rpc_client_ext.h b/bdb/dbinc_auto/rpc_client_ext.h
new file mode 100644
index 00000000000..9634b34abef
--- /dev/null
+++ b/bdb/dbinc_auto/rpc_client_ext.h
@@ -0,0 +1,167 @@
+/* DO NOT EDIT: automatically built by dist/s_include. */
+#ifndef _rpc_client_ext_h_
+#define _rpc_client_ext_h_
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+int __dbcl_envrpcserver __P((DB_ENV *, void *, const char *, long, long, u_int32_t));
+int __dbcl_env_open_wrap __P((DB_ENV *, const char *, u_int32_t, int));
+int __dbcl_db_open_wrap __P((DB *, DB_TXN *, const char *, const char *, DBTYPE, u_int32_t, int));
+int __dbcl_refresh __P((DB_ENV *));
+int __dbcl_retcopy __P((DB_ENV *, DBT *, void *, u_int32_t, void **, u_int32_t *));
+void __dbcl_txn_end __P((DB_TXN *));
+void __dbcl_txn_setup __P((DB_ENV *, DB_TXN *, DB_TXN *, u_int32_t));
+void __dbcl_c_refresh __P((DBC *));
+int __dbcl_c_setup __P((long, DB *, DBC **));
+int __dbcl_dbclose_common __P((DB *));
+int __dbcl_env_alloc __P((DB_ENV *, void *(*)(size_t), void *(*)(void *, size_t), void (*)(void *)));
+int __dbcl_set_app_dispatch __P((DB_ENV *, int (*)(DB_ENV *, DBT *, DB_LSN *, db_recops)));
+int __dbcl_env_cachesize __P((DB_ENV *, u_int32_t, u_int32_t, int));
+int __dbcl_env_close __P((DB_ENV *, u_int32_t));
+int __dbcl_env_create __P((DB_ENV *, long));
+int __dbcl_set_data_dir __P((DB_ENV *, const char *));
+int __dbcl_env_dbremove __P((DB_ENV *, DB_TXN *, const char *, const char *, u_int32_t));
+int __dbcl_env_dbrename __P((DB_ENV *, DB_TXN *, const char *, const char *, const char *, u_int32_t));
+int __dbcl_env_encrypt __P((DB_ENV *, const char *, u_int32_t));
+int __dbcl_env_set_feedback __P((DB_ENV *, void (*)(DB_ENV *, int, int)));
+int __dbcl_env_flags __P((DB_ENV *, u_int32_t, int));
+int __dbcl_set_lg_bsize __P((DB_ENV *, u_int32_t));
+int __dbcl_set_lg_dir __P((DB_ENV *, const char *));
+int __dbcl_set_lg_max __P((DB_ENV *, u_int32_t));
+int __dbcl_set_lg_regionmax __P((DB_ENV *, u_int32_t));
+int __dbcl_set_lk_conflict __P((DB_ENV *, u_int8_t *, int));
+int __dbcl_set_lk_detect __P((DB_ENV *, u_int32_t));
+int __dbcl_set_lk_max __P((DB_ENV *, u_int32_t));
+int __dbcl_set_lk_max_locks __P((DB_ENV *, u_int32_t));
+int __dbcl_set_lk_max_lockers __P((DB_ENV *, u_int32_t));
+int __dbcl_set_lk_max_objects __P((DB_ENV *, u_int32_t));
+int __dbcl_set_mp_mmapsize __P((DB_ENV *, size_t));
+int __dbcl_env_open __P((DB_ENV *, const char *, u_int32_t, int));
+int __dbcl_env_paniccall __P((DB_ENV *, void (*)(DB_ENV *, int)));
+int __dbcl_env_remove __P((DB_ENV *, const char *, u_int32_t));
+int __dbcl_set_shm_key __P((DB_ENV *, long));
+int __dbcl_set_tas_spins __P((DB_ENV *, u_int32_t));
+int __dbcl_set_timeout __P((DB_ENV *, u_int32_t, u_int32_t));
+int __dbcl_set_tmp_dir __P((DB_ENV *, const char *));
+int __dbcl_set_tx_max __P((DB_ENV *, u_int32_t));
+int __dbcl_set_tx_timestamp __P((DB_ENV *, time_t *));
+int __dbcl_set_verbose __P((DB_ENV *, u_int32_t, int));
+int __dbcl_txn_abort __P((DB_TXN *));
+int __dbcl_txn_begin __P((DB_ENV *, DB_TXN *, DB_TXN **, u_int32_t));
+int __dbcl_txn_checkpoint __P((DB_ENV *, u_int32_t, u_int32_t, u_int32_t));
+int __dbcl_txn_commit __P((DB_TXN *, u_int32_t));
+int __dbcl_txn_discard __P((DB_TXN *, u_int32_t));
+int __dbcl_txn_prepare __P((DB_TXN *, u_int8_t *));
+int __dbcl_txn_recover __P((DB_ENV *, DB_PREPLIST *, long, long *, u_int32_t));
+int __dbcl_txn_stat __P((DB_ENV *, DB_TXN_STAT **, u_int32_t));
+int __dbcl_txn_timeout __P((DB_TXN *, u_int32_t, u_int32_t));
+int __dbcl_rep_elect __P((DB_ENV *, int, int, u_int32_t, int *));
+int __dbcl_rep_flush __P((DB_ENV *));
+int __dbcl_rep_process_message __P((DB_ENV *, DBT *, DBT *, int *));
+int __dbcl_rep_set_limit __P((DB_ENV *, u_int32_t, u_int32_t));
+int __dbcl_rep_set_request __P((DB_ENV *, u_int32_t, u_int32_t));
+int __dbcl_rep_set_rep_transport __P((DB_ENV *, int, int (*)(DB_ENV *, const DBT *, const DBT *, int, u_int32_t)));
+int __dbcl_rep_start __P((DB_ENV *, DBT *, u_int32_t));
+int __dbcl_rep_stat __P((DB_ENV *, DB_REP_STAT **, u_int32_t));
+int __dbcl_db_alloc __P((DB *, void *(*)(size_t), void *(*)(void *, size_t), void (*)(void *)));
+int __dbcl_db_associate __P((DB *, DB_TXN *, DB *, int (*)(DB *, const DBT *, const DBT *, DBT *), u_int32_t));
+int __dbcl_db_bt_compare __P((DB *, int (*)(DB *, const DBT *, const DBT *)));
+int __dbcl_db_bt_maxkey __P((DB *, u_int32_t));
+int __dbcl_db_bt_minkey __P((DB *, u_int32_t));
+int __dbcl_db_bt_prefix __P((DB *, size_t(*)(DB *, const DBT *, const DBT *)));
+int __dbcl_db_set_append_recno __P((DB *, int (*)(DB *, DBT *, db_recno_t)));
+int __dbcl_db_cache_priority __P((DB *, DB_CACHE_PRIORITY));
+int __dbcl_db_cachesize __P((DB *, u_int32_t, u_int32_t, int));
+int __dbcl_db_close __P((DB *, u_int32_t));
+int __dbcl_db_create __P((DB *, DB_ENV *, u_int32_t));
+int __dbcl_db_del __P((DB *, DB_TXN *, DBT *, u_int32_t));
+int __dbcl_db_dup_compare __P((DB *, int (*)(DB *, const DBT *, const DBT *)));
+int __dbcl_db_encrypt __P((DB *, const char *, u_int32_t));
+int __dbcl_db_extentsize __P((DB *, u_int32_t));
+int __dbcl_db_fd __P((DB *, int *));
+int __dbcl_db_feedback __P((DB *, void (*)(DB *, int, int)));
+int __dbcl_db_flags __P((DB *, u_int32_t));
+int __dbcl_db_get __P((DB *, DB_TXN *, DBT *, DBT *, u_int32_t));
+int __dbcl_db_h_ffactor __P((DB *, u_int32_t));
+int __dbcl_db_h_hash __P((DB *, u_int32_t(*)(DB *, const void *, u_int32_t)));
+int __dbcl_db_h_nelem __P((DB *, u_int32_t));
+int __dbcl_db_key_range __P((DB *, DB_TXN *, DBT *, DB_KEY_RANGE *, u_int32_t));
+int __dbcl_db_lorder __P((DB *, int));
+int __dbcl_db_open __P((DB *, DB_TXN *, const char *, const char *, DBTYPE, u_int32_t, int));
+int __dbcl_db_pagesize __P((DB *, u_int32_t));
+int __dbcl_db_panic __P((DB *, void (*)(DB_ENV *, int)));
+int __dbcl_db_pget __P((DB *, DB_TXN *, DBT *, DBT *, DBT *, u_int32_t));
+int __dbcl_db_put __P((DB *, DB_TXN *, DBT *, DBT *, u_int32_t));
+int __dbcl_db_re_delim __P((DB *, int));
+int __dbcl_db_re_len __P((DB *, u_int32_t));
+int __dbcl_db_re_pad __P((DB *, int));
+int __dbcl_db_re_source __P((DB *, const char *));
+int __dbcl_db_remove __P((DB *, const char *, const char *, u_int32_t));
+int __dbcl_db_rename __P((DB *, const char *, const char *, const char *, u_int32_t));
+int __dbcl_db_stat __P((DB *, void *, u_int32_t));
+int __dbcl_db_sync __P((DB *, u_int32_t));
+int __dbcl_db_truncate __P((DB *, DB_TXN *, u_int32_t *, u_int32_t));
+int __dbcl_db_upgrade __P((DB *, const char *, u_int32_t));
+int __dbcl_db_verify __P((DB *, const char *, const char *, FILE *, u_int32_t));
+int __dbcl_db_cursor __P((DB *, DB_TXN *, DBC **, u_int32_t));
+int __dbcl_db_join __P((DB *, DBC **, DBC **, u_int32_t));
+int __dbcl_dbc_close __P((DBC *));
+int __dbcl_dbc_count __P((DBC *, db_recno_t *, u_int32_t));
+int __dbcl_dbc_del __P((DBC *, u_int32_t));
+int __dbcl_dbc_dup __P((DBC *, DBC **, u_int32_t));
+int __dbcl_dbc_get __P((DBC *, DBT *, DBT *, u_int32_t));
+int __dbcl_dbc_pget __P((DBC *, DBT *, DBT *, DBT *, u_int32_t));
+int __dbcl_dbc_put __P((DBC *, DBT *, DBT *, u_int32_t));
+int __dbcl_lock_detect __P((DB_ENV *, u_int32_t, u_int32_t, int *));
+int __dbcl_lock_get __P((DB_ENV *, u_int32_t, u_int32_t, const DBT *, db_lockmode_t, DB_LOCK *));
+int __dbcl_lock_id __P((DB_ENV *, u_int32_t *));
+int __dbcl_lock_id_free __P((DB_ENV *, u_int32_t));
+int __dbcl_lock_put __P((DB_ENV *, DB_LOCK *));
+int __dbcl_lock_stat __P((DB_ENV *, DB_LOCK_STAT **, u_int32_t));
+int __dbcl_lock_vec __P((DB_ENV *, u_int32_t, u_int32_t, DB_LOCKREQ *, int, DB_LOCKREQ **));
+int __dbcl_log_archive __P((DB_ENV *, char ***, u_int32_t));
+int __dbcl_log_cursor __P((DB_ENV *, DB_LOGC **, u_int32_t));
+int __dbcl_log_file __P((DB_ENV *, const DB_LSN *, char *, size_t));
+int __dbcl_log_flush __P((DB_ENV *, const DB_LSN *));
+int __dbcl_log_put __P((DB_ENV *, DB_LSN *, const DBT *, u_int32_t));
+int __dbcl_log_stat __P((DB_ENV *, DB_LOG_STAT **, u_int32_t));
+int __dbcl_memp_fcreate __P((DB_ENV *, DB_MPOOLFILE **, u_int32_t));
+int __dbcl_memp_register __P((DB_ENV *, int, int (*)(DB_ENV *, db_pgno_t, void *, DBT *), int (*)(DB_ENV *, db_pgno_t, void *, DBT *)));
+int __dbcl_memp_stat __P((DB_ENV *, DB_MPOOL_STAT **, DB_MPOOL_FSTAT ***, u_int32_t));
+int __dbcl_memp_sync __P((DB_ENV *, DB_LSN *));
+int __dbcl_memp_trickle __P((DB_ENV *, int, int *));
+int __dbcl_env_close_ret __P((DB_ENV *, u_int32_t, __env_close_reply *));
+int __dbcl_env_create_ret __P((DB_ENV *, long, __env_create_reply *));
+int __dbcl_env_open_ret __P((DB_ENV *, const char *, u_int32_t, int, __env_open_reply *));
+int __dbcl_env_remove_ret __P((DB_ENV *, const char *, u_int32_t, __env_remove_reply *));
+int __dbcl_txn_abort_ret __P((DB_TXN *, __txn_abort_reply *));
+int __dbcl_txn_begin_ret __P((DB_ENV *, DB_TXN *, DB_TXN **, u_int32_t, __txn_begin_reply *));
+int __dbcl_txn_commit_ret __P((DB_TXN *, u_int32_t, __txn_commit_reply *));
+int __dbcl_txn_discard_ret __P((DB_TXN *, u_int32_t, __txn_discard_reply *));
+int __dbcl_txn_recover_ret __P((DB_ENV *, DB_PREPLIST *, long, long *, u_int32_t, __txn_recover_reply *));
+int __dbcl_db_close_ret __P((DB *, u_int32_t, __db_close_reply *));
+int __dbcl_db_create_ret __P((DB *, DB_ENV *, u_int32_t, __db_create_reply *));
+int __dbcl_db_get_ret __P((DB *, DB_TXN *, DBT *, DBT *, u_int32_t, __db_get_reply *));
+int __dbcl_db_key_range_ret __P((DB *, DB_TXN *, DBT *, DB_KEY_RANGE *, u_int32_t, __db_key_range_reply *));
+int __dbcl_db_open_ret __P((DB *, DB_TXN *, const char *, const char *, DBTYPE, u_int32_t, int, __db_open_reply *));
+int __dbcl_db_pget_ret __P((DB *, DB_TXN *, DBT *, DBT *, DBT *, u_int32_t, __db_pget_reply *));
+int __dbcl_db_put_ret __P((DB *, DB_TXN *, DBT *, DBT *, u_int32_t, __db_put_reply *));
+int __dbcl_db_remove_ret __P((DB *, const char *, const char *, u_int32_t, __db_remove_reply *));
+int __dbcl_db_rename_ret __P((DB *, const char *, const char *, const char *, u_int32_t, __db_rename_reply *));
+int __dbcl_db_stat_ret __P((DB *, void *, u_int32_t, __db_stat_reply *));
+int __dbcl_db_truncate_ret __P((DB *, DB_TXN *, u_int32_t *, u_int32_t, __db_truncate_reply *));
+int __dbcl_db_cursor_ret __P((DB *, DB_TXN *, DBC **, u_int32_t, __db_cursor_reply *));
+int __dbcl_db_join_ret __P((DB *, DBC **, DBC **, u_int32_t, __db_join_reply *));
+int __dbcl_dbc_close_ret __P((DBC *, __dbc_close_reply *));
+int __dbcl_dbc_count_ret __P((DBC *, db_recno_t *, u_int32_t, __dbc_count_reply *));
+int __dbcl_dbc_dup_ret __P((DBC *, DBC **, u_int32_t, __dbc_dup_reply *));
+int __dbcl_dbc_get_ret __P((DBC *, DBT *, DBT *, u_int32_t, __dbc_get_reply *));
+int __dbcl_dbc_pget_ret __P((DBC *, DBT *, DBT *, DBT *, u_int32_t, __dbc_pget_reply *));
+int __dbcl_dbc_put_ret __P((DBC *, DBT *, DBT *, u_int32_t, __dbc_put_reply *));
+
+#if defined(__cplusplus)
+}
+#endif
+#endif /* !_rpc_client_ext_h_ */
diff --git a/bdb/dbinc_auto/rpc_defs.in b/bdb/dbinc_auto/rpc_defs.in
new file mode 100644
index 00000000000..cae76f5606d
--- /dev/null
+++ b/bdb/dbinc_auto/rpc_defs.in
@@ -0,0 +1,4 @@
+
+/* DO NOT EDIT: automatically built by dist/s_rpc. */
+#define DB_RPC_SERVERPROG ((unsigned long)(351457))
+#define DB_RPC_SERVERVERS ((unsigned long)(4001))
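These two generated constants identify the Berkeley DB RPC service to the Sun RPC layer. As a hedged illustration only (the host name and the standalone program below are placeholders, not part of this patch), a client would typically bind to the server by program and version number like this:

#include <rpc/rpc.h>
#include <stdio.h>

#define	DB_RPC_SERVERPROG ((unsigned long)(351457))
#define	DB_RPC_SERVERVERS ((unsigned long)(4001))

int
main()
{
	CLIENT *cl;

	/* Bind to the server over TCP; "db.example.com" is illustrative. */
	if ((cl = clnt_create("db.example.com",
	    DB_RPC_SERVERPROG, DB_RPC_SERVERVERS, "tcp")) == NULL) {
		clnt_pcreateerror("clnt_create");
		return (1);
	}
	clnt_destroy(cl);
	return (0);
}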
diff --git a/bdb/dbinc_auto/rpc_server_ext.h b/bdb/dbinc_auto/rpc_server_ext.h
new file mode 100644
index 00000000000..c0c706881c7
--- /dev/null
+++ b/bdb/dbinc_auto/rpc_server_ext.h
@@ -0,0 +1,126 @@
+/* DO NOT EDIT: automatically built by dist/s_include. */
+#ifndef _rpc_server_ext_h_
+#define _rpc_server_ext_h_
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+void __env_cachesize_proc __P((long, u_int32_t, u_int32_t, u_int32_t, __env_cachesize_reply *));
+void __env_close_proc __P((long, u_int32_t, __env_close_reply *));
+void __env_create_proc __P((u_int32_t, __env_create_reply *));
+void __env_dbremove_proc __P((long, long, char *, char *, u_int32_t, __env_dbremove_reply *));
+void __env_dbrename_proc __P((long, long, char *, char *, char *, u_int32_t, __env_dbrename_reply *));
+void __env_encrypt_proc __P((long, char *, u_int32_t, __env_encrypt_reply *));
+void __env_flags_proc __P((long, u_int32_t, u_int32_t, __env_flags_reply *));
+void __env_open_proc __P((long, char *, u_int32_t, u_int32_t, __env_open_reply *));
+void __env_remove_proc __P((long, char *, u_int32_t, __env_remove_reply *));
+void __txn_abort_proc __P((long, __txn_abort_reply *));
+void __txn_begin_proc __P((long, long, u_int32_t, __txn_begin_reply *));
+void __txn_commit_proc __P((long, u_int32_t, __txn_commit_reply *));
+void __txn_discard_proc __P((long, u_int32_t, __txn_discard_reply *));
+void __txn_prepare_proc __P((long, u_int8_t *, __txn_prepare_reply *));
+void __txn_recover_proc __P((long, u_int32_t, u_int32_t, __txn_recover_reply *, int *));
+void __db_bt_maxkey_proc __P((long, u_int32_t, __db_bt_maxkey_reply *));
+void __db_associate_proc __P((long, long, long, u_int32_t, __db_associate_reply *));
+void __db_bt_minkey_proc __P((long, u_int32_t, __db_bt_minkey_reply *));
+void __db_close_proc __P((long, u_int32_t, __db_close_reply *));
+void __db_create_proc __P((long, u_int32_t, __db_create_reply *));
+void __db_del_proc __P((long, long, u_int32_t, u_int32_t, u_int32_t, u_int32_t, void *, u_int32_t, u_int32_t, __db_del_reply *));
+void __db_encrypt_proc __P((long, char *, u_int32_t, __db_encrypt_reply *));
+void __db_extentsize_proc __P((long, u_int32_t, __db_extentsize_reply *));
+void __db_flags_proc __P((long, u_int32_t, __db_flags_reply *));
+void __db_get_proc __P((long, long, u_int32_t, u_int32_t, u_int32_t, u_int32_t, void *, u_int32_t, u_int32_t, u_int32_t, u_int32_t, u_int32_t, void *, u_int32_t, u_int32_t, __db_get_reply *, int *));
+void __db_h_ffactor_proc __P((long, u_int32_t, __db_h_ffactor_reply *));
+void __db_h_nelem_proc __P((long, u_int32_t, __db_h_nelem_reply *));
+void __db_key_range_proc __P((long, long, u_int32_t, u_int32_t, u_int32_t, u_int32_t, void *, u_int32_t, u_int32_t, __db_key_range_reply *));
+void __db_lorder_proc __P((long, u_int32_t, __db_lorder_reply *));
+void __db_open_proc __P((long, long, char *, char *, u_int32_t, u_int32_t, u_int32_t, __db_open_reply *));
+void __db_pagesize_proc __P((long, u_int32_t, __db_pagesize_reply *));
+void __db_pget_proc __P((long, long, u_int32_t, u_int32_t, u_int32_t, u_int32_t, void *, u_int32_t, u_int32_t, u_int32_t, u_int32_t, u_int32_t, void *, u_int32_t, u_int32_t, u_int32_t, u_int32_t, u_int32_t, void *, u_int32_t, u_int32_t, __db_pget_reply *, int *));
+void __db_put_proc __P((long, long, u_int32_t, u_int32_t, u_int32_t, u_int32_t, void *, u_int32_t, u_int32_t, u_int32_t, u_int32_t, u_int32_t, void *, u_int32_t, u_int32_t, __db_put_reply *, int *));
+void __db_re_delim_proc __P((long, u_int32_t, __db_re_delim_reply *));
+void __db_re_len_proc __P((long, u_int32_t, __db_re_len_reply *));
+void __db_re_pad_proc __P((long, u_int32_t, __db_re_pad_reply *));
+void __db_remove_proc __P((long, char *, char *, u_int32_t, __db_remove_reply *));
+void __db_rename_proc __P((long, char *, char *, char *, u_int32_t, __db_rename_reply *));
+void __db_stat_proc __P((long, u_int32_t, __db_stat_reply *, int *));
+void __db_sync_proc __P((long, u_int32_t, __db_sync_reply *));
+void __db_truncate_proc __P((long, long, u_int32_t, __db_truncate_reply *));
+void __db_cursor_proc __P((long, long, u_int32_t, __db_cursor_reply *));
+void __db_join_proc __P((long, u_int32_t *, u_int32_t, u_int32_t, __db_join_reply *));
+void __dbc_close_proc __P((long, __dbc_close_reply *));
+void __dbc_count_proc __P((long, u_int32_t, __dbc_count_reply *));
+void __dbc_del_proc __P((long, u_int32_t, __dbc_del_reply *));
+void __dbc_dup_proc __P((long, u_int32_t, __dbc_dup_reply *));
+void __dbc_get_proc __P((long, u_int32_t, u_int32_t, u_int32_t, u_int32_t, void *, u_int32_t, u_int32_t, u_int32_t, u_int32_t, u_int32_t, void *, u_int32_t, u_int32_t, __dbc_get_reply *, int *));
+void __dbc_pget_proc __P((long, u_int32_t, u_int32_t, u_int32_t, u_int32_t, void *, u_int32_t, u_int32_t, u_int32_t, u_int32_t, u_int32_t, void *, u_int32_t, u_int32_t, u_int32_t, u_int32_t, u_int32_t, void *, u_int32_t, u_int32_t, __dbc_pget_reply *, int *));
+void __dbc_put_proc __P((long, u_int32_t, u_int32_t, u_int32_t, u_int32_t, void *, u_int32_t, u_int32_t, u_int32_t, u_int32_t, u_int32_t, void *, u_int32_t, u_int32_t, __dbc_put_reply *, int *));
+void __dbsrv_settimeout __P((ct_entry *, u_int32_t));
+void __dbsrv_timeout __P((int));
+void __dbclear_ctp __P((ct_entry *));
+void __dbdel_ctp __P((ct_entry *));
+ct_entry *new_ct_ent __P((int *));
+ct_entry *get_tableent __P((long));
+ct_entry *__dbsrv_sharedb __P((ct_entry *, const char *, const char *, DBTYPE, u_int32_t));
+ct_entry *__dbsrv_shareenv __P((ct_entry *, home_entry *, u_int32_t));
+void __dbsrv_active __P((ct_entry *));
+int __db_close_int __P((long, u_int32_t));
+int __dbc_close_int __P((ct_entry *));
+int __dbenv_close_int __P((long, u_int32_t, int));
+home_entry *get_home __P((char *));
+__env_cachesize_reply *__db_env_cachesize_4001 __P((__env_cachesize_msg *, struct svc_req *));
+__env_close_reply *__db_env_close_4001 __P((__env_close_msg *, struct svc_req *));
+__env_create_reply *__db_env_create_4001 __P((__env_create_msg *, struct svc_req *));
+__env_dbremove_reply *__db_env_dbremove_4001 __P((__env_dbremove_msg *, struct svc_req *));
+__env_dbrename_reply *__db_env_dbrename_4001 __P((__env_dbrename_msg *, struct svc_req *));
+__env_encrypt_reply *__db_env_encrypt_4001 __P((__env_encrypt_msg *, struct svc_req *));
+__env_flags_reply *__db_env_flags_4001 __P((__env_flags_msg *, struct svc_req *));
+__env_open_reply *__db_env_open_4001 __P((__env_open_msg *, struct svc_req *));
+__env_remove_reply *__db_env_remove_4001 __P((__env_remove_msg *, struct svc_req *));
+__txn_abort_reply *__db_txn_abort_4001 __P((__txn_abort_msg *, struct svc_req *));
+__txn_begin_reply *__db_txn_begin_4001 __P((__txn_begin_msg *, struct svc_req *));
+__txn_commit_reply *__db_txn_commit_4001 __P((__txn_commit_msg *, struct svc_req *));
+__txn_discard_reply *__db_txn_discard_4001 __P((__txn_discard_msg *, struct svc_req *));
+__txn_prepare_reply *__db_txn_prepare_4001 __P((__txn_prepare_msg *, struct svc_req *));
+__txn_recover_reply *__db_txn_recover_4001 __P((__txn_recover_msg *, struct svc_req *));
+__db_associate_reply *__db_db_associate_4001 __P((__db_associate_msg *, struct svc_req *));
+__db_bt_maxkey_reply *__db_db_bt_maxkey_4001 __P((__db_bt_maxkey_msg *, struct svc_req *));
+__db_bt_minkey_reply *__db_db_bt_minkey_4001 __P((__db_bt_minkey_msg *, struct svc_req *));
+__db_close_reply *__db_db_close_4001 __P((__db_close_msg *, struct svc_req *));
+__db_create_reply *__db_db_create_4001 __P((__db_create_msg *, struct svc_req *));
+__db_del_reply *__db_db_del_4001 __P((__db_del_msg *, struct svc_req *));
+__db_encrypt_reply *__db_db_encrypt_4001 __P((__db_encrypt_msg *, struct svc_req *));
+__db_extentsize_reply *__db_db_extentsize_4001 __P((__db_extentsize_msg *, struct svc_req *));
+__db_flags_reply *__db_db_flags_4001 __P((__db_flags_msg *, struct svc_req *));
+__db_get_reply *__db_db_get_4001 __P((__db_get_msg *, struct svc_req *));
+__db_h_ffactor_reply *__db_db_h_ffactor_4001 __P((__db_h_ffactor_msg *, struct svc_req *));
+__db_h_nelem_reply *__db_db_h_nelem_4001 __P((__db_h_nelem_msg *, struct svc_req *));
+__db_key_range_reply *__db_db_key_range_4001 __P((__db_key_range_msg *, struct svc_req *));
+__db_lorder_reply *__db_db_lorder_4001 __P((__db_lorder_msg *, struct svc_req *));
+__db_open_reply *__db_db_open_4001 __P((__db_open_msg *, struct svc_req *));
+__db_pagesize_reply *__db_db_pagesize_4001 __P((__db_pagesize_msg *, struct svc_req *));
+__db_pget_reply *__db_db_pget_4001 __P((__db_pget_msg *, struct svc_req *));
+__db_put_reply *__db_db_put_4001 __P((__db_put_msg *, struct svc_req *));
+__db_re_delim_reply *__db_db_re_delim_4001 __P((__db_re_delim_msg *, struct svc_req *));
+__db_re_len_reply *__db_db_re_len_4001 __P((__db_re_len_msg *, struct svc_req *));
+__db_re_pad_reply *__db_db_re_pad_4001 __P((__db_re_pad_msg *, struct svc_req *));
+__db_remove_reply *__db_db_remove_4001 __P((__db_remove_msg *, struct svc_req *));
+__db_rename_reply *__db_db_rename_4001 __P((__db_rename_msg *, struct svc_req *));
+__db_stat_reply *__db_db_stat_4001 __P((__db_stat_msg *, struct svc_req *));
+__db_sync_reply *__db_db_sync_4001 __P((__db_sync_msg *, struct svc_req *));
+__db_truncate_reply *__db_db_truncate_4001 __P((__db_truncate_msg *, struct svc_req *));
+__db_cursor_reply *__db_db_cursor_4001 __P((__db_cursor_msg *, struct svc_req *));
+__db_join_reply *__db_db_join_4001 __P((__db_join_msg *, struct svc_req *));
+__dbc_close_reply *__db_dbc_close_4001 __P((__dbc_close_msg *, struct svc_req *));
+__dbc_count_reply *__db_dbc_count_4001 __P((__dbc_count_msg *, struct svc_req *));
+__dbc_del_reply *__db_dbc_del_4001 __P((__dbc_del_msg *, struct svc_req *));
+__dbc_dup_reply *__db_dbc_dup_4001 __P((__dbc_dup_msg *, struct svc_req *));
+__dbc_get_reply *__db_dbc_get_4001 __P((__dbc_get_msg *, struct svc_req *));
+__dbc_pget_reply *__db_dbc_pget_4001 __P((__dbc_pget_msg *, struct svc_req *));
+__dbc_put_reply *__db_dbc_put_4001 __P((__dbc_put_msg *, struct svc_req *));
+
+#if defined(__cplusplus)
+}
+#endif
+#endif /* !_rpc_server_ext_h_ */
diff --git a/bdb/dbinc_auto/tcl_ext.h b/bdb/dbinc_auto/tcl_ext.h
new file mode 100644
index 00000000000..619ea4a9dfc
--- /dev/null
+++ b/bdb/dbinc_auto/tcl_ext.h
@@ -0,0 +1,82 @@
+/* DO NOT EDIT: automatically built by dist/s_include. */
+#ifndef _tcl_ext_h_
+#define _tcl_ext_h_
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+int bdb_HCommand __P((Tcl_Interp *, int, Tcl_Obj * CONST*));
+#if DB_DBM_HSEARCH != 0
+int bdb_NdbmOpen __P((Tcl_Interp *, int, Tcl_Obj * CONST*, DBM **));
+#endif
+#if DB_DBM_HSEARCH != 0
+int bdb_DbmCommand __P((Tcl_Interp *, int, Tcl_Obj * CONST*, int, DBM *));
+#endif
+int ndbm_Cmd __P((ClientData, Tcl_Interp *, int, Tcl_Obj * CONST*));
+void _DbInfoDelete __P((Tcl_Interp *, DBTCL_INFO *));
+int db_Cmd __P((ClientData, Tcl_Interp *, int, Tcl_Obj * CONST*));
+int dbc_Cmd __P((ClientData, Tcl_Interp *, int, Tcl_Obj * CONST*));
+int env_Cmd __P((ClientData, Tcl_Interp *, int, Tcl_Obj * CONST*));
+int tcl_EnvRemove __P((Tcl_Interp *, int, Tcl_Obj * CONST*, DB_ENV *, DBTCL_INFO *));
+int tcl_EnvVerbose __P((Tcl_Interp *, DB_ENV *, Tcl_Obj *, Tcl_Obj *));
+int tcl_EnvAttr __P((Tcl_Interp *, int, Tcl_Obj * CONST*, DB_ENV *));
+int tcl_EnvTest __P((Tcl_Interp *, int, Tcl_Obj * CONST*, DB_ENV *));
+DBTCL_INFO *_NewInfo __P((Tcl_Interp *, void *, char *, enum INFOTYPE));
+void *_NameToPtr __P((CONST char *));
+DBTCL_INFO *_PtrToInfo __P((CONST void *));
+DBTCL_INFO *_NameToInfo __P((CONST char *));
+void _SetInfoData __P((DBTCL_INFO *, void *));
+void _DeleteInfo __P((DBTCL_INFO *));
+int _SetListElem __P((Tcl_Interp *, Tcl_Obj *, void *, int, void *, int));
+int _SetListElemInt __P((Tcl_Interp *, Tcl_Obj *, void *, int));
+int _SetListRecnoElem __P((Tcl_Interp *, Tcl_Obj *, db_recno_t, u_char *, int));
+int _Set3DBTList __P((Tcl_Interp *, Tcl_Obj *, DBT *, int, DBT *, int, DBT *));
+int _SetMultiList __P((Tcl_Interp *, Tcl_Obj *, DBT *, DBT*, int, int));
+int _GetGlobPrefix __P((char *, char **));
+int _ReturnSetup __P((Tcl_Interp *, int, int, char *));
+int _ErrorSetup __P((Tcl_Interp *, int, char *));
+void _ErrorFunc __P((CONST char *, char *));
+int _GetLsn __P((Tcl_Interp *, Tcl_Obj *, DB_LSN *));
+int _GetUInt32 __P((Tcl_Interp *, Tcl_Obj *, u_int32_t *));
+Tcl_Obj *_GetFlagsList __P((Tcl_Interp *, u_int32_t, void (*)(u_int32_t, void *, void (*)(u_int32_t, const FN *, void *))));
+void _debug_check __P((void));
+int _CopyObjBytes __P((Tcl_Interp *, Tcl_Obj *obj, void **, u_int32_t *, int *));
+int tcl_LockDetect __P((Tcl_Interp *, int, Tcl_Obj * CONST*, DB_ENV *));
+int tcl_LockGet __P((Tcl_Interp *, int, Tcl_Obj * CONST*, DB_ENV *));
+int tcl_LockStat __P((Tcl_Interp *, int, Tcl_Obj * CONST*, DB_ENV *));
+int tcl_LockTimeout __P((Tcl_Interp *, int, Tcl_Obj * CONST*, DB_ENV *));
+int tcl_LockVec __P((Tcl_Interp *, int, Tcl_Obj * CONST*, DB_ENV *));
+int tcl_LogArchive __P((Tcl_Interp *, int, Tcl_Obj * CONST*, DB_ENV *));
+int tcl_LogCompare __P((Tcl_Interp *, int, Tcl_Obj * CONST*));
+int tcl_LogFile __P((Tcl_Interp *, int, Tcl_Obj * CONST*, DB_ENV *));
+int tcl_LogFlush __P((Tcl_Interp *, int, Tcl_Obj * CONST*, DB_ENV *));
+int tcl_LogGet __P((Tcl_Interp *, int, Tcl_Obj * CONST*, DB_ENV *));
+int tcl_LogPut __P((Tcl_Interp *, int, Tcl_Obj * CONST*, DB_ENV *));
+int tcl_LogStat __P((Tcl_Interp *, int, Tcl_Obj * CONST*, DB_ENV *));
+int logc_Cmd __P((ClientData, Tcl_Interp *, int, Tcl_Obj * CONST*));
+void _MpInfoDelete __P((Tcl_Interp *, DBTCL_INFO *));
+int tcl_MpSync __P((Tcl_Interp *, int, Tcl_Obj * CONST*, DB_ENV *));
+int tcl_MpTrickle __P((Tcl_Interp *, int, Tcl_Obj * CONST*, DB_ENV *));
+int tcl_Mp __P((Tcl_Interp *, int, Tcl_Obj * CONST*, DB_ENV *, DBTCL_INFO *));
+int tcl_MpStat __P((Tcl_Interp *, int, Tcl_Obj * CONST*, DB_ENV *));
+int tcl_RepElect __P((Tcl_Interp *, int, Tcl_Obj * CONST *, DB_ENV *));
+int tcl_RepFlush __P((Tcl_Interp *, int, Tcl_Obj * CONST *, DB_ENV *));
+int tcl_RepLimit __P((Tcl_Interp *, int, Tcl_Obj * CONST *, DB_ENV *));
+int tcl_RepRequest __P((Tcl_Interp *, int, Tcl_Obj * CONST *, DB_ENV *));
+int tcl_RepStart __P((Tcl_Interp *, int, Tcl_Obj * CONST *, DB_ENV *));
+int tcl_RepProcessMessage __P((Tcl_Interp *, int, Tcl_Obj * CONST *, DB_ENV *));
+int tcl_RepStat __P((Tcl_Interp *, int, Tcl_Obj * CONST *, DB_ENV *));
+void _TxnInfoDelete __P((Tcl_Interp *, DBTCL_INFO *));
+int tcl_TxnCheckpoint __P((Tcl_Interp *, int, Tcl_Obj * CONST*, DB_ENV *));
+int tcl_Txn __P((Tcl_Interp *, int, Tcl_Obj * CONST*, DB_ENV *, DBTCL_INFO *));
+int tcl_TxnStat __P((Tcl_Interp *, int, Tcl_Obj * CONST*, DB_ENV *));
+int tcl_TxnTimeout __P((Tcl_Interp *, int, Tcl_Obj * CONST*, DB_ENV *));
+int tcl_TxnRecover __P((Tcl_Interp *, int, Tcl_Obj * CONST*, DB_ENV *, DBTCL_INFO *));
+int bdb_RandCommand __P((Tcl_Interp *, int, Tcl_Obj * CONST*));
+int tcl_Mutex __P((Tcl_Interp *, int, Tcl_Obj * CONST*, DB_ENV *, DBTCL_INFO *));
+
+#if defined(__cplusplus)
+}
+#endif
+#endif /* !_tcl_ext_h_ */
diff --git a/bdb/dbinc_auto/txn_auto.h b/bdb/dbinc_auto/txn_auto.h
new file mode 100644
index 00000000000..ac841ba5bc3
--- /dev/null
+++ b/bdb/dbinc_auto/txn_auto.h
@@ -0,0 +1,55 @@
+/* Do not edit: automatically built by gen_rec.awk. */
+
+#ifndef __txn_AUTO_H
+#define __txn_AUTO_H
+#define DB___txn_regop 10
+typedef struct ___txn_regop_args {
+ u_int32_t type;
+ DB_TXN *txnid;
+ DB_LSN prev_lsn;
+ u_int32_t opcode;
+ int32_t timestamp;
+} __txn_regop_args;
+
+#define DB___txn_ckp 11
+typedef struct ___txn_ckp_args {
+ u_int32_t type;
+ DB_TXN *txnid;
+ DB_LSN prev_lsn;
+ DB_LSN ckp_lsn;
+ DB_LSN last_ckp;
+ int32_t timestamp;
+} __txn_ckp_args;
+
+#define DB___txn_child 12
+typedef struct ___txn_child_args {
+ u_int32_t type;
+ DB_TXN *txnid;
+ DB_LSN prev_lsn;
+ u_int32_t child;
+ DB_LSN c_lsn;
+} __txn_child_args;
+
+#define DB___txn_xa_regop 13
+typedef struct ___txn_xa_regop_args {
+ u_int32_t type;
+ DB_TXN *txnid;
+ DB_LSN prev_lsn;
+ u_int32_t opcode;
+ DBT xid;
+ int32_t formatID;
+ u_int32_t gtrid;
+ u_int32_t bqual;
+ DB_LSN begin_lsn;
+} __txn_xa_regop_args;
+
+#define DB___txn_recycle 14
+typedef struct ___txn_recycle_args {
+ u_int32_t type;
+ DB_TXN *txnid;
+ DB_LSN prev_lsn;
+ u_int32_t min;
+ u_int32_t max;
+} __txn_recycle_args;
+
+#endif
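Each structure above mirrors the on-disk layout of one transaction log record type; the matching *_read() helpers declared in txn_ext.h below unmarshal a raw record buffer into it. A minimal sketch, assuming the record buffer was obtained elsewhere (for example from a log cursor) and that the internal headers are in scope; the function name here is illustrative, not part of the patch:

/* Sketch: decode a commit (__txn_regop) record and print two fields.
 * __txn_regop_read() allocates *argp, so it is released with
 * __os_free(); stdio is assumed for printf. */
static int
print_regop(DB_ENV *dbenv, DBT *rec)
{
	__txn_regop_args *argp;
	int ret;

	if ((ret = __txn_regop_read(dbenv, rec->data, &argp)) != 0)
		return (ret);
	printf("txn_regop: opcode %lu timestamp %ld\n",
	    (u_long)argp->opcode, (long)argp->timestamp);
	__os_free(dbenv, argp);
	return (0);
}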
diff --git a/bdb/dbinc_auto/txn_ext.h b/bdb/dbinc_auto/txn_ext.h
new file mode 100644
index 00000000000..5a4381ed890
--- /dev/null
+++ b/bdb/dbinc_auto/txn_ext.h
@@ -0,0 +1,70 @@
+/* DO NOT EDIT: automatically built by dist/s_include. */
+#ifndef _txn_ext_h_
+#define _txn_ext_h_
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+int __txn_begin __P((DB_ENV *, DB_TXN *, DB_TXN **, u_int32_t));
+int __txn_xa_begin __P((DB_ENV *, DB_TXN *));
+int __txn_compensate_begin __P((DB_ENV *, DB_TXN **txnp));
+int __txn_commit __P((DB_TXN *, u_int32_t));
+int __txn_abort __P((DB_TXN *));
+int __txn_discard __P((DB_TXN *, u_int32_t flags));
+int __txn_prepare __P((DB_TXN *, u_int8_t *));
+u_int32_t __txn_id __P((DB_TXN *));
+int __txn_checkpoint __P((DB_ENV *, u_int32_t, u_int32_t, u_int32_t));
+int __txn_getckp __P((DB_ENV *, DB_LSN *));
+int __txn_activekids __P((DB_ENV *, u_int32_t, DB_TXN *));
+int __txn_force_abort __P((DB_ENV *, u_int8_t *));
+int __txn_preclose __P((DB_ENV *));
+int __txn_reset __P((DB_ENV *));
+int __txn_regop_log __P((DB_ENV *, DB_TXN *, DB_LSN *, u_int32_t, u_int32_t, int32_t));
+int __txn_regop_getpgnos __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __txn_regop_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __txn_regop_read __P((DB_ENV *, void *, __txn_regop_args **));
+int __txn_ckp_log __P((DB_ENV *, DB_TXN *, DB_LSN *, u_int32_t, DB_LSN *, DB_LSN *, int32_t));
+int __txn_ckp_getpgnos __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __txn_ckp_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __txn_ckp_read __P((DB_ENV *, void *, __txn_ckp_args **));
+int __txn_child_log __P((DB_ENV *, DB_TXN *, DB_LSN *, u_int32_t, u_int32_t, DB_LSN *));
+int __txn_child_getpgnos __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __txn_child_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __txn_child_read __P((DB_ENV *, void *, __txn_child_args **));
+int __txn_xa_regop_log __P((DB_ENV *, DB_TXN *, DB_LSN *, u_int32_t, u_int32_t, const DBT *, int32_t, u_int32_t, u_int32_t, DB_LSN *));
+int __txn_xa_regop_getpgnos __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __txn_xa_regop_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __txn_xa_regop_read __P((DB_ENV *, void *, __txn_xa_regop_args **));
+int __txn_recycle_log __P((DB_ENV *, DB_TXN *, DB_LSN *, u_int32_t, u_int32_t, u_int32_t));
+int __txn_recycle_getpgnos __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __txn_recycle_print __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __txn_recycle_read __P((DB_ENV *, void *, __txn_recycle_args **));
+int __txn_init_print __P((DB_ENV *, int (***)(DB_ENV *, DBT *, DB_LSN *, db_recops, void *), size_t *));
+int __txn_init_getpgnos __P((DB_ENV *, int (***)(DB_ENV *, DBT *, DB_LSN *, db_recops, void *), size_t *));
+int __txn_init_recover __P((DB_ENV *, int (***)(DB_ENV *, DBT *, DB_LSN *, db_recops, void *), size_t *));
+void __txn_dbenv_create __P((DB_ENV *));
+int __txn_regop_recover __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __txn_xa_regop_recover __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __txn_ckp_recover __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __txn_child_recover __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int __txn_restore_txn __P((DB_ENV *, DB_LSN *, __txn_xa_regop_args *));
+int __txn_recycle_recover __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+void __txn_continue __P((DB_ENV *, DB_TXN *, TXN_DETAIL *, size_t));
+int __txn_map_gid __P((DB_ENV *, u_int8_t *, TXN_DETAIL **, size_t *));
+int __txn_recover __P((DB_ENV *, DB_PREPLIST *, long, long *, u_int32_t));
+int __txn_get_prepared __P((DB_ENV *, XID *, DB_PREPLIST *, long, long *, u_int32_t));
+int __txn_open __P((DB_ENV *));
+int __txn_dbenv_refresh __P((DB_ENV *));
+void __txn_region_destroy __P((DB_ENV *, REGINFO *));
+int __txn_id_set __P((DB_ENV *, u_int32_t, u_int32_t));
+int __txn_stat __P((DB_ENV *, DB_TXN_STAT **, u_int32_t));
+int __txn_remevent __P((DB_ENV *, DB_TXN *, const char *, u_int8_t*));
+int __txn_lockevent __P((DB_ENV *, DB_TXN *, DB *, DB_LOCK *, u_int32_t));
+void __txn_remlock __P((DB_ENV *, DB_TXN *, DB_LOCK *, u_int32_t));
+int __txn_doevents __P((DB_ENV *, DB_TXN *, int, int));
+
+#if defined(__cplusplus)
+}
+#endif
+#endif /* !_txn_ext_h_ */
diff --git a/bdb/dbinc_auto/xa_ext.h b/bdb/dbinc_auto/xa_ext.h
new file mode 100644
index 00000000000..e4fc9895e18
--- /dev/null
+++ b/bdb/dbinc_auto/xa_ext.h
@@ -0,0 +1,20 @@
+/* DO NOT EDIT: automatically built by dist/s_include. */
+#ifndef _xa_ext_h_
+#define _xa_ext_h_
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+int __db_xa_create __P((DB *));
+int __db_rmid_to_env __P((int rmid, DB_ENV **envp));
+int __db_xid_to_txn __P((DB_ENV *, XID *, size_t *));
+int __db_map_rmid __P((int, DB_ENV *));
+int __db_unmap_rmid __P((int));
+int __db_map_xid __P((DB_ENV *, XID *, size_t));
+void __db_unmap_xid __P((DB_ENV *, XID *, size_t));
+
+#if defined(__cplusplus)
+}
+#endif
+#endif /* !_xa_ext_h_ */
diff --git a/bdb/dbm/dbm.c b/bdb/dbm/dbm.c
index e5f423572c5..3aa6fff6982 100644
--- a/bdb/dbm/dbm.c
+++ b/bdb/dbm/dbm.c
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Copyright (c) 1996-2002
* Sleepycat Software. All rights reserved.
*/
/*
@@ -43,7 +43,7 @@
#include "db_config.h"
#ifndef lint
-static const char revid[] = "$Id: dbm.c,v 11.7 2000/11/30 00:58:35 ubell Exp $";
+static const char revid[] = "$Id: dbm.c,v 11.14 2002/02/22 16:11:10 bostic Exp $";
#endif /* not lint */
#ifndef NO_SYSTEM_INCLUDES
@@ -60,6 +60,36 @@ static const char revid[] = "$Id: dbm.c,v 11.7 2000/11/30 00:58:35 ubell Exp $";
*
* This package provides dbm and ndbm compatible interfaces to DB.
*
+ * EXTERN: #if DB_DBM_HSEARCH != 0
+ *
+ * EXTERN: int __db_ndbm_clearerr __P((DBM *));
+ * EXTERN: void __db_ndbm_close __P((DBM *));
+ * EXTERN: int __db_ndbm_delete __P((DBM *, datum));
+ * EXTERN: int __db_ndbm_dirfno __P((DBM *));
+ * EXTERN: int __db_ndbm_error __P((DBM *));
+ * EXTERN: datum __db_ndbm_fetch __P((DBM *, datum));
+ * EXTERN: datum __db_ndbm_firstkey __P((DBM *));
+ * EXTERN: datum __db_ndbm_nextkey __P((DBM *));
+ * EXTERN: DBM *__db_ndbm_open __P((const char *, int, int));
+ * EXTERN: int __db_ndbm_pagfno __P((DBM *));
+ * EXTERN: int __db_ndbm_rdonly __P((DBM *));
+ * EXTERN: int __db_ndbm_store __P((DBM *, datum, datum, int));
+ *
+ * EXTERN: int __db_dbm_close __P((void));
+ * EXTERN: int __db_dbm_dbrdonly __P((void));
+ * EXTERN: int __db_dbm_delete __P((datum));
+ * EXTERN: int __db_dbm_dirf __P((void));
+ * EXTERN: datum __db_dbm_fetch __P((datum));
+ * EXTERN: datum __db_dbm_firstkey __P((void));
+ * EXTERN: int __db_dbm_init __P((char *));
+ * EXTERN: datum __db_dbm_nextkey __P((datum));
+ * EXTERN: int __db_dbm_pagf __P((void));
+ * EXTERN: int __db_dbm_store __P((datum, datum));
+ *
+ * EXTERN: #endif
+ */
+
+/*
* The DBM routines, which call the NDBM routines.
*/
static DBM *__cur_db;
@@ -210,7 +240,7 @@ __db_ndbm_open(file, oflags, mode)
if ((ret = dbp->set_pagesize(dbp, 4096)) != 0 ||
(ret = dbp->set_h_ffactor(dbp, 40)) != 0 ||
(ret = dbp->set_h_nelem(dbp, 1)) != 0 ||
- (ret = dbp->open(dbp,
+ (ret = dbp->open(dbp, NULL,
path, NULL, DB_HASH, __db_oflags(oflags), mode)) != 0) {
__os_set_errno(ret);
return (NULL);
@@ -277,7 +307,7 @@ __db_ndbm_fetch(dbm, key)
__os_set_errno(ENOENT);
else {
__os_set_errno(ret);
- F_SET(dbc->dbp, DB_DBM_ERROR);
+ F_SET(dbc->dbp, DB_AM_DBM_ERROR);
}
}
return (data);
@@ -312,7 +342,7 @@ __db_ndbm_firstkey(dbm)
__os_set_errno(ENOENT);
else {
__os_set_errno(ret);
- F_SET(dbc->dbp, DB_DBM_ERROR);
+ F_SET(dbc->dbp, DB_AM_DBM_ERROR);
}
}
return (key);
@@ -347,7 +377,7 @@ __db_ndbm_nextkey(dbm)
__os_set_errno(ENOENT);
else {
__os_set_errno(ret);
- F_SET(dbc->dbp, DB_DBM_ERROR);
+ F_SET(dbc->dbp, DB_AM_DBM_ERROR);
}
}
return (key);
@@ -380,7 +410,7 @@ __db_ndbm_delete(dbm, key)
__os_set_errno(ENOENT);
else {
__os_set_errno(ret);
- F_SET(dbc->dbp, DB_DBM_ERROR);
+ F_SET(dbc->dbp, DB_AM_DBM_ERROR);
}
return (-1);
}
@@ -419,7 +449,7 @@ __db_ndbm_store(dbm, key, data, flags)
return (1);
__os_set_errno(ret);
- F_SET(dbc->dbp, DB_DBM_ERROR);
+ F_SET(dbc->dbp, DB_AM_DBM_ERROR);
return (-1);
}
@@ -431,7 +461,7 @@ __db_ndbm_error(dbm)
dbc = (DBC *)dbm;
- return (F_ISSET(dbc->dbp, DB_DBM_ERROR));
+ return (F_ISSET(dbc->dbp, DB_AM_DBM_ERROR));
}
int
@@ -442,7 +472,7 @@ __db_ndbm_clearerr(dbm)
dbc = (DBC *)dbm;
- F_CLR(dbc->dbp, DB_DBM_ERROR);
+ F_CLR(dbc->dbp, DB_AM_DBM_ERROR);
return (0);
}
diff --git a/bdb/dbreg/dbreg.c b/bdb/dbreg/dbreg.c
new file mode 100644
index 00000000000..289fe67ed50
--- /dev/null
+++ b/bdb/dbreg/dbreg.c
@@ -0,0 +1,450 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: dbreg.c,v 11.68 2002/08/28 19:05:27 margo Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/log.h"
+#include "dbinc/txn.h"
+
+/*
+ * The dbreg subsystem, as its name implies, registers database handles so
+ * that we can associate log messages with them without logging a filename
+ * or a full, unique DB ID. Instead, we assign each dbp an int32_t which is
+ * easy and cheap to log, and use this subsystem to map back and forth.
+ *
+ * Overview of how dbreg ids are managed:
+ *
+ * OPEN
+ * dbreg_setup (Creates FNAME struct.)
+ * dbreg_new_id (Assigns new ID to dbp and logs it. May be postponed
+ * until we attempt to log something else using that dbp, if the dbp
+ * was opened on a replication client.)
+ *
+ * CLOSE
+ * dbreg_close_id (Logs closure of dbp/revocation of ID.)
+ * dbreg_revoke_id (As name implies, revokes ID.)
+ * dbreg_teardown (Destroys FNAME.)
+ *
+ * RECOVERY
+ * dbreg_setup
+ * dbreg_assign_id (Assigns a particular ID we have in the log to a dbp.)
+ *
+ * sometimes: dbreg_revoke_id; dbreg_teardown
+ * other times: normal close path
+ *
+ * A note about locking:
+ *
+ * FNAME structures are referenced only by their corresponding dbp's
+ * until they have a valid id.
+ *
+ * Once they have a valid id, they must get linked into the log
+ * region list so they can get logged on checkpoints.
+ *
+ * An FNAME that may/does have a valid id must be accessed under
+ * protection of the fq_mutex, with the following exception:
+ *
+ * We don't want to have to grab the fq_mutex on every log
+ * record, and it should be safe not to do so when we're just
+ * looking at the id, because once allocated, the id should
+ * not change under a handle until the handle is closed.
+ *
+ * If a handle is closed during an attempt by another thread to
+ * log with it, well, the application doing the close deserves to
+ * go down in flames and a lot else is about to fail anyway.
+ *
+ * When in the course of logging we encounter an invalid id
+ * and go to allocate it lazily, we *do* need to check again
+ * after grabbing the mutex, because it's possible to race with
+ * another thread that has also decided that it needs to allocate
+ * an id lazily.
+ *
+ * See SR #5623 for further discussion of the new dbreg design.
+ */
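The lazy-allocation race described above is handled with a check, lock, re-check sequence. The following condensed sketch paraphrases what __dbreg_new_id() below does; the free-list handling and logging are elided, and the function name is illustrative:

/* Condensed sketch of the double check described above. */
static void
dbreg_lazy_id_sketch(DB_ENV *dbenv, LOG *lp, FNAME *fnp)
{
	/* Cheap unlocked check: once assigned, an id does not change
	 * until the handle is closed. */
	if (fnp->id != DB_LOGFILEID_INVALID)
		return;

	MUTEX_LOCK(dbenv, &lp->fq_mutex);
	/* Re-check under the mutex: another thread may have won the race. */
	if (fnp->id == DB_LOGFILEID_INVALID) {
		/* ... allocate an id, log the open, link into lp->fq ... */
	}
	MUTEX_UNLOCK(dbenv, &lp->fq_mutex);
}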
+
+/*
+ * __dbreg_setup --
+ * Allocate and initialize an FNAME structure. The FNAME structures
+ * live in the log shared region and map one-to-one with open database handles.
+ * When the handle needs to be logged, the FNAME should have a valid fid
+ * allocated. If the handle currently isn't logged, it still has an FNAME
+ * entry. If we later discover that the handle needs to be logged, we can
+ * allocate an id for it later. (This happens when the handle is on a
+ * replication client that later becomes a master.)
+ *
+ * PUBLIC: int __dbreg_setup __P((DB *, const char *, u_int32_t));
+ */
+int
+__dbreg_setup(dbp, name, create_txnid)
+ DB *dbp;
+ const char *name;
+ u_int32_t create_txnid;
+{
+ DB_ENV *dbenv;
+ DB_LOG *dblp;
+ FNAME *fnp;
+ int ret;
+ size_t len;
+ void *namep;
+
+ dbenv = dbp->dbenv;
+ dblp = dbenv->lg_handle;
+
+ fnp = NULL;
+ namep = NULL;
+
+ /* Allocate an FNAME and, if necessary, a buffer for the name itself. */
+ R_LOCK(dbenv, &dblp->reginfo);
+ if ((ret =
+ __db_shalloc(dblp->reginfo.addr, sizeof(FNAME), 0, &fnp)) != 0) {
+ R_UNLOCK(dbenv, &dblp->reginfo);
+ return (ret);
+ }
+ memset(fnp, 0, sizeof(FNAME));
+ if (name != NULL) {
+ len = strlen(name) + 1;
+ if ((ret = __db_shalloc(dblp->reginfo.addr,
+ len, 0, &namep)) != 0) {
+ R_UNLOCK(dbenv, &dblp->reginfo);
+ return (ret);
+ }
+ fnp->name_off = R_OFFSET(&dblp->reginfo, namep);
+ memcpy(namep, name, len);
+ } else
+ fnp->name_off = INVALID_ROFF;
+
+ R_UNLOCK(dbenv, &dblp->reginfo);
+
+ /*
+ * Fill in all the remaining info that we'll need later to register
+ * the file, if we use it for logging.
+ */
+ fnp->id = DB_LOGFILEID_INVALID;
+ fnp->s_type = dbp->type;
+ memcpy(fnp->ufid, dbp->fileid, DB_FILE_ID_LEN);
+ fnp->meta_pgno = dbp->meta_pgno;
+ fnp->create_txnid = create_txnid;
+
+ dbp->log_filename = fnp;
+
+ return (0);
+}
+
+/*
+ * __dbreg_teardown --
+ * Destroy a DB handle's FNAME struct.
+ *
+ * PUBLIC: int __dbreg_teardown __P((DB *));
+ */
+int
+__dbreg_teardown(dbp)
+ DB *dbp;
+{
+ DB_ENV *dbenv;
+ DB_LOG *dblp;
+ FNAME *fnp;
+
+ dbenv = dbp->dbenv;
+ dblp = dbenv->lg_handle;
+ fnp = dbp->log_filename;
+
+ /*
+ * We may not have an FNAME if we were never opened. This is not an
+ * error.
+ */
+ if (fnp == NULL)
+ return (0);
+
+ DB_ASSERT(fnp->id == DB_LOGFILEID_INVALID);
+
+ R_LOCK(dbenv, &dblp->reginfo);
+ if (fnp->name_off != INVALID_ROFF)
+ __db_shalloc_free(dblp->reginfo.addr,
+ R_ADDR(&dblp->reginfo, fnp->name_off));
+ __db_shalloc_free(dblp->reginfo.addr, fnp);
+ R_UNLOCK(dbenv, &dblp->reginfo);
+
+ dbp->log_filename = NULL;
+
+ return (0);
+}
+
+/*
+ * __dbreg_new_id --
+ * Assign an unused dbreg id to this database handle.
+ *
+ * PUBLIC: int __dbreg_new_id __P((DB *, DB_TXN *));
+ */
+int
+__dbreg_new_id(dbp, txn)
+ DB *dbp;
+ DB_TXN *txn;
+{
+ DBT fid_dbt, r_name;
+ DB_ENV *dbenv;
+ DB_LOG *dblp;
+ DB_LSN unused;
+ FNAME *fnp;
+ LOG *lp;
+ int32_t id;
+ int ret;
+
+ dbenv = dbp->dbenv;
+ dblp = dbenv->lg_handle;
+ lp = dblp->reginfo.primary;
+ fnp = dbp->log_filename;
+
+ /* The fq_mutex protects the FNAME list and id management. */
+ MUTEX_LOCK(dbenv, &lp->fq_mutex);
+
+ /*
+ * It's possible that after deciding we needed to call this function,
+ * someone else allocated an ID before we grabbed the lock. Check
+ * to make sure there was no race and we have something useful to do.
+ */
+ if (fnp->id != DB_LOGFILEID_INVALID) {
+ MUTEX_UNLOCK(dbenv, &lp->fq_mutex);
+ return (0);
+ }
+
+ /* Get an unused ID from the free list. */
+ if ((ret = __dbreg_pop_id(dbenv, &id)) != 0)
+ goto err;
+
+ /* If no ID was found, allocate a new one. */
+ if (id == DB_LOGFILEID_INVALID)
+ id = lp->fid_max++;
+
+ fnp->id = id;
+
+ /* Hook the FNAME into the list of open files. */
+ SH_TAILQ_INSERT_HEAD(&lp->fq, fnp, q, __fname);
+
+ /*
+ * Log the registry. We should only request a new ID in situations
+ * where logging is reasonable.
+ */
+ DB_ASSERT(!F_ISSET(dbp, DB_AM_RECOVER));
+
+ memset(&fid_dbt, 0, sizeof(fid_dbt));
+ memset(&r_name, 0, sizeof(r_name));
+ if (fnp->name_off != INVALID_ROFF) {
+ r_name.data = R_ADDR(&dblp->reginfo, fnp->name_off);
+ r_name.size = (u_int32_t)strlen((char *)r_name.data) + 1;
+ }
+ fid_dbt.data = dbp->fileid;
+ fid_dbt.size = DB_FILE_ID_LEN;
+ if ((ret = __dbreg_register_log(dbenv, txn, &unused, 0, LOG_OPEN,
+ r_name.size == 0 ? NULL : &r_name, &fid_dbt, id, fnp->s_type,
+ fnp->meta_pgno, fnp->create_txnid)) != 0)
+ goto err;
+
+ DB_ASSERT(dbp->type == fnp->s_type);
+ DB_ASSERT(dbp->meta_pgno == fnp->meta_pgno);
+
+ if ((ret = __dbreg_add_dbentry(dbenv, dblp, dbp, id)) != 0)
+ goto err;
+
+err: MUTEX_UNLOCK(dbenv, &lp->fq_mutex);
+ return (ret);
+}
+
+/*
+ * __dbreg_assign_id --
+ * Assign a particular dbreg id to this database handle.
+ *
+ * PUBLIC: int __dbreg_assign_id __P((DB *, int32_t));
+ */
+int
+__dbreg_assign_id(dbp, id)
+ DB *dbp;
+ int32_t id;
+{
+ DB *close_dbp;
+ DB_ENV *dbenv;
+ DB_LOG *dblp;
+ FNAME *close_fnp, *fnp;
+ LOG *lp;
+ int ret;
+
+ dbenv = dbp->dbenv;
+ dblp = dbenv->lg_handle;
+ lp = dblp->reginfo.primary;
+ fnp = dbp->log_filename;
+
+ close_dbp = NULL;
+ close_fnp = NULL;
+
+ /* The fq_mutex protects the FNAME list and id management. */
+ MUTEX_LOCK(dbenv, &lp->fq_mutex);
+
+ /* We should only call this on DB handles that have no ID. */
+ DB_ASSERT(fnp->id == DB_LOGFILEID_INVALID);
+
+ /*
+ * Make sure there isn't already a file open with this ID. There can
+ * be in recovery, if we're recovering across a point where an ID got
+ * reused.
+ */
+ if (__dbreg_id_to_fname(dblp, id, 1, &close_fnp) == 0) {
+ /*
+ * We want to save off any dbp we have open with this id.
+ * We can't safely close it now, because we hold the fq_mutex,
+ * but we should be able to rely on it being open in this
+ * process, and we're running recovery, so no other thread
+ * should muck with it if we just put off closing it until
+ * we're ready to return.
+ *
+ * Once we have the dbp, revoke its id; we're about to
+ * reuse it.
+ */
+ ret = __dbreg_id_to_db_int(dbenv, NULL, &close_dbp, id, 0, 0);
+ if (ret == ENOENT) {
+ ret = 0;
+ goto cont;
+ } else if (ret != 0)
+ goto err;
+
+ if ((ret = __dbreg_revoke_id(close_dbp, 1)) != 0)
+ goto err;
+ }
+
+ /*
+ * Remove this ID from the free list, if it's there, and make sure
+ * we don't allocate it anew.
+ */
+cont: if ((ret = __dbreg_pluck_id(dbenv, id)) != 0)
+ goto err;
+ if (id >= lp->fid_max)
+ lp->fid_max = id + 1;
+
+ /* Now go ahead and assign the id to our dbp. */
+ fnp->id = id;
+ SH_TAILQ_INSERT_HEAD(&lp->fq, fnp, q, __fname);
+
+ if ((ret = __dbreg_add_dbentry(dbenv, dblp, dbp, id)) != 0)
+ goto err;
+
+err: MUTEX_UNLOCK(dbenv, &lp->fq_mutex);
+
+ /* There's nothing useful that our caller can do if this close fails. */
+ if (close_dbp != NULL)
+ (void)close_dbp->close(close_dbp, DB_NOSYNC);
+
+ return (ret);
+}
+
+/*
+ * __dbreg_revoke_id --
+ * Take a log id away from a dbp, in preparation for closing it,
+ * but without logging the close.
+ *
+ * PUBLIC: int __dbreg_revoke_id __P((DB *, int));
+ */
+int
+__dbreg_revoke_id(dbp, have_lock)
+ DB *dbp;
+ int have_lock;
+{
+ DB_ENV *dbenv;
+ DB_LOG *dblp;
+ FNAME *fnp;
+ LOG *lp;
+ int32_t id;
+ int ret;
+
+ dbenv = dbp->dbenv;
+ dblp = dbenv->lg_handle;
+ lp = dblp->reginfo.primary;
+ fnp = dbp->log_filename;
+
+ /* If we lack an ID, this is a null-op. */
+ if (fnp == NULL || fnp->id == DB_LOGFILEID_INVALID)
+ return (0);
+
+ if (!have_lock)
+ MUTEX_LOCK(dbenv, &lp->fq_mutex);
+
+ id = fnp->id;
+ fnp->id = DB_LOGFILEID_INVALID;
+
+ /* Remove the FNAME from the list of open files. */
+ SH_TAILQ_REMOVE(&lp->fq, fnp, q, __fname);
+
+ /* Remove this id from the dbentry table. */
+ __dbreg_rem_dbentry(dblp, id);
+
+ /* Push this id onto the free list. */
+ ret = __dbreg_push_id(dbenv, id);
+
+ if (!have_lock)
+ MUTEX_UNLOCK(dbenv, &lp->fq_mutex);
+ return (ret);
+}
+
+/*
+ * __dbreg_close_id --
+ * Take a dbreg id away from a dbp that we're closing, and log
+ * the unregistry.
+ *
+ * PUBLIC: int __dbreg_close_id __P((DB *, DB_TXN *));
+ */
+int
+__dbreg_close_id(dbp, txn)
+ DB *dbp;
+ DB_TXN *txn;
+{
+ DBT fid_dbt, r_name, *dbtp;
+ DB_ENV *dbenv;
+ DB_LOG *dblp;
+ DB_LSN r_unused;
+ FNAME *fnp;
+ LOG *lp;
+ int ret;
+
+ dbenv = dbp->dbenv;
+ dblp = dbenv->lg_handle;
+ lp = dblp->reginfo.primary;
+ fnp = dbp->log_filename;
+
+ /* If we lack an ID, this is a null-op. */
+ if (fnp == NULL || fnp->id == DB_LOGFILEID_INVALID)
+ return (0);
+
+ MUTEX_LOCK(dbenv, &lp->fq_mutex);
+
+ if (fnp->name_off == INVALID_ROFF)
+ dbtp = NULL;
+ else {
+ memset(&r_name, 0, sizeof(r_name));
+ r_name.data = R_ADDR(&dblp->reginfo, fnp->name_off);
+ r_name.size =
+ (u_int32_t)strlen((char *)r_name.data) + 1;
+ dbtp = &r_name;
+ }
+ memset(&fid_dbt, 0, sizeof(fid_dbt));
+ fid_dbt.data = fnp->ufid;
+ fid_dbt.size = DB_FILE_ID_LEN;
+ if ((ret = __dbreg_register_log(dbenv, txn,
+ &r_unused, 0, LOG_CLOSE, dbtp, &fid_dbt, fnp->id,
+ fnp->s_type, fnp->meta_pgno, TXN_INVALID)) != 0)
+ goto err;
+
+ ret = __dbreg_revoke_id(dbp, 1);
+
+err: MUTEX_UNLOCK(dbenv, &lp->fq_mutex);
+ return (ret);
+}
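Taken together with __dbreg_new_id() and __dbreg_assign_id(), the routines above implement the lifecycle sketched in the overview comment. A hedged illustration of the ordinary logged open/close path, using only the PUBLIC interfaces declared in this file; dbp, txn and name are assumed to be supplied by the surrounding open/close code, which is elided:

/* Illustrative only: the normal logged open/close path. */
static int
dbreg_lifecycle_sketch(DB *dbp, DB_TXN *txn, const char *name)
{
	int ret;

	/* Open: create the FNAME, then assign and log a fresh id. */
	if ((ret = __dbreg_setup(dbp, name, TXN_INVALID)) != 0)
		return (ret);
	if ((ret = __dbreg_new_id(dbp, txn)) != 0)
		return (ret);

	/* ... log records referencing dbp now carry its dbreg id ... */

	/* Close: log the unregistry, then destroy the FNAME. */
	if ((ret = __dbreg_close_id(dbp, txn)) != 0)
		return (ret);
	return (__dbreg_teardown(dbp));
}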
diff --git a/bdb/dbreg/dbreg.src b/bdb/dbreg/dbreg.src
new file mode 100644
index 00000000000..18429471e82
--- /dev/null
+++ b/bdb/dbreg/dbreg.src
@@ -0,0 +1,49 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: dbreg.src,v 10.22 2002/03/27 04:31:44 bostic Exp $
+ */
+
+PREFIX __dbreg
+DBPRIVATE
+
+INCLUDE #include "db_config.h"
+INCLUDE
+INCLUDE #ifndef NO_SYSTEM_INCLUDES
+INCLUDE #include <sys/types.h>
+INCLUDE
+INCLUDE #include <ctype.h>
+INCLUDE #include <string.h>
+INCLUDE #endif
+INCLUDE
+INCLUDE #include "db_int.h"
+INCLUDE #include "dbinc/crypto.h"
+INCLUDE #include "dbinc/db_page.h"
+INCLUDE #include "dbinc/db_dispatch.h"
+INCLUDE #include "dbinc/db_am.h"
+INCLUDE #include "dbinc/log.h"
+INCLUDE #include "dbinc/rep.h"
+INCLUDE #include "dbinc/txn.h"
+INCLUDE
+
+/*
+ * Used for registering name/id translations at open or close.
+ * opcode: register or unregister
+ * name: file name
+ * fileid: unique file id
+ * ftype: file type (database access method)
+ * id: transaction id of the subtransaction that created the fs object
+ */
+BEGIN register 2
+ARG opcode u_int32_t lu
+DBT name DBT s
+DBT uid DBT s
+ARG fileid int32_t ld
+ARG ftype DBTYPE lx
+ARG meta_pgno db_pgno_t lu
+ARG id u_int32_t lx
+END
diff --git a/bdb/dbreg/dbreg_auto.c b/bdb/dbreg/dbreg_auto.c
new file mode 100644
index 00000000000..91eace3f4bf
--- /dev/null
+++ b/bdb/dbreg/dbreg_auto.c
@@ -0,0 +1,358 @@
+/* Do not edit: automatically built by gen_rec.awk. */
+#include "db_config.h"
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <ctype.h>
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/crypto.h"
+#include "dbinc/db_page.h"
+#include "dbinc/db_dispatch.h"
+#include "dbinc/db_am.h"
+#include "dbinc/log.h"
+#include "dbinc/rep.h"
+#include "dbinc/txn.h"
+
+/*
+ * PUBLIC: int __dbreg_register_log __P((DB_ENV *, DB_TXN *,
+ * PUBLIC: DB_LSN *, u_int32_t, u_int32_t, const DBT *, const DBT *,
+ * PUBLIC: int32_t, DBTYPE, db_pgno_t, u_int32_t));
+ */
+int
+__dbreg_register_log(dbenv, txnid, ret_lsnp, flags,
+ opcode, name, uid, fileid, ftype, meta_pgno,
+ id)
+ DB_ENV *dbenv;
+ DB_TXN *txnid;
+ DB_LSN *ret_lsnp;
+ u_int32_t flags;
+ u_int32_t opcode;
+ const DBT *name;
+ const DBT *uid;
+ int32_t fileid;
+ DBTYPE ftype;
+ db_pgno_t meta_pgno;
+ u_int32_t id;
+{
+ DBT logrec;
+ DB_LSN *lsnp, null_lsn;
+ u_int32_t zero;
+ u_int32_t uinttmp;
+ u_int32_t npad, rectype, txn_num;
+ int ret;
+ u_int8_t *bp;
+
+ rectype = DB___dbreg_register;
+ npad = 0;
+
+ if (txnid == NULL) {
+ txn_num = 0;
+ null_lsn.file = 0;
+ null_lsn.offset = 0;
+ lsnp = &null_lsn;
+ } else {
+ if (TAILQ_FIRST(&txnid->kids) != NULL &&
+ (ret = __txn_activekids(dbenv, rectype, txnid)) != 0)
+ return (ret);
+ txn_num = txnid->txnid;
+ lsnp = &txnid->last_lsn;
+ }
+
+ logrec.size = sizeof(rectype) + sizeof(txn_num) + sizeof(DB_LSN)
+ + sizeof(u_int32_t)
+ + sizeof(u_int32_t) + (name == NULL ? 0 : name->size)
+ + sizeof(u_int32_t) + (uid == NULL ? 0 : uid->size)
+ + sizeof(u_int32_t)
+ + sizeof(u_int32_t)
+ + sizeof(u_int32_t)
+ + sizeof(u_int32_t);
+ if (CRYPTO_ON(dbenv)) {
+ npad =
+ ((DB_CIPHER *)dbenv->crypto_handle)->adj_size(logrec.size);
+ logrec.size += npad;
+ }
+
+ if ((ret = __os_malloc(dbenv,
+ logrec.size, &logrec.data)) != 0)
+ return (ret);
+
+ if (npad > 0)
+ memset((u_int8_t *)logrec.data + logrec.size - npad, 0, npad);
+
+ bp = logrec.data;
+
+ memcpy(bp, &rectype, sizeof(rectype));
+ bp += sizeof(rectype);
+
+ memcpy(bp, &txn_num, sizeof(txn_num));
+ bp += sizeof(txn_num);
+
+ memcpy(bp, lsnp, sizeof(DB_LSN));
+ bp += sizeof(DB_LSN);
+
+ uinttmp = (u_int32_t)opcode;
+ memcpy(bp, &uinttmp, sizeof(uinttmp));
+ bp += sizeof(uinttmp);
+
+ if (name == NULL) {
+ zero = 0;
+ memcpy(bp, &zero, sizeof(u_int32_t));
+ bp += sizeof(u_int32_t);
+ } else {
+ memcpy(bp, &name->size, sizeof(name->size));
+ bp += sizeof(name->size);
+ memcpy(bp, name->data, name->size);
+ bp += name->size;
+ }
+
+ if (uid == NULL) {
+ zero = 0;
+ memcpy(bp, &zero, sizeof(u_int32_t));
+ bp += sizeof(u_int32_t);
+ } else {
+ memcpy(bp, &uid->size, sizeof(uid->size));
+ bp += sizeof(uid->size);
+ memcpy(bp, uid->data, uid->size);
+ bp += uid->size;
+ }
+
+ uinttmp = (u_int32_t)fileid;
+ memcpy(bp, &uinttmp, sizeof(uinttmp));
+ bp += sizeof(uinttmp);
+
+ uinttmp = (u_int32_t)ftype;
+ memcpy(bp, &uinttmp, sizeof(uinttmp));
+ bp += sizeof(uinttmp);
+
+ uinttmp = (u_int32_t)meta_pgno;
+ memcpy(bp, &uinttmp, sizeof(uinttmp));
+ bp += sizeof(uinttmp);
+
+ uinttmp = (u_int32_t)id;
+ memcpy(bp, &uinttmp, sizeof(uinttmp));
+ bp += sizeof(uinttmp);
+
+ DB_ASSERT((u_int32_t)(bp - (u_int8_t *)logrec.data) <= logrec.size);
+ ret = dbenv->log_put(dbenv,
+ ret_lsnp, (DBT *)&logrec, flags | DB_NOCOPY);
+ if (txnid != NULL && ret == 0)
+ txnid->last_lsn = *ret_lsnp;
+#ifdef LOG_DIAGNOSTIC
+ if (ret != 0)
+ (void)__dbreg_register_print(dbenv,
+ (DBT *)&logrec, ret_lsnp, NULL, NULL);
+#endif
+ __os_free(dbenv, logrec.data);
+ return (ret);
+}
+
+/*
+ * PUBLIC: int __dbreg_register_getpgnos __P((DB_ENV *, DBT *,
+ * PUBLIC: DB_LSN *, db_recops, void *));
+ */
+int
+__dbreg_register_getpgnos(dbenv, rec, lsnp, notused1, summary)
+ DB_ENV *dbenv;
+ DBT *rec;
+ DB_LSN *lsnp;
+ db_recops notused1;
+ void *summary;
+{
+ TXN_RECS *t;
+ int ret;
+ COMPQUIET(rec, NULL);
+ COMPQUIET(notused1, DB_TXN_ABORT);
+
+ t = (TXN_RECS *)summary;
+
+ if ((ret = __rep_check_alloc(dbenv, t, 1)) != 0)
+ return (ret);
+
+ t->array[t->npages].flags = LSN_PAGE_NOLOCK;
+ t->array[t->npages].lsn = *lsnp;
+ t->array[t->npages].fid = DB_LOGFILEID_INVALID;
+ memset(&t->array[t->npages].pgdesc, 0,
+ sizeof(t->array[t->npages].pgdesc));
+
+ t->npages++;
+
+ return (0);
+}
+
+/*
+ * PUBLIC: int __dbreg_register_print __P((DB_ENV *, DBT *, DB_LSN *,
+ * PUBLIC: db_recops, void *));
+ */
+int
+__dbreg_register_print(dbenv, dbtp, lsnp, notused2, notused3)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops notused2;
+ void *notused3;
+{
+ __dbreg_register_args *argp;
+ u_int32_t i;
+ int ch;
+ int ret;
+
+ notused2 = DB_TXN_ABORT;
+ notused3 = NULL;
+
+ if ((ret = __dbreg_register_read(dbenv, dbtp->data, &argp)) != 0)
+ return (ret);
+ (void)printf(
+ "[%lu][%lu]__dbreg_register: rec: %lu txnid %lx prevlsn [%lu][%lu]\n",
+ (u_long)lsnp->file,
+ (u_long)lsnp->offset,
+ (u_long)argp->type,
+ (u_long)argp->txnid->txnid,
+ (u_long)argp->prev_lsn.file,
+ (u_long)argp->prev_lsn.offset);
+ (void)printf("\topcode: %lu\n", (u_long)argp->opcode);
+ (void)printf("\tname: ");
+ for (i = 0; i < argp->name.size; i++) {
+ ch = ((u_int8_t *)argp->name.data)[i];
+ printf(isprint(ch) || ch == 0x0a ? "%c" : "%#x ", ch);
+ }
+ (void)printf("\n");
+ (void)printf("\tuid: ");
+ for (i = 0; i < argp->uid.size; i++) {
+ ch = ((u_int8_t *)argp->uid.data)[i];
+ printf(isprint(ch) || ch == 0x0a ? "%c" : "%#x ", ch);
+ }
+ (void)printf("\n");
+ (void)printf("\tfileid: %ld\n", (long)argp->fileid);
+ (void)printf("\tftype: 0x%lx\n", (u_long)argp->ftype);
+ (void)printf("\tmeta_pgno: %lu\n", (u_long)argp->meta_pgno);
+ (void)printf("\tid: 0x%lx\n", (u_long)argp->id);
+ (void)printf("\n");
+ __os_free(dbenv, argp);
+ return (0);
+}
+
+/*
+ * PUBLIC: int __dbreg_register_read __P((DB_ENV *, void *,
+ * PUBLIC: __dbreg_register_args **));
+ */
+int
+__dbreg_register_read(dbenv, recbuf, argpp)
+ DB_ENV *dbenv;
+ void *recbuf;
+ __dbreg_register_args **argpp;
+{
+ __dbreg_register_args *argp;
+ u_int32_t uinttmp;
+ u_int8_t *bp;
+ int ret;
+
+ if ((ret = __os_malloc(dbenv,
+ sizeof(__dbreg_register_args) + sizeof(DB_TXN), &argp)) != 0)
+ return (ret);
+
+ argp->txnid = (DB_TXN *)&argp[1];
+
+ bp = recbuf;
+ memcpy(&argp->type, bp, sizeof(argp->type));
+ bp += sizeof(argp->type);
+
+ memcpy(&argp->txnid->txnid, bp, sizeof(argp->txnid->txnid));
+ bp += sizeof(argp->txnid->txnid);
+
+ memcpy(&argp->prev_lsn, bp, sizeof(DB_LSN));
+ bp += sizeof(DB_LSN);
+
+ memcpy(&uinttmp, bp, sizeof(uinttmp));
+ argp->opcode = (u_int32_t)uinttmp;
+ bp += sizeof(uinttmp);
+
+ memset(&argp->name, 0, sizeof(argp->name));
+ memcpy(&argp->name.size, bp, sizeof(u_int32_t));
+ bp += sizeof(u_int32_t);
+ argp->name.data = bp;
+ bp += argp->name.size;
+
+ memset(&argp->uid, 0, sizeof(argp->uid));
+ memcpy(&argp->uid.size, bp, sizeof(u_int32_t));
+ bp += sizeof(u_int32_t);
+ argp->uid.data = bp;
+ bp += argp->uid.size;
+
+ memcpy(&uinttmp, bp, sizeof(uinttmp));
+ argp->fileid = (int32_t)uinttmp;
+ bp += sizeof(uinttmp);
+
+ memcpy(&uinttmp, bp, sizeof(uinttmp));
+ argp->ftype = (DBTYPE)uinttmp;
+ bp += sizeof(uinttmp);
+
+ memcpy(&uinttmp, bp, sizeof(uinttmp));
+ argp->meta_pgno = (db_pgno_t)uinttmp;
+ bp += sizeof(uinttmp);
+
+ memcpy(&uinttmp, bp, sizeof(uinttmp));
+ argp->id = (u_int32_t)uinttmp;
+ bp += sizeof(uinttmp);
+
+ *argpp = argp;
+ return (0);
+}
+
+/*
+ * PUBLIC: int __dbreg_init_print __P((DB_ENV *, int (***)(DB_ENV *,
+ * PUBLIC: DBT *, DB_LSN *, db_recops, void *), size_t *));
+ */
+int
+__dbreg_init_print(dbenv, dtabp, dtabsizep)
+ DB_ENV *dbenv;
+ int (***dtabp)__P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ size_t *dtabsizep;
+{
+ int ret;
+
+ if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep,
+ __dbreg_register_print, DB___dbreg_register)) != 0)
+ return (ret);
+ return (0);
+}
+
+/*
+ * PUBLIC: int __dbreg_init_getpgnos __P((DB_ENV *,
+ * PUBLIC: int (***)(DB_ENV *, DBT *, DB_LSN *, db_recops, void *),
+ * PUBLIC: size_t *));
+ */
+int
+__dbreg_init_getpgnos(dbenv, dtabp, dtabsizep)
+ DB_ENV *dbenv;
+ int (***dtabp)__P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ size_t *dtabsizep;
+{
+ int ret;
+
+ if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep,
+ __dbreg_register_getpgnos, DB___dbreg_register)) != 0)
+ return (ret);
+ return (0);
+}
+
+/*
+ * PUBLIC: int __dbreg_init_recover __P((DB_ENV *, int (***)(DB_ENV *,
+ * PUBLIC: DBT *, DB_LSN *, db_recops, void *), size_t *));
+ */
+int
+__dbreg_init_recover(dbenv, dtabp, dtabsizep)
+ DB_ENV *dbenv;
+ int (***dtabp)__P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ size_t *dtabsizep;
+{
+ int ret;
+
+ if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep,
+ __dbreg_register_recover, DB___dbreg_register)) != 0)
+ return (ret);
+ return (0);
+}
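The three __dbreg_init_* routines above share one pattern: each registers a callback per record type in a caller-supplied dispatch table through __db_add_recovery(). A hedged sketch of how recovery code might accumulate such a table across subsystems, using only prototypes that appear elsewhere in this patch; the actual dispatch by record type is simplified away, and the function name is illustrative:

/* Sketch: build a dispatch table covering the dbreg and txn records. */
static int
build_recover_dtab(DB_ENV *dbenv,
    int (***dtabp)(DB_ENV *, DBT *, DB_LSN *, db_recops, void *),
    size_t *dtabsizep)
{
	int ret;

	*dtabp = NULL;
	*dtabsizep = 0;
	if ((ret = __dbreg_init_recover(dbenv, dtabp, dtabsizep)) != 0)
		return (ret);
	if ((ret = __txn_init_recover(dbenv, dtabp, dtabsizep)) != 0)
		return (ret);
	return (0);
}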
diff --git a/bdb/dbreg/dbreg_rec.c b/bdb/dbreg/dbreg_rec.c
new file mode 100644
index 00000000000..ba3ba0e06d9
--- /dev/null
+++ b/bdb/dbreg/dbreg_rec.c
@@ -0,0 +1,362 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+/*
+ * Copyright (c) 1995, 1996
+ * The President and Fellows of Harvard University. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: dbreg_rec.c,v 11.108 2002/08/14 20:04:25 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_page.h"
+#include "dbinc/db_am.h"
+#include "dbinc/log.h"
+#include "dbinc/txn.h"
+
+static int __dbreg_open_file __P((DB_ENV *,
+ DB_TXN *, __dbreg_register_args *, void *));
+
+/*
+ * PUBLIC: int __dbreg_register_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__dbreg_register_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ DB_ENTRY *dbe;
+ DB_LOG *dblp;
+ DB *dbp;
+ __dbreg_register_args *argp;
+ int do_close, do_open, do_rem, ret, t_ret;
+
+ dblp = dbenv->lg_handle;
+ dbp = NULL;
+
+#ifdef DEBUG_RECOVER
+ REC_PRINT(__dbreg_register_print);
+#endif
+ do_open = do_close = 0;
+ if ((ret = __dbreg_register_read(dbenv, dbtp->data, &argp)) != 0)
+ goto out;
+
+ switch (argp->opcode) {
+ case LOG_OPEN:
+ if ((DB_REDO(op) ||
+ op == DB_TXN_OPENFILES || op == DB_TXN_POPENFILES))
+ do_open = 1;
+ else
+ do_close = 1;
+ break;
+
+ case LOG_CLOSE:
+ if (DB_UNDO(op))
+ do_open = 1;
+ else
+ do_close = 1;
+ break;
+ case LOG_RCLOSE:
+ /*
+		 * LOG_RCLOSE was generated by recovery because a file
+		 * was left open.  The POPENFILES pass, which is run
+		 * to open files in order to abort prepared transactions,
+		 * may not include the open for this file, so we
+		 * open it here.  Note that a normal CLOSE is
+ * not legal before the prepared transaction is
+ * committed or aborted.
+ */
+ if (DB_UNDO(op) || op == DB_TXN_POPENFILES)
+ do_open = 1;
+ else
+ do_close = 1;
+ break;
+
+ case LOG_CHECKPOINT:
+ if (DB_UNDO(op) ||
+ op == DB_TXN_OPENFILES || op == DB_TXN_POPENFILES)
+ do_open = 1;
+ break;
+ }
+
+ if (do_open) {
+ /*
+		 * We must open the db even if the meta page is not
+		 * yet written, as we may be creating a subdatabase.
+ */
+ if (op == DB_TXN_OPENFILES && argp->opcode != LOG_CHECKPOINT)
+ F_SET(dblp, DBLOG_FORCE_OPEN);
+
+ /*
+ * During an abort or an open pass to recover prepared txns,
+ * we need to make sure that we use the same locker id on the
+ * open. We pass the txnid along to ensure this.
+ */
+ ret = __dbreg_open_file(dbenv,
+ op == DB_TXN_ABORT || op == DB_TXN_POPENFILES ?
+ argp->txnid : NULL, argp, info);
+ if (ret == ENOENT || ret == EINVAL) {
+ /*
+ * If this is an OPEN while rolling forward, it's
+ * possible that the file was recreated since last
+			 * time we got here.  In that case, the deleted flag
+			 * is set when it probably should not be, so we need
+			 * to check for that case and possibly retry.
+ */
+ if (op == DB_TXN_FORWARD_ROLL &&
+ argp->txnid != 0 &&
+ dblp->dbentry[argp->fileid].deleted) {
+ dblp->dbentry[argp->fileid].deleted = 0;
+ ret =
+ __dbreg_open_file(dbenv, NULL, argp, info);
+ }
+ ret = 0;
+ }
+ F_CLR(dblp, DBLOG_FORCE_OPEN);
+ }
+
+ if (do_close) {
+ /*
+ * If we are undoing an open, or redoing a close,
+ * then we need to close the file.
+ *
+ * If the file is deleted, then we can just ignore this close.
+		 * Otherwise, we should usually have a valid dbp that we
+		 * close or whose reference count we decrement.
+ * However, if we shut down without closing a file, we may, in
+ * fact, not have the file open, and that's OK.
+ */
+ do_rem = 0;
+ MUTEX_THREAD_LOCK(dbenv, dblp->mutexp);
+ if (argp->fileid < dblp->dbentry_cnt) {
+ /*
+			 * Typically, closes should match an open, which means
+			 * that if this is a close, there should be a valid
+			 * entry in the dbentry table when we get here;
+			 * however, there is an exception.  If this is an
+ * OPENFILES pass, then we may have started from
+ * a log file other than the first, and the
+ * corresponding open appears in an earlier file.
+ * We can ignore that case, but all others are errors.
+ */
+ dbe = &dblp->dbentry[argp->fileid];
+ if (dbe->dbp == NULL && !dbe->deleted) {
+ /* No valid entry here. */
+ if ((argp->opcode != LOG_CLOSE &&
+ argp->opcode != LOG_RCLOSE) ||
+ (op != DB_TXN_OPENFILES &&
 op != DB_TXN_POPENFILES)) {
+ __db_err(dbenv,
+ "Improper file close at %lu/%lu",
+ (u_long)lsnp->file,
+ (u_long)lsnp->offset);
+ ret = EINVAL;
+ }
+ MUTEX_THREAD_UNLOCK(dbenv, dblp->mutexp);
+ goto done;
+ }
+
+ /* We have either an open entry or a deleted entry. */
+ if ((dbp = dbe->dbp) != NULL) {
+ MUTEX_THREAD_UNLOCK(dbenv, dblp->mutexp);
+ (void)__dbreg_revoke_id(dbp, 0);
+
+ /*
+ * If we're a replication client, it's
+ * possible to get here with a dbp that
+ * the user opened, but which we later
+ * assigned a fileid to. Be sure that
+ * we only close dbps that we opened in
+ * the recovery code; they should have
+ * DB_AM_RECOVER set.
+ *
+ * The only exception is if we're aborting
+ * in a normal environment; then we might
+ * get here with a non-AM_RECOVER database.
+ */
+ if (F_ISSET(dbp, DB_AM_RECOVER) ||
+ op == DB_TXN_ABORT)
+ do_rem = 1;
+ } else if (dbe->deleted) {
+ MUTEX_THREAD_UNLOCK(dbenv, dblp->mutexp);
+ __dbreg_rem_dbentry(dblp, argp->fileid);
+ }
+ } else
+ MUTEX_THREAD_UNLOCK(dbenv, dblp->mutexp);
+ if (do_rem) {
+ /*
+ * If we are undoing a create we'd better discard
+ * any buffers from the memory pool.
+ */
+ if (dbp != NULL && dbp->mpf != NULL && argp->id != 0) {
+ if ((ret = dbp->mpf->close(dbp->mpf,
+ DB_MPOOL_DISCARD)) != 0)
+ goto out;
+ dbp->mpf = NULL;
+ }
+
+ /*
+ * During recovery, all files are closed. On an abort,
+ * we only close the file if we opened it during the
+ * abort (DB_AM_RECOVER set), otherwise we simply do
+ * a __db_refresh. For the close case, if remove or
+ * rename has closed the file, don't request a sync,
+ * because the NULL mpf would be a problem.
+ */
+ if (dbp != NULL) {
+ if (op == DB_TXN_ABORT &&
+ !F_ISSET(dbp, DB_AM_RECOVER))
+ t_ret =
+ __db_refresh(dbp, NULL, DB_NOSYNC);
+ else
+ t_ret = dbp->close(dbp, DB_NOSYNC);
+ if (t_ret != 0 && ret == 0)
+ ret = t_ret;
+ }
+ }
+ }
+done: if (ret == 0)
+ *lsnp = argp->prev_lsn;
+out: if (argp != NULL)
+ __os_free(dbenv, argp);
+ return (ret);
+}
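
The heart of __dbreg_register_recover is the opcode/pass switch above, which reduces each log record to a do_open or do_close decision. Below is a hedged, standalone restatement of that decision logic in plain C; the pass and opcode enums are hypothetical stand-ins for db_recops and the LOG_* opcodes, and DB_REDO/DB_UNDO in the real code are macros over the db_recops value.

	/* Hypothetical names standing in for db_recops values and LOG_* opcodes. */
	enum pass { FORWARD_ROLL, BACKWARD_ROLL, PASS_ABORT, OPENFILES, POPENFILES };
	enum opcode { OP_OPEN, OP_CLOSE, OP_RCLOSE, OP_CHECKPOINT };

	struct action { int do_open, do_close; };

	static struct action
	decide(enum pass p, enum opcode op)
	{
		struct action a = { 0, 0 };
		int redo = (p == FORWARD_ROLL);
		int undo = (p == BACKWARD_ROLL || p == PASS_ABORT);

		switch (op) {
		case OP_OPEN:		/* redoing an open (or collecting files) opens it */
			if (redo || p == OPENFILES || p == POPENFILES)
				a.do_open = 1;
			else
				a.do_close = 1;
			break;
		case OP_CLOSE:		/* undoing a close re-opens the file */
			if (undo)
				a.do_open = 1;
			else
				a.do_close = 1;
			break;
		case OP_RCLOSE:		/* recovery-generated close: reopen on undo/POPENFILES */
			if (undo || p == POPENFILES)
				a.do_open = 1;
			else
				a.do_close = 1;
			break;
		case OP_CHECKPOINT:	/* checkpoint records never force a close */
			if (undo || p == OPENFILES || p == POPENFILES)
				a.do_open = 1;
			break;
		}
		return (a);
	}
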
+
+/*
+ * __dbreg_open_file --
+ * Called during log_register recovery. Make sure that we have an
+ * entry in the dbentry table for this ndx. Returns 0 on success,
+ * non-zero on error.
+ */
+static int
+__dbreg_open_file(dbenv, txn, argp, info)
+ DB_ENV *dbenv;
+ DB_TXN *txn;
+ __dbreg_register_args *argp;
+ void *info;
+{
+ DB_ENTRY *dbe;
+ DB_LOG *lp;
+ DB *dbp;
+ u_int32_t id;
+
+ lp = (DB_LOG *)dbenv->lg_handle;
+ /*
+ * We never re-open temporary files. Temp files are only
+ * useful during aborts in which case the dbp was entered
+ * when the file was registered. During recovery, we treat
+ * temp files as properly deleted files, allowing the open to
+ * fail and not reporting any errors when recovery fails to
+ * get a valid dbp from __dbreg_id_to_db.
+ */
+ if (argp->name.size == 0) {
+ (void)__dbreg_add_dbentry(dbenv, lp, NULL, argp->fileid);
+ return (ENOENT);
+ }
+
+ /*
+ * When we're opening, we have to check that the name we are opening
+ * is what we expect. If it's not, then we close the old file and
+ * open the new one.
+ */
+ MUTEX_THREAD_LOCK(dbenv, lp->mutexp);
+ if (argp->fileid < lp->dbentry_cnt)
+ dbe = &lp->dbentry[argp->fileid];
+ else
+ dbe = NULL;
+
+ if (dbe != NULL) {
+ if (dbe->deleted) {
+ MUTEX_THREAD_UNLOCK(dbenv, lp->mutexp);
+ return (ENOENT);
+ }
+ if ((dbp = dbe->dbp) != NULL) {
+ if (dbp->meta_pgno != argp->meta_pgno ||
+ memcmp(dbp->fileid,
+ argp->uid.data, DB_FILE_ID_LEN) != 0) {
+ MUTEX_THREAD_UNLOCK(dbenv, lp->mutexp);
+ (void)__dbreg_revoke_id(dbp, 0);
+ if (F_ISSET(dbp, DB_AM_RECOVER))
+ dbp->close(dbp, DB_NOSYNC);
+ goto reopen;
+ }
+
+ /*
+ * We should only get here if we already have the
+ * dbp from an openfiles pass, in which case, what's
+ * here had better be the same dbp.
+ */
+ DB_ASSERT(dbe->dbp == dbp);
+ MUTEX_THREAD_UNLOCK(dbenv, lp->mutexp);
+
+ /*
+ * This is a successful open. We need to record that
+ * in the txnlist so that we know how to handle the
+ * subtransaction that created the file system object.
+ */
+ if (argp->id != TXN_INVALID &&
+ __db_txnlist_update(dbenv, info,
+ argp->id, TXN_EXPECTED, NULL) == TXN_NOTFOUND)
+ (void)__db_txnlist_add(dbenv,
+ info, argp->id, TXN_EXPECTED, NULL);
+ return (0);
+ }
+ }
+
+ MUTEX_THREAD_UNLOCK(dbenv, lp->mutexp);
+
+ /*
+ * We are about to pass a recovery txn pointer into the main library.
+ * We need to make sure that any accessed fields are set appropriately.
+ */
+reopen: if (txn != NULL) {
+ id = txn->txnid;
+ memset(txn, 0, sizeof(DB_TXN));
+ txn->txnid = id;
+ txn->mgrp = dbenv->tx_handle;
+ }
+
+ return (__dbreg_do_open(dbenv, txn, lp, argp->uid.data, argp->name.data,
+ argp->ftype, argp->fileid, argp->meta_pgno, info, argp->id));
+}
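
One detail worth highlighting in __dbreg_open_file is the identity check: a cached dbp is trusted only if its meta_pgno and fileid still match the log record; otherwise it is revoked and the file is reopened. The following is a minimal standalone sketch of that check-then-reopen shape, using hypothetical entry and open-callback types that are not part of Berkeley DB.

	#include <string.h>

	#define FILE_ID_LEN	20		/* hypothetical; BDB uses DB_FILE_ID_LEN */

	struct entry {				/* hypothetical cached-handle entry */
		void *handle;			/* open handle or NULL */
		unsigned char fid[FILE_ID_LEN];	/* unique file id recorded at open */
		int deleted;
	};

	/* Hypothetical open routine supplied by the caller. */
	typedef void *(*open_fn)(const unsigned char *fid);

	/*
	 * Return a handle for fid, reusing the cached one only if its identity
	 * still matches; otherwise drop it and open afresh, which is the same
	 * check __dbreg_open_file makes against meta_pgno and dbp->fileid.
	 */
	static void *
	get_handle(struct entry *e, const unsigned char *fid, open_fn reopen)
	{
		if (e->deleted)
			return (NULL);		/* treated like ENOENT */
		if (e->handle != NULL &&
		    memcmp(e->fid, fid, FILE_ID_LEN) == 0)
			return (e->handle);	/* cached handle is still valid */

		e->handle = reopen(fid);	/* stale or missing: reopen */
		if (e->handle != NULL)
			memcpy(e->fid, fid, FILE_ID_LEN);
		return (e->handle);
	}
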
diff --git a/bdb/dbreg/dbreg_util.c b/bdb/dbreg/dbreg_util.c
new file mode 100644
index 00000000000..0db5c640adb
--- /dev/null
+++ b/bdb/dbreg/dbreg_util.c
@@ -0,0 +1,797 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: dbreg_util.c,v 11.22 2002/09/10 02:43:10 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_page.h"
+#include "dbinc/db_am.h"
+#include "dbinc/log.h"
+#include "dbinc/txn.h"
+
+static int __dbreg_check_master __P((DB_ENV *, u_int8_t *, char *));
+
+/*
+ * __dbreg_add_dbentry --
+ * Adds a DB entry to the dbreg DB entry table.
+ *
+ * PUBLIC: int __dbreg_add_dbentry __P((DB_ENV *, DB_LOG *, DB *, int32_t));
+ */
+int
+__dbreg_add_dbentry(dbenv, dblp, dbp, ndx)
+ DB_ENV *dbenv;
+ DB_LOG *dblp;
+ DB *dbp;
+ int32_t ndx;
+{
+ int32_t i;
+ int ret;
+
+ ret = 0;
+
+ MUTEX_THREAD_LOCK(dbenv, dblp->mutexp);
+
+ /*
+ * Check if we need to grow the table. Note, ndx is 0-based (the
+	 * index into the DB entry table) and dbentry_cnt is 1-based, the
+ * number of available slots.
+ */
+ if (dblp->dbentry_cnt <= ndx) {
+ if ((ret = __os_realloc(dbenv,
+ (ndx + DB_GROW_SIZE) * sizeof(DB_ENTRY),
+ &dblp->dbentry)) != 0)
+ goto err;
+
+ /* Initialize the new entries. */
+ for (i = dblp->dbentry_cnt; i < ndx + DB_GROW_SIZE; i++) {
+ dblp->dbentry[i].dbp = NULL;
+ dblp->dbentry[i].deleted = 0;
+ }
+ dblp->dbentry_cnt = i;
+ }
+
+ DB_ASSERT(dblp->dbentry[ndx].dbp == NULL);
+ dblp->dbentry[ndx].deleted = dbp == NULL;
+ dblp->dbentry[ndx].dbp = dbp;
+
+err: MUTEX_THREAD_UNLOCK(dbenv, dblp->mutexp);
+ return (ret);
+}
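
__dbreg_add_dbentry grows the per-process dbentry array in DB_GROW_SIZE chunks and initializes the new tail before installing the slot. A minimal sketch of that grow-by-chunk idiom follows, using a hypothetical slot/table pair rather than the real DB_ENTRY and DB_LOG structures.

	#include <stdlib.h>

	#define GROW_SIZE	32		/* hypothetical chunk size; BDB uses DB_GROW_SIZE */

	struct slot { void *dbp; int deleted; };

	struct table {
		struct slot *ent;
		int cnt;			/* number of allocated slots (a 1-based count) */
	};

	/* Ensure slot ndx (0-based) exists, growing in chunks and clearing new slots. */
	static int
	ensure_slot(struct table *t, int ndx)
	{
		struct slot *ne;
		int i, ncnt;

		if (t->cnt <= ndx) {
			ncnt = ndx + GROW_SIZE;
			if ((ne = realloc(t->ent, ncnt * sizeof(*ne))) == NULL)
				return (-1);
			for (i = t->cnt; i < ncnt; i++) {
				ne[i].dbp = NULL;
				ne[i].deleted = 0;
			}
			t->ent = ne;
			t->cnt = ncnt;
		}
		return (0);
	}
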
+
+/*
+ * __dbreg_rem_dbentry
+ * Remove an entry from the DB entry table.
+ *
+ * PUBLIC: void __dbreg_rem_dbentry __P((DB_LOG *, int32_t));
+ */
+void
+__dbreg_rem_dbentry(dblp, ndx)
+ DB_LOG *dblp;
+ int32_t ndx;
+{
+ MUTEX_THREAD_LOCK(dblp->dbenv, dblp->mutexp);
+ dblp->dbentry[ndx].dbp = NULL;
+ dblp->dbentry[ndx].deleted = 0;
+ MUTEX_THREAD_UNLOCK(dblp->dbenv, dblp->mutexp);
+}
+
+/*
+ * __dbreg_open_files --
+ * Put a LOG_CHECKPOINT log record for each open database.
+ *
+ * PUBLIC: int __dbreg_open_files __P((DB_ENV *));
+ */
+int
+__dbreg_open_files(dbenv)
+ DB_ENV *dbenv;
+{
+ DB_LOG *dblp;
+ DB_LSN r_unused;
+ DBT *dbtp, fid_dbt, t;
+ FNAME *fnp;
+ LOG *lp;
+ int ret;
+
+ dblp = dbenv->lg_handle;
+ lp = dblp->reginfo.primary;
+
+ ret = 0;
+
+ MUTEX_LOCK(dbenv, &lp->fq_mutex);
+
+ for (fnp = SH_TAILQ_FIRST(&lp->fq, __fname);
+ fnp != NULL; fnp = SH_TAILQ_NEXT(fnp, q, __fname)) {
+ if (fnp->name_off == INVALID_ROFF)
+ dbtp = NULL;
+ else {
+ memset(&t, 0, sizeof(t));
+ t.data = R_ADDR(&dblp->reginfo, fnp->name_off);
+ t.size = (u_int32_t)strlen(t.data) + 1;
+ dbtp = &t;
+ }
+ memset(&fid_dbt, 0, sizeof(fid_dbt));
+ fid_dbt.data = fnp->ufid;
+ fid_dbt.size = DB_FILE_ID_LEN;
+ /*
+ * Output LOG_CHECKPOINT records which will be
+ * processed during the OPENFILES pass of recovery.
+ * At the end of recovery we want to output the
+ * files that were open so that a future recovery
+ * run will have the correct files open during
+ * a backward pass. For this we output LOG_RCLOSE
+ * records so that the files will be closed on
+ * the forward pass.
+ */
+ if ((ret = __dbreg_register_log(dbenv,
+ NULL, &r_unused, 0,
+ F_ISSET(dblp, DBLOG_RECOVER) ? LOG_RCLOSE : LOG_CHECKPOINT,
+ dbtp, &fid_dbt, fnp->id, fnp->s_type, fnp->meta_pgno,
+ TXN_INVALID)) != 0)
+ break;
+ }
+
+ MUTEX_UNLOCK(dbenv, &lp->fq_mutex);
+
+ return (ret);
+}
+
+/*
+ * __dbreg_close_files --
+ * Close files that were opened by the recovery daemon. We sync the
+ * file, unless its mpf pointer has been NULLed by a db_remove or
+ * db_rename. We may not have flushed the log_register record that
+ * closes the file.
+ *
+ * PUBLIC: int __dbreg_close_files __P((DB_ENV *));
+ */
+int
+__dbreg_close_files(dbenv)
+ DB_ENV *dbenv;
+{
+ DB_LOG *dblp;
+ DB *dbp;
+ int ret, t_ret;
+ int32_t i;
+
+ /* If we haven't initialized logging, we have nothing to do. */
+ if (!LOGGING_ON(dbenv))
+ return (0);
+
+ dblp = dbenv->lg_handle;
+ ret = 0;
+ MUTEX_THREAD_LOCK(dbenv, dblp->mutexp);
+ for (i = 0; i < dblp->dbentry_cnt; i++) {
+ /* We only want to close dbps that recovery opened. */
+ if ((dbp = dblp->dbentry[i].dbp) != NULL &&
+ F_ISSET(dbp, DB_AM_RECOVER)) {
+ /*
+ * It's unsafe to call DB->close while holding the
+ * thread lock, because we'll call __dbreg_rem_dbentry
+ * and grab it again.
+ *
+ * Just drop it. Since dbreg ids go monotonically
+ * upward, concurrent opens should be safe, and the
+ * user should have no business closing files while
+ * we're in this loop anyway--we're in the process of
+ * making all outstanding dbps invalid.
+ */
+ MUTEX_THREAD_UNLOCK(dbenv, dblp->mutexp);
+ if ((t_ret = dbp->close(dbp,
+ dbp->mpf == NULL ? DB_NOSYNC : 0)) != 0 && ret == 0)
+ ret = t_ret;
+ MUTEX_THREAD_LOCK(dbenv, dblp->mutexp);
+ }
+ dblp->dbentry[i].deleted = 0;
+ dblp->dbentry[i].dbp = NULL;
+ }
+ MUTEX_THREAD_UNLOCK(dbenv, dblp->mutexp);
+ return (ret);
+}
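
The loop in __dbreg_close_files deliberately drops the thread mutex around dbp->close(), because the close path re-enters code that takes the same lock. Here is a minimal sketch of that drop-the-lock-around-the-callback pattern, written against pthreads with hypothetical slot and callback types; it is an illustration of the shape, not of the BDB locking primitives.

	#include <pthread.h>
	#include <stddef.h>

	struct dbslot { void *dbp; };

	/* Hypothetical close callback that may itself need the table lock. */
	typedef int (*close_fn)(void *dbp);

	/*
	 * Walk the table and close each handle, releasing the mutex around the
	 * callback so the callback can safely reacquire it.
	 */
	static int
	close_all(pthread_mutex_t *mtx, struct dbslot *ent, int cnt, close_fn cb)
	{
		void *dbp;
		int i, ret;

		ret = 0;
		pthread_mutex_lock(mtx);
		for (i = 0; i < cnt; i++) {
			if ((dbp = ent[i].dbp) != NULL) {
				pthread_mutex_unlock(mtx);	/* avoid self-deadlock in cb */
				if (cb(dbp) != 0 && ret == 0)
					ret = -1;
				pthread_mutex_lock(mtx);
			}
			ent[i].dbp = NULL;
		}
		pthread_mutex_unlock(mtx);
		return (ret);
	}
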
+
+/*
+ * __dbreg_nofiles --
+ * Check that there are no open files in the process local table.
+ * Returns 0 if there are no files and EINVAL if there are any.
+ *
+ * PUBLIC: int __dbreg_nofiles __P((DB_ENV *));
+ */
+int
+__dbreg_nofiles(dbenv)
+ DB_ENV *dbenv;
+{
+ DB *dbp;
+ DB_LOG *dblp;
+ int ret;
+ int32_t i;
+
+ /* If we haven't initialized logging, we have nothing to do. */
+ if (!LOGGING_ON(dbenv))
+ return (0);
+
+ dblp = dbenv->lg_handle;
+ ret = 0;
+ MUTEX_THREAD_LOCK(dbenv, dblp->mutexp);
+ for (i = 0; i < dblp->dbentry_cnt; i++) {
+ if ((dbp = dblp->dbentry[i].dbp) != NULL &&
+ !F_ISSET(dbp, DB_AM_RECOVER)) {
+ ret = EINVAL;
+ break;
+ }
+ }
+ MUTEX_THREAD_UNLOCK(dbenv, dblp->mutexp);
+ return (ret);
+}
+
+/*
+ * __dbreg_id_to_db --
+ * Return the DB corresponding to the specified dbreg id.
+ *
+ * PUBLIC: int __dbreg_id_to_db __P((DB_ENV *, DB_TXN *, DB **, int32_t, int));
+ */
+int
+__dbreg_id_to_db(dbenv, txn, dbpp, ndx, inc)
+ DB_ENV *dbenv;
+ DB_TXN *txn;
+ DB **dbpp;
+ int32_t ndx;
+ int inc;
+{
+ return (__dbreg_id_to_db_int(dbenv, txn, dbpp, ndx, inc, 1));
+}
+
+/*
+ * __dbreg_id_to_db_int --
+ * Return the DB corresponding to the specified dbreg id. The internal
+ * version takes a final parameter that indicates whether we should attempt
+ * to open the file if no mapping is found. During recovery, the recovery
+ * routines all want to try to open the file (and this is called from
+ * __dbreg_id_to_db), however, if we have a multi-process environment where
+ * some processes may not have the files open (e.g., XA), then we also get
+ * called from __dbreg_assign_id and it's OK if there is no mapping.
+ *
+ * PUBLIC: int __dbreg_id_to_db_int __P((DB_ENV *,
+ * PUBLIC: DB_TXN *, DB **, int32_t, int, int));
+ */
+int
+__dbreg_id_to_db_int(dbenv, txn, dbpp, ndx, inc, tryopen)
+ DB_ENV *dbenv;
+ DB_TXN *txn;
+ DB **dbpp;
+ int32_t ndx;
+ int inc, tryopen;
+{
+ DB_LOG *dblp;
+ FNAME *fname;
+ int ret;
+ char *name;
+
+ ret = 0;
+ dblp = dbenv->lg_handle;
+ COMPQUIET(inc, 0);
+
+ MUTEX_THREAD_LOCK(dbenv, dblp->mutexp);
+
+ /*
+ * Under XA, a process different than the one issuing DB operations
+ * may abort a transaction. In this case, the "recovery" routines
+ * are run by a process that does not necessarily have the file open,
+	 * so we must open the file explicitly.
+ */
+ if (ndx >= dblp->dbentry_cnt ||
+ (!dblp->dbentry[ndx].deleted && dblp->dbentry[ndx].dbp == NULL)) {
+ if (!tryopen || F_ISSET(dblp, DBLOG_RECOVER)) {
+ ret = ENOENT;
+ goto err;
+ }
+
+ /*
+ * __dbreg_id_to_fname acquires the region's fq_mutex,
+ * which we can't safely acquire while we hold the thread lock.
+ * We no longer need it anyway--the dbentry table didn't
+ * have what we needed.
+ */
+ MUTEX_THREAD_UNLOCK(dbenv, dblp->mutexp);
+
+ if (__dbreg_id_to_fname(dblp, ndx, 0, &fname) != 0)
+ /*
+ * With transactional opens, we may actually have
+ * closed this file in the transaction in which
+ * case this will fail too. Then it's up to the
+ * caller to reopen the file.
+ */
+ return (ENOENT);
+
+ /*
+ * Note that we're relying on fname not to change, even
+ * though we released the mutex that protects it (fq_mutex)
+ * inside __dbreg_id_to_fname. This should be a safe
+ * assumption, because the other process that has the file
+ * open shouldn't be closing it while we're trying to abort.
+ */
+ name = R_ADDR(&dblp->reginfo, fname->name_off);
+
+ /*
+ * At this point, we are not holding the thread lock, so exit
+ * directly instead of going through the exit code at the
+ * bottom. If the __dbreg_do_open succeeded, then we don't need
+ * to do any of the remaining error checking at the end of this
+ * routine.
+ * XXX I am sending a NULL txnlist and 0 txnid which may be
+ * completely broken ;(
+ */
+ if ((ret = __dbreg_do_open(dbenv, txn, dblp,
+ fname->ufid, name, fname->s_type,
+ ndx, fname->meta_pgno, NULL, 0)) != 0)
+ return (ret);
+
+ *dbpp = dblp->dbentry[ndx].dbp;
+ return (0);
+ }
+
+ /*
+ * Return DB_DELETED if the file has been deleted (it's not an error).
+ */
+ if (dblp->dbentry[ndx].deleted) {
+ ret = DB_DELETED;
+ goto err;
+ }
+
+ /* It's an error if we don't have a corresponding writeable DB. */
+ if ((*dbpp = dblp->dbentry[ndx].dbp) == NULL)
+ ret = ENOENT;
+
+err: MUTEX_THREAD_UNLOCK(dbenv, dblp->mutexp);
+ return (ret);
+}
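
Stripped of the locking and error details, __dbreg_id_to_db_int is a table lookup with an open-by-name fallback for the case (e.g., XA) where another process did the original open. A minimal sketch of that shape, with hypothetical helper types that are not BDB calls:

	#include <stddef.h>

	struct reg { void **ent; int cnt; };

	/* Hypothetical name lookup and open helpers supplied by the caller. */
	typedef const char *(*name_of_fn)(int id);
	typedef void *(*open_by_name_fn)(const char *name);

	/*
	 * Map an id to a handle: use the per-process table when possible,
	 * otherwise fall back to opening the file by its registered name.
	 */
	static void *
	id_to_handle(struct reg *r, int id, int tryopen,
	    name_of_fn name_of, open_by_name_fn open_by_name)
	{
		const char *name;

		if (id < r->cnt && r->ent[id] != NULL)
			return (r->ent[id]);	/* already open in this process */
		if (!tryopen)
			return (NULL);		/* caller will handle the miss */

		if ((name = name_of(id)) == NULL)	/* shared-region name lookup */
			return (NULL);
		return (open_by_name(name));
	}
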
+
+/*
+ * __dbreg_id_to_fname --
+ * Traverse the shared-memory region looking for the entry that
+ * matches the passed dbreg id. Returns 0 on success; -1 on error.
+ *
+ * PUBLIC: int __dbreg_id_to_fname __P((DB_LOG *, int32_t, int, FNAME **));
+ */
+int
+__dbreg_id_to_fname(dblp, lid, have_lock, fnamep)
+ DB_LOG *dblp;
+ int32_t lid;
+ int have_lock;
+ FNAME **fnamep;
+{
+ DB_ENV *dbenv;
+ FNAME *fnp;
+ LOG *lp;
+ int ret;
+
+ dbenv = dblp->dbenv;
+ lp = dblp->reginfo.primary;
+
+ ret = -1;
+
+ if (!have_lock)
+ MUTEX_LOCK(dbenv, &lp->fq_mutex);
+ for (fnp = SH_TAILQ_FIRST(&lp->fq, __fname);
+ fnp != NULL; fnp = SH_TAILQ_NEXT(fnp, q, __fname)) {
+ if (fnp->id == lid) {
+ *fnamep = fnp;
+ ret = 0;
+ break;
+ }
+ }
+ if (!have_lock)
+ MUTEX_UNLOCK(dbenv, &lp->fq_mutex);
+
+ return (ret);
+}
+/*
+ * __dbreg_fid_to_fname --
+ * Traverse the shared-memory region looking for the entry that
+ * matches the passed file unique id. Returns 0 on success; -1 on error.
+ *
+ * PUBLIC: int __dbreg_fid_to_fname __P((DB_LOG *, u_int8_t *, int, FNAME **));
+ */
+int
+__dbreg_fid_to_fname(dblp, fid, have_lock, fnamep)
+ DB_LOG *dblp;
+ u_int8_t *fid;
+ int have_lock;
+ FNAME **fnamep;
+{
+ DB_ENV *dbenv;
+ FNAME *fnp;
+ LOG *lp;
+ int ret;
+
+ dbenv = dblp->dbenv;
+ lp = dblp->reginfo.primary;
+
+ ret = -1;
+
+ if (!have_lock)
+ MUTEX_LOCK(dbenv, &lp->fq_mutex);
+ for (fnp = SH_TAILQ_FIRST(&lp->fq, __fname);
+ fnp != NULL; fnp = SH_TAILQ_NEXT(fnp, q, __fname)) {
+ if (memcmp(fnp->ufid, fid, DB_FILE_ID_LEN) == 0) {
+ *fnamep = fnp;
+ ret = 0;
+ break;
+ }
+ }
+ if (!have_lock)
+ MUTEX_UNLOCK(dbenv, &lp->fq_mutex);
+
+ return (ret);
+}
+
+/*
+ * __dbreg_get_name
+ *
+ * Interface to get the name of a registered file.  This is mainly for
+ * diagnostics, and the name returned could be transient unless something
+ * ensures that the file cannot be closed.
+ *
+ * PUBLIC: int __dbreg_get_name __P((DB_ENV *, u_int8_t *, char **));
+ */
+int
+__dbreg_get_name(dbenv, fid, namep)
+ DB_ENV *dbenv;
+ u_int8_t *fid;
+ char **namep;
+{
+ DB_LOG *dblp;
+ FNAME *fname;
+
+ dblp = dbenv->lg_handle;
+
+ if (dblp != NULL && __dbreg_fid_to_fname(dblp, fid, 0, &fname) == 0) {
+ *namep = R_ADDR(&dblp->reginfo, fname->name_off);
+ return (0);
+ }
+
+ return (-1);
+}
+
+/*
+ * __dbreg_do_open --
+ * Open files referenced in the log. This is the part of the open that
+ * is not protected by the thread mutex.
+ * PUBLIC: int __dbreg_do_open __P((DB_ENV *, DB_TXN *, DB_LOG *, u_int8_t *,
+ * PUBLIC: char *, DBTYPE, int32_t, db_pgno_t, void *, u_int32_t));
+ */
+int
+__dbreg_do_open(dbenv,
+ txn, lp, uid, name, ftype, ndx, meta_pgno, info, id)
+ DB_ENV *dbenv;
+ DB_TXN *txn;
+ DB_LOG *lp;
+ u_int8_t *uid;
+ char *name;
+ DBTYPE ftype;
+ int32_t ndx;
+ db_pgno_t meta_pgno;
+ void *info;
+ u_int32_t id;
+{
+ DB *dbp;
+ int ret;
+ u_int32_t cstat;
+
+ if ((ret = db_create(&dbp, lp->dbenv, 0)) != 0)
+ return (ret);
+
+ /*
+ * We can open files under a number of different scenarios.
+ * First, we can open a file during a normal txn_abort, if that file
+ * was opened and closed during the transaction (as is the master
+ * database of a sub-database).
+ * Second, we might be aborting a transaction in XA and not have
+ * it open in the process that is actually doing the abort.
+ * Third, we might be in recovery.
+ * In case 3, there is no locking, so there is no issue.
+ * In cases 1 and 2, we are guaranteed to already hold any locks
+ * that we need, since we're still in the same transaction, so by
+ * setting DB_AM_RECOVER, we guarantee that we don't log and that
+ * we don't try to acquire locks on behalf of a different locker id.
+ */
+ F_SET(dbp, DB_AM_RECOVER);
+ if (meta_pgno != PGNO_BASE_MD) {
+ memcpy(dbp->fileid, uid, DB_FILE_ID_LEN);
+ dbp->meta_pgno = meta_pgno;
+ }
+ dbp->type = ftype;
+ if ((ret = __db_dbopen(dbp, txn, name, NULL,
+ DB_ODDFILESIZE, __db_omode("rw----"), meta_pgno)) == 0) {
+
+ /*
+ * Verify that we are opening the same file that we were
+ * referring to when we wrote this log record.
+ */
+ if ((meta_pgno != PGNO_BASE_MD &&
+ __dbreg_check_master(dbenv, uid, name) != 0) ||
+ memcmp(uid, dbp->fileid, DB_FILE_ID_LEN) != 0)
+ cstat = TXN_IGNORE;
+ else
+ cstat = TXN_EXPECTED;
+
+ /* Assign the specific dbreg id to this dbp. */
+ if ((ret = __dbreg_assign_id(dbp, ndx)) != 0)
+ goto err;
+
+ /*
+ * If we successfully opened this file, then we need to
+ * convey that information to the txnlist so that we
+ * know how to handle the subtransaction that created
+ * the file system object.
+ */
+ if (id != TXN_INVALID) {
+ if ((ret = __db_txnlist_update(dbenv,
+ info, id, cstat, NULL)) == TXN_NOTFOUND)
+ ret = __db_txnlist_add(dbenv,
+ info, id, cstat, NULL);
+ else if (ret > 0)
+ ret = 0;
+ }
+err: if (cstat == TXN_IGNORE)
+ goto not_right;
+ return (ret);
+ } else {
+ /* Record that the open failed in the txnlist. */
+ if (id != TXN_INVALID && (ret = __db_txnlist_update(dbenv,
+ info, id, TXN_UNEXPECTED, NULL)) == TXN_NOTFOUND)
+ ret = __db_txnlist_add(dbenv,
+ info, id, TXN_UNEXPECTED, NULL);
+ }
+not_right:
+ (void)dbp->close(dbp, 0);
+ /* Add this file as deleted. */
+ (void)__dbreg_add_dbentry(dbenv, lp, NULL, ndx);
+ return (ENOENT);
+}
+
+static int
+__dbreg_check_master(dbenv, uid, name)
+ DB_ENV *dbenv;
+ u_int8_t *uid;
+ char *name;
+{
+ DB *dbp;
+ int ret;
+
+ ret = 0;
+ if ((ret = db_create(&dbp, dbenv, 0)) != 0)
+ return (ret);
+ dbp->type = DB_BTREE;
+ F_SET(dbp, DB_AM_RECOVER);
+ ret = __db_dbopen(dbp,
+ NULL, name, NULL, 0, __db_omode("rw----"), PGNO_BASE_MD);
+
+ if (ret == 0 && memcmp(uid, dbp->fileid, DB_FILE_ID_LEN) != 0)
+ ret = EINVAL;
+
+ (void)dbp->close(dbp, 0);
+ return (ret);
+}
+
+/*
+ * __dbreg_lazy_id --
+ * When a replication client gets upgraded to being a replication master,
+ * it may have database handles open that have not been assigned an ID, but
+ * which have become legal to use for logging.
+ *
+ * This function lazily allocates a new ID for such a handle, in a
+ * new transaction created for the purpose. We need to do this in a new
+ * transaction because we definitely wish to commit the dbreg_register, but
+ * at this point we have no way of knowing whether the log record that incited
+ * us to call this will be part of a committed transaction.
+ *
+ * PUBLIC: int __dbreg_lazy_id __P((DB *));
+ */
+int
+__dbreg_lazy_id(dbp)
+ DB *dbp;
+{
+ DB_ENV *dbenv;
+ DB_TXN *txn;
+ int ret;
+
+ dbenv = dbp->dbenv;
+
+ DB_ASSERT(F_ISSET(dbenv, DB_ENV_REP_MASTER));
+
+ if ((ret = dbenv->txn_begin(dbenv, NULL, &txn, 0)) != 0)
+ return (ret);
+
+ if ((ret = __dbreg_new_id(dbp, txn)) != 0) {
+ (void)txn->abort(txn);
+ return (ret);
+ }
+
+ return (txn->commit(txn, DB_TXN_NOSYNC));
+}
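
The reason __dbreg_lazy_id opens its own transaction is spelled out in the comment above: the dbreg_register must commit regardless of the fate of whatever transaction triggered the call. A hedged sketch of that run-one-step-in-its-own-transaction shape, using a hypothetical begin/commit/abort vtable instead of the DB_ENV transaction methods:

	/* Hypothetical transactional step and transaction hooks, kept generic on purpose. */
	typedef int (*step_fn)(void *txn, void *arg);

	struct txn_api {
		int (*begin)(void **txnp);
		int (*commit)(void *txn);
		int (*abort)(void *txn);
	};

	/*
	 * Run one step in its own transaction so it can commit independently
	 * of whatever transaction caused it to be needed.
	 */
	static int
	run_in_own_txn(const struct txn_api *api, step_fn step, void *arg)
	{
		void *txn;
		int ret;

		if ((ret = api->begin(&txn)) != 0)
			return (ret);
		if ((ret = step(txn, arg)) != 0) {
			(void)api->abort(txn);	/* undo the partial work */
			return (ret);
		}
		return (api->commit(txn));	/* commit regardless of the caller's txn */
	}
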
+
+/*
+ * __dbreg_push_id and __dbreg_pop_id --
+ * Dbreg ids from closed files are kept on a stack in shared memory
+ * for recycling. (We want to reuse them as much as possible because each
+ * process keeps open files in an array by ID.) Push them to the stack and
+ * pop them from it, managing memory as appropriate.
+ *
+ * The stack is protected by the fq_mutex, and in both functions we assume
+ * that this is already locked.
+ *
+ * PUBLIC: int __dbreg_push_id __P((DB_ENV *, int32_t));
+ * PUBLIC: int __dbreg_pop_id __P((DB_ENV *, int32_t *));
+ */
+int
+__dbreg_push_id(dbenv, id)
+ DB_ENV *dbenv;
+ int32_t id;
+{
+ DB_LOG *dblp;
+ LOG *lp;
+ int32_t *stack, *newstack;
+ int ret;
+
+ dblp = dbenv->lg_handle;
+ lp = dblp->reginfo.primary;
+
+ if (lp->free_fid_stack != INVALID_ROFF)
+ stack = R_ADDR(&dblp->reginfo, lp->free_fid_stack);
+ else
+ stack = NULL;
+
+ /* Check if we have room on the stack. */
+ if (lp->free_fids_alloced <= lp->free_fids + 1) {
+ R_LOCK(dbenv, &dblp->reginfo);
+ if ((ret = __db_shalloc(dblp->reginfo.addr,
+ (lp->free_fids_alloced + 20) * sizeof(u_int32_t), 0,
+ &newstack)) != 0) {
+ R_UNLOCK(dbenv, &dblp->reginfo);
+ return (ret);
+ }
+
+ memcpy(newstack, stack,
+ lp->free_fids_alloced * sizeof(u_int32_t));
+ lp->free_fid_stack = R_OFFSET(&dblp->reginfo, newstack);
+ lp->free_fids_alloced += 20;
+
+ if (stack != NULL)
+ __db_shalloc_free(dblp->reginfo.addr, stack);
+
+ stack = newstack;
+ R_UNLOCK(dbenv, &dblp->reginfo);
+ }
+
+ DB_ASSERT(stack != NULL);
+ stack[lp->free_fids++] = id;
+ return (0);
+}
+
+int
+__dbreg_pop_id(dbenv, id)
+ DB_ENV *dbenv;
+ int32_t *id;
+{
+ DB_LOG *dblp;
+ LOG *lp;
+ int32_t *stack;
+
+ dblp = dbenv->lg_handle;
+ lp = dblp->reginfo.primary;
+
+ /* Do we have anything to pop? */
+ if (lp->free_fid_stack != INVALID_ROFF && lp->free_fids > 0) {
+ stack = R_ADDR(&dblp->reginfo, lp->free_fid_stack);
+ *id = stack[--lp->free_fids];
+ } else
+ *id = DB_LOGFILEID_INVALID;
+
+ return (0);
+}
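
Reduced to its essentials, the id-recycling machinery above is a growable stack of retired ids. The following standalone sketch keeps the same push/pop behavior on heap memory with hypothetical names; the real code additionally guards the stack with fq_mutex and allocates it from the shared region.

	#include <stdlib.h>

	#define INVALID_ID	(-1)		/* hypothetical; BDB uses DB_LOGFILEID_INVALID */

	struct id_stack {
		int *ids;
		size_t n, alloced;
	};

	/* Push a retired id, growing the stack in small chunks (cf. __dbreg_push_id). */
	static int
	push_id(struct id_stack *s, int id)
	{
		int *ni;
		size_t na;

		if (s->n == s->alloced) {
			na = s->alloced + 20;
			if ((ni = realloc(s->ids, na * sizeof(*ni))) == NULL)
				return (-1);
			s->ids = ni;
			s->alloced = na;
		}
		s->ids[s->n++] = id;
		return (0);
	}

	/* Pop a recycled id, or report that a fresh one must be allocated. */
	static int
	pop_id(struct id_stack *s)
	{
		return (s->n > 0 ? s->ids[--s->n] : INVALID_ID);
	}
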
+
+/*
+ * __dbreg_pluck_id --
+ * Remove a particular dbreg id from the stack of free ids. This is
+ * used when we open a file, as in recovery, with a specific ID that might
+ * be on the stack.
+ *
+ * Returns success whether or not the particular id was found, and like
+ * push and pop, assumes that the fq_mutex is locked.
+ *
+ * PUBLIC: int __dbreg_pluck_id __P((DB_ENV *, int32_t));
+ */
+int
+__dbreg_pluck_id(dbenv, id)
+ DB_ENV *dbenv;
+ int32_t id;
+{
+ DB_LOG *dblp;
+ LOG *lp;
+ int32_t *stack;
+ int i;
+
+ dblp = dbenv->lg_handle;
+ lp = dblp->reginfo.primary;
+
+ /* Do we have anything to look at? */
+ if (lp->free_fid_stack != INVALID_ROFF) {
+ stack = R_ADDR(&dblp->reginfo, lp->free_fid_stack);
+ for (i = 0; i < lp->free_fids; i++)
+ if (id == stack[i]) {
+ /*
+ * Found it. Overwrite it with the top
+ * id (which may harmlessly be itself),
+ * and shorten the stack by one.
+ */
+ stack[i] = stack[lp->free_fids - 1];
+ lp->free_fids--;
+ return (0);
+ }
+ }
+
+ return (0);
+}
+
+#ifdef DEBUG
+/*
+ * __dbreg_print_dblist --
+ * Display the list of files.
+ *
+ * PUBLIC: void __dbreg_print_dblist __P((DB_ENV *));
+ */
+void
+__dbreg_print_dblist(dbenv)
+ DB_ENV *dbenv;
+{
+ DB *dbp;
+ DB_LOG *dblp;
+ FNAME *fnp;
+ LOG *lp;
+ int del, first;
+ char *name;
+
+ dblp = dbenv->lg_handle;
+ lp = dblp->reginfo.primary;
+
+ MUTEX_LOCK(dbenv, &lp->fq_mutex);
+
+ for (first = 1, fnp = SH_TAILQ_FIRST(&lp->fq, __fname);
+ fnp != NULL; fnp = SH_TAILQ_NEXT(fnp, q, __fname)) {
+ if (first) {
+ first = 0;
+ __db_err(dbenv,
+ "ID\t\t\tName\tType\tPgno\tTxnid\tDBP-info");
+ }
+ if (fnp->name_off == INVALID_ROFF)
+ name = "";
+ else
+ name = R_ADDR(&dblp->reginfo, fnp->name_off);
+
+ dbp = fnp->id >= dblp->dbentry_cnt ? NULL :
+ dblp->dbentry[fnp->id].dbp;
+ del = fnp->id >= dblp->dbentry_cnt ? 0 :
+ dblp->dbentry[fnp->id].deleted;
+ __db_err(dbenv, "%ld\t%s\t\t\t%s\t%lu\t%lx\t%s %d %lx %lx",
+ (long)fnp->id, name,
+ __db_dbtype_to_string(fnp->s_type),
+ (u_long)fnp->meta_pgno, (u_long)fnp->create_txnid,
+ dbp == NULL ? "No DBP" : "DBP", del, P_TO_ULONG(dbp),
+ dbp == NULL ? 0 : dbp->flags);
+ }
+
+ MUTEX_UNLOCK(dbenv, &lp->fq_mutex);
+}
+#endif
diff --git a/bdb/dist/Makefile.in b/bdb/dist/Makefile.in
index 4a47953e3e1..a7cc0e11f34 100644
--- a/bdb/dist/Makefile.in
+++ b/bdb/dist/Makefile.in
@@ -1,101 +1,139 @@
-# $Id: Makefile.in,v 11.66 2000/11/27 16:27:51 bostic Exp $
+# $Id: Makefile.in,v 11.175 2002/08/29 14:22:20 margo Exp $
srcdir= @srcdir@/..
builddir=.
##################################################
-# C, C++
+# Installation directories and permissions.
##################################################
-CPPFLAGS= -I$(builddir) -I$(srcdir)/include @CPPFLAGS@
-CFLAGS= -c $(CPPFLAGS) @CFLAGS@
-CXXFLAGS= -c $(CPPFLAGS) @CXXFLAGS@
+prefix= @prefix@
+exec_prefix=@exec_prefix@
+bindir= @bindir@
+includedir=@includedir@
+libdir= @libdir@
+docdir= $(prefix)/docs
-CC= @MAKEFILE_CC@
-CCLINK= @MAKEFILE_CCLINK@
-CXX= @MAKEFILE_CXX@
+dmode= 755
+emode= 555
+fmode= 444
-INSTALLER= @INSTALLER@
+transform=@program_transform_name@
-LDFLAGS= @LDFLAGS@
-LIBDB_ARGS= @LIBDB_ARGS@
-LIBJSO_LIBS= @LIBJSO_LIBS@
-LIBS= @LIBS@
-LIBSO_LIBS= @LIBSO_LIBS@
+##################################################
+# Paths for standard user-level commands.
+##################################################
+SHELL= @db_cv_path_sh@
+ar= @db_cv_path_ar@
+chmod= @db_cv_path_chmod@
+cp= @db_cv_path_cp@
+ln= @db_cv_path_ln@
+mkdir= @db_cv_path_mkdir@
+ranlib= @db_cv_path_ranlib@
+rm= @db_cv_path_rm@
+rpm= @db_cv_path_rpm@
+strip= @db_cv_path_strip@
+
+##################################################
+# General library information.
+##################################################
+DEF_LIB= @DEFAULT_LIB@
+DEF_LIB_CXX= @DEFAULT_LIB_CXX@
+INSTALLER= @INSTALLER@
LIBTOOL= @LIBTOOL@
-LIBTSO_LIBS= @LIBTSO_LIBS@
-LIBXSO_LIBS= @LIBXSO_LIBS@
POSTLINK= @POSTLINK@
-SOLINK= @SOLINK@
+SOLINK= @MAKEFILE_SOLINK@
SOFLAGS= @SOFLAGS@
SOMAJOR= @DB_VERSION_MAJOR@
SOVERSION= @DB_VERSION_MAJOR@.@DB_VERSION_MINOR@
-libdb= libdb.a
+##################################################
+# C API.
+##################################################
+CPPFLAGS= -I$(builddir) -I$(srcdir) -I$(srcdir)/dbinc @CPPFLAGS@
+CFLAGS= -c $(CPPFLAGS) @CFLAGS@
+CC= @MAKEFILE_CC@
+CCLINK= @MAKEFILE_CCLINK@
+
+LDFLAGS= @LDFLAGS@
+LIBS= @LIBS@
+LIBSO_LIBS= @LIBSO_LIBS@
+libdb= libdb.a
libso_base= libdb
-libso_linkname= $(libso_base)-$(SOVERSION).la
libso= $(libso_base)-$(SOVERSION).@SOSUFFIX@
+libso_static= $(libso_base)-$(SOVERSION).a
libso_target= $(libso_base)-$(SOVERSION).la
libso_default= $(libso_base).@SOSUFFIX@
libso_major= $(libso_base)-$(SOMAJOR).@SOSUFFIX@
##################################################
-# C++
+# C++ API.
#
-# C++ support is optional, and it can be used with or without shared libraries.
-# You must configure it using:
-# --enable-cxx
+# C++ support is optional, and can be built with static or shared libraries.
##################################################
+CXXFLAGS= -c $(CPPFLAGS) @CXXFLAGS@
+CXX= @MAKEFILE_CXX@
+CXXLINK= @MAKEFILE_CXXLINK@
+XSOLINK= @MAKEFILE_XSOLINK@
+LIBXSO_LIBS= @LIBXSO_LIBS@
+
libcxx= libdb_cxx.a
libxso_base= libdb_cxx
libxso= $(libxso_base)-$(SOVERSION).@SOSUFFIX@
+libxso_static= $(libxso_base)-$(SOVERSION).a
libxso_target= $(libxso_base)-$(SOVERSION).la
libxso_default= $(libxso_base).@SOSUFFIX@
libxso_major= $(libxso_base)-$(SOMAJOR).@SOSUFFIX@
##################################################
-# JAVA
+# Java API.
#
 # Java support is optional and requires shared libraries.
-# You must configure it using:
-# --enable-java --enable-dynamic
##################################################
CLASSPATH= $(JAVA_CLASSTOP)
-JAR= @JAR@
-JAVAC= env CLASSPATH=$(CLASSPATH) @JAVAC@
+LIBJSO_LIBS= @LIBJSO_LIBS@
+
+JAR= @JAR@
+JAVAC= env CLASSPATH="$(CLASSPATH)" @JAVAC@
JAVACFLAGS= @JAVACFLAGS@
-JAVA_BUILTFILE= .javabuilt
-JAVA_CLASSTOP= $(srcdir)/java/classes
+JAVA_CLASSTOP= ./classes
+JAVA_RPCCLASSES=./classes.rpc
JAVA_SRCDIR= $(srcdir)/java/src
-JAVA_DBREL= com/sleepycat/db
+JAVA_DBREL= com/sleepycat/db
+JAVA_EXREL= com/sleepycat/examples
+JAVA_RPCREL= com/sleepycat/db/rpcserver
JAVA_DBDIR= $(JAVA_SRCDIR)/$(JAVA_DBREL)
-JAVA_EXDIR= $(JAVA_SRCDIR)/com/sleepycat/examples
+JAVA_EXDIR= $(JAVA_SRCDIR)/$(JAVA_EXREL)
+JAVA_RPCDIR= $(srcdir)/rpc_server/java
libj_jarfile= db.jar
+libj_exjarfile= dbexamples.jar
+rpc_jarfile= dbsvc.jar
libjso_base= libdb_java
-libjso= $(libjso_base)-$(SOVERSION).@SOSUFFIX@
+libjso= $(libjso_base)-$(SOVERSION).@JMODSUFFIX@
+libjso_static= $(libjso_base)-$(SOVERSION).a
libjso_target= $(libjso_base)-$(SOVERSION).la
-libjso_default= $(libjso_base).@SOSUFFIX@
-libjso_major= $(libjso_base)-$(SOMAJOR).@SOSUFFIX@
-libjso_g= $(libjso_base)-$(SOVERSION)_g.@SOSUFFIX@
+libjso_default= $(libjso_base).@JMODSUFFIX@
+libjso_major= $(libjso_base)-$(SOMAJOR).@JMODSUFFIX@
+libjso_g= $(libjso_base)-$(SOVERSION)_g.@JMODSUFFIX@
##################################################
-# TCL
+# TCL API.
#
# Tcl support is optional and requires shared libraries.
-# You must configure it using:
-# --enable-tcl --with-tcl=DIR --enable-dynamic
##################################################
TCFLAGS= @TCFLAGS@
+LIBTSO_LIBS= @LIBTSO_LIBS@
libtso_base= libdb_tcl
-libtso= $(libtso_base)-$(SOVERSION).@SOSUFFIX@
+libtso= $(libtso_base)-$(SOVERSION).@MODSUFFIX@
+libtso_static= $(libtso_base)-$(SOVERSION).a
libtso_target= $(libtso_base)-$(SOVERSION).la
-libtso_default= $(libtso_base).@SOSUFFIX@
-libtso_major= $(libtso_base)-$(SOMAJOR).@SOSUFFIX@
+libtso_default= $(libtso_base).@MODSUFFIX@
+libtso_major= $(libtso_base)-$(SOMAJOR).@MODSUFFIX@
##################################################
-# DB_DUMP185 UTILITY
+# db_dump185 UTILITY
#
# The db_dump185 application should be compiled using the system's db.h file
# (which should be a DB 1.85/1.86 include file), and the system's 1.85/1.86
@@ -104,620 +142,1138 @@ libtso_major= $(libtso_base)-$(SOMAJOR).@SOSUFFIX@
# local libraries, for example. Do that by adding -I options to the DB185INC
# line, and -l options to the DB185LIB line.
##################################################
-DB185INC= -c @CFLAGS@ -I$(srcdir)/include @CPPFLAGS@
+DB185INC= -c @CFLAGS@ -I$(srcdir) @CPPFLAGS@
DB185LIB=
##################################################
-# INSTALLATION DIRECTORIES AND PERMISSIONS
-##################################################
-prefix= @prefix@
-exec_prefix=@exec_prefix@
-bindir= @bindir@
-includedir=@includedir@
-libdir= @libdir@
-docdir= $(prefix)/docs
-
-dmode= 755
-emode= 555
-fmode= 444
-
-transform=@program_transform_name@
-
-##################################################
-# PATHS FOR STANDARD USER-LEVEL COMMANDS
+# NOTHING BELOW THIS LINE SHOULD EVER NEED TO BE MODIFIED.
##################################################
-SHELL= @db_cv_path_sh@
-ar= @db_cv_path_ar@ cr
-chmod= @db_cv_path_chmod@
-cp= @db_cv_path_cp@
-ln= @db_cv_path_ln@
-mkdir= @db_cv_path_mkdir@
-ranlib= @db_cv_path_ranlib@
-rm= @db_cv_path_rm@
-strip= @db_cv_path_strip@
##################################################
-# NOTHING BELOW THIS LINE SHOULD EVER NEED TO BE MODIFIED.
+# Object and utility lists.
##################################################
-OBJS= @ADDITIONAL_OBJS@ @LIBOBJS@ @RPC_OBJS@ bt_compare@o@ bt_conv@o@ \
- bt_curadj@o@ bt_cursor@o@ bt_delete@o@ bt_method@o@ bt_open@o@ \
- bt_put@o@ bt_rec@o@ bt_reclaim@o@ bt_recno@o@ bt_rsearch@o@ \
- bt_search@o@ bt_split@o@ bt_stat@o@ bt_upgrade@o@ bt_verify@o@ \
- btree_auto@o@ crdel_auto@o@ crdel_rec@o@ db@o@ db_am@o@ db_auto@o@ \
- db_byteorder@o@ db_cam@o@ db_conv@o@ db_dispatch@o@ db_dup@o@ \
- db_err@o@ db_getlong@o@ db_iface@o@ db_join@o@ db_log2@o@ \
- db_meta@o@ db_method@o@ db_overflow@o@ db_pr@o@ db_rec@o@ \
- db_reclaim@o@ db_ret@o@ db_salloc@o@ db_shash@o@ db_upg@o@ \
- db_upg_opd@o@ db_vrfy@o@ db_vrfyutil@o@ dbm@o@ env_method@o@ \
- env_open@o@ env_recover@o@ env_region@o@ hash@o@ hash_auto@o@ \
- hash_conv@o@ hash_dup@o@ hash_func@o@ hash_meta@o@ hash_method@o@ \
- hash_page@o@ hash_rec@o@ hash_reclaim@o@ hash_stat@o@ hash_upgrade@o@ \
- hash_verify@o@ hsearch@o@ lock@o@ lock_conflict@o@ \
+C_OBJS= @ADDITIONAL_OBJS@ @LTLIBOBJS@ @RPC_CLIENT_OBJS@ \
+ bt_compare@o@ bt_conv@o@ bt_curadj@o@ bt_cursor@o@ bt_delete@o@ \
+ bt_method@o@ bt_open@o@ bt_put@o@ bt_rec@o@ bt_reclaim@o@ \
+ bt_recno@o@ bt_rsearch@o@ bt_search@o@ bt_split@o@ bt_stat@o@ \
+ bt_upgrade@o@ bt_verify@o@ btree_auto@o@ crdel_auto@o@ \
+ crdel_rec@o@ db@o@ db_am@o@ db_auto@o@ db_byteorder@o@ db_cam@o@ \
+ db_conv@o@ db_dispatch@o@ db_dup@o@ db_err@o@ db_getlong@o@ \
+ db_idspace@o@ db_iface@o@ db_join@o@ db_log2@o@ db_meta@o@ \
+ db_method@o@ db_open@o@ db_overflow@o@ db_pr@o@ db_rec@o@ \
+ db_reclaim@o@ db_rename@o@ db_remove@o@ db_ret@o@ db_salloc@o@ \
+ db_shash@o@ db_truncate@o@ db_upg@o@ db_upg_opd@o@ db_vrfy@o@ \
+ db_vrfyutil@o@ dbm@o@ dbreg@o@ dbreg_auto@o@ dbreg_rec@o@ \
+ dbreg_util@o@ env_file@o@ env_method@o@ env_open@o@ env_recover@o@ \
+ env_region@o@ fileops_auto@o@ fop_basic@o@ fop_rec@o@ \
+ fop_util@o@ hash@o@ hash_auto@o@ hash_conv@o@ hash_dup@o@ \
+ hash_func@o@ hash_meta@o@ hash_method@o@ hash_open@o@ \
+ hash_page@o@ hash_rec@o@ hash_reclaim@o@ hash_stat@o@ \
+ hash_upgrade@o@ hash_verify@o@ hmac@o@ hsearch@o@ lock@o@ \
lock_deadlock@o@ lock_method@o@ lock_region@o@ lock_stat@o@ \
- lock_util@o@ log@o@ log_archive@o@ log_auto@o@ log_compare@o@ \
- log_findckp@o@ log_get@o@ log_method@o@ log_put@o@ log_rec@o@ \
- log_register@o@ mp_alloc@o@ mp_bh@o@ mp_fget@o@ mp_fopen@o@ \
- mp_fput@o@ mp_fset@o@ mp_method@o@ mp_region@o@ mp_register@o@ \
- mp_stat@o@ mp_sync@o@ mp_trickle@o@ mutex@o@ os_abs@o@ \
- os_alloc@o@ os_dir@o@ os_errno@o@ os_fid@o@ os_finit@o@ \
- os_fsync@o@ os_handle@o@ os_map@o@ os_method@o@ os_oflags@o@ \
- os_open@o@ os_region@o@ os_rename@o@ os_root@o@ os_rpath@o@ \
- os_rw@o@ os_seek@o@ os_sleep@o@ os_spin@o@ os_stat@o@ \
- os_tmpdir@o@ os_unlink@o@ qam@o@ qam_auto@o@ qam_conv@o@ qam_files@o@ \
- qam_method@o@ qam_open@o@ qam_rec@o@ qam_stat@o@ qam_upgrade@o@ \
- qam_verify@o@ txn@o@ txn_auto@o@ txn_rec@o@ txn_region@o@ xa@o@ \
- xa_db@o@ xa_map@o@
-
-COBJS= cxx_app@o@ cxx_except@o@ cxx_lock@o@ cxx_log@o@ cxx_mpool@o@ \
- cxx_table@o@ cxx_txn@o@
-
-DBSOBJS=dbs@o@ dbs_am@o@ dbs_checkpoint@o@ dbs_debug@o@ dbs_handles@o@ \
- dbs_log@o@ dbs_qam@o@ dbs_spawn@o@ dbs_trickle@o@ dbs_util@o@ \
- dbs_yield@o@
-
-EOBJS= ex_access@o@ ex_btrec@o@ ex_dbclient@o@ ex_env@o@ ex_lock@o@ \
- ex_mpool@o@ ex_thread@o@ ex_tpcb@o@
-
-JOBJS= java_Db@o@ java_DbEnv@o@ java_DbLock@o@ java_DbLsn@o@ java_DbTxn@o@ \
- java_Dbc@o@ java_Dbt@o@ java_info@o@ java_locked@o@ java_util@o@
-
-RPC_OBJS=client@o@ db_server_clnt@o@ db_server_xdr@o@ gen_client@o@ \
- gen_client_ret@o@
-RPC_SRV=db_server_proc@o@ db_server_svc@o@ db_server_util@o@ gen_db_server@o@
-
-TOBJS= tcl_compat@o@ tcl_db@o@ tcl_db_pkg@o@ tcl_dbcursor@o@ tcl_env@o@ \
- tcl_internal@o@ tcl_lock@o@ tcl_log@o@ tcl_mp@o@ tcl_txn@o@
-
-UOBJS= db_archive@o@ db_checkpoint@o@ db_deadlock@o@ db_dump185@o@ \
- db_dump@o@ db_load@o@ db_printlog@o@ db_recover@o@ db_stat@o@ \
- db_upgrade@o@ db_verify@o@ util_log@o@ util_sig@o@
-
-PROGS= @ADDITIONAL_PROGS@ db_archive db_checkpoint db_deadlock \
- db_dump db_load db_printlog db_recover db_stat db_upgrade db_verify
+ lock_util@o@ log@o@ log_archive@o@ log_compare@o@ log_get@o@ \
+ log_method@o@ log_put@o@ mp_alloc@o@ mp_bh@o@ mp_fget@o@ \
+ mp_fopen@o@ mp_fput@o@ mp_fset@o@ mp_method@o@ mp_region@o@ \
+ mp_register@o@ mp_stat@o@ mp_sync@o@ mp_trickle@o@ mutex@o@ \
+ os_abs@o@ os_alloc@o@ os_clock@o@ os_config@o@ os_dir@o@ \
+ os_errno@o@ os_fid@o@ os_fsync@o@ os_handle@o@ os_id@o@ \
+ os_map@o@ os_method@o@ os_oflags@o@ os_open@o@ os_region@o@ \
+ os_rename@o@ os_root@o@ os_rpath@o@ os_rw@o@ os_seek@o@ \
+ os_sleep@o@ os_spin@o@ os_stat@o@ os_tmpdir@o@ os_unlink@o@ \
+ qam@o@ qam_auto@o@ qam_conv@o@ qam_files@o@ qam_method@o@ \
+ qam_open@o@ qam_rec@o@ qam_stat@o@ qam_upgrade@o@ qam_verify@o@ \
+ rep_method@o@ rep_record@o@ rep_region@o@ rep_util@o@ sha1@o@ \
+ txn@o@ txn_auto@o@ txn_method@o@ txn_rec@o@ txn_recover@o@ \
+ txn_region@o@ txn_stat@o@ txn_util@o@ xa@o@ xa_db@o@ xa_map@o@
+
+CXX_OBJS=\
+ cxx_db@o@ cxx_dbc@o@ cxx_dbt@o@ cxx_env@o@ cxx_except@o@ \
+ cxx_lock@o@ cxx_logc@o@ cxx_mpool@o@ cxx_txn@o@
+
+JAVA_OBJS=\
+ java_Db@o@ java_DbEnv@o@ java_DbLock@o@ java_DbLogc@o@ \
+ java_DbLsn@o@ java_DbTxn@o@ java_DbUtil@o@ java_Dbc@o@ \
+ java_Dbt@o@ \
+ java_info@o@ java_locked@o@ java_util@o@ java_stat_auto@o@
JAVA_DBSRCS=\
$(JAVA_DBDIR)/Db.java $(JAVA_DBDIR)/DbAppendRecno.java \
- $(JAVA_DBDIR)/DbBtreeCompare.java \
- $(JAVA_DBDIR)/DbBtreePrefix.java $(JAVA_DBDIR)/DbBtreeStat.java \
+ $(JAVA_DBDIR)/DbAppDispatch.java \
+ $(JAVA_DBDIR)/DbBtreeCompare.java $(JAVA_DBDIR)/DbBtreePrefix.java \
+ $(JAVA_DBDIR)/DbBtreeStat.java $(JAVA_DBDIR)/DbClient.java \
$(JAVA_DBDIR)/DbConstants.java $(JAVA_DBDIR)/DbDeadlockException.java \
$(JAVA_DBDIR)/DbDupCompare.java $(JAVA_DBDIR)/DbEnv.java \
$(JAVA_DBDIR)/DbEnvFeedback.java $(JAVA_DBDIR)/DbErrcall.java \
$(JAVA_DBDIR)/DbException.java $(JAVA_DBDIR)/DbFeedback.java \
$(JAVA_DBDIR)/DbHash.java $(JAVA_DBDIR)/DbHashStat.java \
$(JAVA_DBDIR)/DbKeyRange.java $(JAVA_DBDIR)/DbLock.java \
- $(JAVA_DBDIR)/DbLockStat.java $(JAVA_DBDIR)/DbLogStat.java \
+ $(JAVA_DBDIR)/DbLockNotGrantedException.java \
+ $(JAVA_DBDIR)/DbLockRequest.java $(JAVA_DBDIR)/DbLockStat.java \
+ $(JAVA_DBDIR)/DbLogc.java $(JAVA_DBDIR)/DbLogStat.java \
$(JAVA_DBDIR)/DbLsn.java $(JAVA_DBDIR)/DbMemoryException.java \
$(JAVA_DBDIR)/DbMpoolFStat.java $(JAVA_DBDIR)/DbMpoolStat.java \
+ $(JAVA_DBDIR)/DbMultipleDataIterator.java \
+ $(JAVA_DBDIR)/DbMultipleIterator.java \
+ $(JAVA_DBDIR)/DbMultipleKeyDataIterator.java \
+ $(JAVA_DBDIR)/DbMultipleRecnoDataIterator.java \
$(JAVA_DBDIR)/DbOutputStreamErrcall.java \
- $(JAVA_DBDIR)/DbQueueStat.java $(JAVA_DBDIR)/DbRecoveryInit.java \
- $(JAVA_DBDIR)/DbRunRecoveryException.java $(JAVA_DBDIR)/DbTxn.java \
- $(JAVA_DBDIR)/DbTxnRecover.java $(JAVA_DBDIR)/DbTxnStat.java \
- $(JAVA_DBDIR)/Dbc.java $(JAVA_DBDIR)/Dbt.java
+ $(JAVA_DBDIR)/DbPreplist.java $(JAVA_DBDIR)/DbQueueStat.java \
+ $(JAVA_DBDIR)/DbRepStat.java $(JAVA_DBDIR)/DbRepTransport.java \
+ $(JAVA_DBDIR)/DbRunRecoveryException.java \
+ $(JAVA_DBDIR)/DbSecondaryKeyCreate.java $(JAVA_DBDIR)/DbTxn.java \
+ $(JAVA_DBDIR)/DbTxnStat.java \
+ $(JAVA_DBDIR)/DbUtil.java $(JAVA_DBDIR)/Dbc.java $(JAVA_DBDIR)/Dbt.java
JAVA_EXSRCS=\
$(JAVA_EXDIR)/AccessExample.java \
$(JAVA_EXDIR)/BtRecExample.java \
+ $(JAVA_EXDIR)/BulkAccessExample.java \
$(JAVA_EXDIR)/EnvExample.java \
$(JAVA_EXDIR)/LockExample.java \
$(JAVA_EXDIR)/TpcbExample.java
+TCL_OBJS=\
+ tcl_compat@o@ tcl_db@o@ tcl_db_pkg@o@ tcl_dbcursor@o@ tcl_env@o@ \
+ tcl_internal@o@ tcl_lock@o@ tcl_log@o@ tcl_mp@o@ tcl_rep@o@ \
+ tcl_txn@o@ tcl_util@o@
+
+RPC_CLIENT_OBJS=\
+ client@o@ db_server_clnt@o@ db_server_xdr@o@ gen_client@o@ \
+ gen_client_ret@o@
+
+RPC_SRV_OBJS=\
+ db_server_proc@o@ db_server_svc@o@ db_server_util@o@ \
+ gen_db_server@o@
+
+RPC_CXXSRV_OBJS=\
+ db_server_cxxproc@o@ db_server_cxxutil@o@ db_server_svc@o@ \
+ gen_db_server@o@
+
+RPC_JAVASRV_SRCS=\
+ $(JAVA_RPCDIR)/DbDispatcher.java \
+ $(JAVA_RPCDIR)/DbServer.java \
+ $(JAVA_RPCDIR)/FreeList.java \
+ $(JAVA_RPCDIR)/LocalIterator.java \
+ $(JAVA_RPCDIR)/RpcDb.java \
+ $(JAVA_RPCDIR)/RpcDbEnv.java \
+ $(JAVA_RPCDIR)/RpcDbTxn.java \
+ $(JAVA_RPCDIR)/RpcDbc.java \
+ $(JAVA_RPCDIR)/Timer.java \
+ $(JAVA_RPCDIR)/gen/DbServerStub.java \
+ $(JAVA_RPCDIR)/gen/__db_associate_msg.java \
+ $(JAVA_RPCDIR)/gen/__db_associate_reply.java \
+ $(JAVA_RPCDIR)/gen/__db_bt_maxkey_msg.java \
+ $(JAVA_RPCDIR)/gen/__db_bt_maxkey_reply.java \
+ $(JAVA_RPCDIR)/gen/__db_bt_minkey_msg.java \
+ $(JAVA_RPCDIR)/gen/__db_bt_minkey_reply.java \
+ $(JAVA_RPCDIR)/gen/__db_close_msg.java \
+ $(JAVA_RPCDIR)/gen/__db_close_reply.java \
+ $(JAVA_RPCDIR)/gen/__db_create_msg.java \
+ $(JAVA_RPCDIR)/gen/__db_create_reply.java \
+ $(JAVA_RPCDIR)/gen/__db_cursor_msg.java \
+ $(JAVA_RPCDIR)/gen/__db_cursor_reply.java \
+ $(JAVA_RPCDIR)/gen/__db_del_msg.java \
+ $(JAVA_RPCDIR)/gen/__db_del_reply.java \
+ $(JAVA_RPCDIR)/gen/__db_encrypt_msg.java \
+ $(JAVA_RPCDIR)/gen/__db_encrypt_reply.java \
+ $(JAVA_RPCDIR)/gen/__db_extentsize_msg.java \
+ $(JAVA_RPCDIR)/gen/__db_extentsize_reply.java \
+ $(JAVA_RPCDIR)/gen/__db_flags_msg.java \
+ $(JAVA_RPCDIR)/gen/__db_flags_reply.java \
+ $(JAVA_RPCDIR)/gen/__db_get_msg.java \
+ $(JAVA_RPCDIR)/gen/__db_get_reply.java \
+ $(JAVA_RPCDIR)/gen/__db_h_ffactor_msg.java \
+ $(JAVA_RPCDIR)/gen/__db_h_ffactor_reply.java \
+ $(JAVA_RPCDIR)/gen/__db_h_nelem_msg.java \
+ $(JAVA_RPCDIR)/gen/__db_h_nelem_reply.java \
+ $(JAVA_RPCDIR)/gen/__db_join_msg.java \
+ $(JAVA_RPCDIR)/gen/__db_join_reply.java \
+ $(JAVA_RPCDIR)/gen/__db_key_range_msg.java \
+ $(JAVA_RPCDIR)/gen/__db_key_range_reply.java \
+ $(JAVA_RPCDIR)/gen/__db_lorder_msg.java \
+ $(JAVA_RPCDIR)/gen/__db_lorder_reply.java \
+ $(JAVA_RPCDIR)/gen/__db_open_msg.java \
+ $(JAVA_RPCDIR)/gen/__db_open_reply.java \
+ $(JAVA_RPCDIR)/gen/__db_pagesize_msg.java \
+ $(JAVA_RPCDIR)/gen/__db_pagesize_reply.java \
+ $(JAVA_RPCDIR)/gen/__db_pget_msg.java \
+ $(JAVA_RPCDIR)/gen/__db_pget_reply.java \
+ $(JAVA_RPCDIR)/gen/__db_put_msg.java \
+ $(JAVA_RPCDIR)/gen/__db_put_reply.java \
+ $(JAVA_RPCDIR)/gen/__db_re_delim_msg.java \
+ $(JAVA_RPCDIR)/gen/__db_re_delim_reply.java \
+ $(JAVA_RPCDIR)/gen/__db_re_len_msg.java \
+ $(JAVA_RPCDIR)/gen/__db_re_len_reply.java \
+ $(JAVA_RPCDIR)/gen/__db_re_pad_msg.java \
+ $(JAVA_RPCDIR)/gen/__db_re_pad_reply.java \
+ $(JAVA_RPCDIR)/gen/__db_remove_msg.java \
+ $(JAVA_RPCDIR)/gen/__db_remove_reply.java \
+ $(JAVA_RPCDIR)/gen/__db_rename_msg.java \
+ $(JAVA_RPCDIR)/gen/__db_rename_reply.java \
+ $(JAVA_RPCDIR)/gen/__db_stat_msg.java \
+ $(JAVA_RPCDIR)/gen/__db_stat_reply.java \
+ $(JAVA_RPCDIR)/gen/__db_sync_msg.java \
+ $(JAVA_RPCDIR)/gen/__db_sync_reply.java \
+ $(JAVA_RPCDIR)/gen/__db_truncate_msg.java \
+ $(JAVA_RPCDIR)/gen/__db_truncate_reply.java \
+ $(JAVA_RPCDIR)/gen/__dbc_close_msg.java \
+ $(JAVA_RPCDIR)/gen/__dbc_close_reply.java \
+ $(JAVA_RPCDIR)/gen/__dbc_count_msg.java \
+ $(JAVA_RPCDIR)/gen/__dbc_count_reply.java \
+ $(JAVA_RPCDIR)/gen/__dbc_del_msg.java \
+ $(JAVA_RPCDIR)/gen/__dbc_del_reply.java \
+ $(JAVA_RPCDIR)/gen/__dbc_dup_msg.java \
+ $(JAVA_RPCDIR)/gen/__dbc_dup_reply.java \
+ $(JAVA_RPCDIR)/gen/__dbc_get_msg.java \
+ $(JAVA_RPCDIR)/gen/__dbc_get_reply.java \
+ $(JAVA_RPCDIR)/gen/__dbc_pget_msg.java \
+ $(JAVA_RPCDIR)/gen/__dbc_pget_reply.java \
+ $(JAVA_RPCDIR)/gen/__dbc_put_msg.java \
+ $(JAVA_RPCDIR)/gen/__dbc_put_reply.java \
+ $(JAVA_RPCDIR)/gen/__env_cachesize_msg.java \
+ $(JAVA_RPCDIR)/gen/__env_cachesize_reply.java \
+ $(JAVA_RPCDIR)/gen/__env_close_msg.java \
+ $(JAVA_RPCDIR)/gen/__env_close_reply.java \
+ $(JAVA_RPCDIR)/gen/__env_create_msg.java \
+ $(JAVA_RPCDIR)/gen/__env_create_reply.java \
+ $(JAVA_RPCDIR)/gen/__env_dbremove_msg.java \
+ $(JAVA_RPCDIR)/gen/__env_dbremove_reply.java \
+ $(JAVA_RPCDIR)/gen/__env_dbrename_msg.java \
+ $(JAVA_RPCDIR)/gen/__env_dbrename_reply.java \
+ $(JAVA_RPCDIR)/gen/__env_encrypt_msg.java \
+ $(JAVA_RPCDIR)/gen/__env_encrypt_reply.java \
+ $(JAVA_RPCDIR)/gen/__env_flags_msg.java \
+ $(JAVA_RPCDIR)/gen/__env_flags_reply.java \
+ $(JAVA_RPCDIR)/gen/__env_open_msg.java \
+ $(JAVA_RPCDIR)/gen/__env_open_reply.java \
+ $(JAVA_RPCDIR)/gen/__env_remove_msg.java \
+ $(JAVA_RPCDIR)/gen/__env_remove_reply.java \
+ $(JAVA_RPCDIR)/gen/__txn_abort_msg.java \
+ $(JAVA_RPCDIR)/gen/__txn_abort_reply.java \
+ $(JAVA_RPCDIR)/gen/__txn_begin_msg.java \
+ $(JAVA_RPCDIR)/gen/__txn_begin_reply.java \
+ $(JAVA_RPCDIR)/gen/__txn_commit_msg.java \
+ $(JAVA_RPCDIR)/gen/__txn_commit_reply.java \
+ $(JAVA_RPCDIR)/gen/__txn_discard_msg.java \
+ $(JAVA_RPCDIR)/gen/__txn_discard_reply.java \
+ $(JAVA_RPCDIR)/gen/__txn_prepare_msg.java \
+ $(JAVA_RPCDIR)/gen/__txn_prepare_reply.java \
+ $(JAVA_RPCDIR)/gen/__txn_recover_msg.java \
+ $(JAVA_RPCDIR)/gen/__txn_recover_reply.java \
+ $(JAVA_RPCDIR)/gen/db_server.java
+
+UTIL_PROGS=\
+ @ADDITIONAL_PROGS@ \
+ db_archive db_checkpoint db_deadlock \
+ db_dump db_load db_printlog db_recover db_stat db_upgrade db_verify
+
+##################################################
+# List of files installed into the library directory.
##################################################
-# Note: Berkeley DB Makefiles are configured to build either a static or
-# a dynamic library. You should not attempt to build both library types
-# in the same directory, as they have incompatible object file formats.
-# To build both static and dynamic libraries, create two separate build
-# directories, and configure and build them separately.
+LIB_INSTALL_FILE_LIST=\
+ $(libdb) \
+ $(libso) \
+ $(libso_default) \
+ $(libso_major) \
+ $(libso_static) \
+ $(libso_target) \
+ $(libcxx) \
+ $(libxso) \
+ $(libxso_default) \
+ $(libxso_major) \
+ $(libxso_static) \
+ $(libxso_target) \
+ $(libtso) \
+ $(libtso_default) \
+ $(libtso_major) \
+ $(libtso_static) \
+ $(libtso_target) \
+ $(libjso) \
+ $(libjso_default) \
+ $(libjso_g) \
+ $(libjso_major) \
+ $(libjso_static) \
+ $(libjso_target) \
+ $(libj_exjarfile) \
+ $(libj_jarfile)
+
+##################################################
+# We're building a standard library or an RPM file hierarchy, potentially
+# for Embedix. Note: "all" must be the first target in the Makefile.
##################################################
-all: @DEFAULT_LIB@ @ADDITIONAL_LIBS@ @ADDITIONAL_LANG@ $(PROGS)
+all: @BUILD_TARGET@ libdb.a
+
+install-strip install: all @INSTALL_TARGET@
-$(libdb): $(OBJS)
- $(ar) $@ $(OBJS)
+##################################################
+# Library and standard utilities build.
+##################################################
+library_build: @INSTALL_LIBS@ @ADDITIONAL_LANG@ $(UTIL_PROGS)
+
+$(libdb): $(C_OBJS)
+ $(ar) cr $@ $(C_OBJS)
test ! -f $(ranlib) || $(ranlib) $@
-$(libcxx): $(COBJS) $(OBJS)
- $(ar) $@ $(COBJS) $(OBJS)
+$(libcxx): $(CXX_OBJS) $(C_OBJS)
+ $(ar) cr $@ $(CXX_OBJS) $(C_OBJS)
test ! -f $(ranlib) || $(ranlib) $@
-$(libso_target): $(OBJS)
- $(SOLINK) $(SOFLAGS) -o $(libso_target) \
- $(OBJS) $(LDFLAGS) $(LIBSO_LIBS)
+$(libso_target): $(C_OBJS)
+ $(SOLINK) $(SOFLAGS) $(LDFLAGS) -o $@ $(C_OBJS) $(LIBSO_LIBS)
-$(libxso_target): $(COBJS) $(OBJS)
- $(SOLINK) $(SOFLAGS) -o $(libxso_target) \
- $(COBJS) $(OBJS) $(LDFLAGS) $(LIBXSO_LIBS)
+$(libjso_target): $(JAVA_OBJS) $(C_OBJS)
+ $(SOLINK) -jnimodule $(SOFLAGS) $(LDFLAGS) \
+ -o $@ $(JAVA_OBJS) $(C_OBJS) $(LIBJSO_LIBS)
-$(libjso_target): $(JOBJS) $(OBJS)
- $(SOLINK) $(SOFLAGS) -o $(libjso_target) \
- $(JOBJS) $(OBJS) $(LDFLAGS) $(LIBJSO_LIBS)
+$(libtso_target): $(TCL_OBJS) $(C_OBJS)
+ $(SOLINK) -module $(SOFLAGS) $(LDFLAGS) \
+ -o $@ $(TCL_OBJS) $(C_OBJS) $(LIBTSO_LIBS)
-$(libtso_target): $(TOBJS) $(OBJS)
- $(SOLINK) $(SOFLAGS) -o $(libtso_target) \
- $(TOBJS) $(OBJS) $(LDFLAGS) $(LIBTSO_LIBS)
+$(libxso_target): $(CXX_OBJS) $(C_OBJS)
+ $(XSOLINK) $(SOFLAGS) $(LDFLAGS) \
+ -o $@ $(CXX_OBJS) $(C_OBJS) $(LIBXSO_LIBS)
##################################################
# Creating individual dependencies and actions for building class
# files is possible, but it is very messy and error prone.
##################################################
-java: $(JAVA_CLASSTOP) $(JAVA_BUILTFILE)
+java: $(libj_jarfile) $(libj_exjarfile)
-$(JAVA_BUILTFILE): $(JAVA_DBSRCS) $(JAVA_EXSRCS)
- @test -f $(rm) || (echo 'rm not found.'; exit 1)
- @test -f $(cp) || (echo 'cp not found.'; exit 1)
+$(libj_jarfile): $(JAVA_DBSRCS)
+ @test -d $(JAVA_CLASSTOP) || \
+ ($(mkdir) -p $(JAVA_CLASSTOP) && $(chmod) $(dmode) $(JAVA_CLASSTOP))
$(JAVAC) -d $(JAVA_CLASSTOP) $(JAVACFLAGS) $(JAVA_DBSRCS)
- @cd $(JAVA_CLASSTOP) && $(JAR) cf $(libj_jarfile) $(JAVA_DBREL)
+ cd $(JAVA_CLASSTOP) && $(JAR) cf ../$(libj_jarfile) $(JAVA_DBREL)
+
+$(libj_exjarfile): $(libj_jarfile) $(JAVA_EXSRCS)
+ @test -d $(JAVA_CLASSTOP) || \
+ ($(mkdir) -p $(JAVA_CLASSTOP) && $(chmod) $(dmode) $(JAVA_CLASSTOP))
$(JAVAC) -d $(JAVA_CLASSTOP) $(JAVACFLAGS) $(JAVA_EXSRCS)
- @echo This file helps with building java using make > $(JAVA_BUILTFILE)
+ cd $(JAVA_CLASSTOP) && $(JAR) cf ../$(libj_exjarfile) $(JAVA_EXREL)
+
+$(rpc_jarfile): $(libj_jarfile) $(RPC_JAVASRV_SRCS)
+ @test -d $(JAVA_RPCCLASSES) || \
+ ($(mkdir) -p $(JAVA_RPCCLASSES) && \
+ $(chmod) $(dmode) $(JAVA_RPCCLASSES))
+ env CLASSPATH=$(CLASSPATH):$(JAVA_RPCDIR)/oncrpc.jar \
+ @JAVAC@ -d $(JAVA_RPCCLASSES) $(JAVACFLAGS) $(RPC_JAVASRV_SRCS)
+ cd $(JAVA_RPCCLASSES) && $(JAR) cf ../$(rpc_jarfile) $(JAVA_RPCREL)
-$(JAVA_CLASSTOP):
- @test -f $(mkdir) || (echo 'mkdir not found.'; exit 1)
- $(mkdir) $(JAVA_CLASSTOP)
##################################################
# Utilities
##################################################
-berkeley_db_svc: $(RPC_SRV) util_log@o@ @DEFAULT_LIB@
- $(CCLINK) -o $@ $(LDFLAGS) $(RPC_SRV) util_log@o@ $(LIBDB_ARGS) $(LIBS)
- $(POSTLINK) $@
-
-db_archive: db_archive@o@ util_sig@o@ @DEFAULT_LIB@
+berkeley_db_svc: $(RPC_SRV_OBJS) util_log@o@ $(DEF_LIB)
$(CCLINK) -o $@ $(LDFLAGS) \
- db_archive@o@ util_sig@o@ $(LIBDB_ARGS) $(LIBS)
+ $(RPC_SRV_OBJS) util_log@o@ $(DEF_LIB) $(LIBS)
$(POSTLINK) $@
-db_checkpoint: db_checkpoint@o@ util_log@o@ util_sig@o@ @DEFAULT_LIB@
- $(CCLINK) -o $@ $(LDFLAGS) \
- db_checkpoint@o@ util_log@o@ util_sig@o@ $(LIBDB_ARGS) $(LIBS)
+berkeley_db_cxxsvc: $(RPC_CXXSRV_OBJS) util_log@o@ $(DEF_LIB_CXX)
+ $(CXXLINK) -o $@ $(LDFLAGS) \
+ $(RPC_CXXSRV_OBJS) util_log@o@ $(DEF_LIB_CXX) $(LIBS)
$(POSTLINK) $@
-db_deadlock: db_deadlock@o@ util_log@o@ util_sig@o@ @DEFAULT_LIB@
- $(CCLINK) -o $@ $(LDFLAGS) \
- db_deadlock@o@ util_log@o@ util_sig@o@ $(LIBDB_ARGS) $(LIBS)
- $(POSTLINK) $@
-
-db_dump: db_dump@o@ util_sig@o@ @DEFAULT_LIB@
- $(CCLINK) -o $@ $(LDFLAGS) db_dump@o@ util_sig@o@ $(LIBDB_ARGS) $(LIBS)
- $(POSTLINK) $@
+berkeley_db_javasvc: $(rpc_jarfile)
+ echo > $@ "#!/bin/sh"
+ echo >> $@ CLASSPATH="$(CLASSPATH):$(rpc_jarfile):$(JAVA_RPCDIR)/oncrpc.jar"
+ echo >> $@ LD_LIBRARY_PATH=.libs
+ echo >> $@ export CLASSPATH LD_LIBRARY_PATH
+ echo >> $@ exec java com.sleepycat.db.rpcserver.DbServer \$$@
+ chmod +x $@
-db_dump185: db_dump185@o@ @LIBOBJS@
- $(CCLINK) -o $@ $(LDFLAGS) db_dump185@o@ @LIBOBJS@ $(DB185LIB)
- $(POSTLINK) $@
-
-db_load: db_load@o@ util_sig@o@ @DEFAULT_LIB@
- $(CCLINK) -o $@ $(LDFLAGS) db_load@o@ util_sig@o@ $(LIBDB_ARGS) $(LIBS)
- $(POSTLINK) $@
-
-db_printlog: db_printlog@o@ util_sig@o@ @DEFAULT_LIB@
+db_archive: db_archive@o@ util_sig@o@ $(DEF_LIB)
$(CCLINK) -o $@ $(LDFLAGS) \
- db_printlog@o@ util_sig@o@ $(LIBDB_ARGS) $(LIBS)
- $(POSTLINK) $@
-
-db_recover: db_recover@o@ util_sig@o@ @DEFAULT_LIB@
- $(CCLINK) -o $@ $(LDFLAGS) \
- db_recover@o@ util_sig@o@ $(LIBDB_ARGS) $(LIBS)
- $(POSTLINK) $@
-
-db_stat: db_stat@o@ util_sig@o@ @DEFAULT_LIB@
- $(CCLINK) -o $@ $(LDFLAGS) db_stat@o@ util_sig@o@ $(LIBDB_ARGS) $(LIBS)
+ db_archive@o@ util_sig@o@ $(DEF_LIB) $(LIBS)
$(POSTLINK) $@
-db_upgrade: db_upgrade@o@ util_sig@o@ @DEFAULT_LIB@
+db_checkpoint: db_checkpoint@o@ util_log@o@ util_sig@o@ $(DEF_LIB)
$(CCLINK) -o $@ $(LDFLAGS) \
- db_upgrade@o@ util_sig@o@ $(LIBDB_ARGS) $(LIBS)
+ db_checkpoint@o@ util_log@o@ util_sig@o@ $(DEF_LIB) $(LIBS)
$(POSTLINK) $@
-db_verify: db_verify@o@ util_sig@o@ @DEFAULT_LIB@
+db_deadlock: db_deadlock@o@ util_log@o@ util_sig@o@ $(DEF_LIB)
$(CCLINK) -o $@ $(LDFLAGS) \
- db_verify@o@ util_sig@o@ $(LIBDB_ARGS) $(LIBS)
+ db_deadlock@o@ util_log@o@ util_sig@o@ $(DEF_LIB) $(LIBS)
$(POSTLINK) $@
-##################################################
-# Example programs
-##################################################
-ex_access: ex_access@o@ @DEFAULT_LIB@
- $(CCLINK) -o $@ $(LDFLAGS) ex_access@o@ $(LIBDB_ARGS) $(LIBS)
+db_dump: db_dump@o@ util_cache@o@ util_sig@o@ $(DEF_LIB)
+ $(CCLINK) -o $@ $(LDFLAGS) db_dump@o@ util_cache@o@ util_sig@o@ $(DEF_LIB) $(LIBS)
$(POSTLINK) $@
-ex_btrec: ex_btrec@o@ @DEFAULT_LIB@
- $(CCLINK) -o $@ $(LDFLAGS) ex_btrec@o@ $(LIBDB_ARGS) $(LIBS)
+db_dump185: db_dump185@o@ @LTLIBOBJS@
+ $(CCLINK) -o $@ $(LDFLAGS) db_dump185@o@ @LTLIBOBJS@ $(DB185LIB)
$(POSTLINK) $@
-ex_dbclient: ex_dbclient@o@ @DEFAULT_LIB@
- $(CCLINK) -o $@ $(LDFLAGS) ex_dbclient@o@ $(LIBDB_ARGS) $(LIBS)
+db_load: db_load@o@ util_cache@o@ util_sig@o@ $(DEF_LIB)
+ $(CCLINK) -o $@ $(LDFLAGS) db_load@o@ util_cache@o@ util_sig@o@ $(DEF_LIB) $(LIBS)
$(POSTLINK) $@
-ex_env: ex_env@o@ @DEFAULT_LIB@
- $(CCLINK) -o $@ $(LDFLAGS) ex_env@o@ $(LIBDB_ARGS) $(LIBS)
- $(POSTLINK) $@
-
-ex_lock: ex_lock@o@ @DEFAULT_LIB@
- $(CCLINK) -o $@ $(LDFLAGS) ex_lock@o@ $(LIBDB_ARGS) $(LIBS)
+db_printlog: db_printlog@o@ util_sig@o@ $(DEF_LIB)
+ $(CCLINK) -o $@ $(LDFLAGS) \
+ db_printlog@o@ util_sig@o@ $(DEF_LIB) $(LIBS)
$(POSTLINK) $@
-ex_mpool: ex_mpool@o@ @DEFAULT_LIB@
- $(CCLINK) -o $@ $(LDFLAGS) ex_mpool@o@ $(LIBDB_ARGS) $(LIBS)
+db_recover: db_recover@o@ util_sig@o@ $(DEF_LIB)
+ $(CCLINK) -o $@ $(LDFLAGS) \
+ db_recover@o@ util_sig@o@ $(DEF_LIB) $(LIBS)
$(POSTLINK) $@
-ex_thread: ex_thread@o@ @DEFAULT_LIB@
- $(CCLINK) -o $@ $(LDFLAGS) ex_thread@o@ $(LIBDB_ARGS) $(LIBS)
+db_stat: db_stat@o@ util_cache@o@ util_sig@o@ $(DEF_LIB)
+ $(CCLINK) -o $@ $(LDFLAGS) db_stat@o@ util_cache@o@ util_sig@o@ $(DEF_LIB) $(LIBS)
$(POSTLINK) $@
-ex_tpcb: ex_tpcb@o@ @DEFAULT_LIB@
- $(CCLINK) -o $@ $(LDFLAGS) ex_tpcb@o@ $(LIBDB_ARGS) $(LIBS)
+db_upgrade: db_upgrade@o@ util_sig@o@ $(DEF_LIB)
+ $(CCLINK) -o $@ $(LDFLAGS) \
+ db_upgrade@o@ util_sig@o@ $(DEF_LIB) $(LIBS)
$(POSTLINK) $@
-##################################################
-# Multi-threaded tester.
-##################################################
-dbs: $(DBSOBJS) @DEFAULT_LIB@
- $(CCLINK) -o $@ $(LDFLAGS) $(DBSOBJS) $(LIBDB_ARGS) @DBS_LIBS@ $(LIBS)
+db_verify: db_verify@o@ util_cache@o@ util_sig@o@ $(DEF_LIB)
+ $(CCLINK) -o $@ $(LDFLAGS) \
+ db_verify@o@ util_cache@o@ util_sig@o@ $(DEF_LIB) $(LIBS)
$(POSTLINK) $@
##################################################
-# Standard Makefile targets.
+# Library and standard utilities install.
##################################################
-RMLIST= berkeley_db_svc db_dump185 dbs ex_access ex_btrec ex_dbclient \
- ex_env ex_lock ex_mpool ex_thread ex_tpcb
-clean:
- $(rm) -f $(OBJS)
- $(rm) -f $(COBJS) $(DBSOBJS) $(EOBJS) $(CEOBJS) $(JOBJS)
- $(rm) -f $(TOBJS) $(UOBJS) $(RPC_OBJS) $(RPC_SRV)
- $(rm) -f $(PROGS) $(RMLIST)
- $(rm) -f *@o@ *.o *.lo core *.core
- $(rm) -rf ALL.OUT TESTDIR
- $(rm) -rf .libs $(libdb) $(libcxx)
- $(rm) -rf $(libso_target) $(libso) $(libso_default) $(libso_major)
- $(rm) -rf $(libxso_target) $(libxso) $(libxso_default) $(libxso_major)
- $(rm) -rf $(libtso_target) $(libtso) $(libtso_default) $(libtso_major)
- $(rm) -rf $(libjso_target) $(libjso) $(libjso_default) $(libjso_major)
+library_install: install_setup
+library_install: install_include install_lib install_utilities install_docs
-depend obj:
-
-realclean distclean: clean
- $(rm) -f Makefile config.cache config.log config.status db_config.h
- $(rm) -f confdefs.h db.h db_int.h db_185.h include.tcl libtool
-
-install: all install_setup \
- install_include install_lib install_utilities install_docs
-
-uninstall: uninstall_utilities uninstall_include uninstall_lib uninstall_docs
+uninstall: uninstall_include uninstall_lib uninstall_utilities uninstall_docs
install_setup:
- @test -f $(chmod) || (echo 'chmod not found.'; exit 1)
- @test -f $(cp) || (echo 'cp not found.'; exit 1)
- @test -f $(mkdir) || (echo 'mkdir not found.'; exit 1)
- @test -f $(rm) || (echo 'rm not found.'; exit 1)
+ @test -d $(prefix) || \
+ ($(mkdir) -p $(prefix) && $(chmod) $(dmode) $(prefix))
+INCDOT= db.h db_cxx.h @ADDITIONAL_INCS@
+INCINC= cxx_common.h cxx_except.h
install_include:
@echo "Installing DB include files: $(includedir) ..."
@test -d $(includedir) || \
($(mkdir) -p $(includedir) && $(chmod) $(dmode) $(includedir))
- @cd $(includedir) && $(rm) -f db.h db_185.h db_cxx.h
- @$(cp) -p db.h \
- $(srcdir)/include/db_cxx.h @ADDITIONAL_INCS@ $(includedir)
- @cd $(includedir) && $(chmod) $(fmode) db.h db_cxx.h @ADDITIONAL_INCS@
+ @cd $(includedir) && $(rm) -f $(INCDOT) $(INCINC)
+ @$(cp) -p $(INCDOT) $(includedir)
+ @cd $(srcdir)/dbinc/ && $(cp) -p $(INCINC) $(includedir)
+ @cd $(includedir) && $(chmod) $(fmode) $(INCDOT) $(INCINC)
uninstall_include:
- -cd $(includedir) && $(rm) -f db.h db_185.h db_cxx.h
+ @cd $(includedir) && $(rm) -f $(INCDOT) $(INCINC)
-install_lib: @DEFAULT_INSTALL@
-
-uninstall_lib:
- -cd $(libdir) && $(rm) -f $(libdb) $(libcxx) \
- $(libso_target) $(libso) $(libso_default) $(libso_major) \
- $(libxso_target) $(libxso) $(libxso_default) $(libxso_major) \
- $(libtso_target) $(libtso) $(libtso_default) $(libtso_major) \
- $(libjso_target) $(libjso) $(libjso_default) $(libjso_major) \
- $(libj_jarfile)
-
-install_static:
+install_lib:
@echo "Installing DB library: $(libdir) ..."
@test -d $(libdir) || \
($(mkdir) -p $(libdir) && $(chmod) $(dmode) $(libdir))
- @cd $(libdir) && $(rm) -f $(libdb)
- @$(cp) -p $(libdb) $(libdir)
- @cd $(libdir) && $(chmod) $(fmode) $(libdb)
+ @cd $(libdir) && $(rm) -f $(LIB_INSTALL_FILE_LIST)
+ @$(INSTALLER) @INSTALL_LIBS@ $(libdir)
+ @(cd $(libdir) && \
+ test -f $(libso) && $(ln) -s $(libso) $(libso_default); \
+ test -f $(libso) && $(ln) -s $(libso) $(libso_major); \
+ test -f $(libso_static) && $(ln) -s $(libso_static) $(libdb); \
+ test -f $(libxso) && $(ln) -s $(libxso) $(libxso_default); \
+ test -f $(libxso) && $(ln) -s $(libxso) $(libxso_major); \
+ test -f $(libxso_static) && $(ln) -s $(libxso_static) $(libcxx); \
+ test -f $(libtso) && $(ln) -s $(libtso) $(libtso_default); \
+ test -f $(libtso) && $(ln) -s $(libtso) $(libtso_major); \
+ test -f $(libjso) && $(ln) -s $(libjso) $(libjso_default); \
+ test -f $(libjso) && $(ln) -s $(libjso) $(libjso_major); \
+ test -f $(libjso) && $(ln) -s $(libjso) $(libjso_g)) || true
+ @(test -f $(libj_jarfile) && \
+ $(cp) $(libj_jarfile) $(libdir) && \
+ $(chmod) $(fmode) $(libdir)/$(libj_jarfile)) || true
-install_static_cxx:
- @echo "Installing DB C++ static library: $(libdir) ..."
- @test -d $(libdir) || \
- ($(mkdir) -p $(libdir) && $(chmod) $(dmode) $(libdir))
- @cd $(libdir) && $(rm) -f $(libcxx)
- @$(cp) -p $(libcxx) $(libdir)
- @cd $(libdir) && $(chmod) $(fmode) $(libcxx)
-
-install_dynamic:
- @echo "Installing DB library: $(libdir) ..."
- @test -d $(libdir) || \
- ($(mkdir) -p $(libdir) && $(chmod) $(dmode) $(libdir))
- @cd $(libdir) && $(rm) -f \
- $(libso_target) $(libso) $(libso_default) $(libso_major)
- @$(INSTALLER) $(libso_target) $(libdir)
- @cd $(libdir) && $(ln) -s $(libso) $(libso_default)
- @cd $(libdir) && $(ln) -s $(libso) $(libso_major)
- @$(LIBTOOL) --mode=finish $(libdir)
-
-install_dynamic_cxx:
- @echo "Installing DB C++ library: $(libdir) ..."
- @test -d $(libdir) || \
- ($(mkdir) -p $(libdir) && $(chmod) $(dmode) $(libdir))
- @cd $(libdir) && $(rm) -f \
- $(libxso_target) $(libxso) $(libxso_default) $(libxso_major)
- @$(INSTALLER) $(libxso_target) $(libdir)
- @cd $(libdir) && $(ln) -s $(libxso) $(libxso_default)
- @cd $(libdir) && $(ln) -s $(libxso) $(libxso_major)
-
-install_tcl:
- @echo "Installing DB Tcl library: $(libdir) ..."
- @test -d $(libdir) || \
- ($(mkdir) -p $(libdir) && $(chmod) $(dmode) $(libdir))
- @cd $(libdir) && $(rm) -f \
- $(libtso_target) $(libtso) $(libtso_default) $(libtso_major)
- @$(INSTALLER) $(libtso_target) $(libdir)
- @cd $(libdir) && $(ln) -s $(libtso) $(libtso_default)
- @cd $(libdir) && $(ln) -s $(libtso) $(libtso_major)
-
-install_java:
- @echo "Installing DB Java library: $(libdir) ..."
- @test -d $(libdir) || \
- ($(mkdir) -p $(libdir) && $(chmod) $(dmode) $(libdir))
- @cd $(libdir) && $(rm) -f \
- $(libjso_target) $(libjso) $(libjso_default) $(libjso_major)
- @$(INSTALLER) $(libjso_target) $(libdir)
- @cd $(libdir) && $(ln) -s $(libjso) $(libjso_default)
- @cd $(libdir) && $(ln) -s $(libjso) $(libjso_major)
- @cd $(libdir) && $(ln) -s $(libjso) $(libjso_g)
- @$(cp) $(JAVA_CLASSTOP)/$(libj_jarfile) $(libdir)
+uninstall_lib:
+ @cd $(libdir) && $(rm) -f $(LIB_INSTALL_FILE_LIST)
install_utilities:
- @echo "Installing DB utilities: $(bindir) ..."
+ echo "Installing DB utilities: $(bindir) ..."
@test -d $(bindir) || \
($(mkdir) -p $(bindir) && $(chmod) $(dmode) $(bindir))
- @cd $(bindir) && $(rm) -f $(PROGS)
- @$(INSTALLER) -fp $(PROGS) $(bindir)
- @cd $(bindir) && (test ! -f $(strip) || $(strip) $(PROGS))
- @cd $(bindir) && $(chmod) $(emode) $(PROGS)
+ @for i in $(UTIL_PROGS); do \
+ $(rm) -f $(bindir)/$$i $(bindir)/$$i.exe; \
+ test -f $$i.exe && i=$$i.exe || true; \
+ $(INSTALLER) $$i $(bindir)/$$i; \
+ test -f $(strip) && $(strip) $(bindir)/$$i || true; \
+ $(chmod) $(emode) $(bindir)/$$i; \
+ done
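Unrolled for a single program on a Unix-style build, one pass of the loop above amounts to roughly the following; db_archive stands in for any member of UTIL_PROGS, and when a db_archive.exe exists (Cygwin-style builds) the .exe name is installed under its suffixed name instead:

$(rm) -f $(bindir)/db_archive $(bindir)/db_archive.exe
$(INSTALLER) db_archive $(bindir)/db_archive
test -f $(strip) && $(strip) $(bindir)/db_archive || true
$(chmod) $(emode) $(bindir)/db_archive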
uninstall_utilities:
- -cd $(bindir) && $(rm) -f $(PROGS)
+ @(cd $(bindir); for i in $(UTIL_PROGS); do \
+ $(rm) -f $$i $$i.exe; \
+ done)
+DOCLIST=\
+ api_c api_cxx api_java api_tcl images index.html ref reftoc.html \
+ sleepycat utility
install_docs:
@echo "Installing documentation: $(docdir) ..."
@test -d $(docdir) || \
($(mkdir) -p $(docdir) && $(chmod) $(dmode) $(docdir))
- @cd $(docdir) && $(rm) -rf *
- @cd $(srcdir)/docs && $(cp) -pr * $(docdir)/
+ @cd $(docdir) && $(rm) -rf $(DOCLIST)
+ @cd $(srcdir)/docs && $(cp) -pr $(DOCLIST) $(docdir)/
uninstall_docs:
- -cd $(docdir) && $(rm) -rf *
+ @cd $(docdir) && $(rm) -rf $(DOCLIST)
##################################################
-# Object build rules.
+# RPM, Embedix build and install.
##################################################
-# Utilities
-db_archive@o@: $(srcdir)/db_archive/db_archive.c
+RPM_ARCHIVE=db-@DB_VERSION_MAJOR@.@DB_VERSION_MINOR@.@DB_VERSION_PATCH@.tar.gz
+rpm_build:
+ @$(rm) -rf BUILD RPMS SOURCES SPECS SRPMS RPM_INSTALL
+ @$(mkdir) -p BUILD && $(chmod) $(dmode) BUILD
+ @$(mkdir) -p RPMS/i386 && $(chmod) $(dmode) RPMS RPMS/i386
+ @$(mkdir) -p SOURCES && $(chmod) $(dmode) SOURCES
+ @$(mkdir) -p SPECS && $(chmod) $(dmode) SPECS
+ @$(mkdir) -p SRPMS && $(chmod) $(dmode) SRPMS
+ $(cp) @db_cv_path_rpm_archive@/$(RPM_ARCHIVE) SOURCES/
+ $(cp) db.spec SPECS/db.spec
+ $(rpm) --rcfile @CONFIGURATION_PATH@/rpmrc -ba SPECS/db.spec
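The -ba invocation builds both the binary and the source package inside this directory tree, so after a successful run the output lands under the directories created above; the exact file names depend on db.spec, but something along these lines should list them:

ls RPMS/i386/db-*.rpm SRPMS/db-*.src.rpm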
+
+rpm_install:
+
+RPM_SRPMS=db-@DB_VERSION_MAJOR@.@DB_VERSION_MINOR@.@DB_VERSION_PATCH@-1.src.rpm
+embedix_install:
+ $(cp) db.ecd @db_cv_path_embedix_install@/config-data/ecds/db.ecd
+ $(chmod) $(fmode) @db_cv_path_embedix_install@/config-data/ecds/db.ecd
+ $(cp) SRPMS/$(RPM_SRPMS) \
+ @db_cv_path_embedix_install@/Packages/SRPMS/$(RPM_SRPMS)
+ $(chmod) $(fmode) \
+ @db_cv_path_embedix_install@/Packages/SRPMS/$(RPM_SRPMS)
+
+##################################################
+# Remaining standard Makefile targets.
+##################################################
+CLEAN_LIST=\
+ berkeley_db_svc berkeley_db_cxxsvc berkeley_db_javasvc \
+ db_dump185 db_perf dbs bench_001 \
+ ex_access ex_apprec ex_btrec ex_dbclient ex_env ex_lock ex_mpool \
+ ex_repquote ex_thread ex_tpcb excxx_access excxx_btrec excxx_env \
+ excxx_lock excxx_mpool excxx_tpcb rpmrc
+
+mostly-clean clean:
+ $(rm) -rf $(C_OBJS)
+ $(rm) -rf $(CXX_OBJS) $(JAVA_OBJS) $(TCL_OBJS)
+ $(rm) -rf $(RPC_CLIENT_OBJS) $(RPC_SRV_OBJS) $(RPC_CXXSRV_OBJS)
+ $(rm) -rf $(UTIL_PROGS) *.exe $(CLEAN_LIST)
+ $(rm) -rf $(JAVA_CLASSTOP) $(JAVA_RPCCLASSES) $(rpc_jarfile)
+ $(rm) -rf tags *@o@ *.o *.o.lock *.lo core *.core
+ $(rm) -rf ALL.OUT.* BUILD PARALLEL_TESTDIR.* RPMS RPM_INSTALL
+ $(rm) -rf RUN_LOG RUNQUEUE SOURCES SPECS SRPMS TESTDIR TESTDIR.A
+ $(rm) -rf logtrack_seen.db tm .libs $(LIB_INSTALL_FILE_LIST)
+
+REALCLEAN_LIST=\
+ Makefile confdefs.h config.cache config.log config.status db.h \
+ db.spec db185_int.h db_185.h db_config.h db_cxx.h db_int.h \
+ db_int_def.h include.tcl
+
+distclean maintainer-clean realclean: clean
+ $(rm) -rf $(REALCLEAN_LIST)
+ $(rm) -rf libtool
+
+check depend dvi info obj TAGS:
+ @echo "$@: make target not supported" && true
+
+dist:
+ @echo "$@: make target not supported" && false
+
+##################################################
+# Multi-threaded testers, benchmarks.
+##################################################
+dbs@o@: $(srcdir)/test_server/dbs.c
$(CC) $(CFLAGS) $?
-db_checkpoint@o@: $(srcdir)/db_checkpoint/db_checkpoint.c
+dbs_am@o@: $(srcdir)/test_server/dbs_am.c
$(CC) $(CFLAGS) $?
-db_deadlock@o@: $(srcdir)/db_deadlock/db_deadlock.c
+dbs_checkpoint@o@: $(srcdir)/test_server/dbs_checkpoint.c
$(CC) $(CFLAGS) $?
-db_dump@o@: $(srcdir)/db_dump/db_dump.c
+dbs_debug@o@: $(srcdir)/test_server/dbs_debug.c
$(CC) $(CFLAGS) $?
-db_dump185@o@: $(srcdir)/db_dump185/db_dump185.c
- $(CC) $(DB185INC) $?
-db_load@o@: $(srcdir)/db_load/db_load.c
+dbs_handles@o@: $(srcdir)/test_server/dbs_handles.c
$(CC) $(CFLAGS) $?
-db_printlog@o@: $(srcdir)/db_printlog/db_printlog.c
+dbs_log@o@: $(srcdir)/test_server/dbs_log.c
$(CC) $(CFLAGS) $?
-db_recover@o@: $(srcdir)/db_recover/db_recover.c
+dbs_qam@o@: $(srcdir)/test_server/dbs_qam.c
$(CC) $(CFLAGS) $?
-db_stat@o@: $(srcdir)/db_stat/db_stat.c
+dbs_spawn@o@: $(srcdir)/test_server/dbs_spawn.c
$(CC) $(CFLAGS) $?
-db_upgrade@o@: $(srcdir)/db_upgrade/db_upgrade.c
+dbs_trickle@o@: $(srcdir)/test_server/dbs_trickle.c
$(CC) $(CFLAGS) $?
-db_verify@o@: $(srcdir)/db_verify/db_verify.c
+dbs_util@o@: $(srcdir)/test_server/dbs_util.c
$(CC) $(CFLAGS) $?
-
-# Examples
-ex_access@o@: $(srcdir)/examples_c/ex_access.c
+dbs_yield@o@: $(srcdir)/test_server/dbs_yield.c
$(CC) $(CFLAGS) $?
-ex_btrec@o@: $(srcdir)/examples_c/ex_btrec.c
+DBS_OBJS=\
+ dbs@o@ dbs_am@o@ dbs_checkpoint@o@ dbs_debug@o@ dbs_handles@o@ \
+ dbs_log@o@ dbs_qam@o@ dbs_spawn@o@ dbs_trickle@o@ dbs_util@o@ \
+ dbs_yield@o@
+dbs: $(DBS_OBJS) $(DEF_LIB)
+ $(CCLINK) -o $@ \
+ $(LDFLAGS) $(DBS_OBJS) $(DEF_LIB) @LOAD_LIBS@ $(LIBS)
+ $(POSTLINK) $@
+
+db_perf@o@: $(srcdir)/test_perf/db_perf.c
$(CC) $(CFLAGS) $?
-ex_dbclient@o@: $(srcdir)/examples_c/ex_dbclient.c
+perf_cache_check@o@: $(srcdir)/test_perf/perf_cache_check.c
$(CC) $(CFLAGS) $?
-ex_env@o@: $(srcdir)/examples_c/ex_env.c
+perf_checkpoint@o@: $(srcdir)/test_perf/perf_checkpoint.c
$(CC) $(CFLAGS) $?
-ex_lock@o@: $(srcdir)/examples_c/ex_lock.c
+perf_config@o@: $(srcdir)/test_perf/perf_config.c
$(CC) $(CFLAGS) $?
-ex_mpool@o@: $(srcdir)/examples_c/ex_mpool.c
+perf_dbs@o@: $(srcdir)/test_perf/perf_dbs.c
$(CC) $(CFLAGS) $?
-ex_thread@o@: $(srcdir)/examples_c/ex_thread.c
+perf_debug@o@: $(srcdir)/test_perf/perf_debug.c
$(CC) $(CFLAGS) $?
-ex_tpcb@o@: $(srcdir)/examples_c/ex_tpcb.c
+perf_file@o@: $(srcdir)/test_perf/perf_file.c
$(CC) $(CFLAGS) $?
-
-# DB files
-crdel_auto@o@: $(srcdir)/db/crdel_auto.c
+perf_key@o@: $(srcdir)/test_perf/perf_key.c
$(CC) $(CFLAGS) $?
-crdel_rec@o@: $(srcdir)/db/crdel_rec.c
+perf_log@o@: $(srcdir)/test_perf/perf_log.c
$(CC) $(CFLAGS) $?
-db@o@: $(srcdir)/db/db.c
+perf_misc@o@: $(srcdir)/test_perf/perf_misc.c
$(CC) $(CFLAGS) $?
-db_am@o@: $(srcdir)/db/db_am.c
+perf_op@o@: $(srcdir)/test_perf/perf_op.c
$(CC) $(CFLAGS) $?
-db_auto@o@: $(srcdir)/db/db_auto.c
+perf_parse@o@: $(srcdir)/test_perf/perf_parse.c
$(CC) $(CFLAGS) $?
-db_cam@o@: $(srcdir)/db/db_cam.c
+perf_rand@o@: $(srcdir)/test_perf/perf_rand.c
$(CC) $(CFLAGS) $?
-db_conv@o@: $(srcdir)/db/db_conv.c
+perf_spawn@o@: $(srcdir)/test_perf/perf_spawn.c
$(CC) $(CFLAGS) $?
-db_dispatch@o@: $(srcdir)/db/db_dispatch.c
+perf_thread@o@: $(srcdir)/test_perf/perf_thread.c
$(CC) $(CFLAGS) $?
-db_dup@o@: $(srcdir)/db/db_dup.c
+perf_trickle@o@: $(srcdir)/test_perf/perf_trickle.c
$(CC) $(CFLAGS) $?
-db_iface@o@: $(srcdir)/db/db_iface.c
+perf_txn@o@: $(srcdir)/test_perf/perf_txn.c
$(CC) $(CFLAGS) $?
-db_join@o@: $(srcdir)/db/db_join.c
+perf_util@o@: $(srcdir)/test_perf/perf_util.c
$(CC) $(CFLAGS) $?
-db_meta@o@: $(srcdir)/db/db_meta.c
+perf_vx@o@: $(srcdir)/test_perf/perf_vx.c
$(CC) $(CFLAGS) $?
-db_method@o@: $(srcdir)/db/db_method.c
+DBPERF_OBJS=\
+ db_perf@o@ perf_cache_check@o@ perf_checkpoint@o@ perf_config@o@ \
+ perf_dbs@o@ perf_debug@o@ perf_file@o@ perf_key@o@ perf_log@o@ \
+ perf_misc@o@ perf_op@o@ perf_parse@o@ perf_rand@o@ \
+ perf_spawn@o@ perf_thread@o@ perf_trickle@o@ perf_txn@o@ \
+ perf_util@o@ perf_vx@o@
+db_perf: $(DBPERF_OBJS) $(DEF_LIB)
+ $(CCLINK) -o $@ \
+ $(LDFLAGS) $(DBPERF_OBJS) $(DEF_LIB) @LOAD_LIBS@ $(LIBS)
+ $(POSTLINK) $@
+
+tm@o@: $(srcdir)/mutex/tm.c
$(CC) $(CFLAGS) $?
-db_overflow@o@: $(srcdir)/db/db_overflow.c
+tm: tm@o@ $(DEF_LIB)
+ $(CCLINK) -o $@ $(LDFLAGS) tm@o@ $(DEF_LIB) @LOAD_LIBS@ $(LIBS)
+ $(POSTLINK) $@
+
+##################################################
+# Example programs for C.
+##################################################
+bench_001@o@: $(srcdir)/examples_c/bench_001.c
$(CC) $(CFLAGS) $?
-db_pr@o@: $(srcdir)/db/db_pr.c
+bench_001: bench_001@o@ $(DEF_LIB)
+ $(CCLINK) -o $@ $(LDFLAGS) bench_001@o@ $(DEF_LIB) $(LIBS)
+ $(POSTLINK) $@
+
+ex_access@o@: $(srcdir)/examples_c/ex_access.c
$(CC) $(CFLAGS) $?
-db_rec@o@: $(srcdir)/db/db_rec.c
+ex_access: ex_access@o@ $(DEF_LIB)
+ $(CCLINK) -o $@ $(LDFLAGS) ex_access@o@ $(DEF_LIB) $(LIBS)
+ $(POSTLINK) $@
+
+ex_apprec@o@: $(srcdir)/examples_c/ex_apprec/ex_apprec.c
$(CC) $(CFLAGS) $?
-db_reclaim@o@: $(srcdir)/db/db_reclaim.c
+ex_apprec_auto@o@: $(srcdir)/examples_c/ex_apprec/ex_apprec_auto.c
$(CC) $(CFLAGS) $?
-db_ret@o@: $(srcdir)/db/db_ret.c
+ex_apprec_rec@o@: $(srcdir)/examples_c/ex_apprec/ex_apprec_rec.c
$(CC) $(CFLAGS) $?
-db_upg@o@: $(srcdir)/db/db_upg.c
+EX_APPREC_OBJS=ex_apprec@o@ ex_apprec_auto@o@ ex_apprec_rec@o@
+ex_apprec: $(EX_APPREC_OBJS) $(DEF_LIB)
+ $(CCLINK) -o $@ \
+ $(LDFLAGS) $(EX_APPREC_OBJS) $(DEF_LIB) @LOAD_LIBS@ $(LIBS)
+
+ex_btrec@o@: $(srcdir)/examples_c/ex_btrec.c
$(CC) $(CFLAGS) $?
-db_upg_opd@o@: $(srcdir)/db/db_upg_opd.c
+ex_btrec: ex_btrec@o@ $(DEF_LIB)
+ $(CCLINK) -o $@ $(LDFLAGS) ex_btrec@o@ $(DEF_LIB) $(LIBS)
+ $(POSTLINK) $@
+
+ex_dbclient@o@: $(srcdir)/examples_c/ex_dbclient.c
$(CC) $(CFLAGS) $?
-db_vrfy@o@: $(srcdir)/db/db_vrfy.c
+ex_dbclient: ex_dbclient@o@ $(DEF_LIB)
+ $(CCLINK) -o $@ $(LDFLAGS) ex_dbclient@o@ $(DEF_LIB) $(LIBS)
+ $(POSTLINK) $@
+
+ex_env@o@: $(srcdir)/examples_c/ex_env.c
$(CC) $(CFLAGS) $?
-db_vrfyutil@o@: $(srcdir)/db/db_vrfyutil.c
+ex_env: ex_env@o@ $(DEF_LIB)
+ $(CCLINK) -o $@ $(LDFLAGS) ex_env@o@ $(DEF_LIB) $(LIBS)
+ $(POSTLINK) $@
+
+ex_lock@o@: $(srcdir)/examples_c/ex_lock.c
$(CC) $(CFLAGS) $?
+ex_lock: ex_lock@o@ $(DEF_LIB)
+ $(CCLINK) -o $@ $(LDFLAGS) ex_lock@o@ $(DEF_LIB) $(LIBS)
+ $(POSTLINK) $@
-# Environment files
-db_salloc@o@: $(srcdir)/env/db_salloc.c
+ex_mpool@o@: $(srcdir)/examples_c/ex_mpool.c
$(CC) $(CFLAGS) $?
-db_shash@o@: $(srcdir)/env/db_shash.c
+ex_mpool: ex_mpool@o@ $(DEF_LIB)
+ $(CCLINK) -o $@ $(LDFLAGS) ex_mpool@o@ $(DEF_LIB) $(LIBS)
+ $(POSTLINK) $@
+
+ex_rq_client@o@: $(srcdir)/examples_c/ex_repquote/ex_rq_client.c
$(CC) $(CFLAGS) $?
-env_method@o@: $(srcdir)/env/env_method.c
+ex_rq_main@o@: $(srcdir)/examples_c/ex_repquote/ex_rq_main.c
$(CC) $(CFLAGS) $?
-env_open@o@: $(srcdir)/env/env_open.c
+ex_rq_master@o@: $(srcdir)/examples_c/ex_repquote/ex_rq_master.c
$(CC) $(CFLAGS) $?
-env_recover@o@: $(srcdir)/env/env_recover.c
+ex_rq_net@o@: $(srcdir)/examples_c/ex_repquote/ex_rq_net.c
$(CC) $(CFLAGS) $?
-env_region@o@: $(srcdir)/env/env_region.c
+ex_rq_util@o@: $(srcdir)/examples_c/ex_repquote/ex_rq_util.c
$(CC) $(CFLAGS) $?
+EX_RQ_OBJS=\
+ ex_rq_client@o@ ex_rq_main@o@ ex_rq_master@o@ ex_rq_net@o@ ex_rq_util@o@
+ex_repquote: $(EX_RQ_OBJS) $(DEF_LIB)
+ $(CCLINK) -o $@ \
+ $(LDFLAGS) $(EX_RQ_OBJS) $(DEF_LIB) @LOAD_LIBS@ $(LIBS)
+ $(POSTLINK) $@
-# Common files
-db_byteorder@o@: $(srcdir)/common/db_byteorder.c
- $(CC) $(CFLAGS) $?
-db_err@o@: $(srcdir)/common/db_err.c
- $(CC) $(CFLAGS) $?
-db_getlong@o@: $(srcdir)/common/db_getlong.c
- $(CC) $(CFLAGS) $?
-db_log2@o@: $(srcdir)/common/db_log2.c
- $(CC) $(CFLAGS) $?
-util_log@o@: $(srcdir)/common/util_log.c
+ex_thread@o@: $(srcdir)/examples_c/ex_thread.c
$(CC) $(CFLAGS) $?
-util_sig@o@: $(srcdir)/common/util_sig.c
+ex_thread: ex_thread@o@ $(DEF_LIB)
+ $(CCLINK) -o $@ \
+ $(LDFLAGS) ex_thread@o@ $(DEF_LIB) @LOAD_LIBS@ $(LIBS)
+ $(POSTLINK) $@
+
+ex_tpcb@o@: $(srcdir)/examples_c/ex_tpcb.c
$(CC) $(CFLAGS) $?
+ex_tpcb: ex_tpcb@o@ $(DEF_LIB)
+ $(CCLINK) -o $@ $(LDFLAGS) ex_tpcb@o@ $(DEF_LIB) $(LIBS)
+ $(POSTLINK) $@
+
+##################################################
+# Example programs for C++.
+##################################################
+AccessExample@o@: $(srcdir)/examples_cxx/AccessExample.cpp
+ $(CXX) $(CXXFLAGS) $?
+excxx_access: AccessExample@o@ $(DEF_LIB_CXX)
+ $(CXXLINK) -o $@ $(LDFLAGS) AccessExample@o@ $(DEF_LIB_CXX) $(LIBS)
+ $(POSTLINK) $@
+
+BtRecExample@o@: $(srcdir)/examples_cxx/BtRecExample.cpp
+ $(CXX) $(CXXFLAGS) $?
+excxx_btrec: BtRecExample@o@ $(DEF_LIB_CXX)
+ $(CXXLINK) -o $@ $(LDFLAGS) BtRecExample@o@ $(DEF_LIB_CXX) $(LIBS)
+ $(POSTLINK) $@
+
+EnvExample@o@: $(srcdir)/examples_cxx/EnvExample.cpp
+ $(CXX) $(CXXFLAGS) $?
+excxx_env: EnvExample@o@ $(DEF_LIB_CXX)
+ $(CXXLINK) -o $@ $(LDFLAGS) EnvExample@o@ $(DEF_LIB_CXX) $(LIBS)
+ $(POSTLINK) $@
+
+LockExample@o@: $(srcdir)/examples_cxx/LockExample.cpp
+ $(CXX) $(CXXFLAGS) $?
+excxx_lock: LockExample@o@ $(DEF_LIB_CXX)
+ $(CXXLINK) -o $@ $(LDFLAGS) LockExample@o@ $(DEF_LIB_CXX) $(LIBS)
+ $(POSTLINK) $@
-# Btree files
+MpoolExample@o@: $(srcdir)/examples_cxx/MpoolExample.cpp
+ $(CXX) $(CXXFLAGS) $?
+excxx_mpool: MpoolExample@o@ $(DEF_LIB_CXX)
+ $(CXXLINK) -o $@ $(LDFLAGS) MpoolExample@o@ $(DEF_LIB_CXX) $(LIBS)
+ $(POSTLINK) $@
+
+TpcbExample@o@: $(srcdir)/examples_cxx/TpcbExample.cpp
+ $(CXX) $(CXXFLAGS) $?
+excxx_tpcb: TpcbExample@o@ $(DEF_LIB_CXX)
+ $(CXXLINK) -o $@ $(LDFLAGS) TpcbExample@o@ $(DEF_LIB_CXX) $(LIBS)
+ $(POSTLINK) $@
+
+##################################################
+# C API build rules.
+##################################################
+aes_method@o@: $(srcdir)/crypto/aes_method.c
+ $(CC) $(CFLAGS) $?
bt_compare@o@: $(srcdir)/btree/bt_compare.c
- $(CC) $(CFLAGS) $?
+ $(CC) $(CFLAGS) $?
bt_conv@o@: $(srcdir)/btree/bt_conv.c
- $(CC) $(CFLAGS) $?
+ $(CC) $(CFLAGS) $?
bt_curadj@o@: $(srcdir)/btree/bt_curadj.c
- $(CC) $(CFLAGS) $?
+ $(CC) $(CFLAGS) $?
bt_cursor@o@: $(srcdir)/btree/bt_cursor.c
- $(CC) $(CFLAGS) $?
+ $(CC) $(CFLAGS) $?
bt_delete@o@: $(srcdir)/btree/bt_delete.c
- $(CC) $(CFLAGS) $?
+ $(CC) $(CFLAGS) $?
bt_method@o@: $(srcdir)/btree/bt_method.c
- $(CC) $(CFLAGS) $?
+ $(CC) $(CFLAGS) $?
bt_open@o@: $(srcdir)/btree/bt_open.c
- $(CC) $(CFLAGS) $?
+ $(CC) $(CFLAGS) $?
bt_put@o@: $(srcdir)/btree/bt_put.c
- $(CC) $(CFLAGS) $?
+ $(CC) $(CFLAGS) $?
bt_rec@o@: $(srcdir)/btree/bt_rec.c
- $(CC) $(CFLAGS) $?
+ $(CC) $(CFLAGS) $?
bt_reclaim@o@: $(srcdir)/btree/bt_reclaim.c
- $(CC) $(CFLAGS) $?
+ $(CC) $(CFLAGS) $?
bt_recno@o@: $(srcdir)/btree/bt_recno.c
- $(CC) $(CFLAGS) $?
+ $(CC) $(CFLAGS) $?
bt_rsearch@o@: $(srcdir)/btree/bt_rsearch.c
- $(CC) $(CFLAGS) $?
+ $(CC) $(CFLAGS) $?
bt_search@o@: $(srcdir)/btree/bt_search.c
- $(CC) $(CFLAGS) $?
+ $(CC) $(CFLAGS) $?
bt_split@o@: $(srcdir)/btree/bt_split.c
- $(CC) $(CFLAGS) $?
+ $(CC) $(CFLAGS) $?
bt_stack@o@: $(srcdir)/btree/bt_stack.c
- $(CC) $(CFLAGS) $?
+ $(CC) $(CFLAGS) $?
bt_stat@o@: $(srcdir)/btree/bt_stat.c
- $(CC) $(CFLAGS) $?
+ $(CC) $(CFLAGS) $?
bt_upgrade@o@: $(srcdir)/btree/bt_upgrade.c
- $(CC) $(CFLAGS) $?
+ $(CC) $(CFLAGS) $?
bt_verify@o@: $(srcdir)/btree/bt_verify.c
- $(CC) $(CFLAGS) $?
+ $(CC) $(CFLAGS) $?
btree_auto@o@: $(srcdir)/btree/btree_auto.c
- $(CC) $(CFLAGS) $?
-
-# Queue files
+ $(CC) $(CFLAGS) $?
+crdel_auto@o@: $(srcdir)/db/crdel_auto.c
+ $(CC) $(CFLAGS) $?
+crdel_rec@o@: $(srcdir)/db/crdel_rec.c
+ $(CC) $(CFLAGS) $?
+crypto@o@: $(srcdir)/crypto/crypto.c
+ $(CC) $(CFLAGS) $?
+db185@o@: $(srcdir)/db185/db185.c
+ $(CC) $(CFLAGS) $?
+db@o@: $(srcdir)/db/db.c
+ $(CC) $(CFLAGS) $?
+db_am@o@: $(srcdir)/db/db_am.c
+ $(CC) $(CFLAGS) $?
+db_auto@o@: $(srcdir)/db/db_auto.c
+ $(CC) $(CFLAGS) $?
+db_byteorder@o@: $(srcdir)/common/db_byteorder.c
+ $(CC) $(CFLAGS) $?
+db_cam@o@: $(srcdir)/db/db_cam.c
+ $(CC) $(CFLAGS) $?
+db_conv@o@: $(srcdir)/db/db_conv.c
+ $(CC) $(CFLAGS) $?
+db_dispatch@o@: $(srcdir)/db/db_dispatch.c
+ $(CC) $(CFLAGS) $?
+db_dup@o@: $(srcdir)/db/db_dup.c
+ $(CC) $(CFLAGS) $?
+db_err@o@: $(srcdir)/common/db_err.c
+ $(CC) $(CFLAGS) $?
+db_getlong@o@: $(srcdir)/common/db_getlong.c
+ $(CC) $(CFLAGS) $?
+db_idspace@o@: $(srcdir)/common/db_idspace.c
+ $(CC) $(CFLAGS) $?
+db_iface@o@: $(srcdir)/db/db_iface.c
+ $(CC) $(CFLAGS) $?
+db_join@o@: $(srcdir)/db/db_join.c
+ $(CC) $(CFLAGS) $?
+db_log2@o@: $(srcdir)/common/db_log2.c
+ $(CC) $(CFLAGS) $?
+db_meta@o@: $(srcdir)/db/db_meta.c
+ $(CC) $(CFLAGS) $?
+db_method@o@: $(srcdir)/db/db_method.c
+ $(CC) $(CFLAGS) $?
+db_open@o@: $(srcdir)/db/db_open.c
+ $(CC) $(CFLAGS) $?
+db_overflow@o@: $(srcdir)/db/db_overflow.c
+ $(CC) $(CFLAGS) $?
+db_pr@o@: $(srcdir)/db/db_pr.c
+ $(CC) $(CFLAGS) $?
+db_rec@o@: $(srcdir)/db/db_rec.c
+ $(CC) $(CFLAGS) $?
+db_reclaim@o@: $(srcdir)/db/db_reclaim.c
+ $(CC) $(CFLAGS) $?
+db_rename@o@: $(srcdir)/db/db_rename.c
+ $(CC) $(CFLAGS) $?
+db_remove@o@: $(srcdir)/db/db_remove.c
+ $(CC) $(CFLAGS) $?
+db_ret@o@: $(srcdir)/db/db_ret.c
+ $(CC) $(CFLAGS) $?
+db_salloc@o@: $(srcdir)/env/db_salloc.c
+ $(CC) $(CFLAGS) $?
+db_shash@o@: $(srcdir)/env/db_shash.c
+ $(CC) $(CFLAGS) $?
+db_truncate@o@: $(srcdir)/db/db_truncate.c
+ $(CC) $(CFLAGS) $?
+db_upg@o@: $(srcdir)/db/db_upg.c
+ $(CC) $(CFLAGS) $?
+db_upg_opd@o@: $(srcdir)/db/db_upg_opd.c
+ $(CC) $(CFLAGS) $?
+db_vrfy@o@: $(srcdir)/db/db_vrfy.c
+ $(CC) $(CFLAGS) $?
+db_vrfyutil@o@: $(srcdir)/db/db_vrfyutil.c
+ $(CC) $(CFLAGS) $?
+dbm@o@: $(srcdir)/dbm/dbm.c
+ $(CC) $(CFLAGS) $?
+dbreg@o@: $(srcdir)/dbreg/dbreg.c
+ $(CC) $(CFLAGS) $?
+dbreg_auto@o@: $(srcdir)/dbreg/dbreg_auto.c
+ $(CC) $(CFLAGS) $?
+dbreg_rec@o@: $(srcdir)/dbreg/dbreg_rec.c
+ $(CC) $(CFLAGS) $?
+dbreg_util@o@: $(srcdir)/dbreg/dbreg_util.c
+ $(CC) $(CFLAGS) $?
+env_file@o@: $(srcdir)/env/env_file.c
+ $(CC) $(CFLAGS) $?
+env_method@o@: $(srcdir)/env/env_method.c
+ $(CC) $(CFLAGS) $?
+env_open@o@: $(srcdir)/env/env_open.c
+ $(CC) $(CFLAGS) $?
+env_recover@o@: $(srcdir)/env/env_recover.c
+ $(CC) $(CFLAGS) $?
+env_region@o@: $(srcdir)/env/env_region.c
+ $(CC) $(CFLAGS) $?
+fileops_auto@o@: $(srcdir)/fileops/fileops_auto.c
+ $(CC) $(CFLAGS) $?
+fop_basic@o@: $(srcdir)/fileops/fop_basic.c
+ $(CC) $(CFLAGS) $?
+fop_rec@o@: $(srcdir)/fileops/fop_rec.c
+ $(CC) $(CFLAGS) $?
+fop_util@o@: $(srcdir)/fileops/fop_util.c
+ $(CC) $(CFLAGS) $?
+hash@o@: $(srcdir)/hash/hash.c
+ $(CC) $(CFLAGS) $?
+hash_auto@o@: $(srcdir)/hash/hash_auto.c
+ $(CC) $(CFLAGS) $?
+hash_conv@o@: $(srcdir)/hash/hash_conv.c
+ $(CC) $(CFLAGS) $?
+hash_dup@o@: $(srcdir)/hash/hash_dup.c
+ $(CC) $(CFLAGS) $?
+hash_func@o@: $(srcdir)/hash/hash_func.c
+ $(CC) $(CFLAGS) $?
+hash_meta@o@: $(srcdir)/hash/hash_meta.c
+ $(CC) $(CFLAGS) $?
+hash_method@o@: $(srcdir)/hash/hash_method.c
+ $(CC) $(CFLAGS) $?
+hash_open@o@: $(srcdir)/hash/hash_open.c
+ $(CC) $(CFLAGS) $?
+hash_page@o@: $(srcdir)/hash/hash_page.c
+ $(CC) $(CFLAGS) $?
+hash_rec@o@: $(srcdir)/hash/hash_rec.c
+ $(CC) $(CFLAGS) $?
+hash_reclaim@o@: $(srcdir)/hash/hash_reclaim.c
+ $(CC) $(CFLAGS) $?
+hash_stat@o@: $(srcdir)/hash/hash_stat.c
+ $(CC) $(CFLAGS) $?
+hash_upgrade@o@: $(srcdir)/hash/hash_upgrade.c
+ $(CC) $(CFLAGS) $?
+hash_verify@o@: $(srcdir)/hash/hash_verify.c
+ $(CC) $(CFLAGS) $?
+hmac@o@: $(srcdir)/hmac/hmac.c
+ $(CC) $(CFLAGS) $?
+hsearch@o@: $(srcdir)/hsearch/hsearch.c
+ $(CC) $(CFLAGS) $?
+lock@o@: $(srcdir)/lock/lock.c
+ $(CC) $(CFLAGS) $?
+lock_deadlock@o@:$(srcdir)/lock/lock_deadlock.c
+ $(CC) $(CFLAGS) $?
+lock_method@o@:$(srcdir)/lock/lock_method.c
+ $(CC) $(CFLAGS) $?
+lock_region@o@:$(srcdir)/lock/lock_region.c
+ $(CC) $(CFLAGS) $?
+lock_stat@o@:$(srcdir)/lock/lock_stat.c
+ $(CC) $(CFLAGS) $?
+lock_util@o@:$(srcdir)/lock/lock_util.c
+ $(CC) $(CFLAGS) $?
+log@o@: $(srcdir)/log/log.c
+ $(CC) $(CFLAGS) $?
+log_archive@o@: $(srcdir)/log/log_archive.c
+ $(CC) $(CFLAGS) $?
+log_compare@o@: $(srcdir)/log/log_compare.c
+ $(CC) $(CFLAGS) $?
+log_get@o@: $(srcdir)/log/log_get.c
+ $(CC) $(CFLAGS) $?
+log_method@o@: $(srcdir)/log/log_method.c
+ $(CC) $(CFLAGS) $?
+log_put@o@: $(srcdir)/log/log_put.c
+ $(CC) $(CFLAGS) $?
+mp_alloc@o@: $(srcdir)/mp/mp_alloc.c
+ $(CC) $(CFLAGS) $?
+mp_bh@o@: $(srcdir)/mp/mp_bh.c
+ $(CC) $(CFLAGS) $?
+mp_fget@o@: $(srcdir)/mp/mp_fget.c
+ $(CC) $(CFLAGS) $?
+mp_fopen@o@: $(srcdir)/mp/mp_fopen.c
+ $(CC) $(CFLAGS) $?
+mp_fput@o@: $(srcdir)/mp/mp_fput.c
+ $(CC) $(CFLAGS) $?
+mp_fset@o@: $(srcdir)/mp/mp_fset.c
+ $(CC) $(CFLAGS) $?
+mp_method@o@: $(srcdir)/mp/mp_method.c
+ $(CC) $(CFLAGS) $?
+mp_region@o@: $(srcdir)/mp/mp_region.c
+ $(CC) $(CFLAGS) $?
+mp_register@o@: $(srcdir)/mp/mp_register.c
+ $(CC) $(CFLAGS) $?
+mp_stat@o@: $(srcdir)/mp/mp_stat.c
+ $(CC) $(CFLAGS) $?
+mp_sync@o@: $(srcdir)/mp/mp_sync.c
+ $(CC) $(CFLAGS) $?
+mp_trickle@o@: $(srcdir)/mp/mp_trickle.c
+ $(CC) $(CFLAGS) $?
+mt19937db@o@: $(srcdir)/crypto/mersenne/mt19937db.c
+ $(CC) $(CFLAGS) $?
+mut_fcntl@o@: $(srcdir)/mutex/mut_fcntl.c
+ $(CC) $(CFLAGS) $?
+mut_pthread@o@: $(srcdir)/mutex/mut_pthread.c
+ $(CC) $(CFLAGS) $?
+mut_tas@o@: $(srcdir)/mutex/mut_tas.c
+ $(CC) $(CFLAGS) $?
+mutex@o@: $(srcdir)/mutex/mutex.c
+ $(CC) $(CFLAGS) $?
+os_abs@o@: $(srcdir)/os/os_abs.c
+ $(CC) $(CFLAGS) $?
+os_alloc@o@: $(srcdir)/os/os_alloc.c
+ $(CC) $(CFLAGS) $?
+os_clock@o@: $(srcdir)/os/os_clock.c
+ $(CC) $(CFLAGS) $?
+os_config@o@: $(srcdir)/os/os_config.c
+ $(CC) $(CFLAGS) $?
+os_dir@o@: $(srcdir)/os/os_dir.c
+ $(CC) $(CFLAGS) $?
+os_errno@o@: $(srcdir)/os/os_errno.c
+ $(CC) $(CFLAGS) $?
+os_fid@o@: $(srcdir)/os/os_fid.c
+ $(CC) $(CFLAGS) $?
+os_fsync@o@: $(srcdir)/os/os_fsync.c
+ $(CC) $(CFLAGS) $?
+os_id@o@: $(srcdir)/os/os_id.c
+ $(CC) $(CFLAGS) $?
+os_handle@o@: $(srcdir)/os/os_handle.c
+ $(CC) $(CFLAGS) $?
+os_map@o@: $(srcdir)/os/os_map.c
+ $(CC) $(CFLAGS) $?
+os_method@o@: $(srcdir)/os/os_method.c
+ $(CC) $(CFLAGS) $?
+os_oflags@o@: $(srcdir)/os/os_oflags.c
+ $(CC) $(CFLAGS) $?
+os_open@o@: $(srcdir)/os/os_open.c
+ $(CC) $(CFLAGS) $?
+os_region@o@: $(srcdir)/os/os_region.c
+ $(CC) $(CFLAGS) $?
+os_rename@o@: $(srcdir)/os/os_rename.c
+ $(CC) $(CFLAGS) $?
+os_root@o@: $(srcdir)/os/os_root.c
+ $(CC) $(CFLAGS) $?
+os_rpath@o@: $(srcdir)/os/os_rpath.c
+ $(CC) $(CFLAGS) $?
+os_rw@o@: $(srcdir)/os/os_rw.c
+ $(CC) $(CFLAGS) $?
+os_seek@o@: $(srcdir)/os/os_seek.c
+ $(CC) $(CFLAGS) $?
+os_sleep@o@: $(srcdir)/os/os_sleep.c
+ $(CC) $(CFLAGS) $?
+os_spin@o@: $(srcdir)/os/os_spin.c
+ $(CC) $(CFLAGS) $?
+os_stat@o@: $(srcdir)/os/os_stat.c
+ $(CC) $(CFLAGS) $?
+os_tmpdir@o@: $(srcdir)/os/os_tmpdir.c
+ $(CC) $(CFLAGS) $?
+os_unlink@o@: $(srcdir)/os/os_unlink.c
+ $(CC) $(CFLAGS) $?
qam@o@: $(srcdir)/qam/qam.c
- $(CC) $(CFLAGS) $?
+ $(CC) $(CFLAGS) $?
qam_auto@o@: $(srcdir)/qam/qam_auto.c
- $(CC) $(CFLAGS) $?
+ $(CC) $(CFLAGS) $?
qam_conv@o@: $(srcdir)/qam/qam_conv.c
- $(CC) $(CFLAGS) $?
+ $(CC) $(CFLAGS) $?
qam_files@o@: $(srcdir)/qam/qam_files.c
- $(CC) $(CFLAGS) $?
+ $(CC) $(CFLAGS) $?
qam_method@o@: $(srcdir)/qam/qam_method.c
- $(CC) $(CFLAGS) $?
+ $(CC) $(CFLAGS) $?
qam_open@o@: $(srcdir)/qam/qam_open.c
- $(CC) $(CFLAGS) $?
+ $(CC) $(CFLAGS) $?
qam_rec@o@: $(srcdir)/qam/qam_rec.c
- $(CC) $(CFLAGS) $?
+ $(CC) $(CFLAGS) $?
qam_stat@o@: $(srcdir)/qam/qam_stat.c
- $(CC) $(CFLAGS) $?
+ $(CC) $(CFLAGS) $?
qam_upgrade@o@: $(srcdir)/qam/qam_upgrade.c
- $(CC) $(CFLAGS) $?
+ $(CC) $(CFLAGS) $?
qam_verify@o@: $(srcdir)/qam/qam_verify.c
+ $(CC) $(CFLAGS) $?
+rep_method@o@: $(srcdir)/rep/rep_method.c
+ $(CC) $(CFLAGS) $?
+rep_record@o@: $(srcdir)/rep/rep_record.c
+ $(CC) $(CFLAGS) $?
+rep_region@o@: $(srcdir)/rep/rep_region.c
+ $(CC) $(CFLAGS) $?
+rep_util@o@: $(srcdir)/rep/rep_util.c
+ $(CC) $(CFLAGS) $?
+rijndael-alg-fst@o@: $(srcdir)/crypto/rijndael/rijndael-alg-fst.c
+ $(CC) $(CFLAGS) $?
+rijndael-api-fst@o@: $(srcdir)/crypto/rijndael/rijndael-api-fst.c
$(CC) $(CFLAGS) $?
+sha1@o@: $(srcdir)/hmac/sha1.c
+ $(CC) $(CFLAGS) $?
+txn@o@: $(srcdir)/txn/txn.c
+ $(CC) $(CFLAGS) $?
+txn_auto@o@: $(srcdir)/txn/txn_auto.c
+ $(CC) $(CFLAGS) $?
+txn_method@o@: $(srcdir)/txn/txn_method.c
+ $(CC) $(CFLAGS) $?
+txn_rec@o@: $(srcdir)/txn/txn_rec.c
+ $(CC) $(CFLAGS) $?
+txn_recover@o@: $(srcdir)/txn/txn_recover.c
+ $(CC) $(CFLAGS) $?
+txn_region@o@: $(srcdir)/txn/txn_region.c
+ $(CC) $(CFLAGS) $?
+txn_stat@o@: $(srcdir)/txn/txn_stat.c
+ $(CC) $(CFLAGS) $?
+txn_util@o@: $(srcdir)/txn/txn_util.c
+ $(CC) $(CFLAGS) $?
+util_cache@o@: $(srcdir)/common/util_cache.c
+ $(CC) $(CFLAGS) $?
+util_log@o@: $(srcdir)/common/util_log.c
+ $(CC) $(CFLAGS) $?
+util_sig@o@: $(srcdir)/common/util_sig.c
+ $(CC) $(CFLAGS) $?
+uts4_cc@o@: $(srcdir)/mutex/uts4_cc.s
+ $(AS) $(ASFLAGS) -o $@ $?
+xa@o@: $(srcdir)/xa/xa.c
+ $(CC) $(CFLAGS) $?
+xa_db@o@: $(srcdir)/xa/xa_db.c
+ $(CC) $(CFLAGS) $?
+xa_map@o@: $(srcdir)/xa/xa_map.c
+ $(CC) $(CFLAGS) $?
-# C++ files
-cxx_app@o@: $(srcdir)/cxx/cxx_app.cpp
+##################################################
+# C++ API build rules.
+##################################################
+cxx_db@o@: $(srcdir)/cxx/cxx_db.cpp
+ $(CXX) $(CXXFLAGS) $?
+cxx_dbc@o@: $(srcdir)/cxx/cxx_dbc.cpp
+ $(CXX) $(CXXFLAGS) $?
+cxx_dbt@o@: $(srcdir)/cxx/cxx_dbt.cpp
+ $(CXX) $(CXXFLAGS) $?
+cxx_env@o@: $(srcdir)/cxx/cxx_env.cpp
$(CXX) $(CXXFLAGS) $?
cxx_except@o@: $(srcdir)/cxx/cxx_except.cpp
$(CXX) $(CXXFLAGS) $?
cxx_lock@o@: $(srcdir)/cxx/cxx_lock.cpp
$(CXX) $(CXXFLAGS) $?
-cxx_log@o@: $(srcdir)/cxx/cxx_log.cpp
+cxx_logc@o@: $(srcdir)/cxx/cxx_logc.cpp
$(CXX) $(CXXFLAGS) $?
cxx_mpool@o@: $(srcdir)/cxx/cxx_mpool.cpp
$(CXX) $(CXXFLAGS) $?
-cxx_table@o@: $(srcdir)/cxx/cxx_table.cpp
- $(CXX) $(CXXFLAGS) $?
cxx_txn@o@: $(srcdir)/cxx/cxx_txn.cpp
$(CXX) $(CXXFLAGS) $?
-# Java files
+##################################################
+# Java API build rules.
+##################################################
java_Db@o@::$(srcdir)/libdb_java/java_Db.c
$(CC) $(CFLAGS) $?
java_DbEnv@o@: $(srcdir)/libdb_java/java_DbEnv.c
$(CC) $(CFLAGS) $?
java_DbLock@o@: $(srcdir)/libdb_java/java_DbLock.c
$(CC) $(CFLAGS) $?
+java_DbLogc@o@: $(srcdir)/libdb_java/java_DbLogc.c
+ $(CC) $(CFLAGS) $?
java_DbLsn@o@: $(srcdir)/libdb_java/java_DbLsn.c
$(CC) $(CFLAGS) $?
java_DbTxn@o@: $(srcdir)/libdb_java/java_DbTxn.c
$(CC) $(CFLAGS) $?
+java_DbUtil@o@: $(srcdir)/libdb_java/java_DbUtil.c
+ $(CC) $(CFLAGS) $?
java_Dbc@o@: $(srcdir)/libdb_java/java_Dbc.c
$(CC) $(CFLAGS) $?
java_Dbt@o@: $(srcdir)/libdb_java/java_Dbt.c
@@ -728,8 +1284,12 @@ java_locked@o@: $(srcdir)/libdb_java/java_locked.c
$(CC) $(CFLAGS) $?
java_util@o@: $(srcdir)/libdb_java/java_util.c
$(CC) $(CFLAGS) $?
+java_stat_auto@o@: $(srcdir)/libdb_java/java_stat_auto.c
+ $(CC) $(CFLAGS) $?
-# Tcl files
+##################################################
+# Tcl API build rules.
+##################################################
tcl_compat@o@: $(srcdir)/tcl/tcl_compat.c
$(CC) $(CFLAGS) $(TCFLAGS) $?
tcl_db@o@: $(srcdir)/tcl/tcl_db.c
@@ -748,132 +1308,16 @@ tcl_log@o@: $(srcdir)/tcl/tcl_log.c
$(CC) $(CFLAGS) $(TCFLAGS) $?
tcl_mp@o@: $(srcdir)/tcl/tcl_mp.c
$(CC) $(CFLAGS) $(TCFLAGS) $?
+tcl_rep@o@: $(srcdir)/tcl/tcl_rep.c
+ $(CC) $(CFLAGS) $(TCFLAGS) $?
tcl_txn@o@: $(srcdir)/tcl/tcl_txn.c
$(CC) $(CFLAGS) $(TCFLAGS) $?
+tcl_util@o@: $(srcdir)/tcl/tcl_util.c
+ $(CC) $(CFLAGS) $(TCFLAGS) $?
-# Hash files
-hash_auto@o@: $(srcdir)/hash/hash_auto.c
- $(CC) $(CFLAGS) $?
-hash@o@: $(srcdir)/hash/hash.c
- $(CC) $(CFLAGS) $?
-hash_conv@o@: $(srcdir)/hash/hash_conv.c
- $(CC) $(CFLAGS) $?
-hash_dup@o@: $(srcdir)/hash/hash_dup.c
- $(CC) $(CFLAGS) $?
-hash_func@o@: $(srcdir)/hash/hash_func.c
- $(CC) $(CFLAGS) $?
-hash_meta@o@: $(srcdir)/hash/hash_meta.c
- $(CC) $(CFLAGS) $?
-hash_method@o@: $(srcdir)/hash/hash_method.c
- $(CC) $(CFLAGS) $?
-hash_page@o@: $(srcdir)/hash/hash_page.c
- $(CC) $(CFLAGS) $?
-hash_rec@o@: $(srcdir)/hash/hash_rec.c
- $(CC) $(CFLAGS) $?
-hash_reclaim@o@: $(srcdir)/hash/hash_reclaim.c
- $(CC) $(CFLAGS) $?
-hash_stat@o@: $(srcdir)/hash/hash_stat.c
- $(CC) $(CFLAGS) $?
-hash_upgrade@o@: $(srcdir)/hash/hash_upgrade.c
- $(CC) $(CFLAGS) $?
-hash_verify@o@: $(srcdir)/hash/hash_verify.c
- $(CC) $(CFLAGS) $?
-
-# Lock files
-lock@o@: $(srcdir)/lock/lock.c
- $(CC) $(CFLAGS) $?
-lock_conflict@o@:$(srcdir)/lock/lock_conflict.c
- $(CC) $(CFLAGS) $?
-lock_deadlock@o@:$(srcdir)/lock/lock_deadlock.c
- $(CC) $(CFLAGS) $?
-lock_method@o@:$(srcdir)/lock/lock_method.c
- $(CC) $(CFLAGS) $?
-lock_region@o@:$(srcdir)/lock/lock_region.c
- $(CC) $(CFLAGS) $?
-lock_stat@o@:$(srcdir)/lock/lock_stat.c
- $(CC) $(CFLAGS) $?
-lock_util@o@:$(srcdir)/lock/lock_util.c
- $(CC) $(CFLAGS) $?
-
-# Log files
-log@o@: $(srcdir)/log/log.c
- $(CC) $(CFLAGS) $?
-log_archive@o@: $(srcdir)/log/log_archive.c
- $(CC) $(CFLAGS) $?
-log_auto@o@: $(srcdir)/log/log_auto.c
- $(CC) $(CFLAGS) $?
-log_compare@o@: $(srcdir)/log/log_compare.c
- $(CC) $(CFLAGS) $?
-log_findckp@o@: $(srcdir)/log/log_findckp.c
- $(CC) $(CFLAGS) $?
-log_get@o@: $(srcdir)/log/log_get.c
- $(CC) $(CFLAGS) $?
-log_method@o@: $(srcdir)/log/log_method.c
- $(CC) $(CFLAGS) $?
-log_put@o@: $(srcdir)/log/log_put.c
- $(CC) $(CFLAGS) $?
-log_rec@o@: $(srcdir)/log/log_rec.c
- $(CC) $(CFLAGS) $?
-log_register@o@: $(srcdir)/log/log_register.c
- $(CC) $(CFLAGS) $?
-
-# Mpool files
-mp_alloc@o@: $(srcdir)/mp/mp_alloc.c
- $(CC) $(CFLAGS) $?
-mp_bh@o@: $(srcdir)/mp/mp_bh.c
- $(CC) $(CFLAGS) $?
-mp_fget@o@: $(srcdir)/mp/mp_fget.c
- $(CC) $(CFLAGS) $?
-mp_fopen@o@: $(srcdir)/mp/mp_fopen.c
- $(CC) $(CFLAGS) $?
-mp_fput@o@: $(srcdir)/mp/mp_fput.c
- $(CC) $(CFLAGS) $?
-mp_fset@o@: $(srcdir)/mp/mp_fset.c
- $(CC) $(CFLAGS) $?
-mp_method@o@: $(srcdir)/mp/mp_method.c
- $(CC) $(CFLAGS) $?
-mp_region@o@: $(srcdir)/mp/mp_region.c
- $(CC) $(CFLAGS) $?
-mp_register@o@: $(srcdir)/mp/mp_register.c
- $(CC) $(CFLAGS) $?
-mp_stat@o@: $(srcdir)/mp/mp_stat.c
- $(CC) $(CFLAGS) $?
-mp_sync@o@: $(srcdir)/mp/mp_sync.c
- $(CC) $(CFLAGS) $?
-mp_trickle@o@: $(srcdir)/mp/mp_trickle.c
- $(CC) $(CFLAGS) $?
-
-# Mutex files
-mutex@o@: $(srcdir)/mutex/mutex.c
- $(CC) $(CFLAGS) $?
-mut_fcntl@o@: $(srcdir)/mutex/mut_fcntl.c
- $(CC) $(CFLAGS) $?
-mut_pthread@o@: $(srcdir)/mutex/mut_pthread.c
- $(CC) $(CFLAGS) $?
-mut_tas@o@: $(srcdir)/mutex/mut_tas.c
- $(CC) $(CFLAGS) $?
-# UTS4 spinlock assembly.
-uts4_cc@o@: $(srcdir)/mutex/uts4_cc.s
- $(AS) $(ASFLAGS) -o $@ $?
-
-# Transaction files
-txn@o@: $(srcdir)/txn/txn.c
- $(CC) $(CFLAGS) $?
-txn_auto@o@: $(srcdir)/txn/txn_auto.c
- $(CC) $(CFLAGS) $?
-txn_rec@o@: $(srcdir)/txn/txn_rec.c
- $(CC) $(CFLAGS) $?
-txn_region@o@: $(srcdir)/txn/txn_region.c
- $(CC) $(CFLAGS) $?
-
-# XA files
-xa@o@: $(srcdir)/xa/xa.c
- $(CC) $(CFLAGS) $?
-xa_db@o@: $(srcdir)/xa/xa_db.c
- $(CC) $(CFLAGS) $?
-xa_map@o@: $(srcdir)/xa/xa_map.c
- $(CC) $(CFLAGS) $?
-
+##################################################
+# RPC build rules.
+##################################################
# RPC client files
client@o@: $(srcdir)/rpc_client/client.c
$(CC) $(CFLAGS) $?
@@ -885,98 +1329,50 @@ gen_client_ret@o@: $(srcdir)/rpc_client/gen_client_ret.c
$(CC) $(CFLAGS) $?
# RPC server files
-db_server_proc@o@: $(srcdir)/rpc_server/db_server_proc.c
- $(CC) $(CFLAGS) $?
-db_server_svc@o@: $(srcdir)/rpc_server/db_server_svc.c
- $(CC) $(CFLAGS) $?
-db_server_util@o@: $(srcdir)/rpc_server/db_server_util.c
+db_server_proc@o@: $(srcdir)/rpc_server/c/db_server_proc.c
$(CC) $(CFLAGS) $?
-db_server_xdr@o@: $(srcdir)/rpc_server/db_server_xdr.c
+db_server_svc@o@: $(srcdir)/rpc_server/c/db_server_svc.c
$(CC) $(CFLAGS) $?
-gen_db_server@o@: $(srcdir)/rpc_server/gen_db_server.c
+db_server_util@o@: $(srcdir)/rpc_server/c/db_server_util.c
$(CC) $(CFLAGS) $?
-
-# Historic compatibility files
-db185@o@: $(srcdir)/db185/db185.c
+db_server_xdr@o@: $(srcdir)/rpc_server/c/db_server_xdr.c
$(CC) $(CFLAGS) $?
-dbm@o@: $(srcdir)/dbm/dbm.c
- $(CC) $(CFLAGS) $?
-hsearch@o@: $(srcdir)/hsearch/hsearch.c
+gen_db_server@o@: $(srcdir)/rpc_server/c/gen_db_server.c
$(CC) $(CFLAGS) $?
+db_server_cxxproc@o@: $(srcdir)/rpc_server/cxx/db_server_cxxproc.cpp
+ $(CXX) $(CXXFLAGS) $?
+db_server_cxxutil@o@: $(srcdir)/rpc_server/cxx/db_server_cxxutil.cpp
+ $(CXX) $(CXXFLAGS) $?
-# OS specific files
-os_abs@o@: $(srcdir)/os/os_abs.c
- $(CC) $(CFLAGS) $?
-os_alloc@o@: $(srcdir)/os/os_alloc.c
- $(CC) $(CFLAGS) $?
-os_dir@o@: $(srcdir)/os/os_dir.c
- $(CC) $(CFLAGS) $?
-os_errno@o@: $(srcdir)/os/os_errno.c
- $(CC) $(CFLAGS) $?
-os_fid@o@: $(srcdir)/os/os_fid.c
- $(CC) $(CFLAGS) $?
-os_finit@o@: $(srcdir)/os/os_finit.c
- $(CC) $(CFLAGS) $?
-os_fsync@o@: $(srcdir)/os/os_fsync.c
- $(CC) $(CFLAGS) $?
-os_handle@o@: $(srcdir)/os/os_handle.c
- $(CC) $(CFLAGS) $?
-os_map@o@: $(srcdir)/os/os_map.c
- $(CC) $(CFLAGS) $?
-os_method@o@: $(srcdir)/os/os_method.c
- $(CC) $(CFLAGS) $?
-os_oflags@o@: $(srcdir)/os/os_oflags.c
- $(CC) $(CFLAGS) $?
-os_open@o@: $(srcdir)/os/os_open.c
- $(CC) $(CFLAGS) $?
-os_region@o@: $(srcdir)/os/os_region.c
- $(CC) $(CFLAGS) $?
-os_rename@o@: $(srcdir)/os/os_rename.c
+##################################################
+# Utility build rules.
+##################################################
+db_archive@o@: $(srcdir)/db_archive/db_archive.c
$(CC) $(CFLAGS) $?
-os_root@o@: $(srcdir)/os/os_root.c
+db_checkpoint@o@: $(srcdir)/db_checkpoint/db_checkpoint.c
$(CC) $(CFLAGS) $?
-os_rpath@o@: $(srcdir)/os/os_rpath.c
+db_deadlock@o@: $(srcdir)/db_deadlock/db_deadlock.c
$(CC) $(CFLAGS) $?
-os_rw@o@: $(srcdir)/os/os_rw.c
+db_dump@o@: $(srcdir)/db_dump/db_dump.c
$(CC) $(CFLAGS) $?
-os_seek@o@: $(srcdir)/os/os_seek.c
+db_dump185@o@: $(srcdir)/db_dump185/db_dump185.c
+ $(CC) $(DB185INC) $?
+db_load@o@: $(srcdir)/db_load/db_load.c
$(CC) $(CFLAGS) $?
-os_sleep@o@: $(srcdir)/os/os_sleep.c
+db_printlog@o@: $(srcdir)/db_printlog/db_printlog.c
$(CC) $(CFLAGS) $?
-os_spin@o@: $(srcdir)/os/os_spin.c
+db_recover@o@: $(srcdir)/db_recover/db_recover.c
$(CC) $(CFLAGS) $?
-os_stat@o@: $(srcdir)/os/os_stat.c
+db_stat@o@: $(srcdir)/db_stat/db_stat.c
$(CC) $(CFLAGS) $?
-os_tmpdir@o@: $(srcdir)/os/os_tmpdir.c
+db_upgrade@o@: $(srcdir)/db_upgrade/db_upgrade.c
$(CC) $(CFLAGS) $?
-os_unlink@o@: $(srcdir)/os/os_unlink.c
+db_verify@o@: $(srcdir)/db_verify/db_verify.c
$(CC) $(CFLAGS) $?
-# Dbs.
-dbs@o@: $(srcdir)/test_server/dbs.c
- $(CC) $(CFLAGS) -I$(srcdir)/test_server $?
-dbs_am@o@: $(srcdir)/test_server/dbs_am.c
- $(CC) $(CFLAGS) -I$(srcdir)/test_server $?
-dbs_checkpoint@o@: $(srcdir)/test_server/dbs_checkpoint.c
- $(CC) $(CFLAGS) -I$(srcdir)/test_server $?
-dbs_debug@o@: $(srcdir)/test_server/dbs_debug.c
- $(CC) $(CFLAGS) -I$(srcdir)/test_server $?
-dbs_handles@o@: $(srcdir)/test_server/dbs_handles.c
- $(CC) $(CFLAGS) -I$(srcdir)/test_server $?
-dbs_log@o@: $(srcdir)/test_server/dbs_log.c
- $(CC) $(CFLAGS) -I$(srcdir)/test_server $?
-dbs_qam@o@: $(srcdir)/test_server/dbs_qam.c
- $(CC) $(CFLAGS) -I$(srcdir)/test_server $?
-dbs_spawn@o@: $(srcdir)/test_server/dbs_spawn.c
- $(CC) $(CFLAGS) -I$(srcdir)/test_server $?
-dbs_trickle@o@: $(srcdir)/test_server/dbs_trickle.c
- $(CC) $(CFLAGS) -I$(srcdir)/test_server $?
-dbs_util@o@: $(srcdir)/test_server/dbs_util.c
- $(CC) $(CFLAGS) -I$(srcdir)/test_server $?
-dbs_yield@o@: $(srcdir)/test_server/dbs_yield.c
- $(CC) $(CFLAGS) -I$(srcdir)/test_server $?
-
-# Replacement files
+##################################################
+# C library replacement files.
+##################################################
getcwd@o@: $(srcdir)/clib/getcwd.c
$(CC) $(CFLAGS) $?
getopt@o@: $(srcdir)/clib/getopt.c
@@ -991,12 +1387,11 @@ raise@o@: $(srcdir)/clib/raise.c
$(CC) $(CFLAGS) $?
strcasecmp@o@: $(srcdir)/clib/strcasecmp.c
$(CC) $(CFLAGS) $?
+strdup@o@: $(srcdir)/clib/strdup.c
+ $(CC) $(CFLAGS) $?
snprintf@o@: $(srcdir)/clib/snprintf.c
$(CC) $(CFLAGS) $?
strerror@o@: $(srcdir)/clib/strerror.c
$(CC) $(CFLAGS) $?
vsnprintf@o@: $(srcdir)/clib/vsnprintf.c
$(CC) $(CFLAGS) $?
-
-# Don't update the files from bitkeeper
-%::SCCS/s.%
diff --git a/bdb/dist/RELEASE b/bdb/dist/RELEASE
index 7b7d7bde003..fe9b6667bbe 100644
--- a/bdb/dist/RELEASE
+++ b/bdb/dist/RELEASE
@@ -1,8 +1,11 @@
-# $Id: RELEASE,v 11.72 2001/01/24 15:20:14 bostic Exp $
+# $Id: RELEASE,v 11.123 2002/09/13 22:16:02 bostic Exp $
-DB_VERSION_MAJOR=3
-DB_VERSION_MINOR=2
-DB_VERSION_PATCH=9
-DB_RELEASE_DATE=`date "+%B %e, %Y"`
+DB_VERSION_MAJOR=4
+DB_VERSION_MINOR=1
+DB_VERSION_PATCH=24
+DB_VERSION="$DB_VERSION_MAJOR.$DB_VERSION_MINOR.$DB_VERSION_PATCH"
+
+DB_VERSION_UNIQUE_NAME=`printf "_%d%03d" $DB_VERSION_MAJOR $DB_VERSION_MINOR`
-DB_VERSION_STRING="Sleepycat Software: Berkeley DB $DB_VERSION_MAJOR.$DB_VERSION_MINOR.${DB_VERSION_PATCH}a: ($DB_RELEASE_DATE)"
+DB_RELEASE_DATE=`date "+%B %e, %Y"`
+DB_VERSION_STRING="Sleepycat Software: Berkeley DB $DB_VERSION: ($DB_RELEASE_DATE)"
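With the numbers above, the derived values work out as follows; the date portion is whatever the date command shown above prints at release-build time:

DB_VERSION=4.1.24
DB_VERSION_UNIQUE_NAME=_4001        # printf "_%d%03d" 4 1
DB_VERSION_STRING="Sleepycat Software: Berkeley DB 4.1.24: (<release date>)"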
diff --git a/bdb/dist/acconfig.h b/bdb/dist/acconfig.h
deleted file mode 100644
index 19f85a460d9..00000000000
--- a/bdb/dist/acconfig.h
+++ /dev/null
@@ -1,109 +0,0 @@
-/*
- * $Id: acconfig.h,v 11.29 2000/09/20 16:30:33 bostic Exp $
- */
-
-/* Define if you are building a version for running the test suite. */
-#undef CONFIG_TEST
-
-/* Define if you want a debugging version. */
-#undef DEBUG
-
-/* Define if you want a version that logs read operations. */
-#undef DEBUG_ROP
-
-/* Define if you want a version that logs write operations. */
-#undef DEBUG_WOP
-
-/* Define if you want a version with run-time diagnostic checking. */
-#undef DIAGNOSTIC
-
-/* Define if you want to mask harmless uninitialized memory read/writes. */
-#undef UMRW
-
-/* Define if fcntl/F_SETFD denies child access to file descriptors. */
-#undef HAVE_FCNTL_F_SETFD
-
-/* Define if building big-file environment (e.g., AIX, HP/UX, Solaris). */
-#undef HAVE_FILE_OFFSET_BITS
-
-/* Mutex possibilities. */
-#undef HAVE_MUTEX_68K_GCC_ASSEMBLY
-#undef HAVE_MUTEX_AIX_CHECK_LOCK
-#undef HAVE_MUTEX_ALPHA_GCC_ASSEMBLY
-#undef HAVE_MUTEX_HPPA_GCC_ASSEMBLY
-#undef HAVE_MUTEX_HPPA_MSEM_INIT
-#undef HAVE_MUTEX_IA64_GCC_ASSEMBLY
-#undef HAVE_MUTEX_MACOS
-#undef HAVE_MUTEX_MSEM_INIT
-#undef HAVE_MUTEX_PPC_GCC_ASSEMBLY
-#undef HAVE_MUTEX_PTHREADS
-#undef HAVE_MUTEX_RELIANTUNIX_INITSPIN
-#undef HAVE_MUTEX_SCO_X86_CC_ASSEMBLY
-#undef HAVE_MUTEX_SEMA_INIT
-#undef HAVE_MUTEX_SGI_INIT_LOCK
-#undef HAVE_MUTEX_SOLARIS_LOCK_TRY
-#undef HAVE_MUTEX_SOLARIS_LWP
-#undef HAVE_MUTEX_SPARC_GCC_ASSEMBLY
-#undef HAVE_MUTEX_THREADS
-#undef HAVE_MUTEX_UI_THREADS
-#undef HAVE_MUTEX_UTS_CC_ASSEMBLY
-#undef HAVE_MUTEX_VMS
-#undef HAVE_MUTEX_VXWORKS
-#undef HAVE_MUTEX_WIN16
-#undef HAVE_MUTEX_WIN32
-#undef HAVE_MUTEX_X86_GCC_ASSEMBLY
-#undef HAVE_MUTEX_X86_64_GCC_ASSEMBLY
-
-/* Define if building on QNX. */
-#undef HAVE_QNX
-
-/* Define if building RPC client/server. */
-#undef HAVE_RPC
-
-/* Define if your sprintf returns a pointer, not a length. */
-#undef SPRINTF_RET_CHARPNT
-
-@BOTTOM@
-
-/*
- * Big-file configuration.
- */
-#ifdef HAVE_FILE_OFFSET_BITS
-#define _FILE_OFFSET_BITS 64
-#endif
-
-/*
- * Don't step on the namespace. Other libraries may have their own
- * implementations of these functions, we don't want to use their
- * implementations or force them to use ours based on the load order.
- */
-#ifndef HAVE_GETCWD
-#define getcwd __db_Cgetcwd
-#endif
-#ifndef HAVE_GETOPT
-#define getopt __db_Cgetopt
-#endif
-#ifndef HAVE_MEMCMP
-#define memcmp __db_Cmemcmp
-#endif
-#ifndef HAVE_MEMCPY
-#define memcpy __db_Cmemcpy
-#endif
-#ifndef HAVE_MEMMOVE
-#define memmove __db_Cmemmove
-#endif
-#ifndef HAVE_RAISE
-#define raise __db_Craise
-#endif
-#ifndef HAVE_SNPRINTF
-#define snprintf __db_Csnprintf
-#endif
-#ifndef HAVE_STRCASECMP
-#define strcasecmp __db_Cstrcasecmp
-#endif
-#ifndef HAVE_STRERROR
-#define strerror __db_Cstrerror
-#endif
-#ifndef HAVE_VSNPRINTF
-#define vsnprintf __db_Cvsnprintf
-#endif
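acconfig.h was the pre-2.50 autoconf mechanism for feeding extra templates to autoheader, and its content is not simply dropped: the namespace-protection block at its bottom reappears almost verbatim in dist/aclocal/config.ac below, carried by AH_BOTTOM, while the remaining entries become AH_TEMPLATE calls. With 2.5x-style input the header template is regenerated directly from the configure sources, roughly as below; the dist directory normally drives this through its own s_* scripts, so the bare command is only a sketch:

autoheader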
diff --git a/bdb/dist/aclocal/config.ac b/bdb/dist/aclocal/config.ac
new file mode 100644
index 00000000000..cd288425946
--- /dev/null
+++ b/bdb/dist/aclocal/config.ac
@@ -0,0 +1,51 @@
+# Features we don't test for, but want the #defines to exist for
+# other ports.
+AH_TEMPLATE(HAVE_VXWORKS, [Define to 1 if building VxWorks.])
+
+AH_TEMPLATE(HAVE_FILESYSTEM_NOTZERO,
+ [Define to 1 if allocated filesystem blocks are not zeroed.])
+
+AH_TEMPLATE(HAVE_UNLINK_WITH_OPEN_FAILURE,
+ [Define to 1 if unlink of file with open file descriptors will fail.])
+
+AH_BOTTOM([/*
+ * Exit success/failure macros.
+ */
+#ifndef HAVE_EXIT_SUCCESS
+#define EXIT_FAILURE 1
+#define EXIT_SUCCESS 0
+#endif
+
+/*
+ * Don't step on the namespace. Other libraries may have their own
+ * implementations of these functions, we don't want to use their
+ * implementations or force them to use ours based on the load order.
+ */
+#ifndef HAVE_GETCWD
+#define getcwd __db_Cgetcwd
+#endif
+#ifndef HAVE_MEMCMP
+#define memcmp __db_Cmemcmp
+#endif
+#ifndef HAVE_MEMCPY
+#define memcpy __db_Cmemcpy
+#endif
+#ifndef HAVE_MEMMOVE
+#define memmove __db_Cmemmove
+#endif
+#ifndef HAVE_RAISE
+#define raise __db_Craise
+#endif
+#ifndef HAVE_SNPRINTF
+#define snprintf __db_Csnprintf
+#endif
+#ifndef HAVE_STRCASECMP
+#define strcasecmp __db_Cstrcasecmp
+#define strncasecmp __db_Cstrncasecmp
+#endif
+#ifndef HAVE_STRERROR
+#define strerror __db_Cstrerror
+#endif
+#ifndef HAVE_VSNPRINTF
+#define vsnprintf __db_Cvsnprintf
+#endif])
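The renamed symbols only exist on platforms where the native function is missing and the clib/ replacement gets compiled in; one quick, informal way to check such a build afterwards is to look for the library-private name in the resulting archive (the archive path here is illustrative):

nm .libs/libdb-4.1.a 2>/dev/null | grep __db_Csnprintf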
diff --git a/bdb/dist/aclocal/cxx.ac b/bdb/dist/aclocal/cxx.ac
new file mode 100644
index 00000000000..49103cc661a
--- /dev/null
+++ b/bdb/dist/aclocal/cxx.ac
@@ -0,0 +1,17 @@
+# C++ checks to determine what style of headers to use and
+# whether to use "using" clauses.
+
+AC_DEFUN(AC_CXX_HAVE_STDHEADERS, [
+AC_SUBST(cxx_have_stdheaders)
+AC_CACHE_CHECK([whether C++ supports the ISO C++ standard includes],
+db_cv_cxx_have_stdheaders,
+[AC_LANG_SAVE
+ AC_LANG_CPLUSPLUS
+ AC_TRY_COMPILE([#include <iostream>
+],[std::ostream *o; return 0;],
+ db_cv_cxx_have_stdheaders=yes, db_cv_cxx_have_stdheaders=no)
+ AC_LANG_RESTORE
+])
+if test "$db_cv_cxx_have_stdheaders" = yes; then
+ cxx_have_stdheaders="#define HAVE_CXX_STDHEADERS 1"
+fi])
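The probe wrapped in AC_TRY_COMPILE above can be reproduced by hand to see which way db_cv_cxx_have_stdheaders will come out; the following is a standalone sketch, independent of the dist scripts:

cat > conftest.cpp <<'EOF'
#include <iostream>
int main() { std::ostream *o; return 0; }
EOF
${CXX:-c++} -c conftest.cpp && echo db_cv_cxx_have_stdheaders=yes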
diff --git a/bdb/dist/aclocal/gcc.ac b/bdb/dist/aclocal/gcc.ac
new file mode 100644
index 00000000000..0949d982f17
--- /dev/null
+++ b/bdb/dist/aclocal/gcc.ac
@@ -0,0 +1,36 @@
+# Version 2.96 of gcc (shipped with RedHat Linux 7.[01] and Mandrake) had
+# serious problems.
+AC_DEFUN(AC_GCC_CONFIG1, [
+AC_CACHE_CHECK([whether we are using gcc version 2.96],
+db_cv_gcc_2_96, [
+db_cv_gcc_2_96=no
+if test "$GCC" = "yes"; then
+ GCC_VERSION=`${MAKEFILE_CC} --version`
+ case ${GCC_VERSION} in
+ 2.96*)
+ db_cv_gcc_2_96=yes;;
+ esac
+fi])
+if test "$db_cv_gcc_2_96" = "yes"; then
+ CFLAGS=`echo "$CFLAGS" | sed 's/-O2/-O/'`
+ CXXFLAGS=`echo "$CXXFLAGS" | sed 's/-O2/-O/'`
+ AC_MSG_WARN([INSTALLED GCC COMPILER HAS SERIOUS BUGS; PLEASE UPGRADE.])
+ AC_MSG_WARN([GCC OPTIMIZATION LEVEL SET TO -O.])
+fi])
+
+# Versions of g++ before 2.8.0 required -fhandle-exceptions; the flag was
+# renamed to -fexceptions and became the default in versions 2.8.0 and later.
+AC_DEFUN(AC_GCC_CONFIG2, [
+AC_CACHE_CHECK([whether g++ requires -fhandle-exceptions],
+db_cv_gxx_except, [
+db_cv_gxx_except=no;
+if test "$GXX" = "yes"; then
+ GXX_VERSION=`${MAKEFILE_CXX} --version`
+ case ${GXX_VERSION} in
+ 1.*|2.[[01234567]].*|*-1.*|*-2.[[01234567]].*)
+ db_cv_gxx_except=yes;;
+ esac
+fi])
+if test "$db_cv_gxx_except" = "yes"; then
+ CXXFLAGS="$CXXFLAGS -fhandle-exceptions"
+fi])
diff --git a/bdb/dist/aclocal/libtool.ac b/bdb/dist/aclocal/libtool.ac
new file mode 100644
index 00000000000..e99faf15e4e
--- /dev/null
+++ b/bdb/dist/aclocal/libtool.ac
@@ -0,0 +1,3633 @@
+# libtool.m4 - Configure libtool for the host system. -*-Shell-script-*-
+## Copyright 1996, 1997, 1998, 1999, 2000, 2001
+## Free Software Foundation, Inc.
+## Originally by Gordon Matzigkeit <gord@gnu.ai.mit.edu>, 1996
+##
+## This program is free software; you can redistribute it and/or modify
+## it under the terms of the GNU General Public License as published by
+## the Free Software Foundation; either version 2 of the License, or
+## (at your option) any later version.
+##
+## This program is distributed in the hope that it will be useful, but
+## WITHOUT ANY WARRANTY; without even the implied warranty of
+## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+## General Public License for more details.
+##
+## You should have received a copy of the GNU General Public License
+## along with this program; if not, write to the Free Software
+## Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+##
+## As a special exception to the GNU General Public License, if you
+## distribute this file as part of a program that contains a
+## configuration script generated by Autoconf, you may include it under
+## the same distribution terms that you use for the rest of that program.
+
+# serial 46 AC_PROG_LIBTOOL
+
+AC_DEFUN([AC_PROG_LIBTOOL],
+[AC_REQUIRE([AC_LIBTOOL_SETUP])dnl
+
+# This can be used to rebuild libtool when needed
+LIBTOOL_DEPS="$ac_aux_dir/ltmain.sh"
+
+# Always use our own libtool.
+LIBTOOL='$(SHELL) $(top_builddir)/libtool'
+AC_SUBST(LIBTOOL)dnl
+
+# Prevent multiple expansion
+define([AC_PROG_LIBTOOL], [])
+])
+
+AC_DEFUN([AC_LIBTOOL_SETUP],
+[AC_PREREQ(2.13)dnl
+AC_REQUIRE([AC_ENABLE_SHARED])dnl
+AC_REQUIRE([AC_ENABLE_STATIC])dnl
+AC_REQUIRE([AC_ENABLE_FAST_INSTALL])dnl
+AC_REQUIRE([AC_CANONICAL_HOST])dnl
+AC_REQUIRE([AC_CANONICAL_BUILD])dnl
+AC_REQUIRE([AC_PROG_CC])dnl
+AC_REQUIRE([AC_PROG_LD])dnl
+AC_REQUIRE([AC_PROG_LD_RELOAD_FLAG])dnl
+AC_REQUIRE([AC_PROG_NM])dnl
+AC_REQUIRE([AC_PROG_LN_S])dnl
+AC_REQUIRE([AC_DEPLIBS_CHECK_METHOD])dnl
+AC_REQUIRE([AC_OBJEXT])dnl
+AC_REQUIRE([AC_EXEEXT])dnl
+dnl
+
+_LT_AC_PROG_ECHO_BACKSLASH
+# Only perform the check for file, if the check method requires it
+case $deplibs_check_method in
+file_magic*)
+ if test "$file_magic_cmd" = '$MAGIC_CMD'; then
+ AC_PATH_MAGIC
+ fi
+ ;;
+esac
+
+AC_CHECK_TOOL(RANLIB, ranlib, :)
+AC_CHECK_TOOL(STRIP, strip, :)
+
+ifdef([AC_PROVIDE_AC_LIBTOOL_DLOPEN], enable_dlopen=yes, enable_dlopen=no)
+ifdef([AC_PROVIDE_AC_LIBTOOL_WIN32_DLL],
+enable_win32_dll=yes, enable_win32_dll=no)
+
+AC_ARG_ENABLE(libtool-lock,
+ [ --disable-libtool-lock avoid locking (might break parallel builds)])
+test "x$enable_libtool_lock" != xno && enable_libtool_lock=yes
+
+# Some flags need to be propagated to the compiler or linker for good
+# libtool support.
+case $host in
+*-*-irix6*)
+ # Find out which ABI we are using.
+ echo '[#]line __oline__ "configure"' > conftest.$ac_ext
+ if AC_TRY_EVAL(ac_compile); then
+ case `/usr/bin/file conftest.$ac_objext` in
+ *32-bit*)
+ LD="${LD-ld} -32"
+ ;;
+ *N32*)
+ LD="${LD-ld} -n32"
+ ;;
+ *64-bit*)
+ LD="${LD-ld} -64"
+ ;;
+ esac
+ fi
+ rm -rf conftest*
+ ;;
+
+*-*-sco3.2v5*)
+ # On SCO OpenServer 5, we need -belf to get full-featured binaries.
+ SAVE_CFLAGS="$CFLAGS"
+ CFLAGS="$CFLAGS -belf"
+ AC_CACHE_CHECK([whether the C compiler needs -belf], lt_cv_cc_needs_belf,
+ [AC_LANG_SAVE
+ AC_LANG_C
+ AC_TRY_LINK([],[],[lt_cv_cc_needs_belf=yes],[lt_cv_cc_needs_belf=no])
+ AC_LANG_RESTORE])
+ if test x"$lt_cv_cc_needs_belf" != x"yes"; then
+ # this is probably gcc 2.8.0, egcs 1.0 or newer; no need for -belf
+ CFLAGS="$SAVE_CFLAGS"
+ fi
+ ;;
+
+ifdef([AC_PROVIDE_AC_LIBTOOL_WIN32_DLL],
+[*-*-cygwin* | *-*-mingw* | *-*-pw32*)
+ AC_CHECK_TOOL(DLLTOOL, dlltool, false)
+ AC_CHECK_TOOL(AS, as, false)
+ AC_CHECK_TOOL(OBJDUMP, objdump, false)
+
+ # recent cygwin and mingw systems supply a stub DllMain which the user
+ # can override, but on older systems we have to supply one
+ AC_CACHE_CHECK([if libtool should supply DllMain function], lt_cv_need_dllmain,
+ [AC_TRY_LINK([],
+ [extern int __attribute__((__stdcall__)) DllMain(void*, int, void*);
+ DllMain (0, 0, 0);],
+ [lt_cv_need_dllmain=no],[lt_cv_need_dllmain=yes])])
+
+ case $host/$CC in
+ *-*-cygwin*/gcc*-mno-cygwin*|*-*-mingw*)
+ # old mingw systems require "-dll" to link a DLL, while more recent ones
+ # require "-mdll"
+ SAVE_CFLAGS="$CFLAGS"
+ CFLAGS="$CFLAGS -mdll"
+ AC_CACHE_CHECK([how to link DLLs], lt_cv_cc_dll_switch,
+ [AC_TRY_LINK([], [], [lt_cv_cc_dll_switch=-mdll],[lt_cv_cc_dll_switch=-dll])])
+ CFLAGS="$SAVE_CFLAGS" ;;
+ *-*-cygwin* | *-*-pw32*)
+ # cygwin systems need to pass --dll to the linker, and not link
+ # crt.o which will require a WinMain@16 definition.
+ lt_cv_cc_dll_switch="-Wl,--dll -nostartfiles" ;;
+ esac
+ ;;
+ ])
+esac
+
+_LT_AC_LTCONFIG_HACK
+
+])
+
+# AC_LIBTOOL_HEADER_ASSERT
+# ------------------------
+AC_DEFUN([AC_LIBTOOL_HEADER_ASSERT],
+[AC_CACHE_CHECK([whether $CC supports assert without backlinking],
+ [lt_cv_func_assert_works],
+ [case $host in
+ *-*-solaris*)
+ if test "$GCC" = yes && test "$with_gnu_ld" != yes; then
+ case `$CC --version 2>/dev/null` in
+ [[12]].*) lt_cv_func_assert_works=no ;;
+ *) lt_cv_func_assert_works=yes ;;
+ esac
+ fi
+ ;;
+ esac])
+
+if test "x$lt_cv_func_assert_works" = xyes; then
+ AC_CHECK_HEADERS(assert.h)
+fi
+])# AC_LIBTOOL_HEADER_ASSERT
+
+# _LT_AC_CHECK_DLFCN
+# --------------------
+AC_DEFUN([_LT_AC_CHECK_DLFCN],
+[AC_CHECK_HEADERS(dlfcn.h)
+])# _LT_AC_CHECK_DLFCN
+
+# AC_LIBTOOL_SYS_GLOBAL_SYMBOL_PIPE
+# ---------------------------------
+AC_DEFUN([AC_LIBTOOL_SYS_GLOBAL_SYMBOL_PIPE],
+[AC_REQUIRE([AC_CANONICAL_HOST])
+AC_REQUIRE([AC_PROG_NM])
+AC_REQUIRE([AC_OBJEXT])
+# Check for command to grab the raw symbol name followed by C symbol from nm.
+AC_MSG_CHECKING([command to parse $NM output])
+AC_CACHE_VAL([lt_cv_sys_global_symbol_pipe], [dnl
+
+# These are sane defaults that work on at least a few old systems.
+# [They come from Ultrix. What could be older than Ultrix?!! ;)]
+
+# Character class describing NM global symbol codes.
+symcode='[[BCDEGRST]]'
+
+# Regexp to match symbols that can be accessed directly from C.
+sympat='\([[_A-Za-z]][[_A-Za-z0-9]]*\)'
+
+# Transform the above into a raw symbol and a C symbol.
+symxfrm='\1 \2\3 \3'
+
+# Transform an extracted symbol line into a proper C declaration
+lt_cv_global_symbol_to_cdecl="sed -n -e 's/^. .* \(.*\)$/extern char \1;/p'"
+
+# Transform an extracted symbol line into symbol name and symbol address
+lt_cv_global_symbol_to_c_name_address="sed -n -e 's/^: \([[^ ]]*\) $/ {\\\"\1\\\", (lt_ptr) 0},/p' -e 's/^$symcode \([[^ ]]*\) \([[^ ]]*\)$/ {\"\2\", (lt_ptr) \&\2},/p'"
+
+# Define system-specific variables.
+case $host_os in
+aix*)
+ symcode='[[BCDT]]'
+ ;;
+cygwin* | mingw* | pw32*)
+ symcode='[[ABCDGISTW]]'
+ ;;
+hpux*) # Its linker distinguishes data from code symbols
+ lt_cv_global_symbol_to_cdecl="sed -n -e 's/^T .* \(.*\)$/extern char \1();/p' -e 's/^$symcode* .* \(.*\)$/extern char \1;/p'"
+ lt_cv_global_symbol_to_c_name_address="sed -n -e 's/^: \([[^ ]]*\) $/ {\\\"\1\\\", (lt_ptr) 0},/p' -e 's/^$symcode* \([[^ ]]*\) \([[^ ]]*\)$/ {\"\2\", (lt_ptr) \&\2},/p'"
+ ;;
+irix*)
+ symcode='[[BCDEGRST]]'
+ ;;
+solaris* | sysv5*)
+ symcode='[[BDT]]'
+ ;;
+sysv4)
+ symcode='[[DFNSTU]]'
+ ;;
+esac
+
+# Handle CRLF in mingw tool chain
+opt_cr=
+case $host_os in
+mingw*)
+ opt_cr=`echo 'x\{0,1\}' | tr x '\015'` # option cr in regexp
+ ;;
+esac
+
+# If we're using GNU nm, then use its standard symbol codes.
+if $NM -V 2>&1 | egrep '(GNU|with BFD)' > /dev/null; then
+ symcode='[[ABCDGISTW]]'
+fi
+
+# Try without a prefix underscore, then with it.
+for ac_symprfx in "" "_"; do
+
+ # Write the raw and C identifiers.
+lt_cv_sys_global_symbol_pipe="sed -n -e 's/^.*[[ ]]\($symcode$symcode*\)[[ ]][[ ]]*\($ac_symprfx\)$sympat$opt_cr$/$symxfrm/p'"
+
+ # Check to see that the pipe works correctly.
+ pipe_works=no
+ rm -f conftest*
+ cat > conftest.$ac_ext <<EOF
+#ifdef __cplusplus
+extern "C" {
+#endif
+char nm_test_var;
+void nm_test_func(){}
+#ifdef __cplusplus
+}
+#endif
+int main(){nm_test_var='a';nm_test_func();return(0);}
+EOF
+
+ if AC_TRY_EVAL(ac_compile); then
+ # Now try to grab the symbols.
+ nlist=conftest.nm
+ if AC_TRY_EVAL(NM conftest.$ac_objext \| $lt_cv_sys_global_symbol_pipe \> $nlist) && test -s "$nlist"; then
+ # Try sorting and uniquifying the output.
+ if sort "$nlist" | uniq > "$nlist"T; then
+ mv -f "$nlist"T "$nlist"
+ else
+ rm -f "$nlist"T
+ fi
+
+ # Make sure that we snagged all the symbols we need.
+ if egrep ' nm_test_var$' "$nlist" >/dev/null; then
+ if egrep ' nm_test_func$' "$nlist" >/dev/null; then
+ cat <<EOF > conftest.$ac_ext
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+EOF
+ # Now generate the symbol file.
+ eval "$lt_cv_global_symbol_to_cdecl"' < "$nlist" >> conftest.$ac_ext'
+
+ cat <<EOF >> conftest.$ac_ext
+#if defined (__STDC__) && __STDC__
+# define lt_ptr void *
+#else
+# define lt_ptr char *
+# define const
+#endif
+
+/* The mapping between symbol names and symbols. */
+const struct {
+ const char *name;
+ lt_ptr address;
+}
+lt_preloaded_symbols[[]] =
+{
+EOF
+ sed "s/^$symcode$symcode* \(.*\) \(.*\)$/ {\"\2\", (lt_ptr) \&\2},/" < "$nlist" >> conftest.$ac_ext
+ cat <<\EOF >> conftest.$ac_ext
+ {0, (lt_ptr) 0}
+};
+
+#ifdef __cplusplus
+}
+#endif
+EOF
+ # Now try linking the two files.
+ mv conftest.$ac_objext conftstm.$ac_objext
+ save_LIBS="$LIBS"
+ save_CFLAGS="$CFLAGS"
+ LIBS="conftstm.$ac_objext"
+ CFLAGS="$CFLAGS$no_builtin_flag"
+ if AC_TRY_EVAL(ac_link) && test -s conftest; then
+ pipe_works=yes
+ fi
+ LIBS="$save_LIBS"
+ CFLAGS="$save_CFLAGS"
+ else
+ echo "cannot find nm_test_func in $nlist" >&AC_FD_CC
+ fi
+ else
+ echo "cannot find nm_test_var in $nlist" >&AC_FD_CC
+ fi
+ else
+ echo "cannot run $lt_cv_sys_global_symbol_pipe" >&AC_FD_CC
+ fi
+ else
+ echo "$progname: failed program was:" >&AC_FD_CC
+ cat conftest.$ac_ext >&5
+ fi
+ rm -f conftest* conftst*
+
+ # Do not use the global_symbol_pipe unless it works.
+ if test "$pipe_works" = yes; then
+ break
+ else
+ lt_cv_sys_global_symbol_pipe=
+ fi
+done
+])
+global_symbol_pipe="$lt_cv_sys_global_symbol_pipe"
+if test -z "$lt_cv_sys_global_symbol_pipe"; then
+ global_symbol_to_cdecl=
+ global_symbol_to_c_name_address=
+else
+ global_symbol_to_cdecl="$lt_cv_global_symbol_to_cdecl"
+ global_symbol_to_c_name_address="$lt_cv_global_symbol_to_c_name_address"
+fi
+if test -z "$global_symbol_pipe$global_symbol_to_cdec$global_symbol_to_c_name_address";
+then
+ AC_MSG_RESULT(failed)
+else
+ AC_MSG_RESULT(ok)
+fi
+]) # AC_LIBTOOL_SYS_GLOBAL_SYMBOL_PIPE
+
+# _LT_AC_LIBTOOL_SYS_PATH_SEPARATOR
+# ---------------------------------
+AC_DEFUN([_LT_AC_LIBTOOL_SYS_PATH_SEPARATOR],
+[# Find the correct PATH separator. Usually this is `:', but
+# DJGPP uses `;' like DOS.
+if test "X${PATH_SEPARATOR+set}" != Xset; then
+ UNAME=${UNAME-`uname 2>/dev/null`}
+ case X$UNAME in
+ *-DOS) lt_cv_sys_path_separator=';' ;;
+ *) lt_cv_sys_path_separator=':' ;;
+ esac
+ PATH_SEPARATOR=$lt_cv_sys_path_separator
+fi
+])# _LT_AC_LIBTOOL_SYS_PATH_SEPARATOR
+
+# _LT_AC_PROG_ECHO_BACKSLASH
+# --------------------------
+# Add some code to the start of the generated configure script which
+# will find an echo command which doesn't interpret backslashes.
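+# For example (illustrative only): an escape-interpreting builtin echo prints
+# a real tab character for
+#   echo '\t'
+# while the echo this code settles on prints the two characters backslash-t,
+# which is exactly what the probes below compare against 'X\t'.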
+AC_DEFUN([_LT_AC_PROG_ECHO_BACKSLASH],
+[ifdef([AC_DIVERSION_NOTICE], [AC_DIVERT_PUSH(AC_DIVERSION_NOTICE)],
+ [AC_DIVERT_PUSH(NOTICE)])
+_LT_AC_LIBTOOL_SYS_PATH_SEPARATOR
+
+# Check that we are running under the correct shell.
+SHELL=${CONFIG_SHELL-/bin/sh}
+
+case X$ECHO in
+X*--fallback-echo)
+ # Remove one level of quotation (which was required for Make).
+ ECHO=`echo "$ECHO" | sed 's,\\\\\[$]\\[$]0,'[$]0','`
+ ;;
+esac
+
+echo=${ECHO-echo}
+if test "X[$]1" = X--no-reexec; then
+ # Discard the --no-reexec flag, and continue.
+ shift
+elif test "X[$]1" = X--fallback-echo; then
+ # Avoid inline document here, it may be left over
+ :
+elif test "X`($echo '\t') 2>/dev/null`" = 'X\t'; then
+ # Yippee, $echo works!
+ :
+else
+ # Restart under the correct shell.
+ exec $SHELL "[$]0" --no-reexec ${1+"[$]@"}
+fi
+
+if test "X[$]1" = X--fallback-echo; then
+ # used as fallback echo
+ shift
+ cat <<EOF
+$*
+EOF
+ exit 0
+fi
+
+# The HP-UX ksh and POSIX shell print the target directory to stdout
+# if CDPATH is set.
+if test "X${CDPATH+set}" = Xset; then CDPATH=:; export CDPATH; fi
+
+if test -z "$ECHO"; then
+if test "X${echo_test_string+set}" != Xset; then
+# find a string as large as possible, as long as the shell can cope with it
+ for cmd in 'sed 50q "[$]0"' 'sed 20q "[$]0"' 'sed 10q "[$]0"' 'sed 2q "[$]0"' 'echo test'; do
+ # expected sizes: less than 2Kb, 1Kb, 512 bytes, 16 bytes, ...
+ if (echo_test_string="`eval $cmd`") 2>/dev/null &&
+ echo_test_string="`eval $cmd`" &&
+ (test "X$echo_test_string" = "X$echo_test_string") 2>/dev/null
+ then
+ break
+ fi
+ done
+fi
+
+if test "X`($echo '\t') 2>/dev/null`" = 'X\t' &&
+ echo_testing_string=`($echo "$echo_test_string") 2>/dev/null` &&
+ test "X$echo_testing_string" = "X$echo_test_string"; then
+ :
+else
+ # The Solaris, AIX, and Digital Unix default echo programs unquote
+ # backslashes. This makes it impossible to quote backslashes using
+ # echo "$something" | sed 's/\\/\\\\/g'
+ #
+ # So, first we look for a working echo in the user's PATH.
+
+ IFS="${IFS= }"; save_ifs="$IFS"; IFS=$PATH_SEPARATOR
+ for dir in $PATH /usr/ucb; do
+ if (test -f $dir/echo || test -f $dir/echo$ac_exeext) &&
+ test "X`($dir/echo '\t') 2>/dev/null`" = 'X\t' &&
+ echo_testing_string=`($dir/echo "$echo_test_string") 2>/dev/null` &&
+ test "X$echo_testing_string" = "X$echo_test_string"; then
+ echo="$dir/echo"
+ break
+ fi
+ done
+ IFS="$save_ifs"
+
+ if test "X$echo" = Xecho; then
+ # We didn't find a better echo, so look for alternatives.
+ if test "X`(print -r '\t') 2>/dev/null`" = 'X\t' &&
+ echo_testing_string=`(print -r "$echo_test_string") 2>/dev/null` &&
+ test "X$echo_testing_string" = "X$echo_test_string"; then
+ # This shell has a builtin print -r that does the trick.
+ echo='print -r'
+ elif (test -f /bin/ksh || test -f /bin/ksh$ac_exeext) &&
+ test "X$CONFIG_SHELL" != X/bin/ksh; then
+ # If we have ksh, try running configure again with it.
+ ORIGINAL_CONFIG_SHELL=${CONFIG_SHELL-/bin/sh}
+ export ORIGINAL_CONFIG_SHELL
+ CONFIG_SHELL=/bin/ksh
+ export CONFIG_SHELL
+ exec $CONFIG_SHELL "[$]0" --no-reexec ${1+"[$]@"}
+ else
+ # Try using printf.
+ echo='printf %s\n'
+ if test "X`($echo '\t') 2>/dev/null`" = 'X\t' &&
+ echo_testing_string=`($echo "$echo_test_string") 2>/dev/null` &&
+ test "X$echo_testing_string" = "X$echo_test_string"; then
+ # Cool, printf works
+ :
+ elif echo_testing_string=`($ORIGINAL_CONFIG_SHELL "[$]0" --fallback-echo '\t') 2>/dev/null` &&
+ test "X$echo_testing_string" = 'X\t' &&
+ echo_testing_string=`($ORIGINAL_CONFIG_SHELL "[$]0" --fallback-echo "$echo_test_string") 2>/dev/null` &&
+ test "X$echo_testing_string" = "X$echo_test_string"; then
+ CONFIG_SHELL=$ORIGINAL_CONFIG_SHELL
+ export CONFIG_SHELL
+ SHELL="$CONFIG_SHELL"
+ export SHELL
+ echo="$CONFIG_SHELL [$]0 --fallback-echo"
+ elif echo_testing_string=`($CONFIG_SHELL "[$]0" --fallback-echo '\t') 2>/dev/null` &&
+ test "X$echo_testing_string" = 'X\t' &&
+ echo_testing_string=`($CONFIG_SHELL "[$]0" --fallback-echo "$echo_test_string") 2>/dev/null` &&
+ test "X$echo_testing_string" = "X$echo_test_string"; then
+ echo="$CONFIG_SHELL [$]0 --fallback-echo"
+ else
+ # maybe with a smaller string...
+ prev=:
+
+ for cmd in 'echo test' 'sed 2q "[$]0"' 'sed 10q "[$]0"' 'sed 20q "[$]0"' 'sed 50q "[$]0"'; do
+ if (test "X$echo_test_string" = "X`eval $cmd`") 2>/dev/null
+ then
+ break
+ fi
+ prev="$cmd"
+ done
+
+ if test "$prev" != 'sed 50q "[$]0"'; then
+ echo_test_string=`eval $prev`
+ export echo_test_string
+ exec ${ORIGINAL_CONFIG_SHELL-${CONFIG_SHELL-/bin/sh}} "[$]0" ${1+"[$]@"}
+ else
+ # Oops. We lost completely, so just stick with echo.
+ echo=echo
+ fi
+ fi
+ fi
+ fi
+fi
+fi
+
+# Copy echo and quote the copy suitably for passing to libtool from
+# the Makefile, instead of quoting the original, which is used later.
+ECHO=$echo
+if test "X$ECHO" = "X$CONFIG_SHELL [$]0 --fallback-echo"; then
+ ECHO="$CONFIG_SHELL \\\$\[$]0 --fallback-echo"
+fi
+
+AC_SUBST(ECHO)
+AC_DIVERT_POP
+])# _LT_AC_PROG_ECHO_BACKSLASH
+
+# _LT_AC_TRY_DLOPEN_SELF (ACTION-IF-TRUE, ACTION-IF-TRUE-W-USCORE,
+# ACTION-IF-FALSE, ACTION-IF-CROSS-COMPILING)
+# ------------------------------------------------------------------
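+# A typical invocation, as used later in this file:
+#   _LT_AC_TRY_DLOPEN_SELF(
+#     lt_cv_dlopen_self=yes, lt_cv_dlopen_self=yes,
+#     lt_cv_dlopen_self=no, lt_cv_dlopen_self=cross)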
+AC_DEFUN([_LT_AC_TRY_DLOPEN_SELF],
+[if test "$cross_compiling" = yes; then :
+ [$4]
+else
+ AC_REQUIRE([_LT_AC_CHECK_DLFCN])dnl
+ lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2
+ lt_status=$lt_dlunknown
+ cat > conftest.$ac_ext <<EOF
+[#line __oline__ "configure"
+#include "confdefs.h"
+
+#if HAVE_DLFCN_H
+#include <dlfcn.h>
+#endif
+
+#include <stdio.h>
+
+#ifdef RTLD_GLOBAL
+# define LT_DLGLOBAL RTLD_GLOBAL
+#else
+# ifdef DL_GLOBAL
+# define LT_DLGLOBAL DL_GLOBAL
+# else
+# define LT_DLGLOBAL 0
+# endif
+#endif
+
+/* We may have to define LT_DLLAZY_OR_NOW on the command line if we
+ find out it does not work on some platform. */
+#ifndef LT_DLLAZY_OR_NOW
+# ifdef RTLD_LAZY
+# define LT_DLLAZY_OR_NOW RTLD_LAZY
+# else
+# ifdef DL_LAZY
+# define LT_DLLAZY_OR_NOW DL_LAZY
+# else
+# ifdef RTLD_NOW
+# define LT_DLLAZY_OR_NOW RTLD_NOW
+# else
+# ifdef DL_NOW
+# define LT_DLLAZY_OR_NOW DL_NOW
+# else
+# define LT_DLLAZY_OR_NOW 0
+# endif
+# endif
+# endif
+# endif
+#endif
+
+#ifdef __cplusplus
+extern "C" void exit (int);
+#endif
+
+void fnord() { int i=42;}
+int main ()
+{
+ void *self = dlopen (0, LT_DLGLOBAL|LT_DLLAZY_OR_NOW);
+ int status = $lt_dlunknown;
+
+ if (self)
+ {
+ if (dlsym (self,"fnord")) status = $lt_dlno_uscore;
+ else if (dlsym( self,"_fnord")) status = $lt_dlneed_uscore;
+ /* dlclose (self); */
+ }
+
+ exit (status);
+}]
+EOF
+ if AC_TRY_EVAL(ac_link) && test -s conftest${ac_exeext} 2>/dev/null; then
+ (./conftest; exit; ) 2>/dev/null
+ lt_status=$?
+ case x$lt_status in
+ x$lt_dlno_uscore) $1 ;;
+ x$lt_dlneed_uscore) $2 ;;
+ x$lt_dlunknown|x*) $3 ;;
+ esac
+ else :
+ # compilation failed
+ $3
+ fi
+fi
+rm -fr conftest*
+])# _LT_AC_TRY_DLOPEN_SELF
+
+# AC_LIBTOOL_DLOPEN_SELF
+# ----------------------
+AC_DEFUN([AC_LIBTOOL_DLOPEN_SELF],
+[if test "x$enable_dlopen" != xyes; then
+ enable_dlopen=unknown
+ enable_dlopen_self=unknown
+ enable_dlopen_self_static=unknown
+else
+ lt_cv_dlopen=no
+ lt_cv_dlopen_libs=
+
+ case $host_os in
+ beos*)
+ lt_cv_dlopen="load_add_on"
+ lt_cv_dlopen_libs=
+ lt_cv_dlopen_self=yes
+ ;;
+
+ cygwin* | mingw* | pw32*)
+ lt_cv_dlopen="LoadLibrary"
+ lt_cv_dlopen_libs=
+ ;;
+
+ *)
+ AC_CHECK_FUNC([shl_load],
+ [lt_cv_dlopen="shl_load"],
+ [AC_CHECK_LIB([dld], [shl_load],
+ [lt_cv_dlopen="shl_load" lt_cv_dlopen_libs="-dld"],
+ [AC_CHECK_FUNC([dlopen],
+ [lt_cv_dlopen="dlopen"],
+ [AC_CHECK_LIB([dl], [dlopen],
+ [lt_cv_dlopen="dlopen" lt_cv_dlopen_libs="-ldl"],
+ [AC_CHECK_LIB([svld], [dlopen],
+ [lt_cv_dlopen="dlopen" lt_cv_dlopen_libs="-lsvld"],
+ [AC_CHECK_LIB([dld], [dld_link],
+ [lt_cv_dlopen="dld_link" lt_cv_dlopen_libs="-dld"])
+ ])
+ ])
+ ])
+ ])
+ ])
+ ;;
+ esac
+
+ if test "x$lt_cv_dlopen" != xno; then
+ enable_dlopen=yes
+ else
+ enable_dlopen=no
+ fi
+
+ case $lt_cv_dlopen in
+ dlopen)
+ save_CPPFLAGS="$CPPFLAGS"
+ AC_REQUIRE([_LT_AC_CHECK_DLFCN])dnl
+ test "x$ac_cv_header_dlfcn_h" = xyes && CPPFLAGS="$CPPFLAGS -DHAVE_DLFCN_H"
+
+ save_LDFLAGS="$LDFLAGS"
+ eval LDFLAGS=\"\$LDFLAGS $export_dynamic_flag_spec\"
+
+ save_LIBS="$LIBS"
+ LIBS="$lt_cv_dlopen_libs $LIBS"
+
+ AC_CACHE_CHECK([whether a program can dlopen itself],
+ lt_cv_dlopen_self, [dnl
+ _LT_AC_TRY_DLOPEN_SELF(
+ lt_cv_dlopen_self=yes, lt_cv_dlopen_self=yes,
+ lt_cv_dlopen_self=no, lt_cv_dlopen_self=cross)
+ ])
+
+ if test "x$lt_cv_dlopen_self" = xyes; then
+ LDFLAGS="$LDFLAGS $link_static_flag"
+ AC_CACHE_CHECK([whether a statically linked program can dlopen itself],
+ lt_cv_dlopen_self_static, [dnl
+ _LT_AC_TRY_DLOPEN_SELF(
+ lt_cv_dlopen_self_static=yes, lt_cv_dlopen_self_static=yes,
+ lt_cv_dlopen_self_static=no, lt_cv_dlopen_self_static=cross)
+ ])
+ fi
+
+ CPPFLAGS="$save_CPPFLAGS"
+ LDFLAGS="$save_LDFLAGS"
+ LIBS="$save_LIBS"
+ ;;
+ esac
+
+ case $lt_cv_dlopen_self in
+ yes|no) enable_dlopen_self=$lt_cv_dlopen_self ;;
+ *) enable_dlopen_self=unknown ;;
+ esac
+
+ case $lt_cv_dlopen_self_static in
+ yes|no) enable_dlopen_self_static=$lt_cv_dlopen_self_static ;;
+ *) enable_dlopen_self_static=unknown ;;
+ esac
+fi
+])# AC_LIBTOOL_DLOPEN_SELF
+
+AC_DEFUN([_LT_AC_LTCONFIG_HACK],
+[AC_REQUIRE([AC_LIBTOOL_SYS_GLOBAL_SYMBOL_PIPE])dnl
+# Sed substitution that helps us do robust quoting. It backslashifies
+# metacharacters that are still active within double-quoted strings.
+Xsed='sed -e s/^X//'
+sed_quote_subst='s/\([[\\"\\`$\\\\]]\)/\\\1/g'
+
+# Same as above, but do not quote variable references.
+double_quote_subst='s/\([[\\"\\`\\\\]]\)/\\\1/g'
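+
+# For illustration, applied to the sample string
+#   eval echo "$CC" `pwd`
+# sed_quote_subst produces
+#   eval echo \"\$CC\" \`pwd\`
+# while double_quote_subst leaves the $ reference unescaped:
+#   eval echo \"$CC\" \`pwd\`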
+
+# Sed substitution to delay expansion of an escaped shell variable in a
+# double_quote_subst'ed string.
+delay_variable_subst='s/\\\\\\\\\\\$/\\\\\\$/g'
+
+# Constants:
+rm="rm -f"
+
+# Global variables:
+default_ofile=libtool
+can_build_shared=yes
+
+# All known linkers require a `.a' archive for static linking (except M$VC,
+# which needs '.lib').
+libext=a
+ltmain="$ac_aux_dir/ltmain.sh"
+ofile="$default_ofile"
+with_gnu_ld="$lt_cv_prog_gnu_ld"
+need_locks="$enable_libtool_lock"
+
+old_CC="$CC"
+old_CFLAGS="$CFLAGS"
+
+# Set sane defaults for various variables
+test -z "$AR" && AR=ar
+test -z "$AR_FLAGS" && AR_FLAGS=cru
+test -z "$AS" && AS=as
+test -z "$CC" && CC=cc
+test -z "$DLLTOOL" && DLLTOOL=dlltool
+test -z "$LD" && LD=ld
+test -z "$LN_S" && LN_S="ln -s"
+test -z "$MAGIC_CMD" && MAGIC_CMD=file
+test -z "$NM" && NM=nm
+test -z "$OBJDUMP" && OBJDUMP=objdump
+test -z "$RANLIB" && RANLIB=:
+test -z "$STRIP" && STRIP=:
+test -z "$ac_objext" && ac_objext=o
+
+if test x"$host" != x"$build"; then
+ ac_tool_prefix=${host_alias}-
+else
+ ac_tool_prefix=
+fi
+
+# Transform linux* to *-*-linux-gnu*, to support old configure scripts.
+case $host_os in
+linux-gnu*) ;;
+linux*) host=`echo $host | sed 's/^\(.*-.*-linux\)\(.*\)$/\1-gnu\2/'`
+esac
+
+case $host_os in
+aix3*)
+ # AIX sometimes has problems with the GCC collect2 program. For some
+ # reason, if we set the COLLECT_NAMES environment variable, the problems
+ # vanish in a puff of smoke.
+ if test "X${COLLECT_NAMES+set}" != Xset; then
+ COLLECT_NAMES=
+ export COLLECT_NAMES
+ fi
+ ;;
+esac
+
+# Determine commands to create old-style static archives.
+old_archive_cmds='$AR $AR_FLAGS $oldlib$oldobjs$old_deplibs'
+old_postinstall_cmds='chmod 644 $oldlib'
+old_postuninstall_cmds=
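+
+# As a rough illustration (hypothetical library and object names), with the
+# defaults set below these commands typically expand to something like
+#   ar cru libfoo.a foo.o bar.o
+# followed at install time by
+#   chmod 644 libfoo.a
+# plus a ranlib run appended below when $RANLIB is available.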
+
+if test -n "$RANLIB"; then
+ case $host_os in
+ openbsd*)
+ old_postinstall_cmds="\$RANLIB -t \$oldlib~$old_postinstall_cmds"
+ ;;
+ *)
+ old_postinstall_cmds="\$RANLIB \$oldlib~$old_postinstall_cmds"
+ ;;
+ esac
+ old_archive_cmds="$old_archive_cmds~\$RANLIB \$oldlib"
+fi
+
+# Allow CC to be a program name with arguments.
+set dummy $CC
+compiler="[$]2"
+
+## FIXME: this should be a separate macro
+##
+AC_MSG_CHECKING([for objdir])
+rm -f .libs 2>/dev/null
+mkdir .libs 2>/dev/null
+if test -d .libs; then
+ objdir=.libs
+else
+ # MS-DOS does not allow filenames that begin with a dot.
+ objdir=_libs
+fi
+rmdir .libs 2>/dev/null
+AC_MSG_RESULT($objdir)
+##
+## END FIXME
+
+
+## FIXME: this should be a separate macro
+##
+AC_ARG_WITH(pic,
+[ --with-pic try to use only PIC/non-PIC objects [default=use both]],
+pic_mode="$withval", pic_mode=default)
+test -z "$pic_mode" && pic_mode=default
+
+# We assume here that the value for lt_cv_prog_cc_pic will not be cached
+# in isolation, and that seeing it set (from the cache) indicates that
+# the associated values are set (in the cache) correctly too.
+AC_MSG_CHECKING([for $compiler option to produce PIC])
+AC_CACHE_VAL(lt_cv_prog_cc_pic,
+[ lt_cv_prog_cc_pic=
+ lt_cv_prog_cc_shlib=
+ lt_cv_prog_cc_wl=
+ lt_cv_prog_cc_static=
+ lt_cv_prog_cc_no_builtin=
+ lt_cv_prog_cc_can_build_shared=$can_build_shared
+
+ if test "$GCC" = yes; then
+ lt_cv_prog_cc_wl='-Wl,'
+ lt_cv_prog_cc_static='-static'
+
+ case $host_os in
+ aix*)
+ # Below there is a dirty hack to force normal static linking with -ldl.
+ # The problem is that libdl is dynamically linked against both libc and
+ # libC (the AIX C++ library), which gcc obviously does not include in its
+ # library list. This causes undefined symbols with the -static flag.
+ # This hack allows C programs to be linked with "-static -ldl", but we
+ # are not sure about C++ programs.
+ lt_cv_prog_cc_static="$lt_cv_prog_cc_static ${lt_cv_prog_cc_wl}-lC"
+ ;;
+ amigaos*)
+ # FIXME: we need at least 68020 code to build shared libraries, but
+ # adding the `-m68020' flag to GCC prevents building anything better,
+ # like `-m68040'.
+ lt_cv_prog_cc_pic='-m68020 -resident32 -malways-restore-a4'
+ ;;
+ beos* | irix5* | irix6* | osf3* | osf4* | osf5*)
+ # PIC is the default for these OSes.
+ ;;
+ darwin* | rhapsody*)
+ # PIC is the default on this platform
+ # Common symbols not allowed in MH_DYLIB files
+ lt_cv_prog_cc_pic='-fno-common'
+ ;;
+ cygwin* | mingw* | pw32* | os2*)
+ # This hack is so that the source file can tell whether it is being
+ # built for inclusion in a dll (and should export symbols for example).
+ lt_cv_prog_cc_pic='-DDLL_EXPORT'
+ ;;
+ sysv4*MP*)
+ if test -d /usr/nec; then
+ lt_cv_prog_cc_pic=-Kconform_pic
+ fi
+ ;;
+ *)
+ lt_cv_prog_cc_pic='-fPIC'
+ ;;
+ esac
+ else
+ # PORTME Check for PIC flags for the system compiler.
+ case $host_os in
+ aix3* | aix4* | aix5*)
+ lt_cv_prog_cc_wl='-Wl,'
+ # All AIX code is PIC.
+ if test "$host_cpu" = ia64; then
+ # AIX 5 now supports IA64 processor
+ lt_cv_prog_cc_static='-Bstatic'
+ else
+ lt_cv_prog_cc_static='-bnso -bI:/lib/syscalls.exp'
+ fi
+ ;;
+
+ hpux9* | hpux10* | hpux11*)
+ # Is there a better lt_cv_prog_cc_static that works with the bundled CC?
+ lt_cv_prog_cc_wl='-Wl,'
+ lt_cv_prog_cc_static="${lt_cv_prog_cc_wl}-a ${lt_cv_prog_cc_wl}archive"
+ lt_cv_prog_cc_pic='+Z'
+ ;;
+
+ irix5* | irix6*)
+ lt_cv_prog_cc_wl='-Wl,'
+ lt_cv_prog_cc_static='-non_shared'
+ # PIC (with -KPIC) is the default.
+ ;;
+
+ cygwin* | mingw* | pw32* | os2*)
+ # This hack is so that the source file can tell whether it is being
+ # built for inclusion in a dll (and should export symbols for example).
+ lt_cv_prog_cc_pic='-DDLL_EXPORT'
+ ;;
+
+ newsos6)
+ lt_cv_prog_cc_pic='-KPIC'
+ lt_cv_prog_cc_static='-Bstatic'
+ ;;
+
+ osf3* | osf4* | osf5*)
+ # All OSF/1 code is PIC.
+ lt_cv_prog_cc_wl='-Wl,'
+ lt_cv_prog_cc_static='-non_shared'
+ ;;
+
+ sco3.2v5*)
+ lt_cv_prog_cc_pic='-Kpic'
+ lt_cv_prog_cc_static='-dn'
+ lt_cv_prog_cc_shlib='-belf'
+ ;;
+
+ solaris*)
+ lt_cv_prog_cc_pic='-KPIC'
+ lt_cv_prog_cc_static='-Bstatic'
+ lt_cv_prog_cc_wl='-Wl,'
+ ;;
+
+ sunos4*)
+ lt_cv_prog_cc_pic='-PIC'
+ lt_cv_prog_cc_static='-Bstatic'
+ lt_cv_prog_cc_wl='-Qoption ld '
+ ;;
+
+ sysv4 | sysv4.2uw2* | sysv4.3* | sysv5*)
+ lt_cv_prog_cc_pic='-KPIC'
+ lt_cv_prog_cc_static='-Bstatic'
+ if test "x$host_vendor" = xsni; then
+ lt_cv_prog_cc_wl='-LD'
+ else
+ lt_cv_prog_cc_wl='-Wl,'
+ fi
+ ;;
+
+ uts4*)
+ lt_cv_prog_cc_pic='-pic'
+ lt_cv_prog_cc_static='-Bstatic'
+ ;;
+
+ sysv4*MP*)
+ if test -d /usr/nec ;then
+ lt_cv_prog_cc_pic='-Kconform_pic'
+ lt_cv_prog_cc_static='-Bstatic'
+ fi
+ ;;
+
+ *)
+ lt_cv_prog_cc_can_build_shared=no
+ ;;
+ esac
+ fi
+])
+if test -z "$lt_cv_prog_cc_pic"; then
+ AC_MSG_RESULT([none])
+else
+ AC_MSG_RESULT([$lt_cv_prog_cc_pic])
+
+ # Check to make sure the pic_flag actually works.
+ AC_MSG_CHECKING([if $compiler PIC flag $lt_cv_prog_cc_pic works])
+ AC_CACHE_VAL(lt_cv_prog_cc_pic_works, [dnl
+ save_CFLAGS="$CFLAGS"
+ CFLAGS="$CFLAGS $lt_cv_prog_cc_pic -DPIC"
+ AC_TRY_COMPILE([], [], [dnl
+ case $host_os in
+ hpux9* | hpux10* | hpux11*)
+ # On HP-UX, both CC and GCC only warn that PIC is supported... then
+ # they create non-PIC objects. So, if there were any warnings, we
+ # assume that PIC is not supported.
+ if test -s conftest.err; then
+ lt_cv_prog_cc_pic_works=no
+ else
+ lt_cv_prog_cc_pic_works=yes
+ fi
+ ;;
+ *)
+ lt_cv_prog_cc_pic_works=yes
+ ;;
+ esac
+ ], [dnl
+ lt_cv_prog_cc_pic_works=no
+ ])
+ CFLAGS="$save_CFLAGS"
+ ])
+
+ if test "X$lt_cv_prog_cc_pic_works" = Xno; then
+ lt_cv_prog_cc_pic=
+ lt_cv_prog_cc_can_build_shared=no
+ else
+ lt_cv_prog_cc_pic=" $lt_cv_prog_cc_pic"
+ fi
+
+ AC_MSG_RESULT([$lt_cv_prog_cc_pic_works])
+fi
+##
+## END FIXME
+
+# Check for any special shared library compilation flags.
+if test -n "$lt_cv_prog_cc_shlib"; then
+ AC_MSG_WARN([\`$CC' requires \`$lt_cv_prog_cc_shlib' to build shared libraries])
+ if echo "$old_CC $old_CFLAGS " | egrep -e "[[ ]]$lt_cv_prog_cc_shlib[[ ]]" >/dev/null; then :
+ else
+ AC_MSG_WARN([add \`$lt_cv_prog_cc_shlib' to the CC or CFLAGS env variable and reconfigure])
+ lt_cv_prog_cc_can_build_shared=no
+ fi
+fi
+
+## FIXME: this should be a separate macro
+##
+AC_MSG_CHECKING([if $compiler static flag $lt_cv_prog_cc_static works])
+AC_CACHE_VAL([lt_cv_prog_cc_static_works], [dnl
+ lt_cv_prog_cc_static_works=no
+ save_LDFLAGS="$LDFLAGS"
+ LDFLAGS="$LDFLAGS $lt_cv_prog_cc_static"
+ AC_TRY_LINK([], [], [lt_cv_prog_cc_static_works=yes])
+ LDFLAGS="$save_LDFLAGS"
+])
+
+# Belt *and* braces to stop my trousers falling down:
+test "X$lt_cv_prog_cc_static_works" = Xno && lt_cv_prog_cc_static=
+AC_MSG_RESULT([$lt_cv_prog_cc_static_works])
+
+pic_flag="$lt_cv_prog_cc_pic"
+special_shlib_compile_flags="$lt_cv_prog_cc_shlib"
+wl="$lt_cv_prog_cc_wl"
+link_static_flag="$lt_cv_prog_cc_static"
+no_builtin_flag="$lt_cv_prog_cc_no_builtin"
+can_build_shared="$lt_cv_prog_cc_can_build_shared"
+##
+## END FIXME
+
+
+## FIXME: this should be a separate macro
+##
+# Check to see if options -o and -c are simultaneously supported by compiler
+AC_MSG_CHECKING([if $compiler supports -c -o file.$ac_objext])
+AC_CACHE_VAL([lt_cv_compiler_c_o], [
+$rm -r conftest 2>/dev/null
+mkdir conftest
+cd conftest
+echo "int some_variable = 0;" > conftest.$ac_ext
+mkdir out
+# According to Tom Tromey, Ian Lance Taylor reported there are C compilers
+# that will create temporary files in the current directory regardless of
+# the output directory. Thus, making CWD read-only will cause this test
+# to fail, enabling locking or at least warning the user not to do parallel
+# builds.
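+# Concretely, the probe below amounts to running something like
+#   $CC -c -o out/conftest2.o conftest.c
+# from a write-protected directory and treating any diagnostics, or a failed
+# compile, as meaning that -c and -o cannot be combined.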
+chmod -w .
+save_CFLAGS="$CFLAGS"
+CFLAGS="$CFLAGS -o out/conftest2.$ac_objext"
+compiler_c_o=no
+if { (eval echo configure:__oline__: \"$ac_compile\") 1>&5; (eval $ac_compile) 2>out/conftest.err; } && test -s out/conftest2.$ac_objext; then
+ # The compiler can only warn and ignore the option if not recognized
+ # So say no if there are warnings
+ if test -s out/conftest.err; then
+ lt_cv_compiler_c_o=no
+ else
+ lt_cv_compiler_c_o=yes
+ fi
+else
+ # Append any errors to the config.log.
+ cat out/conftest.err 1>&AC_FD_CC
+ lt_cv_compiler_c_o=no
+fi
+CFLAGS="$save_CFLAGS"
+chmod u+w .
+$rm conftest* out/*
+rmdir out
+cd ..
+rmdir conftest
+$rm -r conftest 2>/dev/null
+])
+compiler_c_o=$lt_cv_compiler_c_o
+AC_MSG_RESULT([$compiler_c_o])
+
+if test x"$compiler_c_o" = x"yes"; then
+ # Check to see if we can write to a .lo
+ AC_MSG_CHECKING([if $compiler supports -c -o file.lo])
+ AC_CACHE_VAL([lt_cv_compiler_o_lo], [
+ lt_cv_compiler_o_lo=no
+ save_CFLAGS="$CFLAGS"
+ CFLAGS="$CFLAGS -c -o conftest.lo"
+ save_objext="$ac_objext"
+ ac_objext=lo
+ AC_TRY_COMPILE([], [int some_variable = 0;], [dnl
+ # The compiler can only warn and ignore the option if not recognized
+ # So say no if there are warnings
+ if test -s conftest.err; then
+ lt_cv_compiler_o_lo=no
+ else
+ lt_cv_compiler_o_lo=yes
+ fi
+ ])
+ ac_objext="$save_objext"
+ CFLAGS="$save_CFLAGS"
+ ])
+ compiler_o_lo=$lt_cv_compiler_o_lo
+ AC_MSG_RESULT([$compiler_o_lo])
+else
+ compiler_o_lo=no
+fi
+##
+## END FIXME
+
+## FIXME: this should be a separate macro
+##
+# Check to see if we can do hard links to lock some files if needed
+hard_links="nottested"
+if test "$compiler_c_o" = no && test "$need_locks" != no; then
+ # do not overwrite the value of need_locks provided by the user
+ AC_MSG_CHECKING([if we can lock with hard links])
+ hard_links=yes
+ $rm conftest*
+ ln conftest.a conftest.b 2>/dev/null && hard_links=no
+ touch conftest.a
+ ln conftest.a conftest.b 2>&5 || hard_links=no
+ ln conftest.a conftest.b 2>/dev/null && hard_links=no
+ AC_MSG_RESULT([$hard_links])
+ if test "$hard_links" = no; then
+ AC_MSG_WARN([\`$CC' does not support \`-c -o', so \`make -j' may be unsafe])
+ need_locks=warn
+ fi
+else
+ need_locks=no
+fi
+##
+## END FIXME
+
+## FIXME: this should be a separate macro
+##
+if test "$GCC" = yes; then
+ # Check to see if options -fno-rtti -fno-exceptions are supported by compiler
+ AC_MSG_CHECKING([if $compiler supports -fno-rtti -fno-exceptions])
+ echo "int some_variable = 0;" > conftest.$ac_ext
+ save_CFLAGS="$CFLAGS"
+ CFLAGS="$CFLAGS -fno-rtti -fno-exceptions -c conftest.$ac_ext"
+ compiler_rtti_exceptions=no
+ AC_TRY_COMPILE([], [int some_variable = 0;], [dnl
+ # The compiler can only warn and ignore the option if not recognized
+ # So say no if there are warnings
+ if test -s conftest.err; then
+ compiler_rtti_exceptions=no
+ else
+ compiler_rtti_exceptions=yes
+ fi
+ ])
+ CFLAGS="$save_CFLAGS"
+ AC_MSG_RESULT([$compiler_rtti_exceptions])
+
+ if test "$compiler_rtti_exceptions" = "yes"; then
+ no_builtin_flag=' -fno-builtin -fno-rtti -fno-exceptions'
+ else
+ no_builtin_flag=' -fno-builtin'
+ fi
+fi
+##
+## END FIXME
+
+## FIXME: this should be a separate macro
+##
+# See if the linker supports building shared libraries.
+AC_MSG_CHECKING([whether the linker ($LD) supports shared libraries])
+
+allow_undefined_flag=
+no_undefined_flag=
+need_lib_prefix=unknown
+need_version=unknown
+# when you set need_version to no, make sure it does not cause -set_version
+# flags to be left without arguments
+archive_cmds=
+archive_expsym_cmds=
+old_archive_from_new_cmds=
+old_archive_from_expsyms_cmds=
+export_dynamic_flag_spec=
+whole_archive_flag_spec=
+thread_safe_flag_spec=
+hardcode_into_libs=no
+hardcode_libdir_flag_spec=
+hardcode_libdir_separator=
+hardcode_direct=no
+hardcode_minus_L=no
+hardcode_shlibpath_var=unsupported
+runpath_var=
+link_all_deplibs=unknown
+always_export_symbols=no
+export_symbols_cmds='$NM $libobjs $convenience | $global_symbol_pipe | sed '\''s/.* //'\'' | sort | uniq > $export_symbols'
+# include_expsyms should be a list of space-separated symbols to be *always*
+# included in the symbol list
+include_expsyms=
+# exclude_expsyms can be an egrep regular expression of symbols to exclude
+# it will be wrapped by ` (' and `)$', so one must not match beginning or
+# end of line. Example: `a|bc|.*d.*' will exclude the symbols `a' and `bc',
+# as well as any symbol that contains `d'.
+exclude_expsyms="_GLOBAL_OFFSET_TABLE_"
+# Although _GLOBAL_OFFSET_TABLE_ is a valid C symbol name, most a.out
+# platforms (ab)use it in PIC code, but their linkers get confused if
+# the symbol is explicitly referenced. Since portable code cannot
+# rely on this symbol name, it's probably fine to never include it in
+# preloaded symbol tables.
+extract_expsyms_cmds=
+
+case $host_os in
+cygwin* | mingw* | pw32*)
+ # FIXME: the MSVC++ port hasn't been tested in a loooong time
+ # When not using gcc, we currently assume that we are using
+ # Microsoft Visual C++.
+ if test "$GCC" != yes; then
+ with_gnu_ld=no
+ fi
+ ;;
+openbsd*)
+ with_gnu_ld=no
+ ;;
+esac
+
+ld_shlibs=yes
+if test "$with_gnu_ld" = yes; then
+ # If archive_cmds runs LD, not CC, wlarc should be empty
+ wlarc='${wl}'
+
+ # See if GNU ld supports shared libraries.
+ case $host_os in
+ aix3* | aix4* | aix5*)
+ # On AIX, the GNU linker is very broken
+ # Note: Check GNU linker on AIX 5-IA64 when/if it becomes available.
+ ld_shlibs=no
+ cat <<EOF 1>&2
+
+*** Warning: the GNU linker, at least up to release 2.9.1, is reported
+*** to be unable to reliably create shared libraries on AIX.
+*** Therefore, libtool is disabling shared libraries support. If you
+*** really care for shared libraries, you may want to modify your PATH
+*** so that a non-GNU linker is found, and then restart.
+
+EOF
+ ;;
+
+ amigaos*)
+ archive_cmds='$rm $output_objdir/a2ixlibrary.data~$echo "#define NAME $libname" > $output_objdir/a2ixlibrary.data~$echo "#define LIBRARY_ID 1" >> $output_objdir/a2ixlibrary.data~$echo "#define VERSION $major" >> $output_objdir/a2ixlibrary.data~$echo "#define REVISION $revision" >> $output_objdir/a2ixlibrary.data~$AR $AR_FLAGS $lib $libobjs~$RANLIB $lib~(cd $output_objdir && a2ixlibrary -32)'
+ hardcode_libdir_flag_spec='-L$libdir'
+ hardcode_minus_L=yes
+
+ # Samuel A. Falvo II <kc5tja@dolphin.openprojects.net> reports
+ # that the semantics of dynamic libraries on AmigaOS, at least up
+ # to version 4, are to share data among multiple programs linked
+ # with the same dynamic library. Since this doesn't match the
+ # behavior of shared libraries on other platforms, we cannot use
+ # them.
+ ld_shlibs=no
+ ;;
+
+ beos*)
+ if $LD --help 2>&1 | egrep ': supported targets:.* elf' > /dev/null; then
+ allow_undefined_flag=unsupported
+ # Joseph Beckenbach <jrb3@best.com> says some releases of gcc
+ # support --undefined. This deserves some investigation. FIXME
+ archive_cmds='$CC -nostart $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib'
+ else
+ ld_shlibs=no
+ fi
+ ;;
+
+ cygwin* | mingw* | pw32*)
+ # hardcode_libdir_flag_spec is actually meaningless, as there is
+ # no search path for DLLs.
+ hardcode_libdir_flag_spec='-L$libdir'
+ allow_undefined_flag=unsupported
+ always_export_symbols=yes
+
+ extract_expsyms_cmds='test -f $output_objdir/impgen.c || \
+ sed -e "/^# \/\* impgen\.c starts here \*\//,/^# \/\* impgen.c ends here \*\// { s/^# //;s/^# *$//; p; }" -e d < $''0 > $output_objdir/impgen.c~
+ test -f $output_objdir/impgen.exe || (cd $output_objdir && \
+ if test "x$HOST_CC" != "x" ; then $HOST_CC -o impgen impgen.c ; \
+ else $CC -o impgen impgen.c ; fi)~
+ $output_objdir/impgen $dir/$soroot > $output_objdir/$soname-def'
+
+ old_archive_from_expsyms_cmds='$DLLTOOL --as=$AS --dllname $soname --def $output_objdir/$soname-def --output-lib $output_objdir/$newlib'
+
+ # cygwin and mingw dlls have different entry points and sets of symbols
+ # to exclude.
+ # FIXME: what about values for MSVC?
+ dll_entry=__cygwin_dll_entry@12
+ dll_exclude_symbols=DllMain@12,_cygwin_dll_entry@12,_cygwin_noncygwin_dll_entry@12~
+ case $host_os in
+ mingw*)
+ # mingw values
+ dll_entry=_DllMainCRTStartup@12
+ dll_exclude_symbols=DllMain@12,DllMainCRTStartup@12,DllEntryPoint@12~
+ ;;
+ esac
+
+ # mingw and cygwin differ, and it's simplest to just exclude the union
+ # of the two symbol sets.
+ dll_exclude_symbols=DllMain@12,_cygwin_dll_entry@12,_cygwin_noncygwin_dll_entry@12,DllMainCRTStartup@12,DllEntryPoint@12
+
+ # recent cygwin and mingw systems supply a stub DllMain which the user
+ # can override, but on older systems we have to supply one (in ltdll.c)
+ if test "x$lt_cv_need_dllmain" = "xyes"; then
+ ltdll_obj='$output_objdir/$soname-ltdll.'"$ac_objext "
+ ltdll_cmds='test -f $output_objdir/$soname-ltdll.c || sed -e "/^# \/\* ltdll\.c starts here \*\//,/^# \/\* ltdll.c ends here \*\// { s/^# //; p; }" -e d < $''0 > $output_objdir/$soname-ltdll.c~
+ test -f $output_objdir/$soname-ltdll.$ac_objext || (cd $output_objdir && $CC -c $soname-ltdll.c)~'
+ else
+ ltdll_obj=
+ ltdll_cmds=
+ fi
+
+ # Extract the symbol export list from an `--export-all' def file,
+ # then regenerate the def file from the symbol export list, so that
+ # the compiled dll only exports the symbol export list.
+ # Be careful not to strip the DATA tag left by newer dlltools.
+ export_symbols_cmds="$ltdll_cmds"'
+ $DLLTOOL --export-all --exclude-symbols '$dll_exclude_symbols' --output-def $output_objdir/$soname-def '$ltdll_obj'$libobjs $convenience~
+ sed -e "1,/EXPORTS/d" -e "s/ @ [[0-9]]*//" -e "s/ *;.*$//" < $output_objdir/$soname-def > $export_symbols'
+
+ # If the export-symbols file already is a .def file (1st line
+ # is EXPORTS), use it as is.
+ # If DATA tags from a recent dlltool are present, honour them!
+ archive_expsym_cmds='if test "x`head -1 $export_symbols`" = xEXPORTS; then
+ cp $export_symbols $output_objdir/$soname-def;
+ else
+ echo EXPORTS > $output_objdir/$soname-def;
+ _lt_hint=1;
+ cat $export_symbols | while read symbol; do
+ set dummy \$symbol;
+ case \[$]# in
+ 2) echo " \[$]2 @ \$_lt_hint ; " >> $output_objdir/$soname-def;;
+ *) echo " \[$]2 @ \$_lt_hint \[$]3 ; " >> $output_objdir/$soname-def;;
+ esac;
+ _lt_hint=`expr 1 + \$_lt_hint`;
+ done;
+ fi~
+ '"$ltdll_cmds"'
+ $CC -Wl,--base-file,$output_objdir/$soname-base '$lt_cv_cc_dll_switch' -Wl,-e,'$dll_entry' -o $output_objdir/$soname '$ltdll_obj'$libobjs $deplibs $compiler_flags~
+ $DLLTOOL --as=$AS --dllname $soname --exclude-symbols '$dll_exclude_symbols' --def $output_objdir/$soname-def --base-file $output_objdir/$soname-base --output-exp $output_objdir/$soname-exp~
+ $CC -Wl,--base-file,$output_objdir/$soname-base $output_objdir/$soname-exp '$lt_cv_cc_dll_switch' -Wl,-e,'$dll_entry' -o $output_objdir/$soname '$ltdll_obj'$libobjs $deplibs $compiler_flags~
+ $DLLTOOL --as=$AS --dllname $soname --exclude-symbols '$dll_exclude_symbols' --def $output_objdir/$soname-def --base-file $output_objdir/$soname-base --output-exp $output_objdir/$soname-exp --output-lib $output_objdir/$libname.dll.a~
+ $CC $output_objdir/$soname-exp '$lt_cv_cc_dll_switch' -Wl,-e,'$dll_entry' -o $output_objdir/$soname '$ltdll_obj'$libobjs $deplibs $compiler_flags'
+ ;;
+
+ netbsd*)
+ if echo __ELF__ | $CC -E - | grep __ELF__ >/dev/null; then
+ archive_cmds='$LD -Bshareable $libobjs $deplibs $linker_flags -o $lib'
+ wlarc=
+ else
+ archive_cmds='$CC -shared -nodefaultlibs $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib'
+ archive_expsym_cmds='$CC -shared -nodefaultlibs $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib'
+ fi
+ ;;
+
+ solaris* | sysv5*)
+ if $LD -v 2>&1 | egrep 'BFD 2\.8' > /dev/null; then
+ ld_shlibs=no
+ cat <<EOF 1>&2
+
+*** Warning: The releases 2.8.* of the GNU linker cannot reliably
+*** create shared libraries on Solaris systems. Therefore, libtool
+*** is disabling shared libraries support. We urge you to upgrade GNU
+*** binutils to release 2.9.1 or newer. Another option is to modify
+*** your PATH or compiler configuration so that the native linker is
+*** used, and then restart.
+
+EOF
+ elif $LD --help 2>&1 | egrep ': supported targets:.* elf' > /dev/null; then
+ archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib'
+ archive_expsym_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib'
+ else
+ ld_shlibs=no
+ fi
+ ;;
+
+ sunos4*)
+ archive_cmds='$LD -assert pure-text -Bshareable -o $lib $libobjs $deplibs $linker_flags'
+ wlarc=
+ hardcode_direct=yes
+ hardcode_shlibpath_var=no
+ ;;
+
+ *)
+ if $LD --help 2>&1 | egrep ': supported targets:.* elf' > /dev/null; then
+ archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib'
+ archive_expsym_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib'
+ else
+ ld_shlibs=no
+ fi
+ ;;
+ esac
+
+ if test "$ld_shlibs" = yes; then
+ runpath_var=LD_RUN_PATH
+ hardcode_libdir_flag_spec='${wl}--rpath ${wl}$libdir'
+ export_dynamic_flag_spec='${wl}--export-dynamic'
+ case $host_os in
+ cygwin* | mingw* | pw32*)
+ # dlltool doesn't understand --whole-archive et al.
+ whole_archive_flag_spec=
+ ;;
+ *)
+ # ancient GNU ld didn't support --whole-archive et al.
+ if $LD --help 2>&1 | egrep 'no-whole-archive' > /dev/null; then
+ whole_archive_flag_spec="$wlarc"'--whole-archive$convenience '"$wlarc"'--no-whole-archive'
+ else
+ whole_archive_flag_spec=
+ fi
+ ;;
+ esac
+ fi
+else
+ # PORTME fill in a description of your system's linker (not GNU ld)
+ case $host_os in
+ aix3*)
+ allow_undefined_flag=unsupported
+ always_export_symbols=yes
+ archive_expsym_cmds='$LD -o $output_objdir/$soname $libobjs $deplibs $linker_flags -bE:$export_symbols -T512 -H512 -bM:SRE~$AR $AR_FLAGS $lib $output_objdir/$soname'
+ # Note: this linker hardcodes the directories in LIBPATH if there
+ # are no directories specified by -L.
+ hardcode_minus_L=yes
+ if test "$GCC" = yes && test -z "$link_static_flag"; then
+ # Neither direct hardcoding nor static linking is supported with a
+ # broken collect2.
+ hardcode_direct=unsupported
+ fi
+ ;;
+
+ aix4* | aix5*)
+ if test "$host_cpu" = ia64; then
+ # On IA64, the linker does run time linking by default, so we don't
+ # have to do anything special.
+ aix_use_runtimelinking=no
+ exp_sym_flag='-Bexport'
+ no_entry_flag=""
+ else
+ aix_use_runtimelinking=no
+
+ # Test if we are trying to use run time linking or normal
+ # AIX style linking. If -brtl is somewhere in LDFLAGS, we
+ # need to do runtime linking.
+ case $host_os in aix4.[[23]]|aix4.[[23]].*|aix5*)
+ for ld_flag in $LDFLAGS; do
+ if (test $ld_flag = "-brtl" || test $ld_flag = "-Wl,-brtl"); then
+ aix_use_runtimelinking=yes
+ break
+ fi
+ done
+ esac
+
+ exp_sym_flag='-bexport'
+ no_entry_flag='-bnoentry'
+ fi
+
+ # When large executables or shared objects are built, AIX ld can
+ # have problems creating the table of contents. If linking a library
+ # or program results in "error TOC overflow" add -mminimal-toc to
+ # CXXFLAGS/CFLAGS for g++/gcc. In the cases where that is not
+ # enough to fix the problem, add -Wl,-bbigtoc to LDFLAGS.
+
+ hardcode_direct=yes
+ archive_cmds=''
+ hardcode_libdir_separator=':'
+
+ #### Local change for Sleepycat's Berkeley DB [#5779]:
+ # Added $aix_export variable to control use of exports file.
+ # For non-gcc, we don't use exports files, but rather trust
+ # the binder's -qmkshrobj option to export all the mangled
+ # symbols we need for C++ and Java.
+
+ aix_export="\${wl}$exp_sym_flag:\$export_symbols"
+
+ if test "$GCC" = yes; then
+ case $host_os in aix4.[[012]]|aix4.[[012]].*)
+ collect2name=`${CC} -print-prog-name=collect2`
+ if test -f "$collect2name" && \
+ strings "$collect2name" | grep resolve_lib_name >/dev/null
+ then
+ # We have reworked collect2
+ hardcode_direct=yes
+ else
+ # We have old collect2
+ hardcode_direct=unsupported
+ # It fails to find uninstalled libraries when the uninstalled
+ # path is not listed in the libpath. Setting hardcode_direct to
+ # unsupported and hardcode_minus_L to yes forces relinking.
+ hardcode_minus_L=yes
+ hardcode_libdir_flag_spec='-L$libdir'
+ hardcode_libdir_separator=
+ fi
+ esac
+
+ shared_flag='-shared'
+ else
+ # not using gcc
+ if test "$host_cpu" = ia64; then
+ shared_flag='${wl}-G'
+ else
+ if test "$aix_use_runtimelinking" = yes; then
+ shared_flag='${wl}-G'
+ else
+ shared_flag='${wl}-bM:SRE'
+ fi
+ fi
+
+ # Test for -qmkshrobj and use it if it's available.
+ # It's superior for determining exportable symbols,
+ # especially for C++ or JNI libraries, which have
+ # mangled names.
+ #
+ AC_LANG_CONFTEST(void f(){})
+ if AC_TRY_EVAL(CC -c conftest.c) && AC_TRY_EVAL(CC -o conftest conftest.$ac_objext -qmkshrobj -lC_r); then
+ lt_cv_aix_mkshrobj=yes
+ else
+ lt_cv_aix_mkshrobj=no
+ fi
+
+ if test "$lt_cv_aix_mkshrobj" = yes; then
+ aix_export="-qmkshrobj"
+ fi
+ fi
+
+ # It seems that -bexpall can do strange things, so it is better to
+ # generate a list of symbols to export.
+ always_export_symbols=yes
+ if test "$aix_use_runtimelinking" = yes; then
+ # Warning - without using the other runtime loading flags (-brtl),
+ # -berok will link without error, but may produce a broken library.
+ allow_undefined_flag='-berok'
+ hardcode_libdir_flag_spec='${wl}-blibpath:$libdir:/usr/lib:/lib'
+ archive_expsym_cmds="\$CC"' -o $output_objdir/$soname $libobjs $deplibs $compiler_flags `if test "x${allow_undefined_flag}" != "x"; then echo "${wl}${allow_undefined_flag}"; else :; fi` '"\${wl}$no_entry_flag $aix_export $shared_flag"
+ else
+ if test "$host_cpu" = ia64; then
+ hardcode_libdir_flag_spec='${wl}-R $libdir:/usr/lib:/lib'
+ allow_undefined_flag="-z nodefs"
+ archive_expsym_cmds="\$CC $shared_flag"' -o $output_objdir/$soname ${wl}-h$soname $libobjs $deplibs $compiler_flags ${wl}${allow_undefined_flag} '"\${wl}$no_entry_flag $aix_export"
+ else
+ hardcode_libdir_flag_spec='${wl}-bnolibpath ${wl}-blibpath:$libdir:/usr/lib:/lib'
+ # Warning - without using the other run time loading flags,
+ # -berok will link without error, but may produce a broken library.
+ allow_undefined_flag='${wl}-berok'
+ # This is a bit strange, but is similar to how AIX traditionally builds
+ # its shared libraries.
+ archive_expsym_cmds="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs $compiler_flags ${allow_undefined_flag} '"\${wl}$no_entry_flag $aix_export"' ~$AR -crlo $objdir/$libname$release.a $objdir/$soname'
+ fi
+ fi
+ ;;
+
+ amigaos*)
+ archive_cmds='$rm $output_objdir/a2ixlibrary.data~$echo "#define NAME $libname" > $output_objdir/a2ixlibrary.data~$echo "#define LIBRARY_ID 1" >> $output_objdir/a2ixlibrary.data~$echo "#define VERSION $major" >> $output_objdir/a2ixlibrary.data~$echo "#define REVISION $revision" >> $output_objdir/a2ixlibrary.data~$AR $AR_FLAGS $lib $libobjs~$RANLIB $lib~(cd $output_objdir && a2ixlibrary -32)'
+ hardcode_libdir_flag_spec='-L$libdir'
+ hardcode_minus_L=yes
+ # see comment about different semantics in the GNU ld section
+ ld_shlibs=no
+ ;;
+
+ cygwin* | mingw* | pw32*)
+ # When not using gcc, we currently assume that we are using
+ # Microsoft Visual C++.
+ # hardcode_libdir_flag_spec is actually meaningless, as there is
+ # no search path for DLLs.
+ hardcode_libdir_flag_spec=' '
+ allow_undefined_flag=unsupported
+ # Tell ltmain to make .lib files, not .a files.
+ libext=lib
+ # FIXME: Setting linknames here is a bad hack.
+ archive_cmds='$CC -o $lib $libobjs $compiler_flags `echo "$deplibs" | sed -e '\''s/ -lc$//'\''` -link -dll~linknames='
+ # The linker will automatically build a .lib file if we build a DLL.
+ old_archive_from_new_cmds='true'
+ # FIXME: Should let the user specify the lib program.
+ old_archive_cmds='lib /OUT:$oldlib$oldobjs$old_deplibs'
+ fix_srcfile_path='`cygpath -w "$srcfile"`'
+ ;;
+
+ darwin* | rhapsody*)
+ case "$host_os" in
+ rhapsody* | darwin1.[[012]])
+ allow_undefined_flag='-undefined suppress'
+ ;;
+ *) # Darwin 1.3 on
+ allow_undefined_flag='-flat_namespace -undefined suppress'
+ ;;
+ esac
+ # FIXME: Relying on posixy $() will cause problems for
+ # cross-compilation, but unfortunately the echo tests do not
+ # yet detect zsh echo's removal of \ escapes.
+
+ #### Local change for Sleepycat's Berkeley DB [#5664] [#6511]
+ case "$host_os" in
+ darwin[[12345]].*)
+ # removed double quotes in the following line:
+ archive_cmds='$nonopt $(test x$module = xyes && echo -bundle || echo -dynamiclib) $allow_undefined_flag -o $lib $libobjs $deplibs$linker_flags -install_name $rpath/$soname $verstring'
+ ;;
+ *) # Darwin6.0 on (Mac OS/X Jaguar)
+ archive_cmds='$nonopt $allow_undefined_flag -o $lib $libobjs $deplibs$linker_flags -dynamiclib -install_name $rpath/$soname $verstring'
+ ;;
+ esac
+ #### End of changes for Sleepycat's Berkeley DB [#5664] [#6511]
+
+ # We need to add '_' to the symbols in $export_symbols first
+ #archive_expsym_cmds="$archive_cmds"' && strip -s $export_symbols'
+ hardcode_direct=yes
+ hardcode_shlibpath_var=no
+ whole_archive_flag_spec='-all_load $convenience'
+ ;;
+
+ freebsd1*)
+ ld_shlibs=no
+ ;;
+
+ # FreeBSD 2.2.[012] allows us to include c++rt0.o to get C++ constructor
+ # support. Future versions do this automatically, but an explicit c++rt0.o
+ # does not break anything, and helps significantly (at the cost of a little
+ # extra space).
+ freebsd2.2*)
+ archive_cmds='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags /usr/lib/c++rt0.o'
+ hardcode_libdir_flag_spec='-R$libdir'
+ hardcode_direct=yes
+ hardcode_shlibpath_var=no
+ ;;
+
+ # Unfortunately, older versions of FreeBSD 2 do not have this feature.
+ freebsd2*)
+ archive_cmds='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags'
+ hardcode_direct=yes
+ hardcode_minus_L=yes
+ hardcode_shlibpath_var=no
+ ;;
+
+ # FreeBSD 3 and greater uses gcc -shared to do shared libraries.
+ freebsd*)
+ archive_cmds='$CC -shared -o $lib $libobjs $deplibs $compiler_flags'
+ hardcode_libdir_flag_spec='-R$libdir'
+ hardcode_direct=yes
+ hardcode_shlibpath_var=no
+ ;;
+
+ hpux9* | hpux10* | hpux11*)
+ case $host_os in
+ hpux9*) archive_cmds='$rm $output_objdir/$soname~$LD -b +b $install_libdir -o $output_objdir/$soname $libobjs $deplibs $linker_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib' ;;
+ *) archive_cmds='$LD -b +h $soname +b $install_libdir -o $lib $libobjs $deplibs $linker_flags' ;;
+ esac
+ hardcode_libdir_flag_spec='${wl}+b ${wl}$libdir'
+ hardcode_libdir_separator=:
+ hardcode_direct=yes
+ hardcode_minus_L=yes # Not in the search PATH, but as the default
+ # location of the library.
+ export_dynamic_flag_spec='${wl}-E'
+ ;;
+
+ irix5* | irix6*)
+ if test "$GCC" = yes; then
+ archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && echo ${wl}-set_version ${wl}$verstring` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib'
+ else
+ archive_cmds='$LD -shared $libobjs $deplibs $linker_flags -soname $soname `test -n "$verstring" && echo -set_version $verstring` -update_registry ${output_objdir}/so_locations -o $lib'
+ fi
+ hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir'
+ hardcode_libdir_separator=:
+ link_all_deplibs=yes
+ ;;
+
+ netbsd*)
+ if echo __ELF__ | $CC -E - | grep __ELF__ >/dev/null; then
+ archive_cmds='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags' # a.out
+ else
+ archive_cmds='$LD -shared -o $lib $libobjs $deplibs $linker_flags' # ELF
+ fi
+ hardcode_libdir_flag_spec='-R$libdir'
+ hardcode_direct=yes
+ hardcode_shlibpath_var=no
+ ;;
+
+ newsos6)
+ archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags'
+ hardcode_direct=yes
+ hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir'
+ hardcode_libdir_separator=:
+ hardcode_shlibpath_var=no
+ ;;
+
+ openbsd*)
+ hardcode_direct=yes
+ hardcode_shlibpath_var=no
+ if test -z "`echo __ELF__ | $CC -E - | grep __ELF__`" || test "$host_os-$host_cpu" = "openbsd2.8-powerpc"; then
+ archive_cmds='$CC -shared $pic_flag -o $lib $libobjs $deplibs $linker_flags'
+ hardcode_libdir_flag_spec='${wl}-rpath,$libdir'
+ export_dynamic_flag_spec='${wl}-E'
+ else
+ case "$host_os" in
+ openbsd[[01]].* | openbsd2.[[0-7]] | openbsd2.[[0-7]].*)
+ archive_cmds='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags'
+ hardcode_libdir_flag_spec='-R$libdir'
+ ;;
+ *)
+ archive_cmds='$CC -shared $pic_flag -o $lib $libobjs $deplibs $linker_flags'
+ hardcode_libdir_flag_spec='${wl}-rpath,$libdir'
+ ;;
+ esac
+ fi
+ ;;
+
+ os2*)
+ hardcode_libdir_flag_spec='-L$libdir'
+ hardcode_minus_L=yes
+ allow_undefined_flag=unsupported
+ archive_cmds='$echo "LIBRARY $libname INITINSTANCE" > $output_objdir/$libname.def~$echo "DESCRIPTION \"$libname\"" >> $output_objdir/$libname.def~$echo DATA >> $output_objdir/$libname.def~$echo " SINGLE NONSHARED" >> $output_objdir/$libname.def~$echo EXPORTS >> $output_objdir/$libname.def~emxexp $libobjs >> $output_objdir/$libname.def~$CC -Zdll -Zcrtdll -o $lib $libobjs $deplibs $compiler_flags $output_objdir/$libname.def'
+ old_archive_from_new_cmds='emximp -o $output_objdir/$libname.a $output_objdir/$libname.def'
+ ;;
+
+ osf3*)
+ if test "$GCC" = yes; then
+ allow_undefined_flag=' ${wl}-expect_unresolved ${wl}\*'
+ archive_cmds='$CC -shared${allow_undefined_flag} $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && echo ${wl}-set_version ${wl}$verstring` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib'
+ else
+ allow_undefined_flag=' -expect_unresolved \*'
+ archive_cmds='$LD -shared${allow_undefined_flag} $libobjs $deplibs $linker_flags -soname $soname `test -n "$verstring" && echo -set_version $verstring` -update_registry ${output_objdir}/so_locations -o $lib'
+ fi
+ hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir'
+ hardcode_libdir_separator=:
+ ;;
+
+ osf4* | osf5*) # as osf3* with the addition of -msym flag
+ if test "$GCC" = yes; then
+ allow_undefined_flag=' ${wl}-expect_unresolved ${wl}\*'
+ archive_cmds='$CC -shared${allow_undefined_flag} $libobjs $deplibs $compiler_flags ${wl}-msym ${wl}-soname ${wl}$soname `test -n "$verstring" && echo ${wl}-set_version ${wl}$verstring` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib'
+ hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir'
+ else
+ allow_undefined_flag=' -expect_unresolved \*'
+ archive_cmds='$LD -shared${allow_undefined_flag} $libobjs $deplibs $linker_flags -msym -soname $soname `test -n "$verstring" && echo -set_version $verstring` -update_registry ${output_objdir}/so_locations -o $lib'
+ archive_expsym_cmds='for i in `cat $export_symbols`; do printf "-exported_symbol " >> $lib.exp; echo "\$i" >> $lib.exp; done; echo "-hidden">> $lib.exp~
+ $LD -shared${allow_undefined_flag} -input $lib.exp $linker_flags $libobjs $deplibs -soname $soname `test -n "$verstring" && echo -set_version $verstring` -update_registry ${objdir}/so_locations -o $lib~$rm $lib.exp'
+
+ # Both the C and C++ compilers support -rpath directly
+ hardcode_libdir_flag_spec='-rpath $libdir'
+ fi
+ hardcode_libdir_separator=:
+ ;;
+
+ sco3.2v5*)
+ archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags'
+ hardcode_shlibpath_var=no
+ runpath_var=LD_RUN_PATH
+ hardcode_runpath_var=yes
+ export_dynamic_flag_spec='${wl}-Bexport'
+ ;;
+
+ solaris*)
+ # gcc --version < 3.0 without binutils cannot create self contained
+ # shared libraries reliably, requiring libgcc.a to resolve some of
+ # the object symbols generated in some cases. Libraries that use
+ # assert need libgcc.a to resolve __eprintf, for example. Linking
+ # a copy of libgcc.a into every shared library to guarantee resolving
+ # such symbols causes other problems: According to Tim Van Holder
+ # <tim.van.holder@pandora.be>, C++ libraries end up with a separate
+ # (to the application) exception stack for one thing.
+ no_undefined_flag=' -z defs'
+ if test "$GCC" = yes; then
+ case `$CC --version 2>/dev/null` in
+ [[12]].*)
+ cat <<EOF 1>&2
+
+*** Warning: Releases of GCC earlier than version 3.0 cannot reliably
+*** create self contained shared libraries on Solaris systems, without
+*** introducing a dependency on libgcc.a. Therefore, libtool is disabling
+*** -no-undefined support, which will at least allow you to build shared
+*** libraries. However, you may find that when you link such libraries
+*** into an application without using GCC, you have to manually add
+*** \`gcc --print-libgcc-file-name\` to the link command. We urge you to
+*** upgrade to a newer version of GCC. Another option is to rebuild your
+*** current GCC to use the GNU linker from GNU binutils 2.9.1 or newer.
+
+EOF
+ no_undefined_flag=
+ ;;
+ esac
+ fi
+ # $CC -shared without GNU ld will not create a library from C++
+ # object files and a static libstdc++, so it is better to avoid it for now
+ archive_cmds='$LD -G${allow_undefined_flag} -h $soname -o $lib $libobjs $deplibs $linker_flags'
+ archive_expsym_cmds='$echo "{ global:" > $lib.exp~cat $export_symbols | sed -e "s/\(.*\)/\1;/" >> $lib.exp~$echo "local: *; };" >> $lib.exp~
+ $LD -G${allow_undefined_flag} -M $lib.exp -h $soname -o $lib $libobjs $deplibs $linker_flags~$rm $lib.exp'
+ hardcode_libdir_flag_spec='-R$libdir'
+ hardcode_shlibpath_var=no
+ case $host_os in
+ solaris2.[[0-5]] | solaris2.[[0-5]].*) ;;
+ *) # Supported since Solaris 2.6 (maybe 2.5.1?)
+ whole_archive_flag_spec='-z allextract$convenience -z defaultextract' ;;
+ esac
+ link_all_deplibs=yes
+ ;;
+
+ sunos4*)
+ if test "x$host_vendor" = xsequent; then
+ # Use $CC to link under sequent, because it throws in some extra .o
+ # files that make .init and .fini sections work.
+ archive_cmds='$CC -G ${wl}-h $soname -o $lib $libobjs $deplibs $compiler_flags'
+ else
+ archive_cmds='$LD -assert pure-text -Bstatic -o $lib $libobjs $deplibs $linker_flags'
+ fi
+ hardcode_libdir_flag_spec='-L$libdir'
+ hardcode_direct=yes
+ hardcode_minus_L=yes
+ hardcode_shlibpath_var=no
+ ;;
+
+ sysv4)
+ if test "x$host_vendor" = xsno; then
+ archive_cmds='$LD -G -Bsymbolic -h $soname -o $lib $libobjs $deplibs $linker_flags'
+ hardcode_direct=yes # is this really true???
+ else
+ archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags'
+ hardcode_direct=no # Motorola manual says yes, but my tests say they lie
+ fi
+ runpath_var='LD_RUN_PATH'
+ hardcode_shlibpath_var=no
+ ;;
+
+ sysv4.3*)
+ archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags'
+ hardcode_shlibpath_var=no
+ export_dynamic_flag_spec='-Bexport'
+ ;;
+
+ sysv5*)
+ no_undefined_flag=' -z text'
+ # $CC -shared without GNU ld will not create a library from C++
+ # object files and a static libstdc++, so it is better to avoid it for now
+ archive_cmds='$LD -G${allow_undefined_flag} -h $soname -o $lib $libobjs $deplibs $linker_flags'
+ archive_expsym_cmds='$echo "{ global:" > $lib.exp~cat $export_symbols | sed -e "s/\(.*\)/\1;/" >> $lib.exp~$echo "local: *; };" >> $lib.exp~
+ $LD -G${allow_undefined_flag} -M $lib.exp -h $soname -o $lib $libobjs $deplibs $linker_flags~$rm $lib.exp'
+ hardcode_libdir_flag_spec=
+ hardcode_shlibpath_var=no
+ runpath_var='LD_RUN_PATH'
+ ;;
+
+ uts4*)
+ archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags'
+ hardcode_libdir_flag_spec='-L$libdir'
+ hardcode_shlibpath_var=no
+ ;;
+
+ dgux*)
+ archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags'
+ hardcode_libdir_flag_spec='-L$libdir'
+ hardcode_shlibpath_var=no
+ ;;
+
+ sysv4*MP*)
+ if test -d /usr/nec; then
+ archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags'
+ hardcode_shlibpath_var=no
+ runpath_var=LD_RUN_PATH
+ hardcode_runpath_var=yes
+ ld_shlibs=yes
+ fi
+ ;;
+
+ sysv4.2uw2*)
+ archive_cmds='$LD -G -o $lib $libobjs $deplibs $linker_flags'
+ hardcode_direct=yes
+ hardcode_minus_L=no
+ hardcode_shlibpath_var=no
+ hardcode_runpath_var=yes
+ runpath_var=LD_RUN_PATH
+ ;;
+
+ sysv5uw7* | unixware7*)
+ no_undefined_flag='${wl}-z ${wl}text'
+ if test "$GCC" = yes; then
+ archive_cmds='$CC -shared ${wl}-h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags'
+ else
+ archive_cmds='$CC -G ${wl}-h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags'
+ fi
+ runpath_var='LD_RUN_PATH'
+ hardcode_shlibpath_var=no
+ ;;
+
+ *)
+ ld_shlibs=no
+ ;;
+ esac
+fi
+AC_MSG_RESULT([$ld_shlibs])
+test "$ld_shlibs" = no && can_build_shared=no
+##
+## END FIXME
+
+## FIXME: this should be a separate macro
+##
+# Check hardcoding attributes.
+AC_MSG_CHECKING([how to hardcode library paths into programs])
+hardcode_action=
+if test -n "$hardcode_libdir_flag_spec" || \
+ test -n "$runpath_var"; then
+
+ # We can hardcode non-existent directories.
+ if test "$hardcode_direct" != no &&
+ # If the only mechanism to avoid hardcoding is shlibpath_var, we
+ # have to relink, otherwise we might link with an installed library
+ # when we should be linking with a yet-to-be-installed one
+ ## test "$hardcode_shlibpath_var" != no &&
+ test "$hardcode_minus_L" != no; then
+ # Linking always hardcodes the temporary library directory.
+ hardcode_action=relink
+ else
+ # We can link without hardcoding, and we can hardcode nonexistent dirs.
+ hardcode_action=immediate
+ fi
+else
+ # We cannot hardcode anything, or else we can only hardcode existing
+ # directories.
+ hardcode_action=unsupported
+fi
+AC_MSG_RESULT([$hardcode_action])
+##
+## END FIXME
+
+## FIXME: this should be a separate macro
+##
+striplib=
+old_striplib=
+AC_MSG_CHECKING([whether stripping libraries is possible])
+if test -n "$STRIP" && $STRIP -V 2>&1 | grep "GNU strip" >/dev/null; then
+ test -z "$old_striplib" && old_striplib="$STRIP --strip-debug"
+ test -z "$striplib" && striplib="$STRIP --strip-unneeded"
+ AC_MSG_RESULT([yes])
+else
+ AC_MSG_RESULT([no])
+fi
+##
+## END FIXME
+
+reload_cmds='$LD$reload_flag -o $output$reload_objs'
+test -z "$deplibs_check_method" && deplibs_check_method=unknown
+
+## FIXME: this should be a separate macro
+##
+# PORTME Fill in your ld.so characteristics
+AC_MSG_CHECKING([dynamic linker characteristics])
+library_names_spec=
+libname_spec='lib$name'
+soname_spec=
+postinstall_cmds=
+postuninstall_cmds=
+finish_cmds=
+finish_eval=
+shlibpath_var=
+shlibpath_overrides_runpath=unknown
+version_type=none
+dynamic_linker="$host_os ld.so"
+sys_lib_dlsearch_path_spec="/lib /usr/lib"
+sys_lib_search_path_spec="/lib /usr/lib /usr/local/lib"
+
+case $host_os in
+aix3*)
+ version_type=linux
+ library_names_spec='${libname}${release}.so$versuffix $libname.a'
+ shlibpath_var=LIBPATH
+
+ # AIX has no versioning support, so we append a major version to the name.
+ soname_spec='${libname}${release}.so$major'
+ ;;
+
+aix4* | aix5*)
+ version_type=linux
+
+ #### Local change for Sleepycat's Berkeley DB [#5779]:
+ # If we don't set need_version, we'll get x.so.0.0.0,
+ # even if -avoid-version is set.
+ need_version=no
+
+ if test "$host_cpu" = ia64; then
+ # AIX 5 supports IA64
+ library_names_spec='${libname}${release}.so$major ${libname}${release}.so$versuffix $libname.so'
+ shlibpath_var=LD_LIBRARY_PATH
+ else
+ # With GCC up to 2.95.x, collect2 would create an import file
+ # for dependent libraries. The import file would start with
+ # the line `#! .'. This would cause the generated library to
+ # depend on `.', always an invalid library. This was fixed in
+ # development snapshots of GCC prior to 3.0.
+ case $host_os in
+ aix4 | aix4.[[01]] | aix4.[[01]].*)
+ if { echo '#if __GNUC__ > 2 || (__GNUC__ == 2 && __GNUC_MINOR__ >= 97)'
+ echo ' yes '
+ echo '#endif'; } | ${CC} -E - | grep yes > /dev/null; then
+ :
+ else
+ can_build_shared=no
+ fi
+ ;;
+ esac
+ # AIX (on Power*) has no versioning support, so currently we cannot
+ # hardcode a correct soname into the executable. We could probably
+ # add versioning support to collect2, so that additional links would
+ # be useful in the future.
+ if test "$aix_use_runtimelinking" = yes; then
+ # If using run time linking (on AIX 4.2 or later) use lib<name>.so
+ # instead of lib<name>.a to let people know that these are not
+ # typical AIX shared libraries.
+ library_names_spec='${libname}${release}.so$versuffix ${libname}${release}.so$major $libname.so'
+ else
+ # We preserve .a as the extension for shared libraries on AIX 4.2
+ # and later when we are not doing run time linking.
+ library_names_spec='${libname}${release}.a $libname.a'
+ soname_spec='${libname}${release}.so$major'
+ fi
+ shlibpath_var=LIBPATH
+ fi
+ ;;
+
+amigaos*)
+ library_names_spec='$libname.ixlibrary $libname.a'
+ # Create ${libname}_ixlibrary.a entries in /sys/libs.
+ finish_eval='for lib in `ls $libdir/*.ixlibrary 2>/dev/null`; do libname=`$echo "X$lib" | $Xsed -e '\''s%^.*/\([[^/]]*\)\.ixlibrary$%\1%'\''`; test $rm /sys/libs/${libname}_ixlibrary.a; $show "(cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a)"; (cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a) || exit 1; done'
+ ;;
+
+beos*)
+ library_names_spec='${libname}.so'
+ dynamic_linker="$host_os ld.so"
+ shlibpath_var=LIBRARY_PATH
+ ;;
+
+bsdi4*)
+ version_type=linux
+ need_version=no
+ library_names_spec='${libname}${release}.so$versuffix ${libname}${release}.so$major $libname.so'
+ soname_spec='${libname}${release}.so$major'
+ finish_cmds='PATH="\$PATH:/sbin" ldconfig $libdir'
+ shlibpath_var=LD_LIBRARY_PATH
+ sys_lib_search_path_spec="/shlib /usr/lib /usr/X11/lib /usr/contrib/lib /lib /usr/local/lib"
+ sys_lib_dlsearch_path_spec="/shlib /usr/lib /usr/local/lib"
+ export_dynamic_flag_spec=-rdynamic
+ # the default ld.so.conf also contains /usr/contrib/lib and
+ # /usr/X11R6/lib (/usr/X11 is a link to /usr/X11R6), but let us allow
+ # libtool to hard-code these into programs
+ ;;
+
+cygwin* | mingw* | pw32*)
+ version_type=windows
+ need_version=no
+ need_lib_prefix=no
+ case $GCC,$host_os in
+ yes,cygwin*)
+ library_names_spec='$libname.dll.a'
+ soname_spec='`echo ${libname} | sed -e 's/^lib/cyg/'``echo ${release} | sed -e 's/[[.]]/-/g'`${versuffix}.dll'
+ postinstall_cmds='dlpath=`bash 2>&1 -c '\''. $dir/${file}i;echo \$dlname'\''`~
+ dldir=$destdir/`dirname \$dlpath`~
+ test -d \$dldir || mkdir -p \$dldir~
+ $install_prog .libs/$dlname \$dldir/$dlname'
+ postuninstall_cmds='dldll=`bash 2>&1 -c '\''. $file; echo \$dlname'\''`~
+ dlpath=$dir/\$dldll~
+ $rm \$dlpath'
+ ;;
+ yes,mingw*)
+ library_names_spec='${libname}`echo ${release} | sed -e 's/[[.]]/-/g'`${versuffix}.dll'
+ sys_lib_search_path_spec=`$CC -print-search-dirs | grep "^libraries:" | sed -e "s/^libraries://" -e "s/;/ /g"`
+ ;;
+ yes,pw32*)
+ library_names_spec='`echo ${libname} | sed -e 's/^lib/pw/'``echo ${release} | sed -e 's/[.]/-/g'`${versuffix}.dll'
+ ;;
+ *)
+ library_names_spec='${libname}`echo ${release} | sed -e 's/[[.]]/-/g'`${versuffix}.dll $libname.lib'
+ ;;
+ esac
+ dynamic_linker='Win32 ld.exe'
+ # FIXME: first we should search . and the directory the executable is in
+ shlibpath_var=PATH
+ ;;
+
+darwin* | rhapsody*)
+ dynamic_linker="$host_os dyld"
+ version_type=darwin
+ need_lib_prefix=no
+ need_version=no
+ # FIXME: Relying on posixy $() will cause problems for
+ # cross-compilation, but unfortunately the echo tests do not
+ # yet detect zsh echo's removal of \ escapes.
+ #### Local change for Sleepycat's Berkeley DB [#6117]:
+ # added support for -jnimodule, encapsulated below in ${darwin_suffix}
+ darwin_suffix='$(test .$jnimodule = .yes && echo jnilib || (test .$module = .yes && echo so || echo dylib))'
+ library_names_spec='${libname}${release}${versuffix}.'"${darwin_suffix}"' ${libname}${release}${major}.'"${darwin_suffix}"' ${libname}.'"${darwin_suffix}"
+ soname_spec='${libname}${release}${major}.'"${darwin_suffix}"
+ shlibpath_overrides_runpath=yes
+ shlibpath_var=DYLD_LIBRARY_PATH
+ ;;
+
+freebsd1*)
+ dynamic_linker=no
+ ;;
+
+freebsd*)
+ objformat=`test -x /usr/bin/objformat && /usr/bin/objformat || echo aout`
+ version_type=freebsd-$objformat
+ case $version_type in
+ freebsd-elf*)
+ library_names_spec='${libname}${release}.so$versuffix ${libname}${release}.so $libname.so'
+ need_version=no
+ need_lib_prefix=no
+ ;;
+ freebsd-*)
+ library_names_spec='${libname}${release}.so$versuffix $libname.so$versuffix'
+ need_version=yes
+ ;;
+ esac
+ shlibpath_var=LD_LIBRARY_PATH
+ case $host_os in
+ freebsd2*)
+ shlibpath_overrides_runpath=yes
+ ;;
+ *)
+ shlibpath_overrides_runpath=no
+ hardcode_into_libs=yes
+ ;;
+ esac
+ ;;
+
+gnu*)
+ version_type=linux
+ need_lib_prefix=no
+ need_version=no
+ library_names_spec='${libname}${release}.so$versuffix ${libname}${release}.so${major} ${libname}.so'
+ soname_spec='${libname}${release}.so$major'
+ shlibpath_var=LD_LIBRARY_PATH
+ hardcode_into_libs=yes
+ ;;
+
+hpux9* | hpux10* | hpux11*)
+ # Give a soname corresponding to the major version so that dld.sl refuses to
+ # link against other versions.
+ dynamic_linker="$host_os dld.sl"
+ version_type=sunos
+ need_lib_prefix=no
+ need_version=no
+ shlibpath_var=SHLIB_PATH
+ shlibpath_overrides_runpath=no # +s is required to enable SHLIB_PATH
+ library_names_spec='${libname}${release}.sl$versuffix ${libname}${release}.sl$major $libname.sl'
+ soname_spec='${libname}${release}.sl$major'
+ # HP-UX runs *really* slowly unless shared libraries are mode 555.
+ postinstall_cmds='chmod 555 $lib'
+ ;;
+
+irix5* | irix6*)
+ version_type=irix
+ need_lib_prefix=no
+ need_version=no
+ soname_spec='${libname}${release}.so$major'
+ library_names_spec='${libname}${release}.so$versuffix ${libname}${release}.so$major ${libname}${release}.so $libname.so'
+ case $host_os in
+ irix5*)
+ libsuff= shlibsuff=
+ ;;
+ *)
+ case $LD in # libtool.m4 will add one of these switches to LD
+ *-32|*"-32 ") libsuff= shlibsuff= libmagic=32-bit;;
+ *-n32|*"-n32 ") libsuff=32 shlibsuff=N32 libmagic=N32;;
+ *-64|*"-64 ") libsuff=64 shlibsuff=64 libmagic=64-bit;;
+ *) libsuff= shlibsuff= libmagic=never-match;;
+ esac
+ ;;
+ esac
+ shlibpath_var=LD_LIBRARY${shlibsuff}_PATH
+ shlibpath_overrides_runpath=no
+ sys_lib_search_path_spec="/usr/lib${libsuff} /lib${libsuff} /usr/local/lib${libsuff}"
+ sys_lib_dlsearch_path_spec="/usr/lib${libsuff} /lib${libsuff}"
+ ;;
+
+# No shared lib support for Linux oldld, aout, or coff.
+linux-gnuoldld* | linux-gnuaout* | linux-gnucoff*)
+ dynamic_linker=no
+ ;;
+
+# This must be Linux ELF.
+linux-gnu*)
+ version_type=linux
+ need_lib_prefix=no
+ need_version=no
+ library_names_spec='${libname}${release}.so$versuffix ${libname}${release}.so$major $libname.so'
+ soname_spec='${libname}${release}.so$major'
+ finish_cmds='PATH="\$PATH:/sbin" ldconfig -n $libdir'
+ shlibpath_var=LD_LIBRARY_PATH
+ shlibpath_overrides_runpath=no
+ # This implies no fast_install, which is unacceptable.
+ # Some rework will be needed to allow for fast_install
+ # before this can be enabled.
+ hardcode_into_libs=yes
+
+ # We used to test for /lib/ld.so.1 and disable shared libraries on
+ # powerpc, because MkLinux only supported shared libraries with the
+ # GNU dynamic linker. Since that check was broken with cross compilers,
+ # most powerpc-linux boxes support dynamic linking these days, and
+ # people can always use --disable-shared, the test was removed and we
+ # now assume the GNU/Linux dynamic linker is in use.
+ dynamic_linker='GNU/Linux ld.so'
+ ;;
+
+netbsd*)
+ version_type=sunos
+ need_lib_prefix=no
+ need_version=no
+ if echo __ELF__ | $CC -E - | grep __ELF__ >/dev/null; then
+ library_names_spec='${libname}${release}.so$versuffix ${libname}.so$versuffix'
+ finish_cmds='PATH="\$PATH:/sbin" ldconfig -m $libdir'
+ dynamic_linker='NetBSD (a.out) ld.so'
+ else
+ library_names_spec='${libname}${release}.so$versuffix ${libname}${release}.so$major ${libname}${release}.so ${libname}.so'
+ soname_spec='${libname}${release}.so$major'
+ dynamic_linker='NetBSD ld.elf_so'
+ fi
+ shlibpath_var=LD_LIBRARY_PATH
+ shlibpath_overrides_runpath=yes
+ hardcode_into_libs=yes
+ ;;
+
+newsos6)
+ version_type=linux
+ library_names_spec='${libname}${release}.so$versuffix ${libname}${release}.so$major $libname.so'
+ shlibpath_var=LD_LIBRARY_PATH
+ shlibpath_overrides_runpath=yes
+ ;;
+
+nto-qnx)
+ version_type=linux
+ need_lib_prefix=no
+ need_version=no
+ library_names_spec='${libname}${release}.so$versuffix ${libname}${release}.so$major $libname.so'
+ soname_spec='${libname}${release}.so$major'
+ shlibpath_var=LD_LIBRARY_PATH
+ shlibpath_overrides_runpath=yes
+ ;;
+
+openbsd*)
+ version_type=sunos
+ need_lib_prefix=no
+ need_version=no
+ if test -z "`echo __ELF__ | $CC -E - | grep __ELF__`" || test "$host_os-$host_cpu" = "openbsd2.8-powerpc"; then
+ case "$host_os" in
+ openbsd2.[[89]] | openbsd2.[[89]].*)
+ shlibpath_overrides_runpath=no
+ ;;
+ *)
+ shlibpath_overrides_runpath=yes
+ ;;
+ esac
+ else
+ shlibpath_overrides_runpath=yes
+ fi
+ library_names_spec='${libname}${release}.so$versuffix ${libname}.so$versuffix'
+ finish_cmds='PATH="\$PATH:/sbin" ldconfig -m $libdir'
+ shlibpath_var=LD_LIBRARY_PATH
+ ;;
+
+os2*)
+ libname_spec='$name'
+ need_lib_prefix=no
+ library_names_spec='$libname.dll $libname.a'
+ dynamic_linker='OS/2 ld.exe'
+ shlibpath_var=LIBPATH
+ ;;
+
+osf3* | osf4* | osf5*)
+ version_type=osf
+ need_version=no
+ soname_spec='${libname}${release}.so'
+ library_names_spec='${libname}${release}.so$versuffix ${libname}${release}.so $libname.so'
+ shlibpath_var=LD_LIBRARY_PATH
+ sys_lib_search_path_spec="/usr/shlib /usr/ccs/lib /usr/lib/cmplrs/cc /usr/lib /usr/local/lib /var/shlib"
+ sys_lib_dlsearch_path_spec="$sys_lib_search_path_spec"
+ ;;
+
+sco3.2v5*)
+ version_type=osf
+ soname_spec='${libname}${release}.so$major'
+ library_names_spec='${libname}${release}.so$versuffix ${libname}${release}.so$major $libname.so'
+ shlibpath_var=LD_LIBRARY_PATH
+ ;;
+
+solaris*)
+ version_type=linux
+ need_lib_prefix=no
+ need_version=no
+ library_names_spec='${libname}${release}.so$versuffix ${libname}${release}.so$major $libname.so'
+ soname_spec='${libname}${release}.so$major'
+ shlibpath_var=LD_LIBRARY_PATH
+ shlibpath_overrides_runpath=yes
+ hardcode_into_libs=yes
+ # ldd complains unless libraries are executable
+ postinstall_cmds='chmod +x $lib'
+ ;;
+
+sunos4*)
+ version_type=sunos
+ library_names_spec='${libname}${release}.so$versuffix ${libname}.so$versuffix'
+ finish_cmds='PATH="\$PATH:/usr/etc" ldconfig $libdir'
+ shlibpath_var=LD_LIBRARY_PATH
+ shlibpath_overrides_runpath=yes
+ if test "$with_gnu_ld" = yes; then
+ need_lib_prefix=no
+ fi
+ need_version=yes
+ ;;
+
+sysv4 | sysv4.2uw2* | sysv4.3* | sysv5*)
+ version_type=linux
+ library_names_spec='${libname}${release}.so$versuffix ${libname}${release}.so$major $libname.so'
+ soname_spec='${libname}${release}.so$major'
+ shlibpath_var=LD_LIBRARY_PATH
+ case $host_vendor in
+ sni)
+ shlibpath_overrides_runpath=no
+ ;;
+ motorola)
+ need_lib_prefix=no
+ need_version=no
+ shlibpath_overrides_runpath=no
+ sys_lib_search_path_spec='/lib /usr/lib /usr/ccs/lib'
+ ;;
+ esac
+ ;;
+
+uts4*)
+ version_type=linux
+ library_names_spec='${libname}${release}.so$versuffix ${libname}${release}.so$major $libname.so'
+ soname_spec='${libname}${release}.so$major'
+ shlibpath_var=LD_LIBRARY_PATH
+ ;;
+
+dgux*)
+ version_type=linux
+ need_lib_prefix=no
+ need_version=no
+ library_names_spec='${libname}${release}.so$versuffix ${libname}${release}.so$major $libname.so'
+ soname_spec='${libname}${release}.so$major'
+ shlibpath_var=LD_LIBRARY_PATH
+ ;;
+
+sysv4*MP*)
+ if test -d /usr/nec; then
+ version_type=linux
+ library_names_spec='$libname.so.$versuffix $libname.so.$major $libname.so'
+ soname_spec='$libname.so.$major'
+ shlibpath_var=LD_LIBRARY_PATH
+ fi
+ ;;
+
+*)
+ dynamic_linker=no
+ ;;
+esac
+AC_MSG_RESULT([$dynamic_linker])
+test "$dynamic_linker" = no && can_build_shared=no
+##
+## END FIXME
+
+## FIXME: this should be a separate macro
+##
+# Report the final consequences.
+AC_MSG_CHECKING([if libtool supports shared libraries])
+AC_MSG_RESULT([$can_build_shared])
+##
+## END FIXME
+
+## FIXME: this should be a separate macro
+##
+AC_MSG_CHECKING([whether to build shared libraries])
+test "$can_build_shared" = "no" && enable_shared=no
+
+# On AIX, shared libraries and static libraries use the same namespace, and
+# are all built from PIC.
+case "$host_os" in
+aix3*)
+ test "$enable_shared" = yes && enable_static=no
+ if test -n "$RANLIB"; then
+ archive_cmds="$archive_cmds~\$RANLIB \$lib"
+ postinstall_cmds='$RANLIB $lib'
+ fi
+ ;;
+
+aix4*)
+ if test "$host_cpu" != ia64 && test "$aix_use_runtimelinking" = no ; then
+ test "$enable_shared" = yes && enable_static=no
+ fi
+ ;;
+esac
+AC_MSG_RESULT([$enable_shared])
+##
+## END FIXME
+
+## FIXME: this should be a separate macro
+##
+AC_MSG_CHECKING([whether to build static libraries])
+# Make sure either enable_shared or enable_static is yes.
+test "$enable_shared" = yes || enable_static=yes
+AC_MSG_RESULT([$enable_static])
+##
+## END FIXME
+
+if test "$hardcode_action" = relink; then
+ # Fast installation is not supported
+ enable_fast_install=no
+elif test "$shlibpath_overrides_runpath" = yes ||
+ test "$enable_shared" = no; then
+ # Fast installation is not necessary
+ enable_fast_install=needless
+fi
+
+variables_saved_for_relink="PATH $shlibpath_var $runpath_var"
+if test "$GCC" = yes; then
+ variables_saved_for_relink="$variables_saved_for_relink GCC_EXEC_PREFIX COMPILER_PATH LIBRARY_PATH"
+fi
+
+AC_LIBTOOL_DLOPEN_SELF
+
+## FIXME: this should be a separate macro
+##
+if test "$enable_shared" = yes && test "$GCC" = yes; then
+ case $archive_cmds in
+ *'~'*)
+ # FIXME: we may have to deal with multi-command sequences.
+ ;;
+ '$CC '*)
+ # Test whether the compiler implicitly links with -lc since on some
+ # systems, -lgcc has to come before -lc. If gcc already passes -lc
+ # to ld, don't add -lc before -lgcc.
+ AC_MSG_CHECKING([whether -lc should be explicitly linked in])
+ AC_CACHE_VAL([lt_cv_archive_cmds_need_lc],
+ [$rm conftest*
+ echo 'static int dummy;' > conftest.$ac_ext
+
+ if AC_TRY_EVAL(ac_compile); then
+ soname=conftest
+ lib=conftest
+ libobjs=conftest.$ac_objext
+ deplibs=
+ wl=$lt_cv_prog_cc_wl
+ compiler_flags=-v
+ linker_flags=-v
+ verstring=
+ output_objdir=.
+ libname=conftest
+ save_allow_undefined_flag=$allow_undefined_flag
+ allow_undefined_flag=
+ if AC_TRY_EVAL(archive_cmds 2\>\&1 \| grep \" -lc \" \>/dev/null 2\>\&1)
+ then
+ lt_cv_archive_cmds_need_lc=no
+ else
+ lt_cv_archive_cmds_need_lc=yes
+ fi
+ allow_undefined_flag=$save_allow_undefined_flag
+ else
+ cat conftest.err 1>&5
+ fi])
+ AC_MSG_RESULT([$lt_cv_archive_cmds_need_lc])
+ ;;
+ esac
+fi
+need_lc=${lt_cv_archive_cmds_need_lc-yes}
+##
+## END FIXME
+
+## FIXME: this should be a separate macro
+##
+# The second clause should only fire when bootstrapping the
+# libtool distribution, otherwise you forgot to ship ltmain.sh
+# with your package, and you will get complaints that there are
+# no rules to generate ltmain.sh.
+if test -f "$ltmain"; then
+ :
+else
+ # If there is no Makefile yet, we rely on a make rule to execute
+ # `config.status --recheck' to rerun these tests and create the
+ # libtool script then.
+ test -f Makefile && make "$ltmain"
+fi
+
+if test -f "$ltmain"; then
+ trap "$rm \"${ofile}T\"; exit 1" 1 2 15
+ $rm -f "${ofile}T"
+
+ echo creating $ofile
+
+ # Now quote all the things that may contain metacharacters while being
+ # careful not to overquote the AC_SUBSTed values. We take copies of the
+ # variables and quote the copies for generation of the libtool script.
+ for var in echo old_CC old_CFLAGS \
+ AR AR_FLAGS CC LD LN_S NM SHELL \
+ reload_flag reload_cmds wl \
+ pic_flag link_static_flag no_builtin_flag export_dynamic_flag_spec \
+ thread_safe_flag_spec whole_archive_flag_spec libname_spec \
+ library_names_spec soname_spec \
+ RANLIB old_archive_cmds old_archive_from_new_cmds old_postinstall_cmds \
+ old_postuninstall_cmds archive_cmds archive_expsym_cmds postinstall_cmds \
+ postuninstall_cmds extract_expsyms_cmds old_archive_from_expsyms_cmds \
+ old_striplib striplib file_magic_cmd export_symbols_cmds \
+ deplibs_check_method allow_undefined_flag no_undefined_flag \
+ finish_cmds finish_eval global_symbol_pipe global_symbol_to_cdecl \
+ global_symbol_to_c_name_address \
+ hardcode_libdir_flag_spec hardcode_libdir_separator \
+ sys_lib_search_path_spec sys_lib_dlsearch_path_spec \
+ compiler_c_o compiler_o_lo need_locks exclude_expsyms include_expsyms; do
+
+ case $var in
+ reload_cmds | old_archive_cmds | old_archive_from_new_cmds | \
+ old_postinstall_cmds | old_postuninstall_cmds | \
+ export_symbols_cmds | archive_cmds | archive_expsym_cmds | \
+ extract_expsyms_cmds | old_archive_from_expsyms_cmds | \
+ postinstall_cmds | postuninstall_cmds | \
+ finish_cmds | sys_lib_search_path_spec | sys_lib_dlsearch_path_spec)
+ # Double-quote double-evaled strings.
+ eval "lt_$var=\\\"\`\$echo \"X\$$var\" | \$Xsed -e \"\$double_quote_subst\" -e \"\$sed_quote_subst\" -e \"\$delay_variable_subst\"\`\\\""
+ ;;
+ *)
+ eval "lt_$var=\\\"\`\$echo \"X\$$var\" | \$Xsed -e \"\$sed_quote_subst\"\`\\\""
+ ;;
+ esac
+ done
+
+ cat <<__EOF__ > "${ofile}T"
+#! $SHELL
+
+# `$echo "$ofile" | sed 's%^.*/%%'` - Provide generalized library-building support services.
+# Generated automatically by $PROGRAM (GNU $PACKAGE $VERSION$TIMESTAMP)
+# NOTE: Changes made to this file will be lost: look at ltmain.sh.
+#
+# Copyright (C) 1996-2000 Free Software Foundation, Inc.
+# Originally by Gordon Matzigkeit <gord@gnu.ai.mit.edu>, 1996
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+#
+# As a special exception to the GNU General Public License, if you
+# distribute this file as part of a program that contains a
+# configuration script generated by Autoconf, you may include it under
+# the same distribution terms that you use for the rest of that program.
+
+# Sed that helps us avoid accidentally triggering echo(1) options like -n.
+Xsed="sed -e s/^X//"
+
+# The HP-UX ksh and POSIX shell print the target directory to stdout
+# if CDPATH is set.
+if test "X\${CDPATH+set}" = Xset; then CDPATH=:; export CDPATH; fi
+
+# ### BEGIN LIBTOOL CONFIG
+
+# Libtool was configured on host `(hostname || uname -n) 2>/dev/null | sed 1q`:
+
+# Shell to use when invoking shell scripts.
+SHELL=$lt_SHELL
+
+# Whether or not to build shared libraries.
+build_libtool_libs=$enable_shared
+
+# Whether or not to build static libraries.
+build_old_libs=$enable_static
+
+# Whether or not to add -lc for building shared libraries.
+build_libtool_need_lc=$need_lc
+
+# Whether or not to optimize for fast installation.
+fast_install=$enable_fast_install
+
+# The host system.
+host_alias=$host_alias
+host=$host
+
+# An echo program that does not interpret backslashes.
+echo=$lt_echo
+
+# The archiver.
+AR=$lt_AR
+AR_FLAGS=$lt_AR_FLAGS
+
+# The default C compiler.
+CC=$lt_CC
+
+# Is the compiler the GNU C compiler?
+with_gcc=$GCC
+
+# The linker used to build libraries.
+LD=$lt_LD
+
+# Whether we need hard or soft links.
+LN_S=$lt_LN_S
+
+# A BSD-compatible nm program.
+NM=$lt_NM
+
+# A symbol stripping program
+STRIP=$STRIP
+
+# Used to examine libraries when file_magic_cmd begins with "file".
+MAGIC_CMD=$MAGIC_CMD
+
+# Used on cygwin: DLL creation program.
+DLLTOOL="$DLLTOOL"
+
+# Used on cygwin: object dumper.
+OBJDUMP="$OBJDUMP"
+
+# Used on cygwin: assembler.
+AS="$AS"
+
+# The name of the directory that contains temporary libtool files.
+objdir=$objdir
+
+# How to create reloadable object files.
+reload_flag=$lt_reload_flag
+reload_cmds=$lt_reload_cmds
+
+# How to pass a linker flag through the compiler.
+wl=$lt_wl
+
+# Object file suffix (normally "o").
+objext="$ac_objext"
+
+# Old archive suffix (normally "a").
+libext="$libext"
+
+# Executable file suffix (normally "").
+exeext="$exeext"
+
+# Additional compiler flags for building library objects.
+pic_flag=$lt_pic_flag
+pic_mode=$pic_mode
+
+# Does compiler simultaneously support -c and -o options?
+compiler_c_o=$lt_compiler_c_o
+
+# Can we write directly to a .lo ?
+compiler_o_lo=$lt_compiler_o_lo
+
+# Must we lock files when doing compilation ?
+need_locks=$lt_need_locks
+
+# Do we need the lib prefix for modules?
+need_lib_prefix=$need_lib_prefix
+
+# Do we need a version for libraries?
+need_version=$need_version
+
+# Whether dlopen is supported.
+dlopen_support=$enable_dlopen
+
+# Whether dlopen of programs is supported.
+dlopen_self=$enable_dlopen_self
+
+# Whether dlopen of statically linked programs is supported.
+dlopen_self_static=$enable_dlopen_self_static
+
+# Compiler flag to prevent dynamic linking.
+link_static_flag=$lt_link_static_flag
+
+# Compiler flag to turn off builtin functions.
+no_builtin_flag=$lt_no_builtin_flag
+
+# Compiler flag to allow reflexive dlopens.
+export_dynamic_flag_spec=$lt_export_dynamic_flag_spec
+
+# Compiler flag to generate shared objects directly from archives.
+whole_archive_flag_spec=$lt_whole_archive_flag_spec
+
+# Compiler flag to generate thread-safe objects.
+thread_safe_flag_spec=$lt_thread_safe_flag_spec
+
+# Library versioning type.
+version_type=$version_type
+
+# Format of library name prefix.
+libname_spec=$lt_libname_spec
+
+# List of archive names. First name is the real one, the rest are links.
+# The last name is the one that the linker finds with -lNAME.
+library_names_spec=$lt_library_names_spec
+
+# The coded name of the library, if different from the real name.
+soname_spec=$lt_soname_spec
+
+# Commands used to build and install an old-style archive.
+RANLIB=$lt_RANLIB
+old_archive_cmds=$lt_old_archive_cmds
+old_postinstall_cmds=$lt_old_postinstall_cmds
+old_postuninstall_cmds=$lt_old_postuninstall_cmds
+
+# Create an old-style archive from a shared archive.
+old_archive_from_new_cmds=$lt_old_archive_from_new_cmds
+
+# Create a temporary old-style archive to link instead of a shared archive.
+old_archive_from_expsyms_cmds=$lt_old_archive_from_expsyms_cmds
+
+# Commands used to build and install a shared archive.
+archive_cmds=$lt_archive_cmds
+archive_expsym_cmds=$lt_archive_expsym_cmds
+postinstall_cmds=$lt_postinstall_cmds
+postuninstall_cmds=$lt_postuninstall_cmds
+
+# Commands to strip libraries.
+old_striplib=$lt_old_striplib
+striplib=$lt_striplib
+
+# Method to check whether dependent libraries are shared objects.
+deplibs_check_method=$lt_deplibs_check_method
+
+# Command to use when deplibs_check_method == file_magic.
+file_magic_cmd=$lt_file_magic_cmd
+
+# Flag that allows shared libraries with undefined symbols to be built.
+allow_undefined_flag=$lt_allow_undefined_flag
+
+# Flag that forces no undefined symbols.
+no_undefined_flag=$lt_no_undefined_flag
+
+# Commands used to finish a libtool library installation in a directory.
+finish_cmds=$lt_finish_cmds
+
+# Same as above, but a single script fragment to be evaled but not shown.
+finish_eval=$lt_finish_eval
+
+# Take the output of nm and produce a listing of raw symbols and C names.
+global_symbol_pipe=$lt_global_symbol_pipe
+
+# Transform the output of nm into a proper C declaration.
+global_symbol_to_cdecl=$lt_global_symbol_to_cdecl
+
+# Transform the output of nm into a C name/address pair.
+global_symbol_to_c_name_address=$lt_global_symbol_to_c_name_address
+
+# This is the shared library runtime path variable.
+runpath_var=$runpath_var
+
+# This is the shared library path variable.
+shlibpath_var=$shlibpath_var
+
+# Is shlibpath searched before the hard-coded library search path?
+shlibpath_overrides_runpath=$shlibpath_overrides_runpath
+
+# How to hardcode a shared library path into an executable.
+hardcode_action=$hardcode_action
+
+# Whether we should hardcode library paths into libraries.
+hardcode_into_libs=$hardcode_into_libs
+
+# Flag to hardcode \$libdir into a binary during linking.
+# This must work even if \$libdir does not exist.
+hardcode_libdir_flag_spec=$lt_hardcode_libdir_flag_spec
+
+# Whether we need a single -rpath flag with a separated argument.
+hardcode_libdir_separator=$lt_hardcode_libdir_separator
+
+# Set to yes if using DIR/libNAME.so during linking hardcodes DIR into the
+# resulting binary.
+hardcode_direct=$hardcode_direct
+
+# Set to yes if using the -LDIR flag during linking hardcodes DIR into the
+# resulting binary.
+hardcode_minus_L=$hardcode_minus_L
+
+# Set to yes if using SHLIBPATH_VAR=DIR during linking hardcodes DIR into
+# the resulting binary.
+hardcode_shlibpath_var=$hardcode_shlibpath_var
+
+# Variables whose values should be saved in libtool wrapper scripts and
+# restored at relink time.
+variables_saved_for_relink="$variables_saved_for_relink"
+
+# Whether libtool must link a program against all its dependency libraries.
+link_all_deplibs=$link_all_deplibs
+
+# Compile-time system search path for libraries
+sys_lib_search_path_spec=$lt_sys_lib_search_path_spec
+
+# Run-time system search path for libraries
+sys_lib_dlsearch_path_spec=$lt_sys_lib_dlsearch_path_spec
+
+# Fix the shell variable \$srcfile for the compiler.
+fix_srcfile_path="$fix_srcfile_path"
+
+# Set to yes if exported symbols are required.
+always_export_symbols=$always_export_symbols
+
+# The commands to list exported symbols.
+export_symbols_cmds=$lt_export_symbols_cmds
+
+# The commands to extract the exported symbol list from a shared archive.
+extract_expsyms_cmds=$lt_extract_expsyms_cmds
+
+# Symbols that should not be listed in the preloaded symbols.
+exclude_expsyms=$lt_exclude_expsyms
+
+# Symbols that must always be exported.
+include_expsyms=$lt_include_expsyms
+
+# ### END LIBTOOL CONFIG
+
+__EOF__
+
+ case $host_os in
+ aix3*)
+ cat <<\EOF >> "${ofile}T"
+
+# AIX sometimes has problems with the GCC collect2 program. For some
+# reason, if we set the COLLECT_NAMES environment variable, the problems
+# vanish in a puff of smoke.
+if test "X${COLLECT_NAMES+set}" != Xset; then
+ COLLECT_NAMES=
+ export COLLECT_NAMES
+fi
+EOF
+ ;;
+ esac
+
+ case $host_os in
+ cygwin* | mingw* | pw32* | os2*)
+ cat <<'EOF' >> "${ofile}T"
+ # This is a source program that is used to create DLLs on Windows.
+ # Don't remove or modify the starting and closing comments.
+# /* ltdll.c starts here */
+# #define WIN32_LEAN_AND_MEAN
+# #include <windows.h>
+# #undef WIN32_LEAN_AND_MEAN
+# #include <stdio.h>
+#
+# #ifndef __CYGWIN__
+# # ifdef __CYGWIN32__
+# # define __CYGWIN__ __CYGWIN32__
+# # endif
+# #endif
+#
+# #ifdef __cplusplus
+# extern "C" {
+# #endif
+# BOOL APIENTRY DllMain (HINSTANCE hInst, DWORD reason, LPVOID reserved);
+# #ifdef __cplusplus
+# }
+# #endif
+#
+# #ifdef __CYGWIN__
+# #include <cygwin/cygwin_dll.h>
+# DECLARE_CYGWIN_DLL( DllMain );
+# #endif
+# HINSTANCE __hDllInstance_base;
+#
+# BOOL APIENTRY
+# DllMain (HINSTANCE hInst, DWORD reason, LPVOID reserved)
+# {
+# __hDllInstance_base = hInst;
+# return TRUE;
+# }
+# /* ltdll.c ends here */
+ # This is a source program that is used to create import libraries
+ # on Windows for DLLs which lack them. Don't remove or modify the
+ # starting and closing comments.
+# /* impgen.c starts here */
+# /* Copyright (C) 1999-2000 Free Software Foundation, Inc.
+#
+# This file is part of GNU libtool.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+# */
+#
+# #include <stdio.h> /* for printf() */
+# #include <unistd.h> /* for open(), lseek(), read() */
+# #include <fcntl.h> /* for O_RDONLY, O_BINARY */
+# #include <string.h> /* for strdup() */
+#
+# /* O_BINARY isn't required (or even defined sometimes) under Unix */
+# #ifndef O_BINARY
+# #define O_BINARY 0
+# #endif
+#
+# static unsigned int
+# pe_get16 (fd, offset)
+# int fd;
+# int offset;
+# {
+# unsigned char b[2];
+# lseek (fd, offset, SEEK_SET);
+# read (fd, b, 2);
+# return b[0] + (b[1]<<8);
+# }
+#
+# static unsigned int
+# pe_get32 (fd, offset)
+# int fd;
+# int offset;
+# {
+# unsigned char b[4];
+# lseek (fd, offset, SEEK_SET);
+# read (fd, b, 4);
+# return b[0] + (b[1]<<8) + (b[2]<<16) + (b[3]<<24);
+# }
+#
+# static unsigned int
+# pe_as32 (ptr)
+# void *ptr;
+# {
+# unsigned char *b = ptr;
+# return b[0] + (b[1]<<8) + (b[2]<<16) + (b[3]<<24);
+# }
+#
+# int
+# main (argc, argv)
+# int argc;
+# char *argv[];
+# {
+# int dll;
+# unsigned long pe_header_offset, opthdr_ofs, num_entries, i;
+# unsigned long export_rva, export_size, nsections, secptr, expptr;
+# unsigned long name_rvas, nexp;
+# unsigned char *expdata, *erva;
+# char *filename, *dll_name;
+#
+# filename = argv[1];
+#
+# dll = open(filename, O_RDONLY|O_BINARY);
+# if (dll < 1)
+# return 1;
+#
+# dll_name = filename;
+#
+# for (i=0; filename[i]; i++)
+# if (filename[i] == '/' || filename[i] == '\\' || filename[i] == ':')
+# dll_name = filename + i +1;
+#
+# pe_header_offset = pe_get32 (dll, 0x3c);
+# opthdr_ofs = pe_header_offset + 4 + 20;
+# num_entries = pe_get32 (dll, opthdr_ofs + 92);
+#
+# if (num_entries < 1) /* no exports */
+# return 1;
+#
+# export_rva = pe_get32 (dll, opthdr_ofs + 96);
+# export_size = pe_get32 (dll, opthdr_ofs + 100);
+# nsections = pe_get16 (dll, pe_header_offset + 4 +2);
+# secptr = (pe_header_offset + 4 + 20 +
+# pe_get16 (dll, pe_header_offset + 4 + 16));
+#
+# expptr = 0;
+# for (i = 0; i < nsections; i++)
+# {
+# char sname[8];
+# unsigned long secptr1 = secptr + 40 * i;
+# unsigned long vaddr = pe_get32 (dll, secptr1 + 12);
+# unsigned long vsize = pe_get32 (dll, secptr1 + 16);
+# unsigned long fptr = pe_get32 (dll, secptr1 + 20);
+# lseek(dll, secptr1, SEEK_SET);
+# read(dll, sname, 8);
+# if (vaddr <= export_rva && vaddr+vsize > export_rva)
+# {
+# expptr = fptr + (export_rva - vaddr);
+# if (export_rva + export_size > vaddr + vsize)
+# export_size = vsize - (export_rva - vaddr);
+# break;
+# }
+# }
+#
+# expdata = (unsigned char*)malloc(export_size);
+# lseek (dll, expptr, SEEK_SET);
+# read (dll, expdata, export_size);
+# erva = expdata - export_rva;
+#
+# nexp = pe_as32 (expdata+24);
+# name_rvas = pe_as32 (expdata+32);
+#
+# printf ("EXPORTS\n");
+# for (i = 0; i<nexp; i++)
+# {
+# unsigned long name_rva = pe_as32 (erva+name_rvas+i*4);
+# printf ("\t%s @ %ld ;\n", erva+name_rva, 1+ i);
+# }
+#
+# return 0;
+# }
+# /* impgen.c ends here */
+
+EOF
+ ;;
+ esac
+
+ # We use sed instead of cat because bash on DJGPP gets confused if
+ # it finds mixed CR/LF and LF-only lines. Since sed operates in
+ # text mode, it properly converts lines to CR/LF. This bash problem
+ # is reportedly fixed, but why not run on old versions too?
+ sed '$q' "$ltmain" >> "${ofile}T" || (rm -f "${ofile}T"; exit 1)
+
+ mv -f "${ofile}T" "$ofile" || \
+ (rm -f "$ofile" && cp "${ofile}T" "$ofile" && rm -f "${ofile}T")
+ chmod +x "$ofile"
+fi
+##
+## END FIXME
+
+])# _LT_AC_LTCONFIG_HACK
+
+# AC_LIBTOOL_DLOPEN - enable checks for dlopen support
+AC_DEFUN([AC_LIBTOOL_DLOPEN], [AC_BEFORE([$0],[AC_LIBTOOL_SETUP])])
+
+# AC_LIBTOOL_WIN32_DLL - declare package support for building win32 dll's
+AC_DEFUN([AC_LIBTOOL_WIN32_DLL], [AC_BEFORE([$0], [AC_LIBTOOL_SETUP])])
+
+# AC_ENABLE_SHARED - implement the --enable-shared flag
+# Usage: AC_ENABLE_SHARED[(DEFAULT)]
+# Where DEFAULT is either `yes' or `no'. If omitted, it defaults to
+# `yes'.
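+#
+# Illustrative usage only (editorial sketch, not part of this import): a
+# package's configure.in would typically invoke this macro, or
+# AC_DISABLE_SHARED, before AC_PROG_LIBTOOL, for example:
+#   AC_ENABLE_SHARED(yes)
+#   AC_PROG_LIBTOOL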
+AC_DEFUN([AC_ENABLE_SHARED],
+[define([AC_ENABLE_SHARED_DEFAULT], ifelse($1, no, no, yes))dnl
+AC_ARG_ENABLE(shared,
+changequote(<<, >>)dnl
+<< --enable-shared[=PKGS] build shared libraries [default=>>AC_ENABLE_SHARED_DEFAULT],
+changequote([, ])dnl
+[p=${PACKAGE-default}
+case $enableval in
+yes) enable_shared=yes ;;
+no) enable_shared=no ;;
+*)
+ enable_shared=no
+ # Look at the argument we got. We use all the common list separators.
+ IFS="${IFS= }"; ac_save_ifs="$IFS"; IFS="${IFS}:,"
+ for pkg in $enableval; do
+ if test "X$pkg" = "X$p"; then
+ enable_shared=yes
+ fi
+ done
+ IFS="$ac_save_ifs"
+ ;;
+esac],
+enable_shared=AC_ENABLE_SHARED_DEFAULT)dnl
+])
+
+# AC_DISABLE_SHARED - set the default shared flag to --disable-shared
+AC_DEFUN([AC_DISABLE_SHARED],
+[AC_BEFORE([$0],[AC_LIBTOOL_SETUP])dnl
+AC_ENABLE_SHARED(no)])
+
+# AC_ENABLE_STATIC - implement the --enable-static flag
+# Usage: AC_ENABLE_STATIC[(DEFAULT)]
+# Where DEFAULT is either `yes' or `no'. If omitted, it defaults to
+# `yes'.
+AC_DEFUN([AC_ENABLE_STATIC],
+[define([AC_ENABLE_STATIC_DEFAULT], ifelse($1, no, no, yes))dnl
+AC_ARG_ENABLE(static,
+changequote(<<, >>)dnl
+<< --enable-static[=PKGS] build static libraries [default=>>AC_ENABLE_STATIC_DEFAULT],
+changequote([, ])dnl
+[p=${PACKAGE-default}
+case $enableval in
+yes) enable_static=yes ;;
+no) enable_static=no ;;
+*)
+ enable_static=no
+ # Look at the argument we got. We use all the common list separators.
+ IFS="${IFS= }"; ac_save_ifs="$IFS"; IFS="${IFS}:,"
+ for pkg in $enableval; do
+ if test "X$pkg" = "X$p"; then
+ enable_static=yes
+ fi
+ done
+ IFS="$ac_save_ifs"
+ ;;
+esac],
+enable_static=AC_ENABLE_STATIC_DEFAULT)dnl
+])
+
+# AC_DISABLE_STATIC - set the default static flag to --disable-static
+AC_DEFUN([AC_DISABLE_STATIC],
+[AC_BEFORE([$0],[AC_LIBTOOL_SETUP])dnl
+AC_ENABLE_STATIC(no)])
+
+
+# AC_ENABLE_FAST_INSTALL - implement the --enable-fast-install flag
+# Usage: AC_ENABLE_FAST_INSTALL[(DEFAULT)]
+# Where DEFAULT is either `yes' or `no'. If omitted, it defaults to
+# `yes'.
+AC_DEFUN([AC_ENABLE_FAST_INSTALL],
+[define([AC_ENABLE_FAST_INSTALL_DEFAULT], ifelse($1, no, no, yes))dnl
+AC_ARG_ENABLE(fast-install,
+changequote(<<, >>)dnl
+<< --enable-fast-install[=PKGS] optimize for fast installation [default=>>AC_ENABLE_FAST_INSTALL_DEFAULT],
+changequote([, ])dnl
+[p=${PACKAGE-default}
+case $enableval in
+yes) enable_fast_install=yes ;;
+no) enable_fast_install=no ;;
+*)
+ enable_fast_install=no
+ # Look at the argument we got. We use all the common list separators.
+ IFS="${IFS= }"; ac_save_ifs="$IFS"; IFS="${IFS}:,"
+ for pkg in $enableval; do
+ if test "X$pkg" = "X$p"; then
+ enable_fast_install=yes
+ fi
+ done
+ IFS="$ac_save_ifs"
+ ;;
+esac],
+enable_fast_install=AC_ENABLE_FAST_INSTALL_DEFAULT)dnl
+])
+
+# AC_DISABLE_FAST_INSTALL - set the default to --disable-fast-install
+AC_DEFUN([AC_DISABLE_FAST_INSTALL],
+[AC_BEFORE([$0],[AC_LIBTOOL_SETUP])dnl
+AC_ENABLE_FAST_INSTALL(no)])
+
+# AC_LIBTOOL_PICMODE - implement the --with-pic flag
+# Usage: AC_LIBTOOL_PICMODE[(MODE)]
+# Where MODE is either `yes' or `no'. If omitted, it defaults to
+# `both'.
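+# For illustration only (editorial sketch): AC_LIBTOOL_PICMODE(yes) asks
+# libtool to build only PIC objects, while AC_LIBTOOL_PICMODE(no) asks for
+# non-PIC objects only.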
+AC_DEFUN([AC_LIBTOOL_PICMODE],
+[AC_BEFORE([$0],[AC_LIBTOOL_SETUP])dnl
+pic_mode=ifelse($#,1,$1,default)])
+
+
+# AC_PATH_TOOL_PREFIX - find a file program which can recognise a shared library
+AC_DEFUN([AC_PATH_TOOL_PREFIX],
+[AC_MSG_CHECKING([for $1])
+AC_CACHE_VAL(lt_cv_path_MAGIC_CMD,
+[case $MAGIC_CMD in
+ /*)
+ lt_cv_path_MAGIC_CMD="$MAGIC_CMD" # Let the user override the test with a path.
+ ;;
+ ?:/*)
+ lt_cv_path_MAGIC_CMD="$MAGIC_CMD" # Let the user override the test with a dos path.
+ ;;
+ *)
+ ac_save_MAGIC_CMD="$MAGIC_CMD"
+ IFS="${IFS= }"; ac_save_ifs="$IFS"; IFS=":"
+dnl $ac_dummy forces splitting on constant user-supplied paths.
+dnl POSIX.2 word splitting is done only on the output of word expansions,
+dnl not every word. This closes a longstanding sh security hole.
+ ac_dummy="ifelse([$2], , $PATH, [$2])"
+ for ac_dir in $ac_dummy; do
+ test -z "$ac_dir" && ac_dir=.
+ if test -f $ac_dir/$1; then
+ lt_cv_path_MAGIC_CMD="$ac_dir/$1"
+ if test -n "$file_magic_test_file"; then
+ case $deplibs_check_method in
+ "file_magic "*)
+ file_magic_regex="`expr \"$deplibs_check_method\" : \"file_magic \(.*\)\"`"
+ MAGIC_CMD="$lt_cv_path_MAGIC_CMD"
+ if eval $file_magic_cmd \$file_magic_test_file 2> /dev/null |
+ egrep "$file_magic_regex" > /dev/null; then
+ :
+ else
+ cat <<EOF 1>&2
+
+*** Warning: the command libtool uses to detect shared libraries,
+*** $file_magic_cmd, produces output that libtool cannot recognize.
+*** The result is that libtool may fail to recognize shared libraries
+*** as such. This will affect the creation of libtool libraries that
+*** depend on shared libraries, but programs linked with such libtool
+*** libraries will work regardless of this problem. Nevertheless, you
+*** may want to report the problem to your system manager and/or to
+*** bug-libtool@gnu.org
+
+EOF
+ fi ;;
+ esac
+ fi
+ break
+ fi
+ done
+ IFS="$ac_save_ifs"
+ MAGIC_CMD="$ac_save_MAGIC_CMD"
+ ;;
+esac])
+MAGIC_CMD="$lt_cv_path_MAGIC_CMD"
+if test -n "$MAGIC_CMD"; then
+ AC_MSG_RESULT($MAGIC_CMD)
+else
+ AC_MSG_RESULT(no)
+fi
+])
+
+
+# AC_PATH_MAGIC - find a file program which can recognise a shared library
+AC_DEFUN([AC_PATH_MAGIC],
+[AC_REQUIRE([AC_CHECK_TOOL_PREFIX])dnl
+AC_PATH_TOOL_PREFIX(${ac_tool_prefix}file, /usr/bin:$PATH)
+if test -z "$lt_cv_path_MAGIC_CMD"; then
+ if test -n "$ac_tool_prefix"; then
+ AC_PATH_TOOL_PREFIX(file, /usr/bin:$PATH)
+ else
+ MAGIC_CMD=:
+ fi
+fi
+])
+
+
+# AC_PROG_LD - find the path to the GNU or non-GNU linker
+AC_DEFUN([AC_PROG_LD],
+[AC_ARG_WITH(gnu-ld,
+[ --with-gnu-ld assume the C compiler uses GNU ld [default=no]],
+test "$withval" = no || with_gnu_ld=yes, with_gnu_ld=no)
+AC_REQUIRE([AC_PROG_CC])dnl
+AC_REQUIRE([AC_CANONICAL_HOST])dnl
+AC_REQUIRE([AC_CANONICAL_BUILD])dnl
+AC_REQUIRE([_LT_AC_LIBTOOL_SYS_PATH_SEPARATOR])dnl
+ac_prog=ld
+if test "$GCC" = yes; then
+ # Check if gcc -print-prog-name=ld gives a path.
+ AC_MSG_CHECKING([for ld used by GCC])
+ case $host in
+ *-*-mingw*)
+ # gcc leaves a trailing carriage return which upsets mingw
+ ac_prog=`($CC -print-prog-name=ld) 2>&5 | tr -d '\015'` ;;
+ *)
+ ac_prog=`($CC -print-prog-name=ld) 2>&5` ;;
+ esac
+ case $ac_prog in
+ # Accept absolute paths.
+ [[\\/]]* | [[A-Za-z]]:[[\\/]]*)
+ re_direlt='/[[^/]][[^/]]*/\.\./'
+ # Canonicalize the path of ld
+ ac_prog=`echo $ac_prog| sed 's%\\\\%/%g'`
+ while echo $ac_prog | grep "$re_direlt" > /dev/null 2>&1; do
+ ac_prog=`echo $ac_prog| sed "s%$re_direlt%/%"`
+ done
+ test -z "$LD" && LD="$ac_prog"
+ ;;
+ "")
+ # If it fails, then pretend we aren't using GCC.
+ ac_prog=ld
+ ;;
+ *)
+ # If it is relative, then search for the first ld in PATH.
+ with_gnu_ld=unknown
+ ;;
+ esac
+elif test "$with_gnu_ld" = yes; then
+ AC_MSG_CHECKING([for GNU ld])
+else
+ AC_MSG_CHECKING([for non-GNU ld])
+fi
+AC_CACHE_VAL(lt_cv_path_LD,
+[if test -z "$LD"; then
+ IFS="${IFS= }"; ac_save_ifs="$IFS"; IFS=$PATH_SEPARATOR
+ for ac_dir in $PATH; do
+ test -z "$ac_dir" && ac_dir=.
+ if test -f "$ac_dir/$ac_prog" || test -f "$ac_dir/$ac_prog$ac_exeext"; then
+ lt_cv_path_LD="$ac_dir/$ac_prog"
+ # Check to see if the program is GNU ld. I'd rather use --version,
+ # but apparently some GNU ld's only accept -v.
+ # Break only if it was the GNU/non-GNU ld that we prefer.
+ if "$lt_cv_path_LD" -v 2>&1 < /dev/null | egrep '(GNU|with BFD)' > /dev/null; then
+ test "$with_gnu_ld" != no && break
+ else
+ test "$with_gnu_ld" != yes && break
+ fi
+ fi
+ done
+ IFS="$ac_save_ifs"
+else
+ lt_cv_path_LD="$LD" # Let the user override the test with a path.
+fi])
+LD="$lt_cv_path_LD"
+if test -n "$LD"; then
+ AC_MSG_RESULT($LD)
+else
+ AC_MSG_RESULT(no)
+fi
+test -z "$LD" && AC_MSG_ERROR([no acceptable ld found in \$PATH])
+AC_PROG_LD_GNU
+])
+
+# AC_PROG_LD_GNU - check whether the linker in use is GNU ld
+AC_DEFUN([AC_PROG_LD_GNU],
+[AC_CACHE_CHECK([if the linker ($LD) is GNU ld], lt_cv_prog_gnu_ld,
+[# I'd rather use --version here, but apparently some GNU ld's only accept -v.
+if $LD -v 2>&1 </dev/null | egrep '(GNU|with BFD)' 1>&5; then
+ lt_cv_prog_gnu_ld=yes
+else
+ lt_cv_prog_gnu_ld=no
+fi])
+with_gnu_ld=$lt_cv_prog_gnu_ld
+])
+
+# AC_PROG_LD_RELOAD_FLAG - find reload flag for linker
+# -- PORTME Some linkers may need a different reload flag.
+AC_DEFUN([AC_PROG_LD_RELOAD_FLAG],
+[AC_CACHE_CHECK([for $LD option to reload object files], lt_cv_ld_reload_flag,
+[lt_cv_ld_reload_flag='-r'])
+reload_flag=$lt_cv_ld_reload_flag
+test -n "$reload_flag" && reload_flag=" $reload_flag"
+])
+
+# AC_DEPLIBS_CHECK_METHOD - how to check for library dependencies
+# -- PORTME fill in with the dynamic library characteristics
+AC_DEFUN([AC_DEPLIBS_CHECK_METHOD],
+[AC_CACHE_CHECK([how to recognise dependent libraries],
+lt_cv_deplibs_check_method,
+[lt_cv_file_magic_cmd='$MAGIC_CMD'
+lt_cv_file_magic_test_file=
+lt_cv_deplibs_check_method='unknown'
+# Need to set the preceding variable on all platforms that support
+# interlibrary dependencies.
+# 'none' -- dependencies not supported.
+# `unknown' -- same as none, but documents that we really don't know.
+# 'pass_all' -- all dependencies passed with no checks.
+# 'test_compile' -- check by making test program.
+# 'file_magic [[regex]]' -- check by looking for files in library path
+# which responds to the $file_magic_cmd with a given egrep regex.
+# If you have `file' or equivalent on your system and you're not sure
+# whether `pass_all' will *always* work, you probably want this one.
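+#
+# As an illustration only (hypothetical port name, not part of this file),
+# a new entry in the case statement below would normally just pick one of
+# the values above, e.g.:
+#   foo-os*)
+#     lt_cv_deplibs_check_method=pass_all
+#     ;;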
+
+case $host_os in
+aix4* | aix5*)
+ lt_cv_deplibs_check_method=pass_all
+ ;;
+
+beos*)
+ lt_cv_deplibs_check_method=pass_all
+ ;;
+
+bsdi4*)
+ lt_cv_deplibs_check_method='file_magic ELF [[0-9]][[0-9]]*-bit [[ML]]SB (shared object|dynamic lib)'
+ lt_cv_file_magic_cmd='/usr/bin/file -L'
+ lt_cv_file_magic_test_file=/shlib/libc.so
+ ;;
+
+cygwin* | mingw* | pw32*)
+ lt_cv_deplibs_check_method='file_magic file format pei*-i386(.*architecture: i386)?'
+ lt_cv_file_magic_cmd='$OBJDUMP -f'
+ ;;
+
+darwin* | rhapsody*)
+ lt_cv_deplibs_check_method='file_magic Mach-O dynamically linked shared library'
+ lt_cv_file_magic_cmd='/usr/bin/file -L'
+ case "$host_os" in
+ rhapsody* | darwin1.[[012]])
+ lt_cv_file_magic_test_file=`echo /System/Library/Frameworks/System.framework/Versions/*/System | head -1`
+ ;;
+ *) # Darwin 1.3 on
+ lt_cv_file_magic_test_file='/usr/lib/libSystem.dylib'
+ ;;
+ esac
+ ;;
+
+freebsd*)
+ if echo __ELF__ | $CC -E - | grep __ELF__ > /dev/null; then
+ case $host_cpu in
+ i*86 )
+ # Not sure whether the presence of OpenBSD here was a mistake.
+ # Let's accept both of them until this is cleared up.
+ lt_cv_deplibs_check_method='file_magic (FreeBSD|OpenBSD)/i[[3-9]]86 (compact )?demand paged shared library'
+ lt_cv_file_magic_cmd=/usr/bin/file
+ lt_cv_file_magic_test_file=`echo /usr/lib/libc.so.*`
+ ;;
+ esac
+ else
+ lt_cv_deplibs_check_method=pass_all
+ fi
+ ;;
+
+gnu*)
+ lt_cv_deplibs_check_method=pass_all
+ ;;
+
+hpux10.20*|hpux11*)
+ lt_cv_deplibs_check_method='file_magic (s[[0-9]][[0-9]][[0-9]]|PA-RISC[[0-9]].[[0-9]]) shared library'
+ lt_cv_file_magic_cmd=/usr/bin/file
+ lt_cv_file_magic_test_file=/usr/lib/libc.sl
+ ;;
+
+irix5* | irix6*)
+ case $host_os in
+ irix5*)
+ # this will be overridden with pass_all, but let us keep it just in case
+ lt_cv_deplibs_check_method="file_magic ELF 32-bit MSB dynamic lib MIPS - version 1"
+ ;;
+ *)
+ case $LD in
+ *-32|*"-32 ") libmagic=32-bit;;
+ *-n32|*"-n32 ") libmagic=N32;;
+ *-64|*"-64 ") libmagic=64-bit;;
+ *) libmagic=never-match;;
+ esac
+ # this will be overridden with pass_all, but let us keep it just in case
+ lt_cv_deplibs_check_method="file_magic ELF ${libmagic} MSB mips-[[1234]] dynamic lib MIPS - version 1"
+ ;;
+ esac
+ lt_cv_file_magic_test_file=`echo /lib${libsuff}/libc.so*`
+ lt_cv_deplibs_check_method=pass_all
+ ;;
+
+# This must be Linux ELF.
+linux-gnu*)
+ case $host_cpu in
+ alpha* | hppa* | i*86 | powerpc* | sparc* | ia64* )
+ lt_cv_deplibs_check_method=pass_all ;;
+ *)
+ # glibc up to 2.1.1 does not perform some relocations on ARM
+ lt_cv_deplibs_check_method='file_magic ELF [[0-9]][[0-9]]*-bit [[LM]]SB (shared object|dynamic lib )' ;;
+ esac
+ lt_cv_file_magic_test_file=`echo /lib/libc.so* /lib/libc-*.so`
+ ;;
+
+netbsd*)
+ if echo __ELF__ | $CC -E - | grep __ELF__ > /dev/null; then
+ lt_cv_deplibs_check_method='match_pattern /lib[[^/\.]]+\.so\.[[0-9]]+\.[[0-9]]+$'
+ else
+ lt_cv_deplibs_check_method='match_pattern /lib[[^/\.]]+\.so$'
+ fi
+ ;;
+
+newos6*)
+ lt_cv_deplibs_check_method='file_magic ELF [[0-9]][[0-9]]*-bit [[ML]]SB (executable|dynamic lib)'
+ lt_cv_file_magic_cmd=/usr/bin/file
+ lt_cv_file_magic_test_file=/usr/lib/libnls.so
+ ;;
+
+nto-qnx)
+ lt_cv_deplibs_check_method=unknown
+ ;;
+
+openbsd*)
+ lt_cv_file_magic_cmd=/usr/bin/file
+ lt_cv_file_magic_test_file=`echo /usr/lib/libc.so.*`
+ if test -z "`echo __ELF__ | $CC -E - | grep __ELF__`" || test "$host_os-$host_cpu" = "openbsd2.8-powerpc"; then
+ lt_cv_deplibs_check_method='file_magic ELF [[0-9]][[0-9]]*-bit [[LM]]SB shared object'
+ else
+ lt_cv_deplibs_check_method='file_magic OpenBSD.* shared library'
+ fi
+ ;;
+
+osf3* | osf4* | osf5*)
+ # this will be overridden with pass_all, but let us keep it just in case
+ lt_cv_deplibs_check_method='file_magic COFF format alpha shared library'
+ lt_cv_file_magic_test_file=/shlib/libc.so
+ lt_cv_deplibs_check_method=pass_all
+ ;;
+
+sco3.2v5*)
+ lt_cv_deplibs_check_method=pass_all
+ ;;
+
+solaris*)
+ lt_cv_deplibs_check_method=pass_all
+ lt_cv_file_magic_test_file=/lib/libc.so
+ ;;
+
+sysv5uw[[78]]* | sysv4*uw2*)
+ lt_cv_deplibs_check_method=pass_all
+ ;;
+
+sysv4 | sysv4.2uw2* | sysv4.3* | sysv5*)
+ case $host_vendor in
+ motorola)
+ lt_cv_deplibs_check_method='file_magic ELF [[0-9]][[0-9]]*-bit [[ML]]SB (shared object|dynamic lib) M[[0-9]][[0-9]]* Version [[0-9]]'
+ lt_cv_file_magic_test_file=`echo /usr/lib/libc.so*`
+ ;;
+ ncr)
+ lt_cv_deplibs_check_method=pass_all
+ ;;
+ sequent)
+ lt_cv_file_magic_cmd='/bin/file'
+ lt_cv_deplibs_check_method='file_magic ELF [[0-9]][[0-9]]*-bit [[LM]]SB (shared object|dynamic lib )'
+ ;;
+ sni)
+ lt_cv_file_magic_cmd='/bin/file'
+ lt_cv_deplibs_check_method="file_magic ELF [[0-9]][[0-9]]*-bit [[LM]]SB dynamic lib"
+ lt_cv_file_magic_test_file=/lib/libc.so
+ ;;
+ esac
+ ;;
+esac
+])
+file_magic_cmd=$lt_cv_file_magic_cmd
+deplibs_check_method=$lt_cv_deplibs_check_method
+])
+
+
+# AC_PROG_NM - find the path to a BSD-compatible name lister
+AC_DEFUN([AC_PROG_NM],
+[AC_REQUIRE([_LT_AC_LIBTOOL_SYS_PATH_SEPARATOR])dnl
+AC_MSG_CHECKING([for BSD-compatible nm])
+AC_CACHE_VAL(lt_cv_path_NM,
+[if test -n "$NM"; then
+ # Let the user override the test.
+ lt_cv_path_NM="$NM"
+else
+ IFS="${IFS= }"; ac_save_ifs="$IFS"; IFS=$PATH_SEPARATOR
+ for ac_dir in $PATH /usr/ccs/bin /usr/ucb /bin; do
+ test -z "$ac_dir" && ac_dir=.
+ tmp_nm=$ac_dir/${ac_tool_prefix}nm
+ if test -f $tmp_nm || test -f $tmp_nm$ac_exeext ; then
+ # Check to see if the nm accepts a BSD-compat flag.
+ # Adding the `sed 1q' prevents false positives on HP-UX, which says:
+ # nm: unknown option "B" ignored
+ # Tru64's nm complains that /dev/null is an invalid object file
+ if ($tmp_nm -B /dev/null 2>&1 | sed '1q'; exit 0) | egrep '(/dev/null|Invalid file or object type)' >/dev/null; then
+ lt_cv_path_NM="$tmp_nm -B"
+ break
+ elif ($tmp_nm -p /dev/null 2>&1 | sed '1q'; exit 0) | egrep /dev/null >/dev/null; then
+ lt_cv_path_NM="$tmp_nm -p"
+ break
+ else
+ lt_cv_path_NM=${lt_cv_path_NM="$tmp_nm"} # keep the first match, but
+ continue # so that we can try to find one that supports BSD flags
+ fi
+ fi
+ done
+ IFS="$ac_save_ifs"
+ test -z "$lt_cv_path_NM" && lt_cv_path_NM=nm
+fi])
+NM="$lt_cv_path_NM"
+AC_MSG_RESULT([$NM])
+])
+
+# AC_CHECK_LIBM - check for math library
+AC_DEFUN([AC_CHECK_LIBM],
+[AC_REQUIRE([AC_CANONICAL_HOST])dnl
+LIBM=
+case $host in
+*-*-beos* | *-*-cygwin* | *-*-pw32*)
+ # These systems don't have libm.
+ ;;
+*-ncr-sysv4.3*)
+ AC_CHECK_LIB(mw, _mwvalidcheckl, LIBM="-lmw")
+ AC_CHECK_LIB(m, main, LIBM="$LIBM -lm")
+ ;;
+*)
+ AC_CHECK_LIB(m, main, LIBM="-lm")
+ ;;
+esac
+])
+
+# AC_LIBLTDL_CONVENIENCE[(dir)] - sets LIBLTDL to the link flags for
+# the libltdl convenience library and INCLTDL to the include flags for
+# the libltdl header and adds --enable-ltdl-convenience to the
+# configure arguments. Note that LIBLTDL and INCLTDL are not
+# AC_SUBSTed, nor is AC_CONFIG_SUBDIRS called. If DIR is not
+# provided, it is assumed to be `libltdl'. LIBLTDL will be prefixed
+# with '${top_builddir}/' and INCLTDL will be prefixed with
+# '${top_srcdir}/' (note the single quotes!). If your package is not
+# flat and you're not using automake, define top_builddir and
+# top_srcdir appropriately in the Makefiles.
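+#
+# Illustrative usage only (editorial sketch, not part of the macro itself):
+# a configure.in that bundles libltdl might call, for example,
+#   AC_LIBLTDL_CONVENIENCE
+#   AC_PROG_LIBTOOL
+# and then AC_SUBST LIBLTDL and INCLTDL itself so that its Makefiles can
+# link against the convenience library and find the ltdl.h header.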
+AC_DEFUN([AC_LIBLTDL_CONVENIENCE],
+[AC_BEFORE([$0],[AC_LIBTOOL_SETUP])dnl
+ case $enable_ltdl_convenience in
+ no) AC_MSG_ERROR([this package needs a convenience libltdl]) ;;
+ "") enable_ltdl_convenience=yes
+ ac_configure_args="$ac_configure_args --enable-ltdl-convenience" ;;
+ esac
+ LIBLTDL='${top_builddir}/'ifelse($#,1,[$1],['libltdl'])/libltdlc.la
+ INCLTDL='-I${top_srcdir}/'ifelse($#,1,[$1],['libltdl'])
+])
+
+# AC_LIBLTDL_INSTALLABLE[(dir)] - sets LIBLTDL to the link flags for
+# the libltdl installable library and INCLTDL to the include flags for
+# the libltdl header and adds --enable-ltdl-install to the configure
+# arguments. Note that LIBLTDL and INCLTDL are not AC_SUBSTed, nor is
+# AC_CONFIG_SUBDIRS called. If DIR is not provided and an installed
+# libltdl is not found, it is assumed to be `libltdl'. LIBLTDL will
+# be prefixed with '${top_builddir}/' and INCLTDL will be prefixed
+# with '${top_srcdir}/' (note the single quotes!). If your package is
+# not flat and you're not using automake, define top_builddir and
+# top_srcdir appropriately in the Makefiles.
+# In the future, this macro may have to be called after AC_PROG_LIBTOOL.
+AC_DEFUN([AC_LIBLTDL_INSTALLABLE],
+[AC_BEFORE([$0],[AC_LIBTOOL_SETUP])dnl
+ AC_CHECK_LIB(ltdl, main,
+ [test x"$enable_ltdl_install" != xyes && enable_ltdl_install=no],
+ [if test x"$enable_ltdl_install" = xno; then
+ AC_MSG_WARN([libltdl not installed, but installation disabled])
+ else
+ enable_ltdl_install=yes
+ fi
+ ])
+ if test x"$enable_ltdl_install" = x"yes"; then
+ ac_configure_args="$ac_configure_args --enable-ltdl-install"
+ LIBLTDL='${top_builddir}/'ifelse($#,1,[$1],['libltdl'])/libltdl.la
+ INCLTDL='-I${top_srcdir}/'ifelse($#,1,[$1],['libltdl'])
+ else
+ ac_configure_args="$ac_configure_args --enable-ltdl-install=no"
+ LIBLTDL="-lltdl"
+ INCLTDL=
+ fi
+])
+
+# old names
+AC_DEFUN([AM_PROG_LIBTOOL], [AC_PROG_LIBTOOL])
+AC_DEFUN([AM_ENABLE_SHARED], [AC_ENABLE_SHARED($@)])
+AC_DEFUN([AM_ENABLE_STATIC], [AC_ENABLE_STATIC($@)])
+AC_DEFUN([AM_DISABLE_SHARED], [AC_DISABLE_SHARED($@)])
+AC_DEFUN([AM_DISABLE_STATIC], [AC_DISABLE_STATIC($@)])
+AC_DEFUN([AM_PROG_LD], [AC_PROG_LD])
+AC_DEFUN([AM_PROG_NM], [AC_PROG_NM])
+
+# This is just to silence aclocal about the macro not being used
+ifelse([AC_DISABLE_FAST_INSTALL])
diff --git a/bdb/dist/aclocal/mutex.ac b/bdb/dist/aclocal/mutex.ac
new file mode 100644
index 00000000000..f3f5529c74f
--- /dev/null
+++ b/bdb/dist/aclocal/mutex.ac
@@ -0,0 +1,611 @@
+# $Id: mutex.ac,v 11.38 2002/07/25 20:07:52 sue Exp $
+
+# POSIX pthreads tests: inter-process safe and intra-process only.
+#
+# We need to run a test here, because the PTHREAD_PROCESS_SHARED flag compiles
+# fine on problematic systems, but won't actually work. This is a problem for
+# cross-compilation environments. I think inter-process mutexes are as likely
+# to fail in cross-compilation environments as in native ones (especially since the
+# likely cross-compilation environment is Linux, where inter-process mutexes
+# don't currently work -- the latest estimate I've heard is Q1 2002, as part
+# of IBM's NGPT package). So:
+#
+# If checking for inter-process pthreads mutexes:
+# If it's local, run a test.
+# If it's a cross-compilation, fail.
+#
+# If the user specified pthreads mutexes and we're checking for intra-process
+# mutexes only:
+# If it's local, run a test.
+# If it's a cross-compilation, run a link-test.
+#
+# So, the thing you can't do here is configure for inter-process POSIX pthread
+# mutexes when cross-compiling. Since we're using the GNU/Cygnus toolchain for
+# cross-compilation, the target system is likely Linux or *BSD, so we're doing
+# the right thing.
+AC_DEFUN(AM_PTHREADS_SHARED, [
+AC_TRY_RUN([
+#include <pthread.h>
+main() {
+ pthread_cond_t cond;
+ pthread_mutex_t mutex;
+ pthread_condattr_t condattr;
+ pthread_mutexattr_t mutexattr;
+ exit (
+ pthread_condattr_init(&condattr) ||
+ pthread_condattr_setpshared(&condattr, PTHREAD_PROCESS_SHARED) ||
+ pthread_mutexattr_init(&mutexattr) ||
+ pthread_mutexattr_setpshared(&mutexattr, PTHREAD_PROCESS_SHARED) ||
+ pthread_cond_init(&cond, &condattr) ||
+ pthread_mutex_init(&mutex, &mutexattr) ||
+ pthread_mutex_lock(&mutex) ||
+ pthread_mutex_unlock(&mutex) ||
+ pthread_mutex_destroy(&mutex) ||
+ pthread_cond_destroy(&cond) ||
+ pthread_condattr_destroy(&condattr) ||
+ pthread_mutexattr_destroy(&mutexattr));
+}], [db_cv_mutex="$1"],, [db_cv_mutex="no"])])
+AC_DEFUN(AM_PTHREADS_PRIVATE, [
+AC_TRY_RUN([
+#include <pthread.h>
+main() {
+ pthread_cond_t cond;
+ pthread_mutex_t mutex;
+ pthread_condattr_t condattr;
+ pthread_mutexattr_t mutexattr;
+ exit (
+ pthread_condattr_init(&condattr) ||
+ pthread_mutexattr_init(&mutexattr) ||
+ pthread_cond_init(&cond, &condattr) ||
+ pthread_mutex_init(&mutex, &mutexattr) ||
+ pthread_mutex_lock(&mutex) ||
+ pthread_mutex_unlock(&mutex) ||
+ pthread_mutex_destroy(&mutex) ||
+ pthread_cond_destroy(&cond) ||
+ pthread_condattr_destroy(&condattr) ||
+ pthread_mutexattr_destroy(&mutexattr));
+}], [db_cv_mutex="$1"],,
+AC_TRY_LINK([
+#include <pthread.h>],[
+ pthread_cond_t cond;
+ pthread_mutex_t mutex;
+ pthread_condattr_t condattr;
+ pthread_mutexattr_t mutexattr;
+ exit (
+ pthread_condattr_init(&condattr) ||
+ pthread_mutexattr_init(&mutexattr) ||
+ pthread_cond_init(&cond, &condattr) ||
+ pthread_mutex_init(&mutex, &mutexattr) ||
+ pthread_mutex_lock(&mutex) ||
+ pthread_mutex_unlock(&mutex) ||
+ pthread_mutex_destroy(&mutex) ||
+ pthread_cond_destroy(&cond) ||
+ pthread_condattr_destroy(&condattr) ||
+ pthread_mutexattr_destroy(&mutexattr));
+], [db_cv_mutex="$1"]))])
+
+# Figure out mutexes for this compiler/architecture.
+AC_DEFUN(AM_DEFINE_MUTEXES, [
+
+# Mutexes we don't test for, but want the #defines to exist for
+# other ports.
+AH_TEMPLATE(HAVE_MUTEX_VMS, [Define to 1 to use VMS mutexes.])
+AH_TEMPLATE(HAVE_MUTEX_VXWORKS, [Define to 1 to use VxWorks mutexes.])
+AH_TEMPLATE(HAVE_MUTEX_WIN32, [Define to 1 to use Windows mutexes.])
+
+AC_CACHE_CHECK([for mutexes], db_cv_mutex, [
+db_cv_mutex=no
+
+orig_libs=$LIBS
+
+# User-specified POSIX or UI mutexes.
+#
+# There are two different reasons to specify mutexes: First, the application
+# is already using one type of mutex and doesn't want to mix-and-match (for
+# example, on Solaris, which has POSIX, UI and LWP mutexes). Second, the
+# application's POSIX pthreads mutexes don't support inter-process locking,
+# but the application wants to use them anyway (for example, current Linux
+# and *BSD systems).
+#
+# If we're on Solaris, we insist that -lthread or -lpthread be used. The
+# problem is the Solaris C library has UI/POSIX interface stubs, but they're
+# broken: configuring them for inter-process mutexes doesn't return an error,
+# but it doesn't work either. Otherwise, we try first without the library
+# and then with it: there's some information that SCO/UnixWare/OpenUNIX needs
+# this. [#4950]
+#
+# Test for LWP threads before testing for UI/POSIX threads, we prefer them
+# on Solaris. There's a bug in SunOS 5.7 where applications get pwrite, not
+# pwrite64, if they load the C library before the appropriate threads library,
+# e.g., tclsh using dlopen to load the DB library. By using LWP threads we
+# avoid answering lots of user questions, not to mention the bugs.
+if test "$db_cv_posixmutexes" = yes; then
+ case "$host_os" in
+ solaris*)
+ db_cv_mutex="posix_library_only";;
+ *)
+ db_cv_mutex="posix_only";;
+ esac
+fi
+
+if test "$db_cv_uimutexes" = yes; then
+ case "$host_os" in
+ solaris*)
+ db_cv_mutex="ui_library_only";;
+ *)
+ db_cv_mutex="ui_only";;
+ esac
+fi
+
+# LWP threads: _lwp_XXX
+if test "$db_cv_mutex" = no; then
+AC_TRY_LINK([
+#include <synch.h>],[
+ static lwp_mutex_t mi = SHAREDMUTEX;
+ static lwp_cond_t ci = SHAREDCV;
+ lwp_mutex_t mutex = mi;
+ lwp_cond_t cond = ci;
+ exit (
+ _lwp_mutex_lock(&mutex) ||
+ _lwp_mutex_unlock(&mutex));
+], [db_cv_mutex="Solaris/lwp"])
+fi
+
+# UI threads: thr_XXX
+#
+# Try with and without the -lthread library.
+if test "$db_cv_mutex" = no -o "$db_cv_mutex" = "ui_only"; then
+AC_TRY_LINK([
+#include <thread.h>
+#include <synch.h>],[
+ mutex_t mutex;
+ cond_t cond;
+ int type = USYNC_PROCESS;
+ exit (
+ mutex_init(&mutex, type, NULL) ||
+ cond_init(&cond, type, NULL) ||
+ mutex_lock(&mutex) ||
+ mutex_unlock(&mutex));
+], [db_cv_mutex="UI/threads"])
+fi
+if test "$db_cv_mutex" = no -o \
+ "$db_cv_mutex" = "ui_only" -o "$db_cv_mutex" = "ui_library_only"; then
+LIBS="$LIBS -lthread"
+AC_TRY_LINK([
+#include <thread.h>
+#include <synch.h>],[
+ mutex_t mutex;
+ cond_t cond;
+ int type = USYNC_PROCESS;
+ exit (
+ mutex_init(&mutex, type, NULL) ||
+ cond_init(&cond, type, NULL) ||
+ mutex_lock(&mutex) ||
+ mutex_unlock(&mutex));
+], [db_cv_mutex="UI/threads/library"])
+LIBS="$orig_libs"
+fi
+if test "$db_cv_mutex" = "ui_only" -o "$db_cv_mutex" = "ui_library_only"; then
+ AC_MSG_ERROR([unable to find UI mutex interfaces])
+fi
+
+# POSIX.1 pthreads: pthread_XXX
+#
+# Try with and without the -lpthread library. If the user specified we use
+# POSIX pthreads mutexes, and we fail to find the full interface, try and
+# configure for just intra-process support.
+if test "$db_cv_mutex" = no -o "$db_cv_mutex" = "posix_only"; then
+ AM_PTHREADS_SHARED("POSIX/pthreads")
+fi
+if test "$db_cv_mutex" = no -o \
+ "$db_cv_mutex" = "posix_only" -o "$db_cv_mutex" = "posix_library_only"; then
+ LIBS="$LIBS -lpthread"
+ AM_PTHREADS_SHARED("POSIX/pthreads/library")
+ LIBS="$orig_libs"
+fi
+if test "$db_cv_mutex" = "posix_only"; then
+ AM_PTHREADS_PRIVATE("POSIX/pthreads/private")
+fi
+if test "$db_cv_mutex" = "posix_only" -o \
+ "$db_cv_mutex" = "posix_library_only"; then
+ LIBS="$LIBS -lpthread"
+ AM_PTHREADS_PRIVATE("POSIX/pthreads/library/private")
+ LIBS="$orig_libs"
+fi
+
+if test "$db_cv_mutex" = "posix_only" -o \
+ "$db_cv_mutex" = "posix_library_only"; then
+ AC_MSG_ERROR([unable to find POSIX 1003.1 mutex interfaces])
+fi
+
+# msemaphore: HPPA only
+# Try HPPA before general msem test, it needs special alignment.
+if test "$db_cv_mutex" = no; then
+AC_TRY_LINK([
+#include <sys/mman.h>],[
+#if defined(__hppa)
+ typedef msemaphore tsl_t;
+ msemaphore x;
+ msem_init(&x, 0);
+ msem_lock(&x, 0);
+ msem_unlock(&x, 0);
+ exit(0);
+#else
+ FAIL TO COMPILE/LINK
+#endif
+], [db_cv_mutex="HP/msem_init"])
+fi
+
+# msemaphore: AIX, OSF/1
+if test "$db_cv_mutex" = no; then
+AC_TRY_LINK([
+#include <sys/types.h>
+#include <sys/mman.h>],[
+ typedef msemaphore tsl_t;
+ msemaphore x;
+ msem_init(&x, 0);
+ msem_lock(&x, 0);
+ msem_unlock(&x, 0);
+ exit(0);
+], [db_cv_mutex="UNIX/msem_init"])
+fi
+
+# ReliantUNIX
+if test "$db_cv_mutex" = no; then
+LIBS="$LIBS -lmproc"
+AC_TRY_LINK([
+#include <ulocks.h>],[
+ typedef spinlock_t tsl_t;
+ spinlock_t x;
+ initspin(&x, 1);
+ cspinlock(&x);
+ spinunlock(&x);
+], [db_cv_mutex="ReliantUNIX/initspin"])
+LIBS="$orig_libs"
+fi
+
+# SCO: UnixWare has threads in libthread, but OpenServer doesn't.
+if test "$db_cv_mutex" = no; then
+AC_TRY_COMPILE(,[
+#if defined(__USLC__)
+ exit(0);
+#else
+ FAIL TO COMPILE/LINK
+#endif
+], [db_cv_mutex="SCO/x86/cc-assembly"])
+fi
+
+# abilock_t: SGI
+if test "$db_cv_mutex" = no; then
+AC_TRY_LINK([
+#include <abi_mutex.h>],[
+ typedef abilock_t tsl_t;
+ abilock_t x;
+ init_lock(&x);
+ acquire_lock(&x);
+ release_lock(&x);
+], [db_cv_mutex="SGI/init_lock"])
+fi
+
+# sema_t: Solaris
+# The sema_XXX calls do not work on Solaris 5.5. I see no reason to ever
+# turn this test on, unless we find some other platform that uses the old
+# POSIX.1 interfaces. (I plan to move directly to pthreads on Solaris.)
+if test "$db_cv_mutex" = DOESNT_WORK; then
+AC_TRY_LINK([
+#include <synch.h>],[
+ typedef sema_t tsl_t;
+ sema_t x;
+ sema_init(&x, 1, USYNC_PROCESS, NULL);
+ sema_wait(&x);
+ sema_post(&x);
+], [db_cv_mutex="UNIX/sema_init"])
+fi
+
+# _lock_try/_lock_clear: Solaris
+# On Solaris systems without Pthread or UI mutex interfaces, DB uses the
+# undocumented _lock_try _lock_clear function calls instead of either the
+# sema_trywait(3T) or sema_wait(3T) function calls. This is because of
+# problems in those interfaces in some releases of the Solaris C library.
+if test "$db_cv_mutex" = no; then
+AC_TRY_LINK([
+#include <sys/machlock.h>],[
+ typedef lock_t tsl_t;
+ lock_t x;
+ _lock_try(&x);
+ _lock_clear(&x);
+], [db_cv_mutex="Solaris/_lock_try"])
+fi
+
+# _check_lock/_clear_lock: AIX
+if test "$db_cv_mutex" = no; then
+AC_TRY_LINK([
+#include <sys/atomic_op.h>],[
+ int x;
+ _check_lock(&x,0,1);
+ _clear_lock(&x,0);
+], [db_cv_mutex="AIX/_check_lock"])
+fi
+
+# Alpha/gcc: OSF/1
+if test "$db_cv_mutex" = no; then
+AC_TRY_COMPILE(,[
+#if defined(__alpha) && defined(__GNUC__)
+ exit(0);
+#else
+ FAIL TO COMPILE/LINK
+#endif
+], [db_cv_mutex="ALPHA/gcc-assembly"])
+fi
+
+# ARM/gcc: Linux
+if test "$db_cv_mutex" = no; then
+AC_TRY_COMPILE(,[
+#if defined(__arm__) && defined(__GNUC__)
+ exit(0);
+#else
+ FAIL TO COMPILE/LINK
+#endif
+], [db_cv_mutex="ARM/gcc-assembly"])
+fi
+
+# PaRisc/gcc: HP/UX
+if test "$db_cv_mutex" = no; then
+AC_TRY_COMPILE(,[
+#if (defined(__hppa) || defined(__hppa__)) && defined(__GNUC__)
+ exit(0);
+#else
+ FAIL TO COMPILE/LINK
+#endif
+], [db_cv_mutex="HPPA/gcc-assembly"])
+fi
+
+# PPC/gcc:
+# Test for Apple first, it requires slightly different assembly.
+if test "$db_cv_mutex" = no; then
+AC_TRY_COMPILE(,[
+#if (defined(__powerpc__) || defined(__ppc__)) && defined(__GNUC__) && defined(__APPLE__)
+ exit(0);
+#else
+ FAIL TO COMPILE/LINK
+#endif
+], [db_cv_mutex="PPC_APPLE/gcc-assembly"])
+fi
+if test "$db_cv_mutex" = no; then
+AC_TRY_COMPILE(,[
+#if (defined(__powerpc__) || defined(__ppc__)) && defined(__GNUC__)
+ exit(0);
+#else
+ FAIL TO COMPILE/LINK
+#endif
+], [db_cv_mutex="PPC_GENERIC/gcc-assembly"])
+fi
+
+# Sparc/gcc: SunOS, Solaris
+if test "$db_cv_mutex" = no; then
+AC_TRY_COMPILE(,[
+#if defined(__sparc__) && defined(__GNUC__)
+ exit(0);
+#else
+ FAIL TO COMPILE/LINK
+#endif
+], [db_cv_mutex="Sparc/gcc-assembly"])
+fi
+
+# 68K/gcc: SunOS
+if test "$db_cv_mutex" = no; then
+AC_TRY_COMPILE(,[
+#if (defined(mc68020) || defined(sun3)) && defined(__GNUC__)
+ exit(0);
+#else
+ FAIL TO COMPILE/LINK
+#endif
+], [db_cv_mutex="68K/gcc-assembly"])
+fi
+
+# x86/gcc: FreeBSD, NetBSD, BSD/OS, Linux
+if test "$db_cv_mutex" = no; then
+AC_TRY_COMPILE(,[
+#if (defined(i386) || defined(__i386__)) && defined(__GNUC__)
+ exit(0);
+#else
+ FAIL TO COMPILE/LINK
+#endif
+], [db_cv_mutex="x86/gcc-assembly"])
+fi
+
+# S390/gcc: Linux
+if test "$db_cv_mutex" = no; then
+AC_TRY_COMPILE(,[
+#if defined(__s390__) && defined(__GNUC__)
+ exit(0);
+#else
+ FAIL TO COMPILE/LINK
+#endif
+], [db_cv_mutex="S390/gcc-assembly"])
+fi
+
+# ia64/gcc: Linux
+if test "$db_cv_mutex" = no; then
+AC_TRY_COMPILE(,[
+#if defined(__ia64) && defined(__GNUC__)
+ exit(0);
+#else
+ FAIL TO COMPILE/LINK
+#endif
+], [db_cv_mutex="ia64/gcc-assembly"])
+fi
+
+# uts/cc: UTS
+if test "$db_cv_mutex" = no; then
+AC_TRY_COMPILE(,[
+#if defined(_UTS)
+ exit(0);
+#else
+ FAIL TO COMPILE/LINK
+#endif
+], [db_cv_mutex="UTS/cc-assembly"])
+fi
+
+# default to UNIX fcntl system call mutexes.
+if test "$db_cv_mutex" = no; then
+ db_cv_mutex="UNIX/fcntl"
+fi
+])
+
+case "$db_cv_mutex" in
+68K/gcc-assembly) ADDITIONAL_OBJS="mut_tas${o} $ADDITIONAL_OBJS"
+ AC_DEFINE(HAVE_MUTEX_68K_GCC_ASSEMBLY)
+ AH_TEMPLATE(HAVE_MUTEX_68K_GCC_ASSEMBLY,
+ [Define to 1 to use the GCC compiler and 68K assembly language mutexes.]);;
+AIX/_check_lock) ADDITIONAL_OBJS="mut_tas${o} $ADDITIONAL_OBJS"
+ AC_DEFINE(HAVE_MUTEX_AIX_CHECK_LOCK)
+ AH_TEMPLATE(HAVE_MUTEX_AIX_CHECK_LOCK,
+ [Define to 1 to use the AIX _check_lock mutexes.]);;
+ALPHA/gcc-assembly) ADDITIONAL_OBJS="mut_tas${o} $ADDITIONAL_OBJS"
+ AC_DEFINE(HAVE_MUTEX_ALPHA_GCC_ASSEMBLY)
+ AH_TEMPLATE(HAVE_MUTEX_ALPHA_GCC_ASSEMBLY,
+ [Define to 1 to use the GCC compiler and Alpha assembly language mutexes.]);;
+ARM/gcc-assembly) ADDITIONAL_OBJS="mut_tas${o} $ADDITIONAL_OBJS"
+ AC_DEFINE(HAVE_MUTEX_ARM_GCC_ASSEMBLY)
+ AH_TEMPLATE(HAVE_MUTEX_ARM_GCC_ASSEMBLY,
+ [Define to 1 to use the GCC compiler and ARM assembly language mutexes.]);;
+HP/msem_init) ADDITIONAL_OBJS="mut_tas${o} $ADDITIONAL_OBJS"
+ AC_DEFINE(HAVE_MUTEX_HPPA_MSEM_INIT)
+ AH_TEMPLATE(HAVE_MUTEX_HPPA_MSEM_INIT,
+ [Define to 1 to use the msem_XXX mutexes on HP-UX.]);;
+HPPA/gcc-assembly) ADDITIONAL_OBJS="mut_tas${o} $ADDITIONAL_OBJS"
+ AC_DEFINE(HAVE_MUTEX_HPPA_GCC_ASSEMBLY)
+ AH_TEMPLATE(HAVE_MUTEX_HPPA_GCC_ASSEMBLY,
+ [Define to 1 to use the GCC compiler and PaRisc assembly language mutexes.]);;
+ia64/gcc-assembly) ADDITIONAL_OBJS="mut_tas${o} $ADDITIONAL_OBJS"
+ AC_DEFINE(HAVE_MUTEX_IA64_GCC_ASSEMBLY)
+ AH_TEMPLATE(HAVE_MUTEX_IA64_GCC_ASSEMBLY,
+ [Define to 1 to use the GCC compiler and IA64 assembly language mutexes.]);;
+POSIX/pthreads) ADDITIONAL_OBJS="mut_pthread${o} $ADDITIONAL_OBJS"
+ AC_DEFINE(HAVE_MUTEX_PTHREADS)
+ AH_TEMPLATE(HAVE_MUTEX_PTHREADS,
+ [Define to 1 to use POSIX 1003.1 pthread_XXX mutexes.]);;
+POSIX/pthreads/private) ADDITIONAL_OBJS="mut_pthread${o} $ADDITIONAL_OBJS"
+ AC_DEFINE(HAVE_MUTEX_PTHREADS)
+ AH_TEMPLATE(HAVE_MUTEX_PTHREADS,
+ [Define to 1 to use POSIX 1003.1 pthread_XXX mutexes.])
+ AC_DEFINE(HAVE_MUTEX_THREAD_ONLY)
+ AH_TEMPLATE(HAVE_MUTEX_THREAD_ONLY,
+ [Define to 1 to configure mutexes intra-process only.]);;
+POSIX/pthreads/library) LIBS="$LIBS -lpthread"
+ ADDITIONAL_OBJS="mut_pthread${o} $ADDITIONAL_OBJS"
+ AC_DEFINE(HAVE_MUTEX_PTHREADS)
+ AH_TEMPLATE(HAVE_MUTEX_PTHREADS,
+ [Define to 1 to use POSIX 1003.1 pthread_XXX mutexes.]);;
+POSIX/pthreads/library/private)
+ LIBS="$LIBS -lpthread"
+ ADDITIONAL_OBJS="mut_pthread${o} $ADDITIONAL_OBJS"
+ AC_DEFINE(HAVE_MUTEX_PTHREADS)
+ AH_TEMPLATE(HAVE_MUTEX_PTHREADS,
+ [Define to 1 to use POSIX 1003.1 pthread_XXX mutexes.])
+ AC_DEFINE(HAVE_MUTEX_THREAD_ONLY)
+ AH_TEMPLATE(HAVE_MUTEX_THREAD_ONLY,
+ [Define to 1 to configure mutexes intra-process only.]);;
+PPC_GENERIC/gcc-assembly)
+ ADDITIONAL_OBJS="mut_tas${o} $ADDITIONAL_OBJS"
+ AC_DEFINE(HAVE_MUTEX_PPC_GENERIC_GCC_ASSEMBLY)
+ AH_TEMPLATE(HAVE_MUTEX_PPC_GENERIC_GCC_ASSEMBLY,
+ [Define to 1 to use the GCC compiler and generic PowerPC assembly language.]);;
+PPC_APPLE/gcc-assembly) ADDITIONAL_OBJS="mut_tas${o} $ADDITIONAL_OBJS"
+ AC_DEFINE(HAVE_MUTEX_PPC_APPLE_GCC_ASSEMBLY)
+ AH_TEMPLATE(HAVE_MUTEX_PPC_APPLE_GCC_ASSEMBLY,
+ [Define to 1 to use the GCC compiler and Apple PowerPC assembly language.]);;
+ReliantUNIX/initspin) LIBS="$LIBS -lmproc"
+ ADDITIONAL_OBJS="mut_tas${o} $ADDITIONAL_OBJS"
+ AC_DEFINE(HAVE_MUTEX_RELIANTUNIX_INITSPIN)
+ AH_TEMPLATE(HAVE_MUTEX_RELIANTUNIX_INITSPIN,
+ [Define to 1 to use Reliant UNIX initspin mutexes.]);;
+S390/gcc-assembly) ADDITIONAL_OBJS="mut_tas${o} $ADDITIONAL_OBJS"
+ AC_DEFINE(HAVE_MUTEX_S390_GCC_ASSEMBLY)
+ AH_TEMPLATE(HAVE_MUTEX_S390_GCC_ASSEMBLY,
+ [Define to 1 to use the GCC compiler and S/390 assembly language mutexes.]);;
+SCO/x86/cc-assembly) ADDITIONAL_OBJS="mut_tas${o} $ADDITIONAL_OBJS"
+ AC_DEFINE(HAVE_MUTEX_SCO_X86_CC_ASSEMBLY)
+ AH_TEMPLATE(HAVE_MUTEX_SCO_X86_CC_ASSEMBLY,
+ [Define to 1 to use the SCO compiler and x86 assembly language mutexes.]);;
+SGI/init_lock) ADDITIONAL_OBJS="mut_tas${o} $ADDITIONAL_OBJS"
+ AC_DEFINE(HAVE_MUTEX_SGI_INIT_LOCK)
+ AH_TEMPLATE(HAVE_MUTEX_SGI_INIT_LOCK,
+ [Define to 1 to use the SGI XXX_lock mutexes.]);;
+Solaris/_lock_try) ADDITIONAL_OBJS="mut_tas${o} $ADDITIONAL_OBJS"
+ AC_DEFINE(HAVE_MUTEX_SOLARIS_LOCK_TRY)
+ AH_TEMPLATE(HAVE_MUTEX_SOLARIS_LOCK_TRY,
+ [Define to 1 to use the Solaris _lock_XXX mutexes.]);;
+Solaris/lwp) ADDITIONAL_OBJS="mut_pthread${o} $ADDITIONAL_OBJS"
+ AC_DEFINE(HAVE_MUTEX_SOLARIS_LWP)
+ AH_TEMPLATE(HAVE_MUTEX_SOLARIS_LWP,
+ [Define to 1 to use the Solaris lwp threads mutexes.]);;
+Sparc/gcc-assembly) ADDITIONAL_OBJS="mut_tas${o} $ADDITIONAL_OBJS"
+ AC_DEFINE(HAVE_MUTEX_SPARC_GCC_ASSEMBLY)
+ AH_TEMPLATE(HAVE_MUTEX_SPARC_GCC_ASSEMBLY,
+ [Define to 1 to use the GCC compiler and Sparc assembly language mutexes.]);;
+UI/threads) ADDITIONAL_OBJS="mut_pthread${o} $ADDITIONAL_OBJS"
+ AC_DEFINE(HAVE_MUTEX_UI_THREADS)
+ AH_TEMPLATE(HAVE_MUTEX_UI_THREADS,
+ [Define to 1 to use the UNIX International mutexes.]);;
+UI/threads/library) LIBS="$LIBS -lthread"
+ ADDITIONAL_OBJS="mut_pthread${o} $ADDITIONAL_OBJS"
+ AC_DEFINE(HAVE_MUTEX_UI_THREADS)
+ AH_TEMPLATE(HAVE_MUTEX_UI_THREADS,
+ [Define to 1 to use the UNIX International mutexes.]);;
+UNIX/msem_init) ADDITIONAL_OBJS="mut_tas${o} $ADDITIONAL_OBJS"
+ AC_DEFINE(HAVE_MUTEX_MSEM_INIT)
+ AH_TEMPLATE(HAVE_MUTEX_MSEM_INIT,
+ [Define to 1 to use the msem_XXX mutexes on systems other than HP-UX.]);;
+UNIX/sema_init) ADDITIONAL_OBJS="mut_tas${o} $ADDITIONAL_OBJS"
+ AC_DEFINE(HAVE_MUTEX_SEMA_INIT)
+ AH_TEMPLATE(HAVE_MUTEX_SEMA_INIT,
+ [Define to 1 to use the obsolete POSIX 1003.1 sema_XXX mutexes.]);;
+UTS/cc-assembly) ADDITIONAL_OBJS="$ADDITIONAL_OBJS uts4.cc${o}"
+ AC_DEFINE(HAVE_MUTEX_UTS_CC_ASSEMBLY)
+ AH_TEMPLATE(HAVE_MUTEX_UTS_CC_ASSEMBLY,
+ [Define to 1 to use the UTS compiler and assembly language mutexes.]);;
+x86/gcc-assembly) ADDITIONAL_OBJS="mut_tas${o} $ADDITIONAL_OBJS"
+ AC_DEFINE(HAVE_MUTEX_X86_GCC_ASSEMBLY)
+ AH_TEMPLATE(HAVE_MUTEX_X86_GCC_ASSEMBLY,
+ [Define to 1 to use the GCC compiler and x86 assembly language mutexes.]);;
+UNIX/fcntl) AC_MSG_WARN(
+ [NO FAST MUTEXES FOUND FOR THIS COMPILER/ARCHITECTURE.])
+ ADDITIONAL_OBJS="mut_fcntl${o} $ADDITIONAL_OBJS"
+ AC_DEFINE(HAVE_MUTEX_FCNTL)
+ AH_TEMPLATE(HAVE_MUTEX_FCNTL,
+ [Define to 1 to use the UNIX fcntl system call mutexes.]);;
+*) AC_MSG_ERROR([Unknown mutex interface: $db_cv_mutex]);;
+esac
+
+if test "$db_cv_mutex" != "UNIX/fcntl"; then
+ AC_DEFINE(HAVE_MUTEX_THREADS)
+ AH_TEMPLATE(HAVE_MUTEX_THREADS,
+ [Define to 1 if fast mutexes are available.])
+fi
+
+# There are 3 classes of mutexes:
+#
+# 1: Mutexes requiring no cleanup, for example, test-and-set mutexes.
+# 2: Mutexes that must be destroyed, but which don't hold permanent system
+# resources, for example, pthread mutexes on MVS aka OS/390 aka z/OS.
+# 3: Mutexes that must be destroyed, even after the process is gone, for
+# example, pthread mutexes on QNX and binary semaphores on VxWorks.
+#
+# DB cannot currently distinguish between #2 and #3 because DB does not know
+# if the application is running environment recovery as part of startup and
+# does not need to do cleanup, or if the environment is being removed and/or
+# recovered in a loop in the application, and so does need to clean up. If
+# we get it wrong, we're going to call the mutex destroy routine on a random
+# piece of memory, which usually works, but just might drop core. For now,
+# we group #2 and #3 into the HAVE_MUTEX_SYSTEM_RESOURCES define, until we
+# have a better solution or reason to solve this in a general way -- so far,
+# the places we've needed to handle this are few.
+AH_TEMPLATE(HAVE_MUTEX_SYSTEM_RESOURCES,
+ [Define to 1 if mutexes hold system resources.])
+
+case "$host_os$db_cv_mutex" in
+*qnx*POSIX/pthread*|openedition*POSIX/pthread*)
+ AC_DEFINE(HAVE_MUTEX_SYSTEM_RESOURCES);;
+esac])
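
A usage sketch, not part of the BDB sources: the detection cascade in mutex.ac
is only a default. Assuming AM_OPTIONS_SET runs before AM_DEFINE_MUTEXES (as in
the stock configure script), options.ac copies a --with-mutex argument into
db_cv_mutex, so the cached check is skipped and the name is mapped directly by
the case statement above. For example, from a build directory in the usual BDB
layout:

    ../dist/configure --with-mutex=x86/gcc-assembly

Any name the case statement does not recognize stops configuration with
"Unknown mutex interface".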
diff --git a/bdb/dist/aclocal/mutex.m4 b/bdb/dist/aclocal/mutex.m4
deleted file mode 100644
index a6b1fa1a053..00000000000
--- a/bdb/dist/aclocal/mutex.m4
+++ /dev/null
@@ -1,407 +0,0 @@
-dnl $Id: mutex.m4,v 11.20 2000/12/20 22:16:56 bostic Exp $
-
-dnl Figure out mutexes for this compiler/architecture.
-AC_DEFUN(AM_DEFINE_MUTEXES, [
-
-AC_CACHE_CHECK([for mutexes], db_cv_mutex, [dnl
-db_cv_mutex=no
-
-orig_libs=$LIBS
-
-dnl User-specified POSIX mutexes.
-dnl
-dnl Assume that -lpthread exists when the user specifies POSIX mutexes. (I
-dnl only expect this option to be used on Solaris, which has -lpthread.)
-if test "$db_cv_posixmutexes" = yes; then
- db_cv_mutex="posix_only"
-fi
-
-dnl User-specified UI mutexes.
-dnl
-dnl Assume that -lthread exists when the user specifies UI mutexes. (I only
-dnl expect this option to be used on Solaris, which has -lthread.)
-if test "$db_cv_uimutexes" = yes; then
- db_cv_mutex="ui_only"
-fi
-
-dnl LWP threads: _lwp_XXX
-dnl
-dnl Test for LWP threads before testing for UI/POSIX threads, we prefer them
-dnl on Solaris. There are two reasons: the Solaris C library has UI/POSIX
-dnl interface stubs, but they're broken, configuring them for inter-process
-dnl mutexes doesn't return an error, but it doesn't work either. Second,
-dnl there's a bug in SunOS 5.7 where applications get pwrite, not pwrite64,
-dnl if they load the C library before the appropriate threads library, e.g.,
-dnl tclsh using dlopen to load the DB library. Anyway, by using LWP threads
-dnl we avoid answering lots of user questions, not to mention the bugs.
-if test "$db_cv_mutex" = no; then
-AC_TRY_RUN([
-#include <synch.h>
-main(){
- static lwp_mutex_t mi = SHAREDMUTEX;
- static lwp_cond_t ci = SHAREDCV;
- lwp_mutex_t mutex = mi;
- lwp_cond_t cond = ci;
- exit (
- _lwp_mutex_lock(&mutex) ||
- _lwp_mutex_unlock(&mutex));
-}], [db_cv_mutex="Solaris/lwp"])
-fi
-
-dnl UI threads: thr_XXX
-dnl
-dnl Try with and without the -lthread library.
-if test "$db_cv_mutex" = no -o "$db_cv_mutex" = "ui_only"; then
-LIBS="-lthread $LIBS"
-AC_TRY_RUN([
-#include <thread.h>
-#include <synch.h>
-main(){
- mutex_t mutex;
- cond_t cond;
- int type = USYNC_PROCESS;
- exit (
- mutex_init(&mutex, type, NULL) ||
- cond_init(&cond, type, NULL) ||
- mutex_lock(&mutex) ||
- mutex_unlock(&mutex));
-}], [db_cv_mutex="UI/threads/library"])
-LIBS="$orig_libs"
-fi
-if test "$db_cv_mutex" = no -o "$db_cv_mutex" = "ui_only"; then
-AC_TRY_RUN([
-#include <thread.h>
-#include <synch.h>
-main(){
- mutex_t mutex;
- cond_t cond;
- int type = USYNC_PROCESS;
- exit (
- mutex_init(&mutex, type, NULL) ||
- cond_init(&cond, type, NULL) ||
- mutex_lock(&mutex) ||
- mutex_unlock(&mutex));
-}], [db_cv_mutex="UI/threads"])
-fi
-if test "$db_cv_mutex" = "ui_only"; then
- AC_MSG_ERROR([unable to find UI mutex interfaces])
-fi
-
-
-dnl POSIX.1 pthreads: pthread_XXX
-dnl
-dnl Try with and without the -lpthread library.
-if test "$db_cv_mutex" = no -o "$db_cv_mutex" = "posix_only"; then
-AC_TRY_RUN([
-#include <pthread.h>
-main(){
- pthread_cond_t cond;
- pthread_mutex_t mutex;
- pthread_condattr_t condattr;
- pthread_mutexattr_t mutexattr;
- exit (
- pthread_condattr_init(&condattr) ||
- pthread_condattr_setpshared(&condattr, PTHREAD_PROCESS_SHARED) ||
- pthread_mutexattr_init(&mutexattr) ||
- pthread_mutexattr_setpshared(&mutexattr, PTHREAD_PROCESS_SHARED) ||
- pthread_cond_init(&cond, &condattr) ||
- pthread_mutex_init(&mutex, &mutexattr) ||
- pthread_mutex_lock(&mutex) ||
- pthread_mutex_unlock(&mutex) ||
- pthread_mutex_destroy(&mutex) ||
- pthread_cond_destroy(&cond) ||
- pthread_condattr_destroy(&condattr) ||
- pthread_mutexattr_destroy(&mutexattr));
-}], [db_cv_mutex="POSIX/pthreads"])
-fi
-if test "$db_cv_mutex" = no -o "$db_cv_mutex" = "posix_only"; then
-LIBS="-lpthread $LIBS"
-AC_TRY_RUN([
-#include <pthread.h>
-main(){
- pthread_cond_t cond;
- pthread_mutex_t mutex;
- pthread_condattr_t condattr;
- pthread_mutexattr_t mutexattr;
- exit (
- pthread_condattr_init(&condattr) ||
- pthread_condattr_setpshared(&condattr, PTHREAD_PROCESS_SHARED) ||
- pthread_mutexattr_init(&mutexattr) ||
- pthread_mutexattr_setpshared(&mutexattr, PTHREAD_PROCESS_SHARED) ||
- pthread_cond_init(&cond, &condattr) ||
- pthread_mutex_init(&mutex, &mutexattr) ||
- pthread_mutex_lock(&mutex) ||
- pthread_mutex_unlock(&mutex) ||
- pthread_mutex_destroy(&mutex) ||
- pthread_cond_destroy(&cond) ||
- pthread_condattr_destroy(&condattr) ||
- pthread_mutexattr_destroy(&mutexattr));
-}], [db_cv_mutex="POSIX/pthreads/library"])
-LIBS="$orig_libs"
-fi
-if test "$db_cv_mutex" = "posix_only"; then
- AC_MSG_ERROR([unable to find POSIX mutex interfaces])
-fi
-
-dnl msemaphore: HPPA only
-dnl Try HPPA before general msem test, it needs special alignment.
-if test "$db_cv_mutex" = no; then
-AC_TRY_RUN([
-#include <sys/mman.h>
-main(){
-#if defined(__hppa)
- typedef msemaphore tsl_t;
- msemaphore x;
- msem_init(&x, 0);
- msem_lock(&x, 0);
- msem_unlock(&x, 0);
- exit(0);
-#else
- exit(1);
-#endif
-}], [db_cv_mutex="HP/msem_init"])
-fi
-
-dnl msemaphore: AIX, OSF/1
-if test "$db_cv_mutex" = no; then
-AC_TRY_RUN([
-#include <sys/types.h>
-#include <sys/mman.h>;
-main(){
- typedef msemaphore tsl_t;
- msemaphore x;
- msem_init(&x, 0);
- msem_lock(&x, 0);
- msem_unlock(&x, 0);
- exit(0);
-}], [db_cv_mutex="UNIX/msem_init"])
-fi
-
-dnl ReliantUNIX
-if test "$db_cv_mutex" = no; then
-LIBS="$LIBS -lmproc"
-AC_TRY_LINK([#include <ulocks.h>],
-[typedef spinlock_t tsl_t;
-spinlock_t x; initspin(&x, 1); cspinlock(&x); spinunlock(&x);],
-[db_cv_mutex="ReliantUNIX/initspin"])
-LIBS="$orig_libs"
-fi
-
-dnl SCO: UnixWare has threads in libthread, but OpenServer doesn't.
-if test "$db_cv_mutex" = no; then
-AC_TRY_RUN([
-main(){
-#if defined(__USLC__)
- exit(0);
-#endif
- exit(1);
-}], [db_cv_mutex="SCO/x86/cc-assembly"])
-fi
-
-dnl abilock_t: SGI
-if test "$db_cv_mutex" = no; then
-AC_TRY_LINK([#include <abi_mutex.h>],
-[typedef abilock_t tsl_t;
-abilock_t x; init_lock(&x); acquire_lock(&x); release_lock(&x);],
-[db_cv_mutex="SGI/init_lock"])
-fi
-
-dnl sema_t: Solaris
-dnl The sema_XXX calls do not work on Solaris 5.5. I see no reason to ever
-dnl turn this test on, unless we find some other platform that uses the old
-dnl POSIX.1 interfaces. (I plan to move directly to pthreads on Solaris.)
-if test "$db_cv_mutex" = DOESNT_WORK; then
-AC_TRY_LINK([#include <synch.h>],
-[typedef sema_t tsl_t;
- sema_t x;
- sema_init(&x, 1, USYNC_PROCESS, NULL); sema_wait(&x); sema_post(&x);],
-[db_cv_mutex="UNIX/sema_init"])
-fi
-
-dnl _lock_try/_lock_clear: Solaris
-dnl On Solaris systems without Pthread or UI mutex interfaces, DB uses the
-dnl undocumented _lock_try _lock_clear function calls instead of either the
-dnl sema_trywait(3T) or sema_wait(3T) function calls. This is because of
-dnl problems in those interfaces in some releases of the Solaris C library.
-if test "$db_cv_mutex" = no; then
-AC_TRY_LINK([#include <sys/machlock.h>],
-[typedef lock_t tsl_t;
- lock_t x;
- _lock_try(&x); _lock_clear(&x);],
-[db_cv_mutex="Solaris/_lock_try"])
-fi
-
-dnl _check_lock/_clear_lock: AIX
-if test "$db_cv_mutex" = no; then
-AC_TRY_LINK([#include <sys/atomic_op.h>],
-[int x; _check_lock(&x,0,1); _clear_lock(&x,0);],
-[db_cv_mutex="AIX/_check_lock"])
-fi
-
-dnl Alpha/gcc: OSF/1
-dnl The alpha/gcc code doesn't work as far as I know. There are
-dnl two versions, both have problems. See Support Request #1583.
-if test "$db_cv_mutex" = DOESNT_WORK; then
-AC_TRY_RUN([main(){
-#if defined(__alpha)
-#if defined(__GNUC__)
-exit(0);
-#endif
-#endif
-exit(1);}],
-[db_cv_mutex="ALPHA/gcc-assembly"])
-fi
-
-dnl PaRisc/gcc: HP/UX
-if test "$db_cv_mutex" = no; then
-AC_TRY_RUN([main(){
-#if defined(__hppa)
-#if defined(__GNUC__)
-exit(0);
-#endif
-#endif
-exit(1);}],
-[db_cv_mutex="HPPA/gcc-assembly"])
-fi
-
-dnl PPC/gcc:
-if test "$db_cv_mutex" = no; then
-AC_TRY_RUN([main(){
-#if defined(__powerpc__)
-#if defined(__GNUC__)
-exit(0);
-#endif
-#endif
-exit(1);}],
-[db_cv_mutex="PPC/gcc-assembly"])
-fi
-
-dnl Sparc/gcc: SunOS, Solaris
-dnl The sparc/gcc code doesn't always work, specifically, I've seen assembler
-dnl failures from the stbar instruction on SunOS 4.1.4/sun4c and gcc 2.7.2.2.
-if test "$db_cv_mutex" = DOESNT_WORK; then
-AC_TRY_RUN([main(){
-#if defined(__sparc__)
-#if defined(__GNUC__)
- exit(0);
-#endif
-#endif
- exit(1);
-}], [db_cv_mutex="Sparc/gcc-assembly"])
-fi
-
-dnl 68K/gcc: SunOS
-if test "$db_cv_mutex" = no; then
-AC_TRY_RUN([main(){
-#if (defined(mc68020) || defined(sun3))
-#if defined(__GNUC__)
- exit(0);
-#endif
-#endif
- exit(1);
-}], [db_cv_mutex="68K/gcc-assembly"])
-fi
-
-dnl x86/gcc: FreeBSD, NetBSD, BSD/OS, Linux
-if test "$db_cv_mutex" = no; then
-AC_TRY_RUN([main(){
-#if defined(i386) || defined(__i386__)
-#if defined(__GNUC__)
- exit(0);
-#endif
-#endif
- exit(1);
-}], [db_cv_mutex="x86/gcc-assembly"])
-fi
-
-dnl x86_64/gcc: FreeBSD, NetBSD, BSD/OS, Linux
-if test "$db_cv_mutex" = no; then
-AC_TRY_RUN([main(){
-#if defined(x86_64) || defined(__x86_64__)
-#if defined(__GNUC__)
- exit(0);
-#endif
-#endif
- exit(1);
-}], [db_cv_mutex="x86_64/gcc-assembly"])
-fi
-
-dnl ia86/gcc: Linux
-if test "$db_cv_mutex" = no; then
-AC_TRY_RUN([main(){
-#if defined(__ia64)
-#if defined(__GNUC__)
- exit(0);
-#endif
-#endif
- exit(1);
-}], [db_cv_mutex="ia64/gcc-assembly"])
-fi
-
-dnl: uts/cc: UTS
-if test "$db_cv_mutex" = no; then
-AC_TRY_RUN([main(){
-#if defined(_UTS)
- exit(0);
-#endif
- exit(1);
-}], [db_cv_mutex="UTS/cc-assembly"])
-fi
-])
-
-if test "$db_cv_mutex" = no; then
- AC_MSG_WARN(
- [THREAD MUTEXES NOT AVAILABLE FOR THIS COMPILER/ARCHITECTURE.])
- ADDITIONAL_OBJS="mut_fcntl${o} $ADDITIONAL_OBJS"
-else
- AC_DEFINE(HAVE_MUTEX_THREADS)
-fi
-
-case "$db_cv_mutex" in
-68K/gcc-assembly) ADDITIONAL_OBJS="mut_tas${o} $ADDITIONAL_OBJS"
- AC_DEFINE(HAVE_MUTEX_68K_GCC_ASSEMBLY);;
-AIX/_check_lock) ADDITIONAL_OBJS="mut_tas${o} $ADDITIONAL_OBJS"
- AC_DEFINE(HAVE_MUTEX_AIX_CHECK_LOCK);;
-ALPHA/gcc-assembly) ADDITIONAL_OBJS="mut_tas${o} $ADDITIONAL_OBJS"
- AC_DEFINE(HAVE_MUTEX_ALPHA_GCC_ASSEMBLY);;
-HP/msem_init) ADDITIONAL_OBJS="mut_tas${o} $ADDITIONAL_OBJS"
- AC_DEFINE(HAVE_MUTEX_HPPA_MSEM_INIT);;
-HPPA/gcc-assembly) ADDITIONAL_OBJS="mut_tas${o} $ADDITIONAL_OBJS"
- AC_DEFINE(HAVE_MUTEX_HPPA_GCC_ASSEMBLY);;
-ia64/gcc-assembly) ADDITIONAL_OBJS="mut_tas${o} $ADDITIONAL_OBJS"
- AC_DEFINE(HAVE_MUTEX_IA64_GCC_ASSEMBLY);;
-POSIX/pthreads) ADDITIONAL_OBJS="mut_pthread${o} $ADDITIONAL_OBJS"
- AC_DEFINE(HAVE_MUTEX_PTHREADS);;
-POSIX/pthreads/library) LIBS="-lpthread $LIBS"
- ADDITIONAL_OBJS="mut_pthread${o} $ADDITIONAL_OBJS"
- AC_DEFINE(HAVE_MUTEX_PTHREADS);;
-PPC/gcc-assembly) ADDITIONAL_OBJS="mut_tas${o} $ADDITIONAL_OBJS"
- AC_DEFINE(HAVE_MUTEX_PPC_GCC_ASSEMBLY);;
-ReliantUNIX/initspin) LIBS="$LIBS -lmproc"
- ADDITIONAL_OBJS="mut_tas${o} $ADDITIONAL_OBJS"
- AC_DEFINE(HAVE_MUTEX_RELIANTUNIX_INITSPIN);;
-SCO/x86/cc-assembly) ADDITIONAL_OBJS="mut_tas${o} $ADDITIONAL_OBJS"
- AC_DEFINE(HAVE_MUTEX_SCO_X86_CC_ASSEMBLY);;
-SGI/init_lock) ADDITIONAL_OBJS="mut_tas${o} $ADDITIONAL_OBJS"
- AC_DEFINE(HAVE_MUTEX_SGI_INIT_LOCK);;
-Solaris/_lock_try) ADDITIONAL_OBJS="mut_tas${o} $ADDITIONAL_OBJS"
- AC_DEFINE(HAVE_MUTEX_SOLARIS_LOCK_TRY);;
-Solaris/lwp) ADDITIONAL_OBJS="mut_pthread${o} $ADDITIONAL_OBJS"
- AC_DEFINE(HAVE_MUTEX_SOLARIS_LWP);;
-Sparc/gcc-assembly) ADDITIONAL_OBJS="mut_tas${o} $ADDITIONAL_OBJS"
- AC_DEFINE(HAVE_MUTEX_SPARC_GCC_ASSEMBLY);;
-UI/threads) ADDITIONAL_OBJS="mut_pthread${o} $ADDITIONAL_OBJS"
- AC_DEFINE(HAVE_MUTEX_UI_THREADS);;
-UI/threads/library) LIBS="-lthread $LIBS"
- ADDITIONAL_OBJS="mut_pthread${o} $ADDITIONAL_OBJS"
- AC_DEFINE(HAVE_MUTEX_UI_THREADS);;
-UNIX/msem_init) ADDITIONAL_OBJS="mut_tas${o} $ADDITIONAL_OBJS"
- AC_DEFINE(HAVE_MUTEX_MSEM_INIT);;
-UNIX/sema_init) ADDITIONAL_OBJS="mut_tas${o} $ADDITIONAL_OBJS"
- AC_DEFINE(HAVE_MUTEX_SEMA_INIT);;
-UTS/cc-assembly) ADDITIONAL_OBJS="$ADDITIONAL_OBJS uts4.cc${o}"
- AC_DEFINE(HAVE_MUTEX_UTS_CC_ASSEMBLY);;
-x86/gcc-assembly) ADDITIONAL_OBJS="mut_tas${o} $ADDITIONAL_OBJS"
- AC_DEFINE(HAVE_MUTEX_X86_GCC_ASSEMBLY);;
-esac
-])dnl
diff --git a/bdb/dist/aclocal/options.ac b/bdb/dist/aclocal/options.ac
new file mode 100644
index 00000000000..ba45c34dfe9
--- /dev/null
+++ b/bdb/dist/aclocal/options.ac
@@ -0,0 +1,197 @@
+# $Id: options.ac,v 11.19 2002/06/25 19:31:48 bostic Exp $
+
+# Process user-specified options.
+AC_DEFUN(AM_OPTIONS_SET, [
+
+# --enable-bigfile was the configuration option that Berkeley DB used before
+# autoconf 2.50 was released (which had --enable-largefile integrated in).
+AC_ARG_ENABLE(bigfile,
+ [AC_HELP_STRING([--disable-bigfile],
+ [Obsolete; use --disable-largefile instead.])],
+ [AC_MSG_ERROR(
+ [--enable-bigfile no longer supported, use --enable-largefile])])
+
+AC_MSG_CHECKING(if --enable-compat185 option specified)
+AC_ARG_ENABLE(compat185,
+ [AC_HELP_STRING([--enable-compat185],
+ [Build DB 1.85 compatibility API.])],
+ [db_cv_compat185="$enable_compat185"], [db_cv_compat185="no"])
+AC_MSG_RESULT($db_cv_compat185)
+
+AC_MSG_CHECKING(if --enable-cxx option specified)
+AC_ARG_ENABLE(cxx,
+ [AC_HELP_STRING([--enable-cxx],
+ [Build C++ API.])],
+ [db_cv_cxx="$enable_cxx"], [db_cv_cxx="no"])
+AC_MSG_RESULT($db_cv_cxx)
+
+AC_MSG_CHECKING(if --enable-debug option specified)
+AC_ARG_ENABLE(debug,
+ [AC_HELP_STRING([--enable-debug],
+ [Build a debugging version.])],
+ [db_cv_debug="$enable_debug"], [db_cv_debug="no"])
+AC_MSG_RESULT($db_cv_debug)
+
+AC_MSG_CHECKING(if --enable-debug_rop option specified)
+AC_ARG_ENABLE(debug_rop,
+ [AC_HELP_STRING([--enable-debug_rop],
+ [Build a version that logs read operations.])],
+ [db_cv_debug_rop="$enable_debug_rop"], [db_cv_debug_rop="no"])
+AC_MSG_RESULT($db_cv_debug_rop)
+
+AC_MSG_CHECKING(if --enable-debug_wop option specified)
+AC_ARG_ENABLE(debug_wop,
+ [AC_HELP_STRING([--enable-debug_wop],
+ [Build a version that logs write operations.])],
+ [db_cv_debug_wop="$enable_debug_wop"], [db_cv_debug_wop="no"])
+AC_MSG_RESULT($db_cv_debug_wop)
+
+AC_MSG_CHECKING(if --enable-diagnostic option specified)
+AC_ARG_ENABLE(diagnostic,
+ [AC_HELP_STRING([--enable-diagnostic],
+ [Build a version with run-time diagnostics.])],
+ [db_cv_diagnostic="$enable_diagnostic"], [db_cv_diagnostic="no"])
+AC_MSG_RESULT($db_cv_diagnostic)
+
+AC_MSG_CHECKING(if --enable-dump185 option specified)
+AC_ARG_ENABLE(dump185,
+ [AC_HELP_STRING([--enable-dump185],
+ [Build db_dump185(1) to dump 1.85 databases.])],
+ [db_cv_dump185="$enable_dump185"], [db_cv_dump185="no"])
+AC_MSG_RESULT($db_cv_dump185)
+
+AC_MSG_CHECKING(if --enable-java option specified)
+AC_ARG_ENABLE(java,
+ [AC_HELP_STRING([--enable-java],
+ [Build Java API.])],
+ [db_cv_java="$enable_java"], [db_cv_java="no"])
+AC_MSG_RESULT($db_cv_java)
+
+AC_MSG_CHECKING(if --enable-posixmutexes option specified)
+AC_ARG_ENABLE(posixmutexes,
+ [AC_HELP_STRING([--enable-posixmutexes],
+ [Force use of POSIX standard mutexes.])],
+ [db_cv_posixmutexes="$enable_posixmutexes"], [db_cv_posixmutexes="no"])
+AC_MSG_RESULT($db_cv_posixmutexes)
+
+AC_MSG_CHECKING(if --enable-rpc option specified)
+AC_ARG_ENABLE(rpc,
+ [AC_HELP_STRING([--enable-rpc],
+ [Build RPC client/server.])],
+ [db_cv_rpc="$enable_rpc"], [db_cv_rpc="no"])
+AC_MSG_RESULT($db_cv_rpc)
+
+AC_MSG_CHECKING(if --enable-tcl option specified)
+AC_ARG_ENABLE(tcl,
+ [AC_HELP_STRING([--enable-tcl],
+ [Build Tcl API.])],
+ [db_cv_tcl="$enable_tcl"], [db_cv_tcl="no"])
+AC_MSG_RESULT($db_cv_tcl)
+
+AC_MSG_CHECKING(if --enable-test option specified)
+AC_ARG_ENABLE(test,
+ [AC_HELP_STRING([--enable-test],
+ [Configure to run the test suite.])],
+ [db_cv_test="$enable_test"], [db_cv_test="no"])
+AC_MSG_RESULT($db_cv_test)
+
+AC_MSG_CHECKING(if --enable-uimutexes option specified)
+AC_ARG_ENABLE(uimutexes,
+ [AC_HELP_STRING([--enable-uimutexes],
+ [Force use of Unix International mutexes.])],
+ [db_cv_uimutexes="$enable_uimutexes"], [db_cv_uimutexes="no"])
+AC_MSG_RESULT($db_cv_uimutexes)
+
+AC_MSG_CHECKING(if --enable-umrw option specified)
+AC_ARG_ENABLE(umrw,
+ [AC_HELP_STRING([--enable-umrw],
+ [Mask harmless uninitialized memory read/writes.])],
+ [db_cv_umrw="$enable_umrw"], [db_cv_umrw="no"])
+AC_MSG_RESULT($db_cv_umrw)
+
+AC_MSG_CHECKING([if --with-embedix=DIR option specified])
+AC_ARG_WITH(embedix,
+ [AC_HELP_STRING([--with-embedix=DIR],
+ [Embedix install directory location.])],
+ [with_embedix="$withval"], [with_embedix="no"])
+if test "$with_embedix" = "no"; then
+ db_cv_embedix="no"
+ AC_MSG_RESULT($with_embedix)
+else
+ db_cv_embedix="yes"
+ if test "$with_embedix" = "yes"; then
+ db_cv_path_embedix_install="/opt/Embedix"
+ else
+ db_cv_path_embedix_install="$with_embedix"
+ fi
+ AC_MSG_RESULT($db_cv_path_embedix_install)
+fi
+
+AC_MSG_CHECKING(if --with-mutex=MUTEX option specified)
+AC_ARG_WITH(mutex,
+ [AC_HELP_STRING([--with-mutex=MUTEX],
+ [Selection of non-standard mutexes.])],
+ [with_mutex="$withval"], [with_mutex="no"])
+if test "$with_mutex" = "yes"; then
+ AC_MSG_ERROR([--with-mutex requires a mutex name argument])
+fi
+if test "$with_mutex" != "no"; then
+ db_cv_mutex="$with_mutex"
+fi
+AC_MSG_RESULT($with_mutex)
+
+AC_MSG_CHECKING(if --with-rpm=DIR option specified)
+AC_ARG_WITH(rpm,
+ [AC_HELP_STRING([--with-rpm=DIR],
+ [Directory location of RPM archive.])],
+ [with_rpm="$withval"], [with_rpm="no"])
+if test "$with_rpm" = "no"; then
+ db_cv_rpm="no"
+else
+ if test "$with_rpm" = "yes"; then
+ AC_MSG_ERROR([--with-rpm requires a directory argument])
+ fi
+ db_cv_rpm="yes"
+ db_cv_path_rpm_archive="$with_rpm"
+fi
+AC_MSG_RESULT($with_rpm)
+
+AC_MSG_CHECKING([if --with-tcl=DIR option specified])
+AC_ARG_WITH(tcl,
+ [AC_HELP_STRING([--with-tcl=DIR],
+ [Directory location of tclConfig.sh.])],
+ [with_tclconfig="$withval"], [with_tclconfig="no"])
+AC_MSG_RESULT($with_tclconfig)
+if test "$with_tclconfig" != "no"; then
+ db_cv_tcl="yes"
+fi
+
+AC_MSG_CHECKING([if --with-uniquename=NAME option specified])
+AC_ARG_WITH(uniquename,
+ [AC_HELP_STRING([--with-uniquename=NAME],
+ [Build a uniquely named library.])],
+ [with_uniquename="$withval"], [with_uniquename="no"])
+if test "$with_uniquename" = "no"; then
+ db_cv_uniquename="no"
+ AC_MSG_RESULT($with_uniquename)
+else
+ db_cv_uniquename="yes"
+ if test "$with_uniquename" != "yes"; then
+ DB_VERSION_UNIQUE_NAME="$with_uniquename"
+ fi
+ AC_MSG_RESULT($DB_VERSION_UNIQUE_NAME)
+fi
+
+# Embedix requires RPM.
+if test "$db_cv_embedix" = "yes"; then
+ if test "$db_cv_rpm" = "no"; then
+ AC_MSG_ERROR([--with-embedix requires --with-rpm])
+ fi
+fi
+
+# Test requires Tcl
+if test "$db_cv_test" = "yes"; then
+ if test "$db_cv_tcl" = "no"; then
+ AC_MSG_ERROR([--enable-test requires --enable-tcl])
+ fi
+fi])
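
A configuration sketch (illustrative only; the option mix is an assumption, not
taken from this patch): the options defined above combine in the ordinary
autoconf way, e.g.

    ../dist/configure --enable-cxx --enable-rpc --with-mutex=POSIX/pthreads

Note the dependency checks at the end of the macro: --enable-test additionally
requires --enable-tcl, and --with-embedix requires --with-rpm.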
diff --git a/bdb/dist/aclocal/options.m4 b/bdb/dist/aclocal/options.m4
deleted file mode 100644
index c51a3952419..00000000000
--- a/bdb/dist/aclocal/options.m4
+++ /dev/null
@@ -1,121 +0,0 @@
-dnl $Id: options.m4,v 11.10 2000/07/07 15:50:39 bostic Exp $
-
-dnl Process user-specified options.
-AC_DEFUN(AM_OPTIONS_SET, [
-
-AC_MSG_CHECKING(if --disable-bigfile option specified)
-AC_ARG_ENABLE(bigfile,
- [ --disable-bigfile Disable AIX, HP/UX, Solaris big files.],
- [db_cv_bigfile="yes"], [db_cv_bigfile="no"])
-AC_MSG_RESULT($db_cv_bigfile)
-
-AC_MSG_CHECKING(if --enable-compat185 option specified)
-AC_ARG_ENABLE(compat185,
- [ --enable-compat185 Build DB 1.85 compatibility API.],
- [db_cv_compat185="$enable_compat185"], [db_cv_compat185="no"])
-AC_MSG_RESULT($db_cv_compat185)
-
-AC_MSG_CHECKING(if --enable-cxx option specified)
-AC_ARG_ENABLE(cxx,
- [ --enable-cxx Build C++ API.],
- [db_cv_cxx="$enable_cxx"], [db_cv_cxx="no"])
-AC_MSG_RESULT($db_cv_cxx)
-
-AC_MSG_CHECKING(if --enable-debug option specified)
-AC_ARG_ENABLE(debug,
- [ --enable-debug Build a debugging version.],
- [db_cv_debug="$enable_debug"], [db_cv_debug="no"])
-AC_MSG_RESULT($db_cv_debug)
-
-AC_MSG_CHECKING(if --enable-debug_rop option specified)
-AC_ARG_ENABLE(debug_rop,
- [ --enable-debug_rop Build a version that logs read operations.],
- [db_cv_debug_rop="$enable_debug_rop"], [db_cv_debug_rop="no"])
-AC_MSG_RESULT($db_cv_debug_rop)
-
-AC_MSG_CHECKING(if --enable-debug_wop option specified)
-AC_ARG_ENABLE(debug_wop,
- [ --enable-debug_wop Build a version that logs write operations.],
- [db_cv_debug_wop="$enable_debug_wop"], [db_cv_debug_wop="no"])
-AC_MSG_RESULT($db_cv_debug_wop)
-
-AC_MSG_CHECKING(if --enable-diagnostic option specified)
-AC_ARG_ENABLE(diagnostic,
- [ --enable-diagnostic Build a version with run-time diagnostics.],
- [db_cv_diagnostic="$enable_diagnostic"], [db_cv_diagnostic="no"])
-AC_MSG_RESULT($db_cv_diagnostic)
-
-AC_MSG_CHECKING(if --enable-dump185 option specified)
-AC_ARG_ENABLE(dump185,
- [ --enable-dump185 Build db_dump185(1) to dump 1.85 databases.],
- [db_cv_dump185="$enable_dump185"], [db_cv_dump185="no"])
-AC_MSG_RESULT($db_cv_dump185)
-
-AC_MSG_CHECKING(if --enable-dynamic option specified)
-AC_ARG_ENABLE(dynamic,
- [ --enable-dynamic Build with dynamic libraries.],
- [db_cv_dynamic="$enable_dynamic"], [db_cv_dynamic="no"])
-AC_MSG_RESULT($db_cv_dynamic)
-
-AC_MSG_CHECKING(if --enable-java option specified)
-AC_ARG_ENABLE(java,
- [ --enable-java Build Java API.],
- [db_cv_java="$enable_java"], [db_cv_java="no"])
-AC_MSG_RESULT($db_cv_java)
-
-AC_MSG_CHECKING(if --enable-posixmutexes option specified)
-AC_ARG_ENABLE(posixmutexes,
- [ --enable-posixmutexes Force use of POSIX standard mutexes.],
- [db_cv_posixmutexes="$enable_posixmutexes"], [db_cv_posixmutexes="no"])
-AC_MSG_RESULT($db_cv_posixmutexes)
-
-AC_MSG_CHECKING(if --enable-rpc option specified)
-AC_ARG_ENABLE(rpc,
- [ --enable-rpc Build RPC client/server.],
- [db_cv_rpc="$enable_rpc"], [db_cv_rpc="no"])
-AC_MSG_RESULT($db_cv_rpc)
-
-dnl --enable-shared is an alias for --enable-dynamic. We support it for
-dnl compatibility with other applications, e.g., Tcl.
-AC_MSG_CHECKING(if --enable-shared option specified)
-AC_ARG_ENABLE(shared,
- [ --enable-shared Build with dynamic libraries.],
- [db_cv_shared="$enable_shared"], [db_cv_shared="no"])
-AC_MSG_RESULT($db_cv_shared)
-if test "$db_cv_shared" != "no"; then
- db_cv_dynamic="yes"
-fi
-
-AC_MSG_CHECKING(if --enable-tcl option specified)
-AC_ARG_ENABLE(tcl,
- [ --enable-tcl Build Tcl API.],
- [db_cv_tcl="$enable_tcl"], [db_cv_tcl="no"])
-AC_MSG_RESULT($db_cv_tcl)
-
-AC_MSG_CHECKING(if --enable-test option specified)
-AC_ARG_ENABLE(test,
- [ --enable-test Configure to run the test suite.],
- [db_cv_test="$enable_test"], [db_cv_test="no"])
-AC_MSG_RESULT($db_cv_test)
-
-AC_MSG_CHECKING(if --enable-uimutexes option specified)
-AC_ARG_ENABLE(uimutexes,
- [ --enable-uimutexes Force use of Unix International mutexes.],
- [db_cv_uimutexes="$enable_uimutexes"], [db_cv_uimutexes="no"])
-AC_MSG_RESULT($db_cv_uimutexes)
-
-AC_MSG_CHECKING(if --enable-umrw option specified)
-AC_ARG_ENABLE(umrw,
- [ --enable-umrw Mask harmless uninitialized memory read/writes.],
- [db_cv_umrw="$enable_umrw"], [db_cv_umrw="no"])
-AC_MSG_RESULT($db_cv_umrw)
-
-AC_MSG_CHECKING([if --with-tcl option specified])
-AC_ARG_WITH(tcl,
- [ --with-tcl=DIR Directory location of tclConfig.sh.],
- with_tclconfig=${withval}, with_tclconfig="no")
-AC_MSG_RESULT($with_tclconfig)
-if test "$with_tclconfig" != "no"; then
- db_cv_tcl="yes"
-fi
-])dnl
diff --git a/bdb/dist/aclocal/programs.ac b/bdb/dist/aclocal/programs.ac
new file mode 100644
index 00000000000..7bfa1fa2646
--- /dev/null
+++ b/bdb/dist/aclocal/programs.ac
@@ -0,0 +1,80 @@
+# $Id: programs.ac,v 11.20 2001/09/24 02:09:25 bostic Exp $
+
+# Check for programs used in building/installation.
+AC_DEFUN(AM_PROGRAMS_SET, [
+
+AC_CHECK_TOOL(db_cv_path_ar, ar, missing_ar)
+if test "$db_cv_path_ar" = missing_ar; then
+ AC_MSG_ERROR([No ar utility found.])
+fi
+
+AC_CHECK_TOOL(db_cv_path_chmod, chmod, missing_chmod)
+if test "$db_cv_path_chmod" = missing_chmod; then
+ AC_MSG_ERROR([No chmod utility found.])
+fi
+
+AC_CHECK_TOOL(db_cv_path_cp, cp, missing_cp)
+if test "$db_cv_path_cp" = missing_cp; then
+ AC_MSG_ERROR([No cp utility found.])
+fi
+
+if test "$db_cv_rpm" = "yes"; then
+ AC_CHECK_TOOL(path_ldconfig, ldconfig, missing_ldconfig)
+ AC_PATH_PROG(db_cv_path_ldconfig, $path_ldconfig, missing_ldconfig)
+ if test "$db_cv_path_ldconfig" != missing_ldconfig; then
+ RPM_POST_INSTALL="%post -p $db_cv_path_ldconfig"
+ RPM_POST_UNINSTALL="%postun -p $db_cv_path_ldconfig"
+ fi
+fi
+
+AC_CHECK_TOOL(db_cv_path_ln, ln, missing_ln)
+if test "$db_cv_path_ln" = missing_ln; then
+ AC_MSG_ERROR([No ln utility found.])
+fi
+
+AC_CHECK_TOOL(db_cv_path_mkdir, mkdir, missing_mkdir)
+if test "$db_cv_path_mkdir" = missing_mkdir; then
+ AC_MSG_ERROR([No mkdir utility found.])
+fi
+
+# We need a complete path for ranlib, because it doesn't exist on some
+# architectures, where the ar utility packages the library itself.
+AC_CHECK_TOOL(path_ranlib, ranlib, missing_ranlib)
+AC_PATH_PROG(db_cv_path_ranlib, $path_ranlib, missing_ranlib)
+
+AC_CHECK_TOOL(db_cv_path_rm, rm, missing_rm)
+if test "$db_cv_path_rm" = missing_rm; then
+ AC_MSG_ERROR([No rm utility found.])
+fi
+
+if test "$db_cv_rpm" = "yes"; then
+ AC_CHECK_TOOL(db_cv_path_rpm, rpm, missing_rpm)
+ if test "$db_cv_path_rpm" = missing_rpm; then
+ AC_MSG_ERROR([No rpm utility found.])
+ fi
+fi
+
+# We need a complete path for sh, because some implementations of make
+# get upset if SHELL is set to just the command name.
+AC_CHECK_TOOL(path_sh, sh, missing_sh)
+AC_PATH_PROG(db_cv_path_sh, $path_sh, missing_sh)
+if test "$db_cv_path_sh" = missing_sh; then
+ AC_MSG_ERROR([No sh utility found.])
+fi
+
+# Don't strip the binaries if --enable-debug was specified.
+if test "$db_cv_debug" = yes; then
+ db_cv_path_strip=debug_build_no_strip
+else
+ AC_CHECK_TOOL(path_strip, strip, missing_strip)
+ AC_PATH_PROG(db_cv_path_strip, $path_strip, missing_strip)
+fi
+
+if test "$db_cv_test" = "yes"; then
+ AC_CHECK_TOOL(db_cv_path_kill, kill, missing_kill)
+ if test "$db_cv_path_kill" = missing_kill; then
+ AC_MSG_ERROR([No kill utility found.])
+ fi
+fi
+
+])
diff --git a/bdb/dist/aclocal/programs.m4 b/bdb/dist/aclocal/programs.m4
deleted file mode 100644
index 9ec04f4d8cd..00000000000
--- a/bdb/dist/aclocal/programs.m4
+++ /dev/null
@@ -1,48 +0,0 @@
-dnl $Id: programs.m4,v 11.11 2000/03/30 21:20:50 bostic Exp $
-
-dnl Check for programs used in building/installation.
-AC_DEFUN(AM_PROGRAMS_SET, [
-
-AC_PATH_PROG(db_cv_path_ar, ar, missing_ar)
-if test "$db_cv_path_ar" = missing_ar; then
- AC_MSG_ERROR([No ar utility found.])
-fi
-AC_PATH_PROG(db_cv_path_chmod, chmod, missing_chmod)
-if test "$db_cv_path_chmod" = missing_chmod; then
- AC_MSG_ERROR([No chmod utility found.])
-fi
-AC_PATH_PROG(db_cv_path_cp, cp, missing_cp)
-if test "$db_cv_path_cp" = missing_cp; then
- AC_MSG_ERROR([No cp utility found.])
-fi
-AC_PATH_PROG(db_cv_path_ln, ln, missing_ln)
-if test "$db_cv_path_ln" = missing_ln; then
- AC_MSG_ERROR([No ln utility found.])
-fi
-AC_PATH_PROG(db_cv_path_mkdir, mkdir, missing_mkdir)
-if test "$db_cv_path_mkdir" = missing_mkdir; then
- AC_MSG_ERROR([No mkdir utility found.])
-fi
-AC_PATH_PROG(db_cv_path_ranlib, ranlib, missing_ranlib)
-AC_PATH_PROG(db_cv_path_rm, rm, missing_rm)
-if test "$db_cv_path_rm" = missing_rm; then
- AC_MSG_ERROR([No rm utility found.])
-fi
-AC_PATH_PROG(db_cv_path_sh, sh, missing_sh)
-if test "$db_cv_path_sh" = missing_sh; then
- AC_MSG_ERROR([No sh utility found.])
-fi
-AC_PATH_PROG(db_cv_path_strip, strip, missing_strip)
-if test "$db_cv_path_strip" = missing_strip; then
- AC_MSG_ERROR([No strip utility found.])
-fi
-
-dnl Check for programs used in testing.
-if test "$db_cv_test" = "yes"; then
- AC_PATH_PROG(db_cv_path_kill, kill, missing_kill)
- if test "$db_cv_path_kill" = missing_kill; then
- AC_MSG_ERROR([No kill utility found.])
- fi
-fi
-
-])dnl
diff --git a/bdb/dist/aclocal/sosuffix.ac b/bdb/dist/aclocal/sosuffix.ac
new file mode 100644
index 00000000000..1197128293b
--- /dev/null
+++ b/bdb/dist/aclocal/sosuffix.ac
@@ -0,0 +1,69 @@
+# $Id: sosuffix.ac,v 1.1 2002/07/08 13:15:05 dda Exp $
+# Determine shared object suffixes.
+#
+# Our method is to use the libtool variable $library_names_spec,
+# set by using AC_PROG_LIBTOOL. This variable is a snippet of shell
+# defined in terms of $versuffix, $release, $libname, $module and $jnimodule.
+# We want to eval it and grab the suffix used for shared objects.
+# By setting $module and $jnimodule to yes/no, we obtain the suffixes
+# used to create dl-loadable, or Java-loadable modules.
+# On many (*nix) systems, these all evaluate to .so, but there
+# are some notable exceptions.
+
+# This macro is used internally to discover the suffix for the current
+# settings of $module and $jnimodule. The result is stored in $_SOSUFFIX.
+AC_DEFUN(_SOSUFFIX_INTERNAL, [
+ versuffix=""
+ release=""
+ libname=libfoo
+ eval library_names=\"$library_names_spec\"
+ _SOSUFFIX=`echo "$library_names" | sed -e 's/.*\.\([[a-zA-Z0-9_]]*\).*/\1/'`
+ if test "$_SOSUFFIX" = '' ; then
+ _SOSUFFIX=so
+ if test "$enable_shared" = "yes" && test "$_SOSUFFIX_MESSAGE" = ""; then
+ _SOSUFFIX_MESSAGE=yes
+ AC_MSG_WARN([libtool may not know about this architecture.])
+ AC_MSG_WARN([assuming .$_SOSUFFIX suffix for dynamic libraries.])
+ fi
+ fi
+])
+
+# SOSUFFIX_CONFIG will set the variable SOSUFFIX to be the
+# shared library extension used for general linking, not dlopen.
+AC_DEFUN(SOSUFFIX_CONFIG, [
+ AC_MSG_CHECKING([SOSUFFIX from libtool])
+ module=no
+ jnimodule=no
+ _SOSUFFIX_INTERNAL
+ SOSUFFIX=$_SOSUFFIX
+ AC_MSG_RESULT($SOSUFFIX)
+ AC_SUBST(SOSUFFIX)
+])
+
+# MODSUFFIX_CONFIG will set the variable MODSUFFIX to be the
+# shared library extension used for dlopen'ed modules.
+# To discover this, we set $module, simulating libtool's -module option.
+AC_DEFUN(MODSUFFIX_CONFIG, [
+ AC_MSG_CHECKING([MODSUFFIX from libtool])
+ module=yes
+ jnimodule=no
+ _SOSUFFIX_INTERNAL
+ MODSUFFIX=$_SOSUFFIX
+ AC_MSG_RESULT($MODSUFFIX)
+ AC_SUBST(MODSUFFIX)
+])
+
+# JMODSUFFIX_CONFIG will set the variable JMODSUFFIX to be the
+# shared library extension used for JNI modules opened by Java.
+# To discover this, we set $jnimodule, simulating libtool's -jnimodule option.
+# -jnimodule is currently a Sleepycat local extension to libtool.
+AC_DEFUN(JMODSUFFIX_CONFIG, [
+ AC_MSG_CHECKING([JMODSUFFIX from libtool])
+ module=yes
+ jnimodule=yes
+ _SOSUFFIX_INTERNAL
+ JMODSUFFIX=$_SOSUFFIX
+ AC_MSG_RESULT($JMODSUFFIX)
+ AC_SUBST(JMODSUFFIX)
+])
+
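
A minimal shell illustration (not from the distribution) of the suffix
extraction performed by _SOSUFFIX_INTERNAL once $library_names_spec has been
eval'ed: the sed expression keeps whatever follows the last dot in the
expansion. The sample strings are hypothetical libtool output.

    library_names="libfoo.so.2 libfoo.so"
    echo "$library_names" | sed -e 's/.*\.\([a-zA-Z0-9_]*\).*/\1/'    # prints: so

    library_names="libfoo.2.dylib libfoo.dylib"
    echo "$library_names" | sed -e 's/.*\.\([a-zA-Z0-9_]*\).*/\1/'    # prints: dylib

(The doubled brackets in the .ac file are m4 quoting; in plain shell the
character class is written with single brackets, as here.)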
diff --git a/bdb/dist/aclocal/tcl.ac b/bdb/dist/aclocal/tcl.ac
new file mode 100644
index 00000000000..80ed19c5a97
--- /dev/null
+++ b/bdb/dist/aclocal/tcl.ac
@@ -0,0 +1,136 @@
+# $Id: tcl.ac,v 11.14 2002/09/07 17:25:58 dda Exp $
+
+# The SC_* macros in this file are from the unix/tcl.m4 files in the Tcl
+# 8.3.0 distribution, with some minor changes. For this reason, license
+# terms for the Berkeley DB distribution dist/aclocal/tcl.m4 file are as
+# follows (copied from the license.terms file in the Tcl 8.3 distribution):
+#
+# This software is copyrighted by the Regents of the University of
+# California, Sun Microsystems, Inc., Scriptics Corporation,
+# and other parties. The following terms apply to all files associated
+# with the software unless explicitly disclaimed in individual files.
+#
+# The authors hereby grant permission to use, copy, modify, distribute,
+# and license this software and its documentation for any purpose, provided
+# that existing copyright notices are retained in all copies and that this
+# notice is included verbatim in any distributions. No written agreement,
+# license, or royalty fee is required for any of the authorized uses.
+# Modifications to this software may be copyrighted by their authors
+# and need not follow the licensing terms described here, provided that
+# the new terms are clearly indicated on the first page of each file where
+# they apply.
+#
+# IN NO EVENT SHALL THE AUTHORS OR DISTRIBUTORS BE LIABLE TO ANY PARTY
+# FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES
+# ARISING OUT OF THE USE OF THIS SOFTWARE, ITS DOCUMENTATION, OR ANY
+# DERIVATIVES THEREOF, EVEN IF THE AUTHORS HAVE BEEN ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+#
+# THE AUTHORS AND DISTRIBUTORS SPECIFICALLY DISCLAIM ANY WARRANTIES,
+# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE, AND NON-INFRINGEMENT. THIS SOFTWARE
+# IS PROVIDED ON AN "AS IS" BASIS, AND THE AUTHORS AND DISTRIBUTORS HAVE
+# NO OBLIGATION TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR
+# MODIFICATIONS.
+#
+# GOVERNMENT USE: If you are acquiring this software on behalf of the
+# U.S. government, the Government shall have only "Restricted Rights"
+# in the software and related documentation as defined in the Federal
+# Acquisition Regulations (FARs) in Clause 52.227.19 (c) (2). If you
+# are acquiring the software on behalf of the Department of Defense, the
+# software shall be classified as "Commercial Computer Software" and the
+# Government shall have only "Restricted Rights" as defined in Clause
+# 252.227-7013 (c) (1) of DFARs. Notwithstanding the foregoing, the
+# authors grant the U.S. Government and others acting in its behalf
+# permission to use and distribute the software in accordance with the
+# terms specified in this license.
+
+AC_DEFUN(SC_PATH_TCLCONFIG, [
+ AC_CACHE_VAL(ac_cv_c_tclconfig,[
+
+ # First check to see if --with-tclconfig was specified.
+ if test "${with_tclconfig}" != no; then
+ if test -f "${with_tclconfig}/tclConfig.sh" ; then
+ ac_cv_c_tclconfig=`(cd ${with_tclconfig}; pwd)`
+ else
+ AC_MSG_ERROR([${with_tclconfig} directory doesn't contain tclConfig.sh])
+ fi
+ fi
+
+ # check in a few common install locations
+ if test x"${ac_cv_c_tclconfig}" = x ; then
+ for i in `ls -d /usr/local/lib 2>/dev/null` ; do
+ if test -f "$i/tclConfig.sh" ; then
+ ac_cv_c_tclconfig=`(cd $i; pwd)`
+ break
+ fi
+ done
+ fi
+
+ ])
+
+ if test x"${ac_cv_c_tclconfig}" = x ; then
+ TCL_BIN_DIR="# no Tcl configs found"
+ AC_MSG_ERROR(can't find Tcl configuration definitions)
+ else
+ TCL_BIN_DIR=${ac_cv_c_tclconfig}
+ fi
+])
+
+AC_DEFUN(SC_LOAD_TCLCONFIG, [
+ AC_MSG_CHECKING([for existence of $TCL_BIN_DIR/tclConfig.sh])
+
+ if test -f "$TCL_BIN_DIR/tclConfig.sh" ; then
+ AC_MSG_RESULT([loading])
+ . $TCL_BIN_DIR/tclConfig.sh
+ else
+ AC_MSG_RESULT([file not found])
+ fi
+
+ #
+ # The eval is required to do the TCL_DBGX substitution in the
+ # TCL_LIB_FILE variable
+ #
+ eval TCL_LIB_FILE="${TCL_LIB_FILE}"
+ eval TCL_LIB_FLAG="${TCL_LIB_FLAG}"
+ eval "TCL_LIB_SPEC=\"${TCL_LIB_SPEC}\""
+
+ #
+ # If the DB Tcl library isn't loaded with the Tcl spec and library
+ # flags on AIX, the resulting libdb_tcl-X.Y.so.0 will drop core at
+ # load time. [#4843] Furthermore, with Tcl 8.3, the link flags
+ # given by the Tcl spec are insufficient for our use. [#5779]
+ #
+ case "$host_os" in
+ aix4.[[2-9]].*)
+ LIBTSO_LIBS="$LIBTSO_LIBS $TCL_LIB_SPEC $TCL_LIB_FLAG"
+ LIBTSO_LIBS="$LIBTSO_LIBS -L$TCL_EXEC_PREFIX/lib -ltcl$TCL_VERSION";;
+ aix*)
+ LIBTSO_LIBS="$LIBTSO_LIBS $TCL_LIB_SPEC $TCL_LIB_FLAG";;
+ esac
+ AC_SUBST(TCL_BIN_DIR)
+ AC_SUBST(TCL_SRC_DIR)
+ AC_SUBST(TCL_LIB_FILE)
+
+ AC_SUBST(TCL_TCLSH)
+ TCL_TCLSH="${TCL_PREFIX}/bin/tclsh${TCL_VERSION}"
+])
+
+# Optional Tcl API.
+AC_DEFUN(AM_TCL_LOAD, [
+if test "$db_cv_tcl" = "yes"; then
+ if test "$enable_shared" = "no"; then
+ AC_MSG_ERROR([Tcl requires shared libraries])
+ fi
+
+ AC_SUBST(TCFLAGS)
+
+ SC_PATH_TCLCONFIG
+ SC_LOAD_TCLCONFIG
+
+ if test x"$TCL_PREFIX" != x && test -f "$TCL_PREFIX/include/tcl.h"; then
+ TCFLAGS="-I$TCL_PREFIX/include"
+ fi
+
+ INSTALL_LIBS="${INSTALL_LIBS} \$(libtso_target)"
+fi])
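
A usage sketch (the directory path is an assumption): AM_TCL_LOAD expects
--with-tcl to name the directory containing tclConfig.sh and, per the check
above, refuses to build the Tcl API unless shared libraries are enabled:

    ../dist/configure --enable-tcl --with-tcl=/usr/local/lib

SC_PATH_TCLCONFIG caches that directory in ac_cv_c_tclconfig, and
SC_LOAD_TCLCONFIG then sources tclConfig.sh from it to pick up TCL_LIB_SPEC,
TCL_PREFIX and related variables.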
diff --git a/bdb/dist/aclocal/tcl.m4 b/bdb/dist/aclocal/tcl.m4
deleted file mode 100644
index 3d0aec2e8ff..00000000000
--- a/bdb/dist/aclocal/tcl.m4
+++ /dev/null
@@ -1,126 +0,0 @@
-dnl $Id: tcl.m4,v 11.5 2000/06/27 13:21:28 bostic Exp $
-
-dnl The SC_* macros in this file are from the unix/tcl.m4 files in the Tcl
-dnl 8.3.0 distribution, with some minor changes. For this reason, license
-dnl terms for the Berkeley DB distribution dist/aclocal/tcl.m4 file are as
-dnl follows (copied from the license.terms file in the Tcl 8.3 distribution):
-dnl
-dnl This software is copyrighted by the Regents of the University of
-dnl California, Sun Microsystems, Inc., Scriptics Corporation,
-dnl and other parties. The following terms apply to all files associated
-dnl with the software unless explicitly disclaimed in individual files.
-dnl
-dnl The authors hereby grant permission to use, copy, modify, distribute,
-dnl and license this software and its documentation for any purpose, provided
-dnl that existing copyright notices are retained in all copies and that this
-dnl notice is included verbatim in any distributions. No written agreement,
-dnl license, or royalty fee is required for any of the authorized uses.
-dnl Modifications to this software may be copyrighted by their authors
-dnl and need not follow the licensing terms described here, provided that
-dnl the new terms are clearly indicated on the first page of each file where
-dnl they apply.
-dnl
-dnl IN NO EVENT SHALL THE AUTHORS OR DISTRIBUTORS BE LIABLE TO ANY PARTY
-dnl FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES
-dnl ARISING OUT OF THE USE OF THIS SOFTWARE, ITS DOCUMENTATION, OR ANY
-dnl DERIVATIVES THEREOF, EVEN IF THE AUTHORS HAVE BEEN ADVISED OF THE
-dnl POSSIBILITY OF SUCH DAMAGE.
-dnl
-dnl THE AUTHORS AND DISTRIBUTORS SPECIFICALLY DISCLAIM ANY WARRANTIES,
-dnl INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY,
-dnl FITNESS FOR A PARTICULAR PURPOSE, AND NON-INFRINGEMENT. THIS SOFTWARE
-dnl IS PROVIDED ON AN "AS IS" BASIS, AND THE AUTHORS AND DISTRIBUTORS HAVE
-dnl NO OBLIGATION TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR
-dnl MODIFICATIONS.
-dnl
-dnl GOVERNMENT USE: If you are acquiring this software on behalf of the
-dnl U.S. government, the Government shall have only "Restricted Rights"
-dnl in the software and related documentation as defined in the Federal
-dnl Acquisition Regulations (FARs) in Clause 52.227.19 (c) (2). If you
-dnl are acquiring the software on behalf of the Department of Defense, the
-dnl software shall be classified as "Commercial Computer Software" and the
-dnl Government shall have only "Restricted Rights" as defined in Clause
-dnl 252.227-7013 (c) (1) of DFARs. Notwithstanding the foregoing, the
-dnl authors grant the U.S. Government and others acting in its behalf
-dnl permission to use and distribute the software in accordance with the
-dnl terms specified in this license.
-
-AC_DEFUN(SC_PATH_TCLCONFIG, [
- AC_CACHE_VAL(ac_cv_c_tclconfig,[
-
- # First check to see if --with-tclconfig was specified.
- if test "${with_tclconfig}" != no; then
- if test -f "${with_tclconfig}/tclConfig.sh" ; then
- ac_cv_c_tclconfig=`(cd ${with_tclconfig}; pwd)`
- else
- AC_MSG_ERROR([${with_tclconfig} directory doesn't contain tclConfig.sh])
- fi
- fi
-
- # check in a few common install locations
- if test x"${ac_cv_c_tclconfig}" = x ; then
- for i in `ls -d /usr/local/lib 2>/dev/null` ; do
- if test -f "$i/tclConfig.sh" ; then
- ac_cv_c_tclconfig=`(cd $i; pwd)`
- break
- fi
- done
- fi
-
- ])
-
- if test x"${ac_cv_c_tclconfig}" = x ; then
- TCL_BIN_DIR="# no Tcl configs found"
- AC_MSG_ERROR(can't find Tcl configuration definitions)
- else
- TCL_BIN_DIR=${ac_cv_c_tclconfig}
- fi
-])
-
-AC_DEFUN(SC_LOAD_TCLCONFIG, [
- AC_MSG_CHECKING([for existence of $TCL_BIN_DIR/tclConfig.sh])
-
- if test -f "$TCL_BIN_DIR/tclConfig.sh" ; then
- AC_MSG_RESULT([loading])
- . $TCL_BIN_DIR/tclConfig.sh
- else
- AC_MSG_RESULT([file not found])
- fi
-
- #
- # The eval is required to do the TCL_DBGX substitution in the
- # TCL_LIB_FILE variable
- #
- eval TCL_LIB_FILE="${TCL_LIB_FILE}"
- eval TCL_LIB_FLAG="${TCL_LIB_FLAG}"
- eval "TCL_LIB_SPEC=\"${TCL_LIB_SPEC}\""
-
- AC_SUBST(TCL_BIN_DIR)
- AC_SUBST(TCL_SRC_DIR)
- AC_SUBST(TCL_LIB_FILE)
-
- AC_SUBST(TCL_TCLSH)
- TCL_TCLSH="${TCL_PREFIX}/bin/tclsh${TCL_VERSION}"
-])
-
-dnl Optional Tcl API.
-AC_DEFUN(AM_TCL_LOAD, [
-if test "$db_cv_tcl" != no; then
- if test "$db_cv_dynamic" != "yes"; then
- AC_MSG_ERROR([--with-tcl requires --enable-dynamic])
- fi
-
- AC_SUBST(TCFLAGS)
-
- SC_PATH_TCLCONFIG
- SC_LOAD_TCLCONFIG
-
- if test x"$TCL_PREFIX" != x && test -f "$TCL_PREFIX/include/tcl.h"; then
- TCFLAGS="-I$TCL_PREFIX/include"
- fi
-
- LIBS="$LIBS $TCL_LIB_SPEC $TCL_LIBS"
-
- ADDITIONAL_LIBS="$ADDITIONAL_LIBS \$(libtso_target)"
- DEFAULT_INSTALL="${DEFAULT_INSTALL} install_tcl"
-fi])
diff --git a/bdb/dist/aclocal/types.ac b/bdb/dist/aclocal/types.ac
new file mode 100644
index 00000000000..db8aaac6884
--- /dev/null
+++ b/bdb/dist/aclocal/types.ac
@@ -0,0 +1,146 @@
+# $Id: types.ac,v 11.10 2001/12/10 14:16:49 bostic Exp $
+
+# db.h includes <sys/types.h> and <stdio.h>, not the other default includes
+# autoconf usually includes. For that reason, we specify a set of includes
+# for all type checking tests. [#5060]
+AC_DEFUN(DB_INCLUDES, [[
+#include <sys/types.h>
+#include <stdio.h>]])
+
+# Check the sizes we know about, and see if any of them match what's needed.
+#
+# Prefer ints to anything else, because read, write and others historically
+# returned an int.
+AC_DEFUN(AM_SEARCH_USIZES, [
+ case "$3" in
+ "$ac_cv_sizeof_unsigned_int")
+ $1="typedef unsigned int $2;";;
+ "$ac_cv_sizeof_unsigned_char")
+ $1="typedef unsigned char $2;";;
+ "$ac_cv_sizeof_unsigned_short")
+ $1="typedef unsigned short $2;";;
+ "$ac_cv_sizeof_unsigned_long")
+ $1="typedef unsigned long $2;";;
+ *)
+ AC_MSG_ERROR([No unsigned $3-byte integral type]);;
+ esac])
+AC_DEFUN(AM_SEARCH_SSIZES, [
+ case "$3" in
+ "$ac_cv_sizeof_int")
+ $1="typedef int $2;";;
+ "$ac_cv_sizeof_char")
+ $1="typedef char $2;";;
+ "$ac_cv_sizeof_short")
+ $1="typedef short $2;";;
+ "$ac_cv_sizeof_long")
+ $1="typedef long $2;";;
+ *)
+ AC_MSG_ERROR([No signed $3-byte integral type]);;
+ esac])
+
+# Check for the standard system types.
+AC_DEFUN(AM_TYPES, [
+
+# We need to know the sizes of various objects on this system.
+# We don't use the SIZEOF_XXX values created by autoconf.
+AC_CHECK_SIZEOF(char,, DB_INCLUDES)
+AC_CHECK_SIZEOF(unsigned char,, DB_INCLUDES)
+AC_CHECK_SIZEOF(short,, DB_INCLUDES)
+AC_CHECK_SIZEOF(unsigned short,, DB_INCLUDES)
+AC_CHECK_SIZEOF(int,, DB_INCLUDES)
+AC_CHECK_SIZEOF(unsigned int,, DB_INCLUDES)
+AC_CHECK_SIZEOF(long,, DB_INCLUDES)
+AC_CHECK_SIZEOF(unsigned long,, DB_INCLUDES)
+AC_CHECK_SIZEOF(size_t,, DB_INCLUDES)
+AC_CHECK_SIZEOF(char *,, DB_INCLUDES)
+
+# We require off_t and size_t, and we don't try to substitute our own
+# if we can't find them.
+AC_CHECK_TYPE(off_t,,, DB_INCLUDES)
+if test "$ac_cv_type_off_t" = no; then
+ AC_MSG_ERROR([No off_t type.])
+fi
+
+AC_CHECK_TYPE(size_t,,, DB_INCLUDES)
+if test "$ac_cv_type_size_t" = no; then
+ AC_MSG_ERROR([No size_t type.])
+fi
+
+# We look for u_char, u_short, u_int, u_long -- if we can't find them,
+# we create our own.
+AC_SUBST(u_char_decl)
+AC_CHECK_TYPE(u_char,,, DB_INCLUDES)
+if test "$ac_cv_type_u_char" = no; then
+ u_char_decl="typedef unsigned char u_char;"
+fi
+
+AC_SUBST(u_short_decl)
+AC_CHECK_TYPE(u_short,,, DB_INCLUDES)
+if test "$ac_cv_type_u_short" = no; then
+ u_short_decl="typedef unsigned short u_short;"
+fi
+
+AC_SUBST(u_int_decl)
+AC_CHECK_TYPE(u_int,,, DB_INCLUDES)
+if test "$ac_cv_type_u_int" = no; then
+ u_int_decl="typedef unsigned int u_int;"
+fi
+
+AC_SUBST(u_long_decl)
+AC_CHECK_TYPE(u_long,,, DB_INCLUDES)
+if test "$ac_cv_type_u_long" = no; then
+ u_long_decl="typedef unsigned long u_long;"
+fi
+
+AC_SUBST(u_int8_decl)
+AC_CHECK_TYPE(u_int8_t,,, DB_INCLUDES)
+if test "$ac_cv_type_u_int8_t" = no; then
+ AM_SEARCH_USIZES(u_int8_decl, u_int8_t, 1)
+fi
+
+AC_SUBST(u_int16_decl)
+AC_CHECK_TYPE(u_int16_t,,, DB_INCLUDES)
+if test "$ac_cv_type_u_int16_t" = no; then
+ AM_SEARCH_USIZES(u_int16_decl, u_int16_t, 2)
+fi
+
+AC_SUBST(int16_decl)
+AC_CHECK_TYPE(int16_t,,, DB_INCLUDES)
+if test "$ac_cv_type_int16_t" = no; then
+ AM_SEARCH_SSIZES(int16_decl, int16_t, 2)
+fi
+
+AC_SUBST(u_int32_decl)
+AC_CHECK_TYPE(u_int32_t,,, DB_INCLUDES)
+if test "$ac_cv_type_u_int32_t" = no; then
+ AM_SEARCH_USIZES(u_int32_decl, u_int32_t, 4)
+fi
+
+AC_SUBST(int32_decl)
+AC_CHECK_TYPE(int32_t,,, DB_INCLUDES)
+if test "$ac_cv_type_int32_t" = no; then
+ AM_SEARCH_SSIZES(int32_decl, int32_t, 4)
+fi
+
+# Check for ssize_t -- if none exists, find a signed integral type that's
+# the same size as a size_t.
+AC_SUBST(ssize_t_decl)
+AC_CHECK_TYPE(ssize_t,,, DB_INCLUDES)
+if test "$ac_cv_type_ssize_t" = no; then
+ AM_SEARCH_SSIZES(ssize_t_decl, ssize_t, $ac_cv_sizeof_size_t)
+fi
+
+# Find the largest integral type.
+AC_SUBST(db_align_t_decl)
+AC_CHECK_TYPE(unsigned long long,,, DB_INCLUDES)
+if test "$ac_cv_type_unsigned_long_long" = no; then
+ db_align_t_decl="typedef unsigned long db_align_t;"
+else
+ db_align_t_decl="typedef unsigned long long db_align_t;"
+fi
+
+# Find an integral type which is the same size as a pointer.
+AC_SUBST(db_alignp_t_decl)
+AM_SEARCH_USIZES(db_alignp_t_decl, db_alignp_t, $ac_cv_sizeof_char_p)
+
+])
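
The declarations built above only take effect once a template references the substituted variables. A minimal sketch of that wiring, assuming a hypothetical configure.ac driver and db.in template (both file names are illustrative, not taken from the distribution):

    # configure.ac fragment (sketch): run the type checks and generate db.h.
    AC_INIT(db.in)
    AC_PROG_CC
    AM_TYPES
    AC_OUTPUT(db.h:db.in)

    # db.in fragment (sketch): each @..._decl@ becomes either an empty line
    # (the system already provides the type) or the typedef selected by
    # AM_SEARCH_USIZES / AM_SEARCH_SSIZES for the required byte count.
    @u_char_decl@
    @u_int8_decl@
    @u_int16_decl@
    @u_int32_decl@
    @int16_decl@
    @int32_decl@
    @ssize_t_decl@
    @db_align_t_decl@
    @db_alignp_t_decl@
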
diff --git a/bdb/dist/aclocal/types.m4 b/bdb/dist/aclocal/types.m4
deleted file mode 100644
index a9a03ab6d87..00000000000
--- a/bdb/dist/aclocal/types.m4
+++ /dev/null
@@ -1,139 +0,0 @@
-dnl $Id: types.m4,v 11.4 1999/12/04 19:18:28 bostic Exp $
-
-dnl Check for the standard shorthand types.
-AC_DEFUN(AM_SHORTHAND_TYPES, [dnl
-
-AC_SUBST(ssize_t_decl)
-AC_CACHE_CHECK([for ssize_t], db_cv_ssize_t, [dnl
-AC_TRY_COMPILE([#include <sys/types.h>], ssize_t foo;,
- [db_cv_ssize_t=yes], [db_cv_ssize_t=no])])
-if test "$db_cv_ssize_t" = no; then
- ssize_t_decl="typedef int ssize_t;"
-fi
-
-AC_SUBST(u_char_decl)
-AC_CACHE_CHECK([for u_char], db_cv_uchar, [dnl
-AC_TRY_COMPILE([#include <sys/types.h>], u_char foo;,
- [db_cv_uchar=yes], [db_cv_uchar=no])])
-if test "$db_cv_uchar" = no; then
- u_char_decl="typedef unsigned char u_char;"
-fi
-
-AC_SUBST(u_short_decl)
-AC_CACHE_CHECK([for u_short], db_cv_ushort, [dnl
-AC_TRY_COMPILE([#include <sys/types.h>], u_short foo;,
- [db_cv_ushort=yes], [db_cv_ushort=no])])
-if test "$db_cv_ushort" = no; then
- u_short_decl="typedef unsigned short u_short;"
-fi
-
-AC_SUBST(u_int_decl)
-AC_CACHE_CHECK([for u_int], db_cv_uint, [dnl
-AC_TRY_COMPILE([#include <sys/types.h>], u_int foo;,
- [db_cv_uint=yes], [db_cv_uint=no])])
-if test "$db_cv_uint" = no; then
- u_int_decl="typedef unsigned int u_int;"
-fi
-
-AC_SUBST(u_long_decl)
-AC_CACHE_CHECK([for u_long], db_cv_ulong, [dnl
-AC_TRY_COMPILE([#include <sys/types.h>], u_long foo;,
- [db_cv_ulong=yes], [db_cv_ulong=no])])
-if test "$db_cv_ulong" = no; then
- u_long_decl="typedef unsigned long u_long;"
-fi
-
-dnl DB/Vi use specific integer sizes.
-AC_SUBST(u_int8_decl)
-AC_CACHE_CHECK([for u_int8_t], db_cv_uint8, [dnl
-AC_TRY_COMPILE([#include <sys/types.h>], u_int8_t foo;,
- [db_cv_uint8=yes],
- AC_TRY_RUN([main(){exit(sizeof(unsigned char) != 1);}],
- [db_cv_uint8="unsigned char"], [db_cv_uint8=no]))])
-if test "$db_cv_uint8" = no; then
- AC_MSG_ERROR(No unsigned 8-bit integral type.)
-fi
-if test "$db_cv_uint8" != yes; then
- u_int8_decl="typedef $db_cv_uint8 u_int8_t;"
-fi
-
-AC_SUBST(u_int16_decl)
-AC_CACHE_CHECK([for u_int16_t], db_cv_uint16, [dnl
-AC_TRY_COMPILE([#include <sys/types.h>], u_int16_t foo;,
- [db_cv_uint16=yes],
-AC_TRY_RUN([main(){exit(sizeof(unsigned short) != 2);}],
- [db_cv_uint16="unsigned short"],
-AC_TRY_RUN([main(){exit(sizeof(unsigned int) != 2);}],
- [db_cv_uint16="unsigned int"], [db_cv_uint16=no])))])
-if test "$db_cv_uint16" = no; then
- AC_MSG_ERROR([No unsigned 16-bit integral type.])
-fi
-if test "$db_cv_uint16" != yes; then
- u_int16_decl="typedef $db_cv_uint16 u_int16_t;"
-fi
-
-AC_SUBST(int16_decl)
-AC_CACHE_CHECK([for int16_t], db_cv_int16, [dnl
-AC_TRY_COMPILE([#include <sys/types.h>], int16_t foo;,
- [db_cv_int16=yes],
-AC_TRY_RUN([main(){exit(sizeof(short) != 2);}],
- [db_cv_int16="short"],
-AC_TRY_RUN([main(){exit(sizeof(int) != 2);}],
- [db_cv_int16="int"], [db_cv_int16=no])))])
-if test "$db_cv_int16" = no; then
- AC_MSG_ERROR([No signed 16-bit integral type.])
-fi
-if test "$db_cv_int16" != yes; then
- int16_decl="typedef $db_cv_int16 int16_t;"
-fi
-
-AC_SUBST(u_int32_decl)
-AC_CACHE_CHECK([for u_int32_t], db_cv_uint32, [dnl
-AC_TRY_COMPILE([#include <sys/types.h>], u_int32_t foo;,
- [db_cv_uint32=yes],
-AC_TRY_RUN([main(){exit(sizeof(unsigned int) != 4);}],
- [db_cv_uint32="unsigned int"],
-AC_TRY_RUN([main(){exit(sizeof(unsigned long) != 4);}],
- [db_cv_uint32="unsigned long"], [db_cv_uint32=no])))])
-if test "$db_cv_uint32" = no; then
- AC_MSG_ERROR([No unsigned 32-bit integral type.])
-fi
-if test "$db_cv_uint32" != yes; then
- u_int32_decl="typedef $db_cv_uint32 u_int32_t;"
-fi
-
-AC_SUBST(int32_decl)
-AC_CACHE_CHECK([for int32_t], db_cv_int32, [dnl
-AC_TRY_COMPILE([#include <sys/types.h>], int32_t foo;,
- [db_cv_int32=yes],
-AC_TRY_RUN([main(){exit(sizeof(int) != 4);}],
- [db_cv_int32="int"],
-AC_TRY_RUN([main(){exit(sizeof(long) != 4);}],
- [db_cv_int32="long"], [db_cv_int32=no])))])
-if test "$db_cv_int32" = no; then
- AC_MSG_ERROR([No signed 32-bit integral type.])
-fi
-if test "$db_cv_int32" != yes; then
- int32_decl="typedef $db_cv_int32 int32_t;"
-fi
-
-dnl Figure out largest integral type.
-AC_SUBST(db_align_t_decl)
-AC_CACHE_CHECK([for largest integral type], db_cv_align_t, [dnl
-AC_TRY_COMPILE([#include <sys/types.h>], long long foo;,
- [db_cv_align_t="unsigned long long"], [db_cv_align_t="unsigned long"])])
-db_align_t_decl="typedef $db_cv_align_t db_align_t;"
-
-dnl Figure out integral type the same size as a pointer.
-AC_SUBST(db_alignp_t_decl)
-AC_CACHE_CHECK([for integral type equal to pointer size], db_cv_alignp_t, [dnl
-db_cv_alignp_t=$db_cv_align_t
-AC_TRY_RUN([main(){exit(sizeof(unsigned int) != sizeof(char *));}],
- [db_cv_alignp_t="unsigned int"])
-AC_TRY_RUN([main(){exit(sizeof(unsigned long) != sizeof(char *));}],
- [db_cv_alignp_t="unsigned long"])
-AC_TRY_RUN([main(){exit(sizeof(unsigned long long) != sizeof(char *));}],
- [db_cv_alignp_t="unsigned long long"])])
-db_alignp_t_decl="typedef $db_cv_alignp_t db_alignp_t;"
-
-])dnl
diff --git a/bdb/dist/aclocal_java/ac_check_class.ac b/bdb/dist/aclocal_java/ac_check_class.ac
new file mode 100644
index 00000000000..915198af567
--- /dev/null
+++ b/bdb/dist/aclocal_java/ac_check_class.ac
@@ -0,0 +1,107 @@
+dnl @synopsis AC_CHECK_CLASS
+dnl
+dnl AC_CHECK_CLASS tests the existence of a given Java class, either in
+dnl a jar or in a '.class' file.
+dnl
+dnl *Warning*: its success or failure can depend on a proper setting of the
+dnl CLASSPATH env. variable.
+dnl
+dnl Note: This is part of the set of autoconf M4 macros for Java programs.
+dnl It is VERY IMPORTANT that you download the whole set; some
+dnl macros depend on others. Unfortunately, the autoconf archive does not
+dnl support the concept of a set of macros, so I had to break it up for
+dnl submission.
+dnl The general documentation, as well as the sample configure.in, is
+dnl included in the AC_PROG_JAVA macro.
+dnl
+dnl @author Stephane Bortzmeyer <bortzmeyer@pasteur.fr>
+dnl @version $Id: ac_check_class.ac,v 1.1 2001/08/23 16:58:42 dda Exp $
+dnl
+AC_DEFUN([AC_CHECK_CLASS],[
+AC_REQUIRE([AC_PROG_JAVA])
+ac_var_name=`echo $1 | sed 's/\./_/g'`
+dnl Normally I'd use an AC_CACHE_CHECK here, but since the variable name is
+dnl dynamic, I need an extra level of extraction
+AC_MSG_CHECKING([for $1 class])
+AC_CACHE_VAL(ac_cv_class_$ac_var_name, [
+if test x$ac_cv_prog_uudecode_base64 = xyes; then
+dnl /**
+dnl * Test.java: used to test dynamically if a class exists.
+dnl */
+dnl public class Test
+dnl {
+dnl
+dnl public static void
+dnl main( String[] argv )
+dnl {
+dnl Class lib;
+dnl if (argv.length < 1)
+dnl {
+dnl System.err.println ("Missing argument");
+dnl System.exit (77);
+dnl }
+dnl try
+dnl {
+dnl lib = Class.forName (argv[0]);
+dnl }
+dnl catch (ClassNotFoundException e)
+dnl {
+dnl System.exit (1);
+dnl }
+dnl lib = null;
+dnl System.exit (0);
+dnl }
+dnl
+dnl }
+cat << \EOF > Test.uue
+begin-base64 644 Test.class
+yv66vgADAC0AKQcAAgEABFRlc3QHAAQBABBqYXZhL2xhbmcvT2JqZWN0AQAE
+bWFpbgEAFihbTGphdmEvbGFuZy9TdHJpbmc7KVYBAARDb2RlAQAPTGluZU51
+bWJlclRhYmxlDAAKAAsBAANlcnIBABVMamF2YS9pby9QcmludFN0cmVhbTsJ
+AA0ACQcADgEAEGphdmEvbGFuZy9TeXN0ZW0IABABABBNaXNzaW5nIGFyZ3Vt
+ZW50DAASABMBAAdwcmludGxuAQAVKExqYXZhL2xhbmcvU3RyaW5nOylWCgAV
+ABEHABYBABNqYXZhL2lvL1ByaW50U3RyZWFtDAAYABkBAARleGl0AQAEKEkp
+VgoADQAXDAAcAB0BAAdmb3JOYW1lAQAlKExqYXZhL2xhbmcvU3RyaW5nOylM
+amF2YS9sYW5nL0NsYXNzOwoAHwAbBwAgAQAPamF2YS9sYW5nL0NsYXNzBwAi
+AQAgamF2YS9sYW5nL0NsYXNzTm90Rm91bmRFeGNlcHRpb24BAAY8aW5pdD4B
+AAMoKVYMACMAJAoAAwAlAQAKU291cmNlRmlsZQEACVRlc3QuamF2YQAhAAEA
+AwAAAAAAAgAJAAUABgABAAcAAABtAAMAAwAAACkqvgSiABCyAAwSD7YAFBBN
+uAAaKgMyuAAeTKcACE0EuAAaAUwDuAAasQABABMAGgAdACEAAQAIAAAAKgAK
+AAAACgAAAAsABgANAA4ADgATABAAEwASAB4AFgAiABgAJAAZACgAGgABACMA
+JAABAAcAAAAhAAEAAQAAAAUqtwAmsQAAAAEACAAAAAoAAgAAAAQABAAEAAEA
+JwAAAAIAKA==
+====
+EOF
+ if uudecode$EXEEXT Test.uue; then
+ :
+ else
+ echo "configure: __oline__: uudecode had trouble decoding base 64 file 'Test.uue'" >&AC_FD_CC
+ echo "configure: failed file was:" >&AC_FD_CC
+ cat Test.uue >&AC_FD_CC
+ ac_cv_prog_uudecode_base64=no
+ fi
+ rm -f Test.uue
+ if AC_TRY_COMMAND($JAVA $JAVAFLAGS Test $1) >/dev/null 2>&1; then
+ eval "ac_cv_class_$ac_var_name=yes"
+ else
+ eval "ac_cv_class_$ac_var_name=no"
+ fi
+ rm -f Test.class
+else
+ AC_TRY_COMPILE_JAVA([$1], , [eval "ac_cv_class_$ac_var_name=yes"],
+ [eval "ac_cv_class_$ac_var_name=no"])
+fi
+eval "ac_var_val=$`eval echo ac_cv_class_$ac_var_name`"
+eval "HAVE_$ac_var_name=$`echo ac_cv_class_$ac_var_val`"
+HAVE_LAST_CLASS=$ac_var_val
+if test x$ac_var_val = xyes; then
+ ifelse([$2], , :, [$2])
+else
+ ifelse([$3], , :, [$3])
+fi
+])
+dnl for some reason the above statement didn't fall through here?
+dnl do scripts have variable scoping?
+eval "ac_var_val=$`eval echo ac_cv_class_$ac_var_name`"
+AC_MSG_RESULT($ac_var_val)
+])
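
A usage sketch for the macro above; the class name, shell variable, and warning text are illustrative. The second and third arguments are the usual action-if-found / action-if-not-found pair, and AC_PROG_JAVAC / AC_PROG_JAVA (added later in this patch) must run first:

    dnl configure.in fragment (sketch): probe for a class on the CLASSPATH.
    AC_PROG_JAVAC
    AC_PROG_JAVA
    AC_CHECK_CLASS(java.util.Vector,
        [have_vector=yes],
        [AC_MSG_WARN([java.util.Vector not found; check CLASSPATH])])
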
diff --git a/bdb/dist/aclocal_java/ac_check_classpath.ac b/bdb/dist/aclocal_java/ac_check_classpath.ac
new file mode 100644
index 00000000000..4a78d0f8785
--- /dev/null
+++ b/bdb/dist/aclocal_java/ac_check_classpath.ac
@@ -0,0 +1,23 @@
+dnl @synopsis AC_CHECK_CLASSPATH
+dnl
+dnl AC_CHECK_CLASSPATH just displays the CLASSPATH, for the edification
+dnl of the user.
+dnl
+dnl Note: This is part of the set of autoconf M4 macros for Java programs.
+dnl It is VERY IMPORTANT that you download the whole set; some
+dnl macros depend on others. Unfortunately, the autoconf archive does not
+dnl support the concept of a set of macros, so I had to break it up for
+dnl submission.
+dnl The general documentation, as well as the sample configure.in, is
+dnl included in the AC_PROG_JAVA macro.
+dnl
+dnl @author Stephane Bortzmeyer <bortzmeyer@pasteur.fr>
+dnl @version $Id: ac_check_classpath.ac,v 1.1 2001/08/23 16:58:42 dda Exp $
+dnl
+AC_DEFUN([AC_CHECK_CLASSPATH],[
+if test "x$CLASSPATH" = x; then
+ echo "You have no CLASSPATH, I hope it is good"
+else
+ echo "You have CLASSPATH $CLASSPATH, hope it is correct"
+fi
+])
diff --git a/bdb/dist/aclocal_java/ac_check_junit.ac b/bdb/dist/aclocal_java/ac_check_junit.ac
new file mode 100644
index 00000000000..3b81d1dc3fc
--- /dev/null
+++ b/bdb/dist/aclocal_java/ac_check_junit.ac
@@ -0,0 +1,54 @@
+dnl @synopsis AC_CHECK_JUNIT
+dnl
+dnl AC_CHECK_JUNIT tests the availability of the Junit testing
+dnl framework, and sets some variables for conditional compilation
+dnl of the test suite by automake.
+dnl
+dnl If available, JUNIT is set to a command launching the text-based
+dnl user interface of JUnit, @JAVA_JUNIT@ is set to $JAVA_JUNIT,
+dnl and @TESTS_JUNIT@ is set to $TESTS_JUNIT; otherwise they are set
+dnl to empty values.
+dnl
+dnl You can use these variables in your Makefile.am file like this:
+dnl
+dnl # Some of the following classes are built only if junit is available
+dnl JAVA_JUNIT = Class1Test.java Class2Test.java AllJunitTests.java
+dnl
+dnl noinst_JAVA = Example1.java Example2.java @JAVA_JUNIT@
+dnl
+dnl EXTRA_JAVA = $(JAVA_JUNIT)
+dnl
+dnl TESTS_JUNIT = AllJunitTests
+dnl
+dnl TESTS = StandaloneTest1 StandaloneTest2 @TESTS_JUNIT@
+dnl
+dnl EXTRA_TESTS = $(TESTS_JUNIT)
+dnl
+dnl AllJunitTests :
+dnl echo "#! /bin/sh" > $@
+dnl echo "exec @JUNIT@ my.package.name.AllJunitTests" >> $@
+dnl chmod +x $@
+dnl
+dnl @author Luc Maisonobe
+dnl @version $Id: ac_check_junit.ac,v 1.1 2001/08/23 16:58:43 dda Exp $
+dnl
+AC_DEFUN([AC_CHECK_JUNIT],[
+AC_CACHE_VAL(ac_cv_prog_JUNIT,[
+AC_CHECK_CLASS(junit.textui.TestRunner)
+if test x"`eval 'echo $ac_cv_class_junit_textui_TestRunner'`" != xno ; then
+ ac_cv_prog_JUNIT='$(CLASSPATH_ENV) $(JAVA) $(JAVAFLAGS) junit.textui.TestRunner'
+fi])
+AC_MSG_CHECKING([for junit])
+if test x"`eval 'echo $ac_cv_prog_JUNIT'`" != x ; then
+ JUNIT="$ac_cv_prog_JUNIT"
+ JAVA_JUNIT='$(JAVA_JUNIT)'
+ TESTS_JUNIT='$(TESTS_JUNIT)'
+else
+ JUNIT=
+ JAVA_JUNIT=
+ TESTS_JUNIT=
+fi
+AC_MSG_RESULT($JAVA_JUNIT)
+AC_SUBST(JUNIT)
+AC_SUBST(JAVA_JUNIT)
+AC_SUBST(TESTS_JUNIT)])
diff --git a/bdb/dist/aclocal_java/ac_check_rqrd_class.ac b/bdb/dist/aclocal_java/ac_check_rqrd_class.ac
new file mode 100644
index 00000000000..ab62e33c887
--- /dev/null
+++ b/bdb/dist/aclocal_java/ac_check_rqrd_class.ac
@@ -0,0 +1,26 @@
+dnl @synopsis AC_CHECK_RQRD_CLASS
+dnl
+dnl AC_CHECK_RQRD_CLASS tests the existence of a given Java class, either in
+dnl a jar or in a '.class' file, and fails if it doesn't exist.
+dnl Its success or failure can depend on a proper setting of the
+dnl CLASSPATH env. variable.
+dnl
+dnl Note: This is part of the set of autoconf M4 macros for Java programs.
+dnl It is VERY IMPORTANT that you download the whole set; some
+dnl macros depend on others. Unfortunately, the autoconf archive does not
+dnl support the concept of a set of macros, so I had to break it up for
+dnl submission.
+dnl The general documentation, as well as the sample configure.in, is
+dnl included in the AC_PROG_JAVA macro.
+dnl
+dnl @author Stephane Bortzmeyer <bortzmeyer@pasteur.fr>
+dnl @version $Id: ac_check_rqrd_class.ac,v 1.1 2001/08/23 16:58:43 dda Exp $
+dnl
+
+AC_DEFUN([AC_CHECK_RQRD_CLASS],[
+CLASS=`echo $1|sed 's/\./_/g'`
+AC_CHECK_CLASS($1)
+if test "$HAVE_LAST_CLASS" = "no"; then
+ AC_MSG_ERROR([Required class $1 missing, exiting.])
+fi
+])
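
A one-line usage sketch (the class name is only an example); because the macro aborts configure via AC_MSG_ERROR itself, no failure branch is needed:

    dnl configure.in fragment (sketch): abort configure if the class is missing.
    AC_CHECK_RQRD_CLASS(org.xml.sax.Parser)
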
diff --git a/bdb/dist/aclocal_java/ac_java_options.ac b/bdb/dist/aclocal_java/ac_java_options.ac
new file mode 100644
index 00000000000..567afca7fa5
--- /dev/null
+++ b/bdb/dist/aclocal_java/ac_java_options.ac
@@ -0,0 +1,32 @@
+dnl @synopsis AC_JAVA_OPTIONS
+dnl
+dnl AC_JAVA_OPTIONS adds the configure command-line options used by the
+dnl Java m4 macros. This macro is optional.
+dnl
+dnl Note: This is part of the set of autoconf M4 macros for Java programs.
+dnl It is VERY IMPORTANT that you download the whole set; some
+dnl macros depend on others. Unfortunately, the autoconf archive does not
+dnl support the concept of a set of macros, so I had to break it up for
+dnl submission.
+dnl The general documentation, as well as the sample configure.in, is
+dnl included in the AC_PROG_JAVA macro.
+dnl
+dnl @author Devin Weaver <ktohg@tritarget.com>
+dnl @version $Id: ac_java_options.ac,v 1.1 2001/08/23 16:58:43 dda Exp $
+dnl
+AC_DEFUN([AC_JAVA_OPTIONS],[
+AC_ARG_WITH(java-prefix,
+ [ --with-java-prefix=PFX prefix where Java runtime is installed (optional)])
+AC_ARG_WITH(javac-flags,
+ [ --with-javac-flags=FLAGS flags to pass to the Java compiler (optional)])
+AC_ARG_WITH(java-flags,
+ [ --with-java-flags=FLAGS flags to pass to the Java VM (optional)])
+JAVAPREFIX=$with_java_prefix
+JAVACFLAGS=$with_javac_flags
+JAVAFLAGS=$with_java_flags
+AC_SUBST(JAVAPREFIX)dnl
+AC_SUBST(JAVACFLAGS)dnl
+AC_SUBST(JAVAFLAGS)dnl
+AC_SUBST(JAVA)dnl
+AC_SUBST(JAVAC)dnl
+])
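
Since the macro only wires three --with-* switches into JAVAPREFIX, JAVACFLAGS, and JAVAFLAGS, a hedged command-line example is enough to show its effect (the prefix and flags below are illustrative):

    # Shell sketch: drive the options added by AC_JAVA_OPTIONS.
    ./configure \
        --with-java-prefix=/usr/local/jdk \
        --with-javac-flags='-g -deprecation' \
        --with-java-flags='-Xmx64m'
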
diff --git a/bdb/dist/aclocal_java/ac_jni_include_dirs.ac b/bdb/dist/aclocal_java/ac_jni_include_dirs.ac
new file mode 100644
index 00000000000..65cfbbfd13e
--- /dev/null
+++ b/bdb/dist/aclocal_java/ac_jni_include_dirs.ac
@@ -0,0 +1,112 @@
+dnl @synopsis AC_JNI_INCLUDE_DIR
+dnl
+dnl AC_JNI_INCLUDE_DIR finds include directories needed
+dnl for compiling programs using the JNI interface.
+dnl
+dnl JNI include directories are usually in the Java distribution;
+dnl this is deduced from the value of JAVAC. When this macro
+dnl completes, a list of directories is left in the variable
+dnl JNI_INCLUDE_DIRS.
+dnl
+dnl Example usage follows:
+dnl
+dnl AC_JNI_INCLUDE_DIR
+dnl
+dnl for JNI_INCLUDE_DIR in $JNI_INCLUDE_DIRS
+dnl do
+dnl CPPFLAGS="$CPPFLAGS -I$JNI_INCLUDE_DIR"
+dnl done
+dnl
+dnl If you want to force a specific compiler:
+dnl
+dnl - at the configure.in level, set JAVAC=yourcompiler before calling
+dnl AC_JNI_INCLUDE_DIR
+dnl
+dnl - at the configure level, setenv JAVAC
+dnl
+dnl Note: This macro can work with the autoconf M4 macros for Java programs.
+dnl This particular macro is not part of the original set of macros.
+dnl
+dnl @author Don Anderson <dda@sleepycat.com>
+dnl @version $Id: ac_jni_include_dirs.ac,v 1.8 2002/09/04 21:27:30 dda Exp $
+dnl
+AC_DEFUN(AC_JNI_INCLUDE_DIR,[
+
+JNI_INCLUDE_DIRS=""
+
+test "x$JAVAC" = x && AC_MSG_ERROR(['$JAVAC' undefined])
+AC_PATH_PROG(_ACJNI_JAVAC, $JAVAC, $JAVAC)
+test ! -x "$_ACJNI_JAVAC" && AC_MSG_ERROR([$JAVAC could not be found in path])
+AC_MSG_CHECKING(absolute path of $JAVAC)
+case "$_ACJNI_JAVAC" in
+/*) AC_MSG_RESULT($_ACJNI_JAVAC);;
+*) AC_MSG_ERROR([$_ACJNI_JAVAC is not an absolute path name]);;
+esac
+
+_ACJNI_FOLLOW_SYMLINKS("$_ACJNI_JAVAC")
+_JTOPDIR=`echo "$_ACJNI_FOLLOWED" | sed -e 's://*:/:g' -e 's:/[[^/]]*$::'`
+case "$host_os" in
+ darwin*) _JTOPDIR=`echo "$_JTOPDIR" | sed -e 's:/[[^/]]*$::'`
+ _JINC="$_JTOPDIR/Headers";;
+ *) _JINC="$_JTOPDIR/include";;
+esac
+
+# If we find jni.h in /usr/include, then it's not a java-only tree, so
+# don't add /usr/include or subdirectories to the list of includes.
+# An extra -I/usr/include can foul things up with newer gcc's.
+if test -f "$_JINC/jni.h"; then
+ if test "$_JINC" != "/usr/include"; then
+ JNI_INCLUDE_DIRS="$JNI_INCLUDE_DIRS $_JINC"
+ fi
+else
+ _JTOPDIR=`echo "$_JTOPDIR" | sed -e 's:/[[^/]]*$::'`
+ if test -f "$_JTOPDIR/include/jni.h"; then
+ if test "$_JTOPDIR" != "/usr"; then
+ JNI_INCLUDE_DIRS="$JNI_INCLUDE_DIRS $_JTOPDIR/include"
+ fi
+ else
+ AC_MSG_ERROR([cannot find java include files])
+ fi
+fi
+
+# get the likely subdirectories for system specific java includes
+if test "$_JTOPDIR" != "/usr"; then
+ case "$host_os" in
+ aix*) _JNI_INC_SUBDIRS="aix";;
+ bsdi*) _JNI_INC_SUBDIRS="bsdos";;
+ linux*) _JNI_INC_SUBDIRS="linux genunix";;
+ osf*) _JNI_INC_SUBDIRS="alpha";;
+ solaris*) _JNI_INC_SUBDIRS="solaris";;
+ *) _JNI_INC_SUBDIRS="genunix";;
+ esac
+fi
+
+# add any subdirectories that are present
+for _JINCSUBDIR in $_JNI_INC_SUBDIRS
+do
+ if test -d "$_JTOPDIR/include/$_JINCSUBDIR"; then
+ JNI_INCLUDE_DIRS="$JNI_INCLUDE_DIRS $_JTOPDIR/include/$_JINCSUBDIR"
+ fi
+done
+])
+
+# _ACJNI_FOLLOW_SYMLINKS <path>
+# Follows symbolic links on <path>,
+# finally setting variable _ACJNI_FOLLOWED
+# --------------------
+AC_DEFUN(_ACJNI_FOLLOW_SYMLINKS,[
+# find the include directory relative to the javac executable
+_cur="$1"
+while ls -ld "$_cur" 2>/dev/null | grep " -> " >/dev/null; do
+ AC_MSG_CHECKING(symlink for $_cur)
+ _slink=`ls -ld "$_cur" | sed 's/.* -> //'`
+ case "$_slink" in
+ /*) _cur="$_slink";;
+ # 'X' avoids triggering unwanted echo options.
+ *) _cur=`echo "X$_cur" | sed -e 's/^X//' -e 's:[[^/]]*$::'`"$_slink";;
+ esac
+ AC_MSG_RESULT($_cur)
+done
+_ACJNI_FOLLOWED="$_cur"
+])# _ACJNI
+
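
A worked trace of the directory arithmetic above, assuming JAVAC resolves (after following symlinks) to /usr/local/jdk1.3/bin/javac on a Linux host; the prefix is only an example, and the consuming loop repeats the one from the macro's own documentation:

    # javac (symlinks followed):  /usr/local/jdk1.3/bin/javac
    # first strip:   _JTOPDIR=/usr/local/jdk1.3/bin  (no .../bin/include/jni.h)
    # second strip:  _JTOPDIR=/usr/local/jdk1.3      (include/jni.h found)
    # result:        JNI_INCLUDE_DIRS="/usr/local/jdk1.3/include /usr/local/jdk1.3/include/linux"
    for JNI_INCLUDE_DIR in $JNI_INCLUDE_DIRS
    do
        CPPFLAGS="$CPPFLAGS -I$JNI_INCLUDE_DIR"
    done
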
diff --git a/bdb/dist/aclocal_java/ac_prog_jar.ac b/bdb/dist/aclocal_java/ac_prog_jar.ac
new file mode 100644
index 00000000000..9dfa1be6dad
--- /dev/null
+++ b/bdb/dist/aclocal_java/ac_prog_jar.ac
@@ -0,0 +1,36 @@
+dnl @synopsis AC_PROG_JAR
+dnl
+dnl AC_PROG_JAR tests for an existing jar program. It uses the environment
+dnl variable JAR then tests in sequence various common jar programs.
+dnl
+dnl If you want to force a specific compiler:
+dnl
+dnl - at the configure.in level, set JAR=yourcompiler before calling
+dnl AC_PROG_JAR
+dnl
+dnl - at the configure level, setenv JAR
+dnl
+dnl You can use the JAR variable in your Makefile.in, with @JAR@.
+dnl
+dnl Note: This macro depends on the autoconf M4 macros for Java programs.
+dnl It is VERY IMPORTANT that you download that whole set; some
+dnl macros depend on others. Unfortunately, the autoconf archive does not
+dnl support the concept of a set of macros, so I had to break it up for
+dnl submission.
+dnl
+dnl The general documentation of those macros, as well as the sample
+dnl configure.in, is included in the AC_PROG_JAVA macro.
+dnl
+dnl @author Egon Willighagen <egonw@sci.kun.nl>
+dnl @version $Id: ac_prog_jar.ac,v 1.1 2001/08/23 16:58:43 dda Exp $
+dnl
+AC_DEFUN([AC_PROG_JAR],[
+AC_REQUIRE([AC_EXEEXT])dnl
+if test "x$JAVAPREFIX" = x; then
+ test "x$JAR" = x && AC_CHECK_PROGS(JAR, jar$EXEEXT)
+else
+ test "x$JAR" = x && AC_CHECK_PROGS(JAR, jar, $JAVAPREFIX)
+fi
+test "x$JAR" = x && AC_MSG_ERROR([no acceptable jar program found in \$PATH])
+AC_PROVIDE([$0])dnl
+])
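
A brief consumption sketch; the jar name and class list in the comment are illustrative, not part of the distribution:

    dnl configure.in fragment (sketch): locate jar and export it as @JAR@.
    AC_PROG_JAR
    dnl A Makefile.in rule can then package compiled classes with, e.g.:
    dnl     @JAR@ cf db.jar db/*.class
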
diff --git a/bdb/dist/aclocal_java/ac_prog_java.ac b/bdb/dist/aclocal_java/ac_prog_java.ac
new file mode 100644
index 00000000000..8cb24445132
--- /dev/null
+++ b/bdb/dist/aclocal_java/ac_prog_java.ac
@@ -0,0 +1,77 @@
+dnl @synopsis AC_PROG_JAVA
+dnl
+dnl Here is a summary of the main macros:
+dnl
+dnl AC_PROG_JAVAC: finds a Java compiler.
+dnl
+dnl AC_PROG_JAVA: finds a Java virtual machine.
+dnl
+dnl AC_CHECK_CLASS: finds if we have the given class (beware of CLASSPATH!).
+dnl
+dnl AC_CHECK_RQRD_CLASS: finds if we have the given class and stops otherwise.
+dnl
+dnl AC_TRY_COMPILE_JAVA: attempts to compile user-given source.
+dnl
+dnl AC_TRY_RUN_JAVA: attempts to compile and run user-given source.
+dnl
+dnl AC_JAVA_OPTIONS: adds Java configure options.
+dnl
+dnl AC_PROG_JAVA tests an existing Java virtual machine. It uses the
+dnl environment variable JAVA then tests in sequence various common Java
+dnl virtual machines. For political reasons, it starts with the free ones.
+dnl You *must* call [AC_PROG_JAVAC] before it.
+dnl
+dnl If you want to force a specific VM:
+dnl
+dnl - at the configure.in level, set JAVA=yourvm before calling AC_PROG_JAVA
+dnl (but after AC_INIT)
+dnl
+dnl - at the configure level, setenv JAVA
+dnl
+dnl You can use the JAVA variable in your Makefile.in, with @JAVA@.
+dnl
+dnl *Warning*: its success or failure can depend on a proper setting of the
+dnl CLASSPATH env. variable.
+dnl
+dnl TODO: allow excluding virtual machines (rationale: most Java programs
+dnl cannot run with some VMs, such as kaffe).
+dnl
+dnl Note: This is part of the set of autoconf M4 macros for Java programs.
+dnl It is VERY IMPORTANT that you download the whole set; some
+dnl macros depend on others. Unfortunately, the autoconf archive does not
+dnl support the concept of a set of macros, so I had to break it up for
+dnl submission.
+dnl
+dnl A Web page, with a link to the latest CVS snapshot is at
+dnl <http://www.internatif.org/bortzmeyer/autoconf-Java/>.
+dnl
+dnl This is a sample configure.in
+dnl Process this file with autoconf to produce a configure script.
+dnl
+dnl AC_INIT(UnTag.java)
+dnl
+dnl dnl Checks for programs.
+dnl AC_CHECK_CLASSPATH
+dnl AC_PROG_JAVAC
+dnl AC_PROG_JAVA
+dnl
+dnl dnl Checks for classes
+dnl AC_CHECK_RQRD_CLASS(org.xml.sax.Parser)
+dnl AC_CHECK_RQRD_CLASS(com.jclark.xml.sax.Driver)
+dnl
+dnl AC_OUTPUT(Makefile)
+dnl
+dnl @author Stephane Bortzmeyer <bortzmeyer@pasteur.fr>
+dnl @version $Id: ac_prog_java.ac,v 1.1 2001/08/23 16:58:43 dda Exp $
+dnl
+AC_DEFUN([AC_PROG_JAVA],[
+AC_REQUIRE([AC_EXEEXT])dnl
+if test x$JAVAPREFIX = x; then
+ test x$JAVA = x && AC_CHECK_PROGS(JAVA, kaffe$EXEEXT java$EXEEXT)
+else
+ test x$JAVA = x && AC_CHECK_PROGS(JAVA, kaffe$EXEEXT java$EXEEXT, $JAVAPREFIX)
+fi
+test x$JAVA = x && AC_MSG_ERROR([no acceptable Java virtual machine found in \$PATH])
+AC_PROG_JAVA_WORKS
+AC_PROVIDE([$0])dnl
+])
diff --git a/bdb/dist/aclocal_java/ac_prog_java_works.ac b/bdb/dist/aclocal_java/ac_prog_java_works.ac
new file mode 100644
index 00000000000..36acd2676fa
--- /dev/null
+++ b/bdb/dist/aclocal_java/ac_prog_java_works.ac
@@ -0,0 +1,97 @@
+dnl @synopsis AC_PROG_JAVA_WORKS
+dnl
+dnl Internal use ONLY.
+dnl
+dnl Note: This is part of the set of autoconf M4 macros for Java programs.
+dnl It is VERY IMPORTANT that you download the whole set; some
+dnl macros depend on others. Unfortunately, the autoconf archive does not
+dnl support the concept of a set of macros, so I had to break it up for
+dnl submission.
+dnl The general documentation, as well as the sample configure.in, is
+dnl included in the AC_PROG_JAVA macro.
+dnl
+dnl @author Stephane Bortzmeyer <bortzmeyer@pasteur.fr>
+dnl @version $Id: ac_prog_java_works.ac,v 1.1 2001/08/23 16:58:44 dda Exp $
+dnl
+AC_DEFUN([AC_PROG_JAVA_WORKS], [
+AC_CHECK_PROG(uudecode, uudecode$EXEEXT, yes)
+if test x$uudecode = xyes; then
+AC_CACHE_CHECK([if uudecode can decode base 64 file], ac_cv_prog_uudecode_base64, [
+dnl /**
+dnl * Test.java: used to test if java compiler works.
+dnl */
+dnl public class Test
+dnl {
+dnl
+dnl public static void
+dnl main( String[] argv )
+dnl {
+dnl System.exit (0);
+dnl }
+dnl
+dnl }
+cat << \EOF > Test.uue
+begin-base64 644 Test.class
+yv66vgADAC0AFQcAAgEABFRlc3QHAAQBABBqYXZhL2xhbmcvT2JqZWN0AQAE
+bWFpbgEAFihbTGphdmEvbGFuZy9TdHJpbmc7KVYBAARDb2RlAQAPTGluZU51
+bWJlclRhYmxlDAAKAAsBAARleGl0AQAEKEkpVgoADQAJBwAOAQAQamF2YS9s
+YW5nL1N5c3RlbQEABjxpbml0PgEAAygpVgwADwAQCgADABEBAApTb3VyY2VG
+aWxlAQAJVGVzdC5qYXZhACEAAQADAAAAAAACAAkABQAGAAEABwAAACEAAQAB
+AAAABQO4AAyxAAAAAQAIAAAACgACAAAACgAEAAsAAQAPABAAAQAHAAAAIQAB
+AAEAAAAFKrcAErEAAAABAAgAAAAKAAIAAAAEAAQABAABABMAAAACABQ=
+====
+EOF
+if uudecode$EXEEXT Test.uue; then
+ ac_cv_prog_uudecode_base64=yes
+else
+ echo "configure: __oline__: uudecode had trouble decoding base 64 file 'Test.uue'" >&AC_FD_CC
+ echo "configure: failed file was:" >&AC_FD_CC
+ cat Test.uue >&AC_FD_CC
+ ac_cv_prog_uudecode_base64=no
+fi
+rm -f Test.uue])
+fi
+if test x$ac_cv_prog_uudecode_base64 != xyes; then
+ rm -f Test.class
+ AC_MSG_WARN([I have to compile Test.class from scratch])
+ if test x$ac_cv_prog_javac_works = xno; then
+ AC_MSG_ERROR([Cannot compile java source. $JAVAC does not work properly])
+ fi
+ if test x$ac_cv_prog_javac_works = x; then
+ AC_PROG_JAVAC
+ fi
+fi
+AC_CACHE_CHECK(if $JAVA works, ac_cv_prog_java_works, [
+JAVA_TEST=Test.java
+CLASS_TEST=Test.class
+TEST=Test
+changequote(, )dnl
+cat << \EOF > $JAVA_TEST
+/* [#]line __oline__ "configure" */
+public class Test {
+public static void main (String args[]) {
+ System.exit (0);
+} }
+EOF
+changequote([, ])dnl
+if test x$ac_cv_prog_uudecode_base64 != xyes; then
+ if AC_TRY_COMMAND($JAVAC $JAVACFLAGS $JAVA_TEST) && test -s $CLASS_TEST; then
+ :
+ else
+ echo "configure: failed program was:" >&AC_FD_CC
+ cat $JAVA_TEST >&AC_FD_CC
+ AC_MSG_ERROR(The Java compiler $JAVAC failed (see config.log, check the CLASSPATH?))
+ fi
+fi
+if AC_TRY_COMMAND($JAVA $JAVAFLAGS $TEST) >/dev/null 2>&1; then
+ ac_cv_prog_java_works=yes
+else
+ echo "configure: failed program was:" >&AC_FD_CC
+ cat $JAVA_TEST >&AC_FD_CC
+ AC_MSG_ERROR(The Java VM $JAVA failed (see config.log, check the CLASSPATH?))
+fi
+rm -fr $JAVA_TEST $CLASS_TEST Test.uue
+])
+AC_PROVIDE([$0])dnl
+]
+)
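
For anyone debugging a failing run of this check, the probe can be reproduced by hand; the transcript below mirrors the macro's temporary files but is otherwise an illustrative sketch, and it assumes JAVAC, JAVA, and their flag variables are already set:

    # Shell sketch: the same "does $JAVA work" test, run manually.
    cat > Test.java <<'EOF'
    public class Test {
        public static void main (String args[]) {
            System.exit (0);
        }
    }
    EOF
    $JAVAC $JAVACFLAGS Test.java   # skipped when uudecode supplies Test.class
    $JAVA $JAVAFLAGS Test && echo "JVM works"
    rm -f Test.java Test.class
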
diff --git a/bdb/dist/aclocal_java/ac_prog_javac.ac b/bdb/dist/aclocal_java/ac_prog_javac.ac
new file mode 100644
index 00000000000..5ded7d1b7e6
--- /dev/null
+++ b/bdb/dist/aclocal_java/ac_prog_javac.ac
@@ -0,0 +1,43 @@
+dnl @synopsis AC_PROG_JAVAC
+dnl
+dnl AC_PROG_JAVAC tests an existing Java compiler. It uses the environment
+dnl variable JAVAC then tests in sequence various common Java compilers. For
+dnl political reasons, it starts with the free ones.
+dnl
+dnl If you want to force a specific compiler:
+dnl
+dnl - at the configure.in level, set JAVAC=yourcompiler before calling
+dnl AC_PROG_JAVAC
+dnl
+dnl - at the configure level, setenv JAVAC
+dnl
+dnl You can use the JAVAC variable in your Makefile.in, with @JAVAC@.
+dnl
+dnl *Warning*: its success or failure can depend on a proper setting of the
+dnl CLASSPATH env. variable.
+dnl
+dnl TODO: allow excluding compilers (rationale: most Java programs cannot compile
+dnl with some compilers, such as guavac).
+dnl
+dnl Note: This is part of the set of autoconf M4 macros for Java programs.
+dnl It is VERY IMPORTANT that you download the whole set; some
+dnl macros depend on others. Unfortunately, the autoconf archive does not
+dnl support the concept of a set of macros, so I had to break it up for
+dnl submission.
+dnl The general documentation, as well as the sample configure.in, is
+dnl included in the AC_PROG_JAVA macro.
+dnl
+dnl @author Stephane Bortzmeyer <bortzmeyer@pasteur.fr>
+dnl @version $Id: ac_prog_javac.ac,v 1.3 2001/08/23 17:08:22 dda Exp $
+dnl
+AC_DEFUN([AC_PROG_JAVAC],[
+AC_REQUIRE([AC_EXEEXT])dnl
+if test "x$JAVAPREFIX" = x; then
+ test "x$JAVAC" = x && AC_CHECK_PROGS(JAVAC, javac$EXEEXT "gcj$EXEEXT -C" guavac$EXEEXT jikes$EXEEXT)
+else
+ test "x$JAVAC" = x && AC_CHECK_PROGS(JAVAC, javac$EXEEXT "gcj$EXEEXT -C" guavac$EXEEXT jikes$EXEEXT, $JAVAPREFIX)
+fi
+test "x$JAVAC" = x && AC_MSG_ERROR([no acceptable Java compiler found in \$PATH])
+AC_PROG_JAVAC_WORKS
+AC_PROVIDE([$0])dnl
+])
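
The two ways of forcing a compiler that the comment above describes, as a short sketch; jikes is only an example of an alternative compiler:

    # Shell sketch: pre-seed JAVAC so AC_PROG_JAVAC skips its search.
    JAVAC=jikes ./configure
    # csh equivalent, matching the setenv style used elsewhere in this tree:
    #   setenv JAVAC jikes; ./configure
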
diff --git a/bdb/dist/aclocal_java/ac_prog_javac_works.ac b/bdb/dist/aclocal_java/ac_prog_javac_works.ac
new file mode 100644
index 00000000000..139a99f989b
--- /dev/null
+++ b/bdb/dist/aclocal_java/ac_prog_javac_works.ac
@@ -0,0 +1,35 @@
+dnl @synopsis AC_PROG_JAVAC_WORKS
+dnl
+dnl Internal use ONLY.
+dnl
+dnl Note: This is part of the set of autoconf M4 macros for Java programs.
+dnl It is VERY IMPORTANT that you download the whole set; some
+dnl macros depend on others. Unfortunately, the autoconf archive does not
+dnl support the concept of a set of macros, so I had to break it up for
+dnl submission.
+dnl The general documentation, as well as the sample configure.in, is
+dnl included in the AC_PROG_JAVA macro.
+dnl
+dnl @author Stephane Bortzmeyer <bortzmeyer@pasteur.fr>
+dnl @version $Id: ac_prog_javac_works.ac,v 1.1 2001/08/23 16:58:44 dda Exp $
+dnl
+AC_DEFUN([AC_PROG_JAVAC_WORKS],[
+AC_CACHE_CHECK([if $JAVAC works], ac_cv_prog_javac_works, [
+JAVA_TEST=Test.java
+CLASS_TEST=Test.class
+cat << \EOF > $JAVA_TEST
+/* [#]line __oline__ "configure" */
+public class Test {
+}
+EOF
+if AC_TRY_COMMAND($JAVAC $JAVACFLAGS $JAVA_TEST) >/dev/null 2>&1; then
+ ac_cv_prog_javac_works=yes
+else
+ AC_MSG_ERROR([The Java compiler $JAVAC failed (see config.log, check the CLASSPATH?)])
+ echo "configure: failed program was:" >&AC_FD_CC
+ cat $JAVA_TEST >&AC_FD_CC
+fi
+rm -f $JAVA_TEST $CLASS_TEST
+])
+AC_PROVIDE([$0])dnl
+])
diff --git a/bdb/dist/aclocal_java/ac_prog_javadoc.ac b/bdb/dist/aclocal_java/ac_prog_javadoc.ac
new file mode 100644
index 00000000000..5154d3f1f3b
--- /dev/null
+++ b/bdb/dist/aclocal_java/ac_prog_javadoc.ac
@@ -0,0 +1,37 @@
+dnl @synopsis AC_PROG_JAVADOC
+dnl
+dnl AC_PROG_JAVADOC tests for an existing javadoc generator. It uses the environment
+dnl variable JAVADOC, then tests in sequence various common javadoc generators.
+dnl
+dnl If you want to force a specific compiler:
+dnl
+dnl - at the configure.in level, set JAVADOC=yourgenerator before calling
+dnl AC_PROG_JAVADOC
+dnl
+dnl - at the configure level, setenv JAVADOC
+dnl
+dnl You can use the JAVADOC variable in your Makefile.in, with @JAVADOC@.
+dnl
+dnl Note: This macro depends on the autoconf M4 macros for Java programs.
+dnl It is VERY IMPORTANT that you download that whole set; some
+dnl macros depend on others. Unfortunately, the autoconf archive does not
+dnl support the concept of a set of macros, so I had to break it up for
+dnl submission.
+dnl
+dnl The general documentation of those macros, as well as the sample
+dnl configure.in, is included in the AC_PROG_JAVA macro.
+dnl
+dnl @author Egon Willighagen <egonw@sci.kun.nl>
+dnl @version $Id: ac_prog_javadoc.ac,v 1.1 2001/08/23 16:58:44 dda Exp $
+dnl
+AC_DEFUN([AC_PROG_JAVADOC],[
+AC_REQUIRE([AC_EXEEXT])dnl
+if test "x$JAVAPREFIX" = x; then
+ test "x$JAVADOC" = x && AC_CHECK_PROGS(JAVADOC, javadoc$EXEEXT)
+else
+ test "x$JAVADOC" = x && AC_CHECK_PROGS(JAVADOC, javadoc, $JAVAPREFIX)
+fi
+test "x$JAVADOC" = x && AC_MSG_ERROR([no acceptable javadoc generator found in \$PATH])
+AC_PROVIDE([$0])dnl
+])
+
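
A short consumption sketch; the output directory and package name are illustrative:

    dnl configure.in fragment (sketch): locate javadoc, export it as @JAVADOC@.
    AC_PROG_JAVADOC
    dnl A Makefile.in rule can then build the API documentation with, e.g.:
    dnl     @JAVADOC@ -d docs/api com.sleepycat.db
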
diff --git a/bdb/dist/aclocal_java/ac_prog_javah.ac b/bdb/dist/aclocal_java/ac_prog_javah.ac
new file mode 100644
index 00000000000..1b16d9e24e5
--- /dev/null
+++ b/bdb/dist/aclocal_java/ac_prog_javah.ac
@@ -0,0 +1,26 @@
+dnl @synopsis AC_PROG_JAVAH
+dnl
+dnl AC_PROG_JAVAH tests the availability of the javah header generator
+dnl and looks for the jni.h header file. If available, JAVAH is set to
+dnl the full path of javah and CPPFLAGS is updated accordingly.
+dnl
+dnl @author Luc Maisonobe
+dnl @version $Id: ac_prog_javah.ac,v 1.1 2001/08/23 16:58:44 dda Exp $
+dnl
+AC_DEFUN([AC_PROG_JAVAH],[
+AC_REQUIRE([AC_CANONICAL_SYSTEM])dnl
+AC_REQUIRE([AC_PROG_CPP])dnl
+AC_PATH_PROG(JAVAH,javah)
+if test x"`eval 'echo $ac_cv_path_JAVAH'`" != x ; then
+ AC_TRY_CPP([#include <jni.h>],,[
+ ac_save_CPPFLAGS="$CPPFLAGS"
+changequote(, )dnl
+ ac_dir=`echo $ac_cv_path_JAVAH | sed 's,\(.*\)/[^/]*/[^/]*$,\1/include,'`
+ ac_machdep=`echo $build_os | sed 's,[-0-9].*,,'`
+changequote([, ])dnl
+ CPPFLAGS="$ac_save_CPPFLAGS -I$ac_dir -I$ac_dir/$ac_machdep"
+ AC_TRY_CPP([#include <jni.h>],
+ ac_save_CPPFLAGS="$CPPFLAGS",
+ AC_MSG_WARN([unable to include <jni.h>]))
+ CPPFLAGS="$ac_save_CPPFLAGS"])
+fi])
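
A worked example of the two sed expressions above, assuming javah is found at /usr/local/jdk1.3/bin/javah and build_os is linux-gnu (both values are illustrative); the last line shows the net effect when <jni.h> preprocesses cleanly with the added flags:

    # ac_cv_path_JAVAH=/usr/local/jdk1.3/bin/javah
    #   's,\(.*\)/[^/]*/[^/]*$,\1/include,'  ->  ac_dir=/usr/local/jdk1.3/include
    # build_os=linux-gnu
    #   's,[-0-9].*,,'                       ->  ac_machdep=linux
    CPPFLAGS="$CPPFLAGS -I/usr/local/jdk1.3/include -I/usr/local/jdk1.3/include/linux"
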
diff --git a/bdb/dist/aclocal_java/ac_try_compile_java.ac b/bdb/dist/aclocal_java/ac_try_compile_java.ac
new file mode 100644
index 00000000000..775569ba054
--- /dev/null
+++ b/bdb/dist/aclocal_java/ac_try_compile_java.ac
@@ -0,0 +1,39 @@
+dnl @synopsis AC_TRY_COMPILE_JAVA
+dnl
+dnl AC_TRY_COMPILE_JAVA attempts to compile user-given source.
+dnl
+dnl *Warning*: its success or failure can depend on a proper setting of the
+dnl CLASSPATH env. variable.
+dnl
+dnl Note: This is part of the set of autoconf M4 macros for Java programs.
+dnl It is VERY IMPORTANT that you download the whole set; some
+dnl macros depend on others. Unfortunately, the autoconf archive does not
+dnl support the concept of a set of macros, so I had to break it up for
+dnl submission.
+dnl The general documentation, as well as the sample configure.in, is
+dnl included in the AC_PROG_JAVA macro.
+dnl
+dnl @author Devin Weaver <ktohg@tritarget.com>
+dnl @version $Id: ac_try_compile_java.ac,v 1.1 2001/08/23 16:58:44 dda Exp $
+dnl
+AC_DEFUN([AC_TRY_COMPILE_JAVA],[
+AC_REQUIRE([AC_PROG_JAVAC])dnl
+cat << \EOF > Test.java
+/* [#]line __oline__ "configure" */
+ifelse([$1], , , [import $1;])
+public class Test {
+[$2]
+}
+EOF
+if AC_TRY_COMMAND($JAVAC $JAVACFLAGS Test.java) && test -s Test.class
+then
+dnl Don't remove the temporary files here, so they can be examined.
+ ifelse([$3], , :, [$3])
+else
+ echo "configure: failed program was:" >&AC_FD_CC
+ cat Test.java >&AC_FD_CC
+ifelse([$4], , , [ rm -fr Test*
+ $4
+])dnl
+fi
+rm -fr Test*])
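
A usage sketch; the import, the field declaration, and the actions are illustrative. The arguments are: import, class-body text, action-if-compiled, action-if-not:

    dnl configure.in fragment (sketch): check that a small snippet compiles.
    AC_TRY_COMPILE_JAVA([java.util.Vector],
        [Vector v = new Vector();],
        [have_vector=yes],
        [AC_MSG_WARN([test snippet did not compile; check CLASSPATH])])
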
diff --git a/bdb/dist/aclocal_java/ac_try_run_javac.ac b/bdb/dist/aclocal_java/ac_try_run_javac.ac
new file mode 100644
index 00000000000..cf91306aff6
--- /dev/null
+++ b/bdb/dist/aclocal_java/ac_try_run_javac.ac
@@ -0,0 +1,40 @@
+dnl @synopsis AC_TRY_RUN_JAVA
+dnl
+dnl AC_TRY_RUN_JAVA attempts to compile and run user-given source.
+dnl
+dnl *Warning*: its success or failure can depend on a proper setting of the
+dnl CLASSPATH env. variable.
+dnl
+dnl Note: This is part of the set of autoconf M4 macros for Java programs.
+dnl It is VERY IMPORTANT that you download the whole set; some
+dnl macros depend on others. Unfortunately, the autoconf archive does not
+dnl support the concept of a set of macros, so I had to break it up for
+dnl submission.
+dnl The general documentation, as well as the sample configure.in, is
+dnl included in the AC_PROG_JAVA macro.
+dnl
+dnl @author Devin Weaver <ktohg@tritarget.com>
+dnl @version $Id: ac_try_run_javac.ac,v 1.1 2001/08/23 16:58:45 dda Exp $
+dnl
+AC_DEFUN([AC_TRY_RUN_JAVA],[
+AC_REQUIRE([AC_PROG_JAVAC])dnl
+AC_REQUIRE([AC_PROG_JAVA])dnl
+cat << \EOF > Test.java
+/* [#]line __oline__ "configure" */
+ifelse([$1], , , [import $1;])
+public class Test {
+[$2]
+}
+EOF
+if AC_TRY_COMMAND($JAVAC $JAVACFLAGS Test.java) && test -s Test.class && ($JAVA $JAVAFLAGS Test; exit) 2>/dev/null
+then
+dnl Don't remove the temporary files here, so they can be examined.
+ ifelse([$3], , :, [$3])
+else
+ echo "configure: failed program was:" >&AC_FD_CC
+ cat Test.java >&AC_FD_CC
+ifelse([$4], , , [ rm -fr Test*
+ $4
+])dnl
+fi
+rm -fr Test*])
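
The calling convention matches AC_TRY_COMPILE_JAVA, but the class is also executed, so the body should supply main; the example below leaves the import argument empty and is otherwise illustrative:

    dnl configure.in fragment (sketch): compile and run a snippet.
    AC_TRY_RUN_JAVA(,
        [public static void main(String[] args) { System.exit(0); }],
        [java_runs=yes],
        [AC_MSG_WARN([test snippet failed to compile or run])])
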
diff --git a/bdb/dist/build/script b/bdb/dist/buildrel
index 8eef3099f08..b796169c719 100644
--- a/bdb/dist/build/script
+++ b/bdb/dist/buildrel
@@ -1,12 +1,12 @@
#!/bin/sh -
-# $Id: script,v 1.21 2001/01/19 18:13:16 bostic Exp $
+# $Id: buildrel,v 1.39 2002/09/06 14:30:31 bostic Exp $
#
# Build the distribution archives.
#
# A set of commands intended to be cut and pasted into a csh window.
# Development tree, release home.
-setenv D /a/db
+setenv D `pwd`
# Update the release number.
cd $D/dist
@@ -21,13 +21,8 @@ cd $D && cvs -q update
cd $D/dist && sh s_all
cd $D && cvs -q commit
-# Build the documentation.
-cd $D/docs_src && make clean
-cd $D/docs_src && make
-cd $D/docs_src && make && make check
-
# Copy a development tree into a release tree.
-setenv R /a/db-$VERSION
+setenv R /var/tmp/db-$VERSION
rm -rf $R && mkdir -p $R
cd $D && tar cf - \
`cvs -q status | sed -n -e "/Repository/s;.*/CVSROOT/db/;;" -e "s/,v//p"` | \
@@ -37,34 +32,29 @@ cd $D && tar cf - \
cd $R/dist && sh s_perm
cd $R/dist && sh s_symlink
-# Build the documents.
-cd $R/docs_src && make
-
# Build a version.
cd $R && rm -rf build_run && mkdir build_run
cd $R/build_run && ~bostic/bin/dbconf && make >& mklog
# Smoke test.
-./ex_access
+cd $R/build_run && ./ex_access
-# Check the install
-make prefix=`pwd`/BDB install
+# Build the documentation.
+cd $R/docs_src && sh build clean
+cd $R/docs_src && sh build |& sed '/.html$/d'
-# Run distribution check scripts
-$R/dist/build/chk.def
-$R/dist/build/chk.define
-$R/dist/build/chk.offt
-$R/dist/build/chk.srcfiles
-$R/dist/build/chk.tags
+# Check the install
+cd $R/build_run && make prefix=`pwd`/BDB install
# Clean up the tree.
cd $R && rm -rf build_run docs_src
-cd $R && rm -rf test_thread test_purify test_server test_vxworks test/TODO
-cd $R && rm -rf test/upgrade/databases && mkdir test/upgrade/databases
+cd $R && rm -rf test/TODO test/upgrade test_perf test_purify
+cd $R && rm -rf test_server test_thread test_vxworks test_xa
# ACQUIRE ROOT PRIVILEGES
cd $R && find . -type d | xargs chmod 775
cd $R && find . -type f | xargs chmod 444
+cd $R && chmod 664 build_win32/*.dsp
cd $R/dist && sh s_perm
chown -R 100.100 $R
# DISCARD ROOT PRIVILEGES
@@ -76,19 +66,44 @@ cd $R/../db-${LR} && find . | sort > /tmp/__OLD
cd $R && find . | sort > /tmp/__NEW
diff -c /tmp/__OLD /tmp/__NEW
-# Create the tar archive release.
+# Create the crypto tar archive release.
setenv T "$R/../db-$VERSION.tar.gz"
cd $R/.. && tar cf - db-$VERSION | gzip --best > $T
chmod 444 $T
-# Create the zip archive release.
-#
-# Remove symbolic links to tags files. They're large and we don't want to
-# store real symbolic links in the archive for portability reasons.
+# Create the non-crypto tree.
+setenv RNC "$R/../db-$VERSION.NC"
+rm -rf $RNC $R/../__TMP && mkdir $R/../__TMP
+cd $R/../__TMP && gzcat $T | tar xpf - && mv -i db-$VERSION $RNC
+cd $R && rm -rf $R/../__TMP
+cd $RNC/dist && sh s_crypto
+
+# ACQUIRE ROOT PRIVILEGES
+cd $RNC && find . -type d | xargs chmod 775
+cd $RNC && find . -type f | xargs chmod 444
+cd $RNC && chmod 664 build_win32/*.dsp
+cd $RNC/dist && sh s_perm
+chown -R 100.100 $RNC
+# DISCARD ROOT PRIVILEGES
+
+# Create the non-crypto tar archive release.
+setenv T "$R/../db-$VERSION.NC.tar.gz"
+cd $RNC/.. && tar cf - db-$VERSION.NC | gzip --best > $T
+chmod 444 $T
+
+# Remove symbolic links to tags files. They're large and we don't want
+# to store real symbolic links in the zip archive for portability reasons.
# ACQUIRE ROOT PRIVILEGES
cd $R && rm -f `find . -type l -name 'tags'`
+cd $RNC && rm -f `find . -type l -name 'tags'`
# DISCARD ROOT PRIVILEGES
+# Create the crypto zip archive release.
setenv T "$R/../db-$VERSION.zip"
cd $R/.. && zip -r - db-$VERSION > $T
chmod 444 $T
+
+# Create the non-crypto zip archive release.
+setenv T "$R/../db-$VERSION.NC.zip"
+cd $RNC/.. && zip -r - db-$VERSION.NC > $T
+chmod 444 $T
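
A small verification step one could paste after the commands above to confirm that both archive pairs were produced and that the crypto and non-crypto trees actually differ; this is an illustrative sketch, not part of the release procedure:

    # csh sketch: list the four archives and spot-check the tree difference.
    cd $R/.. && ls -l db-$VERSION.tar.gz db-$VERSION.zip db-$VERSION.NC.tar.gz db-$VERSION.NC.zip
    diff -r db-$VERSION db-$VERSION.NC | head -20
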
diff --git a/bdb/dist/config.guess b/bdb/dist/config.guess
new file mode 100755
index 00000000000..fd30ab0314c
--- /dev/null
+++ b/bdb/dist/config.guess
@@ -0,0 +1,1354 @@
+#! /bin/sh
+# Attempt to guess a canonical system name.
+# Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
+# 2000, 2001, 2002 Free Software Foundation, Inc.
+
+timestamp='2002-07-23'
+
+# This file is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+#
+# As a special exception to the GNU General Public License, if you
+# distribute this file as part of a program that contains a
+# configuration script generated by Autoconf, you may include it under
+# the same distribution terms that you use for the rest of that program.
+
+# Originally written by Per Bothner <per@bothner.com>.
+# Please send patches to <config-patches@gnu.org>. Submit a context
+# diff and a properly formatted ChangeLog entry.
+#
+# This script attempts to guess a canonical system name similar to
+# config.sub. If it succeeds, it prints the system name on stdout, and
+# exits with 0. Otherwise, it exits with 1.
+#
+# The plan is that this can be called by configure scripts if you
+# don't specify an explicit build system type.
+
+me=`echo "$0" | sed -e 's,.*/,,'`
+
+usage="\
+Usage: $0 [OPTION]
+
+Output the configuration name of the system \`$me' is run on.
+
+Operation modes:
+ -h, --help print this help, then exit
+ -t, --time-stamp print date of last modification, then exit
+ -v, --version print version number, then exit
+
+Report bugs and patches to <config-patches@gnu.org>."
+
+version="\
+GNU config.guess ($timestamp)
+
+Originally written by Per Bothner.
+Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001
+Free Software Foundation, Inc.
+
+This is free software; see the source for copying conditions. There is NO
+warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE."
+
+help="
+Try \`$me --help' for more information."
+
+# Parse command line
+while test $# -gt 0 ; do
+ case $1 in
+ --time-stamp | --time* | -t )
+ echo "$timestamp" ; exit 0 ;;
+ --version | -v )
+ echo "$version" ; exit 0 ;;
+ --help | --h* | -h )
+ echo "$usage"; exit 0 ;;
+ -- ) # Stop option processing
+ shift; break ;;
+ - ) # Use stdin as input.
+ break ;;
+ -* )
+ echo "$me: invalid option $1$help" >&2
+ exit 1 ;;
+ * )
+ break ;;
+ esac
+done
+
+if test $# != 0; then
+ echo "$me: too many arguments$help" >&2
+ exit 1
+fi
+
+trap 'exit 1' 1 2 15
+
+# CC_FOR_BUILD -- compiler used by this script. Note that the use of a
+# compiler to aid in system detection is discouraged as it requires
+# temporary files to be created and, as you can see below, it is a
+# headache to deal with in a portable fashion.
+
+# Historically, `CC_FOR_BUILD' used to be named `HOST_CC'. We still
+# use `HOST_CC' if defined, but it is deprecated.
+
+# This shell variable is my proudest work .. or something. --bje
+
+set_cc_for_build='tmpdir=${TMPDIR-/tmp}/config-guess-$$ ;
+(old=`umask` && umask 077 && mkdir $tmpdir && umask $old && unset old)
+ || (echo "$me: cannot create $tmpdir" >&2 && exit 1) ;
+dummy=$tmpdir/dummy ;
+files="$dummy.c $dummy.o $dummy.rel $dummy" ;
+trap '"'"'rm -f $files; rmdir $tmpdir; exit 1'"'"' 1 2 15 ;
+case $CC_FOR_BUILD,$HOST_CC,$CC in
+ ,,) echo "int x;" > $dummy.c ;
+ for c in cc gcc c89 c99 ; do
+ if ($c $dummy.c -c -o $dummy.o) >/dev/null 2>&1 ; then
+ CC_FOR_BUILD="$c"; break ;
+ fi ;
+ done ;
+ rm -f $files ;
+ if test x"$CC_FOR_BUILD" = x ; then
+ CC_FOR_BUILD=no_compiler_found ;
+ fi
+ ;;
+ ,,*) CC_FOR_BUILD=$CC ;;
+ ,*,*) CC_FOR_BUILD=$HOST_CC ;;
+esac ;
+unset files'
+
+# This is needed to find uname on a Pyramid OSx when run in the BSD universe.
+# (ghazi@noc.rutgers.edu 1994-08-24)
+if (test -f /.attbin/uname) >/dev/null 2>&1 ; then
+ PATH=$PATH:/.attbin ; export PATH
+fi
+
+UNAME_MACHINE=`(uname -m) 2>/dev/null` || UNAME_MACHINE=unknown
+UNAME_RELEASE=`(uname -r) 2>/dev/null` || UNAME_RELEASE=unknown
+UNAME_SYSTEM=`(uname -s) 2>/dev/null` || UNAME_SYSTEM=unknown
+UNAME_VERSION=`(uname -v) 2>/dev/null` || UNAME_VERSION=unknown
+
+# Note: order is significant - the case branches are not exclusive.
+
+case "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" in
+ *:NetBSD:*:*)
+ # NetBSD (nbsd) targets should (where applicable) match one or
+	# more of the tuples: *-*-netbsdelf*, *-*-netbsdaout*,
+ # *-*-netbsdecoff* and *-*-netbsd*. For targets that recently
+ # switched to ELF, *-*-netbsd* would select the old
+ # object file format. This provides both forward
+ # compatibility and a consistent mechanism for selecting the
+ # object file format.
+ #
+ # Note: NetBSD doesn't particularly care about the vendor
+ # portion of the name. We always set it to "unknown".
+ sysctl="sysctl -n hw.machine_arch"
+ UNAME_MACHINE_ARCH=`(/sbin/$sysctl 2>/dev/null || \
+ /usr/sbin/$sysctl 2>/dev/null || echo unknown)`
+ case "${UNAME_MACHINE_ARCH}" in
+ armeb) machine=armeb-unknown ;;
+ arm*) machine=arm-unknown ;;
+ sh3el) machine=shl-unknown ;;
+ sh3eb) machine=sh-unknown ;;
+ *) machine=${UNAME_MACHINE_ARCH}-unknown ;;
+ esac
+ # The Operating System including object format, if it has switched
+ # to ELF recently, or will in the future.
+ case "${UNAME_MACHINE_ARCH}" in
+ arm*|i386|m68k|ns32k|sh3*|sparc|vax)
+ eval $set_cc_for_build
+ if echo __ELF__ | $CC_FOR_BUILD -E - 2>/dev/null \
+ | grep __ELF__ >/dev/null
+ then
+ # Once all utilities can be ECOFF (netbsdecoff) or a.out (netbsdaout).
+ # Return netbsd for either. FIX?
+ os=netbsd
+ else
+ os=netbsdelf
+ fi
+ ;;
+ *)
+ os=netbsd
+ ;;
+ esac
+ # The OS release
+ release=`echo ${UNAME_RELEASE}|sed -e 's/[-_].*/\./'`
+ # Since CPU_TYPE-MANUFACTURER-KERNEL-OPERATING_SYSTEM:
+ # contains redundant information, the shorter form:
+ # CPU_TYPE-MANUFACTURER-OPERATING_SYSTEM is used.
+ echo "${machine}-${os}${release}"
+ exit 0 ;;
+ amiga:OpenBSD:*:*)
+ echo m68k-unknown-openbsd${UNAME_RELEASE}
+ exit 0 ;;
+ arc:OpenBSD:*:*)
+ echo mipsel-unknown-openbsd${UNAME_RELEASE}
+ exit 0 ;;
+ hp300:OpenBSD:*:*)
+ echo m68k-unknown-openbsd${UNAME_RELEASE}
+ exit 0 ;;
+ mac68k:OpenBSD:*:*)
+ echo m68k-unknown-openbsd${UNAME_RELEASE}
+ exit 0 ;;
+ macppc:OpenBSD:*:*)
+ echo powerpc-unknown-openbsd${UNAME_RELEASE}
+ exit 0 ;;
+ mvme68k:OpenBSD:*:*)
+ echo m68k-unknown-openbsd${UNAME_RELEASE}
+ exit 0 ;;
+ mvme88k:OpenBSD:*:*)
+ echo m88k-unknown-openbsd${UNAME_RELEASE}
+ exit 0 ;;
+ mvmeppc:OpenBSD:*:*)
+ echo powerpc-unknown-openbsd${UNAME_RELEASE}
+ exit 0 ;;
+ pmax:OpenBSD:*:*)
+ echo mipsel-unknown-openbsd${UNAME_RELEASE}
+ exit 0 ;;
+ sgi:OpenBSD:*:*)
+ echo mipseb-unknown-openbsd${UNAME_RELEASE}
+ exit 0 ;;
+ sun3:OpenBSD:*:*)
+ echo m68k-unknown-openbsd${UNAME_RELEASE}
+ exit 0 ;;
+ wgrisc:OpenBSD:*:*)
+ echo mipsel-unknown-openbsd${UNAME_RELEASE}
+ exit 0 ;;
+ *:OpenBSD:*:*)
+ echo ${UNAME_MACHINE}-unknown-openbsd${UNAME_RELEASE}
+ exit 0 ;;
+ alpha:OSF1:*:*)
+ if test $UNAME_RELEASE = "V4.0"; then
+ UNAME_RELEASE=`/usr/sbin/sizer -v | awk '{print $3}'`
+ fi
+ # A Vn.n version is a released version.
+ # A Tn.n version is a released field test version.
+ # A Xn.n version is an unreleased experimental baselevel.
+ # 1.2 uses "1.2" for uname -r.
+ eval $set_cc_for_build
+ cat <<EOF >$dummy.s
+ .data
+\$Lformat:
+ .byte 37,100,45,37,120,10,0 # "%d-%x\n"
+
+ .text
+ .globl main
+ .align 4
+ .ent main
+main:
+ .frame \$30,16,\$26,0
+ ldgp \$29,0(\$27)
+ .prologue 1
+ .long 0x47e03d80 # implver \$0
+ lda \$2,-1
+ .long 0x47e20c21 # amask \$2,\$1
+ lda \$16,\$Lformat
+ mov \$0,\$17
+ not \$1,\$18
+ jsr \$26,printf
+ ldgp \$29,0(\$26)
+ mov 0,\$16
+ jsr \$26,exit
+ .end main
+EOF
+ $CC_FOR_BUILD $dummy.s -o $dummy 2>/dev/null
+ if test "$?" = 0 ; then
+ case `$dummy` in
+ 0-0)
+ UNAME_MACHINE="alpha"
+ ;;
+ 1-0)
+ UNAME_MACHINE="alphaev5"
+ ;;
+ 1-1)
+ UNAME_MACHINE="alphaev56"
+ ;;
+ 1-101)
+ UNAME_MACHINE="alphapca56"
+ ;;
+ 2-303)
+ UNAME_MACHINE="alphaev6"
+ ;;
+ 2-307)
+ UNAME_MACHINE="alphaev67"
+ ;;
+ 2-1307)
+ UNAME_MACHINE="alphaev68"
+ ;;
+ 3-1307)
+ UNAME_MACHINE="alphaev7"
+ ;;
+ esac
+ fi
+ rm -f $dummy.s $dummy && rmdir $tmpdir
+ echo ${UNAME_MACHINE}-dec-osf`echo ${UNAME_RELEASE} | sed -e 's/^[VTX]//' | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz'`
+ exit 0 ;;
+ Alpha\ *:Windows_NT*:*)
+ # How do we know it's Interix rather than the generic POSIX subsystem?
+ # Should we change UNAME_MACHINE based on the output of uname instead
+ # of the specific Alpha model?
+ echo alpha-pc-interix
+ exit 0 ;;
+ 21064:Windows_NT:50:3)
+ echo alpha-dec-winnt3.5
+ exit 0 ;;
+ Amiga*:UNIX_System_V:4.0:*)
+ echo m68k-unknown-sysv4
+ exit 0;;
+ *:[Aa]miga[Oo][Ss]:*:*)
+ echo ${UNAME_MACHINE}-unknown-amigaos
+ exit 0 ;;
+ *:[Mm]orph[Oo][Ss]:*:*)
+ echo ${UNAME_MACHINE}-unknown-morphos
+ exit 0 ;;
+ *:OS/390:*:*)
+ echo i370-ibm-openedition
+ exit 0 ;;
+ arm:RISC*:1.[012]*:*|arm:riscix:1.[012]*:*)
+ echo arm-acorn-riscix${UNAME_RELEASE}
+ exit 0;;
+ SR2?01:HI-UX/MPP:*:* | SR8000:HI-UX/MPP:*:*)
+ echo hppa1.1-hitachi-hiuxmpp
+ exit 0;;
+ Pyramid*:OSx*:*:* | MIS*:OSx*:*:* | MIS*:SMP_DC-OSx*:*:*)
+ # akee@wpdis03.wpafb.af.mil (Earle F. Ake) contributed MIS and NILE.
+ if test "`(/bin/universe) 2>/dev/null`" = att ; then
+ echo pyramid-pyramid-sysv3
+ else
+ echo pyramid-pyramid-bsd
+ fi
+ exit 0 ;;
+ NILE*:*:*:dcosx)
+ echo pyramid-pyramid-svr4
+ exit 0 ;;
+ DRS?6000:UNIX_SV:4.2*:7*)
+ case `/usr/bin/uname -p` in
+ sparc) echo sparc-icl-nx7 && exit 0 ;;
+ esac ;;
+ sun4H:SunOS:5.*:*)
+ echo sparc-hal-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'`
+ exit 0 ;;
+ sun4*:SunOS:5.*:* | tadpole*:SunOS:5.*:*)
+ echo sparc-sun-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'`
+ exit 0 ;;
+ i86pc:SunOS:5.*:*)
+ echo i386-pc-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'`
+ exit 0 ;;
+ sun4*:SunOS:6*:*)
+ # According to config.sub, this is the proper way to canonicalize
+ # SunOS6. Hard to guess exactly what SunOS6 will be like, but
+ # it's likely to be more like Solaris than SunOS4.
+ echo sparc-sun-solaris3`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'`
+ exit 0 ;;
+ sun4*:SunOS:*:*)
+ case "`/usr/bin/arch -k`" in
+ Series*|S4*)
+ UNAME_RELEASE=`uname -v`
+ ;;
+ esac
+ # Japanese Language versions have a version number like `4.1.3-JL'.
+ echo sparc-sun-sunos`echo ${UNAME_RELEASE}|sed -e 's/-/_/'`
+ exit 0 ;;
+ sun3*:SunOS:*:*)
+ echo m68k-sun-sunos${UNAME_RELEASE}
+ exit 0 ;;
+ sun*:*:4.2BSD:*)
+ UNAME_RELEASE=`(sed 1q /etc/motd | awk '{print substr($5,1,3)}') 2>/dev/null`
+ test "x${UNAME_RELEASE}" = "x" && UNAME_RELEASE=3
+ case "`/bin/arch`" in
+ sun3)
+ echo m68k-sun-sunos${UNAME_RELEASE}
+ ;;
+ sun4)
+ echo sparc-sun-sunos${UNAME_RELEASE}
+ ;;
+ esac
+ exit 0 ;;
+ aushp:SunOS:*:*)
+ echo sparc-auspex-sunos${UNAME_RELEASE}
+ exit 0 ;;
+ # The situation for MiNT is a little confusing. The machine name
+ # can be virtually anything (everything which is not
+ # "atarist" or "atariste" should at least have a processor
+ # > m68000). The system name ranges from "MiNT" through "FreeMiNT"
+ # to the lowercase versions "mint" and "freemint". Finally
+ # the system name "TOS" denotes a system which is actually not
+ # MiNT. But MiNT is downward compatible with TOS, so this should
+ # be no problem.

+ atarist[e]:*MiNT:*:* | atarist[e]:*mint:*:* | atarist[e]:*TOS:*:*)
+ echo m68k-atari-mint${UNAME_RELEASE}
+ exit 0 ;;
+ atari*:*MiNT:*:* | atari*:*mint:*:* | atarist[e]:*TOS:*:*)
+ echo m68k-atari-mint${UNAME_RELEASE}
+ exit 0 ;;
+ *falcon*:*MiNT:*:* | *falcon*:*mint:*:* | *falcon*:*TOS:*:*)
+ echo m68k-atari-mint${UNAME_RELEASE}
+ exit 0 ;;
+ milan*:*MiNT:*:* | milan*:*mint:*:* | *milan*:*TOS:*:*)
+ echo m68k-milan-mint${UNAME_RELEASE}
+ exit 0 ;;
+ hades*:*MiNT:*:* | hades*:*mint:*:* | *hades*:*TOS:*:*)
+ echo m68k-hades-mint${UNAME_RELEASE}
+ exit 0 ;;
+ *:*MiNT:*:* | *:*mint:*:* | *:*TOS:*:*)
+ echo m68k-unknown-mint${UNAME_RELEASE}
+ exit 0 ;;
+ powerpc:machten:*:*)
+ echo powerpc-apple-machten${UNAME_RELEASE}
+ exit 0 ;;
+ RISC*:Mach:*:*)
+ echo mips-dec-mach_bsd4.3
+ exit 0 ;;
+ RISC*:ULTRIX:*:*)
+ echo mips-dec-ultrix${UNAME_RELEASE}
+ exit 0 ;;
+ VAX*:ULTRIX*:*:*)
+ echo vax-dec-ultrix${UNAME_RELEASE}
+ exit 0 ;;
+ 2020:CLIX:*:* | 2430:CLIX:*:*)
+ echo clipper-intergraph-clix${UNAME_RELEASE}
+ exit 0 ;;
+ mips:*:*:UMIPS | mips:*:*:RISCos)
+ eval $set_cc_for_build
+ sed 's/^ //' << EOF >$dummy.c
+#ifdef __cplusplus
+#include <stdio.h> /* for printf() prototype */
+ int main (int argc, char *argv[]) {
+#else
+ int main (argc, argv) int argc; char *argv[]; {
+#endif
+ #if defined (host_mips) && defined (MIPSEB)
+ #if defined (SYSTYPE_SYSV)
+ printf ("mips-mips-riscos%ssysv\n", argv[1]); exit (0);
+ #endif
+ #if defined (SYSTYPE_SVR4)
+ printf ("mips-mips-riscos%ssvr4\n", argv[1]); exit (0);
+ #endif
+ #if defined (SYSTYPE_BSD43) || defined(SYSTYPE_BSD)
+ printf ("mips-mips-riscos%sbsd\n", argv[1]); exit (0);
+ #endif
+ #endif
+ exit (-1);
+ }
+EOF
+ $CC_FOR_BUILD $dummy.c -o $dummy \
+ && $dummy `echo "${UNAME_RELEASE}" | sed -n 's/\([0-9]*\).*/\1/p'` \
+ && rm -f $dummy.c $dummy && rmdir $tmpdir && exit 0
+ rm -f $dummy.c $dummy && rmdir $tmpdir
+ echo mips-mips-riscos${UNAME_RELEASE}
+ exit 0 ;;
+ Motorola:PowerMAX_OS:*:*)
+ echo powerpc-motorola-powermax
+ exit 0 ;;
+ Night_Hawk:*:*:PowerMAX_OS)
+ echo powerpc-harris-powermax
+ exit 0 ;;
+ Night_Hawk:Power_UNIX:*:*)
+ echo powerpc-harris-powerunix
+ exit 0 ;;
+ m88k:CX/UX:7*:*)
+ echo m88k-harris-cxux7
+ exit 0 ;;
+ m88k:*:4*:R4*)
+ echo m88k-motorola-sysv4
+ exit 0 ;;
+ m88k:*:3*:R3*)
+ echo m88k-motorola-sysv3
+ exit 0 ;;
+ AViiON:dgux:*:*)
+ # DG/UX returns AViiON for all architectures
+ UNAME_PROCESSOR=`/usr/bin/uname -p`
+ if [ $UNAME_PROCESSOR = mc88100 ] || [ $UNAME_PROCESSOR = mc88110 ]
+ then
+ if [ ${TARGET_BINARY_INTERFACE}x = m88kdguxelfx ] || \
+ [ ${TARGET_BINARY_INTERFACE}x = x ]
+ then
+ echo m88k-dg-dgux${UNAME_RELEASE}
+ else
+ echo m88k-dg-dguxbcs${UNAME_RELEASE}
+ fi
+ else
+ echo i586-dg-dgux${UNAME_RELEASE}
+ fi
+ exit 0 ;;
+ M88*:DolphinOS:*:*) # DolphinOS (SVR3)
+ echo m88k-dolphin-sysv3
+ exit 0 ;;
+ M88*:*:R3*:*)
+ # Delta 88k system running SVR3
+ echo m88k-motorola-sysv3
+ exit 0 ;;
+ XD88*:*:*:*) # Tektronix XD88 system running UTekV (SVR3)
+ echo m88k-tektronix-sysv3
+ exit 0 ;;
+ Tek43[0-9][0-9]:UTek:*:*) # Tektronix 4300 system running UTek (BSD)
+ echo m68k-tektronix-bsd
+ exit 0 ;;
+ *:IRIX*:*:*)
+ echo mips-sgi-irix`echo ${UNAME_RELEASE}|sed -e 's/-/_/g'`
+ exit 0 ;;
+ ????????:AIX?:[12].1:2) # AIX 2.2.1 or AIX 2.1.1 is RT/PC AIX.
+ echo romp-ibm-aix # uname -m gives an 8 hex-code CPU id
+ exit 0 ;; # Note that: echo "'`uname -s`'" gives 'AIX '
+ i*86:AIX:*:*)
+ echo i386-ibm-aix
+ exit 0 ;;
+ ia64:AIX:*:*)
+ if [ -x /usr/bin/oslevel ] ; then
+ IBM_REV=`/usr/bin/oslevel`
+ else
+ IBM_REV=${UNAME_VERSION}.${UNAME_RELEASE}
+ fi
+ echo ${UNAME_MACHINE}-ibm-aix${IBM_REV}
+ exit 0 ;;
+ *:AIX:2:3)
+ if grep bos325 /usr/include/stdio.h >/dev/null 2>&1; then
+ eval $set_cc_for_build
+ sed 's/^ //' << EOF >$dummy.c
+ #include <sys/systemcfg.h>
+
+ main()
+ {
+ if (!__power_pc())
+ exit(1);
+ puts("powerpc-ibm-aix3.2.5");
+ exit(0);
+ }
+EOF
+ $CC_FOR_BUILD $dummy.c -o $dummy && $dummy && rm -f $dummy.c $dummy && rmdir $tmpdir && exit 0
+ rm -f $dummy.c $dummy && rmdir $tmpdir
+ echo rs6000-ibm-aix3.2.5
+ elif grep bos324 /usr/include/stdio.h >/dev/null 2>&1; then
+ echo rs6000-ibm-aix3.2.4
+ else
+ echo rs6000-ibm-aix3.2
+ fi
+ exit 0 ;;
+ *:AIX:*:[45])
+ IBM_CPU_ID=`/usr/sbin/lsdev -C -c processor -S available | sed 1q | awk '{ print $1 }'`
+ if /usr/sbin/lsattr -El ${IBM_CPU_ID} | grep ' POWER' >/dev/null 2>&1; then
+ IBM_ARCH=rs6000
+ else
+ IBM_ARCH=powerpc
+ fi
+ if [ -x /usr/bin/oslevel ] ; then
+ IBM_REV=`/usr/bin/oslevel`
+ else
+ IBM_REV=${UNAME_VERSION}.${UNAME_RELEASE}
+ fi
+ echo ${IBM_ARCH}-ibm-aix${IBM_REV}
+ exit 0 ;;
+ *:AIX:*:*)
+ echo rs6000-ibm-aix
+ exit 0 ;;
+ ibmrt:4.4BSD:*|romp-ibm:BSD:*)
+ echo romp-ibm-bsd4.4
+ exit 0 ;;
+ ibmrt:*BSD:*|romp-ibm:BSD:*) # covers RT/PC BSD and
+ echo romp-ibm-bsd${UNAME_RELEASE} # 4.3 with uname added to
+ exit 0 ;; # report: romp-ibm BSD 4.3
+ *:BOSX:*:*)
+ echo rs6000-bull-bosx
+ exit 0 ;;
+ DPX/2?00:B.O.S.:*:*)
+ echo m68k-bull-sysv3
+ exit 0 ;;
+ 9000/[34]??:4.3bsd:1.*:*)
+ echo m68k-hp-bsd
+ exit 0 ;;
+ hp300:4.4BSD:*:* | 9000/[34]??:4.3bsd:2.*:*)
+ echo m68k-hp-bsd4.4
+ exit 0 ;;
+ 9000/[34678]??:HP-UX:*:*)
+ HPUX_REV=`echo ${UNAME_RELEASE}|sed -e 's/[^.]*.[0B]*//'`
+ case "${UNAME_MACHINE}" in
+ 9000/31? ) HP_ARCH=m68000 ;;
+ 9000/[34]?? ) HP_ARCH=m68k ;;
+ 9000/[678][0-9][0-9])
+ if [ -x /usr/bin/getconf ]; then
+ sc_cpu_version=`/usr/bin/getconf SC_CPU_VERSION 2>/dev/null`
+ sc_kernel_bits=`/usr/bin/getconf SC_KERNEL_BITS 2>/dev/null`
+ case "${sc_cpu_version}" in
+ 523) HP_ARCH="hppa1.0" ;; # CPU_PA_RISC1_0
+ 528) HP_ARCH="hppa1.1" ;; # CPU_PA_RISC1_1
+ 532) # CPU_PA_RISC2_0
+ case "${sc_kernel_bits}" in
+ 32) HP_ARCH="hppa2.0n" ;;
+ 64) HP_ARCH="hppa2.0w" ;;
+ '') HP_ARCH="hppa2.0" ;; # HP-UX 10.20
+ esac ;;
+ esac
+ fi
+ if [ "${HP_ARCH}" = "" ]; then
+ eval $set_cc_for_build
+ sed 's/^ //' << EOF >$dummy.c
+
+ #define _HPUX_SOURCE
+ #include <stdlib.h>
+ #include <unistd.h>
+
+ int main ()
+ {
+ #if defined(_SC_KERNEL_BITS)
+ long bits = sysconf(_SC_KERNEL_BITS);
+ #endif
+ long cpu = sysconf (_SC_CPU_VERSION);
+
+ switch (cpu)
+ {
+ case CPU_PA_RISC1_0: puts ("hppa1.0"); break;
+ case CPU_PA_RISC1_1: puts ("hppa1.1"); break;
+ case CPU_PA_RISC2_0:
+ #if defined(_SC_KERNEL_BITS)
+ switch (bits)
+ {
+ case 64: puts ("hppa2.0w"); break;
+ case 32: puts ("hppa2.0n"); break;
+ default: puts ("hppa2.0"); break;
+ } break;
+ #else /* !defined(_SC_KERNEL_BITS) */
+ puts ("hppa2.0"); break;
+ #endif
+ default: puts ("hppa1.0"); break;
+ }
+ exit (0);
+ }
+EOF
+ (CCOPTS= $CC_FOR_BUILD $dummy.c -o $dummy 2>/dev/null) && HP_ARCH=`$dummy`
+ if test -z "$HP_ARCH"; then HP_ARCH=hppa; fi
+ rm -f $dummy.c $dummy && rmdir $tmpdir
+ fi ;;
+ esac
+ echo ${HP_ARCH}-hp-hpux${HPUX_REV}
+ exit 0 ;;
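For reference, the HPUX_REV sed above reduces HP-UX's letter-prefixed release string to the bare number used in the triple: it deletes the leading non-dot run, the following character, and any leading zeros or B. A small sketch with an assumed release value (the hppa2.0w architecture is likewise only an example):

   UNAME_RELEASE=B.11.00                             # hypothetical sample value
   HPUX_REV=`echo ${UNAME_RELEASE}|sed -e 's/[^.]*.[0B]*//'`
   echo "hppa2.0w-hp-hpux${HPUX_REV}"                # prints: hppa2.0w-hp-hpux11.00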
+ ia64:HP-UX:*:*)
+ HPUX_REV=`echo ${UNAME_RELEASE}|sed -e 's/[^.]*.[0B]*//'`
+ echo ia64-hp-hpux${HPUX_REV}
+ exit 0 ;;
+ 3050*:HI-UX:*:*)
+ eval $set_cc_for_build
+ sed 's/^ //' << EOF >$dummy.c
+ #include <unistd.h>
+ int
+ main ()
+ {
+ long cpu = sysconf (_SC_CPU_VERSION);
+ /* The order matters, because CPU_IS_HP_MC68K erroneously returns
+ true for CPU_PA_RISC1_0. CPU_IS_PA_RISC returns correct
+ results, however. */
+ if (CPU_IS_PA_RISC (cpu))
+ {
+ switch (cpu)
+ {
+ case CPU_PA_RISC1_0: puts ("hppa1.0-hitachi-hiuxwe2"); break;
+ case CPU_PA_RISC1_1: puts ("hppa1.1-hitachi-hiuxwe2"); break;
+ case CPU_PA_RISC2_0: puts ("hppa2.0-hitachi-hiuxwe2"); break;
+ default: puts ("hppa-hitachi-hiuxwe2"); break;
+ }
+ }
+ else if (CPU_IS_HP_MC68K (cpu))
+ puts ("m68k-hitachi-hiuxwe2");
+ else puts ("unknown-hitachi-hiuxwe2");
+ exit (0);
+ }
+EOF
+ $CC_FOR_BUILD $dummy.c -o $dummy && $dummy && rm -f $dummy.c $dummy && rmdir $tmpdir && exit 0
+ rm -f $dummy.c $dummy && rmdir $tmpdir
+ echo unknown-hitachi-hiuxwe2
+ exit 0 ;;
+ 9000/7??:4.3bsd:*:* | 9000/8?[79]:4.3bsd:*:* )
+ echo hppa1.1-hp-bsd
+ exit 0 ;;
+ 9000/8??:4.3bsd:*:*)
+ echo hppa1.0-hp-bsd
+ exit 0 ;;
+ *9??*:MPE/iX:*:* | *3000*:MPE/iX:*:*)
+ echo hppa1.0-hp-mpeix
+ exit 0 ;;
+ hp7??:OSF1:*:* | hp8?[79]:OSF1:*:* )
+ echo hppa1.1-hp-osf
+ exit 0 ;;
+ hp8??:OSF1:*:*)
+ echo hppa1.0-hp-osf
+ exit 0 ;;
+ i*86:OSF1:*:*)
+ if [ -x /usr/sbin/sysversion ] ; then
+ echo ${UNAME_MACHINE}-unknown-osf1mk
+ else
+ echo ${UNAME_MACHINE}-unknown-osf1
+ fi
+ exit 0 ;;
+ parisc*:Lites*:*:*)
+ echo hppa1.1-hp-lites
+ exit 0 ;;
+ C1*:ConvexOS:*:* | convex:ConvexOS:C1*:*)
+ echo c1-convex-bsd
+ exit 0 ;;
+ C2*:ConvexOS:*:* | convex:ConvexOS:C2*:*)
+ if getsysinfo -f scalar_acc
+ then echo c32-convex-bsd
+ else echo c2-convex-bsd
+ fi
+ exit 0 ;;
+ C34*:ConvexOS:*:* | convex:ConvexOS:C34*:*)
+ echo c34-convex-bsd
+ exit 0 ;;
+ C38*:ConvexOS:*:* | convex:ConvexOS:C38*:*)
+ echo c38-convex-bsd
+ exit 0 ;;
+ C4*:ConvexOS:*:* | convex:ConvexOS:C4*:*)
+ echo c4-convex-bsd
+ exit 0 ;;
+ CRAY*Y-MP:*:*:*)
+ echo ymp-cray-unicos${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/'
+ exit 0 ;;
+ CRAY*[A-Z]90:*:*:*)
+ echo ${UNAME_MACHINE}-cray-unicos${UNAME_RELEASE} \
+ | sed -e 's/CRAY.*\([A-Z]90\)/\1/' \
+ -e y/ABCDEFGHIJKLMNOPQRSTUVWXYZ/abcdefghijklmnopqrstuvwxyz/ \
+ -e 's/\.[^.]*$/.X/'
+ exit 0 ;;
+ CRAY*TS:*:*:*)
+ echo t90-cray-unicos${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/'
+ exit 0 ;;
+ CRAY*T3D:*:*:*)
+ echo alpha-cray-unicosmk${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/'
+ exit 0 ;;
+ CRAY*T3E:*:*:*)
+ echo alphaev5-cray-unicosmk${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/'
+ exit 0 ;;
+ CRAY*SV1:*:*:*)
+ echo sv1-cray-unicos${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/'
+ exit 0 ;;
+ F30[01]:UNIX_System_V:*:* | F700:UNIX_System_V:*:*)
+ FUJITSU_PROC=`uname -m | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz'`
+ FUJITSU_SYS=`uname -p | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz' | sed -e 's/\///'`
+ FUJITSU_REL=`echo ${UNAME_RELEASE} | sed -e 's/ /_/'`
+ echo "${FUJITSU_PROC}-fujitsu-${FUJITSU_SYS}${FUJITSU_REL}"
+ exit 0 ;;
+ i*86:BSD/386:*:* | i*86:BSD/OS:*:* | *:Ascend\ Embedded/OS:*:*)
+ echo ${UNAME_MACHINE}-pc-bsdi${UNAME_RELEASE}
+ exit 0 ;;
+ sparc*:BSD/OS:*:*)
+ echo sparc-unknown-bsdi${UNAME_RELEASE}
+ exit 0 ;;
+ *:BSD/OS:*:*)
+ echo ${UNAME_MACHINE}-unknown-bsdi${UNAME_RELEASE}
+ exit 0 ;;
+ *:FreeBSD:*:*)
+ # Determine whether the default compiler uses glibc.
+ eval $set_cc_for_build
+ sed 's/^ //' << EOF >$dummy.c
+ #include <features.h>
+ #if __GLIBC__ >= 2
+ LIBC=gnu
+ #else
+ LIBC=
+ #endif
+EOF
+ eval `$CC_FOR_BUILD -E $dummy.c 2>/dev/null | grep ^LIBC=`
+ rm -f $dummy.c && rmdir $tmpdir
+ echo ${UNAME_MACHINE}-unknown-freebsd`echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'`${LIBC:+-$LIBC}
+ exit 0 ;;
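The FreeBSD branch above uses a compact idiom that appears several times in this script: write a tiny C file whose only non-directive lines are shell assignments, run the build compiler's preprocessor over it, grep the assignment back out, and eval it, so the preprocessor's view of a macro becomes a shell variable. A stripped-down sketch of the same idiom, assuming a working `cc` on the PATH and probing the standard __ELF__ macro:

   printf '#ifdef __ELF__\nOBJFMT=elf\n#else\nOBJFMT=aout\n#endif\n' > probe.c
   eval `cc -E probe.c 2>/dev/null | grep ^OBJFMT=`  # the surviving line sets OBJFMT
   rm -f probe.c
   echo "default object format: ${OBJFMT}"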
+ i*:CYGWIN*:*)
+ echo ${UNAME_MACHINE}-pc-cygwin
+ exit 0 ;;
+ i*:MINGW*:*)
+ echo ${UNAME_MACHINE}-pc-mingw32
+ exit 0 ;;
+ i*:PW*:*)
+ echo ${UNAME_MACHINE}-pc-pw32
+ exit 0 ;;
+ x86:Interix*:3*)
+ echo i386-pc-interix3
+ exit 0 ;;
+ i*:Windows_NT*:* | Pentium*:Windows_NT*:*)
+ # How do we know it's Interix rather than the generic POSIX subsystem?
+ # It also conflicts with pre-2.0 versions of AT&T UWIN. Should we
+ # change UNAME_MACHINE based on the output of uname instead of i386?
+ echo i386-pc-interix
+ exit 0 ;;
+ i*:UWIN*:*)
+ echo ${UNAME_MACHINE}-pc-uwin
+ exit 0 ;;
+ p*:CYGWIN*:*)
+ echo powerpcle-unknown-cygwin
+ exit 0 ;;
+ prep*:SunOS:5.*:*)
+ echo powerpcle-unknown-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'`
+ exit 0 ;;
+ *:GNU:*:*)
+ echo `echo ${UNAME_MACHINE}|sed -e 's,[-/].*$,,'`-unknown-gnu`echo ${UNAME_RELEASE}|sed -e 's,/.*$,,'`
+ exit 0 ;;
+ i*86:Minix:*:*)
+ echo ${UNAME_MACHINE}-pc-minix
+ exit 0 ;;
+ arm*:Linux:*:*)
+ echo ${UNAME_MACHINE}-unknown-linux-gnu
+ exit 0 ;;
+ ia64:Linux:*:*)
+ echo ${UNAME_MACHINE}-unknown-linux-gnu
+ exit 0 ;;
+ m68*:Linux:*:*)
+ echo ${UNAME_MACHINE}-unknown-linux-gnu
+ exit 0 ;;
+ mips:Linux:*:*)
+ eval $set_cc_for_build
+ sed 's/^ //' << EOF >$dummy.c
+ #undef CPU
+ #undef mips
+ #undef mipsel
+ #if defined(__MIPSEL__) || defined(__MIPSEL) || defined(_MIPSEL) || defined(MIPSEL)
+ CPU=mipsel
+ #else
+ #if defined(__MIPSEB__) || defined(__MIPSEB) || defined(_MIPSEB) || defined(MIPSEB)
+ CPU=mips
+ #else
+ CPU=
+ #endif
+ #endif
+EOF
+ eval `$CC_FOR_BUILD -E $dummy.c 2>/dev/null | grep ^CPU=`
+ rm -f $dummy.c && rmdir $tmpdir
+ test x"${CPU}" != x && echo "${CPU}-pc-linux-gnu" && exit 0
+ ;;
+ ppc:Linux:*:*)
+ echo powerpc-unknown-linux-gnu
+ exit 0 ;;
+ ppc64:Linux:*:*)
+ echo powerpc64-unknown-linux-gnu
+ exit 0 ;;
+ alpha:Linux:*:*)
+ case `sed -n '/^cpu model/s/^.*: \(.*\)/\1/p' < /proc/cpuinfo` in
+ EV5) UNAME_MACHINE=alphaev5 ;;
+ EV56) UNAME_MACHINE=alphaev56 ;;
+ PCA56) UNAME_MACHINE=alphapca56 ;;
+ PCA57) UNAME_MACHINE=alphapca56 ;;
+ EV6) UNAME_MACHINE=alphaev6 ;;
+ EV67) UNAME_MACHINE=alphaev67 ;;
+ EV68*) UNAME_MACHINE=alphaev68 ;;
+ esac
+ objdump --private-headers /bin/sh | grep ld.so.1 >/dev/null
+ if test "$?" = 0 ; then LIBC="libc1" ; else LIBC="" ; fi
+ echo ${UNAME_MACHINE}-unknown-linux-gnu${LIBC}
+ exit 0 ;;
+ parisc:Linux:*:* | hppa:Linux:*:*)
+ # Look for CPU level
+ case `grep '^cpu[^a-z]*:' /proc/cpuinfo 2>/dev/null | cut -d' ' -f2` in
+ PA7*) echo hppa1.1-unknown-linux-gnu ;;
+ PA8*) echo hppa2.0-unknown-linux-gnu ;;
+ *) echo hppa-unknown-linux-gnu ;;
+ esac
+ exit 0 ;;
+ parisc64:Linux:*:* | hppa64:Linux:*:*)
+ echo hppa64-unknown-linux-gnu
+ exit 0 ;;
+ s390:Linux:*:* | s390x:Linux:*:*)
+ echo ${UNAME_MACHINE}-ibm-linux
+ exit 0 ;;
+ sh*:Linux:*:*)
+ echo ${UNAME_MACHINE}-unknown-linux-gnu
+ exit 0 ;;
+ sparc:Linux:*:* | sparc64:Linux:*:*)
+ echo ${UNAME_MACHINE}-unknown-linux-gnu
+ exit 0 ;;
+ x86_64:Linux:*:*)
+ echo x86_64-unknown-linux-gnu
+ exit 0 ;;
+ i*86:Linux:*:*)
+ # The BFD linker knows what the default object file format is, so
+ # first see if it will tell us. cd to the root directory to prevent
+ # problems with other programs or directories called `ld' in the path.
+ # Set LC_ALL=C to ensure ld outputs messages in English.
+ ld_supported_targets=`cd /; LC_ALL=C ld --help 2>&1 \
+ | sed -ne '/supported targets:/!d
+ s/[ ][ ]*/ /g
+ s/.*supported targets: *//
+ s/ .*//
+ p'`
+ case "$ld_supported_targets" in
+ elf32-i386)
+ TENTATIVE="${UNAME_MACHINE}-pc-linux-gnu"
+ ;;
+ a.out-i386-linux)
+ echo "${UNAME_MACHINE}-pc-linux-gnuaout"
+ exit 0 ;;
+ coff-i386)
+ echo "${UNAME_MACHINE}-pc-linux-gnucoff"
+ exit 0 ;;
+ "")
+ # Either a pre-BFD a.out linker (linux-gnuoldld) or
+ # one that does not give us useful --help.
+ echo "${UNAME_MACHINE}-pc-linux-gnuoldld"
+ exit 0 ;;
+ esac
+ # Determine whether the default compiler is a.out or elf
+ eval $set_cc_for_build
+ sed 's/^ //' << EOF >$dummy.c
+ #include <features.h>
+ #ifdef __ELF__
+ # ifdef __GLIBC__
+ # if __GLIBC__ >= 2
+ LIBC=gnu
+ # else
+ LIBC=gnulibc1
+ # endif
+ # else
+ LIBC=gnulibc1
+ # endif
+ #else
+ #ifdef __INTEL_COMPILER
+ LIBC=gnu
+ #else
+ LIBC=gnuaout
+ #endif
+ #endif
+EOF
+ eval `$CC_FOR_BUILD -E $dummy.c 2>/dev/null | grep ^LIBC=`
+ rm -f $dummy.c && rmdir $tmpdir
+ test x"${LIBC}" != x && echo "${UNAME_MACHINE}-pc-linux-${LIBC}" && exit 0
+ test x"${TENTATIVE}" != x && echo "${TENTATIVE}" && exit 0
+ ;;
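The sed program fed by `ld --help` keeps only the "supported targets:" line, squeezes whitespace, and cuts everything but the first target name, which is what the case statement above dispatches on. Applied to a hypothetical help line (the sample text is assumed, not captured from a real linker):

   sample='ld: supported targets: elf32-i386 a.out-i386-linux coff-i386'
   echo "$sample" | sed -ne '/supported targets:/!d
       s/[ ][ ]*/ /g
       s/.*supported targets: *//
       s/ .*//
       p'                                            # prints: elf32-i386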
+ i*86:DYNIX/ptx:4*:*)
+ # ptx 4.0 does uname -s correctly, with DYNIX/ptx in there.
+ # earlier versions are messed up and put the nodename in both
+ # sysname and nodename.
+ echo i386-sequent-sysv4
+ exit 0 ;;
+ i*86:UNIX_SV:4.2MP:2.*)
+ # Unixware is an offshoot of SVR4, but it has its own version
+ # number series starting with 2...
+ # I am not positive that other SVR4 systems won't match this,
+ # I just have to hope. -- rms.
+ # Use sysv4.2uw... so that sysv4* matches it.
+ echo ${UNAME_MACHINE}-pc-sysv4.2uw${UNAME_VERSION}
+ exit 0 ;;
+ i*86:*:4.*:* | i*86:SYSTEM_V:4.*:*)
+ UNAME_REL=`echo ${UNAME_RELEASE} | sed 's/\/MP$//'`
+ if grep Novell /usr/include/link.h >/dev/null 2>/dev/null; then
+ echo ${UNAME_MACHINE}-univel-sysv${UNAME_REL}
+ else
+ echo ${UNAME_MACHINE}-pc-sysv${UNAME_REL}
+ fi
+ exit 0 ;;
+ i*86:*:5:[78]*)
+ case `/bin/uname -X | grep "^Machine"` in
+ *486*) UNAME_MACHINE=i486 ;;
+ *Pentium) UNAME_MACHINE=i586 ;;
+ *Pent*|*Celeron) UNAME_MACHINE=i686 ;;
+ esac
+ echo ${UNAME_MACHINE}-unknown-sysv${UNAME_RELEASE}${UNAME_SYSTEM}${UNAME_VERSION}
+ exit 0 ;;
+ i*86:*:3.2:*)
+ if test -f /usr/options/cb.name; then
+ UNAME_REL=`sed -n 's/.*Version //p' </usr/options/cb.name`
+ echo ${UNAME_MACHINE}-pc-isc$UNAME_REL
+ elif /bin/uname -X 2>/dev/null >/dev/null ; then
+ UNAME_REL=`(/bin/uname -X|grep Release|sed -e 's/.*= //')`
+ (/bin/uname -X|grep i80486 >/dev/null) && UNAME_MACHINE=i486
+ (/bin/uname -X|grep '^Machine.*Pentium' >/dev/null) \
+ && UNAME_MACHINE=i586
+ (/bin/uname -X|grep '^Machine.*Pent *II' >/dev/null) \
+ && UNAME_MACHINE=i686
+ (/bin/uname -X|grep '^Machine.*Pentium Pro' >/dev/null) \
+ && UNAME_MACHINE=i686
+ echo ${UNAME_MACHINE}-pc-sco$UNAME_REL
+ else
+ echo ${UNAME_MACHINE}-pc-sysv32
+ fi
+ exit 0 ;;
+ i*86:*DOS:*:*)
+ echo ${UNAME_MACHINE}-pc-msdosdjgpp
+ exit 0 ;;
+ pc:*:*:*)
+ # Left here for compatibility:
+ # uname -m on DJGPP always prints 'pc' and says nothing about
+ # the processor, so we play it safe by assuming i386.
+ echo i386-pc-msdosdjgpp
+ exit 0 ;;
+ Intel:Mach:3*:*)
+ echo i386-pc-mach3
+ exit 0 ;;
+ paragon:*:*:*)
+ echo i860-intel-osf1
+ exit 0 ;;
+ i860:*:4.*:*) # i860-SVR4
+ if grep Stardent /usr/include/sys/uadmin.h >/dev/null 2>&1 ; then
+ echo i860-stardent-sysv${UNAME_RELEASE} # Stardent Vistra i860-SVR4
+ else # Add other i860-SVR4 vendors below as they are discovered.
+ echo i860-unknown-sysv${UNAME_RELEASE} # Unknown i860-SVR4
+ fi
+ exit 0 ;;
+ mini*:CTIX:SYS*5:*)
+ # "miniframe"
+ echo m68010-convergent-sysv
+ exit 0 ;;
+ M68*:*:R3V[567]*:*)
+ test -r /sysV68 && echo 'm68k-motorola-sysv' && exit 0 ;;
+ 3[34]??:*:4.0:3.0 | 3[34]??A:*:4.0:3.0 | 3[34]??,*:*:4.0:3.0 | 3[34]??/*:*:4.0:3.0 | 4400:*:4.0:3.0 | 4850:*:4.0:3.0 | SKA40:*:4.0:3.0)
+ OS_REL=''
+ test -r /etc/.relid \
+ && OS_REL=.`sed -n 's/[^ ]* [^ ]* \([0-9][0-9]\).*/\1/p' < /etc/.relid`
+ /bin/uname -p 2>/dev/null | grep 86 >/dev/null \
+ && echo i486-ncr-sysv4.3${OS_REL} && exit 0
+ /bin/uname -p 2>/dev/null | /bin/grep entium >/dev/null \
+ && echo i586-ncr-sysv4.3${OS_REL} && exit 0 ;;
+ 3[34]??:*:4.0:* | 3[34]??,*:*:4.0:*)
+ /bin/uname -p 2>/dev/null | grep 86 >/dev/null \
+ && echo i486-ncr-sysv4 && exit 0 ;;
+ m68*:LynxOS:2.*:* | m68*:LynxOS:3.0*:*)
+ echo m68k-unknown-lynxos${UNAME_RELEASE}
+ exit 0 ;;
+ mc68030:UNIX_System_V:4.*:*)
+ echo m68k-atari-sysv4
+ exit 0 ;;
+ i*86:LynxOS:2.*:* | i*86:LynxOS:3.[01]*:* | i*86:LynxOS:4.0*:*)
+ echo i386-unknown-lynxos${UNAME_RELEASE}
+ exit 0 ;;
+ TSUNAMI:LynxOS:2.*:*)
+ echo sparc-unknown-lynxos${UNAME_RELEASE}
+ exit 0 ;;
+ rs6000:LynxOS:2.*:*)
+ echo rs6000-unknown-lynxos${UNAME_RELEASE}
+ exit 0 ;;
+ PowerPC:LynxOS:2.*:* | PowerPC:LynxOS:3.[01]*:* | PowerPC:LynxOS:4.0*:*)
+ echo powerpc-unknown-lynxos${UNAME_RELEASE}
+ exit 0 ;;
+ SM[BE]S:UNIX_SV:*:*)
+ echo mips-dde-sysv${UNAME_RELEASE}
+ exit 0 ;;
+ RM*:ReliantUNIX-*:*:*)
+ echo mips-sni-sysv4
+ exit 0 ;;
+ RM*:SINIX-*:*:*)
+ echo mips-sni-sysv4
+ exit 0 ;;
+ *:SINIX-*:*:*)
+ if uname -p 2>/dev/null >/dev/null ; then
+ UNAME_MACHINE=`(uname -p) 2>/dev/null`
+ echo ${UNAME_MACHINE}-sni-sysv4
+ else
+ echo ns32k-sni-sysv
+ fi
+ exit 0 ;;
+ PENTIUM:*:4.0*:*) # Unisys `ClearPath HMP IX 4000' SVR4/MP effort
+ # says <Richard.M.Bartel@ccMail.Census.GOV>
+ echo i586-unisys-sysv4
+ exit 0 ;;
+ *:UNIX_System_V:4*:FTX*)
+ # From Gerald Hewes <hewes@openmarket.com>.
+ # How about differentiating between stratus architectures? -djm
+ echo hppa1.1-stratus-sysv4
+ exit 0 ;;
+ *:*:*:FTX*)
+ # From seanf@swdc.stratus.com.
+ echo i860-stratus-sysv4
+ exit 0 ;;
+ *:VOS:*:*)
+ # From Paul.Green@stratus.com.
+ echo hppa1.1-stratus-vos
+ exit 0 ;;
+ mc68*:A/UX:*:*)
+ echo m68k-apple-aux${UNAME_RELEASE}
+ exit 0 ;;
+ news*:NEWS-OS:6*:*)
+ echo mips-sony-newsos6
+ exit 0 ;;
+ R[34]000:*System_V*:*:* | R4000:UNIX_SYSV:*:* | R*000:UNIX_SV:*:*)
+ if [ -d /usr/nec ]; then
+ echo mips-nec-sysv${UNAME_RELEASE}
+ else
+ echo mips-unknown-sysv${UNAME_RELEASE}
+ fi
+ exit 0 ;;
+ BeBox:BeOS:*:*) # BeOS running on hardware made by Be, PPC only.
+ echo powerpc-be-beos
+ exit 0 ;;
+ BeMac:BeOS:*:*) # BeOS running on Mac or Mac clone, PPC only.
+ echo powerpc-apple-beos
+ exit 0 ;;
+ BePC:BeOS:*:*) # BeOS running on Intel PC compatible.
+ echo i586-pc-beos
+ exit 0 ;;
+ SX-4:SUPER-UX:*:*)
+ echo sx4-nec-superux${UNAME_RELEASE}
+ exit 0 ;;
+ SX-5:SUPER-UX:*:*)
+ echo sx5-nec-superux${UNAME_RELEASE}
+ exit 0 ;;
+ Power*:Rhapsody:*:*)
+ echo powerpc-apple-rhapsody${UNAME_RELEASE}
+ exit 0 ;;
+ *:Rhapsody:*:*)
+ echo ${UNAME_MACHINE}-apple-rhapsody${UNAME_RELEASE}
+ exit 0 ;;
+ *:Darwin:*:*)
+ echo `uname -p`-apple-darwin${UNAME_RELEASE}
+ exit 0 ;;
+ *:procnto*:*:* | *:QNX:[0123456789]*:*)
+ UNAME_PROCESSOR=`uname -p`
+ if test "$UNAME_PROCESSOR" = "x86"; then
+ UNAME_PROCESSOR=i386
+ UNAME_MACHINE=pc
+ fi
+ echo ${UNAME_PROCESSOR}-${UNAME_MACHINE}-nto-qnx${UNAME_RELEASE}
+ exit 0 ;;
+ *:QNX:*:4*)
+ echo i386-pc-qnx
+ exit 0 ;;
+ NSR-[GKLNPTVW]:NONSTOP_KERNEL:*:*)
+ echo nsr-tandem-nsk${UNAME_RELEASE}
+ exit 0 ;;
+ *:NonStop-UX:*:*)
+ echo mips-compaq-nonstopux
+ exit 0 ;;
+ BS2000:POSIX*:*:*)
+ echo bs2000-siemens-sysv
+ exit 0 ;;
+ DS/*:UNIX_System_V:*:*)
+ echo ${UNAME_MACHINE}-${UNAME_SYSTEM}-${UNAME_RELEASE}
+ exit 0 ;;
+ *:Plan9:*:*)
+ # "uname -m" is not consistent, so use $cputype instead. 386
+ # is converted to i386 for consistency with other x86
+ # operating systems.
+ if test "$cputype" = "386"; then
+ UNAME_MACHINE=i386
+ else
+ UNAME_MACHINE="$cputype"
+ fi
+ echo ${UNAME_MACHINE}-unknown-plan9
+ exit 0 ;;
+ i*86:OS/2:*:*)
+ # If we were able to find `uname', then EMX Unix compatibility
+ # is probably installed.
+ echo ${UNAME_MACHINE}-pc-os2-emx
+ exit 0 ;;
+ *:TOPS-10:*:*)
+ echo pdp10-unknown-tops10
+ exit 0 ;;
+ *:TENEX:*:*)
+ echo pdp10-unknown-tenex
+ exit 0 ;;
+ KS10:TOPS-20:*:* | KL10:TOPS-20:*:* | TYPE4:TOPS-20:*:*)
+ echo pdp10-dec-tops20
+ exit 0 ;;
+ XKL-1:TOPS-20:*:* | TYPE5:TOPS-20:*:*)
+ echo pdp10-xkl-tops20
+ exit 0 ;;
+ *:TOPS-20:*:*)
+ echo pdp10-unknown-tops20
+ exit 0 ;;
+ *:ITS:*:*)
+ echo pdp10-unknown-its
+ exit 0 ;;
+ i*86:XTS-300:*:STOP)
+ echo ${UNAME_MACHINE}-unknown-stop
+ exit 0 ;;
+ i*86:atheos:*:*)
+ echo ${UNAME_MACHINE}-unknown-atheos
+ exit 0 ;;
+esac
+
+#echo '(No uname command or uname output not recognized.)' 1>&2
+#echo "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" 1>&2
+
+eval $set_cc_for_build
+cat >$dummy.c <<EOF
+#ifdef _SEQUENT_
+# include <sys/types.h>
+# include <sys/utsname.h>
+#endif
+main ()
+{
+#if defined (sony)
+#if defined (MIPSEB)
+ /* BFD wants "bsd" instead of "newsos". Perhaps BFD should be changed,
+ I don't know.... */
+ printf ("mips-sony-bsd\n"); exit (0);
+#else
+#include <sys/param.h>
+ printf ("m68k-sony-newsos%s\n",
+#ifdef NEWSOS4
+ "4"
+#else
+ ""
+#endif
+ ); exit (0);
+#endif
+#endif
+
+#if defined (__arm) && defined (__acorn) && defined (__unix)
+ printf ("arm-acorn-riscix"); exit (0);
+#endif
+
+#if defined (hp300) && !defined (hpux)
+ printf ("m68k-hp-bsd\n"); exit (0);
+#endif
+
+#if defined (NeXT)
+#if !defined (__ARCHITECTURE__)
+#define __ARCHITECTURE__ "m68k"
+#endif
+ int version;
+ version=`(hostinfo | sed -n 's/.*NeXT Mach \([0-9]*\).*/\1/p') 2>/dev/null`;
+ if (version < 4)
+ printf ("%s-next-nextstep%d\n", __ARCHITECTURE__, version);
+ else
+ printf ("%s-next-openstep%d\n", __ARCHITECTURE__, version);
+ exit (0);
+#endif
+
+#if defined (MULTIMAX) || defined (n16)
+#if defined (UMAXV)
+ printf ("ns32k-encore-sysv\n"); exit (0);
+#else
+#if defined (CMU)
+ printf ("ns32k-encore-mach\n"); exit (0);
+#else
+ printf ("ns32k-encore-bsd\n"); exit (0);
+#endif
+#endif
+#endif
+
+#if defined (__386BSD__)
+ printf ("i386-pc-bsd\n"); exit (0);
+#endif
+
+#if defined (sequent)
+#if defined (i386)
+ printf ("i386-sequent-dynix\n"); exit (0);
+#endif
+#if defined (ns32000)
+ printf ("ns32k-sequent-dynix\n"); exit (0);
+#endif
+#endif
+
+#if defined (_SEQUENT_)
+ struct utsname un;
+
+ uname(&un);
+
+ if (strncmp(un.version, "V2", 2) == 0) {
+ printf ("i386-sequent-ptx2\n"); exit (0);
+ }
+ if (strncmp(un.version, "V1", 2) == 0) { /* XXX is V1 correct? */
+ printf ("i386-sequent-ptx1\n"); exit (0);
+ }
+ printf ("i386-sequent-ptx\n"); exit (0);
+
+#endif
+
+#if defined (vax)
+# if !defined (ultrix)
+# include <sys/param.h>
+# if defined (BSD)
+# if BSD == 43
+ printf ("vax-dec-bsd4.3\n"); exit (0);
+# else
+# if BSD == 199006
+ printf ("vax-dec-bsd4.3reno\n"); exit (0);
+# else
+ printf ("vax-dec-bsd\n"); exit (0);
+# endif
+# endif
+# else
+ printf ("vax-dec-bsd\n"); exit (0);
+# endif
+# else
+ printf ("vax-dec-ultrix\n"); exit (0);
+# endif
+#endif
+
+#if defined (alliant) && defined (i860)
+ printf ("i860-alliant-bsd\n"); exit (0);
+#endif
+
+ exit (1);
+}
+EOF
+
+$CC_FOR_BUILD $dummy.c -o $dummy 2>/dev/null && $dummy && rm -f $dummy.c $dummy && rmdir $tmpdir && exit 0
+rm -f $dummy.c $dummy && rmdir $tmpdir
+
+# Apollos put the system type in the environment.
+
+test -d /usr/apollo && { echo ${ISP}-apollo-${SYSTYPE}; exit 0; }
+
+# Convex versions that predate uname can use getsysinfo(1)
+
+if [ -x /usr/convex/getsysinfo ]
+then
+ case `getsysinfo -f cpu_type` in
+ c1*)
+ echo c1-convex-bsd
+ exit 0 ;;
+ c2*)
+ if getsysinfo -f scalar_acc
+ then echo c32-convex-bsd
+ else echo c2-convex-bsd
+ fi
+ exit 0 ;;
+ c34*)
+ echo c34-convex-bsd
+ exit 0 ;;
+ c38*)
+ echo c38-convex-bsd
+ exit 0 ;;
+ c4*)
+ echo c4-convex-bsd
+ exit 0 ;;
+ esac
+fi
+
+cat >&2 <<EOF
+$0: unable to guess system type
+
+This script, last modified $timestamp, has failed to recognize
+the operating system you are using. It is advised that you
+download the most up-to-date version of the config scripts from
+
+ ftp://ftp.gnu.org/pub/gnu/config/
+
+If the version you run ($0) is already up to date, please
+send the following data and any information you think might be
+pertinent to <config-patches@gnu.org> in order to provide the needed
+information to handle your system.
+
+config.guess timestamp = $timestamp
+
+uname -m = `(uname -m) 2>/dev/null || echo unknown`
+uname -r = `(uname -r) 2>/dev/null || echo unknown`
+uname -s = `(uname -s) 2>/dev/null || echo unknown`
+uname -v = `(uname -v) 2>/dev/null || echo unknown`
+
+/usr/bin/uname -p = `(/usr/bin/uname -p) 2>/dev/null`
+/bin/uname -X = `(/bin/uname -X) 2>/dev/null`
+
+hostinfo = `(hostinfo) 2>/dev/null`
+/bin/universe = `(/bin/universe) 2>/dev/null`
+/usr/bin/arch -k = `(/usr/bin/arch -k) 2>/dev/null`
+/bin/arch = `(/bin/arch) 2>/dev/null`
+/usr/bin/oslevel = `(/usr/bin/oslevel) 2>/dev/null`
+/usr/convex/getsysinfo = `(/usr/convex/getsysinfo) 2>/dev/null`
+
+UNAME_MACHINE = ${UNAME_MACHINE}
+UNAME_RELEASE = ${UNAME_RELEASE}
+UNAME_SYSTEM = ${UNAME_SYSTEM}
+UNAME_VERSION = ${UNAME_VERSION}
+EOF
+
+exit 1
+
+# Local variables:
+# eval: (add-hook 'write-file-hooks 'time-stamp)
+# time-stamp-start: "timestamp='"
+# time-stamp-format: "%:y-%02m-%02d"
+# time-stamp-end: "'"
+# End:
diff --git a/bdb/dist/config.sub b/bdb/dist/config.sub
new file mode 100755
index 00000000000..9ff085efaf7
--- /dev/null
+++ b/bdb/dist/config.sub
@@ -0,0 +1,1460 @@
+#! /bin/sh
+# Configuration validation subroutine script.
+# Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
+# 2000, 2001, 2002 Free Software Foundation, Inc.
+
+timestamp='2002-07-03'
+
+# This file is (in principle) common to ALL GNU software.
+# The presence of a machine in this file suggests that SOME GNU software
+# can handle that machine. It does not imply ALL GNU software can.
+#
+# This file is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place - Suite 330,
+# Boston, MA 02111-1307, USA.
+
+# As a special exception to the GNU General Public License, if you
+# distribute this file as part of a program that contains a
+# configuration script generated by Autoconf, you may include it under
+# the same distribution terms that you use for the rest of that program.
+
+# Please send patches to <config-patches@gnu.org>. Submit a context
+# diff and a properly formatted ChangeLog entry.
+#
+# Configuration subroutine to validate and canonicalize a configuration type.
+# Supply the specified configuration type as an argument.
+# If it is invalid, we print an error message on stderr and exit with code 1.
+# Otherwise, we print the canonical config type on stdout and succeed.
+
+# This file is supposed to be the same for all GNU packages
+# and recognize all the CPU types, system types and aliases
+# that are meaningful with *any* GNU software.
+# Each package is responsible for reporting which valid configurations
+# it does not support. The user should be able to distinguish
+# a failure to support a valid configuration from a meaningless
+# configuration.
+
+# The goal of this file is to map all the various variations of a given
+# machine specification into a single specification in the form:
+# CPU_TYPE-MANUFACTURER-OPERATING_SYSTEM
+# or in some cases, the newer four-part form:
+# CPU_TYPE-MANUFACTURER-KERNEL-OPERATING_SYSTEM
+# It is wrong to echo any other type of specification.
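In practice the script is run with either a short alias or a partial triple and prints the canonical name on stdout. Two illustrative invocations, with results worked out from the alias and default-OS tables later in this file (the ./ path is assumed):

   sh ./config.sub sun4          # -> sparc-sun-sunos4.1.1 (alias plus Sun's default OS)
   sh ./config.sub i686-linux    # -> i686-pc-linux-gnu    (company and OS filled in)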
+
+me=`echo "$0" | sed -e 's,.*/,,'`
+
+usage="\
+Usage: $0 [OPTION] CPU-MFR-OPSYS
+ $0 [OPTION] ALIAS
+
+Canonicalize a configuration name.
+
+Operation modes:
+ -h, --help print this help, then exit
+ -t, --time-stamp print date of last modification, then exit
+ -v, --version print version number, then exit
+
+Report bugs and patches to <config-patches@gnu.org>."
+
+version="\
+GNU config.sub ($timestamp)
+
+Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001
+Free Software Foundation, Inc.
+
+This is free software; see the source for copying conditions. There is NO
+warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE."
+
+help="
+Try \`$me --help' for more information."
+
+# Parse command line
+while test $# -gt 0 ; do
+ case $1 in
+ --time-stamp | --time* | -t )
+ echo "$timestamp" ; exit 0 ;;
+ --version | -v )
+ echo "$version" ; exit 0 ;;
+ --help | --h* | -h )
+ echo "$usage"; exit 0 ;;
+ -- ) # Stop option processing
+ shift; break ;;
+ - ) # Use stdin as input.
+ break ;;
+ -* )
+ echo "$me: invalid option $1$help"
+ exit 1 ;;
+
+ *local*)
+ # First pass through any local machine types.
+ echo $1
+ exit 0;;
+
+ * )
+ break ;;
+ esac
+done
+
+case $# in
+ 0) echo "$me: missing argument$help" >&2
+ exit 1;;
+ 1) ;;
+ *) echo "$me: too many arguments$help" >&2
+ exit 1;;
+esac
+
+# Separate what the user gave into CPU-COMPANY and OS or KERNEL-OS (if any).
+# Here we must recognize all the valid KERNEL-OS combinations.
+maybe_os=`echo $1 | sed 's/^\(.*\)-\([^-]*-[^-]*\)$/\2/'`
+case $maybe_os in
+ nto-qnx* | linux-gnu* | freebsd*-gnu* | storm-chaos* | os2-emx* | windows32-* | rtmk-nova*)
+ os=-$maybe_os
+ basic_machine=`echo $1 | sed 's/^\(.*\)-\([^-]*-[^-]*\)$/\1/'`
+ ;;
+ *)
+ basic_machine=`echo $1 | sed 's/-[^-]*$//'`
+ if [ $basic_machine != $1 ]
+ then os=`echo $1 | sed 's/.*-/-/'`
+ else os=; fi
+ ;;
+esac
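The two sed expressions split on the last two hyphen-separated fields so that a trailing KERNEL-OS pair such as linux-gnu is not mistaken for a company name. A short sketch of the same split on an assumed input:

   input=mipsel-unknown-linux-gnu                                       # hypothetical input
   maybe_os=`echo $input | sed 's/^\(.*\)-\([^-]*-[^-]*\)$/\2/'`        # -> linux-gnu
   basic_machine=`echo $input | sed 's/^\(.*\)-\([^-]*-[^-]*\)$/\1/'`   # -> mipsel-unknown
   echo "$basic_machine with os -$maybe_os"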
+
+### Let's recognize common machines as not being operating systems so
+### that things like config.sub decstation-3100 work. We also
+### recognize some manufacturers as not being operating systems, so we
+### can provide default operating systems below.
+case $os in
+ -sun*os*)
+ # Prevent the following clause from handling this invalid input.
+ ;;
+ -dec* | -mips* | -sequent* | -encore* | -pc532* | -sgi* | -sony* | \
+ -att* | -7300* | -3300* | -delta* | -motorola* | -sun[234]* | \
+ -unicom* | -ibm* | -next | -hp | -isi* | -apollo | -altos* | \
+ -convergent* | -ncr* | -news | -32* | -3600* | -3100* | -hitachi* |\
+ -c[123]* | -convex* | -sun | -crds | -omron* | -dg | -ultra | -tti* | \
+ -harris | -dolphin | -highlevel | -gould | -cbm | -ns | -masscomp | \
+ -apple | -axis)
+ os=
+ basic_machine=$1
+ ;;
+ -sim | -cisco | -oki | -wec | -winbond)
+ os=
+ basic_machine=$1
+ ;;
+ -scout)
+ ;;
+ -wrs)
+ os=-vxworks
+ basic_machine=$1
+ ;;
+ -chorusos*)
+ os=-chorusos
+ basic_machine=$1
+ ;;
+ -chorusrdb)
+ os=-chorusrdb
+ basic_machine=$1
+ ;;
+ -hiux*)
+ os=-hiuxwe2
+ ;;
+ -sco5)
+ os=-sco3.2v5
+ basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
+ ;;
+ -sco4)
+ os=-sco3.2v4
+ basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
+ ;;
+ -sco3.2.[4-9]*)
+ os=`echo $os | sed -e 's/sco3.2./sco3.2v/'`
+ basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
+ ;;
+ -sco3.2v[4-9]*)
+ # Don't forget version if it is 3.2v4 or newer.
+ basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
+ ;;
+ -sco*)
+ os=-sco3.2v2
+ basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
+ ;;
+ -udk*)
+ basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
+ ;;
+ -isc)
+ os=-isc2.2
+ basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
+ ;;
+ -clix*)
+ basic_machine=clipper-intergraph
+ ;;
+ -isc*)
+ basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
+ ;;
+ -lynx*)
+ os=-lynxos
+ ;;
+ -ptx*)
+ basic_machine=`echo $1 | sed -e 's/86-.*/86-sequent/'`
+ ;;
+ -windowsnt*)
+ os=`echo $os | sed -e 's/windowsnt/winnt/'`
+ ;;
+ -psos*)
+ os=-psos
+ ;;
+ -mint | -mint[0-9]*)
+ basic_machine=m68k-atari
+ os=-mint
+ ;;
+esac
+
+# Decode aliases for certain CPU-COMPANY combinations.
+case $basic_machine in
+ # Recognize the basic CPU types without company name.
+ # Some are omitted here because they have special meanings below.
+ 1750a | 580 \
+ | a29k \
+ | alpha | alphaev[4-8] | alphaev56 | alphaev6[78] | alphapca5[67] \
+ | alpha64 | alpha64ev[4-8] | alpha64ev56 | alpha64ev6[78] | alpha64pca5[67] \
+ | arc | arm | arm[bl]e | arme[lb] | armv[2345] | armv[345][lb] | avr \
+ | c4x | clipper \
+ | d10v | d30v | dlx | dsp16xx \
+ | fr30 | frv \
+ | h8300 | h8500 | hppa | hppa1.[01] | hppa2.0 | hppa2.0[nw] | hppa64 \
+ | i370 | i860 | i960 | ia64 \
+ | ip2k \
+ | m32r | m68000 | m68k | m88k | mcore \
+ | mips | mipsbe | mipseb | mipsel | mipsle \
+ | mips16 \
+ | mips64 | mips64el \
+ | mips64orion | mips64orionel \
+ | mips64vr4100 | mips64vr4100el \
+ | mips64vr4300 | mips64vr4300el \
+ | mips64vr5000 | mips64vr5000el \
+ | mipsisa32 | mipsisa32el \
+ | mipsisa64 | mipsisa64el \
+ | mipsisa64sb1 | mipsisa64sb1el \
+ | mipstx39 | mipstx39el \
+ | mn10200 | mn10300 \
+ | ns16k | ns32k \
+ | openrisc | or32 \
+ | pdp10 | pdp11 | pj | pjl \
+ | powerpc | powerpc64 | powerpc64le | powerpcle | ppcbe \
+ | pyramid \
+ | sh | sh[1234] | sh3e | sh[34]eb | shbe | shle | sh[1234]le | sh3ele \
+ | sh64 | sh64le \
+ | sparc | sparc64 | sparc86x | sparclet | sparclite | sparcv9 | sparcv9b \
+ | strongarm \
+ | tahoe | thumb | tic80 | tron \
+ | v850 | v850e \
+ | we32k \
+ | x86 | xscale | xstormy16 | xtensa \
+ | z8k)
+ basic_machine=$basic_machine-unknown
+ ;;
+ m6811 | m68hc11 | m6812 | m68hc12)
+ # Motorola 68HC11/12.
+ basic_machine=$basic_machine-unknown
+ os=-none
+ ;;
+ m88110 | m680[12346]0 | m683?2 | m68360 | m5200 | v70 | w65 | z8k)
+ ;;
+
+ # We use `pc' rather than `unknown'
+ # because (1) that's what they normally are, and
+ # (2) the word "unknown" tends to confuse beginning users.
+ i*86 | x86_64)
+ basic_machine=$basic_machine-pc
+ ;;
+ # Object if more than one company name word.
+ *-*-*)
+ echo Invalid configuration \`$1\': machine \`$basic_machine\' not recognized 1>&2
+ exit 1
+ ;;
+ # Recognize the basic CPU types with company name.
+ 580-* \
+ | a29k-* \
+ | alpha-* | alphaev[4-8]-* | alphaev56-* | alphaev6[78]-* \
+ | alpha64-* | alpha64ev[4-8]-* | alpha64ev56-* | alpha64ev6[78]-* \
+ | alphapca5[67]-* | alpha64pca5[67]-* | arc-* \
+ | arm-* | armbe-* | armle-* | armeb-* | armv*-* \
+ | avr-* \
+ | bs2000-* \
+ | c[123]* | c30-* | [cjt]90-* | c54x-* \
+ | clipper-* | cydra-* \
+ | d10v-* | d30v-* | dlx-* \
+ | elxsi-* \
+ | f30[01]-* | f700-* | fr30-* | frv-* | fx80-* \
+ | h8300-* | h8500-* \
+ | hppa-* | hppa1.[01]-* | hppa2.0-* | hppa2.0[nw]-* | hppa64-* \
+ | i*86-* | i860-* | i960-* | ia64-* \
+ | ip2k-* \
+ | m32r-* \
+ | m68000-* | m680[012346]0-* | m68360-* | m683?2-* | m68k-* \
+ | m88110-* | m88k-* | mcore-* \
+ | mips-* | mipsbe-* | mipseb-* | mipsel-* | mipsle-* \
+ | mips16-* \
+ | mips64-* | mips64el-* \
+ | mips64orion-* | mips64orionel-* \
+ | mips64vr4100-* | mips64vr4100el-* \
+ | mips64vr4300-* | mips64vr4300el-* \
+ | mips64vr5000-* | mips64vr5000el-* \
+ | mipsisa32-* | mipsisa32el-* \
+ | mipsisa64-* | mipsisa64el-* \
+ | mipsisa64sb1-* | mipsisa64sb1el-* \
+ | mipstx39-* | mipstx39el-* \
+ | none-* | np1-* | ns16k-* | ns32k-* \
+ | orion-* \
+ | pdp10-* | pdp11-* | pj-* | pjl-* | pn-* | power-* \
+ | powerpc-* | powerpc64-* | powerpc64le-* | powerpcle-* | ppcbe-* \
+ | pyramid-* \
+ | romp-* | rs6000-* \
+ | sh-* | sh[1234]-* | sh3e-* | sh[34]eb-* | shbe-* \
+ | shle-* | sh[1234]le-* | sh3ele-* | sh64-* | sh64le-* \
+ | sparc-* | sparc64-* | sparc86x-* | sparclet-* | sparclite-* \
+ | sparcv9-* | sparcv9b-* | strongarm-* | sv1-* | sx?-* \
+ | tahoe-* | thumb-* | tic30-* | tic54x-* | tic80-* | tron-* \
+ | v850-* | v850e-* | vax-* \
+ | we32k-* \
+ | x86-* | x86_64-* | xps100-* | xscale-* | xstormy16-* \
+ | xtensa-* \
+ | ymp-* \
+ | z8k-*)
+ ;;
+ # Recognize the various machine names and aliases which stand
+ # for a CPU type and a company and sometimes even an OS.
+ 386bsd)
+ basic_machine=i386-unknown
+ os=-bsd
+ ;;
+ 3b1 | 7300 | 7300-att | att-7300 | pc7300 | safari | unixpc)
+ basic_machine=m68000-att
+ ;;
+ 3b*)
+ basic_machine=we32k-att
+ ;;
+ a29khif)
+ basic_machine=a29k-amd
+ os=-udi
+ ;;
+ adobe68k)
+ basic_machine=m68010-adobe
+ os=-scout
+ ;;
+ alliant | fx80)
+ basic_machine=fx80-alliant
+ ;;
+ altos | altos3068)
+ basic_machine=m68k-altos
+ ;;
+ am29k)
+ basic_machine=a29k-none
+ os=-bsd
+ ;;
+ amdahl)
+ basic_machine=580-amdahl
+ os=-sysv
+ ;;
+ amiga | amiga-*)
+ basic_machine=m68k-unknown
+ ;;
+ amigaos | amigados)
+ basic_machine=m68k-unknown
+ os=-amigaos
+ ;;
+ amigaunix | amix)
+ basic_machine=m68k-unknown
+ os=-sysv4
+ ;;
+ apollo68)
+ basic_machine=m68k-apollo
+ os=-sysv
+ ;;
+ apollo68bsd)
+ basic_machine=m68k-apollo
+ os=-bsd
+ ;;
+ aux)
+ basic_machine=m68k-apple
+ os=-aux
+ ;;
+ balance)
+ basic_machine=ns32k-sequent
+ os=-dynix
+ ;;
+ c90)
+ basic_machine=c90-cray
+ os=-unicos
+ ;;
+ convex-c1)
+ basic_machine=c1-convex
+ os=-bsd
+ ;;
+ convex-c2)
+ basic_machine=c2-convex
+ os=-bsd
+ ;;
+ convex-c32)
+ basic_machine=c32-convex
+ os=-bsd
+ ;;
+ convex-c34)
+ basic_machine=c34-convex
+ os=-bsd
+ ;;
+ convex-c38)
+ basic_machine=c38-convex
+ os=-bsd
+ ;;
+ cray | j90)
+ basic_machine=j90-cray
+ os=-unicos
+ ;;
+ crds | unos)
+ basic_machine=m68k-crds
+ ;;
+ cris | cris-* | etrax*)
+ basic_machine=cris-axis
+ ;;
+ da30 | da30-*)
+ basic_machine=m68k-da30
+ ;;
+ decstation | decstation-3100 | pmax | pmax-* | pmin | dec3100 | decstatn)
+ basic_machine=mips-dec
+ ;;
+ decsystem10* | dec10*)
+ basic_machine=pdp10-dec
+ os=-tops10
+ ;;
+ decsystem20* | dec20*)
+ basic_machine=pdp10-dec
+ os=-tops20
+ ;;
+ delta | 3300 | motorola-3300 | motorola-delta \
+ | 3300-motorola | delta-motorola)
+ basic_machine=m68k-motorola
+ ;;
+ delta88)
+ basic_machine=m88k-motorola
+ os=-sysv3
+ ;;
+ dpx20 | dpx20-*)
+ basic_machine=rs6000-bull
+ os=-bosx
+ ;;
+ dpx2* | dpx2*-bull)
+ basic_machine=m68k-bull
+ os=-sysv3
+ ;;
+ ebmon29k)
+ basic_machine=a29k-amd
+ os=-ebmon
+ ;;
+ elxsi)
+ basic_machine=elxsi-elxsi
+ os=-bsd
+ ;;
+ encore | umax | mmax)
+ basic_machine=ns32k-encore
+ ;;
+ es1800 | OSE68k | ose68k | ose | OSE)
+ basic_machine=m68k-ericsson
+ os=-ose
+ ;;
+ fx2800)
+ basic_machine=i860-alliant
+ ;;
+ genix)
+ basic_machine=ns32k-ns
+ ;;
+ gmicro)
+ basic_machine=tron-gmicro
+ os=-sysv
+ ;;
+ go32)
+ basic_machine=i386-pc
+ os=-go32
+ ;;
+ h3050r* | hiux*)
+ basic_machine=hppa1.1-hitachi
+ os=-hiuxwe2
+ ;;
+ h8300hms)
+ basic_machine=h8300-hitachi
+ os=-hms
+ ;;
+ h8300xray)
+ basic_machine=h8300-hitachi
+ os=-xray
+ ;;
+ h8500hms)
+ basic_machine=h8500-hitachi
+ os=-hms
+ ;;
+ harris)
+ basic_machine=m88k-harris
+ os=-sysv3
+ ;;
+ hp300-*)
+ basic_machine=m68k-hp
+ ;;
+ hp300bsd)
+ basic_machine=m68k-hp
+ os=-bsd
+ ;;
+ hp300hpux)
+ basic_machine=m68k-hp
+ os=-hpux
+ ;;
+ hp3k9[0-9][0-9] | hp9[0-9][0-9])
+ basic_machine=hppa1.0-hp
+ ;;
+ hp9k2[0-9][0-9] | hp9k31[0-9])
+ basic_machine=m68000-hp
+ ;;
+ hp9k3[2-9][0-9])
+ basic_machine=m68k-hp
+ ;;
+ hp9k6[0-9][0-9] | hp6[0-9][0-9])
+ basic_machine=hppa1.0-hp
+ ;;
+ hp9k7[0-79][0-9] | hp7[0-79][0-9])
+ basic_machine=hppa1.1-hp
+ ;;
+ hp9k78[0-9] | hp78[0-9])
+ # FIXME: really hppa2.0-hp
+ basic_machine=hppa1.1-hp
+ ;;
+ hp9k8[67]1 | hp8[67]1 | hp9k80[24] | hp80[24] | hp9k8[78]9 | hp8[78]9 | hp9k893 | hp893)
+ # FIXME: really hppa2.0-hp
+ basic_machine=hppa1.1-hp
+ ;;
+ hp9k8[0-9][13679] | hp8[0-9][13679])
+ basic_machine=hppa1.1-hp
+ ;;
+ hp9k8[0-9][0-9] | hp8[0-9][0-9])
+ basic_machine=hppa1.0-hp
+ ;;
+ hppa-next)
+ os=-nextstep3
+ ;;
+ hppaosf)
+ basic_machine=hppa1.1-hp
+ os=-osf
+ ;;
+ hppro)
+ basic_machine=hppa1.1-hp
+ os=-proelf
+ ;;
+ i370-ibm* | ibm*)
+ basic_machine=i370-ibm
+ ;;
+# I'm not sure what "Sysv32" means. Should this be sysv3.2?
+ i*86v32)
+ basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'`
+ os=-sysv32
+ ;;
+ i*86v4*)
+ basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'`
+ os=-sysv4
+ ;;
+ i*86v)
+ basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'`
+ os=-sysv
+ ;;
+ i*86sol2)
+ basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'`
+ os=-solaris2
+ ;;
+ i386mach)
+ basic_machine=i386-mach
+ os=-mach
+ ;;
+ i386-vsta | vsta)
+ basic_machine=i386-unknown
+ os=-vsta
+ ;;
+ iris | iris4d)
+ basic_machine=mips-sgi
+ case $os in
+ -irix*)
+ ;;
+ *)
+ os=-irix4
+ ;;
+ esac
+ ;;
+ isi68 | isi)
+ basic_machine=m68k-isi
+ os=-sysv
+ ;;
+ m88k-omron*)
+ basic_machine=m88k-omron
+ ;;
+ magnum | m3230)
+ basic_machine=mips-mips
+ os=-sysv
+ ;;
+ merlin)
+ basic_machine=ns32k-utek
+ os=-sysv
+ ;;
+ mingw32)
+ basic_machine=i386-pc
+ os=-mingw32
+ ;;
+ miniframe)
+ basic_machine=m68000-convergent
+ ;;
+ *mint | -mint[0-9]* | *MiNT | *MiNT[0-9]*)
+ basic_machine=m68k-atari
+ os=-mint
+ ;;
+ mips3*-*)
+ basic_machine=`echo $basic_machine | sed -e 's/mips3/mips64/'`
+ ;;
+ mips3*)
+ basic_machine=`echo $basic_machine | sed -e 's/mips3/mips64/'`-unknown
+ ;;
+ mmix*)
+ basic_machine=mmix-knuth
+ os=-mmixware
+ ;;
+ monitor)
+ basic_machine=m68k-rom68k
+ os=-coff
+ ;;
+ morphos)
+ basic_machine=powerpc-unknown
+ os=-morphos
+ ;;
+ msdos)
+ basic_machine=i386-pc
+ os=-msdos
+ ;;
+ mvs)
+ basic_machine=i370-ibm
+ os=-mvs
+ ;;
+ ncr3000)
+ basic_machine=i486-ncr
+ os=-sysv4
+ ;;
+ netbsd386)
+ basic_machine=i386-unknown
+ os=-netbsd
+ ;;
+ netwinder)
+ basic_machine=armv4l-rebel
+ os=-linux
+ ;;
+ news | news700 | news800 | news900)
+ basic_machine=m68k-sony
+ os=-newsos
+ ;;
+ news1000)
+ basic_machine=m68030-sony
+ os=-newsos
+ ;;
+ news-3600 | risc-news)
+ basic_machine=mips-sony
+ os=-newsos
+ ;;
+ necv70)
+ basic_machine=v70-nec
+ os=-sysv
+ ;;
+ next | m*-next )
+ basic_machine=m68k-next
+ case $os in
+ -nextstep* )
+ ;;
+ -ns2*)
+ os=-nextstep2
+ ;;
+ *)
+ os=-nextstep3
+ ;;
+ esac
+ ;;
+ nh3000)
+ basic_machine=m68k-harris
+ os=-cxux
+ ;;
+ nh[45]000)
+ basic_machine=m88k-harris
+ os=-cxux
+ ;;
+ nindy960)
+ basic_machine=i960-intel
+ os=-nindy
+ ;;
+ mon960)
+ basic_machine=i960-intel
+ os=-mon960
+ ;;
+ nonstopux)
+ basic_machine=mips-compaq
+ os=-nonstopux
+ ;;
+ np1)
+ basic_machine=np1-gould
+ ;;
+ nsr-tandem)
+ basic_machine=nsr-tandem
+ ;;
+ op50n-* | op60c-*)
+ basic_machine=hppa1.1-oki
+ os=-proelf
+ ;;
+ or32 | or32-*)
+ basic_machine=or32-unknown
+ os=-coff
+ ;;
+ OSE68000 | ose68000)
+ basic_machine=m68000-ericsson
+ os=-ose
+ ;;
+ os68k)
+ basic_machine=m68k-none
+ os=-os68k
+ ;;
+ pa-hitachi)
+ basic_machine=hppa1.1-hitachi
+ os=-hiuxwe2
+ ;;
+ paragon)
+ basic_machine=i860-intel
+ os=-osf
+ ;;
+ pbd)
+ basic_machine=sparc-tti
+ ;;
+ pbb)
+ basic_machine=m68k-tti
+ ;;
+ pc532 | pc532-*)
+ basic_machine=ns32k-pc532
+ ;;
+ pentium | p5 | k5 | k6 | nexgen | viac3)
+ basic_machine=i586-pc
+ ;;
+ pentiumpro | p6 | 6x86 | athlon)
+ basic_machine=i686-pc
+ ;;
+ pentiumii | pentium2)
+ basic_machine=i686-pc
+ ;;
+ pentium-* | p5-* | k5-* | k6-* | nexgen-* | viac3-*)
+ basic_machine=i586-`echo $basic_machine | sed 's/^[^-]*-//'`
+ ;;
+ pentiumpro-* | p6-* | 6x86-* | athlon-*)
+ basic_machine=i686-`echo $basic_machine | sed 's/^[^-]*-//'`
+ ;;
+ pentiumii-* | pentium2-*)
+ basic_machine=i686-`echo $basic_machine | sed 's/^[^-]*-//'`
+ ;;
+ pn)
+ basic_machine=pn-gould
+ ;;
+ power) basic_machine=power-ibm
+ ;;
+ ppc) basic_machine=powerpc-unknown
+ ;;
+ ppc-*) basic_machine=powerpc-`echo $basic_machine | sed 's/^[^-]*-//'`
+ ;;
+ ppcle | powerpclittle | ppc-le | powerpc-little)
+ basic_machine=powerpcle-unknown
+ ;;
+ ppcle-* | powerpclittle-*)
+ basic_machine=powerpcle-`echo $basic_machine | sed 's/^[^-]*-//'`
+ ;;
+ ppc64) basic_machine=powerpc64-unknown
+ ;;
+ ppc64-*) basic_machine=powerpc64-`echo $basic_machine | sed 's/^[^-]*-//'`
+ ;;
+ ppc64le | powerpc64little | ppc64-le | powerpc64-little)
+ basic_machine=powerpc64le-unknown
+ ;;
+ ppc64le-* | powerpc64little-*)
+ basic_machine=powerpc64le-`echo $basic_machine | sed 's/^[^-]*-//'`
+ ;;
+ ps2)
+ basic_machine=i386-ibm
+ ;;
+ pw32)
+ basic_machine=i586-unknown
+ os=-pw32
+ ;;
+ rom68k)
+ basic_machine=m68k-rom68k
+ os=-coff
+ ;;
+ rm[46]00)
+ basic_machine=mips-siemens
+ ;;
+ rtpc | rtpc-*)
+ basic_machine=romp-ibm
+ ;;
+ s390 | s390-*)
+ basic_machine=s390-ibm
+ ;;
+ s390x | s390x-*)
+ basic_machine=s390x-ibm
+ ;;
+ sa29200)
+ basic_machine=a29k-amd
+ os=-udi
+ ;;
+ sequent)
+ basic_machine=i386-sequent
+ ;;
+ sh)
+ basic_machine=sh-hitachi
+ os=-hms
+ ;;
+ sparclite-wrs | simso-wrs)
+ basic_machine=sparclite-wrs
+ os=-vxworks
+ ;;
+ sps7)
+ basic_machine=m68k-bull
+ os=-sysv2
+ ;;
+ spur)
+ basic_machine=spur-unknown
+ ;;
+ st2000)
+ basic_machine=m68k-tandem
+ ;;
+ stratus)
+ basic_machine=i860-stratus
+ os=-sysv4
+ ;;
+ sun2)
+ basic_machine=m68000-sun
+ ;;
+ sun2os3)
+ basic_machine=m68000-sun
+ os=-sunos3
+ ;;
+ sun2os4)
+ basic_machine=m68000-sun
+ os=-sunos4
+ ;;
+ sun3os3)
+ basic_machine=m68k-sun
+ os=-sunos3
+ ;;
+ sun3os4)
+ basic_machine=m68k-sun
+ os=-sunos4
+ ;;
+ sun4os3)
+ basic_machine=sparc-sun
+ os=-sunos3
+ ;;
+ sun4os4)
+ basic_machine=sparc-sun
+ os=-sunos4
+ ;;
+ sun4sol2)
+ basic_machine=sparc-sun
+ os=-solaris2
+ ;;
+ sun3 | sun3-*)
+ basic_machine=m68k-sun
+ ;;
+ sun4)
+ basic_machine=sparc-sun
+ ;;
+ sun386 | sun386i | roadrunner)
+ basic_machine=i386-sun
+ ;;
+ sv1)
+ basic_machine=sv1-cray
+ os=-unicos
+ ;;
+ symmetry)
+ basic_machine=i386-sequent
+ os=-dynix
+ ;;
+ t3d)
+ basic_machine=alpha-cray
+ os=-unicos
+ ;;
+ t3e)
+ basic_machine=alphaev5-cray
+ os=-unicos
+ ;;
+ t90)
+ basic_machine=t90-cray
+ os=-unicos
+ ;;
+ tic54x | c54x*)
+ basic_machine=tic54x-unknown
+ os=-coff
+ ;;
+ tx39)
+ basic_machine=mipstx39-unknown
+ ;;
+ tx39el)
+ basic_machine=mipstx39el-unknown
+ ;;
+ toad1)
+ basic_machine=pdp10-xkl
+ os=-tops20
+ ;;
+ tower | tower-32)
+ basic_machine=m68k-ncr
+ ;;
+ udi29k)
+ basic_machine=a29k-amd
+ os=-udi
+ ;;
+ ultra3)
+ basic_machine=a29k-nyu
+ os=-sym1
+ ;;
+ v810 | necv810)
+ basic_machine=v810-nec
+ os=-none
+ ;;
+ vaxv)
+ basic_machine=vax-dec
+ os=-sysv
+ ;;
+ vms)
+ basic_machine=vax-dec
+ os=-vms
+ ;;
+ vpp*|vx|vx-*)
+ basic_machine=f301-fujitsu
+ ;;
+ vxworks960)
+ basic_machine=i960-wrs
+ os=-vxworks
+ ;;
+ vxworks68)
+ basic_machine=m68k-wrs
+ os=-vxworks
+ ;;
+ vxworks29k)
+ basic_machine=a29k-wrs
+ os=-vxworks
+ ;;
+ w65*)
+ basic_machine=w65-wdc
+ os=-none
+ ;;
+ w89k-*)
+ basic_machine=hppa1.1-winbond
+ os=-proelf
+ ;;
+ windows32)
+ basic_machine=i386-pc
+ os=-windows32-msvcrt
+ ;;
+ xps | xps100)
+ basic_machine=xps100-honeywell
+ ;;
+ ymp)
+ basic_machine=ymp-cray
+ os=-unicos
+ ;;
+ z8k-*-coff)
+ basic_machine=z8k-unknown
+ os=-sim
+ ;;
+ none)
+ basic_machine=none-none
+ os=-none
+ ;;
+
+# Here we handle the default manufacturer of certain CPU types. It is in
+# some cases the only manufacturer, in others, it is the most popular.
+ w89k)
+ basic_machine=hppa1.1-winbond
+ ;;
+ op50n)
+ basic_machine=hppa1.1-oki
+ ;;
+ op60c)
+ basic_machine=hppa1.1-oki
+ ;;
+ romp)
+ basic_machine=romp-ibm
+ ;;
+ rs6000)
+ basic_machine=rs6000-ibm
+ ;;
+ vax)
+ basic_machine=vax-dec
+ ;;
+ pdp10)
+ # there are many clones, so DEC is not a safe bet
+ basic_machine=pdp10-unknown
+ ;;
+ pdp11)
+ basic_machine=pdp11-dec
+ ;;
+ we32k)
+ basic_machine=we32k-att
+ ;;
+ sh3 | sh4 | sh3eb | sh4eb | sh[1234]le | sh3ele)
+ basic_machine=sh-unknown
+ ;;
+ sh64)
+ basic_machine=sh64-unknown
+ ;;
+ sparc | sparcv9 | sparcv9b)
+ basic_machine=sparc-sun
+ ;;
+ cydra)
+ basic_machine=cydra-cydrome
+ ;;
+ orion)
+ basic_machine=orion-highlevel
+ ;;
+ orion105)
+ basic_machine=clipper-highlevel
+ ;;
+ mac | mpw | mac-mpw)
+ basic_machine=m68k-apple
+ ;;
+ pmac | pmac-mpw)
+ basic_machine=powerpc-apple
+ ;;
+ c4x*)
+ basic_machine=c4x-none
+ os=-coff
+ ;;
+ *-unknown)
+ # Make sure to match an already-canonicalized machine name.
+ ;;
+ *)
+ echo Invalid configuration \`$1\': machine \`$basic_machine\' not recognized 1>&2
+ exit 1
+ ;;
+esac
+
+# Here we canonicalize certain aliases for manufacturers.
+case $basic_machine in
+ *-digital*)
+ basic_machine=`echo $basic_machine | sed 's/digital.*/dec/'`
+ ;;
+ *-commodore*)
+ basic_machine=`echo $basic_machine | sed 's/commodore.*/cbm/'`
+ ;;
+ *)
+ ;;
+esac
+
+# Decode manufacturer-specific aliases for certain operating systems.
+
+if [ x"$os" != x"" ]
+then
+case $os in
+ # First match some system type aliases
+ # that might get confused with valid system types.
+ # -solaris* is a basic system type, with this one exception.
+ -solaris1 | -solaris1.*)
+ os=`echo $os | sed -e 's|solaris1|sunos4|'`
+ ;;
+ -solaris)
+ os=-solaris2
+ ;;
+ -svr4*)
+ os=-sysv4
+ ;;
+ -unixware*)
+ os=-sysv4.2uw
+ ;;
+ -gnu/linux*)
+ os=`echo $os | sed -e 's|gnu/linux|linux-gnu|'`
+ ;;
+ # First accept the basic system types.
+ # The portable systems come first.
+ # Each alternative MUST END IN A *, to match a version number.
+ # -sysv* is not here because it comes later, after sysvr4.
+ -gnu* | -bsd* | -mach* | -minix* | -genix* | -ultrix* | -irix* \
+ | -*vms* | -sco* | -esix* | -isc* | -aix* | -sunos | -sunos[34]*\
+ | -hpux* | -unos* | -osf* | -luna* | -dgux* | -solaris* | -sym* \
+ | -amigaos* | -amigados* | -msdos* | -newsos* | -unicos* | -aof* \
+ | -aos* \
+ | -nindy* | -vxsim* | -vxworks* | -ebmon* | -hms* | -mvs* \
+ | -clix* | -riscos* | -uniplus* | -iris* | -rtu* | -xenix* \
+ | -hiux* | -386bsd* | -netbsd* | -openbsd* | -freebsd* | -riscix* \
+ | -lynxos* | -bosx* | -nextstep* | -cxux* | -aout* | -elf* | -oabi* \
+ | -ptx* | -coff* | -ecoff* | -winnt* | -domain* | -vsta* \
+ | -udi* | -eabi* | -lites* | -ieee* | -go32* | -aux* \
+ | -chorusos* | -chorusrdb* \
+ | -cygwin* | -pe* | -psos* | -moss* | -proelf* | -rtems* \
+ | -mingw32* | -linux-gnu* | -uxpv* | -beos* | -mpeix* | -udk* \
+ | -interix* | -uwin* | -rhapsody* | -darwin* | -opened* \
+ | -openstep* | -oskit* | -conix* | -pw32* | -nonstopux* \
+ | -storm-chaos* | -tops10* | -tenex* | -tops20* | -its* \
+ | -os2* | -vos* | -palmos* | -uclinux* | -nucleus* \
+ | -morphos* | -superux* | -rtmk* | -rtmk-nova* | -windiss* | -powermax*)
+ # Remember, each alternative MUST END IN *, to match a version number.
+ ;;
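The trailing * on every alternative is what lets a versioned string land in the same arm as the bare OS name. A tiny illustration with assumed inputs:

   for os in -solaris2 -solaris2.8 -linux-gnu2.2; do
       case $os in
           -linux-gnu*) echo "$os matches -linux-gnu*" ;;
           -solaris*)   echo "$os matches -solaris*" ;;
       esac
   done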
+ -qnx*)
+ case $basic_machine in
+ x86-* | i*86-*)
+ ;;
+ *)
+ os=-nto$os
+ ;;
+ esac
+ ;;
+ -nto*)
+ os=-nto-qnx
+ ;;
+ -sim | -es1800* | -hms* | -xray | -os68k* | -none* | -v88r* \
+ | -windows* | -osx | -abug | -netware* | -os9* | -beos* \
+ | -macos* | -mpw* | -magic* | -mmixware* | -mon960* | -lnews*)
+ ;;
+ -mac*)
+ os=`echo $os | sed -e 's|mac|macos|'`
+ ;;
+ -linux*)
+ os=`echo $os | sed -e 's|linux|linux-gnu|'`
+ ;;
+ -sunos5*)
+ os=`echo $os | sed -e 's|sunos5|solaris2|'`
+ ;;
+ -sunos6*)
+ os=`echo $os | sed -e 's|sunos6|solaris3|'`
+ ;;
+ -opened*)
+ os=-openedition
+ ;;
+ -wince*)
+ os=-wince
+ ;;
+ -osfrose*)
+ os=-osfrose
+ ;;
+ -osf*)
+ os=-osf
+ ;;
+ -utek*)
+ os=-bsd
+ ;;
+ -dynix*)
+ os=-bsd
+ ;;
+ -acis*)
+ os=-aos
+ ;;
+ -atheos*)
+ os=-atheos
+ ;;
+ -386bsd)
+ os=-bsd
+ ;;
+ -ctix* | -uts*)
+ os=-sysv
+ ;;
+ -nova*)
+ os=-rtmk-nova
+ ;;
+ -ns2 )
+ os=-nextstep2
+ ;;
+ -nsk*)
+ os=-nsk
+ ;;
+ # Preserve the version number of sinix5.
+ -sinix5.*)
+ os=`echo $os | sed -e 's|sinix|sysv|'`
+ ;;
+ -sinix*)
+ os=-sysv4
+ ;;
+ -triton*)
+ os=-sysv3
+ ;;
+ -oss*)
+ os=-sysv3
+ ;;
+ -svr4)
+ os=-sysv4
+ ;;
+ -svr3)
+ os=-sysv3
+ ;;
+ -sysvr4)
+ os=-sysv4
+ ;;
+ # This must come after -sysvr4.
+ -sysv*)
+ ;;
+ -ose*)
+ os=-ose
+ ;;
+ -es1800*)
+ os=-ose
+ ;;
+ -xenix)
+ os=-xenix
+ ;;
+ -*mint | -mint[0-9]* | -*MiNT | -MiNT[0-9]*)
+ os=-mint
+ ;;
+ -none)
+ ;;
+ *)
+ # Get rid of the `-' at the beginning of $os.
+ os=`echo $os | sed 's/[^-]*-//'`
+ echo Invalid configuration \`$1\': system \`$os\' not recognized 1>&2
+ exit 1
+ ;;
+esac
+else
+
+# Here we handle the default operating systems that come with various machines.
+# The value should be what the vendor currently ships out the door with their
+# machine, or, put another way, the most popular os provided with the machine.
+
+# Note that if you're going to try to match "-MANUFACTURER" here (say,
+# "-sun"), then you have to tell the case statement up towards the top
+# that MANUFACTURER isn't an operating system. Otherwise, code above
+# will signal an error saying that MANUFACTURER isn't an operating
+# system, and we'll never get to this point.
+
+case $basic_machine in
+ *-acorn)
+ os=-riscix1.2
+ ;;
+ arm*-rebel)
+ os=-linux
+ ;;
+ arm*-semi)
+ os=-aout
+ ;;
+ # This must come before the *-dec entry.
+ pdp10-*)
+ os=-tops20
+ ;;
+ pdp11-*)
+ os=-none
+ ;;
+ *-dec | vax-*)
+ os=-ultrix4.2
+ ;;
+ m68*-apollo)
+ os=-domain
+ ;;
+ i386-sun)
+ os=-sunos4.0.2
+ ;;
+ m68000-sun)
+ os=-sunos3
+ # This also exists in the configure program, but was not the
+ # default.
+ # os=-sunos4
+ ;;
+ m68*-cisco)
+ os=-aout
+ ;;
+ mips*-cisco)
+ os=-elf
+ ;;
+ mips*-*)
+ os=-elf
+ ;;
+ or32-*)
+ os=-coff
+ ;;
+ *-tti) # must be before sparc entry or we get the wrong os.
+ os=-sysv3
+ ;;
+ sparc-* | *-sun)
+ os=-sunos4.1.1
+ ;;
+ *-be)
+ os=-beos
+ ;;
+ *-ibm)
+ os=-aix
+ ;;
+ *-wec)
+ os=-proelf
+ ;;
+ *-winbond)
+ os=-proelf
+ ;;
+ *-oki)
+ os=-proelf
+ ;;
+ *-hp)
+ os=-hpux
+ ;;
+ *-hitachi)
+ os=-hiux
+ ;;
+ i860-* | *-att | *-ncr | *-altos | *-motorola | *-convergent)
+ os=-sysv
+ ;;
+ *-cbm)
+ os=-amigaos
+ ;;
+ *-dg)
+ os=-dgux
+ ;;
+ *-dolphin)
+ os=-sysv3
+ ;;
+ m68k-ccur)
+ os=-rtu
+ ;;
+ m88k-omron*)
+ os=-luna
+ ;;
+ *-next )
+ os=-nextstep
+ ;;
+ *-sequent)
+ os=-ptx
+ ;;
+ *-crds)
+ os=-unos
+ ;;
+ *-ns)
+ os=-genix
+ ;;
+ i370-*)
+ os=-mvs
+ ;;
+ *-next)
+ os=-nextstep3
+ ;;
+ *-gould)
+ os=-sysv
+ ;;
+ *-highlevel)
+ os=-bsd
+ ;;
+ *-encore)
+ os=-bsd
+ ;;
+ *-sgi)
+ os=-irix
+ ;;
+ *-siemens)
+ os=-sysv4
+ ;;
+ *-masscomp)
+ os=-rtu
+ ;;
+ f30[01]-fujitsu | f700-fujitsu)
+ os=-uxpv
+ ;;
+ *-rom68k)
+ os=-coff
+ ;;
+ *-*bug)
+ os=-coff
+ ;;
+ *-apple)
+ os=-macos
+ ;;
+ *-atari*)
+ os=-mint
+ ;;
+ *)
+ os=-none
+ ;;
+esac
+fi
+
+# Here we handle the case where we know the os, and the CPU type, but not the
+# manufacturer. We pick the logical manufacturer.
+vendor=unknown
+case $basic_machine in
+ *-unknown)
+ case $os in
+ -riscix*)
+ vendor=acorn
+ ;;
+ -sunos*)
+ vendor=sun
+ ;;
+ -aix*)
+ vendor=ibm
+ ;;
+ -beos*)
+ vendor=be
+ ;;
+ -hpux*)
+ vendor=hp
+ ;;
+ -mpeix*)
+ vendor=hp
+ ;;
+ -hiux*)
+ vendor=hitachi
+ ;;
+ -unos*)
+ vendor=crds
+ ;;
+ -dgux*)
+ vendor=dg
+ ;;
+ -luna*)
+ vendor=omron
+ ;;
+ -genix*)
+ vendor=ns
+ ;;
+ -mvs* | -opened*)
+ vendor=ibm
+ ;;
+ -ptx*)
+ vendor=sequent
+ ;;
+ -vxsim* | -vxworks* | -windiss*)
+ vendor=wrs
+ ;;
+ -aux*)
+ vendor=apple
+ ;;
+ -hms*)
+ vendor=hitachi
+ ;;
+ -mpw* | -macos*)
+ vendor=apple
+ ;;
+ -*mint | -mint[0-9]* | -*MiNT | -MiNT[0-9]*)
+ vendor=atari
+ ;;
+ -vos*)
+ vendor=stratus
+ ;;
+ esac
+ basic_machine=`echo $basic_machine | sed "s/unknown/$vendor/"`
+ ;;
+esac
+
+echo $basic_machine$os
+exit 0
+
+# Local variables:
+# eval: (add-hook 'write-file-hooks 'time-stamp)
+# time-stamp-start: "timestamp='"
+# time-stamp-format: "%:y-%02m-%02d"
+# time-stamp-end: "'"
+# End:
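+#
+# Illustrative note (not part of the upstream script): assuming the earlier,
+# unshown part of this script splits its single argument into $basic_machine
+# and $os in the usual way, the tables above resolve partial configurations
+# roughly as follows:
+#
+#   sh config.sub sparc-sun        # no OS given; *-sun defaults to -sunos4.1.1
+#   sparc-sun-sunos4.1.1
+#   sh config.sub sparc-sunos4.1   # no vendor given; -sunos* implies vendor sun
+#   sparc-sun-sunos4.1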
diff --git a/bdb/dist/configure.ac b/bdb/dist/configure.ac
new file mode 100644
index 00000000000..4a747cfe3b6
--- /dev/null
+++ b/bdb/dist/configure.ac
@@ -0,0 +1,608 @@
+# $Id: configure.ac,v 11.156 2002/09/04 13:51:17 bostic Exp $
+# Process this file with autoconf to produce a configure script.
+
+PACKAGE=db
+AC_INIT(Berkeley DB,
+ __EDIT_DB_VERSION__, support@sleepycat.com, db-__EDIT_DB_VERSION__)
+AC_CONFIG_SRCDIR([../db/db.c])
+AC_CONFIG_HEADER(db_config.h:config.hin)
+
+# Configure setup.
+AC_CANONICAL_HOST()
+AC_ARG_PROGRAM()
+
+# We cannot build in the top-level directory.
+AC_MSG_CHECKING(if building in the top-level directory)
+[ test -d db_archive ] && AC_MSG_ERROR([
+Berkeley DB cannot be built in the top-level distribution directory.])
+AC_MSG_RESULT(no)
+
+# Substitution variables.
+AC_SUBST(ADDITIONAL_INCS)
+AC_SUBST(ADDITIONAL_LANG)
+AC_SUBST(ADDITIONAL_OBJS)
+AC_SUBST(ADDITIONAL_PROGS)
+AC_SUBST(BUILD_TARGET)
+AC_SUBST(CFLAGS)
+AC_SUBST(CONFIGURATION_ARGS)
+AC_SUBST(CONFIGURATION_PATH)
+AC_SUBST(CPPFLAGS)
+AC_SUBST(CXX)
+AC_SUBST(CXXFLAGS)
+AC_SUBST(DEFAULT_LIB)
+AC_SUBST(DEFAULT_LIB_CXX)
+AC_SUBST(EMBEDIX_ECD_CXX)
+AC_SUBST(EMBEDIX_ECD_RPC)
+AC_SUBST(EMBEDIX_ROOT)
+AC_SUBST(INSTALLER)
+AC_SUBST(INSTALL_LIBS)
+AC_SUBST(INSTALL_TARGET)
+AC_SUBST(JAR)
+AC_SUBST(JAVACFLAGS)
+AC_SUBST(LDFLAGS)
+AC_SUBST(LIBJSO_LIBS)
+AC_SUBST(LIBS)
+AC_SUBST(LIBSO_LIBS)
+AC_SUBST(LIBTOOL)
+AC_SUBST(LIBTSO_LIBS)
+AC_SUBST(LIBXSO_LIBS)
+AC_SUBST(LOAD_LIBS)
+AC_SUBST(MAKEFILE_CC)
+AC_SUBST(MAKEFILE_CCLINK)
+AC_SUBST(MAKEFILE_CXX)
+AC_SUBST(MAKEFILE_CXXLINK)
+AC_SUBST(MAKEFILE_SOLINK)
+AC_SUBST(MAKEFILE_XSOLINK)
+AC_SUBST(POSTLINK)
+AC_SUBST(RPC_CLIENT_OBJS)
+AC_SUBST(RPM_POST_INSTALL)
+AC_SUBST(RPM_POST_UNINSTALL)
+AC_SUBST(SOFLAGS)
+AC_SUBST(db_cv_path_embedix_install)
+AC_SUBST(db_cv_path_rpm_archive)
+AC_SUBST(db_int_def)
+AC_SUBST(o)
+
+# RPM needs the current absolute path.
+# RPM needs the list of original arguments, but we don't include the RPM
+# option itself.
+CONFIGURATION_PATH=${PWD-`pwd`}
+CONFIGURATION_ARGS=`echo "$*" |
+ sed -e 's/--with-embedix[[^ ]]*//' -e 's/--with-rpm[[^ ]]*//'`
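+# Illustrative note: after autoconf strips one level of m4 quoting, the sed
+# expressions above become 's/--with-embedix[^ ]*//' and 's/--with-rpm[^ ]*//'.
+# For example (option values are hypothetical):
+#
+#   $ echo "--prefix=/opt/db --with-rpm=yes --enable-cxx" |
+#   >     sed -e 's/--with-embedix[^ ]*//' -e 's/--with-rpm[^ ]*//'
+#   --prefix=/opt/db  --enable-cxx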
+
+# Set the version.
+AM_VERSION_SET
+
+# Set the default installation location.
+AC_PREFIX_DEFAULT(/usr/local/BerkeleyDB.@DB_VERSION_MAJOR@.@DB_VERSION_MINOR@)
+
+# Process all options before using them.
+AM_OPTIONS_SET
+
+# Set some #defines based on configuration options.
+if test "$db_cv_diagnostic" = yes; then
+ AC_DEFINE(DIAGNOSTIC)
+ AH_TEMPLATE(DIAGNOSTIC,
+ [Define to 1 if you want a version with run-time diagnostic checking.])
+fi
+if test "$db_cv_debug_rop" = yes; then
+ AC_DEFINE(DEBUG_ROP)
+ AH_TEMPLATE(DEBUG_ROP,
+ [Define to 1 if you want a version that logs read operations.])
+fi
+if test "$db_cv_debug_wop" = yes; then
+ AC_DEFINE(DEBUG_WOP)
+ AH_TEMPLATE(DEBUG_WOP,
+ [Define to 1 if you want a version that logs write operations.])
+fi
+if test "$db_cv_umrw" = yes; then
+ AC_DEFINE(UMRW)
+ AH_TEMPLATE(UMRW,
+ [Define to 1 to mask harmless uninitialized memory read/writes.])
+
+fi
+if test "$db_cv_test" = yes; then
+ AC_DEFINE(CONFIG_TEST)
+ AH_TEMPLATE(CONFIG_TEST,
+ [Define to 1 if you want to build a version for running the test suite.])
+fi
+
+# Check for programs used in building and installation.
+AM_PROGRAMS_SET
+AC_PROG_INSTALL
+
+# RPM/Embedix support: change the standard make and install targets
+if test "$db_cv_rpm" = "yes"; then
+ BUILD_TARGET="rpm_build"
+ echo "topdir: $CONFIGURATION_PATH" > rpmrc
+ if test "$db_cv_embedix" = "yes"; then
+ EMBEDIX_ROOT="/usr"
+ INSTALL_TARGET="embedix_install"
+ else
+ INSTALL_TARGET="rpm_install"
+ fi
+else
+ BUILD_TARGET="library_build"
+ INSTALL_TARGET="library_install"
+fi
+
+# This is where we handle stuff that autoconf can't handle: compiler,
+# preprocessor and load flags, libraries that the standard tests don't
+# look for. The default optimization is -O. We would like to set the
+# default optimization for systems using gcc to -O2, but we can't. By
+# the time we know we're using gcc, it's too late to set optimization
+# flags.
+#
+# There are additional libraries we need for some compiler/architecture
+# combinations.
+#
+# Some architectures require DB to be compiled with special flags and/or
+# libraries for threaded applications.
+#
+# The makefile CC may be different than the CC used in config testing,
+# because the makefile CC may be set to use $(LIBTOOL).
+#
+# XXX
+# Don't override anything if it's already set from the environment.
+optimize_def="-O"
+case "$host_os" in
+aix4.3.*|aix5*)
+ optimize_def="-O2"
+ CC=${CC-"xlc_r"}
+ CPPFLAGS="$CPPFLAGS -D_THREAD_SAFE"
+ LDFLAGS="$LDFLAGS -Wl,-brtl";;
+bsdi3*) optimize_def="-O2"
+ CC=${CC-"shlicc2"}
+ LIBS="$LIBS -lipc";;
+bsdi*) optimize_def="-O2";;
+freebsd*)
+ optimize_def="-O2"
+ CPPFLAGS="$CPPFLAGS -D_THREAD_SAFE"
+ LDFLAGS="$LDFLAGS -pthread";;
+hpux*) CPPFLAGS="$CPPFLAGS -D_REENTRANT";;
+irix*) optimize_def="-O2"
+ CPPFLAGS="$CPPFLAGS -D_SGI_MP_SOURCE";;
+linux*) optimize_def="-O2"
+ CPPFLAGS="$CPPFLAGS -D_GNU_SOURCE -D_REENTRANT";;
+mpeix*) CPPFLAGS="$CPPFLAGS -D_POSIX_SOURCE -D_SOCKET_SOURCE"
+ LIBS="$LIBS -lsocket -lsvipc";;
+osf*) CPPFLAGS="$CPPFLAGS -D_REENTRANT"
+ LDFLAGS="$LDFLAGS -pthread";;
+*qnx) AC_DEFINE(HAVE_QNX)
+ AH_TEMPLATE(HAVE_QNX, [Define to 1 if building on QNX.]);;
+solaris*)
+ CPPFLAGS="$CPPFLAGS -D_REENTRANT";;
+esac
+
+# Set CFLAGS/CXXFLAGS. We MUST set the flags before we call autoconf
+# compiler configuration macros, because if we don't, they set CFLAGS
+# to no optimization and -g, which isn't what we want.
+CFLAGS=${CFLAGS-$optimize_def}
+CXXFLAGS=${CXXFLAGS-"$CFLAGS"}
+
+# If the user wants a debugging environment, add -g to the CFLAGS value.
+#
+# XXX
+# Some compilers can't mix optimizing and debug flags. The only way to
+# handle this is to specify CFLAGS in the environment before configuring.
+if test "$db_cv_debug" = yes; then
+ AC_DEFINE(DEBUG)
+ AH_TEMPLATE(DEBUG, [Define to 1 if you want a debugging version.])
+
+ CFLAGS="$CFLAGS -g"
+ CXXFLAGS="$CXXFLAGS -g"
+fi
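+# Illustrative note: since nothing already set in the environment is
+# overridden, the way to control how optimization and debugging flags are
+# combined is to export them before configuring, e.g. (the flag values and
+# the --enable-debug option name are illustrative):
+#
+#   env CC=cc CFLAGS='-g' CXXFLAGS='-g' ../dist/configure --enable-debug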
+
+# The default compiler is cc (NOT gcc), and the default CFLAGS value is as
+# specified above, NOT what is set by AC_PROG_CC, which won't set optimization
+# flags for any compiler other than gcc.
+AC_PROG_CC(cc gcc)
+
+# Because of shared library building, the ${CC} used for config tests
+# may be different than the ${CC} we want to put in the Makefile.
+# The latter is known as ${MAKEFILE_CC} in this script.
+MAKEFILE_CC="${CC}"
+MAKEFILE_CCLINK="${CC}"
+MAKEFILE_CXX="nocxx"
+MAKEFILE_CXXLINK="nocxx"
+
+# See if we need the C++ compiler at all. If so, we'd like to find one that
+# interoperates with the C compiler we chose. Since we preferred cc over gcc,
+# we'll also prefer the vendor's compiler over g++/gcc. If we're wrong, the
+# user can set CC and CXX in their environment before running configure.
+#
+# AC_PROG_CXX sets CXX, but it uses $CXX and $CCC (in that order) as its
+# first choices.
+if test "$db_cv_cxx" = "yes"; then
+ if test "$GCC" != "yes"; then
+ case "$host_os" in
+ aix*) AC_CHECK_TOOL(CCC, xlC_r)
+ LIBXSO_LIBS="-lC_r $LIBXSO_LIBS"
+ LIBS="-lC_r $LIBS";;
+ hpux*) AC_CHECK_TOOL(CCC, aCC);;
+ irix*) AC_CHECK_TOOL(CCC, CC);;
+ osf*) AC_CHECK_TOOL(CCC, cxx);;
+ solaris*) AC_CHECK_TOOL(CCC, CC);;
+ esac
+ fi
+ AC_PROG_CXX
+ AC_CXX_HAVE_STDHEADERS
+ MAKEFILE_CXX="${CXX}"
+ MAKEFILE_CXXLINK="${CXX}"
+fi
+
+# Do some gcc specific configuration.
+AC_GCC_CONFIG1
+AC_GCC_CONFIG2
+
+# We need the -Kthread/-pthread flag when compiling on SCO/Caldera's UnixWare
+# and OpenUNIX releases. We can't make the test until we know which compiler
+# we're using.
+case "$host_os" in
+sysv5UnixWare*|sysv5OpenUNIX8*)
+ if test "$GCC" = "yes"; then
+ CPPFLAGS="$CPPFLAGS -pthread"
+ LDFLAGS="$LDFLAGS -pthread"
+ else
+ CPPFLAGS="$CPPFLAGS -Kthread"
+ LDFLAGS="$LDFLAGS -Kthread"
+ fi;;
+esac
+
+# Export our compiler preferences for the libtool configuration.
+export CC CCC
+CCC=CXX
+
+# Libtool configuration.
+AC_PROG_LIBTOOL
+
+LIBTOOL="\$(SHELL) ./libtool"
+SOFLAGS="-rpath \$(libdir)"
+
+# Set SOSUFFIX and friends
+SOSUFFIX_CONFIG
+MODSUFFIX_CONFIG
+JMODSUFFIX_CONFIG
+
+INSTALLER="\$(LIBTOOL) --mode=install cp -p"
+
+MAKEFILE_CC="\$(LIBTOOL) --mode=compile ${MAKEFILE_CC}"
+MAKEFILE_SOLINK="\$(LIBTOOL) --mode=link ${MAKEFILE_CCLINK} -avoid-version"
+MAKEFILE_CCLINK="\$(LIBTOOL) --mode=link ${MAKEFILE_CCLINK}"
+MAKEFILE_CXX="\$(LIBTOOL) --mode=compile ${MAKEFILE_CXX}"
+MAKEFILE_XSOLINK="\$(LIBTOOL) --mode=link ${MAKEFILE_CXXLINK} -avoid-version"
+MAKEFILE_CXXLINK="\$(LIBTOOL) --mode=link ${MAKEFILE_CXXLINK}"
+
+# Configure for shared libraries, static libraries, or both. If both are
+# configured, build the utilities and example programs with shared versions.
+#
+# $o is set to ".o" or ".lo", and is the file suffix used in the Makefile
+# instead of .o
+if test "$enable_shared" = "no"; then
+ DEFAULT_LIB="\$(libdb)"
+ POSTLINK="@true"
+ o=".o"
+fi
+if test "$enable_shared" = "yes"; then
+ DEFAULT_LIB="\$(libso_target)"
+ POSTLINK="\$(LIBTOOL) --mode=execute true"
+ o=".lo"
+fi
+INSTALL_LIBS="$DEFAULT_LIB"
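+# Illustrative note: the $o suffix chosen above is substituted into the object
+# lists that follow, so with shared libraries enabled an entry such as
+# "db185${o}" expands to the libtool object db185.lo rather than db185.o.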
+
+# Optional C++ API.
+if test "$db_cv_cxx" = "yes"; then
+ if test "$enable_shared" = "no"; then
+ DEFAULT_LIB_CXX="\$(libcxx)"
+ fi
+ if test "$enable_shared" = "yes"; then
+ DEFAULT_LIB_CXX="\$(libxso_target)"
+ fi
+ INSTALL_LIBS="$INSTALL_LIBS $DEFAULT_LIB_CXX"
+
+ # Fill in C++ library for Embedix.
+ EMBEDIX_ECD_CXX='<OPTION db-extra>\
+ TYPE=bool\
+ DEFAULT_VALUE=1\
+ PROMPT=Include BerkeleyDB C++ library?\
+ <KEEPLIST>\
+ /usr/include/db_cxx.h\
+ /usr/lib/libdb_cxx-@DB_VERSION_MAJOR@.@DB_VERSION_MINOR@.so\
+ </KEEPLIST>\
+ <PROVIDES>\
+ libdb_cxx-@DB_VERSION_MAJOR@.@DB_VERSION_MINOR@.so\
+ </PROVIDES>\
+ <REQUIRES>\
+ ld-linux.so.2\
+ libc.so.6\
+ </REQUIRES>\
+ STATIC_SIZE=0\
+ STORAGE_SIZE=523612\
+ STARTUP_TIME=0\
+ </OPTION>'
+fi
+
+# Optional Java API.
+if test "$db_cv_java" = "yes"; then
+ # Java requires shared libraries.
+ if test "$enable_shared" = "no"; then
+ AC_MSG_ERROR([Java requires shared libraries])
+ fi
+
+ AC_PROG_JAVAC
+ AC_PROG_JAR
+ AC_JNI_INCLUDE_DIR
+
+ for JNI_INCLUDE_DIR in $JNI_INCLUDE_DIRS
+ do
+ CPPFLAGS="$CPPFLAGS -I$JNI_INCLUDE_DIR"
+ done
+
+ ADDITIONAL_LANG="$ADDITIONAL_LANG java"
+ INSTALL_LIBS="$INSTALL_LIBS \$(libjso_target)"
+else
+ JAVAC=nojavac
+fi
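+# Illustrative note: because of the check above, a Java build must also build
+# shared libraries, e.g. (option names are illustrative):
+#
+#   ../dist/configure --enable-java --enable-shared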
+
+# Optional RPC client/server.
+if test "$db_cv_rpc" = "yes"; then
+ AC_DEFINE(HAVE_RPC)
+ AH_TEMPLATE(HAVE_RPC, [Define to 1 if building RPC client/server.])
+
+ RPC_CLIENT_OBJS="\$(RPC_CLIENT_OBJS)"
+ ADDITIONAL_PROGS="berkeley_db_svc $ADDITIONAL_PROGS"
+
+ EMBEDIX_ECD_RPC="/usr/bin/berkeley_db_svc"
+
+ case "$host_os" in
+ hpux*)
+ AC_CHECK_FUNC(svc_run,,
+ AC_CHECK_LIB(nsl, svc_run,
+ LIBS="-lnsl $LIBS"; LIBTSO_LIBS="-lnsl $LIBTSO_LIBS"));;
+ solaris*)
+ AC_CHECK_FUNC(svc_run,, AC_CHECK_LIB(nsl, svc_run));;
+ esac
+fi
+
+AM_TCL_LOAD
+
+# Optional crypto support.
+if test -d "$srcdir/../crypto"; then
+ AC_DEFINE(HAVE_CRYPTO)
+ AH_TEMPLATE(HAVE_CRYPTO,
+ [Define to 1 if Berkeley DB release includes strong cryptography.])
+ ADDITIONAL_OBJS="aes_method${o} crypto${o} mt19937db${o} rijndael-alg-fst${o} rijndael-api-fst${o} $ADDITIONAL_OBJS"
+fi
+
+# Optional DB 1.85 compatibility API.
+if test "$db_cv_compat185" = "yes"; then
+ ADDITIONAL_INCS="db_185.h $ADDITIONAL_INCS"
+ ADDITIONAL_OBJS="db185${o} $ADDITIONAL_OBJS"
+fi
+
+# Optional utilities.
+if test "$db_cv_dump185" = "yes"; then
+ ADDITIONAL_PROGS="db_dump185 $ADDITIONAL_PROGS"
+fi
+
+# Checks for compiler characteristics.
+AC_C_CONST
+
+# Checks for include files, structures, C types.
+AC_HEADER_STAT
+AC_HEADER_TIME
+AC_HEADER_DIRENT
+AC_CHECK_HEADERS(sys/select.h sys/time.h)
+AC_CHECK_MEMBERS([struct stat.st_blksize])
+AM_TYPES
+
+AC_CACHE_CHECK([for ANSI C exit success/failure values], db_cv_exit_defines, [
+AC_TRY_COMPILE([#include <stdlib.h>], return (EXIT_SUCCESS);,
+ [db_cv_exit_defines=yes], [db_cv_exit_defines=no])])
+if test "$db_cv_exit_defines" = yes; then
+ AC_DEFINE(HAVE_EXIT_SUCCESS)
+ AH_TEMPLATE(HAVE_EXIT_SUCCESS,
+ [Define to 1 if you have EXIT_SUCCESS/EXIT_FAILURE #defines.])
+fi
+
+# Test for various functions/libraries that the test and example programs use:
+# sched_yield function
+# pthreads, socket and math libraries
+AC_CHECK_FUNC(sched_yield,,
+ AC_SEARCH_LIBS(sched_yield, rt, LOAD_LIBS="$LOAD_LIBS -lrt"))
+
+# XXX
+# We can't check for pthreads in the same way we did the test for sched_yield
+# because the Solaris C library includes pthread interfaces which are not
+# thread-safe. For that reason we always add -lpthread if we find a pthread
+# library. Also we can't depend on any specific call existing (pthread_create,
+# for example), as it may be #defined in an include file -- OSF/1 (Tru64) has
+# this problem.
+AC_HAVE_LIBRARY(pthread, LOAD_LIBS="$LOAD_LIBS -lpthread")
+
+# XXX
+# We could be more exact about whether these libraries are needed, but we don't
+# bother -- if they exist, we load them.
+AC_HAVE_LIBRARY(m, LOAD_LIBS="$LOAD_LIBS -lm")
+AC_HAVE_LIBRARY(socket, LOAD_LIBS="$LOAD_LIBS -lsocket")
+AC_HAVE_LIBRARY(nsl, LOAD_LIBS="$LOAD_LIBS -lnsl")
+
+# Check for mutexes.
+# We do this here because it changes $LIBS.
+AM_DEFINE_MUTEXES
+
+# Checks for system functions for which we have replacements.
+#
+# XXX
+# The only portable getcwd call is getcwd(char *, size_t), where the
+# buffer is non-NULL -- Solaris can't handle a NULL buffer, and they
+# deleted getwd().
+AC_REPLACE_FUNCS(getcwd getopt memcmp memcpy memmove raise)
+AC_REPLACE_FUNCS(snprintf strcasecmp strdup strerror vsnprintf)
+
+# Check for system functions we optionally use.
+AC_CHECK_FUNCS(_fstati64 clock_gettime directio gettimeofday getuid)
+AC_CHECK_FUNCS(pstat_getdynamic sched_yield select strtoul sysconf yield)
+
+# Checks for system functions for which we don't have replacements.
+# We require qsort(3).
+AC_CHECK_FUNCS(qsort, , AC_MSG_ERROR([No qsort library function.]))
+
+# Pread/pwrite.
+# HP-UX has pread/pwrite, but they don't work with largefile support.
+case "$host_os" in
+hpux*)
+ AC_MSG_WARN([pread/pwrite interfaces ignored on $host_os.]);;
+*) AC_CHECK_FUNCS(pread pwrite)
+esac
+
+# Check for fcntl(2) to deny child process access to file descriptors.
+AC_CACHE_CHECK([for fcntl/F_SETFD], db_cv_fcntl_f_setfd, [
+AC_TRY_LINK([
+#include <sys/types.h>
+#include <fcntl.h>], [
+ fcntl(1, F_SETFD, 1);
+], [db_cv_fcntl_f_setfd=yes], [db_cv_fcntl_f_setfd=no])])
+if test "$db_cv_fcntl_f_setfd" = yes; then
+ AC_DEFINE(HAVE_FCNTL_F_SETFD)
+ AH_TEMPLATE(HAVE_FCNTL_F_SETFD,
+ [Define to 1 if fcntl/F_SETFD denies child access to file descriptors.])
+fi
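+# Illustrative note: when the link test above succeeds, the generated
+# db_config.h (see AC_CONFIG_HEADER above) carries the define, e.g.:
+#
+#   $ grep HAVE_FCNTL_F_SETFD db_config.h
+#   #define HAVE_FCNTL_F_SETFD 1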
+
+# A/UX has a broken getopt(3).
+case "$host_os" in
+aux*) ADDITIONAL_OBJS="getopt${o} $ADDITIONAL_OBJS";;
+esac
+
+# Linux has the O_DIRECT flag, but you can't actually use it.
+AC_CACHE_CHECK([for open/O_DIRECT], db_cv_open_o_direct, [
+echo "test for working open/O_DIRECT" > __o_direct_file
+AC_TRY_RUN([
+#include <sys/types.h>
+#include <fcntl.h>
+main() {
+int c, fd = open("__o_direct_file", O_RDONLY | O_DIRECT, 0);
+exit ((fd == -1) || (read(fd, &c, 1) != 1));
+}], [db_cv_open_o_direct=yes], [db_cv_open_o_direct=no],
+AC_TRY_LINK([
+#include <sys/types.h>
+#include <fcntl.h>], [
+ open("__o_direct_file", O_RDONLY | O_DIRECT, 0);
+], [db_cv_open_o_direct=yes], [db_cv_open_o_direct=no]))
+rm -f __o_direct_file])
+if test "$db_cv_open_o_direct" = yes; then
+ AC_DEFINE(HAVE_O_DIRECT)
+ AH_TEMPLATE(HAVE_O_DIRECT, [Define to 1 if you have the O_DIRECT flag.])
+fi
+
+# Check for largefile support.
+AC_SYS_LARGEFILE
+
+# Figure out how to create shared regions.
+#
+# First, we look for mmap.
+#
+# BSD/OS has mlock(2), but it doesn't work until the 4.1 release.
+#
+# Nextstep (version 3.3) apparently supports mmap(2) (the mmap symbol
+# is defined in the C library) but does not support munmap(2). Don't
+# try to use mmap if we can't find munmap.
+#
+# Ultrix has mmap(2), but it doesn't work.
+mmap_ok=no
+case "$host_os" in
+bsdi3*|bsdi4.0)
+ AC_MSG_WARN([mlock(2) interface ignored on BSD/OS 3.X and 4.0.])
+ mmap_ok=yes
+ AC_CHECK_FUNCS(mmap munmap, , mmap_ok=no);;
+ultrix*)
+ AC_MSG_WARN([mmap(2) interface ignored on Ultrix.]);;
+*)
+ mmap_ok=yes
+ AC_CHECK_FUNCS(mlock munlock)
+ AC_CHECK_FUNCS(mmap munmap, , mmap_ok=no);;
+esac
+
+# Second, we look for shmget.
+#
+# SunOS has the shmget(2) interfaces, but there appears to be a missing
+# #include <debug/debug.h> file, so we ignore them.
+shmget_ok=no
+case "$host_os" in
+sunos*)
+ AC_MSG_WARN([shmget(2) interface ignored on SunOS.]);;
+*)
+ shmget_ok=yes
+ AC_CHECK_FUNCS(shmget, , shmget_ok=no);;
+esac
+
+# We require either mmap/munmap(2) or shmget(2).
+if test "$mmap_ok" = no -a "$shmget_ok" = no; then
+ AC_MSG_WARN([Neither mmap/munmap(2) nor shmget(2) library functions are available.])
+fi
+
+# If we're not doing version name substitution, DB_VERSION_UNIQUE_NAME
+# needs to be erased.
+if test "$db_cv_uniquename" = "no"; then
+ DB_VERSION_UNIQUE_NAME=""
+fi
+
+# This is necessary so that .o files in LIBOBJS are also built via
+# the ANSI2KNR-filtering rules.
+LIB@&t@OBJS=`echo "$LIB@&t@OBJS" |
+ sed 's,\.[[^.]]* ,$U&,g;s,\.[[^.]]*$,$U&,'`
+LTLIBOBJS=`echo "$LIB@&t@OBJS" |
+ sed 's,\.[[^.]]* ,.lo ,g;s,\.[[^.]]*$,.lo,'`
+AC_SUBST(LTLIBOBJS)
+
+# Initial output file list.
+CREATE_LIST="Makefile
+ db_cxx.h:$srcdir/../dbinc/db_cxx.in
+ db_int.h:$srcdir/../dbinc/db_int.in
+ include.tcl:$srcdir/../test/include.tcl"
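+# Illustrative note: each entry uses autoconf's "output:input[:input...]"
+# form, so config.status generates, for example, db_int.h by instantiating
+# ../dbinc/db_int.in; entries listing several inputs are built by
+# concatenating those inputs before substitution.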
+
+# Create the db.h file from a source file, a list of global function
+# prototypes, and, if configured for unique names, a list of #defines
+# to do DB_VERSION_UNIQUE_NAME substitution.
+if test "$db_cv_uniquename" = "yes"; then
+ CREATE_LIST="$CREATE_LIST
+ db.h:$srcdir/../dbinc/db.in:$srcdir/../dbinc_auto/rpc_defs.in:$srcdir/../dbinc_auto/ext_def.in:$srcdir/../dbinc_auto/ext_prot.in"
+else
+ CREATE_LIST="$CREATE_LIST
+ db.h:$srcdir/../dbinc/db.in:$srcdir/../dbinc_auto/rpc_defs.in:$srcdir/../dbinc_auto/ext_prot.in"
+fi
+
+# If configured for unique names, create the db_int_uext.h file (which
+# does the DB_VERSION_UNIQUE_NAME substitution), which is included by
+# the db_int.h file.
+if test "$db_cv_uniquename" = "yes"; then
+ CREATE_LIST="$CREATE_LIST
+ db_int_def.h:$srcdir/../dbinc_auto/int_def.in"
+ db_int_def='#include "db_int_def.h"'
+fi
+
+# Create the db_185.h and db185_int.h files from source files, a list of
+# global function prototypes, and, if configured for unique names, a list
+# of #defines to do DB_VERSION_UNIQUE_NAME substitution.
+if test "$db_cv_compat185" = "yes"; then
+ if test "$db_cv_uniquename" = "yes"; then
+ CREATE_LIST="$CREATE_LIST
+ db_185.h:$srcdir/../dbinc/db_185.in:$srcdir/../dbinc_auto/ext_185_def.in:$srcdir/../dbinc_auto/ext_185_prot.in
+ db185_int.h:$srcdir/../db185/db185_int.in:$srcdir/../dbinc_auto/ext_185_def.in:$srcdir/../dbinc_auto/ext_185_prot.in"
+ else
+ CREATE_LIST="$CREATE_LIST
+ db_185.h:$srcdir/../dbinc/db_185.in:$srcdir/../dbinc_auto/ext_185_prot.in
+ db185_int.h:$srcdir/../db185/db185_int.in:$srcdir/../dbinc_auto/ext_185_prot.in"
+ fi
+fi
+
+if test "$db_cv_embedix" = "yes"; then
+ CREATE_LIST="$CREATE_LIST db.ecd:../dist/db.ecd.in"
+fi
+
+if test "$db_cv_rpm" = "yes"; then
+ CREATE_LIST="$CREATE_LIST db.spec:../dist/db.spec.in"
+fi
+
+AC_CONFIG_FILES($CREATE_LIST)
+AC_OUTPUT
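+#
+# Illustrative note: as the RPM spec template added later in this change does,
+# the configure script generated from this file is normally run from a build
+# directory beside dist, roughly:
+#
+#   cd build_unix
+#   ../dist/configure
+#   make library_build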
diff --git a/bdb/dist/configure.in b/bdb/dist/configure.in
deleted file mode 100644
index 6656a588a66..00000000000
--- a/bdb/dist/configure.in
+++ /dev/null
@@ -1,591 +0,0 @@
-dnl $Id: configure.in,v 11.77 2001/01/18 19:05:25 bostic Exp $
-dnl Process this file with autoconf to produce a configure script.
-
-AC_INIT(../db/db.c)
-AC_CONFIG_HEADER(db_config.h:config.hin)
-
-dnl Configure setup.
-AC_PROG_INSTALL()
-AC_CANONICAL_HOST
-AC_ARG_PROGRAM()
-
-dnl We cannot build in the top-level directory.
-AC_MSG_CHECKING(if building in the top-level directory)
-[ test -d db_archive ] && AC_MSG_ERROR([
-Berkeley DB cannot be built in the top-level distribution directory.])
-AC_MSG_RESULT(no)
-
-dnl Substitution variables.
-AC_SUBST(ADDITIONAL_INCS)
-AC_SUBST(ADDITIONAL_LANG)
-AC_SUBST(ADDITIONAL_LIBS)
-AC_SUBST(ADDITIONAL_OBJS)
-AC_SUBST(ADDITIONAL_PROGS)
-AC_SUBST(CPPFLAGS)
-AC_SUBST(CXXFLAGS)
-AC_SUBST(DBS_LIBS)
-AC_SUBST(DEFAULT_INSTALL)
-AC_SUBST(DEFAULT_LIB)
-AC_SUBST(INSTALLER)
-AC_SUBST(INSTALL_LIBS)
-AC_SUBST(JAR)
-AC_SUBST(JAVAC)
-AC_SUBST(JAVACFLAGS)
-AC_SUBST(LDFLAGS)
-AC_SUBST(LIBDB_ARGS)
-AC_SUBST(LIBJSO_LIBS)
-AC_SUBST(LIBS)
-AC_SUBST(LIBSO_LIBS)
-AC_SUBST(LIBTOOL)
-AC_SUBST(LIBTSO_LIBS)
-AC_SUBST(LIBXSO_LIBS)
-AC_SUBST(MAKEFILE_CC)
-AC_SUBST(MAKEFILE_CCLINK)
-AC_SUBST(MAKEFILE_CXX)
-AC_SUBST(POSTLINK)
-AC_SUBST(RPC_OBJS)
-AC_SUBST(SOFLAGS)
-AC_SUBST(SOLINK)
-AC_SUBST(SOSUFFIX)
-
-dnl $o is set to ".o" or ".lo", and is the file suffix used in the
-dnl Makefile instead of .o
-AC_SUBST(o)
-o=.o
-INSTALLER="\$(cp)"
-DEFAULT_LIB="\$(libdb)"
-DEFAULT_INSTALL="install_static"
-
-dnl Set the version.
-AM_VERSION_SET
-
-dnl Set the default installation location.
-AC_PREFIX_DEFAULT(/usr/local/BerkeleyDB.@DB_VERSION_MAJOR@.@DB_VERSION_MINOR@)
-
-dnl Process all options before using them. This is necessary because there
-dnl are dependencies among them.
-AM_OPTIONS_SET
-
-# This is to determine what compiler is being used and to set options.
-# i.e. SCO OpenServer 5.0.X and UnixWare 7.X.X
-# option, cache_name, variable
-AC_DEFUN(AC_SYS_COMPILER_FLAG,
-[
- AC_MSG_CHECKING($1)
- OLD_CFLAGS="[$]CFLAGS"
- AC_CACHE_VAL(db_cv_option_$2,
- [
- CFLAGS="[$]OLD_CFLAGS $1"
- AC_TRY_RUN([int main(){exit(0);}],db_cv_option_$2=yes,db_cv_option_$2=no,db_cv_option_$2=no)
- ])
-
- CFLAGS="[$]OLD_CFLAGS"
-
- if test x"[$]db_cv_option_$2" = "xyes" ; then
- $3="[$]$3 $1"
- AC_MSG_RESULT(yes)
- $5
- else
- AC_MSG_RESULT(no)
- $4
- fi
-])
-
-
-# os, option, cache_name, variable
-AC_DEFUN(AC_SYS_OS_COMPILER_FLAG,
-[
- if test "x$db_cv_sys_os" = "x$1" ; then
- AC_SYS_COMPILER_FLAG($2,$3,$4)
- fi
-])
-
-dnl This is where we handle stuff that autoconf can't handle: compiler,
-dnl preprocessor and load flags, libraries that the standard tests don't
-dnl look for. The default optimization is -O. We would like to set the
-dnl default optimization for systems using gcc to -O2, but we can't. By
-dnl the time we know we're using gcc, it's too late to set optimization
-dnl flags.
-dnl
-dnl There are additional libraries we need for some compiler/architecture
-dnl combinations.
-dnl
-dnl Some architectures require DB to be compiled with special flags and/or
-dnl libraries for threaded applications
-dnl
-dnl The makefile CC may be different than the CC used in config testing,
-dnl because the makefile CC may be set to use $(LIBTOOL).
-dnl
-dnl XXX
-dnl Don't override anything if it's already set from the environment.
-optimize_def="-O"
-case "$host_os" in
-aix4.*) optimize_def="-O2"
- CC=${CC-"xlc_r"}
- CPPFLAGS="-D_THREAD_SAFE $CPPFLAGS"
- LIBTSO_LIBS="\$(LIBS)";;
-bsdi3*) CC=${CC-"shlicc2"}
- optimize_def="-O2"
- LIBS="-lipc $LIBS";;
-bsdi*) optimize_def="-O2";;
-freebsd*) optimize_def="-O2"
- CPPFLAGS="-D_THREAD_SAFE $CPPFLAGS"
- LIBS="-pthread";;
-hpux*) CPPFLAGS="-D_REENTRANT $CPPFLAGS";;
-irix*) optimize_def="-O2"
- CPPFLAGS="-D_SGI_MP_SOURCE $CPPFLAGS";;
-linux*) optimize_def="-O2"
- CFLAGS="-D_GNU_SOURCE"
- CPPFLAGS="-D_REENTRANT $CPPFLAGS";;
-mpeix*) CPPFLAGS="-D_POSIX_SOURCE -D_SOCKET_SOURCE $CPPFLAGS"
- LIBS="-lsocket -lsvipc $LIBS";;
-osf*) CPPFLAGS="-D_REENTRANT $CPPFLAGS";;
-*qnx) AC_DEFINE(HAVE_QNX);;
-sco3.2v4*) CC=${CC-"cc -belf"}
- LIBS="-lsocket -lnsl_s $LIBS";;
-sco*) CC=${CC-"cc -belf"}
- LIBS="-lsocket -lnsl $LIBS";;
-solaris*) CPPFLAGS="-D_REENTRANT $CPPFLAGS";;
-esac
-
-dnl Set CFLAGS/CXXFLAGS. We MUST set the flags before we call autoconf
-dnl compiler configuration macros, because if we don't, they set CFLAGS
-dnl to no optimization and -g, which isn't what we want.
-CFLAGS=${CFLAGS-$optimize_def}
-CXXFLAGS=${CXXFLAGS-"$CFLAGS"}
-
-dnl If the user wants a debugging environment, add -g to the CFLAGS value.
-dnl
-dnl XXX
-dnl Some compilers can't mix optimizing and debug flags. The only way to
-dnl handle this is to specify CFLAGS in the environment before configuring.
-if test "$db_cv_debug" = yes; then
- AC_DEFINE(DEBUG)
- CFLAGS="$CFLAGS -g"
- CXXFLAGS="$CXXFLAGS -g"
-fi
-
-dnl The default compiler is cc (NOT gcc), the default CFLAGS is as specified
-dnl above, NOT what is set by AC_PROG_CC, as it won't set optimization flags.
-dnl We still call AC_PROG_CC so that we get the other side-effects.
-AC_CHECK_PROG(CC, cc, cc)
-AC_CHECK_PROG(CC, gcc, gcc)
-AC_PROG_CC
-
-dnl Because of dynamic library building, the ${CC} used for config tests
-dnl may be different than the ${CC} we want to put in the Makefile.
-dnl The latter is known as ${MAKEFILE_CC} in this script.
-MAKEFILE_CC=${CC}
-MAKEFILE_CCLINK="\$(CC)"
-MAKEFILE_CXX="nocxx"
-
-dnl Set some #defines based on configuration options.
-if test "$db_cv_diagnostic" = yes; then
- AC_DEFINE(DIAGNOSTIC)
-fi
-if test "$db_cv_debug_rop" = yes; then
- AC_DEFINE(DEBUG_ROP)
-fi
-if test "$db_cv_debug_wop" = yes; then
- AC_DEFINE(DEBUG_WOP)
-fi
-if test "$db_cv_umrw" = yes; then
- AC_DEFINE(UMRW)
-fi
-if test "$db_cv_test" = yes; then
- AC_DEFINE(CONFIG_TEST)
-fi
-
-dnl See if we need the C++ compiler at all. If so, we'd like to find one that
-dnl interoperates with the C compiler we chose. Since we prefered cc over gcc,
-dnl we'll also prefer the vendor's compiler over g++/gcc. If we're wrong, the
-dnl user can set CC and CXX in their environment before running configure.
-dnl
-dnl AC_PROG_CXX sets CXX, but it uses $CXX and $CCC (in that order) as its
-dnl first choices.
-if test "$db_cv_cxx" = "yes"; then
- if test "$GCC" != "yes"; then
- case "$host_os" in
- aix*) AC_CHECK_PROG(CCC, xlC_r, xlC_r);;
- hpux*) AC_CHECK_PROG(CCC, aCC, aCC);;
- osf*) AC_CHECK_PROG(CCC, cxx, cxx);;
- solaris*) AC_CHECK_PROG(CCC, CC, CC);;
- esac
- fi
- AC_PROG_CXX
- MAKEFILE_CXX=${CXX}
-fi
-
-dnl XXX
-dnl Versions of GCC up to 2.8.0 required -fhandle-exceptions, but it is
-dnl renamed as -fexceptions and is the default in versions 2.8.0 and after.
-dnl
-dnl $GXX may be set as a result of enabling C++ or Java.
-if test "$GXX" = "yes"; then
- CXXVERSION=`${MAKEFILE_CXX} --version`
- case ${CXXVERSION} in
- 1.*|2.[[01234567]].*|*-1.*|*-2.[[01234567]].* )
- CXXFLAGS="-fhandle-exceptions $CXXFLAGS";;
- * ) CXXFLAGS="-fexceptions $CXXFLAGS";;
- esac
-fi
-
-dnl Give the OS a last chance to override CFLAGS and LDFLAGS
-
-case "$host_os" in
-sco3.2v5*)
- if test "$GCC" != "yes"; then
- CFLAGS="$CFLAGS"
- LD='$(CC) $(CFLAGS)'
- LIBS="-lsocket -lnsl $LIBS"
- CPPFLAGS="-D_THREAD_SAFE -pthread $CPPFLAGS"
- case "$CFLAGS" in
- *-belf*)
- AC_SYS_COMPILER_FLAG(-belf,sco_belf_option,CFLAGS,[],[
- case "$LDFLAGS" in
- *-belf*) ;;
- *) echo "Adding -belf option to ldflags."
- LDFLAGS="$LDFLAGS -belf"
- ;;
- esac
- ])
- ;;
- *)
- AC_SYS_COMPILER_FLAG(-belf,sco_belf_option,CFLAGS,[],[
- case "$LDFLAGS" in
- *-belf*) ;;
- *)
- echo "Adding -belf option to ldflags."
- LDFLAGS="$LDFLAGS -belf"
- ;;
- esac
- ])
- ;;
- esac
- else
- CC="gcc"
- LIBS="-lsocket -lnsl $LIBS"
- CPPFLAGS="-D_THREAD_SAFE -pthread $CPPFLAGS"
- fi ;;
-sysv5uw7*) LIBS="-lsocket -lnsl $LIBS"
- if test "$GCC" != "yes"; then
- # We are using built-in inline function
- CC="cc -belf"
- CXX="CC -belf -DNO_CPLUSPLUS_ALLOCA"
- CFLAGS="$CFLAGS -Kalloca -Kthread"
- LIBS="-Kthread -lsocket -lnsl $LIBS"
- else
- CFLAGS="$CFLAGS -Kalloca -pthread"
- CXX="$CXX -DNO_CPLUSPLUS_ALLOCA"
- CPPFLAGS="-D_THREAD_SAFE -pthread $CPPFLAGS"
- LIBS="-pthread -lsocket -lnsl $LIBS"
- fi
- ;;
-esac
-
-dnl Export our compiler preferences for the libtool configuration.
-export CC CCC
-CCC=CXX
-
-dnl Dynamic library and libtool configuration; optional, but required for
-dnl Tcl or Java support.
-LIBDB_ARGS="libdb.a"
-LIBTOOL="nolibtool"
-POSTLINK="@true"
-SOSUFFIX="so"
-if test "$db_cv_dynamic" = "yes"; then
- SAVE_CC="${MAKEFILE_CC}"
- SAVE_CXX="${MAKEFILE_CXX}"
-
- # Configure libtool.
- AC_MSG_CHECKING(libtool configuration)
- AC_MSG_RESULT([])
- ${CONFIG_SHELL-/bin/sh} $srcdir/ltconfig \
- --no-verify $srcdir/ltmain.sh \
- --output=./libtool $host_os \
- --disable-static \
- || AC_MSG_ERROR([libtool configure failed])
-
- SOSUFFIX=`sed -e '/^library_names_spec=/!d' -e 's/.*\.\([[a-zA-Z0-9_]]*\).*/\1/' ./libtool`
- DEFAULT_LIB="\$(libso_target)"
- DEFAULT_INSTALL="install_dynamic"
- LIBDB_ARGS="\$(libso_linkname)"
- LIBTOOL="\$(SHELL) ./libtool"
-
- MAKEFILE_CC="\$(LIBTOOL) --mode=compile ${SAVE_CC}"
- MAKEFILE_CXX="\$(LIBTOOL) --mode=compile ${SAVE_CXX}"
- MAKEFILE_CCLINK="\$(LIBTOOL) --mode=link ${SAVE_CC}"
-
- INSTALLER="\$(LIBTOOL) --mode=install cp"
- POSTLINK="\$(LIBTOOL) --mode=execute true"
- SOLINK="\$(LIBTOOL) --mode=link ${SAVE_CC} -avoid-version"
- SOFLAGS="-rpath \$(libdir)"
- o=".lo"
-fi
-
-dnl Optional C++ API.
-if test "$db_cv_cxx" = "yes"; then
- if test "$db_cv_dynamic" = "yes"; then
- ADDITIONAL_LIBS="$ADDITIONAL_LIBS \$(libxso_target)"
- DEFAULT_INSTALL="${DEFAULT_INSTALL} install_dynamic_cxx"
- else
- ADDITIONAL_LIBS="$ADDITIONAL_LIBS \$(libcxx)"
- DEFAULT_INSTALL="${DEFAULT_INSTALL} install_static_cxx"
- fi
-fi
-
-dnl Optional Java API.
-if test "$db_cv_java" = "yes"; then
- if test "$db_cv_dynamic" != "yes"; then
- AC_MSG_ERROR([--enable-java requires --enable-dynamic])
- fi
-
- AC_CHECK_PROG(JAVAC, javac, javac, nojavac)
- if test "$JAVAC" = "nojavac"; then
- AC_MSG_ERROR([no javac compiler in PATH])
- fi
- AC_CHECK_PROG(JAR, jar, jar, nojar)
- if test "$JAR" = "nojar"; then
- AC_MSG_ERROR([no jar utility in PATH])
- fi
- AC_PATH_PROG(JAVACABS, javac, nojavac)
- ADDITIONAL_LIBS="$ADDITIONAL_LIBS \$(libjso_target)"
- ADDITIONAL_LANG="$ADDITIONAL_LANG java"
- DEFAULT_INSTALL="${DEFAULT_INSTALL} install_java"
-
-dnl find the include directory relative to the javac executable
- while ls -ld "$JAVACABS" 2>/dev/null | grep " -> " >/dev/null; do
- AC_MSG_CHECKING(symlink for $JAVACABS)
- JAVACLINK=`ls -ld $JAVACABS | sed 's/.* -> //'`
- case "$JAVACLINK" in
- /*) JAVACABS="$JAVACLINK";;
-dnl 'X' avoids triggering unwanted echo options.
- *) JAVACABS=`echo "X$JAVACABS" | sed -e 's/^X//' -e 's:[[^/]]*$::'`"$JAVACLINK";;
- esac
- AC_MSG_RESULT($JAVACABS)
- done
- JTOPDIR=`echo "$JAVACABS" | sed -e 's://*:/:g' -e 's:/[[^/]]*$::'`
- if test -f "$JTOPDIR/include/jni.h"; then
- CPPFLAGS="$CPPFLAGSS -I$JTOPDIR/include"
- else
- JTOPDIR=`echo "$JTOPDIR" | sed -e 's:/[[^/]]*$::'`
- if test -f "$JTOPDIR/include/jni.h"; then
- CPPFLAGS="$CPPFLAGS -I$JTOPDIR/include"
- else
- AC_MSG_ERROR([cannot find java include files])
- fi
- fi
-
-dnl get the likely subdirectories for system specific java includes
- case "$host_os" in
- solaris*) JINCSUBDIRS="solaris";;
- linux*) JINCSUBDIRS="linux genunix";;
- *) JINCSUBDIRS="genunix";;
- esac
-
- for JINCSUBDIR in $JINCSUBDIRS
- do
- if test -d "$JTOPDIR/include/$JINCSUBDIR"; then
- CPPFLAGS="$CPPFLAGS -I$JTOPDIR/include/$JINCSUBDIR"
- fi
- done
-else
- JAVAC=nojavac
-fi
-
-dnl Optional RPC client/server.
-if test "$db_cv_rpc" = "yes"; then
- AC_DEFINE(HAVE_RPC)
-
- RPC_OBJS="\$(RPC_OBJS)"
- ADDITIONAL_PROGS="berkeley_db_svc $ADDITIONAL_PROGS"
-
- case "$host_os" in
- hpux*)
- AC_CHECK_FUNC(svc_run,,
- AC_CHECK_LIB(nsl, svc_run,
- LIBS="-lnsl $LIBS"; LIBTSO_LIBS="-lnsl $LIBTSO_LIBS"));;
- solaris*)
- AC_CHECK_FUNC(svc_run,, AC_CHECK_LIB(nsl, svc_run));;
- esac
-fi
-
-AM_TCL_LOAD
-
-dnl Optional DB 1.85 compatibility API.
-if test "$db_cv_compat185" = "yes"; then
- ADDITIONAL_INCS="db_185.h $ADDITIONAL_INCS"
- ADDITIONAL_OBJS="db185${o} $ADDITIONAL_OBJS"
-fi
-
-dnl Optional utilities.
-if test "$db_cv_dump185" = "yes"; then
- ADDITIONAL_PROGS="db_dump185 $ADDITIONAL_PROGS"
-fi
-
-dnl Test Server.
-dnl Include -lpthread if the library exists.
-SCOLIBS=$LIBS
-AC_CHECK_LIB(pthread, pthread_create, DBS_LIBS=-lpthread)
-case "$host_os" in
-sysv5uw7*) LIBS=$SCOLIBS;;
-sco3.2v5*) LIBS=$SCOLIBS;;
-esac
-
-dnl Checks for typedefs, structures, and system/compiler characteristics.
-AC_C_BIGENDIAN
-AC_C_CONST
-AC_HEADER_STAT
-AC_HEADER_TIME
-AC_STRUCT_ST_BLKSIZE
-AC_TYPE_MODE_T
-AC_TYPE_OFF_T
-AC_TYPE_PID_T
-AC_TYPE_SIZE_T
-
-dnl Define any short-hand types we're missing.
-AM_SHORTHAND_TYPES
-
-dnl Checks for header files.
-AC_HEADER_DIRENT
-AC_CHECK_HEADERS(sys/select.h)
-AC_CHECK_HEADERS(sys/time.h)
-
-dnl Check for mutexes. We do this here because it changes $LIBS.
-AM_DEFINE_MUTEXES
-
-dnl Checks for system functions for which we have replacements.
-dnl
-dnl XXX
-dnl The only portable getcwd call is getcwd(char *, size_t), where the
-dnl buffer is non-NULL -- Solaris can't handle a NULL buffer, and they
-dnl deleted getwd().
-AC_REPLACE_FUNCS(getcwd getopt memcmp memcpy memmove)
-AC_REPLACE_FUNCS(raise snprintf strcasecmp strerror vsnprintf)
-
-dnl XXX
-dnl Nasty hack. AC_REPLACE_FUNCS added entries of the form xxx.o to the
-dnl LIBOBJS variable. They have to be xxx.lo if we are building shared
-dnl libraries. Use sed, configure already requires it.
-tmp="`echo \"$LIBOBJS\" | sed \"s/\.o/${o}/g\"`"
-LIBOBJS="$tmp"
-
-dnl Check for system functions we optionally use.
-AC_CHECK_FUNCS(getuid pstat_getdynamic sysconf sched_yield strtoul yield)
-
-dnl Pread/pwrite.
-dnl
-dnl HP-UX has pread/pwrite, but it doesn't work with bigfile support.
-case "$host_os" in
-hpux*)
- AC_MSG_WARN([pread/pwrite interfaces ignored on $host_os.]);;
-*) AC_CHECK_FUNCS(pread pwrite)
-esac
-
-dnl Check for fcntl(2) to deny child process access to file descriptors.
-AC_CACHE_CHECK([for fcntl/F_SETFD], db_cv_fcntl_f_setfd, [dnl
-AC_TRY_RUN([
-#include <sys/types.h>
-#include <fcntl.h>
-main(){exit(fcntl(1, F_SETFD, 1) == -1);}],
- [db_cv_fcntl_f_setfd=yes], [db_cv_fcntl_f_setfd=no])])
-if test "$db_cv_fcntl_f_setfd" = yes; then
- AC_DEFINE(HAVE_FCNTL_F_SETFD)
-fi
-
-dnl A/UX has a broken getopt(3).
-case "$host_os" in
-aux*) ADDITIONAL_OBJS="getopt${o} $ADDITIONAL_OBJS";;
-esac
-
-dnl Checks for system functions for which we don't have replacements.
-
-dnl We require qsort(3) and select(2).
-AC_CHECK_FUNCS(qsort, , AC_MSG_ERROR([No qsort library function.]))
-AC_CHECK_FUNCS(select, , AC_MSG_ERROR([No select library function.]))
-
-dnl Some versions of sprintf return a pointer to the first argument instead
-dnl of a character count. We assume that the return value of snprintf and
-dnl vsprintf etc. will be the same as sprintf, and check the easy one.
-AC_CACHE_CHECK([for int type sprintf return value], db_cv_sprintf_count, [dnl
-AC_TRY_RUN([main(){char buf[20]; exit(sprintf(buf, "XXX") != 3);}],
- [db_cv_sprintf_count=yes], [db_cv_sprintf_count=no])])
-if test "$db_cv_sprintf_count" = no; then
- AC_DEFINE(SPRINTF_RET_CHARPNT)
-fi
-
-dnl Vendors are doing 64-bit lseek in different ways.
-dnl AIX, HP/UX, Solaris and Linux all use _FILE_OFFSET_BITS
-dnl to specify a "big-file" environment.
-dnl
-dnl You can't build C++ with big-file support on HP-UX, the include files
-dnl are wrong. On Solaris 8, <fcntl.h> included with big-file support
-dnl is not compatible with C++.
-if test "$db_cv_bigfile" = no; then
- case "$host_os" in
- solaris2.8|hpux*)
- if test "$db_cv_cxx" = "yes"; then
- AC_MSG_WARN([Large file and C++ API support are incompatible on HP-UX])
- AC_MSG_WARN([and Solaris 8; large file support has been turned off.])
- else
- AC_DEFINE(HAVE_FILE_OFFSET_BITS)
- fi;;
- aix*|solaris*|linux*)
- AC_DEFINE(HAVE_FILE_OFFSET_BITS);;
- esac
-fi
-
-dnl Figure out how to create shared regions.
-dnl
-dnl First, we look for mmap.
-dnl
-dnl BSD/OS has mlock(2), but it doesn't work until the 4.1 release.
-dnl
-dnl Nextstep (version 3.3) apparently supports mmap(2) (the mmap symbol
-dnl is defined in the C library) but does not support munmap(2). Don't
-dnl try to use mmap if we can't find munmap.
-dnl
-dnl Ultrix has mmap(2), but it doesn't work.
-mmap_ok=no
-case "$host_os" in
-bsdi3*|bsdi4.0)
- AC_MSG_WARN([mlock(2) interface ignored on BSD/OS 3.X and 4.0.])
- mmap_ok=yes
- AC_CHECK_FUNCS(mmap munmap, , mmap_ok=no);;
-ultrix*)
- AC_MSG_WARN([mmap(2) interface ignored on Ultrix.]);;
-*)
- mmap_ok=yes
- AC_CHECK_FUNCS(mlock munlock)
- AC_CHECK_FUNCS(mmap munmap, , mmap_ok=no);;
-esac
-
-dnl Second, we look for shmget.
-dnl
-dnl SunOS has the shmget(2) interfaces, but there appears to be a missing
-dnl #include <debug/debug.h> file, so we ignore them.
-shmget_ok=no
-case "$host_os" in
-sunos*)
- AC_MSG_WARN([shmget(2) interface ignored on SunOS.]);;
-*)
- shmget_ok=yes
- AC_CHECK_FUNCS(shmget, , shmget_ok=no);;
-esac
-
-dnl We require either mmap/munmap(2) or shmget(2).
-if test "$mmap_ok" = no -a "$shmget_ok" = no; then
- AC_MSG_WARN([Neither mmap/munmap(2) or shmget(2) library functions.])
-fi
-
-dnl Check for programs used in building and installation.
-AM_PROGRAMS_SET
-
-CREATE_LIST="Makefile
- include.tcl:../test/include.tcl
- db.h:../include/db.src
- db_int.h:../include/db_int.src"
-if test "$db_cv_compat185" = "yes"; then
- CREATE_LIST="${CREATE_LIST} db_185.h:../include/db_185.h"
-fi
-AC_OUTPUT(${CREATE_LIST})
diff --git a/bdb/dist/db.ecd.in b/bdb/dist/db.ecd.in
new file mode 100644
index 00000000000..92a6a090716
--- /dev/null
+++ b/bdb/dist/db.ecd.in
@@ -0,0 +1,64 @@
+# Embedix Component Description (ECD) file for BerkeleyDB.
+#
+# $Id: db.ecd.in,v 11.1 2001/04/04 14:06:13 bostic Exp $
+
+<GROUP System>
+<GROUP Library>
+<COMPONENT BerkeleyDB>
+ SRPM=db
+ <SPECPATCH></SPECPATCH>
+ <HELP>
+ Berkeley DB is Sleepycat Software's programmatic database toolkit.
+ </HELP>
+
+ TYPE=bool
+ DEFAULT_VALUE=1
+ PROMPT=Include BerkeleyDB library?
+ <KEEPLIST>
+ /usr/lib/libdb-@DB_VERSION_MAJOR@.@DB_VERSION_MINOR@.so
+ /usr/include/db.h
+ /usr/lib/libdb.so
+ </KEEPLIST>
+ <PROVIDES>
+ libdb-@DB_VERSION_MAJOR@.@DB_VERSION_MINOR@.so
+ </PROVIDES>
+ <REQUIRES>
+ ld-linux.so.2
+ libc.so.6
+ </REQUIRES>
+ STATIC_SIZE=0
+ STARTUP_TIME=0
+
+ @EMBEDIX_ECD_CXX@
+
+ <OPTION db-extra>
+ TYPE=bool
+ DEFAULT_VALUE=1
+ PROMPT=Include BerkeleyDB Utilities?
+ <KEEPLIST>
+ /usr/bin/db_archive
+ /usr/bin/db_checkpoint
+ /usr/bin/db_deadlock
+ /usr/bin/db_dump
+ /usr/bin/db_load
+ /usr/bin/db_printlog
+ /usr/bin/db_recover
+ /usr/bin/db_stat
+ /usr/bin/db_upgrade
+ /usr/bin/db_verify
+ @EMBEDIX_ECD_RPC@
+ </KEEPLIST>
+ <REQUIRES>
+ libdb-@DB_VERSION_MAJOR@.@DB_VERSION_MINOR@.so
+ ld-linux.so.2
+ libc.so.6
+ libdl.so.2
+ libm.so.6
+ </REQUIRES>
+ STATIC_SIZE=0
+ STARTUP_TIME=0
+ </OPTION>
+
+</COMPONENT>
+</GROUP>
+</GROUP>
diff --git a/bdb/dist/db.spec.in b/bdb/dist/db.spec.in
new file mode 100644
index 00000000000..ef253bcfcf4
--- /dev/null
+++ b/bdb/dist/db.spec.in
@@ -0,0 +1,52 @@
+# Berkeley DB @DB_VERSION_MAJOR@.@DB_VERSION_MINOR@.@DB_VERSION_PATCH@
+
+Summary: Sleepycat Berkeley DB database library
+Name: db
+Version: @DB_VERSION_MAJOR@.@DB_VERSION_MINOR@.@DB_VERSION_PATCH@
+Release: 1
+Copyright: Freely redistributable, see LICENSE for details.
+Source: http://www.sleepycat.com/update/@DB_VERSION_MAJOR@.@DB_VERSION_MINOR@.@DB_VERSION_PATCH@/db-@DB_VERSION_MAJOR@.@DB_VERSION_MINOR@.@DB_VERSION_PATCH@.tar.gz
+URL: http://www.sleepycat.com
+Group: System Environment/Libraries
+BuildRoot: @CONFIGURATION_PATH@/RPM_INSTALL
+
+%description
+Berkeley DB is a programmatic toolkit that provides fast, reliable,
+mission-critical, and scalable built-in database support for software
+ranging from embedded applications running on hand-held appliances to
+enterprise-scale servers.
+
+The Berkeley DB access methods include B+tree, Extended Linear Hashing,
+Fixed and Variable-length records, and Persistent Queues. Berkeley DB
+provides full transactional support, database recovery, online backups,
+and separate access to locking, logging and shared memory caching
+subsystems.
+
+Berkeley DB supports C, C++, Java, Tcl, Perl, and Python APIs. The
+software is available for Linux, a wide variety of UNIX platforms,
+Windows 95/98, Windows/NT, Windows 2000, VxWorks and QNX.
+
+%prep
+%setup
+
+%build
+cd build_unix
+CFLAGS="$RPM_OPT_FLAGS" ../dist/configure @CONFIGURATION_ARGS@
+make library_build
+
+%install
+cd build_unix
+make prefix=@CONFIGURATION_PATH@/RPM_INSTALL@EMBEDIX_ROOT@ install
+
+@RPM_POST_INSTALL@
+
+@RPM_POST_UNINSTALL@
+
+%files
+%defattr(-,root,root)
+%dir @EMBEDIX_ROOT@/bin
+%dir @EMBEDIX_ROOT@/docs
+%dir @EMBEDIX_ROOT@/include
+%dir @EMBEDIX_ROOT@/lib
+
+%changelog
diff --git a/bdb/dist/gen_inc.awk b/bdb/dist/gen_inc.awk
new file mode 100644
index 00000000000..4d245623bee
--- /dev/null
+++ b/bdb/dist/gen_inc.awk
@@ -0,0 +1,73 @@
+# This awk script parses C input files looking for lines marked "PUBLIC:"
+# and "EXTERN:". (PUBLIC lines are DB internal function prototypes and
+# #defines, EXTERN are DB external function prototypes and #defines.)
+#
+# PUBLIC lines are put into two versions of per-directory include files:
+# one file that contains the prototypes, and one file that contains a
+# #define for the name to be processed during configuration when creating
+# unique names for every global symbol in the DB library.
+#
+# The EXTERN lines are put into two files: one of which contains prototypes
+# which are always appended to the db.h file, and one of which contains a
+# #define list for use when creating unique symbol names.
+#
+# Four arguments:
+# e_dfile list of EXTERN #defines
+# e_pfile include file that contains EXTERN prototypes
+# i_dfile list of internal (PUBLIC) #defines
+# i_pfile include file that contains internal (PUBLIC) prototypes
+/PUBLIC:/ {
+ sub("^.*PUBLIC:[ ][ ]*", "")
+ if ($0 ~ "^#if|^#ifdef|^#ifndef|^#else|^#endif") {
+ print $0 >> i_pfile
+ print $0 >> i_dfile
+ next
+ }
+ pline = sprintf("%s %s", pline, $0)
+ if (pline ~ "));") {
+ sub("^[ ]*", "", pline)
+ print pline >> i_pfile
+ if (pline !~ db_version_unique_name) {
+ def = gensub("[ ][ ]*__P.*", "", 1, pline)
+ sub("^.*[ ][*]*", "", def)
+ printf("#define %s %s@DB_VERSION_UNIQUE_NAME@\n",
+ def, def) >> i_dfile
+ }
+ pline = ""
+ }
+}
+
+# When we switched to methods in 4.0, we guessed txn_{abort,begin,commit}
+# were the interfaces applications would likely use and not be willing to
+# change, due to the sheer volume of the calls. Provide wrappers -- we
+# could do txn_abort and txn_commit using macros, but not txn_begin, because
+# the DB_ENV field itself is named txn_begin and we didn't want to modify it.
+#
+# The issue with txn_begin hits us in another way. If configured with the
+# --with-uniquename option, we use #defines to re-define DB's interfaces
+# to unique names. We can't do that for these functions because txn_begin
+# is also a field name in the DB_ENV structure, and the #defines we use go
+# at the end of the db.h file -- we get control too late to #define a field
+# name. So, modify the script that generates the unique names #defines to
+# not generate them for these three functions, and don't include the three
+# functions in libraries built with that configuration option.
+/EXTERN:/ {
+ sub("^.*EXTERN:[ ][ ]*", "")
+ if ($0 ~ "^#if|^#ifdef|^#ifndef|^#else|^#endif") {
+ print $0 >> e_pfile
+ print $0 >> e_dfile
+ next
+ }
+ eline = sprintf("%s %s", eline, $0)
+ if (eline ~ "));") {
+ sub("^[ ]*", "", eline)
+ print eline >> e_pfile
+ if (eline !~ db_version_unique_name && eline !~ "^int txn_") {
+ def = gensub("[ ][ ]*__P.*", "", 1, eline)
+ sub("^.*[ ][*]*", "", def)
+ printf("#define %s %s@DB_VERSION_UNIQUE_NAME@\n",
+ def, def) >> e_dfile
+ }
+ eline = ""
+ }
+}
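+#
+# Illustrative note (hypothetical symbol name): a line inside a C block
+# comment such as
+#    * PUBLIC: int __db_foo __P((DB_ENV *, int));
+# causes the prototype to be appended to i_pfile:
+#   int __db_foo __P((DB_ENV *, int));
+# and a renaming entry to be appended to i_dfile, which configure later uses
+# for DB_VERSION_UNIQUE_NAME substitution:
+#   #define __db_foo __db_foo@DB_VERSION_UNIQUE_NAME@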
diff --git a/bdb/dist/gen_rec.awk b/bdb/dist/gen_rec.awk
index 5953ee05120..75f2e86ca9e 100644
--- a/bdb/dist/gen_rec.awk
+++ b/bdb/dist/gen_rec.awk
@@ -2,10 +2,10 @@
#
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: gen_rec.awk,v 11.26 2001/01/08 21:06:46 bostic Exp $
+# $Id: gen_rec.awk,v 11.70 2002/08/08 15:44:47 bostic Exp $
#
# This awk script generates all the log, print, and read routines for the DB
@@ -21,33 +21,51 @@
# (logical types are defined in each subsystem manually)
# structures to contain the data unmarshalled from the log.
#
-# This awk script requires that five variables be set when it is called:
+# This awk script requires that three variables be set when it is called:
#
# source_file -- the C source file being created
-# subsystem -- the subsystem prefix, e.g., "db"
# header_file -- the C #include file being created
# template_file -- the template file being created
-# template_dir -- the directory to find the source template
#
# And stdin must be the input file that defines the recovery setup.
+#
+# Within each file prefix.src, we use a number of public keywords (documented
+# in the reference guide) as well as the following ones which are private to
+# DB:
+# DBPRIVATE Indicates that a file will be built as part of DB,
+# rather than compiled independently, and so can use
+# DB-private interfaces (such as DB_NOCOPY).
+# DB A DB handle. Logs the dbreg fileid for that handle,
+# and makes the *_log interface take a DB * instead of a
+# DB_ENV *.
+# PGDBT Just like DBT, only we know it stores a page or page
+# header, so we can byte-swap it (once we write the
+# byte-swapping code, which doesn't exist yet).
+# WRLOCK
+# WRLOCKNZ An ARG that stores a db_pgno_t, which the getpgnos
+# function should acquire a lock on. WRLOCK implies
+# that we should always get the lock; WRLOCKNZ implies
+# that we should do so if and only if the pgno is non-zero
+# (unfortunately, 0 is both PGNO_INVALID and the main
+# metadata page number).
BEGIN {
- if (source_file == "" || subsystem == "" ||
- header_file == "" || template_file == "" || template_dir == "") {
- print "Usage: gen_rec.awk requires five variables to be set:"
+ if (source_file == "" ||
+ header_file == "" || template_file == "") {
+ print "Usage: gen_rec.awk requires three variables to be set:"
print "\tsource_file\t-- the C source file being created"
- print "\tsubsystem\t-- the subsystem prefix, e.g., \"db\""
print "\theader_file\t-- the C #include file being created"
print "\ttemplate_file\t-- the template file being created"
- print "\ttemplate_dir\t-- the directory to find the source template"
exit
}
FS="[\t ][\t ]*"
CFILE=source_file
- NAME=subsystem
HFILE=header_file
TFILE=template_file
- TDIR=template_dir
+ dbprivate = 0
+}
+/^[ ]*DBPRIVATE/ {
+ dbprivate = 1
}
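+# Illustrative note (file names are hypothetical): per the usage check in the
+# BEGIN block above, the script is driven by three awk variables, reads the
+# record descriptions on stdin, and must run from the dist directory so that
+# template/rec_ctemp is found, roughly:
+#
+#   awk -v source_file=db_auto.c -v header_file=db_auto.h \
+#       -v template_file=db_template -f gen_rec.awk < db.src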
/^[ ]*PREFIX/ {
prefix = $2
@@ -71,9 +89,9 @@ BEGIN {
printf("#include <string.h>\n") >> TFILE
printf("#endif\n\n") >> TFILE
printf("#include \"db_int.h\"\n") >> TFILE
- printf("#include \"db_page.h\"\n") >> TFILE
- printf("#include \"%s.h\"\n", prefix) >> TFILE
- printf("#include \"log.h\"\n\n") >> TFILE
+ printf("#include \"dbinc/db_page.h\"\n") >> TFILE
+ printf("#include \"dbinc/%s.h\"\n", prefix) >> TFILE
+ printf("#include \"dbinc/log.h\"\n\n") >> TFILE
}
/^[ ]*INCLUDE/ {
if ($3 == "")
@@ -81,26 +99,30 @@ BEGIN {
else
printf("%s %s\n", $2, $3) >> CFILE
}
-/^[ ]*(BEGIN|DEPRECATED)/ {
+/^[ ]*(BEGIN|IGNORED)/ {
if (in_begin) {
print "Invalid format: missing END statement"
exit
}
in_begin = 1;
is_dbt = 0;
- is_deprecated = ($1 == "DEPRECATED");
+ has_dbp = 0;
+ is_uint = 0;
+ need_log_function = ($1 == "BEGIN");
nvars = 0;
+ # number of locks that the getpgnos functions will return
+ nlocks = 0;
+
thisfunc = $2;
funcname = sprintf("%s_%s", prefix, $2);
rectype = $3;
funcs[num_funcs] = funcname;
- funcs_dep[num_funcs] = is_deprecated;
++num_funcs;
}
-/^[ ]*(ARG|DBT|POINTER)/ {
+/^[ ]*(DB|ARG|DBT|PGDBT|POINTER|WRLOCK|WRLOCKNZ)/ {
vars[nvars] = $2;
types[nvars] = $3;
atypes[nvars] = $1;
@@ -109,11 +131,16 @@ BEGIN {
for (i = 4; i < NF; i++)
types[nvars] = sprintf("%s %s", types[nvars], $i);
- if ($1 == "ARG")
- sizes[nvars] = sprintf("sizeof(%s)", $2);
- else if ($1 == "POINTER")
+ if ($1 == "DB") {
+ has_dbp = 1;
+ }
+
+ if ($1 == "DB" || $1 == "ARG" || $1 == "WRLOCK" || $1 == "WRLOCKNZ") {
+ sizes[nvars] = sprintf("sizeof(u_int32_t)");
+ is_uint = 1;
+ } else if ($1 == "POINTER")
sizes[nvars] = sprintf("sizeof(*%s)", $2);
- else { # DBT
+ else { # DBT, PGDBT
sizes[nvars] = \
sprintf("sizeof(u_int32_t) + (%s == NULL ? 0 : %s->size)", \
$2, $2);
@@ -121,6 +148,17 @@ BEGIN {
}
nvars++;
}
+/^[ ]*(WRLOCK|WRLOCKNZ)/ {
+ nlocks++;
+
+ if ($1 == "WRLOCK") {
+ lock_if_zero[nlocks] = 1;
+ } else {
+ lock_if_zero[nlocks] = 0;
+ }
+
+ lock_pgnos[nlocks] = $2;
+}
/^[ ]*END/ {
if (!in_begin) {
print "Invalid format: missing BEGIN statement"
@@ -128,7 +166,7 @@ BEGIN {
}
# Declare the record type.
- printf("\n#define\tDB_%s\t%d\n", funcname, rectype) >> HFILE
+ printf("#define\tDB_%s\t%d\n", funcname, rectype) >> HFILE
# Structure declaration.
printf("typedef struct _%s_args {\n", funcname) >> HFILE
@@ -146,17 +184,26 @@ BEGIN {
}
printf("\t%s\t%s;\n", t, vars[i]) >> HFILE
}
- printf("} __%s_args;\n\n", funcname) >> HFILE
+ printf("} %s_args;\n\n", funcname) >> HFILE
- # Output the log, print and read functions.
- if (!is_deprecated)
+ # Output the log, print, read, and getpgnos functions.
+ if (need_log_function) {
log_function();
+
+ # The getpgnos function calls DB-private (__rep_*) functions,
+ # so we only generate it for our own logging functions,
+ # not application-specific ones.
+ if (dbprivate) {
+ getpgnos_function();
+ }
+ }
print_function();
read_function();
# Recovery template
- cmd = sprintf("sed -e s/PREF/%s/ -e s/FUNC/%s/ < %s/rec_ctemp >> %s",
- prefix, thisfunc, TDIR, TFILE)
+ cmd = sprintf(\
+ "sed -e s/PREF/%s/ -e s/FUNC/%s/ < template/rec_ctemp >> %s",
+ prefix, thisfunc, TFILE)
system(cmd);
# Done writing stuff, reset and continue.
@@ -164,63 +211,141 @@ BEGIN {
}
END {
+ # End the conditional for the HFILE
+ printf("#endif\n") >> HFILE;
+
# Print initialization routine; function prototype
- printf("int __%s_init_print __P((DB_ENV *));\n", prefix) >> HFILE;
+ p[1] = sprintf("int %s_init_print %s%s", prefix,
+ "__P((DB_ENV *, int (***)(DB_ENV *, DBT *, DB_LSN *, ",
+ "db_recops, void *), size_t *));");
+ p[2] = "";
+ proto_format(p);
+
+ # Create the routine to call __db_add_recovery(print_fn, id)
+ printf("int\n%s_init_print(dbenv, dtabp, dtabsizep)\n", \
+ prefix) >> CFILE;
+ printf("\tDB_ENV *dbenv;\n") >> CFILE;;
+ printf("\tint (***dtabp)__P((DB_ENV *, DBT *, DB_LSN *,") >> CFILE;
+ printf(" db_recops, void *));\n") >> CFILE;
+ printf("\tsize_t *dtabsizep;\n{\n") >> CFILE;
+ # If application-specific, the user will need a prototype for
+ # __db_add_recovery, since they won't have DB's.
+ if (!dbprivate) {
+ printf("\tint __db_add_recovery __P((DB_ENV *,\n") >> CFILE;
+ printf(\
+"\t int (***)(DB_ENV *, DBT *, DB_LSN *, db_recops, void *),\n") >> CFILE;
+ printf("\t size_t *,\n") >> CFILE;
+ printf(\
+"\t int (*)(DB_ENV *, DBT *, DB_LSN *, db_recops, void *), u_int32_t));\n") \
+ >> CFILE;
+ }
- # Create the routine to call db_add_recovery(print_fn, id)
- printf("int\n__%s_init_print(dbenv)\n", prefix) >> CFILE;
- printf("\tDB_ENV *dbenv;\n{\n\tint ret;\n\n") >> CFILE;
+ printf("\tint ret;\n\n") >> CFILE;
for (i = 0; i < num_funcs; i++) {
- printf("\tif ((ret = __db_add_recovery(dbenv,\n") >> CFILE;
- printf("\t __%s_print, DB_%s)) != 0)\n", \
+ printf("\tif ((ret = __db_add_recovery(dbenv, ") >> CFILE;
+ printf("dtabp, dtabsizep,\n") >> CFILE;
+ printf("\t %s_print, DB_%s)) != 0)\n", \
funcs[i], funcs[i]) >> CFILE;
printf("\t\treturn (ret);\n") >> CFILE;
}
printf("\treturn (0);\n}\n\n") >> CFILE;
- # Recover initialization routine
- printf("int __%s_init_recover __P((DB_ENV *));\n", prefix) >> HFILE;
+ # We only want to generate *_init_{getpgnos,recover} functions
+ # if this is a DB-private, rather than application-specific,
+ # set of recovery functions. Application-specific recovery functions
+ # should be dispatched using the DB_ENV->set_app_dispatch callback
+ # rather than a DB dispatch table ("dtab").
+ if (!dbprivate)
+ exit
- # Create the routine to call db_add_recovery(func, id)
- printf("int\n__%s_init_recover(dbenv)\n", prefix) >> CFILE;
- printf("\tDB_ENV *dbenv;\n{\n\tint ret;\n\n") >> CFILE;
+ # Page number initialization routine; function prototype
+ p[1] = sprintf("int %s_init_getpgnos %s%s", prefix,
+ "__P((DB_ENV *, int (***)(DB_ENV *, DBT *, DB_LSN *, ",
+ "db_recops, void *), size_t *));");
+ p[2] = "";
+ proto_format(p);
+
+ # Create the routine to call db_add_recovery(pgno_fn, id)
+ printf("int\n%s_init_getpgnos(dbenv, dtabp, dtabsizep)\n", \
+ prefix) >> CFILE;
+ printf("\tDB_ENV *dbenv;\n") >> CFILE;
+ printf("\tint (***dtabp)__P((DB_ENV *, DBT *, DB_LSN *,") >> CFILE;
+ printf(" db_recops, void *));\n") >> CFILE;
+ printf("\tsize_t *dtabsizep;\n{\n\tint ret;\n\n") >> CFILE;
for (i = 0; i < num_funcs; i++) {
- printf("\tif ((ret = __db_add_recovery(dbenv,\n") >> CFILE;
- if (funcs_dep[i] == 1)
- printf("\t __deprecated_recover, DB_%s)) != 0)\n", \
- funcs[i]) >> CFILE;
- else
- printf("\t __%s_recover, DB_%s)) != 0)\n", \
- funcs[i], funcs[i]) >> CFILE;
+ printf("\tif ((ret = __db_add_recovery(dbenv, ") >> CFILE;
+ printf("dtabp, dtabsizep,\n") >> CFILE;
+ printf("\t %s_getpgnos, DB_%s)) != 0)\n", \
+ funcs[i], funcs[i]) >> CFILE;
printf("\t\treturn (ret);\n") >> CFILE;
}
printf("\treturn (0);\n}\n\n") >> CFILE;
- # End the conditional for the HFILE
- printf("#endif\n") >> HFILE;
+ # Recover initialization routine
+ p[1] = sprintf("int %s_init_recover %s%s", prefix,
+ "__P((DB_ENV *, int (***)(DB_ENV *, DBT *, DB_LSN *, ",
+ "db_recops, void *), size_t *));");
+ p[2] = "";
+ proto_format(p);
+
+ # Create the routine to call db_add_recovery(func, id)
+ printf("int\n%s_init_recover(dbenv, dtabp, dtabsizep)\n", \
+ prefix) >> CFILE;
+ printf("\tDB_ENV *dbenv;\n") >> CFILE;
+ printf("\tint (***dtabp)__P((DB_ENV *, DBT *, DB_LSN *,") >> CFILE;
+ printf(" db_recops, void *));\n") >> CFILE;
+ printf("\tsize_t *dtabsizep;\n{\n\tint ret;\n\n") >> CFILE;
+ for (i = 0; i < num_funcs; i++) {
+ printf("\tif ((ret = __db_add_recovery(dbenv, ") >> CFILE;
+ printf("dtabp, dtabsizep,\n") >> CFILE;
+ printf("\t %s_recover, DB_%s)) != 0)\n", \
+ funcs[i], funcs[i]) >> CFILE;
+ printf("\t\treturn (ret);\n") >> CFILE;
+ }
+ printf("\treturn (0);\n}\n") >> CFILE;
}
function log_function() {
# Write the log function; function prototype
- printf("int __%s_log __P((", funcname) >> HFILE;
- printf("DB_ENV *, DB_TXN *, DB_LSN *, u_int32_t") >> HFILE;
+ pi = 1;
+ p[pi++] = sprintf("int %s_log", funcname);
+ p[pi++] = " ";
+ if (has_dbp == 1) {
+ p[pi++] = "__P((DB *, DB_TXN *, DB_LSN *, u_int32_t";
+ } else {
+ p[pi++] = "__P((DB_ENV *, DB_TXN *, DB_LSN *, u_int32_t";
+ }
for (i = 0; i < nvars; i++) {
- printf(", ") >> HFILE;
- if (modes[i] == "DBT")
- printf("const ") >> HFILE;
- printf("%s", types[i]) >> HFILE;
- if (modes[i] == "DBT")
- printf(" *") >> HFILE;
+ if (modes[i] == "DB")
+ continue;
+ p[pi++] = ", ";
+ p[pi++] = sprintf("%s%s%s",
+ (modes[i] == "DBT" || modes[i] == "PGDBT") ? "const " : "",
+ types[i],
+ (modes[i] == "DBT" || modes[i] == "PGDBT") ? " *" : "");
}
- printf("));\n") >> HFILE;
+ p[pi++] = "";
+ p[pi++] = "));";
+ p[pi++] = "";
+ proto_format(p);
# Function declaration
- printf("int\n__%s_log(dbenv, txnid, ret_lsnp, flags", \
- funcname) >> CFILE;
+ if (has_dbp == 1) {
+ printf("int\n%s_log(dbp, txnid, ret_lsnp, flags", \
+ funcname) >> CFILE;
+ } else {
+ printf("int\n%s_log(dbenv, txnid, ret_lsnp, flags", \
+ funcname) >> CFILE;
+ }
for (i = 0; i < nvars; i++) {
+ if (modes[i] == "DB") {
+ # We pass in fileids on the dbp, so if this is one,
+ # skip it.
+ continue;
+ }
printf(",") >> CFILE;
if ((i % 6) == 0)
- printf("\n\t") >> CFILE;
+ printf("\n ") >> CFILE;
else
printf(" ") >> CFILE;
printf("%s", vars[i]) >> CFILE;
@@ -228,65 +353,95 @@ function log_function() {
printf(")\n") >> CFILE;
# Now print the parameters
- printf("\tDB_ENV *dbenv;\n") >> CFILE;
+ if (has_dbp == 1) {
+ printf("\tDB *dbp;\n") >> CFILE;
+ } else {
+ printf("\tDB_ENV *dbenv;\n") >> CFILE;
+ }
printf("\tDB_TXN *txnid;\n\tDB_LSN *ret_lsnp;\n") >> CFILE;
printf("\tu_int32_t flags;\n") >> CFILE;
for (i = 0; i < nvars; i++) {
- if (modes[i] == "DBT")
+ # We just skip for modes == DB.
+ if (modes[i] == "DBT" || modes[i] == "PGDBT")
printf("\tconst %s *%s;\n", types[i], vars[i]) >> CFILE;
- else
+ else if (modes[i] != "DB")
printf("\t%s %s;\n", types[i], vars[i]) >> CFILE;
}
# Function body and local decls
printf("{\n") >> CFILE;
printf("\tDBT logrec;\n") >> CFILE;
+ if (has_dbp == 1)
+ printf("\tDB_ENV *dbenv;\n") >> CFILE;
printf("\tDB_LSN *lsnp, null_lsn;\n") >> CFILE;
if (is_dbt == 1)
printf("\tu_int32_t zero;\n") >> CFILE;
- printf("\tu_int32_t rectype, txn_num;\n") >> CFILE;
+ if (is_uint == 1)
+ printf("\tu_int32_t uinttmp;\n") >> CFILE;
+ printf("\tu_int32_t npad, rectype, txn_num;\n") >> CFILE;
printf("\tint ret;\n") >> CFILE;
printf("\tu_int8_t *bp;\n\n") >> CFILE;
# Initialization
+ if (has_dbp == 1)
+ printf("\tdbenv = dbp->dbenv;\n") >> CFILE;
printf("\trectype = DB_%s;\n", funcname) >> CFILE;
- printf("\tif (txnid != NULL &&\n") >> CFILE;
- printf("\t TAILQ_FIRST(&txnid->kids) != NULL &&\n") >> CFILE;
- printf("\t (ret = __txn_activekids(dbenv, rectype, txnid)) != 0)\n")\
- >> CFILE;
- printf("\t\treturn (ret);\n") >> CFILE;
- printf("\ttxn_num = txnid == NULL ? 0 : txnid->txnid;\n") >> CFILE;
+ printf("\tnpad = 0;\n\n") >> CFILE;
+
printf("\tif (txnid == NULL) {\n") >> CFILE;
- printf("\t\tZERO_LSN(null_lsn);\n") >> CFILE;
+ printf("\t\ttxn_num = 0;\n") >> CFILE;
+ printf("\t\tnull_lsn.file = 0;\n") >> CFILE;
+ printf("\t\tnull_lsn.offset = 0;\n") >> CFILE;
printf("\t\tlsnp = &null_lsn;\n") >> CFILE;
- printf("\t} else\n\t\tlsnp = &txnid->last_lsn;\n") >> CFILE;
+ printf("\t} else {\n") >> CFILE;
+ if (funcname != "__db_debug" && dbprivate) {
+ printf(\
+ "\t\tif (TAILQ_FIRST(&txnid->kids) != NULL &&\n") >> CFILE;
+ printf("\t\t (ret = __txn_activekids(") >> CFILE;
+ printf("dbenv, rectype, txnid)) != 0)\n") >> CFILE;
+ printf("\t\t\treturn (ret);\n") >> CFILE;
+ }
+ printf("\t\ttxn_num = txnid->txnid;\n") >> CFILE;
+ printf("\t\tlsnp = &txnid->last_lsn;\n") >> CFILE;
+ printf("\t}\n\n") >> CFILE;
# Malloc
printf("\tlogrec.size = sizeof(rectype) + ") >> CFILE;
printf("sizeof(txn_num) + sizeof(DB_LSN)") >> CFILE;
for (i = 0; i < nvars; i++)
printf("\n\t + %s", sizes[i]) >> CFILE;
- printf(";\n\tif ((ret = ") >> CFILE;
- printf(\
- "__os_malloc(dbenv, logrec.size, NULL, &logrec.data)) != 0)\n")\
- >> CFILE;
- printf("\t\treturn (ret);\n\n") >> CFILE;
+ printf(";\n") >> CFILE
+ if (dbprivate) {
+ printf("\tif (CRYPTO_ON(dbenv)) {\n") >> CFILE;
+ printf("\t\tnpad =\n") >> CFILE
+ printf(\
+"\t\t ((DB_CIPHER *)dbenv->crypto_handle)->adj_size(logrec.size);\n")\
+ >> CFILE;
+ printf("\t\tlogrec.size += npad;\n\t}\n\n") >> CFILE
+ }
+ write_malloc("logrec.data", "logrec.size", CFILE)
+ printf("\tif (npad > 0)\n") >> CFILE;
+ printf("\t\tmemset((u_int8_t *)logrec.data + logrec.size ") >> CFILE;
+ printf("- npad, 0, npad);\n\n") >> CFILE;
# Copy args into buffer
- printf("\tbp = logrec.data;\n") >> CFILE;
+ printf("\tbp = logrec.data;\n\n") >> CFILE;
printf("\tmemcpy(bp, &rectype, sizeof(rectype));\n") >> CFILE;
- printf("\tbp += sizeof(rectype);\n") >> CFILE;
+ printf("\tbp += sizeof(rectype);\n\n") >> CFILE;
printf("\tmemcpy(bp, &txn_num, sizeof(txn_num));\n") >> CFILE;
- printf("\tbp += sizeof(txn_num);\n") >> CFILE;
+ printf("\tbp += sizeof(txn_num);\n\n") >> CFILE;
printf("\tmemcpy(bp, lsnp, sizeof(DB_LSN));\n") >> CFILE;
- printf("\tbp += sizeof(DB_LSN);\n") >> CFILE;
+ printf("\tbp += sizeof(DB_LSN);\n\n") >> CFILE;
for (i = 0; i < nvars; i ++) {
- if (modes[i] == "ARG") {
- printf("\tmemcpy(bp, &%s, %s);\n", \
- vars[i], sizes[i]) >> CFILE;
- printf("\tbp += %s;\n", sizes[i]) >> CFILE;
- } else if (modes[i] == "DBT") {
+ if (modes[i] == "ARG" || modes[i] == "WRLOCK" || \
+ modes[i] == "WRLOCKNZ") {
+ printf("\tuinttmp = (u_int32_t)%s;\n", \
+ vars[i]) >> CFILE;
+ printf("\tmemcpy(bp, &uinttmp, sizeof(uinttmp));\n") \
+ >> CFILE;
+ printf("\tbp += sizeof(uinttmp);\n\n") >> CFILE;
+ } else if (modes[i] == "DBT" || modes[i] == "PGDBT") {
printf("\tif (%s == NULL) {\n", vars[i]) >> CFILE;
printf("\t\tzero = 0;\n") >> CFILE;
printf("\t\tmemcpy(bp, &zero, sizeof(u_int32_t));\n") \
@@ -299,49 +454,85 @@ function log_function() {
>> CFILE;
printf("\t\tmemcpy(bp, %s->data, %s->size);\n", \
vars[i], vars[i]) >> CFILE;
- printf("\t\tbp += %s->size;\n\t}\n", vars[i]) >> CFILE;
+ printf("\t\tbp += %s->size;\n\t}\n\n", \
+ vars[i]) >> CFILE;
+ } else if (modes[i] == "DB") {
+ # We need to log a DB handle. To do this, we
+ # actually just log its fileid; from that, we'll
+ # be able to acquire an open handle at recovery time.
+ printf("\tDB_ASSERT(dbp->log_filename != NULL);\n") \
+ >> CFILE;
+ printf("\tif (dbp->log_filename->id == ") >> CFILE;
+ printf("DB_LOGFILEID_INVALID &&\n\t ") >> CFILE
+ printf("(ret = __dbreg_lazy_id(dbp)) != 0)\n") \
+ >> CFILE;
+ printf("\t\treturn (ret);\n\n") >> CFILE;
+
+ printf("\tuinttmp = ") >> CFILE;
+ printf("(u_int32_t)dbp->log_filename->id;\n") >> CFILE;
+ printf("\tmemcpy(bp, &uinttmp, sizeof(uinttmp));\n") \
+ >> CFILE;
+ printf("\tbp += sizeof(uinttmp);\n\n") >> CFILE;
} else { # POINTER
printf("\tif (%s != NULL)\n", vars[i]) >> CFILE;
printf("\t\tmemcpy(bp, %s, %s);\n", vars[i], \
sizes[i]) >> CFILE;
printf("\telse\n") >> CFILE;
printf("\t\tmemset(bp, 0, %s);\n", sizes[i]) >> CFILE;
- printf("\tbp += %s;\n", sizes[i]) >> CFILE;
+ printf("\tbp += %s;\n\n", sizes[i]) >> CFILE;
}
}
- # Error checking
- printf("\tDB_ASSERT((u_int32_t)") >> CFILE;
- printf("(bp - (u_int8_t *)logrec.data) == logrec.size);\n") >> CFILE;
+ # Error checking. User code won't have DB_ASSERT available, but
+ # this is a pretty unlikely assertion anyway, so we just leave it out
+ # rather than requiring assert.h.
+ if (dbprivate) {
+ printf("\tDB_ASSERT((u_int32_t)") >> CFILE;
+ printf("(bp - (u_int8_t *)logrec.data) <= logrec.size);\n") \
+ >> CFILE;
+ }
# Issue log call
- # The logging system cannot call the public log_put routine
- # due to mutual exclusion constraints. So, if we are
- # generating code for the log subsystem, use the internal
- # __log_put.
- if (prefix == "log")
- printf("\tret = __log_put\(dbenv, ret_lsnp, ") >> CFILE;
- else
- printf("\tret = log_put(dbenv, ret_lsnp, ") >> CFILE;
- printf("(DBT *)&logrec, flags);\n") >> CFILE;
+ # In the non-private (application) case we could not call the crypto
+ # alignment function when this record was built (the necessary header
+ # files are not available), so let log_put copy the record to get the
+ # alignment right; in the private case the record is already padded,
+ # so DB_NOCOPY can be passed.
+ printf(\
+ "\tret = dbenv->log_put(dbenv,\n\t ret_lsnp, (DBT *)&logrec, ") \
+ >> CFILE;
+ if (dbprivate) {
+ printf("flags | DB_NOCOPY);\n") >> CFILE;
+ } else {
+ printf("flags);\n") >> CFILE;
+ }
# Update the transaction's last_lsn
- printf("\tif (txnid != NULL)\n") >> CFILE;
+ printf("\tif (txnid != NULL && ret == 0)\n") >> CFILE;
printf("\t\ttxnid->last_lsn = *ret_lsnp;\n") >> CFILE;
+ # Log writes may fail if we run out of disk space. When LOG_DIAGNOSTIC
+ # is defined, print out the records that did not make it to disk.
+ printf("#ifdef LOG_DIAGNOSTIC\n") >> CFILE
+ printf("\tif (ret != 0)\n") >> CFILE;
+ printf("\t\t(void)%s_print(dbenv,\n", funcname) >> CFILE;
+ printf("\t\t (DBT *)&logrec, ret_lsnp, NULL, NULL);\n") >> CFILE
+ printf("#endif\n") >> CFILE
+
# Free and return
- printf("\t__os_free(logrec.data, logrec.size);\n") >> CFILE;
+ write_free("logrec.data", CFILE)
printf("\treturn (ret);\n}\n\n") >> CFILE;
}
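The buffer-marshalling code this emits lays each record out as a fixed header (record type, transaction id, previous LSN) followed by the arguments. A sketch of the generated C, with pgno standing in for an arbitrary ARG/WRLOCK field, would be:

    bp = logrec.data;

    memcpy(bp, &rectype, sizeof(rectype));
    bp += sizeof(rectype);

    memcpy(bp, &txn_num, sizeof(txn_num));
    bp += sizeof(txn_num);

    memcpy(bp, lsnp, sizeof(DB_LSN));
    bp += sizeof(DB_LSN);

    /* ARG, WRLOCK and WRLOCKNZ fields go through a u_int32_t. */
    uinttmp = (u_int32_t)pgno;
    memcpy(bp, &uinttmp, sizeof(uinttmp));
    bp += sizeof(uinttmp);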
function print_function() {
# Write the print function; function prototype
- printf("int __%s_print", funcname) >> HFILE;
- printf(" __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));\n") \
- >> HFILE;
+ p[1] = sprintf("int %s_print", funcname);
+ p[2] = " ";
+ p[3] = "__P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));";
+ p[4] = "";
+ proto_format(p);
# Function declaration
- printf("int\n__%s_print(dbenv, ", funcname) >> CFILE;
+ printf("int\n%s_print(dbenv, ", funcname) >> CFILE;
printf("dbtp, lsnp, notused2, notused3)\n") >> CFILE;
printf("\tDB_ENV *dbenv;\n") >> CFILE;
printf("\tDBT *dbtp;\n") >> CFILE;
@@ -349,20 +540,26 @@ function print_function() {
printf("\tdb_recops notused2;\n\tvoid *notused3;\n{\n") >> CFILE;
# Locals
- printf("\t__%s_args *argp;\n", funcname) >> CFILE;
- printf("\tu_int32_t i;\n\tu_int ch;\n\tint ret;\n\n") >> CFILE;
+ printf("\t%s_args *argp;\n", funcname) >> CFILE;
+ for (i = 0; i < nvars; i ++)
+ if (modes[i] == "DBT" || modes[i] == "PGDBT") {
+ printf("\tu_int32_t i;\n") >> CFILE
+ printf("\tint ch;\n") >> CFILE
+ break;
+ }
+
+ printf("\tint ret;\n\n") >> CFILE;
# Get rid of complaints about unused parameters.
- printf("\ti = 0;\n\tch = 0;\n") >> CFILE;
printf("\tnotused2 = DB_TXN_ABORT;\n\tnotused3 = NULL;\n\n") >> CFILE;
# Call read routine to initialize structure
- printf("\tif ((ret = __%s_read(dbenv, dbtp->data, &argp)) != 0)\n", \
+ printf("\tif ((ret = %s_read(dbenv, dbtp->data, &argp)) != 0)\n", \
funcname) >> CFILE;
printf("\t\treturn (ret);\n") >> CFILE;
# Print values in every record
- printf("\tprintf(\"[%%lu][%%lu]%s: ", funcname) >> CFILE;
+ printf("\t(void)printf(\n\t \"[%%lu][%%lu]%s: ", funcname) >> CFILE;
printf("rec: %%lu txnid %%lx ") >> CFILE;
printf("prevlsn [%%lu][%%lu]\\n\",\n") >> CFILE;
printf("\t (u_long)lsnp->file,\n") >> CFILE;
@@ -374,19 +571,17 @@ function print_function() {
# Now print fields of argp
for (i = 0; i < nvars; i ++) {
- printf("\tprintf(\"\\t%s: ", vars[i]) >> CFILE;
+ printf("\t(void)printf(\"\\t%s: ", vars[i]) >> CFILE;
- if (modes[i] == "DBT") {
+ if (modes[i] == "DBT" || modes[i] == "PGDBT") {
printf("\");\n") >> CFILE;
printf("\tfor (i = 0; i < ") >> CFILE;
printf("argp->%s.size; i++) {\n", vars[i]) >> CFILE;
printf("\t\tch = ((u_int8_t *)argp->%s.data)[i];\n", \
vars[i]) >> CFILE;
- printf("\t\tif (isprint(ch) || ch == 0xa)\n") >> CFILE;
- printf("\t\t\tputchar(ch);\n") >> CFILE;
- printf("\t\telse\n") >> CFILE;
- printf("\t\t\tprintf(\"%%#x \", ch);\n") >> CFILE;
- printf("\t}\n\tprintf(\"\\n\");\n") >> CFILE;
+ printf("\t\tprintf(isprint(ch) || ch == 0x0a") >> CFILE;
+ printf(" ? \"%%c\" : \"%%#x \", ch);\n") >> CFILE;
+ printf("\t}\n\t(void)printf(\"\\n\");\n") >> CFILE;
} else if (types[i] == "DB_LSN *") {
printf("[%%%s][%%%s]\\n\",\n", \
formats[i], formats[i]) >> CFILE;
@@ -405,52 +600,63 @@ function print_function() {
printf("argp->%s);\n", vars[i]) >> CFILE;
}
}
- printf("\tprintf(\"\\n\");\n") >> CFILE;
- printf("\t__os_free(argp, 0);\n") >> CFILE;
+ printf("\t(void)printf(\"\\n\");\n") >> CFILE;
+ write_free("argp", CFILE);
printf("\treturn (0);\n") >> CFILE;
printf("}\n\n") >> CFILE;
}
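As an example of the DBT/PGDBT case above, for a hypothetical field named pgdata the generated print function would contain:

    (void)printf("\tpgdata: ");
    for (i = 0; i < argp->pgdata.size; i++) {
        ch = ((u_int8_t *)argp->pgdata.data)[i];
        printf(isprint(ch) || ch == 0x0a ? "%c" : "%#x ", ch);
    }
    (void)printf("\n");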
function read_function() {
# Write the read function; function prototype
- printf("int __%s_read __P((DB_ENV *, void *, ", funcname) >> HFILE;
- printf("__%s_args **));\n", funcname) >> HFILE;
+ p[1] = sprintf("int %s_read __P((DB_ENV *, void *,", funcname);
+ p[2] = " ";
+ p[3] = sprintf("%s_args **));", funcname);
+ p[4] = "";
+ proto_format(p);
# Function declaration
- printf("int\n__%s_read(dbenv, recbuf, argpp)\n", funcname) >> CFILE;
+ printf("int\n%s_read(dbenv, recbuf, argpp)\n", funcname) >> CFILE;
# Now print the parameters
printf("\tDB_ENV *dbenv;\n") >> CFILE;
printf("\tvoid *recbuf;\n") >> CFILE;
- printf("\t__%s_args **argpp;\n", funcname) >> CFILE;
+ printf("\t%s_args **argpp;\n", funcname) >> CFILE;
# Function body and local decls
- printf("{\n\t__%s_args *argp;\n", funcname) >> CFILE;
+ printf("{\n\t%s_args *argp;\n", funcname) >> CFILE;
+ if (is_uint == 1)
+ printf("\tu_int32_t uinttmp;\n") >> CFILE;
printf("\tu_int8_t *bp;\n") >> CFILE;
- printf("\tint ret;\n") >> CFILE;
- printf("\n\tret = __os_malloc(dbenv, sizeof(") >> CFILE;
- printf("__%s_args) +\n\t sizeof(DB_TXN), NULL, &argp);\n", \
- funcname) >> CFILE;
- printf("\tif (ret != 0)\n\t\treturn (ret);\n") >> CFILE;
- # Set up the pointers to the txnid and the prev lsn
- printf("\targp->txnid = (DB_TXN *)&argp[1];\n") >> CFILE;
+ if (dbprivate) {
+ # We only use dbenv and ret in the private malloc case.
+ printf("\tint ret;\n\n") >> CFILE;
+ } else {
+ printf("\t/* Keep the compiler quiet. */\n") >> CFILE;
+ printf("\n\tdbenv = NULL;\n") >> CFILE;
+ }
+
+ malloc_size = sprintf("sizeof(%s_args) + sizeof(DB_TXN)", funcname)
+ write_malloc("argp", malloc_size, CFILE)
+
+ # Set up the pointer to the txnid.
+ printf("\targp->txnid = (DB_TXN *)&argp[1];\n\n") >> CFILE;
# First get the record type, prev_lsn, and txnid fields.
printf("\tbp = recbuf;\n") >> CFILE;
printf("\tmemcpy(&argp->type, bp, sizeof(argp->type));\n") >> CFILE;
- printf("\tbp += sizeof(argp->type);\n") >> CFILE;
+ printf("\tbp += sizeof(argp->type);\n\n") >> CFILE;
printf("\tmemcpy(&argp->txnid->txnid, bp, ") >> CFILE;
printf("sizeof(argp->txnid->txnid));\n") >> CFILE;
- printf("\tbp += sizeof(argp->txnid->txnid);\n") >> CFILE;
+ printf("\tbp += sizeof(argp->txnid->txnid);\n\n") >> CFILE;
printf("\tmemcpy(&argp->prev_lsn, bp, sizeof(DB_LSN));\n") >> CFILE;
- printf("\tbp += sizeof(DB_LSN);\n") >> CFILE;
+ printf("\tbp += sizeof(DB_LSN);\n\n") >> CFILE;
# Now get rest of data.
for (i = 0; i < nvars; i ++) {
- if (modes[i] == "DBT") {
+ if (modes[i] == "DBT" || modes[i] == "PGDBT") {
printf("\tmemset(&argp->%s, 0, sizeof(argp->%s));\n", \
vars[i], vars[i]) >> CFILE;
printf("\tmemcpy(&argp->%s.size, ", vars[i]) >> CFILE;
@@ -458,18 +664,181 @@ function read_function() {
printf("\tbp += sizeof(u_int32_t);\n") >> CFILE;
printf("\targp->%s.data = bp;\n", vars[i]) >> CFILE;
printf("\tbp += argp->%s.size;\n", vars[i]) >> CFILE;
- } else if (modes[i] == "ARG") {
- printf("\tmemcpy(&argp->%s, bp, %s%s));\n", \
- vars[i], "sizeof(argp->", vars[i]) >> CFILE;
- printf("\tbp += sizeof(argp->%s);\n", vars[i]) >> CFILE;
+ } else if (modes[i] == "ARG" || modes[i] == "WRLOCK" || \
+ modes[i] == "WRLOCKNZ" || modes[i] == "DB") {
+ printf("\tmemcpy(&uinttmp, bp, sizeof(uinttmp));\n") \
+ >> CFILE;
+ printf("\targp->%s = (%s)uinttmp;\n", vars[i], \
+ types[i]) >> CFILE;
+ printf("\tbp += sizeof(uinttmp);\n") >> CFILE;
} else { # POINTER
printf("\tmemcpy(&argp->%s, bp, ", vars[i]) >> CFILE;
printf(" sizeof(argp->%s));\n", vars[i]) >> CFILE;
printf("\tbp += sizeof(argp->%s);\n", vars[i]) >> CFILE;
}
+ printf("\n") >> CFILE;
}
# Free and return
printf("\t*argpp = argp;\n") >> CFILE;
printf("\treturn (0);\n}\n\n") >> CFILE;
}
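The unmarshalling mirrors the log side. For the same hypothetical pgno field (an ARG stored through uinttmp, with db_pgno_t as its illustrative type) the generated read code is:

    memcpy(&uinttmp, bp, sizeof(uinttmp));
    argp->pgno = (db_pgno_t)uinttmp;
    bp += sizeof(uinttmp);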
+
+function getpgnos_function() {
+ # Write the getpgnos function; function prototype
+ p[1] = sprintf("int %s_getpgnos", funcname);
+ p[2] = " ";
+ p[3] = "__P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));";
+ p[4] = "";
+ proto_format(p);
+
+ # Function declaration
+ printf("int\n%s_getpgnos(dbenv, ", funcname) >> CFILE;
+ printf("rec, lsnp, notused1, summary)\n") >> CFILE;
+ printf("\tDB_ENV *dbenv;\n") >> CFILE;
+ printf("\tDBT *rec;\n") >> CFILE;
+ printf("\tDB_LSN *lsnp;\n") >> CFILE;
+ printf("\tdb_recops notused1;\n") >> CFILE;
+ printf("\tvoid *summary;\n{\n") >> CFILE;
+
+ # If this record type acquires no page locks, record that fact and return.
+ if (nlocks == 0) {
+ printf("\tTXN_RECS *t;\n") >> CFILE;
+ printf("\tint ret;\n") >> CFILE;
+ printf("\tCOMPQUIET(rec, NULL);\n") >> CFILE;
+ printf("\tCOMPQUIET(notused1, DB_TXN_ABORT);\n") >> CFILE;
+
+ printf("\n\tt = (TXN_RECS *)summary;\n") >> CFILE;
+ printf("\n\tif ((ret = __rep_check_alloc(dbenv, ") >> CFILE;
+ printf("t, 1)) != 0)\n") >> CFILE;
+ printf("\t\treturn (ret);\n") >> CFILE;
+
+ printf("\n\tt->array[t->npages].flags = LSN_PAGE_NOLOCK;\n") \
+ >> CFILE;
+ printf("\tt->array[t->npages].lsn = *lsnp;\n") >> CFILE;
+ printf("\tt->array[t->npages].fid = DB_LOGFILEID_INVALID;\n") \
+ >> CFILE;
+ printf("\tmemset(&t->array[t->npages].pgdesc, 0,\n") >> CFILE;
+ printf("\t sizeof(t->array[t->npages].pgdesc));\n") >> CFILE;
+ printf("\n\tt->npages++;\n") >> CFILE;
+
+ printf("\n") >> CFILE;
+ printf("\treturn (0);\n") >> CFILE;
+ printf("}\n\n") >> CFILE;
+ return;
+ }
+
+ # Locals
+ printf("\tDB *dbp;\n") >> CFILE;
+ printf("\tTXN_RECS *t;\n") >> CFILE;
+ printf("\t%s_args *argp;\n", funcname) >> CFILE;
+ printf("\tu_int32_t ret;\n\n") >> CFILE;
+
+ # Shut up compiler.
+ printf("\tCOMPQUIET(notused1, DB_TXN_ABORT);\n\n") >> CFILE;
+
+ printf("\targp = NULL;\n") >> CFILE;
+ printf("\tt = (TXN_RECS *)summary;\n\n") >> CFILE;
+
+ printf("\tif ((ret = %s_read(dbenv, rec->data, &argp)) != 0)\n", \
+ funcname) >> CFILE;
+ printf("\t\treturn (ret);\n") >> CFILE;
+
+ # Get file ID.
+ printf("\n\tif ((ret = __dbreg_id_to_db(dbenv,\n\t ") >> CFILE;
+ printf("argp->txnid, &dbp, argp->fileid, 0)) != 0)\n") >> CFILE;
+ printf("\t\tgoto err;\n") >> CFILE;
+
+ printf("\n\tif ((ret = __rep_check_alloc(dbenv, t, %d)) != 0)\n", \
+ nlocks) >> CFILE;
+ printf("\t\tgoto err;\n\n") >> CFILE;
+
+ for (i = 1; i <= nlocks; i++) {
+ if (lock_if_zero[i]) {
+ indent = "\t";
+ } else {
+ indent = "\t\t";
+ printf("\tif (argp->%s != PGNO_INVALID) {\n", \
+ lock_pgnos[i]) >> CFILE;
+ }
+ printf("%st->array[t->npages].flags = 0;\n", indent) >> CFILE;
+ printf("%st->array[t->npages].fid = argp->fileid;\n", indent) \
+ >> CFILE;
+ printf("%st->array[t->npages].lsn = *lsnp;\n", indent) >> CFILE;
+ printf("%st->array[t->npages].pgdesc.pgno = argp->%s;\n", \
+ indent, lock_pgnos[i]) >> CFILE;
+ printf("%st->array[t->npages].pgdesc.type = DB_PAGE_LOCK;\n", \
+ indent) >> CFILE;
+ printf("%smemcpy(t->array[t->npages].pgdesc.fileid, ", indent) \
+ >> CFILE;
+ printf("dbp->fileid,\n%s DB_FILE_ID_LEN);\n", \
+ indent, indent) >> CFILE;
+ printf("%st->npages++;\n", indent) >> CFILE;
+ if (!lock_if_zero[i]) {
+ printf("\t}\n") >> CFILE;
+ }
+ }
+
+ printf("\nerr:\tif (argp != NULL)\n") >> CFILE;
+ write_free("argp", CFILE);
+
+ printf("\treturn (ret);\n") >> CFILE;
+
+ printf("}\n\n") >> CFILE;
+}
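For a record type with no page locks, the generated getpgnos routine body reduces to the fixed block emitted above, roughly:

    TXN_RECS *t;
    int ret;
    COMPQUIET(rec, NULL);
    COMPQUIET(notused1, DB_TXN_ABORT);

    t = (TXN_RECS *)summary;

    if ((ret = __rep_check_alloc(dbenv, t, 1)) != 0)
        return (ret);

    t->array[t->npages].flags = LSN_PAGE_NOLOCK;
    t->array[t->npages].lsn = *lsnp;
    t->array[t->npages].fid = DB_LOGFILEID_INVALID;
    memset(&t->array[t->npages].pgdesc, 0,
        sizeof(t->array[t->npages].pgdesc));

    t->npages++;

    return (0);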
+
+# proto_format --
+# Pretty-print a function prototype.
+function proto_format(p)
+{
+ printf("/*\n") >> CFILE;
+
+ s = "";
+ for (i = 1; i in p; ++i)
+ s = s p[i];
+
+ t = " * PUBLIC: "
+ if (length(s) + length(t) < 80)
+ printf("%s%s", t, s) >> CFILE;
+ else {
+ split(s, p, "__P");
+ len = length(t) + length(p[1]);
+ printf("%s%s", t, p[1]) >> CFILE
+
+ n = split(p[2], comma, ",");
+ comma[1] = "__P" comma[1];
+ for (i = 1; i <= n; i++) {
+ if (len + length(comma[i]) > 70) {
+ printf("\n * PUBLIC: ") >> CFILE;
+ len = 0;
+ }
+ printf("%s%s", comma[i], i == n ? "" : ",") >> CFILE;
+ len += length(comma[i]) + 2;
+ }
+ }
+ printf("\n */\n") >> CFILE;
+ delete p;
+}
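proto_format() replaces the direct writes to HFILE in the old code: instead of writing a prototype into a header, it writes a PUBLIC comment into the C file (presumably picked up later by the dist scripts that build the auto-generated headers). Using the __db_debug record name as an illustration, the short-line case produces:

    /*
     * PUBLIC: int __db_debug_read __P((DB_ENV *, void *, __db_debug_args **));
     */

Longer prototypes are split at the __P and wrapped at commas onto additional " * PUBLIC:" lines.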
+
+function write_malloc(ptr, size, file)
+{
+ if (dbprivate) {
+ printf("\tif ((ret = ") >> file;
+ printf(\
+ "__os_malloc(dbenv,\n\t " size ", &" ptr ")) != 0)\n") \
+ >> file
+ printf("\t\treturn (ret);\n\n") >> file;
+ } else {
+ printf("\tif ((" ptr " = malloc(" size ")) == NULL)\n") >> file
+ printf("\t\treturn (ENOMEM);\n\n") >> file
+ }
+}
+
+function write_free(ptr, file)
+{
+ if (dbprivate) {
+ printf("\t__os_free(dbenv, " ptr ");\n") >> file
+ } else {
+ printf("\tfree(" ptr ");\n") >> file
+ }
+}
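These two helpers keep the generated code usable both inside the library and in standalone application builds. For ptr = logrec.data and size = logrec.size, the in-library (dbprivate) case emits:

    if ((ret = __os_malloc(dbenv,
        logrec.size, &logrec.data)) != 0)
        return (ret);

    /* later, via write_free() */
    __os_free(dbenv, logrec.data);

while the non-private case falls back to plain malloc/free:

    if ((logrec.data = malloc(logrec.size)) == NULL)
        return (ENOMEM);

    /* later, via write_free() */
    free(logrec.data);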
diff --git a/bdb/dist/gen_rpc.awk b/bdb/dist/gen_rpc.awk
index 6c3bffc1aa4..03975d7321b 100644
--- a/bdb/dist/gen_rpc.awk
+++ b/bdb/dist/gen_rpc.awk
@@ -1,5 +1,5 @@
#
-# $Id: gen_rpc.awk,v 11.25 2001/01/02 20:04:55 sue Exp $
+# $Id: gen_rpc.awk,v 11.50 2002/07/02 19:26:57 sue Exp $
# Awk script for generating client/server RPC code.
#
# This awk script generates most of the RPC routines for DB client/server
@@ -9,27 +9,30 @@
#
# This awk script requires that these variables be set when it is called:
#
+# major -- Major version number
+# minor -- Minor version number
+# xidsize -- size of GIDs
# client_file -- the C source file being created for client code
-# cproto_file -- the header file create for client prototypes
# ctmpl_file -- the C template file being created for client code
# sed_file -- the sed file created to alter server proc code
# server_file -- the C source file being created for server code
-# sproto_file -- the header file create for server prototypes
# stmpl_file -- the C template file being created for server code
# xdr_file -- the XDR message file created
#
# And stdin must be the input file that defines the RPC setup.
BEGIN {
- if (client_file == "" || cproto_file == "" || ctmpl_file == "" ||
+ if (major == "" || minor == "" || xidsize == "" ||
+ client_file == "" || ctmpl_file == "" ||
sed_file == "" || server_file == "" ||
- sproto_file == "" || stmpl_file == "" || xdr_file == "") {
+ stmpl_file == "" || xdr_file == "") {
print "Usage: gen_rpc.awk requires these variables be set:"
+ print "\tmajor\t-- Major version number"
+ print "\tminor\t-- Minor version number"
+ print "\txidsize\t-- GID size"
print "\tclient_file\t-- the client C source file being created"
- print "\tcproto_file\t-- the client prototype header created"
print "\tctmpl_file\t-- the client template file being created"
print "\tsed_file\t-- the sed command file being created"
print "\tserver_file\t-- the server C source file being created"
- print "\tsproto_file\t-- the server prototype header created"
print "\tstmpl_file\t-- the server template file being created"
print "\txdr_file\t-- the XDR message file being created"
error = 1; exit
@@ -40,10 +43,6 @@ BEGIN {
printf("/* Do not edit: automatically built by gen_rpc.awk. */\n") \
> CFILE
- CHFILE=cproto_file
- printf("/* Do not edit: automatically built by gen_rpc.awk. */\n") \
- > CHFILE
-
TFILE = ctmpl_file
printf("/* Do not edit: automatically built by gen_rpc.awk. */\n") \
> TFILE
@@ -52,10 +51,6 @@ BEGIN {
printf("/* Do not edit: automatically built by gen_rpc.awk. */\n") \
> SFILE
- SHFILE=sproto_file
- printf("/* Do not edit: automatically built by gen_rpc.awk. */\n") \
- > SHFILE
-
# Server procedure template and a sed file to massage an existing
# template source file to change args.
# SEDFILE should be same name as PFILE but .c
@@ -74,28 +69,24 @@ BEGIN {
END {
printf("#endif /* HAVE_RPC */\n") >> CFILE
printf("#endif /* HAVE_RPC */\n") >> TFILE
- printf("program DB_SERVERPROG {\n") >> XFILE
- printf("\tversion DB_SERVERVERS {\n") >> XFILE
+ printf("program DB_RPC_SERVERPROG {\n") >> XFILE
+ printf("\tversion DB_RPC_SERVERVERS {\n") >> XFILE
for (i = 1; i < nendlist; ++i)
printf("\t\t%s;\n", endlist[i]) >> XFILE
- printf("\t} = 1;\n") >> XFILE
+ printf("\t} = %d%03d;\n", major, minor) >> XFILE
printf("} = 351457;\n") >> XFILE
}
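The END block now encodes the DB version into the RPC version number via the %d%03d format. As an illustration only (the major=4, minor=1 values and the __env_create entry are assumptions, not part of this patch), the text written to the XDR file would look like:

    program DB_RPC_SERVERPROG {
        version DB_RPC_SERVERVERS {
            __env_create_reply __DB_env_create(__env_create_msg) = 1;
        } = 4001;
    } = 351457;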
/^[ ]*BEGIN/ {
name = $2;
- msgid = $3;
nofunc_code = 0;
funcvars = 0;
- gen_code = 1;
ret_code = 0;
- if ($4 == "NOCLNTCODE")
- gen_code = 0;
- if ($4 == "NOFUNC")
+ if ($3 == "NOFUNC")
nofunc_code = 1;
- if ($4 == "RETCODE")
+ if ($3 == "RETCODE")
ret_code = 1;
nvars = 0;
@@ -131,8 +122,10 @@ END {
if (c_type[nvars] == "DB *") {
ctp_type[nvars] = "CT_DB";
- db_handle = 1;
- db_idx = nvars;
+ if (db_handle != 1) {
+ db_handle = 1;
+ db_idx = nvars;
+ }
}
if (c_type[nvars] == "DBC *") {
@@ -182,6 +175,78 @@ END {
/^[ ]*END/ {
#
# =====================================================
+ # File headers, if necessary.
+ #
+ if (first == 0) {
+ printf("#include \"db_config.h\"\n") >> CFILE
+ printf("\n") >> CFILE
+ printf("#ifdef HAVE_RPC\n") >> CFILE
+ printf("#ifndef NO_SYSTEM_INCLUDES\n") >> CFILE
+ printf("#include <sys/types.h>\n\n") >> CFILE
+ printf("#include <rpc/rpc.h>\n") >> CFILE
+ printf("#include <rpc/xdr.h>\n") >> CFILE
+ printf("\n") >> CFILE
+ printf("#include <string.h>\n") >> CFILE
+ printf("#endif\n") >> CFILE
+ printf("\n") >> CFILE
+ printf("#include \"db_int.h\"\n") >> CFILE
+ printf("#include \"dbinc/txn.h\"\n") >> CFILE
+ printf("\n") >> CFILE
+ printf("#include \"dbinc_auto/db_server.h\"\n") >> CFILE
+ printf("#include \"dbinc_auto/rpc_client_ext.h\"\n") >> CFILE
+ printf("\n") >> CFILE
+
+ printf("#include \"db_config.h\"\n") >> TFILE
+ printf("\n") >> TFILE
+ printf("#ifdef HAVE_RPC\n") >> TFILE
+ printf("#ifndef NO_SYSTEM_INCLUDES\n") >> TFILE
+ printf("#include <sys/types.h>\n") >> TFILE
+ printf("#include <rpc/rpc.h>\n") >> TFILE
+ printf("\n") >> TFILE
+ printf("#include <string.h>\n") >> TFILE
+ printf("#endif\n") >> TFILE
+ printf("#include \"db_int.h\"\n") >> TFILE
+ printf("#include \"dbinc_auto/db_server.h\"\n") >> TFILE
+ printf("#include \"dbinc/txn.h\"\n") >> TFILE
+ printf("\n") >> TFILE
+
+ printf("#include \"db_config.h\"\n") >> SFILE
+ printf("\n") >> SFILE
+ printf("#ifndef NO_SYSTEM_INCLUDES\n") >> SFILE
+ printf("#include <sys/types.h>\n") >> SFILE
+ printf("\n") >> SFILE
+ printf("#include <rpc/rpc.h>\n") >> SFILE
+ printf("#include <rpc/xdr.h>\n") >> SFILE
+ printf("\n") >> SFILE
+ printf("#include <string.h>\n") >> SFILE
+ printf("#endif\n") >> SFILE
+ printf("\n") >> SFILE
+ printf("#include \"db_int.h\"\n") >> SFILE
+ printf("#include \"dbinc_auto/db_server.h\"\n") >> SFILE
+ printf("#include \"dbinc/db_server_int.h\"\n") >> SFILE
+ printf("#include \"dbinc_auto/rpc_server_ext.h\"\n") >> SFILE
+ printf("\n") >> SFILE
+
+ printf("#include \"db_config.h\"\n") >> PFILE
+ printf("\n") >> PFILE
+ printf("#ifndef NO_SYSTEM_INCLUDES\n") >> PFILE
+ printf("#include <sys/types.h>\n") >> PFILE
+ printf("\n") >> PFILE
+ printf("#include <rpc/rpc.h>\n") >> PFILE
+ printf("\n") >> PFILE
+ printf("#include <string.h>\n") >> PFILE
+ printf("#endif\n") >> PFILE
+ printf("\n") >> PFILE
+ printf("#include \"db_int.h\"\n") >> PFILE
+ printf("#include \"dbinc_auto/db_server.h\"\n") >> PFILE
+ printf("#include \"dbinc/db_server_int.h\"\n") >> PFILE
+ printf("#include \"dbinc_auto/rpc_server_ext.h\"\n") >> PFILE
+ printf("\n") >> PFILE
+
+ first = 1;
+ }
+ #
+ # =====================================================
# Generate Client Nofunc code first if necessary
# NOTE: This code must be first, because we don't want any
# other code other than this function, so before we write
@@ -190,50 +255,48 @@ END {
#
if (nofunc_code == 1) {
#
- # First time through, put out the general illegal function
+ # The first time through, emit the generic "no server" and
+ # "illegal in an RPC environment" helper functions.
#
if (first_nofunc == 0) {
- printf("int __dbcl_rpc_illegal ") >> CHFILE
- printf("__P((DB_ENV *, char *));\n") >> CHFILE
- printf("int\n__dbcl_rpc_illegal(dbenv, name)\n") \
- >> CFILE
+ printf("static int __dbcl_noserver ") >> CFILE
+ printf("__P((DB_ENV *));\n\n") >> CFILE
+ printf("static int\n") >> CFILE
+ printf("__dbcl_noserver(dbenv)\n") >> CFILE
+ printf("\tDB_ENV *dbenv;\n") >> CFILE
+ printf("{\n\t__db_err(dbenv,") >> CFILE
+ printf(" \"No server environment\");\n") >> CFILE
+ printf("\treturn (DB_NOSERVER);\n") >> CFILE
+ printf("}\n\n") >> CFILE
+
+ printf("static int __dbcl_rpc_illegal ") >> CFILE
+ printf("__P((DB_ENV *, char *));\n\n") >> CFILE
+ printf("static int\n") >> CFILE
+ printf("__dbcl_rpc_illegal(dbenv, name)\n") >> CFILE
printf("\tDB_ENV *dbenv;\n\tchar *name;\n") >> CFILE
- printf("{\n\t__db_err(dbenv,\n") >> CFILE
- printf("\t \"%%s method meaningless in RPC") >> CFILE
+ printf("{\n\t__db_err(dbenv,") >> CFILE
+ printf(" \"%%s method meaningless in an RPC") >> CFILE
printf(" environment\", name);\n") >> CFILE
printf("\treturn (__db_eopnotsup(dbenv));\n") >> CFILE
printf("}\n\n") >> CFILE
+
first_nofunc = 1
}
#
- # If we are doing a list, spit out prototype decl.
- #
- for (i = 0; i < nvars; i++) {
- if (rpc_type[i] != "LIST")
- continue;
- printf("static int __dbcl_%s_%slist __P((", \
- name, args[i]) >> CFILE
- printf("__%s_%slist **, ", name, args[i]) >> CFILE
- if (list_type[i] == "STRING")
- printf("%s));\n", c_type[i]) >> CFILE
- if (list_type[i] == "INT")
- printf("u_int32_t));\n") >> CFILE
- if (list_type[i] == "ID")
- printf("%s));\n", c_type[i]) >> CFILE
- printf("static void __dbcl_%s_%sfree __P((", \
- name, args[i]) >> CFILE
- printf("__%s_%slist **));\n", name, args[i]) >> CFILE
- }
- #
# Spit out PUBLIC prototypes.
#
- printf("int __dbcl_%s __P((",name) >> CHFILE
- sep = "";
+ pi = 1;
+ p[pi++] = sprintf("int __dbcl_%s __P((", name);
+ p[pi++] = "";
for (i = 0; i < nvars; ++i) {
- printf("%s%s", sep, pr_type[i]) >> CHFILE
- sep = ", ";
+ p[pi++] = pr_type[i];
+ p[pi++] = ", ";
}
- printf("));\n") >> CHFILE
+ p[pi - 1] = "";
+ p[pi++] = "));";
+ p[pi] = "";
+ proto_format(p, 0, CFILE);
+
#
# Spit out function name/args.
#
@@ -286,7 +349,8 @@ END {
# to COMPQUIET that one.
for (i = 1; i < nvars; ++i) {
if (rpc_type[i] == "CONST" || rpc_type[i] == "DBT" ||
- rpc_type[i] == "LIST" || rpc_type[i] == "STRING") {
+ rpc_type[i] == "LIST" || rpc_type[i] == "STRING" ||
+ rpc_type[i] == "GID") {
printf("\tCOMPQUIET(%s, NULL);\n", args[i]) \
>> CFILE
}
@@ -313,60 +377,43 @@ END {
# XDR messages.
#
printf("\n") >> XFILE
- #
- # If there are any lists, generate the structure to contain them.
- #
+ printf("struct __%s_msg {\n", name) >> XFILE
for (i = 0; i < nvars; ++i) {
if (rpc_type[i] == "LIST") {
- printf("struct __%s_%slist {\n", name, args[i]) >> XFILE
- printf("\topaque ent<>;\n") >> XFILE
- printf("\t__%s_%slist *next;\n", name, args[i]) >> XFILE
- printf("};\n\n") >> XFILE
+ if (list_type[i] == "GID") {
+ printf("\topaque %s<>;\n", args[i]) >> XFILE
+ } else {
+ printf("\tunsigned int %s<>;\n", args[i]) >> XFILE
+ }
}
- }
- printf("struct __%s_msg {\n", name) >> XFILE
- for (i = 0; i < nvars; ++i) {
if (rpc_type[i] == "ID") {
printf("\tunsigned int %scl_id;\n", args[i]) >> XFILE
}
if (rpc_type[i] == "STRING") {
printf("\tstring %s<>;\n", args[i]) >> XFILE
}
+ if (rpc_type[i] == "GID") {
+ printf("\topaque %s[%d];\n", args[i], xidsize) >> XFILE
+ }
if (rpc_type[i] == "INT") {
printf("\tunsigned int %s;\n", args[i]) >> XFILE
}
if (rpc_type[i] == "DBT") {
printf("\tunsigned int %sdlen;\n", args[i]) >> XFILE
printf("\tunsigned int %sdoff;\n", args[i]) >> XFILE
+ printf("\tunsigned int %sulen;\n", args[i]) >> XFILE
printf("\tunsigned int %sflags;\n", args[i]) >> XFILE
printf("\topaque %sdata<>;\n", args[i]) >> XFILE
}
- if (rpc_type[i] == "LIST") {
- printf("\t__%s_%slist *%slist;\n", \
- name, args[i], args[i]) >> XFILE
- }
}
printf("};\n") >> XFILE
printf("\n") >> XFILE
#
- # If there are any lists, generate the structure to contain them.
- #
- for (i = 0; i < rvars; ++i) {
- if (ret_type[i] == "LIST") {
- printf("struct __%s_%sreplist {\n", \
- name, retargs[i]) >> XFILE
- printf("\topaque ent<>;\n") >> XFILE
- printf("\t__%s_%sreplist *next;\n", \
- name, retargs[i]) >> XFILE
- printf("};\n\n") >> XFILE
- }
- }
- #
# Generate the reply message
#
printf("struct __%s_reply {\n", name) >> XFILE
- printf("\tunsigned int status;\n") >> XFILE
+ printf("\tint status;\n") >> XFILE
for (i = 0; i < rvars; ++i) {
if (ret_type[i] == "ID") {
printf("\tunsigned int %scl_id;\n", retargs[i]) >> XFILE
@@ -384,8 +431,11 @@ END {
printf("\topaque %sdata<>;\n", retargs[i]) >> XFILE
}
if (ret_type[i] == "LIST") {
- printf("\t__%s_%sreplist *%slist;\n", \
- name, retargs[i], retargs[i]) >> XFILE
+ if (retlist_type[i] == "GID") {
+ printf("\topaque %s<>;\n", retargs[i]) >> XFILE
+ } else {
+ printf("\tunsigned int %s<>;\n", retargs[i]) >> XFILE
+ }
}
}
printf("};\n") >> XFILE
@@ -394,158 +444,30 @@ END {
sprintf("__%s_reply __DB_%s(__%s_msg) = %d", \
name, name, name, nendlist);
nendlist++;
-
- #
- # =====================================================
- # File headers, if necessary.
- #
- if (first == 0) {
- printf("#include \"db_config.h\"\n") >> CFILE
- printf("\n") >> CFILE
- printf("#ifdef HAVE_RPC\n") >> CFILE
- printf("#ifndef NO_SYSTEM_INCLUDES\n") >> CFILE
- printf("#include <sys/types.h>\n") >> CFILE
- printf("#include <rpc/rpc.h>\n") >> CFILE
- printf("#include <rpc/xdr.h>\n") >> CFILE
- printf("\n") >> CFILE
- printf("#include <errno.h>\n") >> CFILE
- printf("#include <string.h>\n") >> CFILE
- printf("#endif\n") >> CFILE
- printf("#include \"db_server.h\"\n") >> CFILE
- printf("\n") >> CFILE
- printf("#include \"db_int.h\"\n") >> CFILE
- printf("#include \"db_page.h\"\n") >> CFILE
- printf("#include \"db_ext.h\"\n") >> CFILE
- printf("#include \"mp.h\"\n") >> CFILE
- printf("#include \"rpc_client_ext.h\"\n") >> CFILE
- printf("#include \"txn.h\"\n") >> CFILE
- printf("\n") >> CFILE
- n = split(CHFILE, hpieces, "/");
- printf("#include \"%s\"\n", hpieces[n]) >> CFILE
- printf("\n") >> CFILE
-
- printf("#include \"db_config.h\"\n") >> TFILE
- printf("\n") >> TFILE
- printf("#ifdef HAVE_RPC\n") >> TFILE
- printf("#ifndef NO_SYSTEM_INCLUDES\n") >> TFILE
- printf("#include <sys/types.h>\n") >> TFILE
- printf("#include <rpc/rpc.h>\n") >> TFILE
- printf("\n") >> TFILE
- printf("#include <errno.h>\n") >> TFILE
- printf("#include <string.h>\n") >> TFILE
- printf("#endif\n") >> TFILE
- printf("#include \"db_server.h\"\n") >> TFILE
- printf("\n") >> TFILE
- printf("#include \"db_int.h\"\n") >> TFILE
- printf("#include \"db_page.h\"\n") >> TFILE
- printf("#include \"db_ext.h\"\n") >> TFILE
- printf("#include \"txn.h\"\n") >> TFILE
- printf("\n") >> TFILE
- n = split(CHFILE, hpieces, "/");
- printf("#include \"%s\"\n", hpieces[n]) >> TFILE
- printf("\n") >> TFILE
-
- printf("#include \"db_config.h\"\n") >> SFILE
- printf("\n") >> SFILE
- printf("#ifndef NO_SYSTEM_INCLUDES\n") >> SFILE
- printf("#include <sys/types.h>\n") >> SFILE
- printf("\n") >> SFILE
- printf("#include <rpc/rpc.h>\n") >> SFILE
- printf("#include <rpc/xdr.h>\n") >> SFILE
- printf("\n") >> SFILE
- printf("#include <errno.h>\n") >> SFILE
- printf("#include <string.h>\n") >> SFILE
- printf("#endif\n") >> SFILE
- printf("#include \"db_server.h\"\n") >> SFILE
- printf("\n") >> SFILE
- printf("#include \"db_int.h\"\n") >> SFILE
- printf("#include \"db_server_int.h\"\n") >> SFILE
- printf("#include \"rpc_server_ext.h\"\n") >> SFILE
- printf("\n") >> SFILE
- n = split(SHFILE, hpieces, "/");
- printf("#include \"%s\"\n", hpieces[n]) >> SFILE
- printf("\n") >> SFILE
-
- printf("#include \"db_config.h\"\n") >> PFILE
- printf("\n") >> PFILE
- printf("#ifndef NO_SYSTEM_INCLUDES\n") >> PFILE
- printf("#include <sys/types.h>\n") >> PFILE
- printf("\n") >> PFILE
- printf("#include <rpc/rpc.h>\n") >> PFILE
- printf("\n") >> PFILE
- printf("#include <errno.h>\n") >> PFILE
- printf("#include <string.h>\n") >> PFILE
- printf("#include \"db_server.h\"\n") >> PFILE
- printf("#endif\n") >> PFILE
- printf("\n") >> PFILE
- printf("#include \"db_int.h\"\n") >> PFILE
- printf("#include \"db_server_int.h\"\n") >> PFILE
- printf("#include \"rpc_server_ext.h\"\n") >> PFILE
- printf("\n") >> PFILE
- n = split(SHFILE, hpieces, "/");
- printf("#include \"%s\"\n", hpieces[n]) >> PFILE
- printf("\n") >> PFILE
-
- first = 1;
- }
-
#
# =====================================================
# Server functions.
#
- # If we are doing a list, send out local list prototypes.
- #
- for (i = 0; i < nvars; ++i) {
- if (rpc_type[i] != "LIST")
- continue;
- if (list_type[i] != "STRING" && list_type[i] != "INT" &&
- list_type[i] != "ID")
- continue;
- printf("int __db_%s_%slist __P((", name, args[i]) >> SFILE
- printf("__%s_%slist *, ", name, args[i]) >> SFILE
- if (list_type[i] == "STRING") {
- printf("char ***));\n") >> SFILE
- }
- if (list_type[i] == "INT" || list_type[i] == "ID") {
- printf("u_int32_t **));\n") >> SFILE
- }
- printf("void __db_%s_%sfree __P((", name, args[i]) >> SFILE
- if (list_type[i] == "STRING")
- printf("char **));\n\n") >> SFILE
- if (list_type[i] == "INT" || list_type[i] == "ID")
- printf("u_int32_t *));\n\n") >> SFILE
-
- }
- #
# First spit out PUBLIC prototypes for server functions.
#
- printf("__%s_reply * __db_%s_%d __P((__%s_msg *));\n", \
- name, name, msgid, name) >> SHFILE
+ p[1] = sprintf("__%s_reply *__db_%s_%d%03d __P((__%s_msg *, struct svc_req *));",
+ name, name, major, minor, name);
+ p[2] = "";
+ proto_format(p, 0, SFILE);
printf("__%s_reply *\n", name) >> SFILE
- printf("__db_%s_%d(req)\n", name, msgid) >> SFILE
- printf("\t__%s_msg *req;\n", name) >> SFILE;
+ printf("__db_%s_%d%03d(msg, req)\n", name, major, minor) >> SFILE
+ printf("\t__%s_msg *msg;\n", name) >> SFILE;
+ printf("\tstruct svc_req *req;\n", name) >> SFILE;
printf("{\n") >> SFILE
- doing_list = 0;
- #
- # If we are doing a list, decompose it for server proc we'll call.
- #
- for (i = 0; i < nvars; ++i) {
- if (rpc_type[i] != "LIST")
- continue;
- doing_list = 1;
- if (list_type[i] == "STRING")
- printf("\tchar **__db_%slist;\n", args[i]) >> SFILE
- if (list_type[i] == "ID" || list_type[i] == "INT")
- printf("\tu_int32_t *__db_%slist;\n", args[i]) >> SFILE
- }
- if (doing_list)
- printf("\tint ret;\n") >> SFILE
printf("\tstatic __%s_reply reply; /* must be static */\n", \
name) >> SFILE
if (xdr_free) {
printf("\tstatic int __%s_free = 0; /* must be static */\n\n", \
name) >> SFILE
+ }
+ printf("\tCOMPQUIET(req, NULL);\n", name) >> SFILE
+ if (xdr_free) {
printf("\tif (__%s_free)\n", name) >> SFILE
printf("\t\txdr_free((xdrproc_t)xdr___%s_reply, (void *)&reply);\n", \
name) >> SFILE
@@ -553,8 +475,8 @@ END {
printf("\n\t/* Reinitialize allocated fields */\n") >> SFILE
for (i = 0; i < rvars; ++i) {
if (ret_type[i] == "LIST") {
- printf("\treply.%slist = NULL;\n", \
- retargs[i]) >> SFILE
+ printf("\treply.%s.%s_val = NULL;\n", \
+ retargs[i], retargs[i]) >> SFILE
}
if (ret_type[i] == "DBT") {
printf("\treply.%sdata.%sdata_val = NULL;\n", \
@@ -564,44 +486,43 @@ END {
}
need_out = 0;
- for (i = 0; i < nvars; ++i) {
- if (rpc_type[i] == "LIST") {
- printf("\n\tif ((ret = __db_%s_%slist(", \
- name, args[i]) >> SFILE
- printf("req->%slist, &__db_%slist)) != 0)\n", \
- args[i], args[i]) >> SFILE
- printf("\t\tgoto out;\n") >> SFILE
- need_out = 1;
- }
- }
-
#
# Compose server proc to call. Decompose message components as args.
#
- printf("\n\t__%s_%d_proc(", name, msgid) >> SFILE
+ printf("\n\t__%s_proc(", name) >> SFILE
sep = "";
for (i = 0; i < nvars; ++i) {
+ if (rpc_type[i] == "IGNORE") {
+ continue;
+ }
if (rpc_type[i] == "ID") {
- printf("%sreq->%scl_id", sep, args[i]) >> SFILE
+ printf("%smsg->%scl_id", sep, args[i]) >> SFILE
}
if (rpc_type[i] == "STRING") {
- printf("%s(*req->%s == '\\0') ? NULL : req->%s", \
+ printf("%s(*msg->%s == '\\0') ? NULL : msg->%s", \
sep, args[i], args[i]) >> SFILE
}
+ if (rpc_type[i] == "GID") {
+ printf("%smsg->%s", sep, args[i]) >> SFILE
+ }
if (rpc_type[i] == "INT") {
- printf("%sreq->%s", sep, args[i]) >> SFILE
+ printf("%smsg->%s", sep, args[i]) >> SFILE
}
if (rpc_type[i] == "LIST") {
- printf("%s__db_%slist", sep, args[i]) >> SFILE
+ printf("%smsg->%s.%s_val", \
+ sep, args[i], args[i]) >> SFILE
+ printf("%smsg->%s.%s_len", \
+ sep, args[i], args[i]) >> SFILE
}
if (rpc_type[i] == "DBT") {
- printf("%sreq->%sdlen", sep, args[i]) >> SFILE
+ printf("%smsg->%sdlen", sep, args[i]) >> SFILE
sep = ",\n\t ";
- printf("%sreq->%sdoff", sep, args[i]) >> SFILE
- printf("%sreq->%sflags", sep, args[i]) >> SFILE
- printf("%sreq->%sdata.%sdata_val", \
+ printf("%smsg->%sdoff", sep, args[i]) >> SFILE
+ printf("%smsg->%sulen", sep, args[i]) >> SFILE
+ printf("%smsg->%sflags", sep, args[i]) >> SFILE
+ printf("%smsg->%sdata.%sdata_val", \
sep, args[i], args[i]) >> SFILE
- printf("%sreq->%sdata.%sdata_len", \
+ printf("%smsg->%sdata.%sdata_len", \
sep, args[i], args[i]) >> SFILE
}
sep = ",\n\t ";
@@ -611,12 +532,6 @@ END {
printf("%s&__%s_free);\n", sep, name) >> SFILE
else
printf(");\n\n") >> SFILE
- for (i = 0; i < nvars; ++i) {
- if (rpc_type[i] == "LIST") {
- printf("\t__db_%s_%sfree(__db_%slist);\n", \
- name, args[i], args[i]) >> SFILE
- }
- }
if (need_out) {
printf("\nout:\n") >> SFILE
}
@@ -624,191 +539,98 @@ END {
printf("}\n\n") >> SFILE
#
- # If we are doing a list, write list functions for this op.
- #
- for (i = 0; i < nvars; ++i) {
- if (rpc_type[i] != "LIST")
- continue;
- if (list_type[i] != "STRING" && list_type[i] != "INT" &&
- list_type[i] != "ID")
- continue;
- printf("int\n") >> SFILE
- printf("__db_%s_%slist(locp, ppp)\n", name, args[i]) >> SFILE
- printf("\t__%s_%slist *locp;\n", name, args[i]) >> SFILE
- if (list_type[i] == "STRING") {
- printf("\tchar ***ppp;\n{\n") >> SFILE
- printf("\tchar **pp;\n") >> SFILE
- }
- if (list_type[i] == "INT" || list_type[i] == "ID") {
- printf("\tu_int32_t **ppp;\n{\n") >> SFILE
- printf("\tu_int32_t *pp;\n") >> SFILE
- }
- printf("\tint cnt, ret, size;\n") >> SFILE
- printf("\t__%s_%slist *nl;\n\n", name, args[i]) >> SFILE
- printf("\tfor (cnt = 0, nl = locp;") >> SFILE
- printf(" nl != NULL; cnt++, nl = nl->next)\n\t\t;\n\n") >> SFILE
- printf("\tif (cnt == 0) {\n") >> SFILE
- printf("\t\t*ppp = NULL;\n") >> SFILE
- printf("\t\treturn (0);\n\t}\n") >> SFILE
- printf("\tsize = sizeof(*pp) * (cnt + 1);\n") >> SFILE
- printf("\tif ((ret = __os_malloc(NULL, size, ") >> SFILE
- printf("NULL, ppp)) != 0)\n") >> SFILE
- printf("\t\treturn (ret);\n") >> SFILE
- printf("\tmemset(*ppp, 0, size);\n") >> SFILE
- printf("\tfor (pp = *ppp, nl = locp;") >> SFILE
- printf(" nl != NULL; nl = nl->next, pp++) {\n") >> SFILE
- if (list_type[i] == "STRING") {
- printf("\t\tif ((ret = __os_malloc(NULL ,") >> SFILE
- printf("nl->ent.ent_len + 1, NULL, pp)) != 0)\n") \
- >> SFILE
- printf("\t\t\tgoto out;\n") >> SFILE
- printf("\t\tif ((ret = __os_strdup(NULL, ") >> SFILE
- printf("(char *)nl->ent.ent_val, pp)) != 0)\n") >> SFILE
- printf("\t\t\tgoto out;\n") >> SFILE
- }
- if (list_type[i] == "INT" || list_type[i] == "ID")
- printf("\t\t*pp = *(u_int32_t *)nl->ent.ent_val;\n") \
- >> SFILE
- printf("\t}\n") >> SFILE
- printf("\treturn (0);\n") >> SFILE
- if (list_type[i] == "STRING") {
- printf("out:\n") >> SFILE
- printf("\t__db_%s_%sfree(*ppp);\n", \
- name, args[i]) >> SFILE
- printf("\treturn (ret);\n") >> SFILE
- }
- printf("}\n\n") >> SFILE
-
- printf("void\n") >> SFILE
- printf("__db_%s_%sfree(pp)\n", name, args[i]) >> SFILE
-
- if (list_type[i] == "STRING")
- printf("\tchar **pp;\n") >> SFILE
- if (list_type[i] == "INT" || list_type[i] == "ID")
- printf("\tu_int32_t *pp;\n") >> SFILE
-
- printf("{\n") >> SFILE
- printf("\tsize_t size;\n") >> SFILE
-
- if (list_type[i] == "STRING")
- printf("\tchar **p;\n\n") >> SFILE
- if (list_type[i] == "INT" || list_type[i] == "ID")
- printf("\tu_int32_t *p;\n\n") >> SFILE
-
- printf("\tif (pp == NULL)\n\t\treturn;\n") >> SFILE
- printf("\tsize = sizeof(*p);\n") >> SFILE
- printf("\tfor (p = pp; *p != 0; p++) {\n") >> SFILE
- printf("\t\tsize += sizeof(*p);\n") >> SFILE
-
- if (list_type[i] == "STRING")
- printf("\t\t__os_free(*p, strlen(*p)+1);\n") >> SFILE
- printf("\t}\n") >> SFILE
- printf("\t__os_free(pp, size);\n") >> SFILE
- printf("}\n\n") >> SFILE
- }
-
- #
# =====================================================
# Generate Procedure Template Server code
#
# Produce SED file commands if needed at the same time
#
- # Start with PUBLIC prototypes
+ # Spit out comment, prototype, function name and arg list.
#
- printf("void __%s_%d_proc __P((", name, msgid) >> SHFILE
- sep = "";
- argcount = 0;
+ printf("/^\\/\\* BEGIN __%s_proc/,/^\\/\\* END __%s_proc/c\\\n", \
+ name, name) >> SEDFILE
+
+ printf("/* BEGIN __%s_proc */\n", name) >> PFILE
+ printf("/* BEGIN __%s_proc */\\\n", name) >> SEDFILE
+
+ pi = 1;
+ p[pi++] = sprintf("void __%s_proc __P((", name);
+ p[pi++] = "";
for (i = 0; i < nvars; ++i) {
- argcount++;
- split_lines(1);
- if (argcount == 0) {
- sep = "";
- }
if (rpc_type[i] == "IGNORE")
continue;
if (rpc_type[i] == "ID") {
- printf("%slong", sep) >> SHFILE
+ p[pi++] = "long";
+ p[pi++] = ", ";
}
if (rpc_type[i] == "STRING") {
- printf("%schar *", sep) >> SHFILE
+ p[pi++] = "char *";
+ p[pi++] = ", ";
+ }
+ if (rpc_type[i] == "GID") {
+ p[pi++] = "u_int8_t *";
+ p[pi++] = ", ";
}
if (rpc_type[i] == "INT") {
- printf("%su_int32_t", sep) >> SHFILE
+ p[pi++] = "u_int32_t";
+ p[pi++] = ", ";
}
- if (rpc_type[i] == "LIST" && list_type[i] == "STRING") {
- printf("%schar **", sep) >> SHFILE
+ if (rpc_type[i] == "LIST" && list_type[i] == "GID") {
+ p[pi++] = "u_int8_t *";
+ p[pi++] = ", ";
+ p[pi++] = "u_int32_t";
+ p[pi++] = ", ";
}
if (rpc_type[i] == "LIST" && list_type[i] == "INT") {
- printf("%su_int32_t *", sep) >> SHFILE
+ p[pi++] = "u_int32_t *";
+ p[pi++] = ", ";
+ p[pi++] = "u_int32_t";
+ p[pi++] = ", ";
}
if (rpc_type[i] == "LIST" && list_type[i] == "ID") {
- printf("%su_int32_t *", sep) >> SHFILE
+ p[pi++] = "u_int32_t *";
+ p[pi++] = ", ";
+ p[pi++] = "u_int32_t";
+ p[pi++] = ", ";
}
if (rpc_type[i] == "DBT") {
- printf("%su_int32_t", sep) >> SHFILE
- sep = ", ";
- argcount++;
- split_lines(1);
- if (argcount == 0) {
- sep = "";
- } else {
- sep = ", ";
- }
- printf("%su_int32_t", sep) >> SHFILE
- argcount++;
- split_lines(1);
- if (argcount == 0) {
- sep = "";
- } else {
- sep = ", ";
- }
- printf("%su_int32_t", sep) >> SHFILE
- argcount++;
- split_lines(1);
- if (argcount == 0) {
- sep = "";
- } else {
- sep = ", ";
- }
- printf("%svoid *", sep) >> SHFILE
- argcount++;
- split_lines(1);
- if (argcount == 0) {
- sep = "";
- } else {
- sep = ", ";
- }
- printf("%su_int32_t", sep) >> SHFILE
+ p[pi++] = "u_int32_t";
+ p[pi++] = ", ";
+ p[pi++] = "u_int32_t";
+ p[pi++] = ", ";
+ p[pi++] = "u_int32_t";
+ p[pi++] = ", ";
+ p[pi++] = "u_int32_t";
+ p[pi++] = ", ";
+ p[pi++] = "void *";
+ p[pi++] = ", ";
+ p[pi++] = "u_int32_t";
+ p[pi++] = ", ";
}
- sep = ", ";
}
- printf("%s__%s_reply *", sep, name) >> SHFILE
+ p[pi++] = sprintf("__%s_reply *", name);
if (xdr_free) {
- printf("%sint *));\n", sep) >> SHFILE
+ p[pi++] = ", ";
+ p[pi++] = "int *));";
} else {
- printf("));\n") >> SHFILE
+ p[pi++] = "";
+ p[pi++] = "));";
}
- #
- # Spit out function name and arg list
- #
- printf("/^\\/\\* BEGIN __%s_%d_proc/,/^\\/\\* END __%s_%d_proc/c\\\n", \
- name, msgid, name, msgid) >> SEDFILE
+ p[pi++] = "";
+ proto_format(p, 1, SEDFILE);
- printf("/* BEGIN __%s_%d_proc */\n", name, msgid) >> PFILE
- printf("/* BEGIN __%s_%d_proc */\\\n", name, msgid) >> SEDFILE
printf("void\n") >> PFILE
printf("void\\\n") >> SEDFILE
- printf("__%s_%d_proc(", name, msgid) >> PFILE
- printf("__%s_%d_proc(", name, msgid) >> SEDFILE
+ printf("__%s_proc(", name) >> PFILE
+ printf("__%s_proc(", name) >> SEDFILE
sep = "";
argcount = 0;
for (i = 0; i < nvars; ++i) {
argcount++;
- split_lines(0);
+ split_lines();
if (argcount == 0) {
sep = "";
}
- if (rpc_type[i] == "IGNORE")
+ if (rpc_type[i] == "IGNORE")
continue;
if (rpc_type[i] == "ID") {
printf("%s%scl_id", sep, args[i]) >> PFILE
@@ -818,20 +640,33 @@ END {
printf("%s%s", sep, args[i]) >> PFILE
printf("%s%s", sep, args[i]) >> SEDFILE
}
+ if (rpc_type[i] == "GID") {
+ printf("%s%s", sep, args[i]) >> PFILE
+ printf("%s%s", sep, args[i]) >> SEDFILE
+ }
if (rpc_type[i] == "INT") {
printf("%s%s", sep, args[i]) >> PFILE
printf("%s%s", sep, args[i]) >> SEDFILE
}
if (rpc_type[i] == "LIST") {
- printf("%s%slist", sep, args[i]) >> PFILE
- printf("%s%slist", sep, args[i]) >> SEDFILE
+ printf("%s%s", sep, args[i]) >> PFILE
+ printf("%s%s", sep, args[i]) >> SEDFILE
+ argcount++;
+ split_lines();
+ if (argcount == 0) {
+ sep = "";
+ } else {
+ sep = ", ";
+ }
+ printf("%s%slen", sep, args[i]) >> PFILE
+ printf("%s%slen", sep, args[i]) >> SEDFILE
}
if (rpc_type[i] == "DBT") {
printf("%s%sdlen", sep, args[i]) >> PFILE
printf("%s%sdlen", sep, args[i]) >> SEDFILE
sep = ", ";
argcount++;
- split_lines(0);
+ split_lines();
if (argcount == 0) {
sep = "";
} else {
@@ -840,7 +675,16 @@ END {
printf("%s%sdoff", sep, args[i]) >> PFILE
printf("%s%sdoff", sep, args[i]) >> SEDFILE
argcount++;
- split_lines(0);
+ split_lines();
+ if (argcount == 0) {
+ sep = "";
+ } else {
+ sep = ", ";
+ }
+ printf("%s%sulen", sep, args[i]) >> PFILE
+ printf("%s%sulen", sep, args[i]) >> SEDFILE
+ argcount++;
+ split_lines();
if (argcount == 0) {
sep = "";
} else {
@@ -849,7 +693,7 @@ END {
printf("%s%sflags", sep, args[i]) >> PFILE
printf("%s%sflags", sep, args[i]) >> SEDFILE
argcount++;
- split_lines(0);
+ split_lines();
if (argcount == 0) {
sep = "";
} else {
@@ -858,7 +702,7 @@ END {
printf("%s%sdata", sep, args[i]) >> PFILE
printf("%s%sdata", sep, args[i]) >> SEDFILE
argcount++;
- split_lines(0);
+ split_lines();
if (argcount == 0) {
sep = "";
} else {
@@ -890,22 +734,33 @@ END {
printf("\tchar *%s;\n", args[i]) >> PFILE
printf("\\\tchar *%s;\\\n", args[i]) >> SEDFILE
}
+ if (rpc_type[i] == "GID") {
+ printf("\tu_int8_t *%s;\n", args[i]) >> PFILE
+ printf("\\\tu_int8_t *%s;\\\n", args[i]) >> SEDFILE
+ }
if (rpc_type[i] == "INT") {
printf("\tu_int32_t %s;\n", args[i]) >> PFILE
printf("\\\tu_int32_t %s;\\\n", args[i]) >> SEDFILE
}
- if (rpc_type[i] == "LIST" && list_type[i] == "STRING") {
- printf("\tchar ** %slist;\n", args[i]) >> PFILE
- printf("\\\tchar ** %slist;\\\n", args[i]) >> SEDFILE
+ if (rpc_type[i] == "LIST" && list_type[i] == "GID") {
+ printf("\tu_int8_t * %s;\n", args[i]) >> PFILE
+ printf("\\\tu_int8_t * %s;\\\n", args[i]) >> SEDFILE
}
if (rpc_type[i] == "LIST" && list_type[i] == "INT") {
- printf("\tu_int32_t * %slist;\n", args[i]) >> PFILE
- printf("\\\tu_int32_t * %slist;\\\n", \
+ printf("\tu_int32_t * %s;\n", args[i]) >> PFILE
+ printf("\\\tu_int32_t * %s;\\\n", \
args[i]) >> SEDFILE
+ printf("\tu_int32_t %ssize;\n", args[i]) >> PFILE
+ printf("\\\tu_int32_t %ssize;\\\n", args[i]) >> SEDFILE
}
if (rpc_type[i] == "LIST" && list_type[i] == "ID") {
- printf("\tu_int32_t * %slist;\n", args[i]) >> PFILE
- printf("\\\tu_int32_t * %slist;\\\n", args[i]) \
+ printf("\tu_int32_t * %s;\n", args[i]) >> PFILE
+ printf("\\\tu_int32_t * %s;\\\n", args[i]) \
+ >> SEDFILE
+ }
+ if (rpc_type[i] == "LIST") {
+ printf("\tu_int32_t %slen;\n", args[i]) >> PFILE
+ printf("\\\tu_int32_t %slen;\\\n", args[i]) \
>> SEDFILE
}
if (rpc_type[i] == "DBT") {
@@ -913,6 +768,8 @@ END {
printf("\\\tu_int32_t %sdlen;\\\n", args[i]) >> SEDFILE
printf("\tu_int32_t %sdoff;\n", args[i]) >> PFILE
printf("\\\tu_int32_t %sdoff;\\\n", args[i]) >> SEDFILE
+ printf("\tu_int32_t %sulen;\n", args[i]) >> PFILE
+ printf("\\\tu_int32_t %sulen;\\\n", args[i]) >> SEDFILE
printf("\tu_int32_t %sflags;\n", args[i]) >> PFILE
printf("\\\tu_int32_t %sflags;\\\n", args[i]) >> SEDFILE
printf("\tvoid *%sdata;\n", args[i]) >> PFILE
@@ -928,8 +785,8 @@ END {
printf("\\\tint * freep;\\\n") >> SEDFILE
}
- printf("/* END __%s_%d_proc */\n", name, msgid) >> PFILE
- printf("/* END __%s_%d_proc */\n", name, msgid) >> SEDFILE
+ printf("/* END __%s_proc */\n", name) >> PFILE
+ printf("/* END __%s_proc */\n", name) >> SEDFILE
#
# Function body
@@ -957,43 +814,23 @@ END {
printf("}\n\n") >> PFILE
#
- # If we don't want client code generated, go on to next.
- #
- if (gen_code == 0)
- next;
-
- #
# =====================================================
# Generate Client code
#
- # If we are doing a list, spit out prototype decl.
- #
- for (i = 0; i < nvars; i++) {
- if (rpc_type[i] != "LIST")
- continue;
- printf("static int __dbcl_%s_%slist __P((", \
- name, args[i]) >> CFILE
- printf("__%s_%slist **, ", name, args[i]) >> CFILE
- if (list_type[i] == "STRING")
- printf("%s));\n", c_type[i]) >> CFILE
- if (list_type[i] == "INT")
- printf("u_int32_t));\n") >> CFILE
- if (list_type[i] == "ID")
- printf("%s));\n", c_type[i]) >> CFILE
- printf("static void __dbcl_%s_%sfree __P((", \
- name, args[i]) >> CFILE
- printf("__%s_%slist **));\n", name, args[i]) >> CFILE
- }
- #
# Spit out PUBLIC prototypes.
#
- printf("int __dbcl_%s __P((",name) >> CHFILE
- sep = "";
+ pi = 1;
+ p[pi++] = sprintf("int __dbcl_%s __P((", name);
+ p[pi++] = "";
for (i = 0; i < nvars; ++i) {
- printf("%s%s", sep, pr_type[i]) >> CHFILE
- sep = ", ";
+ p[pi++] = pr_type[i];
+ p[pi++] = ", ";
}
- printf("));\n") >> CHFILE
+ p[pi - 1] = "";
+ p[pi++] = "));";
+ p[pi] = "";
+ proto_format(p, 0, CFILE);
+
#
# Spit out function name/args.
#
@@ -1014,16 +851,28 @@ END {
printf("{\n") >> CFILE
printf("\tCLIENT *cl;\n") >> CFILE
- printf("\t__%s_msg req;\n", name) >> CFILE
- printf("\tstatic __%s_reply *replyp = NULL;\n", name) >> CFILE;
+ printf("\t__%s_msg msg;\n", name) >> CFILE
+ printf("\t__%s_reply *replyp = NULL;\n", name) >> CFILE;
printf("\tint ret;\n") >> CFILE
if (!env_handle)
printf("\tDB_ENV *dbenv;\n") >> CFILE
+ #
+ # If we are managing a list, we need a few more vars.
+ #
+ for (i = 0; i < nvars; ++i) {
+ if (rpc_type[i] == "LIST") {
+ printf("\t%s %sp;\n", c_type[i], args[i]) >> CFILE
+ printf("\tint %si;\n", args[i]) >> CFILE
+ if (list_type[i] == "GID")
+ printf("\tu_int8_t ** %sq;\n", args[i]) >> CFILE
+ else
+ printf("\tu_int32_t * %sq;\n", args[i]) >> CFILE
+ }
+ }
printf("\n") >> CFILE
printf("\tret = 0;\n") >> CFILE
if (!env_handle) {
- printf("\tdbenv = NULL;\n") >> CFILE
if (db_handle)
printf("\tdbenv = %s->dbenv;\n", args[db_idx]) >> CFILE
else if (dbc_handle)
@@ -1032,27 +881,19 @@ END {
else if (txn_handle)
printf("\tdbenv = %s->mgrp->dbenv;\n", \
args[txn_idx]) >> CFILE
- printf("\tif (dbenv == NULL || dbenv->cl_handle == NULL) {\n") \
- >> CFILE
- printf("\t\t__db_err(dbenv, \"No server environment.\");\n") \
+ else
+ printf("\tdbenv = NULL;\n") >> CFILE
+ printf("\tif (dbenv == NULL || !RPC_ON(dbenv))\n") \
>> CFILE
+ printf("\t\treturn (__dbcl_noserver(NULL));\n") >> CFILE
} else {
- printf("\tif (%s == NULL || %s->cl_handle == NULL) {\n", \
+ printf("\tif (%s == NULL || !RPC_ON(%s))\n", \
args[env_idx], args[env_idx]) >> CFILE
- printf("\t\t__db_err(%s, \"No server environment.\");\n", \
+ printf("\t\treturn (__dbcl_noserver(%s));\n", \
args[env_idx]) >> CFILE
}
- printf("\t\treturn (DB_NOSERVER);\n") >> CFILE
- printf("\t}\n") >> CFILE
printf("\n") >> CFILE
- #
- # Free old reply if there was one.
- #
- printf("\tif (replyp != NULL) {\n") >> CFILE
- printf("\t\txdr_free((xdrproc_t)xdr___%s_reply, (void *)replyp);\n", \
- name) >> CFILE
- printf("\t\treplyp = NULL;\n\t}\n") >> CFILE
if (!env_handle)
printf("\tcl = (CLIENT *)dbenv->cl_handle;\n") >> CFILE
else
@@ -1068,8 +909,12 @@ END {
if (func_arg[i] != 1)
continue;
printf("\tif (%s != NULL) {\n", args[i]) >> CFILE
- printf("\t\t__db_err(%s, ", args[env_idx]) >> CFILE
- printf("\"User functions not supported in RPC.\");\n") >> CFILE
+ if (!env_handle) {
+ printf("\t\t__db_err(dbenv, ") >> CFILE
+ } else {
+ printf("\t\t__db_err(%s, ", args[env_idx]) >> CFILE
+ }
+ printf("\"User functions not supported in RPC\");\n") >> CFILE
printf("\t\treturn (EINVAL);\n\t}\n") >> CFILE
}
@@ -1079,49 +924,102 @@ END {
for (i = 0; i < nvars; ++i) {
if (rpc_type[i] == "ID") {
printf("\tif (%s == NULL)\n", args[i]) >> CFILE
- printf("\t\treq.%scl_id = 0;\n\telse\n", \
+ printf("\t\tmsg.%scl_id = 0;\n\telse\n", \
args[i]) >> CFILE
if (c_type[i] == "DB_TXN *") {
- printf("\t\treq.%scl_id = %s->txnid;\n", \
+ printf("\t\tmsg.%scl_id = %s->txnid;\n", \
args[i], args[i]) >> CFILE
} else {
- printf("\t\treq.%scl_id = %s->cl_id;\n", \
+ printf("\t\tmsg.%scl_id = %s->cl_id;\n", \
args[i], args[i]) >> CFILE
}
}
+ if (rpc_type[i] == "GID") {
+ printf("\tmemcpy(msg.%s, %s, %d);\n", \
+ args[i], args[i], xidsize) >> CFILE
+ }
if (rpc_type[i] == "INT") {
- printf("\treq.%s = %s;\n", args[i], args[i]) >> CFILE
+ printf("\tmsg.%s = %s;\n", args[i], args[i]) >> CFILE
}
if (rpc_type[i] == "STRING") {
printf("\tif (%s == NULL)\n", args[i]) >> CFILE
- printf("\t\treq.%s = \"\";\n", args[i]) >> CFILE
+ printf("\t\tmsg.%s = \"\";\n", args[i]) >> CFILE
printf("\telse\n") >> CFILE
- printf("\t\treq.%s = (char *)%s;\n", \
+ printf("\t\tmsg.%s = (char *)%s;\n", \
args[i], args[i]) >> CFILE
}
if (rpc_type[i] == "DBT") {
- printf("\treq.%sdlen = %s->dlen;\n", \
+ printf("\tmsg.%sdlen = %s->dlen;\n", \
+ args[i], args[i]) >> CFILE
+ printf("\tmsg.%sdoff = %s->doff;\n", \
args[i], args[i]) >> CFILE
- printf("\treq.%sdoff = %s->doff;\n", \
+ printf("\tmsg.%sulen = %s->ulen;\n", \
args[i], args[i]) >> CFILE
- printf("\treq.%sflags = %s->flags;\n", \
+ printf("\tmsg.%sflags = %s->flags;\n", \
args[i], args[i]) >> CFILE
- printf("\treq.%sdata.%sdata_val = %s->data;\n", \
+ printf("\tmsg.%sdata.%sdata_val = %s->data;\n", \
args[i], args[i], args[i]) >> CFILE
- printf("\treq.%sdata.%sdata_len = %s->size;\n", \
+ printf("\tmsg.%sdata.%sdata_len = %s->size;\n", \
args[i], args[i], args[i]) >> CFILE
}
if (rpc_type[i] == "LIST") {
- printf("\tif ((ret = __dbcl_%s_%slist(", \
- name, args[i]) >> CFILE
- printf("&req.%slist, %s)) != 0)\n", \
+ printf("\tfor (%si = 0, %sp = %s; *%sp != 0; ", \
+ args[i], args[i], args[i], args[i]) >> CFILE
+ printf(" %si++, %sp++)\n\t\t;\n", args[i], args[i]) \
+ >> CFILE
+
+ #
+ # For an array of ints, *_len is the number of elements;
+ # for a GID list, *_len is the total number of bytes.
+ #
+ printf("\tmsg.%s.%s_len = %si",args[i], args[i], \
+ args[i]) >> CFILE
+ if (list_type[i] == "GID")
+ printf(" * %d;\n", xidsize) >> CFILE
+ else
+ printf(";\n") >> CFILE
+ printf("\tif ((ret = __os_calloc(") >> CFILE
+ if (!env_handle)
+ printf("dbenv,\n") >> CFILE
+ else
+ printf("%s,\n", args[env_idx]) >> CFILE
+ printf("\t msg.%s.%s_len,", \
+ args[i], args[i]) >> CFILE
+ if (list_type[i] == "GID")
+ printf(" 1,") >> CFILE
+ else
+ printf(" sizeof(u_int32_t),") >> CFILE
+ printf(" &msg.%s.%s_val)) != 0)\n",\
+ args[i], args[i], args[i], args[i]) >> CFILE
+ printf("\t\treturn (ret);\n") >> CFILE
+ printf("\tfor (%sq = msg.%s.%s_val, %sp = %s; ", \
+ args[i], args[i], args[i], \
args[i], args[i]) >> CFILE
- printf("\t\tgoto out;\n") >> CFILE
+ printf("%si--; %sq++, %sp++)\n", \
+ args[i], args[i], args[i]) >> CFILE
+ printf("\t\t*%sq = ", args[i]) >> CFILE
+ if (list_type[i] == "GID")
+ printf("*%sp;\n", args[i]) >> CFILE
+ if (list_type[i] == "ID")
+ printf("(*%sp)->cl_id;\n", args[i]) >> CFILE
+ if (list_type[i] == "INT")
+ printf("*%sp;\n", args[i]) >> CFILE
}
}
printf("\n") >> CFILE
- printf("\treplyp = __db_%s_%d(&req, cl);\n", name, msgid) >> CFILE
+ printf("\treplyp = __db_%s_%d%03d(&msg, cl);\n", name, major, minor) \
+ >> CFILE
+ for (i = 0; i < nvars; ++i) {
+ if (rpc_type[i] == "LIST") {
+ printf("\t__os_free(") >> CFILE
+ if (!env_handle)
+ printf("dbenv, ") >> CFILE
+ else
+ printf("%s, ", args[env_idx]) >> CFILE
+ printf("msg.%s.%s_val);\n", args[i], args[i]) >> CFILE
+ }
+ }
printf("\tif (replyp == NULL) {\n") >> CFILE
if (!env_handle) {
printf("\t\t__db_err(dbenv, ") >> CFILE
@@ -1137,112 +1035,24 @@ END {
if (ret_code == 0) {
printf("\tret = replyp->status;\n") >> CFILE
} else {
- for (i = 0; i < nvars; ++i) {
- if (rpc_type[i] == "LIST") {
- printf("\t__dbcl_%s_%sfree(&req.%slist);\n", \
- name, args[i], args[i]) >> CFILE
- }
- }
- printf("\treturn (__dbcl_%s_ret(", name) >> CFILE
+ printf("\tret = __dbcl_%s_ret(", name) >> CFILE
sep = "";
for (i = 0; i < nvars; ++i) {
printf("%s%s", sep, args[i]) >> CFILE
sep = ", ";
}
- printf("%sreplyp));\n", sep) >> CFILE
+ printf("%sreplyp);\n", sep) >> CFILE
}
printf("out:\n") >> CFILE
- for (i = 0; i < nvars; ++i) {
- if (rpc_type[i] == "LIST") {
- printf("\t__dbcl_%s_%sfree(&req.%slist);\n", \
- name, args[i], args[i]) >> CFILE
- }
- }
- printf("\treturn (ret);\n") >> CFILE
- printf("}\n\n") >> CFILE
-
#
- # If we are doing a list, write list functions for op.
+ # Free reply if there was one.
#
- for (i = 0; i < nvars; i++) {
- if (rpc_type[i] != "LIST")
- continue;
- printf("int\n__dbcl_%s_%slist(locp, pp)\n", \
- name, args[i]) >> CFILE
- printf("\t__%s_%slist **locp;\n", name, args[i]) >> CFILE
- if (list_type[i] == "STRING")
- printf("\t%s pp;\n{\n\t%s p;\n", \
- c_type[i], c_type[i]) >> CFILE
- if (list_type[i] == "INT")
- printf("\tu_int32_t *pp;\n{\n\tu_int32_t *p, *q;\n") \
- >> CFILE
- if (list_type[i] == "ID")
- printf("\t%s pp;\n{\n\t%s p;\n\tu_int32_t *q;\n", \
- c_type[i], c_type[i]) >> CFILE
-
- printf("\tint ret;\n") >> CFILE
- printf("\t__%s_%slist *nl, **nlp;\n\n", name, args[i]) >> CFILE
- printf("\t*locp = NULL;\n") >> CFILE
- printf("\tif (pp == NULL)\n\t\treturn (0);\n") >> CFILE
- printf("\tnlp = locp;\n") >> CFILE
- printf("\tfor (p = pp; *p != 0; p++) {\n") >> CFILE
- printf("\t\tif ((ret = __os_malloc(NULL, ") >> CFILE
- printf("sizeof(*nl), NULL, nlp)) != 0)\n") >> CFILE
- printf("\t\t\tgoto out;\n") >> CFILE
- printf("\t\tnl = *nlp;\n") >> CFILE
- printf("\t\tnl->next = NULL;\n") >> CFILE
- printf("\t\tnl->ent.ent_val = NULL;\n") >> CFILE
- printf("\t\tnl->ent.ent_len = 0;\n") >> CFILE
- if (list_type[i] == "STRING") {
- printf("\t\tif ((ret = __os_strdup(NULL, ") >> CFILE
- printf("*p, &nl->ent.ent_val)) != 0)\n") >> CFILE
- printf("\t\t\tgoto out;\n") >> CFILE
- printf("\t\tnl->ent.ent_len = strlen(*p)+1;\n") >> CFILE
- }
- if (list_type[i] == "INT") {
- printf("\t\tif ((ret = __os_malloc(NULL, ") >> CFILE
- printf("sizeof(%s), NULL, &nl->ent.ent_val)) != 0)\n", \
- c_type[i]) >> CFILE
- printf("\t\t\tgoto out;\n") >> CFILE
- printf("\t\tq = (u_int32_t *)nl->ent.ent_val;\n") \
- >> CFILE
- printf("\t\t*q = *p;\n") >> CFILE
- printf("\t\tnl->ent.ent_len = sizeof(%s);\n", \
- c_type[i]) >> CFILE
- }
- if (list_type[i] == "ID") {
- printf("\t\tif ((ret = __os_malloc(NULL, ") >> CFILE
- printf("sizeof(u_int32_t),") >> CFILE
- printf(" NULL, &nl->ent.ent_val)) != 0)\n") >> CFILE
- printf("\t\t\tgoto out;\n") >> CFILE
- printf("\t\tq = (u_int32_t *)nl->ent.ent_val;\n") \
- >> CFILE
- printf("\t\t*q = (*p)->cl_id;\n") >> CFILE
- printf("\t\tnl->ent.ent_len = sizeof(u_int32_t);\n") \
- >> CFILE
- }
- printf("\t\tnlp = &nl->next;\n") >> CFILE
- printf("\t}\n") >> CFILE
- printf("\treturn (0);\n") >> CFILE
- printf("out:\n") >> CFILE
- printf("\t__dbcl_%s_%sfree(locp);\n", name, args[i]) >> CFILE
- printf("\treturn (ret);\n") >> CFILE
-
- printf("}\n\n") >> CFILE
+ printf("\tif (replyp != NULL)\n") >> CFILE
+ printf("\t\txdr_free((xdrproc_t)xdr___%s_reply,",name) >> CFILE
+ printf(" (void *)replyp);\n") >> CFILE
+ printf("\treturn (ret);\n") >> CFILE
+ printf("}\n\n") >> CFILE
- printf("void\n__dbcl_%s_%sfree(locp)\n", name, args[i]) >> CFILE
- printf("\t__%s_%slist **locp;\n", name, args[i]) >> CFILE
- printf("{\n") >> CFILE
- printf("\t__%s_%slist *nl, *nl1;\n\n", name, args[i]) >> CFILE
- printf("\tif (locp == NULL)\n\t\treturn;\n") >> CFILE
- printf("\tfor (nl = *locp; nl != NULL; nl = nl1) {\n") >> CFILE
- printf("\t\tnl1 = nl->next;\n") >> CFILE
- printf("\t\tif (nl->ent.ent_val)\n") >> CFILE
- printf("\t\t\t__os_free(nl->ent.ent_val, nl->ent.ent_len);\n") \
- >> CFILE
- printf("\t\t__os_free(nl, sizeof(*nl));\n") >> CFILE
- printf("\t}\n}\n\n") >> CFILE
- }
#
# Generate Client Template code
#
@@ -1250,38 +1060,16 @@ END {
#
# If we are doing a list, write prototypes
#
- for (i = 0; i < rvars; ++i) {
- if (ret_type[i] != "LIST")
- continue;
- if (retlist_type[i] != "STRING" &&
- retlist_type[i] != "INT" && list_type[i] != "ID")
- continue;
- printf("int __db_%s_%sreplist __P((", \
- name, retargs[i]) >> TFILE
- printf("__%s_%sreplist, ", \
- name, retargs[i]) >> TFILE
- if (retlist_type[i] == "STRING") {
- printf("char ***));\n") >> TFILE
- }
- if (retlist_type[i] == "INT" ||
- retlist_type[i] == "ID") {
- printf("u_int32_t **));\n") >> TFILE
- }
- printf("void __db_%s_%sfree __P((", \
- name, retargs[i]) >> TFILE
- if (retlist_type[i] == "STRING")
- printf("char **));\n") >> TFILE
- if (retlist_type[i] == "INT" || retlist_type[i] == "ID")
- printf("u_int32_t *));\n\n") >> TFILE
- }
-
- printf("int __dbcl_%s_ret __P((", name) >> CHFILE
- sep = "";
+ pi = 1;
+ p[pi++] = sprintf("int __dbcl_%s_ret __P((", name);
+ p[pi++] = "";
for (i = 0; i < nvars; ++i) {
- printf("%s%s", sep, pr_type[i]) >> CHFILE
- sep = ", ";
+ p[pi++] = pr_type[i];
+ p[pi++] = ", ";
}
- printf("%s__%s_reply *));\n", sep, name) >> CHFILE
+ p[pi++] = sprintf("__%s_reply *));", name);
+ p[pi++] = "";
+ proto_format(p, 0, TFILE);
printf("int\n") >> TFILE
printf("__dbcl_%s_ret(", name) >> TFILE
@@ -1310,12 +1098,12 @@ END {
printf("\t%s %s;\n", \
retc_type[i], retargs[i]) >> TFILE
} else if (ret_type[i] == "LIST") {
- if (retlist_type[i] == "STRING")
- printf("\tchar **__db_%slist;\n", \
+ if (retlist_type[i] == "GID")
+ printf("\tu_int8_t *__db_%s;\n", \
retargs[i]) >> TFILE
if (retlist_type[i] == "ID" ||
retlist_type[i] == "INT")
- printf("\tu_int32_t *__db_%slist;\n", \
+ printf("\tu_int32_t *__db_%s;\n", \
retargs[i]) >> TFILE
} else {
printf("\t/* %s %s; */\n", \
@@ -1347,16 +1135,9 @@ END {
printf("\t%s = replyp->%s;\n", \
retargs[i], varname) >> TFILE
} else if (ret_type[i] == "LIST") {
- printf("\n\tif ((ret = __db_%s_%slist(", \
- name, retargs[i]) >> TFILE
- printf("replyp->%slist, &__db_%slist)) != 0)", \
- retargs[i], retargs[i]) >> TFILE
- printf("\n\t\treturn (ret);\n") >> TFILE
printf("\n\t/*\n") >> TFILE
printf("\t * XXX Handle list\n") >> TFILE
printf("\t */\n\n") >> TFILE
- printf("\t__db_%s_%sfree(__db_%slist);\n", \
- name, retargs[i], retargs[i]) >> TFILE
} else {
printf("\t/* Handle replyp->%s; */\n", \
varname) >> TFILE
@@ -1365,118 +1146,69 @@ END {
printf("\n\t/*\n\t * XXX Code goes here\n\t */\n\n") >> TFILE
printf("\treturn (replyp->status);\n") >> TFILE
printf("}\n\n") >> TFILE
- #
- # If we are doing a list, write list functions for this op.
- #
- for (i = 0; i < rvars; ++i) {
- if (ret_type[i] != "LIST")
- continue;
- if (retlist_type[i] != "STRING" &&
- retlist_type[i] != "INT" && list_type[i] != "ID")
- continue;
- printf("int\n") >> TFILE
- printf("__db_%s_%sreplist(locp, ppp)\n", \
- name, retargs[i]) >> TFILE
- printf("\t__%s_%sreplist *locp;\n", \
- name, retargs[i]) >> TFILE
- if (retlist_type[i] == "STRING") {
- printf("\tchar ***ppp;\n{\n") >> TFILE
- printf("\tchar **pp;\n") >> TFILE
- }
- if (retlist_type[i] == "INT" ||
- retlist_type[i] == "ID") {
- printf("\tu_int32_t **ppp;\n{\n") >> TFILE
- printf("\tu_int32_t *pp;\n") >> TFILE
- }
-
- printf("\tint cnt, ret, size;\n") >> TFILE
- printf("\t__%s_%sreplist *nl;\n\n", \
- name, retargs[i]) >> TFILE
- printf("\tfor (cnt = 0, nl = locp; ") >> TFILE
- printf("nl != NULL; cnt++, nl = nl->next)\n\t\t;\n\n") \
- >> TFILE
- printf("\tif (cnt == 0) {\n") >> TFILE
- printf("\t\t*ppp = NULL;\n") >> TFILE
- printf("\t\treturn (0);\n\t}\n") >> TFILE
- printf("\tsize = sizeof(*pp) * cnt;\n") >> TFILE
- printf("\tif ((ret = __os_malloc(NULL, ") >> TFILE
- printf("size, NULL, ppp)) != 0)\n") >> TFILE
- printf("\t\treturn (ret);\n") >> TFILE
- printf("\tmemset(*ppp, 0, size);\n") >> TFILE
- printf("\tfor (pp = *ppp, nl = locp; ") >> TFILE
- printf("nl != NULL; nl = nl->next, pp++) {\n") >> TFILE
- if (retlist_type[i] == "STRING") {
- printf("\t\tif ((ret = __os_malloc(NULL, ") \
- >> TFILE
- printf("nl->ent.ent_len + 1, NULL,") >> TFILE
- printf(" pp)) != 0)\n") >> TFILE
- printf("\t\t\tgoto out;\n") >> TFILE
- printf("\t\tif ((ret = __os_strdup(") >> TFILE
- printf("NULL, (char *)nl->ent.ent_val,") \
- >> TFILE
- printf(" pp)) != 0)\n") >> TFILE
- printf("\t\t\tgoto out;\n") >> TFILE
- }
- if (retlist_type[i] == "INT" ||
- retlist_type[i] == "ID") {
- printf("\t\t*pp = *(u_int32_t *)") >> TFILE
- printf("nl->ent.ent_val;\n") >> TFILE
- }
- printf("\t}\n") >> TFILE
- printf("\treturn (0);\n") >> TFILE
- printf("out:\n") >> TFILE
- printf("\t__db_%s_%sfree(*ppp);\n", \
- name, retargs[i]) >> TFILE
- printf("\treturn (ret);\n") >> TFILE
- printf("}\n\n") >> TFILE
-
- printf("void\n") >> TFILE
- printf("__db_%s_%sfree(pp)\n", \
- name, retargs[i]) >> TFILE
-
- if (retlist_type[i] == "STRING")
- printf("\tchar **pp;\n") >> TFILE
- if (retlist_type[i] == "INT" || retlist_type[i] == "ID")
- printf("\tu_int32_t *pp;\n") >> TFILE
-
- printf("{\n") >> TFILE
- printf("\tsize_t size;\n") >> TFILE
-
- if (retlist_type[i] == "STRING")
- printf("\tchar **p;\n\n") >> TFILE
- if (retlist_type[i] == "INT" || retlist_type[i] == "ID")
- printf("\tu_int32_t *p;\n\n") >> TFILE
-
- printf("\tif (pp == NULL)\n\t\treturn;\n") >> TFILE
- printf("\tsize = sizeof(*p);\n") >> TFILE
- printf("\tfor (p = pp; *p != 0; p++) {\n") >> TFILE
- printf("\t\tsize += sizeof(*p);\n") >> TFILE
-
- if (retlist_type[i] == "STRING")
- printf("\t\t__os_free(*p, strlen(*p)+1);\n") \
- >> TFILE
- printf("\t}\n") >> TFILE
- printf("\t__os_free(pp, size);\n") >> TFILE
- printf("}\n\n") >> TFILE
- }
}
}
#
# split_lines --
# Add line separators to pretty-print the output.
-function split_lines(is_public) {
+function split_lines() {
if (argcount > 3) {
# Reset the counter, remove any trailing whitespace from
# the separator.
argcount = 0;
sub("[ ]$", "", sep)
- if (is_public) {
- printf("%s\n\t", sep) >> SHFILE
- } else {
- printf("%s\n\t\t", sep) >> PFILE
- printf("%s\\\n\\\t\\\t", sep) >> SEDFILE
+ printf("%s\n\t\t", sep) >> PFILE
+ printf("%s\\\n\\\t\\\t", sep) >> SEDFILE
+ }
+}
+
+# proto_format --
+# Pretty-print a function prototype.
+function proto_format(p, sedfile, OUTPUT)
+{
+ if (sedfile)
+ printf("/*\\\n") >> OUTPUT;
+ else
+ printf("/*\n") >> OUTPUT;
+
+ s = "";
+ for (i = 1; i in p; ++i)
+ s = s p[i];
+
+ if (sedfile)
+ t = "\\ * PUBLIC: "
+ else
+ t = " * PUBLIC: "
+ if (length(s) + length(t) < 80)
+ if (sedfile)
+ printf("%s%s", t, s) >> OUTPUT;
+ else
+ printf("%s%s", t, s) >> OUTPUT;
+ else {
+ split(s, p, "__P");
+ len = length(t) + length(p[1]);
+ printf("%s%s", t, p[1]) >> OUTPUT
+
+ n = split(p[2], comma, ",");
+ comma[1] = "__P" comma[1];
+ for (i = 1; i <= n; i++) {
+ if (len + length(comma[i]) > 75) {
+ if (sedfile)
+ printf(\
+ "\\\n\\ * PUBLIC: ") >> OUTPUT;
+ else
+ printf("\n * PUBLIC: ") >> OUTPUT;
+ len = 0;
+ }
+ printf("%s%s", comma[i], i == n ? "" : ",") >> OUTPUT;
+ len += length(comma[i]);
}
}
+ if (sedfile)
+ printf("\\\n\\ */\\\n") >> OUTPUT;
+ else
+ printf("\n */\n") >> OUTPUT;
+ delete p;
}
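
For reference, the client stubs that the rewritten generator emits follow the msg/reply pattern driven by the printf statements above: fill in a flat XDR message, flatten any NULL-terminated LIST argument with __os_calloc, call the version-stamped RPC, free the flattened copy with __os_free, and release the reply with xdr_free at the "out:" label. The sketch below is an illustrative reconstruction only, for a hypothetical operation "foo" taking one INT list; the operation name, the __foo_msg/__foo_reply struct names, the 4.001 version stamp, and the abbreviated error path are assumptions for the sketch, not code taken from this tree.

/*
 * Illustrative shape of a generated client stub -- not from this tree.
 * Headers (db_config.h, db_int.h, RPC/XDR declarations) are omitted here;
 * the real generated file pulls them in.
 */
int
__dbcl_foo(dbenv, list)
	DB_ENV *dbenv;
	u_int32_t *list;
{
	CLIENT *cl;		/* RPC handle; plumbing from dbenv omitted */
	__foo_msg msg;		/* assumed name of the request struct */
	__foo_reply *replyp;	/* reply type matches xdr___foo_reply above */
	u_int32_t i, *p, *q;
	int ret;

	ret = 0;
	replyp = NULL;
	cl = NULL;		/* assume it was fetched from the environment */

	/* Count the NULL-terminated input list. */
	for (i = 0, p = list; *p != 0; i++, p++)
		;
	/* For an array of ints, *_len is the element count. */
	msg.list.list_len = i;
	if ((ret = __os_calloc(dbenv,
	    msg.list.list_len, sizeof(u_int32_t), &msg.list.list_val)) != 0)
		return (ret);
	for (q = msg.list.list_val, p = list; i--; q++, p++)
		*q = *p;

	/* Call the version-stamped RPC, then release the flattened copy. */
	replyp = __db_foo_4001(&msg, cl);
	__os_free(dbenv, msg.list.list_val);
	if (replyp == NULL) {
		/* Error reporting elided; the generated stub uses __db_err. */
		ret = DB_NOSERVER;
		goto out;
	}
	ret = replyp->status;
out:
	/* Free the reply if there was one. */
	if (replyp != NULL)
		xdr_free((xdrproc_t)xdr___foo_reply, (void *)replyp);
	return (ret);
}
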
diff --git a/bdb/dist/install-sh b/bdb/dist/install-sh
new file mode 100755
index 00000000000..b41a2459161
--- /dev/null
+++ b/bdb/dist/install-sh
@@ -0,0 +1,251 @@
+#!/bin/sh
+#
+# install - install a program, script, or datafile
+# This comes from X11R5 (mit/util/scripts/install.sh).
+#
+# Copyright 1991 by the Massachusetts Institute of Technology
+#
+# Permission to use, copy, modify, distribute, and sell this software and its
+# documentation for any purpose is hereby granted without fee, provided that
+# the above copyright notice appear in all copies and that both that
+# copyright notice and this permission notice appear in supporting
+# documentation, and that the name of M.I.T. not be used in advertising or
+# publicity pertaining to distribution of the software without specific,
+# written prior permission. M.I.T. makes no representations about the
+# suitability of this software for any purpose. It is provided "as is"
+# without express or implied warranty.
+#
+# Calling this script install-sh is preferred over install.sh, to prevent
+# `make' implicit rules from creating a file called install from it
+# when there is no Makefile.
+#
+# This script is compatible with the BSD install script, but was written
+# from scratch. It can only install one file at a time, a restriction
+# shared with many OS's install programs.
+
+
+# set DOITPROG to echo to test this script
+
+# Don't use :- since 4.3BSD and earlier shells don't like it.
+doit="${DOITPROG-}"
+
+
+# put in absolute paths if you don't have them in your path; or use env. vars.
+
+mvprog="${MVPROG-mv}"
+cpprog="${CPPROG-cp}"
+chmodprog="${CHMODPROG-chmod}"
+chownprog="${CHOWNPROG-chown}"
+chgrpprog="${CHGRPPROG-chgrp}"
+stripprog="${STRIPPROG-strip}"
+rmprog="${RMPROG-rm}"
+mkdirprog="${MKDIRPROG-mkdir}"
+
+transformbasename=""
+transform_arg=""
+instcmd="$mvprog"
+chmodcmd="$chmodprog 0755"
+chowncmd=""
+chgrpcmd=""
+stripcmd=""
+rmcmd="$rmprog -f"
+mvcmd="$mvprog"
+src=""
+dst=""
+dir_arg=""
+
+while [ x"$1" != x ]; do
+ case $1 in
+ -c) instcmd="$cpprog"
+ shift
+ continue;;
+
+ -d) dir_arg=true
+ shift
+ continue;;
+
+ -m) chmodcmd="$chmodprog $2"
+ shift
+ shift
+ continue;;
+
+ -o) chowncmd="$chownprog $2"
+ shift
+ shift
+ continue;;
+
+ -g) chgrpcmd="$chgrpprog $2"
+ shift
+ shift
+ continue;;
+
+ -s) stripcmd="$stripprog"
+ shift
+ continue;;
+
+ -t=*) transformarg=`echo $1 | sed 's/-t=//'`
+ shift
+ continue;;
+
+ -b=*) transformbasename=`echo $1 | sed 's/-b=//'`
+ shift
+ continue;;
+
+ *) if [ x"$src" = x ]
+ then
+ src=$1
+ else
+ # this colon is to work around a 386BSD /bin/sh bug
+ :
+ dst=$1
+ fi
+ shift
+ continue;;
+ esac
+done
+
+if [ x"$src" = x ]
+then
+ echo "install: no input file specified"
+ exit 1
+else
+ true
+fi
+
+if [ x"$dir_arg" != x ]; then
+ dst=$src
+ src=""
+
+ if [ -d $dst ]; then
+ instcmd=:
+ chmodcmd=""
+ else
+ instcmd=$mkdirprog
+ fi
+else
+
+# Waiting for this to be detected by the "$instcmd $src $dsttmp" command
+# might cause directories to be created, which would be especially bad
+# if $src (and thus $dsttmp) contains '*'.
+
+ if [ -f $src -o -d $src ]
+ then
+ true
+ else
+ echo "install: $src does not exist"
+ exit 1
+ fi
+
+ if [ x"$dst" = x ]
+ then
+ echo "install: no destination specified"
+ exit 1
+ else
+ true
+ fi
+
+# If destination is a directory, append the input filename; if your system
+# does not like double slashes in filenames, you may need to add some logic
+
+ if [ -d $dst ]
+ then
+ dst="$dst"/`basename $src`
+ else
+ true
+ fi
+fi
+
+## this sed command emulates the dirname command
+dstdir=`echo $dst | sed -e 's,[^/]*$,,;s,/$,,;s,^$,.,'`
+
+# Make sure that the destination directory exists.
+# this part is taken from Noah Friedman's mkinstalldirs script
+
+# Skip lots of stat calls in the usual case.
+if [ ! -d "$dstdir" ]; then
+defaultIFS='
+ '
+IFS="${IFS-${defaultIFS}}"
+
+oIFS="${IFS}"
+# Some sh's can't handle IFS=/ for some reason.
+IFS='%'
+set - `echo ${dstdir} | sed -e 's@/@%@g' -e 's@^%@/@'`
+IFS="${oIFS}"
+
+pathcomp=''
+
+while [ $# -ne 0 ] ; do
+ pathcomp="${pathcomp}${1}"
+ shift
+
+ if [ ! -d "${pathcomp}" ] ;
+ then
+ $mkdirprog "${pathcomp}"
+ else
+ true
+ fi
+
+ pathcomp="${pathcomp}/"
+done
+fi
+
+if [ x"$dir_arg" != x ]
+then
+ $doit $instcmd $dst &&
+
+ if [ x"$chowncmd" != x ]; then $doit $chowncmd $dst; else true ; fi &&
+ if [ x"$chgrpcmd" != x ]; then $doit $chgrpcmd $dst; else true ; fi &&
+ if [ x"$stripcmd" != x ]; then $doit $stripcmd $dst; else true ; fi &&
+ if [ x"$chmodcmd" != x ]; then $doit $chmodcmd $dst; else true ; fi
+else
+
+# If we're going to rename the final executable, determine the name now.
+
+ if [ x"$transformarg" = x ]
+ then
+ dstfile=`basename $dst`
+ else
+ dstfile=`basename $dst $transformbasename |
+ sed $transformarg`$transformbasename
+ fi
+
+# don't allow the sed command to completely eliminate the filename
+
+ if [ x"$dstfile" = x ]
+ then
+ dstfile=`basename $dst`
+ else
+ true
+ fi
+
+# Make a temp file name in the proper directory.
+
+ dsttmp=$dstdir/#inst.$$#
+
+# Move or copy the file name to the temp name
+
+ $doit $instcmd $src $dsttmp &&
+
+ trap "rm -f ${dsttmp}" 0 &&
+
+# and set any options; do chmod last to preserve setuid bits
+
+# If any of these fail, we abort the whole thing. If we want to
+# ignore errors from any of these, just make sure not to ignore
+# errors from the above "$doit $instcmd $src $dsttmp" command.
+
+ if [ x"$chowncmd" != x ]; then $doit $chowncmd $dsttmp; else true;fi &&
+ if [ x"$chgrpcmd" != x ]; then $doit $chgrpcmd $dsttmp; else true;fi &&
+ if [ x"$stripcmd" != x ]; then $doit $stripcmd $dsttmp; else true;fi &&
+ if [ x"$chmodcmd" != x ]; then $doit $chmodcmd $dsttmp; else true;fi &&
+
+# Now rename the file to the real destination.
+
+ $doit $rmcmd -f $dstdir/$dstfile &&
+ $doit $mvcmd $dsttmp $dstdir/$dstfile
+
+fi &&
+
+
+exit 0
diff --git a/bdb/dist/ltconfig b/bdb/dist/ltconfig
deleted file mode 100644
index f78afda0e1f..00000000000
--- a/bdb/dist/ltconfig
+++ /dev/null
@@ -1,3136 +0,0 @@
-#! /bin/sh
-
-# ltconfig - Create a system-specific libtool.
-# Copyright (C) 1996-1999 Free Software Foundation, Inc.
-# Originally by Gordon Matzigkeit <gord@gnu.ai.mit.edu>, 1996
-#
-# This file is free software; you can redistribute it and/or modify it
-# under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful, but
-# WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-# General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
-#
-# As a special exception to the GNU General Public License, if you
-# distribute this file as part of a program that contains a
-# configuration script generated by Autoconf, you may include it under
-# the same distribution terms that you use for the rest of that program.
-
-# A lot of this script is taken from autoconf-2.10.
-
-# Check that we are running under the correct shell.
-SHELL=${CONFIG_SHELL-/bin/sh}
-echo=echo
-if test "X$1" = X--no-reexec; then
- # Discard the --no-reexec flag, and continue.
- shift
-elif test "X$1" = X--fallback-echo; then
- # Avoid inline document here, it may be left over
- :
-elif test "X`($echo '\t') 2>/dev/null`" = 'X\t'; then
- # Yippee, $echo works!
- :
-else
- # Restart under the correct shell.
- exec "$SHELL" "$0" --no-reexec ${1+"$@"}
-fi
-
-if test "X$1" = X--fallback-echo; then
- # used as fallback echo
- shift
- cat <<EOF
-$*
-EOF
- exit 0
-fi
-
-# Find the correct PATH separator. Usually this is `:', but
-# DJGPP uses `;' like DOS.
-if test "X${PATH_SEPARATOR+set}" != Xset; then
- UNAME=${UNAME-`uname 2>/dev/null`}
- case X$UNAME in
- *-DOS) PATH_SEPARATOR=';' ;;
- *) PATH_SEPARATOR=':' ;;
- esac
-fi
-
-# The HP-UX ksh and POSIX shell print the target directory to stdout
-# if CDPATH is set.
-if test "X${CDPATH+set}" = Xset; then CDPATH=:; export CDPATH; fi
-
-if test "X${echo_test_string+set}" != Xset; then
- # find a string as large as possible, as long as the shell can cope with it
- for cmd in 'sed 50q "$0"' 'sed 20q "$0"' 'sed 10q "$0"' 'sed 2q "$0"' 'echo test'; do
- # expected sizes: less than 2Kb, 1Kb, 512 bytes, 16 bytes, ...
- if (echo_test_string="`eval $cmd`") 2>/dev/null &&
- echo_test_string="`eval $cmd`" &&
- (test "X$echo_test_string" = "X$echo_test_string") 2>/dev/null; then
- break
- fi
- done
-fi
-
-if test "X`($echo '\t') 2>/dev/null`" != 'X\t' ||
- test "X`($echo "$echo_test_string") 2>/dev/null`" != X"$echo_test_string"; then
- # The Solaris, AIX, and Digital Unix default echo programs unquote
- # backslashes. This makes it impossible to quote backslashes using
- # echo "$something" | sed 's/\\/\\\\/g'
- #
- # So, first we look for a working echo in the user's PATH.
-
- IFS="${IFS= }"; save_ifs="$IFS"; IFS="${IFS}${PATH_SEPARATOR}"
- for dir in $PATH /usr/ucb; do
- if (test -f $dir/echo || test -f $dir/echo$ac_exeext) &&
- test "X`($dir/echo '\t') 2>/dev/null`" = 'X\t' &&
- test "X`($dir/echo "$echo_test_string") 2>/dev/null`" = X"$echo_test_string"; then
- echo="$dir/echo"
- break
- fi
- done
- IFS="$save_ifs"
-
- if test "X$echo" = Xecho; then
- # We didn't find a better echo, so look for alternatives.
- if test "X`(print -r '\t') 2>/dev/null`" = 'X\t' &&
- test "X`(print -r "$echo_test_string") 2>/dev/null`" = X"$echo_test_string"; then
- # This shell has a builtin print -r that does the trick.
- echo='print -r'
- elif (test -f /bin/ksh || test -f /bin/ksh$ac_exeext) &&
- test "X$CONFIG_SHELL" != X/bin/ksh; then
- # If we have ksh, try running ltconfig again with it.
- ORIGINAL_CONFIG_SHELL="${CONFIG_SHELL-/bin/sh}"
- export ORIGINAL_CONFIG_SHELL
- CONFIG_SHELL=/bin/ksh
- export CONFIG_SHELL
- exec "$CONFIG_SHELL" "$0" --no-reexec ${1+"$@"}
- else
- # Try using printf.
- echo='printf "%s\n"'
- if test "X`($echo '\t') 2>/dev/null`" = 'X\t' &&
- test "X`($echo "$echo_test_string") 2>/dev/null`" = X"$echo_test_string"; then
- # Cool, printf works
- :
- elif test "X`("$ORIGINAL_CONFIG_SHELL" "$0" --fallback-echo '\t') 2>/dev/null`" = 'X\t' &&
- test "X`("$ORIGINAL_CONFIG_SHELL" "$0" --fallback-echo "$echo_test_string") 2>/dev/null`" = X"$echo_test_string"; then
- CONFIG_SHELL="$ORIGINAL_CONFIG_SHELL"
- export CONFIG_SHELL
- SHELL="$CONFIG_SHELL"
- export SHELL
- echo="$CONFIG_SHELL $0 --fallback-echo"
- elif test "X`("$CONFIG_SHELL" "$0" --fallback-echo '\t') 2>/dev/null`" = 'X\t' &&
- test "X`("$CONFIG_SHELL" "$0" --fallback-echo "$echo_test_string") 2>/dev/null`" = X"$echo_test_string"; then
- echo="$CONFIG_SHELL $0 --fallback-echo"
- else
- # maybe with a smaller string...
- prev=:
-
- for cmd in 'echo test' 'sed 2q "$0"' 'sed 10q "$0"' 'sed 20q "$0"' 'sed 50q "$0"'; do
- if (test "X$echo_test_string" = "X`eval $cmd`") 2>/dev/null; then
- break
- fi
- prev="$cmd"
- done
-
- if test "$prev" != 'sed 50q "$0"'; then
- echo_test_string=`eval $prev`
- export echo_test_string
- exec "${ORIGINAL_CONFIG_SHELL}" "$0" ${1+"$@"}
- else
- # Oops. We lost completely, so just stick with echo.
- echo=echo
- fi
- fi
- fi
- fi
-fi
-
-# Sed substitution that helps us do robust quoting. It backslashifies
-# metacharacters that are still active within double-quoted strings.
-Xsed='sed -e s/^X//'
-sed_quote_subst='s/\([\\"\\`$\\\\]\)/\\\1/g'
-
-# Same as above, but do not quote variable references.
-double_quote_subst='s/\([\\"\\`\\\\]\)/\\\1/g'
-
-# Sed substitution to delay expansion of an escaped shell variable in a
-# double_quote_subst'ed string.
-delay_variable_subst='s/\\\\\\\\\\\$/\\\\\\$/g'
-
-# The name of this program.
-progname=`$echo "X$0" | $Xsed -e 's%^.*/%%'`
-
-# Constants:
-PROGRAM=ltconfig
-PACKAGE=libtool
-VERSION=1.3.5
-TIMESTAMP=" (1.385.2.206 2000/05/27 11:12:27)"
-ac_compile='${CC-cc} -c $CFLAGS $CPPFLAGS conftest.$ac_ext 1>&5'
-ac_link='${CC-cc} -o conftest $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS 1>&5'
-rm="rm -f"
-
-help="Try \`$progname --help' for more information."
-
-# Global variables:
-default_ofile=libtool
-can_build_shared=yes
-enable_shared=yes
-# All known linkers require a `.a' archive for static linking (except M$VC,
-# which needs '.lib').
-enable_static=yes
-enable_fast_install=yes
-enable_dlopen=unknown
-enable_win32_dll=no
-ltmain=
-silent=
-srcdir=
-ac_config_guess=
-ac_config_sub=
-host=
-nonopt=
-ofile="$default_ofile"
-verify_host=yes
-with_gcc=no
-with_gnu_ld=no
-need_locks=yes
-ac_ext=c
-objext=o
-libext=a
-exeext=
-cache_file=
-
-old_AR="$AR"
-old_CC="$CC"
-old_CFLAGS="$CFLAGS"
-old_CPPFLAGS="$CPPFLAGS"
-old_LDFLAGS="$LDFLAGS"
-old_LD="$LD"
-old_LN_S="$LN_S"
-old_LIBS="$LIBS"
-old_NM="$NM"
-old_RANLIB="$RANLIB"
-old_DLLTOOL="$DLLTOOL"
-old_OBJDUMP="$OBJDUMP"
-old_AS="$AS"
-
-# Parse the command line options.
-args=
-prev=
-for option
-do
- case "$option" in
- -*=*) optarg=`echo "$option" | sed 's/[-_a-zA-Z0-9]*=//'` ;;
- *) optarg= ;;
- esac
-
- # If the previous option needs an argument, assign it.
- if test -n "$prev"; then
- eval "$prev=\$option"
- prev=
- continue
- fi
-
- case "$option" in
- --help) cat <<EOM
-Usage: $progname [OPTION]... [HOST [LTMAIN]]
-
-Generate a system-specific libtool script.
-
- --debug enable verbose shell tracing
- --disable-shared do not build shared libraries
- --disable-static do not build static libraries
- --disable-fast-install do not optimize for fast installation
- --enable-dlopen enable dlopen support
- --enable-win32-dll enable building dlls on win32 hosts
- --help display this help and exit
- --no-verify do not verify that HOST is a valid host type
--o, --output=FILE specify the output file [default=$default_ofile]
- --quiet same as \`--silent'
- --silent do not print informational messages
- --srcdir=DIR find \`config.guess' in DIR
- --version output version information and exit
- --with-gcc assume that the GNU C compiler will be used
- --with-gnu-ld assume that the C compiler uses the GNU linker
- --disable-lock disable file locking
- --cache-file=FILE configure cache file
-
-LTMAIN is the \`ltmain.sh' shell script fragment or \`ltmain.c' program
-that provides basic libtool functionality.
-
-HOST is the canonical host system name [default=guessed].
-EOM
- exit 0
- ;;
-
- --debug)
- echo "$progname: enabling shell trace mode"
- set -x
- ;;
-
- --disable-shared) enable_shared=no ;;
-
- --disable-static) enable_static=no ;;
-
- --disable-fast-install) enable_fast_install=no ;;
-
- --enable-dlopen) enable_dlopen=yes ;;
-
- --enable-win32-dll) enable_win32_dll=yes ;;
-
- --quiet | --silent) silent=yes ;;
-
- --srcdir) prev=srcdir ;;
- --srcdir=*) srcdir="$optarg" ;;
-
- --no-verify) verify_host=no ;;
-
- --output | -o) prev=ofile ;;
- --output=*) ofile="$optarg" ;;
-
- --version) echo "$PROGRAM (GNU $PACKAGE) $VERSION$TIMESTAMP"; exit 0 ;;
-
- --with-gcc) with_gcc=yes ;;
- --with-gnu-ld) with_gnu_ld=yes ;;
-
- --disable-lock) need_locks=no ;;
-
- --cache-file=*) cache_file="$optarg" ;;
-
- -*)
- echo "$progname: unrecognized option \`$option'" 1>&2
- echo "$help" 1>&2
- exit 1
- ;;
-
- *)
- if test -z "$ltmain"; then
- ltmain="$option"
- elif test -z "$host"; then
-# This generates an unnecessary warning for sparc-sun-solaris4.1.3_U1
-# if test -n "`echo $option| sed 's/[-a-z0-9.]//g'`"; then
-# echo "$progname: warning \`$option' is not a valid host type" 1>&2
-# fi
- host="$option"
- else
- echo "$progname: too many arguments" 1>&2
- echo "$help" 1>&2
- exit 1
- fi ;;
- esac
-done
-
-if test -z "$ltmain"; then
- echo "$progname: you must specify a LTMAIN file" 1>&2
- echo "$help" 1>&2
- exit 1
-fi
-
-if test ! -f "$ltmain"; then
- echo "$progname: \`$ltmain' does not exist" 1>&2
- echo "$help" 1>&2
- exit 1
-fi
-
-# Quote any args containing shell metacharacters.
-ltconfig_args=
-for arg
-do
- case "$arg" in
- *" "*|*" "*|*[\[\]\~\#\$\^\&\*\(\)\{\}\\\|\;\<\>\?]*)
- ltconfig_args="$ltconfig_args '$arg'" ;;
- *) ltconfig_args="$ltconfig_args $arg" ;;
- esac
-done
-
-# A relevant subset of AC_INIT.
-
-# File descriptor usage:
-# 0 standard input
-# 1 file creation
-# 2 errors and warnings
-# 3 some systems may open it to /dev/tty
-# 4 used on the Kubota Titan
-# 5 compiler messages saved in config.log
-# 6 checking for... messages and results
-if test "$silent" = yes; then
- exec 6>/dev/null
-else
- exec 6>&1
-fi
-exec 5>>./config.log
-
-# NLS nuisances.
-# Only set LANG and LC_ALL to C if already set.
-# These must not be set unconditionally because not all systems understand
-# e.g. LANG=C (notably SCO).
-if test "X${LC_ALL+set}" = Xset; then LC_ALL=C; export LC_ALL; fi
-if test "X${LANG+set}" = Xset; then LANG=C; export LANG; fi
-
-if test -n "$cache_file" && test -r "$cache_file"; then
- echo "loading cache $cache_file within ltconfig"
- . $cache_file
-fi
-
-if (echo "testing\c"; echo 1,2,3) | grep c >/dev/null; then
- # Stardent Vistra SVR4 grep lacks -e, says ghazi@caip.rutgers.edu.
- if (echo -n testing; echo 1,2,3) | sed s/-n/xn/ | grep xn >/dev/null; then
- ac_n= ac_c='
-' ac_t=' '
- else
- ac_n=-n ac_c= ac_t=
- fi
-else
- ac_n= ac_c='\c' ac_t=
-fi
-
-if test -z "$srcdir"; then
- # Assume the source directory is the same one as the path to LTMAIN.
- srcdir=`$echo "X$ltmain" | $Xsed -e 's%/[^/]*$%%'`
- test "$srcdir" = "$ltmain" && srcdir=.
-fi
-
-trap "$rm conftest*; exit 1" 1 2 15
-if test "$verify_host" = yes; then
- # Check for config.guess and config.sub.
- ac_aux_dir=
- for ac_dir in $srcdir $srcdir/.. $srcdir/../..; do
- if test -f $ac_dir/config.guess; then
- ac_aux_dir=$ac_dir
- break
- fi
- done
- if test -z "$ac_aux_dir"; then
- echo "$progname: cannot find config.guess in $srcdir $srcdir/.. $srcdir/../.." 1>&2
- echo "$help" 1>&2
- exit 1
- fi
- ac_config_guess=$ac_aux_dir/config.guess
- ac_config_sub=$ac_aux_dir/config.sub
-
- # Make sure we can run config.sub.
- if $SHELL $ac_config_sub sun4 >/dev/null 2>&1; then :
- else
- echo "$progname: cannot run $ac_config_sub" 1>&2
- echo "$help" 1>&2
- exit 1
- fi
-
- echo $ac_n "checking host system type""... $ac_c" 1>&6
-
- host_alias=$host
- case "$host_alias" in
- "")
- if host_alias=`$SHELL $ac_config_guess`; then :
- else
- echo "$progname: cannot guess host type; you must specify one" 1>&2
- echo "$help" 1>&2
- exit 1
- fi ;;
- esac
- host=`$SHELL $ac_config_sub $host_alias`
- echo "$ac_t$host" 1>&6
-
- # Make sure the host verified.
- test -z "$host" && exit 1
-
-elif test -z "$host"; then
- echo "$progname: you must specify a host type if you use \`--no-verify'" 1>&2
- echo "$help" 1>&2
- exit 1
-else
- host_alias=$host
-fi
-
-# Transform linux* to *-*-linux-gnu*, to support old configure scripts.
-case "$host_os" in
-linux-gnu*) ;;
-linux*) host=`echo $host | sed 's/^\(.*-.*-linux\)\(.*\)$/\1-gnu\2/'`
-esac
-
-host_cpu=`echo $host | sed 's/^\([^-]*\)-\([^-]*\)-\(.*\)$/\1/'`
-host_vendor=`echo $host | sed 's/^\([^-]*\)-\([^-]*\)-\(.*\)$/\2/'`
-host_os=`echo $host | sed 's/^\([^-]*\)-\([^-]*\)-\(.*\)$/\3/'`
-
-case "$host_os" in
-aix3*)
- # AIX sometimes has problems with the GCC collect2 program. For some
- # reason, if we set the COLLECT_NAMES environment variable, the problems
- # vanish in a puff of smoke.
- if test "X${COLLECT_NAMES+set}" != Xset; then
- COLLECT_NAMES=
- export COLLECT_NAMES
- fi
- ;;
-esac
-
-# Determine commands to create old-style static archives.
-old_archive_cmds='$AR cru $oldlib$oldobjs'
-old_postinstall_cmds='chmod 644 $oldlib'
-old_postuninstall_cmds=
-
-# Set a sane default for `AR'.
-test -z "$AR" && AR=ar
-
-# Set a sane default for `OBJDUMP'.
-test -z "$OBJDUMP" && OBJDUMP=objdump
-
-# If RANLIB is not set, then run the test.
-if test "${RANLIB+set}" != "set"; then
- result=no
-
- echo $ac_n "checking for ranlib... $ac_c" 1>&6
- IFS="${IFS= }"; save_ifs="$IFS"; IFS="${IFS}${PATH_SEPARATOR}"
- for dir in $PATH; do
- test -z "$dir" && dir=.
- if test -f $dir/ranlib || test -f $dir/ranlib$ac_exeext; then
- RANLIB="ranlib"
- result="ranlib"
- break
- fi
- done
- IFS="$save_ifs"
-
- echo "$ac_t$result" 1>&6
-fi
-
-if test -n "$RANLIB"; then
- old_archive_cmds="$old_archive_cmds~\$RANLIB \$oldlib"
- old_postinstall_cmds="\$RANLIB \$oldlib~$old_postinstall_cmds"
-fi
-
-# Set sane defaults for `DLLTOOL', `OBJDUMP', and `AS', used on cygwin.
-test -z "$DLLTOOL" && DLLTOOL=dlltool
-test -z "$OBJDUMP" && OBJDUMP=objdump
-test -z "$AS" && AS=as
-
-# Check to see if we are using GCC.
-if test "$with_gcc" != yes || test -z "$CC"; then
- # If CC is not set, then try to find GCC or a usable CC.
- if test -z "$CC"; then
- echo $ac_n "checking for gcc... $ac_c" 1>&6
- IFS="${IFS= }"; save_ifs="$IFS"; IFS="${IFS}${PATH_SEPARATOR}"
- for dir in $PATH; do
- test -z "$dir" && dir=.
- if test -f $dir/gcc || test -f $dir/gcc$ac_exeext; then
- CC="gcc"
- break
- fi
- done
- IFS="$save_ifs"
-
- if test -n "$CC"; then
- echo "$ac_t$CC" 1>&6
- else
- echo "$ac_t"no 1>&6
- fi
- fi
-
- # Not "gcc", so try "cc", rejecting "/usr/ucb/cc".
- if test -z "$CC"; then
- echo $ac_n "checking for cc... $ac_c" 1>&6
- IFS="${IFS= }"; save_ifs="$IFS"; IFS="${IFS}${PATH_SEPARATOR}"
- cc_rejected=no
- for dir in $PATH; do
- test -z "$dir" && dir=.
- if test -f $dir/cc || test -f $dir/cc$ac_exeext; then
- if test "$dir/cc" = "/usr/ucb/cc"; then
- cc_rejected=yes
- continue
- fi
- CC="cc"
- break
- fi
- done
- IFS="$save_ifs"
- if test $cc_rejected = yes; then
- # We found a bogon in the path, so make sure we never use it.
- set dummy $CC
- shift
- if test $# -gt 0; then
- # We chose a different compiler from the bogus one.
- # However, it has the same name, so the bogon will be chosen
- # first if we set CC to just the name; use the full file name.
- shift
- set dummy "$dir/cc" "$@"
- shift
- CC="$@"
- fi
- fi
-
- if test -n "$CC"; then
- echo "$ac_t$CC" 1>&6
- else
- echo "$ac_t"no 1>&6
- fi
-
- if test -z "$CC"; then
- echo "$progname: error: no acceptable cc found in \$PATH" 1>&2
- exit 1
- fi
- fi
-
- # Now see if the compiler is really GCC.
- with_gcc=no
- echo $ac_n "checking whether we are using GNU C... $ac_c" 1>&6
- echo "$progname:581: checking whether we are using GNU C" >&5
-
- $rm conftest.c
- cat > conftest.c <<EOF
-#ifdef __GNUC__
- yes;
-#endif
-EOF
- if { ac_try='${CC-cc} -E conftest.c'; { (eval echo $progname:589: \"$ac_try\") 1>&5; (eval $ac_try) 2>&5; }; } | egrep yes >/dev/null 2>&1; then
- with_gcc=yes
- fi
- $rm conftest.c
- echo "$ac_t$with_gcc" 1>&6
-fi
-
-# Allow CC to be a program name with arguments.
-set dummy $CC
-compiler="$2"
-
-echo $ac_n "checking for object suffix... $ac_c" 1>&6
-$rm conftest*
-echo 'int i = 1;' > conftest.c
-echo "$progname:603: checking for object suffix" >& 5
-if { (eval echo $progname:604: \"$ac_compile\") 1>&5; (eval $ac_compile) 2>conftest.err; }; then
- # Append any warnings to the config.log.
- cat conftest.err 1>&5
-
- for ac_file in conftest.*; do
- case $ac_file in
- *.c) ;;
- *) objext=`echo $ac_file | sed -e s/conftest.//` ;;
- esac
- done
-else
- cat conftest.err 1>&5
- echo "$progname: failed program was:" >&5
- cat conftest.c >&5
-fi
-$rm conftest*
-echo "$ac_t$objext" 1>&6
-
-echo $ac_n "checking for executable suffix... $ac_c" 1>&6
-if eval "test \"`echo '$''{'ac_cv_exeext'+set}'`\" = set"; then
- echo $ac_n "(cached) $ac_c" 1>&6
-else
- ac_cv_exeext="no"
- $rm conftest*
- echo 'main () { return 0; }' > conftest.c
- echo "$progname:629: checking for executable suffix" >& 5
- if { (eval echo $progname:630: \"$ac_link\") 1>&5; (eval $ac_link) 2>conftest.err; }; then
- # Append any warnings to the config.log.
- cat conftest.err 1>&5
-
- for ac_file in conftest.*; do
- case $ac_file in
- *.c | *.err | *.$objext ) ;;
- *) ac_cv_exeext=.`echo $ac_file | sed -e s/conftest.//` ;;
- esac
- done
- else
- cat conftest.err 1>&5
- echo "$progname: failed program was:" >&5
- cat conftest.c >&5
- fi
- $rm conftest*
-fi
-if test "X$ac_cv_exeext" = Xno; then
- exeext=""
-else
- exeext="$ac_cv_exeext"
-fi
-echo "$ac_t$ac_cv_exeext" 1>&6
-
-echo $ac_n "checking for $compiler option to produce PIC... $ac_c" 1>&6
-pic_flag=
-special_shlib_compile_flags=
-wl=
-link_static_flag=
-no_builtin_flag=
-
-if test "$with_gcc" = yes; then
- wl='-Wl,'
- link_static_flag='-static'
-
- case "$host_os" in
- beos* | irix5* | irix6* | osf3* | osf4* | osf5*)
- # PIC is the default for these OSes.
- ;;
- aix*)
- # Below there is a dirty hack to force normal static linking with -ldl
- # The problem is because libdl dynamically linked with both libc and
- # libC (AIX C++ library), which obviously doesn't included in libraries
- # list by gcc. This cause undefined symbols with -static flags.
- # This hack allows C programs to be linked with "-static -ldl", but
- # we not sure about C++ programs.
- link_static_flag="$link_static_flag ${wl}-lC"
- ;;
- cygwin* | mingw* | os2*)
- # We can build DLLs from non-PIC.
- ;;
- amigaos*)
- # FIXME: we need at least 68020 code to build shared libraries, but
- # adding the `-m68020' flag to GCC prevents building anything better,
- # like `-m68040'.
- pic_flag='-m68020 -resident32 -malways-restore-a4'
- ;;
- sysv4*MP*)
- if test -d /usr/nec; then
- pic_flag=-Kconform_pic
- fi
- ;;
- *)
- pic_flag='-fPIC'
- ;;
- esac
-else
- # PORTME Check for PIC flags for the system compiler.
- case "$host_os" in
- aix3* | aix4*)
- # All AIX code is PIC.
- link_static_flag='-bnso -bI:/lib/syscalls.exp'
- ;;
-
- hpux9* | hpux10* | hpux11*)
- # Is there a better link_static_flag that works with the bundled CC?
- wl='-Wl,'
- link_static_flag="${wl}-a ${wl}archive"
- pic_flag='+Z'
- ;;
-
- irix5* | irix6*)
- wl='-Wl,'
- link_static_flag='-non_shared'
- # PIC (with -KPIC) is the default.
- ;;
-
- cygwin* | mingw* | os2*)
- # We can build DLLs from non-PIC.
- ;;
-
- osf3* | osf4* | osf5*)
- # All OSF/1 code is PIC.
- wl='-Wl,'
- link_static_flag='-non_shared'
- ;;
-
- sco3.2v5*)
- pic_flag='-Kpic'
- link_static_flag='-dn'
- special_shlib_compile_flags='-belf'
- ;;
-
- solaris*)
- pic_flag='-KPIC'
- link_static_flag='-Bstatic'
- wl='-Wl,'
- ;;
-
- sunos4*)
- pic_flag='-PIC'
- link_static_flag='-Bstatic'
- wl='-Qoption ld '
- ;;
-
- sysv4 | sysv4.2uw2* | sysv4.3* | sysv5*)
- pic_flag='-KPIC'
- link_static_flag='-Bstatic'
- wl='-Wl,'
- ;;
-
- uts4*)
- pic_flag='-pic'
- link_static_flag='-Bstatic'
- ;;
- sysv4*MP*)
- if test -d /usr/nec ;then
- pic_flag='-Kconform_pic'
- link_static_flag='-Bstatic'
- fi
- ;;
- *)
- can_build_shared=no
- ;;
- esac
-fi
-
-if test -n "$pic_flag"; then
- echo "$ac_t$pic_flag" 1>&6
-
- # Check to make sure the pic_flag actually works.
- echo $ac_n "checking if $compiler PIC flag $pic_flag works... $ac_c" 1>&6
- $rm conftest*
- echo "int some_variable = 0;" > conftest.c
- save_CFLAGS="$CFLAGS"
- CFLAGS="$CFLAGS $pic_flag -DPIC"
- echo "$progname:776: checking if $compiler PIC flag $pic_flag works" >&5
- if { (eval echo $progname:777: \"$ac_compile\") 1>&5; (eval $ac_compile) 2>conftest.err; } && test -s conftest.$objext; then
- # Append any warnings to the config.log.
- cat conftest.err 1>&5
-
- case "$host_os" in
- hpux9* | hpux10* | hpux11*)
- # On HP-UX, both CC and GCC only warn that PIC is supported... then they
- # create non-PIC objects. So, if there were any warnings, we assume that
- # PIC is not supported.
- if test -s conftest.err; then
- echo "$ac_t"no 1>&6
- can_build_shared=no
- pic_flag=
- else
- echo "$ac_t"yes 1>&6
- pic_flag=" $pic_flag"
- fi
- ;;
- *)
- echo "$ac_t"yes 1>&6
- pic_flag=" $pic_flag"
- ;;
- esac
- else
- # Append any errors to the config.log.
- cat conftest.err 1>&5
- can_build_shared=no
- pic_flag=
- echo "$ac_t"no 1>&6
- fi
- CFLAGS="$save_CFLAGS"
- $rm conftest*
-else
- echo "$ac_t"none 1>&6
-fi
-
-# Check to see if options -o and -c are simultaneously supported by compiler
-echo $ac_n "checking if $compiler supports -c -o file.o... $ac_c" 1>&6
-$rm -r conftest 2>/dev/null
-mkdir conftest
-cd conftest
-$rm conftest*
-echo "int some_variable = 0;" > conftest.c
-mkdir out
-# According to Tom Tromey, Ian Lance Taylor reported there are C compilers
-# that will create temporary files in the current directory regardless of
-# the output directory. Thus, making CWD read-only will cause this test
-# to fail, enabling locking or at least warning the user not to do parallel
-# builds.
-chmod -w .
-save_CFLAGS="$CFLAGS"
-CFLAGS="$CFLAGS -o out/conftest2.o"
-echo "$progname:829: checking if $compiler supports -c -o file.o" >&5
-if { (eval echo $progname:830: \"$ac_compile\") 1>&5; (eval $ac_compile) 2>out/conftest.err; } && test -s out/conftest2.o; then
-
- # The compiler can only warn and ignore the option if not recognized
- # So say no if there are warnings
- if test -s out/conftest.err; then
- echo "$ac_t"no 1>&6
- compiler_c_o=no
- else
- echo "$ac_t"yes 1>&6
- compiler_c_o=yes
- fi
-else
- # Append any errors to the config.log.
- cat out/conftest.err 1>&5
- compiler_c_o=no
- echo "$ac_t"no 1>&6
-fi
-CFLAGS="$save_CFLAGS"
-chmod u+w .
-$rm conftest* out/*
-rmdir out
-cd ..
-rmdir conftest
-$rm -r conftest 2>/dev/null
-
-if test x"$compiler_c_o" = x"yes"; then
- # Check to see if we can write to a .lo
- echo $ac_n "checking if $compiler supports -c -o file.lo... $ac_c" 1>&6
- $rm conftest*
- echo "int some_variable = 0;" > conftest.c
- save_CFLAGS="$CFLAGS"
- CFLAGS="$CFLAGS -c -o conftest.lo"
- echo "$progname:862: checking if $compiler supports -c -o file.lo" >&5
-if { (eval echo $progname:863: \"$ac_compile\") 1>&5; (eval $ac_compile) 2>conftest.err; } && test -s conftest.lo; then
-
- # The compiler can only warn and ignore the option if not recognized
- # So say no if there are warnings
- if test -s conftest.err; then
- echo "$ac_t"no 1>&6
- compiler_o_lo=no
- else
- echo "$ac_t"yes 1>&6
- compiler_o_lo=yes
- fi
- else
- # Append any errors to the config.log.
- cat conftest.err 1>&5
- compiler_o_lo=no
- echo "$ac_t"no 1>&6
- fi
- CFLAGS="$save_CFLAGS"
- $rm conftest*
-else
- compiler_o_lo=no
-fi
-
-# Check to see if we can do hard links to lock some files if needed
-hard_links="nottested"
-if test "$compiler_c_o" = no && test "$need_locks" != no; then
- # do not overwrite the value of need_locks provided by the user
- echo $ac_n "checking if we can lock with hard links... $ac_c" 1>&6
- hard_links=yes
- $rm conftest*
- ln conftest.a conftest.b 2>/dev/null && hard_links=no
- touch conftest.a
- ln conftest.a conftest.b 2>&5 || hard_links=no
- ln conftest.a conftest.b 2>/dev/null && hard_links=no
- echo "$ac_t$hard_links" 1>&6
- $rm conftest*
- if test "$hard_links" = no; then
- echo "*** WARNING: \`$CC' does not support \`-c -o', so \`make -j' may be unsafe" >&2
- need_locks=warn
- fi
-else
- need_locks=no
-fi
-
-if test "$with_gcc" = yes; then
- # Check to see if options -fno-rtti -fno-exceptions are supported by compiler
- echo $ac_n "checking if $compiler supports -fno-rtti -fno-exceptions ... $ac_c" 1>&6
- $rm conftest*
- echo "int some_variable = 0;" > conftest.c
- save_CFLAGS="$CFLAGS"
- CFLAGS="$CFLAGS -fno-rtti -fno-exceptions -c conftest.c"
- echo "$progname:914: checking if $compiler supports -fno-rtti -fno-exceptions" >&5
- if { (eval echo $progname:915: \"$ac_compile\") 1>&5; (eval $ac_compile) 2>conftest.err; } && test -s conftest.o; then
-
- # The compiler can only warn and ignore the option if not recognized
- # So say no if there are warnings
- if test -s conftest.err; then
- echo "$ac_t"no 1>&6
- compiler_rtti_exceptions=no
- else
- echo "$ac_t"yes 1>&6
- compiler_rtti_exceptions=yes
- fi
- else
- # Append any errors to the config.log.
- cat conftest.err 1>&5
- compiler_rtti_exceptions=no
- echo "$ac_t"no 1>&6
- fi
- CFLAGS="$save_CFLAGS"
- $rm conftest*
-
- if test "$compiler_rtti_exceptions" = "yes"; then
- no_builtin_flag=' -fno-builtin -fno-rtti -fno-exceptions'
- else
- no_builtin_flag=' -fno-builtin'
- fi
-
-fi
-
-# Check for any special shared library compilation flags.
-if test -n "$special_shlib_compile_flags"; then
- echo "$progname: warning: \`$CC' requires \`$special_shlib_compile_flags' to build shared libraries" 1>&2
- if echo "$old_CC $old_CFLAGS " | egrep -e "[ ]$special_shlib_compile_flags[ ]" >/dev/null; then :
- else
- echo "$progname: add \`$special_shlib_compile_flags' to the CC or CFLAGS env variable and reconfigure" 1>&2
- can_build_shared=no
- fi
-fi
-
-echo $ac_n "checking if $compiler static flag $link_static_flag works... $ac_c" 1>&6
-$rm conftest*
-echo 'main(){return(0);}' > conftest.c
-save_LDFLAGS="$LDFLAGS"
-LDFLAGS="$LDFLAGS $link_static_flag"
-echo "$progname:958: checking if $compiler static flag $link_static_flag works" >&5
-if { (eval echo $progname:959: \"$ac_link\") 1>&5; (eval $ac_link) 2>&5; } && test -s conftest; then
- echo "$ac_t$link_static_flag" 1>&6
-else
- echo "$ac_t"none 1>&6
- link_static_flag=
-fi
-LDFLAGS="$save_LDFLAGS"
-$rm conftest*
-
-if test -z "$LN_S"; then
- # Check to see if we can use ln -s, or we need hard links.
- echo $ac_n "checking whether ln -s works... $ac_c" 1>&6
- $rm conftest.dat
- if ln -s X conftest.dat 2>/dev/null; then
- $rm conftest.dat
- LN_S="ln -s"
- else
- LN_S=ln
- fi
- if test "$LN_S" = "ln -s"; then
- echo "$ac_t"yes 1>&6
- else
- echo "$ac_t"no 1>&6
- fi
-fi
-
-# Make sure LD is an absolute path.
-if test -z "$LD"; then
- ac_prog=ld
- if test "$with_gcc" = yes; then
- # Check if gcc -print-prog-name=ld gives a path.
- echo $ac_n "checking for ld used by GCC... $ac_c" 1>&6
- echo "$progname:991: checking for ld used by GCC" >&5
- ac_prog=`($CC -print-prog-name=ld) 2>&5`
- case "$ac_prog" in
- # Accept absolute paths.
- [\\/]* | [A-Za-z]:[\\/]*)
- re_direlt='/[^/][^/]*/\.\./'
- # Canonicalize the path of ld
- ac_prog=`echo $ac_prog| sed 's%\\\\%/%g'`
- while echo $ac_prog | grep "$re_direlt" > /dev/null 2>&1; do
- ac_prog=`echo $ac_prog| sed "s%$re_direlt%/%"`
- done
- test -z "$LD" && LD="$ac_prog"
- ;;
- "")
- # If it fails, then pretend we are not using GCC.
- ac_prog=ld
- ;;
- *)
- # If it is relative, then search for the first ld in PATH.
- with_gnu_ld=unknown
- ;;
- esac
- elif test "$with_gnu_ld" = yes; then
- echo $ac_n "checking for GNU ld... $ac_c" 1>&6
- echo "$progname:1015: checking for GNU ld" >&5
- else
- echo $ac_n "checking for non-GNU ld""... $ac_c" 1>&6
- echo "$progname:1018: checking for non-GNU ld" >&5
- fi
-
- if test -z "$LD"; then
- IFS="${IFS= }"; ac_save_ifs="$IFS"; IFS="${IFS}${PATH_SEPARATOR}"
- for ac_dir in $PATH; do
- test -z "$ac_dir" && ac_dir=.
- if test -f "$ac_dir/$ac_prog" || test -f "$ac_dir/$ac_prog$ac_exeext"; then
- LD="$ac_dir/$ac_prog"
- # Check to see if the program is GNU ld. I'd rather use --version,
- # but apparently some GNU ld's only accept -v.
- # Break only if it was the GNU/non-GNU ld that we prefer.
- if "$LD" -v 2>&1 < /dev/null | egrep '(GNU|with BFD)' > /dev/null; then
- test "$with_gnu_ld" != no && break
- else
- test "$with_gnu_ld" != yes && break
- fi
- fi
- done
- IFS="$ac_save_ifs"
- fi
-
- if test -n "$LD"; then
- echo "$ac_t$LD" 1>&6
- else
- echo "$ac_t"no 1>&6
- fi
-
- if test -z "$LD"; then
- echo "$progname: error: no acceptable ld found in \$PATH" 1>&2
- exit 1
- fi
-fi
-
-# Check to see if it really is or is not GNU ld.
-echo $ac_n "checking if the linker ($LD) is GNU ld... $ac_c" 1>&6
-# I'd rather use --version here, but apparently some GNU ld's only accept -v.
-if $LD -v 2>&1 </dev/null | egrep '(GNU|with BFD)' 1>&5; then
- with_gnu_ld=yes
-else
- with_gnu_ld=no
-fi
-echo "$ac_t$with_gnu_ld" 1>&6
-
-# See if the linker supports building shared libraries.
-echo $ac_n "checking whether the linker ($LD) supports shared libraries... $ac_c" 1>&6
-
-allow_undefined_flag=
-no_undefined_flag=
-need_lib_prefix=unknown
-need_version=unknown
-# when you set need_version to no, make sure it does not cause -set_version
-# flags to be left without arguments
-archive_cmds=
-archive_expsym_cmds=
-old_archive_from_new_cmds=
-export_dynamic_flag_spec=
-whole_archive_flag_spec=
-thread_safe_flag_spec=
-hardcode_libdir_flag_spec=
-hardcode_libdir_separator=
-hardcode_direct=no
-hardcode_minus_L=no
-hardcode_shlibpath_var=unsupported
-runpath_var=
-always_export_symbols=no
-export_symbols_cmds='$NM $libobjs $convenience | $global_symbol_pipe | sed '\''s/.* //'\'' | sort | uniq > $export_symbols'
-# include_expsyms should be a list of space-separated symbols to be *always*
-# included in the symbol list
-include_expsyms=
-# exclude_expsyms can be an egrep regular expression of symbols to exclude
-# it will be wrapped by ` (' and `)$', so one must not match beginning or
-# end of line. Example: `a|bc|.*d.*' will exclude the symbols `a' and `bc',
-# as well as any symbol that contains `d'.
-exclude_expsyms="_GLOBAL_OFFSET_TABLE_"
-# Although _GLOBAL_OFFSET_TABLE_ is a valid symbol C name, most a.out
-# platforms (ab)use it in PIC code, but their linkers get confused if
-# the symbol is explicitly referenced. Since portable code cannot
-# rely on this symbol name, it's probably fine to never include it in
-# preloaded symbol tables.
-
-case "$host_os" in
-cygwin* | mingw*)
- # FIXME: the MSVC++ port hasn't been tested in a loooong time
- # When not using gcc, we currently assume that we are using
- # Microsoft Visual C++.
- if test "$with_gcc" != yes; then
- with_gnu_ld=no
- fi
- ;;
-
-esac
-
-ld_shlibs=yes
-if test "$with_gnu_ld" = yes; then
- # If archive_cmds runs LD, not CC, wlarc should be empty
- wlarc='${wl}'
-
- # See if GNU ld supports shared libraries.
- case "$host_os" in
- aix3* | aix4*)
- # On AIX, the GNU linker is very broken
- ld_shlibs=no
- cat <<EOF 1>&2
-
-*** Warning: the GNU linker, at least up to release 2.9.1, is reported
-*** to be unable to reliably create shared libraries on AIX.
-*** Therefore, libtool is disabling shared libraries support. If you
-*** really care for shared libraries, you may want to modify your PATH
-*** so that a non-GNU linker is found, and then restart.
-
-EOF
- ;;
-
- amigaos*)
- archive_cmds='$rm $objdir/a2ixlibrary.data~$echo "#define NAME $libname" > $objdir/a2ixlibrary.data~$echo "#define LIBRARY_ID 1" >> $objdir/a2ixlibrary.data~$echo "#define VERSION $major" >> $objdir/a2ixlibrary.data~$echo "#define REVISION $revision" >> $objdir/a2ixlibrary.data~$AR cru $lib $libobjs~$RANLIB $lib~(cd $objdir && a2ixlibrary -32)'
- hardcode_libdir_flag_spec='-L$libdir'
- hardcode_minus_L=yes
-
- # Samuel A. Falvo II <kc5tja@dolphin.openprojects.net> reports
- # that the semantics of dynamic libraries on AmigaOS, at least up
- # to version 4, is to share data among multiple programs linked
- # with the same dynamic library. Since this doesn't match the
- # behavior of shared libraries on other platforms, we can use
- # them.
- ld_shlibs=no
- ;;
-
- beos*)
- if $LD --help 2>&1 | egrep ': supported targets:.* elf' > /dev/null; then
- allow_undefined_flag=unsupported
- # Joseph Beckenbach <jrb3@best.com> says some releases of gcc
- # support --undefined. This deserves some investigation. FIXME
- archive_cmds='$CC -nostart $libobjs $deplibs $linkopts ${wl}-soname $wl$soname -o $lib'
- else
- ld_shlibs=no
- fi
- ;;
-
- cygwin* | mingw*)
- # hardcode_libdir_flag_spec is actually meaningless, as there is
- # no search path for DLLs.
- hardcode_libdir_flag_spec='-L$libdir'
- allow_undefined_flag=unsupported
- always_export_symbols=yes
-
- # Extract the symbol export list from an `--export-all' def file,
- # then regenerate the def file from the symbol export list, so that
- # the compiled dll only exports the symbol export list.
- # Be careful not to strip the DATA tag left by newer dlltools.
- export_symbols_cmds='test -f $objdir/$soname-ltdll.c || sed -e "/^# \/\* ltdll\.c starts here \*\//,/^# \/\* ltdll.c ends here \*\// { s/^# //; p; }" -e d < $0 > $objdir/$soname-ltdll.c~
- test -f $objdir/$soname-ltdll.$objext || (cd $objdir && $CC -c $soname-ltdll.c)~
- $DLLTOOL --export-all --exclude-symbols DllMain@12,_cygwin_dll_entry@12,_cygwin_noncygwin_dll_entry@12 --output-def $objdir/$soname-def $objdir/$soname-ltdll.$objext $libobjs $convenience~
- sed -e "1,/EXPORTS/d" -e "s/ @ [0-9]*//" -e "s/ *;.*$//" < $objdir/$soname-def > $export_symbols'
-
- # If DATA tags from a recent dlltool are present, honour them!
- archive_expsym_cmds='echo EXPORTS > $objdir/$soname-def~
- _lt_hint=1;
- cat $export_symbols | while read symbol; do
- set dummy \$symbol;
- case \$# in
- 2) echo " \$2 @ \$_lt_hint ; " >> $objdir/$soname-def;;
- *) echo " \$2 @ \$_lt_hint \$3 ; " >> $objdir/$soname-def;;
- esac;
- _lt_hint=`expr 1 + \$_lt_hint`;
- done~
- test -f $objdir/$soname-ltdll.c || sed -e "/^# \/\* ltdll\.c starts here \*\//,/^# \/\* ltdll.c ends here \*\// { s/^# //; p; }" -e d < $0 > $objdir/$soname-ltdll.c~
- test -f $objdir/$soname-ltdll.$objext || (cd $objdir && $CC -c $soname-ltdll.c)~
- $CC -Wl,--base-file,$objdir/$soname-base -Wl,--dll -nostartfiles -Wl,-e,__cygwin_dll_entry@12 -o $lib $objdir/$soname-ltdll.$objext $libobjs $deplibs $linkopts~
- $DLLTOOL --as=$AS --dllname $soname --exclude-symbols DllMain@12,_cygwin_dll_entry@12,_cygwin_noncygwin_dll_entry@12 --def $objdir/$soname-def --base-file $objdir/$soname-base --output-exp $objdir/$soname-exp~
- $CC -Wl,--base-file,$objdir/$soname-base $objdir/$soname-exp -Wl,--dll -nostartfiles -Wl,-e,__cygwin_dll_entry@12 -o $lib $objdir/$soname-ltdll.$objext $libobjs $deplibs $linkopts~
- $DLLTOOL --as=$AS --dllname $soname --exclude-symbols DllMain@12,_cygwin_dll_entry@12,_cygwin_noncygwin_dll_entry@12 --def $objdir/$soname-def --base-file $objdir/$soname-base --output-exp $objdir/$soname-exp~
- $CC $objdir/$soname-exp -Wl,--dll -nostartfiles -Wl,-e,__cygwin_dll_entry@12 -o $lib $objdir/$soname-ltdll.$objext $libobjs $deplibs $linkopts'
-
- old_archive_from_new_cmds='$DLLTOOL --as=$AS --dllname $soname --def $objdir/$soname-def --output-lib $objdir/$libname.a'
- ;;
-
- netbsd*)
- if echo __ELF__ | $CC -E - | grep __ELF__ >/dev/null; then
- archive_cmds='$CC -shared $libobjs $deplibs $linkopts ${wl}-soname $wl$soname -o $lib'
- archive_expsym_cmds='$CC -shared $libobjs $deplibs $linkopts ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib'
- else
- archive_cmds='$LD -Bshareable $libobjs $deplibs $linkopts -o $lib'
- # can we support soname and/or expsyms with a.out? -oliva
- fi
- ;;
-
- solaris* | sysv5*)
- if $LD -v 2>&1 | egrep 'BFD 2\.8' > /dev/null; then
- ld_shlibs=no
- cat <<EOF 1>&2
-
-*** Warning: The releases 2.8.* of the GNU linker cannot reliably
-*** create shared libraries on Solaris systems. Therefore, libtool
-*** is disabling shared libraries support. We urge you to upgrade GNU
-*** binutils to release 2.9.1 or newer. Another option is to modify
-*** your PATH or compiler configuration so that the native linker is
-*** used, and then restart.
-
-EOF
- elif $LD --help 2>&1 | egrep ': supported targets:.* elf' > /dev/null; then
- archive_cmds='$CC -shared $libobjs $deplibs $linkopts ${wl}-soname $wl$soname -o $lib'
- archive_expsym_cmds='$CC -shared $libobjs $deplibs $linkopts ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib'
- else
- ld_shlibs=no
- fi
- ;;
-
- sunos4*)
- archive_cmds='$LD -assert pure-text -Bshareable -o $lib $libobjs $deplibs $linkopts'
- wlarc=
- hardcode_direct=yes
- hardcode_shlibpath_var=no
- ;;
-
- *)
- if $LD --help 2>&1 | egrep ': supported targets:.* elf' > /dev/null; then
- archive_cmds='$CC -shared $libobjs $deplibs $linkopts ${wl}-soname $wl$soname -o $lib'
- archive_expsym_cmds='$CC -shared $libobjs $deplibs $linkopts ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib'
- else
- ld_shlibs=no
- fi
- ;;
- esac
-
- if test "$ld_shlibs" = yes; then
- runpath_var=LD_RUN_PATH
- hardcode_libdir_flag_spec='${wl}--rpath ${wl}$libdir'
- export_dynamic_flag_spec='${wl}--export-dynamic'
- case $host_os in
- cygwin* | mingw*)
- # dlltool doesn't understand --whole-archive et. al.
- whole_archive_flag_spec=
- ;;
- *)
- # ancient GNU ld didn't support --whole-archive et. al.
- if $LD --help 2>&1 | egrep 'no-whole-archive' > /dev/null; then
- whole_archive_flag_spec="$wlarc"'--whole-archive$convenience '"$wlarc"'--no-whole-archive'
- else
- whole_archive_flag_spec=
- fi
- ;;
- esac
- fi
-else
- # PORTME fill in a description of your system's linker (not GNU ld)
- case "$host_os" in
- aix3*)
- allow_undefined_flag=unsupported
- always_export_symbols=yes
- archive_expsym_cmds='$LD -o $objdir/$soname $libobjs $deplibs $linkopts -bE:$export_symbols -T512 -H512 -bM:SRE~$AR cru $lib $objdir/$soname'
- # Note: this linker hardcodes the directories in LIBPATH if there
- # are no directories specified by -L.
- hardcode_minus_L=yes
- if test "$with_gcc" = yes && test -z "$link_static_flag"; then
- # Neither direct hardcoding nor static linking is supported with a
- # broken collect2.
- hardcode_direct=unsupported
- fi
- ;;
-
- aix4*)
- hardcode_libdir_flag_spec='${wl}-b ${wl}nolibpath ${wl}-b ${wl}libpath:$libdir:/usr/lib:/lib'
- hardcode_libdir_separator=':'
- if test "$with_gcc" = yes; then
- collect2name=`${CC} -print-prog-name=collect2`
- if test -f "$collect2name" && \
- strings "$collect2name" | grep resolve_lib_name >/dev/null
- then
- # We have reworked collect2
- hardcode_direct=yes
- else
- # We have old collect2
- hardcode_direct=unsupported
- # It fails to find uninstalled libraries when the uninstalled
- # path is not listed in the libpath. Setting hardcode_minus_L
- # to unsupported forces relinking
- hardcode_minus_L=yes
- hardcode_libdir_flag_spec='-L$libdir'
- hardcode_libdir_separator=
- fi
- shared_flag='-shared'
- else
- shared_flag='${wl}-bM:SRE'
- hardcode_direct=yes
- fi
- allow_undefined_flag=' ${wl}-berok'
- archive_cmds="\$CC $shared_flag"' -o $objdir/$soname $libobjs $deplibs $linkopts ${wl}-bexpall ${wl}-bnoentry${allow_undefined_flag}'
- archive_expsym_cmds="\$CC $shared_flag"' -o $objdir/$soname $libobjs $deplibs $linkopts ${wl}-bE:$export_symbols ${wl}-bnoentry${allow_undefined_flag}'
- #### local change for Sleepycat DB:
- # On AIX 4.3.2 (at least), -bexpall exports too much,
- # causing symbol conflicts. This was:
- # case "$host_os" in aix4.[01]|aix4.[01].*)
- case "$host_os" in aix4.*)
- # According to Greg Wooledge, -bexpall is only supported from AIX 4.2 on
- always_export_symbols=yes ;;
- esac
- ;;
-
- amigaos*)
- archive_cmds='$rm $objdir/a2ixlibrary.data~$echo "#define NAME $libname" > $objdir/a2ixlibrary.data~$echo "#define LIBRARY_ID 1" >> $objdir/a2ixlibrary.data~$echo "#define VERSION $major" >> $objdir/a2ixlibrary.data~$echo "#define REVISION $revision" >> $objdir/a2ixlibrary.data~$AR cru $lib $libobjs~$RANLIB $lib~(cd $objdir && a2ixlibrary -32)'
- hardcode_libdir_flag_spec='-L$libdir'
- hardcode_minus_L=yes
- # see comment about different semantics on the GNU ld section
- ld_shlibs=no
- ;;
-
- cygwin* | mingw*)
- # When not using gcc, we currently assume that we are using
- # Microsoft Visual C++.
- # hardcode_libdir_flag_spec is actually meaningless, as there is
- # no search path for DLLs.
- hardcode_libdir_flag_spec=' '
- allow_undefined_flag=unsupported
- # Tell ltmain to make .lib files, not .a files.
- libext=lib
- # FIXME: Setting linknames here is a bad hack.
- archive_cmds='$CC -o $lib $libobjs $linkopts `echo "$deplibs" | sed -e '\''s/ -lc$//'\''` -link -dll~linknames='
- # The linker will automatically build a .lib file if we build a DLL.
- old_archive_from_new_cmds='true'
- # FIXME: Should let the user specify the lib program.
- old_archive_cmds='lib /OUT:$oldlib$oldobjs'
- fix_srcfile_path='`cygpath -w $srcfile`'
- ;;
-
- freebsd1*)
- ld_shlibs=no
- ;;
-
- # FreeBSD 2.2.[012] allows us to include c++rt0.o to get C++ constructor
- # support. Future versions do this automatically, but an explicit c++rt0.o
- # does not break anything, and helps significantly (at the cost of a little
- # extra space).
- freebsd2.2*)
- archive_cmds='$LD -Bshareable -o $lib $libobjs $deplibs $linkopts /usr/lib/c++rt0.o'
- hardcode_libdir_flag_spec='-R$libdir'
- hardcode_direct=yes
- hardcode_shlibpath_var=no
- ;;
-
- # Unfortunately, older versions of FreeBSD 2 do not have this feature.
- freebsd2*)
- archive_cmds='$LD -Bshareable -o $lib $libobjs $deplibs $linkopts'
- hardcode_direct=yes
- hardcode_minus_L=yes
- hardcode_shlibpath_var=no
- ;;
-
- # FreeBSD 3 and greater uses gcc -shared to do shared libraries.
- freebsd*)
- archive_cmds='$CC -shared -o $lib $libobjs $deplibs $linkopts'
- hardcode_libdir_flag_spec='-R$libdir'
- hardcode_direct=yes
- hardcode_shlibpath_var=no
- ;;
-
- hpux9* | hpux10* | hpux11*)
- case "$host_os" in
- hpux9*) archive_cmds='$rm $objdir/$soname~$LD -b +b $install_libdir -o $objdir/$soname $libobjs $deplibs $linkopts~test $objdir/$soname = $lib || mv $objdir/$soname $lib' ;;
- *) archive_cmds='$LD -b +h $soname +b $install_libdir -o $lib $libobjs $deplibs $linkopts' ;;
- esac
- hardcode_libdir_flag_spec='${wl}+b ${wl}$libdir'
- hardcode_libdir_separator=:
- hardcode_direct=yes
- hardcode_minus_L=yes # Not in the search PATH, but as the default
- # location of the library.
- export_dynamic_flag_spec='${wl}-E'
- ;;
-
- irix5* | irix6*)
- if test "$with_gcc" = yes; then
- archive_cmds='$CC -shared $libobjs $deplibs $linkopts ${wl}-soname ${wl}$soname `test -n "$verstring" && echo ${wl}-set_version ${wl}$verstring` ${wl}-update_registry ${wl}${objdir}/so_locations -o $lib'
- else
- archive_cmds='$LD -shared $libobjs $deplibs $linkopts -soname $soname `test -n "$verstring" && echo -set_version $verstring` -update_registry ${objdir}/so_locations -o $lib'
- fi
- hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir'
- hardcode_libdir_separator=:
- ;;
-
- netbsd*)
- if echo __ELF__ | $CC -E - | grep __ELF__ >/dev/null; then
- archive_cmds='$LD -Bshareable -o $lib $libobjs $deplibs $linkopts' # a.out
- else
- archive_cmds='$LD -shared -o $lib $libobjs $deplibs $linkopts' # ELF
- fi
- hardcode_libdir_flag_spec='${wl}-R$libdir'
- hardcode_direct=yes
- hardcode_shlibpath_var=no
- ;;
-
- openbsd*)
- archive_cmds='$LD -Bshareable -o $lib $libobjs $deplibs $linkopts'
- hardcode_libdir_flag_spec='-R$libdir'
- hardcode_direct=yes
- hardcode_shlibpath_var=no
- ;;
-
- os2*)
- hardcode_libdir_flag_spec='-L$libdir'
- hardcode_minus_L=yes
- allow_undefined_flag=unsupported
- archive_cmds='$echo "LIBRARY $libname INITINSTANCE" > $objdir/$libname.def~$echo "DESCRIPTION \"$libname\"" >> $objdir/$libname.def~$echo DATA >> $objdir/$libname.def~$echo " SINGLE NONSHARED" >> $objdir/$libname.def~$echo EXPORTS >> $objdir/$libname.def~emxexp $libobjs >> $objdir/$libname.def~$CC -Zdll -Zcrtdll -o $lib $libobjs $deplibs $linkopts $objdir/$libname.def'
- old_archive_from_new_cmds='emximp -o $objdir/$libname.a $objdir/$libname.def'
- ;;
-
- osf3*)
- if test "$with_gcc" = yes; then
- allow_undefined_flag=' ${wl}-expect_unresolved ${wl}\*'
- archive_cmds='$CC -shared${allow_undefined_flag} $libobjs $deplibs $linkopts ${wl}-soname ${wl}$soname `test -n "$verstring" && echo ${wl}-set_version ${wl}$verstring` ${wl}-update_registry ${wl}${objdir}/so_locations -o $lib'
- else
- allow_undefined_flag=' -expect_unresolved \*'
- archive_cmds='$LD -shared${allow_undefined_flag} $libobjs $deplibs $linkopts -soname $soname `test -n "$verstring" && echo -set_version $verstring` -update_registry ${objdir}/so_locations -o $lib'
- fi
- hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir'
- hardcode_libdir_separator=:
- ;;
-
- osf4* | osf5*) # As osf3* with the addition of the -msym flag
- if test "$with_gcc" = yes; then
- allow_undefined_flag=' ${wl}-expect_unresolved ${wl}\*'
- archive_cmds='$CC -shared${allow_undefined_flag} $libobjs $deplibs $linkopts ${wl}-msym ${wl}-soname ${wl}$soname `test -n "$verstring" && echo ${wl}-set_version ${wl}$verstring` ${wl}-update_registry ${wl}${objdir}/so_locations -o $lib'
- else
- allow_undefined_flag=' -expect_unresolved \*'
- archive_cmds='$LD -shared${allow_undefined_flag} $libobjs $deplibs $linkopts -msym -soname $soname `test -n "$verstring" && echo -set_version $verstring` -update_registry ${objdir}/so_locations -o $lib'
- fi
- hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir'
- hardcode_libdir_separator=:
- ;;
- rhapsody*)
- archive_cmds='$CC -bundle -undefined suppress -o $lib $libobjs $deplibs $linkopts'
- hardcode_libdir_flags_spec='-L$libdir'
- hardcode_direct=yes
- hardcode_shlibpath_var=no
- ;;
-
- sco3.2v5*)
- archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linkopts'
- hardcode_shlibpath_var=no
- runpath_var=LD_RUN_PATH
- hardcode_runpath_var=yes
- ;;
-
- solaris*)
- no_undefined_flag=' -z text'
- # $CC -shared without GNU ld will not create a library from C++
- # object files and a static libstdc++, so better avoid it for now
- archive_cmds='$LD -G${allow_undefined_flag} -h $soname -o $lib $libobjs $deplibs $linkopts'
- archive_expsym_cmds='$echo "{ global:" > $lib.exp~cat $export_symbols | sed -e "s/\(.*\)/\1;/" >> $lib.exp~$echo "local: *; };" >> $lib.exp~
- $LD -G${allow_undefined_flag} -M $lib.exp -h $soname -o $lib $libobjs $deplibs $linkopts~$rm $lib.exp'
- hardcode_libdir_flag_spec='-R$libdir'
- hardcode_shlibpath_var=no
- case "$host_os" in
- solaris2.[0-5] | solaris2.[0-5].*) ;;
- *) # Supported since Solaris 2.6 (maybe 2.5.1?)
- whole_archive_flag_spec='-z allextract$convenience -z defaultextract' ;;
- esac
- ;;
-
- sunos4*)
- archive_cmds='$LD -assert pure-text -Bstatic -o $lib $libobjs $deplibs $linkopts'
- hardcode_libdir_flag_spec='-L$libdir'
- hardcode_direct=yes
- hardcode_minus_L=yes
- hardcode_shlibpath_var=no
- ;;
-
- sysv4)
- if test "x$host_vendor" = xsequent; then
- # Use $CC to link under sequent, because it throws in some extra .o
- # files that make .init and .fini sections work.
- archive_cmds='$CC -G ${wl}-h $soname -o $lib $libobjs $deplibs $linkopts'
- else
- archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linkopts'
- fi
- runpath_var='LD_RUN_PATH'
- hardcode_shlibpath_var=no
- hardcode_direct=no # Motorola manual says yes, but my tests say they lie
- ;;
-
- sysv4.3*)
- archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linkopts'
- hardcode_shlibpath_var=no
- export_dynamic_flag_spec='-Bexport'
- ;;
-
- sysv5*)
- no_undefined_flag=' -z text'
- # $CC -shared without GNU ld will not create a library from C++
- # object files and a static libstdc++, so better avoid it for now
- archive_cmds='$LD -G${allow_undefined_flag} -h $soname -o $lib $libobjs $deplibs $linkopts'
- archive_expsym_cmds='$echo "{ global:" > $lib.exp~cat $export_symbols | sed -e "s/\(.*\)/\1;/" >> $lib.exp~$echo "local: *; };" >> $lib.exp~
- $LD -G${allow_undefined_flag} -M $lib.exp -h $soname -o $lib $libobjs $deplibs $linkopts~$rm $lib.exp'
- hardcode_libdir_flag_spec=
- hardcode_shlibpath_var=no
- runpath_var='LD_RUN_PATH'
- ;;
-
- uts4*)
- archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linkopts'
- hardcode_libdir_flag_spec='-L$libdir'
- hardcode_shlibpath_var=no
- ;;
-
- dgux*)
- archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linkopts'
- hardcode_libdir_flag_spec='-L$libdir'
- hardcode_shlibpath_var=no
- ;;
-
- sysv4*MP*)
- if test -d /usr/nec; then
- archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linkopts'
- hardcode_shlibpath_var=no
- runpath_var=LD_RUN_PATH
- hardcode_runpath_var=yes
- ld_shlibs=yes
- fi
- ;;
-
- sysv4.2uw2*)
- archive_cmds='$LD -G -o $lib $libobjs $deplibs $linkopts'
- hardcode_direct=yes
- hardcode_minus_L=no
- hardcode_shlibpath_var=no
- hardcode_runpath_var=yes
- runpath_var=LD_RUN_PATH
- ;;
-
- unixware7*)
- archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linkopts'
- runpath_var='LD_RUN_PATH'
- hardcode_shlibpath_var=no
- ;;
-
- *)
- ld_shlibs=no
- ;;
- esac
-fi
-echo "$ac_t$ld_shlibs" 1>&6
-test "$ld_shlibs" = no && can_build_shared=no
-
-if test -z "$NM"; then
- echo $ac_n "checking for BSD-compatible nm... $ac_c" 1>&6
- case "$NM" in
- [\\/]* | [A-Za-z]:[\\/]*) ;; # Let the user override the test with a path.
- *)
- IFS="${IFS= }"; ac_save_ifs="$IFS"; IFS="${IFS}${PATH_SEPARATOR}"
- for ac_dir in $PATH /usr/ucb /usr/ccs/bin /bin; do
- test -z "$ac_dir" && ac_dir=.
- if test -f $ac_dir/nm || test -f $ac_dir/nm$ac_exeext; then
- # Check to see if the nm accepts a BSD-compat flag.
- # Adding the `sed 1q' prevents false positives on HP-UX, which says:
- # nm: unknown option "B" ignored
- if ($ac_dir/nm -B /dev/null 2>&1 | sed '1q'; exit 0) | egrep /dev/null >/dev/null; then
- NM="$ac_dir/nm -B"
- break
- elif ($ac_dir/nm -p /dev/null 2>&1 | sed '1q'; exit 0) | egrep /dev/null >/dev/null; then
- NM="$ac_dir/nm -p"
- break
- else
- NM=${NM="$ac_dir/nm"} # keep the first match, but
- continue # so that we can try to find one that supports BSD flags
- fi
- fi
- done
- IFS="$ac_save_ifs"
- test -z "$NM" && NM=nm
- ;;
- esac
- echo "$ac_t$NM" 1>&6
-fi
-
-# Check for a command to grab the raw symbol name followed by the C symbol from nm.
-echo $ac_n "checking command to parse $NM output... $ac_c" 1>&6
-
-# These are sane defaults that work on at least a few old systems.
-# [They come from Ultrix. What could be older than Ultrix?!! ;)]
-
-# Character class describing NM global symbol codes.
-symcode='[BCDEGRST]'
-
-# Regexp to match symbols that can be accessed directly from C.
-sympat='\([_A-Za-z][_A-Za-z0-9]*\)'
-
-# Transform the above into a raw symbol and a C symbol.
-symxfrm='\1 \2\3 \3'
-
-# Transform an extracted symbol line into a proper C declaration
-global_symbol_to_cdecl="sed -n -e 's/^. .* \(.*\)$/extern char \1;/p'"
-
-# Define system-specific variables.
-case "$host_os" in
-aix*)
- symcode='[BCDT]'
- ;;
-cygwin* | mingw*)
- symcode='[ABCDGISTW]'
- ;;
-hpux*) # Its linker distinguishes data from code symbols
- global_symbol_to_cdecl="sed -n -e 's/^T .* \(.*\)$/extern char \1();/p' -e 's/^. .* \(.*\)$/extern char \1;/p'"
- ;;
-irix*)
- symcode='[BCDEGRST]'
- ;;
-solaris*)
- symcode='[BDT]'
- ;;
-sysv4)
- symcode='[DFNSTU]'
- ;;
-esac
-
-# If we're using GNU nm, then use its standard symbol codes.
-if $NM -V 2>&1 | egrep '(GNU|with BFD)' > /dev/null; then
- symcode='[ABCDGISTW]'
-fi
-
-# Try without a prefix underscore, then with it.
-for ac_symprfx in "" "_"; do
-
- # Write the raw and C identifiers.
- global_symbol_pipe="sed -n -e 's/^.*[ ]\($symcode\)[ ][ ]*\($ac_symprfx\)$sympat$/$symxfrm/p'"
-
- # Check to see that the pipe works correctly.
- pipe_works=no
- $rm conftest*
- cat > conftest.c <<EOF
-#ifdef __cplusplus
-extern "C" {
-#endif
-char nm_test_var;
-void nm_test_func(){}
-#ifdef __cplusplus
-}
-#endif
-main(){nm_test_var='a';nm_test_func();return(0);}
-EOF
-
- echo "$progname:1653: checking if global_symbol_pipe works" >&5
- if { (eval echo $progname:1654: \"$ac_compile\") 1>&5; (eval $ac_compile) 2>&5; } && test -s conftest.$objext; then
- # Now try to grab the symbols.
- nlist=conftest.nm
- if { echo "$progname:1657: eval \"$NM conftest.$objext | $global_symbol_pipe > $nlist\"" >&5; eval "$NM conftest.$objext | $global_symbol_pipe > $nlist 2>&5"; } && test -s "$nlist"; then
-
- # Try sorting and uniquifying the output.
- if sort "$nlist" | uniq > "$nlist"T; then
- mv -f "$nlist"T "$nlist"
- else
- rm -f "$nlist"T
- fi
-
- # Make sure that we snagged all the symbols we need.
- if egrep ' nm_test_var$' "$nlist" >/dev/null; then
- if egrep ' nm_test_func$' "$nlist" >/dev/null; then
- cat <<EOF > conftest.c
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-EOF
- # Now generate the symbol file.
- eval "$global_symbol_to_cdecl"' < "$nlist" >> conftest.c'
-
- cat <<EOF >> conftest.c
-#if defined (__STDC__) && __STDC__
-# define lt_ptr_t void *
-#else
-# define lt_ptr_t char *
-# define const
-#endif
-
-/* The mapping between symbol names and symbols. */
-const struct {
- const char *name;
- lt_ptr_t address;
-}
-lt_preloaded_symbols[] =
-{
-EOF
- sed 's/^. \(.*\) \(.*\)$/ {"\2", (lt_ptr_t) \&\2},/' < "$nlist" >> conftest.c
- cat <<\EOF >> conftest.c
- {0, (lt_ptr_t) 0}
-};
-
-#ifdef __cplusplus
-}
-#endif
-EOF
- # Now try linking the two files.
- mv conftest.$objext conftstm.$objext
- save_LIBS="$LIBS"
- save_CFLAGS="$CFLAGS"
- LIBS="conftstm.$objext"
- CFLAGS="$CFLAGS$no_builtin_flag"
- if { (eval echo $progname:1709: \"$ac_link\") 1>&5; (eval $ac_link) 2>&5; } && test -s conftest; then
- pipe_works=yes
- else
- echo "$progname: failed program was:" >&5
- cat conftest.c >&5
- fi
- LIBS="$save_LIBS"
- else
- echo "cannot find nm_test_func in $nlist" >&5
- fi
- else
- echo "cannot find nm_test_var in $nlist" >&5
- fi
- else
- echo "cannot run $global_symbol_pipe" >&5
- fi
- else
- echo "$progname: failed program was:" >&5
- cat conftest.c >&5
- fi
- $rm conftest* conftst*
-
- # Do not use the global_symbol_pipe unless it works.
- if test "$pipe_works" = yes; then
- break
- else
- global_symbol_pipe=
- fi
-done
-if test "$pipe_works" = yes; then
- echo "${ac_t}ok" 1>&6
-else
- echo "${ac_t}failed" 1>&6
-fi
-
-if test -z "$global_symbol_pipe"; then
- global_symbol_to_cdecl=
-fi
-
-# Check hardcoding attributes.
-echo $ac_n "checking how to hardcode library paths into programs... $ac_c" 1>&6
-hardcode_action=
-if test -n "$hardcode_libdir_flag_spec" || \
- test -n "$runpath_var"; then
-
- # We can hardcode non-existent directories.
- if test "$hardcode_direct" != no &&
- # If the only mechanism to avoid hardcoding is shlibpath_var, we
- # have to relink, otherwise we might link with an installed library
- # when we should be linking with a yet-to-be-installed one
- ## test "$hardcode_shlibpath_var" != no &&
- test "$hardcode_minus_L" != no; then
- # Linking always hardcodes the temporary library directory.
- hardcode_action=relink
- else
- # We can link without hardcoding, and we can hardcode nonexistent dirs.
- hardcode_action=immediate
- fi
-else
- # We cannot hardcode anything, or else we can only hardcode existing
- # directories.
- hardcode_action=unsupported
-fi
-echo "$ac_t$hardcode_action" 1>&6
-
-
-reload_flag=
-reload_cmds='$LD$reload_flag -o $output$reload_objs'
-echo $ac_n "checking for $LD option to reload object files... $ac_c" 1>&6
-# PORTME Some linkers may need a different reload flag.
-reload_flag='-r'
-echo "$ac_t$reload_flag" 1>&6
-test -n "$reload_flag" && reload_flag=" $reload_flag"
-
-# PORTME Fill in your ld.so characteristics
-library_names_spec=
-libname_spec='lib$name'
-soname_spec=
-postinstall_cmds=
-postuninstall_cmds=
-finish_cmds=
-finish_eval=
-shlibpath_var=
-shlibpath_overrides_runpath=unknown
-version_type=none
-dynamic_linker="$host_os ld.so"
-sys_lib_dlsearch_path_spec="/lib /usr/lib"
-sys_lib_search_path_spec="/lib /usr/lib /usr/local/lib"
-file_magic_cmd=
-file_magic_test_file=
-deplibs_check_method='unknown'
-# Need to set the preceding variable on all platforms that support
-# interlibrary dependencies.
-# `none' -- dependencies not supported.
-# `unknown' -- same as none, but documents that we really don't know.
-# `pass_all' -- all dependencies passed with no checks.
-# `test_compile' -- check by making test program.
-# `file_magic [regex]' -- check by looking for files in library path
-# which responds to the $file_magic_cmd with a given egrep regex.
-# If you have `file' or equivalent on your system and you're not sure
-# whether `pass_all' will *always* work, you probably want this one.
-echo $ac_n "checking dynamic linker characteristics... $ac_c" 1>&6
-case "$host_os" in
-aix3*)
- version_type=linux
- library_names_spec='${libname}${release}.so$versuffix $libname.a'
- shlibpath_var=LIBPATH
-
- # AIX has no versioning support, so we append a major version to the name.
- soname_spec='${libname}${release}.so$major'
- ;;
-
-aix4*)
- version_type=linux
- # AIX has no versioning support, so currently we cannot hardcode a correct
- # soname into the executable. Probably we can add versioning support to
- # collect2, so additional links can be useful in the future.
- # We preserve .a as the extension for shared libraries even though the
- # AIX 4.2 and later linker supports .so
- library_names_spec='${libname}${release}.so$versuffix ${libname}${release}.so$major $libname.a'
- shlibpath_var=LIBPATH
- deplibs_check_method=pass_all
- ;;
-
-amigaos*)
- library_names_spec='$libname.ixlibrary $libname.a'
- # Create ${libname}_ixlibrary.a entries in /sys/libs.
- finish_eval='for lib in `ls $libdir/*.ixlibrary 2>/dev/null`; do libname=`$echo "X$lib" | $Xsed -e '\''s%^.*/\([^/]*\)\.ixlibrary$%\1%'\''`; test $rm /sys/libs/${libname}_ixlibrary.a; $show "(cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a)"; (cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a) || exit 1; done'
- ;;
-
-beos*)
- library_names_spec='${libname}.so'
- dynamic_linker="$host_os ld.so"
- shlibpath_var=LIBRARY_PATH
- deplibs_check_method=pass_all
- lt_cv_dlopen="load_add_on"
- lt_cv_dlopen_libs=
- lt_cv_dlopen_self=yes
- ;;
-
-bsdi4*)
- version_type=linux
- need_version=no
- library_names_spec='${libname}${release}.so$versuffix ${libname}${release}.so$major $libname.so'
- soname_spec='${libname}${release}.so$major'
- finish_cmds='PATH="\$PATH:/sbin" ldconfig $libdir'
- shlibpath_var=LD_LIBRARY_PATH
- deplibs_check_method='file_magic ELF [0-9][0-9]*-bit [ML]SB (shared object|dynamic lib)'
- file_magic_cmd=/usr/bin/file
- file_magic_test_file=/shlib/libc.so
- sys_lib_search_path_spec="/shlib /usr/lib /usr/X11/lib /usr/contrib/lib /lib /usr/local/lib"
- sys_lib_dlsearch_path_spec="/shlib /usr/lib /usr/local/lib"
- export_dynamic_flag_spec=-rdynamic
- # the default ld.so.conf also contains /usr/contrib/lib and
- # /usr/X11R6/lib (/usr/X11 is a link to /usr/X11R6), but let us allow
- # libtool to hard-code these into programs
- ;;
-
-cygwin* | mingw*)
- version_type=windows
- need_version=no
- need_lib_prefix=no
- if test "$with_gcc" = yes; then
- library_names_spec='${libname}`echo ${release} | sed -e 's/[.]/-/g'`${versuffix}.dll $libname.a'
- else
- library_names_spec='${libname}`echo ${release} | sed -e 's/[.]/-/g'`${versuffix}.dll $libname.lib'
- fi
- dynamic_linker='Win32 ld.exe'
- deplibs_check_method='file_magic file format pei*-i386(.*architecture: i386)?'
- file_magic_cmd='${OBJDUMP} -f'
- # FIXME: first we should search . and the directory the executable is in
- shlibpath_var=PATH
- lt_cv_dlopen="LoadLibrary"
- lt_cv_dlopen_libs=
- ;;
-
-freebsd1*)
- dynamic_linker=no
- ;;
-
-freebsd*)
- objformat=`test -x /usr/bin/objformat && /usr/bin/objformat || echo aout`
- version_type=freebsd-$objformat
- case "$version_type" in
- freebsd-elf*)
- deplibs_check_method='file_magic ELF [0-9][0-9]*-bit [LM]SB shared object'
- file_magic_cmd=/usr/bin/file
- file_magic_test_file=`echo /usr/lib/libc.so*`
- library_names_spec='${libname}${release}.so$versuffix ${libname}${release}.so $libname.so'
- need_version=no
- need_lib_prefix=no
- ;;
- freebsd-*)
- deplibs_check_method=unknown
- library_names_spec='${libname}${release}.so$versuffix $libname.so$versuffix'
- need_version=yes
- ;;
- esac
- shlibpath_var=LD_LIBRARY_PATH
- case "$host_os" in
- freebsd2* | freebsd3.[01]* | freebsdelf3.[01]*)
- shlibpath_overrides_runpath=yes
- ;;
- *) # from 3.2 on
- shlibpath_overrides_runpath=no
- ;;
- esac
- ;;
-
-gnu*)
- version_type=linux
- need_lib_prefix=no
- need_version=no
- library_names_spec='${libname}${release}.so$versuffix ${libname}${release}.so${major} ${libname}.so'
- soname_spec='${libname}${release}.so$major'
- shlibpath_var=LD_LIBRARY_PATH
- ;;
-
-hpux9* | hpux10* | hpux11*)
- # Give a soname corresponding to the major version so that dld.sl refuses to
- # link against other versions.
- dynamic_linker="$host_os dld.sl"
- version_type=sunos
- need_lib_prefix=no
- need_version=no
- shlibpath_var=SHLIB_PATH
- shlibpath_overrides_runpath=no # +s is required to enable SHLIB_PATH
- library_names_spec='${libname}${release}.sl$versuffix ${libname}${release}.sl$major $libname.sl'
- soname_spec='${libname}${release}.sl$major'
- # HP-UX runs *really* slowly unless shared libraries are mode 555.
- postinstall_cmds='chmod 555 $lib'
-
- #### local change for Sleepycat DB: [#1990]
- # The following 3 lines were added; otherwise dependent libraries are not allowed
- # on HP. We use dependent libraries in a very straightforward way, to
- # incorporate -lnsl into libtcl.sl, and for testing only.
- deplibs_check_method='file_magic PA-RISC[1-9][0-9.]* shared library'
- file_magic_cmd=/usr/bin/file
- file_magic_test_file=`echo /lib/libc.sl*`
- ;;
-
-irix5* | irix6*)
- version_type=irix
- need_lib_prefix=no
- need_version=no
- soname_spec='${libname}${release}.so.$major'
- library_names_spec='${libname}${release}.so.$versuffix ${libname}${release}.so.$major ${libname}${release}.so $libname.so'
- case "$host_os" in
- irix5*)
- libsuff= shlibsuff=
- # this will be overridden with pass_all, but let us keep it just in case
- deplibs_check_method="file_magic ELF 32-bit MSB dynamic lib MIPS - version 1"
- ;;
- *)
- case "$LD" in # libtool.m4 will add one of these switches to LD
- *-32|*"-32 ") libsuff= shlibsuff= libmagic=32-bit;;
- *-n32|*"-n32 ") libsuff=32 shlibsuff=N32 libmagic=N32;;
- *-64|*"-64 ") libsuff=64 shlibsuff=64 libmagic=64-bit;;
- *) libsuff= shlibsuff= libmagic=never-match;;
- esac
- ;;
- esac
- shlibpath_var=LD_LIBRARY${shlibsuff}_PATH
- shlibpath_overrides_runpath=no
- sys_lib_search_path_spec="/usr/lib${libsuff} /lib${libsuff} /usr/local/lib${libsuff}"
- sys_lib_dlsearch_path_spec="/usr/lib${libsuff} /lib${libsuff}"
- file_magic_cmd=/usr/bin/file
- file_magic_test_file=`echo /lib${libsuff}/libc.so*`
- deplibs_check_method='pass_all'
- ;;
-
-# No shared lib support for Linux oldld, aout, or coff.
-linux-gnuoldld* | linux-gnuaout* | linux-gnucoff*)
- dynamic_linker=no
- ;;
-
-# This must be Linux ELF.
-linux-gnu*)
- version_type=linux
- need_lib_prefix=no
- need_version=no
- library_names_spec='${libname}${release}.so$versuffix ${libname}${release}.so$major $libname.so'
- soname_spec='${libname}${release}.so$major'
- finish_cmds='PATH="\$PATH:/sbin" ldconfig -n $libdir'
- shlibpath_var=LD_LIBRARY_PATH
- shlibpath_overrides_runpath=no
- deplibs_check_method=pass_all
-
- if test -f /lib/ld.so.1; then
- dynamic_linker='GNU ld.so'
- else
- # Only the GNU ld.so supports shared libraries on MkLinux.
- case "$host_cpu" in
- powerpc*) dynamic_linker=no ;;
- *) dynamic_linker='Linux ld.so' ;;
- esac
- fi
- ;;
-
-netbsd*)
- version_type=sunos
- if echo __ELF__ | $CC -E - | grep __ELF__ >/dev/null; then
- library_names_spec='${libname}${release}.so$versuffix ${libname}.so$versuffix'
- finish_cmds='PATH="\$PATH:/sbin" ldconfig -m $libdir'
- dynamic_linker='NetBSD (a.out) ld.so'
- else
- library_names_spec='${libname}${release}.so$versuffix ${libname}${release}.so$major ${libname}${release}.so ${libname}.so'
- soname_spec='${libname}${release}.so$major'
- dynamic_linker='NetBSD ld.elf_so'
- fi
- shlibpath_var=LD_LIBRARY_PATH
- ;;
-
-openbsd*)
- version_type=sunos
- if test "$with_gnu_ld" = yes; then
- need_lib_prefix=no
- need_version=no
- fi
- library_names_spec='${libname}${release}.so$versuffix ${libname}.so$versuffix'
- finish_cmds='PATH="\$PATH:/sbin" ldconfig -m $libdir'
- shlibpath_var=LD_LIBRARY_PATH
- ;;
-
-os2*)
- libname_spec='$name'
- need_lib_prefix=no
- library_names_spec='$libname.dll $libname.a'
- dynamic_linker='OS/2 ld.exe'
- shlibpath_var=LIBPATH
- ;;
-
-osf3* | osf4* | osf5*)
- version_type=osf
- need_version=no
- soname_spec='${libname}${release}.so'
- library_names_spec='${libname}${release}.so$versuffix ${libname}${release}.so $libname.so'
- shlibpath_var=LD_LIBRARY_PATH
- # this will be overridden with pass_all, but let us keep it just in case
- deplibs_check_method='file_magic COFF format alpha shared library'
- file_magic_cmd=/usr/bin/file
- file_magic_test_file=/shlib/libc.so
- deplibs_check_method='pass_all'
- sys_lib_search_path_spec="/usr/shlib /usr/ccs/lib /usr/lib/cmplrs/cc /usr/lib /usr/local/lib /var/shlib"
- sys_lib_dlsearch_path_spec="$sys_lib_search_path_spec"
- ;;
-
-rhapsody*)
- version_type=sunos
- library_names_spec='${libname}.so'
- soname_spec='${libname}.so'
- shlibpath_var=DYLD_LIBRARY_PATH
- deplibs_check_method=pass_all
- ;;
-
-sco3.2v5*)
- version_type=osf
- soname_spec='${libname}${release}.so$major'
- library_names_spec='${libname}${release}.so$versuffix ${libname}${release}.so$major $libname.so'
- shlibpath_var=LD_LIBRARY_PATH
- #### local change for Sleepycat DB:
- # The following line was added; otherwise dependent libraries are not allowed
- # on SCO. We use dependent libraries in a very straightforward way.
- deplibs_check_method='pass_all'
- ;;
-
-solaris*)
- version_type=linux
- need_lib_prefix=no
- need_version=no
- library_names_spec='${libname}${release}.so$versuffix ${libname}${release}.so$major $libname.so'
- soname_spec='${libname}${release}.so$major'
- shlibpath_var=LD_LIBRARY_PATH
- shlibpath_overrides_runpath=yes
- # ldd complains unless libraries are executable
- postinstall_cmds='chmod +x $lib'
- deplibs_check_method="file_magic ELF [0-9][0-9]-bit [LM]SB dynamic lib"
- file_magic_cmd=/usr/bin/file
- file_magic_test_file=/lib/libc.so
- ;;
-
-sunos4*)
- version_type=sunos
- library_names_spec='${libname}${release}.so$versuffix ${libname}.so$versuffix'
- finish_cmds='PATH="\$PATH:/usr/etc" ldconfig $libdir'
- shlibpath_var=LD_LIBRARY_PATH
- shlibpath_overrides_runpath=yes
- if test "$with_gnu_ld" = yes; then
- need_lib_prefix=no
- fi
- need_version=yes
- ;;
-
-sysv4 | sysv4.2uw2* | sysv4.3* | sysv5*)
- version_type=linux
- library_names_spec='${libname}${release}.so$versuffix ${libname}${release}.so$major $libname.so'
- soname_spec='${libname}${release}.so$major'
- shlibpath_var=LD_LIBRARY_PATH
- case "$host_vendor" in
- sequent)
- file_magic_cmd='/bin/file'
- deplibs_check_method='file_magic ELF [0-9][0-9]*-bit [LM]SB (shared object|dynamic lib )'
- ;;
- ncr)
- deplibs_check_method='pass_all'
- ;;
- motorola)
- need_lib_prefix=no
- need_version=no
- shlibpath_overrides_runpath=no
- sys_lib_search_path_spec='/lib /usr/lib /usr/ccs/lib'
- deplibs_check_method='file_magic ELF [0-9][0-9]*-bit [ML]SB (shared object|dynamic lib) M[0-9][0-9]* Version [0-9]'
- file_magic_cmd=/usr/bin/file
- file_magic_test_file=`echo /usr/lib/libc.so*`
- ;;
- esac
- ;;
-
-uts4*)
- version_type=linux
- library_names_spec='${libname}${release}.so$versuffix ${libname}${release}.so$major $libname.so'
- soname_spec='${libname}${release}.so$major'
- shlibpath_var=LD_LIBRARY_PATH
- ;;
-
-dgux*)
- version_type=linux
- need_lib_prefix=no
- need_version=no
- library_names_spec='${libname}${release}.so$versuffix ${libname}${release}.so$major $libname.so'
- soname_spec='${libname}${release}.so$major'
- shlibpath_var=LD_LIBRARY_PATH
- ;;
-
-sysv4*MP*)
- if test -d /usr/nec ;then
- version_type=linux
- library_names_spec='$libname.so.$versuffix $libname.so.$major $libname.so'
- soname_spec='$libname.so.$major'
- shlibpath_var=LD_LIBRARY_PATH
- fi
- ;;
-
-#### local change for Sleepycat DB:
-# Add in the QNX support from QNX.
-nto-qnx)
- version_type=linux
- need_lib_prefix=no
- need_version=no
- library_names_spec='${libname}${release}.so$versuffix ${libname}${release}.so$major $libname.so'
- soname_spec='${libname}${release}.so$major'
- shlibpath_var=LD_LIBRARY_PATH
- shlibpath_overrides_runpath=yes
- deplibs_check_method='pass_all'
- ;;
-
-*)
- dynamic_linker=no
- ;;
-esac
-echo "$ac_t$dynamic_linker" 1>&6
-test "$dynamic_linker" = no && can_build_shared=no
-
-# Report the final consequences.
-echo "checking if libtool supports shared libraries... $can_build_shared" 1>&6
-
-# Only try to build win32 dlls if AC_LIBTOOL_WIN32_DLL was used in
-# configure.in, otherwise build static only libraries.
-case "$host_os" in
-cygwin* | mingw* | os2*)
- if test x$can_build_shared = xyes; then
- test x$enable_win32_dll = xno && can_build_shared=no
- echo "checking if package supports dlls... $can_build_shared" 1>&6
- fi
-;;
-esac
-
-if test -n "$file_magic_test_file" && test -n "$file_magic_cmd"; then
- case "$deplibs_check_method" in
- "file_magic "*)
- file_magic_regex="`expr \"$deplibs_check_method\" : \"file_magic \(.*\)\"`"
- if eval $file_magic_cmd \$file_magic_test_file 2> /dev/null |
- egrep "$file_magic_regex" > /dev/null; then
- :
- else
- cat <<EOF 1>&2
-
-*** Warning: the command libtool uses to detect shared libraries,
-*** $file_magic_cmd, produces output that libtool cannot recognize.
-*** The result is that libtool may fail to recognize shared libraries
-*** as such. This will affect the creation of libtool libraries that
-*** depend on shared libraries, but programs linked with such libtool
-*** libraries will work regardless of this problem. Nevertheless, you
-*** may want to report the problem to your system manager and/or to
-*** bug-libtool@gnu.org
-
-EOF
- fi ;;
- esac
-fi
-
-echo $ac_n "checking whether to build shared libraries... $ac_c" 1>&6
-test "$can_build_shared" = "no" && enable_shared=no
-
-# On AIX, shared libraries and static libraries use the same namespace, and
-# are all built from PIC.
-case "$host_os" in
-aix3*)
- test "$enable_shared" = yes && enable_static=no
- if test -n "$RANLIB"; then
- archive_cmds="$archive_cmds~\$RANLIB \$lib"
- postinstall_cmds='$RANLIB $lib'
- fi
- ;;
-
-aix4*)
- test "$enable_shared" = yes && enable_static=no
- ;;
-esac
-
-echo "$ac_t$enable_shared" 1>&6
-
-# Make sure either enable_shared or enable_static is yes.
-test "$enable_shared" = yes || enable_static=yes
-
-echo "checking whether to build static libraries... $enable_static" 1>&6
-
-if test "$hardcode_action" = relink; then
- # Fast installation is not supported
- enable_fast_install=no
-elif test "$shlibpath_overrides_runpath" = yes ||
- test "$enable_shared" = no; then
- # Fast installation is not necessary
- enable_fast_install=needless
-fi
-
-echo $ac_n "checking for objdir... $ac_c" 1>&6
-rm -f .libs 2>/dev/null
-mkdir .libs 2>/dev/null
-if test -d .libs; then
- objdir=.libs
-else
- # MS-DOS does not allow filenames that begin with a dot.
- objdir=_libs
-fi
-rmdir .libs 2>/dev/null
-echo "$ac_t$objdir" 1>&6
-
-if test "x$enable_dlopen" != xyes; then
- enable_dlopen=unknown
- enable_dlopen_self=unknown
- enable_dlopen_self_static=unknown
-else
-if eval "test \"`echo '$''{'lt_cv_dlopen'+set}'`\" != set"; then
- lt_cv_dlopen=no lt_cv_dlopen_libs=
-echo $ac_n "checking for dlopen in -ldl""... $ac_c" 1>&6
-echo "$progname:2248: checking for dlopen in -ldl" >&5
-ac_lib_var=`echo dl'_'dlopen | sed 'y%./+-%__p_%'`
-if eval "test \"`echo '$''{'ac_cv_lib_$ac_lib_var'+set}'`\" = set"; then
- echo $ac_n "(cached) $ac_c" 1>&6
-else
- ac_save_LIBS="$LIBS"
-LIBS="-ldl $LIBS"
-cat > conftest.$ac_ext <<EOF
-#line 2256 "ltconfig"
-/* Override any gcc2 internal prototype to avoid an error. */
-/* We use char because int might match the return type of a gcc2
- builtin and then its argument prototype would still apply. */
-#ifdef __cplusplus
-extern "C"
-#endif
-char dlopen();
-
-int main() {
-dlopen()
-; return 0; }
-EOF
-if { (eval echo $progname:2269: \"$ac_link\") 1>&5; (eval $ac_link) 2>&5; } && test -s conftest${ac_exeext}; then
- rm -rf conftest*
- eval "ac_cv_lib_$ac_lib_var=yes"
-else
- echo "$progname: failed program was:" >&5
- cat conftest.$ac_ext >&5
- rm -rf conftest*
- eval "ac_cv_lib_$ac_lib_var=no"
-fi
-rm -f conftest*
-LIBS="$ac_save_LIBS"
-
-fi
-if eval "test \"`echo '$ac_cv_lib_'$ac_lib_var`\" = yes"; then
- echo "$ac_t""yes" 1>&6
- lt_cv_dlopen="dlopen" lt_cv_dlopen_libs="-ldl"
-else
- echo "$ac_t""no" 1>&6
-echo $ac_n "checking for dlopen""... $ac_c" 1>&6
-echo "$progname:2288: checking for dlopen" >&5
-if eval "test \"`echo '$''{'ac_cv_func_dlopen'+set}'`\" = set"; then
- echo $ac_n "(cached) $ac_c" 1>&6
-else
- cat > conftest.$ac_ext <<EOF
-#line 2293 "ltconfig"
-/* System header to define __stub macros and hopefully few prototypes,
- which can conflict with char dlopen(); below. */
-#include <assert.h>
-/* Override any gcc2 internal prototype to avoid an error. */
-/* We use char because int might match the return type of a gcc2
- builtin and then its argument prototype would still apply. */
-#ifdef __cplusplus
-extern "C"
-#endif
-char dlopen();
-
-int main() {
-
-/* The GNU C library defines this for functions which it implements
- to always fail with ENOSYS. Some functions are actually named
- something starting with __ and the normal name is an alias. */
-#if defined (__stub_dlopen) || defined (__stub___dlopen)
-choke me
-#else
-dlopen();
-#endif
-
-; return 0; }
-EOF
-if { (eval echo $progname:2318: \"$ac_link\") 1>&5; (eval $ac_link) 2>&5; } && test -s conftest${ac_exeext}; then
- rm -rf conftest*
- eval "ac_cv_func_dlopen=yes"
-else
- echo "$progname: failed program was:" >&5
- cat conftest.$ac_ext >&5
- rm -rf conftest*
- eval "ac_cv_func_dlopen=no"
-fi
-rm -f conftest*
-fi
-if eval "test \"`echo '$ac_cv_func_'dlopen`\" = yes"; then
- echo "$ac_t""yes" 1>&6
- lt_cv_dlopen="dlopen"
-else
- echo "$ac_t""no" 1>&6
-echo $ac_n "checking for dld_link in -ldld""... $ac_c" 1>&6
-echo "$progname:2335: checking for dld_link in -ldld" >&5
-ac_lib_var=`echo dld'_'dld_link | sed 'y%./+-%__p_%'`
-if eval "test \"`echo '$''{'ac_cv_lib_$ac_lib_var'+set}'`\" = set"; then
- echo $ac_n "(cached) $ac_c" 1>&6
-else
- ac_save_LIBS="$LIBS"
-LIBS="-ldld $LIBS"
-cat > conftest.$ac_ext <<EOF
-#line 2343 "ltconfig"
-/* Override any gcc2 internal prototype to avoid an error. */
-/* We use char because int might match the return type of a gcc2
- builtin and then its argument prototype would still apply. */
-#ifdef __cplusplus
-extern "C"
-#endif
-char dld_link();
-
-int main() {
-dld_link()
-; return 0; }
-EOF
-if { (eval echo $progname:2356: \"$ac_link\") 1>&5; (eval $ac_link) 2>&5; } && test -s conftest${ac_exeext}; then
- rm -rf conftest*
- eval "ac_cv_lib_$ac_lib_var=yes"
-else
- echo "$progname: failed program was:" >&5
- cat conftest.$ac_ext >&5
- rm -rf conftest*
- eval "ac_cv_lib_$ac_lib_var=no"
-fi
-rm -f conftest*
-LIBS="$ac_save_LIBS"
-
-fi
-if eval "test \"`echo '$ac_cv_lib_'$ac_lib_var`\" = yes"; then
- echo "$ac_t""yes" 1>&6
- lt_cv_dlopen="dld_link" lt_cv_dlopen_libs="-ldld"
-else
- echo "$ac_t""no" 1>&6
-echo $ac_n "checking for shl_load""... $ac_c" 1>&6
-echo "$progname:2375: checking for shl_load" >&5
-if eval "test \"`echo '$''{'ac_cv_func_shl_load'+set}'`\" = set"; then
- echo $ac_n "(cached) $ac_c" 1>&6
-else
- cat > conftest.$ac_ext <<EOF
-#line 2380 "ltconfig"
-/* System header to define __stub macros and hopefully few prototypes,
- which can conflict with char shl_load(); below. */
-#include <assert.h>
-/* Override any gcc2 internal prototype to avoid an error. */
-/* We use char because int might match the return type of a gcc2
- builtin and then its argument prototype would still apply. */
-#ifdef __cplusplus
-extern "C"
-#endif
-char shl_load();
-
-int main() {
-
-/* The GNU C library defines this for functions which it implements
- to always fail with ENOSYS. Some functions are actually named
- something starting with __ and the normal name is an alias. */
-#if defined (__stub_shl_load) || defined (__stub___shl_load)
-choke me
-#else
-shl_load();
-#endif
-
-; return 0; }
-EOF
-if { (eval echo $progname:2405: \"$ac_link\") 1>&5; (eval $ac_link) 2>&5; } && test -s conftest${ac_exeext}; then
- rm -rf conftest*
- eval "ac_cv_func_shl_load=yes"
-else
- echo "$progname: failed program was:" >&5
- cat conftest.$ac_ext >&5
- rm -rf conftest*
- eval "ac_cv_func_shl_load=no"
-fi
-rm -f conftest*
-fi
-
-if eval "test \"`echo '$ac_cv_func_'shl_load`\" = yes"; then
- echo "$ac_t""yes" 1>&6
- lt_cv_dlopen="shl_load"
-else
- echo "$ac_t""no" 1>&6
-echo $ac_n "checking for shl_load in -ldld""... $ac_c" 1>&6
-echo "$progname:2423: checking for shl_load in -ldld" >&5
-ac_lib_var=`echo dld'_'shl_load | sed 'y%./+-%__p_%'`
-if eval "test \"`echo '$''{'ac_cv_lib_$ac_lib_var'+set}'`\" = set"; then
- echo $ac_n "(cached) $ac_c" 1>&6
-else
- ac_save_LIBS="$LIBS"
-LIBS="-ldld $LIBS"
-cat > conftest.$ac_ext <<EOF
-#line 2431 "ltconfig"
-#include "confdefs.h"
-/* Override any gcc2 internal prototype to avoid an error. */
-/* We use char because int might match the return type of a gcc2
- builtin and then its argument prototype would still apply. */
-#ifdef __cplusplus
-extern "C"
-#endif
-char shl_load();
-
-int main() {
-shl_load()
-; return 0; }
-EOF
-if { (eval echo $progname:2445: \"$ac_link\") 1>&5; (eval $ac_link) 2>&5; } && test -s conftest${ac_exeext}; then
- rm -rf conftest*
- eval "ac_cv_lib_$ac_lib_var=yes"
-else
- echo "$progname: failed program was:" >&5
- cat conftest.$ac_ext >&5
- rm -rf conftest*
- eval "ac_cv_lib_$ac_lib_var=no"
-fi
-rm -f conftest*
-LIBS="$ac_save_LIBS"
-
-fi
-if eval "test \"`echo '$ac_cv_lib_'$ac_lib_var`\" = yes"; then
- echo "$ac_t""yes" 1>&6
- lt_cv_dlopen="shl_load" lt_cv_dlopen_libs="-ldld"
-else
- echo "$ac_t""no" 1>&6
-fi
-
-
-fi
-
-
-fi
-
-
-fi
-
-
-fi
-
-fi
-
- if test "x$lt_cv_dlopen" != xno; then
- enable_dlopen=yes
- fi
-
- case "$lt_cv_dlopen" in
- dlopen)
-for ac_hdr in dlfcn.h; do
-ac_safe=`echo "$ac_hdr" | sed 'y%./+-%__p_%'`
-echo $ac_n "checking for $ac_hdr""... $ac_c" 1>&6
-echo "$progname:2488: checking for $ac_hdr" >&5
-if eval "test \"`echo '$''{'ac_cv_header_$ac_safe'+set}'`\" = set"; then
- echo $ac_n "(cached) $ac_c" 1>&6
-else
- cat > conftest.$ac_ext <<EOF
-#line 2493 "ltconfig"
-#include <$ac_hdr>
-int fnord = 0;
-EOF
-ac_try="$ac_compile >/dev/null 2>conftest.out"
-{ (eval echo $progname:2498: \"$ac_try\") 1>&5; (eval $ac_try) 2>&5; }
-ac_err=`grep -v '^ *+' conftest.out | grep -v "^conftest.${ac_ext}\$"`
-if test -z "$ac_err"; then
- rm -rf conftest*
- eval "ac_cv_header_$ac_safe=yes"
-else
- echo "$ac_err" >&5
- echo "$progname: failed program was:" >&5
- cat conftest.$ac_ext >&5
- rm -rf conftest*
- eval "ac_cv_header_$ac_safe=no"
-fi
-rm -f conftest*
-fi
-if eval "test \"`echo '$ac_cv_header_'$ac_safe`\" = yes"; then
- echo "$ac_t""yes" 1>&6
-else
- echo "$ac_t""no" 1>&6
-fi
-done
-
- if test "x$ac_cv_header_dlfcn_h" = xyes; then
- CPPFLAGS="$CPPFLAGS -DHAVE_DLFCN_H"
- fi
- eval LDFLAGS=\"\$LDFLAGS $export_dynamic_flag_spec\"
- LIBS="$lt_cv_dlopen_libs $LIBS"
-
- echo $ac_n "checking whether a program can dlopen itself""... $ac_c" 1>&6
-echo "$progname:2526: checking whether a program can dlopen itself" >&5
-if test "${lt_cv_dlopen_self+set}" = set; then
- echo $ac_n "(cached) $ac_c" 1>&6
-else
- if test "$cross_compiling" = yes; then
- lt_cv_dlopen_self=cross
- else
- cat > conftest.c <<EOF
-#line 2534 "ltconfig"
-
-#if HAVE_DLFCN_H
-#include <dlfcn.h>
-#endif
-
-#include <stdio.h>
-
-#ifdef RTLD_GLOBAL
-# define LTDL_GLOBAL RTLD_GLOBAL
-#else
-# ifdef DL_GLOBAL
-# define LTDL_GLOBAL DL_GLOBAL
-# else
-# define LTDL_GLOBAL 0
-# endif
-#endif
-
-/* We may have to define LTDL_LAZY_OR_NOW on the command line if we
- find out it does not work on some platforms. */
-#ifndef LTDL_LAZY_OR_NOW
-# ifdef RTLD_LAZY
-# define LTDL_LAZY_OR_NOW RTLD_LAZY
-# else
-# ifdef DL_LAZY
-# define LTDL_LAZY_OR_NOW DL_LAZY
-# else
-# ifdef RTLD_NOW
-# define LTDL_LAZY_OR_NOW RTLD_NOW
-# else
-# ifdef DL_NOW
-# define LTDL_LAZY_OR_NOW DL_NOW
-# else
-# define LTDL_LAZY_OR_NOW 0
-# endif
-# endif
-# endif
-# endif
-#endif
-
-fnord() { int i=42;}
-main() { void *self, *ptr1, *ptr2; self=dlopen(0,LTDL_GLOBAL|LTDL_LAZY_OR_NOW);
- if(self) { ptr1=dlsym(self,"fnord"); ptr2=dlsym(self,"_fnord");
- if(ptr1 || ptr2) { dlclose(self); exit(0); } } exit(1); }
-
-EOF
-if { (eval echo $progname:2580: \"$ac_link\") 1>&5; (eval $ac_link) 2>&5; } && test -s conftest && (./conftest; exit) 2>/dev/null
-then
- lt_cv_dlopen_self=yes
-else
- echo "$progname: failed program was:" >&5
- cat conftest.$ac_ext >&5
- rm -fr conftest*
- lt_cv_dlopen_self=no
-fi
-rm -fr conftest*
-fi
-
-fi
-
-echo "$ac_t""$lt_cv_dlopen_self" 1>&6
-
- if test "$lt_cv_dlopen_self" = yes; then
- LDFLAGS="$LDFLAGS $link_static_flag"
- echo $ac_n "checking whether a statically linked program can dlopen itself""... $ac_c" 1>&6
-echo "$progname:2599: checking whether a statically linked program can dlopen itself" >&5
-if test "${lt_cv_dlopen_self_static+set}" = set; then
- echo $ac_n "(cached) $ac_c" 1>&6
-else
- if test "$cross_compiling" = yes; then
- lt_cv_dlopen_self_static=cross
- else
- cat > conftest.c <<EOF
-#line 2607 "ltconfig"
-
-#if HAVE_DLFCN_H
-#include <dlfcn.h>
-#endif
-
-#include <stdio.h>
-
-#ifdef RTLD_GLOBAL
-# define LTDL_GLOBAL RTLD_GLOBAL
-#else
-# ifdef DL_GLOBAL
-# define LTDL_GLOBAL DL_GLOBAL
-# else
-# define LTDL_GLOBAL 0
-# endif
-#endif
-
-/* We may have to define LTDL_LAZY_OR_NOW on the command line if we
- find out it does not work on some platforms. */
-#ifndef LTDL_LAZY_OR_NOW
-# ifdef RTLD_LAZY
-# define LTDL_LAZY_OR_NOW RTLD_LAZY
-# else
-# ifdef DL_LAZY
-# define LTDL_LAZY_OR_NOW DL_LAZY
-# else
-# ifdef RTLD_NOW
-# define LTDL_LAZY_OR_NOW RTLD_NOW
-# else
-# ifdef DL_NOW
-# define LTDL_LAZY_OR_NOW DL_NOW
-# else
-# define LTDL_LAZY_OR_NOW 0
-# endif
-# endif
-# endif
-# endif
-#endif
-
-fnord() { int i=42;}
-main() { void *self, *ptr1, *ptr2; self=dlopen(0,LTDL_GLOBAL|LTDL_LAZY_OR_NOW);
- if(self) { ptr1=dlsym(self,"fnord"); ptr2=dlsym(self,"_fnord");
- if(ptr1 || ptr2) { dlclose(self); exit(0); } } exit(1); }
-
-EOF
-if { (eval echo $progname:2653: \"$ac_link\") 1>&5; (eval $ac_link) 2>&5; } && test -s conftest && (./conftest; exit) 2>/dev/null
-then
- lt_cv_dlopen_self_static=yes
-else
- echo "$progname: failed program was:" >&5
- cat conftest.$ac_ext >&5
- rm -fr conftest*
- lt_cv_dlopen_self_static=no
-fi
-rm -fr conftest*
-fi
-
-fi
-
-echo "$ac_t""$lt_cv_dlopen_self_static" 1>&6
-fi
- ;;
- esac
-
- case "$lt_cv_dlopen_self" in
- yes|no) enable_dlopen_self=$lt_cv_dlopen_self ;;
- *) enable_dlopen_self=unknown ;;
- esac
-
- case "$lt_cv_dlopen_self_static" in
- yes|no) enable_dlopen_self_static=$lt_cv_dlopen_self_static ;;
- *) enable_dlopen_self_static=unknown ;;
- esac
-fi
-
-# Copy echo and quote the copy, instead of the original, because it is
-# used later.
-ltecho="$echo"
-if test "X$ltecho" = "X$CONFIG_SHELL $0 --fallback-echo"; then
- ltecho="$CONFIG_SHELL \$0 --fallback-echo"
-fi
-LTSHELL="$SHELL"
-
-LTCONFIG_VERSION="$VERSION"
-
-# Only quote variables if we're using ltmain.sh.
-case "$ltmain" in
-*.sh)
- # Now quote all the things that may contain metacharacters.
- for var in ltecho old_CC old_CFLAGS old_CPPFLAGS \
- old_LD old_LDFLAGS old_LIBS \
- old_NM old_RANLIB old_LN_S old_DLLTOOL old_OBJDUMP old_AS \
- AR CC LD LN_S NM LTSHELL LTCONFIG_VERSION \
- reload_flag reload_cmds wl \
- pic_flag link_static_flag no_builtin_flag export_dynamic_flag_spec \
- thread_safe_flag_spec whole_archive_flag_spec libname_spec \
- library_names_spec soname_spec \
- RANLIB old_archive_cmds old_archive_from_new_cmds old_postinstall_cmds \
- old_postuninstall_cmds archive_cmds archive_expsym_cmds postinstall_cmds postuninstall_cmds \
- file_magic_cmd export_symbols_cmds deplibs_check_method allow_undefined_flag no_undefined_flag \
- finish_cmds finish_eval global_symbol_pipe global_symbol_to_cdecl \
- hardcode_libdir_flag_spec hardcode_libdir_separator \
- sys_lib_search_path_spec sys_lib_dlsearch_path_spec \
- compiler_c_o compiler_o_lo need_locks exclude_expsyms include_expsyms; do
-
- case "$var" in
- reload_cmds | old_archive_cmds | old_archive_from_new_cmds | \
- old_postinstall_cmds | old_postuninstall_cmds | \
- export_symbols_cmds | archive_cmds | archive_expsym_cmds | \
- postinstall_cmds | postuninstall_cmds | \
- finish_cmds | sys_lib_search_path_spec | sys_lib_dlsearch_path_spec)
- # Double-quote double-evaled strings.
- eval "$var=\\\"\`\$echo \"X\$$var\" | \$Xsed -e \"\$double_quote_subst\" -e \"\$sed_quote_subst\" -e \"\$delay_variable_subst\"\`\\\""
- ;;
- *)
- eval "$var=\\\"\`\$echo \"X\$$var\" | \$Xsed -e \"\$sed_quote_subst\"\`\\\""
- ;;
- esac
- done
-
- case "$ltecho" in
- *'\$0 --fallback-echo"')
- ltecho=`$echo "X$ltecho" | $Xsed -e 's/\\\\\\\$0 --fallback-echo"$/$0 --fallback-echo"/'`
- ;;
- esac
-
- trap "$rm \"$ofile\"; exit 1" 1 2 15
- echo "creating $ofile"
- $rm "$ofile"
- cat <<EOF > "$ofile"
-#! $SHELL
-
-# `$echo "$ofile" | sed 's%^.*/%%'` - Provide generalized library-building support services.
-# Generated automatically by $PROGRAM (GNU $PACKAGE $VERSION$TIMESTAMP)
-# NOTE: Changes made to this file will be lost: look at ltconfig or ltmain.sh.
-#
-# Copyright (C) 1996-1999 Free Software Foundation, Inc.
-# Originally by Gordon Matzigkeit <gord@gnu.ai.mit.edu>, 1996
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful, but
-# WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-# General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
-#
-# As a special exception to the GNU General Public License, if you
-# distribute this file as part of a program that contains a
-# configuration script generated by Autoconf, you may include it under
-# the same distribution terms that you use for the rest of that program.
-
-# Sed that helps us avoid accidentally triggering echo(1) options like -n.
-Xsed="sed -e s/^X//"
-
-# The HP-UX ksh and POSIX shell print the target directory to stdout
-# if CDPATH is set.
-if test "X\${CDPATH+set}" = Xset; then CDPATH=:; export CDPATH; fi
-
-### BEGIN LIBTOOL CONFIG
-EOF
- cfgfile="$ofile"
- ;;
-
-*)
- # Double-quote the variables that need it (for aesthetics).
- for var in old_CC old_CFLAGS old_CPPFLAGS \
- old_LD old_LDFLAGS old_LIBS \
- old_NM old_RANLIB old_LN_S old_DLLTOOL old_OBJDUMP old_AS; do
- eval "$var=\\\"\$var\\\""
- done
-
- # Just create a config file.
- cfgfile="$ofile.cfg"
- trap "$rm \"$cfgfile\"; exit 1" 1 2 15
- echo "creating $cfgfile"
- $rm "$cfgfile"
- cat <<EOF > "$cfgfile"
-# `$echo "$cfgfile" | sed 's%^.*/%%'` - Libtool configuration file.
-# Generated automatically by $PROGRAM (GNU $PACKAGE $VERSION$TIMESTAMP)
-EOF
- ;;
-esac
-
-cat <<EOF >> "$cfgfile"
-# Libtool was configured as follows, on host `(hostname || uname -n) 2>/dev/null | sed 1q`:
-#
-# CC=$old_CC CFLAGS=$old_CFLAGS CPPFLAGS=$old_CPPFLAGS \\
-# LD=$old_LD LDFLAGS=$old_LDFLAGS LIBS=$old_LIBS \\
-# NM=$old_NM RANLIB=$old_RANLIB LN_S=$old_LN_S \\
-# DLLTOOL=$old_DLLTOOL OBJDUMP=$old_OBJDUMP AS=$old_AS \\
-# $0$ltconfig_args
-#
-# Compiler and other test output produced by $progname, useful for
-# debugging $progname, is in ./config.log if it exists.
-
-# The version of $progname that generated this script.
-LTCONFIG_VERSION=$LTCONFIG_VERSION
-
-# Shell to use when invoking shell scripts.
-SHELL=$LTSHELL
-
-# Whether or not to build shared libraries.
-build_libtool_libs=$enable_shared
-
-# Whether or not to build static libraries.
-build_old_libs=$enable_static
-
-# Whether or not to optimize for fast installation.
-fast_install=$enable_fast_install
-
-# The host system.
-host_alias=$host_alias
-host=$host
-
-# An echo program that does not interpret backslashes.
-echo=$ltecho
-
-# The archiver.
-AR=$AR
-
-# The default C compiler.
-CC=$CC
-
-# The linker used to build libraries.
-LD=$LD
-
-# Whether we need hard or soft links.
-LN_S=$LN_S
-
-# A BSD-compatible nm program.
-NM=$NM
-
-# Used on cygwin: DLL creation program.
-DLLTOOL="$DLLTOOL"
-
-# Used on cygwin: object dumper.
-OBJDUMP="$OBJDUMP"
-
-# Used on cygwin: assembler.
-AS="$AS"
-
-# The name of the directory that contains temporary libtool files.
-objdir=$objdir
-
-# How to create reloadable object files.
-reload_flag=$reload_flag
-reload_cmds=$reload_cmds
-
-# How to pass a linker flag through the compiler.
-wl=$wl
-
-# Object file suffix (normally "o").
-objext="$objext"
-
-# Old archive suffix (normally "a").
-libext="$libext"
-
-# Executable file suffix (normally "").
-exeext="$exeext"
-
-# Additional compiler flags for building library objects.
-pic_flag=$pic_flag
-
-# Does compiler simultaneously support -c and -o options?
-compiler_c_o=$compiler_c_o
-
-# Can we write directly to a .lo ?
-compiler_o_lo=$compiler_o_lo
-
-# Must we lock files when doing compilation ?
-need_locks=$need_locks
-
-# Do we need the lib prefix for modules?
-need_lib_prefix=$need_lib_prefix
-
-# Do we need a version for libraries?
-need_version=$need_version
-
-# Whether dlopen is supported.
-dlopen=$enable_dlopen
-
-# Whether dlopen of programs is supported.
-dlopen_self=$enable_dlopen_self
-
-# Whether dlopen of statically linked programs is supported.
-dlopen_self_static=$enable_dlopen_self_static
-
-# Compiler flag to prevent dynamic linking.
-link_static_flag=$link_static_flag
-
-# Compiler flag to turn off builtin functions.
-no_builtin_flag=$no_builtin_flag
-
-# Compiler flag to allow reflexive dlopens.
-export_dynamic_flag_spec=$export_dynamic_flag_spec
-
-# Compiler flag to generate shared objects directly from archives.
-whole_archive_flag_spec=$whole_archive_flag_spec
-
-# Compiler flag to generate thread-safe objects.
-thread_safe_flag_spec=$thread_safe_flag_spec
-
-# Library versioning type.
-version_type=$version_type
-
-# Format of library name prefix.
-libname_spec=$libname_spec
-
-# List of archive names. First name is the real one, the rest are links.
-# The last name is the one that the linker finds with -lNAME.
-library_names_spec=$library_names_spec
-
-# The coded name of the library, if different from the real name.
-soname_spec=$soname_spec
-
-# Commands used to build and install an old-style archive.
-RANLIB=$RANLIB
-old_archive_cmds=$old_archive_cmds
-old_postinstall_cmds=$old_postinstall_cmds
-old_postuninstall_cmds=$old_postuninstall_cmds
-
-# Create an old-style archive from a shared archive.
-old_archive_from_new_cmds=$old_archive_from_new_cmds
-
-# Commands used to build and install a shared archive.
-archive_cmds=$archive_cmds
-archive_expsym_cmds=$archive_expsym_cmds
-postinstall_cmds=$postinstall_cmds
-postuninstall_cmds=$postuninstall_cmds
-
-# Method to check whether dependent libraries are shared objects.
-deplibs_check_method=$deplibs_check_method
-
-# Command to use when deplibs_check_method == file_magic.
-file_magic_cmd=$file_magic_cmd
-
-# Flag that allows shared libraries with undefined symbols to be built.
-allow_undefined_flag=$allow_undefined_flag
-
-# Flag that forces no undefined symbols.
-no_undefined_flag=$no_undefined_flag
-
-# Commands used to finish a libtool library installation in a directory.
-finish_cmds=$finish_cmds
-
-# Same as above, but a single script fragment to be evaled but not shown.
-finish_eval=$finish_eval
-
-# Take the output of nm and produce a listing of raw symbols and C names.
-global_symbol_pipe=$global_symbol_pipe
-
-# Transform the output of nm into a proper C declaration
-global_symbol_to_cdecl=$global_symbol_to_cdecl
-
-# This is the shared library runtime path variable.
-runpath_var=$runpath_var
-
-# This is the shared library path variable.
-shlibpath_var=$shlibpath_var
-
-# Is shlibpath searched before the hard-coded library search path?
-shlibpath_overrides_runpath=$shlibpath_overrides_runpath
-
-# How to hardcode a shared library path into an executable.
-hardcode_action=$hardcode_action
-
-# Flag to hardcode \$libdir into a binary during linking.
-# This must work even if \$libdir does not exist.
-hardcode_libdir_flag_spec=$hardcode_libdir_flag_spec
-
-# Whether we need a single -rpath flag with a separated argument.
-hardcode_libdir_separator=$hardcode_libdir_separator
-
-# Set to yes if using DIR/libNAME.so during linking hardcodes DIR into the
-# resulting binary.
-hardcode_direct=$hardcode_direct
-
-# Set to yes if using the -LDIR flag during linking hardcodes DIR into the
-# resulting binary.
-hardcode_minus_L=$hardcode_minus_L
-
-# Set to yes if using SHLIBPATH_VAR=DIR during linking hardcodes DIR into
-# the resulting binary.
-hardcode_shlibpath_var=$hardcode_shlibpath_var
-
-# Compile-time system search path for libraries
-sys_lib_search_path_spec=$sys_lib_search_path_spec
-
-# Run-time system search path for libraries
-sys_lib_dlsearch_path_spec=$sys_lib_dlsearch_path_spec
-
-# Fix the shell variable \$srcfile for the compiler.
-fix_srcfile_path="$fix_srcfile_path"
-
-# Set to yes if exported symbols are required.
-always_export_symbols=$always_export_symbols
-
-# The commands to list exported symbols.
-export_symbols_cmds=$export_symbols_cmds
-
-# Symbols that should not be listed in the preloaded symbols.
-exclude_expsyms=$exclude_expsyms
-
-# Symbols that must always be exported.
-include_expsyms=$include_expsyms
-
-EOF
-
-case "$ltmain" in
-*.sh)
- echo '### END LIBTOOL CONFIG' >> "$ofile"
- echo >> "$ofile"
- case "$host_os" in
- aix3*)
- cat <<\EOF >> "$ofile"
-
-# AIX sometimes has problems with the GCC collect2 program. For some
-# reason, if we set the COLLECT_NAMES environment variable, the problems
-# vanish in a puff of smoke.
-if test "X${COLLECT_NAMES+set}" != Xset; then
- COLLECT_NAMES=
- export COLLECT_NAMES
-fi
-EOF
- ;;
- esac
-
- # Append the ltmain.sh script.
- sed '$q' "$ltmain" >> "$ofile" || (rm -f "$ofile"; exit 1)
- # We use sed instead of cat because bash on DJGPP gets confused if
- # it finds mixed CR/LF and LF-only lines. Since sed operates in
- # text mode, it properly converts lines to CR/LF. This bash problem
- # is reportedly fixed, but why not run on old versions too?
-
- chmod +x "$ofile"
- ;;
-
-*)
- # Compile the libtool program.
- echo "FIXME: would compile $ltmain"
- ;;
-esac
-
-test -n "$cache_file" || exit 0
-
-# AC_CACHE_SAVE
-trap '' 1 2 15
-cat > confcache <<\EOF
-# This file is a shell script that caches the results of configure
-# tests run on this system so they can be shared between configure
-# scripts and configure runs. It is not useful on other systems.
-# If it contains results you don't want to keep, you may remove or edit it.
-#
-# By default, configure uses ./config.cache as the cache file,
-# creating it if it does not exist already. You can give configure
-# the --cache-file=FILE option to use a different cache file; that is
-# what configure does when it calls configure scripts in
-# subdirectories, so they share the cache.
-# Giving --cache-file=/dev/null disables caching, for debugging configure.
-# config.status only pays attention to the cache file if you give it the
-# --recheck option to rerun configure.
-#
-EOF
-# The following way of writing the cache mishandles newlines in values,
-# but we know of no workaround that is simple, portable, and efficient.
-# So, don't put newlines in cache variables' values.
-# Ultrix sh set writes to stderr and can't be redirected directly,
-# and sets the high bit in the cache file unless we assign to the vars.
-(set) 2>&1 |
- case `(ac_space=' '; set | grep ac_space) 2>&1` in
- *ac_space=\ *)
- # `set' does not quote correctly, so add quotes (double-quote substitution
- # turns \\\\ into \\, and sed turns \\ into \).
- sed -n \
- -e "s/'/'\\\\''/g" \
- -e "s/^\\([a-zA-Z0-9_]*_cv_[a-zA-Z0-9_]*\\)=\\(.*\\)/\\1=\${\\1='\\2'}/p"
- ;;
- *)
- # `set' quotes correctly as required by POSIX, so do not add quotes.
- sed -n -e 's/^\([a-zA-Z0-9_]*_cv_[a-zA-Z0-9_]*\)=\(.*\)/\1=${\1=\2}/p'
- ;;
- esac >> confcache
-if cmp -s $cache_file confcache; then
- :
-else
- if test -w $cache_file; then
- echo "updating cache $cache_file"
- cat confcache > $cache_file
- else
- echo "not updating unwritable cache $cache_file"
- fi
-fi
-rm -f confcache
-
-exit 0
-
-# Local Variables:
-# mode:shell-script
-# sh-indentation:2
-# End:
-#! /bin/bash
diff --git a/bdb/dist/ltmain.sh b/bdb/dist/ltmain.sh
index 4feadbfb759..f07d424527d 100644
--- a/bdb/dist/ltmain.sh
+++ b/bdb/dist/ltmain.sh
@@ -1,7 +1,8 @@
# ltmain.sh - Provide generalized library-building support services.
-# NOTE: Changing this file will not affect anything until you rerun ltconfig.
+# NOTE: Changing this file will not affect anything until you rerun configure.
#
-# Copyright (C) 1996-1999 Free Software Foundation, Inc.
+# Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001
+# Free Software Foundation, Inc.
# Originally by Gordon Matzigkeit <gord@gnu.ai.mit.edu>, 1996
#
# This program is free software; you can redistribute it and/or modify
@@ -54,8 +55,8 @@ modename="$progname"
# Constants.
PROGRAM=ltmain.sh
PACKAGE=libtool
-VERSION=1.3.5
-TIMESTAMP=" (1.385.2.206 2000/05/27 11:12:27)"
+VERSION=1.4.2
+TIMESTAMP=" (1.922.2.53 2001/09/11 03:18:52)"
default_mode=
help="Try \`$progname --help' for more information."
@@ -83,11 +84,8 @@ if test "${LANG+set}" = set; then
save_LANG="$LANG"; LANG=C; export LANG
fi
-if test "$LTCONFIG_VERSION" != "$VERSION"; then
- echo "$modename: ltconfig version \`$LTCONFIG_VERSION' does not match $PROGRAM version \`$VERSION'" 1>&2
- echo "Fatal configuration error. See the $PACKAGE docs for more information." 1>&2
- exit 1
-fi
+# Make sure IFS has a sensible default
+: ${IFS=" "}
if test "$build_libtool_libs" != yes && test "$build_old_libs" != yes; then
echo "$modename: not configured to build any kind of library" 1>&2
@@ -113,16 +111,16 @@ do
arg="$1"
shift
- case "$arg" in
+ case $arg in
-*=*) optarg=`$echo "X$arg" | $Xsed -e 's/[-_a-zA-Z0-9]*=//'` ;;
*) optarg= ;;
esac
# If the previous option needs an argument, assign it.
if test -n "$prev"; then
- case "$prev" in
+ case $prev in
execute_dlfiles)
- eval "$prev=\"\$$prev \$arg\""
+ execute_dlfiles="$execute_dlfiles $arg"
;;
*)
eval "$prev=\$arg"
@@ -135,7 +133,7 @@ do
fi
# Have we seen a non-optional argument yet?
- case "$arg" in
+ case $arg in
--help)
show_help=yes
;;
@@ -146,7 +144,7 @@ do
;;
--config)
- sed -e '1,/^### BEGIN LIBTOOL CONFIG/d' -e '/^### END LIBTOOL CONFIG/,$d' $0
+ sed -e '1,/^# ### BEGIN LIBTOOL CONFIG/d' -e '/^# ### END LIBTOOL CONFIG/,$d' $0
exit 0
;;
@@ -207,16 +205,21 @@ if test -n "$prevopt"; then
exit 1
fi
+# If this variable is set in any of the actions, the command in it
+# will be execed at the end. This prevents here-documents from being
+# left over by shells.
+exec_cmd=
+
if test -z "$show_help"; then
# Infer the operation mode.
if test -z "$mode"; then
- case "$nonopt" in
+ case $nonopt in
*cc | *++ | gcc* | *-gcc*)
mode=link
for arg
do
- case "$arg" in
+ case $arg in
-c)
mode=compile
break
@@ -261,12 +264,13 @@ if test -z "$show_help"; then
help="Try \`$modename --help --mode=$mode' for more information."
# These modes are in order of execution frequency so that they run quickly.
- case "$mode" in
+ case $mode in
# libtool compile mode
compile)
modename="$modename: compile"
# Get the compilation command and the source file.
base_compile=
+ prev=
lastarg=
srcfile="$nonopt"
suppress_output=
@@ -274,8 +278,34 @@ if test -z "$show_help"; then
user_target=no
for arg
do
+ case $prev in
+ "") ;;
+ xcompiler)
+ # Aesthetically quote the previous argument.
+ prev=
+ lastarg=`$echo "X$arg" | $Xsed -e "$sed_quote_subst"`
+
+ case $arg in
+ # Double-quote args containing other shell metacharacters.
+ # Many Bourne shells cannot handle close brackets correctly
+ # in scan sets, so we specify it separately.
+ *[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \ ]*|*]*|"")
+ arg="\"$arg\""
+ ;;
+ esac
+
+ # Add the previous argument to base_compile.
+ if test -z "$base_compile"; then
+ base_compile="$lastarg"
+ else
+ base_compile="$base_compile $lastarg"
+ fi
+ continue
+ ;;
+ esac
+
# Accept any command-line options.
- case "$arg" in
+ case $arg in
-o)
if test "$user_target" != "no"; then
$echo "$modename: you cannot specify \`-o' more than once" 1>&2
@@ -288,9 +318,53 @@ if test -z "$show_help"; then
build_old_libs=yes
continue
;;
+
+ -prefer-pic)
+ pic_mode=yes
+ continue
+ ;;
+
+ -prefer-non-pic)
+ pic_mode=no
+ continue
+ ;;
+
+ -Xcompiler)
+ prev=xcompiler
+ continue
+ ;;
+
+ -Wc,*)
+ args=`$echo "X$arg" | $Xsed -e "s/^-Wc,//"`
+ lastarg=
+ save_ifs="$IFS"; IFS=','
+ for arg in $args; do
+ IFS="$save_ifs"
+
+ # Double-quote args containing other shell metacharacters.
+ # Many Bourne shells cannot handle close brackets correctly
+ # in scan sets, so we specify it separately.
+ case $arg in
+ *[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \ ]*|*]*|"")
+ arg="\"$arg\""
+ ;;
+ esac
+ lastarg="$lastarg $arg"
+ done
+ IFS="$save_ifs"
+ lastarg=`$echo "X$lastarg" | $Xsed -e "s/^ //"`
+
+ # Add the arguments to base_compile.
+ if test -z "$base_compile"; then
+ base_compile="$lastarg"
+ else
+ base_compile="$base_compile $lastarg"
+ fi
+ continue
+ ;;
esac
- case "$user_target" in
+ case $user_target in
next)
# The next one is the -o target name
user_target=yes
@@ -316,10 +390,10 @@ if test -z "$show_help"; then
lastarg=`$echo "X$lastarg" | $Xsed -e "$sed_quote_subst"`
# Double-quote args containing other shell metacharacters.
- # Many Bourne shells cannot handle close brackets correctly in scan
- # sets, so we specify it separately.
- case "$lastarg" in
- *[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \ ]*|*]*)
+ # Many Bourne shells cannot handle close brackets correctly
+ # in scan sets, so we specify it separately.
+ case $lastarg in
+ *[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \ ]*|*]*|"")
lastarg="\"$lastarg\""
;;
esac
@@ -332,7 +406,7 @@ if test -z "$show_help"; then
fi
done
- case "$user_target" in
+ case $user_target in
set)
;;
no)
@@ -348,7 +422,7 @@ if test -z "$show_help"; then
# Recognize several different file suffixes.
# If the user specifies -o file.o, it is replaced with file.lo
xform='[cCFSfmso]'
- case "$libobj" in
+ case $libobj in
*.ada) xform=ada ;;
*.adb) xform=adb ;;
*.ads) xform=ads ;;
@@ -363,7 +437,7 @@ if test -z "$show_help"; then
libobj=`$echo "X$libobj" | $Xsed -e "s/\.$xform$/.lo/"`
- case "$libobj" in
+ case $libobj in
*.lo) obj=`$echo "X$libobj" | $Xsed -e "$lo2o"` ;;
*)
$echo "$modename: cannot determine name of library object from \`$libobj'" 1>&2
@@ -387,10 +461,21 @@ if test -z "$show_help"; then
$run $rm $removelist
trap "$run $rm $removelist; exit 1" 1 2 15
+ # On Cygwin there's no "real" PIC flag so we must build both object types
+ case $host_os in
+ cygwin* | mingw* | pw32* | os2*)
+ pic_mode=default
+ ;;
+ esac
+ if test $pic_mode = no && test "$deplibs_check_method" != pass_all; then
+ # non-PIC code in shared libraries is not supported
+ pic_mode=default
+ fi
+
# Calculate the filename of the output object if compiler does
# not support -o with -c
if test "$compiler_c_o" = no; then
- output_obj=`$echo "X$srcfile" | $Xsed -e 's%^.*/%%' -e 's%\..*$%%'`.${objext}
+ output_obj=`$echo "X$srcfile" | $Xsed -e 's%^.*/%%' -e 's%\.[^.]*$%%'`.${objext}
lockfile="$output_obj.lock"
removelist="$removelist $output_obj $lockfile"
trap "$run $rm $removelist; exit 1" 1 2 15
@@ -402,7 +487,7 @@ if test -z "$show_help"; then
# Lock this critical section if it is needed
# We use this script file to make the link, it avoids creating a new file
if test "$need_locks" = yes; then
- until ln "$0" "$lockfile" 2>/dev/null; do
+ until $run ln "$0" "$lockfile" 2>/dev/null; do
$show "Waiting for $lockfile to be removed"
sleep 2
done
@@ -434,8 +519,13 @@ compiler."
# Without this assignment, base_compile gets emptied.
fbsd_hideous_sh_bug=$base_compile
- # All platforms use -DPIC, to notify preprocessed assembler code.
- command="$base_compile $srcfile $pic_flag -DPIC"
+ if test "$pic_mode" != no; then
+ # All platforms use -DPIC, to notify preprocessed assembler code.
+ command="$base_compile $srcfile $pic_flag -DPIC"
+ else
+ # Don't build PIC code
+ command="$base_compile $srcfile"
+ fi
if test "$build_old_libs" = yes; then
lo_libobj="$libobj"
dir=`$echo "X$libobj" | $Xsed -e 's%/[^/]*$%%'`
@@ -506,7 +596,8 @@ compiler."
fi
# If we have no pic_flag, then copy the object into place and finish.
- if test -z "$pic_flag" && test "$build_old_libs" = yes; then
+ if (test -z "$pic_flag" || test "$pic_mode" != default) &&
+ test "$build_old_libs" = yes; then
# Rename the .lo from within objdir to obj
if test -f $obj; then
$show $rm $obj
@@ -532,6 +623,10 @@ compiler."
# Now arrange that obj and lo_libobj become the same file
$show "(cd $xdir && $LN_S $baseobj $libobj)"
if $run eval '(cd $xdir && $LN_S $baseobj $libobj)'; then
+ # Unlock the critical section if it was locked
+ if test "$need_locks" != no; then
+ $run $rm "$lockfile"
+ fi
exit 0
else
error=$?
@@ -546,7 +641,13 @@ compiler."
# Only build a position-dependent object if we build old libraries.
if test "$build_old_libs" = yes; then
- command="$base_compile $srcfile"
+ if test "$pic_mode" != yes; then
+ # Don't build PIC code
+ command="$base_compile $srcfile"
+ else
+ # All platforms use -DPIC, to notify preprocessed assembler code.
+ command="$base_compile $srcfile $pic_flag -DPIC"
+ fi
if test "$compiler_c_o" = yes; then
command="$command -o $obj"
output_obj="$obj"
@@ -612,17 +713,17 @@ compiler."
# Unlock the critical section if it was locked
if test "$need_locks" != no; then
- $rm "$lockfile"
+ $run $rm "$lockfile"
fi
exit 0
;;
# libtool link mode
- link)
+ link | relink)
modename="$modename: link"
- case "$host" in
- *-*-cygwin* | *-*-mingw* | *-*-os2*)
+ case $host in
+ *-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-os2*)
# It is impossible to link a dll without this setting, and
# we shouldn't force the makefile maintainer to figure out
# which system we are compiling for in order to pass an extra
@@ -635,179 +736,12 @@ compiler."
# -no-undefined on the libtool link line when we can be certain
# that all symbols are satisfied, otherwise we get a static library.
allow_undefined=yes
-
- # This is a source program that is used to create dlls on Windows
- # Don't remove nor modify the starting and closing comments
-# /* ltdll.c starts here */
-# #define WIN32_LEAN_AND_MEAN
-# #include <windows.h>
-# #undef WIN32_LEAN_AND_MEAN
-# #include <stdio.h>
-#
-# #ifndef __CYGWIN__
-# # ifdef __CYGWIN32__
-# # define __CYGWIN__ __CYGWIN32__
-# # endif
-# #endif
-#
-# #ifdef __cplusplus
-# extern "C" {
-# #endif
-# BOOL APIENTRY DllMain (HINSTANCE hInst, DWORD reason, LPVOID reserved);
-# #ifdef __cplusplus
-# }
-# #endif
-#
-# #ifdef __CYGWIN__
-# #include <cygwin/cygwin_dll.h>
-# DECLARE_CYGWIN_DLL( DllMain );
-# #endif
-# HINSTANCE __hDllInstance_base;
-#
-# BOOL APIENTRY
-# DllMain (HINSTANCE hInst, DWORD reason, LPVOID reserved)
-# {
-# __hDllInstance_base = hInst;
-# return TRUE;
-# }
-# /* ltdll.c ends here */
- # This is a source program that is used to create import libraries
- # on Windows for dlls which lack them. Don't remove nor modify the
- # starting and closing comments
-# /* impgen.c starts here */
-# /* Copyright (C) 1999 Free Software Foundation, Inc.
-#
-# This file is part of GNU libtool.
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
-# */
-#
-# #include <stdio.h> /* for printf() */
-# #include <unistd.h> /* for open(), lseek(), read() */
-# #include <fcntl.h> /* for O_RDONLY, O_BINARY */
-# #include <string.h> /* for strdup() */
-#
-# static unsigned int
-# pe_get16 (fd, offset)
-# int fd;
-# int offset;
-# {
-# unsigned char b[2];
-# lseek (fd, offset, SEEK_SET);
-# read (fd, b, 2);
-# return b[0] + (b[1]<<8);
-# }
-#
-# static unsigned int
-# pe_get32 (fd, offset)
-# int fd;
-# int offset;
-# {
-# unsigned char b[4];
-# lseek (fd, offset, SEEK_SET);
-# read (fd, b, 4);
-# return b[0] + (b[1]<<8) + (b[2]<<16) + (b[3]<<24);
-# }
-#
-# static unsigned int
-# pe_as32 (ptr)
-# void *ptr;
-# {
-# unsigned char *b = ptr;
-# return b[0] + (b[1]<<8) + (b[2]<<16) + (b[3]<<24);
-# }
-#
-# int
-# main (argc, argv)
-# int argc;
-# char *argv[];
-# {
-# int dll;
-# unsigned long pe_header_offset, opthdr_ofs, num_entries, i;
-# unsigned long export_rva, export_size, nsections, secptr, expptr;
-# unsigned long name_rvas, nexp;
-# unsigned char *expdata, *erva;
-# char *filename, *dll_name;
-#
-# filename = argv[1];
-#
-# dll = open(filename, O_RDONLY|O_BINARY);
-# if (!dll)
-# return 1;
-#
-# dll_name = filename;
-#
-# for (i=0; filename[i]; i++)
-# if (filename[i] == '/' || filename[i] == '\\' || filename[i] == ':')
-# dll_name = filename + i +1;
-#
-# pe_header_offset = pe_get32 (dll, 0x3c);
-# opthdr_ofs = pe_header_offset + 4 + 20;
-# num_entries = pe_get32 (dll, opthdr_ofs + 92);
-#
-# if (num_entries < 1) /* no exports */
-# return 1;
-#
-# export_rva = pe_get32 (dll, opthdr_ofs + 96);
-# export_size = pe_get32 (dll, opthdr_ofs + 100);
-# nsections = pe_get16 (dll, pe_header_offset + 4 +2);
-# secptr = (pe_header_offset + 4 + 20 +
-# pe_get16 (dll, pe_header_offset + 4 + 16));
-#
-# expptr = 0;
-# for (i = 0; i < nsections; i++)
-# {
-# char sname[8];
-# unsigned long secptr1 = secptr + 40 * i;
-# unsigned long vaddr = pe_get32 (dll, secptr1 + 12);
-# unsigned long vsize = pe_get32 (dll, secptr1 + 16);
-# unsigned long fptr = pe_get32 (dll, secptr1 + 20);
-# lseek(dll, secptr1, SEEK_SET);
-# read(dll, sname, 8);
-# if (vaddr <= export_rva && vaddr+vsize > export_rva)
-# {
-# expptr = fptr + (export_rva - vaddr);
-# if (export_rva + export_size > vaddr + vsize)
-# export_size = vsize - (export_rva - vaddr);
-# break;
-# }
-# }
-#
-# expdata = (unsigned char*)malloc(export_size);
-# lseek (dll, expptr, SEEK_SET);
-# read (dll, expdata, export_size);
-# erva = expdata - export_rva;
-#
-# nexp = pe_as32 (expdata+24);
-# name_rvas = pe_as32 (expdata+32);
-#
-# printf ("EXPORTS\n");
-# for (i = 0; i<nexp; i++)
-# {
-# unsigned long name_rva = pe_as32 (erva+name_rvas+i*4);
-# printf ("\t%s @ %ld ;\n", erva+name_rva, 1+ i);
-# }
-#
-# return 0;
-# }
-# /* impgen.c ends here */
;;
*)
allow_undefined=yes
;;
esac
+ libtool_args="$nonopt"
compile_command="$nonopt"
finalize_command="$nonopt"
@@ -818,18 +752,12 @@ compiler."
convenience=
old_convenience=
deplibs=
- linkopts=
+ old_deplibs=
+ compiler_flags=
+ linker_flags=
+ dllsearchpath=
+ lib_search_path=`pwd`
- if test -n "$shlibpath_var"; then
- # get the directories listed in $shlibpath_var
- eval lib_search_path=\`\$echo \"X \${$shlibpath_var}\" \| \$Xsed -e \'s/:/ /g\'\`
- else
- lib_search_path=
- fi
- # now prepend the system-specific ones
- eval lib_search_path=\"$sys_lib_search_path_spec\$lib_search_path\"
- eval sys_lib_dlsearch_path=\"$sys_lib_dlsearch_path_spec\"
-
avoid_version=no
dlfiles=
dlprefiles=
@@ -839,9 +767,9 @@ compiler."
export_symbols_regex=
generated=
libobjs=
- link_against_libtool_libs=
ltlibs=
module=no
+ no_install=no
objs=
prefer_static_libs=no
preload=no
@@ -858,7 +786,7 @@ compiler."
# We need to know -static, to get the right output filenames.
for arg
do
- case "$arg" in
+ case $arg in
-all-static | -static)
if test "X$arg" = "X-all-static"; then
if test "$build_libtool_libs" = yes && test -z "$link_static_flag"; then
@@ -887,17 +815,24 @@ compiler."
while test $# -gt 0; do
arg="$1"
shift
+ case $arg in
+ *[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \ ]*|*]*|"")
+ qarg=\"`$echo "X$arg" | $Xsed -e "$sed_quote_subst"`\" ### testsuite: skip nested quoting test
+ ;;
+ *) qarg=$arg ;;
+ esac
+ libtool_args="$libtool_args $qarg"
# If the previous option needs an argument, assign it.
if test -n "$prev"; then
- case "$prev" in
+ case $prev in
output)
compile_command="$compile_command @OUTPUT@"
finalize_command="$finalize_command @OUTPUT@"
;;
esac
- case "$prev" in
+ case $prev in
dlfiles|dlprefiles)
if test "$preload" = no; then
# Add the symbol object into the linking commands.
@@ -905,7 +840,7 @@ compiler."
finalize_command="$finalize_command @SYMFILE@"
preload=yes
fi
- case "$arg" in
+ case $arg in
*.la | *.lo) ;; # We handle these cases below.
force)
if test "$dlself" = no; then
@@ -934,6 +869,7 @@ compiler."
dlprefiles="$dlprefiles $arg"
fi
prev=
+ continue
;;
esac
;;
@@ -958,7 +894,7 @@ compiler."
;;
rpath | xrpath)
# We need an absolute path.
- case "$arg" in
+ case $arg in
[\\/]* | [A-Za-z]:[\\/]*) ;;
*)
$echo "$modename: only absolute run-paths are allowed" 1>&2
@@ -979,17 +915,32 @@ compiler."
prev=
continue
;;
+ xcompiler)
+ compiler_flags="$compiler_flags $qarg"
+ prev=
+ compile_command="$compile_command $qarg"
+ finalize_command="$finalize_command $qarg"
+ continue
+ ;;
+ xlinker)
+ linker_flags="$linker_flags $qarg"
+ compiler_flags="$compiler_flags $wl$qarg"
+ prev=
+ compile_command="$compile_command $wl$qarg"
+ finalize_command="$finalize_command $wl$qarg"
+ continue
+ ;;
*)
eval "$prev=\"\$arg\""
prev=
continue
;;
esac
- fi
+ fi # test -n $prev
prevarg="$arg"
- case "$arg" in
+ case $arg in
-all-static)
if test -n "$link_static_flag"; then
compile_command="$compile_command $link_static_flag"
@@ -1026,7 +977,7 @@ compiler."
-export-symbols | -export-symbols-regex)
if test -n "$export_symbols" || test -n "$export_symbols_regex"; then
- $echo "$modename: not more than one -exported-symbols argument allowed"
+ $echo "$modename: more than one -exported-symbols argument is not allowed"
exit 1
fi
if test "X$arg" = "X-export-symbols"; then
@@ -1037,58 +988,76 @@ compiler."
continue
;;
+ # The native IRIX linker understands -LANG:*, -LIST:* and -LNO:*
+ # so, if we see these flags be careful not to treat them like -L
+ -L[A-Z][A-Z]*:*)
+ case $with_gcc/$host in
+ no/*-*-irix*)
+ compile_command="$compile_command $arg"
+ finalize_command="$finalize_command $arg"
+ ;;
+ esac
+ continue
+ ;;
+
-L*)
dir=`$echo "X$arg" | $Xsed -e 's/^-L//'`
# We need an absolute path.
- case "$dir" in
+ case $dir in
[\\/]* | [A-Za-z]:[\\/]*) ;;
*)
absdir=`cd "$dir" && pwd`
if test -z "$absdir"; then
- $echo "$modename: warning: cannot determine absolute directory name of \`$dir'" 1>&2
- $echo "$modename: passing it literally to the linker, although it might fail" 1>&2
- absdir="$dir"
+ $echo "$modename: cannot determine absolute directory name of \`$dir'" 1>&2
+ exit 1
fi
dir="$absdir"
;;
esac
- case " $deplibs " in
- *" $arg "*) ;;
- *) deplibs="$deplibs $arg";;
- esac
- case " $lib_search_path " in
- *" $dir "*) ;;
- *) lib_search_path="$lib_search_path $dir";;
+ case "$deplibs " in
+ *" -L$dir "*) ;;
+ *)
+ deplibs="$deplibs -L$dir"
+ lib_search_path="$lib_search_path $dir"
+ ;;
esac
- case "$host" in
- *-*-cygwin* | *-*-mingw* | *-*-os2*)
- dllsearchdir=`cd "$dir" && pwd || echo "$dir"`
- case ":$dllsearchpath:" in
- ::) dllsearchpath="$dllsearchdir";;
- *":$dllsearchdir:"*) ;;
- *) dllsearchpath="$dllsearchpath:$dllsearchdir";;
+ case $host in
+ *-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-os2*)
+ case :$dllsearchpath: in
+ *":$dir:"*) ;;
+ *) dllsearchpath="$dllsearchpath:$dir";;
esac
;;
esac
+ continue
;;
-l*)
- if test "$arg" = "-lc"; then
- case "$host" in
- *-*-cygwin* | *-*-mingw* | *-*-os2* | *-*-beos*)
- # These systems don't actually have c library (as such)
+ if test "X$arg" = "X-lc" || test "X$arg" = "X-lm"; then
+ case $host in
+ *-*-cygwin* | *-*-pw32* | *-*-beos*)
+ # These systems don't actually have a C or math library (as such)
continue
;;
+ *-*-mingw* | *-*-os2*)
+ # These systems don't actually have a C library (as such)
+ test "X$arg" = "X-lc" && continue
+ ;;
+ *-*-openbsd*)
+ # Do not include libc due to us having libc/libc_r.
+ test "X$arg" = "X-lc" && continue
+ ;;
esac
- elif test "$arg" = "-lm"; then
- case "$host" in
- *-*-cygwin* | *-*-beos*)
- # These systems don't actually have math library (as such)
+ elif test "X$arg" = "X-lc_r"; then
+ case $host in
+ *-*-openbsd*)
+ # Do not include libc_r directly, use -pthread flag.
continue
;;
esac
fi
deplibs="$deplibs $arg"
+ continue
;;
-module)
@@ -1096,6 +1065,32 @@ compiler."
continue
;;
+ #### Local change for Sleepycat's Berkeley DB [#6117]:
+ -jnimodule)
+ module=yes
+ jnimodule=yes
+ continue
+ ;;
+
+ -no-fast-install)
+ fast_install=no
+ continue
+ ;;
+
+ -no-install)
+ case $host in
+ *-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-os2*)
+ # The PATH hackery in wrapper scripts is required on Windows
+ # in order for the loader to find any dlls it needs.
+ $echo "$modename: warning: \`-no-install' is ignored for $host" 1>&2
+ $echo "$modename: warning: assuming \`-no-fast-install' instead" 1>&2
+ fast_install=no
+ ;;
+ *) no_install=yes ;;
+ esac
+ continue
+ ;;
+
-no-undefined)
allow_undefined=no
continue
@@ -1121,7 +1116,7 @@ compiler."
-R*)
dir=`$echo "X$arg" | $Xsed -e 's/^-R//'`
# We need an absolute path.
- case "$dir" in
+ case $dir in
[\\/]* | [A-Za-z]:[\\/]*) ;;
*)
$echo "$modename: only absolute run-paths are allowed" 1>&2
@@ -1136,11 +1131,11 @@ compiler."
;;
-static)
- # If we have no pic_flag, then this is the same as -all-static.
- if test -z "$pic_flag" && test -n "$link_static_flag"; then
- compile_command="$compile_command $link_static_flag"
- finalize_command="$finalize_command $link_static_flag"
- fi
+ # The effects of -static are defined in a previous loop.
+ # We used to do the same as -all-static on platforms that
+ # didn't have a PIC flag, but the assumption that the effects
+ # would be equivalent was wrong. It would break on at least
+ # Digital Unix and AIX.
continue
;;
@@ -1154,28 +1149,71 @@ compiler."
continue
;;
+ -Wc,*)
+ args=`$echo "X$arg" | $Xsed -e "$sed_quote_subst" -e 's/^-Wc,//'`
+ arg=
+ save_ifs="$IFS"; IFS=','
+ for flag in $args; do
+ IFS="$save_ifs"
+ case $flag in
+ *[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \ ]*|*]*|"")
+ flag="\"$flag\""
+ ;;
+ esac
+ arg="$arg $wl$flag"
+ compiler_flags="$compiler_flags $flag"
+ done
+ IFS="$save_ifs"
+ arg=`$echo "X$arg" | $Xsed -e "s/^ //"`
+ ;;
+
+ -Wl,*)
+ args=`$echo "X$arg" | $Xsed -e "$sed_quote_subst" -e 's/^-Wl,//'`
+ arg=
+ save_ifs="$IFS"; IFS=','
+ for flag in $args; do
+ IFS="$save_ifs"
+ case $flag in
+ *[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \ ]*|*]*|"")
+ flag="\"$flag\""
+ ;;
+ esac
+ arg="$arg $wl$flag"
+ compiler_flags="$compiler_flags $wl$flag"
+ linker_flags="$linker_flags $flag"
+ done
+ IFS="$save_ifs"
+ arg=`$echo "X$arg" | $Xsed -e "s/^ //"`
+ ;;
+
+ -Xcompiler)
+ prev=xcompiler
+ continue
+ ;;
+
+ -Xlinker)
+ prev=xlinker
+ continue
+ ;;
+
# Some other compiler flag.
-* | +*)
# Unknown arguments in both finalize_command and compile_command need
# to be aesthetically quoted because they are evaled later.
arg=`$echo "X$arg" | $Xsed -e "$sed_quote_subst"`
- case "$arg" in
- *[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \ ]*|*]*)
+ case $arg in
+ *[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \ ]*|*]*|"")
arg="\"$arg\""
;;
esac
;;
- *.o | *.obj | *.a | *.lib)
- # A standard object.
- objs="$objs $arg"
- ;;
-
- *.lo)
- # A library object.
+ *.lo | *.$objext)
+ # A library or standard object.
if test "$prev" = dlfiles; then
- dlfiles="$dlfiles $arg"
- if test "$build_libtool_libs" = yes && test "$dlopen" = yes; then
+ # This file was specified with -dlopen.
+ if test "$build_libtool_libs" = yes && test "$dlopen_support" = yes; then
+ dlfiles="$dlfiles $arg"
prev=
continue
else
@@ -1188,357 +1226,890 @@ compiler."
# Preload the old-style object.
dlprefiles="$dlprefiles "`$echo "X$arg" | $Xsed -e "$lo2o"`
prev=
+ else
+ case $arg in
+ *.lo) libobjs="$libobjs $arg" ;;
+ *) objs="$objs $arg" ;;
+ esac
fi
- libobjs="$libobjs $arg"
+ ;;
+
+ *.$libext)
+ # An archive.
+ deplibs="$deplibs $arg"
+ old_deplibs="$old_deplibs $arg"
+ continue
;;
*.la)
# A libtool-controlled library.
- dlname=
- libdir=
- library_names=
- old_library=
+ if test "$prev" = dlfiles; then
+ # This library was specified with -dlopen.
+ dlfiles="$dlfiles $arg"
+ prev=
+ elif test "$prev" = dlprefiles; then
+ # The library was specified with -dlpreopen.
+ dlprefiles="$dlprefiles $arg"
+ prev=
+ else
+ deplibs="$deplibs $arg"
+ fi
+ continue
+ ;;
+
+ # Some other compiler argument.
+ *)
+ # Unknown arguments in both finalize_command and compile_command need
+ # to be aesthetically quoted because they are evaled later.
+ arg=`$echo "X$arg" | $Xsed -e "$sed_quote_subst"`
+ case $arg in
+ *[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \ ]*|*]*|"")
+ arg="\"$arg\""
+ ;;
+ esac
+ ;;
+ esac # arg
+
+ # Now actually substitute the argument into the commands.
+ if test -n "$arg"; then
+ compile_command="$compile_command $arg"
+ finalize_command="$finalize_command $arg"
+ fi
+ done # argument parsing loop
+
+ if test -n "$prev"; then
+ $echo "$modename: the \`$prevarg' option requires an argument" 1>&2
+ $echo "$help" 1>&2
+ exit 1
+ fi
+
+ if test "$export_dynamic" = yes && test -n "$export_dynamic_flag_spec"; then
+ eval arg=\"$export_dynamic_flag_spec\"
+ compile_command="$compile_command $arg"
+ finalize_command="$finalize_command $arg"
+ fi
+
+ # calculate the name of the file, without its directory
+ outputname=`$echo "X$output" | $Xsed -e 's%^.*/%%'`
+ libobjs_save="$libobjs"
+
+ if test -n "$shlibpath_var"; then
+ # get the directories listed in $shlibpath_var
+ eval shlib_search_path=\`\$echo \"X\${$shlibpath_var}\" \| \$Xsed -e \'s/:/ /g\'\`
+ else
+ shlib_search_path=
+ fi
+ eval sys_lib_search_path=\"$sys_lib_search_path_spec\"
+ eval sys_lib_dlsearch_path=\"$sys_lib_dlsearch_path_spec\"
+
+ output_objdir=`$echo "X$output" | $Xsed -e 's%/[^/]*$%%'`
+ if test "X$output_objdir" = "X$output"; then
+ output_objdir="$objdir"
+ else
+ output_objdir="$output_objdir/$objdir"
+ fi
+ # Create the object directory.
+ if test ! -d $output_objdir; then
+ $show "$mkdir $output_objdir"
+ $run $mkdir $output_objdir
+ status=$?
+ if test $status -ne 0 && test ! -d $output_objdir; then
+ exit $status
+ fi
+ fi
+
+ # Determine the type of output
+ case $output in
+ "")
+ $echo "$modename: you must specify an output file" 1>&2
+ $echo "$help" 1>&2
+ exit 1
+ ;;
+ *.$libext) linkmode=oldlib ;;
+ *.lo | *.$objext) linkmode=obj ;;
+ *.la) linkmode=lib ;;
+ *) linkmode=prog ;; # Anything else should be a program.
+ esac
+
+ specialdeplibs=
+ libs=
+ # Find all interdependent deplibs by searching for libraries
+ # that are linked more than once (e.g. -la -lb -la)
+ for deplib in $deplibs; do
+ case "$libs " in
+ *" $deplib "*) specialdeplibs="$specialdeplibs $deplib" ;;
+ esac
+ libs="$libs $deplib"
+ done
+ deplibs=
+ newdependency_libs=
+ newlib_search_path=
+ need_relink=no # whether we're linking any uninstalled libtool libraries
+ notinst_deplibs= # not-installed libtool libraries
+ notinst_path= # paths that contain not-installed libtool libraries
+ case $linkmode in
+ lib)
+ passes="conv link"
+ for file in $dlfiles $dlprefiles; do
+ case $file in
+ *.la) ;;
+ *)
+ $echo "$modename: libraries can \`-dlopen' only libtool libraries: $file" 1>&2
+ exit 1
+ ;;
+ esac
+ done
+ ;;
+ prog)
+ compile_deplibs=
+ finalize_deplibs=
+ alldeplibs=no
+ newdlfiles=
+ newdlprefiles=
+ passes="conv scan dlopen dlpreopen link"
+ ;;
+ *) passes="conv"
+ ;;
+ esac
+ for pass in $passes; do
+ if test $linkmode = prog; then
+ # Determine which files to process
+ case $pass in
+ dlopen)
+ libs="$dlfiles"
+ save_deplibs="$deplibs" # Collect dlpreopened libraries
+ deplibs=
+ ;;
+ dlpreopen) libs="$dlprefiles" ;;
+ link) libs="$deplibs %DEPLIBS% $dependency_libs" ;;
+ esac
+ fi
+ for deplib in $libs; do
+ lib=
+ found=no
+ case $deplib in
+ -l*)
+ if test $linkmode = oldlib && test $linkmode = obj; then
+ $echo "$modename: warning: \`-l' is ignored for archives/objects: $deplib" 1>&2
+ continue
+ fi
+ if test $pass = conv; then
+ deplibs="$deplib $deplibs"
+ continue
+ fi
+ name=`$echo "X$deplib" | $Xsed -e 's/^-l//'`
+ for searchdir in $newlib_search_path $lib_search_path $sys_lib_search_path $shlib_search_path; do
+ # Search the libtool library
+ lib="$searchdir/lib${name}.la"
+ if test -f "$lib"; then
+ found=yes
+ break
+ fi
+ done
+ if test "$found" != yes; then
+ # deplib doesn't seem to be a libtool library
+ if test "$linkmode,$pass" = "prog,link"; then
+ compile_deplibs="$deplib $compile_deplibs"
+ finalize_deplibs="$deplib $finalize_deplibs"
+ else
+ deplibs="$deplib $deplibs"
+ test $linkmode = lib && newdependency_libs="$deplib $newdependency_libs"
+ fi
+ continue
+ fi
+ ;; # -l
+ -L*)
+ case $linkmode in
+ lib)
+ deplibs="$deplib $deplibs"
+ test $pass = conv && continue
+ newdependency_libs="$deplib $newdependency_libs"
+ newlib_search_path="$newlib_search_path "`$echo "X$deplib" | $Xsed -e 's/^-L//'`
+ ;;
+ prog)
+ if test $pass = conv; then
+ deplibs="$deplib $deplibs"
+ continue
+ fi
+ if test $pass = scan; then
+ deplibs="$deplib $deplibs"
+ newlib_search_path="$newlib_search_path "`$echo "X$deplib" | $Xsed -e 's/^-L//'`
+ else
+ compile_deplibs="$deplib $compile_deplibs"
+ finalize_deplibs="$deplib $finalize_deplibs"
+ fi
+ ;;
+ *)
+ $echo "$modename: warning: \`-L' is ignored for archives/objects: $deplib" 1>&2
+ ;;
+ esac # linkmode
+ continue
+ ;; # -L
+ -R*)
+ if test $pass = link; then
+ dir=`$echo "X$deplib" | $Xsed -e 's/^-R//'`
+ # Make sure the xrpath contains only unique directories.
+ case "$xrpath " in
+ *" $dir "*) ;;
+ *) xrpath="$xrpath $dir" ;;
+ esac
+ fi
+ deplibs="$deplib $deplibs"
+ continue
+ ;;
+ *.la) lib="$deplib" ;;
+ *.$libext)
+ if test $pass = conv; then
+ deplibs="$deplib $deplibs"
+ continue
+ fi
+ case $linkmode in
+ lib)
+ if test "$deplibs_check_method" != pass_all; then
+ echo
+ echo "*** Warning: This library needs some functionality provided by $deplib."
+ echo "*** I have the capability to make that library automatically link in when"
+ echo "*** you link to this library. But I can only do this if you have a"
+ echo "*** shared version of the library, which you do not appear to have."
+ else
+ echo
+ echo "*** Warning: Linking the shared library $output against the"
+ echo "*** static library $deplib is not portable!"
+ deplibs="$deplib $deplibs"
+ fi
+ continue
+ ;;
+ prog)
+ if test $pass != link; then
+ deplibs="$deplib $deplibs"
+ else
+ compile_deplibs="$deplib $compile_deplibs"
+ finalize_deplibs="$deplib $finalize_deplibs"
+ fi
+ continue
+ ;;
+ esac # linkmode
+ ;; # *.$libext
+ *.lo | *.$objext)
+ if test $pass = dlpreopen || test "$dlopen_support" != yes || test "$build_libtool_libs" = no; then
+ # If there is no dlopen support or we're linking statically,
+ # we need to preload.
+ newdlprefiles="$newdlprefiles $deplib"
+ compile_deplibs="$deplib $compile_deplibs"
+ finalize_deplibs="$deplib $finalize_deplibs"
+ else
+ newdlfiles="$newdlfiles $deplib"
+ fi
+ continue
+ ;;
+ %DEPLIBS%)
+ alldeplibs=yes
+ continue
+ ;;
+ esac # case $deplib
+ if test $found = yes || test -f "$lib"; then :
+ else
+ $echo "$modename: cannot find the library \`$lib'" 1>&2
+ exit 1
+ fi
# Check to see that this really is a libtool archive.
- if (sed -e '2q' $arg | egrep "^# Generated by .*$PACKAGE") >/dev/null 2>&1; then :
+ if (sed -e '2q' $lib | egrep "^# Generated by .*$PACKAGE") >/dev/null 2>&1; then :
else
- $echo "$modename: \`$arg' is not a valid libtool archive" 1>&2
+ $echo "$modename: \`$lib' is not a valid libtool archive" 1>&2
exit 1
fi
+ ladir=`$echo "X$lib" | $Xsed -e 's%/[^/]*$%%'`
+ test "X$ladir" = "X$lib" && ladir="."
+
+ dlname=
+ dlopen=
+ dlpreopen=
+ libdir=
+ library_names=
+ old_library=
# If the library was installed with an old release of libtool,
# it will not redefine variable installed.
installed=yes
# Read the .la file
- # If there is no directory component, then add one.
- case "$arg" in
- */* | *\\*) . $arg ;;
- *) . ./$arg ;;
+ case $lib in
+ */* | *\\*) . $lib ;;
+ *) . ./$lib ;;
esac
+ if test "$linkmode,$pass" = "lib,link" ||
+ test "$linkmode,$pass" = "prog,scan" ||
+ { test $linkmode = oldlib && test $linkmode = obj; }; then
+ # Add dl[pre]opened files of deplib
+ test -n "$dlopen" && dlfiles="$dlfiles $dlopen"
+ test -n "$dlpreopen" && dlprefiles="$dlprefiles $dlpreopen"
+ fi
+
+ if test $pass = conv; then
+ # Only check for convenience libraries
+ deplibs="$lib $deplibs"
+ if test -z "$libdir"; then
+ if test -z "$old_library"; then
+ $echo "$modename: cannot find name of link library for \`$lib'" 1>&2
+ exit 1
+ fi
+ # It is a libtool convenience library, so add in its objects.
+ convenience="$convenience $ladir/$objdir/$old_library"
+ old_convenience="$old_convenience $ladir/$objdir/$old_library"
+ tmp_libs=
+ for deplib in $dependency_libs; do
+ deplibs="$deplib $deplibs"
+ case "$tmp_libs " in
+ *" $deplib "*) specialdeplibs="$specialdeplibs $deplib" ;;
+ esac
+ tmp_libs="$tmp_libs $deplib"
+ done
+ elif test $linkmode != prog && test $linkmode != lib; then
+ $echo "$modename: \`$lib' is not a convenience library" 1>&2
+ exit 1
+ fi
+ continue
+ fi # $pass = conv
+
# Get the name of the library we link against.
linklib=
for l in $old_library $library_names; do
linklib="$l"
done
-
if test -z "$linklib"; then
- $echo "$modename: cannot find name of link library for \`$arg'" 1>&2
+ $echo "$modename: cannot find name of link library for \`$lib'" 1>&2
exit 1
fi
- # Find the relevant object directory and library name.
- name=`$echo "X$arg" | $Xsed -e 's%^.*/%%' -e 's/\.la$//' -e 's/^lib//'`
-
- if test "X$installed" = Xyes; then
- dir="$libdir"
- else
- dir=`$echo "X$arg" | $Xsed -e 's%/[^/]*$%%'`
- if test "X$dir" = "X$arg"; then
- dir="$objdir"
+ # This library was specified with -dlopen.
+ if test $pass = dlopen; then
+ if test -z "$libdir"; then
+ $echo "$modename: cannot -dlopen a convenience library: \`$lib'" 1>&2
+ exit 1
+ fi
+ if test -z "$dlname" || test "$dlopen_support" != yes || test "$build_libtool_libs" = no; then
+ # If there is no dlname, no dlopen support or we're linking
+ # statically, we need to preload.
+ dlprefiles="$dlprefiles $lib"
else
- dir="$dir/$objdir"
+ newdlfiles="$newdlfiles $lib"
fi
- fi
-
- if test -n "$dependency_libs"; then
- # Extract -R and -L from dependency_libs
- temp_deplibs=
- for deplib in $dependency_libs; do
- case "$deplib" in
- -R*) temp_xrpath=`$echo "X$deplib" | $Xsed -e 's/^-R//'`
- case " $rpath $xrpath " in
- *" $temp_xrpath "*) ;;
- *) xrpath="$xrpath $temp_xrpath";;
- esac;;
- -L*) case "$compile_command $temp_deplibs " in
- *" $deplib "*) ;;
- *) temp_deplibs="$temp_deplibs $deplib";;
- esac
- temp_dir=`$echo "X$deplib" | $Xsed -e 's/^-L//'`
- case " $lib_search_path " in
- *" $temp_dir "*) ;;
- *) lib_search_path="$lib_search_path $temp_dir";;
- esac
- ;;
- *) temp_deplibs="$temp_deplibs $deplib";;
- esac
- done
- dependency_libs="$temp_deplibs"
- fi
-
- if test -z "$libdir"; then
- # It is a libtool convenience library, so add in its objects.
- convenience="$convenience $dir/$old_library"
- old_convenience="$old_convenience $dir/$old_library"
- deplibs="$deplibs$dependency_libs"
- compile_command="$compile_command $dir/$old_library$dependency_libs"
- finalize_command="$finalize_command $dir/$old_library$dependency_libs"
continue
- fi
+ fi # $pass = dlopen
- # This library was specified with -dlopen.
- if test "$prev" = dlfiles; then
- dlfiles="$dlfiles $arg"
- if test -z "$dlname" || test "$dlopen" != yes || test "$build_libtool_libs" = no; then
- # If there is no dlname, no dlopen support or we're linking statically,
- # we need to preload.
- prev=dlprefiles
- else
- # We should not create a dependency on this library, but we
- # may need any libraries it requires.
- compile_command="$compile_command$dependency_libs"
- finalize_command="$finalize_command$dependency_libs"
- prev=
- continue
+ # We need an absolute path.
+ case $ladir in
+ [\\/]* | [A-Za-z]:[\\/]*) abs_ladir="$ladir" ;;
+ *)
+ abs_ladir=`cd "$ladir" && pwd`
+ if test -z "$abs_ladir"; then
+ $echo "$modename: warning: cannot determine absolute directory name of \`$ladir'" 1>&2
+ $echo "$modename: passing it literally to the linker, although it might fail" 1>&2
+ abs_ladir="$ladir"
fi
- fi
+ ;;
+ esac
+ laname=`$echo "X$lib" | $Xsed -e 's%^.*/%%'`
- # The library was specified with -dlpreopen.
- if test "$prev" = dlprefiles; then
+ # Find the relevant object directory and library name.
+ if test "X$installed" = Xyes; then
+ if test ! -f "$libdir/$linklib" && test -f "$abs_ladir/$linklib"; then
+ $echo "$modename: warning: library \`$lib' was moved." 1>&2
+ dir="$ladir"
+ absdir="$abs_ladir"
+ libdir="$abs_ladir"
+ else
+ dir="$libdir"
+ absdir="$libdir"
+ fi
+ else
+ dir="$ladir/$objdir"
+ absdir="$abs_ladir/$objdir"
+ # Remove this search path later
+ notinst_path="$notinst_path $abs_ladir"
+ fi # $installed = yes
+ name=`$echo "X$laname" | $Xsed -e 's/\.la$//' -e 's/^lib//'`
+
+ # This library was specified with -dlpreopen.
+ if test $pass = dlpreopen; then
+ if test -z "$libdir"; then
+ $echo "$modename: cannot -dlpreopen a convenience library: \`$lib'" 1>&2
+ exit 1
+ fi
# Prefer using a static library (so that no silly _DYNAMIC symbols
# are required to link).
if test -n "$old_library"; then
- dlprefiles="$dlprefiles $dir/$old_library"
+ newdlprefiles="$newdlprefiles $dir/$old_library"
+ # Otherwise, use the dlname, so that lt_dlopen finds it.
+ elif test -n "$dlname"; then
+ newdlprefiles="$newdlprefiles $dir/$dlname"
else
- dlprefiles="$dlprefiles $dir/$linklib"
+ newdlprefiles="$newdlprefiles $dir/$linklib"
fi
- prev=
+ fi # $pass = dlpreopen
+
+ if test -z "$libdir"; then
+ # Link the convenience library
+ if test $linkmode = lib; then
+ deplibs="$dir/$old_library $deplibs"
+ elif test "$linkmode,$pass" = "prog,link"; then
+ compile_deplibs="$dir/$old_library $compile_deplibs"
+ finalize_deplibs="$dir/$old_library $finalize_deplibs"
+ else
+ deplibs="$lib $deplibs"
+ fi
+ continue
fi
- if test -n "$library_names" &&
- { test "$prefer_static_libs" = no || test -z "$old_library"; }; then
- link_against_libtool_libs="$link_against_libtool_libs $arg"
- if test -n "$shlibpath_var"; then
- # Make sure the rpath contains only unique directories.
- case "$temp_rpath " in
- *" $dir "*) ;;
- *) temp_rpath="$temp_rpath $dir" ;;
- esac
+ if test $linkmode = prog && test $pass != link; then
+ newlib_search_path="$newlib_search_path $ladir"
+ deplibs="$lib $deplibs"
+
+ linkalldeplibs=no
+ if test "$link_all_deplibs" != no || test -z "$library_names" ||
+ test "$build_libtool_libs" = no; then
+ linkalldeplibs=yes
fi
- # We need an absolute path.
- case "$dir" in
- [\\/] | [A-Za-z]:[\\/]*) absdir="$dir" ;;
- *)
- absdir=`cd "$dir" && pwd`
- if test -z "$absdir"; then
- $echo "$modename: warning: cannot determine absolute directory name of \`$dir'" 1>&2
- $echo "$modename: passing it literally to the linker, although it might fail" 1>&2
- absdir="$dir"
+ tmp_libs=
+ for deplib in $dependency_libs; do
+ case $deplib in
+ -L*) newlib_search_path="$newlib_search_path "`$echo "X$deplib" | $Xsed -e 's/^-L//'`;; ### testsuite: skip nested quoting test
+ esac
+ # Need to link against all dependency_libs?
+ if test $linkalldeplibs = yes; then
+ deplibs="$deplib $deplibs"
+ else
+ # Need to hardcode shared library paths
+ # or/and link against static libraries
+ newdependency_libs="$deplib $newdependency_libs"
fi
- ;;
- esac
-
- # This is the magic to use -rpath.
- # Skip directories that are in the system default run-time
- # search path, unless they have been requested with -R.
- case " $sys_lib_dlsearch_path " in
- *" $absdir "*) ;;
- *)
- case "$compile_rpath " in
- *" $absdir "*) ;;
- *) compile_rpath="$compile_rpath $absdir"
+ case "$tmp_libs " in
+ *" $deplib "*) specialdeplibs="$specialdeplibs $deplib" ;;
esac
- ;;
- esac
+ tmp_libs="$tmp_libs $deplib"
+ done # for deplib
+ continue
+ fi # $linkmode = prog...
- case " $sys_lib_dlsearch_path " in
- *" $libdir "*) ;;
- *)
- case "$finalize_rpath " in
+ link_static=no # Whether the deplib will be linked statically
+ if test -n "$library_names" &&
+ { test "$prefer_static_libs" = no || test -z "$old_library"; }; then
+ # Link against this shared library
+
+ if test "$linkmode,$pass" = "prog,link" ||
+ { test $linkmode = lib && test $hardcode_into_libs = yes; }; then
+ # Hardcode the library path.
+ # Skip directories that are in the system default run-time
+ # search path.
+ case " $sys_lib_dlsearch_path " in
+ *" $absdir "*) ;;
+ *)
+ case "$compile_rpath " in
+ *" $absdir "*) ;;
+ *) compile_rpath="$compile_rpath $absdir"
+ esac
+ ;;
+ esac
+ case " $sys_lib_dlsearch_path " in
*" $libdir "*) ;;
- *) finalize_rpath="$finalize_rpath $libdir"
+ *)
+ case "$finalize_rpath " in
+ *" $libdir "*) ;;
+ *) finalize_rpath="$finalize_rpath $libdir"
+ esac
+ ;;
esac
- ;;
- esac
+ if test $linkmode = prog; then
+ # We need to hardcode the library path
+ if test -n "$shlibpath_var"; then
+ # Make sure the rpath contains only unique directories.
+ case "$temp_rpath " in
+ *" $dir "*) ;;
+ *" $absdir "*) ;;
+ *) temp_rpath="$temp_rpath $dir" ;;
+ esac
+ fi
+ fi
+ fi # $linkmode,$pass = prog,link...
- lib_linked=yes
- case "$hardcode_action" in
- immediate | unsupported)
- if test "$hardcode_direct" = no; then
- compile_command="$compile_command $dir/$linklib"
- deplibs="$deplibs $dir/$linklib"
- case "$host" in
- *-*-cygwin* | *-*-mingw* | *-*-os2*)
- dllsearchdir=`cd "$dir" && pwd || echo "$dir"`
- if test -n "$dllsearchpath"; then
- dllsearchpath="$dllsearchpath:$dllsearchdir"
- else
- dllsearchpath="$dllsearchdir"
- fi
- ;;
- esac
- elif test "$hardcode_minus_L" = no; then
- case "$host" in
- *-*-sunos*)
- compile_shlibpath="$compile_shlibpath$dir:"
+ if test "$alldeplibs" = yes &&
+ { test "$deplibs_check_method" = pass_all ||
+ { test "$build_libtool_libs" = yes &&
+ test -n "$library_names"; }; }; then
+ # We only need to search for static libraries
+ continue
+ fi
+
+ if test "$installed" = no; then
+ notinst_deplibs="$notinst_deplibs $lib"
+ need_relink=yes
+ fi
+
+ if test -n "$old_archive_from_expsyms_cmds"; then
+ # figure out the soname
+ set dummy $library_names
+ realname="$2"
+ shift; shift
+ libname=`eval \\$echo \"$libname_spec\"`
+ # use dlname if we got it. it's perfectly good, no?
+ if test -n "$dlname"; then
+ soname="$dlname"
+ elif test -n "$soname_spec"; then
+ # bleh windows
+ case $host in
+ *cygwin*)
+ major=`expr $current - $age`
+ versuffix="-$major"
;;
esac
- case "$compile_command " in
- *" -L$dir "*) ;;
- *) compile_command="$compile_command -L$dir";;
- esac
- compile_command="$compile_command -l$name"
- deplibs="$deplibs -L$dir -l$name"
- elif test "$hardcode_shlibpath_var" = no; then
- case ":$compile_shlibpath:" in
- *":$dir:"*) ;;
- *) compile_shlibpath="$compile_shlibpath$dir:";;
+ eval soname=\"$soname_spec\"
+ else
+ soname="$realname"
+ fi
+
+ # Make a new name for the extract_expsyms_cmds to use
+ soroot="$soname"
+ soname=`echo $soroot | sed -e 's/^.*\///'`
+ newlib="libimp-`echo $soname | sed 's/^lib//;s/\.dll$//'`.a"
+
+ # If the library has no export list, then create one now
+ if test -f "$output_objdir/$soname-def"; then :
+ else
+ $show "extracting exported symbol list from \`$soname'"
+ save_ifs="$IFS"; IFS='~'
+ eval cmds=\"$extract_expsyms_cmds\"
+ for cmd in $cmds; do
+ IFS="$save_ifs"
+ $show "$cmd"
+ $run eval "$cmd" || exit $?
+ done
+ IFS="$save_ifs"
+ fi
+
+ # Create $newlib
+ if test -f "$output_objdir/$newlib"; then :; else
+ $show "generating import library for \`$soname'"
+ save_ifs="$IFS"; IFS='~'
+ eval cmds=\"$old_archive_from_expsyms_cmds\"
+ for cmd in $cmds; do
+ IFS="$save_ifs"
+ $show "$cmd"
+ $run eval "$cmd" || exit $?
+ done
+ IFS="$save_ifs"
+ fi
+ # make sure the library variables are pointing to the new library
+ dir=$output_objdir
+ linklib=$newlib
+ fi # test -n $old_archive_from_expsyms_cmds
+
+ if test $linkmode = prog || test "$mode" != relink; then
+ add_shlibpath=
+ add_dir=
+ add=
+ lib_linked=yes
+ case $hardcode_action in
+ immediate | unsupported)
+ if test "$hardcode_direct" = no; then
+ add="$dir/$linklib"
+ elif test "$hardcode_minus_L" = no; then
+ case $host in
+ *-*-sunos*) add_shlibpath="$dir" ;;
+ esac
+ add_dir="-L$dir"
+ add="-l$name"
+ elif test "$hardcode_shlibpath_var" = no; then
+ add_shlibpath="$dir"
+ add="-l$name"
+ else
+ lib_linked=no
+ fi
+ ;;
+ relink)
+ if test "$hardcode_direct" = yes; then
+ add="$dir/$linklib"
+ elif test "$hardcode_minus_L" = yes; then
+ add_dir="-L$dir"
+ add="-l$name"
+ elif test "$hardcode_shlibpath_var" = yes; then
+ add_shlibpath="$dir"
+ add="-l$name"
+ else
+ lib_linked=no
+ fi
+ ;;
+ *) lib_linked=no ;;
+ esac
+
+ if test "$lib_linked" != yes; then
+ $echo "$modename: configuration error: unsupported hardcode properties"
+ exit 1
+ fi
+
+ if test -n "$add_shlibpath"; then
+ case :$compile_shlibpath: in
+ *":$add_shlibpath:"*) ;;
+ *) compile_shlibpath="$compile_shlibpath$add_shlibpath:" ;;
esac
- compile_command="$compile_command -l$name"
- deplibs="$deplibs -l$name"
+ fi
+ if test $linkmode = prog; then
+ test -n "$add_dir" && compile_deplibs="$add_dir $compile_deplibs"
+ test -n "$add" && compile_deplibs="$add $compile_deplibs"
else
- lib_linked=no
+ test -n "$add_dir" && deplibs="$add_dir $deplibs"
+ test -n "$add" && deplibs="$add $deplibs"
+ if test "$hardcode_direct" != yes && \
+ test "$hardcode_minus_L" != yes && \
+ test "$hardcode_shlibpath_var" = yes; then
+ case :$finalize_shlibpath: in
+ *":$libdir:"*) ;;
+ *) finalize_shlibpath="$finalize_shlibpath$libdir:" ;;
+ esac
+ fi
fi
- ;;
+ fi
- relink)
+ if test $linkmode = prog || test "$mode" = relink; then
+ add_shlibpath=
+ add_dir=
+ add=
+ # Finalize command for both is simple: just hardcode it.
if test "$hardcode_direct" = yes; then
- compile_command="$compile_command $absdir/$linklib"
- deplibs="$deplibs $absdir/$linklib"
+ add="$libdir/$linklib"
elif test "$hardcode_minus_L" = yes; then
- case "$compile_command " in
- *" -L$absdir "*) ;;
- *) compile_command="$compile_command -L$absdir";;
- esac
- compile_command="$compile_command -l$name"
- deplibs="$deplibs -L$absdir -l$name"
+ add_dir="-L$libdir"
+ add="-l$name"
elif test "$hardcode_shlibpath_var" = yes; then
- case ":$compile_shlibpath:" in
- *":$absdir:"*) ;;
- *) compile_shlibpath="$compile_shlibpath$absdir:";;
+ case :$finalize_shlibpath: in
+ *":$libdir:"*) ;;
+ *) finalize_shlibpath="$finalize_shlibpath$libdir:" ;;
esac
- compile_command="$compile_command -l$name"
- deplibs="$deplibs -l$name"
+ add="-l$name"
else
- lib_linked=no
+ # We cannot seem to hardcode it, guess we'll fake it.
+ add_dir="-L$libdir"
+ add="-l$name"
fi
- ;;
-
- *)
- lib_linked=no
- ;;
- esac
-
- if test "$lib_linked" != yes; then
- $echo "$modename: configuration error: unsupported hardcode properties"
- exit 1
- fi
- # Finalize command for both is simple: just hardcode it.
- if test "$hardcode_direct" = yes; then
- finalize_command="$finalize_command $libdir/$linklib"
- elif test "$hardcode_minus_L" = yes; then
- case "$finalize_command " in
- *" -L$libdir "*) ;;
- *) finalize_command="$finalize_command -L$libdir";;
- esac
- finalize_command="$finalize_command -l$name"
- elif test "$hardcode_shlibpath_var" = yes; then
- case ":$finalize_shlibpath:" in
- *":$libdir:"*) ;;
- *) finalize_shlibpath="$finalize_shlibpath$libdir:";;
- esac
- finalize_command="$finalize_command -l$name"
- else
- # We cannot seem to hardcode it, guess we'll fake it.
- case "$finalize_command " in
- *" -L$dir "*) ;;
- *) finalize_command="$finalize_command -L$libdir";;
- esac
- finalize_command="$finalize_command -l$name"
+ if test $linkmode = prog; then
+ test -n "$add_dir" && finalize_deplibs="$add_dir $finalize_deplibs"
+ test -n "$add" && finalize_deplibs="$add $finalize_deplibs"
+ else
+ test -n "$add_dir" && deplibs="$add_dir $deplibs"
+ test -n "$add" && deplibs="$add $deplibs"
+ fi
fi
- else
- # Transform directly to old archives if we don't build new libraries.
- if test -n "$pic_flag" && test -z "$old_library"; then
- $echo "$modename: cannot find static library for \`$arg'" 1>&2
- exit 1
+ elif test $linkmode = prog; then
+ if test "$alldeplibs" = yes &&
+ { test "$deplibs_check_method" = pass_all ||
+ { test "$build_libtool_libs" = yes &&
+ test -n "$library_names"; }; }; then
+ # We only need to search for static libraries
+ continue
fi
+ # Try to link the static library
# Here we assume that one of hardcode_direct or hardcode_minus_L
# is not unsupported. This is valid on all known static and
# shared platforms.
if test "$hardcode_direct" != unsupported; then
test -n "$old_library" && linklib="$old_library"
- compile_command="$compile_command $dir/$linklib"
- finalize_command="$finalize_command $dir/$linklib"
+ compile_deplibs="$dir/$linklib $compile_deplibs"
+ finalize_deplibs="$dir/$linklib $finalize_deplibs"
else
- case "$compile_command " in
- *" -L$dir "*) ;;
- *) compile_command="$compile_command -L$dir";;
- esac
- compile_command="$compile_command -l$name"
- case "$finalize_command " in
- *" -L$dir "*) ;;
- *) finalize_command="$finalize_command -L$dir";;
- esac
- finalize_command="$finalize_command -l$name"
+ compile_deplibs="-l$name -L$dir $compile_deplibs"
+ finalize_deplibs="-l$name -L$dir $finalize_deplibs"
+ fi
+ elif test "$build_libtool_libs" = yes; then
+ # Not a shared library
+ if test "$deplibs_check_method" != pass_all; then
+ # We're trying to link a shared library against a static one
+ # but the system doesn't support it.
+
+ # Just print a warning and add the library to dependency_libs so
+ # that the program can be linked against the static library.
+ echo
+ echo "*** Warning: This library needs some functionality provided by $lib."
+ echo "*** I have the capability to make that library automatically link in when"
+ echo "*** you link to this library. But I can only do this if you have a"
+ echo "*** shared version of the library, which you do not appear to have."
+ if test "$module" = yes; then
+ echo "*** Therefore, libtool will create a static module, that should work "
+ echo "*** as long as the dlopening application is linked with the -dlopen flag."
+ if test -z "$global_symbol_pipe"; then
+ echo
+ echo "*** However, this would only work if libtool was able to extract symbol"
+ echo "*** lists from a program, using \`nm' or equivalent, but libtool could"
+ echo "*** not find such a program. So, this module is probably useless."
+ echo "*** \`nm' from GNU binutils and a full rebuild may help."
+ fi
+ if test "$build_old_libs" = no; then
+ build_libtool_libs=module
+ build_old_libs=yes
+ else
+ build_libtool_libs=no
+ fi
+ fi
+ else
+ convenience="$convenience $dir/$old_library"
+ old_convenience="$old_convenience $dir/$old_library"
+ deplibs="$dir/$old_library $deplibs"
+ link_static=yes
+ fi
+ fi # link shared/static library?
+
+ if test $linkmode = lib; then
+ if test -n "$dependency_libs" &&
+ { test $hardcode_into_libs != yes || test $build_old_libs = yes ||
+ test $link_static = yes; }; then
+ # Extract -R from dependency_libs
+ temp_deplibs=
+ for libdir in $dependency_libs; do
+ case $libdir in
+ -R*) temp_xrpath=`$echo "X$libdir" | $Xsed -e 's/^-R//'`
+ case " $xrpath " in
+ *" $temp_xrpath "*) ;;
+ *) xrpath="$xrpath $temp_xrpath";;
+ esac;;
+ *) temp_deplibs="$temp_deplibs $libdir";;
+ esac
+ done
+ dependency_libs="$temp_deplibs"
fi
- fi
-
- # Add in any libraries that this one depends upon.
- compile_command="$compile_command$dependency_libs"
- finalize_command="$finalize_command$dependency_libs"
- continue
- ;;
- # Some other compiler argument.
- *)
- # Unknown arguments in both finalize_command and compile_command need
- # to be aesthetically quoted because they are evaled later.
- arg=`$echo "X$arg" | $Xsed -e "$sed_quote_subst"`
- case "$arg" in
- *[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \ ]*|*]*)
- arg="\"$arg\""
- ;;
- esac
- ;;
- esac
+ newlib_search_path="$newlib_search_path $absdir"
+ # Link against this library
+ test "$link_static" = no && newdependency_libs="$abs_ladir/$laname $newdependency_libs"
+ # ... and its dependency_libs
+ tmp_libs=
+ for deplib in $dependency_libs; do
+ newdependency_libs="$deplib $newdependency_libs"
+ case "$tmp_libs " in
+ *" $deplib "*) specialdeplibs="$specialdeplibs $deplib" ;;
+ esac
+ tmp_libs="$tmp_libs $deplib"
+ done
- # Now actually substitute the argument into the commands.
- if test -n "$arg"; then
- compile_command="$compile_command $arg"
- finalize_command="$finalize_command $arg"
+ if test $link_all_deplibs != no; then
+ # Add the search paths of all dependency libraries
+ for deplib in $dependency_libs; do
+ case $deplib in
+ -L*) path="$deplib" ;;
+ *.la)
+ dir=`$echo "X$deplib" | $Xsed -e 's%/[^/]*$%%'`
+ test "X$dir" = "X$deplib" && dir="."
+ # We need an absolute path.
+ case $dir in
+ [\\/]* | [A-Za-z]:[\\/]*) absdir="$dir" ;;
+ *)
+ absdir=`cd "$dir" && pwd`
+ if test -z "$absdir"; then
+ $echo "$modename: warning: cannot determine absolute directory name of \`$dir'" 1>&2
+ absdir="$dir"
+ fi
+ ;;
+ esac
+ if grep "^installed=no" $deplib > /dev/null; then
+ path="-L$absdir/$objdir"
+ else
+ eval libdir=`sed -n -e 's/^libdir=\(.*\)$/\1/p' $deplib`
+ if test -z "$libdir"; then
+ $echo "$modename: \`$deplib' is not a valid libtool archive" 1>&2
+ exit 1
+ fi
+ if test "$absdir" != "$libdir"; then
+ $echo "$modename: warning: \`$deplib' seems to be moved" 1>&2
+ fi
+ path="-L$absdir"
+ fi
+ ;;
+ *) continue ;;
+ esac
+ case " $deplibs " in
+ *" $path "*) ;;
+ *) deplibs="$deplibs $path" ;;
+ esac
+ done
+ fi # link_all_deplibs != no
+ fi # linkmode = lib
+ done # for deplib in $libs
+ if test $pass = dlpreopen; then
+ # Link the dlpreopened libraries before other libraries
+ for deplib in $save_deplibs; do
+ deplibs="$deplib $deplibs"
+ done
fi
- done
-
- if test -n "$prev"; then
- $echo "$modename: the \`$prevarg' option requires an argument" 1>&2
- $echo "$help" 1>&2
- exit 1
- fi
-
- if test "$export_dynamic" = yes && test -n "$export_dynamic_flag_spec"; then
- eval arg=\"$export_dynamic_flag_spec\"
- compile_command="$compile_command $arg"
- finalize_command="$finalize_command $arg"
- fi
-
- oldlibs=
- # calculate the name of the file, without its directory
- outputname=`$echo "X$output" | $Xsed -e 's%^.*/%%'`
- libobjs_save="$libobjs"
-
- case "$output" in
- "")
- $echo "$modename: you must specify an output file" 1>&2
- $echo "$help" 1>&2
- exit 1
- ;;
+ if test $pass != dlopen; then
+ test $pass != scan && dependency_libs="$newdependency_libs"
+ if test $pass != conv; then
+ # Make sure lib_search_path contains only unique directories.
+ lib_search_path=
+ for dir in $newlib_search_path; do
+ case "$lib_search_path " in
+ *" $dir "*) ;;
+ *) lib_search_path="$lib_search_path $dir" ;;
+ esac
+ done
+ newlib_search_path=
+ fi
- *.a | *.lib)
- if test -n "$link_against_libtool_libs"; then
- $echo "$modename: error: cannot link libtool libraries into archives" 1>&2
- exit 1
+ if test "$linkmode,$pass" != "prog,link"; then
+ vars="deplibs"
+ else
+ vars="compile_deplibs finalize_deplibs"
+ fi
+ for var in $vars dependency_libs; do
+ # Add libraries to $var in reverse order
+ eval tmp_libs=\"\$$var\"
+ new_libs=
+ for deplib in $tmp_libs; do
+ case $deplib in
+ -L*) new_libs="$deplib $new_libs" ;;
+ *)
+ case " $specialdeplibs " in
+ *" $deplib "*) new_libs="$deplib $new_libs" ;;
+ *)
+ case " $new_libs " in
+ *" $deplib "*) ;;
+ *) new_libs="$deplib $new_libs" ;;
+ esac
+ ;;
+ esac
+ ;;
+ esac
+ done
+ tmp_libs=
+ for deplib in $new_libs; do
+ case $deplib in
+ -L*)
+ case " $tmp_libs " in
+ *" $deplib "*) ;;
+ *) tmp_libs="$tmp_libs $deplib" ;;
+ esac
+ ;;
+ *) tmp_libs="$tmp_libs $deplib" ;;
+ esac
+ done
+ eval $var=\"$tmp_libs\"
+ done # for var
fi
-
- if test -n "$deplibs"; then
- $echo "$modename: warning: \`-l' and \`-L' are ignored for archives" 1>&2
+ if test "$pass" = "conv" &&
+ { test "$linkmode" = "lib" || test "$linkmode" = "prog"; }; then
+ libs="$deplibs" # reset libs
+ deplibs=
fi
+ done # for pass
+ if test $linkmode = prog; then
+ dlfiles="$newdlfiles"
+ dlprefiles="$newdlprefiles"
+ fi
+ case $linkmode in
+ oldlib)
if test -n "$dlfiles$dlprefiles" || test "$dlself" != no; then
$echo "$modename: warning: \`-dlopen' is ignored for archives" 1>&2
fi
@@ -1566,11 +2137,12 @@ compiler."
# Now set the variables for building old libraries.
build_libtool_libs=no
oldlibs="$output"
+ objs="$objs$old_deplibs"
;;
- *.la)
+ lib)
# Make sure we only generate libraries of the form `libNAME.la'.
- case "$outputname" in
+ case $outputname in
lib*)
name=`$echo "X$outputname" | $Xsed -e 's/\.la$//' -e 's/^lib//'`
eval libname=\"$libname_spec\"
@@ -1591,26 +2163,20 @@ compiler."
;;
esac
- output_objdir=`$echo "X$output" | $Xsed -e 's%/[^/]*$%%'`
- if test "X$output_objdir" = "X$output"; then
- output_objdir="$objdir"
- else
- output_objdir="$output_objdir/$objdir"
- fi
-
if test -n "$objs"; then
- $echo "$modename: cannot build libtool library \`$output' from non-libtool objects:$objs" 2>&1
- exit 1
- fi
-
- # How the heck are we supposed to write a wrapper for a shared library?
- if test -n "$link_against_libtool_libs"; then
- $echo "$modename: error: cannot link shared libraries into libtool libraries" 1>&2
- exit 1
+ if test "$deplibs_check_method" != pass_all; then
+ $echo "$modename: cannot build libtool library \`$output' from non-libtool objects on this host:$objs" 2>&1
+ exit 1
+ else
+ echo
+ echo "*** Warning: Linking the shared library $output against the non-libtool"
+ echo "*** objects $objs is not portable!"
+ libobjs="$libobjs $objs"
+ fi
fi
- if test -n "$dlfiles$dlprefiles" || test "$dlself" != no; then
- $echo "$modename: warning: \`-dlopen' is ignored for libtool libraries" 1>&2
+ if test "$dlself" != no; then
+ $echo "$modename: warning: \`-dlopen self' is ignored for libtool libraries" 1>&2
fi
set dummy $rpath
@@ -1628,7 +2194,6 @@ compiler."
build_libtool_libs=convenience
build_old_libs=yes
fi
- dependency_libs="$deplibs"
if test -n "$vinfo"; then
$echo "$modename: warning: \`-version-info' is ignored for convenience libraries" 1>&2
@@ -1640,7 +2205,7 @@ compiler."
else
# Parse the version information argument.
- IFS="${IFS= }"; save_ifs="$IFS"; IFS=':'
+ save_ifs="$IFS"; IFS=':'
set dummy $vinfo 0 0 0
IFS="$save_ifs"
@@ -1655,8 +2220,8 @@ compiler."
age="$4"
# Check that each of the things are valid numbers.
- case "$current" in
- 0 | [1-9] | [1-9][0-9]*) ;;
+ case $current in
+ 0 | [1-9] | [1-9][0-9] | [1-9][0-9][0-9]) ;;
*)
$echo "$modename: CURRENT \`$current' is not a nonnegative integer" 1>&2
$echo "$modename: \`$vinfo' is not valid version information" 1>&2
@@ -1664,8 +2229,8 @@ compiler."
;;
esac
- case "$revision" in
- 0 | [1-9] | [1-9][0-9]*) ;;
+ case $revision in
+ 0 | [1-9] | [1-9][0-9] | [1-9][0-9][0-9]) ;;
*)
$echo "$modename: REVISION \`$revision' is not a nonnegative integer" 1>&2
$echo "$modename: \`$vinfo' is not valid version information" 1>&2
@@ -1673,8 +2238,8 @@ compiler."
;;
esac
- case "$age" in
- 0 | [1-9] | [1-9][0-9]*) ;;
+ case $age in
+ 0 | [1-9] | [1-9][0-9] | [1-9][0-9][0-9]) ;;
*)
$echo "$modename: AGE \`$age' is not a nonnegative integer" 1>&2
$echo "$modename: \`$vinfo' is not valid version information" 1>&2
@@ -1692,12 +2257,31 @@ compiler."
major=
versuffix=
verstring=
- case "$version_type" in
+ case $version_type in
none) ;;
+ darwin)
+ # Like Linux, but with the current version available in
+ # verstring for coding it into the library header
+ major=.`expr $current - $age`
+ versuffix="$major.$age.$revision"
+ # Darwin ld doesn't like 0 for these options...
+ minor_current=`expr $current + 1`
+ verstring="-compatibility_version $minor_current -current_version $minor_current.$revision"
+ ;;
+
+ freebsd-aout)
+ major=".$current"
+ versuffix=".$current.$revision";
+ ;;
+
+ freebsd-elf)
+ major=".$current"
+ versuffix=".$current";
+ ;;
+
irix)
major=`expr $current - $age + 1`
- versuffix="$major.$revision"
verstring="sgi$major.$revision"
# Add in all the interfaces that we are compatible with.
@@ -1707,6 +2291,10 @@ compiler."
loop=`expr $loop - 1`
verstring="sgi$major.$iface:$verstring"
done
+
+ # Before this point, $major must not contain `.'.
+ major=.$major
+ versuffix="$major.$revision"
;;
linux)
@@ -1736,21 +2324,11 @@ compiler."
versuffix=".$current.$revision"
;;
- freebsd-aout)
- major=".$current"
- versuffix=".$current.$revision";
- ;;
-
- freebsd-elf)
- major=".$current"
- versuffix=".$current";
- ;;
-
windows)
- # Like Linux, but with '-' rather than '.', since we only
- # want one extension on Windows 95.
+ # Use '-' rather than '.', since we only want one
+ # extension on DOS 8.3 filesystems.
major=`expr $current - $age`
- versuffix="-$major-$age-$revision"
+ versuffix="-$major"
;;
*)
@@ -1764,6 +2342,16 @@ compiler."
if test -z "$vinfo" && test -n "$release"; then
major=
verstring="0.0"
+ case $version_type in
+ darwin)
+ # we can't check for "0.0" in archive_cmds due to quoting
+ # problems, so we reset it completely
+ verstring=""
+ ;;
+ *)
+ verstring="0.0"
+ ;;
+ esac
if test "$need_version" = no; then
versuffix=
else
@@ -1777,7 +2365,7 @@ compiler."
versuffix=
verstring=""
fi
-
+
# Check to see if the archive will have undefined symbols.
if test "$allow_undefined" = yes; then
if test "$allow_undefined_flag" = unsupported; then
@@ -1789,38 +2377,12 @@ compiler."
# Don't allow undefined symbols.
allow_undefined_flag="$no_undefined_flag"
fi
-
- dependency_libs="$deplibs"
- case "$host" in
- *-*-cygwin* | *-*-mingw* | *-*-os2* | *-*-beos*)
- # these systems don't actually have a c library (as such)!
- ;;
-
- #### local change for Sleepycat DB: [#2380]
- # The following case is added, since the linker's -pthread
- # option implicitly controls use of -lc or -lc_r.
- *freebsd*)
- # defer to whether the user wants -lc, or -lc_r
- ;;
-
- *)
- # Add libc to deplibs on all other systems.
- deplibs="$deplibs -lc"
- ;;
- esac
fi
- # Create the output directory, or remove our outputs if we need to.
- if test -d $output_objdir; then
+ if test "$mode" != relink; then
+ # Remove our outputs.
$show "${rm}r $output_objdir/$outputname $output_objdir/$libname.* $output_objdir/${libname}${release}.*"
$run ${rm}r $output_objdir/$outputname $output_objdir/$libname.* $output_objdir/${libname}${release}.*
- else
- $show "$mkdir $output_objdir"
- $run $mkdir $output_objdir
- status=$?
- if test $status -ne 0 && test ! -d $output_objdir; then
- exit $status
- fi
fi
# Now set the variables for building old libraries.
@@ -1831,7 +2393,80 @@ compiler."
oldobjs="$objs "`$echo "X$libobjs" | $SP2NL | $Xsed -e '/\.'${libext}'$/d' -e "$lo2o" | $NL2SP`
fi
+ # Eliminate all temporary directories.
+ for path in $notinst_path; do
+ lib_search_path=`echo "$lib_search_path " | sed -e 's% $path % %g'`
+ deplibs=`echo "$deplibs " | sed -e 's% -L$path % %g'`
+ dependency_libs=`echo "$dependency_libs " | sed -e 's% -L$path % %g'`
+ done
+
+ if test -n "$xrpath"; then
+ # If the user specified any rpath flags, then add them.
+ temp_xrpath=
+ for libdir in $xrpath; do
+ temp_xrpath="$temp_xrpath -R$libdir"
+ case "$finalize_rpath " in
+ *" $libdir "*) ;;
+ *) finalize_rpath="$finalize_rpath $libdir" ;;
+ esac
+ done
+ if test $hardcode_into_libs != yes || test $build_old_libs = yes; then
+ dependency_libs="$temp_xrpath $dependency_libs"
+ fi
+ fi
+
+ # Make sure dlfiles contains only unique files that won't be dlpreopened
+ old_dlfiles="$dlfiles"
+ dlfiles=
+ for lib in $old_dlfiles; do
+ case " $dlprefiles $dlfiles " in
+ *" $lib "*) ;;
+ *) dlfiles="$dlfiles $lib" ;;
+ esac
+ done
+
+ # Make sure dlprefiles contains only unique files
+ old_dlprefiles="$dlprefiles"
+ dlprefiles=
+ for lib in $old_dlprefiles; do
+ case "$dlprefiles " in
+ *" $lib "*) ;;
+ *) dlprefiles="$dlprefiles $lib" ;;
+ esac
+ done
+
if test "$build_libtool_libs" = yes; then
+ if test -n "$rpath"; then
+ case $host in
+ *-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-os2* | *-*-beos*)
+ # these systems don't actually have a c library (as such)!
+ ;;
+ *-*-rhapsody* | *-*-darwin1.[012])
+ # Rhapsody C library is in the System framework
+ deplibs="$deplibs -framework System"
+ ;;
+ *-*-netbsd*)
+ # Don't link with libc until the a.out ld.so is fixed.
+ ;;
+ *-*-openbsd*)
+ # Do not include libc due to us having libc/libc_r.
+ ;;
+ #### Local change for Sleepycat's Berkeley DB [#2380]:
+ # FreeBSD, like OpenBSD, uses libc/libc_r and should not
+ # link against libc/c_r explicitly; the -pthread linker flag
+ # implicitly controls use of -lc and -lc_r.
+ *-*-freebsd*)
+ # Do not include libc due to us having libc/libc_r.
+ ;;
+ *)
+ # Add libc to deplibs on all other systems if necessary.
+ if test $build_libtool_need_lc = "yes"; then
+ deplibs="$deplibs -lc"
+ fi
+ ;;
+ esac
+ fi
+
# Transform deplibs into only deplibs that can be linked in shared.
name_save=$name
libname_save=$libname
@@ -1846,7 +2481,7 @@ compiler."
major=""
newdeplibs=
droppeddeps=no
- case "$deplibs_check_method" in
+ case $deplibs_check_method in
pass_all)
# Don't check for shared/static. Everything works.
# This might be a little naive. We might want to check
@@ -1871,7 +2506,7 @@ EOF
for i in $deplibs; do
name="`expr $i : '-l\(.*\)'`"
# If $name is empty we are operating on a -L argument.
- if test "$name" != "" ; then
+ if test -n "$name" && test "$name" != "0"; then
libname=`eval \\$echo \"$libname_spec\"`
deplib_matches=`eval \\$echo \"$library_names_spec\"`
set dummy $deplib_matches
@@ -1896,7 +2531,7 @@ EOF
for i in $deplibs; do
name="`expr $i : '-l\(.*\)'`"
# If $name is empty we are operating on a -L argument.
- if test "$name" != "" ; then
+ if test -n "$name" && test "$name" != "0"; then
$rm conftest
$CC -o conftest conftest.c $i
# Did it work?
@@ -1932,19 +2567,19 @@ EOF
;;
file_magic*)
set dummy $deplibs_check_method
- file_magic_regex="`expr \"$deplibs_check_method\" : \"$2 \(.*\)\"`"
+ file_magic_regex=`expr "$deplibs_check_method" : "$2 \(.*\)"`
for a_deplib in $deplibs; do
name="`expr $a_deplib : '-l\(.*\)'`"
# If $name is empty we are operating on a -L argument.
- if test "$name" != "" ; then
+ if test -n "$name" && test "$name" != "0"; then
libname=`eval \\$echo \"$libname_spec\"`
- for i in $lib_search_path; do
+ for i in $lib_search_path $sys_lib_search_path $shlib_search_path; do
potential_libs=`ls $i/$libname[.-]* 2>/dev/null`
for potent_lib in $potential_libs; do
# Follow soft links.
if ls -lLd "$potent_lib" 2>/dev/null \
| grep " -> " >/dev/null; then
- continue
+ continue
fi
# The statement above tries to avoid entering an
# endless loop below, in case of cyclic links.
@@ -1954,7 +2589,7 @@ EOF
potlib="$potent_lib"
while test -h "$potlib" 2>/dev/null; do
potliblink=`ls -ld $potlib | sed 's/.* -> //'`
- case "$potliblink" in
+ case $potliblink in
[\\/]* | [A-Za-z]:[\\/]*) potlib="$potliblink";;
*) potlib=`$echo "X$potlib" | $Xsed -e 's,[^/]*$,,'`"$potliblink";;
esac
@@ -1982,6 +2617,40 @@ EOF
fi
done # Gone through all deplibs.
;;
+ match_pattern*)
+ set dummy $deplibs_check_method
+ match_pattern_regex=`expr "$deplibs_check_method" : "$2 \(.*\)"`
+ for a_deplib in $deplibs; do
+ name="`expr $a_deplib : '-l\(.*\)'`"
+ # If $name is empty we are operating on a -L argument.
+ if test -n "$name" && test "$name" != "0"; then
+ libname=`eval \\$echo \"$libname_spec\"`
+ for i in $lib_search_path $sys_lib_search_path $shlib_search_path; do
+ potential_libs=`ls $i/$libname[.-]* 2>/dev/null`
+ for potent_lib in $potential_libs; do
+ if eval echo \"$potent_lib\" 2>/dev/null \
+ | sed 10q \
+ | egrep "$match_pattern_regex" > /dev/null; then
+ newdeplibs="$newdeplibs $a_deplib"
+ a_deplib=""
+ break 2
+ fi
+ done
+ done
+ if test -n "$a_deplib" ; then
+ droppeddeps=yes
+ echo
+ echo "*** Warning: This library needs some functionality provided by $a_deplib."
+ echo "*** I have the capability to make that library automatically link in when"
+ echo "*** you link to this library. But I can only do this if you have a"
+ echo "*** shared version of the library, which you do not appear to have."
+ fi
+ else
+ # Add a -L argument.
+ newdeplibs="$newdeplibs $a_deplib"
+ fi
+ done # Gone through all deplibs.
+ ;;
none | unknown | *)
newdeplibs=""
if $echo "X $deplibs" | $Xsed -e 's/ -lc$//' \
@@ -2004,6 +2673,13 @@ EOF
libname=$libname_save
name=$name_save
+ case $host in
+ *-*-rhapsody* | *-*-darwin1.[012])
+ # On Rhapsody replace the C library with the System framework
+ newdeplibs=`$echo "X $newdeplibs" | $Xsed -e 's/ -lc / -framework System /'`
+ ;;
+ esac
+
if test "$droppeddeps" = yes; then
if test "$module" = yes; then
echo
@@ -2029,6 +2705,21 @@ EOF
echo "*** The inter-library dependencies that have been dropped here will be"
echo "*** automatically added whenever a program is linked with this library"
echo "*** or is declared to -dlopen it."
+
+ if test $allow_undefined = no; then
+ echo
+ echo "*** Since this library must not contain undefined symbols,"
+ echo "*** because either the platform does not support them or"
+ echo "*** it was explicitly requested with -no-undefined,"
+ echo "*** libtool will only create a static version of it."
+ if test "$build_old_libs" = no; then
+ oldlibs="$output_objdir/$libname.$libext"
+ build_libtool_libs=module
+ build_old_libs=yes
+ else
+ build_libtool_libs=no
+ fi
+ fi
fi
fi
# Done checking deplibs!
@@ -2039,9 +2730,64 @@ EOF
library_names=
old_library=
dlname=
-
+
# Test again, we may have decided not to build it any more
if test "$build_libtool_libs" = yes; then
+ if test $hardcode_into_libs = yes; then
+ # Hardcode the library paths
+ hardcode_libdirs=
+ dep_rpath=
+ rpath="$finalize_rpath"
+ test "$mode" != relink && rpath="$compile_rpath$rpath"
+ for libdir in $rpath; do
+ if test -n "$hardcode_libdir_flag_spec"; then
+ if test -n "$hardcode_libdir_separator"; then
+ if test -z "$hardcode_libdirs"; then
+ hardcode_libdirs="$libdir"
+ else
+ # Just accumulate the unique libdirs.
+ case $hardcode_libdir_separator$hardcode_libdirs$hardcode_libdir_separator in
+ *"$hardcode_libdir_separator$libdir$hardcode_libdir_separator"*)
+ ;;
+ *)
+ hardcode_libdirs="$hardcode_libdirs$hardcode_libdir_separator$libdir"
+ ;;
+ esac
+ fi
+ else
+ eval flag=\"$hardcode_libdir_flag_spec\"
+ dep_rpath="$dep_rpath $flag"
+ fi
+ elif test -n "$runpath_var"; then
+ case "$perm_rpath " in
+ *" $libdir "*) ;;
+ *) perm_rpath="$perm_rpath $libdir" ;;
+ esac
+ fi
+ done
+ # Substitute the hardcoded libdirs into the rpath.
+ if test -n "$hardcode_libdir_separator" &&
+ test -n "$hardcode_libdirs"; then
+ libdir="$hardcode_libdirs"
+ eval dep_rpath=\"$hardcode_libdir_flag_spec\"
+ fi
+ if test -n "$runpath_var" && test -n "$perm_rpath"; then
+ # We should set the runpath_var.
+ rpath=
+ for dir in $perm_rpath; do
+ rpath="$rpath$dir:"
+ done
+ eval "$runpath_var='$rpath\$$runpath_var'; export $runpath_var"
+ fi
+ test -n "$dep_rpath" && deplibs="$dep_rpath $deplibs"
+ fi
+
+ shlibpath="$finalize_shlibpath"
+ test "$mode" != relink && shlibpath="$compile_shlibpath$shlibpath"
+ if test -n "$shlibpath"; then
+ eval "$shlibpath_var='$shlibpath\$$shlibpath_var'; export $shlibpath_var"
+ fi
+
# Get the real and link names of the library.
eval library_names=\"$library_names_spec\"
set dummy $library_names
@@ -2053,6 +2799,7 @@ EOF
else
soname="$realname"
fi
+ test -z "$dlname" && dlname=$soname
lib="$output_objdir/$realname"
for link
@@ -2087,7 +2834,7 @@ EOF
export_symbols="$output_objdir/$libname.exp"
$run $rm $export_symbols
eval cmds=\"$export_symbols_cmds\"
- IFS="${IFS= }"; save_ifs="$IFS"; IFS='~'
+ save_ifs="$IFS"; IFS='~'
for cmd in $cmds; do
IFS="$save_ifs"
$show "$cmd"
@@ -2124,7 +2871,7 @@ EOF
for xlib in $convenience; do
# Extract the objects.
- case "$xlib" in
+ case $xlib in
[\\/]* | [A-Za-z]:[\\/]*) xabs="$xlib" ;;
*) xabs=`pwd`"/$xlib" ;;
esac
@@ -2149,7 +2896,12 @@ EOF
if test "$thread_safe" = yes && test -n "$thread_safe_flag_spec"; then
eval flag=\"$thread_safe_flag_spec\"
- linkopts="$linkopts $flag"
+ linker_flags="$linker_flags $flag"
+ fi
+
+ # Make a backup of the uninstalled library when relinking
+ if test "$mode" = relink; then
+ $run eval '(cd $output_objdir && $rm ${realname}U && $mv $realname ${realname}U)' || exit $?
fi
# Do each of the archive commands.
@@ -2158,7 +2910,7 @@ EOF
else
eval cmds=\"$archive_cmds\"
fi
- IFS="${IFS= }"; save_ifs="$IFS"; IFS='~'
+ save_ifs="$IFS"; IFS='~'
for cmd in $cmds; do
IFS="$save_ifs"
$show "$cmd"
@@ -2166,6 +2918,12 @@ EOF
done
IFS="$save_ifs"
+ # Restore the uninstalled library and exit
+ if test "$mode" = relink; then
+ $run eval '(cd $output_objdir && $rm ${realname}T && $mv $realname ${realname}T && $mv "$realname"U $realname)' || exit $?
+ exit 0
+ fi
+
# Create links to the real library.
for linkname in $linknames; do
if test "$realname" != "$linkname"; then
@@ -2182,12 +2940,7 @@ EOF
fi
;;
- *.lo | *.o | *.obj)
- if test -n "$link_against_libtool_libs"; then
- $echo "$modename: error: cannot link libtool libraries into objects" 1>&2
- exit 1
- fi
-
+ obj)
if test -n "$deplibs"; then
$echo "$modename: warning: \`-l' and \`-L' are ignored for objects" 1>&2
fi
@@ -2212,9 +2965,9 @@ EOF
$echo "$modename: warning: \`-release' is ignored for objects" 1>&2
fi
- case "$output" in
+ case $output in
*.lo)
- if test -n "$objs"; then
+ if test -n "$objs$old_deplibs"; then
$echo "$modename: cannot build library object \`$output' from non-libtool objects" 1>&2
exit 1
fi
@@ -2238,7 +2991,7 @@ EOF
gentop=
# reload_cmds runs $LD directly, so let us get rid of
# -Wl from whole_archive_flag_spec
- wl=
+ wl=
if test -n "$convenience"; then
if test -n "$whole_archive_flag_spec"; then
@@ -2257,7 +3010,7 @@ EOF
for xlib in $convenience; do
# Extract the objects.
- case "$xlib" in
+ case $xlib in
[\\/]* | [A-Za-z]:[\\/]*) xabs="$xlib" ;;
*) xabs=`pwd`"/$xlib" ;;
esac
@@ -2281,11 +3034,11 @@ EOF
fi
# Create the old-style object.
- reload_objs="$objs "`$echo "X$libobjs" | $SP2NL | $Xsed -e '/\.'${libext}$'/d' -e '/\.lib$/d' -e "$lo2o" | $NL2SP`" $reload_conv_objs"
+ reload_objs="$objs$old_deplibs "`$echo "X$libobjs" | $SP2NL | $Xsed -e '/\.'${libext}$'/d' -e '/\.lib$/d' -e "$lo2o" | $NL2SP`" $reload_conv_objs" ### testsuite: skip nested quoting test
output="$obj"
eval cmds=\"$reload_cmds\"
- IFS="${IFS= }"; save_ifs="$IFS"; IFS='~'
+ save_ifs="$IFS"; IFS='~'
for cmd in $cmds; do
IFS="$save_ifs"
$show "$cmd"
@@ -2316,12 +3069,12 @@ EOF
exit 0
fi
- if test -n "$pic_flag"; then
+ if test -n "$pic_flag" || test "$pic_mode" != default; then
# Only do commands if we really have different PIC objects.
reload_objs="$libobjs $reload_conv_objs"
output="$libobj"
eval cmds=\"$reload_cmds\"
- IFS="${IFS= }"; save_ifs="$IFS"; IFS='~'
+ save_ifs="$IFS"; IFS='~'
for cmd in $cmds; do
IFS="$save_ifs"
$show "$cmd"
@@ -2352,8 +3105,10 @@ EOF
exit 0
;;
- # Anything else should be a program.
- *)
+ prog)
+ case $host in
+ *cygwin*) output=`echo $output | sed -e 's,.exe$,,;s,$,.exe,'` ;;
+ esac
if test -n "$vinfo"; then
$echo "$modename: warning: \`-version-info' is ignored for programs" 1>&2
fi
@@ -2363,20 +3118,27 @@ EOF
fi
if test "$preload" = yes; then
- if test "$dlopen" = unknown && test "$dlopen_self" = unknown &&
+ if test "$dlopen_support" = unknown && test "$dlopen_self" = unknown &&
test "$dlopen_self_static" = unknown; then
$echo "$modename: warning: \`AC_LIBTOOL_DLOPEN' not used. Assuming no dlopen support."
- fi
+ fi
fi
-
+
+ case $host in
+ *-*-rhapsody* | *-*-darwin1.[012])
+ # On Rhapsody replace the C library with the System framework
+ compile_deplibs=`$echo "X $compile_deplibs" | $Xsed -e 's/ -lc / -framework System /'`
+ finalize_deplibs=`$echo "X $finalize_deplibs" | $Xsed -e 's/ -lc / -framework System /'`
+ ;;
+ esac
+
+ compile_command="$compile_command $compile_deplibs"
+ finalize_command="$finalize_command $finalize_deplibs"
+
if test -n "$rpath$xrpath"; then
# If the user specified any rpath flags, then add them.
for libdir in $rpath $xrpath; do
# This is the magic to use -rpath.
- case "$compile_rpath " in
- *" $libdir "*) ;;
- *) compile_rpath="$compile_rpath $libdir" ;;
- esac
case "$finalize_rpath " in
*" $libdir "*) ;;
*) finalize_rpath="$finalize_rpath $libdir" ;;
@@ -2394,7 +3156,7 @@ EOF
hardcode_libdirs="$libdir"
else
# Just accumulate the unique libdirs.
- case "$hardcode_libdir_separator$hardcode_libdirs$hardcode_libdir_separator" in
+ case $hardcode_libdir_separator$hardcode_libdirs$hardcode_libdir_separator in
*"$hardcode_libdir_separator$libdir$hardcode_libdir_separator"*)
;;
*)
@@ -2412,6 +3174,14 @@ EOF
*) perm_rpath="$perm_rpath $libdir" ;;
esac
fi
+ case $host in
+ *-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-os2*)
+ case :$dllsearchpath: in
+ *":$libdir:"*) ;;
+ *) dllsearchpath="$dllsearchpath:$libdir";;
+ esac
+ ;;
+ esac
done
# Substitute the hardcoded libdirs into the rpath.
if test -n "$hardcode_libdir_separator" &&
@@ -2430,7 +3200,7 @@ EOF
hardcode_libdirs="$libdir"
else
# Just accumulate the unique libdirs.
- case "$hardcode_libdir_separator$hardcode_libdirs$hardcode_libdir_separator" in
+ case $hardcode_libdir_separator$hardcode_libdirs$hardcode_libdir_separator in
*"$hardcode_libdir_separator$libdir$hardcode_libdir_separator"*)
;;
*)
@@ -2457,23 +3227,6 @@ EOF
fi
finalize_rpath="$rpath"
- output_objdir=`$echo "X$output" | $Xsed -e 's%/[^/]*$%%'`
- if test "X$output_objdir" = "X$output"; then
- output_objdir="$objdir"
- else
- output_objdir="$output_objdir/$objdir"
- fi
-
- # Create the binary in the object directory, then wrap it.
- if test ! -d $output_objdir; then
- $show "$mkdir $output_objdir"
- $run $mkdir $output_objdir
- status=$?
- if test $status -ne 0 && test ! -d $output_objdir; then
- exit $status
- fi
- fi
-
if test -n "$libobjs" && test "$build_old_libs" = yes; then
# Transform all the library objects into standard objects.
compile_command=`$echo "X$compile_command" | $SP2NL | $Xsed -e "$lo2o" | $NL2SP`
@@ -2490,7 +3243,7 @@ EOF
fi
if test -n "$dlsyms"; then
- case "$dlsyms" in
+ case $dlsyms in
"") ;;
*.c)
# Discover the nlist of each of the dlfiles.
@@ -2522,7 +3275,7 @@ extern \"C\" {
test -z "$run" && $echo ': @PROGRAM@ ' > "$nlist"
# Add our own program objects to the symbol list.
- progfiles=`$echo "X$objs" | $SP2NL | $Xsed -e "$lo2o" | $NL2SP`
+ progfiles=`$echo "X$objs$old_deplibs" | $SP2NL | $Xsed -e "$lo2o" | $NL2SP`
for arg in $progfiles; do
$show "extracting global C symbols from \`$arg'"
$run eval "$NM $arg | $global_symbol_pipe >> '$nlist'"
@@ -2532,7 +3285,7 @@ extern \"C\" {
$run eval 'egrep -v " ($exclude_expsyms)$" "$nlist" > "$nlist"T'
$run eval '$mv "$nlist"T "$nlist"'
fi
-
+
if test -n "$export_symbols_regex"; then
$run eval 'egrep -e "$export_symbols_regex" "$nlist" > "$nlist"T'
$run eval '$mv "$nlist"T "$nlist"'
@@ -2584,27 +3337,25 @@ extern \"C\" {
#undef lt_preloaded_symbols
#if defined (__STDC__) && __STDC__
-# define lt_ptr_t void *
+# define lt_ptr void *
#else
-# define lt_ptr_t char *
+# define lt_ptr char *
# define const
#endif
/* The mapping between symbol names and symbols. */
const struct {
const char *name;
- lt_ptr_t address;
+ lt_ptr address;
}
lt_preloaded_symbols[] =
{\
"
- sed -n -e 's/^: \([^ ]*\) $/ {\"\1\", (lt_ptr_t) 0},/p' \
- -e 's/^. \([^ ]*\) \([^ ]*\)$/ {"\2", (lt_ptr_t) \&\2},/p' \
- < "$nlist" >> "$output_objdir/$dlsyms"
+ eval "$global_symbol_to_c_name_address" < "$nlist" >> "$output_objdir/$dlsyms"
$echo >> "$output_objdir/$dlsyms" "\
- {0, (lt_ptr_t) 0}
+ {0, (lt_ptr) 0}
};
/* This works around a problem in the FreeBSD linker */
@@ -2621,7 +3372,7 @@ static const void *lt_preloaded_setup() {
fi
pic_flag_for_symtable=
- case "$host" in
+ case $host in
# compiling the symbol table file with pic_flag works around
# a FreeBSD bug that causes programs to crash when -lm is
# linked before any other PIC object. But we must not use
@@ -2666,7 +3417,7 @@ static const void *lt_preloaded_setup() {
finalize_command=`$echo "X$finalize_command" | $Xsed -e "s% @SYMFILE@%%"`
fi
- if test -z "$link_against_libtool_libs" || test "$build_libtool_libs" != yes; then
+ if test $need_relink = no || test "$build_libtool_libs" != yes; then
# Replace the output file specification.
compile_command=`$echo "X$compile_command" | $Xsed -e 's%@OUTPUT@%'"$output"'%g'`
link_command="$compile_command$compile_rpath"
@@ -2675,7 +3426,7 @@ static const void *lt_preloaded_setup() {
$show "$link_command"
$run eval "$link_command"
status=$?
-
+
# Delete the generated files.
if test -n "$dlsyms"; then
$show "$rm $output_objdir/${outputname}S.${objext}"
@@ -2689,7 +3440,7 @@ static const void *lt_preloaded_setup() {
# We should set the shlibpath_var
rpath=
for dir in $temp_rpath; do
- case "$dir" in
+ case $dir in
[\\/]* | [A-Za-z]:[\\/]*)
# Absolute path.
rpath="$rpath$dir:"
@@ -2731,11 +3482,24 @@ static const void *lt_preloaded_setup() {
fi
fi
+ if test "$no_install" = yes; then
+ # We don't need to create a wrapper script.
+ link_command="$compile_var$compile_command$compile_rpath"
+ # Replace the output file specification.
+ link_command=`$echo "X$link_command" | $Xsed -e 's%@OUTPUT@%'"$output"'%g'`
+ # Delete the old output file.
+ $run $rm $output
+ # Link the executable and exit
+ $show "$link_command"
+ $run eval "$link_command" || exit $?
+ exit 0
+ fi
+
if test "$hardcode_action" = relink; then
# Fast installation is not supported
link_command="$compile_var$compile_command$compile_rpath"
relink_command="$finalize_var$finalize_command$finalize_rpath"
-
+
$echo "$modename: warning: this platform does not like uninstalled shared libraries" 1>&2
$echo "$modename: \`$output' will be relinked during installation" 1>&2
else
@@ -2755,7 +3519,7 @@ static const void *lt_preloaded_setup() {
# Replace the output file specification.
link_command=`$echo "X$link_command" | $Xsed -e 's%@OUTPUT@%'"$output_objdir/$outputname"'%g'`
-
+
# Delete the old output files.
$run $rm $output $output_objdir/$outputname $output_objdir/lt-$outputname
@@ -2767,12 +3531,24 @@ static const void *lt_preloaded_setup() {
# Quote the relink command for shipping.
if test -n "$relink_command"; then
+ # Preserve any variables that may affect compiler behavior
+ for var in $variables_saved_for_relink; do
+ if eval test -z \"\${$var+set}\"; then
+ relink_command="{ test -z \"\${$var+set}\" || unset $var || { $var=; export $var; }; }; $relink_command"
+ elif eval var_value=\$$var; test -z "$var_value"; then
+ relink_command="$var=; export $var; $relink_command"
+ else
+ var_value=`$echo "X$var_value" | $Xsed -e "$sed_quote_subst"`
+ relink_command="$var=\"$var_value\"; export $var; $relink_command"
+ fi
+ done
+ relink_command="cd `pwd`; $relink_command"
relink_command=`$echo "X$relink_command" | $Xsed -e "$sed_quote_subst"`
fi
# Quote $echo for shipping.
if test "X$echo" = "X$SHELL $0 --fallback-echo"; then
- case "$0" in
+ case $0 in
[\\/]* | [A-Za-z]:[\\/]*) qecho="$SHELL $0 --fallback-echo";;
*) qecho="$SHELL `pwd`/$0 --fallback-echo";;
esac
@@ -2788,6 +3564,11 @@ static const void *lt_preloaded_setup() {
case $output in
*.exe) output=`echo $output|sed 's,.exe$,,'` ;;
esac
+ # test for cygwin because mv fails w/o .exe extensions
+ case $host in
+ *cygwin*) exeext=.exe ;;
+ *) exeext= ;;
+ esac
$rm $output
trap "$rm $output; exit 1" 1 2 15
@@ -2817,7 +3598,7 @@ relink_command=\"$relink_command\"
# This environment variable determines our operation mode.
if test \"\$libtool_install_magic\" = \"$magic\"; then
# install mode needs the following variable:
- link_against_libtool_libs='$link_against_libtool_libs'
+ notinst_deplibs='$notinst_deplibs'
else
# When we are sourced in execute mode, \$file and \$echo are already set.
if test \"\$libtool_execute_magic\" != \"$magic\"; then
@@ -2850,7 +3631,7 @@ else
# If there was a directory component, then change thisdir.
if test \"x\$destdir\" != \"x\$file\"; then
case \"\$destdir\" in
- [\\/]* | [A-Za-z]:[\\/]*) thisdir=\"\$destdir\" ;;
+ [\\\\/]* | [A-Za-z]:[\\\\/]*) thisdir=\"\$destdir\" ;;
*) thisdir=\"\$thisdir/\$destdir\" ;;
esac
fi
@@ -2866,9 +3647,9 @@ else
if test "$fast_install" = yes; then
echo >> $output "\
- program=lt-'$outputname'
+ program=lt-'$outputname'$exeext
progdir=\"\$thisdir/$objdir\"
-
+
if test ! -f \"\$progdir/\$program\" || \\
{ file=\`ls -1dt \"\$progdir/\$program\" \"\$progdir/../\$program\" 2>/dev/null | sed 1q\`; \\
test \"X\$file\" != \"X\$progdir/\$program\"; }; then
@@ -2885,8 +3666,9 @@ else
# relink executable if necessary
if test -n \"\$relink_command\"; then
- if (cd \"\$thisdir\" && eval \$relink_command); then :
+ if relink_command_output=\`eval \$relink_command 2>&1\`; then :
else
+ $echo \"\$relink_command_output\" >&2
$rm \"\$progdir/\$file\"
exit 1
fi
@@ -2935,9 +3717,9 @@ else
# Run the actual program with our arguments.
"
case $host in
- # win32 systems need to use the prog path for dll
- # lookup to work
- *-*-cygwin*)
+ # win32 systems need to use the prog path for dll
+ # lookup to work
+ *-*-cygwin* | *-*-pw32*)
$echo >> $output "\
exec \$progdir/\$program \${1+\"\$@\"}
"
@@ -2991,7 +3773,7 @@ fi\
oldobjs="$libobjs_save"
build_libtool_libs=no
else
- oldobjs="$objs "`$echo "X$libobjs_save" | $SP2NL | $Xsed -e '/\.'${libext}'$/d' -e '/\.lib$/d' -e "$lo2o" | $NL2SP`
+ oldobjs="$objs$old_deplibs "`$echo "X$libobjs_save" | $SP2NL | $Xsed -e '/\.'${libext}'$/d' -e '/\.lib$/d' -e "$lo2o" | $NL2SP`
fi
addlibs="$old_convenience"
fi
@@ -3007,11 +3789,11 @@ fi\
exit $status
fi
generated="$generated $gentop"
-
+
# Add in members from convenience archives.
for xlib in $addlibs; do
# Extract the objects.
- case "$xlib" in
+ case $xlib in
[\\/]* | [A-Za-z]:[\\/]*) xabs="$xlib" ;;
*) xabs=`pwd`"/$xlib" ;;
esac
@@ -3057,7 +3839,7 @@ fi\
eval cmds=\"$old_archive_cmds\"
fi
- IFS="${IFS= }"; save_ifs="$IFS"; IFS='~'
+ save_ifs="$IFS"; IFS='~'
for cmd in $cmds; do
IFS="$save_ifs"
$show "$cmd"
@@ -3072,19 +3854,26 @@ fi\
fi
# Now create the libtool archive.
- case "$output" in
+ case $output in
*.la)
old_library=
test "$build_old_libs" = yes && old_library="$libname.$libext"
$show "creating $output"
- if test -n "$xrpath"; then
- temp_xrpath=
- for libdir in $xrpath; do
- temp_xrpath="$temp_xrpath -R$libdir"
- done
- dependency_libs="$temp_xrpath $dependency_libs"
- fi
+ # Preserve any variables that may affect compiler behavior
+ for var in $variables_saved_for_relink; do
+ if eval test -z \"\${$var+set}\"; then
+ relink_command="{ test -z \"\${$var+set}\" || unset $var || { $var=; export $var; }; }; $relink_command"
+ elif eval var_value=\$$var; test -z "$var_value"; then
+ relink_command="$var=; export $var; $relink_command"
+ else
+ var_value=`$echo "X$var_value" | $Xsed -e "$sed_quote_subst"`
+ relink_command="$var=\"$var_value\"; export $var; $relink_command"
+ fi
+ done
+ # Quote the link command for shipping.
+ relink_command="cd `pwd`; $SHELL $0 --mode=relink $libtool_args"
+ relink_command=`$echo "X$relink_command" | $Xsed -e "$sed_quote_subst"`
# Only create the output if not a dry run.
if test -z "$run"; then
@@ -3094,8 +3883,52 @@ fi\
break
fi
output="$output_objdir/$outputname"i
+ # Replace all uninstalled libtool libraries with the installed ones
+ newdependency_libs=
+ for deplib in $dependency_libs; do
+ case $deplib in
+ *.la)
+ name=`$echo "X$deplib" | $Xsed -e 's%^.*/%%'`
+ eval libdir=`sed -n -e 's/^libdir=\(.*\)$/\1/p' $deplib`
+ if test -z "$libdir"; then
+ $echo "$modename: \`$deplib' is not a valid libtool archive" 1>&2
+ exit 1
+ fi
+ newdependency_libs="$newdependency_libs $libdir/$name"
+ ;;
+ *) newdependency_libs="$newdependency_libs $deplib" ;;
+ esac
+ done
+ dependency_libs="$newdependency_libs"
+ newdlfiles=
+ for lib in $dlfiles; do
+ name=`$echo "X$lib" | $Xsed -e 's%^.*/%%'`
+ eval libdir=`sed -n -e 's/^libdir=\(.*\)$/\1/p' $lib`
+ if test -z "$libdir"; then
+ $echo "$modename: \`$lib' is not a valid libtool archive" 1>&2
+ exit 1
+ fi
+ newdlfiles="$newdlfiles $libdir/$name"
+ done
+ dlfiles="$newdlfiles"
+ newdlprefiles=
+ for lib in $dlprefiles; do
+ name=`$echo "X$lib" | $Xsed -e 's%^.*/%%'`
+ eval libdir=`sed -n -e 's/^libdir=\(.*\)$/\1/p' $lib`
+ if test -z "$libdir"; then
+ $echo "$modename: \`$lib' is not a valid libtool archive" 1>&2
+ exit 1
+ fi
+ newdlprefiles="$newdlprefiles $libdir/$name"
+ done
+ dlprefiles="$newdlprefiles"
fi
$rm $output
+ # place dlname in correct position for cygwin
+ tdlname=$dlname
+ case $host,$output,$installed,$module,$dlname in
+ *cygwin*,*lai,yes,no,*.dll) tdlname=../bin/$dlname ;;
+ esac
$echo > $output "\
# $outputname - a libtool library file
# Generated by $PROGRAM - GNU $PACKAGE $VERSION$TIMESTAMP
@@ -3104,7 +3937,7 @@ fi\
# It is necessary for linking the library.
# The name that we can dlopen(3).
-dlname='$dlname'
+dlname='$tdlname'
# Names of this library.
library_names='$library_names'
@@ -3123,16 +3956,23 @@ revision=$revision
# Is this an already installed library?
installed=$installed
+# Files to dlopen/dlpreopen
+dlopen='$dlfiles'
+dlpreopen='$dlprefiles'
+
# Directory that this library needs to be installed in:
-libdir='$install_libdir'\
-"
+libdir='$install_libdir'"
+ if test "$installed" = no && test $need_relink = yes; then
+ $echo >> $output "\
+relink_command=\"$relink_command\""
+ fi
done
fi
# Do a symbolic link so that the libtool archive can be found in
# LD_LIBRARY_PATH before the program is installed.
$show "(cd $output_objdir && $rm $outputname && $LN_S ../$outputname $outputname)"
- $run eval "(cd $output_objdir && $rm $outputname && $LN_S ../$outputname $outputname)" || exit $?
+ $run eval '(cd $output_objdir && $rm $outputname && $LN_S ../$outputname $outputname)' || exit $?
;;
esac
exit 0
@@ -3144,10 +3984,12 @@ libdir='$install_libdir'\
# There may be an optional sh(1) argument at the beginning of
# install_prog (especially on Windows NT).
- if test "$nonopt" = "$SHELL" || test "$nonopt" = /bin/sh; then
+ if test "$nonopt" = "$SHELL" || test "$nonopt" = /bin/sh ||
+ # Allow the use of GNU shtool's install command.
+ $echo "X$nonopt" | $Xsed | grep shtool > /dev/null; then
# Aesthetically quote it.
arg=`$echo "X$nonopt" | $Xsed -e "$sed_quote_subst"`
- case "$arg" in
+ case $arg in
*[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \ ]*|*]*)
arg="\"$arg\""
;;
@@ -3163,7 +4005,7 @@ libdir='$install_libdir'\
# The real first argument should be the name of the installation program.
# Aesthetically quote it.
arg=`$echo "X$arg" | $Xsed -e "$sed_quote_subst"`
- case "$arg" in
+ case $arg in
*[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \ ]*|*]*)
arg="\"$arg\""
;;
@@ -3186,7 +4028,7 @@ libdir='$install_libdir'\
continue
fi
- case "$arg" in
+ case $arg in
-d) isdir=yes ;;
-f) prev="-f" ;;
-g) prev="-g" ;;
@@ -3211,7 +4053,7 @@ libdir='$install_libdir'\
# Aesthetically quote the argument.
arg=`$echo "X$arg" | $Xsed -e "$sed_quote_subst"`
- case "$arg" in
+ case $arg in
*[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \ ]*|*]*)
arg="\"$arg\""
;;
@@ -3262,11 +4104,11 @@ libdir='$install_libdir'\
exit 1
fi
fi
- case "$destdir" in
+ case $destdir in
[\\/]* | [A-Za-z]:[\\/]*) ;;
*)
for file in $files; do
- case "$file" in
+ case $file in
*.lo) ;;
*)
$echo "$modename: \`$destdir' must be an absolute directory name" 1>&2
@@ -3288,8 +4130,8 @@ libdir='$install_libdir'\
for file in $files; do
# Do each installation.
- case "$file" in
- *.a | *.lib)
+ case $file in
+ *.$libext)
# Do the static libraries later.
staticlibs="$staticlibs $file"
;;
@@ -3305,8 +4147,9 @@ libdir='$install_libdir'\
library_names=
old_library=
+ relink_command=
# If there is no directory component, then add one.
- case "$file" in
+ case $file in
*/* | *\\*) . $file ;;
*) . ./$file ;;
esac
@@ -3325,10 +4168,20 @@ libdir='$install_libdir'\
esac
fi
- dir="`$echo "X$file" | $Xsed -e 's%/[^/]*$%%'`/"
+ dir=`$echo "X$file" | $Xsed -e 's%/[^/]*$%%'`/
test "X$dir" = "X$file/" && dir=
dir="$dir$objdir"
+ if test -n "$relink_command"; then
+ $echo "$modename: warning: relinking \`$file'" 1>&2
+ $show "$relink_command"
+ if $run eval "$relink_command"; then :
+ else
+ $echo "$modename: error: relink \`$file' with the above command before installing it" 1>&2
+ continue
+ fi
+ fi
+
# See the names of the shared library.
set dummy $library_names
if test -n "$2"; then
@@ -3336,9 +4189,16 @@ libdir='$install_libdir'\
shift
shift
+ srcname="$realname"
+ test -n "$relink_command" && srcname="$realname"T
+
# Install the shared library and build the symlinks.
- $show "$install_prog $dir/$realname $destdir/$realname"
- $run eval "$install_prog $dir/$realname $destdir/$realname" || exit $?
+ $show "$install_prog $dir/$srcname $destdir/$realname"
+ $run eval "$install_prog $dir/$srcname $destdir/$realname" || exit $?
+ if test -n "$stripme" && test -n "$striplib"; then
+ $show "$striplib $destdir/$realname"
+ $run eval "$striplib $destdir/$realname" || exit $?
+ fi
if test $# -gt 0; then
# Delete the old symlinks, and create new ones.
@@ -3354,7 +4214,7 @@ libdir='$install_libdir'\
# Do each command in the postinstall commands.
lib="$destdir/$realname"
eval cmds=\"$postinstall_cmds\"
- IFS="${IFS= }"; save_ifs="$IFS"; IFS='~'
+ save_ifs="$IFS"; IFS='~'
for cmd in $cmds; do
IFS="$save_ifs"
$show "$cmd"
@@ -3385,11 +4245,11 @@ libdir='$install_libdir'\
fi
# Deduce the name of the destination old-style object file.
- case "$destfile" in
+ case $destfile in
*.lo)
staticdest=`$echo "X$destfile" | $Xsed -e "$lo2o"`
;;
- *.o | *.obj)
+ *.$objext)
staticdest="$destfile"
destfile=
;;
@@ -3428,39 +4288,46 @@ libdir='$install_libdir'\
# Do a test to see if this is really a libtool program.
if (sed -e '4q' $file | egrep "^# Generated by .*$PACKAGE") >/dev/null 2>&1; then
- link_against_libtool_libs=
+ notinst_deplibs=
relink_command=
# If there is no directory component, then add one.
- case "$file" in
+ case $file in
*/* | *\\*) . $file ;;
*) . ./$file ;;
esac
# Check the variables that should have been set.
- if test -z "$link_against_libtool_libs"; then
+ if test -z "$notinst_deplibs"; then
$echo "$modename: invalid libtool wrapper script \`$file'" 1>&2
exit 1
fi
finalize=yes
- for lib in $link_against_libtool_libs; do
+ for lib in $notinst_deplibs; do
# Check to see that each library is installed.
libdir=
if test -f "$lib"; then
# If there is no directory component, then add one.
- case "$lib" in
+ case $lib in
*/* | *\\*) . $lib ;;
*) . ./$lib ;;
esac
fi
- libfile="$libdir/`$echo "X$lib" | $Xsed -e 's%^.*/%%g'`"
+ libfile="$libdir/"`$echo "X$lib" | $Xsed -e 's%^.*/%%g'` ### testsuite: skip nested quoting test
if test -n "$libdir" && test ! -f "$libfile"; then
$echo "$modename: warning: \`$lib' has not been installed in \`$libdir'" 1>&2
finalize=no
fi
done
+ relink_command=
+ # If there is no directory component, then add one.
+ case $file in
+ */* | *\\*) . $file ;;
+ *) . ./$file ;;
+ esac
+
outputname=
if test "$fast_install" = no && test -n "$relink_command"; then
if test "$finalize" = yes && test -z "$run"; then
@@ -3472,6 +4339,7 @@ libdir='$install_libdir'\
$echo "$modename: error: cannot create temporary directory \`$tmpdir'" 1>&2
continue
fi
+ file=`$echo "X$file" | $Xsed -e 's%^.*/%%'`
outputname="$tmpdir/$file"
# Replace the output file specification.
relink_command=`$echo "X$relink_command" | $Xsed -e 's%@OUTPUT@%'"$outputname"'%g'`
@@ -3493,6 +4361,23 @@ libdir='$install_libdir'\
fi
fi
+ # remove .exe since cygwin /usr/bin/install will append another
+ # one anyway
+ case $install_prog,$host in
+ /usr/bin/install*,*cygwin*)
+ case $file:$destfile in
+ *.exe:*.exe)
+ # this is ok
+ ;;
+ *.exe:*)
+ destfile=$destfile.exe
+ ;;
+ *:*.exe)
+ destfile=`echo $destfile | sed -e 's,.exe$,,'`
+ ;;
+ esac
+ ;;
+ esac
$show "$install_prog$stripme $file $destfile"
$run eval "$install_prog\$stripme \$file \$destfile" || exit $?
test -n "$outputname" && ${rm}r "$tmpdir"
@@ -3509,9 +4394,14 @@ libdir='$install_libdir'\
$show "$install_prog $file $oldlib"
$run eval "$install_prog \$file \$oldlib" || exit $?
+ if test -n "$stripme" && test -n "$striplib"; then
+ $show "$old_striplib $oldlib"
+ $run eval "$old_striplib $oldlib" || exit $?
+ fi
+
# Do each command in the postinstall commands.
eval cmds=\"$old_postinstall_cmds\"
- IFS="${IFS= }"; save_ifs="$IFS"; IFS='~'
+ save_ifs="$IFS"; IFS='~'
for cmd in $cmds; do
IFS="$save_ifs"
$show "$cmd"
@@ -3527,11 +4417,10 @@ libdir='$install_libdir'\
if test -n "$current_libdirs"; then
# Maybe just do a dry run.
test -n "$run" && current_libdirs=" -n$current_libdirs"
- exec $SHELL $0 --finish$current_libdirs
- exit 1
+ exec_cmd='$SHELL $0 --finish$current_libdirs'
+ else
+ exit 0
fi
-
- exit 0
;;
# libtool finish mode
@@ -3550,7 +4439,7 @@ libdir='$install_libdir'\
if test -n "$finish_cmds"; then
# Do each command in the finish commands.
eval cmds=\"$finish_cmds\"
- IFS="${IFS= }"; save_ifs="$IFS"; IFS='~'
+ save_ifs="$IFS"; IFS='~'
for cmd in $cmds; do
IFS="$save_ifs"
$show "$cmd"
@@ -3569,7 +4458,7 @@ libdir='$install_libdir'\
fi
# Exit here if they wanted silent mode.
- test "$show" = : && exit 0
+ test "$show" = ":" && exit 0
echo "----------------------------------------------------------------------"
echo "Libraries have been installed in:"
@@ -3579,7 +4468,7 @@ libdir='$install_libdir'\
echo
echo "If you ever happen to want to link against installed libraries"
echo "in a given directory, LIBDIR, you must either use libtool, and"
- echo "specify the full pathname of the library, or use \`-LLIBDIR'"
+ echo "specify the full pathname of the library, or use the \`-LLIBDIR'"
echo "flag during linking and do at least one of the following:"
if test -n "$shlibpath_var"; then
echo " - add LIBDIR to the \`$shlibpath_var' environment variable"
@@ -3629,7 +4518,7 @@ libdir='$install_libdir'\
fi
dir=
- case "$file" in
+ case $file in
*.la)
# Check to see that this really is a libtool archive.
if (sed -e '2q' $file | egrep "^# Generated by .*$PACKAGE") >/dev/null 2>&1; then :
@@ -3644,7 +4533,7 @@ libdir='$install_libdir'\
library_names=
# If there is no directory component, then add one.
- case "$file" in
+ case $file in
*/* | *\\*) . $file ;;
*) . ./$file ;;
esac
@@ -3699,13 +4588,13 @@ libdir='$install_libdir'\
args=
for file
do
- case "$file" in
+ case $file in
-*) ;;
*)
# Do a test to see if this is really a libtool program.
if (sed -e '4q' $file | egrep "^# Generated by .*$PACKAGE") >/dev/null 2>&1; then
# If there is no directory component, then add one.
- case "$file" in
+ case $file in
*/* | *\\*) . $file ;;
*) . ./$file ;;
esac
@@ -3722,8 +4611,8 @@ libdir='$install_libdir'\
if test -z "$run"; then
if test -n "$shlibpath_var"; then
- # Export the shlibpath_var.
- eval "export $shlibpath_var"
+ # Export the shlibpath_var.
+ eval "export $shlibpath_var"
fi
# Restore saved environment variables
@@ -3734,31 +4623,35 @@ libdir='$install_libdir'\
LANG="$save_LANG"; export LANG
fi
- # Now actually exec the command.
- eval "exec \$cmd$args"
-
- $echo "$modename: cannot exec \$cmd$args"
- exit 1
+ # Now prepare to actually exec the command.
+ exec_cmd='"$cmd"$args'
else
# Display what would be done.
if test -n "$shlibpath_var"; then
- eval "\$echo \"\$shlibpath_var=\$$shlibpath_var\""
- $echo "export $shlibpath_var"
+ eval "\$echo \"\$shlibpath_var=\$$shlibpath_var\""
+ $echo "export $shlibpath_var"
fi
$echo "$cmd$args"
exit 0
fi
;;
- # libtool uninstall mode
- uninstall)
- modename="$modename: uninstall"
+ # libtool clean and uninstall mode
+ clean | uninstall)
+ modename="$modename: $mode"
rm="$nonopt"
files=
+ rmforce=
+ exit_status=0
+
+ # This variable tells wrapper scripts just to set variables rather
+ # than running their programs.
+ libtool_install_magic="$magic"
for arg
do
- case "$arg" in
+ case $arg in
+ -f) rm="$rm $arg"; rmforce=yes ;;
-*) rm="$rm $arg" ;;
*) files="$files $arg" ;;
esac
@@ -3770,14 +4663,42 @@ libdir='$install_libdir'\
exit 1
fi
+ rmdirs=
+
for file in $files; do
dir=`$echo "X$file" | $Xsed -e 's%/[^/]*$%%'`
- test "X$dir" = "X$file" && dir=.
+ if test "X$dir" = "X$file"; then
+ dir=.
+ objdir="$objdir"
+ else
+ objdir="$dir/$objdir"
+ fi
name=`$echo "X$file" | $Xsed -e 's%^.*/%%'`
+ test $mode = uninstall && objdir="$dir"
+
+ # Remember objdir for removal later, being careful to avoid duplicates
+ if test $mode = clean; then
+ case " $rmdirs " in
+ *" $objdir "*) ;;
+ *) rmdirs="$rmdirs $objdir" ;;
+ esac
+ fi
+
+ # Don't error if the file doesn't exist and rm -f was used.
+ if (test -L "$file") >/dev/null 2>&1 \
+ || (test -h "$file") >/dev/null 2>&1 \
+ || test -f "$file"; then
+ :
+ elif test -d "$file"; then
+ exit_status=1
+ continue
+ elif test "$rmforce" = yes; then
+ continue
+ fi
rmfiles="$file"
- case "$name" in
+ case $name in
*.la)
# Possibly a libtool archive, so verify it.
if (sed -e '2q' $file | egrep "^# Generated by .*$PACKAGE") >/dev/null 2>&1; then
@@ -3785,38 +4706,43 @@ libdir='$install_libdir'\
# Delete the libtool libraries and symlinks.
for n in $library_names; do
- rmfiles="$rmfiles $dir/$n"
+ rmfiles="$rmfiles $objdir/$n"
done
- test -n "$old_library" && rmfiles="$rmfiles $dir/$old_library"
-
- $show "$rm $rmfiles"
- $run $rm $rmfiles
-
- if test -n "$library_names"; then
- # Do each command in the postuninstall commands.
- eval cmds=\"$postuninstall_cmds\"
- IFS="${IFS= }"; save_ifs="$IFS"; IFS='~'
- for cmd in $cmds; do
+ test -n "$old_library" && rmfiles="$rmfiles $objdir/$old_library"
+ test $mode = clean && rmfiles="$rmfiles $objdir/$name $objdir/${name}i"
+
+ if test $mode = uninstall; then
+ if test -n "$library_names"; then
+ # Do each command in the postuninstall commands.
+ eval cmds=\"$postuninstall_cmds\"
+ save_ifs="$IFS"; IFS='~'
+ for cmd in $cmds; do
+ IFS="$save_ifs"
+ $show "$cmd"
+ $run eval "$cmd"
+ if test $? != 0 && test "$rmforce" != yes; then
+ exit_status=1
+ fi
+ done
IFS="$save_ifs"
- $show "$cmd"
- $run eval "$cmd"
- done
- IFS="$save_ifs"
- fi
+ fi
- if test -n "$old_library"; then
- # Do each command in the old_postuninstall commands.
- eval cmds=\"$old_postuninstall_cmds\"
- IFS="${IFS= }"; save_ifs="$IFS"; IFS='~'
- for cmd in $cmds; do
+ if test -n "$old_library"; then
+ # Do each command in the old_postuninstall commands.
+ eval cmds=\"$old_postuninstall_cmds\"
+ save_ifs="$IFS"; IFS='~'
+ for cmd in $cmds; do
+ IFS="$save_ifs"
+ $show "$cmd"
+ $run eval "$cmd"
+ if test $? != 0 && test "$rmforce" != yes; then
+ exit_status=1
+ fi
+ done
IFS="$save_ifs"
- $show "$cmd"
- $run eval "$cmd"
- done
- IFS="$save_ifs"
+ fi
+ # FIXME: should reinstall the best remaining shared library.
fi
-
- # FIXME: should reinstall the best remaining shared library.
fi
;;
@@ -3825,17 +4751,35 @@ libdir='$install_libdir'\
oldobj=`$echo "X$name" | $Xsed -e "$lo2o"`
rmfiles="$rmfiles $dir/$oldobj"
fi
- $show "$rm $rmfiles"
- $run $rm $rmfiles
;;
*)
- $show "$rm $rmfiles"
- $run $rm $rmfiles
+ # Do a test to see if this is a libtool program.
+ if test $mode = clean &&
+ (sed -e '4q' $file | egrep "^# Generated by .*$PACKAGE") >/dev/null 2>&1; then
+ relink_command=
+ . $dir/$file
+
+ rmfiles="$rmfiles $objdir/$name $objdir/${name}S.${objext}"
+ if test "$fast_install" = yes && test -n "$relink_command"; then
+ rmfiles="$rmfiles $objdir/lt-$name"
+ fi
+ fi
;;
esac
+ $show "$rm $rmfiles"
+ $run $rm $rmfiles || exit_status=1
done
- exit 0
+
+ # Try to remove the ${objdir}s in the directories where we deleted files
+ for dir in $rmdirs; do
+ if test -d "$dir"; then
+ $show "rmdir $dir"
+ $run rmdir $dir >/dev/null 2>&1
+ fi
+ done
+
+ exit $exit_status
;;
"")
@@ -3845,13 +4789,20 @@ libdir='$install_libdir'\
;;
esac
- $echo "$modename: invalid operation mode \`$mode'" 1>&2
- $echo "$generic_help" 1>&2
- exit 1
+ if test -z "$exec_cmd"; then
+ $echo "$modename: invalid operation mode \`$mode'" 1>&2
+ $echo "$generic_help" 1>&2
+ exit 1
+ fi
fi # test -z "$show_help"
+if test -n "$exec_cmd"; then
+ eval exec $exec_cmd
+ exit 1
+fi
+
# We need to display help for each of the modes.
-case "$mode" in
+case $mode in
"") $echo \
"Usage: $modename [OPTION]... [MODE-ARG]...
@@ -3870,6 +4821,7 @@ Provide generalized library-building support services.
MODE must be one of the following:
+ clean remove files from the build directory
compile compile a source file into a libtool object
execute automatically set library path, then run a program
finish complete the installation of libtool libraries
@@ -3882,6 +4834,20 @@ a more detailed description of MODE."
exit 0
;;
+clean)
+ $echo \
+"Usage: $modename [OPTION]... --mode=clean RM [RM-OPTION]... FILE...
+
+Remove files from the build directory.
+
+RM is the name of the program to use to delete files associated with each FILE
+(typically \`/bin/rm'). RM-OPTIONS are options (such as \`-f') to be passed
+to RM.
+
+If FILE is a libtool library, object or program, all the files associated
+with it are deleted. Otherwise, only FILE itself is deleted using RM."
+ ;;
+
compile)
$echo \
"Usage: $modename [OPTION]... --mode=compile COMPILE-COMMAND... SOURCEFILE
@@ -3891,6 +4857,8 @@ Compile a source file into a libtool library object.
This mode accepts the following additional options:
-o OUTPUT-FILE set the output file name to OUTPUT-FILE
+ -prefer-pic try to build PIC objects only
+ -prefer-non-pic try to build non-PIC objects only
-static always build a \`.o' file suitable for static linking
COMPILE-COMMAND is a command to be used in creating a \`standard' object file
@@ -3969,7 +4937,10 @@ The following components of LINK-COMMAND are treated specially:
try to export only the symbols matching REGEX
-LLIBDIR search LIBDIR for required installed libraries
-lNAME OUTPUT-FILE requires the installed library libNAME
+ -jnimodule build a library that can be dlopened via Java JNI
-module build a library that can be dlopened
+ -no-fast-install disable the fast-install mode
+ -no-install link a non-installable executable
-no-undefined declare that a library does not refer to external symbols
-o OUTPUT-FILE create OUTPUT-FILE from the specified objects
-release RELEASE specify package release information
@@ -4026,4 +4997,3 @@ exit 0
# mode:shell-script
# sh-indentation:2
# End:
-#! /bin/bash
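For orientation, the hunks above add a clean mode and several new link-mode flags to ltmain.sh. A minimal usage sketch of those modes (foo.c, foo.lo, libfoo.la, and the install prefix are hypothetical names, not taken from this patch):

  libtool --mode=compile cc -c foo.c
  libtool --mode=link cc -o libfoo.la foo.lo -rpath /usr/local/lib -no-undefined
  libtool --mode=clean rm -f foo.lo libfoo.la

Per the help text above, clean mode deletes each FILE together with the files libtool created for it in the build directory, while uninstall keeps its existing behaviour through the same combined code path.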
diff --git a/bdb/dist/pubdef.in b/bdb/dist/pubdef.in
new file mode 100644
index 00000000000..f42363022cd
--- /dev/null
+++ b/bdb/dist/pubdef.in
@@ -0,0 +1,350 @@
+# Name
+# D == documentation
+# I == include file
+# C == Java case value (declared and initialized)
+# J == Java constant (declared only)
+DB_AFTER D I J
+DB_AGGRESSIVE D I J
+DB_ALREADY_ABORTED * I *
+DB_AM_CHKSUM * I *
+DB_AM_CL_WRITER * I *
+DB_AM_COMPENSATE * I *
+DB_AM_CREATED * I *
+DB_AM_CREATED_MSTR * I *
+DB_AM_DBM_ERROR * I *
+DB_AM_DELIMITER * I *
+DB_AM_DIRTY * I *
+DB_AM_DISCARD * I *
+DB_AM_DUP * I *
+DB_AM_DUPSORT * I *
+DB_AM_ENCRYPT * I *
+DB_AM_FIXEDLEN * I *
+DB_AM_INMEM * I *
+DB_AM_IN_RENAME * I *
+DB_AM_OPEN_CALLED * I *
+DB_AM_PAD * I *
+DB_AM_PGDEF * I *
+DB_AM_RDONLY * I *
+DB_AM_RECNUM * I *
+DB_AM_RECOVER * I *
+DB_AM_RENUMBER * I *
+DB_AM_REVSPLITOFF * I *
+DB_AM_SECONDARY * I *
+DB_AM_SNAPSHOT * I *
+DB_AM_SUBDB * I *
+DB_AM_SWAP * I *
+DB_AM_TXN * I *
+DB_AM_VERIFYING * I *
+DB_APPEND D I J
+DB_ARCH_ABS D I J
+DB_ARCH_DATA D I J
+DB_ARCH_LOG D I J
+DB_AUTO_COMMIT D I J
+DB_BEFORE D I J
+DB_BTREE D I C
+DB_BTREEMAGIC * I *
+DB_BTREEOLDVER * I *
+DB_BTREEVERSION * I *
+DB_CACHED_COUNTS * I J
+DB_CDB_ALLDB D I J
+DB_CHKSUM_SHA1 D I J
+DB_CLIENT D I J
+DB_COMMIT * I *
+DB_CONFIG D * *
+DB_CONSUME D I J
+DB_CONSUME_WAIT D I J
+DB_CREATE D I J
+DB_CURRENT D I J
+DB_CXX_NO_EXCEPTIONS D I J
+DB_DBM_HSEARCH * I *
+DB_DBT_APPMALLOC D I *
+DB_DBT_DUPOK * I *
+DB_DBT_ISSET * I *
+DB_DBT_MALLOC D I J
+DB_DBT_PARTIAL D I J
+DB_DBT_REALLOC D I J
+DB_DBT_USERMEM D I J
+DB_DELETED * I *
+DB_DIRECT D I J
+DB_DIRECT_DB D I J
+DB_DIRECT_LOG D I J
+DB_DIRTY_READ D I J
+DB_DONOTINDEX D I C
+DB_DUP D I J
+DB_DUPSORT D I J
+DB_EID_BROADCAST D I J
+DB_EID_INVALID D I J
+DB_ENCRYPT D I J
+DB_ENCRYPT_AES D I J
+DB_ENV_AUTO_COMMIT * I *
+DB_ENV_CDB * I *
+DB_ENV_CDB_ALLDB * I *
+DB_ENV_CREATE * I *
+DB_ENV_DBLOCAL * I *
+DB_ENV_DIRECT_DB * I *
+DB_ENV_DIRECT_LOG * I *
+DB_ENV_FATAL * I *
+DB_ENV_LOCKDOWN * I *
+DB_ENV_NOLOCKING * I *
+DB_ENV_NOMMAP * I *
+DB_ENV_NOPANIC * I *
+DB_ENV_OPEN_CALLED * I *
+DB_ENV_OVERWRITE * I *
+DB_ENV_PRIVATE * I *
+DB_ENV_REGION_INIT * I *
+DB_ENV_REP_CLIENT * I *
+DB_ENV_REP_LOGSONLY * I *
+DB_ENV_REP_MASTER * I *
+DB_ENV_RPCCLIENT * I *
+DB_ENV_RPCCLIENT_GIVEN * I *
+DB_ENV_SYSTEM_MEM * I *
+DB_ENV_THREAD * I *
+DB_ENV_TXN_NOSYNC * I *
+DB_ENV_TXN_WRITE_NOSYNC * I *
+DB_ENV_YIELDCPU * I *
+DB_EXCL D I J
+DB_EXTENT * I *
+DB_FAST_STAT D I J
+DB_FCNTL_LOCKING * I *
+DB_FILE_ID_LEN * I *
+DB_FIRST D I J
+DB_FLUSH D I J
+DB_FORCE D I J
+DB_GET_BOTH D I J
+DB_GET_BOTHC * I *
+DB_GET_BOTH_RANGE D I J
+DB_GET_RECNO D I J
+DB_HANDLE_LOCK * I *
+DB_HASH D I C
+DB_HASHMAGIC * I *
+DB_HASHOLDVER * I *
+DB_HASHVERSION * I *
+DB_HOME D * *
+DB_INIT_CDB D I J
+DB_INIT_LOCK D I J
+DB_INIT_LOG D I J
+DB_INIT_MPOOL D I J
+DB_INIT_TXN D I J
+DB_JAVA_CALLBACK * I *
+DB_JOINENV D I J
+DB_JOIN_ITEM D I J
+DB_JOIN_NOSORT D I J
+DB_KEYEMPTY D I C
+DB_KEYEXIST D I C
+DB_KEYFIRST D I J
+DB_KEYLAST D I J
+DB_LAST D I J
+DB_LOCKDOWN D I J
+DB_LOCKVERSION * I *
+DB_LOCK_DEADLOCK D I C
+DB_LOCK_DEFAULT D I J
+DB_LOCK_DIRTY * I *
+DB_LOCK_DUMP * I *
+DB_LOCK_EXPIRE D I J
+DB_LOCK_FREE_LOCKER * I *
+DB_LOCK_GET D I J
+DB_LOCK_GET_TIMEOUT D I J
+DB_LOCK_INHERIT * I *
+DB_LOCK_IREAD D I J
+DB_LOCK_IWR D I J
+DB_LOCK_IWRITE D I J
+DB_LOCK_MAXLOCKS D I J
+DB_LOCK_MINLOCKS D I J
+DB_LOCK_MINWRITE D I J
+DB_LOCK_NG * I *
+DB_LOCK_NORUN * I *
+DB_LOCK_NOTEXIST * I *
+DB_LOCK_NOTGRANTED D I C
+DB_LOCK_NOWAIT D I J
+DB_LOCK_OLDEST D I J
+DB_LOCK_PUT D I J
+DB_LOCK_PUT_ALL D I J
+DB_LOCK_PUT_OBJ D I J
+DB_LOCK_PUT_READ * I *
+DB_LOCK_RANDOM D I J
+DB_LOCK_READ D I J
+DB_LOCK_RECORD * I *
+DB_LOCK_REMOVE * I *
+DB_LOCK_SET_TIMEOUT * I *
+DB_LOCK_SWITCH * I *
+DB_LOCK_TIMEOUT D I J
+DB_LOCK_TRADE * I *
+DB_LOCK_UPGRADE * I *
+DB_LOCK_UPGRADE_WRITE * I *
+DB_LOCK_WAIT * I *
+DB_LOCK_WRITE D I J
+DB_LOCK_WWRITE * I *
+DB_LOCK_YOUNGEST D I J
+DB_LOGC_BUF_SIZE * I *
+DB_LOGFILEID_INVALID * I *
+DB_LOGMAGIC * I *
+DB_LOGOLDVER * I *
+DB_LOGVERSION * I *
+DB_LOG_DISK * I *
+DB_LOG_LOCKED * I *
+DB_LOG_SILENT_ERR * I *
+DB_LSTAT_ABORTED * I *
+DB_LSTAT_ERR * I *
+DB_LSTAT_EXPIRED * I *
+DB_LSTAT_FREE * I *
+DB_LSTAT_HELD * I *
+DB_LSTAT_NOTEXIST * I *
+DB_LSTAT_PENDING * I *
+DB_LSTAT_WAITING * I *
+DB_MAX_PAGES * I *
+DB_MAX_RECORDS * I *
+DB_MPOOL_CLEAN D I *
+DB_MPOOL_CREATE D I *
+DB_MPOOL_DIRTY D I *
+DB_MPOOL_DISCARD D I *
+DB_MPOOL_LAST D I *
+DB_MPOOL_NEW D I *
+DB_MULTIPLE D I J
+DB_MULTIPLE_INIT D I *
+DB_MULTIPLE_KEY D I J
+DB_MULTIPLE_KEY_NEXT D I *
+DB_MULTIPLE_NEXT D I *
+DB_MULTIPLE_RECNO_NEXT D I *
+DB_NEEDSPLIT * I *
+DB_NEXT D I J
+DB_NEXT_DUP D I J
+DB_NEXT_NODUP D I J
+DB_NOCOPY * I *
+DB_NODUPDATA D I J
+DB_NOLOCKING D I J
+DB_NOMMAP D I J
+DB_NOORDERCHK D I J
+DB_NOOVERWRITE D I J
+DB_NOPANIC D I J
+DB_NOSERVER D I C
+DB_NOSERVER_HOME D I C
+DB_NOSERVER_ID D I C
+DB_NOSYNC D I J
+DB_NOTFOUND D I C
+DB_ODDFILESIZE D I J
+DB_OK_BTREE * I *
+DB_OK_HASH * I *
+DB_OK_QUEUE * I *
+DB_OK_RECNO * I *
+DB_OLD_VERSION D I C
+DB_OPFLAGS_MASK * I *
+DB_ORDERCHKONLY D I J
+DB_OVERWRITE D I J
+DB_PAGE_LOCK * I *
+DB_PAGE_NOTFOUND D I C
+DB_PANIC_ENVIRONMENT D I J
+DB_PERMANENT * I *
+DB_POSITION D I J
+DB_POSITIONI * I *
+DB_PREV D I J
+DB_PREV_NODUP D I J
+DB_PRINTABLE D I J
+DB_PRIORITY_DEFAULT D I J
+DB_PRIORITY_HIGH D I J
+DB_PRIORITY_LOW D I J
+DB_PRIORITY_VERY_HIGH D I J
+DB_PRIORITY_VERY_LOW D I J
+DB_PRIVATE D I J
+DB_PR_PAGE * I *
+DB_PR_RECOVERYTEST * I *
+DB_QAMMAGIC * I *
+DB_QAMOLDVER * I *
+DB_QAMVERSION * I *
+DB_QUEUE D I C
+DB_RDONLY D I J
+DB_RDWRMASTER * I *
+DB_RECNO D I C
+DB_RECNUM D I J
+DB_RECORDCOUNT * I J
+DB_RECORD_LOCK * I *
+DB_RECOVER D I J
+DB_RECOVER_FATAL D I J
+DB_REDO * I *
+DB_REGION_INIT D I J
+DB_REGION_MAGIC * I *
+DB_RENAMEMAGIC * I *
+DB_RENUMBER D I J
+DB_REP_CLIENT D I J
+DB_REP_DUPMASTER D I C
+DB_REP_HOLDELECTION D I C
+DB_REP_LOGSONLY D I J
+DB_REP_MASTER D I J
+DB_REP_NEWMASTER D I C
+DB_REP_NEWSITE D I C
+DB_REP_OUTDATED D I C
+DB_REP_PERMANENT D I J
+DB_REP_UNAVAIL D I J
+DB_REVSPLITOFF D I J
+DB_RMW D I J
+DB_RUNRECOVERY D I C
+DB_SALVAGE D I J
+DB_SECONDARY_BAD D I C
+DB_SET D I J
+DB_SET_LOCK_TIMEOUT D I J
+DB_SET_RANGE D I J
+DB_SET_RECNO D I J
+DB_SET_TXN_NOW * I *
+DB_SET_TXN_TIMEOUT D I J
+DB_SNAPSHOT D I J
+DB_STAT_CLEAR D I J
+DB_SURPRISE_KID * I *
+DB_SWAPBYTES * I *
+DB_SYSTEM_MEM D I J
+DB_TEST_ELECTINIT * I *
+DB_TEST_ELECTSEND * I *
+DB_TEST_ELECTVOTE1 * I *
+DB_TEST_ELECTVOTE2 * I *
+DB_TEST_ELECTWAIT1 * I *
+DB_TEST_ELECTWAIT2 * I *
+DB_TEST_POSTDESTROY * I *
+DB_TEST_POSTLOG * I *
+DB_TEST_POSTLOGMETA * I *
+DB_TEST_POSTOPEN * I *
+DB_TEST_POSTSYNC * I *
+DB_TEST_PREDESTROY * I *
+DB_TEST_PREOPEN * I *
+DB_TEST_SUBDB_LOCKS * I *
+DB_THREAD D I J
+DB_TIMEOUT * I *
+DB_TRUNCATE D I J
+DB_TXNVERSION * I *
+DB_TXN_ABORT D I C
+DB_TXN_APPLY D I C
+DB_TXN_BACKWARD_ALLOC * I *
+DB_TXN_BACKWARD_ROLL D I C
+DB_TXN_CKP * I *
+DB_TXN_FORWARD_ROLL D I C
+DB_TXN_GETPGNOS * I *
+DB_TXN_LOCK * I *
+DB_TXN_NOSYNC D I J
+DB_TXN_NOWAIT D I J
+DB_TXN_OPENFILES * I *
+DB_TXN_POPENFILES * I *
+DB_TXN_PRINT D I C
+DB_TXN_SYNC D I J
+DB_TXN_WRITE_NOSYNC D I J
+DB_UNDO * I *
+DB_UNKNOWN D I C
+DB_UPDATE_SECONDARY * I *
+DB_UPGRADE D I J
+DB_USE_ENVIRON D I J
+DB_USE_ENVIRON_ROOT D I J
+DB_VERB_CHKPOINT D I J
+DB_VERB_DEADLOCK D I J
+DB_VERB_RECOVERY D I J
+DB_VERB_REPLICATION D I J
+DB_VERB_WAITSFOR D I J
+DB_VERIFY D I J
+DB_VERIFY_BAD D I C
+DB_VERIFY_FATAL * I *
+DB_VERSION_MAJOR * I J
+DB_VERSION_MINOR * I J
+DB_VERSION_PATCH * I J
+DB_VERSION_STRING * I *
+DB_WRITECURSOR D I J
+DB_WRITELOCK * I *
+DB_WRITEOPEN * I *
+DB_WRNOSYNC * I *
+DB_XA_CREATE D I J
+DB_XIDDATASIZE D I J
+DB_YIELDCPU D I J
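The flag letters in the pubdef.in table above are not explained in this hunk; judging from how dist/s_java (later in this patch) consumes the file, the final column controls how each name reaches the Java API: names ending in J are compiled into DbConstants.java, names ending in C are written directly into Db.java, and * means the name is not exported that way. A minimal sketch of the same selection, assuming pubdef.in is in the current directory:

    # Names flagged J become DbConstants.java entries; names flagged C are
    # emitted straight into Db.java (see the dist/s_java diff further down).
    egrep '^DB_.*J$' pubdef.in | awk '{print $1}'
    egrep '^DB_.*C$' pubdef.in | awk '{print $1}'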
diff --git a/bdb/dist/s_all b/bdb/dist/s_all
index dab6c75913b..c0e3ac72f3a 100644
--- a/bdb/dist/s_all
+++ b/bdb/dist/s_all
@@ -1,16 +1,21 @@
#!/bin/sh -
-# $Id: s_all,v 1.7 2000/12/22 15:35:32 bostic Exp $
+# $Id: s_all,v 1.10 2001/08/04 14:01:44 bostic Exp $
sh s_perm # permissions.
sh s_symlink # symbolic links.
-sh s_config # autoconf.
sh s_readme # db/README file.
+
+#
+# The following order is important, s_include must run last.
+#
+sh s_config # autoconf.
sh s_recover # logging/recovery files.
sh s_rpc # RPC files.
sh s_include # standard include files.
+
sh s_win32 # Win32 include files.
sh s_win32_dsp # Win32 build environment.
sh s_vxworks # VxWorks include files.
sh s_java # Java support.
-sh s_tcl # Tcl support.
+sh s_test # Test suite support.
sh s_tags # Tags files.
diff --git a/bdb/dist/s_config b/bdb/dist/s_config
index 870109c38f9..3e033da81ab 100755
--- a/bdb/dist/s_config
+++ b/bdb/dist/s_config
@@ -1,15 +1,16 @@
#!/bin/sh -
-# $Id: s_config,v 1.3 2000/07/13 18:38:46 bostic Exp $
+# $Id: s_config,v 1.9 2002/05/20 19:18:13 bostic Exp $
#
# Build the autoconfiguration files.
-msgm4="dnl DO NOT EDIT: automatically built by dist/s_config."
+trap 'rm -f aclocal.m4 ; exit 0' 0 1 2 3 13 15
+
+msgac="# DO NOT EDIT: automatically built by dist/s_config."
. ./RELEASE
echo "Building aclocal.m4"
-rm -f aclocal.m4
-(echo "$msgm4" &&
+(echo "$msgac" &&
echo "AC_DEFUN(AM_VERSION_SET, [" &&
echo "AC_SUBST(DB_VERSION_MAJOR)" &&
echo "DB_VERSION_MAJOR=$DB_VERSION_MAJOR" &&
@@ -17,21 +18,28 @@ rm -f aclocal.m4
echo "DB_VERSION_MINOR=$DB_VERSION_MINOR" &&
echo "AC_SUBST(DB_VERSION_PATCH)" &&
echo "DB_VERSION_PATCH=$DB_VERSION_PATCH" &&
+ echo "AC_SUBST(DB_VERSION_UNIQUE_NAME)" &&
+ echo "DB_VERSION_UNIQUE_NAME=$DB_VERSION_UNIQUE_NAME" &&
echo "AC_SUBST(DB_VERSION_STRING)" &&
echo "DB_VERSION_STRING=\"\\\"$DB_VERSION_STRING\\\"\"" &&
- echo "])dnl" &&
- cat aclocal/*.m4) > aclocal.m4
-chmod 444 aclocal.m4
+ echo "])" &&
+ cat aclocal/*.ac aclocal_java/*.ac) > aclocal.m4
+echo "Running autoheader to build config.hin"
rm -f config.hin
-echo "Building config.hin (autoheader)"
-(autoheader configure.in > config.hin) 2>&1 | \
- sed '/warning: AC_TRY_RUN called without default/d'
+autoheader
chmod 444 config.hin
+echo "Running autoconf to build configure"
rm -f configure
-echo "Building configure (autoconf)"
-autoconf 2>&1 | sed '/warning: AC_TRY_RUN called without default/d'
+autoconf
+
+# Edit version information we couldn't pre-compute.
+(echo "1,\$s/__EDIT_DB_VERSION__/$DB_VERSION/g" &&
+ echo "w" &&
+ echo "q") | ed configure
+
+rm -rf autom4te.cache
+chmod 555 configure
-chmod 555 configure config.guess config.sub install-sh
-rm -f aclocal.m4
+chmod 555 config.guess config.sub install-sh
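The rewritten s_config runs autoheader and autoconf directly, then uses a scripted ed session to substitute version information that could not be pre-computed into the generated configure. A minimal standalone sketch of that non-interactive ed idiom, with an illustrative file name and placeholder:

    # Non-interactive ed: feed it the commands a user would type.
    # "myfile" and "__PLACEHOLDER__" are illustrative, not from the tree.
    (echo '1,$s/__PLACEHOLDER__/4.1.24/g' &&
     echo 'w' &&
     echo 'q') | ed myfile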
diff --git a/bdb/dist/s_crypto b/bdb/dist/s_crypto
new file mode 100644
index 00000000000..be7e5de0474
--- /dev/null
+++ b/bdb/dist/s_crypto
@@ -0,0 +1,57 @@
+#!/bin/sh -
+# $Id: s_crypto,v 11.5 2002/09/13 13:14:14 bostic Exp $
+
+# Remove crypto from the DB source tree.
+
+d=..
+
+t=/tmp/__db_a
+trap 'rm -f $t ; exit 0' 0
+trap 'rm -f $t ; exit 1' 1 2 3 13 15
+
+if ! test -d $d/crypto; then
+ echo "s_crypto: no crypto sources found in the source tree."
+ exit 1
+fi
+
+# Remove the crypto.
+rm -rf $d/crypto
+
+# Update the docs.
+f=$d/docs/ref/env/encrypt.html
+chmod 664 $f
+(echo '/DOES/' &&
+ echo 's/DOES/DOES NOT/' &&
+ echo 'w' &&
+ echo 'q') | ed $f
+
+# Win/32.
+f=win_config.in
+chmod 664 $f
+(echo '/#define.HAVE_CRYPTO/' &&
+ echo 'c' &&
+ echo '/* #undef HAVE_CRYPTO */'
+ echo '.' &&
+ echo 'w' &&
+ echo 'q') | ed $f
+
+f=srcfiles.in
+chmod 664 $f
+(echo 'g/^crypto\//d' &&
+ echo 'w' &&
+ echo 'q') | ed $f
+
+ sh ./s_win32
+ sh ./s_win32_dsp
+
+# VxWorks
+f=vx_config.in
+chmod 664 $f
+(echo '/#define.HAVE_CRYPTO/' &&
+ echo 'c' &&
+ echo '/* #undef HAVE_CRYPTO */'
+ echo '.' &&
+ echo 'w' &&
+ echo 'q') | ed $f
+
+ sh ./s_vxworks
diff --git a/bdb/dist/s_include b/bdb/dist/s_include
index fee6e50330f..44bfce30ee7 100755
--- a/bdb/dist/s_include
+++ b/bdb/dist/s_include
@@ -1,33 +1,160 @@
#!/bin/sh -
-# $Id: s_include,v 1.7 2000/07/13 18:38:46 bostic Exp $
+# $Id: s_include,v 1.19 2002/03/27 04:31:50 bostic Exp $
#
# Build the automatically generated function prototype files.
msgc="/* DO NOT EDIT: automatically built by dist/s_include. */"
-cxx_if="#if defined(__cplusplus)"
-cxx_head="extern \"C\" {"
-cxx_foot="}"
-cxx_endif="#endif"
-
-tmp=/tmp/__db_inc.$$
-trap 'rm -f $tmp ; exit 0' 0 1 2 3 13 15
-
-for i in db btree clib common env hash \
- lock log mp mutex os qam rpc_client rpc_server tcl txn xa; do
- f=../include/${i}_ext.h
- (echo "$msgc" &&
- echo "#ifndef _${i}_ext_h_" &&
- echo "#define _${i}_ext_h_" &&
- echo "$cxx_if" &&
- echo "$cxx_head" &&
- echo "$cxx_endif" &&
- sed -n "s/^ \* PUBLIC:[ ]\(.*\)/\1/p" ../$i/*.c;
- [ $i = os ] &&
- sed -n "s/^ \* PUBLIC:[ ]\(.*\)/\1/p" ../os_win32/*.c;
- echo "$cxx_if" &&
- echo "$cxx_foot" &&
- echo "$cxx_endif" &&
- echo "#endif /* _${i}_ext_h_ */") > $tmp
- cmp $tmp $f > /dev/null 2>&1 ||
- (echo "Building $f" && rm -f $f && cp $tmp $f && chmod 444 $f)
+
+. ./RELEASE
+
+head()
+{
+ defonly=0
+ while :
+ do case "$1" in
+ space)
+ echo ""; shift;;
+ defonly)
+ defonly=1; shift;;
+ *)
+ name="$1"; break;;
+ esac
+ done
+
+ echo "$msgc"
+ echo "#ifndef $name"
+ echo "#define $name"
+ echo ""
+ if [ $defonly -eq 0 ]; then
+ echo "#if defined(__cplusplus)"
+ echo "extern \"C\" {"
+ echo "#endif"
+ echo ""
+ fi
+}
+
+tail()
+{
+ defonly=0
+ while :
+ do case "$1" in
+ defonly)
+ defonly=1; shift;;
+ *)
+ name="$1"; break;;
+ esac
+ done
+
+ echo ""
+ if [ $defonly -eq 0 ]; then
+ echo "#if defined(__cplusplus)"
+ echo "}"
+ echo "#endif"
+ fi
+ echo "#endif /* !$name */"
+}
+
+# We are building several files:
+# 1 external #define file
+# 1 external prototype file
+# 1 internal #define file
+# N internal prototype files
+e_dfile=/tmp/__db_c.$$
+e_pfile=/tmp/__db_a.$$
+i_dfile=/tmp/__db_d.$$
+i_pfile=/tmp/__db_b.$$
+trap 'rm -f $e_dfile $e_pfile $i_dfile $i_pfile; exit 0' 0 1 2 3 13 15
+
+head defonly space _DB_EXT_DEF_IN_ > $e_dfile
+head space _DB_EXT_PROT_IN_ > $e_pfile
+head defonly _DB_INT_DEF_IN_ > $i_dfile
+
+# Process the standard directories, creating per-directory prototype
+# files and adding to the external prototype and #define files.
+for i in db btree clib common crypto dbreg env fileops hash hmac \
+ lock log mp mutex os qam rep rpc_client rpc_server tcl txn xa; do
+ head "_${i}_ext_h_" > $i_pfile
+
+ f="../$i/*.c"
+ [ $i = os ] && f="$f ../os_win32/*.c"
+ [ $i = rpc_server ] && f="../$i/c/*.c"
+ [ $i = crypto ] && f="../$i/*.c ../$i/*/*.c"
+ awk -f gen_inc.awk \
+ -v db_version_unique_name=$DB_VERSION_UNIQUE_NAME \
+ -v e_dfile=$e_dfile \
+ -v e_pfile=$e_pfile \
+ -v i_dfile=$i_dfile \
+ -v i_pfile=$i_pfile $f
+
+ tail "_${i}_ext_h_" >> $i_pfile
+
+ f=../dbinc_auto/${i}_ext.h
+ cmp $i_pfile $f > /dev/null 2>&1 ||
+ (echo "Building $f" && rm -f $f && cp $i_pfile $f && chmod 444 $f)
done
+
+# Process directories which only add to the external prototype and #define
+# files.
+for i in dbm hsearch; do
+ f="../$i/*.c"
+ awk -f gen_inc.awk \
+ -v db_version_unique_name=$DB_VERSION_UNIQUE_NAME \
+ -v e_dfile=$e_dfile \
+ -v e_pfile=$e_pfile \
+ -v i_dfile="" \
+ -v i_pfile="" $f
+done
+
+# RPC uses rpcgen to generate a header file; post-process it to add more
+# interfaces to the internal #define file.
+sed -e '/extern bool_t xdr___/{' \
+ -e 's/.* //' \
+ -e 's/();//' \
+ -e 's/.*/#define & &@DB_VERSION_UNIQUE_NAME@/' \
+ -e 'p' \
+ -e '}' \
+ -e d < ../dbinc_auto/db_server.h >> $i_dfile
+
+# There are a few globals in DB -- add them to the external/internal
+# #define files.
+(echo "#define __db_global_values __db_global_values@DB_VERSION_UNIQUE_NAME@";
+ echo "#define __db_jump __db_jump@DB_VERSION_UNIQUE_NAME@") >> $i_dfile
+(echo "#define db_xa_switch db_xa_switch@DB_VERSION_UNIQUE_NAME@") >> $e_dfile
+
+# Wrap up the external #defines/prototypes, and internal #defines.
+tail defonly _DB_EXT_DEF_IN_ >> $e_dfile
+f=../dbinc_auto/ext_def.in
+cmp $e_dfile $f > /dev/null 2>&1 ||
+ (echo "Building $f" && rm -f $f && cp $e_dfile $f && chmod 444 $f)
+
+tail _DB_EXT_PROT_IN_ >> $e_pfile
+f=../dbinc_auto/ext_prot.in
+cmp $e_pfile $f > /dev/null 2>&1 ||
+ (echo "Building $f" && rm -f $f && cp $e_pfile $f && chmod 444 $f)
+
+tail defonly _DB_INT_DEF_IN_ >> $i_dfile
+f=../dbinc_auto/int_def.in
+cmp $i_dfile $f > /dev/null 2>&1 ||
+ (echo "Building $f" && rm -f $f && cp $i_dfile $f && chmod 444 $f)
+
+# DB185 compatibility support.
+head space defonly _DB_EXT_185_DEF_IN_ > $e_dfile
+head space _DB_EXT_185_PROT_IN_ > $e_pfile
+
+f="../db185/*.c"
+awk -f gen_inc.awk \
+ -v db_version_unique_name=$DB_VERSION_UNIQUE_NAME \
+ -v e_dfile=$e_dfile \
+ -v e_pfile=$e_pfile \
+ -v i_dfile="" \
+ -v i_pfile="" $f
+
+tail defonly _DB_EXT_185_DEF_IN_ >> $e_dfile
+f=../dbinc_auto/ext_185_def.in
+cmp $e_dfile $f > /dev/null 2>&1 ||
+ (echo "Building $f" && rm -f $f && cp $e_dfile $f && chmod 444 $f)
+
+tail _DB_EXT_185_PROT_IN_ >> $e_pfile
+f=../dbinc_auto/ext_185_prot.in
+cmp $e_pfile $f > /dev/null 2>&1 ||
+ (echo "Building $f" && rm -f $f && cp $e_pfile $f && chmod 444 $f)
diff --git a/bdb/dist/s_java b/bdb/dist/s_java
index f324678abaa..2a65da60a73 100755
--- a/bdb/dist/s_java
+++ b/bdb/dist/s_java
@@ -1,31 +1,273 @@
#!/bin/sh -
-# $Id: s_java,v 1.3 2000/07/13 18:38:46 bostic Exp $
+# $Id: s_java,v 1.13 2002/09/09 20:47:30 bostic Exp $
#
# Build the Java files.
-msgcxx="// DO NOT EDIT: automatically built by dist/s_java."
+msgjava="/* DO NOT EDIT: automatically built by dist/s_java. */"
. RELEASE
-f=../java/src/com/sleepycat/db/DbConstants.java
-echo "Building $f"
-rm -f $f
-(echo "$msgcxx" && \
+t=/tmp/__java
+c=/tmp/__javajnic
+h=/tmp/__javajnih
+trap 'rm -f $t $c $h; exit 0' 0 1 2 3 13 15
+
+# Build DbConstants.java.
+(echo "$msgjava" &&
echo &&
echo 'package com.sleepycat.db;' &&
echo &&
- echo 'public class DbConstants' &&
+ echo 'class DbConstants' &&
echo '{' &&
- egrep '^#define.DB_' ../include/db.src | \
- sed -e '/"/d' \
- -e '/@DB_VERSION_/d' \
- -e '/DB_REDO/d' \
- -e '/DB_UNDO/d' \
- -e 's/[()]/ /g' \
+ for i in `egrep '^DB_.*J$' pubdef.in | awk '{print $1}'`; do \
+ egrep -w "^#define[ ]$i|^[ ][ ]*$i" ../dbinc/db.in; \
+ done |
+ sed -e "s/@DB_VERSION_MAJOR@/$DB_VERSION_MAJOR/" \
+ -e "s/@DB_VERSION_MINOR@/$DB_VERSION_MINOR/" \
+ -e "s/@DB_VERSION_PATCH@/$DB_VERSION_PATCH/" \
+ -e 's/^#define[ ][ ]*//' \
+ -e 's/[()=,]/ /g' \
+ -e 's/\/\*/ /' | \
+ awk '{ print " static final int " $1 " = " $2 ";" }' &&
+ echo '}' &&
+ echo &&
+ echo '// end of DbConstants.java') > $t
+
+f=../java/src/com/sleepycat/db/DbConstants.java
+cmp $t $f > /dev/null 2>&1 ||
+ (echo "Building $f" && rm -f $f && cp $t $f && chmod 444 $f)
+
+# Build Db.java.
+f=../java/src/com/sleepycat/db/Db.java
+sed '/BEGIN-JAVA-SPECIAL-CONSTANTS/q' < $f > $t
+(echo " $msgjava" &&
+ for i in `egrep '^DB_.*C$' pubdef.in | awk '{print $1}'`; do \
+ egrep -w "^#define[ ]$i|^[ ][ ]*$i" ../dbinc/db.in; \
+ done |
+ sed -e 's/^#define[ ][ ]*//' \
+ -e 's/[()=,]/ /g' |
+ awk '{ print " public static final int " $1 " = " $2 ";" }') >> $t
+(for i in `egrep '^DB_.*J$' pubdef.in | awk '{print $1}'`; do \
+ egrep -w "^#define[ ]$i|^[ ][ ]*$i" ../dbinc/db.in; \
+ done |
+ sed -e 's/^#define[ ][ ]*//' \
+ -e 's/[()=,]/ /g' |
+ awk '{ print " public static final int " $1 ";" }') >> $t
+sed -n \
+ '/END-JAVA-SPECIAL-CONSTANTS/,/BEGIN-JAVA-CONSTANT-INITIALIZATION/p' \
+ < $f >> $t
+(echo " $msgjava" &&
+ for i in `egrep '^DB_.*J$' pubdef.in | awk '{print $1}'`; do \
+ egrep -w "^#define[ ]$i|^[ ][ ]*$i" ../dbinc/db.in; \
+ done |
+ sed -e 's/^#define[ ][ ]*//' \
+ -e 's/[()=,]/ /g' \
-e 's/\/\*/ /' | \
- awk '{ print "\tpublic static final int " $2 " = " $3 ";" }' &&
- echo " public static final int DB_VERSION_MAJOR = $DB_VERSION_MAJOR;" &&
- echo " public static final int DB_VERSION_MINOR = $DB_VERSION_MINOR;" &&
- echo " public static final int DB_VERSION_PATCH = $DB_VERSION_PATCH;" &&
- echo '}') > $f
-chmod 444 $f
+ awk '{ print " " $1 " = DbConstants." $1 ";" }') >> $t
+sed -n '/END-JAVA-CONSTANT-INITIALIZATION/,$p' < $f >> $t
+
+f=../java/src/com/sleepycat/db/Db.java
+cmp $t $f > /dev/null 2>&1 ||
+ (echo "Building $f" && rm -f $f && cp $t $f && chmod 444 $f)
+
+# Script to convert DB C structure declarations into Java declarations.
+jclass()
+{
+ cat ../dbinc/db.in |
+ sed -n \
+ -e "/struct $1 {/,/^}/{" \
+ -e "/$1/d" \
+ -e '/;/!d' \
+ -e '/^}/d' \
+ -e '/char[ ]*\*/{' \
+ -e "s/^[ ]*[^\*]*\*[ ]*\([^;]*\).*/$2 public String \1;/p"\
+ -e 'd' \
+ -e '}' \
+ -e '/time_t/{' \
+ -e "s/^[ ]*[^ ]*[ ]*\([^;]*\).*/$2 public long \1;/p" \
+ -e 'd' \
+ -e '}' \
+ -e '/DB_LSN[ ]*/{' \
+ -e "s/^[ ]*[^ ]*[ ]*\([^;]*\).*/$2 public DbLsn \1;/p"\
+ -e 'd' \
+ -e '}' \
+ -e '/DB_TXN_ACTIVE[ ]*\*/{' \
+ -e "s/^[ ]*[^\*]*\*[ ]*\([^;]*\).*/$2 public Active \1[];/p"\
+ -e 'd' \
+ -e '}' \
+ -e "s/^[ ]*[^ ]*[ ]*\([^;]*\).*/$2 public int \1;/p" \
+ -e '}'
+}
+
+# Script to convert DB C structure declarations into Java declarations.
+jclass_jni()
+{
+ c=$3
+ h=$4
+ echo "extern int $2(JNIEnv *jnienv, jclass cl, jobject jobj, struct $1 *statp);" >> $h
+ echo "int $2(JNIEnv *jnienv, jclass cl," >> $c
+ echo " jobject jobj, struct $1 *statp) {" >> $c
+ cat ../dbinc/db.in |
+ sed -n \
+ -e "/struct $1 {/,/^}/{" \
+ -e "/$1/d" \
+ -e '/;/!d' \
+ -e '/^}/d' \
+ -e '/char[ ]*\*/{' \
+ -e "s/^[ ]*[^\*]*\*[ ]*\([^;]*\).*/ JAVADB_STAT_STRING(jnienv, cl, jobj, statp, \1);/p"\
+ -e 'd' \
+ -e '}' \
+ -e '/time_t/{' \
+ -e "s/^[ ]*[^ ]*[ ]*\([^;]*\).*/ JAVADB_STAT_LONG(jnienv, cl, jobj, statp, \1);/p" \
+ -e 'd' \
+ -e '}' \
+ -e '/DB_LSN[ ]*/{' \
+ -e "s/^[ ]*[^ ]*[ ]*\([^;]*\).*/ JAVADB_STAT_LSN(jnienv, cl, jobj, statp, \1);/p"\
+ -e 'd' \
+ -e '}' \
+ -e '/DB_TXN_ACTIVE[ ]*\*/{' \
+ -e "s/^[ ]*[^\*]*\*[ ]*\([^;]*\).*/ JAVADB_STAT_ACTIVE(jnienv, cl, jobj, statp, \1);/p"\
+ -e 'd' \
+ -e '}' \
+ -e "s/^[ ]*[^ ]*[ ]*\([^;]*\).*/ JAVADB_STAT_INT(jnienv, cl, jobj, statp, \1);/p" \
+ -e '}' >> $c
+ echo ' return (0);' >> $c
+ echo '}' >> $c
+}
+
+echo "$msgjava" >> $c
+echo "$msgjava" >> $h
+echo '#include "java_util.h"' >> $c
+
+# Build DbBtreeStat.java
+(echo "$msgjava" &&
+ echo &&
+ echo 'package com.sleepycat.db;' &&
+ echo &&
+ echo "public class DbBtreeStat"
+ echo '{'
+ jclass __db_bt_stat &&
+ echo '}' &&
+ echo '// end of DbBtreeStat.java') > $t
+jclass_jni __db_bt_stat __jv_fill_bt_stat $c $h
+f=../java/src/com/sleepycat/db/DbBtreeStat.java
+cmp $t $f > /dev/null 2>&1 ||
+ (echo "Building $f" && rm -f $f && cp $t $f && chmod 444 $f)
+
+# Build DbHashStat.java
+(echo "$msgjava" &&
+ echo &&
+ echo 'package com.sleepycat.db;' &&
+ echo &&
+ echo "public class DbHashStat"
+ echo '{'
+ jclass __db_h_stat &&
+ echo '}' &&
+ echo '// end of DbHashStat.java') > $t
+jclass_jni __db_h_stat __jv_fill_h_stat $c $h
+f=../java/src/com/sleepycat/db/DbHashStat.java
+cmp $t $f > /dev/null 2>&1 ||
+ (echo "Building $f" && rm -f $f && cp $t $f && chmod 444 $f)
+
+# Build DbLockStat.java
+(echo "$msgjava" &&
+ echo &&
+ echo 'package com.sleepycat.db;' &&
+ echo &&
+ echo "public class DbLockStat"
+ echo '{'
+ jclass __db_lock_stat &&
+ echo '}' &&
+ echo '// end of DbLockStat.java') > $t
+jclass_jni __db_lock_stat __jv_fill_lock_stat $c $h
+f=../java/src/com/sleepycat/db/DbLockStat.java
+cmp $t $f > /dev/null 2>&1 ||
+ (echo "Building $f" && rm -f $f && cp $t $f && chmod 444 $f)
+
+# Build DbLogStat.java
+(echo "$msgjava" &&
+ echo &&
+ echo 'package com.sleepycat.db;' &&
+ echo &&
+ echo "public class DbLogStat"
+ echo '{'
+ jclass __db_log_stat &&
+ echo '}' &&
+ echo '// end of DbLogStat.java') > $t
+jclass_jni __db_log_stat __jv_fill_log_stat $c $h
+f=../java/src/com/sleepycat/db/DbLogStat.java
+cmp $t $f > /dev/null 2>&1 ||
+ (echo "Building $f" && rm -f $f && cp $t $f && chmod 444 $f)
+
+# Build DbMpoolFStat.java
+(echo "$msgjava" &&
+ echo &&
+ echo 'package com.sleepycat.db;' &&
+ echo &&
+ echo "public class DbMpoolFStat"
+ echo '{'
+ jclass __db_mpool_fstat &&
+ echo '}' &&
+ echo '// end of DbMpoolFStat.java') > $t
+jclass_jni __db_mpool_stat __jv_fill_mpool_stat $c $h
+f=../java/src/com/sleepycat/db/DbMpoolFStat.java
+cmp $t $f > /dev/null 2>&1 ||
+ (echo "Building $f" && rm -f $f && cp $t $f && chmod 444 $f)
+
+# Build DbQueueStat.java
+(echo "$msgjava" &&
+ echo &&
+ echo 'package com.sleepycat.db;' &&
+ echo &&
+ echo "public class DbQueueStat"
+ echo '{'
+ jclass __db_qam_stat &&
+ echo '}' &&
+ echo '// end of DbQueueStat.java') > $t
+jclass_jni __db_qam_stat __jv_fill_qam_stat $c $h
+f=../java/src/com/sleepycat/db/DbQueueStat.java
+cmp $t $f > /dev/null 2>&1 ||
+ (echo "Building $f" && rm -f $f && cp $t $f && chmod 444 $f)
+
+# Build DbRepStat.java
+(echo "$msgjava" &&
+ echo &&
+ echo 'package com.sleepycat.db;' &&
+ echo &&
+ echo "public class DbRepStat"
+ echo '{'
+ jclass __db_rep_stat &&
+ echo '}' &&
+ echo '// end of DbRepStat.java') > $t
+jclass_jni __db_rep_stat __jv_fill_rep_stat $c $h
+f=../java/src/com/sleepycat/db/DbRepStat.java
+cmp $t $f > /dev/null 2>&1 ||
+ (echo "Building $f" && rm -f $f && cp $t $f && chmod 444 $f)
+
+# Build DbTxnStat.java
+(echo "$msgjava" &&
+ echo &&
+ echo 'package com.sleepycat.db;' &&
+ echo &&
+ echo "public class DbTxnStat"
+ echo '{'
+ echo " public static class Active {"
+ jclass __db_txn_active " " &&
+ echo ' };' &&
+ jclass __db_txn_stat &&
+ echo '}' &&
+ echo '// end of DbTxnStat.java') > $t
+jclass_jni __db_txn_stat __jv_fill_txn_stat $c $h
+f=../java/src/com/sleepycat/db/DbTxnStat.java
+cmp $t $f > /dev/null 2>&1 ||
+ (echo "Building $f" && rm -f $f && cp $t $f && chmod 444 $f)
+
+mv $c $t
+f=../libdb_java/java_stat_auto.c
+cmp $t $f > /dev/null 2>&1 ||
+ (echo "Building $f" && rm -f $f && cp $t $f && chmod 444 $f)
+
+mv $h $t
+f=../libdb_java/java_stat_auto.h
+cmp $t $f > /dev/null 2>&1 ||
+ (echo "Building $f" && rm -f $f && cp $t $f && chmod 444 $f)
diff --git a/bdb/dist/s_javah b/bdb/dist/s_javah
new file mode 100755
index 00000000000..480856e4b5c
--- /dev/null
+++ b/bdb/dist/s_javah
@@ -0,0 +1,55 @@
+#!/bin/sh -
+# $Id: s_javah,v 1.1 2002/08/14 17:14:24 dda Exp $
+#
+# Use javah to build the libdb_java/com_*.h header files.
+#
+# To run this, you will need a javac and javah in your PATH.
+# If possible, install tools with a recent vintage, JDK 1.3 or higher is good.
+# Using Sun's JDK rather than some other installation ensures
+# that the header files will not be constantly changed.
+
+. RELEASE
+
+JAVAC=javac
+JAVAH=javah
+export CLASSPATH
+CLASSPATH=
+
+# CLASSES are only those classes for which we have native methods.
+D=com.sleepycat.db
+CLASSES="$D.Dbc $D.DbEnv $D.Db $D.DbLock $D.DbLogc $D.DbLsn $D.Dbt $D.DbTxn $D.xa.DbXAResource"
+
+d=/tmp/__javah
+c=$d/classes
+trap 'rm -rf $d; exit 0' 0 1 2 3 13 15
+
+rm -rf $d
+mkdir $d || exit 1
+mkdir $c || exit 1
+
+# Make skeleton versions of XA classes and interfaces
+# We only need to compile them, not run them.
+pkg="package javax.transaction.xa"
+echo "$pkg; public interface XAResource {}" > $d/XAResource.java
+echo "$pkg; public interface Xid {}" > $d/Xid.java
+echo "$pkg; public class XAException extends Exception {}" \
+ > $d/XAException.java
+
+
+# Create the .class files and use them with javah to create the .h files
+${JAVAC} -d $c $d/*.java \
+ ../java/src/com/sleepycat/db/*.java \
+ ../java/src/com/sleepycat/db/xa/*.java || exit 1
+${JAVAH} -classpath $c -d $d ${CLASSES} || exit 1
+
+for cl in ${CLASSES}; do
+ h=`echo $cl | sed -e 's/\./_/g'`.h
+ t=$d/$h
+ f=../libdb_java/$h
+ if [ ! -f $t ]; then
+ echo "ERROR: $t does not exist"
+ exit 1
+ fi
+ cmp $t $f > /dev/null 2>&1 ||
+ (echo "Building $f" && rm -f $f && cp $t $f && chmod 444 $f)
+done
diff --git a/bdb/dist/s_perm b/bdb/dist/s_perm
index 4b0997f2f55..03cc4a35a8a 100755
--- a/bdb/dist/s_perm
+++ b/bdb/dist/s_perm
@@ -1,37 +1,47 @@
#!/bin/sh -
-# $Id: s_perm,v 1.9 2001/01/24 15:55:38 bostic Exp $
+# $Id: s_perm,v 1.23 2002/09/09 15:03:06 bostic Exp $
+d=..
echo 'Updating Berkeley DB source tree permissions...'
run()
{
echo " $1 ($2)"
- if [ -f "../$1" ]; then
- chmod "$2" "../$1"
+ if [ -f "$d/$1" ]; then
+ chmod "$2" "$d/$1"
else
- echo "$1: no such file or directory"
+ echo "$d/$1: no such file or directory"
exit 1
fi
}
-run dist/build/chk.def 0555
-run dist/build/chk.define 0555
-run dist/build/chk.offt 0555
-run dist/build/chk.srcfiles 0555
-run dist/build/chk.tags 0555
-run dist/config.guess 0555
-run dist/config.sub 0555
-run dist/configure 0555
-run dist/install-sh 0555
-run dist/s_config 0555
-run dist/s_include 0555
-run dist/s_java 0555
-run dist/s_perm 0555
-run dist/s_readme 0555
-run dist/s_recover 0555
-run dist/s_symlink 0555
-run dist/s_tags 0555
-run dist/s_tcl 0555
-run dist/s_win32 0555
-run perl.BerkeleyDB/dbinfo 0555
-run perl.BerkeleyDB/mkpod 0555
+run build_win32/include.tcl 664
+run dist/config.guess 555
+run dist/config.sub 555
+run dist/configure 555
+run dist/install-sh 555
+run dist/s_all 555
+run dist/s_config 555
+run dist/s_include 555
+run dist/s_java 555
+run dist/s_javah 555
+run dist/s_perm 555
+run dist/s_readme 555
+run dist/s_recover 555
+run dist/s_rpc 555
+run dist/s_symlink 555
+run dist/s_tags 555
+run dist/s_test 555
+run dist/s_vxworks 555
+run dist/s_win32 555
+run dist/s_win32_dsp 555
+run dist/vx_buildcd 555
+
+run perl/BerkeleyDB/dbinfo 555
+run perl/BerkeleyDB/mkpod 555
+
+for i in `cd $d && find build_vxworks \
+ -name '*.wsp' -o -name '*.cdf' -o -name '*.wpj'`; do
+ echo " $i (775)"
+ chmod 775 $d/$i
+done
diff --git a/bdb/dist/s_readme b/bdb/dist/s_readme
index 9ff8a69bc88..229a152b8a9 100755
--- a/bdb/dist/s_readme
+++ b/bdb/dist/s_readme
@@ -1,18 +1,23 @@
#!/bin/sh -
-# $Id: s_readme,v 1.2 2000/01/27 21:42:18 bostic Exp $
+# $Id: s_readme,v 1.6 2002/02/14 02:50:26 bostic Exp $
#
# Build the README.
+d=..
+
+t=/tmp/__t
+trap 'rm -f $t; exit 0' 0 1 2 3 13 15
+
. RELEASE
+cat << END_OF_README>$t
+$DB_VERSION_STRING
+
+This is version $DB_VERSION_MAJOR.$DB_VERSION_MINOR.$DB_VERSION_PATCH of Berkeley DB from Sleepycat Software. To view
+the release and installation documentation, load the distribution file
+docs/index.html into your web browser.
+END_OF_README
+
f=../README
-echo "Building $f"
-rm -f $f
-(echo "$DB_VERSION_STRING" &&
- echo "" &&
- echo -n "This is version " &&
- echo -n "$DB_VERSION_MAJOR.$DB_VERSION_MINOR.$DB_VERSION_PATCH" &&
- echo " of Berkeley DB from Sleepycat Software. To view" &&
- echo "the release and installation documentation, load the distribution file" &&
- echo "docs/index.html into your web browser.") > $f
-chmod 444 $f
+cmp $t $f > /dev/null 2>&1 ||
+ (echo "Building $f" && rm -f $f && cp $t $f && chmod 444 $f)
diff --git a/bdb/dist/s_recover b/bdb/dist/s_recover
index b0593cfc052..331ae623d3f 100755
--- a/bdb/dist/s_recover
+++ b/bdb/dist/s_recover
@@ -1,23 +1,18 @@
#!/bin/sh -
-# $Id: s_recover,v 1.3 2000/03/30 05:24:36 krinsky Exp $
+# $Id: s_recover,v 1.14 2002/03/27 04:31:51 bostic Exp $
#
# Build the automatically generated logging/recovery files.
-DIR="db btree hash log qam txn"
+tmp=/tmp/__db_a
+loglist=/tmp/__db_b
+source=/tmp/__db_c
+header=/tmp/__db_d
+template=/tmp/__db_e
-t=/tmp/__db_$$
-loglist=../test/logtrack.list
-rm -f $t
-rm -f $loglist
+trap 'rm -f $tmp $loglist $source $header $template; exit 1' 1 2 3 13 15
+trap 'rm -f $tmp $loglist $source $header $template; exit 0' 0
-trap 'rm -f $t; exit 1' 1 2 3 13 15
-
-# Use the standard -k option if it works;
-# otherwise fall back on the traditional notation.
-if sort -k 1,1 /dev/null
-then sort_2_etc='-k 2'
-else sort_2_etc='+1'
-fi
+DIR="db dbreg btree hash qam txn"
# Check to make sure we haven't duplicated a log record entry, and build
# the list of log record types that the test suite uses.
@@ -27,38 +22,46 @@ for i in $DIR; do
# Grab the PREFIX; there should only be one per file, and
# so it's okay to just take the first.
grep '^PREFIX' $f | sed q
- egrep '^DEPRECATED[ ]|^BEGIN[ ]' $f | \
+ egrep '^BEGIN[ ]|^IGNORED[ ]|^DEPRECATED[ ]' $f |
awk '{print $1 "\t" $2 "\t" $3}'
done
done > $loglist
-grep -v '^PREFIX' $loglist | awk '{print $2 "\t" $3}' | \
- sort $sort_2_etc -n | \
- uniq -d -f 1 > $t
-[ -s $t ] && {
+grep -v '^PREFIX' $loglist |
+ awk '{print $2 "\t" $3}' | sort -n -k 2 | uniq -d -f 1 > $tmp
+[ -s $tmp ] && {
echo "DUPLICATE LOG VALUES:"
- cat $t
- rm -f $t
+ cat $tmp
+ rm -f $tmp
exit 1
}
-rm -f $t
+f=../test/logtrack.list
+cmp $loglist $f > /dev/null 2>&1 ||
+ (echo "Building $f" && rm -f $f && cp $loglist $f && chmod 444 $f)
-for i in db btree hash log qam txn; do
+# Build DB's recovery routines.
+for i in db dbreg btree fileops hash qam txn; do
for f in ../$i/*.src; do
subsystem=`basename $f .src`
- header_file=../include/${subsystem}_auto.h
- source_file=../$i/${subsystem}_auto.c
- template_file=template/rec_${subsystem}
- template_source=.
-
- echo "Building $source_file, $header_file, $template_file"
- rm -f $header_file $source_file $template_file
awk -f gen_rec.awk \
- -v subsystem=$subsystem \
- -v source_file=$source_file \
- -v header_file=$header_file \
- -v template_file=$template_file \
- -v template_dir=. < $f
- chmod 444 $header_file $source_file $template_file
+ -v source_file=$source \
+ -v header_file=$header \
+ -v template_file=$template < $f
+
+ f=../dbinc_auto/${subsystem}_auto.h
+ cmp $header $f > /dev/null 2>&1 ||
+ (echo "Building $f" &&
+ rm -f $f && cp $header $f && chmod 444 $f)
+ f=../$i/${subsystem}_auto.c
+ cmp $source $f > /dev/null 2>&1 ||
+ (echo "Building $f" &&
+ rm -f $f && cp $source $f && chmod 444 $f)
+ f=template/rec_${subsystem}
+ cmp $template $f > /dev/null 2>&1 ||
+ (echo "Building $f" &&
+ rm -f $f && cp $template $f && chmod 444 $f)
done
done
+
+# Build the example application's recovery routines.
+(cd ../examples_c/ex_apprec && sh auto_rebuild)
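Before any recovery code is generated, duplicate log-record numbers are rejected: the script drops the PREFIX lines, keeps only the record name and number, sorts numerically on the number, and asks uniq (skipping the name field) to report any number claimed twice. That check in isolation, assuming a logtrack.list-style input file:

    # Report log record numbers assigned to more than one record type.
    grep -v '^PREFIX' logtrack.list |
        awk '{print $2 "\t" $3}' | sort -n -k 2 | uniq -d -f 1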
diff --git a/bdb/dist/s_rpc b/bdb/dist/s_rpc
index f7d9cf4eb1c..302930068ca 100644
--- a/bdb/dist/s_rpc
+++ b/bdb/dist/s_rpc
@@ -1,45 +1,58 @@
#!/bin/sh -
-# $Id: s_rpc,v 11.6 2000/04/26 19:15:51 sue Exp $
+# $Id: s_rpc,v 11.18 2002/08/15 15:22:09 bostic Exp $
#
# Build the automatically generated RPC files
echo "Building RPC client/server files"
+. ./RELEASE
+
+t=/tmp/__db_a
+trap 'rm -f $t ; exit 0' 0
+trap 'rm -f $t ; exit 1' 1 2 3 13 15
+
client_file=../rpc_client/gen_client.c
-cproto_file=../include/gen_client_ext.h
ctmpl_file=./template/gen_client_ret
-header_file=../include/db_server.h
+dbinc_file=../dbinc/db.in
+defs_file=../dbinc_auto/rpc_defs.in
+header_file=../dbinc_auto/db_server.h
+proc_file=../rpc_server/c/db_server_proc.c
rpcclnt_file=../rpc_client/db_server_clnt.c
-rpcsvc_file=../rpc_server/db_server_svc.c
-rpcxdr_file=../rpc_server/db_server_xdr.c
-sed_file=../rpc_server/db_server_proc.sed
-server_file=../rpc_server/gen_db_server.c
-sproto_file=../include/gen_server_ext.h
+rpcsvc_file=../rpc_server/c/db_server_svc.c
+rpcxdr_file=../rpc_server/c/db_server_xdr.c
+sed_file=../rpc_server/c/db_server_proc.sed
+server_file=../rpc_server/c/gen_db_server.c
stmpl_file=./template/db_server_proc
xdr_file=../rpc_server/db_server.x
#
-# NOTE: We do NOT want to remove proc_file. It is what we apply
-# sed_file to, but we do not want to remove it.
-#
-proc_file=../rpc_server/db_server_proc.c
-svrsed_file=../rpc_server/db_server_svc.sed
-rpcsed_file=../rpc_server/db_server.sed
-
-rm -f $client_file $cproto_file $ctmpl_file $header_file $rpcsvc_file \
- $rpcclnt_file $rpcxdr_file $sed_file $server_file $sproto_file \
- $stmpl_file $xdr_file
+# NOTE: We do NOT want to remove proc_file. It is what we apply $sed_file
+# to, but we do not want to remove it, it does not get built in place.
+rm -f $client_file \
+ $ctmpl_file \
+ $header_file \
+ $rpcclnt_file \
+ $rpcsvc_file \
+ $rpcxdr_file \
+ $sed_file \
+ $server_file \
+ $stmpl_file \
+ $xdr_file
#
# Generate client/server/XDR code
#
+xidsize=\
+`awk '/^#define/ { if ($2 == "DB_XIDDATASIZE") { print $3 }}' $dbinc_file`
+
awk -f gen_rpc.awk \
+ -v major=$DB_VERSION_MAJOR \
+ -v minor=$DB_VERSION_MINOR \
+ -v xidsize=$xidsize \
-v client_file=$client_file \
- -v cproto_file=$cproto_file \
-v ctmpl_file=$ctmpl_file \
-v sed_file=$sed_file \
-v server_file=$server_file \
- -v sproto_file=$sproto_file \
-v stmpl_file=$stmpl_file \
-v xdr_file=$xdr_file < ../rpc_server/rpc.src
chmod 444 $client_file $server_file
@@ -52,31 +65,73 @@ rpcgen -l $xdr_file > $rpcclnt_file
rpcgen -s tcp $xdr_file > $rpcsvc_file
rpcgen -c $xdr_file > $rpcxdr_file
-# Run server files through sed
-sed -f $svrsed_file $rpcsvc_file > ${rpcsvc_file}.new
-mv ${rpcsvc_file}.new $rpcsvc_file
-#
-# This is a hack to handle the $proc_file special case. Since it
-# is both a source file and a generated file, we have to dance a
-# bit to get it to work with 'bk get', not just with 'bk edit'.
-# This still isn't perfect (I don't know what perfect would be in
-# this case), but it seems to work for now.
#
-#sed -f $sed_file $proc_file > ${proc_file}.new
-#mv ${proc_file}.new $proc_file
+# Run various server files through sed.
#
-mv $proc_file ${proc_file}.old
-sed -f $sed_file ${proc_file}.old > $proc_file
-test -w ${proc_file}.old || chmod a-w $proc_file
-rm -f ${proc_file}.old
-
-# Run rpcgen files through sed to add HAVE_RPC ifdef
-sed -f $rpcsed_file $rpcsvc_file > ${rpcsvc_file}.new
+cat <<ENDOFSEDTEXT>$t
+s/^#include[ ]"db_server.h"/#include "db_config.h"\\
+\\
+\\#ifndef NO_SYSTEM_INCLUDES\\
+\\#include <rpc\\/rpc.h>\\
+\\#include <rpc\\/pmap_clnt.h>/
+/^#include <netinet.in.h>/a\\
+\\#endif\\
+\\
+\\#include "db_int.h"\\
+\\#include "dbinc_auto/db_server.h"\\
+\\#include "dbinc/db_server_int.h"\\
+\\#include "dbinc_auto/rpc_server_ext.h"
+/^ return;/i\\
+\\ __dbsrv_timeout(0);
+s/svc_sendreply(transp, xdr_void,/svc_sendreply(transp, (xdrproc_t)xdr_void,/
+s/svc_getargs(transp, xdr_argument, &argument)/svc_getargs(transp, (xdrproc_t)xdr_argument, (char *)\&argument)/
+s/svc_sendreply(transp, xdr_result, result)/svc_sendreply(transp, (xdrproc_t)xdr_result, result)/
+s/svc_freeargs(transp, xdr_argument, &argument)/svc_freeargs(transp, (xdrproc_t)xdr_argument, (char *)\&argument)/
+s/^main/void __dbsrv_main/
+ENDOFSEDTEXT
+sed -f $t $rpcsvc_file > ${rpcsvc_file}.new
mv ${rpcsvc_file}.new $rpcsvc_file
-sed -f $rpcsed_file $rpcxdr_file > ${rpcxdr_file}.new
+
+sed -f $sed_file $proc_file > ${proc_file}.new
+mv ${proc_file}.new $proc_file
+
+# Run rpcgen files through sed to add HAVE_RPC ifdef and appropriate
+# includes.
+cat <<ENDOFSEDTEXT>$t
+1i\\
+\\#include "db_config.h"\\
+\\
+\\#ifdef HAVE_RPC
+/^#include "db_server.h"/c\\
+\\#ifndef NO_SYSTEM_INCLUDES\\
+\\#include <rpc/rpc.h>\\
+\\
+\\#include <strings.h>\\
+\\#endif\\
+\\
+\\#include "db_int.h"\\
+\\#include "dbinc_auto/db_server.h"
+\$a\\
+\\#endif /* HAVE_RPC */
+ENDOFSEDTEXT
+
+sed -f $t $rpcxdr_file > ${rpcxdr_file}.new
mv ${rpcxdr_file}.new $rpcxdr_file
-sed -f $rpcsed_file $rpcclnt_file > ${rpcclnt_file}.new
+sed -f $t $rpcclnt_file > ${rpcclnt_file}.new
mv ${rpcclnt_file}.new $rpcclnt_file
-chmod 444 $header_file $rpcclnt_file $rpcsvc_file $rpcxdr_file
+# Copy the DB_RPC SERVER #defines into a separate file so
+# they can be part of db.h.
+msgc="/* DO NOT EDIT: automatically built by dist/s_rpc. */"
+(echo "" && echo "$msgc" &&
+ sed -n -e "/DB_RPC_SERVER/p" $header_file) > $defs_file
+
+# Fix up the header file:
+# Remove the DB_RPC_SERVER #defines.
+# Remove the <rpc/rpc.h> include, it needs to be included earlier
+# than that.
+sed -e "/DB_RPC_SERVER/d"\
+ -e "/^#include.*<rpc\/rpc.h>/d" $header_file > ${header_file}.new
+mv ${header_file}.new $header_file
+chmod 444 $header_file $rpcclnt_file $rpcsvc_file $rpcxdr_file
diff --git a/bdb/dist/s_symlink b/bdb/dist/s_symlink
index 70efa445521..e69bb57dc46 100755
--- a/bdb/dist/s_symlink
+++ b/bdb/dist/s_symlink
@@ -1,5 +1,5 @@
#!/bin/sh -
-# $Id: s_symlink,v 1.9 2000/09/05 21:27:04 bostic Exp $
+# $Id: s_symlink,v 1.28 2002/08/18 21:15:45 bostic Exp $
echo 'Creating Berkeley DB source tree symbolic links...'
@@ -13,6 +13,7 @@ build btree/tags ../dist/tags
build build_unix/tags ../dist/tags
build clib/tags ../dist/tags
build common/tags ../dist/tags
+build crypto/tags ../dist/tags
build cxx/tags ../dist/tags
build db/tags ../dist/tags
build db185/tags ../dist/tags
@@ -27,54 +28,18 @@ build db_recover/tags ../dist/tags
build db_stat/tags ../dist/tags
build db_upgrade/tags ../dist/tags
build db_verify/tags ../dist/tags
+build dbinc/tags ../dist/tags
+build dbinc_auto/tags ../dist/tags
build dbm/tags ../dist/tags
-build docs_src/api_cxx/Makefile ../api_c/Makefile
-build docs_src/api_cxx/m4.seealso ../api_c/m4.seealso
-build docs_src/api_cxx/spell.ok ../api_c/spell.ok
-build docs_src/api_java/Makefile ../api_c/Makefile
-build docs_src/api_java/java_index.so ../api_cxx/cxx_index.so
-build docs_src/api_java/m4.seealso ../api_c/m4.seealso
-build docs_src/api_java/spell.ok ../api_c/spell.ok
-build docs_src/api_tcl/spell.ok ../api_c/spell.ok
-build docs_src/ref/am/spell.ok ../spell.ok
-build docs_src/ref/am_conf/spell.ok ../spell.ok
-build docs_src/ref/arch/spell.ok ../spell.ok
-build docs_src/ref/build_unix/spell.ok ../spell.ok
-build docs_src/ref/build_vxworks/spell.ok ../spell.ok
-build docs_src/ref/build_win/spell.ok ../spell.ok
-build docs_src/ref/cam/spell.ok ../spell.ok
-build docs_src/ref/debug/spell.ok ../spell.ok
-build docs_src/ref/distrib/spell.ok ../spell.ok
-build docs_src/ref/dumpload/spell.ok ../spell.ok
-build docs_src/ref/env/spell.ok ../spell.ok
-build docs_src/ref/install/spell.ok ../spell.ok
-build docs_src/ref/intro/spell.ok ../spell.ok
-build docs_src/ref/java/spell.ok ../spell.ok
-build docs_src/ref/lock/spell.ok ../spell.ok
-build docs_src/ref/log/spell.ok ../spell.ok
-build docs_src/ref/mp/spell.ok ../spell.ok
-build docs_src/ref/perl/spell.ok ../spell.ok
-build docs_src/ref/program/spell.ok ../spell.ok
-build docs_src/ref/refs/spell.ok ../spell.ok
-build docs_src/ref/rpc/spell.ok ../spell.ok
-build docs_src/ref/sendmail/spell.ok ../spell.ok
-build docs_src/ref/simple_tut/spell.ok ../spell.ok
-build docs_src/ref/tcl/spell.ok ../spell.ok
-build docs_src/ref/test/spell.ok ../spell.ok
-build docs_src/ref/transapp/spell.ok ../spell.ok
-build docs_src/ref/txn/spell.ok ../spell.ok
-build docs_src/ref/upgrade.2.0/spell.ok ../spell.ok
-build docs_src/ref/upgrade.3.0/spell.ok ../spell.ok
-build docs_src/ref/upgrade.3.1/spell.ok ../spell.ok
-build docs_src/ref/upgrade.3.2/spell.ok ../spell.ok
-build docs_src/ref/xa/spell.ok ../spell.ok
+build dbreg/tags ../dist/tags
build env/tags ../dist/tags
build examples_c/tags ../dist/tags
build examples_cxx/tags ../dist/tags
build examples_java java/src/com/sleepycat/examples
+build fileops/tags ../dist/tags
build hash/tags ../dist/tags
+build hmac/tags ../dist/tags
build hsearch/tags ../dist/tags
-build include/tags ../dist/tags
build libdb_java/tags ../dist/tags
build lock/tags ../dist/tags
build log/tags ../dist/tags
@@ -84,8 +49,10 @@ build os/tags ../dist/tags
build os_vxworks/tags ../dist/tags
build os_win32/tags ../dist/tags
build qam/tags ../dist/tags
+build rep/tags ../dist/tags
build rpc_client/tags ../dist/tags
build rpc_server/tags ../dist/tags
build tcl/tags ../dist/tags
+build test_server/tags ../dist/tags
build txn/tags ../dist/tags
build xa/tags ../dist/tags
diff --git a/bdb/dist/s_tags b/bdb/dist/s_tags
index 834600b9fb1..1c0be7b9e0f 100755
--- a/bdb/dist/s_tags
+++ b/bdb/dist/s_tags
@@ -1,31 +1,39 @@
#!/bin/sh -
-# $Id: s_tags,v 1.5 2000/09/05 21:27:04 bostic Exp $
+# $Id: s_tags,v 1.16 2002/03/28 20:13:07 krinsky Exp $
#
# Build tags files.
-files="../include/*.h \
- ../include/*.src \
+files="../dbinc/*.h \
+ ../dbinc/*.in \
../btree/*.[ch] \
../clib/*.[ch] \
../common/*.[ch] \
+ ../crypto/*.[ch] \
+ ../crypto/mersenne/*.[ch] \
+ ../crypto/rijndael/*.[ch] \
../db/*.[ch] \
../db185/*.[ch] \
../dbm/*.[ch] \
+ ../dbreg/*.[ch] \
../env/*.[ch] \
+ ../fileops/*.[ch] \
../hash/*.[ch] \
+ ../hmac/*.[ch] \
../hsearch/*.[ch] \
- ../libdb_java/*.[ch] \
../lock/*.[ch] \
../log/*.[ch] \
../mp/*.[ch] \
../mutex/*.[ch] \
../os/*.[ch] \
../qam/*.[ch] \
+ ../rep/*.[ch] \
../rpc_client/*.[ch] \
- ../rpc_server/*.[ch] \
+ ../rpc_server/c/*.[ch] \
../tcl/*.[ch] \
../txn/*.[ch] \
- ../xa/*.[ch]"
+ ../xa/*.[ch] \
+ ../cxx/*.cpp \
+ ../libdb_java/*.[ch]"
f=tags
echo "Building $f"
@@ -45,3 +53,8 @@ fi
ctags $flags $files 2>/dev/null
chmod 444 $f
+
+f=../test_perf/tags
+echo "Building $f"
+(cd ../test_perf && ctags $flags *.[ch] 2>/dev/null)
+chmod 444 $f
diff --git a/bdb/dist/s_tcl b/bdb/dist/s_tcl
deleted file mode 100755
index 7350e9a49e9..00000000000
--- a/bdb/dist/s_tcl
+++ /dev/null
@@ -1,53 +0,0 @@
-#!/bin/sh -
-# $Id: s_tcl,v 1.14 2000/11/09 19:24:07 sue Exp $
-#
-# Build the Tcl test files.
-
-msgshb="# DO NOT EDIT BELOW THIS LINE: automatically built by dist/s_tcl."
-
-. RELEASE
-
-f=../test/include.tcl
-echo "Building $f"
-rm -f $f
-(echo "set tclsh_path @TCL_TCLSH@" && \
- echo "set tcllib .libs/libdb_tcl-@DB_VERSION_MAJOR@.@DB_VERSION_MINOR@.@SOSUFFIX@" && \
- echo "set rpc_server localhost" && \
- echo "set rpc_path ." && \
- echo "set test_path @srcdir@/../test" && \
- echo "" && \
- echo "set KILL \"@db_cv_path_kill@\"" && \
- echo "" && \
- echo "$msgshb" && \
- echo "" && \
- echo "global dict" && \
- echo "global testdir" && \
- echo "global util_path" && \
- echo "set testdir ./TESTDIR" && \
- echo "set rpc_testdir \$rpc_path/TESTDIR" && \
- echo "" && \
- echo "global is_hp_test" && \
- echo "global is_qnx_test" && \
- echo "global is_windows_test") > $f
-chmod 444 $f
-
-f=../build_win32/include.tcl
-echo "Building $f"
-rm -f $f
-(echo "set tclsh_path SET_YOUR_TCLSH_PATH" && \
- echo "set test_path ../test" && \
- echo "set tcllib ./Debug/libdb_tcl${DB_VERSION_MAJOR}${DB_VERSION_MINOR}d.dll" && \
- echo "" && \
- echo "set KILL ./dbkill.exe" && \
- echo "" && \
- echo "$msgshb" && \
- echo "" && \
- echo "global dict" && \
- echo "global testdir" && \
- echo "global util_path" && \
- echo "set testdir ./TESTDIR" && \
- echo "" && \
- echo "global is_hp_test" && \
- echo "global is_qnx_test" && \
- echo "global is_windows_test") > $f
-chmod 444 $f
diff --git a/bdb/dist/s_test b/bdb/dist/s_test
new file mode 100755
index 00000000000..266f27a743f
--- /dev/null
+++ b/bdb/dist/s_test
@@ -0,0 +1,92 @@
+#!/bin/sh -
+# $Id: s_test,v 1.24 2002/08/09 02:24:58 bostic Exp $
+#
+# Build the Tcl test files.
+
+msg1="# Automatically built by dist/s_test; may require local editing."
+msg2="# Automatically built by dist/s_test; may require local editing."
+
+t=/tmp/__t
+trap 'rm -f $t; exit 0' 0 1 2 3 13 15
+
+. RELEASE
+
+(echo "$msg1" && \
+ echo "" && \
+ echo "set tclsh_path @TCL_TCLSH@" && \
+ echo "set tcllib .libs/libdb_tcl-@DB_VERSION_MAJOR@.@DB_VERSION_MINOR@.@MODSUFFIX@" && \
+ echo "" && \
+ echo "set rpc_server localhost" && \
+ echo "set rpc_path ." && \
+ echo "set rpc_testdir \$rpc_path/TESTDIR" && \
+ echo "" && \
+ echo "set src_root @srcdir@/.." && \
+ echo "set test_path @srcdir@/../test" && \
+ echo "" && \
+ echo "global testdir" && \
+ echo "set testdir ./TESTDIR" && \
+ echo "" && \
+ echo "global dict" && \
+ echo "global util_path" && \
+ echo "" && \
+ echo "global is_hp_test" && \
+ echo "global is_qnx_test" && \
+ echo "global is_windows_test" && \
+ echo "" && \
+ echo "set KILL \"@db_cv_path_kill@\"") > $t
+
+f=../test/include.tcl
+cmp $t $f > /dev/null 2>&1 ||
+ (echo "Building $f" && rm -f $f && cp $t $f && chmod 444 $f)
+
+(echo "$msg1" && \
+ echo "" && \
+ echo "set tclsh_path SET_YOUR_TCLSH_PATH" && \
+ echo "set tcllib ./Debug/libdb_tcl${DB_VERSION_MAJOR}${DB_VERSION_MINOR}d.dll" && \
+ echo "" && \
+ echo "set src_root .." && \
+ echo "set test_path ../test" && \
+ echo "" && \
+ echo "global testdir" && \
+ echo "set testdir ./TESTDIR" && \
+ echo "" && \
+ echo "global dict" && \
+ echo "global util_path" && \
+ echo "" && \
+ echo "global is_hp_test" && \
+ echo "global is_qnx_test" && \
+ echo "global is_windows_test" && \
+ echo "" && \
+ echo "set KILL ./dbkill.exe") > $t
+
+f=../build_win32/include.tcl
+cmp $t $f > /dev/null 2>&1 ||
+ (echo "Building $f" && rm -f $f && cp $t $f && chmod 444 $f)
+
+# Build the test directory TESTS file.
+(echo $msg2;
+cat `egrep -l '^#[ ][ ]*TEST' ../test/*.tcl` |
+sed -e '/^#[ ][ ]*TEST/!{' \
+ -e 's/.*//' \
+ -e '}' |
+cat -s |
+sed -e '/TEST/{' \
+ -e 's/^#[ ][ ]*TEST[ ]*//' \
+ -e 's/^ //' \
+ -e 'H' \
+ -e 'd' \
+ -e '}' \
+ -e 's/.*//' \
+ -e x \
+ -e 's/\n/__LINEBREAK__/g' |
+sort |
+sed -e 's/__LINEBREAK__/\
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=\
+/' \
+ -e 's/__LINEBREAK__/\
+ /g' |
+sed -e 's/^[ ][ ]*$//') > $t
+
+f=../test/TESTS
+cmp $t $f > /dev/null 2>&1 ||
+ (echo "Building $f" && rm -f $f && cp $t $f && chmod 444 $f)
diff --git a/bdb/dist/s_vxworks b/bdb/dist/s_vxworks
index edf058df7ee..b7cf785f78b 100644
--- a/bdb/dist/s_vxworks
+++ b/bdb/dist/s_vxworks
@@ -1,5 +1,5 @@
#!/bin/sh -
-# $Id: s_vxworks,v 1.3 2000/07/13 18:38:46 bostic Exp $
+# $Id: s_vxworks,v 1.37 2002/08/19 21:27:06 bostic Exp $
#
# Build the VxWorks files.
@@ -7,15 +7,26 @@ msgc="/* DO NOT EDIT: automatically built by dist/s_vxworks. */"
. RELEASE
-t=/tmp/__db_$$
-rm -f $t
+s=/tmp/__db_a
+t=/tmp/__db_b
+vxfilelist=/tmp/__db_c
-trap 'rm -f $t ; exit 1' 1 2 3 13 15
+trap 'rm -f $s $t $vxfilelist ; exit 0' 0
+trap 'rm -f $s $t $vxfilelist ; exit 1' 1 2 3 13 15
+# Build the VxWorks automatically generated files.
f=../build_vxworks/db.h
-echo "Building $f"
-rm -f $f
-cat <<ENDOFSEDTEXT > $t
+cat <<ENDOFSEDTEXT > $s
+/extern "C" {/{
+n
+n
+i\\
+\\
+/* Tornado 2 does not provide a standard C pre-processor #define. */\\
+#ifndef __vxworks\\
+#define __vxworks\\
+#endif
+}
s/@u_int8_decl@/typedef unsigned char u_int8_t;/
s/@u_int16_decl@/typedef unsigned short u_int16_t;/
s/@u_int32_decl@/typedef unsigned int u_int32_t;/
@@ -30,19 +41,284 @@ s/@DB_VERSION_MAJOR@/$DB_VERSION_MAJOR/
s/@DB_VERSION_MINOR@/$DB_VERSION_MINOR/
s/@DB_VERSION_PATCH@/$DB_VERSION_PATCH/
s/@DB_VERSION_STRING@/"$DB_VERSION_STRING"/
+s/@DB_VERSION_UNIQUE_NAME@//
ENDOFSEDTEXT
-(echo "$msgc" && sed -f $t ../include/db.src) > $f
-chmod 444 $f
+(echo "$msgc" &&
+ sed -f $s ../dbinc/db.in &&
+ cat ../dbinc_auto/rpc_defs.in &&
+ cat ../dbinc_auto/ext_prot.in) > $t
+cmp $t $f > /dev/null 2>&1 ||
+ (echo "Building $f" && rm -f $f && cp $t $f && chmod 444 $f)
f=../build_vxworks/db_int.h
-echo "Building $f"
-rm -f $f
-cat <<ENDOFSEDTEXT > $t
+cat <<ENDOFSEDTEXT > $s
s/\(PATH_SEPARATOR[^"]*"\)\/"/\1\/\\\\\\\\\\"/
s/@db_align_t_decl@/typedef unsigned long db_align_t;/
s/@db_alignp_t_decl@/typedef unsigned long db_alignp_t;/
+s/@db_int_def@//
ENDOFSEDTEXT
-(echo "$msgc" && sed -f $t ../include/db_int.src) > $f
-chmod 444 $f
+(echo "$msgc" && sed -f $s ../dbinc/db_int.in) > $t
+cmp $t $f > /dev/null 2>&1 ||
+ (echo "Building $f" && rm -f $f && cp $t $f && chmod 444 $f)
-rm -f $t
+f=../build_vxworks/db_config.h
+(echo "$msgc" && sed "s/__EDIT_DB_VERSION__/$DB_VERSION/" vx_config.in) > $t
+cmp $t $f > /dev/null 2>&1 ||
+ (echo "Building $f" && rm -f $f && cp $t $f && chmod 444 $f)
+
+# Build a sed script that will change a "standard" DB utility into
+# VxWorks-compatible code.
+transform()
+{
+ # Build a sed script that will add argument parsing support and
+ # rename all of the functions to be private to this file.
+cat <<ENDOFSEDTEXT
+/^main(argc, argv)$/{
+i\\
+$1(args)\\
+\\ char *args;\\
+{\\
+\\ int argc;\\
+\\ char **argv;\\
+\\
+\\ __db_util_arg("$1", args, &argc, &argv);\\
+\\ return ($1_main(argc, argv) ? EXIT_FAILURE : EXIT_SUCCESS);\\
+}\\
+\\
+#include <stdio.h>\\
+#define ERROR_RETURN ERROR\\
+\\
+int\\
+$1_main(argc, argv)
+d
+}
+/^ while ((ch = getopt/i\\
+\\ __db_getopt_reset = 1;
+/^[ ]*extern int optind;/s/;/, __db_getopt_reset;/
+ENDOFSEDTEXT
+
+ # Replace all function names with VxWorks safe names.
+ # Function names are:
+ # Tokens starting at the beginning of the line, immediately
+ # followed by an opening parenthesis.
+ # Replace:
+ # Matches preceded by a non-C-token character and immediately
+ # followed by an opening parenthesis.
+ # Matches preceded by a non-C-token character and immediately
+ # followed by " __P".
+ # Matches starting at the beginning of the line, immediately
+ # followed by an opening parenthesis.
+ for k in `sed -e 's/^\([a-zA-Z_][a-zA-Z0-9_]*\)(.*$/\1/p' -e d $2`; do
+ echo "s/\([^a-zA-Z0-9_]\)\($k(\)/\1$1_\2/g"
+ echo "s/\([^a-zA-Z0-9_]\)\($k[ ]__P\)/\1$1_\2/g"
+ echo "s/^\($k(\)/$1_\1/g"
+ done
+
+ # There is a special case the rules above don't catch:
+ # a txn_compare function used as an argument to qsort(3).
+ # a print_app_record function used as argument to
+ # dbenv->set_app_dispatch).
+ echo "s/, txn_compare);/, db_stat_txn_compare);/"
+ echo "s/, print_app_record)) /, db_printlog_print_app_record)) /"
+
+ # We convert the ex_access sample into dbdemo for VxWorks.
+ echo 's/progname = "ex_access";/progname = "dbdemo";/'
+
+ # The example programs have to load db_int.h, not db.h -- else
+ # they won't have the right Berkeley DB prototypes for getopt
+ # and friends.
+ echo '/#include.*db.h/c\'
+ echo '#include <db_config.h>\'
+ echo '#include <db_int.h>'
+}
+
+PROGRAM_LIST="db_archive db_checkpoint db_deadlock db_dump db_load \
+ db_printlog db_recover db_stat db_upgrade db_verify ex_access"
+
+# Build VxWorks versions of the utilities.
+for i in $PROGRAM_LIST; do
+ if [ $i = "ex_access" ]; then
+ target=dbdemo
+ dir=../examples_c
+ else
+ target=$i
+ dir=../$i
+ fi
+
+ transform $target $dir/$i.c > $s
+ sed -f $s < $dir/$i.c > $t
+
+ f=../build_vxworks/$target/$target.c
+ cmp $t $f > /dev/null 2>&1 ||
+ (echo "Building $f" && rm -f $f && cp $t $f && chmod 444 $f)
+done
+
+# Build VxWorks Tornado 2.0 project files for the utilities.
+for i in $PROGRAM_LIST; do
+ if [ $i = "ex_access" ]; then
+ target=dbdemo
+ dir=../examples_c
+ else
+ target=$i
+ dir=../$i
+ fi
+
+ sed "s/__DB_APPLICATION_NAME__/$target/g" < vx_2.0/wpj.in > $t
+ f=../build_vxworks/$target/$target.wpj
+ cmp $t $f > /dev/null 2>&1 ||
+ (echo "Building $f" && rm -f $f && cp $t $f && chmod 444 $f)
+done
+
+# Build the list of files VxWorks knows about.
+sed -e '/^$/d' -e '/^[ #]/d' srcfiles.in |
+ egrep -w vx |
+ sed 's/[ ].*//' > $vxfilelist
+
+# Build VxWorks Tornado 2.0 project files for the library itself.
+(cat vx_2.0/BerkeleyDB.wpj
+for i in `cat $vxfilelist`; do
+ o="<BEGIN> FILE_\$(PRJ_DIR)/../$i"
+ echo "${o}_dependDone"
+ echo "TRUE"
+ echo "<END>"
+ echo
+ echo "${o}_dependencies"
+ echo "\$(PRJ_DIR)/db_config.h \\"
+ echo " \$(PRJ_DIR)/db_int.h \\"
+ echo " \$(PRJ_DIR)/db.h"
+ echo "<END>"
+ echo
+ echo "${o}_objects"
+ echo "`basename $i .c`.o"
+ echo "<END>"
+ echo
+ echo "${o}_tool"
+ echo "C/C++ compiler"
+ echo "<END>"
+ echo
+done
+echo "<BEGIN> PROJECT_FILES"
+sed -e '$!s/$/ \\/' \
+ -e 's/^/$(PRJ_DIR)\/..\//' \
+ -e '1!s/^/ /' < $vxfilelist
+echo "<END>"
+echo
+echo "<BEGIN> userComments"
+echo "BerkeleyDB"
+echo "<END>") > $t
+f=../build_vxworks/BerkeleyDB.wpj
+cmp $t $f > /dev/null 2>&1 ||
+ (echo "Building $f" && rm -f $f && cp $t $f && chmod 444 $f)
+
+# Build VxWorks Tornado 3.1 project files for the utilities.
+for i in $PROGRAM_LIST; do
+ if [ $i = "ex_access" ]; then
+ target=dbdemo
+ dir=../examples_c
+ else
+ target=$i
+ dir=../$i
+ fi
+
+ cp vx_3.1/Makefile.custom $t
+ f=../build_vxworks/$target/$target/Makefile.custom
+ cmp $t $f > /dev/null 2>&1 ||
+ (echo "Building $f" && rm -f $f && cp $t $f && chmod 444 $f)
+
+ for j in component.cdf component.wpj; do
+ #
+ # Some parts of the component files needs to have the
+ # name in all capitals. Sigh.
+ #
+ z=`echo $target | tr "a-z" "A-Z"`
+ sed -e "s/__DB_APPLICATION_NAME__/$target/g" \
+ -e "s/__DB_CAPAPPL_NAME__/$z/g" < vx_3.1/$j > $t
+ f=../build_vxworks/$target/$target/$j
+ cmp $t $f > /dev/null 2>&1 ||
+ (echo "Building $f" && rm -f $f && cp $t $f && chmod 444 $f)
+ done
+done
+
+# Build VxWorks Tornado 3.1 project files for the library itself.
+cp vx_3.1/Makefile.custom $t
+f=../build_vxworks/BerkeleyDB/Makefile.custom
+cmp $t $f > /dev/null 2>&1 ||
+ (echo "Building $f" && rm -f $f && cp $t $f && chmod 444 $f)
+
+(cat vx_3.1/cdf.1
+echo -n " MODULES"
+for i in `cat $vxfilelist`; do
+ echo " `basename $i .c`.o"
+done | sort | sed -e '$!s/$/ \\/'
+cat vx_3.1/cdf.2
+for i in `cat $vxfilelist`; do
+ b="`basename $i .c`.o"
+ echo "Module $b {"
+ echo
+ echo " NAME $b"
+ echo " SRC_PATH_NAME \$(PRJ_DIR)/../../$i"
+ echo "}"
+ echo
+done
+cat vx_3.1/cdf.3)> $t
+f=../build_vxworks/BerkeleyDB/component.cdf
+cmp $t $f > /dev/null 2>&1 ||
+ (echo "Building $f" && rm -f $f && cp $t $f && chmod 444 $f)
+
+(cat vx_3.1/wpj.1
+for i in `cat $vxfilelist`; do
+ o="<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_\$(PRJ_DIR)/../../$i"
+ echo "${o}_infoTags"
+ echo "toolMacro objects"
+ echo "<END>"
+ echo
+ echo "${o}_objects"
+ echo "`basename $i .c`.o"
+ echo "<END>"
+ echo
+ echo "${o}_toolMacro"
+ echo "CC"
+ echo "<END>"
+ echo
+done
+cat vx_3.1/wpj.2
+for i in `cat $vxfilelist`; do
+ o="<BEGIN> BUILD_PENTIUM2gnu.release_FILE_\$(PRJ_DIR)/../../$i"
+ echo "${o}_infoTags"
+ echo "toolMacro objects"
+ echo "<END>"
+ echo
+ echo "${o}_objects"
+ echo "`basename $i .c`.o"
+ echo "<END>"
+ echo
+ echo "${o}_toolMacro"
+ echo "CC"
+ echo "<END>"
+ echo
+done
+cat vx_3.1/wpj.3
+for i in `cat $vxfilelist`; do
+ o="<BEGIN> BUILD_PENTIUMgnu.debug_FILE_\$(PRJ_DIR)/../../$i"
+ echo "${o}_infoTags"
+ echo "toolMacro objects"
+ echo "<END>"
+ echo
+ echo "${o}_objects"
+ echo "`basename $i .c`.o"
+ echo "<END>"
+ echo
+ echo "${o}_toolMacro"
+ echo "CC"
+ echo "<END>"
+ echo
+done
+cat vx_3.1/wpj.4
+sort $vxfilelist |
+sed -e 's/^/$(PRJ_DIR)\/..\/..\//' \
+ -e '1!s/^/ /' \
+ -e '$!s/$/ \\/'
+cat vx_3.1/wpj.5) > $t
+f=../build_vxworks/BerkeleyDB/component.wpj
+cmp $t $f > /dev/null 2>&1 ||
+ (echo "Building $f" && rm -f $f && cp $t $f && chmod 444 $f)
diff --git a/bdb/dist/s_win32 b/bdb/dist/s_win32
index f989a615e48..78814ababa1 100755
--- a/bdb/dist/s_win32
+++ b/bdb/dist/s_win32
@@ -1,21 +1,22 @@
#!/bin/sh -
-# $Id: s_win32,v 1.9 2000/09/20 15:29:54 bostic Exp $
+# $Id: s_win32,v 1.25 2002/05/20 19:18:14 bostic Exp $
#
# Build Windows/32 include files.
msgc="/* DO NOT EDIT: automatically built by dist/s_win32. */"
+msgw="; DO NOT EDIT: automatically built by dist/s_win32."
. RELEASE
-t=/tmp/__db_$$
-rm -f $t
+s=/tmp/__db_a$$
+t=/tmp/__db_b$$
+rm -f $s $t
-trap 'rm -f $t ; exit 1' 1 2 3 13 15
+trap 'rm -f $s $t ; exit 1' 1 2 3 13 15
+# Build the Win32 automatically generated files.
f=../build_win32/db.h
-echo "Building $f"
-rm -f $f
-cat <<ENDOFSEDTEXT > $t
+cat <<ENDOFSEDTEXT > $s
s/@u_int8_decl@/typedef unsigned char u_int8_t;/
s/@int16_decl@/typedef short int16_t;/
s/@u_int16_decl@/typedef unsigned short u_int16_t;/
@@ -23,7 +24,7 @@ s/@int32_decl@/typedef int int32_t;/
s/@u_int32_decl@/typedef unsigned int u_int32_t;/
/@u_char_decl@/{
i\\
- #if !defined(_WINSOCKAPI_)
+#if !defined(_WINSOCKAPI_)
s/@u_char_decl@/typedef unsigned char u_char;/
}
s/@u_short_decl@/typedef unsigned short u_short;/
@@ -31,37 +32,77 @@ s/@u_int_decl@/typedef unsigned int u_int;/
/@u_long_decl@/{
s/@u_long_decl@/typedef unsigned long u_long;/
a\\
- #endif
+#endif
+}
+/@ssize_t_decl@/{
+ i\\
+#if defined(_WIN64)\\
+typedef __int64 ssize_t;\\
+#else\\
+typedef int ssize_t;\\
+#endif
+ d
}
-s/@ssize_t_decl@/typedef int ssize_t;/
s/@DB_VERSION_MAJOR@/$DB_VERSION_MAJOR/
s/@DB_VERSION_MINOR@/$DB_VERSION_MINOR/
s/@DB_VERSION_PATCH@/$DB_VERSION_PATCH/
s/@DB_VERSION_STRING@/"$DB_VERSION_STRING"/
+s/@DB_VERSION_UNIQUE_NAME@//
+ENDOFSEDTEXT
+(echo "$msgc" &&
+ sed -f $s ../dbinc/db.in &&
+ cat ../dbinc_auto/rpc_defs.in &&
+ cat ../dbinc_auto/ext_prot.in) > $t
+cmp $t $f > /dev/null 2>&1 ||
+ (echo "Building $f" && rm -f $f && cp $t $f && chmod 444 $f)
+
+f=../build_win32/db_cxx.h
+cat <<ENDOFSEDTEXT > $s
+s/@cxx_have_stdheaders@/#define HAVE_CXX_STDHEADERS 1/
ENDOFSEDTEXT
-(echo "$msgc" && sed -f $t ../include/db.src) > $f
-chmod 444 $f
+(echo "$msgc" && sed -f $s ../dbinc/db_cxx.in) > $t
+cmp $t $f > /dev/null 2>&1 ||
+ (echo "Building $f" && rm -f $f && cp $t $f && chmod 444 $f)
f=../build_win32/db_int.h
-echo "Building $f"
-rm -f $f
-cat <<ENDOFSEDTEXT > $t
+cat <<ENDOFSEDTEXT > $s
s/\(PATH_SEPARATOR[^"]*"\)\/"/\1\\\\\\\\\\/:\"/
s/@db_align_t_decl@/typedef unsigned long db_align_t;/
s/@db_alignp_t_decl@/typedef unsigned long db_alignp_t;/
+s/@db_int_def@//
ENDOFSEDTEXT
-(echo "$msgc" && sed -f $t ../include/db_int.src) > $f
-chmod 444 $f
+(echo "$msgc" && sed -f $s ../dbinc/db_int.in) > $t
+cmp $t $f > /dev/null 2>&1 ||
+ (echo "Building $f" && rm -f $f && cp $t $f && chmod 444 $f)
+
+f=../build_win32/db_config.h
+(echo "$msgc" && sed "s/__EDIT_DB_VERSION__/$DB_VERSION/" win_config.in) > $t
+cmp $t $f > /dev/null 2>&1 ||
+ (echo "Building $f" && rm -f $f && cp $t $f && chmod 444 $f)
f=../build_win32/libdb.rc
-echo "Building $f"
-rm -f $f
-cat <<ENDOFSEDTEXT > $t
+cat <<ENDOFSEDTEXT > $s
s/%MAJOR%/$DB_VERSION_MAJOR/
s/%MINOR%/$DB_VERSION_MINOR/
s/%PATCH%/$DB_VERSION_PATCH/
ENDOFSEDTEXT
-sed -f $t ../build_win32/libdbrc.src > $f
-chmod 444 $f
+sed -f $s ../build_win32/libdbrc.src > $t
+cmp $t $f > /dev/null 2>&1 ||
+ (echo "Building $f" && rm -f $f && cp $t $f && chmod 444 $f)
+
+f=../build_win32/libdb.def
+(echo $msgw &&
+ echo &&
+ echo \
+ "DESCRIPTION 'Berkeley DB $DB_VERSION_MAJOR.$DB_VERSION_MINOR Library'" &&
+ echo &&
+ echo EXPORTS;
+a=1
+for i in `sed -e '/^$/d' -e '/^#/d' win_exports.in`; do
+ echo " $i @$a"
+ a=`expr $a + 1`
+done) > $t
+cmp $t $f > /dev/null 2>&1 ||
+ (echo "Building $f" && rm -f $f && cp $t $f && chmod 444 $f)
-rm -f $t
+rm -f $s $t
diff --git a/bdb/dist/s_win32_dsp b/bdb/dist/s_win32_dsp
index 8abee7c1a61..3b0bef831ba 100644
--- a/bdb/dist/s_win32_dsp
+++ b/bdb/dist/s_win32_dsp
@@ -1,5 +1,5 @@
#!/bin/sh -
-# $Id: s_win32_dsp,v 1.3 2000/12/02 04:36:47 dda Exp $
+# $Id: s_win32_dsp,v 1.8 2002/03/26 23:37:55 bostic Exp $
#
# Build Windows/32 .dsp files.
@@ -18,7 +18,6 @@ create_dsp()
dspoutput=$BUILDDIR/$projname.dsp
- echo "Building $dspoutput"
rm -f $dspoutput.insert
for srcpath in `egrep "$match" $sources | sed -e 's/[ ].*//'`
do
@@ -42,22 +41,26 @@ create_dsp()
-e "s/@DB_VERSION_MAJOR@/$DB_VERSION_MAJOR/g" \
-e "s/@DB_VERSION_MINOR@/$DB_VERSION_MINOR/g" \
< $dsptemplate > $dspoutput.new
- rm -f $dspoutput $dspoutput.insert
- mv $dspoutput.new $dspoutput
+
+ # Set the file mode to 644 because the VC++ IDE needs a writeable file
+ # in our development environment.
+ cmp $dspoutput.new $dspoutput > /dev/null 2>&1 ||
+ (echo "Building $dspoutput" && rm -f $dspoutput &&
+ cp $dspoutput.new $dspoutput && chmod 664 $dspoutput)
+ rm -f $dspoutput.insert $dspoutput.new
}
TMPA=/tmp/swin32dsp$$a
-trap "rm -f $TMPA; exit 1" 1 2 3 15
+trap "rm -f $TMPA; exit 1" 1 2 3 15
-# create a copy of the srcfiles with comments and 'skip' lines removed.
+# create a copy of the srcfiles with comments and empty lines removed.
# add a space at the end of each list of modules so that each module
# can be unambiguously matched e.g. ' dynamic '
-#
sed -e "s/#.*$//" \
-e "/^[ ]*$/d" \
-e "s/[ ][ ]*/ /" \
-e "s/[ ]*$//" \
- -e "/ skip$/d" \
+ -e "/[ ]/!d" \
-e "s/$/ /" < $SRCFILES > $TMPA
# get a list of all modules mentioned
@@ -80,6 +83,10 @@ do
create_dsp db_tcl " $module " $TMPA \
$BUILDDIR/tcl_dsp.src $BUILDDIR/srcfile_dsp.src
;;
+ testutil )
+ create_dsp db_test " $module " $TMPA \
+ $BUILDDIR/db_test.src $BUILDDIR/srcfile_dsp.src
+ ;;
static )
create_dsp db_static " $module " $TMPA \
$BUILDDIR/static_dsp.src $BUILDDIR/srcfile_dsp.src
@@ -89,6 +96,8 @@ do
create_dsp $appname " $module " $TMPA \
$BUILDDIR/app_dsp.src $BUILDDIR/srcfile_dsp.src
;;
+ vx )
+ ;;
* )
echo "s_win32_dsp: module name $module in $SRCFILES is unknown type"
;;
diff --git a/bdb/dist/srcfiles.in b/bdb/dist/srcfiles.in
index bfc564e13bb..54aeea0c1bc 100644
--- a/bdb/dist/srcfiles.in
+++ b/bdb/dist/srcfiles.in
@@ -1,8 +1,8 @@
-# $Id: srcfiles.in,v 1.7 2000/11/30 18:42:21 dda Exp $
+# $Id: srcfiles.in,v 1.59 2002/08/29 14:22:21 margo Exp $
#
-# This is an input file for the s_win32_dsp script. It describes every
-# source files used by Windows, and lists those that aren't as well,
-# as a completeness sanity check.
+# This is an input file for the s_win32_dsp and s_vxworks scripts. It lists
+# the source files in the Berkeley DB tree and notes which are used to build
+# the Win/32 and VxWorks libraries.
#
# Please keep this list sorted alphabetically!
#
@@ -11,259 +11,322 @@
#
# The possible modules, including the name of the project (.dsp) file:
#
-# app=NAME this is linked into application NAME.exe (db_NAME.dsp)
-# dynamic file appears in the DLL (db_dll.dsp)
-# java file appears in the java DLL (db_java.dsp)
-# skip this file is not used by Windows
-# static file appears in the static library (db_static.dsp)
-# tcl file appears in the tcl DLL (db_tcl.dsp)
+# app=NAME Linked into application NAME.exe (db_NAME.dsp)
+# dynamic File is in the Windows DLL (db_dll.dsp)
+# java File is in the Windows Java DLL (db_java.dsp)
+# static File is in the Windows static library (db_static.dsp)
+# tcl File is in the Windows tcl DLL (db_tcl.dsp)
+# testutil File is used for Windows testing (db_test.dsp)
+# vx File is in the VxWorks library.
-btree/bt_compare.c dynamic static
-btree/bt_conv.c dynamic static
-btree/bt_curadj.c dynamic static
-btree/bt_cursor.c dynamic static
-btree/bt_delete.c dynamic static
-btree/bt_method.c dynamic static
-btree/bt_open.c dynamic static
-btree/bt_put.c dynamic static
-btree/bt_rec.c dynamic static
-btree/bt_reclaim.c dynamic static
-btree/bt_recno.c dynamic static
-btree/bt_rsearch.c dynamic static
-btree/bt_search.c dynamic static
-btree/bt_split.c dynamic static
-btree/bt_stat.c dynamic static
-btree/bt_upgrade.c dynamic static
-btree/bt_verify.c dynamic static
-btree/btree_auto.c dynamic static
-build_win32/dbkill.cpp skip
-build_win32/dllmain.c dynamic
-build_win32/libdb.def dynamic
-build_win32/libdb.rc dynamic
-build_win32/libdb_tcl.def tcl
-clib/getcwd.c skip
-clib/getopt.c skip # must be linked into each app
-clib/memcmp.c skip
-clib/memmove.c skip
-clib/raise.c skip
-clib/snprintf.c skip
-clib/strcasecmp.c dynamic static
-clib/strerror.c skip
-clib/vsnprintf.c skip
-common/db_byteorder.c dynamic static
-common/db_err.c dynamic static
-common/db_getlong.c dynamic static
-common/db_log2.c dynamic static
-common/util_log.c dynamic static
-common/util_sig.c dynamic static
-cxx/cxx_app.cpp dynamic static
-cxx/cxx_except.cpp dynamic static
-cxx/cxx_lock.cpp dynamic static
-cxx/cxx_log.cpp dynamic static
-cxx/cxx_mpool.cpp dynamic static
-cxx/cxx_table.cpp dynamic static
-cxx/cxx_txn.cpp dynamic static
-db/crdel_auto.c dynamic static
-db/crdel_rec.c dynamic static
-db/db.c dynamic static
-db/db_am.c dynamic static
-db/db_auto.c dynamic static
-db/db_cam.c dynamic static
-db/db_conv.c dynamic static
-db/db_dispatch.c dynamic static
-db/db_dup.c dynamic static
-db/db_iface.c dynamic static
-db/db_join.c dynamic static
-db/db_meta.c dynamic static
-db/db_method.c dynamic static
-db/db_overflow.c dynamic static
-db/db_pr.c dynamic static
-db/db_rec.c dynamic static
-db/db_reclaim.c dynamic static
-db/db_ret.c dynamic static
-db/db_upg.c dynamic static
-db/db_upg_opd.c dynamic static
-db/db_vrfy.c dynamic static
-db/db_vrfyutil.c dynamic static
-db185/db185.c skip
-db_archive/db_archive.c app=db_archive
-db_checkpoint/db_checkpoint.c app=db_checkpoint
-db_deadlock/db_deadlock.c app=db_deadlock
-db_dump/db_dump.c app=db_dump
-db_dump185/db_dump185.c skip
-db_load/db_load.c app=db_load
-db_printlog/db_printlog.c app=db_printlog
-db_recover/db_recover.c app=db_recover
-db_stat/db_stat.c app=db_stat
-db_upgrade/db_upgrade.c app=db_upgrade
-db_verify/db_verify.c app=db_verify
-dbm/dbm.c dynamic static
-env/db_salloc.c dynamic static
-env/db_shash.c dynamic static
-env/env_method.c dynamic static
-env/env_open.c dynamic static
-env/env_recover.c dynamic static
-env/env_region.c dynamic static
-examples_c/ex_access.c app=ex_access
-examples_c/ex_btrec.c app=ex_btrec
-examples_c/ex_dbclient.c skip
-examples_c/ex_env.c app=ex_env
-examples_c/ex_lock.c app=ex_lock
-examples_c/ex_mpool.c app=ex_mpool
-examples_c/ex_thread.c skip
-examples_c/ex_tpcb.c app=ex_tpcb
-examples_cxx/AccessExample.cpp app=excxx_access
-examples_cxx/BtRecExample.cpp app=excxx_btrec
-examples_cxx/EnvExample.cpp app=excxx_env
-examples_cxx/LockExample.cpp app=excxx_lock
-examples_cxx/MpoolExample.cpp app=excxx_mpool
-examples_cxx/TpcbExample.cpp app=excxx_tpcb
-hash/hash.c dynamic static
-hash/hash_auto.c dynamic static
-hash/hash_conv.c dynamic static
-hash/hash_dup.c dynamic static
-hash/hash_func.c dynamic static
-hash/hash_meta.c dynamic static
-hash/hash_method.c dynamic static
-hash/hash_page.c dynamic static
-hash/hash_rec.c dynamic static
-hash/hash_reclaim.c dynamic static
-hash/hash_stat.c dynamic static
-hash/hash_upgrade.c dynamic static
-hash/hash_verify.c dynamic static
-hsearch/hsearch.c dynamic static
-libdb_java/java_Db.c java
-libdb_java/java_DbEnv.c java
-libdb_java/java_DbLock.c java
-libdb_java/java_DbLsn.c java
-libdb_java/java_DbTxn.c java
-libdb_java/java_Dbc.c java
-libdb_java/java_Dbt.c java
-libdb_java/java_info.c java
-libdb_java/java_locked.c java
-libdb_java/java_util.c java
-lock/lock.c dynamic static
-lock/lock_conflict.c dynamic static
-lock/lock_deadlock.c dynamic static
-lock/lock_method.c dynamic static
-lock/lock_region.c dynamic static
-lock/lock_stat.c dynamic static
-lock/lock_util.c dynamic static
-log/log.c dynamic static
-log/log_archive.c dynamic static
-log/log_auto.c dynamic static
-log/log_compare.c dynamic static
-log/log_findckp.c dynamic static
-log/log_get.c dynamic static
-log/log_method.c dynamic static
-log/log_put.c dynamic static
-log/log_rec.c dynamic static
-log/log_register.c dynamic static
-mp/mp_alloc.c dynamic static
-mp/mp_bh.c dynamic static
-mp/mp_fget.c dynamic static
-mp/mp_fopen.c dynamic static
-mp/mp_fput.c dynamic static
-mp/mp_fset.c dynamic static
-mp/mp_method.c dynamic static
-mp/mp_region.c dynamic static
-mp/mp_register.c dynamic static
-mp/mp_stat.c dynamic static
-mp/mp_sync.c dynamic static
-mp/mp_trickle.c dynamic static
-mutex/mut_fcntl.c skip
-mutex/mut_pthread.c skip
-mutex/mut_tas.c dynamic static
-mutex/mutex.c dynamic static
-os/os_abs.c skip
-os/os_alloc.c dynamic static
-os/os_dir.c skip
-os/os_errno.c skip
-os/os_fid.c skip
-os/os_finit.c skip
-os/os_fsync.c dynamic static
-os/os_handle.c dynamic static
-os/os_map.c skip
-os/os_method.c dynamic static
-os/os_oflags.c dynamic static
-os/os_open.c skip
-os/os_region.c dynamic static
-os/os_rename.c skip
-os/os_root.c dynamic static
-os/os_rpath.c dynamic static
-os/os_rw.c dynamic static
-os/os_seek.c skip
-os/os_sleep.c skip
-os/os_spin.c skip
-os/os_stat.c dynamic static
-os/os_tmpdir.c dynamic static
-os/os_unlink.c dynamic static
-os_vxworks/os_abs.c skip
-os_vxworks/os_finit.c skip
-os_vxworks/os_map.c skip
-os_win32/os_abs.c dynamic static
-os_win32/os_dir.c dynamic static
-os_win32/os_errno.c dynamic static
-os_win32/os_fid.c dynamic static
-os_win32/os_finit.c dynamic static
-os_win32/os_map.c dynamic static
-os_win32/os_open.c dynamic static
-os_win32/os_rename.c dynamic static
-os_win32/os_seek.c dynamic static
-os_win32/os_sleep.c dynamic static
-os_win32/os_spin.c dynamic static
-os_win32/os_type.c dynamic static
-qam/qam.c dynamic static
-qam/qam_auto.c dynamic static
-qam/qam_conv.c dynamic static
-qam/qam_files.c dynamic static
-qam/qam_method.c dynamic static
-qam/qam_open.c dynamic static
-qam/qam_rec.c dynamic static
-qam/qam_stat.c dynamic static
-qam/qam_upgrade.c dynamic static
-qam/qam_verify.c dynamic static
-rpc_client/client.c skip
-rpc_client/db_server_clnt.c skip
-rpc_client/gen_client.c skip
-rpc_client/gen_client_ret.c skip
-rpc_server/db_server_proc.c skip
-rpc_server/db_server_svc.c skip
-rpc_server/db_server_util.c skip
-rpc_server/db_server_xdr.c skip
-rpc_server/gen_db_server.c skip
-tcl/tcl_compat.c tcl
-tcl/tcl_db.c tcl
-tcl/tcl_db_pkg.c tcl
-tcl/tcl_dbcursor.c tcl
-tcl/tcl_env.c tcl
-tcl/tcl_internal.c tcl
-tcl/tcl_lock.c tcl
-tcl/tcl_log.c tcl
-tcl/tcl_mp.c tcl
-tcl/tcl_txn.c tcl
-test_server/dbs.c skip
-test_server/dbs_am.c skip
-test_server/dbs_checkpoint.c skip
-test_server/dbs_debug.c skip
-test_server/dbs_handles.c skip
-test_server/dbs_log.c skip
-test_server/dbs_qam.c skip
-test_server/dbs_spawn.c skip
-test_server/dbs_trickle.c skip
-test_server/dbs_util.c skip
-test_server/dbs_yield.c skip
-test_thread/lock.c skip
-test_thread/log.c skip
-test_thread/mpool.c skip
-test_thread/mutex.c skip
-test_vxworks/vx_mutex.c skip
-test_vxworks/vxtpcb_files.c skip
-test_vxworks/vxtpcb_onefile.c skip
-txn/txn.c dynamic static
-txn/txn_auto.c dynamic static
-txn/txn_rec.c dynamic static
-txn/txn_region.c dynamic static
-xa/xa.c dynamic static
-xa/xa_db.c dynamic static
-xa/xa_map.c dynamic static
+btree/bt_compare.c dynamic static vx
+btree/bt_conv.c dynamic static vx
+btree/bt_curadj.c dynamic static vx
+btree/bt_cursor.c dynamic static vx
+btree/bt_delete.c dynamic static vx
+btree/bt_method.c dynamic static vx
+btree/bt_open.c dynamic static vx
+btree/bt_put.c dynamic static vx
+btree/bt_rec.c dynamic static vx
+btree/bt_reclaim.c dynamic static vx
+btree/bt_recno.c dynamic static vx
+btree/bt_rsearch.c dynamic static vx
+btree/bt_search.c dynamic static vx
+btree/bt_split.c dynamic static vx
+btree/bt_stat.c dynamic static vx
+btree/bt_upgrade.c dynamic static vx
+btree/bt_verify.c dynamic static vx
+btree/btree_auto.c dynamic static vx
+build_vxworks/db_archive/db_archive.c
+build_vxworks/db_checkpoint/db_checkpoint.c
+build_vxworks/db_deadlock/db_deadlock.c
+build_vxworks/db_dump/db_dump.c
+build_vxworks/db_load/db_load.c
+build_vxworks/db_printlog/db_printlog.c
+build_vxworks/db_recover/db_recover.c
+build_vxworks/db_stat/db_stat.c
+build_vxworks/db_upgrade/db_upgrade.c
+build_vxworks/db_verify/db_verify.c
+build_vxworks/dbdemo/dbdemo.c
+build_win32/dbkill.cpp testutil
+build_win32/dllmain.c dynamic
+build_win32/libdb.def dynamic
+build_win32/libdb.rc dynamic
+build_win32/libdb_tcl.def tcl
+clib/getcwd.c
+clib/getopt.c vx
+clib/memcmp.c
+clib/memmove.c
+clib/raise.c
+clib/snprintf.c vx
+clib/strcasecmp.c dynamic static vx
+clib/strdup.c vx
+clib/strerror.c
+clib/vsnprintf.c vx
+common/db_byteorder.c dynamic static vx
+common/db_err.c dynamic static vx
+common/db_getlong.c dynamic static vx
+common/db_idspace.c dynamic static vx
+common/db_log2.c dynamic static vx
+common/util_arg.c vx
+common/util_cache.c dynamic static vx
+common/util_log.c dynamic static vx
+common/util_sig.c dynamic static vx
+cxx/cxx_db.cpp dynamic static
+cxx/cxx_dbc.cpp dynamic static
+cxx/cxx_dbt.cpp dynamic static
+cxx/cxx_env.cpp dynamic static
+cxx/cxx_except.cpp dynamic static
+cxx/cxx_lock.cpp dynamic static
+cxx/cxx_logc.cpp dynamic static
+cxx/cxx_mpool.cpp dynamic static
+cxx/cxx_txn.cpp dynamic static
+db/crdel_auto.c dynamic static vx
+db/crdel_rec.c dynamic static vx
+db/db.c dynamic static vx
+db/db_am.c dynamic static vx
+db/db_auto.c dynamic static vx
+db/db_cam.c dynamic static vx
+db/db_conv.c dynamic static vx
+db/db_dispatch.c dynamic static vx
+db/db_dup.c dynamic static vx
+db/db_iface.c dynamic static vx
+db/db_join.c dynamic static vx
+db/db_meta.c dynamic static vx
+db/db_method.c dynamic static vx
+db/db_open.c dynamic static vx
+db/db_overflow.c dynamic static vx
+db/db_pr.c dynamic static vx
+db/db_rec.c dynamic static vx
+db/db_reclaim.c dynamic static vx
+db/db_remove.c dynamic static vx
+db/db_rename.c dynamic static vx
+db/db_ret.c dynamic static vx
+db/db_truncate.c dynamic static vx
+db/db_upg.c dynamic static vx
+db/db_upg_opd.c dynamic static vx
+db/db_vrfy.c dynamic static vx
+db/db_vrfyutil.c dynamic static vx
+db185/db185.c
+db_archive/db_archive.c app=db_archive
+db_checkpoint/db_checkpoint.c app=db_checkpoint
+db_deadlock/db_deadlock.c app=db_deadlock
+db_dump/db_dump.c app=db_dump
+db_dump185/db_dump185.c
+db_load/db_load.c app=db_load
+db_printlog/db_printlog.c app=db_printlog
+db_recover/db_recover.c app=db_recover
+db_stat/db_stat.c app=db_stat
+db_upgrade/db_upgrade.c app=db_upgrade
+db_verify/db_verify.c app=db_verify
+dbm/dbm.c dynamic static
+dbreg/dbreg.c dynamic static vx
+dbreg/dbreg_auto.c dynamic static vx
+dbreg/dbreg_rec.c dynamic static vx
+dbreg/dbreg_util.c dynamic static vx
+env/db_salloc.c dynamic static vx
+env/db_shash.c dynamic static vx
+env/env_file.c dynamic static vx
+env/env_method.c dynamic static vx
+env/env_open.c dynamic static vx
+env/env_recover.c dynamic static vx
+env/env_region.c dynamic static vx
+examples_c/bench_001.c
+examples_c/ex_access.c app=ex_access
+examples_c/ex_apprec/ex_apprec.c
+examples_c/ex_apprec/ex_apprec_auto.c
+examples_c/ex_apprec/ex_apprec_rec.c
+examples_c/ex_btrec.c app=ex_btrec
+examples_c/ex_dbclient.c
+examples_c/ex_env.c app=ex_env
+examples_c/ex_lock.c app=ex_lock
+examples_c/ex_mpool.c app=ex_mpool
+examples_c/ex_repquote/ex_rq_client.c
+examples_c/ex_repquote/ex_rq_main.c
+examples_c/ex_repquote/ex_rq_master.c
+examples_c/ex_repquote/ex_rq_net.c
+examples_c/ex_repquote/ex_rq_util.c
+examples_c/ex_thread.c
+examples_c/ex_tpcb.c app=ex_tpcb
+examples_cxx/AccessExample.cpp app=excxx_access
+examples_cxx/BtRecExample.cpp app=excxx_btrec
+examples_cxx/EnvExample.cpp app=excxx_env
+examples_cxx/LockExample.cpp app=excxx_lock
+examples_cxx/MpoolExample.cpp app=excxx_mpool
+examples_cxx/TpcbExample.cpp app=excxx_tpcb
+fileops/fileops_auto.c dynamic static vx
+fileops/fop_basic.c dynamic static vx
+fileops/fop_rec.c dynamic static vx
+fileops/fop_util.c dynamic static vx
+hash/hash.c dynamic static vx
+hash/hash_auto.c dynamic static vx
+hash/hash_conv.c dynamic static vx
+hash/hash_dup.c dynamic static vx
+hash/hash_func.c dynamic static vx
+hash/hash_meta.c dynamic static vx
+hash/hash_method.c dynamic static vx
+hash/hash_open.c dynamic static vx
+hash/hash_page.c dynamic static vx
+hash/hash_rec.c dynamic static vx
+hash/hash_reclaim.c dynamic static vx
+hash/hash_stat.c dynamic static vx
+hash/hash_upgrade.c dynamic static vx
+hash/hash_verify.c dynamic static vx
+hmac/hmac.c dynamic static vx
+hmac/sha1.c dynamic static vx
+hsearch/hsearch.c dynamic static vx
+libdb_java/java_Db.c java
+libdb_java/java_DbEnv.c java
+libdb_java/java_DbLock.c java
+libdb_java/java_DbLogc.c java
+libdb_java/java_DbLsn.c java
+libdb_java/java_DbTxn.c java
+libdb_java/java_DbUtil.c java
+libdb_java/java_Dbc.c java
+libdb_java/java_Dbt.c java
+libdb_java/java_info.c java
+libdb_java/java_locked.c java
+libdb_java/java_stat_auto.c java
+libdb_java/java_util.c java
+lock/lock.c dynamic static vx
+lock/lock_deadlock.c dynamic static vx
+lock/lock_method.c dynamic static vx
+lock/lock_region.c dynamic static vx
+lock/lock_stat.c dynamic static vx
+lock/lock_util.c dynamic static vx
+log/log.c dynamic static vx
+log/log_archive.c dynamic static vx
+log/log_compare.c dynamic static vx
+log/log_get.c dynamic static vx
+log/log_method.c dynamic static vx
+log/log_put.c dynamic static vx
+mp/mp_alloc.c dynamic static vx
+mp/mp_bh.c dynamic static vx
+mp/mp_fget.c dynamic static vx
+mp/mp_fopen.c dynamic static vx
+mp/mp_fput.c dynamic static vx
+mp/mp_fset.c dynamic static vx
+mp/mp_method.c dynamic static vx
+mp/mp_region.c dynamic static vx
+mp/mp_register.c dynamic static vx
+mp/mp_stat.c dynamic static vx
+mp/mp_sync.c dynamic static vx
+mp/mp_trickle.c dynamic static vx
+mutex/mut_fcntl.c
+mutex/mut_pthread.c
+mutex/mut_tas.c vx
+mutex/mut_win32.c dynamic static
+mutex/mutex.c dynamic static vx
+mutex/tm.c
+os/os_abs.c
+os/os_alloc.c dynamic static vx
+os/os_clock.c vx
+os/os_config.c
+os/os_dir.c vx
+os/os_errno.c vx
+os/os_fid.c vx
+os/os_fsync.c vx
+os/os_handle.c vx
+os/os_id.c dynamic static vx
+os/os_map.c
+os/os_method.c dynamic static vx
+os/os_oflags.c dynamic static vx
+os/os_open.c vx
+os/os_region.c dynamic static vx
+os/os_rename.c vx
+os/os_root.c dynamic static vx
+os/os_rpath.c dynamic static vx
+os/os_rw.c vx
+os/os_seek.c vx
+os/os_sleep.c vx
+os/os_spin.c vx
+os/os_stat.c vx
+os/os_tmpdir.c dynamic static vx
+os/os_unlink.c dynamic static vx
+os_vxworks/os_vx_abs.c vx
+os_vxworks/os_vx_config.c vx
+os_vxworks/os_vx_map.c vx
+os_win32/os_abs.c dynamic static
+os_win32/os_clock.c dynamic static
+os_win32/os_config.c dynamic static
+os_win32/os_dir.c dynamic static
+os_win32/os_errno.c dynamic static
+os_win32/os_fid.c dynamic static
+os_win32/os_fsync.c dynamic static
+os_win32/os_handle.c dynamic static
+os_win32/os_map.c dynamic static
+os_win32/os_open.c dynamic static
+os_win32/os_rename.c dynamic static
+os_win32/os_rw.c dynamic static
+os_win32/os_seek.c dynamic static
+os_win32/os_sleep.c dynamic static
+os_win32/os_spin.c dynamic static
+os_win32/os_stat.c dynamic static
+os_win32/os_type.c dynamic static
+qam/qam.c dynamic static vx
+qam/qam_auto.c dynamic static vx
+qam/qam_conv.c dynamic static vx
+qam/qam_files.c dynamic static vx
+qam/qam_method.c dynamic static vx
+qam/qam_open.c dynamic static vx
+qam/qam_rec.c dynamic static vx
+qam/qam_stat.c dynamic static vx
+qam/qam_upgrade.c dynamic static vx
+qam/qam_verify.c dynamic static vx
+rep/rep_method.c dynamic static vx
+rep/rep_record.c dynamic static vx
+rep/rep_region.c dynamic static vx
+rep/rep_util.c dynamic static vx
+rpc_client/client.c vx
+rpc_client/db_server_clnt.c vx
+rpc_client/gen_client.c vx
+rpc_client/gen_client_ret.c vx
+rpc_server/c/db_server_proc.c
+rpc_server/c/db_server_svc.c
+rpc_server/c/db_server_util.c
+rpc_server/c/db_server_xdr.c vx
+rpc_server/c/gen_db_server.c
+rpc_server/cxx/db_server_cxxproc.cpp
+rpc_server/cxx/db_server_cxxutil.cpp
+tcl/tcl_compat.c tcl
+tcl/tcl_db.c tcl
+tcl/tcl_db_pkg.c tcl
+tcl/tcl_dbcursor.c tcl
+tcl/tcl_env.c tcl
+tcl/tcl_internal.c tcl
+tcl/tcl_lock.c tcl
+tcl/tcl_log.c tcl
+tcl/tcl_mp.c tcl
+tcl/tcl_rep.c tcl
+tcl/tcl_txn.c tcl
+tcl/tcl_util.c tcl
+test_perf/db_perf.c app=db_perf
+test_perf/perf_cache_check.c app=db_perf
+test_perf/perf_checkpoint.c app=db_perf
+test_perf/perf_config.c app=db_perf
+test_perf/perf_dbs.c app=db_perf
+test_perf/perf_debug.c app=db_perf
+test_perf/perf_file.c app=db_perf
+test_perf/perf_key.c app=db_perf
+test_perf/perf_log.c app=db_perf
+test_perf/perf_misc.c app=db_perf
+test_perf/perf_op.c app=db_perf
+test_perf/perf_parse.c app=db_perf
+test_perf/perf_rand.c app=db_perf
+test_perf/perf_spawn.c app=db_perf
+test_perf/perf_thread.c app=db_perf
+test_perf/perf_trickle.c app=db_perf
+test_perf/perf_txn.c app=db_perf
+test_perf/perf_util.c app=db_perf
+test_perf/perf_vx.c
+txn/txn.c dynamic static vx
+txn/txn_auto.c dynamic static vx
+txn/txn_method.c dynamic static vx
+txn/txn_rec.c dynamic static vx
+txn/txn_recover.c dynamic static vx
+txn/txn_region.c dynamic static vx
+txn/txn_stat.c dynamic static vx
+txn/txn_util.c dynamic static vx
+xa/xa.c dynamic static vx
+xa/xa_db.c dynamic static vx
+xa/xa_map.c dynamic static vx
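
With the old "skip" keyword gone from srcfiles.in, membership in each build is expressed purely by the tags that follow a file name, and untagged lines are ignored by the generators. Purely as an illustration of how the format reads (the real consumers are s_win32_dsp above and s_vxworks, which is not part of this hunk), the VxWorks file list could be pulled out like this:

# Illustration only: print the files tagged for the VxWorks library.
sed -e 's/#.*//' srcfiles.in |
awk 'NF > 1 { for (i = 2; i <= NF; i++) if ($i == "vx") print $1 }'
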
diff --git a/bdb/dist/template/.IGNORE_ME b/bdb/dist/template/.IGNORE_ME
deleted file mode 100644
index 558fd496f0c..00000000000
--- a/bdb/dist/template/.IGNORE_ME
+++ /dev/null
@@ -1,3 +0,0 @@
-Some combinations of the gzip and tar archive exploders found
-on Linux systems ignore directories that don't have any files
-(other than symbolic links) in them. So, here's a file.
diff --git a/bdb/dist/rec_ctemp b/bdb/dist/template/rec_ctemp
index 6be6d3166b8..2951189c5bd 100644
--- a/bdb/dist/rec_ctemp
+++ b/bdb/dist/template/rec_ctemp
@@ -1,31 +1,31 @@
/*
- * __PREF_FUNC_recover --
+ * PREF_FUNC_recover --
* Recovery function for FUNC.
*
- * PUBLIC: int __PREF_FUNC_recover
+ * PUBLIC: int PREF_FUNC_recover
* PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
*/
int
-__PREF_FUNC_recover(dbenv, dbtp, lsnp, op, info)
+PREF_FUNC_recover(dbenv, dbtp, lsnp, op, info)
DB_ENV *dbenv;
DBT *dbtp;
DB_LSN *lsnp;
db_recops op;
void *info;
{
- __PREF_FUNC_args *argp;
+ PREF_FUNC_args *argp;
DB *file_dbp;
DBC *dbc;
DB_MPOOLFILE *mpf;
PAGE *pagep;
int cmp_n, cmp_p, modified, ret;
- REC_PRINT(__PREF_FUNC_print);
- REC_INTRO(__PREF_FUNC_read);
+ REC_PRINT(PREF_FUNC_print);
+ REC_INTRO(PREF_FUNC_read, 1);
- if ((ret = memp_fget(mpf, &argp->pgno, 0, &pagep)) != 0)
+ if ((ret = mpf->get(mpf, &argp->pgno, 0, &pagep)) != 0)
if (DB_REDO(op)) {
- if ((ret = memp_fget(mpf,
+ if ((ret = mpf->get(mpf,
&argp->pgno, DB_MPOOL_CREATE, &pagep)) != 0)
goto out;
} else {
@@ -51,7 +51,7 @@ __PREF_FUNC_recover(dbenv, dbtp, lsnp, op, info)
/* Need to undo update described. */
modified = 1;
}
- if (ret = memp_fput(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0))
+ if (ret = mpf->put(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0))
goto out;
*lsnp = argp->prev_lsn;
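
The rename above moves rec_ctemp under dist/template, drops the leading underscores from the placeholder names, and switches the template to the mpf->get/mpf->put memory-pool methods. The tooling that expands PREF and FUNC into a concrete prefix and record name is not part of this hunk; purely as a hypothetical illustration of what an expansion might look like:

# Hypothetical example only: expand the PREF/FUNC placeholders by hand,
# producing a recovery routine named __ham_insdel_recover.
sed -e 's/PREF_/__ham_/g' -e 's/FUNC/insdel/g' \
	< dist/template/rec_ctemp > ham_insdel_rec.c
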
diff --git a/bdb/dist/template/rec_dbreg b/bdb/dist/template/rec_dbreg
new file mode 100644
index 00000000000..bbdf19d5ffc
--- /dev/null
+++ b/bdb/dist/template/rec_dbreg
@@ -0,0 +1,75 @@
+#include "db_config.h"
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_page.h"
+#include "dbinc/__dbreg.h"
+#include "dbinc/log.h"
+
+/*
+ * __dbreg_register_recover --
+ * Recovery function for register.
+ *
+ * PUBLIC: int __dbreg_register_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__dbreg_register_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __dbreg_register_args *argp;
+ DB *file_dbp;
+ DBC *dbc;
+ DB_MPOOLFILE *mpf;
+ PAGE *pagep;
+ int cmp_n, cmp_p, modified, ret;
+
+ REC_PRINT(__dbreg_register_print);
+ REC_INTRO(__dbreg_register_read, 1);
+
+ if ((ret = mpf->get(mpf, &argp->pgno, 0, &pagep)) != 0)
+ if (DB_REDO(op)) {
+ if ((ret = mpf->get(mpf,
+ &argp->pgno, DB_MPOOL_CREATE, &pagep)) != 0)
+ goto out;
+ } else {
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+ goto out;
+ }
+
+ modified = 0;
+ cmp_n = log_compare(lsnp, &LSN(pagep));
+
+ /*
+ * Use this when there is something like "pagelsn" in the argp
+ * structure. Sometimes, you might need to compare meta-data
+ * lsn's instead.
+ *
+ * cmp_p = log_compare(&LSN(pagep), argp->pagelsn);
+ */
+ if (cmp_p == 0 && DB_REDO(op)) {
+ /* Need to redo update described. */
+ modified = 1;
+ } else if (cmp_n == 0 && !DB_REDO(op)) {
+ /* Need to undo update described. */
+ modified = 1;
+ }
+ if (ret = mpf->put(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0))
+ goto out;
+
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+
+out: REC_CLOSE;
+}
+
diff --git a/bdb/dist/template/rec_fileops b/bdb/dist/template/rec_fileops
new file mode 100644
index 00000000000..c1487835ea9
--- /dev/null
+++ b/bdb/dist/template/rec_fileops
@@ -0,0 +1,323 @@
+#include "db_config.h"
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_page.h"
+#include "dbinc/__fop.h"
+#include "dbinc/log.h"
+
+/*
+ * __fop_create_recover --
+ * Recovery function for create.
+ *
+ * PUBLIC: int __fop_create_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__fop_create_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __fop_create_args *argp;
+ DB *file_dbp;
+ DBC *dbc;
+ DB_MPOOLFILE *mpf;
+ PAGE *pagep;
+ int cmp_n, cmp_p, modified, ret;
+
+ REC_PRINT(__fop_create_print);
+ REC_INTRO(__fop_create_read, 1);
+
+ if ((ret = mpf->get(mpf, &argp->pgno, 0, &pagep)) != 0)
+ if (DB_REDO(op)) {
+ if ((ret = mpf->get(mpf,
+ &argp->pgno, DB_MPOOL_CREATE, &pagep)) != 0)
+ goto out;
+ } else {
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+ goto out;
+ }
+
+ modified = 0;
+ cmp_n = log_compare(lsnp, &LSN(pagep));
+
+ /*
+ * Use this when there is something like "pagelsn" in the argp
+ * structure. Sometimes, you might need to compare meta-data
+ * lsn's instead.
+ *
+ * cmp_p = log_compare(&LSN(pagep), argp->pagelsn);
+ */
+ if (cmp_p == 0 && DB_REDO(op)) {
+ /* Need to redo update described. */
+ modified = 1;
+ } else if (cmp_n == 0 && !DB_REDO(op)) {
+ /* Need to undo update described. */
+ modified = 1;
+ }
+ if (ret = mpf->put(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0))
+ goto out;
+
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+
+out: REC_CLOSE;
+}
+
+/*
+ * __fop_remove_recover --
+ * Recovery function for remove.
+ *
+ * PUBLIC: int __fop_remove_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__fop_remove_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __fop_remove_args *argp;
+ DB *file_dbp;
+ DBC *dbc;
+ DB_MPOOLFILE *mpf;
+ PAGE *pagep;
+ int cmp_n, cmp_p, modified, ret;
+
+ REC_PRINT(__fop_remove_print);
+ REC_INTRO(__fop_remove_read, 1);
+
+ if ((ret = mpf->get(mpf, &argp->pgno, 0, &pagep)) != 0)
+ if (DB_REDO(op)) {
+ if ((ret = mpf->get(mpf,
+ &argp->pgno, DB_MPOOL_CREATE, &pagep)) != 0)
+ goto out;
+ } else {
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+ goto out;
+ }
+
+ modified = 0;
+ cmp_n = log_compare(lsnp, &LSN(pagep));
+
+ /*
+ * Use this when there is something like "pagelsn" in the argp
+ * structure. Sometimes, you might need to compare meta-data
+ * lsn's instead.
+ *
+ * cmp_p = log_compare(&LSN(pagep), argp->pagelsn);
+ */
+ if (cmp_p == 0 && DB_REDO(op)) {
+ /* Need to redo update described. */
+ modified = 1;
+ } else if (cmp_n == 0 && !DB_REDO(op)) {
+ /* Need to undo update described. */
+ modified = 1;
+ }
+ if (ret = mpf->put(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0))
+ goto out;
+
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+
+out: REC_CLOSE;
+}
+
+/*
+ * __fop_write_recover --
+ * Recovery function for write.
+ *
+ * PUBLIC: int __fop_write_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__fop_write_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __fop_write_args *argp;
+ DB *file_dbp;
+ DBC *dbc;
+ DB_MPOOLFILE *mpf;
+ PAGE *pagep;
+ int cmp_n, cmp_p, modified, ret;
+
+ REC_PRINT(__fop_write_print);
+ REC_INTRO(__fop_write_read, 1);
+
+ if ((ret = mpf->get(mpf, &argp->pgno, 0, &pagep)) != 0)
+ if (DB_REDO(op)) {
+ if ((ret = mpf->get(mpf,
+ &argp->pgno, DB_MPOOL_CREATE, &pagep)) != 0)
+ goto out;
+ } else {
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+ goto out;
+ }
+
+ modified = 0;
+ cmp_n = log_compare(lsnp, &LSN(pagep));
+
+ /*
+ * Use this when there is something like "pagelsn" in the argp
+ * structure. Sometimes, you might need to compare meta-data
+ * lsn's instead.
+ *
+ * cmp_p = log_compare(&LSN(pagep), argp->pagelsn);
+ */
+ if (cmp_p == 0 && DB_REDO(op)) {
+ /* Need to redo update described. */
+ modified = 1;
+ } else if (cmp_n == 0 && !DB_REDO(op)) {
+ /* Need to undo update described. */
+ modified = 1;
+ }
+ if (ret = mpf->put(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0))
+ goto out;
+
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+
+out: REC_CLOSE;
+}
+
+/*
+ * __fop_rename_recover --
+ * Recovery function for rename.
+ *
+ * PUBLIC: int __fop_rename_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__fop_rename_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __fop_rename_args *argp;
+ DB *file_dbp;
+ DBC *dbc;
+ DB_MPOOLFILE *mpf;
+ PAGE *pagep;
+ int cmp_n, cmp_p, modified, ret;
+
+ REC_PRINT(__fop_rename_print);
+ REC_INTRO(__fop_rename_read, 1);
+
+ if ((ret = mpf->get(mpf, &argp->pgno, 0, &pagep)) != 0)
+ if (DB_REDO(op)) {
+ if ((ret = mpf->get(mpf,
+ &argp->pgno, DB_MPOOL_CREATE, &pagep)) != 0)
+ goto out;
+ } else {
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+ goto out;
+ }
+
+ modified = 0;
+ cmp_n = log_compare(lsnp, &LSN(pagep));
+
+ /*
+ * Use this when there is something like "pagelsn" in the argp
+ * structure. Sometimes, you might need to compare meta-data
+ * lsn's instead.
+ *
+ * cmp_p = log_compare(&LSN(pagep), argp->pagelsn);
+ */
+ if (cmp_p == 0 && DB_REDO(op)) {
+ /* Need to redo update described. */
+ modified = 1;
+ } else if (cmp_n == 0 && !DB_REDO(op)) {
+ /* Need to undo update described. */
+ modified = 1;
+ }
+ if (ret = mpf->put(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0))
+ goto out;
+
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+
+out: REC_CLOSE;
+}
+
+/*
+ * __fop_file_remove_recover --
+ * Recovery function for file_remove.
+ *
+ * PUBLIC: int __fop_file_remove_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__fop_file_remove_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __fop_file_remove_args *argp;
+ DB *file_dbp;
+ DBC *dbc;
+ DB_MPOOLFILE *mpf;
+ PAGE *pagep;
+ int cmp_n, cmp_p, modified, ret;
+
+ REC_PRINT(__fop_file_remove_print);
+ REC_INTRO(__fop_file_remove_read, 1);
+
+ if ((ret = mpf->get(mpf, &argp->pgno, 0, &pagep)) != 0)
+ if (DB_REDO(op)) {
+ if ((ret = mpf->get(mpf,
+ &argp->pgno, DB_MPOOL_CREATE, &pagep)) != 0)
+ goto out;
+ } else {
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+ goto out;
+ }
+
+ modified = 0;
+ cmp_n = log_compare(lsnp, &LSN(pagep));
+
+ /*
+ * Use this when there is something like "pagelsn" in the argp
+ * structure. Sometimes, you might need to compare meta-data
+ * lsn's instead.
+ *
+ * cmp_p = log_compare(&LSN(pagep), argp->pagelsn);
+ */
+ if (cmp_p == 0 && DB_REDO(op)) {
+ /* Need to redo update described. */
+ modified = 1;
+ } else if (cmp_n == 0 && !DB_REDO(op)) {
+ /* Need to undo update described. */
+ modified = 1;
+ }
+ if (ret = mpf->put(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0))
+ goto out;
+
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+
+out: REC_CLOSE;
+}
+
diff --git a/bdb/dist/vx_2.0/BerkeleyDB.wpj b/bdb/dist/vx_2.0/BerkeleyDB.wpj
new file mode 100644
index 00000000000..78684d90067
--- /dev/null
+++ b/bdb/dist/vx_2.0/BerkeleyDB.wpj
@@ -0,0 +1,251 @@
+Document file - DO NOT EDIT
+
+<BEGIN> BUILD_PENTIUM_debug_BUILDRULE
+BerkeleyDB.out
+<END>
+
+<BEGIN> BUILD_PENTIUM_debug_MACRO_AR
+ar386
+<END>
+
+<BEGIN> BUILD_PENTIUM_debug_MACRO_ARCHIVE
+$(PRJ_DIR)/PENTIUMgnu/BerkeleyDB_sim.a
+<END>
+
+<BEGIN> BUILD_PENTIUM_debug_MACRO_AS
+cc386
+<END>
+
+<BEGIN> BUILD_PENTIUM_debug_MACRO_CC
+cc386
+<END>
+
+<BEGIN> BUILD_PENTIUM_debug_MACRO_CFLAGS
+-g \
+ -mpentium \
+ -ansi \
+ -nostdinc \
+ -DRW_MULTI_THREAD \
+ -D_REENTRANT \
+ -fvolatile \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM \
+ -O0 \
+ -I$(PRJ_DIR) \
+ -I$(PRJ_DIR)/.. \
+ -DDIAGNOSTIC \
+ -DDEBUG
+<END>
+
+<BEGIN> BUILD_PENTIUM_debug_MACRO_CFLAGS_AS
+-g \
+ -mpentium \
+ -ansi \
+ -nostdinc \
+ -fvolatile \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -P \
+ -x \
+ assembler-with-cpp \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM
+<END>
+
+<BEGIN> BUILD_PENTIUM_debug_MACRO_CPP
+cc386 -E -P -xc
+<END>
+
+<BEGIN> BUILD_PENTIUM_debug_MACRO_LD
+ld386
+<END>
+
+<BEGIN> BUILD_PENTIUM_debug_MACRO_LDFLAGS
+-X -N
+<END>
+
+<BEGIN> BUILD_PENTIUM_debug_MACRO_LD_PARTIAL_FLAGS
+-X -r
+<END>
+
+<BEGIN> BUILD_PENTIUM_debug_MACRO_NM
+nm386 -g
+<END>
+
+<BEGIN> BUILD_PENTIUM_debug_MACRO_OPTION_DEFINE_MACRO
+-D
+<END>
+
+<BEGIN> BUILD_PENTIUM_debug_MACRO_OPTION_INCLUDE_DIR
+-I
+<END>
+
+<BEGIN> BUILD_PENTIUM_debug_MACRO_POST_BUILD_RULE
+
+<END>
+
+<BEGIN> BUILD_PENTIUM_debug_MACRO_PRJ_LIBS
+
+<END>
+
+<BEGIN> BUILD_PENTIUM_debug_MACRO_SIZE
+size386
+<END>
+
+<BEGIN> BUILD_PENTIUM_debug_RO_DEPEND_PATH
+{$(WIND_BASE)/target/h/} \
+ {$(WIND_BASE)/target/src/} \
+ {$(WIND_BASE)/target/config/}
+<END>
+
+<BEGIN> BUILD_PENTIUM_debug_TC
+::tc_PENTIUMgnu
+<END>
+
+<BEGIN> BUILD_PENTIUM_release_BUILDRULE
+BerkeleyDB.out
+<END>
+
+<BEGIN> BUILD_PENTIUM_release_MACRO_AR
+ar386
+<END>
+
+<BEGIN> BUILD_PENTIUM_release_MACRO_ARCHIVE
+$(PRJ_DIR)/PENTIUMgnu/BerkeleyDB_sim.a
+<END>
+
+<BEGIN> BUILD_PENTIUM_release_MACRO_AS
+cc386
+<END>
+
+<BEGIN> BUILD_PENTIUM_release_MACRO_CC
+cc386
+<END>
+
+<BEGIN> BUILD_PENTIUM_release_MACRO_CFLAGS
+-mpentium \
+ -ansi \
+ -nostdinc \
+ -DRW_MULTI_THREAD \
+ -D_REENTRANT \
+ -fvolatile \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM \
+ -O2 \
+ -I$(PRJ_DIR) \
+ -I$(PRJ_DIR)/..
+<END>
+
+<BEGIN> BUILD_PENTIUM_release_MACRO_CFLAGS_AS
+-g \
+ -mpentium \
+ -ansi \
+ -nostdinc \
+ -fvolatile \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -P \
+ -x \
+ assembler-with-cpp \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM
+<END>
+
+<BEGIN> BUILD_PENTIUM_release_MACRO_CPP
+cc386 -E -P -xc
+<END>
+
+<BEGIN> BUILD_PENTIUM_release_MACRO_LD
+ld386
+<END>
+
+<BEGIN> BUILD_PENTIUM_release_MACRO_LDDEPS
+
+<END>
+
+<BEGIN> BUILD_PENTIUM_release_MACRO_LDFLAGS
+-X -N
+<END>
+
+<BEGIN> BUILD_PENTIUM_release_MACRO_LD_PARTIAL_FLAGS
+-X -r
+<END>
+
+<BEGIN> BUILD_PENTIUM_release_MACRO_NM
+nm386 -g
+<END>
+
+<BEGIN> BUILD_PENTIUM_release_MACRO_OPTION_DEFINE_MACRO
+-D
+<END>
+
+<BEGIN> BUILD_PENTIUM_release_MACRO_OPTION_INCLUDE_DIR
+-I
+<END>
+
+<BEGIN> BUILD_PENTIUM_release_MACRO_POST_BUILD_RULE
+
+<END>
+
+<BEGIN> BUILD_PENTIUM_release_MACRO_PRJ_LIBS
+
+<END>
+
+<BEGIN> BUILD_PENTIUM_release_MACRO_SIZE
+size386
+<END>
+
+<BEGIN> BUILD_PENTIUM_release_RO_DEPEND_PATH
+{$(WIND_BASE)/target/h/} \
+ {$(WIND_BASE)/target/src/} \
+ {$(WIND_BASE)/target/config/}
+<END>
+
+<BEGIN> BUILD_PENTIUM_release_TC
+::tc_PENTIUMgnu
+<END>
+
+<BEGIN> BUILD_RULE_BerkeleyDB.out
+
+<END>
+
+<BEGIN> BUILD_RULE_BerkeleyDB_sim.out
+
+<END>
+
+<BEGIN> BUILD_RULE_archive
+
+<END>
+
+<BEGIN> BUILD_RULE_objects
+
+<END>
+
+<BEGIN> BUILD__CURRENT
+PENTIUM_debug
+<END>
+
+<BEGIN> BUILD__LIST
+PENTIUM_release PENTIUM_debug
+<END>
+
+<BEGIN> CORE_INFO_TYPE
+::prj_vxApp
+<END>
+
+<BEGIN> CORE_INFO_VERSION
+2.0
+<END>
+
diff --git a/bdb/dist/vx_2.0/wpj.in b/bdb/dist/vx_2.0/wpj.in
new file mode 100644
index 00000000000..2b942bb562c
--- /dev/null
+++ b/bdb/dist/vx_2.0/wpj.in
@@ -0,0 +1,160 @@
+Document file - DO NOT EDIT
+
+<BEGIN> BUILD_PENTIUMgnu_BUILDRULE
+__DB_APPLICATION_NAME__.out
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_AR
+ar386
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_ARCHIVE
+$(PRJ_DIR)/PENTIUMgnu/__DB_APPLICATION_NAME__.a
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_AS
+cc386
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_CC
+cc386
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_CFLAGS
+-g \
+ -mpentium \
+ -ansi \
+ -nostdinc \
+ -DRW_MULTI_THREAD \
+ -D_REENTRANT \
+ -fvolatile \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -I$(PRJ_DIR)/.. \
+ -I$(PRJ_DIR)/../.. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_CFLAGS_AS
+-g \
+ -mpentium \
+ -ansi \
+ -nostdinc \
+ -fvolatile \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -P \
+ -x \
+ assembler-with-cpp \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_CPP
+cc386 -E -P -xc
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_LD
+ld386
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_LDDEPS
+
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_LDFLAGS
+-X -N
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_LD_PARTIAL_FLAGS
+-X -r
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_NM
+nm386 -g
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_OPTION_DEFINE_MACRO
+-D
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_OPTION_INCLUDE_DIR
+-I
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_POST_BUILD_RULE
+
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_PRJ_LIBS
+
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_SIZE
+size386
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_RO_DEPEND_PATH
+{$(WIND_BASE)/target/h/} \
+ {$(WIND_BASE)/target/src/} \
+ {$(WIND_BASE)/target/config/}
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_TC
+::tc_PENTIUMgnu
+<END>
+
+<BEGIN> BUILD_RULE_archive
+
+<END>
+
+<BEGIN> BUILD_RULE___DB_APPLICATION_NAME__.out
+
+<END>
+
+<BEGIN> BUILD_RULE_objects
+
+<END>
+
+<BEGIN> BUILD__CURRENT
+PENTIUMgnu
+<END>
+
+<BEGIN> BUILD__LIST
+PENTIUMgnu
+<END>
+
+<BEGIN> CORE_INFO_TYPE
+::prj_vxApp
+<END>
+
+<BEGIN> CORE_INFO_VERSION
+2.0
+<END>
+
+<BEGIN> FILE___DB_APPLICATION_NAME__.c_dependDone
+FALSE
+<END>
+
+<BEGIN> FILE___DB_APPLICATION_NAME__.c_dependencies
+
+<END>
+
+<BEGIN> FILE___DB_APPLICATION_NAME__.c_objects
+__DB_APPLICATION_NAME__.o
+<END>
+
+<BEGIN> FILE___DB_APPLICATION_NAME__.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> PROJECT_FILES
+$(PRJ_DIR)/__DB_APPLICATION_NAME__.c
+<END>
+
+<BEGIN> userComments
+__DB_APPLICATION_NAME__
+<END>
diff --git a/bdb/dist/vx_3.1/Makefile.custom b/bdb/dist/vx_3.1/Makefile.custom
new file mode 100644
index 00000000000..ca781f7b251
--- /dev/null
+++ b/bdb/dist/vx_3.1/Makefile.custom
@@ -0,0 +1,51 @@
+#
+# Custom Makefile shell
+#
+# This file may be edited freely, since it will not be regenerated
+# by the project manager.
+#
+# Use this makefile to define rules to make external binaries
+# and deposit them in the $(EXTERNAL_BINARIES_DIR) directory.
+#
+# If you have specified external modules during your component
+# creation, you will find make rules already in place below.
+# You will likely have to edit these to suit your individual
+# build setup.
+#
+# You may wish to use the CPU, BUILD_SPEC or TOOL make variables in
+# your Makefile to support builds for different architectures. Use
+# the FORCE_EXTERNAL_MAKE phony target to ensure that your external
+# make always runs.
+#
+# The example below assumes that your custom makefile is in the
+# mySourceTree directory, and that the binary file it produces
+# is placed into the $(BUILD_SPEC) sub-directory.
+#
+# EXTERNAL_SOURCE_BASE = /folk/me/mySourceTree
+# EXTERNAL_MODULE = myLibrary.o
+# EXTERNAL_MAKE = make
+#
+# $(EXTERNAL_BINARIES_DIR)/$(EXTERNAL_MODULE) : FORCE_EXTERNAL_MAKE
+# $(EXTERNAL_MAKE) -C $(EXTERNAL_SOURCE_BASE) \
+# -f $(EXTERNAL_SOURCE_BASE)/Makefile \
+# CPU=$(CPU) BUILD_SPEC=$(BUILD_SPEC) $(@F)
+# $(CP) $(subst /,$(DIRCHAR),$(EXTERNAL_SOURCE_BASE)/$(BUILD_SPEC)/$(@F) $@)
+#
+# If you are not adding your external modules from the component wizard,
+# you will have to include them in your component yourself:
+#
+# From the GUI, you can do this with the Component's 'Add external module'
+# dialog.
+#
+# If you are using the command line, add the module(s) by editing the
+# MODULES line in component.cdf file, e.g.
+#
+# Component INCLUDE_MYCOMPONENT {
+#
+# MODULES foo.o goo.o \
+# myLibrary.o
+#
+
+
+# rules to build custom libraries
+
diff --git a/bdb/dist/vx_3.1/cdf.1 b/bdb/dist/vx_3.1/cdf.1
new file mode 100644
index 00000000000..17db06f7e61
--- /dev/null
+++ b/bdb/dist/vx_3.1/cdf.1
@@ -0,0 +1,12 @@
+/* component.cdf - dynamically updated configuration */
+
+/*
+ * NOTE: you may edit this file to alter the configuration
+ * But all non-configuration information, including comments,
+ * will be lost upon rebuilding this project.
+ */
+
+/* Component information */
+
+Component INCLUDE_BERKELEYDB {
+ ENTRY_POINTS ALL_GLOBAL_SYMBOLS
diff --git a/bdb/dist/vx_3.1/cdf.2 b/bdb/dist/vx_3.1/cdf.2
new file mode 100644
index 00000000000..76f123af9fb
--- /dev/null
+++ b/bdb/dist/vx_3.1/cdf.2
@@ -0,0 +1,9 @@
+ NAME BerkeleyDB
+ PREF_DOMAIN ANY
+ _INIT_ORDER usrComponentsInit
+}
+
+/* EntryPoint information */
+
+/* Module information */
+
diff --git a/bdb/dist/vx_3.1/cdf.3 b/bdb/dist/vx_3.1/cdf.3
new file mode 100644
index 00000000000..a3146ced95a
--- /dev/null
+++ b/bdb/dist/vx_3.1/cdf.3
@@ -0,0 +1,2 @@
+/* Parameter information */
+
diff --git a/bdb/dist/vx_3.1/component.cdf b/bdb/dist/vx_3.1/component.cdf
new file mode 100644
index 00000000000..91edaa87853
--- /dev/null
+++ b/bdb/dist/vx_3.1/component.cdf
@@ -0,0 +1,30 @@
+/* component.cdf - dynamically updated configuration */
+
+/*
+ * NOTE: you may edit this file to alter the configuration
+ * But all non-configuration information, including comments,
+ * will be lost upon rebuilding this project.
+ */
+
+/* Component information */
+
+Component INCLUDE___DB_CAPAPPL_NAME__ {
+ ENTRY_POINTS ALL_GLOBAL_SYMBOLS
+ MODULES __DB_APPLICATION_NAME__.o
+ NAME __DB_APPLICATION_NAME__
+ PREF_DOMAIN ANY
+ _INIT_ORDER usrComponentsInit
+}
+
+/* EntryPoint information */
+
+/* Module information */
+
+Module __DB_APPLICATION_NAME__.o {
+
+ NAME __DB_APPLICATION_NAME__.o
+ SRC_PATH_NAME $PRJ_DIR/../__DB_APPLICATION_NAME__.c
+}
+
+/* Parameter information */
+
diff --git a/bdb/dist/vx_3.1/component.wpj b/bdb/dist/vx_3.1/component.wpj
new file mode 100644
index 00000000000..01c51c1b97f
--- /dev/null
+++ b/bdb/dist/vx_3.1/component.wpj
@@ -0,0 +1,475 @@
+Document file - DO NOT EDIT
+
+<BEGIN> CORE_INFO_TYPE
+::prj_component
+<END>
+
+<BEGIN> CORE_INFO_VERSION
+AE1.1
+<END>
+
+<BEGIN> BUILD__CURRENT
+PENTIUM2gnu.debug
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_CURRENT_TARGET
+default
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_DEFAULTFORCPU
+1
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../__DB_APPLICATION_NAME__.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../__DB_APPLICATION_NAME__.c_objects
+__DB_APPLICATION_NAME__.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../__DB_APPLICATION_NAME__.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../__DB_APPLICATION_NAME__.c_objects
+__DB_APPLICATION_NAME__.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../__DB_APPLICATION_NAME__.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/compConfig.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/compConfig.c_objects
+compConfig.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/compConfig.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_AR
+arpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_AS
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_CC
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_CFLAGS
+-mcpu=pentiumpro \
+ -march=pentiumpro \
+ -ansi \
+ -DRW_MULTI_THREAD \
+ -D_REENTRANT \
+ -g \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -MD \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -I$(PRJ_DIR)/../.. \
+ -I$(PRJ_DIR)/../../.. \
+ -DCPU=PENTIUM2
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_CFLAGS_AS
+-mcpu=pentiumpro \
+ -march=pentiumpro \
+ -ansi \
+ -g \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -P \
+ -x \
+ assembler-with-cpp \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM2
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_CPP
+ccpentium -E -P
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_CPPFILT
+c++filtpentium --strip-underscores
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_LD
+ldpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_LDFLAGS
+-X
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_LDPARTIAL
+ccpentium \
+ -B$(WIND_BASE)/host/$(WIND_HOST_TYPE)/lib/gcc-lib/ \
+ -nostdlib \
+ -r \
+ -Wl,-X
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_LD_PARTIAL_FLAGS
+-X -r
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_NM
+nmpentium -g
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_OPTION_DEFINE_MACRO
+-D
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_OPTION_GENERATE_DEPENDENCY_FILE
+-MD
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_OPTION_INCLUDE_DIR
+-I
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_RELEASE
+0
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_SIZE
+sizepentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_RELEASE
+0
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_RO_DEPEND_PATH
+$(WIND_BASE)/target/h/
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_TC
+::tc_PENTIUM2gnu.debug
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_DEFAULTFORCPU
+0
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../__DB_APPLICATION_NAME__.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../__DB_APPLICATION_NAME__.c_objects
+__DB_APPLICATION_NAME__.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../__DB_APPLICATION_NAME__.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/compConfig.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/compConfig.c_objects
+compConfig.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/compConfig.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_AR
+arpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_AS
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_CC
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_CFLAGS
+-mcpu=pentiumpro \
+ -march=pentiumpro \
+ -ansi \
+ -DRW_MULTI_THREAD \
+ -D_REENTRANT \
+ -O2 \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -MD \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -I$(PRJ_DIR)/../.. \
+ -I$(PRJ_DIR)/../../.. \
+ -DCPU=PENTIUM2
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_CFLAGS_AS
+-mcpu=pentiumpro \
+ -march=pentiumpro \
+ -ansi \
+ -O2 \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -P \
+ -x \
+ assembler-with-cpp \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM2
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_CPP
+ccpentium -E -P
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_CPPFILT
+c++filtpentium --strip-underscores
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_LD
+ldpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_LDFLAGS
+-X
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_LDPARTIAL
+ccpentium \
+ -B$(WIND_BASE)/host/$(WIND_HOST_TYPE)/lib/gcc-lib/ \
+ -nostdlib \
+ -r \
+ -Wl,-X
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_LD_PARTIAL_FLAGS
+-X -r
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_NM
+nmpentium -g
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_OPTION_DEFINE_MACRO
+-D
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_OPTION_GENERATE_DEPENDENCY_FILE
+-MD
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_OPTION_INCLUDE_DIR
+-I
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_RELEASE
+1
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_SIZE
+sizepentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_RELEASE
+1
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_RO_DEPEND_PATH
+$(WIND_BASE)/target/h/
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_TC
+::tc_PENTIUM2gnu.release
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_DEFAULTFORCPU
+1
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../__DB_APPLICATION_NAME__.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../__DB_APPLICATION_NAME__.c_objects
+__DB_APPLICATION_NAME__.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../__DB_APPLICATION_NAME__.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/compConfig.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/compConfig.c_objects
+compConfig.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/compConfig.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_AR
+arpentium
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_AS
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_CC
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_CFLAGS
+-mcpu=pentium \
+ -march=pentium \
+ -ansi \
+ -DRW_MULTI_THREAD \
+ -D_REENTRANT \
+ -g \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -MD \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -I$(PRJ_DIR)/../.. \
+ -I$(PRJ_DIR)/../../.. \
+ -DCPU=PENTIUM
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_CFLAGS_AS
+-mcpu=pentium \
+ -march=pentium \
+ -ansi \
+ -g \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -P \
+ -x \
+ assembler-with-cpp \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_CPP
+ccpentium -E -P
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_CPPFILT
+c++filtpentium --strip-underscores
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_LD
+ldpentium
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_LDFLAGS
+-X
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_LDPARTIAL
+ccpentium \
+ -B$(WIND_BASE)/host/$(WIND_HOST_TYPE)/lib/gcc-lib/ \
+ -nostdlib \
+ -r \
+ -Wl,-X
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_LD_PARTIAL_FLAGS
+-X -r
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_NM
+nmpentium -g
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_OPTION_DEFINE_MACRO
+-D
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_OPTION_GENERATE_DEPENDENCY_FILE
+-MD
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_OPTION_INCLUDE_DIR
+-I
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_RELEASE
+0
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_SIZE
+sizepentium
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_RELEASE
+0
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_RO_DEPEND_PATH
+$(WIND_BASE)/target/h/
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_TC
+::tc_PENTIUMgnu.debug
+<END>
+
+<BEGIN> BUILD__LIST
+PENTIUM2gnu.debug PENTIUM2gnu.release PENTIUMgnu.debug
+<END>
+
+<BEGIN> PROJECT_FILES
+$(PRJ_DIR)/../__DB_APPLICATION_NAME__.c \
+ $(PRJ_DIR)/compConfig.c
+<END>
+
+<BEGIN> WCC__CDF_PATH
+$(PRJ_DIR)
+<END>
+
+<BEGIN> WCC__CURRENT
+PENTIUM2gnu.debug
+<END>
+
+<BEGIN> WCC__LIST
+PENTIUM2gnu.debug
+<END>
+
+<BEGIN> WCC__MXR_LIBS
+lib$(CPU)$(TOOL)vx.a
+<END>
+
+<BEGIN> WCC__OBJS_PATH
+$(WIND_BASE)/target/lib/obj$CPU$TOOLvx
+<END>
+
diff --git a/bdb/dist/vx_3.1/wpj.1 b/bdb/dist/vx_3.1/wpj.1
new file mode 100644
index 00000000000..414b4e8fa35
--- /dev/null
+++ b/bdb/dist/vx_3.1/wpj.1
@@ -0,0 +1,22 @@
+Document file - DO NOT EDIT
+
+<BEGIN> CORE_INFO_TYPE
+::prj_component
+<END>
+
+<BEGIN> CORE_INFO_VERSION
+AE1.0
+<END>
+
+<BEGIN> BUILD__CURRENT
+PENTIUM2gnu.debug
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_CURRENT_TARGET
+default
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_DEFAULTFORCPU
+0
+<END>
+
diff --git a/bdb/dist/vx_3.1/wpj.2 b/bdb/dist/vx_3.1/wpj.2
new file mode 100644
index 00000000000..0294f763ef7
--- /dev/null
+++ b/bdb/dist/vx_3.1/wpj.2
@@ -0,0 +1,130 @@
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/compConfig.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/compConfig.c_objects
+compConfig.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/compConfig.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_AR
+arpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_AS
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_CC
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_CFLAGS
+-mcpu=pentiumpro \
+ -march=pentiumpro \
+ -ansi \
+ -DRW_MULTI_THREAD \
+ -D_REENTRANT \
+ -g \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -MD \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM2 \
+ -I$(PRJ_DIR)/.. \
+ -I$(PRJ_DIR)/../.. \
+ -DDEBUG \
+ -DDIAGNOSTIC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_CFLAGS_AS
+-mcpu=pentiumpro \
+ -march=pentiumpro \
+ -ansi \
+ -g \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -P \
+ -x \
+ assembler-with-cpp \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM2
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_CPP
+ccpentium -E -P
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_CPPFILT
+c++filtpentium --strip-underscores
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_LD
+ldpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_LDFLAGS
+-X
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_LDPARTIAL
+ccpentium \
+ -B$(WIND_BASE)/host/$(WIND_HOST_TYPE)/lib/gcc-lib/ \
+ -nostdlib \
+ -r \
+ -Wl,-X
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_LD_PARTIAL_FLAGS
+-X -r
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_NM
+nmpentium -g
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_OPTION_DEFINE_MACRO
+-D
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_OPTION_GENERATE_DEPENDENCY_FILE
+-MD
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_OPTION_INCLUDE_DIR
+-I
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_RELEASE
+0
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_SIZE
+sizepentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_RELEASE
+0
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_RO_DEPEND_PATH
+$(WIND_BASE)/target/h/
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_TC
+::tc_PENTIUM2gnu.debug
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_DEFAULTFORCPU
+0
+<END>
+
diff --git a/bdb/dist/vx_3.1/wpj.3 b/bdb/dist/vx_3.1/wpj.3
new file mode 100644
index 00000000000..f06e6253923
--- /dev/null
+++ b/bdb/dist/vx_3.1/wpj.3
@@ -0,0 +1,128 @@
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/compConfig.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/compConfig.c_objects
+compConfig.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/compConfig.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_AR
+arpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_AS
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_CC
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_CFLAGS
+-mcpu=pentiumpro \
+ -march=pentiumpro \
+ -ansi \
+ -DRW_MULTI_THREAD \
+ -D_REENTRANT \
+ -O2 \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -MD \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM2 \
+ -I$(PRJ_DIR)/.. \
+ -I$(PRJ_DIR)/../..
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_CFLAGS_AS
+-mcpu=pentiumpro \
+ -march=pentiumpro \
+ -ansi \
+ -O2 \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -P \
+ -x \
+ assembler-with-cpp \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM2
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_CPP
+ccpentium -E -P
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_CPPFILT
+c++filtpentium --strip-underscores
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_LD
+ldpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_LDFLAGS
+-X
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_LDPARTIAL
+ccpentium \
+ -B$(WIND_BASE)/host/$(WIND_HOST_TYPE)/lib/gcc-lib/ \
+ -nostdlib \
+ -r \
+ -Wl,-X
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_LD_PARTIAL_FLAGS
+-X -r
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_NM
+nmpentium -g
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_OPTION_DEFINE_MACRO
+-D
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_OPTION_GENERATE_DEPENDENCY_FILE
+-MD
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_OPTION_INCLUDE_DIR
+-I
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_RELEASE
+1
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_SIZE
+sizepentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_RELEASE
+1
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_RO_DEPEND_PATH
+$(WIND_BASE)/target/h/
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_TC
+::tc_PENTIUM2gnu.release
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_DEFAULTFORCPU
+1
+<END>
+
diff --git a/bdb/dist/vx_3.1/wpj.4 b/bdb/dist/vx_3.1/wpj.4
new file mode 100644
index 00000000000..84de6ebf359
--- /dev/null
+++ b/bdb/dist/vx_3.1/wpj.4
@@ -0,0 +1,135 @@
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/compConfig.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/compConfig.c_objects
+compConfig.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/compConfig.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_AR
+arpentium
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_AS
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_CC
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_CFLAGS
+-mcpu=pentium \
+ -march=pentium \
+ -ansi \
+ -DRW_MULTI_THREAD \
+ -D_REENTRANT \
+ -g \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -MD \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM \
+ -I$(PRJ_DIR)/.. \
+ -I$(PRJ_DIR)/../.. \
+ -DDEBUG \
+ -DDIAGNOSTIC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_CFLAGS_AS
+-mcpu=pentium \
+ -march=pentium \
+ -ansi \
+ -g \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -P \
+ -x \
+ assembler-with-cpp \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_CPP
+ccpentium -E -P
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_CPPFILT
+c++filtpentium --strip-underscores
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_LD
+ldpentium
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_LDFLAGS
+-X
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_LDPARTIAL
+ccpentium \
+ -B$(WIND_BASE)/host/$(WIND_HOST_TYPE)/lib/gcc-lib/ \
+ -nostdlib \
+ -r \
+ -Wl,-X
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_LD_PARTIAL_FLAGS
+-X -r
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_NM
+nmpentium -g
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_OPTION_DEFINE_MACRO
+-D
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_OPTION_GENERATE_DEPENDENCY_FILE
+-MD
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_OPTION_INCLUDE_DIR
+-I
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_RELEASE
+0
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_SIZE
+sizepentium
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_RELEASE
+0
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_RO_DEPEND_PATH
+$(WIND_BASE)/target/h/
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_TC
+::tc_PENTIUMgnu.debug
+<END>
+
+<BEGIN> BUILD__LIST
+PENTIUMgnu.debug PENTIUM2gnu.debug PENTIUM2gnu.release
+<END>
+
+<BEGIN> COMPONENT_COM_TYPE
+
+<END>
+
+<BEGIN> PROJECT_FILES
diff --git a/bdb/dist/vx_3.1/wpj.5 b/bdb/dist/vx_3.1/wpj.5
new file mode 100644
index 00000000000..f4056e7e22a
--- /dev/null
+++ b/bdb/dist/vx_3.1/wpj.5
@@ -0,0 +1,22 @@
+<END>
+
+<BEGIN> WCC__CDF_PATH
+$(PRJ_DIR)
+<END>
+
+<BEGIN> WCC__CURRENT
+PENTIUMgnu.debug
+<END>
+
+<BEGIN> WCC__LIST
+PENTIUMgnu.debug
+<END>
+
+<BEGIN> WCC__MXR_LIBS
+lib$(CPU)$(TOOL)vx.a
+<END>
+
+<BEGIN> WCC__OBJS_PATH
+$(WIND_BASE)/target/lib/obj$CPU$TOOLvx
+<END>
+
diff --git a/bdb/dist/vx_buildcd b/bdb/dist/vx_buildcd
new file mode 100755
index 00000000000..a94d78db974
--- /dev/null
+++ b/bdb/dist/vx_buildcd
@@ -0,0 +1,119 @@
+#!/bin/sh
+# $Id: vx_buildcd,v 1.6 2001/11/05 21:05:58 sue Exp $
+#
+# Build the Setup SDK CD image on the VxWorks host machine.
+
+. ./RELEASE
+
+B=`pwd`
+B=$B/..
+D=$B/dist/vx_setup
+C=$D/db.CD
+Q=/export/home/sue/SetupSDK
+S=$Q/resource/mfg/setup
+W=sun4-solaris2
+
+symdoc=$D/docs/BerkeleyDB.$DB_VERSION_MAJOR.$DB_VERSION_MINOR.$DB_VERSION_PATCH
+symdb=$D/windlink/sleepycat/BerkeleyDB.$DB_VERSION_MAJOR.$DB_VERSION_MINOR.$DB_VERSION_PATCH
+rm -rf $D/docs $D/windlink
+mkdir $D/docs $D/windlink $D/windlink/sleepycat
+ln -s $B/docs $symdoc
+ln -s $B $symdb
+
+s=/tmp/__db_a
+t=/tmp/__db_b
+
+#
+# Remove the old CD directory if it is there.
+if test -d $C; then
+ echo "$C cannot exist."
+ echo "As root, execute 'rm -rf $C'"
+ echo "and then rerun the script"
+ exit 1
+fi
+
+#
+# Check for absolute pathnames in the project files.
+# That is bad, but Tornado insists on putting them in
+# whenever you add new files.
+#
+rm -f $t
+f=`find $B/build_vxworks -name \*.wpj -print`
+for i in $f; do
+ grep -l -- "$B" $i >> $t
+done
+if test -s $t; then
+ echo "The following files contain absolute pathnames."
+ echo "They must be fixed before building the CD image:"
+ cat $t
+ exit 1
+fi
+
+#
+# NOTE: We reuse the same sed script over several files.
+#
+cat <<ENDOFSEDTEXT > $s
+s/@DB_VERSION_MAJOR@/$DB_VERSION_MAJOR/g
+s/@DB_VERSION_MINOR@/$DB_VERSION_MINOR/g
+s/@DB_VERSION_PATCH@/$DB_VERSION_PATCH/g
+s#@DB_SETUP_DIR@#$D#g
+ENDOFSEDTEXT
+
+f=$D/setup.pool
+(sed -f $s $D/vx_setup.in) > $t
+ (echo "Building $f" && rm -f $f && cp $t $f)
+
+f=$D/README.TXT
+(sed -f $s $D/README.in) > $t
+ (echo "Building $f" && rm -f $f && cp $t $f)
+
+f=$D/CONFIG.TCL
+(sed -f $s $D/CONFIG.in) > $t
+ (echo "Building $f" && rm -f $f && cp $t $f)
+
+f=$D/filelist.demo
+(sed -f $s $D/vx_demofile.in) > $t
+ (echo "Building $f" && rm -f $f && cp $t $f)
+
+# Copy the Sleepycat specific files into the SetupSDK area.
+(cd $D && cp README.TXT $S)
+(cd $D && cp LICENSE.TXT $S)
+(cd $D && cp CONFIG.TCL $S/RESOURCE/TCL)
+(cd $D && cp SETUP.BMP $S/RESOURCE/BITMAPS)
+
+#
+# NOTE: The contents of LIB must be on one, long, single line.
+# Even preserving it with a \ doesn't work for htmlBook.
+#
+f=../docs/LIB
+(echo "Building $f" && rm -f $f)
+cat <<ENDOFLIBTEXT >> $f
+{BerkeleyDB.$DB_VERSION_MAJOR.$DB_VERSION_MINOR.$DB_VERSION_PATCH} {Sleepycat Software Berkeley DB} {<b>BerkeleyDB.$DB_VERSION_MAJOR.$DB_VERSION_MINOR.$DB_VERSION_PATCH</b>} {<b><a href="./index.html">BerkeleyDB.$DB_VERSION_MAJOR.$DB_VERSION_MINOR.$DB_VERSION_PATCH</a></b>} {Sleepycat BerkeleyDB} {} {} {}
+ENDOFLIBTEXT
+
+#
+# Start generating the file list.
+f=$D/filelist.all
+
+#
+# Just put everything into the image. But we only want to find regular
+# files; we cannot have all the directories listed too.
+#
+# NOTE: This find is overly aggressive in getting files, particularly
+# for the 'windlink/sleepycat' files. We actually end up with 3 sets of the
+# documentation, the "real" ones in 'docs/BerkeleyDB*', the set found
+# via 'windlink/sleepycat/Berk*/docs' and the one found via our symlink in
+# 'windlink/sleepycat/Berk*/dist/vx_setup/docs/Berk*'.
+#
+# However, we waste a little disk space so that the expression below
+# is trivial and we don't have to maintain it as new files/directories
+# are added to DB.
+#
+(cd $D && find docs/BerkeleyDB.$DB_VERSION_MAJOR.$DB_VERSION_MINOR.$DB_VERSION_PATCH/ -follow -name \* -type f -print) > $t
+(cd $D && find windlink/sleepycat/BerkeleyDB.$DB_VERSION_MAJOR.$DB_VERSION_MINOR.$DB_VERSION_PATCH/ -follow -name docs -prune -o -type f -print) >> $t
+(echo "Building $f" && rm -f $f && cp $t $f)
+#
+# Finally build the CD image!
+#
+env PATH=$Q/$W/bin:$PATH QMS_BASE=$Q WIND_HOST_TYPE=$W \
+pool mfg -d $C -v -nokey BerkeleyDB.$DB_VERSION_MAJOR.$DB_VERSION_MINOR < $D/setup.pool
diff --git a/bdb/dist/vx_config.in b/bdb/dist/vx_config.in
new file mode 100644
index 00000000000..43fc8eb71f3
--- /dev/null
+++ b/bdb/dist/vx_config.in
@@ -0,0 +1,381 @@
+/* !!!
+ * The CONFIG_TEST option may be added using the Tornado project build.
+ * DO NOT modify it here.
+ */
+/* Define to 1 if you want to build a version for running the test suite. */
+/* #undef CONFIG_TEST */
+
+/* !!!
+ * The DEBUG option may be added using the Tornado project build.
+ * DO NOT modify it here.
+ */
+/* Define to 1 if you want a debugging version. */
+/* #undef DEBUG */
+
+/* Define to 1 if you want a version that logs read operations. */
+/* #undef DEBUG_ROP */
+
+/* Define to 1 if you want a version that logs write operations. */
+/* #undef DEBUG_WOP */
+
+/* !!!
+ * The DIAGNOSTIC option may be added using the Tornado project build.
+ * DO NOT modify it here.
+ */
+/* Define to 1 if you want a version with run-time diagnostic checking. */
+/* #undef DIAGNOSTIC */
+
+/* Define to 1 if you have the `clock_gettime' function. */
+#define HAVE_CLOCK_GETTIME 1
+
+/* Define to 1 if Berkeley DB release includes strong cryptography. */
+/* #undef HAVE_CRYPTO */
+
+/* Define to 1 if you have the `directio' function. */
+/* #undef HAVE_DIRECTIO */
+
+/* Define to 1 if you have the <dirent.h> header file, and it defines `DIR'.
+ */
+#define HAVE_DIRENT_H 1
+
+/* Define to 1 if you have the <dlfcn.h> header file. */
+/* #undef HAVE_DLFCN_H */
+
+/* Define to 1 if you have EXIT_SUCCESS/EXIT_FAILURE #defines. */
+#define HAVE_EXIT_SUCCESS 1
+
+/* Define to 1 if fcntl/F_SETFD denies child access to file descriptors. */
+/* #undef HAVE_FCNTL_F_SETFD */
+
+/* Define to 1 if allocated filesystem blocks are not zeroed. */
+#define HAVE_FILESYSTEM_NOTZERO 1
+
+/* Define to 1 if you have the `getcwd' function. */
+#define HAVE_GETCWD 1
+
+/* Define to 1 if you have the `getopt' function. */
+/* #undef HAVE_GETOPT */
+
+/* Define to 1 if you have the `gettimeofday' function. */
+/* #undef HAVE_GETTIMEOFDAY */
+
+/* Define to 1 if you have the `getuid' function. */
+/* #undef HAVE_GETUID */
+
+/* Define to 1 if you have the <inttypes.h> header file. */
+/* #undef HAVE_INTTYPES_H */
+
+/* Define to 1 if you have the `nsl' library (-lnsl). */
+/* #undef HAVE_LIBNSL */
+
+/* Define to 1 if you have the `memcmp' function. */
+#define HAVE_MEMCMP 1
+
+/* Define to 1 if you have the `memcpy' function. */
+#define HAVE_MEMCPY 1
+
+/* Define to 1 if you have the `memmove' function. */
+#define HAVE_MEMMOVE 1
+
+/* Define to 1 if you have the <memory.h> header file. */
+#define HAVE_MEMORY_H 1
+
+/* Define to 1 if you have the `mlock' function. */
+/* #undef HAVE_MLOCK */
+
+/* Define to 1 if you have the `mmap' function. */
+/* #undef HAVE_MMAP */
+
+/* Define to 1 if you have the `munlock' function. */
+/* #undef HAVE_MUNLOCK */
+
+/* Define to 1 if you have the `munmap' function. */
+/* #undef HAVE_MUNMAP */
+
+/* Define to 1 to use the GCC compiler and 68K assembly language mutexes. */
+/* #undef HAVE_MUTEX_68K_GCC_ASSEMBLY */
+
+/* Define to 1 to use the AIX _check_lock mutexes. */
+/* #undef HAVE_MUTEX_AIX_CHECK_LOCK */
+
+/* Define to 1 to use the GCC compiler and Alpha assembly language mutexes. */
+/* #undef HAVE_MUTEX_ALPHA_GCC_ASSEMBLY */
+
+/* Define to 1 to use the GCC compiler and ARM assembly language mutexes. */
+/* #undef HAVE_MUTEX_ARM_GCC_ASSEMBLY */
+
+/* Define to 1 to use the UNIX fcntl system call mutexes. */
+/* #undef HAVE_MUTEX_FCNTL */
+
+/* Define to 1 to use the GCC compiler and PaRisc assembly language mutexes.
+ */
+/* #undef HAVE_MUTEX_HPPA_GCC_ASSEMBLY */
+
+/* Define to 1 to use the msem_XXX mutexes on HP-UX. */
+/* #undef HAVE_MUTEX_HPPA_MSEM_INIT */
+
+/* Define to 1 to use the GCC compiler and IA64 assembly language mutexes. */
+/* #undef HAVE_MUTEX_IA64_GCC_ASSEMBLY */
+
+/* Define to 1 to use the msem_XXX mutexes on systems other than HP-UX. */
+/* #undef HAVE_MUTEX_MSEM_INIT */
+
+/* Define to 1 to use the GCC compiler and Apple PowerPC assembly language. */
+/* #undef HAVE_MUTEX_PPC_APPLE_GCC_ASSEMBLY */
+
+/* Define to 1 to use the GCC compiler and generic PowerPC assembly language.
+ */
+/* #undef HAVE_MUTEX_PPC_GENERIC_GCC_ASSEMBLY */
+
+/* Define to 1 to use POSIX 1003.1 pthread_XXX mutexes. */
+/* #undef HAVE_MUTEX_PTHREADS */
+
+/* Define to 1 to use Reliant UNIX initspin mutexes. */
+/* #undef HAVE_MUTEX_RELIANTUNIX_INITSPIN */
+
+/* Define to 1 to use the GCC compiler and S/390 assembly language mutexes. */
+/* #undef HAVE_MUTEX_S390_GCC_ASSEMBLY */
+
+/* Define to 1 to use the SCO compiler and x86 assembly language mutexes. */
+/* #undef HAVE_MUTEX_SCO_X86_CC_ASSEMBLY */
+
+/* Define to 1 to use the obsolete POSIX 1003.1 sema_XXX mutexes. */
+/* #undef HAVE_MUTEX_SEMA_INIT */
+
+/* Define to 1 to use the SGI XXX_lock mutexes. */
+/* #undef HAVE_MUTEX_SGI_INIT_LOCK */
+
+/* Define to 1 to use the Solaris _lock_XXX mutexes. */
+/* #undef HAVE_MUTEX_SOLARIS_LOCK_TRY */
+
+/* Define to 1 to use the Solaris lwp threads mutexes. */
+/* #undef HAVE_MUTEX_SOLARIS_LWP */
+
+/* Define to 1 to use the GCC compiler and Sparc assembly language mutexes. */
+/* #undef HAVE_MUTEX_SPARC_GCC_ASSEMBLY */
+
+/* Define to 1 if mutexes hold system resources. */
+#define HAVE_MUTEX_SYSTEM_RESOURCES 1
+
+/* Define to 1 if fast mutexes are available. */
+#define HAVE_MUTEX_THREADS 1
+
+/* Define to 1 to configure mutexes intra-process only. */
+/* #undef HAVE_MUTEX_THREAD_ONLY */
+
+/* Define to 1 to use the UNIX International mutexes. */
+/* #undef HAVE_MUTEX_UI_THREADS */
+
+/* Define to 1 to use the UTS compiler and assembly language mutexes. */
+/* #undef HAVE_MUTEX_UTS_CC_ASSEMBLY */
+
+/* Define to 1 to use VMS mutexes. */
+/* #undef HAVE_MUTEX_VMS */
+
+/* Define to 1 to use VxWorks mutexes. */
+#define HAVE_MUTEX_VXWORKS 1
+
+/* Define to 1 to use Windows mutexes. */
+/* #undef HAVE_MUTEX_WIN32 */
+
+/* Define to 1 to use the GCC compiler and x86 assembly language mutexes. */
+/* #undef HAVE_MUTEX_X86_GCC_ASSEMBLY */
+
+/* Define to 1 if you have the <ndir.h> header file, and it defines `DIR'. */
+/* #undef HAVE_NDIR_H */
+
+/* Define to 1 if you have the O_DIRECT flag. */
+/* #undef HAVE_O_DIRECT */
+
+/* Define to 1 if you have the `pread' function. */
+/* #undef HAVE_PREAD */
+
+/* Define to 1 if you have the `pstat_getdynamic' function. */
+/* #undef HAVE_PSTAT_GETDYNAMIC */
+
+/* Define to 1 if you have the `pwrite' function. */
+/* #undef HAVE_PWRITE */
+
+/* Define to 1 if building on QNX. */
+/* #undef HAVE_QNX */
+
+/* Define to 1 if you have the `qsort' function. */
+#define HAVE_QSORT 1
+
+/* Define to 1 if you have the `raise' function. */
+#define HAVE_RAISE 1
+
+/* Define to 1 if building RPC client/server. */
+/* #undef HAVE_RPC */
+
+/* Define to 1 if you have the `sched_yield' function. */
+#define HAVE_SCHED_YIELD 1
+
+/* Define to 1 if you have the `select' function. */
+#define HAVE_SELECT 1
+
+/* Define to 1 if you have the `shmget' function. */
+/* #undef HAVE_SHMGET */
+
+/* Define to 1 if you have the `snprintf' function. */
+/* #undef HAVE_SNPRINTF */
+
+/* Define to 1 if you have the <stdint.h> header file. */
+/* #undef HAVE_STDINT_H */
+
+/* Define to 1 if you have the <stdlib.h> header file. */
+#define HAVE_STDLIB_H 1
+
+/* Define to 1 if you have the `strcasecmp' function. */
+/* #undef HAVE_STRCASECMP */
+
+/* Define to 1 if you have the `strdup' function. */
+/* #undef HAVE_STRDUP */
+
+/* Define to 1 if you have the `strerror' function. */
+#define HAVE_STRERROR 1
+
+/* Define to 1 if you have the <strings.h> header file. */
+#define HAVE_STRINGS_H 1
+
+/* Define to 1 if you have the <string.h> header file. */
+#define HAVE_STRING_H 1
+
+/* Define to 1 if you have the `strtoul' function. */
+#define HAVE_STRTOUL 1
+
+/* Define to 1 if `st_blksize' is member of `struct stat'. */
+#define HAVE_STRUCT_STAT_ST_BLKSIZE 1
+
+/* Define to 1 if you have the `sysconf' function. */
+/* #undef HAVE_SYSCONF */
+
+/* Define to 1 if you have the <sys/dir.h> header file, and it defines `DIR'.
+ */
+/* #undef HAVE_SYS_DIR_H */
+
+/* Define to 1 if you have the <sys/ndir.h> header file, and it defines `DIR'.
+ */
+/* #undef HAVE_SYS_NDIR_H */
+
+/* Define to 1 if you have the <sys/select.h> header file. */
+/* #undef HAVE_SYS_SELECT_H */
+
+/* Define to 1 if you have the <sys/stat.h> header file. */
+/* #undef HAVE_SYS_STAT_H */
+
+/* Define to 1 if you have the <sys/time.h> header file. */
+/* #undef HAVE_SYS_TIME_H */
+
+/* Define to 1 if you have the <sys/types.h> header file. */
+/* #undef HAVE_SYS_TYPES_H */
+
+/* Define to 1 if you have the <unistd.h> header file. */
+#define HAVE_UNISTD_H 1
+
+/* Define to 1 if unlink of file with open file descriptors will fail. */
+#define HAVE_UNLINK_WITH_OPEN_FAILURE 1
+
+/* Define to 1 if you have the `vsnprintf' function. */
+/* #undef HAVE_VSNPRINTF */
+
+/* Define to 1 if building VxWorks. */
+#define HAVE_VXWORKS 1
+
+/* Define to 1 if you have the `yield' function. */
+/* #undef HAVE_YIELD */
+
+/* Define to 1 if you have the `_fstati64' function. */
+/* #undef HAVE__FSTATI64 */
+
+/* Define to the address where bug reports for this package should be sent. */
+#define PACKAGE_BUGREPORT "support@sleepycat.com"
+
+/* Define to the full name of this package. */
+#define PACKAGE_NAME "Berkeley DB"
+
+/* Define to the full name and version of this package. */
+#define PACKAGE_STRING "Berkeley DB __EDIT_DB_VERSION__"
+
+/* Define to the one symbol short name of this package. */
+#define PACKAGE_TARNAME "db-__EDIT_DB_VERSION__"
+
+/* Define to the version of this package. */
+#define PACKAGE_VERSION "__EDIT_DB_VERSION__"
+
+/* Define to 1 if the `S_IS*' macros in <sys/stat.h> do not work properly. */
+/* #undef STAT_MACROS_BROKEN */
+
+/* Define to 1 if you have the ANSI C header files. */
+#define STDC_HEADERS 1
+
+/* Define to 1 if you can safely include both <sys/time.h> and <time.h>. */
+/* #undef TIME_WITH_SYS_TIME */
+
+/* Define to 1 to mask harmless uninitialized memory read/writes. */
+/* #undef UMRW */
+
+/* Number of bits in a file offset, on hosts where this is settable. */
+/* #undef _FILE_OFFSET_BITS */
+
+/* Define for large files, on AIX-style hosts. */
+/* #undef _LARGE_FILES */
+
+/* Define to empty if `const' does not conform to ANSI C. */
+/* #undef const */
+
+/*
+ * Exit success/failure macros.
+ */
+#ifndef HAVE_EXIT_SUCCESS
+#define EXIT_FAILURE 1
+#define EXIT_SUCCESS 0
+#endif
+
+/*
+ * Don't step on the namespace. Other libraries may have their own
+ * implementations of these functions; we don't want to use their
+ * implementations or force them to use ours based on the load order.
+ */
+#ifndef HAVE_GETCWD
+#define getcwd __db_Cgetcwd
+#endif
+#ifndef HAVE_GETOPT
+#define getopt __db_Cgetopt
+#define optarg __db_Coptarg
+#define opterr __db_Copterr
+#define optind __db_Coptind
+#define optopt __db_Coptopt
+#endif
+#ifndef HAVE_MEMCMP
+#define memcmp __db_Cmemcmp
+#endif
+#ifndef HAVE_MEMCPY
+#define memcpy __db_Cmemcpy
+#endif
+#ifndef HAVE_MEMMOVE
+#define memmove __db_Cmemmove
+#endif
+#ifndef HAVE_RAISE
+#define raise __db_Craise
+#endif
+#ifndef HAVE_SNPRINTF
+#define snprintf __db_Csnprintf
+#endif
+#ifndef HAVE_STRCASECMP
+#define strcasecmp __db_Cstrcasecmp
+#define strncasecmp __db_Cstrncasecmp
+#endif
+#ifndef HAVE_STRERROR
+#define strerror __db_Cstrerror
+#endif
+#ifndef HAVE_VSNPRINTF
+#define vsnprintf __db_Cvsnprintf
+#endif
+
+/*
+ * !!!
+ * The following is not part of the automatic configuration setup, but
+ * provides the information necessary to build Berkeley DB on VxWorks.
+ */
+#include "vxWorks.h"
diff --git a/bdb/dist/vx_setup/CONFIG.in b/bdb/dist/vx_setup/CONFIG.in
new file mode 100644
index 00000000000..6ccceee7034
--- /dev/null
+++ b/bdb/dist/vx_setup/CONFIG.in
@@ -0,0 +1,10 @@
+#
+# Install configuration file.
+#
+# Note: This file may be modified during the pool manufacturing process to
+# add additional configuration statements. This file is sourced by
+# INSTW32.TCL.
+#
+
+cdromDescSet "Berkeley DB @DB_VERSION_MAJOR@.@DB_VERSION_MINOR@.@DB_VERSION_PATCH@"
+
diff --git a/bdb/dist/vx_setup/LICENSE.TXT b/bdb/dist/vx_setup/LICENSE.TXT
new file mode 100644
index 00000000000..7814c679cd7
--- /dev/null
+++ b/bdb/dist/vx_setup/LICENSE.TXT
@@ -0,0 +1,3 @@
+Copyright (c) 1996-2002
+ Sleepycat Software. All rights reserved.
+See the file LICENSE for redistribution information.
diff --git a/bdb/dist/vx_setup/MESSAGES.TCL b/bdb/dist/vx_setup/MESSAGES.TCL
new file mode 100644
index 00000000000..718a67fbc50
--- /dev/null
+++ b/bdb/dist/vx_setup/MESSAGES.TCL
@@ -0,0 +1,651 @@
+# MESSAGES.TCL - All setup strings.
+
+# modification history
+# --------------------
+# 03q,20apr99,bjl added release notes message for backward compatibility
+# page.
+# 03p,12apr99,wmd Add word about simulator in message about the drivers
+# object product.
+# 03o,03mar99,tcy Adjust setup directory size based on platform (fix for
+# SPR 25228)
+# 03n,24feb99,tcy modified DLL update messages
+# 03m,22feb99,tcy modified to align messages
+# 03l,17feb99,tcy modified message in the finish page for program group
+# installation
+# 03k,11feb99,tcy added messages for backward compatibility page
+# 03j,25jan99,tcy added messages from INSTW32.TCL
+# 03i,25jan99,wmd Reword the message for 5010_DRIVERS_INFO.
+# 03h,09dec98,bjl added messages about manufacturers updating patches.
+# 03g,01dec98,wmd Fix typos.
+# 03f,23nov98,tcy warn user to disable virus protection on Welcome screen
+# 03e,19nov98,wmd fixed minor nits in wording.
+# 03d,19nov98,bjl added web site locations for patchinfo.
+# 03c,18nov98,bjl added formatted patch messages for patchinfo file.
+# 03b,12nov98,tcy added message for not saving installation key
+# 03a,10nov98,tcy added warning message for space in destination directory
+# removed message for checking temporary disk space
+# 02z,27oct98,bjl added recommended patch messages, modified required msg.
+# 02y,26oct98,tcy added message for checking temporary disk space
+# 02x,22oct98,wmd fix messages for clarity.
+# 02w,21oct98,wmd fix message for drv/obj.
+# 02v,20oct98,tcy added message for updating system and changed dcom message
+# 02u,20oct98,bjl added tornado registry name entry message.
+# 02t,19oct98,bjl added tornado registry description message.
+# 02s,16oct98,wmd add new message for driver product warning.
+# 02r,16oct98,wmd fixed README.TXT description.
+# 02q,12oct98,tcy removed extraneous "the" from messages
+# 02p,06oct98,tcy added CD description to Welcome page
+# 02o,29sep98,bjl added required patches message 5000_PATCHES_TEXT.
+# 02n,29sep98,wmd add text for readme page
+# 02m,29sep98,tcy refined DLL registration page text
+# 02l,29sep98,tcy changed message for DCOM
+# 02k,26sep98,tcy added messages for DLL and DCOM pages
+# 02j,24sep98,tcy removed "following" from 1080_WARN_4 message.
+# 02i,17sep98,tcy added comment on size of SETUP files to 1140_COMP_SELECT.
+# 02h,17sep98,wmd reword message 1080_WARN_4.
+# 02g,14sep98,tcy changed 1210_FINISH and 1550_USAGE messages
+# 02f,08sep98,tcy warn user library update may take several minutes
+# 02e,01sep98,wmd reword message for installing over tree.
+# added new messages for license agreement pages.
+# 02d,20aug98,wmd added message for license agreement.
+# 02c,18aug98,tcy added message for zip-file dialog box
+# 02d,04aug98,wmd added newer/older duplicate file warnings.
+# 02c,24jul98,tcy added system check messages
+# 02b,16jul98,wmd add new messages for T-2.
+# 02a,22jul98,tcy moved license messages to LICW32.TCL;
+# removed portMapper messages
+# 01n,09feb98,pdn updated string 1080_WARN_4
+# 01m,08apr97,pdn added new string for remote icon installing
+# fixed spr#8334
+# 01l,08mar97,tcy fixed language in string id 3340
+# 01k,07mar97,tcy added string id 3340
+# 01j,10feb97,pdn added more license messages.
+# 01i,09feb97,pdn implemented variable argument list for strTableGet(),
+# clean up.
+# 01h,17jan97,jmo fixed language in strings
+# 01g,12dec96,tcy merged in TEXT-only strings
+# 01f,12dec96,pdn added 1080_WARN_4 string warning that CD-ROM
+# revision is older than expected.
+# 01e,27nov96,sj added string for warning against installing in
+# the root of windows drive.
+# 01d,18nov96,tcy added strings for text-based installation script
+# 01c,14nov96,pdn substituted function for some global variables
+# 01b,14nov96,sj added strings from Windows installation script
+# 01a,11nov96,pdn written
+
+proc strTableGet {strId args} {
+ global strTable
+ global setupVals
+ global current_file
+
+ if [regexp {^format.*$} $strTable($strId) junk] {
+ return [eval $strTable($strId)]
+ } {
+ return $strTable($strId)
+ }
+}
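+
+# strTableGet returns the string stored under strId in strTable.  If the
+# entry begins with "format", it is evaluated first, so embedded commands
+# such as [cdromDescGet] and references to $args are expanded at lookup
+# time; any other entry is returned literally.  A brief sketch using keys
+# defined below:
+#
+#	strTableGet 1010_WELCOME2	;# plain entry, returned as is
+#	strTableGet 1000_WELCOME1	;# "format ..." entry, evaluated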
+
+set strTable(1000_WELCOME_CD) \
+ "format %s \"[cdNameGet description]\""
+
+set strTable(1000_WELCOME1) \
+ "format %s \"Welcome to the SETUP program. This program will\
+ install \[cdromDescGet\] on your computer.\""
+
+set strTable(1010_WELCOME2) \
+ "It is strongly recommended that you exit all programs and disable virus\
+ protection before running this SETUP program."
+
+set strTable(1020_WELCOME3) \
+ "At any time, you can quit the SETUP program by clicking the <Cancel>\
+ button. You also can go back to previous dialog boxes by clicking the\
+ <Back> button. To accept the current settings for a dialog box and go on\
+ with the installation process, click the <Next> button."
+
+set strTable(3020_WELCOME3) \
+ "format %s \"At any prompt, you can cancel installation \[cdromDescGet\]\
+ by typing \'exit\'. You can also go to the previous question\
+ by typing \'-\'. To accept current settings and go on with\
+ the installation process, press <Return>.\""
+
+set strTable(1030_WELCOME4) \
+ "WARNING: This program is protected by copyright law and international\
+ treaties."
+
+set strTable(1040_WELCOME5) \
+ "Unauthorized reproduction or distribution of this program, or any portion\
+ of it, may result in severe civil and criminal penalties, and will be\
+ prosecuted to the maximum extent possible under law."
+
+set strTable(1050_ROOT_WARN) \
+ "format %s \"Installing \[cdromDescGet\] as \[setupId effective user\] is not\
+ recommended. We suggest that you log off and log on as a normal\
+ user before running this program.\
+ \n\nClick Next to continue with SETUP anyway.\""
+
+set strTable(3050_ROOT_WARN) \
+ "format %s \"Installing \[cdromDescGet\] as \[setupId effective user\]\
+ is not recommended. We suggest that you log off and \
+ log on as a normal user before running this program.\
+ \n\nPress <Return> to continue with SETUP anyway.\""
+
+set strTable(1051_ROOT_WARN) \
+ "format %s \"Installing \[cdromDescGet\] without System Administrator\
+ privileges is not recommended. Under your present privileges,\
+ SETUP will not offer certain installation options, such as \
+ the installation of some services, etc. Also, the software\
+ will be installed as a personal copy and will not be visible\
+ to other users on this machine.\
+ \n\nTo install \[cdromDescGet\] with access to all its\
+ installation features and options, we suggest that you exit\
+ the installation now and rerun it later with System\
+ Administrator\'s privileges.\n\nClick <Next> to continue with\
+ SETUP anyway.\""
+
+set strTable(1060_REGISTRATION) \
+ "Below, type your name, the name of your company."
+
+set strTable(1070_WARN_1) \
+ "The installation key you entered is invalid. Please enter a valid\
+ installation key."
+
+set strTable(1071_WARN_1) \
+ "Please enter the requested information."
+
+set strTable(1080_WARN_2) \
+ "You entered a key that was not created for this CD-ROM. Please verify\
+ that you are using the appropriate key. If this problem persists, contact\
+ Wind River Systems Sales department for help."
+
+set strTable(1080_WARN_3) \
+ "The installation key you entered is meant for other vendor's CD-ROM.\
+ Please contact the vendor who issued the CD-ROM for a proper key."
+
+set strTable(1085_WARN_4) \
+ "This CD-ROM does not require an installation key. Click the \"Next\"\
+ button to continue the installation."
+
+set strTable(1090_WARN_3) \
+ "format %s \"Can\'t initiate SETUP: \[lindex \$args 0\]. Please correct\
+ the problem then run SETUP again.\""
+
+set strTable(1095_WARN_NO_TCPIP) \
+ "SETUP has detected that your system does not have TCP-IP installed.\
+ To correct the problem, please contact your administrator and then\
+ run SETUP again.\nAborting setup."
+
+set strTable(1097_WARN_NO_LONGFILENAME_SUP) \
+ "SETUP has detected that your system does not have long filename\
+ support. To correct the problem, please contact your administrator\
+ and then run SETUP again.\nAborting setup."
+
+set strTable(1105_FULL_INSTALL) \
+ "Installs the Tornado products, tools, compilers, and other optional\
+ components that you may have purchased."
+
+set strTable(1107_PROGRAM_GROUP) \
+"Installs only the Tornado program group and tools icons for access to\
+ Tornado tools installed on a remote server."
+
+set strTable(1100_DEST_DIR) \
+ "format %s \"Please type the name of the directory where you want SETUP to\
+ install \[cdromDescGet\].\
+ \n\nClick the <Browse> button to choose the directory\
+ interactively.\""
+
+set strTable(1100_REMOTE_DIR) \
+ "format %s \"Please type the name of the directory where Tornado has\
+ already been installed.\
+ \n\nClick the <Browse> button to choose the directory\
+ interactively.\""
+
+set strTable(3100_DEST_DIR) \
+ "format %s \"Please type the name of the directory where you want SETUP\
+ to install \[cdromDescGet\].\""
+
+set strTable(1110_DEST_DIR_WARN) \
+ "The installation directory you entered does not exist.\
+ \nDo you want to create it now?"
+
+set strTable(3110_DEST_DIR_WARN) \
+ "The installation directory you entered does not exist."
+
+set strTable(3115_DEST_DIR_QUESTION) \
+ "Do you want to create it now? \[y\]"
+
+set strTable(1111_DEST_DIR_WARN) \
+ "format %s \"Installing \[cdromDescGet\] in the root directory is not\
+ recommended.\nClick <Yes> to select another directory.\""
+
+set strTable(1120_DEST_DIR_WARN2) \
+ "format %s \"Creating \[destDirGet\] failed: file exists.\""
+
+set strTable(1121_DEST_DIR_WARN2) \
+ "format %s \"Installing in \[destDirGet\] is not recommended.\
+ \nDo you want to change the installation directory?\""
+
+set strTable(1122_DEST_DIR_WARN2) \
+ "format %s \"Unable to create \[destDirGet\].\""
+
+set strTable(1130_DEST_DIR_WARN3) \
+ "You do not have permission to write files into the installation directory\
+ you entered.\
+ \n\nPlease choose a writable directory."
+
+set strTable(1135_DEST_DIR_WARN4) \
+ "format %s \"The installation directory you entered contains white\
+ space(s). Please select another directory.\""
+
+set strTable(1137_DUP_PRODUCT_WARN) \
+ "format %s \"Reinstalling products may potentially destroy any\
+ modifications you may have made to previously installed files.\
+ Do you wish to continue with the installation or go back to the\
+ '\[strTableGet 1450_TITLE_OPTION\]' page to reconsider your choices?\""
+
+set strTable(3155_COMP_SELECT_QUESTION) \
+ "Do you want to go back and specify a directory on a bigger partition?\
+ \[y\]"
+
+set strTable(1140_COMP_SELECT) \
+ "format %s \"In the option list below, please check all items you wish\
+ to install. SETUP files will be copied to your selected directory and\
+ take up \[setupSizeGet\] MB of disk space.\n\""
+
+set strTable(3140_COMP_SELECT) \
+ "In the option list below, select the item(s) you want to install."
+
+set strTable(3145_COMP_SELECT_CHANGE) \
+ "Press <Return> to accept the setting. To change the setting, enter a\
+ list of item numbers separated by spaces."
+
+set strTable(3145_COMP_SELECT_CHANGE_INVALID) \
+ "The item number(s) you entered is not valid."
+
+set strTable(1150_COMP_SELECT_WARN) \
+ "There is not enough disk space to install the selected component(s).\
+ \n\nDo you want to go back and specify a directory on a bigger disk or\
+ partition?"
+
+set strTable(3150_COMP_SELECT_WARN) \
+ "There is not enough space to install the selected component(s)."
+
+set strTable(1151_COMP_SELECT_WARN) \
+ "At least one component must be selected to continue installation."
+
+set strTable(1160_PERMISSION) \
+ "SETUP is about to install the component(s) you have requested.\
+ \n\nThe selected button(s) below indicate the file permissions which\
+ will be set during the installation process.\
+ \n\nPlease adjust these to suit your site requirements."
+
+set strTable(3160_PERMISSION) \
+ "SETUP is about to install the component(s) you have requested."
+
+set strTable(3162_PERMISSION) \
+ "The list below indicates the file permissions which will be set during\
+ the installation process. Please adjust these to suit your site\
+ requirements."
+
+set strTable(3165_PERMISSION_QUESTION) \
+ "Press <Return> to accept the setting. To change the setting, enter a\
+ list of item numbers separated by spaces."
+
+set strTable(1161_FOLDER_SELECT) \
+ "SETUP will add program icons to the Program Folder listed below. You may\
+ type a new folder name, or select one from the existing Folders list."
+
+set strTable(1162_FOLDER_SELECT) \
+ "Please enter a valid folder name."
+
+set strTable(1170_FILE_COPY) \
+ "format %s \"SETUP is copying the selected component(s) to the directory\
+ \[destDirGet\].\""
+
+set strTable(1171_FILE_COPY) \
+ "format %s \"SETUP cannot read \[setupFileNameGet 0\] from the CD-ROM.\
+ Please ensure that the CD-ROM is properly mounted.\""
+
+set strTable(1180_LIB_UPDATE) \
+ "SETUP is updating the VxWorks libraries. We recommend that you let\
+ SETUP finish this step, or the libraries will be in an inconsistent\
+ state. Please be patient as the process may take several minutes. \
+ If you want to quit the SETUP program, click <Cancel> and run\
+ the SETUP program again at a later time."
+
+set strTable(3180_LIB_UPDATE) \
+ "SETUP is updating the VxWorks libraries."
+
+set strTable(1190_REGISTRY_HOST) \
+ "The Tornado Registry is a daemon that keeps track of all available\
+ targets by name. Only one registry is required on your network, \
+ and it can run on any networked host.\
+ \n\nPlease enter the name of the host where the Tornado Registry will\
+ be running."
+
+set strTable(1191_REGISTRY_DESC) \
+ "The Tornado Registry is a daemon that keeps track of all available\
+ targets by name. Only one registry is required on your network, \
+ and it can run on any networked host."
+
+set strTable(1192_REGISTRY_NAME) \
+ "Please enter the name of the host where the Tornado Registry will\
+ be running."
+
+set strTable(1200_FINISH_WARN) \
+ "format %s \"However, there were \[errorCountGet\] error(s) which occured\
+ during the process. Please review the log file\
+ \[destDirDispGet\]/setup.log for more information.\""
+
+set strTable(1210_FINISH) \
+ "format %s \"SETUP has completed installing the selected product(s).\""
+
+set strTable(1212_FINISH) \
+ "SETUP has completed installing the program folders and icons."
+
+set strTable(1213_FINISH) \
+ "Terminating SETUP program."
+
+set strTable(1360_QUIT_CALLBACK) \
+ "format %s \"SETUP is not complete. If you quit the SETUP program now,\
+ \[cdromDescGet\] will not be installed.\n\nYou may run\
+ the SETUP program at a later time to complete the\
+ installation.\
+ \n\nTo continue installing the program, click <Resume>. \
+ To quit the SETUP program, click <Exit SETUP>.\""
+
+set strTable(3360_QUIT_CALLBACK) \
+ "format %s \"SETUP is not complete. If you quit the SETUP program now,\
+ \[cdromDescGet\] will not be installed.\n\nYou may run the\
+ SETUP program at a later time to complete the installation.\
+ \n\nTo continue installing the program, Press <Return>. \
+ To quit the SETUP program, type \'exit\'.\""
+
+set strTable(1370_FILE_ACCESS_ERROR) \
+ "format %s \"SETUP cannot create/update file \[lindex \$args 0\]:\
+ \[lindex \$args 1\]\""
+
+set strTable(1380_DEFLATE_ERROR) \
+ "format %s \"SETUP isn\'t able to deflate \[setupFileNameGet 0\]\
+ \n\nPlease select one of the following options\
+ to continue with the SETUP process.\""
+
+set strTable(1390_MEMORY_LOW) \
+ "The system is running out of memory. To continue, close applications\
+ or increase the system swap space."
+
+set strTable(1400_DISK_FULL) \
+ "No disk space left. To continue, free up some disk space."
+
+set strTable(1550_USAGE) \
+ "Usage: SETUP /I\[con\]\]\t\n\
+ /I : Add standard Tornado icons \n\
+ from a remote installation"
+
+set strTable(1410_TITLE_WELCOME) "Welcome"
+set strTable(1420_TITLE_WARNING) "Warning"
+set strTable(1430_TITLE_REGISTRATION) "User Registration"
+set strTable(1440_TITLE_DESTDIR) "Select Directory"
+set strTable(1450_TITLE_OPTION) "Select Products"
+set strTable(1460_TITLE_PERMISSION) "Permission"
+set strTable(1470_TITLE_FILECOPY) "Copying Files"
+set strTable(1480_TITLE_LIBUPDATE) "Update Libraries"
+set strTable(1490_TITLE_REGISTRY_HOST) "Tornado Registry"
+set strTable(1495_TITLE_BACKWARD_COMPATIBILITY) "Backward Compatibility"
+set strTable(1500_TITLE_FINISH) "Finish"
+set strTable(1560_TITLE_FOLDER) "Select Folder"
+set strTable(1563_TITLE_DLL_REG) "Software Registration"
+set strTable(1567_TITLE_DCOM) "DCOM Installation"
+
+set strTable(1570_OPTION_SELECT) \
+ "Choose one of the options listed below, then click the\
+ <Next> button to continue the installation."
+
+set strTable(1576_OPTION_MANUAL) \
+ "Install Tornado Registry manually"
+
+set strTable(1577_OPTION_STARTUP) \
+ "Install Tornado Registry locally in the Startup Group"
+
+set strTable(1578_OPTION_SERVICE) \
+ "Install Tornado Registry locally as a Service"
+
+set strTable(1579_OPTION_REMOTE) \
+ "Configure to use a remote Tornado Registry"
+
+set strTable(1580_OPTION_DESC) \
+ "If you plan on running Tornado in a non-networked environment, we\
+ recommend that you install the registry in your Startup Group or as an\
+ NT Service. For more information, consult your Tornado User\'s Guide."
+
+set strTable(1581_OPTION_DESC) \
+ "If you plan on running Tornado in a non-networked environment, we\
+ recommend that you install the registry in your Startup Group. For more\
+ information, consult your Tornado User\'s Guide."
+
+set strTable(3000_RETURN_QUESTION) \
+ "Press <Return> to continue"
+
+set strTable(3055_EXIT_QUESTION) \
+ "Type \'exit\' to quit the program or press <Return> to continue"
+
+set strTable(3370_BACK_CALLBACK) \
+ "Cannot go back further."
+
+set strTable(1080_WARN_4) \
+ "The installation key you entered attempted to unlock one or more \
+ products that may have been removed from our product line. \
+ Please compare the unlocked product list on the\
+ \"[strTableGet 1450_TITLE_OPTION]\" screen with your purchased order\
+ list, and contact us if you discover any differences."
+
+set strTable(4000_BASE_INSTALL_WARN) \
+ "format %s \"Warning! Re-installing Tornado over an existing \
+ tree will overwrite any installed patches. \
+ If you proceed with the installation, please \
+ re-install any patches.\""
+
+set strTable(4000_BASE_INSTALL_WARN_1) \
+ "Select <Install> to overwrite existing Tornado installation,\
+ or choose <Select Path> to enable you to back up to the \'Select\
+ Directory\' page to enter an alternate path."
+
+set strTable(4010_FILE_EXISTS_OLDER_WARN) \
+ "format %s \"The file \'\$current_file\' exists in your destination\
+ directory path \'\[destDirGet\]\' and is older. You can\
+ set the policy for handling duplicate files by\
+ selecting one of the following buttons. All files to be\
+ overwritten will be backed up.\""
+
+set strTable(4010_FILE_EXISTS_NEWER_WARN) \
+ "format %s \"The file \'\$current_file\' exists in your destination\
+ directory path \'\[destDirGet\]\' and is newer. You can\
+ set the policy for handling duplicate files by\
+ selecting one of the following buttons. All files to be\
+ overwritten will be backed up.\""
+
+set strTable(4010_FILE_EXISTS_WARN_1) \
+ "Overwrite the existing file."
+
+set strTable(4010_FILE_EXISTS_WARN_2) \
+ "Do not overwrite the existing file."
+
+set strTable(4010_FILE_EXISTS_WARN_3) \
+ "Overwrite ALL files, do not show this dialog again."
+
+set strTable(4020_ANALYZING_BANNER) \
+ "Analyzing installation files, please wait..."
+
+set strTable(4030_NO_ZIP_FILE) \
+ "format %s \"SETUP cannot find the ZIP files for installing\
+ \[cdromDescGet\] in the default directory.\n\n\
+ Please type the name of the WIND\
+ directory containing the ZIP files.\n\nClick the\
+ <Browse> button to choose the directory interactively.\""
+
+set strTable(4040_LIC_TEXT) \
+ "Attention: By clicking on the \"I accept\" button or by\
+ installing the software, you are consenting to be bound by\
+ the terms of this agreement (this \"Agreement\"). If you do\
+ not agree to all of the terms, click the \"I don't Accept\" button\
+ and do not install this software. A copy of this Agreement can be viewed\
+ in the Setup directory under the destination path that you have\
+ designated after the installation is completed."
+
+set strTable(4050_PROJECT_TEXT) \
+ "Please enter your project name, and the number of licensed\
+ users on the project in the spaces below."
+
+set strTable(4060_LICENSE_TEXT) \
+ "By clicking on the \"I accept\" button \
+ you are consenting to be bound by the terms of this agreement.\
+ If you do not agree to all of the terms, click the \"Cancel\"\
+ button and do not install this software."
+
+set strTable(4070_DLL_TEXT) \
+ "SETUP is registering software on your machine. This will take a few\
+ minutes."
+
+set strTable(4080_DCOM_TEXT) \
+ "Setup has detected that your COM/DCOM DLLs must\
+ be updated for the correct operation of Tornado 2.0.\
+ \n\n\
+ Setup will now ask you to run DCOM95 to update your\
+ DLLs.\
+ \n\n\
+ You will have to reboot your system after DLL files have been\
+ installed. Please rerun SETUP to continue with installation\
+ after your system has rebooted.\
+ \n\n\
+ Note: The DCOM95 installation program updates your\
+ system DLLs. You should save all open documents and close all\
+ programs before proceeding.\
+ \n\nWould you like to install \"DCOM95\" now?"
+
+set strTable(4082_DCOM95_AND_COMCTL_TEXT) \
+ "Setup has detected that your COM/DCOM and Common Control DLLs must\
+ be updated for the correct operation of Tornado 2.0.\
+ \n\n\
+ Setup will now ask you to run DCOM95 and 401comupd.exe to update your\
+ DLLs.\
+ \n\n\
+ You must reboot your system after DLL files have been\
+ installed. After rebooting, please rerun SETUP to continue with\
+ installation.\
+ \n\n\
+ Note: 401comupd.exe and DCOM95 installation programs update your\
+ system DLLs. You should save all open documents and close all\
+ programs before proceeding.\
+ \n\nWould you like to install \"401comupd.exe\" and \"DCOM95\" now?"
+
+set strTable(4085_COMCTL_UPDATE_TEXT) \
+ "Setup has detected that your Common Control DLLs must\
+ be updated for the correct operation of Tornado 2.0.\
+ \n\n\
+ Setup will now ask you to run DCOM95 and 401comupd.exe to update your\
+ DLLs.\
+ \n\n\
+ You will have to reboot your system after DLL files have been\
+ installed. Please rerun SETUP to continue with installation\
+ after your system has rebooted.\
+ \n\n\
+ Note: The 401comupd.exe installation program updates your system DLLs. You\
+ should save all open documents and close all programs before installing\
+ 401comupd.exe.\
+ \n\nWould you like to install \"401comupd.exe\" now?"
+
+set strTable(4090_README_TEXT) \
+ "Please read the README file contents that are displayed below.\
+ It contains important information that will enable you to install\
+ and successfully run the BerkeleyDB product. For your convenience\
+ this file is copied to your installation directory path."
+
+set strTable(5000_PATCHES_REQUIRED_TEXT) \
+ "SETUP has detected that required operating system patches\
+ have not been installed on this machine. These patches are\
+ necessary for the correct operation of SETUP and Tornado. Please refer\
+ to the Tornado Release Notes for details.\n\n\
+ The following operating system patches must be installed before\
+ you can continue with installation:\n\n"
+
+set strTable(5001_PATCHES_RECOMMENDED_TEXT) \
+ "\n\nSETUP has also detected that recommended operating system patches\
+ have not been installed. It is recommended that these patches are\
+ installed before starting Tornado to ensure correct operation.\n\n\
+ The following operating system patches are recommended to be installed:\n\n"
+
+set strTable(5002_PATCHES_RECOMMENDED_TEXT) \
+ "SETUP has detected that some operating system patches have not been\
+ installed on this machine. It is recommended that these\
+ patches are installed before starting Tornado to ensure correct\
+ operation. Please refer to the Tornado Release Notes\
+ for details.\n\n\
+ The following operating system patches are recommended to be installed:\n\n"
+
+set strTable(5003_PATCHES_REQUIRED_FORMATTED_TEXT) \
+ "\n SETUP has detected that required operating system patches\n\
+ have not been installed on this machine. These patches are\n\
+ necessary for the correct operation of SETUP and Tornado. Please refer\n\
+ to the Tornado Release Notes for details.\n\n\
+ The following operating system patches must be installed before\n\
+ you can continue with installation:\n\n"
+
+set strTable(5004_PATCHES_RECOMMENDED_FORMATTED_TEXT) \
+ "\n\n SETUP has also detected that recommended operating system patches\n\
+ have not been installed. It is recommended that these patches are\n\
+ installed before starting Tornado to ensure correct operation.\n\n\
+ The following operating system patches are recommended to be installed:\n\n"
+
+set strTable(5005_PATCHES_RECOMMENDED_FORMATTED_TEXT) \
+ "\n SETUP has detected that some operating system patches have not been\n\
+ installed on this machine. It is recommended that these\n\
+ patches are installed before starting Tornado to ensure correct\n\
+ operation. Please refer to the Tornado Release Notes\n\
+ for details.\n\n\
+ The following operating system patches are recommended to be installed:\n\n"
+
+set strTable(5006_PATCHES_SUN_LOCATION) \
+ "\nPatches for Sun machines are available at http://sunsolve.sun.com.\n"
+
+set strTable(5007_PATCHES_HP_LOCATION) \
+ "\nPatches for HP machines are available at:\n\
+ http://us-support.external.hp.com (US, Canada, Asia-Pacific, and\
+ Latin-America)\n\
+ http://europe-support.external.hp.com (Europe)\n"
+
+set strTable(5008_PATCHES_UPDATE) \
+ "\nNote: System vendors very frequently update and replace patches.\
+ If a specific patch is no longer available, please use the\
+ replacement patch suggested by the system vendor.\n"
+
+set strTable(5009_PATCHES_UPDATE_FORMATTED) \
+ "\n Note: System vendors very frequently update and replace patches.\n\
+ If a specific patch is no longer available, please use the\n\
+ replacement patch suggested by the system vendor.\n"
+
+set strTable(5010_DRIVERS_INFO) \
+ "The installation of the Driver component is required because\n\
+ you have selected the basic Tornado product for installation.\n\n\
+ If you wish to uncheck this item you must uncheck either the\n\
+ basic Tornado and/or Tornado Simulator product(s) or go to the\n\
+ 'Details' button for Tornado and uncheck both the Simulator and\n\
+ the Tornado Object parts."
+
+set strTable(5020_DO_NOT_SAVE_KEY_FOR_FAE) \
+ "The installation key you are about to enter will NOT\
+ be saved in the system registry.\nIs this what you want?"
+
+set strTable(5030_BACKWARD_COMPATIBILITY) \
+ "While the portmapper is not needed for Tornado 2.0, it is\
+ included in this release for development environments in\
+ which both Tornado 2.0 and Tornado 1.0.1 are in use.\
+ \n\nWould you like to use your Tornado 1.0.x tools with Tornado 2.0?"
+
+set strTable(5040_BACKWARD_COMPATIBILITY) \
+ "Note:\
+ \n\nIf you have selected to install the Tornado Registry as\
+ a service, there is no way to retain backward compatibility\
+ with Tornado 1.0.x."
+
+set strTable(5050_BACKWARD_COMPATIBILITY) \
+ "For more information on backward compatibility,\
+ please consult the Tornado 2.0 Release Notes."
diff --git a/bdb/dist/vx_setup/README.in b/bdb/dist/vx_setup/README.in
new file mode 100644
index 00000000000..f96948c37ba
--- /dev/null
+++ b/bdb/dist/vx_setup/README.in
@@ -0,0 +1,7 @@
+README.TXT: Sleepycat Software Berkeley DB @DB_VERSION_MAJOR@.@DB_VERSION_MINOR@ Release v@DB_VERSION_MAJOR@.@DB_VERSION_MINOR@.@DB_VERSION_PATCH@
+
+Information on known problems, changes introduced with the
+current revision of the CD-ROM, and other product bulletins
+can be obtained from the Sleepycat Software web site:
+
+ http://www.sleepycat.com/
diff --git a/bdb/dist/vx_setup/SETUP.BMP b/bdb/dist/vx_setup/SETUP.BMP
new file mode 100644
index 00000000000..2918480b8c2
--- /dev/null
+++ b/bdb/dist/vx_setup/SETUP.BMP
Binary files differ
diff --git a/bdb/dist/vx_setup/vx_allfile.in b/bdb/dist/vx_setup/vx_allfile.in
new file mode 100644
index 00000000000..61a1b8ee805
--- /dev/null
+++ b/bdb/dist/vx_setup/vx_allfile.in
@@ -0,0 +1,5 @@
+windlink/sleepycat/BerkeleyDB.@DB_VERSION_MAJOR@.@DB_VERSION_MINOR@.@DB_VERSION_PATCH@/build_vxworks/BerkeleyDB.wpj
+windlink/sleepycat/BerkeleyDB.@DB_VERSION_MAJOR@.@DB_VERSION_MINOR@.@DB_VERSION_PATCH@/build_vxworks/BerkeleyDB.wsp
+windlink/sleepycat/BerkeleyDB.@DB_VERSION_MAJOR@.@DB_VERSION_MINOR@.@DB_VERSION_PATCH@/build_vxworks/db.h
+windlink/sleepycat/BerkeleyDB.@DB_VERSION_MAJOR@.@DB_VERSION_MINOR@.@DB_VERSION_PATCH@/build_vxworks/db_config.h
+windlink/sleepycat/BerkeleyDB.@DB_VERSION_MAJOR@.@DB_VERSION_MINOR@.@DB_VERSION_PATCH@/build_vxworks/db_int.h
diff --git a/bdb/dist/vx_setup/vx_demofile.in b/bdb/dist/vx_setup/vx_demofile.in
new file mode 100644
index 00000000000..42a698ea367
--- /dev/null
+++ b/bdb/dist/vx_setup/vx_demofile.in
@@ -0,0 +1,3 @@
+windlink/sleepycat/BerkeleyDB.@DB_VERSION_MAJOR@.@DB_VERSION_MINOR@.@DB_VERSION_PATCH@/build_vxworks/dbdemo/dbdemo.wpj
+windlink/sleepycat/BerkeleyDB.@DB_VERSION_MAJOR@.@DB_VERSION_MINOR@.@DB_VERSION_PATCH@/build_vxworks/dbdemo/README
+windlink/sleepycat/BerkeleyDB.@DB_VERSION_MAJOR@.@DB_VERSION_MINOR@.@DB_VERSION_PATCH@/build_vxworks/dbdemo/dbdemo.c
diff --git a/bdb/dist/vx_setup/vx_setup.in b/bdb/dist/vx_setup/vx_setup.in
new file mode 100644
index 00000000000..7bc3f510cfa
--- /dev/null
+++ b/bdb/dist/vx_setup/vx_setup.in
@@ -0,0 +1,13 @@
+Sleepycat Software BerkeleyDB @DB_VERSION_MAJOR@.@DB_VERSION_MINOR@.@DB_VERSION_PATCH@
+db@DB_VERSION_MAJOR@.@DB_VERSION_MINOR@.@DB_VERSION_PATCH@ demo-db@DB_VERSION_MAJOR@.@DB_VERSION_MINOR@.@DB_VERSION_PATCH@
+BerkeleyDB.@DB_VERSION_MAJOR@.@DB_VERSION_MINOR@.@DB_VERSION_PATCH@
+@DB_SETUP_DIR@
+Sleepycat Software BerkeleyDB @DB_VERSION_MAJOR@.@DB_VERSION_MINOR@.@DB_VERSION_PATCH@
+db@DB_VERSION_MAJOR@.@DB_VERSION_MINOR@.@DB_VERSION_PATCH@
+@DB_SETUP_DIR@/filelist.all
+BerkeleyDB.@DB_VERSION_MAJOR@.@DB_VERSION_MINOR@.@DB_VERSION_PATCH@-Demo
+@DB_SETUP_DIR@
+BerkeleyDB @DB_VERSION_MAJOR@.@DB_VERSION_MINOR@.@DB_VERSION_PATCH@ Demo program
+demo-db@DB_VERSION_MAJOR@.@DB_VERSION_MINOR@.@DB_VERSION_PATCH@
+@DB_SETUP_DIR@/filelist.demo
+Sleepycat Software BerkeleyDB @DB_VERSION_MAJOR@.@DB_VERSION_MINOR@.@DB_VERSION_PATCH@
diff --git a/bdb/dist/win_config.in b/bdb/dist/win_config.in
new file mode 100644
index 00000000000..09acab28806
--- /dev/null
+++ b/bdb/dist/win_config.in
@@ -0,0 +1,439 @@
+/* Define to 1 if you want to build a version for running the test suite. */
+/* #undef CONFIG_TEST */
+
+/* Define to 1 if you want a debugging version. */
+/* #undef DEBUG */
+#if defined(_DEBUG)
+#if !defined(DEBUG)
+#define DEBUG 1
+#endif
+#endif
+
+/* Define to 1 if you want a version that logs read operations. */
+/* #undef DEBUG_ROP */
+
+/* Define to 1 if you want a version that logs write operations. */
+/* #undef DEBUG_WOP */
+
+/* Define to 1 if you want a version with run-time diagnostic checking. */
+/* #undef DIAGNOSTIC */
+
+/* Define to 1 if you have the `clock_gettime' function. */
+/* #undef HAVE_CLOCK_GETTIME */
+
+/* Define to 1 if Berkeley DB release includes strong cryptography. */
+/* #undef HAVE_CRYPTO */
+
+/* Define to 1 if you have the `directio' function. */
+/* #undef HAVE_DIRECTIO */
+
+/* Define to 1 if you have the <dirent.h> header file, and it defines `DIR'.
+ */
+/* #undef HAVE_DIRENT_H */
+
+/* Define to 1 if you have the <dlfcn.h> header file. */
+/* #undef HAVE_DLFCN_H */
+
+/* Define to 1 if you have EXIT_SUCCESS/EXIT_FAILURE #defines. */
+#define HAVE_EXIT_SUCCESS 1
+
+/* Define to 1 if fcntl/F_SETFD denies child access to file descriptors. */
+/* #undef HAVE_FCNTL_F_SETFD */
+
+/* Define to 1 if allocated filesystem blocks are not zeroed. */
+#define HAVE_FILESYSTEM_NOTZERO 1
+
+/* Define to 1 if you have the `getcwd' function. */
+#define HAVE_GETCWD 1
+
+/* Define to 1 if you have the `getopt' function. */
+/* #undef HAVE_GETOPT */
+
+/* Define to 1 if you have the `gettimeofday' function. */
+/* #undef HAVE_GETTIMEOFDAY */
+
+/* Define to 1 if you have the `getuid' function. */
+/* #undef HAVE_GETUID */
+
+/* Define to 1 if you have the <inttypes.h> header file. */
+/* #undef HAVE_INTTYPES_H */
+
+/* Define to 1 if you have the `nsl' library (-lnsl). */
+/* #undef HAVE_LIBNSL */
+
+/* Define to 1 if you have the `memcmp' function. */
+#define HAVE_MEMCMP 1
+
+/* Define to 1 if you have the `memcpy' function. */
+#define HAVE_MEMCPY 1
+
+/* Define to 1 if you have the `memmove' function. */
+#define HAVE_MEMMOVE 1
+
+/* Define to 1 if you have the <memory.h> header file. */
+#define HAVE_MEMORY_H 1
+
+/* Define to 1 if you have the `mlock' function. */
+/* #undef HAVE_MLOCK */
+
+/* Define to 1 if you have the `mmap' function. */
+/* #undef HAVE_MMAP */
+
+/* Define to 1 if you have the `munlock' function. */
+/* #undef HAVE_MUNLOCK */
+
+/* Define to 1 if you have the `munmap' function. */
+/* #undef HAVE_MUNMAP */
+
+/* Define to 1 to use the GCC compiler and 68K assembly language mutexes. */
+/* #undef HAVE_MUTEX_68K_GCC_ASSEMBLY */
+
+/* Define to 1 to use the AIX _check_lock mutexes. */
+/* #undef HAVE_MUTEX_AIX_CHECK_LOCK */
+
+/* Define to 1 to use the GCC compiler and Alpha assembly language mutexes. */
+/* #undef HAVE_MUTEX_ALPHA_GCC_ASSEMBLY */
+
+/* Define to 1 to use the GCC compiler and ARM assembly language mutexes. */
+/* #undef HAVE_MUTEX_ARM_GCC_ASSEMBLY */
+
+/* Define to 1 to use the UNIX fcntl system call mutexes. */
+/* #undef HAVE_MUTEX_FCNTL */
+
+/* Define to 1 to use the GCC compiler and PaRisc assembly language mutexes.
+ */
+/* #undef HAVE_MUTEX_HPPA_GCC_ASSEMBLY */
+
+/* Define to 1 to use the msem_XXX mutexes on HP-UX. */
+/* #undef HAVE_MUTEX_HPPA_MSEM_INIT */
+
+/* Define to 1 to use the GCC compiler and IA64 assembly language mutexes. */
+/* #undef HAVE_MUTEX_IA64_GCC_ASSEMBLY */
+
+/* Define to 1 to use the msem_XXX mutexes on systems other than HP-UX. */
+/* #undef HAVE_MUTEX_MSEM_INIT */
+
+/* Define to 1 to use the GCC compiler and Apple PowerPC assembly language. */
+/* #undef HAVE_MUTEX_PPC_APPLE_GCC_ASSEMBLY */
+
+/* Define to 1 to use the GCC compiler and generic PowerPC assembly language.
+ */
+/* #undef HAVE_MUTEX_PPC_GENERIC_GCC_ASSEMBLY */
+
+/* Define to 1 to use POSIX 1003.1 pthread_XXX mutexes. */
+/* #undef HAVE_MUTEX_PTHREADS */
+
+/* Define to 1 to use Reliant UNIX initspin mutexes. */
+/* #undef HAVE_MUTEX_RELIANTUNIX_INITSPIN */
+
+/* Define to 1 to use the GCC compiler and S/390 assembly language mutexes. */
+/* #undef HAVE_MUTEX_S390_GCC_ASSEMBLY */
+
+/* Define to 1 to use the SCO compiler and x86 assembly language mutexes. */
+/* #undef HAVE_MUTEX_SCO_X86_CC_ASSEMBLY */
+
+/* Define to 1 to use the obsolete POSIX 1003.1 sema_XXX mutexes. */
+/* #undef HAVE_MUTEX_SEMA_INIT */
+
+/* Define to 1 to use the SGI XXX_lock mutexes. */
+/* #undef HAVE_MUTEX_SGI_INIT_LOCK */
+
+/* Define to 1 to use the Solaris _lock_XXX mutexes. */
+/* #undef HAVE_MUTEX_SOLARIS_LOCK_TRY */
+
+/* Define to 1 to use the Solaris lwp threads mutexes. */
+/* #undef HAVE_MUTEX_SOLARIS_LWP */
+
+/* Define to 1 to use the GCC compiler and Sparc assembly language mutexes. */
+/* #undef HAVE_MUTEX_SPARC_GCC_ASSEMBLY */
+
+/* Define to 1 if mutexes hold system resources. */
+/* #undef HAVE_MUTEX_SYSTEM_RESOURCES */
+
+/* Define to 1 if fast mutexes are available. */
+#define HAVE_MUTEX_THREADS 1
+
+/* Define to 1 to configure mutexes intra-process only. */
+/* #undef HAVE_MUTEX_THREAD_ONLY */
+
+/* Define to 1 to use the UNIX International mutexes. */
+/* #undef HAVE_MUTEX_UI_THREADS */
+
+/* Define to 1 to use the UTS compiler and assembly language mutexes. */
+/* #undef HAVE_MUTEX_UTS_CC_ASSEMBLY */
+
+/* Define to 1 to use VMS mutexes. */
+/* #undef HAVE_MUTEX_VMS */
+
+/* Define to 1 to use VxWorks mutexes. */
+/* #undef HAVE_MUTEX_VXWORKS */
+
+/* Define to 1 to use Windows mutexes. */
+#define HAVE_MUTEX_WIN32 1
+
+/* Define to 1 to use the GCC compiler and x86 assembly language mutexes. */
+/* #undef HAVE_MUTEX_X86_GCC_ASSEMBLY */
+
+/* Define to 1 if you have the <ndir.h> header file, and it defines `DIR'. */
+/* #undef HAVE_NDIR_H */
+
+/* Define to 1 if you have the O_DIRECT flag. */
+/* #undef HAVE_O_DIRECT */
+
+/* Define to 1 if you have the `pread' function. */
+/* #undef HAVE_PREAD */
+
+/* Define to 1 if you have the `pstat_getdynamic' function. */
+/* #undef HAVE_PSTAT_GETDYNAMIC */
+
+/* Define to 1 if you have the `pwrite' function. */
+/* #undef HAVE_PWRITE */
+
+/* Define to 1 if building on QNX. */
+/* #undef HAVE_QNX */
+
+/* Define to 1 if you have the `qsort' function. */
+#define HAVE_QSORT 1
+
+/* Define to 1 if you have the `raise' function. */
+#define HAVE_RAISE 1
+
+/* Define to 1 if building RPC client/server. */
+/* #undef HAVE_RPC */
+
+/* Define to 1 if you have the `sched_yield' function. */
+/* #undef HAVE_SCHED_YIELD */
+
+/* Define to 1 if you have the `select' function. */
+/* #undef HAVE_SELECT */
+
+/* Define to 1 if you have the `shmget' function. */
+/* #undef HAVE_SHMGET */
+
+/* Define to 1 if you have the `snprintf' function. */
+#define HAVE_SNPRINTF 1
+
+/* Define to 1 if you have the <stdint.h> header file. */
+/* #undef HAVE_STDINT_H */
+
+/* Define to 1 if you have the <stdlib.h> header file. */
+#define HAVE_STDLIB_H 1
+
+/* Define to 1 if you have the `strcasecmp' function. */
+/* #undef HAVE_STRCASECMP */
+
+/* Define to 1 if you have the `strdup' function. */
+#define HAVE_STRDUP 1
+
+/* Define to 1 if you have the `strerror' function. */
+#define HAVE_STRERROR 1
+
+/* Define to 1 if you have the <strings.h> header file. */
+#define HAVE_STRINGS_H 1
+
+/* Define to 1 if you have the <string.h> header file. */
+#define HAVE_STRING_H 1
+
+/* Define to 1 if you have the `strtoul' function. */
+#define HAVE_STRTOUL 1
+
+/* Define to 1 if `st_blksize' is member of `struct stat'. */
+/* #undef HAVE_STRUCT_STAT_ST_BLKSIZE */
+
+/* Define to 1 if you have the `sysconf' function. */
+/* #undef HAVE_SYSCONF */
+
+/* Define to 1 if you have the <sys/dir.h> header file, and it defines `DIR'.
+ */
+/* #undef HAVE_SYS_DIR_H */
+
+/* Define to 1 if you have the <sys/ndir.h> header file, and it defines `DIR'.
+ */
+/* #undef HAVE_SYS_NDIR_H */
+
+/* Define to 1 if you have the <sys/select.h> header file. */
+/* #undef HAVE_SYS_SELECT_H */
+
+/* Define to 1 if you have the <sys/stat.h> header file. */
+#define HAVE_SYS_STAT_H 1
+
+/* Define to 1 if you have the <sys/time.h> header file. */
+/* #undef HAVE_SYS_TIME_H */
+
+/* Define to 1 if you have the <sys/types.h> header file. */
+#define HAVE_SYS_TYPES_H 1
+
+/* Define to 1 if you have the <unistd.h> header file. */
+/* #undef HAVE_UNISTD_H */
+
+/* Define to 1 if unlink of file with open file descriptors will fail. */
+/* #undef HAVE_UNLINK_WITH_OPEN_FAILURE */
+
+/* Define to 1 if you have the `vsnprintf' function. */
+#define HAVE_VSNPRINTF 1
+
+/* Define to 1 if building VxWorks. */
+/* #undef HAVE_VXWORKS */
+
+/* Define to 1 if you have the `yield' function. */
+/* #undef HAVE_YIELD */
+
+/* Define to 1 if you have the `_fstati64' function. */
+#define HAVE__FSTATI64 1
+
+/* Define to the address where bug reports for this package should be sent. */
+#define PACKAGE_BUGREPORT "support@sleepycat.com"
+
+/* Define to the full name of this package. */
+#define PACKAGE_NAME "Berkeley DB"
+
+/* Define to the full name and version of this package. */
+#define PACKAGE_STRING "Berkeley DB __EDIT_DB_VERSION__"
+
+/* Define to the one symbol short name of this package. */
+#define PACKAGE_TARNAME "db-__EDIT_DB_VERSION__"
+
+/* Define to the version of this package. */
+#define PACKAGE_VERSION "__EDIT_DB_VERSION__"
+
+/* Define to 1 if the `S_IS*' macros in <sys/stat.h> do not work properly. */
+/* #undef STAT_MACROS_BROKEN */
+
+/* Define to 1 if you have the ANSI C header files. */
+#define STDC_HEADERS 1
+
+/* Define to 1 if you can safely include both <sys/time.h> and <time.h>. */
+/* #undef TIME_WITH_SYS_TIME */
+
+/* Define to 1 to mask harmless uninitialized memory read/writes. */
+/* #undef UMRW */
+
+/* Number of bits in a file offset, on hosts where this is settable. */
+/* #undef _FILE_OFFSET_BITS */
+
+/* Define for large files, on AIX-style hosts. */
+/* #undef _LARGE_FILES */
+
+/* Define to empty if `const' does not conform to ANSI C. */
+/* #undef const */
+
+/*
+ * Exit success/failure macros.
+ */
+#ifndef HAVE_EXIT_SUCCESS
+#define EXIT_FAILURE 1
+#define EXIT_SUCCESS 0
+#endif
+
+/*
+ * Don't step on the namespace. Other libraries may have their own
+ * implementations of these functions; we don't want to use their
+ * implementations or force them to use ours based on the load order.
+ */
+#ifndef HAVE_GETCWD
+#define getcwd __db_Cgetcwd
+#endif
+#ifndef HAVE_MEMCMP
+#define memcmp __db_Cmemcmp
+#endif
+#ifndef HAVE_MEMCPY
+#define memcpy __db_Cmemcpy
+#endif
+#ifndef HAVE_MEMMOVE
+#define memmove __db_Cmemmove
+#endif
+#ifndef HAVE_RAISE
+#define raise __db_Craise
+#endif
+#ifndef HAVE_SNPRINTF
+#define snprintf __db_Csnprintf
+#endif
+#ifndef HAVE_STRCASECMP
+#define strcasecmp __db_Cstrcasecmp
+#define strncasecmp __db_Cstrncasecmp
+#endif
+#ifndef HAVE_STRERROR
+#define strerror __db_Cstrerror
+#endif
+#ifndef HAVE_VSNPRINTF
+#define vsnprintf __db_Cvsnprintf
+#endif
+
+/*
+ * XXX
+ * The following is not part of the automatic configuration setup, but
+ * provides the information necessary to build Berkeley DB on Windows.
+ */
+#include <sys/types.h>
+#include <sys/stat.h>
+
+#include <direct.h>
+#include <fcntl.h>
+#include <io.h>
+#include <limits.h>
+#include <memory.h>
+#include <process.h>
+#include <signal.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <time.h>
+#include <errno.h>
+
+/*
+ * To build Tcl interface libraries, the include path must be configured to
+ * use the directory containing <tcl.h>, usually the include directory in
+ * the Tcl distribution.
+ */
+#ifdef DB_TCL_SUPPORT
+#include <tcl.h>
+#endif
+
+#define WIN32_LEAN_AND_MEAN
+#include <windows.h>
+
+/*
+ * All of the necessary includes have been included, ignore the #includes
+ * in the Berkeley DB source files.
+ */
+#define NO_SYSTEM_INCLUDES
+
+/*
+ * Win32 has getcwd, snprintf and vsnprintf, but under different names.
+ */
+#define getcwd(buf, size) _getcwd(buf, size)
+#define snprintf _snprintf
+#define vsnprintf _vsnprintf
+
+/*
+ * Win32 does not define getopt and friends in any header file, so we must.
+ */
+#if defined(__cplusplus)
+extern "C" {
+#endif
+extern int optind;
+extern char *optarg;
+extern int getopt(int, char * const *, const char *);
+#if defined(__cplusplus)
+}
+#endif
+
+/*
+ * We use DB_WIN32 much as one would use _WIN32, to determine that we're
+ * using an operating system environment that supports Win32 calls
+ * and semantics. We don't use _WIN32 because cygwin/gcc also defines
+ * that, even though it closely emulates the Unix environment.
+ */
+#define DB_WIN32 1
+
+/*
+ * This is a grievous hack -- once we've included windows.h, we have no choice
+ * but to use ANSI-style varargs (because it pulls in stdarg.h for us). DB's
+ * code decides which type of varargs to use based on the state of __STDC__.
+ * Sensible. Unfortunately, Microsoft's compiler _doesn't_ define __STDC__
+ * unless you invoke it with arguments turning OFF all vendor extensions. Even
+ * more unfortunately, if we do that, it fails to parse windows.h!!!!! So, we
+ * define __STDC__ here, after windows.h comes in. Note: the compiler knows
+ * we've defined it, and starts enforcing strict ANSI compliance from this point
+ * on.
+ */
+#define __STDC__ 1
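
The #ifndef HAVE_* block above reroutes libc routines that configure did not find to Berkeley DB's private __db_C* replacements, and the Win32 section then maps getcwd/snprintf/vsnprintf onto their underscore-prefixed CRT equivalents. A minimal, self-contained sketch of how one such remapping resolves at a call site follows; the __db_Cstrcasecmp body is a simplified stand-in for DB's clib replacement, not the real implementation.

/*
 * Sketch only: how the HAVE_STRCASECMP guard above swaps in a private
 * implementation at compile time.  The stub stands in for the real
 * __db_Cstrcasecmp in DB's clib sources.
 */
#include <ctype.h>
#include <stdio.h>

static int
__db_Cstrcasecmp(const char *a, const char *b)
{
	for (; tolower((unsigned char)*a) == tolower((unsigned char)*b); ++a, ++b)
		if (*a == '\0')
			return (0);
	return (tolower((unsigned char)*a) - tolower((unsigned char)*b));
}

#ifndef HAVE_STRCASECMP			/* undefined on Win32, per the header above */
#define strcasecmp __db_Cstrcasecmp	/* every call site now uses DB's copy */
#endif

int
main(void)
{
	printf("%d\n", strcasecmp("DB_CONFIG", "db_config"));	/* prints 0 */
	return (0);
}
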
diff --git a/bdb/dist/win_exports.in b/bdb/dist/win_exports.in
new file mode 100644
index 00000000000..52df529d028
--- /dev/null
+++ b/bdb/dist/win_exports.in
@@ -0,0 +1,134 @@
+# $Id: win_exports.in,v 1.25 2002/08/29 14:22:21 margo Exp $
+
+# Standard interfaces.
+ db_create
+ db_env_create
+ db_strerror
+ db_version
+ db_xa_switch
+ log_compare
+ txn_abort
+ txn_begin
+ txn_commit
+
+# Library configuration interfaces.
+ db_env_set_func_close
+ db_env_set_func_dirfree
+ db_env_set_func_dirlist
+ db_env_set_func_exists
+ db_env_set_func_free
+ db_env_set_func_fsync
+ db_env_set_func_ioinfo
+ db_env_set_func_malloc
+ db_env_set_func_map
+ db_env_set_func_open
+ db_env_set_func_read
+ db_env_set_func_realloc
+ db_env_set_func_rename
+ db_env_set_func_seek
+ db_env_set_func_sleep
+ db_env_set_func_unlink
+ db_env_set_func_unmap
+ db_env_set_func_write
+ db_env_set_func_yield
+
+# Needed for application-specific logging and recovery routines.
+ __db_add_recovery
+
+# These are needed to link the tcl library.
+ __db_dbm_close
+ __db_dbm_delete
+ __db_dbm_fetch
+ __db_dbm_firstkey
+ __db_dbm_init
+ __db_dbm_nextkey
+ __db_dbm_store
+ __db_hcreate
+ __db_hdestroy
+ __db_hsearch
+ __db_loadme
+ __db_ndbm_clearerr
+ __db_ndbm_close
+ __db_ndbm_delete
+ __db_ndbm_dirfno
+ __db_ndbm_error
+ __db_ndbm_fetch
+ __db_ndbm_firstkey
+ __db_ndbm_nextkey
+ __db_ndbm_open
+ __db_ndbm_pagfno
+ __db_ndbm_rdonly
+ __db_ndbm_store
+ __db_panic
+ __db_r_attach
+ __db_r_detach
+ __db_win32_mutex_init
+ __db_win32_mutex_lock
+ __db_win32_mutex_unlock
+ __ham_func2
+ __ham_func3
+ __ham_func4
+ __ham_func5
+ __ham_test
+ __lock_dump_region
+ __memp_dump_region
+ __os_calloc
+ __os_closehandle
+ __os_free
+ __os_ioinfo
+ __os_malloc
+ __os_open
+ __os_openhandle
+ __os_read
+ __os_realloc
+ __os_strdup
+ __os_umalloc
+ __os_write
+
+# These are needed for linking tools or java.
+ __bam_init_print
+ __bam_pgin
+ __bam_pgout
+ __crdel_init_print
+ __db_dispatch
+ __db_dump
+ __db_e_stat
+ __db_err
+ __db_getlong
+ __db_getulong
+ __db_global_values
+ __db_init_print
+ __db_inmemdbflags
+ __db_isbigendian
+ __db_omode
+ __db_overwrite
+ __db_pgin
+ __db_pgout
+ __db_prdbt
+ __db_prfooter
+ __db_prheader
+ __db_rpath
+ __db_util_cache
+ __db_util_interrupted
+ __db_util_logset
+ __db_util_siginit
+ __db_util_sigresend
+ __db_verify_callback
+ __db_verify_internal
+ __dbreg_init_print
+ __fop_init_print
+ __ham_get_meta
+ __ham_init_print
+ __ham_pgin
+ __ham_pgout
+ __ham_release_meta
+ __os_clock
+ __os_get_errno
+ __os_id
+ __os_set_errno
+ __os_sleep
+ __os_ufree
+ __os_yield
+ __qam_init_print
+ __qam_pgin_out
+ __txn_init_print
diff --git a/bdb/env/db_salloc.c b/bdb/env/db_salloc.c
index 4780107c593..1ef768d4114 100644
--- a/bdb/env/db_salloc.c
+++ b/bdb/env/db_salloc.c
@@ -1,14 +1,14 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Copyright (c) 1996-2002
* Sleepycat Software. All rights reserved.
*/
#include "db_config.h"
#ifndef lint
-static const char revid[] = "$Id: db_salloc.c,v 11.10 2000/12/06 19:55:44 ubell Exp $";
+static const char revid[] = "$Id: db_salloc.c,v 11.16 2002/08/24 20:27:25 bostic Exp $";
#endif /* not lint */
#ifndef NO_SYSTEM_INCLUDES
@@ -59,8 +59,8 @@ __db_shalloc_init(area, size)
}
/*
- * __db_shalloc --
- * Allocate some space from the shared region.
+ * __db_shalloc_size --
+ * Return the space needed for an allocation, including alignment.
*
* PUBLIC: int __db_shalloc_size __P((size_t, size_t));
*/
@@ -81,7 +81,7 @@ __db_shalloc_size(len, align)
if (align <= sizeof(db_align_t))
align = sizeof(db_align_t);
- return (ALIGN(len, align) + sizeof (struct __data));
+ return ((int)(ALIGN(len, align) + sizeof (struct __data)));
}
/*
@@ -284,28 +284,6 @@ __db_shalloc_free(regionp, ptr)
}
/*
- * __db_shalloc_count --
- * Return the amount of memory on the free list.
- *
- * PUBLIC: size_t __db_shalloc_count __P((void *));
- */
-size_t
-__db_shalloc_count(addr)
- void *addr;
-{
- struct __data *elp;
- size_t count;
-
- count = 0;
- for (elp = SH_LIST_FIRST((struct __head *)addr, __data);
- elp != NULL;
- elp = SH_LIST_NEXT(elp, links, __data))
- count += elp->len;
-
- return (count);
-}
-
-/*
* __db_shsizeof --
* Return the size of a shalloc'd piece of memory.
*
@@ -355,6 +333,6 @@ __db_shalloc_dump(addr, fp)
for (elp = SH_LIST_FIRST((struct __head *)addr, __data);
elp != NULL;
elp = SH_LIST_NEXT(elp, links, __data))
- fprintf(fp, "%#lx: %lu\t", (u_long)elp, (u_long)elp->len);
+ fprintf(fp, "%#lx: %lu\t", P_TO_ULONG(elp), (u_long)elp->len);
fprintf(fp, "\n");
}
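
The __db_shalloc_size() hunk above only corrects the comment and adds an int cast; the value it reports is still the request rounded up to the alignment plus the per-chunk header kept in the shared region. A standalone sketch of that arithmetic follows; SKETCH_ALIGN and struct sketch_data are simplified stand-ins for the ALIGN macro and struct __data in db_int.h and db_salloc.c.

/* Sketch only: the size calculation behind __db_shalloc_size(). */
#include <stddef.h>
#include <stdio.h>

struct sketch_data {			/* stand-in for struct __data */
	size_t len;
	size_t links;			/* free-list linkage placeholder */
};

/* Round v up to a power-of-two boundary, like DB's ALIGN macro. */
#define SKETCH_ALIGN(v, bound)	(((v) + (bound) - 1) & ~((size_t)(bound) - 1))

static size_t
shalloc_size(size_t len, size_t align)
{
	if (align <= sizeof(unsigned long))	/* db_align_t stand-in */
		align = sizeof(unsigned long);
	return (SKETCH_ALIGN(len, align) + sizeof(struct sketch_data));
}

int
main(void)
{
	/* A 13-byte request with 8-byte alignment: 16 bytes + header. */
	printf("%zu\n", shalloc_size(13, 8));
	return (0);
}
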
diff --git a/bdb/env/db_shash.c b/bdb/env/db_shash.c
index 1c33b383098..743a126307d 100644
--- a/bdb/env/db_shash.c
+++ b/bdb/env/db_shash.c
@@ -1,14 +1,14 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Copyright (c) 1996-2002
* Sleepycat Software. All rights reserved.
*/
#include "db_config.h"
#ifndef lint
-static const char revid[] = "$Id: db_shash.c,v 11.3 2000/02/14 02:59:49 bostic Exp $";
+static const char revid[] = "$Id: db_shash.c,v 11.6 2002/03/01 17:22:16 ubell Exp $";
#endif /* not lint */
#ifndef NO_SYSTEM_INCLUDES
@@ -30,6 +30,7 @@ static const struct {
u_int32_t power;
u_int32_t prime;
} list[] = {
+ { 32, 37}, /* 2^5 */
{ 64, 67}, /* 2^6 */
{ 128, 131}, /* 2^7 */
{ 256, 257}, /* 2^8 */
@@ -89,8 +90,8 @@ __db_tablesize(n_buckets)
*
* Ref: Sedgewick, Algorithms in C, "Hash Functions"
*/
- if (n_buckets < 64)
- n_buckets = 64;
+ if (n_buckets < 32)
+ n_buckets = 32;
for (i = 0;; ++i) {
if (list[i].power == 0) {
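
The table above pairs power-of-two bucket counts with nearby primes, and this change adds a 32-bucket entry and lowers the minimum clamp to match. Only the clamp and the loop head are visible in the hunk, so the code below is a sketch of the selection pattern rather than a copy of __db_tablesize(); in particular, the real function handles running off the end of the table differently, while the sketch simply clamps to the last entry.

/* Sketch only: picking a hash-table size from a power/prime table. */
#include <stdio.h>

static const struct {
	unsigned int power;
	unsigned int prime;
} list[] = {
	{ 32, 37 },		/* 2^5 -- the entry added by this change */
	{ 64, 67 },		/* 2^6 */
	{ 128, 131 },		/* 2^7 */
	{ 256, 257 },		/* 2^8 */
	{ 0, 0 }
};

static unsigned int
tablesize(unsigned int n_buckets)
{
	int i;

	if (n_buckets < 32)			/* was 64 before this change */
		n_buckets = 32;
	for (i = 0; list[i].power != 0; ++i)
		if (list[i].power >= n_buckets)
			return (list[i].prime);
	return (list[i - 1].prime);		/* simplification: clamp */
}

int
main(void)
{
	printf("%u\n", tablesize(40));		/* prints 67 */
	return (0);
}
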
diff --git a/bdb/env/env_file.c b/bdb/env/env_file.c
new file mode 100644
index 00000000000..f221fd8d701
--- /dev/null
+++ b/bdb/env/env_file.c
@@ -0,0 +1,166 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: env_file.c,v 1.5 2002/03/08 17:47:18 sue Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <string.h>
+#endif
+
+#include "db_int.h"
+
+static int __db_overwrite_pass __P((DB_ENV *,
+ const char *, DB_FH *, u_int32_t, u_int32_t, u_int32_t));
+
+/*
+ * __db_fileinit --
+ * Initialize a regular file, optionally zero-filling it as well.
+ *
+ * PUBLIC: int __db_fileinit __P((DB_ENV *, DB_FH *, size_t, int));
+ */
+int
+__db_fileinit(dbenv, fhp, size, zerofill)
+ DB_ENV *dbenv;
+ DB_FH *fhp;
+ size_t size;
+ int zerofill;
+{
+ db_pgno_t pages;
+ size_t i;
+ size_t nw;
+ u_int32_t relative;
+ int ret;
+ char buf[OS_VMPAGESIZE];
+
+ /* Write nuls to the new bytes. */
+ memset(buf, 0, sizeof(buf));
+
+ /*
+ * Extend the region by writing the last page. If the region is >4Gb,
+ * increment may be larger than the maximum possible seek "relative"
+ * argument, as it's an unsigned 32-bit value. Break the offset into
+ * pages of 1MB each so that we don't overflow (2^20 + 2^32 is bigger
+ * than any memory I expect to see for awhile).
+ */
+ if ((ret = __os_seek(dbenv, fhp, 0, 0, 0, 0, DB_OS_SEEK_END)) != 0)
+ return (ret);
+ pages = (db_pgno_t)((size - OS_VMPAGESIZE) / MEGABYTE);
+ relative = (u_int32_t)((size - OS_VMPAGESIZE) % MEGABYTE);
+ if ((ret = __os_seek(dbenv,
+ fhp, MEGABYTE, pages, relative, 0, DB_OS_SEEK_CUR)) != 0)
+ return (ret);
+ if ((ret = __os_write(dbenv, fhp, buf, sizeof(buf), &nw)) != 0)
+ return (ret);
+
+ /*
+ * We may want to guarantee that there is enough disk space for the
+ * file, so we also write a byte to each page. We write the byte
+ * because reading it is insufficient on systems smart enough not to
+ * instantiate disk pages to satisfy a read (e.g., Solaris).
+ */
+ if (zerofill) {
+ pages = (db_pgno_t)(size / MEGABYTE);
+ relative = (u_int32_t)(size % MEGABYTE);
+ if ((ret = __os_seek(dbenv, fhp,
+ MEGABYTE, pages, relative, 1, DB_OS_SEEK_END)) != 0)
+ return (ret);
+
+ /* Write a byte to each page. */
+ for (i = 0; i < size; i += OS_VMPAGESIZE) {
+ if ((ret = __os_write(dbenv, fhp, buf, 1, &nw)) != 0)
+ return (ret);
+ if ((ret = __os_seek(dbenv, fhp,
+ 0, 0, OS_VMPAGESIZE - 1, 0, DB_OS_SEEK_CUR)) != 0)
+ return (ret);
+ }
+ }
+ return (0);
+}
+
+/*
+ * __db_overwrite --
+ * Overwrite a file.
+ *
+ * PUBLIC: int __db_overwrite __P((DB_ENV *, const char *));
+ */
+int
+__db_overwrite(dbenv, path)
+ DB_ENV *dbenv;
+ const char *path;
+{
+ DB_FH fh, *fhp;
+ u_int32_t mbytes, bytes;
+ int ret;
+
+ fhp = &fh;
+ if ((ret = __os_open(dbenv, path, DB_OSO_REGION, 0, fhp)) == 0 &&
+ (ret = __os_ioinfo(dbenv, path, fhp, &mbytes, &bytes, NULL)) == 0) {
+ /*
+ * !!!
+ * Overwrite a regular file with alternating 0xff, 0x00 and 0xff
+ * byte patterns. Implies a fixed-block filesystem; journaling
+ * or logging filesystems will require operating system support.
+ */
+ if ((ret = __db_overwrite_pass(
+ dbenv, path, fhp, mbytes, bytes, 0xff)) != 0)
+ goto err;
+ if ((ret = __db_overwrite_pass(
+ dbenv, path, fhp, mbytes, bytes, 0x00)) != 0)
+ goto err;
+ if ((ret = __db_overwrite_pass(
+ dbenv, path, fhp, mbytes, bytes, 0xff)) != 0)
+ goto err;
+ } else
+ __db_err(dbenv, "%s: %s", path, db_strerror(ret));
+
+err: if (F_ISSET(fhp, DB_FH_VALID))
+ __os_closehandle(dbenv, fhp);
+ return (ret);
+}
+
+/*
+ * __db_overwrite_pass --
+ * A single pass over the file, writing the specified byte pattern.
+ */
+static int
+__db_overwrite_pass(dbenv, path, fhp, mbytes, bytes, pattern)
+ DB_ENV *dbenv;
+ const char *path;
+ DB_FH *fhp;
+ u_int32_t mbytes, bytes, pattern;
+{
+ size_t len, nw;
+ int i, ret;
+ char buf[8 * 1024];
+
+ if ((ret = __os_seek(dbenv, fhp, 0, 0, 0, 0, DB_OS_SEEK_SET)) != 0)
+ goto err;
+
+ memset(buf, pattern, sizeof(buf));
+
+ for (; mbytes > 0; --mbytes)
+ for (i = MEGABYTE / sizeof(buf); i > 0; --i)
+ if ((ret =
+ __os_write(dbenv, fhp, buf, sizeof(buf), &nw)) != 0)
+ goto err;
+ for (; bytes > 0; bytes -= (u_int32_t)len) {
+ len = bytes < sizeof(buf) ? bytes : sizeof(buf);
+ if ((ret = __os_write(dbenv, fhp, buf, len, &nw)) != 0)
+ goto err;
+ }
+
+ if ((ret = __os_fsync(dbenv, fhp)) != 0)
+err: __db_err(dbenv, "%s: %s", path, db_strerror(ret));
+
+ return (ret);
+}
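
The comment in __db_fileinit() above explains why offsets are broken into 1MB pages plus a remainder: the "relative" seek argument is an unsigned 32-bit value, so a region larger than 4GB cannot be addressed in one piece. A small standalone sketch of that decomposition follows; SKETCH_MEGABYTE stands in for DB's MEGABYTE constant.

/* Sketch only: splitting a 64-bit offset into MB-sized pages plus a
 * 32-bit remainder, mirroring the (pgsize, pageno, relative) arguments
 * passed to __os_seek() above. */
#include <stdint.h>
#include <stdio.h>

#define SKETCH_MEGABYTE	(1024 * 1024)

int
main(void)
{
	uint64_t size = (uint64_t)6 * 1024 * 1024 * 1024 + 12345;	/* ~6GB */
	uint32_t pages, relative;

	pages = (uint32_t)(size / SKETCH_MEGABYTE);
	relative = (uint32_t)(size % SKETCH_MEGABYTE);

	/* pages * 1MB + relative reconstructs the original offset. */
	printf("pages=%u relative=%u total=%llu\n", pages, relative,
	    (unsigned long long)((uint64_t)pages * SKETCH_MEGABYTE + relative));
	return (0);
}
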
diff --git a/bdb/env/env_method.c b/bdb/env/env_method.c
index 8acb293acca..b51237ec44a 100644
--- a/bdb/env/env_method.c
+++ b/bdb/env/env_method.c
@@ -1,24 +1,24 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1999, 2000
+ * Copyright (c) 1999-2002
* Sleepycat Software. All rights reserved.
*/
#include "db_config.h"
#ifndef lint
-static const char revid[] = "$Id: env_method.c,v 11.31 2000/11/30 00:58:35 ubell Exp $";
+static const char revid[] = "$Id: env_method.c,v 11.87 2002/08/29 14:22:21 margo Exp $";
#endif /* not lint */
#ifndef NO_SYSTEM_INCLUDES
#include <sys/types.h>
-#include <string.h>
+#ifdef HAVE_RPC
+#include <rpc/rpc.h>
#endif
-#ifdef HAVE_RPC
-#include "db_server.h"
+#include <string.h>
#endif
/*
@@ -29,41 +29,50 @@ static const char revid[] = "$Id: env_method.c,v 11.31 2000/11/30 00:58:35 ubell
#define DB_INITIALIZE_DB_GLOBALS 1
#include "db_int.h"
-#include "db_shash.h"
-#include "db_page.h"
-#include "db_am.h"
-#include "lock.h"
-#include "log.h"
-#include "mp.h"
-#include "txn.h"
+#include "dbinc/crypto.h"
+#include "dbinc/hmac.h"
+#include "dbinc/db_shash.h"
+#include "dbinc/db_page.h"
+#include "dbinc/db_am.h"
+#include "dbinc/lock.h"
+#include "dbinc/log.h"
+#include "dbinc/mp.h"
+#include "dbinc/rep.h"
+#include "dbinc/txn.h"
#ifdef HAVE_RPC
-#include "gen_client_ext.h"
-#include "rpc_client_ext.h"
+#include "dbinc_auto/db_server.h"
+#include "dbinc_auto/rpc_client_ext.h"
#endif
static void __dbenv_err __P((const DB_ENV *, int, const char *, ...));
static void __dbenv_errx __P((const DB_ENV *, const char *, ...));
+static int __dbenv_init __P((DB_ENV *));
+static int __dbenv_set_alloc __P((DB_ENV *, void *(*)(size_t),
+ void *(*)(void *, size_t), void (*)(void *)));
+static int __dbenv_set_app_dispatch __P((DB_ENV *,
+ int (*)(DB_ENV *, DBT *, DB_LSN *, db_recops)));
static int __dbenv_set_data_dir __P((DB_ENV *, const char *));
+static int __dbenv_set_encrypt __P((DB_ENV *, const char *, u_int32_t));
static void __dbenv_set_errcall __P((DB_ENV *, void (*)(const char *, char *)));
static void __dbenv_set_errfile __P((DB_ENV *, FILE *));
static void __dbenv_set_errpfx __P((DB_ENV *, const char *));
static int __dbenv_set_feedback __P((DB_ENV *, void (*)(DB_ENV *, int, int)));
static int __dbenv_set_flags __P((DB_ENV *, u_int32_t, int));
-static int __dbenv_set_mutexlocks __P((DB_ENV *, int));
-static void __dbenv_set_noticecall
- __P((DB_ENV *, void (*)(DB_ENV *, db_notices)));
+static void __dbenv_set_noticecall __P((DB_ENV *, void (*)(DB_ENV *, db_notices)));
static int __dbenv_set_paniccall __P((DB_ENV *, void (*)(DB_ENV *, int)));
-static int __dbenv_set_recovery_init __P((DB_ENV *, int (*)(DB_ENV *)));
-static int __dbenv_set_server_noclnt
- __P((DB_ENV *, char *, long, long, u_int32_t));
+static int __dbenv_set_rpc_server_noclnt
+ __P((DB_ENV *, void *, const char *, long, long, u_int32_t));
static int __dbenv_set_shm_key __P((DB_ENV *, long));
+static int __dbenv_set_tas_spins __P((DB_ENV *, u_int32_t));
static int __dbenv_set_tmp_dir __P((DB_ENV *, const char *));
static int __dbenv_set_verbose __P((DB_ENV *, u_int32_t, int));
/*
* db_env_create --
* DB_ENV constructor.
+ *
+ * EXTERN: int db_env_create __P((DB_ENV **, u_int32_t));
*/
int
db_env_create(dbenvpp, flags)
@@ -75,6 +84,11 @@ db_env_create(dbenvpp, flags)
/*
* !!!
+ * Our caller has not yet had the opportunity to reset the panic
+ * state or turn off mutex locking, and so we can neither check
+ * the panic state nor acquire a mutex in the DB_ENV create path.
+ *
+ * !!!
* We can't call the flags-checking routines, we don't have an
* environment yet.
*/
@@ -91,7 +105,7 @@ db_env_create(dbenvpp, flags)
ret = __dbenv_init(dbenv);
if (ret != 0) {
- __os_free(dbenv, sizeof(*dbenv));
+ __os_free(NULL, dbenv);
return (ret);
}
@@ -102,14 +116,17 @@ db_env_create(dbenvpp, flags)
/*
* __dbenv_init --
* Initialize a DB_ENV structure.
- *
- * PUBLIC: int __dbenv_init __P((DB_ENV *));
*/
-int
+static int
__dbenv_init(dbenv)
DB_ENV *dbenv;
{
/*
+ * !!!
+ * Our caller has not yet had the opportunity to reset the panic
+ * state or turn off mutex locking, and so we can neither check
+ * the panic state nor acquire a mutex in the DB_ENV create path.
+ *
* Set up methods that are the same in both normal and RPC
*/
dbenv->err = __dbenv_err;
@@ -121,44 +138,54 @@ __dbenv_init(dbenv)
#ifdef HAVE_RPC
if (F_ISSET(dbenv, DB_ENV_RPCCLIENT)) {
dbenv->close = __dbcl_env_close;
- dbenv->open = __dbcl_env_open;
+ dbenv->dbremove = __dbcl_env_dbremove;
+ dbenv->dbrename = __dbcl_env_dbrename;
+ dbenv->open = __dbcl_env_open_wrap;
dbenv->remove = __dbcl_env_remove;
+ dbenv->set_alloc = __dbcl_env_alloc;
+ dbenv->set_app_dispatch = __dbcl_set_app_dispatch;
dbenv->set_data_dir = __dbcl_set_data_dir;
+ dbenv->set_encrypt = __dbcl_env_encrypt;
dbenv->set_feedback = __dbcl_env_set_feedback;
dbenv->set_flags = __dbcl_env_flags;
- dbenv->set_mutexlocks = __dbcl_set_mutex_locks;
dbenv->set_noticecall = __dbcl_env_noticecall;
dbenv->set_paniccall = __dbcl_env_paniccall;
- dbenv->set_recovery_init = __dbcl_set_recovery_init;
- dbenv->set_server = __dbcl_envserver;
+ dbenv->set_rpc_server = __dbcl_envrpcserver;
dbenv->set_shm_key = __dbcl_set_shm_key;
+ dbenv->set_tas_spins = __dbcl_set_tas_spins;
+ dbenv->set_timeout = __dbcl_set_timeout;
dbenv->set_tmp_dir = __dbcl_set_tmp_dir;
dbenv->set_verbose = __dbcl_set_verbose;
} else {
#endif
dbenv->close = __dbenv_close;
+ dbenv->dbremove = __dbenv_dbremove;
+ dbenv->dbrename = __dbenv_dbrename;
dbenv->open = __dbenv_open;
dbenv->remove = __dbenv_remove;
+ dbenv->set_alloc = __dbenv_set_alloc;
+ dbenv->set_app_dispatch = __dbenv_set_app_dispatch;
dbenv->set_data_dir = __dbenv_set_data_dir;
+ dbenv->set_encrypt = __dbenv_set_encrypt;
dbenv->set_feedback = __dbenv_set_feedback;
dbenv->set_flags = __dbenv_set_flags;
- dbenv->set_mutexlocks = __dbenv_set_mutexlocks;
dbenv->set_noticecall = __dbenv_set_noticecall;
dbenv->set_paniccall = __dbenv_set_paniccall;
- dbenv->set_recovery_init = __dbenv_set_recovery_init;
- dbenv->set_server = __dbenv_set_server_noclnt;
+ dbenv->set_rpc_server = __dbenv_set_rpc_server_noclnt;
dbenv->set_shm_key = __dbenv_set_shm_key;
+ dbenv->set_tas_spins = __dbenv_set_tas_spins;
dbenv->set_tmp_dir = __dbenv_set_tmp_dir;
dbenv->set_verbose = __dbenv_set_verbose;
#ifdef HAVE_RPC
}
#endif
dbenv->shm_key = INVALID_REGION_SEGID;
- dbenv->db_mutexlocks = 1;
+ dbenv->db_ref = 0;
__log_dbenv_create(dbenv); /* Subsystem specific. */
__lock_dbenv_create(dbenv);
__memp_dbenv_create(dbenv);
+ __rep_dbenv_create(dbenv);
__txn_dbenv_create(dbenv);
return (0);
@@ -179,16 +206,7 @@ __dbenv_err(dbenv, error, fmt, va_alist)
va_dcl
#endif
{
- va_list ap;
-
-#ifdef __STDC__
- va_start(ap, fmt);
-#else
- va_start(ap);
-#endif
- __db_real_err(dbenv, error, 1, 1, fmt, ap);
-
- va_end(ap);
+ DB_REAL_ERR(dbenv, error, 1, 1, fmt);
}
/*
@@ -205,16 +223,109 @@ __dbenv_errx(dbenv, fmt, va_alist)
va_dcl
#endif
{
- va_list ap;
+ DB_REAL_ERR(dbenv, 0, 0, 1, fmt);
+}
-#ifdef __STDC__
- va_start(ap, fmt);
+static int
+__dbenv_set_alloc(dbenv, mal_func, real_func, free_func)
+ DB_ENV *dbenv;
+ void *(*mal_func) __P((size_t));
+ void *(*real_func) __P((void *, size_t));
+ void (*free_func) __P((void *));
+{
+ ENV_ILLEGAL_AFTER_OPEN(dbenv, "set_alloc");
+
+ dbenv->db_malloc = mal_func;
+ dbenv->db_realloc = real_func;
+ dbenv->db_free = free_func;
+ return (0);
+}
+
+/*
+ * __dbenv_set_app_dispatch --
+ * Set the transaction abort recover function.
+ */
+static int
+__dbenv_set_app_dispatch(dbenv, app_dispatch)
+ DB_ENV *dbenv;
+ int (*app_dispatch) __P((DB_ENV *, DBT *, DB_LSN *, db_recops));
+{
+ ENV_ILLEGAL_AFTER_OPEN(dbenv, "set_app_dispatch");
+
+ dbenv->app_dispatch = app_dispatch;
+ return (0);
+}
+
+static int
+__dbenv_set_encrypt(dbenv, passwd, flags)
+ DB_ENV *dbenv;
+ const char *passwd;
+ u_int32_t flags;
+{
+#ifdef HAVE_CRYPTO
+ DB_CIPHER *db_cipher;
+ int ret;
+
+ ENV_ILLEGAL_AFTER_OPEN(dbenv, "set_encrypt");
+#define OK_CRYPTO_FLAGS (DB_ENCRYPT_AES)
+
+ if (flags != 0 && LF_ISSET(~OK_CRYPTO_FLAGS))
+ return (__db_ferr(dbenv, "DB_ENV->set_encrypt", 0));
+
+ if (passwd == NULL || strlen(passwd) == 0) {
+ __db_err(dbenv, "Empty password specified to set_encrypt");
+ return (EINVAL);
+ }
+ if (!CRYPTO_ON(dbenv)) {
+ if ((ret = __os_calloc(dbenv, 1, sizeof(DB_CIPHER), &db_cipher))
+ != 0)
+ goto err;
+ dbenv->crypto_handle = db_cipher;
+ } else
+ db_cipher = (DB_CIPHER *)dbenv->crypto_handle;
+
+ if (dbenv->passwd != NULL)
+ __os_free(dbenv, dbenv->passwd);
+ if ((ret = __os_strdup(dbenv, passwd, &dbenv->passwd)) != 0) {
+ __os_free(dbenv, db_cipher);
+ goto err;
+ }
+ /*
+ * We're going to need this often enough to keep around
+ */
+ dbenv->passwd_len = strlen(dbenv->passwd) + 1;
+ /*
+ * The MAC key is for checksumming, and is separate from
+ * the algorithm. So initialize it here, even if they
+ * are using CIPHER_ANY.
+ */
+ __db_derive_mac((u_int8_t *)dbenv->passwd,
+ dbenv->passwd_len, db_cipher->mac_key);
+ switch (flags) {
+ case 0:
+ F_SET(db_cipher, CIPHER_ANY);
+ break;
+ case DB_ENCRYPT_AES:
+ if ((ret = __crypto_algsetup(dbenv, db_cipher, CIPHER_AES, 0))
+ != 0)
+ goto err1;
+ break;
+ }
+ return (0);
+
+err1:
+ __os_free(dbenv, dbenv->passwd);
+ __os_free(dbenv, db_cipher);
+ dbenv->crypto_handle = NULL;
+err:
+ return (ret);
#else
- va_start(ap);
-#endif
- __db_real_err(dbenv, 0, 0, 1, fmt, ap);
+ COMPQUIET(dbenv, NULL);
+ COMPQUIET(passwd, NULL);
+ COMPQUIET(flags, 0);
- va_end(ap);
+ return (__db_eopnotsup(dbenv));
+#endif
}
static int
@@ -223,11 +334,23 @@ __dbenv_set_flags(dbenv, flags, onoff)
u_int32_t flags;
int onoff;
{
-#define OK_FLAGS (DB_CDB_ALLDB | DB_NOMMAP | DB_TXN_NOSYNC)
+#define OK_FLAGS \
+ (DB_AUTO_COMMIT | DB_CDB_ALLDB | DB_DIRECT_DB | DB_DIRECT_LOG | \
+ DB_NOLOCKING | DB_NOMMAP | DB_NOPANIC | DB_OVERWRITE | \
+ DB_PANIC_ENVIRONMENT | DB_REGION_INIT | DB_TXN_NOSYNC | \
+ DB_TXN_WRITE_NOSYNC | DB_YIELDCPU)
if (LF_ISSET(~OK_FLAGS))
- return (__db_ferr(dbenv, "DBENV->set_flags", 0));
+ return (__db_ferr(dbenv, "DB_ENV->set_flags", 0));
+ if (onoff && LF_ISSET(DB_TXN_WRITE_NOSYNC) && LF_ISSET(DB_TXN_NOSYNC))
+ return (__db_ferr(dbenv, "DB_ENV->set_flags", 1));
+ if (LF_ISSET(DB_AUTO_COMMIT)) {
+ if (onoff)
+ F_SET(dbenv, DB_ENV_AUTO_COMMIT);
+ else
+ F_CLR(dbenv, DB_ENV_AUTO_COMMIT);
+ }
if (LF_ISSET(DB_CDB_ALLDB)) {
ENV_ILLEGAL_AFTER_OPEN(dbenv, "set_flags: DB_CDB_ALLDB");
if (onoff)
@@ -235,18 +358,72 @@ __dbenv_set_flags(dbenv, flags, onoff)
else
F_CLR(dbenv, DB_ENV_CDB_ALLDB);
}
+ if (LF_ISSET(DB_DIRECT_DB)) {
+ if (onoff)
+ F_SET(dbenv, DB_ENV_DIRECT_DB);
+ else
+ F_CLR(dbenv, DB_ENV_DIRECT_DB);
+ }
+ if (LF_ISSET(DB_DIRECT_LOG)) {
+ if (onoff)
+ F_SET(dbenv, DB_ENV_DIRECT_LOG);
+ else
+ F_CLR(dbenv, DB_ENV_DIRECT_LOG);
+ }
+ if (LF_ISSET(DB_NOLOCKING)) {
+ if (onoff)
+ F_SET(dbenv, DB_ENV_NOLOCKING);
+ else
+ F_CLR(dbenv, DB_ENV_NOLOCKING);
+ }
if (LF_ISSET(DB_NOMMAP)) {
if (onoff)
F_SET(dbenv, DB_ENV_NOMMAP);
else
F_CLR(dbenv, DB_ENV_NOMMAP);
}
+ if (LF_ISSET(DB_NOPANIC)) {
+ if (onoff)
+ F_SET(dbenv, DB_ENV_NOPANIC);
+ else
+ F_CLR(dbenv, DB_ENV_NOPANIC);
+ }
+ if (LF_ISSET(DB_OVERWRITE)) {
+ if (onoff)
+ F_SET(dbenv, DB_ENV_OVERWRITE);
+ else
+ F_CLR(dbenv, DB_ENV_OVERWRITE);
+ }
+ if (LF_ISSET(DB_PANIC_ENVIRONMENT)) {
+ ENV_ILLEGAL_BEFORE_OPEN(dbenv,
+ "set_flags: DB_PANIC_ENVIRONMENT");
+ PANIC_SET(dbenv, onoff);
+ }
+ if (LF_ISSET(DB_REGION_INIT)) {
+ ENV_ILLEGAL_AFTER_OPEN(dbenv, "set_flags: DB_REGION_INIT");
+ if (onoff)
+ F_SET(dbenv, DB_ENV_REGION_INIT);
+ else
+ F_CLR(dbenv, DB_ENV_REGION_INIT);
+ }
if (LF_ISSET(DB_TXN_NOSYNC)) {
if (onoff)
F_SET(dbenv, DB_ENV_TXN_NOSYNC);
else
F_CLR(dbenv, DB_ENV_TXN_NOSYNC);
}
+ if (LF_ISSET(DB_TXN_WRITE_NOSYNC)) {
+ if (onoff)
+ F_SET(dbenv, DB_ENV_TXN_WRITE_NOSYNC);
+ else
+ F_CLR(dbenv, DB_ENV_TXN_WRITE_NOSYNC);
+ }
+ if (LF_ISSET(DB_YIELDCPU)) {
+ if (onoff)
+ F_SET(dbenv, DB_ENV_YIELDCPU);
+ else
+ F_CLR(dbenv, DB_ENV_YIELDCPU);
+ }
return (0);
}
@@ -267,7 +444,7 @@ __dbenv_set_data_dir(dbenv, dir)
dbenv->data_cnt *= 2;
if ((ret = __os_realloc(dbenv,
dbenv->data_cnt * sizeof(char **),
- NULL, &dbenv->db_data_dir)) != 0)
+ &dbenv->db_data_dir)) != 0)
return (ret);
}
return (__os_strdup(dbenv,
@@ -314,16 +491,7 @@ __dbenv_set_noticecall(dbenv, noticecall)
{
dbenv->db_noticecall = noticecall;
}
-
-static int
-__dbenv_set_mutexlocks(dbenv, onoff)
- DB_ENV *dbenv;
- int onoff;
-{
- dbenv->db_mutexlocks = onoff;
- return (0);
-}
-
+
static int
__dbenv_set_paniccall(dbenv, paniccall)
DB_ENV *dbenv;
@@ -334,25 +502,22 @@ __dbenv_set_paniccall(dbenv, paniccall)
}
static int
-__dbenv_set_recovery_init(dbenv, recovery_init)
+__dbenv_set_shm_key(dbenv, shm_key)
DB_ENV *dbenv;
- int (*recovery_init) __P((DB_ENV *));
+ long shm_key; /* !!!: really a key_t. */
{
- ENV_ILLEGAL_AFTER_OPEN(dbenv, "set_recovery_init");
-
- dbenv->db_recovery_init = recovery_init;
+ ENV_ILLEGAL_AFTER_OPEN(dbenv, "set_shm_key");
+ dbenv->shm_key = shm_key;
return (0);
}
static int
-__dbenv_set_shm_key(dbenv, shm_key)
+__dbenv_set_tas_spins(dbenv, tas_spins)
DB_ENV *dbenv;
- long shm_key; /* !!!: really a key_t. */
+ u_int32_t tas_spins;
{
- ENV_ILLEGAL_AFTER_OPEN(dbenv, "set_shm_key");
-
- dbenv->shm_key = shm_key;
+ dbenv->tas_spins = tas_spins;
return (0);
}
@@ -362,7 +527,7 @@ __dbenv_set_tmp_dir(dbenv, dir)
const char *dir;
{
if (dbenv->db_tmp_dir != NULL)
- __os_freestr(dbenv->db_tmp_dir);
+ __os_free(dbenv, dbenv->db_tmp_dir);
return (__os_strdup(dbenv, dir, &dbenv->db_tmp_dir));
}
@@ -376,6 +541,7 @@ __dbenv_set_verbose(dbenv, which, onoff)
case DB_VERB_CHKPOINT:
case DB_VERB_DEADLOCK:
case DB_VERB_RECOVERY:
+ case DB_VERB_REPLICATION:
case DB_VERB_WAITSFOR:
if (onoff)
FLD_SET(dbenv->verbose, which);
@@ -399,7 +565,7 @@ __db_mi_env(dbenv, name)
DB_ENV *dbenv;
const char *name;
{
- __db_err(dbenv, "%s: method meaningless in shared environment", name);
+ __db_err(dbenv, "%s: method not permitted in shared environment", name);
return (EINVAL);
}
@@ -415,59 +581,63 @@ __db_mi_open(dbenv, name, after)
const char *name;
int after;
{
- __db_err(dbenv,
- "%s: method meaningless %s open", name, after ? "after" : "before");
+ __db_err(dbenv, "%s: method not permitted %s open",
+ name, after ? "after" : "before");
return (EINVAL);
}
/*
* __db_env_config --
- * Method or function called without subsystem being configured.
+ * Method or function called without required configuration.
*
- * PUBLIC: int __db_env_config __P((DB_ENV *, int));
+ * PUBLIC: int __db_env_config __P((DB_ENV *, char *, u_int32_t));
*/
int
-__db_env_config(dbenv, subsystem)
+__db_env_config(dbenv, i, flags)
DB_ENV *dbenv;
- int subsystem;
+ char *i;
+ u_int32_t flags;
{
- const char *name;
+ char *sub;
- switch (subsystem) {
+ switch (flags) {
case DB_INIT_LOCK:
- name = "lock";
+ sub = "locking";
break;
case DB_INIT_LOG:
- name = "log";
+ sub = "logging";
break;
case DB_INIT_MPOOL:
- name = "mpool";
+ sub = "memory pool";
break;
case DB_INIT_TXN:
- name = "txn";
+ sub = "transaction";
break;
default:
- name = "unknown";
+ sub = "<unspecified>";
break;
}
__db_err(dbenv,
- "%s interface called with environment not configured for that subsystem",
- name);
+ "%s interface requires an environment configured for the %s subsystem",
+ i, sub);
return (EINVAL);
}
static int
-__dbenv_set_server_noclnt(dbenv, host, tsec, ssec, flags)
+__dbenv_set_rpc_server_noclnt(dbenv, cl, host, tsec, ssec, flags)
DB_ENV *dbenv;
- char *host;
+ void *cl;
+ const char *host;
long tsec, ssec;
u_int32_t flags;
{
COMPQUIET(host, NULL);
+ COMPQUIET(cl, NULL);
COMPQUIET(tsec, 0);
COMPQUIET(ssec, 0);
COMPQUIET(flags, 0);
- __db_err(dbenv, "set_server method meaningless in non-RPC enviroment");
+ __db_err(dbenv,
+ "set_rpc_server method not permitted in non-RPC environment");
return (__db_eopnotsup(dbenv));
}
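
The method table wired up above is what an application reaches through db_env_create(). The short usage sketch below exercises two of the methods this hunk touches, set_flags() and set_encrypt(); it assumes the db.h generated from this tree is on the include path and trims error handling for brevity.

/* Sketch only: application-side use of the handle methods set up above. */
#include <stdio.h>
#include "db.h"

int
main(void)
{
	DB_ENV *dbenv;
	int ret;

	if ((ret = db_env_create(&dbenv, 0)) != 0)
		return (1);

	/* DB_TXN_WRITE_NOSYNC and DB_TXN_NOSYNC are now mutually
	 * exclusive, so only one of the two may be turned on. */
	(void)dbenv->set_flags(dbenv, DB_TXN_WRITE_NOSYNC, 1);

	/* set_encrypt() succeeds only in a crypto-enabled build. */
	if ((ret = dbenv->set_encrypt(dbenv,
	    "example-password", DB_ENCRYPT_AES)) != 0)
		fprintf(stderr, "set_encrypt: %s\n", db_strerror(ret));

	(void)dbenv->close(dbenv, 0);
	return (0);
}
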
diff --git a/bdb/env/env_method.c.b b/bdb/env/env_method.c.b
new file mode 100644
index 00000000000..b6802b8a77c
--- /dev/null
+++ b/bdb/env/env_method.c.b
@@ -0,0 +1,643 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1999-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: env_method.c,v 11.87 2002/08/29 14:22:21 margo Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#ifdef HAVE_RPC
+#include <rpc/rpc.h>
+#endif
+
+#include <string.h>
+#endif
+
+/*
+ * This is the file that initializes the global array. Do it this way because
+ * people keep changing one without changing the other. Having declaration and
+ * initialization in one file will hopefully fix that.
+ */
+#define DB_INITIALIZE_DB_GLOBALS 1
+
+#include "db_int.h"
+#include "dbinc/crypto.h"
+#include "dbinc/hmac.h"
+#include "dbinc/db_shash.h"
+#include "dbinc/db_page.h"
+#include "dbinc/db_am.h"
+#include "dbinc/lock.h"
+#include "dbinc/log.h"
+#include "dbinc/mp.h"
+#include "dbinc/rep.h"
+#include "dbinc/txn.h"
+
+#ifdef HAVE_RPC
+#include "dbinc_auto/db_server.h"
+#include "dbinc_auto/rpc_client_ext.h"
+#endif
+
+static void __dbenv_err __P((const DB_ENV *, int, const char *, ...));
+static void __dbenv_errx __P((const DB_ENV *, const char *, ...));
+static int __dbenv_init __P((DB_ENV *));
+static int __dbenv_set_alloc __P((DB_ENV *, void *(*)(size_t),
+ void *(*)(void *, size_t), void (*)(void *)));
+static int __dbenv_set_app_dispatch __P((DB_ENV *,
+ int (*)(DB_ENV *, DBT *, DB_LSN *, db_recops)));
+static int __dbenv_set_data_dir __P((DB_ENV *, const char *));
+static int __dbenv_set_encrypt __P((DB_ENV *, const char *, u_int32_t));
+static void __dbenv_set_errcall __P((DB_ENV *, void (*)(const char *, char *)));
+static void __dbenv_set_errfile __P((DB_ENV *, FILE *));
+static void __dbenv_set_errpfx __P((DB_ENV *, const char *));
+static int __dbenv_set_feedback __P((DB_ENV *, void (*)(DB_ENV *, int, int)));
+static int __dbenv_set_flags __P((DB_ENV *, u_int32_t, int));
+static void __dbenv_set_noticecall __P((DB_ENV *, void (*)(DB_ENV *, db_notices)));
+static int __dbenv_set_paniccall __P((DB_ENV *, void (*)(DB_ENV *, int)));
+static int __dbenv_set_rpc_server_noclnt
+ __P((DB_ENV *, void *, const char *, long, long, u_int32_t));
+static int __dbenv_set_shm_key __P((DB_ENV *, long));
+static int __dbenv_set_tas_spins __P((DB_ENV *, u_int32_t));
+static int __dbenv_set_tmp_dir __P((DB_ENV *, const char *));
+static int __dbenv_set_verbose __P((DB_ENV *, u_int32_t, int));
+
+/*
+ * db_env_create --
+ * DB_ENV constructor.
+ *
+ * EXTERN: int db_env_create __P((DB_ENV **, u_int32_t));
+ */
+int
+db_env_create(dbenvpp, flags)
+ DB_ENV **dbenvpp;
+ u_int32_t flags;
+{
+ DB_ENV *dbenv;
+ int ret;
+
+ /*
+ * !!!
+ * Our caller has not yet had the opportunity to reset the panic
+ * state or turn off mutex locking, and so we can neither check
+ * the panic state nor acquire a mutex in the DB_ENV create path.
+ *
+ * !!!
+ * We can't call the flags-checking routines, we don't have an
+ * environment yet.
+ */
+ if (flags != 0 && flags != DB_CLIENT)
+ return (EINVAL);
+
+ if ((ret = __os_calloc(NULL, 1, sizeof(*dbenv), &dbenv)) != 0)
+ return (ret);
+
+#ifdef HAVE_RPC
+ if (LF_ISSET(DB_CLIENT))
+ F_SET(dbenv, DB_ENV_RPCCLIENT);
+#endif
+ ret = __dbenv_init(dbenv);
+
+ if (ret != 0) {
+ __os_free(NULL, dbenv);
+ return (ret);
+ }
+
+ *dbenvpp = dbenv;
+ return (0);
+}
+
+/*
+ * __dbenv_init --
+ * Initialize a DB_ENV structure.
+ */
+static int
+__dbenv_init(dbenv)
+ DB_ENV *dbenv;
+{
+ /*
+ * !!!
+ * Our caller has not yet had the opportunity to reset the panic
+ * state or turn off mutex locking, and so we can neither check
+ * the panic state nor acquire a mutex in the DB_ENV create path.
+ *
+ * Set up methods that are the same in both normal and RPC
+ */
+ dbenv->err = __dbenv_err;
+ dbenv->errx = __dbenv_errx;
+ dbenv->set_errcall = __dbenv_set_errcall;
+ dbenv->set_errfile = __dbenv_set_errfile;
+ dbenv->set_errpfx = __dbenv_set_errpfx;
+
+#ifdef HAVE_RPC
+ if (F_ISSET(dbenv, DB_ENV_RPCCLIENT)) {
+ dbenv->close = __dbcl_env_close;
+ dbenv->dbremove = __dbcl_env_dbremove;
+ dbenv->dbrename = __dbcl_env_dbrename;
+ dbenv->open = __dbcl_env_open_wrap;
+ dbenv->remove = __dbcl_env_remove;
+ dbenv->set_alloc = __dbcl_env_alloc;
+ dbenv->set_app_dispatch = __dbcl_set_app_dispatch;
+ dbenv->set_data_dir = __dbcl_set_data_dir;
+ dbenv->set_encrypt = __dbcl_env_encrypt;
+ dbenv->set_feedback = __dbcl_env_set_feedback;
+ dbenv->set_flags = __dbcl_env_flags;
+ dbenv->set_noticecall = __dbcl_env_noticecall;
+ dbenv->set_paniccall = __dbcl_env_paniccall;
+ dbenv->set_rpc_server = __dbcl_envrpcserver;
+ dbenv->set_shm_key = __dbcl_set_shm_key;
+ dbenv->set_tas_spins = __dbcl_set_tas_spins;
+ dbenv->set_timeout = __dbcl_set_timeout;
+ dbenv->set_tmp_dir = __dbcl_set_tmp_dir;
+ dbenv->set_verbose = __dbcl_set_verbose;
+ } else {
+#endif
+ dbenv->close = __dbenv_close;
+ dbenv->dbremove = __dbenv_dbremove;
+ dbenv->dbrename = __dbenv_dbrename;
+ dbenv->open = __dbenv_open;
+ dbenv->remove = __dbenv_remove;
+ dbenv->set_alloc = __dbenv_set_alloc;
+ dbenv->set_app_dispatch = __dbenv_set_app_dispatch;
+ dbenv->set_data_dir = __dbenv_set_data_dir;
+ dbenv->set_encrypt = __dbenv_set_encrypt;
+ dbenv->set_feedback = __dbenv_set_feedback;
+ dbenv->set_flags = __dbenv_set_flags;
+ dbenv->set_noticecall = __dbcl_env_noticecall;
+ dbenv->set_paniccall = __dbenv_set_paniccall;
+ dbenv->set_rpc_server = __dbenv_set_rpc_server_noclnt;
+ dbenv->set_shm_key = __dbenv_set_shm_key;
+ dbenv->set_tas_spins = __dbenv_set_tas_spins;
+ dbenv->set_tmp_dir = __dbenv_set_tmp_dir;
+ dbenv->set_verbose = __dbenv_set_verbose;
+#ifdef HAVE_RPC
+ }
+#endif
+ dbenv->shm_key = INVALID_REGION_SEGID;
+ dbenv->db_ref = 0;
+
+ __log_dbenv_create(dbenv); /* Subsystem specific. */
+ __lock_dbenv_create(dbenv);
+ __memp_dbenv_create(dbenv);
+ __rep_dbenv_create(dbenv);
+ __txn_dbenv_create(dbenv);
+
+ return (0);
+}
+
+/*
+ * __dbenv_err --
+ * Error message, including the standard error string.
+ */
+static void
+#ifdef __STDC__
+__dbenv_err(const DB_ENV *dbenv, int error, const char *fmt, ...)
+#else
+__dbenv_err(dbenv, error, fmt, va_alist)
+ const DB_ENV *dbenv;
+ int error;
+ const char *fmt;
+ va_dcl
+#endif
+{
+ DB_REAL_ERR(dbenv, error, 1, 1, fmt);
+}
+
+/*
+ * __dbenv_errx --
+ * Error message.
+ */
+static void
+#ifdef __STDC__
+__dbenv_errx(const DB_ENV *dbenv, const char *fmt, ...)
+#else
+__dbenv_errx(dbenv, fmt, va_alist)
+ const DB_ENV *dbenv;
+ const char *fmt;
+ va_dcl
+#endif
+{
+ DB_REAL_ERR(dbenv, 0, 0, 1, fmt);
+}
+
+static int
+__dbenv_set_alloc(dbenv, mal_func, real_func, free_func)
+ DB_ENV *dbenv;
+ void *(*mal_func) __P((size_t));
+ void *(*real_func) __P((void *, size_t));
+ void (*free_func) __P((void *));
+{
+ ENV_ILLEGAL_AFTER_OPEN(dbenv, "set_alloc");
+
+ dbenv->db_malloc = mal_func;
+ dbenv->db_realloc = real_func;
+ dbenv->db_free = free_func;
+ return (0);
+}
+
+/*
+ * __dbenv_set_app_dispatch --
+ * Set the transaction abort recover function.
+ */
+static int
+__dbenv_set_app_dispatch(dbenv, app_dispatch)
+ DB_ENV *dbenv;
+ int (*app_dispatch) __P((DB_ENV *, DBT *, DB_LSN *, db_recops));
+{
+ ENV_ILLEGAL_AFTER_OPEN(dbenv, "set_app_dispatch");
+
+ dbenv->app_dispatch = app_dispatch;
+ return (0);
+}
+
+static int
+__dbenv_set_encrypt(dbenv, passwd, flags)
+ DB_ENV *dbenv;
+ const char *passwd;
+ u_int32_t flags;
+{
+#ifdef HAVE_CRYPTO
+ DB_CIPHER *db_cipher;
+ int ret;
+
+ ENV_ILLEGAL_AFTER_OPEN(dbenv, "set_encrypt");
+#define OK_CRYPTO_FLAGS (DB_ENCRYPT_AES)
+
+ if (flags != 0 && LF_ISSET(~OK_CRYPTO_FLAGS))
+ return (__db_ferr(dbenv, "DB_ENV->set_encrypt", 0));
+
+ if (passwd == NULL || strlen(passwd) == 0) {
+ __db_err(dbenv, "Empty password specified to set_encrypt");
+ return (EINVAL);
+ }
+ if (!CRYPTO_ON(dbenv)) {
+ if ((ret = __os_calloc(dbenv, 1, sizeof(DB_CIPHER), &db_cipher))
+ != 0)
+ goto err;
+ dbenv->crypto_handle = db_cipher;
+ } else
+ db_cipher = (DB_CIPHER *)dbenv->crypto_handle;
+
+ if (dbenv->passwd != NULL)
+ __os_free(dbenv, dbenv->passwd);
+ if ((ret = __os_strdup(dbenv, passwd, &dbenv->passwd)) != 0) {
+ __os_free(dbenv, db_cipher);
+ goto err;
+ }
+ /*
+ * We're going to need this often enough to keep around
+ */
+ dbenv->passwd_len = strlen(dbenv->passwd) + 1;
+ /*
+ * The MAC key is for checksumming, and is separate from
+ * the algorithm. So initialize it here, even if they
+ * are using CIPHER_ANY.
+ */
+ __db_derive_mac((u_int8_t *)dbenv->passwd,
+ dbenv->passwd_len, db_cipher->mac_key);
+ switch (flags) {
+ case 0:
+ F_SET(db_cipher, CIPHER_ANY);
+ break;
+ case DB_ENCRYPT_AES:
+ if ((ret = __crypto_algsetup(dbenv, db_cipher, CIPHER_AES, 0))
+ != 0)
+ goto err1;
+ break;
+ }
+ return (0);
+
+err1:
+ __os_free(dbenv, dbenv->passwd);
+ __os_free(dbenv, db_cipher);
+ dbenv->crypto_handle = NULL;
+err:
+ return (ret);
+#else
+ COMPQUIET(dbenv, NULL);
+ COMPQUIET(passwd, NULL);
+ COMPQUIET(flags, 0);
+
+ return (__db_eopnotsup(dbenv));
+#endif
+}
+
+static int
+__dbenv_set_flags(dbenv, flags, onoff)
+ DB_ENV *dbenv;
+ u_int32_t flags;
+ int onoff;
+{
+#define OK_FLAGS \
+ (DB_AUTO_COMMIT | DB_CDB_ALLDB | DB_DIRECT_DB | DB_DIRECT_LOG | \
+ DB_NOLOCKING | DB_NOMMAP | DB_NOPANIC | DB_OVERWRITE | \
+ DB_PANIC_ENVIRONMENT | DB_REGION_INIT | DB_TXN_NOSYNC | \
+ DB_TXN_WRITE_NOSYNC | DB_YIELDCPU)
+
+ if (LF_ISSET(~OK_FLAGS))
+ return (__db_ferr(dbenv, "DB_ENV->set_flags", 0));
+ if (onoff && LF_ISSET(DB_TXN_WRITE_NOSYNC) && LF_ISSET(DB_TXN_NOSYNC))
+ return (__db_ferr(dbenv, "DB_ENV->set_flags", 1));
+
+ if (LF_ISSET(DB_AUTO_COMMIT)) {
+ if (onoff)
+ F_SET(dbenv, DB_ENV_AUTO_COMMIT);
+ else
+ F_CLR(dbenv, DB_ENV_AUTO_COMMIT);
+ }
+ if (LF_ISSET(DB_CDB_ALLDB)) {
+ ENV_ILLEGAL_AFTER_OPEN(dbenv, "set_flags: DB_CDB_ALLDB");
+ if (onoff)
+ F_SET(dbenv, DB_ENV_CDB_ALLDB);
+ else
+ F_CLR(dbenv, DB_ENV_CDB_ALLDB);
+ }
+ if (LF_ISSET(DB_DIRECT_DB)) {
+ if (onoff)
+ F_SET(dbenv, DB_ENV_DIRECT_DB);
+ else
+ F_CLR(dbenv, DB_ENV_DIRECT_DB);
+ }
+ if (LF_ISSET(DB_DIRECT_LOG)) {
+ if (onoff)
+ F_SET(dbenv, DB_ENV_DIRECT_LOG);
+ else
+ F_CLR(dbenv, DB_ENV_DIRECT_LOG);
+ }
+ if (LF_ISSET(DB_NOLOCKING)) {
+ if (onoff)
+ F_SET(dbenv, DB_ENV_NOLOCKING);
+ else
+ F_CLR(dbenv, DB_ENV_NOLOCKING);
+ }
+ if (LF_ISSET(DB_NOMMAP)) {
+ if (onoff)
+ F_SET(dbenv, DB_ENV_NOMMAP);
+ else
+ F_CLR(dbenv, DB_ENV_NOMMAP);
+ }
+ if (LF_ISSET(DB_NOPANIC)) {
+ if (onoff)
+ F_SET(dbenv, DB_ENV_NOPANIC);
+ else
+ F_CLR(dbenv, DB_ENV_NOPANIC);
+ }
+ if (LF_ISSET(DB_OVERWRITE)) {
+ if (onoff)
+ F_SET(dbenv, DB_ENV_OVERWRITE);
+ else
+ F_CLR(dbenv, DB_ENV_OVERWRITE);
+ }
+ if (LF_ISSET(DB_PANIC_ENVIRONMENT)) {
+ ENV_ILLEGAL_BEFORE_OPEN(dbenv,
+ "set_flags: DB_PANIC_ENVIRONMENT");
+ PANIC_SET(dbenv, onoff);
+ }
+ if (LF_ISSET(DB_REGION_INIT)) {
+ ENV_ILLEGAL_AFTER_OPEN(dbenv, "set_flags: DB_REGION_INIT");
+ if (onoff)
+ F_SET(dbenv, DB_ENV_REGION_INIT);
+ else
+ F_CLR(dbenv, DB_ENV_REGION_INIT);
+ }
+ if (LF_ISSET(DB_TXN_NOSYNC)) {
+ if (onoff)
+ F_SET(dbenv, DB_ENV_TXN_NOSYNC);
+ else
+ F_CLR(dbenv, DB_ENV_TXN_NOSYNC);
+ }
+ if (LF_ISSET(DB_TXN_WRITE_NOSYNC)) {
+ if (onoff)
+ F_SET(dbenv, DB_ENV_TXN_WRITE_NOSYNC);
+ else
+ F_CLR(dbenv, DB_ENV_TXN_WRITE_NOSYNC);
+ }
+ if (LF_ISSET(DB_YIELDCPU)) {
+ if (onoff)
+ F_SET(dbenv, DB_ENV_YIELDCPU);
+ else
+ F_CLR(dbenv, DB_ENV_YIELDCPU);
+ }
+ return (0);
+}
+
+static int
+__dbenv_set_data_dir(dbenv, dir)
+ DB_ENV *dbenv;
+ const char *dir;
+{
+ int ret;
+
+#define DATA_INIT_CNT 20 /* Start with 20 data slots. */
+ if (dbenv->db_data_dir == NULL) {
+ if ((ret = __os_calloc(dbenv, DATA_INIT_CNT,
+ sizeof(char **), &dbenv->db_data_dir)) != 0)
+ return (ret);
+ dbenv->data_cnt = DATA_INIT_CNT;
+ } else if (dbenv->data_next == dbenv->data_cnt - 1) {
+ dbenv->data_cnt *= 2;
+ if ((ret = __os_realloc(dbenv,
+ dbenv->data_cnt * sizeof(char **),
+ &dbenv->db_data_dir)) != 0)
+ return (ret);
+ }
+ return (__os_strdup(dbenv,
+ dir, &dbenv->db_data_dir[dbenv->data_next++]));
+}
+
+static void
+__dbenv_set_errcall(dbenv, errcall)
+ DB_ENV *dbenv;
+ void (*errcall) __P((const char *, char *));
+{
+ dbenv->db_errcall = errcall;
+}
+
+static void
+__dbenv_set_errfile(dbenv, errfile)
+ DB_ENV *dbenv;
+ FILE *errfile;
+{
+ dbenv->db_errfile = errfile;
+}
+
+static void
+__dbenv_set_errpfx(dbenv, errpfx)
+ DB_ENV *dbenv;
+ const char *errpfx;
+{
+ dbenv->db_errpfx = errpfx;
+}
+
+static int
+__dbenv_set_feedback(dbenv, feedback)
+ DB_ENV *dbenv;
+ void (*feedback) __P((DB_ENV *, int, int));
+{
+ dbenv->db_feedback = feedback;
+ return (0);
+}
+
+static void
+__dbenv_set_noticecall(dbenv, noticecall)
+ DB_ENV *dbenv;
+ void (*noticecall) __P((DB_ENV *, db_notices));
+{
+ dbenv->db_noticecall = noticecall;
+}
+
+static int
+__dbenv_set_paniccall(dbenv, paniccall)
+ DB_ENV *dbenv;
+ void (*paniccall) __P((DB_ENV *, int));
+{
+ dbenv->db_paniccall = paniccall;
+ return (0);
+}
+
+static int
+__dbenv_set_shm_key(dbenv, shm_key)
+ DB_ENV *dbenv;
+ long shm_key; /* !!!: really a key_t. */
+{
+ ENV_ILLEGAL_AFTER_OPEN(dbenv, "set_shm_key");
+
+ dbenv->shm_key = shm_key;
+ return (0);
+}
+
+static int
+__dbenv_set_tas_spins(dbenv, tas_spins)
+ DB_ENV *dbenv;
+ u_int32_t tas_spins;
+{
+ dbenv->tas_spins = tas_spins;
+ return (0);
+}
+
+static int
+__dbenv_set_tmp_dir(dbenv, dir)
+ DB_ENV *dbenv;
+ const char *dir;
+{
+ if (dbenv->db_tmp_dir != NULL)
+ __os_free(dbenv, dbenv->db_tmp_dir);
+ return (__os_strdup(dbenv, dir, &dbenv->db_tmp_dir));
+}
+
+static int
+__dbenv_set_verbose(dbenv, which, onoff)
+ DB_ENV *dbenv;
+ u_int32_t which;
+ int onoff;
+{
+ switch (which) {
+ case DB_VERB_CHKPOINT:
+ case DB_VERB_DEADLOCK:
+ case DB_VERB_RECOVERY:
+ case DB_VERB_REPLICATION:
+ case DB_VERB_WAITSFOR:
+ if (onoff)
+ FLD_SET(dbenv->verbose, which);
+ else
+ FLD_CLR(dbenv->verbose, which);
+ break;
+ default:
+ return (EINVAL);
+ }
+ return (0);
+}
+
+/*
+ * __db_mi_env --
+ * Method illegally called with public environment.
+ *
+ * PUBLIC: int __db_mi_env __P((DB_ENV *, const char *));
+ */
+int
+__db_mi_env(dbenv, name)
+ DB_ENV *dbenv;
+ const char *name;
+{
+ __db_err(dbenv, "%s: method not permitted in shared environment", name);
+ return (EINVAL);
+}
+
+/*
+ * __db_mi_open --
+ * Method illegally called after open.
+ *
+ * PUBLIC: int __db_mi_open __P((DB_ENV *, const char *, int));
+ */
+int
+__db_mi_open(dbenv, name, after)
+ DB_ENV *dbenv;
+ const char *name;
+ int after;
+{
+ __db_err(dbenv, "%s: method not permitted %s open",
+ name, after ? "after" : "before");
+ return (EINVAL);
+}
+
+/*
+ * __db_env_config --
+ * Method or function called without required configuration.
+ *
+ * PUBLIC: int __db_env_config __P((DB_ENV *, char *, u_int32_t));
+ */
+int
+__db_env_config(dbenv, i, flags)
+ DB_ENV *dbenv;
+ char *i;
+ u_int32_t flags;
+{
+ char *sub;
+
+ switch (flags) {
+ case DB_INIT_LOCK:
+ sub = "locking";
+ break;
+ case DB_INIT_LOG:
+ sub = "logging";
+ break;
+ case DB_INIT_MPOOL:
+ sub = "memory pool";
+ break;
+ case DB_INIT_TXN:
+ sub = "transaction";
+ break;
+ default:
+ sub = "<unspecified>";
+ break;
+ }
+ __db_err(dbenv,
+ "%s interface requires an environment configured for the %s subsystem",
+ i, sub);
+ return (EINVAL);
+}
+
+static int
+__dbenv_set_rpc_server_noclnt(dbenv, cl, host, tsec, ssec, flags)
+ DB_ENV *dbenv;
+ void *cl;
+ const char *host;
+ long tsec, ssec;
+ u_int32_t flags;
+{
+ COMPQUIET(host, NULL);
+ COMPQUIET(cl, NULL);
+ COMPQUIET(tsec, 0);
+ COMPQUIET(ssec, 0);
+ COMPQUIET(flags, 0);
+
+ __db_err(dbenv,
+ "set_rpc_server method not permitted in non-RPC environment");
+ return (__db_eopnotsup(dbenv));
+}
diff --git a/bdb/env/env_open.c b/bdb/env/env_open.c
index 2007b4266c0..ae8399f61cd 100644
--- a/bdb/env/env_open.c
+++ b/bdb/env/env_open.c
@@ -1,14 +1,14 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Copyright (c) 1996-2002
* Sleepycat Software. All rights reserved.
*/
#include "db_config.h"
#ifndef lint
-static const char revid[] = "$Id: env_open.c,v 11.34 2000/12/21 19:20:00 bostic Exp $";
+static const char revid[] = "$Id: env_open.c,v 11.111 2002/09/03 01:20:51 mjc Exp $";
#endif /* not lint */
#ifndef NO_SYSTEM_INCLUDES
@@ -21,26 +21,30 @@ static const char revid[] = "$Id: env_open.c,v 11.34 2000/12/21 19:20:00 bostic
#endif
#include "db_int.h"
-#include "db_page.h"
-#include "db_shash.h"
-#include "btree.h"
-#include "hash.h"
-#include "qam.h"
-#include "lock.h"
-#include "log.h"
-#include "mp.h"
-#include "txn.h"
-#include "clib_ext.h"
+#include "dbinc/crypto.h"
+#include "dbinc/db_page.h"
+#include "dbinc/db_shash.h"
+#include "dbinc/btree.h"
+#include "dbinc/hash.h"
+#include "dbinc/lock.h"
+#include "dbinc/log.h"
+#include "dbinc/mp.h"
+#include "dbinc/qam.h"
+#include "dbinc/rep.h"
+#include "dbinc/txn.h"
+#include "dbinc/fop.h"
-static int __dbenv_config __P((DB_ENV *, const char *, u_int32_t));
-static int __dbenv_refresh __P((DB_ENV *));
-static int __db_home __P((DB_ENV *, const char *, u_int32_t));
static int __db_parse __P((DB_ENV *, char *));
static int __db_tmp_open __P((DB_ENV *, u_int32_t, char *, DB_FH *));
+static int __dbenv_config __P((DB_ENV *, const char *, u_int32_t));
+static int __dbenv_iremove __P((DB_ENV *, const char *, u_int32_t));
+static int __dbenv_refresh __P((DB_ENV *, u_int32_t));
/*
* db_version --
* Return version information.
+ *
+ * EXTERN: char *db_version __P((int *, int *, int *));
*/
char *
db_version(majverp, minverp, patchp)
@@ -68,9 +72,11 @@ __dbenv_open(dbenv, db_home, flags, mode)
u_int32_t flags;
int mode;
{
- DB_ENV *rm_dbenv;
+ DB_MPOOL *dbmp;
int ret;
- u_int32_t init_flags;
+ u_int32_t init_flags, orig_flags;
+
+ orig_flags = dbenv->flags;
#undef OKFLAGS
#define OKFLAGS \
@@ -86,7 +92,7 @@ __dbenv_open(dbenv, db_home, flags, mode)
/*
* Flags saved in the init_flags field of the environment, representing
- * flags to DBENV->set_flags and DBENV->open that need to be set.
+ * flags to DB_ENV->set_flags and DB_ENV->open that need to be set.
*/
#define DB_INITENV_CDB 0x0001 /* DB_INIT_CDB */
#define DB_INITENV_CDB_ALLDB 0x0002 /* DB_INIT_CDB_ALLDB */
@@ -95,20 +101,36 @@ __dbenv_open(dbenv, db_home, flags, mode)
#define DB_INITENV_MPOOL 0x0010 /* DB_INIT_MPOOL */
#define DB_INITENV_TXN 0x0020 /* DB_INIT_TXN */
- if ((ret = __db_fchk(dbenv, "DBENV->open", flags, OKFLAGS)) != 0)
+ if ((ret = __db_fchk(dbenv, "DB_ENV->open", flags, OKFLAGS)) != 0)
return (ret);
if (LF_ISSET(DB_INIT_CDB) &&
- (ret = __db_fchk(dbenv, "DBENV->open", flags, OKFLAGS_CDB)) != 0)
+ (ret = __db_fchk(dbenv, "DB_ENV->open", flags, OKFLAGS_CDB)) != 0)
return (ret);
if ((ret = __db_fcchk(dbenv,
- "DBENV->open", flags, DB_PRIVATE, DB_SYSTEM_MEM)) != 0)
+ "DB_ENV->open", flags, DB_PRIVATE, DB_SYSTEM_MEM)) != 0)
return (ret);
- if ((ret = __db_fcchk(dbenv, "DBENV->open", flags, DB_JOINENV,
+ if ((ret = __db_fcchk(dbenv,
+ "DB_ENV->open", flags, DB_RECOVER, DB_RECOVER_FATAL)) != 0)
+ return (ret);
+ if ((ret = __db_fcchk(dbenv, "DB_ENV->open", flags, DB_JOINENV,
DB_CREATE | DB_INIT_LOCK | DB_INIT_LOG | DB_INIT_MPOOL |
DB_INIT_TXN | DB_PRIVATE)) != 0)
return (ret);
/*
+ * Currently we support one kind of mutex that is intra-process only,
+ * POSIX 1003.1 pthreads, because a variety of systems don't support
+ * the full pthreads API, and our only alternative is test-and-set.
+ */
+#ifdef HAVE_MUTEX_THREAD_ONLY
+ if (!LF_ISSET(DB_PRIVATE)) {
+ __db_err(dbenv,
+ "Berkeley DB library configured to support only DB_PRIVATE environments");
+ return (EINVAL);
+ }
+#endif
+
+ /*
* If we're doing recovery, destroy the environment so that we create
* all the regions from scratch. I'd like to reuse already created
* regions, but that's hard. We would have to create the environment
@@ -126,24 +148,24 @@ __dbenv_open(dbenv, db_home, flags, mode)
* not, we just want to nail any files that are left-over for whatever
* reason, from whatever session.
*/
- if (LF_ISSET(DB_RECOVER | DB_RECOVER_FATAL)) {
- if ((ret = db_env_create(&rm_dbenv, 0)) != 0)
- return (ret);
- if ((ret = dbenv->remove(rm_dbenv, db_home, DB_FORCE)) != 0)
+ if (LF_ISSET(DB_RECOVER | DB_RECOVER_FATAL))
+ if ((ret = __dbenv_iremove(dbenv, db_home, DB_FORCE)) != 0 ||
+ (ret = __dbenv_refresh(dbenv, orig_flags)) != 0)
return (ret);
- }
/* Initialize the DB_ENV structure. */
if ((ret = __dbenv_config(dbenv, db_home, flags)) != 0)
goto err;
- /* Convert the DBENV->open flags to internal flags. */
+ /* Convert the DB_ENV->open flags to internal flags. */
if (LF_ISSET(DB_CREATE))
F_SET(dbenv, DB_ENV_CREATE);
if (LF_ISSET(DB_LOCKDOWN))
F_SET(dbenv, DB_ENV_LOCKDOWN);
if (LF_ISSET(DB_PRIVATE))
F_SET(dbenv, DB_ENV_PRIVATE);
+ if (LF_ISSET(DB_RECOVER_FATAL))
+ F_SET(dbenv, DB_ENV_FATAL);
if (LF_ISSET(DB_SYSTEM_MEM))
F_SET(dbenv, DB_ENV_SYSTEM_MEM);
if (LF_ISSET(DB_THREAD))
@@ -194,20 +216,6 @@ __dbenv_open(dbenv, db_home, flags, mode)
F_SET(dbenv, DB_ENV_CDB);
}
- /* Initialize the DB list, and its mutex if appropriate. */
- LIST_INIT(&dbenv->dblist);
- if (F_ISSET(dbenv, DB_ENV_THREAD)) {
- if ((ret = __db_mutex_alloc(dbenv,
- dbenv->reginfo, (MUTEX **)&dbenv->dblist_mutexp)) != 0)
- return (ret);
- if ((ret = __db_mutex_init(dbenv,
- dbenv->dblist_mutexp, 0, MUTEX_THREAD)) != 0) {
- __db_mutex_free(dbenv, dbenv->reginfo,
- dbenv->dblist_mutexp);
- return (ret);
- }
- }
-
/*
* Initialize the subsystems. Transactions imply logging but do not
* imply locking. While almost all applications want both locking
@@ -215,9 +223,24 @@ __dbenv_open(dbenv, db_home, flags, mode)
* process to want transactions for atomicity guarantees, but not
* necessarily need concurrency.
*/
+
if (LF_ISSET(DB_INIT_MPOOL))
if ((ret = __memp_open(dbenv)) != 0)
goto err;
+
+#ifdef HAVE_CRYPTO
+ /*
+ * Initialize the ciphering area prior to any running of recovery so
+ * that we can initialize the keys, etc. before recovery.
+ *
+ * !!!
+ * This must be after the mpool init, but before the log initialization
+ * because log_open may attempt to run log_recover during its open.
+ */
+ if ((ret = __crypto_region_init(dbenv)) != 0)
+ goto err;
+#endif
+
if (LF_ISSET(DB_INIT_LOG | DB_INIT_TXN))
if ((ret = __log_open(dbenv)) != 0)
goto err;
@@ -232,38 +255,98 @@ __dbenv_open(dbenv, db_home, flags, mode)
* If the application is running with transactions, initialize
* the function tables.
*/
- if ((ret = __bam_init_recover(dbenv)) != 0)
+ if ((ret = __bam_init_recover(dbenv, &dbenv->recover_dtab,
+ &dbenv->recover_dtab_size)) != 0)
goto err;
- if ((ret = __crdel_init_recover(dbenv)) != 0)
+ if ((ret = __crdel_init_recover(dbenv, &dbenv->recover_dtab,
+ &dbenv->recover_dtab_size)) != 0)
goto err;
- if ((ret = __db_init_recover(dbenv)) != 0)
+ if ((ret = __db_init_recover(dbenv, &dbenv->recover_dtab,
+ &dbenv->recover_dtab_size)) != 0)
goto err;
- if ((ret = __ham_init_recover(dbenv)) != 0)
+ if ((ret = __dbreg_init_recover(dbenv, &dbenv->recover_dtab,
+ &dbenv->recover_dtab_size)) != 0)
goto err;
- if ((ret = __log_init_recover(dbenv)) != 0)
+ if ((ret = __fop_init_recover(dbenv, &dbenv->recover_dtab,
+ &dbenv->recover_dtab_size)) != 0)
goto err;
- if ((ret = __qam_init_recover(dbenv)) != 0)
+ if ((ret = __ham_init_recover(dbenv, &dbenv->recover_dtab,
+ &dbenv->recover_dtab_size)) != 0)
goto err;
- if ((ret = __txn_init_recover(dbenv)) != 0)
+ if ((ret = __qam_init_recover(dbenv, &dbenv->recover_dtab,
+ &dbenv->recover_dtab_size)) != 0)
goto err;
-
- /*
- * If the application specified their own recovery
- * initialization function, call it.
- */
- if (dbenv->db_recovery_init != NULL &&
- (ret = dbenv->db_recovery_init(dbenv)) != 0)
+ if ((ret = __txn_init_recover(dbenv, &dbenv->recover_dtab,
+ &dbenv->recover_dtab_size)) != 0)
goto err;
/* Perform recovery for any previous run. */
if (LF_ISSET(DB_RECOVER | DB_RECOVER_FATAL) &&
- (ret = __db_apprec(dbenv,
+ (ret = __db_apprec(dbenv, NULL,
LF_ISSET(DB_RECOVER | DB_RECOVER_FATAL))) != 0)
goto err;
}
+
+ /* Initialize the replication area just in case. */
+ if ((ret = __rep_region_init(dbenv)) != 0)
+ goto err;
+
+ /*
+ * Initialize the DB list, and its mutex as necessary. If the env
+ * handle isn't free-threaded we don't need a mutex because there
+ * will never be more than a single DB handle on the list. If the
+ * mpool wasn't initialized, then we can't ever open a DB handle.
+ *
+ * We also need to initialize the MT mutex as necessary, so do them
+ * both. If we error, __dbenv_refresh() will clean up.
+ *
+ * !!!
+ * This must come after the __memp_open call above because if we are
+ * recording mutexes for system resources, we will do it in the mpool
+ * region for environments and db handles. So, the mpool region must
+ * already be initialized.
+ */
+ LIST_INIT(&dbenv->dblist);
+ if (F_ISSET(dbenv, DB_ENV_THREAD) && LF_ISSET(DB_INIT_MPOOL)) {
+ dbmp = dbenv->mp_handle;
+ if ((ret = __db_mutex_setup(
+ dbenv, dbmp->reginfo, &dbenv->dblist_mutexp,
+ MUTEX_ALLOC | MUTEX_THREAD)) != 0)
+ goto err;
+ if ((ret = __db_mutex_setup(
+ dbenv, dbmp->reginfo, &dbenv->mt_mutexp,
+ MUTEX_ALLOC | MUTEX_THREAD)) != 0)
+ goto err;
+ }
+
+ /*
+ * If we've created the regions, are running with transactions, and did
+ * not just run recovery, we need to log the fact that the transaction
+ * IDs got reset.
+ *
+ * If we ran recovery, there may be prepared-but-not-yet-committed
+ * transactions that need to be resolved. Recovery resets the minimum
+ * transaction ID and logs the reset if that's appropriate, so we
+ * don't need to do anything here in the recover case.
+ */
+ if (TXN_ON(dbenv) &&
+ F_ISSET((REGINFO *)dbenv->reginfo, REGION_CREATE) &&
+ !LF_ISSET(DB_RECOVER | DB_RECOVER_FATAL) &&
+ (ret = __txn_reset(dbenv)) != 0)
+ goto err;
+
return (0);
-err: (void)__dbenv_refresh(dbenv);
+err: /* If we fail after creating the regions, remove them. */
+ if (dbenv->reginfo != NULL &&
+ F_ISSET((REGINFO *)dbenv->reginfo, REGION_CREATE)) {
+ ret = __db_panic(dbenv, ret);
+
+ (void)__dbenv_refresh(dbenv, orig_flags);
+ (void)__dbenv_iremove(dbenv, db_home, DB_FORCE);
+ }
+ (void)__dbenv_refresh(dbenv, orig_flags);
+
return (ret);
}
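
The recovery hunk above now hands each access method a pointer to the per-environment dispatch table (dbenv->recover_dtab) instead of a global one, so every DB_ENV carries its own set of recovery handlers. The sketch below shows that append-and-grow registration pattern in isolation; the function and type names are hypothetical and do not match the real __db_add_recovery signature.

/* Sketch only (hypothetical names): appending recovery handlers to a
 * per-environment dispatch table that grows on demand. */
#include <stdio.h>
#include <stdlib.h>

typedef int (*recover_fn)(int op);

static int
add_recover(recover_fn **tab, size_t *sizep, recover_fn fn)
{
	recover_fn *ntab;

	if ((ntab = realloc(*tab, (*sizep + 1) * sizeof(*ntab))) == NULL)
		return (1);
	ntab[(*sizep)++] = fn;
	*tab = ntab;
	return (0);
}

static int btree_recover(int op) { (void)op; return (0); }
static int hash_recover(int op)  { (void)op; return (0); }

int
main(void)
{
	recover_fn *dtab = NULL;
	size_t dtab_size = 0;

	/* Mirrors __bam_init_recover()/__ham_init_recover() filling
	 * dbenv->recover_dtab before recovery runs. */
	if (add_recover(&dtab, &dtab_size, btree_recover) != 0 ||
	    add_recover(&dtab, &dtab_size, hash_recover) != 0)
		return (1);
	printf("%zu recovery handlers registered\n", dtab_size);
	free(dtab);
	return (0);
}
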
@@ -281,40 +364,42 @@ __dbenv_remove(dbenv, db_home, flags)
{
int ret, t_ret;
+ ret = __dbenv_iremove(dbenv, db_home, flags);
+
+ if ((t_ret = dbenv->close(dbenv, 0)) != 0 && ret == 0)
+ ret = t_ret;
+
+ return (ret);
+}
+
+/*
+ * __dbenv_iremove --
+ * Discard an environment, internal version.
+ */
+static int
+__dbenv_iremove(dbenv, db_home, flags)
+ DB_ENV *dbenv;
+ const char *db_home;
+ u_int32_t flags;
+{
+ int ret;
+
#undef OKFLAGS
#define OKFLAGS \
DB_FORCE | DB_USE_ENVIRON | DB_USE_ENVIRON_ROOT
/* Validate arguments. */
- if ((ret = __db_fchk(dbenv, "DBENV->remove", flags, OKFLAGS)) != 0)
- goto err;
+ if ((ret = __db_fchk(dbenv, "DB_ENV->remove", flags, OKFLAGS)) != 0)
+ return (ret);
- /*
- * A hard-to-debug error is calling DBENV->remove after open. That's
- * not legal. You have to close the original, already opened handle
- * and then allocate a new DBENV handle to use for DBENV->remove.
- */
- if (F_ISSET(dbenv, DB_ENV_OPEN_CALLED)) {
- __db_err(dbenv,
- "DBENV handle opened, not usable for remove method.");
- return (EINVAL);
- }
+ ENV_ILLEGAL_AFTER_OPEN(dbenv, "DB_ENV->remove");
/* Initialize the DB_ENV structure. */
if ((ret = __dbenv_config(dbenv, db_home, flags)) != 0)
- goto err;
+ return (ret);
/* Remove the environment. */
- ret = __db_e_remove(dbenv, LF_ISSET(DB_FORCE) ? 1 : 0);
-
- /* Discard any resources we've acquired. */
-err: if ((t_ret = __dbenv_refresh(dbenv)) != 0 && ret == 0)
- ret = t_ret;
-
- memset(dbenv, CLEAR_BYTE, sizeof(DB_ENV));
- __os_free(dbenv, sizeof(DB_ENV));
-
- return (ret);
+ return (__db_e_remove(dbenv, flags));
}
/*
@@ -329,53 +414,48 @@ __dbenv_config(dbenv, db_home, flags)
{
FILE *fp;
int ret;
- char *lp, buf[MAXPATHLEN * 2];
+ char *p, buf[256];
- /* Set the database home. */
+ /*
+	 * Set the database home. Do this before calling __db_appname;
+	 * it uses the home directory.
+ */
if ((ret = __db_home(dbenv, db_home, flags)) != 0)
return (ret);
- /*
- * Parse the config file.
- *
- * !!!
- * Don't use sprintf(3)/snprintf(3) -- the former is dangerous, and
- * the latter isn't standard, and we're manipulating strings handed
- * us by the application.
- */
- if (dbenv->db_home != NULL) {
-#define CONFIG_NAME "/DB_CONFIG"
- if (strlen(dbenv->db_home) +
- strlen(CONFIG_NAME) + 1 > sizeof(buf)) {
- ret = ENAMETOOLONG;
- return (ret);
- }
- (void)strcpy(buf, dbenv->db_home);
- (void)strcat(buf, CONFIG_NAME);
- if ((fp = fopen(buf, "r")) != NULL) {
- while (fgets(buf, sizeof(buf), fp) != NULL) {
- if ((lp = strchr(buf, '\n')) == NULL) {
- __db_err(dbenv,
- "%s: line too long", CONFIG_NAME);
- (void)fclose(fp);
- ret = EINVAL;
- return (ret);
- }
- *lp = '\0';
- if (buf[0] == '\0' ||
- buf[0] == '#' || isspace((int)buf[0]))
- continue;
-
- if ((ret = __db_parse(dbenv, buf)) != 0) {
- (void)fclose(fp);
- return (ret);
- }
+ /* Parse the config file. */
+ if ((ret =
+ __db_appname(dbenv, DB_APP_NONE, "DB_CONFIG", 0, NULL, &p)) != 0)
+ return (ret);
+
+ fp = fopen(p, "r");
+ __os_free(dbenv, p);
+
+ if (fp != NULL) {
+ while (fgets(buf, sizeof(buf), fp) != NULL) {
+ if ((p = strchr(buf, '\n')) != NULL)
+ *p = '\0';
+ else if (strlen(buf) + 1 == sizeof(buf)) {
+ __db_err(dbenv, "DB_CONFIG: line too long");
+ (void)fclose(fp);
+ return (EINVAL);
+ }
+ if (buf[0] == '\0' ||
+ buf[0] == '#' || isspace((int)buf[0]))
+ continue;
+
+ if ((ret = __db_parse(dbenv, buf)) != 0) {
+ (void)fclose(fp);
+ return (ret);
}
- (void)fclose(fp);
}
+ (void)fclose(fp);
}
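The truncation test above works because fgets() stores at most sizeof(buf) - 1 characters plus a terminating NUL: if no newline was found and the buffer is exactly full, the input line must have been longer than the buffer. A minimal standalone sketch of the same loop, assuming a generic line handler (read_config and handle are illustrative names, not library functions):

#include <ctype.h>
#include <stdio.h>
#include <string.h>

/* Read NAME VALUE lines, skipping blank lines and '#' comments. */
static int
read_config(FILE *fp, int (*handle)(char *))
{
	char buf[256], *p;

	while (fgets(buf, sizeof(buf), fp) != NULL) {
		if ((p = strchr(buf, '\n')) != NULL)
			*p = '\0';
		else if (strlen(buf) + 1 == sizeof(buf))
			return (-1);	/* Line too long for the buffer. */
		if (buf[0] == '\0' || buf[0] == '#' || isspace((int)buf[0]))
			continue;
		if (handle(buf) != 0)	/* E.g., a parser like __db_parse. */
			return (-1);
	}
	return (0);
}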
- /* Set up the tmp directory path. */
+ /*
+ * If no temporary directory path was specified in the config file,
+ * choose one.
+ */
if (dbenv->db_tmp_dir == NULL && (ret = __os_tmpdir(dbenv, flags)) != 0)
return (ret);
@@ -383,17 +463,12 @@ __dbenv_config(dbenv, db_home, flags)
* The locking file descriptor is rarely on. Set the fd to -1, not
* because it's ever tested, but to make sure we catch mistakes.
*/
- if ((ret =
- __os_calloc(dbenv,
- 1, sizeof(*dbenv->lockfhp), &dbenv->lockfhp)) != 0)
+ if ((ret = __os_calloc(
+ dbenv, 1, sizeof(*dbenv->lockfhp), &dbenv->lockfhp)) != 0)
return (ret);
dbenv->lockfhp->fd = -1;
- /*
- * Flag that the DB_ENV structure has been initialized. Note, this
- * must be set before calling into the subsystems as it's used during
- * file naming.
- */
+ /* Flag that the DB_ENV structure has been initialized. */
F_SET(dbenv, DB_ENV_OPEN_CALLED);
return (0);
@@ -410,66 +485,149 @@ __dbenv_close(dbenv, flags)
DB_ENV *dbenv;
u_int32_t flags;
{
- int ret;
+ char **p;
+ int ret, t_ret;
COMPQUIET(flags, 0);
PANIC_CHECK(dbenv);
+ ret = 0;
+
+ /*
+ * Before checking the reference count, we have to see if we
+ * were in the middle of restoring transactions and need to
+ * close the open files.
+ */
+ if (TXN_ON(dbenv) && (t_ret = __txn_preclose(dbenv)) != 0 && ret == 0)
+ ret = t_ret;
+
+ if (dbenv->rep_handle != NULL &&
+ (t_ret = __rep_preclose(dbenv, 1)) != 0 && ret == 0)
+ ret = t_ret;
+
+ if (dbenv->db_ref != 0) {
+ __db_err(dbenv,
+ "Database handles open during environment close");
+ if (ret == 0)
+ ret = EINVAL;
+ }
- ret = __dbenv_refresh(dbenv);
+ /*
+ * Detach from the regions and undo the allocations done by
+ * DB_ENV->open.
+ */
+ if ((t_ret = __dbenv_refresh(dbenv, 0)) != 0 && ret == 0)
+ ret = t_ret;
- /* Discard the structure if we allocated it. */
- if (!F_ISSET(dbenv, DB_ENV_USER_ALLOC)) {
- memset(dbenv, CLEAR_BYTE, sizeof(DB_ENV));
- __os_free(dbenv, sizeof(DB_ENV));
+ /* Do per-subsystem destruction. */
+ __lock_dbenv_close(dbenv); /* void */
+ if ((t_ret = __rep_dbenv_close(dbenv)) != 0 && ret == 0)
+ ret = t_ret;
+
+#ifdef HAVE_CRYPTO
+ if ((t_ret = __crypto_dbenv_close(dbenv)) != 0 && ret == 0)
+ ret = t_ret;
+#endif
+
+ /* Release any string-based configuration parameters we've copied. */
+ if (dbenv->db_log_dir != NULL)
+ __os_free(dbenv, dbenv->db_log_dir);
+ if (dbenv->db_tmp_dir != NULL)
+ __os_free(dbenv, dbenv->db_tmp_dir);
+ if (dbenv->db_data_dir != NULL) {
+ for (p = dbenv->db_data_dir; *p != NULL; ++p)
+ __os_free(dbenv, *p);
+ __os_free(dbenv, dbenv->db_data_dir);
}
+ /* Discard the structure. */
+ memset(dbenv, CLEAR_BYTE, sizeof(DB_ENV));
+ __os_free(NULL, dbenv);
+
return (ret);
}
/*
* __dbenv_refresh --
- * Refresh the DB_ENV structure, releasing any allocated resources.
+ * Refresh the DB_ENV structure, releasing resources allocated by
+ * DB_ENV->open, and returning it to the state it was in just before
+ * open was called. (Note that this means that any state set by
+ * pre-open configuration functions must be preserved.)
*/
static int
-__dbenv_refresh(dbenv)
+__dbenv_refresh(dbenv, orig_flags)
DB_ENV *dbenv;
+ u_int32_t orig_flags;
{
+ DB_MPOOL *dbmp;
int ret, t_ret;
- char **p;
ret = 0;
/*
* Close subsystems, in the reverse order they were opened (txn
* must be first, it may want to discard locks and flush the log).
+ *
+ * !!!
+ * Note that these functions, like all of __dbenv_refresh, only undo
+ * the effects of __dbenv_open. Functions that undo work done by
+ * db_env_create or by a configurator function should go in
+ * __dbenv_close.
*/
- if (TXN_ON(dbenv)) {
- if ((t_ret = __txn_close(dbenv)) != 0 && ret == 0)
- ret = t_ret;
- }
+ if (TXN_ON(dbenv) &&
+ (t_ret = __txn_dbenv_refresh(dbenv)) != 0 && ret == 0)
+ ret = t_ret;
- if (LOCKING_ON(dbenv)) {
- if ((t_ret = __lock_close(dbenv)) != 0 && ret == 0)
- ret = t_ret;
- }
- __lock_dbenv_close(dbenv);
+ if (LOGGING_ON(dbenv) &&
+ (t_ret = __log_dbenv_refresh(dbenv)) != 0 && ret == 0)
+ ret = t_ret;
- if (LOGGING_ON(dbenv)) {
- if ((t_ret = __log_close(dbenv)) != 0 && ret == 0)
- ret = t_ret;
+ /*
+	 * Locking should come after logging, because closing the log
+	 * results in files being closed, which may require locks to be
+	 * released.
+ */
+ if (LOCKING_ON(dbenv) &&
+ (t_ret = __lock_dbenv_refresh(dbenv)) != 0 && ret == 0)
+ ret = t_ret;
+
+ /*
+ * Discard DB list and its mutex.
+ * Discard the MT mutex.
+ *
+ * !!!
+ * This must be done before we close the mpool region because we
+ * may have allocated the DB handle mutex in the mpool region.
+ * It must be done *after* we close the log region, though, because
+ * we close databases and try to acquire the mutex when we close
+ * log file handles. Ick.
+ */
+ LIST_INIT(&dbenv->dblist);
+ if (dbenv->dblist_mutexp != NULL) {
+ dbmp = dbenv->mp_handle;
+ __db_mutex_free(dbenv, dbmp->reginfo, dbenv->dblist_mutexp);
+ }
+ if (dbenv->mt_mutexp != NULL) {
+ dbmp = dbenv->mp_handle;
+ __db_mutex_free(dbenv, dbmp->reginfo, dbenv->mt_mutexp);
+ }
+ if (dbenv->mt != NULL) {
+ __os_free(dbenv, dbenv->mt);
+ dbenv->mt = NULL;
}
if (MPOOL_ON(dbenv)) {
- if ((t_ret = __memp_close(dbenv)) != 0 && ret == 0)
+ /*
+ * If it's a private environment, flush the contents to disk.
+ * Recovery would have put everything back together, but it's
+ * faster and cleaner to flush instead.
+ */
+ if (F_ISSET(dbenv, DB_ENV_PRIVATE) &&
+ (t_ret = dbenv->memp_sync(dbenv, NULL)) != 0 && ret == 0)
+ ret = t_ret;
+ if ((t_ret = __memp_dbenv_refresh(dbenv)) != 0 && ret == 0)
ret = t_ret;
}
- /* Discard DB list and its mutex. */
- LIST_INIT(&dbenv->dblist);
- if (dbenv->dblist_mutexp != NULL)
- __db_mutex_free(dbenv, dbenv->reginfo, dbenv->dblist_mutexp);
-
/* Detach from the region. */
if (dbenv->reginfo != NULL) {
if ((t_ret = __db_e_detach(dbenv, 0)) != 0 && ret == 0)
@@ -481,51 +639,26 @@ __dbenv_refresh(dbenv)
*/
}
- /* Clean up the structure. */
- dbenv->db_panic = 0;
-
+ /* Undo changes and allocations done by __dbenv_open. */
if (dbenv->db_home != NULL) {
- __os_freestr(dbenv->db_home);
+ __os_free(dbenv, dbenv->db_home);
dbenv->db_home = NULL;
}
- if (dbenv->db_log_dir != NULL) {
- __os_freestr(dbenv->db_log_dir);
- dbenv->db_log_dir = NULL;
- }
- if (dbenv->db_tmp_dir != NULL) {
- __os_freestr(dbenv->db_tmp_dir);
- dbenv->db_tmp_dir = NULL;
- }
- if (dbenv->db_data_dir != NULL) {
- for (p = dbenv->db_data_dir; *p != NULL; ++p)
- __os_freestr(*p);
- __os_free(dbenv->db_data_dir,
- dbenv->data_cnt * sizeof(char **));
- dbenv->db_data_dir = NULL;
- }
- dbenv->data_cnt = dbenv->data_next = 0;
dbenv->db_mode = 0;
if (dbenv->lockfhp != NULL) {
- __os_free(dbenv->lockfhp, sizeof(*dbenv->lockfhp));
+ __os_free(dbenv, dbenv->lockfhp);
dbenv->lockfhp = NULL;
}
- if (dbenv->dtab != NULL) {
- __os_free(dbenv->dtab,
- dbenv->dtab_size * sizeof(dbenv->dtab[0]));
- dbenv->dtab = NULL;
- dbenv->dtab_size = 0;
+ if (dbenv->recover_dtab != NULL) {
+ __os_free(dbenv, dbenv->recover_dtab);
+ dbenv->recover_dtab = NULL;
+ dbenv->recover_dtab_size = 0;
}
- dbenv->mp_mmapsize = 0;
- dbenv->links.tqe_next = NULL;
- dbenv->links.tqe_prev = NULL;
- dbenv->xa_rmid = 0;
- dbenv->xa_txn = 0;
-
- F_CLR(dbenv, ~(DB_ENV_STANDALONE | DB_ENV_USER_ALLOC));
+ dbenv->flags = orig_flags;
return (ret);
}
@@ -550,34 +683,33 @@ __dbenv_refresh(dbenv)
/*
* __db_appname --
* Given an optional DB environment, directory and file name and type
- * of call, build a path based on the DBENV->open rules, and return
+ * of call, build a path based on the DB_ENV->open rules, and return
* it in allocated space.
*
* PUBLIC: int __db_appname __P((DB_ENV *, APPNAME,
- * PUBLIC: const char *, const char *, u_int32_t, DB_FH *, char **));
+ * PUBLIC: const char *, u_int32_t, DB_FH *, char **));
*/
int
-__db_appname(dbenv, appname, dir, file, tmp_oflags, fhp, namep)
+__db_appname(dbenv, appname, file, tmp_oflags, fhp, namep)
DB_ENV *dbenv;
APPNAME appname;
- const char *dir, *file;
+ const char *file;
u_int32_t tmp_oflags;
DB_FH *fhp;
char **namep;
{
- DB_ENV etmp;
size_t len, str_len;
- int data_entry, ret, slash, tmp_create, tmp_free;
- const char *a, *b, *c;
+ int data_entry, ret, slash, tmp_create;
+ const char *a, *b;
char *p, *str;
- a = b = c = NULL;
+ a = b = NULL;
data_entry = -1;
- tmp_create = tmp_free = 0;
+ tmp_create = 0;
/*
- * We don't return a name when creating temporary files, just a
- * file handle. Default to an error now.
+ * We don't return a name when creating temporary files, just a file
+ * handle. Default to an error now.
*/
if (fhp != NULL)
F_CLR(fhp, DB_FH_VALID);
@@ -586,118 +718,49 @@ __db_appname(dbenv, appname, dir, file, tmp_oflags, fhp, namep)
/*
* Absolute path names are never modified. If the file is an absolute
- * path, we're done. If the directory is, simply append the file and
- * return.
+ * path, we're done.
*/
if (file != NULL && __os_abspath(file))
return (__os_strdup(dbenv, file, namep));
- if (dir != NULL && __os_abspath(dir)) {
- a = dir;
- goto done;
- }
- /*
- * DB_ENV DIR APPNAME RESULT
- * -------------------------------------------
- * null null none <tmp>/file
- * null set none DIR/file
- * set null none DB_HOME/file
- * set set none DB_HOME/DIR/file
- *
- * DB_ENV FILE APPNAME RESULT
- * -------------------------------------------
- * null null DB_APP_DATA <tmp>/<create>
- * null set DB_APP_DATA ./file
- * set null DB_APP_DATA <tmp>/<create>
- * set set DB_APP_DATA DB_HOME/DB_DATA_DIR/file
- *
- * DB_ENV DIR APPNAME RESULT
- * -------------------------------------------
- * null null DB_APP_LOG <tmp>/file
- * null set DB_APP_LOG DIR/file
- * set null DB_APP_LOG DB_HOME/DB_LOG_DIR/file
- * set set DB_APP_LOG DB_HOME/DB_LOG_DIR/DIR/file
- *
- * DB_ENV APPNAME RESULT
- * -------------------------------------------
- * null DB_APP_TMP* <tmp>/<create>
- * set DB_APP_TMP* DB_HOME/DB_TMP_DIR/<create>
+ /* Everything else is relative to the environment home. */
+ if (dbenv != NULL)
+ a = dbenv->db_home;
+
+retry: /*
+ * DB_APP_NONE:
+ * DB_HOME/file
+ * DB_APP_DATA:
+ * DB_HOME/DB_DATA_DIR/file
+ * DB_APP_LOG:
+ * DB_HOME/DB_LOG_DIR/file
+ * DB_APP_TMP:
+ * DB_HOME/DB_TMP_DIR/<create>
*/
-retry: switch (appname) {
+ switch (appname) {
case DB_APP_NONE:
- if (dbenv == NULL || !F_ISSET(dbenv, DB_ENV_OPEN_CALLED)) {
- if (dir == NULL)
- goto tmp;
- a = dir;
- } else {
- a = dbenv->db_home;
- b = dir;
- }
break;
case DB_APP_DATA:
- if (dir != NULL) {
- __db_err(dbenv,
- "DB_APP_DATA: illegal directory specification");
- return (EINVAL);
- }
-
- if (file == NULL) {
- tmp_create = 1;
- goto tmp;
- }
- if (dbenv != NULL && F_ISSET(dbenv, DB_ENV_OPEN_CALLED)) {
- a = dbenv->db_home;
- if (dbenv->db_data_dir != NULL &&
- (b = dbenv->db_data_dir[++data_entry]) == NULL) {
- data_entry = -1;
- b = dbenv->db_data_dir[0];
- }
+ if (dbenv != NULL && dbenv->db_data_dir != NULL &&
+ (b = dbenv->db_data_dir[++data_entry]) == NULL) {
+ data_entry = -1;
+ b = dbenv->db_data_dir[0];
}
break;
case DB_APP_LOG:
- if (dbenv == NULL || !F_ISSET(dbenv, DB_ENV_OPEN_CALLED)) {
- if (dir == NULL)
- goto tmp;
- a = dir;
- } else {
- a = dbenv->db_home;
+ if (dbenv != NULL)
b = dbenv->db_log_dir;
- c = dir;
- }
break;
case DB_APP_TMP:
- if (dir != NULL || file != NULL) {
- __db_err(dbenv,
- "DB_APP_TMP: illegal directory or file specification");
- return (EINVAL);
- }
-
- tmp_create = 1;
- if (dbenv == NULL || !F_ISSET(dbenv, DB_ENV_OPEN_CALLED))
- goto tmp;
- else {
- a = dbenv->db_home;
+ if (dbenv != NULL)
b = dbenv->db_tmp_dir;
- }
+ tmp_create = 1;
break;
}
- /* Reference a file from the appropriate temporary directory. */
- if (0) {
-tmp: if (dbenv == NULL || !F_ISSET(dbenv, DB_ENV_OPEN_CALLED)) {
- memset(&etmp, 0, sizeof(etmp));
- if ((ret = __os_tmpdir(&etmp, DB_USE_ENVIRON)) != 0)
- return (ret);
- tmp_free = 1;
- a = etmp.db_tmp_dir;
- } else
- a = dbenv->db_tmp_dir;
- }
-
-done: len =
+ len =
(a == NULL ? 0 : strlen(a) + 1) +
(b == NULL ? 0 : strlen(b) + 1) +
- (c == NULL ? 0 : strlen(c) + 1) +
(file == NULL ? 0 : strlen(file) + 1);
/*
@@ -707,11 +770,8 @@ done: len =
*/
#define DB_TRAIL "BDBXXXXXX"
str_len = len + sizeof(DB_TRAIL) + 10;
- if ((ret = __os_malloc(dbenv, str_len, NULL, &str)) != 0) {
- if (tmp_free)
- __os_freestr(etmp.db_tmp_dir);
+ if ((ret = __os_malloc(dbenv, str_len, &str)) != 0)
return (ret);
- }
slash = 0;
p = str;
@@ -720,31 +780,25 @@ done: len =
DB_ADDSTR(file);
*p = '\0';
- /* Discard any space allocated to find the temp directory. */
- if (tmp_free) {
- __os_freestr(etmp.db_tmp_dir);
- tmp_free = 0;
- }
-
/*
* If we're opening a data file, see if it exists. If it does,
 * return it; otherwise, try to find another one to open.
*/
- if (data_entry != -1 && __os_exists(str, NULL) != 0) {
- __os_free(str, str_len);
- a = b = c = NULL;
+ if (__os_exists(str, NULL) != 0 && data_entry != -1) {
+ __os_free(dbenv, str);
+ b = NULL;
goto retry;
}
/* Create the file if so requested. */
if (tmp_create &&
(ret = __db_tmp_open(dbenv, tmp_oflags, str, fhp)) != 0) {
- __os_free(str, str_len);
+ __os_free(dbenv, str);
return (ret);
}
if (namep == NULL)
- __os_free(str, str_len);
+ __os_free(dbenv, str);
else
*namep = str;
return (0);
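To make the DB_APP_* rules above concrete, here is a standalone sketch of how the home directory, the per-type subdirectory and the file name are joined once the switch has selected a and b. It is a simplified illustration using plain libc calls rather than the __os_* wrappers, and it leaves out both the DB_APP_DATA behavior of cycling through the db_data_dir entries until an existing file is found and the temporary-file creation path.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Simplified sketch: join home, optional subdir and file with '/'. */
static char *
join_path(const char *home, const char *subdir, const char *file)
{
	size_t len;
	char *p;

	/* Absolute file names are never modified. */
	if (file != NULL && file[0] == '/')
		return (strdup(file));

	len = (home == NULL ? 0 : strlen(home) + 1) +
	    (subdir == NULL ? 0 : strlen(subdir) + 1) +
	    (file == NULL ? 0 : strlen(file) + 1);
	if ((p = malloc(len + 1)) == NULL)
		return (NULL);
	p[0] = '\0';
	if (home != NULL)
		(void)strcat(p, home);
	if (subdir != NULL) {
		if (p[0] != '\0')
			(void)strcat(p, "/");
		(void)strcat(p, subdir);
	}
	if (file != NULL) {
		if (p[0] != '\0')
			(void)strcat(p, "/");
		(void)strcat(p, file);
	}
	return (p);
}

int
main()
{
	/* DB_APP_LOG with a log directory configured, for example. */
	char *name = join_path("/var/dbhome", "logs", "log.0000000001");

	printf("%s\n", name);	/* /var/dbhome/logs/log.0000000001 */
	free(name);
	return (0);
}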
@@ -753,8 +807,10 @@ done: len =
/*
* __db_home --
* Find the database home.
+ *
+ * PUBLIC: int __db_home __P((DB_ENV *, const char *, u_int32_t));
*/
-static int
+int
__db_home(dbenv, db_home, flags)
DB_ENV *dbenv;
const char *db_home;
@@ -779,6 +835,13 @@ __db_home(dbenv, db_home, flags)
return (p == NULL ? 0 : __os_strdup(dbenv, p, &dbenv->db_home));
}
+#define __DB_OVFL(v, max) \
+ if (v > max) { \
+ __v = v; \
+ __max = max; \
+ goto toobig; \
+ }
+
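This macro is the guard applied before each narrowing cast in the handlers below. A minimal standalone equivalent of the pattern (parse into an unsigned long, reject anything that will not fit the 32-bit target, then cast) might look like the following; parse_u32 is an illustrative name, and it uses the standard UINT32_MAX rather than the library's internal UINT32_T_MAX:

#include <stdint.h>
#include <stdio.h>

/* Parse a 32-bit value from a DB_CONFIG-style string, rejecting overflow. */
static int
parse_u32(const char *value, uint32_t *resultp)
{
	unsigned long v;
	char trailing;

	if (sscanf(value, "%lu %c", &v, &trailing) != 1)
		return (-1);		/* Mis-formatted or trailing text. */
	if (v > UINT32_MAX)
		return (-1);		/* Larger than the maximum value. */
	*resultp = (uint32_t)v;
	return (0);
}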
/*
* __db_parse --
* Parse a single NAME VALUE pair.
@@ -788,7 +851,7 @@ __db_parse(dbenv, s)
DB_ENV *dbenv;
char *s;
{
- u_long v1, v2, v3;
+ u_long __max, __v, v1, v2, v3;
u_int32_t flags;
char *name, *p, *value, v4;
@@ -831,7 +894,11 @@ illegal: __db_err(dbenv, "mis-formatted name-value pair: %s", s);
if (!strcasecmp(name, "set_cachesize")) {
if (sscanf(value, "%lu %lu %lu %c", &v1, &v2, &v3, &v4) != 3)
goto badarg;
- return (dbenv->set_cachesize(dbenv, v1, v2, v3));
+ __DB_OVFL(v1, UINT32_T_MAX);
+ __DB_OVFL(v2, UINT32_T_MAX);
+ __DB_OVFL(v3, 10000);
+ return (dbenv->set_cachesize(
+ dbenv, (u_int32_t)v1, (u_int32_t)v2, (int)v3));
}
if (!strcasecmp(name, "set_data_dir") ||
@@ -844,23 +911,49 @@ illegal: __db_err(dbenv, "mis-formatted name-value pair: %s", s);
if (!strcasecmp(value, "db_cdb_alldb"))
return (dbenv->set_flags(dbenv, DB_CDB_ALLDB, 1));
+ if (!strcasecmp(value, "db_direct_db"))
+ return (dbenv->set_flags(dbenv, DB_DIRECT_DB, 1));
+ if (!strcasecmp(value, "db_direct_log"))
+ return (dbenv->set_flags(dbenv, DB_DIRECT_LOG, 1));
+ if (!strcasecmp(value, "db_nolocking"))
+ return (dbenv->set_flags(dbenv, DB_NOLOCKING, 1));
if (!strcasecmp(value, "db_nommap"))
return (dbenv->set_flags(dbenv, DB_NOMMAP, 1));
+ if (!strcasecmp(value, "db_overwrite"))
+ return (dbenv->set_flags(dbenv, DB_OVERWRITE, 1));
+ if (!strcasecmp(value, "db_nopanic"))
+ return (dbenv->set_flags(dbenv, DB_NOPANIC, 1));
+ if (!strcasecmp(value, "db_region_init"))
+ return (dbenv->set_flags(dbenv, DB_REGION_INIT, 1));
if (!strcasecmp(value, "db_txn_nosync"))
return (dbenv->set_flags(dbenv, DB_TXN_NOSYNC, 1));
+ if (!strcasecmp(value, "db_txn_write_nosync"))
+ return (
+ dbenv->set_flags(dbenv, DB_TXN_WRITE_NOSYNC, 1));
+ if (!strcasecmp(value, "db_yieldcpu"))
+ return (dbenv->set_flags(dbenv, DB_YIELDCPU, 1));
goto badarg;
}
if (!strcasecmp(name, "set_lg_bsize")) {
if (sscanf(value, "%lu %c", &v1, &v4) != 1)
goto badarg;
- return (dbenv->set_lg_bsize(dbenv, v1));
+ __DB_OVFL(v1, UINT32_T_MAX);
+ return (dbenv->set_lg_bsize(dbenv, (u_int32_t)v1));
}
if (!strcasecmp(name, "set_lg_max")) {
if (sscanf(value, "%lu %c", &v1, &v4) != 1)
goto badarg;
- return (dbenv->set_lg_max(dbenv, v1));
+ __DB_OVFL(v1, UINT32_T_MAX);
+ return (dbenv->set_lg_max(dbenv, (u_int32_t)v1));
+ }
+
+ if (!strcasecmp(name, "set_lg_regionmax")) {
+ if (sscanf(value, "%lu %c", &v1, &v4) != 1)
+ goto badarg;
+ __DB_OVFL(v1, UINT32_T_MAX);
+ return (dbenv->set_lg_regionmax(dbenv, (u_int32_t)v1));
}
if (!strcasecmp(name, "set_lg_dir") ||
@@ -872,6 +965,14 @@ illegal: __db_err(dbenv, "mis-formatted name-value pair: %s", s);
goto badarg;
if (!strcasecmp(value, "db_lock_default"))
flags = DB_LOCK_DEFAULT;
+ else if (!strcasecmp(value, "db_lock_expire"))
+ flags = DB_LOCK_EXPIRE;
+ else if (!strcasecmp(value, "db_lock_maxlocks"))
+ flags = DB_LOCK_MAXLOCKS;
+ else if (!strcasecmp(value, "db_lock_minlocks"))
+ flags = DB_LOCK_MINLOCKS;
+ else if (!strcasecmp(value, "db_lock_minwrite"))
+ flags = DB_LOCK_MINWRITE;
else if (!strcasecmp(value, "db_lock_oldest"))
flags = DB_LOCK_OLDEST;
else if (!strcasecmp(value, "db_lock_random"))
@@ -886,37 +987,51 @@ illegal: __db_err(dbenv, "mis-formatted name-value pair: %s", s);
if (!strcasecmp(name, "set_lk_max")) {
if (sscanf(value, "%lu %c", &v1, &v4) != 1)
goto badarg;
- return (dbenv->set_lk_max(dbenv, v1));
+ __DB_OVFL(v1, UINT32_T_MAX);
+ return (dbenv->set_lk_max(dbenv, (u_int32_t)v1));
}
if (!strcasecmp(name, "set_lk_max_locks")) {
if (sscanf(value, "%lu %c", &v1, &v4) != 1)
goto badarg;
- return (dbenv->set_lk_max_locks(dbenv, v1));
+ __DB_OVFL(v1, UINT32_T_MAX);
+ return (dbenv->set_lk_max_locks(dbenv, (u_int32_t)v1));
}
if (!strcasecmp(name, "set_lk_max_lockers")) {
if (sscanf(value, "%lu %c", &v1, &v4) != 1)
goto badarg;
- return (dbenv->set_lk_max_lockers(dbenv, v1));
+ __DB_OVFL(v1, UINT32_T_MAX);
+ return (dbenv->set_lk_max_lockers(dbenv, (u_int32_t)v1));
}
if (!strcasecmp(name, "set_lk_max_objects")) {
if (sscanf(value, "%lu %c", &v1, &v4) != 1)
goto badarg;
- return (dbenv->set_lk_max_objects(dbenv, v1));
+ __DB_OVFL(v1, UINT32_T_MAX);
+ return (dbenv->set_lk_max_objects(dbenv, (u_int32_t)v1));
+ }
+
+ if (!strcasecmp(name, "set_lock_timeout")) {
+ if (sscanf(value, "%lu %c", &v1, &v4) != 1)
+ goto badarg;
+ __DB_OVFL(v1, UINT32_T_MAX);
+ return (dbenv->set_timeout(
+ dbenv, (u_int32_t)v1, DB_SET_LOCK_TIMEOUT));
}
if (!strcasecmp(name, "set_mp_mmapsize")) {
if (sscanf(value, "%lu %c", &v1, &v4) != 1)
goto badarg;
- return (dbenv->set_mp_mmapsize(dbenv, v1));
+ __DB_OVFL(v1, UINT32_T_MAX);
+ return (dbenv->set_mp_mmapsize(dbenv, (u_int32_t)v1));
}
if (!strcasecmp(name, "set_region_init")) {
if (sscanf(value, "%lu %c", &v1, &v4) != 1 || v1 != 1)
goto badarg;
- return (db_env_set_region_init(v1));
+ return (dbenv->set_flags(
+ dbenv, DB_REGION_INIT, v1 == 0 ? 0 : 1));
}
if (!strcasecmp(name, "set_shm_key")) {
@@ -928,7 +1043,8 @@ illegal: __db_err(dbenv, "mis-formatted name-value pair: %s", s);
if (!strcasecmp(name, "set_tas_spins")) {
if (sscanf(value, "%lu %c", &v1, &v4) != 1)
goto badarg;
- return (db_env_set_tas_spins(v1));
+ __DB_OVFL(v1, UINT32_T_MAX);
+ return (dbenv->set_tas_spins(dbenv, (u_int32_t)v1));
}
if (!strcasecmp(name, "set_tmp_dir") ||
@@ -938,7 +1054,16 @@ illegal: __db_err(dbenv, "mis-formatted name-value pair: %s", s);
if (!strcasecmp(name, "set_tx_max")) {
if (sscanf(value, "%lu %c", &v1, &v4) != 1)
goto badarg;
- return (dbenv->set_tx_max(dbenv, v1));
+ __DB_OVFL(v1, UINT32_T_MAX);
+ return (dbenv->set_tx_max(dbenv, (u_int32_t)v1));
+ }
+
+ if (!strcasecmp(name, "set_txn_timeout")) {
+ if (sscanf(value, "%lu %c", &v1, &v4) != 1)
+ goto badarg;
+ __DB_OVFL(v1, UINT32_T_MAX);
+ return (dbenv->set_timeout(
+ dbenv, (u_int32_t)v1, DB_SET_TXN_TIMEOUT));
}
if (!strcasecmp(name, "set_verbose")) {
@@ -963,6 +1088,10 @@ illegal: __db_err(dbenv, "mis-formatted name-value pair: %s", s);
badarg: __db_err(dbenv, "incorrect arguments for name-value pair: %s", s);
return (EINVAL);
+
+toobig: __db_err(dbenv,
+ "%s: %lu larger than maximum value %lu", s, __v, __max);
+ return (EINVAL);
}
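As an illustration of what __db_parse accepts, a DB_CONFIG file in the environment home might contain lines such as the following (the values are made up; blank lines and lines beginning with '#' or whitespace are skipped by the caller, and each remaining line is matched by the strcasecmp/sscanf logic above and forwarded to the corresponding DB_ENV method):

# example DB_CONFIG -- illustrative values only
set_cachesize 0 2097152 1
set_flags db_txn_nosync
set_lg_max 10485760
set_lock_timeout 5000000
set_data_dir data
set_tmp_dir /tmp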
/*
@@ -976,7 +1105,7 @@ __db_tmp_open(dbenv, tmp_oflags, path, fhp)
char *path;
DB_FH *fhp;
{
- u_long pid;
+ u_int32_t id;
int mode, isdir, ret;
const char *p;
char *trv;
@@ -1001,12 +1130,9 @@ __db_tmp_open(dbenv, tmp_oflags, path, fhp)
for (p = DB_TRAIL; (*++trv = *p) != '\0'; ++p)
;
- /*
- * Replace the X's with the process ID. Pid should be a pid_t,
- * but we use unsigned long for portability.
- */
- for (pid = getpid(); *--trv == 'X'; pid /= 10)
- switch (pid % 10) {
+ /* Replace the X's with the process ID. */
+ for (__os_id(&id); *--trv == 'X'; id /= 10)
+ switch (id % 10) {
case 0: *trv = '0'; break;
case 1: *trv = '1'; break;
case 2: *trv = '2'; break;
@@ -1026,7 +1152,8 @@ __db_tmp_open(dbenv, tmp_oflags, path, fhp)
/* Loop, trying to open a file. */
for (;;) {
if ((ret = __os_open(dbenv, path,
- tmp_oflags | DB_OSO_CREATE | DB_OSO_EXCL, mode, fhp)) == 0)
+ tmp_oflags | DB_OSO_CREATE | DB_OSO_EXCL | DB_OSO_TEMP,
+ mode, fhp)) == 0)
return (0);
/*
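A standalone sketch of the naming scheme used by __db_tmp_open: the trailing X's of DB_TRAIL are overwritten with the decimal digits of an identifier, and the file is created with the equivalent of O_CREAT|O_EXCL so an existing name is detected by the open itself. The sketch below uses plain POSIX calls and getpid() purely for illustration; the library goes through its __os_id/__os_open portability layer and keeps retrying with adjusted names on EEXIST, which is omitted here.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

#define DB_TRAIL	"BDBXXXXXX"

/* Build "<dir>/BDBXXXXXX", fill in the X's, and create the file exclusively. */
static int
make_temp(const char *dir, char *path, size_t pathlen)
{
	unsigned long id;
	char *trv;

	if ((size_t)snprintf(path,
	    pathlen, "%s/%s", dir, DB_TRAIL) >= pathlen)
		return (-1);

	/* Walk backward from the end, replacing X's with digits of id. */
	for (id = (unsigned long)getpid(),
	    trv = path + strlen(path); *--trv == 'X'; id /= 10)
		*trv = (char)('0' + id % 10);

	/* O_EXCL guarantees an existing file is never silently reused. */
	return (open(path, O_RDWR | O_CREAT | O_EXCL, 0600));
}

int
main()
{
	char buf[64];
	int fd;

	if ((fd = make_temp("/tmp", buf, sizeof(buf))) != -1) {
		printf("created %s\n", buf);
		(void)close(fd);
		(void)unlink(buf);
	}
	return (0);
}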
diff --git a/bdb/env/env_recover.c b/bdb/env/env_recover.c
index bc5e4760584..fbe3b345b0d 100644
--- a/bdb/env/env_recover.c
+++ b/bdb/env/env_recover.c
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Copyright (c) 1996-2002
* Sleepycat Software. All rights reserved.
*/
@@ -9,9 +9,9 @@
#ifndef lint
static const char copyright[] =
- "Copyright (c) 1996-2000\nSleepycat Software Inc. All rights reserved.\n";
+ "Copyright (c) 1996-2002\nSleepycat Software Inc. All rights reserved.\n";
static const char revid[] =
- "$Id: env_recover.c,v 11.33 2001/01/04 22:38:42 ubell Exp $";
+ "$Id: env_recover.c,v 11.97 2002/08/22 17:43:22 margo Exp $";
#endif
#ifndef NO_SYSTEM_INCLUDES
@@ -32,37 +32,65 @@ static const char revid[] =
#endif
#include "db_int.h"
-#include "db_page.h"
-#include "db_dispatch.h"
-#include "db_am.h"
-#include "log.h"
-#include "txn.h"
-
-static float __lsn_diff __P((DB_LSN *, DB_LSN *, DB_LSN *, u_int32_t, int));
-static int __log_earliest __P((DB_ENV *, int32_t *, DB_LSN *));
+#include "dbinc/db_page.h"
+#include "dbinc/db_shash.h"
+#include "dbinc/lock.h"
+#include "dbinc/log.h"
+#include "dbinc/rep.h"
+#include "dbinc/txn.h"
+#include "dbinc/db_am.h"
+
+static int __log_backup __P((DB_ENV *, DB_LOGC *, DB_LSN *, DB_LSN *));
+static int __log_earliest __P((DB_ENV *, DB_LOGC *, int32_t *, DB_LSN *));
+static double __lsn_diff __P((DB_LSN *, DB_LSN *, DB_LSN *, u_int32_t, int));
/*
* __db_apprec --
- * Perform recovery.
+ * Perform recovery. If max_lsn is non-NULL, then we are trying
+ * to synchronize this system up with another system that has a max
+ * LSN of max_lsn, so we need to roll back sufficiently far for that
+ * to work. See __log_backup for details.
*
- * PUBLIC: int __db_apprec __P((DB_ENV *, u_int32_t));
+ * PUBLIC: int __db_apprec __P((DB_ENV *, DB_LSN *, u_int32_t));
*/
int
-__db_apprec(dbenv, flags)
+__db_apprec(dbenv, max_lsn, flags)
DB_ENV *dbenv;
+ DB_LSN *max_lsn;
u_int32_t flags;
{
DBT data;
- DB_LSN ckp_lsn, first_lsn, last_lsn, lowlsn, lsn, open_lsn;
+ DB_LOGC *logc;
+ DB_LSN ckp_lsn, first_lsn, last_lsn, lowlsn, lsn, stop_lsn;
DB_TXNREGION *region;
__txn_ckp_args *ckp_args;
time_t now, tlow;
- float nfiles;
- int32_t low;
- int is_thread, progress, ret;
+ int32_t log_size, low;
+ double nfiles;
+ int have_rec, is_thread, progress, ret, t_ret;
+ int (**dtab) __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ size_t dtabsize;
+ u_int32_t hi_txn, lockid, txnid;
+ char *p, *pass, t1[60], t2[60];
void *txninfo;
- COMPQUIET(nfiles, (float)0);
+ COMPQUIET(nfiles, (double)0);
+
+ logc = NULL;
+ ckp_args = NULL;
+ dtab = NULL;
+ hi_txn = TXN_MAXIMUM;
+ lockid = DB_LOCK_INVALIDID;
+ txninfo = NULL;
+ pass = "initial";
+
+ /*
+ * XXX
+ * Get the log size. No locking required because we're single-threaded
+ * during recovery.
+ */
+ log_size =
+ ((LOG *)(((DB_LOG *)dbenv->lg_handle)->reginfo.primary))->log_size;
/*
* Save the state of the thread flag -- we don't need it on at the
@@ -70,60 +98,83 @@ __db_apprec(dbenv, flags)
*/
is_thread = F_ISSET(dbenv, DB_ENV_THREAD) ? 1 : 0;
F_CLR(dbenv, DB_ENV_THREAD);
+
+ /* Set in-recovery flags. */
F_SET((DB_LOG *)dbenv->lg_handle, DBLOG_RECOVER);
+ region = ((DB_TXNMGR *)dbenv->tx_handle)->reginfo.primary;
+ F_SET(region, TXN_IN_RECOVERY);
+
+ /* Allocate a cursor for the log. */
+ if ((ret = dbenv->log_cursor(dbenv, &logc, 0)) != 0)
+ goto err;
/*
- * If the user is specifying recover to a particular point in time,
- * verify that the logs present are sufficient to do this.
+ * If the user is specifying recovery to a particular point in time
+ * or to a particular LSN, find the point to start recovery from.
*/
ZERO_LSN(lowlsn);
- if (dbenv->tx_timestamp != 0) {
- if ((ret = __log_earliest(dbenv, &low, &lowlsn)) != 0)
- return (ret);
+ if (max_lsn != NULL) {
+ if ((ret = __log_backup(dbenv, logc, max_lsn, &lowlsn)) != 0)
+ goto err;
+ } else if (dbenv->tx_timestamp != 0) {
+ if ((ret = __log_earliest(dbenv, logc, &low, &lowlsn)) != 0)
+ goto err;
if ((int32_t)dbenv->tx_timestamp < low) {
- char t1[30], t2[30];
-
- strcpy(t1, ctime(&dbenv->tx_timestamp));
+ (void)snprintf(t1, sizeof(t1),
+ "%s", ctime(&dbenv->tx_timestamp));
+ if ((p = strchr(t1, '\n')) != NULL)
+ *p = '\0';
tlow = (time_t)low;
- strcpy(t2, ctime(&tlow));
+ (void)snprintf(t2, sizeof(t2), "%s", ctime(&tlow));
+ if ((p = strchr(t2, '\n')) != NULL)
+ *p = '\0';
__db_err(dbenv,
- "Invalid recovery timestamp %.*s; earliest time is %.*s",
- 24, t1, 24, t2);
- return (EINVAL);
+ "Invalid recovery timestamp %s; earliest time is %s",
+ t1, t2);
+ ret = EINVAL;
+ goto err;
}
}
- /* Initialize the transaction list. */
- if ((ret = __db_txnlist_init(dbenv, &txninfo)) != 0)
- return (ret);
-
/*
* Recovery is done in three passes:
* Pass #0:
- * We need to find the position from which we will open files
- * We need to open files beginning with the last to next
- * checkpoint because we might have crashed after writing the
- * last checkpoint record, but before having written out all
- * the open file information.
+ * We need to find the position from which we will open files.
+ * We need to open files beginning with the earlier of the
+ * most recent checkpoint LSN and a checkpoint LSN before the
+ * recovery timestamp, if specified. We need to be before the
+ * most recent checkpoint LSN because we are going to collect
+ * information about which transactions were begun before we
+ * start rolling forward. Those that were should never be undone
+ * because queue cannot use LSNs to determine what operations can
+	 * safely be aborted, and it cannot roll back operations in
+ * transactions for which there may be records not processed
+ * during recovery. We need to consider earlier points in time
+ * in case we are recovering to a particular timestamp.
*
* Pass #1:
- * Read forward through the log from the second to last checkpoint
- * opening and closing files so that at the end of the log we have
- * the "current" set of files open.
+ * Read forward through the log from the position found in pass 0
+ * opening and closing files, and recording transactions for which
+ * we've seen their first record (the transaction's prev_lsn is
+ * 0,0). At the end of this pass, we know all transactions for
+ * which we've seen begins and we have the "current" set of files
+ * open.
*
* Pass #2:
* Read backward through the log undoing any uncompleted TXNs.
- * There are three cases:
- * 1. If doing catastrophic recovery, we read to the beginning
- * of the log
+ * There are four cases:
+ * 1. If doing catastrophic recovery, we read to the
+ * beginning of the log
 * 2. If we are doing normal recovery, then we have to roll
- * back to the most recent checkpoint that occurs
- * before the most recent checkpoint LSN, which is
- * returned by __log_findckp().
+ * back to the most recent checkpoint LSN.
* 3. If we are recovering to a point in time, then we have
* to roll back to the checkpoint whose ckp_lsn is earlier
* than the specified time. __log_earliest will figure
* this out for us.
+ * 4. If we are recovering back to a particular LSN, then
+ * we have to roll back to the checkpoint whose ckp_lsn
+ * is earlier than the max_lsn. __log_backup will figure
+ * that out for us.
 * In case 2, "uncompleted TXNs" include all those that committed
* after the user's specified timestamp.
*
@@ -133,6 +184,14 @@ __db_apprec(dbenv, flags)
* specified rollback point). During this pass, checkpoint
* file information is ignored, and file openings and closings
* are redone.
+ *
+ * ckp_lsn -- lsn of the last checkpoint or the first in the log.
+ * first_lsn -- the lsn where the forward passes begin.
+ * last_lsn -- the last lsn in the log, used for feedback
+ * lowlsn -- the lsn we are rolling back to, if we are recovering
+ * to a point in time.
+ * lsn -- temporary use lsn.
+ * stop_lsn -- the point at which forward roll should stop
*/
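Condensed, the passes described above correspond to the following control flow; this is only a schematic summary of the code below (feedback reporting, verbose messages and the replication-client locking path are omitted):

/*
 * Schematic only -- not compilable against the internal APIs:
 *
 *	find last_lsn (the end of the log) and hi_txn (last txnid used);
 *	pass 0: first_lsn = the ckp_lsn of the most recent checkpoint,
 *	    or a checkpoint before the timestamp/max_lsn, or the first
 *	    LSN in the log for catastrophic recovery;
 *	pass 1: roll forward from first_lsn, opening files and recording
 *	    transaction begins (__env_openfiles);
 *	pass 2: roll backward from last_lsn to first_lsn, undoing
 *	    uncompleted transactions (DB_TXN_BACKWARD_ROLL);
 *	pass 3: roll forward again, up to stop_lsn, redoing the work of
 *	    committed transactions (DB_TXN_FORWARD_ROLL);
 *	process limbo pages, checkpoint, close files, and either reset
 *	    the transaction ID space or handle the max_lsn truncation.
 */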
/*
@@ -143,132 +202,209 @@ __db_apprec(dbenv, flags)
* same amount of time (a false assumption) and then use the %-age
* of the amount of log traversed to figure out how much of the
* pass we've accomplished.
+ *
+ * If we can't find any log records, we're kind of done.
*/
+#ifdef UMRW
+ ZERO_LSN(last_lsn);
+#endif
memset(&data, 0, sizeof(data));
- if (dbenv->db_feedback != NULL &&
- (ret = log_get(dbenv, &last_lsn, &data, DB_LAST)) != 0)
- goto out;
+ if ((ret = logc->get(logc, &last_lsn, &data, DB_LAST)) != 0) {
+ if (ret == DB_NOTFOUND)
+ ret = 0;
+ else
+ __db_err(dbenv, "Last log record not found");
+ goto err;
+ }
+
+ do {
+ /* txnid is after rectype, which is a u_int32. */
+ memcpy(&txnid,
+ (u_int8_t *)data.data + sizeof(u_int32_t), sizeof(txnid));
+
+ if (txnid != 0)
+ break;
+ } while ((ret = logc->get(logc, &lsn, &data, DB_PREV)) == 0);
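The memcpy above depends on the fixed prefix every log record carries: a u_int32_t record type followed by the u_int32_t transaction ID (records whose ID is zero are non-transactional, which is why the loops here and below skip them while looking for hi_txn and the low txnid). A hedged sketch of that extraction on a raw record buffer, using alignment-safe copies just as the code does; record_header is an illustrative name:

#include <stdint.h>
#include <string.h>

/*
 * Every log record starts with a 32-bit record type followed by a
 * 32-bit transaction ID; copy them out without assuming alignment.
 */
static void
record_header(const void *rec, uint32_t *rectypep, uint32_t *txnidp)
{
	memcpy(rectypep, rec, sizeof(uint32_t));
	memcpy(txnidp,
	    (const uint8_t *)rec + sizeof(uint32_t), sizeof(uint32_t));
}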
/*
- * Pass #0
- * Find the second to last checkpoint in the log. This is the point
- * from which we want to begin pass #1 (the open files pass).
+ * There are no transactions, so there is nothing to do unless
+ * we're recovering to an LSN. If we are, we need to proceed since
+ * we'll still need to do a vtruncate based on information we haven't
+ * yet collected.
*/
- ckp_args = NULL;
+ if (ret == DB_NOTFOUND) {
+ ret = 0;
+ if (max_lsn == NULL)
+ goto done;
+ }
+ if (ret != 0)
+ goto err;
- if (LF_ISSET(DB_RECOVER_FATAL)) {
- if ((ret = log_get(dbenv, &ckp_lsn, &data, DB_FIRST)) != 0) {
- if (ret == DB_NOTFOUND)
- ret = 0;
- else
- __db_err(dbenv, "First log record not found");
- goto out;
+ hi_txn = txnid;
+
+ /*
+ * Pass #0
+ * Find the LSN from which we begin OPENFILES.
+ *
+ * If this is a catastrophic recovery, or if no checkpoint exists
+ * in the log, the LSN is the first LSN in the log.
+ *
+ * Otherwise, it is the minimum of (1) the LSN in the last checkpoint
+ * and (2) the LSN in the checkpoint before any specified recovery
+ * timestamp or max_lsn.
+ */
+ /*
+ * Get the first LSN in the log; it's an initial default
+ * even if this is not a catastrophic recovery.
+ */
+ if ((ret = logc->get(logc, &ckp_lsn, &data, DB_FIRST)) != 0) {
+ if (ret == DB_NOTFOUND)
+ ret = 0;
+ else
+ __db_err(dbenv, "First log record not found");
+ goto err;
+ }
+ first_lsn = ckp_lsn;
+ have_rec = 1;
+
+ if (!LF_ISSET(DB_RECOVER_FATAL)) {
+ if ((ret = __txn_getckp(dbenv, &ckp_lsn)) == 0 &&
+ (ret = logc->get(logc, &ckp_lsn, &data, DB_SET)) == 0) {
+ /* We have a recent checkpoint. This is LSN (1). */
+ if ((ret = __txn_ckp_read(dbenv,
+ data.data, &ckp_args)) != 0) {
+ __db_err(dbenv,
+ "Invalid checkpoint record at [%ld][%ld]",
+ (u_long)ckp_lsn.file,
+ (u_long)ckp_lsn.offset);
+ goto err;
+ }
+ first_lsn = ckp_args->ckp_lsn;
+ have_rec = 0;
}
- open_lsn = ckp_lsn;
- } else if ((ret =
- log_get(dbenv, &ckp_lsn, &data, DB_CHECKPOINT)) != 0) {
+
/*
- * If we don't find a checkpoint, start from the beginning.
- * If that fails, we're done. Note, we do not require that
- * there be log records if we're performing recovery.
+ * If LSN (2) exists, use it if it's before LSN (1).
+ * (If LSN (1) doesn't exist, first_lsn is the
+ * beginning of the log, so will "win" this check.)
+ *
+ * XXX
+ * In the recovery-to-a-timestamp case, lowlsn is chosen by
+ * __log_earliest, and is the checkpoint LSN of the
+ * *earliest* checkpoint in the unreclaimed log. I
+ * (krinsky) believe that we could optimize this by looking
+ * instead for the LSN of the *latest* checkpoint before
+ * the timestamp of interest, but I'm not sure that this
+ * is worth doing right now. (We have to look for lowlsn
+ * and low anyway, to make sure the requested timestamp is
+ * somewhere in the logs we have, and all that's required
+ * is that we pick *some* checkpoint after the beginning of
+	 * the logs and before the timestamp.)
*/
-first: if ((ret = log_get(dbenv, &ckp_lsn, &data, DB_FIRST)) != 0) {
- if (ret == DB_NOTFOUND)
- ret = 0;
- else
- __db_err(dbenv, "First log record not found");
- goto out;
+ if ((dbenv->tx_timestamp != 0 || max_lsn != NULL) &&
+ log_compare(&lowlsn, &first_lsn) < 0) {
+ DB_ASSERT(have_rec == 0);
+ first_lsn = lowlsn;
}
- open_lsn = ckp_lsn;
- } else if ((ret = __txn_ckp_read(dbenv, data.data, &ckp_args)) != 0) {
- __db_err(dbenv, "Invalid checkpoint record at [%ld][%ld]\n",
- (u_long)ckp_lsn.file, (u_long)ckp_lsn.offset);
- goto out;
- } else if (IS_ZERO_LSN(ckp_args->last_ckp) ||
- (ret = log_get(dbenv, &ckp_args->last_ckp, &data, DB_SET)) != 0)
- goto first;
- else
- open_lsn = ckp_args->last_ckp;
+ }
+
+ /* Get the record at first_lsn if we don't have it already. */
+ if (!have_rec &&
+ (ret = logc->get(logc, &first_lsn, &data, DB_SET)) != 0) {
+ __db_err(dbenv, "Checkpoint LSN record [%ld][%ld] not found",
+ (u_long)first_lsn.file, (u_long)first_lsn.offset);
+ goto err;
+ }
if (dbenv->db_feedback != NULL) {
- if (last_lsn.file == open_lsn.file)
- nfiles = (float)(last_lsn.offset - open_lsn.offset) /
- dbenv->lg_max;
+ if (last_lsn.file == first_lsn.file)
+ nfiles = (double)
+ (last_lsn.offset - first_lsn.offset) / log_size;
else
- nfiles = (float)(last_lsn.file - open_lsn.file) +
- (float)(dbenv->lg_max - open_lsn.offset +
- last_lsn.offset) / dbenv->lg_max;
+ nfiles = (double)(last_lsn.file - first_lsn.file) +
+ (double)(log_size - first_lsn.offset +
+ last_lsn.offset) / log_size;
/* We are going to divide by nfiles; make sure it isn't 0. */
if (nfiles == 0)
- nfiles = (float)0.001;
+ nfiles = (double)0.001;
}
+ /* Find a low txnid. */
+ ret = 0;
+ do {
+ /* txnid is after rectype, which is a u_int32. */
+ memcpy(&txnid,
+ (u_int8_t *)data.data + sizeof(u_int32_t), sizeof(txnid));
+
+ if (txnid != 0)
+ break;
+ } while ((ret = logc->get(logc, &lsn, &data, DB_NEXT)) == 0);
+
/*
- * Pass #1
- * Now, ckp_lsn is either the lsn of the last checkpoint
- * or the lsn of the first record in the log. Open_lsn is
- * the second to last checkpoint or the beinning of the log;
- * begin the open files pass from that lsn, and proceed to
- * the end of the log.
+ * There are no transactions and we're not recovering to an LSN (see
+ * above), so there is nothing to do.
*/
- lsn = open_lsn;
- for (;;) {
- if (dbenv->db_feedback != NULL) {
- progress = (int)(33 * (__lsn_diff(&open_lsn,
- &last_lsn, &lsn, dbenv->lg_max, 1) / nfiles));
- dbenv->db_feedback(dbenv, DB_RECOVER, progress);
- }
- ret = __db_dispatch(dbenv,
- &data, &lsn, DB_TXN_OPENFILES, txninfo);
- if (ret != 0 && ret != DB_TXN_CKP)
- goto msgerr;
- if ((ret = log_get(dbenv, &lsn, &data, DB_NEXT)) != 0) {
- if (ret == DB_NOTFOUND)
- break;
- goto out;
- }
+ if (ret == DB_NOTFOUND) {
+ ret = 0;
+ if (max_lsn == NULL)
+ goto done;
}
+ /* Reset to the first lsn. */
+ if (ret != 0 || (ret = logc->get(logc, &first_lsn, &data, DB_SET)) != 0)
+ goto err;
+
+ /* Initialize the transaction list. */
+ if ((ret =
+ __db_txnlist_init(dbenv, txnid, hi_txn, max_lsn, &txninfo)) != 0)
+ goto err;
+
+ /*
+ * Pass #1
+ * Run forward through the log starting at the first relevant lsn.
+ */
+ if ((ret = __env_openfiles(dbenv, logc,
+ txninfo, &data, &first_lsn, &last_lsn, nfiles, 1)) != 0)
+ goto err;
+
/*
* Pass #2.
*
- * Before we can begin pass #2, backward roll phase, we determine how
- * far back in the log to recover. If we are doing catastrophic
- * recovery, then we go as far back as we have files. If we are
- * doing normal recovery, we go as back to the most recent checkpoint
- * that occurs before the most recent checkpoint LSN. If we are
- * recovering to a point in time, then rollback to the checkpoint whose
- * ckp_lsn precedes the first log record (and then roll forward to
- * the appropriate timestamp in Pass #3).
+	 * We used first_lsn to tell us how far back we need to recover;
+	 * use it here.
*/
- if (LF_ISSET(DB_RECOVER_FATAL)) {
- ZERO_LSN(first_lsn);
- } else if (dbenv->tx_timestamp != 0)
- first_lsn = lowlsn;
- else
- if ((ret = __log_findckp(dbenv, &first_lsn)) == DB_NOTFOUND) {
- /*
- * We don't require that log files exist if recovery
- * was specified.
- */
- ret = 0;
- goto out;
- }
if (FLD_ISSET(dbenv->verbose, DB_VERB_RECOVERY))
__db_err(dbenv, "Recovery starting from [%lu][%lu]",
(u_long)first_lsn.file, (u_long)first_lsn.offset);
- for (ret = log_get(dbenv, &lsn, &data, DB_LAST);
- ret == 0 && log_compare(&lsn, &first_lsn) > 0;
- ret = log_get(dbenv, &lsn, &data, DB_PREV)) {
+ /*
+ * If we are doing client recovery, then we need to allocate
+ * the page-info lock table.
+ */
+ if (max_lsn != NULL) {
+ if ((ret = __rep_lockpgno_init(dbenv, &dtab, &dtabsize)) != 0)
+ goto err;
+ if ((ret = dbenv->lock_id(dbenv, &lockid)) != 0)
+ goto err;
+ }
+
+ pass = "backward";
+ for (ret = logc->get(logc, &lsn, &data, DB_LAST);
+ ret == 0 && log_compare(&lsn, &first_lsn) >= 0;
+ ret = logc->get(logc, &lsn, &data, DB_PREV)) {
if (dbenv->db_feedback != NULL) {
- progress = 34 + (int)(33 * (__lsn_diff(&open_lsn,
- &last_lsn, &lsn, dbenv->lg_max, 0) / nfiles));
+ progress = 34 + (int)(33 * (__lsn_diff(&first_lsn,
+ &last_lsn, &lsn, log_size, 0) / nfiles));
dbenv->db_feedback(dbenv, DB_RECOVER, progress);
}
- ret = __db_dispatch(dbenv,
- &data, &lsn, DB_TXN_BACKWARD_ROLL, txninfo);
+ if (max_lsn != NULL && (ret = __rep_lockpages(dbenv,
+ dtab, dtabsize, &lsn, NULL, NULL, lockid)) != 0)
+ continue;
+
+ ret = __db_dispatch(dbenv, dbenv->recover_dtab,
+ dbenv->recover_dtab_size, &data, &lsn,
+ DB_TXN_BACKWARD_ROLL, txninfo);
if (ret != 0) {
if (ret != DB_TXN_CKP)
goto msgerr;
@@ -277,63 +413,128 @@ first: if ((ret = log_get(dbenv, &ckp_lsn, &data, DB_FIRST)) != 0) {
}
}
if (ret != 0 && ret != DB_NOTFOUND)
- goto out;
+ goto err;
/*
- * Pass #3.
+	 * Pass #3. If we are recovering to a timestamp or to an LSN,
+	 * we need to make sure that we don't roll forward beyond that
+	 * point because there may be non-transactional operations (e.g.,
+	 * closes that would fail). The last_lsn variable is used for
+	 * feedback calculations, but we also use it to set an initial
+	 * stopping point for the forward pass, and then reset it
+	 * appropriately to derive a real stop_lsn that tells how far the
+	 * forward pass should go.
*/
- for (ret = log_get(dbenv, &lsn, &data, DB_NEXT);
- ret == 0; ret = log_get(dbenv, &lsn, &data, DB_NEXT)) {
+ pass = "forward";
+ stop_lsn = last_lsn;
+ if (max_lsn != NULL || dbenv->tx_timestamp != 0)
+ stop_lsn = ((DB_TXNHEAD *)txninfo)->maxlsn;
+
+ for (ret = logc->get(logc, &lsn, &data, DB_NEXT);
+ ret == 0; ret = logc->get(logc, &lsn, &data, DB_NEXT)) {
+ /*
+ * If we are recovering to a timestamp or an LSN,
+ * we need to make sure that we don't try to roll
+ * forward beyond the soon-to-be end of log.
+ */
+ if (log_compare(&lsn, &stop_lsn) > 0)
+ break;
+
if (dbenv->db_feedback != NULL) {
- progress = 67 + (int)(33 * (__lsn_diff(&open_lsn,
- &last_lsn, &lsn, dbenv->lg_max, 1) / nfiles));
+ progress = 67 + (int)(33 * (__lsn_diff(&first_lsn,
+ &last_lsn, &lsn, log_size, 1) / nfiles));
dbenv->db_feedback(dbenv, DB_RECOVER, progress);
}
- ret = __db_dispatch(dbenv,
- &data, &lsn, DB_TXN_FORWARD_ROLL, txninfo);
+ ret = __db_dispatch(dbenv, dbenv->recover_dtab,
+ dbenv->recover_dtab_size, &data, &lsn,
+ DB_TXN_FORWARD_ROLL, txninfo);
if (ret != 0) {
if (ret != DB_TXN_CKP)
goto msgerr;
else
ret = 0;
}
+
}
- if (ret != DB_NOTFOUND)
- goto out;
+ if (ret != 0 && ret != DB_NOTFOUND)
+ goto err;
/*
- * Process any pages that were on the limbo list
- * and move them to the free list. Do this
- * before checkpointing the database.
+ * Process any pages that were on the limbo list and move them to
+ * the free list. Do this before checkpointing the database.
*/
- if ((ret = __db_do_the_limbo(dbenv, txninfo)) != 0)
- goto out;
+ if ((ret = __db_do_the_limbo(dbenv, NULL, NULL, txninfo)) != 0)
+ goto err;
- /*
- * Now set the last checkpoint lsn and the current time,
- * take a checkpoint, and reset the txnid.
- */
- (void)time(&now);
- region = ((DB_TXNMGR *)dbenv->tx_handle)->reginfo.primary;
- region->last_txnid = ((DB_TXNHEAD *)txninfo)->maxid;
- region->last_ckp = ckp_lsn;
- region->time_ckp = (u_int32_t)now;
+ if (max_lsn == NULL)
+ region->last_txnid = ((DB_TXNHEAD *)txninfo)->maxid;
- /*
- * Take two checkpoints so that we don't re-recover any of the
- * work we've already done.
- */
- if ((ret = txn_checkpoint(dbenv, 0, 0, DB_FORCE)) != 0)
- goto out;
+ /* Take a checkpoint here to force any dirty data pages to disk. */
+ if (dbenv->tx_timestamp != 0) {
+ region->last_ckp = ((DB_TXNHEAD *)txninfo)->ckplsn;
+ __log_vtruncate(dbenv, &((DB_TXNHEAD *)txninfo)->maxlsn,
+ &((DB_TXNHEAD *)txninfo)->ckplsn);
+ }
- /* Now close all the db files that are open. */
- __log_close_files(dbenv);
+ if ((ret = dbenv->txn_checkpoint(dbenv, 0, 0, DB_FORCE)) != 0)
+ goto err;
- if ((ret = txn_checkpoint(dbenv, 0, 0, DB_FORCE)) != 0)
- goto out;
- region->last_txnid = TXN_MINIMUM;
+ /* Close all the db files that are open. */
+ if ((ret = __dbreg_close_files(dbenv)) != 0)
+ goto err;
+
+ if (max_lsn != NULL) {
+ region->last_ckp = ((DB_TXNHEAD *)txninfo)->ckplsn;
+
+ /* We are going to truncate, so we'd best close the cursor. */
+ if (logc != NULL && (ret = logc->close(logc, 0)) != 0)
+ goto err;
+ __log_vtruncate(dbenv,
+ max_lsn, &((DB_TXNHEAD *)txninfo)->ckplsn);
+
+ /*
+ * Now we need to open files that should be open in order for
+ * client processing to continue. However, since we've
+ * truncated the log, we need to recompute from where the
+ * openfiles pass should begin.
+ */
+ if ((ret = dbenv->log_cursor(dbenv, &logc, 0)) != 0)
+ goto err;
+ if ((ret = logc->get(logc, &first_lsn, &data, DB_FIRST)) != 0) {
+ if (ret == DB_NOTFOUND)
+ ret = 0;
+ else
+ __db_err(dbenv, "First log record not found");
+ goto err;
+ }
+ if ((ret = __txn_getckp(dbenv, &first_lsn)) == 0 &&
+ (ret = logc->get(logc, &first_lsn, &data, DB_SET)) == 0) {
+ /* We have a recent checkpoint. This is LSN (1). */
+ if ((ret = __txn_ckp_read(dbenv,
+ data.data, &ckp_args)) != 0) {
+ __db_err(dbenv,
+ "Invalid checkpoint record at [%ld][%ld]",
+ (u_long)first_lsn.file,
+ (u_long)first_lsn.offset);
+ goto err;
+ }
+ first_lsn = ckp_args->ckp_lsn;
+ }
+ if ((ret = logc->get(logc, &first_lsn, &data, DB_SET)) != 0)
+ goto err;
+ if ((ret = __env_openfiles(dbenv, logc,
+ txninfo, &data, &first_lsn, NULL, nfiles, 1)) != 0)
+ goto err;
+ } else if (region->stat.st_nrestores == 0)
+ /*
+ * If there are no prepared transactions that need resolution,
+ * we need to reset the transaction ID space and log this fact.
+ */
+ if ((ret = __txn_reset(dbenv)) != 0)
+ goto err;
if (FLD_ISSET(dbenv->verbose, DB_VERB_RECOVERY)) {
+ (void)time(&now);
__db_err(dbenv, "Recovery complete at %.24s", ctime(&now));
__db_err(dbenv, "%s %lx %s [%lu][%lu]",
"Maximum transaction ID",
@@ -344,18 +545,41 @@ first: if ((ret = log_get(dbenv, &ckp_lsn, &data, DB_FIRST)) != 0) {
}
if (0) {
-msgerr: __db_err(dbenv, "Recovery function for LSN %lu %lu failed",
- (u_long)lsn.file, (u_long)lsn.offset);
+msgerr: __db_err(dbenv,
+ "Recovery function for LSN %lu %lu failed on %s pass",
+ (u_long)lsn.file, (u_long)lsn.offset, pass);
}
-out: if (is_thread)
- F_SET(dbenv, DB_ENV_THREAD);
- __db_txnlist_end(dbenv, txninfo);
+done:
+err: if (lockid != DB_LOCK_INVALIDID) {
+ if ((t_ret = __rep_unlockpages(dbenv, lockid)) != 0 && ret == 0)
+ ret = t_ret;
+
+ if ((t_ret =
+ dbenv->lock_id_free(dbenv, lockid)) != 0 && ret == 0)
+ ret = t_ret;
+ }
+
+ if (logc != NULL && (t_ret = logc->close(logc, 0)) != 0 && ret == 0)
+ ret = t_ret;
+
+ if (txninfo != NULL)
+ __db_txnlist_end(dbenv, txninfo);
+
+ if (dtab != NULL)
+ __os_free(dbenv, dtab);
+
if (ckp_args != NULL)
- __os_free(ckp_args, sizeof(*ckp_args));
- F_CLR((DB_LOG *)dbenv->lg_handle, DBLOG_RECOVER);
+ __os_free(dbenv, ckp_args);
dbenv->tx_timestamp = 0;
+
+ /* Restore the state of the thread flag, clear in-recovery flags. */
+ if (is_thread)
+ F_SET(dbenv, DB_ENV_THREAD);
+ F_CLR((DB_LOG *)dbenv->lg_handle, DBLOG_RECOVER);
+ F_CLR(region, TXN_IN_RECOVERY);
+
return (ret);
}
@@ -365,13 +589,13 @@ out: if (is_thread)
* we are moving backward, we are computing high - current. max is
* the number of bytes per logfile.
*/
-static float
+static double
__lsn_diff(low, high, current, max, is_forward)
DB_LSN *low, *high, *current;
u_int32_t max;
int is_forward;
{
- float nf;
+ double nf;
/*
* There are three cases in each direction. If you are in the
@@ -382,27 +606,78 @@ __lsn_diff(low, high, current, max, is_forward)
*/
if (is_forward) {
if (current->file == low->file)
- nf = (float)(current->offset - low->offset) / max;
+ nf = (double)(current->offset - low->offset) / max;
else if (current->offset < low->offset)
- nf = (float)(current->file - low->file - 1) +
- (float)(max - low->offset + current->offset) / max;
+ nf = (double)(current->file - low->file - 1) +
+ (double)(max - low->offset + current->offset) / max;
else
- nf = (float)(current->file - low->file) +
- (float)(current->offset - low->offset) / max;
+ nf = (double)(current->file - low->file) +
+ (double)(current->offset - low->offset) / max;
} else {
if (current->file == high->file)
- nf = (float)(high->offset - current->offset) / max;
+ nf = (double)(high->offset - current->offset) / max;
else if (current->offset > high->offset)
- nf = (float)(high->file - current->file - 1) +
- (float)(max - current->offset + high->offset) / max;
+ nf = (double)(high->file - current->file - 1) +
+ (double)
+ (max - current->offset + high->offset) / max;
else
- nf = (float)(high->file - current->file) +
- (float)(high->offset - current->offset) / max;
+ nf = (double)(high->file - current->file) +
+ (double)(high->offset - current->offset) / max;
}
return (nf);
}
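A worked example of the forward case, assuming max (the log file size) is 10000 bytes: with low = [1][2000] and current = [3][500], current->offset < low->offset, so

	nf = (3 - 1 - 1) + (10000 - 2000 + 500) / 10000 = 1 + 0.85 = 1.85

i.e. the two LSNs are 1.85 log files apart. The recovery feedback code divides such distances by nfiles (the total span of the log) and scales the result into roughly the 0-33, 34-66 and 67-100 ranges for the three passes.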
/*
+ * __log_backup --
+ *
+ * This is used to find the earliest log record to process when a client
+ * is trying to sync up with a master whose max LSN is less than this
+ * client's max LSN; we want to roll back everything after that.
+ *
+ * Find the latest checkpoint whose ckp_lsn is less than or equal
+ * to the max LSN.
+ */
+static int
+__log_backup(dbenv, logc, max_lsn, start_lsn)
+ DB_ENV *dbenv;
+ DB_LOGC *logc;
+ DB_LSN *max_lsn, *start_lsn;
+{
+ DB_LSN lsn;
+ DBT data;
+ __txn_ckp_args *ckp_args;
+ int ret;
+
+ memset(&data, 0, sizeof(data));
+ ckp_args = NULL;
+
+ /*
+ * Follow checkpoints through the log until we find one with
+	 * a ckp_lsn less than or equal to max_lsn.
+ */
+ if ((ret = __txn_getckp(dbenv, &lsn)) != 0)
+ goto err;
+ while ((ret = logc->get(logc, &lsn, &data, DB_SET)) == 0) {
+ if ((ret = __txn_ckp_read(dbenv, data.data, &ckp_args)) != 0)
+ return (ret);
+ if (log_compare(&ckp_args->ckp_lsn, max_lsn) <= 0) {
+ *start_lsn = ckp_args->ckp_lsn;
+ break;
+ }
+
+ lsn = ckp_args->prev_lsn;
+ if (IS_ZERO_LSN(lsn))
+ break;
+ __os_free(dbenv, ckp_args);
+ }
+
+ if (ckp_args != NULL)
+ __os_free(dbenv, ckp_args);
+err: if (IS_ZERO_LSN(*start_lsn) && (ret == 0 || ret == DB_NOTFOUND))
+ ret = logc->get(logc, start_lsn, &data, DB_FIRST);
+ return (ret);
+}
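A worked example of the walk above, with made-up LSNs:

	checkpoint record at [3][400]: ckp_lsn [3][100], prev_lsn [2][900]
	checkpoint record at [2][900]: ckp_lsn [2][500], prev_lsn [1][850]
	max_lsn = [2][700]

	[3][100] >  [2][700]  -> follow prev_lsn to [2][900]
	[2][500] <= [2][700]  -> *start_lsn = [2][500]

If no checkpoint qualifies, or there is no checkpoint at all, start_lsn (zeroed by the caller) is still [0][0] at the err check, and the code falls back to the first LSN in the log.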
+
+/*
* __log_earliest --
*
* Return the earliest recovery point for the log files present. The
@@ -410,8 +685,9 @@ __lsn_diff(low, high, current, max, is_forward)
* whose checkpoint LSN is greater than the first LSN we process.
*/
static int
-__log_earliest(dbenv, lowtime, lowlsn)
+__log_earliest(dbenv, logc, lowtime, lowlsn)
DB_ENV *dbenv;
+ DB_LOGC *logc;
int32_t *lowtime;
DB_LSN *lowlsn;
{
@@ -427,19 +703,17 @@ __log_earliest(dbenv, lowtime, lowlsn)
* record whose ckp_lsn is greater than first_lsn.
*/
- for (ret = log_get(dbenv, &first_lsn, &data, DB_FIRST);
- ret == 0; ret = log_get(dbenv, &lsn, &data, DB_NEXT)) {
- if (ret != 0)
- break;
+ for (ret = logc->get(logc, &first_lsn, &data, DB_FIRST);
+ ret == 0; ret = logc->get(logc, &lsn, &data, DB_NEXT)) {
memcpy(&rectype, data.data, sizeof(rectype));
- if (rectype != DB_txn_ckp)
+ if (rectype != DB___txn_ckp)
continue;
if ((ret = __txn_ckp_read(dbenv, data.data, &ckpargs)) == 0) {
cmp = log_compare(&ckpargs->ckp_lsn, &first_lsn);
*lowlsn = ckpargs->ckp_lsn;
*lowtime = ckpargs->timestamp;
- __os_free(ckpargs, 0);
+ __os_free(dbenv, ckpargs);
if (cmp >= 0)
break;
}
@@ -447,3 +721,70 @@ __log_earliest(dbenv, lowtime, lowlsn)
return (ret);
}
+
+/*
+ * __env_openfiles --
+ * Perform the pass of recovery that opens files. This is used
+ * both during regular recovery and an initial call to txn_recover (since
+ * we need files open in order to abort prepared, but not yet committed
+ * transactions).
+ *
+ * See the comments in db_apprec for a detailed description of the
+ * various recovery passes.
+ *
+ * If we are not doing feedback processing (i.e., we are doing txn_recover
+ * processing and in_recovery is zero), then last_lsn can be NULL.
+ *
+ * PUBLIC: int __env_openfiles __P((DB_ENV *, DB_LOGC *,
+ * PUBLIC: void *, DBT *, DB_LSN *, DB_LSN *, double, int));
+ */
+int
+__env_openfiles(dbenv, logc, txninfo,
+ data, open_lsn, last_lsn, nfiles, in_recovery)
+ DB_ENV *dbenv;
+ DB_LOGC *logc;
+ void *txninfo;
+ DBT *data;
+ DB_LSN *open_lsn, *last_lsn;
+ int in_recovery;
+ double nfiles;
+{
+ DB_LSN lsn;
+ u_int32_t log_size;
+ int progress, ret;
+
+ /*
+ * XXX
+ * Get the log size. No locking required because we're single-threaded
+ * during recovery.
+ */
+ log_size =
+ ((LOG *)(((DB_LOG *)dbenv->lg_handle)->reginfo.primary))->log_size;
+
+ lsn = *open_lsn;
+ for (;;) {
+ if (in_recovery && dbenv->db_feedback != NULL) {
+ DB_ASSERT(last_lsn != NULL);
+ progress = (int)(33 * (__lsn_diff(open_lsn,
+ last_lsn, &lsn, log_size, 1) / nfiles));
+ dbenv->db_feedback(dbenv, DB_RECOVER, progress);
+ }
+ ret = __db_dispatch(dbenv,
+ dbenv->recover_dtab, dbenv->recover_dtab_size, data, &lsn,
+ in_recovery ? DB_TXN_OPENFILES : DB_TXN_POPENFILES,
+ txninfo);
+ if (ret != 0 && ret != DB_TXN_CKP) {
+ __db_err(dbenv,
+ "Recovery function for LSN %lu %lu failed",
+ (u_long)lsn.file, (u_long)lsn.offset);
+ break;
+ }
+ if ((ret = logc->get(logc, &lsn, data, DB_NEXT)) != 0) {
+ if (ret == DB_NOTFOUND)
+ ret = 0;
+ break;
+ }
+ }
+
+ return (ret);
+}
diff --git a/bdb/env/env_region.c b/bdb/env/env_region.c
index f3df4bac184..a919cf328b4 100644
--- a/bdb/env/env_region.c
+++ b/bdb/env/env_region.c
@@ -1,14 +1,14 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Copyright (c) 1996-2002
* Sleepycat Software. All rights reserved.
*/
#include "db_config.h"
#ifndef lint
-static const char revid[] = "$Id: env_region.c,v 11.28 2000/12/12 17:36:10 bostic Exp $";
+static const char revid[] = "$Id: env_region.c,v 11.64 2002/07/17 15:09:19 bostic Exp $";
#endif /* not lint */
#ifndef NO_SYSTEM_INCLUDES
@@ -20,20 +20,17 @@ static const char revid[] = "$Id: env_region.c,v 11.28 2000/12/12 17:36:10 bosti
#endif
#include "db_int.h"
-#include "db_shash.h"
-#include "lock.h"
-#include "lock_ext.h"
-#include "log.h"
-#include "log_ext.h"
-#include "mp.h"
-#include "mp_ext.h"
-#include "txn.h"
-#include "txn_ext.h"
+#include "dbinc/db_shash.h"
+#include "dbinc/lock.h"
+#include "dbinc/log.h"
+#include "dbinc/mp.h"
+#include "dbinc/rep.h"
+#include "dbinc/txn.h"
static int __db_des_destroy __P((DB_ENV *, REGION *));
static int __db_des_get __P((DB_ENV *, REGINFO *, REGINFO *, REGION **));
static int __db_e_remfile __P((DB_ENV *));
-static int __db_faultmem __P((void *, size_t, int));
+static int __db_faultmem __P((DB_ENV *, void *, size_t, int));
static void __db_region_destroy __P((DB_ENV *, REGINFO *));
/*
@@ -110,13 +107,17 @@ loop: renv = NULL;
* If this is a public environment, we use the filesystem to ensure
* the creation of the environment file is single-threaded.
*/
- if (F_ISSET(dbenv, DB_ENV_PRIVATE))
+ if (F_ISSET(dbenv, DB_ENV_PRIVATE)) {
+ if ((ret = __os_strdup(dbenv,
+ "process-private", &infop->name)) != 0)
+ goto err;
goto creation;
+ }
/* Build the region name. */
(void)snprintf(buf, sizeof(buf), "%s", DB_REGION_ENV);
if ((ret = __db_appname(dbenv,
- DB_APP_NONE, NULL, buf, 0, NULL, &infop->name)) != 0)
+ DB_APP_NONE, buf, 0, NULL, &infop->name)) != 0)
goto err;
/*
@@ -128,8 +129,8 @@ loop: renv = NULL;
* errno return value -- I sure hope they're right.
*/
if (F_ISSET(dbenv, DB_ENV_CREATE)) {
- if ((ret = __os_open(dbenv,
- infop->name, DB_OSO_REGION | DB_OSO_CREATE | DB_OSO_EXCL,
+ if ((ret = __os_open(dbenv, infop->name,
+ DB_OSO_CREATE | DB_OSO_DIRECT | DB_OSO_EXCL | DB_OSO_REGION,
dbenv->db_mode, dbenv->lockfhp)) == 0)
goto creation;
if (ret != EEXIST) {
@@ -143,8 +144,8 @@ loop: renv = NULL;
* If we couldn't create the file, try and open it. (If that fails,
* we're done.)
*/
- if ((ret = __os_open(dbenv, infop->name,
- DB_OSO_REGION, dbenv->db_mode, dbenv->lockfhp)) != 0)
+ if ((ret = __os_open(dbenv, infop->name, DB_OSO_REGION | DB_OSO_DIRECT,
+ dbenv->db_mode, dbenv->lockfhp)) != 0)
goto err;
/*
@@ -230,12 +231,12 @@ loop: renv = NULL;
* the better.
*/
#ifdef HAVE_MUTEX_THREADS
- __os_closehandle(dbenv->lockfhp);
+ __os_closehandle(dbenv, dbenv->lockfhp);
#endif
/* Call the region join routine to acquire the region. */
memset(&tregion, 0, sizeof(tregion));
- tregion.size = size;
+ tregion.size = (roff_t)size;
tregion.segid = segid;
if ((ret = __os_r_attach(dbenv, infop, &tregion)) != 0)
goto err;
@@ -265,7 +266,7 @@ loop: renv = NULL;
* can't because Windows/NT filesystems won't open files mode 0.
*/
renv = infop->primary;
- if (renv->panic) {
+ if (renv->envpanic && !F_ISSET(dbenv, DB_ENV_NOPANIC)) {
ret = __db_panic_msg(dbenv);
goto err;
}
@@ -287,13 +288,13 @@ loop: renv = NULL;
}
/* Lock the environment. */
- MUTEX_LOCK(dbenv, &renv->mutex, dbenv->lockfhp);
+ MUTEX_LOCK(dbenv, &renv->mutex);
/*
* Finally! We own the environment now. Repeat the panic check, it's
* possible that it was set while we waited for the lock.
*/
- if (renv->panic) {
+ if (renv->envpanic && !F_ISSET(dbenv, DB_ENV_NOPANIC)) {
ret = __db_panic_msg(dbenv);
goto err_unlock;
}
@@ -338,7 +339,7 @@ err_unlock: MUTEX_UNLOCK(dbenv, &renv->mutex);
* Fault the pages into memory. Note, do this AFTER releasing the
* lock, because we're only reading the pages, not writing them.
*/
- (void)__db_faultmem(infop->primary, rp->size, 0);
+ (void)__db_faultmem(dbenv, infop->primary, rp->size, 0);
/* Everything looks good, we're done. */
dbenv->reginfo = infop;
@@ -352,9 +353,12 @@ creation:
* Allocate room for 50 REGION structures plus overhead (we're going
* to use this space for last-ditch allocation requests), although we
* should never need anything close to that.
+ *
+ * Encryption passwds are stored in the env region. Add that in too.
*/
memset(&tregion, 0, sizeof(tregion));
- tregion.size = 50 * sizeof(REGION) + 50 * sizeof(MUTEX) + 2048;
+ tregion.size = (roff_t)(50 * sizeof(REGION) +
+ dbenv->passwd_len + 2048);
tregion.segid = INVALID_REGION_SEGID;
if ((ret = __os_r_attach(dbenv, infop, &tregion)) != 0)
goto err;
@@ -363,7 +367,7 @@ creation:
* Fault the pages into memory. Note, do this BEFORE we initialize
* anything, because we're writing the pages, not just reading them.
*/
- (void)__db_faultmem(infop->addr, tregion.size, 1);
+ (void)__db_faultmem(dbenv, infop->addr, tregion.size, 1);
/*
* The first object in the region is the REGENV structure. This is
@@ -392,10 +396,12 @@ creation:
* number which validates the file/environment.
*/
renv = infop->primary;
- renv->panic = 0;
+ renv->envpanic = 0;
db_version(&renv->majver, &renv->minver, &renv->patch);
SH_LIST_INIT(&renv->regionq);
renv->refcnt = 1;
+ renv->cipher_off = INVALID_ROFF;
+ renv->rep_off = INVALID_ROFF;
/*
* Initialize init_flags to store the flags that any other environment
@@ -412,15 +418,15 @@ creation:
* filesystem as the database home. But you knew that, I'm sure -- it
* probably wasn't even worth mentioning.)
*/
- if ((ret =
- __db_mutex_init(dbenv, &renv->mutex, DB_FCNTL_OFF_GEN, 0)) != 0) {
+ if ((ret = __db_mutex_setup(dbenv, infop, &renv->mutex,
+ MUTEX_NO_RECORD | MUTEX_NO_RLOCK)) != 0) {
__db_err(dbenv, "%s: unable to initialize environment lock: %s",
infop->name, db_strerror(ret));
goto err;
}
if (!F_ISSET(&renv->mutex, MUTEX_IGNORE) &&
- (ret = __db_mutex_lock(dbenv, &renv->mutex, dbenv->lockfhp)) != 0) {
+ (ret = __db_mutex_lock(dbenv, &renv->mutex)) != 0) {
__db_err(dbenv, "%s: unable to acquire environment lock: %s",
infop->name, db_strerror(ret));
goto err;
@@ -459,8 +465,8 @@ find_err: __db_err(dbenv,
if (tregion.segid != INVALID_REGION_SEGID) {
ref.size = tregion.size;
ref.segid = tregion.segid;
- if ((ret = __os_write(dbenv, dbenv->lockfhp,
- &ref, sizeof(ref), &nrw)) != 0 || nrw != sizeof(ref)) {
+ if ((ret = __os_write(
+ dbenv, dbenv->lockfhp, &ref, sizeof(ref), &nrw)) != 0) {
__db_err(dbenv,
"%s: unable to write out public environment ID: %s",
infop->name, db_strerror(ret));
@@ -476,7 +482,7 @@ find_err: __db_err(dbenv,
*/
#if defined(HAVE_MUTEX_THREADS)
if (F_ISSET(dbenv->lockfhp, DB_FH_VALID))
- __os_closehandle(dbenv->lockfhp);
+ __os_closehandle(dbenv, dbenv->lockfhp);
#endif
/* Validate the file. */
@@ -492,7 +498,7 @@ find_err: __db_err(dbenv,
err:
retry: /* Close any open file handle. */
if (F_ISSET(dbenv->lockfhp, DB_FH_VALID))
- (void)__os_closehandle(dbenv->lockfhp);
+ (void)__os_closehandle(dbenv, dbenv->lockfhp);
/*
* If we joined or created the region, detach from it. If we created
@@ -513,8 +519,8 @@ retry: /* Close any open file handle. */
/* Free the allocated name and/or REGINFO structure. */
if (infop->name != NULL)
- __os_freestr(infop->name);
- __os_free(infop, sizeof(REGINFO));
+ __os_free(dbenv, infop->name);
+ __os_free(dbenv, infop);
/* If we had a temporary error, wait awhile and try again. */
if (ret == 0) {
@@ -547,8 +553,10 @@ __db_e_detach(dbenv, destroy)
infop = dbenv->reginfo;
renv = infop->primary;
+ if (F_ISSET(dbenv, DB_ENV_PRIVATE))
+ destroy = 1;
/* Lock the environment. */
- MUTEX_LOCK(dbenv, &renv->mutex, dbenv->lockfhp);
+ MUTEX_LOCK(dbenv, &renv->mutex);
/* Decrement the reference count. */
if (renv->refcnt == 0) {
@@ -563,33 +571,39 @@ __db_e_detach(dbenv, destroy)
/* Close the locking file handle. */
if (F_ISSET(dbenv->lockfhp, DB_FH_VALID))
- (void)__os_closehandle(dbenv->lockfhp);
+ (void)__os_closehandle(dbenv, dbenv->lockfhp);
/* Reset the addr value that we "corrected" above. */
infop->addr = infop->primary;
/*
* If we are destroying the environment, we need to
- * destroy any system resources backing the mutex.
- * Do that now before we free the memory in __os_r_detach.
+ * destroy any system resources backing the mutex, as well
+ * as any system resources that the replication system may have
+ * acquired and put in the main region.
+ *
+ * Do these now before we free the memory in __os_r_detach.
*/
- if (destroy)
+ if (destroy) {
+ __rep_region_destroy(dbenv);
__db_mutex_destroy(&renv->mutex);
+ __db_mutex_destroy(&infop->rp->mutex);
+ }
/*
* Release the region, and kill our reference.
*
- * We set the DBENV->reginfo field to NULL here and discard its memory.
- * DBENV->remove calls __dbenv_remove to do the region remove, and
+ * We set the DB_ENV->reginfo field to NULL here and discard its memory.
+ * DB_ENV->remove calls __dbenv_remove to do the region remove, and
* __dbenv_remove attached and then detaches from the region. We don't
- * want to return to DBENV->remove with a non-NULL DBENV->reginfo field
- * because it will attempt to detach again as part of its cleanup.
+ * want to return to DB_ENV->remove with a non-NULL DB_ENV->reginfo
+ * field because it will attempt to detach again as part of its cleanup.
*/
(void)__os_r_detach(dbenv, infop, destroy);
if (infop->name != NULL)
- __os_free(infop->name, 0);
- __os_free(dbenv->reginfo, sizeof(REGINFO));
+ __os_free(dbenv, infop->name);
+ __os_free(dbenv, dbenv->reginfo);
dbenv->reginfo = NULL;
return (0);
@@ -599,18 +613,20 @@ __db_e_detach(dbenv, destroy)
* __db_e_remove --
* Discard an environment if it's not in use.
*
- * PUBLIC: int __db_e_remove __P((DB_ENV *, int));
+ * PUBLIC: int __db_e_remove __P((DB_ENV *, u_int32_t));
*/
int
-__db_e_remove(dbenv, force)
+__db_e_remove(dbenv, flags)
DB_ENV *dbenv;
- int force;
+ u_int32_t flags;
{
REGENV *renv;
REGINFO *infop, reginfo;
REGION *rp;
- int ret;
+ u_int32_t db_env_reset;
+ int force, ret;
+ force = LF_ISSET(DB_FORCE) ? 1 : 0;
/*
* This routine has to walk a nasty line between not looking into
* the environment (which may be corrupted after an app or system
@@ -632,8 +648,10 @@ __db_e_remove(dbenv, force)
* If the force flag is set, we do not acquire any locks during this
* process.
*/
+ db_env_reset = F_ISSET(dbenv, DB_ENV_NOLOCKING | DB_ENV_NOPANIC);
if (force)
- dbenv->db_mutexlocks = 0;
+ F_SET(dbenv, DB_ENV_NOLOCKING);
+ F_SET(dbenv, DB_ENV_NOPANIC);
/* Join the environment. */
if ((ret = __db_e_attach(dbenv, NULL)) != 0) {
@@ -645,17 +663,21 @@ __db_e_remove(dbenv, force)
ret = 0;
if (force)
goto remfiles;
- goto err;
+ goto done;
}
infop = dbenv->reginfo;
renv = infop->primary;
/* Lock the environment. */
- MUTEX_LOCK(dbenv, &renv->mutex, dbenv->lockfhp);
+ MUTEX_LOCK(dbenv, &renv->mutex);
- /* If it's in use, we're done. */
- if (renv->refcnt == 1 || force) {
+ /*
+ * If it's in use, we're done unless we're forcing the issue or the
+ * environment has panic'd. (Presumably, if the environment panic'd,
+ * the thread holding the reference count may not have cleaned up.)
+ */
+ if (renv->refcnt == 1 || renv->envpanic == 1 || force) {
/*
* Set the panic flag and overwrite the magic number.
*
@@ -663,7 +685,7 @@ __db_e_remove(dbenv, force)
* From this point on, there's no going back, we pretty
* much ignore errors, and just whack on whatever we can.
*/
- renv->panic = 1;
+ renv->envpanic = 1;
renv->magic = 0;
/*
@@ -713,7 +735,7 @@ restart: for (rp = SH_LIST_FIRST(&renv->regionq, __db_region);
/* Destroy the environment's region. */
(void)__db_e_detach(dbenv, 1);
- /* Discard the physical files. */
+ /* Discard any remaining physical files. */
remfiles: (void)__db_e_remfile(dbenv);
} else {
/* Unlock the environment. */
@@ -725,7 +747,9 @@ remfiles: (void)__db_e_remfile(dbenv);
ret = EBUSY;
}
-err:
+done: F_CLR(dbenv, DB_ENV_NOLOCKING | DB_ENV_NOPANIC);
+ F_SET(dbenv, db_env_reset);
+
return (ret);
}
@@ -742,7 +766,7 @@ __db_e_remfile(dbenv)
"__db_log.share",
"__db_mpool.share",
"__db_txn.share",
- NULL,
+ NULL
};
int cnt, fcnt, lastrm, ret;
u_int8_t saved_byte;
@@ -751,8 +775,7 @@ __db_e_remfile(dbenv)
/* Get the full path of a file in the environment. */
(void)snprintf(buf, sizeof(buf), "%s", DB_REGION_ENV);
- if ((ret =
- __db_appname(dbenv, DB_APP_NONE, NULL, buf, 0, NULL, &path)) != 0)
+ if ((ret = __db_appname(dbenv, DB_APP_NONE, buf, 0, NULL, &path)) != 0)
return (ret);
/* Get the parent directory for the environment. */
@@ -769,16 +792,15 @@ __db_e_remfile(dbenv)
}
/* Get the list of file names. */
- ret = __os_dirlist(dbenv, dir, &names, &fcnt);
+ if ((ret = __os_dirlist(dbenv, dir, &names, &fcnt)) != 0)
+ __db_err(dbenv, "%s: %s", dir, db_strerror(ret));
/* Restore the path, and free it. */
*p = saved_byte;
- __os_freestr(path);
+ __os_free(dbenv, path);
- if (ret != 0) {
- __db_err(dbenv, "%s: %s", dir, db_strerror(ret));
+ if (ret != 0)
return (ret);
- }
/*
* Search for valid region names, and remove them. We remove the
@@ -799,19 +821,23 @@ __db_e_remfile(dbenv)
continue;
if (__db_appname(dbenv,
- DB_APP_NONE, NULL, names[cnt], 0, NULL, &path) == 0) {
+ DB_APP_NONE, names[cnt], 0, NULL, &path) == 0) {
+ if (F_ISSET(dbenv, DB_ENV_OVERWRITE))
+ (void)__db_overwrite(dbenv, path);
(void)__os_unlink(dbenv, path);
- __os_freestr(path);
+ __os_free(dbenv, path);
}
}
if (lastrm != -1)
if (__db_appname(dbenv,
- DB_APP_NONE, NULL, names[lastrm], 0, NULL, &path) == 0) {
+ DB_APP_NONE, names[lastrm], 0, NULL, &path) == 0) {
+ if (F_ISSET(dbenv, DB_ENV_OVERWRITE))
+ (void)__db_overwrite(dbenv, path);
(void)__os_unlink(dbenv, path);
- __os_freestr(path);
+ __os_free(dbenv, path);
}
- __os_dirfree(names, fcnt);
+ __os_dirfree(dbenv, names, fcnt);
/*
* !!!
@@ -820,9 +846,9 @@ __db_e_remfile(dbenv)
*/
for (names = (char **)old_region_names; *names != NULL; ++names)
if (__db_appname(dbenv,
- DB_APP_NONE, NULL, *names, 0, NULL, &path) == 0) {
+ DB_APP_NONE, *names, 0, NULL, &path) == 0) {
(void)__os_unlink(dbenv, path);
- __os_freestr(path);
+ __os_free(dbenv, path);
}
return (0);
@@ -832,33 +858,47 @@ __db_e_remfile(dbenv)
* __db_e_stat
* Statistics for the environment.
*
- * PUBLIC: int __db_e_stat __P((DB_ENV *, REGENV *, REGION *, int *));
+ * PUBLIC: int __db_e_stat __P((DB_ENV *,
+ * PUBLIC: REGENV *, REGION *, int *, u_int32_t));
*/
int
-__db_e_stat(dbenv, arg_renv, arg_regions, arg_regions_cnt)
+__db_e_stat(dbenv, arg_renv, arg_regions, arg_regions_cnt, flags)
DB_ENV *dbenv;
REGENV *arg_renv;
REGION *arg_regions;
int *arg_regions_cnt;
+ u_int32_t flags;
{
REGENV *renv;
REGINFO *infop;
REGION *rp;
- int n;
+ int n, ret;
infop = dbenv->reginfo;
renv = infop->primary;
rp = infop->rp;
+ if ((ret = __db_fchk(dbenv,
+ "DB_ENV->stat", flags, DB_STAT_CLEAR)) != 0)
+ return (ret);
/* Lock the environment. */
- MUTEX_LOCK(dbenv, &rp->mutex, dbenv->lockfhp);
+ MUTEX_LOCK(dbenv, &rp->mutex);
*arg_renv = *renv;
+ if (LF_ISSET(DB_STAT_CLEAR)) {
+ renv->mutex.mutex_set_nowait = 0;
+ renv->mutex.mutex_set_wait = 0;
+ }
for (n = 0, rp = SH_LIST_FIRST(&renv->regionq, __db_region);
n < *arg_regions_cnt && rp != NULL;
- ++n, rp = SH_LIST_NEXT(rp, q, __db_region))
+ ++n, rp = SH_LIST_NEXT(rp, q, __db_region)) {
arg_regions[n] = *rp;
+ if (LF_ISSET(DB_STAT_CLEAR)) {
+ rp->mutex.mutex_set_nowait = 0;
+ rp->mutex.mutex_set_wait = 0;
+ }
+ }
/* Release the lock. */
rp = infop->rp;
@@ -887,12 +927,15 @@ __db_r_attach(dbenv, infop, size)
char buf[sizeof(DB_REGION_FMT) + 20];
renv = ((REGINFO *)dbenv->reginfo)->primary;
- F_CLR(infop, REGION_CREATE);
/* Lock the environment. */
- MUTEX_LOCK(dbenv, &renv->mutex, dbenv->lockfhp);
+ MUTEX_LOCK(dbenv, &renv->mutex);
- /* Find or create a REGION structure for this region. */
+ /*
+ * Find or create a REGION structure for this region. If we create
+ * it, the REGION_CREATE flag will be set in the infop structure.
+ */
+ F_CLR(infop, REGION_CREATE);
if ((ret = __db_des_get(dbenv, dbenv->reginfo, infop, &rp)) != 0) {
MUTEX_UNLOCK(dbenv, &renv->mutex);
return (ret);
@@ -903,12 +946,12 @@ __db_r_attach(dbenv, infop, size)
/* If we're creating the region, set the desired size. */
if (F_ISSET(infop, REGION_CREATE))
- rp->size = size;
+ rp->size = (roff_t)size;
/* Join/create the underlying region. */
(void)snprintf(buf, sizeof(buf), DB_REGION_FMT, infop->id);
if ((ret = __db_appname(dbenv,
- DB_APP_NONE, NULL, buf, 0, NULL, &infop->name)) != 0)
+ DB_APP_NONE, buf, 0, NULL, &infop->name)) != 0)
goto err;
if ((ret = __os_r_attach(dbenv, infop, rp)) != 0)
goto err;
@@ -918,8 +961,8 @@ __db_r_attach(dbenv, infop, size)
* anything because we're writing pages in created regions, not just
* reading them.
*/
- (void)__db_faultmem(infop->addr,
- rp->size, F_ISSET(infop, REGION_CREATE));
+ (void)__db_faultmem(dbenv,
+ infop->addr, rp->size, F_ISSET(infop, REGION_CREATE));
/*
* !!!
@@ -940,7 +983,7 @@ __db_r_attach(dbenv, infop, size)
* for it and release our lock on the environment.
*/
if (infop->type != REGION_TYPE_ENV) {
- MUTEX_LOCK(dbenv, &rp->mutex, dbenv->lockfhp);
+ MUTEX_LOCK(dbenv, &rp->mutex);
MUTEX_UNLOCK(dbenv, &renv->mutex);
}
@@ -954,8 +997,10 @@ err: if (infop->addr != NULL)
infop->id = INVALID_REGION_ID;
/* Discard the REGION structure if we created it. */
- if (F_ISSET(infop, REGION_CREATE))
+ if (F_ISSET(infop, REGION_CREATE)) {
(void)__db_des_destroy(dbenv, rp);
+ F_CLR(infop, REGION_CREATE);
+ }
/* Release the environment lock. */
MUTEX_UNLOCK(dbenv, &renv->mutex);
@@ -981,12 +1026,14 @@ __db_r_detach(dbenv, infop, destroy)
renv = ((REGINFO *)dbenv->reginfo)->primary;
rp = infop->rp;
+ if (F_ISSET(dbenv, DB_ENV_PRIVATE))
+ destroy = 1;
/* Lock the environment. */
- MUTEX_LOCK(dbenv, &renv->mutex, dbenv->lockfhp);
+ MUTEX_LOCK(dbenv, &renv->mutex);
/* Acquire the lock for the REGION. */
- MUTEX_LOCK(dbenv, &rp->mutex, dbenv->lockfhp);
+ MUTEX_LOCK(dbenv, &rp->mutex);
/*
* We need to call destroy on per-subsystem info before
@@ -1011,7 +1058,7 @@ __db_r_detach(dbenv, infop, destroy)
/* Destroy the structure. */
if (infop->name != NULL)
- __os_freestr(infop->name);
+ __os_free(dbenv, infop->name);
return (ret);
}
@@ -1089,9 +1136,8 @@ __db_des_get(dbenv, env_infop, infop, rpp)
/* Initialize the region. */
memset(rp, 0, sizeof(*rp));
- if ((ret = __db_mutex_init(dbenv, &rp->mutex,
- R_OFFSET(env_infop, &rp->mutex) + DB_FCNTL_OFF_GEN,
- 0)) != 0) {
+ if ((ret = __db_mutex_setup(dbenv, env_infop, &rp->mutex,
+ MUTEX_NO_RECORD | MUTEX_NO_RLOCK)) != 0) {
__db_shalloc_free(env_infop->addr, rp);
return (ret);
}
@@ -1141,7 +1187,8 @@ __db_des_destroy(dbenv, rp)
* Fault the region into memory.
*/
static int
-__db_faultmem(addr, size, created)
+__db_faultmem(dbenv, addr, size, created)
+ DB_ENV *dbenv;
void *addr;
size_t size;
int created;
@@ -1162,7 +1209,7 @@ __db_faultmem(addr, size, created)
* that it doesn't figure out that we're never really using it.
*/
ret = 0;
- if (DB_GLOBAL(db_region_init)) {
+ if (F_ISSET(dbenv, DB_ENV_REGION_INIT)) {
if (created)
for (p = addr, t = (u_int8_t *)addr + size;
p < t; p += OS_VMPAGESIZE)
@@ -1190,13 +1237,17 @@ __db_region_destroy(dbenv, infop)
case REGION_TYPE_LOCK:
__lock_region_destroy(dbenv, infop);
break;
+ case REGION_TYPE_LOG:
+ __log_region_destroy(dbenv, infop);
+ break;
case REGION_TYPE_MPOOL:
__mpool_region_destroy(dbenv, infop);
break;
+ case REGION_TYPE_TXN:
+ __txn_region_destroy(dbenv, infop);
+ break;
case REGION_TYPE_ENV:
- case REGION_TYPE_LOG:
case REGION_TYPE_MUTEX:
- case REGION_TYPE_TXN:
break;
default:
DB_ASSERT(0);
diff --git a/bdb/examples_c/README b/bdb/examples_c/README
index f59ae00a608..d5475ba01b2 100644
--- a/bdb/examples_c/README
+++ b/bdb/examples_c/README
@@ -1,7 +1,9 @@
-# $Id: README,v 11.3 2000/12/13 06:32:29 krinsky Exp $
+# $Id: README,v 11.5 2002/02/26 16:22:45 krinsky Exp $
ex_access.c Using just the DB access methods.
+ex_apprec Application-specific recovery.
+
ex_btrec.c Using the BTREE access method with record numbers.
ex_env.c Setting up the DB environment.
@@ -10,6 +12,10 @@ ex_lock.c Locking.
ex_mpool.c Shared memory buffer pools.
+ex_repquote Replication. This creates a toy stock quote server
+ with DB's single-master, multiple-client replication,
+ with communication over TCP.
+
ex_tpcb.c TPC/B.
Ex_tpcb sets up a framework in which to run a TPC/B test.
Database initialization (the -i flag) and running the
diff --git a/bdb/examples_c/bench_001.c b/bdb/examples_c/bench_001.c
new file mode 100644
index 00000000000..14fd3e549b6
--- /dev/null
+++ b/bdb/examples_c/bench_001.c
@@ -0,0 +1,382 @@
+/*-
+ * Copyright (c) 2001-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: bench_001.c,v 1.13 2002/08/15 02:45:39 bostic Exp $
+ */
+
+/*
+ * bench_001 - time bulk fetch interface.
+ * Without -R, builds a btree according to the arguments.
+ * With -R, runs and times bulk fetches.  If -d is specified,
+ * reads use the DB_MULTIPLE interface; otherwise the
+ * DB_MULTIPLE_KEY interface is used.
+ *
+ * ARGUMENTS:
+ * -c cachesize [1000 * pagesize]
+ * -d number of duplicates [none]
+ * -E don't use environment
+ * -I Just initialize the environment
+ * -i number of read iterations [1000000]
+ * -l length of data item [20]
+ * -n number of keys [1000000]
+ * -p pagesize [65536]
+ * -R perform read test.
+ * -T incorporate transactions.
+ *
+ * COMPILE:
+ * cc -I /usr/local/BerkeleyDB/include \
+ * -o bench_001 -O2 bench_001.c /usr/local/BerkeleyDB/lib/libdb.so
+ */
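+
+/*
+ * USAGE SKETCH (editorial addition, not part of the original source; the
+ * flag values below are illustrative only):
+ *
+ *	# Build a btree of 100000 keys with 20-byte data items:
+ *	./bench_001 -n 100000 -l 20
+ *
+ *	# Time bulk fetches against that database; -n should match the
+ *	# build so the random key lookups stay in range:
+ *	./bench_001 -R -n 100000 -i 10000
+ */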
+#include <sys/types.h>
+
+#include <sys/time.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+
+#include <db.h>
+
+#define DATABASE "bench_001.db"
+
+int main(int, char *[]);
+void usage(void);
+
+const char
+ *progname = "bench_001"; /* Program name. */
+/*
+ * db_init --
+ * Initialize the environment.
+ */
+DB_ENV *
+db_init(home, prefix, cachesize, txn)
+ char *home, *prefix;
+ int cachesize, txn;
+{
+ DB_ENV *dbenv;
+ int flags, ret;
+
+ if ((ret = db_env_create(&dbenv, 0)) != 0) {
+ dbenv->err(dbenv, ret, "db_env_create");
+ return (NULL);
+ }
+ dbenv->set_errfile(dbenv, stderr);
+ dbenv->set_errpfx(dbenv, prefix);
+ (void)dbenv->set_cachesize(dbenv, 0,
+ cachesize == 0 ? 50 * 1024 * 1024 : (u_int32_t)cachesize, 0);
+
+ flags = DB_CREATE | DB_INIT_MPOOL;
+ if (txn)
+ flags |= DB_INIT_TXN | DB_INIT_LOCK;
+ if ((ret = dbenv->open(dbenv, home, flags, 0)) != 0) {
+ dbenv->err(dbenv, ret, "DB_ENV->open: %s", home);
+ (void)dbenv->close(dbenv, 0);
+ return (NULL);
+ }
+ return (dbenv);
+}
+
+/*
+ * get -- loop getting batches of records.
+ *
+ */
+int
+get(dbp, txn, datalen, num, dups, iter, countp)
+ DB *dbp;
+ int txn, datalen, num, dups, iter, *countp;
+{
+ DBC *dbcp;
+ DBT key, data;
+ DB_TXN *txnp;
+ u_int32_t len, klen;
+ int count, flags, i, j, ret;
+ void *pointer, *dp, *kp;
+
+ memset(&key, 0, sizeof(key));
+ key.data = &j;
+ key.size = sizeof(j);
+ memset(&data, 0, sizeof(data));
+ data.flags = DB_DBT_USERMEM;
+ data.data = malloc(datalen*1024*1024);
+ data.ulen = data.size = datalen*1024*1024;
+ count = 0;
+ flags = DB_SET;
+ if (!dups)
+ flags |= DB_MULTIPLE_KEY;
+ else
+ flags |= DB_MULTIPLE;
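+	/*
+	 * Editorial note (not in the original source): each c_get() below
+	 * fills the single large user-supplied buffer with as many matching
+	 * entries as fit.  With DB_MULTIPLE the buffer carries data items
+	 * only (used here to fetch the duplicate set of one key); with
+	 * DB_MULTIPLE_KEY it carries key/data pairs.  DB_MULTIPLE_INIT
+	 * primes a walking pointer into the buffer, and the *_NEXT macros
+	 * advance it, handing back NULL pointers once the buffer is
+	 * exhausted.
+	 */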
+ for (i = 0; i < iter; i++) {
+ txnp = NULL;
+ if (txn)
+ dbp->dbenv->txn_begin(dbp->dbenv, NULL, &txnp, 0);
+ dbp->cursor(dbp, txnp, &dbcp, 0);
+
+ j = random() % num;
+ switch (ret = dbcp->c_get(dbcp, &key, &data, flags)) {
+ case 0:
+ break;
+ default:
+ dbp->err(dbcp->dbp, ret, "DBC->c_get");
+ return (ret);
+ }
+ DB_MULTIPLE_INIT(pointer, &data);
+ if (dups)
+ while (pointer != NULL) {
+ DB_MULTIPLE_NEXT(pointer, &data, dp, len);
+ if (dp != NULL)
+ count++;
+ }
+ else
+ while (pointer != NULL) {
+ DB_MULTIPLE_KEY_NEXT(pointer,
+ &data, kp, klen, dp, len);
+ if (kp != NULL)
+ count++;
+ }
+ dbcp->c_close(dbcp);
+ if (txn)
+ txnp->commit(txnp, 0);
+ }
+
+ *countp = count;
+ return (0);
+}
+
+/*
+ * fill - fill a db
+ * Since we open/created the db with transactions (potentially),
+ * we need to populate it with transactions. We'll bundle the puts
+ * 10 to a transaction.
+ */
+#define PUTS_PER_TXN 10
+int
+fill(dbenv, dbp, txn, datalen, num, dups)
+ DB_ENV *dbenv;
+ DB *dbp;
+ int txn, datalen, num, dups;
+{
+ DBT key, data;
+ DB_TXN *txnp;
+ struct data {
+ int id;
+ char str[1];
+ } *data_val;
+ int count, i, ret;
+ /*
+ * Insert records into the database, where the key is the user
+ * input and the data is the user input in reverse order.
+ */
+ txnp = NULL;
+ ret = 0;
+ count = 0;
+ memset(&key, 0, sizeof(DBT));
+ memset(&data, 0, sizeof(DBT));
+ key.data = &i;
+ key.size = sizeof(i);
+ data.data = data_val = (struct data *) malloc(datalen);
+ memcpy(data_val->str, "0123456789012345678901234567890123456789",
+ datalen - sizeof (data_val->id));
+ data.size = datalen;
+ data.flags = DB_DBT_USERMEM;
+
+ for (i = 0; i < num; i++) {
+ if (txn != 0 && i % PUTS_PER_TXN == 0) {
+ if (txnp != NULL) {
+ ret = txnp->commit(txnp, 0);
+ txnp = NULL;
+ if (ret != 0)
+ goto err;
+ }
+ if ((ret =
+ dbenv->txn_begin(dbenv, NULL, &txnp, 0)) != 0)
+ goto err;
+ }
+ data_val->id = 0;
+ do {
+ switch (ret =
+ dbp->put(dbp, txnp, &key, &data, 0)) {
+ case 0:
+ count++;
+ break;
+ default:
+ dbp->err(dbp, ret, "DB->put");
+ goto err;
+ }
+ } while (++data_val->id < dups);
+ }
+ if (txnp != NULL)
+ ret = txnp->commit(txnp, 0);
+
+ printf("%d\n", count);
+ return (ret);
+
+err: if (txnp != NULL)
+ (void)txnp->abort(txnp);
+ return (ret);
+}
+
+int
+main(argc, argv)
+ int argc;
+ char *argv[];
+{
+ extern char *optarg;
+ extern int optind;
+ DB *dbp;
+ DB_ENV *dbenv;
+ DB_TXN *txnp;
+ struct timeval start_time, end_time;
+ double secs;
+ int cache, ch, count, datalen, dups, env, init, iter, num, pagesize;
+ int ret, rflag, txn;
+
+ txnp = NULL;
+ datalen = 20;
+ iter = num = 1000000;
+ env = 1;
+ dups = init = rflag = txn = 0;
+
+ pagesize = 65536;
+ cache = 1000 * pagesize;
+
+ while ((ch = getopt(argc, argv, "c:d:EIi:l:n:p:RT")) != EOF)
+ switch (ch) {
+ case 'c':
+ cache = atoi(optarg);
+ break;
+ case 'd':
+ dups = atoi(optarg);
+ break;
+ case 'E':
+ env = 0;
+ break;
+ case 'I':
+ init = 1;
+ break;
+ case 'i':
+ iter = atoi(optarg);
+ break;
+ case 'l':
+ datalen = atoi(optarg);
+ break;
+ case 'n':
+ num = atoi(optarg);
+ break;
+ case 'p':
+ pagesize = atoi(optarg);
+ break;
+ case 'R':
+ rflag = 1;
+ break;
+ case 'T':
+ txn = 1;
+ break;
+ case '?':
+ default:
+ usage();
+ }
+ argc -= optind;
+ argv += optind;
+
+ /* Remove the previous database. */
+ if (!rflag) {
+ if (env)
+ system("rm -rf BENCH_001; mkdir BENCH_001");
+ else
+ (void)unlink(DATABASE);
+ }
+
+ dbenv = NULL;
+ if (env == 1 &&
+ (dbenv = db_init("BENCH_001", "bench_001", cache, txn)) == NULL)
+ return (-1);
+ if (init)
+ exit(0);
+ /* Create and initialize database object, open the database. */
+ if ((ret = db_create(&dbp, dbenv, 0)) != 0) {
+ fprintf(stderr,
+ "%s: db_create: %s\n", progname, db_strerror(ret));
+ exit(EXIT_FAILURE);
+ }
+ dbp->set_errfile(dbp, stderr);
+ dbp->set_errpfx(dbp, progname);
+ if ((ret = dbp->set_pagesize(dbp, pagesize)) != 0) {
+ dbp->err(dbp, ret, "set_pagesize");
+ goto err1;
+ }
+ if (dups && (ret = dbp->set_flags(dbp, DB_DUP)) != 0) {
+ dbp->err(dbp, ret, "set_flags");
+ goto err1;
+ }
+
+ if (env == 0 && (ret = dbp->set_cachesize(dbp, 0, cache, 0)) != 0) {
+ dbp->err(dbp, ret, "set_cachesize");
+ goto err1;
+ }
+
+ if ((ret = dbp->set_flags(dbp, DB_DUP)) != 0) {
+ dbp->err(dbp, ret, "set_flags");
+ goto err1;
+ }
+
+ if (txn != 0)
+ if ((ret = dbenv->txn_begin(dbenv, NULL, &txnp, 0)) != 0)
+ goto err1;
+
+ if ((ret = dbp->open(
+ dbp, txnp, DATABASE, NULL, DB_BTREE, DB_CREATE, 0664)) != 0) {
+ dbp->err(dbp, ret, "%s: open", DATABASE);
+ if (txnp != NULL)
+ (void)txnp->abort(txnp);
+ goto err1;
+ }
+
+ if (txnp != NULL)
+ ret = txnp->commit(txnp, 0);
+ txnp = NULL;
+ if (ret != 0)
+ goto err1;
+
+ if (rflag) {
+ /* If no environment, fill the cache. */
+ if (!env && (ret =
+ get(dbp, txn, datalen, num, dups, iter, &count)) != 0)
+ goto err1;
+
+ /* Time the get loop. */
+ gettimeofday(&start_time, NULL);
+ if ((ret =
+ get(dbp, txn, datalen, num, dups, iter, &count)) != 0)
+ goto err1;
+ gettimeofday(&end_time, NULL);
+ secs =
+ (((double)end_time.tv_sec * 1000000 + end_time.tv_usec) -
+ ((double)start_time.tv_sec * 1000000 + start_time.tv_usec))
+ / 1000000;
+ printf("%d records read using %d batches in %.2f seconds: ",
+ count, iter, secs);
+ printf("%.0f records/second\n", (double)count / secs);
+
+ } else if ((ret = fill(dbenv, dbp, txn, datalen, num, dups)) != 0)
+ goto err1;
+
+ /* Close everything down. */
+ if ((ret = dbp->close(dbp, rflag ? DB_NOSYNC : 0)) != 0) {
+ fprintf(stderr,
+ "%s: DB->close: %s\n", progname, db_strerror(ret));
+ return (1);
+ }
+ return (ret);
+
+err1: (void)dbp->close(dbp, 0);
+ return (1);
+}
+
+void
+usage()
+{
+ (void)fprintf(stderr, "usage: %s %s\n\t%s\n",
+ progname, "[-EIRT] [-c cachesize] [-d dups]",
+ "[-i iterations] [-l datalen] [-n keys] [-p pagesize]");
+ exit(EXIT_FAILURE);
+}
diff --git a/bdb/examples_c/ex_access.c b/bdb/examples_c/ex_access.c
index 3448daf43a3..5cac09ecf05 100644
--- a/bdb/examples_c/ex_access.c
+++ b/bdb/examples_c/ex_access.c
@@ -1,87 +1,69 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1997, 1998, 1999, 2000
+ * Copyright (c) 1997-2002
* Sleepycat Software. All rights reserved.
*
- * $Id: ex_access.c,v 11.7 2000/05/22 15:17:03 sue Exp $
+ * $Id: ex_access.c,v 11.22 2002/09/03 12:54:26 bostic Exp $
*/
-#include "db_config.h"
-
-#ifndef NO_SYSTEM_INCLUDES
#include <sys/types.h>
+#include <stdio.h>
#include <stdlib.h>
#include <string.h>
+
+#ifdef _WIN32
+extern int getopt(int, char * const *, const char *);
+#else
#include <unistd.h>
#endif
#include <db.h>
-#ifdef HAVE_VXWORKS
-#include "stdio.h"
-#define DATABASE "/vxtmp/vxtmp/access.db"
-#define ERROR_RETURN ERROR
-#else
#define DATABASE "access.db"
-#define ERROR_RETURN 1
-int main __P((int, char *[]));
-void usage __P((char *));
-#endif
-
-int ex_access __P((void));
+int main __P((int, char *[]));
+int usage __P((void));
-#ifndef HAVE_VXWORKS
int
main(argc, argv)
int argc;
char *argv[];
{
- extern char *optarg;
extern int optind;
- int ch;
+ DB *dbp;
+ DBC *dbcp;
+ DBT key, data;
+ u_int32_t len;
+ int ch, ret, rflag;
+ char *database, *p, *t, buf[1024], rbuf[1024];
+ const char *progname = "ex_access"; /* Program name. */
- while ((ch = getopt(argc, argv, "")) != EOF)
+ rflag = 0;
+ while ((ch = getopt(argc, argv, "r")) != EOF)
switch (ch) {
+ case 'r':
+ rflag = 1;
+ break;
case '?':
default:
- usage(argv[0]);
+ return (usage());
}
argc -= optind;
argv += optind;
- return (ex_access());
-}
-
-void
-usage(progname)
- char *progname;
-{
- (void)fprintf(stderr, "usage: %s\n", progname);
- exit(1);
-}
-#endif
-
-int
-ex_access()
-{
- DB *dbp;
- DBC *dbcp;
- DBT key, data;
- u_int32_t len;
- int ret;
- char *p, *t, buf[1024], rbuf[1024];
- const char *progname = "ex_access"; /* Program name. */
+ /* Accept optional database name. */
+ database = *argv == NULL ? DATABASE : argv[0];
- /* Remove the previous database. */
- (void)unlink(DATABASE);
+ /* Optionally discard the database. */
+ if (rflag)
+ (void)remove(database);
/* Create and initialize database object, open the database. */
if ((ret = db_create(&dbp, NULL, 0)) != 0) {
fprintf(stderr,
"%s: db_create: %s\n", progname, db_strerror(ret));
- return (ERROR_RETURN);
+ return (EXIT_FAILURE);
}
dbp->set_errfile(dbp, stderr);
dbp->set_errpfx(dbp, progname);
@@ -93,9 +75,9 @@ ex_access()
dbp->err(dbp, ret, "set_cachesize");
goto err1;
}
- if ((ret =
- dbp->open(dbp, DATABASE, NULL, DB_BTREE, DB_CREATE, 0664)) != 0) {
- dbp->err(dbp, ret, "%s: open", DATABASE);
+ if ((ret = dbp->open(dbp,
+ NULL, database, NULL, DB_BTREE, DB_CREATE, 0664)) != 0) {
+ dbp->err(dbp, ret, "%s: open", database);
goto err1;
}
@@ -110,6 +92,8 @@ ex_access()
fflush(stdout);
if (fgets(buf, sizeof(buf), stdin) == NULL)
break;
+ if (strcmp(buf, "exit\n") == 0 || strcmp(buf, "quit\n") == 0)
+ break;
if ((len = strlen(buf)) <= 1)
continue;
for (t = rbuf, p = buf + (len - 2); p >= buf;)
@@ -161,11 +145,18 @@ ex_access()
if ((ret = dbp->close(dbp, 0)) != 0) {
fprintf(stderr,
"%s: DB->close: %s\n", progname, db_strerror(ret));
- return (ERROR_RETURN);
+ return (EXIT_FAILURE);
}
- return (0);
+ return (EXIT_SUCCESS);
err2: (void)dbcp->c_close(dbcp);
err1: (void)dbp->close(dbp, 0);
- return (ERROR_RETURN);
+ return (EXIT_FAILURE);
+}
+
+int
+usage()
+{
+ (void)fprintf(stderr, "usage: ex_access [-r] [database]\n");
+ return (EXIT_FAILURE);
}
diff --git a/bdb/examples_c/ex_apprec/auto_rebuild b/bdb/examples_c/ex_apprec/auto_rebuild
new file mode 100644
index 00000000000..34251984888
--- /dev/null
+++ b/bdb/examples_c/ex_apprec/auto_rebuild
@@ -0,0 +1,9 @@
+# Script to rebuild automatically generated files for ex_apprec.
+
+E=../examples_c/ex_apprec
+
+cd ../../dist
+awk -f gen_rec.awk \
+ -v source_file=$E/ex_apprec_auto.c \
+ -v header_file=$E/ex_apprec_auto.h \
+ -v template_file=$E/ex_apprec_template < $E/ex_apprec.src
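+
+# Editorial note (not part of the original script): run this from
+# bdb/examples_c/ex_apprec -- the "cd ../../dist" above lands in bdb/dist,
+# where gen_rec.awk lives, and the awk run then regenerates ex_apprec_auto.c,
+# ex_apprec_auto.h and ex_apprec_template from ex_apprec.src.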
diff --git a/bdb/examples_c/ex_apprec/ex_apprec.c b/bdb/examples_c/ex_apprec/ex_apprec.c
new file mode 100644
index 00000000000..c045e734250
--- /dev/null
+++ b/bdb/examples_c/ex_apprec/ex_apprec.c
@@ -0,0 +1,267 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: ex_apprec.c,v 1.2 2002/08/06 05:39:01 bostic Exp $
+ */
+
+#include <sys/types.h>
+#include <sys/stat.h>
+
+#include <errno.h>
+#include <stddef.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+
+#include <db.h>
+
+#include "ex_apprec.h"
+
+int apprec_dispatch __P((DB_ENV *, DBT *, DB_LSN *, db_recops));
+int open_env __P((const char *, FILE *, const char *, DB_ENV **));
+int verify_absence __P((DB_ENV *, const char *));
+int verify_presence __P((DB_ENV *, const char *));
+
+int
+main(argc, argv)
+ int argc;
+ char *argv[];
+{
+ extern char *optarg;
+ extern int optind;
+ DB_ENV *dbenv;
+ DB_LSN lsn;
+ DB_TXN *txn;
+ DBT dirnamedbt;
+ int ret;
+ const char *home;
+ char ch, dirname[256];
+ const char *progname = "ex_apprec"; /* Program name. */
+
+ /* Default home. */
+ home = "TESTDIR";
+
+ while ((ch = getopt(argc, argv, "h:")) != EOF)
+ switch (ch) {
+ case 'h':
+ home = optarg;
+ break;
+ default:
+ fprintf(stderr, "usage: %s [-h home]", progname);
+ exit(EXIT_FAILURE);
+ }
+
+ printf("Set up environment.\n");
+ if ((ret = open_env(home, stderr, progname, &dbenv)) != 0)
+ return (EXIT_FAILURE);
+
+ printf("Create a directory in a transaction.\n");
+ /*
+ * This application's convention is to log the full directory name,
+ * including trailing nul.
+ */
+ memset(&dirnamedbt, 0, sizeof(dirnamedbt));
+ sprintf(dirname, "%s/MYDIRECTORY", home);
+ dirnamedbt.data = dirname;
+ dirnamedbt.size = strlen(dirname) + 1;
+
+ if ((ret = dbenv->txn_begin(dbenv, NULL, &txn, 0)) != 0) {
+ dbenv->err(dbenv, ret, "txn_begin");
+ return (EXIT_FAILURE);
+ }
+
+ /* Remember, always log actions before you execute them! */
+ memset(&lsn, 0, sizeof(lsn));
+ if ((ret =
+ ex_apprec_mkdir_log(dbenv, txn, &lsn, 0, &dirnamedbt)) != 0) {
+ dbenv->err(dbenv, ret, "mkdir_log");
+ return (EXIT_FAILURE);
+ }
+ if (mkdir(dirname, 0755) != 0) {
+ dbenv->err(dbenv, errno, "mkdir");
+ return (EXIT_FAILURE);
+ }
+
+ printf("Verify the directory's presence: ");
+ verify_presence(dbenv, dirname);
+ printf("check.\n");
+
+ /* Now abort the transaction and verify that the directory goes away. */
+ printf("Abort the transaction.\n");
+ if ((ret = txn->abort(txn)) != 0) {
+ dbenv->err(dbenv, ret, "txn_abort");
+ return (EXIT_FAILURE);
+ }
+
+ printf("Verify the directory's absence: ");
+ verify_absence(dbenv, dirname);
+ printf("check.\n");
+
+ /* Now do the same thing over again, only with a commit this time. */
+ printf("Create a directory in a transaction.\n");
+ memset(&dirnamedbt, 0, sizeof(dirnamedbt));
+ sprintf(dirname, "%s/MYDIRECTORY", home);
+ dirnamedbt.data = dirname;
+ dirnamedbt.size = strlen(dirname) + 1;
+ if ((ret = dbenv->txn_begin(dbenv, NULL, &txn, 0)) != 0) {
+ dbenv->err(dbenv, ret, "txn_begin");
+ return (EXIT_FAILURE);
+ }
+
+ memset(&lsn, 0, sizeof(lsn));
+ if ((ret =
+ ex_apprec_mkdir_log(dbenv, txn, &lsn, 0, &dirnamedbt)) != 0) {
+ dbenv->err(dbenv, ret, "mkdir_log");
+ return (EXIT_FAILURE);
+ }
+ if (mkdir(dirname, 0755) != 0) {
+ dbenv->err(dbenv, errno, "mkdir");
+ return (EXIT_FAILURE);
+ }
+
+ printf("Verify the directory's presence: ");
+ verify_presence(dbenv, dirname);
+ printf("check.\n");
+
+	/* Now commit the transaction and verify that the directory remains. */
+ printf("Commit the transaction.\n");
+ if ((ret = txn->commit(txn, 0)) != 0) {
+ dbenv->err(dbenv, ret, "txn_commit");
+ return (EXIT_FAILURE);
+ }
+
+ printf("Verify the directory's presence: ");
+ verify_presence(dbenv, dirname);
+ printf("check.\n");
+
+ printf("Now remove the directory, then run recovery.\n");
+ if ((ret = dbenv->close(dbenv, 0)) != 0) {
+ fprintf(stderr, "DB_ENV->close: %s\n", db_strerror(ret));
+ return (EXIT_FAILURE);
+ }
+ if (rmdir(dirname) != 0) {
+ fprintf(stderr,
+ "%s: rmdir failed with error %s", progname,
+ strerror(errno));
+ }
+ verify_absence(dbenv, dirname);
+
+ /* Opening with DB_RECOVER runs recovery. */
+ if ((ret = open_env(home, stderr, progname, &dbenv)) != 0)
+ return (EXIT_FAILURE);
+
+ printf("Verify the directory's presence: ");
+ verify_presence(dbenv, dirname);
+ printf("check.\n");
+
+ /* Close the handle. */
+ if ((ret = dbenv->close(dbenv, 0)) != 0) {
+ fprintf(stderr, "DB_ENV->close: %s\n", db_strerror(ret));
+ return (EXIT_FAILURE);
+ }
+
+ return (EXIT_SUCCESS);
+}
+
+int
+open_env(home, errfp, progname, dbenvp)
+ const char *home, *progname;
+ FILE *errfp;
+ DB_ENV **dbenvp;
+{
+ DB_ENV *dbenv;
+ int ret;
+
+ /*
+ * Create an environment object and initialize it for error
+ * reporting.
+ */
+ if ((ret = db_env_create(&dbenv, 0)) != 0) {
+ fprintf(errfp, "%s: %s\n", progname, db_strerror(ret));
+ return (ret);
+ }
+ dbenv->set_errfile(dbenv, errfp);
+ dbenv->set_errpfx(dbenv, progname);
+
+ /* Set up our custom recovery dispatch function. */
+ if ((ret = dbenv->set_app_dispatch(dbenv, apprec_dispatch)) != 0) {
+ dbenv->err(dbenv, ret, "set_app_dispatch");
+ return (ret);
+ }
+
+ /*
+ * Open the environment with full transactional support, running
+ * recovery.
+ */
+ if ((ret =
+ dbenv->open(dbenv, home, DB_CREATE | DB_RECOVER | DB_INIT_LOCK |
+ DB_INIT_LOG | DB_INIT_MPOOL | DB_INIT_TXN, 0)) != 0) {
+ dbenv->err(dbenv, ret, "environment open: %s", home);
+ dbenv->close(dbenv, 0);
+ return (ret);
+ }
+
+ *dbenvp = dbenv;
+ return (0);
+}
+
+/*
+ * Sample application dispatch function to handle user-specified log record
+ * types.
+ */
+int
+apprec_dispatch(dbenv, dbt, lsn, op)
+ DB_ENV *dbenv;
+ DBT *dbt;
+ DB_LSN *lsn;
+ db_recops op;
+{
+ u_int32_t rectype;
+
+ /* Pull the record type out of the log record. */
+ memcpy(&rectype, dbt->data, sizeof(rectype));
+
+ switch (rectype) {
+ case DB_ex_apprec_mkdir:
+ return (ex_apprec_mkdir_recover(dbenv, dbt, lsn, op, NULL));
+ default:
+ /*
+ * We've hit an unexpected, allegedly user-defined record
+ * type.
+ */
+ dbenv->errx(dbenv, "Unexpected log record type encountered");
+ return (EINVAL);
+ }
+}
+
+int
+verify_absence(dbenv, dirname)
+ DB_ENV *dbenv;
+ const char *dirname;
+{
+
+ if (access(dirname, F_OK) == 0) {
+ dbenv->errx(dbenv, "Error--directory present!");
+ exit(EXIT_FAILURE);
+ }
+
+ return (0);
+}
+
+int
+verify_presence(dbenv, dirname)
+ DB_ENV *dbenv;
+ const char *dirname;
+{
+
+ if (access(dirname, F_OK) != 0) {
+ dbenv->errx(dbenv, "Error--directory not present!");
+ exit(EXIT_FAILURE);
+ }
+
+ return (0);
+}
diff --git a/bdb/examples_c/ex_apprec/ex_apprec.h b/bdb/examples_c/ex_apprec/ex_apprec.h
new file mode 100644
index 00000000000..9bbb567d4a6
--- /dev/null
+++ b/bdb/examples_c/ex_apprec/ex_apprec.h
@@ -0,0 +1,24 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: ex_apprec.h,v 1.2 2002/08/08 15:47:00 bostic Exp $
+ */
+
+#ifndef _EX_APPREC_H_
+#define _EX_APPREC_H_
+
+#include "ex_apprec_auto.h"
+
+int ex_apprec_mkdir_log
+ __P((DB_ENV *, DB_TXN *, DB_LSN *, u_int32_t, const DBT *));
+int ex_apprec_mkdir_print
+ __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+int ex_apprec_mkdir_read
+ __P((DB_ENV *, void *, ex_apprec_mkdir_args **));
+int ex_apprec_mkdir_recover
+ __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+
+#endif /* !_EX_APPREC_H_ */
diff --git a/bdb/examples_c/ex_apprec/ex_apprec.src b/bdb/examples_c/ex_apprec/ex_apprec.src
new file mode 100644
index 00000000000..b048c504927
--- /dev/null
+++ b/bdb/examples_c/ex_apprec/ex_apprec.src
@@ -0,0 +1,41 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: ex_apprec.src,v 1.3 2002/08/08 15:47:00 bostic Exp $
+ */
+
+PREFIX ex_apprec
+
+/*
+ * This is the source file used to generate the application-specific recovery
+ * functions used by the ex_apprec example. It should be turned into usable
+ * source code (including a template for the recovery function itself) by
+ * changing to the dist directory of the DB distribution and
+ * running the gen_rec.awk script there as follows:
+ *
+ * awk -f ./gen_rec.awk \
+ * -v source_file=../examples_c/ex_apprec/ex_apprec_auto.c \
+ * -v header_file=../examples_c/ex_apprec/ex_apprec_auto.h \
+ * -v template_file=../examples_c/ex_apprec/ex_apprec_template \
+ * < ../examples_c/ex_apprec/ex_apprec.src
+ */
+
+INCLUDE #include <ctype.h>
+INCLUDE #include <errno.h>
+INCLUDE #include <stdlib.h>
+INCLUDE #include <string.h>
+INCLUDE
+INCLUDE #include <db.h>
+INCLUDE
+INCLUDE #include "ex_apprec.h"
+
+/*
+ * mkdir: used to create a directory
+ *
+ * dirname: relative or absolute pathname of the directory to be created
+ */
+BEGIN mkdir 10000
+DBT dirname DBT s
+END
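+
+/*
+ * Editorial note (not in the original file): in this description language,
+ * PREFIX names the prefix used for the generated functions and structures,
+ * BEGIN opens a record description and assigns its record-type number
+ * (10000 becomes DB_ex_apprec_mkdir in the generated header), each field
+ * line such as "DBT dirname DBT s" declares one field of the log record,
+ * and END closes the description.  The trailing "s" appears to select the
+ * string format used by the generated _print routine.
+ */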
diff --git a/bdb/examples_c/ex_apprec/ex_apprec_auto.c b/bdb/examples_c/ex_apprec/ex_apprec_auto.c
new file mode 100644
index 00000000000..d8c27e762c7
--- /dev/null
+++ b/bdb/examples_c/ex_apprec/ex_apprec_auto.c
@@ -0,0 +1,188 @@
+/* Do not edit: automatically built by gen_rec.awk. */
+#include <ctype.h>
+#include <errno.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include <db.h>
+
+#include "ex_apprec.h"
+/*
+ * PUBLIC: int ex_apprec_mkdir_log __P((DB_ENV *, DB_TXN *, DB_LSN *,
+ * PUBLIC: u_int32_t, const DBT *));
+ */
+int
+ex_apprec_mkdir_log(dbenv, txnid, ret_lsnp, flags,
+ dirname)
+ DB_ENV *dbenv;
+ DB_TXN *txnid;
+ DB_LSN *ret_lsnp;
+ u_int32_t flags;
+ const DBT *dirname;
+{
+ DBT logrec;
+ DB_LSN *lsnp, null_lsn;
+ u_int32_t zero;
+ u_int32_t npad, rectype, txn_num;
+ int ret;
+ u_int8_t *bp;
+
+ rectype = DB_ex_apprec_mkdir;
+ npad = 0;
+
+ if (txnid == NULL) {
+ txn_num = 0;
+ null_lsn.file = 0;
+ null_lsn.offset = 0;
+ lsnp = &null_lsn;
+ } else {
+ txn_num = txnid->txnid;
+ lsnp = &txnid->last_lsn;
+ }
+
+ logrec.size = sizeof(rectype) + sizeof(txn_num) + sizeof(DB_LSN)
+ + sizeof(u_int32_t) + (dirname == NULL ? 0 : dirname->size);
+ if ((logrec.data = malloc(logrec.size)) == NULL)
+ return (ENOMEM);
+
+ if (npad > 0)
+ memset((u_int8_t *)logrec.data + logrec.size - npad, 0, npad);
+
+ bp = logrec.data;
+
+ memcpy(bp, &rectype, sizeof(rectype));
+ bp += sizeof(rectype);
+
+ memcpy(bp, &txn_num, sizeof(txn_num));
+ bp += sizeof(txn_num);
+
+ memcpy(bp, lsnp, sizeof(DB_LSN));
+ bp += sizeof(DB_LSN);
+
+ if (dirname == NULL) {
+ zero = 0;
+ memcpy(bp, &zero, sizeof(u_int32_t));
+ bp += sizeof(u_int32_t);
+ } else {
+ memcpy(bp, &dirname->size, sizeof(dirname->size));
+ bp += sizeof(dirname->size);
+ memcpy(bp, dirname->data, dirname->size);
+ bp += dirname->size;
+ }
+
+ ret = dbenv->log_put(dbenv,
+ ret_lsnp, (DBT *)&logrec, flags);
+ if (txnid != NULL && ret == 0)
+ txnid->last_lsn = *ret_lsnp;
+#ifdef LOG_DIAGNOSTIC
+ if (ret != 0)
+ (void)ex_apprec_mkdir_print(dbenv,
+ (DBT *)&logrec, ret_lsnp, NULL, NULL);
+#endif
+ free(logrec.data);
+ return (ret);
+}
+
+/*
+ * PUBLIC: int ex_apprec_mkdir_print __P((DB_ENV *, DBT *, DB_LSN *,
+ * PUBLIC: db_recops, void *));
+ */
+int
+ex_apprec_mkdir_print(dbenv, dbtp, lsnp, notused2, notused3)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops notused2;
+ void *notused3;
+{
+ ex_apprec_mkdir_args *argp;
+ u_int32_t i;
+ int ch;
+ int ret;
+
+ notused2 = DB_TXN_ABORT;
+ notused3 = NULL;
+
+ if ((ret = ex_apprec_mkdir_read(dbenv, dbtp->data, &argp)) != 0)
+ return (ret);
+ (void)printf(
+ "[%lu][%lu]ex_apprec_mkdir: rec: %lu txnid %lx prevlsn [%lu][%lu]\n",
+ (u_long)lsnp->file,
+ (u_long)lsnp->offset,
+ (u_long)argp->type,
+ (u_long)argp->txnid->txnid,
+ (u_long)argp->prev_lsn.file,
+ (u_long)argp->prev_lsn.offset);
+ (void)printf("\tdirname: ");
+ for (i = 0; i < argp->dirname.size; i++) {
+ ch = ((u_int8_t *)argp->dirname.data)[i];
+ printf(isprint(ch) || ch == 0x0a ? "%c" : "%#x ", ch);
+ }
+ (void)printf("\n");
+ (void)printf("\n");
+ free(argp);
+ return (0);
+}
+
+/*
+ * PUBLIC: int ex_apprec_mkdir_read __P((DB_ENV *, void *,
+ * PUBLIC: ex_apprec_mkdir_args **));
+ */
+int
+ex_apprec_mkdir_read(dbenv, recbuf, argpp)
+ DB_ENV *dbenv;
+ void *recbuf;
+ ex_apprec_mkdir_args **argpp;
+{
+ ex_apprec_mkdir_args *argp;
+ u_int8_t *bp;
+ /* Keep the compiler quiet. */
+
+ dbenv = NULL;
+ if ((argp = malloc(sizeof(ex_apprec_mkdir_args) + sizeof(DB_TXN))) == NULL)
+ return (ENOMEM);
+
+ argp->txnid = (DB_TXN *)&argp[1];
+
+ bp = recbuf;
+ memcpy(&argp->type, bp, sizeof(argp->type));
+ bp += sizeof(argp->type);
+
+ memcpy(&argp->txnid->txnid, bp, sizeof(argp->txnid->txnid));
+ bp += sizeof(argp->txnid->txnid);
+
+ memcpy(&argp->prev_lsn, bp, sizeof(DB_LSN));
+ bp += sizeof(DB_LSN);
+
+ memset(&argp->dirname, 0, sizeof(argp->dirname));
+ memcpy(&argp->dirname.size, bp, sizeof(u_int32_t));
+ bp += sizeof(u_int32_t);
+ argp->dirname.data = bp;
+ bp += argp->dirname.size;
+
+ *argpp = argp;
+ return (0);
+}
+
+/*
+ * PUBLIC: int ex_apprec_init_print __P((DB_ENV *, int (***)(DB_ENV *,
+ * PUBLIC: DBT *, DB_LSN *, db_recops, void *), size_t *));
+ */
+int
+ex_apprec_init_print(dbenv, dtabp, dtabsizep)
+ DB_ENV *dbenv;
+ int (***dtabp)__P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ size_t *dtabsizep;
+{
+ int __db_add_recovery __P((DB_ENV *,
+ int (***)(DB_ENV *, DBT *, DB_LSN *, db_recops, void *),
+ size_t *,
+ int (*)(DB_ENV *, DBT *, DB_LSN *, db_recops, void *), u_int32_t));
+ int ret;
+
+ if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep,
+ ex_apprec_mkdir_print, DB_ex_apprec_mkdir)) != 0)
+ return (ret);
+ return (0);
+}
+
diff --git a/bdb/examples_c/ex_apprec/ex_apprec_auto.h b/bdb/examples_c/ex_apprec/ex_apprec_auto.h
new file mode 100644
index 00000000000..358b1a9f091
--- /dev/null
+++ b/bdb/examples_c/ex_apprec/ex_apprec_auto.h
@@ -0,0 +1,13 @@
+/* Do not edit: automatically built by gen_rec.awk. */
+
+#ifndef ex_apprec_AUTO_H
+#define ex_apprec_AUTO_H
+#define DB_ex_apprec_mkdir 10000
+typedef struct _ex_apprec_mkdir_args {
+ u_int32_t type;
+ DB_TXN *txnid;
+ DB_LSN prev_lsn;
+ DBT dirname;
+} ex_apprec_mkdir_args;
+
+#endif
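+
+/*
+ * Editorial note (not generated by gen_rec.awk): the structure above
+ * mirrors the log record laid out by ex_apprec_mkdir_log() -- record type,
+ * transaction id, previous LSN in the transaction, then each field declared
+ * in ex_apprec.src -- and ex_apprec_mkdir_read() unpacks a raw record
+ * buffer back into it.
+ */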
diff --git a/bdb/examples_c/ex_apprec/ex_apprec_rec.c b/bdb/examples_c/ex_apprec/ex_apprec_rec.c
new file mode 100644
index 00000000000..8b6056d73f1
--- /dev/null
+++ b/bdb/examples_c/ex_apprec/ex_apprec_rec.c
@@ -0,0 +1,115 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: ex_apprec_rec.c,v 1.2 2002/08/06 05:39:02 bostic Exp $
+ */
+
+/*
+ * This file is based on the template file ex_apprec_template. Note that
+ * because ex_apprec_mkdir, like most application-specific recovery functions,
+ * does not make use of DB-private structures, it has actually been simplified
+ * significantly.
+ */
+
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <errno.h>
+#include <stdlib.h>
+#include <unistd.h>
+
+#include <db.h>
+
+#include "ex_apprec.h"
+
+/*
+ * ex_apprec_mkdir_recover --
+ * Recovery function for mkdir.
+ *
+ * PUBLIC: int ex_apprec_mkdir_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+ex_apprec_mkdir_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ ex_apprec_mkdir_args *argp;
+ int ret;
+
+ argp = NULL;
+
+ /*
+ * Shut up the compiler--"info" is used for the recovery functions
+ * belonging to transaction meta-operations such as txn_create, and
+ * need not concern us here either.
+ */
+ info = NULL;
+
+ if ((ret = ex_apprec_mkdir_read(dbenv, dbtp->data, &argp)) != 0)
+ goto out;
+
+ switch (op) {
+ case DB_TXN_ABORT:
+ case DB_TXN_BACKWARD_ROLL:
+ /*
+ * If we're aborting, we need to remove the directory if it
+ * exists. We log the trailing zero in pathnames, so we can
+ * simply pass the data part of the DBT into rmdir as a string.
+ * (Note that we don't have any alignment guarantees, but for
+ * a char * this doesn't matter.)
+ *
+ * Ignore all errors other than ENOENT; DB may attempt to undo
+ * or redo operations without knowing whether they have already
+ * been done or undone, so we should never assume in a recovery
+ * function that the task definitely needs doing or undoing.
+ */
+ ret = rmdir(argp->dirname.data);
+ if (ret != 0 && errno != ENOENT)
+ dbenv->err(dbenv, ret, "Error in abort of mkdir");
+ else
+ ret = 0;
+ break;
+ case DB_TXN_FORWARD_ROLL:
+ /*
+ * The forward direction is just the opposite; here, we ignore
+ * EEXIST, because the directory may already exist.
+ */
+ ret = mkdir(argp->dirname.data, 0755);
+ if (ret != 0 && errno != EEXIST)
+ dbenv->err(dbenv,
+ ret, "Error in roll-forward of mkdir");
+ else
+ ret = 0;
+ break;
+ default:
+ /*
+ * We might want to handle DB_TXN_PRINT or DB_TXN_APPLY here,
+ * too, but we don't try to print the log records and aren't
+ * using replication, so there's no need to in this example.
+ */
+ dbenv->errx(dbenv, "Unexpected operation type\n");
+ return (EINVAL);
+ }
+
+ /*
+ * The recovery function is responsible for returning the LSN of the
+ * previous log record in this transaction, so that transaction aborts
+ * can follow the chain backwards.
+ *
+ * (If we'd wanted the LSN of this record earlier, we could have
+ * read it from lsnp, as well--but because we weren't working with
+ * pages or other objects that store their LSN and base recovery
+ * decisions on it, we didn't need to.)
+ */
+ *lsnp = argp->prev_lsn;
+
+out: if (argp != NULL)
+ free(argp);
+ return (ret);
+}
diff --git a/bdb/examples_c/ex_apprec/ex_apprec_template b/bdb/examples_c/ex_apprec/ex_apprec_template
new file mode 100644
index 00000000000..e67ccb6d8c3
--- /dev/null
+++ b/bdb/examples_c/ex_apprec/ex_apprec_template
@@ -0,0 +1,75 @@
+#include "db_config.h"
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_page.h"
+#include "dbinc/ex_apprec.h"
+#include "dbinc/log.h"
+
+/*
+ * ex_apprec_mkdir_recover --
+ * Recovery function for mkdir.
+ *
+ * PUBLIC: int ex_apprec_mkdir_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+ex_apprec_mkdir_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ ex_apprec_mkdir_args *argp;
+ DB *file_dbp;
+ DBC *dbc;
+ DB_MPOOLFILE *mpf;
+ PAGE *pagep;
+ int cmp_n, cmp_p, modified, ret;
+
+ REC_PRINT(ex_apprec_mkdir_print);
+ REC_INTRO(ex_apprec_mkdir_read, 1);
+
+ if ((ret = mpf->get(mpf, &argp->pgno, 0, &pagep)) != 0)
+ if (DB_REDO(op)) {
+ if ((ret = mpf->get(mpf,
+ &argp->pgno, DB_MPOOL_CREATE, &pagep)) != 0)
+ goto out;
+ } else {
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+ goto out;
+ }
+
+ modified = 0;
+ cmp_n = log_compare(lsnp, &LSN(pagep));
+
+ /*
+ * Use this when there is something like "pagelsn" in the argp
+ * structure. Sometimes, you might need to compare meta-data
+ * lsn's instead.
+ *
+ * cmp_p = log_compare(&LSN(pagep), argp->pagelsn);
+ */
+ if (cmp_p == 0 && DB_REDO(op)) {
+ /* Need to redo update described. */
+ modified = 1;
+ } else if (cmp_n == 0 && !DB_REDO(op)) {
+ /* Need to undo update described. */
+ modified = 1;
+ }
+	if ((ret = mpf->put(mpf, pagep, modified ? DB_MPOOL_DIRTY : 0)) != 0)
+ goto out;
+
+ *lsnp = argp->prev_lsn;
+ ret = 0;
+
+out: REC_CLOSE;
+}
+
diff --git a/bdb/examples_c/ex_btrec.c b/bdb/examples_c/ex_btrec.c
index b74f16b83e4..8e4aa3901d1 100644
--- a/bdb/examples_c/ex_btrec.c
+++ b/bdb/examples_c/ex_btrec.c
@@ -1,70 +1,32 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1997, 1998, 1999, 2000
+ * Copyright (c) 1997-2002
* Sleepycat Software. All rights reserved.
*
- * $Id: ex_btrec.c,v 11.8 2000/05/22 15:17:03 sue Exp $
+ * $Id: ex_btrec.c,v 11.18 2002/01/23 15:33:18 bostic Exp $
*/
-#include "db_config.h"
-
-#ifndef NO_SYSTEM_INCLUDES
#include <sys/types.h>
#include <errno.h>
#include <stdlib.h>
#include <string.h>
-#include <unistd.h>
-#endif
#include <db.h>
-#ifdef HAVE_VXWORKS
-#define DATABASE "/vxtmp/vxtmp/access.db"
-#define WORDLIST "/vxtmp/vxtmp/wordlist"
-#define ERROR_RETURN ERROR
-#else
#define DATABASE "access.db"
#define WORDLIST "../test/wordlist"
-#define ERROR_RETURN 1
-int main __P((int, char *[]));
-void usage __P((char *));
-#endif
+int main __P((void));
int ex_btrec __P((void));
-void show __P((char *, DBT *, DBT *));
+void show __P((const char *, DBT *, DBT *));
-#ifndef HAVE_VXWORKS
int
-main(argc, argv)
- int argc;
- char *argv[];
-{
- extern char *optarg;
- extern int optind;
- int ch;
-
- while ((ch = getopt(argc, argv, "")) != EOF)
- switch (ch) {
- case '?':
- default:
- usage(argv[0]);
- }
- argc -= optind;
- argv += optind;
-
- return (ex_btrec());
-}
-
-void
-usage(progname)
- char *progname;
+main()
{
- (void)fprintf(stderr, "usage: %s\n", progname);
- exit(1);
+ return (ex_btrec() == 1 ? EXIT_FAILURE : EXIT_SUCCESS);
}
-#endif
int
ex_btrec()
@@ -84,32 +46,32 @@ ex_btrec()
if ((fp = fopen(WORDLIST, "r")) == NULL) {
fprintf(stderr, "%s: open %s: %s\n",
progname, WORDLIST, db_strerror(errno));
- return (ERROR_RETURN);
+ return (1);
}
/* Remove the previous database. */
- (void)unlink(DATABASE);
+ (void)remove(DATABASE);
/* Create and initialize database object, open the database. */
if ((ret = db_create(&dbp, NULL, 0)) != 0) {
fprintf(stderr,
"%s: db_create: %s\n", progname, db_strerror(ret));
- return (ERROR_RETURN);
+ return (1);
}
dbp->set_errfile(dbp, stderr);
dbp->set_errpfx(dbp, progname); /* 1K page sizes. */
if ((ret = dbp->set_pagesize(dbp, 1024)) != 0) {
dbp->err(dbp, ret, "set_pagesize");
- return (ERROR_RETURN);
+ return (1);
} /* Record numbers. */
if ((ret = dbp->set_flags(dbp, DB_RECNUM)) != 0) {
dbp->err(dbp, ret, "set_flags: DB_RECNUM");
- return (ERROR_RETURN);
+ return (1);
}
- if ((ret =
- dbp->open(dbp, DATABASE, NULL, DB_BTREE, DB_CREATE, 0664)) != 0) {
+ if ((ret = dbp->open(dbp,
+ NULL, DATABASE, NULL, DB_BTREE, DB_CREATE, 0664)) != 0) {
dbp->err(dbp, ret, "open: %s", DATABASE);
- return (ERROR_RETURN);
+ return (1);
}
/*
@@ -144,7 +106,7 @@ ex_btrec()
(void)fclose(fp);
/* Print out the number of records in the database. */
- if ((ret = dbp->stat(dbp, &statp, NULL, 0)) != 0) {
+ if ((ret = dbp->stat(dbp, &statp, 0)) != 0) {
dbp->err(dbp, ret, "DB->stat");
goto err1;
}
@@ -215,7 +177,7 @@ get_err: dbp->err(dbp, ret, "DBcursor->get");
if ((ret = dbp->close(dbp, 0)) != 0) {
fprintf(stderr,
"%s: DB->close: %s\n", progname, db_strerror(ret));
- return (ERROR_RETURN);
+ return (1);
}
return (0);
@@ -232,8 +194,8 @@ err1: (void)dbp->close(dbp, 0);
*/
void
show(msg, key, data)
+ const char *msg;
DBT *key, *data;
- char *msg;
{
printf("%s%.*s : %.*s\n", msg,
(int)key->size, (char *)key->data,
diff --git a/bdb/examples_c/ex_dbclient.c b/bdb/examples_c/ex_dbclient.c
index 27461a8923f..5baa640811f 100644
--- a/bdb/examples_c/ex_dbclient.c
+++ b/bdb/examples_c/ex_dbclient.c
@@ -1,22 +1,18 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Copyright (c) 1996-2002
* Sleepycat Software. All rights reserved.
*
- * $Id: ex_dbclient.c,v 1.12 2000/10/26 14:13:05 bostic Exp $
+ * $Id: ex_dbclient.c,v 1.28 2002/08/06 06:11:24 bostic Exp $
*/
-#include "db_config.h"
-
-#ifndef NO_SYSTEM_INCLUDES
#include <sys/types.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
-#endif
#include <db.h>
@@ -24,52 +20,40 @@
#define DATABASE "access.db"
-int db_clientrun __P((DB_ENV *, char *));
-int ex_dbclient_run __P((char *, FILE *, char *, char *));
-#ifdef HAVE_VXWORKS
-int ex_dbclient __P((char *));
-#define ERROR_RETURN ERROR
-#define VXSHM_KEY 10
-#else
+int db_clientrun __P((DB_ENV *, const char *));
+int ex_dbclient_run __P((const char *, FILE *, const char *, const char *));
int main __P((int, char *[]));
-#define ERROR_RETURN 1
-#endif
/*
* An example of a program creating/configuring a Berkeley DB environment.
*/
-#ifndef HAVE_VXWORKS
int
main(argc, argv)
int argc;
char *argv[];
{
- char *home;
- int ret;
+ const char *home;
if (argc != 2) {
- fprintf(stderr, "Usage: %s hostname\n",argv[0]);
- exit(1);
+ fprintf(stderr, "Usage: %s hostname\n", argv[0]);
+ return (EXIT_FAILURE);
}
+
/*
* All of the shared database files live in DATABASE_HOME, but
* data files will live in CONFIG_DATA_DIR.
*/
home = DATABASE_HOME;
-
- if ((ret = ex_dbclient_run(home, stderr, argv[1], argv[0])) != 0)
- return (ret);
-
- return (0);
+ return (ex_dbclient_run(home,
+ stderr, argv[1], argv[0]) == 0 ? EXIT_SUCCESS : EXIT_FAILURE);
}
-#endif
int
ex_dbclient(host)
- char *host;
+ const char *host;
{
- char *home;
- char *progname = "ex_dbclient"; /* Program name. */
+ const char *home;
+ const char *progname = "ex_dbclient"; /* Program name. */
int ret;
/*
@@ -86,7 +70,7 @@ ex_dbclient(host)
int
ex_dbclient_run(home, errfp, host, progname)
- char *home, *host, *progname;
+ const char *home, *host, *progname;
FILE *errfp;
{
DB_ENV *dbenv;
@@ -98,35 +82,29 @@ ex_dbclient_run(home, errfp, host, progname)
*/
if ((ret = db_env_create(&dbenv, DB_CLIENT)) != 0) {
fprintf(errfp, "%s: %s\n", progname, db_strerror(ret));
- return (ERROR_RETURN);
- }
-#ifdef HAVE_VXWORKS
- if ((ret = dbenv->set_shm_key(dbenv, VXSHM_KEY)) != 0) {
- fprintf(errfp, "%s: %s\n", progname, db_strerror(ret));
- return (ERROR_RETURN);
+ return (1);
}
-#endif
retry = 0;
retry:
while (retry < 5) {
/*
* Set the server host we are talking to.
*/
- if ((ret =
- dbenv->set_server(dbenv, host, 10000, 10000, 0)) != 0) {
- fprintf(stderr, "Try %d: DBENV->set_server: %s\n",
+ if ((ret = dbenv->set_rpc_server(dbenv, NULL, host, 10000,
+ 10000, 0)) != 0) {
+ fprintf(stderr, "Try %d: DB_ENV->set_rpc_server: %s\n",
retry, db_strerror(ret));
retry++;
- if ((ret = __os_sleep(dbenv, 15, 0)) != 0)
- return (ret);
+ sleep(15);
} else
break;
}
if (retry >= 5) {
- fprintf(stderr, "DBENV->set_server: %s\n", db_strerror(ret));
+ fprintf(stderr,
+ "DB_ENV->set_rpc_server: %s\n", db_strerror(ret));
dbenv->close(dbenv, 0);
- return (ERROR_RETURN);
+ return (1);
}
/*
* We want to specify the shared memory buffer pool cachesize,
@@ -135,7 +113,7 @@ retry:
if ((ret = dbenv->set_cachesize(dbenv, 0, 64 * 1024, 0)) != 0) {
dbenv->err(dbenv, ret, "set_cachesize");
dbenv->close(dbenv, 0);
- return (ERROR_RETURN);
+ return (1);
}
/*
* We have multiple processes reading/writing these files, so
@@ -143,12 +121,12 @@ retry:
* not logging or transactions.
*/
if ((ret = dbenv->open(dbenv, home,
- DB_CREATE | DB_INIT_LOCK | DB_INIT_MPOOL | DB_INIT_TXN, 0)) != 0) {
+ DB_CREATE | DB_INIT_LOCK | DB_INIT_MPOOL, 0)) != 0) {
dbenv->err(dbenv, ret, "environment open: %s", home);
dbenv->close(dbenv, 0);
if (ret == DB_NOSERVER)
goto retry;
- return (ERROR_RETURN);
+ return (1);
}
ret = db_clientrun(dbenv, progname);
@@ -158,8 +136,8 @@ retry:
/* Close the handle. */
if ((ret = dbenv->close(dbenv, 0)) != 0) {
- fprintf(stderr, "DBENV->close: %s\n", db_strerror(ret));
- return (ERROR_RETURN);
+ fprintf(stderr, "DB_ENV->close: %s\n", db_strerror(ret));
+ return (1);
}
return (0);
}
@@ -167,7 +145,7 @@ retry:
int
db_clientrun(dbenv, progname)
DB_ENV *dbenv;
- char *progname;
+ const char *progname;
{
DB *dbp;
DBT key, data;
@@ -187,8 +165,8 @@ db_clientrun(dbenv, progname)
dbp->err(dbp, ret, "set_pagesize");
goto err1;
}
- if ((ret =
- dbp->open(dbp, DATABASE, NULL, DB_BTREE, DB_CREATE, 0664)) != 0) {
+ if ((ret = dbp->open(dbp,
+ NULL, DATABASE, NULL, DB_BTREE, DB_CREATE, 0664)) != 0) {
dbp->err(dbp, ret, "%s: open", DATABASE);
goto err1;
}
diff --git a/bdb/examples_c/ex_env.c b/bdb/examples_c/ex_env.c
index 5490723a31c..32257effb7b 100644
--- a/bdb/examples_c/ex_env.c
+++ b/bdb/examples_c/ex_env.c
@@ -1,21 +1,17 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Copyright (c) 1996-2002
* Sleepycat Software. All rights reserved.
*
- * $Id: ex_env.c,v 11.18 2000/10/27 20:32:00 dda Exp $
+ * $Id: ex_env.c,v 11.27 2002/08/15 14:34:11 bostic Exp $
*/
-#include "db_config.h"
-
-#ifndef NO_SYSTEM_INCLUDES
#include <sys/types.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
-#endif
#include <db.h>
@@ -27,40 +23,23 @@
#define DATABASE_HOME "\\tmp\\database"
#define CONFIG_DATA_DIR "\\database\\files"
#else
-#ifdef HAVE_VXWORKS
-#define DATABASE_HOME "/ata0/vxtmp/database"
-#define CONFIG_DATA_DIR "/vxtmp/vxtmp/database/files"
-#else
#define DATABASE_HOME "/tmp/database"
#define CONFIG_DATA_DIR "/database/files"
#endif
#endif
-#endif
-int db_setup __P((char *, char *, FILE *, char *));
-int db_teardown __P((char *, char *, FILE *, char *));
-#ifdef HAVE_VXWORKS
-int ex_env __P((void));
-#define ERROR_RETURN ERROR
-#define VXSHM_KEY 11
-#else
+int db_setup __P((const char *, const char *, FILE *, const char *));
+int db_teardown __P((const char *, const char *, FILE *, const char *));
int main __P((void));
-#define ERROR_RETURN 1
-#endif
/*
* An example of a program creating/configuring a Berkeley DB environment.
*/
int
-#ifdef HAVE_VXWORKS
-ex_env()
-#else
main()
-#endif
{
- int ret;
- char *data_dir, *home;
- char *progname = "ex_env"; /* Program name. */
+ const char *data_dir, *home;
+ const char *progname = "ex_env"; /* Program name. */
/*
* All of the shared database files live in DATABASE_HOME, but
@@ -70,19 +49,19 @@ main()
data_dir = CONFIG_DATA_DIR;
printf("Setup env\n");
- if ((ret = db_setup(home, data_dir, stderr, progname)) != 0)
- return (ret);
+ if (db_setup(home, data_dir, stderr, progname) != 0)
+ return (EXIT_FAILURE);
printf("Teardown env\n");
- if ((ret = db_teardown(home, data_dir, stderr, progname)) != 0)
- return (ret);
+ if (db_teardown(home, data_dir, stderr, progname) != 0)
+ return (EXIT_FAILURE);
- return (0);
+ return (EXIT_SUCCESS);
}
int
db_setup(home, data_dir, errfp, progname)
- char *home, *data_dir, *progname;
+ const char *home, *data_dir, *progname;
FILE *errfp;
{
DB_ENV *dbenv;
@@ -94,19 +73,11 @@ db_setup(home, data_dir, errfp, progname)
*/
if ((ret = db_env_create(&dbenv, 0)) != 0) {
fprintf(errfp, "%s: %s\n", progname, db_strerror(ret));
- return (ERROR_RETURN);
+ return (1);
}
dbenv->set_errfile(dbenv, errfp);
dbenv->set_errpfx(dbenv, progname);
-#ifdef HAVE_VXWORKS
- /* VxWorks needs to specify a base segment ID. */
- if ((ret = dbenv->set_shm_key(dbenv, VXSHM_KEY)) != 0) {
- fprintf(errfp, "%s: %s\n", progname, db_strerror(ret));
- return (ERROR_RETURN);
- }
-#endif
-
/*
* We want to specify the shared memory buffer pool cachesize,
* but everything else is the default.
@@ -114,7 +85,7 @@ db_setup(home, data_dir, errfp, progname)
if ((ret = dbenv->set_cachesize(dbenv, 0, 64 * 1024, 0)) != 0) {
dbenv->err(dbenv, ret, "set_cachesize");
dbenv->close(dbenv, 0);
- return (ERROR_RETURN);
+ return (1);
}
/* Databases are in a subdirectory. */
@@ -126,22 +97,22 @@ db_setup(home, data_dir, errfp, progname)
0)) != 0) {
dbenv->err(dbenv, ret, "environment open: %s", home);
dbenv->close(dbenv, 0);
- return (ERROR_RETURN);
+ return (1);
}
/* Do something interesting... */
/* Close the handle. */
if ((ret = dbenv->close(dbenv, 0)) != 0) {
- fprintf(stderr, "DBENV->close: %s\n", db_strerror(ret));
- return (ERROR_RETURN);
+ fprintf(stderr, "DB_ENV->close: %s\n", db_strerror(ret));
+ return (1);
}
return (0);
}
int
db_teardown(home, data_dir, errfp, progname)
- char *home, *data_dir, *progname;
+ const char *home, *data_dir, *progname;
FILE *errfp;
{
DB_ENV *dbenv;
@@ -150,21 +121,15 @@ db_teardown(home, data_dir, errfp, progname)
/* Remove the shared database regions. */
if ((ret = db_env_create(&dbenv, 0)) != 0) {
fprintf(errfp, "%s: %s\n", progname, db_strerror(ret));
- return (ERROR_RETURN);
+ return (1);
}
dbenv->set_errfile(dbenv, errfp);
dbenv->set_errpfx(dbenv, progname);
-#ifdef HAVE_VXWORKS
- if ((ret = dbenv->set_shm_key(dbenv, VXSHM_KEY)) != 0) {
- fprintf(errfp, "%s: %s\n", progname, db_strerror(ret));
- return (ERROR_RETURN);
- }
-#endif
(void)dbenv->set_data_dir(dbenv, data_dir);
if ((ret = dbenv->remove(dbenv, home, 0)) != 0) {
- fprintf(stderr, "DBENV->remove: %s\n", db_strerror(ret));
- return (ERROR_RETURN);
+ fprintf(stderr, "DB_ENV->remove: %s\n", db_strerror(ret));
+ return (1);
}
return (0);
}
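
For reference, the create/configure/open/remove sequence that db_setup() and db_teardown() walk through above condenses to the sketch below. This is illustrative only: the helper name is invented and the open flag set is an assumption (the example's full flag list sits in unchanged context not shown in this hunk), but the individual calls are the ones the example uses.

#include <stdio.h>

#include <db.h>

/* env_example: hypothetical condensation of db_setup()/db_teardown(). */
int
env_example(const char *home, const char *data_dir)
{
    DB_ENV *dbenv;
    int ret;

    if ((ret = db_env_create(&dbenv, 0)) != 0) {
        fprintf(stderr, "db_env_create: %s\n", db_strerror(ret));
        return (1);
    }
    dbenv->set_errfile(dbenv, stderr);          /* Report errors to stderr. */
    (void)dbenv->set_cachesize(dbenv, 0, 64 * 1024, 0);
    (void)dbenv->set_data_dir(dbenv, data_dir); /* Databases live here. */

    /* Flag set is illustrative; the example's full set is in context. */
    if ((ret = dbenv->open(dbenv,
        home, DB_CREATE | DB_INIT_MPOOL, 0)) != 0) {
        dbenv->err(dbenv, ret, "environment open: %s", home);
        (void)dbenv->close(dbenv, 0);
        return (1);
    }

    /* ... do something interesting ... */

    if ((ret = dbenv->close(dbenv, 0)) != 0) {
        fprintf(stderr, "DB_ENV->close: %s\n", db_strerror(ret));
        return (1);
    }

    /* Teardown: remove the shared regions using a fresh handle. */
    if ((ret = db_env_create(&dbenv, 0)) != 0)
        return (1);
    (void)dbenv->set_data_dir(dbenv, data_dir);
    if ((ret = dbenv->remove(dbenv, home, 0)) != 0) {
        fprintf(stderr, "DB_ENV->remove: %s\n", db_strerror(ret));
        return (1);
    }
    return (0);
}
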
diff --git a/bdb/examples_c/ex_lock.c b/bdb/examples_c/ex_lock.c
index e858be6b330..21a3584ceea 100644
--- a/bdb/examples_c/ex_lock.c
+++ b/bdb/examples_c/ex_lock.c
@@ -1,27 +1,28 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1997, 1998, 1999, 2000
+ * Copyright (c) 1997-2002
* Sleepycat Software. All rights reserved.
*
- * $Id: ex_lock.c,v 11.6 2001/01/04 14:23:29 dda Exp $
+ * $Id: ex_lock.c,v 11.18 2002/04/10 21:48:20 bostic Exp $
*/
-#include "db_config.h"
-
-#ifndef NO_SYSTEM_INCLUDES
#include <sys/types.h>
#include <stdlib.h>
#include <string.h>
+
+#ifdef _WIN32
+extern int getopt(int, char * const *, const char *);
+#else
#include <unistd.h>
#endif
#include <db.h>
-void db_init __P((char *, u_int32_t, int));
-int main __P((int, char *[]));
-void usage __P((void));
+int db_init __P((const char *, u_int32_t, int));
+int main __P((int, char *[]));
+int usage __P((void));
DB_ENV *dbenv;
const char
@@ -41,7 +42,8 @@ main(argc, argv)
long held;
u_int32_t len, locker, maxlocks;
int ch, do_unlink, did_get, i, lockid, lockcount, ret;
- char *home, opbuf[16], objbuf[1024], lockbuf[16];
+ const char *home;
+ char opbuf[16], objbuf[1024], lockbuf[16];
home = "TESTDIR";
maxlocks = 0;
@@ -53,7 +55,7 @@ main(argc, argv)
break;
case 'm':
if ((i = atoi(optarg)) <= 0)
- usage();
+ return (usage());
maxlocks = (u_int32_t)i; /* XXX: possible overflow. */
break;
case 'u':
@@ -61,16 +63,17 @@ main(argc, argv)
break;
case '?':
default:
- usage();
+ return (usage());
}
argc -= optind;
argv += optind;
if (argc != 0)
- usage();
+ return (usage());
/* Initialize the database environment. */
- db_init(home, maxlocks, do_unlink);
+ if ((ret = db_init(home, maxlocks, do_unlink)) != 0)
+ return (ret);
locks = 0;
lockcount = 0;
@@ -78,10 +81,10 @@ main(argc, argv)
/*
* Accept lock requests.
*/
- if ((ret = lock_id(dbenv, &locker)) != 0) {
+ if ((ret = dbenv->lock_id(dbenv, &locker)) != 0) {
dbenv->err(dbenv, ret, "unable to get locker id");
(void)dbenv->close(dbenv, 0);
- exit (1);
+ return (EXIT_FAILURE);
}
lockid = -1;
@@ -117,7 +120,7 @@ main(argc, argv)
lock_dbt.data = objbuf;
lock_dbt.size = strlen(objbuf);
- ret = lock_get(dbenv, locker,
+ ret = dbenv->lock_get(dbenv, locker,
DB_LOCK_NOWAIT, &lock_dbt, lock_type, &lock);
if (ret == 0) {
did_get = 1;
@@ -145,7 +148,7 @@ main(argc, argv)
continue;
}
lock = locks[lockid];
- ret = lock_put(dbenv, &lock);
+ ret = dbenv->lock_put(dbenv, &lock);
did_get = 0;
}
switch (ret) {
@@ -165,7 +168,7 @@ main(argc, argv)
dbenv->err(dbenv, ret,
"lock_%s", did_get ? "get" : "put");
(void)dbenv->close(dbenv, 0);
- exit (1);
+ return (EXIT_FAILURE);
}
}
@@ -177,18 +180,18 @@ main(argc, argv)
if ((ret = dbenv->close(dbenv, 0)) != 0) {
fprintf(stderr,
"%s: dbenv->close: %s\n", progname, db_strerror(ret));
- return (1);
+ return (EXIT_FAILURE);
}
- return (0);
+ return (EXIT_SUCCESS);
}
/*
* db_init --
* Initialize the environment.
*/
-void
+int
db_init(home, maxlocks, do_unlink)
- char *home;
+ const char *home;
u_int32_t maxlocks;
int do_unlink;
{
@@ -197,19 +200,19 @@ db_init(home, maxlocks, do_unlink)
if ((ret = db_env_create(&dbenv, 0)) != 0) {
fprintf(stderr, "%s: db_env_create: %s\n",
progname, db_strerror(ret));
- exit (1);
+ return (EXIT_FAILURE);
}
if (do_unlink) {
if ((ret = dbenv->remove(dbenv, home, DB_FORCE)) != 0) {
fprintf(stderr, "%s: dbenv->remove: %s\n",
progname, db_strerror(ret));
- exit (1);
+ return (EXIT_FAILURE);
}
if ((ret = db_env_create(&dbenv, 0)) != 0) {
fprintf(stderr, "%s: db_env_create: %s\n",
progname, db_strerror(ret));
- exit (1);
+ return (EXIT_FAILURE);
}
}
@@ -222,14 +225,15 @@ db_init(home, maxlocks, do_unlink)
dbenv->open(dbenv, home, DB_CREATE | DB_INIT_LOCK, 0)) != 0) {
dbenv->err(dbenv, ret, NULL);
(void)dbenv->close(dbenv, 0);
- exit(1);
+ return (EXIT_FAILURE);
}
+ return (0);
}
-void
+int
usage()
{
(void)fprintf(stderr,
"usage: %s [-u] [-h home] [-m maxlocks]\n", progname);
- exit(1);
+ return (EXIT_FAILURE);
}
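
The ex_lock changes above move the example from the old lock_id()/lock_get()/lock_put() functions to the corresponding DB_ENV methods. Stripped of the interactive loop, the new calling pattern looks roughly like this; the helper name and the choice of lock mode are for illustration only.

#include <string.h>

#include <db.h>

/*
 * lock_example --
 *	Illustrative sketch of the handle-based locking calls: lock_id(),
 *	lock_get() and lock_put() are now methods of the DB_ENV handle.
 */
int
lock_example(DB_ENV *dbenv, const char *objname)
{
    DBT obj;
    DB_LOCK lock;
    u_int32_t locker;
    int ret;

    /* Get a locker ID for this thread of control. */
    if ((ret = dbenv->lock_id(dbenv, &locker)) != 0)
        return (ret);

    /* Locks are taken on named objects, passed as a DBT. */
    memset(&obj, 0, sizeof(obj));
    obj.data = (void *)objname;
    obj.size = (u_int32_t)strlen(objname);

    /* Try to acquire a write lock without blocking. */
    if ((ret = dbenv->lock_get(dbenv,
        locker, DB_LOCK_NOWAIT, &obj, DB_LOCK_WRITE, &lock)) != 0)
        return (ret);	/* e.g., DB_LOCK_NOTGRANTED if busy. */

    /* ... operate on the locked object ... */

    return (dbenv->lock_put(dbenv, &lock));
}
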
diff --git a/bdb/examples_c/ex_mpool.c b/bdb/examples_c/ex_mpool.c
index 376c6647895..280adc48910 100644
--- a/bdb/examples_c/ex_mpool.c
+++ b/bdb/examples_c/ex_mpool.c
@@ -1,53 +1,35 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1997, 1998, 1999, 2000
+ * Copyright (c) 1997-2002
* Sleepycat Software. All rights reserved.
*
- * $Id: ex_mpool.c,v 11.13 2000/10/27 20:32:00 dda Exp $
+ * $Id: ex_mpool.c,v 11.26 2002/08/15 14:34:56 bostic Exp $
*/
-#include "db_config.h"
-
-#ifndef NO_SYSTEM_INCLUDES
#include <sys/types.h>
-#if TIME_WITH_SYS_TIME
-#include <sys/time.h>
-#include <time.h>
-#else
-#if HAVE_SYS_TIME_H
-#include <sys/time.h>
-#else
-#include <time.h>
-#endif
-#endif
-
#include <errno.h>
#include <fcntl.h>
#include <stdlib.h>
#include <string.h>
+#include <time.h>
+
+#ifdef _WIN32
+extern int getopt(int, char * const *, const char *);
+#else
#include <unistd.h>
#endif
#include <db.h>
-int init __P((char *, int, int, char *));
-int run __P((int, int, int, int, char *));
-int run_mpool __P((int, int, int, int, char *));
-#ifdef HAVE_VXWORKS
-int ex_mpool __P((void));
-#define MPOOL "/vxtmp/vxtmp/mpool" /* File. */
-#define ERROR_RETURN ERROR
-#define VXSHM_KEY 12
-#else
+int init __P((const char *, int, int, const char *));
+int run __P((int, int, int, int, const char *));
+int run_mpool __P((int, int, int, int, const char *));
int main __P((int, char *[]));
-void usage __P((char *));
+int usage __P((const char *));
#define MPOOL "mpool" /* File. */
-#define ERROR_RETURN 1
-#endif
-#ifndef HAVE_VXWORKS
int
main(argc, argv)
int argc;
@@ -67,59 +49,45 @@ main(argc, argv)
switch (ch) {
case 'c':
if ((cachesize = atoi(optarg)) < 20 * 1024)
- usage(progname);
+ return (usage(progname));
break;
case 'h':
if ((hits = atoi(optarg)) <= 0)
- usage(progname);
+ return (usage(progname));
break;
case 'n':
if ((npages = atoi(optarg)) <= 0)
- usage(progname);
+ return (usage(progname));
break;
case 'p':
if ((pagesize = atoi(optarg)) <= 0)
- usage(progname);
+ return (usage(progname));
break;
case '?':
default:
- usage(progname);
+ return (usage(progname));
}
argc -= optind;
argv += optind;
- return (run_mpool(pagesize, cachesize, hits, npages, progname));
+ return (run_mpool(pagesize, cachesize,
+ hits, npages, progname) == 0 ? EXIT_SUCCESS : EXIT_FAILURE);
}
-void
+int
usage(progname)
- char *progname;
+ const char *progname;
{
(void)fprintf(stderr,
"usage: %s [-c cachesize] [-h hits] [-n npages] [-p pagesize]\n",
progname);
- exit(1);
+ return (EXIT_FAILURE);
}
-#else
-int
-ex_mpool()
-{
- char *progname = "ex_mpool"; /* Program name. */
- int cachesize, ch, hits, npages, pagesize;
-
- cachesize = 20 * 1024;
- hits = 1000;
- npages = 50;
- pagesize = 1024;
-
- return (run_mpool(pagesize, cachesize, hits, npages, progname));
-}
-#endif
int
run_mpool(pagesize, cachesize, hits, npages, progname)
int pagesize, cachesize, hits, npages;
- char *progname;
+ const char *progname;
{
int ret;
@@ -140,41 +108,38 @@ run_mpool(pagesize, cachesize, hits, npages, progname)
*/
int
init(file, pagesize, npages, progname)
- char *file, *progname;
+ const char *file, *progname;
int pagesize, npages;
{
- int cnt, flags, fd;
+ FILE *fp;
+ int cnt;
char *p;
/*
* Create a file with the right number of pages, and store a page
* number on each page.
*/
- flags = O_CREAT | O_RDWR | O_TRUNC;
-#ifdef DB_WIN32
- flags |= O_BINARY;
-#endif
- if ((fd = open(file, flags, 0666)) < 0) {
+ if ((fp = fopen(file, "wb")) == NULL) {
fprintf(stderr,
"%s: %s: %s\n", progname, file, strerror(errno));
- return (ERROR_RETURN);
+ return (1);
}
if ((p = (char *)malloc(pagesize)) == NULL) {
fprintf(stderr, "%s: %s\n", progname, strerror(ENOMEM));
- return (ERROR_RETURN);
+ return (1);
}
/* The pages are numbered from 0. */
for (cnt = 0; cnt <= npages; ++cnt) {
*(int *)p = cnt;
- if (write(fd, p, pagesize) != pagesize) {
+ if (fwrite(p, pagesize, 1, fp) != 1) {
fprintf(stderr,
"%s: %s: %s\n", progname, file, strerror(errno));
- return (ERROR_RETURN);
+ return (1);
}
}
- (void)close(fd);
+ (void)fclose(fp);
free(p);
return (0);
}
@@ -186,14 +151,17 @@ init(file, pagesize, npages, progname)
int
run(hits, cachesize, pagesize, npages, progname)
int hits, cachesize, pagesize, npages;
- char *progname;
+ const char *progname;
{
DB_ENV *dbenv;
- DB_MPOOLFILE *dbmfp;
+ DB_MPOOLFILE *mfp;
db_pgno_t pageno;
int cnt, ret;
void *p;
+ dbenv = NULL;
+ mfp = NULL;
+
printf("%s: cachesize: %d; pagesize: %d; N pages: %d\n",
progname, cachesize, pagesize, npages);
@@ -204,35 +172,38 @@ run(hits, cachesize, pagesize, npages, progname)
if ((ret = db_env_create(&dbenv, 0)) != 0) {
fprintf(stderr,
"%s: db_env_create: %s\n", progname, db_strerror(ret));
- return (ERROR_RETURN);
+ return (1);
}
dbenv->set_errfile(dbenv, stderr);
dbenv->set_errpfx(dbenv, progname);
#ifdef HAVE_VXWORKS
if ((ret = dbenv->set_shm_key(dbenv, VXSHM_KEY)) != 0) {
dbenv->err(dbenv, ret, "set_shm_key");
- return (ERROR_RETURN);
+ return (1);
}
#endif
/* Set the cachesize. */
if ((ret = dbenv->set_cachesize(dbenv, 0, cachesize, 0)) != 0) {
dbenv->err(dbenv, ret, "set_cachesize");
- goto err1;
+ goto err;
}
/* Open the environment. */
if ((ret = dbenv->open(
dbenv, NULL, DB_CREATE | DB_INIT_MPOOL, 0)) != 0) {
- dbenv->err(dbenv, ret, "open");
- goto err1;
+ dbenv->err(dbenv, ret, "DB_ENV->open");
+ goto err;
}
/* Open the file in the environment. */
- if ((ret =
- memp_fopen(dbenv, MPOOL, 0, 0, pagesize, NULL, &dbmfp)) != 0) {
- dbenv->err(dbenv, ret, "memp_fopen: %s", MPOOL);
- goto err1;
+ if ((ret = dbenv->memp_fcreate(dbenv, &mfp, 0)) != 0) {
+ dbenv->err(dbenv, ret, "DB_ENV->memp_fcreate: %s", MPOOL);
+ goto err;
+ }
+ if ((ret = mfp->open(mfp, MPOOL, 0, 0, pagesize)) != 0) {
+ dbenv->err(dbenv, ret, "DB_MPOOLFILE->open: %s", MPOOL);
+ goto err;
}
printf("retrieve %d random pages... ", hits);
@@ -240,41 +211,43 @@ run(hits, cachesize, pagesize, npages, progname)
srand((u_int)time(NULL));
for (cnt = 0; cnt < hits; ++cnt) {
pageno = (rand() % npages) + 1;
- if ((ret = memp_fget(dbmfp, &pageno, 0, &p)) != 0) {
+ if ((ret = mfp->get(mfp, &pageno, 0, &p)) != 0) {
dbenv->err(dbenv, ret,
"unable to retrieve page %lu", (u_long)pageno);
- goto err2;
+ goto err;
}
if (*(db_pgno_t *)p != pageno) {
dbenv->errx(dbenv,
"wrong page retrieved (%lu != %d)",
(u_long)pageno, *(int *)p);
- goto err2;
+ goto err;
}
- if ((ret = memp_fput(dbmfp, p, 0)) != 0) {
+ if ((ret = mfp->put(mfp, p, 0)) != 0) {
dbenv->err(dbenv, ret,
"unable to return page %lu", (u_long)pageno);
- goto err2;
+ goto err;
}
}
printf("successful.\n");
/* Close the file. */
- if ((ret = memp_fclose(dbmfp)) != 0) {
- dbenv->err(dbenv, ret, "memp_fclose");
- goto err1;
+ if ((ret = mfp->close(mfp, 0)) != 0) {
+ dbenv->err(dbenv, ret, "DB_MPOOLFILE->close");
+ goto err;
}
/* Close the pool. */
if ((ret = dbenv->close(dbenv, 0)) != 0) {
fprintf(stderr,
"%s: db_env_create: %s\n", progname, db_strerror(ret));
- return (ERROR_RETURN);
+ return (1);
}
return (0);
-err2: (void)memp_fclose(dbmfp);
-err1: (void)dbenv->close(dbenv, 0);
- return (ERROR_RETURN);
+err: if (mfp != NULL)
+ (void)mfp->close(mfp, 0);
+ if (dbenv != NULL)
+ (void)dbenv->close(dbenv, 0);
+ return (1);
}
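
The ex_mpool changes above swap the memp_fopen()/memp_fget()/memp_fput()/memp_fclose() functions for the DB_MPOOLFILE handle methods. As a quick reference, the new handle sequence in run() reduces to the sketch below (hypothetical helper name; same calls as the example).

#include <db.h>

/*
 * mpool_example --
 *	Sketch of the DB_MPOOLFILE sequence: create the handle, open the
 *	backing file, then get/put pages by page number.
 */
int
mpool_example(DB_ENV *dbenv, const char *file, int pagesize)
{
    DB_MPOOLFILE *mfp;
    db_pgno_t pgno;
    void *p;
    int ret;

    if ((ret = dbenv->memp_fcreate(dbenv, &mfp, 0)) != 0)
        return (ret);
    if ((ret = mfp->open(mfp, file, 0, 0, pagesize)) != 0)
        goto err;

    /* Fetch page 1 of the file (assumed here to already exist). */
    pgno = 1;
    if ((ret = mfp->get(mfp, &pgno, 0, &p)) != 0)
        goto err;

    /* ... inspect or modify the page through p ... */

    ret = mfp->put(mfp, p, 0);		/* Return the page to the pool. */

err:(void)mfp->close(mfp, 0);
    return (ret);
}
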
diff --git a/bdb/examples_c/ex_repquote/ex_repquote.h b/bdb/examples_c/ex_repquote/ex_repquote.h
new file mode 100644
index 00000000000..74a0860e050
--- /dev/null
+++ b/bdb/examples_c/ex_repquote/ex_repquote.h
@@ -0,0 +1,69 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2001-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: ex_repquote.h,v 1.27 2002/04/23 04:27:50 krinsky Exp $
+ */
+
+#ifndef _EX_REPQUOTE_H_
+#define _EX_REPQUOTE_H_
+
+#define SELF_EID 1
+
+typedef struct {
+ char *host; /* Host name. */
+ u_int32_t port; /* Port on which to connect to this site. */
+} repsite_t;
+
+/* Globals */
+extern int master_eid;
+extern char *myaddr;
+
+struct __member; typedef struct __member member_t;
+struct __machtab; typedef struct __machtab machtab_t;
+
+/* Arguments for the connect_all thread. */
+typedef struct {
+ DB_ENV *dbenv;
+ const char *progname;
+ const char *home;
+ machtab_t *machtab;
+ repsite_t *sites;
+ int nsites;
+} all_args;
+
+/* Arguments for the connect_loop thread. */
+typedef struct {
+ DB_ENV *dbenv;
+ const char * home;
+ const char * progname;
+ machtab_t *machtab;
+ int port;
+} connect_args;
+
+#define CACHESIZE (10 * 1024 * 1024)
+#define DATABASE "quote.db"
+#define SLEEPTIME 3
+
+void *connect_all __P((void *args));
+void *connect_thread __P((void *args));
+int doclient __P((DB_ENV *, const char *, machtab_t *));
+int domaster __P((DB_ENV *, const char *));
+int get_accepted_socket __P((const char *, int));
+int get_connected_socket __P((machtab_t *, const char *, const char *, int, int *, int *));
+int get_next_message __P((int, DBT *, DBT *));
+int listen_socket_init __P((const char *, int));
+int listen_socket_accept __P((machtab_t *, const char *, int, int *));
+int machtab_getinfo __P((machtab_t *, int, u_int32_t *, int *));
+int machtab_init __P((machtab_t **, int, int));
+void machtab_parm __P((machtab_t *, int *, int *, u_int32_t *));
+int machtab_rem __P((machtab_t *, int, int));
+int quote_send __P((DB_ENV *, const DBT *, const DBT *, int, u_int32_t));
+
+#ifndef COMPQUIET
+#define COMPQUIET(x,y) x = (y)
+#endif
+
+#endif /* !_EX_REPQUOTE_H_ */
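
The per-thread argument structs declared in this header (connect_args, all_args) bundle what each communication thread needs. As a rough sketch of how one of them is meant to be handed to pthread_create() (mirroring what ex_rq_main.c does later in this patch; the helper name is invented):

#include <pthread.h>

#include <db.h>

#include "ex_repquote.h"

/* start_listener: hypothetical wrapper that launches the listener thread. */
int
start_listener(DB_ENV *dbenv, machtab_t *machtab, const char *progname,
    const char *home, int port, pthread_t *thrp)
{
    static connect_args ca;		/* Must outlive the thread. */

    ca.dbenv = dbenv;
    ca.home = home;
    ca.progname = progname;
    ca.machtab = machtab;
    ca.port = port;

    /* connect_thread() loops accepting connections; see ex_rq_util.c. */
    return (pthread_create(thrp, NULL, connect_thread, &ca));
}
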
diff --git a/bdb/examples_c/ex_repquote/ex_rq_client.c b/bdb/examples_c/ex_repquote/ex_rq_client.c
new file mode 100644
index 00000000000..d382fe84fc7
--- /dev/null
+++ b/bdb/examples_c/ex_repquote/ex_rq_client.c
@@ -0,0 +1,250 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2001-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: ex_rq_client.c,v 1.29 2002/01/23 15:33:19 bostic Exp $
+ */
+
+#include <sys/types.h>
+#include <sys/wait.h>
+
+#include <errno.h>
+#include <pthread.h>
+#include <signal.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+
+#include <db.h>
+
+#include "ex_repquote.h"
+
+static void *check_loop __P((void *));
+static void *display_loop __P((void *));
+static int print_stocks __P((DBC *));
+
+typedef struct {
+ const char *progname;
+ DB_ENV *dbenv;
+} disploop_args;
+
+typedef struct {
+ DB_ENV *dbenv;
+ machtab_t *machtab;
+} checkloop_args;
+
+int
+doclient(dbenv, progname, machtab)
+ DB_ENV *dbenv;
+ const char *progname;
+ machtab_t *machtab;
+{
+ checkloop_args cargs;
+ disploop_args dargs;
+ pthread_t check_thr, disp_thr;
+ void *cstatus, *dstatus;
+ int rval, s;
+
+ rval = EXIT_SUCCESS;
+ s = -1;
+
+ memset(&dargs, 0, sizeof(dargs));
+ dstatus = (void *)EXIT_FAILURE;
+
+ dargs.progname = progname;
+ dargs.dbenv = dbenv;
+ if (pthread_create(&disp_thr, NULL, display_loop, (void *)&dargs)) {
+ dbenv->err(dbenv, errno, "display_loop pthread_create failed");
+ goto err;
+ }
+
+ cargs.dbenv = dbenv;
+ cargs.machtab = machtab;
+ if (pthread_create(&check_thr, NULL, check_loop, (void *)&cargs)) {
+ dbenv->err(dbenv, errno, "check_thread pthread_create failed");
+ goto err;
+ }
+ if (pthread_join(disp_thr, &dstatus) ||
+ pthread_join(check_thr, &cstatus)) {
+ dbenv->err(dbenv, errno, "pthread_join failed");
+ goto err;
+ }
+
+ if (0) {
+err: rval = EXIT_FAILURE;
+ }
+ return (rval);
+}
+
+/*
+ * Our only job is to check that the master is valid and, if it is not
+ * for an extended period, to trigger an election. There are two phases.
+ * Our only job is to check that the master is valid and, if it is not
+ * for an extended period, to trigger an election. There are two phases.
+ * If we do not have a master, first we send out a request for a master
+ * to identify itself (that would be a call to rep_start). If that fails,
+ * we trigger an election.
+ */
+static void *
+check_loop(args)
+ void *args;
+{
+ DB_ENV *dbenv;
+ DBT dbt;
+ checkloop_args *cargs;
+ int count, n, pri;
+ machtab_t *machtab;
+ u_int32_t timeout;
+
+ cargs = (checkloop_args *)args;
+ dbenv = cargs->dbenv;
+ machtab = cargs->machtab;
+
+#define IDLE_INTERVAL 1
+
+ count = 0;
+ while (master_eid == DB_EID_INVALID) {
+ /*
+ * Call either rep_start or rep_elect depending on whether
+ * count is 0 or 1.
+ */
+
+ if (count == 0) {
+ memset(&dbt, 0, sizeof(dbt));
+ dbt.data = myaddr;
+ dbt.size = strlen(myaddr) + 1;
+ (void)dbenv->rep_start(dbenv, &dbt, DB_REP_CLIENT);
+ count = 1;
+ } else {
+ machtab_parm(machtab, &n, &pri, &timeout);
+ (void)dbenv->rep_elect(dbenv,
+ n, pri, timeout, &master_eid);
+ count = 0;
+ }
+ sleep(IDLE_INTERVAL);
+ }
+
+ return ((void *)EXIT_SUCCESS);
+}
+
+static void *
+display_loop(args)
+ void *args;
+{
+ DB *dbp;
+ DB_ENV *dbenv;
+ DBC *dbc;
+ const char *progname;
+ disploop_args *dargs;
+ int ret, rval;
+
+ dargs = (disploop_args *)args;
+ progname = dargs->progname;
+ dbenv = dargs->dbenv;
+
+ dbc = NULL;
+ dbp = NULL;
+
+ for (;;) {
+ /* If we become master, shut this loop off. */
+ if (master_eid == SELF_EID)
+ break;
+
+ if (dbp == NULL) {
+ if ((ret = db_create(&dbp, dbenv, 0)) != 0) {
+ dbenv->err(dbenv, ret, "db_create");
+ return ((void *)EXIT_FAILURE);
+ }
+
+ if ((ret = dbp->open(dbp, NULL,
+ DATABASE, NULL, DB_BTREE, DB_RDONLY, 0)) != 0) {
+ if (ret == ENOENT) {
+ printf(
+ "No stock database yet available.\n");
+ if ((ret = dbp->close(dbp, 0)) != 0) {
+ dbenv->err(dbenv,
+ ret, "DB->close");
+ goto err;
+ }
+ dbp = NULL;
+ sleep(SLEEPTIME);
+ continue;
+ }
+ dbenv->err(dbenv, ret, "DB->open");
+ goto err;
+ }
+ }
+
+ if ((ret = dbp->cursor(dbp, NULL, &dbc, 0)) != 0) {
+ dbenv->err(dbenv, ret, "DB->cursor");
+ goto err;
+ }
+
+ if ((ret = print_stocks(dbc)) != 0) {
+ dbenv->err(dbenv, ret, "database traversal failed");
+ goto err;
+ }
+
+ if ((ret = dbc->c_close(dbc)) != 0) {
+ dbenv->err(dbenv, ret, "DB->close");
+ goto err;
+ }
+
+ dbc = NULL;
+
+ sleep(SLEEPTIME);
+ }
+
+ rval = EXIT_SUCCESS;
+
+ if (0) {
+err: rval = EXIT_FAILURE;
+ }
+
+ if (dbc != NULL && (ret = dbc->c_close(dbc)) != 0) {
+ dbenv->err(dbenv, ret, "DB->close");
+ rval = EXIT_FAILURE;
+ }
+
+ if (dbp != NULL && (ret = dbp->close(dbp, 0)) != 0) {
+ dbenv->err(dbenv, ret, "DB->close");
+ return ((void *)EXIT_FAILURE);
+ }
+
+ return ((void *)rval);
+}
+
+static int
+print_stocks(dbc)
+ DBC *dbc;
+{
+ DBT key, data;
+#define MAXKEYSIZE 10
+#define MAXDATASIZE 20
+ char keybuf[MAXKEYSIZE + 1], databuf[MAXDATASIZE + 1];
+ int ret;
+ u_int32_t keysize, datasize;
+
+ memset(&key, 0, sizeof(key));
+ memset(&data, 0, sizeof(data));
+
+ printf("\tSymbol\tPrice\n");
+ printf("\t======\t=====\n");
+
+ for (ret = dbc->c_get(dbc, &key, &data, DB_FIRST);
+ ret == 0;
+ ret = dbc->c_get(dbc, &key, &data, DB_NEXT)) {
+ keysize = key.size > MAXKEYSIZE ? MAXKEYSIZE : key.size;
+ memcpy(keybuf, key.data, keysize);
+ keybuf[keysize] = '\0';
+
+ datasize = data.size >= MAXDATASIZE ? MAXDATASIZE : data.size;
+ memcpy(databuf, data.data, datasize);
+ databuf[datasize] = '\0';
+
+ printf("\t%s\t%s\n", keybuf, databuf);
+ }
+ printf("\n");
+ return (ret == DB_NOTFOUND ? 0 : ret);
+}
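
check_loop() above alternates between two phases when no master is known: it first calls rep_start() as a client (which prompts any existing master to announce itself), and only if that produces nothing does it call an election. Reduced to its core, the pattern is roughly the sketch below; the helper name is invented, master_eid and machtab_parm() come from ex_repquote.h, and the calls are the same 4.1 replication API used by the example.

#include <string.h>
#include <unistd.h>

#include <db.h>

#include "ex_repquote.h"

/* find_master: sketch of the client's two-phase search for a master. */
void
find_master(DB_ENV *dbenv, machtab_t *machtab, const char *my_hostport)
{
    DBT dbt;
    int n, pri;
    u_int32_t timeout;

    while (master_eid == DB_EID_INVALID) {
        /* Phase 1: ask any existing master to identify itself. */
        memset(&dbt, 0, sizeof(dbt));
        dbt.data = (void *)my_hostport;
        dbt.size = strlen(my_hostport) + 1;
        (void)dbenv->rep_start(dbenv, &dbt, DB_REP_CLIENT);
        sleep(1);
        if (master_eid != DB_EID_INVALID)
            break;

        /* Phase 2: no answer, so call an election. */
        machtab_parm(machtab, &n, &pri, &timeout);
        (void)dbenv->rep_elect(dbenv, n, pri, timeout, &master_eid);
        sleep(1);
    }
}
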
diff --git a/bdb/examples_c/ex_repquote/ex_rq_main.c b/bdb/examples_c/ex_repquote/ex_rq_main.c
new file mode 100644
index 00000000000..03819728679
--- /dev/null
+++ b/bdb/examples_c/ex_repquote/ex_rq_main.c
@@ -0,0 +1,303 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2001-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: ex_rq_main.c,v 1.23 2002/08/06 05:39:03 bostic Exp $
+ */
+
+#include <sys/types.h>
+#include <pthread.h>
+
+#include <errno.h>
+#include <signal.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+
+#include <db.h>
+
+#include "ex_repquote.h"
+
+/*
+ * Process globals (we could put these in the machtab, I suppose).
+ */
+int master_eid;
+char *myaddr;
+
+static int env_init __P((const char *, const char *, DB_ENV **, machtab_t *,
+ u_int32_t));
+static void usage __P((const char *));
+
+int
+main(argc, argv)
+ int argc;
+ char *argv[];
+{
+ extern char *optarg;
+ extern int optind;
+ DB_ENV *dbenv;
+ DBT local;
+ enum { MASTER, CLIENT, UNKNOWN } whoami;
+ all_args aa;
+ connect_args ca;
+ machtab_t *machtab;
+ pthread_t all_thr, conn_thr;
+ repsite_t site, *sitep, self, *selfp;
+ struct sigaction sigact;
+ int maxsites, nsites, ret, priority, totalsites;
+ char *c, ch;
+ const char *home, *progname;
+ void *astatus, *cstatus;
+
+ master_eid = DB_EID_INVALID;
+
+ dbenv = NULL;
+ whoami = UNKNOWN;
+ machtab = NULL;
+ selfp = sitep = NULL;
+ maxsites = nsites = ret = totalsites = 0;
+ priority = 100;
+ home = "TESTDIR";
+ progname = "ex_repquote";
+
+ while ((ch = getopt(argc, argv, "Ch:Mm:n:o:p:")) != EOF)
+ switch (ch) {
+ case 'M':
+ whoami = MASTER;
+ master_eid = SELF_EID;
+ break;
+ case 'C':
+ whoami = CLIENT;
+ break;
+ case 'h':
+ home = optarg;
+ break;
+ case 'm':
+ if ((myaddr = strdup(optarg)) == NULL) {
+ fprintf(stderr,
+ "System error %s\n", strerror(errno));
+ goto err;
+ }
+ self.host = optarg;
+ self.host = strtok(self.host, ":");
+ if ((c = strtok(NULL, ":")) == NULL) {
+ fprintf(stderr, "Bad host specification.\n");
+ goto err;
+ }
+ self.port = atoi(c);
+ selfp = &self;
+ break;
+ case 'n':
+ totalsites = atoi(optarg);
+ break;
+ case 'o':
+ site.host = optarg;
+ site.host = strtok(site.host, ":");
+ if ((c = strtok(NULL, ":")) == NULL) {
+ fprintf(stderr, "Bad host specification.\n");
+ goto err;
+ }
+ site.port = atoi(c);
+ if (sitep == NULL || nsites >= maxsites) {
+ maxsites = maxsites == 0 ? 10 : 2 * maxsites;
+ if ((sitep = realloc(sitep,
+ maxsites * sizeof(repsite_t))) == NULL) {
+ fprintf(stderr, "System error %s\n",
+ strerror(errno));
+ goto err;
+ }
+ }
+ sitep[nsites++] = site;
+ break;
+ case 'p':
+ priority = atoi(optarg);
+ break;
+ case '?':
+ default:
+ usage(progname);
+ }
+
+ /* Error check command line. */
+ if (whoami == UNKNOWN) {
+ fprintf(stderr, "Must specify -M or -C.\n");
+ goto err;
+ }
+
+ if (selfp == NULL)
+ usage(progname);
+
+ if (home == NULL)
+ usage(progname);
+
+ /*
+ * Turn off SIGPIPE so that we don't kill processes when they
+ * happen to lose a connection at the wrong time.
+ */
+ memset(&sigact, 0, sizeof(sigact));
+ sigact.sa_handler = SIG_IGN;
+ if ((ret = sigaction(SIGPIPE, &sigact, NULL)) != 0) {
+ fprintf(stderr,
+ "Unable to turn off SIGPIPE: %s\n", strerror(ret));
+ goto err;
+ }
+
+ /*
+ * We are hardcoding priorities here so that all clients have the
+ * same priority except for a designated master, which gets a higher
+ * priority.
+ */
+ if ((ret =
+ machtab_init(&machtab, priority, totalsites)) != 0)
+ goto err;
+
+ /*
+ * We can now open our environment, although we're not ready to
+ * begin replicating. However, we want to have a dbenv around
+ * so that we can send it into any of our message handlers.
+ */
+ if ((ret = env_init(progname, home, &dbenv, machtab, DB_RECOVER)) != 0)
+ goto err;
+
+ /*
+ * Now set up the comm infrastructure. There are two phases. First,
+ * we open our port for listening for incoming connections. Then
+ * we attempt to connect to every host we know about.
+ */
+
+ ca.dbenv = dbenv;
+ ca.home = home;
+ ca.progname = progname;
+ ca.machtab = machtab;
+ ca.port = selfp->port;
+ if ((ret = pthread_create(&conn_thr, NULL, connect_thread, &ca)) != 0)
+ goto err;
+
+ aa.dbenv = dbenv;
+ aa.progname = progname;
+ aa.home = home;
+ aa.machtab = machtab;
+ aa.sites = sitep;
+ aa.nsites = nsites;
+ if ((ret = pthread_create(&all_thr, NULL, connect_all, &aa)) != 0)
+ goto err;
+
+ /*
+ * We have now got the entire communication infrastructure set up.
+ * It's time to declare ourselves to be a client or master.
+ */
+ if (whoami == MASTER) {
+ if ((ret = dbenv->rep_start(dbenv, NULL, DB_REP_MASTER)) != 0) {
+ dbenv->err(dbenv, ret, "dbenv->rep_start failed");
+ goto err;
+ }
+ if ((ret = domaster(dbenv, progname)) != 0) {
+ dbenv->err(dbenv, ret, "Master failed");
+ goto err;
+ }
+ } else {
+ memset(&local, 0, sizeof(local));
+ local.data = myaddr;
+ local.size = strlen(myaddr) + 1;
+ if ((ret =
+ dbenv->rep_start(dbenv, &local, DB_REP_CLIENT)) != 0) {
+ dbenv->err(dbenv, ret, "dbenv->rep_start failed");
+ goto err;
+ }
+ /* Sleep to give ourselves a moment to find a master. */
+ sleep(5);
+ if ((ret = doclient(dbenv, progname, machtab)) != 0) {
+ dbenv->err(dbenv, ret, "Client failed");
+ goto err;
+ }
+
+ }
+
+ /* Wait on the connection threads. */
+ if (pthread_join(all_thr, &astatus) || pthread_join(conn_thr, &cstatus))
+ ret = errno;
+ if (ret == 0 &&
+ ((int)astatus != EXIT_SUCCESS || (int)cstatus != EXIT_SUCCESS))
+ ret = -1;
+
+err: if (machtab != NULL)
+ free(machtab);
+ if (dbenv != NULL)
+ (void)dbenv->close(dbenv, 0);
+ return (ret);
+}
+
+/*
+ * In this application, we specify all communication via the command line.
+ * In a real application, we would expect that information about the other
+ * sites in the system would be maintained in some sort of configuration
+ * file. The critical part of this interface is that we assume at startup
+ * that we can find out 1) what host/port we wish to listen on for connections,
+ * 2) a (possibly empty) list of other sites we should attempt to connect to.
+ * 3) whether we are a master or client (if we don't know, we should come up
+ * as a client and see if there is a master out there) and 4) what our
+ * Berkeley DB home environment is.
+ *
+ * These pieces of information are expressed by the following flags.
+ * -m host:port (required; m stands for me)
+ * -o host:port (optional; o stands for other; any number of these may be
+ * specified)
+ * -[MC] M for master/C for client
+ * -h home directory
+ * -n nsites (optional; number of sites in replication group; defaults to 0
+ * in which case we try to dynamically compute the number of sites in
+ * the replication group.)
+ * -p priority (optional: defaults to 100)
+ */
+static void
+usage(progname)
+ const char *progname;
+{
+ fprintf(stderr, "usage: %s ", progname);
+ fprintf(stderr, "[-CM][-h home][-o host:port][-m host:port]%s",
+ "[-n nsites][-p priority]\n");
+ exit(EXIT_FAILURE);
+}
+
+/* Open and configure an environment. */
+int
+env_init(progname, home, dbenvp, machtab, flags)
+ const char *progname, *home;
+ DB_ENV **dbenvp;
+ machtab_t *machtab;
+ u_int32_t flags;
+{
+ DB_ENV *dbenv;
+ int ret;
+ char *prefix;
+
+ if ((prefix = malloc(strlen(progname) + 2)) == NULL) {
+ fprintf(stderr,
+ "%s: System error: %s\n", progname, strerror(errno));
+ return (errno);
+ }
+ sprintf(prefix, "%s:", progname);
+
+ if ((ret = db_env_create(&dbenv, 0)) != 0) {
+ fprintf(stderr, "%s: env create failed: %s\n",
+ progname, db_strerror(ret));
+ return (ret);
+ }
+ dbenv->set_errfile(dbenv, stderr);
+ dbenv->set_errpfx(dbenv, prefix);
+ /* (void)dbenv->set_verbose(dbenv, DB_VERB_REPLICATION, 1); */
+ (void)dbenv->set_cachesize(dbenv, 0, CACHESIZE, 0);
+ /* (void)dbenv->set_flags(dbenv, DB_TXN_NOSYNC, 1); */
+
+ dbenv->app_private = machtab;
+ (void)dbenv->set_rep_transport(dbenv, SELF_EID, quote_send);
+
+ flags |= DB_CREATE | DB_THREAD |
+ DB_INIT_LOCK | DB_INIT_LOG | DB_INIT_MPOOL | DB_INIT_TXN;
+
+ ret = dbenv->open(dbenv, home, flags, 0);
+
+ *dbenvp = dbenv;
+ return (ret);
+}
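
Both the -m and -o options above split a host:port argument with strtok(). Pulled out into a helper for clarity (the function name is invented; the logic is the same as in main(), including the in-place modification of the argument string):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include <db.h>

#include "ex_repquote.h"

/*
 * parse_hostport --
 *	Split "host:port" into a repsite_t. The string is modified in
 *	place by strtok(), so pass a writable copy.
 */
int
parse_hostport(char *spec, repsite_t *site)
{
    char *p;

    site->host = strtok(spec, ":");
    if ((p = strtok(NULL, ":")) == NULL) {
        fprintf(stderr, "Bad host specification.\n");
        return (1);
    }
    site->port = atoi(p);
    return (0);
}
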
diff --git a/bdb/examples_c/ex_repquote/ex_rq_master.c b/bdb/examples_c/ex_repquote/ex_rq_master.c
new file mode 100644
index 00000000000..bb3f37f55b9
--- /dev/null
+++ b/bdb/examples_c/ex_repquote/ex_rq_master.c
@@ -0,0 +1,165 @@
+/*-
+ * #include <pthread.h>
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2001-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: ex_rq_master.c,v 1.22 2002/08/06 05:39:03 bostic Exp $
+ */
+
+#include <sys/types.h>
+
+#include <errno.h>
+#include <pthread.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+
+#include <db.h>
+
+#include "ex_repquote.h"
+
+static void *master_loop __P((void *));
+
+#define BUFSIZE 1024
+
+int
+domaster(dbenv, progname)
+ DB_ENV *dbenv;
+ const char *progname;
+{
+ int ret, t_ret;
+ pthread_t interface_thr;
+ pthread_attr_t attr;
+
+ COMPQUIET(progname, NULL);
+
+ /* Spawn off a thread to handle the basic master interface. */
+ if ((ret = pthread_attr_init(&attr)) != 0 &&
+ (ret = pthread_attr_setdetachstate(&attr,
+ PTHREAD_CREATE_DETACHED)) != 0)
+ goto err;
+
+ if ((ret = pthread_create(&interface_thr,
+ &attr, master_loop, (void *)dbenv)) != 0)
+ goto err;
+
+err: if ((t_ret = pthread_attr_destroy(&attr)) != 0 && ret == 0)
+ ret = t_ret;
+
+ return (ret);
+}
+
+static void *
+master_loop(dbenvv)
+ void *dbenvv;
+{
+ DB *dbp;
+ DB_ENV *dbenv;
+ DB_TXN *txn;
+ DBT key, data;
+ char buf[BUFSIZE], *rbuf;
+ int ret;
+
+ dbp = NULL;
+ txn = NULL;
+
+ dbenv = (DB_ENV *)dbenvv;
+ /*
+ * Check if the database exists and if it verifies cleanly.
+ * If it does, run with it; else recreate it and go. Note
+ * that we have to verify outside of the environment.
+ */
+#ifdef NOTDEF
+ if ((ret = db_create(&dbp, NULL, 0)) != 0)
+ return (ret);
+ if ((ret = dbp->verify(dbp, DATABASE, NULL, NULL, 0)) != 0) {
+ if ((ret = dbp->remove(dbp, DATABASE, NULL, 0)) != 0 &&
+ ret != DB_NOTFOUND && ret != ENOENT)
+ return (ret);
+#endif
+ if ((ret = db_create(&dbp, dbenv, 0)) != 0)
+ return ((void *)ret);
+
+ if ((ret = dbenv->txn_begin(dbenv, NULL, &txn, 0)) != 0)
+ goto err;
+ if ((ret = dbp->open(dbp, txn, DATABASE,
+ NULL, DB_BTREE, DB_CREATE /* | DB_THREAD */, 0)) != 0)
+ goto err;
+ ret = txn->commit(txn, 0);
+ txn = NULL;
+ if (ret != 0) {
+ dbp = NULL;
+ goto err;
+ }
+
+#ifdef NOTDEF
+ } else {
+ /* Reopen in the environment. */
+ if ((ret = dbp->close(dbp, 0)) != 0)
+ return (ret);
+ if ((ret = db_create(&dbp, dbenv, 0)) != 0)
+ return (ret);
+ if ((ret = dbp->open(dbp,
+ DATABASE, NULL, DB_UNKNOWN, DB_THREAD, 0)) != 0)
+ goto err;
+ }
+#endif
+ /*
+ * XXX
+ * It would probably be kind of cool to do this in Tcl and
+ * have a nice GUI. It would also be cool to be independently
+ * wealthy.
+ */
+ memset(&key, 0, sizeof(key));
+ memset(&data, 0, sizeof(data));
+ for (;;) {
+ printf("QUOTESERVER> ");
+ fflush(stdout);
+
+ if (fgets(buf, sizeof(buf), stdin) == NULL)
+ break;
+ (void)strtok(&buf[0], " \t\n");
+ rbuf = strtok(NULL, " \t\n");
+ if (rbuf == NULL || rbuf[0] == '\0') {
+ if (strncmp(buf, "exit", 4) == 0 ||
+ strncmp(buf, "quit", 4) == 0)
+ break;
+ dbenv->errx(dbenv, "Format: TICKER VALUE");
+ continue;
+ }
+
+ key.data = buf;
+ key.size = strlen(buf);
+
+ data.data = rbuf;
+ data.size = strlen(rbuf);
+
+ if ((ret = dbenv->txn_begin(dbenv, NULL, &txn, 0)) != 0)
+ goto err;
+ switch (ret =
+ dbp->put(dbp, txn, &key, &data, 0)) {
+ case 0:
+ break;
+ default:
+ dbp->err(dbp, ret, "DB->put");
+ if (ret != DB_KEYEXIST)
+ goto err;
+ break;
+ }
+ ret = txn->commit(txn, 0);
+ txn = NULL;
+ if (ret != 0)
+ goto err;
+ }
+
+err: if (txn != NULL)
+ (void)txn->abort(txn);
+
+ if (dbp != NULL)
+ (void)dbp->close(dbp, DB_NOSYNC);
+
+ return ((void *)ret);
+}
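
master_loop() above wraps each DB->put() in its own transaction. With the prompt loop stripped away, the core write path reduces to roughly the following (illustrative helper name; the txn_begin/put/commit calls are the ones the example uses):

#include <string.h>

#include <db.h>

/* put_quote: store one TICKER/VALUE pair under its own transaction. */
int
put_quote(DB_ENV *dbenv, DB *dbp, const char *ticker, const char *value)
{
    DBT key, data;
    DB_TXN *txn;
    int ret;

    memset(&key, 0, sizeof(key));
    memset(&data, 0, sizeof(data));
    key.data = (void *)ticker;
    key.size = (u_int32_t)strlen(ticker);
    data.data = (void *)value;
    data.size = (u_int32_t)strlen(value);

    if ((ret = dbenv->txn_begin(dbenv, NULL, &txn, 0)) != 0)
        return (ret);
    if ((ret = dbp->put(dbp, txn, &key, &data, 0)) != 0) {
        dbp->err(dbp, ret, "DB->put");
        (void)txn->abort(txn);
        return (ret);
    }
    return (txn->commit(txn, 0));
}
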
diff --git a/bdb/examples_c/ex_repquote/ex_rq_net.c b/bdb/examples_c/ex_repquote/ex_rq_net.c
new file mode 100644
index 00000000000..1a6d26488d6
--- /dev/null
+++ b/bdb/examples_c/ex_repquote/ex_rq_net.c
@@ -0,0 +1,692 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2001-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: ex_rq_net.c,v 1.37 2002/08/06 05:39:04 bostic Exp $
+ */
+
+#include <sys/types.h>
+
+#include <netinet/in.h>
+#include <sys/socket.h>
+#include <sys/wait.h>
+
+#include <assert.h>
+#include <errno.h>
+#include <netdb.h>
+#include <pthread.h>
+#include <signal.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+
+#include <db.h>
+#include <dbinc/queue.h> /* !!!: for the LIST_XXX macros. */
+
+#include "ex_repquote.h"
+
+int machtab_add __P((machtab_t *, int, u_int32_t, int, int *));
+ssize_t readn __P((int, void *, size_t));
+
+/*
+ * This file defines the communication infrastructure for the ex_repquote
+ * sample application.
+ *
+ * This application uses TCP/IP for its communication. In an N-site
+ * replication group, this means that there are N * N communication
+ * channels so that every site can communicate with every other site
+ * (this allows elections to be held when the master fails). We do
+ * not require that anyone know about all sites when the application
+ * starts up. In order to communicate, the application should know
+ * about someone, else it has no idea how to ever get in the game.
+ *
+ * Communication is handled via a number of different threads. These
+ * thread functions are implemented in rep_util.c. In this file, we
+ * define the data structures that maintain the state that describes
+ * the comm infrastructure, the functions that manipulates this state
+ * and the routines used to actually send and receive data over the
+ * sockets.
+ */
+
+/*
+ * The communication infrastructure is represented by a machine table,
+ * machtab_t, which is essentially a mutex-protected linked list of members
+ * of the group. The machtab also contains the parameters that are needed
+ * to call for an election. We hardwire values for these parameters in the
+ * init function, but these could be set via some configuration setup in a
+ * real application. We reserve the machine-id 1 to refer to ourselves and
+ * make the machine-id 0 be invalid.
+ */
+
+#define MACHID_INVALID 0
+#define MACHID_SELF 1
+
+struct __machtab {
+ LIST_HEAD(__machlist, __member) machlist;
+ int nextid;
+ pthread_mutex_t mtmutex;
+ u_int32_t timeout_time;
+ int current;
+ int max;
+ int nsites;
+ int priority;
+};
+
+/* Data structure that describes each entry in the machtab. */
+struct __member {
+ u_int32_t hostaddr; /* Host IP address. */
+ int port; /* Port number. */
+ int eid; /* Application-specific machine id. */
+ int fd; /* File descriptor for the socket. */
+ LIST_ENTRY(__member) links;
+ /* For linked list of all members we know of. */
+};
+
+static int quote_send_broadcast __P((machtab_t *,
+ const DBT *, const DBT *, u_int32_t));
+static int quote_send_one __P((const DBT *, const DBT *, int, u_int32_t));
+
+/*
+ * machtab_init --
+ * Initialize the machine ID table.
+ * XXX Right now we treat the number of sites as the maximum
+ * number we've ever had on the list at one time. We probably
+ * want to make that smarter.
+ */
+int
+machtab_init(machtabp, pri, nsites)
+ machtab_t **machtabp;
+ int pri, nsites;
+{
+ int ret;
+ machtab_t *machtab;
+
+ if ((machtab = malloc(sizeof(machtab_t))) == NULL)
+ return (ENOMEM);
+
+ LIST_INIT(&machtab->machlist);
+
+ /* Reserve eid's 0 and 1. */
+ machtab->nextid = 2;
+ machtab->timeout_time = 2 * 1000000; /* 2 seconds. */
+ machtab->current = machtab->max = 0;
+ machtab->priority = pri;
+ machtab->nsites = nsites;
+
+ ret = pthread_mutex_init(&machtab->mtmutex, NULL);
+
+ *machtabp = machtab;
+
+ return (ret);
+}
+
+/*
+ * machtab_add --
+ * Add a file descriptor to the table of machines, returning
+ * a new machine ID.
+ */
+int
+machtab_add(machtab, fd, hostaddr, port, idp)
+ machtab_t *machtab;
+ int fd;
+ u_int32_t hostaddr;
+ int port, *idp;
+{
+ int ret;
+ member_t *m, *member;
+
+ if ((member = malloc(sizeof(member_t))) == NULL)
+ return (ENOMEM);
+
+ member->fd = fd;
+ member->hostaddr = hostaddr;
+ member->port = port;
+
+ if ((ret = pthread_mutex_lock(&machtab->mtmutex)) != 0)
+ return (ret);
+
+ for (m = LIST_FIRST(&machtab->machlist);
+ m != NULL; m = LIST_NEXT(m, links))
+ if (m->hostaddr == hostaddr && m->port == port)
+ break;
+
+ if (m == NULL) {
+ member->eid = machtab->nextid++;
+ LIST_INSERT_HEAD(&machtab->machlist, member, links);
+ } else
+ member->eid = m->eid;
+
+ ret = pthread_mutex_unlock(&machtab->mtmutex);
+
+ if (idp != NULL)
+ *idp = member->eid;
+
+ if (m == NULL) {
+ if (++machtab->current > machtab->max)
+ machtab->max = machtab->current;
+ } else {
+ free(member);
+ ret = EEXIST;
+ }
+ return (ret);
+}
+
+/*
+ * machtab_getinfo --
+ * Return host and port information for a particular machine id.
+ */
+int
+machtab_getinfo(machtab, eid, hostp, portp)
+ machtab_t *machtab;
+ int eid;
+ u_int32_t *hostp;
+ int *portp;
+{
+ int ret;
+ member_t *member;
+
+ if ((ret = pthread_mutex_lock(&machtab->mtmutex)) != 0)
+ return (ret);
+
+ for (member = LIST_FIRST(&machtab->machlist);
+ member != NULL;
+ member = LIST_NEXT(member, links))
+ if (member->eid == eid) {
+ *hostp = member->hostaddr;
+ *portp = member->port;
+ break;
+ }
+
+ if ((ret = pthread_mutex_unlock(&machtab->mtmutex)) != 0)
+ return (ret);
+
+ return (member != NULL ? 0 : EINVAL);
+}
+
+/*
+ * machtab_rem --
+ * Remove a mapping from the table of machines. Lock indicates
+ * whether we need to lock the machtab or not (0 indicates we do not
+ * need to lock; non-zero indicates that we do need to lock).
+ */
+int
+machtab_rem(machtab, eid, lock)
+ machtab_t *machtab;
+ int eid;
+ int lock;
+{
+ int found, ret;
+ member_t *member;
+
+ ret = 0;
+ if (lock && (ret = pthread_mutex_lock(&machtab->mtmutex)) != 0)
+ return (ret);
+
+ for (found = 0, member = LIST_FIRST(&machtab->machlist);
+ member != NULL;
+ member = LIST_NEXT(member, links))
+ if (member->eid == eid) {
+ found = 1;
+ LIST_REMOVE(member, links);
+ (void)close(member->fd);
+ free(member);
+ machtab->current--;
+ break;
+ }
+
+ if (LIST_FIRST(&machtab->machlist) == NULL)
+ machtab->nextid = 2;
+
+ if (lock)
+ ret = pthread_mutex_unlock(&machtab->mtmutex);
+
+ return (ret);
+}
+
+void
+machtab_parm(machtab, nump, prip, timeoutp)
+ machtab_t *machtab;
+ int *nump, *prip;
+ u_int32_t *timeoutp;
+{
+ if (machtab->nsites == 0)
+ *nump = machtab->max;
+ else
+ *nump = machtab->nsites;
+ *prip = machtab->priority;
+ *timeoutp = machtab->timeout_time;
+}
+
+/*
+ * listen_socket_init --
+ * Initialize a socket for listening on the specified port. Returns
+ * a file descriptor for the socket, ready for an accept() call
+ * in a thread that we're happy to let block.
+ */
+int
+listen_socket_init(progname, port)
+ const char *progname;
+ int port;
+{
+ int s;
+ struct protoent *proto;
+ struct sockaddr_in si;
+
+ if ((proto = getprotobyname("tcp")) == NULL)
+ return (-1);
+
+ if ((s = socket(AF_INET, SOCK_STREAM, proto->p_proto)) < 0)
+ return (-1);
+
+ memset(&si, 0, sizeof(si));
+ si.sin_family = AF_INET;
+ si.sin_addr.s_addr = htonl(INADDR_ANY);
+ si.sin_port = htons(port);
+
+ if (bind(s, (struct sockaddr *)&si, sizeof(si)) != 0)
+ goto err;
+
+ if (listen(s, 5) != 0)
+ goto err;
+
+ return (s);
+
+err: fprintf(stderr, "%s: %s", progname, strerror(errno));
+ close (s);
+ return (-1);
+}
+
+/*
+ * listen_socket_accept --
+ * Accept a connection on a socket. This is essentially just a wrapper
+ * for accept(3).
+ */
+int
+listen_socket_accept(machtab, progname, s, eidp)
+ machtab_t *machtab;
+ const char *progname;
+ int s, *eidp;
+{
+ struct sockaddr_in si;
+ int si_len;
+ int host, ns, port, ret;
+
+ COMPQUIET(progname, NULL);
+
+wait: memset(&si, 0, sizeof(si));
+ si_len = sizeof(si);
+ ns = accept(s, (struct sockaddr *)&si, &si_len);
+ host = ntohl(si.sin_addr.s_addr);
+ port = ntohs(si.sin_port);
+ ret = machtab_add(machtab, ns, host, port, eidp);
+ if (ret == EEXIST) {
+ close(ns);
+ goto wait;
+ } else if (ret != 0)
+ goto err;
+
+ return (ns);
+
+err: close(ns);
+ return (-1);
+}
+
+/*
+ * get_accepted_socket --
+ * Listen on the specified port, and return a file descriptor
+ * when we have accepted a connection on it.
+ */
+int
+get_accepted_socket(progname, port)
+ const char *progname;
+ int port;
+{
+ struct protoent *proto;
+ struct sockaddr_in si;
+ int si_len;
+ int s, ns;
+
+ if ((proto = getprotobyname("tcp")) == NULL)
+ return (-1);
+
+ if ((s = socket(AF_INET, SOCK_STREAM, proto->p_proto)) < 0)
+ return (-1);
+
+ memset(&si, 0, sizeof(si));
+ si.sin_family = AF_INET;
+ si.sin_addr.s_addr = htonl(INADDR_ANY);
+ si.sin_port = htons(port);
+
+ if (bind(s, (struct sockaddr *)&si, sizeof(si)) != 0)
+ goto err;
+
+ if (listen(s, 5) != 0)
+ goto err;
+
+ memset(&si, 0, sizeof(si));
+ si_len = sizeof(si);
+ ns = accept(s, (struct sockaddr *)&si, &si_len);
+
+ return (ns);
+
+err: fprintf(stderr, "%s: %s", progname, strerror(errno));
+ close (s);
+ return (-1);
+}
+
+/*
+ * get_connected_socket --
+ * Connect to the specified port of the specified remote machine,
+ * and return a file descriptor once the connection has been established.
+ * Add this connection to the machtab. If we already have a connection
+ * open to this machine, then don't create another one, return the eid
+ * of the connection (in *eidp) and set is_open to 1. Return 0.
+ */
+int
+get_connected_socket(machtab, progname, remotehost, port, is_open, eidp)
+ machtab_t *machtab;
+ const char *progname, *remotehost;
+ int port, *is_open, *eidp;
+{
+ int ret, s;
+ struct hostent *hp;
+ struct protoent *proto;
+ struct sockaddr_in si;
+ u_int32_t addr;
+
+ *is_open = 0;
+
+ if ((proto = getprotobyname("tcp")) == NULL)
+ return (-1);
+
+ if ((hp = gethostbyname(remotehost)) == NULL) {
+ fprintf(stderr, "%s: host not found: %s\n", progname,
+ strerror(errno));
+ return (-1);
+ }
+
+ if ((s = socket(AF_INET, SOCK_STREAM, proto->p_proto)) < 0)
+ return (-1);
+ memset(&si, 0, sizeof(si));
+ memcpy((char *)&si.sin_addr, hp->h_addr, hp->h_length);
+ addr = ntohl(si.sin_addr.s_addr);
+ ret = machtab_add(machtab, s, addr, port, eidp);
+ if (ret == EEXIST) {
+ *is_open = 1;
+ close(s);
+ return (0);
+ } else if (ret != 0) {
+ close (s);
+ return (-1);
+ }
+
+ si.sin_family = AF_INET;
+ si.sin_port = htons(port);
+ if (connect(s, (struct sockaddr *)&si, sizeof(si)) < 0) {
+ fprintf(stderr, "%s: connection failed: %s",
+ progname, strerror(errno));
+ (void)machtab_rem(machtab, *eidp, 1);
+ return (-1);
+ }
+
+ return (s);
+}
+
+/*
+ * get_next_message --
+ * Read a single message from the specified file descriptor, and
+ * return it in the format used by rep functions (two DBTs and a type).
+ *
+ * This function is called in a loop by both clients and masters, and
+ * the resulting DBTs are manually dispatched to DB_ENV->rep_process_message().
+ */
+int
+get_next_message(fd, rec, control)
+ int fd;
+ DBT *rec, *control;
+{
+ size_t nr;
+ u_int32_t rsize, csize;
+ u_int8_t *recbuf, *controlbuf;
+
+ /*
+ * The protocol we use on the wire is dead simple:
+ *
+ * 4 bytes - rec->size
+ * (# read above) - rec->data
+ * 4 bytes - control->size
+ * (# read above) - control->data
+ */
+
+ /* Read rec->size. */
+ nr = readn(fd, &rsize, 4);
+ if (nr != 4)
+ return (1);
+
+ /* Read the record itself. */
+ if (rsize > 0) {
+ if (rec->size < rsize)
+ rec->data = realloc(rec->data, rsize);
+ recbuf = rec->data;
+ nr = readn(fd, recbuf, rsize);
+ } else {
+ if (rec->data != NULL)
+ free(rec->data);
+ rec->data = NULL;
+ }
+ rec->size = rsize;
+
+ /* Read control->size. */
+ nr = readn(fd, &csize, 4);
+ if (nr != 4)
+ return (1);
+
+ /* Read the control struct itself. */
+ if (csize > 0) {
+ controlbuf = control->data;
+ if (control->size < csize)
+ controlbuf = realloc(controlbuf, csize);
+ nr = readn(fd, controlbuf, csize);
+ if (nr != csize)
+ return (1);
+ } else {
+ if (control->data != NULL)
+ free(control->data);
+ controlbuf = NULL;
+ }
+ control->data = controlbuf;
+ control->size = csize;
+
+ return (0);
+}
+
+/*
+ * readn --
+ * Read a full n characters from a file descriptor, unless we get an error
+ * or EOF.
+ */
+ssize_t
+readn(fd, vptr, n)
+ int fd;
+ void *vptr;
+ size_t n;
+{
+ size_t nleft;
+ ssize_t nread;
+ char *ptr;
+
+ ptr = vptr;
+ nleft = n;
+ while (nleft > 0) {
+ if ( (nread = read(fd, ptr, nleft)) < 0) {
+ /*
+ * Call read() again on interrupted system call;
+ * on other errors, bail.
+ */
+ if (errno == EINTR)
+ nread = 0;
+ else
+ return (-1);
+ } else if (nread == 0)
+ break; /* EOF */
+
+ nleft -= nread;
+ ptr += nread;
+ }
+
+ return (n - nleft);
+}
+
+/*
+ * quote_send --
+ * The f_send function for DB_ENV->set_rep_transport.
+ */
+int
+quote_send(dbenv, control, rec, eid, flags)
+ DB_ENV *dbenv;
+ const DBT *control, *rec;
+ int eid;
+ u_int32_t flags;
+{
+ int fd, n, ret, t_ret;
+ machtab_t *machtab;
+ member_t *m;
+
+ machtab = (machtab_t *)dbenv->app_private;
+
+ if (eid == DB_EID_BROADCAST) {
+ /*
+ * Right now, we do not require successful transmission.
+ * I'd like to move this to requiring at least one successful
+ * transmission for PERMANENT requests.
+ */
+ n = quote_send_broadcast(machtab, rec, control, flags);
+ if (n < 0 /*|| (n == 0 && LF_ISSET(DB_REP_PERMANENT))*/)
+ return (DB_REP_UNAVAIL);
+ return (0);
+ }
+
+ if ((ret = pthread_mutex_lock(&machtab->mtmutex)) != 0)
+ return (ret);
+
+ fd = 0;
+ for (m = LIST_FIRST(&machtab->machlist); m != NULL;
+ m = LIST_NEXT(m, links)) {
+ if (m->eid == eid) {
+ fd = m->fd;
+ break;
+ }
+ }
+
+ if (fd == 0) {
+ dbenv->err(dbenv, DB_REP_UNAVAIL,
+ "quote_send: cannot find machine ID %d", eid);
+ return (DB_REP_UNAVAIL);
+ }
+
+ ret = quote_send_one(rec, control, fd, flags);
+
+ if ((t_ret = (pthread_mutex_unlock(&machtab->mtmutex))) != 0 &&
+ ret == 0)
+ ret = t_ret;
+
+ return (ret);
+}
+
+/*
+ * quote_send_broadcast --
+ * Send a message to everybody.
+ * Returns the number of sites to which this message was successfully
+ * communicated. A -1 indicates a fatal error.
+ */
+static int
+quote_send_broadcast(machtab, rec, control, flags)
+ machtab_t *machtab;
+ const DBT *rec, *control;
+ u_int32_t flags;
+{
+ int ret, sent;
+ member_t *m, *next;
+
+ if ((ret = pthread_mutex_lock(&machtab->mtmutex)) != 0)
+ return (0);
+
+ sent = 0;
+ for (m = LIST_FIRST(&machtab->machlist); m != NULL; m = next) {
+ next = LIST_NEXT(m, links);
+ if ((ret = quote_send_one(rec, control, m->fd, flags)) != 0) {
+ (void)machtab_rem(machtab, m->eid, 0);
+ } else
+ sent++;
+ }
+
+ if (pthread_mutex_unlock(&machtab->mtmutex) != 0)
+ return (-1);
+
+ return (sent);
+}
+
+/*
+ * quote_send_one --
+ * Send a message to a single machine, given that machine's file
+ * descriptor.
+ *
+ * !!!
+ * Note that the machtab mutex should be held through this call.
+ * It doubles as a synchronizer to make sure that two threads don't
+ * intersperse writes that are part of two single messages.
+ */
+static int
+quote_send_one(rec, control, fd, flags)
+ const DBT *rec, *control;
+ int fd;
+ u_int32_t flags;
+
+{
+ int retry;
+ ssize_t bytes_left, nw;
+ u_int8_t *wp;
+
+ COMPQUIET(flags, 0);
+
+ /*
+ * The protocol is simply: write rec->size, write rec->data,
+ * write control->size, write control->data.
+ */
+ nw = write(fd, &rec->size, 4);
+ if (nw != 4)
+ return (DB_REP_UNAVAIL);
+
+ if (rec->size > 0) {
+ nw = write(fd, rec->data, rec->size);
+ if (nw < 0)
+ return (DB_REP_UNAVAIL);
+ if (nw != (ssize_t)rec->size) {
+ /* Try a couple of times to finish the write. */
+ wp = (u_int8_t *)rec->data + nw;
+ bytes_left = rec->size - nw;
+ for (retry = 0; bytes_left > 0 && retry < 3; retry++) {
+ nw = write(fd, wp, bytes_left);
+ if (nw < 0)
+ return (DB_REP_UNAVAIL);
+ bytes_left -= nw;
+ wp += nw;
+ }
+ if (bytes_left > 0)
+ return (DB_REP_UNAVAIL);
+ }
+ }
+
+ nw = write(fd, &control->size, 4);
+ if (nw != 4)
+ return (DB_REP_UNAVAIL);
+ if (control->size > 0) {
+ nw = write(fd, control->data, control->size);
+ if (nw != (ssize_t)control->size)
+ return (DB_REP_UNAVAIL);
+ }
+ return (0);
+}
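
The wire format implemented by quote_send_one() and get_next_message() above is a simple length-prefixed framing: each DBT travels as a 4-byte size followed by that many bytes of payload, record first, control structure second. A compact sender-side sketch of the same framing (no partial-write retry loop, purely illustrative):

#include <sys/types.h>

#include <unistd.h>

#include <db.h>

/* write_dbt: write one DBT as <4-byte size><data>, as quote_send_one() does. */
static int
write_dbt(int fd, const DBT *dbt)
{
    u_int32_t size;

    size = dbt->size;
    if (write(fd, &size, 4) != 4)
        return (DB_REP_UNAVAIL);
    if (size > 0 && write(fd, dbt->data, size) != (ssize_t)size)
        return (DB_REP_UNAVAIL);
    return (0);
}

/* send_message: a message is the record DBT followed by the control DBT. */
int
send_message(int fd, const DBT *rec, const DBT *control)
{
    int ret;

    if ((ret = write_dbt(fd, rec)) != 0)
        return (ret);
    return (write_dbt(fd, control));
}
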
diff --git a/bdb/examples_c/ex_repquote/ex_rq_util.c b/bdb/examples_c/ex_repquote/ex_rq_util.c
new file mode 100644
index 00000000000..89fd7ae485a
--- /dev/null
+++ b/bdb/examples_c/ex_repquote/ex_rq_util.c
@@ -0,0 +1,412 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2001-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: ex_rq_util.c,v 1.20 2002/08/06 05:39:04 bostic Exp $
+ */
+
+#include <sys/types.h>
+
+#include <errno.h>
+#include <pthread.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+
+#include <db.h>
+
+#include "ex_repquote.h"
+
+static int connect_site __P((DB_ENV *, machtab_t *, const char *,
+ repsite_t *, int *, int *));
+void * elect_thread __P((void *));
+
+typedef struct {
+ DB_ENV *dbenv;
+ machtab_t *machtab;
+} elect_args;
+
+typedef struct {
+ DB_ENV *dbenv;
+ const char *progname;
+ const char *home;
+ int fd;
+ u_int32_t eid;
+ machtab_t *tab;
+} hm_loop_args;
+
+/*
+ * This is a generic message handling loop that is used both by the
+ * master to accept messages from a client and by clients
+ * to communicate with other clients.
+ */
+void *
+hm_loop(args)
+ void *args;
+{
+ DB_ENV *dbenv;
+ DBT rec, control;
+ const char *c, *home, *progname;
+ int fd, eid, n, newm;
+ int open, pri, r, ret, t_ret, tmpid;
+ elect_args *ea;
+ hm_loop_args *ha;
+ machtab_t *tab;
+ pthread_t elect_thr;
+ repsite_t self;
+ u_int32_t timeout;
+ void *status;
+
+ ea = NULL;
+
+ ha = (hm_loop_args *)args;
+ dbenv = ha->dbenv;
+ fd = ha->fd;
+ home = ha->home;
+ eid = ha->eid;
+ progname = ha->progname;
+ tab = ha->tab;
+ free(ha);
+
+ memset(&rec, 0, sizeof(DBT));
+ memset(&control, 0, sizeof(DBT));
+
+ for (ret = 0; ret == 0;) {
+ if ((ret = get_next_message(fd, &rec, &control)) != 0) {
+ /*
+ * Close this connection; if it's the master call
+ * for an election.
+ */
+ close(fd);
+ if ((ret = machtab_rem(tab, eid, 1)) != 0)
+ break;
+
+ /*
+ * If I'm the master, I just lost a client and this
+ * thread is done.
+ */
+ if (master_eid == SELF_EID)
+ break;
+
+ /*
+ * If I was talking with the master and the master
+ * went away, I need to call an election; else I'm
+ * done.
+ */
+ if (master_eid != eid)
+ break;
+
+ master_eid = DB_EID_INVALID;
+ machtab_parm(tab, &n, &pri, &timeout);
+ if ((ret = dbenv->rep_elect(dbenv,
+ n, pri, timeout, &newm)) != 0)
+ continue;
+
+ /*
+ * Regardless of the results, the site I was talking
+ * to is gone, so I have nothing to do but exit.
+ */
+ if (newm == SELF_EID && (ret =
+ dbenv->rep_start(dbenv, NULL, DB_REP_MASTER)) == 0)
+ ret = domaster(dbenv, progname);
+ break;
+ }
+
+ tmpid = eid;
+ switch(r = dbenv->rep_process_message(dbenv,
+ &control, &rec, &tmpid)) {
+ case DB_REP_NEWSITE:
+ /*
+ * Check if we got sent connect information and if we
+ * did, if this is me or if we already have a
+ * connection to this new site. If we don't,
+ * establish a new one.
+ */
+
+ /* No connect info. */
+ if (rec.size == 0)
+ break;
+
+ /* It's me, do nothing. */
+ if (strncmp(myaddr, rec.data, rec.size) == 0)
+ break;
+
+ self.host = (char *)rec.data;
+ self.host = strtok(self.host, ":");
+ if ((c = strtok(NULL, ":")) == NULL) {
+ dbenv->errx(dbenv, "Bad host specification");
+ goto out;
+ }
+ self.port = atoi(c);
+
+ /*
+ * We try to connect to the new site. If we can't,
+ * we treat it as an error since we know that the site
+ * should be up if we got a message from it (even
+ * indirectly).
+ */
+ if ((ret = connect_site(dbenv,
+ tab, progname, &self, &open, &eid)) != 0)
+ goto out;
+ break;
+ case DB_REP_HOLDELECTION:
+ if (master_eid == SELF_EID)
+ break;
+ /* Make sure that previous election has finished. */
+ if (ea != NULL) {
+ (void)pthread_join(elect_thr, &status);
+ ea = NULL;
+ }
+ if ((ea = calloc(sizeof(elect_args), 1)) == NULL) {
+ ret = errno;
+ goto out;
+ }
+ ea->dbenv = dbenv;
+ ea->machtab = tab;
+ ret = pthread_create(&elect_thr,
+ NULL, elect_thread, (void *)ea);
+ break;
+ case DB_REP_NEWMASTER:
+ /* Check if it's us. */
+ master_eid = tmpid;
+ if (tmpid == SELF_EID) {
+ if ((ret = dbenv->rep_start(dbenv,
+ NULL, DB_REP_MASTER)) != 0)
+ goto out;
+ ret = domaster(dbenv, progname);
+ }
+ break;
+ case 0:
+ break;
+ default:
+ dbenv->err(dbenv, r, "DB_ENV->rep_process_message");
+ break;
+ }
+ }
+
+out: if ((t_ret = machtab_rem(tab, eid, 1)) != 0 && ret == 0)
+ ret = t_ret;
+
+ /* Don't close the environment before any children exit. */
+ if (ea != NULL)
+ (void)pthread_join(elect_thr, &status);
+
+ return ((void *)ret);
+}
+
+/*
+ * This is a generic thread that spawns a thread to listen for connections
+ * on a socket and then spawns off child threads to handle each new
+ * connection.
+ */
+void *
+connect_thread(args)
+ void *args;
+{
+ DB_ENV *dbenv;
+ const char *home, *progname;
+ int fd, i, eid, ns, port, ret;
+ hm_loop_args *ha;
+ connect_args *cargs;
+ machtab_t *machtab;
+#define MAX_THREADS 25
+ pthread_t hm_thrs[MAX_THREADS];
+ pthread_attr_t attr;
+
+ ha = NULL;
+ cargs = (connect_args *)args;
+ dbenv = cargs->dbenv;
+ home = cargs->home;
+ progname = cargs->progname;
+ machtab = cargs->machtab;
+ port = cargs->port;
+
+ if ((ret = pthread_attr_init(&attr)) != 0)
+ return ((void *)EXIT_FAILURE);
+
+ if ((ret =
+ pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED)) != 0)
+ goto err;
+
+ /*
+ * Loop forever, accepting connections from new machines,
+ * and forking off a thread to handle each.
+ */
+ if ((fd = listen_socket_init(progname, port)) < 0) {
+ ret = errno;
+ goto err;
+ }
+
+ for (i = 0; i < MAX_THREADS; i++) {
+ if ((ns = listen_socket_accept(machtab,
+ progname, fd, &eid)) < 0) {
+ ret = errno;
+ goto err;
+ }
+ if ((ha = calloc(sizeof(hm_loop_args), 1)) == NULL)
+ goto err;
+ ha->progname = progname;
+ ha->home = home;
+ ha->fd = ns;
+ ha->eid = eid;
+ ha->tab = machtab;
+ ha->dbenv = dbenv;
+ if ((ret = pthread_create(&hm_thrs[i++], &attr,
+ hm_loop, (void *)ha)) != 0)
+ goto err;
+ ha = NULL;
+ }
+
+ /* If we fell out, we ended up with too many threads. */
+ dbenv->errx(dbenv, "Too many threads");
+ ret = ENOMEM;
+
+err: pthread_attr_destroy(&attr);
+ return (ret == 0 ? (void *)EXIT_SUCCESS : (void *)EXIT_FAILURE);
+}
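A minimal standalone sketch of the detached-thread pattern connect_thread uses above: handler threads are created with the PTHREAD_CREATE_DETACHED attribute so they never have to be joined. The handle_conn function, the spawn_detached_handler wrapper and the fd argument are illustrative names only, not part of the example.

#include <pthread.h>
#include <stdint.h>
#include <stdlib.h>

/* Hypothetical per-connection handler; stands in for hm_loop(). */
static void *
handle_conn(void *arg)
{
	int fd = (int)(intptr_t)arg;

	/* ... service the connection on fd, then close it ... */
	(void)fd;
	return (NULL);
}

/* Create a detached thread for one accepted connection. */
static int
spawn_detached_handler(int fd)
{
	pthread_attr_t attr;
	pthread_t tid;
	int ret;

	if ((ret = pthread_attr_init(&attr)) != 0)
		return (ret);
	if ((ret = pthread_attr_setdetachstate(&attr,
	    PTHREAD_CREATE_DETACHED)) == 0)
		ret = pthread_create(&tid,
		    &attr, handle_conn, (void *)(intptr_t)fd);
	(void)pthread_attr_destroy(&attr);
	return (ret);
}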
+
+/*
+ * Open a connection to everyone that we've been told about. If we
+ * cannot open some connections, keep trying.
+ */
+void *
+connect_all(args)
+ void *args;
+{
+ DB_ENV *dbenv;
+ all_args *aa;
+ const char *home, *progname;
+ hm_loop_args *ha;
+ int failed, i, eid, nsites, open, ret, *success;
+ machtab_t *machtab;
+ repsite_t *sites;
+
+ ha = NULL;
+ aa = (all_args *)args;
+ dbenv = aa->dbenv;
+ progname = aa->progname;
+ home = aa->home;
+ machtab = aa->machtab;
+ nsites = aa->nsites;
+ sites = aa->sites;
+
+ ret = 0;
+
+	/* Some calloc implementations fail when asked for zero elements. */
+ if ((success = calloc(nsites > 0 ? nsites : 1, sizeof(int))) == NULL) {
+ dbenv->err(dbenv, errno, "connect_all");
+ ret = 1;
+ goto err;
+ }
+
+ for (failed = nsites; failed > 0;) {
+ for (i = 0; i < nsites; i++) {
+ if (success[i])
+ continue;
+
+ ret = connect_site(dbenv, machtab,
+ progname, &sites[i], &open, &eid);
+
+ /*
+ * If we couldn't make the connection, this isn't
+ * fatal to the loop, but we have nothing further
+ * to do on this machine at the moment.
+ */
+ if (ret == DB_REP_UNAVAIL)
+ continue;
+
+ if (ret != 0)
+ goto err;
+
+ failed--;
+ success[i] = 1;
+
+ /* If the connection is already open, we're done. */
+ if (ret == 0 && open == 1)
+ continue;
+
+ }
+ sleep(1);
+ }
+
+err: free(success);
+ return (ret ? (void *)EXIT_FAILURE : (void *)EXIT_SUCCESS);
+}
+
+int
+connect_site(dbenv, machtab, progname, site, is_open, eidp)
+ DB_ENV *dbenv;
+ machtab_t *machtab;
+ const char *progname;
+ repsite_t *site;
+ int *is_open;
+ int *eidp;
+{
+ int ret, s;
+ hm_loop_args *ha;
+ pthread_t hm_thr;
+
+ if ((s = get_connected_socket(machtab, progname,
+ site->host, site->port, is_open, eidp)) < 0)
+ return (DB_REP_UNAVAIL);
+
+ if (*is_open)
+ return (0);
+
+ if ((ha = calloc(sizeof(hm_loop_args), 1)) == NULL) {
+ ret = errno;
+ goto err;
+ }
+
+ ha->progname = progname;
+ ha->fd = s;
+ ha->eid = *eidp;
+ ha->tab = machtab;
+ ha->dbenv = dbenv;
+
+ if ((ret = pthread_create(&hm_thr, NULL,
+ hm_loop, (void *)ha)) != 0) {
+ dbenv->err(dbenv, ret, "connect site");
+ goto err1;
+ }
+
+ return (0);
+
+err1: free(ha);
+err:
+ return (ret);
+}
+
+/*
+ * We need to spawn off a new thread in which to hold an election in
+ * case we are the only thread listening for messages.
+ */
+void *
+elect_thread(args)
+ void *args;
+{
+ DB_ENV *dbenv;
+ elect_args *eargs;
+ int n, ret, pri;
+ machtab_t *machtab;
+ u_int32_t timeout;
+
+ eargs = (elect_args *)args;
+ dbenv = eargs->dbenv;
+ machtab = eargs->machtab;
+ free(eargs);
+
+ machtab_parm(machtab, &n, &pri, &timeout);
+ while ((ret =
+ dbenv->rep_elect(dbenv, n, pri, timeout, &master_eid)) != 0)
+ sleep(2);
+
+ /* Check if it's us. */
+ if (master_eid == SELF_EID)
+ ret = dbenv->rep_start(dbenv, NULL, DB_REP_MASTER);
+
+ return ((void *)(ret == 0 ? EXIT_SUCCESS : EXIT_FAILURE));
+}
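A minimal sketch of the election idiom elect_thread wraps above, using the two Berkeley DB 4.1 calls shown in the code: DB_ENV->rep_elect runs the election and DB_ENV->rep_start promotes this site to master if it won. SELF_EID and the nsites/priority/timeout parameters come from the surrounding example; the placeholder value below is illustrative only.

#include <unistd.h>
#include <db.h>

#define SELF_EID	1	/* Placeholder; the example defines its own. */

static int
run_election(DB_ENV *dbenv, int nsites, int priority, u_int32_t timeout)
{
	int eid, ret;

	/* Retry until an election completes. */
	while ((ret = dbenv->rep_elect(dbenv,
	    nsites, priority, timeout, &eid)) != 0)
		sleep(2);

	/* If this site won, switch the environment to master. */
	if (eid == SELF_EID)
		ret = dbenv->rep_start(dbenv, NULL, DB_REP_MASTER);

	return (ret);
}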
diff --git a/bdb/examples_c/ex_thread.c b/bdb/examples_c/ex_thread.c
index 93812ade764..104de37ad38 100644
--- a/bdb/examples_c/ex_thread.c
+++ b/bdb/examples_c/ex_thread.c
@@ -1,33 +1,26 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1997, 1998, 1999, 2000
+ * Copyright (c) 1997-2002
* Sleepycat Software. All rights reserved.
*
- * $Id: ex_thread.c,v 11.9 2000/05/31 15:10:04 bostic Exp $
+ * $Id: ex_thread.c,v 11.34 2002/08/15 14:37:13 bostic Exp $
*/
-#include "db_config.h"
-
-#ifndef NO_SYSTEM_INCLUDES
#include <sys/types.h>
-
-#if TIME_WITH_SYS_TIME
-#include <sys/time.h>
-#include <time.h>
-#else
-#if HAVE_SYS_TIME_H
#include <sys/time.h>
-#else
-#include <time.h>
-#endif
-#endif
#include <errno.h>
#include <pthread.h>
+#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
+#include <time.h>
+
+#ifdef _WIN32
+extern int getopt(int, char * const *, const char *);
+#else
#include <unistd.h>
#endif
@@ -39,18 +32,21 @@
*/
extern int sched_yield __P((void)); /* Pthread yield function. */
-DB_ENV *db_init __P((char *));
+int db_init __P((const char *));
void *deadlock __P((void *));
-void fatal __P((char *, int, int));
+void fatal __P((const char *, int, int));
+void onint __P((int));
int main __P((int, char *[]));
int reader __P((int));
void stats __P((void));
void *trickle __P((void *));
void *tstart __P((void *));
-void usage __P((void));
+int usage __P((void));
void word __P((void));
int writer __P((int));
+volatile sig_atomic_t quit;		/* Interrupt handling flag. */
+
struct _statistics {
int aborted; /* Write. */
int aborts; /* Read/write. */
@@ -100,11 +96,13 @@ main(argc, argv)
{
extern char *optarg;
extern int errno, optind;
+ DB_TXN *txnp;
pthread_t *tids;
int ch, i, ret;
- char *home;
+ const char *home;
void *retp;
+ txnp = NULL;
nlist = 1000;
nreaders = nwriters = 4;
home = "TESTDIR";
@@ -130,7 +128,7 @@ main(argc, argv)
break;
case '?':
default:
- usage();
+ return (usage());
}
argc -= optind;
argv += optind;
@@ -138,29 +136,41 @@ main(argc, argv)
/* Initialize the random number generator. */
srand(getpid() | time(NULL));
+ /* Register the signal handler. */
+ (void)signal(SIGINT, onint);
+
/* Build the key list. */
word();
/* Remove the previous database. */
- (void)unlink(DATABASE);
+ (void)remove(DATABASE);
/* Initialize the database environment. */
- dbenv = db_init(home);
+ if ((ret = db_init(home)) != 0)
+ return (ret);
/* Initialize the database. */
if ((ret = db_create(&dbp, dbenv, 0)) != 0) {
dbenv->err(dbenv, ret, "db_create");
(void)dbenv->close(dbenv, 0);
- return (1);
+ return (EXIT_FAILURE);
}
if ((ret = dbp->set_pagesize(dbp, 1024)) != 0) {
dbp->err(dbp, ret, "set_pagesize");
goto err;
}
- if ((ret = dbp->open(dbp,
+
+ if ((ret = dbenv->txn_begin(dbenv, NULL, &txnp, 0)) != 0)
+ fatal("txn_begin", ret, 1);
+ if ((ret = dbp->open(dbp, txnp,
DATABASE, NULL, DB_BTREE, DB_CREATE | DB_THREAD, 0664)) != 0) {
dbp->err(dbp, ret, "%s: open", DATABASE);
goto err;
+ } else {
+ ret = txnp->commit(txnp, 0);
+ txnp = NULL;
+ if (ret != 0)
+ goto err;
}
nthreads = nreaders + nwriters + 2;
@@ -177,8 +187,9 @@ main(argc, argv)
/* Create reader/writer threads. */
for (i = 0; i < nreaders + nwriters; ++i)
- if (pthread_create(&tids[i], NULL, tstart, (void *)i))
- fatal("pthread_create", errno, 1);
+ if ((ret =
+ pthread_create(&tids[i], NULL, tstart, (void *)i)) != 0)
+ fatal("pthread_create", ret > 0 ? ret : errno, 1);
/* Create buffer pool trickle thread. */
if (pthread_create(&tids[i], NULL, trickle, &i))
@@ -193,10 +204,15 @@ main(argc, argv)
for (i = 0; i < nthreads; ++i)
(void)pthread_join(tids[i], &retp);
-err: (void)dbp->close(dbp, 0);
+ printf("Exiting\n");
+ stats();
+
+err: if (txnp != NULL)
+ (void)txnp->abort(txnp);
+ (void)dbp->close(dbp, 0);
(void)dbenv->close(dbenv, 0);
- return (0);
+ return (EXIT_SUCCESS);
}
int
@@ -219,7 +235,7 @@ reader(id)
* Read-only threads do not require transaction protection, unless
* there's a need for repeatable reads.
*/
- for (;;) {
+ while (!quit) {
/* Pick a key at random, and look it up. */
n = rand() % nlist;
key.data = list[n];
@@ -273,7 +289,7 @@ writer(id)
data.ulen = sizeof(dbuf);
data.flags = DB_DBT_USERMEM;
- for (;;) {
+ while (!quit) {
/* Pick a random key. */
n = rand() % nlist;
key.data = list[n];
@@ -286,8 +302,8 @@ writer(id)
/* Abort and retry. */
if (0) {
-retry: if ((ret = txn_abort(tid)) != 0)
- fatal("txn_abort", ret, 1);
+retry: if ((ret = tid->abort(tid)) != 0)
+ fatal("DB_TXN->abort", ret, 1);
++perf[id].aborts;
++perf[id].aborted;
}
@@ -302,7 +318,7 @@ retry: if ((ret = txn_abort(tid)) != 0)
}
/* Begin the transaction. */
- if ((ret = txn_begin(dbenv, NULL, &tid, 0)) != 0)
+ if ((ret = dbenv->txn_begin(dbenv, NULL, &tid, 0)) != 0)
fatal("txn_begin", ret, 1);
/*
@@ -352,8 +368,8 @@ add: /* Add the key. 1 data item in 30 is an overflow item. */
}
commit: /* The transaction finished, commit it. */
- if ((ret = txn_commit(tid, 0)) != 0)
- fatal("txn_commit", ret, 1);
+ if ((ret = tid->commit(tid, 0)) != 0)
+ fatal("DB_TXN->commit", ret, 1);
/*
* Every time the thread completes 20 transactions, show
@@ -415,23 +431,22 @@ stats()
* db_init --
* Initialize the environment.
*/
-DB_ENV *
+int
db_init(home)
- char *home;
+ const char *home;
{
- DB_ENV *dbenv;
int ret;
- if (punish) {
- (void)db_env_set_pageyield(1);
- (void)db_env_set_func_yield(sched_yield);
- }
-
if ((ret = db_env_create(&dbenv, 0)) != 0) {
fprintf(stderr,
"%s: db_env_create: %s\n", progname, db_strerror(ret));
- exit (1);
+ return (EXIT_FAILURE);
+ }
+ if (punish) {
+ (void)dbenv->set_flags(dbenv, DB_YIELDCPU, 1);
+ (void)db_env_set_func_yield(sched_yield);
}
+
dbenv->set_errfile(dbenv, stderr);
dbenv->set_errpfx(dbenv, progname);
(void)dbenv->set_cachesize(dbenv, 0, 100 * 1024, 0);
@@ -442,9 +457,10 @@ db_init(home)
DB_INIT_MPOOL | DB_INIT_TXN | DB_THREAD, 0)) != 0) {
dbenv->err(dbenv, ret, NULL);
(void)dbenv->close(dbenv, 0);
- exit (1);
+ return (EXIT_FAILURE);
}
- return (dbenv);
+
+ return (0);
}
/*
@@ -478,7 +494,7 @@ tstart(arg)
/*
* deadlock --
- * Thread start function for lock_detect().
+ * Thread start function for DB_ENV->lock_detect.
*/
void *
deadlock(arg)
@@ -495,21 +511,19 @@ deadlock(arg)
t.tv_sec = 0;
t.tv_usec = 100000;
- for (;;) {
- (void)lock_detect(dbenv,
- DB_LOCK_CONFLICT, DB_LOCK_YOUNGEST, NULL);
+ while (!quit) {
+ (void)dbenv->lock_detect(dbenv, 0, DB_LOCK_YOUNGEST, NULL);
/* Check every 100ms. */
(void)select(0, NULL, NULL, NULL, &t);
}
- /* NOTREACHED */
return (NULL);
}
/*
* trickle --
- * Thread start function for memp_trickle().
+ * Thread start function for memp_trickle.
*/
void *
trickle(arg)
@@ -525,8 +539,8 @@ trickle(arg)
printf("trickle thread starting: tid: %lu\n", (u_long)tid);
fflush(stdout);
- for (;;) {
- (void)memp_trickle(dbenv, 10, &wrote);
+ while (!quit) {
+ (void)dbenv->memp_trickle(dbenv, 10, &wrote);
if (verbose) {
sprintf(buf, "trickle: wrote %d\n", wrote);
write(STDOUT_FILENO, buf, strlen(buf));
@@ -537,7 +551,6 @@ trickle(arg)
}
}
- /* NOTREACHED */
return (NULL);
}
@@ -573,7 +586,7 @@ word()
*/
void
fatal(msg, err, syserr)
- char *msg;
+ const char *msg;
int err, syserr;
{
fprintf(stderr, "%s: ", progname);
@@ -585,7 +598,7 @@ fatal(msg, err, syserr)
if (syserr)
fprintf(stderr, "%s", strerror(err));
fprintf(stderr, "\n");
- exit (1);
+ exit(EXIT_FAILURE);
/* NOTREACHED */
}
@@ -594,11 +607,23 @@ fatal(msg, err, syserr)
* usage --
* Usage message.
*/
-void
+int
usage()
{
(void)fprintf(stderr,
"usage: %s [-pv] [-h home] [-n words] [-r readers] [-w writers]\n",
progname);
- exit(1);
+ return (EXIT_FAILURE);
+}
+
+/*
+ * onint --
+ * Interrupt signal handler.
+ */
+void
+onint(signo)
+ int signo;
+{
+ signo = 0; /* Quiet compiler. */
+ quit = 1;
}
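A minimal sketch of the DB 4.1 transaction idiom the ex_thread.c hunks above move to: DB_ENV->txn_begin replaces the old txn_begin function, the database open is wrapped in that transaction, and commit/abort become methods on the DB_TXN handle. The "mydb.db" name and the flag choices are illustrative only.

#include <db.h>

static int
txn_protected_open(DB_ENV *dbenv, DB **dbpp)
{
	DB *dbp;
	DB_TXN *txn;
	int ret;

	if ((ret = db_create(&dbp, dbenv, 0)) != 0)
		return (ret);

	/* Begin a transaction and open the database under it. */
	if ((ret = dbenv->txn_begin(dbenv, NULL, &txn, 0)) != 0)
		goto err;
	if ((ret = dbp->open(dbp, txn, "mydb.db", NULL,
	    DB_BTREE, DB_CREATE | DB_THREAD, 0664)) != 0) {
		(void)txn->abort(txn);
		goto err;
	}

	/* Commit the open; the handle is then usable by other threads. */
	if ((ret = txn->commit(txn, 0)) != 0)
		goto err;

	*dbpp = dbp;
	return (0);

err:	(void)dbp->close(dbp, 0);
	return (ret);
}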
diff --git a/bdb/examples_c/ex_tpcb.c b/bdb/examples_c/ex_tpcb.c
index 2fd11510af7..017cfeb5bdd 100644
--- a/bdb/examples_c/ex_tpcb.c
+++ b/bdb/examples_c/ex_tpcb.c
@@ -1,64 +1,41 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1997, 1998, 1999, 2000
+ * Copyright (c) 1997-2002
* Sleepycat Software. All rights reserved.
*
- * $Id: ex_tpcb.c,v 11.21 2000/10/27 20:32:00 dda Exp $
+ * $Id: ex_tpcb.c,v 11.42 2002/08/06 05:39:00 bostic Exp $
*/
-#include "db_config.h"
-
-#ifndef NO_SYSTEM_INCLUDES
#include <sys/types.h>
-#if TIME_WITH_SYS_TIME
-#include <sys/time.h>
-#include <time.h>
-#else
-#if HAVE_SYS_TIME_H
-#include <sys/time.h>
-#else
-#include <time.h>
-#endif
-#endif
-
#include <errno.h>
#include <stdlib.h>
#include <string.h>
-#include <unistd.h>
-#endif
+#include <time.h>
-#ifdef DB_WIN32
-#include <sys/types.h>
-#include <sys/timeb.h>
+#ifdef _WIN32
+extern int getopt(int, char * const *, const char *);
+#else
+#include <unistd.h>
#endif
#include <db.h>
typedef enum { ACCOUNT, BRANCH, TELLER } FTYPE;
-DB_ENV *db_init __P((char *, char *, int, int, int));
+DB_ENV *db_init __P((const char *, const char *, int, int, u_int32_t));
int hpopulate __P((DB *, int, int, int, int));
-int populate __P((DB *, u_int32_t, u_int32_t, int, char *));
+int populate __P((DB *, u_int32_t, u_int32_t, int, const char *));
u_int32_t random_id __P((FTYPE, int, int, int));
u_int32_t random_int __P((u_int32_t, u_int32_t));
int tp_populate __P((DB_ENV *, int, int, int, int, int));
int tp_run __P((DB_ENV *, int, int, int, int, int));
int tp_txn __P((DB_ENV *, DB *, DB *, DB *, DB *, int, int, int, int));
-#ifdef HAVE_VXWORKS
-#define ERROR_RETURN ERROR
-#define HOME "/vxtmp/vxtmp/TESTDIR"
-#define VXSHM_KEY 13
-int ex_tpcb_init __P(());
-int ex_tpcb __P(());
-#else
-#define ERROR_RETURN 1
-void invarg __P((char *, int, char *));
+int invarg __P((const char *, int, const char *));
int main __P((int, char *[]));
-void usage __P((char *));
-#endif
+int usage __P((const char *));
/*
* This program implements a basic TPC/B driver program. To create the
@@ -125,77 +102,6 @@ typedef struct _histrec {
u_int8_t pad[RECLEN - 4 * sizeof(u_int32_t)];
} histrec;
-#ifdef HAVE_VXWORKS
-int
-ex_tpcb_init()
-{
- DB_ENV *dbenv;
- int accounts, branches, ret, seed, t_ret, tellers, history, verbose;
- char *home;
- char *progname = "ex_tpcb_init"; /* Program name. */
-
- verbose = 1;
- if ((dbenv = db_init(HOME, progname, 0, 1, 0)) == NULL)
- return (ERROR_RETURN);
-
- accounts = ACCOUNTS;
- branches = BRANCHES;
- tellers = TELLERS;
- history = HISTORY;
-
- if ((ret = tp_populate(dbenv, accounts, branches, history, tellers,
- verbose)) != OK)
- fprintf(stderr, "%s: %s\n", progname, db_strerror(ret));
- if ((t_ret = dbenv->close(dbenv, 0)) != 0) {
- fprintf(stderr, "%s: %s\n", progname, db_strerror(ret));
- return (ERROR_RETURN);
- }
-
- return (ret == 0 ? t_ret : ret);
-}
-
-int
-ex_tpcb()
-{
- DB_ENV *dbenv;
- int accounts, branches, seed, tellers, history;
- int ch, mpool, ntxns, ret, t_ret, txn_no_sync, verbose;
- char *progname = "ex_tpcb"; /* Program name. */
-
- accounts = ACCOUNTS;
- branches = BRANCHES;
- tellers = TELLERS;
- history = HISTORY;
-
- txn_no_sync = 0;
- mpool = 0;
- ntxns = 20;
- verbose = 1;
- seed = (int)((u_int)getpid() | time(NULL));
-
- srand((u_int)seed);
-
- /* Initialize the database environment. */
- if ((dbenv = db_init(HOME, progname, mpool, 0,
- txn_no_sync ? DB_TXN_NOSYNC : 0)) == NULL)
- return (ERROR_RETURN);
-
- if (verbose)
- printf("%ld Accounts, %ld Branches, %ld Tellers, %ld History\n",
- (long)accounts, (long)branches,
- (long)tellers, (long)history);
-
- if ((ret = tp_run(dbenv, ntxns, accounts, branches, tellers, verbose))
- != OK)
- fprintf(stderr, "tp_run failed\n");
-
- if ((t_ret = dbenv->close(dbenv, 0)) != 0) {
- fprintf(stderr, "%s: %s\n", progname, db_strerror(ret));
- return (ERROR_RETURN);
- }
- return (ret == 0 ? t_ret : ret);
-}
-#else
int
main(argc, argv)
int argc;
@@ -206,29 +112,27 @@ main(argc, argv)
DB_ENV *dbenv;
int accounts, branches, seed, tellers, history;
int ch, iflag, mpool, ntxns, ret, txn_no_sync, verbose;
- char *home, *progname;
+ const char *home, *progname;
home = "TESTDIR";
progname = "ex_tpcb";
accounts = branches = history = tellers = 0;
- txn_no_sync = 0;
- mpool = ntxns = 0;
- verbose = 0;
- iflag = 0;
- seed = (int)((u_int)getpid() | time(NULL));
+ iflag = mpool = ntxns = txn_no_sync = verbose = 0;
+ seed = (int)time(NULL);
+
while ((ch = getopt(argc, argv, "a:b:c:fh:in:S:s:t:v")) != EOF)
switch (ch) {
case 'a': /* Number of account records */
if ((accounts = atoi(optarg)) <= 0)
- invarg(progname, ch, optarg);
+ return (invarg(progname, ch, optarg));
break;
case 'b': /* Number of branch records */
if ((branches = atoi(optarg)) <= 0)
- invarg(progname, ch, optarg);
+ return (invarg(progname, ch, optarg));
break;
case 'c': /* Cachesize in bytes */
if ((mpool = atoi(optarg)) <= 0)
- invarg(progname, ch, optarg);
+ return (invarg(progname, ch, optarg));
break;
case 'f': /* Fast mode: no txn sync. */
txn_no_sync = 1;
@@ -241,26 +145,26 @@ main(argc, argv)
break;
case 'n': /* Number of transactions */
if ((ntxns = atoi(optarg)) <= 0)
- invarg(progname, ch, optarg);
+ return (invarg(progname, ch, optarg));
break;
case 'S': /* Random number seed. */
if ((seed = atoi(optarg)) <= 0)
- invarg(progname, ch, optarg);
+ return (invarg(progname, ch, optarg));
break;
case 's': /* Number of history records */
if ((history = atoi(optarg)) <= 0)
- invarg(progname, ch, optarg);
+ return (invarg(progname, ch, optarg));
break;
case 't': /* Number of teller records */
if ((tellers = atoi(optarg)) <= 0)
- invarg(progname, ch, optarg);
+ return (invarg(progname, ch, optarg));
break;
case 'v': /* Verbose option. */
verbose = 1;
break;
case '?':
default:
- usage(progname);
+ return (usage(progname));
}
argc -= optind;
argv += optind;
@@ -270,7 +174,7 @@ main(argc, argv)
/* Initialize the database environment. */
if ((dbenv = db_init(home,
progname, mpool, iflag, txn_no_sync ? DB_TXN_NOSYNC : 0)) == NULL)
- return (1);
+ return (EXIT_FAILURE);
accounts = accounts == 0 ? ACCOUNTS : accounts;
branches = branches == 0 ? BRANCHES : branches;
@@ -284,49 +188,48 @@ main(argc, argv)
if (iflag) {
if (ntxns != 0)
- usage(progname);
+ return (usage(progname));
tp_populate(dbenv,
accounts, branches, history, tellers, verbose);
} else {
if (ntxns == 0)
- usage(progname);
+ return (usage(progname));
tp_run(dbenv, ntxns, accounts, branches, tellers, verbose);
}
if ((ret = dbenv->close(dbenv, 0)) != 0) {
fprintf(stderr, "%s: dbenv->close failed: %s\n",
progname, db_strerror(ret));
- return (1);
+ return (EXIT_FAILURE);
}
- return (0);
+ return (EXIT_SUCCESS);
}
-void
+int
invarg(progname, arg, str)
- char *progname;
+ const char *progname;
int arg;
- char *str;
+ const char *str;
{
(void)fprintf(stderr,
"%s: invalid argument for -%c: %s\n", progname, arg, str);
- exit (1);
+ return (EXIT_FAILURE);
}
-void
+int
usage(progname)
- char *progname;
+ const char *progname;
{
- char *a1, *a2;
+ const char *a1, *a2;
a1 = "[-fv] [-a accounts] [-b branches]\n";
a2 = "\t[-c cache_size] [-h home] [-S seed] [-s history] [-t tellers]";
(void)fprintf(stderr, "usage: %s -i %s %s\n", progname, a1, a2);
(void)fprintf(stderr,
" %s -n transactions %s %s\n", progname, a1, a2);
- exit(1);
+ return (EXIT_FAILURE);
}
-#endif
/*
* db_init --
@@ -334,8 +237,9 @@ usage(progname)
*/
DB_ENV *
db_init(home, prefix, cachesize, initializing, flags)
- char *home, *prefix;
- int cachesize, initializing, flags;
+ const char *home, *prefix;
+ int cachesize, initializing;
+ u_int32_t flags;
{
DB_ENV *dbenv;
u_int32_t local_flags;
@@ -347,19 +251,17 @@ db_init(home, prefix, cachesize, initializing, flags)
}
dbenv->set_errfile(dbenv, stderr);
dbenv->set_errpfx(dbenv, prefix);
-#ifdef HAVE_VXWORKS
- if ((ret = dbenv->set_shm_key(dbenv, VXSHM_KEY)) != 0) {
- dbenv->err(dbenv, ret, "set_shm_key");
- return (NULL);
- }
-#endif
(void)dbenv->set_cachesize(dbenv, 0,
cachesize == 0 ? 4 * 1024 * 1024 : (u_int32_t)cachesize, 0);
+ if (flags & (DB_TXN_NOSYNC))
+ (void)dbenv->set_flags(dbenv, DB_TXN_NOSYNC, 1);
+ flags &= ~(DB_TXN_NOSYNC);
+
local_flags = flags | DB_CREATE | (initializing ? DB_INIT_MPOOL :
DB_INIT_TXN | DB_INIT_LOCK | DB_INIT_LOG | DB_INIT_MPOOL);
if ((ret = dbenv->open(dbenv, home, local_flags, 0)) != 0) {
- dbenv->err(dbenv, ret, "DBENV->open: %s", home);
+ dbenv->err(dbenv, ret, "DB_ENV->open: %s", home);
(void)dbenv->close(dbenv, 0);
return (NULL);
}
@@ -376,7 +278,6 @@ tp_populate(env, accounts, branches, history, tellers, verbose)
int accounts, branches, history, tellers, verbose;
{
DB *dbp;
- char dbname[100];
u_int32_t balance, idnum, oflags;
u_int32_t end_anum, end_bnum, end_tnum;
u_int32_t start_anum, start_bnum, start_tnum;
@@ -384,23 +285,18 @@ tp_populate(env, accounts, branches, history, tellers, verbose)
idnum = BEGID;
balance = 500000;
-#ifdef HAVE_VXWORKS
- oflags = DB_CREATE;
-#else
oflags = DB_CREATE | DB_TRUNCATE;
-#endif
if ((ret = db_create(&dbp, env, 0)) != 0) {
env->err(env, ret, "db_create");
- return (ERROR_RETURN);
+ return (1);
}
(void)dbp->set_h_nelem(dbp, (u_int32_t)accounts);
- snprintf(dbname, sizeof(dbname), "account");
- if ((ret = dbp->open(dbp, dbname, NULL,
+ if ((ret = dbp->open(dbp, NULL, "account", NULL,
DB_HASH, oflags, 0644)) != 0) {
env->err(env, ret, "DB->open: account");
- return (ERROR_RETURN);
+ return (1);
}
start_anum = idnum;
@@ -409,7 +305,7 @@ tp_populate(env, accounts, branches, history, tellers, verbose)
end_anum = idnum - 1;
if ((ret = dbp->close(dbp, 0)) != 0) {
env->err(env, ret, "DB->close: account");
- return (ERROR_RETURN);
+ return (1);
}
if (verbose)
printf("Populated accounts: %ld - %ld\n",
@@ -422,16 +318,15 @@ tp_populate(env, accounts, branches, history, tellers, verbose)
*/
if ((ret = db_create(&dbp, env, 0)) != 0) {
env->err(env, ret, "db_create");
- return (ERROR_RETURN);
+ return (1);
}
(void)dbp->set_h_ffactor(dbp, 1);
(void)dbp->set_h_nelem(dbp, (u_int32_t)branches);
(void)dbp->set_pagesize(dbp, 512);
- snprintf(dbname, sizeof(dbname), "branch");
- if ((ret = dbp->open(dbp, dbname, NULL,
+ if ((ret = dbp->open(dbp, NULL, "branch", NULL,
DB_HASH, oflags, 0644)) != 0) {
env->err(env, ret, "DB->open: branch");
- return (ERROR_RETURN);
+ return (1);
}
start_bnum = idnum;
populate(dbp, idnum, balance, branches, "branch");
@@ -439,7 +334,7 @@ tp_populate(env, accounts, branches, history, tellers, verbose)
end_bnum = idnum - 1;
if ((ret = dbp->close(dbp, 0)) != 0) {
env->err(env, ret, "DB->close: branch");
- return (ERROR_RETURN);
+ return (1);
}
if (verbose)
printf("Populated branches: %ld - %ld\n",
@@ -451,16 +346,15 @@ tp_populate(env, accounts, branches, history, tellers, verbose)
*/
if ((ret = db_create(&dbp, env, 0)) != 0) {
env->err(env, ret, "db_create");
- return (ERROR_RETURN);
+ return (1);
}
(void)dbp->set_h_ffactor(dbp, 0);
(void)dbp->set_h_nelem(dbp, (u_int32_t)tellers);
(void)dbp->set_pagesize(dbp, 512);
- snprintf(dbname, sizeof(dbname), "teller");
- if ((ret = dbp->open(dbp, dbname, NULL,
+ if ((ret = dbp->open(dbp, NULL, "teller", NULL,
DB_HASH, oflags, 0644)) != 0) {
env->err(env, ret, "DB->open: teller");
- return (ERROR_RETURN);
+ return (1);
}
start_tnum = idnum;
@@ -469,7 +363,7 @@ tp_populate(env, accounts, branches, history, tellers, verbose)
end_tnum = idnum - 1;
if ((ret = dbp->close(dbp, 0)) != 0) {
env->err(env, ret, "DB->close: teller");
- return (ERROR_RETURN);
+ return (1);
}
if (verbose)
printf("Populated tellers: %ld - %ld\n",
@@ -477,20 +371,19 @@ tp_populate(env, accounts, branches, history, tellers, verbose)
if ((ret = db_create(&dbp, env, 0)) != 0) {
env->err(env, ret, "db_create");
- return (ERROR_RETURN);
+ return (1);
}
(void)dbp->set_re_len(dbp, HISTORY_LEN);
- snprintf(dbname, sizeof(dbname), "history");
- if ((ret = dbp->open(dbp, dbname, NULL,
+ if ((ret = dbp->open(dbp, NULL, "history", NULL,
DB_RECNO, oflags, 0644)) != 0) {
env->err(env, ret, "DB->open: history");
- return (ERROR_RETURN);
+ return (1);
}
hpopulate(dbp, history, accounts, branches, tellers);
if ((ret = dbp->close(dbp, 0)) != 0) {
env->err(env, ret, "DB->close: history");
- return (ERROR_RETURN);
+ return (1);
}
return (0);
}
@@ -500,7 +393,7 @@ populate(dbp, start_id, balance, nrecs, msg)
DB *dbp;
u_int32_t start_id, balance;
int nrecs;
- char *msg;
+ const char *msg;
{
DBT kdbt, ddbt;
defrec drec;
@@ -521,7 +414,7 @@ populate(dbp, start_id, balance, nrecs, msg)
(dbp->put)(dbp, NULL, &kdbt, &ddbt, DB_NOOVERWRITE)) != 0) {
dbp->err(dbp,
ret, "Failure initializing %s file\n", msg);
- return (ERROR_RETURN);
+ return (1);
}
}
return (0);
@@ -552,7 +445,7 @@ hpopulate(dbp, history, accounts, branches, tellers)
hrec.tid = random_id(TELLER, accounts, branches, tellers);
if ((ret = dbp->put(dbp, NULL, &kdbt, &ddbt, DB_APPEND)) != 0) {
dbp->err(dbp, ret, "dbp->put");
- return (ERROR_RETURN);
+ return (1);
}
}
return (0);
@@ -606,67 +499,56 @@ tp_run(dbenv, n, accounts, branches, tellers, verbose)
int n, accounts, branches, tellers, verbose;
{
DB *adb, *bdb, *hdb, *tdb;
- char dbname[100];
double gtps, itps;
int failed, ifailed, ret, txns;
time_t starttime, curtime, lasttime;
-#ifndef DB_WIN32
- pid_t pid;
-
- pid = getpid();
-#else
- int pid;
- pid = 0;
-#endif
+ adb = bdb = hdb = tdb = NULL;
+ txns = failed = 0;
/*
* Open the database files.
*/
if ((ret = db_create(&adb, dbenv, 0)) != 0) {
dbenv->err(dbenv, ret, "db_create");
- return (ERROR_RETURN);
+ goto err;
}
- snprintf(dbname, sizeof(dbname), "account");
- if ((ret = adb->open(adb, dbname, NULL, DB_UNKNOWN, 0, 0)) != 0) {
+ if ((ret = adb->open(adb, NULL, "account", NULL, DB_UNKNOWN,
+ DB_AUTO_COMMIT, 0)) != 0) {
dbenv->err(dbenv, ret, "DB->open: account");
- return (ERROR_RETURN);
+ goto err;
}
-
if ((ret = db_create(&bdb, dbenv, 0)) != 0) {
dbenv->err(dbenv, ret, "db_create");
- return (ERROR_RETURN);
+ goto err;
}
- snprintf(dbname, sizeof(dbname), "branch");
- if ((ret = bdb->open(bdb, dbname, NULL, DB_UNKNOWN, 0, 0)) != 0) {
+ if ((ret = bdb->open(bdb, NULL, "branch", NULL, DB_UNKNOWN,
+ DB_AUTO_COMMIT, 0)) != 0) {
dbenv->err(dbenv, ret, "DB->open: branch");
- return (ERROR_RETURN);
+ goto err;
}
-
- if ((ret = db_create(&tdb, dbenv, 0)) != 0) {
+ if ((ret = db_create(&hdb, dbenv, 0)) != 0) {
dbenv->err(dbenv, ret, "db_create");
- return (ERROR_RETURN);
+ goto err;
}
- snprintf(dbname, sizeof(dbname), "teller");
- if ((ret = tdb->open(tdb, dbname, NULL, DB_UNKNOWN, 0, 0)) != 0) {
- dbenv->err(dbenv, ret, "DB->open: teller");
- return (ERROR_RETURN);
+ if ((ret = hdb->open(hdb, NULL, "history", NULL, DB_UNKNOWN,
+ DB_AUTO_COMMIT, 0)) != 0) {
+ dbenv->err(dbenv, ret, "DB->open: history");
+ goto err;
}
-
- if ((ret = db_create(&hdb, dbenv, 0)) != 0) {
+ if ((ret = db_create(&tdb, dbenv, 0)) != 0) {
dbenv->err(dbenv, ret, "db_create");
- return (ERROR_RETURN);
+ goto err;
}
- snprintf(dbname, sizeof(dbname), "history");
- if ((ret = hdb->open(hdb, dbname, NULL, DB_UNKNOWN, 0, 0)) != 0) {
- dbenv->err(dbenv, ret, "DB->open: history");
- return (ERROR_RETURN);
+ if ((ret = tdb->open(tdb, NULL, "teller", NULL, DB_UNKNOWN,
+ DB_AUTO_COMMIT, 0)) != 0) {
+ dbenv->err(dbenv, ret, "DB->open: teller");
+ goto err;
}
- txns = failed = ifailed = 0;
starttime = time(NULL);
lasttime = starttime;
- while (n-- > 0) {
+ for (ifailed = 0; n-- > 0;) {
txns++;
ret = tp_txn(dbenv, adb, bdb, tdb, hdb,
accounts, branches, tellers, verbose);
@@ -678,8 +560,7 @@ tp_run(dbenv, n, accounts, branches, tellers, verbose)
curtime = time(NULL);
gtps = (double)(txns - failed) / (curtime - starttime);
itps = (double)(5000 - ifailed) / (curtime - lasttime);
- printf("[%d] %d txns %d failed ", (int)pid,
- txns, failed);
+ printf("%d txns %d failed ", txns, failed);
printf("%6.2f TPS (gross) %6.2f TPS (interval)\n",
gtps, itps);
lasttime = curtime;
@@ -687,13 +568,17 @@ tp_run(dbenv, n, accounts, branches, tellers, verbose)
}
}
- (void)adb->close(adb, 0);
- (void)bdb->close(bdb, 0);
- (void)tdb->close(tdb, 0);
- (void)hdb->close(hdb, 0);
+err: if (adb != NULL)
+ (void)adb->close(adb, 0);
+ if (bdb != NULL)
+ (void)bdb->close(bdb, 0);
+ if (tdb != NULL)
+ (void)tdb->close(tdb, 0);
+ if (hdb != NULL)
+ (void)hdb->close(hdb, 0);
printf("%ld transactions begun %ld failed\n", (long)txns, (long)failed);
- return (0);
+ return (ret == 0 ? 0 : 1);
}
/*
@@ -711,7 +596,7 @@ tp_txn(dbenv, adb, bdb, tdb, hdb, accounts, branches, tellers, verbose)
db_recno_t key;
defrec rec;
histrec hrec;
- int account, branch, teller;
+ int account, branch, teller, ret;
t = NULL;
acurs = bcurs = tcurs = NULL;
@@ -746,7 +631,7 @@ tp_txn(dbenv, adb, bdb, tdb, hdb, accounts, branches, tellers, verbose)
d_histdbt.flags = DB_DBT_PARTIAL;
/* START TIMING */
- if (txn_begin(dbenv, NULL, &t, 0) != 0)
+ if (dbenv->txn_begin(dbenv, NULL, &t, 0) != 0)
goto err;
if (adb->cursor(adb, t, &acurs, 0) != 0 ||
@@ -789,7 +674,9 @@ tp_txn(dbenv, adb, bdb, tdb, hdb, accounts, branches, tellers, verbose)
tcurs->c_close(tcurs) != 0)
goto err;
- if (txn_commit(t, 0) != 0)
+ ret = t->commit(t, 0);
+ t = NULL;
+ if (ret != 0)
goto err;
/* END TIMING */
@@ -802,7 +689,7 @@ err: if (acurs != NULL)
if (tcurs != NULL)
(void)tcurs->c_close(tcurs);
if (t != NULL)
- (void)txn_abort(t);
+ (void)t->abort(t);
if (verbose)
printf("Transaction A=%ld B=%ld T=%ld failed\n",
diff --git a/bdb/examples_c/ex_tpcb.h b/bdb/examples_c/ex_tpcb.h
index ef90bc53234..22b79c37e80 100644
--- a/bdb/examples_c/ex_tpcb.h
+++ b/bdb/examples_c/ex_tpcb.h
@@ -1,10 +1,10 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Copyright (c) 1996-2002
* Sleepycat Software. All rights reserved.
*
- * $Id: ex_tpcb.h,v 11.4 2000/05/17 19:21:02 bostic Exp $
+ * $Id: ex_tpcb.h,v 11.6 2002/01/11 15:52:06 bostic Exp $
*/
#ifndef _TPCB_H_
diff --git a/bdb/examples_cxx/AccessExample.cpp b/bdb/examples_cxx/AccessExample.cpp
index ae885aa8388..921463b3a54 100644
--- a/bdb/examples_cxx/AccessExample.cpp
+++ b/bdb/examples_cxx/AccessExample.cpp
@@ -1,29 +1,26 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1997, 1998, 1999, 2000
+ * Copyright (c) 1997-2002
* Sleepycat Software. All rights reserved.
*
- * $Id: AccessExample.cpp,v 11.7 2000/12/06 18:58:23 bostic Exp $
+ * $Id: AccessExample.cpp,v 11.18 2002/01/23 15:33:20 bostic Exp $
*/
-#include "db_config.h"
-
-#ifndef NO_SYSTEM_INCLUDES
#include <sys/types.h>
-#include <iostream.h>
+#include <iostream>
+#include <iomanip>
#include <errno.h>
#include <stdlib.h>
#include <string.h>
-#ifndef _MSC_VER
-#include <unistd.h>
-#endif
-#endif
-#include <iomanip.h>
#include <db_cxx.h>
+using std::cin;
+using std::cout;
+using std::cerr;
+
class AccessExample
{
public:
@@ -38,14 +35,8 @@ private:
void operator = (const AccessExample &);
};
-static void usage(); // forward
-
-int main(int argc, char *argv[])
+int main()
{
- if (argc > 1) {
- usage();
- }
-
// Use a try block just to report any errors.
// An alternate approach to using exceptions is to
// use error models (see DbEnv::set_error_model()) so
@@ -54,20 +45,14 @@ int main(int argc, char *argv[])
try {
AccessExample app;
app.run();
- return 0;
+ return (EXIT_SUCCESS);
}
catch (DbException &dbe) {
cerr << "AccessExample: " << dbe.what() << "\n";
- return 1;
+ return (EXIT_FAILURE);
}
}
-static void usage()
-{
- cerr << "usage: AccessExample\n";
- exit(1);
-}
-
const char AccessExample::FileName[] = "access.db";
AccessExample::AccessExample()
@@ -77,7 +62,7 @@ AccessExample::AccessExample()
void AccessExample::run()
{
// Remove the previous database.
- (void)unlink(FileName);
+ (void)remove(FileName);
// Create the database object.
// There is no environment for this simple example.
@@ -87,7 +72,7 @@ void AccessExample::run()
db.set_errpfx("AccessExample");
db.set_pagesize(1024); /* Page size: 1K. */
db.set_cachesize(0, 32 * 1024, 0);
- db.open(FileName, NULL, DB_BTREE, DB_CREATE, 0664);
+ db.open(NULL, FileName, NULL, DB_BTREE, DB_CREATE, 0664);
//
// Insert records into the database, where the key is the user
diff --git a/bdb/examples_cxx/BtRecExample.cpp b/bdb/examples_cxx/BtRecExample.cpp
index 98d9626b969..b56c3fe5837 100644
--- a/bdb/examples_cxx/BtRecExample.cpp
+++ b/bdb/examples_cxx/BtRecExample.cpp
@@ -1,35 +1,31 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1997, 1998, 1999, 2000
+ * Copyright (c) 1997-2002
* Sleepycat Software. All rights reserved.
*
- * $Id: BtRecExample.cpp,v 11.6 2000/02/19 20:57:59 bostic Exp $
+ * $Id: BtRecExample.cpp,v 11.21 2002/01/23 15:33:20 bostic Exp $
*/
-#include "db_config.h"
-
-#ifndef NO_SYSTEM_INCLUDES
#include <sys/types.h>
+
#include <errno.h>
-#include <iostream.h>
+#include <iostream>
+#include <iomanip>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
-#include <unistd.h>
-#endif
-#include <iomanip.h>
#include <db_cxx.h>
+using std::cout;
+using std::cerr;
+
#define DATABASE "access.db"
#define WORDLIST "../test/wordlist"
-void usage();
-extern "C" int getopt(int, char * const *, const char *);
-
-char *progname = "BtRecExample"; // Program name.
+const char *progname = "BtRecExample"; // Program name.
class BtRecExample
{
@@ -38,7 +34,7 @@ public:
~BtRecExample();
void run();
void stats();
- void show(char *msg, Dbt *key, Dbt *data);
+ void show(const char *msg, Dbt *key, Dbt *data);
private:
Db *dbp;
@@ -51,7 +47,7 @@ BtRecExample::BtRecExample(FILE *fp)
int ret;
// Remove the previous database.
- (void)unlink(DATABASE);
+ (void)remove(DATABASE);
dbp = new Db(NULL, 0);
@@ -60,7 +56,7 @@ BtRecExample::BtRecExample(FILE *fp)
dbp->set_pagesize(1024); // 1K page sizes.
dbp->set_flags(DB_RECNUM); // Record numbers.
- dbp->open(DATABASE, NULL, DB_BTREE, DB_CREATE, 0664);
+ dbp->open(NULL, DATABASE, NULL, DB_BTREE, DB_CREATE, 0664);
//
// Insert records into the database, where the key is the word
@@ -107,7 +103,7 @@ void BtRecExample::stats()
{
DB_BTREE_STAT *statp;
- dbp->stat(&statp, NULL, 0);
+ dbp->stat(&statp, 0);
cout << progname << ": database contains "
<< (u_long)statp->bt_ndata << " records\n";
@@ -191,34 +187,22 @@ void BtRecExample::run()
// show --
// Display a key/data pair.
//
-void BtRecExample::show(char *msg, Dbt *key, Dbt *data)
+void BtRecExample::show(const char *msg, Dbt *key, Dbt *data)
{
cout << msg << (char *)key->get_data()
<< " : " << (char *)data->get_data() << "\n";
}
int
-main(int argc, char *argv[])
+main()
{
- extern char *optarg;
- extern int optind;
FILE *fp;
- int ch;
-
- while ((ch = getopt(argc, argv, "")) != EOF)
- switch (ch) {
- case '?':
- default:
- usage();
- }
- argc -= optind;
- argv += optind;
// Open the word database.
if ((fp = fopen(WORDLIST, "r")) == NULL) {
fprintf(stderr, "%s: open %s: %s\n",
progname, WORDLIST, db_strerror(errno));
- exit (1);
+ return (EXIT_FAILURE);
}
try {
@@ -233,15 +217,8 @@ main(int argc, char *argv[])
}
catch (DbException &dbe) {
cerr << "Exception: " << dbe.what() << "\n";
- return dbe.get_errno();
+ return (EXIT_FAILURE);
}
- return (0);
-}
-
-void
-usage()
-{
- (void)fprintf(stderr, "usage: %s\n", progname);
- exit(1);
+ return (EXIT_SUCCESS);
}
diff --git a/bdb/examples_cxx/EnvExample.cpp b/bdb/examples_cxx/EnvExample.cpp
index bef1f3d1ace..4eeb9f115e2 100644
--- a/bdb/examples_cxx/EnvExample.cpp
+++ b/bdb/examples_cxx/EnvExample.cpp
@@ -1,28 +1,27 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1997, 1998, 1999, 2000
+ * Copyright (c) 1997-2002
* Sleepycat Software. All rights reserved.
*
- * $Id: EnvExample.cpp,v 11.12 2000/10/27 20:32:00 dda Exp $
+ * $Id: EnvExample.cpp,v 11.24 2002/01/11 15:52:15 bostic Exp $
*/
-#include "db_config.h"
-
-#ifndef NO_SYSTEM_INCLUDES
#include <sys/types.h>
#include <errno.h>
-#include <iostream.h>
+#include <iostream>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
-#include <unistd.h>
-#endif
#include <db_cxx.h>
+using std::ostream;
+using std::cout;
+using std::cerr;
+
#ifdef macintosh
#define DATABASE_HOME ":database"
#define CONFIG_DATA_DIR ":database"
@@ -36,10 +35,10 @@
#endif
#endif
-void db_setup(char *, char *, ostream&);
-void db_teardown(char *, char *, ostream&);
+void db_setup(const char *, const char *, ostream&);
+void db_teardown(const char *, const char *, ostream&);
-char *progname = "EnvExample"; /* Program name. */
+const char *progname = "EnvExample"; /* Program name. */
//
// An example of a program creating/configuring a Berkeley DB environment.
@@ -54,7 +53,7 @@ main(int, char **)
// and check error returns from all methods.
//
try {
- char *data_dir, *home;
+ const char *data_dir, *home;
//
// All of the shared database files live in /home/database,
@@ -64,21 +63,21 @@ main(int, char **)
data_dir = CONFIG_DATA_DIR;
cout << "Setup env\n";
- db_setup(DATABASE_HOME, data_dir, cerr);
+ db_setup(home, data_dir, cerr);
cout << "Teardown env\n";
- db_teardown(DATABASE_HOME, data_dir, cerr);
- return 0;
+ db_teardown(home, data_dir, cerr);
+ return (EXIT_SUCCESS);
}
catch (DbException &dbe) {
- cerr << "AccessExample: " << dbe.what() << "\n";
- return 1;
+ cerr << "EnvExample: " << dbe.what() << "\n";
+ return (EXIT_FAILURE);
}
}
// Note that any of the db calls can throw DbException
void
-db_setup(char *home, char *data_dir, ostream& err_stream)
+db_setup(const char *home, const char *data_dir, ostream& err_stream)
{
//
// Create an environment object and initialize it for error
@@ -98,7 +97,7 @@ db_setup(char *home, char *data_dir, ostream& err_stream)
(void)dbenv->set_data_dir(data_dir);
// Open the environment with full transactional support.
- dbenv->open(DATABASE_HOME,
+ dbenv->open(home,
DB_CREATE | DB_INIT_LOCK | DB_INIT_LOG | DB_INIT_MPOOL | DB_INIT_TXN, 0);
// Do something interesting...
@@ -108,7 +107,7 @@ db_setup(char *home, char *data_dir, ostream& err_stream)
}
void
-db_teardown(char *home, char *data_dir, ostream& err_stream)
+db_teardown(const char *home, const char *data_dir, ostream& err_stream)
{
// Remove the shared database regions.
DbEnv *dbenv = new DbEnv(0);
diff --git a/bdb/examples_cxx/LockExample.cpp b/bdb/examples_cxx/LockExample.cpp
index cfab2868098..167900b9476 100644
--- a/bdb/examples_cxx/LockExample.cpp
+++ b/bdb/examples_cxx/LockExample.cpp
@@ -1,27 +1,26 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1997, 1998, 1999, 2000
+ * Copyright (c) 1997-2002
* Sleepycat Software. All rights reserved.
*
- * $Id: LockExample.cpp,v 11.8 2001/01/04 14:23:30 dda Exp $
+ * $Id: LockExample.cpp,v 11.22 2002/01/11 15:52:15 bostic Exp $
*/
-#include "db_config.h"
-
-#ifndef NO_SYSTEM_INCLUDES
#include <sys/types.h>
#include <errno.h>
-#include <iostream.h>
+#include <iostream>
#include <stdlib.h>
#include <string.h>
-#include <unistd.h>
-#endif
#include <db_cxx.h>
-char *progname = "LockExample"; // Program name.
+using std::cin;
+using std::cout;
+using std::cerr;
+
+const char *progname = "LockExample"; // Program name.
//
// An example of a program using DBLock and related classes.
@@ -30,18 +29,20 @@ class LockExample : public DbEnv
{
public:
void run();
+ int error_code() { return (ecode); }
LockExample(const char *home, u_int32_t maxlocks, int do_unlink);
private:
static const char FileName[];
+ int ecode;
// no need for copy and assignment
LockExample(const LockExample &);
void operator = (const LockExample &);
};
-static void usage(); // forward
+static int usage(); // forward
int
main(int argc, char *argv[])
@@ -57,44 +58,51 @@ main(int argc, char *argv[])
for (int argnum = 1; argnum < argc; ++argnum) {
if (strcmp(argv[argnum], "-h") == 0) {
if (++argnum >= argc)
- usage();
+ return (usage());
home = argv[argnum];
}
else if (strcmp(argv[argnum], "-m") == 0) {
if (++argnum >= argc)
- usage();
+ return (usage());
if ((i = atoi(argv[argnum])) <= 0)
- usage();
+ return (usage());
maxlocks = (u_int32_t)i; /* XXX: possible overflow. */
}
else if (strcmp(argv[argnum], "-u") == 0) {
do_unlink = 1;
}
else {
- usage();
+ return (usage());
}
}
try {
+ int ecode;
+
if (do_unlink) {
// Create an environment that immediately
// removes all files.
LockExample tmp(home, maxlocks, do_unlink);
+ if ((ecode = tmp.error_code()) != 0)
+ return (ecode);
}
LockExample app(home, maxlocks, do_unlink);
+ if ((ecode = app.error_code()) != 0)
+ return (ecode);
app.run();
app.close(0);
- return 0;
+ return (EXIT_SUCCESS);
}
catch (DbException &dbe) {
cerr << "LockExample: " << dbe.what() << "\n";
- return 1;
+ return (EXIT_FAILURE);
}
}
LockExample::LockExample(const char *home, u_int32_t maxlocks, int do_unlink)
: DbEnv(0)
+, ecode(0)
{
int ret;
@@ -102,7 +110,7 @@ LockExample::LockExample(const char *home, u_int32_t maxlocks, int do_unlink)
if ((ret = remove(home, DB_FORCE)) != 0) {
cerr << progname << ": DbEnv::remove: "
<< strerror(errno) << "\n";
- exit (1);
+ ecode = EXIT_FAILURE;
}
}
else {
@@ -198,7 +206,7 @@ void LockExample::run()
continue;
}
DbLock lock = locks[lockid];
- ret = lock.put(this);
+ ret = lock_put(&lock);
did_get = 0;
}
@@ -228,9 +236,9 @@ void LockExample::run()
delete locks;
}
-static void
+static int
usage()
{
cerr << "usage: LockExample [-u] [-h home] [-m maxlocks]\n";
- exit(1);
+ return (EXIT_FAILURE);
}
diff --git a/bdb/examples_cxx/MpoolExample.cpp b/bdb/examples_cxx/MpoolExample.cpp
index cf0f5f7e6a4..276cb94d66d 100644
--- a/bdb/examples_cxx/MpoolExample.cpp
+++ b/bdb/examples_cxx/MpoolExample.cpp
@@ -1,43 +1,44 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1997, 1998, 1999, 2000
+ * Copyright (c) 1997-2002
* Sleepycat Software. All rights reserved.
*
- * $Id: MpoolExample.cpp,v 11.9 2000/10/27 20:32:01 dda Exp $
+ * $Id: MpoolExample.cpp,v 11.23 2002/01/11 15:52:15 bostic Exp $
*/
-#include "db_config.h"
-
-#ifndef NO_SYSTEM_INCLUDES
#include <sys/types.h>
#include <errno.h>
#include <fcntl.h>
-#include <iostream.h>
+#include <iostream>
+#include <fstream>
#include <stdlib.h>
#include <string.h>
#include <time.h>
-#include <unistd.h>
-#endif
#include <db_cxx.h>
+using std::cout;
+using std::cerr;
+using std::ios;
+using std::ofstream;
+
#define MPOOL "mpool"
-void init(char *, int, int);
-void run(DB_ENV *, int, int, int);
+int init(const char *, int, int);
+int run(DB_ENV *, int, int, int);
-static void usage();
+static int usage();
-char *progname = "MpoolExample"; // Program name.
+const char *progname = "MpoolExample"; // Program name.
class MpoolExample : public DbEnv
{
public:
MpoolExample();
- void initdb(const char *home, int cachesize);
- void run(int hits, int pagesize, int npages);
+ int initdb(const char *home, int cachesize);
+ int run(int hits, int pagesize, int npages);
private:
static const char FileName[];
@@ -49,6 +50,7 @@ private:
int main(int argc, char *argv[])
{
+ int ret;
int cachesize = 20 * 1024;
int hits = 1000;
int npages = 50;
@@ -77,7 +79,8 @@ int main(int argc, char *argv[])
}
// Initialize the file.
- init(MPOOL, pagesize, npages);
+ if ((ret = init(MPOOL, pagesize, npages)) != 0)
+ return (ret);
try {
MpoolExample app;
@@ -87,14 +90,16 @@ int main(int argc, char *argv[])
<< "; pagesize: " << pagesize
<< "; N pages: " << npages << "\n";
- app.initdb(NULL, cachesize);
- app.run(hits, pagesize, npages);
+ if ((ret = app.initdb(NULL, cachesize)) != 0)
+ return (ret);
+ if ((ret = app.run(hits, pagesize, npages)) != 0)
+ return (ret);
cout << "MpoolExample: completed\n";
- return 0;
+ return (EXIT_SUCCESS);
}
catch (DbException &dbe) {
cerr << "MpoolExample: " << dbe.what() << "\n";
- return 1;
+ return (EXIT_FAILURE);
}
}
@@ -102,21 +107,16 @@ int main(int argc, char *argv[])
// init --
// Create a backing file.
//
-void
-init(char *file, int pagesize, int npages)
+int
+init(const char *file, int pagesize, int npages)
{
- //
// Create a file with the right number of pages, and store a page
// number on each page.
- //
- int fd;
- int flags = O_CREAT | O_RDWR | O_TRUNC;
-#ifdef DB_WIN32
- flags |= O_BINARY;
-#endif
- if ((fd = open(file, flags, 0666)) < 0) {
- cerr << "MpoolExample: " << file << ": " << strerror(errno) << "\n";
- exit(1);
+ ofstream of(file, ios::out | ios::binary);
+
+ if (of.fail()) {
+ cerr << "MpoolExample: " << file << ": open failed\n";
+ return (EXIT_FAILURE);
}
char *p = new char[pagesize];
memset(p, 0, pagesize);
@@ -124,21 +124,22 @@ init(char *file, int pagesize, int npages)
// The pages are numbered from 0.
for (int cnt = 0; cnt <= npages; ++cnt) {
*(db_pgno_t *)p = cnt;
- if (write(fd, p, pagesize) != pagesize) {
- cerr << "MpoolExample: " << file
- << ": " << strerror(errno) << "\n";
- exit(1);
+ of.write(p, pagesize);
+ if (of.fail()) {
+ cerr << "MpoolExample: " << file << ": write failed\n";
+ return (EXIT_FAILURE);
}
}
delete [] p;
+ return (EXIT_SUCCESS);
}
-static void
+static int
usage()
{
cerr << "usage: MpoolExample [-c cachesize] "
<< "[-h hits] [-n npages] [-p pagesize]\n";
- exit(1);
+ return (EXIT_FAILURE);
}
// Note: by using DB_CXX_NO_EXCEPTIONS, we get explicit error returns
@@ -150,61 +151,68 @@ MpoolExample::MpoolExample()
{
}
-void MpoolExample::initdb(const char *home, int cachesize)
+int MpoolExample::initdb(const char *home, int cachesize)
{
set_error_stream(&cerr);
set_errpfx("MpoolExample");
set_cachesize(0, cachesize, 0);
open(home, DB_CREATE | DB_INIT_MPOOL, 0);
+ return (EXIT_SUCCESS);
}
//
// run --
// Get a set of pages.
//
-void
+int
MpoolExample::run(int hits, int pagesize, int npages)
{
db_pgno_t pageno;
- int cnt;
+ int cnt, ret;
void *p;
- // Open the file in the pool.
- DbMpoolFile *dbmfp;
+ // Open the file in the environment.
+ DbMpoolFile *mfp;
- DbMpoolFile::open(this, MPOOL, 0, 0, pagesize, NULL, &dbmfp);
+ if ((ret = memp_fcreate(&mfp, 0)) != 0) {
+ cerr << "MpoolExample: memp_fcreate failed: "
+ << strerror(ret) << "\n";
+ return (EXIT_FAILURE);
+ }
+ mfp->open(MPOOL, 0, 0, pagesize);
cout << "retrieve " << hits << " random pages... ";
srand((unsigned int)time(NULL));
for (cnt = 0; cnt < hits; ++cnt) {
pageno = (rand() % npages) + 1;
- if ((errno = dbmfp->get(&pageno, 0, &p)) != 0) {
+ if ((ret = mfp->get(&pageno, 0, &p)) != 0) {
cerr << "MpoolExample: unable to retrieve page "
<< (unsigned long)pageno << ": "
- << strerror(errno) << "\n";
- exit(1);
+ << strerror(ret) << "\n";
+ return (EXIT_FAILURE);
}
if (*(db_pgno_t *)p != pageno) {
cerr << "MpoolExample: wrong page retrieved ("
<< (unsigned long)pageno << " != "
<< *(int *)p << ")\n";
- exit(1);
+ return (EXIT_FAILURE);
}
- if ((errno = dbmfp->put(p, 0)) != 0) {
+ if ((ret = mfp->put(p, 0)) != 0) {
cerr << "MpoolExample: unable to return page "
<< (unsigned long)pageno << ": "
- << strerror(errno) << "\n";
- exit(1);
+ << strerror(ret) << "\n";
+ return (EXIT_FAILURE);
}
}
cout << "successful.\n";
// Close the pool.
- if ((errno = close(0)) != 0) {
- cerr << "MpoolExample: " << strerror(errno) << "\n";
- exit(1);
+ if ((ret = close(0)) != 0) {
+ cerr << "MpoolExample: " << strerror(ret) << "\n";
+ return (EXIT_FAILURE);
}
+ return (EXIT_SUCCESS);
}
diff --git a/bdb/examples_cxx/TpcbExample.cpp b/bdb/examples_cxx/TpcbExample.cpp
index f4ca72df8e3..a57fa6aee8a 100644
--- a/bdb/examples_cxx/TpcbExample.cpp
+++ b/bdb/examples_cxx/TpcbExample.cpp
@@ -1,54 +1,35 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1997, 1998, 1999, 2000
+ * Copyright (c) 1997-2002
* Sleepycat Software. All rights reserved.
*
- * $Id: TpcbExample.cpp,v 11.14 2000/10/27 20:32:01 dda Exp $
+ * $Id: TpcbExample.cpp,v 11.30 2002/02/13 06:08:34 mjc Exp $
*/
-#include "db_config.h"
-
-#ifndef NO_SYSTEM_INCLUDES
#include <sys/types.h>
-#if TIME_WITH_SYS_TIME
-#include <sys/time.h>
-#include <time.h>
-#else
-#if HAVE_SYS_TIME_H
-#include <sys/time.h>
-#else
-#include <time.h>
-#endif
-#endif
-
#include <errno.h>
#include <stdlib.h>
#include <string.h>
-#include <unistd.h>
-#endif
-
-#ifdef DB_WIN32
-#include <sys/types.h>
-#include <sys/timeb.h>
-#endif
+#include <time.h>
-#include <iostream.h>
-#include <iomanip.h>
+#include <iostream>
+#include <iomanip>
#include <db_cxx.h>
-typedef enum { ACCOUNT, BRANCH, TELLER } FTYPE;
+using std::cout;
+using std::cerr;
-void errExit(int err, const char *); // show err as errno and exit
+typedef enum { ACCOUNT, BRANCH, TELLER } FTYPE;
-void invarg(int, char *);
+static int invarg(int, char *);
u_int32_t random_id(FTYPE, u_int32_t, u_int32_t, u_int32_t);
u_int32_t random_int(u_int32_t, u_int32_t);
-static void usage(void);
+static int usage(void);
int verbose;
-char *progname = "TpcbExample"; // Program name.
+const char *progname = "TpcbExample"; // Program name.
class TpcbExample : public DbEnv
{
@@ -58,7 +39,7 @@ public:
int txn(Db *, Db *, Db *, Db *,
int, int, int);
void populateHistory(Db *, int, u_int32_t, u_int32_t, u_int32_t);
- void populateTable(Db *, u_int32_t, u_int32_t, int, char *);
+ void populateTable(Db *, u_int32_t, u_int32_t, int, const char *);
// Note: the constructor creates a DbEnv(), which is
// not fully initialized until the DbEnv::open() method
@@ -139,7 +120,8 @@ main(int argc, char *argv[])
unsigned long seed;
int accounts, branches, tellers, history;
int iflag, mpool, ntxns, txn_no_sync;
- char *home, *endarg;
+ const char *home;
+ char *endarg;
home = "TESTDIR";
accounts = branches = history = tellers = 0;
@@ -147,24 +129,24 @@ main(int argc, char *argv[])
mpool = ntxns = 0;
verbose = 0;
iflag = 0;
- seed = (unsigned long)getpid();
+ seed = (unsigned long)time(NULL);
for (int i = 1; i < argc; ++i) {
if (strcmp(argv[i], "-a") == 0) {
// Number of account records
if ((accounts = atoi(argv[++i])) <= 0)
- invarg('a', argv[i]);
+ return (invarg('a', argv[i]));
}
else if (strcmp(argv[i], "-b") == 0) {
// Number of branch records
if ((branches = atoi(argv[++i])) <= 0)
- invarg('b', argv[i]);
+ return (invarg('b', argv[i]));
}
else if (strcmp(argv[i], "-c") == 0) {
// Cachesize in bytes
if ((mpool = atoi(argv[++i])) <= 0)
- invarg('c', argv[i]);
+ return (invarg('c', argv[i]));
}
else if (strcmp(argv[i], "-f") == 0) {
// Fast mode: no txn sync.
@@ -181,30 +163,30 @@ main(int argc, char *argv[])
else if (strcmp(argv[i], "-n") == 0) {
// Number of transactions
if ((ntxns = atoi(argv[++i])) <= 0)
- invarg('n', argv[i]);
+ return (invarg('n', argv[i]));
}
else if (strcmp(argv[i], "-S") == 0) {
// Random number seed.
seed = strtoul(argv[++i], &endarg, 0);
if (*endarg != '\0')
- invarg('S', argv[i]);
+ return (invarg('S', argv[i]));
}
else if (strcmp(argv[i], "-s") == 0) {
// Number of history records
if ((history = atoi(argv[++i])) <= 0)
- invarg('s', argv[i]);
+ return (invarg('s', argv[i]));
}
else if (strcmp(argv[i], "-t") == 0) {
// Number of teller records
if ((tellers = atoi(argv[++i])) <= 0)
- invarg('t', argv[i]);
+ return (invarg('t', argv[i]));
}
else if (strcmp(argv[i], "-v") == 0) {
// Verbose option.
verbose = 1;
}
else {
- usage();
+ return (usage());
}
}
@@ -216,9 +198,9 @@ main(int argc, char *argv[])
history = history == 0 ? HISTORY : history;
if (verbose)
- cout << (long)accounts << " Accounts "
- << (long)branches << " Branches "
- << (long)tellers << " Tellers "
+ cout << (long)accounts << " Accounts, "
+ << (long)branches << " Branches, "
+ << (long)tellers << " Tellers, "
<< (long)history << " History\n";
try {
@@ -226,43 +208,44 @@ main(int argc, char *argv[])
// Must be done in within a try block, unless you
// change the error model in the environment options.
//
- TpcbExample app(home, mpool, iflag, txn_no_sync ? DB_TXN_NOSYNC : 0);
+ TpcbExample app(home, mpool, iflag,
+ txn_no_sync ? DB_TXN_NOSYNC : 0);
if (iflag) {
if (ntxns != 0)
- usage();
+ return (usage());
app.populate(accounts, branches, history, tellers);
}
else {
if (ntxns == 0)
- usage();
+ return (usage());
app.run(ntxns, accounts, branches, tellers);
}
app.close(0);
- return 0;
+ return (EXIT_SUCCESS);
}
catch (DbException &dbe) {
cerr << "TpcbExample: " << dbe.what() << "\n";
- return 1;
+ return (EXIT_FAILURE);
}
}
-void
+static int
invarg(int arg, char *str)
{
cerr << "TpcbExample: invalid argument for -"
<< (char)arg << ": " << str << "\n";
- exit(1);
+ return (EXIT_FAILURE);
}
-static void
+static int
usage()
{
cerr << "usage: TpcbExample [-fiv] [-a accounts] [-b branches]\n"
<< " [-c cachesize] [-h home] [-n transactions ]\n"
<< " [-S seed] [-s history] [-t tellers]\n";
- exit(1);
+ return (EXIT_FAILURE);
}
TpcbExample::TpcbExample(const char *home, int cachesize,
@@ -274,7 +257,11 @@ TpcbExample::TpcbExample(const char *home, int cachesize,
set_error_stream(&cerr);
set_errpfx("TpcbExample");
(void)set_cachesize(0, cachesize == 0 ?
- 4 * 1024 * 1024 : (u_int32_t)cachesize, 0);
+ 4 * 1024 * 1024 : (u_int32_t)cachesize, 0);
+
+ if (flags & (DB_TXN_NOSYNC))
+ set_flags(DB_TXN_NOSYNC, 1);
+ flags &= ~(DB_TXN_NOSYNC);
local_flags = flags | DB_CREATE | DB_INIT_MPOOL;
if (!initializing)
@@ -302,9 +289,10 @@ TpcbExample::populate(int accounts, int branches, int history, int tellers)
dbp = new Db(this, 0);
dbp->set_h_nelem((unsigned int)accounts);
- if ((err = dbp->open("account", NULL, DB_HASH,
- DB_CREATE | DB_TRUNCATE, 0644)) != 0) {
- errExit(err, "Open of account file failed");
+ if ((err = dbp->open(NULL, "account", NULL, DB_HASH,
+ DB_CREATE | DB_TRUNCATE, 0644)) != 0) {
+ DbException except("Account file create failed", err);
+ throw except;
}
start_anum = idnum;
@@ -312,7 +300,8 @@ TpcbExample::populate(int accounts, int branches, int history, int tellers)
idnum += accounts;
end_anum = idnum - 1;
if ((err = dbp->close(0)) != 0) {
- errExit(err, "Account file close failed");
+ DbException except("Account file close failed", err);
+ throw except;
}
delete dbp;
if (verbose)
@@ -329,16 +318,18 @@ TpcbExample::populate(int accounts, int branches, int history, int tellers)
dbp->set_h_nelem((unsigned int)branches);
dbp->set_pagesize(512);
- if ((err = dbp->open("branch", NULL, DB_HASH,
- DB_CREATE | DB_TRUNCATE, 0644)) != 0) {
- errExit(err, "Branch file create failed");
+ if ((err = dbp->open(NULL, "branch", NULL, DB_HASH,
+ DB_CREATE | DB_TRUNCATE, 0644)) != 0) {
+ DbException except("Branch file create failed", err);
+ throw except;
}
start_bnum = idnum;
populateTable(dbp, idnum, balance, branches, "branch");
idnum += branches;
end_bnum = idnum - 1;
if ((err = dbp->close(0)) != 0) {
- errExit(err, "Close of branch file failed");
+ DbException except("Close of branch file failed", err);
+ throw except;
}
delete dbp;
@@ -355,9 +346,10 @@ TpcbExample::populate(int accounts, int branches, int history, int tellers)
dbp->set_h_nelem((unsigned int)tellers);
dbp->set_pagesize(512);
- if ((err = dbp->open("teller", NULL, DB_HASH,
- DB_CREATE | DB_TRUNCATE, 0644)) != 0) {
- errExit(err, "Teller file create failed");
+ if ((err = dbp->open(NULL, "teller", NULL, DB_HASH,
+ DB_CREATE | DB_TRUNCATE, 0644)) != 0) {
+ DbException except("Teller file create failed", err);
+ throw except;
}
start_tnum = idnum;
@@ -365,7 +357,8 @@ TpcbExample::populate(int accounts, int branches, int history, int tellers)
idnum += tellers;
end_tnum = idnum - 1;
if ((err = dbp->close(0)) != 0) {
- errExit(err, "Close of teller file failed");
+ DbException except("Close of teller file failed", err);
+ throw except;
}
delete dbp;
if (verbose)
@@ -374,22 +367,24 @@ TpcbExample::populate(int accounts, int branches, int history, int tellers)
dbp = new Db(this, 0);
dbp->set_re_len(HISTORY_LEN);
- if ((err = dbp->open("history", NULL, DB_RECNO,
- DB_CREATE | DB_TRUNCATE, 0644)) != 0) {
- errExit(err, "Create of history file failed");
+ if ((err = dbp->open(NULL, "history", NULL, DB_RECNO,
+ DB_CREATE | DB_TRUNCATE, 0644)) != 0) {
+ DbException except("Create of history file failed", err);
+ throw except;
}
populateHistory(dbp, history, accounts, branches, tellers);
if ((err = dbp->close(0)) != 0) {
- errExit(err, "Close of history file failed");
+ DbException except("Close of history file failed", err);
+ throw except;
}
delete dbp;
}
void
TpcbExample::populateTable(Db *dbp,
- u_int32_t start_id, u_int32_t balance,
- int nrecs, char *msg)
+ u_int32_t start_id, u_int32_t balance,
+ int nrecs, const char *msg)
{
Defrec drec;
memset(&drec.pad[0], 1, sizeof(drec.pad));
@@ -405,14 +400,15 @@ TpcbExample::populateTable(Db *dbp,
dbp->put(NULL, &kdbt, &ddbt, DB_NOOVERWRITE)) != 0) {
cerr << "Failure initializing " << msg << " file: "
<< strerror(err) << "\n";
- exit(1);
+ DbException except("failure initializing file", err);
+ throw except;
}
}
}
void
-TpcbExample::populateHistory(Db *dbp, int nrecs,
- u_int32_t accounts, u_int32_t branches, u_int32_t tellers)
+TpcbExample::populateHistory(Db *dbp, int nrecs, u_int32_t accounts,
+ u_int32_t branches, u_int32_t tellers)
{
Histrec hrec;
memset(&hrec.pad[0], 1, sizeof(hrec.pad));
@@ -430,7 +426,9 @@ TpcbExample::populateHistory(Db *dbp, int nrecs,
int err;
key = (db_recno_t)i;
if ((err = dbp->put(NULL, &kdbt, &ddbt, DB_APPEND)) != 0) {
- errExit(err, "Failure initializing history file");
+ DbException except("failure initializing history file",
+ err);
+ throw except;
}
}
}
@@ -478,15 +476,6 @@ TpcbExample::run(int n, int accounts, int branches, int tellers)
double gtps, itps;
int failed, ifailed, ret, txns;
time_t starttime, curtime, lasttime;
-#ifndef DB_WIN32
- pid_t pid;
-
- pid = getpid();
-#else
- int pid;
-
- pid = 0;
-#endif
//
// Open the database files.
@@ -494,20 +483,32 @@ TpcbExample::run(int n, int accounts, int branches, int tellers)
int err;
adb = new Db(this, 0);
- if ((err = adb->open("account", NULL, DB_UNKNOWN, 0, 0)) != 0)
- errExit(err, "Open of account file failed");
+ if ((err = adb->open(NULL, "account", NULL, DB_UNKNOWN,
+ DB_AUTO_COMMIT, 0)) != 0) {
+ DbException except("Open of account file failed", err);
+ throw except;
+ }
bdb = new Db(this, 0);
- if ((err = bdb->open("branch", NULL, DB_UNKNOWN, 0, 0)) != 0)
- errExit(err, "Open of branch file failed");
+ if ((err = bdb->open(NULL, "branch", NULL, DB_UNKNOWN,
+ DB_AUTO_COMMIT, 0)) != 0) {
+ DbException except("Open of branch file failed", err);
+ throw except;
+ }
tdb = new Db(this, 0);
- if ((err = tdb->open("teller", NULL, DB_UNKNOWN, 0, 0)) != 0)
- errExit(err, "Open of teller file failed");
+ if ((err = tdb->open(NULL, "teller", NULL, DB_UNKNOWN,
+ DB_AUTO_COMMIT, 0)) != 0) {
+ DbException except("Open of teller file failed", err);
+ throw except;
+ }
hdb = new Db(this, 0);
- if ((err = hdb->open("history", NULL, DB_UNKNOWN, 0, 0)) != 0)
- errExit(err, "Open of history file failed");
+ if ((err = hdb->open(NULL, "history", NULL, DB_UNKNOWN,
+ DB_AUTO_COMMIT, 0)) != 0) {
+ DbException except("Open of history file failed", err);
+ throw except;
+ }
txns = failed = ifailed = 0;
starttime = time(NULL);
@@ -527,10 +528,9 @@ TpcbExample::run(int n, int accounts, int branches, int tellers)
// We use printf because it provides much simpler
// formatting than iostreams.
//
- printf("[%d] %d txns %d failed ", (int)pid,
- txns, failed);
+ printf("%d txns %d failed ", txns, failed);
printf("%6.2f TPS (gross) %6.2f TPS (interval)\n",
- gtps, itps);
+ gtps, itps);
lasttime = curtime;
ifailed = 0;
}
@@ -550,7 +550,7 @@ TpcbExample::run(int n, int accounts, int branches, int tellers)
//
int
TpcbExample::txn(Db *adb, Db *bdb, Db *tdb, Db *hdb,
- int accounts, int branches, int tellers)
+ int accounts, int branches, int tellers)
{
Dbc *acurs = NULL;
Dbc *bcurs = NULL;
@@ -560,7 +560,7 @@ TpcbExample::txn(Db *adb, Db *bdb, Db *tdb, Db *hdb,
db_recno_t key;
Defrec rec;
Histrec hrec;
- int account, branch, teller;
+ int account, branch, teller, ret;
Dbt d_dbt;
Dbt d_histdbt;
@@ -628,11 +628,12 @@ TpcbExample::txn(Db *adb, Db *bdb, Db *tdb, Db *hdb,
if (hdb->put(t, &k_histdbt, &d_histdbt, DB_APPEND) != 0)
goto err;
- if (acurs->close() != 0 || bcurs->close() != 0 ||
- tcurs->close() != 0)
+ if (acurs->close() != 0 || bcurs->close() != 0 || tcurs->close() != 0)
goto err;
- if (t->commit(0) != 0)
+ ret = t->commit(0);
+ t = NULL;
+ if (ret != 0)
goto err;
// END TIMING
@@ -654,13 +655,3 @@ err:
<< " T=" << (long)teller << " failed\n";
return (-1);
}
-
-void errExit(int err, const char *s)
-{
- cerr << progname << ": ";
- if (s != NULL) {
- cerr << s << ": ";
- }
- cerr << strerror(err) << "\n";
- exit(1);
-}
diff --git a/bdb/fileops/fileops.src b/bdb/fileops/fileops.src
new file mode 100644
index 00000000000..1fd39dc3c45
--- /dev/null
+++ b/bdb/fileops/fileops.src
@@ -0,0 +1,111 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2001-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: fileops.src,v 1.8 2002/04/06 18:25:55 bostic Exp $
+ */
+
+PREFIX __fop
+DBPRIVATE
+
+INCLUDE #include "db_config.h"
+INCLUDE
+INCLUDE #ifndef NO_SYSTEM_INCLUDES
+INCLUDE #include <sys/types.h>
+INCLUDE
+INCLUDE #include <ctype.h>
+INCLUDE #include <string.h>
+INCLUDE #endif
+INCLUDE
+INCLUDE #include "db_int.h"
+INCLUDE #include "dbinc/crypto.h"
+INCLUDE #include "dbinc/db_page.h"
+INCLUDE #include "dbinc/db_dispatch.h"
+INCLUDE #include "dbinc/db_am.h"
+INCLUDE #include "dbinc/log.h"
+INCLUDE #include "dbinc/rep.h"
+INCLUDE #include "dbinc/txn.h"
+INCLUDE #include "dbinc/fop.h"
+INCLUDE
+
+/*
+ * create -- create a file system object.
+ *
+ * name: name in the file system
+ * appname: indicates if the name needs to go through __db_appname
+ * mode: file system mode
+ */
+BEGIN create 143
+DBT name DBT s
+ARG appname u_int32_t lu
+ARG mode u_int32_t o
+END
+
+/*
+ * remove -- remove a file system object.
+ *
+ * name: name in the file system
+ * appname: indicates if the name needs to go through __db_appname
+ */
+BEGIN remove 144
+DBT name DBT s
+DBT fid DBT s
+ARG appname u_int32_t lu
+END
+
+/*
+ * write: log the writing of data into an object.
+ *
+ * name: file containing the page.
+ * appname: indicates if the name needs to go through __db_appname
+ * offset: offset in the file.
+ * page: the actual meta-data page.
+ * flag: non-0 indicates that this is a tempfile, so we needn't undo
+ * these modifications (we'll toss the file).
+ */
+BEGIN write 145
+DBT name DBT s
+ARG appname u_int32_t lu
+ARG offset u_int32_t lu
+PGDBT page DBT s
+ARG flag u_int32_t lu
+END
+
+/*
+ * rename: move a file from one name to another.
+ * The appname value indicates if this is a path name that should be used
+ * directly (i.e., no interpretation) or if it is a pathname that should
+ * be interpreted via calls to __db_appname. The fileid is the 20-byte
+ * DB fileid of the file being renamed. We need to check it on recovery
+ * so that we don't inadvertently overwrite good files.
+ */
+BEGIN rename 146
+DBT oldname DBT s
+DBT newname DBT s
+DBT fileid DBT s
+ARG appname u_int32_t lu
+END
+
+/*
+ * File removal record. This is a DB-level log record that indicates
+ * we've just completed some form of file removal. The purpose of this
+ * log record is to logically identify the particular instance of the
+ * named file so that during recovery, in deciding if we should roll-forward
+ * a remove or a rename, we can make sure that we don't roll one forward and
+ * delete or overwrite the wrong file.
+ * real_fid: The 20-byte unique file identifier of the original file being
+ * removed.
+ * tmp_fid: The unique fid of the tmp file that is removed.
+ * name: The pre- __db_appname name of the file
+ * child: The transaction that removed or renamed the file.
+ */
+BEGIN file_remove 141
+DBT real_fid DBT s
+DBT tmp_fid DBT s
+DBT name DBT s
+ARG appname u_int32_t lu
+ARG child u_int32_t lx
+END
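Editor's note: the .src description above is not compiled directly. gen_rec.awk expands each BEGIN/END block into the _log, _read, _print and _getpgnos routines that appear in fileops_auto.c below, serializing a record as the record type, transaction id and previous LSN, followed by a 4-byte length plus the bytes for every DBT field and a 4-byte integer for every ARG field. A minimal standalone sketch of that layout for the create record, using illustrative types and sample values rather than the DB-internal ones:

    #include <stdio.h>
    #include <stdint.h>
    #include <string.h>

    struct sample_lsn { uint32_t file, offset; };   /* stand-in for DB_LSN */

    int
    main(void)
    {
    	const char *name = "a.db";		/* the DBT field */
    	uint32_t rectype = 143;			/* BEGIN create 143 */
    	uint32_t txnid = 0x80000001;
    	struct sample_lsn prev = { 1, 28 };	/* previous LSN */
    	uint32_t appname = 0, mode = 0644;	/* the ARG fields */
    	uint32_t namelen = (uint32_t)strlen(name) + 1;
    	unsigned char buf[64], *bp = buf;

    	memcpy(bp, &rectype, sizeof(rectype)); bp += sizeof(rectype);
    	memcpy(bp, &txnid, sizeof(txnid));     bp += sizeof(txnid);
    	memcpy(bp, &prev, sizeof(prev));       bp += sizeof(prev);
    	memcpy(bp, &namelen, sizeof(namelen)); bp += sizeof(namelen);	/* DBTs are length-prefixed */
    	memcpy(bp, name, namelen);             bp += namelen;
    	memcpy(bp, &appname, sizeof(appname)); bp += sizeof(appname);
    	memcpy(bp, &mode, sizeof(mode));       bp += sizeof(mode);

    	printf("create record is %ld bytes\n", (long)(bp - buf));
    	return (0);
    }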
diff --git a/bdb/fileops/fileops_auto.c b/bdb/fileops/fileops_auto.c
new file mode 100644
index 00000000000..f38640b7480
--- /dev/null
+++ b/bdb/fileops/fileops_auto.c
@@ -0,0 +1,1371 @@
+/* Do not edit: automatically built by gen_rec.awk. */
+#include "db_config.h"
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <ctype.h>
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/crypto.h"
+#include "dbinc/db_page.h"
+#include "dbinc/db_dispatch.h"
+#include "dbinc/db_am.h"
+#include "dbinc/log.h"
+#include "dbinc/rep.h"
+#include "dbinc/txn.h"
+#include "dbinc/fop.h"
+
+/*
+ * PUBLIC: int __fop_create_log __P((DB_ENV *, DB_TXN *, DB_LSN *,
+ * PUBLIC: u_int32_t, const DBT *, u_int32_t, u_int32_t));
+ */
+int
+__fop_create_log(dbenv, txnid, ret_lsnp, flags,
+ name, appname, mode)
+ DB_ENV *dbenv;
+ DB_TXN *txnid;
+ DB_LSN *ret_lsnp;
+ u_int32_t flags;
+ const DBT *name;
+ u_int32_t appname;
+ u_int32_t mode;
+{
+ DBT logrec;
+ DB_LSN *lsnp, null_lsn;
+ u_int32_t zero;
+ u_int32_t uinttmp;
+ u_int32_t npad, rectype, txn_num;
+ int ret;
+ u_int8_t *bp;
+
+ rectype = DB___fop_create;
+ npad = 0;
+
+ if (txnid == NULL) {
+ txn_num = 0;
+ null_lsn.file = 0;
+ null_lsn.offset = 0;
+ lsnp = &null_lsn;
+ } else {
+ if (TAILQ_FIRST(&txnid->kids) != NULL &&
+ (ret = __txn_activekids(dbenv, rectype, txnid)) != 0)
+ return (ret);
+ txn_num = txnid->txnid;
+ lsnp = &txnid->last_lsn;
+ }
+
+ logrec.size = sizeof(rectype) + sizeof(txn_num) + sizeof(DB_LSN)
+ + sizeof(u_int32_t) + (name == NULL ? 0 : name->size)
+ + sizeof(u_int32_t)
+ + sizeof(u_int32_t);
+ if (CRYPTO_ON(dbenv)) {
+ npad =
+ ((DB_CIPHER *)dbenv->crypto_handle)->adj_size(logrec.size);
+ logrec.size += npad;
+ }
+
+ if ((ret = __os_malloc(dbenv,
+ logrec.size, &logrec.data)) != 0)
+ return (ret);
+
+ if (npad > 0)
+ memset((u_int8_t *)logrec.data + logrec.size - npad, 0, npad);
+
+ bp = logrec.data;
+
+ memcpy(bp, &rectype, sizeof(rectype));
+ bp += sizeof(rectype);
+
+ memcpy(bp, &txn_num, sizeof(txn_num));
+ bp += sizeof(txn_num);
+
+ memcpy(bp, lsnp, sizeof(DB_LSN));
+ bp += sizeof(DB_LSN);
+
+ if (name == NULL) {
+ zero = 0;
+ memcpy(bp, &zero, sizeof(u_int32_t));
+ bp += sizeof(u_int32_t);
+ } else {
+ memcpy(bp, &name->size, sizeof(name->size));
+ bp += sizeof(name->size);
+ memcpy(bp, name->data, name->size);
+ bp += name->size;
+ }
+
+ uinttmp = (u_int32_t)appname;
+ memcpy(bp, &uinttmp, sizeof(uinttmp));
+ bp += sizeof(uinttmp);
+
+ uinttmp = (u_int32_t)mode;
+ memcpy(bp, &uinttmp, sizeof(uinttmp));
+ bp += sizeof(uinttmp);
+
+ DB_ASSERT((u_int32_t)(bp - (u_int8_t *)logrec.data) <= logrec.size);
+ ret = dbenv->log_put(dbenv,
+ ret_lsnp, (DBT *)&logrec, flags | DB_NOCOPY);
+ if (txnid != NULL && ret == 0)
+ txnid->last_lsn = *ret_lsnp;
+#ifdef LOG_DIAGNOSTIC
+ if (ret != 0)
+ (void)__fop_create_print(dbenv,
+ (DBT *)&logrec, ret_lsnp, NULL, NULL);
+#endif
+ __os_free(dbenv, logrec.data);
+ return (ret);
+}
+
+/*
+ * PUBLIC: int __fop_create_getpgnos __P((DB_ENV *, DBT *, DB_LSN *,
+ * PUBLIC: db_recops, void *));
+ */
+int
+__fop_create_getpgnos(dbenv, rec, lsnp, notused1, summary)
+ DB_ENV *dbenv;
+ DBT *rec;
+ DB_LSN *lsnp;
+ db_recops notused1;
+ void *summary;
+{
+ TXN_RECS *t;
+ int ret;
+ COMPQUIET(rec, NULL);
+ COMPQUIET(notused1, DB_TXN_ABORT);
+
+ t = (TXN_RECS *)summary;
+
+ if ((ret = __rep_check_alloc(dbenv, t, 1)) != 0)
+ return (ret);
+
+ t->array[t->npages].flags = LSN_PAGE_NOLOCK;
+ t->array[t->npages].lsn = *lsnp;
+ t->array[t->npages].fid = DB_LOGFILEID_INVALID;
+ memset(&t->array[t->npages].pgdesc, 0,
+ sizeof(t->array[t->npages].pgdesc));
+
+ t->npages++;
+
+ return (0);
+}
+
+/*
+ * PUBLIC: int __fop_create_print __P((DB_ENV *, DBT *, DB_LSN *,
+ * PUBLIC: db_recops, void *));
+ */
+int
+__fop_create_print(dbenv, dbtp, lsnp, notused2, notused3)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops notused2;
+ void *notused3;
+{
+ __fop_create_args *argp;
+ u_int32_t i;
+ int ch;
+ int ret;
+
+ notused2 = DB_TXN_ABORT;
+ notused3 = NULL;
+
+ if ((ret = __fop_create_read(dbenv, dbtp->data, &argp)) != 0)
+ return (ret);
+ (void)printf(
+ "[%lu][%lu]__fop_create: rec: %lu txnid %lx prevlsn [%lu][%lu]\n",
+ (u_long)lsnp->file,
+ (u_long)lsnp->offset,
+ (u_long)argp->type,
+ (u_long)argp->txnid->txnid,
+ (u_long)argp->prev_lsn.file,
+ (u_long)argp->prev_lsn.offset);
+ (void)printf("\tname: ");
+ for (i = 0; i < argp->name.size; i++) {
+ ch = ((u_int8_t *)argp->name.data)[i];
+ printf(isprint(ch) || ch == 0x0a ? "%c" : "%#x ", ch);
+ }
+ (void)printf("\n");
+ (void)printf("\tappname: %lu\n", (u_long)argp->appname);
+ (void)printf("\tmode: %o\n", argp->mode);
+ (void)printf("\n");
+ __os_free(dbenv, argp);
+ return (0);
+}
+
+/*
+ * PUBLIC: int __fop_create_read __P((DB_ENV *, void *, __fop_create_args **));
+ */
+int
+__fop_create_read(dbenv, recbuf, argpp)
+ DB_ENV *dbenv;
+ void *recbuf;
+ __fop_create_args **argpp;
+{
+ __fop_create_args *argp;
+ u_int32_t uinttmp;
+ u_int8_t *bp;
+ int ret;
+
+ if ((ret = __os_malloc(dbenv,
+ sizeof(__fop_create_args) + sizeof(DB_TXN), &argp)) != 0)
+ return (ret);
+
+ argp->txnid = (DB_TXN *)&argp[1];
+
+ bp = recbuf;
+ memcpy(&argp->type, bp, sizeof(argp->type));
+ bp += sizeof(argp->type);
+
+ memcpy(&argp->txnid->txnid, bp, sizeof(argp->txnid->txnid));
+ bp += sizeof(argp->txnid->txnid);
+
+ memcpy(&argp->prev_lsn, bp, sizeof(DB_LSN));
+ bp += sizeof(DB_LSN);
+
+ memset(&argp->name, 0, sizeof(argp->name));
+ memcpy(&argp->name.size, bp, sizeof(u_int32_t));
+ bp += sizeof(u_int32_t);
+ argp->name.data = bp;
+ bp += argp->name.size;
+
+ memcpy(&uinttmp, bp, sizeof(uinttmp));
+ argp->appname = (u_int32_t)uinttmp;
+ bp += sizeof(uinttmp);
+
+ memcpy(&uinttmp, bp, sizeof(uinttmp));
+ argp->mode = (u_int32_t)uinttmp;
+ bp += sizeof(uinttmp);
+
+ *argpp = argp;
+ return (0);
+}
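Editor's note: the generated *_read routines make a single allocation for the args struct (plus a trailing DB_TXN) and point each DBT's data field directly into the caller's record buffer instead of copying it, so the buffer must stay live while the parsed args are in use. A standalone sketch of the same aliasing pattern, with illustrative names:

    #include <errno.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct sample_dbt  { uint32_t size; void *data; };
    struct sample_args { struct sample_dbt name; uint32_t mode; };

    /* Parse "4-byte length, bytes, 4-byte mode", aliasing name.data into
     * the record buffer rather than copying, as the *_read routines above do. */
    static int
    sample_read(uint8_t *recbuf, struct sample_args **argpp)
    {
    	struct sample_args *argp;
    	uint8_t *bp = recbuf;

    	if ((argp = malloc(sizeof(*argp))) == NULL)
    		return (ENOMEM);
    	memcpy(&argp->name.size, bp, sizeof(uint32_t));
    	bp += sizeof(uint32_t);
    	argp->name.data = bp;		/* points into recbuf; no copy */
    	bp += argp->name.size;
    	memcpy(&argp->mode, bp, sizeof(uint32_t));
    	*argpp = argp;
    	return (0);
    }

    int
    main(void)
    {
    	uint8_t rec[16];
    	uint32_t len = 5, mode = 0644;
    	struct sample_args *argp;

    	memcpy(rec, &len, 4);
    	memcpy(rec + 4, "a.db", 5);
    	memcpy(rec + 9, &mode, 4);
    	if (sample_read(rec, &argp) == 0) {
    		printf("name=%s mode=%o\n",
    		    (char *)argp->name.data, (unsigned)argp->mode);
    		free(argp);
    	}
    	return (0);
    }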
+
+/*
+ * PUBLIC: int __fop_remove_log __P((DB_ENV *, DB_TXN *, DB_LSN *,
+ * PUBLIC: u_int32_t, const DBT *, const DBT *, u_int32_t));
+ */
+int
+__fop_remove_log(dbenv, txnid, ret_lsnp, flags,
+ name, fid, appname)
+ DB_ENV *dbenv;
+ DB_TXN *txnid;
+ DB_LSN *ret_lsnp;
+ u_int32_t flags;
+ const DBT *name;
+ const DBT *fid;
+ u_int32_t appname;
+{
+ DBT logrec;
+ DB_LSN *lsnp, null_lsn;
+ u_int32_t zero;
+ u_int32_t uinttmp;
+ u_int32_t npad, rectype, txn_num;
+ int ret;
+ u_int8_t *bp;
+
+ rectype = DB___fop_remove;
+ npad = 0;
+
+ if (txnid == NULL) {
+ txn_num = 0;
+ null_lsn.file = 0;
+ null_lsn.offset = 0;
+ lsnp = &null_lsn;
+ } else {
+ if (TAILQ_FIRST(&txnid->kids) != NULL &&
+ (ret = __txn_activekids(dbenv, rectype, txnid)) != 0)
+ return (ret);
+ txn_num = txnid->txnid;
+ lsnp = &txnid->last_lsn;
+ }
+
+ logrec.size = sizeof(rectype) + sizeof(txn_num) + sizeof(DB_LSN)
+ + sizeof(u_int32_t) + (name == NULL ? 0 : name->size)
+ + sizeof(u_int32_t) + (fid == NULL ? 0 : fid->size)
+ + sizeof(u_int32_t);
+ if (CRYPTO_ON(dbenv)) {
+ npad =
+ ((DB_CIPHER *)dbenv->crypto_handle)->adj_size(logrec.size);
+ logrec.size += npad;
+ }
+
+ if ((ret = __os_malloc(dbenv,
+ logrec.size, &logrec.data)) != 0)
+ return (ret);
+
+ if (npad > 0)
+ memset((u_int8_t *)logrec.data + logrec.size - npad, 0, npad);
+
+ bp = logrec.data;
+
+ memcpy(bp, &rectype, sizeof(rectype));
+ bp += sizeof(rectype);
+
+ memcpy(bp, &txn_num, sizeof(txn_num));
+ bp += sizeof(txn_num);
+
+ memcpy(bp, lsnp, sizeof(DB_LSN));
+ bp += sizeof(DB_LSN);
+
+ if (name == NULL) {
+ zero = 0;
+ memcpy(bp, &zero, sizeof(u_int32_t));
+ bp += sizeof(u_int32_t);
+ } else {
+ memcpy(bp, &name->size, sizeof(name->size));
+ bp += sizeof(name->size);
+ memcpy(bp, name->data, name->size);
+ bp += name->size;
+ }
+
+ if (fid == NULL) {
+ zero = 0;
+ memcpy(bp, &zero, sizeof(u_int32_t));
+ bp += sizeof(u_int32_t);
+ } else {
+ memcpy(bp, &fid->size, sizeof(fid->size));
+ bp += sizeof(fid->size);
+ memcpy(bp, fid->data, fid->size);
+ bp += fid->size;
+ }
+
+ uinttmp = (u_int32_t)appname;
+ memcpy(bp, &uinttmp, sizeof(uinttmp));
+ bp += sizeof(uinttmp);
+
+ DB_ASSERT((u_int32_t)(bp - (u_int8_t *)logrec.data) <= logrec.size);
+ ret = dbenv->log_put(dbenv,
+ ret_lsnp, (DBT *)&logrec, flags | DB_NOCOPY);
+ if (txnid != NULL && ret == 0)
+ txnid->last_lsn = *ret_lsnp;
+#ifdef LOG_DIAGNOSTIC
+ if (ret != 0)
+ (void)__fop_remove_print(dbenv,
+ (DBT *)&logrec, ret_lsnp, NULL, NULL);
+#endif
+ __os_free(dbenv, logrec.data);
+ return (ret);
+}
+
+/*
+ * PUBLIC: int __fop_remove_getpgnos __P((DB_ENV *, DBT *, DB_LSN *,
+ * PUBLIC: db_recops, void *));
+ */
+int
+__fop_remove_getpgnos(dbenv, rec, lsnp, notused1, summary)
+ DB_ENV *dbenv;
+ DBT *rec;
+ DB_LSN *lsnp;
+ db_recops notused1;
+ void *summary;
+{
+ TXN_RECS *t;
+ int ret;
+ COMPQUIET(rec, NULL);
+ COMPQUIET(notused1, DB_TXN_ABORT);
+
+ t = (TXN_RECS *)summary;
+
+ if ((ret = __rep_check_alloc(dbenv, t, 1)) != 0)
+ return (ret);
+
+ t->array[t->npages].flags = LSN_PAGE_NOLOCK;
+ t->array[t->npages].lsn = *lsnp;
+ t->array[t->npages].fid = DB_LOGFILEID_INVALID;
+ memset(&t->array[t->npages].pgdesc, 0,
+ sizeof(t->array[t->npages].pgdesc));
+
+ t->npages++;
+
+ return (0);
+}
+
+/*
+ * PUBLIC: int __fop_remove_print __P((DB_ENV *, DBT *, DB_LSN *,
+ * PUBLIC: db_recops, void *));
+ */
+int
+__fop_remove_print(dbenv, dbtp, lsnp, notused2, notused3)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops notused2;
+ void *notused3;
+{
+ __fop_remove_args *argp;
+ u_int32_t i;
+ int ch;
+ int ret;
+
+ notused2 = DB_TXN_ABORT;
+ notused3 = NULL;
+
+ if ((ret = __fop_remove_read(dbenv, dbtp->data, &argp)) != 0)
+ return (ret);
+ (void)printf(
+ "[%lu][%lu]__fop_remove: rec: %lu txnid %lx prevlsn [%lu][%lu]\n",
+ (u_long)lsnp->file,
+ (u_long)lsnp->offset,
+ (u_long)argp->type,
+ (u_long)argp->txnid->txnid,
+ (u_long)argp->prev_lsn.file,
+ (u_long)argp->prev_lsn.offset);
+ (void)printf("\tname: ");
+ for (i = 0; i < argp->name.size; i++) {
+ ch = ((u_int8_t *)argp->name.data)[i];
+ printf(isprint(ch) || ch == 0x0a ? "%c" : "%#x ", ch);
+ }
+ (void)printf("\n");
+ (void)printf("\tfid: ");
+ for (i = 0; i < argp->fid.size; i++) {
+ ch = ((u_int8_t *)argp->fid.data)[i];
+ printf(isprint(ch) || ch == 0x0a ? "%c" : "%#x ", ch);
+ }
+ (void)printf("\n");
+ (void)printf("\tappname: %lu\n", (u_long)argp->appname);
+ (void)printf("\n");
+ __os_free(dbenv, argp);
+ return (0);
+}
+
+/*
+ * PUBLIC: int __fop_remove_read __P((DB_ENV *, void *, __fop_remove_args **));
+ */
+int
+__fop_remove_read(dbenv, recbuf, argpp)
+ DB_ENV *dbenv;
+ void *recbuf;
+ __fop_remove_args **argpp;
+{
+ __fop_remove_args *argp;
+ u_int32_t uinttmp;
+ u_int8_t *bp;
+ int ret;
+
+ if ((ret = __os_malloc(dbenv,
+ sizeof(__fop_remove_args) + sizeof(DB_TXN), &argp)) != 0)
+ return (ret);
+
+ argp->txnid = (DB_TXN *)&argp[1];
+
+ bp = recbuf;
+ memcpy(&argp->type, bp, sizeof(argp->type));
+ bp += sizeof(argp->type);
+
+ memcpy(&argp->txnid->txnid, bp, sizeof(argp->txnid->txnid));
+ bp += sizeof(argp->txnid->txnid);
+
+ memcpy(&argp->prev_lsn, bp, sizeof(DB_LSN));
+ bp += sizeof(DB_LSN);
+
+ memset(&argp->name, 0, sizeof(argp->name));
+ memcpy(&argp->name.size, bp, sizeof(u_int32_t));
+ bp += sizeof(u_int32_t);
+ argp->name.data = bp;
+ bp += argp->name.size;
+
+ memset(&argp->fid, 0, sizeof(argp->fid));
+ memcpy(&argp->fid.size, bp, sizeof(u_int32_t));
+ bp += sizeof(u_int32_t);
+ argp->fid.data = bp;
+ bp += argp->fid.size;
+
+ memcpy(&uinttmp, bp, sizeof(uinttmp));
+ argp->appname = (u_int32_t)uinttmp;
+ bp += sizeof(uinttmp);
+
+ *argpp = argp;
+ return (0);
+}
+
+/*
+ * PUBLIC: int __fop_write_log __P((DB_ENV *, DB_TXN *, DB_LSN *,
+ * PUBLIC: u_int32_t, const DBT *, u_int32_t, u_int32_t, const DBT *,
+ * PUBLIC: u_int32_t));
+ */
+int
+__fop_write_log(dbenv, txnid, ret_lsnp, flags,
+ name, appname, offset, page, flag)
+ DB_ENV *dbenv;
+ DB_TXN *txnid;
+ DB_LSN *ret_lsnp;
+ u_int32_t flags;
+ const DBT *name;
+ u_int32_t appname;
+ u_int32_t offset;
+ const DBT *page;
+ u_int32_t flag;
+{
+ DBT logrec;
+ DB_LSN *lsnp, null_lsn;
+ u_int32_t zero;
+ u_int32_t uinttmp;
+ u_int32_t npad, rectype, txn_num;
+ int ret;
+ u_int8_t *bp;
+
+ rectype = DB___fop_write;
+ npad = 0;
+
+ if (txnid == NULL) {
+ txn_num = 0;
+ null_lsn.file = 0;
+ null_lsn.offset = 0;
+ lsnp = &null_lsn;
+ } else {
+ if (TAILQ_FIRST(&txnid->kids) != NULL &&
+ (ret = __txn_activekids(dbenv, rectype, txnid)) != 0)
+ return (ret);
+ txn_num = txnid->txnid;
+ lsnp = &txnid->last_lsn;
+ }
+
+ logrec.size = sizeof(rectype) + sizeof(txn_num) + sizeof(DB_LSN)
+ + sizeof(u_int32_t) + (name == NULL ? 0 : name->size)
+ + sizeof(u_int32_t)
+ + sizeof(u_int32_t)
+ + sizeof(u_int32_t) + (page == NULL ? 0 : page->size)
+ + sizeof(u_int32_t);
+ if (CRYPTO_ON(dbenv)) {
+ npad =
+ ((DB_CIPHER *)dbenv->crypto_handle)->adj_size(logrec.size);
+ logrec.size += npad;
+ }
+
+ if ((ret = __os_malloc(dbenv,
+ logrec.size, &logrec.data)) != 0)
+ return (ret);
+
+ if (npad > 0)
+ memset((u_int8_t *)logrec.data + logrec.size - npad, 0, npad);
+
+ bp = logrec.data;
+
+ memcpy(bp, &rectype, sizeof(rectype));
+ bp += sizeof(rectype);
+
+ memcpy(bp, &txn_num, sizeof(txn_num));
+ bp += sizeof(txn_num);
+
+ memcpy(bp, lsnp, sizeof(DB_LSN));
+ bp += sizeof(DB_LSN);
+
+ if (name == NULL) {
+ zero = 0;
+ memcpy(bp, &zero, sizeof(u_int32_t));
+ bp += sizeof(u_int32_t);
+ } else {
+ memcpy(bp, &name->size, sizeof(name->size));
+ bp += sizeof(name->size);
+ memcpy(bp, name->data, name->size);
+ bp += name->size;
+ }
+
+ uinttmp = (u_int32_t)appname;
+ memcpy(bp, &uinttmp, sizeof(uinttmp));
+ bp += sizeof(uinttmp);
+
+ uinttmp = (u_int32_t)offset;
+ memcpy(bp, &uinttmp, sizeof(uinttmp));
+ bp += sizeof(uinttmp);
+
+ if (page == NULL) {
+ zero = 0;
+ memcpy(bp, &zero, sizeof(u_int32_t));
+ bp += sizeof(u_int32_t);
+ } else {
+ memcpy(bp, &page->size, sizeof(page->size));
+ bp += sizeof(page->size);
+ memcpy(bp, page->data, page->size);
+ bp += page->size;
+ }
+
+ uinttmp = (u_int32_t)flag;
+ memcpy(bp, &uinttmp, sizeof(uinttmp));
+ bp += sizeof(uinttmp);
+
+ DB_ASSERT((u_int32_t)(bp - (u_int8_t *)logrec.data) <= logrec.size);
+ ret = dbenv->log_put(dbenv,
+ ret_lsnp, (DBT *)&logrec, flags | DB_NOCOPY);
+ if (txnid != NULL && ret == 0)
+ txnid->last_lsn = *ret_lsnp;
+#ifdef LOG_DIAGNOSTIC
+ if (ret != 0)
+ (void)__fop_write_print(dbenv,
+ (DBT *)&logrec, ret_lsnp, NULL, NULL);
+#endif
+ __os_free(dbenv, logrec.data);
+ return (ret);
+}
+
+/*
+ * PUBLIC: int __fop_write_getpgnos __P((DB_ENV *, DBT *, DB_LSN *,
+ * PUBLIC: db_recops, void *));
+ */
+int
+__fop_write_getpgnos(dbenv, rec, lsnp, notused1, summary)
+ DB_ENV *dbenv;
+ DBT *rec;
+ DB_LSN *lsnp;
+ db_recops notused1;
+ void *summary;
+{
+ TXN_RECS *t;
+ int ret;
+ COMPQUIET(rec, NULL);
+ COMPQUIET(notused1, DB_TXN_ABORT);
+
+ t = (TXN_RECS *)summary;
+
+ if ((ret = __rep_check_alloc(dbenv, t, 1)) != 0)
+ return (ret);
+
+ t->array[t->npages].flags = LSN_PAGE_NOLOCK;
+ t->array[t->npages].lsn = *lsnp;
+ t->array[t->npages].fid = DB_LOGFILEID_INVALID;
+ memset(&t->array[t->npages].pgdesc, 0,
+ sizeof(t->array[t->npages].pgdesc));
+
+ t->npages++;
+
+ return (0);
+}
+
+/*
+ * PUBLIC: int __fop_write_print __P((DB_ENV *, DBT *, DB_LSN *,
+ * PUBLIC: db_recops, void *));
+ */
+int
+__fop_write_print(dbenv, dbtp, lsnp, notused2, notused3)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops notused2;
+ void *notused3;
+{
+ __fop_write_args *argp;
+ u_int32_t i;
+ int ch;
+ int ret;
+
+ notused2 = DB_TXN_ABORT;
+ notused3 = NULL;
+
+ if ((ret = __fop_write_read(dbenv, dbtp->data, &argp)) != 0)
+ return (ret);
+ (void)printf(
+ "[%lu][%lu]__fop_write: rec: %lu txnid %lx prevlsn [%lu][%lu]\n",
+ (u_long)lsnp->file,
+ (u_long)lsnp->offset,
+ (u_long)argp->type,
+ (u_long)argp->txnid->txnid,
+ (u_long)argp->prev_lsn.file,
+ (u_long)argp->prev_lsn.offset);
+ (void)printf("\tname: ");
+ for (i = 0; i < argp->name.size; i++) {
+ ch = ((u_int8_t *)argp->name.data)[i];
+ printf(isprint(ch) || ch == 0x0a ? "%c" : "%#x ", ch);
+ }
+ (void)printf("\n");
+ (void)printf("\tappname: %lu\n", (u_long)argp->appname);
+ (void)printf("\toffset: %lu\n", (u_long)argp->offset);
+ (void)printf("\tpage: ");
+ for (i = 0; i < argp->page.size; i++) {
+ ch = ((u_int8_t *)argp->page.data)[i];
+ printf(isprint(ch) || ch == 0x0a ? "%c" : "%#x ", ch);
+ }
+ (void)printf("\n");
+ (void)printf("\tflag: %lu\n", (u_long)argp->flag);
+ (void)printf("\n");
+ __os_free(dbenv, argp);
+ return (0);
+}
+
+/*
+ * PUBLIC: int __fop_write_read __P((DB_ENV *, void *, __fop_write_args **));
+ */
+int
+__fop_write_read(dbenv, recbuf, argpp)
+ DB_ENV *dbenv;
+ void *recbuf;
+ __fop_write_args **argpp;
+{
+ __fop_write_args *argp;
+ u_int32_t uinttmp;
+ u_int8_t *bp;
+ int ret;
+
+ if ((ret = __os_malloc(dbenv,
+ sizeof(__fop_write_args) + sizeof(DB_TXN), &argp)) != 0)
+ return (ret);
+
+ argp->txnid = (DB_TXN *)&argp[1];
+
+ bp = recbuf;
+ memcpy(&argp->type, bp, sizeof(argp->type));
+ bp += sizeof(argp->type);
+
+ memcpy(&argp->txnid->txnid, bp, sizeof(argp->txnid->txnid));
+ bp += sizeof(argp->txnid->txnid);
+
+ memcpy(&argp->prev_lsn, bp, sizeof(DB_LSN));
+ bp += sizeof(DB_LSN);
+
+ memset(&argp->name, 0, sizeof(argp->name));
+ memcpy(&argp->name.size, bp, sizeof(u_int32_t));
+ bp += sizeof(u_int32_t);
+ argp->name.data = bp;
+ bp += argp->name.size;
+
+ memcpy(&uinttmp, bp, sizeof(uinttmp));
+ argp->appname = (u_int32_t)uinttmp;
+ bp += sizeof(uinttmp);
+
+ memcpy(&uinttmp, bp, sizeof(uinttmp));
+ argp->offset = (u_int32_t)uinttmp;
+ bp += sizeof(uinttmp);
+
+ memset(&argp->page, 0, sizeof(argp->page));
+ memcpy(&argp->page.size, bp, sizeof(u_int32_t));
+ bp += sizeof(u_int32_t);
+ argp->page.data = bp;
+ bp += argp->page.size;
+
+ memcpy(&uinttmp, bp, sizeof(uinttmp));
+ argp->flag = (u_int32_t)uinttmp;
+ bp += sizeof(uinttmp);
+
+ *argpp = argp;
+ return (0);
+}
+
+/*
+ * PUBLIC: int __fop_rename_log __P((DB_ENV *, DB_TXN *, DB_LSN *,
+ * PUBLIC: u_int32_t, const DBT *, const DBT *, const DBT *, u_int32_t));
+ */
+int
+__fop_rename_log(dbenv, txnid, ret_lsnp, flags,
+ oldname, newname, fileid, appname)
+ DB_ENV *dbenv;
+ DB_TXN *txnid;
+ DB_LSN *ret_lsnp;
+ u_int32_t flags;
+ const DBT *oldname;
+ const DBT *newname;
+ const DBT *fileid;
+ u_int32_t appname;
+{
+ DBT logrec;
+ DB_LSN *lsnp, null_lsn;
+ u_int32_t zero;
+ u_int32_t uinttmp;
+ u_int32_t npad, rectype, txn_num;
+ int ret;
+ u_int8_t *bp;
+
+ rectype = DB___fop_rename;
+ npad = 0;
+
+ if (txnid == NULL) {
+ txn_num = 0;
+ null_lsn.file = 0;
+ null_lsn.offset = 0;
+ lsnp = &null_lsn;
+ } else {
+ if (TAILQ_FIRST(&txnid->kids) != NULL &&
+ (ret = __txn_activekids(dbenv, rectype, txnid)) != 0)
+ return (ret);
+ txn_num = txnid->txnid;
+ lsnp = &txnid->last_lsn;
+ }
+
+ logrec.size = sizeof(rectype) + sizeof(txn_num) + sizeof(DB_LSN)
+ + sizeof(u_int32_t) + (oldname == NULL ? 0 : oldname->size)
+ + sizeof(u_int32_t) + (newname == NULL ? 0 : newname->size)
+ + sizeof(u_int32_t) + (fileid == NULL ? 0 : fileid->size)
+ + sizeof(u_int32_t);
+ if (CRYPTO_ON(dbenv)) {
+ npad =
+ ((DB_CIPHER *)dbenv->crypto_handle)->adj_size(logrec.size);
+ logrec.size += npad;
+ }
+
+ if ((ret = __os_malloc(dbenv,
+ logrec.size, &logrec.data)) != 0)
+ return (ret);
+
+ if (npad > 0)
+ memset((u_int8_t *)logrec.data + logrec.size - npad, 0, npad);
+
+ bp = logrec.data;
+
+ memcpy(bp, &rectype, sizeof(rectype));
+ bp += sizeof(rectype);
+
+ memcpy(bp, &txn_num, sizeof(txn_num));
+ bp += sizeof(txn_num);
+
+ memcpy(bp, lsnp, sizeof(DB_LSN));
+ bp += sizeof(DB_LSN);
+
+ if (oldname == NULL) {
+ zero = 0;
+ memcpy(bp, &zero, sizeof(u_int32_t));
+ bp += sizeof(u_int32_t);
+ } else {
+ memcpy(bp, &oldname->size, sizeof(oldname->size));
+ bp += sizeof(oldname->size);
+ memcpy(bp, oldname->data, oldname->size);
+ bp += oldname->size;
+ }
+
+ if (newname == NULL) {
+ zero = 0;
+ memcpy(bp, &zero, sizeof(u_int32_t));
+ bp += sizeof(u_int32_t);
+ } else {
+ memcpy(bp, &newname->size, sizeof(newname->size));
+ bp += sizeof(newname->size);
+ memcpy(bp, newname->data, newname->size);
+ bp += newname->size;
+ }
+
+ if (fileid == NULL) {
+ zero = 0;
+ memcpy(bp, &zero, sizeof(u_int32_t));
+ bp += sizeof(u_int32_t);
+ } else {
+ memcpy(bp, &fileid->size, sizeof(fileid->size));
+ bp += sizeof(fileid->size);
+ memcpy(bp, fileid->data, fileid->size);
+ bp += fileid->size;
+ }
+
+ uinttmp = (u_int32_t)appname;
+ memcpy(bp, &uinttmp, sizeof(uinttmp));
+ bp += sizeof(uinttmp);
+
+ DB_ASSERT((u_int32_t)(bp - (u_int8_t *)logrec.data) <= logrec.size);
+ ret = dbenv->log_put(dbenv,
+ ret_lsnp, (DBT *)&logrec, flags | DB_NOCOPY);
+ if (txnid != NULL && ret == 0)
+ txnid->last_lsn = *ret_lsnp;
+#ifdef LOG_DIAGNOSTIC
+ if (ret != 0)
+ (void)__fop_rename_print(dbenv,
+ (DBT *)&logrec, ret_lsnp, NULL, NULL);
+#endif
+ __os_free(dbenv, logrec.data);
+ return (ret);
+}
+
+/*
+ * PUBLIC: int __fop_rename_getpgnos __P((DB_ENV *, DBT *, DB_LSN *,
+ * PUBLIC: db_recops, void *));
+ */
+int
+__fop_rename_getpgnos(dbenv, rec, lsnp, notused1, summary)
+ DB_ENV *dbenv;
+ DBT *rec;
+ DB_LSN *lsnp;
+ db_recops notused1;
+ void *summary;
+{
+ TXN_RECS *t;
+ int ret;
+ COMPQUIET(rec, NULL);
+ COMPQUIET(notused1, DB_TXN_ABORT);
+
+ t = (TXN_RECS *)summary;
+
+ if ((ret = __rep_check_alloc(dbenv, t, 1)) != 0)
+ return (ret);
+
+ t->array[t->npages].flags = LSN_PAGE_NOLOCK;
+ t->array[t->npages].lsn = *lsnp;
+ t->array[t->npages].fid = DB_LOGFILEID_INVALID;
+ memset(&t->array[t->npages].pgdesc, 0,
+ sizeof(t->array[t->npages].pgdesc));
+
+ t->npages++;
+
+ return (0);
+}
+
+/*
+ * PUBLIC: int __fop_rename_print __P((DB_ENV *, DBT *, DB_LSN *,
+ * PUBLIC: db_recops, void *));
+ */
+int
+__fop_rename_print(dbenv, dbtp, lsnp, notused2, notused3)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops notused2;
+ void *notused3;
+{
+ __fop_rename_args *argp;
+ u_int32_t i;
+ int ch;
+ int ret;
+
+ notused2 = DB_TXN_ABORT;
+ notused3 = NULL;
+
+ if ((ret = __fop_rename_read(dbenv, dbtp->data, &argp)) != 0)
+ return (ret);
+ (void)printf(
+ "[%lu][%lu]__fop_rename: rec: %lu txnid %lx prevlsn [%lu][%lu]\n",
+ (u_long)lsnp->file,
+ (u_long)lsnp->offset,
+ (u_long)argp->type,
+ (u_long)argp->txnid->txnid,
+ (u_long)argp->prev_lsn.file,
+ (u_long)argp->prev_lsn.offset);
+ (void)printf("\toldname: ");
+ for (i = 0; i < argp->oldname.size; i++) {
+ ch = ((u_int8_t *)argp->oldname.data)[i];
+ printf(isprint(ch) || ch == 0x0a ? "%c" : "%#x ", ch);
+ }
+ (void)printf("\n");
+ (void)printf("\tnewname: ");
+ for (i = 0; i < argp->newname.size; i++) {
+ ch = ((u_int8_t *)argp->newname.data)[i];
+ printf(isprint(ch) || ch == 0x0a ? "%c" : "%#x ", ch);
+ }
+ (void)printf("\n");
+ (void)printf("\tfileid: ");
+ for (i = 0; i < argp->fileid.size; i++) {
+ ch = ((u_int8_t *)argp->fileid.data)[i];
+ printf(isprint(ch) || ch == 0x0a ? "%c" : "%#x ", ch);
+ }
+ (void)printf("\n");
+ (void)printf("\tappname: %lu\n", (u_long)argp->appname);
+ (void)printf("\n");
+ __os_free(dbenv, argp);
+ return (0);
+}
+
+/*
+ * PUBLIC: int __fop_rename_read __P((DB_ENV *, void *, __fop_rename_args **));
+ */
+int
+__fop_rename_read(dbenv, recbuf, argpp)
+ DB_ENV *dbenv;
+ void *recbuf;
+ __fop_rename_args **argpp;
+{
+ __fop_rename_args *argp;
+ u_int32_t uinttmp;
+ u_int8_t *bp;
+ int ret;
+
+ if ((ret = __os_malloc(dbenv,
+ sizeof(__fop_rename_args) + sizeof(DB_TXN), &argp)) != 0)
+ return (ret);
+
+ argp->txnid = (DB_TXN *)&argp[1];
+
+ bp = recbuf;
+ memcpy(&argp->type, bp, sizeof(argp->type));
+ bp += sizeof(argp->type);
+
+ memcpy(&argp->txnid->txnid, bp, sizeof(argp->txnid->txnid));
+ bp += sizeof(argp->txnid->txnid);
+
+ memcpy(&argp->prev_lsn, bp, sizeof(DB_LSN));
+ bp += sizeof(DB_LSN);
+
+ memset(&argp->oldname, 0, sizeof(argp->oldname));
+ memcpy(&argp->oldname.size, bp, sizeof(u_int32_t));
+ bp += sizeof(u_int32_t);
+ argp->oldname.data = bp;
+ bp += argp->oldname.size;
+
+ memset(&argp->newname, 0, sizeof(argp->newname));
+ memcpy(&argp->newname.size, bp, sizeof(u_int32_t));
+ bp += sizeof(u_int32_t);
+ argp->newname.data = bp;
+ bp += argp->newname.size;
+
+ memset(&argp->fileid, 0, sizeof(argp->fileid));
+ memcpy(&argp->fileid.size, bp, sizeof(u_int32_t));
+ bp += sizeof(u_int32_t);
+ argp->fileid.data = bp;
+ bp += argp->fileid.size;
+
+ memcpy(&uinttmp, bp, sizeof(uinttmp));
+ argp->appname = (u_int32_t)uinttmp;
+ bp += sizeof(uinttmp);
+
+ *argpp = argp;
+ return (0);
+}
+
+/*
+ * PUBLIC: int __fop_file_remove_log __P((DB_ENV *, DB_TXN *,
+ * PUBLIC: DB_LSN *, u_int32_t, const DBT *, const DBT *, const DBT *,
+ * PUBLIC: u_int32_t, u_int32_t));
+ */
+int
+__fop_file_remove_log(dbenv, txnid, ret_lsnp, flags,
+ real_fid, tmp_fid, name, appname, child)
+ DB_ENV *dbenv;
+ DB_TXN *txnid;
+ DB_LSN *ret_lsnp;
+ u_int32_t flags;
+ const DBT *real_fid;
+ const DBT *tmp_fid;
+ const DBT *name;
+ u_int32_t appname;
+ u_int32_t child;
+{
+ DBT logrec;
+ DB_LSN *lsnp, null_lsn;
+ u_int32_t zero;
+ u_int32_t uinttmp;
+ u_int32_t npad, rectype, txn_num;
+ int ret;
+ u_int8_t *bp;
+
+ rectype = DB___fop_file_remove;
+ npad = 0;
+
+ if (txnid == NULL) {
+ txn_num = 0;
+ null_lsn.file = 0;
+ null_lsn.offset = 0;
+ lsnp = &null_lsn;
+ } else {
+ if (TAILQ_FIRST(&txnid->kids) != NULL &&
+ (ret = __txn_activekids(dbenv, rectype, txnid)) != 0)
+ return (ret);
+ txn_num = txnid->txnid;
+ lsnp = &txnid->last_lsn;
+ }
+
+ logrec.size = sizeof(rectype) + sizeof(txn_num) + sizeof(DB_LSN)
+ + sizeof(u_int32_t) + (real_fid == NULL ? 0 : real_fid->size)
+ + sizeof(u_int32_t) + (tmp_fid == NULL ? 0 : tmp_fid->size)
+ + sizeof(u_int32_t) + (name == NULL ? 0 : name->size)
+ + sizeof(u_int32_t)
+ + sizeof(u_int32_t);
+ if (CRYPTO_ON(dbenv)) {
+ npad =
+ ((DB_CIPHER *)dbenv->crypto_handle)->adj_size(logrec.size);
+ logrec.size += npad;
+ }
+
+ if ((ret = __os_malloc(dbenv,
+ logrec.size, &logrec.data)) != 0)
+ return (ret);
+
+ if (npad > 0)
+ memset((u_int8_t *)logrec.data + logrec.size - npad, 0, npad);
+
+ bp = logrec.data;
+
+ memcpy(bp, &rectype, sizeof(rectype));
+ bp += sizeof(rectype);
+
+ memcpy(bp, &txn_num, sizeof(txn_num));
+ bp += sizeof(txn_num);
+
+ memcpy(bp, lsnp, sizeof(DB_LSN));
+ bp += sizeof(DB_LSN);
+
+ if (real_fid == NULL) {
+ zero = 0;
+ memcpy(bp, &zero, sizeof(u_int32_t));
+ bp += sizeof(u_int32_t);
+ } else {
+ memcpy(bp, &real_fid->size, sizeof(real_fid->size));
+ bp += sizeof(real_fid->size);
+ memcpy(bp, real_fid->data, real_fid->size);
+ bp += real_fid->size;
+ }
+
+ if (tmp_fid == NULL) {
+ zero = 0;
+ memcpy(bp, &zero, sizeof(u_int32_t));
+ bp += sizeof(u_int32_t);
+ } else {
+ memcpy(bp, &tmp_fid->size, sizeof(tmp_fid->size));
+ bp += sizeof(tmp_fid->size);
+ memcpy(bp, tmp_fid->data, tmp_fid->size);
+ bp += tmp_fid->size;
+ }
+
+ if (name == NULL) {
+ zero = 0;
+ memcpy(bp, &zero, sizeof(u_int32_t));
+ bp += sizeof(u_int32_t);
+ } else {
+ memcpy(bp, &name->size, sizeof(name->size));
+ bp += sizeof(name->size);
+ memcpy(bp, name->data, name->size);
+ bp += name->size;
+ }
+
+ uinttmp = (u_int32_t)appname;
+ memcpy(bp, &uinttmp, sizeof(uinttmp));
+ bp += sizeof(uinttmp);
+
+ uinttmp = (u_int32_t)child;
+ memcpy(bp, &uinttmp, sizeof(uinttmp));
+ bp += sizeof(uinttmp);
+
+ DB_ASSERT((u_int32_t)(bp - (u_int8_t *)logrec.data) <= logrec.size);
+ ret = dbenv->log_put(dbenv,
+ ret_lsnp, (DBT *)&logrec, flags | DB_NOCOPY);
+ if (txnid != NULL && ret == 0)
+ txnid->last_lsn = *ret_lsnp;
+#ifdef LOG_DIAGNOSTIC
+ if (ret != 0)
+ (void)__fop_file_remove_print(dbenv,
+ (DBT *)&logrec, ret_lsnp, NULL, NULL);
+#endif
+ __os_free(dbenv, logrec.data);
+ return (ret);
+}
+
+/*
+ * PUBLIC: int __fop_file_remove_getpgnos __P((DB_ENV *, DBT *,
+ * PUBLIC: DB_LSN *, db_recops, void *));
+ */
+int
+__fop_file_remove_getpgnos(dbenv, rec, lsnp, notused1, summary)
+ DB_ENV *dbenv;
+ DBT *rec;
+ DB_LSN *lsnp;
+ db_recops notused1;
+ void *summary;
+{
+ TXN_RECS *t;
+ int ret;
+ COMPQUIET(rec, NULL);
+ COMPQUIET(notused1, DB_TXN_ABORT);
+
+ t = (TXN_RECS *)summary;
+
+ if ((ret = __rep_check_alloc(dbenv, t, 1)) != 0)
+ return (ret);
+
+ t->array[t->npages].flags = LSN_PAGE_NOLOCK;
+ t->array[t->npages].lsn = *lsnp;
+ t->array[t->npages].fid = DB_LOGFILEID_INVALID;
+ memset(&t->array[t->npages].pgdesc, 0,
+ sizeof(t->array[t->npages].pgdesc));
+
+ t->npages++;
+
+ return (0);
+}
+
+/*
+ * PUBLIC: int __fop_file_remove_print __P((DB_ENV *, DBT *,
+ * PUBLIC: DB_LSN *, db_recops, void *));
+ */
+int
+__fop_file_remove_print(dbenv, dbtp, lsnp, notused2, notused3)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops notused2;
+ void *notused3;
+{
+ __fop_file_remove_args *argp;
+ u_int32_t i;
+ int ch;
+ int ret;
+
+ notused2 = DB_TXN_ABORT;
+ notused3 = NULL;
+
+ if ((ret = __fop_file_remove_read(dbenv, dbtp->data, &argp)) != 0)
+ return (ret);
+ (void)printf(
+ "[%lu][%lu]__fop_file_remove: rec: %lu txnid %lx prevlsn [%lu][%lu]\n",
+ (u_long)lsnp->file,
+ (u_long)lsnp->offset,
+ (u_long)argp->type,
+ (u_long)argp->txnid->txnid,
+ (u_long)argp->prev_lsn.file,
+ (u_long)argp->prev_lsn.offset);
+ (void)printf("\treal_fid: ");
+ for (i = 0; i < argp->real_fid.size; i++) {
+ ch = ((u_int8_t *)argp->real_fid.data)[i];
+ printf(isprint(ch) || ch == 0x0a ? "%c" : "%#x ", ch);
+ }
+ (void)printf("\n");
+ (void)printf("\ttmp_fid: ");
+ for (i = 0; i < argp->tmp_fid.size; i++) {
+ ch = ((u_int8_t *)argp->tmp_fid.data)[i];
+ printf(isprint(ch) || ch == 0x0a ? "%c" : "%#x ", ch);
+ }
+ (void)printf("\n");
+ (void)printf("\tname: ");
+ for (i = 0; i < argp->name.size; i++) {
+ ch = ((u_int8_t *)argp->name.data)[i];
+ printf(isprint(ch) || ch == 0x0a ? "%c" : "%#x ", ch);
+ }
+ (void)printf("\n");
+ (void)printf("\tappname: %lu\n", (u_long)argp->appname);
+ (void)printf("\tchild: 0x%lx\n", (u_long)argp->child);
+ (void)printf("\n");
+ __os_free(dbenv, argp);
+ return (0);
+}
+
+/*
+ * PUBLIC: int __fop_file_remove_read __P((DB_ENV *, void *,
+ * PUBLIC: __fop_file_remove_args **));
+ */
+int
+__fop_file_remove_read(dbenv, recbuf, argpp)
+ DB_ENV *dbenv;
+ void *recbuf;
+ __fop_file_remove_args **argpp;
+{
+ __fop_file_remove_args *argp;
+ u_int32_t uinttmp;
+ u_int8_t *bp;
+ int ret;
+
+ if ((ret = __os_malloc(dbenv,
+ sizeof(__fop_file_remove_args) + sizeof(DB_TXN), &argp)) != 0)
+ return (ret);
+
+ argp->txnid = (DB_TXN *)&argp[1];
+
+ bp = recbuf;
+ memcpy(&argp->type, bp, sizeof(argp->type));
+ bp += sizeof(argp->type);
+
+ memcpy(&argp->txnid->txnid, bp, sizeof(argp->txnid->txnid));
+ bp += sizeof(argp->txnid->txnid);
+
+ memcpy(&argp->prev_lsn, bp, sizeof(DB_LSN));
+ bp += sizeof(DB_LSN);
+
+ memset(&argp->real_fid, 0, sizeof(argp->real_fid));
+ memcpy(&argp->real_fid.size, bp, sizeof(u_int32_t));
+ bp += sizeof(u_int32_t);
+ argp->real_fid.data = bp;
+ bp += argp->real_fid.size;
+
+ memset(&argp->tmp_fid, 0, sizeof(argp->tmp_fid));
+ memcpy(&argp->tmp_fid.size, bp, sizeof(u_int32_t));
+ bp += sizeof(u_int32_t);
+ argp->tmp_fid.data = bp;
+ bp += argp->tmp_fid.size;
+
+ memset(&argp->name, 0, sizeof(argp->name));
+ memcpy(&argp->name.size, bp, sizeof(u_int32_t));
+ bp += sizeof(u_int32_t);
+ argp->name.data = bp;
+ bp += argp->name.size;
+
+ memcpy(&uinttmp, bp, sizeof(uinttmp));
+ argp->appname = (u_int32_t)uinttmp;
+ bp += sizeof(uinttmp);
+
+ memcpy(&uinttmp, bp, sizeof(uinttmp));
+ argp->child = (u_int32_t)uinttmp;
+ bp += sizeof(uinttmp);
+
+ *argpp = argp;
+ return (0);
+}
+
+/*
+ * PUBLIC: int __fop_init_print __P((DB_ENV *, int (***)(DB_ENV *,
+ * PUBLIC: DBT *, DB_LSN *, db_recops, void *), size_t *));
+ */
+int
+__fop_init_print(dbenv, dtabp, dtabsizep)
+ DB_ENV *dbenv;
+ int (***dtabp)__P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ size_t *dtabsizep;
+{
+ int ret;
+
+ if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep,
+ __fop_create_print, DB___fop_create)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep,
+ __fop_remove_print, DB___fop_remove)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep,
+ __fop_write_print, DB___fop_write)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep,
+ __fop_rename_print, DB___fop_rename)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep,
+ __fop_file_remove_print, DB___fop_file_remove)) != 0)
+ return (ret);
+ return (0);
+}
+
+/*
+ * PUBLIC: int __fop_init_getpgnos __P((DB_ENV *, int (***)(DB_ENV *,
+ * PUBLIC: DBT *, DB_LSN *, db_recops, void *), size_t *));
+ */
+int
+__fop_init_getpgnos(dbenv, dtabp, dtabsizep)
+ DB_ENV *dbenv;
+ int (***dtabp)__P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ size_t *dtabsizep;
+{
+ int ret;
+
+ if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep,
+ __fop_create_getpgnos, DB___fop_create)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep,
+ __fop_remove_getpgnos, DB___fop_remove)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep,
+ __fop_write_getpgnos, DB___fop_write)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep,
+ __fop_rename_getpgnos, DB___fop_rename)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep,
+ __fop_file_remove_getpgnos, DB___fop_file_remove)) != 0)
+ return (ret);
+ return (0);
+}
+
+/*
+ * PUBLIC: int __fop_init_recover __P((DB_ENV *, int (***)(DB_ENV *,
+ * PUBLIC: DBT *, DB_LSN *, db_recops, void *), size_t *));
+ */
+int
+__fop_init_recover(dbenv, dtabp, dtabsizep)
+ DB_ENV *dbenv;
+ int (***dtabp)__P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ size_t *dtabsizep;
+{
+ int ret;
+
+ if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep,
+ __fop_create_recover, DB___fop_create)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep,
+ __fop_remove_recover, DB___fop_remove)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep,
+ __fop_write_recover, DB___fop_write)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep,
+ __fop_rename_recover, DB___fop_rename)) != 0)
+ return (ret);
+ if ((ret = __db_add_recovery(dbenv, dtabp, dtabsizep,
+ __fop_file_remove_recover, DB___fop_file_remove)) != 0)
+ return (ret);
+ return (0);
+}
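Editor's note: the __fop_init_* routines above register one handler per record type id with __db_add_recovery, building the dispatch table that recovery later consults. A standalone sketch of that table-of-function-pointers idea, with illustrative names and only two entries:

    #include <stdio.h>
    #include <stdint.h>

    typedef int (*rec_func)(uint32_t type);

    static int on_create(uint32_t t) { printf("create %u\n", (unsigned)t); return (0); }
    static int on_remove(uint32_t t) { printf("remove %u\n", (unsigned)t); return (0); }

    struct dispatch { uint32_t type; rec_func func; };

    static struct dispatch dtab[] = {
    	{ 143, on_create },	/* DB___fop_create */
    	{ 144, on_remove },	/* DB___fop_remove */
    };

    /* Look the handler up by record type, as recovery does with its table. */
    static int
    dispatch_rec(uint32_t type)
    {
    	size_t i;

    	for (i = 0; i < sizeof(dtab) / sizeof(dtab[0]); i++)
    		if (dtab[i].type == type)
    			return (dtab[i].func(type));
    	return (-1);		/* unknown record type */
    }

    int
    main(void)
    {
    	return (dispatch_rec(143));
    }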
diff --git a/bdb/fileops/fop_basic.c b/bdb/fileops/fop_basic.c
new file mode 100644
index 00000000000..08160ab2e1a
--- /dev/null
+++ b/bdb/fileops/fop_basic.c
@@ -0,0 +1,275 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2001-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: fop_basic.c,v 1.23 2002/08/11 02:11:23 margo Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <string.h>
+#include <sys/types.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/log.h"
+#include "dbinc/db_page.h"
+#include "dbinc/fop.h"
+#include "dbinc/txn.h"
+#include "dbinc/db_am.h"
+
+/*
+ * This file implements the basic file-level operations. This code
+ * ought to be fairly independent of DB, other than through its
+ * error-reporting mechanism.
+ */
+
+/*
+ * __fop_create --
+ * Create a (transactionally protected) file system object. This is used
+ * to create DB files now, potentially blobs, queue extents and anything
+ * else you wish to store in a file system object.
+ *
+ * PUBLIC: int __fop_create __P((DB_ENV *,
+ * PUBLIC: DB_TXN *, DB_FH *, const char *, APPNAME, int));
+ */
+int
+__fop_create(dbenv, txn, fhp, name, appname, mode)
+ DB_ENV *dbenv;
+ DB_TXN *txn;
+ DB_FH *fhp;
+ const char *name;
+ APPNAME appname;
+ int mode;
+{
+ DB_FH fh;
+ DB_LSN lsn;
+ DBT data;
+ char *real_name;
+ int do_close, ret;
+
+ ret = 0;
+ real_name = NULL;
+
+ if (fhp != NULL)
+ do_close = 0;
+ else {
+ fhp = &fh;
+ memset(fhp, 0, sizeof(fh));
+ do_close = 1;
+ }
+
+ if (mode == 0)
+ mode = __db_omode("rw----");
+
+ if ((ret =
+ __db_appname(dbenv, appname, name, 0, NULL, &real_name)) != 0)
+ goto err;
+
+ if (DBENV_LOGGING(dbenv)) {
+ memset(&data, 0, sizeof(data));
+ data.data = (void *)name;
+ data.size = (u_int32_t)strlen(name) + 1;
+ if ((ret = __fop_create_log(dbenv,
+ txn, &lsn, DB_FLUSH, &data, (u_int32_t)appname, mode)) != 0)
+ goto err;
+ }
+
+ DB_ENV_TEST_RECOVERY(dbenv, DB_TEST_POSTLOG, ret, name);
+
+ ret =
+ __os_open(dbenv, real_name, DB_OSO_CREATE | DB_OSO_EXCL, mode, fhp);
+
+err:
+DB_TEST_RECOVERY_LABEL
+ if (do_close && F_ISSET(fhp, DB_FH_VALID))
+ __os_closehandle(dbenv, fhp);
+ if (real_name != NULL)
+ __os_free(dbenv, real_name);
+ return (ret);
+}
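Editor's note: a hypothetical caller of __fop_create, shown only as a sketch and assuming the BDB 4.1 internal headers (db_config.h, db_int.h, dbinc/fop.h) are on the include path; this is not part of the public Berkeley DB API.

    #include "db_config.h"
    #include "db_int.h"
    #include "dbinc/fop.h"

    int
    create_extent(DB_ENV *dbenv, DB_TXN *txn, const char *name)
    {
    	/* NULL file handle: __fop_create opens and closes one itself;
    	 * mode 0 selects the default "rw----" owner-only mode. */
    	return (__fop_create(dbenv, txn, NULL, name, DB_APP_DATA, 0));
    }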
+
+/*
+ * __fop_remove --
+ * Remove a file system object.
+ *
+ * PUBLIC: int __fop_remove __P((DB_ENV *,
+ * PUBLIC: DB_TXN *, u_int8_t *, const char *, APPNAME));
+ */
+int
+__fop_remove(dbenv, txn, fileid, name, appname)
+ DB_ENV *dbenv;
+ DB_TXN *txn;
+ u_int8_t *fileid;
+ const char *name;
+ APPNAME appname;
+{
+ DB_LSN lsn;
+ DBT fdbt, ndbt;
+ char *real_name;
+ int ret;
+
+ real_name = NULL;
+
+ if ((ret =
+ __db_appname(dbenv, appname, name, 0, NULL, &real_name)) != 0)
+ goto err;
+
+ if (txn == NULL) {
+ if (fileid != NULL && (ret = dbenv->memp_nameop(
+ dbenv, fileid, NULL, real_name, NULL)) != 0)
+ goto err;
+ } else {
+ if (DBENV_LOGGING(dbenv)) {
+ memset(&fdbt, 0, sizeof(fdbt));
+ fdbt.data = fileid;
+ fdbt.size = fileid == NULL ? 0 : DB_FILE_ID_LEN;
+ memset(&ndbt, 0, sizeof(ndbt));
+ ndbt.data = (void *)name;
+ ndbt.size = (u_int32_t)strlen(name) + 1;
+ if ((ret = __fop_remove_log(dbenv,
+ txn, &lsn, 0, &ndbt, &fdbt, appname)) != 0)
+ goto err;
+ }
+ ret = __txn_remevent(dbenv, txn, real_name, fileid);
+ }
+
+err: if (real_name != NULL)
+ __os_free(dbenv, real_name);
+ return (ret);
+}
+
+/*
+ * __fop_write
+ *
+ * Write "size" bytes from "buf" to file "name" beginning at offset "off."
+ * If the file is open, supply a handle in fhp. Istmp indicates whether this is
+ * an operation that needs to be undone in the face of failure (i.e., if
+ * this is a write to a temporary file, we're simply going to remove the
+ * file, so don't worry about undoing the write).
+ *
+ * Currently, we *only* use this with istmp true. If we need more general
+ * handling, then we'll have to zero out regions on abort (and possibly
+ * log the before image of the data in the log record).
+ *
+ * PUBLIC: int __fop_write __P((DB_ENV *, DB_TXN *, const char *, APPNAME,
+ * PUBLIC: DB_FH *, u_int32_t, u_int8_t *, u_int32_t, u_int32_t));
+ */
+int
+__fop_write(dbenv, txn, name, appname, fhp, off, buf, size, istmp)
+ DB_ENV *dbenv;
+ DB_TXN *txn;
+ const char *name;
+ APPNAME appname;
+ DB_FH *fhp;
+ u_int32_t off;
+ u_int8_t *buf;
+ u_int32_t size, istmp;
+{
+ DB_FH fh;
+ DB_LSN lsn;
+ DBT data, namedbt;
+ char *real_name;
+ int ret, t_ret, we_opened;
+ size_t nbytes;
+
+ ret = 0;
+ we_opened = 0;
+ real_name = NULL;
+
+ if ((ret =
+ __db_appname(dbenv, appname, name, 0, NULL, &real_name)) != 0)
+ goto err;
+
+ if (DBENV_LOGGING(dbenv)) {
+ memset(&data, 0, sizeof(data));
+ data.data = buf;
+ data.size = size;
+ memset(&namedbt, 0, sizeof(namedbt));
+ namedbt.data = (void *)name;
+ namedbt.size = (u_int32_t)strlen(name) + 1;
+ if ((ret = __fop_write_log(dbenv,
+ txn, &lsn, 0, &namedbt, appname, off, &data, istmp)) != 0)
+ goto err;
+ }
+
+ if (fhp == NULL) {
+ /* File isn't open; we need to reopen it. */
+ if ((ret = __os_open(dbenv, real_name, 0, 0, &fh)) != 0)
+ goto err;
+ fhp = &fh;
+ we_opened = 1;
+ } else
+ we_opened = 0;
+
+ /* Seek to offset. */
+ if ((ret = __os_seek(dbenv, fhp, 0, 0, off, 0, DB_OS_SEEK_SET)) != 0)
+ goto err;
+
+ /* Now do the write. */
+ if ((ret = __os_write(dbenv, fhp, buf, size, &nbytes)) != 0)
+ goto err;
+
+err: if (we_opened)
+ if ((t_ret = __os_closehandle(dbenv, fhp)) != 0 && ret == 0)
+ ret = t_ret;
+
+ if (real_name != NULL)
+ __os_free(dbenv, real_name);
+ return (ret);
+}
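Editor's note: similarly, a hypothetical sketch of calling __fop_write for the tempfile case the comment above describes (istmp non-zero, handle reopened by name); again this assumes the internal headers and is not a public API.

    #include "db_config.h"
    #include "db_int.h"
    #include "dbinc/fop.h"

    int
    write_tmp_page(DB_ENV *dbenv, DB_TXN *txn, const char *tmpname,
        u_int8_t *pagebuf, u_int32_t pagesize)
    {
    	/* NULL DB_FH: __fop_write reopens the file by name.  istmp == 1
    	 * means an abort simply discards the file instead of undoing
    	 * the write. */
    	return (__fop_write(dbenv, txn, tmpname, DB_APP_DATA,
    	    NULL, 0, pagebuf, pagesize, 1));
    }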
+
+/*
+ * __fop_rename --
+ * Change a file's name.
+ *
+ * PUBLIC: int __fop_rename __P((DB_ENV *,
+ * PUBLIC: DB_TXN *, const char *, const char *, u_int8_t *, APPNAME));
+ */
+int
+__fop_rename(dbenv, txn, oldname, newname, fid, appname)
+ DB_ENV *dbenv;
+ DB_TXN *txn;
+ const char *oldname;
+ const char *newname;
+ u_int8_t *fid;
+ APPNAME appname;
+{
+ DB_LSN lsn;
+ DBT fiddbt, new, old;
+ int ret;
+ char *n, *o;
+
+ if ((ret = __db_appname(dbenv, appname, oldname, 0, NULL, &o)) != 0)
+ goto err;
+ if ((ret = __db_appname(dbenv, appname, newname, 0, NULL, &n)) != 0)
+ goto err;
+
+ if (DBENV_LOGGING(dbenv)) {
+ memset(&old, 0, sizeof(old));
+ memset(&new, 0, sizeof(new));
+ memset(&fiddbt, 0, sizeof(fiddbt));
+ old.data = (void *)oldname;
+ old.size = (u_int32_t)strlen(oldname) + 1;
+ new.data = (void *)newname;
+ new.size = (u_int32_t)strlen(newname) + 1;
+ fiddbt.data = fid;
+ fiddbt.size = DB_FILE_ID_LEN;
+ if ((ret = __fop_rename_log(dbenv, txn, &lsn,
+ DB_FLUSH, &old, &new, &fiddbt, (u_int32_t)appname)) != 0)
+ goto err;
+ }
+
+ ret = dbenv->memp_nameop(dbenv, fid, newname, o, n);
+
+err: if (o != oldname)
+ __os_free(dbenv, o);
+ if (n != newname)
+ __os_free(dbenv, n);
+ return (ret);
+}
diff --git a/bdb/fileops/fop_rec.c b/bdb/fileops/fop_rec.c
new file mode 100644
index 00000000000..67720e01d13
--- /dev/null
+++ b/bdb/fileops/fop_rec.c
@@ -0,0 +1,308 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2001-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: fop_rec.c,v 1.18 2002/08/14 20:27:01 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_page.h"
+#include "dbinc/fop.h"
+#include "dbinc/db_am.h"
+#include "dbinc/txn.h"
+
+/*
+ * __fop_create_recover --
+ * Recovery function for create.
+ *
+ * PUBLIC: int __fop_create_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__fop_create_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ DB_FH fh;
+ __fop_create_args *argp;
+ char *real_name;
+ int ret;
+
+ real_name = NULL;
+ COMPQUIET(info, NULL);
+ REC_PRINT(__fop_create_print);
+ REC_NOOP_INTRO(__fop_create_read);
+
+ if ((ret = __db_appname(dbenv, (APPNAME)argp->appname,
+ (const char *)argp->name.data, 0, NULL, &real_name)) != 0)
+ goto out;
+
+ if (DB_UNDO(op))
+ (void)__os_unlink(dbenv, real_name);
+ else if (DB_REDO(op))
+ if ((ret = __os_open(dbenv, real_name,
+ DB_OSO_CREATE | DB_OSO_EXCL, argp->mode, &fh)) == 0)
+ __os_closehandle(dbenv, &fh);
+
+ *lsnp = argp->prev_lsn;
+
+out: if (real_name != NULL)
+ __os_free(dbenv, real_name);
+
+ REC_NOOP_CLOSE;
+}
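Editor's note: the undo and redo arms above are deliberately symmetric and idempotent: undoing a create unlinks the file, redoing it re-creates the file only when the exclusive open succeeds, so either pass can be replayed safely. A standalone sketch of that symmetry, with an illustrative path and flags:

    #include <fcntl.h>
    #include <unistd.h>

    enum pass { PASS_UNDO, PASS_REDO };

    static void
    recover_create(const char *path, enum pass p)
    {
    	int fd;

    	if (p == PASS_UNDO)
    		(void)unlink(path);		/* roll the create back */
    	else if ((fd = open(path, O_CREAT | O_EXCL | O_RDWR, 0600)) >= 0)
    		(void)close(fd);		/* roll it forward, if missing */
    }

    int
    main(void)
    {
    	recover_create("example.db", PASS_REDO);
    	recover_create("example.db", PASS_UNDO);
    	return (0);
    }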
+
+/*
+ * __fop_remove_recover --
+ * Recovery function for remove.
+ *
+ * PUBLIC: int __fop_remove_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__fop_remove_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __fop_remove_args *argp;
+ char *real_name;
+ int ret;
+
+ real_name = NULL;
+ COMPQUIET(info, NULL);
+ REC_PRINT(__fop_remove_print);
+ REC_NOOP_INTRO(__fop_remove_read);
+
+ if ((ret = __db_appname(dbenv, (APPNAME)argp->appname,
+ (const char *)argp->name.data, 0, NULL, &real_name)) != 0)
+ goto out;
+
+ if (DB_REDO(op) && (ret = dbenv->memp_nameop(dbenv,
+ (u_int8_t *)argp->fid.data, NULL, real_name, NULL)) != 0)
+ goto out;
+
+ *lsnp = argp->prev_lsn;
+out: if (real_name != NULL)
+ __os_free(dbenv, real_name);
+ REC_NOOP_CLOSE;
+}
+
+/*
+ * __fop_write_recover --
+ * Recovery function for writechunk.
+ *
+ * PUBLIC: int __fop_write_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__fop_write_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __fop_write_args *argp;
+ int ret;
+
+ COMPQUIET(info, NULL);
+ REC_PRINT(__fop_write_print);
+ REC_NOOP_INTRO(__fop_write_read);
+
+ if (DB_UNDO(op))
+ DB_ASSERT(argp->flag != 0);
+ else if (DB_REDO(op))
+ ret = __fop_write(dbenv,
+ argp->txnid, argp->name.data, argp->appname, NULL,
+ argp->offset, argp->page.data, argp->page.size, argp->flag);
+
+ *lsnp = argp->prev_lsn;
+ REC_NOOP_CLOSE;
+}
+
+/*
+ * __fop_rename_recover --
+ * Recovery function for rename.
+ *
+ * PUBLIC: int __fop_rename_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__fop_rename_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __fop_rename_args *argp;
+ DBMETA *meta;
+ char *real_new, *real_old, *src;
+ int ret;
+ u_int8_t *fileid, mbuf[DBMETASIZE];
+
+ real_new = NULL;
+ real_old = NULL;
+ ret = 0;
+ meta = (DBMETA *)&mbuf[0];
+
+ COMPQUIET(info, NULL);
+ REC_PRINT(__fop_rename_print);
+ REC_NOOP_INTRO(__fop_rename_read);
+ fileid = argp->fileid.data;
+
+ if ((ret = __db_appname(dbenv, (APPNAME)argp->appname,
+ (const char *)argp->newname.data, 0, NULL, &real_new)) != 0)
+ goto out;
+ if ((ret = __db_appname(dbenv, (APPNAME)argp->appname,
+ (const char *)argp->oldname.data, 0, NULL, &real_old)) != 0)
+ goto out;
+
+ /*
+ * Verify that we are manipulating the correct file. We should always
+ * be OK on an ABORT or an APPLY, but during recovery, we have to
+ * check.
+ */
+ if (op != DB_TXN_ABORT && op != DB_TXN_APPLY) {
+ src = DB_UNDO(op) ? real_new : real_old;
+ /*
+ * Interpret any error as meaning that the file either doesn't
+ * exist, doesn't have a meta-data page, or is in some other
+ * way, shape or form, incorrect, so that we should not restore
+ * it.
+ */
+ if (__fop_read_meta(
+ dbenv, src, mbuf, DBMETASIZE, NULL, 1, 0) != 0)
+ goto done;
+ if (__db_chk_meta(dbenv, NULL, meta, 1) != 0)
+ goto done;
+ if (memcmp(argp->fileid.data, meta->uid, DB_FILE_ID_LEN) != 0)
+ goto done;
+ }
+
+ if (DB_UNDO(op))
+ (void)dbenv->memp_nameop(dbenv, fileid,
+ (const char *)argp->oldname.data, real_new, real_old);
+ if (DB_REDO(op))
+ (void)dbenv->memp_nameop(dbenv, fileid,
+ (const char *)argp->newname.data, real_old, real_new);
+
+done: *lsnp = argp->prev_lsn;
+out: if (real_new != NULL)
+ __os_free(dbenv, real_new);
+ if (real_old != NULL)
+ __os_free(dbenv, real_old);
+
+ REC_NOOP_CLOSE;
+}
+
+/*
+ * __fop_file_remove_recover --
+ * Recovery function for file_remove. On the REDO pass, we need to
+ * make sure no one recreated the file while we weren't looking. On an
+ * undo pass we must check whether the file we are interested in is the one that
+ * exists and then set the status of the child transaction depending on
+ * what we find out.
+ *
+ * PUBLIC: int __fop_file_remove_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__fop_file_remove_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __fop_file_remove_args *argp;
+ DBMETA *meta;
+ char *real_name;
+ int is_real, is_tmp, ret;
+ u_int8_t mbuf[DBMETASIZE];
+ u_int32_t cstat;
+
+ real_name = NULL;
+ is_real = is_tmp = 0;
+ meta = (DBMETA *)&mbuf[0];
+ REC_PRINT(__fop_file_remove_print);
+ REC_NOOP_INTRO(__fop_file_remove_read);
+
+ /*
+ * This record is only interesting on the backward, forward, and
+ * apply phases.
+ */
+ if (op != DB_TXN_BACKWARD_ROLL &&
+ op != DB_TXN_FORWARD_ROLL && op != DB_TXN_APPLY)
+ goto done;
+
+ if ((ret = __db_appname(dbenv,
+ (APPNAME)argp->appname, argp->name.data, 0, NULL, &real_name)) != 0)
+ goto out;
+
+ /* Verify that we are manipulating the correct file. */
+ if ((ret = __fop_read_meta(dbenv,
+ real_name, mbuf, DBMETASIZE, NULL, 1, 0)) != 0) {
+ /* File does not exist. */
+ cstat = TXN_EXPECTED;
+ } else {
+ /*
+ * We can ignore errors here since we'll simply fail the
+ * checks below and assume this is the wrong file.
+ */
+ (void)__db_chk_meta(dbenv, NULL, meta, 1);
+ is_real =
+ memcmp(argp->real_fid.data, meta->uid, DB_FILE_ID_LEN) == 0;
+ is_tmp =
+ memcmp(argp->tmp_fid.data, meta->uid, DB_FILE_ID_LEN) == 0;
+
+ if (!is_real && !is_tmp)
+ /* File exists, but isn't what we were removing. */
+ cstat = TXN_IGNORE;
+ else
+ /* File exists and is the one that we were removing. */
+ cstat = TXN_COMMIT;
+ }
+
+ if (DB_UNDO(op)) {
+ /* On the backward pass, we leave a note for the child txn. */
+ if ((ret = __db_txnlist_update(dbenv,
+ info, argp->child, cstat, NULL)) == DB_NOTFOUND)
+ ret = __db_txnlist_add(dbenv,
+ info, argp->child, cstat, NULL);
+ } else if (DB_REDO(op)) {
+ /*
+ * On the forward pass, check if someone recreated the
+ * file while we weren't looking.
+ */
+ if (cstat == TXN_COMMIT)
+ (void)dbenv->memp_nameop(dbenv,
+ is_real ? argp->real_fid.data : argp->tmp_fid.data,
+ NULL, real_name, NULL);
+ }
+
+done: *lsnp = argp->prev_lsn;
+ ret = 0;
+
+out: if (real_name != NULL)
+ __os_free(dbenv, real_name);
+ REC_NOOP_CLOSE;
+}
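Editor's note: the status decision in __fop_file_remove_recover above boils down to comparing the on-disk fileid against the logged real and tmp fileids. A standalone sketch of just that decision, with illustrative names standing in for TXN_EXPECTED, TXN_IGNORE and TXN_COMMIT:

    #include <string.h>

    enum child_status { CHILD_EXPECTED, CHILD_IGNORE, CHILD_COMMIT };

    static enum child_status
    removal_status(const unsigned char *ondisk, const unsigned char *real_fid,
        const unsigned char *tmp_fid, size_t idlen, int file_exists)
    {
    	if (!file_exists)
    		return (CHILD_EXPECTED);	/* already gone */
    	if (memcmp(ondisk, real_fid, idlen) != 0 &&
    	    memcmp(ondisk, tmp_fid, idlen) != 0)
    		return (CHILD_IGNORE);		/* someone else's file */
    	return (CHILD_COMMIT);			/* the file we were removing */
    }

    int
    main(void)
    {
    	unsigned char ondisk[20] = "real", real_fid[20] = "real",
    	    tmp_fid[20] = "tmp";

    	return (removal_status(ondisk, real_fid, tmp_fid,
    	    sizeof(ondisk), 1) == CHILD_COMMIT ? 0 : 1);
    }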
diff --git a/bdb/fileops/fop_util.c b/bdb/fileops/fop_util.c
new file mode 100644
index 00000000000..ea6d86ab08d
--- /dev/null
+++ b/bdb/fileops/fop_util.c
@@ -0,0 +1,928 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2001-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: fop_util.c,v 1.52 2002/09/10 02:41:42 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <stddef.h>
+#include <stdlib.h>
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_page.h"
+#include "dbinc/db_shash.h"
+#include "dbinc/db_am.h"
+#include "dbinc/fop.h"
+#include "dbinc/lock.h"
+#include "dbinc/log.h"
+#include "dbinc/txn.h"
+
+static int __fop_set_pgsize __P((DB *, DB_FH *, const char *));
+
+/*
+ * Acquire the environment meta-data lock. The parameters are the
+ * environment (ENV), the locker id to use in acquiring the lock (ID)
+ * and a pointer to a DB_LOCK.
+ */
+#define GET_ENVLOCK(ENV, ID, L) do { \
+ DBT __dbt; \
+ u_int32_t __lockval; \
+ \
+ if (LOCKING_ON((ENV))) { \
+ __lockval = 0; \
+ __dbt.data = &__lockval; \
+ __dbt.size = sizeof(__lockval); \
+ if ((ret = (ENV)->lock_get((ENV), (ID), \
+ 0, &__dbt, DB_LOCK_WRITE, (L))) != 0) \
+ goto err; \
+ } \
+} while (0)
+
+#define REL_ENVLOCK(ENV, L) \
+ (!LOCK_ISSET(*(L)) ? 0 : (ENV)->lock_put((ENV), (L)))
+
+/*
+ * If our caller is doing fcntl(2) locking, then we can't close it
+ * because that would discard the caller's lock. Otherwise, close
+ * the handle.
+ */
+#define CLOSE_HANDLE(D, F) { \
+ if (F_ISSET((F), DB_FH_VALID)) { \
+ if (LF_ISSET(DB_FCNTL_LOCKING)) \
+ (D)->saved_open_fhp = (F); \
+ else if ((ret = __os_closehandle((D)->dbenv,(F))) != 0) \
+ goto err; \
+ } \
+}
+
+/*
+ * __fop_lock_handle --
+ *
+ * Get the handle lock for a database. If the envlock is specified,
+ * do this as a lock_vec call that releases the environment lock before
+ * acquiring the handle lock.
+ *
+ * PUBLIC: int __fop_lock_handle __P((DB_ENV *,
+ * PUBLIC: DB *, u_int32_t, db_lockmode_t, DB_LOCK *, u_int32_t));
+ *
+ */
+int
+__fop_lock_handle(dbenv, dbp, locker, mode, elock, flags)
+ DB_ENV *dbenv;
+ DB *dbp;
+ u_int32_t locker;
+ db_lockmode_t mode;
+ DB_LOCK *elock;
+ u_int32_t flags;
+{
+ DBT fileobj;
+ DB_LOCKREQ reqs[2], *ereq;
+ DB_LOCK_ILOCK lock_desc;
+ int ret;
+
+ if (!LOCKING_ON(dbenv) || F_ISSET(dbp, DB_AM_COMPENSATE))
+ return (0);
+
+ /*
+ * If we are in recovery, the only locking we should be
+ * doing is on the global environment.
+ */
+ if (IS_RECOVERING(dbenv)) {
+ if (elock != NULL)
+ REL_ENVLOCK(dbenv, elock);
+ return (0);
+ }
+
+ memcpy(&lock_desc.fileid, &dbp->fileid, DB_FILE_ID_LEN);
+ lock_desc.pgno = dbp->meta_pgno;
+ lock_desc.type = DB_HANDLE_LOCK;
+
+ memset(&fileobj, 0, sizeof(fileobj));
+ fileobj.data = &lock_desc;
+ fileobj.size = sizeof(lock_desc);
+ DB_TEST_SUBLOCKS(dbenv, flags);
+ if (elock == NULL)
+ ret = dbenv->lock_get(dbenv, locker,
+ flags, &fileobj, mode, &dbp->handle_lock);
+ else {
+ reqs[0].op = DB_LOCK_PUT;
+ reqs[0].lock = *elock;
+ reqs[1].op = DB_LOCK_GET;
+ reqs[1].mode = mode;
+ reqs[1].obj = &fileobj;
+ reqs[1].timeout = 0;
+ if ((ret = __lock_vec(dbenv,
+ locker, flags, reqs, 2, &ereq)) == 0) {
+ dbp->handle_lock = reqs[1].lock;
+ LOCK_INIT(*elock);
+ } else if (ereq != reqs)
+ LOCK_INIT(*elock);
+ }
+
+ dbp->cur_lid = locker;
+ return (ret);
+}
+
+/*
+ * __fop_file_setup --
+ *
+ * Perform all the needed checking and locking to open up or create a
+ * file.
+ *
+ * There's a reason we don't push this code down into the buffer cache.
+ * The problem is that there's no information external to the file that
+ * we can use as a unique ID. UNIX has dev/inode pairs, but they are
+ * not necessarily unique after reboot, if the file was mounted via NFS.
+ * Windows has similar problems, as the FAT filesystem doesn't maintain
+ * dev/inode numbers across reboot. So, we must get something from the
+ * file we can use to ensure that, even after a reboot, the file we're
+ * joining in the cache is the right file for us to join. The solution
+ * we use is to maintain a file ID that's stored in the database, and
+ * that's why we have to open and read the file before calling into the
+ * buffer cache or obtaining a lock (we use this unique fileid to lock
+ * as well as to identify like files in the cache).
+ *
+ * PUBLIC: int __fop_file_setup __P((DB *,
+ * PUBLIC: DB_TXN *, const char *, int, u_int32_t, u_int32_t *));
+ */
+int
+__fop_file_setup(dbp, txn, name, mode, flags, retidp)
+ DB *dbp;
+ DB_TXN *txn;
+ const char *name;
+ int mode;
+ u_int32_t flags, *retidp;
+{
+ DB_ENV *dbenv;
+ DB_FH fh, *fhp;
+ DB_LOCK elock, tmp_lock;
+ DB_TXN *stxn;
+ db_lockmode_t lmode;
+ u_int32_t locker, oflags;
+ u_int8_t mbuf[DBMETASIZE];
+ int created_fhp, created_locker, ret, tmp_created, truncating;
+ char *real_name, *real_tmpname, *tmpname;
+
+ DB_ASSERT(name != NULL);
+
+ *retidp = TXN_INVALID;
+
+ dbenv = dbp->dbenv;
+ LOCK_INIT(elock);
+ LOCK_INIT(tmp_lock);
+ stxn = NULL;
+ created_fhp = created_locker = 0;
+ real_name = real_tmpname = tmpname = NULL;
+ tmp_created = truncating = 0;
+
+ /*
+ * If we open a file handle and our caller is doing fcntl(2) locking,
+ * we can't close it because that would discard the caller's lock.
+ * Save it until we close the DB handle.
+ */
+ if (LF_ISSET(DB_FCNTL_LOCKING)) {
+ if ((ret = __os_malloc(dbenv, sizeof(*fhp), &fhp)) != 0)
+ return (ret);
+ created_fhp = 1;
+ } else
+ fhp = &fh;
+ memset(fhp, 0, sizeof(*fhp));
+
+ /*
+ * Get a lockerid for this handle. There are paths through queue
+ * rename and remove where this dbp already has a locker, so make
+ * sure we don't clobber it and conflict.
+ */
+ if (LOCKING_ON(dbenv) &&
+ !F_ISSET(dbp, DB_AM_COMPENSATE) && dbp->lid == DB_LOCK_INVALIDID) {
+ if ((ret = __lock_id(dbenv, &dbp->lid)) != 0)
+ goto err;
+ created_locker = 1;
+ }
+
+ locker = txn == NULL ? dbp->lid : txn->txnid;
+
+ /* Get the real backing file name. */
+ if ((ret = __db_appname(dbenv,
+ DB_APP_DATA, name, 0, NULL, &real_name)) != 0)
+ goto err;
+
+ /* Fill in the default file mode. */
+ if (mode == 0)
+ mode = __db_omode("rwrw--");
+
+ oflags = 0;
+ if (LF_ISSET(DB_RDONLY))
+ oflags |= DB_OSO_RDONLY;
+
+retry: if (!F_ISSET(dbp, DB_AM_COMPENSATE))
+ GET_ENVLOCK(dbenv, locker, &elock);
+ if ((ret = __os_exists(real_name, NULL)) == 0) {
+ if (LF_ISSET(DB_EXCL)) {
+ ret = EEXIST;
+ goto err;
+ }
+reopen: if ((ret = __fop_read_meta(dbenv, real_name,
+ mbuf, sizeof(mbuf), fhp, 0, oflags)) != 0)
+ goto err;
+
+ if ((ret = __db_meta_setup(dbenv,
+ dbp, real_name, (DBMETA *)mbuf, flags, 1)) != 0)
+ goto err;
+
+ /* Now, get our handle lock. */
+ lmode = LF_ISSET(DB_TRUNCATE) ? DB_LOCK_WRITE : DB_LOCK_READ;
+ if ((ret = __fop_lock_handle(dbenv,
+ dbp, locker, lmode, NULL, DB_LOCK_NOWAIT)) == 0) {
+ if ((ret = REL_ENVLOCK(dbenv, &elock)) != 0)
+ goto err;
+ } else {
+ /* Someone else has the file locked; we need to wait. */
+ if ((ret = __os_closehandle(dbenv, fhp)) != 0)
+ goto err;
+ ret = __fop_lock_handle(dbenv,
+ dbp, locker, lmode, &elock, 0);
+ if (ret == DB_LOCK_NOTEXIST)
+ goto retry;
+ if (ret != 0)
+ goto err;
+ /*
+ * XXX I need to convince myself that I don't need
+ * to re-read the metadata page here.
+ * XXX If you do need to re-read it you'd better
+ * decrypt it too...
+ */
+ if ((ret = __os_open(dbenv, real_name, 0, 0, fhp)) != 0)
+ goto err;
+ }
+
+ /*
+ * Check for a truncate which needs to leap over to the
+ * create case.
+ */
+ if (LF_ISSET(DB_TRUNCATE)) {
+ /*
+ * Sadly, we need to close and reopen the handle
+ * in order to do the actual truncate. We couldn't
+ * do the truncate on the initial open because we
+ * needed to read the old file-id in order to lock.
+ */
+ if ((ret = __os_closehandle(dbenv, fhp)) != 0)
+ goto err;
+ if ((ret = __os_open(dbenv,
+ real_name, DB_OSO_TRUNC, 0, fhp)) != 0)
+ goto err;
+ /*
+ * This is not transactional, so we'll do the
+ * open/create in place.
+ */
+ tmp_lock = dbp->handle_lock;
+ truncating = 1;
+ tmpname = (char *)name;
+ goto creat2;
+ }
+
+ /*
+ * Check for a file in the midst of a rename.
+ */
+ if (F_ISSET(dbp, DB_AM_IN_RENAME)) {
+ if (LF_ISSET(DB_CREATE)) {
+ F_CLR(dbp, DB_AM_IN_RENAME);
+ goto create;
+ } else {
+ ret = ENOENT;
+ goto err;
+ }
+ }
+
+ CLOSE_HANDLE(dbp, fhp);
+ goto done;
+ }
+
+ /* File does not exist. */
+ if (!LF_ISSET(DB_CREATE))
+ goto err;
+ ret = 0;
+
+ /*
+ * We need to create the file: set up the file, the fileid and
+ * the locks, then call the appropriate routines to create the
+ * meta-data pages.
+ */
+ if ((ret = REL_ENVLOCK(dbenv, &elock)) != 0)
+ goto err;
+
+create: if ((ret = __db_backup_name(dbenv, name, txn, &tmpname)) != 0)
+ goto err;
+ if (TXN_ON(dbenv) && txn != NULL &&
+ (ret = dbenv->txn_begin(dbenv, txn, &stxn, 0)) != 0)
+ goto err;
+ if ((ret = __fop_create(dbenv,
+ stxn, fhp, tmpname, DB_APP_DATA, mode)) != 0)
+ goto err;
+ tmp_created = 1;
+creat2: if ((ret = __db_appname(dbenv,
+ DB_APP_DATA, tmpname, 0, NULL, &real_tmpname)) != 0)
+ goto err;
+
+ /* Set the pagesize if it isn't yet set. */
+ if (dbp->pgsize == 0 &&
+ (ret = __fop_set_pgsize(dbp, fhp, real_tmpname)) != 0)
+ goto errmsg;
+
+ /* Construct a file_id. */
+ if ((ret = __os_fileid(dbenv, real_tmpname, 1, dbp->fileid)) != 0)
+ goto errmsg;
+
+ if ((ret = __db_new_file(dbp, stxn, fhp, tmpname)) != 0)
+ goto err;
+ CLOSE_HANDLE(dbp, fhp);
+
+ /* Now move the file into place. */
+ if (!F_ISSET(dbp, DB_AM_COMPENSATE))
+ GET_ENVLOCK(dbenv, locker, &elock);
+ if (!truncating && __os_exists(real_name, NULL) == 0) {
+ /*
+ * Someone managed to create the file; remove our temp
+ * and try to open the file that now exists.
+ */
+ (void)__fop_remove(dbenv,
+ NULL, dbp->fileid, tmpname, DB_APP_DATA);
+ if (LOCKING_ON(dbenv))
+ dbenv->lock_put(dbenv, &dbp->handle_lock);
+ LOCK_INIT(dbp->handle_lock);
+
+ /* If we have a saved handle, close it. */
+ if (LF_ISSET(DB_FCNTL_LOCKING))
+ (void)__os_closehandle(dbenv, fhp);
+ if (stxn != NULL) {
+ ret = stxn->abort(stxn);
+ stxn = NULL;
+ }
+ if (ret != 0)
+ goto err;
+ goto reopen;
+ }
+
+ /* We've successfully created, move the file into place. */
+ if ((ret = __fop_lock_handle(dbenv,
+ dbp, locker, DB_LOCK_WRITE, &elock, 0)) != 0)
+ goto err;
+ if (!truncating && (ret = __fop_rename(dbenv,
+ stxn, tmpname, name, dbp->fileid, DB_APP_DATA)) != 0)
+ goto err;
+
+ /* If this was a truncate, release the lock on the old file. */
+ if (LOCK_ISSET(tmp_lock) && (ret = __lock_put(dbenv, &tmp_lock)) != 0)
+ goto err;
+
+ if (stxn != NULL) {
+ *retidp = stxn->txnid;
+ ret = stxn->commit(stxn, 0);
+ stxn = NULL;
+ } else
+ *retidp = TXN_INVALID;
+
+ if (ret != 0)
+ goto err;
+
+ F_SET(dbp, DB_AM_CREATED);
+
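+ /*
+ * The "if (0)" block below is reached only via the errmsg and err
+ * labels; on success, control skips it and falls through to "done".
+ */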
+ if (0) {
+errmsg: __db_err(dbenv, "%s: %s", name, db_strerror(ret));
+
+err: if (stxn != NULL)
+ (void)stxn->abort(stxn);
+ if (tmp_created && txn == NULL)
+ (void)__fop_remove(dbenv,
+ NULL, NULL, tmpname, DB_APP_DATA);
+ if (F_ISSET(fhp, DB_FH_VALID))
+ (void)__os_closehandle(dbenv, fhp);
+ if (LOCK_ISSET(tmp_lock))
+ __lock_put(dbenv, &tmp_lock);
+ if (LOCK_ISSET(dbp->handle_lock) && txn == NULL)
+ __lock_put(dbenv, &dbp->handle_lock);
+ if (LOCK_ISSET(elock))
+ (void)REL_ENVLOCK(dbenv, &elock);
+ if (created_locker) {
+ (void)__lock_id_free(dbenv, dbp->lid);
+ dbp->lid = DB_LOCK_INVALIDID;
+ }
+ if (created_fhp)
+ __os_free(dbenv, fhp);
+ }
+
+done: if (!truncating && tmpname != NULL)
+ __os_free(dbenv, tmpname);
+ if (real_name != NULL)
+ __os_free(dbenv, real_name);
+ if (real_tmpname != NULL)
+ __os_free(dbenv, real_tmpname);
+
+ return (ret);
+}
+
+/*
+ * __fop_set_pgsize --
+ * Set the page size based on file information.
+ */
+static int
+__fop_set_pgsize(dbp, fhp, name)
+ DB *dbp;
+ DB_FH *fhp;
+ const char *name;
+{
+ DB_ENV *dbenv;
+ u_int32_t iopsize;
+ int ret;
+
+ dbenv = dbp->dbenv;
+
+ /*
+ * Use the filesystem's optimum I/O size as the pagesize if a pagesize
+ * not specified. Some filesystems have 64K as their optimum I/O size,
+ * but as that results in fairly large default caches, we limit the
+ * default pagesize to 16K.
+ */
+ if ((ret = __os_ioinfo(dbenv, name, fhp, NULL, NULL, &iopsize)) != 0) {
+ __db_err(dbenv, "%s: %s", name, db_strerror(ret));
+ return (ret);
+ }
+ if (iopsize < 512)
+ iopsize = 512;
+ if (iopsize > 16 * 1024)
+ iopsize = 16 * 1024;
+
+ /*
+ * Sheer paranoia, but we don't want anything that's not a power-of-2
+ * (we rely on that for alignment of various types on the pages), and
+ * we want a multiple of the sector size as well. If the value
+ * we got out of __os_ioinfo looks bad, use a default instead.
+ */
+ if (!IS_VALID_PAGESIZE(iopsize))
+ iopsize = DB_DEF_IOSIZE;
+
+ dbp->pgsize = iopsize;
+ F_SET(dbp, DB_AM_PGDEF);
+
+ return (0);
+}
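+
+/*
+ * A few sample values run through the heuristic above, assuming
+ * IS_VALID_PAGESIZE accepts only power-of-two sizes in this range:
+ *
+ *	__os_ioinfo reports   256: clamped up to 512
+ *	__os_ioinfo reports  4096: used as-is
+ *	__os_ioinfo reports 65536: clamped down to 16 * 1024
+ *	__os_ioinfo reports  7000: not a power of two, so DB_DEF_IOSIZE
+ *				   is used instead
+ */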
+
+/*
+ * __fop_subdb_setup --
+ *
+ * Subdb setup is significantly simpler than file setup. In terms of
+ * locking, for the duration of the operation/transaction, the locks on
+ * the meta-data page will suffice to protect us from simultaneous operations
+ * on the sub-database. Before we complete the operation though, we'll get a
+ * handle lock on the subdatabase so that no one else can try to remove it
+ * while we've got it open. We use an object that looks like the meta-data
+ * page lock with a different type (DB_HANDLE_LOCK) for the long-term handle
+ * locks.
+ *
+ * PUBLIC: int __fop_subdb_setup __P((DB *, DB_TXN *,
+ * PUBLIC: const char *, const char *, int, u_int32_t));
+ */
+int
+__fop_subdb_setup(dbp, txn, mname, name, mode, flags)
+ DB *dbp;
+ DB_TXN *txn;
+ const char *mname, *name;
+ int mode;
+ u_int32_t flags;
+{
+ DB *mdbp;
+ DB_ENV *dbenv;
+ int do_remove, ret;
+
+ mdbp = NULL;
+ dbenv = dbp->dbenv;
+
+ if ((ret = __db_master_open(dbp, txn, mname, flags, mode, &mdbp)) != 0)
+ return (ret);
+
+ /*
+ * We are going to close this instance of the master, so we can
+ * steal its handle instead of reopening a handle on the database.
+ */
+ if (LF_ISSET(DB_FCNTL_LOCKING)) {
+ dbp->saved_open_fhp = mdbp->saved_open_fhp;
+ mdbp->saved_open_fhp = NULL;
+ }
+
+ /* Now copy the pagesize. */
+ dbp->pgsize = mdbp->pgsize;
+ F_SET(dbp, DB_AM_SUBDB);
+
+ if (name != NULL && (ret = __db_master_update(mdbp, dbp, txn,
+ name, dbp->type, MU_OPEN, NULL, flags)) != 0)
+ goto err;
+
+ /*
+ * Hijack the master's locker ID as well, so that our locks don't
+ * conflict with the master's. Since we're closing the master,
+ * that lid would just have been freed anyway. Once we've gotten
+ * the locker id, we need to acquire the handle lock for this
+ * subdatabase.
+ */
+ dbp->lid = mdbp->lid;
+ mdbp->lid = DB_LOCK_INVALIDID;
+
+ DB_TEST_RECOVERY(dbp, DB_TEST_POSTLOG, ret, mname);
+
+ /*
+ * We copy our fileid from our master so that we all open
+ * the same file in mpool. We'll use the meta-pgno to lock
+ * so that we end up with different handle locks.
+ */
+
+ memcpy(dbp->fileid, mdbp->fileid, DB_FILE_ID_LEN);
+ if ((ret = __fop_lock_handle(dbenv, dbp,
+ txn == NULL ? dbp->lid : txn->txnid,
+ F_ISSET(dbp, DB_AM_CREATED) || LF_ISSET(DB_WRITEOPEN) ?
+ DB_LOCK_WRITE : DB_LOCK_READ, NULL, 0)) != 0)
+ goto err;
+
+ if ((ret = __db_init_subdb(mdbp, dbp, name, txn)) != 0)
+ goto err;
+
+ /*
+ * In the file create case, these happen in separate places so we have
+ * two different tests. They end up in the same place for subdbs, but
+ * for compatibility with file testing, we put them both here anyway.
+ */
+ DB_TEST_RECOVERY(dbp, DB_TEST_POSTLOGMETA, ret, mname);
+ DB_TEST_RECOVERY(dbp, DB_TEST_POSTSYNC, ret, mname);
+
+ /*
+ * File exists and we have the appropriate locks; we should now
+ * process a normal open.
+ */
+ if (F_ISSET(mdbp, DB_AM_CREATED)) {
+ F_SET(dbp, DB_AM_CREATED_MSTR);
+ F_CLR(mdbp, DB_AM_DISCARD);
+ }
+
+ /*
+ * The master's handle lock is under the control of the
+ * subdb (it acquired the master's locker). We want to
+ * keep the master's handle lock so that no one can remove
+ * the file while the subdb is open. If we register the
+ * trade event and then invalidate the copy of the lock
+ * in the master's handle, that will accomplish this. However,
+ * before we register this event, we'd better remove any
+ * events that we've already registered for the master.
+ */
+
+ if (!F_ISSET(dbp, DB_AM_RECOVER) && txn != NULL) {
+ /* Unregister old master events. */
+ __txn_remlock(dbenv,
+ txn, &mdbp->handle_lock, DB_LOCK_INVALIDID);
+
+ /* Now register the new event. */
+ if ((ret = __txn_lockevent(dbenv,
+ txn, dbp, &mdbp->handle_lock, dbp->lid)) != 0)
+ goto err;
+ }
+ LOCK_INIT(mdbp->handle_lock);
+ return (__db_close_i(mdbp, txn, 0));
+
+err:
+DB_TEST_RECOVERY_LABEL
+ if (LOCK_ISSET(dbp->handle_lock) && txn == NULL)
+ __lock_put(dbenv, &dbp->handle_lock);
+
+ /* If we created the master file then we need to remove it. */
+ if (mdbp != NULL) {
+ do_remove = F_ISSET(mdbp, DB_AM_CREATED) ? 1 : 0;
+ if (do_remove)
+ F_SET(mdbp, DB_AM_DISCARD);
+ (void)__db_close_i(mdbp, txn, 0);
+ if (do_remove) {
+ (void)db_create(&mdbp, dbp->dbenv, 0);
+ (void)__db_remove_i(mdbp, txn, mname, NULL);
+ }
+ }
+ return (ret);
+}
+
+/*
+ * __fop_remove_setup --
+ * Open handle appropriately and lock for removal of a database file.
+ *
+ * PUBLIC: int __fop_remove_setup __P((DB *,
+ * PUBLIC: DB_TXN *, const char *, u_int32_t));
+ */
+int
+__fop_remove_setup(dbp, txn, name, flags)
+ DB *dbp;
+ DB_TXN *txn;
+ const char *name;
+ u_int32_t flags;
+{
+ DB_ENV *dbenv;
+ DB_LOCK elock;
+ u_int8_t mbuf[DBMETASIZE];
+ int ret;
+
+ COMPQUIET(flags, 0);
+ dbenv = dbp->dbenv;
+ PANIC_CHECK(dbenv);
+ LOCK_INIT(elock);
+
+ /* Create locker if necessary. */
+ if (LOCKING_ON(dbenv)) {
+ if (txn != NULL)
+ dbp->lid = txn->txnid;
+ else if (dbp->lid == DB_LOCK_INVALIDID) {
+ if ((ret = __lock_id(dbenv, &dbp->lid)) != 0)
+ goto err;
+ }
+ }
+
+ /*
+ * Lock environment to protect file open. That will enable us to
+ * read the meta-data page and get the fileid so that we can lock
+ * the handle.
+ */
+ GET_ENVLOCK(dbenv, dbp->lid, &elock);
+ if ((ret = __fop_read_meta(dbenv,
+ name, mbuf, sizeof(mbuf), NULL, 0, 0)) != 0)
+ goto err;
+
+ if ((ret =
+ __db_meta_setup(dbenv, dbp, name, (DBMETA *)mbuf, flags, 1)) != 0)
+ goto err;
+
+ /* Now, release the environment and get the handle lock. */
+ if ((ret = __fop_lock_handle(dbenv,
+ dbp, dbp->lid, DB_LOCK_WRITE, &elock, 0)) != 0)
+ goto err;
+
+ return (0);
+
+err: (void)REL_ENVLOCK(dbenv, &elock);
+ return (ret);
+}
+
+/*
+ * __fop_read_meta --
+ * Read the meta-data page from a file and return it in buf. The
+ * open file handle is returned in fhp.
+ *
+ * PUBLIC: int __fop_read_meta __P((DB_ENV *,
+ * PUBLIC: const char *, u_int8_t *, size_t, DB_FH *, int, u_int32_t));
+ */
+int
+__fop_read_meta(dbenv, name, buf, size, fhp, errok, flags)
+ DB_ENV *dbenv;
+ const char *name;
+ u_int8_t *buf;
+ size_t size;
+ DB_FH *fhp;
+ int errok;
+ u_int32_t flags;
+{
+ DB_FH fh, *lfhp;
+ size_t nr;
+ int ret;
+
+ lfhp = fhp == NULL ? &fh : fhp;
+ memset(lfhp, 0, sizeof(*fhp));
+ if ((ret = __os_open(dbenv, name, flags, 0, lfhp)) != 0)
+ goto err;
+ if ((ret = __os_read(dbenv, lfhp, buf, size, &nr)) != 0) {
+ if (!errok)
+ __db_err(dbenv, "%s: %s", name, db_strerror(ret));
+ goto err;
+ }
+
+ if (nr != size) {
+ if (!errok)
+ __db_err(dbenv,
+ "%s: unexpected file type or format", name);
+ ret = EINVAL;
+ goto err;
+ }
+
+err: /*
+ * On error, we always close the handle. If there is no error,
+ * then we only return the handle if the user didn't pass us
+ * a handle into which to return it. If fhp is valid, then
+ * lfhp is the same as fhp.
+ */
+ if (F_ISSET(lfhp, DB_FH_VALID) && (ret != 0 || fhp == NULL))
+ __os_closehandle(dbenv, lfhp);
+ return (ret);
+}
+
+/*
+ * __fop_dummy --
+ * This implements the creation and name swapping of dummy files that
+ * we use for remove and rename (remove is simply a rename with a delayed
+ * remove).
+ *
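+ * In outline, a rename of "old" to "new" proceeds as follows:
+ *	1. Create a dummy (backup-named) file and stamp it with
+ *	   DB_RENAMEMAGIC so recovery can recognize it.
+ *	2. Rename old to new -- the real rename.
+ *	3. Rename the dummy into the old name, so the old name stays
+ *	   occupied and locked until the transaction resolves.
+ *	4. Register a delayed remove of the dummy under the old name,
+ *	   performed when the parent transaction commits.
+ *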
+ * PUBLIC: int __fop_dummy __P((DB *,
+ * PUBLIC: DB_TXN *, const char *, const char *, u_int32_t));
+ */
+int
+__fop_dummy(dbp, txn, old, new, flags)
+ DB *dbp;
+ DB_TXN *txn;
+ const char *old, *new;
+ u_int32_t flags;
+{
+ DB *tmpdbp;
+ DB_ENV *dbenv;
+ DB_LOCK elock;
+ DB_LSN lsn;
+ DBT fiddbt, namedbt, tmpdbt;
+ DB_TXN *stxn;
+ char *back;
+ char *realback, *realnew, *realold;
+ int ret, t_ret;
+ u_int8_t mbuf[DBMETASIZE];
+ u_int32_t locker, stxnid;
+
+ dbenv = dbp->dbenv;
+ LOCK_INIT(elock);
+ realback = NULL;
+ realnew = NULL;
+ realold = NULL;
+ back = NULL;
+ stxn = NULL;
+ tmpdbp = NULL;
+
+ DB_ASSERT(txn != NULL);
+ locker = txn->txnid;
+
+ /* Begin sub transaction to encapsulate the rename. */
+ if (TXN_ON(dbenv) &&
+ (ret = dbenv->txn_begin(dbenv, txn, &stxn, 0)) != 0)
+ goto err;
+
+ /* We need to create a dummy file as a place holder. */
+ if ((ret = __db_backup_name(dbenv, new, stxn, &back)) != 0)
+ goto err;
+ if ((ret = __db_appname(dbenv,
+ DB_APP_DATA, back, flags, NULL, &realback)) != 0)
+ goto err;
+ if ((ret = __fop_create(dbenv, stxn, NULL, back, DB_APP_DATA, 0)) != 0)
+ goto err;
+
+ memset(mbuf, 0, sizeof(mbuf));
+ if ((ret =
+ __os_fileid(dbenv, realback, 1, ((DBMETA *)mbuf)->uid)) != 0)
+ goto err;
+ ((DBMETA *)mbuf)->magic = DB_RENAMEMAGIC;
+ if ((ret = __fop_write(dbenv,
+ stxn, back, DB_APP_DATA, NULL, 0, mbuf, DBMETASIZE, 1)) != 0)
+ goto err;
+
+ /* Create a dummy dbp handle. */
+ if ((ret = db_create(&tmpdbp, dbenv, 0)) != 0)
+ goto err;
+ memcpy(&tmpdbp->fileid, ((DBMETA *)mbuf)->uid, DB_FILE_ID_LEN);
+
+ /* Now, lock the name space while we initialize this file. */
+ if ((ret = __db_appname(dbenv,
+ DB_APP_DATA, new, 0, NULL, &realnew)) != 0)
+ goto err;
+ GET_ENVLOCK(dbenv, locker, &elock);
+ if (__os_exists(realnew, NULL) == 0) {
+ ret = EEXIST;
+ goto err;
+ }
+
+ /*
+ * While we have the namespace locked, do the renames and then
+ * swap for the handle lock.
+ */
+ if ((ret = __fop_rename(dbenv,
+ stxn, old, new, dbp->fileid, DB_APP_DATA)) != 0)
+ goto err;
+ if ((ret = __fop_rename(dbenv,
+ stxn, back, old, tmpdbp->fileid, DB_APP_DATA)) != 0)
+ goto err;
+ if ((ret = __fop_lock_handle(dbenv,
+ tmpdbp, locker, DB_LOCK_WRITE, &elock, 0)) != 0)
+ goto err;
+
+ /*
+ * We just acquired a transactional lock on the tmp handle.
+ * We need to null out the tmp handle's lock so that it
+ * doesn't create problems for us in the close path.
+ */
+ LOCK_INIT(tmpdbp->handle_lock);
+
+ if (stxn != NULL) {
+ /* Commit the child. */
+ stxnid = stxn->txnid;
+ ret = stxn->commit(stxn, 0);
+ stxn = NULL;
+
+ /* Now log the child information in the parent. */
+ memset(&fiddbt, 0, sizeof(fiddbt));
+ memset(&tmpdbt, 0, sizeof(fiddbt));
+ memset(&namedbt, 0, sizeof(namedbt));
+ fiddbt.data = dbp->fileid;
+ fiddbt.size = DB_FILE_ID_LEN;
+ tmpdbt.data = tmpdbp->fileid;
+ tmpdbt.size = DB_FILE_ID_LEN;
+ namedbt.data = (void *)old;
+ namedbt.size = (u_int32_t)strlen(old) + 1;
+ if ((t_ret =
+ __fop_file_remove_log(dbenv, txn, &lsn, 0, &fiddbt,
+ &tmpdbt, &namedbt, DB_APP_DATA, stxnid)) != 0 && ret == 0)
+ ret = t_ret;
+ }
+
+ /* This is a delayed delete of the dummy file. */
+ if ((ret = __db_appname(dbenv,
+ DB_APP_DATA, old, flags, NULL, &realold)) != 0)
+ goto err;
+ if ((ret = __txn_remevent(dbenv, txn, realold, NULL)) != 0)
+ goto err;
+
+err: (void)REL_ENVLOCK(dbenv, &elock);
+ if (stxn != NULL)
+ (void)stxn->abort(stxn);
+ if (tmpdbp != NULL &&
+ (t_ret = __db_close_i(tmpdbp, NULL, 0)) != 0 && ret == 0)
+ ret = t_ret;
+ if (realold != NULL)
+ __os_free(dbenv, realold);
+ if (realnew != NULL)
+ __os_free(dbenv, realnew);
+ if (realback != NULL)
+ __os_free(dbenv, realback);
+ if (back != NULL)
+ __os_free(dbenv, back);
+ return (ret);
+}
+
+/*
+ * __fop_dbrename --
+ * Do the appropriate file locking and file system operations
+ * to effect a dbrename in the absence of transactions (__fop_dummy
+ * and the subsequent calls in __db_rename do the work for the
+ * transactional case).
+ *
+ * PUBLIC: int __fop_dbrename __P((DB *, const char *, const char *));
+ */
+int
+__fop_dbrename(dbp, old, new)
+ DB *dbp;
+ const char *old, *new;
+{
+ DB_ENV *dbenv;
+ DB_LOCK elock;
+ char *real_new, *real_old;
+ int ret, tret;
+
+ dbenv = dbp->dbenv;
+ real_new = NULL;
+ real_old = NULL;
+ LOCK_INIT(elock);
+
+ /* Find the real newname of the file. */
+ if ((ret = __db_appname(dbenv,
+ DB_APP_DATA, new, 0, NULL, &real_new)) != 0)
+ goto err;
+
+ /*
+ * It is an error to rename a file over one that already exists,
+ * as that wouldn't be transaction-safe.
+ */
+ GET_ENVLOCK(dbenv, dbp->lid, &elock);
+ if (__os_exists(real_new, NULL) == 0) {
+ ret = EEXIST;
+ __db_err(dbenv, "rename: file %s exists", real_new);
+ goto err;
+ }
+
+ if ((ret = __db_appname(dbenv,
+ DB_APP_DATA, old, 0, NULL, &real_old)) != 0)
+ goto err;
+
+ ret = dbenv->memp_nameop(dbenv, dbp->fileid, new, real_old, real_new);
+
+err: if ((tret = REL_ENVLOCK(dbenv, &elock)) != 0 && ret == 0)
+ ret = tret;
+ if (real_old != NULL)
+ __os_free(dbenv, real_old);
+ if (real_new != NULL)
+ __os_free(dbenv, real_new);
+ return (ret);
+}
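+
+/*
+ * For reference, a minimal application-level sketch of the rename path
+ * that ends up in the routines above (assuming an open DB_ENV *dbenv;
+ * the DB handle may not be reused after the call, whether it succeeds
+ * or fails):
+ *
+ *	DB *dbp;
+ *	int ret;
+ *
+ *	if ((ret = db_create(&dbp, dbenv, 0)) != 0)
+ *		return (ret);
+ *	ret = dbp->rename(dbp, "a.db", NULL, "b.db", 0);
+ */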
diff --git a/bdb/hash/hash.c b/bdb/hash/hash.c
index e96fd4898f0..2f972a3238d 100644
--- a/bdb/hash/hash.c
+++ b/bdb/hash/hash.c
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Copyright (c) 1996-2002
* Sleepycat Software. All rights reserved.
*/
/*
@@ -43,7 +43,7 @@
#include "db_config.h"
#ifndef lint
-static const char revid[] = "$Id: hash.c,v 11.94 2001/01/03 16:42:26 ubell Exp $";
+static const char revid[] = "$Id: hash.c,v 11.166 2002/08/06 06:11:25 bostic Exp $";
#endif /* not lint */
#ifndef NO_SYSTEM_INCLUDES
@@ -54,446 +54,70 @@ static const char revid[] = "$Id: hash.c,v 11.94 2001/01/03 16:42:26 ubell Exp $
#endif
#include "db_int.h"
-#include "db_page.h"
-#include "db_am.h"
-#include "db_ext.h"
-#include "db_shash.h"
-#include "db_swap.h"
-#include "hash.h"
-#include "btree.h"
-#include "log.h"
-#include "lock.h"
-#include "txn.h"
+#include "dbinc/db_page.h"
+#include "dbinc/db_shash.h"
+#include "dbinc/btree.h"
+#include "dbinc/hash.h"
+#include "dbinc/lock.h"
+static int __ham_bulk __P((DBC *, DBT *, u_int32_t));
static int __ham_c_close __P((DBC *, db_pgno_t, int *));
static int __ham_c_del __P((DBC *));
static int __ham_c_destroy __P((DBC *));
static int __ham_c_get __P((DBC *, DBT *, DBT *, u_int32_t, db_pgno_t *));
static int __ham_c_put __P((DBC *, DBT *, DBT *, u_int32_t, db_pgno_t *));
static int __ham_c_writelock __P((DBC *));
-static int __ham_del_dups __P((DBC *, DBT *));
-static int __ham_delete __P((DB *, DB_TXN *, DBT *, u_int32_t));
static int __ham_dup_return __P((DBC *, DBT *, u_int32_t));
static int __ham_expand_table __P((DBC *));
-static int __ham_init_htab __P((DBC *,
- const char *, db_pgno_t, u_int32_t, u_int32_t));
static int __ham_lookup __P((DBC *,
const DBT *, u_int32_t, db_lockmode_t, db_pgno_t *));
static int __ham_overwrite __P((DBC *, DBT *, u_int32_t));
/*
- * __ham_metachk --
+ * __ham_quick_delete --
+ * When performing a DB->del operation that does not involve secondary
+ * indices and is not removing an off-page duplicate tree, we can
+ * speed things up substantially by removing the entire duplicate
+ * set, if any is present, in one operation, rather than by conjuring
+ * up and deleting each of the items individually. (All are stored
+ * in one big HKEYDATA structure.) We don't bother to distinguish
+ * on-page duplicate sets from single, non-dup items; they're deleted
+ * in exactly the same way.
*
- * PUBLIC: int __ham_metachk __P((DB *, const char *, HMETA *));
- */
-int
-__ham_metachk(dbp, name, hashm)
- DB *dbp;
- const char *name;
- HMETA *hashm;
-{
- DB_ENV *dbenv;
- u_int32_t vers;
- int ret;
-
- dbenv = dbp->dbenv;
-
- /*
- * At this point, all we know is that the magic number is for a Hash.
- * Check the version, the database may be out of date.
- */
- vers = hashm->dbmeta.version;
- if (F_ISSET(dbp, DB_AM_SWAP))
- M_32_SWAP(vers);
- switch (vers) {
- case 4:
- case 5:
- case 6:
- __db_err(dbenv,
- "%s: hash version %lu requires a version upgrade",
- name, (u_long)vers);
- return (DB_OLD_VERSION);
- case 7:
- break;
- default:
- __db_err(dbenv,
- "%s: unsupported hash version: %lu", name, (u_long)vers);
- return (EINVAL);
- }
-
- /* Swap the page if we need to. */
- if (F_ISSET(dbp, DB_AM_SWAP) && (ret = __ham_mswap((PAGE *)hashm)) != 0)
- return (ret);
-
- /* Check the type. */
- if (dbp->type != DB_HASH && dbp->type != DB_UNKNOWN)
- return (EINVAL);
- dbp->type = DB_HASH;
- DB_ILLEGAL_METHOD(dbp, DB_OK_HASH);
-
- /*
- * Check application info against metadata info, and set info, flags,
- * and type based on metadata info.
- */
- if ((ret = __db_fchk(dbenv,
- "DB->open", hashm->dbmeta.flags,
- DB_HASH_DUP | DB_HASH_SUBDB | DB_HASH_DUPSORT)) != 0)
- return (ret);
-
- if (F_ISSET(&hashm->dbmeta, DB_HASH_DUP))
- F_SET(dbp, DB_AM_DUP);
- else
- if (F_ISSET(dbp, DB_AM_DUP)) {
- __db_err(dbenv,
- "%s: DB_DUP specified to open method but not set in database",
- name);
- return (EINVAL);
- }
-
- if (F_ISSET(&hashm->dbmeta, DB_HASH_SUBDB))
- F_SET(dbp, DB_AM_SUBDB);
- else
- if (F_ISSET(dbp, DB_AM_SUBDB)) {
- __db_err(dbenv,
- "%s: multiple databases specified but not supported in file",
- name);
- return (EINVAL);
- }
-
- if (F_ISSET(&hashm->dbmeta, DB_HASH_DUPSORT)) {
- if (dbp->dup_compare == NULL)
- dbp->dup_compare = __bam_defcmp;
- } else
- if (dbp->dup_compare != NULL) {
- __db_err(dbenv,
- "%s: duplicate sort function specified but not set in database",
- name);
- return (EINVAL);
- }
-
- /* Set the page size. */
- dbp->pgsize = hashm->dbmeta.pagesize;
-
- /* Copy the file's ID. */
- memcpy(dbp->fileid, hashm->dbmeta.uid, DB_FILE_ID_LEN);
-
- return (0);
-}
-
-/*
- * __ham_open --
+ * This function is called by __db_delete when the appropriate
+ * conditions are met, and it performs the delete in the optimized way.
*
- * PUBLIC: int __ham_open __P((DB *, const char *, db_pgno_t, u_int32_t));
+ * The cursor should be set to the first item in the duplicate
+ * set, or to the sole key/data pair when the key does not have a
+ * duplicate set, before the function is called.
+ *
+ * PUBLIC: int __ham_quick_delete __P((DBC *));
*/
int
-__ham_open(dbp, name, base_pgno, flags)
- DB *dbp;
- const char *name;
- db_pgno_t base_pgno;
- u_int32_t flags;
-{
- DB_ENV *dbenv;
- DBC *dbc;
- HASH_CURSOR *hcp;
- HASH *hashp;
- int need_sync, ret, t_ret;
-
- dbc = NULL;
- dbenv = dbp->dbenv;
- need_sync = 0;
-
- /* Initialize the remaining fields/methods of the DB. */
- dbp->del = __ham_delete;
- dbp->stat = __ham_stat;
-
- /*
- * Get a cursor. If DB_CREATE is specified, we may be creating
- * pages, and to do that safely in CDB we need a write cursor.
- * In STD_LOCKING mode, we'll synchronize using the meta page
- * lock instead.
- */
- if ((ret = dbp->cursor(dbp,
- dbp->open_txn, &dbc, LF_ISSET(DB_CREATE) && CDB_LOCKING(dbenv) ?
- DB_WRITECURSOR : 0)) != 0)
- return (ret);
-
- hcp = (HASH_CURSOR *)dbc->internal;
- hashp = dbp->h_internal;
- hashp->meta_pgno = base_pgno;
- if ((ret = __ham_get_meta(dbc)) != 0)
- goto err1;
-
- /*
- * If this is a new file, initialize it, and put it back dirty.
- *
- * Initialize the hdr structure.
- */
- if (hcp->hdr->dbmeta.magic == DB_HASHMAGIC) {
- /* File exists, verify the data in the header. */
- if (hashp->h_hash == NULL)
- hashp->h_hash = hcp->hdr->dbmeta.version < 5
- ? __ham_func4 : __ham_func5;
- if (!F_ISSET(dbp, DB_RDONLY) &&
- hashp->h_hash(dbp,
- CHARKEY, sizeof(CHARKEY)) != hcp->hdr->h_charkey) {
- __db_err(dbp->dbenv,
- "hash: incompatible hash function");
- ret = EINVAL;
- goto err2;
- }
- if (F_ISSET(&hcp->hdr->dbmeta, DB_HASH_DUP))
- F_SET(dbp, DB_AM_DUP);
- if (F_ISSET(&hcp->hdr->dbmeta, DB_HASH_DUPSORT))
- F_SET(dbp, DB_AM_DUPSORT);
- if (F_ISSET(&hcp->hdr->dbmeta, DB_HASH_SUBDB))
- F_SET(dbp, DB_AM_SUBDB);
- } else if (!IS_RECOVERING(dbenv)) {
- /*
- * File does not exist, we must initialize the header. If
- * locking is enabled that means getting a write lock first.
- * During recovery the meta page will be in the log.
- */
- dbc->lock.pgno = base_pgno;
-
- if (STD_LOCKING(dbc) &&
- ((ret = lock_put(dbenv, &hcp->hlock)) != 0 ||
- (ret = lock_get(dbenv, dbc->locker,
- DB_NONBLOCK(dbc) ? DB_LOCK_NOWAIT : 0,
- &dbc->lock_dbt, DB_LOCK_WRITE, &hcp->hlock)) != 0))
- goto err2;
- else if (CDB_LOCKING(dbp->dbenv)) {
- DB_ASSERT(LF_ISSET(DB_CREATE));
- if ((ret = lock_get(dbenv, dbc->locker,
- DB_LOCK_UPGRADE, &dbc->lock_dbt, DB_LOCK_WRITE,
- &dbc->mylock)) != 0)
- goto err2;
- }
- if ((ret = __ham_init_htab(dbc, name,
- base_pgno, hashp->h_nelem, hashp->h_ffactor)) != 0)
- goto err2;
-
- need_sync = 1;
- }
-
-err2: /* Release the meta data page */
- if ((t_ret = __ham_release_meta(dbc)) != 0 && ret == 0)
- ret = t_ret;
-err1: if ((t_ret = dbc->c_close(dbc)) != 0 && ret == 0)
- ret = t_ret;
-
- /* Sync the file so that we know that the meta data goes to disk. */
- if (ret == 0 && need_sync)
- ret = dbp->sync(dbp, 0);
-#if CONFIG_TEST
- if (ret == 0)
- DB_TEST_RECOVERY(dbp, DB_TEST_POSTSYNC, ret, name);
-
-DB_TEST_RECOVERY_LABEL
-#endif
- if (ret != 0)
- (void)__ham_db_close(dbp);
-
- return (ret);
-}
-
-/************************** LOCAL CREATION ROUTINES **********************/
-/*
- * Returns 0 on No Error
- */
-static int
-__ham_init_htab(dbc, name, pgno, nelem, ffactor)
+__ham_quick_delete(dbc)
DBC *dbc;
- const char *name;
- db_pgno_t pgno;
- u_int32_t nelem, ffactor;
{
- DB *dbp;
- DB_LOCK metalock;
- DB_LSN orig_lsn;
- DBMETA *mmeta;
- HASH_CURSOR *hcp;
- HASH *hashp;
- PAGE *h;
- db_pgno_t mpgno;
- int32_t l2, nbuckets;
- int dirty_mmeta, i, ret, t_ret;
-
- hcp = (HASH_CURSOR *)dbc->internal;
- dbp = dbc->dbp;
- hashp = dbp->h_internal;
- mmeta = NULL;
- h = NULL;
- ret = 0;
- dirty_mmeta = 0;
- metalock.off = LOCK_INVALID;
-
- if (hashp->h_hash == NULL)
- hashp->h_hash = DB_HASHVERSION < 5 ? __ham_func4 : __ham_func5;
-
- if (nelem != 0 && ffactor != 0) {
- nelem = (nelem - 1) / ffactor + 1;
- l2 = __db_log2(nelem > 2 ? nelem : 2);
- } else
- l2 = 1;
- nbuckets = 1 << l2;
-
- orig_lsn = hcp->hdr->dbmeta.lsn;
- memset(hcp->hdr, 0, sizeof(HMETA));
- ZERO_LSN(hcp->hdr->dbmeta.lsn);
- hcp->hdr->dbmeta.pgno = pgno;
- hcp->hdr->dbmeta.magic = DB_HASHMAGIC;
- hcp->hdr->dbmeta.version = DB_HASHVERSION;
- hcp->hdr->dbmeta.pagesize = dbp->pgsize;
- hcp->hdr->dbmeta.type = P_HASHMETA;
- hcp->hdr->dbmeta.free = PGNO_INVALID;
- hcp->hdr->max_bucket = hcp->hdr->high_mask = nbuckets - 1;
- hcp->hdr->low_mask = (nbuckets >> 1) - 1;
- hcp->hdr->ffactor = ffactor;
- hcp->hdr->h_charkey = hashp->h_hash(dbp, CHARKEY, sizeof(CHARKEY));
- memcpy(hcp->hdr->dbmeta.uid, dbp->fileid, DB_FILE_ID_LEN);
-
- if (F_ISSET(dbp, DB_AM_DUP))
- F_SET(&hcp->hdr->dbmeta, DB_HASH_DUP);
- if (F_ISSET(dbp, DB_AM_SUBDB))
- F_SET(&hcp->hdr->dbmeta, DB_HASH_SUBDB);
- if (dbp->dup_compare != NULL)
- F_SET(&hcp->hdr->dbmeta, DB_HASH_DUPSORT);
-
- if ((ret = memp_fset(dbp->mpf, hcp->hdr, DB_MPOOL_DIRTY)) != 0)
- goto err;
-
- /*
- * Create the first and second buckets pages so that we have the
- * page numbers for them and we can store that page number
- * in the meta-data header (spares[0]).
- */
- hcp->hdr->spares[0] = nbuckets;
- if ((ret = memp_fget(dbp->mpf,
- &hcp->hdr->spares[0], DB_MPOOL_NEW_GROUP, &h)) != 0)
- goto err;
-
- P_INIT(h, dbp->pgsize, hcp->hdr->spares[0], PGNO_INVALID,
- PGNO_INVALID, 0, P_HASH);
-
- /* Fill in the last fields of the meta data page. */
- hcp->hdr->spares[0] -= (nbuckets - 1);
- for (i = 1; i <= l2; i++)
- hcp->hdr->spares[i] = hcp->hdr->spares[0];
- for (; i < NCACHED; i++)
- hcp->hdr->spares[i] = PGNO_INVALID;
-
- /*
- * Before we are about to put any dirty pages, we need to log
- * the meta-data page create.
- */
- ret = __db_log_page(dbp, name, &orig_lsn, pgno, (PAGE *)hcp->hdr);
-
- if (dbp->open_txn != NULL) {
- mmeta = (DBMETA *) hcp->hdr;
- if (F_ISSET(dbp, DB_AM_SUBDB)) {
-
- /*
- * If this is a subdatabase, then we need to
- * get the LSN off the master meta data page
- * because that's where free pages are linked
- * and during recovery we need to access
- * that page and roll it backward/forward
- * correctly with respect to LSN.
- */
- mpgno = PGNO_BASE_MD;
- if ((ret = __db_lget(dbc,
- 0, mpgno, DB_LOCK_WRITE, 0, &metalock)) != 0)
- goto err;
- if ((ret = memp_fget(dbp->mpf,
- &mpgno, 0, (PAGE **)&mmeta)) != 0)
- goto err;
- }
- if ((t_ret = __ham_groupalloc_log(dbp->dbenv,
- dbp->open_txn, &LSN(mmeta), 0, dbp->log_fileid,
- &LSN(mmeta), hcp->hdr->spares[0],
- hcp->hdr->max_bucket + 1, mmeta->free)) != 0 && ret == 0)
- ret = t_ret;
- if (ret == 0) {
- /* need to update real LSN for buffer manager */
- dirty_mmeta = 1;
- }
-
- }
-
- DB_TEST_RECOVERY(dbp, DB_TEST_POSTLOG, ret, name);
-
-DB_TEST_RECOVERY_LABEL
-err: if (h != NULL &&
- (t_ret = memp_fput(dbp->mpf, h, DB_MPOOL_DIRTY)) != 0 && ret == 0)
- ret = t_ret;
-
- if (F_ISSET(dbp, DB_AM_SUBDB) && mmeta != NULL)
- if ((t_ret = memp_fput(dbp->mpf, mmeta,
- dirty_mmeta ? DB_MPOOL_DIRTY : 0)) != 0 && ret == 0)
- ret = t_ret;
- if (metalock.off != LOCK_INVALID)
- (void)__TLPUT(dbc, metalock);
-
- return (ret);
-}
-
-static int
-__ham_delete(dbp, txn, key, flags)
- DB *dbp;
- DB_TXN *txn;
- DBT *key;
- u_int32_t flags;
-{
- DBC *dbc;
- HASH_CURSOR *hcp;
- db_pgno_t pgno;
int ret, t_ret;
- /*
- * This is the only access method routine called directly from
- * the dbp, so we have to do error checking.
- */
-
- PANIC_CHECK(dbp->dbenv);
- DB_ILLEGAL_BEFORE_OPEN(dbp, "DB->del");
- DB_CHECK_TXN(dbp, txn);
-
- if ((ret =
- __db_delchk(dbp, key, flags, F_ISSET(dbp, DB_AM_RDONLY))) != 0)
- return (ret);
-
- if ((ret = dbp->cursor(dbp, txn, &dbc, DB_WRITELOCK)) != 0)
+ if ((ret = __ham_get_meta(dbc)) != 0)
return (ret);
- DEBUG_LWRITE(dbc, txn, "ham_delete", key, NULL, flags);
+ /* Assert that we're not using secondary indices. */
+ DB_ASSERT(!F_ISSET(dbc->dbp, DB_AM_SECONDARY));
+ /*
+ * We should assert that we're not a primary either, but that
+ * would require grabbing the dbp's mutex, so we don't bother.
+ */
- hcp = (HASH_CURSOR *)dbc->internal;
- if ((ret = __ham_get_meta(dbc)) != 0)
- goto out;
+ /* Assert that we're set, but not to an off-page duplicate. */
+ DB_ASSERT(IS_INITIALIZED(dbc));
+ DB_ASSERT(((HASH_CURSOR *)dbc->internal)->opd == NULL);
- pgno = PGNO_INVALID;
- if ((ret = __ham_lookup(dbc, key, 0, DB_LOCK_WRITE, &pgno)) == 0) {
- if (F_ISSET(hcp, H_OK)) {
- if (pgno == PGNO_INVALID)
- ret = __ham_del_pair(dbc, 1);
- else {
- /* When we close the cursor in __ham_del_dups,
- * that will make the off-page dup tree go
- * go away as well as our current entry. When
- * it updates cursors, ours should get marked
- * as H_DELETED.
- */
- ret = __ham_del_dups(dbc, key);
- }
- } else
- ret = DB_NOTFOUND;
- }
+ ret = __ham_del_pair(dbc, 1);
if ((t_ret = __ham_release_meta(dbc)) != 0 && ret == 0)
ret = t_ret;
-out: if ((t_ret = dbc->c_close(dbc)) != 0 && ret == 0)
- ret = t_ret;
return (ret);
}
@@ -517,8 +141,8 @@ __ham_c_init(dbc)
1, sizeof(struct cursor_t), &new_curs)) != 0)
return (ret);
if ((ret = __os_malloc(dbenv,
- dbc->dbp->pgsize, NULL, &new_curs->split_buf)) != 0) {
- __os_free(new_curs, sizeof(*new_curs));
+ dbc->dbp->pgsize, &new_curs->split_buf)) != 0) {
+ __os_free(dbenv, new_curs);
return (ret);
}
@@ -527,8 +151,10 @@ __ham_c_init(dbc)
dbc->c_count = __db_c_count;
dbc->c_del = __db_c_del;
dbc->c_dup = __db_c_dup;
- dbc->c_get = __db_c_get;
+ dbc->c_get = dbc->c_real_get = __db_c_get;
+ dbc->c_pget = __db_c_pget;
dbc->c_put = __db_c_put;
+ dbc->c_am_bulk = __ham_bulk;
dbc->c_am_close = __ham_c_close;
dbc->c_am_del = __ham_c_del;
dbc->c_am_destroy = __ham_c_destroy;
@@ -551,12 +177,14 @@ __ham_c_close(dbc, root_pgno, rmroot)
db_pgno_t root_pgno;
int *rmroot;
{
+ DB_MPOOLFILE *mpf;
HASH_CURSOR *hcp;
HKEYDATA *dp;
int doroot, gotmeta, ret, t_ret;
u_int32_t dirty;
COMPQUIET(rmroot, 0);
+ mpf = dbc->dbp->mpf;
dirty = 0;
doroot = gotmeta = ret = 0;
hcp = (HASH_CURSOR *) dbc->internal;
@@ -568,9 +196,14 @@ __ham_c_close(dbc, root_pgno, rmroot)
gotmeta = 1;
if ((ret = __ham_get_cpage(dbc, DB_LOCK_READ)) != 0)
goto out;
- dp = (HKEYDATA *)H_PAIRDATA(hcp->page, hcp->indx);
- DB_ASSERT(HPAGE_PTYPE(dp) == H_OFFDUP);
- memcpy(&root_pgno, HOFFPAGE_PGNO(dp), sizeof(db_pgno_t));
+ dp = (HKEYDATA *)H_PAIRDATA(dbc->dbp, hcp->page, hcp->indx);
+
+ /* If it's not a dup, we aborted before we changed it. */
+ if (HPAGE_PTYPE(dp) == H_OFFDUP)
+ memcpy(&root_pgno,
+ HOFFPAGE_PGNO(dp), sizeof(db_pgno_t));
+ else
+ root_pgno = PGNO_INVALID;
if ((ret =
hcp->opd->c_am_close(hcp->opd, root_pgno, &doroot)) != 0)
@@ -583,7 +216,7 @@ __ham_c_close(dbc, root_pgno, rmroot)
}
out: if (hcp->page != NULL && (t_ret =
- memp_fput(dbc->dbp->mpf, hcp->page, dirty)) != 0 && ret == 0)
+ mpf->put(mpf, hcp->page, dirty)) != 0 && ret == 0)
ret = t_ret;
if (gotmeta != 0 && (t_ret = __ham_release_meta(dbc)) != 0 && ret == 0)
ret = t_ret;
@@ -605,8 +238,8 @@ __ham_c_destroy(dbc)
hcp = (HASH_CURSOR *)dbc->internal;
if (hcp->split_buf != NULL)
- __os_free(hcp->split_buf, dbc->dbp->pgsize);
- __os_free(hcp, sizeof(HASH_CURSOR));
+ __os_free(dbc->dbp->dbenv, hcp->split_buf);
+ __os_free(dbc->dbp->dbenv, hcp);
return (0);
}
@@ -623,6 +256,7 @@ __ham_c_count(dbc, recnop)
db_recno_t *recnop;
{
DB *dbp;
+ DB_MPOOLFILE *mpf;
HASH_CURSOR *hcp;
db_indx_t len;
db_recno_t recno;
@@ -630,22 +264,23 @@ __ham_c_count(dbc, recnop)
u_int8_t *p, *pend;
dbp = dbc->dbp;
- hcp = (HASH_CURSOR *) dbc->internal;
+ mpf = dbp->mpf;
+ hcp = (HASH_CURSOR *)dbc->internal;
recno = 0;
if ((ret = __ham_get_cpage(dbc, DB_LOCK_READ)) != 0)
return (ret);
- switch (HPAGE_PTYPE(H_PAIRDATA(hcp->page, hcp->indx))) {
+ switch (HPAGE_PTYPE(H_PAIRDATA(dbp, hcp->page, hcp->indx))) {
case H_KEYDATA:
case H_OFFPAGE:
recno = 1;
break;
case H_DUPLICATE:
- p = HKEYDATA_DATA(H_PAIRDATA(hcp->page, hcp->indx));
+ p = HKEYDATA_DATA(H_PAIRDATA(dbp, hcp->page, hcp->indx));
pend = p +
- LEN_HDATA(hcp->page, dbp->pgsize, hcp->indx);
+ LEN_HDATA(dbp, hcp->page, dbp->pgsize, hcp->indx);
for (; p < pend; recno++) {
/* p may be odd, so copy rather than just dereffing */
memcpy(&len, p, sizeof(db_indx_t));
@@ -654,14 +289,13 @@ __ham_c_count(dbc, recnop)
break;
default:
- ret = __db_unknown_type(dbp->dbenv, "__ham_c_count",
- HPAGE_PTYPE(H_PAIRDATA(hcp->page, hcp->indx)));
+ ret = __db_pgfmt(dbp->dbenv, hcp->pgno);
goto err;
}
*recnop = recno;
-err: if ((t_ret = memp_fput(dbc->dbp->mpf, hcp->page, 0)) != 0 && ret == 0)
+err: if ((t_ret = mpf->put(mpf, hcp->page, 0)) != 0 && ret == 0)
ret = t_ret;
hcp->page = NULL;
return (ret);
@@ -673,10 +307,12 @@ __ham_c_del(dbc)
{
DB *dbp;
DBT repldbt;
+ DB_MPOOLFILE *mpf;
HASH_CURSOR *hcp;
int ret, t_ret;
dbp = dbc->dbp;
+ mpf = dbp->mpf;
hcp = (HASH_CURSOR *)dbc->internal;
if (F_ISSET(hcp, H_DELETED))
@@ -689,12 +325,12 @@ __ham_c_del(dbc)
goto out;
/* Off-page duplicates. */
- if (HPAGE_TYPE(hcp->page, H_DATAINDEX(hcp->indx)) == H_OFFDUP)
+ if (HPAGE_TYPE(dbp, hcp->page, H_DATAINDEX(hcp->indx)) == H_OFFDUP)
goto out;
if (F_ISSET(hcp, H_ISDUP)) { /* On-page duplicate. */
if (hcp->dup_off == 0 &&
- DUP_SIZE(hcp->dup_len) == LEN_HDATA(hcp->page,
+ DUP_SIZE(hcp->dup_len) == LEN_HDATA(dbp, hcp->page,
hcp->hdr->dbmeta.pagesize, hcp->indx))
ret = __ham_del_pair(dbc, 1);
else {
@@ -703,21 +339,25 @@ __ham_c_del(dbc)
repldbt.doff = hcp->dup_off;
repldbt.dlen = DUP_SIZE(hcp->dup_len);
repldbt.size = 0;
- repldbt.data = HKEYDATA_DATA(H_PAIRDATA(hcp->page,
+ repldbt.data = HKEYDATA_DATA(H_PAIRDATA(dbp, hcp->page,
hcp->indx));
- ret = __ham_replpair(dbc, &repldbt, 0);
- hcp->dup_tlen -= DUP_SIZE(hcp->dup_len);
- F_SET(hcp, H_DELETED);
- ret = __ham_c_update(dbc, DUP_SIZE(hcp->dup_len), 0, 1);
+ if ((ret = __ham_replpair(dbc, &repldbt, 0)) == 0) {
+ hcp->dup_tlen -= DUP_SIZE(hcp->dup_len);
+ F_SET(hcp, H_DELETED);
+ ret = __ham_c_update(dbc,
+ DUP_SIZE(hcp->dup_len), 0, 1);
+ }
}
} else /* Not a duplicate */
ret = __ham_del_pair(dbc, 1);
-out: if (ret == 0 && hcp->page != NULL &&
- (t_ret = memp_fput(dbp->mpf, hcp->page, DB_MPOOL_DIRTY)) != 0)
- ret = t_ret;
- hcp->page = NULL;
+out: if (hcp->page != NULL) {
+ if ((t_ret = mpf->put(mpf,
+ hcp->page, ret == 0 ? DB_MPOOL_DIRTY : 0)) && ret == 0)
+ ret = t_ret;
+ hcp->page = NULL;
+ }
if ((t_ret = __ham_release_meta(dbc)) != 0 && ret == 0)
ret = t_ret;
return (ret);
@@ -760,7 +400,7 @@ __ham_c_dup(orig_dbc, new_dbc)
* holds a lock of the correct type, so if we need a write lock and
* request it, we know that we'll get it.
*/
- if (orig->lock.off == LOCK_INVALID || orig_dbc->txn != NULL)
+ if (!LOCK_ISSET(orig->lock) || orig_dbc->txn != NULL)
return (0);
return (__ham_lock_bucket(new_dbc, DB_LOCK_READ));
@@ -775,12 +415,14 @@ __ham_c_get(dbc, key, data, flags, pgnop)
db_pgno_t *pgnop;
{
DB *dbp;
+ DB_MPOOLFILE *mpf;
HASH_CURSOR *hcp;
db_lockmode_t lock_type;
int get_key, ret, t_ret;
hcp = (HASH_CURSOR *)dbc->internal;
dbp = dbc->dbp;
+ mpf = dbp->mpf;
/* Clear OR'd in additional bits so we can check for flag equality. */
if (F_ISSET(dbc, DBC_RMW))
@@ -827,6 +469,7 @@ __ham_c_get(dbc, key, data, flags, pgnop)
case DB_SET:
case DB_SET_RANGE:
case DB_GET_BOTH:
+ case DB_GET_BOTH_RANGE:
ret = __ham_lookup(dbc, key, 0, lock_type, pgnop);
get_key = 0;
break;
@@ -856,11 +499,11 @@ __ham_c_get(dbc, key, data, flags, pgnop)
goto err;
else if (F_ISSET(hcp, H_OK)) {
if (*pgnop == PGNO_INVALID)
- ret = __ham_dup_return (dbc, data, flags);
+ ret = __ham_dup_return(dbc, data, flags);
break;
} else if (!F_ISSET(hcp, H_NOMORE)) {
__db_err(dbp->dbenv,
- "H_NOMORE returned to __ham_c_get");
+ "H_NOMORE returned to __ham_c_get");
ret = EINVAL;
break;
}
@@ -872,7 +515,7 @@ __ham_c_get(dbc, key, data, flags, pgnop)
case DB_LAST:
case DB_PREV:
case DB_PREV_NODUP:
- ret = memp_fput(dbp->mpf, hcp->page, 0);
+ ret = mpf->put(mpf, hcp->page, 0);
hcp->page = NULL;
if (hcp->bucket == 0) {
ret = DB_NOTFOUND;
@@ -890,7 +533,7 @@ __ham_c_get(dbc, key, data, flags, pgnop)
case DB_FIRST:
case DB_NEXT:
case DB_NEXT_NODUP:
- ret = memp_fput(dbp->mpf, hcp->page, 0);
+ ret = mpf->put(mpf, hcp->page, 0);
hcp->page = NULL;
hcp->indx = NDX_INVALID;
hcp->bucket++;
@@ -907,6 +550,7 @@ __ham_c_get(dbc, key, data, flags, pgnop)
break;
case DB_GET_BOTH:
case DB_GET_BOTHC:
+ case DB_GET_BOTH_RANGE:
case DB_NEXT_DUP:
case DB_SET:
case DB_SET_RANGE:
@@ -940,6 +584,382 @@ err: if ((t_ret = __ham_release_meta(dbc)) != 0 && ret == 0)
return (ret);
}
+/*
+ * __ham_bulk -- Return bulk data from a hash table.
+ */
+static int
+__ham_bulk(dbc, data, flags)
+ DBC *dbc;
+ DBT *data;
+ u_int32_t flags;
+{
+ DB *dbp;
+ DB_MPOOLFILE *mpf;
+ HASH_CURSOR *cp;
+ PAGE *pg;
+ db_indx_t dup_len, dup_off, dup_tlen, indx, *inp;
+ db_lockmode_t lock_mode;
+ db_pgno_t pgno;
+ int32_t *endp, key_off, *offp, *saveoff;
+ u_int32_t key_size, size, space;
+ u_int8_t *dbuf, *dp, *hk, *np, *tmp;
+ int is_dup, is_key;
+ int need_pg, next_key, no_dup, pagesize, ret, t_ret;
+
+ ret = 0;
+ key_off = 0;
+ dup_len = dup_off = dup_tlen = 0;
+ size = 0;
+ dbp = dbc->dbp;
+ pagesize = dbp->pgsize;
+ mpf = dbp->mpf;
+ cp = (HASH_CURSOR *)dbc->internal;
+ is_key = LF_ISSET(DB_MULTIPLE_KEY) ? 1 : 0;
+ next_key = is_key && LF_ISSET(DB_OPFLAGS_MASK) != DB_NEXT_DUP;
+ no_dup = LF_ISSET(DB_OPFLAGS_MASK) == DB_NEXT_NODUP;
+ dbuf = data->data;
+ np = dp = dbuf;
+
+ /*
+ * Keep track of the space that is left; reserve room for the
+ * termination entry. (An illustrative reader for the resulting
+ * buffer format is sketched after this function.)
+ */
+ space = data->ulen;
+ space -= sizeof(*offp);
+
+ /* Build the offset/size table from the end up. */
+ endp = (int32_t *) ((u_int8_t *)dbuf + data->ulen);
+ endp--;
+ offp = endp;
+
+ key_size = 0;
+ lock_mode = F_ISSET(dbc, DBC_RMW) ? DB_LOCK_WRITE: DB_LOCK_READ;
+
+next_pg:
+ need_pg = 1;
+ indx = cp->indx;
+ pg = cp->page;
+ inp = P_INP(dbp, pg);
+
+ do {
+ if (is_key) {
+ hk = H_PAIRKEY(dbp, pg, indx);
+ if (HPAGE_PTYPE(hk) == H_OFFPAGE) {
+ memcpy(&key_size,
+ HOFFPAGE_TLEN(hk), sizeof(u_int32_t));
+ memcpy(&pgno,
+ HOFFPAGE_PGNO(hk), sizeof(db_pgno_t));
+ size = key_size;
+ if (key_size > space)
+ goto get_key_space;
+ if ((ret = __bam_bulk_overflow(
+ dbc, key_size, pgno, np)) != 0)
+ return (ret);
+ space -= key_size;
+ key_off = (int32_t)(np - dbuf);
+ np += key_size;
+ } else {
+ if (need_pg) {
+ dp = np;
+ size = pagesize - HOFFSET(pg);
+ if (space < size) {
+get_key_space:
+ if (offp == endp) {
+ data->size =
+ ALIGN(size +
+ pagesize,
+ sizeof(u_int32_t));
+ return (ENOMEM);
+ }
+ goto back_up;
+ }
+ memcpy(dp,
+ (u_int8_t *)pg + HOFFSET(pg), size);
+ need_pg = 0;
+ space -= size;
+ np += size;
+ }
+ key_size = LEN_HKEY(dbp, pg, pagesize, indx);
+ key_off = (int32_t)(inp[indx] - HOFFSET(pg)
+ + dp - dbuf + SSZA(HKEYDATA, data));
+ }
+ }
+
+ hk = H_PAIRDATA(dbp, pg, indx);
+ switch (HPAGE_PTYPE(hk)) {
+ case H_DUPLICATE:
+ case H_KEYDATA:
+ if (need_pg) {
+ dp = np;
+ size = pagesize - HOFFSET(pg);
+ if (space < size) {
+back_up:
+ if (indx != 0) {
+ indx -= 2;
+ /* XXX
+ * It's not clear that this is
+ * the right way to fix this,
+ * but here goes.
+ * If we are backing up onto a
+ * duplicate, then we need to
+ * position ourselves at the
+ * end of the duplicate set.
+ * We probably need to make
+ * this work for H_OFFDUP too.
+ * It might be worth making a
+ * dummy cursor and calling
+ * __ham_item_prev.
+ */
+ tmp = H_PAIRDATA(dbp, pg, indx);
+ if (HPAGE_PTYPE(tmp) ==
+ H_DUPLICATE) {
+ dup_off = dup_tlen =
+ LEN_HDATA(dbp, pg,
+ pagesize, indx + 1);
+ memcpy(&dup_len,
+ HKEYDATA_DATA(tmp),
+ sizeof(db_indx_t));
+ }
+ goto get_space;
+ }
+ /* indx == 0 */
+ if ((ret = __ham_item_prev(dbc,
+ lock_mode, &pgno)) != 0) {
+ if (ret != DB_NOTFOUND)
+ return (ret);
+ if ((ret = mpf->put(mpf,
+ cp->page, 0)) != 0)
+ return (ret);
+ cp->page = NULL;
+ if (cp->bucket == 0) {
+ cp->indx = indx =
+ NDX_INVALID;
+ goto get_space;
+ }
+ if ((ret =
+ __ham_get_meta(dbc)) != 0)
+ return (ret);
+
+ cp->bucket--;
+ cp->pgno = BUCKET_TO_PAGE(cp,
+ cp->bucket);
+ cp->indx = NDX_INVALID;
+ if ((ret = __ham_release_meta(
+ dbc)) != 0)
+ return (ret);
+ if ((ret = __ham_item_prev(dbc,
+ lock_mode, &pgno)) != 0)
+ return (ret);
+ }
+ indx = cp->indx;
+get_space:
+ /*
+ * See if we put any data in the buffer.
+ */
+ if (offp >= endp ||
+ F_ISSET(dbc, DBC_TRANSIENT)) {
+ data->size = ALIGN(size +
+ data->ulen - space,
+ sizeof(u_int32_t));
+ return (ENOMEM);
+ }
+ /*
+ * Don't continue; we're all out
+ * of space, even though we're
+ * returning success.
+ */
+ next_key = 0;
+ break;
+ }
+ memcpy(dp, (u_int8_t *)pg + HOFFSET(pg), size);
+ need_pg = 0;
+ space -= size;
+ np += size;
+ }
+
+ /*
+ * We're about to crack the offset(s) and length(s)
+ * out of an H_KEYDATA or H_DUPLICATE item.
+ * There are three cases:
+ * 1. We were moved into a duplicate set by
+ * the standard hash cursor code. Respect
+ * the dup_off and dup_tlen we were given.
+ * 2. We stumbled upon a duplicate set while
+ * walking the page on our own. We need to
+ * recognize it as a dup and set dup_off and
+ * dup_tlen.
+ * 3. The current item is not a dup.
+ */
+ if (F_ISSET(cp, H_ISDUP)) {
+ /* Case 1 */
+ is_dup = 1;
+ dup_len = cp->dup_len;
+ dup_off = cp->dup_off;
+ dup_tlen = cp->dup_tlen;
+ } else if (HPAGE_PTYPE(hk) == H_DUPLICATE) {
+ /* Case 2 */
+ is_dup = 1;
+ /*
+ * If we run out of memory and bail,
+ * make sure the fact we're in a dup set
+ * isn't ignored later.
+ */
+ F_SET(cp, H_ISDUP);
+ dup_off = 0;
+ memcpy(&dup_len,
+ HKEYDATA_DATA(hk), sizeof(db_indx_t));
+ dup_tlen = LEN_HDATA(dbp, pg, pagesize, indx);
+ } else
+ /* Case 3 */
+ is_dup = dup_len = dup_off = dup_tlen = 0;
+
+ do {
+ space -= (is_key ? 4 : 2) * sizeof(*offp);
+ size += (is_key ? 4 : 2) * sizeof(*offp);
+ /*
+ * Since space is an unsigned, if we happen
+ * to wrap, then this comparison will turn out
+ * to be true. XXX Wouldn't it be better to
+ * simply check above that space is greater than
+ * the value we're about to subtract???
+ */
+ if (space > data->ulen) {
+ if (!is_dup || dup_off == 0)
+ goto back_up;
+ dup_off -= (db_indx_t)DUP_SIZE(offp[1]);
+ goto get_space;
+ }
+ if (is_key) {
+ *offp-- = key_off;
+ *offp-- = key_size;
+ }
+ if (is_dup) {
+ *offp-- = (int32_t)(
+ inp[indx + 1] - HOFFSET(pg) +
+ dp - dbuf + SSZA(HKEYDATA, data) +
+ dup_off + sizeof(db_indx_t));
+ memcpy(&dup_len,
+ HKEYDATA_DATA(hk) + dup_off,
+ sizeof(db_indx_t));
+ dup_off += DUP_SIZE(dup_len);
+ *offp-- = dup_len;
+ } else {
+ *offp-- = (int32_t)(
+ inp[indx + 1] - HOFFSET(pg) +
+ dp - dbuf + SSZA(HKEYDATA, data));
+ *offp-- = LEN_HDATA(dbp, pg,
+ pagesize, indx);
+ }
+ } while (is_dup && dup_off < dup_tlen && no_dup == 0);
+ F_CLR(cp, H_ISDUP);
+ break;
+ case H_OFFDUP:
+ memcpy(&pgno, HOFFPAGE_PGNO(hk), sizeof(db_pgno_t));
+ space -= 2 * sizeof(*offp);
+ if (space > data->ulen)
+ goto back_up;
+
+ if (is_key) {
+ space -= 2 * sizeof(*offp);
+ if (space > data->ulen)
+ goto back_up;
+ *offp-- = key_off;
+ *offp-- = key_size;
+ }
+ saveoff = offp;
+ if ((ret = __bam_bulk_duplicates(dbc,
+ pgno, dbuf, is_key ? offp + 2 : NULL,
+ &offp, &np, &space, no_dup)) != 0) {
+ if (ret == ENOMEM) {
+ size = space;
+ if (is_key && saveoff == offp) {
+ offp += 2;
+ goto back_up;
+ }
+ goto get_space;
+ }
+ return (ret);
+ }
+ break;
+ case H_OFFPAGE:
+ space -= (is_key ? 4 : 2) * sizeof(*offp);
+ if (space > data->ulen)
+ goto back_up;
+
+ memcpy(&size, HOFFPAGE_TLEN(hk), sizeof(u_int32_t));
+ memcpy(&pgno, HOFFPAGE_PGNO(hk), sizeof(db_pgno_t));
+ if (size > space)
+ goto back_up;
+
+ if ((ret =
+ __bam_bulk_overflow(dbc, size, pgno, np)) != 0)
+ return (ret);
+
+ if (is_key) {
+ *offp-- = key_off;
+ *offp-- = key_size;
+ }
+
+ *offp-- = (int32_t)(np - dbuf);
+ *offp-- = size;
+
+ np += size;
+ space -= size;
+ break;
+ }
+ } while (next_key && (indx += 2) < NUM_ENT(pg));
+
+ cp->indx = indx;
+ cp->dup_len = dup_len;
+ cp->dup_off = dup_off;
+ cp->dup_tlen = dup_tlen;
+
+ /* If we are off the page, then try the next page. */
+ if (ret == 0 && next_key && indx >= NUM_ENT(pg)) {
+ if ((ret = __ham_item_next(dbc, lock_mode, &pgno)) == 0)
+ goto next_pg;
+ if (ret != DB_NOTFOUND)
+ return (ret);
+ if ((ret = mpf->put(dbc->dbp->mpf, cp->page, 0)) != 0)
+ return (ret);
+ cp->page = NULL;
+ if ((ret = __ham_get_meta(dbc)) != 0)
+ return (ret);
+
+ cp->bucket++;
+ if (cp->bucket > cp->hdr->max_bucket) {
+ /*
+ * Restore cursor to its previous state. We're past
+ * the last item in the last bucket, so the next
+ * DBC->c_get(DB_NEXT) will return DB_NOTFOUND.
+ */
+ cp->bucket--;
+ ret = DB_NOTFOUND;
+ } else {
+ /*
+ * Start on the next bucket.
+ *
+ * Note that if this new bucket happens to be empty,
+ * but there's another non-empty bucket after it,
+ * we'll return early. This is a rare case, and we
+ * don't guarantee any particular number of keys
+ * returned on each call, so just let the next call
+ * to bulk get move forward by yet another bucket.
+ */
+ cp->pgno = BUCKET_TO_PAGE(cp, cp->bucket);
+ cp->indx = NDX_INVALID;
+ F_CLR(cp, H_ISDUP);
+ ret = __ham_item_next(dbc, lock_mode, &pgno);
+ }
+
+ if ((t_ret = __ham_release_meta(dbc)) != 0)
+ return (t_ret);
+ if (ret == 0)
+ goto next_pg;
+ if (ret != DB_NOTFOUND)
+ return (ret);
+ }
+ *offp = (u_int32_t) -1;
+ return (0);
+}
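+
+/*
+ * A minimal sketch of how an application reads the buffer built above,
+ * assuming a bulk get with DB_MULTIPLE_KEY into a user-allocated DBT
+ * "data" (the DB_MULTIPLE_* macros come from db.h):
+ *
+ *	void *p, *key, *val;
+ *	u_int32_t keylen, vallen;
+ *
+ *	DB_MULTIPLE_INIT(p, &data);
+ *	for (;;) {
+ *		DB_MULTIPLE_KEY_NEXT(p, &data, key, keylen, val, vallen);
+ *		if (p == NULL)
+ *			break;
+ *		... key/keylen and val/vallen point into data.data ...
+ *	}
+ */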
+
static int
__ham_c_put(dbc, key, data, flags, pgnop)
DBC *dbc;
@@ -949,6 +969,7 @@ __ham_c_put(dbc, key, data, flags, pgnop)
db_pgno_t *pgnop;
{
DB *dbp;
+ DB_MPOOLFILE *mpf;
DBT tmp_val, *myval;
HASH_CURSOR *hcp;
u_int32_t nbytes;
@@ -962,6 +983,7 @@ __ham_c_put(dbc, key, data, flags, pgnop)
COMPQUIET(myval, NULL);
dbp = dbc->dbp;
+ mpf = dbp->mpf;
hcp = (HASH_CURSOR *)dbc->internal;
if (F_ISSET(hcp, H_DELETED) &&
@@ -984,8 +1006,7 @@ __ham_c_put(dbc, key, data, flags, pgnop)
ret = 0;
if (hcp->seek_found_page != PGNO_INVALID &&
hcp->seek_found_page != hcp->pgno) {
- if ((ret = memp_fput(dbp->mpf, hcp->page, 0))
- != 0)
+ if ((ret = mpf->put(mpf, hcp->page, 0)) != 0)
goto err2;
hcp->page = NULL;
hcp->pgno = hcp->seek_found_page;
@@ -1000,9 +1021,10 @@ __ham_c_put(dbc, key, data, flags, pgnop)
* and then write the new bytes represented by
* val.
*/
- if ((ret = __ham_init_dbt(dbp->dbenv,
- &tmp_val, data->size + data->doff,
- &dbc->rdata.data, &dbc->rdata.ulen)) == 0) {
+ if ((ret = __ham_init_dbt(dbp->dbenv, &tmp_val,
+ data->size + data->doff,
+ &dbc->my_rdata.data,
+ &dbc->my_rdata.ulen)) == 0) {
memset(tmp_val.data, 0, data->doff);
memcpy((u_int8_t *)tmp_val.data +
data->doff, data->data, data->size);
@@ -1038,8 +1060,8 @@ done: if (ret == 0 && F_ISSET(hcp, H_EXPAND)) {
F_CLR(hcp, H_EXPAND);
}
- if (ret == 0 &&
- (t_ret = memp_fset(dbp->mpf, hcp->page, DB_MPOOL_DIRTY)) != 0)
+ if (hcp->page != NULL &&
+ (t_ret = mpf->set(mpf, hcp->page, DB_MPOOL_DIRTY)) != 0 && ret == 0)
ret = t_ret;
err2: if ((t_ret = __ham_release_meta(dbc)) != 0 && ret == 0)
@@ -1058,17 +1080,30 @@ __ham_expand_table(dbc)
DBC *dbc;
{
DB *dbp;
- PAGE *h;
+ DB_LOCK metalock;
+ DB_LSN lsn;
+ DB_MPOOLFILE *mpf;
+ DBMETA *mmeta;
HASH_CURSOR *hcp;
- db_pgno_t pgno;
- u_int32_t old_bucket, new_bucket;
- int ret;
+ PAGE *h;
+ db_pgno_t pgno, mpgno;
+ u_int32_t newalloc, new_bucket, old_bucket;
+ int dirty_meta, got_meta, logn, new_double, ret;
dbp = dbc->dbp;
+ mpf = dbp->mpf;
hcp = (HASH_CURSOR *)dbc->internal;
if ((ret = __ham_dirty_meta(dbc)) != 0)
return (ret);
+ LOCK_INIT(metalock);
+ mmeta = (DBMETA *) hcp->hdr;
+ mpgno = mmeta->pgno;
+ h = NULL;
+ dirty_meta = 0;
+ got_meta = 0;
+ newalloc = 0;
+
/*
* If the split point is about to increase, make sure that we
* have enough extra pages. The calculation here is weird.
@@ -1078,86 +1113,116 @@ __ham_expand_table(dbc)
* see what the log of one greater than that is; here we have to
* look at the log of max + 2. VERY NASTY STUFF.
*
- * It just got even nastier. With subdatabases, we have to request
- * a chunk of contiguous pages, so we do that here using an
- * undocumented feature of mpool (the MPOOL_NEW_GROUP flag) to
- * give us a number of contiguous pages. Ouch.
+ * We figure out what we need to do, then we log it, then request
+ * the pages from mpool. We don't want to fail after extending
+ * the file.
+ *
+ * If the page we are about to split into has already been allocated,
+ * then we simply need to get it to get its LSN. If it hasn't yet
+ * been allocated, then we know it's LSN (0,0).
*/
- if (hcp->hdr->max_bucket == hcp->hdr->high_mask) {
- /*
- * Ask mpool to give us a set of contiguous page numbers
- * large enough to contain the next doubling.
- *
- * Figure out how many new pages we need. This will return
- * us the last page. We calculate its page number, initialize
- * the page and then write it back to reserve all the pages
- * in between. It is possible that the allocation of new pages
- * has already been done, but the tranaction aborted. Since
- * we don't undo the allocation, check for a valid pgno before
- * doing the allocation.
- */
- pgno = hcp->hdr->max_bucket + 1;
- if (hcp->hdr->spares[__db_log2(pgno) + 1] == PGNO_INVALID)
- /* Allocate a group of pages. */
- ret = memp_fget(dbp->mpf,
- &pgno, DB_MPOOL_NEW_GROUP, &h);
- else {
- /* Just read in the last page of the batch */
- pgno = hcp->hdr->spares[__db_log2(pgno) + 1] +
- hcp->hdr->max_bucket + 1;
- /* Move to the last page of the group. */
- pgno += hcp->hdr->max_bucket;
- ret = memp_fget(dbp->mpf,
- &pgno, DB_MPOOL_CREATE, &h);
- }
- if (ret != 0)
- return (ret);
- P_INIT(h, dbp->pgsize, pgno,
- PGNO_INVALID, PGNO_INVALID, 0, P_HASH);
- pgno -= hcp->hdr->max_bucket;
- } else {
- pgno = BUCKET_TO_PAGE(hcp, hcp->hdr->max_bucket + 1);
+ new_bucket = hcp->hdr->max_bucket + 1;
+ old_bucket = new_bucket & hcp->hdr->low_mask;
+
+ new_double = hcp->hdr->max_bucket == hcp->hdr->high_mask;
+ logn = __db_log2(new_bucket);
+
+ if (!new_double || hcp->hdr->spares[logn + 1] != PGNO_INVALID) {
+ /* Page exists; get it so we can get its LSN */
+ pgno = BUCKET_TO_PAGE(hcp, new_bucket);
if ((ret =
- memp_fget(dbp->mpf, &pgno, DB_MPOOL_CREATE, &h)) != 0)
- return (ret);
+ mpf->get(mpf, &pgno, DB_MPOOL_CREATE, &h)) != 0)
+ goto err;
+ lsn = h->lsn;
+ } else {
+ /* Get the master meta-data page to do allocation. */
+ if (F_ISSET(dbp, DB_AM_SUBDB)) {
+ mpgno = PGNO_BASE_MD;
+ if ((ret = __db_lget(dbc,
+ 0, mpgno, DB_LOCK_WRITE, 0, &metalock)) != 0)
+ goto err;
+ if ((ret =
+ mpf->get(mpf, &mpgno, 0, (PAGE **)&mmeta)) != 0)
+ goto err;
+ got_meta = 1;
+ }
+ pgno = mmeta->last_pgno + 1;
+ ZERO_LSN(lsn);
+ newalloc = 1;
}
- /* Now we can log the meta-data split. */
- if (DB_LOGGING(dbc)) {
- if ((ret = __ham_metagroup_log(dbp->dbenv,
- dbc->txn, &h->lsn, 0, dbp->log_fileid,
- hcp->hdr->max_bucket, pgno, &hcp->hdr->dbmeta.lsn,
- &h->lsn)) != 0) {
- (void)memp_fput(dbp->mpf, h, DB_MPOOL_DIRTY);
- return (ret);
- }
+ /* Log the meta-data split first. */
+ if (DBC_LOGGING(dbc)) {
+ /*
+ * We always log the page number of the first page of
+ * the allocation group. However, the LSN that we log
+ * is either the LSN on the first page (if we did not
+ * do the actual allocation here) or the LSN on the last
+ * page of the unit (if we did do the allocation here).
+ */
+ if ((ret = __ham_metagroup_log(dbp, dbc->txn,
+ &lsn, 0, hcp->hdr->max_bucket, mpgno, &mmeta->lsn,
+ hcp->hdr->dbmeta.pgno, &hcp->hdr->dbmeta.lsn,
+ pgno, &lsn, newalloc)) != 0)
+ goto err;
+ } else
+ LSN_NOT_LOGGED(lsn);
- hcp->hdr->dbmeta.lsn = h->lsn;
- }
+ hcp->hdr->dbmeta.lsn = lsn;
- /* If we allocated some new pages, write out the last page. */
- if ((ret = memp_fput(dbp->mpf, h, DB_MPOOL_DIRTY)) != 0)
- return (ret);
+ if (new_double && hcp->hdr->spares[logn + 1] == PGNO_INVALID) {
+ /*
+ * We need to begin a new doubling and we have not allocated
+ * any pages yet. Read the last page in and initialize it to
+ * make the allocation contiguous. The pgno we calculated
+ * above is the first page allocated. The entry in spares is
+ * that page number minus any buckets already allocated (it
+ * simplifies bucket to page translation). After we've set
+ * that, we calculate the last pgno.
+ */
+
+ hcp->hdr->spares[logn + 1] = pgno - new_bucket;
+ pgno += hcp->hdr->max_bucket;
+ mmeta->last_pgno = pgno;
+ mmeta->lsn = lsn;
+ dirty_meta = DB_MPOOL_DIRTY;
- new_bucket = ++hcp->hdr->max_bucket;
- old_bucket = (hcp->hdr->max_bucket & hcp->hdr->low_mask);
+ if ((ret = mpf->get(mpf, &pgno, DB_MPOOL_CREATE, &h)) != 0)
+ goto err;
+
+ P_INIT(h, dbp->pgsize,
+ pgno, PGNO_INVALID, PGNO_INVALID, 0, P_HASH);
+ }
+
+ /* Write out whatever page we ended up modifying. */
+ h->lsn = lsn;
+ if ((ret = mpf->put(mpf, h, DB_MPOOL_DIRTY)) != 0)
+ goto err;
+ h = NULL;
/*
- * If we started a new doubling, fill in the spares array with
- * the starting page number negatively offset by the bucket number.
+ * Update the meta-data page of this hash database.
*/
- if (new_bucket > hcp->hdr->high_mask) {
- /* Starting a new doubling */
+ hcp->hdr->max_bucket = new_bucket;
+ if (new_double) {
hcp->hdr->low_mask = hcp->hdr->high_mask;
hcp->hdr->high_mask = new_bucket | hcp->hdr->low_mask;
- if (hcp->hdr->spares[__db_log2(new_bucket) + 1] == PGNO_INVALID)
- hcp->hdr->spares[__db_log2(new_bucket) + 1] =
- pgno - new_bucket;
}
/* Relocate records to the new bucket */
- return (__ham_split_page(dbc, old_bucket, new_bucket));
+ ret = __ham_split_page(dbc, old_bucket, new_bucket);
+
+err: if (got_meta)
+ (void)mpf->put(mpf, mmeta, dirty_meta);
+
+ if (LOCK_ISSET(metalock))
+ (void)__TLPUT(dbc, metalock);
+
+ if (h != NULL)
+ (void)mpf->put(mpf, h, 0);
+
+ return (ret);
}
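
The bucket arithmetic in the rewritten __ham_expand_table() above is terse, so here is a minimal standalone sketch of the same calculation in plain C. The variable names and log2_ceil() are local stand-ins (log2_ceil mimics what __db_log2() is used for here), not the library's internals; only the relationships shown in the hunk are assumed.

    #include <stdio.h>

    /* Stand-in for __db_log2(): smallest i with 2^i >= n. */
    static unsigned log2_ceil(unsigned n)
    {
        unsigned i, limit;

        for (i = 0, limit = 1; limit < n; i++, limit <<= 1)
            ;
        return (i);
    }

    int main(void)
    {
        /* Mirrors hdr->max_bucket / low_mask / high_mask before a split. */
        unsigned max_bucket = 3, low_mask = 1, high_mask = 3;

        unsigned new_bucket = max_bucket + 1;        /* bucket being created  */
        unsigned old_bucket = new_bucket & low_mask; /* bucket it splits from */
        int new_double = (max_bucket == high_mask);  /* starts a new doubling */

        printf("split bucket %u -> new bucket %u (doubling=%d, spares slot %u)\n",
            old_bucket, new_bucket, new_double, log2_ceil(new_bucket) + 1);

        if (new_double) {                            /* grow the masks */
            low_mask = high_mask;
            high_mask = new_bucket | low_mask;
        }
        printf("masks now low=%#x high=%#x\n", low_mask, high_mask);
        return (0);
    }

With max_bucket equal to high_mask the table is about to start a new doubling, which is exactly the new_double case that the __ham_metagroup_log() call above records before any pages are touched.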
/*
@@ -1191,7 +1256,7 @@ __ham_call_hash(dbc, k, len)
* everything held by the cursor.
*/
static int
-__ham_dup_return (dbc, val, flags)
+__ham_dup_return(dbc, val, flags)
DBC *dbc;
DBT *val;
u_int32_t flags;
@@ -1211,7 +1276,7 @@ __ham_dup_return (dbc, val, flags)
dbp = dbc->dbp;
hcp = (HASH_CURSOR *)dbc->internal;
ndx = H_DATAINDEX(hcp->indx);
- type = HPAGE_TYPE(hcp->page, ndx);
+ type = HPAGE_TYPE(dbp, hcp->page, ndx);
pp = hcp->page;
myval = val;
@@ -1228,8 +1293,8 @@ __ham_dup_return (dbc, val, flags)
DB_ASSERT(type != H_OFFDUP);
/* Case 1 */
- if (type != H_DUPLICATE &&
- flags != DB_GET_BOTH && flags != DB_GET_BOTHC)
+ if (type != H_DUPLICATE && flags != DB_GET_BOTH &&
+ flags != DB_GET_BOTHC && flags != DB_GET_BOTH_RANGE)
return (0);
/*
@@ -1239,11 +1304,11 @@ __ham_dup_return (dbc, val, flags)
*/
if (!F_ISSET(hcp, H_ISDUP) && type == H_DUPLICATE) {
F_SET(hcp, H_ISDUP);
- hcp->dup_tlen = LEN_HDATA(hcp->page,
+ hcp->dup_tlen = LEN_HDATA(dbp, hcp->page,
hcp->hdr->dbmeta.pagesize, hcp->indx);
- hk = H_PAIRDATA(hcp->page, hcp->indx);
- if (flags == DB_LAST
- || flags == DB_PREV || flags == DB_PREV_NODUP) {
+ hk = H_PAIRDATA(dbp, hcp->page, hcp->indx);
+ if (flags == DB_LAST ||
+ flags == DB_PREV || flags == DB_PREV_NODUP) {
hcp->dup_off = 0;
do {
memcpy(&len,
@@ -1265,7 +1330,8 @@ __ham_dup_return (dbc, val, flags)
* may need to adjust the cursor before returning data.
* Case 4
*/
- if (flags == DB_GET_BOTH || flags == DB_GET_BOTHC) {
+ if (flags == DB_GET_BOTH ||
+ flags == DB_GET_BOTHC || flags == DB_GET_BOTH_RANGE) {
if (F_ISSET(hcp, H_ISDUP)) {
/*
* If we're doing a join, search forward from the
@@ -1274,7 +1340,7 @@ __ham_dup_return (dbc, val, flags)
if (flags == DB_GET_BOTHC)
F_SET(hcp, H_CONTINUE);
- __ham_dsearch(dbc, val, &off, &cmp);
+ __ham_dsearch(dbc, val, &off, &cmp, flags);
/*
* This flag is set nowhere else and is safe to
@@ -1283,7 +1349,7 @@ __ham_dup_return (dbc, val, flags)
F_CLR(hcp, H_CONTINUE);
hcp->dup_off = off;
} else {
- hk = H_PAIRDATA(hcp->page, hcp->indx);
+ hk = H_PAIRDATA(dbp, hcp->page, hcp->indx);
if (((HKEYDATA *)hk)->type == H_OFFPAGE) {
memcpy(&tlen,
HOFFPAGE_TLEN(hk), sizeof(u_int32_t));
@@ -1298,7 +1364,7 @@ __ham_dup_return (dbc, val, flags)
* routines may only look at data and size.
*/
tmp_val.data = HKEYDATA_DATA(hk);
- tmp_val.size = LEN_HDATA(hcp->page,
+ tmp_val.size = LEN_HDATA(dbp, hcp->page,
dbp->pgsize, hcp->indx);
cmp = dbp->dup_compare == NULL ?
__bam_defcmp(dbp, &tmp_val, val) :
@@ -1311,6 +1377,18 @@ __ham_dup_return (dbc, val, flags)
}
/*
+ * If we're doing a bulk get, we don't want to actually return
+ * the data: __ham_bulk will take care of cracking out the
+ * duplicates appropriately.
+ *
+ * The rest of this function calculates partial offsets and
+ * handles the actual __db_ret, so just return if
+ * DB_MULTIPLE(_KEY) is set.
+ */
+ if (F_ISSET(dbc, DBC_MULTIPLE | DBC_MULTIPLE_KEY))
+ return (0);
+
+ /*
* Now, everything is initialized, grab a duplicate if
* necessary.
*/
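
The bulk path the comment above defers to is driven from the application with the DB_MULTIPLE buffer macros. The following is a hedged caller-side sketch of that usage: the function name, buffer size, and error handling are illustrative only and not taken from this tree.

    #include <stdio.h>
    #include <string.h>
    #include <db.h>

    /* Fetch every duplicate of one key in a single bulk cursor call. */
    int
    dump_dups(DB *dbp, DBT *key)
    {
        DBC *dbc;
        DBT data;
        void *p, *retdata;
        u_int32_t retdlen;
        u_int8_t buf[16 * 1024];        /* bulk buffers: multiple of 1024 */
        int ret;

        if ((ret = dbp->cursor(dbp, NULL, &dbc, 0)) != 0)
            return (ret);

        memset(&data, 0, sizeof(data));
        data.data = buf;
        data.ulen = sizeof(buf);
        data.flags = DB_DBT_USERMEM;

        if ((ret = dbc->c_get(dbc, key, &data, DB_SET | DB_MULTIPLE)) == 0)
            for (DB_MULTIPLE_INIT(p, &data);;) {
                DB_MULTIPLE_NEXT(p, &data, retdata, retdlen);
                if (p == NULL)
                    break;
                printf("duplicate of %lu bytes at %p\n",
                    (unsigned long)retdlen, retdata);
            }

        (void)dbc->c_close(dbc);
        return (ret);
    }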
@@ -1351,8 +1429,8 @@ __ham_dup_return (dbc, val, flags)
* Finally, if we had a duplicate, pp, ndx, and myval should be
* set appropriately.
*/
- if ((ret = __db_ret(dbp, pp, ndx, myval, &dbc->rdata.data,
- &dbc->rdata.ulen)) != 0)
+ if ((ret = __db_ret(dbp, pp, ndx, myval, &dbc->rdata->data,
+ &dbc->rdata->ulen)) != 0)
return (ret);
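
For application-facing context on the new DB_GET_BOTH_RANGE handling, here is a minimal caller sketch. It assumes a handle already opened with DB_DUPSORT; the function name, key encoding, and error handling are illustrative rather than anything from this tree.

    #include <string.h>
    #include <db.h>

    /*
     * Position a cursor on the smallest duplicate of "key" that sorts
     * greater than or equal to "guess". DB_GET_BOTH requires an exact
     * data match; DB_GET_BOTH_RANGE accepts the first duplicate >= it.
     */
    int
    position_in_dups(DB *dbp, const char *key, const char *guess)
    {
        DBC *dbc;
        DBT k, d;
        int ret;

        if ((ret = dbp->cursor(dbp, NULL, &dbc, 0)) != 0)
            return (ret);

        memset(&k, 0, sizeof(k));
        memset(&d, 0, sizeof(d));
        k.data = (void *)key;
        k.size = (u_int32_t)strlen(key) + 1;
        d.data = (void *)guess;
        d.size = (u_int32_t)strlen(guess) + 1;

        ret = dbc->c_get(dbc, &k, &d, DB_GET_BOTH_RANGE);

        (void)dbc->c_close(dbc);
        return (ret);
    }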
/*
@@ -1374,6 +1452,7 @@ __ham_overwrite(dbc, nval, flags)
u_int32_t flags;
{
DB *dbp;
+ DB_ENV *dbenv;
HASH_CURSOR *hcp;
DBT *myval, tmp_val, tmp_val2;
void *newrec;
@@ -1383,6 +1462,7 @@ __ham_overwrite(dbc, nval, flags)
int ret;
dbp = dbc->dbp;
+ dbenv = dbp->dbenv;
hcp = (HASH_CURSOR *)dbc->internal;
if (F_ISSET(hcp, H_ISDUP)) {
/*
@@ -1399,7 +1479,7 @@ __ham_overwrite(dbc, nval, flags)
*/
memset(&tmp_val, 0, sizeof(tmp_val));
if ((ret =
- __ham_dup_return (dbc, &tmp_val, DB_CURRENT)) != 0)
+ __ham_dup_return(dbc, &tmp_val, DB_CURRENT)) != 0)
return (ret);
/* Figure out new size. */
@@ -1435,7 +1515,7 @@ __ham_overwrite(dbc, nval, flags)
}
if ((ret = __os_malloc(dbp->dbenv,
- DUP_SIZE(newsize), NULL, &newrec)) != 0)
+ DUP_SIZE(newsize), &newrec)) != 0)
return (ret);
memset(&tmp_val2, 0, sizeof(tmp_val2));
F_SET(&tmp_val2, DB_DBT_PARTIAL);
@@ -1483,8 +1563,7 @@ __ham_overwrite(dbc, nval, flags)
tmp_val2.size = newsize;
if (dbp->dup_compare(
dbp, &tmp_val, &tmp_val2) != 0) {
- (void)__os_free(newrec,
- DUP_SIZE(newsize));
+ (void)__os_free(dbenv, newrec);
return (__db_duperr(dbp, flags));
}
}
@@ -1495,7 +1574,7 @@ __ham_overwrite(dbc, nval, flags)
tmp_val2.dlen = DUP_SIZE(hcp->dup_len);
ret = __ham_replpair(dbc, &tmp_val2, 0);
- (void)__os_free(newrec, DUP_SIZE(newsize));
+ (void)__os_free(dbenv, newrec);
/* Update cursor */
if (ret != 0)
@@ -1520,7 +1599,7 @@ __ham_overwrite(dbc, nval, flags)
/* Make sure we maintain sort order. */
if (dbp->dup_compare != NULL) {
tmp_val2.data =
- HKEYDATA_DATA(H_PAIRDATA(hcp->page,
+ HKEYDATA_DATA(H_PAIRDATA(dbp, hcp->page,
hcp->indx)) + hcp->dup_off +
sizeof(db_indx_t);
tmp_val2.size = hcp->dup_len;
@@ -1529,8 +1608,8 @@ __ham_overwrite(dbc, nval, flags)
}
/* Overwriting a complete duplicate. */
if ((ret =
- __ham_make_dup(dbp->dbenv, nval,
- &tmp_val, &dbc->rdata.data, &dbc->rdata.ulen)) != 0)
+ __ham_make_dup(dbp->dbenv, nval, &tmp_val,
+ &dbc->my_rdata.data, &dbc->my_rdata.ulen)) != 0)
return (ret);
/* Now fix what we are replacing. */
tmp_val.doff = hcp->dup_off;
@@ -1541,7 +1620,7 @@ __ham_overwrite(dbc, nval, flags)
hcp->dup_tlen += (nval->size - hcp->dup_len);
else
hcp->dup_tlen -= (hcp->dup_len - nval->size);
- hcp->dup_len = DUP_SIZE(nval->size);
+ hcp->dup_len = (db_indx_t)DUP_SIZE(nval->size);
}
myval = &tmp_val;
} else if (!F_ISSET(nval, DB_DBT_PARTIAL)) {
@@ -1549,12 +1628,12 @@ __ham_overwrite(dbc, nval, flags)
memcpy(&tmp_val, nval, sizeof(*nval));
F_SET(&tmp_val, DB_DBT_PARTIAL);
tmp_val.doff = 0;
- hk = H_PAIRDATA(hcp->page, hcp->indx);
+ hk = H_PAIRDATA(dbp, hcp->page, hcp->indx);
if (HPAGE_PTYPE(hk) == H_OFFPAGE)
memcpy(&tmp_val.dlen,
HOFFPAGE_TLEN(hk), sizeof(u_int32_t));
else
- tmp_val.dlen = LEN_HDATA(hcp->page,
+ tmp_val.dlen = LEN_HDATA(dbp, hcp->page,
hcp->hdr->dbmeta.pagesize, hcp->indx);
myval = &tmp_val;
} else
@@ -1601,7 +1680,7 @@ __ham_lookup(dbc, key, sought, mode, pgnop)
hcp->bucket = __ham_call_hash(dbc, (u_int8_t *)key->data, key->size);
hcp->pgno = BUCKET_TO_PAGE(hcp, hcp->bucket);
- while (1) {
+ for (;;) {
*pgnop = PGNO_INVALID;
if ((ret = __ham_item_next(dbc, mode, pgnop)) != 0)
return (ret);
@@ -1609,7 +1688,7 @@ __ham_lookup(dbc, key, sought, mode, pgnop)
if (F_ISSET(hcp, H_NOMORE))
break;
- hk = H_PAIRKEY(hcp->page, hcp->indx);
+ hk = H_PAIRKEY(dbp, hcp->page, hcp->indx);
switch (HPAGE_PTYPE(hk)) {
case H_OFFPAGE:
memcpy(&tlen, HOFFPAGE_TLEN(hk), sizeof(u_int32_t));
@@ -1625,12 +1704,12 @@ __ham_lookup(dbc, key, sought, mode, pgnop)
break;
case H_KEYDATA:
if (key->size ==
- LEN_HKEY(hcp->page, dbp->pgsize, hcp->indx) &&
+ LEN_HKEY(dbp, hcp->page, dbp->pgsize, hcp->indx) &&
memcmp(key->data,
HKEYDATA_DATA(hk), key->size) == 0) {
/* Found the key, check for data type. */
found_key: F_SET(hcp, H_OK);
- dk = H_PAIRDATA(hcp->page, hcp->indx);
+ dk = H_PAIRDATA(dbp, hcp->page, hcp->indx);
if (HPAGE_PTYPE(dk) == H_OFFDUP)
memcpy(pgnop, HOFFDUP_PGNO(dk),
sizeof(db_pgno_t));
@@ -1643,7 +1722,7 @@ found_key: F_SET(hcp, H_OK);
* These are errors because keys are never
* duplicated, only data items are.
*/
- return (__db_pgfmt(dbp, PGNO(hcp->page)));
+ return (__db_pgfmt(dbp->dbenv, PGNO(hcp->page)));
}
}
@@ -1677,7 +1756,7 @@ __ham_init_dbt(dbenv, dbt, size, bufp, sizep)
memset(dbt, 0, sizeof(*dbt));
if (*sizep < size) {
- if ((ret = __os_realloc(dbenv, size, NULL, bufp)) != 0) {
+ if ((ret = __os_realloc(dbenv, size, bufp)) != 0) {
*sizep = 0;
return (ret);
}
@@ -1732,8 +1811,8 @@ __ham_c_update(dbc, len, add, is_dup)
MUTEX_THREAD_LOCK(dbenv, dbenv->dblist_mutexp);
/*
- * Calcuate the order of this deleted record.
- * This will be one grater than any cursor that is pointing
+ * Calculate the order of this deleted record.
+ * This will be one greater than any cursor that is pointing
* at this record and already marked as deleted.
*/
order = 0;
@@ -1749,11 +1828,11 @@ __ham_c_update(dbc, len, add, is_dup)
continue;
lcp = (HASH_CURSOR *)cp->internal;
if (F_ISSET(lcp, H_DELETED) &&
- hcp->pgno == lcp->pgno &&
- hcp->indx == lcp->indx &&
- order <= lcp->order &&
- (!is_dup || hcp->dup_off == lcp->dup_off))
- order = lcp->order +1;
+ hcp->pgno == lcp->pgno &&
+ hcp->indx == lcp->indx &&
+ order <= lcp->order &&
+ (!is_dup || hcp->dup_off == lcp->dup_off))
+ order = lcp->order + 1;
}
MUTEX_THREAD_UNLOCK(dbenv, dbp->mutexp);
}
@@ -1788,8 +1867,8 @@ __ham_c_update(dbc, len, add, is_dup)
* We are "undeleting" so unmark all
* cursors with the same order.
*/
- if (lcp->indx == hcp->indx
- && F_ISSET(lcp, H_DELETED)) {
+ if (lcp->indx == hcp->indx &&
+ F_ISSET(lcp, H_DELETED)) {
if (lcp->order == hcp->order)
F_CLR(lcp, H_DELETED);
else if (lcp->order >
@@ -1815,12 +1894,13 @@ __ham_c_update(dbc, len, add, is_dup)
} else {
if (lcp->indx > hcp->indx) {
lcp->indx -= 2;
- if (lcp->indx == hcp->indx
- && F_ISSET(lcp, H_DELETED))
+ if (lcp->indx == hcp->indx &&
+ F_ISSET(lcp, H_DELETED))
lcp->order += order;
- } else if (lcp->indx == hcp->indx
- && !F_ISSET(lcp, H_DELETED)) {
+ } else if (lcp->indx == hcp->indx &&
+ !F_ISSET(lcp, H_DELETED)) {
F_SET(lcp, H_DELETED);
+ F_CLR(lcp, H_ISDUP);
lcp->order = order;
}
}
@@ -1833,10 +1913,10 @@ __ham_c_update(dbc, len, add, is_dup)
*/
if (add) {
lcp->dup_tlen += len;
- if (lcp->dup_off == hcp->dup_off
- && F_ISSET(hcp, H_DELETED)
- && F_ISSET(lcp, H_DELETED)) {
- /* Abort of a delete. */
+ if (lcp->dup_off == hcp->dup_off &&
+ F_ISSET(hcp, H_DELETED) &&
+ F_ISSET(lcp, H_DELETED)) {
+ /* Abort of a delete. */
if (lcp->order == hcp->order)
F_CLR(lcp, H_DELETED);
else if (lcp->order >
@@ -1851,8 +1931,9 @@ __ham_c_update(dbc, len, add, is_dup)
lcp->dup_tlen -= len;
if (lcp->dup_off > hcp->dup_off) {
lcp->dup_off -= len;
- if (lcp->dup_off == hcp->dup_off
- && F_ISSET(lcp, H_DELETED))
+ if (lcp->dup_off ==
+ hcp->dup_off &&
+ F_ISSET(lcp, H_DELETED))
lcp->order += order;
} else if (lcp->dup_off ==
hcp->dup_off &&
@@ -1867,10 +1948,9 @@ __ham_c_update(dbc, len, add, is_dup)
}
MUTEX_THREAD_UNLOCK(dbenv, dbenv->dblist_mutexp);
- if (found != 0 && DB_LOGGING(dbc)) {
- if ((ret = __ham_curadj_log(dbenv,
- my_txn, &lsn, 0, dbp->log_fileid, hcp->pgno,
- hcp->indx, len, hcp->dup_off, add, is_dup, order)) != 0)
+ if (found != 0 && DBC_LOGGING(dbc)) {
+ if ((ret = __ham_curadj_log(dbp, my_txn, &lsn, 0, hcp->pgno,
+ hcp->indx, len, hcp->dup_off, add, is_dup, order)) != 0)
return (ret);
}
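
The "order" bookkeeping above is easier to see in isolation. Here is a toy model of the rule that a newly deleted cursor gets an order one greater than any already-deleted cursor parked on the same page/index slot; the structure and names are local stand-ins, not BDB's cursor types.

    #include <stdio.h>

    struct toy_cursor {
        unsigned pgno, indx, order;
        int deleted;
    };

    /* Order handed to a cursor newly deleted at slot (pgno, indx). */
    static unsigned
    next_order(const struct toy_cursor *list, int n, unsigned pgno, unsigned indx)
    {
        unsigned order = 0;
        int i;

        for (i = 0; i < n; i++)
            if (list[i].deleted &&
                list[i].pgno == pgno && list[i].indx == indx &&
                order <= list[i].order)
                order = list[i].order + 1;
        return (order);
    }

    int main(void)
    {
        struct toy_cursor c[] = {
            { 7, 2, 0, 1 },   /* deleted at slot (7,2), order 0 */
            { 7, 2, 1, 1 },   /* deleted at slot (7,2), order 1 */
            { 7, 4, 0, 0 },   /* live cursor on another slot    */
        };

        printf("a new deletion at (7,2) gets order %u\n",
            next_order(c, 3, 7, 2));      /* prints 2 */
        return (0);
    }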
@@ -1885,13 +1965,12 @@ __ham_c_update(dbc, len, add, is_dup)
* cursors on a split. The latter is so we can update cursors when we
* move items off page.
*
- * PUBLIC: int __ham_get_clist __P((DB *,
- * PUBLIC: db_pgno_t, u_int32_t, DBC ***));
+ * PUBLIC: int __ham_get_clist __P((DB *, db_pgno_t, u_int32_t, DBC ***));
*/
int
-__ham_get_clist(dbp, bucket, indx, listp)
+__ham_get_clist(dbp, pgno, indx, listp)
DB *dbp;
- db_pgno_t bucket;
+ db_pgno_t pgno;
u_int32_t indx;
DBC ***listp;
{
@@ -1915,18 +1994,20 @@ __ham_get_clist(dbp, bucket, indx, listp)
MUTEX_THREAD_LOCK(dbenv, dbp->mutexp);
for (cp = TAILQ_FIRST(&ldbp->active_queue); cp != NULL;
cp = TAILQ_NEXT(cp, links))
- if (cp->dbtype == DB_HASH &&
- ((indx == NDX_INVALID &&
- ((HASH_CURSOR *)(cp->internal))->bucket
- == bucket) || (indx != NDX_INVALID &&
- cp->internal->pgno == bucket &&
- cp->internal->indx == indx))) {
+ /*
+ * We match if cp->pgno matches the specified
+ * pgno, and if either the cp->indx matches
+ * or we weren't given an index.
+ */
+ if (cp->internal->pgno == pgno &&
+ (indx == NDX_INVALID ||
+ cp->internal->indx == indx)) {
if (nused >= nalloc) {
nalloc += 10;
if ((ret = __os_realloc(dbp->dbenv,
nalloc * sizeof(HASH_CURSOR *),
- NULL, listp)) != 0)
- return (ret);
+ listp)) != 0)
+ goto err;
}
(*listp)[nused++] = cp;
}
@@ -1939,74 +2020,25 @@ __ham_get_clist(dbp, bucket, indx, listp)
if (nused >= nalloc) {
nalloc++;
if ((ret = __os_realloc(dbp->dbenv,
- nalloc * sizeof(HASH_CURSOR *), NULL, listp)) != 0)
+ nalloc * sizeof(HASH_CURSOR *), listp)) != 0)
return (ret);
}
(*listp)[nused] = NULL;
}
return (0);
-}
-
-static int
-__ham_del_dups(orig_dbc, key)
- DBC *orig_dbc;
- DBT *key;
-{
- DBC *dbc;
- DBT data, lkey;
- int ret, t_ret;
-
- /* Allocate a cursor. */
- if ((ret = orig_dbc->c_dup(orig_dbc, &dbc, 0)) != 0)
- return (ret);
-
- /*
- * Walk a cursor through the key/data pairs, deleting as we go. Set
- * the DB_DBT_USERMEM flag, as this might be a threaded application
- * and the flags checking will catch us. We don't actually want the
- * keys or data, so request a partial of length 0.
- */
- memset(&lkey, 0, sizeof(lkey));
- F_SET(&lkey, DB_DBT_USERMEM | DB_DBT_PARTIAL);
- memset(&data, 0, sizeof(data));
- F_SET(&data, DB_DBT_USERMEM | DB_DBT_PARTIAL);
-
- /* Walk through the set of key/data pairs, deleting as we go. */
- if ((ret = dbc->c_get(dbc, key, &data, DB_SET)) != 0) {
- if (ret == DB_NOTFOUND)
- ret = 0;
- goto err;
- }
-
- for (;;) {
- if ((ret = dbc->c_del(dbc, 0)) != 0)
- goto err;
- if ((ret = dbc->c_get(dbc, &lkey, &data, DB_NEXT_DUP)) != 0) {
- if (ret == DB_NOTFOUND) {
- ret = 0;
- break;
- }
- goto err;
- }
- }
-
-err: /*
- * Discard the cursor. This will cause the underlying off-page dup
- * tree to go away as well as the actual entry on the page.
- */
- if ((t_ret = dbc->c_close(dbc)) != 0 && ret == 0)
- ret = t_ret;
-
+err:
+ MUTEX_THREAD_UNLOCK(dbp->dbenv, dbp->mutexp);
+ MUTEX_THREAD_UNLOCK(dbenv, dbenv->dblist_mutexp);
return (ret);
-
}
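
The list handed back here is a NULL-terminated array of cursor handles that the caller walks and then frees; the hash_dup.c hunks later in this patch show exactly that pattern (a `for (c = 0; hcs != NULL && hcs[c] != NULL; c++)` loop followed by `__os_free(dbp->dbenv, hcs)`), so no separate count needs to be returned.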
static int
__ham_c_writelock(dbc)
DBC *dbc;
{
- HASH_CURSOR *hcp;
+ DB_ENV *dbenv;
DB_LOCK tmp_lock;
+ HASH_CURSOR *hcp;
int ret;
/*
@@ -2017,79 +2049,13 @@ __ham_c_writelock(dbc)
return (0);
hcp = (HASH_CURSOR *)dbc->internal;
- if ((hcp->lock.off == LOCK_INVALID || hcp->lock_mode == DB_LOCK_READ)) {
+ if ((!LOCK_ISSET(hcp->lock) || hcp->lock_mode == DB_LOCK_READ)) {
tmp_lock = hcp->lock;
if ((ret = __ham_lock_bucket(dbc, DB_LOCK_WRITE)) != 0)
return (ret);
- if (tmp_lock.off != LOCK_INVALID &&
- (ret = lock_put(dbc->dbp->dbenv, &tmp_lock)) != 0)
- return (ret);
- }
- return (0);
-}
-
-/*
- * __ham_c_chgpg --
- *
- * Adjust the cursors after moving an item from one page to another.
- * If the old_index is NDX_INVALID, that means that we copied the
- * page wholesale and we're leaving indices intact and just changing
- * the page number.
- *
- * PUBLIC: int __ham_c_chgpg
- * PUBLIC: __P((DBC *, db_pgno_t, u_int32_t, db_pgno_t, u_int32_t));
- */
-int
-__ham_c_chgpg(dbc, old_pgno, old_index, new_pgno, new_index)
- DBC *dbc;
- db_pgno_t old_pgno, new_pgno;
- u_int32_t old_index, new_index;
-{
- DB *dbp, *ldbp;
- DB_ENV *dbenv;
- DB_LSN lsn;
- DB_TXN *my_txn;
- DBC *cp;
- HASH_CURSOR *hcp;
- int found, ret;
-
- dbp = dbc->dbp;
- dbenv = dbp->dbenv;
-
- my_txn = IS_SUBTRANSACTION(dbc->txn) ? dbc->txn : NULL;
- found = 0;
-
- MUTEX_THREAD_LOCK(dbenv, dbenv->dblist_mutexp);
- for (ldbp = __dblist_get(dbenv, dbp->adj_fileid);
- ldbp != NULL && ldbp->adj_fileid == dbp->adj_fileid;
- ldbp = LIST_NEXT(ldbp, dblistlinks)) {
- MUTEX_THREAD_LOCK(dbenv, dbp->mutexp);
- for (cp = TAILQ_FIRST(&ldbp->active_queue); cp != NULL;
- cp = TAILQ_NEXT(cp, links)) {
- if (cp == dbc || cp->dbtype != DB_HASH)
- continue;
-
- hcp = (HASH_CURSOR *)cp->internal;
- if (hcp->pgno == old_pgno) {
- if (old_index == NDX_INVALID) {
- hcp->pgno = new_pgno;
- } else if (hcp->indx == old_index) {
- hcp->pgno = new_pgno;
- hcp->indx = new_index;
- } else
- continue;
- if (my_txn != NULL && cp->txn != my_txn)
- found = 1;
- }
- }
- MUTEX_THREAD_UNLOCK(dbenv, dbp->mutexp);
- }
- MUTEX_THREAD_UNLOCK(dbenv, dbenv->dblist_mutexp);
-
- if (found != 0 && DB_LOGGING(dbc)) {
- if ((ret = __ham_chgpg_log(dbenv,
- my_txn, &lsn, 0, dbp->log_fileid, DB_HAM_CHGPG,
- old_pgno, new_pgno, old_index, new_index)) != 0)
+ dbenv = dbc->dbp->dbenv;
+ if (LOCK_ISSET(tmp_lock) &&
+ (ret = dbenv->lock_put(dbenv, &tmp_lock)) != 0)
return (ret);
}
return (0);
diff --git a/bdb/hash/hash.src b/bdb/hash/hash.src
index e6ecd11c907..b4b633c56e6 100644
--- a/bdb/hash/hash.src
+++ b/bdb/hash/hash.src
@@ -1,8 +1,10 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Copyright (c) 1996-2002
* Sleepycat Software. All rights reserved.
+ *
+ * $Id: hash.src,v 10.38 2002/04/17 19:03:10 krinsky Exp $
*/
/*
* Copyright (c) 1995, 1996
@@ -38,44 +40,10 @@
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
- *
- * $Id: hash.src,v 10.24 2000/12/12 17:41:48 bostic Exp $
- */
-
-/*
- * This is the source file used to create the logging functions for the
- * hash package. Each access method (or set of routines wishing to register
- * record types with the transaction system) should have a file like this.
- * Each type of log record and its parameters is defined. The basic
- * format of a record definition is:
- *
- * BEGIN <RECORD_TYPE>
- * ARG|STRING|POINTER <variable name> <variable type> <printf format>
- * ...
- * END
- * ARG the argument is a simple parameter of the type * specified.
- * DBT the argument is a DBT (db.h) containing a length and pointer.
- * PTR the argument is a pointer to the data type specified; the entire
- * type should be logged.
- *
- * There are a set of shell scripts of the form xxx.sh that generate c
- * code and or h files to process these. (This is probably better done
- * in a single PERL script, but for now, this works.)
- *
- * The DB recovery system requires the following three fields appear in
- * every record, and will assign them to the per-record-type structures
- * as well as making them the first parameters to the appropriate logging
- * call.
- * rectype: record-type, identifies the structure and log/read call
- * txnid: transaction id, a DBT in this implementation
- * prev: the last LSN for this transaction
*/
-/*
- * Use the argument of PREFIX as the prefix for all record types,
- * routines, id numbers, etc.
- */
-PREFIX ham
+PREFIX __ham
+DBPRIVATE
INCLUDE #include "db_config.h"
INCLUDE
@@ -83,16 +51,18 @@ INCLUDE #ifndef NO_SYSTEM_INCLUDES
INCLUDE #include <sys/types.h>
INCLUDE
INCLUDE #include <ctype.h>
-INCLUDE #include <errno.h>
INCLUDE #include <string.h>
INCLUDE #endif
INCLUDE
INCLUDE #include "db_int.h"
-INCLUDE #include "db_page.h"
-INCLUDE #include "db_dispatch.h"
-INCLUDE #include "db_am.h"
-INCLUDE #include "hash.h"
-INCLUDE #include "txn.h"
+INCLUDE #include "dbinc/crypto.h"
+INCLUDE #include "dbinc/db_page.h"
+INCLUDE #include "dbinc/db_dispatch.h"
+INCLUDE #include "dbinc/db_am.h"
+INCLUDE #include "dbinc/hash.h"
+INCLUDE #include "dbinc/rep.h"
+INCLUDE #include "dbinc/log.h"
+INCLUDE #include "dbinc/txn.h"
INCLUDE
/*
@@ -109,8 +79,8 @@ INCLUDE
*/
BEGIN insdel 21
ARG opcode u_int32_t lu
-ARG fileid int32_t ld
-ARG pgno db_pgno_t lu
+DB fileid int32_t ld
+WRLOCK pgno db_pgno_t lu
ARG ndx u_int32_t lu
POINTER pagelsn DB_LSN * lu
DBT key DBT s
@@ -129,46 +99,26 @@ END
*/
BEGIN newpage 22
ARG opcode u_int32_t lu
-ARG fileid int32_t ld
-ARG prev_pgno db_pgno_t lu
+DB fileid int32_t ld
+WRLOCKNZ prev_pgno db_pgno_t lu
POINTER prevlsn DB_LSN * lu
-ARG new_pgno db_pgno_t lu
+WRLOCKNZ new_pgno db_pgno_t lu
POINTER pagelsn DB_LSN * lu
-ARG next_pgno db_pgno_t lu
+WRLOCKNZ next_pgno db_pgno_t lu
POINTER nextlsn DB_LSN * lu
END
/*
- * DEPRECATED in 3.0.
- * Superceded by metagroup which allocates a group of new pages.
- *
- * Splitting requires two types of log messages. The first logs the
- * meta-data of the split.
- *
- * For the meta-data split
- * bucket: max_bucket in table before split
- * ovflpoint: overflow point before split.
- * spares: spares[ovflpoint] before split.
- */
-DEPRECATED splitmeta 23
-ARG fileid int32_t ld
-ARG bucket u_int32_t lu
-ARG ovflpoint u_int32_t lu
-ARG spares u_int32_t lu
-POINTER metalsn DB_LSN * lu
-END
-
-/*
* Splitting requires two types of log messages. The second logs the
* data on the original page. To redo the split, we have to visit the
* new page (pages) and add the items back on the page if they are not
* yet there.
*/
BEGIN splitdata 24
-ARG fileid int32_t ld
+DB fileid int32_t ld
ARG opcode u_int32_t lu
-ARG pgno db_pgno_t lu
-DBT pageimage DBT s
+WRLOCK pgno db_pgno_t lu
+PGDBT pageimage DBT s
POINTER pagelsn DB_LSN * lu
END
@@ -185,8 +135,8 @@ END
* makedup - this was a replacement that made an item a duplicate.
*/
BEGIN replace 25
-ARG fileid int32_t ld
-ARG pgno db_pgno_t lu
+DB fileid int32_t ld
+WRLOCK pgno db_pgno_t lu
ARG ndx u_int32_t lu
POINTER pagelsn DB_LSN * lu
ARG off int32_t ld
@@ -196,52 +146,6 @@ ARG makedup u_int32_t lu
END
/*
- * DEPRECATED in 3.0.
- * Hash now uses the btree allocation and deletion page routines.
- *
- * HASH-newpgno: is used to record getting/deleting a new page number.
- * This doesn't require much data modification, just modifying the
- * meta-data.
- * pgno is the page being allocated/freed.
- * free_pgno is the next_pgno on the free list.
- * old_type was the type of a page being deallocated.
- * old_pgno was the next page number before the deallocation.
- */
-DEPRECATED newpgno 26
-ARG opcode u_int32_t lu
-ARG fileid int32_t ld
-ARG pgno db_pgno_t lu
-ARG free_pgno db_pgno_t lu
-ARG old_type u_int32_t lu
-ARG old_pgno db_pgno_t lu
-ARG new_type u_int32_t lu
-POINTER pagelsn DB_LSN * lu
-POINTER metalsn DB_LSN * lu
-END
-
-/*
- * DEPRECATED in 3.0.
- * Since we now pre-allocate the contiguous chunk of pages for a doubling,
- * there is no big benefit to pre-allocating a few extra pages. It used
- * to be that the file was only physically as large as the current bucket,
- * so if you were on a doubling of 16K, but were only on the first bucket
- * of that 16K, the file was much shorter than it would be at the end of
- * the doubling, so we didn't want to force overflow pages at the end of the
- * 16K pages. Since we now must allocate the 16K pages (because of sub
- * databases), it's not a big deal to tack extra pages on at the end.
- *
- * ovfl: initialize a set of overflow pages.
- */
-DEPRECATED ovfl 27
-ARG fileid int32_t ld
-ARG start_pgno db_pgno_t lu
-ARG npages u_int32_t lu
-ARG free_pgno db_pgno_t lu
-ARG ovflpoint u_int32_t lu
-POINTER metalsn DB_LSN * lu
-END
-
-/*
* Used when we empty the first page in a bucket and there are pages after
* it. The page after it gets copied into the bucket page (since bucket
* pages have to be in fixed locations).
@@ -252,33 +156,46 @@ END
* nnextlsn: the LSN of nnext_pgno.
*/
BEGIN copypage 28
-ARG fileid int32_t ld
-ARG pgno db_pgno_t lu
+DB fileid int32_t ld
+WRLOCK pgno db_pgno_t lu
POINTER pagelsn DB_LSN * lu
-ARG next_pgno db_pgno_t lu
+WRLOCK next_pgno db_pgno_t lu
POINTER nextlsn DB_LSN * lu
-ARG nnext_pgno db_pgno_t lu
+WRLOCKNZ nnext_pgno db_pgno_t lu
POINTER nnextlsn DB_LSN * lu
-DBT page DBT s
+PGDBT page DBT s
END
/*
- * This replaces the old splitmeta operation. It behaves largely the same
- * way, but it has enough information so that we can record a group allocation
- * which we do now because of sub databases. The number of pages allocated is
- * always bucket + 1 pgno is the page number of the first newly allocated
- * bucket.
+ * This record logs the meta-data aspects of a split operation. It has enough
+ * information so that we can record both an individual page allocation as well
+ * as a group allocation which we do because in sub databases, the pages in
+ * a hash doubling must be contiguous. If we do a group allocation, the
+ * number of pages allocated is bucket + 1; pgno is the page number of the
+ * first newly allocated bucket.
+ *
* bucket: Old maximum bucket number.
- * pgno: Page allocated to bucket + 1 (first newly allocated page)
+ * mmpgno: Master meta-data page number (0 if same as mpgno).
+ * mmetalsn: Lsn of the master meta-data page.
+ * mpgno: Meta-data page number.
* metalsn: Lsn of the meta-data page.
- * pagelsn: Lsn of the maximum page allocated.
+ * pgno: Page allocated to bucket + 1 (first newly allocated page)
+ * pagelsn: Lsn of either the first page allocated (if newalloc == 0) or
+ * the last page allocated (if newalloc == 1).
+ * newalloc: 1 indicates that this record did the actual allocation;
+ * 0 indicates that the pages were already allocated from a
+ * previous (failed) allocation.
*/
BEGIN metagroup 29
-ARG fileid int32_t ld
+DB fileid int32_t ld
ARG bucket u_int32_t lu
-ARG pgno db_pgno_t lu
+WRLOCK mmpgno db_pgno_t lu
+POINTER mmetalsn DB_LSN * lu
+WRLOCKNZ mpgno db_pgno_t lu
POINTER metalsn DB_LSN * lu
+WRLOCK pgno db_pgno_t lu
POINTER pagelsn DB_LSN * lu
+ARG newalloc u_int32_t lu
END
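
To make the field list above concrete, here is a toy C rendering of one metagroup record. The types are local stand-ins only (DB_LSN is modeled as a file/offset pair), and this is not the structure generated from this record description by the dist scripts.

    #include <stdio.h>

    struct toy_lsn { unsigned file, offset; };

    struct toy_metagroup {          /* mirrors the fields listed above  */
        unsigned bucket;            /* old maximum bucket number        */
        unsigned mmpgno;            /* master meta-data page, 0 if same */
        struct toy_lsn mmetalsn;
        unsigned mpgno;             /* meta-data page                   */
        struct toy_lsn metalsn;
        unsigned pgno;              /* first page given to bucket + 1   */
        struct toy_lsn pagelsn;     /* first page's LSN if newalloc == 0,
                                     * last page's LSN if newalloc == 1 */
        unsigned newalloc;
    };

    int main(void)
    {
        struct toy_metagroup r =
            { 3, 0, {1, 28}, 1, {1, 64}, 6, {0, 0}, 1 };

        /* bucket + 1 pages are covered, starting at pgno. */
        printf("metagroup: doubling past bucket %u, pages %u..%u, newalloc=%u\n",
            r.bucket, r.pgno, r.pgno + r.bucket, r.newalloc);
        return (0);
    }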
/*
@@ -293,28 +210,10 @@ END
* start_pgno: starting page number
* num: number of allocated pages
*/
-DEPRECATED groupalloc1 30
-ARG fileid int32_t ld
-ARG pgno db_pgno_t lu
-POINTER metalsn DB_LSN * lu
-POINTER mmetalsn DB_LSN * lu
-ARG start_pgno db_pgno_t lu
-ARG num u_int32_t lu
-END
-
-DEPRECATED groupalloc2 31
-ARG fileid int32_t ld
-POINTER meta_lsn DB_LSN * lu
-POINTER alloc_lsn DB_LSN * lu
-ARG start_pgno db_pgno_t lu
-ARG num u_int32_t lu
-ARG free db_pgno_t lu
-END
-
BEGIN groupalloc 32
-ARG fileid int32_t ld
+DB fileid int32_t ld
POINTER meta_lsn DB_LSN * lu
-ARG start_pgno db_pgno_t lu
+WRLOCK start_pgno db_pgno_t lu
ARG num u_int32_t lu
ARG free db_pgno_t lu
END
@@ -329,7 +228,7 @@ END
* dup_off - if a dup its offset
* add - 1 if add 0 if delete
* is_dup - 1 if dup 0 otherwise.
- * order - order assinged to this deleted record or dup.
+ * order - order assigned to this deleted record or dup.
*
* chgpg - removed a page, move the records to a new page
* mode - CHGPG page was deleted or records move to new page.
@@ -338,9 +237,15 @@ END
* old_pgno, new_pgno - old and new page numbers.
* old_index, new_index - old and new index numbers, NDX_INVALID if
* it affects all records on the page.
+ * For three opcodes new in 3.3 (DB_HAM_DELFIRSTPG, DELMIDPG,
+ * and DELLASTPG), we overload old_indx and new_indx to avoid
+ * needing a new log record type: old_indx stores the only
+ * indx of interest to these records, and new_indx stores the
+ * order that's assigned to the lowest deleted record we're
+ * moving.
*/
BEGIN curadj 33
-ARG fileid int32_t ld
+DB fileid int32_t ld
ARG pgno db_pgno_t lu
ARG indx u_int32_t lu
ARG len u_int32_t lu
@@ -351,7 +256,7 @@ ARG order u_int32_t lu
END
BEGIN chgpg 34
-ARG fileid int32_t ld
+DB fileid int32_t ld
ARG mode db_ham_mode ld
ARG old_pgno db_pgno_t lu
ARG new_pgno db_pgno_t lu
diff --git a/bdb/hash/hash_conv.c b/bdb/hash/hash_conv.c
index 30d17a6164d..a93e56a2ee4 100644
--- a/bdb/hash/hash_conv.c
+++ b/bdb/hash/hash_conv.c
@@ -1,13 +1,13 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Copyright (c) 1996-2002
* Sleepycat Software. All rights reserved.
*/
#include "db_config.h"
#ifndef lint
-static const char revid[] = "$Id: hash_conv.c,v 11.5 2000/03/31 00:30:32 ubell Exp $";
+static const char revid[] = "$Id: hash_conv.c,v 11.13 2002/08/06 05:34:35 bostic Exp $";
#endif /* not lint */
#ifndef NO_SYSTEM_INCLUDES
@@ -15,20 +15,21 @@ static const char revid[] = "$Id: hash_conv.c,v 11.5 2000/03/31 00:30:32 ubell E
#endif
#include "db_int.h"
-#include "db_page.h"
-#include "db_swap.h"
-#include "hash.h"
+#include "dbinc/db_page.h"
+#include "dbinc/db_swap.h"
+#include "dbinc/hash.h"
/*
* __ham_pgin --
* Convert host-specific page layout from the host-independent format
* stored on disk.
*
- * PUBLIC: int __ham_pgin __P((DB_ENV *, db_pgno_t, void *, DBT *));
+ * PUBLIC: int __ham_pgin __P((DB_ENV *, DB *, db_pgno_t, void *, DBT *));
*/
int
-__ham_pgin(dbenv, pg, pp, cookie)
+__ham_pgin(dbenv, dummydbp, pg, pp, cookie)
DB_ENV *dbenv;
+ DB *dummydbp;
db_pgno_t pg;
void *pp;
DBT *cookie;
@@ -45,16 +46,16 @@ __ham_pgin(dbenv, pg, pp, cookie)
* initialize the rest of the page and return.
*/
if (h->type != P_HASHMETA && h->pgno == PGNO_INVALID) {
- P_INIT(pp, pginfo->db_pagesize,
+ P_INIT(pp, (db_indx_t)pginfo->db_pagesize,
pg, PGNO_INVALID, PGNO_INVALID, 0, P_HASH);
return (0);
}
- if (!pginfo->needswap)
+ if (!F_ISSET(pginfo, DB_AM_SWAP))
return (0);
return (h->type == P_HASHMETA ? __ham_mswap(pp) :
- __db_byteswap(dbenv, pg, pp, pginfo->db_pagesize, 1));
+ __db_byteswap(dbenv, dummydbp, pg, pp, pginfo->db_pagesize, 1));
}
/*
@@ -62,11 +63,12 @@ __ham_pgin(dbenv, pg, pp, cookie)
* Convert host-specific page layout to the host-independent format
* stored on disk.
*
- * PUBLIC: int __ham_pgout __P((DB_ENV *, db_pgno_t, void *, DBT *));
+ * PUBLIC: int __ham_pgout __P((DB_ENV *, DB *, db_pgno_t, void *, DBT *));
*/
int
-__ham_pgout(dbenv, pg, pp, cookie)
+__ham_pgout(dbenv, dummydbp, pg, pp, cookie)
DB_ENV *dbenv;
+ DB *dummydbp;
db_pgno_t pg;
void *pp;
DBT *cookie;
@@ -75,12 +77,12 @@ __ham_pgout(dbenv, pg, pp, cookie)
PAGE *h;
pginfo = (DB_PGINFO *)cookie->data;
- if (!pginfo->needswap)
+ if (!F_ISSET(pginfo, DB_AM_SWAP))
return (0);
h = pp;
return (h->type == P_HASHMETA ? __ham_mswap(pp) :
- __db_byteswap(dbenv, pg, pp, pginfo->db_pagesize, 0));
+ __db_byteswap(dbenv, dummydbp, pg, pp, pginfo->db_pagesize, 0));
}
/*
@@ -108,5 +110,7 @@ __ham_mswap(pg)
SWAP32(p); /* h_charkey */
for (i = 0; i < NCACHED; ++i)
SWAP32(p); /* spares */
+ p += 59 * sizeof(u_int32_t); /* unused */
+ SWAP32(p); /* crypto_magic */
return (0);
}
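
The metadata swap above walks the page one 32-bit field at a time, skipping the unused region before the new crypto_magic field. Below is a self-contained sketch of that stepping swap using a local helper rather than the tree's SWAP32 macro, which, judging from how it is used in the loop, both swaps in place and advances the cursor.

    #include <stdio.h>

    /* Reverse the four bytes at *pp and advance the cursor. */
    static void
    swap32_step(unsigned char **pp)
    {
        unsigned char *p = *pp, t;

        t = p[0]; p[0] = p[3]; p[3] = t;
        t = p[1]; p[1] = p[2]; p[2] = t;
        *pp += 4;
    }

    int main(void)
    {
        unsigned char meta[8] = { 0x11, 0x22, 0x33, 0x44, 0xAA, 0xBB, 0xCC, 0xDD };
        unsigned char *p = meta;
        int i;

        for (i = 0; i < 2; i++)          /* swap two consecutive fields */
            swap32_step(&p);
        printf("%02x%02x%02x%02x %02x%02x%02x%02x\n",
            meta[0], meta[1], meta[2], meta[3],
            meta[4], meta[5], meta[6], meta[7]);  /* 44332211 ddccbbaa */
        return (0);
    }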
diff --git a/bdb/hash/hash_dup.c b/bdb/hash/hash_dup.c
index f5fbf4f472f..ec70e519d54 100644
--- a/bdb/hash/hash_dup.c
+++ b/bdb/hash/hash_dup.c
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Copyright (c) 1996-2002
* Sleepycat Software. All rights reserved.
*/
/*
@@ -38,20 +38,14 @@
#include "db_config.h"
#ifndef lint
-static const char revid[] = "$Id: hash_dup.c,v 11.49 2000/12/21 21:54:35 margo Exp $";
+static const char revid[] = "$Id: hash_dup.c,v 11.76 2002/08/06 05:34:40 bostic Exp $";
#endif /* not lint */
/*
* PACKAGE: hashing
*
* DESCRIPTION:
- * Manipulation of duplicates for the hash package.
- *
- * ROUTINES:
- *
- * External
- * __add_dup
- * Internal
+ * Manipulation of duplicates for the hash package.
*/
#ifndef NO_SYSTEM_INCLUDES
@@ -61,13 +55,15 @@ static const char revid[] = "$Id: hash_dup.c,v 11.49 2000/12/21 21:54:35 margo E
#endif
#include "db_int.h"
-#include "db_page.h"
-#include "hash.h"
-#include "btree.h"
-#include "txn.h"
+#include "dbinc/db_page.h"
+#include "dbinc/hash.h"
+#include "dbinc/btree.h"
+static int __ham_c_chgpg __P((DBC *,
+ db_pgno_t, u_int32_t, db_pgno_t, u_int32_t));
static int __ham_check_move __P((DBC *, u_int32_t));
static int __ham_dcursor __P((DBC *, db_pgno_t, u_int32_t));
+static int __ham_move_offpage __P((DBC *, PAGE *, u_int32_t, db_pgno_t));
/*
* Called from hash_access to add a duplicate key. nval is the new
@@ -92,13 +88,15 @@ __ham_add_dup(dbc, nval, flags, pgnop)
db_pgno_t *pgnop;
{
DB *dbp;
- HASH_CURSOR *hcp;
DBT pval, tmp_val;
+ DB_MPOOLFILE *mpf;
+ HASH_CURSOR *hcp;
u_int32_t add_bytes, new_size;
int cmp, ret;
u_int8_t *hk;
dbp = dbc->dbp;
+ mpf = dbp->mpf;
hcp = (HASH_CURSOR *)dbc->internal;
DB_ASSERT(flags != DB_CURRENT);
@@ -117,12 +115,12 @@ __ham_add_dup(dbc, nval, flags, pgnop)
* hcp->dndx is the first free ndx or the index of the
* current pointer into the duplicate set.
*/
- hk = H_PAIRDATA(hcp->page, hcp->indx);
+ hk = H_PAIRDATA(dbp, hcp->page, hcp->indx);
/* Add the len bytes to the current singleton. */
if (HPAGE_PTYPE(hk) != H_DUPLICATE)
add_bytes += DUP_SIZE(0);
new_size =
- LEN_HKEYDATA(hcp->page, dbp->pgsize, H_DATAINDEX(hcp->indx)) +
+ LEN_HKEYDATA(dbp, hcp->page, dbp->pgsize, H_DATAINDEX(hcp->indx)) +
add_bytes;
/*
@@ -132,7 +130,7 @@ __ham_add_dup(dbc, nval, flags, pgnop)
*/
if (HPAGE_PTYPE(hk) != H_OFFDUP &&
(HPAGE_PTYPE(hk) == H_OFFPAGE || ISBIG(hcp, new_size) ||
- add_bytes > P_FREESPACE(hcp->page))) {
+ add_bytes > P_FREESPACE(dbp, hcp->page))) {
if ((ret = __ham_dup_convert(dbc)) != 0)
return (ret);
@@ -145,14 +143,14 @@ __ham_add_dup(dbc, nval, flags, pgnop)
if (HPAGE_PTYPE(hk) != H_DUPLICATE) {
pval.flags = 0;
pval.data = HKEYDATA_DATA(hk);
- pval.size = LEN_HDATA(hcp->page, dbp->pgsize,
+ pval.size = LEN_HDATA(dbp, hcp->page, dbp->pgsize,
hcp->indx);
if ((ret = __ham_make_dup(dbp->dbenv,
- &pval, &tmp_val, &dbc->rdata.data,
- &dbc->rdata.ulen)) != 0 || (ret =
+ &pval, &tmp_val, &dbc->my_rdata.data,
+ &dbc->my_rdata.ulen)) != 0 || (ret =
__ham_replpair(dbc, &tmp_val, 1)) != 0)
return (ret);
- hk = H_PAIRDATA(hcp->page, hcp->indx);
+ hk = H_PAIRDATA(dbp, hcp->page, hcp->indx);
HPAGE_PTYPE(hk) = H_DUPLICATE;
/*
@@ -167,7 +165,7 @@ __ham_add_dup(dbc, nval, flags, pgnop)
/* Now make the new entry a duplicate. */
if ((ret = __ham_make_dup(dbp->dbenv, nval,
- &tmp_val, &dbc->rdata.data, &dbc->rdata.ulen)) != 0)
+ &tmp_val, &dbc->my_rdata.data, &dbc->my_rdata.ulen)) != 0)
return (ret);
tmp_val.dlen = 0;
@@ -176,13 +174,14 @@ __ham_add_dup(dbc, nval, flags, pgnop)
case DB_KEYLAST:
case DB_NODUPDATA:
if (dbp->dup_compare != NULL) {
- __ham_dsearch(dbc, nval, &tmp_val.doff, &cmp);
+ __ham_dsearch(dbc,
+ nval, &tmp_val.doff, &cmp, flags);
/* dup dups are not supported w/ sorted dups */
if (cmp == 0)
return (__db_duperr(dbp, flags));
} else {
- hcp->dup_tlen = LEN_HDATA(hcp->page,
+ hcp->dup_tlen = LEN_HDATA(dbp, hcp->page,
dbp->pgsize, hcp->indx);
hcp->dup_len = nval->size;
F_SET(hcp, H_ISDUP);
@@ -203,8 +202,7 @@ __ham_add_dup(dbc, nval, flags, pgnop)
/* Add the duplicate. */
ret = __ham_replpair(dbc, &tmp_val, 0);
if (ret == 0)
- ret = memp_fset(dbp->mpf, hcp->page, DB_MPOOL_DIRTY);
-
+ ret = mpf->set(mpf, hcp->page, DB_MPOOL_DIRTY);
if (ret != 0)
return (ret);
@@ -213,12 +211,12 @@ __ham_add_dup(dbc, nval, flags, pgnop)
case DB_AFTER:
hcp->dup_off += DUP_SIZE(hcp->dup_len);
hcp->dup_len = nval->size;
- hcp->dup_tlen += DUP_SIZE(nval->size);
+ hcp->dup_tlen += (db_indx_t)DUP_SIZE(nval->size);
break;
case DB_KEYFIRST:
case DB_KEYLAST:
case DB_BEFORE:
- hcp->dup_tlen += DUP_SIZE(nval->size);
+ hcp->dup_tlen += (db_indx_t)DUP_SIZE(nval->size);
hcp->dup_len = nval->size;
break;
}
@@ -230,8 +228,8 @@ __ham_add_dup(dbc, nval, flags, pgnop)
* If we get here, then we're on duplicate pages; set pgnop and
* return so the common code can handle it.
*/
- memcpy(pgnop,
- HOFFDUP_PGNO(H_PAIRDATA(hcp->page, hcp->indx)), sizeof(db_pgno_t));
+ memcpy(pgnop, HOFFDUP_PGNO(H_PAIRDATA(dbp, hcp->page, hcp->indx)),
+ sizeof(db_pgno_t));
return (ret);
}
@@ -245,19 +243,21 @@ int
__ham_dup_convert(dbc)
DBC *dbc;
{
+ BOVERFLOW bo;
DB *dbp;
DBC **hcs;
+ DBT dbt;
DB_LSN lsn;
- PAGE *dp;
+ DB_MPOOLFILE *mpf;
HASH_CURSOR *hcp;
- BOVERFLOW bo;
- DBT dbt;
HOFFPAGE ho;
+ PAGE *dp;
db_indx_t i, len, off;
int c, ret, t_ret;
u_int8_t *p, *pend;
dbp = dbc->dbp;
+ mpf = dbp->mpf;
hcp = (HASH_CURSOR *)dbc->internal;
/*
@@ -274,24 +274,24 @@ __ham_dup_convert(dbc)
*/
if ((ret = __ham_get_clist(dbp,
PGNO(hcp->page), (u_int32_t)hcp->indx, &hcs)) != 0)
- return (ret);
+ goto err;
/*
* Now put the duplicates onto the new page.
*/
dbt.flags = 0;
- switch (HPAGE_PTYPE(H_PAIRDATA(hcp->page, hcp->indx))) {
+ switch (HPAGE_PTYPE(H_PAIRDATA(dbp, hcp->page, hcp->indx))) {
case H_KEYDATA:
/* Simple case, one key on page; move it to dup page. */
- dbt.size = LEN_HDATA(hcp->page, dbp->pgsize, hcp->indx);
- dbt.data = HKEYDATA_DATA(H_PAIRDATA(hcp->page, hcp->indx));
+ dbt.size = LEN_HDATA(dbp, hcp->page, dbp->pgsize, hcp->indx);
+ dbt.data = HKEYDATA_DATA(H_PAIRDATA(dbp, hcp->page, hcp->indx));
ret = __db_pitem(dbc,
dp, 0, BKEYDATA_SIZE(dbt.size), NULL, &dbt);
goto finish;
case H_OFFPAGE:
/* Simple case, one key on page; move it to dup page. */
- memcpy(&ho,
- P_ENTRY(hcp->page, H_DATAINDEX(hcp->indx)), HOFFPAGE_SIZE);
+ memcpy(&ho, P_ENTRY(dbp, hcp->page, H_DATAINDEX(hcp->indx)),
+ HOFFPAGE_SIZE);
UMRW_SET(bo.unused1);
B_TSET(bo.type, ho.type, 0);
UMRW_SET(bo.unused2);
@@ -301,17 +301,15 @@ __ham_dup_convert(dbc)
dbt.data = &bo;
ret = __db_pitem(dbc, dp, 0, dbt.size, &dbt, NULL);
-
finish: if (ret == 0) {
- memp_fset(dbp->mpf, dp, DB_MPOOL_DIRTY);
- /*
- * Update any other cursors
- */
- if (hcs != NULL && DB_LOGGING(dbc)
- && IS_SUBTRANSACTION(dbc->txn)) {
- if ((ret = __ham_chgpg_log(dbp->dbenv,
- dbc->txn, &lsn, 0, dbp->log_fileid,
- DB_HAM_DUP, PGNO(hcp->page),
+ if ((ret = mpf->set(mpf, dp, DB_MPOOL_DIRTY)) != 0)
+ break;
+
+ /* Update any other cursors. */
+ if (hcs != NULL && DBC_LOGGING(dbc) &&
+ IS_SUBTRANSACTION(dbc->txn)) {
+ if ((ret = __ham_chgpg_log(dbp, dbc->txn,
+ &lsn, 0, DB_HAM_DUP, PGNO(hcp->page),
PGNO(dp), hcp->indx, 0)) != 0)
break;
}
@@ -319,14 +317,12 @@ finish: if (ret == 0) {
if ((ret = __ham_dcursor(hcs[c],
PGNO(dp), 0)) != 0)
break;
-
}
break;
-
case H_DUPLICATE:
- p = HKEYDATA_DATA(H_PAIRDATA(hcp->page, hcp->indx));
+ p = HKEYDATA_DATA(H_PAIRDATA(dbp, hcp->page, hcp->indx));
pend = p +
- LEN_HDATA(hcp->page, dbp->pgsize, hcp->indx);
+ LEN_HDATA(dbp, hcp->page, dbp->pgsize, hcp->indx);
/*
* We need to maintain the duplicate cursor position.
@@ -344,39 +340,48 @@ finish: if (ret == 0) {
if ((ret = __db_pitem(dbc, dp,
i, BKEYDATA_SIZE(dbt.size), NULL, &dbt)) != 0)
break;
- /*
- * Update any other cursors
- */
+
+ /* Update any other cursors */
+ if (hcs != NULL && DBC_LOGGING(dbc) &&
+ IS_SUBTRANSACTION(dbc->txn)) {
+ if ((ret = __ham_chgpg_log(dbp, dbc->txn,
+ &lsn, 0, DB_HAM_DUP, PGNO(hcp->page),
+ PGNO(dp), hcp->indx, i)) != 0)
+ break;
+ }
for (c = 0; hcs != NULL && hcs[c] != NULL; c++)
if (((HASH_CURSOR *)(hcs[c]->internal))->dup_off
== off && (ret = __ham_dcursor(hcs[c],
PGNO(dp), i)) != 0)
- goto out;
+ goto err;
off += len + 2 * sizeof(db_indx_t);
}
-out: break;
-
+ break;
default:
- ret = __db_pgfmt(dbp, (u_long)hcp->pgno);
+ ret = __db_pgfmt(dbp->dbenv, (u_long)hcp->pgno);
break;
}
- if (ret == 0) {
- /*
- * Now attach this to the source page in place of
- * the old duplicate item.
- */
- __ham_move_offpage(dbc, hcp->page,
+
+ /*
+ * Now attach this to the source page in place of the old duplicate
+ * item.
+ */
+ if (ret == 0)
+ ret = __ham_move_offpage(dbc, hcp->page,
(u_int32_t)H_DATAINDEX(hcp->indx), PGNO(dp));
- ret = memp_fset(dbp->mpf, hcp->page, DB_MPOOL_DIRTY);
- if ((t_ret = memp_fput(dbp->mpf, dp, DB_MPOOL_DIRTY)) != 0)
- ret = t_ret;
+err: if (ret == 0)
+ ret = mpf->set(mpf, hcp->page, DB_MPOOL_DIRTY);
+
+ if ((t_ret =
+ mpf->put(mpf, dp, ret == 0 ? DB_MPOOL_DIRTY : 0)) != 0 && ret == 0)
+ ret = t_ret;
+
+ if (ret == 0)
hcp->dup_tlen = hcp->dup_off = hcp->dup_len = 0;
- } else
- (void)__db_free(dbc, dp);
if (hcs != NULL)
- __os_free(hcs, 0);
+ __os_free(dbp->dbenv, hcs);
return (ret);
}
@@ -444,9 +449,10 @@ __ham_check_move(dbc, add_len)
u_int32_t add_len;
{
DB *dbp;
- HASH_CURSOR *hcp;
DBT k, d;
DB_LSN new_lsn;
+ DB_MPOOLFILE *mpf;
+ HASH_CURSOR *hcp;
PAGE *next_pagep;
db_pgno_t next_pgno;
u_int32_t new_datalen, old_len, rectype;
@@ -454,9 +460,10 @@ __ham_check_move(dbc, add_len)
int ret;
dbp = dbc->dbp;
+ mpf = dbp->mpf;
hcp = (HASH_CURSOR *)dbc->internal;
- hk = H_PAIRDATA(hcp->page, hcp->indx);
+ hk = H_PAIRDATA(dbp, hcp->page, hcp->indx);
/*
* If the item is already off page duplicates or an offpage item,
@@ -465,7 +472,7 @@ __ham_check_move(dbc, add_len)
if (HPAGE_PTYPE(hk) == H_OFFDUP || HPAGE_PTYPE(hk) == H_OFFPAGE)
return (0);
- old_len = LEN_HITEM(hcp->page, dbp->pgsize, H_DATAINDEX(hcp->indx));
+ old_len = LEN_HITEM(dbp, hcp->page, dbp->pgsize, H_DATAINDEX(hcp->indx));
new_datalen = old_len - HKEYDATA_SIZE(0) + add_len;
if (HPAGE_PTYPE(hk) != H_DUPLICATE)
new_datalen += DUP_SIZE(0);
@@ -479,10 +486,10 @@ __ham_check_move(dbc, add_len)
* If neither of these is true, then we can return.
*/
if (ISBIG(hcp, new_datalen) && (old_len > HOFFDUP_SIZE ||
- HOFFDUP_SIZE - old_len <= P_FREESPACE(hcp->page)))
+ HOFFDUP_SIZE - old_len <= P_FREESPACE(dbp, hcp->page)))
return (0);
- if (!ISBIG(hcp, new_datalen) && add_len <= P_FREESPACE(hcp->page))
+ if (!ISBIG(hcp, new_datalen) && add_len <= P_FREESPACE(dbp, hcp->page))
return (0);
/*
@@ -494,20 +501,20 @@ __ham_check_move(dbc, add_len)
new_datalen = ISBIG(hcp, new_datalen) ?
HOFFDUP_SIZE : HKEYDATA_SIZE(new_datalen);
- new_datalen += LEN_HITEM(hcp->page, dbp->pgsize, H_KEYINDEX(hcp->indx));
+ new_datalen += LEN_HITEM(dbp, hcp->page, dbp->pgsize, H_KEYINDEX(hcp->indx));
next_pagep = NULL;
for (next_pgno = NEXT_PGNO(hcp->page); next_pgno != PGNO_INVALID;
next_pgno = NEXT_PGNO(next_pagep)) {
if (next_pagep != NULL &&
- (ret = memp_fput(dbp->mpf, next_pagep, 0)) != 0)
+ (ret = mpf->put(mpf, next_pagep, 0)) != 0)
return (ret);
- if ((ret = memp_fget(dbp->mpf,
+ if ((ret = mpf->get(mpf,
&next_pgno, DB_MPOOL_CREATE, &next_pagep)) != 0)
return (ret);
- if (P_FREESPACE(next_pagep) >= new_datalen)
+ if (P_FREESPACE(dbp, next_pagep) >= new_datalen)
break;
}
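
The loop above walks the bucket's overflow chain looking for the first page with room before a new page is tacked on. A toy version of that decision follows; the structures are invented for illustration and are not BDB pages.

    #include <stdio.h>

    /* Toy overflow chain; each "page" knows its free bytes and successor. */
    struct toy_page { unsigned pgno, freebytes, next; };   /* next == 0: end */

    /*
     * Return the first page with room for `need` bytes, or 0 if a fresh
     * overflow page has to be added after the last one in the chain.
     */
    static unsigned
    find_room(const struct toy_page *pages, unsigned first, unsigned need)
    {
        unsigned p;

        for (p = first; p != 0; p = pages[p].next)
            if (pages[p].freebytes >= need)
                return (p);
        return (0);
    }

    int main(void)
    {
        /* indexed by pgno for simplicity; pgno 0 is unused */
        struct toy_page pages[] =
            { {0, 0, 0}, {1, 12, 2}, {2, 40, 3}, {3, 200, 0} };

        printf("64 bytes first fit on page %u\n", find_room(pages, 1, 64));
        return (0);
    }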
@@ -517,58 +524,58 @@ __ham_check_move(dbc, add_len)
return (ret);
/* Add new page at the end of the chain. */
- if (P_FREESPACE(next_pagep) < new_datalen && (ret =
+ if (P_FREESPACE(dbp, next_pagep) < new_datalen && (ret =
__ham_add_ovflpage(dbc, next_pagep, 1, &next_pagep)) != 0) {
- (void)memp_fput(dbp->mpf, next_pagep, 0);
+ (void)mpf->put(mpf, next_pagep, 0);
return (ret);
}
/* Copy the item to the new page. */
- if (DB_LOGGING(dbc)) {
+ if (DBC_LOGGING(dbc)) {
rectype = PUTPAIR;
k.flags = 0;
d.flags = 0;
if (HPAGE_PTYPE(
- H_PAIRKEY(hcp->page, hcp->indx)) == H_OFFPAGE) {
+ H_PAIRKEY(dbp, hcp->page, hcp->indx)) == H_OFFPAGE) {
rectype |= PAIR_KEYMASK;
- k.data = H_PAIRKEY(hcp->page, hcp->indx);
+ k.data = H_PAIRKEY(dbp, hcp->page, hcp->indx);
k.size = HOFFPAGE_SIZE;
} else {
k.data =
- HKEYDATA_DATA(H_PAIRKEY(hcp->page, hcp->indx));
- k.size = LEN_HKEY(hcp->page, dbp->pgsize, hcp->indx);
+ HKEYDATA_DATA(H_PAIRKEY(dbp, hcp->page, hcp->indx));
+ k.size =
+ LEN_HKEY(dbp, hcp->page, dbp->pgsize, hcp->indx);
}
if (HPAGE_PTYPE(hk) == H_OFFPAGE) {
rectype |= PAIR_DATAMASK;
- d.data = H_PAIRDATA(hcp->page, hcp->indx);
+ d.data = H_PAIRDATA(dbp, hcp->page, hcp->indx);
d.size = HOFFPAGE_SIZE;
} else {
- if (HPAGE_PTYPE(H_PAIRDATA(hcp->page, hcp->indx))
+ if (HPAGE_PTYPE(H_PAIRDATA(dbp, hcp->page, hcp->indx))
== H_DUPLICATE)
rectype |= PAIR_DUPMASK;
d.data =
- HKEYDATA_DATA(H_PAIRDATA(hcp->page, hcp->indx));
- d.size = LEN_HDATA(hcp->page, dbp->pgsize, hcp->indx);
+ HKEYDATA_DATA(H_PAIRDATA(dbp, hcp->page, hcp->indx));
+ d.size = LEN_HDATA(dbp, hcp->page,
+ dbp->pgsize, hcp->indx);
}
- if ((ret = __ham_insdel_log(dbp->dbenv,
- dbc->txn, &new_lsn, 0, rectype,
- dbp->log_fileid, PGNO(next_pagep),
+ if ((ret = __ham_insdel_log(dbp,
+ dbc->txn, &new_lsn, 0, rectype, PGNO(next_pagep),
(u_int32_t)NUM_ENT(next_pagep), &LSN(next_pagep),
&k, &d)) != 0) {
- (void)memp_fput(dbp->mpf, next_pagep, 0);
+ (void)mpf->put(mpf, next_pagep, 0);
return (ret);
}
+ } else
+ LSN_NOT_LOGGED(new_lsn);
- /* Move lsn onto page. */
- LSN(next_pagep) = new_lsn; /* Structure assignment. */
- }
+ /* Move lsn onto page. */
+ LSN(next_pagep) = new_lsn; /* Structure assignment. */
- __ham_copy_item(dbp->pgsize,
- hcp->page, H_KEYINDEX(hcp->indx), next_pagep);
- __ham_copy_item(dbp->pgsize,
- hcp->page, H_DATAINDEX(hcp->indx), next_pagep);
+ __ham_copy_item(dbp, hcp->page, H_KEYINDEX(hcp->indx), next_pagep);
+ __ham_copy_item(dbp, hcp->page, H_DATAINDEX(hcp->indx), next_pagep);
/*
* We've just manually inserted a key and set of data onto
@@ -581,7 +588,7 @@ __ham_check_move(dbc, add_len)
* Note that __ham_del_pair should dirty the page we're moving
* the items from, so we need only dirty the new page ourselves.
*/
- if ((ret = memp_fset(dbp->mpf, next_pagep, DB_MPOOL_DIRTY)) != 0)
+ if ((ret = mpf->set(mpf, next_pagep, DB_MPOOL_DIRTY)) != 0)
goto out;
/* Update all cursors that used to point to this item. */
@@ -596,12 +603,17 @@ __ham_check_move(dbc, add_len)
* __ham_del_pair decremented nelem. This is incorrect; we
* manually copied the element elsewhere, so the total number
* of elements hasn't changed. Increment it again.
+ *
+ * !!!
+ * Note that we still have the metadata page pinned, and
+ * __ham_del_pair dirtied it, so we don't need to set the dirty
+ * flag again.
*/
if (!STD_LOCKING(dbc))
hcp->hdr->nelem++;
out:
- (void)memp_fput(dbp->mpf, hcp->page, DB_MPOOL_DIRTY);
+ (void)mpf->put(mpf, hcp->page, DB_MPOOL_DIRTY);
hcp->page = next_pagep;
hcp->pgno = PGNO(hcp->page);
hcp->indx = NUM_ENT(hcp->page) - 2;
@@ -620,9 +632,8 @@ out:
* This is really just a special case of __onpage_replace; we should
* probably combine them.
*
- * PUBLIC: void __ham_move_offpage __P((DBC *, PAGE *, u_int32_t, db_pgno_t));
*/
-void
+static int
__ham_move_offpage(dbc, pagep, ndx, pgno)
DBC *dbc;
PAGE *pagep;
@@ -630,48 +641,51 @@ __ham_move_offpage(dbc, pagep, ndx, pgno)
db_pgno_t pgno;
{
DB *dbp;
- HASH_CURSOR *hcp;
DBT new_dbt;
DBT old_dbt;
HOFFDUP od;
- db_indx_t i;
+ db_indx_t i, *inp;
int32_t shrink;
u_int8_t *src;
+ int ret;
dbp = dbc->dbp;
- hcp = (HASH_CURSOR *)dbc->internal;
od.type = H_OFFDUP;
UMRW_SET(od.unused[0]);
UMRW_SET(od.unused[1]);
UMRW_SET(od.unused[2]);
od.pgno = pgno;
+ ret = 0;
- if (DB_LOGGING(dbc)) {
+ if (DBC_LOGGING(dbc)) {
new_dbt.data = &od;
new_dbt.size = HOFFDUP_SIZE;
- old_dbt.data = P_ENTRY(pagep, ndx);
- old_dbt.size = LEN_HITEM(pagep, dbp->pgsize, ndx);
- (void)__ham_replace_log(dbp->dbenv,
- dbc->txn, &LSN(pagep), 0, dbp->log_fileid,
+ old_dbt.data = P_ENTRY(dbp, pagep, ndx);
+ old_dbt.size = LEN_HITEM(dbp, pagep, dbp->pgsize, ndx);
+ if ((ret = __ham_replace_log(dbp, dbc->txn, &LSN(pagep), 0,
PGNO(pagep), (u_int32_t)ndx, &LSN(pagep), -1,
- &old_dbt, &new_dbt, 0);
- }
+ &old_dbt, &new_dbt, 0)) != 0)
+ return (ret);
+ } else
+ LSN_NOT_LOGGED(LSN(pagep));
- shrink = LEN_HITEM(pagep, dbp->pgsize, ndx) - HOFFDUP_SIZE;
+ shrink = LEN_HITEM(dbp, pagep, dbp->pgsize, ndx) - HOFFDUP_SIZE;
+ inp = P_INP(dbp, pagep);
if (shrink != 0) {
/* Copy data. */
src = (u_int8_t *)(pagep) + HOFFSET(pagep);
- memmove(src + shrink, src, pagep->inp[ndx] - HOFFSET(pagep));
+ memmove(src + shrink, src, inp[ndx] - HOFFSET(pagep));
HOFFSET(pagep) += shrink;
/* Update index table. */
for (i = ndx; i < NUM_ENT(pagep); i++)
- pagep->inp[i] += shrink;
+ inp[i] += shrink;
}
/* Now copy the offdup entry onto the page. */
- memcpy(P_ENTRY(pagep, ndx), &od, HOFFDUP_SIZE);
+ memcpy(P_ENTRY(dbp, pagep, ndx), &od, HOFFDUP_SIZE);
+ return (ret);
}
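
The shrink/memmove/index-fixup sequence above is the usual "replace a large on-page item with a smaller one" dance: slide the packed tail region up by the size difference, bump the affected index entries, then copy the new entry into place. A toy page makes it visible; the sizes and layout here are invented for illustration (ndx is 0, so both index entries move).

    #include <stdio.h>
    #include <string.h>

    #define PGSZ 32

    int main(void)
    {
        /* Toy page: items are packed at the tail, inp[] holds offsets,
         * hoffset marks where the packed region begins. */
        unsigned char pg[PGSZ];
        unsigned short inp[2], hoffset, shrink;
        int i;

        memset(pg, 0, sizeof(pg));
        memcpy(pg + 22, "HELLOHELLO", 10); inp[0] = 22;  /* item 0, 10 bytes */
        memcpy(pg + 17, "WORLD", 5);       inp[1] = 17;  /* item 1,  5 bytes */
        hoffset = 17;

        /* Replace item 0 with a 4-byte entry: slide everything packed
         * below it up by the difference, bump the offsets, copy it in. */
        shrink = 10 - 4;
        memmove(pg + hoffset + shrink, pg + hoffset, inp[0] - hoffset);
        hoffset += shrink;
        for (i = 0; i < 2; i++)          /* i runs from ndx to NUM_ENT - 1 */
            inp[i] += shrink;
        memcpy(pg + inp[0], "DUP!", 4);

        printf("item0=%.4s@%u item1=%.5s@%u packed region starts at %u\n",
            (char *)(pg + inp[0]), (unsigned)inp[0],
            (char *)(pg + inp[1]), (unsigned)inp[1], (unsigned)hoffset);
        return (0);
    }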
/*
@@ -679,13 +693,14 @@ __ham_move_offpage(dbc, pagep, ndx, pgno)
* Locate a particular duplicate in a duplicate set. Make sure that
* we exit with the cursor set appropriately.
*
- * PUBLIC: void __ham_dsearch __P((DBC *, DBT *, u_int32_t *, int *));
+ * PUBLIC: void __ham_dsearch
+ * PUBLIC: __P((DBC *, DBT *, u_int32_t *, int *, u_int32_t));
*/
void
-__ham_dsearch(dbc, dbt, offp, cmpp)
+__ham_dsearch(dbc, dbt, offp, cmpp, flags)
DBC *dbc;
DBT *dbt;
- u_int32_t *offp;
+ u_int32_t *offp, flags;
int *cmpp;
{
DB *dbp;
@@ -697,25 +712,36 @@ __ham_dsearch(dbc, dbt, offp, cmpp)
dbp = dbc->dbp;
hcp = (HASH_CURSOR *)dbc->internal;
- if (dbp->dup_compare == NULL)
- func = __bam_defcmp;
- else
- func = dbp->dup_compare;
+ func = dbp->dup_compare == NULL ? __bam_defcmp : dbp->dup_compare;
i = F_ISSET(hcp, H_CONTINUE) ? hcp->dup_off: 0;
- data = HKEYDATA_DATA(H_PAIRDATA(hcp->page, hcp->indx)) + i;
- hcp->dup_tlen = LEN_HDATA(hcp->page, dbp->pgsize, hcp->indx);
+ data = HKEYDATA_DATA(H_PAIRDATA(dbp, hcp->page, hcp->indx)) + i;
+ hcp->dup_tlen = LEN_HDATA(dbp, hcp->page, dbp->pgsize, hcp->indx);
while (i < hcp->dup_tlen) {
memcpy(&len, data, sizeof(db_indx_t));
data += sizeof(db_indx_t);
cur.data = data;
cur.size = (u_int32_t)len;
+
+ /*
+ * If we find an exact match, we're done. If in a sorted
+ * duplicate set and the item is larger than our test item,
+ * we're done. In the latter case, if permitting partial
+ * matches, it's not a failure.
+ */
*cmpp = func(dbp, dbt, &cur);
- if (*cmpp == 0 || (*cmpp < 0 && dbp->dup_compare != NULL))
+ if (*cmpp == 0)
+ break;
+ if (*cmpp < 0 && dbp->dup_compare != NULL) {
+ if (flags == DB_GET_BOTH_RANGE)
+ *cmpp = 0;
break;
+ }
+
i += len + 2 * sizeof(db_indx_t);
data += len + sizeof(db_indx_t);
}
+
*offp = i;
hcp->dup_off = i;
hcp->dup_len = len;
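
The loop above reads an on-page duplicate set as a run of length/data/length triples, stepping by the data length plus two index-sized words. Here is a standalone sketch of that walk; the comparison is a plain memcmp, so the length tie-break and the dup_compare hook of the real code are omitted, and indx_t is a local stand-in for db_indx_t.

    #include <stdio.h>
    #include <string.h>

    typedef unsigned short indx_t;   /* stand-in for db_indx_t */

    /*
     * Walk a buffer in the len/data/len encoding and return the offset
     * of the first element whose bytes compare >= key, or the total
     * length if none does.
     */
    static size_t
    dup_walk(const unsigned char *set, size_t tlen,
        const void *key, size_t klen)
    {
        size_t i = 0;
        indx_t len;

        while (i < tlen) {
            memcpy(&len, set + i, sizeof(len));
            if (memcmp(set + i + sizeof(len), key,
                len < klen ? len : klen) >= 0)
                break;
            i += len + 2 * sizeof(indx_t);
        }
        return (i);
    }

    int main(void)
    {
        unsigned char set[64];
        const char *elems[] = { "aa", "cc" };
        size_t off = 0;
        indx_t len;
        int e;

        /* Build two elements, "aa" and "cc", in the len/data/len encoding. */
        for (e = 0; e < 2; e++) {
            len = (indx_t)strlen(elems[e]);
            memcpy(set + off, &len, sizeof(len)); off += sizeof(len);
            memcpy(set + off, elems[e], len);     off += len;
            memcpy(set + off, &len, sizeof(len)); off += sizeof(len);
        }
        printf("offset of first dup >= \"bb\": %zu\n",
            dup_walk(set, off, "bb", 2));   /* prints 6 */
        return (0);
    }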
@@ -727,29 +753,22 @@ __ham_dsearch(dbc, dbt, offp, cmpp)
* __ham_cprint --
* Display the current cursor list.
*
- * PUBLIC: int __ham_cprint __P((DB *));
+ * PUBLIC: void __ham_cprint __P((DBC *));
*/
-int
-__ham_cprint(dbp)
- DB *dbp;
+void
+__ham_cprint(dbc)
+ DBC *dbc;
{
HASH_CURSOR *cp;
- DBC *dbc;
- MUTEX_THREAD_LOCK(dbp->dbenv, dbp->mutexp);
- for (dbc = TAILQ_FIRST(&dbp->active_queue);
- dbc != NULL; dbc = TAILQ_NEXT(dbc, links)) {
- cp = (HASH_CURSOR *)dbc->internal;
- fprintf(stderr, "%#0lx->%#0lx: page: %lu index: %lu",
- P_TO_ULONG(dbc), P_TO_ULONG(cp), (u_long)cp->pgno,
- (u_long)cp->indx);
- if (F_ISSET(cp, H_DELETED))
- fprintf(stderr, " (deleted)");
- fprintf(stderr, "\n");
- }
- MUTEX_THREAD_UNLOCK(dbp->dbenv, dbp->mutexp);
+ cp = (HASH_CURSOR *)dbc->internal;
- return (0);
+ fprintf(stderr, "%#0lx->%#0lx: page: %lu index: %lu",
+ P_TO_ULONG(dbc), P_TO_ULONG(cp), (u_long)cp->pgno,
+ (u_long)cp->indx);
+ if (F_ISSET(cp, H_DELETED))
+ fprintf(stderr, " (deleted)");
+ fprintf(stderr, "\n");
}
#endif /* DEBUG */
@@ -765,17 +784,17 @@ __ham_dcursor(dbc, pgno, indx)
u_int32_t indx;
{
DB *dbp;
- DBC *dbc_nopd;
HASH_CURSOR *hcp;
BTREE_CURSOR *dcp;
int ret;
dbp = dbc->dbp;
+ hcp = (HASH_CURSOR *)dbc->internal;
- if ((ret = __db_c_newopd(dbc, pgno, &dbc_nopd)) != 0)
+ if ((ret = __db_c_newopd(dbc, pgno, hcp->opd, &hcp->opd)) != 0)
return (ret);
- dcp = (BTREE_CURSOR *)dbc_nopd->internal;
+ dcp = (BTREE_CURSOR *)hcp->opd->internal;
dcp->pgno = pgno;
dcp->indx = indx;
@@ -792,14 +811,81 @@ __ham_dcursor(dbc, pgno, indx)
* Transfer the deleted flag from the top-level cursor to the
* created one.
*/
- hcp = (HASH_CURSOR *)dbc->internal;
if (F_ISSET(hcp, H_DELETED)) {
F_SET(dcp, C_DELETED);
F_CLR(hcp, H_DELETED);
}
- /* Stack the cursors and reset the initial cursor's index. */
- hcp->opd = dbc_nopd;
+ return (0);
+}
+
+/*
+ * __ham_c_chgpg --
+ * Adjust the cursors after moving an item to a new page. We only
+ * move cursors that are pointing at this one item and are not
+ * deleted; since we only touch non-deleted cursors, and since
+ * (by definition) no item existed at the pgno/indx we're moving the
+ * item to, we're guaranteed that all the cursors we affect here or
+ * on abort really do refer to this one item.
+ */
+static int
+__ham_c_chgpg(dbc, old_pgno, old_index, new_pgno, new_index)
+ DBC *dbc;
+ db_pgno_t old_pgno, new_pgno;
+ u_int32_t old_index, new_index;
+{
+ DB *dbp, *ldbp;
+ DB_ENV *dbenv;
+ DB_LSN lsn;
+ DB_TXN *my_txn;
+ DBC *cp;
+ HASH_CURSOR *hcp;
+ int found, ret;
+
+ dbp = dbc->dbp;
+ dbenv = dbp->dbenv;
+
+ my_txn = IS_SUBTRANSACTION(dbc->txn) ? dbc->txn : NULL;
+ found = 0;
+
+ MUTEX_THREAD_LOCK(dbenv, dbenv->dblist_mutexp);
+ for (ldbp = __dblist_get(dbenv, dbp->adj_fileid);
+ ldbp != NULL && ldbp->adj_fileid == dbp->adj_fileid;
+ ldbp = LIST_NEXT(ldbp, dblistlinks)) {
+ MUTEX_THREAD_LOCK(dbenv, dbp->mutexp);
+ for (cp = TAILQ_FIRST(&ldbp->active_queue); cp != NULL;
+ cp = TAILQ_NEXT(cp, links)) {
+ if (cp == dbc || cp->dbtype != DB_HASH)
+ continue;
+
+ hcp = (HASH_CURSOR *)cp->internal;
+ /*
+ * If a cursor is deleted, it doesn't refer to this
+ * item--it just happens to have the same indx, but
+ * it points to a former neighbor. Don't move it.
+ */
+ if (F_ISSET(hcp, H_DELETED))
+ continue;
+
+ if (hcp->pgno == old_pgno) {
+ if (hcp->indx == old_index) {
+ hcp->pgno = new_pgno;
+ hcp->indx = new_index;
+ } else
+ continue;
+ if (my_txn != NULL && cp->txn != my_txn)
+ found = 1;
+ }
+ }
+ MUTEX_THREAD_UNLOCK(dbenv, dbp->mutexp);
+ }
+ MUTEX_THREAD_UNLOCK(dbenv, dbenv->dblist_mutexp);
+
+ if (found != 0 && DBC_LOGGING(dbc)) {
+ if ((ret = __ham_chgpg_log(dbp, my_txn, &lsn, 0, DB_HAM_CHGPG,
+ old_pgno, new_pgno, old_index, new_index)) != 0)
+ return (ret);
+ }
return (0);
}
diff --git a/bdb/hash/hash_func.c b/bdb/hash/hash_func.c
index 22b4f08ee70..c6cc2ad4460 100644
--- a/bdb/hash/hash_func.c
+++ b/bdb/hash/hash_func.c
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Copyright (c) 1996-2002
* Sleepycat Software. All rights reserved.
*/
/*
@@ -43,7 +43,7 @@
#include "db_config.h"
#ifndef lint
-static const char revid[] = "$Id: hash_func.c,v 11.7 2000/08/16 18:26:19 ubell Exp $";
+static const char revid[] = "$Id: hash_func.c,v 11.12 2002/03/28 19:49:42 bostic Exp $";
#endif /* not lint */
#ifndef NO_SYSTEM_INCLUDES
@@ -51,8 +51,6 @@ static const char revid[] = "$Id: hash_func.c,v 11.7 2000/08/16 18:26:19 ubell E
#endif
#include "db_int.h"
-#include "db_page.h"
-#include "hash.h"
/*
* __ham_func2 --
@@ -230,6 +228,11 @@ __ham_func5(dbp, key, len)
return (h);
}
+/*
+ * __ham_test --
+ *
+ * PUBLIC: u_int32_t __ham_test __P((DB *, const void *, u_int32_t));
+ */
u_int32_t
__ham_test(dbp, key, len)
DB *dbp;
diff --git a/bdb/hash/hash_meta.c b/bdb/hash/hash_meta.c
index d96a6db3207..9f224454869 100644
--- a/bdb/hash/hash_meta.c
+++ b/bdb/hash/hash_meta.c
@@ -1,14 +1,14 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1999, 2000
+ * Copyright (c) 1999-2002
* Sleepycat Software. All rights reserved.
*/
#include "db_config.h"
#ifndef lint
-static const char revid[] = "$Id: hash_meta.c,v 11.10 2000/12/21 21:54:35 margo Exp $";
+static const char revid[] = "$Id: hash_meta.c,v 11.19 2002/06/03 14:22:15 ubell Exp $";
#endif /* not lint */
#ifndef NO_SYSTEM_INCLUDES
@@ -16,11 +16,10 @@ static const char revid[] = "$Id: hash_meta.c,v 11.10 2000/12/21 21:54:35 margo
#endif
#include "db_int.h"
-#include "db_page.h"
-#include "hash.h"
-#include "db_shash.h"
-#include "lock.h"
-#include "txn.h"
+#include "dbinc/db_page.h"
+#include "dbinc/db_shash.h"
+#include "dbinc/hash.h"
+#include "dbinc/lock.h"
/*
* Acquire the meta-data page.
@@ -31,30 +30,32 @@ int
__ham_get_meta(dbc)
DBC *dbc;
{
- HASH_CURSOR *hcp;
- HASH *hashp;
DB *dbp;
+ DB_ENV *dbenv;
+ DB_MPOOLFILE *mpf;
+ HASH *hashp;
+ HASH_CURSOR *hcp;
int ret;
- hcp = (HASH_CURSOR *)dbc->internal;
dbp = dbc->dbp;
+ dbenv = dbp->dbenv;
+ mpf = dbp->mpf;
hashp = dbp->h_internal;
+ hcp = (HASH_CURSOR *)dbc->internal;
- if (dbp->dbenv != NULL &&
- STD_LOCKING(dbc) && !F_ISSET(dbc, DBC_RECOVER)) {
+ if (dbenv != NULL &&
+ STD_LOCKING(dbc) && !F_ISSET(dbc, DBC_RECOVER | DBC_COMPENSATE)) {
dbc->lock.pgno = hashp->meta_pgno;
- if ((ret = lock_get(dbp->dbenv, dbc->locker,
+ if ((ret = dbenv->lock_get(dbenv, dbc->locker,
DB_NONBLOCK(dbc) ? DB_LOCK_NOWAIT : 0,
&dbc->lock_dbt, DB_LOCK_READ, &hcp->hlock)) != 0)
return (ret);
}
- if ((ret = memp_fget(dbc->dbp->mpf,
+ if ((ret = mpf->get(mpf,
&hashp->meta_pgno, DB_MPOOL_CREATE, &(hcp->hdr))) != 0 &&
- hcp->hlock.off != LOCK_INVALID) {
- (void)lock_put(dbc->dbp->dbenv, &hcp->hlock);
- hcp->hlock.off = LOCK_INVALID;
- }
+ LOCK_ISSET(hcp->hlock))
+ (void)dbenv->lock_put(dbenv, &hcp->hlock);
return (ret);
}
@@ -68,18 +69,19 @@ int
__ham_release_meta(dbc)
DBC *dbc;
{
+ DB_MPOOLFILE *mpf;
HASH_CURSOR *hcp;
+ mpf = dbc->dbp->mpf;
hcp = (HASH_CURSOR *)dbc->internal;
if (hcp->hdr)
- (void)memp_fput(dbc->dbp->mpf, hcp->hdr,
+ (void)mpf->put(mpf, hcp->hdr,
F_ISSET(hcp, H_DIRTY) ? DB_MPOOL_DIRTY : 0);
hcp->hdr = NULL;
- if (!F_ISSET(dbc, DBC_RECOVER) &&
- dbc->txn == NULL && hcp->hlock.off != LOCK_INVALID)
- (void)lock_put(dbc->dbp->dbenv, &hcp->hlock);
- hcp->hlock.off = LOCK_INVALID;
+ if (!F_ISSET(dbc, DBC_RECOVER | DBC_COMPENSATE) &&
+ dbc->txn == NULL && LOCK_ISSET(hcp->hlock))
+ (void)dbc->dbp->dbenv->lock_put(dbc->dbp->dbenv, &hcp->hlock);
F_CLR(hcp, H_DIRTY);
return (0);
@@ -95,6 +97,7 @@ __ham_dirty_meta(dbc)
DBC *dbc;
{
DB *dbp;
+ DB_ENV *dbenv;
DB_LOCK _tmp;
HASH *hashp;
HASH_CURSOR *hcp;
@@ -105,12 +108,13 @@ __ham_dirty_meta(dbc)
hcp = (HASH_CURSOR *)dbc->internal;
ret = 0;
- if (STD_LOCKING(dbc) && !F_ISSET(dbc, DBC_RECOVER)) {
+ if (STD_LOCKING(dbc) && !F_ISSET(dbc, DBC_RECOVER | DBC_COMPENSATE)) {
+ dbenv = dbp->dbenv;
dbc->lock.pgno = hashp->meta_pgno;
- if ((ret = lock_get(dbp->dbenv, dbc->locker,
+ if ((ret = dbenv->lock_get(dbenv, dbc->locker,
DB_NONBLOCK(dbc) ? DB_LOCK_NOWAIT : 0,
&dbc->lock_dbt, DB_LOCK_WRITE, &_tmp)) == 0) {
- ret = lock_put(dbp->dbenv, &hcp->hlock);
+ ret = dbenv->lock_put(dbenv, &hcp->hlock);
hcp->hlock = _tmp;
}
}
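__ham_dirty_meta upgrades the cursor's lock on the metadata page from read to write by acquiring the write lock into a temporary and releasing the old lock only once the new one is held, so the page is never left unprotected. A sketch of that coupling step; locker, lock_dbt and old_lock are simplified stand-ins for the corresponding cursor fields:

	/* Sketch of the read-to-write lock coupling used above. */
	DB_LOCK tmp;
	int ret;

	if ((ret = dbenv->lock_get(dbenv, locker, 0,
	    &lock_dbt, DB_LOCK_WRITE, &tmp)) == 0) {
		ret = dbenv->lock_put(dbenv, &old_lock); /* drop the read lock */
		old_lock = tmp;		/* the cursor now holds the write lock */
	}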
diff --git a/bdb/hash/hash_method.c b/bdb/hash/hash_method.c
index f8239993dc5..9a6bf59536a 100644
--- a/bdb/hash/hash_method.c
+++ b/bdb/hash/hash_method.c
@@ -1,14 +1,14 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1999, 2000
+ * Copyright (c) 1999-2002
* Sleepycat Software. All rights reserved.
*/
#include "db_config.h"
#ifndef lint
-static const char revid[] = "$Id: hash_method.c,v 11.7 2000/07/04 18:28:23 bostic Exp $";
+static const char revid[] = "$Id: hash_method.c,v 11.12 2002/03/27 04:32:12 bostic Exp $";
#endif /* not lint */
#ifndef NO_SYSTEM_INCLUDES
@@ -16,8 +16,8 @@ static const char revid[] = "$Id: hash_method.c,v 11.7 2000/07/04 18:28:23 bosti
#endif
#include "db_int.h"
-#include "db_page.h"
-#include "hash.h"
+#include "dbinc/db_page.h"
+#include "dbinc/hash.h"
static int __ham_set_h_ffactor __P((DB *, u_int32_t));
static int __ham_set_h_hash
@@ -38,7 +38,7 @@ __ham_db_create(dbp)
int ret;
if ((ret = __os_malloc(dbp->dbenv,
- sizeof(HASH), NULL, &dbp->h_internal)) != 0)
+ sizeof(HASH), &dbp->h_internal)) != 0)
return (ret);
hashp = dbp->h_internal;
@@ -63,7 +63,7 @@ __ham_db_close(dbp)
{
if (dbp->h_internal == NULL)
return (0);
- __os_free(dbp->h_internal, sizeof(HASH));
+ __os_free(dbp->dbenv, dbp->h_internal);
dbp->h_internal = NULL;
return (0);
}
diff --git a/bdb/hash/hash_open.c b/bdb/hash/hash_open.c
new file mode 100644
index 00000000000..041a1df1e7b
--- /dev/null
+++ b/bdb/hash/hash_open.c
@@ -0,0 +1,558 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+/*
+ * Copyright (c) 1990, 1993, 1994
+ * Margo Seltzer. All rights reserved.
+ */
+/*
+ * Copyright (c) 1990, 1993, 1994
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * Margo Seltzer.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: hash_open.c,v 11.175 2002/09/04 19:06:44 margo Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <stdlib.h>
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/crypto.h"
+#include "dbinc/db_page.h"
+#include "dbinc/hash.h"
+#include "dbinc/log.h"
+#include "dbinc/db_shash.h"
+#include "dbinc/lock.h"
+#include "dbinc/db_swap.h"
+#include "dbinc/btree.h"
+#include "dbinc/fop.h"
+
+static db_pgno_t __ham_init_meta __P((DB *, HMETA *, db_pgno_t, DB_LSN *));
+
+/*
+ * __ham_open --
+ *
+ * PUBLIC: int __ham_open __P((DB *,
+ * PUBLIC: DB_TXN *, const char * name, db_pgno_t, u_int32_t));
+ */
+int
+__ham_open(dbp, txn, name, base_pgno, flags)
+ DB *dbp;
+ DB_TXN *txn;
+ const char *name;
+ db_pgno_t base_pgno;
+ u_int32_t flags;
+{
+ DB_ENV *dbenv;
+ DBC *dbc;
+ DB_MPOOLFILE *mpf;
+ HASH_CURSOR *hcp;
+ HASH *hashp;
+ int ret, t_ret;
+
+ COMPQUIET(name, NULL);
+ dbenv = dbp->dbenv;
+ dbc = NULL;
+ mpf = dbp->mpf;
+
+ /* Initialize the remaining fields/methods of the DB. */
+ dbp->stat = __ham_stat;
+
+ /*
+ * Get a cursor. If DB_CREATE is specified, we may be creating
+ * pages, and to do that safely in CDB we need a write cursor.
+ * In STD_LOCKING mode, we'll synchronize using the meta page
+ * lock instead.
+ */
+ if ((ret = dbp->cursor(dbp,
+ txn, &dbc, LF_ISSET(DB_CREATE) && CDB_LOCKING(dbenv) ?
+ DB_WRITECURSOR : 0)) != 0)
+ return (ret);
+
+ hcp = (HASH_CURSOR *)dbc->internal;
+ hashp = dbp->h_internal;
+ hashp->meta_pgno = base_pgno;
+ if ((ret = __ham_get_meta(dbc)) != 0)
+ goto err1;
+
+ /* Initialize the hdr structure. */
+ if (hcp->hdr->dbmeta.magic == DB_HASHMAGIC) {
+ /* File exists, verify the data in the header. */
+ if (hashp->h_hash == NULL)
+ hashp->h_hash = hcp->hdr->dbmeta.version < 5
+ ? __ham_func4 : __ham_func5;
+ if (!F_ISSET(dbp, DB_AM_RDONLY) && !IS_RECOVERING(dbenv) &&
+ hashp->h_hash(dbp,
+ CHARKEY, sizeof(CHARKEY)) != hcp->hdr->h_charkey) {
+ __db_err(dbp->dbenv,
+ "hash: incompatible hash function");
+ ret = EINVAL;
+ goto err2;
+ }
+ if (F_ISSET(&hcp->hdr->dbmeta, DB_HASH_DUP))
+ F_SET(dbp, DB_AM_DUP);
+ if (F_ISSET(&hcp->hdr->dbmeta, DB_HASH_DUPSORT))
+ F_SET(dbp, DB_AM_DUPSORT);
+ if (F_ISSET(&hcp->hdr->dbmeta, DB_HASH_SUBDB))
+ F_SET(dbp, DB_AM_SUBDB);
+
+ /* We must initialize last_pgno, it could be stale. */
+ if (!F_ISSET(dbp, DB_AM_RDONLY) &&
+ dbp->meta_pgno == PGNO_BASE_MD) {
+ if ((ret = __ham_dirty_meta(dbc)) != 0)
+ goto err2;
+ mpf->last_pgno(mpf, &hcp->hdr->dbmeta.last_pgno);
+ }
+ } else if (!IS_RECOVERING(dbenv) && !F_ISSET(dbp, DB_AM_RECOVER))
+ DB_ASSERT(0);
+
+err2: /* Release the meta data page */
+ if ((t_ret = __ham_release_meta(dbc)) != 0 && ret == 0)
+ ret = t_ret;
+err1: if ((t_ret = dbc->c_close(dbc)) != 0 && ret == 0)
+ ret = t_ret;
+
+ return (ret);
+}
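The open path above validates a user-supplied hash function by hashing the fixed key CHARKEY and comparing the result against the h_charkey value stored in the metadata page when the file was created; a mismatch means lookups would land in the wrong buckets, so the open fails with EINVAL. The probe in isolation, as an illustrative helper that is not part of the library:

/* Illustrative helper only; mirrors the check inside __ham_open. */
static int
__ham_hash_compatible(dbp, h, stored_charkey)
	DB *dbp;
	u_int32_t (*h) __P((DB *, const void *, u_int32_t));
	u_int32_t stored_charkey;
{
	return (h(dbp, CHARKEY, sizeof(CHARKEY)) == stored_charkey);
}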
+
+/*
+ * __ham_metachk --
+ *
+ * PUBLIC: int __ham_metachk __P((DB *, const char *, HMETA *));
+ */
+int
+__ham_metachk(dbp, name, hashm)
+ DB *dbp;
+ const char *name;
+ HMETA *hashm;
+{
+ DB_ENV *dbenv;
+ u_int32_t vers;
+ int ret;
+
+ dbenv = dbp->dbenv;
+
+ /*
+ * At this point, all we know is that the magic number is for a Hash.
+ * Check the version, the database may be out of date.
+ */
+ vers = hashm->dbmeta.version;
+ if (F_ISSET(dbp, DB_AM_SWAP))
+ M_32_SWAP(vers);
+ switch (vers) {
+ case 4:
+ case 5:
+ case 6:
+ __db_err(dbenv,
+ "%s: hash version %lu requires a version upgrade",
+ name, (u_long)vers);
+ return (DB_OLD_VERSION);
+ case 7:
+ case 8:
+ break;
+ default:
+ __db_err(dbenv,
+ "%s: unsupported hash version: %lu", name, (u_long)vers);
+ return (EINVAL);
+ }
+
+ /* Swap the page if we need to. */
+ if (F_ISSET(dbp, DB_AM_SWAP) && (ret = __ham_mswap((PAGE *)hashm)) != 0)
+ return (ret);
+
+ /* Check the type. */
+ if (dbp->type != DB_HASH && dbp->type != DB_UNKNOWN)
+ return (EINVAL);
+ dbp->type = DB_HASH;
+ DB_ILLEGAL_METHOD(dbp, DB_OK_HASH);
+
+ /*
+ * Check application info against metadata info, and set info, flags,
+ * and type based on metadata info.
+ */
+ if ((ret = __db_fchk(dbenv,
+ "DB->open", hashm->dbmeta.flags,
+ DB_HASH_DUP | DB_HASH_SUBDB | DB_HASH_DUPSORT)) != 0)
+ return (ret);
+
+ if (F_ISSET(&hashm->dbmeta, DB_HASH_DUP))
+ F_SET(dbp, DB_AM_DUP);
+ else
+ if (F_ISSET(dbp, DB_AM_DUP)) {
+ __db_err(dbenv,
+ "%s: DB_DUP specified to open method but not set in database",
+ name);
+ return (EINVAL);
+ }
+
+ if (F_ISSET(&hashm->dbmeta, DB_HASH_SUBDB))
+ F_SET(dbp, DB_AM_SUBDB);
+ else
+ if (F_ISSET(dbp, DB_AM_SUBDB)) {
+ __db_err(dbenv,
+ "%s: multiple databases specified but not supported in file",
+ name);
+ return (EINVAL);
+ }
+
+ if (F_ISSET(&hashm->dbmeta, DB_HASH_DUPSORT)) {
+ if (dbp->dup_compare == NULL)
+ dbp->dup_compare = __bam_defcmp;
+ } else
+ if (dbp->dup_compare != NULL) {
+ __db_err(dbenv,
+ "%s: duplicate sort function specified but not set in database",
+ name);
+ return (EINVAL);
+ }
+
+ /* Set the page size. */
+ dbp->pgsize = hashm->dbmeta.pagesize;
+
+ /* Copy the file's ID. */
+ memcpy(dbp->fileid, hashm->dbmeta.uid, DB_FILE_ID_LEN);
+
+ return (0);
+}
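__ham_metachk is where an old on-disk format surfaces as DB_OLD_VERSION, and that value propagates out of DB->open. A hedged sketch of the usual application response; the file name, flags and handle recovery are placeholders, and a failed open leaves the handle unusable, so the upgrade runs on a fresh one:

	/* Sketch; error handling trimmed, names are placeholders. */
	ret = dbp->open(dbp, NULL, "file.db", NULL, DB_HASH, 0, 0644);
	if (ret == DB_OLD_VERSION) {
		(void)dbp->close(dbp, 0); /* handle is dead after a failed open */
		if ((ret = db_create(&dbp, dbenv, 0)) == 0 &&
		    (ret = dbp->upgrade(dbp, "file.db", 0)) == 0) {
			/* ...create one more handle and retry the open... */
		}
	}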
+
+/*
+ * __ham_init_meta --
+ *
+ * Initialize a hash meta-data page. We assume that the meta-data page is
+ * contiguous with the initial buckets that we create. If that turns out
+ * to be false, we'll fix it up later. Return the initial number of buckets
+ * allocated.
+ */
+static db_pgno_t
+__ham_init_meta(dbp, meta, pgno, lsnp)
+ DB *dbp;
+ HMETA *meta;
+ db_pgno_t pgno;
+ DB_LSN *lsnp;
+{
+ HASH *hashp;
+ db_pgno_t nbuckets;
+ int i;
+ int32_t l2;
+
+ hashp = dbp->h_internal;
+ if (hashp->h_hash == NULL)
+ hashp->h_hash = DB_HASHVERSION < 5 ? __ham_func4 : __ham_func5;
+
+ if (hashp->h_nelem != 0 && hashp->h_ffactor != 0) {
+ hashp->h_nelem = (hashp->h_nelem - 1) / hashp->h_ffactor + 1;
+ l2 = __db_log2(hashp->h_nelem > 2 ? hashp->h_nelem : 2);
+ } else
+ l2 = 1;
+ nbuckets = (db_pgno_t)(1 << l2);
+
+ memset(meta, 0, sizeof(HMETA));
+ meta->dbmeta.lsn = *lsnp;
+ meta->dbmeta.pgno = pgno;
+ meta->dbmeta.magic = DB_HASHMAGIC;
+ meta->dbmeta.version = DB_HASHVERSION;
+ meta->dbmeta.pagesize = dbp->pgsize;
+ if (F_ISSET(dbp, DB_AM_CHKSUM))
+ FLD_SET(meta->dbmeta.metaflags, DBMETA_CHKSUM);
+ if (F_ISSET(dbp, DB_AM_ENCRYPT)) {
+ meta->dbmeta.encrypt_alg =
+ ((DB_CIPHER *)dbp->dbenv->crypto_handle)->alg;
+ DB_ASSERT(meta->dbmeta.encrypt_alg != 0);
+ meta->crypto_magic = meta->dbmeta.magic;
+ }
+ meta->dbmeta.type = P_HASHMETA;
+ meta->dbmeta.free = PGNO_INVALID;
+ meta->dbmeta.last_pgno = pgno;
+ meta->max_bucket = nbuckets - 1;
+ meta->high_mask = nbuckets - 1;
+ meta->low_mask = (nbuckets >> 1) - 1;
+ meta->ffactor = hashp->h_ffactor;
+ meta->h_charkey = hashp->h_hash(dbp, CHARKEY, sizeof(CHARKEY));
+ memcpy(meta->dbmeta.uid, dbp->fileid, DB_FILE_ID_LEN);
+
+ if (F_ISSET(dbp, DB_AM_DUP))
+ F_SET(&meta->dbmeta, DB_HASH_DUP);
+ if (F_ISSET(dbp, DB_AM_SUBDB))
+ F_SET(&meta->dbmeta, DB_HASH_SUBDB);
+ if (dbp->dup_compare != NULL)
+ F_SET(&meta->dbmeta, DB_HASH_DUPSORT);
+
+ /*
+ * Create the first and second bucket pages so that we have the
+ * page numbers for them and we can store that page number in the
+ * meta-data header (spares[0]).
+ */
+ meta->spares[0] = pgno + 1;
+
+ /* Fill in the last fields of the meta data page. */
+ for (i = 1; i <= l2; i++)
+ meta->spares[i] = meta->spares[0];
+ for (; i < NCACHED; i++)
+ meta->spares[i] = PGNO_INVALID;
+
+ return (nbuckets);
+}
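The sizing arithmetic above is easiest to follow with concrete numbers; the values below are assumed purely for illustration:

/*
 * Example: h_nelem = 1000, h_ffactor = 10 (assumed values).
 *	h_nelem	 -> (1000 - 1) / 10 + 1 = 100
 *	l2	 -> __db_log2(100)	= 7
 *	nbuckets -> 1 << 7		= 128
 * so max_bucket = high_mask = 127, low_mask = 63, and spares[1]..spares[7]
 * all start out pointing just past the metadata page (pgno + 1).
 */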
+
+/*
+ * __ham_new_file --
+ * Create the necessary pages to begin a new database file. If name
+ * is NULL, then this is an unnamed file, the mpf has been set in the dbp
+ * and we simply create the pages using mpool. In this case, we don't log
+ * because we never have to redo an unnamed create and the undo simply
+ * frees resources.
+ *
+ * This code appears more complex than it is because of the two cases (named
+ * and unnamed). The way to read the code is that for each page being created,
+ * there are three parts: 1) a "get page" chunk (which either uses malloc'd
+ * memory or calls mpf->get), 2) the initialization, and 3) the "put page"
+ * chunk which either does a fop write or an mpf->put.
+ *
+ * PUBLIC: int __ham_new_file __P((DB *, DB_TXN *, DB_FH *, const char *));
+ */
+int
+__ham_new_file(dbp, txn, fhp, name)
+ DB *dbp;
+ DB_TXN *txn;
+ DB_FH *fhp;
+ const char *name;
+{
+ DB_ENV *dbenv;
+ DB_LSN lsn;
+ DB_MPOOLFILE *mpf;
+ DB_PGINFO pginfo;
+ DBT pdbt;
+ HMETA *meta;
+ PAGE *page;
+ int ret;
+ db_pgno_t lpgno;
+ void *buf;
+
+ dbenv = dbp->dbenv;
+ mpf = dbp->mpf;
+ meta = NULL;
+ page = NULL;
+ memset(&pdbt, 0, sizeof(pdbt));
+
+ /* Build meta-data page. */
+ if (name == NULL) {
+ lpgno = PGNO_BASE_MD;
+ ret = mpf->get(mpf, &lpgno, DB_MPOOL_CREATE, &meta);
+ } else {
+ pginfo.db_pagesize = dbp->pgsize;
+ pginfo.type = dbp->type;
+ pginfo.flags =
+ F_ISSET(dbp, (DB_AM_CHKSUM | DB_AM_ENCRYPT | DB_AM_SWAP));
+ pdbt.data = &pginfo;
+ pdbt.size = sizeof(pginfo);
+ ret = __os_calloc(dbp->dbenv, 1, dbp->pgsize, &buf);
+ meta = (HMETA *)buf;
+ }
+ if (ret != 0)
+ return (ret);
+
+ LSN_NOT_LOGGED(lsn);
+ lpgno = __ham_init_meta(dbp, meta, PGNO_BASE_MD, &lsn);
+ meta->dbmeta.last_pgno = lpgno;
+
+ if (name == NULL)
+ ret = mpf->put(mpf, meta, DB_MPOOL_DIRTY);
+ else {
+ if ((ret = __db_pgout(dbenv, PGNO_BASE_MD, meta, &pdbt)) != 0)
+ goto err;
+ ret = __fop_write(dbenv, txn, name,
+ DB_APP_DATA, fhp, 0, buf, dbp->pgsize, 1);
+ }
+ if (ret != 0)
+ goto err;
+ meta = NULL;
+
+ /* Now allocate the final hash bucket. */
+ if (name == NULL) {
+ if ((ret = mpf->get(mpf, &lpgno, DB_MPOOL_CREATE, &page)) != 0)
+ goto err;
+ } else {
+#ifdef DIAGNOSTIC
+ memset(buf, 0, dbp->pgsize);
+#endif
+ page = (PAGE *)buf;
+ }
+
+ P_INIT(page, dbp->pgsize, lpgno, PGNO_INVALID, PGNO_INVALID, 0, P_HASH);
+ LSN_NOT_LOGGED(page->lsn);
+
+ if (name == NULL)
+ ret = mpf->put(mpf, page, DB_MPOOL_DIRTY);
+ else {
+ if ((ret = __db_pgout(dbenv, lpgno, buf, &pdbt)) != 0)
+ goto err;
+ ret = __fop_write(dbenv, txn, name,
+ DB_APP_DATA, fhp, lpgno * dbp->pgsize, buf, dbp->pgsize, 1);
+ }
+ if (ret != 0)
+ goto err;
+ page = NULL;
+
+err: if (name != NULL)
+ __os_free(dbenv, buf);
+ else {
+ if (meta != NULL)
+ (void)mpf->put(mpf, meta, 0);
+ if (page != NULL)
+ (void)mpf->put(mpf, page, 0);
+ }
+ return (ret);
+}
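Each page created above follows the get/init/put shape described in the function comment; a stripped-down sketch of the unnamed (mpool-backed) case only, with error handling removed:

	/* Unnamed-file path only; error handling elided. */
	lpgno = PGNO_BASE_MD;
	if ((ret = mpf->get(mpf, &lpgno, DB_MPOOL_CREATE, &meta)) != 0) /* get */
		return (ret);
	LSN_NOT_LOGGED(lsn);						/* init */
	lpgno = __ham_init_meta(dbp, meta, PGNO_BASE_MD, &lsn);
	meta->dbmeta.last_pgno = lpgno;
	if ((ret = mpf->put(mpf, meta, DB_MPOOL_DIRTY)) != 0)		/* put */
		return (ret);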
+
+/*
+ * __ham_new_subdb --
+ * Create the necessary pages to begin a new subdatabase.
+ *
+ * PUBLIC: int __ham_new_subdb __P((DB *, DB *, DB_TXN *));
+ */
+int
+__ham_new_subdb(mdbp, dbp, txn)
+ DB *mdbp, *dbp;
+ DB_TXN *txn;
+{
+ DBC *dbc;
+ DB_ENV *dbenv;
+ DB_LOCK metalock, mmlock;
+ DB_LSN lsn;
+ DB_MPOOLFILE *mpf;
+ DBMETA *mmeta;
+ HMETA *meta;
+ PAGE *h;
+ int i, ret, t_ret;
+ db_pgno_t lpgno, mpgno;
+
+ dbenv = mdbp->dbenv;
+ mpf = mdbp->mpf;
+ dbc = NULL;
+ meta = NULL;
+ mmeta = NULL;
+ LOCK_INIT(metalock);
+ LOCK_INIT(mmlock);
+
+ if ((ret = mdbp->cursor(mdbp, txn,
+ &dbc, CDB_LOCKING(dbenv) ? DB_WRITECURSOR : 0)) != 0)
+ return (ret);
+
+ /* Get and lock the new meta data page. */
+ if ((ret = __db_lget(dbc,
+ 0, dbp->meta_pgno, DB_LOCK_WRITE, 0, &metalock)) != 0)
+ goto err;
+ if ((ret = mpf->get(mpf, &dbp->meta_pgno, DB_MPOOL_CREATE, &meta)) != 0)
+ goto err;
+
+ /* Initialize the new meta-data page. */
+ lsn = meta->dbmeta.lsn;
+ lpgno = __ham_init_meta(dbp, meta, dbp->meta_pgno, &lsn);
+
+ /*
+ * We are about to allocate a set of contiguous buckets (lpgno
+ * worth). We need to get the master meta-data page to figure
+ * out where these pages are and to allocate them. So, lock and
+ * get the master meta data page.
+ */
+ mpgno = PGNO_BASE_MD;
+ if ((ret = __db_lget(dbc, 0, mpgno, DB_LOCK_WRITE, 0, &mmlock)) != 0)
+ goto err;
+ if ((ret = mpf->get(mpf, &mpgno, 0, &mmeta)) != 0)
+ goto err;
+
+ /*
+ * Now update the hash meta-data page to reflect where the first
+ * set of buckets are actually located.
+ */
+ meta->spares[0] = mmeta->last_pgno + 1;
+ for (i = 0; i < NCACHED && meta->spares[i] != PGNO_INVALID; i++)
+ meta->spares[i] = meta->spares[0];
+
+ /* The new meta data page is now complete; log it. */
+ if ((ret = __db_log_page(mdbp,
+ txn, &meta->dbmeta.lsn, dbp->meta_pgno, (PAGE *)meta)) != 0)
+ goto err;
+
+ /* Reflect the group allocation. */
+ if (DBENV_LOGGING(dbenv))
+ if ((ret = __ham_groupalloc_log(mdbp, txn,
+ &LSN(mmeta), 0, &LSN(mmeta),
+ meta->spares[0], meta->max_bucket + 1, mmeta->free)) != 0)
+ goto err;
+
+ /* Release the new meta-data page. */
+ if ((ret = mpf->put(mpf, meta, DB_MPOOL_DIRTY)) != 0)
+ goto err;
+ meta = NULL;
+
+ mmeta->last_pgno += lpgno;
+ lpgno = mmeta->last_pgno;
+
+ /* Now allocate the final hash bucket. */
+ if ((ret = mpf->get(mpf, &lpgno, DB_MPOOL_CREATE, &h)) != 0)
+ goto err;
+ P_INIT(h, dbp->pgsize, lpgno, PGNO_INVALID, PGNO_INVALID, 0, P_HASH);
+ LSN(h) = LSN(mmeta);
+ if ((ret = mpf->put(mpf, h, DB_MPOOL_DIRTY)) != 0)
+ goto err;
+
+ /* Now put the master-metadata page back. */
+ if ((ret = mpf->put(mpf, mmeta, DB_MPOOL_DIRTY)) != 0)
+ goto err;
+ mmeta = NULL;
+
+err:
+ if (mmeta != NULL)
+ if ((t_ret = mpf->put(mpf, mmeta, 0)) != 0 && ret == 0)
+ ret = t_ret;
+ if (LOCK_ISSET(mmlock))
+ if ((t_ret = __LPUT(dbc, mmlock)) != 0 && ret == 0)
+ ret = t_ret;
+ if (meta != NULL)
+ if ((t_ret = mpf->put(mpf, meta, 0)) != 0 && ret == 0)
+ ret = t_ret;
+ if (LOCK_ISSET(metalock))
+ if ((t_ret = __LPUT(dbc, metalock)) != 0 && ret == 0)
+ ret = t_ret;
+ if (dbc != NULL)
+ if ((t_ret = dbc->c_close(dbc)) != 0 && ret == 0)
+ ret = t_ret;
+ return (ret);
+}
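The spares[] bookkeeping is what lets the group allocation above work: the new subdatabase's buckets simply follow the master file's current last page. An illustration with assumed numbers:

/*
 * Illustration (assumed numbers): if mmeta->last_pgno is 12 and the new
 * subdatabase sizes itself to 4 buckets, spares[0..2] are set to 13, the
 * buckets land on pages 13..16, and mmeta->last_pgno advances to 16 --
 * exactly the "final hash bucket" page initialized above.
 */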
diff --git a/bdb/hash/hash_page.c b/bdb/hash/hash_page.c
index 64f38853284..6788129773f 100644
--- a/bdb/hash/hash_page.c
+++ b/bdb/hash/hash_page.c
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Copyright (c) 1996-2002
* Sleepycat Software. All rights reserved.
*/
/*
@@ -43,23 +43,14 @@
#include "db_config.h"
#ifndef lint
-static const char revid[] = "$Id: hash_page.c,v 11.46 2001/01/11 18:19:51 bostic Exp $";
+static const char revid[] = "$Id: hash_page.c,v 11.87 2002/08/15 02:46:20 bostic Exp $";
#endif /* not lint */
/*
* PACKAGE: hashing
*
* DESCRIPTION:
- * Page manipulation for hashing package.
- *
- * ROUTINES:
- *
- * External
- * __get_page
- * __add_ovflpage
- * __overflow_page
- * Internal
- * open_temp
+ * Page manipulation for hashing package.
*/
#ifndef NO_SYSTEM_INCLUDES
@@ -69,11 +60,13 @@ static const char revid[] = "$Id: hash_page.c,v 11.46 2001/01/11 18:19:51 bostic
#endif
#include "db_int.h"
-#include "db_page.h"
-#include "db_shash.h"
-#include "hash.h"
-#include "lock.h"
-#include "txn.h"
+#include "dbinc/db_page.h"
+#include "dbinc/db_shash.h"
+#include "dbinc/hash.h"
+#include "dbinc/lock.h"
+
+static int __ham_c_delpg
+ __P((DBC *, db_pgno_t, db_pgno_t, u_int32_t, db_ham_mode, u_int32_t *));
/*
* PUBLIC: int __ham_item __P((DBC *, db_lockmode_t, db_pgno_t *));
@@ -104,15 +97,15 @@ __ham_item(dbc, mode, pgnop)
recheck:
/* Check if we are looking for space in which to insert an item. */
- if (hcp->seek_size && hcp->seek_found_page == PGNO_INVALID
- && hcp->seek_size < P_FREESPACE(hcp->page))
+ if (hcp->seek_size && hcp->seek_found_page == PGNO_INVALID &&
+ hcp->seek_size < P_FREESPACE(dbp, hcp->page))
hcp->seek_found_page = hcp->pgno;
/* Check for off-page duplicates. */
if (hcp->indx < NUM_ENT(hcp->page) &&
- HPAGE_TYPE(hcp->page, H_DATAINDEX(hcp->indx)) == H_OFFDUP) {
+ HPAGE_TYPE(dbp, hcp->page, H_DATAINDEX(hcp->indx)) == H_OFFDUP) {
memcpy(pgnop,
- HOFFDUP_PGNO(H_PAIRDATA(hcp->page, hcp->indx)),
+ HOFFDUP_PGNO(H_PAIRDATA(dbp, hcp->page, hcp->indx)),
sizeof(db_pgno_t));
F_SET(hcp, H_OK);
return (0);
@@ -126,7 +119,7 @@ recheck:
* pointer to be the beginning of the datum.
*/
memcpy(&hcp->dup_len,
- HKEYDATA_DATA(H_PAIRDATA(hcp->page, hcp->indx)) +
+ HKEYDATA_DATA(H_PAIRDATA(dbp, hcp->page, hcp->indx)) +
hcp->dup_off, sizeof(db_indx_t));
if (hcp->indx >= (db_indx_t)NUM_ENT(hcp->page)) {
@@ -153,15 +146,18 @@ int
__ham_item_reset(dbc)
DBC *dbc;
{
- HASH_CURSOR *hcp;
DB *dbp;
+ DB_MPOOLFILE *mpf;
+ HASH_CURSOR *hcp;
int ret;
- ret = 0;
dbp = dbc->dbp;
+ mpf = dbp->mpf;
hcp = (HASH_CURSOR *)dbc->internal;
+
+ ret = 0;
if (hcp->page != NULL)
- ret = memp_fput(dbp->mpf, hcp->page, 0);
+ ret = mpf->put(mpf, hcp->page, 0);
__ham_item_init(dbc);
return (ret);
@@ -181,8 +177,7 @@ __ham_item_init(dbc)
* If this cursor still holds any locks, we must
* release them if we are not running with transactions.
*/
- if (hcp->lock.off != LOCK_INVALID && dbc->txn == NULL)
- (void)lock_put(dbc->dbp->dbenv, &hcp->lock);
+ (void)__TLPUT(dbc, hcp->lock);
/*
* The following fields must *not* be initialized here
@@ -191,7 +186,7 @@ __ham_item_init(dbc)
*/
hcp->bucket = BUCKET_INVALID;
hcp->lbucket = BUCKET_INVALID;
- hcp->lock.off = LOCK_INVALID;
+ LOCK_INIT(hcp->lock);
hcp->lock_mode = DB_LOCK_NG;
hcp->dup_off = 0;
hcp->dup_len = 0;
@@ -269,8 +264,9 @@ __ham_item_prev(dbc, mode, pgnop)
db_pgno_t next_pgno;
int ret;
- dbp = dbc->dbp;
hcp = (HASH_CURSOR *)dbc->internal;
+ dbp = dbc->dbp;
+
/*
* There are 5 cases for backing up in a hash file.
* Case 1: In the middle of a page, no duplicates, just dec the index.
@@ -291,9 +287,10 @@ __ham_item_prev(dbc, mode, pgnop)
* to handle backing up through keys.
*/
if (!F_ISSET(hcp, H_NEXT_NODUP) && F_ISSET(hcp, H_ISDUP)) {
- if (HPAGE_TYPE(hcp->page, H_DATAINDEX(hcp->indx)) == H_OFFDUP) {
+ if (HPAGE_TYPE(dbp, hcp->page, H_DATAINDEX(hcp->indx)) ==
+ H_OFFDUP) {
memcpy(pgnop,
- HOFFDUP_PGNO(H_PAIRDATA(hcp->page, hcp->indx)),
+ HOFFDUP_PGNO(H_PAIRDATA(dbp, hcp->page, hcp->indx)),
sizeof(db_pgno_t));
F_SET(hcp, H_OK);
return (0);
@@ -302,7 +299,7 @@ __ham_item_prev(dbc, mode, pgnop)
/* Duplicates are on-page. */
if (hcp->dup_off != 0) {
memcpy(&hcp->dup_len, HKEYDATA_DATA(
- H_PAIRDATA(hcp->page, hcp->indx))
+ H_PAIRDATA(dbp, hcp->page, hcp->indx))
+ hcp->dup_off - sizeof(db_indx_t),
sizeof(db_indx_t));
hcp->dup_off -=
@@ -396,7 +393,7 @@ __ham_item_next(dbc, mode, pgnop)
if (F_ISSET(hcp, H_DELETED)) {
if (hcp->indx != NDX_INVALID &&
F_ISSET(hcp, H_ISDUP) &&
- HPAGE_TYPE(hcp->page, H_DATAINDEX(hcp->indx))
+ HPAGE_TYPE(dbc->dbp, hcp->page, H_DATAINDEX(hcp->indx))
== H_DUPLICATE && hcp->dup_tlen == hcp->dup_off) {
if (F_ISSET(hcp, H_DUPONLY)) {
F_CLR(hcp, H_OK);
@@ -447,7 +444,7 @@ __ham_item_next(dbc, mode, pgnop)
}
/*
- * PUBLIC: void __ham_putitem __P((PAGE *p, const DBT *, int));
+ * PUBLIC: void __ham_putitem __P((DB *, PAGE *p, const DBT *, int));
*
* This is a little bit sleazy in that we're overloading the meaning
* of the H_OFFPAGE type here. When we recover deletes, we have the
@@ -456,24 +453,27 @@ __ham_item_next(dbc, mode, pgnop)
* an H_KEYDATA around it.
*/
void
-__ham_putitem(p, dbt, type)
+__ham_putitem(dbp, p, dbt, type)
+ DB *dbp;
PAGE *p;
const DBT *dbt;
int type;
{
u_int16_t n, off;
+ db_indx_t *inp;
n = NUM_ENT(p);
+ inp = P_INP(dbp, p);
/* Put the item element on the page. */
if (type == H_OFFPAGE) {
off = HOFFSET(p) - dbt->size;
- HOFFSET(p) = p->inp[n] = off;
- memcpy(P_ENTRY(p, n), dbt->data, dbt->size);
+ HOFFSET(p) = inp[n] = off;
+ memcpy(P_ENTRY(dbp, p, n), dbt->data, dbt->size);
} else {
off = HOFFSET(p) - HKEYDATA_SIZE(dbt->size);
- HOFFSET(p) = p->inp[n] = off;
- PUT_HKEYDATA(P_ENTRY(p, n), dbt->data, dbt->size, type);
+ HOFFSET(p) = inp[n] = off;
+ PUT_HKEYDATA(P_ENTRY(dbp, p, n), dbt->data, dbt->size, type);
}
/* Adjust page info. */
@@ -481,8 +481,8 @@ __ham_putitem(p, dbt, type)
}
/*
- * PUBLIC: void __ham_reputpair
- * PUBLIC: __P((PAGE *p, u_int32_t, u_int32_t, const DBT *, const DBT *));
+ * PUBLIC: void __ham_reputpair __P((DB *, PAGE *,
+ * PUBLIC: u_int32_t, const DBT *, const DBT *));
*
* This is a special case to restore a key/data pair to its original
* location during recovery. We are guaranteed that the pair fits
@@ -490,17 +490,21 @@ __ham_putitem(p, dbt, type)
* the last pair, the normal insert works).
*/
void
-__ham_reputpair(p, psize, ndx, key, data)
+__ham_reputpair(dbp, p, ndx, key, data)
+ DB *dbp;
PAGE *p;
- u_int32_t psize, ndx;
+ u_int32_t ndx;
const DBT *key, *data;
{
- db_indx_t i, movebytes, newbytes;
+ db_indx_t i, *inp, movebytes, newbytes;
+ size_t psize;
u_int8_t *from;
+ psize = dbp->pgsize;
+ inp = P_INP(dbp, p);
/* First shuffle the existing items up on the page. */
- movebytes =
- (ndx == 0 ? psize : p->inp[H_DATAINDEX(ndx - 2)]) - HOFFSET(p);
+ movebytes = (db_indx_t)(
+ (ndx == 0 ? psize : inp[H_DATAINDEX(ndx - 2)]) - HOFFSET(p));
newbytes = key->size + data->size;
from = (u_int8_t *)p + HOFFSET(p);
memmove(from - newbytes, from, movebytes);
@@ -511,17 +515,17 @@ __ham_reputpair(p, psize, ndx, key, data)
* we are dealing with index 0 (db_indx_t's are unsigned).
*/
for (i = NUM_ENT(p) - 1; ; i-- ) {
- p->inp[i + 2] = p->inp[i] - newbytes;
+ inp[i + 2] = inp[i] - newbytes;
if (i == H_KEYINDEX(ndx))
break;
}
/* Put the key and data on the page. */
- p->inp[H_KEYINDEX(ndx)] =
- (ndx == 0 ? psize : p->inp[H_DATAINDEX(ndx - 2)]) - key->size;
- p->inp[H_DATAINDEX(ndx)] = p->inp[H_KEYINDEX(ndx)] - data->size;
- memcpy(P_ENTRY(p, H_KEYINDEX(ndx)), key->data, key->size);
- memcpy(P_ENTRY(p, H_DATAINDEX(ndx)), data->data, data->size);
+ inp[H_KEYINDEX(ndx)] = (db_indx_t)(
+ (ndx == 0 ? psize : inp[H_DATAINDEX(ndx - 2)]) - key->size);
+ inp[H_DATAINDEX(ndx)] = inp[H_KEYINDEX(ndx)] - data->size;
+ memcpy(P_ENTRY(dbp, p, H_KEYINDEX(ndx)), key->data, key->size);
+ memcpy(P_ENTRY(dbp, p, H_DATAINDEX(ndx)), data->data, data->size);
/* Adjust page info. */
HOFFSET(p) -= newbytes;
@@ -537,25 +541,25 @@ __ham_del_pair(dbc, reclaim_page)
int reclaim_page;
{
DB *dbp;
- HASH_CURSOR *hcp;
DBT data_dbt, key_dbt;
- DB_ENV *dbenv;
DB_LSN new_lsn, *n_lsn, tmp_lsn;
+ DB_MPOOLFILE *mpf;
+ HASH_CURSOR *hcp;
PAGE *n_pagep, *nn_pagep, *p, *p_pagep;
+ db_ham_mode op;
db_indx_t ndx;
db_pgno_t chg_pgno, pgno, tmp_pgno;
int ret, t_ret;
+ u_int32_t order;
dbp = dbc->dbp;
+ mpf = dbp->mpf;
hcp = (HASH_CURSOR *)dbc->internal;
-
- dbenv = dbp->dbenv;
- ndx = hcp->indx;
-
n_pagep = p_pagep = nn_pagep = NULL;
+ ndx = hcp->indx;
- if (hcp->page == NULL && (ret = memp_fget(dbp->mpf,
- &hcp->pgno, DB_MPOOL_CREATE, &hcp->page)) != 0)
+ if (hcp->page == NULL &&
+ (ret = mpf->get(mpf, &hcp->pgno, DB_MPOOL_CREATE, &hcp->page)) != 0)
return (ret);
p = hcp->page;
@@ -567,17 +571,17 @@ __ham_del_pair(dbc, reclaim_page)
* entry referring to the big item.
*/
ret = 0;
- if (HPAGE_PTYPE(H_PAIRKEY(p, ndx)) == H_OFFPAGE) {
- memcpy(&pgno, HOFFPAGE_PGNO(P_ENTRY(p, H_KEYINDEX(ndx))),
+ if (HPAGE_PTYPE(H_PAIRKEY(dbp, p, ndx)) == H_OFFPAGE) {
+ memcpy(&pgno, HOFFPAGE_PGNO(P_ENTRY(dbp, p, H_KEYINDEX(ndx))),
sizeof(db_pgno_t));
ret = __db_doff(dbc, pgno);
}
if (ret == 0)
- switch (HPAGE_PTYPE(H_PAIRDATA(p, ndx))) {
+ switch (HPAGE_PTYPE(H_PAIRDATA(dbp, p, ndx))) {
case H_OFFPAGE:
memcpy(&pgno,
- HOFFPAGE_PGNO(P_ENTRY(p, H_DATAINDEX(ndx))),
+ HOFFPAGE_PGNO(P_ENTRY(dbp, p, H_DATAINDEX(ndx))),
sizeof(db_pgno_t));
ret = __db_doff(dbc, pgno);
break;
@@ -596,21 +600,21 @@ __ham_del_pair(dbc, reclaim_page)
return (ret);
/* Now log the delete off this page. */
- if (DB_LOGGING(dbc)) {
- key_dbt.data = P_ENTRY(p, H_KEYINDEX(ndx));
- key_dbt.size = LEN_HITEM(p, dbp->pgsize, H_KEYINDEX(ndx));
- data_dbt.data = P_ENTRY(p, H_DATAINDEX(ndx));
- data_dbt.size = LEN_HITEM(p, dbp->pgsize, H_DATAINDEX(ndx));
-
- if ((ret = __ham_insdel_log(dbenv,
- dbc->txn, &new_lsn, 0, DELPAIR,
- dbp->log_fileid, PGNO(p), (u_int32_t)ndx,
+ if (DBC_LOGGING(dbc)) {
+ key_dbt.data = P_ENTRY(dbp, p, H_KEYINDEX(ndx));
+ key_dbt.size = LEN_HITEM(dbp, p, dbp->pgsize, H_KEYINDEX(ndx));
+ data_dbt.data = P_ENTRY(dbp, p, H_DATAINDEX(ndx));
+ data_dbt.size = LEN_HITEM(dbp, p, dbp->pgsize, H_DATAINDEX(ndx));
+
+ if ((ret = __ham_insdel_log(dbp,
+ dbc->txn, &new_lsn, 0, DELPAIR, PGNO(p), (u_int32_t)ndx,
&LSN(p), &key_dbt, &data_dbt)) != 0)
return (ret);
+ } else
+ LSN_NOT_LOGGED(new_lsn);
- /* Move lsn onto page. */
- LSN(p) = new_lsn;
- }
+ /* Move lsn onto page. */
+ LSN(p) = new_lsn;
/* Do the delete. */
__ham_dpair(dbp, p, ndx);
@@ -636,8 +640,11 @@ __ham_del_pair(dbc, reclaim_page)
* XXX
* Perhaps we can retain incremental numbers and apply them later.
*/
- if (!STD_LOCKING(dbc))
+ if (!STD_LOCKING(dbc)) {
--hcp->hdr->nelem;
+ if ((ret = __ham_dirty_meta(dbc)) != 0)
+ return (ret);
+ }
/*
* If we need to reclaim the page, then check if the page is empty.
@@ -650,43 +657,43 @@ __ham_del_pair(dbc, reclaim_page)
if (!reclaim_page ||
NUM_ENT(p) != 0 ||
(PREV_PGNO(p) == PGNO_INVALID && NEXT_PGNO(p) == PGNO_INVALID))
- return (memp_fset(dbp->mpf, p, DB_MPOOL_DIRTY));
+ return (mpf->set(mpf, p, DB_MPOOL_DIRTY));
if (PREV_PGNO(p) == PGNO_INVALID) {
/*
* First page in chain is empty and we know that there
* are more pages in the chain.
*/
- if ((ret =
- memp_fget(dbp->mpf, &NEXT_PGNO(p), 0, &n_pagep)) != 0)
+ if ((ret = mpf->get(mpf, &NEXT_PGNO(p), 0, &n_pagep)) != 0)
return (ret);
- if (NEXT_PGNO(n_pagep) != PGNO_INVALID &&
- (ret = memp_fget(dbp->mpf, &NEXT_PGNO(n_pagep), 0,
- &nn_pagep)) != 0)
+ if (NEXT_PGNO(n_pagep) != PGNO_INVALID && (ret =
+ mpf->get(mpf, &NEXT_PGNO(n_pagep), 0, &nn_pagep)) != 0)
goto err;
- if (DB_LOGGING(dbc)) {
+ if (DBC_LOGGING(dbc)) {
key_dbt.data = n_pagep;
key_dbt.size = dbp->pgsize;
- if ((ret = __ham_copypage_log(dbenv,
- dbc->txn, &new_lsn, 0, dbp->log_fileid, PGNO(p),
+ if ((ret = __ham_copypage_log(dbp,
+ dbc->txn, &new_lsn, 0, PGNO(p),
&LSN(p), PGNO(n_pagep), &LSN(n_pagep),
NEXT_PGNO(n_pagep),
nn_pagep == NULL ? NULL : &LSN(nn_pagep),
&key_dbt)) != 0)
goto err;
+ } else
+ LSN_NOT_LOGGED(new_lsn);
+
+ /* Move lsn onto page. */
+ LSN(p) = new_lsn; /* Structure assignment. */
+ LSN(n_pagep) = new_lsn;
+ if (NEXT_PGNO(n_pagep) != PGNO_INVALID)
+ LSN(nn_pagep) = new_lsn;
- /* Move lsn onto page. */
- LSN(p) = new_lsn; /* Structure assignment. */
- LSN(n_pagep) = new_lsn;
- if (NEXT_PGNO(n_pagep) != PGNO_INVALID)
- LSN(nn_pagep) = new_lsn;
- }
if (nn_pagep != NULL) {
PREV_PGNO(nn_pagep) = PGNO(p);
- if ((ret = memp_fput(dbp->mpf,
- nn_pagep, DB_MPOOL_DIRTY)) != 0) {
+ if ((ret =
+ mpf->put(mpf, nn_pagep, DB_MPOOL_DIRTY)) != 0) {
nn_pagep = NULL;
goto err;
}
@@ -703,26 +710,30 @@ __ham_del_pair(dbc, reclaim_page)
* Update cursors to reflect the fact that records
* on the second page have moved to the first page.
*/
- if ((ret = __ham_c_chgpg(dbc,
- PGNO(n_pagep), NDX_INVALID, PGNO(p), NDX_INVALID)) != 0)
- return (ret);
+ if ((ret = __ham_c_delpg(dbc, PGNO(n_pagep),
+ PGNO(p), 0, DB_HAM_DELFIRSTPG, &order)) != 0)
+ goto err;
/*
* Update the cursor to reflect its new position.
*/
hcp->indx = 0;
hcp->pgno = PGNO(p);
- if ((ret = memp_fset(dbp->mpf, p, DB_MPOOL_DIRTY)) != 0 ||
- (ret = __db_free(dbc, n_pagep)) != 0)
- return (ret);
+ hcp->order += order;
+
+ if ((ret = mpf->set(mpf, p, DB_MPOOL_DIRTY)) != 0)
+ goto err;
+ if ((ret = __db_free(dbc, n_pagep)) != 0) {
+ n_pagep = NULL;
+ goto err;
+ }
} else {
- if ((ret =
- memp_fget(dbp->mpf, &PREV_PGNO(p), 0, &p_pagep)) != 0)
+ if ((ret = mpf->get(mpf, &PREV_PGNO(p), 0, &p_pagep)) != 0)
goto err;
if (NEXT_PGNO(p) != PGNO_INVALID) {
- if ((ret = memp_fget(dbp->mpf,
- &NEXT_PGNO(p), 0, &n_pagep)) != 0)
+ if ((ret =
+ mpf->get(mpf, &NEXT_PGNO(p), 0, &n_pagep)) != 0)
goto err;
n_lsn = &LSN(n_pagep);
} else {
@@ -734,32 +745,40 @@ __ham_del_pair(dbc, reclaim_page)
if (n_pagep != NULL)
PREV_PGNO(n_pagep) = PGNO(p_pagep);
- if (DB_LOGGING(dbc)) {
- if ((ret = __ham_newpage_log(dbenv,
- dbc->txn, &new_lsn, 0, DELOVFL,
- dbp->log_fileid, PREV_PGNO(p), &LSN(p_pagep),
+ if (DBC_LOGGING(dbc)) {
+ if ((ret = __ham_newpage_log(dbp, dbc->txn,
+ &new_lsn, 0, DELOVFL, PREV_PGNO(p), &LSN(p_pagep),
PGNO(p), &LSN(p), NEXT_PGNO(p), n_lsn)) != 0)
goto err;
+ } else
+ LSN_NOT_LOGGED(new_lsn);
+
+ /* Move lsn onto page. */
+ LSN(p_pagep) = new_lsn; /* Structure assignment. */
+ if (n_pagep)
+ LSN(n_pagep) = new_lsn;
+ LSN(p) = new_lsn;
- /* Move lsn onto page. */
- LSN(p_pagep) = new_lsn; /* Structure assignment. */
- if (n_pagep)
- LSN(n_pagep) = new_lsn;
- LSN(p) = new_lsn;
- }
if (NEXT_PGNO(p) == PGNO_INVALID) {
/*
* There is no next page; put the cursor on the
* previous page as if we'd deleted the last item
- * on that page; index greater than number of
- * valid entries and H_DELETED set.
+ * on that page, with index after the last valid
+ * entry.
+ *
+ * The deleted flag was set up above.
*/
hcp->pgno = PGNO(p_pagep);
hcp->indx = NUM_ENT(p_pagep);
- F_SET(hcp, H_DELETED);
+ op = DB_HAM_DELLASTPG;
} else {
+ /*
+ * There is a next page, so put the cursor at
+ * the beginning of it.
+ */
hcp->pgno = NEXT_PGNO(p);
hcp->indx = 0;
+ op = DB_HAM_DELMIDPG;
}
/*
@@ -770,26 +789,28 @@ __ham_del_pair(dbc, reclaim_page)
hcp->page = NULL;
chg_pgno = PGNO(p);
ret = __db_free(dbc, p);
- if ((t_ret = memp_fput(dbp->mpf, p_pagep, DB_MPOOL_DIRTY)) != 0
- && ret == 0)
+ if ((t_ret =
+ mpf->put(mpf, p_pagep, DB_MPOOL_DIRTY)) != 0 && ret == 0)
ret = t_ret;
- if (n_pagep != NULL && (t_ret = memp_fput(dbp->mpf,
- n_pagep, DB_MPOOL_DIRTY)) != 0 && ret == 0)
+ if (n_pagep != NULL && (t_ret =
+ mpf->put(mpf, n_pagep, DB_MPOOL_DIRTY)) != 0 && ret == 0)
ret = t_ret;
if (ret != 0)
return (ret);
- ret = __ham_c_chgpg(dbc,
- chg_pgno, 0, hcp->pgno, hcp->indx);
+ if ((ret = __ham_c_delpg(dbc,
+ chg_pgno, hcp->pgno, hcp->indx, op, &order)) != 0)
+ return (ret);
+ hcp->order += order;
}
return (ret);
err: /* Clean up any pages. */
if (n_pagep != NULL)
- (void)memp_fput(dbp->mpf, n_pagep, 0);
+ (void)mpf->put(mpf, n_pagep, 0);
if (nn_pagep != NULL)
- (void)memp_fput(dbp->mpf, nn_pagep, 0);
+ (void)mpf->put(mpf, nn_pagep, 0);
if (p_pagep != NULL)
- (void)memp_fput(dbp->mpf, p_pagep, 0);
+ (void)mpf->put(mpf, p_pagep, 0);
return (ret);
}
@@ -807,12 +828,13 @@ __ham_replpair(dbc, dbt, make_dup)
u_int32_t make_dup;
{
DB *dbp;
- HASH_CURSOR *hcp;
DBT old_dbt, tdata, tmp;
+ DB_ENV *dbenv;
DB_LSN new_lsn;
+ HASH_CURSOR *hcp;
int32_t change; /* XXX: Possible overflow. */
- u_int32_t dup, len, memsize;
- int is_big, ret, type;
+ u_int32_t dup_flag, len, memsize;
+ int beyond_eor, is_big, ret, type;
u_int8_t *beg, *dest, *end, *hk, *src;
void *memp;
@@ -828,6 +850,7 @@ __ham_replpair(dbc, dbt, make_dup)
* add.
*/
dbp = dbc->dbp;
+ dbenv = dbp->dbenv;
hcp = (HASH_CURSOR *)dbc->internal;
/*
@@ -841,19 +864,21 @@ __ham_replpair(dbc, dbt, make_dup)
*/
change = dbt->size - dbt->dlen;
- hk = H_PAIRDATA(hcp->page, hcp->indx);
+ hk = H_PAIRDATA(dbp, hcp->page, hcp->indx);
is_big = HPAGE_PTYPE(hk) == H_OFFPAGE;
if (is_big)
memcpy(&len, HOFFPAGE_TLEN(hk), sizeof(u_int32_t));
else
- len = LEN_HKEYDATA(hcp->page,
+ len = LEN_HKEYDATA(dbp, hcp->page,
dbp->pgsize, H_DATAINDEX(hcp->indx));
- if (dbt->doff + dbt->dlen > len)
+ beyond_eor = dbt->doff + dbt->dlen > len;
+ if (beyond_eor)
change += dbt->doff + dbt->dlen - len;
- if (change > (int32_t)P_FREESPACE(hcp->page) || is_big) {
+ if (change > (int32_t)P_FREESPACE(dbp, hcp->page) ||
+ beyond_eor || is_big) {
/*
* Case 3 -- two subcases.
* A. This is not really a partial operation, but an overwrite.
@@ -868,16 +893,16 @@ __ham_replpair(dbc, dbt, make_dup)
memset(&tmp, 0, sizeof(tmp));
if ((ret =
__db_ret(dbp, hcp->page, H_KEYINDEX(hcp->indx),
- &tmp, &dbc->rkey.data, &dbc->rkey.ulen)) != 0)
+ &tmp, &dbc->rkey->data, &dbc->rkey->ulen)) != 0)
return (ret);
/* Preserve duplicate info. */
- dup = F_ISSET(hcp, H_ISDUP);
+ dup_flag = F_ISSET(hcp, H_ISDUP);
if (dbt->doff == 0 && dbt->dlen == len) {
ret = __ham_del_pair(dbc, 0);
if (ret == 0)
ret = __ham_add_el(dbc,
- &tmp, dbt, dup ? H_DUPLICATE : H_KEYDATA);
+ &tmp, dbt, dup_flag ? H_DUPLICATE : H_KEYDATA);
} else { /* Case B */
type = HPAGE_PTYPE(hk) != H_OFFPAGE ?
HPAGE_PTYPE(hk) : H_KEYDATA;
@@ -891,15 +916,14 @@ __ham_replpair(dbc, dbt, make_dup)
/* Now we can delete the item. */
if ((ret = __ham_del_pair(dbc, 0)) != 0) {
- __os_free(memp, memsize);
+ __os_free(dbenv, memp);
goto err;
}
/* Now shift old data around to make room for new. */
if (change > 0) {
- if ((ret = __os_realloc(dbp->dbenv,
- tdata.size + change,
- NULL, &tdata.data)) != 0)
+ if ((ret = __os_realloc(dbenv,
+ tdata.size + change, &tdata.data)) != 0)
return (ret);
memp = tdata.data;
memsize = tdata.size + change;
@@ -920,9 +944,9 @@ __ham_replpair(dbc, dbt, make_dup)
/* Now add the pair. */
ret = __ham_add_el(dbc, &tmp, &tdata, type);
- __os_free(memp, memsize);
+ __os_free(dbenv, memp);
}
- F_SET(hcp, dup);
+ F_SET(hcp, dup_flag);
err: return (ret);
}
@@ -930,7 +954,7 @@ err: return (ret);
* Set up pointer into existing data. Do it before the log
* message so we can use it inside of the log setup.
*/
- beg = HKEYDATA_DATA(H_PAIRDATA(hcp->page, hcp->indx));
+ beg = HKEYDATA_DATA(H_PAIRDATA(dbp, hcp->page, hcp->indx));
beg += dbt->doff;
/*
@@ -938,20 +962,22 @@ err: return (ret);
* all the parameters here. Then log the call before moving
* anything around.
*/
- if (DB_LOGGING(dbc)) {
+ if (DBC_LOGGING(dbc)) {
old_dbt.data = beg;
old_dbt.size = dbt->dlen;
- if ((ret = __ham_replace_log(dbp->dbenv,
- dbc->txn, &new_lsn, 0, dbp->log_fileid, PGNO(hcp->page),
+ if ((ret = __ham_replace_log(dbp,
+ dbc->txn, &new_lsn, 0, PGNO(hcp->page),
(u_int32_t)H_DATAINDEX(hcp->indx), &LSN(hcp->page),
(u_int32_t)dbt->doff, &old_dbt, dbt, make_dup)) != 0)
return (ret);
- LSN(hcp->page) = new_lsn; /* Structure assignment. */
- }
+ } else
+ LSN_NOT_LOGGED(new_lsn);
+
+ LSN(hcp->page) = new_lsn; /* Structure assignment. */
- __ham_onpage_replace(hcp->page, dbp->pgsize,
- (u_int32_t)H_DATAINDEX(hcp->indx), (int32_t)dbt->doff, change, dbt);
+ __ham_onpage_replace(dbp, hcp->page, (u_int32_t)H_DATAINDEX(hcp->indx),
+ (int32_t)dbt->doff, change, dbt);
return (0);
}
@@ -967,34 +993,41 @@ err: return (ret);
* off: Offset at which we are beginning the replacement.
* change: the number of bytes (+ or -) that the element is growing/shrinking.
* dbt: the new data that gets written at beg.
- * PUBLIC: void __ham_onpage_replace __P((PAGE *, size_t, u_int32_t, int32_t,
- * PUBLIC: int32_t, DBT *));
+ *
+ * PUBLIC: void __ham_onpage_replace __P((DB *, PAGE *, u_int32_t,
+ * PUBLIC: int32_t, int32_t, DBT *));
*/
void
-__ham_onpage_replace(pagep, pgsize, ndx, off, change, dbt)
+__ham_onpage_replace(dbp, pagep, ndx, off, change, dbt)
+ DB *dbp;
PAGE *pagep;
- size_t pgsize;
u_int32_t ndx;
int32_t off;
int32_t change;
DBT *dbt;
{
- db_indx_t i;
+ db_indx_t i, *inp;
int32_t len;
+ size_t pgsize;
u_int8_t *src, *dest;
int zero_me;
+ pgsize = dbp->pgsize;
+ inp = P_INP(dbp, pagep);
if (change != 0) {
zero_me = 0;
src = (u_int8_t *)(pagep) + HOFFSET(pagep);
if (off < 0)
- len = pagep->inp[ndx] - HOFFSET(pagep);
- else if ((u_int32_t)off >= LEN_HKEYDATA(pagep, pgsize, ndx)) {
- len = HKEYDATA_DATA(P_ENTRY(pagep, ndx)) +
- LEN_HKEYDATA(pagep, pgsize, ndx) - src;
+ len = inp[ndx] - HOFFSET(pagep);
+ else if ((u_int32_t)off >=
+ LEN_HKEYDATA(dbp, pagep, pgsize, ndx)) {
+ len = (int32_t)(HKEYDATA_DATA(P_ENTRY(dbp, pagep, ndx))
+ + LEN_HKEYDATA(dbp, pagep, pgsize, ndx) - src);
zero_me = 1;
} else
- len = (HKEYDATA_DATA(P_ENTRY(pagep, ndx)) + off) - src;
+ len = (int32_t)(
+ (HKEYDATA_DATA(P_ENTRY(dbp, pagep, ndx)) + off) -
+ src);
dest = src - change;
memmove(dest, src, len);
if (zero_me)
@@ -1002,14 +1035,14 @@ __ham_onpage_replace(pagep, pgsize, ndx, off, change, dbt)
/* Now update the indices. */
for (i = ndx; i < NUM_ENT(pagep); i++)
- pagep->inp[i] -= change;
+ inp[i] -= change;
HOFFSET(pagep) -= change;
}
if (off >= 0)
- memcpy(HKEYDATA_DATA(P_ENTRY(pagep, ndx)) + off,
+ memcpy(HKEYDATA_DATA(P_ENTRY(dbp, pagep, ndx)) + off,
dbt->data, dbt->size);
else
- memcpy(P_ENTRY(pagep, ndx), dbt->data, dbt->size);
+ memcpy(P_ENTRY(dbp, pagep, ndx), dbt->data, dbt->size);
}
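Because hash items are packed downward from the end of the page, a partial replacement that changes an item's length shifts everything below it and adjusts the inp[] offsets. A concrete illustration with assumed sizes:

/*
 * Illustration (assumed sizes): replacing dlen = 5 bytes at doff = 10 with
 * an 8-byte dbt gives change = +3.  The bytes from HOFFSET(pagep) up to the
 * replacement point are moved 3 bytes toward the start of the page, every
 * inp[i] for i >= ndx drops by 3, and HOFFSET(pagep) drops by 3.
 */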
/*
@@ -1022,10 +1055,12 @@ __ham_split_page(dbc, obucket, nbucket)
{
DB *dbp;
DBC **carray;
- HASH_CURSOR *hcp, *cp;
DBT key, page_dbt;
DB_ENV *dbenv;
+ DB_LOCK block;
DB_LSN new_lsn;
+ DB_MPOOLFILE *mpf;
+ HASH_CURSOR *hcp, *cp;
PAGE **pp, *old_pagep, *temp_pagep, *new_pagep;
db_indx_t n;
db_pgno_t bucket_pgno, npgno, next_pgno;
@@ -1034,22 +1069,24 @@ __ham_split_page(dbc, obucket, nbucket)
void *big_buf;
dbp = dbc->dbp;
- hcp = (HASH_CURSOR *)dbc->internal;
dbenv = dbp->dbenv;
+ mpf = dbp->mpf;
+ hcp = (HASH_CURSOR *)dbc->internal;
temp_pagep = old_pagep = new_pagep = NULL;
-
- if ((ret = __ham_get_clist(dbp, obucket, NDX_INVALID, &carray)) != 0)
- return (ret);
+ carray = NULL;
+ LOCK_INIT(block);
bucket_pgno = BUCKET_TO_PAGE(hcp, obucket);
- if ((ret = memp_fget(dbp->mpf,
+ if ((ret = __db_lget(dbc,
+ 0, bucket_pgno, DB_LOCK_WRITE, 0, &block)) != 0)
+ goto err;
+ if ((ret = mpf->get(mpf,
&bucket_pgno, DB_MPOOL_CREATE, &old_pagep)) != 0)
goto err;
/* Properly initialize the new bucket page. */
npgno = BUCKET_TO_PAGE(hcp, nbucket);
- if ((ret = memp_fget(dbp->mpf,
- &npgno, DB_MPOOL_CREATE, &new_pagep)) != 0)
+ if ((ret = mpf->get(mpf, &npgno, DB_MPOOL_CREATE, &new_pagep)) != 0)
goto err;
P_INIT(new_pagep,
dbp->pgsize, npgno, PGNO_INVALID, PGNO_INVALID, 0, P_HASH);
@@ -1057,33 +1094,35 @@ __ham_split_page(dbc, obucket, nbucket)
temp_pagep = hcp->split_buf;
memcpy(temp_pagep, old_pagep, dbp->pgsize);
- if (DB_LOGGING(dbc)) {
+ if (DBC_LOGGING(dbc)) {
page_dbt.size = dbp->pgsize;
page_dbt.data = old_pagep;
- if ((ret = __ham_splitdata_log(dbenv,
- dbc->txn, &new_lsn, 0, dbp->log_fileid, SPLITOLD,
+ if ((ret = __ham_splitdata_log(dbp,
+ dbc->txn, &new_lsn, 0, SPLITOLD,
PGNO(old_pagep), &page_dbt, &LSN(old_pagep))) != 0)
goto err;
- }
+ } else
+ LSN_NOT_LOGGED(new_lsn);
+
+ LSN(old_pagep) = new_lsn; /* Structure assignment. */
P_INIT(old_pagep, dbp->pgsize, PGNO(old_pagep), PGNO_INVALID,
PGNO_INVALID, 0, P_HASH);
- if (DB_LOGGING(dbc))
- LSN(old_pagep) = new_lsn; /* Structure assignment. */
-
big_len = 0;
big_buf = NULL;
key.flags = 0;
while (temp_pagep != NULL) {
+ if ((ret = __ham_get_clist(dbp,
+ PGNO(temp_pagep), NDX_INVALID, &carray)) != 0)
+ goto err;
+
for (n = 0; n < (db_indx_t)NUM_ENT(temp_pagep); n += 2) {
- if ((ret =
- __db_ret(dbp, temp_pagep, H_KEYINDEX(n),
- &key, &big_buf, &big_len)) != 0)
+ if ((ret = __db_ret(dbp, temp_pagep,
+ H_KEYINDEX(n), &key, &big_buf, &big_len)) != 0)
goto err;
- if (__ham_call_hash(dbc, key.data, key.size)
- == obucket)
+ if (__ham_call_hash(dbc, key.data, key.size) == obucket)
pp = &old_pagep;
else
pp = &new_pagep;
@@ -1092,25 +1131,24 @@ __ham_split_page(dbc, obucket, nbucket)
* Figure out how many bytes we need on the new
* page to store the key/data pair.
*/
-
- len = LEN_HITEM(temp_pagep, dbp->pgsize,
+ len = LEN_HITEM(dbp, temp_pagep, dbp->pgsize,
H_DATAINDEX(n)) +
- LEN_HITEM(temp_pagep, dbp->pgsize,
+ LEN_HITEM(dbp, temp_pagep, dbp->pgsize,
H_KEYINDEX(n)) +
2 * sizeof(db_indx_t);
- if (P_FREESPACE(*pp) < len) {
- if (DB_LOGGING(dbc)) {
+ if (P_FREESPACE(dbp, *pp) < len) {
+ if (DBC_LOGGING(dbc)) {
page_dbt.size = dbp->pgsize;
page_dbt.data = *pp;
- if ((ret = __ham_splitdata_log(
- dbenv, dbc->txn,
- &new_lsn, 0, dbp->log_fileid,
+ if ((ret = __ham_splitdata_log(dbp,
+ dbc->txn, &new_lsn, 0,
SPLITNEW, PGNO(*pp), &page_dbt,
&LSN(*pp))) != 0)
goto err;
- LSN(*pp) = new_lsn;
- }
+ } else
+ LSN_NOT_LOGGED(new_lsn);
+ LSN(*pp) = new_lsn;
if ((ret =
__ham_add_ovflpage(dbc, *pp, 1, pp)) != 0)
goto err;
@@ -1122,28 +1160,25 @@ __ham_split_page(dbc, obucket, nbucket)
for (i = 0; carray[i] != NULL; i++) {
cp =
(HASH_CURSOR *)carray[i]->internal;
- if (cp->pgno == PGNO(temp_pagep)
- && cp->indx == n) {
+ if (cp->pgno == PGNO(temp_pagep) &&
+ cp->indx == n) {
cp->pgno = PGNO(*pp);
cp->indx = NUM_ENT(*pp);
found = 1;
}
}
- if (found && DB_LOGGING(dbc)
- && IS_SUBTRANSACTION(dbc->txn)) {
+ if (found && DBC_LOGGING(dbc) &&
+ IS_SUBTRANSACTION(dbc->txn)) {
if ((ret =
- __ham_chgpg_log(dbp->dbenv,
+ __ham_chgpg_log(dbp,
dbc->txn, &new_lsn, 0,
- dbp->log_fileid,
DB_HAM_SPLIT, PGNO(temp_pagep),
PGNO(*pp), n, NUM_ENT(*pp))) != 0)
goto err;
}
}
- __ham_copy_item(dbp->pgsize,
- temp_pagep, H_KEYINDEX(n), *pp);
- __ham_copy_item(dbp->pgsize,
- temp_pagep, H_DATAINDEX(n), *pp);
+ __ham_copy_item(dbp, temp_pagep, H_KEYINDEX(n), *pp);
+ __ham_copy_item(dbp, temp_pagep, H_DATAINDEX(n), *pp);
}
next_pgno = NEXT_PGNO(temp_pagep);
@@ -1156,23 +1191,30 @@ __ham_split_page(dbc, obucket, nbucket)
if (next_pgno == PGNO_INVALID)
temp_pagep = NULL;
- else if ((ret = memp_fget(dbp->mpf,
- &next_pgno, DB_MPOOL_CREATE, &temp_pagep)) != 0)
+ else if ((ret = mpf->get(
+ mpf, &next_pgno, DB_MPOOL_CREATE, &temp_pagep)) != 0)
goto err;
- if (temp_pagep != NULL && DB_LOGGING(dbc)) {
- page_dbt.size = dbp->pgsize;
- page_dbt.data = temp_pagep;
- if ((ret = __ham_splitdata_log(dbenv,
- dbc->txn, &new_lsn, 0, dbp->log_fileid,
- SPLITOLD, PGNO(temp_pagep),
- &page_dbt, &LSN(temp_pagep))) != 0)
- goto err;
+ if (temp_pagep != NULL) {
+ if (DBC_LOGGING(dbc)) {
+ page_dbt.size = dbp->pgsize;
+ page_dbt.data = temp_pagep;
+ if ((ret = __ham_splitdata_log(dbp,
+ dbc->txn, &new_lsn, 0,
+ SPLITOLD, PGNO(temp_pagep),
+ &page_dbt, &LSN(temp_pagep))) != 0)
+ goto err;
+ } else
+ LSN_NOT_LOGGED(new_lsn);
LSN(temp_pagep) = new_lsn;
}
+
+ if (carray != NULL) /* We never knew its size. */
+ __os_free(dbenv, carray);
+ carray = NULL;
}
if (big_buf != NULL)
- __os_free(big_buf, big_len);
+ __os_free(dbenv, big_buf);
/*
* If the original bucket spanned multiple pages, then we've got
@@ -1188,37 +1230,43 @@ __ham_split_page(dbc, obucket, nbucket)
/*
* Write new buckets out.
*/
- if (DB_LOGGING(dbc)) {
+ if (DBC_LOGGING(dbc)) {
page_dbt.size = dbp->pgsize;
page_dbt.data = old_pagep;
- if ((ret = __ham_splitdata_log(dbenv, dbc->txn, &new_lsn, 0,
- dbp->log_fileid, SPLITNEW, PGNO(old_pagep), &page_dbt,
+ if ((ret = __ham_splitdata_log(dbp, dbc->txn,
+ &new_lsn, 0, SPLITNEW, PGNO(old_pagep), &page_dbt,
&LSN(old_pagep))) != 0)
goto err;
LSN(old_pagep) = new_lsn;
page_dbt.data = new_pagep;
- if ((ret = __ham_splitdata_log(dbenv, dbc->txn, &new_lsn, 0,
- dbp->log_fileid, SPLITNEW, PGNO(new_pagep), &page_dbt,
+ if ((ret = __ham_splitdata_log(dbp, dbc->txn, &new_lsn, 0,
+ SPLITNEW, PGNO(new_pagep), &page_dbt,
&LSN(new_pagep))) != 0)
goto err;
LSN(new_pagep) = new_lsn;
+ } else {
+ LSN_NOT_LOGGED(LSN(old_pagep));
+ LSN_NOT_LOGGED(LSN(new_pagep));
}
- ret = memp_fput(dbp->mpf, old_pagep, DB_MPOOL_DIRTY);
- if ((t_ret = memp_fput(dbp->mpf, new_pagep, DB_MPOOL_DIRTY)) != 0
- && ret == 0)
+
+ ret = mpf->put(mpf, old_pagep, DB_MPOOL_DIRTY);
+ if ((t_ret =
+ mpf->put(mpf, new_pagep, DB_MPOOL_DIRTY)) != 0 && ret == 0)
ret = t_ret;
if (0) {
err: if (old_pagep != NULL)
- (void)memp_fput(dbp->mpf, old_pagep, DB_MPOOL_DIRTY);
+ (void)mpf->put(mpf, old_pagep, DB_MPOOL_DIRTY);
if (new_pagep != NULL)
- (void)memp_fput(dbp->mpf, new_pagep, DB_MPOOL_DIRTY);
+ (void)mpf->put(mpf, new_pagep, DB_MPOOL_DIRTY);
if (temp_pagep != NULL && PGNO(temp_pagep) != bucket_pgno)
- (void)memp_fput(dbp->mpf, temp_pagep, DB_MPOOL_DIRTY);
+ (void)mpf->put(mpf, temp_pagep, DB_MPOOL_DIRTY);
}
+ if (LOCK_ISSET(block))
+ __TLPUT(dbc, block);
if (carray != NULL) /* We never knew its size. */
- __os_free(carray, 0);
+ __os_free(dbenv, carray);
return (ret);
}
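Stripped of logging, overflow-page handling, and cursor maintenance, the heart of __ham_split_page above is a re-hash-and-copy loop over every pair in the old bucket's chain; a sketch:

	/* Core of the redistribution; paging, logging and cursors elided. */
	for (n = 0; n < (db_indx_t)NUM_ENT(temp_pagep); n += 2) {
		if ((ret = __db_ret(dbp, temp_pagep,
		    H_KEYINDEX(n), &key, &big_buf, &big_len)) != 0)
			goto err;
		/* The key re-hashes to either the old bucket or the new one. */
		pp = __ham_call_hash(dbc, key.data, key.size) == obucket ?
		    &old_pagep : &new_pagep;
		__ham_copy_item(dbp, temp_pagep, H_KEYINDEX(n), *pp);
		__ham_copy_item(dbp, temp_pagep, H_DATAINDEX(n), *pp);
	}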
@@ -1237,11 +1285,12 @@ __ham_add_el(dbc, key, val, type)
const DBT *key, *val;
int type;
{
- DB *dbp;
- HASH_CURSOR *hcp;
const DBT *pkey, *pdata;
+ DB *dbp;
DBT key_dbt, data_dbt;
DB_LSN new_lsn;
+ DB_MPOOLFILE *mpf;
+ HASH_CURSOR *hcp;
HOFFPAGE doff, koff;
db_pgno_t next_pgno, pgno;
u_int32_t data_size, key_size, pairsize, rectype;
@@ -1249,13 +1298,14 @@ __ham_add_el(dbc, key, val, type)
int key_type, data_type;
dbp = dbc->dbp;
+ mpf = dbp->mpf;
hcp = (HASH_CURSOR *)dbc->internal;
do_expand = 0;
- pgno = hcp->seek_found_page != PGNO_INVALID ? hcp->seek_found_page :
- hcp->pgno;
- if (hcp->page == NULL && (ret = memp_fget(dbp->mpf, &pgno,
- DB_MPOOL_CREATE, &hcp->page)) != 0)
+ pgno = hcp->seek_found_page != PGNO_INVALID ?
+ hcp->seek_found_page : hcp->pgno;
+ if (hcp->page == NULL &&
+ (ret = mpf->get(mpf, &pgno, DB_MPOOL_CREATE, &hcp->page)) != 0)
return (ret);
key_size = HKEYDATA_PSIZE(key->size);
@@ -1276,21 +1326,20 @@ __ham_add_el(dbc, key, val, type)
* anyway. Check if it's a bigpair that fits or a regular
* pair that fits.
*/
- if (P_FREESPACE(hcp->page) >= pairsize)
+ if (P_FREESPACE(dbp, hcp->page) >= pairsize)
break;
next_pgno = NEXT_PGNO(hcp->page);
- if ((ret =
- __ham_next_cpage(dbc, next_pgno, 0)) != 0)
+ if ((ret = __ham_next_cpage(dbc, next_pgno, 0)) != 0)
return (ret);
}
/*
* Check if we need to allocate a new page.
*/
- if (P_FREESPACE(hcp->page) < pairsize) {
+ if (P_FREESPACE(dbp, hcp->page) < pairsize) {
do_expand = 1;
if ((ret = __ham_add_ovflpage(dbc,
- (PAGE *)hcp->page, 1, (PAGE **)&hcp->page)) != 0)
+ (PAGE *)hcp->page, 1, (PAGE **)&hcp->page)) != 0)
return (ret);
hcp->pgno = PGNO(hcp->page);
}
@@ -1334,7 +1383,7 @@ __ham_add_el(dbc, key, val, type)
data_type = type;
}
- if (DB_LOGGING(dbc)) {
+ if (DBC_LOGGING(dbc)) {
rectype = PUTPAIR;
if (is_databig)
rectype |= PAIR_DATAMASK;
@@ -1343,18 +1392,18 @@ __ham_add_el(dbc, key, val, type)
if (type == H_DUPLICATE)
rectype |= PAIR_DUPMASK;
- if ((ret = __ham_insdel_log(dbp->dbenv, dbc->txn, &new_lsn, 0,
- rectype, dbp->log_fileid, PGNO(hcp->page),
- (u_int32_t)NUM_ENT(hcp->page), &LSN(hcp->page), pkey,
- pdata)) != 0)
+ if ((ret = __ham_insdel_log(dbp, dbc->txn, &new_lsn, 0,
+ rectype, PGNO(hcp->page), (u_int32_t)NUM_ENT(hcp->page),
+ &LSN(hcp->page), pkey, pdata)) != 0)
return (ret);
+ } else
+ LSN_NOT_LOGGED(new_lsn);
- /* Move lsn onto page. */
- LSN(hcp->page) = new_lsn; /* Structure assignment. */
- }
+ /* Move lsn onto page. */
+ LSN(hcp->page) = new_lsn; /* Structure assignment. */
- __ham_putitem(hcp->page, pkey, key_type);
- __ham_putitem(hcp->page, pdata, data_type);
+ __ham_putitem(dbp, hcp->page, pkey, key_type);
+ __ham_putitem(dbp, hcp->page, pdata, data_type);
/*
* For splits, we are going to update item_info's page number
@@ -1369,8 +1418,11 @@ __ham_add_el(dbc, key, val, type)
* XXX
* Maybe keep incremental numbers here.
*/
- if (!STD_LOCKING(dbc))
+ if (!STD_LOCKING(dbc)) {
hcp->hdr->nelem++;
+ if ((ret = __ham_dirty_meta(dbc)) != 0)
+ return (ret);
+ }
if (do_expand || (hcp->hdr->ffactor != 0 &&
(u_int32_t)H_NUMPAIRS(hcp->page) > hcp->hdr->ffactor))
@@ -1384,28 +1436,32 @@ __ham_add_el(dbc, key, val, type)
* H_DUPLICATE, H_OFFDUP). Since we log splits at a high level, we
* do not need to do any logging here.
*
- * PUBLIC: void __ham_copy_item __P((size_t, PAGE *, u_int32_t, PAGE *));
+ * PUBLIC: void __ham_copy_item __P((DB *, PAGE *, u_int32_t, PAGE *));
*/
void
-__ham_copy_item(pgsize, src_page, src_ndx, dest_page)
- size_t pgsize;
+__ham_copy_item(dbp, src_page, src_ndx, dest_page)
+ DB *dbp;
PAGE *src_page;
u_int32_t src_ndx;
PAGE *dest_page;
{
u_int32_t len;
+ size_t pgsize;
void *src, *dest;
+ db_indx_t *inp;
+ pgsize = dbp->pgsize;
+ inp = P_INP(dbp, dest_page);
/*
* Copy the key and data entries onto this new page.
*/
- src = P_ENTRY(src_page, src_ndx);
+ src = P_ENTRY(dbp, src_page, src_ndx);
/* Set up space on dest. */
- len = LEN_HITEM(src_page, pgsize, src_ndx);
+ len = (u_int32_t)LEN_HITEM(dbp, src_page, pgsize, src_ndx);
HOFFSET(dest_page) -= len;
- dest_page->inp[NUM_ENT(dest_page)] = HOFFSET(dest_page);
- dest = P_ENTRY(dest_page, NUM_ENT(dest_page));
+ inp[NUM_ENT(dest_page)] = HOFFSET(dest_page);
+ dest = P_ENTRY(dbp, dest_page, NUM_ENT(dest_page));
NUM_ENT(dest_page)++;
memcpy(dest, src, len);
@@ -1414,8 +1470,8 @@ __ham_copy_item(pgsize, src_page, src_ndx, dest_page)
/*
*
* Returns:
- * pointer on success
- * NULL on error
+ * pointer on success
+ * NULL on error
*
* PUBLIC: int __ham_add_ovflpage __P((DBC *, PAGE *, int, PAGE **));
*/
@@ -1427,31 +1483,33 @@ __ham_add_ovflpage(dbc, pagep, release, pp)
PAGE **pp;
{
DB *dbp;
- HASH_CURSOR *hcp;
DB_LSN new_lsn;
+ DB_MPOOLFILE *mpf;
PAGE *new_pagep;
int ret;
dbp = dbc->dbp;
- hcp = (HASH_CURSOR *)dbc->internal;
+ mpf = dbp->mpf;
if ((ret = __db_new(dbc, P_HASH, &new_pagep)) != 0)
return (ret);
- if (DB_LOGGING(dbc)) {
- if ((ret = __ham_newpage_log(dbp->dbenv, dbc->txn, &new_lsn, 0,
- PUTOVFL, dbp->log_fileid, PGNO(pagep), &LSN(pagep),
+ if (DBC_LOGGING(dbc)) {
+ if ((ret = __ham_newpage_log(dbp, dbc->txn, &new_lsn, 0,
+ PUTOVFL, PGNO(pagep), &LSN(pagep),
PGNO(new_pagep), &LSN(new_pagep), PGNO_INVALID, NULL)) != 0)
return (ret);
+ } else
+ LSN_NOT_LOGGED(new_lsn);
- /* Move lsn onto page. */
- LSN(pagep) = LSN(new_pagep) = new_lsn;
- }
+ /* Move lsn onto page. */
+ LSN(pagep) = LSN(new_pagep) = new_lsn;
NEXT_PGNO(pagep) = PGNO(new_pagep);
+
PREV_PGNO(new_pagep) = PGNO(pagep);
if (release)
- ret = memp_fput(dbp->mpf, pagep, DB_MPOOL_DIRTY);
+ ret = mpf->put(mpf, pagep, DB_MPOOL_DIRTY);
*pp = new_pagep;
return (ret);
@@ -1467,10 +1525,12 @@ __ham_get_cpage(dbc, mode)
{
DB *dbp;
DB_LOCK tmp_lock;
+ DB_MPOOLFILE *mpf;
HASH_CURSOR *hcp;
int ret;
dbp = dbc->dbp;
+ mpf = dbp->mpf;
hcp = (HASH_CURSOR *)dbc->internal;
ret = 0;
@@ -1485,25 +1545,22 @@ __ham_get_cpage(dbc, mode)
* 4. If there is a lock, but it's for a different bucket, then we need
* to release the existing lock and get a new lock.
*/
- tmp_lock.off = LOCK_INVALID;
+ LOCK_INIT(tmp_lock);
if (STD_LOCKING(dbc)) {
- if (hcp->lock.off != LOCK_INVALID &&
- hcp->lbucket != hcp->bucket) { /* Case 4 */
- if (dbc->txn == NULL &&
- (ret = lock_put(dbp->dbenv, &hcp->lock)) != 0)
- return (ret);
- hcp->lock.off = LOCK_INVALID;
- }
- if ((hcp->lock.off != LOCK_INVALID &&
+ if (hcp->lbucket != hcp->bucket && /* Case 4 */
+ (ret = __TLPUT(dbc, hcp->lock)) != 0)
+ return (ret);
+
+ if ((LOCK_ISSET(hcp->lock) &&
(hcp->lock_mode == DB_LOCK_READ &&
mode == DB_LOCK_WRITE))) {
/* Case 3. */
tmp_lock = hcp->lock;
- hcp->lock.off = LOCK_INVALID;
+ LOCK_INIT(hcp->lock);
}
/* Acquire the lock. */
- if (hcp->lock.off == LOCK_INVALID)
+ if (!LOCK_ISSET(hcp->lock))
/* Cases 1, 3, and 4. */
if ((ret = __ham_lock_bucket(dbc, mode)) != 0)
return (ret);
@@ -1511,17 +1568,18 @@ __ham_get_cpage(dbc, mode)
if (ret == 0) {
hcp->lock_mode = mode;
hcp->lbucket = hcp->bucket;
- if (tmp_lock.off != LOCK_INVALID)
+ if (LOCK_ISSET(tmp_lock))
/* Case 3: release the original lock. */
- ret = lock_put(dbp->dbenv, &tmp_lock);
- } else if (tmp_lock.off != LOCK_INVALID)
+ ret =
+ dbp->dbenv->lock_put(dbp->dbenv, &tmp_lock);
+ } else if (LOCK_ISSET(tmp_lock))
hcp->lock = tmp_lock;
}
if (ret == 0 && hcp->page == NULL) {
if (hcp->pgno == PGNO_INVALID)
hcp->pgno = BUCKET_TO_PAGE(hcp, hcp->bucket);
- if ((ret = memp_fget(dbp->mpf,
+ if ((ret = mpf->get(mpf,
&hcp->pgno, DB_MPOOL_CREATE, &hcp->page)) != 0)
return (ret);
}
@@ -1543,18 +1601,21 @@ __ham_next_cpage(dbc, pgno, dirty)
int dirty;
{
DB *dbp;
+ DB_MPOOLFILE *mpf;
HASH_CURSOR *hcp;
PAGE *p;
int ret;
dbp = dbc->dbp;
+ mpf = dbp->mpf;
hcp = (HASH_CURSOR *)dbc->internal;
- if (hcp->page != NULL && (ret = memp_fput(dbp->mpf,
- hcp->page, dirty ? DB_MPOOL_DIRTY : 0)) != 0)
+ if (hcp->page != NULL &&
+ (ret = mpf->put(mpf, hcp->page, dirty ? DB_MPOOL_DIRTY : 0)) != 0)
return (ret);
+ hcp->page = NULL;
- if ((ret = memp_fget(dbp->mpf, &pgno, DB_MPOOL_CREATE, &p)) != 0)
+ if ((ret = mpf->get(mpf, &pgno, DB_MPOOL_CREATE, &p)) != 0)
return (ret);
hcp->page = p;
@@ -1576,7 +1637,7 @@ __ham_lock_bucket(dbc, mode)
db_lockmode_t mode;
{
HASH_CURSOR *hcp;
- u_int32_t flags;
+ db_pgno_t pgno;
int gotmeta, ret;
hcp = (HASH_CURSOR *)dbc->internal;
@@ -1584,17 +1645,12 @@ __ham_lock_bucket(dbc, mode)
if (gotmeta)
if ((ret = __ham_get_meta(dbc)) != 0)
return (ret);
- dbc->lock.pgno = BUCKET_TO_PAGE(hcp, hcp->bucket);
+ pgno = BUCKET_TO_PAGE(hcp, hcp->bucket);
if (gotmeta)
if ((ret = __ham_release_meta(dbc)) != 0)
return (ret);
- flags = 0;
- if (DB_NONBLOCK(dbc))
- LF_SET(DB_LOCK_NOWAIT);
-
- ret = lock_get(dbc->dbp->dbenv,
- dbc->locker, flags, &dbc->lock_dbt, mode, &hcp->lock);
+ ret = __db_lget(dbc, 0, pgno, mode, 0, &hcp->lock);
hcp->lock_mode = mode;
return (ret);
@@ -1606,6 +1662,9 @@ __ham_lock_bucket(dbc, mode)
* represents. The caller is responsible for freeing up duplicates
* or offpage entries that might be referenced by this pair.
*
+ * Recovery assumes that this may be called without the metadata
+ * page pinned.
+ *
* PUBLIC: void __ham_dpair __P((DB *, PAGE *, u_int32_t));
*/
void
@@ -1614,15 +1673,16 @@ __ham_dpair(dbp, p, indx)
PAGE *p;
u_int32_t indx;
{
- db_indx_t delta, n;
+ db_indx_t delta, n, *inp;
u_int8_t *dest, *src;
+ inp = P_INP(dbp, p);
/*
* Compute "delta", the amount we have to shift all of the
* offsets. To find the delta, we just need to calculate
* the size of the pair of elements we are removing.
*/
- delta = H_PAIRSIZE(p, dbp->pgsize, indx);
+ delta = H_PAIRSIZE(dbp, p, dbp->pgsize, indx);
/*
* The hard case: we want to remove something other than
@@ -1641,7 +1701,7 @@ __ham_dpair(dbp, p, indx)
* be an overlapping copy, so we have to use memmove.
*/
dest = src + delta;
- memmove(dest, src, p->inp[H_DATAINDEX(indx)] - HOFFSET(p));
+ memmove(dest, src, inp[H_DATAINDEX(indx)] - HOFFSET(p));
}
/* Adjust page metadata. */
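
__ham_dpair removes a key/data pair by computing delta, the size of the pair, sliding the packed data stored below it up by delta with an overlapping memmove, and then rewriting the remaining offsets. A standalone sketch of the same compaction on a toy page that removes a single item (the real code drops the key and its datum together, hence the inp[n + 2] in the offset loop):

    #include <stdio.h>
    #include <string.h>

    #define PGSIZE 64

    /*
     * Toy page: items are packed at the high end of buf and grow downward;
     * off[] holds each item's starting offset and hoffset is the lowest
     * offset in use.
     */
    struct toy_page {
        unsigned char buf[PGSIZE];
        unsigned short off[8];
        unsigned short nent;
        unsigned short hoffset;
    };

    static void
    put_item(struct toy_page *p, const char *s)
    {
        size_t len = strlen(s) + 1;

        p->hoffset -= (unsigned short)len;
        memcpy(p->buf + p->hoffset, s, len);
        p->off[p->nent++] = p->hoffset;
    }

    /* Delete item i the way __ham_dpair compacts a deleted pair. */
    static void
    del_item(struct toy_page *p, unsigned short i)
    {
        unsigned short delta, n;
        unsigned char *src, *dest;

        /* delta: size of the item being removed. */
        delta = (i == 0 ? PGSIZE : p->off[i - 1]) - p->off[i];

        /* Shift everything stored below it up by delta (overlapping). */
        src = p->buf + p->hoffset;
        dest = src + delta;
        memmove(dest, src, p->off[i] - p->hoffset);

        /* Adjust page metadata and the surviving offsets. */
        p->hoffset += delta;
        p->nent--;
        for (n = i; n < p->nent; n++)
            p->off[n] = p->off[n + 1] + delta;
    }

    int
    main(void)
    {
        struct toy_page p;

        memset(&p, 0, sizeof(p));
        p.hoffset = PGSIZE;
        put_item(&p, "alpha");
        put_item(&p, "beta");
        put_item(&p, "gamma");
        del_item(&p, 1);            /* drop "beta" */
        printf("%s %s\n",
            (char *)(p.buf + p.off[0]), (char *)(p.buf + p.off[1]));
        return (0);
    }

Running it prints "alpha gamma": the middle item is gone and the survivors were shifted up so the free space stays contiguous, which is why the copy must be a memmove.
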
@@ -1650,6 +1710,153 @@ __ham_dpair(dbp, p, indx)
/* Adjust the offsets. */
for (n = (db_indx_t)indx; n < (db_indx_t)(NUM_ENT(p)); n++)
- p->inp[n] = p->inp[n + 2] + delta;
+ inp[n] = inp[n + 2] + delta;
+
+}
+
+/*
+ * __ham_c_delpg --
+ *
+ * Adjust the cursors after we've emptied a page in a bucket, taking
+ * care that when we move cursors pointing to deleted items, their
+ * orders don't collide with the orders of cursors on the page we move
+ * them to (since after this function is called, cursors with the same
+ * index on the two pages will be otherwise indistinguishable--they'll
+ * all have pgno new_pgno). There are three cases:
+ *
+ * 1) The emptied page is the first page in the bucket. In this
+ * case, we've copied all the items from the second page into the
+ * first page, so the first page is new_pgno and the second page is
+ * old_pgno. new_pgno is empty, but can have deleted cursors
+ * pointing at indx 0, so we need to be careful of the orders
+ * there. This is DB_HAM_DELFIRSTPG.
+ *
+ * 2) The page is somewhere in the middle of a bucket. Our caller
+ * can just delete such a page, so it's old_pgno. old_pgno is
+ * empty, but may have deleted cursors pointing at indx 0, so we
+ * need to be careful of indx 0 when we move those cursors to
+ * new_pgno. This is DB_HAM_DELMIDPG.
+ *
+ * 3) The page is the last in a bucket. Again the empty page is
+ * old_pgno, and again it should only have cursors that are deleted
+ * and at indx == 0. This time, though, there's no next page to
+ * move them to, so we set them to indx == num_ent on the previous
+ * page--and indx == num_ent is the index whose cursors we need to
+ * be careful of. This is DB_HAM_DELLASTPG.
+ */
+static int
+__ham_c_delpg(dbc, old_pgno, new_pgno, num_ent, op, orderp)
+ DBC *dbc;
+ db_pgno_t old_pgno, new_pgno;
+ u_int32_t num_ent;
+ db_ham_mode op;
+ u_int32_t *orderp;
+{
+ DB *dbp, *ldbp;
+ DB_ENV *dbenv;
+ DB_LSN lsn;
+ DB_TXN *my_txn;
+ DBC *cp;
+ HASH_CURSOR *hcp;
+ int found, ret;
+ db_indx_t indx;
+ u_int32_t order;
+
+ /* Which is the worrisome index? */
+ indx = (op == DB_HAM_DELLASTPG) ? num_ent : 0;
+ dbp = dbc->dbp;
+ dbenv = dbp->dbenv;
+
+ my_txn = IS_SUBTRANSACTION(dbc->txn) ? dbc->txn : NULL;
+ found = 0;
+
+ MUTEX_THREAD_LOCK(dbenv, dbenv->dblist_mutexp);
+ /*
+ * Find the highest order of any cursor our movement
+ * may collide with.
+ */
+ order = 1;
+ for (ldbp = __dblist_get(dbenv, dbp->adj_fileid);
+ ldbp != NULL && ldbp->adj_fileid == dbp->adj_fileid;
+ ldbp = LIST_NEXT(ldbp, dblistlinks)) {
+ MUTEX_THREAD_LOCK(dbenv, dbp->mutexp);
+ for (cp = TAILQ_FIRST(&ldbp->active_queue); cp != NULL;
+ cp = TAILQ_NEXT(cp, links)) {
+ if (cp == dbc || cp->dbtype != DB_HASH)
+ continue;
+ hcp = (HASH_CURSOR *)cp->internal;
+ if (hcp->pgno == new_pgno) {
+ if (hcp->indx == indx &&
+ F_ISSET(hcp, H_DELETED) &&
+ hcp->order >= order)
+ order = hcp->order + 1;
+ DB_ASSERT(op != DB_HAM_DELFIRSTPG ||
+ hcp->indx == NDX_INVALID ||
+ (hcp->indx == 0 &&
+ F_ISSET(hcp, H_DELETED)));
+ }
+ }
+ MUTEX_THREAD_UNLOCK(dbenv, dbp->mutexp);
+ }
+
+ for (ldbp = __dblist_get(dbenv, dbp->adj_fileid);
+ ldbp != NULL && ldbp->adj_fileid == dbp->adj_fileid;
+ ldbp = LIST_NEXT(ldbp, dblistlinks)) {
+ MUTEX_THREAD_LOCK(dbenv, dbp->mutexp);
+ for (cp = TAILQ_FIRST(&ldbp->active_queue); cp != NULL;
+ cp = TAILQ_NEXT(cp, links)) {
+ if (cp == dbc || cp->dbtype != DB_HASH)
+ continue;
+
+ hcp = (HASH_CURSOR *)cp->internal;
+
+ if (hcp->pgno == old_pgno) {
+ switch (op) {
+ case DB_HAM_DELFIRSTPG:
+ /*
+ * We're moving all items,
+ * regardless of index.
+ */
+ hcp->pgno = new_pgno;
+
+ /*
+ * But we have to be careful of
+ * the order values.
+ */
+ if (hcp->indx == indx)
+ hcp->order += order;
+ break;
+ case DB_HAM_DELMIDPG:
+ hcp->pgno = new_pgno;
+ DB_ASSERT(hcp->indx == 0 &&
+ F_ISSET(hcp, H_DELETED));
+ hcp->order += order;
+ break;
+ case DB_HAM_DELLASTPG:
+ hcp->pgno = new_pgno;
+ DB_ASSERT(hcp->indx == 0 &&
+ F_ISSET(hcp, H_DELETED));
+ hcp->indx = indx;
+ hcp->order += order;
+ break;
+ default:
+ DB_ASSERT(0);
+ return (__db_panic(dbenv, EINVAL));
+ }
+ if (my_txn != NULL && cp->txn != my_txn)
+ found = 1;
+ }
+ }
+ MUTEX_THREAD_UNLOCK(dbenv, dbp->mutexp);
+ }
+ MUTEX_THREAD_UNLOCK(dbenv, dbenv->dblist_mutexp);
+
+ if (found != 0 && DBC_LOGGING(dbc)) {
+ if ((ret = __ham_chgpg_log(dbp, my_txn, &lsn, 0, op,
+ old_pgno, new_pgno, indx, order)) != 0)
+ return (ret);
+ }
+ *orderp = order;
+ return (0);
}
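
The new __ham_c_delpg makes two passes over the open cursors: the first finds the highest order already used at the worrisome index on the destination page, the second moves the affected cursors there and bumps their order by that amount, so the moved and pre-existing cursors stay distinguishable. A standalone sketch of the same two-pass adjustment with a hypothetical cursor struct; the real code also walks the environment's DB list under mutexes and logs the change with __ham_chgpg_log:

    #include <stdio.h>

    typedef unsigned int pgno_t;

    /* Hypothetical cursor: page, index on the page, deleted flag, order. */
    struct cur {
        pgno_t   pgno;
        unsigned indx;
        int      deleted;
        unsigned order;
    };

    /*
     * Move every cursor on old_pgno to new_pgno (DB_HAM_DELMIDPG-style),
     * keeping their orders above any colliding order already there.
     */
    static unsigned
    delpg_adjust(struct cur *curs, int ncurs, pgno_t old_pgno, pgno_t new_pgno)
    {
        unsigned order = 1;
        int i;

        /* Pass 1: highest conflicting order on the destination page. */
        for (i = 0; i < ncurs; i++)
            if (curs[i].pgno == new_pgno && curs[i].indx == 0 &&
                curs[i].deleted && curs[i].order >= order)
                order = curs[i].order + 1;

        /* Pass 2: move the cursors and offset their orders. */
        for (i = 0; i < ncurs; i++)
            if (curs[i].pgno == old_pgno) {
                curs[i].pgno = new_pgno;
                curs[i].order += order;
            }
        return (order);
    }

    int
    main(void)
    {
        struct cur c[] = {
            { 9, 0, 1, 2 },     /* already parked on the destination page */
            { 5, 0, 1, 1 },     /* on the page being emptied */
        };

        delpg_adjust(c, 2, 5, 9);
        printf("moved cursor order = %u\n", c[1].order);    /* 1 + 3 = 4 */
        return (0);
    }
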
diff --git a/bdb/hash/hash_rec.c b/bdb/hash/hash_rec.c
index ded58c281e9..24d3473c508 100644
--- a/bdb/hash/hash_rec.c
+++ b/bdb/hash/hash_rec.c
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Copyright (c) 1996-2002
* Sleepycat Software. All rights reserved.
*/
/*
@@ -43,7 +43,7 @@
#include "db_config.h"
#ifndef lint
-static const char revid[] = "$Id: hash_rec.c,v 11.34 2001/01/11 18:19:52 bostic Exp $";
+static const char revid[] = "$Id: hash_rec.c,v 11.69 2002/09/03 14:12:49 margo Exp $";
#endif /* not lint */
#ifndef NO_SYSTEM_INCLUDES
@@ -53,15 +53,12 @@ static const char revid[] = "$Id: hash_rec.c,v 11.34 2001/01/11 18:19:52 bostic
#endif
#include "db_int.h"
-#include "db_page.h"
-#include "db_shash.h"
-#include "btree.h"
-#include "hash.h"
-#include "lock.h"
-#include "log.h"
-#include "mp.h"
+#include "dbinc/db_page.h"
+#include "dbinc/btree.h"
+#include "dbinc/hash.h"
+#include "dbinc/log.h"
-static int __ham_alloc_pages __P((DB *, __ham_groupalloc_args *));
+static int __ham_alloc_pages __P((DB *, __ham_groupalloc_args *, DB_LSN *));
/*
* __ham_insdel_recover --
@@ -82,16 +79,16 @@ __ham_insdel_recover(dbenv, dbtp, lsnp, op, info)
DBC *dbc;
DB_MPOOLFILE *mpf;
PAGE *pagep;
- u_int32_t opcode;
- int cmp_n, cmp_p, flags, getmeta, ret, type;
+ u_int32_t flags, opcode;
+ int cmp_n, cmp_p, ret, type;
+ pagep = NULL;
COMPQUIET(info, NULL);
- getmeta = 0;
REC_PRINT(__ham_insdel_print);
REC_INTRO(__ham_insdel_read, 1);
- if ((ret = memp_fget(mpf, &argp->pgno, 0, &pagep)) != 0) {
+ if ((ret = mpf->get(mpf, &argp->pgno, 0, &pagep)) != 0) {
if (DB_UNDO(op)) {
/*
* We are undoing and the page doesn't exist. That
@@ -100,15 +97,11 @@ __ham_insdel_recover(dbenv, dbtp, lsnp, op, info)
* don't bother creating a page.
*/
goto done;
- } else if ((ret = memp_fget(mpf, &argp->pgno,
- DB_MPOOL_CREATE, &pagep)) != 0)
+ } else if ((ret = mpf->get(mpf,
+ &argp->pgno, DB_MPOOL_CREATE, &pagep)) != 0)
goto out;
}
- if ((ret = __ham_get_meta(dbc)) != 0)
- goto out;
- getmeta = 1;
-
cmp_n = log_compare(lsnp, &LSN(pagep));
cmp_p = log_compare(&LSN(pagep), &argp->pagelsn);
CHECK_LSN(op, cmp_p, &LSN(pagep), &argp->pagelsn);
@@ -135,7 +128,7 @@ __ham_insdel_recover(dbenv, dbtp, lsnp, op, info)
*/
if (opcode != DELPAIR ||
argp->ndx == (u_int32_t)NUM_ENT(pagep)) {
- __ham_putitem(pagep, &argp->key,
+ __ham_putitem(file_dbp, pagep, &argp->key,
DB_UNDO(op) || PAIR_ISKEYBIG(argp->opcode) ?
H_OFFPAGE : H_KEYDATA);
@@ -145,31 +138,32 @@ __ham_insdel_recover(dbenv, dbtp, lsnp, op, info)
type = H_OFFPAGE;
else
type = H_KEYDATA;
- __ham_putitem(pagep, &argp->data, type);
+ __ham_putitem(file_dbp, pagep, &argp->data, type);
} else
- (void)__ham_reputpair(pagep, file_dbp->pgsize,
+ (void)__ham_reputpair(file_dbp, pagep,
argp->ndx, &argp->key, &argp->data);
LSN(pagep) = DB_REDO(op) ? *lsnp : argp->pagelsn;
flags = DB_MPOOL_DIRTY;
- } else if ((opcode == DELPAIR && cmp_p == 0 && DB_REDO(op))
- || (opcode == PUTPAIR && cmp_n == 0 && DB_UNDO(op))) {
+ } else if ((opcode == DELPAIR && cmp_p == 0 && DB_REDO(op)) ||
+ (opcode == PUTPAIR && cmp_n == 0 && DB_UNDO(op))) {
/* Need to undo a put or redo a delete. */
__ham_dpair(file_dbp, pagep, argp->ndx);
LSN(pagep) = DB_REDO(op) ? *lsnp : argp->pagelsn;
flags = DB_MPOOL_DIRTY;
}
- if ((ret = memp_fput(file_dbp->mpf, pagep, flags)) != 0)
+ if ((ret = mpf->put(mpf, pagep, flags)) != 0)
goto out;
+ pagep = NULL;
/* Return the previous LSN. */
done: *lsnp = argp->prev_lsn;
ret = 0;
-out: if (getmeta)
- (void)__ham_release_meta(dbc);
+out: if (pagep != NULL)
+ (void)mpf->put(mpf, pagep, 0);
REC_CLOSE;
}
@@ -194,15 +188,16 @@ __ham_newpage_recover(dbenv, dbtp, lsnp, op, info)
DBC *dbc;
DB_MPOOLFILE *mpf;
PAGE *pagep;
- int cmp_n, cmp_p, flags, getmeta, ret;
+ u_int32_t flags;
+ int cmp_n, cmp_p, ret;
+ pagep = NULL;
COMPQUIET(info, NULL);
- getmeta = 0;
REC_PRINT(__ham_newpage_print);
REC_INTRO(__ham_newpage_read, 1);
- if ((ret = memp_fget(mpf, &argp->new_pgno, 0, &pagep)) != 0) {
+ if ((ret = mpf->get(mpf, &argp->new_pgno, 0, &pagep)) != 0) {
if (DB_UNDO(op)) {
/*
* We are undoing and the page doesn't exist. That
@@ -212,15 +207,11 @@ __ham_newpage_recover(dbenv, dbtp, lsnp, op, info)
*/
ret = 0;
goto ppage;
- } else if ((ret = memp_fget(mpf, &argp->new_pgno,
- DB_MPOOL_CREATE, &pagep)) != 0)
+ } else if ((ret = mpf->get(mpf,
+ &argp->new_pgno, DB_MPOOL_CREATE, &pagep)) != 0)
goto out;
}
- if ((ret = __ham_get_meta(dbc)) != 0)
- goto out;
- getmeta = 1;
-
/*
* There are potentially three pages we need to check: the one
* that we created/deleted, the one before it and the one after
@@ -250,12 +241,13 @@ __ham_newpage_recover(dbenv, dbtp, lsnp, op, info)
if (flags)
LSN(pagep) = DB_REDO(op) ? *lsnp : argp->pagelsn;
- if ((ret = memp_fput(file_dbp->mpf, pagep, flags)) != 0)
+ if ((ret = mpf->put(mpf, pagep, flags)) != 0)
goto out;
+ pagep = NULL;
/* Now do the prev page. */
ppage: if (argp->prev_pgno != PGNO_INVALID) {
- if ((ret = memp_fget(mpf, &argp->prev_pgno, 0, &pagep)) != 0) {
+ if ((ret = mpf->get(mpf, &argp->prev_pgno, 0, &pagep)) != 0) {
if (DB_UNDO(op)) {
/*
* We are undoing and the page doesn't exist.
@@ -265,9 +257,8 @@ ppage: if (argp->prev_pgno != PGNO_INVALID) {
*/
ret = 0;
goto npage;
- } else if ((ret =
- memp_fget(mpf, &argp->prev_pgno,
- DB_MPOOL_CREATE, &pagep)) != 0)
+ } else if ((ret = mpf->get(mpf,
+ &argp->prev_pgno, DB_MPOOL_CREATE, &pagep)) != 0)
goto out;
}
@@ -281,7 +272,8 @@ ppage: if (argp->prev_pgno != PGNO_INVALID) {
/* Redo a create new page or undo a delete new page. */
pagep->next_pgno = argp->new_pgno;
flags = DB_MPOOL_DIRTY;
- } else if ((cmp_p == 0 && DB_REDO(op) && argp->opcode == DELOVFL) ||
+ } else if ((cmp_p == 0 &&
+ DB_REDO(op) && argp->opcode == DELOVFL) ||
(cmp_n == 0 && DB_UNDO(op) && argp->opcode == PUTOVFL)) {
/* Redo a delete or undo a create new page. */
pagep->next_pgno = argp->next_pgno;
@@ -291,13 +283,14 @@ ppage: if (argp->prev_pgno != PGNO_INVALID) {
if (flags)
LSN(pagep) = DB_REDO(op) ? *lsnp : argp->prevlsn;
- if ((ret = memp_fput(file_dbp->mpf, pagep, flags)) != 0)
+ if ((ret = mpf->put(mpf, pagep, flags)) != 0)
goto out;
+ pagep = NULL;
}
/* Now time to do the next page */
npage: if (argp->next_pgno != PGNO_INVALID) {
- if ((ret = memp_fget(mpf, &argp->next_pgno, 0, &pagep)) != 0) {
+ if ((ret = mpf->get(mpf, &argp->next_pgno, 0, &pagep)) != 0) {
if (DB_UNDO(op)) {
/*
* We are undoing and the page doesn't exist.
@@ -306,9 +299,8 @@ npage: if (argp->next_pgno != PGNO_INVALID) {
* this case, don't bother creating a page.
*/
goto done;
- } else if ((ret =
- memp_fget(mpf, &argp->next_pgno,
- DB_MPOOL_CREATE, &pagep)) != 0)
+ } else if ((ret = mpf->get(mpf,
+ &argp->next_pgno, DB_MPOOL_CREATE, &pagep)) != 0)
goto out;
}
@@ -322,7 +314,8 @@ npage: if (argp->next_pgno != PGNO_INVALID) {
/* Redo a create new page or undo a delete new page. */
pagep->prev_pgno = argp->new_pgno;
flags = DB_MPOOL_DIRTY;
- } else if ((cmp_p == 0 && DB_REDO(op) && argp->opcode == DELOVFL) ||
+ } else if ((cmp_p == 0 &&
+ DB_REDO(op) && argp->opcode == DELOVFL) ||
(cmp_n == 0 && DB_UNDO(op) && argp->opcode == PUTOVFL)) {
/* Redo a delete or undo a create new page. */
pagep->prev_pgno = argp->prev_pgno;
@@ -332,14 +325,15 @@ npage: if (argp->next_pgno != PGNO_INVALID) {
if (flags)
LSN(pagep) = DB_REDO(op) ? *lsnp : argp->nextlsn;
- if ((ret = memp_fput(file_dbp->mpf, pagep, flags)) != 0)
+ if ((ret = mpf->put(mpf, pagep, flags)) != 0)
goto out;
+ pagep = NULL;
}
done: *lsnp = argp->prev_lsn;
ret = 0;
-out: if (getmeta)
- (void)__ham_release_meta(dbc);
+out: if (pagep != NULL)
+ (void)mpf->put(mpf, pagep, 0);
REC_CLOSE;
}
@@ -366,17 +360,18 @@ __ham_replace_recover(dbenv, dbtp, lsnp, op, info)
DB_MPOOLFILE *mpf;
DBT dbt;
PAGE *pagep;
+ u_int32_t flags;
int32_t grow;
- int cmp_n, cmp_p, flags, getmeta, ret;
+ int cmp_n, cmp_p, ret;
u_int8_t *hk;
+ pagep = NULL;
COMPQUIET(info, NULL);
- getmeta = 0;
REC_PRINT(__ham_replace_print);
REC_INTRO(__ham_replace_read, 1);
- if ((ret = memp_fget(mpf, &argp->pgno, 0, &pagep)) != 0) {
+ if ((ret = mpf->get(mpf, &argp->pgno, 0, &pagep)) != 0) {
if (DB_UNDO(op)) {
/*
* We are undoing and the page doesn't exist. That
@@ -385,15 +380,11 @@ __ham_replace_recover(dbenv, dbtp, lsnp, op, info)
* don't bother creating a page.
*/
goto done;
- } else if ((ret = memp_fget(mpf, &argp->pgno,
- DB_MPOOL_CREATE, &pagep)) != 0)
+ } else if ((ret = mpf->get(mpf,
+ &argp->pgno, DB_MPOOL_CREATE, &pagep)) != 0)
goto out;
}
- if ((ret = __ham_get_meta(dbc)) != 0)
- goto out;
- getmeta = 1;
-
cmp_n = log_compare(lsnp, &LSN(pagep));
cmp_p = log_compare(&LSN(pagep), &argp->pagelsn);
CHECK_LSN(op, cmp_p, &LSN(pagep), &argp->pagelsn);
@@ -419,10 +410,10 @@ __ham_replace_recover(dbenv, dbtp, lsnp, op, info)
}
if (flags) {
- __ham_onpage_replace(pagep,
- file_dbp->pgsize, argp->ndx, argp->off, grow, &dbt);
+ __ham_onpage_replace(file_dbp, pagep,
+ argp->ndx, argp->off, grow, &dbt);
if (argp->makedup) {
- hk = P_ENTRY(pagep, argp->ndx);
+ hk = P_ENTRY(file_dbp, pagep, argp->ndx);
if (DB_REDO(op))
HPAGE_PTYPE(hk) = H_DUPLICATE;
else
@@ -430,14 +421,15 @@ __ham_replace_recover(dbenv, dbtp, lsnp, op, info)
}
}
- if ((ret = memp_fput(file_dbp->mpf, pagep, flags)) != 0)
+ if ((ret = mpf->put(mpf, pagep, flags)) != 0)
goto out;
+ pagep = NULL;
done: *lsnp = argp->prev_lsn;
ret = 0;
-out: if (getmeta)
- (void)__ham_release_meta(dbc);
+out: if (pagep != NULL)
+ (void)mpf->put(mpf, pagep, 0);
REC_CLOSE;
}
@@ -460,15 +452,16 @@ __ham_splitdata_recover(dbenv, dbtp, lsnp, op, info)
DBC *dbc;
DB_MPOOLFILE *mpf;
PAGE *pagep;
- int cmp_n, cmp_p, flags, getmeta, ret;
+ u_int32_t flags;
+ int cmp_n, cmp_p, ret;
+ pagep = NULL;
COMPQUIET(info, NULL);
- getmeta = 0;
REC_PRINT(__ham_splitdata_print);
REC_INTRO(__ham_splitdata_read, 1);
- if ((ret = memp_fget(mpf, &argp->pgno, 0, &pagep)) != 0) {
+ if ((ret = mpf->get(mpf, &argp->pgno, 0, &pagep)) != 0) {
if (DB_UNDO(op)) {
/*
* We are undoing and the page doesn't exist. That
@@ -477,15 +470,11 @@ __ham_splitdata_recover(dbenv, dbtp, lsnp, op, info)
* don't bother creating a page.
*/
goto done;
- } else if ((ret = memp_fget(mpf, &argp->pgno,
- DB_MPOOL_CREATE, &pagep)) != 0)
+ } else if ((ret = mpf->get(mpf,
+ &argp->pgno, DB_MPOOL_CREATE, &pagep)) != 0)
goto out;
}
- if ((ret = __ham_get_meta(dbc)) != 0)
- goto out;
- getmeta = 1;
-
cmp_n = log_compare(lsnp, &LSN(pagep));
cmp_p = log_compare(&LSN(pagep), &argp->pagelsn);
CHECK_LSN(op, cmp_p, &LSN(pagep), &argp->pagelsn);
@@ -519,14 +508,15 @@ __ham_splitdata_recover(dbenv, dbtp, lsnp, op, info)
LSN(pagep) = argp->pagelsn;
flags = DB_MPOOL_DIRTY;
}
- if ((ret = memp_fput(file_dbp->mpf, pagep, flags)) != 0)
+ if ((ret = mpf->put(mpf, pagep, flags)) != 0)
goto out;
+ pagep = NULL;
done: *lsnp = argp->prev_lsn;
ret = 0;
-out: if (getmeta)
- (void)__ham_release_meta(dbc);
+out: if (pagep != NULL)
+ (void)mpf->put(mpf, pagep, 0);
REC_CLOSE;
}
@@ -550,21 +540,19 @@ __ham_copypage_recover(dbenv, dbtp, lsnp, op, info)
DBC *dbc;
DB_MPOOLFILE *mpf;
PAGE *pagep;
- int cmp_n, cmp_p, flags, getmeta, ret;
+ u_int32_t flags;
+ int cmp_n, cmp_p, ret;
+ pagep = NULL;
COMPQUIET(info, NULL);
- getmeta = 0;
REC_PRINT(__ham_copypage_print);
REC_INTRO(__ham_copypage_read, 1);
- if ((ret = __ham_get_meta(dbc)) != 0)
- goto out;
- getmeta = 1;
flags = 0;
/* This is the bucket page. */
- if ((ret = memp_fget(mpf, &argp->pgno, 0, &pagep)) != 0) {
+ if ((ret = mpf->get(mpf, &argp->pgno, 0, &pagep)) != 0) {
if (DB_UNDO(op)) {
/*
* We are undoing and the page doesn't exist. That
@@ -574,8 +562,8 @@ __ham_copypage_recover(dbenv, dbtp, lsnp, op, info)
*/
ret = 0;
goto donext;
- } else if ((ret = memp_fget(mpf, &argp->pgno,
- DB_MPOOL_CREATE, &pagep)) != 0)
+ } else if ((ret = mpf->get(mpf,
+ &argp->pgno, DB_MPOOL_CREATE, &pagep)) != 0)
goto out;
}
@@ -597,11 +585,12 @@ __ham_copypage_recover(dbenv, dbtp, lsnp, op, info)
LSN(pagep) = argp->pagelsn;
flags = DB_MPOOL_DIRTY;
}
- if ((ret = memp_fput(mpf, pagep, flags)) != 0)
+ if ((ret = mpf->put(mpf, pagep, flags)) != 0)
goto out;
+ pagep = NULL;
donext: /* Now fix up the "next" page. */
- if ((ret = memp_fget(mpf, &argp->next_pgno, 0, &pagep)) != 0) {
+ if ((ret = mpf->get(mpf, &argp->next_pgno, 0, &pagep)) != 0) {
if (DB_UNDO(op)) {
/*
* We are undoing and the page doesn't exist. That
@@ -611,8 +600,8 @@ donext: /* Now fix up the "next" page. */
*/
ret = 0;
goto do_nn;
- } else if ((ret = memp_fget(mpf, &argp->next_pgno,
- DB_MPOOL_CREATE, &pagep)) != 0)
+ } else if ((ret = mpf->get(mpf,
+ &argp->next_pgno, DB_MPOOL_CREATE, &pagep)) != 0)
goto out;
}
@@ -629,14 +618,15 @@ donext: /* Now fix up the "next" page. */
memcpy(pagep, argp->page.data, argp->page.size);
flags = DB_MPOOL_DIRTY;
}
- if ((ret = memp_fput(mpf, pagep, flags)) != 0)
+ if ((ret = mpf->put(mpf, pagep, flags)) != 0)
goto out;
+ pagep = NULL;
/* Now fix up the next's next page. */
do_nn: if (argp->nnext_pgno == PGNO_INVALID)
goto done;
- if ((ret = memp_fget(mpf, &argp->nnext_pgno, 0, &pagep)) != 0) {
+ if ((ret = mpf->get(mpf, &argp->nnext_pgno, 0, &pagep)) != 0) {
if (DB_UNDO(op)) {
/*
* We are undoing and the page doesn't exist. That
@@ -645,8 +635,8 @@ do_nn: if (argp->nnext_pgno == PGNO_INVALID)
* don't bother creating a page.
*/
goto done;
- } else if ((ret = memp_fget(mpf, &argp->nnext_pgno,
- DB_MPOOL_CREATE, &pagep)) != 0)
+ } else if ((ret = mpf->get(mpf,
+ &argp->nnext_pgno, DB_MPOOL_CREATE, &pagep)) != 0)
goto out;
}
@@ -666,14 +656,15 @@ do_nn: if (argp->nnext_pgno == PGNO_INVALID)
LSN(pagep) = argp->nnextlsn;
flags = DB_MPOOL_DIRTY;
}
- if ((ret = memp_fput(mpf, pagep, flags)) != 0)
+ if ((ret = mpf->put(mpf, pagep, flags)) != 0)
goto out;
+ pagep = NULL;
done: *lsnp = argp->prev_lsn;
ret = 0;
-out: if (getmeta)
- (void)__ham_release_meta(dbc);
+out: if (pagep != NULL)
+ (void)mpf->put(mpf, pagep, 0);
REC_CLOSE;
}
@@ -695,13 +686,17 @@ __ham_metagroup_recover(dbenv, dbtp, lsnp, op, info)
__ham_metagroup_args *argp;
HASH_CURSOR *hcp;
DB *file_dbp;
+ DBMETA *mmeta;
DBC *dbc;
DB_MPOOLFILE *mpf;
PAGE *pagep;
- db_pgno_t last_pgno;
- int cmp_n, cmp_p, flags, groupgrow, ret;
+ db_pgno_t pgno;
+ u_int32_t flags, mmeta_flags;
+ int cmp_n, cmp_p, did_recover, groupgrow, ret;
COMPQUIET(info, NULL);
+ mmeta_flags = 0;
+ mmeta = NULL;
REC_PRINT(__ham_metagroup_print);
REC_INTRO(__ham_metagroup_read, 1);
@@ -709,22 +704,24 @@ __ham_metagroup_recover(dbenv, dbtp, lsnp, op, info)
* This logs the virtual create of pages pgno to pgno + bucket
* Since the mpool page-allocation is not really able to be
* transaction protected, we can never undo it. Even in an abort,
- * we have to allocate these pages to the hash table.
+ * we have to allocate these pages to the hash table if they
+ * were actually created. In particular, during disaster
+ * recovery the metapage may be before this point if we
+ * are rolling backward. If the file has not been extended
+ * then the metapage could not have been updated.
* The log record contains:
* bucket: new bucket being allocated.
* pgno: page number of the new bucket.
* if bucket is a power of 2, then we allocated a whole batch of
* pages; if it's not, then we simply allocated one new page.
*/
- groupgrow =
- (u_int32_t)(1 << __db_log2(argp->bucket + 1)) == argp->bucket + 1;
+ groupgrow = (u_int32_t)(1 << __db_log2(argp->bucket + 1)) ==
+ argp->bucket + 1;
+ pgno = argp->pgno;
+ if (argp->newalloc)
+ pgno += argp->bucket;
- last_pgno = argp->pgno;
- if (groupgrow)
- /* Read the last page. */
- last_pgno += argp->bucket;
-
- if ((ret = memp_fget(mpf, &last_pgno, DB_MPOOL_CREATE, &pagep)) != 0)
+ if ((ret = mpf->get(mpf, &pgno, DB_MPOOL_CREATE, &pagep)) != 0)
goto out;
cmp_n = log_compare(lsnp, &LSN(pagep));
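
The recovery routine decides whether this allocation began a new doubling by testing that bucket + 1 is an exact power of two: (1 << __db_log2(argp->bucket + 1)) == argp->bucket + 1. A standalone sketch of that test, with a plain ceiling-log2 helper standing in for __db_log2:

    #include <stdio.h>

    /* Ceiling log2: smallest p with (1 << p) >= n (stand-in for __db_log2). */
    static unsigned
    clog2(unsigned n)
    {
        unsigned p;

        for (p = 0; (1u << p) < n; p++)
            ;
        return (p);
    }

    int
    main(void)
    {
        unsigned bucket;

        for (bucket = 0; bucket < 10; bucket++)
            printf("bucket %u: %s\n", bucket,
                (1u << clog2(bucket + 1)) == bucket + 1 ?
                "new doubling (a whole batch of pages)" :
                "single new page");
        return (0);
    }

Buckets 0, 1, 3 and 7 come out as doublings here, i.e. the buckets for which bucket + 1 is a power of two, which is exactly what the recovery code tests.
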
@@ -743,7 +740,7 @@ __ham_metagroup_recover(dbenv, dbtp, lsnp, op, info)
pagep->lsn = argp->pagelsn;
flags = DB_MPOOL_DIRTY;
}
- if ((ret = memp_fput(mpf, pagep, flags)) != 0)
+ if ((ret = mpf->put(mpf, pagep, flags)) != 0)
goto out;
/* Now we have to update the meta-data page. */
@@ -753,39 +750,90 @@ __ham_metagroup_recover(dbenv, dbtp, lsnp, op, info)
cmp_n = log_compare(lsnp, &hcp->hdr->dbmeta.lsn);
cmp_p = log_compare(&hcp->hdr->dbmeta.lsn, &argp->metalsn);
CHECK_LSN(op, cmp_p, &hcp->hdr->dbmeta.lsn, &argp->metalsn);
- if ((cmp_p == 0 && DB_REDO(op)) || (cmp_n == 0 && DB_UNDO(op))) {
- if (DB_REDO(op)) {
- /* Redo the actual updating of bucket counts. */
- ++hcp->hdr->max_bucket;
- if (groupgrow) {
- hcp->hdr->low_mask = hcp->hdr->high_mask;
- hcp->hdr->high_mask =
- (argp->bucket + 1) | hcp->hdr->low_mask;
- }
- hcp->hdr->dbmeta.lsn = *lsnp;
- } else {
- /* Undo the actual updating of bucket counts. */
- --hcp->hdr->max_bucket;
- if (groupgrow) {
- hcp->hdr->high_mask = hcp->hdr->low_mask;
- hcp->hdr->low_mask = hcp->hdr->high_mask >> 1;
- }
- hcp->hdr->dbmeta.lsn = argp->metalsn;
+ did_recover = 0;
+ if (cmp_p == 0 && DB_REDO(op)) {
+ /* Redo the actual updating of bucket counts. */
+ ++hcp->hdr->max_bucket;
+ if (groupgrow) {
+ hcp->hdr->low_mask = hcp->hdr->high_mask;
+ hcp->hdr->high_mask =
+ (argp->bucket + 1) | hcp->hdr->low_mask;
}
- if (groupgrow &&
- hcp->hdr->spares[__db_log2(argp->bucket + 1) + 1] ==
- PGNO_INVALID)
- hcp->hdr->spares[__db_log2(argp->bucket + 1) + 1] =
- argp->pgno - argp->bucket - 1;
- F_SET(hcp, H_DIRTY);
+ hcp->hdr->dbmeta.lsn = *lsnp;
+ did_recover = 1;
+ } else if (cmp_n == 0 && DB_UNDO(op)) {
+ /* Undo the actual updating of bucket counts. */
+ --hcp->hdr->max_bucket;
+ if (groupgrow) {
+ hcp->hdr->high_mask = hcp->hdr->low_mask;
+ hcp->hdr->low_mask = hcp->hdr->high_mask >> 1;
+ }
+ hcp->hdr->dbmeta.lsn = argp->metalsn;
+ did_recover = 1;
+ }
+
+ /*
+ * Now we need to fix up the spares array. Each entry in the
+ * spares array indicates the beginning page number for the
+ * indicated doubling. We need to fill this in whenever the
+ * spares array is invalid, since we never reclaim pages from
+ * the spares array and we have to allocate the pages to the
+ * spares array in both the redo and undo cases.
+ */
+ if (argp->newalloc &&
+ hcp->hdr->spares[__db_log2(argp->bucket + 1) + 1] == PGNO_INVALID) {
+ hcp->hdr->spares[__db_log2(argp->bucket + 1) + 1] =
+ argp->pgno - argp->bucket - 1;
+ did_recover = 1;
+ }
+
+ /*
+ * Finally, we need to potentially fix up the last_pgno field
+ * in the master meta-data page (which may or may not be the
+ * same as the hash header page).
+ */
+ if (argp->mmpgno != argp->mpgno) {
+ if ((ret =
+ mpf->get(mpf, &argp->mmpgno, 0, (PAGE **)&mmeta)) != 0)
+ goto out;
+ mmeta_flags = 0;
+ cmp_n = log_compare(lsnp, &mmeta->lsn);
+ cmp_p = log_compare(&mmeta->lsn, &argp->mmetalsn);
+ if (cmp_p == 0 && DB_REDO(op)) {
+ mmeta->lsn = *lsnp;
+ mmeta_flags = DB_MPOOL_DIRTY;
+ } else if (cmp_n == 0 && DB_UNDO(op)) {
+ mmeta->lsn = argp->mmetalsn;
+ mmeta_flags = DB_MPOOL_DIRTY;
+ }
+ } else
+ mmeta = (DBMETA *)hcp->hdr;
+
+ if (argp->newalloc) {
+ if (mmeta->last_pgno < pgno)
+ mmeta->last_pgno = pgno;
+ mmeta_flags = DB_MPOOL_DIRTY;
}
- if ((ret = __ham_release_meta(dbc)) != 0)
+
+ if (argp->mmpgno != argp->mpgno &&
+ (ret = mpf->put(mpf, mmeta, mmeta_flags)) != 0)
goto out;
+ mmeta = NULL;
+
+ if (did_recover)
+ F_SET(hcp, H_DIRTY);
done: *lsnp = argp->prev_lsn;
ret = 0;
-out: REC_CLOSE;
+out: if (mmeta != NULL)
+ (void)mpf->put(mpf, mmeta, 0);
+ if (dbc != NULL)
+ (void)__ham_release_meta(dbc);
+ if (ret == ENOENT && op == DB_TXN_BACKWARD_ALLOC)
+ ret = 0;
+
+ REC_CLOSE;
}
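
The spares comment above is the heart of the layout: each spares[] entry records, for one doubling, the offset that turns a bucket number into a page number, so bucket-to-page translation is one array lookup plus an add (that is what the BUCKET_TO_PAGE/BS_TO_PAGE macros used elsewhere in this patch come down to, as assumed here). A simplified standalone sketch of filling and using such a table; the real entries also deal with PGNO_INVALID and the meta page, which this ignores:

    #include <stdio.h>

    #define NDOUBLINGS 32

    /* Ceiling log2 (stand-in for __db_log2). */
    static unsigned
    clog2(unsigned n)
    {
        unsigned p;

        for (p = 0; (1u << p) < n; p++)
            ;
        return (p);
    }

    /*
     * spares[d] = first page of doubling d minus first bucket of doubling
     * d, so page(bucket) = spares[log2(bucket + 1)] + bucket.
     */
    static unsigned spares[NDOUBLINGS];

    static unsigned
    bucket_to_page(unsigned bucket)
    {
        return (spares[clog2(bucket + 1)] + bucket);
    }

    int
    main(void)
    {
        unsigned b;

        spares[0] = 1;          /* bucket 0 lives on page 1 */
        spares[1] = 2 - 1;      /* doubling 1 (bucket 1) starts at page 2 */
        spares[2] = 5 - 2;      /* doubling 2 (buckets 2-3) starts at page 5 */

        for (b = 0; b < 4; b++)
            printf("bucket %u -> page %u\n", b, bucket_to_page(b));
        return (0);
    }
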
/*
@@ -808,17 +856,20 @@ __ham_groupalloc_recover(dbenv, dbtp, lsnp, op, info)
DB_MPOOLFILE *mpf;
DB *file_dbp;
DBC *dbc;
+ PAGE *pagep;
db_pgno_t pgno;
- int cmp_n, cmp_p, flags, ret;
+ int cmp_n, cmp_p, modified, ret;
+ mmeta = NULL;
+ modified = 0;
REC_PRINT(__ham_groupalloc_print);
REC_INTRO(__ham_groupalloc_read, 0);
pgno = PGNO_BASE_MD;
- if ((ret = memp_fget(mpf, &pgno, 0, &mmeta)) != 0) {
+ if ((ret = mpf->get(mpf, &pgno, 0, &mmeta)) != 0) {
if (DB_REDO(op)) {
/* Page should have existed. */
- (void)__db_pgerr(file_dbp, pgno);
+ __db_pgerr(file_dbp, pgno, ret);
goto out;
} else {
ret = 0;
@@ -839,37 +890,48 @@ __ham_groupalloc_recover(dbenv, dbtp, lsnp, op, info)
* that the pages were never allocated, so we'd better check for
* that and handle it here.
*/
-
- flags = 0;
if (DB_REDO(op)) {
- if ((ret = __ham_alloc_pages(file_dbp, argp)) != 0)
- goto out1;
+ if ((ret = __ham_alloc_pages(file_dbp, argp, lsnp)) != 0)
+ goto out;
if (cmp_p == 0) {
LSN(mmeta) = *lsnp;
- flags = DB_MPOOL_DIRTY;
+ modified = 1;
}
- }
+ } else if (DB_UNDO(op)) {
+ /*
+ * Reset the last page back to its preallocation state.
+ */
+ pgno = argp->start_pgno + argp->num - 1;
+ if ((ret = mpf->get(mpf, &pgno, 0, &pagep)) == 0) {
- /*
- * Always put the pages into the limbo list and free them later.
- */
- else if (DB_UNDO(op)) {
+ if (log_compare(&pagep->lsn, lsnp) == 0)
+ ZERO_LSN(pagep->lsn);
+
+ if ((ret = mpf->put(mpf, pagep, DB_MPOOL_DIRTY)) != 0)
+ goto out;
+ } else if (ret != DB_PAGE_NOTFOUND)
+ goto out;
+ /*
+ * Always put the pages into the limbo list and free them later.
+ */
if ((ret = __db_add_limbo(dbenv,
info, argp->fileid, argp->start_pgno, argp->num)) != 0)
goto out;
if (cmp_n == 0) {
LSN(mmeta) = argp->meta_lsn;
- flags = DB_MPOOL_DIRTY;
+ modified = 1;
}
}
-out1: if ((ret = memp_fput(mpf, mmeta, flags)) != 0)
- goto out;
-
done: if (ret == 0)
*lsnp = argp->prev_lsn;
-out: REC_CLOSE;
+out: if (mmeta != NULL)
+ (void)mpf->put(mpf, mmeta, modified ? DB_MPOOL_DIRTY : 0);
+
+ if (ret == ENOENT && op == DB_TXN_BACKWARD_ALLOC)
+ ret = 0;
+ REC_CLOSE;
}
/*
@@ -883,9 +945,10 @@ out: REC_CLOSE;
* Hash normally has holes in its files and handles them appropriately.
*/
static int
-__ham_alloc_pages(dbp, argp)
+__ham_alloc_pages(dbp, argp, lsnp)
DB *dbp;
__ham_groupalloc_args *argp;
+ DB_LSN *lsnp;
{
DB_MPOOLFILE *mpf;
PAGE *pagep;
@@ -898,38 +961,26 @@ __ham_alloc_pages(dbp, argp)
pgno = argp->start_pgno + argp->num - 1;
/* If the page exists, and it has been initialized, then we're done. */
- if ((ret = memp_fget(mpf, &pgno, 0, &pagep)) == 0) {
- if ((pagep->type == P_INVALID) && IS_ZERO_LSN(pagep->lsn))
+ if ((ret = mpf->get(mpf, &pgno, 0, &pagep)) == 0) {
+ if (NUM_ENT(pagep) == 0 && IS_ZERO_LSN(pagep->lsn))
goto reinit_page;
- if ((ret = memp_fput(mpf, pagep, 0)) != 0)
+ if ((ret = mpf->put(mpf, pagep, 0)) != 0)
return (ret);
return (0);
}
- /*
- * Had to create the page. On some systems (read "Windows"),
- * you can find random garbage on pages to which you haven't
- * yet written. So, we have an os layer that will do the
- * right thing for group allocations. We call that directly
- * to make sure all the pages are allocated and then continue
- * merrily on our way with normal recovery.
- */
- if ((ret = __os_fpinit(dbp->dbenv, &mpf->fh,
- argp->start_pgno, argp->num, dbp->pgsize)) != 0)
- return (ret);
-
- if ((ret = memp_fget(mpf, &pgno, DB_MPOOL_CREATE, &pagep)) != 0) {
- (void)__db_pgerr(dbp, pgno);
+ /* Had to create the page. */
+ if ((ret = mpf->get(mpf, &pgno, DB_MPOOL_CREATE, &pagep)) != 0) {
+ __db_pgerr(dbp, pgno, ret);
return (ret);
}
reinit_page:
/* Initialize the newly allocated page. */
- P_INIT(pagep,
- dbp->pgsize, pgno, PGNO_INVALID, PGNO_INVALID, 0, P_HASH);
- ZERO_LSN(pagep->lsn);
+ P_INIT(pagep, dbp->pgsize, pgno, PGNO_INVALID, PGNO_INVALID, 0, P_HASH);
+ pagep->lsn = *lsnp;
- if ((ret = memp_fput(mpf, pagep, DB_MPOOL_DIRTY)) != 0)
+ if ((ret = mpf->put(mpf, pagep, DB_MPOOL_DIRTY)) != 0)
return (ret);
return (0);
@@ -942,7 +993,6 @@ reinit_page:
* PUBLIC: int __ham_curadj_recover
* PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
*/
-
int
__ham_curadj_recover(dbenv, dbtp, lsnp, op, info)
DB_ENV *dbenv;
@@ -958,14 +1008,13 @@ __ham_curadj_recover(dbenv, dbtp, lsnp, op, info)
int ret;
HASH_CURSOR *hcp;
- REC_PRINT(__ham_groupalloc_print);
+ COMPQUIET(info, NULL);
+ REC_PRINT(__ham_curadj_print);
+ REC_INTRO(__ham_curadj_read, 0);
- ret = 0;
if (op != DB_TXN_ABORT)
goto done;
- REC_INTRO(__ham_curadj_read, 0);
- COMPQUIET(info, NULL);
/*
* Undo the adjustment by reinitializing the cursor
* to look like the one that was used to do the adjustment,
@@ -991,7 +1040,6 @@ out: REC_CLOSE;
* PUBLIC: int __ham_chgpg_recover
* PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
*/
-
int
__ham_chgpg_recover(dbenv, dbtp, lsnp, op, info)
DB_ENV *dbenv;
@@ -1008,15 +1056,18 @@ __ham_chgpg_recover(dbenv, dbtp, lsnp, op, info)
int ret;
DBC *cp;
HASH_CURSOR *lcp;
+ u_int32_t order, indx;
+ COMPQUIET(info, NULL);
REC_PRINT(__ham_chgpg_print);
+ REC_INTRO(__ham_chgpg_read, 0);
- ret = 0;
if (op != DB_TXN_ABORT)
- goto out;
- REC_INTRO(__ham_chgpg_read, 0);
+ goto done;
- COMPQUIET(info, NULL);
+ /* Overloaded fields for DB_HAM_DEL*PG */
+ indx = argp->old_indx;
+ order = argp->new_indx;
MUTEX_THREAD_LOCK(dbenv, dbenv->dblist_mutexp);
for (ldbp = __dblist_get(dbenv, file_dbp->adj_fileid);
@@ -1029,50 +1080,77 @@ __ham_chgpg_recover(dbenv, dbtp, lsnp, op, info)
lcp = (HASH_CURSOR *)cp->internal;
switch (argp->mode) {
- case DB_HAM_CHGPG:
+ case DB_HAM_DELFIRSTPG:
if (lcp->pgno != argp->new_pgno)
break;
-
- if (argp->old_indx == NDX_INVALID)
+ if (lcp->indx != indx ||
+ !F_ISSET(lcp, H_DELETED) ||
+ lcp->order >= order) {
lcp->pgno = argp->old_pgno;
- else if (lcp->indx == argp->new_indx) {
- lcp->indx = argp->old_indx;
+ if (lcp->indx == indx)
+ lcp->order -= order;
+ }
+ break;
+ case DB_HAM_DELMIDPG:
+ case DB_HAM_DELLASTPG:
+ if (lcp->pgno == argp->new_pgno &&
+ lcp->indx == indx &&
+ F_ISSET(lcp, H_DELETED) &&
+ lcp->order >= order) {
lcp->pgno = argp->old_pgno;
+ lcp->order -= order;
+ lcp->indx = 0;
}
break;
-
+ case DB_HAM_CHGPG:
+ /*
+ * If we're doing a CHGPG, we're undoing
+ * the move of a non-deleted item to a
+ * new page. Any cursors with the deleted
+ * flag set do not belong to this item;
+ * don't touch them.
+ */
+ if (F_ISSET(lcp, H_DELETED))
+ break;
+ /* FALLTHROUGH */
case DB_HAM_SPLIT:
- if (lcp->pgno == argp->new_pgno
- && lcp->indx == argp->new_indx) {
+ if (lcp->pgno == argp->new_pgno &&
+ lcp->indx == argp->new_indx) {
lcp->indx = argp->old_indx;
lcp->pgno = argp->old_pgno;
}
break;
-
case DB_HAM_DUP:
- if (lcp->opd != NULL) {
- opdcp =
- (BTREE_CURSOR *)lcp->opd->internal;
- if (opdcp->pgno == argp->new_pgno &&
- opdcp->indx == argp->new_indx) {
- if (F_ISSET(opdcp, C_DELETED))
- F_SET(lcp, H_DELETED);
- if ((ret =
- lcp->opd->c_close(
- lcp->opd)) != 0)
- goto out;
- lcp->opd = NULL;
- }
- }
+ if (lcp->opd == NULL)
+ break;
+ opdcp = (BTREE_CURSOR *)lcp->opd->internal;
+ if (opdcp->pgno != argp->new_pgno ||
+ opdcp->indx != argp->new_indx)
+ break;
+
+ if (F_ISSET(opdcp, C_DELETED))
+ F_SET(lcp, H_DELETED);
+ /*
+ * We can't close a cursor while we have the
+ * dbp mutex locked, since c_close reacquires
+ * it. It should be safe to drop the mutex
+ * here, though, since newly opened cursors
+ * are put only at the end of the tailq and
+ * the cursor we're adjusting can't be closed
+ * under us.
+ */
+ MUTEX_THREAD_UNLOCK(dbenv, file_dbp->mutexp);
+ if ((ret = lcp->opd->c_close(lcp->opd)) != 0)
+ goto out;
+ MUTEX_THREAD_LOCK(dbenv, file_dbp->mutexp);
+ lcp->opd = NULL;
break;
}
}
-
MUTEX_THREAD_UNLOCK(dbenv, file_dbp->mutexp);
}
MUTEX_THREAD_UNLOCK(dbenv, dbenv->dblist_mutexp);
done: *lsnp = argp->prev_lsn;
- ret = 0;
out: REC_CLOSE;
}
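
The DB_HAM_DUP branch above also records a real ordering constraint: c_close() re-acquires the dbp mutex, so the recovery code has to drop that mutex before closing the off-page duplicate cursor and take it back afterwards. A tiny standalone sketch of the same drop-call-relock pattern, with a POSIX mutex standing in for the dbp mutex (hypothetical names; the real code goes through the MUTEX_THREAD_LOCK/UNLOCK wrappers):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t dblist = PTHREAD_MUTEX_INITIALIZER;

    /* Stand-in for c_close(): it takes the same (non-recursive) mutex. */
    static void
    close_cursor(void)
    {
        pthread_mutex_lock(&dblist);
        /* ... unhook the cursor from the active queue ... */
        pthread_mutex_unlock(&dblist);
    }

    int
    main(void)
    {
        pthread_mutex_lock(&dblist);
        /* ... walking the cursor queue, we find one that must be closed ... */

        /* Drop the mutex first; a default mutex cannot be re-locked. */
        pthread_mutex_unlock(&dblist);
        close_cursor();
        pthread_mutex_lock(&dblist);

        /* ... continue the walk ... */
        pthread_mutex_unlock(&dblist);
        return (0);
    }

The comment in the hunk explains why dropping the mutex mid-walk is safe here: new cursors only go on the tail of the queue, so the walk can resume after re-locking.
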
diff --git a/bdb/hash/hash_reclaim.c b/bdb/hash/hash_reclaim.c
index 8857c5406a4..ac90ffff08a 100644
--- a/bdb/hash/hash_reclaim.c
+++ b/bdb/hash/hash_reclaim.c
@@ -1,14 +1,14 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Copyright (c) 1996-2002
* Sleepycat Software. All rights reserved.
*/
#include "db_config.h"
#ifndef lint
-static const char revid[] = "$Id: hash_reclaim.c,v 11.4 2000/11/30 00:58:37 ubell Exp $";
+static const char revid[] = "$Id: hash_reclaim.c,v 11.12 2002/03/28 19:49:43 bostic Exp $";
#endif /* not lint */
#ifndef NO_SYSTEM_INCLUDES
@@ -18,10 +18,8 @@ static const char revid[] = "$Id: hash_reclaim.c,v 11.4 2000/11/30 00:58:37 ubel
#endif
#include "db_int.h"
-#include "db_page.h"
-#include "db_shash.h"
-#include "hash.h"
-#include "lock.h"
+#include "dbinc/db_page.h"
+#include "dbinc/hash.h"
/*
* __ham_reclaim --
@@ -52,8 +50,8 @@ __ham_reclaim(dbp, txn)
if ((ret = __ham_get_meta(dbc)) != 0)
goto err;
- if ((ret = __ham_traverse(dbp,
- dbc, DB_LOCK_WRITE, __db_reclaim_callback, dbc)) != 0)
+ if ((ret = __ham_traverse(dbc,
+ DB_LOCK_WRITE, __db_reclaim_callback, dbc, 1)) != 0)
goto err;
if ((ret = dbc->c_close(dbc)) != 0)
goto err;
@@ -66,3 +64,48 @@ err: if (hcp->hdr != NULL)
(void)dbc->c_close(dbc);
return (ret);
}
+
+/*
+ * __ham_truncate --
+ * Reclaim the pages from a subdatabase and return them to the
+ * parent free list.
+ *
+ * PUBLIC: int __ham_truncate __P((DB *, DB_TXN *txn, u_int32_t *));
+ */
+int
+__ham_truncate(dbp, txn, countp)
+ DB *dbp;
+ DB_TXN *txn;
+ u_int32_t *countp;
+{
+ DBC *dbc;
+ HASH_CURSOR *hcp;
+ db_trunc_param trunc;
+ int ret;
+
+ /* Open up a cursor that we'll use for traversing. */
+ if ((ret = dbp->cursor(dbp, txn, &dbc, 0)) != 0)
+ return (ret);
+ hcp = (HASH_CURSOR *)dbc->internal;
+
+ if ((ret = __ham_get_meta(dbc)) != 0)
+ goto err;
+
+ trunc.count = 0;
+ trunc.dbc = dbc;
+
+ if ((ret = __ham_traverse(dbc,
+ DB_LOCK_WRITE, __db_truncate_callback, &trunc, 1)) != 0)
+ goto err;
+ if ((ret = __ham_release_meta(dbc)) != 0)
+ goto err;
+ if ((ret = dbc->c_close(dbc)) != 0)
+ goto err;
+ *countp = trunc.count;
+ return (0);
+
+err: if (hcp->hdr != NULL)
+ (void)__ham_release_meta(dbc);
+ (void)dbc->c_close(dbc);
+ return (ret);
+}
diff --git a/bdb/hash/hash_stat.c b/bdb/hash/hash_stat.c
index ed64bbc68bd..f9ee1d099cb 100644
--- a/bdb/hash/hash_stat.c
+++ b/bdb/hash/hash_stat.c
@@ -1,14 +1,14 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Copyright (c) 1996-2002
* Sleepycat Software. All rights reserved.
*/
#include "db_config.h"
#ifndef lint
-static const char revid[] = "$Id: hash_stat.c,v 11.24 2000/12/21 21:54:35 margo Exp $";
+static const char revid[] = "$Id: hash_stat.c,v 11.48 2002/08/06 06:11:28 bostic Exp $";
#endif /* not lint */
#ifndef NO_SYSTEM_INCLUDES
@@ -18,11 +18,9 @@ static const char revid[] = "$Id: hash_stat.c,v 11.24 2000/12/21 21:54:35 margo
#endif
#include "db_int.h"
-#include "db_page.h"
-#include "db_shash.h"
-#include "btree.h"
-#include "hash.h"
-#include "lock.h"
+#include "dbinc/db_page.h"
+#include "dbinc/btree.h"
+#include "dbinc/hash.h"
static int __ham_stat_callback __P((DB *, PAGE *, void *, int *));
@@ -30,24 +28,29 @@ static int __ham_stat_callback __P((DB *, PAGE *, void *, int *));
* __ham_stat --
* Gather/print the hash statistics
*
- * PUBLIC: int __ham_stat __P((DB *, void *, void *(*)(size_t), u_int32_t));
+ * PUBLIC: int __ham_stat __P((DB *, void *, u_int32_t));
*/
int
-__ham_stat(dbp, spp, db_malloc, flags)
+__ham_stat(dbp, spp, flags)
DB *dbp;
- void *spp, *(*db_malloc) __P((size_t));
+ void *spp;
u_int32_t flags;
{
+ DBC *dbc;
+ DB_ENV *dbenv;
DB_HASH_STAT *sp;
+ DB_MPOOLFILE *mpf;
HASH_CURSOR *hcp;
- DBC *dbc;
PAGE *h;
db_pgno_t pgno;
int ret;
- PANIC_CHECK(dbp->dbenv);
+ dbenv = dbp->dbenv;
+
+ PANIC_CHECK(dbenv);
DB_ILLEGAL_BEFORE_OPEN(dbp, "DB->stat");
+ mpf = dbp->mpf;
sp = NULL;
/* Check for invalid flags. */
@@ -62,39 +65,39 @@ __ham_stat(dbp, spp, db_malloc, flags)
goto err;
/* Allocate and clear the structure. */
- if ((ret = __os_malloc(dbp->dbenv, sizeof(*sp), db_malloc, &sp)) != 0)
+ if ((ret = __os_umalloc(dbenv, sizeof(*sp), &sp)) != 0)
goto err;
memset(sp, 0, sizeof(*sp));
- if (flags == DB_CACHED_COUNTS) {
- sp->hash_nkeys = hcp->hdr->dbmeta.key_count;
- sp->hash_ndata = hcp->hdr->dbmeta.record_count;
- goto done;
- }
-
/* Copy the fields that we have. */
+ sp->hash_nkeys = hcp->hdr->dbmeta.key_count;
+ sp->hash_ndata = hcp->hdr->dbmeta.record_count;
sp->hash_pagesize = dbp->pgsize;
sp->hash_buckets = hcp->hdr->max_bucket + 1;
sp->hash_magic = hcp->hdr->dbmeta.magic;
sp->hash_version = hcp->hdr->dbmeta.version;
sp->hash_metaflags = hcp->hdr->dbmeta.flags;
- sp->hash_nelem = hcp->hdr->nelem;
sp->hash_ffactor = hcp->hdr->ffactor;
+ if (flags == DB_FAST_STAT || flags == DB_CACHED_COUNTS)
+ goto done;
+
/* Walk the free list, counting pages. */
for (sp->hash_free = 0, pgno = hcp->hdr->dbmeta.free;
pgno != PGNO_INVALID;) {
++sp->hash_free;
- if ((ret = memp_fget(dbp->mpf, &pgno, 0, &h)) != 0)
+ if ((ret = mpf->get(mpf, &pgno, 0, &h)) != 0)
goto err;
pgno = h->next_pgno;
- (void)memp_fput(dbp->mpf, h, 0);
+ (void)mpf->put(mpf, h, 0);
}
/* Now traverse the rest of the table. */
- if ((ret = __ham_traverse(dbp,
- dbc, DB_LOCK_READ, __ham_stat_callback, sp)) != 0)
+ sp->hash_nkeys = 0;
+ sp->hash_ndata = 0;
+ if ((ret = __ham_traverse(dbc,
+ DB_LOCK_READ, __ham_stat_callback, sp, 0)) != 0)
goto err;
if (!F_ISSET(dbp, DB_AM_RDONLY)) {
@@ -114,7 +117,7 @@ done:
return (0);
err: if (sp != NULL)
- __os_free(sp, sizeof(*sp));
+ __os_ufree(dbenv, sp);
if (hcp->hdr != NULL)
(void)__ham_release_meta(dbc);
(void)dbc->c_close(dbc);
@@ -127,26 +130,30 @@ err: if (sp != NULL)
* Traverse an entire hash table. We use the callback so that we
* can use this both for stat collection and for deallocation.
*
- * PUBLIC: int __ham_traverse __P((DB *, DBC *, db_lockmode_t,
- * PUBLIC: int (*)(DB *, PAGE *, void *, int *), void *));
+ * PUBLIC: int __ham_traverse __P((DBC *, db_lockmode_t,
+ * PUBLIC: int (*)(DB *, PAGE *, void *, int *), void *, int));
*/
int
-__ham_traverse(dbp, dbc, mode, callback, cookie)
- DB *dbp;
+__ham_traverse(dbc, mode, callback, cookie, look_past_max)
DBC *dbc;
db_lockmode_t mode;
int (*callback) __P((DB *, PAGE *, void *, int *));
void *cookie;
+ int look_past_max;
{
+ DB *dbp;
+ DBC *opd;
+ DB_MPOOLFILE *mpf;
HASH_CURSOR *hcp;
HKEYDATA *hk;
- DBC *opd;
db_pgno_t pgno, opgno;
- u_int32_t bucket;
int did_put, i, ret, t_ret;
+ u_int32_t bucket, spares_entry;
- hcp = (HASH_CURSOR *)dbc->internal;
+ dbp = dbc->dbp;
opd = NULL;
+ mpf = dbp->mpf;
+ hcp = (HASH_CURSOR *)dbc->internal;
ret = 0;
/*
@@ -156,12 +163,47 @@ __ham_traverse(dbp, dbc, mode, callback, cookie)
* locking easy, makes this a pain in the butt. We have to traverse
* duplicate, overflow and big pages from the bucket so that we
* don't access anything that isn't properly locked.
+ *
*/
- for (bucket = 0; bucket <= hcp->hdr->max_bucket; bucket++) {
+ for (bucket = 0;; bucket++) {
+ /*
+ * We put the loop exit condition check here, because
+ * it made for a really vile extended ?: that made SCO's
+ * compiler drop core.
+ *
+ * If look_past_max is not set, we can stop at max_bucket;
+ * if it is set, we need to include pages that are part of
+ * the current doubling but beyond the highest bucket we've
+ * split into, as well as pages from a "future" doubling
+ * that may have been created within an aborted
+ * transaction. To do this, keep looping (and incrementing
+ * bucket) until the corresponding spares array entries
+ * cease to be defined.
+ */
+ if (look_past_max) {
+ spares_entry = __db_log2(bucket + 1);
+ if (spares_entry >= NCACHED ||
+ hcp->hdr->spares[spares_entry] == 0)
+ break;
+ } else {
+ if (bucket > hcp->hdr->max_bucket)
+ break;
+ }
+
hcp->bucket = bucket;
hcp->pgno = pgno = BUCKET_TO_PAGE(hcp, bucket);
for (ret = __ham_get_cpage(dbc, mode); ret == 0;
ret = __ham_next_cpage(dbc, pgno, 0)) {
+
+ /*
+ * If we are cleaning up pages past the max_bucket,
+ * then they may be on the free list and have their
+ * next pointers set, but the should be ignored. In
+ * fact, we really ought to just skip anybody who is
+ * not a valid page.
+ */
+ if (TYPE(hcp->page) == P_INVALID)
+ break;
pgno = NEXT_PGNO(hcp->page);
/*
@@ -171,17 +213,17 @@ __ham_traverse(dbp, dbc, mode, callback, cookie)
* case we have to count those pages).
*/
for (i = 0; i < NUM_ENT(hcp->page); i++) {
- hk = (HKEYDATA *)P_ENTRY(hcp->page, i);
+ hk = (HKEYDATA *)P_ENTRY(dbp, hcp->page, i);
switch (HPAGE_PTYPE(hk)) {
case H_OFFDUP:
memcpy(&opgno, HOFFDUP_PGNO(hk),
sizeof(db_pgno_t));
if ((ret = __db_c_newopd(dbc,
- opgno, &opd)) != 0)
+ opgno, NULL, &opd)) != 0)
return (ret);
if ((ret = __bam_traverse(opd,
DB_LOCK_READ, opgno,
- __ham_stat_callback, cookie))
+ callback, cookie))
!= 0)
goto err;
if ((ret = opd->c_close(opd)) != 0)
@@ -221,10 +263,10 @@ __ham_traverse(dbp, dbc, mode, callback, cookie)
goto err;
if (STD_LOCKING(dbc))
- (void)lock_put(dbp->dbenv, &hcp->lock);
+ (void)dbp->dbenv->lock_put(dbp->dbenv, &hcp->lock);
if (hcp->page != NULL) {
- if ((ret = memp_fput(dbc->dbp->mpf, hcp->page, 0)) != 0)
+ if ((ret = mpf->put(mpf, hcp->page, 0)) != 0)
return (ret);
hcp->page = NULL;
}
@@ -247,6 +289,7 @@ __ham_stat_callback(dbp, pagep, cookie, putp)
DB_BTREE_STAT bstat;
db_indx_t indx, len, off, tlen, top;
u_int8_t *hk;
+ int ret;
*putp = 0;
sp = cookie;
@@ -266,15 +309,15 @@ __ham_stat_callback(dbp, pagep, cookie, putp)
* is a bucket.
*/
if (PREV_PGNO(pagep) == PGNO_INVALID)
- sp->hash_bfree += P_FREESPACE(pagep);
+ sp->hash_bfree += P_FREESPACE(dbp, pagep);
else {
sp->hash_overflows++;
- sp->hash_ovfl_free += P_FREESPACE(pagep);
+ sp->hash_ovfl_free += P_FREESPACE(dbp, pagep);
}
top = NUM_ENT(pagep);
/* Correct for on-page duplicates and deleted items. */
for (indx = 0; indx < top; indx += P_INDX) {
- switch (*H_PAIRDATA(pagep, indx)) {
+ switch (*H_PAIRDATA(dbp, pagep, indx)) {
case H_OFFDUP:
case H_OFFPAGE:
break;
@@ -282,8 +325,8 @@ __ham_stat_callback(dbp, pagep, cookie, putp)
sp->hash_ndata++;
break;
case H_DUPLICATE:
- tlen = LEN_HDATA(pagep, 0, indx);
- hk = H_PAIRDATA(pagep, indx);
+ tlen = LEN_HDATA(dbp, pagep, 0, indx);
+ hk = H_PAIRDATA(dbp, pagep, indx);
for (off = 0; off < tlen;
off += len + 2 * sizeof (db_indx_t)) {
sp->hash_ndata++;
@@ -310,7 +353,8 @@ __ham_stat_callback(dbp, pagep, cookie, putp)
bstat.bt_int_pgfree = 0;
bstat.bt_leaf_pgfree = 0;
bstat.bt_ndata = 0;
- __bam_stat_callback(dbp, pagep, &bstat, putp);
+ if ((ret = __bam_stat_callback(dbp, pagep, &bstat, putp)) != 0)
+ return (ret);
sp->hash_dup++;
sp->hash_dup_free += bstat.bt_leaf_pgfree +
bstat.bt_dup_pgfree + bstat.bt_int_pgfree;
@@ -318,11 +362,10 @@ __ham_stat_callback(dbp, pagep, cookie, putp)
break;
case P_OVERFLOW:
sp->hash_bigpages++;
- sp->hash_big_bfree += P_OVFLSPACE(dbp->pgsize, pagep);
+ sp->hash_big_bfree += P_OVFLSPACE(dbp, dbp->pgsize, pagep);
break;
default:
- return (__db_unknown_type(dbp->dbenv,
- "__ham_stat_callback", pagep->type));
+ return (__db_pgfmt(dbp->dbenv, pagep->pgno));
}
return (0);
diff --git a/bdb/hash/hash_upgrade.c b/bdb/hash/hash_upgrade.c
index c34381276b4..2dd21d7b644 100644
--- a/bdb/hash/hash_upgrade.c
+++ b/bdb/hash/hash_upgrade.c
@@ -1,13 +1,13 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Copyright (c) 1996-2002
* Sleepycat Software. All rights reserved.
*/
#include "db_config.h"
#ifndef lint
-static const char revid[] = "$Id: hash_upgrade.c,v 11.25 2000/12/14 19:18:32 bostic Exp $";
+static const char revid[] = "$Id: hash_upgrade.c,v 11.32 2002/08/06 05:34:58 bostic Exp $";
#endif /* not lint */
#ifndef NO_SYSTEM_INCLUDES
@@ -18,14 +18,13 @@ static const char revid[] = "$Id: hash_upgrade.c,v 11.25 2000/12/14 19:18:32 bos
#endif
#include "db_int.h"
-#include "db_page.h"
-#include "db_swap.h"
-#include "hash.h"
-#include "db_upgrade.h"
+#include "dbinc/db_page.h"
+#include "dbinc/hash.h"
+#include "dbinc/db_upgrade.h"
/*
* __ham_30_hashmeta --
- * Upgrade the database from version 4/5 to version 6.
+ * Upgrade the database from version 4/5 to version 6.
*
* PUBLIC: int __ham_30_hashmeta __P((DB *, char *, u_int8_t *));
*/
@@ -163,10 +162,6 @@ __ham_30_sizefix(dbp, fhp, realname, metabuf)
return (ret);
if ((ret = __os_write(dbenv, fhp, buf, pagesize, &nw)) != 0)
return (ret);
- if (nw != pagesize) {
- __db_err(dbenv, "Short write during upgrade");
- return (EIO);
- }
}
return (0);
@@ -174,7 +169,7 @@ __ham_30_sizefix(dbp, fhp, realname, metabuf)
/*
* __ham_31_hashmeta --
- * Upgrade the database from version 6 to version 7.
+ * Upgrade the database from version 6 to version 7.
*
* PUBLIC: int __ham_31_hashmeta
* PUBLIC: __P((DB *, char *, u_int32_t, DB_FH *, PAGE *, int *));
@@ -229,7 +224,7 @@ __ham_31_hashmeta(dbp, real_name, flags, fhp, h, dirtyp)
/*
* __ham_31_hash --
- * Upgrade the database hash leaf pages.
+ * Upgrade the database hash leaf pages.
*
* PUBLIC: int __ham_31_hash
* PUBLIC: __P((DB *, char *, u_int32_t, DB_FH *, PAGE *, int *));
@@ -252,7 +247,7 @@ __ham_31_hash(dbp, real_name, flags, fhp, h, dirtyp)
ret = 0;
for (indx = 0; indx < NUM_ENT(h); indx += 2) {
- hk = (HKEYDATA *)H_PAIRDATA(h, indx);
+ hk = (HKEYDATA *)H_PAIRDATA(dbp, h, indx);
if (HPAGE_PTYPE(hk) == H_OFFDUP) {
memcpy(&pgno, HOFFDUP_PGNO(hk), sizeof(db_pgno_t));
tpgno = pgno;
diff --git a/bdb/hash/hash_verify.c b/bdb/hash/hash_verify.c
index 31dd7cc2299..e6f5a2b0d65 100644
--- a/bdb/hash/hash_verify.c
+++ b/bdb/hash/hash_verify.c
@@ -1,16 +1,16 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1999, 2000
+ * Copyright (c) 1999-2002
* Sleepycat Software. All rights reserved.
*
- * $Id: hash_verify.c,v 1.31 2000/11/30 00:58:37 ubell Exp $
+ * $Id: hash_verify.c,v 1.53 2002/08/06 05:35:02 bostic Exp $
*/
#include "db_config.h"
#ifndef lint
-static const char revid[] = "$Id: hash_verify.c,v 1.31 2000/11/30 00:58:37 ubell Exp $";
+static const char revid[] = "$Id: hash_verify.c,v 1.53 2002/08/06 05:35:02 bostic Exp $";
#endif /* not lint */
#ifndef NO_SYSTEM_INCLUDES
@@ -20,10 +20,10 @@ static const char revid[] = "$Id: hash_verify.c,v 1.31 2000/11/30 00:58:37 ubell
#endif
#include "db_int.h"
-#include "db_page.h"
-#include "db_verify.h"
-#include "btree.h"
-#include "hash.h"
+#include "dbinc/db_page.h"
+#include "dbinc/db_verify.h"
+#include "dbinc/btree.h"
+#include "dbinc/hash.h"
static int __ham_dups_unsorted __P((DB *, u_int8_t *, u_int32_t));
static int __ham_vrfy_bucket __P((DB *, VRFY_DBINFO *, HMETA *, u_int32_t,
@@ -83,8 +83,8 @@ __ham_vrfy_meta(dbp, vdp, m, pgno, flags)
if (!LF_ISSET(DB_NOORDERCHK))
if (m->h_charkey != hfunc(dbp, CHARKEY, sizeof(CHARKEY))) {
EPRINT((dbp->dbenv,
-"Database has different custom hash function; reverify with DB_NOORDERCHK set"
- ));
+"Page %lu: database has different custom hash function; reverify with DB_NOORDERCHK set",
+ (u_long)pgno));
/*
* Return immediately; this is probably a sign
* of user error rather than database corruption, so
@@ -97,8 +97,8 @@ __ham_vrfy_meta(dbp, vdp, m, pgno, flags)
/* max_bucket must be less than the last pgno. */
if (m->max_bucket > vdp->last_pgno) {
EPRINT((dbp->dbenv,
- "Impossible max_bucket %lu on meta page %lu",
- m->max_bucket, pgno));
+ "Page %lu: Impossible max_bucket %lu on meta page",
+ (u_long)pgno, (u_long)m->max_bucket));
/*
* Most other fields depend somehow on max_bucket, so
* we just return--there will be lots of extraneous
@@ -118,15 +118,15 @@ __ham_vrfy_meta(dbp, vdp, m, pgno, flags)
pwr = (m->max_bucket == 0) ? 1 : 1 << __db_log2(m->max_bucket + 1);
if (m->high_mask != pwr - 1) {
EPRINT((dbp->dbenv,
- "Incorrect high_mask %lu on page %lu, should be %lu",
- m->high_mask, pgno, pwr - 1));
+ "Page %lu: incorrect high_mask %lu, should be %lu",
+ (u_long)pgno, (u_long)m->high_mask, (u_long)pwr - 1));
isbad = 1;
}
pwr >>= 1;
if (m->low_mask != pwr - 1) {
EPRINT((dbp->dbenv,
- "Incorrect low_mask %lu on page %lu, should be %lu",
- m->low_mask, pgno, pwr - 1));
+ "Page %lu: incorrect low_mask %lu, should be %lu",
+ (u_long)pgno, (u_long)m->low_mask, (u_long)pwr - 1));
isbad = 1;
}
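
The verifier recomputes what both masks must be from max_bucket alone: pwr is the smallest power of two greater than max_bucket, high_mask must be pwr - 1 and low_mask must be (pwr >> 1) - 1. A standalone sketch of the same consistency check (hypothetical helper names, no real HMETA struct):

    #include <stdio.h>

    /* Ceiling log2 (stand-in for __db_log2). */
    static unsigned
    clog2(unsigned n)
    {
        unsigned p;

        for (p = 0; (1u << p) < n; p++)
            ;
        return (p);
    }

    /* Return 0 if the masks agree with max_bucket, -1 otherwise. */
    static int
    check_masks(unsigned max_bucket, unsigned high_mask, unsigned low_mask)
    {
        unsigned pwr;

        pwr = max_bucket == 0 ? 1 : 1u << clog2(max_bucket + 1);
        if (high_mask != pwr - 1)
            return (-1);
        if (low_mask != (pwr >> 1) - 1)
            return (-1);
        return (0);
    }

    int
    main(void)
    {
        /* max_bucket 5: table spans buckets 0-5, pwr = 8, masks 7 and 3. */
        printf("good meta: %d\n", check_masks(5, 7, 3));
        printf("bad meta:  %d\n", check_masks(5, 7, 1));
        return (0);
    }
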
@@ -140,8 +140,8 @@ __ham_vrfy_meta(dbp, vdp, m, pgno, flags)
*/
if (m->nelem > 0x80000000) {
EPRINT((dbp->dbenv,
- "Suspiciously high nelem of %lu on page %lu",
- m->nelem, pgno));
+ "Page %lu: suspiciously high nelem of %lu",
+ (u_long)pgno, (u_long)m->nelem));
isbad = 1;
pip->h_nelem = 0;
} else
@@ -164,13 +164,14 @@ __ham_vrfy_meta(dbp, vdp, m, pgno, flags)
mbucket = (1 << i) - 1;
if (BS_TO_PAGE(mbucket, m->spares) > vdp->last_pgno) {
EPRINT((dbp->dbenv,
- "Spares array entry %lu, page %lu is invalid",
- i, pgno));
+ "Page %lu: spares array entry %d is invalid",
+ (u_long)pgno, i));
isbad = 1;
}
}
-err: if ((t_ret = __db_vrfy_putpageinfo(vdp, pip)) != 0 && ret == 0)
+err: if ((t_ret =
+ __db_vrfy_putpageinfo(dbp->dbenv, vdp, pip)) != 0 && ret == 0)
ret = t_ret;
return ((ret == 0 && isbad == 1) ? DB_VERIFY_BAD : ret);
}
@@ -192,6 +193,7 @@ __ham_vrfy(dbp, vdp, h, pgno, flags)
{
VRFY_PAGEINFO *pip;
u_int32_t ent, himark, inpend;
+ db_indx_t *inp;
int isbad, ret, t_ret;
isbad = 0;
@@ -226,31 +228,33 @@ __ham_vrfy(dbp, vdp, h, pgno, flags)
* In any case, we return immediately if things are bad, as it would
* be unsafe to proceed.
*/
+ inp = P_INP(dbp, h);
for (ent = 0, himark = dbp->pgsize,
- inpend = (u_int8_t *)h->inp - (u_int8_t *)h;
+ inpend = (u_int32_t)((u_int8_t *)inp - (u_int8_t *)h);
ent < NUM_ENT(h); ent++)
- if (h->inp[ent] >= himark) {
+ if (inp[ent] >= himark) {
EPRINT((dbp->dbenv,
- "Item %lu on page %lu out of order or nonsensical",
- ent, pgno));
+ "Page %lu: item %lu is out of order or nonsensical",
+ (u_long)pgno, (u_long)ent));
isbad = 1;
goto err;
} else if (inpend >= himark) {
EPRINT((dbp->dbenv,
- "inp array collided with data on page %lu",
- pgno));
+ "Page %lu: entries array collided with data",
+ (u_long)pgno));
isbad = 1;
goto err;
} else {
- himark = h->inp[ent];
+ himark = inp[ent];
inpend += sizeof(db_indx_t);
if ((ret = __ham_vrfy_item(
dbp, vdp, pgno, h, ent, flags)) != 0)
goto err;
}
-err: if ((t_ret = __db_vrfy_putpageinfo(vdp, pip)) != 0 && ret == 0)
+err: if ((t_ret =
+ __db_vrfy_putpageinfo(dbp->dbenv, vdp, pip)) != 0 && ret == 0)
ret = t_ret;
return (ret == 0 && isbad == 1 ? DB_VERIFY_BAD : ret);
}
@@ -279,7 +283,7 @@ __ham_vrfy_item(dbp, vdp, pgno, h, i, flags)
if ((ret = __db_vrfy_getpageinfo(vdp, pgno, &pip)) != 0)
return (ret);
- switch (HPAGE_TYPE(h, i)) {
+ switch (HPAGE_TYPE(dbp, h, i)) {
case H_KEYDATA:
/* Nothing to do here--everything but the type field is data */
break;
@@ -287,8 +291,8 @@ __ham_vrfy_item(dbp, vdp, pgno, h, i, flags)
/* Are we a datum or a key? Better be the former. */
if (i % 2 == 0) {
EPRINT((dbp->dbenv,
- "Hash key stored as duplicate at page %lu item %lu",
- pip->pgno, i));
+ "Page %lu: hash key stored as duplicate item %lu",
+ (u_long)pip->pgno, (u_long)i));
}
/*
* Dups are encoded as a series within a single HKEYDATA,
@@ -300,16 +304,16 @@ __ham_vrfy_item(dbp, vdp, pgno, h, i, flags)
* Note that at this point, we've verified item i-1, so
* it's safe to use LEN_HKEYDATA (which looks at inp[i-1]).
*/
- len = LEN_HKEYDATA(h, dbp->pgsize, i);
- databuf = HKEYDATA_DATA(P_ENTRY(h, i));
+ len = LEN_HKEYDATA(dbp, h, dbp->pgsize, i);
+ databuf = HKEYDATA_DATA(P_ENTRY(dbp, h, i));
for (offset = 0; offset < len; offset += DUP_SIZE(dlen)) {
memcpy(&dlen, databuf + offset, sizeof(db_indx_t));
/* Make sure the length is plausible. */
if (offset + DUP_SIZE(dlen) > len) {
EPRINT((dbp->dbenv,
- "Duplicate item %lu, page %lu has bad length",
- i, pip->pgno));
+ "Page %lu: duplicate item %lu has bad length",
+ (u_long)pip->pgno, (u_long)i));
ret = DB_VERIFY_BAD;
goto err;
}
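
On-page duplicates are packed into one item as a series of length/data/length triples, and the verifier walks the series with offset += DUP_SIZE(dlen), rejecting an element whose length runs past the item or whose leading and trailing lengths disagree. A standalone sketch of that encoding and walk over a toy buffer, with a 16-bit index type standing in for db_indx_t:

    #include <stdio.h>
    #include <string.h>

    typedef unsigned short indx_t;               /* like db_indx_t */
    #define DUP_SIZE(len)   ((len) + 2 * sizeof(indx_t))

    /* Append one duplicate as [len][data][len]. */
    static size_t
    dup_put(unsigned char *buf, size_t off, const char *s)
    {
        indx_t len = (indx_t)strlen(s);

        memcpy(buf + off, &len, sizeof(len));
        memcpy(buf + off + sizeof(len), s, len);
        memcpy(buf + off + sizeof(len) + len, &len, sizeof(len));
        return (off + DUP_SIZE(len));
    }

    /* Check the series the way __ham_vrfy_item does; 0 means sane. */
    static int
    dup_verify(const unsigned char *buf, size_t total)
    {
        size_t off;
        indx_t dlen, elen;

        for (off = 0; off < total; off += DUP_SIZE(dlen)) {
            memcpy(&dlen, buf + off, sizeof(dlen));
            if (off + DUP_SIZE(dlen) > total)
                return (-1);                      /* bad length */
            memcpy(&elen, buf + off + sizeof(dlen) + dlen, sizeof(elen));
            if (elen != dlen)
                return (-1);                      /* lengths disagree */
        }
        return (0);
    }

    int
    main(void)
    {
        unsigned char buf[64];
        size_t total = 0;

        total = dup_put(buf, total, "red");
        total = dup_put(buf, total, "green");
        printf("verify: %d\n", dup_verify(buf, total));
        return (0);
    }
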
@@ -323,8 +327,8 @@ __ham_vrfy_item(dbp, vdp, pgno, h, i, flags)
sizeof(db_indx_t));
if (elen != dlen) {
EPRINT((dbp->dbenv,
- "Duplicate item %lu, page %lu has two different lengths",
- i, pip->pgno));
+ "Page %lu: duplicate item %lu has two different lengths",
+ (u_long)pip->pgno, (u_long)i));
ret = DB_VERIFY_BAD;
goto err;
}
@@ -336,12 +340,12 @@ __ham_vrfy_item(dbp, vdp, pgno, h, i, flags)
break;
case H_OFFPAGE:
/* Offpage item. Make sure pgno is sane, save off. */
- memcpy(&hop, P_ENTRY(h, i), HOFFPAGE_SIZE);
+ memcpy(&hop, P_ENTRY(dbp, h, i), HOFFPAGE_SIZE);
if (!IS_VALID_PGNO(hop.pgno) || hop.pgno == pip->pgno ||
hop.pgno == PGNO_INVALID) {
EPRINT((dbp->dbenv,
- "Offpage item %lu, page %lu has bad page number",
- i, pip->pgno));
+ "Page %lu: offpage item %lu has bad pgno %lu",
+ (u_long)pip->pgno, (u_long)i, (u_long)hop.pgno));
ret = DB_VERIFY_BAD;
goto err;
}
@@ -354,12 +358,12 @@ __ham_vrfy_item(dbp, vdp, pgno, h, i, flags)
break;
case H_OFFDUP:
/* Offpage duplicate item. Same drill. */
- memcpy(&hod, P_ENTRY(h, i), HOFFDUP_SIZE);
+ memcpy(&hod, P_ENTRY(dbp, h, i), HOFFDUP_SIZE);
if (!IS_VALID_PGNO(hod.pgno) || hod.pgno == pip->pgno ||
hod.pgno == PGNO_INVALID) {
EPRINT((dbp->dbenv,
- "Offpage item %lu, page %lu has bad page number",
- i, pip->pgno));
+ "Page %lu: offpage item %lu has bad page number",
+ (u_long)pip->pgno, (u_long)i));
ret = DB_VERIFY_BAD;
goto err;
}
@@ -372,12 +376,14 @@ __ham_vrfy_item(dbp, vdp, pgno, h, i, flags)
break;
default:
EPRINT((dbp->dbenv,
- "Item %i, page %lu has bad type", i, pip->pgno));
+ "Page %lu: item %i has bad type",
+ (u_long)pip->pgno, (u_long)i));
ret = DB_VERIFY_BAD;
break;
}
-err: if ((t_ret = __db_vrfy_putpageinfo(vdp, pip)) != 0 && ret == 0)
+err: if ((t_ret =
+ __db_vrfy_putpageinfo(dbp->dbenv, vdp, pip)) != 0 && ret == 0)
ret = t_ret;
return (ret);
}
@@ -397,29 +403,32 @@ __ham_vrfy_structure(dbp, vdp, meta_pgno, flags)
u_int32_t flags;
{
DB *pgset;
+ DB_MPOOLFILE *mpf;
HMETA *m;
PAGE *h;
VRFY_PAGEINFO *pip;
int isbad, p, ret, t_ret;
db_pgno_t pgno;
- u_int32_t bucket;
+ u_int32_t bucket, spares_entry;
- ret = isbad = 0;
- h = NULL;
+ mpf = dbp->mpf;
pgset = vdp->pgset;
+ h = NULL;
+ ret = isbad = 0;
if ((ret = __db_vrfy_pgset_get(pgset, meta_pgno, &p)) != 0)
return (ret);
if (p != 0) {
EPRINT((dbp->dbenv,
- "Hash meta page %lu referenced twice", meta_pgno));
+ "Page %lu: Hash meta page referenced twice",
+ (u_long)meta_pgno));
return (DB_VERIFY_BAD);
}
if ((ret = __db_vrfy_pgset_inc(pgset, meta_pgno)) != 0)
return (ret);
/* Get the meta page; we'll need it frequently. */
- if ((ret = memp_fget(dbp->mpf, &meta_pgno, 0, &m)) != 0)
+ if ((ret = mpf->get(mpf, &meta_pgno, 0, &m)) != 0)
return (ret);
/* Loop through bucket by bucket. */
@@ -445,8 +454,8 @@ __ham_vrfy_structure(dbp, vdp, meta_pgno, flags)
* Note that this should be safe, since we've already verified
* that the spares array is sane.
*/
- for (bucket = m->max_bucket + 1;
- m->spares[__db_log2(bucket + 1)] != 0; bucket++) {
+ for (bucket = m->max_bucket + 1; spares_entry = __db_log2(bucket + 1),
+ spares_entry < NCACHED && m->spares[spares_entry] != 0; bucket++) {
pgno = BS_TO_PAGE(bucket, m->spares);
if ((ret = __db_vrfy_getpageinfo(vdp, pgno, &pip)) != 0)
goto err;
@@ -454,43 +463,51 @@ __ham_vrfy_structure(dbp, vdp, meta_pgno, flags)
/* It's okay if these pages are totally zeroed; unmark it. */
F_CLR(pip, VRFY_IS_ALLZEROES);
+ /* It's also OK if this page is simply invalid. */
+ if (pip->type == P_INVALID) {
+ if ((ret = __db_vrfy_putpageinfo(dbp->dbenv,
+ vdp, pip)) != 0)
+ goto err;
+ continue;
+ }
+
if (pip->type != P_HASH) {
EPRINT((dbp->dbenv,
- "Hash bucket %lu maps to non-hash page %lu",
- bucket, pgno));
+ "Page %lu: hash bucket %lu maps to non-hash page",
+ (u_long)pgno, (u_long)bucket));
isbad = 1;
} else if (pip->entries != 0) {
EPRINT((dbp->dbenv,
- "Non-empty page %lu in unused hash bucket %lu",
- pgno, bucket));
+ "Page %lu: non-empty page in unused hash bucket %lu",
+ (u_long)pgno, (u_long)bucket));
isbad = 1;
} else {
if ((ret = __db_vrfy_pgset_get(pgset, pgno, &p)) != 0)
goto err;
if (p != 0) {
EPRINT((dbp->dbenv,
- "Hash page %lu above max_bucket referenced",
- pgno));
+ "Page %lu: above max_bucket referenced",
+ (u_long)pgno));
isbad = 1;
} else {
if ((ret =
__db_vrfy_pgset_inc(pgset, pgno)) != 0)
goto err;
- if ((ret =
- __db_vrfy_putpageinfo(vdp, pip)) != 0)
+ if ((ret = __db_vrfy_putpageinfo(dbp->dbenv,
+ vdp, pip)) != 0)
goto err;
continue;
}
}
/* If we got here, it's an error. */
- (void)__db_vrfy_putpageinfo(vdp, pip);
+ (void)__db_vrfy_putpageinfo(dbp->dbenv, vdp, pip);
goto err;
}
-err: if ((t_ret = memp_fput(dbp->mpf, m, 0)) != 0)
+err: if ((t_ret = mpf->put(mpf, m, 0)) != 0)
return (t_ret);
- if (h != NULL && (t_ret = memp_fput(dbp->mpf, h, 0)) != 0)
+ if (h != NULL && (t_ret = mpf->put(mpf, h, 0)) != 0)
return (t_ret);
return ((isbad == 1 && ret == 0) ? DB_VERIFY_BAD: ret);
}
@@ -535,8 +552,9 @@ __ham_vrfy_bucket(dbp, vdp, m, bucket, flags)
/* Make sure we got a plausible page number. */
if (pgno > vdp->last_pgno || pip->type != P_HASH) {
- EPRINT((dbp->dbenv, "Bucket %lu has impossible first page %lu",
- bucket, pgno));
+ EPRINT((dbp->dbenv,
+ "Page %lu: impossible first page in bucket %lu",
+ (u_long)pgno, (u_long)bucket));
/* Unsafe to continue. */
isbad = 1;
goto err;
@@ -544,7 +562,8 @@ __ham_vrfy_bucket(dbp, vdp, m, bucket, flags)
if (pip->prev_pgno != PGNO_INVALID) {
EPRINT((dbp->dbenv,
- "First hash page %lu in bucket %lu has a prev_pgno", pgno));
+ "Page %lu: first page in hash bucket %lu has a prev_pgno",
+ (u_long)pgno, (u_long)bucket));
isbad = 1;
}
@@ -564,7 +583,8 @@ __ham_vrfy_bucket(dbp, vdp, m, bucket, flags)
goto err;
if (p != 0) {
EPRINT((dbp->dbenv,
- "Hash page %lu referenced twice", pgno));
+ "Page %lu: hash page referenced twice",
+ (u_long)pgno));
isbad = 1;
/* Unsafe to continue. */
goto err;
@@ -584,11 +604,11 @@ __ham_vrfy_bucket(dbp, vdp, m, bucket, flags)
F_CLR(pip, VRFY_IS_ALLZEROES);
/* If we have dups, our meta page had better know about it. */
- if (F_ISSET(pip, VRFY_HAS_DUPS)
- && !F_ISSET(mip, VRFY_HAS_DUPS)) {
+ if (F_ISSET(pip, VRFY_HAS_DUPS) &&
+ !F_ISSET(mip, VRFY_HAS_DUPS)) {
EPRINT((dbp->dbenv,
- "Duplicates present in non-duplicate database, page %lu",
- pgno));
+ "Page %lu: duplicates present in non-duplicate database",
+ (u_long)pgno));
isbad = 1;
}
@@ -599,8 +619,8 @@ __ham_vrfy_bucket(dbp, vdp, m, bucket, flags)
if (F_ISSET(mip, VRFY_HAS_DUPSORT) &&
F_ISSET(pip, VRFY_DUPS_UNSORTED)) {
EPRINT((dbp->dbenv,
- "Unsorted dups in sorted-dup database, page %lu",
- pgno));
+ "Page %lu: unsorted dups in sorted-dup database",
+ (u_long)pgno));
isbad = 1;
}
@@ -625,8 +645,8 @@ __ham_vrfy_bucket(dbp, vdp, m, bucket, flags)
}
if ((ret = __bam_vrfy_subtree(dbp, vdp,
child->pgno, NULL, NULL,
- flags | ST_RECNUM | ST_DUPSET, NULL,
- NULL, NULL)) != 0) {
+ flags | ST_RECNUM | ST_DUPSET | ST_TOPLEVEL,
+ NULL, NULL, NULL)) != 0) {
if (ret == DB_VERIFY_BAD)
isbad = 1;
else
@@ -648,7 +668,7 @@ __ham_vrfy_bucket(dbp, vdp, m, bucket, flags)
}
next_pgno = pip->next_pgno;
- ret = __db_vrfy_putpageinfo(vdp, pip);
+ ret = __db_vrfy_putpageinfo(dbp->dbenv, vdp, pip);
pip = NULL;
if (ret != 0)
@@ -661,7 +681,8 @@ __ham_vrfy_bucket(dbp, vdp, m, bucket, flags)
if (!IS_VALID_PGNO(next_pgno)) {
DB_ASSERT(0);
EPRINT((dbp->dbenv,
- "Hash page %lu has bad next_pgno", pgno));
+ "Page %lu: hash page has bad next_pgno",
+ (u_long)pgno));
isbad = 1;
goto err;
}
@@ -670,8 +691,9 @@ __ham_vrfy_bucket(dbp, vdp, m, bucket, flags)
goto err;
if (pip->prev_pgno != pgno) {
- EPRINT((dbp->dbenv, "Hash page %lu has bad prev_pgno",
- next_pgno));
+ EPRINT((dbp->dbenv,
+ "Page %lu: hash page has bad prev_pgno",
+ (u_long)next_pgno));
isbad = 1;
}
pgno = next_pgno;
@@ -679,11 +701,11 @@ __ham_vrfy_bucket(dbp, vdp, m, bucket, flags)
err: if (cc != NULL && ((t_ret = __db_vrfy_ccclose(cc)) != 0) && ret == 0)
ret = t_ret;
- if (mip != NULL && ((t_ret = __db_vrfy_putpageinfo(vdp, mip)) != 0) &&
- ret == 0)
+ if (mip != NULL && ((t_ret =
+ __db_vrfy_putpageinfo(dbp->dbenv, vdp, mip)) != 0) && ret == 0)
ret = t_ret;
- if (pip != NULL && ((t_ret = __db_vrfy_putpageinfo(vdp, pip)) != 0) &&
- ret == 0)
+ if (pip != NULL && ((t_ret =
+ __db_vrfy_putpageinfo(dbp->dbenv, vdp, pip)) != 0) && ret == 0)
ret = t_ret;
return ((ret == 0 && isbad == 1) ? DB_VERIFY_BAD : ret);
}
@@ -707,16 +729,19 @@ __ham_vrfy_hashing(dbp, nentries, m, thisbucket, pgno, flags, hfunc)
u_int32_t (*hfunc) __P((DB *, const void *, u_int32_t));
{
DBT dbt;
+ DB_MPOOLFILE *mpf;
PAGE *h;
db_indx_t i;
int ret, t_ret, isbad;
u_int32_t hval, bucket;
+ mpf = dbp->mpf;
ret = isbad = 0;
+
memset(&dbt, 0, sizeof(DBT));
F_SET(&dbt, DB_DBT_REALLOC);
- if ((ret = memp_fget(dbp->mpf, &pgno, 0, &h)) != 0)
+ if ((ret = mpf->get(mpf, &pgno, 0, &h)) != 0)
return (ret);
for (i = 0; i < nentries; i += 2) {
@@ -738,15 +763,15 @@ __ham_vrfy_hashing(dbp, nentries, m, thisbucket, pgno, flags, hfunc)
if (bucket != thisbucket) {
EPRINT((dbp->dbenv,
- "Item %lu on page %lu hashes incorrectly",
- i, pgno));
+ "Page %lu: item %lu hashes incorrectly",
+ (u_long)pgno, (u_long)i));
isbad = 1;
}
}
err: if (dbt.data != NULL)
- __os_free(dbt.data, 0);
- if ((t_ret = memp_fput(dbp->mpf, h, 0)) != 0)
+ __os_ufree(dbp->dbenv, dbt.data);
+ if ((t_ret = mpf->put(mpf, h, 0)) != 0)
return (t_ret);
return ((ret == 0 && isbad == 1) ? DB_VERIFY_BAD : ret);
@@ -782,7 +807,7 @@ __ham_salvage(dbp, vdp, pgno, h, handle, callback, flags)
dbt.flags = DB_DBT_REALLOC;
memset(&unkdbt, 0, sizeof(DBT));
- unkdbt.size = strlen("UNKNOWN") + 1;
+ unkdbt.size = (u_int32_t)strlen("UNKNOWN") + 1;
unkdbt.data = "UNKNOWN";
err_ret = 0;
@@ -791,7 +816,7 @@ __ham_salvage(dbp, vdp, pgno, h, handle, callback, flags)
* Allocate a buffer for overflow items. Start at one page;
* __db_safe_goff will realloc as needed.
*/
- if ((ret = __os_malloc(dbp->dbenv, dbp->pgsize, NULL, &buf)) != 0)
+ if ((ret = __os_malloc(dbp->dbenv, dbp->pgsize, &buf)) != 0)
return (ret);
himark = dbp->pgsize;
@@ -808,8 +833,8 @@ __ham_salvage(dbp, vdp, pgno, h, handle, callback, flags)
break;
if (ret == 0) {
- hk = P_ENTRY(h, i);
- len = LEN_HKEYDATA(h, dbp->pgsize, i);
+ hk = P_ENTRY(dbp, h, i);
+ len = LEN_HKEYDATA(dbp, h, dbp->pgsize, i);
if ((u_int32_t)(hk + len - (u_int8_t *)h) >
dbp->pgsize) {
/*
@@ -834,7 +859,7 @@ keydata: memcpy(buf, HKEYDATA_DATA(hk), len);
dbt.size = len;
dbt.data = buf;
if ((ret = __db_prdbt(&dbt,
- 0, " ", handle, callback, 0, NULL)) != 0)
+ 0, " ", handle, callback, 0, vdp)) != 0)
err_ret = ret;
break;
case H_OFFPAGE:
@@ -848,11 +873,11 @@ keydata: memcpy(buf, HKEYDATA_DATA(hk), len);
dpgno, &dbt, &buf, flags)) != 0) {
err_ret = ret;
(void)__db_prdbt(&unkdbt, 0, " ",
- handle, callback, 0, NULL);
+ handle, callback, 0, vdp);
break;
}
if ((ret = __db_prdbt(&dbt,
- 0, " ", handle, callback, 0, NULL)) != 0)
+ 0, " ", handle, callback, 0, vdp)) != 0)
err_ret = ret;
break;
case H_OFFDUP:
@@ -865,7 +890,7 @@ keydata: memcpy(buf, HKEYDATA_DATA(hk), len);
/* UNKNOWN iff pgno is bad or we're a key. */
if (!IS_VALID_PGNO(dpgno) || (i % 2 == 0)) {
if ((ret = __db_prdbt(&unkdbt, 0, " ",
- handle, callback, 0, NULL)) != 0)
+ handle, callback, 0, vdp)) != 0)
err_ret = ret;
} else if ((ret = __db_salvage_duptree(dbp,
vdp, dpgno, &dbt, handle, callback,
@@ -908,7 +933,7 @@ keydata: memcpy(buf, HKEYDATA_DATA(hk), len);
dbt.size = dlen;
dbt.data = buf;
if ((ret = __db_prdbt(&dbt, 0, " ",
- handle, callback, 0, NULL)) != 0)
+ handle, callback, 0, vdp)) != 0)
err_ret = ret;
tlen += sizeof(db_indx_t);
}
@@ -917,7 +942,7 @@ keydata: memcpy(buf, HKEYDATA_DATA(hk), len);
}
}
- __os_free(buf, 0);
+ __os_free(dbp->dbenv, buf);
if ((t_ret = __db_salvage_markdone(vdp, pgno)) != 0)
return (t_ret);
return ((ret == 0 && err_ret != 0) ? err_ret : ret);
@@ -938,6 +963,7 @@ int __ham_meta2pgset(dbp, vdp, hmeta, flags, pgset)
u_int32_t flags;
DB *pgset;
{
+ DB_MPOOLFILE *mpf;
PAGE *h;
db_pgno_t pgno;
u_int32_t bucket, totpgs;
@@ -951,6 +977,7 @@ int __ham_meta2pgset(dbp, vdp, hmeta, flags, pgset)
DB_ASSERT(pgset != NULL);
+ mpf = dbp->mpf;
totpgs = 0;
/*
@@ -967,7 +994,7 @@ int __ham_meta2pgset(dbp, vdp, hmeta, flags, pgset)
* Safely walk the list of pages in this bucket.
*/
for (;;) {
- if ((ret = memp_fget(dbp->mpf, &pgno, 0, &h)) != 0)
+ if ((ret = mpf->get(mpf, &pgno, 0, &h)) != 0)
return (ret);
if (TYPE(h) == P_HASH) {
@@ -976,24 +1003,26 @@ int __ham_meta2pgset(dbp, vdp, hmeta, flags, pgset)
* pgset.
*/
if (++totpgs > vdp->last_pgno) {
- (void)memp_fput(dbp->mpf, h, 0);
+ (void)mpf->put(mpf, h, 0);
return (DB_VERIFY_BAD);
}
if ((ret =
- __db_vrfy_pgset_inc(pgset, pgno)) != 0)
+ __db_vrfy_pgset_inc(pgset, pgno)) != 0) {
+ (void)mpf->put(mpf, h, 0);
return (ret);
+ }
pgno = NEXT_PGNO(h);
} else
pgno = PGNO_INVALID;
- if ((ret = memp_fput(dbp->mpf, h, 0)) != 0)
+ if ((ret = mpf->put(mpf, h, 0)) != 0)
return (ret);
/* If the new pgno is wonky, go onto the next bucket. */
if (!IS_VALID_PGNO(pgno) ||
pgno == PGNO_INVALID)
- goto nextbucket;
+ break;
/*
* If we've touched this page before, we have a cycle;
@@ -1002,9 +1031,8 @@ int __ham_meta2pgset(dbp, vdp, hmeta, flags, pgset)
if ((ret = __db_vrfy_pgset_get(pgset, pgno, &val)) != 0)
return (ret);
if (val != 0)
- goto nextbucket;
+ break;
}
-nextbucket: ;
}
return (0);
}
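The hash-verifier hunks above follow one pattern throughout: the old memp_fget()/memp_fput() calls are replaced by the DB_MPOOLFILE method handle, and the early-return paths now put the page back first (see the __db_vrfy_pgset_inc failure path in __ham_meta2pgset). Below is a minimal sketch of that get/use/put discipline, not part of the patch; it assumes the DB-internal headers (db_int.h, dbinc/db_page.h) and an already-open handle, and the helper name walk_one_page is hypothetical.

static int walk_one_page __P((DB *, db_pgno_t));

static int
walk_one_page(dbp, pgno)
	DB *dbp;
	db_pgno_t pgno;
{
	DB_MPOOLFILE *mpf;
	PAGE *h;
	int ret, t_ret;

	mpf = dbp->mpf;

	/* mpf->get replaces the old memp_fget(dbp->mpf, ...) call. */
	if ((ret = mpf->get(mpf, &pgno, 0, &h)) != 0)
		return (ret);

	/* ... examine the page here; set ret on any problem ... */

	/* mpf->put replaces memp_fput; release the page even on error. */
	if ((t_ret = mpf->put(mpf, h, 0)) != 0 && ret == 0)
		ret = t_ret;
	return (ret);
}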
diff --git a/bdb/hmac/hmac.c b/bdb/hmac/hmac.c
new file mode 100644
index 00000000000..d39a154ec63
--- /dev/null
+++ b/bdb/hmac/hmac.c
@@ -0,0 +1,207 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2001-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * Some parts of this code originally written by Adam Stubblefield,
+ * astubble@rice.edu.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: hmac.c,v 1.25 2002/09/10 02:40:40 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/crypto.h"
+#include "dbinc/db_page.h" /* for hash.h only */
+#include "dbinc/hash.h"
+#include "dbinc/hmac.h"
+
+#define HMAC_OUTPUT_SIZE 20
+#define HMAC_BLOCK_SIZE 64
+
+static void __db_hmac __P((u_int8_t *, u_int8_t *, size_t, u_int8_t *));
+
+/*
+ * !!!
+ * All of these functions use a ctx structure on the stack. The __db_SHA1Init
+ * call does not initialize the 64-byte buffer portion of it. The
+ * underlying SHA1 functions will properly pad the buffer if the data length
+ * is less than 64-bytes, so there isn't a chance of reading uninitialized
+ * memory. Although it would be cleaner to do a memset(ctx.buffer, 0, 64)
+ * we do not want to incur that penalty if we don't have to for performance.
+ */
+
+/*
+ * __db_hmac --
+ * Do a hashed MAC.
+ */
+static void
+__db_hmac(k, data, data_len, mac)
+ u_int8_t *k, *data, *mac;
+ size_t data_len;
+{
+ SHA1_CTX ctx;
+ u_int8_t key[HMAC_BLOCK_SIZE];
+ u_int8_t ipad[HMAC_BLOCK_SIZE];
+ u_int8_t opad[HMAC_BLOCK_SIZE];
+ u_int8_t tmp[HMAC_OUTPUT_SIZE];
+ int i;
+
+ memset(key, 0x00, HMAC_BLOCK_SIZE);
+ memset(ipad, 0x36, HMAC_BLOCK_SIZE);
+ memset(opad, 0x5C, HMAC_BLOCK_SIZE);
+
+ memcpy(key, k, HMAC_OUTPUT_SIZE);
+
+ for (i = 0; i < HMAC_BLOCK_SIZE; i++) {
+ ipad[i] ^= key[i];
+ opad[i] ^= key[i];
+ }
+
+ __db_SHA1Init(&ctx);
+ __db_SHA1Update(&ctx, ipad, HMAC_BLOCK_SIZE);
+ __db_SHA1Update(&ctx, data, data_len);
+ __db_SHA1Final(tmp, &ctx);
+ __db_SHA1Init(&ctx);
+ __db_SHA1Update(&ctx, opad, HMAC_BLOCK_SIZE);
+ __db_SHA1Update(&ctx, tmp, HMAC_OUTPUT_SIZE);
+ __db_SHA1Final(mac, &ctx);
+ return;
+}
+
+/*
+ * __db_chksum --
+ * Create a MAC/SHA1 checksum.
+ *
+ * PUBLIC: void __db_chksum __P((u_int8_t *, size_t, u_int8_t *, u_int8_t *));
+ */
+void
+__db_chksum(data, data_len, mac_key, store)
+ u_int8_t *data;
+ size_t data_len;
+ u_int8_t *mac_key;
+ u_int8_t *store;
+{
+ int sumlen;
+ u_int32_t hash4;
+ u_int8_t tmp[DB_MAC_KEY];
+
+	/*
+	 * Since the checksum might be on a page of data we are checksumming,
+	 * and we might overwrite it after checksumming, we zero out the
+	 * checksum value so that we have a known value there when we later
+	 * verify the checksum.
+	 */
+ if (mac_key == NULL)
+ sumlen = sizeof(u_int32_t);
+ else
+ sumlen = DB_MAC_KEY;
+ memset(store, 0, sumlen);
+ if (mac_key == NULL) {
+ /* Just a hash, no MAC */
+ hash4 = __ham_func4(NULL, data, (u_int32_t)data_len);
+ memcpy(store, &hash4, sumlen);
+ } else {
+ memset(tmp, 0, DB_MAC_KEY);
+ __db_hmac(mac_key, data, data_len, tmp);
+ memcpy(store, tmp, sumlen);
+ }
+ return;
+}
+/*
+ * __db_derive_mac --
+ * Create a MAC/SHA1 key.
+ *
+ * PUBLIC: void __db_derive_mac __P((u_int8_t *, size_t, u_int8_t *));
+ */
+void
+__db_derive_mac(passwd, plen, mac_key)
+ u_int8_t *passwd;
+ size_t plen;
+ u_int8_t *mac_key;
+{
+ SHA1_CTX ctx;
+
+ /* Compute the MAC key. mac_key must be 20 bytes. */
+ __db_SHA1Init(&ctx);
+ __db_SHA1Update(&ctx, passwd, plen);
+ __db_SHA1Update(&ctx, (u_int8_t *)DB_MAC_MAGIC, strlen(DB_MAC_MAGIC));
+ __db_SHA1Update(&ctx, passwd, plen);
+ __db_SHA1Final(mac_key, &ctx);
+
+ return;
+}
+
+/*
+ * __db_check_chksum --
+ * Verify a checksum.
+ *
+ * Return 0 on success, >0 (errno) on error, -1 on checksum mismatch.
+ *
+ * PUBLIC: int __db_check_chksum __P((DB_ENV *,
+ * PUBLIC: DB_CIPHER *, u_int8_t *, void *, size_t, int));
+ */
+int
+__db_check_chksum(dbenv, db_cipher, chksum, data, data_len, is_hmac)
+ DB_ENV *dbenv;
+ DB_CIPHER *db_cipher;
+ u_int8_t *chksum;
+ void *data;
+ size_t data_len;
+ int is_hmac;
+{
+ int ret;
+ size_t sum_len;
+ u_int32_t hash4;
+ u_int8_t *mac_key, old[DB_MAC_KEY], new[DB_MAC_KEY];
+
+ /*
+ * If we are just doing checksumming and not encryption, then checksum
+ * is 4 bytes. Otherwise, it is DB_MAC_KEY size. Check for illegal
+ * combinations of crypto/non-crypto checksums.
+ */
+ if (is_hmac == 0) {
+ if (db_cipher != NULL) {
+ __db_err(dbenv,
+ "Unencrypted checksum with a supplied encryption key");
+ return (EINVAL);
+ }
+ sum_len = sizeof(u_int32_t);
+ mac_key = NULL;
+ } else {
+ if (db_cipher == NULL) {
+ __db_err(dbenv,
+ "Encrypted checksum: no encryption key specified");
+ return (EINVAL);
+ }
+ sum_len = DB_MAC_KEY;
+ mac_key = db_cipher->mac_key;
+ }
+
+ /*
+ * !!!
+ * Since the checksum might be on the page, we need to have known data
+ * there so that we can generate the same original checksum. We zero
+ * it out, just like we do in __db_chksum above.
+ */
+ memcpy(old, chksum, sum_len);
+ memset(chksum, 0, sum_len);
+ if (mac_key == NULL) {
+ /* Just a hash, no MAC */
+ hash4 = __ham_func4(NULL, data, (u_int32_t)data_len);
+ ret = memcmp((u_int32_t *)old, &hash4, sum_len) ? -1 : 0;
+ } else {
+ __db_hmac(mac_key, data, data_len, new);
+ ret = memcmp(old, new, sum_len) ? -1 : 0;
+ }
+
+ return (ret);
+}
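Taken together, the routines above form a simple round trip: __db_chksum stores either a 4-byte hash (no MAC key) or a DB_MAC_KEY-sized HMAC-SHA1 over a buffer, and __db_check_chksum recomputes and compares it. The sketch below shows only the hash-only path; it is not part of the patch, it assumes the DB-internal headers (db_int.h, dbinc/crypto.h, dbinc/hmac.h) and an initialized DB_ENV, and checksum_roundtrip is a hypothetical helper name.

static int checksum_roundtrip __P((DB_ENV *, u_int8_t *, size_t));

static int
checksum_roundtrip(dbenv, data, data_len)
	DB_ENV *dbenv;
	u_int8_t *data;
	size_t data_len;
{
	u_int8_t store[DB_MAC_KEY];

	/* No MAC key: only the first 4 bytes of "store" are used. */
	__db_chksum(data, data_len, NULL, store);

	/*
	 * No cipher, is_hmac == 0: returns 0 on match, -1 on a checksum
	 * mismatch, or an errno value on error.
	 */
	return (__db_check_chksum(dbenv, NULL, store, data, data_len, 0));
}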
diff --git a/bdb/hmac/sha1.c b/bdb/hmac/sha1.c
new file mode 100644
index 00000000000..2f2c806a21f
--- /dev/null
+++ b/bdb/hmac/sha1.c
@@ -0,0 +1,294 @@
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: sha1.c,v 1.13 2002/04/09 13:40:36 sue Exp $";
+#endif /* not lint */
+/*
+SHA-1 in C
+By Steve Reid <sreid@sea-to-sky.net>
+100% Public Domain
+
+-----------------
+Modified 7/98
+By James H. Brown <jbrown@burgoyne.com>
+Still 100% Public Domain
+
+Corrected a problem which generated improper hash values on 16 bit machines
+Routine SHA1Update changed from
+ void SHA1Update(SHA1_CTX* context, unsigned char* data, unsigned int
+len)
+to
+ void SHA1Update(SHA1_CTX* context, unsigned char* data, unsigned
+long len)
+
+The 'len' parameter was declared an int which works fine on 32 bit machines.
+However, on 16 bit machines an int is too small for the shifts being done
+against
+it. This caused the hash function to generate incorrect values if len was
+greater than 8191 (8K - 1) due to the 'len << 3' on line 3 of SHA1Update().
+
+Since the file IO in main() reads 16K at a time, any file 8K or larger would
+be guaranteed to generate the wrong hash (e.g. Test Vector #3, a million
+"a"s).
+
+I also changed the declaration of variables i & j in SHA1Update to
+unsigned long from unsigned int for the same reason.
+
+These changes should make no difference to any 32 bit implementations since
+an
+int and a long are the same size in those environments.
+
+--
+I also corrected a few compiler warnings generated by Borland C.
+1. Added #include <process.h> for exit() prototype
+2. Removed unused variable 'j' in SHA1Final
+3. Changed exit(0) to return(0) at end of main.
+
+ALL changes I made can be located by searching for comments containing 'JHB'
+-----------------
+Modified 8/98
+By Steve Reid <sreid@sea-to-sky.net>
+Still 100% public domain
+
+1- Removed #include <process.h> and used return() instead of exit()
+2- Fixed overwriting of finalcount in SHA1Final() (discovered by Chris Hall)
+3- Changed email address from steve@edmweb.com to sreid@sea-to-sky.net
+
+-----------------
+Modified 4/01
+By Saul Kravitz <Saul.Kravitz@celera.com>
+Still 100% PD
+Modified to run on Compaq Alpha hardware.
+
+
+*/
+
+/*
+Test Vectors (from FIPS PUB 180-1)
+"abc"
+ A9993E36 4706816A BA3E2571 7850C26C 9CD0D89D
+"abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq"
+ 84983E44 1C3BD26E BAAE4AA1 F95129E5 E54670F1
+A million repetitions of "a"
+ 34AA973C D4C4DAA4 F61EEB2B DBAD2731 6534016F
+*/
+
+#define SHA1HANDSOFF
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/hmac.h"
+
+/* #include <process.h> */ /* prototype for exit() - JHB */
+/* Using return() instead of exit() - SWR */
+
+#define rol(value, bits) (((value) << (bits)) | ((value) >> (32 - (bits))))
+
+/* blk0() and blk() perform the initial expand. */
+/* I got the idea of expanding during the round function from SSLeay */
+#define blk0(i) is_bigendian ? block->l[i] : \
+ (block->l[i] = (rol(block->l[i],24)&0xFF00FF00) \
+ |(rol(block->l[i],8)&0x00FF00FF))
+#define blk(i) (block->l[i&15] = rol(block->l[(i+13)&15]^block->l[(i+8)&15] \
+ ^block->l[(i+2)&15]^block->l[i&15],1))
+
+/* (R0+R1), R2, R3, R4 are the different operations used in SHA1 */
+#define R0(v,w,x,y,z,i) z+=((w&(x^y))^y)+blk0(i)+0x5A827999+rol(v,5);w=rol(w,30);
+#define R1(v,w,x,y,z,i) z+=((w&(x^y))^y)+blk(i)+0x5A827999+rol(v,5);w=rol(w,30);
+#define R2(v,w,x,y,z,i) z+=(w^x^y)+blk(i)+0x6ED9EBA1+rol(v,5);w=rol(w,30);
+#define R3(v,w,x,y,z,i) z+=(((w|x)&y)|(w&x))+blk(i)+0x8F1BBCDC+rol(v,5);w=rol(w,30);
+#define R4(v,w,x,y,z,i) z+=(w^x^y)+blk(i)+0xCA62C1D6+rol(v,5);w=rol(w,30);
+
+
+#ifdef VERBOSE /* SAK */
+static void __db_SHAPrintContext __P((SHA1_CTX *, char *));
+static void
+__db_SHAPrintContext(context, msg)
+ SHA1_CTX *context;
+ char *msg;
+{
+ printf("%s (%d,%d) %x %x %x %x %x\n",
+ msg,
+ context->count[0], context->count[1],
+ context->state[0],
+ context->state[1],
+ context->state[2],
+ context->state[3],
+ context->state[4]);
+}
+#endif
+
+/* Hash a single 512-bit block. This is the core of the algorithm. */
+
+/*
+ * __db_SHA1Transform --
+ *
+ * PUBLIC: void __db_SHA1Transform __P((u_int32_t *, unsigned char *));
+ */
+void
+__db_SHA1Transform(state, buffer)
+ u_int32_t *state;
+ unsigned char *buffer;
+{
+u_int32_t a, b, c, d, e;
+typedef union {
+ unsigned char c[64];
+ u_int32_t l[16];
+} CHAR64LONG16;
+CHAR64LONG16* block;
+static int is_bigendian = -1;
+#ifdef SHA1HANDSOFF
+ unsigned char workspace[64];
+
+ block = (CHAR64LONG16*)workspace;
+ memcpy(block, buffer, 64);
+#else
+ block = (CHAR64LONG16*)buffer;
+#endif
+ if (is_bigendian == -1)
+ is_bigendian = __db_isbigendian();
+ /* Copy context->state[] to working vars */
+ a = state[0];
+ b = state[1];
+ c = state[2];
+ d = state[3];
+ e = state[4];
+ /* 4 rounds of 20 operations each. Loop unrolled. */
+ R0(a,b,c,d,e, 0); R0(e,a,b,c,d, 1); R0(d,e,a,b,c, 2); R0(c,d,e,a,b, 3);
+ R0(b,c,d,e,a, 4); R0(a,b,c,d,e, 5); R0(e,a,b,c,d, 6); R0(d,e,a,b,c, 7);
+ R0(c,d,e,a,b, 8); R0(b,c,d,e,a, 9); R0(a,b,c,d,e,10); R0(e,a,b,c,d,11);
+ R0(d,e,a,b,c,12); R0(c,d,e,a,b,13); R0(b,c,d,e,a,14); R0(a,b,c,d,e,15);
+ R1(e,a,b,c,d,16); R1(d,e,a,b,c,17); R1(c,d,e,a,b,18); R1(b,c,d,e,a,19);
+ R2(a,b,c,d,e,20); R2(e,a,b,c,d,21); R2(d,e,a,b,c,22); R2(c,d,e,a,b,23);
+ R2(b,c,d,e,a,24); R2(a,b,c,d,e,25); R2(e,a,b,c,d,26); R2(d,e,a,b,c,27);
+ R2(c,d,e,a,b,28); R2(b,c,d,e,a,29); R2(a,b,c,d,e,30); R2(e,a,b,c,d,31);
+ R2(d,e,a,b,c,32); R2(c,d,e,a,b,33); R2(b,c,d,e,a,34); R2(a,b,c,d,e,35);
+ R2(e,a,b,c,d,36); R2(d,e,a,b,c,37); R2(c,d,e,a,b,38); R2(b,c,d,e,a,39);
+ R3(a,b,c,d,e,40); R3(e,a,b,c,d,41); R3(d,e,a,b,c,42); R3(c,d,e,a,b,43);
+ R3(b,c,d,e,a,44); R3(a,b,c,d,e,45); R3(e,a,b,c,d,46); R3(d,e,a,b,c,47);
+ R3(c,d,e,a,b,48); R3(b,c,d,e,a,49); R3(a,b,c,d,e,50); R3(e,a,b,c,d,51);
+ R3(d,e,a,b,c,52); R3(c,d,e,a,b,53); R3(b,c,d,e,a,54); R3(a,b,c,d,e,55);
+ R3(e,a,b,c,d,56); R3(d,e,a,b,c,57); R3(c,d,e,a,b,58); R3(b,c,d,e,a,59);
+ R4(a,b,c,d,e,60); R4(e,a,b,c,d,61); R4(d,e,a,b,c,62); R4(c,d,e,a,b,63);
+ R4(b,c,d,e,a,64); R4(a,b,c,d,e,65); R4(e,a,b,c,d,66); R4(d,e,a,b,c,67);
+ R4(c,d,e,a,b,68); R4(b,c,d,e,a,69); R4(a,b,c,d,e,70); R4(e,a,b,c,d,71);
+ R4(d,e,a,b,c,72); R4(c,d,e,a,b,73); R4(b,c,d,e,a,74); R4(a,b,c,d,e,75);
+ R4(e,a,b,c,d,76); R4(d,e,a,b,c,77); R4(c,d,e,a,b,78); R4(b,c,d,e,a,79);
+ /* Add the working vars back into context.state[] */
+ state[0] += a;
+ state[1] += b;
+ state[2] += c;
+ state[3] += d;
+ state[4] += e;
+ /* Wipe variables */
+ a = b = c = d = e = 0;
+}
+
+
+/* SHA1Init - Initialize new context */
+
+/*
+ * __db_SHA1Init --
+ * Initialize new context
+ *
+ * PUBLIC: void __db_SHA1Init __P((SHA1_CTX *));
+ */
+void
+__db_SHA1Init(context)
+ SHA1_CTX *context;
+{
+ /* SHA1 initialization constants */
+ context->state[0] = 0x67452301;
+ context->state[1] = 0xEFCDAB89;
+ context->state[2] = 0x98BADCFE;
+ context->state[3] = 0x10325476;
+ context->state[4] = 0xC3D2E1F0;
+ context->count[0] = context->count[1] = 0;
+}
+
+
+/* Run your data through this. */
+
+/*
+ * __db_SHA1Update --
+ * Run your data through this.
+ *
+ * PUBLIC: void __db_SHA1Update __P((SHA1_CTX *, unsigned char *,
+ * PUBLIC: size_t));
+ */
+void
+__db_SHA1Update(context, data, len)
+ SHA1_CTX *context;
+ unsigned char *data;
+ size_t len;
+{
+u_int32_t i, j; /* JHB */
+
+#ifdef VERBOSE
+ __db_SHAPrintContext(context, "before");
+#endif
+ j = (context->count[0] >> 3) & 63;
+ if ((context->count[0] += (u_int32_t)len << 3) < (len << 3)) context->count[1]++;
+ context->count[1] += (u_int32_t)(len >> 29);
+ if ((j + len) > 63) {
+ memcpy(&context->buffer[j], data, (i = 64-j));
+ __db_SHA1Transform(context->state, context->buffer);
+ for ( ; i + 63 < len; i += 64) {
+ __db_SHA1Transform(context->state, &data[i]);
+ }
+ j = 0;
+ }
+ else i = 0;
+ memcpy(&context->buffer[j], &data[i], len - i);
+#ifdef VERBOSE
+ __db_SHAPrintContext(context, "after ");
+#endif
+}
+
+
+/* Add padding and return the message digest. */
+
+/*
+ * __db_SHA1Final --
+ * Add padding and return the message digest.
+ *
+ * PUBLIC: void __db_SHA1Final __P((unsigned char *, SHA1_CTX *));
+ */
+void
+__db_SHA1Final(digest, context)
+ unsigned char *digest;
+ SHA1_CTX *context;
+{
+u_int32_t i; /* JHB */
+unsigned char finalcount[8];
+
+ for (i = 0; i < 8; i++) {
+ finalcount[i] = (unsigned char)((context->count[(i >= 4 ? 0 : 1)]
+ >> ((3-(i & 3)) * 8) ) & 255); /* Endian independent */
+ }
+ __db_SHA1Update(context, (unsigned char *)"\200", 1);
+ while ((context->count[0] & 504) != 448) {
+ __db_SHA1Update(context, (unsigned char *)"\0", 1);
+ }
+ __db_SHA1Update(context, finalcount, 8); /* Should cause a SHA1Transform()
+*/
+ for (i = 0; i < 20; i++) {
+ digest[i] = (unsigned char)
+ ((context->state[i>>2] >> ((3-(i & 3)) * 8) ) & 255);
+ }
+ /* Wipe variables */
+ i = 0; /* JHB */
+ memset(context->buffer, 0, 64);
+ memset(context->state, 0, 20);
+ memset(context->count, 0, 8);
+ memset(finalcount, 0, 8); /* SWR */
+#ifdef SHA1HANDSOFF /* make SHA1Transform overwrite its own static vars */
+ __db_SHA1Transform(context->state, context->buffer);
+#endif
+}
+
+/*************************************************************/
+
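The calling sequence for the SHA-1 routines above is the usual Init/Update/Final; a quick sanity check can run it against the first FIPS PUB 180-1 test vector quoted in the header comment ("abc" -> A9993E36 4706816A BA3E2571 7850C26C 9CD0D89D). A small self-test sketch follows; it is not part of the patch, assumes the DB-internal headers (db_int.h, dbinc/hmac.h) plus <string.h> for memcmp, and sha1_selftest is a hypothetical helper name.

static int sha1_selftest __P((void));

static int
sha1_selftest()
{
	SHA1_CTX ctx;
	unsigned char digest[20];
	static const unsigned char expect[20] = {
		0xa9, 0x99, 0x3e, 0x36, 0x47, 0x06, 0x81, 0x6a, 0xba, 0x3e,
		0x25, 0x71, 0x78, 0x50, 0xc2, 0x6c, 0x9c, 0xd0, 0xd8, 0x9d
	};

	__db_SHA1Init(&ctx);
	__db_SHA1Update(&ctx, (unsigned char *)"abc", 3);
	__db_SHA1Final(digest, &ctx);

	/* 0 if the digest matches the published vector, -1 otherwise. */
	return (memcmp(digest, expect, sizeof(expect)) == 0 ? 0 : -1);
}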
diff --git a/bdb/hsearch/hsearch.c b/bdb/hsearch/hsearch.c
index c2869c4c47c..9760aeeb9e8 100644
--- a/bdb/hsearch/hsearch.c
+++ b/bdb/hsearch/hsearch.c
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Copyright (c) 1996-2002
* Sleepycat Software. All rights reserved.
*/
/*
@@ -43,7 +43,7 @@
#include "db_config.h"
#ifndef lint
-static const char revid[] = "$Id: hsearch.c,v 11.5 2000/11/30 00:58:37 ubell Exp $";
+static const char revid[] = "$Id: hsearch.c,v 11.12 2002/02/22 01:55:57 mjc Exp $";
#endif /* not lint */
#ifndef NO_SYSTEM_INCLUDES
@@ -58,6 +58,18 @@ static const char revid[] = "$Id: hsearch.c,v 11.5 2000/11/30 00:58:37 ubell Exp
static DB *dbp;
static ENTRY retval;
+/*
+ * Translate HSEARCH calls into DB calls so that DB doesn't step on the
+ * application's name space.
+ *
+ * EXTERN: #if DB_DBM_HSEARCH != 0
+ *
+ * EXTERN: int __db_hcreate __P((size_t));
+ * EXTERN: ENTRY *__db_hsearch __P((ENTRY, ACTION));
+ * EXTERN: void __db_hdestroy __P((void));
+ *
+ * EXTERN: #endif
+ */
int
__db_hcreate(nel)
size_t nel;
@@ -71,9 +83,9 @@ __db_hcreate(nel)
if ((ret = dbp->set_pagesize(dbp, 512)) != 0 ||
(ret = dbp->set_h_ffactor(dbp, 16)) != 0 ||
- (ret = dbp->set_h_nelem(dbp, nel)) != 0 ||
+ (ret = dbp->set_h_nelem(dbp, (u_int32_t)nel)) != 0 ||
(ret = dbp->open(dbp,
- NULL, NULL, DB_HASH, DB_CREATE, __db_omode("rw----"))) != 0)
+ NULL, NULL, NULL, DB_HASH, DB_CREATE, __db_omode("rw----"))) != 0)
__os_set_errno(ret);
/*
@@ -98,12 +110,12 @@ __db_hsearch(item, action)
memset(&key, 0, sizeof(key));
memset(&val, 0, sizeof(val));
key.data = item.key;
- key.size = strlen(item.key) + 1;
+ key.size = (u_int32_t)strlen(item.key) + 1;
switch (action) {
case ENTER:
val.data = item.data;
- val.size = strlen(item.data) + 1;
+ val.size = (u_int32_t)strlen(item.data) + 1;
/*
* Try and add the key to the database. If we fail because
diff --git a/bdb/include/db.src b/bdb/include/db.src
deleted file mode 100644
index 5226d4e98c6..00000000000
--- a/bdb/include/db.src
+++ /dev/null
@@ -1,1383 +0,0 @@
-/*-
- * See the file LICENSE for redistribution information.
- *
- * Copyright (c) 1996, 1997, 1998, 1999, 2000
- * Sleepycat Software. All rights reserved.
- *
- * $Id: db.src,v 11.121 2001/01/10 15:43:08 sue Exp $
- */
-
-#ifndef _DB_H_
-#define _DB_H_
-
-#ifndef __NO_SYSTEM_INCLUDES
-#include <sys/types.h>
-
-#include <stdio.h>
-#endif
-
-#if defined(__cplusplus)
-extern "C" {
-#endif
-
-/*
- * XXX
- * Handle function prototypes and the keyword "const". This steps on name
- * space that DB doesn't control, but all of the other solutions are worse.
- *
- * XXX
- * While Microsoft's compiler is ANSI C compliant, it doesn't have __STDC__
- * defined by default, you specify a command line flag or #pragma to turn
- * it on. Don't do that, however, because some of Microsoft's own header
- * files won't compile.
- */
-#undef __P
-#if defined(__STDC__) || defined(__cplusplus) || defined(_MSC_VER)
-#define __P(protos) protos /* ANSI C prototypes */
-#else
-#define const
-#define __P(protos) () /* K&R C preprocessor */
-#endif
-
-/*
- * !!!
- * DB needs basic information about specifically sized types. If they're
- * not provided by the system, typedef them here.
- *
- * We protect them against multiple inclusion using __BIT_TYPES_DEFINED__,
- * as does BIND and Kerberos, since we don't know for sure what #include
- * files the user is using.
- *
- * !!!
- * We also provide the standard u_int, u_long etc., if they're not provided
- * by the system.
- */
-#ifndef __BIT_TYPES_DEFINED__
-#define __BIT_TYPES_DEFINED__
-@u_int8_decl@
-@int16_decl@
-@u_int16_decl@
-@int32_decl@
-@u_int32_decl@
-#endif
-
-@u_char_decl@
-@u_short_decl@
-@u_int_decl@
-@u_long_decl@
-@ssize_t_decl@
-
-#define DB_VERSION_MAJOR @DB_VERSION_MAJOR@
-#define DB_VERSION_MINOR @DB_VERSION_MINOR@
-#define DB_VERSION_PATCH @DB_VERSION_PATCH@
-#define DB_VERSION_STRING @DB_VERSION_STRING@
-
-typedef u_int32_t db_pgno_t; /* Page number type. */
-typedef u_int16_t db_indx_t; /* Page offset type. */
-#define DB_MAX_PAGES 0xffffffff /* >= # of pages in a file */
-
-typedef u_int32_t db_recno_t; /* Record number type. */
-#define DB_MAX_RECORDS 0xffffffff /* >= # of records in a tree */
-
-/* Forward structure declarations, so applications get type checking. */
-struct __db; typedef struct __db DB;
-#ifdef DB_DBM_HSEARCH
- typedef struct __db DBM;
-#endif
-struct __db_bt_stat; typedef struct __db_bt_stat DB_BTREE_STAT;
-struct __db_dbt; typedef struct __db_dbt DBT;
-struct __db_env; typedef struct __db_env DB_ENV;
-struct __db_h_stat; typedef struct __db_h_stat DB_HASH_STAT;
-struct __db_ilock; typedef struct __db_ilock DB_LOCK_ILOCK;
-struct __db_lock_stat; typedef struct __db_lock_stat DB_LOCK_STAT;
-struct __db_lock_u; typedef struct __db_lock_u DB_LOCK;
-struct __db_lockreq; typedef struct __db_lockreq DB_LOCKREQ;
-struct __db_log_stat; typedef struct __db_log_stat DB_LOG_STAT;
-struct __db_lsn; typedef struct __db_lsn DB_LSN;
-struct __db_mpool_finfo;typedef struct __db_mpool_finfo DB_MPOOL_FINFO;
-struct __db_mpool_fstat;typedef struct __db_mpool_fstat DB_MPOOL_FSTAT;
-struct __db_mpool_stat; typedef struct __db_mpool_stat DB_MPOOL_STAT;
-struct __db_mpoolfile; typedef struct __db_mpoolfile DB_MPOOLFILE;
-struct __db_qam_stat; typedef struct __db_qam_stat DB_QUEUE_STAT;
-struct __db_txn; typedef struct __db_txn DB_TXN;
-struct __db_txn_active; typedef struct __db_txn_active DB_TXN_ACTIVE;
-struct __db_txn_stat; typedef struct __db_txn_stat DB_TXN_STAT;
-struct __dbc; typedef struct __dbc DBC;
-struct __dbc_internal; typedef struct __dbc_internal DBC_INTERNAL;
-struct __fh_t; typedef struct __fh_t DB_FH;
-struct __key_range; typedef struct __key_range DB_KEY_RANGE;
-
-/* Key/data structure -- a Data-Base Thang. */
-struct __db_dbt {
- /*
- * data/size must be fields 1 and 2 for DB 1.85 compatibility.
- */
- void *data; /* Key/data */
- u_int32_t size; /* key/data length */
-
- u_int32_t ulen; /* RO: length of user buffer. */
- u_int32_t dlen; /* RO: get/put record length. */
- u_int32_t doff; /* RO: get/put record offset. */
-
- void *app_private; /* Application-private handle. */
-#define DB_DBT_ISSET 0x001 /* Lower level calls set value. */
-#define DB_DBT_MALLOC 0x002 /* Return in malloc'd memory. */
-#define DB_DBT_PARTIAL 0x004 /* Partial put/get. */
-#define DB_DBT_REALLOC 0x008 /* Return in realloc'd memory. */
-#define DB_DBT_USERMEM 0x010 /* Return in user's memory. */
-#define DB_DBT_DUPOK 0x020 /* Insert if duplicate. */
- u_int32_t flags;
-};
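The DBT above is always driven the same way by the access methods: the caller zeroes the structure, fills in data/size for input, and picks one of the memory flags for output. The following is an illustrative lookup under the DB_DBT_MALLOC convention, not part of this file; it assumes an already-open DB handle plus the usual system headers (<string.h>, <stdlib.h>), and lookup_example and the key string are hypothetical.

static int lookup_example __P((DB *));

static int
lookup_example(dbp)
	DB *dbp;
{
	DBT key, data;
	int ret;

	memset(&key, 0, sizeof(key));
	memset(&data, 0, sizeof(data));
	key.data = "fruit";
	key.size = (u_int32_t)strlen("fruit") + 1;
	data.flags = DB_DBT_MALLOC;	/* DB returns data.data in malloc'd memory. */

	if ((ret = dbp->get(dbp, NULL, &key, &data, 0)) == 0) {
		/* ... use data.data / data.size ... */
		free(data.data);	/* Caller owns the malloc'd result. */
	}
	return (ret);
}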
-
-/*
- * Common flags --
- * Interfaces which use any of these common flags should never have
- * interface specific flags in this range.
- */
-#define DB_CREATE 0x000001 /* Create file as necessary. */
-#define DB_CXX_NO_EXCEPTIONS 0x000002 /* C++: return error values. */
-#define DB_FORCE 0x000004 /* Force (anything). */
-#define DB_NOMMAP 0x000008 /* Don't mmap underlying file. */
-#define DB_RDONLY 0x000010 /* Read-only (O_RDONLY). */
-#define DB_RECOVER 0x000020 /* Run normal recovery. */
-#define DB_THREAD 0x000040 /* Applications are threaded. */
-#define DB_TXN_NOSYNC 0x000080 /* Do not sync log on commit. */
-#define DB_USE_ENVIRON 0x000100 /* Use the environment. */
-#define DB_USE_ENVIRON_ROOT 0x000200 /* Use the environment if root. */
-
-/*
- * Flags private to db_env_create.
- */
-#define DB_CLIENT 0x000400 /* Open for a client environment. */
-
-/*
- * Flags private to db_create.
- */
-#define DB_XA_CREATE 0x000400 /* Open in an XA environment. */
-
-/*
- * Flags private to DBENV->open.
- */
-#define DB_INIT_CDB 0x000400 /* Concurrent Access Methods. */
-#define DB_INIT_LOCK 0x000800 /* Initialize locking. */
-#define DB_INIT_LOG 0x001000 /* Initialize logging. */
-#define DB_INIT_MPOOL 0x002000 /* Initialize mpool. */
-#define DB_INIT_TXN 0x004000 /* Initialize transactions. */
-#define DB_JOINENV 0x008000 /* Initialize all subsystems present. */
-#define DB_LOCKDOWN 0x010000 /* Lock memory into physical core. */
-#define DB_PRIVATE 0x020000 /* DB_ENV is process local. */
-#define DB_RECOVER_FATAL 0x040000 /* Run catastrophic recovery. */
-#define DB_SYSTEM_MEM 0x080000 /* Use system-backed memory. */
-
-/*
- * Flags private to DB->open.
- */
-#define DB_EXCL 0x000400 /* Exclusive open (O_EXCL). */
-#define DB_FCNTL_LOCKING 0x000800 /* UNDOC: fcntl(2) locking. */
-#define DB_ODDFILESIZE 0x001000 /* UNDOC: truncate to N * pgsize. */
-#define DB_RDWRMASTER 0x002000 /* UNDOC: allow subdb master open R/W */
-#define DB_TRUNCATE 0x004000 /* Discard existing DB (O_TRUNC). */
-#define DB_EXTENT 0x008000 /* UNDOC: dealing with an extent. */
-
-/*
- * Flags private to DBENV->txn_begin.
- */
-#define DB_TXN_NOWAIT 0x000400 /* Do not wait for locks in this TXN. */
-#define DB_TXN_SYNC 0x000800 /* Always sync log on commit. */
-
-/*
- * Flags private to DBENV->set_flags.
- */
-#define DB_CDB_ALLDB 0x000400 /* In CDB, lock across environment. */
-
-/*
- * Flags private to DB->set_feedback's callback.
- */
-#define DB_UPGRADE 0x000400 /* Upgrading. */
-#define DB_VERIFY 0x000800 /* Verifying. */
-
-/*
- * Flags private to DB->set_flags.
- *
- * DB->set_flags does not share common flags and so values start at 0x01.
- */
-#define DB_DUP 0x0001 /* Btree, Hash: duplicate keys. */
-#define DB_DUPSORT 0x0002 /* Btree, Hash: duplicate keys. */
-#define DB_RECNUM 0x0004 /* Btree: record numbers. */
-#define DB_RENUMBER 0x0008 /* Recno: renumber on insert/delete. */
-#define DB_REVSPLITOFF 0x0010 /* Btree: turn off reverse splits. */
-#define DB_SNAPSHOT 0x0020 /* Recno: snapshot the input. */
-
-/*
- * Flags private to DB->join.
- *
- * DB->join does not share common flags and so values start at 0x01.
- */
-#define DB_JOIN_NOSORT 0x0001 /* Don't try to optimize join. */
-
-/*
- * Flags private to DB->verify.
- *
- * DB->verify does not share common flags and so values start at 0x01.
- */
-#define DB_AGGRESSIVE 0x0001 /* Salvage anything which might be data.*/
-#define DB_NOORDERCHK 0x0002 /* Skip order check; subdb w/ user func */
-#define DB_ORDERCHKONLY 0x0004 /* Only perform an order check on subdb */
-#define DB_PR_PAGE 0x0008 /* Show page contents (-da). */
-#define DB_PR_HEADERS 0x0010 /* Show only page headers (-dh). */
-#define DB_PR_RECOVERYTEST 0x0020 /* Recovery test (-dr). */
-#define DB_SALVAGE 0x0040 /* Salvage what looks like data. */
-/*
- * !!!
- * These must not go over 0x8000, or they will collide with the flags
- * used by __bam_vrfy_subtree.
- */
-#define DB_VRFY_FLAGMASK 0xffff /* For masking above flags. */
-
-/*
- * Deadlock detector modes; used in the DBENV structure to configure the
- * locking subsystem.
- */
-#define DB_LOCK_NORUN 0
-#define DB_LOCK_DEFAULT 1 /* Default policy. */
-#define DB_LOCK_OLDEST 2 /* Abort oldest transaction. */
-#define DB_LOCK_RANDOM 3 /* Abort random transaction. */
-#define DB_LOCK_YOUNGEST 4 /* Abort youngest transaction. */
-
-/*******************************************************
- * Environment.
- *******************************************************/
-#define DB_REGION_MAGIC 0x120897 /* Environment magic number. */
-
-typedef enum {
- DB_NOTICE_LOGFILE_CHANGED
- /* DB_NOTICE_DISK_LOW */
-} db_notices;
-
-typedef enum {
- DB_TXN_ABORT,
- DB_TXN_BACKWARD_ROLL,
- DB_TXN_FORWARD_ROLL,
- DB_TXN_OPENFILES
-} db_recops;
-
-#define DB_UNDO(op) ((op) == DB_TXN_ABORT || (op) == DB_TXN_BACKWARD_ROLL)
-#define DB_REDO(op) ((op) == DB_TXN_FORWARD_ROLL)
-
-struct __db_env {
- /*******************************************************
- * Public: owned by the application.
- *******************************************************/
- FILE *db_errfile; /* Error message file stream. */
- const char *db_errpfx; /* Error message prefix. */
- /* Callbacks. */
- void (*db_errcall) __P((const char *, char *));
- void (*db_feedback) __P((DB_ENV *, int, int));
- void (*db_noticecall) __P((DB_ENV *, db_notices));
- void (*db_paniccall) __P((DB_ENV *, int));
- int (*db_recovery_init) __P((DB_ENV *));
-
- /*
- * Currently, the verbose list is a bit field with room for 32
- * entries. There's no reason that it needs to be limited, if
- * there are ever more than 32 entries, convert to a bit array.
- */
-#define DB_VERB_CHKPOINT 0x0001 /* List checkpoints. */
-#define DB_VERB_DEADLOCK 0x0002 /* Deadlock detection information. */
-#define DB_VERB_RECOVERY 0x0004 /* Recovery information. */
-#define DB_VERB_WAITSFOR 0x0008 /* Dump waits-for table. */
- u_int32_t verbose; /* Verbose output. */
-
- void *app_private; /* Application-private handle. */
-
- /* Locking. */
- u_int8_t *lk_conflicts; /* Two dimensional conflict matrix. */
- u_int32_t lk_modes; /* Number of lock modes in table. */
- u_int32_t lk_max; /* Maximum number of locks. */
- u_int32_t lk_max_lockers;/* Maximum number of lockers. */
- u_int32_t lk_max_objects;/* Maximum number of locked objects. */
- u_int32_t lk_detect; /* Deadlock detect on all conflicts. */
-
- /* Logging. */
- u_int32_t lg_bsize; /* Buffer size. */
- u_int32_t lg_max; /* Maximum file size. */
-
- /* Memory pool. */
- u_int32_t mp_gbytes; /* Cachesize: GB. */
- u_int32_t mp_bytes; /* Cachesize: Bytes. */
- size_t mp_size; /* DEPRECATED: Cachesize: bytes. */
- int mp_ncache; /* Number of cache regions. */
- size_t mp_mmapsize; /* Maximum file size for mmap. */
-
- /* Transactions. */
- u_int32_t tx_max; /* Maximum number of transactions. */
- time_t tx_timestamp; /* Recover to specific timestamp. */
- int (*tx_recover) /* Dispatch function for recovery. */
- __P((DB_ENV *, DBT *, DB_LSN *, db_recops));
-
- /*******************************************************
- * Private: owned by DB.
- *******************************************************/
- int db_panic; /* Panic causing errno. */
-
- /* User files, paths. */
- char *db_home; /* Database home. */
- char *db_log_dir; /* Database log file directory. */
- char *db_tmp_dir; /* Database tmp file directory. */
-
- char **db_data_dir; /* Database data file directories. */
- int data_cnt; /* Database data file slots. */
- int data_next; /* Next Database data file slot. */
-
- int db_mode; /* Default open permissions. */
-
- void *reginfo; /* REGINFO structure reference. */
- DB_FH *lockfhp; /* fcntl(2) locking file handle. */
- long shm_key; /* shmget(2) key. */
-
- void *lg_handle; /* Log handle. */
-
- void *lk_handle; /* Lock handle. */
-
- void *mp_handle; /* Mpool handle. */
-
- void *tx_handle; /* Txn handle. */
-
- int (**dtab) /* Dispatch table */
- __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
- size_t dtab_size; /* Slots in the dispatch table. */
-
- void *cl_handle; /* RPC: remote client handle. */
- long cl_id; /* RPC: Remote client env id. */
-
- int dblocal_ref; /* DB_ENV_DBLOCAL: reference count. */
- u_int32_t db_mutexlocks; /* db_set_mutexlocks */
-
- /*
- * List of open DB handles for this DB_ENV, used for cursor
- * adjustment. Must be protected for multi-threaded support.
- *
- * !!!
- * As this structure is allocated in per-process memory, the
- * mutex may need to be stored elsewhere on architectures unable
- * to support mutexes in heap memory, e.g. HP/UX 9.
- */
- void *dblist_mutexp; /* Mutex. */
- /*
- * !!!
- * Explicit representation of structure in queue.h.
- * LIST_HEAD(dblist, __db);
- */
- struct {
- struct __db *lh_first;
- } dblist;
-
- /*
- * XA support.
- *
- * !!!
- * Explicit representations of structures in queue.h.
- *
- * TAILQ_ENTRY(__db_env);
- */
- struct {
- struct __db_env *tqe_next;
- struct __db_env **tqe_prev;
- } links;
- int xa_rmid; /* XA Resource Manager ID. */
- DB_TXN *xa_txn; /* XA Current transaction. */
-
- void *cj_internal; /* C++/Java private. */
-
- /* Methods. */
- int (*close) __P((DB_ENV *, u_int32_t));
- void (*err) __P((const DB_ENV *, int, const char *, ...));
- void (*errx) __P((const DB_ENV *, const char *, ...));
- int (*open) __P((DB_ENV *, const char *, u_int32_t, int));
- int (*remove) __P((DB_ENV *, const char *, u_int32_t));
- int (*set_data_dir) __P((DB_ENV *, const char *));
- void (*set_errcall) __P((DB_ENV *, void (*)(const char *, char *)));
- void (*set_errfile) __P((DB_ENV *, FILE *));
- void (*set_errpfx) __P((DB_ENV *, const char *));
- int (*set_feedback) __P((DB_ENV *, void (*)(DB_ENV *, int, int)));
- int (*set_flags) __P((DB_ENV *, u_int32_t, int));
- int (*set_mutexlocks) __P((DB_ENV *, int));
- void (*set_noticecall) __P((DB_ENV *, void (*)(DB_ENV *, db_notices)));
- int (*set_paniccall) __P((DB_ENV *, void (*)(DB_ENV *, int)));
- int (*set_recovery_init) __P((DB_ENV *, int (*)(DB_ENV *)));
- int (*set_server) __P((DB_ENV *, char *, long, long, u_int32_t));
- int (*set_shm_key) __P((DB_ENV *, long));
- int (*set_tmp_dir) __P((DB_ENV *, const char *));
- int (*set_verbose) __P((DB_ENV *, u_int32_t, int));
-
- int (*set_lg_bsize) __P((DB_ENV *, u_int32_t));
- int (*set_lg_dir) __P((DB_ENV *, const char *));
- int (*set_lg_max) __P((DB_ENV *, u_int32_t));
-
- int (*set_lk_conflicts) __P((DB_ENV *, u_int8_t *, int));
- int (*set_lk_detect) __P((DB_ENV *, u_int32_t));
- int (*set_lk_max) __P((DB_ENV *, u_int32_t));
- int (*set_lk_max_locks) __P((DB_ENV *, u_int32_t));
- int (*set_lk_max_lockers) __P((DB_ENV *, u_int32_t));
- int (*set_lk_max_objects) __P((DB_ENV *, u_int32_t));
-
- int (*set_mp_mmapsize) __P((DB_ENV *, size_t));
- int (*set_cachesize) __P((DB_ENV *, u_int32_t, u_int32_t, int));
-
- int (*set_tx_max) __P((DB_ENV *, u_int32_t));
- int (*set_tx_recover) __P((DB_ENV *,
- int (*)(DB_ENV *, DBT *, DB_LSN *, db_recops)));
- int (*set_tx_timestamp) __P((DB_ENV *, time_t *));
-
-#ifdef CONFIG_TEST
-#define DB_TEST_PREOPEN 1 /* before __os_open */
-#define DB_TEST_POSTOPEN 2 /* after __os_open */
-#define DB_TEST_POSTLOGMETA 3 /* after logging meta in btree */
-#define DB_TEST_POSTLOG 4 /* after logging all pages */
-#define DB_TEST_POSTSYNC 5 /* after syncing the log */
-#define DB_TEST_PRERENAME 6 /* before __os_rename */
-#define DB_TEST_POSTRENAME 7 /* after __os_rename */
- int test_abort; /* Abort value for testing. */
- int test_copy; /* Copy value for testing. */
-#endif
-
-#define DB_ENV_CDB 0x00001 /* DB_INIT_CDB. */
-#define DB_ENV_CDB_ALLDB 0x00002 /* CDB environment wide locking. */
-#define DB_ENV_CREATE 0x00004 /* DB_CREATE set. */
-#define DB_ENV_DBLOCAL 0x00008 /* DB_ENV allocated for private DB. */
-#define DB_ENV_LOCKDOWN 0x00010 /* DB_LOCKDOWN set. */
-#define DB_ENV_NOMMAP 0x00020 /* DB_NOMMAP set. */
-#define DB_ENV_OPEN_CALLED 0x00040 /* DBENV->open called (paths valid). */
-#define DB_ENV_PRIVATE 0x00080 /* DB_PRIVATE set. */
-#define DB_ENV_RPCCLIENT 0x00100 /* DB_CLIENT set. */
-#define DB_ENV_STANDALONE 0x00200 /* Test: freestanding environment. */
-#define DB_ENV_SYSTEM_MEM 0x00400 /* DB_SYSTEM_MEM set. */
-#define DB_ENV_THREAD 0x00800 /* DB_THREAD set. */
-#define DB_ENV_TXN_NOSYNC 0x01000 /* DB_TXN_NOSYNC set. */
-#define DB_ENV_USER_ALLOC 0x02000 /* User allocated the structure. */
- u_int32_t flags; /* Flags. */
-};
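The environment handle above is driven entirely through its method table. A minimal open sketch follows; it is illustrative only, assumes db_env_create() from the public API (declared later in this header, not shown in this excerpt), and the helper name and home-directory path are placeholders of the caller's choosing.

static int env_open_example __P((DB_ENV **));

static int
env_open_example(dbenvp)
	DB_ENV **dbenvp;
{
	DB_ENV *dbenv;
	int ret;

	if ((ret = db_env_create(&dbenv, 0)) != 0)
		return (ret);
	dbenv->set_errpfx(dbenv, "example");
	if ((ret = dbenv->open(dbenv, "/path/to/home",
	    DB_CREATE | DB_INIT_MPOOL | DB_INIT_LOCK |
	    DB_INIT_LOG | DB_INIT_TXN, 0)) != 0) {
		(void)dbenv->close(dbenv, 0);
		return (ret);
	}
	*dbenvp = dbenv;
	return (0);
}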
-
-/*******************************************************
- * Access methods.
- *******************************************************/
-/*
- * !!!
- * Changes here must be reflected in java/src/com/sleepycat/db/Db.java.
- */
-typedef enum {
- DB_BTREE=1,
- DB_HASH,
- DB_RECNO,
- DB_QUEUE,
- DB_UNKNOWN /* Figure it out on open. */
-} DBTYPE;
-
-#define DB_BTREEVERSION 8 /* Current btree version. */
-#define DB_BTREEOLDVER 6 /* Oldest btree version supported. */
-#define DB_BTREEMAGIC 0x053162
-
-#define DB_HASHVERSION 7 /* Current hash version. */
-#define DB_HASHOLDVER 4 /* Oldest hash version supported. */
-#define DB_HASHMAGIC 0x061561
-
-#define DB_QAMVERSION 3 /* Current queue version. */
-#define DB_QAMOLDVER 1 /* Oldest queue version supported. */
-#define DB_QAMMAGIC 0x042253
-
-#define DB_LOGVERSION 3 /* Current log version. */
-#define DB_LOGOLDVER 3 /* Oldest log version supported. */
-#define DB_LOGMAGIC 0x040988
-
-/*
- * DB access method and cursor operation values. Each value is an operation
- * code to which additional bit flags are added.
- */
-#define DB_AFTER 1 /* c_put() */
-#define DB_APPEND 2 /* put() */
-#define DB_BEFORE 3 /* c_put() */
-#define DB_CACHED_COUNTS 4 /* stat() */
-#define DB_CHECKPOINT 5 /* log_put(), log_get() */
-#define DB_CONSUME 6 /* get() */
-#define DB_CONSUME_WAIT 7 /* get() */
-#define DB_CURLSN 8 /* log_put() */
-#define DB_CURRENT 9 /* c_get(), c_put(), log_get() */
-#define DB_FIRST 10 /* c_get(), log_get() */
-#define DB_FLUSH 11 /* log_put() */
-#define DB_GET_BOTH 12 /* get(), c_get() */
-#define DB_GET_BOTHC 13 /* c_get() (internal) */
-#define DB_GET_RECNO 14 /* c_get() */
-#define DB_JOIN_ITEM 15 /* c_get(); do not do primary lookup */
-#define DB_KEYFIRST 16 /* c_put() */
-#define DB_KEYLAST 17 /* c_put() */
-#define DB_LAST 18 /* c_get(), log_get() */
-#define DB_NEXT 19 /* c_get(), log_get() */
-#define DB_NEXT_DUP 20 /* c_get() */
-#define DB_NEXT_NODUP 21 /* c_get() */
-#define DB_NODUPDATA 22 /* put(), c_put() */
-#define DB_NOOVERWRITE 23 /* put() */
-#define DB_NOSYNC 24 /* close() */
-#define DB_POSITION 25 /* c_dup() */
-#define DB_POSITIONI 26 /* c_dup() (internal) */
-#define DB_PREV 27 /* c_get(), log_get() */
-#define DB_PREV_NODUP 28 /* c_get(), log_get() */
-#define DB_RECORDCOUNT 29 /* stat() */
-#define DB_SET 30 /* c_get(), log_get() */
-#define DB_SET_RANGE 31 /* c_get() */
-#define DB_SET_RECNO 32 /* get(), c_get() */
-#define DB_WRITECURSOR 33 /* cursor() */
-#define DB_WRITELOCK 34 /* cursor() (internal) */
-
-/* This has to change when the max opcode hits 255. */
-#define DB_OPFLAGS_MASK 0x000000ff /* Mask for operations flags. */
-#define DB_RMW 0x80000000 /* Acquire write flag immediately. */
-
-/*
- * DB (user visible) error return codes.
- *
- * !!!
- * Changes to any of the user visible error return codes must be reflected
- * in java/src/com/sleepycat/db/Db.java.
- *
- * !!!
- * For source compatibility with DB 2.X deadlock return (EAGAIN), use the
- * following:
- * #include <errno.h>
- * #define DB_LOCK_DEADLOCK EAGAIN
- *
- * !!!
- * We don't want our error returns to conflict with other packages where
- * possible, so pick a base error value that's hopefully not common. We
- * document that we own the error name space from -30,800 to -30,999.
- */
-/* Public error return codes. */
-#define DB_INCOMPLETE (-30999)/* Sync didn't finish. */
-#define DB_KEYEMPTY (-30998)/* Key/data deleted or never created. */
-#define DB_KEYEXIST (-30997)/* The key/data pair already exists. */
-#define DB_LOCK_DEADLOCK (-30996)/* Deadlock. */
-#define DB_LOCK_NOTGRANTED (-30995)/* Lock unavailable. */
-#define DB_NOSERVER (-30994)/* Server panic return. */
-#define DB_NOSERVER_HOME (-30993)/* Bad home sent to server. */
-#define DB_NOSERVER_ID (-30992)/* Bad ID sent to server. */
-#define DB_NOTFOUND (-30991)/* Key/data pair not found (EOF). */
-#define DB_OLD_VERSION (-30990)/* Out-of-date version. */
-#define DB_RUNRECOVERY (-30989)/* Panic return. */
-#define DB_VERIFY_BAD (-30988)/* Verify failed; bad format. */
-
-/* DB (private) error return codes. */
-#define DB_ALREADY_ABORTED (-30899)
-#define DB_DELETED (-30898)/* Recovery file marked deleted. */
-#define DB_JAVA_CALLBACK (-30897)/* Exception during a java callback. */
-#define DB_NEEDSPLIT (-30896)/* Page needs to be split. */
-#define DB_SWAPBYTES (-30895)/* Database needs byte swapping. */
-#define DB_TXN_CKP (-30894)/* Encountered ckp record in log. */
-#define DB_VERIFY_FATAL (-30893)/* Fatal: DB->verify cannot proceed. */
-
-#define DB_FILE_ID_LEN 20 /* DB file ID length. */
-
-/* DB access method description structure. */
-struct __db {
- /*******************************************************
- * Public: owned by the application.
- *******************************************************/
- u_int32_t pgsize; /* Database logical page size. */
-
- /* Callbacks. */
- int (*db_append_recno) __P((DB *, DBT *, db_recno_t));
- void (*db_feedback) __P((DB *, int, int));
- void *(*db_malloc) __P((size_t));
- void *(*db_realloc) __P((void *, size_t));
- int (*dup_compare) __P((DB *, const DBT *, const DBT *));
-
- void *app_private; /* Application-private handle. */
-
- /*******************************************************
- * Private: owned by DB.
- *******************************************************/
- DB_ENV *dbenv; /* Backing environment. */
-
- DBTYPE type; /* DB access method type. */
-
- DB_MPOOLFILE *mpf; /* Backing buffer pool. */
-
- void *mutexp; /* Synchronization for free threading */
-
- u_int8_t fileid[DB_FILE_ID_LEN];/* File's unique ID for locking. */
-
- u_int32_t adj_fileid; /* File's unique ID for curs. adj. */
-
-#define DB_LOGFILEID_INVALID -1
- int32_t log_fileid; /* File's unique ID for logging. */
- db_pgno_t meta_pgno; /* Meta page number */
- DB_TXN *open_txn; /* Transaction to protect creates. */
-
- long cl_id; /* RPC: remote client id. */
-
- /*
- * !!!
- * Some applications use DB but implement their own locking outside of
- * DB. If they're using fcntl(2) locking on the underlying database
- * file, and we open and close a file descriptor for that file, we will
- * discard their locks. The DB_FCNTL_LOCKING flag to DB->open is an
- * undocumented interface to support this usage which leaves any file
- * descriptors we open until DB->close. This will only work with the
- * DB->open interface and simple caches, e.g., creating a transaction
- * thread may open/close file descriptors this flag doesn't protect.
- * Locking with fcntl(2) on a file that you don't own is a very, very
- * unsafe thing to do. 'Nuff said.
- */
- DB_FH *saved_open_fhp; /* Saved file handle. */
-
- /*
- * Linked list of DBP's, used in the log's dbentry table
- * to keep track of all open db handles for a given log id.
- * !!!
- * Explicit representations of structures in queue.h.
- *
- * TAILQ_ENTRY(__db) links;
- */
- struct {
- struct __db *tqe_next;
- struct __db **tqe_prev;
- } links;
-
- /*
- * Linked list of DBP's, linked from the DB_ENV, used to
- * keep track of all open db handles for cursor adjustment.
- *
- * XXX
- * Eventually, this should be merged with "links" above.
- *
- * !!!
- * Explicit representations of structures in queue.h.
- *
- * LIST_ENTRY(__db) dblistlinks;
- */
- struct {
- struct __db *le_next;
- struct __db **le_prev;
- } dblistlinks;
-
- /*
- * Cursor queues.
- *
- * !!!
- * Explicit representations of structures in queue.h.
- *
- * TAILQ_HEAD(free_queue, __dbc);
- * TAILQ_HEAD(active_queue, __dbc);
- * TAILQ_HEAD(join_queue, __dbc);
- */
- struct {
- struct __dbc *tqh_first;
- struct __dbc **tqh_last;
- } free_queue;
- struct {
- struct __dbc *tqh_first;
- struct __dbc **tqh_last;
- } active_queue;
- struct {
- struct __dbc *tqh_first;
- struct __dbc **tqh_last;
- } join_queue;
-
- void *bt_internal; /* Btree/Recno access method private. */
- void *cj_internal; /* C++/Java private. */
- void *h_internal; /* Hash access method private. */
- void *q_internal; /* Queue access method private. */
- void *xa_internal; /* XA private. */
-
- /* Methods. */
- int (*close) __P((DB *, u_int32_t));
- int (*cursor) __P((DB *, DB_TXN *, DBC **, u_int32_t));
- int (*del) __P((DB *, DB_TXN *, DBT *, u_int32_t));
- void (*err) __P((DB *, int, const char *, ...));
- void (*errx) __P((DB *, const char *, ...));
- int (*fd) __P((DB *, int *));
- int (*get) __P((DB *, DB_TXN *, DBT *, DBT *, u_int32_t));
- int (*get_byteswapped) __P((DB *));
- DBTYPE
- (*get_type) __P((DB *));
- int (*join) __P((DB *, DBC **, DBC **, u_int32_t));
- int (*key_range) __P((DB *,
- DB_TXN *, DBT *, DB_KEY_RANGE *, u_int32_t));
- int (*open) __P((DB *,
- const char *, const char *, DBTYPE, u_int32_t, int));
- int (*put) __P((DB *, DB_TXN *, DBT *, DBT *, u_int32_t));
- int (*remove) __P((DB *, const char *, const char *, u_int32_t));
- int (*rename) __P((DB *,
- const char *, const char *, const char *, u_int32_t));
- int (*set_append_recno) __P((DB *, int (*)(DB *, DBT *, db_recno_t)));
- int (*set_cachesize) __P((DB *, u_int32_t, u_int32_t, int));
- int (*set_dup_compare) __P((DB *,
- int (*)(DB *, const DBT *, const DBT *)));
- void (*set_errcall) __P((DB *, void (*)(const char *, char *)));
- void (*set_errfile) __P((DB *, FILE *));
- void (*set_errpfx) __P((DB *, const char *));
- int (*set_feedback) __P((DB *, void (*)(DB *, int, int)));
- int (*set_flags) __P((DB *, u_int32_t));
- int (*set_lorder) __P((DB *, int));
- int (*set_malloc) __P((DB *, void *(*)(size_t)));
- int (*set_pagesize) __P((DB *, u_int32_t));
- int (*set_paniccall) __P((DB *, void (*)(DB_ENV *, int)));
- int (*set_realloc) __P((DB *, void *(*)(void *, size_t)));
- int (*stat) __P((DB *, void *, void *(*)(size_t), u_int32_t));
- int (*sync) __P((DB *, u_int32_t));
- int (*upgrade) __P((DB *, const char *, u_int32_t));
- int (*verify) __P((DB *,
- const char *, const char *, FILE *, u_int32_t));
-
- int (*set_bt_compare) __P((DB *,
- int (*)(DB *, const DBT *, const DBT *)));
- int (*set_bt_maxkey) __P((DB *, u_int32_t));
- int (*set_bt_minkey) __P((DB *, u_int32_t));
- int (*set_bt_prefix) __P((DB *,
- size_t (*)(DB *, const DBT *, const DBT *)));
-
- int (*set_h_ffactor) __P((DB *, u_int32_t));
- int (*set_h_hash) __P((DB *,
- u_int32_t (*)(DB *, const void *, u_int32_t)));
- int (*set_h_nelem) __P((DB *, u_int32_t));
-
- int (*set_re_delim) __P((DB *, int));
- int (*set_re_len) __P((DB *, u_int32_t));
- int (*set_re_pad) __P((DB *, int));
- int (*set_re_source) __P((DB *, const char *));
- int (*set_q_extentsize) __P((DB *, u_int32_t));
-
- int (*db_am_remove) __P((DB *, const char *,
- const char *, DB_LSN *, int (**)(DB *, void*), void **));
- int (*db_am_rename) __P((DB *,
- const char *, const char *, const char *));
-
-#define DB_OK_BTREE 0x01
-#define DB_OK_HASH 0x02
-#define DB_OK_QUEUE 0x04
-#define DB_OK_RECNO 0x08
- u_int32_t am_ok; /* Legal AM choices. */
-
-#define DB_AM_DISCARD 0x00001 /* Discard any cached pages. */
-#define DB_AM_DUP 0x00002 /* DB_DUP. */
-#define DB_AM_DUPSORT 0x00004 /* DB_DUPSORT. */
-#define DB_AM_INMEM 0x00008 /* In-memory; no sync on close. */
-#define DB_AM_PGDEF 0x00010 /* Page size was defaulted. */
-#define DB_AM_RDONLY 0x00020 /* Database is readonly. */
-#define DB_AM_RECOVER 0x00040 /* DBP opened by recovery routine. */
-#define DB_AM_SUBDB 0x00080 /* Subdatabases supported. */
-#define DB_AM_SWAP 0x00100 /* Pages need to be byte-swapped. */
-#define DB_AM_TXN 0x00200 /* DBP was in a transaction. */
-#define DB_AM_VERIFYING 0x00400 /* DB handle is in the verifier. */
-#define DB_BT_RECNUM 0x00800 /* DB_RECNUM. */
-#define DB_BT_REVSPLIT 0x01000 /* DB_REVSPLITOFF. */
-#define DB_DBM_ERROR 0x02000 /* Error in DBM/NDBM database. */
-#define DB_OPEN_CALLED 0x04000 /* DB->open called. */
-#define DB_RE_DELIMITER		0x08000	/* Variable length delimiter set. */
-#define DB_RE_FIXEDLEN 0x10000 /* Fixed-length records. */
-#define DB_RE_PAD 0x20000 /* Fixed-length record pad. */
-#define DB_RE_RENUMBER 0x40000 /* DB_RENUMBER. */
-#define DB_RE_SNAPSHOT 0x80000 /* DB_SNAPSHOT. */
- u_int32_t flags;
-};
-
-/*
- * DB_LOCK_ILOCK --
- * Internal DB access method lock.
- */
-struct __db_ilock {
- db_pgno_t pgno; /* Page being locked. */
- u_int8_t fileid[DB_FILE_ID_LEN];/* File id. */
-#define DB_RECORD_LOCK 1
-#define DB_PAGE_LOCK 2
- u_int8_t type; /* Record or Page lock */
-};
-
-/*
- * DB_LOCK --
- * The structure is allocated by the caller and filled in during a
- * lock_get request (or a lock_vec/DB_LOCK_GET).
- */
-struct __db_lock_u {
- size_t off; /* Offset of the lock in the region */
- u_int32_t ndx; /* Index of the object referenced by
- * this lock; used for locking. */
- u_int32_t gen; /* Generation number of this lock. */
-};
-
-/* Cursor description structure. */
-struct __dbc {
- DB *dbp; /* Related DB access method. */
- DB_TXN *txn; /* Associated transaction. */
-
- /*
- * !!!
- * Explicit representations of structures in queue.h.
- *
- * TAILQ_ENTRY(__dbc) links; Active/free cursor queues.
- */
- struct {
- DBC *tqe_next;
- DBC **tqe_prev;
- } links;
-
- DBT rkey; /* Returned key. */
- DBT rdata; /* Returned data. */
-
- u_int32_t lid; /* Default process' locker id. */
- u_int32_t locker; /* Locker for this operation. */
- DBT lock_dbt; /* DBT referencing lock. */
- DB_LOCK_ILOCK lock; /* Object to be locked. */
- DB_LOCK mylock; /* Lock held on this cursor. */
-
- long cl_id; /* Remote client id. */
-
- DBTYPE dbtype; /* Cursor type. */
-
- DBC_INTERNAL *internal; /* Access method private. */
-
- int (*c_close) __P((DBC *)); /* Methods: public. */
- int (*c_count) __P((DBC *, db_recno_t *, u_int32_t));
- int (*c_del) __P((DBC *, u_int32_t));
- int (*c_dup) __P((DBC *, DBC **, u_int32_t));
- int (*c_get) __P((DBC *, DBT *, DBT *, u_int32_t));
- int (*c_put) __P((DBC *, DBT *, DBT *, u_int32_t));
-
- /* Methods: private. */
- int (*c_am_close) __P((DBC *, db_pgno_t, int *));
- int (*c_am_del) __P((DBC *));
- int (*c_am_destroy) __P((DBC *));
- int (*c_am_get) __P((DBC *, DBT *, DBT *, u_int32_t, db_pgno_t *));
- int (*c_am_put) __P((DBC *, DBT *, DBT *, u_int32_t, db_pgno_t *));
- int (*c_am_writelock) __P((DBC *));
-
-#define DBC_ACTIVE 0x001 /* Cursor is being used. */
-#define DBC_OPD 0x002 /* Cursor references off-page dups. */
-#define DBC_RECOVER 0x004 /* Cursor created by recovery routine
- * (do not log or lock).
- */
-#define DBC_RMW 0x008 /* Acquire write flag in read op. */
-#define DBC_WRITECURSOR 0x010 /* Cursor may be used to write (CDB). */
-#define DBC_WRITEDUP 0x020 /* idup'ed DBC_WRITECURSOR (CDB). */
-#define DBC_WRITER 0x040 /* Cursor immediately writing (CDB). */
-#define DBC_TRANSIENT 0x080 /* Cursor is transient. */
-#define DBC_COMPENSATE 0x100 /* Cursor is doing compensation;
- * do not lock.
- */
- u_int32_t flags;
-};
-
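The cursor above is driven entirely through its public method slots (c_get, c_put, c_close, ...), reached via the DBC handle that DB->cursor() returns. A minimal, illustrative sketch of the usual scan pattern follows; it assumes an already-opened DB handle plus the DB_NEXT and DB_NOTFOUND constants defined earlier in db.h, and abbreviates error handling.

/*
 * Illustrative sketch only: walk every key/data pair with a cursor.
 */
#include <string.h>
#include "db.h"

int
dump_all(DB *dbp)
{
	DBC *dbc;
	DBT key, data;
	int ret, t_ret;

	/* The cursor is created through the DB handle. */
	if ((ret = dbp->cursor(dbp, NULL, &dbc, 0)) != 0)
		return (ret);

	memset(&key, 0, sizeof(key));
	memset(&data, 0, sizeof(data));

	while ((ret = dbc->c_get(dbc, &key, &data, DB_NEXT)) == 0) {
		/* ... use key.data/key.size and data.data/data.size ... */
	}
	if (ret == DB_NOTFOUND)		/* Normal end of the scan. */
		ret = 0;

	/* Close the cursor, preserving the first error seen. */
	if ((t_ret = dbc->c_close(dbc)) != 0 && ret == 0)
		ret = t_ret;
	return (ret);
}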
-/* Key range statistics structure */
-struct __key_range {
- double less;
- double equal;
- double greater;
-};
-
-/* Btree/Recno statistics structure. */
-struct __db_bt_stat {
- u_int32_t bt_magic; /* Magic number. */
- u_int32_t bt_version; /* Version number. */
- u_int32_t bt_metaflags; /* Metadata flags. */
- u_int32_t bt_nkeys; /* Number of unique keys. */
- u_int32_t bt_ndata; /* Number of data items. */
- u_int32_t bt_pagesize; /* Page size. */
- u_int32_t bt_maxkey; /* Maxkey value. */
- u_int32_t bt_minkey; /* Minkey value. */
- u_int32_t bt_re_len; /* Fixed-length record length. */
- u_int32_t bt_re_pad; /* Fixed-length record pad. */
- u_int32_t bt_levels; /* Tree levels. */
- u_int32_t bt_int_pg; /* Internal pages. */
- u_int32_t bt_leaf_pg; /* Leaf pages. */
- u_int32_t bt_dup_pg; /* Duplicate pages. */
- u_int32_t bt_over_pg; /* Overflow pages. */
- u_int32_t bt_free; /* Pages on the free list. */
- u_int32_t bt_int_pgfree; /* Bytes free in internal pages. */
- u_int32_t bt_leaf_pgfree; /* Bytes free in leaf pages. */
- u_int32_t bt_dup_pgfree; /* Bytes free in duplicate pages. */
- u_int32_t bt_over_pgfree; /* Bytes free in overflow pages. */
-};
-
-/* Queue statistics structure. */
-struct __db_qam_stat {
- u_int32_t qs_magic; /* Magic number. */
- u_int32_t qs_version; /* Version number. */
- u_int32_t qs_metaflags; /* Metadata flags. */
- u_int32_t qs_nkeys; /* Number of unique keys. */
- u_int32_t qs_ndata; /* Number of data items. */
- u_int32_t qs_pagesize; /* Page size. */
- u_int32_t qs_pages; /* Data pages. */
- u_int32_t qs_re_len; /* Fixed-length record length. */
- u_int32_t qs_re_pad; /* Fixed-length record pad. */
- u_int32_t qs_pgfree; /* Bytes free in data pages. */
- u_int32_t qs_first_recno; /* First undeleted record. */
- u_int32_t qs_cur_recno; /* Last allocated record number. */
-};
-
-/* Hash statistics structure. */
-struct __db_h_stat {
- u_int32_t hash_magic; /* Magic number. */
- u_int32_t hash_version; /* Version number. */
- u_int32_t hash_metaflags; /* Metadata flags. */
- u_int32_t hash_nkeys; /* Number of unique keys. */
- u_int32_t hash_ndata; /* Number of data items. */
- u_int32_t hash_pagesize; /* Page size. */
- u_int32_t hash_nelem; /* Original nelem specified. */
- u_int32_t hash_ffactor; /* Fill factor specified at create. */
- u_int32_t hash_buckets; /* Number of hash buckets. */
- u_int32_t hash_free; /* Pages on the free list. */
- u_int32_t hash_bfree; /* Bytes free on bucket pages. */
- u_int32_t hash_bigpages; /* Number of big key/data pages. */
- u_int32_t hash_big_bfree; /* Bytes free on big item pages. */
- u_int32_t hash_overflows; /* Number of overflow pages. */
- u_int32_t hash_ovfl_free; /* Bytes free on ovfl pages. */
- u_int32_t hash_dup; /* Number of dup pages. */
- u_int32_t hash_dup_free; /* Bytes free on duplicate pages. */
-};
-
-int db_create __P((DB **, DB_ENV *, u_int32_t));
-int db_env_create __P((DB_ENV **, u_int32_t));
-int db_env_set_func_close __P((int (*)(int)));
-int db_env_set_func_dirfree __P((void (*)(char **, int)));
-int db_env_set_func_dirlist __P((int (*)(const char *, char ***, int *)));
-int db_env_set_func_exists __P((int (*)(const char *, int *)));
-int db_env_set_func_free __P((void (*)(void *)));
-int db_env_set_func_fsync __P((int (*)(int)));
-int db_env_set_func_ioinfo __P((int (*)(const char *,
- int, u_int32_t *, u_int32_t *, u_int32_t *)));
-int db_env_set_func_malloc __P((void *(*)(size_t)));
-int db_env_set_func_map __P((int (*)(char *, size_t, int, int, void **)));
-int db_env_set_func_open __P((int (*)(const char *, int, ...)));
-int db_env_set_func_read __P((ssize_t (*)(int, void *, size_t)));
-int db_env_set_func_realloc __P((void *(*)(void *, size_t)));
-int db_env_set_func_rename __P((int (*)(const char *, const char *)));
-int db_env_set_func_seek
- __P((int (*)(int, size_t, db_pgno_t, u_int32_t, int, int)));
-int db_env_set_func_sleep __P((int (*)(u_long, u_long)));
-int db_env_set_func_unlink __P((int (*)(const char *)));
-int db_env_set_func_unmap __P((int (*)(void *, size_t)));
-int db_env_set_func_write __P((ssize_t (*)(int, const void *, size_t)));
-int db_env_set_func_yield __P((int (*)(void)));
-int db_env_set_pageyield __P((int));
-int db_env_set_panicstate __P((int));
-int db_env_set_region_init __P((int));
-int db_env_set_tas_spins __P((u_int32_t));
-char *db_strerror __P((int));
-char *db_version __P((int *, int *, int *));
-
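The prototypes above are the C entry points for creating the two handle types and for the global utility routines. A small sketch of how they fit together; opening the environment and database uses handle methods declared earlier in db.h and is only hinted at here.

/*
 * Minimal sketch: report the library version and create both handles.
 */
#include <stdio.h>
#include "db.h"

int
make_handles(DB_ENV **dbenvp, DB **dbpp)
{
	int major, minor, patch, ret;

	printf("using %s\n", db_version(&major, &minor, &patch));

	if ((ret = db_env_create(dbenvp, 0)) != 0) {
		fprintf(stderr, "db_env_create: %s\n", db_strerror(ret));
		return (ret);
	}
	if ((ret = db_create(dbpp, *dbenvp, 0)) != 0) {
		fprintf(stderr, "db_create: %s\n", db_strerror(ret));
		return (ret);
	}
	/* ... set any tuning parameters, then open both handles ... */
	return (0);
}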
-/*******************************************************
- * Locking
- *******************************************************/
-#define DB_LOCKVERSION 1
-
-/* Flag values for lock_vec(), lock_get(). */
-#define DB_LOCK_NOWAIT 0x01 /* Don't wait on unavailable lock. */
-#define DB_LOCK_RECORD 0x02 /* Internal: record lock. */
-#define DB_LOCK_UPGRADE 0x04 /* Internal: upgrade existing lock. */
-#define DB_LOCK_SWITCH 0x08 /* Internal: switch existing lock. */
-
-/* Flag values for lock_detect(). */
-#define DB_LOCK_CONFLICT 0x01 /* Run on any conflict. */
-
-/*
- * Request types.
- *
- * !!!
- * Changes here must be reflected in java/src/com/sleepycat/db/Db.java.
- */
-typedef enum {
- DB_LOCK_DUMP=0, /* Display held locks. */
- DB_LOCK_GET, /* Get the lock. */
- DB_LOCK_INHERIT, /* Pass locks to parent. */
- DB_LOCK_PUT, /* Release the lock. */
- DB_LOCK_PUT_ALL, /* Release locker's locks. */
- DB_LOCK_PUT_OBJ /* Release locker's locks on obj. */
-} db_lockop_t;
-
-/*
- * Simple R/W lock modes and modes for multi-granularity intention locking.
- *
- * !!!
- * These values are NOT random, as they are used as an index into the lock
- * conflicts arrays, i.e., DB_LOCK_IWRITE must be == 3, and DB_LOCK_IREAD
- * must be == 4.
- *
- * !!!
- * Changes here must be reflected in java/src/com/sleepycat/db/Db.java.
- */
-typedef enum {
- DB_LOCK_NG=0, /* Not granted. */
- DB_LOCK_READ, /* Shared/read. */
- DB_LOCK_WRITE, /* Exclusive/write. */
- DB_LOCK_WAIT, /* Wait for event */
- DB_LOCK_IWRITE, /* Intent exclusive/write. */
- DB_LOCK_IREAD, /* Intent to share/read. */
- DB_LOCK_IWR /* Intent to read and write. */
-} db_lockmode_t;
-
-/*
- * Status of a lock.
- */
-typedef enum {
- DB_LSTAT_ABORTED, /* Lock belongs to an aborted txn. */
- DB_LSTAT_ERR, /* Lock is bad. */
- DB_LSTAT_FREE, /* Lock is unallocated. */
- DB_LSTAT_HELD, /* Lock is currently held. */
- DB_LSTAT_NOGRANT, /* Lock was not granted. */
- DB_LSTAT_PENDING, /* Lock was waiting and has been
- * promoted; waiting for the owner
- * to run and upgrade it to held. */
- DB_LSTAT_WAITING /* Lock is on the wait queue. */
-} db_status_t;
-
-/* Lock request structure. */
-struct __db_lockreq {
- db_lockop_t op; /* Operation. */
- db_lockmode_t mode; /* Requested mode. */
- u_int32_t locker; /* Locker identity. */
- DBT *obj; /* Object being locked. */
- DB_LOCK lock; /* Lock returned. */
-};
-
-/*
- * Commonly used conflict matrices.
- */
-
-/* Multi-granularity locking. */
-#define DB_LOCK_RIW_N 7
-extern const u_int8_t db_riw_conflicts[];
-
-struct __db_lock_stat {
- u_int32_t st_lastid; /* Last allocated locker ID. */
- u_int32_t st_maxlocks; /* Maximum number of locks in table. */
- u_int32_t st_maxlockers; /* Maximum number of lockers in table. */
- u_int32_t st_maxobjects; /* Maximum number of objects in table. */
- u_int32_t st_nmodes; /* Number of lock modes. */
- u_int32_t st_nlocks; /* Current number of locks. */
- u_int32_t st_maxnlocks; /* Maximum number of locks so far. */
- u_int32_t st_nlockers; /* Current number of lockers. */
- u_int32_t st_maxnlockers; /* Maximum number of lockers so far. */
- u_int32_t st_nobjects; /* Current number of objects. */
- u_int32_t st_maxnobjects; /* Maximum number of objects so far. */
- u_int32_t st_nconflicts; /* Number of lock conflicts. */
- u_int32_t st_nrequests; /* Number of lock gets. */
- u_int32_t st_nreleases; /* Number of lock puts. */
- u_int32_t st_nnowaits; /* Number of requests that would have
- waited, but NOWAIT was set. */
- u_int32_t st_ndeadlocks; /* Number of lock deadlocks. */
- u_int32_t st_region_wait; /* Region lock granted after wait. */
- u_int32_t st_region_nowait; /* Region lock granted without wait. */
- u_int32_t st_regsize; /* Region size. */
-};
-
-int lock_detect __P((DB_ENV *, u_int32_t, u_int32_t, int *));
-int lock_get __P((DB_ENV *,
- u_int32_t, u_int32_t, const DBT *, db_lockmode_t, DB_LOCK *));
-int lock_id __P((DB_ENV *, u_int32_t *));
-int lock_put __P((DB_ENV *, DB_LOCK *));
-int lock_stat __P((DB_ENV *, DB_LOCK_STAT **, void *(*)(size_t)));
-int lock_vec __P((DB_ENV *,
- u_int32_t, u_int32_t, DB_LOCKREQ *, int, DB_LOCKREQ **));
-
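A hedged sketch of the locking API declared above: allocate a locker ID, take a read lock on an application-named object, and release it. The object name and surrounding function are illustrative; the prototypes and the DB_LOCK_READ mode come from this header.

/*
 * Illustrative only: take and release one read lock on an
 * application-named object, using the prototypes declared above.
 */
#include <string.h>
#include "db.h"

int
lock_example(DB_ENV *dbenv)
{
	DBT obj;
	DB_LOCK lock;
	u_int32_t locker;
	int ret;

	/* Allocate a locker ID for this thread of control. */
	if ((ret = lock_id(dbenv, &locker)) != 0)
		return (ret);

	memset(&obj, 0, sizeof(obj));
	obj.data = "my-object";			/* Any application name. */
	obj.size = sizeof("my-object");

	if ((ret = lock_get(dbenv,
	    locker, 0, &obj, DB_LOCK_READ, &lock)) != 0)
		return (ret);

	/* ... read the resource the lock protects ... */

	return (lock_put(dbenv, &lock));
}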
-/*******************************************************
- * Logging.
- *******************************************************/
-/* Flag values for log_archive(). */
-#define DB_ARCH_ABS 0x001 /* Absolute pathnames. */
-#define DB_ARCH_DATA 0x002 /* Data files. */
-#define DB_ARCH_LOG 0x004 /* Log files. */
-
-/*
- * A DB_LSN has two parts, a fileid which identifies a specific file, and an
- * offset within that file. The fileid is an unsigned 4-byte quantity that
- * uniquely identifies a file within the log directory -- currently a simple
- * counter inside the log. The offset is also an unsigned 4-byte value. The
- * log manager guarantees the offset never overflows an unsigned 4-byte
- * value by switching to a new log file before that maximum length is
- * reached.
- */
-struct __db_lsn {
- u_int32_t file; /* File ID. */
- u_int32_t offset; /* File offset. */
-};
-
-/* Log statistics structure. */
-struct __db_log_stat {
- u_int32_t st_magic; /* Log file magic number. */
- u_int32_t st_version; /* Log file version number. */
- int st_mode; /* Log file mode. */
- u_int32_t st_lg_bsize; /* Log buffer size. */
- u_int32_t st_lg_max; /* Maximum log file size. */
- u_int32_t st_w_bytes; /* Bytes to log. */
- u_int32_t st_w_mbytes; /* Megabytes to log. */
- u_int32_t st_wc_bytes; /* Bytes to log since checkpoint. */
- u_int32_t st_wc_mbytes; /* Megabytes to log since checkpoint. */
- u_int32_t st_wcount; /* Total writes to the log. */
- u_int32_t st_wcount_fill; /* Overflow writes to the log. */
- u_int32_t st_scount; /* Total syncs to the log. */
- u_int32_t st_region_wait; /* Region lock granted after wait. */
- u_int32_t st_region_nowait; /* Region lock granted without wait. */
- u_int32_t st_cur_file; /* Current log file number. */
- u_int32_t st_cur_offset; /* Current log file offset. */
- u_int32_t st_regsize; /* Region size. */
-};
-
-int log_archive __P((DB_ENV *, char **[], u_int32_t, void *(*)(size_t)));
-int log_compare __P((const DB_LSN *, const DB_LSN *));
-int log_file __P((DB_ENV *, const DB_LSN *, char *, size_t));
-int log_flush __P((DB_ENV *, const DB_LSN *));
-int log_get __P((DB_ENV *, DB_LSN *, DBT *, u_int32_t));
-int log_put __P((DB_ENV *, DB_LSN *, const DBT *, u_int32_t));
-int log_register __P((DB_ENV *, DB *, const char *));
-int log_stat __P((DB_ENV *, DB_LOG_STAT **, void *(*)(size_t)));
-int log_unregister __P((DB_ENV *, DB *));
-
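A brief sketch of the logging calls declared above, combined with the DB_LSN ordering described earlier (file number first, then offset). The record contents and the last_flushed bookkeeping are illustrative.

/*
 * Illustrative only: append an application record to the log and flush
 * it if it lies past the last LSN already known to be on disk.
 */
#include <string.h>
#include "db.h"

int
log_example(DB_ENV *dbenv, DB_LSN *last_flushed)
{
	DBT rec;
	DB_LSN lsn;
	int ret;

	memset(&rec, 0, sizeof(rec));
	rec.data = "application record";
	rec.size = sizeof("application record");

	if ((ret = log_put(dbenv, &lsn, &rec, 0)) != 0)
		return (ret);

	/* log_compare orders LSNs by file number, then by offset. */
	if (log_compare(&lsn, last_flushed) > 0 &&
	    (ret = log_flush(dbenv, &lsn)) == 0)
		*last_flushed = lsn;
	return (ret);
}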
-/*******************************************************
- * Mpool
- *******************************************************/
-/* Flag values for memp_fget(). */
-#define DB_MPOOL_CREATE 0x001 /* Create a page. */
-#define DB_MPOOL_LAST 0x002 /* Return the last page. */
-#define DB_MPOOL_NEW 0x004 /* Create a new page. */
-#define DB_MPOOL_NEW_GROUP 0x008 /* Create a group of pages. */
-#define DB_MPOOL_EXTENT 0x010 /* Get for an extent. */
-
-/* Flag values for memp_fput(), memp_fset(). */
-#define DB_MPOOL_CLEAN 0x001 /* Page is not modified. */
-#define DB_MPOOL_DIRTY 0x002 /* Page is modified. */
-#define DB_MPOOL_DISCARD 0x004 /* Don't cache the page. */
-
-/* Mpool statistics structure. */
-struct __db_mpool_stat {
- u_int32_t st_cache_hit; /* Pages found in the cache. */
- u_int32_t st_cache_miss; /* Pages not found in the cache. */
- u_int32_t st_map; /* Pages from mapped files. */
- u_int32_t st_page_create; /* Pages created in the cache. */
- u_int32_t st_page_in; /* Pages read in. */
- u_int32_t st_page_out; /* Pages written out. */
- u_int32_t st_ro_evict; /* Clean pages forced from the cache. */
- u_int32_t st_rw_evict; /* Dirty pages forced from the cache. */
- u_int32_t st_hash_buckets; /* Number of hash buckets. */
- u_int32_t st_hash_searches; /* Total hash chain searches. */
- u_int32_t st_hash_longest; /* Longest hash chain searched. */
- u_int32_t st_hash_examined; /* Total hash entries searched. */
- u_int32_t st_page_clean; /* Clean pages. */
- u_int32_t st_page_dirty; /* Dirty pages. */
- u_int32_t st_page_trickle; /* Pages written by memp_trickle. */
- u_int32_t st_region_wait; /* Region lock granted after wait. */
- u_int32_t st_region_nowait; /* Region lock granted without wait. */
- u_int32_t st_gbytes; /* Total cache size: GB. */
- u_int32_t st_bytes; /* Total cache size: B. */
- u_int32_t st_ncache; /* Number of caches. */
- u_int32_t st_regsize; /* Cache size. */
-};
-
-/* Mpool file open information structure. */
-struct __db_mpool_finfo {
- int ftype; /* File type. */
- DBT *pgcookie; /* Byte-string passed to pgin/pgout. */
- u_int8_t *fileid; /* Unique file ID. */
- int32_t lsn_offset; /* LSN offset in page. */
- u_int32_t clear_len; /* Cleared length on created pages. */
-};
-
-/* Mpool file statistics structure. */
-struct __db_mpool_fstat {
- char *file_name; /* File name. */
- size_t st_pagesize; /* Page size. */
- u_int32_t st_cache_hit; /* Pages found in the cache. */
- u_int32_t st_cache_miss; /* Pages not found in the cache. */
- u_int32_t st_map; /* Pages from mapped files. */
- u_int32_t st_page_create; /* Pages created in the cache. */
- u_int32_t st_page_in; /* Pages read in. */
- u_int32_t st_page_out; /* Pages written out. */
-};
-
-int memp_fclose __P((DB_MPOOLFILE *));
-int memp_fget __P((DB_MPOOLFILE *, db_pgno_t *, u_int32_t, void *));
-int memp_fopen __P((DB_ENV *, const char *,
- u_int32_t, int, size_t, DB_MPOOL_FINFO *, DB_MPOOLFILE **));
-int memp_fput __P((DB_MPOOLFILE *, void *, u_int32_t));
-int memp_fset __P((DB_MPOOLFILE *, void *, u_int32_t));
-int memp_fsync __P((DB_MPOOLFILE *));
-int memp_register __P((DB_ENV *, int,
- int (*)(DB_ENV *, db_pgno_t, void *, DBT *),
- int (*)(DB_ENV *, db_pgno_t, void *, DBT *)));
-int memp_stat __P((DB_ENV *,
- DB_MPOOL_STAT **, DB_MPOOL_FSTAT ***, void *(*)(size_t)));
-int memp_sync __P((DB_ENV *, DB_LSN *));
-int memp_trickle __P((DB_ENV *, int, int *));
-
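An illustrative sketch of the buffer-pool calls declared above: open a file in the pool, pin a page with memp_fget, mark it dirty on the way back with memp_fput, and close the file. The page size and file mode are arbitrary choices for the example.

/*
 * Illustrative only: pin page 0 of a file through the buffer pool,
 * dirty it, and return it.
 */
#include "db.h"

int
mpool_example(DB_ENV *dbenv, const char *path)
{
	DB_MPOOLFILE *mpf;
	db_pgno_t pgno;
	void *page;
	int ret, t_ret;

	if ((ret = memp_fopen(dbenv,
	    path, 0, 0600, 8 * 1024, NULL, &mpf)) != 0)
		return (ret);

	pgno = 0;
	if ((ret = memp_fget(mpf, &pgno, DB_MPOOL_CREATE, &page)) == 0) {
		/* ... modify the page in place ... */
		ret = memp_fput(mpf, page, DB_MPOOL_DIRTY);
	}

	if ((t_ret = memp_fclose(mpf)) != 0 && ret == 0)
		ret = t_ret;
	return (ret);
}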
-/*******************************************************
- * Transactions.
- *******************************************************/
-#define DB_TXNVERSION 1
-
-/* Operations values to the tx_recover() function. */
-#define DB_TXN_BACKWARD_ROLL 1 /* Read the log backwards. */
-#define DB_TXN_FORWARD_ROLL 2 /* Read the log forwards. */
-#define DB_TXN_OPENFILES 3 /* Read for open files. */
-#define DB_TXN_REDO 4 /* Redo the operation. */
-#define DB_TXN_UNDO 5 /* Undo the operation. */
-
-/* Internal transaction status values. */
-
-/* Transaction statistics structure. */
-struct __db_txn_active {
- u_int32_t txnid; /* Transaction ID */
- u_int32_t parentid; /* Transaction ID of parent */
- DB_LSN lsn; /* Lsn of the begin record */
-};
-
-struct __db_txn_stat {
- DB_LSN st_last_ckp; /* lsn of the last checkpoint */
- DB_LSN st_pending_ckp; /* last checkpoint did not finish */
- time_t st_time_ckp; /* time of last checkpoint */
- u_int32_t st_last_txnid; /* last transaction id given out */
- u_int32_t st_maxtxns; /* maximum txns possible */
- u_int32_t st_naborts; /* number of aborted transactions */
- u_int32_t st_nbegins; /* number of begun transactions */
- u_int32_t st_ncommits; /* number of committed transactions */
- u_int32_t st_nactive; /* number of active transactions */
- u_int32_t st_maxnactive; /* maximum active transactions */
- DB_TXN_ACTIVE
- *st_txnarray; /* array of active transactions */
- u_int32_t st_region_wait; /* Region lock granted after wait. */
- u_int32_t st_region_nowait; /* Region lock granted without wait. */
- u_int32_t st_regsize; /* Region size. */
-};
-
-int txn_abort __P((DB_TXN *));
-int txn_begin __P((DB_ENV *, DB_TXN *, DB_TXN **, u_int32_t));
-int txn_checkpoint __P((DB_ENV *, u_int32_t, u_int32_t, u_int32_t));
-int txn_commit __P((DB_TXN *, u_int32_t));
-u_int32_t txn_id __P((DB_TXN *));
-int txn_prepare __P((DB_TXN *));
-int txn_stat __P((DB_ENV *, DB_TXN_STAT **, void *(*)(size_t)));
-
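A sketch of the transactional calling pattern using the prototypes above; do_work() is a hypothetical application routine, not part of this header.

/*
 * Illustrative only: the usual begin/commit/abort shape.
 */
#include "db.h"

int do_work(DB_ENV *, DB_TXN *);	/* Hypothetical application hook. */

int
txn_example(DB_ENV *dbenv)
{
	DB_TXN *txn;
	int ret;

	if ((ret = txn_begin(dbenv, NULL, &txn, 0)) != 0)
		return (ret);

	if ((ret = do_work(dbenv, txn)) != 0) {
		(void)txn_abort(txn);		/* Roll back on failure. */
		return (ret);
	}
	if ((ret = txn_commit(txn, 0)) != 0)
		return (ret);

	/* Optionally checkpoint; 0 KB / 0 minutes forces one now. */
	return (txn_checkpoint(dbenv, 0, 0, 0));
}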
-#ifndef DB_DBM_HSEARCH
-#define DB_DBM_HSEARCH 0 /* No historic interfaces by default. */
-#endif
-#if DB_DBM_HSEARCH != 0
-/*******************************************************
- * Dbm/Ndbm historic interfaces.
- *******************************************************/
-#define DBM_INSERT 0 /* Flags to dbm_store(). */
-#define DBM_REPLACE 1
-
-/*
- * The DB support for ndbm(3) always appends this suffix to the
- * file name to avoid overwriting the user's original database.
- */
-#define DBM_SUFFIX ".db"
-
-#if defined(_XPG4_2)
-typedef struct {
- char *dptr;
- size_t dsize;
-} datum;
-#else
-typedef struct {
- char *dptr;
- int dsize;
-} datum;
-#endif
-
-/*
- * Translate DBM calls into DB calls so that DB doesn't step on the
- * application's name space.
- *
- * The global variables dbrdonly, dirf and pagf were not retained when 4BSD
- * replaced the dbm interface with ndbm, and are not supported here.
- */
-#define dbminit(a) __db_dbm_init(a)
-#define dbmclose __db_dbm_close
-#if !defined(__cplusplus)
-#define delete(a) __db_dbm_delete(a)
-#endif
-#define fetch(a) __db_dbm_fetch(a)
-#define firstkey __db_dbm_firstkey
-#define nextkey(a) __db_dbm_nextkey(a)
-#define store(a, b) __db_dbm_store(a, b)
-
-/* Prototype the DB calls. */
-int __db_dbm_close __P((void));
-int __db_dbm_dbrdonly __P((void));
-int __db_dbm_delete __P((datum));
-int __db_dbm_dirf __P((void));
-datum __db_dbm_fetch __P((datum));
-datum __db_dbm_firstkey __P((void));
-int __db_dbm_init __P((char *));
-datum __db_dbm_nextkey __P((datum));
-int __db_dbm_pagf __P((void));
-int __db_dbm_store __P((datum, datum));
-
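For illustration, the remapped dbm(3) calls above are used exactly like the historic interface; a minimal sketch follows (database name and key/value strings are arbitrary).

/*
 * Illustrative only: store and fetch one pair through the remapped
 * dbm(3) calls.
 */
#include "db.h"

int
dbm_compat_example(void)
{
	datum key, val, found;

	if (dbminit("mydb") != 0)		/* Expands to __db_dbm_init(). */
		return (1);

	key.dptr = "fruit";
	key.dsize = sizeof("fruit");
	val.dptr = "apple";
	val.dsize = sizeof("apple");

	if (store(key, val) != 0)
		return (1);

	found = fetch(key);			/* found.dptr == NULL if absent. */
	(void)found;

	return (dbmclose());
}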
-/*
- * Translate NDBM calls into DB calls so that DB doesn't step on the
- * application's name space.
- */
-#define dbm_clearerr(a) __db_ndbm_clearerr(a)
-#define dbm_close(a) __db_ndbm_close(a)
-#define dbm_delete(a, b) __db_ndbm_delete(a, b)
-#define dbm_dirfno(a) __db_ndbm_dirfno(a)
-#define dbm_error(a) __db_ndbm_error(a)
-#define dbm_fetch(a, b) __db_ndbm_fetch(a, b)
-#define dbm_firstkey(a) __db_ndbm_firstkey(a)
-#define dbm_nextkey(a) __db_ndbm_nextkey(a)
-#define dbm_open(a, b, c) __db_ndbm_open(a, b, c)
-#define dbm_pagfno(a) __db_ndbm_pagfno(a)
-#define dbm_rdonly(a) __db_ndbm_rdonly(a)
-#define dbm_store(a, b, c, d) __db_ndbm_store(a, b, c, d)
-
-/* Prototype the DB calls. */
-int __db_ndbm_clearerr __P((DBM *));
-void __db_ndbm_close __P((DBM *));
-int __db_ndbm_delete __P((DBM *, datum));
-int __db_ndbm_dirfno __P((DBM *));
-int __db_ndbm_error __P((DBM *));
-datum __db_ndbm_fetch __P((DBM *, datum));
-datum __db_ndbm_firstkey __P((DBM *));
-datum __db_ndbm_nextkey __P((DBM *));
-DBM *__db_ndbm_open __P((const char *, int, int));
-int __db_ndbm_pagfno __P((DBM *));
-int __db_ndbm_rdonly __P((DBM *));
-int __db_ndbm_store __P((DBM *, datum, datum, int));
-
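Similarly, a short sketch of the ndbm(3)-style interface as remapped above; the O_CREAT/O_RDWR open flags come from <fcntl.h> and are assumptions of the example, and the on-disk file gets the DBM_SUFFIX appended as noted earlier.

/*
 * Illustrative only: the same pair through the ndbm(3)-style calls;
 * "colors" becomes "colors.db" on disk.
 */
#include <fcntl.h>
#include "db.h"

int
ndbm_compat_example(void)
{
	DBM *db;
	datum key, val;

	if ((db = dbm_open("colors", O_CREAT | O_RDWR, 0644)) == NULL)
		return (1);

	key.dptr = "sky";
	key.dsize = sizeof("sky");
	val.dptr = "blue";
	val.dsize = sizeof("blue");

	if (dbm_store(db, key, val, DBM_REPLACE) != 0) {
		dbm_close(db);
		return (1);
	}

	val = dbm_fetch(db, key);		/* val.dptr == NULL if absent. */
	dbm_close(db);
	return (0);
}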
-/*******************************************************
- * Hsearch historic interface.
- *******************************************************/
-typedef enum {
- FIND, ENTER
-} ACTION;
-
-typedef struct entry {
- char *key;
- char *data;
-} ENTRY;
-
-/*
- * Translate HSEARCH calls into DB calls so that DB doesn't step on the
- * application's name space.
- */
-#define hcreate(a) __db_hcreate(a)
-#define hdestroy __db_hdestroy
-#define hsearch(a, b) __db_hsearch(a, b)
-
-/* Prototype the DB calls. */
-int __db_hcreate __P((size_t));
-void __db_hdestroy __P((void));
-ENTRY *__db_hsearch __P((ENTRY, ACTION));
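And a small sketch of the hsearch(3)-compatible interface declared above, using the standard ENTER/FIND semantics; table size and strings are arbitrary example values.

/*
 * Illustrative only: one insert and one lookup through the remapped
 * hsearch(3) interface.
 */
#include <stdio.h>
#include "db.h"

int
hsearch_example(void)
{
	ENTRY item, *found;

	if (hcreate(128) == 0)			/* Non-zero means success. */
		return (1);

	item.key = "answer";
	item.data = "42";
	if (hsearch(item, ENTER) == NULL)
		return (1);

	if ((found = hsearch(item, FIND)) != NULL)
		printf("%s -> %s\n", found->key, found->data);

	hdestroy();
	return (0);
}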
-#endif /* DB_DBM_HSEARCH */
-
-/*
- * XXX
- * MacOS: Reset Metrowerks C enum sizes.
- */
-#ifdef __MWERKS__
-#pragma enumsalwaysint reset
-#endif
-
-#if defined(__cplusplus)
-}
-#endif
-
-#endif /* !_DB_H_ */
diff --git a/bdb/include/db_am.h b/bdb/include/db_am.h
deleted file mode 100644
index 3a41eb3bbfd..00000000000
--- a/bdb/include/db_am.h
+++ /dev/null
@@ -1,131 +0,0 @@
-/*-
- * See the file LICENSE for redistribution information.
- *
- * Copyright (c) 1996, 1997, 1998, 1999, 2000
- * Sleepycat Software. All rights reserved.
- *
- * $Id: db_am.h,v 11.21 2000/12/12 17:43:56 bostic Exp $
- */
-#ifndef _DB_AM_H_
-#define _DB_AM_H_
-
-#define DB_MINPAGECACHE 10 /* Min pages access methods cache. */
-
-/* DB recovery operation codes. The low bits used to have flags or'd in. */
-#define DB_ADD_DUP 0x10
-#define DB_REM_DUP 0x20
-#define DB_ADD_BIG 0x30
-#define DB_REM_BIG 0x40
-#define DB_UNUSED_1 0x50
-#define DB_UNUSED_2 0x60
-#define DB_ADD_PAGE 0x70
-#define DB_REM_PAGE 0x80
-
-/*
- * This is a grotesque naming hack. We have modified the btree page
- * allocation and freeing functions to be generic and have therefore
- * moved them into the access-method independent portion of the code.
- * However, since we didn't want to create new log records and routines
- * for them, we left their logging and recovery functions over in btree.
- * To make the code look prettier, we macro them, but this is sure to
- * confuse the heck out of everyone.
- */
-#define __db_pg_alloc_log __bam_pg_alloc_log
-#define __db_pg_free_log __bam_pg_free_log
-
-/*
- * Standard initialization and shutdown macros for all recovery functions.
- *
- * Requires the following local variables:
- *
- * DB *file_dbp;
- * DB_MPOOLFILE *mpf;
- * int ret;
- */
-#define REC_INTRO(func, inc_count) { \
- file_dbp = NULL; \
- dbc = NULL; \
- if ((ret = func(dbenv, dbtp->data, &argp)) != 0) \
- goto out; \
- if ((ret = __db_fileid_to_db(dbenv, \
- &file_dbp, argp->fileid, inc_count)) != 0) { \
- if (ret == DB_DELETED) { \
- ret = 0; \
- goto done; \
- } \
- goto out; \
- } \
- if (file_dbp == NULL) \
- goto out; \
- if ((ret = file_dbp->cursor(file_dbp, NULL, &dbc, 0)) != 0) \
- goto out; \
- F_SET(dbc, DBC_RECOVER); \
- mpf = file_dbp->mpf; \
-}
-
-#define REC_CLOSE { \
- int __t_ret; \
- if (argp != NULL) \
- __os_free(argp, sizeof(*argp)); \
- if (dbc != NULL && (__t_ret = dbc->c_close(dbc)) != 0 && ret == 0) \
- return (__t_ret); \
- return (ret); \
-}
-
-/*
- * No-op versions of the same macros.
- */
-#define REC_NOOP_INTRO(func) { \
- if ((ret = func(dbenv, dbtp->data, &argp)) != 0) \
- return (ret); \
-}
-#define REC_NOOP_CLOSE \
- if (argp != NULL) \
- __os_free(argp, sizeof(*argp)); \
- return (ret); \
-
-/*
- * Standard debugging macro for all recovery functions.
- */
-#ifdef DEBUG_RECOVER
-#define REC_PRINT(func) \
- (void)func(dbenv, dbtp, lsnp, op, info);
-#else
-#define REC_PRINT(func)
-#endif
-
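The REC_* macros above assume a fixed function shape. The skeleton below shows how REC_PRINT, REC_INTRO and REC_CLOSE are typically strung together; the __foo_* names are hypothetical stand-ins for a generated log-record family, and the body is a sketch rather than code from this tree (unused-argument hygiene such as COMPQUIET is omitted).

/*
 * Sketch of the function shape the REC_* macros expect.  The done/out
 * labels are required by REC_INTRO, and REC_CLOSE supplies the return.
 */
int
__foo_recover(DB_ENV *dbenv, DBT *dbtp, DB_LSN *lsnp, db_recops op, void *info)
{
	__foo_args *argp = NULL;
	DB *file_dbp;
	DBC *dbc;
	DB_MPOOLFILE *mpf;
	int ret;

	REC_PRINT(__foo_print);
	REC_INTRO(__foo_read, 0);

	if (DB_REDO(op)) {
		/* ... reapply the change using mpf and file_dbp ... */
	} else {
		/* ... back the change out ... */
	}

done:	*lsnp = argp->prev_lsn;
	ret = 0;
out:	REC_CLOSE;
}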
-/*
- * Flags to __db_lget
- */
-#define LCK_COUPLE 0x01 /* Lock Couple */
-#define LCK_ALWAYS 0x02 /* Lock even for off page dup cursors */
-#define LCK_ROLLBACK 0x04 /* Lock even if in rollback */
-
-/*
- * If doing transactions we have to hold the locks associated with a data item
- * from a page for the entire transaction. However, we don't have to hold the
- * locks associated with walking the tree. Distinguish between the two so that
- * we don't tie up the internal pages of the tree longer than necessary.
- */
-#define __LPUT(dbc, lock) \
- (lock.off != LOCK_INVALID ? \
- lock_put((dbc)->dbp->dbenv, &(lock)) : 0)
-#define __TLPUT(dbc, lock) \
- (lock.off != LOCK_INVALID && \
- (dbc)->txn == NULL ? lock_put((dbc)->dbp->dbenv, &(lock)) : 0)
-
-#ifdef DIAGNOSTIC
-#define DB_CHECK_TXN(dbp, txn) \
- if (txn != NULL) \
- F_SET(dbp, DB_AM_TXN); \
- else if (F_ISSET(dbp, DB_AM_TXN)) \
- return (__db_missing_txn_err((dbp)->dbenv));
-#else
-#define DB_CHECK_TXN(dbp, txn)
-#endif
-
-#include "db_dispatch.h"
-#include "db_auto.h"
-#include "crdel_auto.h"
-#include "db_ext.h"
-#endif
diff --git a/bdb/include/db_cxx.h b/bdb/include/db_cxx.h
deleted file mode 100644
index b5599ee699c..00000000000
--- a/bdb/include/db_cxx.h
+++ /dev/null
@@ -1,652 +0,0 @@
-/*-
- * See the file LICENSE for redistribution information.
- *
- * Copyright (c) 1997, 1998, 1999, 2000
- * Sleepycat Software. All rights reserved.
- *
- * $Id: db_cxx.h,v 11.44 2000/12/21 20:30:18 dda Exp $
- */
-
-#ifndef _DB_CXX_H_
-#define _DB_CXX_H_
-//
-// C++ assumptions:
-//
-// To ensure portability to many platforms, both new and old, we make
-// few assumptions about the C++ compiler and library. For example,
-// we do not expect STL, templates or namespaces to be available. The
-// "newest" C++ feature used is exceptions, which are used liberally
-// to transmit error information. Even the use of exceptions can be
-// disabled at runtime; to do so, use the DB_CXX_NO_EXCEPTIONS flag
-// with the DbEnv or Db constructor.
-//
-// C++ naming conventions:
-//
-// - All top level class names start with Db.
-// - All class members start with lower case letter.
-// - All private data members are suffixed with underscore.
-// - Use underscores to divide names into multiple words.
-// - Simple data accessors are named with get_ or set_ prefix.
-// - All method names are taken from names of functions in the C
-// layer of db (usually by dropping a prefix like "db_").
-// These methods have the same argument types and order,
-// other than dropping the explicit arg that acts as "this".
-//
-// As a rule, each DbFoo object has exactly one underlying DB_FOO struct
-// (defined in db.h) associated with it. In some cases, we inherit directly
-// from the DB_FOO structure to make this relationship explicit. Often,
-// the underlying C layer allocates and deallocates these structures, so
-// there is no easy way to add any data to the DbFoo class. When you see
-// a comment about whether data is permitted to be added, this is what
-// is going on. Of course, if we need to add data to such C++ classes
-// in the future, we will arrange to have an indirect pointer to the
-// DB_FOO struct (as some of the classes already have).
-//
-
-////////////////////////////////////////////////////////////////
-////////////////////////////////////////////////////////////////
-//
-// Forward declarations
-//
-
-#include <iostream.h>
-#include <stdarg.h>
-#include "db.h"
-
-class Db; // forward
-class Dbc; // forward
-class DbEnv; // forward
-class DbException; // forward
-class DbInfo; // forward
-class DbLock; // forward
-class DbLsn; // forward
-class DbMpoolFile; // forward
-class Dbt; // forward
-class DbTxn; // forward
-
-// These classes are not defined here and should be invisible
-// to the user, but some compilers require forward references.
-// There is one for each use of the DEFINE_DB_CLASS macro.
-
-class DbImp;
-class DbEnvImp;
-class DbMpoolFileImp;
-class DbTxnImp;
-
-////////////////////////////////////////////////////////////////
-////////////////////////////////////////////////////////////////
-//
-// Mechanisms for declaring classes
-//
-
-//
-// Every class defined in this file has an _exported next to the class name.
-// This is needed for WinTel machines so that the class methods can
-// be exported or imported in a DLL as appropriate. Users of the DLL
-// use the define DB_USE_DLL. When the DLL is built, DB_CREATE_DLL
-// must be defined.
-//
-#if defined(_MSC_VER)
-
-# if defined(DB_CREATE_DLL)
-# define _exported __declspec(dllexport) // creator of dll
-# elif defined(DB_USE_DLL)
-# define _exported __declspec(dllimport) // user of dll
-# else
-# define _exported // static lib creator or user
-# endif
-
-#else
-
-# define _exported
-
-#endif
-
-// DEFINE_DB_CLASS defines an imp_ data member and imp() accessor.
-// The underlying type is a pointer to an opaque *Imp class, that
-// gets converted to the correct implementation class by the implementation.
-//
-// Since these defines use "private/public" labels, and leave the access
-// being "private", we always use these by convention before any data
-// members in the private section of a class. Keeping them in the
-// private section also emphasizes that they are off limits to user code.
-//
-#define DEFINE_DB_CLASS(name) \
- public: class name##Imp* imp() { return (imp_); } \
- public: const class name##Imp* constimp() const { return (imp_); } \
- private: class name##Imp* imp_
-
-////////////////////////////////////////////////////////////////
-////////////////////////////////////////////////////////////////
-//
-// Turn off inappropriate compiler warnings
-//
-
-#ifdef _MSC_VER
-
-// These are level 4 warnings that are explicitly disabled.
-// With Visual C++, by default you do not see above level 3 unless
-// you use /W4. But we like to compile with the highest level
-// warnings to catch other errors.
-//
-// 4201: nameless struct/union
-// triggered by standard include file <winnt.h>
-//
-// 4514: unreferenced inline function has been removed
-// certain include files in MSVC define methods that are not called
-//
-#pragma warning(disable: 4201 4514)
-
-#endif
-
-// Some interfaces can be customized by allowing users
-// to define callback functions. For performance and
-// logistical reasons, some callbacks require you to
-// declare the functions in C, or in an extern "C" block.
-//
-extern "C" {
- typedef void * (*db_malloc_fcn_type)
- (size_t);
- typedef void * (*db_realloc_fcn_type)
- (void *, size_t);
- typedef int (*bt_compare_fcn_type)
- (DB *, const DBT *, const DBT *);
- typedef size_t (*bt_prefix_fcn_type)
- (DB *, const DBT *, const DBT *);
- typedef int (*dup_compare_fcn_type)
- (DB *, const DBT *, const DBT *);
- typedef u_int32_t (*h_hash_fcn_type)
- (DB *, const void *, u_int32_t);
- typedef int (*pgin_fcn_type)(DB_ENV *dbenv,
- db_pgno_t pgno, void *pgaddr, DBT *pgcookie);
- typedef int (*pgout_fcn_type)(DB_ENV *dbenv,
- db_pgno_t pgno, void *pgaddr, DBT *pgcookie);
-};
-
-////////////////////////////////////////////////////////////////
-////////////////////////////////////////////////////////////////
-//
-// Exception classes
-//
-
-// Almost any error in the DB library throws a DbException.
-// Every exception should be considered an abnormality
-// (e.g. bug, misuse of DB, file system error).
-//
-// NOTE: We would like to inherit from class exception and
-// let it handle what(), but there are
-// MSVC++ problems when <exception> is included.
-//
-class _exported DbException
-{
-public:
- virtual ~DbException();
- DbException(int err);
- DbException(const char *description);
- DbException(const char *prefix, int err);
- DbException(const char *prefix1, const char *prefix2, int err);
- int get_errno() const;
- virtual const char *what() const;
-
- DbException(const DbException &);
- DbException &operator = (const DbException &);
-
-private:
- char *what_;
- int err_; // errno
-};
-
-////////////////////////////////////////////////////////////////
-////////////////////////////////////////////////////////////////
-//
-// Lock classes
-//
-
-class _exported DbLock
-{
- friend class DbEnv;
-
-public:
- DbLock();
-
- int put(DbEnv *env);
-
- DbLock(const DbLock &);
- DbLock &operator = (const DbLock &);
-
-protected:
- // We can add data to this class if needed
- // since its contained class is not allocated by db.
- // (see comment at top)
-
- DbLock(DB_LOCK);
- DB_LOCK lock_;
-};
-
-////////////////////////////////////////////////////////////////
-////////////////////////////////////////////////////////////////
-//
-// Log classes
-//
-
-class _exported DbLsn : protected DB_LSN
-{
- friend class DbEnv; // friendship needed to cast to base class
-};
-
-////////////////////////////////////////////////////////////////
-////////////////////////////////////////////////////////////////
-//
-// Memory pool classes
-//
-
-class _exported DbMpoolFile
-{
- friend class DbEnv;
-
-public:
- int close();
- int get(db_pgno_t *pgnoaddr, u_int32_t flags, void *pagep);
- int put(void *pgaddr, u_int32_t flags);
- int set(void *pgaddr, u_int32_t flags);
- int sync();
-
- static int open(DbEnv *envp, const char *file,
- u_int32_t flags, int mode, size_t pagesize,
- DB_MPOOL_FINFO *finfop, DbMpoolFile **mpf);
-
-private:
- // We can add data to this class if needed
- // since it is implemented via a pointer.
- // (see comment at top)
-
- // Note: use DbMpoolFile::open()
- // to get pointers to a DbMpoolFile,
- // and call DbMpoolFile::close() rather than delete to release them.
- //
- DbMpoolFile();
-
- // Shut g++ up.
-protected:
- ~DbMpoolFile();
-
-private:
- // no copying
- DbMpoolFile(const DbMpoolFile &);
- void operator = (const DbMpoolFile &);
-
- DEFINE_DB_CLASS(DbMpoolFile);
-};
-
-////////////////////////////////////////////////////////////////
-////////////////////////////////////////////////////////////////
-//
-// Transaction classes
-//
-
-class _exported DbTxn
-{
- friend class DbEnv;
-
-public:
- int abort();
- int commit(u_int32_t flags);
- u_int32_t id();
- int prepare();
-
-private:
- // We can add data to this class if needed
- // since it is implemented via a pointer.
- // (see comment at top)
-
- // Note: use DbEnv::txn_begin() to get pointers to a DbTxn,
- // and call DbTxn::abort() or DbTxn::commit rather than
- // delete to release them.
- //
- DbTxn();
- ~DbTxn();
-
- // no copying
- DbTxn(const DbTxn &);
- void operator = (const DbTxn &);
-
- DEFINE_DB_CLASS(DbTxn);
-};
-
-//
-// Berkeley DB environment class. Provides functions for opening databases.
-// Users of this library can use this class as a starting point for
-// developing a DB application: derive their application class from
-// this one and add application control logic.
-//
-// Note that if you use the default constructor, you must explicitly
-// call appinit() before any other db activity (e.g. opening files).
-//
-class _exported DbEnv
-{
- friend class Db;
- friend class DbLock;
- friend class DbMpoolFile;
-
-public:
-
- ~DbEnv();
-
- // After using this constructor, you can set any needed
- // parameters for the environment using the set_* methods.
- // Then call open() to finish initializing the environment
- // and attaching it to underlying files.
- //
- DbEnv(u_int32_t flags);
-
- // These methods match those in the C interface.
- //
- int close(u_int32_t);
- void err(int, const char *, ...);
- void errx(const char *, ...);
- int open(const char *, u_int32_t, int);
- int remove(const char *, u_int32_t);
- int set_cachesize(u_int32_t, u_int32_t, int);
- int set_data_dir(const char *);
- void set_errcall(void (*)(const char *, char *));
- void set_errfile(FILE *);
- void set_errpfx(const char *);
- int set_flags(u_int32_t, int);
- int set_feedback(void (*)(DbEnv *, int, int));
- int set_recovery_init(int (*)(DbEnv *));
- int set_lg_bsize(u_int32_t);
- int set_lg_dir(const char *);
- int set_lg_max(u_int32_t);
- int set_lk_conflicts(u_int8_t *, int);
- int set_lk_detect(u_int32_t);
- int set_lk_max(u_int32_t);
- int set_lk_max_lockers(u_int32_t);
- int set_lk_max_locks(u_int32_t);
- int set_lk_max_objects(u_int32_t);
- int set_mp_mmapsize(size_t);
- int set_mutexlocks(int);
- static int set_pageyield(int);
- int set_paniccall(void (*)(DbEnv *, int));
- static int set_panicstate(int);
- static int set_region_init(int);
- int set_server(char *, long, long, u_int32_t);
- int set_shm_key(long);
- int set_tmp_dir(const char *);
- static int set_tas_spins(u_int32_t);
- int set_tx_max(u_int32_t);
- int set_tx_recover(int (*)(DbEnv *, Dbt *, DbLsn *, db_recops));
- int set_tx_timestamp(time_t *);
- int set_verbose(u_int32_t which, int onoff);
-
- // Version information. A static method so it can be obtained anytime.
- //
- static char *version(int *major, int *minor, int *patch);
-
- // Convert DB errors to strings
- static char *strerror(int);
-
- // If an error is detected and the error call function
- // or stream is set, a message is dispatched or printed.
- // If a prefix is set, each message is prefixed.
- //
- // You can use set_errcall() or set_errfile() above to control
- // error functionality. Alternatively, you can call
- // set_error_stream() to force all errors to a C++ stream.
- // It is unwise to mix these approaches.
- //
- void set_error_stream(ostream *);
-
- // used internally
- static void runtime_error(const char *caller, int err,
- int error_policy);
-
- // Lock functions
- //
- int lock_detect(u_int32_t flags, u_int32_t atype, int *aborted);
- int lock_get(u_int32_t locker, u_int32_t flags, const Dbt *obj,
- db_lockmode_t lock_mode, DbLock *lock);
- int lock_id(u_int32_t *idp);
- int lock_stat(DB_LOCK_STAT **statp, db_malloc_fcn_type db_malloc_fcn);
- int lock_vec(u_int32_t locker, u_int32_t flags, DB_LOCKREQ list[],
- int nlist, DB_LOCKREQ **elistp);
-
- // Log functions
- //
- int log_archive(char **list[], u_int32_t flags, db_malloc_fcn_type db_malloc_fcn);
- static int log_compare(const DbLsn *lsn0, const DbLsn *lsn1);
- int log_file(DbLsn *lsn, char *namep, size_t len);
- int log_flush(const DbLsn *lsn);
- int log_get(DbLsn *lsn, Dbt *data, u_int32_t flags);
- int log_put(DbLsn *lsn, const Dbt *data, u_int32_t flags);
-
- int log_register(Db *dbp, const char *name);
- int log_stat(DB_LOG_STAT **spp, db_malloc_fcn_type db_malloc_fcn);
- int log_unregister(Db *dbp);
-
- // Mpool functions
- //
- int memp_register(int ftype,
- pgin_fcn_type pgin_fcn,
- pgout_fcn_type pgout_fcn);
-
- int memp_stat(DB_MPOOL_STAT **gsp, DB_MPOOL_FSTAT ***fsp,
- db_malloc_fcn_type db_malloc_fcn);
- int memp_sync(DbLsn *lsn);
- int memp_trickle(int pct, int *nwrotep);
-
- // Transaction functions
- //
- int txn_begin(DbTxn *pid, DbTxn **tid, u_int32_t flags);
- int txn_checkpoint(u_int32_t kbyte, u_int32_t min, u_int32_t flags);
- int txn_stat(DB_TXN_STAT **statp, db_malloc_fcn_type db_malloc_fcn);
-
- // These are public only because they need to be called
- // via C functions. They should never be called by users
- // of this class.
- //
- static void _stream_error_function(const char *, char *);
- static int _tx_recover_intercept(DB_ENV *env, DBT *dbt, DB_LSN *lsn,
- db_recops op);
- static void _paniccall_intercept(DB_ENV *env, int errval);
- static int _recovery_init_intercept(DB_ENV *env);
- static void _feedback_intercept(DB_ENV *env, int opcode, int pct);
- static void _destroy_check(const char *str, int isDbEnv);
-
-private:
- void cleanup();
- int initialize(DB_ENV *env);
- int error_policy();
-
- // Used internally
- DbEnv(DB_ENV *, u_int32_t flags);
-
- // no copying
- DbEnv(const DbEnv &);
- void operator = (const DbEnv &);
-
- DEFINE_DB_CLASS(DbEnv);
-
- // instance data
- int construct_error_;
- u_int32_t construct_flags_;
- Db *headdb_;
- Db *taildb_;
- int (*tx_recover_callback_)(DbEnv *, Dbt *, DbLsn *, db_recops);
- int (*recovery_init_callback_)(DbEnv *);
- void (*paniccall_callback_)(DbEnv *, int);
- void (*feedback_callback_)(DbEnv *, int, int);
-
- // class data
- static ostream *error_stream_;
-};
-
-////////////////////////////////////////////////////////////////
-////////////////////////////////////////////////////////////////
-//
-// Table access classes
-//
-
-//
-// Represents a database table = a set of keys with associated values.
-//
-class _exported Db
-{
- friend class DbEnv;
-
-public:
- Db(DbEnv*, u_int32_t); // create a Db object, then call open()
- ~Db(); // does *not* call close.
-
- // These methods exactly match those in the C interface.
- //
- int close(u_int32_t flags);
- int cursor(DbTxn *txnid, Dbc **cursorp, u_int32_t flags);
- int del(DbTxn *txnid, Dbt *key, u_int32_t flags);
- void err(int, const char *, ...);
- void errx(const char *, ...);
- int fd(int *fdp);
- int get(DbTxn *txnid, Dbt *key, Dbt *data, u_int32_t flags);
- int get_byteswapped() const;
- DBTYPE get_type() const;
- int join(Dbc **curslist, Dbc **dbcp, u_int32_t flags);
- int key_range(DbTxn *, Dbt *, DB_KEY_RANGE *, u_int32_t);
- int open(const char *, const char *subname, DBTYPE, u_int32_t, int);
- int put(DbTxn *, Dbt *, Dbt *, u_int32_t);
- int remove(const char *, const char *, u_int32_t);
- int rename(const char *, const char *, const char *, u_int32_t);
- int set_bt_compare(bt_compare_fcn_type);
- int set_bt_maxkey(u_int32_t);
- int set_bt_minkey(u_int32_t);
- int set_bt_prefix(bt_prefix_fcn_type);
- int set_cachesize(u_int32_t, u_int32_t, int);
- int set_dup_compare(dup_compare_fcn_type);
- void set_errcall(void (*)(const char *, char *));
- void set_errfile(FILE *);
- void set_errpfx(const char *);
- int set_append_recno(int (*)(Db *, Dbt *, db_recno_t));
- int set_feedback(void (*)(Db *, int, int));
- int set_flags(u_int32_t);
- int set_h_ffactor(u_int32_t);
- int set_h_hash(h_hash_fcn_type);
- int set_h_nelem(u_int32_t);
- int set_lorder(int);
- int set_malloc(db_malloc_fcn_type);
- int set_pagesize(u_int32_t);
- int set_paniccall(void (*)(DbEnv *, int));
- int set_realloc(db_realloc_fcn_type);
- int set_re_delim(int);
- int set_re_len(u_int32_t);
- int set_re_pad(int);
- int set_re_source(char *);
- int set_q_extentsize(u_int32_t);
- int stat(void *sp, db_malloc_fcn_type db_malloc_fcn, u_int32_t flags);
- int sync(u_int32_t flags);
- int upgrade(const char *name, u_int32_t flags);
- int verify(const char *, const char *, ostream *, u_int32_t);
-
- // This additional method is available for C++
- //
- void set_error_stream(ostream *);
-
- // These are public only because they need to be called
- // via C functions. They should never be called by users
- // of this class.
- //
- static void _feedback_intercept(DB *db, int opcode, int pct);
- static int _append_recno_intercept(DB *db, DBT *data, db_recno_t recno);
-private:
-
- // no copying
- Db(const Db &);
- Db &operator = (const Db &);
-
- DEFINE_DB_CLASS(Db);
-
- void cleanup();
- int initialize();
- int error_policy();
-
- // instance data
- DbEnv *env_;
- Db *next_;
- Db *prev_;
- int construct_error_;
- u_int32_t flags_;
- u_int32_t construct_flags_;
- void (*feedback_callback_)(Db *, int, int);
- int (*append_recno_callback_)(Db *, Dbt *, db_recno_t);
-};
-
-//
-// A chunk of data, maybe a key or value.
-//
-class _exported Dbt : private DBT
-{
- friend class Dbc;
- friend class Db;
- friend class DbEnv;
-
-public:
-
- // key/data
- void *get_data() const;
- void set_data(void *);
-
- // key/data length
- u_int32_t get_size() const;
- void set_size(u_int32_t);
-
- // RO: length of user buffer.
- u_int32_t get_ulen() const;
- void set_ulen(u_int32_t);
-
- // RO: get/put record length.
- u_int32_t get_dlen() const;
- void set_dlen(u_int32_t);
-
- // RO: get/put record offset.
- u_int32_t get_doff() const;
- void set_doff(u_int32_t);
-
- // flags
- u_int32_t get_flags() const;
- void set_flags(u_int32_t);
-
- Dbt(void *data, size_t size);
- Dbt();
- ~Dbt();
- Dbt(const Dbt &);
- Dbt &operator = (const Dbt &);
-
-private:
- // We can add data to this class if needed
- // since parent class is not allocated by db.
- // (see comment at top)
-};
-
-class _exported Dbc : protected DBC
-{
- friend class Db;
-
-public:
- int close();
- int count(db_recno_t *countp, u_int32_t flags);
- int del(u_int32_t flags);
- int dup(Dbc** cursorp, u_int32_t flags);
- int get(Dbt* key, Dbt *data, u_int32_t flags);
- int put(Dbt* key, Dbt *data, u_int32_t flags);
-
-private:
- // No data is permitted in this class (see comment at top)
-
- // Note: use Db::cursor() to get pointers to a Dbc,
- // and call Dbc::close() rather than delete to release them.
- //
- Dbc();
- ~Dbc();
-
- // no copying
- Dbc(const Dbc &);
- Dbc &operator = (const Dbc &);
-};
-#endif /* !_DB_CXX_H_ */
diff --git a/bdb/include/debug.h b/bdb/include/debug.h
deleted file mode 100644
index 9a3ffc1acb6..00000000000
--- a/bdb/include/debug.h
+++ /dev/null
@@ -1,104 +0,0 @@
-/*-
- * See the file LICENSE for redistribution information.
- *
- * Copyright (c) 1998, 1999, 2000
- * Sleepycat Software. All rights reserved.
- *
- * $Id: debug.h,v 11.17 2000/07/07 15:50:36 bostic Exp $
- */
-
-#if defined(__cplusplus)
-extern "C" {
-#endif
-
-/*
- * When running with #DIAGNOSTIC defined, we smash memory and do memory
- * guarding with a special byte value.
- */
-#define CLEAR_BYTE 0xdb
-#define GUARD_BYTE 0xdc
-
-/*
- * DB assertions.
- */
-#if defined(DIAGNOSTIC) && defined(__STDC__)
-#define DB_ASSERT(e) ((e) ? (void)0 : __db_assert(#e, __FILE__, __LINE__))
-#else
-#define DB_ASSERT(e) ((void)0)
-#endif
-
-/*
- * Purify and other run-time tools complain about uninitialized reads/writes
- * of structure fields whose only purpose is padding, as well as when heap
- * memory that was never initialized is written to disk.
- */
-#ifdef UMRW
-#define UMRW_SET(v) (v) = 0
-#else
-#define UMRW_SET(v)
-#endif
-
-/*
- * Debugging macro to log operations.
- * If DEBUG_WOP is defined, log operations that modify the database.
- * If DEBUG_ROP is defined, log operations that read the database.
- *
- * C dbc
- * T txn
- * O operation (string)
- * K key
- * A data
- * F flags
- */
-#define LOG_OP(C, T, O, K, A, F) { \
- DB_LSN __lsn; \
- DBT __op; \
- if (DB_LOGGING((C))) { \
- memset(&__op, 0, sizeof(__op)); \
- __op.data = O; \
- __op.size = strlen(O) + 1; \
- (void)__db_debug_log((C)->dbp->dbenv, \
- T, &__lsn, 0, &__op, (C)->dbp->log_fileid, K, A, F);\
- } \
-}
-#ifdef DEBUG_ROP
-#define DEBUG_LREAD(C, T, O, K, A, F) LOG_OP(C, T, O, K, A, F)
-#else
-#define DEBUG_LREAD(C, T, O, K, A, F)
-#endif
-#ifdef DEBUG_WOP
-#define DEBUG_LWRITE(C, T, O, K, A, F) LOG_OP(C, T, O, K, A, F)
-#else
-#define DEBUG_LWRITE(C, T, O, K, A, F)
-#endif
-
-/*
- * Hook for testing recovery at various places in the create/delete paths.
- */
-#if CONFIG_TEST
-#define DB_TEST_RECOVERY(dbp, val, ret, name) \
-do { \
- int __ret; \
- PANIC_CHECK((dbp)->dbenv); \
- if ((dbp)->dbenv->test_copy == (val)) { \
- /* COPY the FILE */ \
- if (F_ISSET((dbp), DB_OPEN_CALLED) && (dbp)->mpf != NULL) \
- (void)(dbp)->sync((dbp), 0); \
- if ((__ret = __db_testcopy((dbp), (name))) != 0) \
- (ret) = __db_panic((dbp)->dbenv, __ret); \
- } \
- if ((dbp)->dbenv->test_abort == (val)) { \
- /* ABORT the TXN */ \
- (ret) = EINVAL; \
- goto db_tr_err; \
- } \
-} while (0)
-#define DB_TEST_RECOVERY_LABEL db_tr_err:
-#else
-#define DB_TEST_RECOVERY(dbp, val, ret, name)
-#define DB_TEST_RECOVERY_LABEL
-#endif
-
-#if defined(__cplusplus)
-}
-#endif
diff --git a/bdb/include/log.h b/bdb/include/log.h
deleted file mode 100644
index 81ecb4174a6..00000000000
--- a/bdb/include/log.h
+++ /dev/null
@@ -1,209 +0,0 @@
-/*-
- * See the file LICENSE for redistribution information.
- *
- * Copyright (c) 1996, 1997, 1998, 1999, 2000
- * Sleepycat Software. All rights reserved.
- *
- * $Id: log.h,v 11.19 2001/01/11 18:19:52 bostic Exp $
- */
-
-#ifndef _LOG_H_
-#define _LOG_H_
-
-struct __db_log; typedef struct __db_log DB_LOG;
-struct __fname; typedef struct __fname FNAME;
-struct __hdr; typedef struct __hdr HDR;
-struct __log; typedef struct __log LOG;
-struct __log_persist; typedef struct __log_persist LOGP;
-
-#define LFPREFIX "log." /* Log file name prefix. */
-#define LFNAME "log.%010d" /* Log file name template. */
-#define LFNAME_V1 "log.%05d" /* Log file name template, rev 1. */
-
-#define LG_MAX_DEFAULT (10 * MEGABYTE) /* 10 MB. */
-#define LG_BSIZE_DEFAULT (32 * 1024) /* 32 KB. */
-#define LG_BASE_REGION_SIZE (480 * 1024) /* 480 KB. */
-
-/*
- * The per-process table that maps log file-id's to DB structures.
- */
-typedef struct __db_entry {
- TAILQ_HEAD(dblist, __db) dblist; /* Associated DB structures. */
- u_int32_t refcount; /* Reference counted. */
- u_int32_t count; /* Number of ops on a deleted db. */
- int deleted; /* File was not found during open. */
-} DB_ENTRY;
-
-/*
- * DB_LOG
- * Per-process log structure.
- */
-struct __db_log {
-/*
- * These fields need to be protected for multi-threaded support.
- *
- * !!!
- * As this structure is allocated in per-process memory, the mutex may need
- * to be stored elsewhere on architectures unable to support mutexes in heap
- * memory, e.g., HP/UX 9.
- */
- MUTEX *mutexp; /* Mutex for thread protection. */
-
- DB_ENTRY *dbentry; /* Recovery file-id mapping. */
-#define DB_GROW_SIZE 64
- int32_t dbentry_cnt; /* Entries. Grows by DB_GROW_SIZE. */
-
-/*
- * These fields are always accessed while the region lock is held, so they do
- * not have to be protected by the thread lock as well, OR, they are only used
- * when threads are not being used, i.e. most cursor operations are disallowed
- * on threaded logs.
- */
- u_int32_t lfname; /* Log file "name". */
- DB_FH lfh; /* Log file handle. */
-
- DB_LSN c_lsn; /* Cursor: current LSN. */
- DBT c_dbt; /* Cursor: return DBT structure. */
- DB_FH c_fh; /* Cursor: file handle. */
- FILE *c_fp; /* Cursor: file pointer. */
- u_int32_t c_off; /* Cursor: previous record offset. */
- u_int32_t c_len; /* Cursor: current record length. */
- u_int32_t r_file; /* Cursor: current read file */
- u_int32_t r_off; /* Cursor: offset of read buffer. */
- u_int32_t r_size; /* Cursor: size of data in read buf. */
-
- u_int8_t *bufp; /* Region buffer. */
- u_int8_t *readbufp; /* Read buffer. */
-
-/* These fields are not protected. */
- DB_ENV *dbenv; /* Reference to error information. */
- REGINFO reginfo; /* Region information. */
-
-/*
- * These fields are used by XA; since XA forbids threaded execution, these
- * do not have to be protected.
- */
- void *xa_info; /* Committed transaction list that
- * has to be carried between calls
- * to xa_recover. */
- DB_LSN xa_lsn; /* Position of an XA recovery scan. */
- DB_LSN xa_first; /* LSN to which we need to roll back
- for this XA recovery scan. */
-
-#define DBLOG_RECOVER 0x01 /* We are in recovery. */
-#define DBLOG_FORCE_OPEN 0x02 /* Force the db open even
- * if it appears to be deleted.
- */
- u_int32_t flags;
-};
-
-/*
- * HDR --
- * Log record header.
- */
-struct __hdr {
- u_int32_t prev; /* Previous offset. */
- u_int32_t cksum; /* Current checksum. */
- u_int32_t len; /* Current length. */
-};
-
-struct __log_persist {
- u_int32_t magic; /* DB_LOGMAGIC */
- u_int32_t version; /* DB_LOGVERSION */
-
- u_int32_t lg_max; /* Maximum file size. */
- int mode; /* Log file mode. */
-};
-
-/*
- * LOG --
- * Shared log region. One of these is allocated in shared memory,
- * and describes the log.
- */
-struct __log {
- LOGP persist; /* Persistent information. */
-
- SH_TAILQ_HEAD(__fq) fq; /* List of file names. */
-
- /*
- * The lsn LSN is the file offset that we're about to write and which
- * we will return to the user.
- */
- DB_LSN lsn; /* LSN at current file offset. */
-
- /*
- * The s_lsn LSN is the last LSN that we know is on disk, not just
- * written, but synced.
- */
- DB_LSN s_lsn; /* LSN of the last sync. */
-
- u_int32_t len; /* Length of the last record. */
-
- u_int32_t w_off; /* Current write offset in the file. */
-
- DB_LSN chkpt_lsn; /* LSN of the last checkpoint. */
- time_t chkpt; /* Time of the last checkpoint. */
-
- DB_LOG_STAT stat; /* Log statistics. */
-
- /*
- * The f_lsn LSN is the LSN (returned to the user) that "owns" the
- * first byte of the buffer. If the record associated with the LSN
- * spans buffers, it may not reflect the physical file location of
- * the first byte of the buffer.
- */
- DB_LSN f_lsn; /* LSN of first byte in the buffer. */
- size_t b_off; /* Current offset in the buffer. */
-
- roff_t buffer_off; /* Log buffer offset. */
- u_int32_t buffer_size; /* Log buffer size. */
-};
-
-/*
- * FNAME --
- * File name and id.
- */
-struct __fname {
- SH_TAILQ_ENTRY q; /* File name queue. */
-
- u_int16_t ref; /* Reference count. */
- u_int16_t locked; /* Table is locked. */
-
- int32_t id; /* Logging file id. */
- DBTYPE s_type; /* Saved DB type. */
-
- roff_t name_off; /* Name offset. */
- db_pgno_t meta_pgno; /* Page number of the meta page. */
- u_int8_t ufid[DB_FILE_ID_LEN]; /* Unique file id. */
-};
-
-/* File open/close register log record opcodes. */
-#define LOG_CHECKPOINT 1 /* Checkpoint: file name/id dump. */
-#define LOG_CLOSE 2 /* File close. */
-#define LOG_OPEN 3 /* File open. */
-
-#define CHECK_LSN(redo, cmp, lsn, prev) \
- DB_ASSERT(!DB_REDO(redo) || (cmp) >= 0); \
- if (DB_REDO(redo) && (cmp) < 0) { \
- __db_err(dbenv, \
- "Log sequence error: page LSN %lu:%lu; previous LSN %lu %lu", \
- (u_long)(lsn)->file, (u_long)(lsn)->offset, \
- (u_long)(prev)->file, (u_long)(prev)->offset); \
- goto out; \
- }
-
-/*
- * Status codes indicating the validity of a log file examined by
- * __log_valid().
- */
-typedef enum {
- DB_LV_INCOMPLETE,
- DB_LV_NONEXISTENT,
- DB_LV_NORMAL,
- DB_LV_OLD_READABLE,
- DB_LV_OLD_UNREADABLE
-} logfile_validity;
-
-#include "log_auto.h"
-#include "log_ext.h"
-#endif /* _LOG_H_ */
diff --git a/bdb/include/mp.h b/bdb/include/mp.h
deleted file mode 100644
index 233cb1c2b10..00000000000
--- a/bdb/include/mp.h
+++ /dev/null
@@ -1,244 +0,0 @@
-/*-
- * See the file LICENSE for redistribution information.
- *
- * Copyright (c) 1996, 1997, 1998, 1999, 2000
- * Sleepycat Software. All rights reserved.
- *
- * $Id: mp.h,v 11.16 2001/01/10 04:50:53 ubell Exp $
- */
-
-struct __bh; typedef struct __bh BH;
-struct __db_mpool; typedef struct __db_mpool DB_MPOOL;
-struct __db_mpreg; typedef struct __db_mpreg DB_MPREG;
-struct __mpool; typedef struct __mpool MPOOL;
-struct __mpoolfile; typedef struct __mpoolfile MPOOLFILE;
-
-/* We require at least 20KB of cache. */
-#define DB_CACHESIZE_MIN (20 * 1024)
-
-/*
- * DB_MPOOL --
- * Per-process memory pool structure.
- */
-struct __db_mpool {
- /* These fields need to be protected for multi-threaded support. */
- MUTEX *mutexp; /* Structure thread lock. */
-
- /* List of pgin/pgout routines. */
- LIST_HEAD(__db_mpregh, __db_mpreg) dbregq;
-
- /* List of DB_MPOOLFILE's. */
- TAILQ_HEAD(__db_mpoolfileh, __db_mpoolfile) dbmfq;
-
- /* These fields are not thread-protected. */
- DB_ENV *dbenv; /* Reference to error information. */
-
- u_int32_t nreg; /* N underlying cache regions. */
- REGINFO *reginfo; /* Underlying cache regions. */
-};
-
-/*
- * DB_MPREG --
- * DB_MPOOL registry of pgin/pgout functions.
- */
-struct __db_mpreg {
- LIST_ENTRY(__db_mpreg) q; /* Linked list. */
-
- int ftype; /* File type. */
- /* Pgin, pgout routines. */
- int (*pgin) __P((DB_ENV *, db_pgno_t, void *, DBT *));
- int (*pgout) __P((DB_ENV *, db_pgno_t, void *, DBT *));
-};
-
-/*
- * DB_MPOOLFILE --
- * Per-process DB_MPOOLFILE information.
- */
-struct __db_mpoolfile {
- /* These fields need to be protected for multi-threaded support. */
- MUTEX *mutexp; /* Structure thread lock. */
-
- DB_FH fh; /* Underlying file handle. */
-
- u_int32_t ref; /* Reference count. */
-
- /*
- * !!!
- * This field is a special case -- it's protected by the region lock
- * NOT the thread lock. The reason for this is that we always have
- * the region lock immediately before or after we modify the field,
- * and we don't want to use the structure lock to protect it because
- * then I/O (which is done with the structure lock held because of
- * the race between the seek and write of the file descriptor) will
- * block any other put/get calls using this DB_MPOOLFILE structure.
- */
- u_int32_t pinref; /* Pinned block reference count. */
-
- /*
- * !!!
- * This field is a special case -- it's protected by the region lock
- * since it's manipulated only when new files are added to the list.
- */
- TAILQ_ENTRY(__db_mpoolfile) q; /* Linked list of DB_MPOOLFILE's. */
-
- /* These fields are not thread-protected. */
- DB_MPOOL *dbmp; /* Overlying DB_MPOOL. */
- MPOOLFILE *mfp; /* Underlying MPOOLFILE. */
-
- void *addr; /* Address of mmap'd region. */
- size_t len; /* Length of mmap'd region. */
-
- /* These fields need to be protected for multi-threaded support. */
-#define MP_READONLY 0x01 /* File is readonly. */
-#define MP_UPGRADE 0x02 /* File descriptor is readwrite. */
-#define MP_UPGRADE_FAIL 0x04 /* Upgrade wasn't possible. */
- u_int32_t flags;
-};
-
-/*
- * NCACHE --
- * Select a cache based on the page number. This assumes accesses are
- * uniform across pages, which is probably OK -- what we really want to
- * avoid is anything that puts all the pages for any single file in the
- * same cache, as we expect that file access will be bursty.
- */
-#define NCACHE(mp, pgno) \
- ((pgno) % ((MPOOL *)mp)->nreg)
-
-/*
- * NBUCKET --
- * We make the assumption that early pages of the file are more likely
- * to be retrieved than the later pages, which means the top bits will
- * be more interesting for hashing as they're less likely to collide.
- * That said, since 512 8K pages represent a 4MB file, only reasonably
- * large files will have page numbers with anything other than the bottom
- * 9 bits set. We XOR in the MPOOL offset of the MPOOLFILE that backs the
- * page, since that should also be unique for the page. We don't want
- * to do anything very fancy -- speed is more important to us than using
- * good hashing.
- */
-#define NBUCKET(mc, mf_offset, pgno) \
- (((pgno) ^ ((mf_offset) << 9)) % (mc)->htab_buckets)
-
-/*
- * MPOOL --
- * Shared memory pool region.
- */
-struct __mpool {
- /*
- * The memory pool can be broken up into individual pieces/files.
- * Not what we would have liked, but on Solaris you can allocate
- * only a little more than 2GB of memory in a contiguous chunk,
- * and I expect to see more systems with similar issues.
- *
- * The first of these pieces/files describes the entire pool; all
- * subsequent ones describe only a part of the cache.
- *
- * We single-thread memp_sync and memp_fsync calls.
- *
- * This mutex is intended *only* to single-thread access to the call;
- * it is not used to protect the lsn and lsn_cnt fields, which are
- * protected by the region lock.
- */
- MUTEX sync_mutex; /* Checkpoint lock. */
- DB_LSN lsn; /* Maximum checkpoint LSN. */
- u_int32_t lsn_cnt; /* Checkpoint buffers left to write. */
-
- SH_TAILQ_HEAD(__mpfq) mpfq; /* List of MPOOLFILEs. */
-
- u_int32_t nreg; /* Number of underlying REGIONS. */
- roff_t regids; /* Array of underlying REGION Ids. */
-
-#define MP_LSN_RETRY 0x01 /* Retry all BH_WRITE buffers. */
- u_int32_t flags;
-
- /*
- * The following structure fields only describe the cache portion of
- * the region.
- */
- SH_TAILQ_HEAD(__bhq) bhq; /* LRU list of buffer headers. */
-
- int htab_buckets; /* Number of hash table entries. */
- roff_t htab; /* Hash table offset. */
-
- DB_MPOOL_STAT stat; /* Per-cache mpool statistics. */
-#ifdef MUTEX_SYSTEM_RESOURCES
- roff_t maint_off; /* Maintenance information offset */
-#endif
-};
-
-/*
- * MPOOLFILE --
- * Shared DB_MPOOLFILE information.
- */
-struct __mpoolfile {
- SH_TAILQ_ENTRY q; /* List of MPOOLFILEs */
-
- db_pgno_t mpf_cnt; /* Ref count: DB_MPOOLFILEs. */
- db_pgno_t block_cnt; /* Ref count: blocks in cache. */
- db_pgno_t lsn_cnt; /* Checkpoint buffers left to write. */
-
- int ftype; /* File type. */
- int32_t lsn_off; /* Page's LSN offset. */
- u_int32_t clear_len; /* Bytes to clear on page create. */
-
- roff_t path_off; /* File name location. */
- roff_t fileid_off; /* File identification location. */
-
- roff_t pgcookie_len; /* Pgin/pgout cookie length. */
- roff_t pgcookie_off; /* Pgin/pgout cookie location. */
-
- db_pgno_t last_pgno; /* Last page in the file. */
- db_pgno_t orig_last_pgno; /* Original last page in the file. */
-
- DB_MPOOL_FSTAT stat; /* Per-file mpool statistics. */
-
-#define MP_CAN_MMAP 0x01 /* If the file can be mmap'd. */
-#define MP_DEADFILE 0x02 /* Dirty pages can simply be trashed. */
-#define MP_TEMP 0x04 /* Backing file is a temporary. */
-#define MP_UNLINK 0x08 /* Unlink file on last close. */
- u_int32_t flags;
-};
-
-/*
- * BH_TO_CACHE --
- * Return the cache where we can find the specified buffer header.
- */
-#define BH_TO_CACHE(dbmp, bhp) \
- (dbmp)->reginfo[NCACHE((dbmp)->reginfo[0].primary, (bhp)->pgno)].primary
-
-/*
- * BH --
- * Buffer header.
- */
-struct __bh {
- MUTEX mutex; /* Buffer thread/process lock. */
-
- u_int16_t ref; /* Reference count. */
-
-#define BH_CALLPGIN 0x001 /* Page needs to be reworked... */
-#define BH_DIRTY 0x002 /* Page was modified. */
-#define BH_DISCARD 0x004 /* Page is useless. */
-#define BH_LOCKED 0x008 /* Page is locked (I/O in progress). */
-#define BH_SYNC 0x010 /* memp sync: write the page */
-#define BH_SYNC_LOGFLSH 0x020 /* memp sync: also flush the log */
-#define BH_TRASH 0x040 /* Page is garbage. */
- u_int16_t flags;
-
- SH_TAILQ_ENTRY q; /* LRU queue. */
- SH_TAILQ_ENTRY hq; /* MPOOL hash bucket queue. */
-
- db_pgno_t pgno; /* Underlying MPOOLFILE page number. */
- roff_t mf_offset; /* Associated MPOOLFILE offset. */
-
- /*
- * !!!
- * This array must be at least size_t aligned -- the DB access methods
- * put PAGE and other structures into it, and then access them directly.
- * (We guarantee size_t alignment to applications in the documentation,
- * too.)
- */
- u_int8_t buf[1]; /* Variable length data. */
-};
-
-#include "mp_ext.h"
diff --git a/bdb/include/os_jump.h b/bdb/include/os_jump.h
deleted file mode 100644
index 681ba82d5eb..00000000000
--- a/bdb/include/os_jump.h
+++ /dev/null
@@ -1,34 +0,0 @@
-/*-
- * See the file LICENSE for redistribution information.
- *
- * Copyright (c) 1997, 1998, 1999, 2000
- * Sleepycat Software. All rights reserved.
- *
- * $Id: os_jump.h,v 11.3 2000/02/14 02:59:55 bostic Exp $
- */
-
-/* Calls which can be replaced by the application. */
-struct __db_jumptab {
- int (*j_close) __P((int));
- void (*j_dirfree) __P((char **, int));
- int (*j_dirlist) __P((const char *, char ***, int *));
- int (*j_exists) __P((const char *, int *));
- void (*j_free) __P((void *));
- int (*j_fsync) __P((int));
- int (*j_ioinfo) __P((const char *,
- int, u_int32_t *, u_int32_t *, u_int32_t *));
- void *(*j_malloc) __P((size_t));
- int (*j_map) __P((char *, size_t, int, int, void **));
- int (*j_open) __P((const char *, int, ...));
- ssize_t (*j_read) __P((int, void *, size_t));
- void *(*j_realloc) __P((void *, size_t));
- int (*j_rename) __P((const char *, const char *));
- int (*j_seek) __P((int, size_t, db_pgno_t, u_int32_t, int, int));
- int (*j_sleep) __P((u_long, u_long));
- int (*j_unlink) __P((const char *));
- int (*j_unmap) __P((void *, size_t));
- ssize_t (*j_write) __P((int, const void *, size_t));
- int (*j_yield) __P((void));
-};
-
-extern struct __db_jumptab __db_jump;
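
The __db_jumptab structure above is a table of function pointers through which an application can substitute its own versions of the library's OS calls. As a rough Java analogue (illustration only, not part of the Berkeley DB API; the names here are invented), the same pattern is an interface of OS operations plus a single replaceable instance:

    // Illustrative analogue of struct __db_jumptab; all names are hypothetical.
    interface OsCalls {
        int open(String path, int flags);
        int read(int fd, byte[] buf);
        int write(int fd, byte[] buf);
        int close(int fd);
    }

    final class OsJump {
        // The "jump table": a default, stubbed implementation that an
        // application could replace by assigning its own OsCalls object.
        static OsCalls current = new OsCalls() {
            public int open(String path, int flags) { return -1; }
            public int read(int fd, byte[] buf)     { return 0;  }
            public int write(int fd, byte[] buf)    { return 0;  }
            public int close(int fd)                { return 0;  }
        };
    }
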
diff --git a/bdb/java/src/com/sleepycat/db/Db.java b/bdb/java/src/com/sleepycat/db/Db.java
index de11e28414a..df311795f54 100644
--- a/bdb/java/src/com/sleepycat/db/Db.java
+++ b/bdb/java/src/com/sleepycat/db/Db.java
@@ -1,10 +1,10 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1997, 1998, 1999, 2000
- * Sleepycat Software. All rights reserved.
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
*
- * $Id: Db.java,v 11.38 2000/12/31 19:26:22 bostic Exp $
+ * $Id: Db.java,v 11.110 2002/09/09 20:47:31 bostic Exp $
*/
package com.sleepycat.db;
@@ -18,228 +18,180 @@ import java.io.FileNotFoundException;
*/
public class Db
{
- // All constant and flag values used with Db* classes are defined here.
-
- // Collectively, these constants are known by the name
- // "DBTYPE" in the documentation.
- //
- public static final int DB_BTREE = 1; // B+tree
- public static final int DB_HASH = 2; // Extended Linear Hashing.
- public static final int DB_RECNO = 3; // Fixed and variable-length records.
- public static final int DB_QUEUE = 4; // Queue
- public static final int DB_UNKNOWN = 5; // Figure it out on open.
-
- // Flags understood by DbEnv()
- //
- // Note: DB_CXX_NO_EXCEPTIONS will have no effect in Java.
- //
- public static final int DB_CXX_NO_EXCEPTIONS; // C++: return error values
- public static final int DB_CLIENT; // Open for a client environment.
-
- // Flags understood by Db()
- //
- public static final int DB_XA_CREATE; // Open in an XA environment.
-
- // Flags understood by Db.open(), DbEnv.open().
- //
- public static final int DB_CREATE; // O_CREAT: create file as necessary.
- public static final int DB_NOMMAP; // Don't mmap underlying file.
- public static final int DB_THREAD; // Free-thread DB package handles.
-
- // Flags understood by only DbEnv.open().
- //
- public static final int DB_LOCKDOWN; // Lock memory into physical core.
- public static final int DB_PRIVATE; // DB_ENV is process local.
-
- //
- // Flags understood by DbEnv.txn_begin().
- //
- public static final int DB_TXN_NOWAIT; // Do not wait for locks in this TXN.
- public static final int DB_TXN_SYNC; // Always sync log on commit.
-
- // Flags understood by DbEnv.set_flags().
- //
- public static final int DB_CDB_ALLDB; // In CDB, lock across environment.
-
- //
- // Flags understood by Db.open().
- //
- public static final int DB_EXCL; // Exclusive open (O_EXCL).
- public static final int DB_RDONLY; // Read-only (O_RDONLY).
- public static final int DB_TRUNCATE; // Discard existing DB.
- public static final int DB_UPGRADE; // Upgrade if necessary.
-
- //
- // DB (user visible) error return codes.
- //
- public static final int DB_INCOMPLETE = -30999; // Sync didn't finish.
- public static final int DB_KEYEMPTY = -30998; // The key/data pair was deleted or
- // was never created by the user.
- public static final int DB_KEYEXIST = -30997; // The key/data pair already exists.
- public static final int DB_LOCK_DEADLOCK = -30996; // Locker killed to resolve deadlock.
- public static final int DB_LOCK_NOTGRANTED = -30995; // Lock unavailable, no-wait set.
- public static final int DB_NOSERVER = -30994; // Server panic return.
- public static final int DB_NOSERVER_HOME = -30993; // Bad home sent to server.
- public static final int DB_NOSERVER_ID = -30992; // Bad ID sent to server.
- public static final int DB_NOTFOUND = -30991; // Key/data pair not found (EOF).
- public static final int DB_OLD_VERSION = -30990; // Out-of-date version.
- public static final int DB_RUNRECOVERY = -30989; // Panic return.
- public static final int DB_VERIFY_BAD = -30988; // Verify failed; bad format.
-
- //
- // Flags used by DbEnv.open and DbEnv.remove.
- //
- public static final int DB_FORCE; // Force (anything).
- public static final int DB_INIT_CDB; // Concurrent Access Methods.
- public static final int DB_INIT_LOCK; // Initialize locking.
- public static final int DB_INIT_LOG; // Initialize logging.
- public static final int DB_INIT_MPOOL; // Initialize mpool.
- public static final int DB_INIT_TXN; // Initialize transactions.
- public static final int DB_JOINENV; // Initialize all subsystems present.
- public static final int DB_RECOVER; // Run normal recovery.
- public static final int DB_RECOVER_FATAL; // Run catastrophic recovery.
- public static final int DB_SYSTEM_MEM; // Use system-backed memory.
- public static final int DB_TXN_NOSYNC; // Do not sync log on commit.
- public static final int DB_USE_ENVIRON; // Use the environment.
- public static final int DB_USE_ENVIRON_ROOT; // Use the environment if root.
-
- //
- // Operations values to the tx_recover() function.
- //
- public static final int DB_TXN_BACKWARD_ROLL = 1;
- public static final int DB_TXN_FORWARD_ROLL = 2;
- public static final int DB_TXN_OPENFILES = 3;
- public static final int DB_TXN_REDO = 4;
- public static final int DB_TXN_UNDO = 5;
-
- //
- // Verbose flags; used for DbEnv.set_verbose
- //
- public static final int DB_VERB_CHKPOINT; // List checkpoints.
- public static final int DB_VERB_DEADLOCK; // Deadlock detection information.
- public static final int DB_VERB_RECOVERY; // Recovery information.
- public static final int DB_VERB_WAITSFOR; // Dump waits-for table.
-
- //
- // Deadlock detector modes; used in the DBENV structure to configure the
- // locking subsystem.
- //
- public static final int DB_LOCK_NORUN;
- public static final int DB_LOCK_DEFAULT;
- public static final int DB_LOCK_OLDEST;
- public static final int DB_LOCK_RANDOM;
- public static final int DB_LOCK_YOUNGEST;
-
- //
- // Flags understood by only Db.set_flags.
- //
- public static final int DB_DUP; // Btree, Hash: duplicate keys.
- public static final int DB_DUPSORT; // Btree, Hash: duplicate keys.
- public static final int DB_RECNUM; // Btree: record numbers.
- public static final int DB_RENUMBER; // Recno: renumber on insert/delete.
- public static final int DB_REVSPLITOFF;// Btree: turn off reverse splits.
- public static final int DB_SNAPSHOT; // Recno: snapshot the input.
-
- //
- // Flags understood by only Db.join
- //
- public static final int DB_JOIN_NOSORT;// Don't try to optimize join.
-
- //
- // Flags understood by only Db.verify
- //
- public static final int DB_NOORDERCHK; // Skip order check; subdb w/ user func
- public static final int DB_ORDERCHKONLY;// Only perform an order check on subdb
- public static final int DB_SALVAGE; // Salvage what looks like data.
- public static final int DB_AGGRESSIVE; // Salvage anything which might be data.
-
- // Collectively, these constants are known by the name
- // "db_lockmode_t" in the documentation.
- //
- public static final int DB_LOCK_NG = 0; // Not granted.
- public static final int DB_LOCK_READ = 1; // Shared/read.
- public static final int DB_LOCK_WRITE = 2; // Exclusive/write.
- public static final int DB_LOCK_IWRITE = 3; // Intent exclusive/write.
- public static final int DB_LOCK_IREAD = 4; // Intent to share/read.
- public static final int DB_LOCK_IWR = 5; // Intent to read and write.
-
- // Collectively, these constants are known by the name
- // "db_lockop_t" in the documentation.
- //
- public static final int DB_LOCK_DUMP = 0; // Display held locks.
- public static final int DB_LOCK_GET = 1; // Get the lock.
- /* Not visible to API: DB_LOCK_INHERIT = 2 // Pass locks to parent. */
- public static final int DB_LOCK_PUT = 3; // Release the lock.
- public static final int DB_LOCK_PUT_ALL = 4;// Release locker's locks.
- public static final int DB_LOCK_PUT_OBJ = 5;// Release locker's locks on obj.
-
- // Flag values for DbLock.vec()
- public static final int DB_LOCK_NOWAIT; // Don't wait on unavailable lock.
-
- // Flag values for DbLock.detect()
- public static final int DB_LOCK_CONFLICT; // Run on any conflict.
-
- //
- // Flag values for DbLog.archive()
- //
- public static final int DB_ARCH_ABS; // Absolute pathnames.
- public static final int DB_ARCH_DATA; // Data files.
- public static final int DB_ARCH_LOG; // Log files.
-
- //
- // DB access method and cursor operation values.
- // Each value is an operation code to which
- // additional bit flags are added.
- //
- public static final int DB_AFTER; // Dbc.put()
- public static final int DB_APPEND; // Db.put()
- public static final int DB_BEFORE; // Dbc.put()
- public static final int DB_CACHED_COUNTS; // Db.stat()
- public static final int DB_CHECKPOINT; // DbLog.put(), DbLog.get()
- public static final int DB_CONSUME; // Db.get()
- public static final int DB_CONSUME_WAIT; // Db.get()
- public static final int DB_CURLSN; // DbLog.put()
- public static final int DB_CURRENT; // Dbc.get(), Dbc.put(), DbLog.get()
- public static final int DB_FIRST; // Dbc.get(), DbLog.get()
- public static final int DB_FLUSH; // DbLog.put()
- public static final int DB_GET_BOTH; // Db.get(), Dbc.get()
- public static final int DB_GET_RECNO; // Dbc.get()
- public static final int DB_JOIN_ITEM; // Dbc.get()
- public static final int DB_KEYFIRST; // Dbc.put()
- public static final int DB_KEYLAST; // Dbc.put()
- public static final int DB_LAST; // Dbc.get(), DbLog.get()
- public static final int DB_NEXT; // Dbc.get(), DbLog.get()
- public static final int DB_NEXT_DUP; // Dbc.get()
- public static final int DB_NEXT_NODUP; // Dbc.get()
- public static final int DB_NODUPDATA; // Don't permit duplicated data
- public static final int DB_NOOVERWRITE;// Db.put()
- public static final int DB_NOSYNC; // Db.close()
- public static final int DB_POSITION; // Dbc.dup()
- public static final int DB_PREV; // Dbc.get(), DbLog.get()
- public static final int DB_PREV_NODUP; // Dbc.get()
- public static final int DB_RECORDCOUNT;// Db.stat()
- public static final int DB_SET; // Dbc.get(), DbLog.get()
- public static final int DB_SET_RANGE; // Dbc.get()
- public static final int DB_SET_RECNO; // Dbc.get()
- public static final int DB_WRITECURSOR;// Db.cursor()
-
- // Other flags that can be added to an operation codes above.
- //
- public static final int DB_RMW; // Acquire write flag immediately.
-
- // Collectively, these values are used for Dbt flags
- //
- // Return in allocated memory.
+ // BEGIN-JAVA-SPECIAL-CONSTANTS
+ /* DO NOT EDIT: automatically built by dist/s_java. */
+ public static final int DB_BTREE = 1;
+ public static final int DB_DONOTINDEX = -30999;
+ public static final int DB_HASH = 2;
+ public static final int DB_KEYEMPTY = -30998;
+ public static final int DB_KEYEXIST = -30997;
+ public static final int DB_LOCK_DEADLOCK = -30996;
+ public static final int DB_LOCK_NOTGRANTED = -30995;
+ public static final int DB_NOSERVER = -30994;
+ public static final int DB_NOSERVER_HOME = -30993;
+ public static final int DB_NOSERVER_ID = -30992;
+ public static final int DB_NOTFOUND = -30991;
+ public static final int DB_OLD_VERSION = -30990;
+ public static final int DB_PAGE_NOTFOUND = -30989;
+ public static final int DB_QUEUE = 4;
+ public static final int DB_RECNO = 3;
+ public static final int DB_REP_DUPMASTER = -30988;
+ public static final int DB_REP_HOLDELECTION = -30987;
+ public static final int DB_REP_NEWMASTER = -30986;
+ public static final int DB_REP_NEWSITE = -30985;
+ public static final int DB_REP_OUTDATED = -30984;
+ public static final int DB_RUNRECOVERY = -30982;
+ public static final int DB_SECONDARY_BAD = -30981;
+ public static final int DB_TXN_ABORT = 0;
+ public static final int DB_TXN_APPLY = 1;
+ public static final int DB_TXN_BACKWARD_ROLL = 3;
+ public static final int DB_TXN_FORWARD_ROLL = 4;
+ public static final int DB_TXN_PRINT = 8;
+ public static final int DB_UNKNOWN = 5;
+ public static final int DB_VERIFY_BAD = -30980;
+ public static final int DB_AFTER;
+ public static final int DB_AGGRESSIVE;
+ public static final int DB_APPEND;
+ public static final int DB_ARCH_ABS;
+ public static final int DB_ARCH_DATA;
+ public static final int DB_ARCH_LOG;
+ public static final int DB_AUTO_COMMIT;
+ public static final int DB_BEFORE;
+ public static final int DB_CACHED_COUNTS;
+ public static final int DB_CDB_ALLDB;
+ public static final int DB_CHKSUM_SHA1;
+ public static final int DB_CLIENT;
+ public static final int DB_CONSUME;
+ public static final int DB_CONSUME_WAIT;
+ public static final int DB_CREATE;
+ public static final int DB_CURRENT;
+ public static final int DB_CXX_NO_EXCEPTIONS;
public static final int DB_DBT_MALLOC;
-
- // Partial put/get.
public static final int DB_DBT_PARTIAL;
-
- // Return in realloc'd memory.
public static final int DB_DBT_REALLOC;
-
- // Return in user's memory.
public static final int DB_DBT_USERMEM;
+ public static final int DB_DIRECT;
+ public static final int DB_DIRECT_DB;
+ public static final int DB_DIRECT_LOG;
+ public static final int DB_DIRTY_READ;
+ public static final int DB_DUP;
+ public static final int DB_DUPSORT;
+ public static final int DB_EID_BROADCAST;
+ public static final int DB_EID_INVALID;
+ public static final int DB_ENCRYPT;
+ public static final int DB_ENCRYPT_AES;
+ public static final int DB_EXCL;
+ public static final int DB_FAST_STAT;
+ public static final int DB_FIRST;
+ public static final int DB_FLUSH;
+ public static final int DB_FORCE;
+ public static final int DB_GET_BOTH;
+ public static final int DB_GET_BOTH_RANGE;
+ public static final int DB_GET_RECNO;
+ public static final int DB_INIT_CDB;
+ public static final int DB_INIT_LOCK;
+ public static final int DB_INIT_LOG;
+ public static final int DB_INIT_MPOOL;
+ public static final int DB_INIT_TXN;
+ public static final int DB_JOINENV;
+ public static final int DB_JOIN_ITEM;
+ public static final int DB_JOIN_NOSORT;
+ public static final int DB_KEYFIRST;
+ public static final int DB_KEYLAST;
+ public static final int DB_LAST;
+ public static final int DB_LOCKDOWN;
+ public static final int DB_LOCK_DEFAULT;
+ public static final int DB_LOCK_EXPIRE;
+ public static final int DB_LOCK_GET;
+ public static final int DB_LOCK_GET_TIMEOUT;
+ public static final int DB_LOCK_IREAD;
+ public static final int DB_LOCK_IWR;
+ public static final int DB_LOCK_IWRITE;
+ public static final int DB_LOCK_MAXLOCKS;
+ public static final int DB_LOCK_MINLOCKS;
+ public static final int DB_LOCK_MINWRITE;
+ public static final int DB_LOCK_NOWAIT;
+ public static final int DB_LOCK_OLDEST;
+ public static final int DB_LOCK_PUT;
+ public static final int DB_LOCK_PUT_ALL;
+ public static final int DB_LOCK_PUT_OBJ;
+ public static final int DB_LOCK_RANDOM;
+ public static final int DB_LOCK_READ;
+ public static final int DB_LOCK_TIMEOUT;
+ public static final int DB_LOCK_WRITE;
+ public static final int DB_LOCK_YOUNGEST;
+ public static final int DB_MULTIPLE;
+ public static final int DB_MULTIPLE_KEY;
+ public static final int DB_NEXT;
+ public static final int DB_NEXT_DUP;
+ public static final int DB_NEXT_NODUP;
+ public static final int DB_NODUPDATA;
+ public static final int DB_NOLOCKING;
+ public static final int DB_NOMMAP;
+ public static final int DB_NOORDERCHK;
+ public static final int DB_NOOVERWRITE;
+ public static final int DB_NOPANIC;
+ public static final int DB_NOSYNC;
+ public static final int DB_ODDFILESIZE;
+ public static final int DB_ORDERCHKONLY;
+ public static final int DB_OVERWRITE;
+ public static final int DB_PANIC_ENVIRONMENT;
+ public static final int DB_POSITION;
+ public static final int DB_PREV;
+ public static final int DB_PREV_NODUP;
+ public static final int DB_PRINTABLE;
+ public static final int DB_PRIORITY_DEFAULT;
+ public static final int DB_PRIORITY_HIGH;
+ public static final int DB_PRIORITY_LOW;
+ public static final int DB_PRIORITY_VERY_HIGH;
+ public static final int DB_PRIORITY_VERY_LOW;
+ public static final int DB_PRIVATE;
+ public static final int DB_RDONLY;
+ public static final int DB_RECNUM;
+ public static final int DB_RECORDCOUNT;
+ public static final int DB_RECOVER;
+ public static final int DB_RECOVER_FATAL;
+ public static final int DB_REGION_INIT;
+ public static final int DB_RENUMBER;
+ public static final int DB_REP_CLIENT;
+ public static final int DB_REP_LOGSONLY;
+ public static final int DB_REP_MASTER;
+ public static final int DB_REP_PERMANENT;
+ public static final int DB_REP_UNAVAIL;
+ public static final int DB_REVSPLITOFF;
+ public static final int DB_RMW;
+ public static final int DB_SALVAGE;
+ public static final int DB_SET;
+ public static final int DB_SET_LOCK_TIMEOUT;
+ public static final int DB_SET_RANGE;
+ public static final int DB_SET_RECNO;
+ public static final int DB_SET_TXN_TIMEOUT;
+ public static final int DB_SNAPSHOT;
+ public static final int DB_STAT_CLEAR;
+ public static final int DB_SYSTEM_MEM;
+ public static final int DB_THREAD;
+ public static final int DB_TRUNCATE;
+ public static final int DB_TXN_NOSYNC;
+ public static final int DB_TXN_NOWAIT;
+ public static final int DB_TXN_SYNC;
+ public static final int DB_TXN_WRITE_NOSYNC;
+ public static final int DB_UPGRADE;
+ public static final int DB_USE_ENVIRON;
+ public static final int DB_USE_ENVIRON_ROOT;
+ public static final int DB_VERB_CHKPOINT;
+ public static final int DB_VERB_DEADLOCK;
+ public static final int DB_VERB_RECOVERY;
+ public static final int DB_VERB_REPLICATION;
+ public static final int DB_VERB_WAITSFOR;
+ public static final int DB_VERIFY;
+ public static final int DB_VERSION_MAJOR;
+ public static final int DB_VERSION_MINOR;
+ public static final int DB_VERSION_PATCH;
+ public static final int DB_WRITECURSOR;
+ public static final int DB_XA_CREATE;
+ public static final int DB_XIDDATASIZE;
+ public static final int DB_YIELDCPU;
+ // END-JAVA-SPECIAL-CONSTANTS
// Note: the env can be null
//
@@ -265,7 +217,7 @@ public class Db
dbenv_ = null;
_notify_internal();
}
-
+
private native void _init(DbEnv env, int flags)
throws DbException;
@@ -274,17 +226,32 @@ public class Db
// methods
//
+ public synchronized void associate(DbTxn txn, Db secondary,
+ DbSecondaryKeyCreate key_creator,
+ int flags)
+ throws DbException
+ {
+ secondary.secondary_key_create_ = key_creator;
+ _associate(txn, secondary, key_creator, flags);
+ }
+
+ public native void _associate(DbTxn txn, Db secondary,
+ DbSecondaryKeyCreate key_creator, int flags)
+ throws DbException;
+
public synchronized int close(int flags)
throws DbException
{
- int err;
-
- dbenv_._remove_db(this);
- err = _close(flags);
- if (constructor_env_ == null) {
- dbenv_._notify_db_close();
+ try {
+ dbenv_._remove_db(this);
+ return _close(flags);
+ }
+ finally {
+ if (constructor_env_ == null) {
+ dbenv_._notify_db_close();
+ }
+ dbenv_ = null;
}
- return err;
}
public native int _close(int flags)
@@ -307,7 +274,10 @@ public class Db
protected void finalize()
throws Throwable
{
- _finalize(dbenv_.errcall_, dbenv_.errpfx_);
+ if (dbenv_ == null)
+ _finalize(null, null);
+ else
+ _finalize(dbenv_.errcall_, dbenv_.errpfx_);
}
protected native void _finalize(DbErrcall errcall, String errpfx)
@@ -324,36 +294,71 @@ public class Db
public native Dbc join(Dbc curslist[], int flags)
throws DbException;
- public native void key_range(DbTxn txn, Dbt key,
+ public native void key_range(DbTxn txnid, Dbt key,
DbKeyRange range, int flags)
throws DbException;
- public synchronized void open(String file, String database,
- /*DBTYPE*/ int type,
+ public synchronized void open(DbTxn txnid, String file,
+ String database, /*DBTYPE*/ int type,
int flags, int mode)
throws DbException, FileNotFoundException
{
- _open(file, database, type, flags, mode);
+ _open(txnid, file, database, type, flags, mode);
}
-
+
// (Internal)
- public native void _open(String file, String database,
- /*DBTYPE*/ int type,
+ public native void _open(DbTxn txnid, String file,
+ String database, /*DBTYPE*/ int type,
int flags, int mode)
throws DbException, FileNotFoundException;
-
+
+
+ // returns: 0, DB_NOTFOUND, or throws error
+ public native int pget(DbTxn txnid, Dbt key, Dbt pkey, Dbt data, int flags)
+ throws DbException;
// returns: 0, DB_KEYEXIST, or throws error
public native int put(DbTxn txnid, Dbt key, Dbt data, int flags)
throws DbException;
- public synchronized native void rename(String file, String database,
- String newname, int flags)
- throws DbException, FileNotFoundException;
+ public synchronized void rename(String file, String database,
+ String newname, int flags)
+ throws DbException, FileNotFoundException
+ {
+ try {
+ _rename(file, database, newname, flags);
+ }
+ finally {
+ if (constructor_env_ == null) {
+ dbenv_._notify_db_close();
+ }
+ dbenv_ = null;
+ }
+ }
- public synchronized native void remove(String file, String database,
- int flags)
- throws DbException, FileNotFoundException;
+ public native void _rename(String file, String database,
+ String newname, int flags)
+ throws DbException, FileNotFoundException;
+
+
+ public synchronized void remove(String file,
+ String database, int flags)
+ throws DbException, FileNotFoundException
+ {
+ try {
+ _remove(file, database, flags);
+ }
+ finally {
+ if (constructor_env_ == null) {
+ dbenv_._notify_db_close();
+ }
+ dbenv_ = null;
+ }
+ }
+
+ public native void _remove(String file, String database,
+ int flags)
+ throws DbException, FileNotFoundException;
// Comparison function.
public void set_append_recno(DbAppendRecno append_recno)
@@ -394,7 +399,7 @@ public class Db
bt_prefix_ = bt_prefix;
bt_prefix_changed(bt_prefix);
}
-
+
// (Internal)
private native void bt_prefix_changed(DbBtreePrefix bt_prefix)
throws DbException;
@@ -403,6 +408,10 @@ public class Db
public native void set_cachesize(int gbytes, int bytes, int ncaches)
throws DbException;
+ // Set cache priority
+ public native void set_cache_priority(/* DB_CACHE_PRIORITY */ int priority)
+ throws DbException;
+
// Duplication resolution
public void set_dup_compare(DbDupCompare dup_compare)
throws DbException
@@ -415,6 +424,10 @@ public class Db
private native void dup_compare_changed(DbDupCompare dup_compare)
throws DbException;
+ // Encryption
+ public native void set_encrypt(String passwd, /*u_int32_t*/ int flags)
+ throws DbException;
+
// Error message callback.
public void set_errcall(DbErrcall errcall)
{
@@ -435,7 +448,7 @@ public class Db
if (dbenv_ != null)
dbenv_.set_errpfx(errpfx);
}
-
+
// Feedback
public void set_feedback(DbFeedback feedback)
@@ -450,10 +463,16 @@ public class Db
throws DbException;
// Flags.
- public native void set_flags(/*u_int32_t*/ int flags);
+ public native void set_flags(/*u_int32_t*/ int flags)
+ throws DbException;
+
+ // Internal - only intended for testing purposes in the Java RPC server
+ public native int get_flags_raw()
+ throws DbException;
// Fill factor.
- public native void set_h_ffactor(/*unsigned*/ int h_ffactor);
+ public native void set_h_ffactor(/*unsigned*/ int h_ffactor)
+ throws DbException;
// Hash function.
public void set_h_hash(DbHash h_hash)
@@ -464,38 +483,49 @@ public class Db
}
// (Internal)
- private native void hash_changed(DbHash hash)
+ private native void hash_changed(DbHash hash)
throws DbException;
// Number of elements.
- public native void set_h_nelem(/*unsigned*/ int h_nelem);
+ public native void set_h_nelem(/*unsigned*/ int h_nelem)
+ throws DbException;
// Byte order.
- public native void set_lorder(int lorder);
+ public native void set_lorder(int lorder)
+ throws DbException;
// Underlying page size.
- public native void set_pagesize(/*size_t*/ long pagesize);
+ public native void set_pagesize(/*size_t*/ long pagesize)
+ throws DbException;
// Variable-length delimiting byte.
- public native void set_re_delim(int re_delim);
+ public native void set_re_delim(int re_delim)
+ throws DbException;
// Length for fixed-length records.
- public native void set_re_len(/*u_int32_t*/ int re_len);
+ public native void set_re_len(/*u_int32_t*/ int re_len)
+ throws DbException;
// Fixed-length padding byte.
- public native void set_re_pad(int re_pad);
+ public native void set_re_pad(int re_pad)
+ throws DbException;
// Source file name.
- public native void set_re_source(String re_source);
+ public native void set_re_source(String re_source)
+ throws DbException;
// Extent size of Queue
- public native void set_q_extentsize(/*u_int32_t*/ int extent_size);
+ public native void set_q_extentsize(/*u_int32_t*/ int extent_size)
+ throws DbException;
// returns a DbBtreeStat or DbHashStat
public native Object stat(int flags)
throws DbException;
- public native int sync(int flags)
+ public native void sync(int flags)
+ throws DbException;
+
+ public native int truncate(DbTxn txnid, int flags)
throws DbException;
public native void upgrade(String name, int flags)
@@ -519,6 +549,7 @@ public class Db
private DbBtreePrefix bt_prefix_ = null;
private DbDupCompare dup_compare_ = null;
private DbHash h_hash_ = null;
+ private DbSecondaryKeyCreate secondary_key_create_ = null;
////////////////////////////////////////////////////////////////
//
@@ -535,9 +566,13 @@ public class Db
// An alternate library name can be specified via a property.
//
- String overrideLibname = System.getProperty("sleepycat.db.libname");
- if (overrideLibname != null) {
- System.loadLibrary(overrideLibname);
+ String override;
+
+ if ((override = System.getProperty("sleepycat.db.libfile")) != null) {
+ System.load(override);
+ }
+ else if ((override = System.getProperty("sleepycat.db.libname")) != null) {
+ System.loadLibrary(override);
}
else {
String os = System.getProperty("os.name");
@@ -566,6 +601,7 @@ public class Db
{
if (c1 != c2) {
System.err.println("Db: constant mismatch");
+ Thread.dumpStack();
System.exit(1);
}
}
@@ -573,138 +609,153 @@ public class Db
static {
Db.load_db();
- // Note: constant values are stored in DbConstants, which
- // is automatically generated. Initializing constants in
- // static code insulates users from the possibility of
- // changing constants.
- //
- DB_CXX_NO_EXCEPTIONS = DbConstants.DB_CXX_NO_EXCEPTIONS;
- DB_CLIENT = DbConstants.DB_CLIENT;
- DB_XA_CREATE = DbConstants.DB_XA_CREATE;
-
- DB_CREATE = DbConstants.DB_CREATE;
- DB_NOMMAP = DbConstants.DB_NOMMAP;
- DB_THREAD = DbConstants.DB_THREAD;
-
- DB_LOCKDOWN = DbConstants.DB_LOCKDOWN;
- DB_PRIVATE = DbConstants.DB_PRIVATE;
- DB_TXN_NOWAIT = DbConstants.DB_TXN_NOWAIT;
- DB_TXN_SYNC = DbConstants.DB_TXN_SYNC;
- DB_CDB_ALLDB = DbConstants.DB_CDB_ALLDB;
-
- DB_EXCL = DbConstants.DB_EXCL;
- DB_RDONLY = DbConstants.DB_RDONLY;
- DB_TRUNCATE = DbConstants.DB_TRUNCATE;
- DB_UPGRADE = DbConstants.DB_UPGRADE;
-
- // These constants are not assigned, but rather checked.
- // Having initialized constants for these values allows
- // them to be used as case values in switch statements.
- //
- check_constant(DB_INCOMPLETE, DbConstants.DB_INCOMPLETE);
- check_constant(DB_KEYEMPTY, DbConstants.DB_KEYEMPTY);
- check_constant(DB_KEYEXIST, DbConstants.DB_KEYEXIST);
- check_constant(DB_LOCK_DEADLOCK, DbConstants.DB_LOCK_DEADLOCK);
- check_constant(DB_LOCK_NOTGRANTED, DbConstants.DB_LOCK_NOTGRANTED);
- check_constant(DB_NOSERVER, DbConstants.DB_NOSERVER);
- check_constant(DB_NOSERVER_HOME, DbConstants.DB_NOSERVER_HOME);
- check_constant(DB_NOSERVER_ID, DbConstants.DB_NOSERVER_ID);
- check_constant(DB_NOTFOUND, DbConstants.DB_NOTFOUND);
- check_constant(DB_OLD_VERSION, DbConstants.DB_OLD_VERSION);
- check_constant(DB_RUNRECOVERY, DbConstants.DB_RUNRECOVERY);
- check_constant(DB_VERIFY_BAD, DbConstants.DB_VERIFY_BAD);
- check_constant(DB_TXN_BACKWARD_ROLL, DbConstants.DB_TXN_BACKWARD_ROLL);
- check_constant(DB_TXN_FORWARD_ROLL, DbConstants.DB_TXN_FORWARD_ROLL);
- check_constant(DB_TXN_OPENFILES, DbConstants.DB_TXN_OPENFILES);
- check_constant(DB_TXN_REDO, DbConstants.DB_TXN_REDO);
- check_constant(DB_TXN_UNDO, DbConstants.DB_TXN_UNDO);
-
- DB_FORCE = DbConstants.DB_FORCE;
- DB_INIT_CDB = DbConstants.DB_INIT_CDB;
- DB_INIT_LOCK = DbConstants.DB_INIT_LOCK;
- DB_INIT_LOG = DbConstants.DB_INIT_LOG;
- DB_INIT_MPOOL = DbConstants.DB_INIT_MPOOL;
- DB_INIT_TXN = DbConstants.DB_INIT_TXN;
- DB_JOINENV = DbConstants.DB_JOINENV;
- DB_RECOVER = DbConstants.DB_RECOVER;
- DB_RECOVER_FATAL = DbConstants.DB_RECOVER_FATAL;
- DB_SYSTEM_MEM = DbConstants.DB_SYSTEM_MEM;
- DB_TXN_NOSYNC = DbConstants.DB_TXN_NOSYNC;
- DB_USE_ENVIRON = DbConstants.DB_USE_ENVIRON;
- DB_USE_ENVIRON_ROOT = DbConstants.DB_USE_ENVIRON_ROOT;
-
- DB_VERB_CHKPOINT = DbConstants.DB_VERB_CHKPOINT;
- DB_VERB_DEADLOCK = DbConstants.DB_VERB_DEADLOCK;
- DB_VERB_RECOVERY = DbConstants.DB_VERB_RECOVERY;
- DB_VERB_WAITSFOR = DbConstants.DB_VERB_WAITSFOR;
-
- DB_LOCK_NORUN = DbConstants.DB_LOCK_NORUN;
- DB_LOCK_DEFAULT = DbConstants.DB_LOCK_DEFAULT;
- DB_LOCK_OLDEST = DbConstants.DB_LOCK_OLDEST;
- DB_LOCK_RANDOM = DbConstants.DB_LOCK_RANDOM;
- DB_LOCK_YOUNGEST = DbConstants.DB_LOCK_YOUNGEST;
-
- DB_DUP = DbConstants.DB_DUP;
- DB_DUPSORT = DbConstants.DB_DUPSORT;
- DB_RECNUM = DbConstants.DB_RECNUM;
- DB_RENUMBER = DbConstants.DB_RENUMBER;
- DB_REVSPLITOFF = DbConstants.DB_REVSPLITOFF;
- DB_SNAPSHOT = DbConstants.DB_SNAPSHOT;
-
- DB_JOIN_NOSORT = DbConstants.DB_JOIN_NOSORT;
-
- DB_NOORDERCHK = DbConstants.DB_NOORDERCHK;
- DB_ORDERCHKONLY = DbConstants.DB_ORDERCHKONLY;
- DB_SALVAGE = DbConstants.DB_SALVAGE;
+ // BEGIN-JAVA-CONSTANT-INITIALIZATION
+ /* DO NOT EDIT: automatically built by dist/s_java. */
+ DB_AFTER = DbConstants.DB_AFTER;
DB_AGGRESSIVE = DbConstants.DB_AGGRESSIVE;
-
- DB_LOCK_NOWAIT = DbConstants.DB_LOCK_NOWAIT;
- DB_LOCK_CONFLICT = DbConstants.DB_LOCK_CONFLICT;
-
+ DB_APPEND = DbConstants.DB_APPEND;
DB_ARCH_ABS = DbConstants.DB_ARCH_ABS;
DB_ARCH_DATA = DbConstants.DB_ARCH_DATA;
DB_ARCH_LOG = DbConstants.DB_ARCH_LOG;
-
- DB_AFTER = DbConstants.DB_AFTER;
- DB_APPEND = DbConstants.DB_APPEND;
+ DB_AUTO_COMMIT = DbConstants.DB_AUTO_COMMIT;
DB_BEFORE = DbConstants.DB_BEFORE;
DB_CACHED_COUNTS = DbConstants.DB_CACHED_COUNTS;
- DB_CHECKPOINT = DbConstants.DB_CHECKPOINT;
+ DB_CDB_ALLDB = DbConstants.DB_CDB_ALLDB;
+ DB_CHKSUM_SHA1 = DbConstants.DB_CHKSUM_SHA1;
+ DB_CLIENT = DbConstants.DB_CLIENT;
DB_CONSUME = DbConstants.DB_CONSUME;
DB_CONSUME_WAIT = DbConstants.DB_CONSUME_WAIT;
- DB_CURLSN = DbConstants.DB_CURLSN;
+ DB_CREATE = DbConstants.DB_CREATE;
DB_CURRENT = DbConstants.DB_CURRENT;
+ DB_CXX_NO_EXCEPTIONS = DbConstants.DB_CXX_NO_EXCEPTIONS;
+ DB_DBT_MALLOC = DbConstants.DB_DBT_MALLOC;
+ DB_DBT_PARTIAL = DbConstants.DB_DBT_PARTIAL;
+ DB_DBT_REALLOC = DbConstants.DB_DBT_REALLOC;
+ DB_DBT_USERMEM = DbConstants.DB_DBT_USERMEM;
+ DB_DIRECT = DbConstants.DB_DIRECT;
+ DB_DIRECT_DB = DbConstants.DB_DIRECT_DB;
+ DB_DIRECT_LOG = DbConstants.DB_DIRECT_LOG;
+ DB_DIRTY_READ = DbConstants.DB_DIRTY_READ;
+ DB_DUP = DbConstants.DB_DUP;
+ DB_DUPSORT = DbConstants.DB_DUPSORT;
+ DB_EID_BROADCAST = DbConstants.DB_EID_BROADCAST;
+ DB_EID_INVALID = DbConstants.DB_EID_INVALID;
+ DB_ENCRYPT = DbConstants.DB_ENCRYPT;
+ DB_ENCRYPT_AES = DbConstants.DB_ENCRYPT_AES;
+ DB_EXCL = DbConstants.DB_EXCL;
+ DB_FAST_STAT = DbConstants.DB_FAST_STAT;
DB_FIRST = DbConstants.DB_FIRST;
DB_FLUSH = DbConstants.DB_FLUSH;
+ DB_FORCE = DbConstants.DB_FORCE;
DB_GET_BOTH = DbConstants.DB_GET_BOTH;
+ DB_GET_BOTH_RANGE = DbConstants.DB_GET_BOTH_RANGE;
DB_GET_RECNO = DbConstants.DB_GET_RECNO;
+ DB_INIT_CDB = DbConstants.DB_INIT_CDB;
+ DB_INIT_LOCK = DbConstants.DB_INIT_LOCK;
+ DB_INIT_LOG = DbConstants.DB_INIT_LOG;
+ DB_INIT_MPOOL = DbConstants.DB_INIT_MPOOL;
+ DB_INIT_TXN = DbConstants.DB_INIT_TXN;
+ DB_JOINENV = DbConstants.DB_JOINENV;
DB_JOIN_ITEM = DbConstants.DB_JOIN_ITEM;
+ DB_JOIN_NOSORT = DbConstants.DB_JOIN_NOSORT;
DB_KEYFIRST = DbConstants.DB_KEYFIRST;
DB_KEYLAST = DbConstants.DB_KEYLAST;
DB_LAST = DbConstants.DB_LAST;
+ DB_LOCKDOWN = DbConstants.DB_LOCKDOWN;
+ DB_LOCK_DEFAULT = DbConstants.DB_LOCK_DEFAULT;
+ DB_LOCK_EXPIRE = DbConstants.DB_LOCK_EXPIRE;
+ DB_LOCK_GET = DbConstants.DB_LOCK_GET;
+ DB_LOCK_GET_TIMEOUT = DbConstants.DB_LOCK_GET_TIMEOUT;
+ DB_LOCK_IREAD = DbConstants.DB_LOCK_IREAD;
+ DB_LOCK_IWR = DbConstants.DB_LOCK_IWR;
+ DB_LOCK_IWRITE = DbConstants.DB_LOCK_IWRITE;
+ DB_LOCK_MAXLOCKS = DbConstants.DB_LOCK_MAXLOCKS;
+ DB_LOCK_MINLOCKS = DbConstants.DB_LOCK_MINLOCKS;
+ DB_LOCK_MINWRITE = DbConstants.DB_LOCK_MINWRITE;
+ DB_LOCK_NOWAIT = DbConstants.DB_LOCK_NOWAIT;
+ DB_LOCK_OLDEST = DbConstants.DB_LOCK_OLDEST;
+ DB_LOCK_PUT = DbConstants.DB_LOCK_PUT;
+ DB_LOCK_PUT_ALL = DbConstants.DB_LOCK_PUT_ALL;
+ DB_LOCK_PUT_OBJ = DbConstants.DB_LOCK_PUT_OBJ;
+ DB_LOCK_RANDOM = DbConstants.DB_LOCK_RANDOM;
+ DB_LOCK_READ = DbConstants.DB_LOCK_READ;
+ DB_LOCK_TIMEOUT = DbConstants.DB_LOCK_TIMEOUT;
+ DB_LOCK_WRITE = DbConstants.DB_LOCK_WRITE;
+ DB_LOCK_YOUNGEST = DbConstants.DB_LOCK_YOUNGEST;
+ DB_MULTIPLE = DbConstants.DB_MULTIPLE;
+ DB_MULTIPLE_KEY = DbConstants.DB_MULTIPLE_KEY;
DB_NEXT = DbConstants.DB_NEXT;
DB_NEXT_DUP = DbConstants.DB_NEXT_DUP;
DB_NEXT_NODUP = DbConstants.DB_NEXT_NODUP;
DB_NODUPDATA = DbConstants.DB_NODUPDATA;
+ DB_NOLOCKING = DbConstants.DB_NOLOCKING;
+ DB_NOMMAP = DbConstants.DB_NOMMAP;
+ DB_NOORDERCHK = DbConstants.DB_NOORDERCHK;
DB_NOOVERWRITE = DbConstants.DB_NOOVERWRITE;
+ DB_NOPANIC = DbConstants.DB_NOPANIC;
DB_NOSYNC = DbConstants.DB_NOSYNC;
+ DB_ODDFILESIZE = DbConstants.DB_ODDFILESIZE;
+ DB_ORDERCHKONLY = DbConstants.DB_ORDERCHKONLY;
+ DB_OVERWRITE = DbConstants.DB_OVERWRITE;
+ DB_PANIC_ENVIRONMENT = DbConstants.DB_PANIC_ENVIRONMENT;
DB_POSITION = DbConstants.DB_POSITION;
DB_PREV = DbConstants.DB_PREV;
DB_PREV_NODUP = DbConstants.DB_PREV_NODUP;
+ DB_PRINTABLE = DbConstants.DB_PRINTABLE;
+ DB_PRIORITY_DEFAULT = DbConstants.DB_PRIORITY_DEFAULT;
+ DB_PRIORITY_HIGH = DbConstants.DB_PRIORITY_HIGH;
+ DB_PRIORITY_LOW = DbConstants.DB_PRIORITY_LOW;
+ DB_PRIORITY_VERY_HIGH = DbConstants.DB_PRIORITY_VERY_HIGH;
+ DB_PRIORITY_VERY_LOW = DbConstants.DB_PRIORITY_VERY_LOW;
+ DB_PRIVATE = DbConstants.DB_PRIVATE;
+ DB_RDONLY = DbConstants.DB_RDONLY;
+ DB_RECNUM = DbConstants.DB_RECNUM;
DB_RECORDCOUNT = DbConstants.DB_RECORDCOUNT;
+ DB_RECOVER = DbConstants.DB_RECOVER;
+ DB_RECOVER_FATAL = DbConstants.DB_RECOVER_FATAL;
+ DB_REGION_INIT = DbConstants.DB_REGION_INIT;
+ DB_RENUMBER = DbConstants.DB_RENUMBER;
+ DB_REP_CLIENT = DbConstants.DB_REP_CLIENT;
+ DB_REP_LOGSONLY = DbConstants.DB_REP_LOGSONLY;
+ DB_REP_MASTER = DbConstants.DB_REP_MASTER;
+ DB_REP_PERMANENT = DbConstants.DB_REP_PERMANENT;
+ DB_REP_UNAVAIL = DbConstants.DB_REP_UNAVAIL;
+ DB_REVSPLITOFF = DbConstants.DB_REVSPLITOFF;
DB_RMW = DbConstants.DB_RMW;
+ DB_SALVAGE = DbConstants.DB_SALVAGE;
DB_SET = DbConstants.DB_SET;
+ DB_SET_LOCK_TIMEOUT = DbConstants.DB_SET_LOCK_TIMEOUT;
DB_SET_RANGE = DbConstants.DB_SET_RANGE;
DB_SET_RECNO = DbConstants.DB_SET_RECNO;
+ DB_SET_TXN_TIMEOUT = DbConstants.DB_SET_TXN_TIMEOUT;
+ DB_SNAPSHOT = DbConstants.DB_SNAPSHOT;
+ DB_STAT_CLEAR = DbConstants.DB_STAT_CLEAR;
+ DB_SYSTEM_MEM = DbConstants.DB_SYSTEM_MEM;
+ DB_THREAD = DbConstants.DB_THREAD;
+ DB_TRUNCATE = DbConstants.DB_TRUNCATE;
+ DB_TXN_NOSYNC = DbConstants.DB_TXN_NOSYNC;
+ DB_TXN_NOWAIT = DbConstants.DB_TXN_NOWAIT;
+ DB_TXN_SYNC = DbConstants.DB_TXN_SYNC;
+ DB_TXN_WRITE_NOSYNC = DbConstants.DB_TXN_WRITE_NOSYNC;
+ DB_UPGRADE = DbConstants.DB_UPGRADE;
+ DB_USE_ENVIRON = DbConstants.DB_USE_ENVIRON;
+ DB_USE_ENVIRON_ROOT = DbConstants.DB_USE_ENVIRON_ROOT;
+ DB_VERB_CHKPOINT = DbConstants.DB_VERB_CHKPOINT;
+ DB_VERB_DEADLOCK = DbConstants.DB_VERB_DEADLOCK;
+ DB_VERB_RECOVERY = DbConstants.DB_VERB_RECOVERY;
+ DB_VERB_REPLICATION = DbConstants.DB_VERB_REPLICATION;
+ DB_VERB_WAITSFOR = DbConstants.DB_VERB_WAITSFOR;
+ DB_VERIFY = DbConstants.DB_VERIFY;
+ DB_VERSION_MAJOR = DbConstants.DB_VERSION_MAJOR;
+ DB_VERSION_MINOR = DbConstants.DB_VERSION_MINOR;
+ DB_VERSION_PATCH = DbConstants.DB_VERSION_PATCH;
DB_WRITECURSOR = DbConstants.DB_WRITECURSOR;
-
- DB_DBT_MALLOC = DbConstants.DB_DBT_MALLOC;
- DB_DBT_PARTIAL = DbConstants.DB_DBT_PARTIAL;
- DB_DBT_REALLOC = DbConstants.DB_DBT_REALLOC;
- DB_DBT_USERMEM = DbConstants.DB_DBT_USERMEM;
+ DB_XA_CREATE = DbConstants.DB_XA_CREATE;
+ DB_XIDDATASIZE = DbConstants.DB_XIDDATASIZE;
+ DB_YIELDCPU = DbConstants.DB_YIELDCPU;
+ // END-JAVA-CONSTANT-INITIALIZATION
one_time_init();
}
}
-
// end of Db.java
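
Taken together, the Db.java changes above (a DbTxn argument on open(), the new associate()/pget() pair, and close() detaching the handle in a finally block) imply usage along these lines. This is only a sketch against the 4.1 Java API as it appears in this diff; the environment path, file names, key/data values, and flag choices are arbitrary examples:

    import com.sleepycat.db.*;

    public class DbUsageSketch {
        public static void main(String[] args) throws Exception {
            DbEnv env = new DbEnv(0);
            env.open("/tmp/bdb-env", Db.DB_CREATE | Db.DB_INIT_MPOOL, 0);

            Db db = new Db(env, 0);
            // open() now takes a DbTxn first; null means no transaction.
            db.open(null, "example.db", null, Db.DB_BTREE, Db.DB_CREATE, 0644);

            Dbt key = new Dbt("fruit".getBytes());
            Dbt data = new Dbt("apple".getBytes());

            // put() returns 0, or Db.DB_KEYEXIST when DB_NOOVERWRITE is set.
            if (db.put(null, key, data, Db.DB_NOOVERWRITE) == Db.DB_KEYEXIST)
                System.err.println("key already present");

            db.close(0);    // the handle is detached even if the native close fails
            env.close(0);
        }
    }
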
diff --git a/bdb/java/src/com/sleepycat/db/DbAppDispatch.java b/bdb/java/src/com/sleepycat/db/DbAppDispatch.java
new file mode 100644
index 00000000000..de72771f3e9
--- /dev/null
+++ b/bdb/java/src/com/sleepycat/db/DbAppDispatch.java
@@ -0,0 +1,22 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: DbAppDispatch.java,v 11.6 2002/02/26 16:23:02 krinsky Exp $
+ */
+
+package com.sleepycat.db;
+
+/*
+ * This interface is used by DbEnv.set_app_dispatch()
+ *
+ */
+public interface DbAppDispatch
+{
+ // The value of recops is one of the Db.DB_TXN_* constants
+ public abstract int app_dispatch(DbEnv env, Dbt dbt, DbLsn lsn, int recops);
+}
+
+// end of DbAppDispatch.java
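
A class implementing the new DbAppDispatch interface only has to supply app_dispatch(); a skeletal example follows (the body is a placeholder, not a real application recovery routine):

    import com.sleepycat.db.*;

    public class NullAppDispatch implements DbAppDispatch {
        // recops is one of the Db.DB_TXN_* constants, e.g. Db.DB_TXN_APPLY.
        public int app_dispatch(DbEnv env, Dbt dbt, DbLsn lsn, int recops) {
            if (recops == Db.DB_TXN_PRINT)
                System.out.println("application log record at " + lsn);
            return 0;   // zero reports success back to the library
        }
    }
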
diff --git a/bdb/java/src/com/sleepycat/db/DbAppendRecno.java b/bdb/java/src/com/sleepycat/db/DbAppendRecno.java
index ffe40e95f9e..c9d205ec74d 100644
--- a/bdb/java/src/com/sleepycat/db/DbAppendRecno.java
+++ b/bdb/java/src/com/sleepycat/db/DbAppendRecno.java
@@ -1,17 +1,17 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2000
- * Sleepycat Software. All rights reserved.
+ * Copyright (c) 2000-2002
+ * Sleepycat Software. All rights reserved.
*
- * $Id: DbAppendRecno.java,v 11.1 2000/07/31 20:28:30 dda Exp $
+ * $Id: DbAppendRecno.java,v 11.5 2002/01/11 15:52:33 bostic Exp $
*/
package com.sleepycat.db;
/*
* This interface is used by Db.set_append_recno()
- *
+ *
*/
public interface DbAppendRecno
{
diff --git a/bdb/java/src/com/sleepycat/db/DbBtreeCompare.java b/bdb/java/src/com/sleepycat/db/DbBtreeCompare.java
index 2e5306af232..d738998cfb5 100644
--- a/bdb/java/src/com/sleepycat/db/DbBtreeCompare.java
+++ b/bdb/java/src/com/sleepycat/db/DbBtreeCompare.java
@@ -1,17 +1,17 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2000
- * Sleepycat Software. All rights reserved.
+ * Copyright (c) 2000-2002
+ * Sleepycat Software. All rights reserved.
*
- * $Id: DbBtreeCompare.java,v 11.2 2000/07/04 20:53:19 dda Exp $
+ * $Id: DbBtreeCompare.java,v 11.6 2002/01/11 15:52:33 bostic Exp $
*/
package com.sleepycat.db;
/*
* This interface is used by DbEnv.set_bt_compare()
- *
+ *
*/
public interface DbBtreeCompare
{
diff --git a/bdb/java/src/com/sleepycat/db/DbBtreePrefix.java b/bdb/java/src/com/sleepycat/db/DbBtreePrefix.java
index 27e63054339..4f18d8feb11 100644
--- a/bdb/java/src/com/sleepycat/db/DbBtreePrefix.java
+++ b/bdb/java/src/com/sleepycat/db/DbBtreePrefix.java
@@ -1,17 +1,17 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2000
- * Sleepycat Software. All rights reserved.
+ * Copyright (c) 2000-2002
+ * Sleepycat Software. All rights reserved.
*
- * $Id: DbBtreePrefix.java,v 11.2 2000/07/04 20:53:19 dda Exp $
+ * $Id: DbBtreePrefix.java,v 11.6 2002/01/11 15:52:33 bostic Exp $
*/
package com.sleepycat.db;
/*
* This interface is used by DbEnv.set_bt_prefix()
- *
+ *
*/
public interface DbBtreePrefix
{
diff --git a/bdb/java/src/com/sleepycat/db/DbBtreeStat.java b/bdb/java/src/com/sleepycat/db/DbBtreeStat.java
index 8dea8da107c..669afcffc88 100644
--- a/bdb/java/src/com/sleepycat/db/DbBtreeStat.java
+++ b/bdb/java/src/com/sleepycat/db/DbBtreeStat.java
@@ -1,40 +1,28 @@
-/*-
- * See the file LICENSE for redistribution information.
- *
- * Copyright (c) 1997, 1998, 1999, 2000
- * Sleepycat Software. All rights reserved.
- *
- * $Id: DbBtreeStat.java,v 11.5 2000/05/04 02:54:55 dda Exp $
- */
+/* DO NOT EDIT: automatically built by dist/s_java. */
package com.sleepycat.db;
-/*
- * This is filled in and returned by the
- * Db.stat() method.
- */
public class DbBtreeStat
{
- public int bt_magic; // Magic number.
- public int bt_version; // Version number.
- public int bt_metaflags; // Meta-data flags.
- public int bt_nkeys; // Number of unique keys.
- public int bt_ndata; // Number of data items.
- public int bt_pagesize; // Page size.
- public int bt_maxkey; // Maxkey value.
- public int bt_minkey; // Minkey value.
- public int bt_re_len; // Fixed-length record length.
- public int bt_re_pad; // Fixed-length record pad.
- public int bt_levels; // Tree levels.
- public int bt_int_pg; // Internal pages.
- public int bt_leaf_pg; // Leaf pages.
- public int bt_dup_pg; // Duplicate pages.
- public int bt_over_pg; // Overflow pages.
- public int bt_free; // Pages on the free list.
- public int bt_int_pgfree; // Bytes free in internal pages.
- public int bt_leaf_pgfree; // Bytes free in leaf pages.
- public int bt_dup_pgfree; // Bytes free in duplicate pages.
- public int bt_over_pgfree; // Bytes free in overflow pages.
+ public int bt_magic;
+ public int bt_version;
+ public int bt_metaflags;
+ public int bt_nkeys;
+ public int bt_ndata;
+ public int bt_pagesize;
+ public int bt_maxkey;
+ public int bt_minkey;
+ public int bt_re_len;
+ public int bt_re_pad;
+ public int bt_levels;
+ public int bt_int_pg;
+ public int bt_leaf_pg;
+ public int bt_dup_pg;
+ public int bt_over_pg;
+ public int bt_free;
+ public int bt_int_pgfree;
+ public int bt_leaf_pgfree;
+ public int bt_dup_pgfree;
+ public int bt_over_pgfree;
}
-
// end of DbBtreeStat.java
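
Because Db.stat() is declared to return Object, callers cast the result to the per-access-method statistics class; a minimal sketch for a btree database (it assumes db is an already-open btree Db handle):

    import com.sleepycat.db.*;

    public class BtreeStatSketch {
        // 'db' is assumed to be an open btree Db handle.
        static void printStats(Db db) throws DbException {
            DbBtreeStat stat = (DbBtreeStat) db.stat(0);
            System.out.println("keys=" + stat.bt_nkeys
                + " levels=" + stat.bt_levels
                + " free pages=" + stat.bt_free);
        }
    }
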
diff --git a/bdb/java/src/com/sleepycat/db/DbClient.java b/bdb/java/src/com/sleepycat/db/DbClient.java
new file mode 100644
index 00000000000..3b9b349c841
--- /dev/null
+++ b/bdb/java/src/com/sleepycat/db/DbClient.java
@@ -0,0 +1,21 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: DbClient.java,v 11.4 2002/01/11 15:52:33 bostic Exp $
+ */
+
+package com.sleepycat.db;
+
+/*
+ * This interface is used by DbEnv.set_rpc_server().
+ * It is a placeholder for a future capability.
+ *
+ */
+public interface DbClient
+{
+}
+
+// end of DbClient.java
diff --git a/bdb/java/src/com/sleepycat/db/DbDeadlockException.java b/bdb/java/src/com/sleepycat/db/DbDeadlockException.java
index beab2ad62fa..2540e22a782 100644
--- a/bdb/java/src/com/sleepycat/db/DbDeadlockException.java
+++ b/bdb/java/src/com/sleepycat/db/DbDeadlockException.java
@@ -1,10 +1,10 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1999, 2000
- * Sleepycat Software. All rights reserved.
+ * Copyright (c) 1999-2002
+ * Sleepycat Software. All rights reserved.
*
- * $Id: DbDeadlockException.java,v 11.3 2000/02/14 02:59:56 bostic Exp $
+ * $Id: DbDeadlockException.java,v 11.6 2002/01/11 15:52:33 bostic Exp $
*/
package com.sleepycat.db;
diff --git a/bdb/java/src/com/sleepycat/db/DbDupCompare.java b/bdb/java/src/com/sleepycat/db/DbDupCompare.java
index 3d4b5a736f8..b014c76594c 100644
--- a/bdb/java/src/com/sleepycat/db/DbDupCompare.java
+++ b/bdb/java/src/com/sleepycat/db/DbDupCompare.java
@@ -1,17 +1,17 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2000
- * Sleepycat Software. All rights reserved.
+ * Copyright (c) 2000-2002
+ * Sleepycat Software. All rights reserved.
*
- * $Id: DbDupCompare.java,v 11.2 2000/07/04 20:53:19 dda Exp $
+ * $Id: DbDupCompare.java,v 11.6 2002/01/11 15:52:34 bostic Exp $
*/
package com.sleepycat.db;
/*
* This interface is used by DbEnv.set_dup_compare()
- *
+ *
*/
public interface DbDupCompare
{
diff --git a/bdb/java/src/com/sleepycat/db/DbEnv.java b/bdb/java/src/com/sleepycat/db/DbEnv.java
index 6e9ce7ae337..f5a2e25f61b 100644
--- a/bdb/java/src/com/sleepycat/db/DbEnv.java
+++ b/bdb/java/src/com/sleepycat/db/DbEnv.java
@@ -1,10 +1,10 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1997, 1998, 1999, 2000
- * Sleepycat Software. All rights reserved.
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
*
- * $Id: DbEnv.java,v 11.25 2001/01/04 14:23:30 dda Exp $
+ * $Id: DbEnv.java,v 11.58 2002/08/29 14:22:22 margo Exp $
*/
package com.sleepycat.db;
@@ -29,7 +29,7 @@ public class DbEnv
// the set_* access methods below, and finally open
// the environment by calling open().
//
- public DbEnv(int flags)
+ public DbEnv(int flags) throws DbException
{
constructor_flags_ = flags;
_init(errstream_, constructor_flags_);
@@ -62,7 +62,7 @@ public class DbEnv
{
dblist_.addElement(db);
}
-
+
//
// Remove from the private list of Db's.
//
@@ -70,7 +70,7 @@ public class DbEnv
{
dblist_.removeElement(db);
}
-
+
//
// Iterate all the Db's in the list, and
// notify them that the environment is closing,
@@ -85,7 +85,7 @@ public class DbEnv
}
dblist_.removeAllElements();
}
-
+
// close discards any internal memory.
// After using close, the DbEnv can be reopened.
//
@@ -100,6 +100,14 @@ public class DbEnv
private native void _close(int flags)
throws DbException;
+ public native void dbremove(DbTxn txn, String name, String subdb,
+ int flags)
+ throws DbException;
+
+ public native void dbrename(DbTxn txn, String name, String subdb,
+ String newname, int flags)
+ throws DbException;
+
public native void err(int errcode, String message);
public native void errx(String message);
@@ -117,9 +125,11 @@ public class DbEnv
throws Throwable;
// (Internal)
- private native void _init(DbErrcall errcall, int flags);
+ // called during constructor
+ private native void _init(DbErrcall errcall, int flags) throws DbException;
// (Internal)
+ // called when DbEnv is constructed as part of the Db constructor.
private native void _init_using_db(DbErrcall errcall, Db db);
/*package*/ native void _notify_db_close();
@@ -144,6 +154,10 @@ public class DbEnv
public native void set_cachesize(int gbytes, int bytes, int ncaches)
throws DbException;
+ // Encryption
+ public native void set_encrypt(String passwd, /*u_int32_t*/ int flags)
+ throws DbException;
+
// Error message callback.
public void set_errcall(DbErrcall errcall)
{
@@ -170,7 +184,7 @@ public class DbEnv
private native void _set_errpfx(String errpfx);
// Feedback
- public void set_feedback(DbFeedback feedback)
+ public void set_feedback(DbEnvFeedback feedback)
throws DbException
{
feedback_ = feedback;
@@ -178,18 +192,18 @@ public class DbEnv
}
// (Internal)
- private native void feedback_changed(DbFeedback feedback)
+ private native void feedback_changed(DbEnvFeedback feedback)
throws DbException;
// Generate debugging messages.
- public native void set_verbose(int which, int onoff)
+ public native void set_verbose(int which, boolean onoff)
throws DbException;
public native void set_data_dir(String data_dir)
throws DbException;
// Log buffer size.
- public native void set_lg_bsize(/*u_int32_t*/ int lg_max)
+ public native void set_lg_bsize(/*u_int32_t*/ int lg_bsize)
throws DbException;
// Log directory.
@@ -200,6 +214,10 @@ public class DbEnv
public native void set_lg_max(/*u_int32_t*/ int lg_max)
throws DbException;
+ // Log region size.
+ public native void set_lg_regionmax(/*u_int32_t*/ int lg_regionmax)
+ throws DbException;
+
// Two dimensional conflict matrix.
public native void set_lk_conflicts(byte[][] lk_conflicts)
throws DbException;
@@ -231,55 +249,51 @@ public class DbEnv
public native void set_mp_mmapsize(/*size_t*/ long mmapsize)
throws DbException;
- public native void set_mutexlocks(int mutexlocks)
+ public native void set_flags(int flags, boolean onoff)
throws DbException;
- public native static void set_pageyield(int pageyield)
- throws DbException;
+ public native void set_rep_limit(int gbytes, int bytes) throws DbException;
- public native static void set_panicstate(int panicstate)
- throws DbException;
-
- public void set_recovery_init(DbRecoveryInit recovery_init)
+ public void set_rep_transport(int envid, DbRepTransport transport)
throws DbException
{
- recovery_init_ = recovery_init;
- recovery_init_changed(recovery_init);
+ rep_transport_ = transport;
+ rep_transport_changed(envid, transport);
}
// (Internal)
- private native void recovery_init_changed(DbRecoveryInit recovery_init)
+ private native void rep_transport_changed(int envid,
+ DbRepTransport transport)
throws DbException;
- public native static void set_region_init(int region_init)
- throws DbException;
-
- public native void set_flags(int flags, int onoff)
- throws DbException;
-
- public native void set_server(String host, long cl_timeout,
- long sv_timeout, int flags)
+ public native void set_rpc_server(DbClient client, String host,
+ long cl_timeout, long sv_timeout,
+ int flags)
throws DbException;
public native void set_shm_key(long shm_key)
throws DbException;
- public native static void set_tas_spins(int tas_spins)
+ public native void set_tas_spins(int tas_spins)
throws DbException;
+ public native void set_timeout(/*db_timeout_t*/ long timeout,
+ /*u_int32_t*/ int flags)
+ throws DbException;
+
public native void set_tmp_dir(String tmp_dir)
throws DbException;
// Feedback
- public void set_tx_recover(DbTxnRecover tx_recover)
+ public void set_app_dispatch(DbAppDispatch app_dispatch)
throws DbException
{
- tx_recover_ = tx_recover;
- tx_recover_changed(tx_recover);
+ app_dispatch_ = app_dispatch;
+ app_dispatch_changed(app_dispatch);
}
// (Internal)
- private native void tx_recover_changed(DbTxnRecover tx_recover)
+ private native void app_dispatch_changed(DbAppDispatch app_dispatch)
throws DbException;
// Maximum number of transactions.
@@ -316,55 +330,80 @@ public class DbEnv
/*db_lockmode_t*/ int lock_mode)
throws DbException;
+ public native void lock_put(DbLock lock)
+ throws DbException;
+
public native /*u_int32_t*/ int lock_id()
throws DbException;
- public native DbLockStat lock_stat()
+ public native void lock_id_free(/*u_int32_t*/ int id)
+ throws DbException;
+
+ public native DbLockStat lock_stat(/*u_int32_t*/ int flags)
throws DbException;
+ public native void lock_vec(/*u_int32_t*/ int locker,
+ int flags,
+ DbLockRequest[] list,
+ int offset,
+ int count)
+ throws DbException;
+
public native String[] log_archive(int flags)
throws DbException;
public native static int log_compare(DbLsn lsn0, DbLsn lsn1);
+ public native DbLogc log_cursor(int flags)
+ throws DbException;
+
public native String log_file(DbLsn lsn)
throws DbException;
public native void log_flush(DbLsn lsn)
throws DbException;
- public native void log_get(DbLsn lsn, Dbt data, int flags)
+ public native void log_put(DbLsn lsn, Dbt data, int flags)
throws DbException;
- public native void log_put(DbLsn lsn, Dbt data, int flags)
+ public native DbLogStat log_stat(/*u_int32_t*/ int flags)
throws DbException;
- public native DbLogStat log_stat()
+ public native DbMpoolStat memp_stat(/*u_int32_t*/ int flags)
throws DbException;
- public native void log_register(Db dbp, String name)
+ public native DbMpoolFStat[] memp_fstat(/*u_int32_t*/ int flags)
throws DbException;
- public native void log_unregister(Db dbp)
+ public native int memp_trickle(int pct)
throws DbException;
- public native DbMpoolStat memp_stat()
+ public native int rep_elect(int nsites, int pri, int timeout)
throws DbException;
- public native DbMpoolFStat[] memp_fstat()
+ public static class RepProcessMessage {
+ public int envid;
+ }
+ public native int rep_process_message(Dbt control, Dbt rec,
+ RepProcessMessage result)
throws DbException;
- public native int memp_trickle(int pct)
+ public native void rep_start(Dbt cookie, int flags)
+ throws DbException;
+
+ public native DbRepStat rep_stat(/*u_int32_t*/ int flags)
throws DbException;
public native DbTxn txn_begin(DbTxn pid, int flags)
throws DbException;
- public native int txn_checkpoint(int kbyte, int min, int flags)
+ public native void txn_checkpoint(int kbyte, int min, int flags)
throws DbException;
+ public native DbPreplist[] txn_recover(int count, int flags)
+ throws DbException;
- public native DbTxnStat txn_stat()
+ public native DbTxnStat txn_stat(/*u_int32_t*/ int flags)
throws DbException;
////////////////////////////////////////////////////////////////
@@ -375,9 +414,9 @@ public class DbEnv
private long private_info_ = 0;
private int constructor_flags_ = 0;
private Vector dblist_ = new Vector(); // Db's that are open
- private DbFeedback feedback_ = null;
- private DbRecoveryInit recovery_init_ = null;
- private DbTxnRecover tx_recover_ = null;
+ private DbEnvFeedback feedback_ = null;
+ private DbRepTransport rep_transport_ = null;
+ private DbAppDispatch app_dispatch_ = null;
private DbOutputStreamErrcall errstream_ =
new DbOutputStreamErrcall(System.err);
/*package*/ DbErrcall errcall_ = errstream_;
@@ -386,7 +425,6 @@ public class DbEnv
static {
Db.load_db();
}
-
}
// end of DbEnv.java
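
The reworked DbEnv above could be exercised roughly as follows. This is a minimal sketch of the new per-environment timeout API only; the flag names Db.DB_SET_LOCK_TIMEOUT and Db.DB_SET_TXN_TIMEOUT are assumed to mirror the C constants, and timeouts are given in microseconds.

    import com.sleepycat.db.*;

    class TimeoutSketch
    {
        static void configure(DbEnv env) throws DbException
        {
            // Lock requests waiting longer than one second time out.
            // (Db.DB_SET_LOCK_TIMEOUT is an assumed constant name.)
            env.set_timeout(1000000L, Db.DB_SET_LOCK_TIMEOUT);

            // Transactions running longer than five seconds time out.
            // (Db.DB_SET_TXN_TIMEOUT is an assumed constant name.)
            env.set_timeout(5000000L, Db.DB_SET_TXN_TIMEOUT);
        }
    }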
diff --git a/bdb/java/src/com/sleepycat/db/DbEnvFeedback.java b/bdb/java/src/com/sleepycat/db/DbEnvFeedback.java
index 9eec2b819f6..feef750f186 100644
--- a/bdb/java/src/com/sleepycat/db/DbEnvFeedback.java
+++ b/bdb/java/src/com/sleepycat/db/DbEnvFeedback.java
@@ -1,10 +1,10 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1999, 2000
- * Sleepycat Software. All rights reserved.
+ * Copyright (c) 1999-2002
+ * Sleepycat Software. All rights reserved.
*
- * $Id: DbEnvFeedback.java,v 11.3 2000/02/14 02:59:56 bostic Exp $
+ * $Id: DbEnvFeedback.java,v 11.6 2002/01/11 15:52:34 bostic Exp $
*/
package com.sleepycat.db;
diff --git a/bdb/java/src/com/sleepycat/db/DbErrcall.java b/bdb/java/src/com/sleepycat/db/DbErrcall.java
index 62d3a3e08b3..4db12772ccd 100644
--- a/bdb/java/src/com/sleepycat/db/DbErrcall.java
+++ b/bdb/java/src/com/sleepycat/db/DbErrcall.java
@@ -1,10 +1,10 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1997, 1998, 1999, 2000
- * Sleepycat Software. All rights reserved.
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
*
- * $Id: DbErrcall.java,v 11.3 2000/02/14 02:59:56 bostic Exp $
+ * $Id: DbErrcall.java,v 11.6 2002/01/11 15:52:35 bostic Exp $
*/
package com.sleepycat.db;
diff --git a/bdb/java/src/com/sleepycat/db/DbException.java b/bdb/java/src/com/sleepycat/db/DbException.java
index ed4d020b677..132ea868f14 100644
--- a/bdb/java/src/com/sleepycat/db/DbException.java
+++ b/bdb/java/src/com/sleepycat/db/DbException.java
@@ -1,10 +1,10 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1997, 1998, 1999, 2000
- * Sleepycat Software. All rights reserved.
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
*
- * $Id: DbException.java,v 11.4 2000/02/14 02:59:56 bostic Exp $
+ * $Id: DbException.java,v 11.7 2002/01/11 15:52:35 bostic Exp $
*/
package com.sleepycat.db;
diff --git a/bdb/java/src/com/sleepycat/db/DbFeedback.java b/bdb/java/src/com/sleepycat/db/DbFeedback.java
index d932d951a6f..38d7346e03e 100644
--- a/bdb/java/src/com/sleepycat/db/DbFeedback.java
+++ b/bdb/java/src/com/sleepycat/db/DbFeedback.java
@@ -1,10 +1,10 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1997, 1998, 1999, 2000
- * Sleepycat Software. All rights reserved.
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
*
- * $Id: DbFeedback.java,v 11.4 2000/02/14 02:59:56 bostic Exp $
+ * $Id: DbFeedback.java,v 11.7 2002/01/11 15:52:35 bostic Exp $
*/
package com.sleepycat.db;
diff --git a/bdb/java/src/com/sleepycat/db/DbHash.java b/bdb/java/src/com/sleepycat/db/DbHash.java
index a72c2070b59..22781a11929 100644
--- a/bdb/java/src/com/sleepycat/db/DbHash.java
+++ b/bdb/java/src/com/sleepycat/db/DbHash.java
@@ -1,17 +1,17 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2000
- * Sleepycat Software. All rights reserved.
+ * Copyright (c) 2000-2002
+ * Sleepycat Software. All rights reserved.
*
- * $Id: DbHash.java,v 11.1 2000/07/04 20:53:19 dda Exp $
+ * $Id: DbHash.java,v 11.5 2002/01/11 15:52:36 bostic Exp $
*/
package com.sleepycat.db;
/*
* This interface is used by DbEnv.set_bt_compare()
- *
+ *
*/
public interface DbHash
{
diff --git a/bdb/java/src/com/sleepycat/db/DbHashStat.java b/bdb/java/src/com/sleepycat/db/DbHashStat.java
index 62154344732..97de6127af6 100644
--- a/bdb/java/src/com/sleepycat/db/DbHashStat.java
+++ b/bdb/java/src/com/sleepycat/db/DbHashStat.java
@@ -1,37 +1,24 @@
-/*-
- * See the file LICENSE for redistribution information.
- *
- * Copyright (c) 1999, 2000
- * Sleepycat Software. All rights reserved.
- *
- * $Id: DbHashStat.java,v 11.6 2000/05/04 02:54:55 dda Exp $
- */
+/* DO NOT EDIT: automatically built by dist/s_java. */
package com.sleepycat.db;
-/*
- * This is filled in and returned by the
- * Db.stat() method.
- */
public class DbHashStat
{
- public int hash_magic; // Magic number.
- public int hash_version; // Version number.
- public int hash_metaflags; // Metadata flags.
- public int hash_nkeys; // Number of unique keys.
- public int hash_ndata; // Number of data items.
- public int hash_pagesize; // Page size.
- public int hash_nelem; // Original nelem specified.
- public int hash_ffactor; // Fill factor specified at create.
- public int hash_buckets; // Number of hash buckets.
- public int hash_free; // Pages on the free list.
- public int hash_bfree; // Bytes free on bucket pages.
- public int hash_bigpages; // Number of big key/data pages.
- public int hash_big_bfree; // Bytes free on big item pages.
- public int hash_overflows; // Number of overflow pages.
- public int hash_ovfl_free; // Bytes free on ovfl pages.
- public int hash_dup; // Number of dup pages.
- public int hash_dup_free; // Bytes free on duplicate pages.
+ public int hash_magic;
+ public int hash_version;
+ public int hash_metaflags;
+ public int hash_nkeys;
+ public int hash_ndata;
+ public int hash_pagesize;
+ public int hash_ffactor;
+ public int hash_buckets;
+ public int hash_free;
+ public int hash_bfree;
+ public int hash_bigpages;
+ public int hash_big_bfree;
+ public int hash_overflows;
+ public int hash_ovfl_free;
+ public int hash_dup;
+ public int hash_dup_free;
}
-
// end of DbHashStat.java
diff --git a/bdb/java/src/com/sleepycat/db/DbKeyRange.java b/bdb/java/src/com/sleepycat/db/DbKeyRange.java
index 6a86afd9109..aa5dc16eeda 100644
--- a/bdb/java/src/com/sleepycat/db/DbKeyRange.java
+++ b/bdb/java/src/com/sleepycat/db/DbKeyRange.java
@@ -1,10 +1,10 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1997, 1998, 1999, 2000
- * Sleepycat Software. All rights reserved.
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
*
- * $Id: DbKeyRange.java,v 1.1 2000/04/12 15:07:02 dda Exp $
+ * $Id: DbKeyRange.java,v 1.4 2002/01/11 15:52:36 bostic Exp $
*/
package com.sleepycat.db;
diff --git a/bdb/java/src/com/sleepycat/db/DbLock.java b/bdb/java/src/com/sleepycat/db/DbLock.java
index bc467913e92..ee021a856b8 100644
--- a/bdb/java/src/com/sleepycat/db/DbLock.java
+++ b/bdb/java/src/com/sleepycat/db/DbLock.java
@@ -1,10 +1,10 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1997, 1998, 1999, 2000
- * Sleepycat Software. All rights reserved.
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
*
- * $Id: DbLock.java,v 11.4 2000/05/25 04:18:13 dda Exp $
+ * $Id: DbLock.java,v 11.8 2002/01/11 15:52:36 bostic Exp $
*/
package com.sleepycat.db;
@@ -18,11 +18,6 @@ public class DbLock
protected native void finalize()
throws Throwable;
- // methods
- //
- public native void put(DbEnv env)
- throws DbException;
-
// get/set methods
//
diff --git a/bdb/java/src/com/sleepycat/db/DbLockNotGrantedException.java b/bdb/java/src/com/sleepycat/db/DbLockNotGrantedException.java
new file mode 100644
index 00000000000..0fa434c72ee
--- /dev/null
+++ b/bdb/java/src/com/sleepycat/db/DbLockNotGrantedException.java
@@ -0,0 +1,57 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: DbLockNotGrantedException.java,v 11.3 2002/01/11 15:52:36 bostic Exp $
+ */
+
+package com.sleepycat.db;
+
+public class DbLockNotGrantedException extends DbException {
+ public DbLockNotGrantedException(String message,
+ int op, int mode, Dbt obj,
+ DbLock lock, int index)
+ {
+ super(message, Db.DB_LOCK_NOTGRANTED);
+ this.op = op;
+ this.mode = mode;
+ this.obj = obj;
+ this.lock = lock;
+ this.index = index;
+ }
+
+ public int get_op()
+ {
+ return op;
+ }
+
+ public int get_mode()
+ {
+ return mode;
+ }
+
+ public Dbt get_obj()
+ {
+ return obj;
+ }
+
+ public DbLock get_lock()
+ {
+ return lock;
+ }
+
+ public int get_index()
+ {
+ return index;
+ }
+
+ private int op;
+ private int mode;
+ private Dbt obj;
+ private DbLock lock;
+ private int index;
+
+}
+
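
A hedged sketch of how callers might use the new exception class. Only the tail of DbEnv.lock_get() is visible in this diff, so its argument order (locker id, flags, lock object, lock mode) is assumed, as are the Db.DB_LOCK_NOWAIT and Db.DB_LOCK_WRITE constant names.

    import com.sleepycat.db.*;

    class LockNotGrantedSketch
    {
        // 'locker' comes from env.lock_id(); 'obj' names the thing to lock.
        static void tryWriteLock(DbEnv env, int locker, Dbt obj)
            throws DbException
        {
            try {
                // lock_get()'s argument order is assumed from the visible
                // tail of its declaration: (locker, flags, object, mode).
                DbLock lock = env.lock_get(locker, Db.DB_LOCK_NOWAIT, obj,
                                           Db.DB_LOCK_WRITE);
                env.lock_put(lock);    // release immediately in this sketch
            } catch (DbLockNotGrantedException lnge) {
                System.err.println("not granted: op=" + lnge.get_op()
                    + " mode=" + lnge.get_mode()
                    + " index=" + lnge.get_index());
            }
        }
    }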
diff --git a/bdb/java/src/com/sleepycat/db/DbLockRequest.java b/bdb/java/src/com/sleepycat/db/DbLockRequest.java
new file mode 100644
index 00000000000..2ea2fe148ea
--- /dev/null
+++ b/bdb/java/src/com/sleepycat/db/DbLockRequest.java
@@ -0,0 +1,67 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: DbLockRequest.java,v 11.4 2002/01/16 07:45:24 mjc Exp $
+ */
+
+package com.sleepycat.db;
+
+public class DbLockRequest
+{
+ public DbLockRequest(int op, int mode, Dbt obj, DbLock lock)
+ {
+ this.op = op;
+ this.mode = mode;
+ this.obj = obj;
+ this.lock = lock;
+ }
+
+ public int get_op()
+ {
+ return op;
+ }
+
+ public void set_op(int op)
+ {
+ this.op = op;
+ }
+
+ public int get_mode()
+ {
+ return mode;
+ }
+
+ public void set_mode(int mode)
+ {
+ this.mode = mode;
+ }
+
+ public Dbt get_obj()
+ {
+ return obj;
+ }
+
+ public void set_obj(Dbt obj)
+ {
+ this.obj = obj;
+ }
+
+ public DbLock get_lock()
+ {
+ return lock;
+ }
+
+ public void set_lock(DbLock lock)
+ {
+ this.lock = lock;
+ }
+
+ private /* db_lockop_t */ int op;
+ private /* db_lockmode_t */ int mode;
+ private /* db_timeout_t */ int timeout;
+ private Dbt obj;
+ private DbLock lock;
+}
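
DbLockRequest exists to feed DbEnv.lock_vec(), added above. A sketch under the assumption that the lock operation and mode constants (Db.DB_LOCK_GET, Db.DB_LOCK_PUT_ALL, Db.DB_LOCK_READ) mirror their C counterparts:

    import com.sleepycat.db.*;

    class LockVecSketch
    {
        // Acquire read locks on two objects in one call, then release
        // everything this locker holds.
        static void lockBoth(DbEnv env, Dbt obj1, Dbt obj2) throws DbException
        {
            int locker = env.lock_id();
            DbLockRequest[] reqs = {
                new DbLockRequest(Db.DB_LOCK_GET, Db.DB_LOCK_READ, obj1, null),
                new DbLockRequest(Db.DB_LOCK_GET, Db.DB_LOCK_READ, obj2, null)
            };
            env.lock_vec(locker, 0, reqs, 0, reqs.length);
            // The granted locks are filled into each request's lock slot.

            DbLockRequest[] putAll = {
                new DbLockRequest(Db.DB_LOCK_PUT_ALL, 0, null, null)
            };
            env.lock_vec(locker, 0, putAll, 0, 1);
            env.lock_id_free(locker);
        }
    }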
diff --git a/bdb/java/src/com/sleepycat/db/DbLockStat.java b/bdb/java/src/com/sleepycat/db/DbLockStat.java
index f23f2ad5d49..f0903f061d2 100644
--- a/bdb/java/src/com/sleepycat/db/DbLockStat.java
+++ b/bdb/java/src/com/sleepycat/db/DbLockStat.java
@@ -1,30 +1,32 @@
-/*-
- * See the file LICENSE for redistribution information.
- *
- * Copyright (c) 1997, 1998, 1999, 2000
- * Sleepycat Software. All rights reserved.
- *
- * $Id: DbLockStat.java,v 11.3 2000/02/14 02:59:56 bostic Exp $
- */
+/* DO NOT EDIT: automatically built by dist/s_java. */
package com.sleepycat.db;
-/*
- * This is filled in and returned by the
- * DbLockTab.stat() method.
- */
public class DbLockStat
{
- public int st_maxlocks; // Maximum number of locks in table.
- public int st_nmodes; // Number of lock modes.
- public int st_nlockers; // Number of lockers.
- public int st_nconflicts; // Number of lock conflicts.
- public int st_nrequests; // Number of lock gets.
- public int st_nreleases; // Number of lock puts.
- public int st_ndeadlocks; // Number of lock deadlocks.
- public int st_region_wait; // Region lock granted after wait.
- public int st_region_nowait; // Region lock granted without wait.
- public int st_regsize; // Region size.
+ public int st_id;
+ public int st_cur_maxid;
+ public int st_maxlocks;
+ public int st_maxlockers;
+ public int st_maxobjects;
+ public int st_nmodes;
+ public int st_nlocks;
+ public int st_maxnlocks;
+ public int st_nlockers;
+ public int st_maxnlockers;
+ public int st_nobjects;
+ public int st_maxnobjects;
+ public int st_nconflicts;
+ public int st_nrequests;
+ public int st_nreleases;
+ public int st_nnowaits;
+ public int st_ndeadlocks;
+ public int st_locktimeout;
+ public int st_nlocktimeouts;
+ public int st_txntimeout;
+ public int st_ntxntimeouts;
+ public int st_region_wait;
+ public int st_region_nowait;
+ public int st_regsize;
}
-
// end of DbLockStat.java
diff --git a/bdb/java/src/com/sleepycat/db/DbLogStat.java b/bdb/java/src/com/sleepycat/db/DbLogStat.java
index d708f1c4148..19e5be25ce3 100644
--- a/bdb/java/src/com/sleepycat/db/DbLogStat.java
+++ b/bdb/java/src/com/sleepycat/db/DbLogStat.java
@@ -1,35 +1,29 @@
-/*-
- * See the file LICENSE for redistribution information.
- *
- * Copyright (c) 1998, 1999, 2000
- * Sleepycat Software. All rights reserved.
- *
- * $Id: DbLogStat.java,v 11.3 2000/02/14 02:59:56 bostic Exp $
- */
+/* DO NOT EDIT: automatically built by dist/s_java. */
package com.sleepycat.db;
-/*
- * This is filled in and returned by the
- * DbLog.stat() method.
- */
public class DbLogStat
{
- public int st_magic; // Log file magic number.
- public int st_version; // Log file version number.
- public int st_mode; // Log file mode.
- public int st_lg_max; // Maximum log file size.
- public int st_w_bytes; // Bytes to log.
- public int st_w_mbytes; // Megabytes to log.
- public int st_wc_bytes; // Bytes to log since checkpoint.
- public int st_wc_mbytes; // Megabytes to log since checkpoint.
- public int st_wcount; // Total syncs to the log.
- public int st_scount; // Total writes to the log.
- public int st_region_wait; // Region lock granted after wait.
- public int st_region_nowait; // Region lock granted without wait.
- public int st_cur_file; // Current log file number.
- public int st_cur_offset; // Current log file offset.
- public int st_regsize; // Region size.
+ public int st_magic;
+ public int st_version;
+ public int st_mode;
+ public int st_lg_bsize;
+ public int st_lg_size;
+ public int st_w_bytes;
+ public int st_w_mbytes;
+ public int st_wc_bytes;
+ public int st_wc_mbytes;
+ public int st_wcount;
+ public int st_wcount_fill;
+ public int st_scount;
+ public int st_region_wait;
+ public int st_region_nowait;
+ public int st_cur_file;
+ public int st_cur_offset;
+ public int st_disk_file;
+ public int st_disk_offset;
+ public int st_regsize;
+ public int st_maxcommitperflush;
+ public int st_mincommitperflush;
}
-
// end of DbLogStat.java
diff --git a/bdb/java/src/com/sleepycat/db/DbLogc.java b/bdb/java/src/com/sleepycat/db/DbLogc.java
new file mode 100644
index 00000000000..34368199d5e
--- /dev/null
+++ b/bdb/java/src/com/sleepycat/db/DbLogc.java
@@ -0,0 +1,39 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: DbLogc.java,v 11.3 2002/01/11 15:52:37 bostic Exp $
+ */
+
+package com.sleepycat.db;
+
+/**
+ *
+ * @author Donald D. Anderson
+ */
+public class DbLogc
+{
+ // methods
+ //
+ public native void close(int flags)
+ throws DbException;
+
+ // returns: 0, DB_NOTFOUND, or throws error
+ public native int get(DbLsn lsn, Dbt data, int flags)
+ throws DbException;
+
+ protected native void finalize()
+ throws Throwable;
+
+ // private data
+ //
+ private long private_dbobj_ = 0;
+
+ static {
+ Db.load_db();
+ }
+}
+
+// end of DbLogc.java
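
The log cursor returned by DbEnv.log_cursor() might be walked like this; DbLsn's no-argument constructor and the Db.DB_NEXT flag name are assumptions here.

    import com.sleepycat.db.*;

    class LogCursorSketch
    {
        static void dumpLog(DbEnv env) throws DbException
        {
            DbLogc logc = env.log_cursor(0);
            DbLsn lsn = new DbLsn();     // no-arg constructor assumed
            Dbt data = new Dbt();

            // get() returns Db.DB_NOTFOUND once the cursor runs off the end.
            while (logc.get(lsn, data, Db.DB_NEXT) == 0)
                System.out.println("log record of " + data.get_size() + " bytes");

            logc.close(0);
        }
    }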
diff --git a/bdb/java/src/com/sleepycat/db/DbLsn.java b/bdb/java/src/com/sleepycat/db/DbLsn.java
index ff36ac61c99..c9e01225e3f 100644
--- a/bdb/java/src/com/sleepycat/db/DbLsn.java
+++ b/bdb/java/src/com/sleepycat/db/DbLsn.java
@@ -1,10 +1,10 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1997, 1998, 1999, 2000
- * Sleepycat Software. All rights reserved.
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
*
- * $Id: DbLsn.java,v 11.5 2000/09/11 16:21:37 dda Exp $
+ * $Id: DbLsn.java,v 11.8 2002/01/11 15:52:37 bostic Exp $
*/
package com.sleepycat.db;
diff --git a/bdb/java/src/com/sleepycat/db/DbMemoryException.java b/bdb/java/src/com/sleepycat/db/DbMemoryException.java
index 67a29a1f16f..a5f66674c89 100644
--- a/bdb/java/src/com/sleepycat/db/DbMemoryException.java
+++ b/bdb/java/src/com/sleepycat/db/DbMemoryException.java
@@ -1,10 +1,10 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1999, 2000
- * Sleepycat Software. All rights reserved.
+ * Copyright (c) 1999-2002
+ * Sleepycat Software. All rights reserved.
*
- * $Id: DbMemoryException.java,v 11.3 2000/02/14 02:59:56 bostic Exp $
+ * $Id: DbMemoryException.java,v 11.7 2002/01/11 15:52:37 bostic Exp $
*/
package com.sleepycat.db;
@@ -23,6 +23,27 @@ public class DbMemoryException extends DbException
{
super(s, errno);
}
+
+ public void set_dbt(Dbt dbt)
+ {
+ this.dbt = dbt;
+ }
+
+ public Dbt get_dbt()
+ {
+ return dbt;
+ }
+
+ /* Override of DbException.toString():
+ * the extra verbiage that comes from DbEnv.strerror(ENOMEM)
+ * is not helpful.
+ */
+ public String toString()
+ {
+ return getMessage();
+ }
+
+ Dbt dbt = null;
}
// end of DbMemoryException.java
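
A sketch of one intended use of the new get_dbt() accessor, assuming the attached Dbt is the user-memory Dbt that was too small and that its size field reports the length DB needed:

    import com.sleepycat.db.*;

    class MemoryExceptionSketch
    {
        // Retry a get() with a larger user-supplied buffer.
        static void fetch(Db db, Dbt key) throws DbException
        {
            Dbt data = new Dbt(new byte[64]);
            data.set_ulen(64);
            data.set_flags(Db.DB_DBT_USERMEM);
            try {
                db.get(null, key, data, 0);
            } catch (DbMemoryException dme) {
                // Assumption: the attached Dbt's size was updated to the
                // length Berkeley DB actually needed.
                int needed = dme.get_dbt().get_size();
                data.set_data(new byte[needed]);
                data.set_ulen(needed);
                db.get(null, key, data, 0);
            }
        }
    }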
diff --git a/bdb/java/src/com/sleepycat/db/DbMpoolFStat.java b/bdb/java/src/com/sleepycat/db/DbMpoolFStat.java
index 44497b3bf74..cc03b568fc3 100644
--- a/bdb/java/src/com/sleepycat/db/DbMpoolFStat.java
+++ b/bdb/java/src/com/sleepycat/db/DbMpoolFStat.java
@@ -1,28 +1,16 @@
-/*-
- * See the file LICENSE for redistribution information.
- *
- * Copyright (c) 1997, 1998, 1999, 2000
- * Sleepycat Software. All rights reserved.
- *
- * $Id: DbMpoolFStat.java,v 11.3 2000/02/14 02:59:56 bostic Exp $
- */
+/* DO NOT EDIT: automatically built by dist/s_java. */
package com.sleepycat.db;
-/*
- * This is filled in and returned by the
- * DbMpool.fstat() method.
- */
public class DbMpoolFStat
{
- public String file_name; // File name.
- public int st_pagesize; // Page size.
- public int st_cache_hit; // Pages found in the cache.
- public int st_cache_miss; // Pages not found in the cache.
- public int st_map; // Pages from mapped files.
- public int st_page_create; // Pages created in the cache.
- public int st_page_in; // Pages read in.
- public int st_page_out; // Pages written out.
+ public String file_name;
+ public int st_pagesize;
+ public int st_map;
+ public int st_cache_hit;
+ public int st_cache_miss;
+ public int st_page_create;
+ public int st_page_in;
+ public int st_page_out;
}
-
// end of DbMpoolFStat.java
diff --git a/bdb/java/src/com/sleepycat/db/DbMpoolStat.java b/bdb/java/src/com/sleepycat/db/DbMpoolStat.java
index 8a6d75e367b..d2d854a8b8d 100644
--- a/bdb/java/src/com/sleepycat/db/DbMpoolStat.java
+++ b/bdb/java/src/com/sleepycat/db/DbMpoolStat.java
@@ -1,10 +1,10 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1997, 1998, 1999, 2000
- * Sleepycat Software. All rights reserved.
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
*
- * $Id: DbMpoolStat.java,v 11.3 2000/02/14 02:59:56 bostic Exp $
+ * $Id: DbMpoolStat.java,v 11.7 2002/01/11 15:52:38 bostic Exp $
*/
package com.sleepycat.db;
@@ -15,10 +15,6 @@ package com.sleepycat.db;
*/
public class DbMpoolStat
{
- /**
- * @deprecated As of Berkeley DB 2.8.2, cachesize for mpool unavailable.
- */
- public int st_cachesize; // Cache size.
public int st_cache_hit; // Pages found in the cache.
public int st_cache_miss; // Pages not found in the cache.
public int st_map; // Pages from mapped files.
diff --git a/bdb/java/src/com/sleepycat/db/DbMultipleDataIterator.java b/bdb/java/src/com/sleepycat/db/DbMultipleDataIterator.java
new file mode 100644
index 00000000000..3c948ad37cd
--- /dev/null
+++ b/bdb/java/src/com/sleepycat/db/DbMultipleDataIterator.java
@@ -0,0 +1,46 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2001-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: DbMultipleDataIterator.java,v 1.5 2002/01/11 15:52:38 bostic Exp $
+ */
+
+package com.sleepycat.db;
+
+/**
+ *
+ * @author David M. Krinsky
+ */
+public class DbMultipleDataIterator extends DbMultipleIterator
+{
+ // public methods
+ public DbMultipleDataIterator(Dbt data)
+ {
+ super(data);
+ }
+
+ public boolean next(Dbt data)
+ {
+ int dataoff = DbUtil.array2int(buf, pos);
+
+ // crack out the data offset and length.
+ if (dataoff < 0) {
+ return (false);
+ }
+
+ pos -= int32sz;
+ int datasz = DbUtil.array2int(buf, pos);
+
+ pos -= int32sz;
+
+ data.set_data(buf);
+ data.set_size(datasz);
+ data.set_offset(dataoff);
+
+ return (true);
+ }
+}
+
+// end of DbMultipleDataIterator.java
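
The bulk-return iterators decode the buffer filled by a Db.DB_MULTIPLE get. A hedged sketch (Db.DB_MULTIPLE and Db.DB_DBT_USERMEM are assumed to mirror the C flag names; the 1 MB buffer size is arbitrary):

    import com.sleepycat.db.*;

    class BulkGetSketch
    {
        // Fetch many duplicate data items for one key in a single call,
        // then walk the returned buffer with DbMultipleDataIterator.
        static void bulkGet(Db db, Dbt key) throws DbException
        {
            Dbt data = new Dbt(new byte[1024 * 1024]);   // bulk buffer
            data.set_ulen(1024 * 1024);
            data.set_flags(Db.DB_DBT_USERMEM);

            db.get(null, key, data, Db.DB_MULTIPLE);

            DbMultipleDataIterator it = new DbMultipleDataIterator(data);
            Dbt item = new Dbt();
            while (it.next(item))
                System.out.println("item of " + item.get_size() + " bytes");
        }
    }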
diff --git a/bdb/java/src/com/sleepycat/db/DbMultipleIterator.java b/bdb/java/src/com/sleepycat/db/DbMultipleIterator.java
new file mode 100644
index 00000000000..0a28d9507a1
--- /dev/null
+++ b/bdb/java/src/com/sleepycat/db/DbMultipleIterator.java
@@ -0,0 +1,51 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2001-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: DbMultipleIterator.java,v 1.5 2002/01/11 15:52:38 bostic Exp $
+ */
+
+package com.sleepycat.db;
+
+/**
+ *
+ * @author David M. Krinsky
+ */
+// DbMultipleIterator is a shared package-private base class for the three
+// types of bulk-return Iterator; it should never be instantiated directly,
+// but it handles the functionality shared by its subclasses.
+class DbMultipleIterator
+{
+ // Package-private methods and members: used by our subclasses.
+
+ // Called implicitly by the subclass
+ DbMultipleIterator(Dbt data)
+ {
+ buf = data.get_data();
+ size = data.get_ulen();
+ // The offset will always be zero from the front of the buffer
+ // DB returns, and the buffer is opaque, so don't bother
+ // handling an offset.
+
+ // The initial position is pointing at the last u_int32_t
+ // in the buffer.
+ pos = size - int32sz;
+ }
+
+ // The C macros use sizeof(u_int32_t). Fortunately, Java ints
+ // are always four bytes. Make this a constant just for form's sake.
+ static final int int32sz = 4;
+
+ // Current position within the buffer; equivalent to "pointer"
+ // in the DB_MULTIPLE macros.
+ int pos;
+
+ // A reference to the encoded buffer returned from the original
+ // Db/Dbc.get call on the data Dbt, and its size.
+ byte[] buf;
+ int size;
+}
+
+// end of DbMultipleIterator.java
diff --git a/bdb/java/src/com/sleepycat/db/DbMultipleKeyDataIterator.java b/bdb/java/src/com/sleepycat/db/DbMultipleKeyDataIterator.java
new file mode 100644
index 00000000000..0edae5801d4
--- /dev/null
+++ b/bdb/java/src/com/sleepycat/db/DbMultipleKeyDataIterator.java
@@ -0,0 +1,56 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2001-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: DbMultipleKeyDataIterator.java,v 1.5 2002/01/11 15:52:39 bostic Exp $
+ */
+
+package com.sleepycat.db;
+
+/**
+ *
+ * @author David M. Krinsky
+ */
+public class DbMultipleKeyDataIterator extends DbMultipleIterator
+{
+ // public methods
+ public DbMultipleKeyDataIterator(Dbt data)
+ {
+ super(data);
+ }
+
+ public boolean next(Dbt key, Dbt data)
+ {
+ int keyoff = DbUtil.array2int(buf, pos);
+
+ // crack out the key and data offsets and lengths.
+ if (keyoff < 0) {
+ return (false);
+ }
+
+ pos -= int32sz;
+ int keysz = DbUtil.array2int(buf, pos);
+
+ pos -= int32sz;
+ int dataoff = DbUtil.array2int(buf, pos);
+
+ pos -= int32sz;
+ int datasz = DbUtil.array2int(buf, pos);
+
+ pos -= int32sz;
+
+ key.set_data(buf);
+ key.set_size(keysz);
+ key.set_offset(keyoff);
+
+ data.set_data(buf);
+ data.set_size(datasz);
+ data.set_offset(dataoff);
+
+ return (true);
+ }
+}
+
+// end of DbMultipleKeyDataIterator.java
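
The key/data variant pairs naturally with a cursor scan. A sketch assuming the Db.DB_MULTIPLE_KEY and Db.DB_NEXT flag names and that flags are OR-ed together as in the C API:

    import com.sleepycat.db.*;

    class BulkCursorSketch
    {
        // Scan a whole database in large chunks: each cursor get() fills
        // the buffer with many key/data pairs, decoded by the iterator.
        static void scan(Db db) throws DbException
        {
            Dbc cursor = db.cursor(null, 0);
            Dbt key = new Dbt();
            Dbt data = new Dbt(new byte[1024 * 1024]);
            data.set_ulen(1024 * 1024);
            data.set_flags(Db.DB_DBT_USERMEM);

            while (cursor.get(key, data, Db.DB_MULTIPLE_KEY | Db.DB_NEXT) == 0) {
                DbMultipleKeyDataIterator it =
                    new DbMultipleKeyDataIterator(data);
                Dbt k = new Dbt(), d = new Dbt();
                while (it.next(k, d))
                    System.out.println(k.get_size() + "/" + d.get_size());
            }
            cursor.close(0);
        }
    }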
diff --git a/bdb/java/src/com/sleepycat/db/DbMultipleRecnoDataIterator.java b/bdb/java/src/com/sleepycat/db/DbMultipleRecnoDataIterator.java
new file mode 100644
index 00000000000..a4a578ed170
--- /dev/null
+++ b/bdb/java/src/com/sleepycat/db/DbMultipleRecnoDataIterator.java
@@ -0,0 +1,51 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2001-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: DbMultipleRecnoDataIterator.java,v 1.5 2002/01/11 15:52:39 bostic Exp $
+ */
+
+package com.sleepycat.db;
+
+/**
+ *
+ * @author David M. Krinsky
+ */
+public class DbMultipleRecnoDataIterator extends DbMultipleIterator
+{
+ // public methods
+ public DbMultipleRecnoDataIterator(Dbt data)
+ {
+ super(data);
+ }
+
+ public boolean next(Dbt key, Dbt data)
+ {
+ int keyoff = DbUtil.array2int(buf, pos);
+
+ // crack out the key offset and the data offset and length.
+ if (keyoff < 0) {
+ return (false);
+ }
+
+ pos -= int32sz;
+ int dataoff = DbUtil.array2int(buf, pos);
+
+ pos -= int32sz;
+ int datasz = DbUtil.array2int(buf, pos);
+
+ pos -= int32sz;
+
+ key.set_recno_key_from_buffer(buf, keyoff);
+
+ data.set_data(buf);
+ data.set_size(datasz);
+ data.set_offset(dataoff);
+
+ return (true);
+ }
+}
+
+// end of DbMultipleRecnoDataIterator.java
diff --git a/bdb/java/src/com/sleepycat/db/DbOutputStreamErrcall.java b/bdb/java/src/com/sleepycat/db/DbOutputStreamErrcall.java
index 4f962d9a334..7343caa8238 100644
--- a/bdb/java/src/com/sleepycat/db/DbOutputStreamErrcall.java
+++ b/bdb/java/src/com/sleepycat/db/DbOutputStreamErrcall.java
@@ -1,10 +1,10 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1997, 1998, 1999, 2000
- * Sleepycat Software. All rights reserved.
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
*
- * $Id: DbOutputStreamErrcall.java,v 11.3 2000/02/14 02:59:56 bostic Exp $
+ * $Id: DbOutputStreamErrcall.java,v 11.6 2002/01/11 15:52:39 bostic Exp $
*/
package com.sleepycat.db;
diff --git a/bdb/java/src/com/sleepycat/db/DbPreplist.java b/bdb/java/src/com/sleepycat/db/DbPreplist.java
new file mode 100644
index 00000000000..e5fffa77e1e
--- /dev/null
+++ b/bdb/java/src/com/sleepycat/db/DbPreplist.java
@@ -0,0 +1,22 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1999-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: DbPreplist.java,v 11.3 2002/01/11 15:52:40 bostic Exp $
+ */
+
+package com.sleepycat.db;
+
+/*
+ * This is filled in and returned by the
+ * DbEnv.txn_recover() method.
+ */
+public class DbPreplist
+{
+ public DbTxn txn;
+ public byte gid[];
+}
+
+// end of DbPreplist.java
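
DbPreplist is what DbEnv.txn_recover() hands back. A sketch of post-crash resolution, assuming Db.DB_FIRST/Db.DB_NEXT are the paging flags and that an empty (or null) array marks the end of the list:

    import com.sleepycat.db.*;

    class TxnRecoverSketch
    {
        // Resolve transactions that were prepared but not resolved
        // before a crash. The batch size of 16 is arbitrary.
        static void resolve(DbEnv env) throws DbException
        {
            DbPreplist[] prepared = env.txn_recover(16, Db.DB_FIRST);
            while (prepared != null && prepared.length > 0) {
                for (int i = 0; i < prepared.length; i++) {
                    // A real transaction manager would match prepared[i].gid
                    // against its own records before deciding.
                    prepared[i].txn.commit(0);
                }
                prepared = env.txn_recover(16, Db.DB_NEXT);
            }
        }
    }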
diff --git a/bdb/java/src/com/sleepycat/db/DbQueueStat.java b/bdb/java/src/com/sleepycat/db/DbQueueStat.java
index 652878b1adb..67d229ab840 100644
--- a/bdb/java/src/com/sleepycat/db/DbQueueStat.java
+++ b/bdb/java/src/com/sleepycat/db/DbQueueStat.java
@@ -1,32 +1,21 @@
-/*-
- * See the file LICENSE for redistribution information.
- *
- * Copyright (c) 1999, 2000
- * Sleepycat Software. All rights reserved.
- *
- * $Id: DbQueueStat.java,v 11.5 2000/11/07 18:45:27 dda Exp $
- */
+/* DO NOT EDIT: automatically built by dist/s_java. */
package com.sleepycat.db;
-/*
- * This is filled in and returned by the
- * Db.stat() method.
- */
public class DbQueueStat
{
- public int qs_magic; // Magic number.
- public int qs_version; // Version number.
- public int qs_metaflags; // Metadata flags.
- public int qs_nkeys; // Number of unique keys.
- public int qs_ndata; // Number of data items.
- public int qs_pagesize; // Page size.
- public int qs_pages; // Data pages.
- public int qs_re_len; // Fixed-length record length.
- public int qs_re_pad; // Fixed-length record pad.
- public int qs_pgfree; // Bytes free in data pages.
- public int qs_first_recno; // First not deleted record.
- public int qs_cur_recno; // Last allocated record number.
+ public int qs_magic;
+ public int qs_version;
+ public int qs_metaflags;
+ public int qs_nkeys;
+ public int qs_ndata;
+ public int qs_pagesize;
+ public int qs_extentsize;
+ public int qs_pages;
+ public int qs_re_len;
+ public int qs_re_pad;
+ public int qs_pgfree;
+ public int qs_first_recno;
+ public int qs_cur_recno;
}
-
// end of DbQueueStat.java
diff --git a/bdb/java/src/com/sleepycat/db/DbRecoveryInit.java b/bdb/java/src/com/sleepycat/db/DbRecoveryInit.java
deleted file mode 100644
index b32eebcaa6c..00000000000
--- a/bdb/java/src/com/sleepycat/db/DbRecoveryInit.java
+++ /dev/null
@@ -1,23 +0,0 @@
-/*-
- * See the file LICENSE for redistribution information.
- *
- * Copyright (c) 1997, 1998, 1999, 2000
- * Sleepycat Software. All rights reserved.
- *
- * $Id: DbRecoveryInit.java,v 11.3 2000/02/14 02:59:56 bostic Exp $
- */
-
-package com.sleepycat.db;
-
-/**
- *
- * @author Donald D. Anderson
- */
-public interface DbRecoveryInit
-{
- // methods
- //
- public abstract void recovery_init(DbEnv dbenv);
-}
-
-// end of DbRecoveryInit.java
diff --git a/bdb/java/src/com/sleepycat/db/DbRepStat.java b/bdb/java/src/com/sleepycat/db/DbRepStat.java
new file mode 100644
index 00000000000..953d10eddd1
--- /dev/null
+++ b/bdb/java/src/com/sleepycat/db/DbRepStat.java
@@ -0,0 +1,43 @@
+/* DO NOT EDIT: automatically built by dist/s_java. */
+
+package com.sleepycat.db;
+
+public class DbRepStat
+{
+ public int st_status;
+ public DbLsn st_next_lsn;
+ public DbLsn st_waiting_lsn;
+ public int st_dupmasters;
+ public int st_env_id;
+ public int st_env_priority;
+ public int st_gen;
+ public int st_log_duplicated;
+ public int st_log_queued;
+ public int st_log_queued_max;
+ public int st_log_queued_total;
+ public int st_log_records;
+ public int st_log_requested;
+ public int st_master;
+ public int st_master_changes;
+ public int st_msgs_badgen;
+ public int st_msgs_processed;
+ public int st_msgs_recover;
+ public int st_msgs_send_failures;
+ public int st_msgs_sent;
+ public int st_newsites;
+ public int st_nsites;
+ public int st_nthrottles;
+ public int st_outdated;
+ public int st_txns_applied;
+ public int st_elections;
+ public int st_elections_won;
+ public int st_election_cur_winner;
+ public int st_election_gen;
+ public DbLsn st_election_lsn;
+ public int st_election_nsites;
+ public int st_election_priority;
+ public int st_election_status;
+ public int st_election_tiebreaker;
+ public int st_election_votes;
+}
+// end of DbRepStat.java
diff --git a/bdb/java/src/com/sleepycat/db/DbRepTransport.java b/bdb/java/src/com/sleepycat/db/DbRepTransport.java
new file mode 100644
index 00000000000..441c783b890
--- /dev/null
+++ b/bdb/java/src/com/sleepycat/db/DbRepTransport.java
@@ -0,0 +1,19 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2001-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: DbRepTransport.java,v 11.2 2002/01/11 15:52:40 bostic Exp $
+ */
+
+package com.sleepycat.db;
+
+/*
+ * This is used as a callback by DbEnv.set_rep_transport.
+ */
+public interface DbRepTransport
+{
+ public int send(DbEnv env, Dbt control, Dbt rec, int flags, int envid)
+ throws DbException;
+}
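
Implementing the transport callback and registering it might look like this; passing a null cookie to rep_start() and the Db.DB_REP_CLIENT flag name are assumptions here.

    import com.sleepycat.db.*;

    class RepTransportSketch implements DbRepTransport
    {
        // Ship 'control' and 'rec' to the site named by 'envid' over
        // whatever channel the application uses; a nonzero return tells
        // DB the message could not be sent.
        public int send(DbEnv env, Dbt control, Dbt rec, int flags, int envid)
            throws DbException
        {
            // ... write the two Dbts to a socket for 'envid' ...
            return 0;
        }

        // Wire the callback up and join the replication group as a client.
        static void join(DbEnv env, int myEnvId) throws DbException
        {
            env.set_rep_transport(myEnvId, new RepTransportSketch());
            env.rep_start(null, Db.DB_REP_CLIENT);
        }
    }

Incoming messages from other sites would then be handed to DbEnv.rep_process_message(), with the RepProcessMessage result reporting the environment id where relevant.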
diff --git a/bdb/java/src/com/sleepycat/db/DbRunRecoveryException.java b/bdb/java/src/com/sleepycat/db/DbRunRecoveryException.java
index 78736b6ed1e..32f2727a78b 100644
--- a/bdb/java/src/com/sleepycat/db/DbRunRecoveryException.java
+++ b/bdb/java/src/com/sleepycat/db/DbRunRecoveryException.java
@@ -1,10 +1,10 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1997, 1998, 1999, 2000
- * Sleepycat Software. All rights reserved.
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
*
- * $Id: DbRunRecoveryException.java,v 11.3 2000/02/14 02:59:56 bostic Exp $
+ * $Id: DbRunRecoveryException.java,v 11.6 2002/01/11 15:52:40 bostic Exp $
*/
package com.sleepycat.db;
diff --git a/bdb/java/src/com/sleepycat/db/DbSecondaryKeyCreate.java b/bdb/java/src/com/sleepycat/db/DbSecondaryKeyCreate.java
new file mode 100644
index 00000000000..ca2acde1b03
--- /dev/null
+++ b/bdb/java/src/com/sleepycat/db/DbSecondaryKeyCreate.java
@@ -0,0 +1,22 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1999-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: DbSecondaryKeyCreate.java,v 11.3 2002/01/11 15:52:40 bostic Exp $
+ */
+
+package com.sleepycat.db;
+
+/*
+ * This is used as a callback by Db.associate.
+ */
+public interface DbSecondaryKeyCreate
+{
+ public int secondary_key_create(Db secondary, Dbt key,
+ Dbt data, Dbt result)
+ throws DbException;
+}
+
+// end of DbSecondaryKeyCreate.java
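
A sketch of a key creator for use with Db.associate(). The 4-byte field it extracts is purely hypothetical, the exact Java signature of associate() is not shown in this diff, and Db.DB_DONOTINDEX is assumed to mirror the C constant.

    import com.sleepycat.db.*;

    class FieldKeyCreator implements DbSecondaryKeyCreate
    {
        // Point the secondary key at a hypothetical 4-byte field at the
        // start of the primary data; only offset/size are set, no copying.
        public int secondary_key_create(Db secondary, Dbt key,
                                        Dbt data, Dbt result)
            throws DbException
        {
            result.set_data(data.get_data());
            result.set_offset(data.get_offset());
            result.set_size(4);
            // Returning Db.DB_DONOTINDEX (assumed constant name) instead
            // of 0 would leave this record out of the secondary index.
            return 0;
        }
        // Registered with the primary database via Db.associate(); that
        // method's Java signature does not appear in this diff.
    }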
diff --git a/bdb/java/src/com/sleepycat/db/DbTxn.java b/bdb/java/src/com/sleepycat/db/DbTxn.java
index 201ff94c8f3..d3906d112c0 100644
--- a/bdb/java/src/com/sleepycat/db/DbTxn.java
+++ b/bdb/java/src/com/sleepycat/db/DbTxn.java
@@ -1,10 +1,10 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1997, 1998, 1999, 2000
- * Sleepycat Software. All rights reserved.
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
*
- * $Id: DbTxn.java,v 11.5 2000/05/25 04:18:13 dda Exp $
+ * $Id: DbTxn.java,v 11.17 2002/08/29 14:22:22 margo Exp $
*/
package com.sleepycat.db;
@@ -23,14 +23,44 @@ public class DbTxn
public native void commit(int flags)
throws DbException;
+ public native void discard(int flags)
+ throws DbException;
+
public native /*u_int32_t*/ int id()
throws DbException;
- public native void prepare()
+ public native void prepare(byte[] gid)
throws DbException;
- protected native void finalize()
- throws Throwable;
+ public native void set_timeout(/*db_timeout_t*/ long timeout,
+ /*u_int32_t*/ int flags)
+ throws DbException;
+
+ // We override Object.equals because it is possible for
+ // the Java API to create multiple DbTxns that reference
+ // the same underlying object. This can happen for example
+ // during DbEnv.txn_recover().
+ //
+ public boolean equals(Object obj)
+ {
+ if (this == obj)
+ return true;
+
+ if (obj != null && (obj instanceof DbTxn)) {
+ DbTxn that = (DbTxn)obj;
+ return (this.private_dbobj_ == that.private_dbobj_);
+ }
+ return false;
+ }
+
+ // We must override Object.hashCode whenever we override
+ // Object.equals() to enforce the maxim that equal objects
+ // have the same hashcode.
+ //
+ public int hashCode()
+ {
+ return ((int)private_dbobj_ ^ (int)(private_dbobj_ >> 32));
+ }
// get/set methods
//
diff --git a/bdb/java/src/com/sleepycat/db/DbTxnRecover.java b/bdb/java/src/com/sleepycat/db/DbTxnRecover.java
deleted file mode 100644
index ee47935941d..00000000000
--- a/bdb/java/src/com/sleepycat/db/DbTxnRecover.java
+++ /dev/null
@@ -1,22 +0,0 @@
-/*-
- * See the file LICENSE for redistribution information.
- *
- * Copyright (c) 2000
- * Sleepycat Software. All rights reserved.
- *
- * $Id: DbTxnRecover.java,v 11.1 2000/06/29 18:08:17 dda Exp $
- */
-
-package com.sleepycat.db;
-
-/*
- * This interface is used by DbEnv.set_tx_recover()
- *
- */
-public interface DbTxnRecover
-{
- // The value of recops is one of the Db.DB_TXN_* constants
- public abstract int tx_recover(DbEnv env, Dbt dbt, DbLsn lsn, int recops);
-}
-
-// end of DbBtreeCompare.java
diff --git a/bdb/java/src/com/sleepycat/db/DbTxnStat.java b/bdb/java/src/com/sleepycat/db/DbTxnStat.java
index e72addb00b1..78794aea504 100644
--- a/bdb/java/src/com/sleepycat/db/DbTxnStat.java
+++ b/bdb/java/src/com/sleepycat/db/DbTxnStat.java
@@ -1,40 +1,27 @@
-/*-
- * See the file LICENSE for redistribution information.
- *
- * Copyright (c) 1997, 1998, 1999, 2000
- * Sleepycat Software. All rights reserved.
- *
- * $Id: DbTxnStat.java,v 11.3 2000/02/14 02:59:56 bostic Exp $
- */
+/* DO NOT EDIT: automatically built by dist/s_java. */
package com.sleepycat.db;
-/*
- * This is filled in and returned by the
- * DbTxnMgr.fstat() method.
- */
public class DbTxnStat
{
public static class Active {
- public int txnid; // Transaction ID
- public int parentid; // Transaction ID of parent
- public DbLsn lsn; // Lsn of the begin record
+ public int txnid;
+ public int parentid;
+ public DbLsn lsn;
};
-
- public DbLsn st_last_ckp; // lsn of the last checkpoint
- public DbLsn st_pending_ckp; // last checkpoint did not finish
- public long st_time_ckp; // time of last checkpoint (UNIX secs)
- public int st_last_txnid; // last transaction id given out
- public int st_maxtxns; // maximum number of active txns
- public int st_naborts; // number of aborted transactions
- public int st_nbegins; // number of begun transactions
- public int st_ncommits; // number of committed transactions
- public int st_nactive; // number of active transactions
- public int st_maxnactive; // maximum active transactions
- public Active st_txnarray[]; // array of active transactions
- public int st_region_wait; // Region lock granted after wait.
- public int st_region_nowait; // Region lock granted without wait.
- public int st_regsize; // Region size.
+ public DbLsn st_last_ckp;
+ public long st_time_ckp;
+ public int st_last_txnid;
+ public int st_maxtxns;
+ public int st_naborts;
+ public int st_nbegins;
+ public int st_ncommits;
+ public int st_nactive;
+ public int st_nrestores;
+ public int st_maxnactive;
+ public Active st_txnarray[];
+ public int st_region_wait;
+ public int st_region_nowait;
+ public int st_regsize;
}
-
// end of DbTxnStat.java
diff --git a/bdb/java/src/com/sleepycat/db/DbUtil.java b/bdb/java/src/com/sleepycat/db/DbUtil.java
new file mode 100644
index 00000000000..30ca93d03cc
--- /dev/null
+++ b/bdb/java/src/com/sleepycat/db/DbUtil.java
@@ -0,0 +1,98 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2001-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: DbUtil.java,v 11.5 2002/01/11 15:52:41 bostic Exp $
+ */
+
+package com.sleepycat.db;
+
+/**
+ *
+ * @author David M. Krinsky
+ */
+
+// DbUtil is a simple, package-private wrapper class that holds a few
+// static utility functions other parts of the package share and that don't
+// have a good home elsewhere. (For now, that's limited to byte-array-to-int
+// conversion and back.)
+
+class DbUtil
+{
+ // Get the u_int32_t stored beginning at offset "offset" into
+ // array "arr". We have to do the conversion manually since it's
+ // a C-native int, and we're not really supposed to make this kind of
+ // cast in Java.
+ static int array2int(byte[] arr, int offset)
+ {
+ int b1, b2, b3, b4;
+ int pos = offset;
+
+ // Get the component bytes; b4 is most significant, b1 least.
+ if (big_endian) {
+ b4 = arr[pos++];
+ b3 = arr[pos++];
+ b2 = arr[pos++];
+ b1 = arr[pos];
+ } else {
+ b1 = arr[pos++];
+ b2 = arr[pos++];
+ b3 = arr[pos++];
+ b4 = arr[pos];
+ }
+
+ // Bytes are signed. Convert [-128, -1] to [128, 255].
+ if (b1 < 0) { b1 += 256; }
+ if (b2 < 0) { b2 += 256; }
+ if (b3 < 0) { b3 += 256; }
+ if (b4 < 0) { b4 += 256; }
+
+ // Put the bytes in their proper places in an int.
+ b2 <<= 8;
+ b3 <<= 16;
+ b4 <<= 24;
+
+ // Return their sum.
+ return (b1 + b2 + b3 + b4);
+ }
+
+ // Store the specified u_int32_t, with endianness appropriate
+ // to the platform we're running on, into four consecutive bytes of
+ // the specified byte array, starting from the specified offset.
+ static void int2array(int n, byte[] arr, int offset)
+ {
+ int b1, b2, b3, b4;
+ int pos = offset;
+
+ b1 = n & 0xff;
+ b2 = (n >> 8) & 0xff;
+ b3 = (n >> 16) & 0xff;
+ b4 = (n >> 24) & 0xff;
+
+ // Bytes are signed. Convert [128, 255] to [-128, -1].
+ if (b1 >= 128) { b1 -= 256; }
+ if (b2 >= 128) { b2 -= 256; }
+ if (b3 >= 128) { b3 -= 256; }
+ if (b4 >= 128) { b4 -= 256; }
+
+ // Put the bytes in the appropriate place in the array.
+ if (big_endian) {
+ arr[pos++] = (byte)b4;
+ arr[pos++] = (byte)b3;
+ arr[pos++] = (byte)b2;
+ arr[pos] = (byte)b1;
+ } else {
+ arr[pos++] = (byte)b1;
+ arr[pos++] = (byte)b2;
+ arr[pos++] = (byte)b3;
+ arr[pos] = (byte)b4;
+ }
+ }
+
+ private static final boolean big_endian = is_big_endian();
+ private static native boolean is_big_endian();
+}
+
+// end of DbUtil.java
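
A package-internal sketch showing that int2array() and array2int() round-trip a value regardless of the platform byte order reported by is_big_endian():

    package com.sleepycat.db;

    class DbUtilRoundTrip
    {
        static boolean check(int n)
        {
            byte[] buf = new byte[8];
            DbUtil.int2array(n, buf, 4);          // store at offset 4
            return DbUtil.array2int(buf, 4) == n; // read it back
        }
    }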
diff --git a/bdb/java/src/com/sleepycat/db/Dbc.java b/bdb/java/src/com/sleepycat/db/Dbc.java
index b097cbad802..60c00b1deb7 100644
--- a/bdb/java/src/com/sleepycat/db/Dbc.java
+++ b/bdb/java/src/com/sleepycat/db/Dbc.java
@@ -1,10 +1,10 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1997, 1998, 1999, 2000
- * Sleepycat Software. All rights reserved.
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
*
- * $Id: Dbc.java,v 11.5 2000/05/25 04:18:13 dda Exp $
+ * $Id: Dbc.java,v 11.9 2002/01/11 15:52:41 bostic Exp $
*/
package com.sleepycat.db;
@@ -34,6 +34,10 @@ public class Dbc
public native int get(Dbt key, Dbt data, int flags)
throws DbException;
+ // returns: 0, DB_NOTFOUND, or throws error
+ public native int pget(Dbt key, Dbt pkey, Dbt data, int flags)
+ throws DbException;
+
// returns: 0, DB_KEYEXIST, or throws error
public native int put(Dbt key, Dbt data, int flags)
throws DbException;
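
The new pget() is meant for cursors opened on a secondary index: it returns the matching primary key alongside the data. A sketch assuming the Db.DB_SET flag name:

    import com.sleepycat.db.*;

    class PgetSketch
    {
        static void lookup(Db secondary, Dbt skey) throws DbException
        {
            Dbc cursor = secondary.cursor(null, 0);
            Dbt pkey = new Dbt();
            Dbt data = new Dbt();
            if (cursor.pget(skey, pkey, data, Db.DB_SET) == 0)
                System.out.println("primary key is " + pkey.get_size()
                    + " bytes, data is " + data.get_size() + " bytes");
            cursor.close(0);
        }
    }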
diff --git a/bdb/java/src/com/sleepycat/db/Dbt.java b/bdb/java/src/com/sleepycat/db/Dbt.java
index bbb478cd542..f51d0419963 100644
--- a/bdb/java/src/com/sleepycat/db/Dbt.java
+++ b/bdb/java/src/com/sleepycat/db/Dbt.java
@@ -1,14 +1,19 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1997, 1998, 1999, 2000
- * Sleepycat Software. All rights reserved.
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
*
- * $Id: Dbt.java,v 11.6 2000/06/16 03:34:01 dda Exp $
+ * $Id: Dbt.java,v 11.15 2002/09/04 00:37:25 mjc Exp $
*/
package com.sleepycat.db;
+import java.io.ByteArrayInputStream;
+import java.io.ByteArrayOutputStream;
+import java.io.ObjectInputStream;
+import java.io.ObjectOutputStream;
+
/**
*
* @author Donald D. Anderson
@@ -18,6 +23,34 @@ public class Dbt
// methods
//
+ public Dbt(byte[] data)
+ {
+ init();
+ this.data = data;
+ if (data != null) {
+ this.size = data.length;
+ }
+ }
+
+ public Dbt(byte[] data, int off, int len)
+ {
+ init();
+ this.data = data;
+ this.offset = off;
+ this.size = len;
+ }
+
+ public Dbt()
+ {
+ init();
+ }
+
+ public Dbt(Object serialobj) throws java.io.IOException
+ {
+ init();
+ this.set_object(serialobj);
+ }
+
protected native void finalize()
throws Throwable;
@@ -25,86 +58,173 @@ public class Dbt
//
// key/data
+ public byte[] get_data()
+ {
+ // In certain circumstances, like callbacks to
+ // user code that have Dbt args, we do not create
+ // data arrays until the user explicitly does a get_data.
+ // This saves us from needlessly creating objects
+ // (potentially large arrays) that may never be accessed.
+ //
+ if (must_create_data) {
+ data = create_data();
+ must_create_data = false;
+ }
+ return data;
+ }
public void set_data(byte[] data)
{
- // internal_set_data is separated from set_data in case
- // we want to have set_data automatically set some other
- // fields (size, etc.) someday.
- //
- internal_set_data(data);
+ this.data = data;
+ this.must_create_data = false;
}
- public native byte[] get_data();
- private native void internal_set_data(byte[] data);
- // These are not in the original DB interface,
- // but they can be used to get/set the offset
+ // get_offset/set_offset is unique to the Java portion
+ // of the DB APIs. They can be used to get/set the offset
// into the attached byte array.
//
- public native void set_offset(int off);
- public native int get_offset();
+ public int get_offset()
+ {
+ return offset;
+ }
+
+ public void set_offset(int offset)
+ {
+ this.offset = offset;
+ }
// key/data length
- public native /*u_int32_t*/ int get_size();
- public native void set_size(/*u_int32_t*/ int size);
+ public /*u_int32_t*/ int get_size()
+ {
+ return size;
+ }
+
+ public void set_size(/*u_int32_t*/ int size)
+ {
+ this.size = size;
+ }
// RO: length of user buffer.
- public native /*u_int32_t*/ int get_ulen();
- public native void set_ulen(/*u_int32_t*/ int ulen);
+ public /*u_int32_t*/ int get_ulen()
+ {
+ return ulen;
+ }
+
+ public void set_ulen(/*u_int32_t*/ int ulen)
+ {
+ this.ulen = ulen;
+ }
+
// RO: get/put record length.
- public native /*u_int32_t*/ int get_dlen();
- public native void set_dlen(/*u_int32_t*/ int dlen);
+ public /*u_int32_t*/ int get_dlen()
+ {
+ return dlen;
+ }
+
+ public void set_dlen(/*u_int32_t*/ int dlen)
+ {
+ this.dlen = dlen;
+ }
// RO: get/put record offset.
- public native /*u_int32_t*/ int get_doff();
- public native void set_doff(/*u_int32_t*/ int doff);
+ public /*u_int32_t*/ int get_doff()
+ {
+ return doff;
+ }
+
+ public void set_doff(/*u_int32_t*/ int doff)
+ {
+ this.doff = doff;
+ }
// flags
- public native /*u_int32_t*/ int get_flags();
- public native void set_flags(/*u_int32_t*/ int flags);
+ public /*u_int32_t*/ int get_flags()
+ {
+ return flags;
+ }
+
+ public void set_flags(/*u_int32_t*/ int flags)
+ {
+ this.flags = flags;
+ }
+
+ // Helper methods to get/set a Dbt from a serializable object.
+ public Object get_object() throws java.io.IOException,
+ java.lang.ClassNotFoundException
+ {
+ ByteArrayInputStream bytestream = new ByteArrayInputStream(get_data());
+ ObjectInputStream ois = new ObjectInputStream(bytestream);
+ Object serialobj = ois.readObject();
+ ois.close();
+ bytestream.close();
+ return (serialobj);
+ }
+
+ public void set_object(Object serialobj) throws java.io.IOException
+ {
+ ByteArrayOutputStream bytestream = new ByteArrayOutputStream();
+ ObjectOutputStream oos = new ObjectOutputStream(bytestream);
+ oos.writeObject(serialobj);
+ oos.close();
+ byte[] buf = bytestream.toByteArray();
+ bytestream.close();
+ set_data(buf);
+ set_offset(0);
+ set_size(buf.length);
+ }
// These are not in the original DB interface.
// They can be used to set the recno key for a Dbt.
- // Note: you must set the data field to an array of
- // at least four bytes before calling either of these.
- //
- public native void set_recno_key_data(int recno);
- public native int get_recno_key_data();
-
- public Dbt(byte[] data)
+ // Note: if data is less than (offset + 4) bytes, these
+ // methods may throw an ArrayIndexOutOfBoundsException. get_recno_key_data()
+ // will additionally throw a NullPointerException if data is null.
+ public void set_recno_key_data(int recno)
{
- init();
- internal_set_data(data);
- if (data != null)
- set_size(data.length);
+ if (data == null) {
+ data = new byte[4];
+ size = 4;
+ offset = 0;
+ }
+ DbUtil.int2array(recno, data, offset);
}
- public Dbt(byte[] data, int off, int len)
+ public int get_recno_key_data()
{
- this(data);
- set_ulen(len);
- set_offset(off);
+ return (DbUtil.array2int(data, offset));
}
- public Dbt()
+ // Used internally by DbMultipleRecnoIterator
+ //
+ /*package*/ void set_recno_key_from_buffer(byte[] data, int offset)
{
- init();
+ this.data = data;
+ this.offset = offset;
+ this.size = 4;
+ }
+
+ static {
+ Db.load_db();
}
// private methods
//
private native void init();
+ private native byte[] create_data();
// private data
//
private long private_dbobj_ = 0;
- static {
- Db.load_db();
- }
+ private byte[] data = null;
+ private int offset = 0;
+ private int size = 0;
+ private int ulen = 0;
+ private int dlen = 0;
+ private int doff = 0;
+ private int flags = 0;
+ private boolean must_create_data = false;
}
-
// end of Dbt.java
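
The new object helpers let any Serializable value ride in a Dbt. A minimal round-trip sketch (java.util.Date stands in for an arbitrary Serializable):

    import com.sleepycat.db.*;

    class SerializedDbtSketch
    {
        static void store(Db db, Dbt key, java.util.Date value)
            throws DbException, java.io.IOException, ClassNotFoundException
        {
            Dbt data = new Dbt(value);          // serializes the object
            db.put(null, key, data, 0);

            Dbt out = new Dbt();
            if (db.get(null, key, out, 0) == 0) {
                java.util.Date read = (java.util.Date)out.get_object();
                System.out.println("stored " + read);
            }
        }
    }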
diff --git a/bdb/java/src/com/sleepycat/db/xa/DbXAResource.java b/bdb/java/src/com/sleepycat/db/xa/DbXAResource.java
new file mode 100644
index 00000000000..3a996c0d045
--- /dev/null
+++ b/bdb/java/src/com/sleepycat/db/xa/DbXAResource.java
@@ -0,0 +1,190 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: DbXAResource.java,v 1.2 2002/08/09 01:54:57 bostic Exp $
+ */
+
+package com.sleepycat.db.xa;
+
+import com.sleepycat.db.Db;
+import com.sleepycat.db.DbEnv;
+import com.sleepycat.db.DbTxn;
+import javax.transaction.xa.XAResource;
+import javax.transaction.xa.XAException;
+import javax.transaction.xa.Xid;
+
+public class DbXAResource implements XAResource
+{
+ public DbXAResource(String home, int rmid, int flags)
+ throws XAException
+ {
+ this.home = home;
+ this.rmid = rmid;
+
+ // We force single-threading for calls to _init/_close.
+ // This makes our internal code much easier, and
+ // should not be a performance burden.
+ synchronized (DbXAResource.class) {
+ _init(home, rmid, flags);
+ }
+ }
+
+ //
+ // Alternate constructor for convenience.
+ // Uses an rmid that is unique within this JVM,
+ // numbered starting at 0.
+ //
+ public DbXAResource(String home)
+ throws XAException
+ {
+ this(home, get_unique_rmid(), 0);
+ }
+
+ private native void _init(String home, int rmid, int flags);
+
+ public void close(int flags)
+ throws XAException
+ {
+ // We force single-threading for calls to _init/_close.
+ // This makes our internal code much easier, and
+ // should not be a performance burden.
+ synchronized (DbXAResource.class) {
+ _close(home, rmid, flags);
+ }
+ }
+
+ private native void _close(String home, int rmid, int flags);
+
+ public void commit(Xid xid, boolean onePhase)
+ throws XAException
+ {
+ _commit(xid, rmid, onePhase);
+ }
+
+ private native void _commit(Xid xid, int rmid, boolean onePhase);
+
+ public void end(Xid xid, int flags)
+ throws XAException
+ {
+ _end(xid, rmid, flags);
+ }
+
+ private native void _end(Xid xid, int rmid, int flags);
+
+ public void forget(Xid xid)
+ throws XAException
+ {
+ _forget(xid, rmid);
+ }
+
+ private native void _forget(Xid xid, int rmid);
+
+ public int getTransactionTimeout()
+ throws XAException
+ {
+ return transactionTimeout;
+ }
+
+ public boolean isSameRM(XAResource xares)
+ throws XAException
+ {
+ if (!(xares instanceof DbXAResource))
+ return false;
+ return (this.rmid == ((DbXAResource)xares).rmid);
+ }
+
+ public int prepare(Xid xid)
+ throws XAException
+ {
+ return _prepare(xid, rmid);
+ }
+
+ private native int _prepare(Xid xid, int rmid);
+
+ public Xid [] recover(int flag)
+ throws XAException
+ {
+ return _recover(rmid, flag);
+ }
+
+ private native Xid[] _recover(int rmid, int flags);
+
+ public void rollback(Xid xid)
+ throws XAException
+ {
+ _rollback(xid, rmid);
+ System.err.println("DbXAResource.rollback returned");
+ }
+
+ private native void _rollback(Xid xid, int rmid);
+
+ public boolean setTransactionTimeout(int seconds)
+ throws XAException
+ {
+ // XXX we are not using the transaction timeout.
+ transactionTimeout = seconds;
+ return true;
+ }
+
+ public void start(Xid xid, int flags)
+ throws XAException
+ {
+ _start(xid, rmid, flags);
+ }
+
+ private native void _start(Xid xid, int rmid, int flags);
+
+ private static synchronized int get_unique_rmid()
+ {
+ return unique_rmid++;
+ }
+
+ public interface DbAttach
+ {
+ public DbEnv get_env();
+ public DbTxn get_txn();
+ }
+
+ protected static class DbAttachImpl implements DbAttach
+ {
+ private DbEnv env;
+ private DbTxn txn;
+
+ DbAttachImpl(DbEnv env, DbTxn txn)
+ {
+ this.env = env;
+ this.txn = txn;
+ }
+
+ public DbTxn get_txn()
+ {
+ return txn;
+ }
+
+ public DbEnv get_env()
+ {
+ return env;
+ }
+ }
+
+ public static native DbAttach xa_attach(Xid xid, Integer rmid);
+
+ ////////////////////////////////////////////////////////////////
+ //
+ // private data
+ //
+ private long private_dbobj_ = 0;
+ private int transactionTimeout = 0;
+ private String home;
+ private int rmid;
+
+ private static int unique_rmid = 0;
+
+ static
+ {
+ Db.load_db();
+ }
+}
diff --git a/bdb/java/src/com/sleepycat/db/xa/DbXid.java b/bdb/java/src/com/sleepycat/db/xa/DbXid.java
new file mode 100644
index 00000000000..ba03d3ebf3e
--- /dev/null
+++ b/bdb/java/src/com/sleepycat/db/xa/DbXid.java
@@ -0,0 +1,49 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: DbXid.java,v 1.2 2002/08/09 01:54:58 bostic Exp $
+ */
+
+package com.sleepycat.db.xa;
+
+import com.sleepycat.db.DbException;
+import com.sleepycat.db.DbTxn;
+import javax.transaction.xa.XAException;
+import javax.transaction.xa.Xid;
+
+public class DbXid implements Xid
+{
+ public DbXid(int formatId, byte[] gtrid, byte[] bqual)
+ throws XAException
+ {
+ this.formatId = formatId;
+ this.gtrid = gtrid;
+ this.bqual = bqual;
+ }
+
+ public int getFormatId()
+ {
+ return formatId;
+ }
+
+ public byte[] getGlobalTransactionId()
+ {
+ return gtrid;
+ }
+
+ public byte[] getBranchQualifier()
+ {
+ return bqual;
+ }
+
+ ////////////////////////////////////////////////////////////////
+ //
+ // private data
+ //
+ private byte[] gtrid;
+ private byte[] bqual;
+ private int formatId;
+}
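
Driven by hand rather than by a transaction manager, the XA classes above might be used like this:

    import com.sleepycat.db.xa.DbXAResource;
    import com.sleepycat.db.xa.DbXid;
    import javax.transaction.xa.XAResource;
    import javax.transaction.xa.XAException;
    import javax.transaction.xa.Xid;

    class XaSketch
    {
        // Push one global transaction through the XA protocol; normally
        // a transaction manager issues these calls.
        static void run(String home, byte[] gtrid, byte[] bqual)
            throws XAException
        {
            DbXAResource res = new DbXAResource(home);
            Xid xid = new DbXid(1, gtrid, bqual);

            res.start(xid, XAResource.TMNOFLAGS);
            // ... do Berkeley DB work attached to xid (see xa_attach) ...
            res.end(xid, XAResource.TMSUCCESS);

            if (res.prepare(xid) == XAResource.XA_OK)
                res.commit(xid, false);

            res.close(0);
        }
    }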
diff --git a/bdb/java/src/com/sleepycat/examples/AccessExample.java b/bdb/java/src/com/sleepycat/examples/AccessExample.java
index f3a98c2c7d5..ea3d5f342ed 100644
--- a/bdb/java/src/com/sleepycat/examples/AccessExample.java
+++ b/bdb/java/src/com/sleepycat/examples/AccessExample.java
@@ -1,10 +1,10 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1997, 1998, 1999, 2000
+ * Copyright (c) 1997-2002
* Sleepycat Software. All rights reserved.
*
- * $Id: AccessExample.java,v 11.5 2000/12/13 07:09:42 krinsky Exp $
+ * $Id: AccessExample.java,v 11.12 2002/02/05 22:27:13 mjc Exp $
*/
package com.sleepycat.examples;
@@ -24,12 +24,6 @@ class AccessExample
{
}
- private static void usage()
- {
- System.err.println("usage: AccessExample\n");
- System.exit(1);
- }
-
public static void main(String argv[])
{
try
@@ -101,7 +95,7 @@ class AccessExample
Db table = new Db(null, 0);
table.set_error_stream(System.err);
table.set_errpfx("AccessExample");
- table.open(FileName, null, Db.DB_BTREE, Db.DB_CREATE, 0644);
+ table.open(null, FileName, null, Db.DB_BTREE, Db.DB_CREATE, 0644);
//
// Insert records into the database, where the key is the user
@@ -124,7 +118,7 @@ class AccessExample
try
{
int err;
- if ((err = table.put(null,
+ if ((err = table.put(null,
key, data, Db.DB_NOOVERWRITE)) == Db.DB_KEYEXIST) {
System.out.println("Key " + line + " already exists.");
}
@@ -174,8 +168,9 @@ class AccessExample
void setString(String value)
{
- set_data(value.getBytes());
- set_size(value.length());
+ byte[] data = value.getBytes();
+ set_data(data);
+ set_size(data.length);
}
String getString()
diff --git a/bdb/java/src/com/sleepycat/examples/BtRecExample.java b/bdb/java/src/com/sleepycat/examples/BtRecExample.java
index 5101f676a0b..68365600748 100644
--- a/bdb/java/src/com/sleepycat/examples/BtRecExample.java
+++ b/bdb/java/src/com/sleepycat/examples/BtRecExample.java
@@ -1,10 +1,10 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1997, 1998, 1999, 2000
+ * Copyright (c) 1997-2002
* Sleepycat Software. All rights reserved.
*
- * $Id: BtRecExample.java,v 11.6 2000/02/19 20:58:02 bostic Exp $
+ * $Id: BtRecExample.java,v 11.11 2002/02/05 22:27:13 mjc Exp $
*/
package com.sleepycat.examples;
@@ -41,7 +41,7 @@ public class BtRecExample
dbp.set_pagesize(1024); // 1K page sizes.
dbp.set_flags(Db.DB_RECNUM); // Record numbers.
- dbp.open(database, null, Db.DB_BTREE, Db.DB_CREATE, 0664);
+ dbp.open(null, database, null, Db.DB_BTREE, Db.DB_CREATE, 0664);
//
// Insert records into the database, where the key is the word
@@ -177,36 +177,26 @@ public class BtRecExample
// Open the word database.
FileReader freader = new FileReader(wordlist);
- BtRecExample app = new BtRecExample(new BufferedReader(freader));
+ BtRecExample app = new BtRecExample(new BufferedReader(freader));
- // Close the word database.
+ // Close the word database.
freader.close();
freader = null;
app.stats();
app.run();
- }
- catch (FileNotFoundException fnfe) {
+ } catch (FileNotFoundException fnfe) {
System.err.println(progname + ": unexpected open error " + fnfe);
System.exit (1);
- }
- catch (IOException ioe) {
+ } catch (IOException ioe) {
System.err.println(progname + ": open " + wordlist + ": " + ioe);
System.exit (1);
+ } catch (DbException dbe) {
+ System.err.println("Exception: " + dbe);
+ System.exit(dbe.get_errno());
}
- catch (DbException dbe) {
- System.err.println("Exception: " + dbe);
- System.exit(dbe.get_errno());
- }
- System.exit(0);
- }
-
- void
- usage()
- {
- System.err.println("usage: " + progname);
- System.exit(1);
+ System.exit(0);
}
// Prompts for a line, and keeps prompting until a non blank
@@ -279,10 +269,11 @@ public class BtRecExample
void setString(String value)
{
- set_data(value.getBytes());
- set_size(value.length());
+ byte[] data = value.getBytes();
+ set_data(data);
+ set_size(data.length);
// must set ulen because sometimes a string is returned
- set_ulen(value.length());
+ set_ulen(data.length);
}
String getString()
@@ -329,8 +320,9 @@ public class BtRecExample
void setString(String value)
{
- set_data(value.getBytes());
- set_size(value.length());
+ byte[] data = value.getBytes();
+ set_data(data);
+ set_size(data.length);
}
int getRecno()
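
The setString() changes above (and the matching set_ulen() fix) size the Dbt by the encoded byte array rather than by String.length(). The difference only shows up with non-ASCII data: String.length() counts UTF-16 characters, while getBytes() yields however many bytes the platform encoding produces. A small illustration (the byte count shown assumes a UTF-8 default charset; not part of the patch):

class ByteLengthSketch
{
    public static void main(String[] args)
    {
        String value = "héllo";             // one non-ASCII character
        byte[] data = value.getBytes();     // platform default charset

        // 5 characters, but typically 6 bytes under UTF-8; sizing the Dbt
        // with value.length() would therefore understate the record size.
        System.out.println("chars: " + value.length());
        System.out.println("bytes: " + data.length);
    }
}
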
diff --git a/bdb/java/src/com/sleepycat/examples/BulkAccessExample.java b/bdb/java/src/com/sleepycat/examples/BulkAccessExample.java
new file mode 100644
index 00000000000..e6690197105
--- /dev/null
+++ b/bdb/java/src/com/sleepycat/examples/BulkAccessExample.java
@@ -0,0 +1,198 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: BulkAccessExample.java,v 1.6 2002/02/05 22:27:13 mjc Exp $
+ */
+
+package com.sleepycat.examples;
+
+import com.sleepycat.db.*;
+import java.io.File;
+import java.io.FileNotFoundException;
+import java.io.InputStreamReader;
+import java.io.IOException;
+import java.io.PrintStream;
+
+class BulkAccessExample
+{
+ private static final String FileName = "access.db";
+
+ public BulkAccessExample()
+ {
+ }
+
+ public static void main(String argv[])
+ {
+ try
+ {
+ BulkAccessExample app = new BulkAccessExample();
+ app.run();
+ }
+ catch (DbException dbe)
+ {
+ System.err.println("BulkAccessExample: " + dbe.toString());
+ System.exit(1);
+ }
+ catch (FileNotFoundException fnfe)
+ {
+ System.err.println("BulkAccessExample: " + fnfe.toString());
+ System.exit(1);
+ }
+ System.exit(0);
+ }
+
+ // Prompts for a line, and keeps prompting until a non blank
+ // line is returned. Returns null on error.
+ //
+ static public String askForLine(InputStreamReader reader,
+ PrintStream out, String prompt)
+ {
+ String result = "";
+ while (result != null && result.length() == 0) {
+ out.print(prompt);
+ out.flush();
+ result = getLine(reader);
+ }
+ return result;
+ }
+
+ // Not terribly efficient, but does the job.
+ // Works for reading a line from stdin or a file.
+ // Returns null on EOF. If EOF appears in the middle
+ // of a line, returns that line, then null on next call.
+ //
+ static public String getLine(InputStreamReader reader)
+ {
+ StringBuffer b = new StringBuffer();
+ int c;
+ try {
+ while ((c = reader.read()) != -1 && c != '\n') {
+ if (c != '\r')
+ b.append((char)c);
+ }
+ }
+ catch (IOException ioe) {
+ c = -1;
+ }
+
+ if (c == -1 && b.length() == 0)
+ return null;
+ else
+ return b.toString();
+ }
+
+ public void run()
+ throws DbException, FileNotFoundException
+ {
+ // Remove the previous database.
+ new File(FileName).delete();
+
+ // Create the database object.
+ // There is no environment for this simple example.
+ Db table = new Db(null, 0);
+ table.set_error_stream(System.err);
+ table.set_errpfx("BulkAccessExample");
+ table.open(null, FileName, null, Db.DB_BTREE, Db.DB_CREATE, 0644);
+
+ //
+ // Insert records into the database, where the key is the user
+ // input and the data is the user input in reverse order.
+ //
+ InputStreamReader reader = new InputStreamReader(System.in);
+
+ for (;;) {
+ String line = askForLine(reader, System.out, "input> ");
+ if (line == null)
+ break;
+
+ String reversed = (new StringBuffer(line)).reverse().toString();
+
+ // See definition of StringDbt below
+ //
+ StringDbt key = new StringDbt(line);
+ StringDbt data = new StringDbt(reversed);
+
+ try
+ {
+ int err;
+ if ((err = table.put(null,
+ key, data, Db.DB_NOOVERWRITE)) == Db.DB_KEYEXIST) {
+ System.out.println("Key " + line + " already exists.");
+ }
+ }
+ catch (DbException dbe)
+ {
+ System.out.println(dbe.toString());
+ }
+ System.out.println("");
+ }
+
+ // Acquire a cursor for the table and two Dbts.
+ Dbc dbc = table.cursor(null, 0);
+ Dbt foo = new Dbt();
+ foo.set_flags(Db.DB_DBT_MALLOC);
+
+ Dbt bulk_data = new Dbt();
+
+ // Set Db.DB_DBT_USERMEM on the data Dbt; Db.DB_MULTIPLE_KEY requires
+ // it. Then allocate a byte array of a reasonable size; we'll
+ // go through the database in chunks this big.
+ bulk_data.set_flags(Db.DB_DBT_USERMEM);
+ bulk_data.set_data(new byte[1000000]);
+ bulk_data.set_ulen(1000000);
+
+
+ // Walk through the table, printing the key/data pairs.
+ //
+ while (dbc.get(foo, bulk_data, Db.DB_NEXT | Db.DB_MULTIPLE_KEY) == 0)
+ {
+ DbMultipleKeyDataIterator iterator;
+ iterator = new DbMultipleKeyDataIterator(bulk_data);
+
+ StringDbt key, data;
+ key = new StringDbt();
+ data = new StringDbt();
+
+ while (iterator.next(key, data)) {
+ System.out.println(key.getString() + " : " + data.getString());
+ }
+ }
+ dbc.close();
+ table.close(0);
+ }
+
+ // Here's an example of how you can extend a Dbt in a straightforward
+ // way to allow easy storage/retrieval of strings, or whatever
+ // kind of data you wish. We've declared it as a static inner
+ // class, but it need not be.
+ //
+ static /*inner*/
+ class StringDbt extends Dbt
+ {
+ StringDbt()
+ {
+ set_flags(Db.DB_DBT_MALLOC); // tell Db to allocate on retrieval
+ }
+
+ StringDbt(String value)
+ {
+ setString(value);
+ set_flags(Db.DB_DBT_MALLOC); // tell Db to allocate on retrieval
+ }
+
+ void setString(String value)
+ {
+ byte[] data = value.getBytes();
+ set_data(data);
+ set_size(data.length);
+ }
+
+ String getString()
+ {
+ return new String(get_data(), get_offset(), get_size());
+ }
+ }
+}
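
The new BulkAccessExample above pairs Db.DB_MULTIPLE_KEY with a caller-supplied buffer: the data Dbt must carry Db.DB_DBT_USERMEM and an explicit ulen, and each Dbc.get() then returns as many key/data pairs as fit, which DbMultipleKeyDataIterator unpacks. The same pattern condensed into a reusable helper, using only the calls already shown above (a sketch, not part of the patch):

import com.sleepycat.db.*;

class BulkDumpSketch
{
    // Print every key/data pair, fetching up to bufferSize bytes per
    // Dbc.get() round trip.
    static void dumpBulk(Db table, int bufferSize)
        throws DbException
    {
        Dbc cursor = table.cursor(null, 0);

        Dbt key = new Dbt();
        key.set_flags(Db.DB_DBT_MALLOC);

        Dbt bulk = new Dbt();
        bulk.set_flags(Db.DB_DBT_USERMEM);   // required by DB_MULTIPLE_KEY
        bulk.set_data(new byte[bufferSize]);
        bulk.set_ulen(bufferSize);

        while (cursor.get(key, bulk, Db.DB_NEXT | Db.DB_MULTIPLE_KEY) == 0) {
            DbMultipleKeyDataIterator it = new DbMultipleKeyDataIterator(bulk);
            Dbt k = new Dbt();
            Dbt d = new Dbt();
            while (it.next(k, d)) {
                String ks = new String(k.get_data(), k.get_offset(), k.get_size());
                String ds = new String(d.get_data(), d.get_offset(), d.get_size());
                System.out.println(ks + " : " + ds);
            }
        }
        cursor.close();
    }
}
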
diff --git a/bdb/java/src/com/sleepycat/examples/EnvExample.java b/bdb/java/src/com/sleepycat/examples/EnvExample.java
index f1b855836c5..acbd9f59621 100644
--- a/bdb/java/src/com/sleepycat/examples/EnvExample.java
+++ b/bdb/java/src/com/sleepycat/examples/EnvExample.java
@@ -1,10 +1,10 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1997, 1998, 1999, 2000
+ * Copyright (c) 1997-2002
* Sleepycat Software. All rights reserved.
*
- * $Id: EnvExample.java,v 11.7 2000/09/25 13:16:51 dda Exp $
+ * $Id: EnvExample.java,v 11.9 2002/01/11 15:52:42 bostic Exp $
*/
package com.sleepycat.examples;
diff --git a/bdb/java/src/com/sleepycat/examples/LockExample.java b/bdb/java/src/com/sleepycat/examples/LockExample.java
index 33b7d0538ce..d450193146a 100644
--- a/bdb/java/src/com/sleepycat/examples/LockExample.java
+++ b/bdb/java/src/com/sleepycat/examples/LockExample.java
@@ -1,10 +1,10 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1997, 1998, 1999, 2000
+ * Copyright (c) 1997-2002
* Sleepycat Software. All rights reserved.
*
- * $Id: LockExample.java,v 11.5 2001/01/04 14:23:30 dda Exp $
+ * $Id: LockExample.java,v 11.8 2002/01/11 15:52:43 bostic Exp $
*/
package com.sleepycat.examples;
@@ -150,7 +150,7 @@ class LockExample extends DbEnv
}
did_get = false;
DbLock lock = (DbLock)locks.elementAt(lockid);
- lock.put(this);
+ lock_put(lock);
}
System.out.println("Lock #" + lockid + " " +
(did_get ? "granted" : "released"));
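
The lock.put(this) to lock_put(lock) rewrite above tracks an API move visible later in this patch: the DbLock.put(DbEnv) native method disappears from com_sleepycat_db_DbLock.h and DbEnv gains lock_put(DbLock). LockExample can call lock_put() directly because it extends DbEnv; from ordinary code the call would look roughly like this (sketch, not part of the patch):

import com.sleepycat.db.*;

class LockPutSketch
{
    // Release a previously granted lock through the environment handle.
    static void release(DbEnv env, DbLock lock)
        throws DbException
    {
        // Old API (removed in this release): lock.put(env);
        env.lock_put(lock);
    }
}
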
diff --git a/bdb/java/src/com/sleepycat/examples/TpcbExample.java b/bdb/java/src/com/sleepycat/examples/TpcbExample.java
index 29a90790801..aa9d10b37bc 100644
--- a/bdb/java/src/com/sleepycat/examples/TpcbExample.java
+++ b/bdb/java/src/com/sleepycat/examples/TpcbExample.java
@@ -1,10 +1,10 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1997, 1998, 1999, 2000
+ * Copyright (c) 1997-2002
* Sleepycat Software. All rights reserved.
*
- * $Id: TpcbExample.java,v 11.9 2000/04/01 15:52:15 dda Exp $
+ * $Id: TpcbExample.java,v 11.16 2002/02/13 06:08:35 mjc Exp $
*/
package com.sleepycat.examples;
@@ -82,6 +82,10 @@ class TpcbExample extends DbEnv
set_errpfx(progname);
set_cachesize(0, cachesize == 0 ? 4 * 1024 * 1024 : cachesize, 0);
+ if ((flags & (Db.DB_TXN_NOSYNC)) != 0)
+ set_flags(Db.DB_TXN_NOSYNC, true);
+ flags &= ~(Db.DB_TXN_NOSYNC);
+
int local_flags = flags | Db.DB_CREATE;
if (initializing)
local_flags |= Db.DB_INIT_MPOOL;
@@ -117,8 +121,8 @@ class TpcbExample extends DbEnv
try {
dbp = new Db(this, 0);
dbp.set_h_nelem(h_nelem);
- dbp.open("account", null,
- Db.DB_HASH, Db.DB_CREATE | Db.DB_TRUNCATE, 0644);
+ dbp.open(null, "account", null, Db.DB_HASH,
+ Db.DB_CREATE | Db.DB_TRUNCATE, 0644);
}
// can be DbException or FileNotFoundException
catch (Exception e1) {
@@ -154,8 +158,8 @@ class TpcbExample extends DbEnv
dbp.set_h_ffactor(1);
dbp.set_pagesize(512);
- dbp.open("branch", null,
- Db.DB_HASH, Db.DB_CREATE | Db.DB_TRUNCATE, 0644);
+ dbp.open(null, "branch", null, Db.DB_HASH,
+ Db.DB_CREATE | Db.DB_TRUNCATE, 0644);
}
// can be DbException or FileNotFoundException
catch (Exception e3) {
@@ -191,8 +195,8 @@ class TpcbExample extends DbEnv
dbp.set_h_ffactor(0);
dbp.set_pagesize(512);
- dbp.open("teller", null,
- Db.DB_HASH, Db.DB_CREATE | Db.DB_TRUNCATE, 0644);
+ dbp.open(null, "teller", null, Db.DB_HASH,
+ Db.DB_CREATE | Db.DB_TRUNCATE, 0644);
}
// can be DbException or FileNotFoundException
catch (Exception e5) {
@@ -218,8 +222,8 @@ class TpcbExample extends DbEnv
try {
dbp = new Db(this, 0);
dbp.set_re_len(HISTORY_LEN);
- dbp.open("history", null,
- Db.DB_RECNO, Db.DB_CREATE | Db.DB_TRUNCATE, 0644);
+ dbp.open(null, "history", null, Db.DB_RECNO,
+ Db.DB_CREATE | Db.DB_TRUNCATE, 0644);
}
// can be DbException or FileNotFoundException
catch (Exception e7) {
@@ -349,13 +353,17 @@ class TpcbExample extends DbEnv
int err;
try {
adb = new Db(this, 0);
- adb.open("account", null, Db.DB_UNKNOWN, 0, 0);
+ adb.open(null, "account", null, Db.DB_UNKNOWN,
+ Db.DB_AUTO_COMMIT, 0);
bdb = new Db(this, 0);
- bdb.open("branch", null, Db.DB_UNKNOWN, 0, 0);
+ bdb.open(null, "branch", null, Db.DB_UNKNOWN,
+ Db.DB_AUTO_COMMIT, 0);
tdb = new Db(this, 0);
- tdb.open("teller", null, Db.DB_UNKNOWN, 0, 0);
+ tdb.open(null, "teller", null, Db.DB_UNKNOWN,
+ Db.DB_AUTO_COMMIT, 0);
hdb = new Db(this, 0);
- hdb.open("history", null, Db.DB_UNKNOWN, 0, 0);
+ hdb.open(null, "history", null, Db.DB_UNKNOWN,
+ Db.DB_AUTO_COMMIT, 0);
}
catch (DbException dbe) {
errExit(dbe, "Open of db files failed");
@@ -494,7 +502,11 @@ class TpcbExample extends DbEnv
tcurs.close();
hcurs.close();
- t.commit(0);
+ // null out t in advance; if the commit fails,
+ // we don't want to abort it in the catch clause.
+ DbTxn tmptxn = t;
+ t = null;
+ tmptxn.commit(0);
// END TIMING
return (0);
@@ -634,9 +646,9 @@ class TpcbExample extends DbEnv
history = history == 0 ? HISTORY : history;
if (verbose)
- System.out.println((long)accounts + " Accounts "
- + String.valueOf(branches) + " Branches "
- + String.valueOf(tellers) + " Tellers "
+ System.out.println((long)accounts + " Accounts, "
+ + String.valueOf(branches) + " Branches, "
+ + String.valueOf(tellers) + " Tellers, "
+ String.valueOf(history) + " History");
if (iflag) {
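
The commit change above is a defensive ordering detail: the transaction reference is cleared before commit() runs, so a failure inside commit() does not lead the surrounding catch clause to abort a handle that has already finished. The shape of the pattern in isolation (a sketch; the txn_begin()/abort() calls are assumed from the rest of TpcbExample rather than shown in this hunk):

import com.sleepycat.db.*;

class CommitSketch
{
    // Run one transaction; clear the local reference before committing so
    // a failed commit is not followed by an abort of the same handle.
    static void runOnce(DbEnv env) throws DbException
    {
        DbTxn t = null;
        try {
            t = env.txn_begin(null, 0);
            // ... transactional work elided ...

            DbTxn tmptxn = t;
            t = null;
            tmptxn.commit(0);
        } catch (DbException dbe) {
            if (t != null)
                t.abort();              // abort only a still-open transaction
            throw dbe;
        }
    }
}
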
diff --git a/bdb/libdb_java/checkapi.prl b/bdb/libdb_java/checkapi.prl
index 25882c056cc..a27b8ffd107 100644
--- a/bdb/libdb_java/checkapi.prl
+++ b/bdb/libdb_java/checkapi.prl
@@ -30,7 +30,7 @@ nextline:
$def = "";
}
if ($in_def == 1) {
- $def .= $_;
+ $def .= " $_";
}
if (/\)/) {
$line = "";
@@ -42,6 +42,8 @@ nextline:
# remove comments
s@/\*[^*]*\*/@@g;
s@[ ][ ]*@ @g;
+ s@^[ ]@@g;
+ s@[ ]$@@g;
s@JNIEnv *\* *@JNIEnv @g;
s@([,*()]) @\1@g;
s@ ([,*()])@\1@g;
diff --git a/bdb/libdb_java/com_sleepycat_db_Db.h b/bdb/libdb_java/com_sleepycat_db_Db.h
index d9e1f1cbbc7..0787ae87aed 100644
--- a/bdb/libdb_java/com_sleepycat_db_Db.h
+++ b/bdb/libdb_java/com_sleepycat_db_Db.h
@@ -3,10 +3,211 @@
/* Header for class com_sleepycat_db_Db */
#ifndef _Included_com_sleepycat_db_Db
-#define _Included_com_sleepycat_db_Db
+#define _Included_com_sleepycat_db_Db
#ifdef __cplusplus
extern "C" {
#endif
+#undef com_sleepycat_db_Db_DB_BTREE
+#define com_sleepycat_db_Db_DB_BTREE 1L
+#undef com_sleepycat_db_Db_DB_DONOTINDEX
+#define com_sleepycat_db_Db_DB_DONOTINDEX -30999L
+#undef com_sleepycat_db_Db_DB_HASH
+#define com_sleepycat_db_Db_DB_HASH 2L
+#undef com_sleepycat_db_Db_DB_KEYEMPTY
+#define com_sleepycat_db_Db_DB_KEYEMPTY -30998L
+#undef com_sleepycat_db_Db_DB_KEYEXIST
+#define com_sleepycat_db_Db_DB_KEYEXIST -30997L
+#undef com_sleepycat_db_Db_DB_LOCK_DEADLOCK
+#define com_sleepycat_db_Db_DB_LOCK_DEADLOCK -30996L
+#undef com_sleepycat_db_Db_DB_LOCK_NOTGRANTED
+#define com_sleepycat_db_Db_DB_LOCK_NOTGRANTED -30995L
+#undef com_sleepycat_db_Db_DB_NOSERVER
+#define com_sleepycat_db_Db_DB_NOSERVER -30994L
+#undef com_sleepycat_db_Db_DB_NOSERVER_HOME
+#define com_sleepycat_db_Db_DB_NOSERVER_HOME -30993L
+#undef com_sleepycat_db_Db_DB_NOSERVER_ID
+#define com_sleepycat_db_Db_DB_NOSERVER_ID -30992L
+#undef com_sleepycat_db_Db_DB_NOTFOUND
+#define com_sleepycat_db_Db_DB_NOTFOUND -30991L
+#undef com_sleepycat_db_Db_DB_OLD_VERSION
+#define com_sleepycat_db_Db_DB_OLD_VERSION -30990L
+#undef com_sleepycat_db_Db_DB_PAGE_NOTFOUND
+#define com_sleepycat_db_Db_DB_PAGE_NOTFOUND -30989L
+#undef com_sleepycat_db_Db_DB_QUEUE
+#define com_sleepycat_db_Db_DB_QUEUE 4L
+#undef com_sleepycat_db_Db_DB_RECNO
+#define com_sleepycat_db_Db_DB_RECNO 3L
+#undef com_sleepycat_db_Db_DB_REP_DUPMASTER
+#define com_sleepycat_db_Db_DB_REP_DUPMASTER -30988L
+#undef com_sleepycat_db_Db_DB_REP_HOLDELECTION
+#define com_sleepycat_db_Db_DB_REP_HOLDELECTION -30987L
+#undef com_sleepycat_db_Db_DB_REP_NEWMASTER
+#define com_sleepycat_db_Db_DB_REP_NEWMASTER -30986L
+#undef com_sleepycat_db_Db_DB_REP_NEWSITE
+#define com_sleepycat_db_Db_DB_REP_NEWSITE -30985L
+#undef com_sleepycat_db_Db_DB_REP_OUTDATED
+#define com_sleepycat_db_Db_DB_REP_OUTDATED -30984L
+#undef com_sleepycat_db_Db_DB_RUNRECOVERY
+#define com_sleepycat_db_Db_DB_RUNRECOVERY -30982L
+#undef com_sleepycat_db_Db_DB_SECONDARY_BAD
+#define com_sleepycat_db_Db_DB_SECONDARY_BAD -30981L
+#undef com_sleepycat_db_Db_DB_TXN_ABORT
+#define com_sleepycat_db_Db_DB_TXN_ABORT 0L
+#undef com_sleepycat_db_Db_DB_TXN_APPLY
+#define com_sleepycat_db_Db_DB_TXN_APPLY 1L
+#undef com_sleepycat_db_Db_DB_TXN_BACKWARD_ROLL
+#define com_sleepycat_db_Db_DB_TXN_BACKWARD_ROLL 3L
+#undef com_sleepycat_db_Db_DB_TXN_FORWARD_ROLL
+#define com_sleepycat_db_Db_DB_TXN_FORWARD_ROLL 4L
+#undef com_sleepycat_db_Db_DB_TXN_PRINT
+#define com_sleepycat_db_Db_DB_TXN_PRINT 8L
+#undef com_sleepycat_db_Db_DB_UNKNOWN
+#define com_sleepycat_db_Db_DB_UNKNOWN 5L
+#undef com_sleepycat_db_Db_DB_VERIFY_BAD
+#define com_sleepycat_db_Db_DB_VERIFY_BAD -30980L
+/* Inaccessible static: DB_AFTER */
+/* Inaccessible static: DB_AGGRESSIVE */
+/* Inaccessible static: DB_APPEND */
+/* Inaccessible static: DB_ARCH_ABS */
+/* Inaccessible static: DB_ARCH_DATA */
+/* Inaccessible static: DB_ARCH_LOG */
+/* Inaccessible static: DB_AUTO_COMMIT */
+/* Inaccessible static: DB_BEFORE */
+/* Inaccessible static: DB_CACHED_COUNTS */
+/* Inaccessible static: DB_CDB_ALLDB */
+/* Inaccessible static: DB_CHKSUM_SHA1 */
+/* Inaccessible static: DB_CLIENT */
+/* Inaccessible static: DB_CONSUME */
+/* Inaccessible static: DB_CONSUME_WAIT */
+/* Inaccessible static: DB_CREATE */
+/* Inaccessible static: DB_CURRENT */
+/* Inaccessible static: DB_CXX_NO_EXCEPTIONS */
+/* Inaccessible static: DB_DBT_MALLOC */
+/* Inaccessible static: DB_DBT_PARTIAL */
+/* Inaccessible static: DB_DBT_REALLOC */
+/* Inaccessible static: DB_DBT_USERMEM */
+/* Inaccessible static: DB_DIRECT */
+/* Inaccessible static: DB_DIRECT_DB */
+/* Inaccessible static: DB_DIRECT_LOG */
+/* Inaccessible static: DB_DIRTY_READ */
+/* Inaccessible static: DB_DUP */
+/* Inaccessible static: DB_DUPSORT */
+/* Inaccessible static: DB_EID_BROADCAST */
+/* Inaccessible static: DB_EID_INVALID */
+/* Inaccessible static: DB_ENCRYPT */
+/* Inaccessible static: DB_ENCRYPT_AES */
+/* Inaccessible static: DB_EXCL */
+/* Inaccessible static: DB_FAST_STAT */
+/* Inaccessible static: DB_FIRST */
+/* Inaccessible static: DB_FLUSH */
+/* Inaccessible static: DB_FORCE */
+/* Inaccessible static: DB_GET_BOTH */
+/* Inaccessible static: DB_GET_BOTH_RANGE */
+/* Inaccessible static: DB_GET_RECNO */
+/* Inaccessible static: DB_INIT_CDB */
+/* Inaccessible static: DB_INIT_LOCK */
+/* Inaccessible static: DB_INIT_LOG */
+/* Inaccessible static: DB_INIT_MPOOL */
+/* Inaccessible static: DB_INIT_TXN */
+/* Inaccessible static: DB_JOINENV */
+/* Inaccessible static: DB_JOIN_ITEM */
+/* Inaccessible static: DB_JOIN_NOSORT */
+/* Inaccessible static: DB_KEYFIRST */
+/* Inaccessible static: DB_KEYLAST */
+/* Inaccessible static: DB_LAST */
+/* Inaccessible static: DB_LOCKDOWN */
+/* Inaccessible static: DB_LOCK_DEFAULT */
+/* Inaccessible static: DB_LOCK_EXPIRE */
+/* Inaccessible static: DB_LOCK_GET */
+/* Inaccessible static: DB_LOCK_GET_TIMEOUT */
+/* Inaccessible static: DB_LOCK_IREAD */
+/* Inaccessible static: DB_LOCK_IWR */
+/* Inaccessible static: DB_LOCK_IWRITE */
+/* Inaccessible static: DB_LOCK_MAXLOCKS */
+/* Inaccessible static: DB_LOCK_MINLOCKS */
+/* Inaccessible static: DB_LOCK_MINWRITE */
+/* Inaccessible static: DB_LOCK_NOWAIT */
+/* Inaccessible static: DB_LOCK_OLDEST */
+/* Inaccessible static: DB_LOCK_PUT */
+/* Inaccessible static: DB_LOCK_PUT_ALL */
+/* Inaccessible static: DB_LOCK_PUT_OBJ */
+/* Inaccessible static: DB_LOCK_RANDOM */
+/* Inaccessible static: DB_LOCK_READ */
+/* Inaccessible static: DB_LOCK_TIMEOUT */
+/* Inaccessible static: DB_LOCK_WRITE */
+/* Inaccessible static: DB_LOCK_YOUNGEST */
+/* Inaccessible static: DB_MULTIPLE */
+/* Inaccessible static: DB_MULTIPLE_KEY */
+/* Inaccessible static: DB_NEXT */
+/* Inaccessible static: DB_NEXT_DUP */
+/* Inaccessible static: DB_NEXT_NODUP */
+/* Inaccessible static: DB_NODUPDATA */
+/* Inaccessible static: DB_NOLOCKING */
+/* Inaccessible static: DB_NOMMAP */
+/* Inaccessible static: DB_NOORDERCHK */
+/* Inaccessible static: DB_NOOVERWRITE */
+/* Inaccessible static: DB_NOPANIC */
+/* Inaccessible static: DB_NOSYNC */
+/* Inaccessible static: DB_ODDFILESIZE */
+/* Inaccessible static: DB_ORDERCHKONLY */
+/* Inaccessible static: DB_OVERWRITE */
+/* Inaccessible static: DB_PANIC_ENVIRONMENT */
+/* Inaccessible static: DB_POSITION */
+/* Inaccessible static: DB_PREV */
+/* Inaccessible static: DB_PREV_NODUP */
+/* Inaccessible static: DB_PRINTABLE */
+/* Inaccessible static: DB_PRIORITY_DEFAULT */
+/* Inaccessible static: DB_PRIORITY_HIGH */
+/* Inaccessible static: DB_PRIORITY_LOW */
+/* Inaccessible static: DB_PRIORITY_VERY_HIGH */
+/* Inaccessible static: DB_PRIORITY_VERY_LOW */
+/* Inaccessible static: DB_PRIVATE */
+/* Inaccessible static: DB_RDONLY */
+/* Inaccessible static: DB_RECNUM */
+/* Inaccessible static: DB_RECORDCOUNT */
+/* Inaccessible static: DB_RECOVER */
+/* Inaccessible static: DB_RECOVER_FATAL */
+/* Inaccessible static: DB_REGION_INIT */
+/* Inaccessible static: DB_RENUMBER */
+/* Inaccessible static: DB_REP_CLIENT */
+/* Inaccessible static: DB_REP_LOGSONLY */
+/* Inaccessible static: DB_REP_MASTER */
+/* Inaccessible static: DB_REP_PERMANENT */
+/* Inaccessible static: DB_REP_UNAVAIL */
+/* Inaccessible static: DB_REVSPLITOFF */
+/* Inaccessible static: DB_RMW */
+/* Inaccessible static: DB_SALVAGE */
+/* Inaccessible static: DB_SET */
+/* Inaccessible static: DB_SET_LOCK_TIMEOUT */
+/* Inaccessible static: DB_SET_RANGE */
+/* Inaccessible static: DB_SET_RECNO */
+/* Inaccessible static: DB_SET_TXN_TIMEOUT */
+/* Inaccessible static: DB_SNAPSHOT */
+/* Inaccessible static: DB_STAT_CLEAR */
+/* Inaccessible static: DB_SYSTEM_MEM */
+/* Inaccessible static: DB_THREAD */
+/* Inaccessible static: DB_TRUNCATE */
+/* Inaccessible static: DB_TXN_NOSYNC */
+/* Inaccessible static: DB_TXN_NOWAIT */
+/* Inaccessible static: DB_TXN_SYNC */
+/* Inaccessible static: DB_TXN_WRITE_NOSYNC */
+/* Inaccessible static: DB_UPGRADE */
+/* Inaccessible static: DB_USE_ENVIRON */
+/* Inaccessible static: DB_USE_ENVIRON_ROOT */
+/* Inaccessible static: DB_VERB_CHKPOINT */
+/* Inaccessible static: DB_VERB_DEADLOCK */
+/* Inaccessible static: DB_VERB_RECOVERY */
+/* Inaccessible static: DB_VERB_REPLICATION */
+/* Inaccessible static: DB_VERB_WAITSFOR */
+/* Inaccessible static: DB_VERIFY */
+/* Inaccessible static: DB_VERSION_MAJOR */
+/* Inaccessible static: DB_VERSION_MINOR */
+/* Inaccessible static: DB_VERSION_PATCH */
+/* Inaccessible static: DB_WRITECURSOR */
+/* Inaccessible static: DB_XA_CREATE */
+/* Inaccessible static: DB_XIDDATASIZE */
+/* Inaccessible static: DB_YIELDCPU */
+/* Inaccessible static: already_loaded_ */
/*
* Class: com_sleepycat_db_Db
* Method: _init
@@ -25,6 +226,14 @@ JNIEXPORT void JNICALL Java_com_sleepycat_db_Db__1notify_1internal
/*
* Class: com_sleepycat_db_Db
+ * Method: _associate
+ * Signature: (Lcom/sleepycat/db/DbTxn;Lcom/sleepycat/db/Db;Lcom/sleepycat/db/DbSecondaryKeyCreate;I)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_Db__1associate
+ (JNIEnv *, jobject, jobject, jobject, jobject, jint);
+
+/*
+ * Class: com_sleepycat_db_Db
* Method: _close
* Signature: (I)I
*/
@@ -122,10 +331,18 @@ JNIEXPORT void JNICALL Java_com_sleepycat_db_Db_key_1range
/*
* Class: com_sleepycat_db_Db
* Method: _open
- * Signature: (Ljava/lang/String;Ljava/lang/String;III)V
+ * Signature: (Lcom/sleepycat/db/DbTxn;Ljava/lang/String;Ljava/lang/String;III)V
*/
JNIEXPORT void JNICALL Java_com_sleepycat_db_Db__1open
- (JNIEnv *, jobject, jstring, jstring, jint, jint, jint);
+ (JNIEnv *, jobject, jobject, jstring, jstring, jint, jint, jint);
+
+/*
+ * Class: com_sleepycat_db_Db
+ * Method: pget
+ * Signature: (Lcom/sleepycat/db/DbTxn;Lcom/sleepycat/db/Dbt;Lcom/sleepycat/db/Dbt;Lcom/sleepycat/db/Dbt;I)I
+ */
+JNIEXPORT jint JNICALL Java_com_sleepycat_db_Db_pget
+ (JNIEnv *, jobject, jobject, jobject, jobject, jobject, jint);
/*
* Class: com_sleepycat_db_Db
@@ -137,18 +354,18 @@ JNIEXPORT jint JNICALL Java_com_sleepycat_db_Db_put
/*
* Class: com_sleepycat_db_Db
- * Method: rename
+ * Method: _rename
* Signature: (Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;I)V
*/
-JNIEXPORT void JNICALL Java_com_sleepycat_db_Db_rename
+JNIEXPORT void JNICALL Java_com_sleepycat_db_Db__1rename
(JNIEnv *, jobject, jstring, jstring, jstring, jint);
/*
* Class: com_sleepycat_db_Db
- * Method: remove
+ * Method: _remove
* Signature: (Ljava/lang/String;Ljava/lang/String;I)V
*/
-JNIEXPORT void JNICALL Java_com_sleepycat_db_Db_remove
+JNIEXPORT void JNICALL Java_com_sleepycat_db_Db__1remove
(JNIEnv *, jobject, jstring, jstring, jint);
/*
@@ -201,6 +418,14 @@ JNIEXPORT void JNICALL Java_com_sleepycat_db_Db_set_1cachesize
/*
* Class: com_sleepycat_db_Db
+ * Method: set_cache_priority
+ * Signature: (I)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_Db_set_1cache_1priority
+ (JNIEnv *, jobject, jint);
+
+/*
+ * Class: com_sleepycat_db_Db
* Method: dup_compare_changed
* Signature: (Lcom/sleepycat/db/DbDupCompare;)V
*/
@@ -209,6 +434,14 @@ JNIEXPORT void JNICALL Java_com_sleepycat_db_Db_dup_1compare_1changed
/*
* Class: com_sleepycat_db_Db
+ * Method: set_encrypt
+ * Signature: (Ljava/lang/String;I)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_Db_set_1encrypt
+ (JNIEnv *, jobject, jstring, jint);
+
+/*
+ * Class: com_sleepycat_db_Db
* Method: feedback_changed
* Signature: (Lcom/sleepycat/db/DbFeedback;)V
*/
@@ -225,6 +458,14 @@ JNIEXPORT void JNICALL Java_com_sleepycat_db_Db_set_1flags
/*
* Class: com_sleepycat_db_Db
+ * Method: get_flags_raw
+ * Signature: ()I
+ */
+JNIEXPORT jint JNICALL Java_com_sleepycat_db_Db_get_1flags_1raw
+ (JNIEnv *, jobject);
+
+/*
+ * Class: com_sleepycat_db_Db
* Method: set_h_ffactor
* Signature: (I)V
*/
@@ -314,13 +555,21 @@ JNIEXPORT jobject JNICALL Java_com_sleepycat_db_Db_stat
/*
* Class: com_sleepycat_db_Db
* Method: sync
- * Signature: (I)I
+ * Signature: (I)V
*/
-JNIEXPORT jint JNICALL Java_com_sleepycat_db_Db_sync
+JNIEXPORT void JNICALL Java_com_sleepycat_db_Db_sync
(JNIEnv *, jobject, jint);
/*
* Class: com_sleepycat_db_Db
+ * Method: truncate
+ * Signature: (Lcom/sleepycat/db/DbTxn;I)I
+ */
+JNIEXPORT jint JNICALL Java_com_sleepycat_db_Db_truncate
+ (JNIEnv *, jobject, jobject, jint);
+
+/*
+ * Class: com_sleepycat_db_Db
* Method: upgrade
* Signature: (Ljava/lang/String;I)V
*/
diff --git a/bdb/libdb_java/com_sleepycat_db_DbEnv.h b/bdb/libdb_java/com_sleepycat_db_DbEnv.h
index 4168ea9abe2..f239dfc7593 100644
--- a/bdb/libdb_java/com_sleepycat_db_DbEnv.h
+++ b/bdb/libdb_java/com_sleepycat_db_DbEnv.h
@@ -3,7 +3,7 @@
/* Header for class com_sleepycat_db_DbEnv */
#ifndef _Included_com_sleepycat_db_DbEnv
-#define _Included_com_sleepycat_db_DbEnv
+#define _Included_com_sleepycat_db_DbEnv
#ifdef __cplusplus
extern "C" {
#endif
@@ -17,6 +17,22 @@ JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv__1close
/*
* Class: com_sleepycat_db_DbEnv
+ * Method: dbremove
+ * Signature: (Lcom/sleepycat/db/DbTxn;Ljava/lang/String;Ljava/lang/String;I)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_dbremove
+ (JNIEnv *, jobject, jobject, jstring, jstring, jint);
+
+/*
+ * Class: com_sleepycat_db_DbEnv
+ * Method: dbrename
+ * Signature: (Lcom/sleepycat/db/DbTxn;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;I)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_dbrename
+ (JNIEnv *, jobject, jobject, jstring, jstring, jstring, jint);
+
+/*
+ * Class: com_sleepycat_db_DbEnv
* Method: err
* Signature: (ILjava/lang/String;)V
*/
@@ -57,6 +73,14 @@ JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv__1init_1using_1db
/*
* Class: com_sleepycat_db_DbEnv
+ * Method: _init_using_xa
+ * Signature: (Lcom/sleepycat/db/DbErrcall;II)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv__1init_1using_1xa
+ (JNIEnv *, jobject, jobject, jint, jint);
+
+/*
+ * Class: com_sleepycat_db_DbEnv
* Method: _notify_db_close
* Signature: ()V
*/
@@ -89,6 +113,14 @@ JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_set_1cachesize
/*
* Class: com_sleepycat_db_DbEnv
+ * Method: set_encrypt
+ * Signature: (Ljava/lang/String;I)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_set_1encrypt
+ (JNIEnv *, jobject, jstring, jint);
+
+/*
+ * Class: com_sleepycat_db_DbEnv
* Method: _set_errcall
* Signature: (Lcom/sleepycat/db/DbErrcall;)V
*/
@@ -106,7 +138,7 @@ JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv__1set_1errpfx
/*
* Class: com_sleepycat_db_DbEnv
* Method: feedback_changed
- * Signature: (Lcom/sleepycat/db/DbFeedback;)V
+ * Signature: (Lcom/sleepycat/db/DbEnvFeedback;)V
*/
JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_feedback_1changed
(JNIEnv *, jobject, jobject);
@@ -114,10 +146,10 @@ JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_feedback_1changed
/*
* Class: com_sleepycat_db_DbEnv
* Method: set_verbose
- * Signature: (II)V
+ * Signature: (IZ)V
*/
JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_set_1verbose
- (JNIEnv *, jobject, jint, jint);
+ (JNIEnv *, jobject, jint, jboolean);
/*
* Class: com_sleepycat_db_DbEnv
@@ -153,6 +185,14 @@ JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_set_1lg_1max
/*
* Class: com_sleepycat_db_DbEnv
+ * Method: set_lg_regionmax
+ * Signature: (I)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_set_1lg_1regionmax
+ (JNIEnv *, jobject, jint);
+
+/*
+ * Class: com_sleepycat_db_DbEnv
* Method: set_lk_conflicts
* Signature: ([[B)V
*/
@@ -209,59 +249,35 @@ JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_set_1mp_1mmapsize
/*
* Class: com_sleepycat_db_DbEnv
- * Method: set_mutexlocks
- * Signature: (I)V
- */
-JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_set_1mutexlocks
- (JNIEnv *, jobject, jint);
-
-/*
- * Class: com_sleepycat_db_DbEnv
- * Method: set_pageyield
- * Signature: (I)V
- */
-JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_set_1pageyield
- (JNIEnv *, jclass, jint);
-
-/*
- * Class: com_sleepycat_db_DbEnv
- * Method: set_panicstate
- * Signature: (I)V
- */
-JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_set_1panicstate
- (JNIEnv *, jclass, jint);
-
-/*
- * Class: com_sleepycat_db_DbEnv
- * Method: recovery_init_changed
- * Signature: (Lcom/sleepycat/db/DbRecoveryInit;)V
+ * Method: set_flags
+ * Signature: (IZ)V
*/
-JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_recovery_1init_1changed
- (JNIEnv *, jobject, jobject);
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_set_1flags
+ (JNIEnv *, jobject, jint, jboolean);
/*
* Class: com_sleepycat_db_DbEnv
- * Method: set_region_init
- * Signature: (I)V
+ * Method: set_rep_limit
+ * Signature: (II)V
*/
-JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_set_1region_1init
- (JNIEnv *, jclass, jint);
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_set_1rep_1limit
+ (JNIEnv *, jobject, jint, jint);
/*
* Class: com_sleepycat_db_DbEnv
- * Method: set_flags
- * Signature: (II)V
+ * Method: rep_transport_changed
+ * Signature: (ILcom/sleepycat/db/DbRepTransport;)V
*/
-JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_set_1flags
- (JNIEnv *, jobject, jint, jint);
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_rep_1transport_1changed
+ (JNIEnv *, jobject, jint, jobject);
/*
* Class: com_sleepycat_db_DbEnv
- * Method: set_server
- * Signature: (Ljava/lang/String;JJI)V
+ * Method: set_rpc_server
+ * Signature: (Lcom/sleepycat/db/DbClient;Ljava/lang/String;JJI)V
*/
-JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_set_1server
- (JNIEnv *, jobject, jstring, jlong, jlong, jint);
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_set_1rpc_1server
+ (JNIEnv *, jobject, jobject, jstring, jlong, jlong, jint);
/*
* Class: com_sleepycat_db_DbEnv
@@ -277,7 +293,15 @@ JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_set_1shm_1key
* Signature: (I)V
*/
JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_set_1tas_1spins
- (JNIEnv *, jclass, jint);
+ (JNIEnv *, jobject, jint);
+
+/*
+ * Class: com_sleepycat_db_DbEnv
+ * Method: set_timeout
+ * Signature: (JI)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_set_1timeout
+ (JNIEnv *, jobject, jlong, jint);
/*
* Class: com_sleepycat_db_DbEnv
@@ -289,10 +313,10 @@ JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_set_1tmp_1dir
/*
* Class: com_sleepycat_db_DbEnv
- * Method: tx_recover_changed
- * Signature: (Lcom/sleepycat/db/DbTxnRecover;)V
+ * Method: app_dispatch_changed
+ * Signature: (Lcom/sleepycat/db/DbAppDispatch;)V
*/
-JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_tx_1recover_1changed
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_app_1dispatch_1changed
(JNIEnv *, jobject, jobject);
/*
@@ -369,6 +393,14 @@ JNIEXPORT jobject JNICALL Java_com_sleepycat_db_DbEnv_lock_1get
/*
* Class: com_sleepycat_db_DbEnv
+ * Method: lock_put
+ * Signature: (Lcom/sleepycat/db/DbLock;)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_lock_1put
+ (JNIEnv *, jobject, jobject);
+
+/*
+ * Class: com_sleepycat_db_DbEnv
* Method: lock_id
* Signature: ()I
*/
@@ -377,11 +409,27 @@ JNIEXPORT jint JNICALL Java_com_sleepycat_db_DbEnv_lock_1id
/*
* Class: com_sleepycat_db_DbEnv
+ * Method: lock_id_free
+ * Signature: (I)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_lock_1id_1free
+ (JNIEnv *, jobject, jint);
+
+/*
+ * Class: com_sleepycat_db_DbEnv
* Method: lock_stat
- * Signature: ()Lcom/sleepycat/db/DbLockStat;
+ * Signature: (I)Lcom/sleepycat/db/DbLockStat;
*/
JNIEXPORT jobject JNICALL Java_com_sleepycat_db_DbEnv_lock_1stat
- (JNIEnv *, jobject);
+ (JNIEnv *, jobject, jint);
+
+/*
+ * Class: com_sleepycat_db_DbEnv
+ * Method: lock_vec
+ * Signature: (II[Lcom/sleepycat/db/DbLockRequest;II)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_lock_1vec
+ (JNIEnv *, jobject, jint, jint, jobjectArray, jint, jint);
/*
* Class: com_sleepycat_db_DbEnv
@@ -401,6 +449,14 @@ JNIEXPORT jint JNICALL Java_com_sleepycat_db_DbEnv_log_1compare
/*
* Class: com_sleepycat_db_DbEnv
+ * Method: log_cursor
+ * Signature: (I)Lcom/sleepycat/db/DbLogc;
+ */
+JNIEXPORT jobject JNICALL Java_com_sleepycat_db_DbEnv_log_1cursor
+ (JNIEnv *, jobject, jint);
+
+/*
+ * Class: com_sleepycat_db_DbEnv
* Method: log_file
* Signature: (Lcom/sleepycat/db/DbLsn;)Ljava/lang/String;
*/
@@ -417,14 +473,6 @@ JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_log_1flush
/*
* Class: com_sleepycat_db_DbEnv
- * Method: log_get
- * Signature: (Lcom/sleepycat/db/DbLsn;Lcom/sleepycat/db/Dbt;I)V
- */
-JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_log_1get
- (JNIEnv *, jobject, jobject, jobject, jint);
-
-/*
- * Class: com_sleepycat_db_DbEnv
* Method: log_put
* Signature: (Lcom/sleepycat/db/DbLsn;Lcom/sleepycat/db/Dbt;I)V
*/
@@ -434,49 +482,65 @@ JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_log_1put
/*
* Class: com_sleepycat_db_DbEnv
* Method: log_stat
- * Signature: ()Lcom/sleepycat/db/DbLogStat;
+ * Signature: (I)Lcom/sleepycat/db/DbLogStat;
*/
JNIEXPORT jobject JNICALL Java_com_sleepycat_db_DbEnv_log_1stat
- (JNIEnv *, jobject);
+ (JNIEnv *, jobject, jint);
/*
* Class: com_sleepycat_db_DbEnv
- * Method: log_register
- * Signature: (Lcom/sleepycat/db/Db;Ljava/lang/String;)V
+ * Method: memp_stat
+ * Signature: (I)Lcom/sleepycat/db/DbMpoolStat;
*/
-JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_log_1register
- (JNIEnv *, jobject, jobject, jstring);
+JNIEXPORT jobject JNICALL Java_com_sleepycat_db_DbEnv_memp_1stat
+ (JNIEnv *, jobject, jint);
/*
* Class: com_sleepycat_db_DbEnv
- * Method: log_unregister
- * Signature: (Lcom/sleepycat/db/Db;)V
+ * Method: memp_fstat
+ * Signature: (I)[Lcom/sleepycat/db/DbMpoolFStat;
*/
-JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_log_1unregister
- (JNIEnv *, jobject, jobject);
+JNIEXPORT jobjectArray JNICALL Java_com_sleepycat_db_DbEnv_memp_1fstat
+ (JNIEnv *, jobject, jint);
/*
* Class: com_sleepycat_db_DbEnv
- * Method: memp_stat
- * Signature: ()Lcom/sleepycat/db/DbMpoolStat;
+ * Method: memp_trickle
+ * Signature: (I)I
*/
-JNIEXPORT jobject JNICALL Java_com_sleepycat_db_DbEnv_memp_1stat
- (JNIEnv *, jobject);
+JNIEXPORT jint JNICALL Java_com_sleepycat_db_DbEnv_memp_1trickle
+ (JNIEnv *, jobject, jint);
/*
* Class: com_sleepycat_db_DbEnv
- * Method: memp_fstat
- * Signature: ()[Lcom/sleepycat/db/DbMpoolFStat;
+ * Method: rep_elect
+ * Signature: (III)I
*/
-JNIEXPORT jobjectArray JNICALL Java_com_sleepycat_db_DbEnv_memp_1fstat
- (JNIEnv *, jobject);
+JNIEXPORT jint JNICALL Java_com_sleepycat_db_DbEnv_rep_1elect
+ (JNIEnv *, jobject, jint, jint, jint);
/*
* Class: com_sleepycat_db_DbEnv
- * Method: memp_trickle
- * Signature: (I)I
+ * Method: rep_process_message
+ * Signature: (Lcom/sleepycat/db/Dbt;Lcom/sleepycat/db/Dbt;Lcom/sleepycat/db/DbEnv$RepProcessMessage;)I
*/
-JNIEXPORT jint JNICALL Java_com_sleepycat_db_DbEnv_memp_1trickle
+JNIEXPORT jint JNICALL Java_com_sleepycat_db_DbEnv_rep_1process_1message
+ (JNIEnv *, jobject, jobject, jobject, jobject);
+
+/*
+ * Class: com_sleepycat_db_DbEnv
+ * Method: rep_start
+ * Signature: (Lcom/sleepycat/db/Dbt;I)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_rep_1start
+ (JNIEnv *, jobject, jobject, jint);
+
+/*
+ * Class: com_sleepycat_db_DbEnv
+ * Method: rep_stat
+ * Signature: (I)Lcom/sleepycat/db/DbRepStat;
+ */
+JNIEXPORT jobject JNICALL Java_com_sleepycat_db_DbEnv_rep_1stat
(JNIEnv *, jobject, jint);
/*
@@ -490,18 +554,26 @@ JNIEXPORT jobject JNICALL Java_com_sleepycat_db_DbEnv_txn_1begin
/*
* Class: com_sleepycat_db_DbEnv
* Method: txn_checkpoint
- * Signature: (III)I
+ * Signature: (III)V
*/
-JNIEXPORT jint JNICALL Java_com_sleepycat_db_DbEnv_txn_1checkpoint
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_txn_1checkpoint
(JNIEnv *, jobject, jint, jint, jint);
/*
* Class: com_sleepycat_db_DbEnv
+ * Method: txn_recover
+ * Signature: (II)[Lcom/sleepycat/db/DbPreplist;
+ */
+JNIEXPORT jobjectArray JNICALL Java_com_sleepycat_db_DbEnv_txn_1recover
+ (JNIEnv *, jobject, jint, jint);
+
+/*
+ * Class: com_sleepycat_db_DbEnv
* Method: txn_stat
- * Signature: ()Lcom/sleepycat/db/DbTxnStat;
+ * Signature: (I)Lcom/sleepycat/db/DbTxnStat;
*/
JNIEXPORT jobject JNICALL Java_com_sleepycat_db_DbEnv_txn_1stat
- (JNIEnv *, jobject);
+ (JNIEnv *, jobject, jint);
#ifdef __cplusplus
}
diff --git a/bdb/libdb_java/com_sleepycat_db_DbLock.h b/bdb/libdb_java/com_sleepycat_db_DbLock.h
index 8a1c135bb3b..9f3d77d44bc 100644
--- a/bdb/libdb_java/com_sleepycat_db_DbLock.h
+++ b/bdb/libdb_java/com_sleepycat_db_DbLock.h
@@ -3,7 +3,7 @@
/* Header for class com_sleepycat_db_DbLock */
#ifndef _Included_com_sleepycat_db_DbLock
-#define _Included_com_sleepycat_db_DbLock
+#define _Included_com_sleepycat_db_DbLock
#ifdef __cplusplus
extern "C" {
#endif
@@ -15,14 +15,6 @@ extern "C" {
JNIEXPORT void JNICALL Java_com_sleepycat_db_DbLock_finalize
(JNIEnv *, jobject);
-/*
- * Class: com_sleepycat_db_DbLock
- * Method: put
- * Signature: (Lcom/sleepycat/db/DbEnv;)V
- */
-JNIEXPORT void JNICALL Java_com_sleepycat_db_DbLock_put
- (JNIEnv *, jobject, jobject);
-
#ifdef __cplusplus
}
#endif
diff --git a/bdb/libdb_java/com_sleepycat_db_DbLogc.h b/bdb/libdb_java/com_sleepycat_db_DbLogc.h
new file mode 100644
index 00000000000..8d029c761ba
--- /dev/null
+++ b/bdb/libdb_java/com_sleepycat_db_DbLogc.h
@@ -0,0 +1,37 @@
+/* DO NOT EDIT THIS FILE - it is machine generated */
+#include <jni.h>
+/* Header for class com_sleepycat_db_DbLogc */
+
+#ifndef _Included_com_sleepycat_db_DbLogc
+#define _Included_com_sleepycat_db_DbLogc
+#ifdef __cplusplus
+extern "C" {
+#endif
+/*
+ * Class: com_sleepycat_db_DbLogc
+ * Method: close
+ * Signature: (I)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbLogc_close
+ (JNIEnv *, jobject, jint);
+
+/*
+ * Class: com_sleepycat_db_DbLogc
+ * Method: get
+ * Signature: (Lcom/sleepycat/db/DbLsn;Lcom/sleepycat/db/Dbt;I)I
+ */
+JNIEXPORT jint JNICALL Java_com_sleepycat_db_DbLogc_get
+ (JNIEnv *, jobject, jobject, jobject, jint);
+
+/*
+ * Class: com_sleepycat_db_DbLogc
+ * Method: finalize
+ * Signature: ()V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbLogc_finalize
+ (JNIEnv *, jobject);
+
+#ifdef __cplusplus
+}
+#endif
+#endif
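
The new DbLogc header above, together with the removal of DbEnv.log_get() and the addition of DbEnv.log_cursor() further down, replaces direct log reads with a log-cursor object. A rough sketch of how a walk over the log might look with the new classes; the DbLsn constructor and the choice of Db.DB_NEXT here are inferred rather than taken from this patch:

import com.sleepycat.db.*;

class LogWalkSketch
{
    // Iterate over the environment's log records with the new log cursor,
    // which replaces the removed DbEnv.log_get() call.
    static int countLogRecords(DbEnv env) throws DbException
    {
        DbLogc logc = env.log_cursor(0);
        DbLsn lsn = new DbLsn();            // assumed no-argument constructor
        Dbt record = new Dbt();
        record.set_flags(Db.DB_DBT_MALLOC);

        int count = 0;
        while (logc.get(lsn, record, Db.DB_NEXT) == 0)
            count++;                        // one log record per successful get

        logc.close(0);
        return count;
    }
}
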
diff --git a/bdb/libdb_java/com_sleepycat_db_DbLsn.h b/bdb/libdb_java/com_sleepycat_db_DbLsn.h
index 093eaf372b5..080fa0a8758 100644
--- a/bdb/libdb_java/com_sleepycat_db_DbLsn.h
+++ b/bdb/libdb_java/com_sleepycat_db_DbLsn.h
@@ -3,7 +3,7 @@
/* Header for class com_sleepycat_db_DbLsn */
#ifndef _Included_com_sleepycat_db_DbLsn
-#define _Included_com_sleepycat_db_DbLsn
+#define _Included_com_sleepycat_db_DbLsn
#ifdef __cplusplus
extern "C" {
#endif
diff --git a/bdb/libdb_java/com_sleepycat_db_DbTxn.h b/bdb/libdb_java/com_sleepycat_db_DbTxn.h
index 4dcf47405c0..59641c041a4 100644
--- a/bdb/libdb_java/com_sleepycat_db_DbTxn.h
+++ b/bdb/libdb_java/com_sleepycat_db_DbTxn.h
@@ -3,7 +3,7 @@
/* Header for class com_sleepycat_db_DbTxn */
#ifndef _Included_com_sleepycat_db_DbTxn
-#define _Included_com_sleepycat_db_DbTxn
+#define _Included_com_sleepycat_db_DbTxn
#ifdef __cplusplus
extern "C" {
#endif
@@ -25,6 +25,14 @@ JNIEXPORT void JNICALL Java_com_sleepycat_db_DbTxn_commit
/*
* Class: com_sleepycat_db_DbTxn
+ * Method: discard
+ * Signature: (I)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbTxn_discard
+ (JNIEnv *, jobject, jint);
+
+/*
+ * Class: com_sleepycat_db_DbTxn
* Method: id
* Signature: ()I
*/
@@ -34,18 +42,18 @@ JNIEXPORT jint JNICALL Java_com_sleepycat_db_DbTxn_id
/*
* Class: com_sleepycat_db_DbTxn
* Method: prepare
- * Signature: ()V
+ * Signature: ([B)V
*/
JNIEXPORT void JNICALL Java_com_sleepycat_db_DbTxn_prepare
- (JNIEnv *, jobject);
+ (JNIEnv *, jobject, jbyteArray);
/*
* Class: com_sleepycat_db_DbTxn
- * Method: finalize
- * Signature: ()V
+ * Method: set_timeout
+ * Signature: (JI)V
*/
-JNIEXPORT void JNICALL Java_com_sleepycat_db_DbTxn_finalize
- (JNIEnv *, jobject);
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbTxn_set_1timeout
+ (JNIEnv *, jobject, jlong, jint);
#ifdef __cplusplus
}
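
The DbTxn changes above give prepare() a global transaction id argument (the ([B)V signature, i.e. a byte array) and add discard() and set_timeout(). A rough sketch of the Java-side calls; the flag used with set_timeout() and the gid handling are inferred from constants listed elsewhere in this patch and should be read as assumptions:

import com.sleepycat.db.*;

class TxnPrepareSketch
{
    static void prepareForTwoPhaseCommit(DbEnv env, byte[] gid)
        throws DbException
    {
        DbTxn txn = env.txn_begin(null, 0);

        // set_timeout() now takes (long, int): a timeout value plus a flag
        // saying which timeout it sets (assumed to be Db.DB_SET_TXN_TIMEOUT).
        txn.set_timeout(1000000L, Db.DB_SET_TXN_TIMEOUT);

        // ... transactional work elided ...

        // prepare() now records the caller-supplied global transaction id.
        txn.prepare(gid);
        txn.commit(0);
    }
}
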
diff --git a/bdb/libdb_java/com_sleepycat_db_DbUtil.h b/bdb/libdb_java/com_sleepycat_db_DbUtil.h
new file mode 100644
index 00000000000..7f8495590c0
--- /dev/null
+++ b/bdb/libdb_java/com_sleepycat_db_DbUtil.h
@@ -0,0 +1,22 @@
+/* DO NOT EDIT THIS FILE - it is machine generated */
+#include <jni.h>
+/* Header for class com_sleepycat_db_DbUtil */
+
+#ifndef _Included_com_sleepycat_db_DbUtil
+#define _Included_com_sleepycat_db_DbUtil
+#ifdef __cplusplus
+extern "C" {
+#endif
+/* Inaccessible static: big_endian */
+/*
+ * Class: com_sleepycat_db_DbUtil
+ * Method: is_big_endian
+ * Signature: ()Z
+ */
+JNIEXPORT jboolean JNICALL Java_com_sleepycat_db_DbUtil_is_1big_1endian
+ (JNIEnv *, jclass);
+
+#ifdef __cplusplus
+}
+#endif
+#endif
diff --git a/bdb/libdb_java/com_sleepycat_db_Dbc.h b/bdb/libdb_java/com_sleepycat_db_Dbc.h
index e62679c6f66..447ab234844 100644
--- a/bdb/libdb_java/com_sleepycat_db_Dbc.h
+++ b/bdb/libdb_java/com_sleepycat_db_Dbc.h
@@ -3,7 +3,7 @@
/* Header for class com_sleepycat_db_Dbc */
#ifndef _Included_com_sleepycat_db_Dbc
-#define _Included_com_sleepycat_db_Dbc
+#define _Included_com_sleepycat_db_Dbc
#ifdef __cplusplus
extern "C" {
#endif
@@ -49,6 +49,14 @@ JNIEXPORT jint JNICALL Java_com_sleepycat_db_Dbc_get
/*
* Class: com_sleepycat_db_Dbc
+ * Method: pget
+ * Signature: (Lcom/sleepycat/db/Dbt;Lcom/sleepycat/db/Dbt;Lcom/sleepycat/db/Dbt;I)I
+ */
+JNIEXPORT jint JNICALL Java_com_sleepycat_db_Dbc_pget
+ (JNIEnv *, jobject, jobject, jobject, jobject, jint);
+
+/*
+ * Class: com_sleepycat_db_Dbc
* Method: put
* Signature: (Lcom/sleepycat/db/Dbt;Lcom/sleepycat/db/Dbt;I)I
*/
diff --git a/bdb/libdb_java/com_sleepycat_db_Dbt.h b/bdb/libdb_java/com_sleepycat_db_Dbt.h
index cdb58c682c9..c09bd8e6131 100644
--- a/bdb/libdb_java/com_sleepycat_db_Dbt.h
+++ b/bdb/libdb_java/com_sleepycat_db_Dbt.h
@@ -3,7 +3,7 @@
/* Header for class com_sleepycat_db_Dbt */
#ifndef _Included_com_sleepycat_db_Dbt
-#define _Included_com_sleepycat_db_Dbt
+#define _Included_com_sleepycat_db_Dbt
#ifdef __cplusplus
extern "C" {
#endif
@@ -17,138 +17,18 @@ JNIEXPORT void JNICALL Java_com_sleepycat_db_Dbt_finalize
/*
* Class: com_sleepycat_db_Dbt
- * Method: get_data
- * Signature: ()[B
- */
-JNIEXPORT jbyteArray JNICALL Java_com_sleepycat_db_Dbt_get_1data
- (JNIEnv *, jobject);
-
-/*
- * Class: com_sleepycat_db_Dbt
- * Method: internal_set_data
- * Signature: ([B)V
- */
-JNIEXPORT void JNICALL Java_com_sleepycat_db_Dbt_internal_1set_1data
- (JNIEnv *, jobject, jbyteArray);
-
-/*
- * Class: com_sleepycat_db_Dbt
- * Method: set_offset
- * Signature: (I)V
- */
-JNIEXPORT void JNICALL Java_com_sleepycat_db_Dbt_set_1offset
- (JNIEnv *, jobject, jint);
-
-/*
- * Class: com_sleepycat_db_Dbt
- * Method: get_offset
- * Signature: ()I
- */
-JNIEXPORT jint JNICALL Java_com_sleepycat_db_Dbt_get_1offset
- (JNIEnv *, jobject);
-
-/*
- * Class: com_sleepycat_db_Dbt
- * Method: get_size
- * Signature: ()I
- */
-JNIEXPORT jint JNICALL Java_com_sleepycat_db_Dbt_get_1size
- (JNIEnv *, jobject);
-
-/*
- * Class: com_sleepycat_db_Dbt
- * Method: set_size
- * Signature: (I)V
- */
-JNIEXPORT void JNICALL Java_com_sleepycat_db_Dbt_set_1size
- (JNIEnv *, jobject, jint);
-
-/*
- * Class: com_sleepycat_db_Dbt
- * Method: get_ulen
- * Signature: ()I
- */
-JNIEXPORT jint JNICALL Java_com_sleepycat_db_Dbt_get_1ulen
- (JNIEnv *, jobject);
-
-/*
- * Class: com_sleepycat_db_Dbt
- * Method: set_ulen
- * Signature: (I)V
- */
-JNIEXPORT void JNICALL Java_com_sleepycat_db_Dbt_set_1ulen
- (JNIEnv *, jobject, jint);
-
-/*
- * Class: com_sleepycat_db_Dbt
- * Method: get_dlen
- * Signature: ()I
- */
-JNIEXPORT jint JNICALL Java_com_sleepycat_db_Dbt_get_1dlen
- (JNIEnv *, jobject);
-
-/*
- * Class: com_sleepycat_db_Dbt
- * Method: set_dlen
- * Signature: (I)V
- */
-JNIEXPORT void JNICALL Java_com_sleepycat_db_Dbt_set_1dlen
- (JNIEnv *, jobject, jint);
-
-/*
- * Class: com_sleepycat_db_Dbt
- * Method: get_doff
- * Signature: ()I
- */
-JNIEXPORT jint JNICALL Java_com_sleepycat_db_Dbt_get_1doff
- (JNIEnv *, jobject);
-
-/*
- * Class: com_sleepycat_db_Dbt
- * Method: set_doff
- * Signature: (I)V
- */
-JNIEXPORT void JNICALL Java_com_sleepycat_db_Dbt_set_1doff
- (JNIEnv *, jobject, jint);
-
-/*
- * Class: com_sleepycat_db_Dbt
- * Method: get_flags
- * Signature: ()I
- */
-JNIEXPORT jint JNICALL Java_com_sleepycat_db_Dbt_get_1flags
- (JNIEnv *, jobject);
-
-/*
- * Class: com_sleepycat_db_Dbt
- * Method: set_flags
- * Signature: (I)V
- */
-JNIEXPORT void JNICALL Java_com_sleepycat_db_Dbt_set_1flags
- (JNIEnv *, jobject, jint);
-
-/*
- * Class: com_sleepycat_db_Dbt
- * Method: set_recno_key_data
- * Signature: (I)V
- */
-JNIEXPORT void JNICALL Java_com_sleepycat_db_Dbt_set_1recno_1key_1data
- (JNIEnv *, jobject, jint);
-
-/*
- * Class: com_sleepycat_db_Dbt
- * Method: get_recno_key_data
- * Signature: ()I
+ * Method: init
+ * Signature: ()V
*/
-JNIEXPORT jint JNICALL Java_com_sleepycat_db_Dbt_get_1recno_1key_1data
+JNIEXPORT void JNICALL Java_com_sleepycat_db_Dbt_init
(JNIEnv *, jobject);
/*
* Class: com_sleepycat_db_Dbt
- * Method: init
- * Signature: ()V
+ * Method: create_data
+ * Signature: ()[B
*/
-JNIEXPORT void JNICALL Java_com_sleepycat_db_Dbt_init
+JNIEXPORT jbyteArray JNICALL Java_com_sleepycat_db_Dbt_create_1data
(JNIEnv *, jobject);
#ifdef __cplusplus
diff --git a/bdb/libdb_java/com_sleepycat_db_xa_DbXAResource.h b/bdb/libdb_java/com_sleepycat_db_xa_DbXAResource.h
new file mode 100644
index 00000000000..00e9e2e6893
--- /dev/null
+++ b/bdb/libdb_java/com_sleepycat_db_xa_DbXAResource.h
@@ -0,0 +1,95 @@
+/* DO NOT EDIT THIS FILE - it is machine generated */
+#include <jni.h>
+/* Header for class com_sleepycat_db_xa_DbXAResource */
+
+#ifndef _Included_com_sleepycat_db_xa_DbXAResource
+#define _Included_com_sleepycat_db_xa_DbXAResource
+#ifdef __cplusplus
+extern "C" {
+#endif
+/* Inaccessible static: unique_rmid */
+/* Inaccessible static: class_00024com_00024sleepycat_00024db_00024xa_00024DbXAResource */
+/*
+ * Class: com_sleepycat_db_xa_DbXAResource
+ * Method: _init
+ * Signature: (Ljava/lang/String;II)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_xa_DbXAResource__1init
+ (JNIEnv *, jobject, jstring, jint, jint);
+
+/*
+ * Class: com_sleepycat_db_xa_DbXAResource
+ * Method: _close
+ * Signature: (Ljava/lang/String;II)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_xa_DbXAResource__1close
+ (JNIEnv *, jobject, jstring, jint, jint);
+
+/*
+ * Class: com_sleepycat_db_xa_DbXAResource
+ * Method: _commit
+ * Signature: (Ljavax/transaction/xa/Xid;IZ)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_xa_DbXAResource__1commit
+ (JNIEnv *, jobject, jobject, jint, jboolean);
+
+/*
+ * Class: com_sleepycat_db_xa_DbXAResource
+ * Method: _end
+ * Signature: (Ljavax/transaction/xa/Xid;II)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_xa_DbXAResource__1end
+ (JNIEnv *, jobject, jobject, jint, jint);
+
+/*
+ * Class: com_sleepycat_db_xa_DbXAResource
+ * Method: _forget
+ * Signature: (Ljavax/transaction/xa/Xid;I)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_xa_DbXAResource__1forget
+ (JNIEnv *, jobject, jobject, jint);
+
+/*
+ * Class: com_sleepycat_db_xa_DbXAResource
+ * Method: _prepare
+ * Signature: (Ljavax/transaction/xa/Xid;I)I
+ */
+JNIEXPORT jint JNICALL Java_com_sleepycat_db_xa_DbXAResource__1prepare
+ (JNIEnv *, jobject, jobject, jint);
+
+/*
+ * Class: com_sleepycat_db_xa_DbXAResource
+ * Method: _recover
+ * Signature: (II)[Ljavax/transaction/xa/Xid;
+ */
+JNIEXPORT jobjectArray JNICALL Java_com_sleepycat_db_xa_DbXAResource__1recover
+ (JNIEnv *, jobject, jint, jint);
+
+/*
+ * Class: com_sleepycat_db_xa_DbXAResource
+ * Method: _rollback
+ * Signature: (Ljavax/transaction/xa/Xid;I)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_xa_DbXAResource__1rollback
+ (JNIEnv *, jobject, jobject, jint);
+
+/*
+ * Class: com_sleepycat_db_xa_DbXAResource
+ * Method: _start
+ * Signature: (Ljavax/transaction/xa/Xid;II)V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_xa_DbXAResource__1start
+ (JNIEnv *, jobject, jobject, jint, jint);
+
+/*
+ * Class: com_sleepycat_db_xa_DbXAResource
+ * Method: xa_attach
+ * Signature: (Ljavax/transaction/xa/Xid;Ljava/lang/Integer;)Lcom/sleepycat/db/xa/DbXAResource$DbAttach;
+ */
+JNIEXPORT jobject JNICALL Java_com_sleepycat_db_xa_DbXAResource_xa_1attach
+ (JNIEnv *, jclass, jobject, jobject);
+
+#ifdef __cplusplus
+}
+#endif
+#endif
diff --git a/bdb/libdb_java/java_Db.c b/bdb/libdb_java/java_Db.c
index 5b01e5068d6..465c40f7d5a 100644
--- a/bdb/libdb_java/java_Db.c
+++ b/bdb/libdb_java/java_Db.c
@@ -1,24 +1,25 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1997, 1998, 1999, 2000
+ * Copyright (c) 1997-2002
* Sleepycat Software. All rights reserved.
*/
#include "db_config.h"
#ifndef lint
-static const char revid[] = "$Id: java_Db.c,v 11.34 2000/11/30 00:58:38 ubell Exp $";
+static const char revid[] = "$Id: java_Db.c,v 11.80 2002/08/29 14:22:23 margo Exp $";
#endif /* not lint */
#include <jni.h>
#include <stdlib.h>
#include <string.h>
-#include "db.h"
#include "db_int.h"
-#include "db_page.h"
-#include "db_ext.h"
+#include "dbinc/db_page.h"
+#include "dbinc/btree.h"
+#include "dbinc_auto/db_ext.h"
#include "java_util.h"
+#include "java_stat_auto.h"
#include "com_sleepycat_db_Db.h"
/* This struct is used in Db.verify and its callback */
@@ -30,25 +31,29 @@ struct verify_callback_struct {
jmethodID writemid;
};
-JAVADB_WO_ACCESS_METHOD(Db, jint, flags, DB, flags)
-JAVADB_WO_ACCESS_METHOD(Db, jint, h_1ffactor, DB, h_ffactor)
-JAVADB_WO_ACCESS_METHOD(Db, jint, h_1nelem, DB, h_nelem)
-JAVADB_WO_ACCESS_METHOD(Db, jint, lorder, DB, lorder)
-JAVADB_WO_ACCESS_METHOD(Db, jint, re_1delim, DB, re_delim)
-JAVADB_WO_ACCESS_METHOD(Db, jint, re_1len, DB, re_len)
-JAVADB_WO_ACCESS_METHOD(Db, jint, re_1pad, DB, re_pad)
-JAVADB_WO_ACCESS_METHOD(Db, jint, q_1extentsize, DB, q_extentsize)
-JAVADB_WO_ACCESS_METHOD(Db, jint, bt_1maxkey, DB, bt_maxkey)
-JAVADB_WO_ACCESS_METHOD(Db, jint, bt_1minkey, DB, bt_minkey)
-
-/* This only gets called once ever, at the beginning of execution
+JAVADB_GET_FLD(Db, jint, flags_1raw, DB, flags)
+
+JAVADB_SET_METH(Db, jint, flags, DB, flags)
+JAVADB_SET_METH(Db, jint, h_1ffactor, DB, h_ffactor)
+JAVADB_SET_METH(Db, jint, h_1nelem, DB, h_nelem)
+JAVADB_SET_METH(Db, jint, lorder, DB, lorder)
+JAVADB_SET_METH(Db, jint, re_1delim, DB, re_delim)
+JAVADB_SET_METH(Db, jint, re_1len, DB, re_len)
+JAVADB_SET_METH(Db, jint, re_1pad, DB, re_pad)
+JAVADB_SET_METH(Db, jint, q_1extentsize, DB, q_extentsize)
+JAVADB_SET_METH(Db, jint, bt_1maxkey, DB, bt_maxkey)
+JAVADB_SET_METH(Db, jint, bt_1minkey, DB, bt_minkey)
+
+/*
+ * This only gets called once ever, at the beginning of execution
* and can be used to initialize unchanging methodIds, fieldIds, etc.
*/
JNIEXPORT void JNICALL Java_com_sleepycat_db_Db_one_1time_1init
(JNIEnv *jnienv, /*Db.class*/ jclass jthisclass)
{
- COMPQUIET(jnienv, NULL);
COMPQUIET(jthisclass, NULL);
+
+ one_time_init(jnienv);
}
JNIEXPORT void JNICALL Java_com_sleepycat_db_Db__1init
@@ -66,12 +71,31 @@ JNIEXPORT void JNICALL Java_com_sleepycat_db_Db__1init
err = db_create(&db, dbenv, flags);
if (verify_return(jnienv, err, 0)) {
set_private_dbobj(jnienv, name_DB, jthis, db);
- dbinfo = dbji_construct(jnienv, flags);
+ dbinfo = dbji_construct(jnienv, jthis, flags);
set_private_info(jnienv, name_DB, jthis, dbinfo);
- db->cj_internal = dbinfo;
+ db->api_internal = dbinfo;
}
}
+JNIEXPORT void JNICALL Java_com_sleepycat_db_Db__1associate
+ (JNIEnv *jnienv, /*Db*/ jobject jthis, /* DbTxn */ jobject jtxn,
+ /*Db*/ jobject jsecondary, /*DbSecondaryKeyCreate*/ jobject jcallback,
+ jint flags)
+{
+ DB *db, *secondary;
+ DB_JAVAINFO *second_info;
+ DB_TXN *txn;
+
+ db = get_DB(jnienv, jthis);
+ txn = get_DB_TXN(jnienv, jtxn);
+ secondary = get_DB(jnienv, jsecondary);
+
+ second_info = (DB_JAVAINFO*)secondary->api_internal;
+ dbji_set_assoc_object(second_info, jnienv, db, txn, secondary,
+ jcallback, flags);
+
+}
+
JNIEXPORT jint JNICALL Java_com_sleepycat_db_Db__1close
(JNIEnv *jnienv, /*Db*/ jobject jthis, jint flags)
{
@@ -84,24 +108,22 @@ JNIEXPORT jint JNICALL Java_com_sleepycat_db_Db__1close
if (!verify_non_null(jnienv, db))
return (0);
- JAVADB_API_BEGIN(db, jthis);
-
- /* Null out the private data to indicate the DB is invalid.
+ /*
+ * Null out the private data to indicate the DB is invalid.
* We do this in advance to help guard against multithreading
* issues.
*/
set_private_dbobj(jnienv, name_DB, jthis, 0);
err = db->close(db, flags);
- if (err != DB_INCOMPLETE)
- verify_return(jnienv, err, 0);
+ verify_return(jnienv, err, 0);
dbji_dealloc(dbinfo, jnienv);
- /* don't call JAVADB_API_END - db cannot be used */
return (err);
}
-/* We are being notified that the parent DbEnv has closed.
+/*
+ * We are being notified that the parent DbEnv has closed.
* Zero out the pointer to the DB, since it is no longer
* valid, to prevent mistakes. The user will get a null
* pointer exception if they try to use this Db again.
@@ -122,10 +144,8 @@ JNIEXPORT void JNICALL Java_com_sleepycat_db_Db_append_1recno_1changed
if (!verify_non_null(jnienv, db))
return;
- JAVADB_API_BEGIN(db, jthis);
- dbinfo = (DB_JAVAINFO*)db->cj_internal;
+ dbinfo = (DB_JAVAINFO*)db->api_internal;
dbji_set_append_recno_object(dbinfo, jnienv, db, jcallback);
- JAVADB_API_END(db);
}
JNIEXPORT void JNICALL Java_com_sleepycat_db_Db_bt_1compare_1changed
@@ -138,10 +158,8 @@ JNIEXPORT void JNICALL Java_com_sleepycat_db_Db_bt_1compare_1changed
if (!verify_non_null(jnienv, db))
return;
- JAVADB_API_BEGIN(db, jthis);
- dbinfo = (DB_JAVAINFO*)db->cj_internal;
+ dbinfo = (DB_JAVAINFO*)db->api_internal;
dbji_set_bt_compare_object(dbinfo, jnienv, db, jbtcompare);
- JAVADB_API_END(db);
}
JNIEXPORT void JNICALL Java_com_sleepycat_db_Db_bt_1prefix_1changed
@@ -154,10 +172,8 @@ JNIEXPORT void JNICALL Java_com_sleepycat_db_Db_bt_1prefix_1changed
if (!verify_non_null(jnienv, db))
return;
- JAVADB_API_BEGIN(db, jthis);
- dbinfo = (DB_JAVAINFO*)db->cj_internal;
+ dbinfo = (DB_JAVAINFO*)db->api_internal;
dbji_set_bt_prefix_object(dbinfo, jnienv, db, jbtprefix);
- JAVADB_API_END(db);
}
JNIEXPORT jobject JNICALL Java_com_sleepycat_db_Db_cursor
@@ -182,26 +198,23 @@ JNIEXPORT jint JNICALL Java_com_sleepycat_db_Db_del
int err;
DB_TXN *dbtxnid;
DB *db;
- JDBT dbkey;
+ LOCKED_DBT lkey;
err = 0;
db = get_DB(jnienv, jthis);
if (!verify_non_null(jnienv, db))
return (0);
- JAVADB_API_BEGIN(db, jthis);
dbtxnid = get_DB_TXN(jnienv, txnid);
- if (jdbt_lock(&dbkey, jnienv, key, inOp) != 0)
+ if (locked_dbt_get(&lkey, jnienv, db->dbenv, key, inOp) != 0)
goto out;
- err = db->del(db, dbtxnid, &dbkey.dbt->dbt, dbflags);
- if (err != DB_NOTFOUND) {
+ err = db->del(db, dbtxnid, &lkey.javainfo->dbt, dbflags);
+ if (!DB_RETOK_DBDEL(err))
verify_return(jnienv, err, 0);
- }
out:
- jdbt_unlock(&dbkey, jnienv);
- JAVADB_API_END(db);
+ locked_dbt_put(&lkey, jnienv, db->dbenv);
return (err);
}
@@ -215,49 +228,43 @@ JNIEXPORT void JNICALL Java_com_sleepycat_db_Db_dup_1compare_1changed
if (!verify_non_null(jnienv, db))
return;
- JAVADB_API_BEGIN(db, jthis);
- dbinfo = (DB_JAVAINFO*)db->cj_internal;
+ dbinfo = (DB_JAVAINFO*)db->api_internal;
dbji_set_dup_compare_object(dbinfo, jnienv, db, jdupcompare);
- JAVADB_API_END(db);
}
JNIEXPORT void JNICALL Java_com_sleepycat_db_Db_err
(JNIEnv *jnienv, /*Db*/ jobject jthis, jint ecode, jstring msg)
{
DB *db;
- JSTR msg_string;
+ LOCKED_STRING ls_msg;
- if (jstr_lock(&msg_string, jnienv, msg) != 0)
+ if (locked_string_get(&ls_msg, jnienv, msg) != 0)
goto out;
db = get_DB(jnienv, jthis);
if (!verify_non_null(jnienv, db))
goto out;
- JAVADB_API_BEGIN(db, jthis);
- db->err(db, ecode, msg_string.string);
- JAVADB_API_END(db);
+ db->err(db, ecode, "%s", ls_msg.string);
out:
- jstr_unlock(&msg_string, jnienv);
+ locked_string_put(&ls_msg, jnienv);
}
JNIEXPORT void JNICALL Java_com_sleepycat_db_Db_errx
(JNIEnv *jnienv, /*Db*/ jobject jthis, jstring msg)
{
- JSTR msg_string;
+ LOCKED_STRING ls_msg;
DB *db = get_DB(jnienv, jthis);
- if (jstr_lock(&msg_string, jnienv, msg) != 0)
+ if (locked_string_get(&ls_msg, jnienv, msg) != 0)
goto out;
if (!verify_non_null(jnienv, db))
goto out;
- JAVADB_API_BEGIN(db, jthis);
- db->errx(db, msg_string.string);
- JAVADB_API_END(db);
+ db->errx(db, "%s", ls_msg.string);
out:
- jstr_unlock(&msg_string, jnienv);
+ locked_string_put(&ls_msg, jnienv);
}
JNIEXPORT jint JNICALL Java_com_sleepycat_db_Db_fd
@@ -270,14 +277,31 @@ JNIEXPORT jint JNICALL Java_com_sleepycat_db_Db_fd
if (!verify_non_null(jnienv, db))
return (0);
- JAVADB_API_BEGIN(db, jthis);
err = db->fd(db, &return_value);
verify_return(jnienv, err, 0);
- JAVADB_API_END(db);
return (return_value);
}
+JNIEXPORT void JNICALL Java_com_sleepycat_db_Db_set_1encrypt
+ (JNIEnv *jnienv, /*Db*/ jobject jthis, jstring jpasswd, jint flags)
+{
+ int err;
+ DB *db;
+ LOCKED_STRING ls_passwd;
+
+ db = get_DB(jnienv, jthis);
+ if (!verify_non_null(jnienv, db))
+ return;
+ if (locked_string_get(&ls_passwd, jnienv, jpasswd) != 0)
+ goto out;
+
+ err = db->set_encrypt(db, ls_passwd.string, flags);
+ verify_return(jnienv, err, 0);
+
+out: locked_string_put(&ls_passwd, jnienv);
+}
+
JNIEXPORT void JNICALL Java_com_sleepycat_db_Db_feedback_1changed
(JNIEnv *jnienv, /*Db*/ jobject jthis, /*DbFeedback*/ jobject jfeedback)
{
@@ -288,10 +312,8 @@ JNIEXPORT void JNICALL Java_com_sleepycat_db_Db_feedback_1changed
if (!verify_non_null(jnienv, db))
return;
- JAVADB_API_BEGIN(db, jthis);
- dbinfo = (DB_JAVAINFO*)db->cj_internal;
+ dbinfo = (DB_JAVAINFO*)db->api_internal;
dbji_set_feedback_object(dbinfo, jnienv, db, jfeedback);
- JAVADB_API_END(db);
}
JNIEXPORT jint JNICALL Java_com_sleepycat_db_Db_get
@@ -300,16 +322,16 @@ JNIEXPORT jint JNICALL Java_com_sleepycat_db_Db_get
{
int err, op_flags, retry;
DB *db;
+ DB_ENV *dbenv;
OpKind keyop, dataop;
DB_TXN *dbtxnid;
- JDBT dbkey, dbdata;
+ LOCKED_DBT lkey, ldata;
err = 0;
db = get_DB(jnienv, jthis);
if (!verify_non_null(jnienv, db))
goto out3;
-
- JAVADB_API_BEGIN(db, jthis);
+ dbenv = db->dbenv;
/* Depending on flags, the key may be input/output. */
keyop = inOp;
@@ -325,30 +347,34 @@ JNIEXPORT jint JNICALL Java_com_sleepycat_db_Db_get
dbtxnid = get_DB_TXN(jnienv, txnid);
- if (jdbt_lock(&dbkey, jnienv, key, keyop) != 0)
+ if (locked_dbt_get(&lkey, jnienv, dbenv, key, keyop) != 0)
goto out2;
- if (jdbt_lock(&dbdata, jnienv, data, dataop) != 0)
+ if (locked_dbt_get(&ldata, jnienv, dbenv, data, dataop) != 0)
goto out1;
for (retry = 0; retry < 3; retry++) {
- err = db->get(db, dbtxnid, &dbkey.dbt->dbt, &dbdata.dbt->dbt, flags);
+ err = db->get(db,
+ dbtxnid, &lkey.javainfo->dbt, &ldata.javainfo->dbt, flags);
- /* If we failed due to lack of memory in our DBT arrays,
+ /*
+ * If we failed due to lack of memory in our DBT arrays,
* retry.
*/
if (err != ENOMEM)
break;
- if (!jdbt_realloc(&dbdata, jnienv))
+ if (!locked_dbt_realloc(&lkey, jnienv, dbenv) &&
+ !locked_dbt_realloc(&ldata, jnienv, dbenv))
break;
}
- if (err != DB_NOTFOUND) {
- verify_return(jnienv, err, 0);
- }
out1:
- jdbt_unlock(&dbdata, jnienv);
+ locked_dbt_put(&ldata, jnienv, dbenv);
out2:
- jdbt_unlock(&dbkey, jnienv);
+ locked_dbt_put(&lkey, jnienv, dbenv);
out3:
- JAVADB_API_END(db);
+ if (!DB_RETOK_DBGET(err)) {
+ if (verify_dbt(jnienv, err, &lkey) &&
+ verify_dbt(jnienv, err, &ldata))
+ verify_return(jnienv, err, 0);
+ }
return (err);
}
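
The get path retries up to three times on ENOMEM, asking locked_dbt_realloc() to grow the Java-side buffers and giving up once neither buffer could be enlarged; Db_pget below uses the same loop. Here is a self-contained sketch of that retry-and-grow pattern, with a toy fetch() in place of db->get() and a single buffer instead of the key/data pair:

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct buf { char *p; size_t len; };

    /* Grow the buffer; nonzero means it grew (like locked_dbt_realloc()). */
    static int grow(struct buf *b)
    {
        char *np;

        if ((np = realloc(b->p, b->len * 2)) == NULL)
            return (0);
        b->p = np;
        b->len *= 2;
        return (1);
    }

    /* Toy operation: reports ENOMEM until the buffer is large enough. */
    static int fetch(struct buf *b, const char *src)
    {
        if (b->len <= strlen(src))
            return (ENOMEM);
        strcpy(b->p, src);
        return (0);
    }

    int main(void)
    {
        struct buf data = { malloc(4), 4 };
        int err, retry;

        for (retry = 0; retry < 3; retry++) {
            err = fetch(&data, "hello, world");
            if (err != ENOMEM)
                break;
            if (!grow(&data))
                break;
        }
        printf("err=%d data=%s\n", err, err == 0 ? data.p : "(none)");
        free(data.p);
        return (err);
    }
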
@@ -362,10 +388,8 @@ JNIEXPORT void JNICALL Java_com_sleepycat_db_Db_hash_1changed
if (!verify_non_null(jnienv, db))
return;
- JAVADB_API_BEGIN(db, jthis);
- dbinfo = (DB_JAVAINFO*)db->cj_internal;
+ dbinfo = (DB_JAVAINFO*)db->api_internal;
dbji_set_h_hash_object(dbinfo, jnienv, db, jhash);
- JAVADB_API_END(db);
}
JNIEXPORT jobject JNICALL Java_com_sleepycat_db_Db_join
@@ -373,15 +397,25 @@ JNIEXPORT jobject JNICALL Java_com_sleepycat_db_Db_join
jint flags)
{
int err;
- DB *db = get_DB(jnienv, jthis);
- int count = (*jnienv)->GetArrayLength(jnienv, curslist);
- DBC **newlist = (DBC **)malloc(sizeof(DBC *) * (count+1));
+ DB *db;
+ int count;
+ DBC **newlist;
DBC *dbc;
int i;
+ int size;
+
+ db = get_DB(jnienv, jthis);
+ count = (*jnienv)->GetArrayLength(jnienv, curslist);
+ size = sizeof(DBC *) * (count+1);
+ if ((err = __os_malloc(db->dbenv, size, &newlist)) != 0) {
+ if (!verify_return(jnienv, err, 0))
+ return (NULL);
+ }
/* Convert the java array of Dbc's to a C array of DBC's. */
- for (i=0; i<count; i++) {
- jobject jobj = (*jnienv)->GetObjectArrayElement(jnienv, curslist, i);
+ for (i = 0; i < count; i++) {
+ jobject jobj =
+ (*jnienv)->GetObjectArrayElement(jnienv, curslist, i);
if (jobj == 0) {
/*
* An embedded null in the array is treated
@@ -398,39 +432,39 @@ JNIEXPORT jobject JNICALL Java_com_sleepycat_db_Db_join
if (!verify_non_null(jnienv, db))
return (NULL);
- JAVADB_API_BEGIN(db, jthis);
err = db->join(db, newlist, &dbc, flags);
- free(newlist);
verify_return(jnienv, err, 0);
+ __os_free(db->dbenv, newlist);
- JAVADB_API_END(db);
return (get_Dbc(jnienv, dbc));
}
JNIEXPORT void JNICALL Java_com_sleepycat_db_Db_key_1range
- (JNIEnv *jnienv, /*Db*/ jobject jthis, /*DbTxn*/ jobject jtxn,
+ (JNIEnv *jnienv, /*Db*/ jobject jthis, /*DbTxn*/ jobject txnid,
/*Dbt*/ jobject jkey, jobject /*DbKeyRange*/ range, jint flags)
{
int err;
- DB *db = get_DB(jnienv, jthis);
- DB_TXN *txn = get_DB_TXN(jnienv, jtxn);
- JDBT dbkey;
+ DB *db;
+ DB_TXN *dbtxnid;
+ LOCKED_DBT lkey;
DB_KEY_RANGE result;
jfieldID fid;
jclass krclass;
+ db = get_DB(jnienv, jthis);
+ dbtxnid = get_DB_TXN(jnienv, txnid);
if (!verify_non_null(jnienv, db))
return;
- JAVADB_API_BEGIN(db, jthis);
if (!verify_non_null(jnienv, range))
return;
- if (jdbt_lock(&dbkey, jnienv, jkey, inOp) != 0)
+ if (locked_dbt_get(&lkey, jnienv, db->dbenv, jkey, inOp) != 0)
goto out;
- err = db->key_range(db, txn, &dbkey.dbt->dbt, &result, flags);
+ err = db->key_range(db, dbtxnid, &lkey.javainfo->dbt, &result, flags);
if (verify_return(jnienv, err, 0)) {
/* fill in the values of the DbKeyRange structure */
- krclass = get_class(jnienv, "DbKeyRange");
+ if ((krclass = get_class(jnienv, "DbKeyRange")) == NULL)
+ return; /* An exception has been posted. */
fid = (*jnienv)->GetFieldID(jnienv, krclass, "less", "D");
(*jnienv)->SetDoubleField(jnienv, range, fid, result.less);
fid = (*jnienv)->GetFieldID(jnienv, krclass, "equal", "D");
@@ -439,8 +473,77 @@ JNIEXPORT void JNICALL Java_com_sleepycat_db_Db_key_1range
(*jnienv)->SetDoubleField(jnienv, range, fid, result.greater);
}
out:
- jdbt_unlock(&dbkey, jnienv);
- JAVADB_API_END(db);
+ locked_dbt_put(&lkey, jnienv, db->dbenv);
+}
+
+JNIEXPORT jint JNICALL Java_com_sleepycat_db_Db_pget
+ (JNIEnv *jnienv, /*Db*/ jobject jthis, /*DbTxn*/ jobject txnid,
+ /*Dbt*/ jobject key, /*Dbt*/ jobject rkey, /*Dbt*/ jobject data, jint flags)
+{
+ int err, op_flags, retry;
+ DB *db;
+ DB_ENV *dbenv;
+ OpKind keyop, rkeyop, dataop;
+ DB_TXN *dbtxnid;
+ LOCKED_DBT lkey, lrkey, ldata;
+
+ err = 0;
+ db = get_DB(jnienv, jthis);
+ if (!verify_non_null(jnienv, db))
+ goto out4;
+ dbenv = db->dbenv;
+
+ /* Depending on flags, the key may be input/output. */
+ keyop = inOp;
+ rkeyop = outOp;
+ dataop = outOp;
+ op_flags = flags & DB_OPFLAGS_MASK;
+ if (op_flags == DB_SET_RECNO) {
+ keyop = inOutOp;
+ }
+ else if (op_flags == DB_GET_BOTH) {
+ keyop = inOutOp;
+ rkeyop = inOutOp;
+ dataop = inOutOp;
+ }
+
+ dbtxnid = get_DB_TXN(jnienv, txnid);
+
+ if (locked_dbt_get(&lkey, jnienv, dbenv, key, keyop) != 0)
+ goto out3;
+ if (locked_dbt_get(&lrkey, jnienv, dbenv, rkey, rkeyop) != 0)
+ goto out2;
+ if (locked_dbt_get(&ldata, jnienv, dbenv, data, dataop) != 0)
+ goto out1;
+ for (retry = 0; retry < 3; retry++) {
+ err = db->pget(db, dbtxnid, &lkey.javainfo->dbt,
+ &lrkey.javainfo->dbt, &ldata.javainfo->dbt, flags);
+
+ /*
+ * If we failed due to lack of memory in our DBT arrays,
+ * retry.
+ */
+ if (err != ENOMEM)
+ break;
+ if (!locked_dbt_realloc(&lkey, jnienv, dbenv) &&
+ !locked_dbt_realloc(&lrkey, jnienv, dbenv) &&
+ !locked_dbt_realloc(&ldata, jnienv, dbenv))
+ break;
+ }
+ out1:
+ locked_dbt_put(&ldata, jnienv, dbenv);
+ out2:
+ locked_dbt_put(&lrkey, jnienv, dbenv);
+ out3:
+ locked_dbt_put(&lkey, jnienv, dbenv);
+ out4:
+ if (!DB_RETOK_DBGET(err)) {
+ if (verify_dbt(jnienv, err, &lkey) &&
+ verify_dbt(jnienv, err, &lrkey) &&
+ verify_dbt(jnienv, err, &ldata))
+ verify_return(jnienv, err, 0);
+ }
+ return (err);
}
JNIEXPORT jint JNICALL Java_com_sleepycat_db_Db_put
@@ -449,137 +552,124 @@ JNIEXPORT jint JNICALL Java_com_sleepycat_db_Db_put
{
int err;
DB *db;
+ DB_ENV *dbenv;
DB_TXN *dbtxnid;
- JDBT dbkey, dbdata;
+ LOCKED_DBT lkey, ldata;
+ OpKind keyop;
err = 0;
db = get_DB(jnienv, jthis);
dbtxnid = get_DB_TXN(jnienv, txnid);
if (!verify_non_null(jnienv, db))
- return (0); /* error will be thrown, retval doesn't matter */
- JAVADB_API_BEGIN(db, jthis);
+ return (0); /* error will be thrown, retval doesn't matter */
+ dbenv = db->dbenv;
- if (jdbt_lock(&dbkey, jnienv, key, inOp) != 0)
+ /*
+ * For DB_APPEND, the key may be output-only; for all other flags,
+ * it's input-only.
+ */
+ if ((flags & DB_OPFLAGS_MASK) == DB_APPEND)
+ keyop = outOp;
+ else
+ keyop = inOp;
+
+ if (locked_dbt_get(&lkey, jnienv, dbenv, key, keyop) != 0)
goto out2;
- if (jdbt_lock(&dbdata, jnienv, data, inOp) != 0)
+ if (locked_dbt_get(&ldata, jnienv, dbenv, data, inOp) != 0)
goto out1;
if (!verify_non_null(jnienv, db))
goto out1;
- err = db->put(db, dbtxnid, &dbkey.dbt->dbt, &dbdata.dbt->dbt, flags);
- if (err != DB_KEYEXIST) {
+
+ err = db->put(db,
+ dbtxnid, &lkey.javainfo->dbt, &ldata.javainfo->dbt, flags);
+ if (!DB_RETOK_DBPUT(err))
verify_return(jnienv, err, 0);
- }
+
out1:
- jdbt_unlock(&dbdata, jnienv);
+ locked_dbt_put(&ldata, jnienv, dbenv);
out2:
- jdbt_unlock(&dbkey, jnienv);
- JAVADB_API_END(db);
+ locked_dbt_put(&lkey, jnienv, dbenv);
return (err);
}
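
The put wrapper isolates the operation code with DB_OPFLAGS_MASK before comparing it against DB_APPEND, so modifier bits OR'd into the flags word never confuse the comparison; the get and pget wrappers above do the same for DB_SET_RECNO and DB_GET_BOTH. A tiny sketch of that mask-then-compare idiom; the flag values below are illustrative only, not Berkeley DB's real constants.

    #include <stdio.h>

    /* Illustrative flag layout only -- not the library's real values. */
    #define OPFLAGS_MASK 0x00ff  /* low byte selects the operation */
    #define OP_APPEND    0x0002
    #define FLAG_NOSYNC  0x0100  /* an orthogonal modifier bit */

    enum dir { DIR_IN, DIR_OUT };

    static enum dir key_direction(unsigned int flags)
    {
        /* Strip the modifier bits, then compare the opcode alone. */
        if ((flags & OPFLAGS_MASK) == OP_APPEND)
            return (DIR_OUT);  /* the record number comes back to the caller */
        return (DIR_IN);
    }

    int main(void)
    {
        printf("%d\n", key_direction(OP_APPEND | FLAG_NOSYNC));  /* 1: out */
        printf("%d\n", key_direction(0));                        /* 0: in  */
        return (0);
    }
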
-JNIEXPORT void JNICALL Java_com_sleepycat_db_Db_rename
- (JNIEnv *jnienv, /*Db*/ jobject jthis, jstring file,
- jstring database, jstring newname, jint flags)
+JNIEXPORT void JNICALL Java_com_sleepycat_db_Db__1remove
+ (JNIEnv *jnienv, /*Db*/ jobject jthis,
+ jstring file, jstring database, jint flags)
{
int err;
DB *db;
DB_JAVAINFO *dbinfo;
- JSTR j_file;
- JSTR j_database;
- JSTR j_newname;
+ LOCKED_STRING ls_file;
+ LOCKED_STRING ls_database;
db = get_DB(jnienv, jthis);
dbinfo = get_DB_JAVAINFO(jnienv, jthis);
+
if (!verify_non_null(jnienv, db))
return;
- JAVADB_API_BEGIN(db, jthis);
- if (jstr_lock(&j_file, jnienv, file) != 0)
- goto out3;
- if (jstr_lock(&j_database, jnienv, database) != 0)
+ if (locked_string_get(&ls_file, jnienv, file) != 0)
goto out2;
- if (jstr_lock(&j_newname, jnienv, newname) != 0)
+ if (locked_string_get(&ls_database, jnienv, database) != 0)
goto out1;
+ err = db->remove(db, ls_file.string, ls_database.string, flags);
- err = db->rename(db, j_file.string, j_database.string,
- j_newname.string, flags);
-
- verify_return(jnienv, err, EXCEPTION_FILE_NOT_FOUND);
- dbji_dealloc(dbinfo, jnienv);
set_private_dbobj(jnienv, name_DB, jthis, 0);
+ verify_return(jnienv, err, EXCEPTION_FILE_NOT_FOUND);
out1:
- jstr_unlock(&j_newname, jnienv);
+ locked_string_put(&ls_database, jnienv);
out2:
- jstr_unlock(&j_database, jnienv);
- out3:
- jstr_unlock(&j_file, jnienv);
- /* don't call JAVADB_API_END - db cannot be used */
+ locked_string_put(&ls_file, jnienv);
+
+ dbji_dealloc(dbinfo, jnienv);
}
-JNIEXPORT void JNICALL Java_com_sleepycat_db_Db_remove
- (JNIEnv *jnienv, /*Db*/ jobject jthis, jstring file,
- jstring database, jint flags)
+JNIEXPORT void JNICALL Java_com_sleepycat_db_Db__1rename
+ (JNIEnv *jnienv, /*Db*/ jobject jthis,
+ jstring file, jstring database, jstring newname, jint flags)
{
int err;
- DB *db = get_DB(jnienv, jthis);
- DB_JAVAINFO *dbinfo = get_DB_JAVAINFO(jnienv, jthis);
- JSTR j_file;
- JSTR j_database;
+ DB *db;
+ DB_JAVAINFO *dbinfo;
+ LOCKED_STRING ls_file;
+ LOCKED_STRING ls_database;
+ LOCKED_STRING ls_newname;
+ db = get_DB(jnienv, jthis);
dbinfo = get_DB_JAVAINFO(jnienv, jthis);
if (!verify_non_null(jnienv, db))
return;
- JAVADB_API_BEGIN(db, jthis);
- if (jstr_lock(&j_file, jnienv, file) != 0)
+ if (locked_string_get(&ls_file, jnienv, file) != 0)
+ goto out3;
+ if (locked_string_get(&ls_database, jnienv, database) != 0)
goto out2;
- if (jstr_lock(&j_database, jnienv, database) != 0)
+ if (locked_string_get(&ls_newname, jnienv, newname) != 0)
goto out1;
- err = db->remove(db, j_file.string, j_database.string, flags);
- set_private_dbobj(jnienv, name_DB, jthis, 0);
+ err = db->rename(db, ls_file.string, ls_database.string,
+ ls_newname.string, flags);
+
verify_return(jnienv, err, EXCEPTION_FILE_NOT_FOUND);
- dbji_dealloc(dbinfo, jnienv);
+ set_private_dbobj(jnienv, name_DB, jthis, 0);
out1:
- jstr_unlock(&j_database, jnienv);
+ locked_string_put(&ls_newname, jnienv);
out2:
- jstr_unlock(&j_file, jnienv);
- /* don't call JAVADB_API_END - db cannot be used */
-}
-
-JNIEXPORT void JNICALL
- Java_com_sleepycat_db_Db_set_1pagesize
- (JNIEnv *jnienv, /*Db*/ jobject jthis, jlong value)
-{
- int err;
- DB *db;
+ locked_string_put(&ls_database, jnienv);
+ out3:
+ locked_string_put(&ls_file, jnienv);
- db = get_DB(jnienv, jthis);
- if (verify_non_null(jnienv, db)) {
- JAVADB_API_BEGIN(db, jthis);
- err = db->set_pagesize(db, (u_int32_t)value);
- verify_return(jnienv, err, 0);
- JAVADB_API_END(db);
- }
+ dbji_dealloc(dbinfo, jnienv);
}
-JNIEXPORT void JNICALL
- Java_com_sleepycat_db_Db_set_1cachesize
- (JNIEnv *jnienv, /*Db*/ jobject jthis, jint gbytes, jint bytes,
- jint ncaches)
-{
- int err;
- DB *db;
-
- db = get_DB(jnienv, jthis);
- if (verify_non_null(jnienv, db)) {
- JAVADB_API_BEGIN(db, jthis);
- err = db->set_cachesize(db, gbytes, bytes, ncaches);
- verify_return(jnienv, err, 0);
- JAVADB_API_END(db);
- }
-}
+JAVADB_METHOD(Db_set_1pagesize, (JAVADB_ARGS, jlong pagesize), DB,
+ set_pagesize, (c_this, (u_int32_t)pagesize))
+JAVADB_METHOD(Db_set_1cachesize,
+ (JAVADB_ARGS, jint gbytes, jint bytes, jint ncaches), DB,
+ set_cachesize, (c_this, gbytes, bytes, ncaches))
+JAVADB_METHOD(Db_set_1cache_1priority, (JAVADB_ARGS, jint priority), DB,
+ set_cache_priority, (c_this, (DB_CACHE_PRIORITY)priority))
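
The hand-written set_pagesize() and set_cachesize() bodies above are replaced by one JAVADB_METHOD invocation each; the macro's definition is not part of this diff, but judging from the argument lists it generates the JNI entry point, fetches the C handle (the c_this in the call), and checks the return code. A rough standalone sketch of the wrapper-generating idea, with invented handle and wrapper names:

    #include <stdio.h>

    /* A toy handle with two settable knobs. */
    struct handle { unsigned int pagesize, cachesize; };

    static int handle_set_pagesize(struct handle *h, unsigned int v)
    {
        h->pagesize = v;
        return (0);
    }

    static int handle_set_cachesize(struct handle *h, unsigned int v)
    {
        h->cachesize = v;
        return (0);
    }

    /* One line per trivial setter instead of a hand-written body each. */
    #define WRAPPER(name, target)                            \
    static int wrap_##name(struct handle *h, unsigned int v) \
    {                                                        \
        return (target(h, v));                               \
    }

    WRAPPER(set_pagesize, handle_set_pagesize)
    WRAPPER(set_cachesize, handle_set_cachesize)

    int main(void)
    {
        struct handle h = { 0, 0 };

        wrap_set_pagesize(&h, 8192);
        wrap_set_cachesize(&h, 1u << 20);
        printf("%u %u\n", h.pagesize, h.cachesize);
        return (0);
    }
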
JNIEXPORT void JNICALL
Java_com_sleepycat_db_Db_set_1re_1source
@@ -590,257 +680,185 @@ JNIEXPORT void JNICALL
db = get_DB(jnienv, jthis);
if (verify_non_null(jnienv, db)) {
- JAVADB_API_BEGIN(db, jthis);
/* XXX does the string from get_c_string ever get freed? */
if (re_source != NULL)
- err = db->set_re_source(db, get_c_string(jnienv, re_source));
+ err = db->set_re_source(db,
+ get_c_string(jnienv, re_source));
else
err = db->set_re_source(db, 0);
verify_return(jnienv, err, 0);
- JAVADB_API_END(db);
}
}
JNIEXPORT jobject JNICALL Java_com_sleepycat_db_Db_stat
(JNIEnv *jnienv, jobject jthis, jint flags)
{
- int err;
- DB *db = get_DB(jnienv, jthis);
- jobject retval = NULL;
- jclass dbclass;
- void *statp = 0;
+ DB *db;
DB_BTREE_STAT *bstp;
DB_HASH_STAT *hstp;
DB_QUEUE_STAT *qstp;
+ DBTYPE dbtype;
+ jobject retval;
+ jclass dbclass;
+ size_t bytesize;
+ void *statp;
+ bytesize = 0;
+ retval = NULL;
+ statp = NULL;
+
+ db = get_DB(jnienv, jthis);
if (!verify_non_null(jnienv, db))
return (NULL);
- JAVADB_API_BEGIN(db, jthis);
-
- err = db->stat(db, &statp, NULL, flags);
- if (verify_return(jnienv, err, 0)) {
- DBTYPE dbtype = db->get_type(db);
+ if (verify_return(jnienv, db->stat(db, &statp, flags), 0) &&
+ verify_return(jnienv, db->get_type(db, &dbtype), 0)) {
switch (dbtype) {
-
/* Btree and recno share the same stat structure */
case DB_BTREE:
case DB_RECNO:
bstp = (DB_BTREE_STAT *)statp;
+ bytesize = sizeof(DB_BTREE_STAT);
retval = create_default_object(jnienv,
name_DB_BTREE_STAT);
- dbclass = get_class(jnienv, name_DB_BTREE_STAT);
-
- /* Set the individual fields */
- set_int_field(jnienv, dbclass, retval,
- "bt_magic", bstp->bt_magic);
- set_int_field(jnienv, dbclass, retval,
- "bt_version", bstp->bt_version);
- set_int_field(jnienv, dbclass, retval,
- "bt_metaflags", bstp->bt_metaflags);
- set_int_field(jnienv, dbclass, retval,
- "bt_nkeys", bstp->bt_nkeys);
- set_int_field(jnienv, dbclass, retval,
- "bt_ndata", bstp->bt_ndata);
- set_int_field(jnienv, dbclass, retval,
- "bt_pagesize", bstp->bt_pagesize);
- set_int_field(jnienv, dbclass, retval,
- "bt_maxkey", bstp->bt_maxkey);
- set_int_field(jnienv, dbclass, retval,
- "bt_minkey", bstp->bt_minkey);
- set_int_field(jnienv, dbclass, retval,
- "bt_re_len", bstp->bt_re_len);
- set_int_field(jnienv, dbclass, retval,
- "bt_re_pad", bstp->bt_re_pad);
- set_int_field(jnienv, dbclass, retval,
- "bt_levels", bstp->bt_levels);
- set_int_field(jnienv, dbclass, retval,
- "bt_int_pg", bstp->bt_int_pg);
- set_int_field(jnienv, dbclass, retval,
- "bt_leaf_pg", bstp->bt_leaf_pg);
- set_int_field(jnienv, dbclass, retval,
- "bt_dup_pg", bstp->bt_dup_pg);
- set_int_field(jnienv, dbclass, retval,
- "bt_over_pg", bstp->bt_over_pg);
- set_int_field(jnienv, dbclass, retval,
- "bt_free", bstp->bt_free);
- set_int_field(jnienv, dbclass, retval,
- "bt_int_pgfree", bstp->bt_int_pgfree);
- set_int_field(jnienv, dbclass, retval,
- "bt_leaf_pgfree", bstp->bt_leaf_pgfree);
- set_int_field(jnienv, dbclass, retval,
- "bt_dup_pgfree", bstp->bt_dup_pgfree);
- set_int_field(jnienv, dbclass, retval,
- "bt_over_pgfree", bstp->bt_over_pgfree);
+ if ((dbclass =
+ get_class(jnienv, name_DB_BTREE_STAT)) == NULL)
+ break; /* An exception has been posted. */
+ __jv_fill_bt_stat(jnienv, dbclass, retval, bstp);
break;
/* Hash stat structure */
case DB_HASH:
hstp = (DB_HASH_STAT *)statp;
+ bytesize = sizeof(DB_HASH_STAT);
retval = create_default_object(jnienv,
name_DB_HASH_STAT);
- dbclass = get_class(jnienv, name_DB_HASH_STAT);
-
- /* Set the individual fields */
- set_int_field(jnienv, dbclass, retval,
- "hash_magic", hstp->hash_magic);
- set_int_field(jnienv, dbclass, retval,
- "hash_version", hstp->hash_version);
- set_int_field(jnienv, dbclass, retval,
- "hash_metaflags", hstp->hash_metaflags);
- set_int_field(jnienv, dbclass, retval,
- "hash_nkeys", hstp->hash_nkeys);
- set_int_field(jnienv, dbclass, retval,
- "hash_ndata", hstp->hash_ndata);
- set_int_field(jnienv, dbclass, retval,
- "hash_pagesize", hstp->hash_pagesize);
- set_int_field(jnienv, dbclass, retval,
- "hash_nelem", hstp->hash_nelem);
- set_int_field(jnienv, dbclass, retval,
- "hash_ffactor", hstp->hash_ffactor);
- set_int_field(jnienv, dbclass, retval,
- "hash_buckets", hstp->hash_buckets);
- set_int_field(jnienv, dbclass, retval,
- "hash_free", hstp->hash_free);
- set_int_field(jnienv, dbclass, retval,
- "hash_bfree", hstp->hash_bfree);
- set_int_field(jnienv, dbclass, retval,
- "hash_bigpages", hstp->hash_bigpages);
- set_int_field(jnienv, dbclass, retval,
- "hash_big_bfree", hstp->hash_big_bfree);
- set_int_field(jnienv, dbclass, retval,
- "hash_overflows", hstp->hash_overflows);
- set_int_field(jnienv, dbclass, retval,
- "hash_ovfl_free", hstp->hash_ovfl_free);
- set_int_field(jnienv, dbclass, retval,
- "hash_dup", hstp->hash_dup);
- set_int_field(jnienv, dbclass, retval,
- "hash_dup_free", hstp->hash_dup_free);
+ if ((dbclass =
+ get_class(jnienv, name_DB_HASH_STAT)) == NULL)
+ break; /* An exception has been posted. */
+ __jv_fill_h_stat(jnienv, dbclass, retval, hstp);
break;
case DB_QUEUE:
qstp = (DB_QUEUE_STAT *)statp;
+ bytesize = sizeof(DB_QUEUE_STAT);
retval = create_default_object(jnienv,
name_DB_QUEUE_STAT);
- dbclass = get_class(jnienv, name_DB_QUEUE_STAT);
-
- /* Set the individual fields */
- set_int_field(jnienv, dbclass, retval,
- "qs_magic", qstp->qs_magic);
- set_int_field(jnienv, dbclass, retval,
- "qs_version", qstp->qs_version);
- set_int_field(jnienv, dbclass, retval,
- "qs_metaflags", qstp->qs_metaflags);
- set_int_field(jnienv, dbclass, retval,
- "qs_nkeys", qstp->qs_nkeys);
- set_int_field(jnienv, dbclass, retval,
- "qs_ndata", qstp->qs_ndata);
- set_int_field(jnienv, dbclass, retval,
- "qs_pagesize", qstp->qs_pagesize);
- set_int_field(jnienv, dbclass, retval,
- "qs_pages", qstp->qs_pages);
- set_int_field(jnienv, dbclass, retval,
- "qs_re_len", qstp->qs_re_len);
- set_int_field(jnienv, dbclass, retval,
- "qs_re_pad", qstp->qs_re_pad);
- set_int_field(jnienv, dbclass, retval,
- "qs_pgfree", qstp->qs_pgfree);
- set_int_field(jnienv, dbclass, retval,
- "qs_first_recno", qstp->qs_first_recno);
- set_int_field(jnienv, dbclass, retval,
- "qs_cur_recno", qstp->qs_cur_recno);
+ if ((dbclass =
+ get_class(jnienv, name_DB_QUEUE_STAT)) == NULL)
+ break; /* An exception has been posted. */
+
+ __jv_fill_qam_stat(jnienv, dbclass, retval, qstp);
break;
/* That's all the database types we're aware of! */
default:
report_exception(jnienv,
"Db.stat not implemented for types"
- "other than HASH, BTREE and RECNO",
+ " other than BTREE, HASH, QUEUE,"
+ " and RECNO",
EINVAL, 0);
break;
}
- free(statp);
+ if (bytesize != 0)
+ __os_ufree(db->dbenv, statp);
}
- JAVADB_API_END(db);
return (retval);
}
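
The stat block is now released with __os_ufree() instead of plain free(), keeping the deallocation paired with the allocator the library used when it filled the structure; the comment removed from DbEnv.lock_stat later in this patch describes the Windows cross-DLL mismatch that this pairing avoids. A small standalone illustration of the matching-allocator rule, with an invented lib_stat API:

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* A pretend library that hands out stat blocks from its own allocator. */
    struct lib_stat { unsigned long nkeys, ndata; };

    static int lib_stat_get(struct lib_stat **out)
    {
        struct lib_stat *s;

        if ((s = malloc(sizeof(*s))) == NULL)
            return (ENOMEM);
        s->nkeys = 42;
        s->ndata = 1000;
        *out = s;
        return (0);
    }

    /* The matching free: callers use this, never their own free(). */
    static void lib_stat_free(struct lib_stat *s)
    {
        free(s);
    }

    int main(void)
    {
        struct lib_stat *statp = NULL;

        if (lib_stat_get(&statp) == 0) {
            printf("nkeys=%lu ndata=%lu\n", statp->nkeys, statp->ndata);
            lib_stat_free(statp);  /* not free(statp) */
        }
        return (0);
    }
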
-JNIEXPORT jint JNICALL Java_com_sleepycat_db_Db_sync
- (JNIEnv *jnienv, /*Db*/ jobject jthis, jint flags)
-{
- int err;
- DB *db = get_DB(jnienv, jthis);
-
- if (!verify_non_null(jnienv, db))
- return (0);
- JAVADB_API_BEGIN(db, jthis);
- err = db->sync(db, flags);
- if (err != DB_INCOMPLETE)
- verify_return(jnienv, err, 0);
- JAVADB_API_END(db);
- return (err);
-}
+JAVADB_METHOD(Db_sync, (JAVADB_ARGS, jint flags), DB,
+ sync, (c_this, flags))
JNIEXPORT jboolean JNICALL Java_com_sleepycat_db_Db_get_1byteswapped
(JNIEnv *jnienv, /*Db*/ jobject jthis)
{
DB *db;
- jboolean retval;
+ int err, isbyteswapped;
+
+ /* This value should never be seen, because of the exception. */
+ isbyteswapped = 0;
db = get_DB(jnienv, jthis);
if (!verify_non_null(jnienv, db))
return (0);
- JAVADB_API_BEGIN(db, jthis);
- retval = db->get_byteswapped(db) ? 1 : 0;
- JAVADB_API_END(db);
- return (retval);
+ err = db->get_byteswapped(db, &isbyteswapped);
+ (void)verify_return(jnienv, err, 0);
+
+ return ((jboolean)isbyteswapped);
}
JNIEXPORT jint JNICALL Java_com_sleepycat_db_Db_get_1type
(JNIEnv *jnienv, /*Db*/ jobject jthis)
{
DB *db;
+ int err;
+ DBTYPE dbtype;
+
+ /* This value should never be seen, because of the exception. */
+ dbtype = DB_UNKNOWN;
db = get_DB(jnienv, jthis);
if (!verify_non_null(jnienv, db))
return (0);
- return ((jint)db->type);
+ err = db->get_type(db, &dbtype);
+ (void)verify_return(jnienv, err, 0);
+
+ return ((jint)dbtype);
}
JNIEXPORT void JNICALL Java_com_sleepycat_db_Db__1open
- (JNIEnv *jnienv, /*Db*/ jobject jthis, jstring file, jstring database,
- jint type, jint flags, jint mode)
+ (JNIEnv *jnienv, /*Db*/ jobject jthis, /*DbTxn*/ jobject txnid,
+ jstring file, jstring database, jint type, jint flags, jint mode)
{
int err;
DB *db;
- JSTR dbfile;
- JSTR dbdatabase;
+ DB_TXN *dbtxnid;
+ LOCKED_STRING ls_file;
+ LOCKED_STRING ls_database;
- /* Java is assumed to be threaded. */
+ /* Java is assumed to be threaded */
flags |= DB_THREAD;
db = get_DB(jnienv, jthis);
- if (jstr_lock(&dbfile, jnienv, file) != 0)
+
+ dbtxnid = get_DB_TXN(jnienv, txnid);
+ if (locked_string_get(&ls_file, jnienv, file) != 0)
goto out2;
- if (jstr_lock(&dbdatabase, jnienv, database) != 0)
+ if (locked_string_get(&ls_database, jnienv, database) != 0)
goto out1;
if (verify_non_null(jnienv, db)) {
- JAVADB_API_BEGIN(db, jthis);
- err = db->open(db, dbfile.string, dbdatabase.string,
+ err = db->open(db, dbtxnid, ls_file.string, ls_database.string,
(DBTYPE)type, flags, mode);
verify_return(jnienv, err, EXCEPTION_FILE_NOT_FOUND);
- JAVADB_API_END(db);
}
out1:
- jstr_unlock(&dbdatabase, jnienv);
+ locked_string_put(&ls_database, jnienv);
out2:
- jstr_unlock(&dbfile, jnienv);
+ locked_string_put(&ls_file, jnienv);
+}
+
+JNIEXPORT jint JNICALL Java_com_sleepycat_db_Db_truncate
+ (JNIEnv *jnienv, /*Db*/ jobject jthis, /*DbTxn*/ jobject jtxnid, jint flags)
+{
+ int err;
+ DB *db;
+ u_int32_t count;
+ DB_TXN *dbtxnid;
+
+ db = get_DB(jnienv, jthis);
+ dbtxnid = get_DB_TXN(jnienv, jtxnid);
+ count = 0;
+ if (verify_non_null(jnienv, db)) {
+ err = db->truncate(db, dbtxnid, &count, flags);
+ verify_return(jnienv, err, 0);
+ }
+ return (jint)count;
}
JNIEXPORT void JNICALL Java_com_sleepycat_db_Db_upgrade
@@ -849,18 +867,16 @@ JNIEXPORT void JNICALL Java_com_sleepycat_db_Db_upgrade
{
int err;
DB *db = get_DB(jnienv, jthis);
- JSTR j_name;
+ LOCKED_STRING ls_name;
if (verify_non_null(jnienv, db)) {
- JAVADB_API_BEGIN(db, jthis);
- if (jstr_lock(&j_name, jnienv, name) != 0)
+ if (locked_string_get(&ls_name, jnienv, name) != 0)
goto out;
- err = db->upgrade(db, j_name.string, flags);
+ err = db->upgrade(db, ls_name.string, flags);
verify_return(jnienv, err, 0);
- JAVADB_API_END(db);
}
out:
- jstr_unlock(&j_name, jnienv);
+ locked_string_put(&ls_name, jnienv);
}
static int java_verify_callback(void *handle, const void *str_arg)
@@ -868,7 +884,6 @@ static int java_verify_callback(void *handle, const void *str_arg)
char *str;
struct verify_callback_struct *vc;
int len;
- jthrowable except;
JNIEnv *jnienv;
str = (char *)str_arg;
@@ -879,11 +894,15 @@ static int java_verify_callback(void *handle, const void *str_arg)
vc->nbytes = len;
vc->bytes = (*jnienv)->NewByteArray(jnienv, len);
}
- (*jnienv)->SetByteArrayRegion(jnienv, vc->bytes, 0, len, (jbyte*)str);
- (*jnienv)->CallVoidMethod(jnienv, vc->streamobj,
- vc->writemid, vc->bytes, 0, len-1);
- if ((except = (*jnienv)->ExceptionOccurred(jnienv)) != NULL)
+ if (vc->bytes != NULL) {
+ (*jnienv)->SetByteArrayRegion(jnienv, vc->bytes, 0, len,
+ (jbyte*)str);
+ (*jnienv)->CallVoidMethod(jnienv, vc->streamobj,
+ vc->writemid, vc->bytes, 0, len-1);
+ }
+
+ if ((*jnienv)->ExceptionOccurred(jnienv) != NULL)
return (EIO);
return (0);
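
The verify callback now skips the JNI calls entirely when the byte array could not be allocated and returns EIO as soon as a Java exception is pending, leaving it to the caller to stop feeding it further output. A minimal, JNI-free sketch of that stop-on-nonzero callback contract:

    #include <errno.h>
    #include <stdio.h>

    /* Walk a list of messages; stop as soon as the callback reports failure. */
    static int for_each_message(const char **msgs,
        int (*cb)(void *handle, const char *msg), void *handle)
    {
        int err;

        for (; *msgs != NULL; msgs++)
            if ((err = cb(handle, *msgs)) != 0)
                return (err);
        return (0);
    }

    /* Fail (as the JNI callback does with EIO) once a budget is exhausted. */
    static int print_cb(void *handle, const char *msg)
    {
        int *budget = handle;

        if ((*budget)-- <= 0)
            return (EIO);
        printf("%s\n", msg);
        return (0);
    }

    int main(void)
    {
        const char *msgs[] = { "page 1 ok", "page 2 ok", "page 3 ok", NULL };
        int budget = 2;

        return (for_each_message(msgs, print_cb, &budget));
    }
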
@@ -895,26 +914,25 @@ JNIEXPORT void JNICALL Java_com_sleepycat_db_Db_verify
{
int err;
DB *db;
- JSTR j_name;
- JSTR j_subdb;
+ LOCKED_STRING ls_name;
+ LOCKED_STRING ls_subdb;
struct verify_callback_struct vcs;
jclass streamclass;
db = get_DB(jnienv, jthis);
if (!verify_non_null(jnienv, db))
return;
- JAVADB_API_BEGIN(db, jthis);
-
- if (jstr_lock(&j_name, jnienv, name) != 0)
+ if (locked_string_get(&ls_name, jnienv, name) != 0)
goto out2;
- if (jstr_lock(&j_subdb, jnienv, subdb) != 0)
+ if (locked_string_get(&ls_subdb, jnienv, subdb) != 0)
goto out1;
/* set up everything we need for the callbacks */
vcs.env = jnienv;
vcs.streamobj = stream;
vcs.nbytes = 100;
- vcs.bytes = (*jnienv)->NewByteArray(jnienv, vcs.nbytes);
+ if ((vcs.bytes = (*jnienv)->NewByteArray(jnienv, vcs.nbytes)) == NULL)
+ goto out1;
/* get the method ID for OutputStream.write(byte[], int, int); */
streamclass = (*jnienv)->FindClass(jnienv, "java/io/OutputStream");
@@ -922,15 +940,14 @@ JNIEXPORT void JNICALL Java_com_sleepycat_db_Db_verify
"write", "([BII)V");
/* invoke verify - this will invoke the callback repeatedly. */
- err = __db_verify_internal(db, j_name.string, j_subdb.string,
+ err = __db_verify_internal(db, ls_name.string, ls_subdb.string,
&vcs, java_verify_callback, flags);
verify_return(jnienv, err, 0);
out1:
- jstr_unlock(&j_subdb, jnienv);
+ locked_string_put(&ls_subdb, jnienv);
out2:
- jstr_unlock(&j_name, jnienv);
- JAVADB_API_END(db);
+ locked_string_put(&ls_name, jnienv);
}
JNIEXPORT void JNICALL Java_com_sleepycat_db_Db__1finalize
@@ -944,7 +961,8 @@ JNIEXPORT void JNICALL Java_com_sleepycat_db_Db__1finalize
db = get_DB(jnienv, jthis);
DB_ASSERT(dbinfo != NULL);
- /* Note: We can never be sure if the underlying DB is attached to
+ /*
+ * Note: We can never be sure if the underlying DB is attached to
* a DB_ENV that was already closed. Sure, that's a user error,
* but it shouldn't crash the VM. Therefore, we cannot just
* automatically close if the handle indicates we are not yet
diff --git a/bdb/libdb_java/java_DbEnv.c b/bdb/libdb_java/java_DbEnv.c
index ff9207dd2c8..651c38a0e3d 100644
--- a/bdb/libdb_java/java_DbEnv.c
+++ b/bdb/libdb_java/java_DbEnv.c
@@ -1,40 +1,40 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1997, 1998, 1999, 2000
+ * Copyright (c) 1997-2002
* Sleepycat Software. All rights reserved.
*/
#include "db_config.h"
#ifndef lint
-static const char revid[] = "$Id: java_DbEnv.c,v 11.37 2001/01/11 18:19:52 bostic Exp $";
+static const char revid[] = "$Id: java_DbEnv.c,v 11.105 2002/08/29 14:22:23 margo Exp $";
#endif /* not lint */
#include <jni.h>
#include <stdlib.h>
#include <string.h>
-#include "db.h"
#include "db_int.h"
#include "java_util.h"
+#include "java_stat_auto.h"
#include "com_sleepycat_db_DbEnv.h"
/* We keep these lined up, and alphabetical by field name,
* for comparison with C++'s list.
*/
-JAVADB_WO_ACCESS_STRING(DbEnv, data_1dir, DB_ENV, data_dir)
-JAVADB_WO_ACCESS_METHOD(DbEnv, jint, lg_1bsize, DB_ENV, lg_bsize)
-JAVADB_WO_ACCESS_STRING(DbEnv, lg_1dir, DB_ENV, lg_dir)
-JAVADB_WO_ACCESS_METHOD(DbEnv, jint, lg_1max, DB_ENV, lg_max)
-JAVADB_WO_ACCESS_METHOD(DbEnv, jint, lk_1detect, DB_ENV, lk_detect)
-JAVADB_WO_ACCESS_METHOD(DbEnv, jint, lk_1max, DB_ENV, lk_max)
-JAVADB_WO_ACCESS_METHOD(DbEnv, jint, lk_1max_1locks, DB_ENV, lk_max_locks)
-JAVADB_WO_ACCESS_METHOD(DbEnv, jint, lk_1max_1lockers, DB_ENV, lk_max_lockers)
-JAVADB_WO_ACCESS_METHOD(DbEnv, jint, lk_1max_1objects, DB_ENV, lk_max_objects)
+JAVADB_SET_METH_STR(DbEnv, data_1dir, DB_ENV, data_dir)
+JAVADB_SET_METH(DbEnv, jint, lg_1bsize, DB_ENV, lg_bsize)
+JAVADB_SET_METH_STR(DbEnv, lg_1dir, DB_ENV, lg_dir)
+JAVADB_SET_METH(DbEnv, jint, lg_1max, DB_ENV, lg_max)
+JAVADB_SET_METH(DbEnv, jint, lg_1regionmax, DB_ENV, lg_regionmax)
+JAVADB_SET_METH(DbEnv, jint, lk_1detect, DB_ENV, lk_detect)
+JAVADB_SET_METH(DbEnv, jint, lk_1max, DB_ENV, lk_max)
+JAVADB_SET_METH(DbEnv, jint, lk_1max_1locks, DB_ENV, lk_max_locks)
+JAVADB_SET_METH(DbEnv, jint, lk_1max_1lockers, DB_ENV, lk_max_lockers)
+JAVADB_SET_METH(DbEnv, jint, lk_1max_1objects, DB_ENV, lk_max_objects)
/* mp_mmapsize is declared below, it needs an extra cast */
-JAVADB_WO_ACCESS_METHOD(DbEnv, jint, mutexlocks, DB_ENV, mutexlocks)
-JAVADB_WO_ACCESS_STRING(DbEnv, tmp_1dir, DB_ENV, tmp_dir)
-JAVADB_WO_ACCESS_METHOD(DbEnv, jint, tx_1max, DB_ENV, tx_max)
+JAVADB_SET_METH_STR(DbEnv, tmp_1dir, DB_ENV, tmp_dir)
+JAVADB_SET_METH(DbEnv, jint, tx_1max, DB_ENV, tx_max)
static void DbEnv_errcall_callback(const char *prefix, char *message)
{
@@ -42,7 +42,8 @@ static void DbEnv_errcall_callback(const char *prefix, char *message)
DB_ENV_JAVAINFO *envinfo = (DB_ENV_JAVAINFO *)prefix;
jstring pre;
- /* Note: these error cases are "impossible", and would
+ /*
+ * Note: these error cases are "impossible", and would
* normally warrant an exception. However, without
* a jnienv, we cannot throw an exception...
* We don't want to trap or exit, since the point of
@@ -50,7 +51,8 @@ static void DbEnv_errcall_callback(const char *prefix, char *message)
* error situations.
*/
if (envinfo == NULL) {
- /* Something is *really* wrong here, the
+ /*
+ * Something is *really* wrong here, the
* prefix is set in every environment created.
*/
fprintf(stderr, "Error callback failed!\n");
@@ -70,7 +72,7 @@ static void DbEnv_errcall_callback(const char *prefix, char *message)
}
pre = dbjie_get_errpfx(envinfo, jnienv);
- report_errcall(jnienv, dbjie_get_errcall(envinfo), pre, message);
+ report_errcall(jnienv, dbjie_get_errcall(envinfo), pre, message);
}
static void DbEnv_initialize(JNIEnv *jnienv, DB_ENV *dbenv,
@@ -82,15 +84,16 @@ static void DbEnv_initialize(JNIEnv *jnienv, DB_ENV *dbenv,
envinfo = get_DB_ENV_JAVAINFO(jnienv, jenv);
DB_ASSERT(envinfo == NULL);
- envinfo = dbjie_construct(jnienv, jerrcall, is_dbopen);
+ envinfo = dbjie_construct(jnienv, jenv, jerrcall, is_dbopen);
set_private_info(jnienv, name_DB_ENV, jenv, envinfo);
dbenv->set_errpfx(dbenv, (const char*)envinfo);
dbenv->set_errcall(dbenv, DbEnv_errcall_callback);
- dbenv->cj_internal = envinfo;
+ dbenv->api2_internal = envinfo;
set_private_dbobj(jnienv, name_DB_ENV, jenv, dbenv);
}
-/* This is called when this DbEnv was made on behalf of a Db
+/*
+ * This is called when this DbEnv was made on behalf of a Db
* created directly (without a parent DbEnv), and the Db is
* being closed. We'll zero out the pointer to the DB_ENV,
* since it is no longer valid, to prevent mistakes.
@@ -107,7 +110,8 @@ JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv__1notify_1db_1close
}
JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_feedback_1changed
- (JNIEnv *jnienv, /*DbEnv*/ jobject jthis, /*DbFeedback*/ jobject jfeedback)
+ (JNIEnv *jnienv, /*DbEnv*/ jobject jthis,
+ /*DbEnvFeedback*/ jobject jfeedback)
{
DB_ENV *dbenv;
DB_ENV_JAVAINFO *dbenvinfo;
@@ -118,9 +122,7 @@ JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_feedback_1changed
!verify_non_null(jnienv, dbenvinfo))
return;
- JAVADB_ENV_API_BEGIN(dbenv, jthis);
dbjie_set_feedback_object(dbenvinfo, jnienv, dbenv, jfeedback);
- JAVADB_ENV_API_END(dbenv);
}
JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv__1init
@@ -144,7 +146,7 @@ JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv__1init_1using_1db
db = get_DB(jnienv, jdb);
dbenv = db->dbenv;
- DbEnv_initialize(jnienv, dbenv, jthis, jerrcall, 1);
+ DbEnv_initialize(jnienv, dbenv, jthis, jerrcall, 0);
}
JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_open
@@ -153,7 +155,7 @@ JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_open
{
int err;
DB_ENV *dbenv;
- JSTR j_home;
+ LOCKED_STRING ls_home;
DB_ENV_JAVAINFO *dbenvinfo;
dbenv = get_DB_ENV(jnienv, jthis);
@@ -161,18 +163,16 @@ JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_open
if (!verify_non_null(jnienv, dbenv) ||
!verify_non_null(jnienv, dbenvinfo))
return;
- JAVADB_ENV_API_BEGIN(dbenv, jthis);
- if (jstr_lock(&j_home, jnienv, db_home) != 0)
+ if (locked_string_get(&ls_home, jnienv, db_home) != 0)
goto out;
/* Java is assumed to be threaded. */
flags |= DB_THREAD;
- err = dbenv->open(dbenv, j_home.string, flags, mode);
+ err = dbenv->open(dbenv, ls_home.string, flags, mode);
verify_return(jnienv, err, EXCEPTION_FILE_NOT_FOUND);
out:
- jstr_unlock(&j_home, jnienv);
- JAVADB_ENV_API_END(dbenv);
+ locked_string_put(&ls_home, jnienv);
}
JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_remove
@@ -180,27 +180,25 @@ JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_remove
{
DB_ENV *dbenv;
DB_ENV_JAVAINFO *dbenvinfo;
- JSTR j_home;
+ LOCKED_STRING ls_home;
int err = 0;
dbenv = get_DB_ENV(jnienv, jthis);
dbenvinfo = get_DB_ENV_JAVAINFO(jnienv, jthis);
if (!verify_non_null(jnienv, dbenv))
return;
- JAVADB_ENV_API_BEGIN(dbenv, jthis);
- if (jstr_lock(&j_home, jnienv, db_home) != 0)
+ if (locked_string_get(&ls_home, jnienv, db_home) != 0)
goto out;
- err = dbenv->remove(dbenv, j_home.string, flags);
+ err = dbenv->remove(dbenv, ls_home.string, flags);
set_private_dbobj(jnienv, name_DB_ENV, jthis, 0);
- if (dbenvinfo != NULL)
- dbjie_dealloc(dbenvinfo, jnienv);
-
verify_return(jnienv, err, 0);
out:
- jstr_unlock(&j_home, jnienv);
- /* don't call JAVADB_ENV_API_END - env cannot be used */
+ locked_string_put(&ls_home, jnienv);
+
+ if (dbenvinfo != NULL)
+ dbjie_dealloc(dbenvinfo, jnienv);
}
JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv__1close
@@ -215,8 +213,6 @@ JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv__1close
if (!verify_non_null(jnienv, dbenv))
return;
- JAVADB_ENV_API_BEGIN(dbenv, jthis);
-
err = dbenv->close(dbenv, flags);
set_private_dbobj(jnienv, name_DB_ENV, jthis, 0);
@@ -225,50 +221,101 @@ JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv__1close
/* Throw an exception if the close failed. */
verify_return(jnienv, err, 0);
+}
+
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_dbremove
+ (JNIEnv *jnienv, /*DbEnv*/ jobject jthis, /*DbTxn*/ jobject jtxn,
+ jstring name, jstring subdb, jint flags)
+{
+ LOCKED_STRING ls_name, ls_subdb;
+ DB_ENV *dbenv;
+ DB_TXN *txn;
+ int err;
+
+ dbenv = get_DB_ENV(jnienv, jthis);
+ if (!verify_non_null(jnienv, dbenv))
+ return;
+ txn = get_DB_TXN(jnienv, jtxn);
+ if (locked_string_get(&ls_name, jnienv, name) != 0)
+ return;
+ if (locked_string_get(&ls_subdb, jnienv, subdb) != 0)
+ goto err1;
+
+ err = dbenv->dbremove(dbenv, txn, ls_name.string, ls_subdb.string,
+ flags);
+
+ /* Throw an exception if the dbremove failed. */
+ verify_return(jnienv, err, 0);
+
+ locked_string_put(&ls_subdb, jnienv);
+err1: locked_string_put(&ls_name, jnienv);
+}
+
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_dbrename
+ (JNIEnv *jnienv, /*DbEnv*/ jobject jthis, /*DbTxn*/ jobject jtxn,
+ jstring name, jstring subdb, jstring newname, jint flags)
+{
+ LOCKED_STRING ls_name, ls_subdb, ls_newname;
+ DB_ENV *dbenv;
+ DB_TXN *txn;
+ int err;
+
+ dbenv = get_DB_ENV(jnienv, jthis);
+ if (!verify_non_null(jnienv, dbenv))
+ return;
+ txn = get_DB_TXN(jnienv, jtxn);
+ if (locked_string_get(&ls_name, jnienv, name) != 0)
+ return;
+ if (locked_string_get(&ls_subdb, jnienv, subdb) != 0)
+ goto err2;
+ if (locked_string_get(&ls_newname, jnienv, newname) != 0)
+ goto err1;
+
+ err = dbenv->dbrename(dbenv, txn, ls_name.string, ls_subdb.string,
+ ls_newname.string, flags);
+
+ /* Throw an exception if the dbrename failed. */
+ verify_return(jnienv, err, 0);
- /* don't call JAVADB_ENV_API_END - env cannot be used */
+ locked_string_put(&ls_newname, jnienv);
+err1: locked_string_put(&ls_subdb, jnienv);
+err2: locked_string_put(&ls_name, jnienv);
}
JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_err
(JNIEnv *jnienv, /*DbEnv*/ jobject jthis, jint ecode, jstring msg)
{
- JSTR msg_string;
+ LOCKED_STRING ls_msg;
DB_ENV *dbenv;
dbenv = get_DB_ENV(jnienv, jthis);
if (!verify_non_null(jnienv, dbenv))
return;
- JAVADB_ENV_API_BEGIN(dbenv, jthis);
-
- if (jstr_lock(&msg_string, jnienv, msg) != 0)
+ if (locked_string_get(&ls_msg, jnienv, msg) != 0)
goto out;
- dbenv->err(dbenv, ecode, msg_string.string);
+ dbenv->err(dbenv, ecode, "%s", ls_msg.string);
out:
- jstr_unlock(&msg_string, jnienv);
- JAVADB_ENV_API_END(dbenv);
+ locked_string_put(&ls_msg, jnienv);
}
JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_errx
(JNIEnv *jnienv, /*DbEnv*/ jobject jthis, jstring msg)
{
- JSTR msg_string;
+ LOCKED_STRING ls_msg;
DB_ENV *dbenv;
dbenv = get_DB_ENV(jnienv, jthis);
if (!verify_non_null(jnienv, dbenv))
return;
- JAVADB_ENV_API_BEGIN(dbenv, jthis);
-
- if (jstr_lock(&msg_string, jnienv, msg) != 0)
+ if (locked_string_get(&ls_msg, jnienv, msg) != 0)
goto out;
- dbenv->errx(dbenv, msg_string.string);
+ dbenv->errx(dbenv, "%s", ls_msg.string);
out:
- jstr_unlock(&msg_string, jnienv);
- JAVADB_ENV_API_END(dbenv);
+ locked_string_put(&ls_msg, jnienv);
}
/*static*/
@@ -282,190 +329,255 @@ JNIEXPORT jstring JNICALL Java_com_sleepycat_db_DbEnv_strerror
return (get_java_string(jnienv, message));
}
-JNIEXPORT void JNICALL
- Java_com_sleepycat_db_DbEnv_set_1cachesize
- (JNIEnv *jnienv, /*DbEnv*/ jobject jthis, jint gbytes, jint bytes,
- jint ncaches)
+JAVADB_METHOD(DbEnv_set_1cachesize,
+ (JAVADB_ARGS, jint gbytes, jint bytes, jint ncaches), DB_ENV,
+ set_cachesize, (c_this, gbytes, bytes, ncaches))
+
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_set_1encrypt
+ (JNIEnv *jnienv, /*DbEnv*/ jobject jthis, jstring jpasswd, jint flags)
{
- DB_ENV *dbenv;
int err;
+ DB_ENV *dbenv;
+ LOCKED_STRING ls_passwd;
dbenv = get_DB_ENV(jnienv, jthis);
- if (verify_non_null(jnienv, dbenv)) {
- JAVADB_ENV_API_BEGIN(dbenv, jthis);
- err = dbenv->set_cachesize(dbenv, gbytes, bytes, ncaches);
- verify_return(jnienv, err, 0);
- JAVADB_ENV_API_END(dbenv);
- }
+ if (!verify_non_null(jnienv, dbenv))
+ return;
+ if (locked_string_get(&ls_passwd, jnienv, jpasswd) != 0)
+ goto out;
+
+ err = dbenv->set_encrypt(dbenv, ls_passwd.string, flags);
+ verify_return(jnienv, err, 0);
+
+out: locked_string_put(&ls_passwd, jnienv);
}
-JNIEXPORT void JNICALL
- Java_com_sleepycat_db_DbEnv_set_1flags
- (JNIEnv *jnienv, /*DbEnv*/ jobject jthis, jint flags, jint onoff)
+JAVADB_METHOD(DbEnv_set_1flags,
+ (JAVADB_ARGS, jint flags, jboolean onoff), DB_ENV,
+ set_flags, (c_this, flags, onoff ? 1 : 0))
+
+JAVADB_METHOD(DbEnv_set_1mp_1mmapsize, (JAVADB_ARGS, jlong value), DB_ENV,
+ set_mp_mmapsize, (c_this, (size_t)value))
+
+JAVADB_METHOD(DbEnv_set_1tas_1spins, (JAVADB_ARGS, jint spins), DB_ENV,
+ set_tas_spins, (c_this, (u_int32_t)spins))
+
+JAVADB_METHOD(DbEnv_set_1timeout,
+ (JAVADB_ARGS, jlong timeout, jint flags), DB_ENV,
+ set_timeout, (c_this, (u_int32_t)timeout, flags))
+
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_set_1lk_1conflicts
+ (JNIEnv *jnienv, /*DbEnv*/ jobject jthis, jobjectArray array)
{
DB_ENV *dbenv;
+ DB_ENV_JAVAINFO *dbenvinfo;
int err;
+ jsize i, len;
+ u_char *newarr;
+ int bytesize;
dbenv = get_DB_ENV(jnienv, jthis);
- if (verify_non_null(jnienv, dbenv)) {
- JAVADB_ENV_API_BEGIN(dbenv, jthis);
- err = dbenv->set_flags(dbenv, flags, onoff);
- verify_return(jnienv, err, 0);
- JAVADB_ENV_API_END(dbenv);
+ dbenvinfo = get_DB_ENV_JAVAINFO(jnienv, jthis);
+ if (!verify_non_null(jnienv, dbenv) ||
+ !verify_non_null(jnienv, dbenvinfo))
+ return;
+
+ len = (*jnienv)->GetArrayLength(jnienv, array);
+ bytesize = sizeof(u_char) * len * len;
+
+ if ((err = __os_malloc(dbenv, bytesize, &newarr)) != 0) {
+ if (!verify_return(jnienv, err, 0))
+ return;
}
+
+ for (i=0; i<len; i++) {
+ jobject subArray =
+ (*jnienv)->GetObjectArrayElement(jnienv, array, i);
+ (*jnienv)->GetByteArrayRegion(jnienv, (jbyteArray)subArray,
+ 0, len,
+ (jbyte *)&newarr[i*len]);
+ }
+ dbjie_set_conflict(dbenvinfo, newarr, bytesize);
+ err = dbenv->set_lk_conflicts(dbenv, newarr, len);
+ verify_return(jnienv, err, 0);
}
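
set_lk_conflicts copies the Java byte[][] conflict matrix into one contiguous buffer of len * len bytes, row i landing at offset i * len, before handing it to the library. A self-contained sketch of that row-major flattening:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Flatten a square matrix of row pointers into one contiguous buffer. */
    static unsigned char *flatten(unsigned char **rows, size_t len)
    {
        unsigned char *flat;
        size_t i;

        if ((flat = malloc(len * len)) == NULL)
            return (NULL);
        for (i = 0; i < len; i++)
            memcpy(&flat[i * len], rows[i], len);   /* row i at offset i*len */
        return (flat);
    }

    int main(void)
    {
        unsigned char r0[] = { 0, 1 }, r1[] = { 1, 1 };
        unsigned char *rows[] = { r0, r1 };
        unsigned char *flat;
        size_t i;

        if ((flat = flatten(rows, 2)) == NULL)
            return (1);
        for (i = 0; i < 4; i++)
            printf("%d ", flat[i]);
        printf("\n");
        free(flat);
        return (0);
    }
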
-JNIEXPORT void JNICALL
- Java_com_sleepycat_db_DbEnv_set_1mp_1mmapsize
- (JNIEnv *jnienv, /*DbEnv*/ jobject jthis, jlong value)
+JNIEXPORT jint JNICALL
+ Java_com_sleepycat_db_DbEnv_rep_1elect
+ (JNIEnv *jnienv, /* DbEnv */ jobject jthis, jint nsites, jint pri,
+ jint timeout)
{
DB_ENV *dbenv;
- int err;
+ int err, id;
- dbenv = get_DB_ENV(jnienv, jthis);
- if (verify_non_null(jnienv, dbenv)) {
- JAVADB_ENV_API_BEGIN(dbenv, jthis);
- err = dbenv->set_mp_mmapsize(dbenv, (size_t)value);
- verify_return(jnienv, err, 0);
- JAVADB_ENV_API_END(dbenv);
- }
-}
+ if (!verify_non_null(jnienv, jthis))
+ return (DB_EID_INVALID);
-/*static*/
-JNIEXPORT void JNICALL
- Java_com_sleepycat_db_DbEnv_set_1pageyield
- (JNIEnv *jnienv, jclass jthis_class, jint value)
-{
- int err;
+ dbenv = get_DB_ENV(jnienv, jthis);
- COMPQUIET(jthis_class, NULL);
- err = db_env_set_pageyield(value);
+ err = dbenv->rep_elect(dbenv, (int)nsites,
+ (int)pri, (u_int32_t)timeout, &id);
verify_return(jnienv, err, 0);
+
+ return ((jint)id);
}
-/*static*/
-JNIEXPORT void JNICALL
- Java_com_sleepycat_db_DbEnv_set_1panicstate
- (JNIEnv *jnienv, jclass jthis_class, jint value)
+JNIEXPORT jint JNICALL
+ Java_com_sleepycat_db_DbEnv_rep_1process_1message
+ (JNIEnv *jnienv, /* DbEnv */ jobject jthis, /* Dbt */ jobject control,
+ /* Dbt */ jobject rec, /* RepProcessMessage */ jobject result)
{
- int err;
+ DB_ENV *dbenv;
+ LOCKED_DBT cdbt, rdbt;
+ int err, envid;
- COMPQUIET(jthis_class, NULL);
- err = db_env_set_panicstate(value);
- verify_return(jnienv, err, 0);
+ if (!verify_non_null(jnienv, jthis) || !verify_non_null(jnienv, result))
+ return (-1);
+
+ dbenv = get_DB_ENV(jnienv, jthis);
+ err = 0;
+
+ /* The DBTs are always inputs. */
+ if (locked_dbt_get(&cdbt, jnienv, dbenv, control, inOp) != 0)
+ goto out2;
+ if (locked_dbt_get(&rdbt, jnienv, dbenv, rec, inOp) != 0)
+ goto out1;
+
+ envid = (*jnienv)->GetIntField(jnienv,
+ result, fid_RepProcessMessage_envid);
+
+ err = dbenv->rep_process_message(dbenv, &cdbt.javainfo->dbt,
+ &rdbt.javainfo->dbt, &envid);
+
+ if (err == DB_REP_NEWMASTER)
+ (*jnienv)->SetIntField(jnienv,
+ result, fid_RepProcessMessage_envid, envid);
+ else if (!DB_RETOK_REPPMSG(err))
+ verify_return(jnienv, err, 0);
+
+out1: locked_dbt_put(&rdbt, jnienv, dbenv);
+out2: locked_dbt_put(&cdbt, jnienv, dbenv);
+
+ return (err);
}
-/*static*/
JNIEXPORT void JNICALL
- Java_com_sleepycat_db_DbEnv_set_1region_1init
- (JNIEnv *jnienv, jclass jthis_class, jint value)
+ Java_com_sleepycat_db_DbEnv_rep_1start
+ (JNIEnv *jnienv, /* DbEnv */ jobject jthis, /* Dbt */ jobject cookie,
+ jint flags)
{
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ LOCKED_DBT ldbt;
int err;
- COMPQUIET(jthis_class, NULL);
- err = db_env_set_region_init(value);
+ if (!verify_non_null(jnienv, jthis))
+ return;
+
+ dbenv = get_DB_ENV(jnienv, jthis);
+
+ /* The Dbt cookie may be null; if so, pass in a NULL DBT. */
+ if (cookie != NULL) {
+ if (locked_dbt_get(&ldbt, jnienv, dbenv, cookie, inOp) != 0)
+ goto out;
+ dbtp = &ldbt.javainfo->dbt;
+ } else
+ dbtp = NULL;
+
+ err = dbenv->rep_start(dbenv, dbtp, flags);
verify_return(jnienv, err, 0);
+
+out: if (cookie != NULL)
+ locked_dbt_put(&ldbt, jnienv, dbenv);
}
-/*static*/
-JNIEXPORT void JNICALL
- Java_com_sleepycat_db_DbEnv_set_1tas_1spins
- (JNIEnv *jnienv, jclass jthis_class, jint value)
+JNIEXPORT jobject JNICALL Java_com_sleepycat_db_DbEnv_rep_1stat
+ (JNIEnv *jnienv, /*DbEnv*/ jobject jthis, jint flags)
{
int err;
+ DB_ENV *dbenv = get_DB_ENV(jnienv, jthis);
+ DB_REP_STAT *statp = NULL;
+ jobject retval = NULL;
+ jclass dbclass;
- COMPQUIET(jthis_class, NULL);
- err = db_env_set_tas_spins(value);
- verify_return(jnienv, err, 0);
+ if (!verify_non_null(jnienv, dbenv))
+ return (NULL);
+
+ err = dbenv->rep_stat(dbenv, &statp, (u_int32_t)flags);
+ if (verify_return(jnienv, err, 0)) {
+ if ((dbclass = get_class(jnienv, name_DB_REP_STAT)) == NULL ||
+ (retval =
+ create_default_object(jnienv, name_DB_REP_STAT)) == NULL)
+ goto err; /* An exception has been posted. */
+
+ __jv_fill_rep_stat(jnienv, dbclass, retval, statp);
+
+err: __os_ufree(dbenv, statp);
+ }
+ return (retval);
}
-JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_recovery_1init_1changed
- (JNIEnv *jnienv, /*DbEnv*/ jobject jthis, /*DbRecoveryInit*/ jobject jrecoveryinit)
+JNIEXPORT void JNICALL
+Java_com_sleepycat_db_DbEnv_set_1rep_1limit
+ (JNIEnv *jnienv, /* DbEnv */ jobject jthis, jint gbytes, jint bytes)
{
DB_ENV *dbenv;
- DB_ENV_JAVAINFO *dbenvinfo;
+ int err;
dbenv = get_DB_ENV(jnienv, jthis);
- dbenvinfo = get_DB_ENV_JAVAINFO(jnienv, jthis);
- if (!verify_non_null(jnienv, dbenv) ||
- !verify_non_null(jnienv, dbenv))
- return;
- JAVADB_ENV_API_BEGIN(dbenv, jthis);
- dbjie_set_recovery_init_object(dbenvinfo, jnienv, dbenv, jrecoveryinit);
- JAVADB_ENV_API_END(dbenv);
+ if (verify_non_null(jnienv, dbenv)) {
+ err = dbenv->set_rep_limit(dbenv,
+ (u_int32_t)gbytes, (u_int32_t)bytes);
+ verify_return(jnienv, err, 0);
+ }
}
-JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_set_1lk_1conflicts
- (JNIEnv *jnienv, /*DbEnv*/ jobject jthis, jobjectArray array)
+JNIEXPORT void JNICALL
+ Java_com_sleepycat_db_DbEnv_rep_1transport_1changed
+ (JNIEnv *jnienv, /*DbEnv*/ jobject jthis, jint envid,
+ /* DbRepTransport */ jobject jreptransport)
{
DB_ENV *dbenv;
DB_ENV_JAVAINFO *dbenvinfo;
- int err;
- jsize i, len;
- unsigned char *newarr;
dbenv = get_DB_ENV(jnienv, jthis);
dbenvinfo = get_DB_ENV_JAVAINFO(jnienv, jthis);
if (!verify_non_null(jnienv, dbenv) ||
- !verify_non_null(jnienv, dbenvinfo))
+ !verify_non_null(jnienv, dbenvinfo) ||
+ !verify_non_null(jnienv, jreptransport))
return;
- JAVADB_ENV_API_BEGIN(dbenv, jthis);
-
- len = (*jnienv)->GetArrayLength(jnienv, array);
-
- newarr = (unsigned char *)malloc(sizeof(unsigned char) * len * len);
-
- for (i=0; i<len; i++) {
- jobject subArray =
- (*jnienv)->GetObjectArrayElement(jnienv, array, i);
- (*jnienv)->GetByteArrayRegion(jnienv, (jbyteArray)subArray,
- 0, len,
- (jbyte *)&newarr[i*len]);
- }
- dbjie_set_conflict(dbenvinfo, newarr);
- err = dbenv->set_lk_conflicts(dbenv, newarr, len);
- verify_return(jnienv, err, 0);
- JAVADB_ENV_API_END(dbenv);
+ dbjie_set_rep_transport_object(dbenvinfo,
+ jnienv, dbenv, envid, jreptransport);
}
JNIEXPORT void JNICALL
- Java_com_sleepycat_db_DbEnv_set_1server
- (JNIEnv *jnienv, /*DbEnv*/ jobject jthis, jstring jhost, jlong tsec,
- jlong ssec, jint flags)
+ Java_com_sleepycat_db_DbEnv_set_1rpc_1server
+ (JNIEnv *jnienv, /*DbEnv*/ jobject jthis, /*DbClient*/ jobject jclient,
+ jstring jhost, jlong tsec, jlong ssec, jint flags)
{
int err;
DB_ENV *dbenv = get_DB_ENV(jnienv, jthis);
const char *host = (*jnienv)->GetStringUTFChars(jnienv, jhost, NULL);
+ if (jclient != NULL) {
+ report_exception(jnienv, "DbEnv.set_rpc_server client arg "
+ "must be null; reserved for future use",
+ EINVAL, 0);
+ return;
+ }
if (verify_non_null(jnienv, dbenv)) {
- JAVADB_ENV_API_BEGIN(dbenv, jthis);
- err = dbenv->set_server(dbenv, (char *)host,
+ err = dbenv->set_rpc_server(dbenv, NULL, host,
(long)tsec, (long)ssec, flags);
/* Throw an exception if the call failed. */
verify_return(jnienv, err, 0);
- JAVADB_ENV_API_END(dbenv);
}
}
-JNIEXPORT void JNICALL
- Java_com_sleepycat_db_DbEnv_set_1shm_1key
- (JNIEnv *jnienv, /*DbEnv*/ jobject jthis, jlong shm_key)
-{
- int err;
- DB_ENV *dbenv = get_DB_ENV(jnienv, jthis);
-
- if (verify_non_null(jnienv, dbenv)) {
- JAVADB_ENV_API_BEGIN(dbenv, jthis);
- err = dbenv->set_shm_key(dbenv, (long)shm_key);
-
- /* Throw an exception if the call failed. */
- verify_return(jnienv, err, 0);
- JAVADB_ENV_API_END(dbenv);
- }
-}
+JAVADB_METHOD(DbEnv_set_1shm_1key, (JAVADB_ARGS, jlong shm_key), DB_ENV,
+ set_shm_key, (c_this, (long)shm_key))
JNIEXPORT void JNICALL
Java_com_sleepycat_db_DbEnv__1set_1tx_1timestamp
@@ -476,31 +588,16 @@ JNIEXPORT void JNICALL
time_t time = seconds;
if (verify_non_null(jnienv, dbenv)) {
- JAVADB_ENV_API_BEGIN(dbenv, jthis);
err = dbenv->set_tx_timestamp(dbenv, &time);
/* Throw an exception if the call failed. */
verify_return(jnienv, err, 0);
- JAVADB_ENV_API_END(dbenv);
}
}
-JNIEXPORT void JNICALL
- Java_com_sleepycat_db_DbEnv_set_1verbose
- (JNIEnv *jnienv, /*DbEnv*/ jobject jthis, jint which, jint onoff)
-{
- int err;
- DB_ENV *dbenv = get_DB_ENV(jnienv, jthis);
-
- if (verify_non_null(jnienv, dbenv)) {
- JAVADB_ENV_API_BEGIN(dbenv, jthis);
- err = dbenv->set_verbose(dbenv, which, onoff);
-
- /* Throw an exception if the call failed. */
- verify_return(jnienv, err, 0);
- JAVADB_ENV_API_END(dbenv);
- }
-}
+JAVADB_METHOD(DbEnv_set_1verbose,
+ (JAVADB_ARGS, jint which, jboolean onoff), DB_ENV,
+ set_verbose, (c_this, which, onoff ? 1 : 0))
/*static*/
JNIEXPORT jint JNICALL Java_com_sleepycat_db_DbEnv_get_1version_1major
@@ -550,15 +647,16 @@ JNIEXPORT jint JNICALL Java_com_sleepycat_db_DbEnv_lock_1id
if (!verify_non_null(jnienv, dbenv))
return (-1);
- JAVADB_ENV_API_BEGIN(dbenv, jthis);
- err = lock_id(dbenv, &id);
+ err = dbenv->lock_id(dbenv, &id);
verify_return(jnienv, err, 0);
- JAVADB_ENV_API_END(dbenv);
return (id);
}
+JAVADB_METHOD(DbEnv_lock_1id_1free, (JAVADB_ARGS, jint id), DB_ENV,
+ lock_id_free, (c_this, id))
+
JNIEXPORT jobject JNICALL Java_com_sleepycat_db_DbEnv_lock_1stat
- (JNIEnv *jnienv, /*DbEnv*/ jobject jthis)
+ (JNIEnv *jnienv, /*DbEnv*/ jobject jthis, jint flags)
{
int err;
DB_ENV *dbenv = get_DB_ENV(jnienv, jthis);
@@ -568,44 +666,18 @@ JNIEXPORT jobject JNICALL Java_com_sleepycat_db_DbEnv_lock_1stat
if (!verify_non_null(jnienv, dbenv))
return (NULL);
- JAVADB_ENV_API_BEGIN(dbenv, jthis);
- /* We cannot use the default allocator (on Win* platforms anyway)
- * because it often causes problems when we free storage
- * in a DLL that was allocated in another DLL. Using
- * our own allocator (ours just calls malloc!) ensures
- * that there is no mismatch.
- */
- err = lock_stat(dbenv, &statp, NULL);
+ err = dbenv->lock_stat(dbenv, &statp, (u_int32_t)flags);
if (verify_return(jnienv, err, 0)) {
- retval = create_default_object(jnienv, name_DB_LOCK_STAT);
- dbclass = get_class(jnienv, name_DB_LOCK_STAT);
+ if ((dbclass = get_class(jnienv, name_DB_LOCK_STAT)) == NULL ||
+ (retval =
+ create_default_object(jnienv, name_DB_LOCK_STAT)) == NULL)
+ goto err; /* An exception has been posted. */
- /* Set the individual fields */
- set_int_field(jnienv, dbclass, retval,
- "st_maxlocks", statp->st_maxlocks);
- set_int_field(jnienv, dbclass, retval,
- "st_nmodes", statp->st_nmodes);
- set_int_field(jnienv, dbclass, retval,
- "st_nlockers", statp->st_nlockers);
- set_int_field(jnienv, dbclass, retval,
- "st_nconflicts", statp->st_nconflicts);
- set_int_field(jnienv, dbclass, retval,
- "st_nrequests", statp->st_nrequests);
- set_int_field(jnienv, dbclass, retval,
- "st_nreleases", statp->st_nreleases);
- set_int_field(jnienv, dbclass, retval,
- "st_ndeadlocks", statp->st_ndeadlocks);
- set_int_field(jnienv, dbclass, retval,
- "st_region_wait", statp->st_region_wait);
- set_int_field(jnienv, dbclass, retval,
- "st_region_nowait", statp->st_region_nowait);
- set_int_field(jnienv, dbclass, retval,
- "st_regsize", statp->st_regsize);
-
- free(statp);
+ __jv_fill_lock_stat(jnienv, dbclass, retval, statp);
+
+err: __os_ufree(dbenv, statp);
}
- JAVADB_ENV_API_END(dbenv);
return (retval);
}
@@ -618,10 +690,8 @@ JNIEXPORT jint JNICALL Java_com_sleepycat_db_DbEnv_lock_1detect
if (!verify_non_null(jnienv, dbenv))
return (0);
- JAVADB_ENV_API_BEGIN(dbenv, jthis);
- err = lock_detect(dbenv, atype, flags, &aborted);
+ err = dbenv->lock_detect(dbenv, atype, flags, &aborted);
verify_return(jnienv, err, 0);
- JAVADB_ENV_API_END(dbenv);
return (aborted);
}
@@ -632,33 +702,260 @@ JNIEXPORT /*DbLock*/ jobject JNICALL Java_com_sleepycat_db_DbEnv_lock_1get
int err;
DB_ENV *dbenv;
DB_LOCK *dblock;
- JDBT dbobj;
+ LOCKED_DBT lobj;
/*DbLock*/ jobject retval;
dbenv = get_DB_ENV(jnienv, jthis);
if (!verify_non_null(jnienv, dbenv))
return (NULL);
- JAVADB_ENV_API_BEGIN(dbenv, jthis);
- dblock = (DB_LOCK*)malloc(sizeof(DB_LOCK));
+ if ((err = __os_malloc(dbenv, sizeof(DB_LOCK), &dblock)) != 0)
+ if (!verify_return(jnienv, err, 0))
+ return (NULL);
+
memset(dblock, 0, sizeof(DB_LOCK));
err = 0;
retval = NULL;
- if (jdbt_lock(&dbobj, jnienv, obj, inOp) != 0)
+ if (locked_dbt_get(&lobj, jnienv, dbenv, obj, inOp) != 0)
goto out;
- err = lock_get(dbenv, locker, flags, &dbobj.dbt->dbt,
+ err = dbenv->lock_get(dbenv, locker, flags, &lobj.javainfo->dbt,
(db_lockmode_t)lock_mode, dblock);
- if (verify_return(jnienv, err, 0)) {
+
+ if (err == DB_LOCK_NOTGRANTED)
+ report_notgranted_exception(jnienv,
+ "DbEnv.lock_get not granted",
+ DB_LOCK_GET, lock_mode, obj,
+ NULL, -1);
+ else if (verify_return(jnienv, err, 0)) {
retval = create_default_object(jnienv, name_DB_LOCK);
set_private_dbobj(jnienv, name_DB_LOCK, retval, dblock);
}
+
out:
- jdbt_unlock(&dbobj, jnienv);
- JAVADB_ENV_API_END(dbenv);
+ locked_dbt_put(&lobj, jnienv, dbenv);
return (retval);
}
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_lock_1vec
+ (JNIEnv *jnienv, /*DbEnv*/ jobject jthis, /*u_int32_t*/ jint locker,
+ jint flags, /*const Dbt*/ jobjectArray list, jint offset, jint count)
+{
+ DB_ENV *dbenv;
+ DB_LOCKREQ *lockreq;
+ DB_LOCKREQ *prereq; /* preprocessed requests */
+ DB_LOCKREQ *failedreq;
+ DB_LOCK *lockp;
+ LOCKED_DBT *locked_dbts;
+ int err;
+ int alloc_err;
+ int i;
+ size_t bytesize;
+ size_t ldbtsize;
+ jobject jlockreq;
+ db_lockop_t op;
+ jobject jobj;
+ jobject jlock;
+ int completed;
+
+ dbenv = get_DB_ENV(jnienv, jthis);
+ if (!verify_non_null(jnienv, dbenv))
+ goto out0;
+
+ if ((*jnienv)->GetArrayLength(jnienv, list) < offset + count) {
+ report_exception(jnienv,
+ "DbEnv.lock_vec array not large enough",
+ 0, 0);
+ goto out0;
+ }
+
+ bytesize = sizeof(DB_LOCKREQ) * count;
+ if ((err = __os_malloc(dbenv, bytesize, &lockreq)) != 0) {
+ verify_return(jnienv, err, 0);
+ goto out0;
+ }
+ memset(lockreq, 0, bytesize);
+
+ ldbtsize = sizeof(LOCKED_DBT) * count;
+ if ((err = __os_malloc(dbenv, ldbtsize, &locked_dbts)) != 0) {
+ verify_return(jnienv, err, 0);
+ goto out1;
+ }
+ memset(locked_dbts, 0, ldbtsize);
+ prereq = &lockreq[0];
+
+ /* fill in the lockreq array */
+ for (i = 0, prereq = &lockreq[0]; i < count; i++, prereq++) {
+ jlockreq = (*jnienv)->GetObjectArrayElement(jnienv, list,
+ offset + i);
+ if (jlockreq == NULL) {
+ report_exception(jnienv,
+ "DbEnv.lock_vec list entry is null",
+ 0, 0);
+ goto out2;
+ }
+ op = (*jnienv)->GetIntField(jnienv, jlockreq,
+ fid_DbLockRequest_op);
+ prereq->op = op;
+
+ switch (op) {
+ case DB_LOCK_GET_TIMEOUT:
+ /* Needed: mode, timeout, obj. Returned: lock. */
+ prereq->timeout = (*jnienv)->GetIntField(jnienv, jlockreq,
+ fid_DbLockRequest_timeout);
+ /* FALLTHROUGH */
+ case DB_LOCK_GET:
+ /* Needed: mode, obj. Returned: lock. */
+ prereq->mode = (*jnienv)->GetIntField(jnienv, jlockreq,
+ fid_DbLockRequest_mode);
+ jobj = (*jnienv)->GetObjectField(jnienv, jlockreq,
+ fid_DbLockRequest_obj);
+ if ((err = locked_dbt_get(&locked_dbts[i], jnienv,
+ dbenv, jobj, inOp)) != 0)
+ goto out2;
+ prereq->obj = &locked_dbts[i].javainfo->dbt;
+ break;
+ case DB_LOCK_PUT:
+ /* Needed: lock. Ignored: mode, obj. */
+ jlock = (*jnienv)->GetObjectField(jnienv, jlockreq,
+ fid_DbLockRequest_lock);
+ if (!verify_non_null(jnienv, jlock))
+ goto out2;
+ lockp = get_DB_LOCK(jnienv, jlock);
+ if (!verify_non_null(jnienv, lockp))
+ goto out2;
+
+ prereq->lock = *lockp;
+ break;
+ case DB_LOCK_PUT_ALL:
+ case DB_LOCK_TIMEOUT:
+ /* Needed: (none). Ignored: lock, mode, obj. */
+ break;
+ case DB_LOCK_PUT_OBJ:
+ /* Needed: obj. Ignored: lock, mode. */
+ jobj = (*jnienv)->GetObjectField(jnienv, jlockreq,
+ fid_DbLockRequest_obj);
+ if ((err = locked_dbt_get(&locked_dbts[i], jnienv,
+ dbenv, jobj, inOp)) != 0)
+ goto out2;
+ prereq->obj = &locked_dbts[i].javainfo->dbt;
+ break;
+ default:
+ report_exception(jnienv,
+ "DbEnv.lock_vec bad op value",
+ 0, 0);
+ goto out2;
+ }
+ }
+
+ err = dbenv->lock_vec(dbenv, locker, flags, lockreq, count, &failedreq);
+ if (err == 0)
+ completed = count;
+ else
+ completed = failedreq - lockreq;
+
+ /* do post processing for any and all requests that completed */
+ for (i = 0; i < completed; i++) {
+ op = lockreq[i].op;
+ if (op == DB_LOCK_PUT) {
+ /*
+ * After a successful put, the DbLock can no longer
+ * be used, so we release the storage related to it.
+ */
+ jlockreq = (*jnienv)->GetObjectArrayElement(jnienv,
+ list, i + offset);
+ jlock = (*jnienv)->GetObjectField(jnienv, jlockreq,
+ fid_DbLockRequest_lock);
+ lockp = get_DB_LOCK(jnienv, jlock);
+ __os_free(NULL, lockp);
+ set_private_dbobj(jnienv, name_DB_LOCK, jlock, 0);
+ }
+ else if (op == DB_LOCK_GET) {
+ /*
+ * Store the lock that was obtained.
+ * We need to create storage for it since
+ * the lockreq array only exists during this
+ * method call.
+ */
+ alloc_err = __os_malloc(dbenv, sizeof(DB_LOCK), &lockp);
+ if (!verify_return(jnienv, alloc_err, 0))
+ goto out2;
+
+ *lockp = lockreq[i].lock;
+
+ jlockreq = (*jnienv)->GetObjectArrayElement(jnienv,
+ list, i + offset);
+ jlock = create_default_object(jnienv, name_DB_LOCK);
+ set_private_dbobj(jnienv, name_DB_LOCK, jlock, lockp);
+ (*jnienv)->SetObjectField(jnienv, jlockreq,
+ fid_DbLockRequest_lock,
+ jlock);
+ }
+ }
+
+ /* If one of the locks was not granted, build the exception now. */
+ if (err == DB_LOCK_NOTGRANTED && i < count) {
+ jlockreq = (*jnienv)->GetObjectArrayElement(jnienv,
+ list, i + offset);
+ jobj = (*jnienv)->GetObjectField(jnienv, jlockreq,
+ fid_DbLockRequest_obj);
+ jlock = (*jnienv)->GetObjectField(jnienv, jlockreq,
+ fid_DbLockRequest_lock);
+ report_notgranted_exception(jnienv,
+ "DbEnv.lock_vec incomplete",
+ lockreq[i].op,
+ lockreq[i].mode,
+ jobj,
+ jlock,
+ i);
+ }
+ else
+ verify_return(jnienv, err, 0);
+
+ out2:
+ /* Free the dbts that we have locked */
+ for (i = 0; i < (prereq - lockreq); i++) {
+ if ((op = lockreq[i].op) == DB_LOCK_GET ||
+ op == DB_LOCK_PUT_OBJ)
+ locked_dbt_put(&locked_dbts[i], jnienv, dbenv);
+ }
+ __os_free(dbenv, locked_dbts);
+
+ out1:
+ __os_free(dbenv, lockreq);
+
+ out0:
+ return;
+}
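
For reference, a minimal C sketch of the DB_ENV->lock_vec calling pattern the wrapper above reproduces (one get request followed by a put-all), including the same failedreq arithmetic used to count completed requests. The environment handle and the lock object name are assumptions; error handling is reduced to returning the DB error code.

	#include <stdio.h>
	#include <string.h>
	#include "db.h"

	/* Hypothetical helper: take a read lock, then release everything. */
	int
	lock_vec_example(DB_ENV *dbenv)
	{
		DB_LOCKREQ reqs[2], *failed;
		DBT obj;
		u_int32_t locker;
		int completed, ret;

		if ((ret = dbenv->lock_id(dbenv, &locker)) != 0)
			return (ret);

		memset(reqs, 0, sizeof(reqs));
		memset(&obj, 0, sizeof(obj));
		obj.data = "my-object";			/* hypothetical lock object */
		obj.size = sizeof("my-object") - 1;

		reqs[0].op = DB_LOCK_GET;		/* needs mode + obj, returns lock */
		reqs[0].mode = DB_LOCK_READ;
		reqs[0].obj = &obj;
		reqs[1].op = DB_LOCK_PUT_ALL;		/* releases all locks held by locker */

		ret = dbenv->lock_vec(dbenv, locker, 0, reqs, 2, &failed);

		/* Same arithmetic as the wrapper: count the completed requests. */
		completed = (ret == 0) ? 2 : (int)(failed - reqs);
		if (ret != 0)
			fprintf(stderr, "lock_vec stopped after %d request(s)\n",
			    completed);
		return (ret);
	}
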
+
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_lock_1put
+ (JNIEnv *jnienv, jobject jthis, /*DbLock*/ jobject jlock)
+{
+ int err;
+ DB_ENV *dbenv;
+ DB_LOCK *dblock;
+
+ dbenv = get_DB_ENV(jnienv, jthis);
+ if (!verify_non_null(jnienv, dbenv))
+ return;
+
+ dblock = get_DB_LOCK(jnienv, jlock);
+ if (!verify_non_null(jnienv, dblock))
+ return;
+
+ err = dbenv->lock_put(dbenv, dblock);
+ if (verify_return(jnienv, err, 0)) {
+ /*
+ * After a successful put, the DbLock can no longer
+ * be used, so we release the storage related to it
+ * (allocated in DbEnv.lock_get()).
+ */
+ __os_free(NULL, dblock);
+
+ set_private_dbobj(jnienv, name_DB_LOCK, jlock, 0);
+ }
+}
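
A companion sketch of the single-lock C calls wrapped here; once DB_ENV->lock_put succeeds the DB_LOCK value is dead, which is why the wrapper frees the Java-side storage. The handle and object names are assumptions.

	#include <string.h>
	#include "db.h"

	int
	lock_single_example(DB_ENV *dbenv, u_int32_t locker)
	{
		DB_LOCK lock;
		DBT obj;
		int ret;

		memset(&obj, 0, sizeof(obj));
		obj.data = "my-object";			/* hypothetical lock object */
		obj.size = sizeof("my-object") - 1;

		if ((ret = dbenv->lock_get(dbenv,
		    locker, 0, &obj, DB_LOCK_WRITE, &lock)) != 0)
			return (ret);

		/* ... protected work ... */

		/* After this call the DB_LOCK contents must not be reused. */
		return (dbenv->lock_put(dbenv, &lock));
	}
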
+
JNIEXPORT jobjectArray JNICALL Java_com_sleepycat_db_DbEnv_log_1archive
(JNIEnv *jnienv, /*DbEnv*/ jobject jthis, jint flags)
{
@@ -672,8 +969,7 @@ JNIEXPORT jobjectArray JNICALL Java_com_sleepycat_db_DbEnv_log_1archive
strarray = NULL;
if (!verify_non_null(jnienv, dbenv))
return (0);
- JAVADB_ENV_API_BEGIN(dbenv, jthis);
- err = log_archive(dbenv, &ret, flags, 0);
+ err = dbenv->log_archive(dbenv, &ret, flags);
if (!verify_return(jnienv, err, 0))
return (0);
@@ -682,16 +978,16 @@ JNIEXPORT jobjectArray JNICALL Java_com_sleepycat_db_DbEnv_log_1archive
while (ret[len] != NULL)
len++;
stringClass = (*jnienv)->FindClass(jnienv, "java/lang/String");
- strarray = (*jnienv)->NewObjectArray(jnienv, len,
- stringClass, 0);
+ if ((strarray = (*jnienv)->NewObjectArray(jnienv,
+ len, stringClass, 0)) == NULL)
+ goto out;
for (i=0; i<len; i++) {
jstring str = (*jnienv)->NewStringUTF(jnienv, ret[i]);
(*jnienv)->SetObjectArrayElement(jnienv, strarray,
- i, str);
+ i, str);
}
}
- JAVADB_ENV_API_END(dbenv);
- return (strarray);
+out: return (strarray);
}
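
A short C sketch of the log_archive call being wrapped; DB_ARCH_ABS is one of the standard flag values and the printf is purely illustrative. Whether plain free() is the right deallocator for the returned list depends on the allocator configured for the environment, so that line is an assumption.

	#include <stdio.h>
	#include <stdlib.h>
	#include "db.h"

	int
	log_archive_example(DB_ENV *dbenv)
	{
		char **list, **p;
		int ret;

		/* With DB_ARCH_ABS: absolute paths of no-longer-needed log files. */
		if ((ret = dbenv->log_archive(dbenv, &list, DB_ARCH_ABS)) != 0)
			return (ret);
		if (list != NULL) {
			for (p = list; *p != NULL; ++p)
				printf("removable log file: %s\n", *p);
			free(list);	/* assumes the default allocator */
		}
		return (0);
	}
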
JNIEXPORT jint JNICALL Java_com_sleepycat_db_DbEnv_log_1compare
@@ -708,80 +1004,40 @@ JNIEXPORT jint JNICALL Java_com_sleepycat_db_DbEnv_log_1compare
return (log_compare(dblsn0, dblsn1));
}
-JNIEXPORT jstring JNICALL Java_com_sleepycat_db_DbEnv_log_1file
- (JNIEnv *jnienv, /*DbEnv*/ jobject jthis, /*DbLsn*/ jobject lsn)
+JNIEXPORT jobject JNICALL Java_com_sleepycat_db_DbEnv_log_1cursor
+ (JNIEnv *jnienv, /*DbEnv*/ jobject jthis, jint flags)
{
int err;
+ DB_LOGC *dblogc;
DB_ENV *dbenv = get_DB_ENV(jnienv, jthis);
- DB_LSN *dblsn = get_DB_LSN(jnienv, lsn);
- char filename[FILENAME_MAX+1] = "";
if (!verify_non_null(jnienv, dbenv))
return (NULL);
-
- JAVADB_ENV_API_BEGIN(dbenv, jthis);
-
- err = log_file(dbenv, dblsn, filename, FILENAME_MAX);
+ err = dbenv->log_cursor(dbenv, &dblogc, flags);
verify_return(jnienv, err, 0);
- filename[FILENAME_MAX] = '\0'; /* just to be sure */
- JAVADB_ENV_API_END(dbenv);
- return (get_java_string(jnienv, filename));
+ return (get_DbLogc(jnienv, dblogc));
}
-JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_log_1flush
+JNIEXPORT jstring JNICALL Java_com_sleepycat_db_DbEnv_log_1file
(JNIEnv *jnienv, /*DbEnv*/ jobject jthis, /*DbLsn*/ jobject lsn)
{
int err;
DB_ENV *dbenv = get_DB_ENV(jnienv, jthis);
DB_LSN *dblsn = get_DB_LSN(jnienv, lsn);
+ char filename[FILENAME_MAX+1] = "";
if (!verify_non_null(jnienv, dbenv))
- return;
-
- JAVADB_ENV_API_BEGIN(dbenv, jthis);
+ return (NULL);
- err = log_flush(dbenv, dblsn);
+ err = dbenv->log_file(dbenv, dblsn, filename, FILENAME_MAX);
verify_return(jnienv, err, 0);
- JAVADB_ENV_API_END(dbenv);
+ filename[FILENAME_MAX] = '\0'; /* just to be sure */
+ return (get_java_string(jnienv, filename));
}
-JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_log_1get
- (JNIEnv *jnienv, /*DbEnv*/ jobject jthis, /*DbLsn*/ jobject lsn,
- /*DbDbt*/ jobject data, jint flags)
-{
- int err, retry;
- DB_ENV *dbenv;
- DB_LSN *dblsn;
- JDBT dbdata;
-
- dbenv = get_DB_ENV(jnienv, jthis);
- dblsn = get_DB_LSN(jnienv, lsn);
-
- if (!verify_non_null(jnienv, dbenv))
- return;
-
- JAVADB_ENV_API_BEGIN(dbenv, jthis);
-
- if (jdbt_lock(&dbdata, jnienv, data, outOp) != 0)
- goto out;
-
- for (retry = 0; retry < 3; retry++) {
- err = log_get(dbenv, dblsn, &dbdata.dbt->dbt, flags);
- /* If we failed due to lack of memory in our DBT arrays,
- * retry.
- */
- if (err != ENOMEM)
- break;
- if (!jdbt_realloc(&dbdata, jnienv))
- break;
- }
-
- verify_return(jnienv, err, 0);
-
- out:
- jdbt_unlock(&dbdata, jnienv);
- JAVADB_ENV_API_END(dbenv);
-}
+JAVADB_METHOD(DbEnv_log_1flush,
+ (JAVADB_ARGS, /*DbLsn*/ jobject lsn), DB_ENV,
+ log_flush, (c_this, get_DB_LSN(jnienv, lsn)))
JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_log_1put
(JNIEnv *jnienv, /*DbEnv*/ jobject jthis, /*DbLsn*/ jobject lsn,
@@ -790,72 +1046,28 @@ JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_log_1put
int err;
DB_ENV *dbenv;
DB_LSN *dblsn;
- JDBT dbdata;
+ LOCKED_DBT ldata;
dbenv = get_DB_ENV(jnienv, jthis);
dblsn = get_DB_LSN(jnienv, lsn);
if (!verify_non_null(jnienv, dbenv))
return;
- JAVADB_ENV_API_BEGIN(dbenv, jthis);
-
- if (jdbt_lock(&dbdata, jnienv, data, inOp) != 0)
- goto out;
-
- err = log_put(dbenv, dblsn, &dbdata.dbt->dbt, flags);
- verify_return(jnienv, err, 0);
- out:
- jdbt_unlock(&dbdata, jnienv);
- JAVADB_ENV_API_END(dbenv);
-}
-
-JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_log_1register
- (JNIEnv *jnienv, /*DbEnv*/ jobject jthis, /*Db*/ jobject dbp,
- jstring name)
-{
- int err;
- DB_ENV *dbenv;
- DB *dbdb;
- JSTR dbname;
-
- dbenv = get_DB_ENV(jnienv, jthis);
- dbdb = get_DB(jnienv, dbp);
- if (!verify_non_null(jnienv, dbenv))
+ /* log_put's DB_LSN argument may not be NULL. */
+ if (!verify_non_null(jnienv, dblsn))
return;
- JAVADB_ENV_API_BEGIN(dbenv, jthis);
-
- if (jstr_lock(&dbname, jnienv, name) != 0)
+ if (locked_dbt_get(&ldata, jnienv, dbenv, data, inOp) != 0)
goto out;
- err = log_register(dbenv, dbdb, dbname.string);
+ err = dbenv->log_put(dbenv, dblsn, &ldata.javainfo->dbt, flags);
verify_return(jnienv, err, 0);
out:
- jstr_unlock(&dbname, jnienv);
- JAVADB_ENV_API_END(dbenv);
-}
-
-JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_log_1unregister
- (JNIEnv *jnienv, /*DbEnv*/ jobject jthis, /*Db*/ jobject dbp)
-{
- int err;
- DB_ENV *dbenv;
- DB *dbdb;
-
- dbenv = get_DB_ENV(jnienv, jthis);
- dbdb = get_DB(jnienv, dbp);
- if (!verify_non_null(jnienv, dbenv))
- return;
-
- JAVADB_ENV_API_BEGIN(dbenv, jthis);
-
- err = log_unregister(dbenv, dbdb);
- verify_return(jnienv, err, 0);
- JAVADB_ENV_API_END(dbenv);
+ locked_dbt_put(&ldata, jnienv, dbenv);
}
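
For comparison, the bare C calling sequence for DB_ENV->log_put that the wrapper drives through a LOCKED_DBT; the record contents are hypothetical.

	#include <string.h>
	#include "db.h"

	int
	log_put_example(DB_ENV *dbenv)
	{
		DBT rec;
		DB_LSN lsn;

		memset(&rec, 0, sizeof(rec));
		rec.data = "application-defined record";
		rec.size = sizeof("application-defined record") - 1;

		/* On success, lsn identifies the record just written. */
		return (dbenv->log_put(dbenv, &lsn, &rec, 0));
	}
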
JNIEXPORT jobject JNICALL Java_com_sleepycat_db_DbEnv_log_1stat
- (JNIEnv *jnienv, /*DbEnv*/ jobject jthis)
+ (JNIEnv *jnienv, /*DbEnv*/ jobject jthis, jint flags)
{
int err;
DB_ENV *dbenv;
@@ -869,59 +1081,22 @@ JNIEXPORT jobject JNICALL Java_com_sleepycat_db_DbEnv_log_1stat
if (!verify_non_null(jnienv, dbenv))
return (NULL);
- JAVADB_ENV_API_BEGIN(dbenv, jthis);
-
- /* We cannot use the default allocator (on Win* platforms anyway)
- * because it often causes problems when we free storage
- * in a DLL that was allocated in another DLL. Using
- * our own allocator (ours just calls malloc!) ensures
- * that there is no mismatch.
- */
- err = log_stat(dbenv, &statp, NULL);
+ err = dbenv->log_stat(dbenv, &statp, (u_int32_t)flags);
if (verify_return(jnienv, err, 0)) {
- retval = create_default_object(jnienv, name_DB_LOG_STAT);
- dbclass = get_class(jnienv, name_DB_LOG_STAT);
+ if ((dbclass = get_class(jnienv, name_DB_LOG_STAT)) == NULL ||
+ (retval =
+ create_default_object(jnienv, name_DB_LOG_STAT)) == NULL)
+ goto err; /* An exception has been posted. */
- /* Set the individual fields */
- set_int_field(jnienv, dbclass, retval,
- "st_magic", statp->st_magic);
- set_int_field(jnienv, dbclass, retval,
- "st_version", statp->st_version);
- set_int_field(jnienv, dbclass, retval,
- "st_mode", statp->st_mode);
- set_int_field(jnienv, dbclass, retval,
- "st_lg_max", statp->st_lg_max);
- set_int_field(jnienv, dbclass, retval,
- "st_w_bytes", statp->st_w_bytes);
- set_int_field(jnienv, dbclass, retval,
- "st_w_mbytes", statp->st_w_mbytes);
- set_int_field(jnienv, dbclass, retval,
- "st_wc_bytes", statp->st_wc_bytes);
- set_int_field(jnienv, dbclass, retval,
- "st_wc_mbytes", statp->st_wc_mbytes);
- set_int_field(jnienv, dbclass, retval,
- "st_wcount", statp->st_wcount);
- set_int_field(jnienv, dbclass, retval,
- "st_scount", statp->st_scount);
- set_int_field(jnienv, dbclass, retval,
- "st_region_wait", statp->st_region_wait);
- set_int_field(jnienv, dbclass, retval,
- "st_region_nowait", statp->st_region_nowait);
- set_int_field(jnienv, dbclass, retval,
- "st_cur_file", statp->st_cur_file);
- set_int_field(jnienv, dbclass, retval,
- "st_cur_offset", statp->st_cur_offset);
- set_int_field(jnienv, dbclass, retval,
- "st_regsize", statp->st_regsize);
-
- free(statp);
+ __jv_fill_log_stat(jnienv, dbclass, retval, statp);
+
+err: __os_ufree(dbenv, statp);
}
- JAVADB_ENV_API_END(dbenv);
return (retval);
}
JNIEXPORT jobject JNICALL Java_com_sleepycat_db_DbEnv_memp_1stat
- (JNIEnv *jnienv, /*DbEnv*/ jobject jthis)
+ (JNIEnv *jnienv, /*DbEnv*/ jobject jthis, jint flags)
{
int err;
jclass dbclass;
@@ -935,65 +1110,22 @@ JNIEXPORT jobject JNICALL Java_com_sleepycat_db_DbEnv_memp_1stat
if (!verify_non_null(jnienv, dbenv))
return (NULL);
- JAVADB_ENV_API_BEGIN(dbenv, jthis);
-
- /* We cannot use the default allocator (on Win* platforms anyway)
- * because it often causes problems when we free storage
- * in a DLL that was allocated in another DLL. Using
- * our own allocator (ours just calls malloc!) ensures
- * that there is no mismatch.
- */
- err = memp_stat(dbenv, &statp, 0, NULL);
+ err = dbenv->memp_stat(dbenv, &statp, 0, (u_int32_t)flags);
if (verify_return(jnienv, err, 0)) {
- retval = create_default_object(jnienv, name_DB_MPOOL_STAT);
- dbclass = get_class(jnienv, name_DB_MPOOL_STAT);
-
- set_int_field(jnienv, dbclass, retval, "st_cachesize", 0);
- set_int_field(jnienv, dbclass, retval,
- "st_cache_hit", statp->st_cache_hit);
- set_int_field(jnienv, dbclass, retval,
- "st_cache_miss", statp->st_cache_miss);
- set_int_field(jnienv, dbclass, retval,
- "st_map", statp->st_map);
- set_int_field(jnienv, dbclass, retval,
- "st_page_create", statp->st_page_create);
- set_int_field(jnienv, dbclass, retval,
- "st_page_in", statp->st_page_in);
- set_int_field(jnienv, dbclass, retval,
- "st_page_out", statp->st_page_out);
- set_int_field(jnienv, dbclass, retval,
- "st_ro_evict", statp->st_ro_evict);
- set_int_field(jnienv, dbclass, retval,
- "st_rw_evict", statp->st_rw_evict);
- set_int_field(jnienv, dbclass, retval,
- "st_hash_buckets", statp->st_hash_buckets);
- set_int_field(jnienv, dbclass, retval,
- "st_hash_searches", statp->st_hash_searches);
- set_int_field(jnienv, dbclass, retval,
- "st_hash_longest", statp->st_hash_longest);
- set_int_field(jnienv, dbclass, retval,
- "st_hash_examined", statp->st_hash_examined);
- set_int_field(jnienv, dbclass, retval,
- "st_page_clean", statp->st_page_clean);
- set_int_field(jnienv, dbclass, retval,
- "st_page_dirty", statp->st_page_dirty);
- set_int_field(jnienv, dbclass, retval,
- "st_page_trickle", statp->st_page_trickle);
- set_int_field(jnienv, dbclass, retval,
- "st_region_wait", statp->st_region_wait);
- set_int_field(jnienv, dbclass, retval,
- "st_region_nowait", statp->st_region_nowait);
- set_int_field(jnienv, dbclass, retval,
- "st_regsize", statp->st_regsize);
-
- free(statp);
+ if ((dbclass = get_class(jnienv, name_DB_MPOOL_STAT)) == NULL ||
+ (retval =
+ create_default_object(jnienv, name_DB_MPOOL_STAT)) == NULL)
+ goto err; /* An exception has been posted. */
+
+ __jv_fill_mpool_stat(jnienv, dbclass, retval, statp);
+
+err: __os_ufree(dbenv, statp);
}
- JAVADB_ENV_API_END(dbenv);
return (retval);
}
JNIEXPORT jobjectArray JNICALL Java_com_sleepycat_db_DbEnv_memp_1fstat
- (JNIEnv *jnienv, /*DbEnv*/ jobject jthis)
+ (JNIEnv *jnienv, /*DbEnv*/ jobject jthis, jint flags)
{
int err, i, len;
jclass fstat_class;
@@ -1009,57 +1141,49 @@ JNIEXPORT jobjectArray JNICALL Java_com_sleepycat_db_DbEnv_memp_1fstat
if (!verify_non_null(jnienv, dbenv))
return (NULL);
- JAVADB_ENV_API_BEGIN(dbenv, jthis);
-
- /* We cannot use the default allocator (on Win* platforms anyway)
- * because it often causes problems when we free storage
- * in a DLL that was allocated in another DLL. Using
- * our own allocator (ours just calls malloc!) ensures
- * that there is no mismatch.
- */
- err = memp_stat(dbenv, 0, &fstatp, NULL);
+ err = dbenv->memp_stat(dbenv, 0, &fstatp, (u_int32_t)flags);
if (verify_return(jnienv, err, 0)) {
len = 0;
- while (fstatp[len])
+ while (fstatp[len] != NULL)
len++;
- fstat_class = get_class(jnienv, name_DB_MPOOL_FSTAT);
- retval = (*jnienv)->NewObjectArray(jnienv, len,
- fstat_class, 0);
+ if ((fstat_class =
+ get_class(jnienv, name_DB_MPOOL_FSTAT)) == NULL ||
+ (retval = (*jnienv)->NewObjectArray(jnienv, len,
+ fstat_class, 0)) == NULL)
+ goto err;
for (i=0; i<len; i++) {
- jobject obj = create_default_object(jnienv,
- name_DB_MPOOL_FSTAT);
+ jobject obj;
+ if ((obj = create_default_object(jnienv,
+ name_DB_MPOOL_FSTAT)) == NULL)
+ goto err;
(*jnienv)->SetObjectArrayElement(jnienv, retval,
- i, obj);
+ i, obj);
/* Set the string field. */
- filename_id =
- (*jnienv)->GetFieldID(jnienv, fstat_class,
- "file_name",
- string_signature);
- jfilename =
- get_java_string(jnienv, fstatp[i]->file_name);
+ filename_id = (*jnienv)->GetFieldID(jnienv,
+ fstat_class, "file_name", string_signature);
+ jfilename = get_java_string(jnienv,
+ fstatp[i]->file_name);
(*jnienv)->SetObjectField(jnienv, obj,
- filename_id, jfilename);
-
+ filename_id, jfilename);
set_int_field(jnienv, fstat_class, obj,
- "st_pagesize", fstatp[i]->st_pagesize);
+ "st_pagesize", fstatp[i]->st_pagesize);
set_int_field(jnienv, fstat_class, obj,
- "st_cache_hit", fstatp[i]->st_cache_hit);
+ "st_cache_hit", fstatp[i]->st_cache_hit);
set_int_field(jnienv, fstat_class, obj,
- "st_cache_miss", fstatp[i]->st_cache_miss);
+ "st_cache_miss", fstatp[i]->st_cache_miss);
set_int_field(jnienv, fstat_class, obj,
- "st_map", fstatp[i]->st_map);
+ "st_map", fstatp[i]->st_map);
set_int_field(jnienv, fstat_class, obj,
- "st_page_create", fstatp[i]->st_page_create);
+ "st_page_create", fstatp[i]->st_page_create);
set_int_field(jnienv, fstat_class, obj,
- "st_page_in", fstatp[i]->st_page_in);
+ "st_page_in", fstatp[i]->st_page_in);
set_int_field(jnienv, fstat_class, obj,
- "st_page_out", fstatp[i]->st_page_out);
- free(fstatp[i]);
+ "st_page_out", fstatp[i]->st_page_out);
+ __os_ufree(dbenv, fstatp[i]);
}
- free(fstatp);
+err: __os_ufree(dbenv, fstatp);
}
- JAVADB_ENV_API_END(dbenv);
return (retval);
}
@@ -1071,10 +1195,8 @@ JNIEXPORT jint JNICALL Java_com_sleepycat_db_DbEnv_memp_1trickle
int result = 0;
if (verify_non_null(jnienv, dbenv)) {
- JAVADB_ENV_API_BEGIN(dbenv, jthis);
- err = memp_trickle(dbenv, pct, &result);
+ err = dbenv->memp_trickle(dbenv, pct, &result);
verify_return(jnienv, err, 0);
- JAVADB_ENV_API_END(dbenv);
}
return (result);
}
@@ -1090,36 +1212,21 @@ JNIEXPORT jobject JNICALL Java_com_sleepycat_db_DbEnv_txn_1begin
if (!verify_non_null(jnienv, dbenv))
return (0);
- JAVADB_ENV_API_BEGIN(dbenv, jthis);
-
dbpid = get_DB_TXN(jnienv, pid);
result = 0;
- err = txn_begin(dbenv, dbpid, &result, flags);
+ err = dbenv->txn_begin(dbenv, dbpid, &result, flags);
if (!verify_return(jnienv, err, 0))
return (0);
- JAVADB_ENV_API_END(dbenv);
return (get_DbTxn(jnienv, result));
}
-JNIEXPORT jint JNICALL Java_com_sleepycat_db_DbEnv_txn_1checkpoint
- (JNIEnv *jnienv, /*DbEnv*/ jobject jthis, jint kbyte, jint min, jint flags)
-{
- int err;
- DB_ENV *dbenv = get_DB_ENV(jnienv, jthis);
-
- if (!verify_non_null(jnienv, dbenv))
- return (0);
- JAVADB_ENV_API_BEGIN(dbenv, jthis);
- err = txn_checkpoint(dbenv, kbyte, min, flags);
- if (err != DB_INCOMPLETE)
- verify_return(jnienv, err, 0);
- JAVADB_ENV_API_END(dbenv);
- return (err);
-}
+JAVADB_METHOD(DbEnv_txn_1checkpoint,
+ (JAVADB_ARGS, jint kbyte, jint min, jint flags), DB_ENV,
+ txn_checkpoint, (c_this, kbyte, min, flags))
-JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_tx_1recover_1changed
- (JNIEnv *jnienv, /*DbEnv*/ jobject jthis, /*DbFeedback*/ jobject jtxrecover)
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_app_1dispatch_1changed
+ (JNIEnv *jnienv, /*DbEnv*/ jobject jthis, /*DbFeedback*/ jobject jappdispatch)
{
DB_ENV *dbenv;
DB_ENV_JAVAINFO *dbenvinfo;
@@ -1130,13 +1237,96 @@ JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv_tx_1recover_1changed
!verify_non_null(jnienv, dbenvinfo))
return;
- JAVADB_ENV_API_BEGIN(dbenv, jthis);
- dbjie_set_tx_recover_object(dbenvinfo, jnienv, dbenv, jtxrecover);
- JAVADB_ENV_API_END(dbenv);
+ dbjie_set_app_dispatch_object(dbenvinfo, jnienv, dbenv, jappdispatch);
+}
+
+JNIEXPORT jobjectArray JNICALL Java_com_sleepycat_db_DbEnv_txn_1recover
+ (JNIEnv *jnienv, /*DbEnv*/ jobject jthis, jint count, jint flags)
+{
+ int err;
+ DB_ENV *dbenv;
+ DB_PREPLIST *preps;
+ long retcount;
+ int i;
+ char signature[128];
+ size_t bytesize;
+ jobject retval;
+ jobject obj;
+ jobject txnobj;
+ jbyteArray bytearr;
+ jclass preplist_class;
+ jfieldID txn_fieldid;
+ jfieldID gid_fieldid;
+
+ retval = NULL;
+ dbenv = get_DB_ENV(jnienv, jthis);
+ if (!verify_non_null(jnienv, dbenv))
+ return (NULL);
+
+ /*
+ * We need to allocate some local storage for the
+ * returned preplist, and that requires us to do
+ * our own argument validation.
+ */
+ if (count <= 0) {
+ verify_return(jnienv, EINVAL, 0);
+ goto out;
+ }
+
+ bytesize = sizeof(DB_PREPLIST) * count;
+ if ((err = __os_malloc(dbenv, bytesize, &preps)) != 0) {
+ verify_return(jnienv, err, 0);
+ goto out;
+ }
+
+ err = dbenv->txn_recover(dbenv, preps, count, &retcount, flags);
+
+ if (verify_return(jnienv, err, 0)) {
+ if ((preplist_class =
+ get_class(jnienv, name_DB_PREPLIST)) == NULL ||
+ (retval = (*jnienv)->NewObjectArray(jnienv, retcount,
+ preplist_class, 0)) == NULL)
+ goto err;
+
+ (void)snprintf(signature, sizeof(signature),
+ "L%s%s;", DB_PACKAGE_NAME, name_DB_TXN);
+ txn_fieldid = (*jnienv)->GetFieldID(jnienv, preplist_class,
+ "txn", signature);
+ gid_fieldid = (*jnienv)->GetFieldID(jnienv, preplist_class,
+ "gid", "[B");
+
+ for (i=0; i<retcount; i++) {
+ /*
+ * First, make a blank DbPreplist object
+ * and set the array entry.
+ */
+ if ((obj = create_default_object(jnienv,
+ name_DB_PREPLIST)) == NULL)
+ goto err;
+ (*jnienv)->SetObjectArrayElement(jnienv,
+ retval, i, obj);
+
+ /* Set the txn field. */
+ txnobj = get_DbTxn(jnienv, preps[i].txn);
+ (*jnienv)->SetObjectField(jnienv,
+ obj, txn_fieldid, txnobj);
+
+ /* Build the gid array and set the field. */
+ if ((bytearr = (*jnienv)->NewByteArray(jnienv,
+ sizeof(preps[i].gid))) == NULL)
+ goto err;
+ (*jnienv)->SetByteArrayRegion(jnienv, bytearr, 0,
+ sizeof(preps[i].gid), (jbyte *)&preps[i].gid[0]);
+ (*jnienv)->SetObjectField(jnienv, obj,
+ gid_fieldid, bytearr);
+ }
+ }
+err: __os_free(dbenv, preps);
+out: return (retval);
}
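
A minimal C sketch of the DB_ENV->txn_recover pattern mirrored above, assuming an environment that was reopened after a crash with prepared transactions outstanding; resolving each one by aborting is just one possible policy.

	#include "db.h"

	int
	txn_recover_example(DB_ENV *dbenv)
	{
		DB_PREPLIST preps[32], *p;
		long count;
		int ret;

		if ((ret = dbenv->txn_recover(dbenv, preps,
		    (long)(sizeof(preps) / sizeof(preps[0])), &count, DB_FIRST)) != 0)
			return (ret);

		for (p = preps; p < preps + count; ++p) {
			/* p->gid holds the DB_XIDDATASIZE-byte global id. */
			if ((ret = p->txn->abort(p->txn)) != 0)	/* or commit */
				return (ret);
		}
		return (0);
	}
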
JNIEXPORT jobject JNICALL Java_com_sleepycat_db_DbEnv_txn_1stat
- (JNIEnv *jnienv, /*DbEnv*/ jobject jthis)
+ (JNIEnv *jnienv, /*DbEnv*/ jobject jthis, jint flags)
{
int err;
DB_ENV *dbenv;
@@ -1154,57 +1344,29 @@ JNIEXPORT jobject JNICALL Java_com_sleepycat_db_DbEnv_txn_1stat
if (!verify_non_null(jnienv, dbenv))
return (NULL);
- JAVADB_ENV_API_BEGIN(dbenv, jthis);
-
- /* We cannot use the default allocator (on Win* platforms anyway)
- * because it often causes problems when we free storage
- * in a DLL that was allocated in another DLL. Using
- * our own allocator (ours just calls malloc!) ensures
- * that there is no mismatch.
- */
- err = txn_stat(dbenv, &statp, NULL);
+ err = dbenv->txn_stat(dbenv, &statp, (u_int32_t)flags);
if (verify_return(jnienv, err, 0)) {
- retval = create_default_object(jnienv, name_DB_TXN_STAT);
- dbclass = get_class(jnienv, name_DB_TXN_STAT);
+ if ((dbclass = get_class(jnienv, name_DB_TXN_STAT)) == NULL ||
+ (retval =
+ create_default_object(jnienv, name_DB_TXN_STAT)) == NULL)
+ goto err;
/* Set the individual fields */
+ __jv_fill_txn_stat(jnienv, dbclass, retval, statp);
+
+ if ((active_class =
+ get_class(jnienv, name_DB_TXN_STAT_ACTIVE)) == NULL ||
+ (actives = (*jnienv)->NewObjectArray(jnienv,
+ statp->st_nactive, active_class, 0)) == NULL)
+ goto err;
- set_lsn_field(jnienv, dbclass, retval,
- "st_last_ckp", statp->st_last_ckp);
- set_lsn_field(jnienv, dbclass, retval,
- "st_pending_ckp", statp->st_pending_ckp);
- set_long_field(jnienv, dbclass, retval,
- "st_time_ckp", statp->st_time_ckp);
- set_int_field(jnienv, dbclass, retval,
- "st_last_txnid", statp->st_last_txnid);
- set_int_field(jnienv, dbclass, retval,
- "st_maxtxns", statp->st_maxtxns);
- set_int_field(jnienv, dbclass, retval,
- "st_naborts", statp->st_naborts);
- set_int_field(jnienv, dbclass, retval,
- "st_nbegins", statp->st_nbegins);
- set_int_field(jnienv, dbclass, retval,
- "st_ncommits", statp->st_ncommits);
- set_int_field(jnienv, dbclass, retval,
- "st_nactive", statp->st_nactive);
- set_int_field(jnienv, dbclass, retval,
- "st_maxnactive", statp->st_maxnactive);
-
- active_class = get_class(jnienv, name_DB_TXN_STAT_ACTIVE);
- actives =
- (*jnienv)->NewObjectArray(jnienv, statp->st_nactive,
- active_class, 0);
-
- /* Set the st_txnarray field. This is a little more involved
+ /*
+ * Set the st_txnarray field. This is a little more involved
* than other fields, since the type is an array, so none
* of our utility functions help.
*/
- strncpy(active_signature, "[L", sizeof(active_signature));
- strncat(active_signature, DB_PACKAGE_NAME,
- sizeof(active_signature));
- strncat(active_signature, name_DB_TXN_STAT_ACTIVE,
- sizeof(active_signature));
- strncat(active_signature, ";", sizeof(active_signature));
+ (void)snprintf(active_signature, sizeof(active_signature),
+ "[L%s%s;", DB_PACKAGE_NAME, name_DB_TXN_STAT_ACTIVE);
arrid = (*jnienv)->GetFieldID(jnienv, dbclass, "st_txnarray",
active_signature);
@@ -1212,26 +1374,21 @@ JNIEXPORT jobject JNICALL Java_com_sleepycat_db_DbEnv_txn_1stat
/* Now fill the in the elements of st_txnarray. */
for (i=0; i<statp->st_nactive; i++) {
- obj = create_default_object(jnienv, name_DB_TXN_STAT_ACTIVE);
- (*jnienv)->SetObjectArrayElement(jnienv, actives, i, obj);
+ obj = create_default_object(jnienv,
+ name_DB_TXN_STAT_ACTIVE);
+ (*jnienv)->SetObjectArrayElement(jnienv,
+ actives, i, obj);
set_int_field(jnienv, active_class, obj,
"txnid", statp->st_txnarray[i].txnid);
- set_int_field(jnienv, active_class, obj,
- "parentid", statp->st_txnarray[i].parentid);
+ set_int_field(jnienv, active_class, obj, "parentid",
+ statp->st_txnarray[i].parentid);
set_lsn_field(jnienv, active_class, obj,
"lsn", statp->st_txnarray[i].lsn);
}
- set_int_field(jnienv, dbclass, retval,
- "st_region_wait", statp->st_region_wait);
- set_int_field(jnienv, dbclass, retval,
- "st_region_nowait", statp->st_region_nowait);
- set_int_field(jnienv, dbclass, retval,
- "st_regsize", statp->st_regsize);
-
- free(statp);
+
+err: __os_ufree(dbenv, statp);
}
- JAVADB_ENV_API_END(dbenv);
return (retval);
}
@@ -1247,10 +1404,7 @@ JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv__1set_1errcall
if (verify_non_null(jnienv, dbenv) &&
verify_non_null(jnienv, dbenvinfo)) {
-
- JAVADB_ENV_API_BEGIN(dbenv, jthis);
dbjie_set_errcall(dbenvinfo, jnienv, errcall);
- JAVADB_ENV_API_END(dbenv);
}
}
@@ -1265,10 +1419,7 @@ JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv__1set_1errpfx
if (verify_non_null(jnienv, dbenv) &&
verify_non_null(jnienv, dbenvinfo)) {
-
- JAVADB_ENV_API_BEGIN(dbenv, jthis);
dbjie_set_errpfx(dbenvinfo, jnienv, str);
- JAVADB_ENV_API_END(dbenv);
}
}
@@ -1283,8 +1434,7 @@ JNIEXPORT void JNICALL Java_com_sleepycat_db_DbEnv__1finalize
envinfo = get_DB_ENV_JAVAINFO(jnienv, jthis);
DB_ASSERT(envinfo != NULL);
- /* Note: We detect unclosed DbEnvs and report it.
- */
+ /* Note: We detect and report unclosed DbEnvs. */
if (dbenv != NULL && envinfo != NULL && !dbjie_is_dbopen(envinfo)) {
/* If this error occurs, this object was never closed. */
diff --git a/bdb/libdb_java/java_DbLock.c b/bdb/libdb_java/java_DbLock.c
index 287ca6622e5..00a9836bfa0 100644
--- a/bdb/libdb_java/java_DbLock.c
+++ b/bdb/libdb_java/java_DbLock.c
@@ -1,55 +1,30 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1997, 1998, 1999, 2000
+ * Copyright (c) 1997-2002
* Sleepycat Software. All rights reserved.
*/
#include "db_config.h"
#ifndef lint
-static const char revid[] = "$Id: java_DbLock.c,v 11.4 2000/11/30 00:58:39 ubell Exp $";
+static const char revid[] = "$Id: java_DbLock.c,v 11.12 2002/02/28 21:27:38 ubell Exp $";
#endif /* not lint */
#include <jni.h>
#include <stdlib.h>
#include <string.h>
-#include "db.h"
+#include "db_int.h"
#include "java_util.h"
#include "com_sleepycat_db_DbLock.h"
-JNIEXPORT void JNICALL Java_com_sleepycat_db_DbLock_put
- (JNIEnv *jnienv, jobject jthis, /*DbEnv*/ jobject env)
-{
- int err;
- DB_LOCK *dblock = get_DB_LOCK(jnienv, jthis);
- DB_ENV *dbenv = get_DB_ENV(jnienv, env);
-
- if (!verify_non_null(jnienv, dbenv))
- return;
-
- if (!verify_non_null(jnienv, dblock))
- return;
-
- err = lock_put(dbenv, dblock);
- if (verify_return(jnienv, err, 0)) {
- /* After a successful put, the DbLock can no longer
- * be used, so we release the storage related to it
- * (allocated in DbEnv.lock_get() or lock_tget()).
- */
- free(dblock);
-
- set_private_dbobj(jnienv, name_DB_LOCK, jthis, 0);
- }
-}
-
JNIEXPORT void JNICALL Java_com_sleepycat_db_DbLock_finalize
(JNIEnv *jnienv, jobject jthis)
{
DB_LOCK *dblock = get_DB_LOCK(jnienv, jthis);
if (dblock) {
/* Free any data related to DB_LOCK here */
- free(dblock);
+ __os_free(NULL, dblock);
}
set_private_dbobj(jnienv, name_DB_LOCK, jthis, 0); /* paranoia */
}
diff --git a/bdb/libdb_java/java_DbLogc.c b/bdb/libdb_java/java_DbLogc.c
new file mode 100644
index 00000000000..69294d9baac
--- /dev/null
+++ b/bdb/libdb_java/java_DbLogc.c
@@ -0,0 +1,110 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ */
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: java_DbLogc.c,v 11.6 2002/07/02 12:03:03 mjc Exp $";
+#endif /* not lint */
+
+#include <jni.h>
+#include <errno.h>
+#include <stdlib.h>
+#include <string.h>
+#ifdef DIAGNOSTIC
+#include <stdio.h>
+#endif
+
+#include "db_int.h"
+#include "java_util.h"
+#include "com_sleepycat_db_DbLogc.h"
+
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbLogc_close
+ (JNIEnv *jnienv, jobject jthis, jint flags)
+{
+ int err;
+ DB_LOGC *dblogc = get_DB_LOGC(jnienv, jthis);
+
+ if (!verify_non_null(jnienv, dblogc))
+ return;
+ err = dblogc->close(dblogc, flags);
+ if (verify_return(jnienv, err, 0)) {
+ set_private_dbobj(jnienv, name_DB_LOGC, jthis, 0);
+ }
+}
+
+JNIEXPORT jint JNICALL Java_com_sleepycat_db_DbLogc_get
+ (JNIEnv *jnienv, jobject jthis,
+ /*DbLsn*/ jobject lsn, /*Dbt*/ jobject data, jint flags)
+{
+ int err, retry;
+ DB_LOGC *dblogc;
+ DB_LSN *dblsn;
+ LOCKED_DBT ldata;
+ OpKind dataop;
+
+ /*
+ * Depending on flags, the user may be supplying the LSN
+ * (e.g., DB_SET), or else we may have to retrieve it.
+ */
+ err = 0;
+ dataop = outOp;
+
+ dblogc = get_DB_LOGC(jnienv, jthis);
+ dblsn = get_DB_LSN(jnienv, lsn);
+ if (!verify_non_null(jnienv, dblogc))
+ return (0);
+
+ if (locked_dbt_get(&ldata, jnienv, dblogc->dbenv, data, dataop) != 0)
+ goto out1;
+
+ for (retry = 0; retry < 3; retry++) {
+ err = dblogc->get(dblogc, dblsn, &ldata.javainfo->dbt, flags);
+
+ /*
+ * If we failed due to lack of memory in our DBT arrays,
+ * retry.
+ */
+ if (err != ENOMEM)
+ break;
+ if (!locked_dbt_realloc(&ldata, jnienv, dblogc->dbenv))
+ break;
+ }
+ out1:
+ locked_dbt_put(&ldata, jnienv, dblogc->dbenv);
+ if (!DB_RETOK_LGGET(err)) {
+ if (verify_dbt(jnienv, err, &ldata))
+ verify_return(jnienv, err, 0);
+ }
+ return (err);
+}
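
The ENOMEM/retry dance above exists because the Java Dbt wraps a fixed byte array; plain C callers usually sidestep it by letting the library size the buffer, as in this sketch (handle names assumed).

	#include <stdlib.h>
	#include <string.h>
	#include "db.h"

	int
	log_cursor_example(DB_ENV *dbenv)
	{
		DB_LOGC *logc;
		DB_LSN lsn;
		DBT data;
		int ret, t_ret;

		if ((ret = dbenv->log_cursor(dbenv, &logc, 0)) != 0)
			return (ret);

		memset(&data, 0, sizeof(data));
		data.flags = DB_DBT_REALLOC;	/* the library grows the buffer */

		while ((ret = logc->get(logc, &lsn, &data, DB_NEXT)) == 0) {
			/* data.data/data.size hold the raw record at lsn. */
		}
		if (ret == DB_NOTFOUND)		/* end of log is the normal stop */
			ret = 0;

		if (data.data != NULL)
			free(data.data);	/* DB_DBT_REALLOC memory, default allocator */
		if ((t_ret = logc->close(logc, 0)) != 0 && ret == 0)
			ret = t_ret;
		return (ret);
	}
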
+
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbLogc_finalize
+ (JNIEnv *jnienv, jobject jthis)
+{
+ /*
+ * Free any data related to DB_LOGC here.
+ * If we ever have java-only data embedded in the DB_LOGC
+ * and need to do this, we'll have to track DbLogc's
+ * according to which DbEnv owns them, just as
+ * we track Db's according to which DbEnv owns them.
+ * That's necessary to avoid double freeing that
+ * comes about when closes interact with GC.
+ */
+
+#ifdef DIAGNOSTIC
+ DB_LOGC *dblogc;
+
+ dblogc = get_DB_LOGC(jnienv, jthis);
+ if (dblogc != NULL)
+ fprintf(stderr, "Java API: DbLogc has not been closed\n");
+#else
+
+ COMPQUIET(jnienv, NULL);
+ COMPQUIET(jthis, NULL);
+
+#endif
+}
diff --git a/bdb/libdb_java/java_DbLsn.c b/bdb/libdb_java/java_DbLsn.c
index 8f26f2ecb58..d53082826f4 100644
--- a/bdb/libdb_java/java_DbLsn.c
+++ b/bdb/libdb_java/java_DbLsn.c
@@ -1,13 +1,13 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1997, 1998, 1999, 2000
+ * Copyright (c) 1997-2002
* Sleepycat Software. All rights reserved.
*/
#include "db_config.h"
#ifndef lint
-static const char revid[] = "$Id: java_DbLsn.c,v 11.5 2000/11/30 00:58:39 ubell Exp $";
+static const char revid[] = "$Id: java_DbLsn.c,v 11.12 2002/05/07 16:12:41 dda Exp $";
#endif /* not lint */
#include <jni.h>
@@ -15,7 +15,6 @@ static const char revid[] = "$Id: java_DbLsn.c,v 11.5 2000/11/30 00:58:39 ubell
#include <string.h>
#include <stdio.h> /* needed for FILENAME_MAX */
-#include "db.h"
#include "db_int.h"
#include "java_util.h"
#include "com_sleepycat_db_DbLsn.h"
@@ -23,8 +22,9 @@ static const char revid[] = "$Id: java_DbLsn.c,v 11.5 2000/11/30 00:58:39 ubell
JNIEXPORT void JNICALL Java_com_sleepycat_db_DbLsn_init_1lsn
(JNIEnv *jnienv, /*DbLsn*/ jobject jthis)
{
- /* Note: the DB_LSN object stored in the private_dbobj_
- * is allocated in get_DbLsn().
+ /*
+ * Note: the DB_LSN object stored in the private_dbobj_
+ * is allocated in get_DbLsn() or get_DB_LSN().
*/
COMPQUIET(jnienv, NULL);
@@ -38,6 +38,6 @@ JNIEXPORT void JNICALL Java_com_sleepycat_db_DbLsn_finalize
dblsn = get_DB_LSN(jnienv, jthis);
if (dblsn) {
- free(dblsn);
+ (void)__os_free(NULL, dblsn);
}
}
diff --git a/bdb/libdb_java/java_DbTxn.c b/bdb/libdb_java/java_DbTxn.c
index 67c2599a6fc..51195501b77 100644
--- a/bdb/libdb_java/java_DbTxn.c
+++ b/bdb/libdb_java/java_DbTxn.c
@@ -1,13 +1,13 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1997, 1998, 1999, 2000
+ * Copyright (c) 1997-2002
* Sleepycat Software. All rights reserved.
*/
#include "db_config.h"
#ifndef lint
-static const char revid[] = "$Id: java_DbTxn.c,v 11.3 2000/09/18 18:32:25 dda Exp $";
+static const char revid[] = "$Id: java_DbTxn.c,v 11.16 2002/08/06 05:19:05 bostic Exp $";
#endif /* not lint */
#include <jni.h>
@@ -15,33 +15,16 @@ static const char revid[] = "$Id: java_DbTxn.c,v 11.3 2000/09/18 18:32:25 dda Ex
#include <stdlib.h>
#include <string.h>
-#include "db.h"
+#include "db_int.h"
#include "java_util.h"
#include "com_sleepycat_db_DbTxn.h"
-JNIEXPORT void JNICALL Java_com_sleepycat_db_DbTxn_abort
- (JNIEnv *jnienv, jobject jthis)
-{
- int err;
- DB_TXN *dbtxn = get_DB_TXN(jnienv, jthis);
- if (!verify_non_null(jnienv, dbtxn))
- return;
-
- err = txn_abort(dbtxn);
- verify_return(jnienv, err, 0);
-}
-
-JNIEXPORT void JNICALL Java_com_sleepycat_db_DbTxn_commit
- (JNIEnv *jnienv, jobject jthis, jint flags)
-{
- int err;
- DB_TXN *dbtxn = get_DB_TXN(jnienv, jthis);
- if (!verify_non_null(jnienv, dbtxn))
- return;
-
- err = txn_commit(dbtxn, flags);
- verify_return(jnienv, err, 0);
-}
+JAVADB_METHOD(DbTxn_abort, (JAVADB_ARGS), DB_TXN,
+ abort, (c_this))
+JAVADB_METHOD(DbTxn_commit, (JAVADB_ARGS, jint flags), DB_TXN,
+ commit, (c_this, flags))
+JAVADB_METHOD(DbTxn_discard, (JAVADB_ARGS, jint flags), DB_TXN,
+ discard, (c_this, flags))
JNIEXPORT jint JNICALL Java_com_sleepycat_db_DbTxn_id
(JNIEnv *jnienv, jobject jthis)
@@ -51,32 +34,34 @@ JNIEXPORT jint JNICALL Java_com_sleepycat_db_DbTxn_id
if (!verify_non_null(jnienv, dbtxn))
return (-1);
- /* No error to check for from txn_id */
- retval = txn_id(dbtxn);
+ /* No error to check for from DB_TXN->id */
+ retval = dbtxn->id(dbtxn);
return (retval);
}
JNIEXPORT void JNICALL Java_com_sleepycat_db_DbTxn_prepare
- (JNIEnv *jnienv, jobject jthis)
+ (JNIEnv *jnienv, jobject jthis, jbyteArray gid)
{
int err;
- DB_TXN *dbtxn = get_DB_TXN(jnienv, jthis);
+ DB_TXN *dbtxn;
+ jbyte *c_array;
+
+ dbtxn = get_DB_TXN(jnienv, jthis);
if (!verify_non_null(jnienv, dbtxn))
return;
- err = txn_prepare(dbtxn);
+ if (gid == NULL ||
+ (*jnienv)->GetArrayLength(jnienv, gid) < DB_XIDDATASIZE) {
+ report_exception(jnienv, "DbTxn.prepare gid array "
+ "must be >= 128 bytes", EINVAL, 0);
+ return;
+ }
+ c_array = (*jnienv)->GetByteArrayElements(jnienv, gid, NULL);
+ err = dbtxn->prepare(dbtxn, (u_int8_t *)c_array);
+ (*jnienv)->ReleaseByteArrayElements(jnienv, gid, c_array, 0);
verify_return(jnienv, err, 0);
}
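
A C-level sketch of the prepare call, showing why the wrapper insists on a DB_XIDDATASIZE (128-byte) gid; the global id contents here are made up.

	#include <string.h>
	#include "db.h"

	int
	txn_prepare_example(DB_ENV *dbenv)
	{
		DB_TXN *txn;
		u_int8_t gid[DB_XIDDATASIZE];	/* always 128 bytes */
		int ret;

		if ((ret = dbenv->txn_begin(dbenv, NULL, &txn, 0)) != 0)
			return (ret);

		/* ... transactional work ... */

		memset(gid, 0, sizeof(gid));
		memcpy(gid, "example-gtrid", sizeof("example-gtrid") - 1);

		if ((ret = txn->prepare(txn, gid)) != 0) {
			(void)txn->abort(txn);
			return (ret);
		}
		/* A global transaction manager later commits or aborts by gid. */
		return (0);
	}
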
-JNIEXPORT void JNICALL Java_com_sleepycat_db_DbTxn_finalize
- (JNIEnv *jnienv, jobject jthis)
-{
- DB_TXN *dbtxn = get_DB_TXN(jnienv, jthis);
- if (dbtxn) {
- /* Free any data related to DB_TXN here
- * Note: we don't make a policy of doing
- * a commit or abort here. The txnmgr
- * should be closed, and DB will clean up.
- */
- }
-}
+JAVADB_METHOD(DbTxn_set_1timeout,
+ (JAVADB_ARGS, jlong timeout, jint flags), DB_TXN,
+ set_timeout, (c_this, (u_int32_t)timeout, flags))
diff --git a/bdb/libdb_java/java_DbUtil.c b/bdb/libdb_java/java_DbUtil.c
new file mode 100644
index 00000000000..edcbc6d9f15
--- /dev/null
+++ b/bdb/libdb_java/java_DbUtil.c
@@ -0,0 +1,27 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2001-2002
+ * Sleepycat Software. All rights reserved.
+ */
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: java_DbUtil.c,v 1.5 2002/01/11 15:52:44 bostic Exp $";
+#endif /* not lint */
+
+#include <jni.h>
+
+#include "db_int.h"
+#include "java_util.h"
+#include "com_sleepycat_db_DbUtil.h"
+
+JNIEXPORT jboolean JNICALL
+Java_com_sleepycat_db_DbUtil_is_1big_1endian (JNIEnv *jnienv,
+ jclass jthis_class)
+{
+ COMPQUIET(jnienv, NULL);
+ COMPQUIET(jthis_class, NULL);
+
+ return (__db_isbigendian() ? JNI_TRUE : JNI_FALSE);
+}
diff --git a/bdb/libdb_java/java_DbXAResource.c b/bdb/libdb_java/java_DbXAResource.c
new file mode 100644
index 00000000000..609529bfe83
--- /dev/null
+++ b/bdb/libdb_java/java_DbXAResource.c
@@ -0,0 +1,288 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2001
+ * Sleepycat Software. All rights reserved.
+ */
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: java_DbXAResource.c,v 11.6 2002/08/06 05:19:06 bostic Exp $";
+#endif /* not lint */
+
+#include <jni.h>
+#include <errno.h>
+#include <stdlib.h>
+#include <string.h>
+#ifdef DIAGNOSTIC
+#include <stdio.h>
+#endif
+
+#include "db_int.h"
+#include "java_util.h"
+#include "dbinc/xa.h"
+#include "dbinc_auto/xa_ext.h"
+#include "com_sleepycat_db_xa_DbXAResource.h"
+
+JNIEXPORT void JNICALL Java_com_sleepycat_db_xa_DbXAResource__1init
+ (JNIEnv *jnienv, jobject jthis, jstring home, jint rmid, jint flags)
+{
+ int err;
+ LOCKED_STRING ls_home;
+ jclass cl;
+ jmethodID mid;
+
+ COMPQUIET(jthis, NULL);
+ if (locked_string_get(&ls_home, jnienv, home) != 0)
+ goto out;
+ if ((err = __db_xa_open((char *)ls_home.string,
+ rmid, flags)) != XA_OK) {
+ verify_return(jnienv, err, EXCEPTION_XA);
+ }
+
+ /*
+ * Now create the DbEnv object, it will get attached
+ * to the DB_ENV just made in __db_xa_open.
+ */
+ if ((cl = get_class(jnienv, name_DB_ENV)) == NULL)
+ goto out;
+
+ mid = (*jnienv)->GetStaticMethodID(jnienv, cl,
+ "_create_DbEnv_for_XA", "(II)V");
+ (*jnienv)->CallStaticVoidMethod(jnienv, cl, mid, 0, rmid);
+
+ out:
+ locked_string_put(&ls_home, jnienv);
+}
+
+JNIEXPORT void JNICALL Java_com_sleepycat_db_xa_DbXAResource__1close
+ (JNIEnv *jnienv, jobject jthis, jstring home, jint rmid, jint flags)
+{
+ int err;
+ LOCKED_STRING ls_home;
+
+ COMPQUIET(jthis, NULL);
+ if (locked_string_get(&ls_home, jnienv, home) != 0)
+ goto out;
+ if ((err = __db_xa_close((char *)ls_home.string,
+ rmid, flags)) != XA_OK)
+ verify_return(jnienv, err, EXCEPTION_XA);
+ out:
+ locked_string_put(&ls_home, jnienv);
+}
+
+JNIEXPORT void JNICALL Java_com_sleepycat_db_xa_DbXAResource__1commit
+ (JNIEnv *jnienv, jobject jthis, jobject jxid, jint rmid,
+ jboolean onePhase)
+{
+ XID xid;
+ long flags;
+ int err;
+
+ COMPQUIET(jthis, NULL);
+ if (!get_XID(jnienv, jxid, &xid))
+ return;
+ flags = 0;
+ if (onePhase == JNI_TRUE)
+ flags |= TMONEPHASE;
+ if ((err = __db_xa_commit(&xid, rmid, flags)) != XA_OK)
+ verify_return(jnienv, err, EXCEPTION_XA);
+}
+
+JNIEXPORT void JNICALL Java_com_sleepycat_db_xa_DbXAResource__1end
+ (JNIEnv *jnienv, jobject jthis, jobject jxid, jint rmid, jint flags)
+{
+ XID xid;
+ int err;
+
+ COMPQUIET(jthis, NULL);
+ if (!get_XID(jnienv, jxid, &xid))
+ return;
+ if ((err = __db_xa_end(&xid, rmid, flags)) != XA_OK)
+ verify_return(jnienv, err, EXCEPTION_XA);
+}
+
+JNIEXPORT void JNICALL Java_com_sleepycat_db_xa_DbXAResource__1forget
+ (JNIEnv *jnienv, jobject jthis, jobject jxid, jint rmid)
+{
+ XID xid;
+ int err;
+
+ COMPQUIET(jthis, NULL);
+ if (!get_XID(jnienv, jxid, &xid))
+ return;
+ if ((err = __db_xa_forget(&xid, rmid, 0)) != XA_OK)
+ verify_return(jnienv, err, EXCEPTION_XA);
+}
+
+JNIEXPORT jint JNICALL Java_com_sleepycat_db_xa_DbXAResource__1prepare
+ (JNIEnv *jnienv, jobject jthis, jobject jxid, jint rmid)
+{
+ XID xid;
+ int err;
+
+ COMPQUIET(jthis, NULL);
+ if (!get_XID(jnienv, jxid, &xid))
+ return (0);
+ err = __db_xa_prepare(&xid, rmid, 0);
+ if (err != XA_OK && err != XA_RDONLY)
+ verify_return(jnienv, err, EXCEPTION_XA);
+
+ return (err);
+}
+
+JNIEXPORT jobjectArray JNICALL Java_com_sleepycat_db_xa_DbXAResource__1recover
+ (JNIEnv *jnienv, jobject jthis, jint rmid, jint flags)
+{
+ XID *xids;
+ int err;
+ int total;
+ int cnt;
+ int i;
+ int curflags;
+ size_t nbytes;
+ jclass xid_class;
+ jmethodID mid;
+ jobject obj;
+ jobjectArray retval;
+
+ COMPQUIET(jthis, NULL);
+ total = 0;
+ cnt = 0;
+ xids = NULL;
+ flags &= ~(DB_FIRST | DB_LAST | DB_NEXT);
+
+ /* Repeatedly call __db_xa_recover to fill up an array of XIDs */
+ curflags = flags | DB_FIRST;
+ do {
+ total += cnt;
+ nbytes = sizeof(XID) * (total + 10);
+ if ((err = __os_realloc(NULL, nbytes, &xids)) != 0) {
+ if (xids != NULL)
+ __os_free(NULL, xids);
+ verify_return(jnienv, XAER_NOTA, EXCEPTION_XA);
+ return (NULL);
+ }
+ cnt = __db_xa_recover(&xids[total], 10, rmid, curflags);
+ curflags = flags | DB_NEXT;
+ } while (cnt > 0);
+
+ /* Note: xids must stay allocated until the DbXids below are built. */
+ if (cnt < 0) {
+ __os_free(NULL, xids);
+ verify_return(jnienv, cnt, EXCEPTION_XA);
+ return (NULL);
+ }
+
+ /* Create the java DbXid array and fill it up */
+ if ((xid_class = get_class(jnienv, name_DB_XID)) == NULL) {
+ __os_free(NULL, xids);
+ return (NULL);
+ }
+ mid = (*jnienv)->GetMethodID(jnienv, xid_class, "<init>",
+ "(I[B[B)V");
+ if ((retval = (*jnienv)->NewObjectArray(jnienv, total, xid_class, 0))
+ == NULL)
+ goto out;
+
+ for (i = 0; i < total; i++) {
+ jobject gtrid;
+ jobject bqual;
+ jsize gtrid_len;
+ jsize bqual_len;
+
+ gtrid_len = (jsize)xids[i].gtrid_length;
+ bqual_len = (jsize)xids[i].bqual_length;
+ gtrid = (*jnienv)->NewByteArray(jnienv, gtrid_len);
+ bqual = (*jnienv)->NewByteArray(jnienv, bqual_len);
+ if (gtrid == NULL || bqual == NULL)
+ goto out;
+ (*jnienv)->SetByteArrayRegion(jnienv, gtrid, 0, gtrid_len,
+ (jbyte *)&xids[i].data[0]);
+ (*jnienv)->SetByteArrayRegion(jnienv, bqual, 0, bqual_len,
+ (jbyte *)&xids[i].data[gtrid_len]);
+ if ((obj = (*jnienv)->NewObject(jnienv, xid_class, mid,
+ (jint)xids[i].formatID, gtrid, bqual)) == NULL)
+ goto out;
+ (*jnienv)->SetObjectArrayElement(jnienv, retval, i, obj);
+ }
+out: __os_free(NULL, xids);
+ return (retval);
+}
+
+JNIEXPORT void JNICALL Java_com_sleepycat_db_xa_DbXAResource__1rollback
+ (JNIEnv *jnienv, jobject jthis, jobject jxid, jint rmid)
+{
+ XID xid;
+ int err;
+
+ COMPQUIET(jthis, NULL);
+ if (!get_XID(jnienv, jxid, &xid))
+ return;
+ if ((err = __db_xa_rollback(&xid, rmid, 0)) != XA_OK)
+ verify_return(jnienv, err, EXCEPTION_XA);
+}
+
+JNIEXPORT void JNICALL Java_com_sleepycat_db_xa_DbXAResource__1start
+ (JNIEnv *jnienv, jobject jthis, jobject jxid, jint rmid, jint flags)
+{
+ XID xid;
+ int err;
+
+ COMPQUIET(jthis, NULL);
+ if (!get_XID(jnienv, jxid, &xid))
+ return;
+
+ if ((err = __db_xa_start(&xid, rmid, flags)) != XA_OK)
+ verify_return(jnienv, err, EXCEPTION_XA);
+}
+
+JNIEXPORT jobject JNICALL Java_com_sleepycat_db_xa_DbXAResource_xa_1attach
+ (JNIEnv *jnienv, jclass jthisclass, jobject jxid, jobject jrmid)
+{
+ XID xid;
+ XID *xidp;
+ int ret;
+ DB_ENV *env;
+ DB_TXN *txn;
+ int rmid;
+ int *rmidp;
+ jobject jtxn;
+ jobject jenv;
+ jclass cl;
+ jmethodID mid;
+
+ COMPQUIET(jthisclass, NULL);
+ if (jxid == NULL) {
+ xidp = NULL;
+ }
+ else {
+ xidp = &xid;
+ if (!get_XID(jnienv, jxid, &xid))
+ return (NULL);
+ }
+ if (jrmid == NULL) {
+ rmidp = NULL;
+ }
+ else {
+ rmidp = &rmid;
+ rmid = (int)(*jnienv)->CallIntMethod(jnienv, jrmid,
+ mid_Integer_intValue);
+ }
+
+ if ((ret = db_env_xa_attach(rmidp, xidp, &env, &txn)) != 0) {
+ /*
+ * DB_NOTFOUND is a normal return; it means we
+ * have no current transaction.
+ */
+ if (ret != DB_NOTFOUND)
+ verify_return(jnienv, ret, 0);
+ return (NULL);
+ }
+
+ jenv = ((DB_ENV_JAVAINFO *)env->api2_internal)->jenvref;
+ jtxn = get_DbTxn(jnienv, txn);
+ if ((cl = get_class(jnienv, name_DB_XAATTACH)) == NULL)
+ return (NULL);
+ mid = (*jnienv)->GetMethodID(jnienv, cl, "<init>",
+ "(Lcom/sleepycat/db/DbEnv;Lcom/sleepycat/db/DbTxn;)V");
+ return (*jnienv)->NewObject(jnienv, cl, mid, jenv, jtxn);
+}
diff --git a/bdb/libdb_java/java_Dbc.c b/bdb/libdb_java/java_Dbc.c
index f1d0acdec85..63ab368fc03 100644
--- a/bdb/libdb_java/java_Dbc.c
+++ b/bdb/libdb_java/java_Dbc.c
@@ -1,13 +1,13 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1997, 1998, 1999, 2000
+ * Copyright (c) 1997-2002
* Sleepycat Software. All rights reserved.
*/
#include "db_config.h"
#ifndef lint
-static const char revid[] = "$Id: java_Dbc.c,v 11.10 2000/10/25 19:54:55 dda Exp $";
+static const char revid[] = "$Id: java_Dbc.c,v 11.23 2002/08/06 05:19:06 bostic Exp $";
#endif /* not lint */
#include <jni.h>
@@ -18,7 +18,6 @@ static const char revid[] = "$Id: java_Dbc.c,v 11.10 2000/10/25 19:54:55 dda Exp
#include <stdio.h>
#endif
-#include "db.h"
#include "db_int.h"
#include "java_util.h"
#include "com_sleepycat_db_Dbc.h"
@@ -51,20 +50,8 @@ JNIEXPORT jint JNICALL Java_com_sleepycat_db_Dbc_count
return (count);
}
-JNIEXPORT jint JNICALL Java_com_sleepycat_db_Dbc_del
- (JNIEnv *jnienv, jobject jthis, jint flags)
-{
- int err;
- DBC *dbc = get_DBC(jnienv, jthis);
-
- if (!verify_non_null(jnienv, dbc))
- return (0);
- err = dbc->c_del(dbc, flags);
- if (err != DB_KEYEMPTY) {
- verify_return(jnienv, err, 0);
- }
- return (err);
-}
+JAVADB_METHOD_INT(Dbc_del, (JAVADB_ARGS, jint flags), DBC,
+ c_del, (c_this, flags), DB_RETOK_DBCDEL)
JNIEXPORT jobject JNICALL Java_com_sleepycat_db_Dbc_dup
(JNIEnv *jnienv, jobject jthis, jint flags)
@@ -88,10 +75,12 @@ JNIEXPORT jint JNICALL Java_com_sleepycat_db_Dbc_get
{
int err, retry, op_flags;
DBC *dbc;
- JDBT dbkey, dbdata;
+ DB_ENV *dbenv;
+ LOCKED_DBT lkey, ldata;
OpKind keyop, dataop;
- /* Depending on flags, the user may be supplying the key,
+ /*
+ * Depending on flags, the user may be supplying the key,
* or else we may have to retrieve it.
*/
err = 0;
@@ -106,38 +95,124 @@ JNIEXPORT jint JNICALL Java_com_sleepycat_db_Dbc_get
op_flags == DB_SET_RECNO) {
keyop = inOutOp;
}
- else if (op_flags == DB_GET_BOTH) {
+ else if (op_flags == DB_GET_BOTH || op_flags == DB_GET_BOTH_RANGE) {
keyop = inOutOp;
dataop = inOutOp;
}
dbc = get_DBC(jnienv, jthis);
- if (jdbt_lock(&dbkey, jnienv, key, keyop) != 0)
+ if (!verify_non_null(jnienv, dbc))
+ return (0);
+ dbenv = dbc->dbp->dbenv;
+
+ if (locked_dbt_get(&lkey, jnienv, dbenv, key, keyop) != 0)
goto out2;
- if (jdbt_lock(&dbdata, jnienv, data, dataop) != 0)
+ if (locked_dbt_get(&ldata, jnienv, dbenv, data, dataop) != 0)
goto out1;
if (!verify_non_null(jnienv, dbc))
goto out1;
for (retry = 0; retry < 3; retry++) {
- err = dbc->c_get(dbc, &dbkey.dbt->dbt, &dbdata.dbt->dbt, flags);
+ err = dbc->c_get(dbc,
+ &lkey.javainfo->dbt, &ldata.javainfo->dbt, flags);
- /* If we failed due to lack of memory in our DBT arrays,
+ /*
+ * If we failed due to lack of memory in our DBT arrays,
* retry.
*/
if (err != ENOMEM)
break;
- if (!jdbt_realloc(&dbkey, jnienv) && !jdbt_realloc(&dbdata, jnienv))
+ if (!locked_dbt_realloc(&lkey, jnienv,
+ dbenv) && !locked_dbt_realloc(&ldata, jnienv, dbenv))
break;
}
- if (err != DB_NOTFOUND) {
- verify_return(jnienv, err, 0);
+ out1:
+ locked_dbt_put(&ldata, jnienv, dbenv);
+ out2:
+ locked_dbt_put(&lkey, jnienv, dbenv);
+ if (!DB_RETOK_DBCGET(err)) {
+ if (verify_dbt(jnienv, err, &lkey) &&
+ verify_dbt(jnienv, err, &ldata))
+ verify_return(jnienv, err, 0);
+ }
+ return (err);
+}
+
+JNIEXPORT jint JNICALL Java_com_sleepycat_db_Dbc_pget
+ (JNIEnv *jnienv, jobject jthis,
+ /*Dbt*/ jobject key, /*Dbt*/ jobject pkey, /*Dbt*/ jobject data, jint flags)
+{
+ int err, retry, op_flags;
+ DBC *dbc;
+ DB_ENV *dbenv;
+ LOCKED_DBT lkey, lpkey, ldata;
+ OpKind keyop, pkeyop, dataop;
+
+ /*
+ * Depending on flags, the user may be supplying the key,
+ * or else we may have to retrieve it.
+ */
+ err = 0;
+ keyop = outOp;
+ pkeyop = outOp;
+ dataop = outOp;
+
+ op_flags = flags & DB_OPFLAGS_MASK;
+ if (op_flags == DB_SET) {
+ keyop = inOp;
+ }
+ else if (op_flags == DB_SET_RANGE ||
+ op_flags == DB_SET_RECNO) {
+ keyop = inOutOp;
+ }
+ else if (op_flags == DB_GET_BOTH || op_flags == DB_GET_BOTH_RANGE) {
+ pkeyop = inOutOp;
+ keyop = inOutOp;
+ dataop = inOutOp;
+ }
+
+ dbc = get_DBC(jnienv, jthis);
+ if (!verify_non_null(jnienv, dbc))
+ return (0);
+ dbenv = dbc->dbp->dbenv;
+ if (locked_dbt_get(&lkey, jnienv, dbenv, key, keyop) != 0)
+ goto out3;
+ if (locked_dbt_get(&lpkey, jnienv, dbenv, pkey, pkeyop) != 0)
+ goto out2;
+ if (locked_dbt_get(&ldata, jnienv, dbenv, data, dataop) != 0)
+ goto out1;
+
+ if (!verify_non_null(jnienv, dbc))
+ goto out1;
+
+ for (retry = 0; retry < 3; retry++) {
+ err = dbc->c_pget(dbc, &lkey.javainfo->dbt,
+ &lpkey.javainfo->dbt, &ldata.javainfo->dbt, flags);
+
+ /*
+ * If we failed due to lack of memory in our DBT arrays,
+ * retry.
+ */
+ if (err != ENOMEM)
+ break;
+ if (!locked_dbt_realloc(&lkey, jnienv, dbenv) &&
+ !locked_dbt_realloc(&lpkey, jnienv, dbenv) &&
+ !locked_dbt_realloc(&ldata, jnienv, dbenv))
+ break;
}
out1:
- jdbt_unlock(&dbdata, jnienv);
+ locked_dbt_put(&ldata, jnienv, dbenv);
out2:
- jdbt_unlock(&dbkey, jnienv);
+ locked_dbt_put(&lpkey, jnienv, dbenv);
+ out3:
+ locked_dbt_put(&lkey, jnienv, dbenv);
+ if (!DB_RETOK_DBCGET(err)) {
+ if (verify_dbt(jnienv, err, &lkey) &&
+ verify_dbt(jnienv, err, &lpkey) &&
+ verify_dbt(jnienv, err, &ldata))
+ verify_return(jnienv, err, 0);
+ }
return (err);
}
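
A sketch of the underlying DBC->c_pget call on a secondary-index cursor; `secondary` is assumed to be a DB handle already associated with its primary, and the key bytes are illustrative only.

	#include <string.h>
	#include "db.h"

	int
	pget_example(DB *secondary)
	{
		DBC *dbc;
		DBT skey, pkey, data;
		int ret, t_ret;

		if ((ret = secondary->cursor(secondary, NULL, &dbc, 0)) != 0)
			return (ret);

		memset(&skey, 0, sizeof(skey));
		memset(&pkey, 0, sizeof(pkey));
		memset(&data, 0, sizeof(data));
		skey.data = "smith";			/* hypothetical secondary key */
		skey.size = sizeof("smith") - 1;

		/* DB_SET supplies the secondary key; pkey and data come back. */
		ret = dbc->c_pget(dbc, &skey, &pkey, &data, DB_SET);

		if ((t_ret = dbc->c_close(dbc)) != 0 && ret == 0)
			ret = t_ret;
		return (ret);
	}
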
@@ -147,32 +222,39 @@ JNIEXPORT jint JNICALL Java_com_sleepycat_db_Dbc_put
{
int err;
DBC *dbc;
- JDBT dbkey, dbdata;
+ DB_ENV *dbenv;
+ LOCKED_DBT lkey, ldata;
+ OpKind keyop;
err = 0;
dbc = get_DBC(jnienv, jthis);
- if (jdbt_lock(&dbkey, jnienv, key, inOp) != 0)
+ if (!verify_non_null(jnienv, dbc))
+ return (0);
+ dbenv = dbc->dbp->dbenv;
+ keyop = (dbc->dbp->type == DB_RECNO &&
+ (flags == DB_BEFORE || flags == DB_AFTER)) ? outOp : inOp;
+ if (locked_dbt_get(&lkey, jnienv, dbenv, key, keyop) != 0)
goto out2;
- if (jdbt_lock(&dbdata, jnienv, data, inOp) != 0)
+ if (locked_dbt_get(&ldata, jnienv, dbenv, data, inOp) != 0)
goto out1;
if (!verify_non_null(jnienv, dbc))
goto out1;
- err = dbc->c_put(dbc, &dbkey.dbt->dbt, &dbdata.dbt->dbt, flags);
- if (err != DB_KEYEXIST) {
+ err = dbc->c_put(dbc, &lkey.javainfo->dbt, &ldata.javainfo->dbt, flags);
+ if (!DB_RETOK_DBCPUT(err))
verify_return(jnienv, err, 0);
- }
out1:
- jdbt_unlock(&dbdata, jnienv);
+ locked_dbt_put(&ldata, jnienv, dbenv);
out2:
- jdbt_unlock(&dbkey, jnienv);
+ locked_dbt_put(&lkey, jnienv, dbenv);
return (err);
}
JNIEXPORT void JNICALL Java_com_sleepycat_db_Dbc_finalize
(JNIEnv *jnienv, jobject jthis)
{
- /* Free any data related to DBC here.
+ /*
+ * Free any data related to DBC here.
* If we ever have java-only data embedded in the DBC
* and need to do this, we'll have to track Dbc's
* according to which Db owns them, just as
diff --git a/bdb/libdb_java/java_Dbt.c b/bdb/libdb_java/java_Dbt.c
index 0e094da6a2d..d21109f3408 100644
--- a/bdb/libdb_java/java_Dbt.c
+++ b/bdb/libdb_java/java_Dbt.c
@@ -1,13 +1,13 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1997, 1998, 1999, 2000
+ * Copyright (c) 1997-2002
* Sleepycat Software. All rights reserved.
*/
#include "db_config.h"
#ifndef lint
-static const char revid[] = "$Id: java_Dbt.c,v 11.10 2000/10/25 19:54:55 dda Exp $";
+static const char revid[] = "$Id: java_Dbt.c,v 11.18 2002/06/20 11:11:55 mjc Exp $";
#endif /* not lint */
#include <jni.h>
@@ -15,16 +15,10 @@ static const char revid[] = "$Id: java_Dbt.c,v 11.10 2000/10/25 19:54:55 dda Exp
#include <stdlib.h>
#include <string.h>
-#include "db.h"
+#include "db_int.h"
#include "java_util.h"
#include "com_sleepycat_db_Dbt.h"
-JAVADB_RW_ACCESS(Dbt, jint, size, DBT, size)
-JAVADB_RW_ACCESS(Dbt, jint, ulen, DBT, ulen)
-JAVADB_RW_ACCESS(Dbt, jint, dlen, DBT, dlen)
-JAVADB_RW_ACCESS(Dbt, jint, doff, DBT, doff)
-JAVADB_RW_ACCESS(Dbt, jint, flags, DBT, flags)
-
JNIEXPORT void JNICALL Java_com_sleepycat_db_Dbt_init
(JNIEnv *jnienv, jobject jthis)
{
@@ -34,129 +28,22 @@ JNIEXPORT void JNICALL Java_com_sleepycat_db_Dbt_init
set_private_dbobj(jnienv, name_DBT, jthis, dbtji);
}
-JNIEXPORT void JNICALL Java_com_sleepycat_db_Dbt_internal_1set_1data
- (JNIEnv *jnienv, jobject jthis, jbyteArray array)
-{
- DBT_JAVAINFO *db_this;
-
- db_this = get_DBT_JAVAINFO(jnienv, jthis);
- if (verify_non_null(jnienv, db_this)) {
-
- /* If we previously allocated an array for java,
- * must release reference.
- */
- dbjit_release(db_this, jnienv);
-
- /* Make the array a global ref,
- * it won't be GC'd till we release it.
- */
- if (array)
- array = (jbyteArray)NEW_GLOBAL_REF(jnienv, array);
- db_this->array_ = array;
- }
-}
-
-JNIEXPORT jbyteArray JNICALL Java_com_sleepycat_db_Dbt_get_1data
+JNIEXPORT jbyteArray JNICALL Java_com_sleepycat_db_Dbt_create_1data
(JNIEnv *jnienv, jobject jthis)
{
DBT_JAVAINFO *db_this;
- jbyteArray arr;
+ jbyteArray arr = NULL;
int len;
db_this = get_DBT_JAVAINFO(jnienv, jthis);
if (verify_non_null(jnienv, db_this)) {
- /* XXX this will copy the data on each call to get_data,
- * even if it is unchanged.
- */
- if (db_this->create_array_ != 0) {
- /* XXX we should reuse the existing array if we can */
- len = db_this->dbt.size;
- if (db_this->array_ != NULL)
- DELETE_GLOBAL_REF(jnienv, db_this->array_);
- arr = (*jnienv)->NewByteArray(jnienv, len);
- db_this->array_ =
- (jbyteArray)NEW_GLOBAL_REF(jnienv, arr);
- (*jnienv)->SetByteArrayRegion(jnienv, arr, 0, len,
- db_this->dbt.data);
- }
- return (db_this->array_);
- }
- return (0);
-}
-
-JNIEXPORT void JNICALL Java_com_sleepycat_db_Dbt_set_1offset
- (JNIEnv *jnienv, jobject jthis, jint offset)
-{
- DBT_JAVAINFO *db_this;
-
- db_this = get_DBT_JAVAINFO(jnienv, jthis);
- if (verify_non_null(jnienv, db_this)) {
- db_this->offset_ = offset;
- }
-}
-
-JNIEXPORT jint JNICALL Java_com_sleepycat_db_Dbt_get_1offset
- (JNIEnv *jnienv, jobject jthis)
-{
- DBT_JAVAINFO *db_this;
-
- db_this = get_DBT_JAVAINFO(jnienv, jthis);
- if (verify_non_null(jnienv, db_this)) {
- return db_this->offset_;
- }
- return (0);
-}
-
-JNIEXPORT void JNICALL Java_com_sleepycat_db_Dbt_set_1recno_1key_1data(JNIEnv *jnienv, jobject jthis, jint value)
-{
- JDBT jdbt;
-
- if (jdbt_lock(&jdbt, jnienv, jthis, inOp) != 0)
- goto out;
-
- if (!jdbt.dbt->dbt.data ||
- jdbt.java_array_len_ < sizeof(db_recno_t)) {
- char buf[200];
- sprintf(buf, "set_recno_key_data error: %p %p %d %d",
- &jdbt.dbt->dbt, jdbt.dbt->dbt.data,
- jdbt.dbt->dbt.ulen, sizeof(db_recno_t));
- report_exception(jnienv, buf, 0, 0);
- }
- else {
- *(db_recno_t*)(jdbt.dbt->dbt.data) = value;
+ len = db_this->dbt.size;
+ if ((arr = (*jnienv)->NewByteArray(jnienv, len)) == NULL)
+ goto out;
+ (*jnienv)->SetByteArrayRegion(jnienv, arr, 0, len,
+ db_this->dbt.data);
}
- out:
- jdbt_unlock(&jdbt, jnienv);
-}
-
-JNIEXPORT jint JNICALL Java_com_sleepycat_db_Dbt_get_1recno_1key_1data(JNIEnv *jnienv, jobject jthis)
-{
- jint ret;
- JDBT jdbt;
-
- ret = 0;
-
- /* Although this is kind of like "retrieve", we don't support
- * DB_DBT_MALLOC for this operation, so we tell jdbt_lock
- * that is not a retrieve.
- */
- if (jdbt_lock(&jdbt, jnienv, jthis, inOp) != 0)
- goto out;
-
- if (!jdbt.dbt->dbt.data ||
- jdbt.java_array_len_ < sizeof(db_recno_t)) {
- char buf[200];
- sprintf(buf, "get_recno_key_data error: %p %p %d %d",
- &jdbt.dbt->dbt, jdbt.dbt->dbt.data,
- jdbt.dbt->dbt.ulen, sizeof(db_recno_t));
- report_exception(jnienv, buf, 0, 0);
- }
- else {
- ret = *(db_recno_t*)(jdbt.dbt->dbt.data);
- }
- out:
- jdbt_unlock(&jdbt, jnienv);
- return (ret);
+out: return (arr);
}
JNIEXPORT void JNICALL Java_com_sleepycat_db_Dbt_finalize
@@ -167,10 +54,6 @@ JNIEXPORT void JNICALL Java_com_sleepycat_db_Dbt_finalize
dbtji = get_DBT_JAVAINFO(jnienv, jthis);
if (dbtji) {
/* Free any data related to DBT here */
- dbjit_release(dbtji, jnienv);
-
- /* Extra paranoia */
- memset(dbtji, 0, sizeof(DBT_JAVAINFO));
- free(dbtji);
+ dbjit_destroy(dbtji);
}
}
diff --git a/bdb/libdb_java/java_info.c b/bdb/libdb_java/java_info.c
index ccd469fa256..22fcbd23d46 100644
--- a/bdb/libdb_java/java_info.c
+++ b/bdb/libdb_java/java_info.c
@@ -1,13 +1,13 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1997, 1998, 1999, 2000
+ * Copyright (c) 1997-2002
* Sleepycat Software. All rights reserved.
*/
#include "db_config.h"
#ifndef lint
-static const char revid[] = "$Id: java_info.c,v 11.18 2000/10/28 13:09:39 dda Exp $";
+static const char revid[] = "$Id: java_info.c,v 11.46 2002/08/29 14:22:23 margo Exp $";
#endif /* not lint */
#include <jni.h>
@@ -15,63 +15,74 @@ static const char revid[] = "$Id: java_info.c,v 11.18 2000/10/28 13:09:39 dda Ex
#include <stdlib.h>
#include <string.h>
-#include "db.h"
#include "db_int.h"
#include "java_util.h"
/****************************************************************
*
* Callback functions
- *
*/
+static int Db_assoc_callback(DB *db,
+ const DBT *key,
+ const DBT *data,
+ DBT *retval)
+{
+ DB_JAVAINFO *dbinfo;
+
+ DB_ASSERT(db != NULL);
+ dbinfo = (DB_JAVAINFO *)db->api_internal;
+ return (dbji_call_assoc(dbinfo, db, dbinfo->jdbref,
+ key, data, retval));
+}
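
For context, the C shape of a secondary-key callback and the DB->associate call that installs it (the transaction argument follows the 4.1 interface); the fixed 8-byte record layout is an invented example.

	#include <string.h>
	#include "db.h"

	/* Hypothetical extractor: the secondary key starts at byte 8 of the data. */
	static int
	second_key_callback(DB *secondary, const DBT *key, const DBT *data, DBT *result)
	{
		if (data->size < 8)
			return (DB_DONOTINDEX);	/* skip records that are too short */
		memset(result, 0, sizeof(DBT));
		result->data = (u_int8_t *)data->data + 8;
		result->size = data->size - 8;
		return (0);
	}

	int
	associate_example(DB *primary, DB *secondary)
	{
		return (primary->associate(primary, NULL, secondary,
		    second_key_callback, 0));
	}
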
+
static void Db_feedback_callback(DB *db, int opcode, int percent)
{
DB_JAVAINFO *dbinfo;
DB_ASSERT(db != NULL);
- dbinfo = (DB_JAVAINFO *)db->cj_internal;
- dbji_call_feedback(dbinfo, db, dbinfo->jdbref_, opcode, percent);
+ dbinfo = (DB_JAVAINFO *)db->api_internal;
+ dbji_call_feedback(dbinfo, db, dbinfo->jdbref, opcode, percent);
}
static int Db_append_recno_callback(DB *db, DBT *dbt, db_recno_t recno)
{
DB_JAVAINFO *dbinfo;
- dbinfo = (DB_JAVAINFO *)db->cj_internal;
- return (dbji_call_append_recno(dbinfo, db, dbinfo->jdbref_, dbt, recno));
+ dbinfo = (DB_JAVAINFO *)db->api_internal;
+ return (dbji_call_append_recno(dbinfo, db, dbinfo->jdbref, dbt, recno));
}
static int Db_bt_compare_callback(DB *db, const DBT *dbt1, const DBT *dbt2)
{
DB_JAVAINFO *dbinfo;
- dbinfo = (DB_JAVAINFO *)db->cj_internal;
- return (dbji_call_bt_compare(dbinfo, db, dbinfo->jdbref_, dbt1, dbt2));
+ dbinfo = (DB_JAVAINFO *)db->api_internal;
+ return (dbji_call_bt_compare(dbinfo, db, dbinfo->jdbref, dbt1, dbt2));
}
static size_t Db_bt_prefix_callback(DB *db, const DBT *dbt1, const DBT *dbt2)
{
DB_JAVAINFO *dbinfo;
- dbinfo = (DB_JAVAINFO *)db->cj_internal;
- return (dbji_call_bt_prefix(dbinfo, db, dbinfo->jdbref_, dbt1, dbt2));
+ dbinfo = (DB_JAVAINFO *)db->api_internal;
+ return (dbji_call_bt_prefix(dbinfo, db, dbinfo->jdbref, dbt1, dbt2));
}
static int Db_dup_compare_callback(DB *db, const DBT *dbt1, const DBT *dbt2)
{
DB_JAVAINFO *dbinfo;
- dbinfo = (DB_JAVAINFO *)db->cj_internal;
- return (dbji_call_dup_compare(dbinfo, db, dbinfo->jdbref_, dbt1, dbt2));
+ dbinfo = (DB_JAVAINFO *)db->api_internal;
+ return (dbji_call_dup_compare(dbinfo, db, dbinfo->jdbref, dbt1, dbt2));
}
static u_int32_t Db_h_hash_callback(DB *db, const void *data, u_int32_t len)
{
DB_JAVAINFO *dbinfo;
- dbinfo = (DB_JAVAINFO *)db->cj_internal;
- return (dbji_call_h_hash(dbinfo, db, dbinfo->jdbref_, data, len));
+ dbinfo = (DB_JAVAINFO *)db->api_internal;
+ return (dbji_call_h_hash(dbinfo, db, dbinfo->jdbref, data, len));
}
static void DbEnv_feedback_callback(DB_ENV *dbenv, int opcode, int percent)
@@ -79,91 +90,87 @@ static void DbEnv_feedback_callback(DB_ENV *dbenv, int opcode, int percent)
DB_ENV_JAVAINFO *dbinfo;
DB_ASSERT(dbenv != NULL);
- dbinfo = (DB_ENV_JAVAINFO *)dbenv->cj_internal;
- dbjie_call_feedback(dbinfo, dbenv, dbinfo->jenvref_, opcode, percent);
+ dbinfo = (DB_ENV_JAVAINFO *)dbenv->api2_internal;
+ dbjie_call_feedback(dbinfo, dbenv, dbinfo->jenvref, opcode, percent);
}
-static int DbEnv_recovery_init_callback(DB_ENV *dbenv)
+static int DbEnv_rep_transport_callback(DB_ENV *dbenv,
+ const DBT *control, const DBT *rec,
+ int envid, u_int32_t flags)
{
DB_ENV_JAVAINFO *dbinfo;
- dbinfo = (DB_ENV_JAVAINFO *)dbenv->cj_internal;
- return (dbjie_call_recovery_init(dbinfo, dbenv, dbinfo->jenvref_));
+ dbinfo = (DB_ENV_JAVAINFO *)dbenv->api2_internal;
+ return (dbjie_call_rep_transport(dbinfo, dbenv,
+ dbinfo->jenvref, control, rec, envid, (int)flags));
}
-static int DbEnv_tx_recover_callback(DB_ENV *dbenv, DBT *dbt,
+static int DbEnv_app_dispatch_callback(DB_ENV *dbenv, DBT *dbt,
DB_LSN *lsn, db_recops recops)
{
DB_ENV_JAVAINFO *dbinfo;
DB_ASSERT(dbenv != NULL);
- dbinfo = (DB_ENV_JAVAINFO *)dbenv->cj_internal;
- return dbjie_call_tx_recover(dbinfo, dbenv, dbinfo->jenvref_, dbt,
- lsn, recops);
+ dbinfo = (DB_ENV_JAVAINFO *)dbenv->api2_internal;
+ return (dbjie_call_app_dispatch(dbinfo, dbenv, dbinfo->jenvref, dbt,
+ lsn, recops));
}
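
Every trampoline above follows the same shape: Berkeley DB calls back with only the DB (or DB_ENV) handle, and the glue recovers its per-handle Java state from db->api_internal (dbenv->api2_internal for environments) before dispatching through the dbji_call_*/dbjie_call_* helpers. The assignment of api_internal itself happens in the Db/DbEnv wrapper code outside this hunk; the following is only a hypothetical sketch of that pairing (wire_up_db is not part of the patch, and it assumes this file's db_int.h/java_util.h context plus <errno.h>):

	static int
	wire_up_db(JNIEnv *jnienv, jobject jthis, DB *db, jint flags)
	{
		DB_JAVAINFO *dbji;

		/* Per-handle Java state, created once per Db wrapper. */
		if ((dbji = dbji_construct(jnienv, jthis, flags)) == NULL)
			return (ENOMEM);
		db->api_internal = dbji;	/* what Db_*_callback reads back */
		db->set_bt_compare(db, Db_bt_compare_callback);
		return (0);
	}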
/****************************************************************
*
* Implementation of class DBT_javainfo
- *
*/
DBT_JAVAINFO *
dbjit_construct()
{
DBT_JAVAINFO *dbjit;
+ int err;
+
+ /*XXX should return err*/
+ if ((err = __os_malloc(NULL, sizeof(DBT_JAVAINFO), &dbjit)) != 0)
+ return (NULL);
- dbjit = (DBT_JAVAINFO *)malloc(sizeof(DBT_JAVAINFO));
memset(dbjit, 0, sizeof(DBT_JAVAINFO));
return (dbjit);
}
void dbjit_destroy(DBT_JAVAINFO *dbjit)
{
- /* Sanity check:
- * We cannot delete the global ref because we don't have a JNIEnv.
- */
- if (dbjit->array_ != NULL) {
- fprintf(stderr, "object is not freed\n");
- }
-
+ DB_ASSERT(!F_ISSET(dbjit, DBT_JAVAINFO_LOCKED));
/* Extra paranoia */
- memset(dbjit, 0, sizeof(DB_JAVAINFO));
- free(dbjit);
-}
-
-void dbjit_release(DBT_JAVAINFO *dbjit, JNIEnv *jnienv)
-{
- if (dbjit->array_ != NULL) {
- DELETE_GLOBAL_REF(jnienv, dbjit->array_);
- dbjit->array_ = NULL;
- }
+ memset(dbjit, 0, sizeof(DBT_JAVAINFO));
+ (void)__os_free(NULL, dbjit);
}
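
The XXX note above points out that dbjit_construct() collapses the __os_malloc error into a NULL return. A hypothetical variant that propagates the error instead, using the same three-argument __os_malloc call seen throughout this patch (sketch only, not part of the change):

	static int
	dbjit_construct_err(DBT_JAVAINFO **dbjitp)
	{
		DBT_JAVAINFO *dbjit;
		int err;

		*dbjitp = NULL;
		if ((err = __os_malloc(NULL, sizeof(DBT_JAVAINFO), &dbjit)) != 0)
			return (err);
		memset(dbjit, 0, sizeof(DBT_JAVAINFO));
		*dbjitp = dbjit;
		return (0);
	}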
/****************************************************************
*
* Implementation of class DB_ENV_JAVAINFO
- *
*/
/* create/initialize an object */
DB_ENV_JAVAINFO *
dbjie_construct(JNIEnv *jnienv,
+ jobject jenv,
jobject default_errcall,
int is_dbopen)
{
DB_ENV_JAVAINFO *dbjie;
+ int err;
- dbjie = (DB_ENV_JAVAINFO *)malloc(sizeof(DB_ENV_JAVAINFO));
+ /*XXX should return err*/
+ if ((err = __os_malloc(NULL, sizeof(DB_ENV_JAVAINFO), &dbjie)) != 0)
+ return (NULL);
memset(dbjie, 0, sizeof(DB_ENV_JAVAINFO));
- dbjie->is_dbopen_ = is_dbopen;
+ dbjie->is_dbopen = is_dbopen;
- if ((*jnienv)->GetJavaVM(jnienv, &dbjie->javavm_) != 0) {
- free(dbjie);
+ if ((*jnienv)->GetJavaVM(jnienv, &dbjie->javavm) != 0) {
+ __os_free(NULL, dbjie);
report_exception(jnienv, "cannot get Java VM", 0, 0);
return (NULL);
}
- /* The default error call just prints to the 'System.err'
+ /*
+ * The default error call just prints to the 'System.err'
 * stream. If the user calls set_errcall with null, we'll
* want to have a reference to set it back to.
*
@@ -172,42 +179,44 @@ dbjie_construct(JNIEnv *jnienv,
* error prefix, error stream, and user's error callback
* that much easier.
*/
- dbjie->default_errcall_ = NEW_GLOBAL_REF(jnienv, default_errcall);
- dbjie->errcall_ = NEW_GLOBAL_REF(jnienv, default_errcall);
+ dbjie->default_errcall = NEW_GLOBAL_REF(jnienv, default_errcall);
+ dbjie->errcall = NEW_GLOBAL_REF(jnienv, default_errcall);
+ dbjie->jenvref = NEW_GLOBAL_REF(jnienv, jenv);
return (dbjie);
}
/* release all objects held by this one */
void dbjie_dealloc(DB_ENV_JAVAINFO *dbjie, JNIEnv *jnienv)
{
- if (dbjie->recovery_init_ != NULL) {
- DELETE_GLOBAL_REF(jnienv, dbjie->recovery_init_);
- dbjie->recovery_init_ = NULL;
+ if (dbjie->feedback != NULL) {
+ DELETE_GLOBAL_REF(jnienv, dbjie->feedback);
+ dbjie->feedback = NULL;
}
- if (dbjie->feedback_ != NULL) {
- DELETE_GLOBAL_REF(jnienv, dbjie->feedback_);
- dbjie->feedback_ = NULL;
+ if (dbjie->app_dispatch != NULL) {
+ DELETE_GLOBAL_REF(jnienv, dbjie->app_dispatch);
+ dbjie->app_dispatch = NULL;
}
- if (dbjie->tx_recover_ != NULL) {
- DELETE_GLOBAL_REF(jnienv, dbjie->tx_recover_);
- dbjie->tx_recover_ = NULL;
+ if (dbjie->errcall != NULL) {
+ DELETE_GLOBAL_REF(jnienv, dbjie->errcall);
+ dbjie->errcall = NULL;
}
- if (dbjie->errcall_ != NULL) {
- DELETE_GLOBAL_REF(jnienv, dbjie->errcall_);
- dbjie->errcall_ = NULL;
+ if (dbjie->default_errcall != NULL) {
+ DELETE_GLOBAL_REF(jnienv, dbjie->default_errcall);
+ dbjie->default_errcall = NULL;
}
- if (dbjie->default_errcall_ != NULL) {
- DELETE_GLOBAL_REF(jnienv, dbjie->default_errcall_);
- dbjie->default_errcall_ = NULL;
+ if (dbjie->jenvref != NULL) {
+ DELETE_GLOBAL_REF(jnienv, dbjie->jenvref);
+ dbjie->jenvref = NULL;
}
- if (dbjie->conflict_ != NULL) {
- free(dbjie->conflict_);
- dbjie->conflict_ = NULL;
+ if (dbjie->conflict != NULL) {
+ __os_free(NULL, dbjie->conflict);
+ dbjie->conflict = NULL;
+ dbjie->conflict_size = 0;
}
- if (dbjie->errpfx_ != NULL) {
- free(dbjie->errpfx_);
- dbjie->errpfx_ = NULL;
+ if (dbjie->errpfx != NULL) {
+ __os_free(NULL, dbjie->errpfx);
+ dbjie->errpfx = NULL;
}
}
@@ -218,17 +227,19 @@ void dbjie_destroy(DB_ENV_JAVAINFO *dbjie, JNIEnv *jnienv)
/* Extra paranoia */
memset(dbjie, 0, sizeof(DB_ENV_JAVAINFO));
- free(dbjie);
+ (void)__os_free(NULL, dbjie);
}
-/* Attach to the current thread that is running and
+/*
+ * Attach to the current thread that is running and
* return that. We use the java virtual machine
* that we saved in the constructor.
*/
JNIEnv *
dbjie_get_jnienv(DB_ENV_JAVAINFO *dbjie)
{
- /* Note:
+ /*
+ * Note:
* Different versions of the JNI disagree on the signature
* for AttachCurrentThread. The most recent documentation
* seems to say that (JNIEnv **) is correct, but newer
@@ -240,10 +251,12 @@ dbjie_get_jnienv(DB_ENV_JAVAINFO *dbjie)
JNIEnv *attachret = 0;
#endif
- /* This should always succeed, as we are called via
+ /*
+ * This should always succeed, as we are called via
* some Java activity. I think therefore I am (a thread).
*/
- if ((*dbjie->javavm_)->AttachCurrentThread(dbjie->javavm_, &attachret, 0) != 0)
+ if ((*dbjie->javavm)->AttachCurrentThread(dbjie->javavm, &attachret, 0)
+ != 0)
return (0);
return ((JNIEnv *)attachret);
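
The hunk above shows only one branch of the attachret declaration; the preprocessor guard and the other branch fall outside the context lines. Reconstructed from the comment, the guard usually looks roughly like the sketch below. The exact macro tested upstream is an assumption here, and attach_current_thread is a made-up name standing in for the body of dbjie_get_jnienv (assumes <jni.h>):

	static JNIEnv *
	attach_current_thread(JavaVM *javavm)
	{
	#ifdef JNI_VERSION_1_2
		void *attachret = 0;	/* 1.2+ headers take (void **) */
	#else
		JNIEnv *attachret = 0;	/* 1.1 headers take (JNIEnv **) */
	#endif
		if ((*javavm)->AttachCurrentThread(javavm, &attachret, 0) != 0)
			return (0);
		return ((JNIEnv *)attachret);
	}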
@@ -252,40 +265,42 @@ dbjie_get_jnienv(DB_ENV_JAVAINFO *dbjie)
jstring
dbjie_get_errpfx(DB_ENV_JAVAINFO *dbjie, JNIEnv *jnienv)
{
- return (get_java_string(jnienv, dbjie->errpfx_));
+ return (get_java_string(jnienv, dbjie->errpfx));
}
void
dbjie_set_errcall(DB_ENV_JAVAINFO *dbjie, JNIEnv *jnienv, jobject new_errcall)
{
- /* If the new_errcall is null, we'll set the error call
+ /*
+ * If the new_errcall is null, we'll set the error call
* to the default one.
*/
if (new_errcall == NULL)
- new_errcall = dbjie->default_errcall_;
+ new_errcall = dbjie->default_errcall;
- DELETE_GLOBAL_REF(jnienv, dbjie->errcall_);
- dbjie->errcall_ = NEW_GLOBAL_REF(jnienv, new_errcall);
+ DELETE_GLOBAL_REF(jnienv, dbjie->errcall);
+ dbjie->errcall = NEW_GLOBAL_REF(jnienv, new_errcall);
}
void
dbjie_set_errpfx(DB_ENV_JAVAINFO *dbjie, JNIEnv *jnienv, jstring errpfx)
{
- if (dbjie->errpfx_ != NULL)
- free(dbjie->errpfx_);
+ if (dbjie->errpfx != NULL)
+ __os_free(NULL, dbjie->errpfx);
if (errpfx)
- dbjie->errpfx_ = get_c_string(jnienv, errpfx);
+ dbjie->errpfx = get_c_string(jnienv, errpfx);
else
- dbjie->errpfx_ = NULL;
+ dbjie->errpfx = NULL;
}
void
-dbjie_set_conflict(DB_ENV_JAVAINFO *dbjie, unsigned char *newarr)
+dbjie_set_conflict(DB_ENV_JAVAINFO *dbjie, u_char *newarr, size_t size)
{
- if (dbjie->conflict_)
- free(dbjie->conflict_);
- dbjie->conflict_ = newarr;
+ if (dbjie->conflict != NULL)
+ (void)__os_free(NULL, dbjie->conflict);
+ dbjie->conflict = newarr;
+ dbjie->conflict_size = size;
}
void dbjie_set_feedback_object(DB_ENV_JAVAINFO *dbjie, JNIEnv *jnienv,
@@ -293,8 +308,8 @@ void dbjie_set_feedback_object(DB_ENV_JAVAINFO *dbjie, JNIEnv *jnienv,
{
int err;
- if (dbjie->feedback_ != NULL) {
- DELETE_GLOBAL_REF(jnienv, dbjie->feedback_);
+ if (dbjie->feedback != NULL) {
+ DELETE_GLOBAL_REF(jnienv, dbjie->feedback);
}
if (jfeedback == NULL) {
if ((err = dbenv->set_feedback(dbenv, NULL)) != 0)
@@ -308,7 +323,7 @@ void dbjie_set_feedback_object(DB_ENV_JAVAINFO *dbjie, JNIEnv *jnienv,
err, 0);
}
- dbjie->feedback_ = NEW_GLOBAL_REF(jnienv, jfeedback);
+ dbjie->feedback = NEW_GLOBAL_REF(jnienv, jfeedback);
}
void dbjie_call_feedback(DB_ENV_JAVAINFO *dbjie, DB_ENV *dbenv, jobject jenv,
@@ -325,97 +340,107 @@ void dbjie_call_feedback(DB_ENV_JAVAINFO *dbjie, DB_ENV *dbenv, jobject jenv,
return;
}
- feedback_class = get_class(jnienv, name_DbEnvFeedback);
+ if ((feedback_class =
+ get_class(jnienv, name_DbEnvFeedback)) == NULL) {
+ fprintf(stderr, "Cannot find callback class %s\n",
+ name_DbEnvFeedback);
+ return; /* An exception has been posted. */
+ }
id = (*jnienv)->GetMethodID(jnienv, feedback_class,
"feedback",
"(Lcom/sleepycat/db/DbEnv;II)V");
if (!id) {
- fprintf(stderr, "Cannot find callback class\n");
+ fprintf(stderr, "Cannot find callback method feedback\n");
return;
}
- (*jnienv)->CallVoidMethod(jnienv, dbjie->feedback_, id,
+ (*jnienv)->CallVoidMethod(jnienv, dbjie->feedback, id,
jenv, (jint)opcode, (jint)percent);
}
-void dbjie_set_recovery_init_object(DB_ENV_JAVAINFO *dbjie,
- JNIEnv *jnienv, DB_ENV *dbenv,
- jobject jrecovery_init)
+void dbjie_set_rep_transport_object(DB_ENV_JAVAINFO *dbjie, JNIEnv *jnienv,
+ DB_ENV *dbenv, int id, jobject jtransport)
{
int err;
- if (dbjie->recovery_init_ != NULL) {
- DELETE_GLOBAL_REF(jnienv, dbjie->recovery_init_);
- }
- if (jrecovery_init == NULL) {
- if ((err = dbenv->set_recovery_init(dbenv, NULL)) != 0)
- report_exception(jnienv, "set_recovery_init failed",
- err, 0);
- }
- else {
- if ((err = dbenv->set_recovery_init(dbenv,
- DbEnv_recovery_init_callback)) != 0)
- report_exception(jnienv, "set_recovery_init failed",
- err, 0);
- }
+ if (dbjie->rep_transport != NULL)
+ DELETE_GLOBAL_REF(jnienv, dbjie->rep_transport);
+
+ err = dbenv->set_rep_transport(dbenv, id,
+ DbEnv_rep_transport_callback);
+ verify_return(jnienv, err, 0);
- dbjie->recovery_init_ = NEW_GLOBAL_REF(jnienv, jrecovery_init);
+ dbjie->rep_transport = NEW_GLOBAL_REF(jnienv, jtransport);
}
-int dbjie_call_recovery_init(DB_ENV_JAVAINFO *dbjie, DB_ENV *dbenv,
- jobject jenv)
+int dbjie_call_rep_transport(DB_ENV_JAVAINFO *dbjie, DB_ENV *dbenv,
+ jobject jenv, const DBT *control,
+ const DBT *rec, int flags, int envid)
{
JNIEnv *jnienv;
- jclass recovery_init_class;
- jmethodID id;
+ jclass rep_transport_class;
+ jmethodID jid;
+ jobject jcdbt, jrdbt;
COMPQUIET(dbenv, NULL);
jnienv = dbjie_get_jnienv(dbjie);
if (jnienv == NULL) {
fprintf(stderr, "Cannot attach to current thread!\n");
- return (EINVAL);
+ return (0);
}
- recovery_init_class = get_class(jnienv, name_DbRecoveryInit);
- id = (*jnienv)->GetMethodID(jnienv, recovery_init_class,
- "recovery_init",
- "(Lcom/sleepycat/db/DbEnv;)V");
- if (!id) {
- fprintf(stderr, "Cannot find callback class\n");
- return (EINVAL);
+ if ((rep_transport_class =
+ get_class(jnienv, name_DbRepTransport)) == NULL) {
+ fprintf(stderr, "Cannot find callback class %s\n",
+ name_DbRepTransport);
+ return (0); /* An exception has been posted. */
}
- return (*jnienv)->CallIntMethod(jnienv, dbjie->recovery_init_,
- id, jenv);
+ jid = (*jnienv)->GetMethodID(jnienv, rep_transport_class,
+ "send",
+ "(Lcom/sleepycat/db/DbEnv;"
+ "Lcom/sleepycat/db/Dbt;"
+ "Lcom/sleepycat/db/Dbt;II)I");
+
+ if (!jid) {
+ fprintf(stderr, "Cannot find callback method send\n");
+ return (0);
+ }
+
+ jcdbt = get_const_Dbt(jnienv, control, NULL);
+ jrdbt = get_const_Dbt(jnienv, rec, NULL);
+
+ return (*jnienv)->CallIntMethod(jnienv, dbjie->rep_transport, jid, jenv,
+ jcdbt, jrdbt, flags, envid);
}
-void dbjie_set_tx_recover_object(DB_ENV_JAVAINFO *dbjie, JNIEnv *jnienv,
- DB_ENV *dbenv, jobject jtx_recover)
+void dbjie_set_app_dispatch_object(DB_ENV_JAVAINFO *dbjie, JNIEnv *jnienv,
+ DB_ENV *dbenv, jobject japp_dispatch)
{
int err;
- if (dbjie->tx_recover_ != NULL) {
- DELETE_GLOBAL_REF(jnienv, dbjie->tx_recover_);
+ if (dbjie->app_dispatch != NULL) {
+ DELETE_GLOBAL_REF(jnienv, dbjie->app_dispatch);
}
- if (jtx_recover == NULL) {
- if ((err = dbenv->set_tx_recover(dbenv, NULL)) != 0)
- report_exception(jnienv, "set_tx_recover failed",
+ if (japp_dispatch == NULL) {
+ if ((err = dbenv->set_app_dispatch(dbenv, NULL)) != 0)
+ report_exception(jnienv, "set_app_dispatch failed",
err, 0);
}
else {
- if ((err = dbenv->set_tx_recover(dbenv,
- DbEnv_tx_recover_callback)) != 0)
- report_exception(jnienv, "set_tx_recover failed",
+ if ((err = dbenv->set_app_dispatch(dbenv,
+ DbEnv_app_dispatch_callback)) != 0)
+ report_exception(jnienv, "set_app_dispatch failed",
err, 0);
}
- dbjie->tx_recover_ = NEW_GLOBAL_REF(jnienv, jtx_recover);
+ dbjie->app_dispatch = NEW_GLOBAL_REF(jnienv, japp_dispatch);
}
-int dbjie_call_tx_recover(DB_ENV_JAVAINFO *dbjie, DB_ENV *dbenv, jobject jenv,
+int dbjie_call_app_dispatch(DB_ENV_JAVAINFO *dbjie, DB_ENV *dbenv, jobject jenv,
DBT *dbt, DB_LSN *lsn, int recops)
{
JNIEnv *jnienv;
- jclass tx_recover_class;
+ jclass app_dispatch_class;
jmethodID id;
jobject jdbt;
jobject jlsn;
@@ -427,90 +452,104 @@ int dbjie_call_tx_recover(DB_ENV_JAVAINFO *dbjie, DB_ENV *dbenv, jobject jenv,
return (0);
}
- tx_recover_class = get_class(jnienv, name_DbTxnRecover);
- id = (*jnienv)->GetMethodID(jnienv, tx_recover_class,
- "tx_recover",
+ if ((app_dispatch_class =
+ get_class(jnienv, name_DbTxnRecover)) == NULL) {
+ fprintf(stderr, "Cannot find callback class %s\n",
+ name_DbTxnRecover);
+ return (0); /* An exception has been posted. */
+ }
+ id = (*jnienv)->GetMethodID(jnienv, app_dispatch_class,
+ "app_dispatch",
"(Lcom/sleepycat/db/DbEnv;"
"Lcom/sleepycat/db/Dbt;"
"Lcom/sleepycat/db/DbLsn;"
"I)I");
if (!id) {
- fprintf(stderr, "Cannot find callback class\n");
+ fprintf(stderr, "Cannot find callback method app_dispatch\n");
return (0);
}
- if (dbt == NULL)
- jdbt = NULL;
- else
- jdbt = get_Dbt(jnienv, dbt);
+ jdbt = get_Dbt(jnienv, dbt, NULL);
if (lsn == NULL)
jlsn = NULL;
else
jlsn = get_DbLsn(jnienv, *lsn);
- return (*jnienv)->CallIntMethod(jnienv, dbjie->tx_recover_, id, jenv,
+ return (*jnienv)->CallIntMethod(jnienv, dbjie->app_dispatch, id, jenv,
jdbt, jlsn, recops);
}
jobject dbjie_get_errcall(DB_ENV_JAVAINFO *dbjie)
{
- return (dbjie->errcall_);
+ return (dbjie->errcall);
}
-int dbjie_is_dbopen(DB_ENV_JAVAINFO *dbjie)
+jint dbjie_is_dbopen(DB_ENV_JAVAINFO *dbjie)
{
- return (dbjie->is_dbopen_);
+ return (dbjie->is_dbopen);
}
/****************************************************************
*
* Implementation of class DB_JAVAINFO
- *
*/
-DB_JAVAINFO *dbji_construct(JNIEnv *jnienv, jint flags)
+DB_JAVAINFO *dbji_construct(JNIEnv *jnienv, jobject jdb, jint flags)
{
DB_JAVAINFO *dbji;
+ int err;
+
+ /*XXX should return err*/
+ if ((err = __os_malloc(NULL, sizeof(DB_JAVAINFO), &dbji)) != 0)
+ return (NULL);
- dbji = (DB_JAVAINFO *)malloc(sizeof(DB_JAVAINFO));
memset(dbji, 0, sizeof(DB_JAVAINFO));
- if ((*jnienv)->GetJavaVM(jnienv, &dbji->javavm_) != 0) {
+ if ((*jnienv)->GetJavaVM(jnienv, &dbji->javavm) != 0) {
report_exception(jnienv, "cannot get Java VM", 0, 0);
- free(dbji);
+ (void)__os_free(NULL, dbji);
return (NULL);
}
- dbji->construct_flags_ = flags;
+ dbji->jdbref = NEW_GLOBAL_REF(jnienv, jdb);
+ dbji->construct_flags = flags;
return (dbji);
}
void
dbji_dealloc(DB_JAVAINFO *dbji, JNIEnv *jnienv)
{
- if (dbji->append_recno_ != NULL) {
- DELETE_GLOBAL_REF(jnienv, dbji->append_recno_);
- dbji->append_recno_ = NULL;
+ if (dbji->append_recno != NULL) {
+ DELETE_GLOBAL_REF(jnienv, dbji->append_recno);
+ dbji->append_recno = NULL;
+ }
+ if (dbji->assoc != NULL) {
+ DELETE_GLOBAL_REF(jnienv, dbji->assoc);
+ dbji->assoc = NULL;
+ }
+ if (dbji->bt_compare != NULL) {
+ DELETE_GLOBAL_REF(jnienv, dbji->bt_compare);
+ dbji->bt_compare = NULL;
}
- if (dbji->bt_compare_ != NULL) {
- DELETE_GLOBAL_REF(jnienv, dbji->bt_compare_);
- dbji->bt_compare_ = NULL;
+ if (dbji->bt_prefix != NULL) {
+ DELETE_GLOBAL_REF(jnienv, dbji->bt_prefix);
+ dbji->bt_prefix = NULL;
}
- if (dbji->bt_prefix_ != NULL) {
- DELETE_GLOBAL_REF(jnienv, dbji->bt_prefix_);
- dbji->bt_prefix_ = NULL;
+ if (dbji->dup_compare != NULL) {
+ DELETE_GLOBAL_REF(jnienv, dbji->dup_compare);
+ dbji->dup_compare = NULL;
}
- if (dbji->dup_compare_ != NULL) {
- DELETE_GLOBAL_REF(jnienv, dbji->dup_compare_);
- dbji->dup_compare_ = NULL;
+ if (dbji->feedback != NULL) {
+ DELETE_GLOBAL_REF(jnienv, dbji->feedback);
+ dbji->feedback = NULL;
}
- if (dbji->feedback_ != NULL) {
- DELETE_GLOBAL_REF(jnienv, dbji->feedback_);
- dbji->feedback_ = NULL;
+ if (dbji->h_hash != NULL) {
+ DELETE_GLOBAL_REF(jnienv, dbji->h_hash);
+ dbji->h_hash = NULL;
}
- if (dbji->h_hash_ != NULL) {
- DELETE_GLOBAL_REF(jnienv, dbji->h_hash_);
- dbji->h_hash_ = NULL;
+ if (dbji->jdbref != NULL) {
+ DELETE_GLOBAL_REF(jnienv, dbji->jdbref);
+ dbji->jdbref = NULL;
}
}
@@ -518,12 +557,13 @@ void
dbji_destroy(DB_JAVAINFO *dbji, JNIEnv *jnienv)
{
dbji_dealloc(dbji, jnienv);
- free(dbji);
+ __os_free(NULL, dbji);
}
JNIEnv *dbji_get_jnienv(DB_JAVAINFO *dbji)
{
- /* Note:
+ /*
+ * Note:
* Different versions of the JNI disagree on the signature
* for AttachCurrentThread. The most recent documentation
* seems to say that (JNIEnv **) is correct, but newer
@@ -535,10 +575,12 @@ JNIEnv *dbji_get_jnienv(DB_JAVAINFO *dbji)
JNIEnv *attachret = 0;
#endif
- /* This should always succeed, as we are called via
+ /*
+ * This should always succeed, as we are called via
* some Java activity. I think therefore I am (a thread).
*/
- if ((*dbji->javavm_)->AttachCurrentThread(dbji->javavm_, &attachret, 0) != 0)
+ if ((*dbji->javavm)->AttachCurrentThread(dbji->javavm, &attachret, 0)
+ != 0)
return (0);
return ((JNIEnv *)attachret);
@@ -546,7 +588,7 @@ JNIEnv *dbji_get_jnienv(DB_JAVAINFO *dbji)
jint dbji_get_flags(DB_JAVAINFO *dbji)
{
- return (dbji->construct_flags_);
+ return (dbji->construct_flags);
}
void dbji_set_feedback_object(DB_JAVAINFO *dbji, JNIEnv *jnienv,
@@ -554,14 +596,17 @@ void dbji_set_feedback_object(DB_JAVAINFO *dbji, JNIEnv *jnienv,
{
jclass feedback_class;
- if (dbji->feedback_method_id_ == NULL) {
- feedback_class = get_class(jnienv, name_DbFeedback);
- dbji->feedback_method_id_ =
+ if (dbji->feedback_method_id == NULL) {
+ if ((feedback_class =
+ get_class(jnienv, name_DbFeedback)) == NULL)
+ return; /* An exception has been posted. */
+ dbji->feedback_method_id =
(*jnienv)->GetMethodID(jnienv, feedback_class,
"feedback",
"(Lcom/sleepycat/db/Db;II)V");
- if (dbji->feedback_method_id_ != NULL) {
- /* XXX
+ if (dbji->feedback_method_id == NULL) {
+ /*
+ * XXX
* We should really have a better way
* to translate this to a Java exception class.
* In theory, it shouldn't happen.
@@ -572,8 +617,8 @@ void dbji_set_feedback_object(DB_JAVAINFO *dbji, JNIEnv *jnienv,
}
}
- if (dbji->feedback_ != NULL) {
- DELETE_GLOBAL_REF(jnienv, dbji->feedback_);
+ if (dbji->feedback != NULL) {
+ DELETE_GLOBAL_REF(jnienv, dbji->feedback);
}
if (jfeedback == NULL) {
db->set_feedback(db, NULL);
@@ -582,7 +627,7 @@ void dbji_set_feedback_object(DB_JAVAINFO *dbji, JNIEnv *jnienv,
db->set_feedback(db, Db_feedback_callback);
}
- dbji->feedback_ = NEW_GLOBAL_REF(jnienv, jfeedback);
+ dbji->feedback = NEW_GLOBAL_REF(jnienv, jfeedback);
}
@@ -598,9 +643,9 @@ void dbji_call_feedback(DB_JAVAINFO *dbji, DB *db, jobject jdb,
return;
}
- DB_ASSERT(dbji->feedback_method_id_ != NULL);
- (*jnienv)->CallVoidMethod(jnienv, dbji->feedback_,
- dbji->feedback_method_id_,
+ DB_ASSERT(dbji->feedback_method_id != NULL);
+ (*jnienv)->CallVoidMethod(jnienv, dbji->feedback,
+ dbji->feedback_method_id,
jdb, (jint)opcode, (jint)percent);
}
@@ -609,15 +654,18 @@ void dbji_set_append_recno_object(DB_JAVAINFO *dbji, JNIEnv *jnienv,
{
jclass append_recno_class;
- if (dbji->append_recno_method_id_ == NULL) {
- append_recno_class = get_class(jnienv, name_DbAppendRecno);
- dbji->append_recno_method_id_ =
+ if (dbji->append_recno_method_id == NULL) {
+ if ((append_recno_class =
+ get_class(jnienv, name_DbAppendRecno)) == NULL)
+ return; /* An exception has been posted. */
+ dbji->append_recno_method_id =
(*jnienv)->GetMethodID(jnienv, append_recno_class,
"db_append_recno",
"(Lcom/sleepycat/db/Db;"
"Lcom/sleepycat/db/Dbt;I)V");
- if (dbji->append_recno_method_id_ == NULL) {
- /* XXX
+ if (dbji->append_recno_method_id == NULL) {
+ /*
+ * XXX
* We should really have a better way
* to translate this to a Java exception class.
* In theory, it shouldn't happen.
@@ -628,8 +676,8 @@ void dbji_set_append_recno_object(DB_JAVAINFO *dbji, JNIEnv *jnienv,
}
}
- if (dbji->append_recno_ != NULL) {
- DELETE_GLOBAL_REF(jnienv, dbji->append_recno_);
+ if (dbji->append_recno != NULL) {
+ DELETE_GLOBAL_REF(jnienv, dbji->append_recno);
}
if (jcallback == NULL) {
db->set_append_recno(db, NULL);
@@ -638,51 +686,36 @@ void dbji_set_append_recno_object(DB_JAVAINFO *dbji, JNIEnv *jnienv,
db->set_append_recno(db, Db_append_recno_callback);
}
- dbji->append_recno_ = NEW_GLOBAL_REF(jnienv, jcallback);
+ dbji->append_recno = NEW_GLOBAL_REF(jnienv, jcallback);
}
extern int dbji_call_append_recno(DB_JAVAINFO *dbji, DB *db, jobject jdb,
DBT *dbt, jint recno)
{
JNIEnv *jnienv;
- jobject jdbt;
+ jobject jresult;
DBT_JAVAINFO *dbtji;
- jbyteArray arr;
- unsigned int arraylen;
- unsigned char *data;
+ LOCKED_DBT lresult;
+ DB_ENV *dbenv;
+ u_char *bytearray;
+ int err;
- COMPQUIET(db, NULL);
jnienv = dbji_get_jnienv(dbji);
+ dbenv = db->dbenv;
if (jnienv == NULL) {
fprintf(stderr, "Cannot attach to current thread!\n");
return (0);
}
- /* XXX
- * We should have a pool of Dbt objects used for this purpose
- * instead of creating new ones each time. Because of
- * multithreading, we may need an arbitrary number (more than two).
- * We might also have a byte arrays that grow as needed,
- * so we don't need to allocate those either.
- *
- * Note, we do not set the 'create_array_' flag as on other
- * callbacks as we are creating the array here.
- */
- jdbt = create_default_object(jnienv, name_DBT);
- dbtji = get_DBT_JAVAINFO(jnienv, jdbt);
- memcpy(&dbtji->dbt, dbt, sizeof(DBT));
- dbtji->dbt.data = NULL;
- arr = (*jnienv)->NewByteArray(jnienv, dbt->size);
- (*jnienv)->SetByteArrayRegion(jnienv, arr, 0, dbt->size,
- (jbyte *)dbt->data);
- dbtji->array_ = (jbyteArray)NEW_GLOBAL_REF(jnienv, arr);
-
- DB_ASSERT(dbji->append_recno_method_id_ != NULL);
- (*jnienv)->CallVoidMethod(jnienv, dbji->append_recno_,
- dbji->append_recno_method_id_,
- jdb, jdbt, recno);
-
- /* The underlying C API requires that an errno be returned
+ jresult = get_Dbt(jnienv, dbt, &dbtji);
+
+ DB_ASSERT(dbji->append_recno_method_id != NULL);
+ (*jnienv)->CallVoidMethod(jnienv, dbji->append_recno,
+ dbji->append_recno_method_id,
+ jdb, jresult, recno);
+
+ /*
+ * The underlying C API requires that an errno be returned
* on error. Java users know nothing of errnos, so we
* allow them to throw exceptions instead. We leave the
* exception in place and return DB_JAVA_CALLBACK to the C API
@@ -701,26 +734,146 @@ extern int dbji_call_append_recno(DB_JAVAINFO *dbji, DB *db, jobject jdb,
if ((*jnienv)->ExceptionOccurred(jnienv) != NULL)
return (DB_JAVA_CALLBACK);
- if (dbtji->array_ == NULL) {
- report_exception(jnienv, "Dbt.data is null", 0, 0);
- return (EFAULT);
+ /*
+ * Now get the DBT back from java, because the user probably
+ * changed it. We'll have to copy back the array too and let
+ * our caller free it.
+ *
+ * We expect that the user *has* changed the DBT (why else would
+ * they set up an append_recno callback?) so we don't
+ * worry about optimizing the unchanged case.
+ */
+ if ((err = locked_dbt_get(&lresult, jnienv, dbenv, jresult, inOp)) != 0)
+ return (err);
+
+ memcpy(dbt, &lresult.javainfo->dbt, sizeof(DBT));
+ if ((err = __os_malloc(dbenv, dbt->size, &bytearray)) != 0)
+ goto out;
+
+ memcpy(bytearray, dbt->data, dbt->size);
+ dbt->data = bytearray;
+ dbt->flags |= DB_DBT_APPMALLOC;
+
+ out:
+ locked_dbt_put(&lresult, jnienv, dbenv);
+ return (err);
+}
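+
+ The copy-back above implements the standard append_recno contract: when the
+ callback replaces the record's data, it hands DB a buffer it allocated itself
+ and sets DB_DBT_APPMALLOC so that DB frees the buffer once it is done with it.
+ For comparison, a plain C callback honoring the same contract might look like
+ the sketch below (illustrative only; an application would register it with
+ db->set_append_recno(db, my_append_recno)):
+
+	#include <errno.h>
+	#include <stdlib.h>
+	#include <string.h>
+	#include <db.h>
+
+	/* Prefix the stored record with its record number. */
+	static int
+	my_append_recno(DB *db, DBT *data, db_recno_t recno)
+	{
+		unsigned char *buf;
+		u_int32_t len;
+
+		(void)db;
+		len = sizeof(recno) + data->size;
+		if ((buf = malloc(len)) == NULL)
+			return (ENOMEM);
+		memcpy(buf, &recno, sizeof(recno));
+		memcpy(buf + sizeof(recno), data->data, data->size);
+		data->data = buf;
+		data->size = len;
+		data->flags |= DB_DBT_APPMALLOC;	/* DB frees buf when done */
+		return (0);
+	}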
+
+void dbji_set_assoc_object(DB_JAVAINFO *dbji, JNIEnv *jnienv,
+ DB *db, DB_TXN *txn, DB *second,
+ jobject jcallback, int flags)
+{
+ jclass assoc_class;
+ int err;
+
+ if (dbji->assoc_method_id == NULL) {
+ if ((assoc_class =
+ get_class(jnienv, name_DbSecondaryKeyCreate)) == NULL)
+ return; /* An exception has been posted. */
+ dbji->assoc_method_id =
+ (*jnienv)->GetMethodID(jnienv, assoc_class,
+ "secondary_key_create",
+ "(Lcom/sleepycat/db/Db;"
+ "Lcom/sleepycat/db/Dbt;"
+ "Lcom/sleepycat/db/Dbt;"
+ "Lcom/sleepycat/db/Dbt;)I");
+ if (dbji->assoc_method_id == NULL) {
+ /*
+ * XXX
+ * We should really have a better way
+ * to translate this to a Java exception class.
+ * In theory, it shouldn't happen.
+ */
+ report_exception(jnienv, "Cannot find callback method",
+ EFAULT, 0);
+ return;
+ }
}
- arraylen = (*jnienv)->GetArrayLength(jnienv, dbtji->array_);
- if (dbtji->offset_ < 0 ) {
- report_exception(jnienv, "Dbt.offset illegal", 0, 0);
- return (EFAULT);
+ if (dbji->assoc != NULL) {
+ DELETE_GLOBAL_REF(jnienv, dbji->assoc);
+ dbji->assoc = NULL;
}
- if (dbt->ulen + dbtji->offset_ > arraylen) {
- report_exception(jnienv,
- "Dbt.ulen + Dbt.offset greater than array length", 0, 0);
- return (EFAULT);
+
+ if (jcallback == NULL)
+ err = db->associate(db, txn, second, NULL, flags);
+ else
+ err = db->associate(db, txn, second, Db_assoc_callback, flags);
+
+ if (verify_return(jnienv, err, 0))
+ dbji->assoc = NEW_GLOBAL_REF(jnienv, jcallback);
+}
+
+extern int dbji_call_assoc(DB_JAVAINFO *dbji, DB *db, jobject jdb,
+ const DBT *key, const DBT *value, DBT *result)
+{
+ JNIEnv *jnienv;
+ jobject jresult;
+ LOCKED_DBT lresult;
+ DB_ENV *dbenv;
+ int err;
+ int sz;
+ u_char *bytearray;
+ jint retval;
+
+ jnienv = dbji_get_jnienv(dbji);
+ if (jnienv == NULL) {
+ fprintf(stderr, "Cannot attach to current thread!\n");
+ return (0);
}
- data = (*jnienv)->GetByteArrayElements(jnienv, dbtji->array_,
- (jboolean *)0);
- dbt->data = data + dbtji->offset_;
- return (0);
+ DB_ASSERT(dbji->assoc_method_id != NULL);
+
+ dbenv = db->dbenv;
+ jresult = create_default_object(jnienv, name_DBT);
+
+ retval = (*jnienv)->CallIntMethod(jnienv, dbji->assoc,
+ dbji->assoc_method_id, jdb,
+ get_const_Dbt(jnienv, key, NULL),
+ get_const_Dbt(jnienv, value, NULL),
+ jresult);
+ if (retval != 0)
+ return (retval);
+
+ if ((*jnienv)->ExceptionOccurred(jnienv) != NULL)
+ return (DB_JAVA_CALLBACK);
+
+ if ((err = locked_dbt_get(&lresult, jnienv, dbenv, jresult, inOp)) != 0)
+ return (err);
+
+ sz = lresult.javainfo->dbt.size;
+ if (sz > 0) {
+ bytearray = (u_char *)lresult.javainfo->dbt.data;
+
+ /*
+ * If the byte array is in the range of one of the
+ * arrays passed to us we can use it directly.
+ * If not, we must create our own array and
+ * fill it in with the java array. Since
+ * the java array may disappear and we don't
+ * want to keep its memory locked indefinitely,
+ * we cannot just pin the array.
+ *
+ * XXX consider pinning the array, and having
+ * some way for the C layer to notify the java
+ * layer when it can be unpinned.
+ */
+ if ((bytearray < (u_char *)key->data ||
+ bytearray + sz > (u_char *)key->data + key->size) &&
+ (bytearray < (u_char *)value->data ||
+ bytearray + sz > (u_char *)value->data + value->size)) {
+
+ result->flags |= DB_DBT_APPMALLOC;
+ if ((err = __os_malloc(dbenv, sz, &bytearray)) != 0)
+ goto out;
+ memcpy(bytearray, lresult.javainfo->dbt.data, sz);
+ }
+ result->data = bytearray;
+ result->size = sz;
+ }
+ out:
+ locked_dbt_put(&lresult, jnienv, dbenv);
+ return (err);
}
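
dbji_call_assoc bridges Berkeley DB's associate() callback contract: fill in the result DBT with the secondary key and return 0, or return DB_DONOTINDEX to leave the record out of the secondary. For comparison, a plain C secondary-key extractor with made-up field offsets (sketch only):

	#include <string.h>
	#include <db.h>

	/* Index records by the first four bytes of the primary data item. */
	static int
	first4_key(DB *secondary, const DBT *pkey, const DBT *pdata, DBT *skey)
	{
		(void)secondary;
		(void)pkey;
		if (pdata->size < 4)
			return (DB_DONOTINDEX);	/* too short, skip it */
		memset(skey, 0, sizeof(DBT));
		skey->data = pdata->data;	/* aliases the primary data */
		skey->size = 4;
		return (0);
	}

Because the key above aliases pdata, no DB_DBT_APPMALLOC is needed; that flag matters only when the callback allocates fresh memory, which is exactly the case dbji_call_assoc handles when the Java array does not alias key or value.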
void dbji_set_bt_compare_object(DB_JAVAINFO *dbji, JNIEnv *jnienv,
@@ -728,16 +881,19 @@ void dbji_set_bt_compare_object(DB_JAVAINFO *dbji, JNIEnv *jnienv,
{
jclass bt_compare_class;
- if (dbji->bt_compare_method_id_ == NULL) {
- bt_compare_class = get_class(jnienv, name_DbBtreeCompare);
- dbji->bt_compare_method_id_ =
+ if (dbji->bt_compare_method_id == NULL) {
+ if ((bt_compare_class =
+ get_class(jnienv, name_DbBtreeCompare)) == NULL)
+ return; /* An exception has been posted. */
+ dbji->bt_compare_method_id =
(*jnienv)->GetMethodID(jnienv, bt_compare_class,
"bt_compare",
"(Lcom/sleepycat/db/Db;"
"Lcom/sleepycat/db/Dbt;"
"Lcom/sleepycat/db/Dbt;)I");
- if (dbji->bt_compare_method_id_ == NULL) {
- /* XXX
+ if (dbji->bt_compare_method_id == NULL) {
+ /*
+ * XXX
* We should really have a better way
* to translate this to a Java exception class.
* In theory, it shouldn't happen.
@@ -748,8 +904,8 @@ void dbji_set_bt_compare_object(DB_JAVAINFO *dbji, JNIEnv *jnienv,
}
}
- if (dbji->bt_compare_ != NULL) {
- DELETE_GLOBAL_REF(jnienv, dbji->bt_compare_);
+ if (dbji->bt_compare != NULL) {
+ DELETE_GLOBAL_REF(jnienv, dbji->bt_compare);
}
if (jcompare == NULL) {
db->set_bt_compare(db, NULL);
@@ -758,7 +914,7 @@ void dbji_set_bt_compare_object(DB_JAVAINFO *dbji, JNIEnv *jnienv,
db->set_bt_compare(db, Db_bt_compare_callback);
}
- dbji->bt_compare_ = NEW_GLOBAL_REF(jnienv, jcompare);
+ dbji->bt_compare = NEW_GLOBAL_REF(jnienv, jcompare);
}
int dbji_call_bt_compare(DB_JAVAINFO *dbji, DB *db, jobject jdb,
@@ -766,7 +922,6 @@ int dbji_call_bt_compare(DB_JAVAINFO *dbji, DB *db, jobject jdb,
{
JNIEnv *jnienv;
jobject jdbt1, jdbt2;
- DBT_JAVAINFO *dbtji1, *dbtji2;
COMPQUIET(db, NULL);
jnienv = dbji_get_jnienv(dbji);
@@ -775,25 +930,12 @@ int dbji_call_bt_compare(DB_JAVAINFO *dbji, DB *db, jobject jdb,
return (0);
}
- /* XXX
- * We should have a pool of Dbt objects used for this purpose
- * instead of creating new ones each time. Because of
- * multithreading, we may need an arbitrary number (more than two).
- * We might also have a byte arrays that grow as needed,
- * so we don't need to allocate those either.
- */
- jdbt1 = create_default_object(jnienv, name_DBT);
- jdbt2 = create_default_object(jnienv, name_DBT);
- dbtji1 = get_DBT_JAVAINFO(jnienv, jdbt1);
- memcpy(&dbtji1->dbt, dbt1, sizeof(DBT));
- dbtji1->create_array_ = 1;
- dbtji2 = get_DBT_JAVAINFO(jnienv, jdbt2);
- memcpy(&dbtji2->dbt, dbt2, sizeof(DBT));
- dbtji2->create_array_ = 1;
-
- DB_ASSERT(dbji->bt_compare_method_id_ != NULL);
- return (*jnienv)->CallIntMethod(jnienv, dbji->bt_compare_,
- dbji->bt_compare_method_id_,
+ jdbt1 = get_const_Dbt(jnienv, dbt1, NULL);
+ jdbt2 = get_const_Dbt(jnienv, dbt2, NULL);
+
+ DB_ASSERT(dbji->bt_compare_method_id != NULL);
+ return (*jnienv)->CallIntMethod(jnienv, dbji->bt_compare,
+ dbji->bt_compare_method_id,
jdb, jdbt1, jdbt2);
}
@@ -802,16 +944,19 @@ void dbji_set_bt_prefix_object(DB_JAVAINFO *dbji, JNIEnv *jnienv,
{
jclass bt_prefix_class;
- if (dbji->bt_prefix_method_id_ == NULL) {
- bt_prefix_class = get_class(jnienv, name_DbBtreePrefix);
- dbji->bt_prefix_method_id_ =
+ if (dbji->bt_prefix_method_id == NULL) {
+ if ((bt_prefix_class =
+ get_class(jnienv, name_DbBtreePrefix)) == NULL)
+ return; /* An exception has been posted. */
+ dbji->bt_prefix_method_id =
(*jnienv)->GetMethodID(jnienv, bt_prefix_class,
"bt_prefix",
"(Lcom/sleepycat/db/Db;"
"Lcom/sleepycat/db/Dbt;"
"Lcom/sleepycat/db/Dbt;)I");
- if (dbji->bt_prefix_method_id_ == NULL) {
- /* XXX
+ if (dbji->bt_prefix_method_id == NULL) {
+ /*
+ * XXX
* We should really have a better way
* to translate this to a Java exception class.
* In theory, it shouldn't happen.
@@ -822,8 +967,8 @@ void dbji_set_bt_prefix_object(DB_JAVAINFO *dbji, JNIEnv *jnienv,
}
}
- if (dbji->bt_prefix_ != NULL) {
- DELETE_GLOBAL_REF(jnienv, dbji->bt_prefix_);
+ if (dbji->bt_prefix != NULL) {
+ DELETE_GLOBAL_REF(jnienv, dbji->bt_prefix);
}
if (jprefix == NULL) {
db->set_bt_prefix(db, NULL);
@@ -832,7 +977,7 @@ void dbji_set_bt_prefix_object(DB_JAVAINFO *dbji, JNIEnv *jnienv,
db->set_bt_prefix(db, Db_bt_prefix_callback);
}
- dbji->bt_prefix_ = NEW_GLOBAL_REF(jnienv, jprefix);
+ dbji->bt_prefix = NEW_GLOBAL_REF(jnienv, jprefix);
}
size_t dbji_call_bt_prefix(DB_JAVAINFO *dbji, DB *db, jobject jdb,
@@ -840,7 +985,6 @@ size_t dbji_call_bt_prefix(DB_JAVAINFO *dbji, DB *db, jobject jdb,
{
JNIEnv *jnienv;
jobject jdbt1, jdbt2;
- DBT_JAVAINFO *dbtji1, *dbtji2;
COMPQUIET(db, NULL);
jnienv = dbji_get_jnienv(dbji);
@@ -849,25 +993,12 @@ size_t dbji_call_bt_prefix(DB_JAVAINFO *dbji, DB *db, jobject jdb,
return (0);
}
- /* XXX
- * We should have a pool of Dbt objects used for this purpose
- * instead of creating new ones each time. Because of
- * multithreading, we may need an arbitrary number (more than two).
- * We might also have a byte arrays that grow as needed,
- * so we don't need to allocate those either.
- */
- jdbt1 = create_default_object(jnienv, name_DBT);
- jdbt2 = create_default_object(jnienv, name_DBT);
- dbtji1 = get_DBT_JAVAINFO(jnienv, jdbt1);
- memcpy(&dbtji1->dbt, dbt1, sizeof(DBT));
- dbtji1->create_array_ = 1;
- dbtji2 = get_DBT_JAVAINFO(jnienv, jdbt2);
- memcpy(&dbtji2->dbt, dbt2, sizeof(DBT));
- dbtji2->create_array_ = 1;
-
- DB_ASSERT(dbji->bt_prefix_method_id_ != NULL);
- return (size_t)(*jnienv)->CallIntMethod(jnienv, dbji->bt_prefix_,
- dbji->bt_prefix_method_id_,
+ jdbt1 = get_const_Dbt(jnienv, dbt1, NULL);
+ jdbt2 = get_const_Dbt(jnienv, dbt2, NULL);
+
+ DB_ASSERT(dbji->bt_prefix_method_id != NULL);
+ return (size_t)(*jnienv)->CallIntMethod(jnienv, dbji->bt_prefix,
+ dbji->bt_prefix_method_id,
jdb, jdbt1, jdbt2);
}
@@ -876,16 +1007,19 @@ void dbji_set_dup_compare_object(DB_JAVAINFO *dbji, JNIEnv *jnienv,
{
jclass dup_compare_class;
- if (dbji->dup_compare_method_id_ == NULL) {
- dup_compare_class = get_class(jnienv, name_DbDupCompare);
- dbji->dup_compare_method_id_ =
+ if (dbji->dup_compare_method_id == NULL) {
+ if ((dup_compare_class =
+ get_class(jnienv, name_DbDupCompare)) == NULL)
+ return; /* An exception has been posted. */
+ dbji->dup_compare_method_id =
(*jnienv)->GetMethodID(jnienv, dup_compare_class,
"dup_compare",
"(Lcom/sleepycat/db/Db;"
"Lcom/sleepycat/db/Dbt;"
"Lcom/sleepycat/db/Dbt;)I");
- if (dbji->dup_compare_method_id_ == NULL) {
- /* XXX
+ if (dbji->dup_compare_method_id == NULL) {
+ /*
+ * XXX
* We should really have a better way
* to translate this to a Java exception class.
* In theory, it shouldn't happen.
@@ -896,15 +1030,15 @@ void dbji_set_dup_compare_object(DB_JAVAINFO *dbji, JNIEnv *jnienv,
}
}
- if (dbji->dup_compare_ != NULL)
- DELETE_GLOBAL_REF(jnienv, dbji->dup_compare_);
+ if (dbji->dup_compare != NULL)
+ DELETE_GLOBAL_REF(jnienv, dbji->dup_compare);
if (jcompare == NULL)
db->set_dup_compare(db, NULL);
else
db->set_dup_compare(db, Db_dup_compare_callback);
- dbji->dup_compare_ = NEW_GLOBAL_REF(jnienv, jcompare);
+ dbji->dup_compare = NEW_GLOBAL_REF(jnienv, jcompare);
}
int dbji_call_dup_compare(DB_JAVAINFO *dbji, DB *db, jobject jdb,
@@ -912,7 +1046,6 @@ int dbji_call_dup_compare(DB_JAVAINFO *dbji, DB *db, jobject jdb,
{
JNIEnv *jnienv;
jobject jdbt1, jdbt2;
- DBT_JAVAINFO *dbtji1, *dbtji2;
COMPQUIET(db, NULL);
jnienv = dbji_get_jnienv(dbji);
@@ -921,25 +1054,12 @@ int dbji_call_dup_compare(DB_JAVAINFO *dbji, DB *db, jobject jdb,
return (0);
}
- /* XXX
- * We should have a pool of Dbt objects used for this purpose
- * instead of creating new ones each time. Because of
- * multithreading, we may need an arbitrary number (more than two).
- * We might also have a byte arrays that grow as needed,
- * so we don't need to allocate those either.
- */
- jdbt1 = create_default_object(jnienv, name_DBT);
- jdbt2 = create_default_object(jnienv, name_DBT);
- dbtji1 = get_DBT_JAVAINFO(jnienv, jdbt1);
- memcpy(&dbtji1->dbt, dbt1, sizeof(DBT));
- dbtji1->create_array_ = 1;
- dbtji2 = get_DBT_JAVAINFO(jnienv, jdbt2);
- memcpy(&dbtji2->dbt, dbt2, sizeof(DBT));
- dbtji2->create_array_ = 1;
-
- DB_ASSERT(dbji->dup_compare_method_id_ != NULL);
- return (*jnienv)->CallIntMethod(jnienv, dbji->dup_compare_,
- dbji->dup_compare_method_id_,
+ jdbt1 = get_const_Dbt(jnienv, dbt1, NULL);
+ jdbt2 = get_const_Dbt(jnienv, dbt2, NULL);
+
+ DB_ASSERT(dbji->dup_compare_method_id != NULL);
+ return (*jnienv)->CallIntMethod(jnienv, dbji->dup_compare,
+ dbji->dup_compare_method_id,
jdb, jdbt1, jdbt2);
}
@@ -948,15 +1068,18 @@ void dbji_set_h_hash_object(DB_JAVAINFO *dbji, JNIEnv *jnienv,
{
jclass h_hash_class;
- if (dbji->h_hash_method_id_ == NULL) {
- h_hash_class = get_class(jnienv, name_DbHash);
- dbji->h_hash_method_id_ =
+ if (dbji->h_hash_method_id == NULL) {
+ if ((h_hash_class =
+ get_class(jnienv, name_DbHash)) == NULL)
+ return; /* An exception has been posted. */
+ dbji->h_hash_method_id =
(*jnienv)->GetMethodID(jnienv, h_hash_class,
"hash",
"(Lcom/sleepycat/db/Db;"
"[BI)I");
- if (dbji->h_hash_method_id_ == NULL) {
- /* XXX
+ if (dbji->h_hash_method_id == NULL) {
+ /*
+ * XXX
* We should really have a better way
* to translate this to a Java exception class.
* In theory, it shouldn't happen.
@@ -967,22 +1090,22 @@ void dbji_set_h_hash_object(DB_JAVAINFO *dbji, JNIEnv *jnienv,
}
}
- if (dbji->h_hash_ != NULL)
- DELETE_GLOBAL_REF(jnienv, dbji->h_hash_);
+ if (dbji->h_hash != NULL)
+ DELETE_GLOBAL_REF(jnienv, dbji->h_hash);
if (jhash == NULL)
db->set_h_hash(db, NULL);
else
db->set_h_hash(db, Db_h_hash_callback);
- dbji->h_hash_ = NEW_GLOBAL_REF(jnienv, jhash);
+ dbji->h_hash = NEW_GLOBAL_REF(jnienv, jhash);
}
int dbji_call_h_hash(DB_JAVAINFO *dbji, DB *db, jobject jdb,
const void *data, int len)
{
JNIEnv *jnienv;
- jbyteArray jarray;
+ jbyteArray jdata;
COMPQUIET(db, NULL);
jnienv = dbji_get_jnienv(dbji);
@@ -991,11 +1114,12 @@ int dbji_call_h_hash(DB_JAVAINFO *dbji, DB *db, jobject jdb,
return (0);
}
- DB_ASSERT(dbji->h_hash_method_id_ != NULL);
+ DB_ASSERT(dbji->h_hash_method_id != NULL);
- jarray = (*jnienv)->NewByteArray(jnienv, len);
- (*jnienv)->SetByteArrayRegion(jnienv, jarray, 0, len, (void *)data);
- return (*jnienv)->CallIntMethod(jnienv, dbji->h_hash_,
- dbji->h_hash_method_id_,
- jdb, jarray, len);
+ if ((jdata = (*jnienv)->NewByteArray(jnienv, len)) == NULL)
+ return (0); /* An exception has been posted by the JVM */
+ (*jnienv)->SetByteArrayRegion(jnienv, jdata, 0, len, (void *)data);
+ return (*jnienv)->CallIntMethod(jnienv, dbji->h_hash,
+ dbji->h_hash_method_id,
+ jdb, jdata, len);
}
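
dbji_call_h_hash copies the key bytes into a fresh Java byte[] on every call and forwards them to the user's hash method. The native contract it mirrors is set_h_hash's u_int32_t (*)(DB *, const void *, u_int32_t) signature; a trivial native hash with that shape, for reference only (registered with db->set_h_hash(db, my_h_hash) before the database is opened):

	#include <db.h>

	/* FNV-1a-style hash, shown only to illustrate the expected signature. */
	static u_int32_t
	my_h_hash(DB *db, const void *bytes, u_int32_t length)
	{
		const unsigned char *p = (const unsigned char *)bytes;
		u_int32_t hash = 2166136261u;

		(void)db;
		while (length-- > 0) {
			hash ^= *p++;
			hash *= 16777619u;
		}
		return (hash);
	}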
diff --git a/bdb/libdb_java/java_info.h b/bdb/libdb_java/java_info.h
index 69032be80e6..bda83db420e 100644
--- a/bdb/libdb_java/java_info.h
+++ b/bdb/libdb_java/java_info.h
@@ -1,10 +1,10 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1997, 1998, 1999, 2000
+ * Copyright (c) 1997-2002
* Sleepycat Software. All rights reserved.
*
- * $Id: java_info.h,v 11.17 2000/07/31 20:28:30 dda Exp $
+ * $Id: java_info.h,v 11.35 2002/08/29 14:22:23 margo Exp $
*/
#ifndef _JAVA_INFO_H_
@@ -36,16 +36,22 @@
typedef struct _dbt_javainfo
{
DBT dbt;
- DB *db_; /* associated DB */
- jobject dbtref_; /* the java Dbt object */
- jbyteArray array_;
- int offset_;
- int create_array_; /* flag to create the array as needed */
+ DB *db; /* associated DB */
+ jobject dbtref; /* the java Dbt object */
+ jbyteArray array; /* the java array object -
+ this is only valid during the API call */
+ int offset; /* offset into the Java array */
+
+#define DBT_JAVAINFO_LOCKED 0x01 /* a LOCKED_DBT has been created */
+ u_int32_t flags;
}
-DBT_JAVAINFO; /* used with all 'dbtji' functions */
+DBT_JAVAINFO; /* used with all 'dbtji' functions */
+/* create/initialize a DBT_JAVAINFO object */
extern DBT_JAVAINFO *dbjit_construct();
-extern void dbjit_release(DBT_JAVAINFO *dbjit, JNIEnv *jnienv);
+
+/* free this DBT_JAVAINFO, releasing anything allocated on its behalf */
+extern void dbjit_destroy(DBT_JAVAINFO *dbjit);
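+
+ The new flags word and the DBT_JAVAINFO_LOCKED bit are driven with Berkeley
+ DB's F_SET/F_CLR/F_ISSET helpers in the .c changes above. Their definitions
+ live in db_int.h and are, from memory (treat the exact spelling as an
+ assumption), the usual bit helpers:
+
+	#define	F_CLR(p, f)	(p)->flags &= ~(f)
+	#define	F_ISSET(p, f)	((p)->flags & (f))
+	#define	F_SET(p, f)	(p)->flags |= (f)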
/****************************************************************
*
@@ -82,22 +88,25 @@ extern void dbjit_release(DBT_JAVAINFO *dbjit, JNIEnv *jnienv);
*/
typedef struct _db_env_javainfo
{
- JavaVM *javavm_;
- int is_dbopen_;
- char *errpfx_;
- jobject jdbref_; /* temporary reference */
- jobject jenvref_; /* temporary reference */
- jobject default_errcall_; /* global reference */
- jobject errcall_; /* global reference */
- jobject feedback_; /* global reference */
- jobject tx_recover_; /* global reference */
- jobject recovery_init_; /* global reference */
- unsigned char *conflict_;
+ JavaVM *javavm;
+ int is_dbopen;
+ char *errpfx;
+ jobject jenvref; /* global reference */
+ jobject default_errcall; /* global reference */
+ jobject errcall; /* global reference */
+ jobject feedback; /* global reference */
+ jobject rep_transport; /* global reference */
+ jobject app_dispatch; /* global reference */
+ jobject recovery_init; /* global reference */
+ u_char *conflict;
+ size_t conflict_size;
+ jint construct_flags;
}
-DB_ENV_JAVAINFO; /* used with all 'dbjie' functions */
+DB_ENV_JAVAINFO; /* used with all 'dbjie' functions */
/* create/initialize an object */
extern DB_ENV_JAVAINFO *dbjie_construct(JNIEnv *jnienv,
+ jobject jenv,
jobject default_errcall,
int is_dbopen);
@@ -115,7 +124,7 @@ extern void dbjie_set_errpfx(DB_ENV_JAVAINFO *, JNIEnv *jnienv,
extern jstring dbjie_get_errpfx(DB_ENV_JAVAINFO *, JNIEnv *jnienv);
extern void dbjie_set_errcall(DB_ENV_JAVAINFO *, JNIEnv *jnienv,
jobject new_errcall);
-extern void dbjie_set_conflict(DB_ENV_JAVAINFO *, unsigned char *v);
+extern void dbjie_set_conflict(DB_ENV_JAVAINFO *, u_char *v, size_t sz);
extern void dbjie_set_feedback_object(DB_ENV_JAVAINFO *, JNIEnv *jnienv,
DB_ENV *dbenv, jobject value);
extern void dbjie_call_feedback(DB_ENV_JAVAINFO *, DB_ENV *dbenv, jobject jenv,
@@ -124,13 +133,18 @@ extern void dbjie_set_recovery_init_object(DB_ENV_JAVAINFO *, JNIEnv *jnienv,
DB_ENV *dbenv, jobject value);
extern int dbjie_call_recovery_init(DB_ENV_JAVAINFO *, DB_ENV *dbenv,
jobject jenv);
-extern void dbjie_set_tx_recover_object(DB_ENV_JAVAINFO *, JNIEnv *jnienv,
+extern void dbjie_set_rep_transport_object(DB_ENV_JAVAINFO *, JNIEnv *jnienv,
+ DB_ENV *dbenv, int id, jobject obj);
+extern int dbjie_call_rep_transport(DB_ENV_JAVAINFO *, DB_ENV *dbenv,
+ jobject jenv, const DBT *control,
+ const DBT *rec, int envid, int flags);
+extern void dbjie_set_app_dispatch_object(DB_ENV_JAVAINFO *, JNIEnv *jnienv,
DB_ENV *dbenv, jobject value);
-extern int dbjie_call_tx_recover(DB_ENV_JAVAINFO *,
+extern int dbjie_call_app_dispatch(DB_ENV_JAVAINFO *,
DB_ENV *dbenv, jobject jenv,
DBT *dbt, DB_LSN *lsn, int recops);
extern jobject dbjie_get_errcall(DB_ENV_JAVAINFO *) ;
-extern int dbjie_is_dbopen(DB_ENV_JAVAINFO *);
+extern jint dbjie_is_dbopen(DB_ENV_JAVAINFO *);
/****************************************************************
*
@@ -147,25 +161,27 @@ extern int dbjie_is_dbopen(DB_ENV_JAVAINFO *);
*/
typedef struct _db_javainfo
{
- JavaVM *javavm_;
- jobject jdbref_; /* temporary reference during callback */
- jobject feedback_; /* global reference */
- jobject append_recno_; /* global reference */
- jobject bt_compare_; /* global reference */
- jobject bt_prefix_; /* global reference */
- jobject dup_compare_; /* global reference */
- jobject h_hash_; /* global reference */
- jmethodID feedback_method_id_;
- jmethodID append_recno_method_id_;
- jmethodID bt_compare_method_id_;
- jmethodID bt_prefix_method_id_;
- jmethodID dup_compare_method_id_;
- jmethodID h_hash_method_id_;
- jint construct_flags_;
+ JavaVM *javavm;
+ jobject jdbref; /* global reference */
+ jobject append_recno; /* global reference */
+ jobject assoc; /* global reference */
+ jobject bt_compare; /* global reference */
+ jobject bt_prefix; /* global reference */
+ jobject dup_compare; /* global reference */
+ jobject feedback; /* global reference */
+ jobject h_hash; /* global reference */
+ jmethodID append_recno_method_id;
+ jmethodID assoc_method_id;
+ jmethodID bt_compare_method_id;
+ jmethodID bt_prefix_method_id;
+ jmethodID dup_compare_method_id;
+ jmethodID feedback_method_id;
+ jmethodID h_hash_method_id;
+ jint construct_flags;
} DB_JAVAINFO;
/* create/initialize an object */
-extern DB_JAVAINFO *dbji_construct(JNIEnv *jnienv, jint flags);
+extern DB_JAVAINFO *dbji_construct(JNIEnv *jnienv, jobject jdb, jint flags);
/* release all objects held by this one */
extern void dbji_dealloc(DB_JAVAINFO *, JNIEnv *jnienv);
@@ -184,6 +200,11 @@ extern void dbji_call_feedback(DB_JAVAINFO *, DB *db, jobject jdb,
extern void dbji_set_append_recno_object(DB_JAVAINFO *, JNIEnv *jnienv, DB *db, jobject value);
extern int dbji_call_append_recno(DB_JAVAINFO *, DB *db, jobject jdb,
DBT *dbt, jint recno);
+extern void dbji_set_assoc_object(DB_JAVAINFO *, JNIEnv *jnienv,
+ DB *db, DB_TXN *txn, DB *second,
+ jobject value, int flags);
+extern int dbji_call_assoc(DB_JAVAINFO *, DB *db, jobject jdb,
+ const DBT *key, const DBT* data, DBT *result);
extern void dbji_set_bt_compare_object(DB_JAVAINFO *, JNIEnv *jnienv, DB *db, jobject value);
extern int dbji_call_bt_compare(DB_JAVAINFO *, DB *db, jobject jdb,
const DBT *dbt1, const DBT *dbt2);
diff --git a/bdb/libdb_java/java_locked.c b/bdb/libdb_java/java_locked.c
index a5603df5d60..9534a387b40 100644
--- a/bdb/libdb_java/java_locked.c
+++ b/bdb/libdb_java/java_locked.c
@@ -1,13 +1,13 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1997, 1998, 1999, 2000
+ * Copyright (c) 1997-2002
* Sleepycat Software. All rights reserved.
*/
#include "db_config.h"
#ifndef lint
-static const char revid[] = "$Id: java_locked.c,v 11.11 2000/10/25 19:54:55 dda Exp $";
+static const char revid[] = "$Id: java_locked.c,v 11.32 2002/08/06 05:19:07 bostic Exp $";
#endif /* not lint */
#include <jni.h>
@@ -15,280 +15,307 @@ static const char revid[] = "$Id: java_locked.c,v 11.11 2000/10/25 19:54:55 dda
#include <stdlib.h>
#include <string.h>
-#include "db.h"
+#include "db_int.h"
#include "java_util.h"
/****************************************************************
*
- * Implementation of class LockedDBT
- *
+ * Implementation of functions to manipulate LOCKED_DBT.
*/
int
-jdbt_lock(JDBT *jdbt, JNIEnv *jnienv, jobject obj, OpKind kind)
+locked_dbt_get(LOCKED_DBT *ldbt, JNIEnv *jnienv, DB_ENV *dbenv,
+ jobject jdbt, OpKind kind)
{
DBT *dbt;
- jdbt->obj_ = obj;
- jdbt->do_realloc_ = 0;
- jdbt->kind_ = kind;
- jdbt->java_array_len_= 0;
- jdbt->java_data_ = 0;
- jdbt->before_data_ = 0;
- jdbt->has_error_ = 0;
- jdbt->dbt = (DBT_JAVAINFO *)get_private_dbobj(jnienv, name_DBT, obj);
-
- if (!verify_non_null(jnienv, jdbt->dbt)) {
- jdbt->has_error_ = 1;
+ COMPQUIET(dbenv, NULL);
+ ldbt->jdbt = jdbt;
+ ldbt->java_array_len = 0;
+ ldbt->flags = 0;
+ ldbt->kind = kind;
+ ldbt->java_data = 0;
+ ldbt->before_data = 0;
+ ldbt->javainfo =
+ (DBT_JAVAINFO *)get_private_dbobj(jnienv, name_DBT, jdbt);
+
+ if (!verify_non_null(jnienv, ldbt->javainfo)) {
+ report_exception(jnienv, "Dbt is gc'ed?", 0, 0);
+ F_SET(ldbt, LOCKED_ERROR);
return (EINVAL);
}
- dbt = &jdbt->dbt->dbt;
-
- if (kind == outOp &&
- (dbt->flags & (DB_DBT_USERMEM | DB_DBT_MALLOC | DB_DBT_REALLOC)) == 0) {
- report_exception(jnienv,
- "Dbt.flags must be set to Db.DB_DBT_USERMEM, "
- "Db.DB_DBT_MALLOC or Db.DB_DBT_REALLOC",
- 0, 0);
- jdbt->has_error_ = 1;
+ if (F_ISSET(ldbt->javainfo, DBT_JAVAINFO_LOCKED)) {
+ report_exception(jnienv, "Dbt is already in use", 0, 0);
+ F_SET(ldbt, LOCKED_ERROR);
return (EINVAL);
}
+ dbt = &ldbt->javainfo->dbt;
- /* If this is requested to be realloc, we cannot use the
- * underlying realloc, because the array we will pass in
- * is not allocated by us, but the Java VM, so it cannot
- * be successfully realloced. We simulate the reallocation,
- * by using USERMEM and reallocating the java array when a
- * ENOMEM error occurs. We change the flags during the operation,
- * and they are reset when the operation completes (in the
- * LockedDBT destructor.
+ if ((*jnienv)->GetBooleanField(jnienv,
+ jdbt, fid_Dbt_must_create_data) != 0)
+ F_SET(ldbt, LOCKED_CREATE_DATA);
+ else
+ ldbt->javainfo->array =
+ (*jnienv)->GetObjectField(jnienv, jdbt, fid_Dbt_data);
+
+ dbt->size = (*jnienv)->GetIntField(jnienv, jdbt, fid_Dbt_size);
+ dbt->ulen = (*jnienv)->GetIntField(jnienv, jdbt, fid_Dbt_ulen);
+ dbt->dlen = (*jnienv)->GetIntField(jnienv, jdbt, fid_Dbt_dlen);
+ dbt->doff = (*jnienv)->GetIntField(jnienv, jdbt, fid_Dbt_doff);
+ dbt->flags = (*jnienv)->GetIntField(jnienv, jdbt, fid_Dbt_flags);
+ ldbt->javainfo->offset = (*jnienv)->GetIntField(jnienv, jdbt,
+ fid_Dbt_offset);
+
+ /*
+ * If no flags are set, use default behavior of DB_DBT_MALLOC.
+ * We can safely set dbt->flags because flags will never be copied
+ * back to the Java Dbt.
+ */
+ if (kind != inOp &&
+ !F_ISSET(dbt, DB_DBT_USERMEM | DB_DBT_MALLOC | DB_DBT_REALLOC))
+ F_SET(dbt, DB_DBT_MALLOC);
+
+ /*
+ * If this is requested to be realloc with an existing array,
+ * we cannot use the underlying realloc, because the array we
+ * will pass in is allocated by the Java VM, not us, so it
+ * cannot be realloced. We simulate the reallocation by using
+ * USERMEM and reallocating the java array when a ENOMEM error
+ * occurs. We change the flags during the operation, and they
+ * are reset when the operation completes (in locked_dbt_put).
*/
- if ((dbt->flags & DB_DBT_REALLOC) != 0) {
- dbt->flags &= ~DB_DBT_REALLOC;
- dbt->flags |= DB_DBT_USERMEM;
- jdbt->do_realloc_ = 1;
+ if (F_ISSET(dbt, DB_DBT_REALLOC) && ldbt->javainfo->array != NULL) {
+ F_CLR(dbt, DB_DBT_REALLOC);
+ F_SET(dbt, DB_DBT_USERMEM);
+ F_SET(ldbt, LOCKED_REALLOC_NONNULL);
}
- if ((dbt->flags & DB_DBT_USERMEM) || kind != outOp) {
+ if ((F_ISSET(dbt, DB_DBT_USERMEM) || kind != outOp) &&
+ !F_ISSET(ldbt, LOCKED_CREATE_DATA)) {
- /* If writing with DB_DBT_USERMEM/REALLOC
+ /*
+ * If writing with DB_DBT_USERMEM
* or it's a set (or get/set) operation,
* then the data should point to a java array.
* Note that outOp means data is coming out of the database
* (it's a get). inOp means data is going into the database
* (either a put, or a key input).
*/
- if (!jdbt->dbt->array_) {
+ if (!ldbt->javainfo->array) {
report_exception(jnienv, "Dbt.data is null", 0, 0);
- jdbt->has_error_ = 1;
+ F_SET(ldbt, LOCKED_ERROR);
return (EINVAL);
}
/* Verify other parameters */
- jdbt->java_array_len_ = (*jnienv)->GetArrayLength(jnienv, jdbt->dbt->array_);
- if (jdbt->dbt->offset_ < 0 ) {
+ ldbt->java_array_len = (*jnienv)->GetArrayLength(jnienv,
+ ldbt->javainfo->array);
+ if (ldbt->javainfo->offset < 0 ) {
report_exception(jnienv, "Dbt.offset illegal", 0, 0);
- jdbt->has_error_ = 1;
+ F_SET(ldbt, LOCKED_ERROR);
return (EINVAL);
}
- if (dbt->ulen + jdbt->dbt->offset_ > jdbt->java_array_len_) {
+ if (dbt->size + ldbt->javainfo->offset > ldbt->java_array_len) {
report_exception(jnienv,
- "Dbt.ulen + Dbt.offset greater than array length", 0, 0);
- jdbt->has_error_ = 1;
+ "Dbt.size + Dbt.offset greater than array length",
+ 0, 0);
+ F_SET(ldbt, LOCKED_ERROR);
return (EINVAL);
}
- jdbt->java_data_ = (*jnienv)->GetByteArrayElements(jnienv, jdbt->dbt->array_,
- (jboolean *)0);
- dbt->data = jdbt->before_data_ = jdbt->java_data_ + jdbt->dbt->offset_;
+ ldbt->java_data = (*jnienv)->GetByteArrayElements(jnienv,
+ ldbt->javainfo->array,
+ (jboolean *)0);
+
+ dbt->data = ldbt->before_data = ldbt->java_data +
+ ldbt->javainfo->offset;
}
- else {
+ else if (!F_ISSET(ldbt, LOCKED_CREATE_DATA)) {
- /* If writing with DB_DBT_MALLOC, then the data is
- * allocated by DB.
+ /*
+ * If writing with DB_DBT_MALLOC or DB_DBT_REALLOC with
+ * a null array, then the data is allocated by DB.
*/
- dbt->data = jdbt->before_data_ = 0;
+ dbt->data = ldbt->before_data = 0;
}
+
+ /*
+ * RPC makes the assumption that if dbt->size is non-zero, there
+ * is data to copy from dbt->data. We may have set dbt->size
+ * to a non-zero integer above but decided not to point
+ * dbt->data at anything. (One example is if we're doing an outOp
+ * with an already-used Dbt whose values we expect to just
+ * overwrite.)
+ *
+ * Clean up the dbt fields so we don't run into trouble.
+ * (Note that doff, dlen, and flags all may contain meaningful
+ * values.)
+ */
+ if (dbt->data == NULL)
+ dbt->size = dbt->ulen = 0;
+
+ F_SET(ldbt->javainfo, DBT_JAVAINFO_LOCKED);
return (0);
}
-/* The LockedDBT destructor is called when the java handler returns
- * to the user, since that's when the LockedDBT objects go out of scope.
- * Since it is thus called after any call to the underlying database,
- * it copies any information from temporary structures back to user
- * accessible arrays, and of course must free memory and remove references.
+/*
+ * locked_dbt_put must be called for any LOCKED_DBT struct before a
+ * java handler returns to the user. It can be thought of as the
+ * LOCKED_DBT destructor. It copies any information from temporary
+ * structures back to user accessible arrays, and of course must free
+ * memory and remove references. The LOCKED_DBT itself is not freed,
+ * as it is expected to be a stack variable.
+ *
+ * Note that after this call, the LOCKED_DBT can still be used in
+ * limited ways, e.g. to look at values in the C DBT.
*/
void
-jdbt_unlock(JDBT *jdbt, JNIEnv *jnienv)
+locked_dbt_put(LOCKED_DBT *ldbt, JNIEnv *jnienv, DB_ENV *dbenv)
{
DBT *dbt;
- dbt = &jdbt->dbt->dbt;
+ dbt = &ldbt->javainfo->dbt;
- /* Fix up the flags if we changed them. */
- if (jdbt->do_realloc_) {
- dbt->flags &= ~DB_DBT_USERMEM;
- dbt->flags |= DB_DBT_REALLOC;
- }
+ /*
+ * If the error flag was set, we never succeeded
+ * in allocating storage.
+ */
+ if (F_ISSET(ldbt, LOCKED_ERROR))
+ return;
- if ((dbt->flags & (DB_DBT_USERMEM | DB_DBT_REALLOC)) ||
- jdbt->kind_ == inOp) {
+ if (((F_ISSET(dbt, DB_DBT_USERMEM) ||
+ F_ISSET(ldbt, LOCKED_REALLOC_NONNULL)) ||
+ ldbt->kind == inOp) && !F_ISSET(ldbt, LOCKED_CREATE_DATA)) {
- /* If writing with DB_DBT_USERMEM/REALLOC or it's a set
+ /*
+ * If writing with DB_DBT_USERMEM or it's a set
* (or get/set) operation, then the data may be already in
* the java array, in which case, we just need to release it.
* If DB didn't put it in the array (indicated by the
* dbt->data changing), we need to do that
*/
- if (jdbt->before_data_ != jdbt->java_data_) {
+ if (ldbt->before_data != ldbt->java_data) {
(*jnienv)->SetByteArrayRegion(jnienv,
- jdbt->dbt->array_,
- jdbt->dbt->offset_,
+ ldbt->javainfo->array,
+ ldbt->javainfo->offset,
dbt->ulen,
- jdbt->before_data_);
+ ldbt->before_data);
}
- (*jnienv)->ReleaseByteArrayElements(jnienv, jdbt->dbt->array_, jdbt->java_data_, 0);
+ (*jnienv)->ReleaseByteArrayElements(jnienv,
+ ldbt->javainfo->array,
+ ldbt->java_data, 0);
dbt->data = 0;
}
- if ((dbt->flags & DB_DBT_MALLOC) && jdbt->kind_ != inOp) {
-
- /* If writing with DB_DBT_MALLOC, then the data was allocated
- * by DB. If dbt->data is zero, it means an error occurred
- * (and should have been already reported).
+ else if (F_ISSET(dbt, DB_DBT_MALLOC | DB_DBT_REALLOC) &&
+ ldbt->kind != inOp && !F_ISSET(ldbt, LOCKED_CREATE_DATA)) {
+
+ /*
+ * If writing with DB_DBT_MALLOC, or DB_DBT_REALLOC
+ * with a zero buffer, then the data was allocated by
+ * DB. If dbt->data is zero, it means an error
+ * occurred (and should have been already reported).
*/
if (dbt->data) {
- /* Release any old references. */
- dbjit_release(jdbt->dbt, jnienv);
-
- /* In the case of SET_RANGE, the key is inOutOp
+ /*
+ * In the case of SET_RANGE, the key is inOutOp
* and when not found, its data will be left as
* its original value. Only copy and free it
* here if it has been allocated by DB
* (dbt->data has changed).
*/
- if (dbt->data != jdbt->before_data_) {
- jdbt->dbt->array_ = (jbyteArray)
- NEW_GLOBAL_REF(jnienv,
- (*jnienv)->NewByteArray(jnienv,
- dbt->size));
- jdbt->dbt->offset_ = 0;
+ if (dbt->data != ldbt->before_data) {
+ jbyteArray newarr;
+
+ if ((newarr = (*jnienv)->NewByteArray(jnienv,
+ dbt->size)) == NULL) {
+ /* The JVM has posted an exception. */
+ F_SET(ldbt, LOCKED_ERROR);
+ return;
+ }
+ (*jnienv)->SetObjectField(jnienv, ldbt->jdbt,
+ fid_Dbt_data,
+ newarr);
+ ldbt->javainfo->offset = 0;
(*jnienv)->SetByteArrayRegion(jnienv,
- jdbt->dbt->array_, 0, dbt->size,
+ newarr, 0, dbt->size,
(jbyte *)dbt->data);
- free(dbt->data);
+ (void)__os_ufree(dbenv, dbt->data);
dbt->data = 0;
}
}
}
+
+ /*
+ * The size field may have changed after a DB API call,
+ * so we set that back too.
+ */
+ (*jnienv)->SetIntField(jnienv, ldbt->jdbt, fid_Dbt_size, dbt->size);
+ ldbt->javainfo->array = NULL;
+ F_CLR(ldbt->javainfo, DBT_JAVAINFO_LOCKED);
}
-/* Realloc the java array to receive data if the DBT was marked
- * for realloc, and the last operation set the size field to an
- * amount greater than ulen.
+/*
+ * Realloc the java array to receive data if the DBT used
+ * DB_DBT_REALLOC flag with a non-null data array, and the last
+ * operation set the size field to an amount greater than ulen.
+ * Return 1 if these conditions are met, otherwise 0. This is used
+ * internally to simulate the operations needed for DB_DBT_REALLOC.
*/
-int jdbt_realloc(JDBT *jdbt, JNIEnv *jnienv)
+int locked_dbt_realloc(LOCKED_DBT *ldbt, JNIEnv *jnienv, DB_ENV *dbenv)
{
DBT *dbt;
- dbt = &jdbt->dbt->dbt;
+ COMPQUIET(dbenv, NULL);
+ dbt = &ldbt->javainfo->dbt;
- if (!jdbt->do_realloc_ || jdbt->has_error_ || dbt->size <= dbt->ulen)
+ if (!F_ISSET(ldbt, LOCKED_REALLOC_NONNULL) ||
+ F_ISSET(ldbt, LOCKED_ERROR) || dbt->size <= dbt->ulen)
return (0);
- (*jnienv)->ReleaseByteArrayElements(jnienv, jdbt->dbt->array_, jdbt->java_data_, 0);
- dbjit_release(jdbt->dbt, jnienv);
+ (*jnienv)->ReleaseByteArrayElements(jnienv, ldbt->javainfo->array,
+ ldbt->java_data, 0);
- /* We allocate a new array of the needed size.
+ /*
+ * We allocate a new array of the needed size.
* We'll set the offset to 0, as the old offset
* really doesn't make any sense.
*/
- jdbt->java_array_len_ = dbt->ulen = dbt->size;
- jdbt->dbt->offset_ = 0;
- jdbt->dbt->array_ = (jbyteArray)
- NEW_GLOBAL_REF(jnienv, (*jnienv)->NewByteArray(jnienv, dbt->size));
-
- jdbt->java_data_ = (*jnienv)->GetByteArrayElements(jnienv,
- jdbt->dbt->array_,
- (jboolean *)0);
- dbt->data = jdbt->before_data_ = jdbt->java_data_;
+ if ((ldbt->javainfo->array = (*jnienv)->NewByteArray(jnienv,
+ dbt->size)) == NULL) {
+ F_SET(ldbt, LOCKED_ERROR);
+ return (0);
+ }
+
+ ldbt->java_array_len = dbt->ulen = dbt->size;
+ ldbt->javainfo->offset = 0;
+ (*jnienv)->SetObjectField(jnienv, ldbt->jdbt, fid_Dbt_data,
+ ldbt->javainfo->array);
+ ldbt->java_data = (*jnienv)->GetByteArrayElements(jnienv,
+ ldbt->javainfo->array, (jboolean *)0);
+ memcpy(ldbt->java_data, ldbt->before_data, dbt->ulen);
+ dbt->data = ldbt->before_data = ldbt->java_data;
return (1);
}
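The "simulate the operations needed for DB_DBT_REALLOC" remark above can be made concrete with a small retry pattern built on locked_dbt_realloc(). The sketch below is only illustrative and not part of this change: the db handle, the lkey/ldata LOCKED_DBTs and the bare db->get() call are assumed placeholders.

/*
 * Hypothetical caller: retry the DB operation while
 * locked_dbt_realloc() keeps growing the receiving array.
 */
static int example_get_with_realloc(DB *db, DB_ENV *dbenv,
    JNIEnv *jnienv, LOCKED_DBT *lkey, LOCKED_DBT *ldata)
{
	int err;

	do
		err = db->get(db, NULL, &lkey->javainfo->dbt,
		    &ldata->javainfo->dbt, 0);
	while (err == ENOMEM &&
	    locked_dbt_realloc(ldata, jnienv, dbenv) == 1);
	return (err);
}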
/****************************************************************
*
- * Implementation of class JSTR
- *
+ * Implementation of functions to manipulate LOCKED_STRING.
*/
int
-jstr_lock(JSTR *js, JNIEnv *jnienv, jstring jstr)
+locked_string_get(LOCKED_STRING *ls, JNIEnv *jnienv, jstring jstr)
{
- js->jstr_ = jstr;
+ ls->jstr = jstr;
if (jstr == 0)
- js->string = 0;
+ ls->string = 0;
else
- js->string = (*jnienv)->GetStringUTFChars(jnienv, jstr,
+ ls->string = (*jnienv)->GetStringUTFChars(jnienv, jstr,
(jboolean *)0);
return (0);
}
-void jstr_unlock(JSTR *js, JNIEnv *jnienv)
+void locked_string_put(LOCKED_STRING *ls, JNIEnv *jnienv)
{
- if (js->jstr_)
- (*jnienv)->ReleaseStringUTFChars(jnienv, js->jstr_, js->string);
-}
-
-/****************************************************************
- *
- * Implementation of class JSTRARRAY
- *
- */
-int
-jstrarray_lock(JSTRARRAY *jsa, JNIEnv *jnienv, jobjectArray arr)
-{
- int i;
-
- jsa->arr_ = arr;
- jsa->array = 0;
-
- if (arr != 0) {
- int count = (*jnienv)->GetArrayLength(jnienv, arr);
- const char **new_array =
- (const char **)malloc((sizeof(const char *))*(count+1));
- for (i=0; i<count; i++) {
- jstring jstr = (jstring)(*jnienv)->GetObjectArrayElement(jnienv, arr, i);
- if (jstr == 0) {
- /*
- * An embedded null in the string array
- * is treated as an endpoint.
- */
- new_array[i] = 0;
- break;
- }
- else {
- new_array[i] =
- (*jnienv)->GetStringUTFChars(jnienv, jstr, (jboolean *)0);
- }
- }
- new_array[count] = 0;
- jsa->array = new_array;
- }
- return (0);
-}
-
-void jstrarray_unlock(JSTRARRAY *jsa, JNIEnv *jnienv)
-{
- int i;
- jstring jstr;
-
- if (jsa->arr_) {
- int count = (*jnienv)->GetArrayLength(jnienv, jsa->arr_);
- for (i=0; i<count; i++) {
- if (jsa->array[i] == 0)
- break;
- jstr = (jstring)(*jnienv)->GetObjectArrayElement(jnienv, jsa->arr_, i);
- (*jnienv)->ReleaseStringUTFChars(jnienv, jstr, jsa->array[i]);
- }
- free((void*)jsa->array);
- }
+ if (ls->jstr)
+ (*jnienv)->ReleaseStringUTFChars(jnienv, ls->jstr, ls->string);
}
diff --git a/bdb/libdb_java/java_locked.h b/bdb/libdb_java/java_locked.h
index 9b88cdd0619..a79d929abee 100644
--- a/bdb/libdb_java/java_locked.h
+++ b/bdb/libdb_java/java_locked.h
@@ -1,98 +1,82 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1997, 1998, 1999, 2000
+ * Copyright (c) 1997-2002
* Sleepycat Software. All rights reserved.
*
- * $Id: java_locked.h,v 11.9 2000/10/25 19:54:55 dda Exp $
+ * $Id: java_locked.h,v 11.18 2002/05/07 16:12:42 dda Exp $
*/
#ifndef _JAVA_LOCKED_H_
#define _JAVA_LOCKED_H_
/*
- * Used internally by LockedDBT constructor.
+ * Used as argument to locked_dbt_get().
*/
typedef enum _OpKind {
- inOp, /* setting data in database (passing data in) */
- outOp, /* getting data from database to user memory */
- inOutOp /* both getting/setting data */
+ inOp, /* setting data in database (passing data in) */
+ outOp, /* getting data from database to user memory */
+ inOutOp /* both getting/setting data */
} OpKind;
/*
+ * LOCKED_DBT
*
- * Declaration of JDBT
- *
- * A JDBT object exists during a
- * single native call to the DB API. Its constructor's job is
- * to temporarily convert any java array found in the DBT_JAVAINFO
- * to actual bytes in memory that remain locked in place. These
- * bytes are used during the call to the underlying DB C layer,
- * and are released and/or copied back by the destructor.
- * Thus, a LockedDBT must be declared as a stack object to
- * function properly.
+ * A stack variable LOCKED_DBT should be declared for each Dbt used in a
+ * native call to the DB API. Before the DBT can be used, locked_dbt_get()
+ * must be called to temporarily convert any java array found in the
+ * Dbt (which has a pointer to a DBT_JAVAINFO struct) to actual bytes
+ * in memory that remain locked in place. These bytes are used during
+ * the call to the DB C API, and are released and/or copied back when
+ * locked_dbt_put is called.
*/
-typedef struct _jdbt
+typedef struct _locked_dbt
{
- /* these are accessed externally to ldbt_ functions */
- DBT_JAVAINFO *dbt;
- unsigned int java_array_len_;
+ /* these are accessed externally to locked_dbt_ functions */
+ DBT_JAVAINFO *javainfo;
+ unsigned int java_array_len;
+ jobject jdbt;
- /* these are for used internally by ldbt_ functions */
- jobject obj_;
- jbyte *java_data_;
- jbyte *before_data_;
- int has_error_;
- int do_realloc_;
- OpKind kind_;
-} JDBT;
+ /* these are used internally by locked_dbt_ functions */
+ jbyte *java_data;
+ jbyte *before_data;
+ OpKind kind;
-extern int jdbt_lock(JDBT *, JNIEnv *jnienv, jobject obj, OpKind kind);
-extern void jdbt_unlock(JDBT *, JNIEnv *jnienv); /* this unlocks and frees the memory */
-extern int jdbt_realloc(JDBT *, JNIEnv *jnienv); /* returns 1 if reallocation took place */
+#define LOCKED_ERROR 0x01 /* error occurred */
+#define LOCKED_CREATE_DATA 0x02 /* must create data on the fly */
+#define LOCKED_REALLOC_NONNULL 0x04 /* DB_DBT_REALLOC flag, nonnull data */
+ u_int32_t flags;
+} LOCKED_DBT;
-/****************************************************************
- *
- * Declaration of JSTR
- *
- * A JSTR exists temporarily to convert a java jstring object
- * to a char *. Because the memory for the char * string is
- * managed by the JVM, it must be released when we are done
- * looking at it. Typically, jstr_lock() is called at the
- * beginning of a function for each jstring object, and jstr_unlock
- * is called at the end of each function for each JSTR.
- */
-typedef struct _jstr
-{
- /* this accessed externally to jstr_ functions */
- const char *string;
+/* Fill the LOCKED_DBT struct and lock the Java byte array */
+extern int locked_dbt_get(LOCKED_DBT *, JNIEnv *, DB_ENV *, jobject, OpKind);
- /* this is used internally by jstr_ functions */
- jstring jstr_;
-} JSTR;
+/* unlock the Java byte array */
+extern void locked_dbt_put(LOCKED_DBT *, JNIEnv *, DB_ENV *);
-extern int jstr_lock(JSTR *, JNIEnv *jnienv, jstring jstr);
-extern void jstr_unlock(JSTR *, JNIEnv *jnienv); /* this unlocks and frees mem */
+/* realloc the Java byte array */
+extern int locked_dbt_realloc(LOCKED_DBT *, JNIEnv *, DB_ENV *);
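The comment above prescribes a strict get/use/put lifecycle for LOCKED_DBT. A minimal sketch of that lifecycle follows, under stated assumptions: the jdb, jkey and jdata jobject parameters, the bare db->get() call and the reduced error handling are illustrative only, not code from this patch.

/* Hedged illustration of the LOCKED_DBT lifecycle around one DB call. */
static void example_db_get(JNIEnv *jnienv, jobject jdb,
    jobject jkey, jobject jdata)
{
	LOCKED_DBT lkey, ldata;
	DB *db;
	int err;

	if ((db = get_DB(jnienv, jdb)) == NULL)
		return;
	if (locked_dbt_get(&lkey, jnienv, db->dbenv, jkey, inOp) != 0)
		return;
	if (locked_dbt_get(&ldata, jnienv, db->dbenv, jdata, outOp) == 0) {
		err = db->get(db, NULL, &lkey.javainfo->dbt,
		    &ldata.javainfo->dbt, 0);
		(void)verify_return(jnienv, err, 0);
		locked_dbt_put(&ldata, jnienv, db->dbenv);
	}
	locked_dbt_put(&lkey, jnienv, db->dbenv);
}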
-/****************************************************************
- *
- * Declaration of class LockedStrarray
+/*
+ * LOCKED_STRING
*
- * Given a java jobjectArray object (that must be a String[]),
- * we extract the individual strings and build a const char **
- * When the LockedStrarray object is destroyed, the individual
- * strings are released.
+ * A LOCKED_STRING exists temporarily to convert a java jstring object
+ * to a char *. Because the memory for the char * string is
+ * managed by the JVM, it must be released when we are done
+ * looking at it. Typically, locked_string_get() is called at the
+ * beginning of a function for each jstring object, and locked_string_put
+ * is called at the end of each function for each LOCKED_STRING.
*/
-typedef struct _jstrarray
+typedef struct _locked_string
{
- /* this accessed externally to jstrarray_ functions */
- const char **array;
+ /* this is accessed externally to locked_string_ functions */
+ const char *string;
- /* this is used internally by jstrarray_ functions */
- jobjectArray arr_;
-} JSTRARRAY;
+ /* this is used internally by locked_string_ functions */
+ jstring jstr;
+} LOCKED_STRING;
-extern int jstrarray_lock(JSTRARRAY *, JNIEnv *jnienv, jobjectArray arr);
-extern void jstrarray_unlock(JSTRARRAY *, JNIEnv *jnienv); /* this unlocks and frees mem */
+extern int locked_string_get(LOCKED_STRING *, JNIEnv *jnienv, jstring jstr);
+extern void locked_string_put(LOCKED_STRING *, JNIEnv *jnienv); /* this unlocks and frees mem */
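A matching sketch for LOCKED_STRING, following the get-at-entry / put-at-exit convention documented above; the jname parameter and the set_data_dir() call are assumptions chosen only to show the borrowed UTF chars being used for the duration of one call.

/* Hedged illustration: borrow the UTF chars only for the call. */
static void example_set_data_dir(JNIEnv *jnienv, DB_ENV *dbenv,
    jstring jname)
{
	LOCKED_STRING ls;

	if (locked_string_get(&ls, jnienv, jname) != 0)
		return;
	(void)verify_return(jnienv,
	    dbenv->set_data_dir(dbenv, ls.string), 0);
	locked_string_put(&ls, jnienv);
}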
#endif /* !_JAVA_LOCKED_H_ */
diff --git a/bdb/libdb_java/java_stat_auto.c b/bdb/libdb_java/java_stat_auto.c
new file mode 100644
index 00000000000..c1412232e85
--- /dev/null
+++ b/bdb/libdb_java/java_stat_auto.c
@@ -0,0 +1,207 @@
+/* DO NOT EDIT: automatically built by dist/s_java. */
+#include "java_util.h"
+int __jv_fill_bt_stat(JNIEnv *jnienv, jclass cl,
+ jobject jobj, struct __db_bt_stat *statp) {
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, bt_magic);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, bt_version);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, bt_metaflags);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, bt_nkeys);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, bt_ndata);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, bt_pagesize);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, bt_maxkey);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, bt_minkey);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, bt_re_len);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, bt_re_pad);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, bt_levels);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, bt_int_pg);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, bt_leaf_pg);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, bt_dup_pg);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, bt_over_pg);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, bt_free);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, bt_int_pgfree);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, bt_leaf_pgfree);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, bt_dup_pgfree);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, bt_over_pgfree);
+ return (0);
+}
+int __jv_fill_h_stat(JNIEnv *jnienv, jclass cl,
+ jobject jobj, struct __db_h_stat *statp) {
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, hash_magic);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, hash_version);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, hash_metaflags);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, hash_nkeys);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, hash_ndata);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, hash_pagesize);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, hash_ffactor);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, hash_buckets);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, hash_free);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, hash_bfree);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, hash_bigpages);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, hash_big_bfree);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, hash_overflows);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, hash_ovfl_free);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, hash_dup);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, hash_dup_free);
+ return (0);
+}
+int __jv_fill_lock_stat(JNIEnv *jnienv, jclass cl,
+ jobject jobj, struct __db_lock_stat *statp) {
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_id);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_cur_maxid);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_maxlocks);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_maxlockers);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_maxobjects);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_nmodes);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_nlocks);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_maxnlocks);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_nlockers);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_maxnlockers);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_nobjects);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_maxnobjects);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_nconflicts);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_nrequests);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_nreleases);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_nnowaits);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_ndeadlocks);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_locktimeout);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_nlocktimeouts);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_txntimeout);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_ntxntimeouts);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_region_wait);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_region_nowait);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_regsize);
+ return (0);
+}
+int __jv_fill_log_stat(JNIEnv *jnienv, jclass cl,
+ jobject jobj, struct __db_log_stat *statp) {
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_magic);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_version);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_mode);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_lg_bsize);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_lg_size);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_w_bytes);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_w_mbytes);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_wc_bytes);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_wc_mbytes);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_wcount);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_wcount_fill);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_scount);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_region_wait);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_region_nowait);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_cur_file);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_cur_offset);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_disk_file);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_disk_offset);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_regsize);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_maxcommitperflush);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_mincommitperflush);
+ return (0);
+}
+int __jv_fill_mpool_stat(JNIEnv *jnienv, jclass cl,
+ jobject jobj, struct __db_mpool_stat *statp) {
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_gbytes);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_bytes);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_ncache);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_regsize);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_map);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_cache_hit);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_cache_miss);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_page_create);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_page_in);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_page_out);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_ro_evict);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_rw_evict);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_page_trickle);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_pages);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_page_clean);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_page_dirty);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_hash_buckets);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_hash_searches);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_hash_longest);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_hash_examined);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_hash_nowait);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_hash_wait);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_hash_max_wait);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_region_nowait);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_region_wait);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_alloc);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_alloc_buckets);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_alloc_max_buckets);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_alloc_pages);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_alloc_max_pages);
+ return (0);
+}
+int __jv_fill_qam_stat(JNIEnv *jnienv, jclass cl,
+ jobject jobj, struct __db_qam_stat *statp) {
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, qs_magic);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, qs_version);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, qs_metaflags);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, qs_nkeys);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, qs_ndata);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, qs_pagesize);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, qs_extentsize);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, qs_pages);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, qs_re_len);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, qs_re_pad);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, qs_pgfree);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, qs_first_recno);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, qs_cur_recno);
+ return (0);
+}
+int __jv_fill_rep_stat(JNIEnv *jnienv, jclass cl,
+ jobject jobj, struct __db_rep_stat *statp) {
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_status);
+ JAVADB_STAT_LSN(jnienv, cl, jobj, statp, st_next_lsn);
+ JAVADB_STAT_LSN(jnienv, cl, jobj, statp, st_waiting_lsn);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_dupmasters);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_env_id);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_env_priority);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_gen);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_log_duplicated);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_log_queued);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_log_queued_max);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_log_queued_total);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_log_records);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_log_requested);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_master);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_master_changes);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_msgs_badgen);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_msgs_processed);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_msgs_recover);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_msgs_send_failures);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_msgs_sent);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_newsites);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_nsites);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_nthrottles);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_outdated);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_txns_applied);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_elections);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_elections_won);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_election_cur_winner);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_election_gen);
+ JAVADB_STAT_LSN(jnienv, cl, jobj, statp, st_election_lsn);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_election_nsites);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_election_priority);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_election_status);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_election_tiebreaker);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_election_votes);
+ return (0);
+}
+int __jv_fill_txn_stat(JNIEnv *jnienv, jclass cl,
+ jobject jobj, struct __db_txn_stat *statp) {
+ JAVADB_STAT_LSN(jnienv, cl, jobj, statp, st_last_ckp);
+ JAVADB_STAT_LONG(jnienv, cl, jobj, statp, st_time_ckp);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_last_txnid);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_maxtxns);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_naborts);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_nbegins);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_ncommits);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_nactive);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_nrestores);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_maxnactive);
+ JAVADB_STAT_ACTIVE(jnienv, cl, jobj, statp, st_txnarray);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_region_wait);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_region_nowait);
+ JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_regsize);
+ return (0);
+}
diff --git a/bdb/libdb_java/java_stat_auto.h b/bdb/libdb_java/java_stat_auto.h
new file mode 100644
index 00000000000..20eecf1e212
--- /dev/null
+++ b/bdb/libdb_java/java_stat_auto.h
@@ -0,0 +1,9 @@
+/* DO NOT EDIT: automatically built by dist/s_java. */
+extern int __jv_fill_bt_stat(JNIEnv *jnienv, jclass cl, jobject jobj, struct __db_bt_stat *statp);
+extern int __jv_fill_h_stat(JNIEnv *jnienv, jclass cl, jobject jobj, struct __db_h_stat *statp);
+extern int __jv_fill_lock_stat(JNIEnv *jnienv, jclass cl, jobject jobj, struct __db_lock_stat *statp);
+extern int __jv_fill_log_stat(JNIEnv *jnienv, jclass cl, jobject jobj, struct __db_log_stat *statp);
+extern int __jv_fill_mpool_stat(JNIEnv *jnienv, jclass cl, jobject jobj, struct __db_mpool_stat *statp);
+extern int __jv_fill_qam_stat(JNIEnv *jnienv, jclass cl, jobject jobj, struct __db_qam_stat *statp);
+extern int __jv_fill_rep_stat(JNIEnv *jnienv, jclass cl, jobject jobj, struct __db_rep_stat *statp);
+extern int __jv_fill_txn_stat(JNIEnv *jnienv, jclass cl, jobject jobj, struct __db_txn_stat *statp);
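The JAVADB_STAT_* macros used by the generated fillers above are defined elsewhere (java_util.h) and are not part of this hunk. As an assumed sketch only, such a helper plausibly copies one C stat field into the identically named Java int field via set_int_field():

/* Assumed shape of a stat-filling helper; not the real macro. */
#define EXAMPLE_STAT_INT(jnienv, cl, jobj, statp, name) \
	set_int_field(jnienv, cl, jobj, #name, (jint)(statp)->name)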
diff --git a/bdb/libdb_java/java_util.c b/bdb/libdb_java/java_util.c
index f42ceafbee8..5a538ee0785 100644
--- a/bdb/libdb_java/java_util.c
+++ b/bdb/libdb_java/java_util.c
@@ -1,21 +1,19 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1997, 1998, 1999, 2000
+ * Copyright (c) 1997-2002
* Sleepycat Software. All rights reserved.
*/
#include "db_config.h"
#ifndef lint
-static const char revid[] = "$Id: java_util.c,v 11.17 2000/10/28 13:09:39 dda Exp $";
+static const char revid[] = "$Id: java_util.c,v 11.49 2002/09/13 03:09:30 mjc Exp $";
#endif /* not lint */
#include <jni.h>
#include <errno.h>
-#include <stdlib.h>
-#include <string.h>
-#include "db.h"
+#include "db_int.h"
#include "java_util.h"
#ifdef DB_WIN32
@@ -32,17 +30,22 @@ const char * const name_DB_EXCEPTION = "DbException";
const char * const name_DB_HASH_STAT = "DbHashStat";
const char * const name_DB_LOCK = "DbLock";
const char * const name_DB_LOCK_STAT = "DbLockStat";
+const char * const name_DB_LOCKNOTGRANTED_EX = "DbLockNotGrantedException";
+const char * const name_DB_LOGC = "DbLogc";
const char * const name_DB_LOG_STAT = "DbLogStat";
const char * const name_DB_LSN = "DbLsn";
const char * const name_DB_MEMORY_EX = "DbMemoryException";
const char * const name_DB_MPOOL_FSTAT = "DbMpoolFStat";
const char * const name_DB_MPOOL_STAT = "DbMpoolStat";
+const char * const name_DB_PREPLIST = "DbPreplist";
const char * const name_DB_QUEUE_STAT = "DbQueueStat";
+const char * const name_DB_REP_STAT = "DbRepStat";
const char * const name_DB_RUNRECOVERY_EX = "DbRunRecoveryException";
const char * const name_DBT = "Dbt";
const char * const name_DB_TXN = "DbTxn";
const char * const name_DB_TXN_STAT = "DbTxnStat";
const char * const name_DB_TXN_STAT_ACTIVE = "DbTxnStat$Active";
+const char * const name_DB_UTIL = "DbUtil";
const char * const name_DbAppendRecno = "DbAppendRecno";
const char * const name_DbBtreeCompare = "DbBtreeCompare";
const char * const name_DbBtreePrefix = "DbBtreePrefix";
@@ -50,24 +53,84 @@ const char * const name_DbDupCompare = "DbDupCompare";
const char * const name_DbEnvFeedback = "DbEnvFeedback";
const char * const name_DbErrcall = "DbErrcall";
const char * const name_DbHash = "DbHash";
+const char * const name_DbLockRequest = "DbLockRequest";
const char * const name_DbFeedback = "DbFeedback";
const char * const name_DbRecoveryInit = "DbRecoveryInit";
+const char * const name_DbRepTransport = "DbRepTransport";
+const char * const name_DbSecondaryKeyCreate = "DbSecondaryKeyCreate";
const char * const name_DbTxnRecover = "DbTxnRecover";
+const char * const name_RepElectResult = "DbEnv$RepElectResult";
+const char * const name_RepProcessMessage = "DbEnv$RepProcessMessage";
const char * const string_signature = "Ljava/lang/String;";
+jfieldID fid_Dbt_data;
+jfieldID fid_Dbt_offset;
+jfieldID fid_Dbt_size;
+jfieldID fid_Dbt_ulen;
+jfieldID fid_Dbt_dlen;
+jfieldID fid_Dbt_doff;
+jfieldID fid_Dbt_flags;
+jfieldID fid_Dbt_private_dbobj_;
+jfieldID fid_Dbt_must_create_data;
+jfieldID fid_DbLockRequest_op;
+jfieldID fid_DbLockRequest_mode;
+jfieldID fid_DbLockRequest_timeout;
+jfieldID fid_DbLockRequest_obj;
+jfieldID fid_DbLockRequest_lock;
+jfieldID fid_RepProcessMessage_envid;
+
/****************************************************************
*
* Utility functions used by "glue" functions.
- *
*/
-/* Get the private data from a Db* object that points back to a C DB_* object.
+/*
+ * Do any one time initialization, especially initializing any
+ * unchanging methodIds, fieldIds, etc.
+ */
+void one_time_init(JNIEnv *jnienv)
+{
+ jclass cl;
+
+ if ((cl = get_class(jnienv, name_DBT)) == NULL)
+ return; /* An exception has been posted. */
+ fid_Dbt_data = (*jnienv)->GetFieldID(jnienv, cl, "data", "[B");
+ fid_Dbt_offset = (*jnienv)->GetFieldID(jnienv, cl, "offset", "I");
+ fid_Dbt_size = (*jnienv)->GetFieldID(jnienv, cl, "size", "I");
+ fid_Dbt_ulen = (*jnienv)->GetFieldID(jnienv, cl, "ulen", "I");
+ fid_Dbt_dlen = (*jnienv)->GetFieldID(jnienv, cl, "dlen", "I");
+ fid_Dbt_doff = (*jnienv)->GetFieldID(jnienv, cl, "doff", "I");
+ fid_Dbt_flags = (*jnienv)->GetFieldID(jnienv, cl, "flags", "I");
+ fid_Dbt_must_create_data = (*jnienv)->GetFieldID(jnienv, cl,
+ "must_create_data", "Z");
+ fid_Dbt_private_dbobj_ =
+ (*jnienv)->GetFieldID(jnienv, cl, "private_dbobj_", "J");
+
+ if ((cl = get_class(jnienv, name_DbLockRequest)) == NULL)
+ return; /* An exception has been posted. */
+ fid_DbLockRequest_op = (*jnienv)->GetFieldID(jnienv, cl, "op", "I");
+ fid_DbLockRequest_mode = (*jnienv)->GetFieldID(jnienv, cl, "mode", "I");
+ fid_DbLockRequest_timeout =
+ (*jnienv)->GetFieldID(jnienv, cl, "timeout", "I");
+ fid_DbLockRequest_obj = (*jnienv)->GetFieldID(jnienv, cl, "obj",
+ "Lcom/sleepycat/db/Dbt;");
+ fid_DbLockRequest_lock = (*jnienv)->GetFieldID(jnienv, cl, "lock",
+ "Lcom/sleepycat/db/DbLock;");
+
+ if ((cl = get_class(jnienv, name_RepProcessMessage)) == NULL)
+ return; /* An exception has been posted. */
+ fid_RepProcessMessage_envid =
+ (*jnienv)->GetFieldID(jnienv, cl, "envid", "I");
+}
+
+/*
+ * Get the private data from a Db* object that points back to a C DB_* object.
* The private data is stored in the object as a Java long (64 bits),
* which is long enough to store a pointer on current architectures.
*/
void *get_private_dbobj(JNIEnv *jnienv, const char *classname,
- jobject obj)
+ jobject obj)
{
jclass dbClass;
jfieldID id;
@@ -76,18 +139,20 @@ void *get_private_dbobj(JNIEnv *jnienv, const char *classname,
if (!obj)
return (0);
- dbClass = get_class(jnienv, classname);
+ if ((dbClass = get_class(jnienv, classname)) == NULL)
+ return (NULL); /* An exception has been posted. */
id = (*jnienv)->GetFieldID(jnienv, dbClass, "private_dbobj_", "J");
lp.java_long = (*jnienv)->GetLongField(jnienv, obj, id);
return (lp.ptr);
}
-/* Set the private data in a Db* object that points back to a C DB_* object.
+/*
+ * Set the private data in a Db* object that points back to a C DB_* object.
* The private data is stored in the object as a Java long (64 bits),
* which is long enough to store a pointer on current architectures.
*/
void set_private_dbobj(JNIEnv *jnienv, const char *classname,
- jobject obj, void *value)
+ jobject obj, void *value)
{
long_to_ptr lp;
jclass dbClass;
@@ -95,12 +160,14 @@ void set_private_dbobj(JNIEnv *jnienv, const char *classname,
lp.java_long = 0; /* no junk in case sizes mismatch */
lp.ptr = value;
- dbClass = get_class(jnienv, classname);
+ if ((dbClass = get_class(jnienv, classname)) == NULL)
+ return; /* An exception has been posted. */
id = (*jnienv)->GetFieldID(jnienv, dbClass, "private_dbobj_", "J");
(*jnienv)->SetLongField(jnienv, obj, id, lp.java_long);
}
-/* Get the private data in a Db/DbEnv object that holds additional 'side data'.
+/*
+ * Get the private data in a Db/DbEnv object that holds additional 'side data'.
* The private data is stored in the object as a Java long (64 bits),
* which is long enough to store a pointer on current architectures.
*/
@@ -112,15 +179,17 @@ void *get_private_info(JNIEnv *jnienv, const char *classname,
long_to_ptr lp;
if (!obj)
- return (0);
+ return (NULL);
- dbClass = get_class(jnienv, classname);
+ if ((dbClass = get_class(jnienv, classname)) == NULL)
+ return (NULL); /* An exception has been posted. */
id = (*jnienv)->GetFieldID(jnienv, dbClass, "private_info_", "J");
lp.java_long = (*jnienv)->GetLongField(jnienv, obj, id);
return (lp.ptr);
}
-/* Set the private data in a Db/DbEnv object that holds additional 'side data'.
+/*
+ * Set the private data in a Db/DbEnv object that holds additional 'side data'.
* The private data is stored in the object as a Java long (64 bits),
* which is long enough to store a pointer on current architectures.
*/
@@ -133,7 +202,8 @@ void set_private_info(JNIEnv *jnienv, const char *classname,
lp.java_long = 0; /* no junk in case sizes mismatch */
lp.ptr = value;
- dbClass = get_class(jnienv, classname);
+ if ((dbClass = get_class(jnienv, classname)) == NULL)
+ return; /* An exception has been posted. */
id = (*jnienv)->GetFieldID(jnienv, dbClass, "private_info_", "J");
(*jnienv)->SetLongField(jnienv, obj, id, lp.java_long);
}
@@ -144,19 +214,48 @@ void set_private_info(JNIEnv *jnienv, const char *classname,
*/
jclass get_class(JNIEnv *jnienv, const char *classname)
{
- /* Note: PERFORMANCE: It should be possible to cache jclass's.
+ /*
+ * Note: PERFORMANCE: It should be possible to cache jclass's.
* If we do a NewGlobalRef on each one, we can keep them
* around in a table. A jclass is a jobject, and
* since NewGlobalRef returns a jobject, it isn't
* technically right, but it would likely work with
* most implementations. Possibly make it configurable.
*/
- char fullname[128] = DB_PACKAGE_NAME;
- strncat(fullname, classname, sizeof(fullname));
+ char fullname[128];
+
+ (void)snprintf(fullname, sizeof(fullname),
+ "%s%s", DB_PACKAGE_NAME, classname);
return ((*jnienv)->FindClass(jnienv, fullname));
}
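The performance note inside get_class() suggests caching jclass handles behind NewGlobalRef(). A hedged sketch of that idea for a single class follows; the cached_dbt_class variable is an assumption, and the jobject-to-jclass cast carries exactly the caveat the note mentions.

/* Hypothetical one-class cache, per the note in get_class(). */
static jclass cached_dbt_class;

static jclass get_class_cached(JNIEnv *jnienv)
{
	jclass cl;

	if (cached_dbt_class == NULL &&
	    (cl = get_class(jnienv, name_DBT)) != NULL)
		cached_dbt_class =
		    (jclass)(*jnienv)->NewGlobalRef(jnienv, cl);
	return (cached_dbt_class);
}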
-/* Set an individual field in a Db* object.
+/*
+ * Given a fully qualified name (e.g. "java.util.Hashtable"),
+ * return the jclass object. If it can't be found, an
+ * exception is raised and NULL is returned.
+ * This is appropriate for classes that may
+ * not be present.
+ */
+jclass get_fully_qualified_class(JNIEnv *jnienv, const char *classname)
+{
+ jclass result;
+
+ result = ((*jnienv)->FindClass(jnienv, classname));
+ if (result == NULL) {
+ jclass cnfe;
+ char message[1024];
+
+ cnfe = (*jnienv)->FindClass(jnienv,
+ "java/lang/ClassNotFoundException");
+ (void)snprintf(message, sizeof(message),
+ "%s: class not found", classname);
+ (*jnienv)->ThrowNew(jnienv, cnfe, message);
+ }
+ return (result);
+}
+
+/*
+ * Set an individual field in a Db* object.
* The field must be a DB object type.
*/
void set_object_field(JNIEnv *jnienv, jclass class_of_this,
@@ -166,36 +265,39 @@ void set_object_field(JNIEnv *jnienv, jclass class_of_this,
char signature[512];
jfieldID id;
- strncpy(signature, "L", sizeof(signature));
- strncat(signature, DB_PACKAGE_NAME, sizeof(signature));
- strncat(signature, object_classname, sizeof(signature));
- strncat(signature, ";", sizeof(signature));
-
- id = (*jnienv)->GetFieldID(jnienv, class_of_this, name_of_field, signature);
+ (void)snprintf(signature, sizeof(signature),
+ "L%s%s;", DB_PACKAGE_NAME, object_classname);
+ id = (*jnienv)->GetFieldID(
+ jnienv, class_of_this, name_of_field, signature);
(*jnienv)->SetObjectField(jnienv, jthis, id, obj);
}
-/* Set an individual field in a Db* object.
+/*
+ * Set an individual field in a Db* object.
* The field must be an integer type.
*/
void set_int_field(JNIEnv *jnienv, jclass class_of_this,
jobject jthis, const char *name_of_field, jint value)
{
- jfieldID id = (*jnienv)->GetFieldID(jnienv, class_of_this, name_of_field, "I");
+ jfieldID id =
+ (*jnienv)->GetFieldID(jnienv, class_of_this, name_of_field, "I");
(*jnienv)->SetIntField(jnienv, jthis, id, value);
}
-/* Set an individual field in a Db* object.
+/*
+ * Set an individual field in a Db* object.
* The field must be an integer type.
*/
void set_long_field(JNIEnv *jnienv, jclass class_of_this,
jobject jthis, const char *name_of_field, jlong value)
{
- jfieldID id = (*jnienv)->GetFieldID(jnienv, class_of_this, name_of_field, "J");
+ jfieldID id = (*jnienv)->GetFieldID(jnienv, class_of_this,
+ name_of_field, "J");
(*jnienv)->SetLongField(jnienv, jthis, id, value);
}
-/* Set an individual field in a Db* object.
+/*
+ * Set an individual field in a Db* object.
* The field must be an integer type.
*/
void set_lsn_field(JNIEnv *jnienv, jclass class_of_this,
@@ -205,89 +307,144 @@ void set_lsn_field(JNIEnv *jnienv, jclass class_of_this,
name_of_field, get_DbLsn(jnienv, value));
}
-/* Report an exception back to the java side.
+/*
+ * Report an exception back to the java side.
*/
-void report_exception(JNIEnv *jnienv, const char *text, int err,
- unsigned long expect_mask)
+void report_exception(JNIEnv *jnienv, const char *text,
+ int err, unsigned long expect_mask)
{
jstring textString;
jclass dbexcept;
jclass javaexcept;
- jmethodID constructId;
jthrowable obj;
textString = NULL;
dbexcept = NULL;
javaexcept = NULL;
- constructId = NULL;
- obj = NULL;
switch (err) {
- /* DB_JAVA_CALLBACK is returned by dbji_call_append_recno()
- * (the append_recno callback) when the Java version of the
- * callback has thrown an exception, and we want to pass the
- * exception on. The exception has already been thrown, we
+ /*
+ * DB_JAVA_CALLBACK is returned by
+ * dbji_call_append_recno() (the append_recno callback)
+ * when the Java version of the callback has thrown
+ * an exception, and we want to pass the exception on.
+ * The exception has already been thrown, we
* don't want to throw a new one.
*/
- case DB_JAVA_CALLBACK:
- break;
- case ENOMEM:
- dbexcept = get_class(jnienv, name_DB_MEMORY_EX);
- break;
- case ENOENT:
- /* In this case there is a corresponding standard java
- * exception type that we'll use. First we make sure
- * that the calling function expected this kind of error,
- * if not we give an 'internal error' DbException, since
- * we must not throw an exception type that isn't
- * declared in the signature.
- *
- * We'll make this a little more general if/when we add
- * more java standard exceptions.
- */
- if ((expect_mask & EXCEPTION_FILE_NOT_FOUND) == 0) {
- char errstr[1024];
-
- strncpy(errstr, "internal error: unexpected errno: ",
- sizeof(errstr));
- strncat(errstr, text, sizeof(errstr));
- textString = get_java_string(jnienv, errstr);
+ case DB_JAVA_CALLBACK:
+ break;
+ case ENOENT:
+ /*
+ * In this case there is a corresponding
+ * standard java exception type that we'll use.
+ * First we make sure that the calling function
+ * expected this kind of error, if not we give
+ * an 'internal error' DbException, since
+ * we must not throw an exception type that isn't
+ * declared in the signature.
+ *
+ * We'll make this a little more general if/when
+ * we add more java standard exceptions.
+ */
+ if ((expect_mask & EXCEPTION_FILE_NOT_FOUND) != 0) {
+ javaexcept = (*jnienv)->FindClass(jnienv,
+ "java/io/FileNotFoundException");
+ }
+ else {
+ char errstr[1024];
+
+ snprintf(errstr, sizeof(errstr),
+ "internal error: unexpected errno: %s",
+ text);
+ textString = get_java_string(jnienv,
+ errstr);
+ dbexcept = get_class(jnienv,
+ name_DB_EXCEPTION);
+ }
+ break;
+ case DB_RUNRECOVERY:
+ dbexcept = get_class(jnienv,
+ name_DB_RUNRECOVERY_EX);
+ break;
+ case DB_LOCK_DEADLOCK:
+ dbexcept = get_class(jnienv, name_DB_DEADLOCK_EX);
+ break;
+ default:
dbexcept = get_class(jnienv, name_DB_EXCEPTION);
- }
- else {
- javaexcept =
- (*jnienv)->FindClass(jnienv, "java/io/FileNotFoundException");
- }
- break;
- case DB_RUNRECOVERY:
- dbexcept = get_class(jnienv, name_DB_RUNRECOVERY_EX);
- break;
- case DB_LOCK_DEADLOCK:
- dbexcept = get_class(jnienv, name_DB_DEADLOCK_EX);
- break;
- default:
- dbexcept = get_class(jnienv, name_DB_EXCEPTION);
- break;
+ break;
}
if (dbexcept != NULL) {
if (textString == NULL)
textString = get_java_string(jnienv, text);
- constructId = (*jnienv)->GetMethodID(jnienv, dbexcept,
- "<init>",
- "(Ljava/lang/String;I)V");
- obj = (jthrowable)(*jnienv)->NewObject(jnienv, dbexcept,
- constructId, textString,
- err);
- (*jnienv)->Throw(jnienv, obj);
+ if ((obj = create_exception(jnienv, textString, err, dbexcept))
+ != NULL)
+ (*jnienv)->Throw(jnienv, obj);
+ /* Otherwise, an exception has been posted. */
}
- else if (javaexcept != NULL) {
- javaexcept =
- (*jnienv)->FindClass(jnienv, "java/io/FileNotFoundException");
+ else if (javaexcept != NULL)
(*jnienv)->ThrowNew(jnienv, javaexcept, text);
+ else
+ fprintf(stderr,
+ "report_exception: failed to create an exception\n");
+}
+
+/*
+ * Report an exception back to the java side, for the specific
+ * case of DB_LOCK_NOTGRANTED, as more things are added to the
+ * constructor of this type of exception.
+ */
+void report_notgranted_exception(JNIEnv *jnienv, const char *text,
+ db_lockop_t op, db_lockmode_t mode,
+ jobject jdbt, jobject jlock, int index)
+{
+ jstring textString;
+ jclass dbexcept;
+ jthrowable obj;
+ jmethodID mid;
+
+ if ((dbexcept = get_class(jnienv, name_DB_LOCKNOTGRANTED_EX)) == NULL)
+ return; /* An exception has been posted. */
+ textString = get_java_string(jnienv, text);
+
+ mid = (*jnienv)->GetMethodID(jnienv, dbexcept, "<init>",
+ "(Ljava/lang/String;II"
+ "Lcom/sleepycat/db/Dbt;"
+ "Lcom/sleepycat/db/DbLock;I)V");
+ if ((obj = (jthrowable)(*jnienv)->NewObject(jnienv, dbexcept,
+ mid, textString, op, mode, jdbt, jlock, index)) != NULL)
+ (*jnienv)->Throw(jnienv, obj);
+ else
+ fprintf(stderr,
+ "report_notgranted_exception: failed to create an exception\n");
+}
+
+/*
+ * Create an exception object and return it.
+ * The given class must have a constructor
+ * with args (java.lang.String text, int errno);
+ * DbException and its subclasses fit this bill.
+ */
+jobject create_exception(JNIEnv *jnienv, jstring text,
+ int err, jclass dbexcept)
+{
+ jthrowable obj;
+ jmethodID mid;
+
+ mid = (*jnienv)->GetMethodID(jnienv, dbexcept, "<init>",
+ "(Ljava/lang/String;I)V");
+ if (mid != NULL)
+ obj = (jthrowable)(*jnienv)->NewObject(jnienv, dbexcept, mid,
+ text, err);
+ else {
+ fprintf(stderr, "Cannot get exception init method ID!\n");
+ obj = NULL;
}
+
+ return (obj);
}
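Outside of report_exception(), a caller could use create_exception() roughly as below; the err parameter and the choice of plain DbException are illustrative assumptions.

/* Hedged example: build and throw a DbException by hand. */
static void example_throw(JNIEnv *jnienv, int err)
{
	jclass cl;
	jobject exc;

	if ((cl = get_class(jnienv, name_DB_EXCEPTION)) == NULL)
		return; /* An exception has been posted. */
	exc = create_exception(jnienv,
	    get_java_string(jnienv, db_strerror(err)), err, cl);
	if (exc != NULL)
		(*jnienv)->Throw(jnienv, (jthrowable)exc);
}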
-/* Report an error via the errcall mechanism.
+/*
+ * Report an error via the errcall mechanism.
*/
void report_errcall(JNIEnv *jnienv, jobject errcall,
jstring prefix, const char *message)
@@ -296,7 +453,8 @@ void report_errcall(JNIEnv *jnienv, jobject errcall,
jclass errcall_class;
jstring msg;
- errcall_class = get_class(jnienv, name_DbErrcall);
+ if ((errcall_class = get_class(jnienv, name_DbErrcall)) == NULL)
+ return; /* An exception has been posted. */
msg = get_java_string(jnienv, message);
id = (*jnienv)->GetMethodID(jnienv, errcall_class,
@@ -311,7 +469,8 @@ void report_errcall(JNIEnv *jnienv, jobject errcall,
(*jnienv)->CallVoidMethod(jnienv, errcall, id, prefix, msg);
}
-/* If the object is null, report an exception and return false (0),
+/*
+ * If the object is null, report an exception and return false (0),
* otherwise return true (1).
*/
int verify_non_null(JNIEnv *jnienv, void *obj)
@@ -323,29 +482,70 @@ int verify_non_null(JNIEnv *jnienv, void *obj)
return (1);
}
-/* If the error code is non-zero, report an exception and return false (0),
+/*
+ * If the error code is non-zero, report an exception and return false (0),
* otherwise return true (1).
*/
int verify_return(JNIEnv *jnienv, int err, unsigned long expect_mask)
{
if (err == 0)
- return 1;
+ return (1);
report_exception(jnienv, db_strerror(err), err, expect_mask);
- return 0;
+ return (0);
}
-/* Create an object of the given class, calling its default constructor.
+/*
+ * Verify that there was no memory error due to undersized Dbt.
+ * If there is report a DbMemoryException, with the Dbt attached
+ * and return false (0), otherwise return true (1).
+ */
+int verify_dbt(JNIEnv *jnienv, int err, LOCKED_DBT *ldbt)
+{
+ DBT *dbt;
+ jobject exception;
+ jstring text;
+ jclass dbexcept;
+ jmethodID mid;
+
+ if (err != ENOMEM)
+ return (1);
+
+ dbt = &ldbt->javainfo->dbt;
+ if (!F_ISSET(dbt, DB_DBT_USERMEM) || dbt->size <= dbt->ulen)
+ return (1);
+
+ /* Create/throw an exception of type DbMemoryException */
+ if ((dbexcept = get_class(jnienv, name_DB_MEMORY_EX)) == NULL)
+ return (1); /* An exception has been posted. */
+ text = get_java_string(jnienv,
+ "Dbt not large enough for available data");
+ exception = create_exception(jnienv, text, ENOMEM, dbexcept);
+
+ /* Attach the dbt to the exception */
+ mid = (*jnienv)->GetMethodID(jnienv, dbexcept, "set_dbt",
+ "(L" DB_PACKAGE_NAME "Dbt;)V");
+ (*jnienv)->CallVoidMethod(jnienv, exception, mid, ldbt->jdbt);
+ (*jnienv)->Throw(jnienv, exception);
+ return (0);
+}
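The two verify helpers are intended to be layered: when verify_dbt() does not throw (it returns 1), the generic verify_return() path still applies. A plausible ordering, with err and ldata assumed to come from a surrounding glue routine:

/* Hedged ordering of the two checks after a DB call. */
static void example_check(JNIEnv *jnienv, int err, LOCKED_DBT *ldata)
{
	if (verify_dbt(jnienv, err, ldata))
		(void)verify_return(jnienv, err, 0);
}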
+
+/*
+ * Create an object of the given class, calling its default constructor.
*/
jobject create_default_object(JNIEnv *jnienv, const char *class_name)
{
- jclass dbclass = get_class(jnienv, class_name);
- jmethodID id = (*jnienv)->GetMethodID(jnienv, dbclass, "<init>", "()V");
- jobject object = (*jnienv)->NewObject(jnienv, dbclass, id);
- return (object);
+ jmethodID id;
+ jclass dbclass;
+
+ if ((dbclass = get_class(jnienv, class_name)) == NULL)
+ return (NULL); /* An exception has been posted. */
+ id = (*jnienv)->GetMethodID(jnienv, dbclass, "<init>", "()V");
+ return ((*jnienv)->NewObject(jnienv, dbclass, id));
}
-/* Convert an DB object to a Java encapsulation of that object.
+/*
+ * Convert a DB object to a Java encapsulation of that object.
* Note: This implementation creates a new Java object on each call,
* so it is generally useful when a new DB object has just been created.
*/
@@ -361,20 +561,24 @@ jobject convert_object(JNIEnv *jnienv, const char *class_name, void *dbobj)
return (jo);
}
-/* Create a copy of the string
+/*
+ * Create a copy of the string
*/
char *dup_string(const char *str)
{
int len;
char *retval;
+ int err;
len = strlen(str) + 1;
- retval = (char *)malloc(sizeof(char)*len);
+ if ((err = __os_malloc(NULL, sizeof(char)*len, &retval)) != 0)
+ return (NULL);
strncpy(retval, str, len);
return (retval);
}
-/* Create a java string from the given string
+/*
+ * Create a java string from the given string
*/
jstring get_java_string(JNIEnv *jnienv, const char* string)
{
@@ -383,21 +587,23 @@ jstring get_java_string(JNIEnv *jnienv, const char* string)
return ((*jnienv)->NewStringUTF(jnienv, string));
}
-/* Create a malloc'ed copy of the java string.
+/*
+ * Create a copy of the java string using __os_malloc.
* Caller must free it.
*/
char *get_c_string(JNIEnv *jnienv, jstring jstr)
{
- const jbyte *utf;
+ const char *utf;
char *retval;
utf = (*jnienv)->GetStringUTFChars(jnienv, jstr, NULL);
- retval = dup_string((const char *)utf);
+ retval = dup_string(utf);
(*jnienv)->ReleaseStringUTFChars(jnienv, jstr, utf);
- return retval;
+ return (retval);
}
-/* Convert a java object to the various C pointers they represent.
+/*
+ * Convert java objects to the various C pointers they represent.
*/
DB *get_DB(JNIEnv *jnienv, jobject obj)
{
@@ -406,7 +612,8 @@ DB *get_DB(JNIEnv *jnienv, jobject obj)
DB_BTREE_STAT *get_DB_BTREE_STAT(JNIEnv *jnienv, jobject obj)
{
- return ((DB_BTREE_STAT *)get_private_dbobj(jnienv, name_DB_BTREE_STAT, obj));
+ return ((DB_BTREE_STAT *)
+ get_private_dbobj(jnienv, name_DB_BTREE_STAT, obj));
}
DBC *get_DBC(JNIEnv *jnienv, jobject obj)
@@ -426,7 +633,8 @@ DB_ENV_JAVAINFO *get_DB_ENV_JAVAINFO(JNIEnv *jnienv, jobject obj)
DB_HASH_STAT *get_DB_HASH_STAT(JNIEnv *jnienv, jobject obj)
{
- return ((DB_HASH_STAT *)get_private_dbobj(jnienv, name_DB_HASH_STAT, obj));
+ return ((DB_HASH_STAT *)
+ get_private_dbobj(jnienv, name_DB_HASH_STAT, obj));
}
DB_JAVAINFO *get_DB_JAVAINFO(JNIEnv *jnienv, jobject obj)
@@ -439,29 +647,71 @@ DB_LOCK *get_DB_LOCK(JNIEnv *jnienv, jobject obj)
return ((DB_LOCK *)get_private_dbobj(jnienv, name_DB_LOCK, obj));
}
-DB_LOG_STAT *get_DB_LOG_STAT(JNIEnv *jnienv, jobject obj)
+DB_LOGC *get_DB_LOGC(JNIEnv *jnienv, jobject obj)
{
- return ((DB_LOG_STAT *)get_private_dbobj(jnienv, name_DB_LOG_STAT, obj));
+ return ((DB_LOGC *)get_private_dbobj(jnienv, name_DB_LOGC, obj));
}
-DB_LSN *get_DB_LSN(JNIEnv *jnienv, jobject obj)
+DB_LOG_STAT *get_DB_LOG_STAT(JNIEnv *jnienv, jobject obj)
{
- return ((DB_LSN *)get_private_dbobj(jnienv, name_DB_LSN, obj));
+ return ((DB_LOG_STAT *)
+ get_private_dbobj(jnienv, name_DB_LOG_STAT, obj));
+}
+
+DB_LSN *get_DB_LSN(JNIEnv *jnienv, /* DbLsn */ jobject obj) {
+ /*
+ * DbLsns that are created from within java (new DbLsn()) rather
+ * than from within C (get_DbLsn()) may not have a "private" DB_LSN
+ * structure allocated for them yet. We can't do this in the
+ * actual constructor (init_lsn()), because there's no way to pass
+ * in an initializing value in, and because the get_DbLsn()/
+ * convert_object() code path needs a copy of the pointer before
+ * the constructor gets called. Thus, get_DbLsn() allocates and
+ * fills a DB_LSN for the object it's about to create.
+ *
+ * Since "new DbLsn()" may reasonably be passed as an argument to
+ * functions such as DbEnv.log_put(), though, we need to make sure
+ * that DB_LSN's get allocated when the object was created from
+ * Java, too. Here, we lazily allocate a new private DB_LSN if
+ * and only if it turns out that we don't already have one.
+ *
+ * The only exception is if the DbLsn object is a Java null
+ * (in which case the jobject will also be NULL). Then a NULL
+ * DB_LSN is legitimate.
+ */
+ DB_LSN *lsnp;
+ int err;
+
+ if (obj == NULL)
+ return (NULL);
+
+ lsnp = (DB_LSN *)get_private_dbobj(jnienv, name_DB_LSN, obj);
+ if (lsnp == NULL) {
+ if ((err = __os_malloc(NULL, sizeof(DB_LSN), &lsnp)) != 0)
+ return (NULL);
+ memset(lsnp, 0, sizeof(DB_LSN));
+ set_private_dbobj(jnienv, name_DB_LSN, obj, lsnp);
+ }
+
+ return (lsnp);
}
DB_MPOOL_FSTAT *get_DB_MPOOL_FSTAT(JNIEnv *jnienv, jobject obj)
{
- return ((DB_MPOOL_FSTAT *)get_private_dbobj(jnienv, name_DB_MPOOL_FSTAT, obj));
+ return ((DB_MPOOL_FSTAT *)
+ get_private_dbobj(jnienv, name_DB_MPOOL_FSTAT, obj));
}
DB_MPOOL_STAT *get_DB_MPOOL_STAT(JNIEnv *jnienv, jobject obj)
{
- return ((DB_MPOOL_STAT *)get_private_dbobj(jnienv, name_DB_MPOOL_STAT, obj));
+ return ((DB_MPOOL_STAT *)
+ get_private_dbobj(jnienv, name_DB_MPOOL_STAT, obj));
}
DB_QUEUE_STAT *get_DB_QUEUE_STAT(JNIEnv *jnienv, jobject obj)
{
- return ((DB_QUEUE_STAT *)get_private_dbobj(jnienv, name_DB_QUEUE_STAT, obj));
+ return ((DB_QUEUE_STAT *)
+ get_private_dbobj(jnienv, name_DB_QUEUE_STAT, obj));
}
DB_TXN *get_DB_TXN(JNIEnv *jnienv, jobject obj)
@@ -471,7 +721,8 @@ DB_TXN *get_DB_TXN(JNIEnv *jnienv, jobject obj)
DB_TXN_STAT *get_DB_TXN_STAT(JNIEnv *jnienv, jobject obj)
{
- return ((DB_TXN_STAT *)get_private_dbobj(jnienv, name_DB_TXN_STAT, obj));
+ return ((DB_TXN_STAT *)
+ get_private_dbobj(jnienv, name_DB_TXN_STAT, obj));
}
DBT *get_DBT(JNIEnv *jnienv, jobject obj)
@@ -490,7 +741,8 @@ DBT_JAVAINFO *get_DBT_JAVAINFO(JNIEnv *jnienv, jobject obj)
return ((DBT_JAVAINFO *)get_private_dbobj(jnienv, name_DBT, obj));
}
-/* Convert a C pointer to the various Java objects they represent.
+/*
+ * Convert C pointers to the various Java objects they represent.
*/
jobject get_DbBtreeStat(JNIEnv *jnienv, DB_BTREE_STAT *dbobj)
{
@@ -507,27 +759,109 @@ jobject get_DbHashStat(JNIEnv *jnienv, DB_HASH_STAT *dbobj)
return (convert_object(jnienv, name_DB_HASH_STAT, dbobj));
}
+jobject get_DbLogc(JNIEnv *jnienv, DB_LOGC *dbobj)
+{
+ return (convert_object(jnienv, name_DB_LOGC, dbobj));
+}
+
jobject get_DbLogStat(JNIEnv *jnienv, DB_LOG_STAT *dbobj)
{
return (convert_object(jnienv, name_DB_LOG_STAT, dbobj));
}
-/* LSNs are different since they are really normally
+/*
+ * LSNs are different since they are normally
* treated as by-value objects. We actually create
* a pointer to the LSN and store that, deleting it
* when the LSN is GC'd.
*/
jobject get_DbLsn(JNIEnv *jnienv, DB_LSN dbobj)
{
- DB_LSN *lsnp = (DB_LSN *)malloc(sizeof(DB_LSN));
+ DB_LSN *lsnp;
+ int err;
+
+ if ((err = __os_malloc(NULL, sizeof(DB_LSN), &lsnp)) != 0)
+ return (NULL);
+
memset(lsnp, 0, sizeof(DB_LSN));
*lsnp = dbobj;
return (convert_object(jnienv, name_DB_LSN, lsnp));
}
-jobject get_Dbt(JNIEnv *jnienv, DBT *dbt)
+/*
+ * Shared code for get_Dbt and get_const_Dbt.
+ *
+ * XXX
+ * Currently we make no distinction in implementation of these
+ * two kinds of Dbts, although in the future we may want to.
+ * (It's probably easier to make the optimizations listed below
+ * with readonly Dbts).
+ *
+ * Dbt's created via this function are only used for a short lifetime,
+ * during callback functions. In the future, we should consider taking
+ * advantage of this by having a pool of Dbt objects instead of creating
+ * new ones each time. Because of multithreading, we may need an
+ * arbitrary number. We might also have sharing of the byte arrays
+ * used by the Dbts.
+ */
+static jobject get_Dbt_shared(JNIEnv *jnienv, const DBT *dbt, int readonly,
+ DBT_JAVAINFO **ret_info)
+{
+ jobject jdbt;
+ DBT_JAVAINFO *dbtji;
+
+ COMPQUIET(readonly, 0);
+
+ /* A NULL DBT should become a null Dbt. */
+ if (dbt == NULL)
+ return (NULL);
+
+ /*
+ * Note that a side effect of creating a Dbt object
+ * is the creation of the attached DBT_JAVAINFO object
+ * (see the native implementation of Dbt.init())
+ * A DBT_JAVAINFO object contains its own DBT.
+ */
+ jdbt = create_default_object(jnienv, name_DBT);
+ dbtji = get_DBT_JAVAINFO(jnienv, jdbt);
+ memcpy(&dbtji->dbt, dbt, sizeof(DBT));
+
+ /*
+ * Set the boolean indicator so that the Java side knows to
+ * call back when it wants to look at the array. This avoids
+ * needlessly creating/copying arrays that may never be looked at.
+ */
+ (*jnienv)->SetBooleanField(jnienv, jdbt, fid_Dbt_must_create_data, 1);
+ (*jnienv)->SetIntField(jnienv, jdbt, fid_Dbt_size, dbt->size);
+
+ if (ret_info != NULL)
+ *ret_info = dbtji;
+ return (jdbt);
+}
+
+/*
+ * Get a writeable Dbt.
+ *
+ * Currently we're sharing code with get_const_Dbt.
+ * It really shouldn't be this way; we should have a DBT that we
+ * can change, and some mechanism for copying back
+ * any changes to the original DBT.
+ */
+jobject get_Dbt(JNIEnv *jnienv, DBT *dbt,
+ DBT_JAVAINFO **ret_info)
+{
+ return (get_Dbt_shared(jnienv, dbt, 0, ret_info));
+}
+
+/*
+ * Get a Dbt that we promise not to change, or at least
+ * if there are changes, they don't matter and won't get
+ * seen by anyone.
+ */
+jobject get_const_Dbt(JNIEnv *jnienv, const DBT *dbt,
+ DBT_JAVAINFO **ret_info)
{
- return (convert_object(jnienv, name_DBT, dbt));
+ return (get_Dbt_shared(jnienv, dbt, 1, ret_info));
}
jobject get_DbMpoolFStat(JNIEnv *jnienv, DB_MPOOL_FSTAT *dbobj)
diff --git a/bdb/libdb_java/java_util.h b/bdb/libdb_java/java_util.h
index eb47dc67629..08187f6b51f 100644
--- a/bdb/libdb_java/java_util.h
+++ b/bdb/libdb_java/java_util.h
@@ -1,10 +1,10 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1997, 1998, 1999, 2000
+ * Copyright (c) 1997-2002
* Sleepycat Software. All rights reserved.
*
- * $Id: java_util.h,v 11.22 2001/01/11 18:19:53 bostic Exp $
+ * $Id: java_util.h,v 11.44 2002/08/29 14:22:24 margo Exp $
*/
#ifndef _JAVA_UTIL_H_
@@ -12,7 +12,8 @@
#ifdef _MSC_VER
-/* These are level 4 warnings that are explicitly disabled.
+/*
+ * These are level 4 warnings that are explicitly disabled.
* With Visual C++, by default you do not see above level 3 unless
* you use /W4. But we like to compile with the highest level
* warnings to catch other errors.
@@ -35,15 +36,15 @@
#include "db_config.h"
#include "db.h"
+#include "db_int.h"
+#include <jni.h>
#include "java_info.h"
#include "java_locked.h"
-#include <jni.h>
#include <string.h> /* needed for memset */
#define DB_PACKAGE_NAME "com/sleepycat/db/"
-/* Union to convert longs to pointers (see {get,set}_private_dbobj).
- */
+/* Union to convert longs to pointers (see {get,set}_private_dbobj). */
typedef union {
jlong java_long;
void *ptr;
@@ -52,13 +53,13 @@ typedef union {
/****************************************************************
*
* Utility functions and definitions used by "glue" functions.
- *
*/
#define NOT_IMPLEMENTED(str) \
report_exception(jnienv, str /*concatenate*/ ": not implemented", 0)
-/* Get, delete a global reference.
+/*
+ * Get, delete a global reference.
* Making this operation a function call allows for
* easier tracking for debugging. Global references
* are mostly grabbed at 'open' and 'close' points,
@@ -78,7 +79,7 @@ static void wrdebug(const char *str)
static jobject debug_new_global_ref(JNIEnv *jnienv, jobject obj, const char *s)
{
wrdebug(s);
- return (*jnienv)->NewGlobalRef(jnienv, obj);
+ return ((*jnienv)->NewGlobalRef(jnienv, obj));
}
static void debug_delete_global_ref(JNIEnv *jnienv, jobject obj, const char *s)
@@ -97,28 +98,45 @@ static void debug_delete_global_ref(JNIEnv *jnienv, jobject obj, const char *s)
#define wrdebug(x)
#endif
-/* Get the private data from a Db* object that points back to a C DB_* object.
+/*
+ * Do any one time initialization, especially initializing any
+ * unchanging methodIds, fieldIds, etc.
+ */
+void one_time_init(JNIEnv *jnienv);
+
+/*
+ * Get the current JNIEnv from the java VM.
+ * If the jvm argument is null, uses the default
+ * jvm stored during the first invocation.
+ */
+JNIEnv *get_jnienv(JavaVM *jvm);
+
+/*
+ * Get the private data from a Db* object that points back to a C DB_* object.
* The private data is stored in the object as a Java long (64 bits),
* which is long enough to store a pointer on current architectures.
*/
void *get_private_dbobj(JNIEnv *jnienv, const char *classname,
jobject obj);
-/* Set the private data in a Db* object that points back to a C DB_* object.
+/*
+ * Set the private data in a Db* object that points back to a C DB_* object.
* The private data is stored in the object as a Java long (64 bits),
* which is long enough to store a pointer on current architectures.
*/
void set_private_dbobj(JNIEnv *jnienv, const char *classname,
jobject obj, void *value);
-/* Get the private data in a Db/DbEnv object that holds additional 'side data'.
+/*
+ * Get the private data in a Db/DbEnv object that holds additional 'side data'.
* The private data is stored in the object as a Java long (64 bits),
* which is long enough to store a pointer on current architectures.
*/
void *get_private_info(JNIEnv *jnienv, const char *classname,
jobject obj);
-/* Set the private data in a Db/DbEnv object that holds additional 'side data'.
+/*
+ * Set the private data in a Db/DbEnv object that holds additional 'side data'.
* The private data is stored in the object as a Java long (64 bits),
* which is long enough to store a pointer on current architectures.
*/
@@ -126,84 +144,126 @@ void set_private_info(JNIEnv *jnienv, const char *classname,
jobject obj, void *value);
/*
- * Given a non-qualified name (e.g. "foo"), get the class handl
+ * Given a non-qualified name (e.g. "foo"), get the class handle
* for the fully qualified name (e.g. "com.sleepycat.db.foo")
*/
jclass get_class(JNIEnv *jnienv, const char *classname);
-/* Set an individual field in a Db* object.
+/*
+ * Set an individual field in a Db* object.
* The field must be a DB object type.
*/
void set_object_field(JNIEnv *jnienv, jclass class_of_this,
jobject jthis, const char *object_classname,
const char *name_of_field, jobject obj);
-/* Set an individual field in a Db* object.
+/*
+ * Set an individual field in a Db* object.
* The field must be an integer type.
*/
void set_int_field(JNIEnv *jnienv, jclass class_of_this,
jobject jthis, const char *name_of_field, jint value);
-/* Set an individual field in a Db* object.
+/*
+ * Set an individual field in a Db* object.
* The field must be an integer type.
*/
void set_long_field(JNIEnv *jnienv, jclass class_of_this,
jobject jthis, const char *name_of_field, jlong value);
-/* Set an individual field in a Db* object.
+/*
+ * Set an individual field in a Db* object.
* The field must be a DbLsn type.
*/
void set_lsn_field(JNIEnv *jnienv, jclass class_of_this,
jobject jthis, const char *name_of_field, DB_LSN value);
-/* Values of expect_mask
+/*
+ * Values of flags for verify_return() and report_exception().
+ * These indicate what sort of exceptions the method may throw
+ * (in addition to DbException).
+ */
+static const u_int32_t EXCEPTION_FILE_NOT_FOUND = 0x0001; /*FileNotFound*/
+
+/*
+ * Report an exception back to the java side.
+ */
+void report_exception(JNIEnv *jnienv, const char *text,
+ int err, unsigned long expect_mask);
+
+/*
+ * Report an exception back to the java side, for the specific
+ * case of DB_LOCK_NOTGRANTED, whose exception class takes
+ * additional constructor arguments.
*/
-static const int EXCEPTION_FILE_NOT_FOUND = 0x0001;
+void report_notgranted_exception(JNIEnv *jnienv, const char *text,
+ db_lockop_t op, db_lockmode_t mode,
+ jobject jdbt, jobject jlock, int index);
-/* Report an exception back to the java side.
+/*
+ * Create an exception object and return it.
+ * The given class must have a constructor with args
+ * (java.lang.String text, int errno);
+ * DbException and its subclasses fit this bill.
*/
-void report_exception(JNIEnv *jnienv, const char *text, int err,
- unsigned long expect_mask);
+jobject create_exception(JNIEnv *jnienv, jstring text,
+ int err, jclass dbexcept);
-/* Report an error via the errcall mechanism.
+/*
+ * Report an error via the errcall mechanism.
*/
void report_errcall(JNIEnv *jnienv, jobject errcall,
jstring prefix, const char *message);
-/* If the object is null, report an exception and return false (0),
+/*
+ * If the object is null, report an exception and return false (0),
* otherwise return true (1).
*/
int verify_non_null(JNIEnv *jnienv, void *obj);
-/* If the error code is non-zero, report an exception and return false (0),
+/*
+ * If the error code is non-zero, report an exception and return false (0),
* otherwise return true (1).
*/
-int verify_return(JNIEnv *jnienv, int err, unsigned long expect_mask);
+int verify_return(JNIEnv *jnienv, int err, unsigned long flags);
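As a hedged sketch of how these error helpers are meant to be combined (illustrative only, not part of the patch; the wrapper name and the assumed "java_util.h" header name are inventions for the example), a JNI wrapper checking a DB->open() return might look like this:

    #include <jni.h>
    #include "java_util.h"  /* assumed name for this header */

    /*
     * verify_return() throws the matching Java exception and returns 0 on
     * error; passing EXCEPTION_FILE_NOT_FOUND widens the set of exceptions
     * the caller may see to include FileNotFoundException.
     */
    static void example_check_open(JNIEnv *jnienv, int err)
    {
        if (!verify_return(jnienv, err, EXCEPTION_FILE_NOT_FOUND))
            return;  /* exception already reported to Java */
        /* success path continues here */
    }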
-/* Create an object of the given class, calling its default constructor.
+/*
+ * Verify that there was no memory error due to undersized Dbt.
+ * If there is report a DbMemoryException, with the Dbt attached
+ * and return false (0), otherwise return true (1).
+ */
+int verify_dbt(JNIEnv *jnienv, int err, LOCKED_DBT *locked_dbt);
+
+/*
+ * Create an object of the given class, calling its default constructor.
*/
jobject create_default_object(JNIEnv *jnienv, const char *class_name);
-/* Convert an DB object to a Java encapsulation of that object.
+/*
+ * Create a Dbt object, calling its default constructor.
+ */
+jobject create_dbt(JNIEnv *jnienv, const char *class_name);
+
+/*
+ * Convert a DB object to a Java encapsulation of that object.
* Note: This implementation creates a new Java object on each call,
* so it is generally useful when a new DB object has just been created.
*/
jobject convert_object(JNIEnv *jnienv, const char *class_name, void *dbobj);
-/* Create a copy of the string
- */
-char *dup_string(const char *str);
-
-/* Create a malloc'ed copy of the java string.
+/*
+ * Create a copy of the java string using __os_malloc.
* Caller must free it.
*/
char *get_c_string(JNIEnv *jnienv, jstring jstr);
-/* Create a java string from the given string
+/*
+ * Create a java string from the given C string.
*/
jstring get_java_string(JNIEnv *jnienv, const char* string);
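A minimal round-trip sketch for these string helpers (illustrative, with the same assumed includes as the previous sketch; passing the dbenv to __os_free() mirrors its use later in this patch and is an assumption, not something this header specifies):

    static void example_string_round_trip(JNIEnv *jnienv, DB_ENV *dbenv, jstring jname)
    {
        char *name;
        jstring jcopy;

        name = get_c_string(jnienv, jname);     /* copy allocated via __os_malloc */
        jcopy = get_java_string(jnienv, name);  /* build a java.lang.String again */
        /* ... use name and jcopy ... */
        __os_free(dbenv, name);                 /* caller must free the C copy */
    }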
-/* Convert a java object to the various C pointers they represent.
+/*
+ * Convert java objects to the various C pointers they represent.
*/
DB *get_DB (JNIEnv *jnienv, jobject obj);
DB_BTREE_STAT *get_DB_BTREE_STAT (JNIEnv *jnienv, jobject obj);
@@ -213,6 +273,7 @@ DB_ENV_JAVAINFO *get_DB_ENV_JAVAINFO (JNIEnv *jnienv, jobject obj);
DB_HASH_STAT *get_DB_HASH_STAT (JNIEnv *jnienv, jobject obj);
DB_JAVAINFO *get_DB_JAVAINFO (JNIEnv *jnienv, jobject obj);
DB_LOCK *get_DB_LOCK (JNIEnv *jnienv, jobject obj);
+DB_LOGC *get_DB_LOGC (JNIEnv *jnienv, jobject obj);
DB_LOG_STAT *get_DB_LOG_STAT (JNIEnv *jnienv, jobject obj);
DB_LSN *get_DB_LSN (JNIEnv *jnienv, jobject obj);
DB_MPOOL_FSTAT *get_DB_MPOOL_FSTAT(JNIEnv *jnienv, jobject obj);
@@ -223,17 +284,20 @@ DB_TXN_STAT *get_DB_TXN_STAT (JNIEnv *jnienv, jobject obj);
DBT *get_DBT (JNIEnv *jnienv, jobject obj);
DBT_JAVAINFO *get_DBT_JAVAINFO (JNIEnv *jnienv, jobject obj);
-/* From a C object, create a Java object.
+/*
+ * From a C object, create a Java object.
*/
jobject get_DbBtreeStat (JNIEnv *jnienv, DB_BTREE_STAT *dbobj);
jobject get_Dbc (JNIEnv *jnienv, DBC *dbobj);
jobject get_DbHashStat (JNIEnv *jnienv, DB_HASH_STAT *dbobj);
+jobject get_DbLogc (JNIEnv *jnienv, DB_LOGC *dbobj);
jobject get_DbLogStat (JNIEnv *jnienv, DB_LOG_STAT *dbobj);
jobject get_DbLsn (JNIEnv *jnienv, DB_LSN dbobj);
jobject get_DbMpoolStat (JNIEnv *jnienv, DB_MPOOL_STAT *dbobj);
jobject get_DbMpoolFStat (JNIEnv *jnienv, DB_MPOOL_FSTAT *dbobj);
jobject get_DbQueueStat (JNIEnv *jnienv, DB_QUEUE_STAT *dbobj);
-jobject get_Dbt (JNIEnv *jnienv, DBT *dbt);
+jobject get_const_Dbt (JNIEnv *jnienv, const DBT *dbt, DBT_JAVAINFO **retp);
+jobject get_Dbt (JNIEnv *jnienv, DBT *dbt, DBT_JAVAINFO **retp);
jobject get_DbTxn (JNIEnv *jnienv, DB_TXN *dbobj);
jobject get_DbTxnStat (JNIEnv *jnienv, DB_TXN_STAT *dbobj);
@@ -247,17 +311,22 @@ extern const char * const name_DB_EXCEPTION;
extern const char * const name_DB_HASH_STAT;
extern const char * const name_DB_LOCK;
extern const char * const name_DB_LOCK_STAT;
+extern const char * const name_DB_LOGC;
extern const char * const name_DB_LOG_STAT;
extern const char * const name_DB_LSN;
extern const char * const name_DB_MEMORY_EX;
extern const char * const name_DB_MPOOL_FSTAT;
extern const char * const name_DB_MPOOL_STAT;
+extern const char * const name_DB_LOCKNOTGRANTED_EX;
+extern const char * const name_DB_PREPLIST;
extern const char * const name_DB_QUEUE_STAT;
+extern const char * const name_DB_REP_STAT;
extern const char * const name_DB_RUNRECOVERY_EX;
extern const char * const name_DBT;
extern const char * const name_DB_TXN;
extern const char * const name_DB_TXN_STAT;
extern const char * const name_DB_TXN_STAT_ACTIVE;
+extern const char * const name_DB_UTIL;
extern const char * const name_DbAppendRecno;
extern const char * const name_DbBtreeCompare;
extern const char * const name_DbBtreePrefix;
@@ -267,93 +336,106 @@ extern const char * const name_DbErrcall;
extern const char * const name_DbFeedback;
extern const char * const name_DbHash;
extern const char * const name_DbRecoveryInit;
+extern const char * const name_DbRepTransport;
+extern const char * const name_DbSecondaryKeyCreate;
extern const char * const name_DbTxnRecover;
+extern const char * const name_RepElectResult;
+extern const char * const name_RepProcessMessage;
extern const char * const string_signature;
-#define JAVADB_RO_ACCESS(j_class, j_fieldtype, j_field, c_type, c_field) \
-JNIEXPORT j_fieldtype JNICALL \
- Java_com_sleepycat_db_##j_class##_get_1##j_field \
- (JNIEnv *jnienv, jobject jthis) \
-{ \
- c_type *db_this = get_##c_type(jnienv, jthis); \
- \
- if (verify_non_null(jnienv, db_this)) { \
- return db_this->c_field; \
- } \
- return 0; \
+extern jfieldID fid_Dbt_data;
+extern jfieldID fid_Dbt_offset;
+extern jfieldID fid_Dbt_size;
+extern jfieldID fid_Dbt_ulen;
+extern jfieldID fid_Dbt_dlen;
+extern jfieldID fid_Dbt_doff;
+extern jfieldID fid_Dbt_flags;
+extern jfieldID fid_Dbt_must_create_data;
+extern jfieldID fid_DbLockRequest_op;
+extern jfieldID fid_DbLockRequest_mode;
+extern jfieldID fid_DbLockRequest_timeout;
+extern jfieldID fid_DbLockRequest_obj;
+extern jfieldID fid_DbLockRequest_lock;
+extern jfieldID fid_RepProcessMessage_envid;
+
+#define JAVADB_ARGS JNIEnv *jnienv, jobject jthis
+
+#define JAVADB_GET_FLD(j_class, j_fieldtype, j_field, c_type, c_field) \
+JNIEXPORT j_fieldtype JNICALL \
+ Java_com_sleepycat_db_##j_class##_get_1##j_field \
+ (JAVADB_ARGS) \
+{ \
+ c_type *db= get_##c_type(jnienv, jthis); \
+ \
+ if (verify_non_null(jnienv, db)) \
+ return (db->c_field); \
+ return (0); \
}
-#define JAVADB_WO_ACCESS(j_class, j_fieldtype, j_field, c_type, c_field) \
-JNIEXPORT void JNICALL \
- Java_com_sleepycat_db_##j_class##_set_1##j_field \
- (JNIEnv *jnienv, jobject jthis, j_fieldtype value) \
-{ \
- c_type *db_this = get_##c_type(jnienv, jthis); \
- \
- if (verify_non_null(jnienv, db_this)) { \
- db_this->c_field = value; \
- } \
+#define JAVADB_SET_FLD(j_class, j_fieldtype, j_field, c_type, c_field) \
+JNIEXPORT void JNICALL \
+ Java_com_sleepycat_db_##j_class##_set_1##j_field \
+ (JAVADB_ARGS, j_fieldtype value) \
+{ \
+ c_type *db= get_##c_type(jnienv, jthis); \
+ \
+ if (verify_non_null(jnienv, db)) \
+ db->c_field = value; \
}
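A hypothetical instantiation (the Java field name and the DB pgsize member pairing are illustrative assumptions, not lines from this patch) shows what the accessor macros expand into, using the get_DB() helper declared earlier:

    /*
     * Generates Java_com_sleepycat_db_Db_get_1pagesize() and
     * Java_com_sleepycat_db_Db_set_1pagesize(), which read and write
     * the pgsize member of the underlying DB handle.
     */
    JAVADB_GET_FLD(Db, jint, pagesize, DB, pgsize)
    JAVADB_SET_FLD(Db, jint, pagesize, DB, pgsize)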
-/* This is a variant of the JAVADB_WO_ACCESS macro to define a simple set_
- * method using a C "method" call. These should be used with set_
- * methods that cannot invoke java 'callbacks' (no set_ method currently
- * does that). That assumption allows us to optimize (and simplify)
- * by not calling API_BEGIN/END macros.
- */
-#define JAVADB_WO_ACCESS_METHOD(j_class, j_fieldtype, \
- j_field, c_type, c_field) \
-JNIEXPORT void JNICALL \
- Java_com_sleepycat_db_##j_class##_set_1##j_field \
- (JNIEnv *jnienv, jobject jthis, j_fieldtype value) \
-{ \
- c_type *db_this; \
- int err; \
- \
- db_this = get_##c_type(jnienv, jthis); \
- if (verify_non_null(jnienv, db_this)) { \
- err = db_this->set_##c_field(db_this, value); \
- verify_return(jnienv, err, 0); \
- } \
+#define JAVADB_METHOD(_meth, _argspec, c_type, c_meth, _args) \
+JNIEXPORT void JNICALL Java_com_sleepycat_db_##_meth _argspec \
+{ \
+ c_type *c_this = get_##c_type(jnienv, jthis); \
+ int ret; \
+ \
+ if (!verify_non_null(jnienv, c_this)) \
+ return; \
+ ret = c_this->c_meth _args; \
+ if (!DB_RETOK_STD(ret)) \
+ report_exception(jnienv, db_strerror(ret), ret, 0); \
}
-#define JAVADB_RW_ACCESS(j_class, j_fieldtype, j_field, c_type, c_field) \
- JAVADB_RO_ACCESS(j_class, j_fieldtype, j_field, c_type, c_field) \
- JAVADB_WO_ACCESS(j_class, j_fieldtype, j_field, c_type, c_field)
-
-#define JAVADB_WO_ACCESS_STRING(j_class, j_field, c_type, c_field) \
-JNIEXPORT void JNICALL \
- Java_com_sleepycat_db_##j_class##_set_1##j_field \
- (JNIEnv *jnienv, jobject jthis, jstring value) \
-{ \
- c_type *db_this; \
- int err; \
- \
- db_this = get_##c_type(jnienv, jthis); \
- if (verify_non_null(jnienv, db_this)) { \
- err = db_this->set_##c_field(db_this, \
- (*jnienv)->GetStringUTFChars(jnienv, value, NULL)); \
- verify_return(jnienv, err, 0); \
- } \
+#define JAVADB_METHOD_INT(_meth, _argspec, c_type, c_meth, _args, _retok) \
+JNIEXPORT jint JNICALL Java_com_sleepycat_db_##_meth _argspec \
+{ \
+ c_type *c_this = get_##c_type(jnienv, jthis); \
+ int ret; \
+ \
+ if (!verify_non_null(jnienv, c_this)) \
+ return (0); \
+ ret = c_this->c_meth _args; \
+ if (!_retok(ret)) \
+ report_exception(jnienv, db_strerror(ret), ret, 0); \
+ return ((jint)ret); \
}
-#define JAVADB_API_BEGIN(db, jthis) \
- if ((db) != NULL) \
- ((DB_JAVAINFO*)(db)->cj_internal)->jdbref_ = \
- ((DB_ENV_JAVAINFO*)((db)->dbenv->cj_internal))->jdbref_ = (jthis)
+#define JAVADB_SET_METH(j_class, j_type, j_fld, c_type, c_field) \
+ JAVADB_METHOD(j_class##_set_1##j_fld, (JAVADB_ARGS, j_type val), c_type, \
+ set_##c_field, (c_this, val))
-#define JAVADB_API_END(db) \
- if ((db) != NULL) \
- ((DB_JAVAINFO*)(db)->cj_internal)->jdbref_ = \
- ((DB_ENV_JAVAINFO*)((db)->dbenv->cj_internal))->jdbref_ = 0
+#define JAVADB_SET_METH_STR(j_class, j_fld, c_type, c_field) \
+ JAVADB_METHOD(j_class##_set_1##j_fld, (JAVADB_ARGS, jstring val), c_type, \
+ set_##c_field, (c_this, (*jnienv)->GetStringUTFChars(jnienv, val, NULL)))
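Likewise, a plausible use of the method-style macros (an assumed example, not copied from the generated sources; it relies on the get_DB() helper above and the standard DB->set_flags() call):

    /*
     * Generates Java_com_sleepycat_db_Db_set_1flags(), which calls
     * DB->set_flags() and reports a DbException on a non-zero return.
     */
    JAVADB_SET_METH(Db, jint, flags, DB, flags)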
-#define JAVADB_ENV_API_BEGIN(dbenv, jthis) \
- if ((dbenv) != NULL) \
- ((DB_ENV_JAVAINFO*)((dbenv)->cj_internal))->jenvref_ = (jthis)
-#define JAVADB_ENV_API_END(dbenv) \
- if ((dbenv) != NULL) \
- ((DB_ENV_JAVAINFO*)((dbenv)->cj_internal))->jenvref_ = 0
+/*
+ * These macros are used by code generated by the s_java script.
+ */
+#define JAVADB_STAT_INT(env, cl, jobj, statp, name) \
+ set_int_field(jnienv, cl, jobj, #name, statp->name)
+
+#define JAVADB_STAT_LSN(env, cl, jobj, statp, name) \
+ set_lsn_field(jnienv, cl, jobj, #name, statp->name)
+
+#define JAVADB_STAT_LONG(env, cl, jobj, statp, name) \
+ set_long_field(jnienv, cl, jobj, #name, statp->name)
+
+/*
+ * We build the active list separately.
+ */
+#define JAVADB_STAT_ACTIVE(env, cl, jobj, statp, name) \
+ do {} while(0)
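Reading the stat macros concretely (an assumed call site; the generated conversion routine around it is not shown in this patch, but st_nlocks is a real DB_LOCK_STAT field used elsewhere in it):

    /* A generated line such as */
    JAVADB_STAT_INT(jnienv, cl, jobj, statp, st_nlocks);
    /* expands to */
    set_int_field(jnienv, cl, jobj, "st_nlocks", statp->st_nlocks);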
#endif /* !_JAVA_UTIL_H_ */
diff --git a/bdb/lock/Design b/bdb/lock/Design
index ac8f0b02fbf..f0bb5c6e99c 100644
--- a/bdb/lock/Design
+++ b/bdb/lock/Design
@@ -1,7 +1,15 @@
-# $Id: Design,v 11.3 2000/02/19 20:58:03 bostic Exp $
+# $Id: Design,v 11.5 2002/02/01 19:07:18 bostic Exp $
Synchronization in the Locking Subsystem
+This document describes how we implemented fine-grain locking in the
+lock manager (that is, locking at the hash-bucket level instead of
+locking the entire region). We found that the increase in concurrency
+was not sufficient to warrant the increase in complexity or the
+additional cost of performing each lock operation, so we no longer
+use this scheme. Should we have to do fine-grain locking in a future
+release, this would be a reasonable starting point.
+
=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
1. Data structures
=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
diff --git a/bdb/lock/lock.c b/bdb/lock/lock.c
index 8d246f7ded3..8eda155b822 100644
--- a/bdb/lock/lock.c
+++ b/bdb/lock/lock.c
@@ -1,14 +1,14 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Copyright (c) 1996-2002
* Sleepycat Software. All rights reserved.
*/
#include "db_config.h"
#ifndef lint
-static const char revid[] = "$Id: lock.c,v 11.40 2000/12/19 23:18:58 ubell Exp $";
+static const char revid[] = "$Id: lock.c,v 11.108 2002/08/06 06:11:34 bostic Exp $";
#endif /* not lint */
#ifndef NO_SYSTEM_INCLUDES
@@ -17,118 +17,183 @@ static const char revid[] = "$Id: lock.c,v 11.40 2000/12/19 23:18:58 ubell Exp $
#include <string.h>
#endif
-#ifdef HAVE_RPC
-#include "db_server.h"
-#endif
-
#include "db_int.h"
-#include "db_page.h"
-#include "db_shash.h"
-#include "lock.h"
-#include "log.h"
-#include "db_am.h"
-#include "txn.h"
-
-#ifdef HAVE_RPC
-#include "gen_client_ext.h"
-#include "rpc_client_ext.h"
-#endif
+#include "dbinc/db_shash.h"
+#include "dbinc/lock.h"
+#include "dbinc/log.h"
+#include "dbinc/txn.h"
static int __lock_checklocker __P((DB_LOCKTAB *,
- struct __db_lock *, u_int32_t, u_int32_t, int *));
-static int __lock_get_internal __P((DB_LOCKTAB *, u_int32_t,
- u_int32_t, const DBT *, db_lockmode_t, DB_LOCK *));
-static int __lock_is_parent __P((DB_LOCKTAB *, u_int32_t, DB_LOCKER *));
-static int __lock_put_internal __P((DB_LOCKTAB *,
- struct __db_lock *, u_int32_t, u_int32_t));
-static int __lock_put_nolock __P((DB_ENV *, DB_LOCK *, int *, int));
-static void __lock_remove_waiter __P((DB_ENV *,
- DB_LOCKOBJ *, struct __db_lock *, db_status_t));
+ struct __db_lock *, u_int32_t, u_int32_t));
+static void __lock_expires __P((DB_ENV *, db_timeval_t *, db_timeout_t));
+static void __lock_freelocker
+ __P((DB_LOCKTAB *, DB_LOCKREGION *, DB_LOCKER *, u_int32_t));
+static int __lock_get_internal __P((DB_LOCKTAB *, u_int32_t, u_int32_t,
+ const DBT *, db_lockmode_t, db_timeout_t, DB_LOCK *));
+static int __lock_getobj
+ __P((DB_LOCKTAB *, const DBT *, u_int32_t, int, DB_LOCKOBJ **));
+static int __lock_is_parent __P((DB_LOCKTAB *, u_int32_t, DB_LOCKER *));
+static int __lock_put_internal __P((DB_LOCKTAB *,
+ struct __db_lock *, u_int32_t, u_int32_t));
+static int __lock_put_nolock __P((DB_ENV *, DB_LOCK *, int *, u_int32_t));
+static void __lock_remove_waiter __P((DB_LOCKTAB *,
+ DB_LOCKOBJ *, struct __db_lock *, db_status_t));
+static int __lock_trade __P((DB_ENV *, DB_LOCK *, u_int32_t));
static const char __db_lock_err[] = "Lock table is out of available %s";
static const char __db_lock_invalid[] = "%s: Lock is no longer valid";
static const char __db_locker_invalid[] = "Locker is not valid";
/*
- * lock_id --
+ * __lock_id --
* Generate a unique locker id.
+ *
+ * PUBLIC: int __lock_id __P((DB_ENV *, u_int32_t *));
*/
int
-lock_id(dbenv, idp)
+__lock_id(dbenv, idp)
DB_ENV *dbenv;
u_int32_t *idp;
{
+ DB_LOCKER *lk;
DB_LOCKTAB *lt;
DB_LOCKREGION *region;
-
-#ifdef HAVE_RPC
- if (F_ISSET(dbenv, DB_ENV_RPCCLIENT))
- return (__dbcl_lock_id(dbenv, idp));
-#endif
+ u_int32_t *ids, locker_ndx;
+ int nids, ret;
PANIC_CHECK(dbenv);
- ENV_REQUIRES_CONFIG(dbenv, dbenv->lk_handle, DB_INIT_LOCK);
+ ENV_REQUIRES_CONFIG(dbenv,
+ dbenv->lk_handle, "DB_ENV->lock_id", DB_INIT_LOCK);
lt = dbenv->lk_handle;
region = lt->reginfo.primary;
+ ret = 0;
/*
- * Note that we are letting locker IDs wrap.
- *
- * This is potentially dangerous in that it's conceivable that you
- * could be allocating a new locker id and still have someone using
- * it. However, the alternatives are that we keep a bitmap of
- * locker ids or we forbid wrapping. Both are probably bad. The
- * bitmap of locker ids will take up 64 MB of space. Forbidding
- * wrapping means that we'll run out of locker IDs after 2 billion.
- * In order for the wrap bug to fire, we'd need to have something
- * that stayed open while 2 billion locker ids were used up. Since
- * we cache cursors it means that something would have to stay open
- * sufficiently long that we open and close a lot of files and a
- * lot of cursors within them. Betting that this won't happen seems
- * to the lesser of the evils.
+ * Allocate a new lock id. If we wrap around then we
+ * find the minimum currently in use and make sure we
+ * can stay below that. This code is similar to code
+ * in __txn_begin_int for recovering txn ids.
*/
LOCKREGION(dbenv, lt);
- if (region->id >= DB_LOCK_MAXID)
- region->id = 0;
- *idp = ++region->id;
- UNLOCKREGION(dbenv, lt);
+ /*
+ * Our current valid range can span the maximum valid value, so check
+ * for it and wrap manually.
+ */
+ if (region->stat.st_id == DB_LOCK_MAXID &&
+ region->stat.st_cur_maxid != DB_LOCK_MAXID)
+ region->stat.st_id = DB_LOCK_INVALIDID;
+ if (region->stat.st_id == region->stat.st_cur_maxid) {
+ if ((ret = __os_malloc(dbenv,
+ sizeof(u_int32_t) * region->stat.st_nlockers, &ids)) != 0)
+ goto err;
+ nids = 0;
+ for (lk = SH_TAILQ_FIRST(&region->lockers, __db_locker);
+ lk != NULL;
+ lk = SH_TAILQ_NEXT(lk, ulinks, __db_locker))
+ ids[nids++] = lk->id;
+ region->stat.st_id = DB_LOCK_INVALIDID;
+ region->stat.st_cur_maxid = DB_LOCK_MAXID;
+ if (nids != 0)
+ __db_idspace(ids, nids,
+ &region->stat.st_id, &region->stat.st_cur_maxid);
+ __os_free(dbenv, ids);
+ }
+ *idp = ++region->stat.st_id;
- return (0);
+ /* Allocate a locker for this id. */
+ LOCKER_LOCK(lt, region, *idp, locker_ndx);
+ ret = __lock_getlocker(lt, *idp, locker_ndx, 1, &lk);
+
+err: UNLOCKREGION(dbenv, lt);
+
+ return (ret);
}
/*
- * Vector lock routine. This function takes a set of operations
- * and performs them all at once. In addition, lock_vec provides
- * functionality for lock inheritance, releasing all locks for a
- * given locker (used during transaction commit/abort), releasing
- * all locks on a given object, and generating debugging information.
+ * __lock_id_free --
+ * Free a locker id.
+ *
+ * PUBLIC: int __lock_id_free __P((DB_ENV *, u_int32_t));
*/
int
-lock_vec(dbenv, locker, flags, list, nlist, elistp)
+__lock_id_free(dbenv, id)
+ DB_ENV *dbenv;
+ u_int32_t id;
+{
+ DB_LOCKER *sh_locker;
+ DB_LOCKTAB *lt;
+ DB_LOCKREGION *region;
+ u_int32_t locker_ndx;
+ int ret;
+
+ PANIC_CHECK(dbenv);
+ ENV_REQUIRES_CONFIG(dbenv,
+ dbenv->lk_handle, "DB_ENV->lock_id_free", DB_INIT_LOCK);
+
+ lt = dbenv->lk_handle;
+ region = lt->reginfo.primary;
+
+ LOCKREGION(dbenv, lt);
+ LOCKER_LOCK(lt, region, id, locker_ndx);
+ if ((ret =
+ __lock_getlocker(lt, id, locker_ndx, 0, &sh_locker)) != 0)
+ goto err;
+ if (sh_locker == NULL) {
+ ret = EINVAL;
+ goto err;
+ }
+
+ if (sh_locker->nlocks != 0) {
+ __db_err(dbenv, "Locker still has locks");
+ ret = EINVAL;
+ goto err;
+ }
+
+ __lock_freelocker(lt, region, sh_locker, locker_ndx);
+
+err: UNLOCKREGION(dbenv, lt);
+ return (ret);
+}
+
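A hedged application-level sketch of the interface these two routines now back (not from the patch; it assumes the DB_ENV->lock_id()/lock_id_free() method pointers named in the error strings above):

    #include <db.h>

    /* Allocate a locker id, use it, then release it. */
    int with_locker(DB_ENV *dbenv)
    {
        u_int32_t locker;
        int ret;

        if ((ret = dbenv->lock_id(dbenv, &locker)) != 0)
            return (ret);
        /* ... acquire and release locks under this locker id ... */
        return (dbenv->lock_id_free(dbenv, locker));
    }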
+/*
+ * __lock_vec --
+ * Vector lock routine. This function takes a set of operations
+ * and performs them all at once. In addition, lock_vec provides
+ * functionality for lock inheritance, releasing all locks for a
+ * given locker (used during transaction commit/abort), releasing
+ * all locks on a given object, and generating debugging information.
+ *
+ * PUBLIC: int __lock_vec __P((DB_ENV *,
+ * PUBLIC: u_int32_t, u_int32_t, DB_LOCKREQ *, int, DB_LOCKREQ **));
+ */
+int
+__lock_vec(dbenv, locker, flags, list, nlist, elistp)
DB_ENV *dbenv;
u_int32_t locker, flags;
int nlist;
DB_LOCKREQ *list, **elistp;
{
struct __db_lock *lp, *next_lock;
+ DB_LOCK lock;
DB_LOCKER *sh_locker, *sh_parent;
DB_LOCKOBJ *obj, *sh_obj;
DB_LOCKREGION *region;
DB_LOCKTAB *lt;
u_int32_t lndx, ndx;
- int did_abort, i, ret, run_dd;
+ int did_abort, i, ret, run_dd, upgrade, writes;
-#ifdef HAVE_RPC
- if (F_ISSET(dbenv, DB_ENV_RPCCLIENT))
- return (__dbcl_lock_vec(dbenv, locker,
- flags, list, nlist, elistp));
-#endif
PANIC_CHECK(dbenv);
- ENV_REQUIRES_CONFIG(dbenv, dbenv->lk_handle, DB_INIT_LOCK);
+ ENV_REQUIRES_CONFIG(dbenv,
+ dbenv->lk_handle, "DB_ENV->lock_vec", DB_INIT_LOCK);
+
+ /* Check if locks have been globally turned off. */
+ if (F_ISSET(dbenv, DB_ENV_NOLOCKING))
+ return (0);
/* Validate arguments. */
- if ((ret = __db_fchk(dbenv, "lock_vec", flags, DB_LOCK_NOWAIT)) != 0)
+ if ((ret = __db_fchk(dbenv, "DB_ENV->lock_vec",
+ flags, DB_LOCK_FREE_LOCKER | DB_LOCK_NOWAIT)) != 0)
return (ret);
lt = dbenv->lk_handle;
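Since __lock_vec() is the workhorse behind the public vector interface, a hedged usage sketch from the application side (an assumption about typical use, not code from this patch; DB_ENV->lock_vec() and the DB_LOCKREQ layout are the documented public API):

    #include <string.h>
    #include <db.h>

    /*
     * Acquire a write lock on an object and later release everything held
     * by the locker, in a single lock_vec() call.
     */
    int grab_then_release_all(DB_ENV *dbenv, u_int32_t locker, DBT *obj)
    {
        DB_LOCKREQ req[2], *errp;

        memset(req, 0, sizeof(req));
        req[0].op = DB_LOCK_GET;
        req[0].mode = DB_LOCK_WRITE;
        req[0].obj = obj;
        req[1].op = DB_LOCK_PUT_ALL;

        return (dbenv->lock_vec(dbenv, locker, 0, req, 2, &errp));
    }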
@@ -138,13 +203,14 @@ lock_vec(dbenv, locker, flags, list, nlist, elistp)
LOCKREGION(dbenv, (DB_LOCKTAB *)dbenv->lk_handle);
for (i = 0, ret = 0; i < nlist && ret == 0; i++)
switch (list[i].op) {
+ case DB_LOCK_GET_TIMEOUT:
+ LF_SET(DB_LOCK_SET_TIMEOUT);
case DB_LOCK_GET:
ret = __lock_get_internal(dbenv->lk_handle,
- locker, flags,
- list[i].obj, list[i].mode, &list[i].lock);
+ locker, flags, list[i].obj,
+ list[i].mode, list[i].timeout, &list[i].lock);
break;
case DB_LOCK_INHERIT:
-
/*
* Get the committing locker and mark it as deleted.
* This allows us to traverse the locker links without
@@ -159,7 +225,7 @@ lock_vec(dbenv, locker, flags, list, nlist, elistp)
sh_locker == NULL ||
F_ISSET(sh_locker, DB_LOCKER_DELETED)) {
if (ret == 0 && sh_locker != NULL)
- ret = EACCES;
+ ret = EINVAL;
__db_err(dbenv, __db_locker_invalid);
break;
}
@@ -182,8 +248,8 @@ lock_vec(dbenv, locker, flags, list, nlist, elistp)
if (F_ISSET(sh_parent, DB_LOCKER_DELETED)) {
if (ret == 0) {
__db_err(dbenv,
- "Parent locker is not valid");
- ret = EACCES;
+ "Parent locker is not valid");
+ ret = EINVAL;
}
break;
}
@@ -203,15 +269,21 @@ lock_vec(dbenv, locker, flags, list, nlist, elistp)
LF_ISSET(DB_LOCK_NOWAITERS));
}
+ /* Transfer child counts to parent. */
+ sh_parent->nlocks += sh_locker->nlocks;
+ sh_parent->nwrites += sh_locker->nwrites;
+
/* Now free the original locker. */
ret = __lock_checklocker(lt,
- NULL, locker, DB_LOCK_IGNOREDEL, NULL);
+ NULL, locker, DB_LOCK_IGNOREDEL);
break;
case DB_LOCK_PUT:
- ret =
- __lock_put_nolock(dbenv, &list[i].lock, &run_dd, 0);
+ ret = __lock_put_nolock(dbenv,
+ &list[i].lock, &run_dd, flags);
break;
case DB_LOCK_PUT_ALL:
+ case DB_LOCK_PUT_READ:
+ case DB_LOCK_UPGRADE_WRITE:
/*
* Get the locker and mark it as deleted. This
* allows us to traverse the locker links without
@@ -232,23 +304,79 @@ lock_vec(dbenv, locker, flags, list, nlist, elistp)
* to do.
*/
break;
+ upgrade = 0;
+ writes = 1;
+ if (list[i].op == DB_LOCK_PUT_READ)
+ writes = 0;
+ else if (list[i].op == DB_LOCK_UPGRADE_WRITE) {
+ if (F_ISSET(sh_locker, DB_LOCKER_DIRTY))
+ upgrade = 1;
+ writes = 0;
+ }
+
F_SET(sh_locker, DB_LOCKER_DELETED);
/* Now traverse the locks, releasing each one. */
for (lp = SH_LIST_FIRST(&sh_locker->heldby, __db_lock);
- lp != NULL;
- lp = SH_LIST_FIRST(&sh_locker->heldby, __db_lock)) {
- SH_LIST_REMOVE(lp, locker_links, __db_lock);
- sh_obj =
- (DB_LOCKOBJ *)((u_int8_t *)lp + lp->obj);
- SHOBJECT_LOCK(lt, region, sh_obj, lndx);
- ret = __lock_put_internal(lt,
- lp, lndx, DB_LOCK_FREE | DB_LOCK_DOALL);
- if (ret != 0)
- break;
+ lp != NULL;) {
+ sh_obj = (DB_LOCKOBJ *)
+ ((u_int8_t *)lp + lp->obj);
+ if (writes == 1 || lp->mode == DB_LOCK_READ) {
+ SH_LIST_REMOVE(lp,
+ locker_links, __db_lock);
+ sh_obj = (DB_LOCKOBJ *)
+ ((u_int8_t *)lp + lp->obj);
+ SHOBJECT_LOCK(lt, region, sh_obj, lndx);
+ /*
+ * We are not letting lock_put_internal
+ * unlink the lock, so we'll have to
+ * update counts here.
+ */
+ sh_locker->nlocks--;
+ if (IS_WRITELOCK(lp->mode))
+ sh_locker->nwrites--;
+ ret = __lock_put_internal(lt, lp,
+ lndx, DB_LOCK_FREE | DB_LOCK_DOALL);
+ if (ret != 0)
+ break;
+ lp = SH_LIST_FIRST(
+ &sh_locker->heldby, __db_lock);
+ } else
+ lp = SH_LIST_NEXT(lp,
+ locker_links, __db_lock);
+ }
+ switch (list[i].op) {
+ case DB_LOCK_UPGRADE_WRITE:
+ if (upgrade != 1)
+ goto up_done;
+ for (lp = SH_LIST_FIRST(
+ &sh_locker->heldby, __db_lock);
+ lp != NULL;
+ lp = SH_LIST_NEXT(lp,
+ locker_links, __db_lock)) {
+ if (ret != 0)
+ break;
+ lock.off = R_OFFSET(&lt->reginfo, lp);
+ lock.gen = lp->gen;
+ F_SET(sh_locker, DB_LOCKER_INABORT);
+ ret = __lock_get_internal(lt,
+ locker, DB_LOCK_UPGRADE,
+ NULL, DB_LOCK_WRITE, 0, &lock);
+ }
+ up_done:
+ /* FALL THROUGH */
+ case DB_LOCK_PUT_READ:
+ F_CLR(sh_locker, DB_LOCKER_DELETED);
+ break;
+
+ case DB_LOCK_PUT_ALL:
+ if (ret == 0)
+ ret = __lock_checklocker(lt,
+ NULL, locker, DB_LOCK_IGNOREDEL);
+ break;
+ default:
+ break;
}
- ret = __lock_checklocker(lt,
- NULL, locker, DB_LOCK_IGNOREDEL, NULL);
break;
case DB_LOCK_PUT_OBJ:
/* Remove all the locks associated with an object. */
@@ -269,8 +397,9 @@ lock_vec(dbenv, locker, flags, list, nlist, elistp)
for (lp = SH_TAILQ_FIRST(&sh_obj->waiters, __db_lock);
ret == 0 && lp != NULL;
lp = SH_TAILQ_FIRST(&sh_obj->waiters, __db_lock))
- ret = __lock_put_internal(lt,
- lp, ndx, DB_LOCK_NOPROMOTE | DB_LOCK_DOALL);
+ ret = __lock_put_internal(lt, lp, ndx,
+ DB_LOCK_UNLINK |
+ DB_LOCK_NOPROMOTE | DB_LOCK_DOALL);
/*
* On the last time around, the object will get
@@ -281,18 +410,43 @@ lock_vec(dbenv, locker, flags, list, nlist, elistp)
ret == 0 && lp != NULL;
lp = next_lock) {
next_lock = SH_TAILQ_NEXT(lp, links, __db_lock);
- ret = __lock_put_internal(lt,
- lp, ndx, DB_LOCK_NOPROMOTE | DB_LOCK_DOALL);
+ ret = __lock_put_internal(lt, lp, ndx,
+ DB_LOCK_UNLINK |
+ DB_LOCK_NOPROMOTE | DB_LOCK_DOALL);
}
break;
+
+ case DB_LOCK_TIMEOUT:
+ ret = __lock_set_timeout(dbenv,
+ locker, 0, DB_SET_TXN_NOW);
+ region->need_dd = 1;
+ break;
+
+ case DB_LOCK_TRADE:
+ /*
+ * INTERNAL USE ONLY.
+ * Change the holder of the lock described in
+ * list[i].lock to the locker-id specified by
+ * the locker parameter.
+ */
+ /*
+ * You had better know what you're doing here.
+ * We are trading locker-id's on a lock to
+ * facilitate file locking on open DB handles.
+ * We do not do any conflict checking on this,
+ * so heaven help you if you use this flag under
+ * any other circumstances.
+ */
+ ret = __lock_trade(dbenv, &list[i].lock, locker);
+ break;
#ifdef DEBUG
case DB_LOCK_DUMP:
/* Find the locker. */
LOCKER_LOCK(lt, region, locker, ndx);
if ((ret = __lock_getlocker(lt,
- locker, ndx, 0, &sh_locker)) != 0
- || sh_locker == NULL
- || F_ISSET(sh_locker, DB_LOCKER_DELETED))
+ locker, ndx, 0, &sh_locker)) != 0 ||
+ sh_locker == NULL ||
+ F_ISSET(sh_locker, DB_LOCKER_DELETED))
break;
for (lp = SH_LIST_FIRST(&sh_locker->heldby, __db_lock);
@@ -309,14 +463,12 @@ lock_vec(dbenv, locker, flags, list, nlist, elistp)
break;
}
- if (ret == 0 && region->need_dd && region->detect != DB_LOCK_NORUN) {
+ if (ret == 0 && region->need_dd && region->detect != DB_LOCK_NORUN)
run_dd = 1;
- region->need_dd = 0;
- }
UNLOCKREGION(dbenv, (DB_LOCKTAB *)dbenv->lk_handle);
if (run_dd)
- (void)lock_detect(dbenv, 0, region->detect, &did_abort);
+ (void)dbenv->lock_detect(dbenv, 0, region->detect, &did_abort);
if (ret != 0 && elistp != NULL)
*elistp = &list[i - 1];
@@ -327,14 +479,17 @@ lock_vec(dbenv, locker, flags, list, nlist, elistp)
/*
* Lock acquisition routines. There are two library interfaces:
*
- * lock_get --
+ * __lock_get --
* original lock get interface that takes a locker id.
*
* All the work for lock_get (and for the GET option of lock_vec) is done
* inside of lock_get_internal.
+ *
+ * PUBLIC: int __lock_get __P((DB_ENV *,
+ * PUBLIC: u_int32_t, u_int32_t, const DBT *, db_lockmode_t, DB_LOCK *));
*/
int
-lock_get(dbenv, locker, flags, obj, lock_mode, lock)
+__lock_get(dbenv, locker, flags, obj, lock_mode, lock)
DB_ENV *dbenv;
u_int32_t locker, flags;
const DBT *obj;
@@ -343,95 +498,103 @@ lock_get(dbenv, locker, flags, obj, lock_mode, lock)
{
int ret;
-#ifdef HAVE_RPC
- if (F_ISSET(dbenv, DB_ENV_RPCCLIENT))
- return (__dbcl_lock_get(dbenv, locker,
- flags, obj, lock_mode, lock));
-#endif
PANIC_CHECK(dbenv);
- ENV_REQUIRES_CONFIG(dbenv, dbenv->lk_handle, DB_INIT_LOCK);
+ ENV_REQUIRES_CONFIG(dbenv,
+ dbenv->lk_handle, "DB_ENV->lock_get", DB_INIT_LOCK);
if (IS_RECOVERING(dbenv)) {
- lock->off = LOCK_INVALID;
+ LOCK_INIT(*lock);
return (0);
}
/* Validate arguments. */
- if ((ret = __db_fchk(dbenv,
- "lock_get", flags,
+ if ((ret = __db_fchk(dbenv, "DB_ENV->lock_get", flags,
DB_LOCK_NOWAIT | DB_LOCK_UPGRADE | DB_LOCK_SWITCH)) != 0)
return (ret);
LOCKREGION(dbenv, (DB_LOCKTAB *)dbenv->lk_handle);
ret = __lock_get_internal(dbenv->lk_handle,
- locker, flags, obj, lock_mode, lock);
+ locker, flags, obj, lock_mode, 0, lock);
UNLOCKREGION(dbenv, (DB_LOCKTAB *)dbenv->lk_handle);
return (ret);
}
static int
-__lock_get_internal(lt, locker, flags, obj, lock_mode, lock)
+__lock_get_internal(lt, locker, flags, obj, lock_mode, timeout, lock)
DB_LOCKTAB *lt;
u_int32_t locker, flags;
const DBT *obj;
db_lockmode_t lock_mode;
+ db_timeout_t timeout;
DB_LOCK *lock;
{
- struct __db_lock *newl, *lp;
+ struct __db_lock *newl, *lp, *wwrite;
DB_ENV *dbenv;
DB_LOCKER *sh_locker;
DB_LOCKOBJ *sh_obj;
DB_LOCKREGION *region;
- u_int32_t locker_ndx;
- int did_abort, freed, ihold, on_locker_list, no_dd, ret;
+ u_int32_t locker_ndx, obj_ndx;
+ int did_abort, ihold, on_locker_list, no_dd, ret;
- no_dd = ret = 0;
- on_locker_list = 0;
- region = lt->reginfo.primary;
dbenv = lt->dbenv;
+ region = lt->reginfo.primary;
+ on_locker_list = no_dd = ret = 0;
+
+ /* Check if locks have been globally turned off. */
+ if (F_ISSET(dbenv, DB_ENV_NOLOCKING))
+ return (0);
/*
- * If we are not going to reuse this lock, initialize
- * the offset to invalid so that if we fail it
- * will not look like a valid lock.
+ * If we are not going to reuse this lock, initialize the offset to
+ * invalid so that if we fail it will not look like a valid lock.
*/
if (!LF_ISSET(DB_LOCK_UPGRADE | DB_LOCK_SWITCH))
- lock->off = LOCK_INVALID;
+ LOCK_INIT(*lock);
- /*
- * Check that the lock mode is valid.
- */
- if ((u_int32_t)lock_mode >= region->nmodes) {
- __db_err(dbenv,
- "lock_get: invalid lock mode %lu\n", (u_long)lock_mode);
+ /* Check that the lock mode is valid. */
+ if ((u_int32_t)lock_mode >= region->stat.st_nmodes) {
+ __db_err(dbenv, "DB_ENV->lock_get: invalid lock mode %lu",
+ (u_long)lock_mode);
return (EINVAL);
}
/* Allocate a new lock. Optimize for the common case of a grant. */
- region->nrequests++;
+ region->stat.st_nrequests++;
if ((newl = SH_TAILQ_FIRST(&region->free_locks, __db_lock)) != NULL)
SH_TAILQ_REMOVE(&region->free_locks, newl, links, __db_lock);
if (newl == NULL) {
__db_err(dbenv, __db_lock_err, "locks");
return (ENOMEM);
}
- if (++region->nlocks > region->maxnlocks)
- region->maxnlocks = region->nlocks;
+ if (++region->stat.st_nlocks > region->stat.st_maxnlocks)
+ region->stat.st_maxnlocks = region->stat.st_nlocks;
- /* Allocate a new object. */
- OBJECT_LOCK(lt, region, obj, lock->ndx);
- if ((ret = __lock_getobj(lt, obj, lock->ndx, 1, &sh_obj)) != 0)
- goto err;
+ if (obj == NULL) {
+ DB_ASSERT(LOCK_ISSET(*lock));
+ lp = (struct __db_lock *)R_ADDR(&lt->reginfo, lock->off);
+ sh_obj = (DB_LOCKOBJ *) ((u_int8_t *)lp + lp->obj);
+ } else {
+ /* Allocate a new shared-memory object. */
+ OBJECT_LOCK(lt, region, obj, lock->ndx);
+ if ((ret = __lock_getobj(lt, obj, lock->ndx, 1, &sh_obj)) != 0)
+ goto err;
+ }
/* Get the locker, we may need it to find our parent. */
LOCKER_LOCK(lt, region, locker, locker_ndx);
- if ((ret =
- __lock_getlocker(lt, locker, locker_ndx, 1, &sh_locker)) != 0) {
+ if ((ret = __lock_getlocker(lt, locker,
+ locker_ndx, locker > DB_LOCK_MAXID ? 1 : 0, &sh_locker)) != 0) {
/*
- * XXX: Margo
- * CLEANUP the object and the lock.
+ * XXX We cannot tell if we created the object or not,
+ * so we don't know if we should free it or not.
*/
- return (ret);
+ goto err;
+ }
+
+ if (sh_locker == NULL) {
+ __db_err(dbenv, "Locker does not exist");
+ ret = EINVAL;
+ goto err;
}
/*
@@ -460,11 +623,11 @@ __lock_get_internal(lt, locker, flags, obj, lock_mode, lock)
if (LF_ISSET(DB_LOCK_SWITCH))
goto put_lock;
+ wwrite = NULL;
for (lp = SH_TAILQ_FIRST(&sh_obj->holders, __db_lock);
lp != NULL;
lp = SH_TAILQ_NEXT(lp, links, __db_lock)) {
- if (locker == lp->holder ||
- __lock_is_parent(lt, lp->holder, sh_locker)) {
+ if (locker == lp->holder) {
if (lp->mode == lock_mode &&
lp->status == DB_LSTAT_HELD) {
if (LF_ISSET(DB_LOCK_UPGRADE))
@@ -473,20 +636,46 @@ __lock_get_internal(lt, locker, flags, obj, lock_mode, lock)
/*
* Lock is held, so we can increment the
* reference count and return this lock.
+ * We do not count reference increments
+ * towards the locks held by the locker.
*/
lp->refcount++;
lock->off = R_OFFSET(&lt->reginfo, lp);
lock->gen = lp->gen;
+ lock->mode = lp->mode;
ret = 0;
goto done;
- } else
+ } else {
ihold = 1;
- } else if (CONFLICTS(lt, region, lp->mode, lock_mode))
+ if (lock_mode == DB_LOCK_WRITE &&
+ lp->mode == DB_LOCK_WWRITE)
+ wwrite = lp;
+ }
+ } else if (__lock_is_parent(lt, lp->holder, sh_locker))
+ ihold = 1;
+ else if (CONFLICTS(lt, region, lp->mode, lock_mode))
break;
}
/*
+ * If we are looking to upgrade a WWRITE to a WRITE lock
+ * and there were no conflicting locks then we can just
+ * upgrade this lock to the one we want.
+ */
+ if (wwrite != NULL && lp == NULL) {
+ lp = wwrite;
+ lp->mode = lock_mode;
+ lp->refcount++;
+ lock->off = R_OFFSET(&lt->reginfo, lp);
+ lock->gen = lp->gen;
+ lock->mode = lp->mode;
+
+ ret = 0;
+ goto done;
+ }
+
+ /*
* Make the new lock point to the new object, initialize fields.
*
* This lock is not linked in anywhere, so we can muck with it
@@ -542,10 +731,10 @@ put_lock:
SH_TAILQ_INSERT_TAIL(&sh_obj->waiters, newl, links);
} else {
ret = DB_LOCK_NOTGRANTED;
- if (SH_LIST_FIRST(&sh_locker->heldby, __db_lock) == NULL
- && LOCKER_FREEABLE(sh_locker))
- __lock_freelocker( lt, region, sh_locker, locker_ndx);
- region->nnowaits++;
+ if (SH_LIST_FIRST(&sh_locker->heldby, __db_lock) == NULL &&
+ LF_ISSET(DB_LOCK_FREE_LOCKER))
+ __lock_freelocker(lt, region, sh_locker, locker_ndx);
+ region->stat.st_nnowaits++;
goto err;
}
@@ -556,9 +745,9 @@ llist:
* detector, save that information.
*/
on_locker_list = 1;
- no_dd = sh_locker->master_locker == INVALID_ROFF
- && SH_LIST_FIRST(&sh_locker->child_locker, __db_locker) == NULL
- && SH_LIST_FIRST(&sh_locker->heldby, __db_lock) == NULL;
+ no_dd = sh_locker->master_locker == INVALID_ROFF &&
+ SH_LIST_FIRST(&sh_locker->child_locker, __db_locker) == NULL &&
+ SH_LIST_FIRST(&sh_locker->heldby, __db_lock) == NULL;
SH_LIST_INSERT_HEAD(&sh_locker->heldby, newl, locker_links, __db_lock);
@@ -573,9 +762,60 @@ llist:
* block.
*/
newl->status = DB_LSTAT_WAITING;
- region->nconflicts++;
- if (region->detect == DB_LOCK_NORUN)
- region->need_dd = 1;
+ region->stat.st_nconflicts++;
+ region->need_dd = 1;
+ /*
+ * First check to see if this txn has expired.
+ * If not, then check whether the lock timeout is past
+ * the expiration of the txn; if it is, use
+ * the txn expiration time. lk_expire is passed
+ * to avoid an extra call to get the time.
+ */
+ if (__lock_expired(dbenv,
+ &sh_locker->lk_expire, &sh_locker->tx_expire)) {
+ newl->status = DB_LSTAT_ABORTED;
+ region->stat.st_ndeadlocks++;
+ region->stat.st_ntxntimeouts++;
+
+ /*
+ * Remove the lock from the wait queue and if
+ * this was the only lock on the wait queue remove
+ * this object from the deadlock detector object
+ * list.
+ */
+ SH_LIST_REMOVE(newl, locker_links, __db_lock);
+ SH_TAILQ_REMOVE(
+ &sh_obj->waiters, newl, links, __db_lock);
+ if (SH_TAILQ_FIRST(&sh_obj->waiters, __db_lock) == NULL)
+ SH_TAILQ_REMOVE(&region->dd_objs,
+ sh_obj, dd_links, __db_lockobj);
+
+ /* Clear the timeout, we are done. */
+ LOCK_SET_TIME_INVALID(&sh_locker->tx_expire);
+ goto expired;
+ }
+
+ /*
+ * If a timeout was specified in this call then it
+ * takes priority. If a lock timeout has been specified
+ * for this transaction then use that, otherwise use
+ * the global timeout value.
+ */
+ if (!LF_ISSET(DB_LOCK_SET_TIMEOUT)) {
+ if (F_ISSET(sh_locker, DB_LOCKER_TIMEOUT))
+ timeout = sh_locker->lk_timeout;
+ else
+ timeout = region->lk_timeout;
+ }
+ if (timeout != 0)
+ __lock_expires(dbenv, &sh_locker->lk_expire, timeout);
+ else
+ LOCK_SET_TIME_INVALID(&sh_locker->lk_expire);
+
+ if (LOCK_TIME_ISVALID(&sh_locker->tx_expire) &&
+ (timeout == 0 || __lock_expired(dbenv,
+ &sh_locker->lk_expire, &sh_locker->tx_expire)))
+ sh_locker->lk_expire = sh_locker->tx_expire;
UNLOCKREGION(dbenv, (DB_LOCKTAB *)dbenv->lk_handle);
/*
@@ -583,22 +823,41 @@ llist:
* detector should be run.
*/
if (region->detect != DB_LOCK_NORUN && !no_dd)
- (void)lock_detect(dbenv, 0, region->detect, &did_abort);
+ (void)dbenv->lock_detect(
+ dbenv, 0, region->detect, &did_abort);
- MUTEX_LOCK(dbenv, &newl->mutex, dbenv->lockfhp);
+ MUTEX_LOCK(dbenv, &newl->mutex);
LOCKREGION(dbenv, (DB_LOCKTAB *)dbenv->lk_handle);
+expired: /* Turn off lock timeout. */
+ LOCK_SET_TIME_INVALID(&sh_locker->lk_expire);
+
if (newl->status != DB_LSTAT_PENDING) {
- (void)__lock_checklocker(lt,
- newl, newl->holder, 0, &freed);
+ (void)__lock_checklocker(lt, newl, newl->holder, 0);
switch (newl->status) {
case DB_LSTAT_ABORTED:
on_locker_list = 0;
ret = DB_LOCK_DEADLOCK;
break;
- case DB_LSTAT_NOGRANT:
- ret = DB_LOCK_NOTGRANTED;
+ case DB_LSTAT_NOTEXIST:
+ ret = DB_LOCK_NOTEXIST;
break;
+ case DB_LSTAT_EXPIRED:
+ SHOBJECT_LOCK(lt,
+ region, sh_obj, obj_ndx);
+ if ((ret = __lock_put_internal(
+ lt, newl, obj_ndx, 0) != 0))
+ goto err;
+ if (LOCK_TIME_EQUAL(
+ &sh_locker->lk_expire,
+ &sh_locker->tx_expire)) {
+ region->stat.st_ndeadlocks++;
+ region->stat.st_ntxntimeouts++;
+ return (DB_LOCK_DEADLOCK);
+ } else {
+ region->stat.st_nlocktimeouts++;
+ return (DB_LOCK_NOTGRANTED);
+ }
default:
ret = EINVAL;
break;
@@ -624,6 +883,10 @@ llist:
lock->off = R_OFFSET(&lt->reginfo, newl);
lock->gen = newl->gen;
+ lock->mode = newl->mode;
+ sh_locker->nlocks++;
+ if (IS_WRITELOCK(newl->mode))
+ sh_locker->nwrites++;
return (0);
@@ -631,18 +894,21 @@ upgrade:/*
* This was an upgrade, so return the new lock to the free list and
* upgrade the mode of the original lock.
*/
- ((struct __db_lock *)R_ADDR(&lt->reginfo, lock->off))->mode = lock_mode;
+ lp = (struct __db_lock *)R_ADDR(&lt->reginfo, lock->off);
+ if (IS_WRITELOCK(lock_mode) && !IS_WRITELOCK(lp->mode))
+ sh_locker->nwrites++;
+ lp->mode = lock_mode;
ret = 0;
/* FALLTHROUGH */
done:
err: newl->status = DB_LSTAT_FREE;
+ region->stat.st_nlocks--;
if (on_locker_list) {
SH_LIST_REMOVE(newl, locker_links, __db_lock);
}
SH_TAILQ_INSERT_HEAD(&region->free_locks, newl, links, __db_lock);
- region->nlocks--;
return (ret);
}
@@ -651,21 +917,20 @@ err: newl->status = DB_LSTAT_FREE;
*
* The user callable one is lock_put and the three we use internally are
* __lock_put_nolock, __lock_put_internal and __lock_downgrade.
+ *
+ * PUBLIC: int __lock_put __P((DB_ENV *, DB_LOCK *));
*/
int
-lock_put(dbenv, lock)
+__lock_put(dbenv, lock)
DB_ENV *dbenv;
DB_LOCK *lock;
{
DB_LOCKTAB *lt;
int ret, run_dd;
-#ifdef HAVE_RPC
- if (F_ISSET(dbenv, DB_ENV_RPCCLIENT))
- return (__dbcl_lock_put(dbenv, lock));
-#endif
PANIC_CHECK(dbenv);
- ENV_REQUIRES_CONFIG(dbenv, dbenv->lk_handle, DB_INIT_LOCK);
+ ENV_REQUIRES_CONFIG(dbenv,
+ dbenv->lk_handle, "DB_LOCK->lock_put", DB_INIT_LOCK);
if (IS_RECOVERING(dbenv))
return (0);
@@ -676,8 +941,14 @@ lock_put(dbenv, lock)
ret = __lock_put_nolock(dbenv, lock, &run_dd, 0);
UNLOCKREGION(dbenv, lt);
+ /*
+ * Only run the lock detector if put told us to AND we are running
+ * in auto-detect mode. If we are not running in auto-detect, then
+ * a call to lock_detect here will 0 the need_dd bit, but will not
+ * actually abort anything.
+ */
if (ret == 0 && run_dd)
- (void)lock_detect(dbenv, 0,
+ (void)dbenv->lock_detect(dbenv, 0,
((DB_LOCKREGION *)lt->reginfo.primary)->detect, NULL);
return (ret);
}
@@ -687,41 +958,43 @@ __lock_put_nolock(dbenv, lock, runp, flags)
DB_ENV *dbenv;
DB_LOCK *lock;
int *runp;
- int flags;
+ u_int32_t flags;
{
struct __db_lock *lockp;
DB_LOCKREGION *region;
DB_LOCKTAB *lt;
- u_int32_t locker;
int ret;
+ /* Check if locks have been globally turned off. */
+ if (F_ISSET(dbenv, DB_ENV_NOLOCKING))
+ return (0);
+
lt = dbenv->lk_handle;
region = lt->reginfo.primary;
lockp = (struct __db_lock *)R_ADDR(&lt->reginfo, lock->off);
- lock->off = LOCK_INVALID;
+ LOCK_INIT(*lock);
if (lock->gen != lockp->gen) {
- __db_err(dbenv, __db_lock_invalid, "lock_put");
- return (EACCES);
+ __db_err(dbenv, __db_lock_invalid, "DB_LOCK->lock_put");
+ return (EINVAL);
}
- locker = lockp->holder;
ret = __lock_put_internal(lt,
lockp, lock->ndx, flags | DB_LOCK_UNLINK | DB_LOCK_FREE);
*runp = 0;
- if (ret == 0 && region->need_dd && region->detect != DB_LOCK_NORUN) {
+ if (ret == 0 && region->need_dd && region->detect != DB_LOCK_NORUN)
*runp = 1;
- region->need_dd = 0;
- }
return (ret);
}
/*
* __lock_downgrade --
- * Used by the concurrent access product to downgrade write locks
- * back to iwrite locks.
+ * Used to downgrade locks. Currently this is used in two places,
+ * 1) by the concurrent access product to downgrade write locks
+ * back to iwrite locks and 2) to downgrade write-handle locks to read-handle
+ * locks at the end of an open/create.
*
* PUBLIC: int __lock_downgrade __P((DB_ENV *,
* PUBLIC: DB_LOCK *, db_lockmode_t, u_int32_t));
@@ -734,14 +1007,21 @@ __lock_downgrade(dbenv, lock, new_mode, flags)
u_int32_t flags;
{
struct __db_lock *lockp;
+ DB_LOCKER *sh_locker;
DB_LOCKOBJ *obj;
DB_LOCKREGION *region;
DB_LOCKTAB *lt;
+ u_int32_t indx;
int ret;
COMPQUIET(flags, 0);
PANIC_CHECK(dbenv);
+ ret = 0;
+
+ /* Check if locks have been globally turned off. */
+ if (F_ISSET(dbenv, DB_ENV_NOLOCKING))
+ return (0);
lt = dbenv->lk_handle;
region = lt->reginfo.primary;
@@ -751,9 +1031,24 @@ __lock_downgrade(dbenv, lock, new_mode, flags)
lockp = (struct __db_lock *)R_ADDR(&lt->reginfo, lock->off);
if (lock->gen != lockp->gen) {
__db_err(dbenv, __db_lock_invalid, "lock_downgrade");
- ret = EACCES;
+ ret = EINVAL;
+ goto out;
+ }
+
+ LOCKER_LOCK(lt, region, lockp->holder, indx);
+
+ if ((ret = __lock_getlocker(lt, lockp->holder,
+ indx, 0, &sh_locker)) != 0 || sh_locker == NULL) {
+ if (ret == 0)
+ ret = EINVAL;
+ __db_err(dbenv, __db_locker_invalid);
goto out;
}
+ if (IS_WRITELOCK(lockp->mode) && !IS_WRITELOCK(new_mode))
+ sh_locker->nwrites--;
+
+ if (new_mode == DB_LOCK_WWRITE)
+ F_SET(sh_locker, DB_LOCKER_DIRTY);
lockp->mode = new_mode;
@@ -761,25 +1056,23 @@ __lock_downgrade(dbenv, lock, new_mode, flags)
obj = (DB_LOCKOBJ *)((u_int8_t *)lockp + lockp->obj);
(void)__lock_promote(lt, obj, LF_ISSET(DB_LOCK_NOWAITERS));
- ++region->nreleases;
out: UNLOCKREGION(dbenv, lt);
- return (0);
+ return (ret);
}
static int
__lock_put_internal(lt, lockp, obj_ndx, flags)
DB_LOCKTAB *lt;
struct __db_lock *lockp;
- u_int32_t obj_ndx;
- u_int32_t flags;
+ u_int32_t obj_ndx, flags;
{
DB_LOCKOBJ *sh_obj;
DB_LOCKREGION *region;
- int no_reclaim, ret, state_changed;
+ int ret, state_changed;
region = lt->reginfo.primary;
- no_reclaim = ret = state_changed = 0;
+ ret = state_changed = 0;
if (!OBJ_LINKS_VALID(lockp)) {
/*
@@ -791,14 +1084,14 @@ __lock_put_internal(lt, lockp, obj_ndx, flags)
lockp->status = DB_LSTAT_FREE;
SH_TAILQ_INSERT_HEAD(
&region->free_locks, lockp, links, __db_lock);
- region->nlocks--;
+ region->stat.st_nlocks--;
return (0);
}
if (LF_ISSET(DB_LOCK_DOALL))
- region->nreleases += lockp->refcount;
+ region->stat.st_nreleases += lockp->refcount;
else
- region->nreleases++;
+ region->stat.st_nreleases++;
if (!LF_ISSET(DB_LOCK_DOALL) && lockp->refcount > 1) {
lockp->refcount--;
@@ -812,8 +1105,8 @@ __lock_put_internal(lt, lockp, obj_ndx, flags)
sh_obj = (DB_LOCKOBJ *)((u_int8_t *)lockp + lockp->obj);
/* Remove this lock from its holders/waitlist. */
- if (lockp->status != DB_LSTAT_HELD)
- __lock_remove_waiter(lt->dbenv, sh_obj, lockp, DB_LSTAT_FREE);
+ if (lockp->status != DB_LSTAT_HELD && lockp->status != DB_LSTAT_PENDING)
+ __lock_remove_waiter(lt, sh_obj, lockp, DB_LSTAT_FREE);
else {
SH_TAILQ_REMOVE(&sh_obj->holders, lockp, links, __db_lock);
lockp->links.stqe_prev = -1;
@@ -822,15 +1115,15 @@ __lock_put_internal(lt, lockp, obj_ndx, flags)
if (LF_ISSET(DB_LOCK_NOPROMOTE))
state_changed = 0;
else
- state_changed =
- __lock_promote(lt, sh_obj, LF_ISSET(DB_LOCK_NOWAITERS));
+ state_changed = __lock_promote(lt,
+ sh_obj, LF_ISSET(DB_LOCK_REMOVE | DB_LOCK_NOWAITERS));
if (LF_ISSET(DB_LOCK_UNLINK))
- ret = __lock_checklocker(lt, lockp, lockp->holder, flags, NULL);
+ ret = __lock_checklocker(lt, lockp, lockp->holder, flags);
/* Check if object should be reclaimed. */
- if (SH_TAILQ_FIRST(&sh_obj->holders, __db_lock) == NULL
- && SH_TAILQ_FIRST(&sh_obj->waiters, __db_lock) == NULL) {
+ if (SH_TAILQ_FIRST(&sh_obj->holders, __db_lock) == NULL &&
+ SH_TAILQ_FIRST(&sh_obj->waiters, __db_lock) == NULL) {
HASHREMOVE_EL(lt->obj_tab,
obj_ndx, __db_lockobj, links, sh_obj);
if (sh_obj->lockobj.size > sizeof(sh_obj->objdata))
@@ -838,7 +1131,7 @@ __lock_put_internal(lt, lockp, obj_ndx, flags)
SH_DBT_PTR(&sh_obj->lockobj));
SH_TAILQ_INSERT_HEAD(
&region->free_objs, sh_obj, links, __db_lockobj);
- region->nobjects--;
+ region->stat.st_nobjects--;
state_changed = 1;
}
@@ -847,7 +1140,7 @@ __lock_put_internal(lt, lockp, obj_ndx, flags)
lockp->status = DB_LSTAT_FREE;
SH_TAILQ_INSERT_HEAD(
&region->free_locks, lockp, links, __db_lock);
- region->nlocks--;
+ region->stat.st_nlocks--;
}
/*
@@ -872,11 +1165,10 @@ __lock_put_internal(lt, lockp, obj_ndx, flags)
* Must be called without the locker's lock set.
*/
static int
-__lock_checklocker(lt, lockp, locker, flags, freed)
+__lock_checklocker(lt, lockp, locker, flags)
DB_LOCKTAB *lt;
struct __db_lock *lockp;
u_int32_t locker, flags;
- int *freed;
{
DB_ENV *dbenv;
DB_LOCKER *sh_locker;
@@ -888,17 +1180,14 @@ __lock_checklocker(lt, lockp, locker, flags, freed)
region = lt->reginfo.primary;
ret = 0;
- if (freed != NULL)
- *freed = 0;
-
LOCKER_LOCK(lt, region, locker, indx);
/* If the locker's list is NULL, free up the locker. */
if ((ret = __lock_getlocker(lt,
locker, indx, 0, &sh_locker)) != 0 || sh_locker == NULL) {
if (ret == 0)
- ret = EACCES;
- __db_err(lt->dbenv, __db_locker_invalid);
+ ret = EINVAL;
+ __db_err(dbenv, __db_locker_invalid);
goto freelock;
}
@@ -908,22 +1197,25 @@ __lock_checklocker(lt, lockp, locker, flags, freed)
goto freelock;
}
- if (LF_ISSET(DB_LOCK_UNLINK))
+ if (LF_ISSET(DB_LOCK_UNLINK)) {
SH_LIST_REMOVE(lockp, locker_links, __db_lock);
+ if (lockp->status == DB_LSTAT_HELD) {
+ sh_locker->nlocks--;
+ if (IS_WRITELOCK(lockp->mode))
+ sh_locker->nwrites--;
+ }
+ }
- if (SH_LIST_FIRST(&sh_locker->heldby, __db_lock) == NULL
- && LOCKER_FREEABLE(sh_locker)) {
+ if (SH_LIST_FIRST(&sh_locker->heldby, __db_lock) == NULL &&
+ LF_ISSET(DB_LOCK_FREE_LOCKER))
__lock_freelocker( lt, region, sh_locker, indx);
- if (freed != NULL)
- *freed = 1;
- }
freelock:
if (LF_ISSET(DB_LOCK_FREE)) {
lockp->status = DB_LSTAT_FREE;
SH_TAILQ_INSERT_HEAD(
&region->free_locks, lockp, links, __db_lock);
- region->nlocks--;
+ region->stat.st_nlocks--;
}
return (ret);
@@ -1019,11 +1311,9 @@ __lock_freefamilylocker(lt, locker)
LOCKER_LOCK(lt, region, locker, indx);
if ((ret = __lock_getlocker(lt,
- locker, indx, 0, &sh_locker)) != 0 || sh_locker == NULL) {
- if (ret == 0)
- ret = EACCES;
+ locker, indx, 0, &sh_locker)) != 0 || sh_locker == NULL)
goto freelock;
- }
+
if (SH_LIST_FIRST(&sh_locker->heldby, __db_lock) != NULL) {
ret = EINVAL;
__db_err(dbenv, "Freeing locker with locks");
@@ -1046,11 +1336,8 @@ freelock:
* common code for deleting a locker.
*
* This must be called with the locker bucket locked.
- *
- * PUBLIC: void __lock_freelocker __P((DB_LOCKTAB *,
- * PUBLIC: DB_LOCKREGION *, DB_LOCKER *, u_int32_t));
*/
-void
+static void
__lock_freelocker(lt, region, sh_locker, indx)
DB_LOCKTAB *lt;
DB_LOCKREGION *region;
@@ -1062,7 +1349,123 @@ __lock_freelocker(lt, region, sh_locker, indx)
lt->locker_tab, indx, __db_locker, links, sh_locker);
SH_TAILQ_INSERT_HEAD(
&region->free_lockers, sh_locker, links, __db_locker);
- region->nlockers--;
+ SH_TAILQ_REMOVE(&region->lockers, sh_locker, ulinks, __db_locker);
+ region->stat.st_nlockers--;
+}
+
+/*
+ * __lock_set_timeout
+ * -- set timeout values in shared memory.
+ * This is called from the transaction system.
+ * We either set the time that this transaction expires or the
+ * amount of time that a lock for this transaction is permitted
+ * to wait.
+ *
+ * PUBLIC: int __lock_set_timeout __P(( DB_ENV *,
+ * PUBLIC: u_int32_t, db_timeout_t, u_int32_t));
+ */
+int
+__lock_set_timeout(dbenv, locker, timeout, op)
+ DB_ENV *dbenv;
+ u_int32_t locker;
+ db_timeout_t timeout;
+ u_int32_t op;
+{
+ DB_LOCKER *sh_locker;
+ DB_LOCKREGION *region;
+ DB_LOCKTAB *lt;
+ u_int32_t locker_ndx;
+ int ret;
+
+ lt = dbenv->lk_handle;
+ region = lt->reginfo.primary;
+ LOCKREGION(dbenv, lt);
+
+ LOCKER_LOCK(lt, region, locker, locker_ndx);
+ ret = __lock_getlocker(lt, locker, locker_ndx, 1, &sh_locker);
+ UNLOCKREGION(dbenv, lt);
+ if (ret != 0)
+ return (ret);
+
+ if (op == DB_SET_TXN_TIMEOUT) {
+ if (timeout == 0)
+ LOCK_SET_TIME_INVALID(&sh_locker->tx_expire);
+ else
+ __lock_expires(dbenv, &sh_locker->tx_expire, timeout);
+ } else if (op == DB_SET_LOCK_TIMEOUT) {
+ sh_locker->lk_timeout = timeout;
+ F_SET(sh_locker, DB_LOCKER_TIMEOUT);
+ } else if (op == DB_SET_TXN_NOW) {
+ LOCK_SET_TIME_INVALID(&sh_locker->tx_expire);
+ __lock_expires(dbenv, &sh_locker->tx_expire, 0);
+ sh_locker->lk_expire = sh_locker->tx_expire;
+ } else
+ return (EINVAL);
+
+ return (0);
+}
+
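For context, a minimal sketch of the public timeout knobs that feed these op codes (an assumption about usage, not a trace of the internal call path; DB_ENV->set_timeout() with DB_SET_LOCK_TIMEOUT/DB_SET_TXN_TIMEOUT is the standard 4.x API, and timeouts are in microseconds):

    #include <db.h>

    /* Give lock requests a 5-second timeout and transactions 30 seconds. */
    int configure_timeouts(DB_ENV *dbenv)
    {
        int ret;

        if ((ret = dbenv->set_timeout(dbenv,
            5 * 1000000, DB_SET_LOCK_TIMEOUT)) != 0)
            return (ret);
        return (dbenv->set_timeout(dbenv, 30 * 1000000, DB_SET_TXN_TIMEOUT));
    }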
+/*
+ * __lock_inherit_timeout
+ * -- inherit timeout values from parent locker.
+ * This is called from the transaction system. This will
+ * return EINVAL if the parent does not exist or did not
+ * have a current txn timeout set.
+ *
+ * PUBLIC: int __lock_inherit_timeout __P(( DB_ENV *, u_int32_t, u_int32_t));
+ */
+int
+__lock_inherit_timeout(dbenv, parent, locker)
+ DB_ENV *dbenv;
+ u_int32_t parent, locker;
+{
+ DB_LOCKER *parent_locker, *sh_locker;
+ DB_LOCKREGION *region;
+ DB_LOCKTAB *lt;
+ u_int32_t locker_ndx;
+ int ret;
+
+ lt = dbenv->lk_handle;
+ region = lt->reginfo.primary;
+ ret = 0;
+ LOCKREGION(dbenv, lt);
+
+ /* If the parent does not exist, we are done. */
+ LOCKER_LOCK(lt, region, parent, locker_ndx);
+ if ((ret = __lock_getlocker(lt,
+ parent, locker_ndx, 0, &parent_locker)) != 0)
+ goto err;
+
+ /*
+ * If the parent is not there yet, that's OK. If it
+ * does not have any timeouts set, then avoid creating
+ * the child locker at this point.
+ */
+ if (parent_locker == NULL ||
+ (LOCK_TIME_ISVALID(&parent_locker->tx_expire) &&
+ !F_ISSET(parent_locker, DB_LOCKER_TIMEOUT))) {
+ ret = EINVAL;
+ goto done;
+ }
+
+ LOCKER_LOCK(lt, region, locker, locker_ndx);
+ if ((ret = __lock_getlocker(lt,
+ locker, locker_ndx, 1, &sh_locker)) != 0)
+ goto err;
+
+ sh_locker->tx_expire = parent_locker->tx_expire;
+
+ if (F_ISSET(parent_locker, DB_LOCKER_TIMEOUT)) {
+ sh_locker->lk_timeout = parent_locker->lk_timeout;
+ F_SET(sh_locker, DB_LOCKER_TIMEOUT);
+ if (!LOCK_TIME_ISVALID(&parent_locker->tx_expire))
+ ret = EINVAL;
+ }
+
+done:
+err:
+ UNLOCKREGION(dbenv, lt);
+ return (ret);
}
/*
@@ -1101,13 +1504,13 @@ __lock_getlocker(lt, locker, indx, create, retp)
/* Create new locker and then insert it into hash table. */
if ((sh_locker = SH_TAILQ_FIRST(
&region->free_lockers, __db_locker)) == NULL) {
- __db_err(lt->dbenv, __db_lock_err, "locker entries");
+ __db_err(dbenv, __db_lock_err, "locker entries");
return (ENOMEM);
}
SH_TAILQ_REMOVE(
&region->free_lockers, sh_locker, links, __db_locker);
- if (++region->nlockers > region->maxnlockers)
- region->maxnlockers = region->nlockers;
+ if (++region->stat.st_nlockers > region->stat.st_maxnlockers)
+ region->stat.st_maxnlockers = region->stat.st_nlockers;
sh_locker->id = locker;
sh_locker->dd_id = 0;
@@ -1116,8 +1519,18 @@ __lock_getlocker(lt, locker, indx, create, retp)
SH_LIST_INIT(&sh_locker->child_locker);
sh_locker->flags = 0;
SH_LIST_INIT(&sh_locker->heldby);
+ sh_locker->nlocks = 0;
+ sh_locker->nwrites = 0;
+ sh_locker->lk_timeout = 0;
+ LOCK_SET_TIME_INVALID(&sh_locker->tx_expire);
+ if (locker < TXN_MINIMUM && region->tx_timeout != 0)
+ __lock_expires(dbenv,
+ &sh_locker->tx_expire, region->tx_timeout);
+ LOCK_SET_TIME_INVALID(&sh_locker->lk_expire);
HASHINSERT(lt->locker_tab, indx, __db_locker, links, sh_locker);
+ SH_TAILQ_INSERT_HEAD(&region->lockers,
+ sh_locker, ulinks, __db_locker);
}
*retp = sh_locker;
@@ -1131,11 +1544,8 @@ __lock_getlocker(lt, locker, indx, create, retp)
* the table.
*
* This must be called with the object bucket locked.
- *
- * PUBLIC: int __lock_getobj __P((DB_LOCKTAB *,
- * PUBLIC: const DBT *, u_int32_t, int, DB_LOCKOBJ **));
*/
-int
+static int
__lock_getobj(lt, obj, ndx, create, retp)
DB_LOCKTAB *lt;
const DBT *obj;
@@ -1185,8 +1595,8 @@ __lock_getobj(lt, obj, ndx, create, retp)
SH_TAILQ_REMOVE(
&region->free_objs, sh_obj, links, __db_lockobj);
- if (++region->nobjects > region->maxnobjects)
- region->maxnobjects = region->nobjects;
+ if (++region->stat.st_nobjects > region->stat.st_maxnobjects)
+ region->stat.st_maxnobjects = region->stat.st_nobjects;
SH_TAILQ_INIT(&sh_obj->waiters);
SH_TAILQ_INIT(&sh_obj->holders);
@@ -1220,7 +1630,7 @@ __lock_is_parent(lt, locker, sh_locker)
parent = sh_locker;
while (parent->parent_locker != INVALID_ROFF) {
parent = (DB_LOCKER *)
- R_ADDR(&lt->reginfo, parent->parent_locker);
+ R_ADDR(&lt->reginfo, parent->parent_locker);
if (parent->id == locker)
return (1);
}
@@ -1234,13 +1644,13 @@ __lock_is_parent(lt, locker, sh_locker)
* Look through the waiters and holders lists and decide which (if any)
* locks can be promoted. Promote any that are eligible.
*
- * PUBLIC: int __lock_promote __P((DB_LOCKTAB *, DB_LOCKOBJ *, int));
+ * PUBLIC: int __lock_promote __P((DB_LOCKTAB *, DB_LOCKOBJ *, u_int32_t));
*/
int
-__lock_promote(lt, obj, not_waiters)
+__lock_promote(lt, obj, flags)
DB_LOCKTAB *lt;
DB_LOCKOBJ *obj;
- int not_waiters;
+ u_int32_t flags;
{
struct __db_lock *lp_w, *lp_h, *next_waiter;
DB_LOCKER *sh_locker;
@@ -1270,16 +1680,25 @@ __lock_promote(lt, obj, not_waiters)
lp_w = next_waiter) {
had_waiters = 1;
next_waiter = SH_TAILQ_NEXT(lp_w, links, __db_lock);
+
+ /* Waiter may have aborted or expired. */
+ if (lp_w->status != DB_LSTAT_WAITING)
+ continue;
/* Are we switching locks? */
- if (not_waiters && lp_w->mode == DB_LOCK_WAIT)
+ if (LF_ISSET(DB_LOCK_NOWAITERS) && lp_w->mode == DB_LOCK_WAIT)
continue;
+
+ if (LF_ISSET(DB_LOCK_REMOVE)) {
+ __lock_remove_waiter(lt, obj, lp_w, DB_LSTAT_NOTEXIST);
+ continue;
+ }
for (lp_h = SH_TAILQ_FIRST(&obj->holders, __db_lock);
lp_h != NULL;
lp_h = SH_TAILQ_NEXT(lp_h, links, __db_lock)) {
if (lp_h->holder != lp_w->holder &&
CONFLICTS(lt, region, lp_h->mode, lp_w->mode)) {
-
- LOCKER_LOCK(lt, region, lp_w->holder, locker_ndx);
+ LOCKER_LOCK(lt,
+ region, lp_w->holder, locker_ndx);
if ((__lock_getlocker(lt, lp_w->holder,
locker_ndx, 0, &sh_locker)) != 0) {
DB_ASSERT(0);
@@ -1323,19 +1742,26 @@ __lock_promote(lt, obj, not_waiters)
* This must be called with the Object bucket locked.
*/
static void
-__lock_remove_waiter(dbenv, sh_obj, lockp, status)
- DB_ENV *dbenv;
+__lock_remove_waiter(lt, sh_obj, lockp, status)
+ DB_LOCKTAB *lt;
DB_LOCKOBJ *sh_obj;
struct __db_lock *lockp;
db_status_t status;
{
+ DB_LOCKREGION *region;
int do_wakeup;
+ region = lt->reginfo.primary;
+
do_wakeup = lockp->status == DB_LSTAT_WAITING;
SH_TAILQ_REMOVE(&sh_obj->waiters, lockp, links, __db_lock);
lockp->links.stqe_prev = -1;
lockp->status = status;
+ if (SH_TAILQ_FIRST(&sh_obj->waiters, __db_lock) == NULL)
+ SH_TAILQ_REMOVE(
+ &region->dd_objs,
+ sh_obj, dd_links, __db_lockobj);
/*
* Wake whoever is waiting on this lock.
@@ -1344,96 +1770,105 @@ __lock_remove_waiter(dbenv, sh_obj, lockp, status)
* keep the compiler quiet.
*/
if (do_wakeup)
- MUTEX_UNLOCK(dbenv, &lockp->mutex);
+ MUTEX_UNLOCK(lt->dbenv, &lockp->mutex);
}
/*
- * __lock_printlock --
- *
- * PUBLIC: void __lock_printlock __P((DB_LOCKTAB *, struct __db_lock *, int));
+ * __lock_expires -- set the expire time given the time to live.
+ * We assume that if timevalp is set then it contains "now".
+ * This avoids repeated system calls to get the time.
*/
-void
-__lock_printlock(lt, lp, ispgno)
- DB_LOCKTAB *lt;
- struct __db_lock *lp;
- int ispgno;
+static void
+__lock_expires(dbenv, timevalp, timeout)
+ DB_ENV *dbenv;
+ db_timeval_t *timevalp;
+ db_timeout_t timeout;
{
- DB_LOCKOBJ *lockobj;
- db_pgno_t pgno;
- u_int32_t *fidp;
- u_int8_t *ptr, type;
- const char *mode, *status;
-
- switch (lp->mode) {
- case DB_LOCK_IREAD:
- mode = "IREAD";
- break;
- case DB_LOCK_IWR:
- mode = "IWR";
- break;
- case DB_LOCK_IWRITE:
- mode = "IWRITE";
- break;
- case DB_LOCK_NG:
- mode = "NG";
- break;
- case DB_LOCK_READ:
- mode = "READ";
- break;
- case DB_LOCK_WRITE:
- mode = "WRITE";
- break;
- case DB_LOCK_WAIT:
- mode = "WAIT";
- break;
- default:
- mode = "UNKNOWN";
- break;
- }
- switch (lp->status) {
- case DB_LSTAT_ABORTED:
- status = "ABORT";
- break;
- case DB_LSTAT_ERR:
- status = "ERROR";
- break;
- case DB_LSTAT_FREE:
- status = "FREE";
- break;
- case DB_LSTAT_HELD:
- status = "HELD";
- break;
- case DB_LSTAT_NOGRANT:
- status = "NONE";
- break;
- case DB_LSTAT_WAITING:
- status = "WAIT";
- break;
- case DB_LSTAT_PENDING:
- status = "PENDING";
- break;
- default:
- status = "UNKNOWN";
- break;
+ if (!LOCK_TIME_ISVALID(timevalp))
+ __os_clock(dbenv, &timevalp->tv_sec, &timevalp->tv_usec);
+ if (timeout > 1000000) {
+ timevalp->tv_sec += timeout / 1000000;
+ timevalp->tv_usec += timeout % 1000000;
+ } else
+ timevalp->tv_usec += timeout;
+
+ if (timevalp->tv_usec > 1000000) {
+ timevalp->tv_sec++;
+ timevalp->tv_usec -= 1000000;
}
- printf("\t%lx\t%s\t%lu\t%s\t",
- (u_long)lp->holder, mode, (u_long)lp->refcount, status);
-
- lockobj = (DB_LOCKOBJ *)((u_int8_t *)lp + lp->obj);
- ptr = SH_DBT_PTR(&lockobj->lockobj);
- if (ispgno && lockobj->lockobj.size == sizeof(struct __db_ilock)) {
- /* Assume this is a DBT lock. */
- memcpy(&pgno, ptr, sizeof(db_pgno_t));
- fidp = (u_int32_t *)(ptr + sizeof(db_pgno_t));
- type = *(u_int8_t *)(ptr + sizeof(db_pgno_t) + DB_FILE_ID_LEN);
- printf("%s %lu (%lu %lu %lu %lu %lu)\n",
- type == DB_PAGE_LOCK ? "page" : "record",
- (u_long)pgno,
- (u_long)fidp[0], (u_long)fidp[1], (u_long)fidp[2],
- (u_long)fidp[3], (u_long)fidp[4]);
- } else {
- printf("0x%lx ", (u_long)R_OFFSET(&lt->reginfo, lockobj));
- __db_pr(ptr, lockobj->lockobj.size);
- printf("\n");
+}
+
+/*
+ * __lock_expired -- determine if a lock has expired.
+ *
+ * PUBLIC: int __lock_expired __P((DB_ENV *, db_timeval_t *, db_timeval_t *));
+ */
+int
+__lock_expired(dbenv, now, timevalp)
+ DB_ENV *dbenv;
+ db_timeval_t *now, *timevalp;
+{
+ if (!LOCK_TIME_ISVALID(timevalp))
+ return (0);
+
+ if (!LOCK_TIME_ISVALID(now))
+ __os_clock(dbenv, &now->tv_sec, &now->tv_usec);
+
+ return (now->tv_sec > timevalp->tv_sec ||
+ (now->tv_sec == timevalp->tv_sec &&
+ now->tv_usec >= timevalp->tv_usec));
+}
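
Editorial note (not part of the patch): a minimal standalone sketch of the expire-time arithmetic that __lock_expires() and __lock_expired() above implement, using plain gettimeofday() and a made-up struct in place of db_timeval_t/__os_clock. It also normalizes the microsecond field unconditionally rather than mirroring the patch's >1000000 special case.

/* Standalone sketch of lock/txn expiry arithmetic; illustrative only. */
#include <stdio.h>
#include <sys/time.h>

struct expire_time {
	long tv_sec;	/* seconds; 0 means "never set" */
	long tv_usec;	/* microseconds */
};

/* Add a timeout (in microseconds) to "now", carrying usec into sec. */
static void
set_expiry(struct expire_time *t, unsigned long timeout_us)
{
	struct timeval now;

	(void)gettimeofday(&now, NULL);
	t->tv_sec = now.tv_sec + timeout_us / 1000000;
	t->tv_usec = now.tv_usec + timeout_us % 1000000;
	if (t->tv_usec >= 1000000) {	/* normalize the carry */
		t->tv_sec++;
		t->tv_usec -= 1000000;
	}
}

/* Return nonzero if the expire time has passed. */
static int
has_expired(const struct expire_time *t)
{
	struct timeval now;

	if (t->tv_sec == 0)		/* never set: cannot expire */
		return (0);
	(void)gettimeofday(&now, NULL);
	return (now.tv_sec > t->tv_sec ||
	    (now.tv_sec == t->tv_sec && now.tv_usec >= t->tv_usec));
}

int
main(void)
{
	struct expire_time t;

	set_expiry(&t, 500000);		/* 500ms from now */
	printf("expired yet? %d\n", has_expired(&t));
	return (0);
}
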
+
+/*
+ * __lock_trade --
+ *
+ * Trade locker ids on a lock. This is used to reassign file locks from
+ * a transactional locker id to a long-lived locker id. This should be
+ * called with the region mutex held.
+ */
+static int
+__lock_trade(dbenv, lock, new_locker)
+ DB_ENV *dbenv;
+ DB_LOCK *lock;
+ u_int32_t new_locker;
+{
+ struct __db_lock *lp;
+ DB_LOCKREGION *region;
+ DB_LOCKTAB *lt;
+ DB_LOCKER *sh_locker;
+ int ret;
+ u_int32_t locker_ndx;
+
+ lt = dbenv->lk_handle;
+ region = lt->reginfo.primary;
+
+ lp = (struct __db_lock *)R_ADDR(&lt->reginfo, lock->off);
+
+ /* If the lock is already released, simply return. */
+ if (lp->gen != lock->gen)
+ return (DB_NOTFOUND);
+
+ /* Make sure that we can get new locker and add this lock to it. */
+ LOCKER_LOCK(lt, region, new_locker, locker_ndx);
+ if ((ret =
+ __lock_getlocker(lt, new_locker, locker_ndx, 0, &sh_locker)) != 0)
+ return (ret);
+
+ if (sh_locker == NULL) {
+ __db_err(dbenv, "Locker does not exist");
+ return (EINVAL);
}
+
+ /* Remove the lock from its current locker. */
+ if ((ret = __lock_checklocker(lt, lp, lp->holder, DB_LOCK_UNLINK)) != 0)
+ return (ret);
+
+ /* Add lock to its new locker. */
+ SH_LIST_INSERT_HEAD(&sh_locker->heldby, lp, locker_links, __db_lock);
+ sh_locker->nlocks++;
+ if (IS_WRITELOCK(lp->mode))
+ sh_locker->nwrites++;
+ lp->holder = new_locker;
+
+ return (0);
}
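
Editorial note (not part of the patch): the list bookkeeping that __lock_trade() performs above, reduced to a standalone sketch. The structures and names here are stand-ins for the shared-memory DB_LOCKER/__db_lock lists; a plain singly linked list replaces the SH_LIST macros.

/* Illustrative only: move a lock between two lockers' held lists and
 * keep the per-locker counters in step, as __lock_trade() does. */
#include <stdio.h>

struct demo_lock {
	int is_write;
	unsigned holder_id;
	struct demo_lock *next;		/* link in the holder's held list */
};

struct demo_locker {
	unsigned id;
	unsigned nlocks, nwrites;
	struct demo_lock *heldby;	/* head of the held-lock list */
};

static void
trade_lock(struct demo_locker *from, struct demo_locker *to,
    struct demo_lock *lp)
{
	struct demo_lock **lpp;

	/* Unlink from the old locker's list. */
	for (lpp = &from->heldby; *lpp != NULL; lpp = &(*lpp)->next)
		if (*lpp == lp) {
			*lpp = lp->next;
			break;
		}
	from->nlocks--;
	if (lp->is_write)
		from->nwrites--;

	/* Insert at the head of the new locker's list. */
	lp->next = to->heldby;
	to->heldby = lp;
	to->nlocks++;
	if (lp->is_write)
		to->nwrites++;
	lp->holder_id = to->id;
}

int
main(void)
{
	struct demo_lock l = { 1, 1, NULL };
	struct demo_locker txn = { 1, 1, 1, &l };
	struct demo_locker file_locker = { 2, 0, 0, NULL };

	trade_lock(&txn, &file_locker, &l);
	printf("new holder %u, nlocks %u\n", l.holder_id, file_locker.nlocks);
	return (0);
}
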
diff --git a/bdb/lock/lock_conflict.c b/bdb/lock/lock_conflict.c
deleted file mode 100644
index 2d7945fe201..00000000000
--- a/bdb/lock/lock_conflict.c
+++ /dev/null
@@ -1,34 +0,0 @@
-/*-
- * See the file LICENSE for redistribution information.
- *
- * Copyright (c) 1996, 1997, 1998, 1999, 2000
- * Sleepycat Software. All rights reserved.
- */
-
-#include "db_config.h"
-
-#ifndef lint
-static const char revid[] = "$Id: lock_conflict.c,v 11.6 2000/12/12 17:38:13 bostic Exp $";
-#endif /* not lint */
-
-#ifndef NO_SYSTEM_INCLUDES
-#include <sys/types.h>
-#endif
-
-#include "db_int.h"
-
-/*
- * The conflict arrays are set up such that the row is the lock you
- * are holding and the column is the lock that is desired.
- */
-
-const u_int8_t db_riw_conflicts[] = {
- /* N S X WT IX IS SIX */
- /* N */ 0, 0, 0, 0, 0, 0, 0,
- /* S */ 0, 0, 1, 0, 1, 0, 1,
- /* X */ 0, 1, 1, 1, 1, 1, 1,
- /* WT */ 0, 0, 0, 0, 0, 0, 0,
- /* IX */ 0, 1, 1, 0, 0, 0, 0,
- /* IS */ 0, 0, 1, 0, 0, 0, 0,
- /* SIX */ 0, 1, 1, 0, 0, 0, 0
-};
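
Editorial note (not part of the patch): the conflict matrices removed here (and re-added in lock_region.c further down) are flat, row-major tables in which the row is the mode already held and the column is the mode being requested. A small standalone sketch of that lookup, reusing the CDB table's contents with made-up mode names:

/* Illustrative only: consulting a row-major conflict matrix. */
#include <stdio.h>

enum { M_NG, M_READ, M_WRITE, M_WAIT, M_IWRITE, N_MODES };

static const unsigned char conflicts[N_MODES * N_MODES] = {
	/*         N  R  W  WT IW */
	/* N  */   0, 0, 0, 0, 0,
	/* R  */   0, 0, 1, 0, 0,
	/* W  */   0, 1, 1, 1, 1,
	/* WT */   0, 0, 0, 0, 0,
	/* IW */   0, 0, 1, 0, 1
};

/* Row is the mode already held, column is the mode being requested. */
static int
modes_conflict(int held, int requested)
{
	return (conflicts[held * N_MODES + requested]);
}

int
main(void)
{
	printf("R held, W wanted: %d\n", modes_conflict(M_READ, M_WRITE));
	printf("R held, R wanted: %d\n", modes_conflict(M_READ, M_READ));
	return (0);
}
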
diff --git a/bdb/lock/lock_deadlock.c b/bdb/lock/lock_deadlock.c
index 1f37db3890e..d1461b89a4f 100644
--- a/bdb/lock/lock_deadlock.c
+++ b/bdb/lock/lock_deadlock.c
@@ -1,14 +1,14 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Copyright (c) 1996-2002
* Sleepycat Software. All rights reserved.
*/
#include "db_config.h"
#ifndef lint
-static const char revid[] = "$Id: lock_deadlock.c,v 11.23 2000/12/08 20:15:31 ubell Exp $";
+static const char revid[] = "$Id: lock_deadlock.c,v 11.54 2002/08/06 05:05:21 bostic Exp $";
#endif /* not lint */
#ifndef NO_SYSTEM_INCLUDES
@@ -17,19 +17,11 @@ static const char revid[] = "$Id: lock_deadlock.c,v 11.23 2000/12/08 20:15:31 ub
#include <string.h>
#endif
-#ifdef HAVE_RPC
-#include "db_server.h"
-#endif
-
#include "db_int.h"
-#include "db_shash.h"
-#include "lock.h"
-#include "txn.h"
-
-#ifdef HAVE_RPC
-#include "gen_client_ext.h"
-#include "rpc_client_ext.h"
-#endif
+#include "dbinc/db_shash.h"
+#include "dbinc/lock.h"
+#include "dbinc/txn.h"
+#include "dbinc/rep.h"
#define ISSET_MAP(M, N) ((M)[(N) / 32] & (1 << (N) % 32))
@@ -51,6 +43,8 @@ static const char revid[] = "$Id: lock_deadlock.c,v 11.23 2000/12/08 20:15:31 ub
typedef struct {
int valid;
+ int self_wait;
+ u_int32_t count;
u_int32_t id;
u_int32_t last_lock;
u_int32_t last_locker_id;
@@ -58,152 +52,232 @@ typedef struct {
} locker_info;
static int __dd_abort __P((DB_ENV *, locker_info *));
-static int __dd_build
- __P((DB_ENV *, u_int32_t **, u_int32_t *, locker_info **));
-static int __dd_find
- __P((DB_ENV *,u_int32_t *, locker_info *, u_int32_t, u_int32_t ***));
+static int __dd_build __P((DB_ENV *,
+ u_int32_t, u_int32_t **, u_int32_t *, u_int32_t *, locker_info **));
+static int __dd_find __P((DB_ENV *,
+ u_int32_t *, locker_info *, u_int32_t, u_int32_t, u_int32_t ***));
+static int __dd_isolder __P((u_int32_t, u_int32_t, u_int32_t, u_int32_t));
+static int __dd_verify __P((locker_info *, u_int32_t *, u_int32_t *,
+ u_int32_t *, u_int32_t, u_int32_t, u_int32_t));
#ifdef DIAGNOSTIC
-static void __dd_debug __P((DB_ENV *, locker_info *, u_int32_t *, u_int32_t));
+static void __dd_debug
+ __P((DB_ENV *, locker_info *, u_int32_t *, u_int32_t, u_int32_t));
#endif
+/*
+ * lock_detect --
+ *
+ * PUBLIC: int __lock_detect __P((DB_ENV *, u_int32_t, u_int32_t, int *));
+ */
int
-lock_detect(dbenv, flags, atype, abortp)
+__lock_detect(dbenv, flags, atype, abortp)
DB_ENV *dbenv;
u_int32_t flags, atype;
int *abortp;
{
DB_LOCKREGION *region;
DB_LOCKTAB *lt;
+ DB_TXNMGR *tmgr;
locker_info *idmap;
- u_int32_t *bitmap, **deadp, **free_me, i, killid, nentries, nlockers;
- int do_pass, ret;
-
-#ifdef HAVE_RPC
- if (F_ISSET(dbenv, DB_ENV_RPCCLIENT))
- return (__dbcl_lock_detect(dbenv, flags, atype, abortp));
-#endif
+ u_int32_t *bitmap, *copymap, **deadp, **free_me, *tmpmap;
+ u_int32_t i, keeper, killid, limit, nalloc, nlockers;
+ u_int32_t lock_max, txn_max;
+ int ret;
PANIC_CHECK(dbenv);
- ENV_REQUIRES_CONFIG(dbenv, dbenv->lk_handle, DB_INIT_LOCK);
+ ENV_REQUIRES_CONFIG(dbenv,
+ dbenv->lk_handle, "DB_ENV->lock_detect", DB_INIT_LOCK);
+
+ /* Validate arguments. */
+ if ((ret = __db_fchk(dbenv, "DB_ENV->lock_detect", flags, 0)) != 0)
+ return (ret);
+ switch (atype) {
+ case DB_LOCK_DEFAULT:
+ case DB_LOCK_EXPIRE:
+ case DB_LOCK_MAXLOCKS:
+ case DB_LOCK_MINLOCKS:
+ case DB_LOCK_MINWRITE:
+ case DB_LOCK_OLDEST:
+ case DB_LOCK_RANDOM:
+ case DB_LOCK_YOUNGEST:
+ break;
+ default:
+ __db_err(dbenv,
+ "DB_ENV->lock_detect: unknown deadlock detection mode specified");
+ return (EINVAL);
+ }
+
+ /*
+ * If this environment is a replication client, then we must use the
+ * MINWRITE detection discipline.
+ */
+ if (__rep_is_client(dbenv))
+ atype = DB_LOCK_MINWRITE;
+
+ free_me = NULL;
lt = dbenv->lk_handle;
if (abortp != NULL)
*abortp = 0;
- /* Validate arguments. */
- if ((ret =
- __db_fchk(dbenv, "lock_detect", flags, DB_LOCK_CONFLICT)) != 0)
- return (ret);
-
/* Check if a detector run is necessary. */
LOCKREGION(dbenv, lt);
- if (LF_ISSET(DB_LOCK_CONFLICT)) {
- /* Make a pass every time a lock waits. */
- region = lt->reginfo.primary;
- do_pass = region->need_dd != 0;
- if (!do_pass) {
- UNLOCKREGION(dbenv, lt);
- return (0);
- }
+ /* Make a pass only if auto-detect would run. */
+ region = lt->reginfo.primary;
+
+ if (region->need_dd == 0) {
+ UNLOCKREGION(dbenv, lt);
+ return (0);
}
+ /* Reset need_dd, so we know we've run the detector. */
+ region->need_dd = 0;
+
/* Build the waits-for bitmap. */
- ret = __dd_build(dbenv, &bitmap, &nlockers, &idmap);
+ ret = __dd_build(dbenv, atype, &bitmap, &nlockers, &nalloc, &idmap);
+ lock_max = region->stat.st_cur_maxid;
UNLOCKREGION(dbenv, lt);
- if (ret != 0)
+
+ /*
+ * We need the cur_maxid from the txn region as well. In order
+ * to avoid tricky synchronization between the lock and txn
+ * regions, we simply unlock the lock region and then lock the
+ * txn region. This introduces a small window during which the
+ * transaction system could then wrap. We're willing to return
+ * the wrong answer for "oldest" or "youngest" in those rare
+ * circumstances.
+ */
+ tmgr = dbenv->tx_handle;
+ if (tmgr != NULL) {
+ R_LOCK(dbenv, &tmgr->reginfo);
+ txn_max = ((DB_TXNREGION *)tmgr->reginfo.primary)->cur_maxid;
+ R_UNLOCK(dbenv, &tmgr->reginfo);
+ } else
+ txn_max = TXN_MAXIMUM;
+ if (ret != 0 || atype == DB_LOCK_EXPIRE)
return (ret);
if (nlockers == 0)
return (0);
#ifdef DIAGNOSTIC
if (FLD_ISSET(dbenv->verbose, DB_VERB_WAITSFOR))
- __dd_debug(dbenv, idmap, bitmap, nlockers);
+ __dd_debug(dbenv, idmap, bitmap, nlockers, nalloc);
#endif
+ /* Now duplicate the bitmaps so we can verify deadlock participants. */
+ if ((ret = __os_calloc(dbenv, (size_t)nlockers,
+ sizeof(u_int32_t) * nalloc, &copymap)) != 0)
+ goto err;
+ memcpy(copymap, bitmap, nlockers * sizeof(u_int32_t) * nalloc);
+
+ if ((ret = __os_calloc(dbenv, sizeof(u_int32_t), nalloc, &tmpmap)) != 0)
+ goto err1;
+
/* Find a deadlock. */
- if ((ret = __dd_find(dbenv, bitmap, idmap, nlockers, &deadp)) != 0)
+ if ((ret =
+ __dd_find(dbenv, bitmap, idmap, nlockers, nalloc, &deadp)) != 0)
return (ret);
- nentries = ALIGN(nlockers, 32) / 32;
killid = BAD_KILLID;
free_me = deadp;
for (; *deadp != NULL; deadp++) {
if (abortp != NULL)
++*abortp;
- switch (atype) { /* Kill someone. */
- case DB_LOCK_OLDEST:
- /*
- * Find the first bit set in the current
- * array and then look for a lower tid in
- * the array.
- */
- for (i = 0; i < nlockers; i++)
- if (ISSET_MAP(*deadp, i)) {
- killid = i;
- break;
+ killid = (u_int32_t)((*deadp - bitmap) / nalloc);
+ limit = killid;
+ keeper = BAD_KILLID;
- }
- /*
- * It's conceivable that under XA, the locker could
- * have gone away.
- */
- if (killid == BAD_KILLID)
- break;
-
- /*
- * The oldest transaction has the lowest
- * transaction id.
- */
- for (i = killid + 1; i < nlockers; i++)
- if (ISSET_MAP(*deadp, i) &&
- idmap[i].id < idmap[killid].id)
- killid = i;
- break;
- case DB_LOCK_DEFAULT:
- case DB_LOCK_RANDOM:
- /*
- * We are trying to calculate the id of the
- * locker whose entry is indicated by deadlock.
- */
- killid = (*deadp - bitmap) / nentries;
+ if (atype == DB_LOCK_DEFAULT || atype == DB_LOCK_RANDOM)
+ goto dokill;
+ /*
+ * It's conceivable that under XA, the locker could
+ * have gone away.
+ */
+ if (killid == BAD_KILLID)
break;
- case DB_LOCK_YOUNGEST:
- /*
- * Find the first bit set in the current
- * array and then look for a lower tid in
- * the array.
- */
- for (i = 0; i < nlockers; i++)
- if (ISSET_MAP(*deadp, i)) {
- killid = i;
- break;
- }
- /*
- * It's conceivable that under XA, the locker could
- * have gone away.
- */
- if (killid == BAD_KILLID)
- break;
+ /*
+ * Start with the id that we know is deadlocked
+ * and then examine all other set bits and see
+ * if any is a better candidate for abortion
+ * and is genuinely part of the

+ * deadlock. The definition of "best":
+ * OLDEST: smallest id
+ * YOUNGEST: largest id
+ * MAXLOCKS: maximum count
+ * MINLOCKS: minimum count
+ * MINWRITE: minimum count
+ */
- /*
- * The youngest transaction has the highest
- * transaction id.
- */
- for (i = killid + 1; i < nlockers; i++)
- if (ISSET_MAP(*deadp, i) &&
- idmap[i].id > idmap[killid].id)
- killid = i;
- break;
- default:
- killid = BAD_KILLID;
- ret = EINVAL;
+ for (i = (killid + 1) % nlockers;
+ i != limit;
+ i = (i + 1) % nlockers) {
+ if (!ISSET_MAP(*deadp, i))
+ continue;
+ switch (atype) {
+ case DB_LOCK_OLDEST:
+ if (__dd_isolder(idmap[killid].id,
+ idmap[i].id, lock_max, txn_max))
+ continue;
+ keeper = i;
+ break;
+ case DB_LOCK_YOUNGEST:
+ if (__dd_isolder(idmap[i].id,
+ idmap[killid].id, lock_max, txn_max))
+ continue;
+ keeper = i;
+ break;
+ case DB_LOCK_MAXLOCKS:
+ if (idmap[i].count < idmap[killid].count)
+ continue;
+ keeper = i;
+ break;
+ case DB_LOCK_MINLOCKS:
+ case DB_LOCK_MINWRITE:
+ if (idmap[i].count > idmap[killid].count)
+ continue;
+ keeper = i;
+ break;
+ default:
+ killid = BAD_KILLID;
+ ret = EINVAL;
+ goto dokill;
+ }
+ if (__dd_verify(idmap, *deadp,
+ tmpmap, copymap, nlockers, nalloc, i))
+ killid = i;
}
- if (killid == BAD_KILLID)
+dokill: if (killid == BAD_KILLID)
continue;
+ /*
+ * There are cases in which our general algorithm will
+ * fail. Returning 1 from verify indicates that the
+ * particular locker is not only involved in a deadlock,
+ * but that killing it will allow others to make forward
+ * progress. Unfortunately, there are cases where we need
+ * to abort a locker, but killing it will not necessarily
+ * ensure forward progress (imagine N readers all trying to
+ * acquire a write lock). In such a scenario, we'll have
+ * gotten all the way through the loop, we will have found
+ * someone to keep (keeper will be valid), but killid will
+ * still be the initial deadlocker. In this case, if the
+ * initial killid satisfies __dd_verify, kill it, else abort
+ * keeper and indicate that we need to run deadlock detection
+ * again.
+ */
+
+ if (keeper != BAD_KILLID && killid == limit &&
+ __dd_verify(idmap, *deadp,
+ tmpmap, copymap, nlockers, nalloc, killid) == 0) {
+ LOCKREGION(dbenv, lt);
+ region->need_dd = 1;
+ UNLOCKREGION(dbenv, lt);
+ killid = keeper;
+ }
+
/* Kill the locker with lockid idmap[killid]. */
if ((ret = __dd_abort(dbenv, &idmap[killid])) != 0) {
/*
@@ -221,9 +295,13 @@ lock_detect(dbenv, flags, atype, abortp)
__db_err(dbenv,
"Aborting locker %lx", (u_long)idmap[killid].id);
}
- __os_free(free_me, 0);
- __os_free(bitmap, 0);
- __os_free(idmap, 0);
+ __os_free(dbenv, tmpmap);
+err1: __os_free(dbenv, copymap);
+
+err: if (free_me != NULL)
+ __os_free(dbenv, free_me);
+ __os_free(dbenv, bitmap);
+ __os_free(dbenv, idmap);
return (ret);
}
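
Editorial note (not part of the patch): a standalone sketch of the victim-selection policies described in the "definition of best" loop above (OLDEST, YOUNGEST, MAXLOCKS, MINLOCKS). The IDs and counts are made up, and no locker/transaction ID-wrap handling is attempted here; the patch uses __dd_isolder() for that.

/* Illustrative only: pick a deadlock victim among participants by policy. */
#include <stdio.h>

enum policy { OLDEST, YOUNGEST, MAXLOCKS, MINLOCKS };

struct participant {
	unsigned id;		/* smaller id == older */
	unsigned count;		/* number of locks (or writes) held */
};

static int
pick_victim(const struct participant *p, int n, enum policy how)
{
	int best, i;

	best = 0;
	for (i = 1; i < n; i++)
		switch (how) {
		case OLDEST:	/* abort the smallest id */
			if (p[i].id < p[best].id)
				best = i;
			break;
		case YOUNGEST:	/* abort the largest id */
			if (p[i].id > p[best].id)
				best = i;
			break;
		case MAXLOCKS:	/* abort whoever holds the most */
			if (p[i].count > p[best].count)
				best = i;
			break;
		case MINLOCKS:	/* abort whoever holds the least */
			if (p[i].count < p[best].count)
				best = i;
			break;
		}
	return (best);
}

int
main(void)
{
	struct participant p[] = { { 80, 3 }, { 81, 12 }, { 82, 1 } };

	printf("MINLOCKS victim: id %u\n", p[pick_victim(p, 3, MINLOCKS)].id);
	return (0);
}
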
@@ -236,9 +314,9 @@ lock_detect(dbenv, flags, atype, abortp)
# define DD_INVALID_ID ((u_int32_t) -1)
static int
-__dd_build(dbenv, bmp, nlockers, idmap)
+__dd_build(dbenv, atype, bmp, nlockers, allocp, idmap)
DB_ENV *dbenv;
- u_int32_t **bmp, *nlockers;
+ u_int32_t atype, **bmp, *nlockers, *allocp;
locker_info **idmap;
{
struct __db_lock *lp;
@@ -247,12 +325,30 @@ __dd_build(dbenv, bmp, nlockers, idmap)
DB_LOCKREGION *region;
DB_LOCKTAB *lt;
locker_info *id_array;
- u_int32_t *bitmap, count, dd, *entryp, i, id, ndx, nentries, *tmpmap;
+ db_timeval_t now;
+ u_int32_t *bitmap, count, dd, *entryp, id, ndx, nentries, *tmpmap;
u_int8_t *pptr;
- int is_first, ret;
+ int expire_only, is_first, need_timeout, ret;
lt = dbenv->lk_handle;
region = lt->reginfo.primary;
+ LOCK_SET_TIME_INVALID(&now);
+ need_timeout = 0;
+ expire_only = atype == DB_LOCK_EXPIRE;
+
+ /*
+ * While we always check for expired timeouts, if we are called
+ * with DB_LOCK_EXPIRE, then we are only checking for timeouts
+ * (i.e., not doing deadlock detection at all). If we aren't
+ * doing real deadlock detection, then we can skip a significant
+ * amount of the processing. In particular we do not build
+ * the conflict array and our caller needs to expect this.
+ */
+ if (expire_only) {
+ count = 0;
+ nentries = 0;
+ goto obj_loop;
+ }
/*
* We'll check how many lockers there are, add a few more in for
@@ -260,8 +356,7 @@ __dd_build(dbenv, bmp, nlockers, idmap)
* verify that we have enough room when we go back in and get the
* mutex the second time.
*/
-retry: count = region->nlockers;
- region->need_dd = 0;
+retry: count = region->stat.st_nlockers;
if (count == 0) {
*nlockers = 0;
@@ -271,7 +366,7 @@ retry: count = region->nlockers;
if (FLD_ISSET(dbenv->verbose, DB_VERB_DEADLOCK))
__db_err(dbenv, "%lu lockers", (u_long)count);
- count += 40;
+ count += 20;
nentries = ALIGN(count, 32) / 32;
/*
@@ -287,38 +382,46 @@ retry: count = region->nlockers;
if ((ret = __os_calloc(dbenv,
sizeof(u_int32_t), nentries, &tmpmap)) != 0) {
- __os_free(bitmap, sizeof(u_int32_t) * nentries);
+ __os_free(dbenv, bitmap);
return (ret);
}
if ((ret = __os_calloc(dbenv,
(size_t)count, sizeof(locker_info), &id_array)) != 0) {
- __os_free(bitmap, count * sizeof(u_int32_t) * nentries);
- __os_free(tmpmap, sizeof(u_int32_t) * nentries);
+ __os_free(dbenv, bitmap);
+ __os_free(dbenv, tmpmap);
return (ret);
}
/*
* Now go back in and actually fill in the matrix.
*/
- if (region->nlockers > count) {
- __os_free(bitmap, count * sizeof(u_int32_t) * nentries);
- __os_free(tmpmap, sizeof(u_int32_t) * nentries);
- __os_free(id_array, count * sizeof(locker_info));
+ if (region->stat.st_nlockers > count) {
+ __os_free(dbenv, bitmap);
+ __os_free(dbenv, tmpmap);
+ __os_free(dbenv, id_array);
goto retry;
}
/*
* First we go through and assign each locker a deadlock detector id.
*/
- for (id = 0, i = 0; i < region->locker_t_size; i++) {
- for (lip = SH_TAILQ_FIRST(&lt->locker_tab[i], __db_locker);
- lip != NULL; lip = SH_TAILQ_NEXT(lip, links, __db_locker))
- if (lip->master_locker == INVALID_ROFF) {
- lip->dd_id = id++;
- id_array[lip->dd_id].id = lip->id;
- } else
- lip->dd_id = DD_INVALID_ID;
+ for (id = 0, lip = SH_TAILQ_FIRST(&region->lockers, __db_locker);
+ lip != NULL;
+ lip = SH_TAILQ_NEXT(lip, ulinks, __db_locker)) {
+ if (F_ISSET(lip, DB_LOCKER_INABORT))
+ continue;
+ if (lip->master_locker == INVALID_ROFF) {
+ lip->dd_id = id++;
+ id_array[lip->dd_id].id = lip->id;
+ if (atype == DB_LOCK_MINLOCKS ||
+ atype == DB_LOCK_MAXLOCKS)
+ id_array[lip->dd_id].count = lip->nlocks;
+ if (atype == DB_LOCK_MINWRITE)
+ id_array[lip->dd_id].count = lip->nwrites;
+ } else
+ lip->dd_id = DD_INVALID_ID;
+
}
/*
@@ -328,8 +431,11 @@ retry: count = region->nlockers;
* list and add an entry in the waitsfor matrix for each waiter/holder
* combination.
*/
+obj_loop:
for (op = SH_TAILQ_FIRST(&region->dd_objs, __db_lockobj);
op != NULL; op = SH_TAILQ_NEXT(op, dd_links, __db_lockobj)) {
+ if (expire_only)
+ goto look_waiters;
CLEAR_MAP(tmpmap, nentries);
/*
@@ -343,11 +449,20 @@ retry: count = region->nlockers;
if ((ret = __lock_getlocker(lt,
lp->holder, ndx, 0, &lockerp)) != 0)
continue;
- if (lockerp->dd_id == DD_INVALID_ID)
- dd = ((DB_LOCKER *)
- R_ADDR(&lt->reginfo,
- lockerp->master_locker))->dd_id;
- else
+ if (F_ISSET(lockerp, DB_LOCKER_INABORT))
+ continue;
+
+ if (lockerp->dd_id == DD_INVALID_ID) {
+ dd = ((DB_LOCKER *)R_ADDR(&lt->reginfo,
+ lockerp->master_locker))->dd_id;
+ lockerp->dd_id = dd;
+ if (atype == DB_LOCK_MINLOCKS ||
+ atype == DB_LOCK_MAXLOCKS)
+ id_array[dd].count += lockerp->nlocks;
+ if (atype == DB_LOCK_MINWRITE)
+ id_array[dd].count += lockerp->nwrites;
+
+ } else
dd = lockerp->dd_id;
id_array[dd].valid = 1;
@@ -363,6 +478,7 @@ retry: count = region->nlockers;
* Next, for each waiter, we set its row in the matrix
* equal to the map of holders we set up above.
*/
+look_waiters:
for (is_first = 1,
lp = SH_TAILQ_FIRST(&op->waiters, __db_lock);
lp != NULL;
@@ -372,11 +488,30 @@ retry: count = region->nlockers;
if ((ret = __lock_getlocker(lt,
lp->holder, ndx, 0, &lockerp)) != 0)
continue;
- if (lockerp->dd_id == DD_INVALID_ID)
- dd = ((DB_LOCKER *)
- R_ADDR(&lt->reginfo,
- lockerp->master_locker))->dd_id;
- else
+ if (lp->status == DB_LSTAT_WAITING) {
+ if (__lock_expired(dbenv,
+ &now, &lockerp->lk_expire)) {
+ lp->status = DB_LSTAT_EXPIRED;
+ MUTEX_UNLOCK(dbenv, &lp->mutex);
+ continue;
+ }
+ need_timeout =
+ LOCK_TIME_ISVALID(&lockerp->lk_expire);
+ }
+
+ if (expire_only)
+ continue;
+
+ if (lockerp->dd_id == DD_INVALID_ID) {
+ dd = ((DB_LOCKER *)R_ADDR(&lt->reginfo,
+ lockerp->master_locker))->dd_id;
+ lockerp->dd_id = dd;
+ if (atype == DB_LOCK_MINLOCKS ||
+ atype == DB_LOCK_MAXLOCKS)
+ id_array[dd].count += lockerp->nlocks;
+ if (atype == DB_LOCK_MINWRITE)
+ id_array[dd].count += lockerp->nwrites;
+ } else
dd = lockerp->dd_id;
id_array[dd].valid = 1;
@@ -396,11 +531,19 @@ retry: count = region->nlockers;
* else on the queue, then we have to keep
* it and we have an automatic deadlock.
*/
- if (is_first)
+ if (is_first) {
+ if (ISSET_MAP(entryp, dd))
+ id_array[dd].self_wait = 1;
CLR_MAP(entryp, dd);
+ }
}
}
+ if (expire_only) {
+ region->need_dd = need_timeout;
+ return (0);
+ }
+
/* Now for each locker; record its last lock. */
for (id = 0; id < count; id++) {
if (!id_array[id].valid)
@@ -423,7 +566,7 @@ retry: count = region->nlockers;
do {
lp = SH_LIST_FIRST(&child->heldby, __db_lock);
if (lp != NULL &&
- lp->status == DB_LSTAT_WAITING) {
+ lp->status == DB_LSTAT_WAITING) {
id_array[id].last_locker_id = child->id;
goto get_lock;
}
@@ -445,8 +588,11 @@ retry: count = region->nlockers;
}
}
- /* Pass complete, reset the deadlock detector bit. */
- region->need_dd = 0;
+ /*
+ * Pass complete, reset the deadlock detector bit,
+ * unless we have pending timeouts.
+ */
+ region->need_dd = need_timeout;
/*
* Now we can release everything except the bitmap matrix that we
@@ -455,18 +601,19 @@ retry: count = region->nlockers;
*nlockers = id;
*idmap = id_array;
*bmp = bitmap;
- __os_free(tmpmap, sizeof(u_int32_t) * nentries);
+ *allocp = nentries;
+ __os_free(dbenv, tmpmap);
return (0);
}
static int
-__dd_find(dbenv, bmp, idmap, nlockers, deadp)
+__dd_find(dbenv, bmp, idmap, nlockers, nalloc, deadp)
DB_ENV *dbenv;
- u_int32_t *bmp, nlockers;
+ u_int32_t *bmp, nlockers, nalloc;
locker_info *idmap;
u_int32_t ***deadp;
{
- u_int32_t i, j, k, nentries, *mymap, *tmpmap;
+ u_int32_t i, j, k, *mymap, *tmpmap;
u_int32_t **retp;
int ndead, ndeadalloc, ret;
@@ -476,15 +623,14 @@ __dd_find(dbenv, bmp, idmap, nlockers, deadp)
ndeadalloc = INITIAL_DEAD_ALLOC;
ndead = 0;
if ((ret = __os_malloc(dbenv,
- ndeadalloc * sizeof(u_int32_t *), NULL, &retp)) != 0)
+ ndeadalloc * sizeof(u_int32_t *), &retp)) != 0)
return (ret);
/*
* For each locker, OR in the bits from the lockers on which that
* locker is waiting.
*/
- nentries = ALIGN(nlockers, 32) / 32;
- for (mymap = bmp, i = 0; i < nlockers; i++, mymap += nentries) {
+ for (mymap = bmp, i = 0; i < nlockers; i++, mymap += nalloc) {
if (!idmap[i].valid)
continue;
for (j = 0; j < nlockers; j++) {
@@ -492,8 +638,8 @@ __dd_find(dbenv, bmp, idmap, nlockers, deadp)
continue;
/* Find the map for this bit. */
- tmpmap = bmp + (nentries * j);
- OR_MAP(mymap, tmpmap, nentries);
+ tmpmap = bmp + (nalloc * j);
+ OR_MAP(mymap, tmpmap, nalloc);
if (!ISSET_MAP(mymap, i))
continue;
@@ -506,7 +652,7 @@ __dd_find(dbenv, bmp, idmap, nlockers, deadp)
*/
if (__os_realloc(dbenv,
ndeadalloc * sizeof(u_int32_t),
- NULL, &retp) != 0) {
+ &retp) != 0) {
retp[ndead] = NULL;
*deadp = retp;
return (0);
@@ -543,6 +689,7 @@ __dd_abort(dbenv, info)
region = lt->reginfo.primary;
LOCKREGION(dbenv, lt);
+
/* Find the locker's last lock. */
LOCKER_LOCK(lt, region, info->last_locker_id, ndx);
if ((ret = __lock_getlocker(lt,
@@ -552,18 +699,12 @@ __dd_abort(dbenv, info)
goto out;
}
- lockp = SH_LIST_FIRST(&lockerp->heldby, __db_lock);
-
- /*
- * It's possible that this locker was already aborted. If that's
- * the case, make sure that we remove its locker from the hash table.
- */
- if (lockp == NULL) {
- if (LOCKER_FREEABLE(lockerp)) {
- __lock_freelocker(lt, region, lockerp, ndx);
- goto out;
- }
- } else if (R_OFFSET(&lt->reginfo, lockp) != info->last_lock ||
+ /* It's possible that this locker was already aborted. */
+ if ((lockp = SH_LIST_FIRST(&lockerp->heldby, __db_lock)) == NULL) {
+ ret = DB_ALREADY_ABORTED;
+ goto out;
+ }
+ if (R_OFFSET(&lt->reginfo, lockp) != info->last_lock ||
lockp->status != DB_LSTAT_WAITING) {
ret = DB_ALREADY_ABORTED;
goto out;
@@ -589,7 +730,7 @@ __dd_abort(dbenv, info)
ret = __lock_promote(lt, sh_obj, 0);
MUTEX_UNLOCK(dbenv, &lockp->mutex);
- region->ndeadlocks++;
+ region->stat.st_ndeadlocks++;
UNLOCKREGION(dbenv, lt);
return (0);
@@ -600,13 +741,12 @@ out: UNLOCKREGION(dbenv, lt);
#ifdef DIAGNOSTIC
static void
-__dd_debug(dbenv, idmap, bitmap, nlockers)
+__dd_debug(dbenv, idmap, bitmap, nlockers, nalloc)
DB_ENV *dbenv;
locker_info *idmap;
- u_int32_t *bitmap, nlockers;
+ u_int32_t *bitmap, nlockers, nalloc;
{
- u_int32_t i, j, *mymap, nentries;
- int ret;
+ u_int32_t i, j, *mymap;
char *msgbuf;
__db_err(dbenv, "Waitsfor array\nWaiter:\tWaiting on:");
@@ -614,11 +754,10 @@ __dd_debug(dbenv, idmap, bitmap, nlockers)
/* Allocate space to print 10 bytes per item waited on. */
#undef MSGBUF_LEN
#define MSGBUF_LEN ((nlockers + 1) * 10 + 64)
- if ((ret = __os_malloc(dbenv, MSGBUF_LEN, NULL, &msgbuf)) != 0)
+ if (__os_malloc(dbenv, MSGBUF_LEN, &msgbuf) != 0)
return;
- nentries = ALIGN(nlockers, 32) / 32;
- for (mymap = bitmap, i = 0; i < nlockers; i++, mymap += nentries) {
+ for (mymap = bitmap, i = 0; i < nlockers; i++, mymap += nalloc) {
if (!idmap[i].valid)
continue;
sprintf(msgbuf, /* Waiter. */
@@ -632,6 +771,116 @@ __dd_debug(dbenv, idmap, bitmap, nlockers)
__db_err(dbenv, msgbuf);
}
- __os_free(msgbuf, MSGBUF_LEN);
+ __os_free(dbenv, msgbuf);
}
#endif
+
+/*
+ * Given a bitmap that contains a deadlock, verify that the bit
+ * specified in the which parameter indicates a transaction that
+ * is actually deadlocked. Return 1 if really deadlocked, 0 otherwise.
+ * deadmap is the array that identified the deadlock.
+ * tmpmap is a temporary bit map into which we can OR things.
+ * origmap is a copy of the initial bitmaps from the dd_build phase.
+ * nlockers is the number of actual lockers under consideration
+ * nalloc is the number of words allocated for the bitmap
+ * which is the locker in question
+ */
+static int
+__dd_verify(idmap, deadmap, tmpmap, origmap, nlockers, nalloc, which)
+ locker_info *idmap;
+ u_int32_t *deadmap, *tmpmap, *origmap;
+ u_int32_t nlockers, nalloc, which;
+{
+ u_int32_t *tmap;
+ u_int32_t j;
+ int count;
+
+ memset(tmpmap, 0, sizeof(u_int32_t) * nalloc);
+
+ /*
+ * In order for "which" to be actively involved in
+ * the deadlock, removing him from the evaluation
+ * must remove the deadlock. So, we OR together everyone
+ * except which; if all the participants still have their
+ * bits set, then the deadlock persists and which does
+ * not participate. If the deadlock does not persist
+ * then "which" does participate.
+ */
+ count = 0;
+ for (j = 0; j < nlockers; j++) {
+ if (!ISSET_MAP(deadmap, j) || j == which)
+ continue;
+
+ /* Find the map for this bit. */
+ tmap = origmap + (nalloc * j);
+
+ /*
+ * We special case the first waiter who is also a holder, so
+ * we don't automatically call that a deadlock. However, if
+ * it really is a deadlock, we need the bit set now so that
+ * we treat the first waiter like other waiters.
+ */
+ if (idmap[j].self_wait)
+ SET_MAP(tmap, j);
+ OR_MAP(tmpmap, tmap, nalloc);
+ count++;
+ }
+
+ if (count == 1)
+ return (1);
+
+ /*
+ * Now check the resulting map and see whether
+ * all participants still have their bit set.
+ */
+ for (j = 0; j < nlockers; j++) {
+ if (!ISSET_MAP(deadmap, j) || j == which)
+ continue;
+ if (!ISSET_MAP(tmpmap, j))
+ return (1);
+ }
+ return (0);
+}
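
Editorial note (not part of the patch): the waits-for bitmap that __dd_build() fills in and that __dd_find()/__dd_verify() above consume can be pictured as one bit-row per locker, where bit j in row i means "i waits for j". The sketch below detects a cycle by computing a full transitive closure of such rows; the patch does a cheaper single incremental pass over each row, but the idea — a locker that ends up waiting for itself is deadlocked — is the same.

/* Illustrative only: cycle detection over a tiny waits-for bitmap. */
#include <stdio.h>
#include <stdint.h>

#define NLOCKERS 3

static int
find_deadlocked(const uint32_t waitsfor[NLOCKERS])
{
	uint32_t row[NLOCKERS];
	int changed, i, j;

	for (i = 0; i < NLOCKERS; i++)
		row[i] = waitsfor[i];

	/* Repeatedly OR in the rows of everyone we (transitively) wait for. */
	do {
		changed = 0;
		for (i = 0; i < NLOCKERS; i++)
			for (j = 0; j < NLOCKERS; j++)
				if ((row[i] & (1u << j)) &&
				    (row[i] | row[j]) != row[i]) {
					row[i] |= row[j];
					changed = 1;
				}
	} while (changed);

	/* A locker that ends up waiting for itself is deadlocked. */
	for (i = 0; i < NLOCKERS; i++)
		if (row[i] & (1u << i))
			return (i);
	return (-1);
}

int
main(void)
{
	/* 0 waits for 1, 1 waits for 2, 2 waits for 0: a cycle. */
	const uint32_t waitsfor[NLOCKERS] = { 1u << 1, 1u << 2, 1u << 0 };

	printf("deadlocked locker: %d\n", find_deadlocked(waitsfor));
	return (0);
}
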
+
+/*
+ * __dd_isolder --
+ *
+ * Figure out the relative age of two lockers. We make all lockers
+ * older than all transactions, because that's how it's worked
+ * historically (because lockers are lower ids).
+ */
+static int
+__dd_isolder(a, b, lock_max, txn_max)
+ u_int32_t a, b;
+ u_int32_t lock_max, txn_max;
+{
+ u_int32_t max;
+
+ /* Check for comparing lock-id and txnid. */
+ if (a <= DB_LOCK_MAXID && b > DB_LOCK_MAXID)
+ return (1);
+ if (b <= DB_LOCK_MAXID && a > DB_LOCK_MAXID)
+ return (0);
+
+ /* In the same space; figure out which one. */
+ max = txn_max;
+ if (a <= DB_LOCK_MAXID)
+ max = lock_max;
+
+ /*
+ * We can't get a 100% correct ordering, because we don't know
+ * where the current interval started and if there were older
+ * lockers outside the interval. We do the best we can.
+ */
+
+ /*
+ * Check for a wrapped case with ids above max.
+ */
+ if (a > max && b < max)
+ return (1);
+ if (b > max && a < max)
+ return (0);
+
+ return (a < b);
+}
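
Editorial note (not part of the patch): a small standalone driver showing how a wrap-aware "is a older than b" comparison like __dd_isolder() above behaves near the ID wrap point. The boundary constant and the sample IDs are made up for the demo.

/* Illustrative only: wrap-aware age comparison between two IDs. */
#include <stdio.h>
#include <stdint.h>

#define DEMO_LOCK_MAXID  100	/* made-up boundary between lockers/txns */

static int
is_older(uint32_t a, uint32_t b, uint32_t lock_max, uint32_t txn_max)
{
	uint32_t max;

	/* Lockers (low IDs) are treated as older than all transactions. */
	if (a <= DEMO_LOCK_MAXID && b > DEMO_LOCK_MAXID)
		return (1);
	if (b <= DEMO_LOCK_MAXID && a > DEMO_LOCK_MAXID)
		return (0);

	max = (a <= DEMO_LOCK_MAXID) ? lock_max : txn_max;

	/* IDs above the current max were allocated before the wrap. */
	if (a > max && b < max)
		return (1);
	if (b > max && a < max)
		return (0);

	return (a < b);
}

int
main(void)
{
	/* Current txn max is 150: 180 was allocated before the wrap. */
	printf("%d\n", is_older(180, 120, 90, 150));	/* prints 1 */
	printf("%d\n", is_older(120, 180, 90, 150));	/* prints 0 */
	return (0);
}
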
diff --git a/bdb/lock/lock_method.c b/bdb/lock/lock_method.c
index 46ed9e5166f..72703e253bc 100644
--- a/bdb/lock/lock_method.c
+++ b/bdb/lock/lock_method.c
@@ -1,33 +1,126 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Copyright (c) 1996-2002
* Sleepycat Software. All rights reserved.
*/
#include "db_config.h"
#ifndef lint
-static const char revid[] = "$Id: lock_method.c,v 11.5 2000/12/21 19:16:42 bostic Exp $";
+static const char revid[] = "$Id: lock_method.c,v 11.30 2002/03/27 04:32:20 bostic Exp $";
#endif /* not lint */
#ifndef NO_SYSTEM_INCLUDES
#include <sys/types.h>
+#ifdef HAVE_RPC
+#include <rpc/rpc.h>
+#endif
+
#include <string.h>
#endif
#include "db_int.h"
-#include "db_shash.h"
-#include "lock.h"
+#include "dbinc/db_shash.h"
+#include "dbinc/lock.h"
+
+#ifdef HAVE_RPC
+#include "dbinc_auto/db_server.h"
+#include "dbinc_auto/rpc_client_ext.h"
+#endif
+
+static int __lock_set_lk_conflicts __P((DB_ENV *, u_int8_t *, int));
+static int __lock_set_lk_detect __P((DB_ENV *, u_int32_t));
+static int __lock_set_lk_max __P((DB_ENV *, u_int32_t));
+static int __lock_set_lk_max_lockers __P((DB_ENV *, u_int32_t));
+static int __lock_set_lk_max_locks __P((DB_ENV *, u_int32_t));
+static int __lock_set_lk_max_objects __P((DB_ENV *, u_int32_t));
+static int __lock_set_env_timeout __P((DB_ENV *, db_timeout_t, u_int32_t));
+
+/*
+ * __lock_dbenv_create --
+ * Lock specific creation of the DB_ENV structure.
+ *
+ * PUBLIC: void __lock_dbenv_create __P((DB_ENV *));
+ */
+void
+__lock_dbenv_create(dbenv)
+ DB_ENV *dbenv;
+{
+ /*
+ * !!!
+ * Our caller has not yet had the opportunity to reset the panic
+ * state or turn off mutex locking, and so we can neither check
+ * the panic state nor acquire a mutex in the DB_ENV create path.
+ */
+
+ dbenv->lk_max = DB_LOCK_DEFAULT_N;
+ dbenv->lk_max_lockers = DB_LOCK_DEFAULT_N;
+ dbenv->lk_max_objects = DB_LOCK_DEFAULT_N;
+
+#ifdef HAVE_RPC
+ if (F_ISSET(dbenv, DB_ENV_RPCCLIENT)) {
+ dbenv->set_lk_conflicts = __dbcl_set_lk_conflict;
+ dbenv->set_lk_detect = __dbcl_set_lk_detect;
+ dbenv->set_lk_max = __dbcl_set_lk_max;
+ dbenv->set_lk_max_lockers = __dbcl_set_lk_max_lockers;
+ dbenv->set_lk_max_locks = __dbcl_set_lk_max_locks;
+ dbenv->set_lk_max_objects = __dbcl_set_lk_max_objects;
+ dbenv->lock_detect = __dbcl_lock_detect;
+ dbenv->lock_dump_region = NULL;
+ dbenv->lock_get = __dbcl_lock_get;
+ dbenv->lock_id = __dbcl_lock_id;
+ dbenv->lock_id_free = __dbcl_lock_id_free;
+ dbenv->lock_put = __dbcl_lock_put;
+ dbenv->lock_stat = __dbcl_lock_stat;
+ dbenv->lock_vec = __dbcl_lock_vec;
+ } else
+#endif
+ {
+ dbenv->set_lk_conflicts = __lock_set_lk_conflicts;
+ dbenv->set_lk_detect = __lock_set_lk_detect;
+ dbenv->set_lk_max = __lock_set_lk_max;
+ dbenv->set_lk_max_lockers = __lock_set_lk_max_lockers;
+ dbenv->set_lk_max_locks = __lock_set_lk_max_locks;
+ dbenv->set_lk_max_objects = __lock_set_lk_max_objects;
+ dbenv->set_timeout = __lock_set_env_timeout;
+ dbenv->lock_detect = __lock_detect;
+ dbenv->lock_dump_region = __lock_dump_region;
+ dbenv->lock_get = __lock_get;
+ dbenv->lock_id = __lock_id;
+ dbenv->lock_id_free = __lock_id_free;
+#ifdef CONFIG_TEST
+ dbenv->lock_id_set = __lock_id_set;
+#endif
+ dbenv->lock_put = __lock_put;
+ dbenv->lock_stat = __lock_stat;
+ dbenv->lock_vec = __lock_vec;
+ dbenv->lock_downgrade = __lock_downgrade;
+ }
+}
+
+/*
+ * __lock_dbenv_close --
+ * Lock specific destruction of the DB_ENV structure.
+ *
+ * PUBLIC: void __lock_dbenv_close __P((DB_ENV *));
+ */
+void
+__lock_dbenv_close(dbenv)
+ DB_ENV *dbenv;
+{
+ if (dbenv->lk_conflicts != NULL) {
+ __os_free(dbenv, dbenv->lk_conflicts);
+ dbenv->lk_conflicts = NULL;
+ }
+}
/*
* __lock_set_lk_conflicts
* Set the conflicts matrix.
- *
- * PUBLIC: int __lock_set_lk_conflicts __P((DB_ENV *, u_int8_t *, int));
*/
-int
+static int
__lock_set_lk_conflicts(dbenv, lk_conflicts, lk_modes)
DB_ENV *dbenv;
u_int8_t *lk_conflicts;
@@ -38,12 +131,11 @@ __lock_set_lk_conflicts(dbenv, lk_conflicts, lk_modes)
ENV_ILLEGAL_AFTER_OPEN(dbenv, "set_lk_conflicts");
if (dbenv->lk_conflicts != NULL) {
- __os_free(dbenv->lk_conflicts,
- dbenv->lk_modes * dbenv->lk_modes);
+ __os_free(dbenv, dbenv->lk_conflicts);
dbenv->lk_conflicts = NULL;
}
if ((ret = __os_malloc(dbenv,
- lk_modes * lk_modes, NULL, &dbenv->lk_conflicts)) != 0)
+ lk_modes * lk_modes, &dbenv->lk_conflicts)) != 0)
return (ret);
memcpy(dbenv->lk_conflicts, lk_conflicts, lk_modes * lk_modes);
dbenv->lk_modes = lk_modes;
@@ -54,10 +146,8 @@ __lock_set_lk_conflicts(dbenv, lk_conflicts, lk_modes)
/*
* __lock_set_lk_detect
* Set the automatic deadlock detection.
- *
- * PUBLIC: int __lock_set_lk_detect __P((DB_ENV *, u_int32_t));
*/
-int
+static int
__lock_set_lk_detect(dbenv, lk_detect)
DB_ENV *dbenv;
u_int32_t lk_detect;
@@ -66,11 +156,17 @@ __lock_set_lk_detect(dbenv, lk_detect)
switch (lk_detect) {
case DB_LOCK_DEFAULT:
+ case DB_LOCK_EXPIRE:
+ case DB_LOCK_MAXLOCKS:
+ case DB_LOCK_MINLOCKS:
+ case DB_LOCK_MINWRITE:
case DB_LOCK_OLDEST:
case DB_LOCK_RANDOM:
case DB_LOCK_YOUNGEST:
break;
default:
+ __db_err(dbenv,
+ "DB_ENV->set_lk_detect: unknown deadlock detection mode specified");
return (EINVAL);
}
dbenv->lk_detect = lk_detect;
@@ -80,10 +176,8 @@ __lock_set_lk_detect(dbenv, lk_detect)
/*
* __lock_set_lk_max
* Set the lock table size.
- *
- * PUBLIC: int __lock_set_lk_max __P((DB_ENV *, u_int32_t));
*/
-int
+static int
__lock_set_lk_max(dbenv, lk_max)
DB_ENV *dbenv;
u_int32_t lk_max;
@@ -99,10 +193,8 @@ __lock_set_lk_max(dbenv, lk_max)
/*
* __lock_set_lk_max_locks
* Set the lock table size.
- *
- * PUBLIC: int __lock_set_lk_max_locks __P((DB_ENV *, u_int32_t));
*/
-int
+static int
__lock_set_lk_max_locks(dbenv, lk_max)
DB_ENV *dbenv;
u_int32_t lk_max;
@@ -116,10 +208,8 @@ __lock_set_lk_max_locks(dbenv, lk_max)
/*
* __lock_set_lk_max_lockers
* Set the lock table size.
- *
- * PUBLIC: int __lock_set_lk_max_lockers __P((DB_ENV *, u_int32_t));
*/
-int
+static int
__lock_set_lk_max_lockers(dbenv, lk_max)
DB_ENV *dbenv;
u_int32_t lk_max;
@@ -133,10 +223,8 @@ __lock_set_lk_max_lockers(dbenv, lk_max)
/*
* __lock_set_lk_max_objects
* Set the lock table size.
- *
- * PUBLIC: int __lock_set_lk_max_objects __P((DB_ENV *, u_int32_t));
*/
-int
+static int
__lock_set_lk_max_objects(dbenv, lk_max)
DB_ENV *dbenv;
u_int32_t lk_max;
@@ -146,3 +234,42 @@ __lock_set_lk_max_objects(dbenv, lk_max)
dbenv->lk_max_objects = lk_max;
return (0);
}
+
+/*
+ * __lock_set_env_timeout
+ * Set the lock environment timeout.
+ */
+static int
+__lock_set_env_timeout(dbenv, timeout, flags)
+ DB_ENV *dbenv;
+ db_timeout_t timeout;
+ u_int32_t flags;
+{
+ DB_LOCKREGION *region;
+
+ region = NULL;
+ if (F_ISSET(dbenv, DB_ENV_OPEN_CALLED)) {
+ if (!LOCKING_ON(dbenv))
+ return (__db_env_config(
+ dbenv, "set_timeout", DB_INIT_LOCK));
+ region = ((DB_LOCKTAB *)dbenv->lk_handle)->reginfo.primary;
+ }
+
+ switch (flags) {
+ case DB_SET_LOCK_TIMEOUT:
+ dbenv->lk_timeout = timeout;
+ if (region != NULL)
+ region->lk_timeout = timeout;
+ break;
+ case DB_SET_TXN_TIMEOUT:
+ dbenv->tx_timeout = timeout;
+ if (region != NULL)
+ region->tx_timeout = timeout;
+ break;
+ default:
+ return (__db_ferr(dbenv, "DB_ENV->set_timeout", 0));
+ /* NOTREACHED */
+ }
+
+ return (0);
+}
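
Editorial note (not part of the patch): the pattern __lock_set_env_timeout() above follows — record the setting in the environment handle, and also push it into the shared region when the environment has already been opened — reduced to a standalone sketch with stand-in types and flag values.

/* Illustrative only: a timeout setter that updates both the local handle
 * and, when present, the shared region. */
#include <stdio.h>
#include <errno.h>

#define SET_LOCK_TIMEOUT 1
#define SET_TXN_TIMEOUT  2

struct demo_region { unsigned lk_timeout, tx_timeout; };
struct demo_env {
	unsigned lk_timeout, tx_timeout;
	struct demo_region *region;	/* NULL until the env is "opened" */
};

static int
demo_set_timeout(struct demo_env *env, unsigned timeout, int which)
{
	switch (which) {
	case SET_LOCK_TIMEOUT:
		env->lk_timeout = timeout;
		if (env->region != NULL)
			env->region->lk_timeout = timeout;
		break;
	case SET_TXN_TIMEOUT:
		env->tx_timeout = timeout;
		if (env->region != NULL)
			env->region->tx_timeout = timeout;
		break;
	default:
		return (EINVAL);
	}
	return (0);
}

int
main(void)
{
	struct demo_region r = { 0, 0 };
	struct demo_env env = { 0, 0, &r };

	(void)demo_set_timeout(&env, 5000000, SET_LOCK_TIMEOUT);
	printf("region lock timeout: %u\n", r.lk_timeout);
	return (0);
}
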
diff --git a/bdb/lock/lock_region.c b/bdb/lock/lock_region.c
index 5ca91a9951d..6df6937e873 100644
--- a/bdb/lock/lock_region.c
+++ b/bdb/lock/lock_region.c
@@ -1,14 +1,14 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Copyright (c) 1996-2002
* Sleepycat Software. All rights reserved.
*/
#include "db_config.h"
#ifndef lint
-static const char revid[] = "$Id: lock_region.c,v 11.41 2000/12/20 21:53:04 ubell Exp $";
+static const char revid[] = "$Id: lock_region.c,v 11.69 2002/08/06 05:05:22 bostic Exp $";
#endif /* not lint */
#ifndef NO_SYSTEM_INCLUDES
@@ -17,95 +17,50 @@ static const char revid[] = "$Id: lock_region.c,v 11.41 2000/12/20 21:53:04 ubel
#include <string.h>
#endif
-#ifdef HAVE_RPC
-#include "db_server.h"
-#endif
-
#include "db_int.h"
-#include "db_shash.h"
-#include "lock.h"
-
-#ifdef HAVE_RPC
-#include "gen_client_ext.h"
-#include "rpc_client_ext.h"
-#endif
+#include "dbinc/db_shash.h"
+#include "dbinc/lock.h"
static int __lock_init __P((DB_ENV *, DB_LOCKTAB *));
static size_t
__lock_region_size __P((DB_ENV *));
-#ifdef MUTEX_SYSTEM_RESOURCES
+#ifdef HAVE_MUTEX_SYSTEM_RESOURCES
static size_t __lock_region_maint __P((DB_ENV *));
#endif
/*
- * This conflict array is used for concurrent db access (CDB). It
- * uses the same locks as the db_rw_conflict array, but adds an IW
- * mode to be used for write cursors.
+ * The conflict arrays are set up such that the row is the lock you are
+ * holding and the column is the lock that is desired.
*/
-#define DB_LOCK_CDB_N 5
-static u_int8_t const db_cdb_conflicts[] = {
- /* N R W WT IW*/
- /* N */ 0, 0, 0, 0, 0,
- /* R */ 0, 0, 1, 0, 0,
- /* W */ 0, 1, 1, 1, 1,
- /* WT */ 0, 0, 0, 0, 0,
- /* IW */ 0, 0, 1, 0, 1,
+#define DB_LOCK_RIW_N 9
+static const u_int8_t db_riw_conflicts[] = {
+/* N R W WT IW IR RIW DR WW */
+/* N */ 0, 0, 0, 0, 0, 0, 0, 0, 0,
+/* R */ 0, 0, 1, 0, 1, 0, 1, 0, 1,
+/* W */ 0, 1, 1, 1, 1, 1, 1, 1, 1,
+/* WT */ 0, 0, 0, 0, 0, 0, 0, 0, 0,
+/* IW */ 0, 1, 1, 0, 0, 0, 0, 1, 1,
+/* IR */ 0, 0, 1, 0, 0, 0, 0, 0, 1,
+/* RIW */ 0, 1, 1, 0, 0, 0, 0, 1, 1,
+/* DR */ 0, 0, 1, 0, 1, 0, 1, 0, 0,
+/* WW */ 0, 1, 1, 0, 1, 1, 1, 0, 1
};
/*
- * __lock_dbenv_create --
- * Lock specific creation of the DB_ENV structure.
- *
- * PUBLIC: void __lock_dbenv_create __P((DB_ENV *));
+ * This conflict array is used for concurrent db access (CDB). It uses
+ * the same locks as the db_riw_conflicts array, but adds an IW mode to
+ * be used for write cursors.
*/
-void
-__lock_dbenv_create(dbenv)
- DB_ENV *dbenv;
-{
- dbenv->lk_max = DB_LOCK_DEFAULT_N;
- dbenv->lk_max_lockers = DB_LOCK_DEFAULT_N;
- dbenv->lk_max_objects = DB_LOCK_DEFAULT_N;
-
- dbenv->set_lk_conflicts = __lock_set_lk_conflicts;
- dbenv->set_lk_detect = __lock_set_lk_detect;
- dbenv->set_lk_max = __lock_set_lk_max;
- dbenv->set_lk_max_locks = __lock_set_lk_max_locks;
- dbenv->set_lk_max_lockers = __lock_set_lk_max_lockers;
- dbenv->set_lk_max_objects = __lock_set_lk_max_objects;
-
-#ifdef HAVE_RPC
- /*
- * If we have a client, overwrite what we just set up to point
- * to the client functions.
- */
- if (F_ISSET(dbenv, DB_ENV_RPCCLIENT)) {
- dbenv->set_lk_conflicts = __dbcl_set_lk_conflict;
- dbenv->set_lk_detect = __dbcl_set_lk_detect;
- dbenv->set_lk_max = __dbcl_set_lk_max;
- dbenv->set_lk_max_locks = __dbcl_set_lk_max_locks;
- dbenv->set_lk_max_lockers = __dbcl_set_lk_max_lockers;
- dbenv->set_lk_max_objects = __dbcl_set_lk_max_objects;
- }
-#endif
-}
-
-/*
- * __lock_dbenv_close --
- * Lock specific destruction of the DB_ENV structure.
- *
- * PUBLIC: void __lock_dbenv_close __P((DB_ENV *));
- */
-void
-__lock_dbenv_close(dbenv)
- DB_ENV *dbenv;
-{
- if (!F_ISSET(dbenv, DB_ENV_USER_ALLOC) && dbenv->lk_conflicts != NULL) {
- __os_free(dbenv->lk_conflicts,
- dbenv->lk_modes * dbenv->lk_modes);
- dbenv->lk_conflicts = NULL;
- }
-}
+#define DB_LOCK_CDB_N 5
+static const u_int8_t db_cdb_conflicts[] = {
+ /* N R W WT IW */
+ /* N */ 0, 0, 0, 0, 0,
+ /* R */ 0, 0, 1, 0, 0,
+ /* W */ 0, 1, 1, 1, 1,
+ /* WT */ 0, 0, 0, 0, 0,
+ /* IW */ 0, 0, 1, 0, 1
+};
/*
* __lock_open --
@@ -167,6 +122,15 @@ __lock_open(dbenv)
region->detect = dbenv->lk_detect;
}
+ /*
+ * A process joining the region may have reset the lock and transaction
+ * timeouts.
+ */
+ if (dbenv->lk_timeout != 0)
+ region->lk_timeout = dbenv->lk_timeout;
+ if (dbenv->tx_timeout != 0)
+ region->tx_timeout = dbenv->tx_timeout;
+
/* Set remaining pointers into region. */
lt->conflicts = (u_int8_t *)R_ADDR(&lt->reginfo, region->conf_off);
lt->obj_tab = (DB_HASHTAB *)R_ADDR(&lt->reginfo, region->obj_off);
@@ -183,7 +147,7 @@ err: if (lt->reginfo.addr != NULL) {
R_UNLOCK(dbenv, &lt->reginfo);
(void)__db_r_detach(dbenv, &lt->reginfo, 0);
}
- __os_free(lt, sizeof(*lt));
+ __os_free(dbenv, lt);
return (ret);
}
@@ -201,7 +165,7 @@ __lock_init(dbenv, lt)
DB_LOCKER *lidp;
DB_LOCKOBJ *op;
DB_LOCKREGION *region;
-#ifdef MUTEX_SYSTEM_RESOURCES
+#ifdef HAVE_MUTEX_SYSTEM_RESOURCES
size_t maint_size;
#endif
u_int32_t i, lk_modes;
@@ -229,25 +193,19 @@ __lock_init(dbenv, lt)
lk_conflicts = dbenv->lk_conflicts;
}
- region->id = 0;
region->need_dd = 0;
region->detect = DB_LOCK_NORUN;
- region->maxlocks = dbenv->lk_max;
- region->maxlockers = dbenv->lk_max_lockers;
- region->maxobjects = dbenv->lk_max_objects;
+ region->lk_timeout = dbenv->lk_timeout;
+ region->tx_timeout = dbenv->tx_timeout;
region->locker_t_size = __db_tablesize(dbenv->lk_max_lockers);
region->object_t_size = __db_tablesize(dbenv->lk_max_objects);
- region->nmodes = lk_modes;
- region->nlocks = 0;
- region->maxnlocks = 0;
- region->nlockers = 0;
- region->maxnlockers = 0;
- region->nobjects = 0;
- region->maxnobjects = 0;
- region->nconflicts = 0;
- region->nrequests = 0;
- region->nreleases = 0;
- region->ndeadlocks = 0;
+ memset(&region->stat, 0, sizeof(region->stat));
+ region->stat.st_id = 0;
+ region->stat.st_cur_maxid = DB_LOCK_MAXID;
+ region->stat.st_maxlocks = dbenv->lk_max;
+ region->stat.st_maxlockers = dbenv->lk_max_lockers;
+ region->stat.st_maxobjects = dbenv->lk_max_objects;
+ region->stat.st_nmodes = lk_modes;
/* Allocate room for the conflict matrix and initialize it. */
if ((ret =
@@ -270,7 +228,7 @@ __lock_init(dbenv, lt)
__db_hashinit(addr, region->locker_t_size);
region->locker_off = R_OFFSET(&lt->reginfo, addr);
-#ifdef MUTEX_SYSTEM_RESOURCES
+#ifdef HAVE_MUTEX_SYSTEM_RESOURCES
maint_size = __lock_region_maint(dbenv);
/* Allocate room for the locker maintenance info and initialize it. */
if ((ret = __db_shalloc(lt->reginfo.addr,
@@ -286,25 +244,23 @@ __lock_init(dbenv, lt)
* the mutex.
*/
SH_TAILQ_INIT(&region->free_locks);
- for (i = 0; i < region->maxlocks; ++i) {
+ for (i = 0; i < region->stat.st_maxlocks; ++i) {
if ((ret = __db_shalloc(lt->reginfo.addr,
sizeof(struct __db_lock), MUTEX_ALIGN, &lp)) != 0)
goto mem_err;
lp->status = DB_LSTAT_FREE;
- lp->gen=0;
- if ((ret = __db_shmutex_init(dbenv, &lp->mutex,
- R_OFFSET(&lt->reginfo, &lp->mutex) + DB_FCNTL_OFF_LOCK,
- MUTEX_SELF_BLOCK, &lt->reginfo,
- (REGMAINT *)R_ADDR(&lt->reginfo, region->maint_off))) != 0)
+ lp->gen = 0;
+ if ((ret = __db_mutex_setup(dbenv, &lt->reginfo, &lp->mutex,
+ MUTEX_NO_RLOCK | MUTEX_SELF_BLOCK)) != 0)
return (ret);
- MUTEX_LOCK(dbenv, &lp->mutex, lt->dbenv->lockfhp);
+ MUTEX_LOCK(dbenv, &lp->mutex);
SH_TAILQ_INSERT_HEAD(&region->free_locks, lp, links, __db_lock);
}
/* Initialize objects onto a free list. */
SH_TAILQ_INIT(&region->dd_objs);
SH_TAILQ_INIT(&region->free_objs);
- for (i = 0; i < region->maxobjects; ++i) {
+ for (i = 0; i < region->stat.st_maxobjects; ++i) {
if ((ret = __db_shalloc(lt->reginfo.addr,
sizeof(DB_LOCKOBJ), 0, &op)) != 0)
goto mem_err;
@@ -313,13 +269,15 @@ __lock_init(dbenv, lt)
}
/* Initialize lockers onto a free list. */
+ SH_TAILQ_INIT(&region->lockers);
SH_TAILQ_INIT(&region->free_lockers);
- for (i = 0; i < region->maxlockers; ++i) {
+ for (i = 0; i < region->stat.st_maxlockers; ++i) {
if ((ret = __db_shalloc(lt->reginfo.addr,
sizeof(DB_LOCKER), 0, &lidp)) != 0) {
-mem_err: __db_err(dbenv, "Unable to allocate memory for the lock table");
- return (ret);
- }
+mem_err: __db_err(dbenv,
+ "Unable to allocate memory for the lock table");
+ return (ret);
+ }
SH_TAILQ_INSERT_HEAD(
&region->free_lockers, lidp, links, __db_locker);
}
@@ -328,13 +286,14 @@ mem_err: __db_err(dbenv, "Unable to allocate memory for the lock table");
}
/*
- * __lock_close --
- * Internal version of lock_close: only called from db_appinit.
+ * __lock_dbenv_refresh --
+ * Clean up after the lock system on a close or failed open. Called
+ * only from __dbenv_refresh. (Formerly called __lock_close.)
*
- * PUBLIC: int __lock_close __P((DB_ENV *));
+ * PUBLIC: int __lock_dbenv_refresh __P((DB_ENV *));
*/
int
-__lock_close(dbenv)
+__lock_dbenv_refresh(dbenv)
DB_ENV *dbenv;
{
DB_LOCKTAB *lt;
@@ -345,7 +304,7 @@ __lock_close(dbenv)
/* Detach from the region. */
ret = __db_r_detach(dbenv, &lt->reginfo, 0);
- __os_free(lt, sizeof(*lt));
+ __os_free(dbenv, lt);
dbenv->lk_handle = NULL;
return (ret);
@@ -369,17 +328,19 @@ __lock_region_size(dbenv)
retval += __db_shalloc_size(sizeof(DB_LOCKREGION), 1);
retval += __db_shalloc_size(dbenv->lk_modes * dbenv->lk_modes, 1);
retval += __db_shalloc_size(
- __db_tablesize(dbenv->lk_max_lockers) * (sizeof(DB_HASHTAB)), 1);
+ __db_tablesize(dbenv->lk_max_lockers) * (sizeof(DB_HASHTAB)), 1);
retval += __db_shalloc_size(
- __db_tablesize(dbenv->lk_max_objects) * (sizeof(DB_HASHTAB)), 1);
-#ifdef MUTEX_SYSTEM_RESOURCES
+ __db_tablesize(dbenv->lk_max_objects) * (sizeof(DB_HASHTAB)), 1);
+#ifdef HAVE_MUTEX_SYSTEM_RESOURCES
retval +=
__db_shalloc_size(sizeof(REGMAINT) + __lock_region_maint(dbenv), 1);
#endif
retval += __db_shalloc_size(
- sizeof(struct __db_lock), MUTEX_ALIGN) * dbenv->lk_max;
- retval += __db_shalloc_size(sizeof(DB_LOCKOBJ), 1) * dbenv->lk_max_objects;
- retval += __db_shalloc_size(sizeof(DB_LOCKER), 1) * dbenv->lk_max_lockers;
+ sizeof(struct __db_lock), MUTEX_ALIGN) * dbenv->lk_max;
+ retval += __db_shalloc_size(
+ sizeof(DB_LOCKOBJ), 1) * dbenv->lk_max_objects;
+ retval += __db_shalloc_size(
+ sizeof(DB_LOCKER), 1) * dbenv->lk_max_lockers;
/*
* Include 16 bytes of string space per lock. DB doesn't use it
@@ -393,7 +354,7 @@ __lock_region_size(dbenv)
return (retval);
}
-#ifdef MUTEX_SYSTEM_RESOURCES
+#ifdef HAVE_MUTEX_SYSTEM_RESOURCES
/*
* __lock_region_maint --
* Return the amount of space needed for region maintenance info.
@@ -404,7 +365,7 @@ __lock_region_maint(dbenv)
{
size_t s;
- s = sizeof(MUTEX *) * dbenv->lk_max;
+ s = sizeof(DB_MUTEX *) * dbenv->lk_max;
return (s);
}
#endif
@@ -420,12 +381,37 @@ __lock_region_destroy(dbenv, infop)
DB_ENV *dbenv;
REGINFO *infop;
{
- DB_LOCKREGION *region;
+ __db_shlocks_destroy(infop, (REGMAINT *)R_ADDR(infop,
+ ((DB_LOCKREGION *)R_ADDR(infop, infop->rp->primary))->maint_off));
COMPQUIET(dbenv, NULL);
- region = R_ADDR(infop, infop->rp->primary);
+ COMPQUIET(infop, NULL);
+}
+
+#ifdef CONFIG_TEST
+/*
+ * __lock_id_set --
+ * Set the current locker ID and current maximum unused ID (for
+ * testing purposes only).
+ *
+ * PUBLIC: int __lock_id_set __P((DB_ENV *, u_int32_t, u_int32_t));
+ */
+int
+__lock_id_set(dbenv, cur_id, max_id)
+ DB_ENV *dbenv;
+ u_int32_t cur_id, max_id;
+{
+ DB_LOCKTAB *lt;
+ DB_LOCKREGION *region;
- __db_shlocks_destroy(infop,
- (REGMAINT *)R_ADDR(infop, region->maint_off));
- return;
+ ENV_REQUIRES_CONFIG(dbenv,
+ dbenv->lk_handle, "lock_id_set", DB_INIT_LOCK);
+
+ lt = dbenv->lk_handle;
+ region = lt->reginfo.primary;
+ region->stat.st_id = cur_id;
+ region->stat.st_cur_maxid = max_id;
+
+ return (0);
}
+#endif
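
Editorial note (not part of the patch): __lock_init() above preallocates the region's locks, objects, and lockers and threads each set onto a free list so that later requests never allocate. A standalone sketch of that pattern, where a single calloc'd slab stands in for the per-entry shared-memory allocations and a plain pointer replaces the SH_TAILQ links:

/* Illustrative only: preallocate a fixed pool and thread it on a free list. */
#include <stdio.h>
#include <stdlib.h>

struct demo_lock {
	int status;
	struct demo_lock *next;		/* free-list link */
};

struct demo_pool {
	struct demo_lock *slab;		/* one allocation for the whole pool */
	struct demo_lock *free_head;
};

static int
pool_init(struct demo_pool *p, size_t maxlocks)
{
	size_t i;

	if ((p->slab = calloc(maxlocks, sizeof(*p->slab))) == NULL)
		return (-1);
	p->free_head = NULL;
	for (i = 0; i < maxlocks; i++) {	/* push each entry on the list */
		p->slab[i].next = p->free_head;
		p->free_head = &p->slab[i];
	}
	return (0);
}

static struct demo_lock *
pool_get(struct demo_pool *p)
{
	struct demo_lock *lp;

	if ((lp = p->free_head) != NULL)
		p->free_head = lp->next;
	return (lp);		/* NULL means the table is full */
}

int
main(void)
{
	struct demo_pool p;

	if (pool_init(&p, 4) != 0)
		return (1);
	printf("got a lock? %s\n", pool_get(&p) != NULL ? "yes" : "no");
	free(p.slab);
	return (0);
}
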
diff --git a/bdb/lock/lock_stat.c b/bdb/lock/lock_stat.c
index ed5b60d0d7a..0bef3e18021 100644
--- a/bdb/lock/lock_stat.c
+++ b/bdb/lock/lock_stat.c
@@ -1,94 +1,105 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Copyright (c) 1996-2002
* Sleepycat Software. All rights reserved.
*/
#include "db_config.h"
#ifndef lint
-static const char revid[] = "$Id: lock_stat.c,v 11.4 2000/12/08 20:15:31 ubell Exp $";
+static const char revid[] = "$Id: lock_stat.c,v 11.32 2002/08/14 20:08:51 bostic Exp $";
#endif /* not lint */
#ifndef NO_SYSTEM_INCLUDES
#include <sys/types.h>
-
-#include <ctype.h>
+#include <string.h>
+#if TIME_WITH_SYS_TIME
+#include <sys/time.h>
+#include <time.h>
+#else
+#if HAVE_SYS_TIME_H
+#include <sys/time.h>
+#else
+#include <time.h>
+#endif
#endif
-#ifdef HAVE_RPC
-#include "db_server.h"
+#include <ctype.h>
#endif
#include "db_int.h"
-#include "db_shash.h"
-#include "lock.h"
-
-#ifdef HAVE_RPC
-#include "gen_client_ext.h"
-#include "rpc_client_ext.h"
-#endif
+#include "dbinc/db_shash.h"
+#include "dbinc/db_page.h"
+#include "dbinc/lock.h"
+#include "dbinc/log.h"
+#include "dbinc/db_am.h"
static void __lock_dump_locker __P((DB_LOCKTAB *, DB_LOCKER *, FILE *));
static void __lock_dump_object __P((DB_LOCKTAB *, DB_LOCKOBJ *, FILE *));
-static const char *
- __lock_dump_status __P((db_status_t));
+static void __lock_printheader __P((void));
/*
- * lock_stat --
+ * __lock_stat --
* Return LOCK statistics.
+ *
+ * PUBLIC: int __lock_stat __P((DB_ENV *, DB_LOCK_STAT **, u_int32_t));
*/
int
-lock_stat(dbenv, statp, db_malloc)
+__lock_stat(dbenv, statp, flags)
DB_ENV *dbenv;
DB_LOCK_STAT **statp;
- void *(*db_malloc) __P((size_t));
+ u_int32_t flags;
{
DB_LOCKREGION *region;
DB_LOCKTAB *lt;
- DB_LOCK_STAT *stats;
+ DB_LOCK_STAT *stats, tmp;
int ret;
-#ifdef HAVE_RPC
- if (F_ISSET(dbenv, DB_ENV_RPCCLIENT))
- return (__dbcl_lock_stat(dbenv, statp, db_malloc));
-#endif
-
PANIC_CHECK(dbenv);
- ENV_REQUIRES_CONFIG(dbenv, dbenv->lk_handle, DB_INIT_LOCK);
+ ENV_REQUIRES_CONFIG(dbenv,
+ dbenv->lk_handle, "DB_ENV->lock_stat", DB_INIT_LOCK);
*statp = NULL;
+ if ((ret = __db_fchk(dbenv,
+ "DB_ENV->lock_stat", flags, DB_STAT_CLEAR)) != 0)
+ return (ret);
lt = dbenv->lk_handle;
- if ((ret = __os_malloc(dbenv, sizeof(*stats), db_malloc, &stats)) != 0)
+ if ((ret = __os_umalloc(dbenv, sizeof(*stats), &stats)) != 0)
return (ret);
/* Copy out the global statistics. */
R_LOCK(dbenv, &lt->reginfo);
region = lt->reginfo.primary;
- stats->st_lastid = region->id;
- stats->st_maxlocks = region->maxlocks;
- stats->st_maxlockers = region->maxlockers;
- stats->st_maxobjects = region->maxobjects;
- stats->st_nmodes = region->nmodes;
- stats->st_nlockers = region->nlockers;
- stats->st_maxnlockers = region->maxnlockers;
- stats->st_nobjects = region->nobjects;
- stats->st_maxnobjects = region->maxnobjects;
- stats->st_nlocks = region->nlocks;
- stats->st_maxnlocks = region->maxnlocks;
- stats->st_nconflicts = region->nconflicts;
- stats->st_nrequests = region->nrequests;
- stats->st_nreleases = region->nreleases;
- stats->st_nnowaits = region->nnowaits;
- stats->st_ndeadlocks = region->ndeadlocks;
+ memcpy(stats, &region->stat, sizeof(*stats));
+ stats->st_locktimeout = region->lk_timeout;
+ stats->st_txntimeout = region->tx_timeout;
stats->st_region_wait = lt->reginfo.rp->mutex.mutex_set_wait;
stats->st_region_nowait = lt->reginfo.rp->mutex.mutex_set_nowait;
stats->st_regsize = lt->reginfo.rp->size;
+ if (LF_ISSET(DB_STAT_CLEAR)) {
+ tmp = region->stat;
+ memset(&region->stat, 0, sizeof(region->stat));
+ lt->reginfo.rp->mutex.mutex_set_wait = 0;
+ lt->reginfo.rp->mutex.mutex_set_nowait = 0;
+
+ region->stat.st_id = tmp.st_id;
+ region->stat.st_cur_maxid = tmp.st_cur_maxid;
+ region->stat.st_maxlocks = tmp.st_maxlocks;
+ region->stat.st_maxlockers = tmp.st_maxlockers;
+ region->stat.st_maxobjects = tmp.st_maxobjects;
+ region->stat.st_nlocks =
+ region->stat.st_maxnlocks = tmp.st_nlocks;
+ region->stat.st_nlockers =
+ region->stat.st_maxnlockers = tmp.st_nlockers;
+ region->stat.st_nobjects =
+ region->stat.st_maxnobjects = tmp.st_nobjects;
+ region->stat.st_nmodes = tmp.st_nmodes;
+ }
R_UNLOCK(dbenv, &lt->reginfo);
@@ -97,30 +108,34 @@ lock_stat(dbenv, statp, db_malloc)
}
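
Editorial note (not part of the patch): the DB_STAT_CLEAR handling in __lock_stat() above zeroes the counters but carries configuration values forward and restarts the high-water marks from the current counts. A simplified standalone sketch of that snapshot-and-clear pattern, with a made-up stat structure covering only a few representative fields:

/* Illustrative only: clear counters while preserving config and current
 * values, and restart the "max so far" fields from the current counts. */
#include <stdio.h>
#include <string.h>

struct demo_stat {
	unsigned st_maxlocks;	/* configuration: never cleared */
	unsigned st_nlocks;	/* current count: survives the clear */
	unsigned st_maxnlocks;	/* high-water mark: reset to current */
	unsigned st_nrequests;	/* plain counter: cleared */
};

static void
stat_clear(struct demo_stat *s)
{
	struct demo_stat tmp = *s;	/* snapshot before zeroing */

	memset(s, 0, sizeof(*s));
	s->st_maxlocks = tmp.st_maxlocks;
	s->st_nlocks = s->st_maxnlocks = tmp.st_nlocks;
}

int
main(void)
{
	struct demo_stat s = { 1000, 42, 77, 123456 };

	stat_clear(&s);
	printf("%u %u %u %u\n",
	    s.st_maxlocks, s.st_nlocks, s.st_maxnlocks, s.st_nrequests);
	return (0);
}
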
#define LOCK_DUMP_CONF 0x001 /* Conflict matrix. */
-#define LOCK_DUMP_FREE 0x002 /* Display lock free list. */
-#define LOCK_DUMP_LOCKERS 0x004 /* Display lockers. */
-#define LOCK_DUMP_MEM 0x008 /* Display region memory. */
-#define LOCK_DUMP_OBJECTS 0x010 /* Display objects. */
-#define LOCK_DUMP_ALL 0x01f /* Display all. */
+#define LOCK_DUMP_LOCKERS 0x002 /* Display lockers. */
+#define LOCK_DUMP_MEM 0x004 /* Display region memory. */
+#define LOCK_DUMP_OBJECTS 0x008 /* Display objects. */
+#define LOCK_DUMP_PARAMS 0x010 /* Display params. */
+#define LOCK_DUMP_ALL /* All */ \
+ (LOCK_DUMP_CONF | LOCK_DUMP_LOCKERS | LOCK_DUMP_MEM | \
+ LOCK_DUMP_OBJECTS | LOCK_DUMP_PARAMS)
/*
* __lock_dump_region --
*
- * PUBLIC: void __lock_dump_region __P((DB_ENV *, char *, FILE *));
+ * PUBLIC: int __lock_dump_region __P((DB_ENV *, char *, FILE *));
*/
-void
+int
__lock_dump_region(dbenv, area, fp)
DB_ENV *dbenv;
char *area;
FILE *fp;
{
- struct __db_lock *lp;
DB_LOCKER *lip;
DB_LOCKOBJ *op;
DB_LOCKREGION *lrp;
DB_LOCKTAB *lt;
u_int32_t flags, i, j;
- int label;
+
+ PANIC_CHECK(dbenv);
+ ENV_REQUIRES_CONFIG(dbenv,
+ dbenv->lk_handle, "lock_dump_region", DB_INIT_LOCK);
/* Make it easy to call from the debugger. */
if (fp == NULL)
@@ -134,9 +149,6 @@ __lock_dump_region(dbenv, area, fp)
case 'c':
LF_SET(LOCK_DUMP_CONF);
break;
- case 'f':
- LF_SET(LOCK_DUMP_FREE);
- break;
case 'l':
LF_SET(LOCK_DUMP_LOCKERS);
break;
@@ -146,91 +158,67 @@ __lock_dump_region(dbenv, area, fp)
case 'o':
LF_SET(LOCK_DUMP_OBJECTS);
break;
+ case 'p':
+ LF_SET(LOCK_DUMP_PARAMS);
+ break;
}
lt = dbenv->lk_handle;
lrp = lt->reginfo.primary;
LOCKREGION(dbenv, lt);
- fprintf(fp, "%s\nLock region parameters\n", DB_LINE);
- fprintf(fp, "%s: %lu, %s: %lu, %s: %lu, %s: %lu, %s: %lu, %s: %lu, %s: %lu\n",
- "locker table size", (u_long)lrp->locker_t_size,
- "object table size", (u_long)lrp->object_t_size,
- "obj_off", (u_long)lrp->obj_off,
- "osynch_off", (u_long)lrp->osynch_off,
- "locker_off", (u_long)lrp->locker_off,
- "lsynch_off", (u_long)lrp->lsynch_off,
- "need_dd", (u_long)lrp->need_dd);
+ if (LF_ISSET(LOCK_DUMP_PARAMS)) {
+ fprintf(fp, "%s\nLock region parameters\n", DB_LINE);
+ fprintf(fp,
+ "%s: %lu, %s: %lu, %s: %lu,\n%s: %lu, %s: %lu, %s: %lu, %s: %lu\n",
+ "locker table size", (u_long)lrp->locker_t_size,
+ "object table size", (u_long)lrp->object_t_size,
+ "obj_off", (u_long)lrp->obj_off,
+ "osynch_off", (u_long)lrp->osynch_off,
+ "locker_off", (u_long)lrp->locker_off,
+ "lsynch_off", (u_long)lrp->lsynch_off,
+ "need_dd", (u_long)lrp->need_dd);
+ }
if (LF_ISSET(LOCK_DUMP_CONF)) {
fprintf(fp, "\n%s\nConflict matrix\n", DB_LINE);
- for (i = 0; i < lrp->nmodes; i++) {
- for (j = 0; j < lrp->nmodes; j++)
- fprintf(fp, "%lu\t",
- (u_long)lt->conflicts[i * lrp->nmodes + j]);
+ for (i = 0; i < lrp->stat.st_nmodes; i++) {
+ for (j = 0; j < lrp->stat.st_nmodes; j++)
+ fprintf(fp, "%lu\t", (u_long)
+ lt->conflicts[i * lrp->stat.st_nmodes + j]);
fprintf(fp, "\n");
}
}
if (LF_ISSET(LOCK_DUMP_LOCKERS)) {
- fprintf(fp, "%s\nLocker hash buckets\n", DB_LINE);
- for (i = 0; i < lrp->locker_t_size; i++) {
- label = 1;
+ fprintf(fp, "%s\nLocks grouped by lockers\n", DB_LINE);
+ __lock_printheader();
+ for (i = 0; i < lrp->locker_t_size; i++)
for (lip =
SH_TAILQ_FIRST(&lt->locker_tab[i], __db_locker);
lip != NULL;
lip = SH_TAILQ_NEXT(lip, links, __db_locker)) {
- if (label) {
- fprintf(fp, "Bucket %lu:\n", (u_long)i);
- label = 0;
- }
__lock_dump_locker(lt, lip, fp);
}
- }
}
if (LF_ISSET(LOCK_DUMP_OBJECTS)) {
- fprintf(fp, "%s\nObject hash buckets\n", DB_LINE);
+ fprintf(fp, "%s\nLocks grouped by object\n", DB_LINE);
+ __lock_printheader();
for (i = 0; i < lrp->object_t_size; i++) {
- label = 1;
for (op = SH_TAILQ_FIRST(&lt->obj_tab[i], __db_lockobj);
op != NULL;
- op = SH_TAILQ_NEXT(op, links, __db_lockobj)) {
- if (label) {
- fprintf(fp, "Bucket %lu:\n", (u_long)i);
- label = 0;
- }
+ op = SH_TAILQ_NEXT(op, links, __db_lockobj))
__lock_dump_object(lt, op, fp);
- }
}
}
- if (LF_ISSET(LOCK_DUMP_FREE)) {
- fprintf(fp, "%s\nLock free list\n", DB_LINE);
- for (lp = SH_TAILQ_FIRST(&lrp->free_locks, __db_lock);
- lp != NULL;
- lp = SH_TAILQ_NEXT(lp, links, __db_lock))
- fprintf(fp, "0x%lx: %lu\t%lu\t%s\t0x%lx\n", (u_long)lp,
- (u_long)lp->holder, (u_long)lp->mode,
- __lock_dump_status(lp->status), (u_long)lp->obj);
-
- fprintf(fp, "%s\nObject free list\n", DB_LINE);
- for (op = SH_TAILQ_FIRST(&lrp->free_objs, __db_lockobj);
- op != NULL;
- op = SH_TAILQ_NEXT(op, links, __db_lockobj))
- fprintf(fp, "0x%lx\n", (u_long)op);
-
- fprintf(fp, "%s\nLocker free list\n", DB_LINE);
- for (lip = SH_TAILQ_FIRST(&lrp->free_lockers, __db_locker);
- lip != NULL;
- lip = SH_TAILQ_NEXT(lip, links, __db_locker))
- fprintf(fp, "0x%lx\n", (u_long)lip);
- }
-
if (LF_ISSET(LOCK_DUMP_MEM))
__db_shalloc_dump(lt->reginfo.addr, fp);
UNLOCKREGION(dbenv, lt);
+
+ return (0);
}
static void
@@ -240,16 +228,35 @@ __lock_dump_locker(lt, lip, fp)
FILE *fp;
{
struct __db_lock *lp;
+ time_t s;
+ char buf[64];
- fprintf(fp, "L %lx [%ld]", (u_long)lip->id, (long)lip->dd_id);
+ fprintf(fp, "%8lx dd=%2ld locks held %-4d write locks %-4d",
+ (u_long)lip->id, (long)lip->dd_id, lip->nlocks, lip->nwrites);
fprintf(fp, " %s ", F_ISSET(lip, DB_LOCKER_DELETED) ? "(D)" : " ");
+ if (LOCK_TIME_ISVALID(&lip->tx_expire)) {
+ s = lip->tx_expire.tv_sec;
+ strftime(buf, sizeof(buf), "%m-%d-%H:%M:%S", localtime(&s));
+ fprintf(fp,
+ " expires %s.%lu", buf, (u_long)lip->tx_expire.tv_usec);
+ }
+ if (F_ISSET(lip, DB_LOCKER_TIMEOUT))
+ fprintf(fp, " lk timeout %u", lip->lk_timeout);
+ if (LOCK_TIME_ISVALID(&lip->lk_expire)) {
+ s = lip->lk_expire.tv_sec;
+ strftime(buf, sizeof(buf), "%m-%d-%H:%M:%S", localtime(&s));
+ fprintf(fp,
+ " lk expires %s.%lu", buf, (u_long)lip->lk_expire.tv_usec);
+ }
+ fprintf(fp, "\n");
- if ((lp = SH_LIST_FIRST(&lip->heldby, __db_lock)) == NULL)
- fprintf(fp, "\n");
- else
+ lp = SH_LIST_FIRST(&lip->heldby, __db_lock);
+ if (lp != NULL) {
for (; lp != NULL;
lp = SH_LIST_NEXT(lp, locker_links, __db_lock))
__lock_printlock(lt, lp, 1);
+ fprintf(fp, "\n");
+ }
}
static void
@@ -259,50 +266,133 @@ __lock_dump_object(lt, op, fp)
FILE *fp;
{
struct __db_lock *lp;
- u_int32_t j;
- u_int8_t *ptr;
- u_int ch;
-
- ptr = SH_DBT_PTR(&op->lockobj);
- for (j = 0; j < op->lockobj.size; ptr++, j++) {
- ch = *ptr;
- fprintf(fp, isprint(ch) ? "%c" : "\\%o", ch);
- }
- fprintf(fp, "\n");
- fprintf(fp, "H:");
for (lp =
SH_TAILQ_FIRST(&op->holders, __db_lock);
lp != NULL;
lp = SH_TAILQ_NEXT(lp, links, __db_lock))
__lock_printlock(lt, lp, 1);
- lp = SH_TAILQ_FIRST(&op->waiters, __db_lock);
- if (lp != NULL) {
- fprintf(fp, "\nW:");
- for (; lp != NULL; lp = SH_TAILQ_NEXT(lp, links, __db_lock))
- __lock_printlock(lt, lp, 1);
- }
+ for (lp =
+ SH_TAILQ_FIRST(&op->waiters, __db_lock);
+ lp != NULL;
+ lp = SH_TAILQ_NEXT(lp, links, __db_lock))
+ __lock_printlock(lt, lp, 1);
+
+ fprintf(fp, "\n");
}
-static const char *
-__lock_dump_status(status)
- db_status_t status;
+/*
+ * __lock_printheader --
+ */
+static void
+__lock_printheader()
{
- switch (status) {
+ printf("%-8s %-6s %-6s %-10s %s\n",
+ "Locker", "Mode",
+ "Count", "Status", "----------- Object ----------");
+}
+
+/*
+ * __lock_printlock --
+ *
+ * PUBLIC: void __lock_printlock __P((DB_LOCKTAB *, struct __db_lock *, int));
+ */
+void
+__lock_printlock(lt, lp, ispgno)
+ DB_LOCKTAB *lt;
+ struct __db_lock *lp;
+ int ispgno;
+{
+ DB_LOCKOBJ *lockobj;
+ db_pgno_t pgno;
+ u_int32_t *fidp, type;
+ u_int8_t *ptr;
+ char *namep;
+ const char *mode, *status;
+
+ switch (lp->mode) {
+ case DB_LOCK_DIRTY:
+ mode = "DIRTY_READ";
+ break;
+ case DB_LOCK_IREAD:
+ mode = "IREAD";
+ break;
+ case DB_LOCK_IWR:
+ mode = "IWR";
+ break;
+ case DB_LOCK_IWRITE:
+ mode = "IWRITE";
+ break;
+ case DB_LOCK_NG:
+ mode = "NG";
+ break;
+ case DB_LOCK_READ:
+ mode = "READ";
+ break;
+ case DB_LOCK_WRITE:
+ mode = "WRITE";
+ break;
+ case DB_LOCK_WWRITE:
+ mode = "WAS_WRITE";
+ break;
+ case DB_LOCK_WAIT:
+ mode = "WAIT";
+ break;
+ default:
+ mode = "UNKNOWN";
+ break;
+ }
+ switch (lp->status) {
case DB_LSTAT_ABORTED:
- return ("aborted");
+ status = "ABORT";
+ break;
case DB_LSTAT_ERR:
- return ("err");
+ status = "ERROR";
+ break;
case DB_LSTAT_FREE:
- return ("free");
+ status = "FREE";
+ break;
case DB_LSTAT_HELD:
- return ("held");
- case DB_LSTAT_NOGRANT:
- return ("nogrant");
- case DB_LSTAT_PENDING:
- return ("pending");
+ status = "HELD";
+ break;
case DB_LSTAT_WAITING:
- return ("waiting");
+ status = "WAIT";
+ break;
+ case DB_LSTAT_PENDING:
+ status = "PENDING";
+ break;
+ case DB_LSTAT_EXPIRED:
+ status = "EXPIRED";
+ break;
+ default:
+ status = "UNKNOWN";
+ break;
+ }
+ printf("%8lx %-6s %6lu %-10s ",
+ (u_long)lp->holder, mode, (u_long)lp->refcount, status);
+
+ lockobj = (DB_LOCKOBJ *)((u_int8_t *)lp + lp->obj);
+ ptr = SH_DBT_PTR(&lockobj->lockobj);
+ if (ispgno && lockobj->lockobj.size == sizeof(struct __db_ilock)) {
+ /* Assume this is a DBT lock. */
+ memcpy(&pgno, ptr, sizeof(db_pgno_t));
+ fidp = (u_int32_t *)(ptr + sizeof(db_pgno_t));
+ type = *(u_int32_t *)(ptr + sizeof(db_pgno_t) + DB_FILE_ID_LEN);
+ if (__dbreg_get_name(lt->dbenv, (u_int8_t *)fidp, &namep) != 0)
+ namep = NULL;
+ if (namep == NULL)
+ printf("(%lx %lx %lx %lx %lx)",
+ (u_long)fidp[0], (u_long)fidp[1], (u_long)fidp[2],
+ (u_long)fidp[3], (u_long)fidp[4]);
+ else
+ printf("%-20s", namep);
+ printf("%-7s %lu\n",
+ type == DB_PAGE_LOCK ? "page" :
+ type == DB_RECORD_LOCK ? "record" : "handle",
+ (u_long)pgno);
+ } else {
+ printf("0x%lx ", (u_long)R_OFFSET(&lt->reginfo, lockobj));
+ __db_pr(ptr, lockobj->lockobj.size, stdout);
+ printf("\n");
}
- return ("unknown status");
}
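
A minimal sketch, not part of the patch, showing how the reworked LOCK_DUMP_* bits above compose: LOCK_DUMP_ALL is now the bitwise OR of the individual bits rather than a hard-coded mask, and the "area" string handed to __lock_dump_region selects bits character by character. The helper name below is hypothetical and only the cases visible in the hunk ('c', 'l', 'm', 'o', 'p') are shown.

	/*
	 * Illustrative only: translate an area string such as "clmop"
	 * into a LOCK_DUMP_* mask, mirroring the switch in
	 * __lock_dump_region above.
	 */
	static u_int32_t
	__lock_dump_parse_area(const char *area)
	{
		u_int32_t flags;

		flags = 0;
		for (; *area != '\0'; ++area)
			switch (*area) {
			case 'c':
				flags |= LOCK_DUMP_CONF;
				break;
			case 'l':
				flags |= LOCK_DUMP_LOCKERS;
				break;
			case 'm':
				flags |= LOCK_DUMP_MEM;
				break;
			case 'o':
				flags |= LOCK_DUMP_OBJECTS;
				break;
			case 'p':
				flags |= LOCK_DUMP_PARAMS;
				break;
			}
		return (flags);
	}

Passing all five characters yields the same mask as LOCK_DUMP_ALL.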
diff --git a/bdb/lock/lock_util.c b/bdb/lock/lock_util.c
index fd5c6ad90cb..260f021b1ee 100644
--- a/bdb/lock/lock_util.c
+++ b/bdb/lock/lock_util.c
@@ -1,14 +1,14 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Copyright (c) 1996-2002
* Sleepycat Software. All rights reserved.
*/
#include "db_config.h"
#ifndef lint
-static const char revid[] = "$Id: lock_util.c,v 11.5 2000/07/04 18:28:24 bostic Exp $";
+static const char revid[] = "$Id: lock_util.c,v 11.8 2002/03/27 04:32:20 bostic Exp $";
#endif /* not lint */
#ifndef NO_SYSTEM_INCLUDES
@@ -18,10 +18,10 @@ static const char revid[] = "$Id: lock_util.c,v 11.5 2000/07/04 18:28:24 bostic
#endif
#include "db_int.h"
-#include "db_page.h"
-#include "db_shash.h"
-#include "hash.h"
-#include "lock.h"
+#include "dbinc/db_page.h"
+#include "dbinc/db_shash.h"
+#include "dbinc/hash.h"
+#include "dbinc/lock.h"
/*
* __lock_cmp --
diff --git a/bdb/log/log.c b/bdb/log/log.c
index 8ddb7bcaf7d..f57caeccb95 100644
--- a/bdb/log/log.c
+++ b/bdb/log/log.c
@@ -1,40 +1,34 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Copyright (c) 1996-2002
* Sleepycat Software. All rights reserved.
*/
#include "db_config.h"
#ifndef lint
-static const char revid[] = "$Id: log.c,v 11.42 2001/01/15 16:42:37 bostic Exp $";
+static const char revid[] = "$Id: log.c,v 11.111 2002/08/16 00:27:44 ubell Exp $";
#endif /* not lint */
#ifndef NO_SYSTEM_INCLUDES
#include <sys/types.h>
+#include <ctype.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#endif
-#ifdef HAVE_RPC
-#include "db_server.h"
-#endif
-
#include "db_int.h"
-#include "log.h"
-#include "db_dispatch.h"
-#include "txn.h"
-#include "txn_auto.h"
-
-#ifdef HAVE_RPC
-#include "gen_client_ext.h"
-#include "rpc_client_ext.h"
-#endif
+#include "dbinc/crypto.h"
+#include "dbinc/hmac.h"
+#include "dbinc/log.h"
+#include "dbinc/txn.h"
static int __log_init __P((DB_ENV *, DB_LOG *));
static int __log_recover __P((DB_LOG *));
+static size_t __log_region_size __P((DB_ENV *));
+static int __log_zero __P((DB_ENV *, DB_LSN *, DB_LSN *));
/*
* __log_open --
@@ -49,16 +43,10 @@ __log_open(dbenv)
DB_LOG *dblp;
LOG *lp;
int ret;
- u_int8_t *readbufp;
-
- readbufp = NULL;
/* Create/initialize the DB_LOG structure. */
if ((ret = __os_calloc(dbenv, 1, sizeof(DB_LOG), &dblp)) != 0)
return (ret);
- if ((ret = __os_calloc(dbenv, 1, dbenv->lg_bsize, &readbufp)) != 0)
- goto err;
- ZERO_LSN(dblp->c_lsn);
dblp->dbenv = dbenv;
/* Join/create the log region. */
@@ -69,40 +57,66 @@ __log_open(dbenv)
if (F_ISSET(dbenv, DB_ENV_CREATE))
F_SET(&dblp->reginfo, REGION_CREATE_OK);
if ((ret = __db_r_attach(
- dbenv, &dblp->reginfo, LG_BASE_REGION_SIZE + dbenv->lg_bsize)) != 0)
+ dbenv, &dblp->reginfo, __log_region_size(dbenv))) != 0)
goto err;
- dblp->readbufp = readbufp;
-
/* If we created the region, initialize it. */
- if (F_ISSET(&dblp->reginfo, REGION_CREATE) &&
- (ret = __log_init(dbenv, dblp)) != 0)
- goto err;
+ if (F_ISSET(&dblp->reginfo, REGION_CREATE))
+ if ((ret = __log_init(dbenv, dblp)) != 0)
+ goto err;
/* Set the local addresses. */
lp = dblp->reginfo.primary =
R_ADDR(&dblp->reginfo, dblp->reginfo.rp->primary);
- dblp->bufp = R_ADDR(&dblp->reginfo, lp->buffer_off);
/*
* If the region is threaded, then we have to lock both the handles
* and the region, and we need to allocate a mutex for that purpose.
*/
- if (F_ISSET(dbenv, DB_ENV_THREAD)) {
- if ((ret = __db_mutex_alloc(
- dbenv, &dblp->reginfo, &dblp->mutexp)) != 0)
- goto err;
- if ((ret = __db_mutex_init(
- dbenv, dblp->mutexp, 0, MUTEX_THREAD)) != 0)
+ if (F_ISSET(dbenv, DB_ENV_THREAD) &&
+ (ret = __db_mutex_setup(dbenv, &dblp->reginfo, &dblp->mutexp,
+ MUTEX_ALLOC | MUTEX_NO_RLOCK)) != 0)
+ goto err;
+
+ /* Initialize the rest of the structure. */
+ dblp->bufp = R_ADDR(&dblp->reginfo, lp->buffer_off);
+
+ /*
+ * Set the handle -- we may be about to run recovery, which allocates
+ * log cursors. Log cursors require logging be already configured,
+ * and the handle being set is what demonstrates that.
+ *
+ * If we created the region, run recovery. If that fails, make sure
+ * we reset the log handle before cleaning up, otherwise we will try
+ * and clean up again in the mainline DB_ENV initialization code.
+ */
+ dbenv->lg_handle = dblp;
+
+ if (F_ISSET(&dblp->reginfo, REGION_CREATE)) {
+ if ((ret = __log_recover(dblp)) != 0) {
+ dbenv->lg_handle = NULL;
goto err;
+ }
+
+ /*
+ * We first take the log file size from the environment, if
+ * specified. If that wasn't set, recovery may have set it
+ * from the persistent information in a log file header. If
+ * that didn't set it either, we default.
+ */
+ if (lp->log_size == 0)
+ lp->log_size = lp->log_nsize = LG_MAX_DEFAULT;
+ } else {
+ /*
+ * A process joining the region may have reset the log file
+ * size, too. If so, it only affects the next log file we
+ * create.
+ */
+ if (dbenv->lg_size != 0)
+ lp->log_nsize = dbenv->lg_size;
}
R_UNLOCK(dbenv, &dblp->reginfo);
-
- dblp->r_file = 0;
- dblp->r_off = 0;
- dblp->r_size = 0;
- dbenv->lg_handle = dblp;
return (0);
err: if (dblp->reginfo.addr != NULL) {
@@ -112,11 +126,11 @@ err: if (dblp->reginfo.addr != NULL) {
(void)__db_r_detach(dbenv, &dblp->reginfo, 0);
}
- if (readbufp != NULL)
- __os_free(readbufp, dbenv->lg_bsize);
if (dblp->mutexp != NULL)
__db_mutex_free(dbenv, &dblp->reginfo, dblp->mutexp);
- __os_free(dblp, sizeof(*dblp));
+
+ __os_free(dbenv, dblp);
+
return (ret);
}
@@ -129,9 +143,13 @@ __log_init(dbenv, dblp)
DB_ENV *dbenv;
DB_LOG *dblp;
{
+ DB_MUTEX *flush_mutexp;
LOG *region;
int ret;
void *p;
+#ifdef HAVE_MUTEX_SYSTEM_RESOURCES
+ u_int8_t *addr;
+#endif
if ((ret = __db_shalloc(dblp->reginfo.addr,
sizeof(*region), 0, &dblp->reginfo.primary)) != 0)
@@ -141,15 +159,55 @@ __log_init(dbenv, dblp)
region = dblp->reginfo.primary;
memset(region, 0, sizeof(*region));
- region->persist.lg_max = dbenv->lg_max;
- region->persist.magic = DB_LOGMAGIC;
- region->persist.version = DB_LOGVERSION;
- region->persist.mode = dbenv->db_mode;
+ region->fid_max = 0;
SH_TAILQ_INIT(&region->fq);
+ region->free_fid_stack = INVALID_ROFF;
+ region->free_fids = region->free_fids_alloced = 0;
/* Initialize LOG LSNs. */
- region->lsn.file = 1;
- region->lsn.offset = 0;
+ INIT_LSN(region->lsn);
+ INIT_LSN(region->ready_lsn);
+ INIT_LSN(region->t_lsn);
+
+ /*
+ * It's possible to be waiting for an LSN of [1][0], if a replication
+ * client gets the first log record out of order. An LSN of [0][0]
+ * signifies that we're not waiting.
+ */
+ ZERO_LSN(region->waiting_lsn);
+
+ /*
+ * Log makes note of the fact that it ran into a checkpoint on
+ * startup if it did so, as a recovery optimization. A zero
+ * LSN signifies that it hasn't found one [yet].
+ */
+ ZERO_LSN(region->cached_ckp_lsn);
+
+#ifdef HAVE_MUTEX_SYSTEM_RESOURCES
+ /* Allocate room for the log maintenance info and initialize it. */
+ if ((ret = __db_shalloc(dblp->reginfo.addr,
+ sizeof(REGMAINT) + LG_MAINT_SIZE, 0, &addr)) != 0)
+ goto mem_err;
+ __db_maintinit(&dblp->reginfo, addr, LG_MAINT_SIZE);
+ region->maint_off = R_OFFSET(&dblp->reginfo, addr);
+#endif
+
+ if ((ret = __db_mutex_setup(dbenv, &dblp->reginfo, &region->fq_mutex,
+ MUTEX_NO_RLOCK)) != 0)
+ return (ret);
+
+ /*
+ * We must create a place for the flush mutex separately; mutexes have
+ * to be aligned to MUTEX_ALIGN, and the only way to guarantee that is
+ * to make sure they're at the beginning of a shalloc'ed chunk.
+ */
+ if ((ret = __db_shalloc(dblp->reginfo.addr,
+ sizeof(DB_MUTEX), MUTEX_ALIGN, &flush_mutexp)) != 0)
+ goto mem_err;
+ if ((ret = __db_mutex_setup(dbenv, &dblp->reginfo, flush_mutexp,
+ MUTEX_NO_RLOCK)) != 0)
+ return (ret);
+ region->flush_mutex_off = R_OFFSET(&dblp->reginfo, flush_mutexp);
/* Initialize the buffer. */
if ((ret =
@@ -159,9 +217,23 @@ mem_err: __db_err(dbenv, "Unable to allocate memory for the log buffer");
}
region->buffer_size = dbenv->lg_bsize;
region->buffer_off = R_OFFSET(&dblp->reginfo, p);
+ region->log_size = region->log_nsize = dbenv->lg_size;
- /* Try and recover any previous log files before releasing the lock. */
- return (__log_recover(dblp));
+ /* Initialize the commit queue. */
+ SH_TAILQ_INIT(&region->free_commits);
+ SH_TAILQ_INIT(&region->commits);
+ region->ncommit = 0;
+
+ /*
+ * Fill in the log's persistent header. Don't fill in the log file
+ * sizes, as they may change at any time and so have to be filled in
+ * as each log file is created.
+ */
+ region->persist.magic = DB_LOGMAGIC;
+ region->persist.version = DB_LOGVERSION;
+ region->persist.mode = (u_int32_t)dbenv->db_mode;
+
+ return (0);
}
/*
@@ -173,12 +245,16 @@ __log_recover(dblp)
DB_LOG *dblp;
{
DBT dbt;
+ DB_ENV *dbenv;
+ DB_LOGC *logc;
DB_LSN lsn;
LOG *lp;
- int cnt, found_checkpoint, ret;
- u_int32_t chk;
+ u_int32_t cnt, rectype;
+ int ret;
logfile_validity status;
+ logc = NULL;
+ dbenv = dblp->dbenv;
lp = dblp->reginfo.primary;
/*
@@ -192,8 +268,9 @@ __log_recover(dblp)
/*
* If the last file is an old version, readable or no, start a new
- * file. Don't bother finding checkpoints; if we didn't take a
- * checkpoint right before upgrading, the user screwed up anyway.
+ * file. Don't bother finding the end of the last log file;
+ * we assume that it's valid in its entirety, since the user
+ * should have shut down cleanly or run recovery before upgrading.
*/
if (status == DB_LV_OLD_READABLE || status == DB_LV_OLD_UNREADABLE) {
lp->lsn.file = lp->s_lsn.file = cnt + 1;
@@ -213,25 +290,35 @@ __log_recover(dblp)
lsn.file = cnt;
lsn.offset = 0;
- /* Set the cursor. Shouldn't fail; leave error messages on. */
- memset(&dbt, 0, sizeof(dbt));
- if ((ret = __log_get(dblp, &lsn, &dbt, DB_SET, 0)) != 0)
+ /*
+ * Allocate a cursor and set it to the first record. This shouldn't
+ * fail, leave error messages on.
+ */
+ if ((ret = dbenv->log_cursor(dbenv, &logc, 0)) != 0)
return (ret);
+ F_SET(logc, DB_LOG_LOCKED);
+ memset(&dbt, 0, sizeof(dbt));
+ if ((ret = logc->get(logc, &lsn, &dbt, DB_SET)) != 0)
+ goto err;
/*
- * Read to the end of the file, saving checkpoints. This will fail
- * at some point, so turn off error messages.
+ * Read to the end of the file. This may fail at some point, so
+ * turn off error messages.
*/
- found_checkpoint = 0;
- while (__log_get(dblp, &lsn, &dbt, DB_NEXT, 1) == 0) {
+ F_SET(logc, DB_LOG_SILENT_ERR);
+ while (logc->get(logc, &lsn, &dbt, DB_NEXT) == 0) {
if (dbt.size < sizeof(u_int32_t))
continue;
- memcpy(&chk, dbt.data, sizeof(u_int32_t));
- if (chk == DB_txn_ckp) {
- lp->chkpt_lsn = lsn;
- found_checkpoint = 1;
- }
+ memcpy(&rectype, dbt.data, sizeof(u_int32_t));
+ if (rectype == DB___txn_ckp)
+ /*
+ * If we happen to run into a checkpoint, cache its
+ * LSN so that the transaction system doesn't have
+ * to walk this log file again looking for it.
+ */
+ lp->cached_ckp_lsn = lsn;
}
+ F_CLR(logc, DB_LOG_SILENT_ERR);
/*
* We now know where the end of the log is. Set the first LSN that
@@ -240,59 +327,24 @@ __log_recover(dblp)
*/
lp->lsn = lsn;
lp->s_lsn = lsn;
- lp->lsn.offset += dblp->c_len;
- lp->s_lsn.offset += dblp->c_len;
+ lp->lsn.offset += logc->c_len;
+ lp->s_lsn.offset += logc->c_len;
/* Set up the current buffer information, too. */
- lp->len = dblp->c_len;
+ lp->len = logc->c_len;
lp->b_off = 0;
lp->w_off = lp->lsn.offset;
- /*
- * It's possible that we didn't find a checkpoint because there wasn't
- * one in the last log file. Start searching.
- */
- if (!found_checkpoint && cnt > 1) {
- lsn.file = cnt;
- lsn.offset = 0;
-
- /* Set the cursor. Shouldn't fail, leave error messages on. */
- if ((ret = __log_get(dblp, &lsn, &dbt, DB_SET, 0)) != 0)
- return (ret);
-
- /*
- * Read to the end of the file, saving checkpoints. Again,
- * this can fail if there are no checkpoints in any log file,
- * so turn error messages off.
- */
- while (__log_get(dblp, &lsn, &dbt, DB_PREV, 1) == 0) {
- if (dbt.size < sizeof(u_int32_t))
- continue;
- memcpy(&chk, dbt.data, sizeof(u_int32_t));
- if (chk == DB_txn_ckp) {
- lp->chkpt_lsn = lsn;
- found_checkpoint = 1;
- break;
- }
- }
- }
-
- /* If we never find a checkpoint, that's okay, just 0 it out. */
- if (!found_checkpoint)
-skipsearch: ZERO_LSN(lp->chkpt_lsn);
-
- /*
- * Reset the cursor lsn to the beginning of the log, so that an
- * initial call to DB_NEXT does the right thing.
- */
- ZERO_LSN(dblp->c_lsn);
-
- if (FLD_ISSET(dblp->dbenv->verbose, DB_VERB_RECOVERY))
- __db_err(dblp->dbenv,
+skipsearch:
+ if (FLD_ISSET(dbenv->verbose, DB_VERB_RECOVERY))
+ __db_err(dbenv,
"Finding last valid log LSN: file: %lu offset %lu",
(u_long)lp->lsn.file, (u_long)lp->lsn.offset);
- return (0);
+err: if (logc != NULL)
+ (void)logc->close(logc, 0);
+
+ return (ret);
}
/*
@@ -301,20 +353,23 @@ skipsearch: ZERO_LSN(lp->chkpt_lsn);
* the number of the first readable log file, else it will contain the number
* of the last log file (which may be too old to read).
*
- * PUBLIC: int __log_find __P((DB_LOG *, int, int *, logfile_validity *));
+ * PUBLIC: int __log_find __P((DB_LOG *, int, u_int32_t *, logfile_validity *));
*/
int
__log_find(dblp, find_first, valp, statusp)
DB_LOG *dblp;
- int find_first, *valp;
+ int find_first;
+ u_int32_t *valp;
logfile_validity *statusp;
{
+ DB_ENV *dbenv;
logfile_validity logval_status, status;
u_int32_t clv, logval;
int cnt, fcnt, ret;
const char *dir;
- char **names, *p, *q, savech;
+ char *c, **names, *p, *q, savech;
+ dbenv = dblp->dbenv;
logval_status = status = DB_LV_NONEXISTENT;
/* Return a value of 0 as the log file number on failure. */
@@ -333,7 +388,7 @@ __log_find(dblp, find_first, valp, statusp)
}
/* Get the list of file names. */
- ret = __os_dirlist(dblp->dbenv, dir, &names, &fcnt);
+ ret = __os_dirlist(dbenv, dir, &names, &fcnt);
/*
* !!!
@@ -345,8 +400,8 @@ __log_find(dblp, find_first, valp, statusp)
*q = savech;
if (ret != 0) {
- __db_err(dblp->dbenv, "%s: %s", dir, db_strerror(ret));
- __os_freestr(p);
+ __db_err(dbenv, "%s: %s", dir, db_strerror(ret));
+ __os_free(dbenv, p);
return (ret);
}
@@ -356,74 +411,92 @@ __log_find(dblp, find_first, valp, statusp)
continue;
/*
+ * Names of the form log\.[0-9]* are reserved for DB. Other
+ * names sharing LFPREFIX, such as "log.db", are legal.
+ */
+ for (c = names[cnt] + sizeof(LFPREFIX) - 1; *c != '\0'; c++)
+ if (!isdigit((int)*c))
+ break;
+ if (*c != '\0')
+ continue;
+
+ /*
* Use atol, not atoi; if an "int" is 16-bits, the largest
* log file name won't fit.
*/
clv = atol(names[cnt] + (sizeof(LFPREFIX) - 1));
+
+ /*
+ * If searching for the first log file, we want to return the
+ * oldest log file we can read, or, if no readable log files
+ * exist, the newest log file we can't read (the crossover
+ * point between the old and new versions of the log file).
+ *
+ * If we're searching for the last log file, we want to return
+ * the newest log file, period.
+ *
+ * Readable log files should never precede unreadable log
+ * files; that would mean the admin seriously screwed up.
+ */
if (find_first) {
- if (logval != 0 && clv > logval)
+ if (logval != 0 &&
+ status != DB_LV_OLD_UNREADABLE && clv > logval)
continue;
} else
if (logval != 0 && clv < logval)
continue;
- /*
- * Take note of whether the log file logval is
- * an old version or incompletely initialized.
- */
- if ((ret = __log_valid(dblp, clv, 1, &status)) != 0)
+ if ((ret = __log_valid(dblp, clv, 1, &status)) != 0) {
+ __db_err(dbenv, "Invalid log file: %s: %s",
+ names[cnt], db_strerror(ret));
goto err;
+ }
switch (status) {
+ case DB_LV_NONEXISTENT:
+ /* __log_valid never returns DB_LV_NONEXISTENT. */
+ DB_ASSERT(0);
+ break;
case DB_LV_INCOMPLETE:
/*
- * It's acceptable for the last log file to
- * have been incompletely initialized--it's possible
- * to create a log file but not write anything to it,
- * and recovery needs to gracefully handle this.
- *
- * Just ignore it; we don't want to return this
- * as a valid log file.
+ * The last log file may not have been initialized --
+ * it's possible to create a log file but not write
+ * anything to it. If performing recovery (that is,
+ * if find_first isn't set), ignore the file, it's
+ * not interesting. If we're searching for the first
+ * log record, return the file (assuming we don't find
+ * something better), as the "real" first log record
+ * is likely to be in the log buffer, and we want to
+ * set the file LSN for our return.
*/
+ if (find_first)
+ goto found;
break;
- case DB_LV_NONEXISTENT:
- /* Should never happen. */
- DB_ASSERT(0);
+ case DB_LV_OLD_UNREADABLE:
+ /*
+ * If we're searching for the first log file, then we
+ * only want this file if we don't yet have a file or
+ * already have an unreadable file and this one is
+ * newer than that one. If we're searching for the
+ * last log file, we always want this file because we
+ * wouldn't be here if it wasn't newer than our current
+ * choice.
+ */
+ if (!find_first || logval == 0 ||
+ (status == DB_LV_OLD_UNREADABLE && clv > logval))
+ goto found;
break;
case DB_LV_NORMAL:
case DB_LV_OLD_READABLE:
- logval = clv;
+found: logval = clv;
logval_status = status;
break;
- case DB_LV_OLD_UNREADABLE:
- /*
- * Continue; we want the oldest valid log,
- * and clv is too old to be useful. We don't
- * want it to supplant logval if we're looking for
- * the oldest valid log, but we do want to return
- * it if it's the last log file--we want the very
- * last file number, so that our caller can
- * start a new file after it.
- *
- * The code here assumes that there will never
- * be a too-old log that's preceded by a log
- * of the current version, but in order to
- * attain that state of affairs the user
- * would have had to really seriously screw
- * up; I think we can safely assume this won't
- * happen.
- */
- if (!find_first) {
- logval = clv;
- logval_status = status;
- }
- break;
}
}
*valp = logval;
-err: __os_dirfree(names, fcnt);
- __os_freestr(p);
+err: __os_dirfree(dbenv, names, fcnt);
+ __os_free(dbenv, p);
*statusp = logval_status;
return (ret);
@@ -446,30 +519,48 @@ __log_valid(dblp, number, set_persist, statusp)
int set_persist;
logfile_validity *statusp;
{
+ DB_CIPHER *db_cipher;
+ DB_ENV *dbenv;
DB_FH fh;
+ HDR *hdr;
LOG *region;
- LOGP persist;
- char *fname;
- int ret;
+ LOGP *persist;
logfile_validity status;
- size_t nw;
+ size_t hdrsize, nw, recsize;
+ int is_hmac, need_free, ret;
+ u_int8_t *tmp;
+ char *fname;
+ dbenv = dblp->dbenv;
+ db_cipher = dbenv->crypto_handle;
+ persist = NULL;
status = DB_LV_NORMAL;
/* Try to open the log file. */
if ((ret = __log_name(dblp,
number, &fname, &fh, DB_OSO_RDONLY | DB_OSO_SEQ)) != 0) {
- __os_freestr(fname);
+ __os_free(dbenv, fname);
return (ret);
}
+ need_free = 0;
+ hdrsize = HDR_NORMAL_SZ;
+ is_hmac = 0;
+ recsize = sizeof(LOGP);
+ if (CRYPTO_ON(dbenv)) {
+ hdrsize = HDR_CRYPTO_SZ;
+ recsize = sizeof(LOGP);
+ recsize += db_cipher->adj_size(recsize);
+ is_hmac = 1;
+ }
+ if ((ret = __os_calloc(dbenv, 1, recsize + hdrsize, &tmp)) != 0)
+ return (ret);
+ need_free = 1;
+ hdr = (HDR *)tmp;
+ persist = (LOGP *)(tmp + hdrsize);
/* Try to read the header. */
- if ((ret =
- __os_seek(dblp->dbenv,
- &fh, 0, 0, sizeof(HDR), 0, DB_OS_SEEK_SET)) != 0 ||
- (ret =
- __os_read(dblp->dbenv, &fh, &persist, sizeof(LOGP), &nw)) != 0 ||
- nw != sizeof(LOGP)) {
+ if ((ret = __os_read(dbenv, &fh, tmp, recsize + hdrsize, &nw)) != 0 ||
+ nw != recsize + hdrsize) {
if (ret == 0)
status = DB_LV_INCOMPLETE;
else
@@ -477,19 +568,63 @@ __log_valid(dblp, number, set_persist, statusp)
* The error was a fatal read error, not just an
* incompletely initialized log file.
*/
- __db_err(dblp->dbenv, "Ignoring log file: %s: %s",
+ __db_err(dbenv, "Ignoring log file: %s: %s",
fname, db_strerror(ret));
- (void)__os_closehandle(&fh);
+ (void)__os_closehandle(dbenv, &fh);
goto err;
}
- (void)__os_closehandle(&fh);
+ (void)__os_closehandle(dbenv, &fh);
+
+ /*
+ * Now we have to validate the persistent record. We have
+ * several scenarios we have to deal with:
+ *
+ * 1. User has crypto turned on:
+ * - They're reading an old, unencrypted log file
+ * . We will fail the record size match check below.
+ * - They're reading a current, unencrypted log file
+ * . We will fail the record size match check below.
+ * - They're reading an old, encrypted log file [NOT YET]
+ * . After decryption we'll fail the version check. [NOT YET]
+ * - They're reading a current, encrypted log file
+ * . We should proceed as usual.
+ * 2. User has crypto turned off:
+ * - They're reading an old, unencrypted log file
+ * . We will fail the version check.
+ * - They're reading a current, unencrypted log file
+ * . We should proceed as usual.
+ * - They're reading an old, encrypted log file [NOT YET]
+ * . We'll fail the magic number check (it is encrypted).
+ * - They're reading a current, encrypted log file
+ * . We'll fail the magic number check (it is encrypted).
+ */
+ if (CRYPTO_ON(dbenv)) {
+ /*
+ * If we are trying to decrypt an unencrypted log
+ * we can only detect that by having an unreasonable
+ * data length for our persistent data.
+ */
+ if ((hdr->len - hdrsize) != sizeof(LOGP)) {
+ __db_err(dbenv, "log record size mismatch");
+ goto err;
+ }
+ /* Check the checksum and decrypt. */
+ if ((ret = __db_check_chksum(dbenv, db_cipher, &hdr->chksum[0],
+ (u_int8_t *)persist, hdr->len - hdrsize, is_hmac)) != 0) {
+ __db_err(dbenv, "log record checksum mismatch");
+ goto err;
+ }
+ if ((ret = db_cipher->decrypt(dbenv, db_cipher->data,
+ &hdr->iv[0], (u_int8_t *)persist, hdr->len - hdrsize)) != 0)
+ goto err;
+ }
/* Validate the header. */
- if (persist.magic != DB_LOGMAGIC) {
- __db_err(dblp->dbenv,
+ if (persist->magic != DB_LOGMAGIC) {
+ __db_err(dbenv,
"Ignoring log file: %s: magic number %lx, not %lx",
- fname, (u_long)persist.magic, (u_long)DB_LOGMAGIC);
+ fname, (u_long)persist->magic, (u_long)DB_LOGMAGIC);
ret = EINVAL;
goto err;
}
@@ -499,135 +634,162 @@ __log_valid(dblp, number, set_persist, statusp)
* belongs to an unreadable or readable old version; leave it
* alone if and only if the log file version is the current one.
*/
- if (persist.version > DB_LOGVERSION) {
+ if (persist->version > DB_LOGVERSION) {
/* This is a fatal error--the log file is newer than DB. */
- __db_err(dblp->dbenv,
+ __db_err(dbenv,
"Ignoring log file: %s: unsupported log version %lu",
- fname, (u_long)persist.version);
+ fname, (u_long)persist->version);
ret = EINVAL;
goto err;
- } else if (persist.version < DB_LOGOLDVER) {
+ } else if (persist->version < DB_LOGOLDVER) {
status = DB_LV_OLD_UNREADABLE;
/*
* We don't want to set persistent info based on an
* unreadable region, so jump to "err".
*/
goto err;
- } else if (persist.version < DB_LOGVERSION)
+ } else if (persist->version < DB_LOGVERSION)
status = DB_LV_OLD_READABLE;
/*
- * If the log is thus far readable and we're doing system
- * initialization, set the region's persistent information
- * based on the headers.
+ * Only if we have a current log do we verify the checksum.
+ * We could not check the checksum before checking the magic
+ * and version because old log hdrs have the length and checksum
+ * in a different location.
+ */
+ if (!CRYPTO_ON(dbenv) && ((ret = __db_check_chksum(dbenv,
+ db_cipher, &hdr->chksum[0], (u_int8_t *)persist,
+ hdr->len - hdrsize, is_hmac)) != 0)) {
+ __db_err(dbenv, "log record checksum mismatch");
+ goto err;
+ }
+
+ /*
+ * If the log is readable so far and we're doing system initialization,
+ * set the region's persistent information based on the headers.
+ *
+ * Always set the current log file size. Only set the next log file's
+ * size if the application hasn't set it already.
+ *
+ * XXX
+ * Always use the persistent header's mode, regardless of what was set
+ * in the current environment. We've always done it this way, but it's
+ * probably a bug -- I can't think of a way not-changing the mode would
+ * be a problem, though.
*/
if (set_persist) {
region = dblp->reginfo.primary;
- region->persist.lg_max = persist.lg_max;
- region->persist.mode = persist.mode;
+ region->log_size = persist->log_size;
+ if (region->log_nsize == 0)
+ region->log_nsize = persist->log_size;
+ region->persist.mode = persist->mode;
}
-err: __os_freestr(fname);
+err: __os_free(dbenv, fname);
+ if (need_free)
+ __os_free(dbenv, tmp);
*statusp = status;
return (ret);
}
/*
- * __log_close --
- * Internal version of log_close: only called from dbenv_refresh.
+ * __log_dbenv_refresh --
+ * Clean up after the log system on a close or failed open. Called only
+ * from __dbenv_refresh. (Formerly called __log_close.)
*
- * PUBLIC: int __log_close __P((DB_ENV *));
+ * PUBLIC: int __log_dbenv_refresh __P((DB_ENV *));
*/
int
-__log_close(dbenv)
+__log_dbenv_refresh(dbenv)
DB_ENV *dbenv;
{
DB_LOG *dblp;
int ret, t_ret;
- ret = 0;
dblp = dbenv->lg_handle;
/* We may have opened files as part of XA; if so, close them. */
F_SET(dblp, DBLOG_RECOVER);
- __log_close_files(dbenv);
+ ret = __dbreg_close_files(dbenv);
/* Discard the per-thread lock. */
if (dblp->mutexp != NULL)
__db_mutex_free(dbenv, &dblp->reginfo, dblp->mutexp);
/* Detach from the region. */
- ret = __db_r_detach(dbenv, &dblp->reginfo, 0);
+ if ((t_ret =
+ __db_r_detach(dbenv, &dblp->reginfo, 0)) != 0 && ret == 0)
+ ret = t_ret;
/* Close open files, release allocated memory. */
if (F_ISSET(&dblp->lfh, DB_FH_VALID) &&
- (t_ret = __os_closehandle(&dblp->lfh)) != 0 && ret == 0)
- ret = t_ret;
- if (dblp->c_dbt.data != NULL)
- __os_free(dblp->c_dbt.data, dblp->c_dbt.ulen);
- if (F_ISSET(&dblp->c_fh, DB_FH_VALID) &&
- (t_ret = __os_closehandle(&dblp->c_fh)) != 0 && ret == 0)
+ (t_ret = __os_closehandle(dbenv, &dblp->lfh)) != 0 && ret == 0)
ret = t_ret;
if (dblp->dbentry != NULL)
- __os_free(dblp->dbentry,
- (dblp->dbentry_cnt * sizeof(DB_ENTRY)));
- if (dblp->readbufp != NULL)
- __os_free(dblp->readbufp, dbenv->lg_bsize);
+ __os_free(dbenv, dblp->dbentry);
- __os_free(dblp, sizeof(*dblp));
+ __os_free(dbenv, dblp);
dbenv->lg_handle = NULL;
return (ret);
}
/*
- * log_stat --
- * Return LOG statistics.
+ * __log_stat --
+ * Return log statistics.
+ *
+ * PUBLIC: int __log_stat __P((DB_ENV *, DB_LOG_STAT **, u_int32_t));
*/
int
-log_stat(dbenv, statp, db_malloc)
+__log_stat(dbenv, statp, flags)
DB_ENV *dbenv;
DB_LOG_STAT **statp;
- void *(*db_malloc) __P((size_t));
+ u_int32_t flags;
{
DB_LOG *dblp;
DB_LOG_STAT *stats;
LOG *region;
int ret;
-#ifdef HAVE_RPC
- if (F_ISSET(dbenv, DB_ENV_RPCCLIENT))
- return (__dbcl_log_stat(dbenv, statp, db_malloc));
-#endif
-
PANIC_CHECK(dbenv);
- ENV_REQUIRES_CONFIG(dbenv, dbenv->lg_handle, DB_INIT_LOG);
+ ENV_REQUIRES_CONFIG(dbenv,
+ dbenv->lg_handle, "DB_ENV->log_stat", DB_INIT_LOG);
*statp = NULL;
+ if ((ret = __db_fchk(dbenv,
+ "DB_ENV->log_stat", flags, DB_STAT_CLEAR)) != 0)
+ return (ret);
dblp = dbenv->lg_handle;
region = dblp->reginfo.primary;
- if ((ret = __os_malloc(dbenv,
- sizeof(DB_LOG_STAT), db_malloc, &stats)) != 0)
+ if ((ret = __os_umalloc(dbenv, sizeof(DB_LOG_STAT), &stats)) != 0)
return (ret);
/* Copy out the global statistics. */
R_LOCK(dbenv, &dblp->reginfo);
*stats = region->stat;
+ if (LF_ISSET(DB_STAT_CLEAR))
+ memset(&region->stat, 0, sizeof(region->stat));
stats->st_magic = region->persist.magic;
stats->st_version = region->persist.version;
stats->st_mode = region->persist.mode;
stats->st_lg_bsize = region->buffer_size;
- stats->st_lg_max = region->persist.lg_max;
+ stats->st_lg_size = region->log_nsize;
stats->st_region_wait = dblp->reginfo.rp->mutex.mutex_set_wait;
stats->st_region_nowait = dblp->reginfo.rp->mutex.mutex_set_nowait;
+ if (LF_ISSET(DB_STAT_CLEAR)) {
+ dblp->reginfo.rp->mutex.mutex_set_wait = 0;
+ dblp->reginfo.rp->mutex.mutex_set_nowait = 0;
+ }
stats->st_regsize = dblp->reginfo.rp->size;
stats->st_cur_file = region->lsn.file;
stats->st_cur_offset = region->lsn.offset;
+ stats->st_disk_file = region->s_lsn.file;
+ stats->st_disk_offset = region->s_lsn.offset;
R_UNLOCK(dbenv, &dblp->reginfo);
@@ -636,22 +798,287 @@ log_stat(dbenv, statp, db_malloc)
}
/*
- * __log_lastckp --
- * Return the current chkpt_lsn, so that we can store it in
- * the transaction region and keep the chain of checkpoints
- * unbroken across environment recreates.
+ * __log_get_cached_ckp_lsn --
+ * Retrieve any last checkpoint LSN that we may have found on startup.
+ *
+ * PUBLIC: void __log_get_cached_ckp_lsn __P((DB_ENV *, DB_LSN *));
+ */
+void
+__log_get_cached_ckp_lsn(dbenv, ckp_lsnp)
+ DB_ENV *dbenv;
+ DB_LSN *ckp_lsnp;
+{
+ DB_LOG *dblp;
+ LOG *lp;
+
+ dblp = (DB_LOG *)dbenv->lg_handle;
+ lp = (LOG *)dblp->reginfo.primary;
+
+ R_LOCK(dbenv, &dblp->reginfo);
+ *ckp_lsnp = lp->cached_ckp_lsn;
+ R_UNLOCK(dbenv, &dblp->reginfo);
+}
+
+/*
+ * __log_region_size --
+ * Return the amount of space needed for the log region.
+ * Make the region large enough to hold the log buffer plus
+ * the configured region maximum, with some space for thread
+ * handles, the beginning of the shalloc region and anything we
+ * need for mutex system resource recording.
+ */
+static size_t
+__log_region_size(dbenv)
+ DB_ENV *dbenv;
+{
+ size_t s;
+
+ s = dbenv->lg_regionmax + dbenv->lg_bsize;
+#ifdef HAVE_MUTEX_SYSTEM_RESOURCES
+ if (F_ISSET(dbenv, DB_ENV_THREAD))
+ s += sizeof(REGMAINT) + LG_MAINT_SIZE;
+#endif
+ return (s);
+}
+
+/*
+ * __log_region_destroy
+ * Destroy any region maintenance info.
+ *
+ * PUBLIC: void __log_region_destroy __P((DB_ENV *, REGINFO *));
+ */
+void
+__log_region_destroy(dbenv, infop)
+ DB_ENV *dbenv;
+ REGINFO *infop;
+{
+ __db_shlocks_destroy(infop, (REGMAINT *)R_ADDR(infop,
+ ((LOG *)R_ADDR(infop, infop->rp->primary))->maint_off));
+
+ COMPQUIET(dbenv, NULL);
+ COMPQUIET(infop, NULL);
+}
+
+/*
+ * __log_vtruncate
+ * This is a virtual truncate. We set up the log indicators to
+ * make everyone believe that the given record is the last one in the
+ * log. Returns with the next valid LSN (i.e., the LSN of the next
+ * record to be written). This is used in replication to discard records
+ * in the log file that do not agree with the master.
+ *
+ * PUBLIC: int __log_vtruncate __P((DB_ENV *, DB_LSN *, DB_LSN *));
+ */
+int
+__log_vtruncate(dbenv, lsn, ckplsn)
+ DB_ENV *dbenv;
+ DB_LSN *lsn, *ckplsn;
+{
+ DBT log_dbt;
+ DB_FH fh;
+ DB_LOG *dblp;
+ DB_LOGC *logc;
+ DB_LSN end_lsn;
+ LOG *lp;
+ u_int32_t bytes, c_len;
+ int fn, ret, t_ret;
+ char *fname;
+
+ /* Need to find out the length of this soon-to-be-last record. */
+ if ((ret = dbenv->log_cursor(dbenv, &logc, 0)) != 0)
+ return (ret);
+ memset(&log_dbt, 0, sizeof(log_dbt));
+ ret = logc->get(logc, lsn, &log_dbt, DB_SET);
+ c_len = logc->c_len;
+ if ((t_ret = logc->close(logc, 0)) != 0 && ret == 0)
+ ret = t_ret;
+ if (ret != 0)
+ return (ret);
+
+ /* Now do the truncate. */
+ dblp = (DB_LOG *)dbenv->lg_handle;
+ lp = (LOG *)dblp->reginfo.primary;
+
+ R_LOCK(dbenv, &dblp->reginfo);
+ end_lsn = lp->lsn;
+ lp->lsn = *lsn;
+ lp->len = c_len;
+ lp->lsn.offset += lp->len;
+
+ /*
+ * I am going to assume that the number of bytes written since
+ * the last checkpoint doesn't exceed a 32-bit number.
+ */
+ DB_ASSERT(lp->lsn.file >= ckplsn->file);
+ bytes = 0;
+ if (ckplsn->file != lp->lsn.file) {
+ bytes = lp->log_size - ckplsn->offset;
+ if (lp->lsn.file > ckplsn->file + 1)
+ bytes += lp->log_size *
+ (lp->lsn.file - ckplsn->file - 1);
+ bytes += lp->lsn.offset;
+ } else
+ bytes = lp->lsn.offset - ckplsn->offset;
+
+ lp->stat.st_wc_mbytes += bytes / MEGABYTE;
+ lp->stat.st_wc_bytes += bytes % MEGABYTE;
+
+ /*
+ * If the saved lsn is greater than our new end of log, reset it
+ * to our current end of log.
+ */
+ if (log_compare(&lp->s_lsn, lsn) > 0)
+ lp->s_lsn = lp->lsn;
+
+ /*
+ * If the new end of log is in the middle of the buffer,
+ * don't change the w_off or f_lsn. If the new end is
+ * before the w_off then reset w_off and f_lsn to the new
+ * end of log.
+ */
+ if (lp->w_off >= lp->lsn.offset) {
+ lp->f_lsn = lp->lsn;
+ lp->w_off = lp->lsn.offset;
+ lp->b_off = 0;
+ } else
+ lp->b_off = lp->lsn.offset - lp->w_off;
+
+ ZERO_LSN(lp->waiting_lsn);
+ lp->ready_lsn = lp->lsn;
+ lp->wait_recs = 0;
+ lp->rcvd_recs = 0;
+
+ /* Now throw away any extra log files that we have around. */
+ for (fn = lp->lsn.file + 1;; fn++) {
+ if (__log_name(dblp, fn, &fname, &fh, DB_OSO_RDONLY) != 0) {
+ __os_free(dbenv, fname);
+ break;
+ }
+ (void)__os_closehandle(dbenv, &fh);
+ ret = __os_unlink(dbenv, fname);
+ __os_free(dbenv, fname);
+ if (ret != 0)
+ goto err;
+ }
+
+ /* Truncate the log to the new point. */
+ if ((ret = __log_zero(dbenv, &lp->lsn, &end_lsn)) != 0)
+ goto err;
+
+err: R_UNLOCK(dbenv, &dblp->reginfo);
+ return (ret);
+}
+
+/*
+ * __log_is_outdated --
+ * Used by the replication system to identify if a client's logs
+ * are too old. The log represented by dbenv is compared to the file
+ * number passed in fnum. If the log file fnum does not exist and is
+ * lower-numbered than the current logs, then we return *outdatedp
+ * nonzero, else we return it 0.
*
- * PUBLIC: int __log_lastckp __P((DB_ENV *, DB_LSN *));
+ * PUBLIC: int __log_is_outdated __P((DB_ENV *dbenv,
+ * PUBLIC: u_int32_t fnum, int *outdatedp));
*/
int
-__log_lastckp(dbenv, lsnp)
+__log_is_outdated(dbenv, fnum, outdatedp)
DB_ENV *dbenv;
- DB_LSN *lsnp;
+ u_int32_t fnum;
+ int *outdatedp;
{
+ DB_LOG *dblp;
LOG *lp;
+ char *name;
+ int ret;
+ u_int32_t cfile;
- lp = (LOG *)(((DB_LOG *)dbenv->lg_handle)->reginfo.primary);
+ dblp = dbenv->lg_handle;
+ *outdatedp = 0;
+
+ if ((ret = __log_name(dblp, fnum, &name, NULL, 0)) != 0)
+ return (ret);
+
+ /* If the file exists, we're just fine. */
+ if (__os_exists(name, NULL) == 0)
+ goto out;
+
+ /*
+ * It didn't exist, decide if the file number is too big or
+ * too little. If it's too little, then we need to indicate
+ * that the LSN is outdated.
+ */
+ R_LOCK(dbenv, &dblp->reginfo);
+ lp = (LOG *)dblp->reginfo.primary;
+ cfile = lp->lsn.file;
+ R_UNLOCK(dbenv, &dblp->reginfo);
+
+ if (cfile > fnum)
+ *outdatedp = 1;
+out: __os_free(dbenv, name);
+ return (ret);
+}
+
+/*
+ * __log_zero --
+ * Zero out the tail of a log after a truncate.
+ */
+static int
+__log_zero(dbenv, from_lsn, to_lsn)
+ DB_ENV *dbenv;
+ DB_LSN *from_lsn, *to_lsn;
+{
+ char *lname;
+ DB_LOG *dblp;
+ LOG *lp;
+ int ret;
+ size_t nbytes, len, nw;
+ u_int8_t buf[4096];
+ u_int32_t mbytes, bytes;
+
+ dblp = dbenv->lg_handle;
+ lp = (LOG *)dblp->reginfo.primary;
+ lname = NULL;
+
+ if (dblp->lfname != lp->lsn.file) {
+ if (F_ISSET(&dblp->lfh, DB_FH_VALID))
+ (void)__os_closehandle(dbenv, &dblp->lfh);
+ dblp->lfname = lp->lsn.file;
+ }
+
+ if (from_lsn->file != to_lsn->file) {
+ /* We removed some log files; have to 0 to end of file. */
+ if (!F_ISSET(&dblp->lfh, DB_FH_VALID) && (ret =
+ __log_name(dblp, dblp->lfname, &lname, &dblp->lfh, 0)) != 0)
+ return (ret);
+ if ((ret = __os_ioinfo(dbenv,
+ NULL, &dblp->lfh, &mbytes, &bytes, NULL)) != 0)
+ goto err;
+ len = mbytes * MEGABYTE + bytes - from_lsn->offset;
+ } else if (to_lsn->offset <= from_lsn->offset)
+ return (0);
+ else
+ len = to_lsn->offset - from_lsn->offset;
+
+ memset(buf, 0, sizeof(buf));
+
+ /* Initialize the write position. */
+ if (!F_ISSET(&dblp->lfh, DB_FH_VALID) &&
+ (ret = __log_name(dblp, dblp->lfname, &lname, &dblp->lfh, 0)) != 0)
+ goto err;
+
+ if ((ret = __os_seek(dbenv,
+ &dblp->lfh, 0, 0, from_lsn->offset, 0, DB_OS_SEEK_SET)) != 0)
+ return (ret);
+
+ while (len > 0) {
+ nbytes = len > sizeof(buf) ? sizeof(buf) : len;
+ if ((ret =
+ __os_write(dbenv, &dblp->lfh, buf, nbytes, &nw)) != 0)
+ return (ret);
+ len -= nbytes;
+ }
+err: if (lname != NULL)
+ __os_free(dbenv, lname);
- *lsnp = lp->chkpt_lsn;
return (0);
}
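
For orientation on the byte accounting in __log_vtruncate above, a minimal sketch, not part of the patch, of the "bytes written since the checkpoint" computation as a stand-alone helper. The helper name is hypothetical, "log_size" stands in for lp->log_size, and the result is assumed to fit in 32 bits, as the comment in the hunk notes.

	/*
	 * Illustrative only: bytes of log between a checkpoint LSN and the
	 * current end of log, assuming a fixed per-file log size.
	 */
	static u_int32_t
	__bytes_since_ckp(const DB_LSN *ckplsn, const DB_LSN *lsn, u_int32_t log_size)
	{
		u_int32_t bytes;

		if (ckplsn->file != lsn->file) {
			/* Tail of the checkpoint's file... */
			bytes = log_size - ckplsn->offset;
			/* ...plus any whole log files in between... */
			if (lsn->file > ckplsn->file + 1)
				bytes += log_size * (lsn->file - ckplsn->file - 1);
			/* ...plus the head of the current file. */
			bytes += lsn->offset;
		} else
			bytes = lsn->offset - ckplsn->offset;
		return (bytes);
	}

For example, with a 10,000,000-byte log file size, a checkpoint at [3][4,000,000] and an end of log at [5][1,000,000] give 6,000,000 + 10,000,000 + 1,000,000 = 17,000,000 bytes.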
diff --git a/bdb/log/log.src b/bdb/log/log.src
deleted file mode 100644
index a92fae8de26..00000000000
--- a/bdb/log/log.src
+++ /dev/null
@@ -1,46 +0,0 @@
-/*-
- * See the file LICENSE for redistribution information.
- *
- * Copyright (c) 1996, 1997, 1998, 1999, 2000
- * Sleepycat Software. All rights reserved.
- *
- * $Id: log.src,v 10.12 2000/02/17 20:24:10 bostic Exp $
- */
-
-PREFIX log
-
-INCLUDE #include "db_config.h"
-INCLUDE
-INCLUDE #ifndef NO_SYSTEM_INCLUDES
-INCLUDE #include <sys/types.h>
-INCLUDE
-INCLUDE #include <ctype.h>
-INCLUDE #include <errno.h>
-INCLUDE #include <string.h>
-INCLUDE #endif
-INCLUDE
-INCLUDE #include "db_int.h"
-INCLUDE #include "db_page.h"
-INCLUDE #include "db_dispatch.h"
-INCLUDE #include "db_am.h"
-INCLUDE #include "log.h"
-INCLUDE #include "txn.h"
-INCLUDE
-
-/* Used for registering name/id translations at open or close. */
-DEPRECATED register1 1
-ARG opcode u_int32_t lu
-DBT name DBT s
-DBT uid DBT s
-ARG fileid int32_t ld
-ARG ftype DBTYPE lx
-END
-
-BEGIN register 2
-ARG opcode u_int32_t lu
-DBT name DBT s
-DBT uid DBT s
-ARG fileid int32_t ld
-ARG ftype DBTYPE lx
-ARG meta_pgno db_pgno_t lu
-END
diff --git a/bdb/log/log_archive.c b/bdb/log/log_archive.c
index 83728c79e55..19e1af5a93e 100644
--- a/bdb/log/log_archive.c
+++ b/bdb/log/log_archive.c
@@ -1,14 +1,14 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1997, 1998, 1999, 2000
+ * Copyright (c) 1997-2002
* Sleepycat Software. All rights reserved.
*/
#include "db_config.h"
#ifndef lint
-static const char revid[] = "$Id: log_archive.c,v 11.13 2000/11/30 00:58:40 ubell Exp $";
+static const char revid[] = "$Id: log_archive.c,v 11.39 2002/08/06 05:00:31 bostic Exp $";
#endif /* not lint */
#ifndef NO_SYSTEM_INCLUDES
@@ -19,50 +19,41 @@ static const char revid[] = "$Id: log_archive.c,v 11.13 2000/11/30 00:58:40 ubel
#include <unistd.h>
#endif
-#ifdef HAVE_RPC
-#include "db_server.h"
-#endif
-
#include "db_int.h"
-#include "db_dispatch.h"
-#include "log.h"
-#include "clib_ext.h" /* XXX: needed for getcwd. */
-
-#ifdef HAVE_RPC
-#include "gen_client_ext.h"
-#include "rpc_client_ext.h"
-#endif
+#include "dbinc/db_page.h"
+#include "dbinc/log.h"
+#include "dbinc/qam.h"
+#include "dbinc/txn.h"
static int __absname __P((DB_ENV *, char *, char *, char **));
-static int __build_data __P((DB_ENV *, char *, char ***, void *(*)(size_t)));
+static int __build_data __P((DB_ENV *, char *, char ***));
static int __cmpfunc __P((const void *, const void *));
-static int __usermem __P((DB_ENV *, char ***, void *(*)(size_t)));
+static int __usermem __P((DB_ENV *, char ***));
/*
- * log_archive --
+ * __log_archive --
* Supporting function for db_archive(1).
+ *
+ * PUBLIC: int __log_archive __P((DB_ENV *, char **[], u_int32_t));
*/
int
-log_archive(dbenv, listp, flags, db_malloc)
+__log_archive(dbenv, listp, flags)
DB_ENV *dbenv;
char ***listp;
u_int32_t flags;
- void *(*db_malloc) __P((size_t));
{
DBT rec;
DB_LOG *dblp;
+ DB_LOGC *logc;
DB_LSN stable_lsn;
- u_int32_t fnum;
- int array_size, n, ret;
+ __txn_ckp_args *ckp_args;
char **array, **arrayp, *name, *p, *pref, buf[MAXPATHLEN];
-
-#ifdef HAVE_RPC
- if (F_ISSET(dbenv, DB_ENV_RPCCLIENT))
- return (__dbcl_log_archive(dbenv, listp, flags, db_malloc));
-#endif
+ int array_size, db_arch_abs, n, ret;
+ u_int32_t fnum;
PANIC_CHECK(dbenv);
- ENV_REQUIRES_CONFIG(dbenv, dbenv->lg_handle, DB_INIT_LOG);
+ ENV_REQUIRES_CONFIG(dbenv,
+ dbenv->lg_handle, "DB_ENV->log_archive", DB_INIT_LOG);
name = NULL;
dblp = dbenv->lg_handle;
@@ -70,15 +61,24 @@ log_archive(dbenv, listp, flags, db_malloc)
#define OKFLAGS (DB_ARCH_ABS | DB_ARCH_DATA | DB_ARCH_LOG)
if (flags != 0) {
- if ((ret =
- __db_fchk(dbenv, "log_archive", flags, OKFLAGS)) != 0)
+ if ((ret = __db_fchk(
+ dbenv, "DB_ENV->log_archive", flags, OKFLAGS)) != 0)
return (ret);
- if ((ret =
- __db_fcchk(dbenv,
- "log_archive", flags, DB_ARCH_DATA, DB_ARCH_LOG)) != 0)
+ if ((ret = __db_fcchk(dbenv, "DB_ENV->log_archive",
+ flags, DB_ARCH_DATA, DB_ARCH_LOG)) != 0)
return (ret);
}
+ if (LF_ISSET(DB_ARCH_ABS)) {
+ db_arch_abs = 1;
+ LF_CLR(DB_ARCH_ABS);
+ } else
+ db_arch_abs = 0;
+
+ if (flags == 0 || flags == DB_ARCH_DATA)
+ ENV_REQUIRES_CONFIG(dbenv,
+ dbenv->tx_handle, "DB_ENV->log_archive", DB_INIT_TXN);
+
/*
* Get the absolute pathname of the current directory. It would
* be nice to get the shortest pathname of the database directory,
@@ -88,7 +88,7 @@ log_archive(dbenv, listp, flags, db_malloc)
* Can't trust getcwd(3) to set a valid errno. If it doesn't, just
* guess that we ran out of memory.
*/
- if (LF_ISSET(DB_ARCH_ABS)) {
+ if (db_arch_abs) {
__os_set_errno(0);
if ((pref = getcwd(buf, sizeof(buf))) == NULL) {
if (__os_get_errno() == 0)
@@ -98,31 +98,55 @@ log_archive(dbenv, listp, flags, db_malloc)
} else
pref = NULL;
- switch (LF_ISSET(~DB_ARCH_ABS)) {
+ switch (flags) {
case DB_ARCH_DATA:
- return (__build_data(dbenv, pref, listp, db_malloc));
+ return (__build_data(dbenv, pref, listp));
case DB_ARCH_LOG:
memset(&rec, 0, sizeof(rec));
- if (F_ISSET(dbenv, DB_ENV_THREAD))
- F_SET(&rec, DB_DBT_MALLOC);
- if ((ret = log_get(dbenv, &stable_lsn, &rec, DB_LAST)) != 0)
+ if ((ret = dbenv->log_cursor(dbenv, &logc, 0)) != 0)
+ return (ret);
+#ifdef UMRW
+ ZERO_LSN(stable_lsn);
+#endif
+ ret = logc->get(logc, &stable_lsn, &rec, DB_LAST);
+ (void)logc->close(logc, 0);
+ if (ret != 0)
return (ret);
- if (F_ISSET(dbenv, DB_ENV_THREAD))
- __os_free(rec.data, rec.size);
fnum = stable_lsn.file;
break;
case 0:
- if ((ret = __log_findckp(dbenv, &stable_lsn)) != 0) {
+ memset(&rec, 0, sizeof(rec));
+ if (__txn_getckp(dbenv, &stable_lsn) != 0) {
/*
- * A return of DB_NOTFOUND means that we didn't find
- * any records in the log (so we are not going to be
- * deleting any log files).
+ * A failure return means that there's no checkpoint
+ * in the log (so we are not going to be deleting
+ * any log files).
*/
- if (ret != DB_NOTFOUND)
- return (ret);
*listp = NULL;
return (0);
}
+ if ((ret = dbenv->log_cursor(dbenv, &logc, 0)) != 0)
+ return (ret);
+ if ((ret = logc->get(logc, &stable_lsn, &rec, DB_SET)) != 0 ||
+ (ret = __txn_ckp_read(dbenv, rec.data, &ckp_args)) != 0) {
+ /*
+ * A return of DB_NOTFOUND may only mean that the
+ * checkpoint LSN is before the beginning of the
+ * log files that we still have. This is not
+ * an error; it just means our work is done.
+ */
+ if (ret == DB_NOTFOUND) {
+ *listp = NULL;
+ ret = 0;
+ }
+ (void)logc->close(logc, 0);
+ return (ret);
+ }
+ if ((ret = logc->close(logc, 0)) != 0)
+ return (ret);
+ stable_lsn = ckp_args->ckp_lsn;
+ __os_free(dbenv, ckp_args);
+
/* Remove any log files before the last stable LSN. */
fnum = stable_lsn.file - 1;
break;
@@ -130,9 +154,9 @@ log_archive(dbenv, listp, flags, db_malloc)
#define LIST_INCREMENT 64
/* Get some initial space. */
- array_size = 10;
+ array_size = 64;
if ((ret = __os_malloc(dbenv,
- sizeof(char *) * array_size, NULL, &array)) != 0)
+ sizeof(char *) * array_size, &array)) != 0)
return (ret);
array[0] = NULL;
@@ -143,27 +167,27 @@ log_archive(dbenv, listp, flags, db_malloc)
if (__os_exists(name, NULL) != 0) {
if (LF_ISSET(DB_ARCH_LOG) && fnum == stable_lsn.file)
continue;
- __os_freestr(name);
+ __os_free(dbenv, name);
name = NULL;
break;
}
- if (n >= array_size - 1) {
+ if (n >= array_size - 2) {
array_size += LIST_INCREMENT;
if ((ret = __os_realloc(dbenv,
- sizeof(char *) * array_size, NULL, &array)) != 0)
+ sizeof(char *) * array_size, &array)) != 0)
goto err;
}
- if (LF_ISSET(DB_ARCH_ABS)) {
+ if (db_arch_abs) {
if ((ret = __absname(dbenv,
pref, name, &array[n])) != 0)
goto err;
- __os_freestr(name);
+ __os_free(dbenv, name);
} else if ((p = __db_rpath(name)) != NULL) {
if ((ret = __os_strdup(dbenv, p + 1, &array[n])) != 0)
goto err;
- __os_freestr(name);
+ __os_free(dbenv, name);
} else
array[n] = name;
@@ -182,7 +206,7 @@ log_archive(dbenv, listp, flags, db_malloc)
qsort(array, (size_t)n, sizeof(char *), __cmpfunc);
/* Rework the memory. */
- if ((ret = __usermem(dbenv, &array, db_malloc)) != 0)
+ if ((ret = __usermem(dbenv, &array)) != 0)
goto err;
*listp = array;
@@ -190,11 +214,11 @@ log_archive(dbenv, listp, flags, db_malloc)
err: if (array != NULL) {
for (arrayp = array; *arrayp != NULL; ++arrayp)
- __os_freestr(*arrayp);
- __os_free(array, sizeof(char *) * array_size);
+ __os_free(dbenv, *arrayp);
+ __os_free(dbenv, array);
}
if (name != NULL)
- __os_freestr(name);
+ __os_free(dbenv, name);
return (ret);
}
@@ -203,73 +227,89 @@ err: if (array != NULL) {
* Build a list of datafiles for return.
*/
static int
-__build_data(dbenv, pref, listp, db_malloc)
+__build_data(dbenv, pref, listp)
DB_ENV *dbenv;
char *pref, ***listp;
- void *(*db_malloc) __P((size_t));
{
DBT rec;
+ DB_LOGC *logc;
DB_LSN lsn;
- __log_register_args *argp;
+ __dbreg_register_args *argp;
u_int32_t rectype;
- int array_size, last, n, nxt, ret;
- char **array, **arrayp, *p, *real_name;
+ int array_size, last, n, nxt, ret, t_ret;
+ char **array, **arrayp, **list, **lp, *p, *real_name;
/* Get some initial space. */
- array_size = 10;
+ array_size = 64;
if ((ret = __os_malloc(dbenv,
- sizeof(char *) * array_size, NULL, &array)) != 0)
+ sizeof(char *) * array_size, &array)) != 0)
return (ret);
array[0] = NULL;
memset(&rec, 0, sizeof(rec));
- if (F_ISSET(dbenv, DB_ENV_THREAD))
- F_SET(&rec, DB_DBT_MALLOC);
- for (n = 0, ret = log_get(dbenv, &lsn, &rec, DB_FIRST);
- ret == 0; ret = log_get(dbenv, &lsn, &rec, DB_NEXT)) {
+ if ((ret = dbenv->log_cursor(dbenv, &logc, 0)) != 0)
+ return (ret);
+ for (n = 0; (ret = logc->get(logc, &lsn, &rec, DB_PREV)) == 0;) {
if (rec.size < sizeof(rectype)) {
ret = EINVAL;
- __db_err(dbenv, "log_archive: bad log record");
- goto lg_free;
+ __db_err(dbenv, "DB_ENV->log_archive: bad log record");
+ goto free_continue;
}
memcpy(&rectype, rec.data, sizeof(rectype));
- if (rectype != DB_log_register) {
- if (F_ISSET(dbenv, DB_ENV_THREAD)) {
- __os_free(rec.data, rec.size);
- rec.data = NULL;
- }
+ if (rectype != DB___dbreg_register)
continue;
- }
- if ((ret = __log_register_read(dbenv, rec.data, &argp)) != 0) {
+ if ((ret =
+ __dbreg_register_read(dbenv, rec.data, &argp)) != 0) {
ret = EINVAL;
__db_err(dbenv,
- "log_archive: unable to read log record");
- goto lg_free;
+ "DB_ENV->log_archive: unable to read log record");
+ goto free_continue;
}
- if (n >= array_size - 1) {
+ if (n >= array_size - 2) {
array_size += LIST_INCREMENT;
if ((ret = __os_realloc(dbenv,
- sizeof(char *) * array_size, NULL, &array)) != 0)
- goto lg_free;
+ sizeof(char *) * array_size, &array)) != 0)
+ goto free_continue;
}
if ((ret = __os_strdup(dbenv,
- argp->name.data, &array[n])) != 0) {
-lg_free: if (F_ISSET(&rec, DB_DBT_MALLOC) && rec.data != NULL)
- __os_free(rec.data, rec.size);
- goto err1;
- }
-
- array[++n] = NULL;
- __os_free(argp, 0);
-
- if (F_ISSET(dbenv, DB_ENV_THREAD)) {
- __os_free(rec.data, rec.size);
- rec.data = NULL;
+ argp->name.data, &array[n++])) != 0)
+ goto free_continue;
+ array[n] = NULL;
+
+ if (argp->ftype == DB_QUEUE) {
+ if ((ret = __qam_extent_names(dbenv,
+ argp->name.data, &list)) != 0)
+ goto q_err;
+ for (lp = list;
+ lp != NULL && *lp != NULL; lp++) {
+ if (n >= array_size - 2) {
+ array_size += LIST_INCREMENT;
+ if ((ret = __os_realloc(dbenv,
+ sizeof(char *) *
+ array_size, &array)) != 0)
+ goto q_err;
+ }
+ if ((ret =
+ __os_strdup(dbenv, *lp, &array[n++])) != 0)
+ goto q_err;
+ array[n] = NULL;
+ }
+q_err: if (list != NULL)
+ __os_free(dbenv, list);
}
+free_continue: __os_free(dbenv, argp);
+ if (ret != 0)
+ break;
}
+ if (ret == DB_NOTFOUND)
+ ret = 0;
+ if ((t_ret = logc->close(logc, 0)) != 0 && ret == 0)
+ ret = t_ret;
+ if (ret != 0)
+ goto err1;
/* If there's nothing to return, we're done. */
if (n == 0) {
@@ -297,34 +337,34 @@ lg_free: if (F_ISSET(&rec, DB_DBT_MALLOC) && rec.data != NULL)
}
for (++nxt; nxt < n &&
strcmp(array[last], array[nxt]) == 0; ++nxt) {
- __os_freestr(array[nxt]);
+ __os_free(dbenv, array[nxt]);
array[nxt] = NULL;
}
/* Get the real name. */
if ((ret = __db_appname(dbenv,
- DB_APP_DATA, NULL, array[last], 0, NULL, &real_name)) != 0)
+ DB_APP_DATA, array[last], 0, NULL, &real_name)) != 0)
goto err2;
/* If the file doesn't exist, ignore it. */
if (__os_exists(real_name, NULL) != 0) {
- __os_freestr(real_name);
- __os_freestr(array[last]);
+ __os_free(dbenv, real_name);
+ __os_free(dbenv, array[last]);
array[last] = NULL;
continue;
}
/* Rework the name as requested by the user. */
- __os_freestr(array[last]);
+ __os_free(dbenv, array[last]);
array[last] = NULL;
if (pref != NULL) {
ret = __absname(dbenv, pref, real_name, &array[last]);
- __os_freestr(real_name);
+ __os_free(dbenv, real_name);
if (ret != 0)
goto err2;
} else if ((p = __db_rpath(real_name)) != NULL) {
ret = __os_strdup(dbenv, p + 1, &array[last]);
- __os_freestr(real_name);
+ __os_free(dbenv, real_name);
if (ret != 0)
goto err2;
} else
@@ -336,7 +376,7 @@ lg_free: if (F_ISSET(&rec, DB_DBT_MALLOC) && rec.data != NULL)
array[last] = NULL;
/* Rework the memory. */
- if ((ret = __usermem(dbenv, &array, db_malloc)) != 0)
+ if ((ret = __usermem(dbenv, &array)) != 0)
goto err1;
*listp = array;
@@ -349,13 +389,13 @@ err2: /*
*/
if (array != NULL)
for (; nxt < n; ++nxt)
- __os_freestr(array[nxt]);
+ __os_free(dbenv, array[nxt]);
/* FALLTHROUGH */
err1: if (array != NULL) {
for (arrayp = array; *arrayp != NULL; ++arrayp)
- __os_freestr(*arrayp);
- __os_free(array, array_size * sizeof(char *));
+ __os_free(dbenv, *arrayp);
+ __os_free(dbenv, array);
}
return (ret);
}
@@ -379,7 +419,7 @@ __absname(dbenv, pref, name, newnamep)
/* Malloc space for concatenating the two. */
if ((ret = __os_malloc(dbenv,
- l_pref + l_name + 2, NULL, &newname)) != 0)
+ l_pref + l_name + 2, &newname)) != 0)
return (ret);
*newnamep = newname;
@@ -400,10 +440,9 @@ __absname(dbenv, pref, name, newnamep)
* If the user has their own malloc routine, use it.
*/
static int
-__usermem(dbenv, listp, db_malloc)
+__usermem(dbenv, listp)
DB_ENV *dbenv;
char ***listp;
- void *(*db_malloc) __P((size_t));
{
size_t len;
int ret;
@@ -415,7 +454,7 @@ __usermem(dbenv, listp, db_malloc)
len += sizeof(char *);
/* Allocate it and set up the pointers. */
- if ((ret = __os_malloc(dbenv, len, db_malloc, &array)) != 0)
+ if ((ret = __os_umalloc(dbenv, len, &array)) != 0)
return (ret);
strp = (char *)(array + (orig - *listp) + 1);
@@ -427,13 +466,13 @@ __usermem(dbenv, listp, db_malloc)
*arrayp = strp;
strp += len + 1;
- __os_freestr(*orig);
+ __os_free(dbenv, *orig);
}
/* NULL-terminate the list. */
*arrayp = NULL;
- __os_free(*listp, 0);
+ __os_free(dbenv, *listp);
*listp = array;
return (0);
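
A minimal usage sketch, not part of the patch, of the reworked DB_ENV->log_archive method now that the db_malloc argument is gone. It assumes an already-opened DB_ENV with logging configured; the function name is hypothetical, and free() matches the default allocator case since the list is a single allocation made through the environment's user allocator.

	#include <stdio.h>
	#include <stdlib.h>
	#include <db.h>

	/* Illustrative only: print log files that can be archived or removed. */
	static int
	print_removable_logs(DB_ENV *dbenv)
	{
		char **list, **p;
		int ret;

		if ((ret = dbenv->log_archive(dbenv, &list, DB_ARCH_ABS)) != 0) {
			dbenv->err(dbenv, ret, "DB_ENV->log_archive");
			return (ret);
		}
		if (list != NULL) {
			for (p = list; *p != NULL; ++p)
				printf("removable log file: %s\n", *p);
			/* One allocation holds both the pointers and strings. */
			free(list);
		}
		return (0);
	}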
diff --git a/bdb/log/log_compare.c b/bdb/log/log_compare.c
index 9bc3c028a5f..115f9c21b76 100644
--- a/bdb/log/log_compare.c
+++ b/bdb/log/log_compare.c
@@ -1,13 +1,13 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Copyright (c) 1996-2002
* Sleepycat Software. All rights reserved.
*/
#include "db_config.h"
#ifndef lint
-static const char revid[] = "$Id: log_compare.c,v 11.3 2000/02/14 02:59:59 bostic Exp $";
+static const char revid[] = "$Id: log_compare.c,v 11.6 2002/01/11 15:52:50 bostic Exp $";
#endif /* not lint */
#ifndef NO_SYSTEM_INCLUDES
@@ -19,6 +19,8 @@ static const char revid[] = "$Id: log_compare.c,v 11.3 2000/02/14 02:59:59 bosti
/*
* log_compare --
* Compare two LSN's; return 1, 0, -1 if first is >, == or < second.
+ *
+ * EXTERN: int log_compare __P((const DB_LSN *, const DB_LSN *));
*/
int
log_compare(lsn0, lsn1)
diff --git a/bdb/log/log_findckp.c b/bdb/log/log_findckp.c
deleted file mode 100644
index b1e8fddbdb7..00000000000
--- a/bdb/log/log_findckp.c
+++ /dev/null
@@ -1,135 +0,0 @@
-/*-
- * See the file LICENSE for redistribution information.
- *
- * Copyright (c) 1996, 1997, 1998, 1999, 2000
- * Sleepycat Software. All rights reserved.
- */
-
-#include "db_config.h"
-
-#ifndef lint
-static const char revid[] = "$Id: log_findckp.c,v 11.5 2000/11/30 00:58:40 ubell Exp $";
-#endif /* not lint */
-
-#ifndef NO_SYSTEM_INCLUDES
-#include <sys/types.h>
-
-#include <string.h>
-#endif
-
-#include "db_int.h"
-#include "log.h"
-#include "txn.h"
-
-/*
- * __log_findckp --
- *
- * Looks for the most recent checkpoint that occurs before the most recent
- * checkpoint LSN, subject to the constraint that there must be at least two
- * checkpoints. The reason you need two checkpoints is that you might have
- * crashed during the most recent one and may not have a copy of all the
- * open files. This is the point from which recovery can start and the
- * point up to which archival/truncation can take place. Checkpoints in
- * the log look like:
- *
- * -------------------------------------------------------------------
- * | ckp A, ckplsn 100 | .... record .... | ckp B, ckplsn 600 | ...
- * -------------------------------------------------------------------
- * LSN 500 LSN 1000
- *
- * If we read what log returns from using the DB_CKP parameter to logput,
- * we'll get the record at LSN 1000. The checkpoint LSN there is 600.
- * Now we have to scan backwards looking for a checkpoint before LSN 600.
- * We find one at 500. This means that we can truncate the log before
- * 500 or run recovery beginning at 500.
- *
- * Returns 0 if we find a suitable checkpoint or we retrieved the first
- * record in the log from which to start. Returns DB_NOTFOUND if there
- * are no log records, errno on error.
- *
- * PUBLIC: int __log_findckp __P((DB_ENV *, DB_LSN *));
- */
-int
-__log_findckp(dbenv, lsnp)
- DB_ENV *dbenv;
- DB_LSN *lsnp;
-{
- DBT data;
- DB_LSN ckp_lsn, final_ckp, last_ckp, next_lsn;
- __txn_ckp_args *ckp_args;
- int ret;
-
- /*
- * Need to find the appropriate point from which to begin
- * recovery.
- */
- memset(&data, 0, sizeof(data));
- if (F_ISSET(dbenv, DB_ENV_THREAD))
- F_SET(&data, DB_DBT_MALLOC);
- ZERO_LSN(ckp_lsn);
- if ((ret = log_get(dbenv, &last_ckp, &data, DB_CHECKPOINT)) != 0) {
- if (ret == ENOENT)
- goto get_first;
- else
- return (ret);
- }
- final_ckp = last_ckp;
-
- next_lsn = last_ckp;
- do {
- if (F_ISSET(dbenv, DB_ENV_THREAD))
- __os_free(data.data, data.size);
-
- if ((ret = log_get(dbenv, &next_lsn, &data, DB_SET)) != 0)
- return (ret);
- if ((ret = __txn_ckp_read(dbenv, data.data, &ckp_args)) != 0) {
- if (F_ISSET(dbenv, DB_ENV_THREAD))
- __os_free(data.data, data.size);
- return (ret);
- }
- if (IS_ZERO_LSN(ckp_lsn))
- ckp_lsn = ckp_args->ckp_lsn;
- if (FLD_ISSET(dbenv->verbose, DB_VERB_CHKPOINT)) {
- __db_err(dbenv, "Checkpoint at: [%lu][%lu]",
- (u_long)last_ckp.file, (u_long)last_ckp.offset);
- __db_err(dbenv, "Checkpoint LSN: [%lu][%lu]",
- (u_long)ckp_args->ckp_lsn.file,
- (u_long)ckp_args->ckp_lsn.offset);
- __db_err(dbenv, "Previous checkpoint: [%lu][%lu]",
- (u_long)ckp_args->last_ckp.file,
- (u_long)ckp_args->last_ckp.offset);
- }
- last_ckp = next_lsn;
- next_lsn = ckp_args->last_ckp;
- __os_free(ckp_args, sizeof(*ckp_args));
-
- /*
- * Keep looping until either you 1) run out of checkpoints,
- * 2) you've found a checkpoint before the most recent
- * checkpoint's LSN and you have at least 2 checkpoints.
- */
- } while (!IS_ZERO_LSN(next_lsn) &&
- (log_compare(&last_ckp, &ckp_lsn) > 0 ||
- log_compare(&final_ckp, &last_ckp) == 0));
-
- if (F_ISSET(dbenv, DB_ENV_THREAD))
- __os_free(data.data, data.size);
-
- /*
- * At this point, either, next_lsn is ZERO or ckp_lsn is the
- * checkpoint lsn and last_ckp is the LSN of the last checkpoint
- * before ckp_lsn. If the compare in the loop is still true, then
- * next_lsn must be 0 and we need to roll forward from the
- * beginning of the log.
- */
- if (log_compare(&last_ckp, &ckp_lsn) >= 0 ||
- log_compare(&final_ckp, &last_ckp) == 0) {
-get_first: if ((ret = log_get(dbenv, &last_ckp, &data, DB_FIRST)) != 0)
- return (ret);
- if (F_ISSET(dbenv, DB_ENV_THREAD))
- __os_free(data.data, data.size);
- }
- *lsnp = last_ckp;
-
- return (IS_ZERO_LSN(last_ckp) ? DB_NOTFOUND : 0);
-}
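The diagram in the deleted comment can be checked with log_compare(), which remains public (see the log_compare.c hunk below). A small illustrative sketch using the diagram's values; the function name and the literal LSNs are examples only:

#include <db.h>

/*
 * Checkpoint A lives at LSN [1][500]; checkpoint B (at LSN [1][1000])
 * records ckp_lsn [1][600].  A is a usable recovery start point because
 * it was written before B's checkpoint LSN.
 */
int
ckp_a_is_usable_start(void)
{
	DB_LSN a, b_ckplsn;

	a.file = 1;
	a.offset = 500;
	b_ckplsn.file = 1;
	b_ckplsn.offset = 600;

	/* log_compare() orders LSNs, returning -1, 0 or 1 like strcmp. */
	return (log_compare(&a, &b_ckplsn) < 0);
}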
diff --git a/bdb/log/log_get.c b/bdb/log/log_get.c
index b75d50a62fd..c8b028da0fb 100644
--- a/bdb/log/log_get.c
+++ b/bdb/log/log_get.c
@@ -1,13 +1,13 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Copyright (c) 1996-2002
* Sleepycat Software. All rights reserved.
*/
#include "db_config.h"
#ifndef lint
-static const char revid[] = "$Id: log_get.c,v 11.32 2001/01/11 18:19:53 bostic Exp $";
+static const char revid[] = "$Id: log_get.c,v 11.81 2002/08/14 20:09:27 bostic Exp $";
#endif /* not lint */
#ifndef NO_SYSTEM_INCLUDES
@@ -17,81 +17,175 @@ static const char revid[] = "$Id: log_get.c,v 11.32 2001/01/11 18:19:53 bostic E
#include <unistd.h>
#endif
-#ifdef HAVE_RPC
-#include "db_server.h"
-#endif
-
#include "db_int.h"
-#include "db_page.h"
-#include "log.h"
-#include "hash.h"
+#include "dbinc/crypto.h"
+#include "dbinc/db_page.h"
+#include "dbinc/hmac.h"
+#include "dbinc/log.h"
+#include "dbinc/hash.h"
-#ifdef HAVE_RPC
-#include "gen_client_ext.h"
-#include "rpc_client_ext.h"
-#endif
+typedef enum { L_ALREADY, L_ACQUIRED, L_NONE } RLOCK;
+
+static int __log_c_close __P((DB_LOGC *, u_int32_t));
+static int __log_c_get __P((DB_LOGC *, DB_LSN *, DBT *, u_int32_t));
+static int __log_c_get_int __P((DB_LOGC *, DB_LSN *, DBT *, u_int32_t));
+static int __log_c_hdrchk __P((DB_LOGC *, HDR *, int *));
+static int __log_c_incursor __P((DB_LOGC *, DB_LSN *, HDR *, u_int8_t **));
+static int __log_c_inregion __P((DB_LOGC *,
+ DB_LSN *, RLOCK *, DB_LSN *, HDR *, u_int8_t **));
+static int __log_c_io __P((DB_LOGC *,
+ u_int32_t, u_int32_t, void *, size_t *, int *));
+static int __log_c_ondisk __P((DB_LOGC *,
+ DB_LSN *, DB_LSN *, int, HDR *, u_int8_t **, int *));
+static int __log_c_set_maxrec __P((DB_LOGC *, char *));
+static int __log_c_shortread __P((DB_LOGC *, int));
/*
- * log_get --
- * Get a log record.
+ * __log_cursor --
+ * Create a log cursor.
+ *
+ * PUBLIC: int __log_cursor __P((DB_ENV *, DB_LOGC **, u_int32_t));
*/
int
-log_get(dbenv, alsn, dbt, flags)
+__log_cursor(dbenv, logcp, flags)
+ DB_ENV *dbenv;
+ DB_LOGC **logcp;
+ u_int32_t flags;
+{
+ DB_LOGC *logc;
+ int ret;
+
+ PANIC_CHECK(dbenv);
+ ENV_REQUIRES_CONFIG(dbenv,
+ dbenv->lg_handle, "DB_ENV->log_cursor", DB_INIT_LOG);
+
+ *logcp = NULL;
+
+ /* Validate arguments. */
+ if ((ret = __db_fchk(dbenv, "DB_ENV->log_cursor", flags, 0)) != 0)
+ return (ret);
+
+ /* Allocate memory for the cursor. */
+ if ((ret = __os_calloc(dbenv, 1, sizeof(DB_LOGC), &logc)) != 0)
+ goto err;
+ if ((ret = __os_calloc(dbenv, 1, sizeof(DB_FH), &logc->c_fh)) != 0)
+ goto err;
+
+ logc->bp_size = DB_LOGC_BUF_SIZE;
+ if ((ret = __os_malloc(dbenv, logc->bp_size, &logc->bp)) != 0)
+ goto err;
+
+ logc->dbenv = dbenv;
+ logc->close = __log_c_close;
+ logc->get = __log_c_get;
+
+ *logcp = logc;
+ return (0);
+
+err: if (logc != NULL) {
+ if (logc->c_fh != NULL)
+ __os_free(dbenv, logc->c_fh);
+ __os_free(dbenv, logc);
+ }
+
+ return (ret);
+}
+
+/*
+ * __log_c_close --
+ * Close a log cursor.
+ */
+static int
+__log_c_close(logc, flags)
+ DB_LOGC *logc;
+ u_int32_t flags;
+{
DB_ENV *dbenv;
+ int ret;
+
+ dbenv = logc->dbenv;
+
+ PANIC_CHECK(dbenv);
+ if ((ret = __db_fchk(dbenv, "DB_LOGC->close", flags, 0)) != 0)
+ return (ret);
+
+ if (F_ISSET(logc->c_fh, DB_FH_VALID))
+ (void)__os_closehandle(dbenv, logc->c_fh);
+
+ if (logc->c_dbt.data != NULL)
+ __os_free(dbenv, logc->c_dbt.data);
+
+ __os_free(dbenv, logc->bp);
+ __os_free(dbenv, logc->c_fh);
+ __os_free(dbenv, logc);
+
+ return (0);
+}
+
+/*
+ * __log_c_get --
+ * Get a log record.
+ */
+static int
+__log_c_get(logc, alsn, dbt, flags)
+ DB_LOGC *logc;
DB_LSN *alsn;
DBT *dbt;
u_int32_t flags;
{
- DB_LOG *dblp;
+ DB_ENV *dbenv;
DB_LSN saved_lsn;
int ret;
-#ifdef HAVE_RPC
- if (F_ISSET(dbenv, DB_ENV_RPCCLIENT))
- return (__dbcl_log_get(dbenv, alsn, dbt, flags));
-#endif
+ dbenv = logc->dbenv;
PANIC_CHECK(dbenv);
- ENV_REQUIRES_CONFIG(dbenv, dbenv->lg_handle, DB_INIT_LOG);
/* Validate arguments. */
- if (flags != DB_CHECKPOINT && flags != DB_CURRENT &&
- flags != DB_FIRST && flags != DB_LAST &&
- flags != DB_NEXT && flags != DB_PREV && flags != DB_SET)
- return (__db_ferr(dbenv, "log_get", 1));
-
- if (F_ISSET(dbenv, DB_ENV_THREAD)) {
- if (flags == DB_NEXT || flags == DB_PREV || flags == DB_CURRENT)
- return (__db_ferr(dbenv, "log_get", 1));
- if (!F_ISSET(dbt,
- DB_DBT_MALLOC | DB_DBT_REALLOC | DB_DBT_USERMEM))
- return (__db_ferr(dbenv, "threaded data", 1));
+ switch (flags) {
+ case DB_CURRENT:
+ case DB_FIRST:
+ case DB_LAST:
+ case DB_NEXT:
+ case DB_PREV:
+ break;
+ case DB_SET:
+ if (IS_ZERO_LSN(*alsn)) {
+ __db_err(dbenv, "DB_LOGC->get: invalid LSN");
+ return (EINVAL);
+ }
+ break;
+ default:
+ return (__db_ferr(dbenv, "DB_LOGC->get", 1));
}
- dblp = dbenv->lg_handle;
- R_LOCK(dbenv, &dblp->reginfo);
-
/*
- * The alsn field is only initialized if DB_SET is the flag, so this
- * assignment causes uninitialized memory complaints for other flag
- * values.
+ * On error, we take care not to overwrite the caller's LSN. This
+ * is because callers looking for the end of the log loop using the
+ * DB_NEXT flag, and expect to take the last successful lsn out of
+ * the passed-in structure after DB_LOGC->get fails with DB_NOTFOUND.
+ *
+ * !!!
+ * This line is often flagged an uninitialized memory read during a
+ * Purify or similar tool run, as the application didn't initialize
+ * *alsn. If the application isn't setting the DB_SET flag, there is
+ * no reason it should have initialized *alsn, but we can't know that
+ * and we want to make sure we never overwrite whatever the application
+ * put in there.
*/
-#ifdef UMRW
- if (flags == DB_SET)
- saved_lsn = *alsn;
- else
- ZERO_LSN(saved_lsn);
-#else
saved_lsn = *alsn;
-#endif
/*
- * If we get one of the log's header records, repeat the operation.
- * This assumes that applications don't ever request the log header
- * records by LSN, but that seems reasonable to me.
+ * If we get one of the log's header records as a result of doing a
+ * DB_FIRST, DB_NEXT, DB_LAST or DB_PREV, repeat the operation, log
+ * file header records aren't useful to applications.
*/
- if ((ret = __log_get(dblp,
- alsn, dbt, flags, 0)) == 0 && alsn->offset == 0) {
+ if ((ret = __log_c_get_int(logc, alsn, dbt, flags)) != 0) {
+ *alsn = saved_lsn;
+ return (ret);
+ }
+ if (alsn->offset == 0 && (flags == DB_FIRST ||
+ flags == DB_NEXT || flags == DB_LAST || flags == DB_PREV)) {
switch (flags) {
case DB_FIRST:
flags = DB_NEXT;
@@ -101,92 +195,100 @@ log_get(dbenv, alsn, dbt, flags)
break;
}
if (F_ISSET(dbt, DB_DBT_MALLOC)) {
- __os_free(dbt->data, dbt->size);
+ __os_free(dbenv, dbt->data);
dbt->data = NULL;
}
- ret = __log_get(dblp, alsn, dbt, flags, 0);
+ if ((ret = __log_c_get_int(logc, alsn, dbt, flags)) != 0) {
+ *alsn = saved_lsn;
+ return (ret);
+ }
}
- if (ret != 0)
- *alsn = saved_lsn;
- R_UNLOCK(dbenv, &dblp->reginfo);
-
- return (ret);
+ return (0);
}
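As the saved_lsn comment above notes, DB_LOGC->get leaves the caller's LSN untouched on failure, so the usual way to find the end of the log is to walk forward with DB_NEXT and keep the last LSN that succeeded. A minimal sketch, assuming an environment opened with DB_INIT_LOG; the function name is illustrative:

#include <string.h>
#include <db.h>

/* Walk to the end of the log; *lsnp ends up at the last record read. */
int
find_end_of_log(DB_ENV *dbenv, DB_LSN *lsnp)
{
	DB_LOGC *logc;
	DBT rec;
	int ret, t_ret;

	if ((ret = dbenv->log_cursor(dbenv, &logc, 0)) != 0)
		return (ret);

	/* No DB_DBT_* flag set: returned memory belongs to the cursor. */
	memset(&rec, 0, sizeof(rec));

	while ((ret = logc->get(logc, lsnp, &rec, DB_NEXT)) == 0)
		;			/* *lsnp tracks each successful get. */
	if (ret == DB_NOTFOUND)		/* Expected: we walked off the end. */
		ret = 0;		/* On an empty log, *lsnp is untouched. */

	if ((t_ret = logc->close(logc, 0)) != 0 && ret == 0)
		ret = t_ret;
	return (ret);
}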
/*
- * __log_get --
+ * __log_c_get_int --
* Get a log record; internal version.
- *
- * PUBLIC: int __log_get __P((DB_LOG *, DB_LSN *, DBT *, u_int32_t, int));
*/
-int
-__log_get(dblp, alsn, dbt, flags, silent)
- DB_LOG *dblp;
+static int
+__log_c_get_int(logc, alsn, dbt, flags)
+ DB_LOGC *logc;
DB_LSN *alsn;
DBT *dbt;
u_int32_t flags;
- int silent;
{
+ DB_CIPHER *db_cipher;
DB_ENV *dbenv;
- DB_LSN nlsn;
+ DB_LOG *dblp;
+ DB_LSN last_lsn, nlsn;
HDR hdr;
LOG *lp;
- const char *fail;
- char *np, *tbuf;
- int cnt, ret;
+ RLOCK rlock;
logfile_validity status;
- size_t len, nr;
- u_int32_t offset;
- u_int8_t *p;
- void *shortp, *readp;
+ u_int32_t cnt;
+ u_int8_t *rp;
+ int eof, is_hmac, ret;
+ dbenv = logc->dbenv;
+ dblp = dbenv->lg_handle;
lp = dblp->reginfo.primary;
- fail = np = tbuf = NULL;
- dbenv = dblp->dbenv;
+ is_hmac = 0;
- nlsn = dblp->c_lsn;
+ /*
+ * We don't acquire the log region lock until we need it, and we
+ * release it as soon as we're done.
+ */
+ rlock = F_ISSET(logc, DB_LOG_LOCKED) ? L_ALREADY : L_NONE;
+
+ nlsn = logc->c_lsn;
switch (flags) {
- case DB_CHECKPOINT:
- nlsn = lp->chkpt_lsn;
- if (IS_ZERO_LSN(nlsn)) {
- /* No db_err. The caller may expect this. */
- ret = ENOENT;
- goto err2;
- }
- break;
case DB_NEXT: /* Next log record. */
if (!IS_ZERO_LSN(nlsn)) {
/* Increment the cursor by the cursor record size. */
- nlsn.offset += dblp->c_len;
+ nlsn.offset += logc->c_len;
break;
}
+ flags = DB_FIRST;
/* FALLTHROUGH */
- case DB_FIRST: /* Find the first log record. */
+ case DB_FIRST: /* First log record. */
/* Find the first log file. */
if ((ret = __log_find(dblp, 1, &cnt, &status)) != 0)
- goto err2;
+ goto err;
/*
- * We want any readable version, so either DB_LV_NORMAL
- * or DB_LV_OLD_READABLE is acceptable here. If it's
- * not one of those two, there is no first log record that
- * we can read.
+ * DB_LV_INCOMPLETE:
+ * Theoretically, the log file we want could be created
+ * but not yet written, the "first" log record must be
+ * in the log buffer.
+ * DB_LV_NORMAL:
+ * DB_LV_OLD_READABLE:
+ * We found a log file we can read.
+ * DB_LV_NONEXISTENT:
+ * No log files exist, the "first" log record must be in
+ * the log buffer.
+ * DB_LV_OLD_UNREADABLE:
+ * No readable log files exist, we're at the cross-over
+ * point between two versions. The "first" log record
+ * must be in the log buffer.
*/
- if (status != DB_LV_NORMAL && status != DB_LV_OLD_READABLE) {
- ret = DB_NOTFOUND;
- goto err2;
+ switch (status) {
+ case DB_LV_INCOMPLETE:
+ DB_ASSERT(lp->lsn.file == cnt);
+ /* FALLTHROUGH */
+ case DB_LV_NORMAL:
+ case DB_LV_OLD_READABLE:
+ nlsn.file = cnt;
+ break;
+ case DB_LV_NONEXISTENT:
+ nlsn.file = 1;
+ DB_ASSERT(lp->lsn.file == nlsn.file);
+ break;
+ case DB_LV_OLD_UNREADABLE:
+ nlsn.file = cnt + 1;
+ DB_ASSERT(lp->lsn.file == nlsn.file);
+ break;
}
-
- /*
- * We may have only entered records in the buffer, and not
- * yet written a log file. If no log files were found and
- * there's anything in the buffer, it belongs to file 1.
- */
- if (cnt == 0)
- cnt = 1;
-
- nlsn.file = cnt;
nlsn.offset = 0;
break;
case DB_CURRENT: /* Current log record. */
@@ -197,21 +299,28 @@ __log_get(dblp, alsn, dbt, flags, silent)
if (nlsn.offset == 0) {
if (nlsn.file == 1 ||
__log_valid(dblp,
- nlsn.file - 1, 0, &status) != 0)
- return (DB_NOTFOUND);
+ nlsn.file - 1, 0, &status) != 0) {
+ ret = DB_NOTFOUND;
+ goto err;
+ }
if (status != DB_LV_NORMAL &&
- status != DB_LV_OLD_READABLE)
- return (DB_NOTFOUND);
+ status != DB_LV_OLD_READABLE) {
+ ret = DB_NOTFOUND;
+ goto err;
+ }
--nlsn.file;
- nlsn.offset = dblp->c_off;
- } else
- nlsn.offset = dblp->c_off;
+ }
+ nlsn.offset = logc->c_prev;
break;
}
/* FALLTHROUGH */
case DB_LAST: /* Last log record. */
+ if (rlock == L_NONE) {
+ rlock = L_ACQUIRED;
+ R_LOCK(dbenv, &dblp->reginfo);
+ }
nlsn.file = lp->lsn.file;
nlsn.offset = lp->lsn.offset - lp->len;
break;
@@ -225,241 +334,725 @@ next_file: ++nlsn.file;
nlsn.offset = 0;
}
- /* Return 1 if the request is past the end of the log. */
- if (nlsn.file > lp->lsn.file ||
- (nlsn.file == lp->lsn.file && nlsn.offset >= lp->lsn.offset))
- return (DB_NOTFOUND);
+ /*
+ * The above switch statement should have set nlsn to the lsn of
+ * the requested record.
+ */
- /* If we've switched files, discard the current file handle. */
- if (dblp->c_lsn.file != nlsn.file &&
- F_ISSET(&dblp->c_fh, DB_FH_VALID)) {
- (void)__os_closehandle(&dblp->c_fh);
+ if (CRYPTO_ON(dbenv)) {
+ hdr.size = HDR_CRYPTO_SZ;
+ is_hmac = 1;
+ } else {
+ hdr.size = HDR_NORMAL_SZ;
+ is_hmac = 0;
}
-
- /* If the entire record is in the in-memory buffer, copy it out. */
- if (nlsn.file == lp->lsn.file && nlsn.offset >= lp->w_off) {
- /* Copy the header. */
- p = dblp->bufp + (nlsn.offset - lp->w_off);
- memcpy(&hdr, p, sizeof(HDR));
-
- /* Copy the record. */
- len = hdr.len - sizeof(HDR);
- if ((ret = __db_retcopy(NULL, dbt, p + sizeof(HDR),
- len, &dblp->c_dbt.data, &dblp->c_dbt.ulen)) != 0)
- goto err2;
+ /* Check to see if the record is in the cursor's buffer. */
+ if ((ret = __log_c_incursor(logc, &nlsn, &hdr, &rp)) != 0)
+ goto err;
+ if (rp != NULL)
goto cksum;
- }
- shortp = NULL;
+ /*
+ * Look to see if we're moving backward in the log with the last record
+ * coming from the disk -- it means the record can't be in the region's
+ * buffer. Else, check the region's buffer.
+ *
+ * If the record isn't in the region's buffer, we're going to have to
+ * read the record from disk. We want to make a point of not reading
+ * past the end of the logical log (after recovery, there may be data
+ * after the end of the logical log, not to mention the log file may
+ * have been pre-allocated). So, zero out last_lsn, and initialize it
+ * inside __log_c_inregion -- if it's still zero when we check it in
+ * __log_c_ondisk, that's OK, it just means the logical end of the log
+ * isn't an issue for this request.
+ */
+ ZERO_LSN(last_lsn);
+ if (!F_ISSET(logc, DB_LOG_DISK) ||
+ log_compare(&nlsn, &logc->c_lsn) > 0) {
+ F_CLR(logc, DB_LOG_DISK);
- /* Acquire a file descriptor. */
- if (!F_ISSET(&dblp->c_fh, DB_FH_VALID)) {
- if ((ret = __log_name(dblp, nlsn.file,
- &np, &dblp->c_fh, DB_OSO_RDONLY | DB_OSO_SEQ)) != 0) {
- fail = np;
- goto err1;
- }
- __os_freestr(np);
- np = NULL;
+ if ((ret = __log_c_inregion(logc,
+ &nlsn, &rlock, &last_lsn, &hdr, &rp)) != 0)
+ goto err;
+ if (rp != NULL)
+ goto cksum;
}
- /* See if we've already read this */
- if (nlsn.file == dblp->r_file && nlsn.offset > dblp->r_off
- && nlsn.offset + sizeof(HDR) < dblp->r_off + dblp->r_size)
- goto got_header;
-
/*
- * Seek to the header offset and read the header. Because the file
- * may be pre-allocated, we have to make sure that we're not reading
- * past the information in the start of the in-memory buffer.
+ * We have to read from an on-disk file to retrieve the record.
+ * If we ever can't retrieve the record at offset 0, we're done,
+ * return EOF/DB_NOTFOUND.
+ *
+ * Discard the region lock if we're still holding it, the on-disk
+ * reading routines don't need it.
*/
-
- readp = &hdr;
- offset = nlsn.offset;
- if (nlsn.file == lp->lsn.file && offset + sizeof(HDR) > lp->w_off)
- nr = lp->w_off - offset;
- else if (dblp->readbufp == NULL)
- nr = sizeof(HDR);
- else {
- nr = lp->buffer_size;
- readp = dblp->readbufp;
- dblp->r_file = nlsn.file;
- /* Going backwards. Put the current in the middle. */
- if (flags == DB_PREV || flags == DB_LAST) {
- if (offset <= lp->buffer_size/2)
- offset = 0;
- else
- offset = offset - lp->buffer_size/2;
- }
- if (nlsn.file == lp->lsn.file && offset + nr > lp->lsn.offset)
- nr = lp->lsn.offset - offset;
- dblp->r_off = offset;
+ if (rlock == L_ACQUIRED) {
+ rlock = L_NONE;
+ R_UNLOCK(dbenv, &dblp->reginfo);
+ }
+ if ((ret = __log_c_ondisk(
+ logc, &nlsn, &last_lsn, flags, &hdr, &rp, &eof)) != 0)
+ goto err;
+ if (eof == 1) {
+ /*
+ * Only DB_NEXT automatically moves to the next file, and
+ * it only happens once.
+ */
+ if (flags != DB_NEXT || nlsn.offset == 0)
+ return (DB_NOTFOUND);
+ goto next_file;
}
+ F_SET(logc, DB_LOG_DISK);
- if ((ret = __os_seek(dblp->dbenv,
- &dblp->c_fh, 0, 0, offset, 0, DB_OS_SEEK_SET)) != 0) {
- fail = "seek";
- goto err1;
+cksum: /*
+ * Discard the region lock if we're still holding it. (The path to
+ * get here is that we acquired the lock because of the caller's
+ * flag argument, but we found the record in the cursor's buffer.
+ * Improbable, but it's easy to avoid.)
+ */
+ if (rlock == L_ACQUIRED) {
+ rlock = L_NONE;
+ R_UNLOCK(dbenv, &dblp->reginfo);
}
- if ((ret = __os_read(dblp->dbenv, &dblp->c_fh, readp, nr, &nr)) != 0) {
- fail = "read";
- goto err1;
+
+ /*
+ * Checksum: there are two types of errors -- a configuration error
+ * or a checksum mismatch. The former is always bad. The latter is
+ * OK if we're searching for the end of the log, and very, very bad
+ * if we're reading random log records.
+ */
+ db_cipher = dbenv->crypto_handle;
+ if ((ret = __db_check_chksum(dbenv, db_cipher,
+ hdr.chksum, rp + hdr.size, hdr.len - hdr.size, is_hmac)) != 0) {
+ if (F_ISSET(logc, DB_LOG_SILENT_ERR)) {
+ if (ret == 0 || ret == -1)
+ ret = EIO;
+ } else if (ret == -1) {
+ __db_err(dbenv,
+ "DB_LOGC->get: log record checksum mismatch");
+ __db_err(dbenv,
+ "DB_LOGC->get: catastrophic recovery may be required");
+ ret = __db_panic(dbenv, DB_RUNRECOVERY);
+ }
+ goto err;
}
- if (nr < sizeof(HDR)) {
- /* If read returns EOF, try the next file. */
- if (nr == 0) {
- if (flags != DB_NEXT || nlsn.file == lp->lsn.file)
- goto corrupt;
+
+ /*
+ * If we got a 0-length record, that means we're in the midst of
+ * some bytes that got 0'd as the result of a vtruncate. We're
+ * going to have to retry.
+ */
+ if (hdr.len == 0) {
+ switch (flags) {
+ case DB_FIRST:
+ case DB_NEXT:
+ /* Zero'd records always indicate the end of a file. */
goto next_file;
+
+ case DB_LAST:
+ case DB_PREV:
+ /*
+ * We should never get here. If we recover a log
+ * file with 0's at the end, we'll treat the 0'd
+ * headers as the end of log and ignore them. If
+ * we're reading backwards from another file, then
+ * the first record in that new file should have its
+ * prev field set correctly.
+ */
+ __db_err(dbenv,
+ "Encountered zero length records while traversing backwards");
+ DB_ASSERT(0);
+ case DB_SET:
+ default:
+ /* Return the 0-length record. */
+ break;
}
+ }
- if (dblp->readbufp != NULL)
- memcpy((u_int8_t *) &hdr, readp, nr);
+ /* Copy the record into the user's DBT. */
+ if ((ret = __db_retcopy(dbenv, dbt, rp + hdr.size,
+ (u_int32_t)(hdr.len - hdr.size),
+ &logc->c_dbt.data, &logc->c_dbt.ulen)) != 0)
+ goto err;
+ if (CRYPTO_ON(dbenv)) {
+ if ((ret = db_cipher->decrypt(dbenv, db_cipher->data,
+ hdr.iv, dbt->data, hdr.len - hdr.size)) != 0) {
+ ret = EAGAIN;
+ goto err;
+ }
/*
- * If read returns a short count the rest of the record has
- * to be in the in-memory buffer.
+ * Return the original log record size to the user,
+ * even though we've allocated more than that, possibly.
+ * The log record is decrypted in the user dbt, not in
+ * the buffer, so we must do this here after decryption,
+ * not adjust the len passed to the __db_retcopy call.
*/
- if (lp->b_off < sizeof(HDR) - nr)
- goto corrupt;
+ dbt->size = hdr.orig_size;
+ }
- /* Get the rest of the header from the in-memory buffer. */
- memcpy((u_int8_t *)&hdr + nr, dblp->bufp, sizeof(HDR) - nr);
+ /* Update the cursor and the returned LSN. */
+ *alsn = nlsn;
+ logc->c_lsn = nlsn;
+ logc->c_len = hdr.len;
+ logc->c_prev = hdr.prev;
- if (hdr.len == 0)
- goto next_file;
+err: if (rlock == L_ACQUIRED)
+ R_UNLOCK(dbenv, &dblp->reginfo);
- shortp = dblp->bufp + (sizeof(HDR) - nr);
- }
+ return (ret);
+}
- else if (dblp->readbufp != NULL) {
- dblp->r_size = nr;
-got_header: memcpy((u_int8_t *)&hdr,
- dblp->readbufp + (nlsn.offset - dblp->r_off), sizeof(HDR));
- }
+/*
+ * __log_c_incursor --
+ * Check to see if the requested record is in the cursor's buffer.
+ */
+static int
+__log_c_incursor(logc, lsn, hdr, pp)
+ DB_LOGC *logc;
+ DB_LSN *lsn;
+ HDR *hdr;
+ u_int8_t **pp;
+{
+ u_int8_t *p;
+
+ *pp = NULL;
/*
- * Check for buffers of 0's, that's what we usually see during recovery,
- * although it's certainly not something on which we can depend. Check
- * for impossibly large records. The malloc should fail later, but we
- * have customers that run mallocs that handle allocation failure as a
- * fatal error.
+ * Test to see if the requested LSN could be part of the cursor's
+ * buffer.
+ *
+ * The record must be part of the same file as the cursor's buffer.
+ * The record must start at a byte offset equal to or greater than
+ * the cursor buffer.
+ * The record must not start at a byte offset after the cursor
+ * buffer's end.
*/
- if (hdr.len == 0)
- goto next_file;
- if (hdr.len <= sizeof(HDR) || hdr.len > lp->persist.lg_max)
- goto corrupt;
- len = hdr.len - sizeof(HDR);
-
- /* If we've already moved to the in-memory buffer, fill from there. */
- if (shortp != NULL) {
- if (lp->b_off < ((u_int8_t *)shortp - dblp->bufp) + len)
- goto corrupt;
- if ((ret = __db_retcopy(NULL, dbt, shortp, len,
- &dblp->c_dbt.data, &dblp->c_dbt.ulen)) != 0)
- goto err2;
- goto cksum;
- }
+ if (logc->bp_lsn.file != lsn->file)
+ return (0);
+ if (logc->bp_lsn.offset > lsn->offset)
+ return (0);
+ if (logc->bp_lsn.offset + logc->bp_rlen <= lsn->offset + hdr->size)
+ return (0);
- if (dblp->readbufp != NULL) {
- if (nlsn.offset + hdr.len < dblp->r_off + dblp->r_size) {
- if ((ret = __db_retcopy(NULL, dbt, dblp->readbufp +
- (nlsn.offset - dblp->r_off) + sizeof(HDR),
- len, &dblp->c_dbt.data, &dblp->c_dbt.ulen)) != 0)
- goto err2;
- goto cksum;
- } else if ((ret = __os_seek(dblp->dbenv, &dblp->c_fh, 0,
- 0, nlsn.offset + sizeof(HDR), 0, DB_OS_SEEK_SET)) != 0) {
- fail = "seek";
- goto err1;
- }
+ /*
+ * Read the record's header and check if the record is entirely held
+ * in the buffer. If the record is not entirely held, get it again.
+ * (The only advantage in having part of the record locally is that
+ * we might avoid a system call because we already have the HDR in
+ * memory.)
+ *
+ * If the header check fails for any reason, it must be because the
+ * LSN is bogus. Fail hard.
+ */
+ p = logc->bp + (lsn->offset - logc->bp_lsn.offset);
+ memcpy(hdr, p, hdr->size);
+ if (__log_c_hdrchk(logc, hdr, NULL))
+ return (DB_NOTFOUND);
+ if (logc->bp_lsn.offset + logc->bp_rlen <= lsn->offset + hdr->len)
+ return (0);
+
+ *pp = p; /* Success. */
+
+ return (0);
+}
+
+/*
+ * __log_c_inregion --
+ * Check to see if the requested record is in the region's buffer.
+ */
+static int
+__log_c_inregion(logc, lsn, rlockp, last_lsn, hdr, pp)
+ DB_LOGC *logc;
+ DB_LSN *lsn, *last_lsn;
+ RLOCK *rlockp;
+ HDR *hdr;
+ u_int8_t **pp;
+{
+ DB_ENV *dbenv;
+ DB_LOG *dblp;
+ LOG *lp;
+ size_t len, nr;
+ u_int32_t b_disk, b_region;
+ int ret;
+ u_int8_t *p;
+
+ dbenv = logc->dbenv;
+ dblp = dbenv->lg_handle;
+ lp = ((DB_LOG *)logc->dbenv->lg_handle)->reginfo.primary;
+
+ ret = 0;
+ *pp = NULL;
+
+ /* If we haven't yet acquired the log region lock, do so. */
+ if (*rlockp == L_NONE) {
+ *rlockp = L_ACQUIRED;
+ R_LOCK(dbenv, &dblp->reginfo);
}
/*
- * Allocate temporary memory to hold the record.
+ * The routines to read from disk must avoid reading past the logical
+ * end of the log, so pass that information back to it.
*
- * XXX
- * We're calling malloc(3) with a region locked. This isn't
- * a good idea.
+ * Since they're reading directly from the disk, they must also avoid
+ * reading past the offset we've written out. If the log was
+ * truncated, it's possible that there are zeroes or garbage on
+ * disk after this offset, and the logical end of the log can
+ * come later than this point if the log buffer isn't empty.
*/
- if ((ret = __os_malloc(dbenv, len, NULL, &tbuf)) != 0)
- goto err1;
+ *last_lsn = lp->lsn;
+ if (last_lsn->offset > lp->w_off)
+ last_lsn->offset = lp->w_off;
/*
- * Read the record into the buffer. If read returns a short count,
- * there was an error or the rest of the record is in the in-memory
- * buffer. Note, the information may be garbage if we're in recovery,
- * so don't read past the end of the buffer's memory.
- *
- * Because the file may be pre-allocated, we have to make sure that
- * we're not reading past the information in the start of the in-memory
+ * Test to see if the requested LSN could be part of the region's
* buffer.
+ *
+ * During recovery, we read the log files getting the information to
+ * initialize the region. In that case, the region's lsn field will
+ * not yet have been filled in, use only the disk.
+ *
+ * The record must not start at a byte offset after the region buffer's
+ * end, since that means the request is for a record after the end of
+ * the log. Do this test even if the region's buffer is empty -- after
+ * recovery, the log files may continue past the declared end-of-log,
+ * and the disk reading routine will incorrectly attempt to read the
+ * remainder of the log.
+ *
+ * Otherwise, test to see if the region's buffer actually has what we
+ * want:
+ *
+ * The buffer must have some useful content.
+ * The record must be in the same file as the region's buffer and must
+ * start at a byte offset equal to or greater than the region's buffer.
+ */
+ if (IS_ZERO_LSN(lp->lsn))
+ return (0);
+ if (lsn->file > lp->lsn.file ||
+ (lsn->file == lp->lsn.file && lsn->offset >= lp->lsn.offset))
+ return (DB_NOTFOUND);
+ if (lp->b_off == 0)
+ return (0);
+ if (lsn->file < lp->f_lsn.file || lsn->offset < lp->f_lsn.offset)
+ return (0);
+
+ /*
+ * The current contents of the cursor's buffer will be useless for a
+ * future call -- trash it rather than try and make it look correct.
+ */
+ ZERO_LSN(logc->bp_lsn);
+
+ /*
+ * If the requested LSN is greater than the region buffer's first
+ * byte, we know the entire record is in the buffer.
+ *
+ * If the header check fails for any reason, it must be because the
+ * LSN is bogus. Fail hard.
*/
- if (nlsn.file == lp->lsn.file &&
- nlsn.offset + sizeof(HDR) + len > lp->w_off)
- nr = lp->w_off - (nlsn.offset + sizeof(HDR));
+ if (lsn->offset > lp->f_lsn.offset) {
+ p = dblp->bufp + (lsn->offset - lp->w_off);
+ memcpy(hdr, p, hdr->size);
+ if (__log_c_hdrchk(logc, hdr, NULL))
+ return (DB_NOTFOUND);
+ if (logc->bp_size <= hdr->len) {
+ len = ALIGN(hdr->len * 2, 128);
+ if ((ret =
+ __os_realloc(logc->dbenv, len, &logc->bp)) != 0)
+ return (ret);
+ logc->bp_size = (u_int32_t)len;
+ }
+ memcpy(logc->bp, p, hdr->len);
+ *pp = logc->bp;
+ return (0);
+ }
+
+ /*
+ * There's a partial record, that is, the requested record starts
+ * in a log file and finishes in the region buffer. We have to
+ * find out how many bytes of the record are in the region buffer
+ * so we can copy them out into the cursor buffer. First, check
+ * to see if the requested record is the only record in the region
+ * buffer, in which case we should copy the entire region buffer.
+ *
+ * Else, walk back through the region's buffer to find the first LSN
+ * after the record that crosses the buffer boundary -- we can detect
+ * that LSN, because its "prev" field will reference the record we
+ * want. The bytes we need to copy from the region buffer are the
+ * bytes up to the record we find. The bytes we'll need to allocate
+ * to hold the log record are the bytes between the two offsets.
+ */
+ b_disk = lp->w_off - lsn->offset;
+ if (lp->b_off <= lp->len)
+ b_region = (u_int32_t)lp->b_off;
else
- nr = len;
- if ((ret = __os_read(dblp->dbenv, &dblp->c_fh, tbuf, nr, &nr)) != 0) {
- fail = "read";
- goto err1;
+ for (p = dblp->bufp + (lp->b_off - lp->len);;) {
+ memcpy(hdr, p, hdr->size);
+ if (hdr->prev == lsn->offset) {
+ b_region = (u_int32_t)(p - dblp->bufp);
+ break;
+ }
+ p = dblp->bufp + (hdr->prev - lp->w_off);
+ }
+
+ /*
+ * If we don't have enough room for the record, we have to allocate
+ * space. We have to do it while holding the region lock, which is
+ * truly annoying, but there's no way around it. This call is why
+ * we allocate cursor buffer space when allocating the cursor instead
+ * of waiting.
+ */
+ if (logc->bp_size <= b_region + b_disk) {
+ len = ALIGN((b_region + b_disk) * 2, 128);
+ if ((ret = __os_realloc(logc->dbenv, len, &logc->bp)) != 0)
+ return (ret);
+ logc->bp_size = (u_int32_t)len;
}
- if (len - nr > lp->buffer_size)
- goto corrupt;
- if (nr != len) {
- if (lp->b_off < len - nr)
- goto corrupt;
-
- /* Get the rest of the record from the in-memory buffer. */
- memcpy((u_int8_t *)tbuf + nr, dblp->bufp, len - nr);
+
+ /* Copy the region's bytes to the end of the cursor's buffer. */
+ p = (logc->bp + logc->bp_size) - b_region;
+ memcpy(p, dblp->bufp, b_region);
+
+ /* Release the region lock. */
+ if (*rlockp == L_ACQUIRED) {
+ *rlockp = L_NONE;
+ R_UNLOCK(dbenv, &dblp->reginfo);
}
- /* Copy the record into the user's DBT. */
- if ((ret = __db_retcopy(NULL, dbt, tbuf, len,
- &dblp->c_dbt.data, &dblp->c_dbt.ulen)) != 0)
- goto err2;
- __os_free(tbuf, 0);
- tbuf = NULL;
+ /*
+ * Read the rest of the information from disk. Neither short reads
+ * or EOF are acceptable, the bytes we want had better be there.
+ */
+ if (b_disk != 0) {
+ p -= b_disk;
+ nr = b_disk;
+ if ((ret = __log_c_io(
+ logc, lsn->file, lsn->offset, p, &nr, NULL)) != 0)
+ return (ret);
+ if (nr < b_disk)
+ return (__log_c_shortread(logc, 0));
+ }
-cksum: /*
- * If the user specified a partial record read, the checksum can't
- * match. It's not an obvious thing to do, but a user testing for
- * the length of a record might do it.
+ /* Copy the header information into the caller's structure. */
+ memcpy(hdr, p, hdr->size);
+
+ *pp = p;
+ return (0);
+}
+
+/*
+ * __log_c_ondisk --
+ * Read a record off disk.
+ */
+static int
+__log_c_ondisk(logc, lsn, last_lsn, flags, hdr, pp, eofp)
+ DB_LOGC *logc;
+ DB_LSN *lsn, *last_lsn;
+ int flags, *eofp;
+ HDR *hdr;
+ u_int8_t **pp;
+{
+ DB_ENV *dbenv;
+ size_t len, nr;
+ u_int32_t offset;
+ int ret;
+
+ dbenv = logc->dbenv;
+ *eofp = 0;
+
+ nr = hdr->size;
+ if ((ret =
+ __log_c_io(logc, lsn->file, lsn->offset, hdr, &nr, eofp)) != 0)
+ return (ret);
+ if (*eofp)
+ return (0);
+
+ /* If we read 0 bytes, assume we've hit EOF. */
+ if (nr == 0) {
+ *eofp = 1;
+ return (0);
+ }
+
+ /* Check the HDR. */
+ if ((ret = __log_c_hdrchk(logc, hdr, eofp)) != 0)
+ return (ret);
+ if (*eofp)
+ return (0);
+
+ /* Otherwise, we should have gotten the bytes we wanted. */
+ if (nr < hdr->size)
+ return (__log_c_shortread(logc, 0));
+
+ /*
+ * Regardless of how we return, the previous contents of the cursor's
+ * buffer are useless -- trash it.
*/
- if (!F_ISSET(dbt, DB_DBT_PARTIAL) &&
- hdr.cksum != __ham_func4(NULL, dbt->data, dbt->size)) {
- if (!silent)
- __db_err(dbenv, "log_get: checksum mismatch");
- goto corrupt;
+ ZERO_LSN(logc->bp_lsn);
+
+ /*
+ * Otherwise, we now (finally!) know how big the record is. (Maybe
+ * we should have just stuck the length of the record into the LSN!?)
+ * Make sure we have enough space.
+ */
+ if (logc->bp_size <= hdr->len) {
+ len = ALIGN(hdr->len * 2, 128);
+ if ((ret = __os_realloc(dbenv, len, &logc->bp)) != 0)
+ return (ret);
+ logc->bp_size = (u_int32_t)len;
}
- /* Update the cursor and the return lsn. */
- dblp->c_off = hdr.prev;
- dblp->c_len = hdr.len;
- dblp->c_lsn = nlsn;
- *alsn = nlsn;
+ /*
+ * If we're moving forward in the log file, read this record in at the
+ * beginning of the buffer. Otherwise, read this record in at the end
+ * of the buffer, making sure we don't try and read before the start
+ * of the file. (We prefer positioning at the end because transaction
+ * aborts use DB_SET to move backward through the log and we might get
+ * lucky.)
+ *
+ * Read a buffer's worth, without reading past the logical EOF. The
+ * last_lsn may be a zero LSN, but that's OK, the test works anyway.
+ */
+ if (flags == DB_FIRST || flags == DB_NEXT)
+ offset = lsn->offset;
+ else if (lsn->offset + hdr->len < logc->bp_size)
+ offset = 0;
+ else
+ offset = (lsn->offset + hdr->len) - logc->bp_size;
+
+ nr = logc->bp_size;
+ if (lsn->file == last_lsn->file && offset + nr >= last_lsn->offset)
+ nr = last_lsn->offset - offset;
+
+ if ((ret =
+ __log_c_io(logc, lsn->file, offset, logc->bp, &nr, eofp)) != 0)
+ return (ret);
+
+ /*
+ * We should have at least gotten the bytes up-to-and-including the
+ * record we're reading.
+ */
+ if (nr < (lsn->offset + hdr->len) - offset)
+ return (__log_c_shortread(logc, 1));
+
+ /* Set up the return information. */
+ logc->bp_rlen = (u_int32_t)nr;
+ logc->bp_lsn.file = lsn->file;
+ logc->bp_lsn.offset = offset;
+ *pp = logc->bp + (lsn->offset - offset);
+
+ return (0);
+}
+
+/*
+ * __log_c_hdrchk --
+ *
+ * Check for corrupted HDRs before we use them to allocate memory or find
+ * records.
+ *
+ * If the log files were pre-allocated, a zero-filled HDR structure is the
+ * logical file end. However, we can see buffers filled with 0's during
+ * recovery, too (because multiple log buffers were written asynchronously,
+ * and one made it to disk before a different one that logically precedes
+ * it in the log file).
+ *
+ * XXX
+ * I think there's a potential pre-allocation recovery flaw here -- if we
+ * fail to write a buffer at the end of a log file (by scheduling its
+ * write asynchronously, and it never making it to disk), then succeed in
+ * writing a log file block to a subsequent log file, I don't think we will
+ * detect that the buffer of 0's should have marked the end of the log files
+ * during recovery. I think we may need to always write some garbage after
+ * each block write if we pre-allocate log files. (At the moment, we do not
+ * pre-allocate, so this isn't currently an issue.)
+ *
+ * Check for impossibly large records. The malloc should fail later, but we
+ * have customers that run mallocs that treat all allocation failures as fatal
+ * errors.
+ *
+ * Note that none of this is necessarily something awful happening. We let
+ * the application hand us any LSN they want, and it could be a pointer into
+ * the middle of a log record, there's no way to tell.
+ */
+static int
+__log_c_hdrchk(logc, hdr, eofp)
+ DB_LOGC *logc;
+ HDR *hdr;
+ int *eofp;
+{
+ DB_ENV *dbenv;
+ int ret;
+
+ dbenv = logc->dbenv;
+
+ /* Sanity check the log record's size. */
+ if (hdr->len <= hdr->size)
+ goto err;
+ /*
+ * If the cursor's max-record value isn't yet set, it means we aren't
+ * reading these records from a log file and no check is necessary.
+ */
+ if (logc->bp_maxrec != 0 && hdr->len > logc->bp_maxrec) {
+ /*
+ * If we fail the check, there's the pathological case that
+ * we're reading the last file, it's growing, and our initial
+ * check information was wrong. Get it again, to be sure.
+ */
+ if ((ret = __log_c_set_maxrec(logc, NULL)) != 0) {
+ __db_err(dbenv, "DB_LOGC->get: %s", db_strerror(ret));
+ return (ret);
+ }
+ if (logc->bp_maxrec != 0 && hdr->len > logc->bp_maxrec)
+ goto err;
+ }
+
+ if (eofp != NULL) {
+ if (hdr->prev == 0 && hdr->chksum[0] == 0 && hdr->len == 0) {
+ *eofp = 1;
+ return (0);
+ }
+ *eofp = 0;
+ }
return (0);
-corrupt:/*
- * This is the catchall -- for some reason we didn't find enough
- * information or it wasn't reasonable information, and it wasn't
- * because a system call failed.
+err: if (!F_ISSET(logc, DB_LOG_SILENT_ERR))
+ __db_err(dbenv, "DB_LOGC->get: invalid log record header");
+ return (EIO);
+}
+
+/*
+ * __log_c_io --
+ * Read records from a log file.
+ */
+static int
+__log_c_io(logc, fnum, offset, p, nrp, eofp)
+ DB_LOGC *logc;
+ u_int32_t fnum, offset;
+ void *p;
+ size_t *nrp;
+ int *eofp;
+{
+ DB_ENV *dbenv;
+ DB_LOG *dblp;
+ int ret;
+ char *np;
+
+ dbenv = logc->dbenv;
+ dblp = dbenv->lg_handle;
+
+ /*
+ * If we've switched files, discard the current file handle and acquire
+ * a new one.
*/
- ret = EIO;
- fail = "read";
+ if (F_ISSET(logc->c_fh, DB_FH_VALID) && logc->bp_lsn.file != fnum)
+ if ((ret = __os_closehandle(dbenv, logc->c_fh)) != 0)
+ return (ret);
+ if (!F_ISSET(logc->c_fh, DB_FH_VALID)) {
+ if ((ret = __log_name(dblp, fnum,
+ &np, logc->c_fh, DB_OSO_RDONLY | DB_OSO_SEQ)) != 0) {
+ /*
+ * If we're allowed to return EOF, assume that's the
+ * problem, set the EOF status flag and return 0.
+ */
+ if (eofp != NULL) {
+ *eofp = 1;
+ ret = 0;
+ } else if (!F_ISSET(logc, DB_LOG_SILENT_ERR))
+ __db_err(dbenv, "DB_LOGC->get: %s: %s",
+ np, db_strerror(ret));
+ __os_free(dbenv, np);
+ return (ret);
+ }
-err1: if (!silent) {
- if (fail == NULL)
- __db_err(dbenv, "log_get: %s", db_strerror(ret));
- else
+ if ((ret = __log_c_set_maxrec(logc, np)) != 0) {
__db_err(dbenv,
- "log_get: %s: %s", fail, db_strerror(ret));
+ "DB_LOGC->get: %s: %s", np, db_strerror(ret));
+ __os_free(dbenv, np);
+ return (ret);
+ }
+ __os_free(dbenv, np);
}
-err2: if (np != NULL)
- __os_freestr(np);
- if (tbuf != NULL)
- __os_free(tbuf, 0);
- return (ret);
+ /* Seek to the record's offset. */
+ if ((ret = __os_seek(dbenv,
+ logc->c_fh, 0, 0, offset, 0, DB_OS_SEEK_SET)) != 0) {
+ if (!F_ISSET(logc, DB_LOG_SILENT_ERR))
+ __db_err(dbenv,
+ "DB_LOGC->get: seek: %s", db_strerror(ret));
+ return (ret);
+ }
+
+ /* Read the data. */
+ if ((ret = __os_read(dbenv, logc->c_fh, p, *nrp, nrp)) != 0) {
+ if (!F_ISSET(logc, DB_LOG_SILENT_ERR))
+ __db_err(dbenv,
+ "DB_LOGC->get: read: %s", db_strerror(ret));
+ return (ret);
+ }
+
+ return (0);
+}
+
+/*
+ * __log_c_shortread --
+ * Read was short -- return a consistent error message and error.
+ */
+static int
+__log_c_shortread(logc, silent)
+ DB_LOGC *logc;
+ int silent;
+{
+ if (!silent || !F_ISSET(logc, DB_LOG_SILENT_ERR))
+ __db_err(logc->dbenv, "DB_LOGC->get: short read");
+ return (EIO);
+}
+
+/*
+ * __log_c_set_maxrec --
+ * Bound the maximum log record size in a log file.
+ */
+static int
+__log_c_set_maxrec(logc, np)
+ DB_LOGC *logc;
+ char *np;
+{
+ DB_ENV *dbenv;
+ DB_LOG *dblp;
+ LOG *lp;
+ u_int32_t mbytes, bytes;
+ int ret;
+
+ dbenv = logc->dbenv;
+ dblp = dbenv->lg_handle;
+
+ /*
+ * We don't want to try and allocate huge chunks of memory because
+ * applications with error-checking malloc's often consider that a
+ * hard failure. If we're about to look at a corrupted record with
+ * a bizarre size, we need to know before trying to allocate space
+ * to hold it. We could read the persistent data at the beginning
+ * of the file but that's hard -- we may have to decrypt it, checksum
+ * it and so on. Stat the file instead.
+ */
+ if ((ret =
+ __os_ioinfo(dbenv, np, logc->c_fh, &mbytes, &bytes, NULL)) != 0)
+ return (ret);
+
+ logc->bp_maxrec = mbytes * MEGABYTE + bytes;
+
+ /*
+ * If reading from the log file currently being written, we could get
+ * an incorrect size, that is, if the cursor was opened on the file
+ * when it had only a few hundred bytes, and then the cursor used to
+ * move forward in the file, after more log records were written, the
+ * original stat value would be wrong. Use the maximum of the current
+ * log file size and the size of the buffer -- that should represent
+ * the max of any log record currently in the file.
+ *
+ * The log buffer size is set when the environment is opened and never
+ * changed, we don't need a lock on it.
+ */
+ lp = dblp->reginfo.primary;
+ logc->bp_maxrec += lp->buffer_size;
+
+ return (0);
}
diff --git a/bdb/log/log_method.c b/bdb/log/log_method.c
index 883f485d891..42adaf11c6c 100644
--- a/bdb/log/log_method.c
+++ b/bdb/log/log_method.c
@@ -1,38 +1,39 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1999, 2000
+ * Copyright (c) 1999-2002
* Sleepycat Software. All rights reserved.
*/
#include "db_config.h"
#ifndef lint
-static const char revid[] = "$Id: log_method.c,v 11.14 2000/11/30 00:58:40 ubell Exp $";
+static const char revid[] = "$Id: log_method.c,v 11.32 2002/05/30 22:16:47 bostic Exp $";
#endif /* not lint */
#ifndef NO_SYSTEM_INCLUDES
#include <sys/types.h>
+#ifdef HAVE_RPC
+#include <rpc/rpc.h>
+#endif
+
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#endif
-#ifdef HAVE_RPC
-#include "db_server.h"
-#endif
-
#include "db_int.h"
-#include "log.h"
+#include "dbinc/log.h"
#ifdef HAVE_RPC
-#include "gen_client_ext.h"
-#include "rpc_client_ext.h"
+#include "dbinc_auto/db_server.h"
+#include "dbinc_auto/rpc_client_ext.h"
#endif
-static int __log_set_lg_max __P((DB_ENV *, u_int32_t));
static int __log_set_lg_bsize __P((DB_ENV *, u_int32_t));
static int __log_set_lg_dir __P((DB_ENV *, const char *));
+static int __log_set_lg_max __P((DB_ENV *, u_int32_t));
+static int __log_set_lg_regionmax __P((DB_ENV *, u_int32_t));
/*
* __log_dbenv_create --
@@ -44,13 +45,16 @@ void
__log_dbenv_create(dbenv)
DB_ENV *dbenv;
{
- dbenv->lg_bsize = LG_BSIZE_DEFAULT;
- dbenv->set_lg_bsize = __log_set_lg_bsize;
+ /*
+ * !!!
+ * Our caller has not yet had the opportunity to reset the panic
+ * state or turn off mutex locking, and so we can neither check
+ * the panic state or acquire a mutex in the DB_ENV create path.
+ */
- dbenv->lg_max = LG_MAX_DEFAULT;
- dbenv->set_lg_max = __log_set_lg_max;
+ dbenv->lg_bsize = LG_BSIZE_DEFAULT;
+ dbenv->lg_regionmax = LG_BASE_REGION_SIZE;
- dbenv->set_lg_dir = __log_set_lg_dir;
#ifdef HAVE_RPC
/*
* If we have a client, overwrite what we just setup to
@@ -58,10 +62,29 @@ __log_dbenv_create(dbenv)
*/
if (F_ISSET(dbenv, DB_ENV_RPCCLIENT)) {
dbenv->set_lg_bsize = __dbcl_set_lg_bsize;
- dbenv->set_lg_max = __dbcl_set_lg_max;
dbenv->set_lg_dir = __dbcl_set_lg_dir;
- }
+ dbenv->set_lg_max = __dbcl_set_lg_max;
+ dbenv->set_lg_regionmax = __dbcl_set_lg_regionmax;
+ dbenv->log_archive = __dbcl_log_archive;
+ dbenv->log_cursor = __dbcl_log_cursor;
+ dbenv->log_file = __dbcl_log_file;
+ dbenv->log_flush = __dbcl_log_flush;
+ dbenv->log_put = __dbcl_log_put;
+ dbenv->log_stat = __dbcl_log_stat;
+ } else
#endif
+ {
+ dbenv->set_lg_bsize = __log_set_lg_bsize;
+ dbenv->set_lg_dir = __log_set_lg_dir;
+ dbenv->set_lg_max = __log_set_lg_max;
+ dbenv->set_lg_regionmax = __log_set_lg_regionmax;
+ dbenv->log_archive = __log_archive;
+ dbenv->log_cursor = __log_cursor;
+ dbenv->log_file = __log_file;
+ dbenv->log_flush = __log_flush;
+ dbenv->log_put = __log_put;
+ dbenv->log_stat = __log_stat;
+ }
}
/*
@@ -73,10 +96,16 @@ __log_set_lg_bsize(dbenv, lg_bsize)
DB_ENV *dbenv;
u_int32_t lg_bsize;
{
+ u_int32_t lg_max;
+
ENV_ILLEGAL_AFTER_OPEN(dbenv, "set_lg_bsize");
+ if (lg_bsize == 0)
+ lg_bsize = LG_BSIZE_DEFAULT;
+
/* Let's not be silly. */
- if (lg_bsize > dbenv->lg_max / 4) {
+ lg_max = dbenv->lg_size == 0 ? LG_MAX_DEFAULT : dbenv->lg_size;
+ if (lg_bsize > lg_max / 4) {
__db_err(dbenv, "log buffer size must be <= log file size / 4");
return (EINVAL);
}
@@ -94,15 +123,53 @@ __log_set_lg_max(dbenv, lg_max)
DB_ENV *dbenv;
u_int32_t lg_max;
{
- ENV_ILLEGAL_AFTER_OPEN(dbenv, "set_lg_max");
+ LOG *region;
+
+ if (lg_max == 0)
+ lg_max = LG_MAX_DEFAULT;
+
+ if (F_ISSET(dbenv, DB_ENV_OPEN_CALLED)) {
+ if (!LOGGING_ON(dbenv))
+ return (__db_env_config(
+ dbenv, "set_lg_max", DB_INIT_LOG));
+ region = ((DB_LOG *)dbenv->lg_handle)->reginfo.primary;
+
+ /* Let's not be silly. */
+ if (lg_max < region->buffer_size * 4)
+ goto err;
+ region->log_nsize = lg_max;
+ } else {
+ /* Let's not be silly. */
+ if (lg_max < dbenv->lg_bsize * 4)
+ goto err;
+ dbenv->lg_size = lg_max;
+ }
+
+ return (0);
+
+err: __db_err(dbenv, "log file size must be >= log buffer size * 4");
+ return (EINVAL);
+}
+
+/*
+ * __log_set_lg_regionmax --
+ * Set the region size.
+ */
+static int
+__log_set_lg_regionmax(dbenv, lg_regionmax)
+ DB_ENV *dbenv;
+ u_int32_t lg_regionmax;
+{
+ ENV_ILLEGAL_AFTER_OPEN(dbenv, "set_lg_regionmax");
/* Let's not be silly. */
- if (lg_max < dbenv->lg_bsize * 4) {
- __db_err(dbenv, "log file size must be >= log buffer size * 4");
+ if (lg_regionmax != 0 && lg_regionmax < LG_BASE_REGION_SIZE) {
+ __db_err(dbenv,
+ "log file size must be >= %d", LG_BASE_REGION_SIZE);
return (EINVAL);
}
- dbenv->lg_max = lg_max;
+ dbenv->lg_regionmax = lg_regionmax;
return (0);
}
@@ -116,6 +183,6 @@ __log_set_lg_dir(dbenv, dir)
const char *dir;
{
if (dbenv->db_log_dir != NULL)
- __os_freestr(dbenv->db_log_dir);
+ __os_free(dbenv, dbenv->db_log_dir);
return (__os_strdup(dbenv, dir, &dbenv->db_log_dir));
}
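The checks above tie the two sizes together: the log buffer must be at most a quarter of the log file size, whichever method is called first. A minimal configuration sketch run before DB_ENV->open; the sizes are arbitrary example values:

#include <db.h>

/* Configure log file and buffer sizes (sketch; values are examples). */
int
configure_log(DB_ENV *dbenv)
{
	int ret;

	/* 10MB log files, 256KB log buffer: buffer <= file size / 4. */
	if ((ret = dbenv->set_lg_max(dbenv, 10 * 1024 * 1024)) != 0)
		return (ret);
	return (dbenv->set_lg_bsize(dbenv, 256 * 1024));
}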
diff --git a/bdb/log/log_put.c b/bdb/log/log_put.c
index c61f53e6c3d..bf6de2b0f7b 100644
--- a/bdb/log/log_put.c
+++ b/bdb/log/log_put.c
@@ -1,13 +1,13 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Copyright (c) 1996-2002
* Sleepycat Software. All rights reserved.
*/
#include "db_config.h"
#ifndef lint
-static const char revid[] = "$Id: log_put.c,v 11.26 2000/11/30 00:58:40 ubell Exp $";
+static const char revid[] = "$Id: log_put.c,v 11.112 2002/09/10 02:39:26 bostic Exp $";
#endif /* not lint */
#ifndef NO_SYSTEM_INCLUDES
@@ -29,109 +29,424 @@ static const char revid[] = "$Id: log_put.c,v 11.26 2000/11/30 00:58:40 ubell Ex
#include <unistd.h>
#endif
-#ifdef HAVE_RPC
-#include "db_server.h"
-#endif
-
#include "db_int.h"
-#include "db_page.h"
-#include "log.h"
-#include "hash.h"
-#include "clib_ext.h"
-
-#ifdef HAVE_RPC
-#include "gen_client_ext.h"
-#include "rpc_client_ext.h"
-#endif
+#include "dbinc/crypto.h"
+#include "dbinc/hmac.h"
+#include "dbinc/log.h"
+#include "dbinc/rep.h"
+#include "dbinc/txn.h"
+static int __log_encrypt_record __P((DB_ENV *, DBT *, HDR *, u_int32_t));
static int __log_fill __P((DB_LOG *, DB_LSN *, void *, u_int32_t));
-static int __log_flush __P((DB_LOG *, const DB_LSN *));
+static int __log_flush_commit __P((DB_ENV *, const DB_LSN *, u_int32_t));
+static int __log_flush_int __P((DB_LOG *, const DB_LSN *, int));
static int __log_newfh __P((DB_LOG *));
-static int __log_putr __P((DB_LOG *, DB_LSN *, const DBT *, u_int32_t));
-static int __log_open_files __P((DB_ENV *));
+static int __log_put_next __P((DB_ENV *,
+ DB_LSN *, const DBT *, HDR *, DB_LSN *));
+static int __log_putr __P((DB_LOG *,
+ DB_LSN *, const DBT *, u_int32_t, HDR *));
static int __log_write __P((DB_LOG *, void *, u_int32_t));
/*
- * log_put --
- * Write a log record.
+ * __log_put --
+ * Write a log record. This is the public interface, DB_ENV->log_put.
+ *
+ * PUBLIC: int __log_put __P((DB_ENV *, DB_LSN *, const DBT *, u_int32_t));
*/
int
-log_put(dbenv, lsn, dbt, flags)
+__log_put(dbenv, lsnp, udbt, flags)
DB_ENV *dbenv;
- DB_LSN *lsn;
- const DBT *dbt;
+ DB_LSN *lsnp;
+ const DBT *udbt;
u_int32_t flags;
{
+ DB_CIPHER *db_cipher;
+ DBT *dbt, t;
DB_LOG *dblp;
- int ret;
-
-#ifdef HAVE_RPC
- if (F_ISSET(dbenv, DB_ENV_RPCCLIENT))
- return (__dbcl_log_put(dbenv, lsn, dbt, flags));
-#endif
+ DB_LSN lsn, old_lsn;
+ HDR hdr;
+ LOG *lp;
+ u_int32_t do_flush, op, writeonly;
+ int lock_held, need_free, ret;
+ u_int8_t *key;
PANIC_CHECK(dbenv);
- ENV_REQUIRES_CONFIG(dbenv, dbenv->lg_handle, DB_INIT_LOG);
+ ENV_REQUIRES_CONFIG(dbenv,
+ dbenv->lg_handle, "DB_ENV->log_put", DB_INIT_LOG);
/* Validate arguments. */
- if (flags != 0 && flags != DB_CHECKPOINT &&
- flags != DB_CURLSN && flags != DB_FLUSH)
- return (__db_ferr(dbenv, "log_put", 0));
+ op = DB_OPFLAGS_MASK & flags;
+ if (op != 0 && op != DB_COMMIT)
+ return (__db_ferr(dbenv, "DB_ENV->log_put", 0));
+
+ /* Check for allowed bit-flags. */
+ if (LF_ISSET(~(DB_OPFLAGS_MASK |
+ DB_FLUSH | DB_NOCOPY | DB_PERMANENT | DB_WRNOSYNC)))
+ return (__db_ferr(dbenv, "DB_ENV->log_put", 0));
+
+ /* DB_WRNOSYNC and DB_FLUSH are mutually exclusive. */
+ if (LF_ISSET(DB_WRNOSYNC) && LF_ISSET(DB_FLUSH))
+ return (__db_ferr(dbenv, "DB_ENV->log_put", 1));
+
+ /* Replication clients should never write log records. */
+ if (F_ISSET(dbenv, DB_ENV_REP_CLIENT) ||
+ F_ISSET(dbenv, DB_ENV_REP_LOGSONLY)) {
+ __db_err(dbenv,
+ "DB_ENV->log_put is illegal on replication clients");
+ return (EINVAL);
+ }
dblp = dbenv->lg_handle;
+ lp = dblp->reginfo.primary;
+ db_cipher = dbenv->crypto_handle;
+ dbt = &t;
+ t = *udbt;
+ lock_held = need_free = 0;
+ do_flush = LF_ISSET(DB_FLUSH);
+ writeonly = LF_ISSET(DB_WRNOSYNC);
+
+ /*
+ * If we are coming from the logging code, we use an internal
+ * flag, DB_NOCOPY, because we know we can overwrite/encrypt
+ * the log record in place. Otherwise, if a user called log_put
+ * then we must copy it to new memory so that we know we can
+ * write it.
+ *
+ * We also must copy it to new memory if we are a replication
+ * master so that we retain an unencrypted copy of the log
+ * record to send to clients.
+ */
+ if (!LF_ISSET(DB_NOCOPY) || F_ISSET(dbenv, DB_ENV_REP_MASTER)) {
+ if (CRYPTO_ON(dbenv))
+ t.size += db_cipher->adj_size(udbt->size);
+ if ((ret = __os_calloc(dbenv, 1, t.size, &t.data)) != 0)
+ goto err;
+ need_free = 1;
+ memcpy(t.data, udbt->data, udbt->size);
+ }
+ if ((ret = __log_encrypt_record(dbenv, dbt, &hdr, udbt->size)) != 0)
+ goto err;
+ if (CRYPTO_ON(dbenv))
+ key = db_cipher->mac_key;
+ else
+ key = NULL;
+ /* Otherwise, we actually have a record to put. Put it. */
+
+ /* Before we grab the region lock, calculate the record's checksum. */
+ __db_chksum(dbt->data, dbt->size, key, hdr.chksum);
+
R_LOCK(dbenv, &dblp->reginfo);
- ret = __log_put(dbenv, lsn, dbt, flags);
- R_UNLOCK(dbenv, &dblp->reginfo);
+ lock_held = 1;
+
+ ZERO_LSN(old_lsn);
+ if ((ret = __log_put_next(dbenv, &lsn, dbt, &hdr, &old_lsn)) != 0)
+ goto err;
+
+ if (F_ISSET(dbenv, DB_ENV_REP_MASTER)) {
+ /*
+ * Replication masters need to drop the lock to send
+ * messages, but we want to drop and reacquire it a minimal
+ * number of times.
+ */
+ R_UNLOCK(dbenv, &dblp->reginfo);
+ lock_held = 0;
+
+ /*
+ * If we changed files and we're in a replicated
+ * environment, we need to inform our clients now that
+ * we've dropped the region lock.
+ *
+ * Note that a failed NEWFILE send is a dropped message
+ * that our client can handle, so we can ignore it. It's
+ * possible that the record we already put is a commit, so
+ * we don't just want to return failure.
+ */
+ if (!IS_ZERO_LSN(old_lsn))
+ (void)__rep_send_message(dbenv,
+ DB_EID_BROADCAST, REP_NEWFILE, &old_lsn, NULL, 0);
+
+ /*
+ * Then send the log record itself on to our clients.
+ *
+ * If the send fails and we're a commit or checkpoint,
+ * there's nothing we can do; the record's in the log.
+ * Flush it, even if we're running with TXN_NOSYNC, on the
+ * grounds that it should be in durable form somewhere.
+ */
+ /*
+ * !!!
+ * In the crypto case, we MUST send the udbt, not the
+ * now-encrypted dbt. Clients have no way to decrypt
+ * without the header.
+ */
+ if ((__rep_send_message(dbenv,
+ DB_EID_BROADCAST, REP_LOG, &lsn, udbt, flags) != 0) &&
+ LF_ISSET(DB_PERMANENT))
+ do_flush |= DB_FLUSH;
+ }
+
+ /*
+ * If needed, do a flush. Note that failures at this point
+ * are only permissible if we know we haven't written a commit
+ * record; __log_flush_commit is responsible for enforcing this.
+ *
+ * If a flush is not needed, see if WRITE_NOSYNC was set and we
+ * need to write out the log buffer.
+ */
+ if (do_flush || writeonly) {
+ if (!lock_held) {
+ R_LOCK(dbenv, &dblp->reginfo);
+ lock_held = 1;
+ }
+ if (do_flush)
+ ret = __log_flush_commit(dbenv, &lsn, flags);
+ else if (lp->b_off != 0)
+ /*
+ * writeonly: if there's anything in the current
+ * log buffer, we need to write it out.
+ */
+ if ((ret = __log_write(dblp,
+ dblp->bufp, (u_int32_t)lp->b_off)) == 0)
+ lp->b_off = 0;
+ }
+
+err: if (lock_held)
+ R_UNLOCK(dbenv, &dblp->reginfo);
+ if (need_free)
+ __os_free(dbenv, dbt->data);
+
+ if (ret == 0)
+ *lsnp = lsn;
+
return (ret);
}
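For reference, the public entry point validated above can also be driven directly by an application that wants its own records in the environment log. A minimal sketch; the payload and function name are placeholders:

#include <string.h>
#include <db.h>

/* Append an application record to the log and force it to disk. */
int
put_app_record(DB_ENV *dbenv, void *data, u_int32_t size)
{
	DBT rec;
	DB_LSN lsn;

	memset(&rec, 0, sizeof(rec));
	rec.data = data;
	rec.size = size;

	/* DB_FLUSH flushes the log buffer to stable storage. */
	return (dbenv->log_put(dbenv, &lsn, &rec, DB_FLUSH));
}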
/*
- * __log_put --
- * Write a log record; internal version.
+ * __log_txn_lsn --
*
- * PUBLIC: int __log_put __P((DB_ENV *, DB_LSN *, const DBT *, u_int32_t));
+ * PUBLIC: void __log_txn_lsn
+ * PUBLIC: __P((DB_ENV *, DB_LSN *, u_int32_t *, u_int32_t *));
*/
-int
-__log_put(dbenv, lsn, dbt, flags)
+void
+__log_txn_lsn(dbenv, lsnp, mbytesp, bytesp)
+ DB_ENV *dbenv;
+ DB_LSN *lsnp;
+ u_int32_t *mbytesp, *bytesp;
+{
+ DB_LOG *dblp;
+ LOG *lp;
+
+ dblp = dbenv->lg_handle;
+ lp = dblp->reginfo.primary;
+
+ R_LOCK(dbenv, &dblp->reginfo);
+
+ /*
+ * We are trying to get the LSN of the last entry in the log. We use
+ * this in two places: 1) DB_ENV->txn_checkpoint uses it as a first
+ * value when trying to compute an LSN such that all transactions begun
+ * before it are complete. 2) DB_ENV->txn_begin uses it as the
+ * begin_lsn.
+ *
+ * Typically, it's easy to get the last written LSN, you simply look
+ * at the current log pointer and back up the number of bytes of the
+ * last log record. However, if the last thing we did was write the
+ * log header of a new log file, then, this doesn't work, so we return
+ * the first log record that will be written in this new file.
+ */
+ *lsnp = lp->lsn;
+ if (lp->lsn.offset > lp->len)
+ lsnp->offset -= lp->len;
+
+ /*
+ * Since we're holding the log region lock, return the bytes put into
+ * the log since the last checkpoint, transaction checkpoint needs it.
+ *
+ * We add the current buffer offset so as to count bytes that have not
+ * yet been written, but are sitting in the log buffer.
+ */
+ if (mbytesp != NULL) {
+ *mbytesp = lp->stat.st_wc_mbytes;
+ *bytesp = (u_int32_t)(lp->stat.st_wc_bytes + lp->b_off);
+ }
+
+ R_UNLOCK(dbenv, &dblp->reginfo);
+}
+
+/*
+ * __log_put_next --
+ * Put the given record as the next in the log, wherever that may
+ * turn out to be.
+ */
+static int
+__log_put_next(dbenv, lsn, dbt, hdr, old_lsnp)
DB_ENV *dbenv;
DB_LSN *lsn;
const DBT *dbt;
- u_int32_t flags;
+ HDR *hdr;
+ DB_LSN *old_lsnp;
{
- DBT t;
DB_LOG *dblp;
+ DB_LSN old_lsn;
LOG *lp;
- u_int32_t lastoff;
- int ret;
+ int newfile, ret;
dblp = dbenv->lg_handle;
lp = dblp->reginfo.primary;
/*
- * If the application just wants to know where we are, fill in
- * the information. Currently used by the transaction manager
- * to avoid writing TXN_begin records.
+ * Save a copy of lp->lsn before we might decide to switch log
+ * files and change it. If we do switch log files, and we're
+ * doing replication, we'll need to tell our clients about the
+ * switch, and they need to receive a NEWFILE message
+ * with this "would-be" LSN in order to know they're not
+ * missing any log records.
*/
- if (flags == DB_CURLSN) {
- lsn->file = lp->lsn.file;
- lsn->offset = lp->lsn.offset;
- return (0);
- }
+ old_lsn = lp->lsn;
+ newfile = 0;
- /* If this information won't fit in the file, swap files. */
- if (lp->lsn.offset + sizeof(HDR) + dbt->size > lp->persist.lg_max) {
- if (sizeof(HDR) +
- sizeof(LOGP) + dbt->size > lp->persist.lg_max) {
+ /*
+ * If this information won't fit in the file, or if we're a
+ * replication client environment and have been told to do so,
+ * swap files.
+ */
+ if (lp->lsn.offset == 0 ||
+ lp->lsn.offset + hdr->size + dbt->size > lp->log_size) {
+ if (hdr->size + sizeof(LOGP) + dbt->size > lp->log_size) {
__db_err(dbenv,
- "log_put: record larger than maximum file size");
+ "DB_ENV->log_put: record larger than maximum file size");
return (EINVAL);
}
- /* Flush the log. */
- if ((ret = __log_flush(dblp, NULL)) != 0)
+ if ((ret = __log_newfile(dblp, NULL)) != 0)
return (ret);
/*
+ * Flag that we switched files, in case we're a master
+ * and need to send this information to our clients.
+ * We postpone doing the actual send until we can
+ * safely release the log region lock and are doing so
+ * anyway.
+ */
+ newfile = 1;
+
+ if (dbenv->db_noticecall != NULL)
+ dbenv->db_noticecall(dbenv, DB_NOTICE_LOGFILE_CHANGED);
+ }
+
+ /*
+ * The offset into the log file at this point is the LSN where
+ * we're about to put this record, and is the LSN the caller wants.
+ */
+ *lsn = lp->lsn;
+
+ /* If we switched log files, let our caller know where. */
+ if (newfile)
+ *old_lsnp = old_lsn;
+
+ /* Actually put the record. */
+ return (__log_putr(dblp, lsn, dbt, lp->lsn.offset - lp->len, hdr));
+}
+
+/*
+ * __log_flush_commit --
+ * Flush a record for which the DB_FLUSH flag to log_put has been set.
+ */
+static int
+__log_flush_commit(dbenv, lsnp, flags)
+ DB_ENV *dbenv;
+ const DB_LSN *lsnp;
+ u_int32_t flags;
+{
+ DB_LOG *dblp;
+ DB_LSN flush_lsn;
+ LOG *lp;
+ int ret;
+ u_int32_t op;
+
+ dblp = dbenv->lg_handle;
+ lp = dblp->reginfo.primary;
+ flush_lsn = *lsnp;
+ op = DB_OPFLAGS_MASK & flags;
+
+ if ((ret = __log_flush_int(dblp, &flush_lsn, 1)) == 0)
+ return (0);
+
+ /*
+ * If a flush supporting a transaction commit fails, we must abort the
+ * transaction. (If we aren't doing a commit, return the failure; if
+ * the commit we care about made it to disk successfully, we just
+ * ignore the failure, because there's no way to undo the commit.)
+ */
+ if (op != DB_COMMIT)
+ return (ret);
+
+ if (flush_lsn.file != lp->lsn.file || flush_lsn.offset < lp->w_off)
+ return (0);
+
+ /*
+ * Else, make sure that the commit record does not get out after we
+ * abort the transaction. Do this by overwriting the commit record
+ * in the buffer. (Note that other commits in this buffer will wait
+ * until a successful write happens; we do not wake them.) We
+ * point at the right part of the buffer and write an abort record
+ * over the commit. We must then try to flush the buffer again,
+ * since the interesting part of the buffer may have actually made
+ * it out to disk before there was a failure; we can't know for sure.
+ */
+ if (__txn_force_abort(dbenv,
+ dblp->bufp + flush_lsn.offset - lp->w_off) == 0)
+ (void)__log_flush_int(dblp, &flush_lsn, 0);
+
+ return (ret);
+}
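The early return above hinges on whether the failed commit record is still in the in-memory buffer: anything in the current file at or past the file's write offset has not reached disk and can still be overwritten with an abort record. A hedged sketch of that test, with invented names:

/* Illustrative sketch only: a record is still overwritable in the
 * log buffer when it is in the current file at or past w_off. */
static int
record_still_buffered(unsigned int rec_file, unsigned int rec_off,
    unsigned int cur_file, unsigned int w_off)
{
	return (rec_file == cur_file && rec_off >= w_off);
}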
+
+/*
+ * __log_newfile --
+ * Initialize and switch to a new log file. (Note that this is
+ * called both when no log yet exists and when we fill a log file.)
+ *
+ * PUBLIC: int __log_newfile __P((DB_LOG *, DB_LSN *));
+ */
+int
+__log_newfile(dblp, lsnp)
+ DB_LOG *dblp;
+ DB_LSN *lsnp;
+{
+ DB_CIPHER *db_cipher;
+ DB_ENV *dbenv;
+ DB_LSN lsn;
+ DBT t;
+ HDR hdr;
+ LOG *lp;
+ int need_free, ret;
+ u_int32_t lastoff;
+ size_t tsize;
+ u_int8_t *tmp;
+
+ dbenv = dblp->dbenv;
+ lp = dblp->reginfo.primary;
+
+ /* If we're not at the beginning of a file already, start a new one. */
+ if (lp->lsn.offset != 0) {
+ /*
+ * Flush the log so this file is out and can be closed. We
+ * cannot release the region lock here because we need to
+ * protect the end of the file while we switch. In
+ * particular, a thread with a smaller record than ours
+ * could detect that there is space in the log. Even
+ * blocking that event by declaring the file full would
+ * require all threads to wait here so that the lsn.file
+ * can be moved ahead after the flush completes. This
+ * probably can be changed if we had an lsn for the
+ * previous file and one for the current, but it does not
+ * seem like this would get much more throughput, if any.
+ */
+ if ((ret = __log_flush_int(dblp, NULL, 0)) != 0)
+ return (ret);
+
+ DB_ASSERT(lp->b_off == 0);
+ /*
* Save the last known offset from the previous file, we'll
* need it to initialize the persistent header information.
*/
@@ -143,78 +458,50 @@ __log_put(dbenv, lsn, dbt, flags)
/* Reset the file write offset. */
lp->w_off = 0;
-
- if (dbenv->db_noticecall != NULL)
- dbenv->db_noticecall(dbenv, DB_NOTICE_LOGFILE_CHANGED);
} else
lastoff = 0;
- /* Initialize the LSN information returned to the user. */
- lsn->file = lp->lsn.file;
- lsn->offset = lp->lsn.offset;
-
/*
* Insert persistent information as the first record in every file.
* Note that the previous length is wrong for the very first record
* of the log, but that's okay, we check for it during retrieval.
*/
- if (lp->lsn.offset == 0) {
- t.data = &lp->persist;
- t.size = sizeof(LOGP);
- if ((ret = __log_putr(dblp, lsn,
- &t, lastoff == 0 ? 0 : lastoff - lp->len)) != 0)
- return (ret);
+ DB_ASSERT(lp->b_off == 0);
- /*
- * Record files open in this log.
- * If we are recovering then we are in the
- * process of outputting the files, don't do
- * it again.
- */
- if (!F_ISSET(dblp, DBLOG_RECOVER) &&
- (ret = __log_open_files(dbenv)) != 0)
- return (ret);
-
- /* Update the LSN information returned to the user. */
- lsn->file = lp->lsn.file;
- lsn->offset = lp->lsn.offset;
- }
+ memset(&t, 0, sizeof(t));
+ memset(&hdr, 0, sizeof(HDR));
- /* Write the application's log record. */
- if ((ret = __log_putr(dblp, lsn, dbt, lp->lsn.offset - lp->len)) != 0)
+ need_free = 0;
+ tsize = sizeof(LOGP);
+ db_cipher = dbenv->crypto_handle;
+ if (CRYPTO_ON(dbenv))
+ tsize += db_cipher->adj_size(tsize);
+ if ((ret = __os_calloc(dbenv, 1, tsize, &tmp)) != 0)
return (ret);
+ lp->persist.log_size = lp->log_size = lp->log_nsize;
+ memcpy(tmp, &lp->persist, sizeof(LOGP));
+ t.data = tmp;
+ t.size = (u_int32_t)tsize;
+ need_free = 1;
- /*
- * On a checkpoint, we:
- * Put out the checkpoint record (above).
- * Save the LSN of the checkpoint in the shared region.
- * Append the set of file name information into the log.
- */
- if (flags == DB_CHECKPOINT) {
- lp->chkpt_lsn = *lsn;
- if ((ret = __log_open_files(dbenv)) != 0)
- return (ret);
- }
+ if ((ret =
+ __log_encrypt_record(dbenv, &t, &hdr, (u_int32_t)tsize)) != 0)
+ goto err;
+ __db_chksum(t.data, t.size,
+ (CRYPTO_ON(dbenv)) ? db_cipher->mac_key : NULL, hdr.chksum);
+ lsn = lp->lsn;
+ if ((ret = __log_putr(dblp, &lsn,
+ &t, lastoff == 0 ? 0 : lastoff - lp->len, &hdr)) != 0)
+ goto err;
- /*
- * On a checkpoint or when flush is requested, we:
- * Flush the current buffer contents to disk.
- * Sync the log to disk.
- */
- if (flags == DB_FLUSH || flags == DB_CHECKPOINT)
- if ((ret = __log_flush(dblp, NULL)) != 0)
- return (ret);
+ /* Update the LSN information returned to the caller. */
+ if (lsnp != NULL)
+ *lsnp = lp->lsn;
- /*
- * On a checkpoint, we:
- * Save the time the checkpoint was written.
- * Reset the bytes written since the last checkpoint.
- */
- if (flags == DB_CHECKPOINT) {
- (void)time(&lp->chkpt);
- lp->stat.st_wc_bytes = lp->stat.st_wc_mbytes = 0;
- }
- return (0);
+err:
+ if (need_free)
+ __os_free(dbenv, tmp);
+ return (ret);
}
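When crypto is enabled, the persistent header written above is copied into a temporary buffer sized for the cipher's padding before being encrypted and checksummed. A small sketch of that sizing step, assuming a hypothetical callback (standing in for adj_size) that returns the extra bytes a block cipher needs:

#include <stdlib.h>
#include <string.h>

/* Illustrative sketch only: allocate a zeroed buffer with room for
 * cipher padding and copy the record into it. */
static void *
alloc_padded_record(const void *rec, size_t reclen,
    size_t (*pad_bytes)(size_t), size_t *outlenp)
{
	size_t tsize;
	void *p;

	tsize = reclen + (pad_bytes != NULL ? pad_bytes(reclen) : 0);
	if ((p = calloc(1, tsize)) == NULL)
		return (NULL);
	memcpy(p, rec, reclen);
	*outlenp = tsize;
	return (p);
}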
/*
@@ -222,100 +509,253 @@ __log_put(dbenv, lsn, dbt, flags)
* Actually put a record into the log.
*/
static int
-__log_putr(dblp, lsn, dbt, prev)
+__log_putr(dblp, lsn, dbt, prev, h)
DB_LOG *dblp;
DB_LSN *lsn;
const DBT *dbt;
u_int32_t prev;
+ HDR *h;
{
- HDR hdr;
+ DB_CIPHER *db_cipher;
+ DB_ENV *dbenv;
+ DB_LSN f_lsn;
LOG *lp;
- int ret;
+ HDR tmp, *hdr;
+ int ret, t_ret;
+ size_t b_off, nr;
+ u_int32_t w_off;
+ dbenv = dblp->dbenv;
lp = dblp->reginfo.primary;
/*
+ * If we weren't given a header, use a local one.
+ */
+ db_cipher = dbenv->crypto_handle;
+ if (h == NULL) {
+ hdr = &tmp;
+ memset(hdr, 0, sizeof(HDR));
+ if (CRYPTO_ON(dbenv))
+ hdr->size = HDR_CRYPTO_SZ;
+ else
+ hdr->size = HDR_NORMAL_SZ;
+ } else
+ hdr = h;
+
+ /* Save our position in case we fail. */
+ b_off = lp->b_off;
+ w_off = lp->w_off;
+ f_lsn = lp->f_lsn;
+
+ /*
* Initialize the header. If we just switched files, lsn.offset will
* be 0, and what we really want is the offset of the previous record
* in the previous file. Fortunately, prev holds the value we want.
*/
- hdr.prev = prev;
- hdr.len = sizeof(HDR) + dbt->size;
- hdr.cksum = __ham_func4(NULL, dbt->data, dbt->size);
+ hdr->prev = prev;
+ hdr->len = (u_int32_t)hdr->size + dbt->size;
- if ((ret = __log_fill(dblp, lsn, &hdr, sizeof(HDR))) != 0)
- return (ret);
- lp->len = sizeof(HDR);
- lp->lsn.offset += sizeof(HDR);
+ /*
+ * If we were passed in a nonzero checksum, our caller calculated
+ * the checksum before acquiring the log mutex, as an optimization.
+ *
+ * If our caller calculated a real checksum of 0, we'll needlessly
+ * recalculate it. C'est la vie; there's no out-of-bounds value
+ * here.
+ */
+ if (hdr->chksum[0] == 0)
+ __db_chksum(dbt->data, dbt->size,
+ (CRYPTO_ON(dbenv)) ? db_cipher->mac_key : NULL,
+ hdr->chksum);
+
+ if ((ret = __log_fill(dblp, lsn, hdr, (u_int32_t)hdr->size)) != 0)
+ goto err;
if ((ret = __log_fill(dblp, lsn, dbt->data, dbt->size)) != 0)
- return (ret);
- lp->len += dbt->size;
- lp->lsn.offset += dbt->size;
+ goto err;
+
+ lp->len = (u_int32_t)(hdr->size + dbt->size);
+ lp->lsn.offset += (u_int32_t)(hdr->size + dbt->size);
return (0);
+err:
+ /*
+ * If we wrote more than one buffer before failing, get the
+ * first one back. The extra buffers will fail the checksums
+ * and be ignored.
+ */
+ if (w_off + lp->buffer_size < lp->w_off) {
+ if ((t_ret =
+ __os_seek(dbenv,
+ &dblp->lfh, 0, 0, w_off, 0, DB_OS_SEEK_SET)) != 0 ||
+ (t_ret = __os_read(dbenv, &dblp->lfh, dblp->bufp,
+ b_off, &nr)) != 0)
+ return (__db_panic(dbenv, t_ret));
+ if (nr != b_off) {
+ __db_err(dbenv, "Short read while restoring log");
+ return (__db_panic(dbenv, EIO));
+ }
+ }
+
+ /* Reset to where we started. */
+ lp->w_off = w_off;
+ lp->b_off = b_off;
+ lp->f_lsn = f_lsn;
+
+ return (ret);
}
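The error path above restores the buffer offset, write offset, and first-LSN snapshot taken at the top of the function, so a failed append leaves the log exactly where it started. A minimal sketch of that save-and-restore pattern, using invented names instead of the LOG fields:

#include <stddef.h>

/* Illustrative sketch only: snapshot the position, append header and
 * body, and roll back to the snapshot if either append fails. */
struct ex_pos { size_t b_off; unsigned int w_off; };

static int
append_with_rollback(struct ex_pos *pos,
    int (*fill)(struct ex_pos *, const void *, size_t),
    const void *hdr, size_t hdrlen, const void *rec, size_t reclen)
{
	struct ex_pos saved;
	int ret;

	saved = *pos;			/* Save our position in case we fail. */
	if ((ret = fill(pos, hdr, hdrlen)) != 0 ||
	    (ret = fill(pos, rec, reclen)) != 0)
		*pos = saved;		/* Reset to where we started. */
	return (ret);
}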
/*
- * log_flush --
+ * __log_flush --
* Write all records less than or equal to the specified LSN.
+ *
+ * PUBLIC: int __log_flush __P((DB_ENV *, const DB_LSN *));
*/
int
-log_flush(dbenv, lsn)
+__log_flush(dbenv, lsn)
DB_ENV *dbenv;
const DB_LSN *lsn;
{
DB_LOG *dblp;
int ret;
-#ifdef HAVE_RPC
- if (F_ISSET(dbenv, DB_ENV_RPCCLIENT))
- return (__dbcl_log_flush(dbenv, lsn));
-#endif
-
PANIC_CHECK(dbenv);
- ENV_REQUIRES_CONFIG(dbenv, dbenv->lg_handle, DB_INIT_LOG);
+ ENV_REQUIRES_CONFIG(dbenv,
+ dbenv->lg_handle, "DB_ENV->log_flush", DB_INIT_LOG);
dblp = dbenv->lg_handle;
R_LOCK(dbenv, &dblp->reginfo);
- ret = __log_flush(dblp, lsn);
+ ret = __log_flush_int(dblp, lsn, 1);
R_UNLOCK(dbenv, &dblp->reginfo);
return (ret);
}
/*
- * __log_flush --
+ * __log_flush_int --
* Write all records less than or equal to the specified LSN; internal
* version.
*/
static int
-__log_flush(dblp, lsn)
+__log_flush_int(dblp, lsnp, release)
DB_LOG *dblp;
- const DB_LSN *lsn;
+ const DB_LSN *lsnp;
+ int release;
{
- DB_LSN t_lsn;
+ DB_ENV *dbenv;
+ DB_LSN flush_lsn, f_lsn;
+ DB_MUTEX *flush_mutexp;
LOG *lp;
- int current, ret;
+ int current, do_flush, first, ret;
+ size_t b_off;
+ struct __db_commit *commit;
+ u_int32_t ncommit, w_off;
ret = 0;
+ ncommit = 0;
+ dbenv = dblp->dbenv;
lp = dblp->reginfo.primary;
+ flush_mutexp = R_ADDR(&dblp->reginfo, lp->flush_mutex_off);
/*
* If no LSN specified, flush the entire log by setting the flush LSN
* to the last LSN written in the log. Otherwise, check that the LSN
* isn't a non-existent record for the log.
*/
- if (lsn == NULL) {
- t_lsn.file = lp->lsn.file;
- t_lsn.offset = lp->lsn.offset - lp->len;
- lsn = &t_lsn;
- } else
- if (lsn->file > lp->lsn.file ||
- (lsn->file == lp->lsn.file &&
- lsn->offset > lp->lsn.offset - lp->len)) {
- __db_err(dblp->dbenv,
- "log_flush: LSN past current end-of-log");
- return (EINVAL);
- }
+ if (lsnp == NULL) {
+ flush_lsn.file = lp->lsn.file;
+ flush_lsn.offset = lp->lsn.offset - lp->len;
+ } else if (lsnp->file > lp->lsn.file ||
+ (lsnp->file == lp->lsn.file &&
+ lsnp->offset > lp->lsn.offset - lp->len)) {
+ __db_err(dbenv,
+ "DB_ENV->log_flush: LSN past current end-of-log");
+ return (EINVAL);
+ } else {
+ /*
+ * See if we need to wait. s_lsn is not locked so some
+ * care is needed. The sync point can only move forward.
+ * If the file we want is in the past we are done.
+ * If the file numbers are the same check the offset.
+ * If this fails check the file numbers again since the
+ * offset might have changed while we were looking.
+ * This all assumes we can read an integer in one
+ * state or the other, not in transition.
+ */
+ if (lp->s_lsn.file > lsnp->file)
+ return (0);
+
+ if (lp->s_lsn.file == lsnp->file &&
+ lp->s_lsn.offset > lsnp->offset)
+ return (0);
+
+ if (lp->s_lsn.file > lsnp->file)
+ return (0);
+
+ flush_lsn = *lsnp;
+ }
+
+ /*
+ * If a flush is in progress and we're allowed to do so, drop
+ * the region lock and block waiting for the next flush.
+ */
+ if (release && lp->in_flush != 0) {
+ if ((commit = SH_TAILQ_FIRST(
+ &lp->free_commits, __db_commit)) == NULL) {
+ if ((ret =
+ __db_shalloc(dblp->reginfo.addr,
+ sizeof(struct __db_commit),
+ MUTEX_ALIGN, &commit)) != 0)
+ goto flush;
+ memset(commit, 0, sizeof(*commit));
+ if ((ret = __db_mutex_setup(dbenv, &dblp->reginfo,
+ &commit->mutex, MUTEX_SELF_BLOCK |
+ MUTEX_NO_RLOCK)) != 0) {
+ __db_shalloc_free(dblp->reginfo.addr, commit);
+ return (ret);
+ }
+ MUTEX_LOCK(dbenv, &commit->mutex);
+ } else
+ SH_TAILQ_REMOVE(
+ &lp->free_commits, commit, links, __db_commit);
+
+ lp->ncommit++;
+
+ /*
+ * Flushes may be requested out of LSN order; be
+ * sure we only move lp->t_lsn forward.
+ */
+ if (log_compare(&lp->t_lsn, &flush_lsn) < 0)
+ lp->t_lsn = flush_lsn;
+
+ commit->lsn = flush_lsn;
+ SH_TAILQ_INSERT_HEAD(
+ &lp->commits, commit, links, __db_commit);
+ R_UNLOCK(dbenv, &dblp->reginfo);
+ /* Wait here for the in-progress flush to finish. */
+ MUTEX_LOCK(dbenv, &commit->mutex);
+ R_LOCK(dbenv, &dblp->reginfo);
+
+ lp->ncommit--;
+ /*
+ * Grab the flag before freeing the struct to see if
+ * we need to flush the log to commit. If so,
+ * use the maximal lsn for any committing thread.
+ */
+ do_flush = F_ISSET(commit, DB_COMMIT_FLUSH);
+ F_CLR(commit, DB_COMMIT_FLUSH);
+ SH_TAILQ_INSERT_HEAD(
+ &lp->free_commits, commit, links, __db_commit);
+ if (do_flush) {
+ lp->in_flush--;
+ flush_lsn = lp->t_lsn;
+ } else
+ return (0);
+ }
+
+ /*
+ * Protect flushing with its own mutex so we can release
+ * the region lock except during file switches.
+ */
+flush: MUTEX_LOCK(dbenv, flush_mutexp);
/*
* If the LSN is less than or equal to the last-sync'd LSN, we're done.
@@ -323,9 +763,12 @@ __log_flush(dblp, lsn)
* after the byte we absolutely know was written to disk, so the test
* is <, not <=.
*/
- if (lsn->file < lp->s_lsn.file ||
- (lsn->file == lp->s_lsn.file && lsn->offset < lp->s_lsn.offset))
- return (0);
+ if (flush_lsn.file < lp->s_lsn.file ||
+ (flush_lsn.file == lp->s_lsn.file &&
+ flush_lsn.offset < lp->s_lsn.offset)) {
+ MUTEX_UNLOCK(dbenv, flush_mutexp);
+ goto done;
+ }
/*
* We may need to write the current buffer. We have to write the
@@ -333,9 +776,12 @@ __log_flush(dblp, lsn)
* buffer's starting LSN.
*/
current = 0;
- if (lp->b_off != 0 && log_compare(lsn, &lp->f_lsn) >= 0) {
- if ((ret = __log_write(dblp, dblp->bufp, lp->b_off)) != 0)
- return (ret);
+ if (lp->b_off != 0 && log_compare(&flush_lsn, &lp->f_lsn) >= 0) {
+ if ((ret = __log_write(dblp,
+ dblp->bufp, (u_int32_t)lp->b_off)) != 0) {
+ MUTEX_UNLOCK(dbenv, flush_mutexp);
+ goto done;
+ }
lp->b_off = 0;
current = 1;
@@ -348,23 +794,90 @@ __log_flush(dblp, lsn)
* buffer, don't bother. We have nothing to write and nothing to
* sync.
*/
- if (dblp->lfname != lp->lsn.file) {
- if (!current)
- return (0);
- if ((ret = __log_newfh(dblp)) != 0)
- return (ret);
- }
+ if (!F_ISSET(&dblp->lfh, DB_FH_VALID) || dblp->lfname != lp->lsn.file)
+ if (!current || (ret = __log_newfh(dblp)) != 0) {
+ MUTEX_UNLOCK(dbenv, flush_mutexp);
+ goto done;
+ }
+
+ /*
+ * We are going to flush, release the region.
+ * First get the current state of the buffer since
+ * another write may come in, but we may not flush it.
+ */
+ b_off = lp->b_off;
+ w_off = lp->w_off;
+ f_lsn = lp->f_lsn;
+ lp->in_flush++;
+ if (release)
+ R_UNLOCK(dbenv, &dblp->reginfo);
/* Sync all writes to disk. */
- if ((ret = __os_fsync(dblp->dbenv, &dblp->lfh)) != 0)
- return (__db_panic(dblp->dbenv, ret));
+ if ((ret = __os_fsync(dbenv, &dblp->lfh)) != 0) {
+ MUTEX_UNLOCK(dbenv, flush_mutexp);
+ if (release)
+ R_LOCK(dbenv, &dblp->reginfo);
+ ret = __db_panic(dbenv, ret);
+ return (ret);
+ }
+
+ /*
+ * Set the last-synced LSN.
+ * This value must be set to the LSN past the last complete
+ * record that has been flushed. This is at least the first
+ * lsn, f_lsn. If the buffer is empty (b_off == 0), then
+ * we can move it up to the write point, since the first lsn is
+ * not set for the new buffer.
+ */
+ lp->s_lsn = f_lsn;
+ if (b_off == 0)
+ lp->s_lsn.offset = w_off;
+
+ MUTEX_UNLOCK(dbenv, flush_mutexp);
+ if (release)
+ R_LOCK(dbenv, &dblp->reginfo);
+
+ lp->in_flush--;
++lp->stat.st_scount;
- /* Set the last-synced LSN, using the on-disk write offset. */
- lp->s_lsn.file = lp->f_lsn.file;
- lp->s_lsn.offset = lp->w_off;
+ /*
+ * How many flush calls (usually commits) did this call actually sync?
+ * At least one, if it got here.
+ */
+ ncommit = 1;
+done:
+ if (lp->ncommit != 0) {
+ first = 1;
+ for (commit = SH_TAILQ_FIRST(&lp->commits, __db_commit);
+ commit != NULL;
+ commit = SH_TAILQ_NEXT(commit, links, __db_commit))
+ if (log_compare(&lp->s_lsn, &commit->lsn) > 0) {
+ MUTEX_UNLOCK(dbenv, &commit->mutex);
+ SH_TAILQ_REMOVE(
+ &lp->commits, commit, links, __db_commit);
+ ncommit++;
+ } else if (first == 1) {
+ F_SET(commit, DB_COMMIT_FLUSH);
+ MUTEX_UNLOCK(dbenv, &commit->mutex);
+ SH_TAILQ_REMOVE(
+ &lp->commits, commit, links, __db_commit);
+ /*
+ * This thread will wake and flush.
+ * If another thread commits and flushes
+ * first, we will waste a trip through the
+ * mutex.
+ */
+ lp->in_flush++;
+ first = 0;
+ }
+ }
+ if (lp->stat.st_maxcommitperflush < ncommit)
+ lp->stat.st_maxcommitperflush = ncommit;
+ if (lp->stat.st_mincommitperflush > ncommit ||
+ lp->stat.st_mincommitperflush == 0)
+ lp->stat.st_mincommitperflush = ncommit;
- return (0);
+ return (ret);
}
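The done: block above implements a simple group commit: every waiter whose LSN has already been synced is released, and the first waiter that still needs a flush is woken with a flag telling it to perform that flush itself. A hedged sketch of the wakeup policy with an invented array-based waiter list:

/* Illustrative sketch only: release satisfied waiters; hand the next
 * flush to the first unsatisfied one. */
struct ex_waiter { unsigned long lsn; int released, will_flush; };

static void
wake_commit_waiters(struct ex_waiter *w, int n, unsigned long synced_lsn)
{
	int i, picked = 0;

	for (i = 0; i < n; i++) {
		if (w[i].released)
			continue;
		if (w[i].lsn < synced_lsn)
			w[i].released = 1;	/* Its commit is on disk. */
		else if (!picked) {
			w[i].released = 1;	/* Wake it up... */
			w[i].will_flush = 1;	/* ...to run the next flush. */
			picked = 1;
		}
	}
}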
/*
@@ -415,7 +928,7 @@ __log_fill(dblp, lsn, addr, len)
nw = remain > len ? len : remain;
memcpy(dblp->bufp + lp->b_off, addr, nw);
addr = (u_int8_t *)addr + nw;
- len -= nw;
+ len -= (u_int32_t)nw;
lp->b_off += nw;
/* If we fill the buffer, flush it. */
@@ -439,15 +952,18 @@ __log_write(dblp, addr, len)
void *addr;
u_int32_t len;
{
+ DB_ENV *dbenv;
LOG *lp;
size_t nw;
int ret;
+ dbenv = dblp->dbenv;
+ lp = dblp->reginfo.primary;
+
/*
* If we haven't opened the log file yet or the current one
* has changed, acquire a new log file.
*/
- lp = dblp->reginfo.primary;
if (!F_ISSET(&dblp->lfh, DB_FH_VALID) || dblp->lfname != lp->lsn.file)
if ((ret = __log_newfh(dblp)) != 0)
return (ret);
@@ -457,14 +973,10 @@ __log_write(dblp, addr, len)
* since we last did).
*/
if ((ret =
- __os_seek(dblp->dbenv,
+ __os_seek(dbenv,
&dblp->lfh, 0, 0, lp->w_off, 0, DB_OS_SEEK_SET)) != 0 ||
- (ret = __os_write(dblp->dbenv, &dblp->lfh, addr, len, &nw)) != 0)
- return (__db_panic(dblp->dbenv, ret));
- if (nw != len) {
- __db_err(dblp->dbenv, "Short write while writing log");
- return (EIO);
- }
+ (ret = __os_write(dbenv, &dblp->lfh, addr, len, &nw)) != 0)
+ return (ret);
/* Reset the buffer offset and update the seek offset. */
lp->w_off += len;
@@ -484,11 +996,13 @@ __log_write(dblp, addr, len)
}
/*
- * log_file --
+ * __log_file --
* Map a DB_LSN to a file name.
+ *
+ * PUBLIC: int __log_file __P((DB_ENV *, const DB_LSN *, char *, size_t));
*/
int
-log_file(dbenv, lsn, namep, len)
+__log_file(dbenv, lsn, namep, len)
DB_ENV *dbenv;
const DB_LSN *lsn;
char *namep;
@@ -498,13 +1012,9 @@ log_file(dbenv, lsn, namep, len)
int ret;
char *name;
-#ifdef HAVE_RPC
- if (F_ISSET(dbenv, DB_ENV_RPCCLIENT))
- return (__dbcl_log_file(dbenv, lsn, namep, len));
-#endif
-
PANIC_CHECK(dbenv);
- ENV_REQUIRES_CONFIG(dbenv, dbenv->lg_handle, DB_INIT_LOG);
+ ENV_REQUIRES_CONFIG(dbenv,
+ dbenv->lg_handle, "DB_ENV->log_file", DB_INIT_LOG);
dblp = dbenv->lg_handle;
R_LOCK(dbenv, &dblp->reginfo);
@@ -516,11 +1026,11 @@ log_file(dbenv, lsn, namep, len)
/* Check to make sure there's enough room and copy the name. */
if (len < strlen(name) + 1) {
*namep = '\0';
- __db_err(dbenv, "log_file: name buffer is too short");
+ __db_err(dbenv, "DB_ENV->log_file: name buffer is too short");
return (EINVAL);
}
(void)strcpy(namep, name);
- __os_freestr(name);
+ __os_free(dbenv, name);
return (0);
}
@@ -533,19 +1043,21 @@ static int
__log_newfh(dblp)
DB_LOG *dblp;
{
+ DB_ENV *dbenv;
LOG *lp;
int ret;
char *name;
+ dbenv = dblp->dbenv;
+ lp = dblp->reginfo.primary;
+
/* Close any previous file descriptor. */
if (F_ISSET(&dblp->lfh, DB_FH_VALID))
- (void)__os_closehandle(&dblp->lfh);
-
- /* Get the path of the new file and open it. */
- lp = dblp->reginfo.primary;
- dblp->lfname = lp->lsn.file;
+ (void)__os_closehandle(dbenv, &dblp->lfh);
/*
+ * Get the path of the new file and open it.
+ *
* Adding DB_OSO_LOG to the flags may add additional platform-specific
* optimizations. On WinNT, the logfile is preallocated, which may
* have a time penalty at startup, but have better overall throughput.
@@ -557,14 +1069,16 @@ __log_newfh(dblp)
* maximum size down into the Windows __os_open routine, because it
* wants to pre-allocate it.
*/
- dblp->lfh.log_size = dblp->dbenv->lg_max;
+ dblp->lfname = lp->lsn.file;
+ dblp->lfh.log_size = lp->log_size;
if ((ret = __log_name(dblp, dblp->lfname,
&name, &dblp->lfh,
- DB_OSO_CREATE |/* DB_OSO_LOG |*/ DB_OSO_SEQ)) != 0)
- __db_err(dblp->dbenv,
- "log_put: %s: %s", name, db_strerror(ret));
+ DB_OSO_CREATE |/* DB_OSO_LOG |*/ DB_OSO_SEQ |
+ (F_ISSET(dbenv, DB_ENV_DIRECT_LOG) ? DB_OSO_DIRECT : 0))) != 0)
+ __db_err(dbenv,
+ "DB_ENV->log_put: %s: %s", name, db_strerror(ret));
- __os_freestr(name);
+ __os_free(dbenv, name);
return (ret);
}
@@ -582,11 +1096,13 @@ __log_name(dblp, filenumber, namep, fhp, flags)
char **namep;
DB_FH *fhp;
{
+ DB_ENV *dbenv;
LOG *lp;
int ret;
char *oname;
char old[sizeof(LFPREFIX) + 5 + 20], new[sizeof(LFPREFIX) + 10 + 20];
+ dbenv = dblp->dbenv;
lp = dblp->reginfo.primary;
/*
@@ -608,13 +1124,12 @@ __log_name(dblp, filenumber, namep, fhp, flags)
* file, return regardless.
*/
(void)snprintf(new, sizeof(new), LFNAME, filenumber);
- if ((ret = __db_appname(dblp->dbenv,
- DB_APP_LOG, NULL, new, 0, NULL, namep)) != 0 || fhp == NULL)
+ if ((ret = __db_appname(dbenv,
+ DB_APP_LOG, new, 0, NULL, namep)) != 0 || fhp == NULL)
return (ret);
/* Open the new-style file -- if we succeed, we're done. */
- if ((ret = __os_open(dblp->dbenv,
- *namep, flags, lp->persist.mode, fhp)) == 0)
+ if ((ret = __os_open(dbenv, *namep, flags, lp->persist.mode, fhp)) == 0)
return (0);
/*
@@ -622,15 +1137,14 @@ __log_name(dblp, filenumber, namep, fhp, flags)
* the caller isn't interested in old-style files.
*/
if (!LF_ISSET(DB_OSO_RDONLY)) {
- __db_err(dblp->dbenv,
+ __db_err(dbenv,
"%s: log file open failed: %s", *namep, db_strerror(ret));
- return (__db_panic(dblp->dbenv, ret));
+ return (__db_panic(dbenv, ret));
}
/* Create an old-style file name. */
(void)snprintf(old, sizeof(old), LFNAME_V1, filenumber);
- if ((ret = __db_appname(dblp->dbenv,
- DB_APP_LOG, NULL, old, 0, NULL, &oname)) != 0)
+ if ((ret = __db_appname(dbenv, DB_APP_LOG, old, 0, NULL, &oname)) != 0)
goto err;
/*
@@ -638,9 +1152,9 @@ __log_name(dblp, filenumber, namep, fhp, flags)
* space allocated for the new-style name and return the old-style
* name to the caller.
*/
- if ((ret = __os_open(dblp->dbenv,
+ if ((ret = __os_open(dbenv,
oname, flags, lp->persist.mode, fhp)) == 0) {
- __os_freestr(*namep);
+ __os_free(dbenv, *namep);
*namep = oname;
return (0);
}
@@ -653,52 +1167,82 @@ __log_name(dblp, filenumber, namep, fhp, flags)
* old-style name, but we expected it to exist and we weren't just
* looking for any log file. That's not a likely error.
*/
-err: __os_freestr(oname);
+err: __os_free(dbenv, oname);
return (ret);
}
-static int
-__log_open_files(dbenv)
+/*
+ * __log_rep_put --
+ * Short-circuit way for replication clients to put records into the
+ * log. Replication clients' logs need to be laid out exactly as their masters'
+ * are, so we let replication take responsibility for when the log gets
+ * flushed, when the log switches files, etc. This is just a thin PUBLIC wrapper
+ * for __log_putr with a slightly prettier interface.
+ *
+ * Note that the log region mutex should be held when this is called.
+ *
+ * PUBLIC: int __log_rep_put __P((DB_ENV *, DB_LSN *, const DBT *));
+ */
+int
+__log_rep_put(dbenv, lsnp, rec)
DB_ENV *dbenv;
+ DB_LSN *lsnp;
+ const DBT *rec;
{
+ DB_CIPHER *db_cipher;
DB_LOG *dblp;
- DB_LSN r_unused;
- DBT fid_dbt, t;
- FNAME *fnp;
+ HDR hdr;
+ DBT *dbt, t;
LOG *lp;
- int ret;
+ int need_free, ret;
dblp = dbenv->lg_handle;
lp = dblp->reginfo.primary;
- for (fnp = SH_TAILQ_FIRST(&lp->fq, __fname);
- fnp != NULL; fnp = SH_TAILQ_NEXT(fnp, q, __fname)) {
- if (fnp->ref == 0) /* Entry not in use. */
- continue;
- if (fnp->name_off != INVALID_ROFF) {
- memset(&t, 0, sizeof(t));
- t.data = R_ADDR(&dblp->reginfo, fnp->name_off);
- t.size = strlen(t.data) + 1;
- }
- memset(&fid_dbt, 0, sizeof(fid_dbt));
- fid_dbt.data = fnp->ufid;
- fid_dbt.size = DB_FILE_ID_LEN;
- /*
- * Output LOG_CHECKPOINT records which will be
- * processed during the OPENFILES pass of recovery.
- * At the end of recovery we want to output the
- * files that were open so that a future recovery
- * run will have the correct files open during
- * a backward pass. For this we output LOG_CLOSE
- * records so that the files will be closed on
- * the forward pass.
- */
- if ((ret = __log_register_log(dbenv,
- NULL, &r_unused, 0,
- F_ISSET(dblp, DBLOG_RECOVER) ? LOG_CLOSE : LOG_CHECKPOINT,
- fnp->name_off == INVALID_ROFF ? NULL : &t,
- &fid_dbt, fnp->id, fnp->s_type, fnp->meta_pgno)) != 0)
+ memset(&hdr, 0, sizeof(HDR));
+ t = *rec;
+ dbt = &t;
+ need_free = 0;
+ db_cipher = (DB_CIPHER *)dbenv->crypto_handle;
+ if (CRYPTO_ON(dbenv))
+ t.size += db_cipher->adj_size(rec->size);
+ if ((ret = __os_calloc(dbenv, 1, t.size, &t.data)) != 0)
+ goto err;
+ need_free = 1;
+ memcpy(t.data, rec->data, rec->size);
+
+ if ((ret = __log_encrypt_record(dbenv, dbt, &hdr, rec->size)) != 0)
+ goto err;
+ __db_chksum(t.data, t.size,
+ (CRYPTO_ON(dbenv)) ? db_cipher->mac_key : NULL, hdr.chksum);
+
+ DB_ASSERT(log_compare(lsnp, &lp->lsn) == 0);
+ ret = __log_putr(dblp, lsnp, dbt, lp->lsn.offset - lp->len, &hdr);
+err:
+ if (need_free)
+ __os_free(dbenv, t.data);
+ return (ret);
+}
+
+static int
+__log_encrypt_record(dbenv, dbt, hdr, orig)
+ DB_ENV *dbenv;
+ DBT *dbt;
+ HDR *hdr;
+ u_int32_t orig;
+{
+ DB_CIPHER *db_cipher;
+ int ret;
+
+ if (CRYPTO_ON(dbenv)) {
+ db_cipher = (DB_CIPHER *)dbenv->crypto_handle;
+ hdr->size = HDR_CRYPTO_SZ;
+ hdr->orig_size = orig;
+ if ((ret = db_cipher->encrypt(dbenv, db_cipher->data,
+ hdr->iv, dbt->data, dbt->size)) != 0)
return (ret);
+ } else {
+ hdr->size = HDR_NORMAL_SZ;
}
return (0);
}
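Both __log_newfile and __log_rep_put follow the same ordering around this helper: copy the record into a padded buffer, encrypt it in place if crypto is on, then checksum the bytes that will actually be written. A short sketch of that encrypt-then-checksum order, with placeholder callbacks standing in for the cipher and the checksum routine:

#include <stddef.h>

/* Illustrative sketch only. */
struct ex_hdr { unsigned char chksum[20]; size_t size, orig_size; };

static int
seal_record(void *buf, size_t len, size_t orig,
    int (*encrypt)(void *, size_t),
    void (*checksum)(const void *, size_t, unsigned char *),
    struct ex_hdr *hdrp)
{
	int ret;

	hdrp->orig_size = orig;
	/* Encrypt in place first, so the checksum covers what goes to disk. */
	if (encrypt != NULL && (ret = encrypt(buf, len)) != 0)
		return (ret);
	checksum(buf, len, hdrp->chksum);
	return (0);
}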
diff --git a/bdb/log/log_rec.c b/bdb/log/log_rec.c
deleted file mode 100644
index 493dd06d4c6..00000000000
--- a/bdb/log/log_rec.c
+++ /dev/null
@@ -1,647 +0,0 @@
-/*-
- * See the file LICENSE for redistribution information.
- *
- * Copyright (c) 1996, 1997, 1998, 1999, 2000
- * Sleepycat Software. All rights reserved.
- */
-/*
- * Copyright (c) 1995, 1996
- * The President and Fellows of Harvard University. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the name of the University nor the names of its contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- */
-
-#include "db_config.h"
-
-#ifndef lint
-static const char revid[] = "$Id: log_rec.c,v 11.48 2001/01/11 18:19:53 bostic Exp $";
-#endif /* not lint */
-
-#ifndef NO_SYSTEM_INCLUDES
-#include <sys/types.h>
-
-#include <string.h>
-#endif
-
-#include "db_int.h"
-#include "db_page.h"
-#include "db_am.h"
-#include "log.h"
-
-static int __log_check_master __P((DB_ENV *, u_int8_t *, char *));
-static int __log_do_open __P((DB_ENV *, DB_LOG *,
- u_int8_t *, char *, DBTYPE, int32_t, db_pgno_t));
-static int __log_open_file __P((DB_ENV *, DB_LOG *, __log_register_args *));
-
-/*
- * PUBLIC: int __log_register_recover
- * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
- */
-int
-__log_register_recover(dbenv, dbtp, lsnp, op, info)
- DB_ENV *dbenv;
- DBT *dbtp;
- DB_LSN *lsnp;
- db_recops op;
- void *info;
-{
- DB_ENTRY *dbe;
- DB_LOG *logp;
- DB *dbp;
- __log_register_args *argp;
- int do_rem, ret, t_ret;
-
- logp = dbenv->lg_handle;
- dbp = NULL;
-
-#ifdef DEBUG_RECOVER
- REC_PRINT(__log_register_print);
-#endif
- COMPQUIET(lsnp, NULL);
-
- if ((ret = __log_register_read(dbenv, dbtp->data, &argp)) != 0)
- goto out;
-
- if ((argp->opcode == LOG_OPEN &&
- (DB_REDO(op) || op == DB_TXN_OPENFILES)) ||
- (argp->opcode == LOG_CLOSE && DB_UNDO(op))) {
- /*
- * If we are redoing an open or undoing a close, then we need
- * to open a file. We must open the file even if
- * the meta page is not yet written as we may be creating it.
- */
- if (op == DB_TXN_OPENFILES)
- F_SET(logp, DBLOG_FORCE_OPEN);
- ret = __log_open_file(dbenv, logp, argp);
- F_CLR(logp, DBLOG_FORCE_OPEN);
- if (ret == ENOENT || ret == EINVAL) {
- if (op == DB_TXN_OPENFILES && argp->name.size != 0 &&
- (ret = __db_txnlist_delete(dbenv, info,
- argp->name.data, argp->fileid, 0)) != 0)
- goto out;
- ret = 0;
- }
- } else if (argp->opcode != LOG_CHECKPOINT) {
- /*
- * If we are undoing an open, then we need to close the file.
- *
- * If the file is deleted, then we can just ignore this close.
- * Otherwise, we should usually have a valid dbp we should
- * close or whose reference count should be decremented.
- * However, if we shut down without closing a file, we may, in
- * fact, not have the file open, and that's OK.
- */
- do_rem = 0;
- MUTEX_THREAD_LOCK(dbenv, logp->mutexp);
- if (argp->fileid < logp->dbentry_cnt) {
- dbe = &logp->dbentry[argp->fileid];
-
- DB_ASSERT(dbe->refcount == 1);
-
- ret = __db_txnlist_close(info,
- argp->fileid, dbe->count);
- if ((dbp = TAILQ_FIRST(&dbe->dblist)) != NULL)
- (void)log_unregister(dbenv, dbp);
- do_rem = 1;
- }
- MUTEX_THREAD_UNLOCK(dbenv, logp->mutexp);
- if (do_rem) {
- (void)__log_rem_logid(logp, dbp, argp->fileid);
- /*
- * If remove or rename has closed the file, don't
- * sync.
- */
- if (dbp != NULL &&
- (t_ret = dbp->close(dbp,
- dbp->mpf == NULL ? DB_NOSYNC : 0)) != 0 && ret == 0)
- ret = t_ret;
- }
- } else if (DB_UNDO(op) || op == DB_TXN_OPENFILES) {
- /*
- * It's a checkpoint and we are rolling backward. It
- * is possible that the system was shut down and thus
- * ended with a stable checkpoint; this file was never
- * closed and has therefore not been reopened yet. If
- * so, we need to try to open it.
- */
- ret = __log_open_file(dbenv, logp, argp);
- if (ret == ENOENT || ret == EINVAL) {
- if (argp->name.size != 0 && (ret =
- __db_txnlist_delete(dbenv, info,
- argp->name.data, argp->fileid, 0)) != 0)
- goto out;
- ret = 0;
- }
- }
-
-out: if (argp != NULL)
- __os_free(argp, 0);
- return (ret);
-}
-
-/*
- * __log_open_file --
- * Called during log_register recovery. Make sure that we have an
- * entry in the dbentry table for this ndx. Returns 0 on success,
- * non-zero on error.
- */
-static int
-__log_open_file(dbenv, lp, argp)
- DB_ENV *dbenv;
- DB_LOG *lp;
- __log_register_args *argp;
-{
- DB_ENTRY *dbe;
- DB *dbp;
-
- /*
- * We never re-open temporary files. Temp files are only
- * useful during aborts in which case the dbp was entered
- * when the file was registered. During recovery, we treat
- * temp files as properly deleted files, allowing the open to
- * fail and not reporting any errors when recovery fails to
- * get a valid dbp from db_fileid_to_db.
- */
- if (argp->name.size == 0) {
- (void)__log_add_logid(dbenv, lp, NULL, argp->fileid);
- return (ENOENT);
- }
-
- /*
- * Because of reference counting, we cannot automatically close files
- * during recovery, so when we're opening, we have to check that the
- * name we are opening is what we expect. If it's not, then we close
- * the old file and open the new one.
- */
- MUTEX_THREAD_LOCK(dbenv, lp->mutexp);
- if (argp->fileid < lp->dbentry_cnt)
- dbe = &lp->dbentry[argp->fileid];
- else
- dbe = NULL;
-
- if (dbe != NULL) {
- dbe->deleted = 0;
- if ((dbp = TAILQ_FIRST(&dbe->dblist)) != NULL) {
- if (dbp->meta_pgno != argp->meta_pgno ||
- memcmp(dbp->fileid,
- argp->uid.data, DB_FILE_ID_LEN) != 0) {
- MUTEX_THREAD_UNLOCK(dbenv, lp->mutexp);
- goto reopen;
- }
- if (!F_ISSET(lp, DBLOG_RECOVER))
- dbe->refcount++;
- MUTEX_THREAD_UNLOCK(dbenv, lp->mutexp);
- return (0);
- }
- }
-
- MUTEX_THREAD_UNLOCK(dbenv, lp->mutexp);
- if (0) {
-reopen: (void)log_unregister(dbp->dbenv, dbp);
- (void)__log_rem_logid(lp, dbp, argp->fileid);
- dbp->close(dbp, 0);
- }
-
- return (__log_do_open(dbenv, lp,
- argp->uid.data, argp->name.data,
- argp->ftype, argp->fileid, argp->meta_pgno));
-}
-
-/*
- * log_reopen_file -- close and reopen a db file.
- * Must be called when a metadata page changes.
- *
- * PUBLIC: int __log_reopen_file __P((DB_ENV *,
- * PUBLIC: char *, int32_t, u_int8_t *, db_pgno_t));
- *
- */
-int
-__log_reopen_file(dbenv, name, ndx, fileid, meta_pgno)
- DB_ENV *dbenv;
- char *name;
- int32_t ndx;
- u_int8_t *fileid;
- db_pgno_t meta_pgno;
-{
- DB *dbp;
- DB_LOG *logp;
- DBTYPE ftype;
- FNAME *fnp;
- LOG *lp;
- char *tmp_name;
- int ret;
-
- logp = dbenv->lg_handle;
-
- if (name == NULL) {
- R_LOCK(dbenv, &logp->reginfo);
-
- lp = logp->reginfo.primary;
-
- for (fnp = SH_TAILQ_FIRST(&lp->fq, __fname);
- fnp != NULL; fnp = SH_TAILQ_NEXT(fnp, q, __fname)) {
- if (fnp->ref == 0) /* Entry not in use. */
- continue;
- if (memcmp(fnp->ufid, fileid, DB_FILE_ID_LEN) == 0)
- break;
- }
-
- if (fnp == 0 || fnp->name_off == INVALID_ROFF) {
- __db_err(dbenv,
- "metasub recover: non-existent file id");
- return (EINVAL);
- }
-
- name = R_ADDR(&logp->reginfo, fnp->name_off);
- ret = __os_strdup(dbenv, name, &tmp_name);
- R_UNLOCK(dbenv, &logp->reginfo);
- if (ret != 0)
- goto out;
- name = tmp_name;
- } else
- tmp_name = NULL;
-
- if ((ret = __db_fileid_to_db(dbenv, &dbp, ndx, 0)) != 0)
- goto out;
- ftype = dbp->type;
- (void)log_unregister(dbenv, dbp);
- (void)__log_rem_logid(logp, dbp, ndx);
- (void)dbp->close(dbp, 0);
-
- ret = __log_do_open(dbenv, logp, fileid, name, ftype, ndx, meta_pgno);
-
- if (tmp_name != NULL)
- __os_free(tmp_name, 0);
-
-out: return (ret);
-}
-
-/*
- * __log_do_open --
- * Open files referenced in the log. This is the part of the open that
- * is not protected by the thread mutex.
- */
-static int
-__log_do_open(dbenv, lp, uid, name, ftype, ndx, meta_pgno)
- DB_ENV *dbenv;
- DB_LOG *lp;
- u_int8_t *uid;
- char *name;
- DBTYPE ftype;
- int32_t ndx;
- db_pgno_t meta_pgno;
-{
- DB *dbp;
- int ret;
- u_int8_t zeroid[DB_FILE_ID_LEN];
-
- if ((ret = db_create(&dbp, lp->dbenv, 0)) != 0)
- return (ret);
-
- dbp->log_fileid = ndx;
-
- /*
- * This is needed to signal to the locking routines called while
- * opening databases that we are potentially undoing a transaction
- * from an XA process. Since the XA process does not share
- * locks with the aborting transaction this prevents us from
- * deadlocking during the open during rollback.
- * Because this routine is called either during recovery or during an
- * XA_ABORT, we can safely set DB_AM_RECOVER in the dbp since it
- * will not be shared with other threads.
- */
- F_SET(dbp, DB_AM_RECOVER);
- if (meta_pgno != PGNO_BASE_MD)
- memcpy(dbp->fileid, uid, DB_FILE_ID_LEN);
- dbp->type = ftype;
- if ((ret =
- __db_dbopen(dbp, name, 0, __db_omode("rw----"), meta_pgno)) == 0) {
- /*
- * Verify that we are opening the same file that we were
- * referring to when we wrote this log record.
- */
- if (meta_pgno != PGNO_BASE_MD &&
- __log_check_master(dbenv, uid, name) != 0)
- goto not_right;
- if (memcmp(uid, dbp->fileid, DB_FILE_ID_LEN) != 0) {
- memset(zeroid, 0, DB_FILE_ID_LEN);
- if (memcmp(dbp->fileid, zeroid, DB_FILE_ID_LEN) != 0)
- goto not_right;
- memcpy(dbp->fileid, uid, DB_FILE_ID_LEN);
- }
- if (IS_RECOVERING(dbenv)) {
- (void)log_register(dbp->dbenv, dbp, name);
- (void)__log_add_logid(dbenv, lp, dbp, ndx);
- }
- return (0);
- }
-
-not_right:
- (void)dbp->close(dbp, 0);
- (void)__log_add_logid(dbenv, lp, NULL, ndx);
-
- return (ENOENT);
-}
-
-static int
-__log_check_master(dbenv, uid, name)
- DB_ENV *dbenv;
- u_int8_t *uid;
- char *name;
-{
- DB *dbp;
- int ret;
-
- ret = 0;
- if ((ret = db_create(&dbp, dbenv, 0)) != 0)
- return (ret);
- dbp->type = DB_BTREE;
- ret = __db_dbopen(dbp, name, 0, __db_omode("rw----"), PGNO_BASE_MD);
-
- if (ret == 0 && memcmp(uid, dbp->fileid, DB_FILE_ID_LEN) != 0)
- ret = EINVAL;
-
- (void) dbp->close(dbp, 0);
- return (ret);
-}
-
-/*
- * __log_add_logid --
- * Adds a DB entry to the log's DB entry table.
- *
- * PUBLIC: int __log_add_logid __P((DB_ENV *, DB_LOG *, DB *, int32_t));
- */
-int
-__log_add_logid(dbenv, logp, dbp, ndx)
- DB_ENV *dbenv;
- DB_LOG *logp;
- DB *dbp;
- int32_t ndx;
-{
- DB *dbtmp;
- int32_t i;
- int ret;
-
- ret = 0;
-
- MUTEX_THREAD_LOCK(dbenv, logp->mutexp);
-
- /*
- * Check if we need to grow the table. Note, ndx is 0-based (the
- * index into the DB entry table) an dbentry_cnt is 1-based, the
- * number of available slots.
- */
- if (logp->dbentry_cnt <= ndx) {
- if ((ret = __os_realloc(dbenv,
- (ndx + DB_GROW_SIZE) * sizeof(DB_ENTRY),
- NULL, &logp->dbentry)) != 0)
- goto err;
-
- /*
- * We have moved the head of the queue.
- * Fix up the queue header of an empty queue or the previous
- * pointer of the first element.
- */
- for (i = 0; i < logp->dbentry_cnt; i++) {
- if ((dbtmp =
- TAILQ_FIRST(&logp->dbentry[i].dblist)) == NULL)
- TAILQ_INIT(&logp->dbentry[i].dblist);
- else
- TAILQ_REINSERT_HEAD(
- &logp->dbentry[i].dblist, dbtmp, links);
- }
-
- /* Initialize the new entries. */
- for (i = logp->dbentry_cnt; i < ndx + DB_GROW_SIZE; i++) {
- logp->dbentry[i].count = 0;
- TAILQ_INIT(&logp->dbentry[i].dblist);
- logp->dbentry[i].deleted = 0;
- logp->dbentry[i].refcount = 0;
- }
-
- logp->dbentry_cnt = i;
- }
-
- if (logp->dbentry[ndx].deleted == 0 &&
- TAILQ_FIRST(&logp->dbentry[ndx].dblist) == NULL) {
- logp->dbentry[ndx].count = 0;
- if (dbp != NULL)
- TAILQ_INSERT_HEAD(&logp->dbentry[ndx].dblist,
- dbp, links);
- logp->dbentry[ndx].deleted = dbp == NULL;
- logp->dbentry[ndx].refcount = 1;
- } else if (!F_ISSET(logp, DBLOG_RECOVER)) {
- if (dbp != NULL)
- TAILQ_INSERT_HEAD(&logp->dbentry[ndx].dblist,
- dbp, links);
- logp->dbentry[ndx].refcount++;
- }
-
-err: MUTEX_THREAD_UNLOCK(dbenv, logp->mutexp);
- return (ret);
-}
-
-/*
- * __db_fileid_to_db --
- * Return the DB corresponding to the specified fileid.
- *
- * PUBLIC: int __db_fileid_to_db __P((DB_ENV *, DB **, int32_t, int));
- */
-int
-__db_fileid_to_db(dbenv, dbpp, ndx, inc)
- DB_ENV *dbenv;
- DB **dbpp;
- int32_t ndx;
- int inc;
-{
- DB_LOG *logp;
- DB *dbp;
- FNAME *fname;
- int ret;
- char *name;
-
- ret = 0;
- logp = dbenv->lg_handle;
-
- MUTEX_THREAD_LOCK(dbenv, logp->mutexp);
-
- /*
- * Under XA, a process different than the one issuing DB operations
- * may abort a transaction. In this case, recovery routines are run
- * by a process that does not necessarily have the file open, so we
- * we must open the file explicitly.
- */
- if (ndx >= logp->dbentry_cnt ||
- (!logp->dbentry[ndx].deleted &&
- (dbp = TAILQ_FIRST(&logp->dbentry[ndx].dblist)) == NULL)) {
- if (F_ISSET(logp, DBLOG_RECOVER)) {
- ret = ENOENT;
- goto err;
- }
- if (__log_lid_to_fname(logp, ndx, &fname) != 0) {
- /* Couldn't find entry; this is a fatal error. */
- __db_err(dbenv, "Missing log fileid entry");
- ret = EINVAL;
- goto err;
- }
- name = R_ADDR(&logp->reginfo, fname->name_off);
-
- /*
- * __log_do_open is called without protection of the
- * log thread lock.
- */
- MUTEX_THREAD_UNLOCK(dbenv, logp->mutexp);
-
- /*
- * At this point, we are not holding the thread lock, so exit
- * directly instead of going through the exit code at the
- * bottom. If the __log_do_open succeeded, then we don't need
- * to do any of the remaining error checking at the end of this
- * routine.
- */
- if ((ret = __log_do_open(dbenv, logp,
- fname->ufid, name, fname->s_type,
- ndx, fname->meta_pgno)) != 0)
- return (ret);
-
- *dbpp = TAILQ_FIRST(&logp->dbentry[ndx].dblist);
- return (0);
- }
-
- /*
- * Return DB_DELETED if the file has been deleted (it's not an error).
- */
- if (logp->dbentry[ndx].deleted) {
- ret = DB_DELETED;
- if (inc)
- logp->dbentry[ndx].count++;
- goto err;
- }
-
- /*
- * Otherwise return 0, but if we don't have a corresponding DB, it's
- * an error.
- */
- if ((*dbpp = TAILQ_FIRST(&logp->dbentry[ndx].dblist)) == NULL)
- ret = ENOENT;
-
-err: MUTEX_THREAD_UNLOCK(dbenv, logp->mutexp);
- return (ret);
-}
-
-/*
- * __log_close_files --
- * Close files that were opened by the recovery daemon. We sync the
- * file, unless its mpf pointer has been NULLed by a db_remove or
- * db_rename. We may not have flushed the log_register record that
- * closes the file.
- *
- * PUBLIC: void __log_close_files __P((DB_ENV *));
- */
-void
-__log_close_files(dbenv)
- DB_ENV *dbenv;
-{
- DB_ENTRY *dbe;
- DB_LOG *logp;
- DB *dbp;
- int32_t i;
-
- logp = dbenv->lg_handle;
- MUTEX_THREAD_LOCK(dbenv, logp->mutexp);
- for (i = 0; i < logp->dbentry_cnt; i++) {
- dbe = &logp->dbentry[i];
- while ((dbp = TAILQ_FIRST(&dbe->dblist)) != NULL) {
- (void)log_unregister(dbenv, dbp);
- TAILQ_REMOVE(&dbe->dblist, dbp, links);
- (void)dbp->close(dbp, dbp->mpf == NULL ? DB_NOSYNC : 0);
- }
- dbe->deleted = 0;
- dbe->refcount = 0;
- }
- MUTEX_THREAD_UNLOCK(dbenv, logp->mutexp);
-}
-
-/*
- * __log_rem_logid
- * Remove an entry from the log table. Find the appropriate DB and
- * unlink it from the linked list off the table. If the DB is NULL, treat
- * this as a simple refcount decrement.
- *
- * PUBLIC: void __log_rem_logid __P((DB_LOG *, DB *, int32_t));
- */
-void
-__log_rem_logid(logp, dbp, ndx)
- DB_LOG *logp;
- DB *dbp;
- int32_t ndx;
-{
- DB *xdbp;
-
- MUTEX_THREAD_LOCK(logp->dbenv, logp->mutexp);
- if (--logp->dbentry[ndx].refcount == 0) {
- TAILQ_INIT(&logp->dbentry[ndx].dblist);
- logp->dbentry[ndx].deleted = 0;
- } else if (dbp != NULL)
- for (xdbp = TAILQ_FIRST(&logp->dbentry[ndx].dblist);
- xdbp != NULL;
- xdbp = TAILQ_NEXT(xdbp, links))
- if (xdbp == dbp) {
- TAILQ_REMOVE(&logp->dbentry[ndx].dblist,
- xdbp, links);
- break;
- }
-
- MUTEX_THREAD_UNLOCK(logp->dbenv, logp->mutexp);
-}
-
-/*
- * __log_lid_to_fname --
- * Traverse the shared-memory region looking for the entry that
- * matches the passed log fileid. Returns 0 on success; -1 on error.
- * PUBLIC: int __log_lid_to_fname __P((DB_LOG *, int32_t, FNAME **));
- */
-int
-__log_lid_to_fname(dblp, lid, fnamep)
- DB_LOG *dblp;
- int32_t lid;
- FNAME **fnamep;
-{
- FNAME *fnp;
- LOG *lp;
-
- lp = dblp->reginfo.primary;
-
- for (fnp = SH_TAILQ_FIRST(&lp->fq, __fname);
- fnp != NULL; fnp = SH_TAILQ_NEXT(fnp, q, __fname)) {
- if (fnp->ref == 0) /* Entry not in use. */
- continue;
- if (fnp->id == lid) {
- *fnamep = fnp;
- return (0);
- }
- }
- return (-1);
-}
diff --git a/bdb/log/log_register.c b/bdb/log/log_register.c
deleted file mode 100644
index 1e0e523d8b9..00000000000
--- a/bdb/log/log_register.c
+++ /dev/null
@@ -1,433 +0,0 @@
-/*-
- * See the file LICENSE for redistribution information.
- *
- * Copyright (c) 1996, 1997, 1998, 1999, 2000
- * Sleepycat Software. All rights reserved.
- */
-#include "db_config.h"
-
-#ifndef lint
-static const char revid[] = "$Id: log_register.c,v 11.35 2001/01/10 16:04:19 bostic Exp $";
-#endif /* not lint */
-
-#ifndef NO_SYSTEM_INCLUDES
-#include <sys/types.h>
-
-#include <string.h>
-#endif
-
-#ifdef HAVE_RPC
-#include "db_server.h"
-#endif
-
-#include "db_int.h"
-#include "log.h"
-
-#ifdef HAVE_RPC
-#include "gen_client_ext.h"
-#include "rpc_client_ext.h"
-#endif
-
-/*
- * log_register --
- * Register a file name.
- */
-int
-log_register(dbenv, dbp, name)
- DB_ENV *dbenv;
- DB *dbp;
- const char *name;
-{
- DBT fid_dbt, r_name;
- DB_LOG *dblp;
- DB_LSN r_unused;
- FNAME *found_fnp, *fnp, *recover_fnp, *reuse_fnp;
- LOG *lp;
- size_t len;
- int32_t maxid;
- int inserted, ok, ret;
- void *namep;
-
-#ifdef HAVE_RPC
- if (F_ISSET(dbenv, DB_ENV_RPCCLIENT))
- return (__dbcl_log_register(dbenv, dbp, name));
-#endif
-
- PANIC_CHECK(dbenv);
- ENV_REQUIRES_CONFIG(dbenv, dbenv->lg_handle, DB_INIT_LOG);
-
- dblp = dbenv->lg_handle;
- lp = dblp->reginfo.primary;
- fnp = reuse_fnp = NULL;
- inserted = ret = 0;
- namep = NULL;
-
- /* Check the arguments. */
- if (dbp->type != DB_BTREE && dbp->type != DB_QUEUE &&
- dbp->type != DB_HASH && dbp->type != DB_RECNO) {
- __db_err(dbenv, "log_register: unknown DB file type");
- return (EINVAL);
- }
-
- R_LOCK(dbenv, &dblp->reginfo);
-
- /*
- * See if we've already got this file in the log, finding the
- * (maximum+1) in-use file id and some available file id (if we
- * find an available fid, we'll use it, else we'll have to allocate
- * one after the maximum that we found).
- */
- ok = 0;
- found_fnp = recover_fnp = NULL;
- for (maxid = 0, fnp = SH_TAILQ_FIRST(&lp->fq, __fname);
- fnp != NULL; fnp = SH_TAILQ_NEXT(fnp, q, __fname)) {
- if (F_ISSET(dblp, DBLOG_RECOVER) && fnp->id == dbp->log_fileid)
- recover_fnp = fnp;
- if (fnp->ref == 0) { /* Entry is not in use. */
- if (reuse_fnp == NULL)
- reuse_fnp = fnp;
- continue;
- }
- if (memcmp(dbp->fileid, fnp->ufid, DB_FILE_ID_LEN) == 0) {
- if (fnp->meta_pgno == 0) {
- if (fnp->locked == 1) {
- __db_err(dbenv, "File is locked");
- return (EINVAL);
- }
- if (found_fnp != NULL) {
- fnp = found_fnp;
- goto found;
- }
- ok = 1;
- }
- if (dbp->meta_pgno == fnp->meta_pgno) {
- if (F_ISSET(dblp, DBLOG_RECOVER)) {
- if (fnp->id != dbp->log_fileid) {
- /*
- * If we are in recovery, there
- * is only one dbp on the list.
- * If the refcount goes to 0,
- * we will clear the list. If
- * it doesn't, we want to leave
- * the dbp where it is, so
- * passing a NULL to rem_logid
- * is correct.
- */
- __log_rem_logid(dblp,
- NULL, fnp->id);
- if (recover_fnp != NULL)
- break;
- continue;
- }
- fnp->ref = 1;
- goto found;
- }
- ++fnp->ref;
- if (ok)
- goto found;
- found_fnp = fnp;
- }
- }
- if (maxid <= fnp->id)
- maxid = fnp->id + 1;
- }
- if ((fnp = found_fnp) != NULL)
- goto found;
-
- /* Fill in fnp structure. */
- if (recover_fnp != NULL) /* This has the right number */
- fnp = recover_fnp;
- else if (reuse_fnp != NULL) /* Reuse existing one. */
- fnp = reuse_fnp;
- else { /* Allocate a new one. */
- if ((ret = __db_shalloc(dblp->reginfo.addr,
- sizeof(FNAME), 0, &fnp)) != 0)
- goto mem_err;
- fnp->id = maxid;
- }
-
- if (F_ISSET(dblp, DBLOG_RECOVER))
- fnp->id = dbp->log_fileid;
-
- fnp->ref = 1;
- fnp->locked = 0;
- fnp->s_type = dbp->type;
- memcpy(fnp->ufid, dbp->fileid, DB_FILE_ID_LEN);
- fnp->meta_pgno = dbp->meta_pgno;
-
- if (name != NULL) {
- len = strlen(name) + 1;
- if ((ret =
- __db_shalloc(dblp->reginfo.addr, len, 0, &namep)) != 0) {
-mem_err: __db_err(dbenv,
- "Unable to allocate memory to register %s", name);
- goto err;
- }
- fnp->name_off = R_OFFSET(&dblp->reginfo, namep);
- memcpy(namep, name, len);
- } else
- fnp->name_off = INVALID_ROFF;
-
- /* Only do the insert if we allocated a new fnp. */
- if (reuse_fnp == NULL && recover_fnp == NULL)
- SH_TAILQ_INSERT_HEAD(&lp->fq, fnp, q, __fname);
- inserted = 1;
-
- /* Log the registry. */
- if (!F_ISSET(dblp, DBLOG_RECOVER)) {
- /*
- * We allow logging on in-memory databases, so the name here
- * could be NULL.
- */
- if (name != NULL) {
- r_name.data = (void *)name;
- r_name.size = strlen(name) + 1;
- }
- memset(&fid_dbt, 0, sizeof(fid_dbt));
- fid_dbt.data = dbp->fileid;
- fid_dbt.size = DB_FILE_ID_LEN;
- if ((ret = __log_register_log(dbenv, NULL, &r_unused,
- 0, LOG_OPEN, name == NULL ? NULL : &r_name,
- &fid_dbt, fnp->id, dbp->type, dbp->meta_pgno)) != 0)
- goto err;
- }
-
-found: /*
- * If we found the entry in the shared area, then the file is
- * already open, so there is no need to log the open. We only
- * log the open and closes on the first open and last close.
- */
- if (!F_ISSET(dblp, DBLOG_RECOVER) &&
- (ret = __log_add_logid(dbenv, dblp, dbp, fnp->id)) != 0)
- goto err;
-
- if (!F_ISSET(dblp, DBLOG_RECOVER))
- dbp->log_fileid = fnp->id;
-
- if (0) {
-err: if (inserted)
- SH_TAILQ_REMOVE(&lp->fq, fnp, q, __fname);
- if (namep != NULL)
- __db_shalloc_free(dblp->reginfo.addr, namep);
- if (fnp != NULL)
- __db_shalloc_free(dblp->reginfo.addr, fnp);
- }
-
- R_UNLOCK(dbenv, &dblp->reginfo);
-
- return (ret);
-}
-
-/*
- * log_unregister --
- * Discard a registered file name.
- */
-int
-log_unregister(dbenv, dbp)
- DB_ENV *dbenv;
- DB *dbp;
-{
- int ret;
-
-#ifdef HAVE_RPC
- if (F_ISSET(dbenv, DB_ENV_RPCCLIENT))
- return (__dbcl_log_unregister(dbenv, dbp));
-#endif
-
- PANIC_CHECK(dbenv);
- ENV_REQUIRES_CONFIG(dbenv, dbenv->lg_handle, DB_INIT_LOG);
-
- ret = __log_filelist_update(dbenv, dbp, dbp->log_fileid, NULL, NULL);
- dbp->log_fileid = DB_LOGFILEID_INVALID;
- return (ret);
-}
-
-/*
- * PUBLIC: int __log_filelist_update
- * PUBLIC: __P((DB_ENV *, DB *, int32_t, const char *, int *));
- *
- * Utility player for updating and logging the file list. Called
- * for 3 reasons:
- * 1) mark file closed: newname == NULL.
- * 2) change filename: newname != NULL.
- * 3) from recovery to verify & change filename if necessary, set != NULL.
- */
-int
-__log_filelist_update(dbenv, dbp, fid, newname, set)
- DB_ENV *dbenv;
- DB *dbp;
- int32_t fid;
- const char *newname;
- int *set;
-{
- DBT fid_dbt, r_name;
- DB_LOG *dblp;
- DB_LSN r_unused;
- FNAME *fnp;
- LOG *lp;
- u_int32_t len, newlen;
- int ret;
- void *namep;
-
- ret = 0;
- dblp = dbenv->lg_handle;
- lp = dblp->reginfo.primary;
-
- R_LOCK(dbenv, &dblp->reginfo);
-
- /* Find the entry in the log. */
- for (fnp = SH_TAILQ_FIRST(&lp->fq, __fname);
- fnp != NULL; fnp = SH_TAILQ_NEXT(fnp, q, __fname))
- if (fid == fnp->id)
- break;
- if (fnp == NULL) {
- __db_err(dbenv, "log_unregister: non-existent file id");
- ret = EINVAL;
- goto ret1;
- }
-
- /*
- * Log the unregistry only if this is the last one and we are
- * really closing the file or if this is an abort of a created
- * file and we need to make sure there is a record in the log.
- */
- namep = NULL;
- len = 0;
- if (fnp->name_off != INVALID_ROFF) {
- namep = R_ADDR(&dblp->reginfo, fnp->name_off);
- len = strlen(namep) + 1;
- }
- if (!F_ISSET(dblp, DBLOG_RECOVER) && fnp->ref == 1) {
- if (namep != NULL) {
- memset(&r_name, 0, sizeof(r_name));
- r_name.data = namep;
- r_name.size = len;
- }
- memset(&fid_dbt, 0, sizeof(fid_dbt));
- fid_dbt.data = fnp->ufid;
- fid_dbt.size = DB_FILE_ID_LEN;
- if ((ret = __log_register_log(dbenv, NULL, &r_unused,
- 0, LOG_CLOSE,
- fnp->name_off == INVALID_ROFF ? NULL : &r_name,
- &fid_dbt, fid, fnp->s_type, fnp->meta_pgno))
- != 0)
- goto ret1;
- }
-
- /*
- * If we are changing the name we must log this fact.
- */
- if (newname != NULL) {
- DB_ASSERT(fnp->ref == 1);
- newlen = strlen(newname) + 1;
- if (!F_ISSET(dblp, DBLOG_RECOVER)) {
- r_name.data = (void *) newname;
- r_name.size = newlen;
- if ((ret = __log_register_log(dbenv,
- NULL, &r_unused, 0, LOG_OPEN, &r_name, &fid_dbt,
- fnp->id, fnp->s_type, fnp->meta_pgno)) != 0)
- goto ret1;
- }
-
- /*
- * Check to see if the name is already correct.
- */
- if (set != NULL) {
- if (len != newlen || memcmp(namep, newname, len) != 0)
- *set = 1;
- else {
- *set = 0;
- goto ret1;
- }
- }
-
- /*
- * Change the name, realloc memory if necessary
- */
- if (len < newlen) {
- __db_shalloc_free(dblp->reginfo.addr,
- R_ADDR(&dblp->reginfo, fnp->name_off));
- if ((ret = __db_shalloc(
- dblp->reginfo.addr, newlen, 0, &namep)) != 0) {
- __db_err(dbenv,
- "Unable to allocate memory to register %s",
- newname);
- goto ret1;
- }
- fnp->name_off = R_OFFSET(&dblp->reginfo, namep);
- } else
- namep = R_ADDR(&dblp->reginfo, fnp->name_off);
- memcpy(namep, newname, newlen);
- } else {
-
- /*
- * If more than 1 reference, just decrement the reference
- * and return. Otherwise, free the name if one exists.
- */
- DB_ASSERT(fnp->ref >= 1);
- --fnp->ref;
- if (fnp->ref == 0) {
- if (fnp->name_off != INVALID_ROFF)
- __db_shalloc_free(dblp->reginfo.addr,
- R_ADDR(&dblp->reginfo, fnp->name_off));
- fnp->name_off = INVALID_ROFF;
- }
-
- /*
- * Remove from the process local table. If this
- * operation is taking place during recovery, then
- * the logid was never added to the table, so do not remove it.
- */
- if (!F_ISSET(dblp, DBLOG_RECOVER))
- __log_rem_logid(dblp, dbp, fid);
- }
-
-ret1: R_UNLOCK(dbenv, &dblp->reginfo);
- return (ret);
-}
-
-/*
- * __log_file_lock -- lock a file for single access
- * This only works if logging is on.
- *
- * PUBLIC: int __log_file_lock __P((DB *));
- */
-int
-__log_file_lock(dbp)
- DB *dbp;
-{
- DB_ENV *dbenv;
- DB_LOG *dblp;
- FNAME *fnp;
- LOG *lp;
- int ret;
-
- dbenv = dbp->dbenv;
- dblp = dbenv->lg_handle;
- lp = dblp->reginfo.primary;
-
- ret = 0;
- R_LOCK(dbenv, &dblp->reginfo);
-
- for (fnp = SH_TAILQ_FIRST(&lp->fq, __fname);
- fnp != NULL; fnp = SH_TAILQ_NEXT(fnp, q, __fname)) {
- if (fnp->ref == 0)
- continue;
-
- if (!memcmp(dbp->fileid, fnp->ufid, DB_FILE_ID_LEN)) {
- if (fnp->meta_pgno == 0) {
- if (fnp->ref != 1)
- goto err;
-
- fnp->locked = 1;
- } else {
-err: __db_err(dbp->dbenv, "File is open");
- ret = EINVAL;
- goto done;
- }
-
- }
- }
-done: R_UNLOCK(dbenv, &dblp->reginfo);
- return (ret);
-}
diff --git a/bdb/mp/Design b/bdb/mp/Design
deleted file mode 100644
index 1b26aae6cba..00000000000
--- a/bdb/mp/Design
+++ /dev/null
@@ -1,52 +0,0 @@
-$Id: Design,v 11.2 1999/11/21 23:08:27 bostic Exp $
-
-There are three ways we do locking in the mpool code:
-
-Locking a handle mutex to provide concurrency for DB_THREAD operations.
-Locking the region mutex to provide mutual exclusion while reading and
- writing structures in the shared region.
-Locking buffer header mutexes during I/O.
-
-The first will not be further described here. We use the shared mpool
-region lock to provide mutual exclusion while reading/modifying all of
-the data structures, including the buffer headers. We use a per-buffer
-header lock to wait on buffer I/O. The order of locking is as follows:
-
-Searching for a buffer:
- Acquire the region lock.
- Find the buffer header.
- Increment the reference count (guarantee the buffer stays).
- While the BH_LOCKED flag is set (I/O is going on) {
- Release the region lock.
- Explicitly yield the processor if it's not the first pass
-	 through this loop; otherwise we would simply spin, switching
-	 back and forth between the two locks.
- Request the buffer lock.
- The I/O will complete...
- Acquire the buffer lock.
- Release the buffer lock.
- Acquire the region lock.
- }
- Return the buffer.
-
-Reading/writing a buffer:
- Acquire the region lock.
- Find/create the buffer header.
- If reading, increment the reference count (guarantee the buffer stays).
- Set the BH_LOCKED flag.
- Acquire the buffer lock (guaranteed not to block).
- Release the region lock.
- Do the I/O and/or initialize the buffer contents.
- Release the buffer lock.
- At this point, the buffer lock is available, but the logical
- operation (flagged by BH_LOCKED) is not yet completed. For
- this reason, among others, threads checking the BH_LOCKED flag
- must loop around their test.
- Acquire the region lock.
- Clear the BH_LOCKED flag.
- Release the region lock.
- Return/discard the buffer.
-
-Pointers to DB_MPOOL, MPOOL, DB_MPOOLFILE and MPOOLFILE structures are
-not reacquired when a region lock is reacquired because they couldn't
-have been closed/discarded and because they never move in memory.
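
The buffer-search protocol described in the deleted Design notes above can be condensed into a short sketch. This is an illustration of the lock ordering only, assuming the mpool declarations from db_int.h; region_lock/region_unlock, buffer_lock/buffer_unlock, yield_processor and find_bh are hypothetical stand-ins, not the real mpool interfaces.

/*
 * Illustrative sketch of "Searching for a buffer" above.  The lock and
 * lookup helpers are placeholders for the region mutex, the per-buffer
 * mutex and the hash-chain walk the real code uses.
 */
static BH *
search_buffer(c_mp, pgno)
	MPOOL *c_mp;
	db_pgno_t pgno;
{
	BH *bhp;
	int first;

	region_lock(c_mp);			/* Acquire the region lock. */
	if ((bhp = find_bh(c_mp, pgno)) == NULL) {
		region_unlock(c_mp);
		return (NULL);
	}
	++bhp->ref;				/* Guarantee the buffer stays. */

	/* While I/O is going on, wait on the buffer lock. */
	for (first = 1; F_ISSET(bhp, BH_LOCKED); first = 0) {
		region_unlock(c_mp);
		if (!first)			/* Don't spin between the locks. */
			yield_processor();
		buffer_lock(bhp);		/* Blocks until the I/O completes. */
		buffer_unlock(bhp);
		region_lock(c_mp);
	}
	region_unlock(c_mp);
	return (bhp);				/* Still referenced by the caller. */
}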
diff --git a/bdb/mp/mp_alloc.c b/bdb/mp/mp_alloc.c
index 731f569f57f..96dd612d7ba 100644
--- a/bdb/mp/mp_alloc.c
+++ b/bdb/mp/mp_alloc.c
@@ -1,22 +1,31 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Copyright (c) 1996-2002
* Sleepycat Software. All rights reserved.
*/
#include "db_config.h"
#ifndef lint
-static const char revid[] = "$Id: mp_alloc.c,v 11.7 2000/04/20 21:14:18 bostic Exp $";
+static const char revid[] = "$Id: mp_alloc.c,v 11.31 2002/08/14 17:21:37 ubell Exp $";
#endif /* not lint */
#ifndef NO_SYSTEM_INCLUDES
#include <sys/types.h>
+#include <string.h>
#endif
#include "db_int.h"
-#include "db_shash.h"
-#include "mp.h"
+#include "dbinc/db_shash.h"
+#include "dbinc/mp.h"
+
+typedef struct {
+ DB_MPOOL_HASH *bucket;
+ u_int32_t priority;
+} HS;
+
+static void __memp_bad_buffer __P((DB_MPOOL_HASH *));
+static void __memp_reset_lru __P((DB_ENV *, REGINFO *, MPOOL *));
/*
* __memp_alloc --
@@ -34,14 +43,32 @@ __memp_alloc(dbmp, memreg, mfp, len, offsetp, retp)
roff_t *offsetp;
void *retp;
{
- BH *bhp, *nbhp;
+ BH *bhp;
+ DB_ENV *dbenv;
+ DB_MPOOL_HASH *dbht, *hp, *hp_end, *hp_tmp;
+ DB_MUTEX *mutexp;
MPOOL *c_mp;
MPOOLFILE *bh_mfp;
- size_t total;
- int nomore, restart, ret, wrote;
+ size_t freed_space;
+ u_int32_t buckets, buffers, high_priority, max_na, priority;
+ int aggressive, ret;
void *p;
+ dbenv = dbmp->dbenv;
c_mp = memreg->primary;
+ dbht = R_ADDR(memreg, c_mp->htab);
+ hp_end = &dbht[c_mp->htab_buckets];
+
+ buckets = buffers = 0;
+ aggressive = 0;
+
+ c_mp->stat.st_alloc++;
+
+ /*
+ * Get aggressive if we've tried to flush as many pages as there
+ * are in the system without finding space.
+ */
+ max_na = 5 * c_mp->htab_buckets;
/*
* If we're allocating a buffer, and the one we're discarding is the
@@ -53,100 +80,363 @@ __memp_alloc(dbmp, memreg, mfp, len, offsetp, retp)
if (mfp != NULL)
len = (sizeof(BH) - sizeof(u_int8_t)) + mfp->stat.st_pagesize;
- nomore = 0;
+ R_LOCK(dbenv, memreg);
+
+ /*
+ * On every buffer allocation we update the buffer generation number
+ * and check for wraparound.
+ */
+ if (++c_mp->lru_count == UINT32_T_MAX)
+ __memp_reset_lru(dbenv, memreg, c_mp);
+
+ /*
+ * Anything newer than 1/10th of the buffer pool is ignored during
+ * allocation (unless allocation starts failing).
+ */
+ DB_ASSERT(c_mp->lru_count > c_mp->stat.st_pages / 10);
+ high_priority = c_mp->lru_count - c_mp->stat.st_pages / 10;
+
+ /*
+ * First we try to allocate from free memory. If that fails, scan the
+ * buffer pool to find buffers with low priorities. We consider small
+ * sets of hash buckets each time to limit the amount of work needing
+ * to be done. This approximates LRU, but not very well. We either
+ * find a buffer of the same size to use, or we will free 3 times what
+ * we need in the hopes it will coalesce into a contiguous chunk of the
+ * right size. In the latter case we branch back here and try again.
+ */
alloc: if ((ret = __db_shalloc(memreg->addr, len, MUTEX_ALIGN, &p)) == 0) {
- if (offsetp != NULL)
+ if (mfp != NULL)
+ c_mp->stat.st_pages++;
+ R_UNLOCK(dbenv, memreg);
+
+found: if (offsetp != NULL)
*offsetp = R_OFFSET(memreg, p);
*(void **)retp = p;
+
+ /*
+ * Update the search statistics.
+ *
+ * We're not holding the region locked here, so these statistics
+ * can't be trusted.
+ */
+ if (buckets != 0) {
+ if (buckets > c_mp->stat.st_alloc_max_buckets)
+ c_mp->stat.st_alloc_max_buckets = buckets;
+ c_mp->stat.st_alloc_buckets += buckets;
+ }
+ if (buffers != 0) {
+ if (buffers > c_mp->stat.st_alloc_max_pages)
+ c_mp->stat.st_alloc_max_pages = buffers;
+ c_mp->stat.st_alloc_pages += buffers;
+ }
return (0);
}
- if (nomore) {
- __db_err(dbmp->dbenv,
- "Unable to allocate %lu bytes from mpool shared region: %s\n",
- (u_long)len, db_strerror(ret));
- return (ret);
- }
-retry: /* Find a buffer we can flush; pure LRU. */
- restart = total = 0;
- for (bhp =
- SH_TAILQ_FIRST(&c_mp->bhq, __bh); bhp != NULL; bhp = nbhp) {
- nbhp = SH_TAILQ_NEXT(bhp, q, __bh);
+ /*
+ * We re-attempt the allocation every time we've freed 3 times what
+ * we need. Reset our free-space counter.
+ */
+ freed_space = 0;
- /* Ignore pinned or locked (I/O in progress) buffers. */
- if (bhp->ref != 0 || F_ISSET(bhp, BH_LOCKED))
+ /*
+ * Walk the hash buckets and find the next two with potentially useful
+ * buffers. Free the buffer with the lowest priority from the buckets'
+ * chains.
+ */
+ for (hp_tmp = NULL;;) {
+ /* Check for wrap around. */
+ hp = &dbht[c_mp->last_checked++];
+ if (hp >= hp_end) {
+ c_mp->last_checked = 0;
+
+ /*
+ * If we've gone through all of the hash buckets, try
+ * an allocation. If the cache is small, the old page
+ * size is small, and the new page size is large, we
+ * might have freed enough memory (but not 3 times the
+ * memory).
+ */
+ goto alloc;
+ }
+
+ /*
+ * Skip empty buckets.
+ *
+ * We can check for empty buckets before locking as we
+ * only care if the pointer is zero or non-zero.
+ */
+ if (SH_TAILQ_FIRST(&hp->hash_bucket, __bh) == NULL)
continue;
- /* Find the associated MPOOLFILE. */
- bh_mfp = R_ADDR(dbmp->reginfo, bhp->mf_offset);
+ /*
+ * The failure mode is when there are too many buffers we can't
+ * write or there's not enough memory in the system. We don't
+ * have a metric for deciding if allocation has no possible way
+ * to succeed, so we don't ever fail; we assume memory will be
+ * available if we wait long enough.
+ *
+ * Get aggressive if we've tried to flush 5 times as many hash
+ * buckets as there are in the system -- it's possible we have
+ * been repeatedly trying to flush the same buffers, although
+ * it's unlikely. Aggressive means:
+ *
+ * a: set a flag to attempt to flush high priority buffers as
+ * well as other buffers.
+ * b: sync the mpool to force out queue extent pages. While we
+ * might not have enough space for what we want and flushing
+ * is expensive, why not?
+ * c: sleep for a second -- hopefully someone else will run and
+ * free up some memory. Try to allocate memory too, in case
+ * the other thread returns its memory to the region.
+ * d: look at a buffer in every hash bucket rather than choose
+ * the more preferable of two.
+ *
+ * !!!
+ * This test ignores pathological cases like no buffers in the
+ * system -- that shouldn't be possible.
+ */
+ if ((++buckets % max_na) == 0) {
+ aggressive = 1;
- /* Write the page if it's dirty. */
- if (F_ISSET(bhp, BH_DIRTY)) {
- ++bhp->ref;
- if ((ret = __memp_bhwrite(dbmp,
- bh_mfp, bhp, &restart, &wrote)) != 0)
- return (ret);
- --bhp->ref;
+ R_UNLOCK(dbenv, memreg);
- /*
- * Another process may have acquired this buffer and
- * incremented the ref count after we wrote it.
- */
- if (bhp->ref != 0)
- goto retry;
+ (void)__memp_sync_int(
+ dbenv, NULL, 0, DB_SYNC_ALLOC, NULL);
+
+ (void)__os_sleep(dbenv, 1, 0);
+
+ R_LOCK(dbenv, memreg);
+ goto alloc;
+ }
+
+ if (!aggressive) {
+ /* Skip high priority buckets. */
+ if (hp->hash_priority > high_priority)
+ continue;
/*
- * If we wrote the page, continue and free the buffer.
- * We don't have to rewalk the list to acquire the
- * buffer because it was never available for any other
- * process to modify it.
- *
- * If we didn't write the page, but we discarded and
- * reacquired the region lock, restart the list walk.
- *
- * If we neither wrote the buffer nor discarded the
- * region lock, continue down the buffer list.
+ * Find two buckets and select the one with the lowest
+ * priority. Performance testing shows that looking
+ * at two improves the LRUness and looking at more only
+ * does a little better.
*/
- if (wrote)
- ++c_mp->stat.st_rw_evict;
- else {
- if (restart)
- goto retry;
+ if (hp_tmp == NULL) {
+ hp_tmp = hp;
continue;
}
+ if (hp->hash_priority > hp_tmp->hash_priority)
+ hp = hp_tmp;
+ hp_tmp = NULL;
+ }
+
+ /* Remember the priority of the buffer we're looking for. */
+ priority = hp->hash_priority;
+
+ /* Unlock the region and lock the hash bucket. */
+ R_UNLOCK(dbenv, memreg);
+ mutexp = &hp->hash_mutex;
+ MUTEX_LOCK(dbenv, mutexp);
+
+#ifdef DIAGNOSTIC
+ __memp_check_order(hp);
+#endif
+ /*
+ * The lowest priority page is first in the bucket, as they are
+ * maintained in sorted order.
+ *
+ * The buffer may have been freed or its priority changed while
+ * we switched from the region lock to the hash lock. If so,
+ * we have to restart. We will still take the first buffer on
+ * the bucket's list, though, if it has a low enough priority.
+ */
+ if ((bhp = SH_TAILQ_FIRST(&hp->hash_bucket, __bh)) == NULL ||
+ bhp->ref != 0 || bhp->priority > priority)
+ goto next_hb;
+
+ buffers++;
+
+ /* Find the associated MPOOLFILE. */
+ bh_mfp = R_ADDR(dbmp->reginfo, bhp->mf_offset);
+
+ /* If the page is dirty, pin it and write it. */
+ ret = 0;
+ if (F_ISSET(bhp, BH_DIRTY)) {
+ ++bhp->ref;
+ ret = __memp_bhwrite(dbmp, hp, bh_mfp, bhp, 0);
+ --bhp->ref;
+ if (ret == 0)
+ ++c_mp->stat.st_rw_evict;
} else
++c_mp->stat.st_ro_evict;
/*
+ * If a write fails for any reason, we can't proceed.
+ *
+ * We released the hash bucket lock while doing I/O, so another
+ * thread may have acquired this buffer and incremented the ref
+ * count after we wrote it, in which case we can't have it.
+ *
+ * If there's a write error, avoid selecting this buffer again
+ * by making it the bucket's least-desirable buffer.
+ */
+ if (ret != 0 || bhp->ref != 0) {
+ if (ret != 0 && aggressive)
+ __memp_bad_buffer(hp);
+ goto next_hb;
+ }
+
+ /*
* Check to see if the buffer is the size we're looking for.
- * If it is, simply reuse it.
+ * If so, we can simply reuse it. Else, free the buffer and
+ * its space and keep looking.
*/
if (mfp != NULL &&
mfp->stat.st_pagesize == bh_mfp->stat.st_pagesize) {
- __memp_bhfree(dbmp, bhp, 0);
+ __memp_bhfree(dbmp, hp, bhp, 0);
- if (offsetp != NULL)
- *offsetp = R_OFFSET(memreg, bhp);
- *(void **)retp = bhp;
- return (0);
+ p = bhp;
+ goto found;
}
- /* Note how much space we've freed, and free the buffer. */
- total += __db_shsizeof(bhp);
- __memp_bhfree(dbmp, bhp, 1);
+ freed_space += __db_shsizeof(bhp);
+ __memp_bhfree(dbmp, hp, bhp, 1);
/*
- * Retry as soon as we've freed up sufficient space. If we
- * have to coalesce of memory to satisfy the request, don't
- * try until it's likely (possible?) that we'll succeed.
+ * Unlock this hash bucket and re-acquire the region lock. If
+ * we're reaching here as a result of calling memp_bhfree, the
+ * hash bucket lock has already been discarded.
*/
- if (total >= 3 * len)
+ if (0) {
+next_hb: MUTEX_UNLOCK(dbenv, mutexp);
+ }
+ R_LOCK(dbenv, memreg);
+
+ /*
+ * Retry the allocation as soon as we've freed up sufficient
+ * space. We're likely to have to coalesce chunks of memory to
+ * satisfy the request, so don't try until it's likely (possible?)
+ * we'll succeed.
+ */
+ if (freed_space >= 3 * len)
goto alloc;
+ }
+ /* NOTREACHED */
+}
+
+/*
+ * __memp_bad_buffer --
+ * Make the first buffer in a hash bucket the least desirable buffer.
+ */
+static void
+__memp_bad_buffer(hp)
+ DB_MPOOL_HASH *hp;
+{
+ BH *bhp, *t_bhp;
+ u_int32_t priority;
+
+ /* Remove the first buffer from the bucket. */
+ bhp = SH_TAILQ_FIRST(&hp->hash_bucket, __bh);
+ SH_TAILQ_REMOVE(&hp->hash_bucket, bhp, hq, __bh);
+
+ /*
+ * Find the highest priority buffer in the bucket. Buffers are
+ * sorted by priority, so it's the last one in the bucket.
+ *
+ * XXX
+ * Should use SH_TAILQ_LAST, but I think that macro is broken.
+ */
+ priority = bhp->priority;
+ for (t_bhp = SH_TAILQ_FIRST(&hp->hash_bucket, __bh);
+ t_bhp != NULL; t_bhp = SH_TAILQ_NEXT(t_bhp, hq, __bh))
+ priority = t_bhp->priority;
+
+ /*
+ * Set our buffer's priority to be just as bad, and append it to
+ * the bucket.
+ */
+ bhp->priority = priority;
+ SH_TAILQ_INSERT_TAIL(&hp->hash_bucket, bhp, hq);
- /* Restart the walk if we discarded the region lock. */
- if (restart)
- goto retry;
+ /* Reset the hash bucket's priority. */
+ hp->hash_priority = SH_TAILQ_FIRST(&hp->hash_bucket, __bh)->priority;
+}
+
+/*
+ * __memp_reset_lru --
+ * Reset the cache LRU counter.
+ */
+static void
+__memp_reset_lru(dbenv, memreg, c_mp)
+ DB_ENV *dbenv;
+ REGINFO *memreg;
+ MPOOL *c_mp;
+{
+ BH *bhp;
+ DB_MPOOL_HASH *hp;
+ int bucket;
+
+ /*
+ * Update the counter so all future allocations will start at the
+ * bottom.
+ */
+ c_mp->lru_count -= MPOOL_BASE_DECREMENT;
+
+ /* Release the region lock. */
+ R_UNLOCK(dbenv, memreg);
+
+ /* Adjust the priority of every buffer in the system. */
+ for (hp = R_ADDR(memreg, c_mp->htab),
+ bucket = 0; bucket < c_mp->htab_buckets; ++hp, ++bucket) {
+ /*
+ * Skip empty buckets.
+ *
+ * We can check for empty buckets before locking as we
+ * only care if the pointer is zero or non-zero.
+ */
+ if (SH_TAILQ_FIRST(&hp->hash_bucket, __bh) == NULL)
+ continue;
+
+ MUTEX_LOCK(dbenv, &hp->hash_mutex);
+ for (bhp = SH_TAILQ_FIRST(&hp->hash_bucket, __bh);
+ bhp != NULL; bhp = SH_TAILQ_NEXT(bhp, hq, __bh))
+ if (bhp->priority != UINT32_T_MAX &&
+ bhp->priority > MPOOL_BASE_DECREMENT)
+ bhp->priority -= MPOOL_BASE_DECREMENT;
+ MUTEX_UNLOCK(dbenv, &hp->hash_mutex);
}
- nomore = 1;
- goto alloc;
+
+ /* Reacquire the region lock. */
+ R_LOCK(dbenv, memreg);
+}
+
+#ifdef DIAGNOSTIC
+/*
+ * __memp_check_order --
+ * Verify the priority ordering of a hash bucket chain.
+ *
+ * PUBLIC: #ifdef DIAGNOSTIC
+ * PUBLIC: void __memp_check_order __P((DB_MPOOL_HASH *));
+ * PUBLIC: #endif
+ */
+void
+__memp_check_order(hp)
+ DB_MPOOL_HASH *hp;
+{
+ BH *bhp;
+ u_int32_t priority;
+
+ /*
+ * Assumes the hash bucket is locked.
+ */
+ if ((bhp = SH_TAILQ_FIRST(&hp->hash_bucket, __bh)) == NULL)
+ return;
+
+ DB_ASSERT(bhp->priority == hp->hash_priority);
+
+ for (priority = bhp->priority;
+ (bhp = SH_TAILQ_NEXT(bhp, hq, __bh)) != NULL;
+ priority = bhp->priority)
+ DB_ASSERT(priority <= bhp->priority);
}
+#endif
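
The bucket-selection heuristic described in __memp_alloc's comments above -- skip buckets whose cheapest buffer is too recently used, then evict from the lower-priority bucket of each pair examined -- boils down to a small helper. The function below is a hypothetical condensation for illustration, not the in-tree code; it assumes the DB_MPOOL_HASH layout shown in the diff.

/*
 * Hypothetical condensation of the victim-bucket choice in __memp_alloc:
 * returns the bucket to evict from, or NULL to keep scanning.  *savedp
 * carries the first candidate of a pair between calls.
 */
static DB_MPOOL_HASH *
pick_victim_bucket(hp, savedp, high_priority, aggressive)
	DB_MPOOL_HASH *hp, **savedp;
	u_int32_t high_priority;
	int aggressive;
{
	if (!aggressive) {
		/* Skip buckets whose cheapest buffer is too recently used. */
		if (hp->hash_priority > high_priority)
			return (NULL);

		/* Look at buckets in pairs, keep the cheaper of the two. */
		if (*savedp == NULL) {
			*savedp = hp;
			return (NULL);
		}
		if (hp->hash_priority > (*savedp)->hash_priority)
			hp = *savedp;
		*savedp = NULL;
	}
	return (hp);
}

When aggressive mode is on, every non-empty bucket is considered, which matches step (d) of the comment in the diff.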
diff --git a/bdb/mp/mp_bh.c b/bdb/mp/mp_bh.c
index e802b165b2d..85d15218abf 100644
--- a/bdb/mp/mp_bh.c
+++ b/bdb/mp/mp_bh.c
@@ -1,13 +1,13 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Copyright (c) 1996-2002
* Sleepycat Software. All rights reserved.
*/
#include "db_config.h"
#ifndef lint
-static const char revid[] = "$Id: mp_bh.c,v 11.25 2001/01/10 04:50:53 ubell Exp $";
+static const char revid[] = "$Id: mp_bh.c,v 11.71 2002/09/04 19:06:45 margo Exp $";
#endif /* not lint */
#ifndef NO_SYSTEM_INCLUDES
@@ -18,40 +18,41 @@ static const char revid[] = "$Id: mp_bh.c,v 11.25 2001/01/10 04:50:53 ubell Exp
#endif
#include "db_int.h"
-#include "db_shash.h"
-#include "mp.h"
-#include "log.h"
-#include "db_page.h"
+#include "dbinc/db_shash.h"
+#include "dbinc/mp.h"
+#include "dbinc/log.h"
+#include "dbinc/db_page.h"
+static int __memp_pgwrite
+ __P((DB_MPOOL *, DB_MPOOLFILE *, DB_MPOOL_HASH *, BH *));
static int __memp_upgrade __P((DB_MPOOL *, DB_MPOOLFILE *, MPOOLFILE *));
/*
* __memp_bhwrite --
- * Write the page associated with a given bucket header.
+ * Write the page associated with a given buffer header.
*
- * PUBLIC: int __memp_bhwrite
- * PUBLIC: __P((DB_MPOOL *, MPOOLFILE *, BH *, int *, int *));
+ * PUBLIC: int __memp_bhwrite __P((DB_MPOOL *,
+ * PUBLIC: DB_MPOOL_HASH *, MPOOLFILE *, BH *, int));
*/
int
-__memp_bhwrite(dbmp, mfp, bhp, restartp, wrotep)
+__memp_bhwrite(dbmp, hp, mfp, bhp, open_extents)
DB_MPOOL *dbmp;
+ DB_MPOOL_HASH *hp;
MPOOLFILE *mfp;
BH *bhp;
- int *restartp, *wrotep;
+ int open_extents;
{
+ DB_ENV *dbenv;
DB_MPOOLFILE *dbmfp;
DB_MPREG *mpreg;
- int incremented, ret;
+ int local_open, incremented, ret;
- if (restartp != NULL)
- *restartp = 0;
- if (wrotep != NULL)
- *wrotep = 0;
- incremented = 0;
+ dbenv = dbmp->dbenv;
+ local_open = incremented = 0;
/*
- * If the file has been removed or is a closed temporary file, Jump
- * right ahead and pretend that we've found the file we want-- the
+ * If the file has been removed or is a closed temporary file, jump
+ * right ahead and pretend that we've found the file we want -- the
* page-write function knows how to handle the fact that we don't have
* (or need!) any real file descriptor information.
*/
@@ -66,52 +67,60 @@ __memp_bhwrite(dbmp, mfp, bhp, restartp, wrotep)
* If we find a descriptor on the file that's not open for writing, we
* try and upgrade it to make it writeable. If that fails, we're done.
*/
- MUTEX_THREAD_LOCK(dbmp->dbenv, dbmp->mutexp);
+ MUTEX_THREAD_LOCK(dbenv, dbmp->mutexp);
for (dbmfp = TAILQ_FIRST(&dbmp->dbmfq);
dbmfp != NULL; dbmfp = TAILQ_NEXT(dbmfp, q))
if (dbmfp->mfp == mfp) {
if (F_ISSET(dbmfp, MP_READONLY) &&
- __memp_upgrade(dbmp, dbmfp, mfp)) {
- MUTEX_THREAD_UNLOCK(dbmp->dbenv, dbmp->mutexp);
- return (0);
+ !F_ISSET(dbmfp, MP_UPGRADE) &&
+ (F_ISSET(dbmfp, MP_UPGRADE_FAIL) ||
+ __memp_upgrade(dbmp, dbmfp, mfp))) {
+ MUTEX_THREAD_UNLOCK(dbenv, dbmp->mutexp);
+ return (EPERM);
}
/*
* Increment the reference count -- see the comment in
- * memp_fclose().
+ * __memp_fclose_int().
*/
++dbmfp->ref;
incremented = 1;
break;
}
- MUTEX_THREAD_UNLOCK(dbmp->dbenv, dbmp->mutexp);
+ MUTEX_THREAD_UNLOCK(dbenv, dbmp->mutexp);
+
if (dbmfp != NULL)
goto found;
/*
* !!!
+ * It's the caller's choice if we're going to open extent files.
+ */
+ if (!open_extents && F_ISSET(mfp, MP_EXTENT))
+ return (EPERM);
+
+ /*
+ * !!!
* Don't try to attach to temporary files. There are two problems in
* trying to do that. First, if we have different privileges than the
* process that "owns" the temporary file, we might create the backing
* disk file such that the owning process couldn't read/write its own
- * buffers, e.g., memp_trickle() running as root creating a file owned
+ * buffers, e.g., memp_trickle running as root creating a file owned
* as root, mode 600. Second, if the temporary file has already been
* created, we don't have any way of finding out what its real name is,
* and, even if we did, it was already unlinked (so that it won't be
* left if the process dies horribly). This decision causes a problem,
* however: if the temporary file consumes the entire buffer cache,
* and the owner doesn't flush the buffers to disk, we could end up
- * with resource starvation, and the memp_trickle() thread couldn't do
+ * with resource starvation, and the memp_trickle thread couldn't do
* anything about it. That's a pretty unlikely scenario, though.
*
- * Note that we should never get here when the temporary file
- * in question has already been closed in another process, in which
- * case it should be marked MP_DEADFILE.
+ * Note we should never get here when the temporary file in question
+ * has already been closed in another process, in which case it should
+ * be marked MP_DEADFILE.
*/
- if (F_ISSET(mfp, MP_TEMP)) {
- DB_ASSERT(!F_ISSET(mfp, MP_DEADFILE));
- return (0);
- }
+ if (F_ISSET(mfp, MP_TEMP))
+ return (EPERM);
/*
* It's not a page from a file we've opened. If the file requires
@@ -120,14 +129,14 @@ __memp_bhwrite(dbmp, mfp, bhp, restartp, wrotep)
* nothing we can do.
*/
if (mfp->ftype != 0) {
- MUTEX_THREAD_LOCK(dbmp->dbenv, dbmp->mutexp);
+ MUTEX_THREAD_LOCK(dbenv, dbmp->mutexp);
for (mpreg = LIST_FIRST(&dbmp->dbregq);
mpreg != NULL; mpreg = LIST_NEXT(mpreg, q))
if (mpreg->ftype == mfp->ftype)
break;
- MUTEX_THREAD_UNLOCK(dbmp->dbenv, dbmp->mutexp);
+ MUTEX_THREAD_UNLOCK(dbenv, dbmp->mutexp);
if (mpreg == NULL)
- return (0);
+ return (EPERM);
}
/*
@@ -138,17 +147,24 @@ __memp_bhwrite(dbmp, mfp, bhp, restartp, wrotep)
* There's no negative cache, so we may repeatedly try and open files
* that we have previously tried (and failed) to open.
*/
- if (__memp_fopen(dbmp, mfp, R_ADDR(dbmp->reginfo, mfp->path_off),
- 0, 0, mfp->stat.st_pagesize, 0, NULL, &dbmfp) != 0)
- return (0);
+ if ((ret = dbenv->memp_fcreate(dbenv, &dbmfp, 0)) != 0)
+ return (ret);
+ if ((ret = __memp_fopen_int(dbmfp, mfp,
+ R_ADDR(dbmp->reginfo, mfp->path_off),
+ 0, 0, mfp->stat.st_pagesize)) != 0) {
+ (void)dbmfp->close(dbmfp, 0);
+ return (ret);
+ }
+ local_open = 1;
-found: ret = __memp_pgwrite(dbmp, dbmfp, bhp, restartp, wrotep);
+found: ret = __memp_pgwrite(dbmp, dbmfp, hp, bhp);
- if (incremented) {
- MUTEX_THREAD_LOCK(dbmp->dbenv, dbmp->mutexp);
+ MUTEX_THREAD_LOCK(dbenv, dbmp->mutexp);
+ if (incremented)
--dbmfp->ref;
- MUTEX_THREAD_UNLOCK(dbmp->dbenv, dbmp->mutexp);
- }
+ else if (local_open)
+ F_SET(dbmfp, MP_FLUSH);
+ MUTEX_THREAD_UNLOCK(dbenv, dbmp->mutexp);
return (ret);
}
@@ -157,11 +173,12 @@ found: ret = __memp_pgwrite(dbmp, dbmfp, bhp, restartp, wrotep);
* __memp_pgread --
* Read a page from a file.
*
- * PUBLIC: int __memp_pgread __P((DB_MPOOLFILE *, BH *, int));
+ * PUBLIC: int __memp_pgread __P((DB_MPOOLFILE *, DB_MUTEX *, BH *, int));
*/
int
-__memp_pgread(dbmfp, bhp, can_create)
+__memp_pgread(dbmfp, mutexp, bhp, can_create)
DB_MPOOLFILE *dbmfp;
+ DB_MUTEX *mutexp;
BH *bhp;
int can_create;
{
@@ -169,171 +186,129 @@ __memp_pgread(dbmfp, bhp, can_create)
DB_ENV *dbenv;
DB_MPOOL *dbmp;
MPOOLFILE *mfp;
- size_t len, pagesize;
- size_t nr;
- int created, ret;
+ size_t len, nr, pagesize;
+ int ret;
dbmp = dbmfp->dbmp;
dbenv = dbmp->dbenv;
mfp = dbmfp->mfp;
pagesize = mfp->stat.st_pagesize;
+ /* We should never be called with a dirty or a locked buffer. */
+ DB_ASSERT(!F_ISSET(bhp, BH_DIRTY | BH_DIRTY_CREATE | BH_LOCKED));
+
+ /* Lock the buffer and swap the hash bucket lock for the buffer lock. */
F_SET(bhp, BH_LOCKED | BH_TRASH);
- MUTEX_LOCK(dbenv, &bhp->mutex, dbenv->lockfhp);
- R_UNLOCK(dbenv, dbmp->reginfo);
+ MUTEX_LOCK(dbenv, &bhp->mutex);
+ MUTEX_UNLOCK(dbenv, mutexp);
/*
* Temporary files may not yet have been created. We don't create
* them now, we create them when the pages have to be flushed.
*/
nr = 0;
- if (F_ISSET(&dbmfp->fh, DB_FH_VALID)) {
- /*
- * Ignore read errors if we have permission to create the page.
- * Assume that the page doesn't exist, and that we'll create it
- * when we write it out.
- *
- * XXX
- * Theoretically, we could overwrite a page of data if it were
- * possible for a file to be successfully opened for reading
- * and then for the read to fail. Shouldn't ever happen, but
- * it might be worth checking to see if the offset is past the
- * known end-of-file.
- */
- db_io.fhp = &dbmfp->fh;
+ if (F_ISSET(dbmfp->fhp, DB_FH_VALID)) {
+ db_io.fhp = dbmfp->fhp;
db_io.mutexp = dbmfp->mutexp;
db_io.pagesize = db_io.bytes = pagesize;
db_io.pgno = bhp->pgno;
db_io.buf = bhp->buf;
- ret = __os_io(dbenv, &db_io, DB_IO_READ, &nr);
- } else
- ret = 0;
+ /*
+ * The page may not exist; if it doesn't, nr may well be 0,
+ * but we expect the underlying OS calls not to return an
+ * error code in this case.
+ */
+ if ((ret = __os_io(dbenv, &db_io, DB_IO_READ, &nr)) != 0)
+ goto err;
+ }
- created = 0;
if (nr < pagesize) {
- if (can_create)
- created = 1;
- else {
- /*
- * If we had a short read, ret may be 0. This may not
- * be an error -- in particular DB recovery processing
- * may request pages that have never been written to
- * disk, in which case we won't find the page. So, the
- * caller must know how to handle the error.
- */
- if (ret == 0)
- ret = EIO;
+ /*
+ * Don't output error messages for short reads. In particular,
+ * DB recovery processing may request pages never written to
+ * disk or for which only some part has been written to disk,
+ * in which case we won't find the page. The caller must know
+ * how to handle the error.
+ */
+ if (can_create == 0) {
+ ret = DB_PAGE_NOTFOUND;
goto err;
}
- }
- /*
- * Clear any bytes we didn't read that need to be cleared. If we're
- * running in diagnostic mode, smash any bytes on the page that are
- * unknown quantities for the caller.
- */
- if (nr != pagesize) {
+ /* Clear any bytes that need to be cleared. */
len = mfp->clear_len == 0 ? pagesize : mfp->clear_len;
- if (nr < len)
- memset(bhp->buf + nr, 0, len - nr);
-#ifdef DIAGNOSTIC
- if (nr > len)
- len = nr;
+ memset(bhp->buf, 0, len);
+
+#if defined(DIAGNOSTIC) || defined(UMRW)
+ /*
+ * If we're running in diagnostic mode, corrupt any bytes on
+ * the page that are unknown quantities for the caller.
+ */
if (len < pagesize)
memset(bhp->buf + len, CLEAR_BYTE, pagesize - len);
#endif
- }
+ ++mfp->stat.st_page_create;
+ } else
+ ++mfp->stat.st_page_in;
/* Call any pgin function. */
ret = mfp->ftype == 0 ? 0 : __memp_pg(dbmfp, bhp, 1);
- /* Unlock the buffer and reacquire the region lock. */
+ /* Unlock the buffer and reacquire the hash bucket lock. */
err: MUTEX_UNLOCK(dbenv, &bhp->mutex);
- R_LOCK(dbenv, dbmp->reginfo);
+ MUTEX_LOCK(dbenv, mutexp);
/*
* If no errors occurred, the data is now valid, clear the BH_TRASH
* flag; regardless, clear the lock bit and let other threads proceed.
*/
F_CLR(bhp, BH_LOCKED);
- if (ret == 0) {
+ if (ret == 0)
F_CLR(bhp, BH_TRASH);
- /* Update the statistics. */
- if (created)
- ++mfp->stat.st_page_create;
- else
- ++mfp->stat.st_page_in;
- }
-
return (ret);
}
/*
* __memp_pgwrite --
* Write a page to a file.
- *
- * PUBLIC: int __memp_pgwrite
- * PUBLIC: __P((DB_MPOOL *, DB_MPOOLFILE *, BH *, int *, int *));
*/
-int
-__memp_pgwrite(dbmp, dbmfp, bhp, restartp, wrotep)
+static int
+__memp_pgwrite(dbmp, dbmfp, hp, bhp)
DB_MPOOL *dbmp;
DB_MPOOLFILE *dbmfp;
+ DB_MPOOL_HASH *hp;
BH *bhp;
- int *restartp, *wrotep;
{
DB_ENV *dbenv;
DB_IO db_io;
DB_LSN lsn;
- MPOOL *c_mp, *mp;
MPOOLFILE *mfp;
size_t nw;
- int callpgin, dosync, ret, syncfail;
- const char *fail;
+ int callpgin, ret;
dbenv = dbmp->dbenv;
- mp = dbmp->reginfo[0].primary;
mfp = dbmfp == NULL ? NULL : dbmfp->mfp;
-
- if (restartp != NULL)
- *restartp = 0;
- if (wrotep != NULL)
- *wrotep = 0;
- callpgin = 0;
+ callpgin = ret = 0;
/*
- * Check the dirty bit -- this buffer may have been written since we
- * decided to write it.
+ * We should never be called with a clean or trash buffer.
+ * The sync code does call us with already locked buffers.
*/
- if (!F_ISSET(bhp, BH_DIRTY)) {
- if (wrotep != NULL)
- *wrotep = 1;
- return (0);
- }
-
- MUTEX_LOCK(dbenv, &bhp->mutex, dbenv->lockfhp);
+ DB_ASSERT(F_ISSET(bhp, BH_DIRTY));
+ DB_ASSERT(!F_ISSET(bhp, BH_TRASH));
/*
- * If there were two writers, we may have just been waiting while the
- * other writer completed I/O on this buffer. Check the dirty bit one
- * more time.
+ * If we have not already traded the hash bucket lock for the buffer
+ * lock, do so now.
*/
- if (!F_ISSET(bhp, BH_DIRTY)) {
- MUTEX_UNLOCK(dbenv, &bhp->mutex);
-
- if (wrotep != NULL)
- *wrotep = 1;
- return (0);
+ if (!F_ISSET(bhp, BH_LOCKED)) {
+ F_SET(bhp, BH_LOCKED);
+ MUTEX_LOCK(dbenv, &bhp->mutex);
+ MUTEX_UNLOCK(dbenv, &hp->hash_mutex);
}
- F_SET(bhp, BH_LOCKED);
- R_UNLOCK(dbenv, dbmp->reginfo);
-
- if (restartp != NULL)
- *restartp = 1;
-
/*
* It's possible that the underlying file doesn't exist, either
* because of an outright removal or because it was a temporary
@@ -347,155 +322,122 @@ __memp_pgwrite(dbmp, dbmfp, bhp, restartp, wrotep)
goto file_dead;
/*
- * Ensure the appropriate log records are on disk. If the page is
- * being written as part of a sync operation, the flush has already
- * been done, unless it was written by the application *after* the
- * sync was scheduled.
+ * If the page is in a file for which we have LSN information, we have
+ * to ensure the appropriate log records are on disk.
*/
- if (LOGGING_ON(dbenv) &&
- (!F_ISSET(bhp, BH_SYNC) || F_ISSET(bhp, BH_SYNC_LOGFLSH))) {
+ if (LOGGING_ON(dbenv) && mfp->lsn_off != -1) {
memcpy(&lsn, bhp->buf + mfp->lsn_off, sizeof(DB_LSN));
- if ((ret = log_flush(dbenv, &lsn)) != 0)
+ if ((ret = dbenv->log_flush(dbenv, &lsn)) != 0)
goto err;
}
- DB_ASSERT(!LOGGING_ON(dbenv) ||
- log_compare(&((LOG *)((DB_LOG *)
- dbenv->lg_handle)->reginfo.primary)->s_lsn, &LSN(bhp->buf)) > 0);
+
+#ifdef DIAGNOSTIC
+ /*
+ * Verify write-ahead logging semantics.
+ *
+ * !!!
+ * One special case. There is a single field on the meta-data page,
+ * the last-page-number-in-the-file field, for which we do not log
+ * changes. If the page was originally created in a database that
+ * didn't have logging turned on, we can see a page marked dirty but
+ * for which no corresponding log record has been written. However,
+ * the only way that a page can be created for which there isn't a
+ * previous log record and valid LSN is when the page was created
+ * without logging turned on, and so we check for that special-case
+ * LSN value.
+ */
+ if (LOGGING_ON(dbenv) && !IS_NOT_LOGGED_LSN(LSN(bhp->buf))) {
+ /*
+ * There is a potential race here. If we are in the midst of
+ * switching log files, it's possible we could test against the
+ * old file and the new offset in the log region's LSN. If we
+ * fail the first test, acquire the log mutex and check again.
+ */
+ DB_LOG *dblp;
+ LOG *lp;
+
+ dblp = dbenv->lg_handle;
+ lp = dblp->reginfo.primary;
+ if (!IS_NOT_LOGGED_LSN(LSN(bhp->buf)) &&
+ log_compare(&lp->s_lsn, &LSN(bhp->buf)) <= 0) {
+ R_LOCK(dbenv, &dblp->reginfo);
+ DB_ASSERT(log_compare(&lp->s_lsn, &LSN(bhp->buf)) > 0);
+ R_UNLOCK(dbenv, &dblp->reginfo);
+ }
+ }
+#endif
/*
* Call any pgout function. We set the callpgin flag so that we flag
* that the contents of the buffer will need to be passed through pgin
* before they are reused.
*/
- if (mfp->ftype == 0)
- ret = 0;
- else {
+ if (mfp->ftype != 0) {
callpgin = 1;
if ((ret = __memp_pg(dbmfp, bhp, 0)) != 0)
goto err;
}
/* Temporary files may not yet have been created. */
- if (!F_ISSET(&dbmfp->fh, DB_FH_VALID)) {
+ if (!F_ISSET(dbmfp->fhp, DB_FH_VALID)) {
MUTEX_THREAD_LOCK(dbenv, dbmp->mutexp);
- if (!F_ISSET(&dbmfp->fh, DB_FH_VALID) &&
- ((ret = __db_appname(dbenv, DB_APP_TMP, NULL, NULL,
- DB_OSO_CREATE | DB_OSO_EXCL | DB_OSO_TEMP,
- &dbmfp->fh, NULL)) != 0 ||
- !F_ISSET(&dbmfp->fh, DB_FH_VALID))) {
- MUTEX_THREAD_UNLOCK(dbenv, dbmp->mutexp);
+ ret = F_ISSET(dbmfp->fhp, DB_FH_VALID) ? 0 :
+ __db_appname(dbenv, DB_APP_TMP, NULL,
+ F_ISSET(dbenv, DB_ENV_DIRECT_DB) ? DB_OSO_DIRECT : 0,
+ dbmfp->fhp, NULL);
+ MUTEX_THREAD_UNLOCK(dbenv, dbmp->mutexp);
+ if (ret != 0) {
__db_err(dbenv,
"unable to create temporary backing file");
goto err;
}
- MUTEX_THREAD_UNLOCK(dbenv, dbmp->mutexp);
}
/* Write the page. */
- db_io.fhp = &dbmfp->fh;
+ db_io.fhp = dbmfp->fhp;
db_io.mutexp = dbmfp->mutexp;
db_io.pagesize = db_io.bytes = mfp->stat.st_pagesize;
db_io.pgno = bhp->pgno;
db_io.buf = bhp->buf;
if ((ret = __os_io(dbenv, &db_io, DB_IO_WRITE, &nw)) != 0) {
- ret = __db_panic(dbenv, ret);
- fail = "write";
- goto syserr;
- }
- if (nw != mfp->stat.st_pagesize) {
- ret = EIO;
- fail = "write";
- goto syserr;
+ __db_err(dbenv, "%s: write failed for page %lu",
+ __memp_fn(dbmfp), (u_long)bhp->pgno);
+ goto err;
}
+ ++mfp->stat.st_page_out;
+err:
file_dead:
/*
* !!!
* Once we pass this point, dbmfp and mfp may be NULL, we may not have
* a valid file reference.
*
- * Unlock the buffer and reacquire the region lock.
+ * Unlock the buffer and reacquire the hash lock.
*/
MUTEX_UNLOCK(dbenv, &bhp->mutex);
- R_LOCK(dbenv, dbmp->reginfo);
+ MUTEX_LOCK(dbenv, &hp->hash_mutex);
/*
- * Clean up the flags based on a successful write.
- *
* If we rewrote the page, it will need processing by the pgin
* routine before reuse.
*/
if (callpgin)
F_SET(bhp, BH_CALLPGIN);
- F_CLR(bhp, BH_DIRTY | BH_LOCKED);
/*
- * If we write a buffer for which a checkpoint is waiting, update
- * the count of pending buffers (both in the mpool as a whole and
- * for this file). If the count for this file goes to zero, set a
- * flag so we flush the writes.
+ * Update the hash bucket statistics, reset the flags.
+ * If we were successful, the page is no longer dirty.
*/
- dosync = 0;
- if (F_ISSET(bhp, BH_SYNC)) {
- F_CLR(bhp, BH_SYNC | BH_SYNC_LOGFLSH);
-
- --mp->lsn_cnt;
- if (mfp != NULL)
- dosync = --mfp->lsn_cnt == 0 ? 1 : 0;
- }
-
- /* Update the page clean/dirty statistics. */
- c_mp = BH_TO_CACHE(dbmp, bhp);
- ++c_mp->stat.st_page_clean;
- --c_mp->stat.st_page_dirty;
-
- /* Update I/O statistics. */
- if (mfp != NULL)
- ++mfp->stat.st_page_out;
+ if (ret == 0) {
+ DB_ASSERT(hp->hash_page_dirty != 0);
+ --hp->hash_page_dirty;
- /*
- * Do the sync after everything else has been updated, so any incoming
- * checkpoint doesn't see inconsistent information.
- *
- * XXX:
- * Don't lock the region around the sync, fsync(2) has no atomicity
- * issues.
- *
- * XXX:
- * We ignore errors from the sync -- it makes no sense to return an
- * error to the calling process, so set a flag causing the checkpoint
- * to be retried later. There is a possibility, of course, that a
- * subsequent checkpoint was started and that we're going to force it
- * to fail. That should be unlikely, and fixing it would be difficult.
- */
- if (dosync) {
- R_UNLOCK(dbenv, dbmp->reginfo);
- syncfail = __os_fsync(dbenv, &dbmfp->fh) != 0;
- R_LOCK(dbenv, dbmp->reginfo);
- if (syncfail)
- F_SET(mp, MP_LSN_RETRY);
+ F_CLR(bhp, BH_DIRTY | BH_DIRTY_CREATE);
}
- if (wrotep != NULL)
- *wrotep = 1;
-
- return (0);
-
-syserr: __db_err(dbenv, "%s: %s failed for page %lu",
- __memp_fn(dbmfp), fail, (u_long)bhp->pgno);
-
-err: /* Unlock the buffer and reacquire the region lock. */
- MUTEX_UNLOCK(dbenv, &bhp->mutex);
- R_LOCK(dbenv, dbmp->reginfo);
-
- /*
- * Clean up the flags based on a failure.
- *
- * The page remains dirty but we remove our lock. If we rewrote the
- * page, it will need processing by the pgin routine before reuse.
- */
- if (callpgin)
- F_SET(bhp, BH_CALLPGIN);
+ /* Regardless, clear any sync wait-for count and remove our lock. */
+ bhp->ref_sync = 0;
F_CLR(bhp, BH_LOCKED);
return (ret);
@@ -514,15 +456,17 @@ __memp_pg(dbmfp, bhp, is_pgin)
int is_pgin;
{
DBT dbt, *dbtp;
+ DB_ENV *dbenv;
DB_MPOOL *dbmp;
DB_MPREG *mpreg;
MPOOLFILE *mfp;
int ftype, ret;
dbmp = dbmfp->dbmp;
+ dbenv = dbmp->dbenv;
mfp = dbmfp->mfp;
- MUTEX_THREAD_LOCK(dbmp->dbenv, dbmp->mutexp);
+ MUTEX_THREAD_LOCK(dbenv, dbmp->mutexp);
ftype = mfp->ftype;
for (mpreg = LIST_FIRST(&dbmp->dbregq);
@@ -536,28 +480,28 @@ __memp_pg(dbmfp, bhp, is_pgin)
dbt.data = R_ADDR(dbmp->reginfo, mfp->pgcookie_off);
dbtp = &dbt;
}
- MUTEX_THREAD_UNLOCK(dbmp->dbenv, dbmp->mutexp);
+ MUTEX_THREAD_UNLOCK(dbenv, dbmp->mutexp);
if (is_pgin) {
if (mpreg->pgin != NULL &&
- (ret = mpreg->pgin(dbmp->dbenv,
+ (ret = mpreg->pgin(dbenv,
bhp->pgno, bhp->buf, dbtp)) != 0)
goto err;
} else
if (mpreg->pgout != NULL &&
- (ret = mpreg->pgout(dbmp->dbenv,
+ (ret = mpreg->pgout(dbenv,
bhp->pgno, bhp->buf, dbtp)) != 0)
goto err;
break;
}
if (mpreg == NULL)
- MUTEX_THREAD_UNLOCK(dbmp->dbenv, dbmp->mutexp);
+ MUTEX_THREAD_UNLOCK(dbenv, dbmp->mutexp);
return (0);
-err: MUTEX_THREAD_UNLOCK(dbmp->dbenv, dbmp->mutexp);
- __db_err(dbmp->dbenv, "%s: %s failed for page %lu",
+err: MUTEX_THREAD_UNLOCK(dbenv, dbmp->mutexp);
+ __db_err(dbenv, "%s: %s failed for page %lu",
__memp_fn(dbmfp), is_pgin ? "pgin" : "pgout", (u_long)bhp->pgno);
return (ret);
}
@@ -566,55 +510,78 @@ err: MUTEX_THREAD_UNLOCK(dbmp->dbenv, dbmp->mutexp);
* __memp_bhfree --
* Free a bucket header and its referenced data.
*
- * PUBLIC: void __memp_bhfree __P((DB_MPOOL *, BH *, int));
+ * PUBLIC: void __memp_bhfree __P((DB_MPOOL *, DB_MPOOL_HASH *, BH *, int));
*/
void
-__memp_bhfree(dbmp, bhp, free_mem)
+__memp_bhfree(dbmp, hp, bhp, free_mem)
DB_MPOOL *dbmp;
+ DB_MPOOL_HASH *hp;
BH *bhp;
int free_mem;
{
- DB_HASHTAB *dbht;
+ DB_ENV *dbenv;
MPOOL *c_mp, *mp;
MPOOLFILE *mfp;
- int n_bucket, n_cache;
+ u_int32_t n_cache;
+ /*
+ * Assumes the hash bucket is locked and the MPOOL is not.
+ */
+ dbenv = dbmp->dbenv;
mp = dbmp->reginfo[0].primary;
- c_mp = BH_TO_CACHE(dbmp, bhp);
- n_cache = NCACHE(mp, bhp->pgno);
- n_bucket = NBUCKET(c_mp, bhp->mf_offset, bhp->pgno);
- dbht = R_ADDR(&dbmp->reginfo[n_cache], c_mp->htab);
+ n_cache = NCACHE(mp, bhp->mf_offset, bhp->pgno);
- /* Delete the buffer header from the hash bucket queue. */
- SH_TAILQ_REMOVE(&dbht[n_bucket], bhp, hq, __bh);
+ /*
+ * Delete the buffer header from the hash bucket queue and reset
+ * the hash bucket's priority, if necessary.
+ */
+ SH_TAILQ_REMOVE(&hp->hash_bucket, bhp, hq, __bh);
+ if (bhp->priority == hp->hash_priority)
+ hp->hash_priority =
+ SH_TAILQ_FIRST(&hp->hash_bucket, __bh) == NULL ?
+ 0 : SH_TAILQ_FIRST(&hp->hash_bucket, __bh)->priority;
- /* Delete the buffer header from the LRU queue. */
- SH_TAILQ_REMOVE(&c_mp->bhq, bhp, q, __bh);
+ /*
+ * Discard the hash bucket's mutex, it's no longer needed, and
+ * we don't want to be holding it when acquiring other locks.
+ */
+ MUTEX_UNLOCK(dbenv, &hp->hash_mutex);
- /* Clear the mutex this buffer recorded */
- __db_shlocks_clear(&bhp->mutex, &dbmp->reginfo[n_cache],
- (REGMAINT *)R_ADDR(&dbmp->reginfo[n_cache], mp->maint_off));
/*
* Find the underlying MPOOLFILE and decrement its reference count.
* If this is its last reference, remove it.
*/
mfp = R_ADDR(dbmp->reginfo, bhp->mf_offset);
+ MUTEX_LOCK(dbenv, &mfp->mutex);
if (--mfp->block_cnt == 0 && mfp->mpf_cnt == 0)
__memp_mf_discard(dbmp, mfp);
+ else
+ MUTEX_UNLOCK(dbenv, &mfp->mutex);
+
+ R_LOCK(dbenv, &dbmp->reginfo[n_cache]);
+
+ /*
+ * Clear the mutex this buffer recorded; requires the region lock
+ * be held.
+ */
+ __db_shlocks_clear(&bhp->mutex, &dbmp->reginfo[n_cache],
+ (REGMAINT *)R_ADDR(&dbmp->reginfo[n_cache], mp->maint_off));
/*
- * If we're not reusing it immediately, free the buffer header
+ * If we're not reusing the buffer immediately, free the buffer header
* and data for real.
*/
if (free_mem) {
- --c_mp->stat.st_page_clean;
__db_shalloc_free(dbmp->reginfo[n_cache].addr, bhp);
+ c_mp = dbmp->reginfo[n_cache].primary;
+ c_mp->stat.st_pages--;
}
+ R_UNLOCK(dbenv, &dbmp->reginfo[n_cache]);
}
/*
* __memp_upgrade --
- * Upgrade a file descriptor from readonly to readwrite.
+ * Upgrade a file descriptor from read-only to read-write.
*/
static int
__memp_upgrade(dbmp, dbmfp, mfp)
@@ -622,41 +589,58 @@ __memp_upgrade(dbmp, dbmfp, mfp)
DB_MPOOLFILE *dbmfp;
MPOOLFILE *mfp;
{
- DB_FH fh;
+ DB_ENV *dbenv;
+ DB_FH *fhp, *tfhp;
int ret;
char *rpath;
- /*
- * !!!
- * We expect the handle to already be locked.
- */
-
- /* Check to see if we've already upgraded. */
- if (F_ISSET(dbmfp, MP_UPGRADE))
- return (0);
-
- /* Check to see if we've already failed. */
- if (F_ISSET(dbmfp, MP_UPGRADE_FAIL))
- return (1);
+ dbenv = dbmp->dbenv;
+ fhp = NULL;
+ rpath = NULL;
/*
* Calculate the real name for this file and try to open it read/write.
* We know we have a valid pathname for the file because it's the only
* way we could have gotten a file descriptor of any kind.
*/
- if ((ret = __db_appname(dbmp->dbenv, DB_APP_DATA,
- NULL, R_ADDR(dbmp->reginfo, mfp->path_off), 0, NULL, &rpath)) != 0)
- return (ret);
- if (__os_open(dbmp->dbenv, rpath, 0, 0, &fh) != 0) {
+ if ((ret = __os_calloc(dbenv, 1, sizeof(DB_FH), &fhp)) != 0)
+ goto err;
+
+ if ((ret = __db_appname(dbenv, DB_APP_DATA,
+ R_ADDR(dbmp->reginfo, mfp->path_off), 0, NULL, &rpath)) != 0)
+ goto err;
+
+ if (__os_open(dbenv, rpath,
+ F_ISSET(mfp, MP_DIRECT) ? DB_OSO_DIRECT : 0, 0, fhp) != 0) {
F_SET(dbmfp, MP_UPGRADE_FAIL);
- ret = 1;
- } else {
- /* Swap the descriptors and set the upgrade flag. */
- (void)__os_closehandle(&dbmfp->fh);
- dbmfp->fh = fh;
- F_SET(dbmfp, MP_UPGRADE);
- ret = 0;
+ goto err;
}
- __os_freestr(rpath);
+
+ /*
+ * Swap the descriptors and set the upgrade flag.
+ *
+ * XXX
+ * There is a race here. If another process schedules a read using the
+ * existing file descriptor and is swapped out before making the system
+ * call, this code could theoretically close the file descriptor out
+ * from under it. While it's very unlikely, this code should still be
+ * rewritten.
+ */
+ tfhp = dbmfp->fhp;
+ dbmfp->fhp = fhp;
+ fhp = tfhp;
+
+ (void)__os_closehandle(dbenv, fhp);
+ F_SET(dbmfp, MP_UPGRADE);
+
+ ret = 0;
+ if (0) {
+err: ret = 1;
+ }
+ if (fhp != NULL)
+ __os_free(dbenv, fhp);
+ if (rpath != NULL)
+ __os_free(dbenv, rpath);
+
return (ret);
}
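
The write-ahead-logging step added to __memp_pgwrite above -- flush the log at least up to the LSN stored on the page before the page itself goes to disk -- is easy to lose among the surrounding error handling. A minimal sketch, assuming the same MPOOLFILE and BH fields used in the diff (an illustration, not a function from the tree):

/*
 * Sketch of the WAL rule applied before a dirty page is written:
 * the log must be durable at least up to the LSN on the page.
 */
static int
flush_log_for_page(dbenv, mfp, bhp)
	DB_ENV *dbenv;
	MPOOLFILE *mfp;
	BH *bhp;
{
	DB_LSN lsn;

	/* Pages without LSN information don't constrain the log. */
	if (!LOGGING_ON(dbenv) || mfp->lsn_off == -1)
		return (0);

	/* Copy the page's LSN and force the log to that point. */
	memcpy(&lsn, bhp->buf + mfp->lsn_off, sizeof(DB_LSN));
	return (dbenv->log_flush(dbenv, &lsn));
}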
diff --git a/bdb/mp/mp_fget.c b/bdb/mp/mp_fget.c
index 1bff5e136ab..be0785a2184 100644
--- a/bdb/mp/mp_fget.c
+++ b/bdb/mp/mp_fget.c
@@ -1,13 +1,13 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Copyright (c) 1996-2002
* Sleepycat Software. All rights reserved.
*/
#include "db_config.h"
#ifndef lint
-static const char revid[] = "$Id: mp_fget.c,v 11.28 2001/01/10 04:50:53 ubell Exp $";
+static const char revid[] = "$Id: mp_fget.c,v 11.68 2002/08/06 04:58:09 bostic Exp $";
#endif /* not lint */
#ifndef NO_SYSTEM_INCLUDES
@@ -16,51 +16,54 @@ static const char revid[] = "$Id: mp_fget.c,v 11.28 2001/01/10 04:50:53 ubell Ex
#include <string.h>
#endif
-#ifdef HAVE_RPC
-#include "db_server.h"
-#endif
-
#include "db_int.h"
-#include "db_shash.h"
-#include "mp.h"
+#include "dbinc/db_shash.h"
+#include "dbinc/mp.h"
-#ifdef HAVE_RPC
-#include "gen_client_ext.h"
-#include "rpc_client_ext.h"
+#ifdef HAVE_FILESYSTEM_NOTZERO
+static int __memp_fs_notzero
+ __P((DB_ENV *, DB_MPOOLFILE *, MPOOLFILE *, db_pgno_t *));
#endif
/*
- * memp_fget --
+ * __memp_fget --
* Get a page from the file.
+ *
+ * PUBLIC: int __memp_fget
+ * PUBLIC: __P((DB_MPOOLFILE *, db_pgno_t *, u_int32_t, void *));
*/
int
-memp_fget(dbmfp, pgnoaddr, flags, addrp)
+__memp_fget(dbmfp, pgnoaddr, flags, addrp)
DB_MPOOLFILE *dbmfp;
db_pgno_t *pgnoaddr;
u_int32_t flags;
void *addrp;
{
- BH *bhp;
+ enum { FIRST_FOUND, FIRST_MISS, SECOND_FOUND, SECOND_MISS } state;
+ BH *alloc_bhp, *bhp;
DB_ENV *dbenv;
DB_MPOOL *dbmp;
- DB_HASHTAB *dbht;
+ DB_MPOOL_HASH *hp;
MPOOL *c_mp, *mp;
MPOOLFILE *mfp;
- size_t n_bucket, n_cache, mf_offset;
- u_int32_t st_hsearch;
- int b_incr, first, ret;
+ roff_t mf_offset;
+ u_int32_t n_cache, st_hsearch;
+ int b_incr, extending, first, ret;
+
+ *(void **)addrp = NULL;
dbmp = dbmfp->dbmp;
dbenv = dbmp->dbenv;
- mp = dbmp->reginfo[0].primary;
- mfp = dbmfp->mfp;
-#ifdef HAVE_RPC
- if (F_ISSET(dbenv, DB_ENV_RPCCLIENT))
- return (__dbcl_memp_fget(dbmfp, pgnoaddr, flags, addrp));
-#endif
PANIC_CHECK(dbenv);
+ mp = dbmp->reginfo[0].primary;
+ mfp = dbmfp->mfp;
+ mf_offset = R_OFFSET(dbmp->reginfo, mfp);
+ alloc_bhp = bhp = NULL;
+ hp = NULL;
+ b_incr = extending = ret = 0;
+
/*
* Validate arguments.
*
@@ -74,100 +77,35 @@ memp_fget(dbmfp, pgnoaddr, flags, addrp)
* is to keep database files small. It's sleazy as hell, but we catch
* any attempt to actually write the file in memp_fput().
*/
-#define OKFLAGS \
- (DB_MPOOL_CREATE | DB_MPOOL_LAST | \
- DB_MPOOL_NEW | DB_MPOOL_NEW_GROUP | DB_MPOOL_EXTENT)
+#define OKFLAGS (DB_MPOOL_CREATE | DB_MPOOL_LAST | DB_MPOOL_NEW)
if (flags != 0) {
if ((ret = __db_fchk(dbenv, "memp_fget", flags, OKFLAGS)) != 0)
return (ret);
- switch (flags & ~DB_MPOOL_EXTENT) {
+ switch (flags) {
case DB_MPOOL_CREATE:
+ break;
case DB_MPOOL_LAST:
+ /* Get the last page number in the file. */
+ if (flags == DB_MPOOL_LAST) {
+ R_LOCK(dbenv, dbmp->reginfo);
+ *pgnoaddr = mfp->last_pgno;
+ R_UNLOCK(dbenv, dbmp->reginfo);
+ }
+ break;
case DB_MPOOL_NEW:
- case DB_MPOOL_NEW_GROUP:
- case 0:
+ /*
+ * If always creating a page, skip the first search
+ * of the hash bucket.
+ */
+ if (flags == DB_MPOOL_NEW)
+ goto alloc;
break;
default:
return (__db_ferr(dbenv, "memp_fget", 1));
}
}
-#ifdef DIAGNOSTIC
- /*
- * XXX
- * We want to switch threads as often as possible. Yield every time
- * we get a new page to ensure contention.
- */
- if (DB_GLOBAL(db_pageyield))
- __os_yield(dbenv, 1);
-#endif
-
- /* Initialize remaining local variables. */
- mf_offset = R_OFFSET(dbmp->reginfo, mfp);
- bhp = NULL;
- st_hsearch = 0;
- b_incr = ret = 0;
-
- R_LOCK(dbenv, dbmp->reginfo);
-
- /*
- * Check for the new, last or last + 1 page requests.
- *
- * Examine and update the file's last_pgno value. We don't care if
- * the last_pgno value immediately changes due to another thread --
- * at this instant in time, the value is correct. We do increment the
- * current last_pgno value if the thread is asking for a new page,
- * however, to ensure that two threads creating pages don't get the
- * same one.
- *
- * If we create a page, there is the potential that a page after it
- * in the file will be written before it will be written. Recovery
- * depends on pages that are "created" in the file by subsequent pages
- * being written be zeroed out, not have random garbage. Ensure that
- * the OS agrees.
- *
- * !!!
- * DB_MPOOL_NEW_GROUP is undocumented -- the hash access method needs
- * to allocate contiguous groups of pages in order to do subdatabases.
- * We return the first page in the group, but the caller must put an
- * LSN on the *last* page and write it, otherwise after a crash we may
- * not create all of the pages we need to create.
- */
- if (LF_ISSET(DB_MPOOL_LAST | DB_MPOOL_NEW | DB_MPOOL_NEW_GROUP)) {
- if (LF_ISSET(DB_MPOOL_NEW)) {
- if (F_ISSET(&dbmfp->fh, DB_FH_VALID) && (ret =
- __os_fpinit(dbenv, &dbmfp->fh, mfp->last_pgno + 1,
- 1, mfp->stat.st_pagesize)) != 0) {
- R_UNLOCK(dbenv, dbmp->reginfo);
- return (ret);
- }
- ++mfp->last_pgno;
- }
- if (LF_ISSET(DB_MPOOL_NEW_GROUP)) {
- if (F_ISSET(&dbmfp->fh, DB_FH_VALID) && (ret =
- __os_fpinit(dbenv, &dbmfp->fh, mfp->last_pgno + 1,
- (int)*pgnoaddr, mfp->stat.st_pagesize)) != 0) {
- R_UNLOCK(dbenv, dbmp->reginfo);
- return (ret);
- }
- mfp->last_pgno += *pgnoaddr;
- }
- *pgnoaddr = mfp->last_pgno;
- }
-
- /*
- * Determine the hash bucket where this page will live, and get local
- * pointers to the cache and its hash table.
- */
- n_cache = NCACHE(mp, *pgnoaddr);
- c_mp = dbmp->reginfo[n_cache].primary;
- n_bucket = NBUCKET(c_mp, mf_offset, *pgnoaddr);
- dbht = R_ADDR(&dbmp->reginfo[n_cache], c_mp->htab);
-
- if (LF_ISSET(DB_MPOOL_NEW | DB_MPOOL_NEW_GROUP))
- goto alloc;
-
/*
* If mmap'ing the file and the page is not past the end of the file,
* just return a pointer.
@@ -183,235 +121,534 @@ memp_fget(dbmfp, pgnoaddr, flags, addrp)
* goes through the cache. All pages previously returned will be safe,
* as long as the correct locking protocol was observed.
*
- * XXX
* We don't discard the map because we don't know when all of the
* pages will have been discarded from the process' address space.
* It would be possible to do so by reference counting the open
* pages from the mmap, but it's unclear to me that it's worth it.
*/
- if (dbmfp->addr != NULL && F_ISSET(mfp, MP_CAN_MMAP)) {
- if (*pgnoaddr > mfp->orig_last_pgno) {
- /*
- * !!!
- * See the comment above about non-existent pages and
- * the hash access method.
- */
- if (!LF_ISSET(DB_MPOOL_CREATE)) {
- if (!LF_ISSET(DB_MPOOL_EXTENT))
- __db_err(dbenv,
- "%s: page %lu doesn't exist",
- __memp_fn(dbmfp), (u_long)*pgnoaddr);
- ret = EINVAL;
- goto err;
- }
- } else {
- *(void **)addrp =
- R_ADDR(dbmfp, *pgnoaddr * mfp->stat.st_pagesize);
- ++mfp->stat.st_map;
- goto done;
- }
+ if (dbmfp->addr != NULL &&
+ F_ISSET(mfp, MP_CAN_MMAP) && *pgnoaddr <= mfp->orig_last_pgno) {
+ *(void **)addrp =
+ R_ADDR(dbmfp, *pgnoaddr * mfp->stat.st_pagesize);
+ ++mfp->stat.st_map;
+ return (0);
}
+hb_search:
+ /*
+ * Determine the cache and hash bucket where this page lives and get
+ * local pointers to them. Reset on each pass through this code, the
+ * page number can change.
+ */
+ n_cache = NCACHE(mp, mf_offset, *pgnoaddr);
+ c_mp = dbmp->reginfo[n_cache].primary;
+ hp = R_ADDR(&dbmp->reginfo[n_cache], c_mp->htab);
+ hp = &hp[NBUCKET(c_mp, mf_offset, *pgnoaddr)];
+
/* Search the hash chain for the page. */
- for (bhp = SH_TAILQ_FIRST(&dbht[n_bucket], __bh);
+retry: st_hsearch = 0;
+ MUTEX_LOCK(dbenv, &hp->hash_mutex);
+ for (bhp = SH_TAILQ_FIRST(&hp->hash_bucket, __bh);
bhp != NULL; bhp = SH_TAILQ_NEXT(bhp, hq, __bh)) {
++st_hsearch;
if (bhp->pgno != *pgnoaddr || bhp->mf_offset != mf_offset)
continue;
- /* Increment the reference count. */
+ /*
+ * Increment the reference count. We may discard the hash
+ * bucket lock as we evaluate and/or read the buffer, so we
+ * need to ensure it doesn't move and its contents remain
+ * unchanged.
+ */
if (bhp->ref == UINT16_T_MAX) {
__db_err(dbenv,
"%s: page %lu: reference count overflow",
__memp_fn(dbmfp), (u_long)bhp->pgno);
ret = EINVAL;
+ MUTEX_UNLOCK(dbenv, &hp->hash_mutex);
goto err;
}
-
- /*
- * Increment the reference count. We may discard the region
- * lock as we evaluate and/or read the buffer, so we need to
- * ensure that it doesn't move and that its contents remain
- * unchanged.
- */
++bhp->ref;
b_incr = 1;
/*
- * Any buffer we find might be trouble.
- *
* BH_LOCKED --
- * I/O is in progress. Because we've incremented the buffer
- * reference count, we know the buffer can't move. Unlock
- * the region lock, wait for the I/O to complete, and reacquire
- * the region.
+ * I/O is in progress or sync is waiting on the buffer to write
+ * it. Because we've incremented the buffer reference count,
+ * we know the buffer can't move. Unlock the bucket lock, wait
+ * for the buffer to become available, reacquire the bucket.
*/
- for (first = 1; F_ISSET(bhp, BH_LOCKED); first = 0) {
- R_UNLOCK(dbenv, dbmp->reginfo);
+ for (first = 1; F_ISSET(bhp, BH_LOCKED) &&
+ !F_ISSET(dbenv, DB_ENV_NOLOCKING); first = 0) {
+ /*
+ * If someone is trying to sync this buffer and the
+ * buffer is hot, they may never get in. Give up
+ * and try again.
+ */
+ if (!first && bhp->ref_sync != 0) {
+ --bhp->ref;
+ b_incr = 0;
+ MUTEX_UNLOCK(dbenv, &hp->hash_mutex);
+ __os_yield(dbenv, 1);
+ goto retry;
+ }
+ MUTEX_UNLOCK(dbenv, &hp->hash_mutex);
/*
- * Explicitly yield the processor if it's not the first
- * pass through this loop -- if we don't, we might end
- * up running to the end of our CPU quantum as we will
- * simply be swapping between the two locks.
+ * Explicitly yield the processor if not the first pass
+ * through this loop -- if we don't, we might run to the
+ * end of our CPU quantum as we will simply be swapping
+ * between the two locks.
*/
if (!first)
__os_yield(dbenv, 1);
- MUTEX_LOCK(dbenv, &bhp->mutex, dbenv->lockfhp);
+ MUTEX_LOCK(dbenv, &bhp->mutex);
/* Wait for I/O to finish... */
MUTEX_UNLOCK(dbenv, &bhp->mutex);
- R_LOCK(dbenv, dbmp->reginfo);
+ MUTEX_LOCK(dbenv, &hp->hash_mutex);
+ }
+
+ ++mfp->stat.st_cache_hit;
+ break;
+ }
+
+ /*
+ * Update the hash bucket search statistics -- do now because our next
+ * search may be for a different bucket.
+ */
+ ++c_mp->stat.st_hash_searches;
+ if (st_hsearch > c_mp->stat.st_hash_longest)
+ c_mp->stat.st_hash_longest = st_hsearch;
+ c_mp->stat.st_hash_examined += st_hsearch;
+
+ /*
+ * There are 4 possible paths to this location:
+ *
+ * FIRST_MISS:
+ * Didn't find the page in the hash bucket on our first pass:
+ * bhp == NULL, alloc_bhp == NULL
+ *
+ * FIRST_FOUND:
+ * Found the page in the hash bucket on our first pass:
+ * bhp != NULL, alloc_bhp == NULL
+ *
+ * SECOND_FOUND:
+ * Didn't find the page in the hash bucket on the first pass,
+ * allocated space, and found the page in the hash bucket on
+ * our second pass:
+ * bhp != NULL, alloc_bhp != NULL
+ *
+ * SECOND_MISS:
+ * Didn't find the page in the hash bucket on the first pass,
+ * allocated space, and didn't find the page in the hash bucket
+ * on our second pass:
+ * bhp == NULL, alloc_bhp != NULL
+ */
+ state = bhp == NULL ?
+ (alloc_bhp == NULL ? FIRST_MISS : SECOND_MISS) :
+ (alloc_bhp == NULL ? FIRST_FOUND : SECOND_FOUND);
+ switch (state) {
+ case FIRST_FOUND:
+ /* We found the buffer in our first check -- we're done. */
+ break;
+ case FIRST_MISS:
+ /*
+ * We didn't find the buffer in our first check. Figure out
+ * if the page exists, and allocate structures so we can add
+ * the page to the buffer pool.
+ */
+ MUTEX_UNLOCK(dbenv, &hp->hash_mutex);
+
+alloc: /*
+ * If DB_MPOOL_NEW is set, we have to allocate a page number.
+ * If neither DB_MPOOL_CREATE nor DB_MPOOL_NEW is set, then
+ * it's an error to try and get a page past the end of file.
+ */
+ COMPQUIET(n_cache, 0);
+
+ extending = ret = 0;
+ R_LOCK(dbenv, dbmp->reginfo);
+ switch (flags) {
+ case DB_MPOOL_NEW:
+ extending = 1;
+ *pgnoaddr = mfp->last_pgno + 1;
+ break;
+ case DB_MPOOL_CREATE:
+ extending = *pgnoaddr > mfp->last_pgno;
+ break;
+ default:
+ ret = *pgnoaddr > mfp->last_pgno ? DB_PAGE_NOTFOUND : 0;
+ break;
}
+ R_UNLOCK(dbenv, dbmp->reginfo);
+ if (ret != 0)
+ goto err;
/*
- * BH_TRASH --
- * The contents of the buffer are garbage. Shouldn't happen,
- * and this read is likely to fail, but might as well try.
+ * !!!
+ * In the DB_MPOOL_NEW code path, mf_offset and n_cache have
+ * not yet been initialized.
*/
- if (F_ISSET(bhp, BH_TRASH))
- goto reread;
+ mf_offset = R_OFFSET(dbmp->reginfo, mfp);
+ n_cache = NCACHE(mp, mf_offset, *pgnoaddr);
+ /* Allocate a new buffer header and data space. */
+ if ((ret = __memp_alloc(dbmp,
+ &dbmp->reginfo[n_cache], mfp, 0, NULL, &alloc_bhp)) != 0)
+ goto err;
+#ifdef DIAGNOSTIC
+ if ((db_alignp_t)alloc_bhp->buf & (sizeof(size_t) - 1)) {
+ __db_err(dbenv,
+ "Error: buffer data is NOT size_t aligned");
+ ret = EINVAL;
+ goto err;
+ }
+#endif
/*
- * BH_CALLPGIN --
- * The buffer was converted so it could be written, and the
- * contents need to be converted again.
+ * If we are extending the file, we'll need the region lock
+ * again.
*/
- if (F_ISSET(bhp, BH_CALLPGIN)) {
- if ((ret = __memp_pg(dbmfp, bhp, 1)) != 0)
+ if (extending)
+ R_LOCK(dbenv, dbmp->reginfo);
+
+ /*
+ * DB_MPOOL_NEW does not guarantee you a page unreferenced by
+ * any other thread of control. (That guarantee is interesting
+ * for DB_MPOOL_NEW, unlike DB_MPOOL_CREATE, because the caller
+ * did not specify the page number, and so may reasonably not
+ * have any way to lock the page outside of mpool.) Regardless,
+ * if we allocate the page, and some other thread of control
+ * requests the page by number, we will not detect that and the
+ * thread of control that allocated using DB_MPOOL_NEW may not
+ * have a chance to initialize the page. (Note: we *could*
+ * detect this case if we set a flag in the buffer header which
+ * guaranteed that no gets of the page would succeed until the
+ * reference count went to 0, that is, until the creating page
+ * put the page.) What we do guarantee is that if two threads
+ * of control are both doing DB_MPOOL_NEW calls, they won't
+ * collide, that is, they won't both get the same page.
+ *
+ * There's a possibility that another thread allocated the page
+ * we were planning to allocate while we were off doing buffer
+ * allocation. We can detect that by making sure the page number
+ * we were going to use is still available. If it's not, then
+ * we check to see if the next available page number hashes to
+ * the same mpool region as the old one -- if it does, we can
+ * continue, otherwise, we have to start over.
+ */
+ if (flags == DB_MPOOL_NEW && *pgnoaddr != mfp->last_pgno + 1) {
+ *pgnoaddr = mfp->last_pgno + 1;
+ if (n_cache != NCACHE(mp, mf_offset, *pgnoaddr)) {
+ __db_shalloc_free(
+ dbmp->reginfo[n_cache].addr, alloc_bhp);
+ /*
+ * flags == DB_MPOOL_NEW, so extending is set
+ * and we're holding the region locked.
+ */
+ R_UNLOCK(dbenv, dbmp->reginfo);
+
+ alloc_bhp = NULL;
+ goto alloc;
+ }
+ }
+
+ /*
+ * We released the region lock, so another thread might have
+ * extended the file. Update the last_pgno and initialize
+ * the file, as necessary, if we extended the file.
+ */
+ if (extending) {
+#ifdef HAVE_FILESYSTEM_NOTZERO
+ if (*pgnoaddr > mfp->last_pgno &&
+ __os_fs_notzero() &&
+ F_ISSET(dbmfp->fhp, DB_FH_VALID))
+ ret = __memp_fs_notzero(
+ dbenv, dbmfp, mfp, pgnoaddr);
+ else
+ ret = 0;
+#endif
+ if (ret == 0 && *pgnoaddr > mfp->last_pgno)
+ mfp->last_pgno = *pgnoaddr;
+
+ R_UNLOCK(dbenv, dbmp->reginfo);
+ if (ret != 0)
goto err;
- F_CLR(bhp, BH_CALLPGIN);
}
+ goto hb_search;
+ case SECOND_FOUND:
+ /*
+ * We allocated buffer space for the requested page, but then
+ * found the page in the buffer cache on our second check.
+ * That's OK -- we can use the page we found in the pool,
+ * unless DB_MPOOL_NEW is set.
+ *
+ * Free the allocated memory, we no longer need it. Since we
+ * can't acquire the region lock while holding the hash bucket
+ * lock, we have to release the hash bucket and re-acquire it.
+ * That's OK, because we have the buffer pinned down.
+ */
+ MUTEX_UNLOCK(dbenv, &hp->hash_mutex);
+ R_LOCK(dbenv, &dbmp->reginfo[n_cache]);
+ __db_shalloc_free(dbmp->reginfo[n_cache].addr, alloc_bhp);
+ alloc_bhp = NULL;
+ R_UNLOCK(dbenv, &dbmp->reginfo[n_cache]);
+ MUTEX_LOCK(dbenv, &hp->hash_mutex);
- ++mfp->stat.st_cache_hit;
- *(void **)addrp = bhp->buf;
- goto done;
- }
+ /*
+ * We can't use the page we found in the pool if DB_MPOOL_NEW
+ * was set. (For details, see the above comment beginning
+ * "DB_MPOOL_NEW does not guarantee you a page unreferenced by
+ * any other thread of control".) If DB_MPOOL_NEW is set, we
+ * release our pin on this particular buffer, and try to get
+ * another one.
+ */
+ if (flags == DB_MPOOL_NEW) {
+ --bhp->ref;
+ b_incr = 0;
+ goto alloc;
+ }
+ break;
+ case SECOND_MISS:
+ /*
+ * We allocated buffer space for the requested page, and found
+ * the page still missing on our second pass through the buffer
+ * cache. Instantiate the page.
+ */
+ bhp = alloc_bhp;
+ alloc_bhp = NULL;
-alloc: /* Allocate new buffer header and data space. */
- if ((ret = __memp_alloc(dbmp,
- &dbmp->reginfo[n_cache], mfp, 0, NULL, &bhp)) != 0)
- goto err;
+ /*
+ * Initialize all the BH and hash bucket fields so we can call
+ * __memp_bhfree if an error occurs.
+ *
+ * Append the buffer to the tail of the bucket list and update
+ * the hash bucket's priority.
+ */
+ b_incr = 1;
+
+ memset(bhp, 0, sizeof(BH));
+ bhp->ref = 1;
+ bhp->priority = UINT32_T_MAX;
+ bhp->pgno = *pgnoaddr;
+ bhp->mf_offset = mf_offset;
+ SH_TAILQ_INSERT_TAIL(&hp->hash_bucket, bhp, hq);
+ hp->hash_priority =
+ SH_TAILQ_FIRST(&hp->hash_bucket, __bh)->priority;
+
+ /* If we extended the file, make sure the page is never lost. */
+ if (extending) {
+ ++hp->hash_page_dirty;
+ F_SET(bhp, BH_DIRTY | BH_DIRTY_CREATE);
+ }
- ++c_mp->stat.st_page_clean;
+ /*
+ * If we created the page, zero it out. If we didn't create
+ * the page, read from the backing file.
+ *
+ * !!!
+ * DB_MPOOL_NEW doesn't call the pgin function.
+ *
+ * If DB_MPOOL_CREATE is used, then the application's pgin
+ * function has to be able to handle pages of 0's -- if it
+ * uses DB_MPOOL_NEW, it can detect all of its page creates,
+ * and not bother.
+ *
+ * If we're running in diagnostic mode, smash any bytes on the
+ * page that are unknown quantities for the caller.
+ *
+ * Otherwise, read the page into memory, optionally creating it
+ * if DB_MPOOL_CREATE is set.
+ */
+ if (extending) {
+ if (mfp->clear_len == 0)
+ memset(bhp->buf, 0, mfp->stat.st_pagesize);
+ else {
+ memset(bhp->buf, 0, mfp->clear_len);
+#if defined(DIAGNOSTIC) || defined(UMRW)
+ memset(bhp->buf + mfp->clear_len, CLEAR_BYTE,
+ mfp->stat.st_pagesize - mfp->clear_len);
+#endif
+ }
- /*
- * Initialize the BH fields so that we can call the __memp_bhfree
- * routine if an error occurs.
- */
- memset(bhp, 0, sizeof(BH));
- bhp->ref = 1;
- bhp->pgno = *pgnoaddr;
- bhp->mf_offset = mf_offset;
+ if (flags == DB_MPOOL_CREATE && mfp->ftype != 0)
+ F_SET(bhp, BH_CALLPGIN);
- /* Increment the count of buffers referenced by this MPOOLFILE. */
- ++mfp->block_cnt;
+ ++mfp->stat.st_page_create;
+ } else {
+ F_SET(bhp, BH_TRASH);
+ ++mfp->stat.st_cache_miss;
+ }
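/*
 * A standalone sketch of the page-initialization choice made just above:
 * zero the whole page, or zero only the first clear_len bytes and, in
 * diagnostic builds, smear the rest so uninitialized reads stand out.
 * The 0xdb fill value is an assumption, not taken from this diff.
 */
#include <string.h>

#define SKETCH_CLEAR_BYTE	0xdb

static void
init_created_page(unsigned char *buf, size_t pagesize, size_t clear_len,
    int diagnostic)
{
	if (clear_len == 0 || clear_len > pagesize)
		memset(buf, 0, pagesize);		/* zero it all */
	else {
		memset(buf, 0, clear_len);		/* zero the header */
		if (diagnostic)
			memset(buf + clear_len,
			    SKETCH_CLEAR_BYTE, pagesize - clear_len);
	}
}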
- /*
- * Prepend the bucket header to the head of the appropriate MPOOL
- * bucket hash list. Append the bucket header to the tail of the
- * MPOOL LRU chain.
- */
- SH_TAILQ_INSERT_HEAD(&dbht[n_bucket], bhp, hq, __bh);
- SH_TAILQ_INSERT_TAIL(&c_mp->bhq, bhp, q);
+ /* Increment buffer count referenced by MPOOLFILE. */
+ MUTEX_LOCK(dbenv, &mfp->mutex);
+ ++mfp->block_cnt;
+ MUTEX_UNLOCK(dbenv, &mfp->mutex);
-#ifdef DIAGNOSTIC
- if ((db_alignp_t)bhp->buf & (sizeof(size_t) - 1)) {
- __db_err(dbenv, "Internal error: BH data NOT size_t aligned.");
- ret = EINVAL;
- __memp_bhfree(dbmp, bhp, 1);
- goto err;
+ /*
+ * Initialize the mutex. This is the last initialization step,
+ * because it's the only one that can fail, and everything else
+ * must be set up or we can't jump to the err label because it
+ * will call __memp_bhfree.
+ */
+ if ((ret = __db_mutex_setup(dbenv,
+ &dbmp->reginfo[n_cache], &bhp->mutex, 0)) != 0)
+ goto err;
}
-#endif
- if ((ret = __db_shmutex_init(dbenv, &bhp->mutex,
- R_OFFSET(dbmp->reginfo, &bhp->mutex) + DB_FCNTL_OFF_MPOOL,
- 0, &dbmp->reginfo[n_cache],
- (REGMAINT *)R_ADDR(&dbmp->reginfo[n_cache], c_mp->maint_off)))
- != 0) {
- __memp_bhfree(dbmp, bhp, 1);
- goto err;
+ DB_ASSERT(bhp->ref != 0);
+
+ /*
+ * If we're the only reference, update buffer and bucket priorities.
+ * We may be about to release the hash bucket lock, so everything
+ * should be correct before we do. (We've already done this if we
+ * created the buffer, so there is no need to do it again.)
+ */
+ if (state != SECOND_MISS && bhp->ref == 1) {
+ bhp->priority = UINT32_T_MAX;
+ SH_TAILQ_REMOVE(&hp->hash_bucket, bhp, hq, __bh);
+ SH_TAILQ_INSERT_TAIL(&hp->hash_bucket, bhp, hq);
+ hp->hash_priority =
+ SH_TAILQ_FIRST(&hp->hash_bucket, __bh)->priority;
}
/*
- * If we created the page, zero it out and continue.
- *
- * !!!
- * Note: DB_MPOOL_NEW specifically doesn't call the pgin function.
- * If DB_MPOOL_CREATE is used, then the application's pgin function
- * has to be able to handle pages of 0's -- if it uses DB_MPOOL_NEW,
- * it can detect all of its page creates, and not bother.
+ * BH_TRASH --
+ * The buffer we found may need to be filled from the disk.
*
- * If we're running in diagnostic mode, smash any bytes on the
- * page that are unknown quantities for the caller.
- *
- * Otherwise, read the page into memory, optionally creating it if
- * DB_MPOOL_CREATE is set.
+ * It's possible for the read function to fail, which means we fail as
+ * well. Note, the __memp_pgread() function discards and reacquires
+ * the hash lock, so the buffer must be pinned down so that it cannot
+ * move and its contents are unchanged. Discard the buffer on failure
+ * unless another thread is waiting on our I/O to complete. It's OK to
+ * leave the buffer around, as the waiting thread will see the BH_TRASH
+ * flag set, and will also attempt to discard it. If there's a waiter,
+ * we need to decrement our reference count.
*/
- if (LF_ISSET(DB_MPOOL_NEW | DB_MPOOL_NEW_GROUP)) {
- if (mfp->clear_len == 0)
- memset(bhp->buf, 0, mfp->stat.st_pagesize);
- else {
- memset(bhp->buf, 0, mfp->clear_len);
-#ifdef DIAGNOSTIC
- memset(bhp->buf + mfp->clear_len, CLEAR_BYTE,
- mfp->stat.st_pagesize - mfp->clear_len);
-#endif
- }
+ if (F_ISSET(bhp, BH_TRASH) &&
+ (ret = __memp_pgread(dbmfp,
+ &hp->hash_mutex, bhp, LF_ISSET(DB_MPOOL_CREATE) ? 1 : 0)) != 0)
+ goto err;
- ++mfp->stat.st_page_create;
- } else {
- /*
- * It's possible for the read function to fail, which means
- * that we fail as well. Note, the __memp_pgread() function
- * discards the region lock, so the buffer must be pinned
- * down so that it cannot move and its contents are unchanged.
- */
-reread: if ((ret = __memp_pgread(dbmfp,
- bhp, LF_ISSET(DB_MPOOL_CREATE|DB_MPOOL_EXTENT))) != 0) {
- /*
- * !!!
- * Discard the buffer unless another thread is waiting
- * on our I/O to complete. Regardless, the header has
- * the BH_TRASH flag set.
- */
- if (bhp->ref == 1)
- __memp_bhfree(dbmp, bhp, 1);
+ /*
+ * BH_CALLPGIN --
+ * The buffer was processed for being written to disk, and now has
+ * to be re-converted for use.
+ */
+ if (F_ISSET(bhp, BH_CALLPGIN)) {
+ if ((ret = __memp_pg(dbmfp, bhp, 1)) != 0)
goto err;
- }
-
- ++mfp->stat.st_cache_miss;
+ F_CLR(bhp, BH_CALLPGIN);
}
+ MUTEX_UNLOCK(dbenv, &hp->hash_mutex);
+
+#ifdef DIAGNOSTIC
+ /* Update the file's pinned reference count. */
+ R_LOCK(dbenv, dbmp->reginfo);
+ ++dbmfp->pinref;
+ R_UNLOCK(dbenv, dbmp->reginfo);
+
/*
- * If we're returning a page after our current notion of the last-page,
- * update our information. Note, there's no way to un-instantiate this
- * page, it's going to exist whether it's returned to us dirty or not.
+ * We want to switch threads as often as possible, and at awkward
+ * times. Yield every time we get a new page to ensure contention.
*/
- if (bhp->pgno > mfp->last_pgno)
- mfp->last_pgno = bhp->pgno;
+ if (F_ISSET(dbenv, DB_ENV_YIELDCPU))
+ __os_yield(dbenv, 1);
+#endif
*(void **)addrp = bhp->buf;
+ return (0);
-done: /* Update the chain search statistics. */
- if (st_hsearch) {
- ++c_mp->stat.st_hash_searches;
- if (st_hsearch > c_mp->stat.st_hash_longest)
- c_mp->stat.st_hash_longest = st_hsearch;
- c_mp->stat.st_hash_examined += st_hsearch;
+err: /*
+ * Discard our reference. If we're the only reference, discard the
+ * buffer entirely. If we held a reference to a buffer, we are
+ * also still holding the hash bucket mutex.
+ */
+ if (b_incr) {
+ if (bhp->ref == 1)
+ (void)__memp_bhfree(dbmp, hp, bhp, 1);
+ else {
+ --bhp->ref;
+ MUTEX_UNLOCK(dbenv, &hp->hash_mutex);
+ }
}
- ++dbmfp->pinref;
+ /* If alloc_bhp is set, free the memory. */
+ if (alloc_bhp != NULL)
+ __db_shalloc_free(dbmp->reginfo[n_cache].addr, alloc_bhp);
- R_UNLOCK(dbenv, dbmp->reginfo);
+ return (ret);
+}
- return (0);
+#ifdef HAVE_FILESYSTEM_NOTZERO
+/*
+ * __memp_fs_notzero --
+ * Initialize the underlying allocated pages in the file.
+ */
+static int
+__memp_fs_notzero(dbenv, dbmfp, mfp, pgnoaddr)
+ DB_ENV *dbenv;
+ DB_MPOOLFILE *dbmfp;
+ MPOOLFILE *mfp;
+ db_pgno_t *pgnoaddr;
+{
+ DB_IO db_io;
+ u_int32_t i, npages;
+ size_t nw;
+ int ret;
+ u_int8_t *page;
+ char *fail;
-err: /* Discard our reference. */
- if (b_incr)
- --bhp->ref;
- R_UNLOCK(dbenv, dbmp->reginfo);
+ /*
+ * Pages allocated by writing pages past end-of-file are not zeroed
+ * on some systems. Recovery could theoretically be fooled by a page
+ * showing up that contained garbage. In order to avoid this, we
+ * have to write the pages out to disk, and flush them. The reason
+ * for the flush is that if we don't sync, the allocation of another
+ * page subsequent to this one might reach the disk first, and a
+ * crash at the right moment would leave us with this page as the one
+ * allocated by writing a page past it in the file.
+ *
+ * Hash is the only access method that allocates groups of pages. We
+ * know that it will use the existence of the last page in a group to
+ * signify that the entire group is OK; so, write all the pages but
+ * the last one in the group, flush them to disk, and then write the
+ * last one to disk and flush it.
+ */
+ if ((ret = __os_calloc(dbenv, 1, mfp->stat.st_pagesize, &page)) != 0)
+ return (ret);
+
+ db_io.fhp = dbmfp->fhp;
+ db_io.mutexp = dbmfp->mutexp;
+ db_io.pagesize = db_io.bytes = mfp->stat.st_pagesize;
+ db_io.buf = page;
+
+ npages = *pgnoaddr - mfp->last_pgno;
+ for (i = 1; i < npages; ++i) {
+ db_io.pgno = mfp->last_pgno + i;
+ if ((ret = __os_io(dbenv, &db_io, DB_IO_WRITE, &nw)) != 0) {
+ fail = "write";
+ goto err;
+ }
+ }
+ if (i != 1 && (ret = __os_fsync(dbenv, dbmfp->fhp)) != 0) {
+ fail = "sync";
+ goto err;
+ }
- *(void **)addrp = NULL;
+ db_io.pgno = mfp->last_pgno + npages;
+ if ((ret = __os_io(dbenv, &db_io, DB_IO_WRITE, &nw)) != 0) {
+ fail = "write";
+ goto err;
+ }
+ if ((ret = __os_fsync(dbenv, dbmfp->fhp)) != 0) {
+ fail = "sync";
+err: __db_err(dbenv, "%s: %s failed for page %lu",
+ __memp_fn(dbmfp), fail, (u_long)db_io.pgno);
+ }
+
+ __os_free(dbenv, page);
return (ret);
}
+#endif
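/*
 * The function above writes every page of a newly allocated group except
 * the last, syncs, and only then writes and syncs the last page, so the
 * page whose existence marks the group as complete can never reach disk
 * ahead of the pages it vouches for.  The same ordering sketched with
 * plain POSIX pwrite()/fsync() instead of BDB's __os_io() wrapper:
 */
#include <sys/types.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

static int
extend_with_zeroed_pages(int fd, size_t pagesize,
    unsigned long first_new, unsigned long last_new)
{
	unsigned char *page;
	unsigned long pgno;

	if ((page = calloc(1, pagesize)) == NULL)
		return (-1);

	/* Write every page but the last, then flush them. */
	for (pgno = first_new; pgno < last_new; ++pgno)
		if (pwrite(fd, page, pagesize,
		    (off_t)pgno * (off_t)pagesize) != (ssize_t)pagesize)
			goto err;
	if (last_new > first_new && fsync(fd) != 0)
		goto err;

	/* Now the last page in the group, and flush it as well. */
	if (pwrite(fd, page, pagesize,
	    (off_t)last_new * (off_t)pagesize) != (ssize_t)pagesize ||
	    fsync(fd) != 0)
		goto err;

	free(page);
	return (0);

err:	free(page);
	return (-1);
}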
diff --git a/bdb/mp/mp_fopen.c b/bdb/mp/mp_fopen.c
index 3611ded18f4..a91bf264652 100644
--- a/bdb/mp/mp_fopen.c
+++ b/bdb/mp/mp_fopen.c
@@ -1,13 +1,13 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Copyright (c) 1996-2002
* Sleepycat Software. All rights reserved.
*/
#include "db_config.h"
#ifndef lint
-static const char revid[] = "$Id: mp_fopen.c,v 11.41 2001/01/10 04:50:53 ubell Exp $";
+static const char revid[] = "$Id: mp_fopen.c,v 11.90 2002/08/26 15:22:01 bostic Exp $";
#endif /* not lint */
#ifndef NO_SYSTEM_INCLUDES
@@ -16,211 +16,464 @@ static const char revid[] = "$Id: mp_fopen.c,v 11.41 2001/01/10 04:50:53 ubell E
#include <string.h>
#endif
-#ifdef HAVE_RPC
-#include "db_server.h"
-#endif
-
#include "db_int.h"
-#include "db_shash.h"
-#include "mp.h"
-
-#ifdef HAVE_RPC
-#include "gen_client_ext.h"
-#include "rpc_client_ext.h"
-#endif
-
-static int __memp_mf_open __P((DB_MPOOL *, const char *,
- size_t, db_pgno_t, DB_MPOOL_FINFO *, u_int32_t, MPOOLFILE **));
+#include "dbinc/db_shash.h"
+#include "dbinc/mp.h"
+
+static int __memp_fclose __P((DB_MPOOLFILE *, u_int32_t));
+static int __memp_fopen __P((DB_MPOOLFILE *,
+ const char *, u_int32_t, int, size_t));
+static void __memp_get_fileid __P((DB_MPOOLFILE *, u_int8_t *));
+static void __memp_last_pgno __P((DB_MPOOLFILE *, db_pgno_t *));
+static void __memp_refcnt __P((DB_MPOOLFILE *, db_pgno_t *));
+static int __memp_set_clear_len __P((DB_MPOOLFILE *, u_int32_t));
+static int __memp_set_fileid __P((DB_MPOOLFILE *, u_int8_t *));
+static int __memp_set_ftype __P((DB_MPOOLFILE *, int));
+static int __memp_set_lsn_offset __P((DB_MPOOLFILE *, int32_t));
+static int __memp_set_pgcookie __P((DB_MPOOLFILE *, DBT *));
+static int __memp_set_priority __P((DB_MPOOLFILE *, DB_CACHE_PRIORITY));
+static void __memp_set_unlink __P((DB_MPOOLFILE *, int));
+
+/* Initialization methods cannot be called after open is called. */
+#define MPF_ILLEGAL_AFTER_OPEN(dbmfp, name) \
+ if (F_ISSET(dbmfp, MP_OPEN_CALLED)) \
+ return (__db_mi_open((dbmfp)->dbmp->dbenv, name, 1));
/*
- * MEMP_FREMOVE --
- * Discard an MPOOLFILE and any buffers it references: update the flags
- * so we never try to write buffers associated with the file, nor can we
- * find it when looking for files to join. In addition, clear the ftype
- * field, there's no reason to post-process pages, they can be discarded
- * by any thread.
- */
-#define MEMP_FREMOVE(mfp) { \
- mfp->ftype = 0; \
- F_SET(mfp, MP_DEADFILE); \
-}
-
-/*
- * memp_fopen --
- * Open a backing file for the memory pool.
+ * __memp_fcreate --
+ * Create a DB_MPOOLFILE handle.
+ *
+ * PUBLIC: int __memp_fcreate __P((DB_ENV *, DB_MPOOLFILE **, u_int32_t));
*/
int
-memp_fopen(dbenv, path, flags, mode, pagesize, finfop, retp)
+__memp_fcreate(dbenv, retp, flags)
DB_ENV *dbenv;
- const char *path;
- u_int32_t flags;
- int mode;
- size_t pagesize;
- DB_MPOOL_FINFO *finfop;
DB_MPOOLFILE **retp;
+ u_int32_t flags;
{
DB_MPOOL *dbmp;
+ DB_MPOOLFILE *dbmfp;
int ret;
-#ifdef HAVE_RPC
- if (F_ISSET(dbenv, DB_ENV_RPCCLIENT))
- return (__dbcl_memp_fopen(dbenv, path, flags,
- mode, pagesize, finfop, retp));
-#endif
-
PANIC_CHECK(dbenv);
- ENV_REQUIRES_CONFIG(dbenv, dbenv->mp_handle, DB_INIT_MPOOL);
+ ENV_REQUIRES_CONFIG(dbenv,
+ dbenv->mp_handle, "memp_fcreate", DB_INIT_MPOOL);
dbmp = dbenv->mp_handle;
/* Validate arguments. */
- if ((ret = __db_fchk(dbenv, "memp_fopen", flags,
- DB_CREATE |
- DB_NOMMAP | DB_ODDFILESIZE | DB_RDONLY | DB_TRUNCATE)) != 0)
+ if ((ret = __db_fchk(dbenv, "memp_fcreate", flags, 0)) != 0)
return (ret);
- /* Require a non-zero pagesize. */
- if (pagesize == 0 ||
- (finfop != NULL && finfop->clear_len > pagesize)) {
- __db_err(dbenv, "memp_fopen: illegal page size.");
- return (EINVAL);
+ /* Allocate and initialize the per-process structure. */
+ if ((ret = __os_calloc(dbenv, 1, sizeof(DB_MPOOLFILE), &dbmfp)) != 0)
+ return (ret);
+ if ((ret = __os_calloc(dbenv, 1, sizeof(DB_FH), &dbmfp->fhp)) != 0)
+ goto err;
+
+ /* Allocate and initialize a mutex if necessary. */
+ if (F_ISSET(dbenv, DB_ENV_THREAD) &&
+ (ret = __db_mutex_setup(dbenv, dbmp->reginfo, &dbmfp->mutexp,
+ MUTEX_ALLOC | MUTEX_THREAD)) != 0)
+ goto err;
+
+ dbmfp->ref = 1;
+ dbmfp->lsn_offset = -1;
+ dbmfp->dbmp = dbmp;
+ dbmfp->mfp = INVALID_ROFF;
+
+ dbmfp->close = __memp_fclose;
+ dbmfp->get = __memp_fget;
+ dbmfp->get_fileid = __memp_get_fileid;
+ dbmfp->last_pgno = __memp_last_pgno;
+ dbmfp->open = __memp_fopen;
+ dbmfp->put = __memp_fput;
+ dbmfp->refcnt = __memp_refcnt;
+ dbmfp->set = __memp_fset;
+ dbmfp->set_clear_len = __memp_set_clear_len;
+ dbmfp->set_fileid = __memp_set_fileid;
+ dbmfp->set_ftype = __memp_set_ftype;
+ dbmfp->set_lsn_offset = __memp_set_lsn_offset;
+ dbmfp->set_pgcookie = __memp_set_pgcookie;
+ dbmfp->set_priority = __memp_set_priority;
+ dbmfp->set_unlink = __memp_set_unlink;
+ dbmfp->sync = __memp_fsync;
+
+ *retp = dbmfp;
+ return (0);
+
+err: if (dbmfp != NULL) {
+ if (dbmfp->fhp != NULL)
+ (void)__os_free(dbenv, dbmfp->fhp);
+ (void)__os_free(dbenv, dbmfp);
}
+ return (ret);
+}
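/*
 * The handle created above exposes the 4.1-style method interface.  A
 * hedged sketch of how an application might drive it -- the environment
 * is assumed to be already opened with DB_INIT_MPOOL, the file name and
 * sizes are made up, and error handling is trimmed:
 */
#include <db.h>

static int
mpool_handle_example(DB_ENV *dbenv)
{
	DB_MPOOLFILE *mpf;
	db_pgno_t pgno;
	void *addr;
	int ret;

	if ((ret = dbenv->memp_fcreate(dbenv, &mpf, 0)) != 0)
		return (ret);

	/* Configuration must precede open (see MPF_ILLEGAL_AFTER_OPEN). */
	(void)mpf->set_clear_len(mpf, 32);

	if ((ret = mpf->open(mpf, "example.db", DB_CREATE, 0644, 4096)) != 0)
		goto err;

	/* Create a page, then return it marked dirty. */
	if ((ret = mpf->get(mpf, &pgno, DB_MPOOL_NEW, &addr)) != 0)
		goto err;
	ret = mpf->put(mpf, addr, DB_MPOOL_DIRTY);

err:	(void)mpf->close(mpf, 0);
	return (ret);
}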
- return (__memp_fopen(dbmp,
- NULL, path, flags, mode, pagesize, 1, finfop, retp));
+/*
+ * __memp_set_clear_len --
+ * Set the clear length.
+ */
+static int
+__memp_set_clear_len(dbmfp, clear_len)
+ DB_MPOOLFILE *dbmfp;
+ u_int32_t clear_len;
+{
+ MPF_ILLEGAL_AFTER_OPEN(dbmfp, "set_clear_len");
+
+ dbmfp->clear_len = clear_len;
+ return (0);
}
/*
- * __memp_set_unlink -- set unlink on last close flag.
- *
- * PUBLIC: void __memp_set_unlink __P((DB_MPOOLFILE *));
+ * __memp_set_fileid --
+ * Set the file ID.
*/
-void
-__memp_set_unlink(dbmpf)
- DB_MPOOLFILE *dbmpf;
+static int
+__memp_set_fileid(dbmfp, fileid)
+ DB_MPOOLFILE *dbmfp;
+ u_int8_t *fileid;
{
- DB_MPOOL *dbmp;
- dbmp = dbmpf->dbmp;
+ MPF_ILLEGAL_AFTER_OPEN(dbmfp, "set_fileid");
- R_LOCK(dbmp->dbenv, dbmp->reginfo);
- F_SET(dbmpf->mfp, MP_UNLINK);
- R_UNLOCK(dbmp->dbenv, dbmp->reginfo);
+ /*
+ * XXX
+ * This is dangerous -- we're saving the caller's pointer instead
+ * of allocating memory and copying the contents.
+ */
+ dbmfp->fileid = fileid;
+ return (0);
}
/*
- * __memp_clear_unlink -- clear unlink on last close flag.
- *
- * PUBLIC: void __memp_clear_unlink __P((DB_MPOOLFILE *));
+ * __memp_set_ftype --
+ * Set the file type (as registered).
*/
-void
-__memp_clear_unlink(dbmpf)
- DB_MPOOLFILE *dbmpf;
+static int
+__memp_set_ftype(dbmfp, ftype)
+ DB_MPOOLFILE *dbmfp;
+ int ftype;
+{
+ MPF_ILLEGAL_AFTER_OPEN(dbmfp, "set_ftype");
+
+ dbmfp->ftype = ftype;
+ return (0);
+}
+
+/*
+ * __memp_set_lsn_offset --
+ * Set the page's LSN offset.
+ */
+static int
+__memp_set_lsn_offset(dbmfp, lsn_offset)
+ DB_MPOOLFILE *dbmfp;
+ int32_t lsn_offset;
+{
+ MPF_ILLEGAL_AFTER_OPEN(dbmfp, "set_lsn_offset");
+
+ dbmfp->lsn_offset = lsn_offset;
+ return (0);
+}
+
+/*
+ * __memp_set_pgcookie --
+ * Set the pgin/pgout cookie.
+ */
+static int
+__memp_set_pgcookie(dbmfp, pgcookie)
+ DB_MPOOLFILE *dbmfp;
+ DBT *pgcookie;
+{
+ MPF_ILLEGAL_AFTER_OPEN(dbmfp, "set_pgcookie");
+
+ dbmfp->pgcookie = pgcookie;
+ return (0);
+}
+
+/*
+ * __memp_set_priority --
+ * Set the cache priority for pages from this file.
+ */
+static int
+__memp_set_priority(dbmfp, priority)
+ DB_MPOOLFILE *dbmfp;
+ DB_CACHE_PRIORITY priority;
+{
+ switch (priority) {
+ case DB_PRIORITY_VERY_LOW:
+ dbmfp->mfp->priority = MPOOL_PRI_VERY_LOW;
+ break;
+ case DB_PRIORITY_LOW:
+ dbmfp->mfp->priority = MPOOL_PRI_LOW;
+ break;
+ case DB_PRIORITY_DEFAULT:
+ dbmfp->mfp->priority = MPOOL_PRI_DEFAULT;
+ break;
+ case DB_PRIORITY_HIGH:
+ dbmfp->mfp->priority = MPOOL_PRI_HIGH;
+ break;
+ case DB_PRIORITY_VERY_HIGH:
+ dbmfp->mfp->priority = MPOOL_PRI_VERY_HIGH;
+ break;
+ default:
+ __db_err(dbmfp->dbmp->dbenv,
+ "Unknown priority value: %d", priority);
+ return (EINVAL);
+ }
+
+ return (0);
+}
+
+/*
+ * __memp_fopen --
+ * Open a backing file for the memory pool.
+ */
+static int
+__memp_fopen(dbmfp, path, flags, mode, pagesize)
+ DB_MPOOLFILE *dbmfp;
+ const char *path;
+ u_int32_t flags;
+ int mode;
+ size_t pagesize;
{
+ DB_ENV *dbenv;
DB_MPOOL *dbmp;
- dbmp = dbmpf->dbmp;
+ int ret;
+
+ dbmp = dbmfp->dbmp;
+ dbenv = dbmp->dbenv;
+
+ PANIC_CHECK(dbenv);
+
+ /* Validate arguments. */
+ if ((ret = __db_fchk(dbenv, "memp_fopen", flags,
+ DB_CREATE | DB_DIRECT | DB_EXTENT |
+ DB_NOMMAP | DB_ODDFILESIZE | DB_RDONLY | DB_TRUNCATE)) != 0)
+ return (ret);
/*
- * This bit is protected in the queue code because the metapage
- * is locked so we can avoid geting the region lock.
- * If this gets used from other than the queue code, we cannot.
+ * Require a non-zero, power-of-two pagesize that is no smaller than
+ * the clear length.
*/
- if (!F_ISSET(dbmpf->mfp, MP_UNLINK))
- return;
- R_LOCK(dbmp->dbenv, dbmp->reginfo);
- F_CLR(dbmpf->mfp, MP_UNLINK);
- R_UNLOCK(dbmp->dbenv, dbmp->reginfo);
+ if (pagesize == 0 || !POWER_OF_TWO(pagesize)) {
+ __db_err(dbenv,
+ "memp_fopen: page sizes must be a power-of-2");
+ return (EINVAL);
+ }
+ if (dbmfp->clear_len > pagesize) {
+ __db_err(dbenv,
+ "memp_fopen: clear length larger than page size");
+ return (EINVAL);
+ }
+
+ /* Read-only checks, and local flag. */
+ if (LF_ISSET(DB_RDONLY) && path == NULL) {
+ __db_err(dbenv,
+ "memp_fopen: temporary files can't be readonly");
+ return (EINVAL);
+ }
+
+ return (__memp_fopen_int(dbmfp, NULL, path, flags, mode, pagesize));
}
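/*
 * The validation above leans on a POWER_OF_TWO() test.  The usual bit
 * trick, shown standalone (name illustrative): a non-zero value is a
 * power of two exactly when it has a single bit set.
 */
#include <stddef.h>

static int
is_power_of_two(size_t v)
{
	return (v != 0 && (v & (v - 1)) == 0);
}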
/*
- * __memp_fopen --
+ * __memp_fopen_int --
* Open a backing file for the memory pool; internal version.
*
- * PUBLIC: int __memp_fopen __P((DB_MPOOL *, MPOOLFILE *, const char *,
- * PUBLIC: u_int32_t, int, size_t, int, DB_MPOOL_FINFO *, DB_MPOOLFILE **));
+ * PUBLIC: int __memp_fopen_int __P((DB_MPOOLFILE *,
+ * PUBLIC: MPOOLFILE *, const char *, u_int32_t, int, size_t));
*/
int
-__memp_fopen(dbmp, mfp, path, flags, mode, pagesize, needlock, finfop, retp)
- DB_MPOOL *dbmp;
+__memp_fopen_int(dbmfp, mfp, path, flags, mode, pagesize)
+ DB_MPOOLFILE *dbmfp;
MPOOLFILE *mfp;
const char *path;
u_int32_t flags;
- int mode, needlock;
+ int mode;
size_t pagesize;
- DB_MPOOL_FINFO *finfop;
- DB_MPOOLFILE **retp;
{
DB_ENV *dbenv;
- DB_MPOOLFILE *dbmfp;
- DB_MPOOL_FINFO finfo;
+ DB_MPOOL *dbmp;
+ MPOOL *mp;
db_pgno_t last_pgno;
size_t maxmap;
u_int32_t mbytes, bytes, oflags;
- int ret;
+ int mfp_alloc, ret;
u_int8_t idbuf[DB_FILE_ID_LEN];
char *rpath;
+ void *p;
+ dbmp = dbmfp->dbmp;
dbenv = dbmp->dbenv;
- ret = 0;
+ mp = dbmp->reginfo[0].primary;
+ mfp_alloc = ret = 0;
rpath = NULL;
/*
- * If mfp is provided, we take the DB_MPOOL_FINFO information from
- * the mfp. We don't bother initializing everything, because some
- * of them are expensive to acquire. If no mfp is provided and the
- * finfop argument is NULL, we default the values.
+ * Set the page size so os_open can decide whether to turn buffering
+ * off if the DB_DIRECT_DB flag is set.
*/
- if (finfop == NULL) {
- memset(&finfo, 0, sizeof(finfo));
- if (mfp != NULL) {
- finfo.ftype = mfp->ftype;
- finfo.pgcookie = NULL;
- finfo.fileid = NULL;
- finfo.lsn_offset = mfp->lsn_off;
- finfo.clear_len = mfp->clear_len;
- } else {
- finfo.ftype = 0;
- finfo.pgcookie = NULL;
- finfo.fileid = NULL;
- finfo.lsn_offset = -1;
- finfo.clear_len = 0;
- }
- finfop = &finfo;
- }
+ dbmfp->fhp->pagesize = (u_int32_t)pagesize;
- /* Allocate and initialize the per-process structure. */
- if ((ret = __os_calloc(dbenv, 1, sizeof(DB_MPOOLFILE), &dbmfp)) != 0)
- return (ret);
- dbmfp->dbmp = dbmp;
- dbmfp->ref = 1;
- if (LF_ISSET(DB_RDONLY))
+ /*
+ * If it's a temporary file, delay the open until we actually need
+ * to write the file, and we know we can't join any existing files.
+ */
+ if (path == NULL)
+ goto alloc;
+
+ /*
+ * Get the real name for this file and open it. If it's a Queue extent
+ * file, it may not exist, and that's OK.
+ */
+ oflags = 0;
+ if (LF_ISSET(DB_CREATE))
+ oflags |= DB_OSO_CREATE;
+ if (LF_ISSET(DB_DIRECT))
+ oflags |= DB_OSO_DIRECT;
+ if (LF_ISSET(DB_RDONLY)) {
F_SET(dbmfp, MP_READONLY);
+ oflags |= DB_OSO_RDONLY;
+ }
+ if ((ret =
+ __db_appname(dbenv, DB_APP_DATA, path, 0, NULL, &rpath)) != 0)
+ goto err;
+ if ((ret = __os_open(dbenv, rpath, oflags, mode, dbmfp->fhp)) != 0) {
+ if (!LF_ISSET(DB_EXTENT))
+ __db_err(dbenv, "%s: %s", rpath, db_strerror(ret));
+ goto err;
+ }
- if (path == NULL) {
- if (LF_ISSET(DB_RDONLY)) {
- __db_err(dbenv,
- "memp_fopen: temporary files can't be readonly");
- ret = EINVAL;
+ /*
+ * Get the file id if we weren't given one. Generated file id's
+ * don't use timestamps, otherwise there'd be no chance of any
+ * other process joining the party.
+ */
+ if (dbmfp->fileid == NULL) {
+ if ((ret = __os_fileid(dbenv, rpath, 0, idbuf)) != 0)
goto err;
+ dbmfp->fileid = idbuf;
+ }
+
+ /*
+ * If our caller knows what mfp we're using, increment the ref count,
+ * no need to search.
+ *
+ * We don't need to acquire a lock other than the mfp itself, because
+ * we know there's another reference and it's not going away.
+ */
+ if (mfp != NULL) {
+ MUTEX_LOCK(dbenv, &mfp->mutex);
+ ++mfp->mpf_cnt;
+ MUTEX_UNLOCK(dbenv, &mfp->mutex);
+ goto check_map;
+ }
+
+ /*
+ * If not creating a temporary file, walk the list of MPOOLFILE's,
+ * looking for a matching file. Files backed by temporary files
+ * or previously removed files can't match.
+ *
+ * DB_TRUNCATE support.
+ *
+ * The fileID is a filesystem unique number (e.g., a UNIX dev/inode
+ * pair) plus a timestamp. If files are removed and created in less
+ * than a second, the fileID can be repeated. The problem with
+ * repetition happens when the file that previously had the fileID
+ * value still has pages in the pool, since we don't want to use them
+ * to satisfy requests for the new file.
+ *
+ * Because the DB_TRUNCATE flag reuses the dev/inode pair, repeated
+ * opens with that flag set guarantees matching fileIDs when the
+ * machine can open a file and then re-open with truncate within a
+ * second. For this reason, we pass that flag down, and, if we find
+ * a matching entry, we ensure that it's never found again, and we
+ * create a new entry for the current request.
+ */
+ R_LOCK(dbenv, dbmp->reginfo);
+ for (mfp = SH_TAILQ_FIRST(&mp->mpfq, __mpoolfile);
+ mfp != NULL; mfp = SH_TAILQ_NEXT(mfp, q, __mpoolfile)) {
+ /* Skip dead files and temporary files. */
+ if (F_ISSET(mfp, MP_DEADFILE | MP_TEMP))
+ continue;
+
+ /* Skip non-matching files. */
+ if (memcmp(dbmfp->fileid, R_ADDR(dbmp->reginfo,
+ mfp->fileid_off), DB_FILE_ID_LEN) != 0)
+ continue;
+
+ /*
+ * If the file is being truncated, remove it from the system
+ * and create a new entry.
+ *
+ * !!!
+ * We should be able to set mfp to NULL and break out of the
+ * loop, but I like the idea of checking all the entries.
+ */
+ if (LF_ISSET(DB_TRUNCATE)) {
+ MUTEX_LOCK(dbenv, &mfp->mutex);
+ MPOOLFILE_IGNORE(mfp);
+ MUTEX_UNLOCK(dbenv, &mfp->mutex);
+ continue;
}
- last_pgno = 0;
- } else {
- /* Get the real name for this file and open it. */
- if ((ret = __db_appname(dbenv,
- DB_APP_DATA, NULL, path, 0, NULL, &rpath)) != 0)
- goto err;
- oflags = 0;
- if (LF_ISSET(DB_CREATE))
- oflags |= DB_OSO_CREATE;
- if (LF_ISSET(DB_RDONLY))
- oflags |= DB_OSO_RDONLY;
- if ((ret =
- __os_open(dbenv, rpath, oflags, mode, &dbmfp->fh)) != 0) {
- if (!LF_ISSET(DB_EXTENT))
- __db_err(dbenv,
- "%s: %s", rpath, db_strerror(ret));
+
+ /*
+ * Some things about a file cannot be changed: the clear length,
+ * page size, or LSN location.
+ *
+ * The file type can change if the application's pre- and post-
+ * processing needs change. For example, an application that
+ * processing needs change. For example, an application might
+ * create a hash subdatabase in a database that was previously
+ * all btree.
+ * XXX
+ * We do not check to see if the pgcookie information changed,
+ * or update it if it has; this might be a bug.
+ */
+ if (dbmfp->clear_len != mfp->clear_len ||
+ pagesize != mfp->stat.st_pagesize ||
+ dbmfp->lsn_offset != mfp->lsn_off) {
+ __db_err(dbenv,
+ "%s: clear length, page size or LSN location changed",
+ path);
+ R_UNLOCK(dbenv, dbmp->reginfo);
+ ret = EINVAL;
goto err;
}
+ if (dbmfp->ftype != 0)
+ mfp->ftype = dbmfp->ftype;
+
+ MUTEX_LOCK(dbenv, &mfp->mutex);
+ ++mfp->mpf_cnt;
+ MUTEX_UNLOCK(dbenv, &mfp->mutex);
+ break;
+ }
+ R_UNLOCK(dbenv, dbmp->reginfo);
+
+ if (mfp != NULL)
+ goto check_map;
+
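/*
 * The walk above matches files by fileID, which the earlier comment
 * describes as a filesystem-unique dev/inode pair plus a timestamp
 * (omitted for generated IDs so other processes can join).  A simplified
 * POSIX sketch of building such an ID -- the 20-byte layout here is
 * hypothetical and does not match __os_fileid()'s real encoding:
 */
#include <sys/types.h>
#include <sys/stat.h>
#include <string.h>
#include <time.h>

#define SKETCH_FILE_ID_LEN	20

static int
sketch_fileid(const char *path, int unique,
    unsigned char id[SKETCH_FILE_ID_LEN])
{
	struct stat sb;
	size_t devlen, inolen;
	time_t now;

	if (stat(path, &sb) != 0)
		return (-1);

	memset(id, 0, SKETCH_FILE_ID_LEN);
	devlen = sizeof(sb.st_dev) > 8 ? 8 : sizeof(sb.st_dev);
	inolen = sizeof(sb.st_ino) > 8 ? 8 : sizeof(sb.st_ino);
	memcpy(id, &sb.st_dev, devlen);
	memcpy(id + 8, &sb.st_ino, inolen);

	/* A timestamp makes the ID unique but unjoinable by other processes. */
	if (unique) {
		now = time(NULL);
		memcpy(id + 16, &now, sizeof(now) > 4 ? 4 : sizeof(now));
	}
	return (0);
}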
+alloc: /* Allocate and initialize a new MPOOLFILE. */
+ if ((ret = __memp_alloc(
+ dbmp, dbmp->reginfo, NULL, sizeof(MPOOLFILE), NULL, &mfp)) != 0)
+ goto err;
+ mfp_alloc = 1;
+ memset(mfp, 0, sizeof(MPOOLFILE));
+ mfp->mpf_cnt = 1;
+ mfp->ftype = dbmfp->ftype;
+ mfp->stat.st_pagesize = pagesize;
+ mfp->lsn_off = dbmfp->lsn_offset;
+ mfp->clear_len = dbmfp->clear_len;
+
+ if (LF_ISSET(DB_DIRECT))
+ F_SET(mfp, MP_DIRECT);
+ if (LF_ISSET(DB_EXTENT))
+ F_SET(mfp, MP_EXTENT);
+
+ if (path == NULL)
+ F_SET(mfp, MP_TEMP);
+ else {
/*
* Don't permit files that aren't a multiple of the pagesize,
* and find the number of the last page in the file, all the
@@ -234,93 +487,84 @@ __memp_fopen(dbmp, mfp, path, flags, mode, pagesize, needlock, finfop, retp)
* environments where an off_t is 32-bits, but still run where
* offsets are 64-bits, and they pay us a lot of money.
*/
- if ((ret = __os_ioinfo(dbenv, rpath,
- &dbmfp->fh, &mbytes, &bytes, NULL)) != 0) {
+ if ((ret = __os_ioinfo(
+ dbenv, rpath, dbmfp->fhp, &mbytes, &bytes, NULL)) != 0) {
__db_err(dbenv, "%s: %s", rpath, db_strerror(ret));
goto err;
}
/*
- * If we're doing a verify, we might have to cope with
- * a truncated file; if the file size is not a multiple
- * of the page size, round down to a page--we'll
- * take care of the partial page outside the memp system.
+ * During verify or recovery, we might have to cope with a
+ * truncated file; if the file size is not a multiple of the
+ * page size, round down to a page; we'll take care of the
+ * partial page outside the mpool system.
*/
-
- /* Page sizes have to be a power-of-two, ignore mbytes. */
if (bytes % pagesize != 0) {
if (LF_ISSET(DB_ODDFILESIZE))
- /*
- * If we're doing a verify, we might
- * have to cope with a truncated file;
- * round down, we'll worry about the partial
- * page outside the memp system.
- */
- bytes -= (bytes % pagesize);
+ bytes -= (u_int32_t)(bytes % pagesize);
else {
__db_err(dbenv,
- "%s: file size not a multiple of the pagesize",
- rpath);
+ "%s: file size not a multiple of the pagesize", rpath);
ret = EINVAL;
goto err;
}
}
- last_pgno = mbytes * (MEGABYTE / pagesize);
- last_pgno += bytes / pagesize;
-
- /* Correction: page numbers are zero-based, not 1-based. */
+ /*
+ * If the user specifies DB_MPOOL_LAST or DB_MPOOL_NEW on a
+ * page get, we have to increment the last page in the file.
+ * Figure it out and save it away.
+ *
+ * Note correction: page numbers are zero-based, not 1-based.
+ */
+ last_pgno = (db_pgno_t)(mbytes * (MEGABYTE / pagesize));
+ last_pgno += (db_pgno_t)(bytes / pagesize);
if (last_pgno != 0)
--last_pgno;
+ mfp->orig_last_pgno = mfp->last_pgno = last_pgno;
- /*
- * Get the file id if we weren't given one. Generated file id's
- * don't use timestamps, otherwise there'd be no chance of any
- * other process joining the party.
- */
- if (finfop->fileid == NULL) {
- if ((ret = __os_fileid(dbenv, rpath, 0, idbuf)) != 0)
- goto err;
- finfop->fileid = idbuf;
- }
- }
+ /* Copy the file path into shared memory. */
+ if ((ret = __memp_alloc(dbmp, dbmp->reginfo,
+ NULL, strlen(path) + 1, &mfp->path_off, &p)) != 0)
+ goto err;
+ memcpy(p, path, strlen(path) + 1);
- /*
- * If we weren't provided an underlying shared object to join with,
- * find/allocate the shared file objects. Also allocate space for
- * for the per-process thread lock.
- */
- if (needlock)
- R_LOCK(dbenv, dbmp->reginfo);
- if (mfp == NULL)
- ret = __memp_mf_open(
- dbmp, path, pagesize, last_pgno, finfop, flags, &mfp);
- else {
- ++mfp->mpf_cnt;
- ret = 0;
+ /* Copy the file identification string into shared memory. */
+ if ((ret = __memp_alloc(dbmp, dbmp->reginfo,
+ NULL, DB_FILE_ID_LEN, &mfp->fileid_off, &p)) != 0)
+ goto err;
+ memcpy(p, dbmfp->fileid, DB_FILE_ID_LEN);
}
- if (needlock)
- R_UNLOCK(dbenv, dbmp->reginfo);
- if (ret != 0)
- goto err;
- if (F_ISSET(dbenv, DB_ENV_THREAD)) {
- if ((ret = __db_mutex_alloc(
- dbenv, dbmp->reginfo, &dbmfp->mutexp)) != 0)
- goto err;
- if ((ret = __db_mutex_init(
- dbenv, dbmfp->mutexp, 0, MUTEX_THREAD)) != 0)
+ /* Copy the page cookie into shared memory. */
+ if (dbmfp->pgcookie == NULL || dbmfp->pgcookie->size == 0) {
+ mfp->pgcookie_len = 0;
+ mfp->pgcookie_off = 0;
+ } else {
+ if ((ret = __memp_alloc(dbmp, dbmp->reginfo,
+ NULL, dbmfp->pgcookie->size, &mfp->pgcookie_off, &p)) != 0)
goto err;
-
- /* XXX: KEITH: CLOSE THE FILE ON FAILURE? */
+ memcpy(p, dbmfp->pgcookie->data, dbmfp->pgcookie->size);
+ mfp->pgcookie_len = dbmfp->pgcookie->size;
}
- dbmfp->mfp = mfp;
+ /*
+ * Prepend the MPOOLFILE to the list of MPOOLFILE's.
+ */
+ R_LOCK(dbenv, dbmp->reginfo);
+ ret = __db_mutex_setup(dbenv, dbmp->reginfo, &mfp->mutex,
+ MUTEX_NO_RLOCK);
+ if (ret == 0)
+ SH_TAILQ_INSERT_HEAD(&mp->mpfq, mfp, q, __mpoolfile);
+ R_UNLOCK(dbenv, dbmp->reginfo);
+ if (ret != 0)
+ goto err;
+check_map:
/*
* If a file:
- * + is read-only
* + isn't temporary
+ * + is read-only
* + doesn't require any pgin/pgout support
* + the DB_NOMMAP flag wasn't set (in either the file open or
* the environment in which it was opened)
@@ -332,7 +576,6 @@ __memp_fopen(dbmp, mfp, path, flags, mode, pagesize, needlock, finfop, retp)
* NFS mounted partition, and we can fail in buffer I/O just as easily
* as here.
*
- * XXX
* We'd like to test to see if the file is too big to mmap. Since we
* don't know what size or type off_t's or size_t's are, or the largest
* unsigned integral type is, or what random insanity the local C
@@ -341,11 +584,11 @@ __memp_fopen(dbmp, mfp, path, flags, mode, pagesize, needlock, finfop, retp)
*/
#define DB_MAXMMAPSIZE (10 * 1024 * 1024) /* 10 Mb. */
if (F_ISSET(mfp, MP_CAN_MMAP)) {
- if (!F_ISSET(dbmfp, MP_READONLY))
- F_CLR(mfp, MP_CAN_MMAP);
if (path == NULL)
F_CLR(mfp, MP_CAN_MMAP);
- if (finfop->ftype != 0)
+ if (!F_ISSET(dbmfp, MP_READONLY))
+ F_CLR(mfp, MP_CAN_MMAP);
+ if (dbmfp->ftype != 0)
F_CLR(mfp, MP_CAN_MMAP);
if (LF_ISSET(DB_NOMMAP) || F_ISSET(dbenv, DB_ENV_NOMMAP))
F_CLR(mfp, MP_CAN_MMAP);
@@ -354,260 +597,239 @@ __memp_fopen(dbmp, mfp, path, flags, mode, pagesize, needlock, finfop, retp)
if (mbytes > maxmap / MEGABYTE ||
(mbytes == maxmap / MEGABYTE && bytes >= maxmap % MEGABYTE))
F_CLR(mfp, MP_CAN_MMAP);
- }
- dbmfp->addr = NULL;
- if (F_ISSET(mfp, MP_CAN_MMAP)) {
- dbmfp->len = (size_t)mbytes * MEGABYTE + bytes;
- if (__os_mapfile(dbenv, rpath,
- &dbmfp->fh, dbmfp->len, 1, &dbmfp->addr) != 0) {
- dbmfp->addr = NULL;
- F_CLR(mfp, MP_CAN_MMAP);
+
+ dbmfp->addr = NULL;
+ if (F_ISSET(mfp, MP_CAN_MMAP)) {
+ dbmfp->len = (size_t)mbytes * MEGABYTE + bytes;
+ if (__os_mapfile(dbenv, rpath,
+ dbmfp->fhp, dbmfp->len, 1, &dbmfp->addr) != 0) {
+ dbmfp->addr = NULL;
+ F_CLR(mfp, MP_CAN_MMAP);
+ }
}
}
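/*
 * The check_map logic above only maps a file that is backed by disk,
 * read-only, needs no pgin/pgout processing, wasn't opened DB_NOMMAP,
 * and fits under the mmap size cap.  A plain POSIX equivalent of the
 * final mapping step (__os_mapfile is BDB's portability wrapper):
 */
#include <sys/mman.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <stddef.h>
#include <unistd.h>

static void *
map_readonly_file(const char *path, size_t *lenp)
{
	struct stat sb;
	void *addr;
	int fd;

	if ((fd = open(path, O_RDONLY)) < 0)
		return (NULL);
	if (fstat(fd, &sb) != 0 || sb.st_size == 0) {
		(void)close(fd);
		return (NULL);
	}

	addr = mmap(NULL, (size_t)sb.st_size, PROT_READ, MAP_SHARED, fd, 0);
	(void)close(fd);		/* the mapping outlives the descriptor */
	if (addr == MAP_FAILED)
		return (NULL);

	*lenp = (size_t)sb.st_size;
	return (addr);
}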
- if (rpath != NULL)
- __os_freestr(rpath);
+ dbmfp->mfp = mfp;
+
+ F_SET(dbmfp, MP_OPEN_CALLED);
+
+ /* Add the file to the process' list of DB_MPOOLFILEs. */
MUTEX_THREAD_LOCK(dbenv, dbmp->mutexp);
TAILQ_INSERT_TAIL(&dbmp->dbmfq, dbmfp, q);
MUTEX_THREAD_UNLOCK(dbenv, dbmp->mutexp);
- *retp = dbmfp;
- return (0);
+ if (0) {
+err: if (F_ISSET(dbmfp->fhp, DB_FH_VALID))
+ (void)__os_closehandle(dbenv, dbmfp->fhp);
+
+ if (mfp_alloc) {
+ R_LOCK(dbenv, dbmp->reginfo);
+ if (mfp->path_off != 0)
+ __db_shalloc_free(dbmp->reginfo[0].addr,
+ R_ADDR(dbmp->reginfo, mfp->path_off));
+ if (mfp->fileid_off != 0)
+ __db_shalloc_free(dbmp->reginfo[0].addr,
+ R_ADDR(dbmp->reginfo, mfp->fileid_off));
+ __db_shalloc_free(dbmp->reginfo[0].addr, mfp);
+ R_UNLOCK(dbenv, dbmp->reginfo);
+ }
-err: /*
- * Note that we do not have to free the thread mutex, because we
- * never get to here after we have successfully allocated it.
- */
- if (rpath != NULL)
- __os_freestr(rpath);
- if (F_ISSET(&dbmfp->fh, DB_FH_VALID))
- (void)__os_closehandle(&dbmfp->fh);
- if (dbmfp != NULL) {
- if (dbmfp->mutexp != NULL)
- __db_mutex_free(dbenv, dbmp->reginfo, dbmfp->mutexp);
- __os_free(dbmfp, sizeof(DB_MPOOLFILE));
}
+ if (rpath != NULL)
+ __os_free(dbenv, rpath);
return (ret);
}
/*
- * __memp_mf_open --
- * Open an MPOOLFILE.
+ * __memp_get_fileid --
+ * Return the file ID.
+ *
+ * XXX
+ * Undocumented interface: DB private.
*/
-static int
-__memp_mf_open(dbmp, path, pagesize, last_pgno, finfop, flags, retp)
- DB_MPOOL *dbmp;
- const char *path;
- size_t pagesize;
- db_pgno_t last_pgno;
- DB_MPOOL_FINFO *finfop;
- u_int32_t flags;
- MPOOLFILE **retp;
+static void
+__memp_get_fileid(dbmfp, fidp)
+ DB_MPOOLFILE *dbmfp;
+ u_int8_t *fidp;
{
- MPOOL *mp;
- MPOOLFILE *mfp;
- int ret;
- void *p;
-
-#define ISTEMPORARY (path == NULL)
-
/*
- * If not creating a temporary file, walk the list of MPOOLFILE's,
- * looking for a matching file. Files backed by temporary files
- * or previously removed files can't match.
+ * No lock needed -- we're using the handle, it had better not
+ * be going away.
*
- * DB_TRUNCATE support.
- *
- * The fileID is a filesystem unique number (e.g., a UNIX dev/inode
- * pair) plus a timestamp. If files are removed and created in less
- * than a second, the fileID can be repeated. The problem with
- * repetition happens when the file that previously had the fileID
- * value still has pages in the pool, since we don't want to use them
- * to satisfy requests for the new file.
- *
- * Because the DB_TRUNCATE flag reuses the dev/inode pair, repeated
- * opens with that flag set guarantees matching fileIDs when the
- * machine can open a file and then re-open with truncate within a
- * second. For this reason, we pass that flag down, and, if we find
- * a matching entry, we ensure that it's never found again, and we
- * create a new entry for the current request.
+ * !!!
+ * Get the fileID out of the region, not out of the DB_MPOOLFILE
+ * structure because the DB_MPOOLFILE reference is possibly short
+ * lived, and isn't to be trusted.
*/
- if (!ISTEMPORARY) {
- mp = dbmp->reginfo[0].primary;
- for (mfp = SH_TAILQ_FIRST(&mp->mpfq, __mpoolfile);
- mfp != NULL; mfp = SH_TAILQ_NEXT(mfp, q, __mpoolfile)) {
- if (F_ISSET(mfp, MP_DEADFILE | MP_TEMP))
- continue;
- if (memcmp(finfop->fileid, R_ADDR(dbmp->reginfo,
- mfp->fileid_off), DB_FILE_ID_LEN) == 0) {
- if (LF_ISSET(DB_TRUNCATE)) {
- MEMP_FREMOVE(mfp);
- continue;
- }
- if (finfop->clear_len != mfp->clear_len ||
- pagesize != mfp->stat.st_pagesize) {
- __db_err(dbmp->dbenv,
- "%s: page size or clear length changed",
- path);
- return (EINVAL);
- }
-
- /*
- * It's possible that our needs for pre- and
- * post-processing are changing. For example,
- * an application created a hash subdatabase
- * in a database that was previously all btree.
- */
- if (finfop->ftype != 0)
- mfp->ftype = finfop->ftype;
-
- ++mfp->mpf_cnt;
-
- *retp = mfp;
- return (0);
- }
- }
- }
+ memcpy(fidp, R_ADDR(
+ dbmfp->dbmp->reginfo, dbmfp->mfp->fileid_off), DB_FILE_ID_LEN);
+}
- /* Allocate a new MPOOLFILE. */
- if ((ret = __memp_alloc(
- dbmp, dbmp->reginfo, NULL, sizeof(MPOOLFILE), NULL, &mfp)) != 0)
- goto mem_err;
- *retp = mfp;
+/*
+ * __memp_last_pgno --
+ * Return the page number of the last page in the file.
+ *
+ * XXX
+ * Undocumented interface: DB private.
+ */
+static void
+__memp_last_pgno(dbmfp, pgnoaddr)
+ DB_MPOOLFILE *dbmfp;
+ db_pgno_t *pgnoaddr;
+{
+ DB_ENV *dbenv;
+ DB_MPOOL *dbmp;
- /* Initialize the structure. */
- memset(mfp, 0, sizeof(MPOOLFILE));
- mfp->mpf_cnt = 1;
- mfp->ftype = finfop->ftype;
- mfp->lsn_off = finfop->lsn_offset;
- mfp->clear_len = finfop->clear_len;
+ dbmp = dbmfp->dbmp;
+ dbenv = dbmp->dbenv;
- /*
- * If the user specifies DB_MPOOL_LAST or DB_MPOOL_NEW on a memp_fget,
- * we have to know the last page in the file. Figure it out and save
- * it away.
- */
- mfp->stat.st_pagesize = pagesize;
- mfp->orig_last_pgno = mfp->last_pgno = last_pgno;
+ R_LOCK(dbenv, dbmp->reginfo);
+ *pgnoaddr = dbmfp->mfp->last_pgno;
+ R_UNLOCK(dbenv, dbmp->reginfo);
+}
- if (ISTEMPORARY)
- F_SET(mfp, MP_TEMP);
- else {
- /* Copy the file path into shared memory. */
- if ((ret = __memp_alloc(dbmp, dbmp->reginfo,
- NULL, strlen(path) + 1, &mfp->path_off, &p)) != 0)
- goto err;
- memcpy(p, path, strlen(path) + 1);
+/*
+ * __memp_refcnt --
+ * Return the current reference count.
+ *
+ * XXX
+ * Undocumented interface: DB private.
+ */
+static void
+__memp_refcnt(dbmfp, cntp)
+ DB_MPOOLFILE *dbmfp;
+ db_pgno_t *cntp;
+{
+ DB_ENV *dbenv;
- /* Copy the file identification string into shared memory. */
- if ((ret = __memp_alloc(dbmp, dbmp->reginfo,
- NULL, DB_FILE_ID_LEN, &mfp->fileid_off, &p)) != 0)
- goto err;
- memcpy(p, finfop->fileid, DB_FILE_ID_LEN);
+ dbenv = dbmfp->dbmp->dbenv;
- F_SET(mfp, MP_CAN_MMAP);
- }
+ MUTEX_LOCK(dbenv, &dbmfp->mfp->mutex);
+ *cntp = dbmfp->mfp->mpf_cnt;
+ MUTEX_UNLOCK(dbenv, &dbmfp->mfp->mutex);
+}
- /* Copy the page cookie into shared memory. */
- if (finfop->pgcookie == NULL || finfop->pgcookie->size == 0) {
- mfp->pgcookie_len = 0;
- mfp->pgcookie_off = 0;
- } else {
- if ((ret = __memp_alloc(dbmp, dbmp->reginfo,
- NULL, finfop->pgcookie->size, &mfp->pgcookie_off, &p)) != 0)
- goto err;
- memcpy(p, finfop->pgcookie->data, finfop->pgcookie->size);
- mfp->pgcookie_len = finfop->pgcookie->size;
- }
+/*
+ * __memp_set_unlink --
+ * Set unlink on last close flag.
+ *
+ * XXX
+ * Undocumented interface: DB private.
+ */
+static void
+__memp_set_unlink(dbmpf, set)
+ DB_MPOOLFILE *dbmpf;
+ int set;
+{
+ DB_ENV *dbenv;
- /* Prepend the MPOOLFILE to the list of MPOOLFILE's. */
- mp = dbmp->reginfo[0].primary;
- SH_TAILQ_INSERT_HEAD(&mp->mpfq, mfp, q, __mpoolfile);
+ dbenv = dbmpf->dbmp->dbenv;
- if (0) {
-err: if (mfp->path_off != 0)
- __db_shalloc_free(dbmp->reginfo[0].addr,
- R_ADDR(dbmp->reginfo, mfp->path_off));
- if (mfp->fileid_off != 0)
- __db_shalloc_free(dbmp->reginfo[0].addr,
- R_ADDR(dbmp->reginfo, mfp->fileid_off));
- if (mfp != NULL)
- __db_shalloc_free(dbmp->reginfo[0].addr, mfp);
-mem_err: __db_err(dbmp->dbenv,
- "Unable to allocate memory for mpool file");
- }
- return (ret);
+ MUTEX_LOCK(dbenv, &dbmpf->mfp->mutex);
+ if (set)
+ F_SET(dbmpf->mfp, MP_UNLINK);
+ else
+ F_CLR(dbmpf->mfp, MP_UNLINK);
+ MUTEX_UNLOCK(dbenv, &dbmpf->mfp->mutex);
}
/*
* memp_fclose --
* Close a backing file for the memory pool.
*/
+static int
+__memp_fclose(dbmfp, flags)
+ DB_MPOOLFILE *dbmfp;
+ u_int32_t flags;
+{
+ DB_ENV *dbenv;
+ int ret, t_ret;
+
+ dbenv = dbmfp->dbmp->dbenv;
+
+ PANIC_CHECK(dbenv);
+
+ /*
+ * XXX
+ * DB_MPOOL_DISCARD: Undocumented flag: DB private.
+ */
+ ret = __db_fchk(dbenv, "DB_MPOOLFILE->close", flags, DB_MPOOL_DISCARD);
+
+ if ((t_ret = __memp_fclose_int(dbmfp, flags)) != 0 && ret == 0)
+ ret = t_ret;
+
+ return (ret);
+}
+
+/*
+ * __memp_fclose_int --
+ * Internal version of __memp_fclose.
+ *
+ * PUBLIC: int __memp_fclose_int __P((DB_MPOOLFILE *, u_int32_t));
+ */
int
-memp_fclose(dbmfp)
+__memp_fclose_int(dbmfp, flags)
DB_MPOOLFILE *dbmfp;
+ u_int32_t flags;
{
DB_ENV *dbenv;
DB_MPOOL *dbmp;
MPOOLFILE *mfp;
char *rpath;
- int ret, t_ret;
+ int deleted, ret, t_ret;
dbmp = dbmfp->dbmp;
dbenv = dbmp->dbenv;
ret = 0;
- PANIC_CHECK(dbenv);
-
-#ifdef HAVE_RPC
- if (F_ISSET(dbenv, DB_ENV_RPCCLIENT))
- return (__dbcl_memp_fclose(dbmfp));
-#endif
-
/*
- * Remove the DB_MPOOLFILE from the queue. This has to happen before
- * we perform any action that can fail, otherwise __memp_close may
- * loop infinitely when calling us to discard all of the DB_MPOOLFILEs.
+ * We have to reference count DB_MPOOLFILE structures as other threads
+ * in the process may be using them. Here's the problem:
+ *
+ * Thread A opens a database.
+ * Thread B uses thread A's DB_MPOOLFILE to write a buffer
+ * in order to free up memory in the mpool cache.
+ * Thread A closes the database while thread B is using the
+ * DB_MPOOLFILE structure.
+ *
+ * By opening all databases before creating any threads, and closing
+ * the databases after all the threads have exited, applications get
+ * better performance and avoid the problem path entirely.
+ *
+ * Regardless, holding the DB_MPOOLFILE to flush a dirty buffer is a
+ * short-term lock, even in worst case, since we better be the only
+ * thread of control using the DB_MPOOLFILE structure to read pages
+ * *into* the cache. Wait until we're the only reference holder and
+ * remove the DB_MPOOLFILE structure from the list, so nobody else can
+ * find it. We do this, rather than have the last reference holder
+ * (whoever that might be) discard the DB_MPOOLFILE structure, because
+ * we'd rather write error messages to the application in the close
+ * routine, not in the checkpoint/sync routine.
+ *
+ * !!!
+ * It's possible the DB_MPOOLFILE was never added to the DB_MPOOLFILE
+ * file list; check the MP_OPEN_CALLED flag to be sure.
*/
- for (;;) {
+ for (deleted = 0;;) {
MUTEX_THREAD_LOCK(dbenv, dbmp->mutexp);
-
- /*
- * We have to reference count DB_MPOOLFILE structures as other
- * threads may be using them. The problem only happens if the
- * application makes a bad design choice. Here's the path:
- *
- * Thread A opens a database.
- * Thread B uses thread A's DB_MPOOLFILE to write a buffer
- * in order to free up memory in the mpool cache.
- * Thread A closes the database while thread B is using the
- * DB_MPOOLFILE structure.
- *
- * By opening all databases before creating the threads, and
- * closing them after the threads have exited, applications
- * get better performance and avoid the problem path entirely.
- *
- * Regardless, holding the DB_MPOOLFILE to flush a dirty buffer
- * is a short-term lock, even in worst case, since we better be
- * the only thread of control using the DB_MPOOLFILE structure
- * to read pages *into* the cache. Wait until we're the only
- * reference holder and remove the DB_MPOOLFILE structure from
- * the list, so nobody else can even find it.
- */
if (dbmfp->ref == 1) {
- TAILQ_REMOVE(&dbmp->dbmfq, dbmfp, q);
- break;
+ if (F_ISSET(dbmfp, MP_OPEN_CALLED))
+ TAILQ_REMOVE(&dbmp->dbmfq, dbmfp, q);
+ deleted = 1;
}
MUTEX_THREAD_UNLOCK(dbenv, dbmp->mutexp);
- (void)__os_sleep(dbenv, 1, 0);
+ if (deleted)
+ break;
+ __os_sleep(dbenv, 1, 0);
}
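/*
 * The loop above waits until the closing thread holds the only reference,
 * unlinks the handle from the per-process list under the mutex, and only
 * then tears it down.  The same shape with pthreads and hypothetical types:
 */
#include <pthread.h>
#include <unistd.h>

struct sketch_handle {
	int ref;			/* protected by *listlock */
	struct sketch_handle *next;
};

static void
wait_until_sole_owner(struct sketch_handle *h, struct sketch_handle **head,
    pthread_mutex_t *listlock)
{
	struct sketch_handle **hp;
	int deleted;

	for (deleted = 0;;) {
		pthread_mutex_lock(listlock);
		if (h->ref == 1) {
			/* Unlink so no other thread can find the handle. */
			for (hp = head; *hp != NULL; hp = &(*hp)->next)
				if (*hp == h) {
					*hp = h->next;
					break;
				}
			deleted = 1;
		}
		pthread_mutex_unlock(listlock);
		if (deleted)
			break;
		sleep(1);	/* another thread still holds a reference */
	}
}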
- MUTEX_THREAD_UNLOCK(dbenv, dbmp->mutexp);
/* Complain if pinned blocks never returned. */
- if (dbmfp->pinref != 0)
+ if (dbmfp->pinref != 0) {
__db_err(dbenv, "%s: close: %lu blocks left pinned",
__memp_fn(dbmfp), (u_long)dbmfp->pinref);
+ ret = __db_panic(dbenv, DB_RUNRECOVERY);
+ }
/* Discard any mmap information. */
if (dbmfp->addr != NULL &&
@@ -615,11 +837,11 @@ memp_fclose(dbmfp)
__db_err(dbenv, "%s: %s", __memp_fn(dbmfp), db_strerror(ret));
/* Close the file; temporary files may not yet have been created. */
- if (F_ISSET(&dbmfp->fh, DB_FH_VALID) &&
- (t_ret = __os_closehandle(&dbmfp->fh)) != 0) {
+ if (F_ISSET(dbmfp->fhp, DB_FH_VALID) &&
+ (t_ret = __os_closehandle(dbenv, dbmfp->fhp)) != 0) {
__db_err(dbenv, "%s: %s", __memp_fn(dbmfp), db_strerror(t_ret));
- if (ret != 0)
- t_ret = ret;
+ if (ret == 0)
+ ret = t_ret;
}
/* Discard the thread mutex. */
@@ -628,38 +850,51 @@ memp_fclose(dbmfp)
/*
 * Discard our reference on the underlying MPOOLFILE, and close
- * it if it's no longer useful to anyone.
- *
- * If we're not discarding it, and it's a temp file, this means
- * all the outstanding references belong to unflushed buffers.
- * (A temp file can only be referenced by one DB_MPOOLFILE).
- * We don't care about preserving any of those buffers, so mark
- * the MPOOLFILE as dead so that when we try to flush them,
- * even the dirty ones just get discarded.
+ * it if it's no longer useful to anyone. It's possible the open of
+ * the file never happened or wasn't successful, in which case, mfp
+ * will be NULL.
*/
- R_LOCK(dbenv, dbmp->reginfo);
- mfp = dbmfp->mfp;
- if (--mfp->mpf_cnt == 0) {
+ if ((mfp = dbmfp->mfp) == NULL)
+ goto done;
+
+ /*
+ * If it's a temp file, all outstanding references belong to unflushed
+ * buffers. (A temp file can only be referenced by one DB_MPOOLFILE).
+ * We don't care about preserving any of those buffers, so mark the
+ * MPOOLFILE as dead so that even the dirty ones just get discarded
+ * when we try to flush them.
+ */
+ deleted = 0;
+ MUTEX_LOCK(dbenv, &mfp->mutex);
+ if (--mfp->mpf_cnt == 0 || LF_ISSET(DB_MPOOL_DISCARD)) {
+ if (LF_ISSET(DB_MPOOL_DISCARD) ||
+ F_ISSET(mfp, MP_TEMP | MP_UNLINK))
+ MPOOLFILE_IGNORE(mfp);
if (F_ISSET(mfp, MP_UNLINK)) {
- MEMP_FREMOVE(mfp);
if ((t_ret = __db_appname(dbmp->dbenv,
- DB_APP_DATA, NULL, R_ADDR(dbmp->reginfo,
+ DB_APP_DATA, R_ADDR(dbmp->reginfo,
mfp->path_off), 0, NULL, &rpath)) != 0 && ret == 0)
ret = t_ret;
- if (t_ret == 0 && (t_ret =
- __os_unlink(dbmp->dbenv, rpath) != 0 && ret == 0))
+ if (t_ret == 0) {
+ if ((t_ret = __os_unlink(
+ dbmp->dbenv, rpath) != 0) && ret == 0)
+ ret = t_ret;
+ __os_free(dbenv, rpath);
+ }
+ }
+ if (mfp->block_cnt == 0) {
+ if ((t_ret =
+ __memp_mf_discard(dbmp, mfp)) != 0 && ret == 0)
ret = t_ret;
- __os_free(rpath, 0);
+ deleted = 1;
}
- if (mfp->block_cnt == 0)
- __memp_mf_discard(dbmp, mfp);
}
- else if (F_ISSET(mfp, MP_TEMP))
- MEMP_FREMOVE(mfp);
- R_UNLOCK(dbenv, dbmp->reginfo);
+ if (deleted == 0)
+ MUTEX_UNLOCK(dbenv, &mfp->mutex);
/* Discard the DB_MPOOLFILE structure. */
- __os_free(dbmfp, sizeof(DB_MPOOLFILE));
+done: __os_free(dbenv, dbmfp->fhp);
+ __os_free(dbenv, dbmfp);
return (ret);
}
@@ -668,20 +903,69 @@ memp_fclose(dbmfp)
* __memp_mf_discard --
* Discard an MPOOLFILE.
*
- * PUBLIC: void __memp_mf_discard __P((DB_MPOOL *, MPOOLFILE *));
+ * PUBLIC: int __memp_mf_discard __P((DB_MPOOL *, MPOOLFILE *));
*/
-void
+int
__memp_mf_discard(dbmp, mfp)
DB_MPOOL *dbmp;
MPOOLFILE *mfp;
{
+ DB_ENV *dbenv;
+ DB_FH fh;
+ DB_MPOOL_STAT *sp;
MPOOL *mp;
+ char *rpath;
+ int ret;
+ dbenv = dbmp->dbenv;
mp = dbmp->reginfo[0].primary;
+ ret = 0;
+
+ /*
+ * Expects caller to be holding the MPOOLFILE mutex.
+ *
+ * When discarding a file, we have to flush writes from it to disk.
+ * The scenario is that dirty buffers from this file need to be
+ * flushed to satisfy a future checkpoint, but when the checkpoint
+ * calls mpool sync, the sync code won't know anything about them.
+ */
+ if (!F_ISSET(mfp, MP_DEADFILE) &&
+ (ret = __db_appname(dbenv, DB_APP_DATA,
+ R_ADDR(dbmp->reginfo, mfp->path_off), 0, NULL, &rpath)) == 0) {
+ if ((ret = __os_open(dbenv, rpath, 0, 0, &fh)) == 0) {
+ ret = __os_fsync(dbenv, &fh);
+ (void)__os_closehandle(dbenv, &fh);
+ }
+ __os_free(dbenv, rpath);
+ }
+
+ /*
+ * We have to release the MPOOLFILE lock before acquiring the region
+ * lock so that we don't deadlock. Make sure nobody ever looks at
+ * this structure again.
+ */
+ MPOOLFILE_IGNORE(mfp);
+
+ /* Discard the mutex we're holding. */
+ MUTEX_UNLOCK(dbenv, &mfp->mutex);
/* Delete from the list of MPOOLFILEs. */
+ R_LOCK(dbenv, dbmp->reginfo);
SH_TAILQ_REMOVE(&mp->mpfq, mfp, q, __mpoolfile);
+ /* Copy the statistics into the region. */
+ sp = &mp->stat;
+ sp->st_cache_hit += mfp->stat.st_cache_hit;
+ sp->st_cache_miss += mfp->stat.st_cache_miss;
+ sp->st_map += mfp->stat.st_map;
+ sp->st_page_create += mfp->stat.st_page_create;
+ sp->st_page_in += mfp->stat.st_page_in;
+ sp->st_page_out += mfp->stat.st_page_out;
+
+ /* Clear the mutex this MPOOLFILE recorded. */
+ __db_shlocks_clear(&mfp->mutex, dbmp->reginfo,
+ (REGMAINT *)R_ADDR(dbmp->reginfo, mp->maint_off));
+
/* Free the space. */
if (mfp->path_off != 0)
__db_shalloc_free(dbmp->reginfo[0].addr,
@@ -693,35 +977,10 @@ __memp_mf_discard(dbmp, mfp)
__db_shalloc_free(dbmp->reginfo[0].addr,
R_ADDR(dbmp->reginfo, mfp->pgcookie_off));
__db_shalloc_free(dbmp->reginfo[0].addr, mfp);
-}
-
-/*
- * __memp_fremove --
- * Remove an underlying file from the system.
- *
- * PUBLIC: int __memp_fremove __P((DB_MPOOLFILE *));
- */
-int
-__memp_fremove(dbmfp)
- DB_MPOOLFILE *dbmfp;
-{
- DB_ENV *dbenv;
- DB_MPOOL *dbmp;
- MPOOLFILE *mfp;
-
- dbmp = dbmfp->dbmp;
- dbenv = dbmp->dbenv;
- mfp = dbmfp->mfp;
-
- PANIC_CHECK(dbenv);
-
- R_LOCK(dbenv, dbmp->reginfo);
-
- MEMP_FREMOVE(mfp);
R_UNLOCK(dbenv, dbmp->reginfo);
- return (0);
+ return (ret);
}
/*
diff --git a/bdb/mp/mp_fput.c b/bdb/mp/mp_fput.c
index be03b721f36..271e44a4ef8 100644
--- a/bdb/mp/mp_fput.c
+++ b/bdb/mp/mp_fput.c
@@ -1,13 +1,13 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Copyright (c) 1996-2002
* Sleepycat Software. All rights reserved.
*/
#include "db_config.h"
#ifndef lint
-static const char revid[] = "$Id: mp_fput.c,v 11.16 2000/11/30 00:58:41 ubell Exp $";
+static const char revid[] = "$Id: mp_fput.c,v 11.36 2002/08/09 19:04:11 bostic Exp $";
#endif /* not lint */
#ifndef NO_SYSTEM_INCLUDES
@@ -15,43 +15,32 @@ static const char revid[] = "$Id: mp_fput.c,v 11.16 2000/11/30 00:58:41 ubell Ex
#endif
-#ifdef HAVE_RPC
-#include "db_server.h"
-#endif
-
#include "db_int.h"
-#include "db_shash.h"
-#include "mp.h"
-
-#ifdef HAVE_RPC
-#include "gen_client_ext.h"
-#include "rpc_client_ext.h"
-#endif
+#include "dbinc/db_shash.h"
+#include "dbinc/mp.h"
/*
- * memp_fput --
+ * __memp_fput --
* Mpool file put function.
+ *
+ * PUBLIC: int __memp_fput __P((DB_MPOOLFILE *, void *, u_int32_t));
*/
int
-memp_fput(dbmfp, pgaddr, flags)
+__memp_fput(dbmfp, pgaddr, flags)
DB_MPOOLFILE *dbmfp;
void *pgaddr;
u_int32_t flags;
{
- BH *bhp;
+ BH *argbhp, *bhp, *prev;
DB_ENV *dbenv;
DB_MPOOL *dbmp;
- MPOOL *c_mp, *mp;
- int ret, wrote;
+ DB_MPOOL_HASH *hp;
+ MPOOL *c_mp;
+ u_int32_t n_cache;
+ int adjust, ret;
dbmp = dbmfp->dbmp;
dbenv = dbmp->dbenv;
- mp = dbmp->reginfo[0].primary;
-
-#ifdef HAVE_RPC
- if (F_ISSET(dbenv, DB_ENV_RPCCLIENT))
- return (__dbcl_memp_fput(dbmfp, pgaddr, flags));
-#endif
PANIC_CHECK(dbenv);
@@ -72,17 +61,6 @@ memp_fput(dbmfp, pgaddr, flags)
}
}
- R_LOCK(dbenv, dbmp->reginfo);
-
- /* Decrement the pinned reference count. */
- if (dbmfp->pinref == 0) {
- __db_err(dbenv,
- "%s: more pages returned than retrieved", __memp_fn(dbmfp));
- R_UNLOCK(dbenv, dbmp->reginfo);
- return (EINVAL);
- } else
- --dbmfp->pinref;
-
/*
* If we're mapping the file, there's nothing to do. Because we can
* stop mapping the file at any time, we have to check on each buffer
@@ -90,97 +68,135 @@ memp_fput(dbmfp, pgaddr, flags)
* region.
*/
if (dbmfp->addr != NULL && pgaddr >= dbmfp->addr &&
- (u_int8_t *)pgaddr <= (u_int8_t *)dbmfp->addr + dbmfp->len) {
- R_UNLOCK(dbenv, dbmp->reginfo);
+ (u_int8_t *)pgaddr <= (u_int8_t *)dbmfp->addr + dbmfp->len)
return (0);
+
+#ifdef DIAGNOSTIC
+ /*
+ * Decrement the per-file pinned buffer count (mapped pages aren't
+ * counted).
+ */
+ R_LOCK(dbenv, dbmp->reginfo);
+ if (dbmfp->pinref == 0) {
+ ret = EINVAL;
+ __db_err(dbenv,
+ "%s: more pages returned than retrieved", __memp_fn(dbmfp));
+ } else {
+ ret = 0;
+ --dbmfp->pinref;
}
+ R_UNLOCK(dbenv, dbmp->reginfo);
+ if (ret != 0)
+ return (ret);
+#endif
- /* Convert the page address to a buffer header. */
+ /* Convert a page address to a buffer header and hash bucket. */
bhp = (BH *)((u_int8_t *)pgaddr - SSZA(BH, buf));
+ n_cache = NCACHE(dbmp->reginfo[0].primary, bhp->mf_offset, bhp->pgno);
+ c_mp = dbmp->reginfo[n_cache].primary;
+ hp = R_ADDR(&dbmp->reginfo[n_cache], c_mp->htab);
+ hp = &hp[NBUCKET(c_mp, bhp->mf_offset, bhp->pgno)];
- /* Convert the buffer header to a cache. */
- c_mp = BH_TO_CACHE(dbmp, bhp);
-
-/* UNLOCK THE REGION, LOCK THE CACHE. */
+ MUTEX_LOCK(dbenv, &hp->hash_mutex);
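/*
 * The conversion above recovers the buffer header from the page pointer the
 * application handed back by subtracting the offset of the data member --
 * the classic offsetof()/"container_of" trick.  Standalone version with a
 * hypothetical header layout:
 */
#include <stddef.h>

struct sketch_bh {
	unsigned long pgno;
	unsigned long ref;
	unsigned char buf[1];		/* page data follows the header */
};

static struct sketch_bh *
page_to_header(void *pgaddr)
{
	return ((struct sketch_bh *)
	    ((unsigned char *)pgaddr - offsetof(struct sketch_bh, buf)));
}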
/* Set/clear the page bits. */
- if (LF_ISSET(DB_MPOOL_CLEAN) && F_ISSET(bhp, BH_DIRTY)) {
- ++c_mp->stat.st_page_clean;
- --c_mp->stat.st_page_dirty;
+ if (LF_ISSET(DB_MPOOL_CLEAN) &&
+ F_ISSET(bhp, BH_DIRTY) && !F_ISSET(bhp, BH_DIRTY_CREATE)) {
+ DB_ASSERT(hp->hash_page_dirty != 0);
+ --hp->hash_page_dirty;
F_CLR(bhp, BH_DIRTY);
}
if (LF_ISSET(DB_MPOOL_DIRTY) && !F_ISSET(bhp, BH_DIRTY)) {
- --c_mp->stat.st_page_clean;
- ++c_mp->stat.st_page_dirty;
+ ++hp->hash_page_dirty;
F_SET(bhp, BH_DIRTY);
}
if (LF_ISSET(DB_MPOOL_DISCARD))
F_SET(bhp, BH_DISCARD);
/*
- * If the page is dirty and being scheduled to be written as part of
- * a checkpoint, we no longer know that the log is up-to-date.
- */
- if (F_ISSET(bhp, BH_DIRTY) && F_ISSET(bhp, BH_SYNC))
- F_SET(bhp, BH_SYNC_LOGFLSH);
-
- /*
* Check for a reference count going to zero. This can happen if the
* application returns a page twice.
*/
if (bhp->ref == 0) {
__db_err(dbenv, "%s: page %lu: unpinned page returned",
__memp_fn(dbmfp), (u_long)bhp->pgno);
- R_UNLOCK(dbenv, dbmp->reginfo);
+ MUTEX_UNLOCK(dbenv, &hp->hash_mutex);
return (EINVAL);
}
/*
- * If more than one reference to the page, we're done. Ignore the
- * discard flags (for now) and leave it at its position in the LRU
- * chain. The rest gets done at last reference close.
+ * If more than one reference to the page or a reference other than a
+ * thread waiting to flush the buffer to disk, we're done. Ignore the
+ * discard flags (for now) and leave the buffer's priority alone.
*/
- if (--bhp->ref > 0) {
- R_UNLOCK(dbenv, dbmp->reginfo);
+ if (--bhp->ref > 1 || (bhp->ref == 1 && !F_ISSET(bhp, BH_LOCKED))) {
+ MUTEX_UNLOCK(dbenv, &hp->hash_mutex);
return (0);
}
+ /* Update priority values. */
+ if (F_ISSET(bhp, BH_DISCARD) ||
+ dbmfp->mfp->priority == MPOOL_PRI_VERY_LOW)
+ bhp->priority = 0;
+ else {
+ /*
+ * We don't lock the LRU counter or the stat.st_pages field, if
+ * we get garbage (which won't happen on a 32-bit machine), it
+ * only means a buffer has the wrong priority.
+ */
+ bhp->priority = c_mp->lru_count;
+
+ adjust = 0;
+ if (dbmfp->mfp->priority != 0)
+ adjust =
+ (int)c_mp->stat.st_pages / dbmfp->mfp->priority;
+ if (F_ISSET(bhp, BH_DIRTY))
+ adjust += c_mp->stat.st_pages / MPOOL_PRI_DIRTY;
+
+ if (adjust > 0) {
+ if (UINT32_T_MAX - bhp->priority >= (u_int32_t)adjust)
+ bhp->priority += adjust;
+ } else if (adjust < 0)
+ if (bhp->priority > (u_int32_t)-adjust)
+ bhp->priority += adjust;
+ }
+
/*
- * Move the buffer to the head/tail of the LRU chain. We do this
- * before writing the buffer for checkpoint purposes, as the write
- * can discard the region lock and allow another process to acquire
- * buffer. We could keep that from happening, but there seems no
- * reason to do so.
+ * Buffers on hash buckets are sorted by priority -- move the buffer
+ * to the correct position in the list.
*/
- SH_TAILQ_REMOVE(&c_mp->bhq, bhp, q, __bh);
- if (F_ISSET(bhp, BH_DISCARD))
- SH_TAILQ_INSERT_HEAD(&c_mp->bhq, bhp, q, __bh);
+ argbhp = bhp;
+ SH_TAILQ_REMOVE(&hp->hash_bucket, argbhp, hq, __bh);
+
+ prev = NULL;
+ for (bhp = SH_TAILQ_FIRST(&hp->hash_bucket, __bh);
+ bhp != NULL; prev = bhp, bhp = SH_TAILQ_NEXT(bhp, hq, __bh))
+ if (bhp->priority > argbhp->priority)
+ break;
+ if (prev == NULL)
+ SH_TAILQ_INSERT_HEAD(&hp->hash_bucket, argbhp, hq, __bh);
else
- SH_TAILQ_INSERT_TAIL(&c_mp->bhq, bhp, q);
+ SH_TAILQ_INSERT_AFTER(&hp->hash_bucket, prev, argbhp, hq, __bh);
+
+ /* Reset the hash bucket's priority. */
+ hp->hash_priority = SH_TAILQ_FIRST(&hp->hash_bucket, __bh)->priority;
+
+#ifdef DIAGNOSTIC
+ __memp_check_order(hp);
+#endif
/*
- * If this buffer is scheduled for writing because of a checkpoint, we
- * need to write it (if it's dirty), or update the checkpoint counters
- * (if it's not dirty). If we try to write it and can't, that's not
- * necessarily an error as it's not completely unreasonable that the
- * application have permission to write the underlying file, but set a
- * flag so that the next time the memp_sync function is called we try
- * writing it there, as the checkpoint thread of control better be able
- * to write all of the files.
+ * The sync code has a separate counter for buffers on which it waits.
+ * It reads that value without holding a lock so we update it as the
+ * last thing we do. Once that value goes to 0, we won't see another
+ * reference to that buffer being returned to the cache until the sync
+ * code has finished, so we're safe as long as we don't let the value
+ * go to 0 before we finish with the buffer.
*/
- if (F_ISSET(bhp, BH_SYNC)) {
- if (F_ISSET(bhp, BH_DIRTY)) {
- if (__memp_bhwrite(dbmp,
- dbmfp->mfp, bhp, NULL, &wrote) != 0 || !wrote)
- F_SET(mp, MP_LSN_RETRY);
- } else {
- F_CLR(bhp, BH_SYNC);
-
- --mp->lsn_cnt;
- --dbmfp->mfp->lsn_cnt;
- }
- }
+ if (F_ISSET(argbhp, BH_LOCKED) && argbhp->ref_sync != 0)
+ --argbhp->ref_sync;
+
+ MUTEX_UNLOCK(dbenv, &hp->hash_mutex);
- R_UNLOCK(dbenv, dbmp->reginfo);
return (0);
}
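
The rewritten memp_fput above keeps each hash bucket sorted by buffer priority instead of threading every buffer onto a single LRU chain. The following is a minimal sketch of that sorted re-insertion, using plain C pointers rather than Berkeley DB's shared-memory SH_TAILQ macros; the struct and function names are illustrative only.

    #include <stdio.h>

    /* Simplified stand-in for a buffer header: only what the sort needs. */
    struct buf {
        unsigned int pgno;
        unsigned int priority;          /* lower value == evicted sooner */
        struct buf *next;
    };

    /*
     * Unlink *bp from the bucket list and re-insert it in ascending
     * priority order, so the head of the list is always the cheapest
     * buffer to evict -- the invariant the new memp_fput maintains.
     */
    static void
    bucket_reinsert(struct buf **head, struct buf *bp)
    {
        struct buf **lp;

        for (lp = head; *lp != NULL; lp = &(*lp)->next)
            if (*lp == bp) {
                *lp = bp->next;
                break;
            }

        /* Skip entries whose priority is less than or equal to bp's. */
        for (lp = head; *lp != NULL && (*lp)->priority <= bp->priority;
            lp = &(*lp)->next)
            ;

        bp->next = *lp;
        *lp = bp;
    }

    int main(void)
    {
        struct buf b1 = {1, 40, NULL}, b2 = {2, 10, NULL}, b3 = {3, 30, NULL};
        struct buf *head = &b1, *bp;

        b1.next = &b2;
        b2.next = &b3;

        /* Pretend page 1 was just released with a lowered priority. */
        b1.priority = 5;
        bucket_reinsert(&head, &b1);

        for (bp = head; bp != NULL; bp = bp->next)
            printf("page %u priority %u\n", bp->pgno, bp->priority);
        return (0);
    }
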
diff --git a/bdb/mp/mp_fset.c b/bdb/mp/mp_fset.c
index 08313c9b6f5..65cd6286ac9 100644
--- a/bdb/mp/mp_fset.c
+++ b/bdb/mp/mp_fset.c
@@ -1,13 +1,13 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Copyright (c) 1996-2002
* Sleepycat Software. All rights reserved.
*/
#include "db_config.h"
#ifndef lint
-static const char revid[] = "$Id: mp_fset.c,v 11.13 2000/11/30 00:58:41 ubell Exp $";
+static const char revid[] = "$Id: mp_fset.c,v 11.25 2002/05/03 15:21:17 bostic Exp $";
#endif /* not lint */
#ifndef NO_SYSTEM_INCLUDES
@@ -15,25 +15,18 @@ static const char revid[] = "$Id: mp_fset.c,v 11.13 2000/11/30 00:58:41 ubell Ex
#endif
-#ifdef HAVE_RPC
-#include "db_server.h"
-#endif
-
#include "db_int.h"
-#include "db_shash.h"
-#include "mp.h"
-
-#ifdef HAVE_RPC
-#include "gen_client_ext.h"
-#include "rpc_client_ext.h"
-#endif
+#include "dbinc/db_shash.h"
+#include "dbinc/mp.h"
/*
- * memp_fset --
+ * __memp_fset --
* Mpool page set-flag routine.
+ *
+ * PUBLIC: int __memp_fset __P((DB_MPOOLFILE *, void *, u_int32_t));
*/
int
-memp_fset(dbmfp, pgaddr, flags)
+__memp_fset(dbmfp, pgaddr, flags)
DB_MPOOLFILE *dbmfp;
void *pgaddr;
u_int32_t flags;
@@ -41,17 +34,13 @@ memp_fset(dbmfp, pgaddr, flags)
BH *bhp;
DB_ENV *dbenv;
DB_MPOOL *dbmp;
- MPOOL *c_mp, *mp;
+ DB_MPOOL_HASH *hp;
+ MPOOL *c_mp;
+ u_int32_t n_cache;
int ret;
dbmp = dbmfp->dbmp;
dbenv = dbmp->dbenv;
- mp = dbmp->reginfo[0].primary;
-
-#ifdef HAVE_RPC
- if (F_ISSET(dbenv, DB_ENV_RPCCLIENT))
- return (__dbcl_memp_fset(dbmfp, pgaddr, flags));
-#endif
PANIC_CHECK(dbenv);
@@ -60,7 +49,7 @@ memp_fset(dbmfp, pgaddr, flags)
return (__db_ferr(dbenv, "memp_fset", 1));
if ((ret = __db_fchk(dbenv, "memp_fset", flags,
- DB_MPOOL_DIRTY | DB_MPOOL_CLEAN | DB_MPOOL_DISCARD)) != 0)
+ DB_MPOOL_CLEAN | DB_MPOOL_DIRTY | DB_MPOOL_DISCARD)) != 0)
return (ret);
if ((ret = __db_fcchk(dbenv, "memp_fset",
flags, DB_MPOOL_CLEAN, DB_MPOOL_DIRTY)) != 0)
@@ -72,27 +61,29 @@ memp_fset(dbmfp, pgaddr, flags)
return (EACCES);
}
- /* Convert the page address to a buffer header. */
+ /* Convert the page address to a buffer header and hash bucket. */
bhp = (BH *)((u_int8_t *)pgaddr - SSZA(BH, buf));
-
- /* Convert the buffer header to a cache. */
- c_mp = BH_TO_CACHE(dbmp, bhp);
-
- R_LOCK(dbenv, dbmp->reginfo);
-
- if (LF_ISSET(DB_MPOOL_CLEAN) && F_ISSET(bhp, BH_DIRTY)) {
- ++c_mp->stat.st_page_clean;
- --c_mp->stat.st_page_dirty;
+ n_cache = NCACHE(dbmp->reginfo[0].primary, bhp->mf_offset, bhp->pgno);
+ c_mp = dbmp->reginfo[n_cache].primary;
+ hp = R_ADDR(&dbmp->reginfo[n_cache], c_mp->htab);
+ hp = &hp[NBUCKET(c_mp, bhp->mf_offset, bhp->pgno)];
+
+ MUTEX_LOCK(dbenv, &hp->hash_mutex);
+
+ /* Set/clear the page bits. */
+ if (LF_ISSET(DB_MPOOL_CLEAN) &&
+ F_ISSET(bhp, BH_DIRTY) && !F_ISSET(bhp, BH_DIRTY_CREATE)) {
+ DB_ASSERT(hp->hash_page_dirty != 0);
+ --hp->hash_page_dirty;
F_CLR(bhp, BH_DIRTY);
}
if (LF_ISSET(DB_MPOOL_DIRTY) && !F_ISSET(bhp, BH_DIRTY)) {
- --c_mp->stat.st_page_clean;
- ++c_mp->stat.st_page_dirty;
+ ++hp->hash_page_dirty;
F_SET(bhp, BH_DIRTY);
}
if (LF_ISSET(DB_MPOOL_DISCARD))
F_SET(bhp, BH_DISCARD);
- R_UNLOCK(dbenv, dbmp->reginfo);
+ MUTEX_UNLOCK(dbenv, &hp->hash_mutex);
return (0);
}
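
Both memp_fput and memp_fset above recover the buffer header from the page address handed back by the caller with (BH *)((u_int8_t *)pgaddr - SSZA(BH, buf)). A minimal sketch of that pointer arithmetic follows; the struct here is illustrative, and offsetof stands in for what SSZA computes for a trailing array member.

    #include <stddef.h>
    #include <stdio.h>
    #include <stdlib.h>

    /*
     * Illustrative header: metadata followed by the page bytes, mirroring
     * the way BH places the page in a trailing buf[] member.
     */
    struct hdr {
        unsigned int pgno;
        unsigned int ref;
        unsigned char buf[1];           /* page data starts here */
    };

    int main(void)
    {
        struct hdr *h, *back;
        void *pgaddr;

        if ((h = malloc(sizeof(struct hdr) + 4096)) == NULL)
            return (1);
        h->pgno = 7;
        h->ref = 1;

        /* The caller only ever sees the page address... */
        pgaddr = h->buf;

        /* ...and the cache maps it back by subtracting buf's offset. */
        back = (struct hdr *)((unsigned char *)pgaddr -
            offsetof(struct hdr, buf));

        printf("page %u, ref %u\n", back->pgno, back->ref);
        free(h);
        return (0);
    }
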
diff --git a/bdb/mp/mp_method.c b/bdb/mp/mp_method.c
index 85a6239b032..38f0a645f16 100644
--- a/bdb/mp/mp_method.c
+++ b/bdb/mp/mp_method.c
@@ -1,30 +1,30 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Copyright (c) 1996-2002
* Sleepycat Software. All rights reserved.
*/
#include "db_config.h"
#ifndef lint
-static const char revid[] = "$Id: mp_method.c,v 11.10 2000/04/04 20:12:04 bostic Exp $";
+static const char revid[] = "$Id: mp_method.c,v 11.29 2002/03/27 04:32:27 bostic Exp $";
#endif /* not lint */
#ifndef NO_SYSTEM_INCLUDES
#include <sys/types.h>
-#endif
-#ifdef HAVE_RPC
-#include "db_server.h"
+#ifdef HAVE_RPC
+#include <rpc/rpc.h>
+#endif
#endif
#include "db_int.h"
-#include "db_shash.h"
-#include "mp.h"
+#include "dbinc/db_shash.h"
+#include "dbinc/mp.h"
#ifdef HAVE_RPC
-#include "gen_client_ext.h"
-#include "rpc_client_ext.h"
+#include "dbinc_auto/db_server.h"
+#include "dbinc_auto/rpc_client_ext.h"
#endif
static int __memp_set_cachesize __P((DB_ENV *, u_int32_t, u_int32_t, int));
@@ -41,29 +41,46 @@ __memp_dbenv_create(dbenv)
DB_ENV *dbenv;
{
/*
+ * !!!
+ * Our caller has not yet had the opportunity to reset the panic
+ * state or turn off mutex locking, and so we can neither check
+ * the panic state nor acquire a mutex in the DB_ENV create path.
+ *
* We default to 32 8K pages. We don't default to a flat 256K, because
* some systems require significantly more memory to hold 32 pages than
* others. For example, HP-UX with POSIX pthreads needs 88 bytes for
* a POSIX pthread mutex and almost 200 bytes per buffer header, while
- * Solaris needs 24 and 52 bytes for the same structures.
+ * Solaris needs 24 and 52 bytes for the same structures. The minimum
+ * number of hash buckets is 37. These contain a mutex also.
*/
- dbenv->mp_bytes = 32 * ((8 * 1024) + sizeof(BH));
+ dbenv->mp_bytes =
+ 32 * ((8 * 1024) + sizeof(BH)) + 37 * sizeof(DB_MPOOL_HASH);
dbenv->mp_ncache = 1;
- dbenv->set_mp_mmapsize = __memp_set_mp_mmapsize;
- dbenv->set_cachesize = __memp_set_cachesize;
-
-#ifdef HAVE_RPC
- /*
- * If we have a client, overwrite what we just setup to
- * point to client functions.
- */
+#ifdef HAVE_RPC
if (F_ISSET(dbenv, DB_ENV_RPCCLIENT)) {
dbenv->set_cachesize = __dbcl_env_cachesize;
dbenv->set_mp_mmapsize = __dbcl_set_mp_mmapsize;
- }
+ dbenv->memp_dump_region = NULL;
+ dbenv->memp_fcreate = __dbcl_memp_fcreate;
+ dbenv->memp_nameop = NULL;
+ dbenv->memp_register = __dbcl_memp_register;
+ dbenv->memp_stat = __dbcl_memp_stat;
+ dbenv->memp_sync = __dbcl_memp_sync;
+ dbenv->memp_trickle = __dbcl_memp_trickle;
+ } else
#endif
-
+ {
+ dbenv->set_cachesize = __memp_set_cachesize;
+ dbenv->set_mp_mmapsize = __memp_set_mp_mmapsize;
+ dbenv->memp_dump_region = __memp_dump_region;
+ dbenv->memp_fcreate = __memp_fcreate;
+ dbenv->memp_nameop = __memp_nameop;
+ dbenv->memp_register = __memp_register;
+ dbenv->memp_stat = __memp_stat;
+ dbenv->memp_sync = __memp_sync;
+ dbenv->memp_trickle = __memp_trickle;
+ }
}
/*
@@ -78,26 +95,50 @@ __memp_set_cachesize(dbenv, gbytes, bytes, ncache)
{
ENV_ILLEGAL_AFTER_OPEN(dbenv, "set_cachesize");
- dbenv->mp_gbytes = gbytes + bytes / GIGABYTE;
- dbenv->mp_bytes = bytes % GIGABYTE;
- dbenv->mp_ncache = ncache == 0 ? 1 : ncache;
+ /* Normalize the values. */
+ if (ncache == 0)
+ ncache = 1;
/*
- * If the application requested less than 500Mb, increase the
- * cachesize by 25% to account for our overhead. (I'm guessing
- * that caches over 500Mb are specifically sized, i.e., it's
- * a large server and the application actually knows how much
- * memory is available.)
+ * You can only store 4GB-1 in an unsigned 32-bit value, so correct for
+ * applications that specify 4GB cache sizes -- we know what they meant.
+ */
+ if (gbytes / ncache == 4 && bytes == 0) {
+ --gbytes;
+ bytes = GIGABYTE - 1;
+ } else {
+ gbytes += bytes / GIGABYTE;
+ bytes %= GIGABYTE;
+ }
+
+ /* Avoid too-large cache sizes, they result in a region size of zero. */
+ if (gbytes / ncache > 4 || (gbytes / ncache == 4 && bytes != 0)) {
+ __db_err(dbenv, "individual cache size too large");
+ return (EINVAL);
+ }
+
+ /*
+ * If the application requested less than 500Mb, increase the cachesize
+ * by 25% and factor in the size of the hash buckets to account for our
+ * overhead. (I'm guessing caches over 500Mb are specifically sized,
+ * that is, it's a large server and the application actually knows how
+ * much memory is available. We only document the 25% overhead number,
+ * not the hash buckets, but I don't see a reason to confuse the issue,
+ * it shouldn't matter to an application.)
*
* There is a minimum cache size, regardless.
*/
- if (dbenv->mp_gbytes == 0) {
- if (dbenv->mp_bytes < 500 * MEGABYTE)
- dbenv->mp_bytes += dbenv->mp_bytes / 4;
- if (dbenv->mp_bytes < DB_CACHESIZE_MIN)
- dbenv->mp_bytes = DB_CACHESIZE_MIN;
+ if (gbytes == 0) {
+ if (bytes < 500 * MEGABYTE)
+ bytes += (bytes / 4) + 37 * sizeof(DB_MPOOL_HASH);
+ if (bytes / ncache < DB_CACHESIZE_MIN)
+ bytes = ncache * DB_CACHESIZE_MIN;
}
+ dbenv->mp_gbytes = gbytes;
+ dbenv->mp_bytes = bytes;
+ dbenv->mp_ncache = ncache;
+
return (0);
}
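
The reworked __memp_set_cachesize above normalizes the gbytes/bytes pair, special-cases an exact 4GB-per-cache request (which cannot be stored in an unsigned 32-bit byte count), and pads caches under 500MB by 25% plus hash-bucket overhead. The sketch below reproduces that arithmetic standalone; the minimum-size and bucket-overhead constants are made-up placeholders, not Berkeley DB's values.

    #include <stdio.h>

    #define GIGABYTE        (1024u * 1024u * 1024u)
    #define MEGABYTE        (1024u * 1024u)
    #define CACHE_MIN       (20u * 1024u)   /* placeholder minimum size */
    #define BUCKET_OVERHEAD (37u * 64u)     /* 37 buckets x assumed size */

    static int
    normalize(unsigned int *gbytes, unsigned int *bytes, unsigned int ncache)
    {
        if (ncache == 0)
            ncache = 1;

        /* An exact 4GB request per cache overflows 32 bits; back off by 1. */
        if (*gbytes / ncache == 4 && *bytes == 0) {
            --*gbytes;
            *bytes = GIGABYTE - 1;
        } else {
            *gbytes += *bytes / GIGABYTE;
            *bytes %= GIGABYTE;
        }

        /* Reject individual caches larger than 4GB. */
        if (*gbytes / ncache > 4 || (*gbytes / ncache == 4 && *bytes != 0))
            return (-1);

        /* Pad small caches: 25% plus bucket overhead, with a floor. */
        if (*gbytes == 0) {
            if (*bytes < 500 * MEGABYTE)
                *bytes += (*bytes / 4) + BUCKET_OVERHEAD;
            if (*bytes / ncache < CACHE_MIN)
                *bytes = ncache * CACHE_MIN;
        }
        return (0);
    }

    int main(void)
    {
        unsigned int gb = 0, b = 8 * MEGABYTE;

        if (normalize(&gb, &b, 1) == 0)         /* 8MB grows to ~10MB */
            printf("cache: %u GB + %u bytes\n", gb, b);
        return (0);
    }
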
diff --git a/bdb/mp/mp_region.c b/bdb/mp/mp_region.c
index 4b85466ce63..06eca2f8646 100644
--- a/bdb/mp/mp_region.c
+++ b/bdb/mp/mp_region.c
@@ -1,13 +1,13 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Copyright (c) 1996-2002
* Sleepycat Software. All rights reserved.
*/
#include "db_config.h"
#ifndef lint
-static const char revid[] = "$Id: mp_region.c,v 11.26 2000/11/30 00:58:41 ubell Exp $";
+static const char revid[] = "$Id: mp_region.c,v 11.49 2002/05/07 18:42:20 bostic Exp $";
#endif /* not lint */
#ifndef NO_SYSTEM_INCLUDES
@@ -17,11 +17,11 @@ static const char revid[] = "$Id: mp_region.c,v 11.26 2000/11/30 00:58:41 ubell
#endif
#include "db_int.h"
-#include "db_shash.h"
-#include "mp.h"
+#include "dbinc/db_shash.h"
+#include "dbinc/mp.h"
static int __mpool_init __P((DB_ENV *, DB_MPOOL *, int, int));
-#ifdef MUTEX_SYSTEM_RESOURCES
+#ifdef HAVE_MUTEX_SYSTEM_RESOURCES
static size_t __mpool_region_maint __P((REGINFO *));
#endif
@@ -119,6 +119,8 @@ __memp_open(dbenv)
regids[i] = dbmp->reginfo[i].id;
}
+
+ R_UNLOCK(dbenv, dbmp->reginfo);
} else {
/*
* Determine how many regions there are going to be, allocate
@@ -135,6 +137,19 @@ __memp_open(dbenv)
dbmp->reginfo[i].id = INVALID_REGION_ID;
dbmp->reginfo[0] = reginfo;
+ /*
+ * We have to unlock the primary mpool region before we attempt
+ * to join the additional mpool regions. If we don't, we can
+ * deadlock. The scenario is that we hold the primary mpool
+ * region lock. We then try to attach to an additional mpool
+ * region, which requires the acquisition/release of the main
+ * region lock (to search the list of regions). If another
+ * thread of control already holds the main region lock and is
+ * waiting on our primary mpool region lock, we'll deadlock.
+ * See [#4696] for more information.
+ */
+ R_UNLOCK(dbenv, dbmp->reginfo);
+
/* Join remaining regions. */
regids = R_ADDR(dbmp->reginfo, mp->regids);
for (i = 1; i < dbmp->nreg; ++i) {
@@ -155,17 +170,10 @@ __memp_open(dbenv)
R_ADDR(&dbmp->reginfo[i], dbmp->reginfo[i].rp->primary);
/* If the region is threaded, allocate a mutex to lock the handles. */
- if (F_ISSET(dbenv, DB_ENV_THREAD)) {
- if ((ret = __db_mutex_alloc(
- dbenv, dbmp->reginfo, &dbmp->mutexp)) != 0) {
- goto err;
- }
- if ((ret =
- __db_mutex_init(dbenv, dbmp->mutexp, 0, MUTEX_THREAD)) != 0)
- goto err;
- }
-
- R_UNLOCK(dbenv, dbmp->reginfo);
+ if (F_ISSET(dbenv, DB_ENV_THREAD) &&
+ (ret = __db_mutex_setup(dbenv, dbmp->reginfo, &dbmp->mutexp,
+ MUTEX_ALLOC | MUTEX_THREAD)) != 0)
+ goto err;
dbenv->mp_handle = dbmp;
return (0);
@@ -180,12 +188,11 @@ err: if (dbmp->reginfo != NULL && dbmp->reginfo[0].addr != NULL) {
if (dbmp->reginfo[i].id != INVALID_REGION_ID)
(void)__db_r_detach(
dbenv, &dbmp->reginfo[i], 0);
- __os_free(dbmp->reginfo,
- dbmp->nreg * sizeof(*dbmp->reginfo));
+ __os_free(dbenv, dbmp->reginfo);
}
if (dbmp->mutexp != NULL)
__db_mutex_free(dbenv, dbmp->reginfo, dbmp->mutexp);
- __os_free(dbmp, sizeof(*dbmp));
+ __os_free(dbenv, dbmp);
return (ret);
}
@@ -199,13 +206,13 @@ __mpool_init(dbenv, dbmp, reginfo_off, htab_buckets)
DB_MPOOL *dbmp;
int reginfo_off, htab_buckets;
{
- DB_HASHTAB *htab;
+ DB_MPOOL_HASH *htab;
MPOOL *mp;
REGINFO *reginfo;
-#ifdef MUTEX_SYSTEM_RESOURCES
+#ifdef HAVE_MUTEX_SYSTEM_RESOURCES
size_t maint_size;
#endif
- int ret;
+ int i, ret;
void *p;
mp = NULL;
@@ -218,7 +225,7 @@ __mpool_init(dbenv, dbmp, reginfo_off, htab_buckets)
mp = reginfo->primary;
memset(mp, 0, sizeof(*mp));
-#ifdef MUTEX_SYSTEM_RESOURCES
+#ifdef HAVE_MUTEX_SYSTEM_RESOURCES
maint_size = __mpool_region_maint(reginfo);
/* Allocate room for the maintenance info and initialize it. */
if ((ret = __db_shalloc(reginfo->addr,
@@ -231,14 +238,7 @@ __mpool_init(dbenv, dbmp, reginfo_off, htab_buckets)
if (reginfo_off == 0) {
SH_TAILQ_INIT(&mp->mpfq);
- if ((ret = __db_shmutex_init(dbenv, &mp->sync_mutex,
- R_OFFSET(dbmp->reginfo, &mp->sync_mutex) +
- DB_FCNTL_OFF_MPOOL, 0, dbmp->reginfo,
- (REGMAINT *)R_ADDR(dbmp->reginfo, mp->maint_off))) != 0)
- goto err;
-
ZERO_LSN(mp->lsn);
- mp->lsn_cnt = 0;
mp->nreg = dbmp->nreg;
if ((ret = __db_shalloc(dbmp->reginfo[0].addr,
@@ -247,32 +247,41 @@ __mpool_init(dbenv, dbmp, reginfo_off, htab_buckets)
mp->regids = R_OFFSET(dbmp->reginfo, p);
}
- SH_TAILQ_INIT(&mp->bhq);
-
/* Allocate hash table space and initialize it. */
if ((ret = __db_shalloc(reginfo->addr,
- htab_buckets * sizeof(DB_HASHTAB), 0, &htab)) != 0)
+ htab_buckets * sizeof(DB_MPOOL_HASH), 0, &htab)) != 0)
goto mem_err;
- __db_hashinit(htab, htab_buckets);
mp->htab = R_OFFSET(reginfo, htab);
- mp->htab_buckets = htab_buckets;
+ for (i = 0; i < htab_buckets; i++) {
+ if ((ret = __db_mutex_setup(dbenv,
+ reginfo, &htab[i].hash_mutex,
+ MUTEX_NO_RLOCK)) != 0)
+ return (ret);
+ SH_TAILQ_INIT(&htab[i].hash_bucket);
+ htab[i].hash_page_dirty = htab[i].hash_priority = 0;
+ }
+ mp->htab_buckets = mp->stat.st_hash_buckets = htab_buckets;
+ /*
+ * Only the environment creator knows the total cache size, fill in
+ * those statistics now.
+ */
+ mp->stat.st_gbytes = dbenv->mp_gbytes;
+ mp->stat.st_bytes = dbenv->mp_bytes;
return (0);
mem_err:__db_err(dbenv, "Unable to allocate memory for mpool region");
-err: if (reginfo->primary != NULL)
- __db_shalloc_free(reginfo->addr, reginfo->primary);
return (ret);
}
/*
- * __memp_close --
- * Internal version of memp_close: only called from DB_ENV->close.
+ * __memp_dbenv_refresh --
+ * Clean up after the mpool system on a close or failed open.
*
- * PUBLIC: int __memp_close __P((DB_ENV *));
+ * PUBLIC: int __memp_dbenv_refresh __P((DB_ENV *));
*/
int
-__memp_close(dbenv)
+__memp_dbenv_refresh(dbenv)
DB_ENV *dbenv;
{
DB_MPOOL *dbmp;
@@ -287,12 +296,12 @@ __memp_close(dbenv)
/* Discard DB_MPREGs. */
while ((mpreg = LIST_FIRST(&dbmp->dbregq)) != NULL) {
LIST_REMOVE(mpreg, q);
- __os_free(mpreg, sizeof(DB_MPREG));
+ __os_free(dbenv, mpreg);
}
/* Discard DB_MPOOLFILEs. */
while ((dbmfp = TAILQ_FIRST(&dbmp->dbmfq)) != NULL)
- if ((t_ret = memp_fclose(dbmfp)) != 0 && ret == 0)
+ if ((t_ret = __memp_fclose_int(dbmfp, 0)) != 0 && ret == 0)
ret = t_ret;
/* Discard the thread mutex. */
@@ -305,14 +314,14 @@ __memp_close(dbenv)
dbenv, &dbmp->reginfo[i], 0)) != 0 && ret == 0)
ret = t_ret;
- __os_free(dbmp->reginfo, dbmp->nreg * sizeof(*dbmp->reginfo));
- __os_free(dbmp, sizeof(*dbmp));
+ __os_free(dbenv, dbmp->reginfo);
+ __os_free(dbenv, dbmp);
dbenv->mp_handle = NULL;
return (ret);
}
-#ifdef MUTEX_SYSTEM_RESOURCES
+#ifdef HAVE_MUTEX_SYSTEM_RESOURCES
/*
* __mpool_region_maint --
* Return the amount of space needed for region maintenance info.
@@ -328,9 +337,11 @@ __mpool_region_maint(infop)
/*
* For mutex maintenance we need one mutex per possible page.
* Compute the maximum number of pages this cache can have.
- * Also add in an mpool mutex.
+ * Also add in an mpool mutex and mutexes for all dbenv and db
+ * handles.
*/
numlocks = ((infop->rp->size / DB_MIN_PGSIZE) + 1);
+ numlocks += DB_MAX_HANDLES;
s = sizeof(roff_t) * numlocks;
return (s);
}
@@ -347,11 +358,109 @@ __mpool_region_destroy(dbenv, infop)
DB_ENV *dbenv;
REGINFO *infop;
{
- MPOOL *mp;
+ __db_shlocks_destroy(infop, (REGMAINT *)R_ADDR(infop,
+ ((MPOOL *)R_ADDR(infop, infop->rp->primary))->maint_off));
COMPQUIET(dbenv, NULL);
- mp = R_ADDR(infop, infop->rp->primary);
+ COMPQUIET(infop, NULL);
+}
+
+/*
+ * __memp_nameop
+ * Remove or rename a file in the pool.
+ *
+ * PUBLIC: int __memp_nameop __P((DB_ENV *,
+ * PUBLIC: u_int8_t *, const char *, const char *, const char *));
+ *
+ * XXX
+ * Undocumented interface: DB private.
+ */
+int
+__memp_nameop(dbenv, fileid, newname, fullold, fullnew)
+ DB_ENV *dbenv;
+ u_int8_t *fileid;
+ const char *newname, *fullold, *fullnew;
+{
+ DB_MPOOL *dbmp;
+ MPOOL *mp;
+ MPOOLFILE *mfp;
+ roff_t newname_off;
+ int locked, ret;
+ void *p;
+
+ locked = 0;
+ dbmp = NULL;
- __db_shlocks_destroy(infop, (REGMAINT *)R_ADDR(infop, mp->maint_off));
- return;
+ if (!MPOOL_ON(dbenv))
+ goto fsop;
+
+ dbmp = dbenv->mp_handle;
+ mp = dbmp->reginfo[0].primary;
+
+ /*
+ * Remove or rename a file that the mpool might know about. We assume
+ * that the fop layer has the file locked for exclusive access, so we
+ * don't worry about locking except for the mpool mutexes. Checkpoint
+ * can happen at any time, independent of file locking, so we have to
+ * do the actual unlink or rename system call to avoid any race.
+ *
+ * If this is a rename, allocate first, because we can't recursively
+ * grab the region lock.
+ */
+ if (newname == NULL)
+ p = NULL;
+ else {
+ if ((ret = __memp_alloc(dbmp, dbmp->reginfo,
+ NULL, strlen(newname) + 1, &newname_off, &p)) != 0)
+ return (ret);
+ memcpy(p, newname, strlen(newname) + 1);
+ }
+
+ locked = 1;
+ R_LOCK(dbenv, dbmp->reginfo);
+
+ /*
+ * Find the file -- if mpool doesn't know about this file, that's not
+ * an error -- we may not have it open.
+ */
+ for (mfp = SH_TAILQ_FIRST(&mp->mpfq, __mpoolfile);
+ mfp != NULL; mfp = SH_TAILQ_NEXT(mfp, q, __mpoolfile)) {
+ /* Ignore non-active files. */
+ if (F_ISSET(mfp, MP_DEADFILE | MP_TEMP))
+ continue;
+
+ /* Ignore non-matching files. */
+ if (memcmp(fileid, R_ADDR(
+ dbmp->reginfo, mfp->fileid_off), DB_FILE_ID_LEN) != 0)
+ continue;
+
+ /* If newname is NULL, we're removing the file. */
+ if (newname == NULL) {
+ MUTEX_LOCK(dbenv, &mfp->mutex);
+ MPOOLFILE_IGNORE(mfp);
+ MUTEX_UNLOCK(dbenv, &mfp->mutex);
+ } else {
+ /*
+ * Else, it's a rename. We've allocated memory
+ * for the new name. Swap it with the old one.
+ */
+ p = R_ADDR(dbmp->reginfo, mfp->path_off);
+ mfp->path_off = newname_off;
+ }
+ break;
+ }
+
+ /* Delete the memory we no longer need. */
+ if (p != NULL)
+ __db_shalloc_free(dbmp->reginfo[0].addr, p);
+
+fsop: if (newname == NULL)
+ (void)__os_unlink(dbenv, fullold);
+ else
+ (void)__os_rename(dbenv, fullold, fullnew, 1);
+
+ if (locked)
+ R_UNLOCK(dbenv, dbmp->reginfo);
+
+ return (0);
}
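
The __mpool_init loop above now gives every hash bucket its own mutex, which is what allows memp_fput and memp_fset to lock a single bucket rather than the whole region. Below is a minimal pthread-based sketch of that layout; it uses ordinary process-private mutexes and a made-up bucket struct, not the shared-region mutexes Berkeley DB actually allocates.

    #include <pthread.h>
    #include <stdio.h>

    #define NBUCKETS 37             /* the minimum bucket count noted above */

    struct bucket {
        pthread_mutex_t mutex;      /* per-bucket lock */
        unsigned int page_dirty;    /* per-bucket dirty-page count */
    };

    static struct bucket htab[NBUCKETS];

    static void
    htab_init(void)
    {
        int i;

        for (i = 0; i < NBUCKETS; i++) {
            pthread_mutex_init(&htab[i].mutex, NULL);
            htab[i].page_dirty = 0;
        }
    }

    /*
     * Mark one page dirty: only the owning bucket is locked, so work on
     * other buckets can proceed in parallel -- the point of the change.
     */
    static void
    mark_dirty(unsigned int pgno)
    {
        struct bucket *hp;

        hp = &htab[pgno % NBUCKETS];
        pthread_mutex_lock(&hp->mutex);
        ++hp->page_dirty;
        pthread_mutex_unlock(&hp->mutex);
    }

    int main(void)
    {
        htab_init();
        mark_dirty(42);
        printf("bucket %u dirty count: %u\n",
            42u % NBUCKETS, htab[42 % NBUCKETS].page_dirty);
        return (0);
    }
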
diff --git a/bdb/mp/mp_register.c b/bdb/mp/mp_register.c
index 27859f69d7b..46eefad986f 100644
--- a/bdb/mp/mp_register.c
+++ b/bdb/mp/mp_register.c
@@ -1,38 +1,33 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Copyright (c) 1996-2002
* Sleepycat Software. All rights reserved.
*/
#include "db_config.h"
#ifndef lint
-static const char revid[] = "$Id: mp_register.c,v 11.12 2000/11/15 19:25:39 sue Exp $";
+static const char revid[] = "$Id: mp_register.c,v 11.21 2002/03/27 04:32:27 bostic Exp $";
#endif /* not lint */
#ifndef NO_SYSTEM_INCLUDES
#include <sys/types.h>
#endif
-#ifdef HAVE_RPC
-#include "db_server.h"
-#endif
-
#include "db_int.h"
-#include "db_shash.h"
-#include "mp.h"
-
-#ifdef HAVE_RPC
-#include "gen_client_ext.h"
-#include "rpc_client_ext.h"
-#endif
+#include "dbinc/db_shash.h"
+#include "dbinc/mp.h"
/*
* memp_register --
* Register a file type's pgin, pgout routines.
+ *
+ * PUBLIC: int __memp_register __P((DB_ENV *, int,
+ * PUBLIC: int (*)(DB_ENV *, db_pgno_t, void *, DBT *),
+ * PUBLIC: int (*)(DB_ENV *, db_pgno_t, void *, DBT *)));
*/
int
-memp_register(dbenv, ftype, pgin, pgout)
+__memp_register(dbenv, ftype, pgin, pgout)
DB_ENV *dbenv;
int ftype;
int (*pgin) __P((DB_ENV *, db_pgno_t, void *, DBT *));
@@ -42,13 +37,9 @@ memp_register(dbenv, ftype, pgin, pgout)
DB_MPREG *mpreg;
int ret;
-#ifdef HAVE_RPC
- if (F_ISSET(dbenv, DB_ENV_RPCCLIENT))
- return (__dbcl_memp_register(dbenv, ftype, pgin, pgout));
-#endif
-
PANIC_CHECK(dbenv);
- ENV_REQUIRES_CONFIG(dbenv, dbenv->mp_handle, DB_INIT_MPOOL);
+ ENV_REQUIRES_CONFIG(dbenv,
+ dbenv->mp_handle, "DB_ENV->memp_register", DB_INIT_MPOOL);
dbmp = dbenv->mp_handle;
@@ -70,7 +61,7 @@ memp_register(dbenv, ftype, pgin, pgout)
return (0);
/* New entry. */
- if ((ret = __os_malloc(dbenv, sizeof(DB_MPREG), NULL, &mpreg)) != 0)
+ if ((ret = __os_malloc(dbenv, sizeof(DB_MPREG), &mpreg)) != 0)
return (ret);
mpreg->ftype = ftype;
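
memp_register above maintains a per-file-type list of pgin/pgout callbacks that the pool invokes as pages cross the disk boundary. A stripped-down sketch of that registry pattern follows; the types, limits, and names are generic illustrations rather than Berkeley DB's own.

    #include <stddef.h>
    #include <stdio.h>

    #define MAX_FTYPES 8

    /* Page conversion hooks, called as a page is read in or written out. */
    typedef int (*page_hook)(int pgno, void *page, size_t len);

    struct registry {
        int ftype;
        page_hook pgin, pgout;
    };

    static struct registry reg[MAX_FTYPES];
    static int reg_cnt;

    static int
    register_ftype(int ftype, page_hook pgin, page_hook pgout)
    {
        int i;

        /* Re-registering an existing type just replaces its hooks here. */
        for (i = 0; i < reg_cnt; i++)
            if (reg[i].ftype == ftype) {
                reg[i].pgin = pgin;
                reg[i].pgout = pgout;
                return (0);
            }
        if (reg_cnt == MAX_FTYPES)
            return (-1);
        reg[reg_cnt].ftype = ftype;
        reg[reg_cnt].pgin = pgin;
        reg[reg_cnt].pgout = pgout;
        ++reg_cnt;
        return (0);
    }

    static int
    swap_in(int pgno, void *page, size_t len)
    {
        /* A real pgin hook would convert the page to host byte order. */
        (void)pgno; (void)page; (void)len;
        return (0);
    }

    int main(void)
    {
        register_ftype(1, swap_in, NULL);
        printf("registered %d file type(s)\n", reg_cnt);
        return (0);
    }
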
diff --git a/bdb/mp/mp_stat.c b/bdb/mp/mp_stat.c
index 7982513448d..12e72b91d70 100644
--- a/bdb/mp/mp_stat.c
+++ b/bdb/mp/mp_stat.c
@@ -1,13 +1,13 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Copyright (c) 1996-2002
* Sleepycat Software. All rights reserved.
*/
#include "db_config.h"
#ifndef lint
-static const char revid[] = "$Id: mp_stat.c,v 11.21 2001/01/09 16:59:30 bostic Exp $";
+static const char revid[] = "$Id: mp_stat.c,v 11.51 2002/08/06 06:13:47 bostic Exp $";
#endif /* not lint */
#ifndef NO_SYSTEM_INCLUDES
@@ -18,123 +18,150 @@ static const char revid[] = "$Id: mp_stat.c,v 11.21 2001/01/09 16:59:30 bostic E
#include <unistd.h>
#endif
-#ifdef HAVE_RPC
-#include "db_server.h"
-#endif
-
#include "db_int.h"
-#include "db_page.h"
-#include "db_shash.h"
-#include "db_am.h"
-#include "mp.h"
-
-#ifdef HAVE_RPC
-#include "gen_client_ext.h"
-#include "rpc_client_ext.h"
-#endif
+#include "dbinc/db_page.h"
+#include "dbinc/db_shash.h"
+#include "dbinc/db_am.h"
+#include "dbinc/mp.h"
-static void __memp_dumpcache
- __P((DB_MPOOL *, REGINFO *, size_t *, FILE *, u_int32_t));
+static void __memp_dumpcache __P((DB_ENV *,
+ DB_MPOOL *, REGINFO *, size_t *, FILE *, u_int32_t));
static void __memp_pbh __P((DB_MPOOL *, BH *, size_t *, FILE *));
+static void __memp_stat_wait __P((REGINFO *, MPOOL *, DB_MPOOL_STAT *, int));
/*
- * memp_stat --
+ * __memp_stat --
* Display MPOOL statistics.
+ *
+ * PUBLIC: int __memp_stat
+ * PUBLIC: __P((DB_ENV *, DB_MPOOL_STAT **, DB_MPOOL_FSTAT ***, u_int32_t));
*/
int
-memp_stat(dbenv, gspp, fspp, db_malloc)
+__memp_stat(dbenv, gspp, fspp, flags)
DB_ENV *dbenv;
DB_MPOOL_STAT **gspp;
DB_MPOOL_FSTAT ***fspp;
- void *(*db_malloc) __P((size_t));
+ u_int32_t flags;
{
DB_MPOOL *dbmp;
DB_MPOOL_FSTAT **tfsp, *tstruct;
DB_MPOOL_STAT *sp;
MPOOL *c_mp, *mp;
MPOOLFILE *mfp;
- char *tname;
- size_t len, nlen;
- u_int32_t i;
+ size_t len, nlen, pagesize;
+ u_int32_t pages, i;
int ret;
- char *name;
-
-#ifdef HAVE_RPC
- if (F_ISSET(dbenv, DB_ENV_RPCCLIENT))
- return (__dbcl_memp_stat(dbenv, gspp, fspp, db_malloc));
-#endif
+ char *name, *tname;
PANIC_CHECK(dbenv);
- ENV_REQUIRES_CONFIG(dbenv, dbenv->mp_handle, DB_INIT_MPOOL);
+ ENV_REQUIRES_CONFIG(dbenv,
+ dbenv->mp_handle, "memp_stat", DB_INIT_MPOOL);
+
+ if ((ret = __db_fchk(dbenv,
+ "DB_ENV->memp_stat", flags, DB_STAT_CLEAR)) != 0)
+ return (ret);
dbmp = dbenv->mp_handle;
- sp = NULL;
+ mp = dbmp->reginfo[0].primary;
/* Global statistics. */
- mp = dbmp->reginfo[0].primary;
if (gspp != NULL) {
*gspp = NULL;
- if ((ret = __os_calloc(dbenv, 1, sizeof(**gspp), gspp)) != 0)
+ if ((ret = __os_umalloc(dbenv, sizeof(**gspp), gspp)) != 0)
return (ret);
+ memset(*gspp, 0, sizeof(**gspp));
sp = *gspp;
/*
* Initialization and information that is not maintained on
* a per-cache basis.
*/
- sp->st_hash_longest = 0;
- sp->st_region_wait = dbmp->reginfo[0].rp->mutex.mutex_set_wait;
- sp->st_region_nowait =
- dbmp->reginfo[0].rp->mutex.mutex_set_nowait;
- sp->st_gbytes = dbenv->mp_gbytes;
- sp->st_bytes = dbenv->mp_bytes;
+ c_mp = dbmp->reginfo[0].primary;
+ sp->st_gbytes = c_mp->stat.st_gbytes;
+ sp->st_bytes = c_mp->stat.st_bytes;
sp->st_ncache = dbmp->nreg;
sp->st_regsize = dbmp->reginfo[0].rp->size;
- R_LOCK(dbenv, dbmp->reginfo);
-
/* Walk the cache list and accumulate the global information. */
for (i = 0; i < mp->nreg; ++i) {
c_mp = dbmp->reginfo[i].primary;
+
+ sp->st_map += c_mp->stat.st_map;
sp->st_cache_hit += c_mp->stat.st_cache_hit;
sp->st_cache_miss += c_mp->stat.st_cache_miss;
- sp->st_map += c_mp->stat.st_map;
sp->st_page_create += c_mp->stat.st_page_create;
sp->st_page_in += c_mp->stat.st_page_in;
sp->st_page_out += c_mp->stat.st_page_out;
sp->st_ro_evict += c_mp->stat.st_ro_evict;
sp->st_rw_evict += c_mp->stat.st_rw_evict;
+ sp->st_page_trickle += c_mp->stat.st_page_trickle;
+ sp->st_pages += c_mp->stat.st_pages;
+ /*
+ * st_page_dirty calculated by __memp_stat_hash
+ * st_page_clean calculated here
+ */
+ __memp_stat_hash(
+ &dbmp->reginfo[i], c_mp, &sp->st_page_dirty);
+ sp->st_page_clean = sp->st_pages - sp->st_page_dirty;
sp->st_hash_buckets += c_mp->stat.st_hash_buckets;
sp->st_hash_searches += c_mp->stat.st_hash_searches;
- if (c_mp->stat.st_hash_longest > sp->st_hash_longest)
- sp->st_hash_longest =
- c_mp->stat.st_hash_longest;
+ sp->st_hash_longest += c_mp->stat.st_hash_longest;
sp->st_hash_examined += c_mp->stat.st_hash_examined;
- sp->st_page_clean += c_mp->stat.st_page_clean;
- sp->st_page_dirty += c_mp->stat.st_page_dirty;
- sp->st_page_trickle += c_mp->stat.st_page_trickle;
- sp->st_region_wait += c_mp->stat.st_region_wait;
- sp->st_region_nowait += c_mp->stat.st_region_nowait;
+ /*
+ * st_hash_nowait calculated by __memp_stat_wait
+ * st_hash_wait
+ */
+ __memp_stat_wait(&dbmp->reginfo[i], c_mp, sp, flags);
+ sp->st_region_nowait +=
+ dbmp->reginfo[i].rp->mutex.mutex_set_nowait;
+ sp->st_region_wait +=
+ dbmp->reginfo[i].rp->mutex.mutex_set_wait;
+ sp->st_alloc += c_mp->stat.st_alloc;
+ sp->st_alloc_buckets += c_mp->stat.st_alloc_buckets;
+ if (sp->st_alloc_max_buckets <
+ c_mp->stat.st_alloc_max_buckets)
+ sp->st_alloc_max_buckets =
+ c_mp->stat.st_alloc_max_buckets;
+ sp->st_alloc_pages += c_mp->stat.st_alloc_pages;
+ if (sp->st_alloc_max_pages <
+ c_mp->stat.st_alloc_max_pages)
+ sp->st_alloc_max_pages =
+ c_mp->stat.st_alloc_max_pages;
+
+ if (LF_ISSET(DB_STAT_CLEAR)) {
+ dbmp->reginfo[i].rp->mutex.mutex_set_wait = 0;
+ dbmp->reginfo[i].rp->mutex.mutex_set_nowait = 0;
+ pages = c_mp->stat.st_pages;
+ memset(&c_mp->stat, 0, sizeof(c_mp->stat));
+ c_mp->stat.st_hash_buckets = c_mp->htab_buckets;
+ c_mp->stat.st_pages = pages;
+ }
}
/*
- * We have duplicate statistics fields in the cache and
- * per-file structures. The counters are only incremented
- * in the per-file structures, though. The intent is that
- * if we ever flush files from the pool we can save their
- * last known totals in the cache structure.
+ * We have duplicate statistics fields in per-file structures
+ * and the cache. The counters are only incremented in the
+ * per-file structures, except if a file is flushed from the
+ * mpool, at which time we copy its information into the cache
+ * statistics. We added the cache information above, now we
+ * add the per-file information.
*/
+ R_LOCK(dbenv, dbmp->reginfo);
for (mfp = SH_TAILQ_FIRST(&mp->mpfq, __mpoolfile);
mfp != NULL; mfp = SH_TAILQ_NEXT(mfp, q, __mpoolfile)) {
+ sp->st_map += mfp->stat.st_map;
sp->st_cache_hit += mfp->stat.st_cache_hit;
sp->st_cache_miss += mfp->stat.st_cache_miss;
- sp->st_map += mfp->stat.st_map;
sp->st_page_create += mfp->stat.st_page_create;
sp->st_page_in += mfp->stat.st_page_in;
sp->st_page_out += mfp->stat.st_page_out;
+ if (fspp == NULL && LF_ISSET(DB_STAT_CLEAR)) {
+ pagesize = mfp->stat.st_pagesize;
+ memset(&mfp->stat, 0, sizeof(mfp->stat));
+ mfp->stat.st_pagesize = pagesize;
+ }
}
-
R_UNLOCK(dbenv, dbmp->reginfo);
}
@@ -142,9 +169,8 @@ memp_stat(dbenv, gspp, fspp, db_malloc)
if (fspp != NULL) {
*fspp = NULL;
- R_LOCK(dbenv, dbmp->reginfo);
-
/* Count the MPOOLFILE structures. */
+ R_LOCK(dbenv, dbmp->reginfo);
for (i = 0, len = 0,
mfp = SH_TAILQ_FIRST(&mp->mpfq, __mpoolfile);
mfp != NULL;
@@ -153,18 +179,15 @@ memp_stat(dbenv, gspp, fspp, db_malloc)
sizeof(DB_MPOOL_FSTAT) +
strlen(__memp_fns(dbmp, mfp)) + 1;
len += sizeof(DB_MPOOL_FSTAT *); /* Trailing NULL */
-
R_UNLOCK(dbenv, dbmp->reginfo);
- if (len == 0)
+ if (i == 0)
return (0);
/* Allocate space */
- if ((ret = __os_malloc(dbenv, len, db_malloc, fspp)) != 0)
+ if ((ret = __os_umalloc(dbenv, len, fspp)) != 0)
return (ret);
- R_LOCK(dbenv, dbmp->reginfo);
-
/*
* Build each individual entry. We assume that an array of
* pointers are aligned correctly to be followed by an array
@@ -179,20 +202,30 @@ memp_stat(dbenv, gspp, fspp, db_malloc)
tstruct = (DB_MPOOL_FSTAT *)(tfsp + i + 1);
tname = (char *)(tstruct + i);
+ /*
+ * Files may have been opened since we counted, don't walk
+ * off the end of the allocated space.
+ */
+ R_LOCK(dbenv, dbmp->reginfo);
for (mfp = SH_TAILQ_FIRST(&mp->mpfq, __mpoolfile);
- mfp != NULL;
+ mfp != NULL && i-- > 0;
++tfsp, ++tstruct, tname += nlen,
mfp = SH_TAILQ_NEXT(mfp, q, __mpoolfile)) {
name = __memp_fns(dbmp, mfp);
nlen = strlen(name) + 1;
*tfsp = tstruct;
*tstruct = mfp->stat;
+ if (LF_ISSET(DB_STAT_CLEAR)) {
+ pagesize = mfp->stat.st_pagesize;
+ memset(&mfp->stat, 0, sizeof(mfp->stat));
+ mfp->stat.st_pagesize = pagesize;
+ }
tstruct->file_name = tname;
memcpy(tname, name, nlen);
}
- *tfsp = NULL;
-
R_UNLOCK(dbenv, dbmp->reginfo);
+
+ *tfsp = NULL;
}
return (0);
}
@@ -200,7 +233,6 @@ memp_stat(dbenv, gspp, fspp, db_malloc)
#define FMAP_ENTRIES 200 /* Files we map. */
#define MPOOL_DUMP_HASH 0x01 /* Debug hash chains. */
-#define MPOOL_DUMP_LRU 0x02 /* Debug LRU chains. */
#define MPOOL_DUMP_MEM 0x04 /* Debug region memory. */
#define MPOOL_DUMP_ALL 0x07 /* Debug all. */
@@ -208,14 +240,23 @@ memp_stat(dbenv, gspp, fspp, db_malloc)
* __memp_dump_region --
* Display MPOOL structures.
*
- * PUBLIC: void __memp_dump_region __P((DB_ENV *, char *, FILE *));
+ * PUBLIC: int __memp_dump_region __P((DB_ENV *, char *, FILE *));
*/
-void
+int
__memp_dump_region(dbenv, area, fp)
DB_ENV *dbenv;
char *area;
FILE *fp;
{
+ static const FN fn[] = {
+ { MP_CAN_MMAP, "mmapped" },
+ { MP_DEADFILE, "dead" },
+ { MP_DIRECT, "no buffer" },
+ { MP_EXTENT, "extent" },
+ { MP_TEMP, "temporary" },
+ { MP_UNLINK, "unlink" },
+ { 0, NULL }
+ };
DB_MPOOL *dbmp;
DB_MPOOLFILE *dbmfp;
MPOOL *mp;
@@ -225,6 +266,10 @@ __memp_dump_region(dbenv, area, fp)
int cnt;
u_int8_t *p;
+ PANIC_CHECK(dbenv);
+ ENV_REQUIRES_CONFIG(dbenv,
+ dbenv->mp_handle, "memp_dump_region", DB_INIT_MPOOL);
+
dbmp = dbenv->mp_handle;
/* Make it easy to call from the debugger. */
@@ -239,40 +284,42 @@ __memp_dump_region(dbenv, area, fp)
case 'h':
LF_SET(MPOOL_DUMP_HASH);
break;
- case 'l':
- LF_SET(MPOOL_DUMP_LRU);
- break;
case 'm':
LF_SET(MPOOL_DUMP_MEM);
break;
}
- R_LOCK(dbenv, dbmp->reginfo);
-
mp = dbmp->reginfo[0].primary;
/* Display MPOOL structures. */
(void)fprintf(fp, "%s\nPool (region addr 0x%lx)\n",
- DB_LINE, (u_long)dbmp->reginfo[0].addr);
+ DB_LINE, P_TO_ULONG(dbmp->reginfo[0].addr));
/* Display the MPOOLFILE structures. */
- cnt = 0;
- for (mfp = SH_TAILQ_FIRST(&mp->mpfq, __mpoolfile);
+ R_LOCK(dbenv, dbmp->reginfo);
+ for (cnt = 0, mfp = SH_TAILQ_FIRST(&mp->mpfq, __mpoolfile);
mfp != NULL; mfp = SH_TAILQ_NEXT(mfp, q, __mpoolfile), ++cnt) {
- (void)fprintf(fp, "File #%d: %s: type %ld, %s\n\t [UID: ",
- cnt + 1, __memp_fns(dbmp, mfp), (long)mfp->ftype,
- F_ISSET(mfp, MP_CAN_MMAP) ? "mmap" : "read/write");
+ (void)fprintf(fp, "File #%d: %s: pagesize %lu\n", cnt + 1,
+ __memp_fns(dbmp, mfp), (u_long)mfp->stat.st_pagesize);
+ (void)fprintf(fp, "\t type %ld; ref %lu; blocks %lu; last %lu;",
+ (long)mfp->ftype, (u_long)mfp->mpf_cnt,
+ (u_long)mfp->block_cnt, (u_long)mfp->last_pgno);
+ __db_prflags(mfp->flags, fn, fp);
+
+ (void)fprintf(fp, "\n\t UID: ");
p = R_ADDR(dbmp->reginfo, mfp->fileid_off);
- for (i = 0; i < DB_FILE_ID_LEN; ++i) {
- (void)fprintf(fp, "%x", *p++);
+ for (i = 0; i < DB_FILE_ID_LEN; ++i, ++p) {
+ (void)fprintf(fp, "%x", (u_int)*p);
if (i < DB_FILE_ID_LEN - 1)
(void)fprintf(fp, " ");
}
- (void)fprintf(fp, "]\n");
+ (void)fprintf(fp, "\n");
if (cnt < FMAP_ENTRIES)
fmap[cnt] = R_OFFSET(dbmp->reginfo, mfp);
}
+ R_UNLOCK(dbenv, dbmp->reginfo);
+ MUTEX_THREAD_LOCK(dbenv, dbmp->mutexp);
for (dbmfp = TAILQ_FIRST(&dbmp->dbmfq);
dbmfp != NULL; dbmfp = TAILQ_NEXT(dbmfp, q), ++cnt) {
(void)fprintf(fp, "File #%d: %s: per-process, %s\n",
@@ -281,6 +328,7 @@ __memp_dump_region(dbenv, area, fp)
if (cnt < FMAP_ENTRIES)
fmap[cnt] = R_OFFSET(dbmp->reginfo, mfp);
}
+ MUTEX_THREAD_UNLOCK(dbenv, dbmp->mutexp);
if (cnt < FMAP_ENTRIES)
fmap[cnt] = INVALID_ROFF;
else
@@ -289,13 +337,14 @@ __memp_dump_region(dbenv, area, fp)
/* Dump the memory pools. */
for (i = 0; i < mp->nreg; ++i) {
(void)fprintf(fp, "%s\nCache #%d:\n", DB_LINE, i + 1);
- __memp_dumpcache(dbmp, &dbmp->reginfo[i], fmap, fp, flags);
+ __memp_dumpcache(
+ dbenv, dbmp, &dbmp->reginfo[i], fmap, fp, flags);
}
- R_UNLOCK(dbenv, dbmp->reginfo);
-
/* Flush in case we're debugging. */
(void)fflush(fp);
+
+ return (0);
}
/*
@@ -303,7 +352,8 @@ __memp_dump_region(dbenv, area, fp)
* Display statistics for a cache.
*/
static void
-__memp_dumpcache(dbmp, reginfo, fmap, fp, flags)
+__memp_dumpcache(dbenv, dbmp, reginfo, fmap, fp, flags)
+ DB_ENV *dbenv;
DB_MPOOL *dbmp;
REGINFO *reginfo;
size_t *fmap;
@@ -311,7 +361,7 @@ __memp_dumpcache(dbmp, reginfo, fmap, fp, flags)
u_int32_t flags;
{
BH *bhp;
- DB_HASHTAB *dbht;
+ DB_MPOOL_HASH *hp;
MPOOL *c_mp;
int bucket;
@@ -320,27 +370,24 @@ __memp_dumpcache(dbmp, reginfo, fmap, fp, flags)
/* Display the hash table list of BH's. */
if (LF_ISSET(MPOOL_DUMP_HASH)) {
(void)fprintf(fp,
- "%s\nBH hash table (%lu hash slots)\npageno, file, ref, address\n",
+ "%s\nBH hash table (%lu hash slots)\nbucket (priority):\n",
DB_LINE, (u_long)c_mp->htab_buckets);
- for (dbht = R_ADDR(reginfo, c_mp->htab),
- bucket = 0; bucket < c_mp->htab_buckets; ++dbht, ++bucket) {
- if (SH_TAILQ_FIRST(dbht, __bh) != NULL)
- (void)fprintf(fp, "%lu:\n", (u_long)bucket);
- for (bhp = SH_TAILQ_FIRST(dbht, __bh);
- bhp != NULL; bhp = SH_TAILQ_NEXT(bhp, hq, __bh))
+ (void)fprintf(fp,
+ "\tpageno, file, ref, address [LSN] priority\n");
+
+ for (hp = R_ADDR(reginfo, c_mp->htab),
+ bucket = 0; bucket < c_mp->htab_buckets; ++hp, ++bucket) {
+ MUTEX_LOCK(dbenv, &hp->hash_mutex);
+ if ((bhp =
+ SH_TAILQ_FIRST(&hp->hash_bucket, __bh)) != NULL)
+ (void)fprintf(fp, "%lu (%u):\n",
+ (u_long)bucket, hp->hash_priority);
+ for (; bhp != NULL; bhp = SH_TAILQ_NEXT(bhp, hq, __bh))
__memp_pbh(dbmp, bhp, fmap, fp);
+ MUTEX_UNLOCK(dbenv, &hp->hash_mutex);
}
}
- /* Display the LRU list of BH's. */
- if (LF_ISSET(MPOOL_DUMP_LRU)) {
- (void)fprintf(fp, "%s\nBH LRU list\n", DB_LINE);
- (void)fprintf(fp, "pageno, file, ref, address\n");
- for (bhp = SH_TAILQ_FIRST(&c_mp->bhq, __bh);
- bhp != NULL; bhp = SH_TAILQ_NEXT(bhp, q, __bh))
- __memp_pbh(dbmp, bhp, fmap, fp);
- }
-
/* Dump the memory pool. */
if (LF_ISSET(MPOOL_DUMP_MEM))
__db_shalloc_dump(reginfo->addr, fp);
@@ -360,10 +407,9 @@ __memp_pbh(dbmp, bhp, fmap, fp)
static const FN fn[] = {
{ BH_CALLPGIN, "callpgin" },
{ BH_DIRTY, "dirty" },
+ { BH_DIRTY_CREATE, "created" },
{ BH_DISCARD, "discard" },
{ BH_LOCKED, "locked" },
- { BH_SYNC, "sync" },
- { BH_SYNC_LOGFLSH, "sync:logflush" },
{ BH_TRASH, "trash" },
{ 0, NULL }
};
@@ -374,15 +420,72 @@ __memp_pbh(dbmp, bhp, fmap, fp)
break;
if (fmap[i] == INVALID_ROFF)
- (void)fprintf(fp, " %4lu, %lu, %2lu, %lu",
+ (void)fprintf(fp, "\t%5lu, %lu, %2lu, %8lu [%lu,%lu] %lu",
(u_long)bhp->pgno, (u_long)bhp->mf_offset,
- (u_long)bhp->ref, (u_long)R_OFFSET(dbmp->reginfo, bhp));
+ (u_long)bhp->ref, (u_long)R_OFFSET(dbmp->reginfo, bhp),
+ (u_long)LSN(bhp->buf).file, (u_long)LSN(bhp->buf).offset,
+ (u_long)bhp->priority);
else
- (void)fprintf(fp, " %4lu, #%d, %2lu, %lu",
+ (void)fprintf(fp, "\t%5lu, #%d, %2lu, %8lu [%lu,%lu] %lu",
(u_long)bhp->pgno, i + 1,
- (u_long)bhp->ref, (u_long)R_OFFSET(dbmp->reginfo, bhp));
+ (u_long)bhp->ref, (u_long)R_OFFSET(dbmp->reginfo, bhp),
+ (u_long)LSN(bhp->buf).file, (u_long)LSN(bhp->buf).offset,
+ (u_long)bhp->priority);
__db_prflags(bhp->flags, fn, fp);
(void)fprintf(fp, "\n");
}
+
+/*
+ * __memp_stat_hash --
+ * Total hash bucket stats (other than mutex wait) into the region.
+ *
+ * PUBLIC: void __memp_stat_hash __P((REGINFO *, MPOOL *, u_int32_t *));
+ */
+void
+__memp_stat_hash(reginfo, mp, dirtyp)
+ REGINFO *reginfo;
+ MPOOL *mp;
+ u_int32_t *dirtyp;
+{
+ DB_MPOOL_HASH *hp;
+ u_int32_t dirty;
+ int i;
+
+ hp = R_ADDR(reginfo, mp->htab);
+ for (i = 0, dirty = 0; i < mp->htab_buckets; i++, hp++)
+ dirty += hp->hash_page_dirty;
+ *dirtyp = dirty;
+}
+
+/*
+ * __memp_stat_wait --
+ * Total hash bucket wait stats into the region.
+ */
+static void
+__memp_stat_wait(reginfo, mp, mstat, flags)
+ REGINFO *reginfo;
+ MPOOL *mp;
+ DB_MPOOL_STAT *mstat;
+ int flags;
+{
+ DB_MPOOL_HASH *hp;
+ DB_MUTEX *mutexp;
+ int i;
+
+ mstat->st_hash_max_wait = 0;
+ hp = R_ADDR(reginfo, mp->htab);
+ for (i = 0; i < mp->htab_buckets; i++, hp++) {
+ mutexp = &hp->hash_mutex;
+ mstat->st_hash_nowait += mutexp->mutex_set_nowait;
+ mstat->st_hash_wait += mutexp->mutex_set_wait;
+ if (mutexp->mutex_set_wait > mstat->st_hash_max_wait)
+ mstat->st_hash_max_wait = mutexp->mutex_set_wait;
+
+ if (LF_ISSET(DB_STAT_CLEAR)) {
+ mutexp->mutex_set_wait = 0;
+ mutexp->mutex_set_nowait = 0;
+ }
+ }
+}
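
__memp_stat_hash and __memp_stat_wait above rebuild region-wide totals by summing the per-bucket counters that replaced the old global fields. A minimal sketch of that aggregation is below, with invented field names standing in for the DB_MPOOL_HASH members.

    #include <stdio.h>

    struct bucket_stat {
        unsigned int page_dirty;    /* dirty pages hashed to this bucket */
        unsigned int mutex_wait;    /* times a thread blocked on its mutex */
    };

    /*
     * Sum the per-bucket counters into region totals and track the largest
     * single-bucket wait count, as __memp_stat_wait does for st_hash_max_wait.
     */
    static void
    stat_hash(const struct bucket_stat *htab, int nbuckets,
        unsigned int *dirtyp, unsigned int *waitp, unsigned int *max_waitp)
    {
        unsigned int dirty, wait, max_wait;
        int i;

        dirty = wait = max_wait = 0;
        for (i = 0; i < nbuckets; i++) {
            dirty += htab[i].page_dirty;
            wait += htab[i].mutex_wait;
            if (htab[i].mutex_wait > max_wait)
                max_wait = htab[i].mutex_wait;
        }
        *dirtyp = dirty;
        *waitp = wait;
        *max_waitp = max_wait;
    }

    int main(void)
    {
        struct bucket_stat htab[3] = { {2, 5}, {0, 9}, {1, 1} };
        unsigned int dirty, wait, max_wait;

        stat_hash(htab, 3, &dirty, &wait, &max_wait);
        printf("dirty %u, waits %u, max wait %u\n", dirty, wait, max_wait);
        return (0);
    }
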
diff --git a/bdb/mp/mp_sync.c b/bdb/mp/mp_sync.c
index 1b0751db709..03b42208b39 100644
--- a/bdb/mp/mp_sync.c
+++ b/bdb/mp/mp_sync.c
@@ -1,13 +1,13 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Copyright (c) 1996-2002
* Sleepycat Software. All rights reserved.
*/
#include "db_config.h"
#ifndef lint
-static const char revid[] = "$Id: mp_sync.c,v 11.29 2001/01/11 18:19:53 bostic Exp $";
+static const char revid[] = "$Id: mp_sync.c,v 11.64 2002/08/25 16:00:27 bostic Exp $";
#endif /* not lint */
#ifndef NO_SYSTEM_INCLUDES
@@ -16,339 +16,92 @@ static const char revid[] = "$Id: mp_sync.c,v 11.29 2001/01/11 18:19:53 bostic E
#include <stdlib.h>
#endif
-#ifdef HAVE_RPC
-#include "db_server.h"
-#endif
-
#include "db_int.h"
-#include "db_shash.h"
-#include "mp.h"
+#include "dbinc/db_shash.h"
+#include "dbinc/mp.h"
-#ifdef HAVE_RPC
-#include "gen_client_ext.h"
-#include "rpc_client_ext.h"
-#endif
+typedef struct {
+ DB_MPOOL_HASH *track_hp; /* Hash bucket. */
+
+ roff_t track_off; /* Page file offset. */
+ db_pgno_t track_pgno; /* Page number. */
+} BH_TRACK;
static int __bhcmp __P((const void *, const void *));
-static int __memp_fsync __P((DB_MPOOLFILE *));
-static int __memp_sballoc __P((DB_ENV *, BH ***, u_int32_t *));
+static int __memp_close_flush_files __P((DB_ENV *, DB_MPOOL *));
+static int __memp_sync_files __P((DB_ENV *, DB_MPOOL *));
/*
- * memp_sync --
+ * __memp_sync --
* Mpool sync function.
+ *
+ * PUBLIC: int __memp_sync __P((DB_ENV *, DB_LSN *));
*/
int
-memp_sync(dbenv, lsnp)
+__memp_sync(dbenv, lsnp)
DB_ENV *dbenv;
DB_LSN *lsnp;
{
- BH *bhp, **bharray;
DB_MPOOL *dbmp;
- DB_LSN tlsn;
- MPOOL *c_mp, *mp;
- MPOOLFILE *mfp;
- u_int32_t ar_cnt, i, ndirty;
- int ret, retry_done, retry_need, wrote;
-
-#ifdef HAVE_RPC
- if (F_ISSET(dbenv, DB_ENV_RPCCLIENT))
- return (__dbcl_memp_sync(dbenv, lsnp));
-#endif
+ MPOOL *mp;
+ int ret;
PANIC_CHECK(dbenv);
- ENV_REQUIRES_CONFIG(dbenv, dbenv->mp_handle, DB_INIT_MPOOL);
-
- dbmp = dbenv->mp_handle;
- mp = dbmp->reginfo[0].primary;
+ ENV_REQUIRES_CONFIG(dbenv,
+ dbenv->mp_handle, "memp_sync", DB_INIT_MPOOL);
/*
- * If no LSN is provided, flush the entire cache.
- *
- * !!!
- * Our current behavior is to flush the entire cache, so there's
- * nothing special we have to do here other than deal with NULL
- * pointers.
+ * If no LSN is provided, flush the entire cache (reasonable usage
+ * even if there's no log subsystem configured).
*/
- if (lsnp == NULL) {
- ZERO_LSN(tlsn);
- lsnp = &tlsn;
- F_SET(mp, MP_LSN_RETRY);
- } else if (!LOGGING_ON(dbenv)) {
- __db_err(dbenv, "memp_sync: requires logging");
- return (EINVAL);
- }
+ if (lsnp != NULL)
+ ENV_REQUIRES_CONFIG(dbenv,
+ dbenv->lg_handle, "memp_sync", DB_INIT_LOG);
- /*
- * Sync calls are single-threaded so that we don't have multiple
- * threads, with different checkpoint LSNs, walking the caches
- * and updating the checkpoint LSNs and how many buffers remain
- * to be written for the checkpoint. This shouldn't be a problem,
- * any application that has multiple checkpoint threads isn't what
- * I'd call trustworthy.
- */
- MUTEX_LOCK(dbenv, &mp->sync_mutex, dbenv->lockfhp);
+ dbmp = dbenv->mp_handle;
+ mp = dbmp->reginfo[0].primary;
- /*
- * If the application is asking about a previous call to memp_sync(),
- * and we haven't found any buffers that the application holding the
- * pin couldn't write, return yes or no based on the current count.
- * Note, if the application is asking about a LSN *smaller* than one
- * we've already handled or are currently handling, then we return a
- * result based on the count for the larger LSN.
- */
- R_LOCK(dbenv, dbmp->reginfo);
- if (!IS_ZERO_LSN(*lsnp) &&
- !F_ISSET(mp, MP_LSN_RETRY) && log_compare(lsnp, &mp->lsn) <= 0) {
- if (mp->lsn_cnt == 0) {
+ /* If we've flushed to the requested LSN, return that information. */
+ if (lsnp != NULL) {
+ R_LOCK(dbenv, dbmp->reginfo);
+ if (log_compare(lsnp, &mp->lsn) <= 0) {
*lsnp = mp->lsn;
- ret = 0;
- } else
- ret = DB_INCOMPLETE;
+ R_UNLOCK(dbenv, dbmp->reginfo);
+ return (0);
+ }
R_UNLOCK(dbenv, dbmp->reginfo);
- MUTEX_UNLOCK(dbenv, &mp->sync_mutex);
- return (ret);
}
- /*
- * Allocate room for a list of buffers, and decide how many buffers
- * we can pin down.
- *
- * !!!
- * Note: __memp_sballoc has released the region lock if we're not
- * continuing forward.
- */
- if ((ret =
- __memp_sballoc(dbenv, &bharray, &ndirty)) != 0 || ndirty == 0) {
- MUTEX_UNLOCK(dbenv, &mp->sync_mutex);
+ if ((ret = __memp_sync_int(dbenv, NULL, 0, DB_SYNC_CACHE, NULL)) != 0)
return (ret);
- }
- retry_done = 0;
-retry: retry_need = 0;
- /*
- * Start a new checkpoint.
- *
- * Save the LSN. We know that it's a new LSN, a retry, or larger than
- * the one for which we were already doing a checkpoint. (BTW, I don't
- * expect to see multiple LSN's from the same or multiple processes,
- * but You Just Never Know. Responding as if they all called with the
- * largest of the LSNs specified makes everything work.)
- *
- * We don't currently use the LSN we save. We could potentially save
- * the last-written LSN in each buffer header and use it to determine
- * what buffers need to be written. The problem with this is that it's
- * sizeof(LSN) more bytes of buffer header. We currently write all the
- * dirty buffers instead, but with a sufficiently large cache that's
- * going to be a problem.
- */
- mp->lsn = *lsnp;
-
- /*
- * Clear the global count of buffers waiting to be written, walk the
- * list of files clearing the count of buffers waiting to be written.
- *
- * Clear the retry flag.
- */
- mp->lsn_cnt = 0;
- for (mfp = SH_TAILQ_FIRST(&mp->mpfq, __mpoolfile);
- mfp != NULL; mfp = SH_TAILQ_NEXT(mfp, q, __mpoolfile))
- mfp->lsn_cnt = 0;
- F_CLR(mp, MP_LSN_RETRY);
-
- /*
- * Walk each cache's list of buffers and mark all dirty buffers to be
- * written and all pinned buffers to be potentially written (we can't
- * know if they'll need to be written until the holder returns them to
- * the cache). We do this in one pass while holding the region locked
- * so that processes can't make new buffers dirty, causing us to never
- * finish. Since the application may have restarted the sync using a
- * different LSN value, clear any BH_SYNC | BH_SYNC_LOGFLSH flags that
- * appear leftover from previous calls.
- *
- * Keep a count of the total number of buffers we need to write in
- * MPOOL->lsn_cnt, and for each file, in MPOOLFILE->lsn_count.
- */
- for (ar_cnt = 0, i = 0; i < mp->nreg; ++i) {
- c_mp = dbmp->reginfo[i].primary;
- for (bhp = SH_TAILQ_FIRST(&c_mp->bhq, __bh);
- bhp != NULL; bhp = SH_TAILQ_NEXT(bhp, q, __bh)) {
- if (F_ISSET(bhp, BH_DIRTY) || bhp->ref != 0) {
- F_SET(bhp, BH_SYNC);
-
- ++mp->lsn_cnt;
-
- mfp = R_ADDR(dbmp->reginfo, bhp->mf_offset);
- ++mfp->lsn_cnt;
-
- /*
- * If the buffer isn't being used, we can write
- * it immediately, so increment its reference
- * count to lock it down, and save a reference
- * to it.
- *
- * If we've run out space to store buffer refs,
- * we're screwed. We don't want to realloc the
- * array while holding a region lock, so we set
- * a flag and deal with it later.
- */
- if (bhp->ref == 0) {
- ++bhp->ref;
- bharray[ar_cnt] = bhp;
-
- if (++ar_cnt >= ndirty) {
- retry_need = 1;
- break;
- }
- }
- } else
- if (F_ISSET(bhp, BH_SYNC))
- F_CLR(bhp, BH_SYNC | BH_SYNC_LOGFLSH);
- }
- if (ar_cnt >= ndirty)
- break;
- }
-
- /* If there no buffers we can write immediately, we're done. */
- if (ar_cnt == 0) {
- ret = mp->lsn_cnt ? DB_INCOMPLETE : 0;
- goto done;
- }
-
- R_UNLOCK(dbenv, dbmp->reginfo);
-
- /*
- * Sort the buffers we're going to write immediately.
- *
- * We try and write the buffers in file/page order: it should reduce
- * seeks by the underlying filesystem and possibly reduce the actual
- * number of writes.
- */
- if (ar_cnt > 1)
- qsort(bharray, ar_cnt, sizeof(BH *), __bhcmp);
-
- /*
- * Flush the log. We have to ensure the log records reflecting the
- * changes on the database pages we're writing have already made it
- * to disk. We usually do that as we write each page, but if we
- * are going to write a large number of pages, repeatedly acquiring
- * the log region lock is going to be expensive. Flush the entire
- * log now, so that sync doesn't require any more log flushes.
- */
- if (LOGGING_ON(dbenv) && (ret = log_flush(dbenv, NULL)) != 0)
- goto done;
-
- R_LOCK(dbenv, dbmp->reginfo);
-
- /* Walk the array, writing buffers. */
- for (i = 0; i < ar_cnt; ++i) {
- /*
- * It's possible for a thread to have gotten the buffer since
- * we listed it for writing. If the reference count is still
- * 1, we're the only ones using the buffer, go ahead and write.
- * If it's >1, then skip the buffer and assume that it will be
- * written when it's returned to the cache.
- */
- if (bharray[i]->ref > 1) {
- --bharray[i]->ref;
- continue;
- }
-
- /* Write the buffer. */
- mfp = R_ADDR(dbmp->reginfo, bharray[i]->mf_offset);
- ret = __memp_bhwrite(dbmp, mfp, bharray[i], NULL, &wrote);
-
- /* Release the buffer. */
- --bharray[i]->ref;
-
- if (ret == 0 && wrote)
- continue;
-
- /*
- * Any process syncing the shared memory buffer pool had best
- * be able to write to any underlying file. Be understanding,
- * but firm, on this point.
- */
- if (ret == 0) {
- __db_err(dbenv, "%s: unable to flush page: %lu",
- __memp_fns(dbmp, mfp), (u_long)bharray[i]->pgno);
- ret = EPERM;
- }
-
- /*
- * On error, clear MPOOL->lsn and set MP_LSN_RETRY so that no
- * future checkpoint return can depend on this failure. Clear
- * the buffer's BH_SYNC flag, because it's used to determine
- * if lsn_cnt values are incremented/decremented. Don't bother
- * to reset/clear:
- *
- * MPOOL->lsn_cnt
- * MPOOLFILE->lsn_cnt
- *
- * they don't make any difference.
- */
- ZERO_LSN(mp->lsn);
- F_SET(mp, MP_LSN_RETRY);
-
- /* Release any buffers we're still pinning down. */
- while (++i < ar_cnt) {
- bhp = bharray[i];
- --bhp->ref;
- F_CLR(bhp, BH_SYNC | BH_SYNC_LOGFLSH);
- }
-
- goto done;
- }
-
- ret = mp->lsn_cnt != 0 ? DB_INCOMPLETE : 0;
-
- /*
- * If there were too many buffers and we're not returning an error, we
- * re-try the checkpoint once -- since we allocated 80% of the total
- * buffer count, once should be enough. If it still doesn't work, some
- * other thread of control is dirtying buffers as fast as we're writing
- * them, and we might as well give up for now. In the latter case, set
- * the global retry flag, we'll have to start from scratch on the next
- * checkpoint.
- */
- if (retry_need) {
- if (retry_done) {
- ret = DB_INCOMPLETE;
- F_SET(mp, MP_LSN_RETRY);
- } else {
- retry_done = 1;
- goto retry;
- }
+ if (lsnp != NULL) {
+ R_LOCK(dbenv, dbmp->reginfo);
+ if (log_compare(lsnp, &mp->lsn) > 0)
+ mp->lsn = *lsnp;
+ R_UNLOCK(dbenv, dbmp->reginfo);
}
-done: R_UNLOCK(dbenv, dbmp->reginfo);
- MUTEX_UNLOCK(dbenv, &mp->sync_mutex);
-
- __os_free(bharray, ndirty * sizeof(BH *));
-
- return (ret);
+ return (0);
}
/*
- * memp_fsync --
+ * __memp_fsync --
* Mpool file sync function.
+ *
+ * PUBLIC: int __memp_fsync __P((DB_MPOOLFILE *));
*/
int
-memp_fsync(dbmfp)
+__memp_fsync(dbmfp)
DB_MPOOLFILE *dbmfp;
{
DB_ENV *dbenv;
DB_MPOOL *dbmp;
- int is_tmp;
dbmp = dbmfp->dbmp;
dbenv = dbmp->dbenv;
-#ifdef HAVE_RPC
- if (F_ISSET(dbenv, DB_ENV_RPCCLIENT))
- return (__dbcl_memp_fsync(dbmfp));
-#endif
-
PANIC_CHECK(dbenv);
/*
@@ -359,13 +112,10 @@ memp_fsync(dbmfp)
if (F_ISSET(dbmfp, MP_READONLY))
return (0);
- R_LOCK(dbenv, dbmp->reginfo);
- is_tmp = F_ISSET(dbmfp->mfp, MP_TEMP);
- R_UNLOCK(dbenv, dbmp->reginfo);
- if (is_tmp)
+ if (F_ISSET(dbmfp->mfp, MP_TEMP))
return (0);
- return (__memp_fsync(dbmfp));
+ return (__memp_sync_int(dbenv, dbmfp, 0, DB_SYNC_FILE, NULL));
}
/*
@@ -379,6 +129,7 @@ __mp_xxx_fh(dbmfp, fhp)
DB_MPOOLFILE *dbmfp;
DB_FH **fhp;
{
+ DB_ENV *dbenv;
/*
* This is a truly spectacular layering violation, intended ONLY to
* support compatibility for the DB 1.85 DB->fd call.
@@ -393,239 +144,457 @@ __mp_xxx_fh(dbmfp, fhp)
* because we want to write to the backing file regardless so that
* we get a file descriptor to return.
*/
- *fhp = &dbmfp->fh;
- return (F_ISSET(&dbmfp->fh, DB_FH_VALID) ? 0 : __memp_fsync(dbmfp));
+ *fhp = dbmfp->fhp;
+ if (F_ISSET(dbmfp->fhp, DB_FH_VALID))
+ return (0);
+ dbenv = dbmfp->dbmp->dbenv;
+
+ return (__memp_sync_int(dbenv, dbmfp, 0, DB_SYNC_FILE, NULL));
}
/*
- * __memp_fsync --
- * Mpool file internal sync function.
+ * __memp_sync_int --
+ * Mpool sync internal function.
+ *
+ * PUBLIC: int __memp_sync_int
+ * PUBLIC: __P((DB_ENV *, DB_MPOOLFILE *, int, db_sync_op, int *));
*/
-static int
-__memp_fsync(dbmfp)
+int
+__memp_sync_int(dbenv, dbmfp, ar_max, op, wrotep)
+ DB_ENV *dbenv;
DB_MPOOLFILE *dbmfp;
+ int ar_max, *wrotep;
+ db_sync_op op;
{
- BH *bhp, **bharray;
- DB_ENV *dbenv;
+ BH *bhp;
+ BH_TRACK *bharray;
DB_MPOOL *dbmp;
+ DB_MPOOL_HASH *hp;
+ DB_MUTEX *mutexp;
MPOOL *c_mp, *mp;
- size_t mf_offset;
- u_int32_t ar_cnt, i, ndirty;
- int incomplete, ret, retry_done, retry_need, wrote;
+ MPOOLFILE *mfp;
+ u_int32_t n_cache;
+ int ar_cnt, hb_lock, i, pass, remaining, ret, t_ret, wait_cnt, wrote;
- dbmp = dbmfp->dbmp;
- dbenv = dbmp->dbenv;
+ dbmp = dbenv->mp_handle;
mp = dbmp->reginfo[0].primary;
-
- R_LOCK(dbenv, dbmp->reginfo);
+ pass = wrote = 0;
/*
- * Allocate room for a list of buffers, and decide how many buffers
- * we can pin down.
- *
- * !!!
- * Note: __memp_sballoc has released our region lock if we're not
- * continuing forward.
+ * If the caller does not specify how many pages assume one
+ * per bucket.
*/
+ if (ar_max == 0)
+ ar_max = mp->nreg * mp->htab_buckets;
+
if ((ret =
- __memp_sballoc(dbenv, &bharray, &ndirty)) != 0 || ndirty == 0)
+ __os_malloc(dbenv, ar_max * sizeof(BH_TRACK), &bharray)) != 0)
return (ret);
- retry_done = 0;
-retry: retry_need = 0;
/*
* Walk each cache's list of buffers and mark all dirty buffers to be
- * written and all pinned buffers to be potentially written (we can't
- * know if they'll need to be written until the holder returns them to
- * the cache). We do this in one pass while holding the region locked
- * so that processes can't make new buffers dirty, causing us to never
- * finish.
+ * written and all pinned buffers to be potentially written, depending
+ * on our flags.
*/
- mf_offset = R_OFFSET(dbmp->reginfo, dbmfp->mfp);
- for (ar_cnt = 0, incomplete = 0, i = 0; i < mp->nreg; ++i) {
- c_mp = dbmp->reginfo[i].primary;
- for (bhp = SH_TAILQ_FIRST(&c_mp->bhq, __bh);
- bhp != NULL; bhp = SH_TAILQ_NEXT(bhp, q, __bh)) {
- if (!F_ISSET(bhp, BH_DIRTY) ||
- bhp->mf_offset != mf_offset)
- continue;
- if (bhp->ref != 0 || F_ISSET(bhp, BH_LOCKED)) {
- incomplete = 1;
- continue;
- }
+ for (ar_cnt = 0, n_cache = 0; n_cache < mp->nreg; ++n_cache) {
+ c_mp = dbmp->reginfo[n_cache].primary;
+ hp = R_ADDR(&dbmp->reginfo[n_cache], c_mp->htab);
+ for (i = 0; i < c_mp->htab_buckets; i++, hp++) {
/*
- * If the buffer isn't being used, we can write
- * it immediately, so increment its reference
- * count to lock it down, and save a reference
- * to it.
- *
- * If we've run out space to store buffer refs,
- * we're screwed. We don't want to realloc the
- * array while holding a region lock, so we set
- * a flag and deal with it later.
+ * We can check for empty buckets before locking as we
+ * only care if the pointer is zero or non-zero. We
+ * can ignore empty buckets because we only need to write
+ * buffers that were dirty before we started.
*/
- ++bhp->ref;
- bharray[ar_cnt] = bhp;
- if (++ar_cnt >= ndirty) {
- retry_need = 1;
- break;
+ if (SH_TAILQ_FIRST(&hp->hash_bucket, __bh) == NULL)
+ continue;
+
+ MUTEX_LOCK(dbenv, &hp->hash_mutex);
+ for (bhp = SH_TAILQ_FIRST(&hp->hash_bucket, __bh);
+ bhp != NULL; bhp = SH_TAILQ_NEXT(bhp, hq, __bh)) {
+ /* Always ignore unreferenced, clean pages. */
+ if (bhp->ref == 0 && !F_ISSET(bhp, BH_DIRTY))
+ continue;
+
+ /*
+ * Checkpoints have to wait on all pinned pages,
+ * as pages may be marked dirty when returned to
+ * the cache.
+ *
+ * File syncs only wait on pages both pinned and
+ * dirty. (We don't care if pages are marked
+ * dirty when returned to the cache, that means
+ * there's another writing thread and flushing
+ * the cache for this handle is meaningless.)
+ */
+ if (op == DB_SYNC_FILE &&
+ !F_ISSET(bhp, BH_DIRTY))
+ continue;
+
+ mfp = R_ADDR(dbmp->reginfo, bhp->mf_offset);
+
+ /*
+ * Ignore temporary files -- this means you
+ * can't even flush temporary files by handle.
+ * (Checkpoint doesn't require temporary files
+ * be flushed and the underlying buffer write
+ * write routine may not be able to write it
+ * anyway.)
+ */
+ if (F_ISSET(mfp, MP_TEMP))
+ continue;
+
+ /*
+ * If we're flushing a specific file, see if
+ * this page is from that file.
+ */
+ if (dbmfp != NULL && mfp != dbmfp->mfp)
+ continue;
+
+ /*
+ * Ignore files that aren't involved in DB's
+ * transactional operations during checkpoints.
+ */
+ if (dbmfp == NULL && mfp->lsn_off == -1)
+ continue;
+
+ /* Track the buffer, we want it. */
+ bharray[ar_cnt].track_hp = hp;
+ bharray[ar_cnt].track_pgno = bhp->pgno;
+ bharray[ar_cnt].track_off = bhp->mf_offset;
+ ar_cnt++;
+
+ if (ar_cnt >= ar_max) {
+ if ((ret = __os_realloc(dbenv,
+ (ar_max * 2) * sizeof(BH_TRACK),
+ &bharray)) != 0)
+ break;
+ ar_max *= 2;
+ }
}
+ MUTEX_UNLOCK(dbenv, &hp->hash_mutex);
+
+ if (ret != 0)
+ goto err;
}
- if (ar_cnt >= ndirty)
- break;
}
- /* If there no buffers we can write immediately, we're done. */
- if (ar_cnt == 0) {
- ret = 0;
+ /* If there are no buffers to write, we're done. */
+ if (ar_cnt == 0)
goto done;
- }
- R_UNLOCK(dbenv, dbmp->reginfo);
-
- /* Sort the buffers we're going to write. */
+ /*
+ * Write the buffers in file/page order, trying to reduce seeks by the
+ * filesystem and, when pages are smaller than filesystem block sizes,
+ * reduce the actual number of writes.
+ */
if (ar_cnt > 1)
- qsort(bharray, ar_cnt, sizeof(BH *), __bhcmp);
+ qsort(bharray, ar_cnt, sizeof(BH_TRACK), __bhcmp);
- R_LOCK(dbenv, dbmp->reginfo);
+ /*
+ * If we're trickling buffers, only write enough to reach the correct
+ * percentage for this region. We may not write enough if the dirty
+ * buffers have an unbalanced distribution among the regions, but that
+ * seems unlikely.
+ */
+ if (op == DB_SYNC_TRICKLE && ar_cnt > ar_max / (int)mp->nreg)
+ ar_cnt = ar_max / (int)mp->nreg;
+
+ /*
+ * Flush the log. We have to ensure the log records reflecting the
+ * changes on the database pages we're writing have already made it
+ * to disk. We still have to check the log each time we write a page
+ * (because pages we are about to write may be modified after we have
+ * flushed the log), but in general this will at least avoid any I/O
+ * on the log's part.
+ */
+ if (LOGGING_ON(dbenv) && (ret = dbenv->log_flush(dbenv, NULL)) != 0)
+ goto err;
+
+ /*
+ * Walk the array, writing buffers. When we write a buffer, we NULL
+ * out its hash bucket pointer so we don't process a slot more than
+ * once.
+ */
+ for (remaining = ar_cnt, i = pass = 0; remaining > 0; ++i) {
+ if (i >= ar_cnt) {
+ i = 0;
+ ++pass;
+ __os_sleep(dbenv, 1, 0);
+ }
+ if ((hp = bharray[i].track_hp) == NULL)
+ continue;
+
+ /* Lock the hash bucket and find the buffer. */
+ mutexp = &hp->hash_mutex;
+ MUTEX_LOCK(dbenv, mutexp);
+ for (bhp = SH_TAILQ_FIRST(&hp->hash_bucket, __bh);
+ bhp != NULL; bhp = SH_TAILQ_NEXT(bhp, hq, __bh))
+ if (bhp->pgno == bharray[i].track_pgno &&
+ bhp->mf_offset == bharray[i].track_off)
+ break;
- /* Walk the array, writing buffers. */
- for (i = 0; i < ar_cnt;) {
/*
- * It's possible for a thread to have gotten the buffer since
- * we listed it for writing. If the reference count is still
- * 1, we're the only ones using the buffer, go ahead and write.
- * If it's >1, then skip the buffer and assume that it will be
- * written when it's returned to the cache.
+ * If we can't find the buffer, we're done; somebody else must
+ * have written it.
+ *
+ * If the buffer isn't pinned or dirty, we're done; there's
+ * no work needed.
*/
- if (bharray[i]->ref > 1) {
- incomplete = 1;
- --bharray[i++]->ref;
+ if (bhp == NULL || (bhp->ref == 0 && !F_ISSET(bhp, BH_DIRTY))) {
+ MUTEX_UNLOCK(dbenv, mutexp);
+ --remaining;
+ bharray[i].track_hp = NULL;
continue;
}
- /* Write the buffer. */
- ret = __memp_pgwrite(dbmp, dbmfp, bharray[i], NULL, &wrote);
+ /*
+ * If the buffer is locked by another thread, ignore it; we'll
+ * come back to it.
+ *
+ * If the buffer is pinned and it's only the first or second
+ * time we have looked at it, ignore it; we'll come back to
+ * it.
+ *
+ * In either case, skip the buffer if we're not required to
+ * write it.
+ */
+ if (F_ISSET(bhp, BH_LOCKED) || (bhp->ref != 0 && pass < 2)) {
+ MUTEX_UNLOCK(dbenv, mutexp);
+ if (op != DB_SYNC_CACHE && op != DB_SYNC_FILE) {
+ --remaining;
+ bharray[i].track_hp = NULL;
+ }
+ continue;
+ }
+
+ /*
+ * The buffer is either pinned or dirty.
+ *
+ * Set the sync wait-for count, used to count down outstanding
+ * references to this buffer as they are returned to the cache.
+ */
+ bhp->ref_sync = bhp->ref;
- /* Release the buffer. */
- --bharray[i++]->ref;
+ /* Pin the buffer into memory and lock it. */
+ ++bhp->ref;
+ F_SET(bhp, BH_LOCKED);
+ MUTEX_LOCK(dbenv, &bhp->mutex);
- if (ret == 0) {
- if (!wrote)
- incomplete = 1;
- continue;
+ /*
+ * Unlock the hash bucket and wait for the wait-for count to
+ * go to 0. No new thread can acquire the buffer because we
+ * have it locked.
+ *
+ * If a thread attempts to re-pin a page, the wait-for count
+ * will never go to 0 (the thread spins on our buffer lock,
+ * while we spin on the thread's ref count). Give up if we
+ * don't get the buffer in 3 seconds; we can try again later.
+ *
+ * If, when the wait-for count goes to 0, the buffer is found
+ * to be dirty, write it.
+ */
+ MUTEX_UNLOCK(dbenv, mutexp);
+ for (wait_cnt = 1;
+ bhp->ref_sync != 0 && wait_cnt < 4; ++wait_cnt)
+ __os_sleep(dbenv, 1, 0);
+ MUTEX_LOCK(dbenv, mutexp);
+ hb_lock = 1;
+
+ /*
+ * If the ref_sync count has gone to 0, we're going to be done
+ * with this buffer no matter what happens.
+ */
+ if (bhp->ref_sync == 0) {
+ --remaining;
+ bharray[i].track_hp = NULL;
}
/*
- * On error:
+ * If the ref_sync count has gone to 0 and the buffer is still
+ * dirty, we write it. We only try to write the buffer once.
+ * Any process checkpointing or trickle-flushing the pool
+ * must be able to write any underlying file -- if the write
+ * fails, error out. It would be very strange if file sync
+ * failed to write, but we don't care if it happens.
+ */
+ if (bhp->ref_sync == 0 && F_ISSET(bhp, BH_DIRTY)) {
+ hb_lock = 0;
+ MUTEX_UNLOCK(dbenv, mutexp);
+
+ mfp = R_ADDR(dbmp->reginfo, bhp->mf_offset);
+ if ((ret = __memp_bhwrite(dbmp, hp, mfp, bhp, 1)) == 0)
+ ++wrote;
+ else if (op == DB_SYNC_CACHE || op == DB_SYNC_TRICKLE)
+ __db_err(dbenv, "%s: unable to flush page: %lu",
+ __memp_fns(dbmp, mfp), (u_long)bhp->pgno);
+ else
+ ret = 0;
+ }
+
+ /*
+ * If ref_sync count never went to 0, the buffer was written
+ * by another thread, or the write failed, we still have the
+ * buffer locked.
+ *
+ * We may or may not currently hold the hash bucket mutex. If
+ * the __memp_bhwrite -> __memp_pgwrite call was successful,
+ * then __memp_pgwrite will have swapped the buffer lock for
+ * the hash lock. All other call paths will leave us without
+ * the hash bucket lock.
*
- * Release any buffers we're still pinning down.
+ * The order of mutexes above was to acquire the buffer lock
+ * while holding the hash bucket lock. Don't deadlock here:
+ * release the buffer lock and then acquire the hash bucket
+ * lock.
*/
- while (i < ar_cnt)
- --bharray[i++]->ref;
- break;
- }
+ if (F_ISSET(bhp, BH_LOCKED)) {
+ F_CLR(bhp, BH_LOCKED);
+ MUTEX_UNLOCK(dbenv, &bhp->mutex);
- /*
- * If there were too many buffers and we're not returning an error, we
- * re-try the flush once -- since we allocated 80% of the total
- * buffer count, once should be enough. If it still doesn't work, some
- * other thread of control is dirtying buffers as fast as we're writing
- * them, and we might as well give up.
- */
- if (retry_need) {
- if (retry_done)
- incomplete = 1;
- else {
- retry_done = 1;
- goto retry;
+ if (!hb_lock)
+ MUTEX_LOCK(dbenv, mutexp);
}
- }
-done: R_UNLOCK(dbenv, dbmp->reginfo);
+ /*
+ * Reset the ref_sync count regardless of our success; we're
+ * done with this buffer for now.
+ */
+ bhp->ref_sync = 0;
+
+ /* Discard our reference and unlock the bucket. */
+ --bhp->ref;
+ MUTEX_UNLOCK(dbenv, mutexp);
- __os_free(bharray, ndirty * sizeof(BH *));
+ if (ret != 0)
+ break;
+ }
+
+done: /* If we've opened files to flush pages, close them. */
+ if ((t_ret = __memp_close_flush_files(dbenv, dbmp)) != 0 && ret == 0)
+ ret = t_ret;
/*
- * Sync the underlying file as the last thing we do, so that the OS
- * has a maximal opportunity to flush buffers before we request it.
- *
- * !!!:
- * Don't lock the region around the sync, fsync(2) has no atomicity
- * issues.
+ * If doing a checkpoint or flushing a file for the application, we
+ * have to force the pages to disk. We don't do this as we go along
+ * because we want to give the OS as much time as possible to lazily
+ * flush, and because we have to flush files that might not even have
+ * had dirty buffers in the cache, so we have to walk the files list.
*/
- if (ret == 0)
- ret = incomplete ?
- DB_INCOMPLETE : __os_fsync(dbenv, &dbmfp->fh);
+ if (ret == 0 && (op == DB_SYNC_CACHE || op == DB_SYNC_FILE)) {
+ if (dbmfp == NULL)
+ ret = __memp_sync_files(dbenv, dbmp);
+ else
+ ret = __os_fsync(dbenv, dbmfp->fhp);
+ }
+
+err: __os_free(dbenv, bharray);
+ if (wrotep != NULL)
+ *wrotep = wrote;
return (ret);
}
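
A minimal standalone sketch (illustrative only, not BDB code) of the grow-by-doubling pattern used above for the BH_TRACK array, where the array starts at one slot per hash bucket and is doubled through __os_realloc whenever ar_cnt reaches ar_max. The struct rec type and the record counts below are invented for the example:

/*
 * Sketch: append records to a dynamically sized array, doubling its
 * capacity whenever the count catches up with the allocation.
 */
#include <stdio.h>
#include <stdlib.h>

struct rec {				/* illustrative payload */
	unsigned long off, pgno;
};

int
main(void)
{
	struct rec *array, *tmp;
	size_t cnt, i, max;

	max = 4;			/* initial guess at the size */
	if ((array = malloc(max * sizeof(*array))) == NULL)
		return (1);

	for (cnt = 0, i = 0; i < 100; ++i) {
		array[cnt].off = i / 10;
		array[cnt].pgno = i % 10;
		++cnt;

		/* Out of room: double the array before the next append. */
		if (cnt >= max) {
			if ((tmp = realloc(array,
			    (max * 2) * sizeof(*array))) == NULL) {
				free(array);
				return (1);
			}
			array = tmp;
			max *= 2;
		}
	}

	printf("stored %lu records, capacity %lu\n",
	    (unsigned long)cnt, (unsigned long)max);
	free(array);
	return (0);
}
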
/*
- * __memp_sballoc --
- * Allocate room for a list of buffers.
+ * __memp_sync_files --
+ * Sync all the files in the environment, open or not.
*/
-static int
-__memp_sballoc(dbenv, bharrayp, ndirtyp)
+static int
+__memp_sync_files(dbenv, dbmp)
DB_ENV *dbenv;
- BH ***bharrayp;
- u_int32_t *ndirtyp;
-{
DB_MPOOL *dbmp;
- MPOOL *c_mp, *mp;
- u_int32_t i, nclean, ndirty, maxpin;
- int ret;
+{
+ DB_MPOOLFILE *dbmfp;
+ MPOOL *mp;
+ MPOOLFILE *mfp;
+ int ret, t_ret;
- dbmp = dbenv->mp_handle;
+ ret = 0;
mp = dbmp->reginfo[0].primary;
- /*
- * We don't want to hold the region lock while we write the buffers,
- * so only lock it while we create a list.
- *
- * Walk through the list of caches, figuring out how many buffers
- * we're going to need.
- *
- * Make a point of not holding the region lock across the library
- * allocation call.
- */
- for (nclean = ndirty = 0, i = 0; i < mp->nreg; ++i) {
- c_mp = dbmp->reginfo[i].primary;
- ndirty += c_mp->stat.st_page_dirty;
- nclean += c_mp->stat.st_page_clean;
+ R_LOCK(dbenv, dbmp->reginfo);
+ for (mfp = SH_TAILQ_FIRST(&mp->mpfq, __mpoolfile);
+ mfp != NULL; mfp = SH_TAILQ_NEXT(mfp, q, __mpoolfile)) {
+ if (mfp->stat.st_page_out == 0 ||
+ F_ISSET(mfp, MP_DEADFILE | MP_TEMP))
+ continue;
+
+ /* Look for an already open handle. */
+ ret = 0;
+ MUTEX_THREAD_LOCK(dbenv, dbmp->mutexp);
+ for (dbmfp = TAILQ_FIRST(&dbmp->dbmfq);
+ dbmfp != NULL; dbmfp = TAILQ_NEXT(dbmfp, q))
+ if (dbmfp->mfp == mfp) {
+ ret = __os_fsync(dbenv, dbmfp->fhp);
+ break;
+ }
+ MUTEX_THREAD_UNLOCK(dbenv, dbmp->mutexp);
+ if (ret != 0)
+ goto err;
+
+ /* If we don't find one, open one. */
+ if (dbmfp == NULL) {
+ if ((ret = dbenv->memp_fcreate(dbenv, &dbmfp, 0)) != 0)
+ goto err;
+ ret = __memp_fopen_int(
+ dbmfp, mfp, R_ADDR(dbmp->reginfo, mfp->path_off),
+ 0, 0, mfp->stat.st_pagesize);
+ if (ret == 0)
+ ret = __os_fsync(dbenv, dbmfp->fhp);
+ if ((t_ret =
+ __memp_fclose_int(dbmfp, 0)) != 0 && ret == 0)
+ ret = t_ret;
+ if (ret != 0)
+ goto err;
+ }
}
- R_UNLOCK(dbenv, dbmp->reginfo);
- if (ndirty == 0) {
- *ndirtyp = 0;
- return (0);
+
+ if (0) {
+err: __db_err(dbenv, "%s: cannot sync: %s",
+ R_ADDR(dbmp->reginfo, mfp->path_off), db_strerror(ret));
}
+ R_UNLOCK(dbenv, dbmp->reginfo);
- /*
- * We don't want to pin down the entire buffer cache, otherwise we'll
- * starve threads needing new pages. Don't pin down more than 80% of
- * the cache, making sure that we don't screw up just because only a
- * few pages have been created.
- */
- maxpin = ((ndirty + nclean) * 8) / 10;
- if (maxpin < 10)
- maxpin = 10;
+ return (ret);
+}
+
+/*
+ * __memp_close_flush_files --
+ * Close files opened only to flush buffers.
+ */
+static int
+__memp_close_flush_files(dbenv, dbmp)
+ DB_ENV *dbenv;
+ DB_MPOOL *dbmp;
+{
+ DB_MPOOLFILE *dbmfp;
+ int ret;
/*
- * Get a good-sized block of memory to hold buffer pointers, we don't
- * want to run out, but correct if we want to allocate more than we
- * would be allowed to store, regardless.
+ * The routine exists because we must close files opened by sync to
+ * flush buffers. There are two cases: first, extent files have to
+ * be closed so they may be removed when empty. Second, regular
+ * files have to be closed so we don't run out of descriptors (for
+ * example, an application partitioning its data into databases
+ * based on timestamps, so there's a continually increasing set of
+ * files).
+ *
+ * We mark files opened in the __memp_bhwrite() function with the
+ * MP_FLUSH flag. Here we walk through our file descriptor list,
+ * and, if a file was opened by __memp_bhwrite(), we close it.
*/
- ndirty += ndirty / 2 + 10;
- if (ndirty > maxpin)
- ndirty = maxpin;
- if ((ret =
- __os_malloc(dbenv, ndirty * sizeof(BH *), NULL, bharrayp)) != 0)
- return (ret);
-
- *ndirtyp = ndirty;
-
- R_LOCK(dbenv, dbmp->reginfo);
+retry: MUTEX_THREAD_LOCK(dbenv, dbmp->mutexp);
+ for (dbmfp = TAILQ_FIRST(&dbmp->dbmfq);
+ dbmfp != NULL; dbmfp = TAILQ_NEXT(dbmfp, q))
+ if (F_ISSET(dbmfp, MP_FLUSH)) {
+ F_CLR(dbmfp, MP_FLUSH);
+ MUTEX_THREAD_UNLOCK(dbenv, dbmp->mutexp);
+ if ((ret = __memp_fclose_int(dbmfp, 0)) != 0)
+ return (ret);
+ goto retry;
+ }
+ MUTEX_THREAD_UNLOCK(dbenv, dbmp->mutexp);
return (0);
}
@@ -634,15 +603,15 @@ static int
__bhcmp(p1, p2)
const void *p1, *p2;
{
- BH *bhp1, *bhp2;
+ BH_TRACK *bhp1, *bhp2;
- bhp1 = *(BH * const *)p1;
- bhp2 = *(BH * const *)p2;
+ bhp1 = (BH_TRACK *)p1;
+ bhp2 = (BH_TRACK *)p2;
/* Sort by file (shared memory pool offset). */
- if (bhp1->mf_offset < bhp2->mf_offset)
+ if (bhp1->track_off < bhp2->track_off)
return (-1);
- if (bhp1->mf_offset > bhp2->mf_offset)
+ if (bhp1->track_off > bhp2->track_off)
return (1);
/*
@@ -650,9 +619,9 @@ __bhcmp(p1, p2)
* Defend against badly written quicksort code calling the comparison
* function with two identical pointers (e.g., WATCOM C++ (Power++)).
*/
- if (bhp1->pgno < bhp2->pgno)
+ if (bhp1->track_pgno < bhp2->track_pgno)
return (-1);
- if (bhp1->pgno > bhp2->pgno)
+ if (bhp1->track_pgno > bhp2->track_pgno)
return (1);
return (0);
}
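
__bhcmp above is a two-key comparator: it orders the tracked buffers first by file (shared memory pool offset) and then by page number before the qsort call in __memp_sync_int. A self-contained illustration of the same pattern (struct track and the sample data are invented for the example, not BDB types):

/*
 * Sketch: sort (file, page) records with a two-key qsort comparator.
 */
#include <stdio.h>
#include <stdlib.h>

struct track {				/* illustrative stand-in for BH_TRACK */
	unsigned long off;		/* file identifier: primary key */
	unsigned long pgno;		/* page number: secondary key */
};

static int
track_cmp(const void *p1, const void *p2)
{
	const struct track *t1, *t2;

	t1 = p1;
	t2 = p2;

	if (t1->off != t2->off)		/* sort by file first */
		return (t1->off < t2->off ? -1 : 1);
	if (t1->pgno != t2->pgno)	/* then by page within the file */
		return (t1->pgno < t2->pgno ? -1 : 1);
	return (0);			/* identical keys */
}

int
main(void)
{
	struct track a[4] = { {2, 7}, {1, 9}, {2, 3}, {1, 1} };
	size_t i;

	qsort(a, 4, sizeof(a[0]), track_cmp);
	for (i = 0; i < 4; i++)		/* prints 1/1, 1/9, 2/3, 2/7 */
		printf("file %lu page %lu\n", a[i].off, a[i].pgno);
	return (0);
}
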
diff --git a/bdb/mp/mp_trickle.c b/bdb/mp/mp_trickle.c
index f937805cf40..71077ab60cc 100644
--- a/bdb/mp/mp_trickle.c
+++ b/bdb/mp/mp_trickle.c
@@ -1,13 +1,13 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Copyright (c) 1996-2002
* Sleepycat Software. All rights reserved.
*/
#include "db_config.h"
#ifndef lint
-static const char revid[] = "$Id: mp_trickle.c,v 11.12 2000/11/30 00:58:41 ubell Exp $";
+static const char revid[] = "$Id: mp_trickle.c,v 11.24 2002/08/06 06:13:53 bostic Exp $";
#endif /* not lint */
#ifndef NO_SYSTEM_INCLUDES
@@ -16,42 +16,29 @@ static const char revid[] = "$Id: mp_trickle.c,v 11.12 2000/11/30 00:58:41 ubell
#include <stdlib.h>
#endif
-#ifdef HAVE_RPC
-#include "db_server.h"
-#endif
-
#include "db_int.h"
-#include "db_shash.h"
-#include "mp.h"
-
-#ifdef HAVE_RPC
-#include "gen_client_ext.h"
-#include "rpc_client_ext.h"
-#endif
-
-static int __memp_trick __P((DB_ENV *, int, int, int *));
+#include "dbinc/db_shash.h"
+#include "dbinc/mp.h"
/*
- * memp_trickle --
+ * __memp_trickle --
* Keep a specified percentage of the buffers clean.
+ *
+ * PUBLIC: int __memp_trickle __P((DB_ENV *, int, int *));
*/
int
-memp_trickle(dbenv, pct, nwrotep)
+__memp_trickle(dbenv, pct, nwrotep)
DB_ENV *dbenv;
int pct, *nwrotep;
{
DB_MPOOL *dbmp;
- MPOOL *mp;
- u_int32_t i;
- int ret;
-
-#ifdef HAVE_RPC
- if (F_ISSET(dbenv, DB_ENV_RPCCLIENT))
- return (__dbcl_memp_trickle(dbenv, pct, nwrotep));
-#endif
+ MPOOL *c_mp, *mp;
+ u_int32_t clean, dirty, i, total, dtmp;
+ int ret, wrote;
PANIC_CHECK(dbenv);
- ENV_REQUIRES_CONFIG(dbenv, dbenv->mp_handle, DB_INIT_MPOOL);
+ ENV_REQUIRES_CONFIG(dbenv,
+ dbenv->mp_handle, "memp_trickle", DB_INIT_MPOOL);
dbmp = dbenv->mp_handle;
mp = dbmp->reginfo[0].primary;
@@ -62,88 +49,35 @@ memp_trickle(dbenv, pct, nwrotep)
if (pct < 1 || pct > 100)
return (EINVAL);
- R_LOCK(dbenv, dbmp->reginfo);
-
- /* Loop through the caches... */
- for (ret = 0, i = 0; i < mp->nreg; ++i)
- if ((ret = __memp_trick(dbenv, i, pct, nwrotep)) != 0)
- break;
-
- R_UNLOCK(dbenv, dbmp->reginfo);
- return (ret);
-}
-
-/*
- * __memp_trick --
- * Trickle a single cache.
- */
-static int
-__memp_trick(dbenv, ncache, pct, nwrotep)
- DB_ENV *dbenv;
- int ncache, pct, *nwrotep;
-{
- BH *bhp;
- DB_MPOOL *dbmp;
- MPOOL *c_mp;
- MPOOLFILE *mfp;
- db_pgno_t pgno;
- u_long total;
- int ret, wrote;
-
- dbmp = dbenv->mp_handle;
- c_mp = dbmp->reginfo[ncache].primary;
-
/*
- * If there are sufficient clean buffers, or no buffers or no dirty
+ * If there are sufficient clean buffers, no buffers, or no dirty
* buffers, we're done.
*
* XXX
- * Using st_page_clean and st_page_dirty is our only choice at the
- * moment, but it's not as correct as we might like in the presence
- * of pools with more than one buffer size, as a free 512-byte buffer
- * isn't the same as a free 8K buffer.
+ * Using hash_page_dirty is our only choice at the moment, but it's not
+ * as correct as we might like in the presence of pools having more
+ * than one page size, as a free 512B buffer isn't the same as a free
+ * 8KB buffer.
+ *
+ * Loop through the caches counting total/dirty buffers.
*/
-loop: total = c_mp->stat.st_page_clean + c_mp->stat.st_page_dirty;
- if (total == 0 || c_mp->stat.st_page_dirty == 0 ||
- (c_mp->stat.st_page_clean * 100) / total >= (u_long)pct)
- return (0);
-
- /* Loop until we write a buffer. */
- for (bhp = SH_TAILQ_FIRST(&c_mp->bhq, __bh);
- bhp != NULL; bhp = SH_TAILQ_NEXT(bhp, q, __bh)) {
- if (bhp->ref != 0 ||
- !F_ISSET(bhp, BH_DIRTY) || F_ISSET(bhp, BH_LOCKED))
- continue;
-
- mfp = R_ADDR(dbmp->reginfo, bhp->mf_offset);
-
- /*
- * We can't write to temporary files -- see the comment in
- * mp_bh.c:__memp_bhwrite().
- */
- if (F_ISSET(mfp, MP_TEMP))
- continue;
+ for (ret = 0, i = dirty = total = 0; i < mp->nreg; ++i) {
+ c_mp = dbmp->reginfo[i].primary;
+ total += c_mp->stat.st_pages;
+ __memp_stat_hash(&dbmp->reginfo[i], c_mp, &dtmp);
+ dirty += dtmp;
+ }
- pgno = bhp->pgno;
- if ((ret = __memp_bhwrite(dbmp, mfp, bhp, NULL, &wrote)) != 0)
- return (ret);
+ clean = total - dirty;
+ if (clean == total || (clean * 100) / total >= (u_long)pct)
+ return (0);
- /*
- * Any process syncing the shared memory buffer pool had better
- * be able to write to any underlying file. Be understanding,
- * but firm, on this point.
- */
- if (!wrote) {
- __db_err(dbenv, "%s: unable to flush page: %lu",
- __memp_fns(dbmp, mfp), (u_long)pgno);
- return (EPERM);
- }
+ if (nwrotep == NULL)
+ nwrotep = &wrote;
+ ret = __memp_sync_int(dbenv, NULL,
+ ((total * pct) / 100) - clean, DB_SYNC_TRICKLE, nwrotep);
- ++c_mp->stat.st_page_trickle;
- if (nwrotep != NULL)
- ++*nwrotep;
- goto loop;
- }
+ mp->stat.st_page_trickle += *nwrotep;
- return (0);
+ return (ret);
}
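
The call above asks __memp_sync_int to write ((total * pct) / 100) - clean buffers, just enough to bring the clean portion of the pool up to pct percent. A standalone illustration of that arithmetic (the function and variable names below are invented for the example):

/*
 * Sketch: how many buffers a trickle pass must write so that at
 * least pct percent of the pool is clean.
 */
#include <stdio.h>

static unsigned long
trickle_target(unsigned long total, unsigned long dirty, int pct)
{
	unsigned long clean, want;

	clean = total - dirty;
	want = (total * (unsigned long)pct) / 100;

	/* Already clean enough: nothing to write. */
	return (clean >= want ? 0 : want - clean);
}

int
main(void)
{
	/* 1000 buffers, 400 dirty, 70% target: write 100 buffers. */
	printf("%lu\n", trickle_target(1000UL, 400UL, 70));
	return (0);
}
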
diff --git a/bdb/mutex/mut_fcntl.c b/bdb/mutex/mut_fcntl.c
index 02f4d4044f8..2fdf9eff7ef 100644
--- a/bdb/mutex/mut_fcntl.c
+++ b/bdb/mutex/mut_fcntl.c
@@ -1,14 +1,14 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Copyright (c) 1996-2002
* Sleepycat Software. All rights reserved.
*/
#include "db_config.h"
#ifndef lint
-static const char revid[] = "$Id: mut_fcntl.c,v 11.11 2001/01/11 18:19:53 bostic Exp $";
+static const char revid[] = "$Id: mut_fcntl.c,v 11.21 2002/05/31 19:37:45 bostic Exp $";
#endif /* not lint */
#ifndef NO_SYSTEM_INCLUDES
@@ -26,15 +26,26 @@ static const char revid[] = "$Id: mut_fcntl.c,v 11.11 2001/01/11 18:19:53 bostic
* __db_fcntl_mutex_init --
* Initialize a DB mutex structure.
*
- * PUBLIC: int __db_fcntl_mutex_init __P((DB_ENV *, MUTEX *, u_int32_t));
+ * PUBLIC: int __db_fcntl_mutex_init __P((DB_ENV *, DB_MUTEX *, u_int32_t));
*/
int
__db_fcntl_mutex_init(dbenv, mutexp, offset)
DB_ENV *dbenv;
- MUTEX *mutexp;
+ DB_MUTEX *mutexp;
u_int32_t offset;
{
+ u_int32_t save;
+
+ /*
+ * The only setting/checking of the MUTEX_MPOOL flag is in the
+ * mutex allocation code (__db_mutex_alloc/free). Preserve only that
+ * flag. This is safe because even if this flag was never explicitly
+ * set, but happened to be set in memory, it will never be checked or
+ * acted upon.
+ */
+ save = F_ISSET(mutexp, MUTEX_MPOOL);
memset(mutexp, 0, sizeof(*mutexp));
+ F_SET(mutexp, save);
/*
* This is where we decide to ignore locks we don't need to set -- if
@@ -46,7 +57,7 @@ __db_fcntl_mutex_init(dbenv, mutexp, offset)
}
mutexp->off = offset;
-#ifdef MUTEX_SYSTEM_RESOURCES
+#ifdef HAVE_MUTEX_SYSTEM_RESOURCES
mutexp->reg_off = INVALID_ROFF;
#endif
F_SET(mutexp, MUTEX_INITED);
@@ -58,18 +69,17 @@ __db_fcntl_mutex_init(dbenv, mutexp, offset)
* __db_fcntl_mutex_lock
* Lock on a mutex, blocking if necessary.
*
- * PUBLIC: int __db_fcntl_mutex_lock __P((DB_ENV *, MUTEX *, DB_FH *));
+ * PUBLIC: int __db_fcntl_mutex_lock __P((DB_ENV *, DB_MUTEX *));
*/
int
-__db_fcntl_mutex_lock(dbenv, mutexp, fhp)
+__db_fcntl_mutex_lock(dbenv, mutexp)
DB_ENV *dbenv;
- MUTEX *mutexp;
- DB_FH *fhp;
+ DB_MUTEX *mutexp;
{
struct flock k_lock;
int locked, ms, waited;
- if (!dbenv->db_mutexlocks)
+ if (F_ISSET(dbenv, DB_ENV_NOLOCKING))
return (0);
/* Initialize the lock. */
@@ -91,18 +101,18 @@ __db_fcntl_mutex_lock(dbenv, mutexp, fhp)
/* Acquire an exclusive kernel lock. */
k_lock.l_type = F_WRLCK;
- if (fcntl(fhp->fd, F_SETLKW, &k_lock))
+ if (fcntl(dbenv->lockfhp->fd, F_SETLKW, &k_lock))
return (__os_get_errno());
/* If the resource is still available, it's ours. */
if (mutexp->pid == 0) {
locked = 1;
- mutexp->pid = (u_int32_t)getpid();
+ __os_id(&mutexp->pid);
}
/* Release the kernel lock. */
k_lock.l_type = F_UNLCK;
- if (fcntl(fhp->fd, F_SETLK, &k_lock))
+ if (fcntl(dbenv->lockfhp->fd, F_SETLK, &k_lock))
return (__os_get_errno());
/*
@@ -129,14 +139,14 @@ __db_fcntl_mutex_lock(dbenv, mutexp, fhp)
* __db_fcntl_mutex_unlock --
* Release a lock.
*
- * PUBLIC: int __db_fcntl_mutex_unlock __P((DB_ENV *, MUTEX *));
+ * PUBLIC: int __db_fcntl_mutex_unlock __P((DB_ENV *, DB_MUTEX *));
*/
int
__db_fcntl_mutex_unlock(dbenv, mutexp)
DB_ENV *dbenv;
- MUTEX *mutexp;
+ DB_MUTEX *mutexp;
{
- if (!dbenv->db_mutexlocks)
+ if (F_ISSET(dbenv, DB_ENV_NOLOCKING))
return (0);
#ifdef DIAGNOSTIC
@@ -160,13 +170,13 @@ __db_fcntl_mutex_unlock(dbenv, mutexp)
/*
* __db_fcntl_mutex_destroy --
- * Destroy a MUTEX.
+ * Destroy a DB_MUTEX.
*
- * PUBLIC: int __db_fcntl_mutex_destroy __P((MUTEX *));
+ * PUBLIC: int __db_fcntl_mutex_destroy __P((DB_MUTEX *));
*/
int
__db_fcntl_mutex_destroy(mutexp)
- MUTEX *mutexp;
+ DB_MUTEX *mutexp;
{
COMPQUIET(mutexp, NULL);
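
The fcntl mutex above serializes processes by taking an exclusive fcntl(2) record lock on the environment's lock file (dbenv->lockfhp) around its test of mutexp->pid. A minimal standalone sketch of that kernel primitive, separate from the patch; the file name and the one-byte lock range are illustrative:

/*
 * Sketch: exclusive fcntl(2) record lock on one byte of a file, the
 * primitive the fcntl mutex implementation builds on.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int
main(void)
{
	struct flock k_lock;
	int fd;

	if ((fd = open("lock.file", O_CREAT | O_RDWR, 0600)) == -1)
		return (1);

	/* Lock byte 0 exclusively, blocking until it is available. */
	memset(&k_lock, 0, sizeof(k_lock));
	k_lock.l_whence = SEEK_SET;
	k_lock.l_start = 0;
	k_lock.l_len = 1;
	k_lock.l_type = F_WRLCK;
	if (fcntl(fd, F_SETLKW, &k_lock) == -1)
		return (1);

	/* ... critical section ... */

	/* Release the lock. */
	k_lock.l_type = F_UNLCK;
	if (fcntl(fd, F_SETLK, &k_lock) == -1)
		return (1);

	(void)close(fd);
	return (0);
}
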
diff --git a/bdb/mutex/mut_pthread.c b/bdb/mutex/mut_pthread.c
index 3de4abcefc5..4a55ce0ca03 100644
--- a/bdb/mutex/mut_pthread.c
+++ b/bdb/mutex/mut_pthread.c
@@ -1,14 +1,14 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1999, 2000
+ * Copyright (c) 1999-2002
* Sleepycat Software. All rights reserved.
*/
#include "db_config.h"
#ifndef lint
-static const char revid[] = "$Id: mut_pthread.c,v 11.33 2001/01/09 00:56:16 ubell Exp $";
+static const char revid[] = "$Id: mut_pthread.c,v 11.53 2002/08/13 19:56:47 sue Exp $";
#endif /* not lint */
#ifndef NO_SYSTEM_INCLUDES
@@ -36,7 +36,11 @@ static const char revid[] = "$Id: mut_pthread.c,v 11.33 2001/01/09 00:56:16 ubel
#define pthread_mutex_lock _lwp_mutex_lock
#define pthread_mutex_trylock _lwp_mutex_trylock
#define pthread_mutex_unlock _lwp_mutex_unlock
-#define pthread_self _lwp_self
+/*
+ * _lwp_self returns the LWP process ID, which isn't a unique per-thread
+ * identifier. Use pthread_self instead; it appears to work even if we
+ * are not a pthreads application.
+ */
#define pthread_mutex_destroy(x) 0
#endif
#ifdef HAVE_MUTEX_UI_THREADS
@@ -53,20 +57,31 @@ static const char revid[] = "$Id: mut_pthread.c,v 11.33 2001/01/09 00:56:16 ubel
/*
* __db_pthread_mutex_init --
- * Initialize a MUTEX.
+ * Initialize a DB_MUTEX.
*
- * PUBLIC: int __db_pthread_mutex_init __P((DB_ENV *, MUTEX *, u_int32_t));
+ * PUBLIC: int __db_pthread_mutex_init __P((DB_ENV *, DB_MUTEX *, u_int32_t));
*/
int
__db_pthread_mutex_init(dbenv, mutexp, flags)
DB_ENV *dbenv;
- MUTEX *mutexp;
+ DB_MUTEX *mutexp;
u_int32_t flags;
{
+ u_int32_t save;
int ret;
ret = 0;
+
+ /*
+ * The only setting/checking of the MUTEX_MPOOL flag is in the
+ * mutex allocation code (__db_mutex_alloc/free). Preserve only that
+ * flag. This is safe because even if this flag was never explicitly
+ * set, but happened to be set in memory, it will never be checked or
+ * acted upon.
+ */
+ save = F_ISSET(mutexp, MUTEX_MPOOL);
memset(mutexp, 0, sizeof(*mutexp));
+ F_SET(mutexp, save);
/*
* If this is a thread lock or the process has told us that there are
@@ -81,7 +96,6 @@ __db_pthread_mutex_init(dbenv, mutexp, flags)
F_SET(mutexp, MUTEX_IGNORE);
return (0);
}
- F_SET(mutexp, MUTEX_THREAD);
}
#ifdef HAVE_MUTEX_PTHREADS
@@ -89,18 +103,13 @@ __db_pthread_mutex_init(dbenv, mutexp, flags)
pthread_condattr_t condattr, *condattrp = NULL;
pthread_mutexattr_t mutexattr, *mutexattrp = NULL;
- if (!F_ISSET(mutexp, MUTEX_THREAD)) {
- ret = pthread_condattr_init(&condattr);
- if (ret == 0)
- ret = pthread_condattr_setpshared(
- &condattr, PTHREAD_PROCESS_SHARED);
- condattrp = &condattr;
-
- if (ret == 0)
- ret = pthread_mutexattr_init(&mutexattr);
+ if (!LF_ISSET(MUTEX_THREAD)) {
+ ret = pthread_mutexattr_init(&mutexattr);
+#ifndef HAVE_MUTEX_THREAD_ONLY
if (ret == 0)
ret = pthread_mutexattr_setpshared(
&mutexattr, PTHREAD_PROCESS_SHARED);
+#endif
mutexattrp = &mutexattr;
}
@@ -108,14 +117,27 @@ __db_pthread_mutex_init(dbenv, mutexp, flags)
ret = pthread_mutex_init(&mutexp->mutex, mutexattrp);
if (mutexattrp != NULL)
pthread_mutexattr_destroy(mutexattrp);
- if (LF_ISSET(MUTEX_SELF_BLOCK)) {
+ if (ret == 0 && LF_ISSET(MUTEX_SELF_BLOCK)) {
+ if (!LF_ISSET(MUTEX_THREAD)) {
+ ret = pthread_condattr_init(&condattr);
+#ifndef HAVE_MUTEX_THREAD_ONLY
+ if (ret == 0) {
+ condattrp = &condattr;
+ ret = pthread_condattr_setpshared(
+ &condattr, PTHREAD_PROCESS_SHARED);
+ }
+#endif
+ }
+
if (ret == 0)
ret = pthread_cond_init(&mutexp->cond, condattrp);
F_SET(mutexp, MUTEX_SELF_BLOCK);
if (condattrp != NULL)
- pthread_condattr_destroy(condattrp);
- }}
+ (void)pthread_condattr_destroy(condattrp);
+ }
+
+ }
#endif
#ifdef HAVE_MUTEX_SOLARIS_LWP
/*
@@ -126,7 +148,7 @@ __db_pthread_mutex_init(dbenv, mutexp, flags)
* initialization values doesn't have surrounding braces. There's not
* much we can do.
*/
- if (F_ISSET(mutexp, MUTEX_THREAD)) {
+ if (LF_ISSET(MUTEX_THREAD)) {
static lwp_mutex_t mi = DEFAULTMUTEX;
mutexp->mutex = mi;
@@ -136,7 +158,7 @@ __db_pthread_mutex_init(dbenv, mutexp, flags)
mutexp->mutex = mi;
}
if (LF_ISSET(MUTEX_SELF_BLOCK)) {
- if (F_ISSET(mutexp, MUTEX_THREAD)) {
+ if (LF_ISSET(MUTEX_THREAD)) {
static lwp_cond_t ci = DEFAULTCV;
mutexp->cond = ci;
@@ -152,7 +174,7 @@ __db_pthread_mutex_init(dbenv, mutexp, flags)
{
int type;
- type = F_ISSET(mutexp, MUTEX_THREAD) ? USYNC_THREAD : USYNC_PROCESS;
+ type = LF_ISSET(MUTEX_THREAD) ? USYNC_THREAD : USYNC_PROCESS;
ret = mutex_init(&mutexp->mutex, type, NULL);
if (ret == 0 && LF_ISSET(MUTEX_SELF_BLOCK)) {
@@ -162,12 +184,15 @@ __db_pthread_mutex_init(dbenv, mutexp, flags)
}}
#endif
- mutexp->spins = __os_spin();
-#ifdef MUTEX_SYSTEM_RESOURCES
+ mutexp->spins = __os_spin(dbenv);
+#ifdef HAVE_MUTEX_SYSTEM_RESOURCES
mutexp->reg_off = INVALID_ROFF;
#endif
if (ret == 0)
F_SET(mutexp, MUTEX_INITED);
+ else
+ __db_err(dbenv,
+ "unable to initialize mutex: %s", strerror(ret));
return (ret);
}
@@ -176,17 +201,17 @@ __db_pthread_mutex_init(dbenv, mutexp, flags)
* __db_pthread_mutex_lock
* Lock on a mutex, logically blocking if necessary.
*
- * PUBLIC: int __db_pthread_mutex_lock __P((DB_ENV *, MUTEX *));
+ * PUBLIC: int __db_pthread_mutex_lock __P((DB_ENV *, DB_MUTEX *));
*/
int
__db_pthread_mutex_lock(dbenv, mutexp)
DB_ENV *dbenv;
- MUTEX *mutexp;
+ DB_MUTEX *mutexp;
{
u_int32_t nspins;
int i, ret, waited;
- if (!dbenv->db_mutexlocks || F_ISSET(mutexp, MUTEX_IGNORE))
+ if (F_ISSET(dbenv, DB_ENV_NOLOCKING) || F_ISSET(mutexp, MUTEX_IGNORE))
return (0);
/* Attempt to acquire the resource for N spins. */
@@ -195,7 +220,7 @@ __db_pthread_mutex_lock(dbenv, mutexp)
break;
if (nspins == 0 && (ret = pthread_mutex_lock(&mutexp->mutex)) != 0)
- return (ret);
+ goto err;
if (F_ISSET(mutexp, MUTEX_SELF_BLOCK)) {
for (waited = 0; mutexp->locked != 0; waited = 1) {
@@ -210,8 +235,14 @@ __db_pthread_mutex_lock(dbenv, mutexp)
* call, and Solaris delivers the signal to the wrong
* LWP.
*/
- if (ret != 0 && ret != ETIME && ret != ETIMEDOUT)
+ if (ret != 0 && ret != EINTR &&
+#ifdef ETIME
+ ret != ETIME &&
+#endif
+ ret != ETIMEDOUT) {
+ (void)pthread_mutex_unlock(&mutexp->mutex);
return (ret);
+ }
}
if (waited)
@@ -238,11 +269,14 @@ __db_pthread_mutex_lock(dbenv, mutexp)
ret = pthread_mutex_unlock(&mutexp->mutex);
} while (ret == EFAULT && --i > 0);
if (ret != 0)
- return (ret);
+ goto err;
} else {
if (nspins == mutexp->spins)
++mutexp->mutex_set_nowait;
- else
+ else if (nspins > 0) {
+ ++mutexp->mutex_set_spin;
+ mutexp->mutex_set_spins += mutexp->spins - nspins;
+ } else
++mutexp->mutex_set_wait;
#ifdef DIAGNOSTIC
if (mutexp->locked) {
@@ -257,22 +291,25 @@ __db_pthread_mutex_lock(dbenv, mutexp)
#endif
}
return (0);
+
+err: __db_err(dbenv, "unable to lock mutex: %s", strerror(ret));
+ return (ret);
}
/*
* __db_pthread_mutex_unlock --
* Release a lock.
*
- * PUBLIC: int __db_pthread_mutex_unlock __P((DB_ENV *, MUTEX *));
+ * PUBLIC: int __db_pthread_mutex_unlock __P((DB_ENV *, DB_MUTEX *));
*/
int
__db_pthread_mutex_unlock(dbenv, mutexp)
DB_ENV *dbenv;
- MUTEX *mutexp;
+ DB_MUTEX *mutexp;
{
int i, ret;
- if (!dbenv->db_mutexlocks || F_ISSET(mutexp, MUTEX_IGNORE))
+ if (F_ISSET(dbenv, DB_ENV_NOLOCKING) || F_ISSET(mutexp, MUTEX_IGNORE))
return (0);
#ifdef DIAGNOSTIC
@@ -282,47 +319,43 @@ __db_pthread_mutex_unlock(dbenv, mutexp)
if (F_ISSET(mutexp, MUTEX_SELF_BLOCK)) {
if ((ret = pthread_mutex_lock(&mutexp->mutex)) != 0)
- return (ret);
+ goto err;
mutexp->locked = 0;
if ((ret = pthread_cond_signal(&mutexp->cond)) != 0)
return (ret);
- /* See comment above; workaround for [#2471]. */
- i = PTHREAD_UNLOCK_ATTEMPTS;
- do {
- ret = pthread_mutex_unlock(&mutexp->mutex);
- } while (ret == EFAULT && --i > 0);
- if (ret != 0)
- return (ret);
- } else {
+ } else
mutexp->locked = 0;
- /* See comment above; workaround for [#2471]. */
- i = PTHREAD_UNLOCK_ATTEMPTS;
- do {
- ret = pthread_mutex_unlock(&mutexp->mutex);
- } while (ret == EFAULT && --i > 0);
- if (ret != 0)
- return (ret);
- }
+ /* See comment above; workaround for [#2471]. */
+ i = PTHREAD_UNLOCK_ATTEMPTS;
+ do {
+ ret = pthread_mutex_unlock(&mutexp->mutex);
+ } while (ret == EFAULT && --i > 0);
+ return (ret);
- return (0);
+err: __db_err(dbenv, "unable to unlock mutex: %s", strerror(ret));
+ return (ret);
}
/*
* __db_pthread_mutex_destroy --
- * Destroy a MUTEX.
+ * Destroy a DB_MUTEX.
*
- * PUBLIC: int __db_pthread_mutex_destroy __P((MUTEX *));
+ * PUBLIC: int __db_pthread_mutex_destroy __P((DB_MUTEX *));
*/
int
__db_pthread_mutex_destroy(mutexp)
- MUTEX *mutexp;
+ DB_MUTEX *mutexp;
{
+ int ret;
+
if (F_ISSET(mutexp, MUTEX_IGNORE))
return (0);
- return (pthread_mutex_destroy(&mutexp->mutex));
+ if ((ret = pthread_mutex_destroy(&mutexp->mutex)) != 0)
+ __db_err(NULL, "unable to destroy mutex: %s", strerror(ret));
+ return (ret);
}
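
For locks that must work across processes, __db_pthread_mutex_init above initializes the mutex (and, for self-blocking mutexes, the condition variable) with PTHREAD_PROCESS_SHARED; thread-only locks skip the attribute. A minimal standalone sketch of a process-shared pthread mutex placed in shared memory, assuming MAP_ANONYMOUS and the -pthread build flag are available:

/*
 * Sketch: a pthread mutex in anonymous shared memory, usable by this
 * process and any children forked after the mapping is created.
 */
#include <pthread.h>
#include <stdio.h>
#include <sys/mman.h>

int
main(void)
{
	pthread_mutexattr_t attr;
	pthread_mutex_t *mp;

	/* Place the mutex in memory shared with future children. */
	mp = mmap(NULL, sizeof(*mp), PROT_READ | PROT_WRITE,
	    MAP_SHARED | MAP_ANONYMOUS, -1, 0);
	if (mp == MAP_FAILED)
		return (1);

	/* Mark the mutex as shareable between processes. */
	if (pthread_mutexattr_init(&attr) != 0 ||
	    pthread_mutexattr_setpshared(&attr, PTHREAD_PROCESS_SHARED) != 0 ||
	    pthread_mutex_init(mp, &attr) != 0)
		return (1);
	(void)pthread_mutexattr_destroy(&attr);

	if (pthread_mutex_lock(mp) != 0)
		return (1);
	/* ... critical section, possibly contended by a forked child ... */
	(void)pthread_mutex_unlock(mp);

	(void)pthread_mutex_destroy(mp);
	(void)munmap(mp, sizeof(*mp));
	return (0);
}
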
diff --git a/bdb/mutex/mut_tas.c b/bdb/mutex/mut_tas.c
index 4b0db4bdf05..c24e09473ca 100644
--- a/bdb/mutex/mut_tas.c
+++ b/bdb/mutex/mut_tas.c
@@ -1,14 +1,14 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Copyright (c) 1996-2002
* Sleepycat Software. All rights reserved.
*/
#include "db_config.h"
#ifndef lint
-static const char revid[] = "$Id: mut_tas.c,v 11.18 2000/11/30 00:58:41 ubell Exp $";
+static const char revid[] = "$Id: mut_tas.c,v 11.32 2002/05/07 18:42:21 bostic Exp $";
#endif /* not lint */
#ifndef NO_SYSTEM_INCLUDES
@@ -25,32 +25,33 @@ static const char revid[] = "$Id: mut_tas.c,v 11.18 2000/11/30 00:58:41 ubell Ex
#define LOAD_ACTUAL_MUTEX_CODE
#include "db_int.h"
-#ifdef DIAGNOSTIC
-#undef MSG1
-#define MSG1 "mutex_lock: ERROR: lock currently in use: pid: %lu.\n"
-#undef MSG2
-#define MSG2 "mutex_unlock: ERROR: lock already unlocked\n"
-#ifndef STDERR_FILENO
-#define STDERR_FILENO 2
-#endif
-#endif
-
/*
* __db_tas_mutex_init --
- * Initialize a MUTEX.
+ * Initialize a DB_MUTEX.
*
- * PUBLIC: int __db_tas_mutex_init __P((DB_ENV *, MUTEX *, u_int32_t));
+ * PUBLIC: int __db_tas_mutex_init __P((DB_ENV *, DB_MUTEX *, u_int32_t));
*/
int
__db_tas_mutex_init(dbenv, mutexp, flags)
DB_ENV *dbenv;
- MUTEX *mutexp;
+ DB_MUTEX *mutexp;
u_int32_t flags;
{
+ u_int32_t save;
+
/* Check alignment. */
DB_ASSERT(((db_alignp_t)mutexp & (MUTEX_ALIGN - 1)) == 0);
+ /*
+ * The only setting/checking of the MUTEX_MPOOL flag is in the
+ * mutex allocation code (__db_mutex_alloc/free). Preserve only that
+ * flag. This is safe because even if this flag was never explicitly
+ * set, but happened to be set in memory, it will never be checked or
+ * acted upon.
+ */
+ save = F_ISSET(mutexp, MUTEX_MPOOL);
memset(mutexp, 0, sizeof(*mutexp));
+ F_SET(mutexp, save);
/*
* If this is a thread lock or the process has told us that there are
@@ -65,15 +66,14 @@ __db_tas_mutex_init(dbenv, mutexp, flags)
F_SET(mutexp, MUTEX_IGNORE);
return (0);
}
- F_SET(mutexp, MUTEX_THREAD);
}
/* Initialize the lock. */
if (MUTEX_INIT(&mutexp->tas))
return (__os_get_errno());
- mutexp->spins = __os_spin();
-#ifdef MUTEX_SYSTEM_RESOURCES
+ mutexp->spins = __os_spin(dbenv);
+#ifdef HAVE_MUTEX_SYSTEM_RESOURCES
mutexp->reg_off = INVALID_ROFF;
#endif
F_SET(mutexp, MUTEX_INITED);
@@ -85,17 +85,17 @@ __db_tas_mutex_init(dbenv, mutexp, flags)
* __db_tas_mutex_lock
* Lock on a mutex, logically blocking if necessary.
*
- * PUBLIC: int __db_tas_mutex_lock __P((DB_ENV *, MUTEX *));
+ * PUBLIC: int __db_tas_mutex_lock __P((DB_ENV *, DB_MUTEX *));
*/
int
__db_tas_mutex_lock(dbenv, mutexp)
DB_ENV *dbenv;
- MUTEX *mutexp;
+ DB_MUTEX *mutexp;
{
u_long ms;
int nspins;
- if (!dbenv->db_mutexlocks || F_ISSET(mutexp, MUTEX_IGNORE))
+ if (F_ISSET(dbenv, DB_ENV_NOLOCKING) || F_ISSET(mutexp, MUTEX_IGNORE))
return (0);
ms = 1;
@@ -119,7 +119,7 @@ relock:
* happened to initialize or use one of them.)
*/
if (mutexp->locked != 0) {
- mutexp->locked = (u_int32_t)getpid();
+ __os_id(&mutexp->locked);
goto relock;
}
/*
@@ -129,15 +129,13 @@ relock:
*/
#endif
#ifdef DIAGNOSTIC
- if (mutexp->locked != 0) {
- char msgbuf[128];
- (void)snprintf(msgbuf,
- sizeof(msgbuf), MSG1, (u_long)mutexp->locked);
- (void)write(STDERR_FILENO, msgbuf, strlen(msgbuf));
- }
+ if (mutexp->locked != 0)
+ __db_err(dbenv,
+ "__db_tas_mutex_lock: ERROR: lock currently in use: ID: %lu",
+ (u_long)mutexp->locked);
#endif
#if defined(DIAGNOSTIC) || defined(HAVE_MUTEX_HPPA_MSEM_INIT)
- mutexp->locked = (u_int32_t)getpid();
+ __os_id(&mutexp->locked);
#endif
if (ms == 1)
++mutexp->mutex_set_nowait;
@@ -158,19 +156,20 @@ relock:
* __db_tas_mutex_unlock --
* Release a lock.
*
- * PUBLIC: int __db_tas_mutex_unlock __P((DB_ENV *, MUTEX *));
+ * PUBLIC: int __db_tas_mutex_unlock __P((DB_ENV *, DB_MUTEX *));
*/
int
__db_tas_mutex_unlock(dbenv, mutexp)
DB_ENV *dbenv;
- MUTEX *mutexp;
+ DB_MUTEX *mutexp;
{
- if (!dbenv->db_mutexlocks || F_ISSET(mutexp, MUTEX_IGNORE))
+ if (F_ISSET(dbenv, DB_ENV_NOLOCKING) || F_ISSET(mutexp, MUTEX_IGNORE))
return (0);
#ifdef DIAGNOSTIC
if (!mutexp->locked)
- (void)write(STDERR_FILENO, MSG2, sizeof(MSG2) - 1);
+ __db_err(dbenv,
+ "__db_tas_mutex_unlock: ERROR: lock already unlocked");
#endif
#if defined(DIAGNOSTIC) || defined(HAVE_MUTEX_HPPA_MSEM_INIT)
mutexp->locked = 0;
@@ -183,13 +182,13 @@ __db_tas_mutex_unlock(dbenv, mutexp)
/*
* __db_tas_mutex_destroy --
- * Destroy a MUTEX.
+ * Destroy a DB_MUTEX.
*
- * PUBLIC: int __db_tas_mutex_destroy __P((MUTEX *));
+ * PUBLIC: int __db_tas_mutex_destroy __P((DB_MUTEX *));
*/
int
__db_tas_mutex_destroy(mutexp)
- MUTEX *mutexp;
+ DB_MUTEX *mutexp;
{
if (F_ISSET(mutexp, MUTEX_IGNORE))
return (0);
diff --git a/bdb/mutex/mut_win32.c b/bdb/mutex/mut_win32.c
new file mode 100644
index 00000000000..49eb20a6ecf
--- /dev/null
+++ b/bdb/mutex/mut_win32.c
@@ -0,0 +1,257 @@
+/*
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: mut_win32.c,v 1.8 2002/09/10 02:37:25 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <string.h>
+#include <unistd.h>
+#endif
+
+/*
+ * This is where we load in the actual test-and-set mutex code.
+ */
+#define LOAD_ACTUAL_MUTEX_CODE
+#include "db_int.h"
+
+/* We don't want to run this code even in "ordinary" diagnostic mode. */
+#undef MUTEX_DIAG
+
+#define GET_HANDLE(mutexp, event) do { \
+ char idbuf[13]; \
+ \
+ if (F_ISSET(mutexp, MUTEX_THREAD)) { \
+ event = mutexp->event; \
+ return (0); \
+ } \
+ \
+ snprintf(idbuf, sizeof idbuf, "db.m%08x", mutexp->id); \
+ event = CreateEvent(NULL, FALSE, FALSE, idbuf); \
+ if (event == NULL) \
+ return (__os_win32_errno()); \
+} while (0)
+
+#define RELEASE_HANDLE(mutexp, event) \
+ if (!F_ISSET(mutexp, MUTEX_THREAD) && event != NULL) { \
+ CloseHandle(event); \
+ event = NULL; \
+ }
+
+/*
+ * __db_win32_mutex_init --
+ * Initialize a DB_MUTEX.
+ *
+ * PUBLIC: int __db_win32_mutex_init __P((DB_ENV *, DB_MUTEX *, u_int32_t));
+ */
+int
+__db_win32_mutex_init(dbenv, mutexp, flags)
+ DB_ENV *dbenv;
+ DB_MUTEX *mutexp;
+ u_int32_t flags;
+{
+ u_int32_t save;
+
+ /*
+ * The only setting/checking of the MUTEX_MPOOL flag is in the
+ * mutex allocation code (__db_mutex_alloc/free). Preserve only that
+ * flag. This is safe because even if this flag was never explicitly
+ * set, but happened to be set in memory, it will never be checked or
+ * acted upon.
+ */
+ save = F_ISSET(mutexp, MUTEX_MPOOL);
+ memset(mutexp, 0, sizeof(*mutexp));
+ F_SET(mutexp, save);
+
+ /*
+ * If this is a thread lock or the process has told us that there are
+ * no other processes in the environment, use thread-only locks, they
+ * are faster in some cases.
+ *
+ * This is where we decide to ignore locks we don't need to set -- if
+ * the application isn't threaded, there aren't any threads to block.
+ */
+ if (LF_ISSET(MUTEX_THREAD) || F_ISSET(dbenv, DB_ENV_PRIVATE)) {
+ if (!F_ISSET(dbenv, DB_ENV_THREAD)) {
+ F_SET(mutexp, MUTEX_IGNORE);
+ return (0);
+ }
+ F_SET(mutexp, MUTEX_THREAD);
+ mutexp->event = CreateEvent(NULL, FALSE, FALSE, NULL);
+ if (mutexp->event == NULL)
+ return (__os_win32_errno());
+ } else
+ mutexp->id = ((getpid() & 0xffff) << 16) ^ P_TO_UINT32(mutexp);
+
+ mutexp->spins = __os_spin(dbenv);
+ F_SET(mutexp, MUTEX_INITED);
+
+ return (0);
+}
+
+/*
+ * __db_win32_mutex_lock
+ * Lock on a mutex, logically blocking if necessary.
+ *
+ * PUBLIC: int __db_win32_mutex_lock __P((DB_ENV *, DB_MUTEX *));
+ */
+int
+__db_win32_mutex_lock(dbenv, mutexp)
+ DB_ENV *dbenv;
+ DB_MUTEX *mutexp;
+{
+ HANDLE event;
+ int ret, ms, nspins;
+#ifdef MUTEX_DIAG
+ LARGE_INTEGER now;
+#endif
+
+ if (F_ISSET(dbenv, DB_ENV_NOLOCKING) || F_ISSET(mutexp, MUTEX_IGNORE))
+ return (0);
+
+ event = NULL;
+ ms = 50;
+ ret = 0;
+
+loop: /* Attempt to acquire the resource for N spins. */
+ for (nspins = mutexp->spins; nspins > 0; --nspins) {
+ if (!MUTEX_SET(&mutexp->tas))
+ continue;
+
+#ifdef DIAGNOSTIC
+ if (mutexp->locked)
+ __db_err(dbenv,
+ "__db_win32_mutex_lock: mutex double-locked!");
+
+ __os_id(&mutexp->locked);
+#endif
+
+ if (event == NULL)
+ ++mutexp->mutex_set_nowait;
+ else {
+ ++mutexp->mutex_set_wait;
+ RELEASE_HANDLE(mutexp, event);
+ InterlockedDecrement(&mutexp->nwaiters);
+#ifdef MUTEX_DIAG
+ if (ret != WAIT_OBJECT_0) {
+ QueryPerformanceCounter(&now);
+ printf("[%I64d]: Lost signal on mutex %p, "
+ "id %d, ms %d\n",
+ now.QuadPart, mutexp, mutexp->id, ms);
+ }
+#endif
+ }
+
+ return (0);
+ }
+
+ /*
+ * Yield the processor; wait 50 ms initially, up to 1 second. This
+ * loop is needed to work around a race where the signal from the
+ * unlocking thread gets lost. We start at 50 ms because it's unlikely
+ * to happen often and we want to avoid wasting CPU.
+ */
+ if (event == NULL) {
+#ifdef MUTEX_DIAG
+ QueryPerformanceCounter(&now);
+ printf("[%I64d]: Waiting on mutex %p, id %d\n",
+ now.QuadPart, mutexp, mutexp->id);
+#endif
+ InterlockedIncrement(&mutexp->nwaiters);
+ GET_HANDLE(mutexp, event);
+ }
+ if ((ret = WaitForSingleObject(event, ms)) == WAIT_FAILED)
+ return (__os_win32_errno());
+ if ((ms <<= 1) > MS_PER_SEC)
+ ms = MS_PER_SEC;
+
+ goto loop;
+}
+
+/*
+ * __db_win32_mutex_unlock --
+ * Release a lock.
+ *
+ * PUBLIC: int __db_win32_mutex_unlock __P((DB_ENV *, DB_MUTEX *));
+ */
+int
+__db_win32_mutex_unlock(dbenv, mutexp)
+ DB_ENV *dbenv;
+ DB_MUTEX *mutexp;
+{
+ int ret;
+ HANDLE event;
+#ifdef MUTEX_DIAG
+ LARGE_INTEGER now;
+#endif
+
+ if (F_ISSET(dbenv, DB_ENV_NOLOCKING) || F_ISSET(mutexp, MUTEX_IGNORE))
+ return (0);
+
+#ifdef DIAGNOSTIC
+ if (!mutexp->tas || !mutexp->locked)
+ __db_err(dbenv,
+ "__db_win32_mutex_unlock: ERROR: lock already unlocked");
+
+ mutexp->locked = 0;
+#endif
+ MUTEX_UNSET(&mutexp->tas);
+
+ ret = 0;
+
+ if (mutexp->nwaiters > 0) {
+ GET_HANDLE(mutexp, event);
+
+#ifdef MUTEX_DIAG
+ QueryPerformanceCounter(&now);
+ printf("[%I64d]: Signalling mutex %p, id %d\n",
+ now.QuadPart, mutexp, mutexp->id);
+#endif
+ if (!PulseEvent(event))
+ ret = __os_win32_errno();
+
+ RELEASE_HANDLE(mutexp, event);
+ }
+
+#ifdef DIAGNOSTIC
+ if (ret != 0)
+ __db_err(dbenv,
+ "__db_win32_mutex_unlock: ERROR: unlock failed");
+#endif
+
+ return (ret);
+}
+
+/*
+ * __db_win32_mutex_destroy --
+ * Destroy a DB_MUTEX.
+ *
+ * PUBLIC: int __db_win32_mutex_destroy __P((DB_MUTEX *));
+ */
+int
+__db_win32_mutex_destroy(mutexp)
+ DB_MUTEX *mutexp;
+{
+ int ret;
+
+ if (F_ISSET(mutexp, MUTEX_IGNORE) || !F_ISSET(mutexp, MUTEX_THREAD))
+ return (0);
+
+ ret = 0;
+ if (mutexp->event != NULL) {
+ if (!CloseHandle(mutexp->event))
+ ret = __os_win32_errno();
+ mutexp->event = NULL;
+ }
+
+ return (ret);
+}
diff --git a/bdb/mutex/mutex.c b/bdb/mutex/mutex.c
index acc4af9bfcc..5418764a889 100644
--- a/bdb/mutex/mutex.c
+++ b/bdb/mutex/mutex.c
@@ -1,58 +1,152 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1999, 2000
+ * Copyright (c) 1999-2002
* Sleepycat Software. All rights reserved.
*/
#include "db_config.h"
#ifndef lint
-static const char revid[] = "$Id: mutex.c,v 11.14 2000/11/30 00:58:42 ubell Exp $";
+static const char revid[] = "$Id: mutex.c,v 11.37 2002/05/31 19:37:46 bostic Exp $";
#endif /* not lint */
#ifndef NO_SYSTEM_INCLUDES
#include <sys/types.h>
+#include <string.h>
#endif
#include "db_int.h"
+#if defined(MUTEX_NO_MALLOC_LOCKS) || defined(HAVE_MUTEX_SYSTEM_RESOURCES)
+#include "dbinc/db_shash.h"
+#include "dbinc/lock.h"
+#include "dbinc/log.h"
+#include "dbinc/mp.h"
+#include "dbinc/txn.h"
+#endif
+
+static int __db_mutex_alloc_int __P((DB_ENV *, REGINFO *, DB_MUTEX **));
+#ifdef HAVE_MUTEX_SYSTEM_RESOURCES
+static REGMAINT * __db_mutex_maint __P((DB_ENV *, REGINFO *));
+#endif
+
/*
- * __db_mutex_alloc --
- * Allocate and initialize a mutex.
+ * __db_mutex_setup --
+ * External interface to allocate, and/or initialize, record
+ * mutexes.
*
- * PUBLIC: int __db_mutex_alloc __P((DB_ENV *, REGINFO *, MUTEX **));
+ * PUBLIC: int __db_mutex_setup __P((DB_ENV *, REGINFO *, void *, u_int32_t));
*/
int
-__db_mutex_alloc(dbenv, infop, storep)
+__db_mutex_setup(dbenv, infop, ptr, flags)
DB_ENV *dbenv;
REGINFO *infop;
- MUTEX **storep;
+ void *ptr;
+ u_int32_t flags;
{
+ DB_MUTEX *mutex;
+ REGMAINT *maint;
+ u_int32_t iflags, offset;
int ret;
+ ret = 0;
/*
- * If the architecture supports mutexes in heap memory, use that
- * memory. If it doesn't, we have to allocate space in a region.
- *
- * XXX
- * There's a nasty starvation issue here for applications running
- * on systems that don't support mutexes in heap memory. If the
- * normal state of the entire region is dirty (e.g., mpool), then
- * we can run out of memory to allocate for mutexes when new files
- * are opened in the pool. We're not trying to fix this for now,
- * because the only known system where we can see this failure at
- * the moment is HP-UX 10.XX.
+ * If they indicated the region is not locked, then lock it.
+ * This is only needed when we have unusual mutex resources.
+ * (i.e., MUTEX_NO_MALLOC_LOCKS or HAVE_MUTEX_SYSTEM_RESOURCES)
*/
-#ifdef MUTEX_NO_MALLOC_LOCKS
- R_LOCK(dbenv, infop);
- ret = __db_shalloc(infop->addr, sizeof(MUTEX), MUTEX_ALIGN, storep);
- R_UNLOCK(dbenv, infop);
+#if defined(MUTEX_NO_MALLOC_LOCKS) || defined(HAVE_MUTEX_SYSTEM_RESOURCES)
+ if (!LF_ISSET(MUTEX_NO_RLOCK))
+ R_LOCK(dbenv, infop);
+#endif
+ /*
+ * Allocate the mutex if they asked us to.
+ */
+ mutex = NULL;
+ if (LF_ISSET(MUTEX_ALLOC)) {
+ if ((ret = __db_mutex_alloc_int(dbenv, infop, ptr)) != 0)
+ goto err;
+ mutex = *(DB_MUTEX **)ptr;
+ } else
+ mutex = (DB_MUTEX *)ptr;
+
+ /*
+ * Set up to initialize the mutex.
+ */
+ iflags = LF_ISSET(MUTEX_THREAD | MUTEX_SELF_BLOCK);
+ switch (infop->type) {
+ case REGION_TYPE_LOCK:
+ offset = P_TO_UINT32(mutex) + DB_FCNTL_OFF_LOCK;
+ break;
+ case REGION_TYPE_MPOOL:
+ offset = P_TO_UINT32(mutex) + DB_FCNTL_OFF_MPOOL;
+ break;
+ default:
+ offset = P_TO_UINT32(mutex) + DB_FCNTL_OFF_GEN;
+ break;
+ }
+ maint = NULL;
+#ifdef HAVE_MUTEX_SYSTEM_RESOURCES
+ if (!LF_ISSET(MUTEX_NO_RECORD))
+ maint = (REGMAINT *)__db_mutex_maint(dbenv, infop);
+#endif
+
+ ret = __db_mutex_init(dbenv, mutex, offset, iflags, infop, maint);
+err:
+#if defined(MUTEX_NO_MALLOC_LOCKS) || defined(HAVE_MUTEX_SYSTEM_RESOURCES)
+ if (!LF_ISSET(MUTEX_NO_RLOCK))
+ R_UNLOCK(dbenv, infop);
+#endif
+ /*
+ * If we allocated the mutex but had an error initializing it,
+ * then we must free it before returning.
+ * !!!
+ * The free must be done after releasing the region lock.
+ */
+ if (ret != 0 && LF_ISSET(MUTEX_ALLOC) && mutex != NULL) {
+ __db_mutex_free(dbenv, infop, mutex);
+ *(DB_MUTEX **)ptr = NULL;
+ }
+ return (ret);
+}
+
+/*
+ * __db_mutex_alloc_int --
+ * Allocate and initialize a mutex.
+ */
+static int
+__db_mutex_alloc_int(dbenv, infop, storep)
+ DB_ENV *dbenv;
+ REGINFO *infop;
+ DB_MUTEX **storep;
+{
+ int ret;
+
+ /*
+ * If the architecture supports mutexes in heap memory, use heap memory.
+ * If it doesn't, we have to allocate space in a region. If allocation
+ * in the region fails, fall back to allocating from the mpool region,
+ * because it's big, it almost always exists, and if it's entirely dirty,
+ * we can free buffers until memory is available.
+ */
+#if defined(MUTEX_NO_MALLOC_LOCKS) || defined(HAVE_MUTEX_SYSTEM_RESOURCES)
+ ret = __db_shalloc(infop->addr, sizeof(DB_MUTEX), MUTEX_ALIGN, storep);
+
+ if (ret == ENOMEM && MPOOL_ON(dbenv)) {
+ DB_MPOOL *dbmp;
+
+ dbmp = dbenv->mp_handle;
+ if ((ret = __memp_alloc(dbmp,
+ dbmp->reginfo, NULL, sizeof(DB_MUTEX), NULL, storep)) == 0)
+ (*storep)->flags = MUTEX_MPOOL;
+ } else
+ (*storep)->flags = 0;
#else
COMPQUIET(dbenv, NULL);
COMPQUIET(infop, NULL);
- ret = __os_calloc(dbenv, 1, sizeof(MUTEX), storep);
+ ret = __os_calloc(dbenv, 1, sizeof(DB_MUTEX), storep);
#endif
if (ret != 0)
__db_err(dbenv, "Unable to allocate memory for mutex");
@@ -63,41 +157,47 @@ __db_mutex_alloc(dbenv, infop, storep)
* __db_mutex_free --
* Free a mutex.
*
- * PUBLIC: void __db_mutex_free __P((DB_ENV *, REGINFO *, MUTEX *));
+ * PUBLIC: void __db_mutex_free __P((DB_ENV *, REGINFO *, DB_MUTEX *));
*/
void
__db_mutex_free(dbenv, infop, mutexp)
DB_ENV *dbenv;
REGINFO *infop;
- MUTEX *mutexp;
+ DB_MUTEX *mutexp;
{
+#if defined(MUTEX_NO_MALLOC_LOCKS) || defined(HAVE_MUTEX_SYSTEM_RESOURCES)
+ R_LOCK(dbenv, infop);
+#if defined(HAVE_MUTEX_SYSTEM_RESOURCES)
if (F_ISSET(mutexp, MUTEX_INITED))
- __db_mutex_destroy(mutexp);
+ __db_shlocks_clear(mutexp, infop, NULL);
+#endif
+ if (F_ISSET(mutexp, MUTEX_MPOOL)) {
+ DB_MPOOL *dbmp;
-#ifdef MUTEX_NO_MALLOC_LOCKS
- R_LOCK(dbenv, infop);
- __db_shalloc_free(infop->addr, mutexp);
+ dbmp = dbenv->mp_handle;
+ R_LOCK(dbenv, dbmp->reginfo);
+ __db_shalloc_free(dbmp->reginfo[0].addr, mutexp);
+ R_UNLOCK(dbenv, dbmp->reginfo);
+ } else
+ __db_shalloc_free(infop->addr, mutexp);
R_UNLOCK(dbenv, infop);
#else
COMPQUIET(dbenv, NULL);
COMPQUIET(infop, NULL);
- __os_free(mutexp, sizeof(*mutexp));
+ __os_free(dbenv, mutexp);
#endif
}
-#ifdef MUTEX_SYSTEM_RESOURCES
+#ifdef HAVE_MUTEX_SYSTEM_RESOURCES
/*
* __db_shreg_locks_record --
* Record an entry in the shared locks area.
* Region lock must be held in caller.
- *
- * PUBLIC: int __db_shreg_locks_record __P((DB_ENV *, MUTEX *, REGINFO *,
- * PUBLIC: REGMAINT *));
*/
-int
+static int
__db_shreg_locks_record(dbenv, mutexp, infop, rp)
DB_ENV *dbenv;
- MUTEX *mutexp;
+ DB_MUTEX *mutexp;
REGINFO *infop;
REGMAINT *rp;
{
@@ -110,7 +210,7 @@ __db_shreg_locks_record(dbenv, mutexp, infop, rp)
i = (roff_t *)R_ADDR(infop, rp->regmutex_hint) - &rp->regmutexes[0];
if (rp->regmutexes[i] != INVALID_ROFF) {
/*
- * Our hint failed, search for a open slot.
+ * Our hint failed, search for an open slot.
*/
rp->stat.st_hint_miss++;
for (i = 0; i < rp->reglocks; i++)
@@ -140,29 +240,33 @@ __db_shreg_locks_record(dbenv, mutexp, infop, rp)
/*
* __db_shreg_locks_clear --
* Erase an entry in the shared locks area.
- * Region lock must be held in caller.
*
- * PUBLIC: void __db_shreg_locks_clear __P((MUTEX *, REGINFO *, REGMAINT *));
+ * PUBLIC: void __db_shreg_locks_clear __P((DB_MUTEX *, REGINFO *, REGMAINT *));
*/
void
__db_shreg_locks_clear(mutexp, infop, rp)
- MUTEX *mutexp;
+ DB_MUTEX *mutexp;
REGINFO *infop;
REGMAINT *rp;
{
+ /*
+ * !!!
+ * Assumes the caller's region lock is held.
+ */
if (!F_ISSET(mutexp, MUTEX_INITED))
return;
/*
- * This function is generally only called on a forcible
- * remove of an environment. We recorded our index in
- * the mutex. Find it and clear it.
+ * This function is generally only called on a forcible remove of an
+ * environment. We recorded our index in the mutex; find and clear it.
*/
DB_ASSERT(mutexp->reg_off != INVALID_ROFF);
DB_ASSERT(*(roff_t *)R_ADDR(infop, mutexp->reg_off) == \
R_OFFSET(infop, mutexp));
*(roff_t *)R_ADDR(infop, mutexp->reg_off) = 0;
- rp->regmutex_hint = mutexp->reg_off;
- rp->stat.st_clears++;
+ if (rp != NULL) {
+ rp->regmutex_hint = mutexp->reg_off;
+ rp->stat.st_clears++;
+ }
mutexp->reg_off = INVALID_ROFF;
__db_mutex_destroy(mutexp);
}
@@ -186,7 +290,7 @@ __db_shreg_locks_destroy(infop, rp)
for (i = 0; i < rp->reglocks; i++)
if (rp->regmutexes[i] != 0) {
rp->stat.st_destroys++;
- __db_mutex_destroy((MUTEX *)R_ADDR(infop,
+ __db_mutex_destroy((DB_MUTEX *)R_ADDR(infop,
rp->regmutexes[i]));
}
}
@@ -195,13 +299,13 @@ __db_shreg_locks_destroy(infop, rp)
* __db_shreg_mutex_init --
* Initialize a shared memory mutex.
*
- * PUBLIC: int __db_shreg_mutex_init __P((DB_ENV *, MUTEX *, u_int32_t,
+ * PUBLIC: int __db_shreg_mutex_init __P((DB_ENV *, DB_MUTEX *, u_int32_t,
* PUBLIC: u_int32_t, REGINFO *, REGMAINT *));
*/
int
__db_shreg_mutex_init(dbenv, mutexp, offset, flags, infop, rp)
DB_ENV *dbenv;
- MUTEX *mutexp;
+ DB_MUTEX *mutexp;
u_int32_t offset;
u_int32_t flags;
REGINFO *infop;
@@ -209,18 +313,23 @@ __db_shreg_mutex_init(dbenv, mutexp, offset, flags, infop, rp)
{
int ret;
- if ((ret = __db_mutex_init(dbenv, mutexp, offset, flags)) != 0)
+ if ((ret = __db_mutex_init_int(dbenv, mutexp, offset, flags)) != 0)
+ return (ret);
+ /*
+ * Some mutexes cannot be recorded, but we want one interface.
+ * So, if we have no REGMAINT, then just return.
+ */
+ if (rp == NULL)
return (ret);
/*
* !!!
- * Since __db_mutex_init is a macro, we may not be
+ * Since __db_mutex_init_int is a macro, we may not be
* using the 'offset' as it is only used for one type
* of mutex. We COMPQUIET it here, after the call above.
*/
COMPQUIET(offset, 0);
+ ret = __db_shreg_locks_record(dbenv, mutexp, infop, rp);
- if (!F_ISSET(mutexp, MUTEX_THREAD))
- ret = __db_shreg_locks_record(dbenv, mutexp, infop, rp);
/*
* If we couldn't record it and we are returning an error,
* we need to destroy the mutex we just created.
@@ -244,10 +353,43 @@ __db_shreg_maintinit(infop, addr, size)
size_t size;
{
REGMAINT *rp;
+ u_int32_t i;
rp = (REGMAINT *)addr;
memset(addr, 0, sizeof(REGMAINT));
rp->reglocks = size / sizeof(roff_t);
rp->regmutex_hint = R_OFFSET(infop, &rp->regmutexes[0]);
+ for (i = 0; i < rp->reglocks; i++)
+ rp->regmutexes[i] = INVALID_ROFF;
+}
+
+static REGMAINT *
+__db_mutex_maint(dbenv, infop)
+ DB_ENV *dbenv;
+ REGINFO *infop;
+{
+ roff_t moff;
+
+ switch (infop->type) {
+ case REGION_TYPE_LOCK:
+ moff = ((DB_LOCKREGION *)R_ADDR(infop,
+ infop->rp->primary))->maint_off;
+ break;
+ case REGION_TYPE_LOG:
+ moff = ((LOG *)R_ADDR(infop, infop->rp->primary))->maint_off;
+ break;
+ case REGION_TYPE_MPOOL:
+ moff = ((MPOOL *)R_ADDR(infop, infop->rp->primary))->maint_off;
+ break;
+ case REGION_TYPE_TXN:
+ moff = ((DB_TXNREGION *)R_ADDR(infop,
+ infop->rp->primary))->maint_off;
+ break;
+ default:
+ __db_err(dbenv,
+ "Attempting to record mutex in a region not set up to do so");
+ return (NULL);
+ }
+ return ((REGMAINT *)R_ADDR(infop, moff));
}
-#endif /* MUTEX_SYSTEM_RESOURCES */
+#endif /* HAVE_MUTEX_SYSTEM_RESOURCES */
diff --git a/bdb/mutex/tm.c b/bdb/mutex/tm.c
new file mode 100644
index 00000000000..4af1b1907a8
--- /dev/null
+++ b/bdb/mutex/tm.c
@@ -0,0 +1,627 @@
+/*
+ * Standalone mutex tester for Berkeley DB mutexes.
+ */
+#include "db_config.h"
+
+#include <sys/types.h>
+#include <sys/mman.h>
+#include <sys/stat.h>
+#include <sys/wait.h>
+
+#include <errno.h>
+#include <fcntl.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+
+#if defined(HAVE_MUTEX_PTHREADS) || defined(BUILD_PTHREADS_ANYWAY)
+#include <pthread.h>
+#endif
+
+#include "db_int.h"
+
+void exec_proc();
+void tm_file_init();
+void map_file();
+void run_proc();
+void *run_thread();
+void *run_thread_wake();
+void tm_mutex_destroy();
+void tm_mutex_init();
+void tm_mutex_stats();
+void unmap_file();
+
+#define MUTEX_WAKEME 0x80 /* Wake-me flag. */
+
+DB_ENV dbenv; /* Fake out DB. */
+size_t len; /* Backing file size. */
+int align; /* Mutex alignment in file. */
+int quit; /* End-of-test flag. */
+char *file = "mutex.file"; /* Backing file. */
+
+int maxlocks = 20; /* -l: Backing locks. */
+int nlocks = 10000; /* -n: Locks per processes. */
+int nprocs = 20; /* -p: Processes. */
+int child; /* -s: Slave. */
+int nthreads = 1; /* -t: Threads. */
+int verbose; /* -v: Verbosity. */
+
+int
+main(argc, argv)
+ int argc;
+ char *argv[];
+{
+ extern int optind;
+ extern char *optarg;
+ pid_t pid;
+ int ch, eval, i, status;
+ char *tmpath;
+
+ tmpath = argv[0];
+ while ((ch = getopt(argc, argv, "l:n:p:st:v")) != EOF)
+ switch(ch) {
+ case 'l':
+ maxlocks = atoi(optarg);
+ break;
+ case 'n':
+ nlocks = atoi(optarg);
+ break;
+ case 'p':
+ nprocs = atoi(optarg);
+ break;
+ case 's':
+ child = 1;
+ break;
+ case 't':
+ nthreads = atoi(optarg);
+#if !defined(HAVE_MUTEX_PTHREADS) && !defined(BUILD_PTHREADS_ANYWAY)
+ if (nthreads != 1) {
+ (void)fprintf(stderr,
+ "tm: pthreads not available or not compiled for this platform.\n");
+ return (EXIT_FAILURE);
+ }
+#endif
+ break;
+ case 'v':
+ verbose = 1;
+ break;
+ case '?':
+ default:
+ (void)fprintf(stderr,
+ "usage: tm [-v] [-l maxlocks] [-n locks] [-p procs] [-t threads]\n");
+ return (EXIT_FAILURE);
+ }
+ argc -= optind;
+ argv += optind;
+
+ /*
+ * The file layout:
+ * DB_MUTEX[1] per-thread mutex array lock
+ * DB_MUTEX[nthreads] per-thread mutex array
+ * DB_MUTEX[maxlocks] per-lock mutex array
+ * u_long[maxlocks][2] per-lock ID array
+ */
+ align = ALIGN(sizeof(DB_MUTEX) * 2, MUTEX_ALIGN);
+ len =
+ align * (1 + nthreads + maxlocks) + sizeof(u_long) * maxlocks * 2;
+ printf(
+ "mutex alignment %d, structure alignment %d, backing file %lu bytes\n",
+ MUTEX_ALIGN, align, (u_long)len);
+
+ if (child) {
+ run_proc();
+ return (EXIT_SUCCESS);
+ }
+
+ tm_file_init();
+ tm_mutex_init();
+
+ printf(
+ "%d proc, %d threads/proc, %d lock requests from %d locks:\n",
+ nprocs, nthreads, nlocks, maxlocks);
+ for (i = 0; i < nprocs; ++i)
+ switch (fork()) {
+ case -1:
+ perror("fork");
+ return (EXIT_FAILURE);
+ case 0:
+ exec_proc(tmpath);
+ break;
+ default:
+ break;
+ }
+
+ eval = EXIT_SUCCESS;
+ while ((pid = wait(&status)) != (pid_t)-1) {
+ fprintf(stderr,
+ "%lu: exited %d\n", (u_long)pid, WEXITSTATUS(status));
+ if (WEXITSTATUS(status) != 0)
+ eval = EXIT_FAILURE;
+ }
+
+ tm_mutex_stats();
+ tm_mutex_destroy();
+
+ printf("tm: exit status: %s\n",
+ eval == EXIT_SUCCESS ? "success" : "failed!");
+ return (eval);
+}
+
+void
+exec_proc(tmpath)
+ char *tmpath;
+{
+ char *argv[10], **ap, b_l[10], b_n[10], b_t[10];
+
+ ap = &argv[0];
+ *ap++ = "tm";
+ sprintf(b_l, "-l%d", maxlocks);
+ *ap++ = b_l;
+ sprintf(b_n, "-n%d", nlocks);
+ *ap++ = b_n;
+ *ap++ = "-s";
+ sprintf(b_t, "-t%d", nthreads);
+ *ap++ = b_t;
+ if (verbose)
+ *ap++ = "-v";
+
+ *ap = NULL;
+ execvp(tmpath, argv);
+
+ fprintf(stderr, "%s: %s\n", tmpath, strerror(errno));
+ exit(EXIT_FAILURE);
+}
+
+void
+run_proc()
+{
+#if defined(HAVE_MUTEX_PTHREADS) || defined(BUILD_PTHREADS_ANYWAY)
+ pthread_t *kidsp, wakep;
+ int i, status;
+ void *retp;
+#endif
+ __os_sleep(&dbenv, 3, 0); /* Let everyone catch up. */
+
+ srand((u_int)time(NULL) / getpid()); /* Initialize random numbers. */
+
+ if (nthreads == 1) /* Simple case. */
+ exit((int)run_thread((void *)0));
+
+#if defined(HAVE_MUTEX_PTHREADS) || defined(BUILD_PTHREADS_ANYWAY)
+ /*
+ * Spawn off threads. We have nthreads all locking and going to
+ * sleep, and one other thread cycling through and waking them up.
+ */
+ if ((kidsp =
+ (pthread_t *)calloc(sizeof(pthread_t), nthreads)) == NULL) {
+ fprintf(stderr, "tm: %s\n", strerror(errno));
+ exit(EXIT_FAILURE);
+ }
+ for (i = 0; i < nthreads; i++)
+ if ((errno = pthread_create(
+ &kidsp[i], NULL, run_thread, (void *)i)) != 0) {
+ fprintf(stderr, "tm: failed spawning thread %d: %s\n",
+ i, strerror(errno));
+ exit(EXIT_FAILURE);
+ }
+
+ if ((errno = pthread_create(
+ &wakep, NULL, run_thread_wake, (void *)0)) != 0) {
+ fprintf(stderr, "tm: failed spawning wakeup thread: %s\n",
+ strerror(errno));
+ exit(EXIT_FAILURE);
+ }
+
+ /* Wait for the threads to exit. */
+ status = 0;
+ for (i = 0; i < nthreads; i++) {
+ pthread_join(kidsp[i], &retp);
+ if (retp != NULL) {
+ fprintf(stderr,
+ "tm: thread %d exited with error\n", i);
+ status = EXIT_FAILURE;
+ }
+ }
+ free(kidsp);
+
+ /* Signal wakeup thread to stop. */
+ quit = 1;
+ pthread_join(wakep, &retp);
+ if (retp != NULL) {
+ fprintf(stderr, "tm: wakeup thread exited with error\n");
+ status = EXIT_FAILURE;
+ }
+
+ exit(status);
+#endif
+}
+
+void *
+run_thread(arg)
+ void *arg;
+{
+ DB_MUTEX *gm_addr, *lm_addr, *tm_addr, *mp;
+ u_long gid1, gid2, *id_addr;
+ int fd, i, lock, id, nl, remap;
+
+ /* Set local and global per-thread ID. */
+ id = (int)arg;
+ gid1 = (u_long)getpid();
+#if defined(HAVE_MUTEX_PTHREADS) || defined(BUILD_PTHREADS_ANYWAY)
+ gid2 = (u_long)pthread_self();
+#else
+ gid2 = 0;
+#endif
+ printf("\tPID: %lu; TID: %lx; ID: %d\n", gid1, gid2, id);
+
+ nl = nlocks;
+ for (gm_addr = NULL, remap = 0;;) {
+ /* Map in the file as necessary. */
+ if (gm_addr == NULL) {
+ map_file(&gm_addr, &tm_addr, &lm_addr, &id_addr, &fd);
+ remap = (rand() % 100) + 35;
+ }
+
+ /* Select and acquire a data lock. */
+ lock = rand() % maxlocks;
+ mp = (DB_MUTEX *)((u_int8_t *)lm_addr + lock * align);
+ if (verbose)
+ printf("%lu/%lx: %03d\n", gid1, gid2, lock);
+
+ if (__db_mutex_lock(&dbenv, mp)) {
+ fprintf(stderr,
+ "%lu/%lx: never got lock\n", gid1, gid2);
+ return ((void *)EXIT_FAILURE);
+ }
+ if (id_addr[lock * 2] != 0) {
+ fprintf(stderr,
+ "RACE! (%lu/%lx granted lock %d held by %lu/%lx)\n",
+ gid1, gid2,
+ lock, id_addr[lock * 2], id_addr[lock * 2 + 1]);
+ return ((void *)EXIT_FAILURE);
+ }
+ id_addr[lock * 2] = gid1;
+ id_addr[lock * 2 + 1] = gid2;
+
+ /*
+ * Pretend to do some work, periodically checking to see if
+ * we still hold the mutex.
+ */
+ for (i = 0; i < 3; ++i) {
+ __os_sleep(&dbenv, 0, rand() % 3);
+ if (id_addr[lock * 2] != gid1 ||
+ id_addr[lock * 2 + 1] != gid2) {
+ fprintf(stderr,
+ "RACE! (%lu/%lx stole lock %d from %lu/%lx)\n",
+ id_addr[lock * 2],
+ id_addr[lock * 2 + 1], lock, gid1, gid2);
+ return ((void *)EXIT_FAILURE);
+ }
+ }
+
+#if defined(HAVE_MUTEX_PTHREADS) || defined(BUILD_PTHREADS_ANYWAY)
+ /*
+ * Test self-blocking and unlocking by other threads/processes:
+ *
+ * acquire the global lock
+ * set our wakeup flag
+ * release the global lock
+ * acquire our per-thread lock
+ *
+ * The wakeup thread will wake us up.
+ */
+ if (__db_mutex_lock(&dbenv, gm_addr)) {
+ fprintf(stderr, "%lu/%lx: global lock\n", gid1, gid2);
+ return ((void *)EXIT_FAILURE);
+ }
+ mp = (DB_MUTEX *)((u_int8_t *)tm_addr + id * align);
+ F_SET(mp, MUTEX_WAKEME);
+ if (__db_mutex_unlock(&dbenv, gm_addr)) {
+ fprintf(stderr,
+ "%lu/%lx: per-thread wakeup failed\n", gid1, gid2);
+ return ((void *)EXIT_FAILURE);
+ }
+ if (__db_mutex_lock(&dbenv, mp)) {
+ fprintf(stderr,
+ "%lu/%lx: per-thread lock\n", gid1, gid2);
+ return ((void *)EXIT_FAILURE);
+ }
+ /* Time passes... */
+ if (F_ISSET(mp, MUTEX_WAKEME)) {
+ fprintf(stderr, "%lu/%lx: %03d wakeup flag still set\n",
+ gid1, gid2, id);
+ return ((void *)EXIT_FAILURE);
+ }
+#endif
+
+ /* Release the data lock. */
+ id_addr[lock * 2] = id_addr[lock * 2 + 1] = 0;
+ mp = (DB_MUTEX *)((u_int8_t *)lm_addr + lock * align);
+ if (__db_mutex_unlock(&dbenv, mp)) {
+ fprintf(stderr, "%lu/%lx: wakeup failed\n", gid1, gid2);
+ return ((void *)EXIT_FAILURE);
+ }
+
+ if (--nl % 100 == 0)
+ fprintf(stderr, "%lu/%lx: %d\n", gid1, gid2, nl);
+
+ if (nl == 0 || --remap == 0) {
+ unmap_file((void *)gm_addr, fd);
+ gm_addr = NULL;
+
+ if (nl == 0)
+ break;
+
+ __os_sleep(&dbenv, rand() % 3, 0);
+ }
+ }
+
+ return (NULL);
+}
+
+#if defined(HAVE_MUTEX_PTHREADS) || defined(BUILD_PTHREADS_ANYWAY)
+/*
+ * run_thread_wake --
+ * Thread to wake up other threads that are sleeping.
+ */
+void *
+run_thread_wake(arg)
+ void *arg;
+{
+ DB_MUTEX *gm_addr, *tm_addr, *mp;
+ int fd, id;
+
+ arg = NULL;
+ map_file(&gm_addr, &tm_addr, NULL, NULL, &fd);
+
+ /* Loop, waking up sleepers and periodically sleeping ourselves. */
+ while (!quit) {
+ id = 0;
+
+ /* Acquire the global lock. */
+retry: if (__db_mutex_lock(&dbenv, gm_addr)) {
+ fprintf(stderr, "wt: global lock failed\n");
+ return ((void *)EXIT_FAILURE);
+ }
+
+next: mp = (DB_MUTEX *)((u_int8_t *)tm_addr + id * align);
+ if (F_ISSET(mp, MUTEX_WAKEME)) {
+ F_CLR(mp, MUTEX_WAKEME);
+ if (__db_mutex_unlock(&dbenv, mp)) {
+ fprintf(stderr, "wt: wakeup failed\n");
+ return ((void *)EXIT_FAILURE);
+ }
+ }
+
+ if (++id < nthreads && id % 3 != 0)
+ goto next;
+
+ if (__db_mutex_unlock(&dbenv, gm_addr)) {
+ fprintf(stderr, "wt: global unlock failed\n");
+ return ((void *)EXIT_FAILURE);
+ }
+
+ __os_sleep(&dbenv, 0, 500);
+
+ if (id < nthreads)
+ goto retry;
+ }
+ return (NULL);
+}
+#endif
+
+/*
+ * tm_file_init --
+ * Initialize the backing file.
+ */
+void
+tm_file_init()
+{
+ int fd;
+
+ /* Initialize the backing file. */
+ printf("Create the backing file...\n");
+#ifdef HAVE_QNX
+ (void)shm_unlink(file);
+ if ((fd = shm_open(file, O_CREAT | O_RDWR | O_TRUNC,
+#else
+ (void)remove(file);
+ if ((fd = open(file, O_CREAT | O_RDWR | O_TRUNC,
+#endif
+ S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP | S_IROTH | S_IWOTH)) == -1) {
+ (void)fprintf(stderr, "%s: open: %s\n", file, strerror(errno));
+ exit(EXIT_FAILURE);
+ }
+ if (lseek(fd, (off_t)len, SEEK_SET) != len || write(fd, &fd, 1) != 1) {
+ (void)fprintf(stderr,
+ "%s: seek/write: %s\n", file, strerror(errno));
+ exit(EXIT_FAILURE);
+ }
+ (void)close(fd);
+}
+
+/*
+ * tm_mutex_init --
+ * Initialize the mutexes.
+ */
+void
+tm_mutex_init()
+{
+ DB_MUTEX *gm_addr, *lm_addr, *mp, *tm_addr;
+ int fd, i;
+
+ map_file(&gm_addr, &tm_addr, &lm_addr, NULL, &fd);
+
+ printf("Initialize the global mutex...\n");
+ if (__db_mutex_init_int(&dbenv, gm_addr, 0, 0)) {
+ fprintf(stderr,
+ "__db_mutex_init (global): %s\n", strerror(errno));
+ exit(EXIT_FAILURE);
+ }
+
+ printf("Initialize the per-thread mutexes...\n");
+ for (i = 1, mp = tm_addr;
+ i <= nthreads; ++i, mp = (DB_MUTEX *)((u_int8_t *)mp + align)) {
+ if (__db_mutex_init_int(&dbenv, mp, 0, MUTEX_SELF_BLOCK)) {
+ fprintf(stderr, "__db_mutex_init (per-thread %d): %s\n",
+ i, strerror(errno));
+ exit(EXIT_FAILURE);
+ }
+ if (__db_mutex_lock(&dbenv, mp)) {
+ fprintf(stderr,
+ "__db_mutex_init (per-thread %d) lock: %s\n",
+ i, strerror(errno));
+ exit(EXIT_FAILURE);
+ }
+ }
+
+ printf("Initialize the per-lock mutexes...\n");
+ for (i = 1, mp = lm_addr;
+ i <= maxlocks; ++i, mp = (DB_MUTEX *)((u_int8_t *)mp + align))
+ if (__db_mutex_init_int(&dbenv, mp, 0, 0)) {
+ fprintf(stderr, "__db_mutex_init (per-lock: %d): %s\n",
+ i, strerror(errno));
+ exit(EXIT_FAILURE);
+ }
+
+ unmap_file((void *)gm_addr, fd);
+}
+
+/*
+ * tm_mutex_destroy --
+ * Destroy the mutexes.
+ */
+void
+tm_mutex_destroy()
+{
+ DB_MUTEX *gm_addr, *lm_addr, *mp, *tm_addr;
+ int fd, i;
+
+ map_file(&gm_addr, &tm_addr, &lm_addr, NULL, &fd);
+
+ printf("Destroy the global mutex...\n");
+ if (__db_mutex_destroy(gm_addr)) {
+ fprintf(stderr,
+ "__db_mutex_destroy (global): %s\n", strerror(errno));
+ exit(EXIT_FAILURE);
+ }
+
+ printf("Destroy the per-thread mutexes...\n");
+ for (i = 1, mp = tm_addr;
+ i <= nthreads; ++i, mp = (DB_MUTEX *)((u_int8_t *)mp + align)) {
+ if (__db_mutex_destroy(mp)) {
+ fprintf(stderr,
+ "__db_mutex_destroy (per-thread %d): %s\n",
+ i, strerror(errno));
+ exit(EXIT_FAILURE);
+ }
+ }
+
+ printf("Destroy the per-lock mutexes...\n");
+ for (i = 1, mp = lm_addr;
+ i <= maxlocks; ++i, mp = (DB_MUTEX *)((u_int8_t *)mp + align))
+ if (__db_mutex_destroy(mp)) {
+ fprintf(stderr,
+ "__db_mutex_destroy (per-lock: %d): %s\n",
+ i, strerror(errno));
+ exit(EXIT_FAILURE);
+ }
+
+ unmap_file((void *)gm_addr, fd);
+#ifdef HAVE_QNX
+ (void)shm_unlink(file);
+#endif
+}
+
+/*
+ * tm_mutex_stats --
+ * Display mutex statistics.
+ */
+void
+tm_mutex_stats()
+{
+ DB_MUTEX *gm_addr, *lm_addr, *mp;
+ int fd, i;
+
+ map_file(&gm_addr, NULL, &lm_addr, NULL, &fd);
+
+ printf("Per-lock mutex statistics...\n");
+ for (i = 1, mp = lm_addr;
+ i <= maxlocks; ++i, mp = (DB_MUTEX *)((u_int8_t *)mp + align))
+ printf("mutex %2d: wait: %lu; no wait %lu\n", i,
+ (u_long)mp->mutex_set_wait, (u_long)mp->mutex_set_nowait);
+
+ unmap_file((void *)gm_addr, fd);
+}
+
+/*
+ * map_file --
+ * Map in the backing file.
+ */
+void
+map_file(gm_addrp, tm_addrp, lm_addrp, id_addrp, fdp)
+ DB_MUTEX **gm_addrp, **tm_addrp, **lm_addrp;
+ u_long **id_addrp;
+ int *fdp;
+{
+ void *maddr;
+ int fd;
+
+#ifndef MAP_FAILED
+#define MAP_FAILED (void *)-1
+#endif
+#ifndef MAP_FILE
+#define MAP_FILE 0
+#endif
+#ifdef HAVE_QNX
+ if ((fd = shm_open(file, O_RDWR, 0)) == -1) {
+#else
+ if ((fd = open(file, O_RDWR, 0)) == -1) {
+#endif
+ fprintf(stderr, "%s: open %s\n", file, strerror(errno));
+ exit(EXIT_FAILURE);
+ }
+
+ maddr = mmap(NULL, len,
+ PROT_READ | PROT_WRITE, MAP_FILE | MAP_SHARED, fd, (off_t)0);
+ if (maddr == MAP_FAILED) {
+ fprintf(stderr, "%s: mmap: %s\n", file, strerror(errno));
+ exit(EXIT_FAILURE);
+ }
+
+ if (gm_addrp != NULL)
+ *gm_addrp = (DB_MUTEX *)maddr;
+ maddr = (u_int8_t *)maddr + align;
+ if (tm_addrp != NULL)
+ *tm_addrp = (DB_MUTEX *)maddr;
+ maddr = (u_int8_t *)maddr + align * nthreads;
+ if (lm_addrp != NULL)
+ *lm_addrp = (DB_MUTEX *)maddr;
+ maddr = (u_int8_t *)maddr + align * maxlocks;
+ if (id_addrp != NULL)
+ *id_addrp = (u_long *)maddr;
+ if (fdp != NULL)
+ *fdp = fd;
+}
+
+/*
+ * unmap_file --
+ * Discard backing file map.
+ */
+void
+unmap_file(maddr, fd)
+ void *maddr;
+ int fd;
+{
+ if (munmap(maddr, len) != 0) {
+ fprintf(stderr, "munmap: %s\n", strerror(errno));
+ exit(EXIT_FAILURE);
+ }
+ if (close(fd) != 0) {
+ fprintf(stderr, "close: %s\n", strerror(errno));
+ exit(EXIT_FAILURE);
+ }
+}
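
[Illustrative sketch, not part of the patch: the test program above carves its
backing file into the four regions described in main()'s layout comment.  The
values below are made-up stand-ins for the computed slot size
ALIGN(sizeof(DB_MUTEX) * 2, MUTEX_ALIGN) and the -t/-l parameters; the point is
only how the offsets map_file() hands back relate to one another.]

#include <stdio.h>

int
main(void)
{
	size_t align = 32;	/* stand-in for the aligned mutex slot size */
	int nthreads = 4;	/* stand-in for -t */
	int maxlocks = 20;	/* stand-in for -l */

	size_t global_off = 0;				/* global mutex */
	size_t thread_off = align;			/* per-thread mutex array */
	size_t lock_off = align * (1 + nthreads);	/* per-lock mutex array */
	size_t id_off = align * (1 + nthreads + maxlocks); /* per-lock ID pairs */
	size_t total = id_off + sizeof(unsigned long) * maxlocks * 2;

	printf("global %zu, per-thread %zu, per-lock %zu, ids %zu, file %zu bytes\n",
	    global_off, thread_off, lock_off, id_off, total);
	return (0);
}
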
diff --git a/bdb/mutex/uts4_cc.s b/bdb/mutex/uts4_cc.s
index ee5f4143bde..9ebc45aad54 100644
--- a/bdb/mutex/uts4_cc.s
+++ b/bdb/mutex/uts4_cc.s
@@ -1,3 +1,9 @@
+ / See the file LICENSE for redistribution information.
+ /
+ / Copyright (c) 1997-2002
+ / Sleepycat Software. All rights reserved.
+ /
+ / $Id: uts4_cc.s,v 11.2 2002/04/25 13:42:14 bostic Exp $
/
/ int uts_lock ( int *p, int i );
/ Update the lock word pointed to by p with the
diff --git a/bdb/os/os_abs.c b/bdb/os/os_abs.c
index 04be9873360..cd7d0a5d2be 100644
--- a/bdb/os/os_abs.c
+++ b/bdb/os/os_abs.c
@@ -1,14 +1,14 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1997, 1998, 1999, 2000
+ * Copyright (c) 1997-2002
* Sleepycat Software. All rights reserved.
*/
#include "db_config.h"
#ifndef lint
-static const char revid[] = "$Id: os_abs.c,v 11.3 2000/02/14 03:00:04 bostic Exp $";
+static const char revid[] = "$Id: os_abs.c,v 11.5 2002/01/11 15:52:58 bostic Exp $";
#endif /* not lint */
#ifndef NO_SYSTEM_INCLUDES
diff --git a/bdb/os/os_alloc.c b/bdb/os/os_alloc.c
index ee4a0f3c91f..5b38cc7d6f1 100644
--- a/bdb/os/os_alloc.c
+++ b/bdb/os/os_alloc.c
@@ -1,14 +1,14 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1997, 1998, 1999, 2000
+ * Copyright (c) 1997-2002
* Sleepycat Software. All rights reserved.
*/
#include "db_config.h"
#ifndef lint
-static const char revid[] = "$Id: os_alloc.c,v 11.18 2000/11/30 00:58:42 ubell Exp $";
+static const char revid[] = "$Id: os_alloc.c,v 11.32 2002/08/06 04:57:07 bostic Exp $";
#endif /* not lint */
#ifndef NO_SYSTEM_INCLUDES
@@ -19,10 +19,14 @@ static const char revid[] = "$Id: os_alloc.c,v 11.18 2000/11/30 00:58:42 ubell E
#endif
#include "db_int.h"
-#include "os_jump.h"
#ifdef DIAGNOSTIC
-static void __os_guard __P((void));
+static void __os_guard __P((DB_ENV *));
+
+union __db_alloc {
+ size_t size;
+ double align;
+};
#endif
/*
@@ -37,12 +41,150 @@ static void __os_guard __P((void));
* !!!
* Correct for systems that don't set errno when malloc and friends fail.
*
+ * !!!
+ * There is no circumstance in which we can call __os_umalloc, __os_urealloc
+ * or __os_ufree without an environment handle, as we need one to determine
+ * whether or not to use an application-specified malloc function. If we
+ * don't have an environment handle, we should be calling __os_XXX instead.
+ * Make DIAGNOSTIC blow up if we get this wrong.
+ *
* Out of memory.
* We wish to hold the whole sky,
* But we never will.
*/
/*
+ * __os_umalloc --
+ * A malloc(3) function that will use, in order of preference,
+ * the allocation function specified to the DB handle, the DB_ENV
+ * handle, or __os_malloc.
+ *
+ * PUBLIC: int __os_umalloc __P((DB_ENV *, size_t, void *));
+ */
+int
+__os_umalloc(dbenv, size, storep)
+ DB_ENV *dbenv;
+ size_t size;
+ void *storep;
+{
+ int ret;
+
+ /* Require an environment handle. */
+ DB_ASSERT(dbenv != NULL);
+
+ /* Never allocate 0 bytes -- some C libraries don't like it. */
+ if (size == 0)
+ ++size;
+
+ if (dbenv == NULL || dbenv->db_malloc == NULL) {
+ if (DB_GLOBAL(j_malloc) != NULL)
+ *(void **)storep = DB_GLOBAL(j_malloc)(size);
+ else
+ *(void **)storep = malloc(size);
+ if (*(void **)storep == NULL) {
+ /*
+ * Correct error return, see __os_malloc.
+ */
+ if ((ret = __os_get_errno()) == 0) {
+ ret = ENOMEM;
+ __os_set_errno(ENOMEM);
+ }
+ __db_err(dbenv,
+ "malloc: %s: %lu", strerror(ret), (u_long)size);
+ return (ret);
+ }
+ return (0);
+ }
+
+ if ((*(void **)storep = dbenv->db_malloc(size)) == NULL) {
+ __db_err(dbenv, "User-specified malloc function returned NULL");
+ return (ENOMEM);
+ }
+
+ return (0);
+}
+
+/*
+ * __os_urealloc --
+ * realloc(3) counterpart to __os_umalloc.
+ *
+ * PUBLIC: int __os_urealloc __P((DB_ENV *, size_t, void *));
+ */
+int
+__os_urealloc(dbenv, size, storep)
+ DB_ENV *dbenv;
+ size_t size;
+ void *storep;
+{
+ int ret;
+ void *ptr;
+
+ ptr = *(void **)storep;
+
+ /* Require an environment handle. */
+ DB_ASSERT(dbenv != NULL);
+
+ /* Never allocate 0 bytes -- some C libraries don't like it. */
+ if (size == 0)
+ ++size;
+
+ if (dbenv == NULL || dbenv->db_realloc == NULL) {
+ if (ptr == NULL)
+ return (__os_umalloc(dbenv, size, storep));
+
+ if (DB_GLOBAL(j_realloc) != NULL)
+ *(void **)storep = DB_GLOBAL(j_realloc)(ptr, size);
+ else
+ *(void **)storep = realloc(ptr, size);
+ if (*(void **)storep == NULL) {
+ /*
+ * Correct errno, see __os_realloc.
+ */
+ if ((ret = __os_get_errno()) == 0) {
+ ret = ENOMEM;
+ __os_set_errno(ENOMEM);
+ }
+ __db_err(dbenv,
+ "realloc: %s: %lu", strerror(ret), (u_long)size);
+ return (ret);
+ }
+ return (0);
+ }
+
+ if ((*(void **)storep = dbenv->db_realloc(ptr, size)) == NULL) {
+ __db_err(dbenv,
+ "User-specified realloc function returned NULL");
+ return (ENOMEM);
+ }
+
+ return (0);
+}
+
+/*
+ * __os_ufree --
+ * free(3) counterpart to __os_umalloc.
+ *
+ * PUBLIC: int __os_ufree __P((DB_ENV *, void *));
+ */
+int
+__os_ufree(dbenv, ptr)
+ DB_ENV *dbenv;
+ void *ptr;
+{
+ /* Require an environment handle. */
+ DB_ASSERT(dbenv != NULL);
+
+ if (dbenv != NULL && dbenv->db_free != NULL)
+ dbenv->db_free(ptr);
+ else if (DB_GLOBAL(j_free) != NULL)
+ DB_GLOBAL(j_free)(ptr);
+ else
+ free(ptr);
+
+ return (0);
+}
+
+/*
* __os_strdup --
* The strdup(3) function for DB.
*
@@ -61,7 +203,7 @@ __os_strdup(dbenv, str, storep)
*(void **)storep = NULL;
size = strlen(str) + 1;
- if ((ret = __os_malloc(dbenv, size, NULL, &p)) != 0)
+ if ((ret = __os_malloc(dbenv, size, &p)) != 0)
return (ret);
memcpy(p, str, size);
@@ -86,7 +228,7 @@ __os_calloc(dbenv, num, size, storep)
int ret;
size *= num;
- if ((ret = __os_malloc(dbenv, size, NULL, &p)) != 0)
+ if ((ret = __os_malloc(dbenv, size, &p)) != 0)
return (ret);
memset(p, 0, size);
@@ -99,13 +241,13 @@ __os_calloc(dbenv, num, size, storep)
* __os_malloc --
* The malloc(3) function for DB.
*
- * PUBLIC: int __os_malloc __P((DB_ENV *, size_t, void *(*)(size_t), void *));
+ * PUBLIC: int __os_malloc __P((DB_ENV *, size_t, void *));
*/
int
-__os_malloc(dbenv, size, db_malloc, storep)
+__os_malloc(dbenv, size, storep)
DB_ENV *dbenv;
size_t size;
- void *(*db_malloc) __P((size_t)), *storep;
+ void *storep;
{
int ret;
void *p;
@@ -115,24 +257,26 @@ __os_malloc(dbenv, size, db_malloc, storep)
/* Never allocate 0 bytes -- some C libraries don't like it. */
if (size == 0)
++size;
+
#ifdef DIAGNOSTIC
- else
- ++size; /* Add room for a guard byte. */
+ /* Add room for size and a guard byte. */
+ size += sizeof(union __db_alloc) + 1;
#endif
- /* Some C libraries don't correctly set errno when malloc(3) fails. */
- __os_set_errno(0);
- if (db_malloc != NULL)
- p = db_malloc(size);
- else if (__db_jump.j_malloc != NULL)
- p = __db_jump.j_malloc(size);
+ if (DB_GLOBAL(j_malloc) != NULL)
+ p = DB_GLOBAL(j_malloc)(size);
else
p = malloc(size);
if (p == NULL) {
- ret = __os_get_errno();
- if (ret == 0) {
- __os_set_errno(ENOMEM);
+ /*
+ * Some C libraries don't correctly set errno when malloc(3)
+ * fails. We'd like to 0 out errno before calling malloc,
+ * but it turns out that setting errno is quite expensive on
+ * Windows/NT in an MT environment.
+ */
+ if ((ret = __os_get_errno()) == 0) {
ret = ENOMEM;
+ __os_set_errno(ENOMEM);
}
__db_err(dbenv,
"malloc: %s: %lu", strerror(ret), (u_long)size);
@@ -143,15 +287,12 @@ __os_malloc(dbenv, size, db_malloc, storep)
/*
* Guard bytes: if #DIAGNOSTIC is defined, we allocate an additional
* byte after the memory and set it to a special value that we check
- * for when the memory is free'd. This is fine for structures, but
- * not quite so fine for strings. There are places in DB where memory
- * is allocated sufficient to hold the largest possible string that
- * we'll see, and then only some subset of the memory is used. To
- * support this usage, the __os_freestr() function checks the byte
- * after the string's nul, which may or may not be the last byte in
- * the originally allocated memory.
+ * for when the memory is free'd.
*/
- memset(p, CLEAR_BYTE, size); /* Initialize guard byte. */
+ ((u_int8_t *)p)[size - 1] = CLEAR_BYTE;
+
+ ((union __db_alloc *)p)->size = size;
+ p = &((union __db_alloc *)p)[1];
#endif
*(void **)storep = p;
@@ -162,46 +303,50 @@ __os_malloc(dbenv, size, db_malloc, storep)
* __os_realloc --
* The realloc(3) function for DB.
*
- * PUBLIC: int __os_realloc __P((DB_ENV *,
- * PUBLIC: size_t, void *(*)(void *, size_t), void *));
+ * PUBLIC: int __os_realloc __P((DB_ENV *, size_t, void *));
*/
int
-__os_realloc(dbenv, size, db_realloc, storep)
+__os_realloc(dbenv, size, storep)
DB_ENV *dbenv;
size_t size;
- void *(*db_realloc) __P((void *, size_t)), *storep;
+ void *storep;
{
int ret;
void *p, *ptr;
ptr = *(void **)storep;
- /* If we haven't yet allocated anything yet, simply call malloc. */
- if (ptr == NULL && db_realloc == NULL)
- return (__os_malloc(dbenv, size, NULL, storep));
-
/* Never allocate 0 bytes -- some C libraries don't like it. */
if (size == 0)
++size;
+
+ /* If we haven't allocated anything yet, simply call malloc. */
+ if (ptr == NULL)
+ return (__os_malloc(dbenv, size, storep));
+
#ifdef DIAGNOSTIC
- else
- ++size; /* Add room for a guard byte. */
+ /* Add room for size and a guard byte. */
+ size += sizeof(union __db_alloc) + 1;
+
+ /* Back up to the real beginning */
+ ptr = &((union __db_alloc *)ptr)[-1];
#endif
/*
- * Some C libraries don't correctly set errno when realloc(3) fails.
- *
* Don't overwrite the original pointer, there are places in DB we
* try to continue after realloc fails.
*/
- __os_set_errno(0);
- if (db_realloc != NULL)
- p = db_realloc(ptr, size);
- else if (__db_jump.j_realloc != NULL)
- p = __db_jump.j_realloc(ptr, size);
+ if (DB_GLOBAL(j_realloc) != NULL)
+ p = DB_GLOBAL(j_realloc)(ptr, size);
else
p = realloc(ptr, size);
if (p == NULL) {
+ /*
+ * Some C libraries don't correctly set errno when realloc(3)
+ * fails. We'd like to 0 out errno before calling realloc,
+ * but it turns out that setting errno is quite expensive on
+ * Windows/NT in an MT environment.
+ */
if ((ret = __os_get_errno()) == 0) {
ret = ENOMEM;
__os_set_errno(ENOMEM);
@@ -212,6 +357,9 @@ __os_realloc(dbenv, size, db_realloc, storep)
}
#ifdef DIAGNOSTIC
((u_int8_t *)p)[size - 1] = CLEAR_BYTE; /* Initialize guard byte. */
+
+ ((union __db_alloc *)p)->size = size;
+ p = &((union __db_alloc *)p)[1];
#endif
*(void **)storep = p;
@@ -223,64 +371,35 @@ __os_realloc(dbenv, size, db_realloc, storep)
* __os_free --
* The free(3) function for DB.
*
- * PUBLIC: void __os_free __P((void *, size_t));
- */
-void
-__os_free(ptr, size)
- void *ptr;
- size_t size;
-{
-#ifdef DIAGNOSTIC
- if (size != 0) {
- /*
- * Check that the guard byte (one past the end of the memory) is
- * still CLEAR_BYTE.
- */
- if (((u_int8_t *)ptr)[size] != CLEAR_BYTE)
- __os_guard();
-
- /* Clear memory. */
- if (size != 0)
- memset(ptr, CLEAR_BYTE, size);
- }
-#else
- COMPQUIET(size, 0);
-#endif
-
- if (__db_jump.j_free != NULL)
- __db_jump.j_free(ptr);
- else
- free(ptr);
-}
-
-/*
- * __os_freestr --
- * The free(3) function for DB, freeing a string.
- *
- * PUBLIC: void __os_freestr __P((void *));
+ * PUBLIC: void __os_free __P((DB_ENV *, void *));
*/
void
-__os_freestr(ptr)
+__os_free(dbenv, ptr)
+ DB_ENV *dbenv;
void *ptr;
{
#ifdef DIAGNOSTIC
- size_t size;
-
- size = strlen(ptr) + 1;
-
+ int size;
/*
* Check that the guard byte (one past the end of the memory) is
* still CLEAR_BYTE.
*/
- if (((u_int8_t *)ptr)[size] != CLEAR_BYTE)
- __os_guard();
+ if (ptr == NULL)
+ return;
+
+ ptr = &((union __db_alloc *)ptr)[-1];
+ size = ((union __db_alloc *)ptr)->size;
+ if (((u_int8_t *)ptr)[size - 1] != CLEAR_BYTE)
+ __os_guard(dbenv);
/* Clear memory. */
- memset(ptr, CLEAR_BYTE, size);
+ if (size != 0)
+ memset(ptr, CLEAR_BYTE, size);
#endif
+ COMPQUIET(dbenv, NULL);
- if (__db_jump.j_free != NULL)
- __db_jump.j_free(ptr);
+ if (DB_GLOBAL(j_free) != NULL)
+ DB_GLOBAL(j_free)(ptr);
else
free(ptr);
}
@@ -291,13 +410,10 @@ __os_freestr(ptr)
* Complain and abort.
*/
static void
-__os_guard()
+__os_guard(dbenv)
+ DB_ENV *dbenv;
{
- /*
- * Eventually, once we push a DB_ENV handle down to these
- * routines, we should use the standard output channels.
- */
- fprintf(stderr, "Guard byte incorrect during free.\n");
+ __db_err(dbenv, "Guard byte incorrect during free");
abort();
/* NOTREACHED */
}
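
[Illustrative sketch, not part of the patch: the DIAGNOSTIC path added above
prepends a size header (a union, so the user pointer stays aligned) and
appends a guard byte, then checks and scribbles over the block on free.  The
same bookkeeping, stripped of the DB error plumbing, looks roughly like this;
GUARD stands in for CLEAR_BYTE and the function names are hypothetical.]

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define GUARD 0xdb

union header { size_t size; double align; };

static void *
guarded_malloc(size_t size)
{
	unsigned char *p;

	size += sizeof(union header) + 1;	/* room for header + guard byte */
	if ((p = malloc(size)) == NULL)
		return (NULL);
	p[size - 1] = GUARD;			/* guard byte past the user space */
	((union header *)p)->size = size;	/* remember the padded size */
	return (p + sizeof(union header));	/* hand out the user pointer */
}

static void
guarded_free(void *ptr)
{
	unsigned char *p = (unsigned char *)ptr - sizeof(union header);
	size_t size = ((union header *)p)->size;

	if (p[size - 1] != GUARD) {		/* overrun detection */
		fprintf(stderr, "guard byte incorrect during free\n");
		abort();
	}
	memset(p, GUARD, size);			/* catch use-after-free */
	free(p);
}
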
diff --git a/bdb/os/os_clock.c b/bdb/os/os_clock.c
new file mode 100644
index 00000000000..8da02cf6f9c
--- /dev/null
+++ b/bdb/os/os_clock.c
@@ -0,0 +1,92 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2001-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: os_clock.c,v 1.9 2002/03/29 20:46:44 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#if TIME_WITH_SYS_TIME
+#include <sys/time.h>
+#include <time.h>
+#else
+#if HAVE_SYS_TIME_H
+#include <sys/time.h>
+#else
+#include <time.h>
+#endif /* HAVE_SYS_TIME_H */
+#endif /* TIME_WITH_SYS_TIME */
+
+#include <string.h>
+#endif
+
+#include "db_int.h"
+
+/*
+ * __os_clock --
+ * Return the current time-of-day clock in seconds and microseconds.
+ *
+ * PUBLIC: int __os_clock __P((DB_ENV *, u_int32_t *, u_int32_t *));
+ */
+int
+__os_clock(dbenv, secsp, usecsp)
+ DB_ENV *dbenv;
+ u_int32_t *secsp, *usecsp; /* Seconds and microseconds. */
+{
+#if defined(HAVE_GETTIMEOFDAY)
+ struct timeval tp;
+ int ret;
+
+retry: if (gettimeofday(&tp, NULL) != 0) {
+ if ((ret = __os_get_errno()) == EINTR)
+ goto retry;
+ __db_err(dbenv, "gettimeofday: %s", strerror(ret));
+ return (ret);
+ }
+
+ if (secsp != NULL)
+ *secsp = tp.tv_sec;
+ if (usecsp != NULL)
+ *usecsp = tp.tv_usec;
+#endif
+#if !defined(HAVE_GETTIMEOFDAY) && defined(HAVE_CLOCK_GETTIME)
+ struct timespec tp;
+ int ret;
+
+retry: if (clock_gettime(CLOCK_REALTIME, &tp) != 0) {
+ if ((ret = __os_get_errno()) == EINTR)
+ goto retry;
+ __db_err(dbenv, "clock_gettime: %s", strerror(ret));
+ return (ret);
+ }
+
+ if (secsp != NULL)
+ *secsp = tp.tv_sec;
+ if (usecsp != NULL)
+ *usecsp = tp.tv_nsec / 1000;
+#endif
+#if !defined(HAVE_GETTIMEOFDAY) && !defined(HAVE_CLOCK_GETTIME)
+ time_t now;
+ int ret;
+
+ if (time(&now) == (time_t)-1) {
+ ret = __os_get_errno();
+ __db_err(dbenv, "time: %s", strerror(ret));
+ return (ret);
+ }
+
+ if (secsp != NULL)
+ *secsp = now;
+ if (usecsp != NULL)
+ *usecsp = 0;
+#endif
+ return (0);
+}
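
[Illustrative sketch, not part of the patch: a typical caller of a clock
routine shaped like __os_clock() above takes two readings and subtracts.
Plain gettimeofday() is used here so the example stands alone.]

#include <sys/time.h>
#include <stdio.h>

int
main(void)
{
	struct timeval start, end;
	long elapsed_us;

	(void)gettimeofday(&start, NULL);
	/* ... work being timed ... */
	(void)gettimeofday(&end, NULL);

	elapsed_us = (end.tv_sec - start.tv_sec) * 1000000L +
	    (end.tv_usec - start.tv_usec);
	printf("elapsed: %ld microseconds\n", elapsed_us);
	return (0);
}
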
diff --git a/bdb/os/os_config.c b/bdb/os/os_config.c
new file mode 100644
index 00000000000..b64952a8302
--- /dev/null
+++ b/bdb/os/os_config.c
@@ -0,0 +1,31 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1998-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: os_config.c,v 11.13 2002/01/31 19:54:12 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+#endif
+
+#include "db_int.h"
+
+/*
+ * __os_fs_notzero --
+ * Return 1 if allocated filesystem blocks are not zeroed.
+ *
+ * PUBLIC: int __os_fs_notzero __P((void));
+ */
+int
+__os_fs_notzero()
+{
+ /* Most filesystems zero out implicitly created pages. */
+ return (0);
+}
diff --git a/bdb/os/os_dir.c b/bdb/os/os_dir.c
index 50d00a5562f..3f59a23d963 100644
--- a/bdb/os/os_dir.c
+++ b/bdb/os/os_dir.c
@@ -1,14 +1,14 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1997, 1998, 1999, 2000
+ * Copyright (c) 1997-2002
* Sleepycat Software. All rights reserved.
*/
#include "db_config.h"
#ifndef lint
-static const char revid[] = "$Id: os_dir.c,v 11.8 2000/06/27 17:29:52 sue Exp $";
+static const char revid[] = "$Id: os_dir.c,v 11.14 2002/07/12 18:56:50 bostic Exp $";
#endif /* not lint */
#ifndef NO_SYSTEM_INCLUDES
@@ -34,7 +34,6 @@ static const char revid[] = "$Id: os_dir.c,v 11.8 2000/06/27 17:29:52 sue Exp $"
#endif
#include "db_int.h"
-#include "os_jump.h"
/*
* __os_dirlist --
@@ -54,8 +53,8 @@ __os_dirlist(dbenv, dir, namesp, cntp)
int arraysz, cnt, ret;
char **names;
- if (__db_jump.j_dirlist != NULL)
- return (__db_jump.j_dirlist(dir, namesp, cntp));
+ if (DB_GLOBAL(j_dirlist) != NULL)
+ return (DB_GLOBAL(j_dirlist)(dir, namesp, cntp));
#ifdef HAVE_VXWORKS
if ((dirp = opendir((char *)dir)) == NULL)
@@ -68,7 +67,7 @@ __os_dirlist(dbenv, dir, namesp, cntp)
if (cnt >= arraysz) {
arraysz += 100;
if ((ret = __os_realloc(dbenv,
- arraysz * sizeof(names[0]), NULL, &names)) != 0)
+ arraysz * sizeof(names[0]), &names)) != 0)
goto nomem;
}
if ((ret = __os_strdup(dbenv, dp->d_name, &names[cnt])) != 0)
@@ -81,7 +80,7 @@ __os_dirlist(dbenv, dir, namesp, cntp)
return (0);
nomem: if (names != NULL)
- __os_dirfree(names, cnt);
+ __os_dirfree(dbenv, names, cnt);
if (dirp != NULL)
(void)closedir(dirp);
return (ret);
@@ -91,18 +90,19 @@ nomem: if (names != NULL)
* __os_dirfree --
* Free the list of files.
*
- * PUBLIC: void __os_dirfree __P((char **, int));
+ * PUBLIC: void __os_dirfree __P((DB_ENV *, char **, int));
*/
void
-__os_dirfree(names, cnt)
+__os_dirfree(dbenv, names, cnt)
+ DB_ENV *dbenv;
char **names;
int cnt;
{
- if (__db_jump.j_dirfree != NULL)
- __db_jump.j_dirfree(names, cnt);
+ if (DB_GLOBAL(j_dirfree) != NULL)
+ DB_GLOBAL(j_dirfree)(names, cnt);
else {
while (cnt > 0)
- __os_free(names[--cnt], 0);
- __os_free(names, 0);
+ __os_free(dbenv, names[--cnt]);
+ __os_free(dbenv, names);
}
}
diff --git a/bdb/os/os_errno.c b/bdb/os/os_errno.c
index f9b60f6354e..4b40f88d177 100644
--- a/bdb/os/os_errno.c
+++ b/bdb/os/os_errno.c
@@ -1,32 +1,52 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1999, 2000
+ * Copyright (c) 1999-2002
* Sleepycat Software. All rights reserved.
*/
#include "db_config.h"
#ifndef lint
-static const char revid[] = "$Id: os_errno.c,v 11.3 2000/02/14 03:00:05 bostic Exp $";
+static const char revid[] = "$Id: os_errno.c,v 11.8 2002/01/11 15:52:59 bostic Exp $";
#endif /* not lint */
-#ifndef NO_SYSTEM_INCLUDES
-#include <errno.h>
-#endif
-
#include "db_int.h"
/*
+ * __os_get_errno_ret_zero --
+ * Return the value of errno, even if it's zero.
+ *
+ * PUBLIC: int __os_get_errno_ret_zero __P((void));
+ */
+int
+__os_get_errno_ret_zero()
+{
+ /* This routine must be able to return the same value repeatedly. */
+ return (errno);
+}
+
+/*
* __os_get_errno --
- * Return the value of errno.
+ * Return the value of errno, or EAGAIN if errno is zero.
*
* PUBLIC: int __os_get_errno __P((void));
*/
int
__os_get_errno()
{
- /* This routine must be able to return the same value repeatedly. */
+ /*
+ * This routine must be able to return the same value repeatedly.
+ *
+ * We've seen cases where system calls failed but errno was never set.
+ * This version of __os_get_errno() sets errno to EAGAIN if it's not
+ * already set, to work around that problem. For obvious reasons, we
+ * can only call this function if we know an error has occurred, that
+ * is, we can't test errno for a non-zero value after this call.
+ */
+ if (errno == 0)
+ __os_set_errno(EAGAIN);
+
return (errno);
}
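
[Illustrative sketch, not part of the patch: the EAGAIN fallback above is the
general "a call just failed but errno is still zero" workaround.  A caller
that knows a failure occurred can apply the same idea directly; the helper
name is hypothetical.]

#include <errno.h>

static int
errno_after_failure(void)
{
	/* Only call this when a system call is known to have failed. */
	if (errno == 0)
		errno = EAGAIN;
	return (errno);
}
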
diff --git a/bdb/os/os_fid.c b/bdb/os/os_fid.c
index f853f6a8dba..125e6f0712c 100644
--- a/bdb/os/os_fid.c
+++ b/bdb/os/os_fid.c
@@ -1,14 +1,14 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Copyright (c) 1996-2002
* Sleepycat Software. All rights reserved.
*/
#include "db_config.h"
#ifndef lint
-static const char revid[] = "$Id: os_fid.c,v 11.7 2000/10/26 14:17:05 bostic Exp $";
+static const char revid[] = "$Id: os_fid.c,v 11.14 2002/08/26 14:37:38 margo Exp $";
#endif /* not lint */
#ifndef NO_SYSTEM_INCLUDES
@@ -26,6 +26,7 @@ static const char revid[] = "$Id: os_fid.c,v 11.7 2000/10/26 14:17:05 bostic Exp
#endif
#endif
+#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#endif
@@ -37,7 +38,12 @@ static u_int32_t fid_serial = SERIAL_INIT;
/*
* __os_fileid --
- * Return a unique identifier for a file.
+ * Return a unique identifier for a file. The structure
+ * of a fileid is: ino(4) dev(4) time(4) pid(4) extra(4).
+ * For real files, which have a backing inode and device, the first
+ * 16 bytes are filled in and the extra bytes are left 0. For
+ * temporary files, the inode and device fields are left blank and
+ * the extra four bytes are filled in with a random value.
*
* PUBLIC: int __os_fileid __P((DB_ENV *, const char *, int, u_int8_t *));
*/
@@ -58,12 +64,14 @@ __os_fileid(dbenv, fname, unique_okay, fidp)
memset(fidp, 0, DB_FILE_ID_LEN);
/* On POSIX/UNIX, use a dev/inode pair. */
+retry:
#ifdef HAVE_VXWORKS
- if (stat((char *)fname, &sb)) {
+ if (stat((char *)fname, &sb) != 0) {
#else
- if (stat(fname, &sb)) {
+ if (stat(fname, &sb) != 0) {
#endif
- ret = __os_get_errno();
+ if ((ret = __os_get_errno()) == EINTR)
+ goto retry;
__db_err(dbenv, "%s: %s", fname, strerror(ret));
return (ret);
}
@@ -83,7 +91,7 @@ __os_fileid(dbenv, fname, unique_okay, fidp)
* interesting properties in base 2.
*/
if (fid_serial == SERIAL_INIT)
- fid_serial = (u_int32_t)getpid();
+ __os_id(&fid_serial);
else
fid_serial += 100000;
diff --git a/bdb/os/os_finit.c b/bdb/os/os_finit.c
deleted file mode 100644
index 23b606ecb2c..00000000000
--- a/bdb/os/os_finit.c
+++ /dev/null
@@ -1,111 +0,0 @@
-/*-
- * See the file LICENSE for redistribution information.
- *
- * Copyright (c) 1998, 1999, 2000
- * Sleepycat Software. All rights reserved.
- */
-
-#include "db_config.h"
-
-#ifndef lint
-static const char revid[] = "$Id: os_finit.c,v 11.8 2000/11/30 00:58:42 ubell Exp $";
-#endif /* not lint */
-
-#ifndef NO_SYSTEM_INCLUDES
-#include <sys/types.h>
-
-#include <string.h>
-#endif
-
-#include "db_int.h"
-
-/*
- * __os_finit --
- * Initialize a regular file, optionally zero-filling it as well.
- *
- * PUBLIC: int __os_finit __P((DB_ENV *, DB_FH *, size_t, int));
- */
-int
-__os_finit(dbenv, fhp, size, zerofill)
- DB_ENV *dbenv;
- DB_FH *fhp;
- size_t size;
- int zerofill;
-{
- db_pgno_t pages;
- size_t i;
- size_t nw;
- u_int32_t relative;
- int ret;
- char buf[OS_VMPAGESIZE];
-
- /* Write nuls to the new bytes. */
- memset(buf, 0, sizeof(buf));
-
- /*
- * Extend the region by writing the last page. If the region is >4Gb,
- * increment may be larger than the maximum possible seek "relative"
- * argument, as it's an unsigned 32-bit value. Break the offset into
- * pages of 1MB each so that we don't overflow (2^20 + 2^32 is bigger
- * than any memory I expect to see for awhile).
- */
- if ((ret = __os_seek(dbenv, fhp, 0, 0, 0, 0, DB_OS_SEEK_END)) != 0)
- return (ret);
- pages = (size - OS_VMPAGESIZE) / MEGABYTE;
- relative = (size - OS_VMPAGESIZE) % MEGABYTE;
- if ((ret = __os_seek(dbenv,
- fhp, MEGABYTE, pages, relative, 0, DB_OS_SEEK_CUR)) != 0)
- return (ret);
- if ((ret = __os_write(dbenv, fhp, buf, sizeof(buf), &nw)) != 0)
- return (ret);
- if (nw != sizeof(buf))
- return (EIO);
-
- /*
- * We may want to guarantee that there is enough disk space for the
- * file, so we also write a byte to each page. We write the byte
- * because reading it is insufficient on systems smart enough not to
- * instantiate disk pages to satisfy a read (e.g., Solaris).
- */
- if (zerofill) {
- pages = size / MEGABYTE;
- relative = size % MEGABYTE;
- if ((ret = __os_seek(dbenv, fhp,
- MEGABYTE, pages, relative, 1, DB_OS_SEEK_END)) != 0)
- return (ret);
-
- /* Write a byte to each page. */
- for (i = 0; i < size; i += OS_VMPAGESIZE) {
- if ((ret = __os_write(dbenv, fhp, buf, 1, &nw)) != 0)
- return (ret);
- if (nw != 1)
- return (EIO);
- if ((ret = __os_seek(dbenv, fhp,
- 0, 0, OS_VMPAGESIZE - 1, 0, DB_OS_SEEK_CUR)) != 0)
- return (ret);
- }
- }
- return (0);
-}
-
-/*
- * __os_fpinit --
- * Initialize a page in a regular file.
- *
- * PUBLIC: int __os_fpinit __P((DB_ENV *, DB_FH *, db_pgno_t, int, int));
- */
-int
-__os_fpinit(dbenv, fhp, pgno, pagecount, pagesize)
- DB_ENV *dbenv;
- DB_FH *fhp;
- db_pgno_t pgno;
- int pagecount, pagesize;
-{
- COMPQUIET(dbenv, NULL);
- COMPQUIET(fhp, NULL);
- COMPQUIET(pgno, 0);
- COMPQUIET(pagecount, 0);
- COMPQUIET(pagesize, 0);
-
- return (0);
-}
diff --git a/bdb/os/os_fsync.c b/bdb/os/os_fsync.c
index f5fd5f56abd..46ab4885a16 100644
--- a/bdb/os/os_fsync.c
+++ b/bdb/os/os_fsync.c
@@ -1,14 +1,14 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1997, 1998, 1999, 2000
+ * Copyright (c) 1997-2002
* Sleepycat Software. All rights reserved.
*/
#include "db_config.h"
#ifndef lint
-static const char revid[] = "$Id: os_fsync.c,v 11.9 2000/04/04 23:29:20 ubell Exp $";
+static const char revid[] = "$Id: os_fsync.c,v 11.14 2002/07/12 18:56:50 bostic Exp $";
#endif /* not lint */
#ifndef NO_SYSTEM_INCLUDES
@@ -20,7 +20,6 @@ static const char revid[] = "$Id: os_fsync.c,v 11.9 2000/04/04 23:29:20 ubell Ex
#endif
#include "db_int.h"
-#include "os_jump.h"
#ifdef HAVE_VXWORKS
#include "ioLib.h"
@@ -79,12 +78,12 @@ __os_fsync(dbenv, fhp)
if (F_ISSET(fhp, DB_FH_NOSYNC))
return (0);
- ret = __db_jump.j_fsync != NULL ?
- __db_jump.j_fsync(fhp->fd) : fsync(fhp->fd);
+ do {
+ ret = DB_GLOBAL(j_fsync) != NULL ?
+ DB_GLOBAL(j_fsync)(fhp->fd) : fsync(fhp->fd);
+ } while (ret != 0 && (ret = __os_get_errno()) == EINTR);
- if (ret != 0) {
- ret = __os_get_errno();
+ if (ret != 0)
__db_err(dbenv, "fsync %s", strerror(ret));
- }
return (ret);
}
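
[Illustrative sketch, not part of the patch: the loops added above around
fsync(), close() and rename() all follow the same EINTR retry idiom, which in
isolation looks like this.]

#include <errno.h>
#include <unistd.h>

static int
fsync_retry(int fd)
{
	int ret;

	do {
		ret = fsync(fd);
	} while (ret != 0 && errno == EINTR);	/* retry interrupted calls */
	return (ret == 0 ? 0 : errno);
}
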
diff --git a/bdb/os/os_handle.c b/bdb/os/os_handle.c
index b127c5ff506..19a337f7d22 100644
--- a/bdb/os/os_handle.c
+++ b/bdb/os/os_handle.c
@@ -1,14 +1,14 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1998, 1999, 2000
+ * Copyright (c) 1998-2002
* Sleepycat Software. All rights reserved.
*/
#include "db_config.h"
#ifndef lint
-static const char revid[] = "$Id: os_handle.c,v 11.19 2000/11/30 00:58:42 ubell Exp $";
+static const char revid[] = "$Id: os_handle.c,v 11.28 2002/07/12 18:56:50 bostic Exp $";
#endif /* not lint */
#ifndef NO_SYSTEM_INCLUDES
@@ -20,7 +20,6 @@ static const char revid[] = "$Id: os_handle.c,v 11.19 2000/11/30 00:58:42 ubell
#endif
#include "db_int.h"
-#include "os_jump.h"
/*
* __os_openhandle --
@@ -43,29 +42,32 @@ __os_openhandle(dbenv, name, flags, mode, fhp)
memset(fhp, 0, sizeof(*fhp));
/* If the application specified an interface, use it. */
- if (__db_jump.j_open != NULL) {
- if ((fhp->fd = __db_jump.j_open(name, flags, mode)) == -1)
+ if (DB_GLOBAL(j_open) != NULL) {
+ if ((fhp->fd = DB_GLOBAL(j_open)(name, flags, mode)) == -1)
return (__os_get_errno());
F_SET(fhp, DB_FH_VALID);
return (0);
}
- for (ret = 0, nrepeat = 1; nrepeat < 4; ++nrepeat) {
+ for (nrepeat = 1; nrepeat < 4; ++nrepeat) {
+ ret = 0;
#ifdef HAVE_VXWORKS
/*
* VxWorks does not support O_CREAT on open, you have to use
* creat() instead. (It does not support O_EXCL or O_TRUNC
* either, even though they are defined "for future support".)
- * If O_EXCL is specified, single thread and try to open the
- * file. If successful, return EEXIST. Otherwise, call creat
- * and then end single threading.
+ * We really want the POSIX behavior: if O_CREAT is set, open the
+ * file if it exists, or create it if it doesn't.  If O_CREAT is
+ * specified, single thread and try to open the file.  If the open
+ * succeeds and O_EXCL was specified, return EEXIST.  If the open
+ * fails, call creat and then end single threading.
*/
if (LF_ISSET(O_CREAT)) {
DB_BEGIN_SINGLE_THREAD;
newflags = flags & ~(O_CREAT | O_EXCL);
- if (LF_ISSET(O_EXCL)) {
- if ((fhp->fd =
- open(name, newflags, mode)) != -1) {
+ if ((fhp->fd =
+ open(name, newflags, mode)) != -1) {
+ if (LF_ISSET(O_EXCL)) {
/*
* If we get here, we want O_EXCL
* create, and it exists. Close and
@@ -84,8 +86,8 @@ __os_openhandle(dbenv, name, flags, mode, fhp)
* verify we truly got the equivalent of
* ENOENT.
*/
- }
- fhp->fd = creat(name, newflags);
+ } else
+ fhp->fd = creat(name, newflags);
DB_END_SINGLE_THREAD;
} else
@@ -118,6 +120,15 @@ __os_openhandle(dbenv, name, flags, mode, fhp)
(void)__os_sleep(dbenv, nrepeat * 2, 0);
continue;
}
+
+ /*
+ * If it was an EINTR it's reasonable to retry
+ * immediately, and arbitrarily often.
+ */
+ if (ret == EINTR) {
+ --nrepeat;
+ continue;
+ }
} else {
#if defined(HAVE_FCNTL_F_SETFD)
/* Deny file descriptor access to any child process. */
@@ -125,7 +136,7 @@ __os_openhandle(dbenv, name, flags, mode, fhp)
ret = __os_get_errno();
__db_err(dbenv, "fcntl(F_SETFD): %s",
strerror(ret));
- (void)__os_closehandle(fhp);
+ (void)__os_closehandle(dbenv, fhp);
} else
#endif
F_SET(fhp, DB_FH_VALID);
@@ -140,10 +151,11 @@ __os_openhandle(dbenv, name, flags, mode, fhp)
* __os_closehandle --
* Close a file.
*
- * PUBLIC: int __os_closehandle __P((DB_FH *));
+ * PUBLIC: int __os_closehandle __P((DB_ENV *, DB_FH *));
*/
int
-__os_closehandle(fhp)
+__os_closehandle(dbenv, fhp)
+ DB_ENV *dbenv;
DB_FH *fhp;
{
int ret;
@@ -151,8 +163,16 @@ __os_closehandle(fhp)
/* Don't close file descriptors that were never opened. */
DB_ASSERT(F_ISSET(fhp, DB_FH_VALID) && fhp->fd != -1);
- ret = __db_jump.j_close != NULL ?
- __db_jump.j_close(fhp->fd) : close(fhp->fd);
+ do {
+ ret = DB_GLOBAL(j_close) != NULL ?
+ DB_GLOBAL(j_close)(fhp->fd) : close(fhp->fd);
+ } while (ret != 0 && (ret = __os_get_errno()) == EINTR);
+
+ /* Unlink the file if we haven't already done so. */
+ if (F_ISSET(fhp, DB_FH_UNLINK)) {
+ (void)__os_unlink(dbenv, fhp->name);
+ (void)__os_free(dbenv, fhp->name);
+ }
/*
* Smash the POSIX file descriptor -- it's never tested, but we want
@@ -161,5 +181,5 @@ __os_closehandle(fhp)
fhp->fd = -1;
F_CLR(fhp, DB_FH_VALID);
- return (ret == 0 ? 0 : __os_get_errno());
+ return (ret);
}
diff --git a/bdb/os/os_id.c b/bdb/os/os_id.c
new file mode 100644
index 00000000000..c242bb12e23
--- /dev/null
+++ b/bdb/os/os_id.c
@@ -0,0 +1,47 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2001-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: os_id.c,v 1.2 2002/01/11 15:52:59 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <unistd.h>
+#endif
+
+#include "db_int.h"
+
+/*
+ * __os_id --
+ * Return a 32-bit value identifying the current thread of control.
+ *
+ * PUBLIC: void __os_id __P((u_int32_t *));
+ */
+void
+__os_id(idp)
+ u_int32_t *idp;
+{
+ /*
+ * By default, use the process ID.
+ *
+ * getpid() returns a pid_t which we convert to a u_int32_t. I have
+ * not yet seen a system where a pid_t has 64-bits, but I'm sure they
+ * exist. Since we're returning only the bottom 32-bits, you cannot
+ * use the return of __os_id to reference a process (for example, you
+ * cannot send a signal to the value returned by __os_id). To send a
+ * signal to the current process, use raise(3) instead.
+ */
+#ifdef HAVE_VXWORKS
+ *idp = taskIdSelf();
+#else
+ *idp = getpid();
+#endif
+}
diff --git a/bdb/os/os_map.c b/bdb/os/os_map.c
index bb96a917d87..6d385b6a84d 100644
--- a/bdb/os/os_map.c
+++ b/bdb/os/os_map.c
@@ -1,14 +1,14 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Copyright (c) 1996-2002
* Sleepycat Software. All rights reserved.
*/
#include "db_config.h"
#ifndef lint
-static const char revid[] = "$Id: os_map.c,v 11.32 2000/11/30 00:58:42 ubell Exp $";
+static const char revid[] = "$Id: os_map.c,v 11.44 2002/07/12 18:56:51 bostic Exp $";
#endif /* not lint */
#ifndef NO_SYSTEM_INCLUDES
@@ -26,9 +26,6 @@ static const char revid[] = "$Id: os_map.c,v 11.32 2000/11/30 00:58:42 ubell Exp
#endif
#include "db_int.h"
-#include "db_page.h"
-#include "db_ext.h"
-#include "os_jump.h"
#ifdef HAVE_MMAP
static int __os_map __P((DB_ENV *, char *, DB_FH *, size_t, int, int, void **));
@@ -143,12 +140,13 @@ __os_r_sysattach(dbenv, infop, rp)
int ret;
/*
- * Try to open/create the shared region file. We DO NOT need to
- * ensure that multiple threads/processes attempting to
- * simultaneously create the region are properly ordered,
- * our caller has already taken care of that.
+ * Try to open/create the shared region file. We DO NOT need to ensure
+ * that multiple threads/processes attempting to simultaneously create
+ * the region are properly ordered, our caller has already taken care
+ * of that.
*/
- if ((ret = __os_open(dbenv, infop->name, DB_OSO_REGION |
+ if ((ret = __os_open(dbenv, infop->name,
+ DB_OSO_REGION | DB_OSO_DIRECT |
(F_ISSET(infop, REGION_CREATE_OK) ? DB_OSO_CREATE : 0),
infop->mode, &fh)) != 0)
__db_err(dbenv, "%s: %s", infop->name, db_strerror(ret));
@@ -161,15 +159,16 @@ __os_r_sysattach(dbenv, infop, rp)
* point, *badly* merged VM/buffer cache systems.
*/
if (ret == 0 && F_ISSET(infop, REGION_CREATE))
- ret = __os_finit(dbenv,
- &fh, rp->size, DB_GLOBAL(db_region_init));
+ ret = __db_fileinit(dbenv,
+ &fh, rp->size, F_ISSET(dbenv, DB_ENV_REGION_INIT) ? 1 : 0);
/* Map the file in. */
if (ret == 0)
ret = __os_map(dbenv,
infop->name, &fh, rp->size, 1, 0, &infop->addr);
- (void)__os_closehandle(&fh);
+ if (F_ISSET(&fh, DB_FH_VALID))
+ (void)__os_closehandle(dbenv, &fh);
return (ret);
}
@@ -295,17 +294,25 @@ __os_unmapfile(dbenv, addr, len)
size_t len;
{
/* If the user replaced the map call, call through their interface. */
- if (__db_jump.j_unmap != NULL)
- return (__db_jump.j_unmap(addr, len));
+ if (DB_GLOBAL(j_unmap) != NULL)
+ return (DB_GLOBAL(j_unmap)(addr, len));
#ifdef HAVE_MMAP
#ifdef HAVE_MUNLOCK
if (F_ISSET(dbenv, DB_ENV_LOCKDOWN))
- (void)munlock(addr, len);
+ while (munlock(addr, len) != 0 && __os_get_errno() == EINTR)
+ ;
#else
COMPQUIET(dbenv, NULL);
#endif
- return (munmap(addr, len) ? __os_get_errno() : 0);
+ {
+ int ret;
+
+ while ((ret = munmap(addr, len)) != 0 &&
+ __os_get_errno() == EINTR)
+ ;
+ return (ret ? __os_get_errno() : 0);
+ }
#else
COMPQUIET(dbenv, NULL);
@@ -331,8 +338,8 @@ __os_map(dbenv, path, fhp, len, is_region, is_rdonly, addrp)
int flags, prot, ret;
/* If the user replaced the map call, call through their interface. */
- if (__db_jump.j_map != NULL)
- return (__db_jump.j_map
+ if (DB_GLOBAL(j_map) != NULL)
+ return (DB_GLOBAL(j_map)
(path, len, is_region, is_rdonly, addrp));
/*
diff --git a/bdb/os/os_method.c b/bdb/os/os_method.c
index 0e2bd394792..04367654efa 100644
--- a/bdb/os/os_method.c
+++ b/bdb/os/os_method.c
@@ -1,14 +1,14 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1999, 2000
+ * Copyright (c) 1999-2002
* Sleepycat Software. All rights reserved.
*/
#include "db_config.h"
#ifndef lint
-static const char revid[] = "$Id: os_method.c,v 11.6 2000/11/15 19:25:39 sue Exp $";
+static const char revid[] = "$Id: os_method.c,v 11.15 2002/07/12 18:56:51 bostic Exp $";
#endif /* not lint */
#ifndef NO_SYSTEM_INCLUDES
@@ -16,191 +16,219 @@ static const char revid[] = "$Id: os_method.c,v 11.6 2000/11/15 19:25:39 sue Exp
#endif
#include "db_int.h"
-#include "os_jump.h"
-
-struct __db_jumptab __db_jump;
+/*
+ * EXTERN: int db_env_set_func_close __P((int (*)(int)));
+ */
int
db_env_set_func_close(func_close)
int (*func_close) __P((int));
{
- __db_jump.j_close = func_close;
+ DB_GLOBAL(j_close) = func_close;
return (0);
}
+/*
+ * EXTERN: int db_env_set_func_dirfree __P((void (*)(char **, int)));
+ */
int
db_env_set_func_dirfree(func_dirfree)
void (*func_dirfree) __P((char **, int));
{
- __db_jump.j_dirfree = func_dirfree;
+ DB_GLOBAL(j_dirfree) = func_dirfree;
return (0);
}
+/*
+ * EXTERN: int db_env_set_func_dirlist
+ * EXTERN: __P((int (*)(const char *, char ***, int *)));
+ */
int
db_env_set_func_dirlist(func_dirlist)
int (*func_dirlist) __P((const char *, char ***, int *));
{
- __db_jump.j_dirlist = func_dirlist;
+ DB_GLOBAL(j_dirlist) = func_dirlist;
return (0);
}
+/*
+ * EXTERN: int db_env_set_func_exists __P((int (*)(const char *, int *)));
+ */
int
db_env_set_func_exists(func_exists)
int (*func_exists) __P((const char *, int *));
{
- __db_jump.j_exists = func_exists;
+ DB_GLOBAL(j_exists) = func_exists;
return (0);
}
+/*
+ * EXTERN: int db_env_set_func_free __P((void (*)(void *)));
+ */
int
db_env_set_func_free(func_free)
void (*func_free) __P((void *));
{
- __db_jump.j_free = func_free;
+ DB_GLOBAL(j_free) = func_free;
return (0);
}
+/*
+ * EXTERN: int db_env_set_func_fsync __P((int (*)(int)));
+ */
int
db_env_set_func_fsync(func_fsync)
int (*func_fsync) __P((int));
{
- __db_jump.j_fsync = func_fsync;
+ DB_GLOBAL(j_fsync) = func_fsync;
return (0);
}
+/*
+ * EXTERN: int db_env_set_func_ioinfo __P((int (*)(const char *,
+ * EXTERN: int, u_int32_t *, u_int32_t *, u_int32_t *)));
+ */
int
db_env_set_func_ioinfo(func_ioinfo)
int (*func_ioinfo)
__P((const char *, int, u_int32_t *, u_int32_t *, u_int32_t *));
{
- __db_jump.j_ioinfo = func_ioinfo;
+ DB_GLOBAL(j_ioinfo) = func_ioinfo;
return (0);
}
+/*
+ * EXTERN: int db_env_set_func_malloc __P((void *(*)(size_t)));
+ */
int
db_env_set_func_malloc(func_malloc)
void *(*func_malloc) __P((size_t));
{
- __db_jump.j_malloc = func_malloc;
+ DB_GLOBAL(j_malloc) = func_malloc;
return (0);
}
+/*
+ * EXTERN: int db_env_set_func_map
+ * EXTERN: __P((int (*)(char *, size_t, int, int, void **)));
+ */
int
db_env_set_func_map(func_map)
int (*func_map) __P((char *, size_t, int, int, void **));
{
- __db_jump.j_map = func_map;
+ DB_GLOBAL(j_map) = func_map;
return (0);
}
+/*
+ * EXTERN: int db_env_set_func_open __P((int (*)(const char *, int, ...)));
+ */
int
db_env_set_func_open(func_open)
int (*func_open) __P((const char *, int, ...));
{
- __db_jump.j_open = func_open;
+ DB_GLOBAL(j_open) = func_open;
return (0);
}
+/*
+ * EXTERN: int db_env_set_func_read __P((ssize_t (*)(int, void *, size_t)));
+ */
int
db_env_set_func_read(func_read)
ssize_t (*func_read) __P((int, void *, size_t));
{
- __db_jump.j_read = func_read;
+ DB_GLOBAL(j_read) = func_read;
return (0);
}
+/*
+ * EXTERN: int db_env_set_func_realloc __P((void *(*)(void *, size_t)));
+ */
int
db_env_set_func_realloc(func_realloc)
void *(*func_realloc) __P((void *, size_t));
{
- __db_jump.j_realloc = func_realloc;
+ DB_GLOBAL(j_realloc) = func_realloc;
return (0);
}
+/*
+ * EXTERN: int db_env_set_func_rename
+ * EXTERN: __P((int (*)(const char *, const char *)));
+ */
int
db_env_set_func_rename(func_rename)
int (*func_rename) __P((const char *, const char *));
{
- __db_jump.j_rename = func_rename;
+ DB_GLOBAL(j_rename) = func_rename;
return (0);
}
+/*
+ * EXTERN: int db_env_set_func_seek
+ * EXTERN: __P((int (*)(int, size_t, db_pgno_t, u_int32_t, int, int)));
+ */
int
db_env_set_func_seek(func_seek)
int (*func_seek) __P((int, size_t, db_pgno_t, u_int32_t, int, int));
{
- __db_jump.j_seek = func_seek;
+ DB_GLOBAL(j_seek) = func_seek;
return (0);
}
+/*
+ * EXTERN: int db_env_set_func_sleep __P((int (*)(u_long, u_long)));
+ */
int
db_env_set_func_sleep(func_sleep)
int (*func_sleep) __P((u_long, u_long));
{
- __db_jump.j_sleep = func_sleep;
+ DB_GLOBAL(j_sleep) = func_sleep;
return (0);
}
+/*
+ * EXTERN: int db_env_set_func_unlink __P((int (*)(const char *)));
+ */
int
db_env_set_func_unlink(func_unlink)
int (*func_unlink) __P((const char *));
{
- __db_jump.j_unlink = func_unlink;
+ DB_GLOBAL(j_unlink) = func_unlink;
return (0);
}
+/*
+ * EXTERN: int db_env_set_func_unmap __P((int (*)(void *, size_t)));
+ */
int
db_env_set_func_unmap(func_unmap)
int (*func_unmap) __P((void *, size_t));
{
- __db_jump.j_unmap = func_unmap;
+ DB_GLOBAL(j_unmap) = func_unmap;
return (0);
}
+/*
+ * EXTERN: int db_env_set_func_write
+ * EXTERN: __P((ssize_t (*)(int, const void *, size_t)));
+ */
int
db_env_set_func_write(func_write)
ssize_t (*func_write) __P((int, const void *, size_t));
{
- __db_jump.j_write = func_write;
+ DB_GLOBAL(j_write) = func_write;
return (0);
}
+/*
+ * EXTERN: int db_env_set_func_yield __P((int (*)(void)));
+ */
int
db_env_set_func_yield(func_yield)
int (*func_yield) __P((void));
{
- __db_jump.j_yield = func_yield;
- return (0);
-}
-
-int
-db_env_set_pageyield(onoff)
- int onoff;
-{
- DB_GLOBAL(db_pageyield) = onoff;
- return (0);
-}
-
-int
-db_env_set_panicstate(onoff)
- int onoff;
-{
- DB_GLOBAL(db_panic) = onoff;
- return (0);
-}
-
-int
-db_env_set_region_init(onoff)
- int onoff;
-{
- DB_GLOBAL(db_region_init) = onoff;
- return (0);
-}
-
-int
-db_env_set_tas_spins(tas_spins)
- u_int32_t tas_spins;
-{
- DB_GLOBAL(db_tas_spins) = tas_spins;
+ DB_GLOBAL(j_yield) = func_yield;
return (0);
}
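
[Illustrative sketch, not part of the patch: the setters above move the
replacement hooks from __db_jump into DB_GLOBAL, but the public interface is
unchanged.  An application that wants Berkeley DB to use its own allocator
would still install it like this; my_malloc/my_realloc/my_free are
application-supplied stand-ins.]

#include <stdlib.h>

#include "db.h"

static void *my_malloc(size_t n)           { return (malloc(n)); }
static void *my_realloc(void *p, size_t n) { return (realloc(p, n)); }
static void  my_free(void *p)              { free(p); }

void
install_allocator(void)
{
	(void)db_env_set_func_malloc(my_malloc);
	(void)db_env_set_func_realloc(my_realloc);
	(void)db_env_set_func_free(my_free);
}
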
diff --git a/bdb/os/os_oflags.c b/bdb/os/os_oflags.c
index fd413bdacbe..f75178de75e 100644
--- a/bdb/os/os_oflags.c
+++ b/bdb/os/os_oflags.c
@@ -1,14 +1,14 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1997, 1998, 1999, 2000
+ * Copyright (c) 1997-2002
* Sleepycat Software. All rights reserved.
*/
#include "db_config.h"
#ifndef lint
-static const char revid[] = "$Id: os_oflags.c,v 11.6 2000/10/27 20:32:02 dda Exp $";
+static const char revid[] = "$Id: os_oflags.c,v 11.9 2002/01/11 15:53:00 bostic Exp $";
#endif /* not lint */
#ifndef NO_SYSTEM_INCLUDES
@@ -72,23 +72,35 @@ __db_omode(perm)
{
int mode;
-#ifndef S_IRUSR
#ifdef DB_WIN32
+#ifndef S_IRUSR
#define S_IRUSR S_IREAD /* R for owner */
+#endif
+#ifndef S_IWUSR
#define S_IWUSR S_IWRITE /* W for owner */
+#endif
+#ifndef S_IRGRP
#define S_IRGRP 0 /* R for group */
+#endif
+#ifndef S_IWGRP
#define S_IWGRP 0 /* W for group */
+#endif
+#ifndef S_IROTH
#define S_IROTH 0 /* R for other */
+#endif
+#ifndef S_IWOTH
#define S_IWOTH 0 /* W for other */
+#endif
#else
+#ifndef S_IRUSR
#define S_IRUSR 0000400 /* R for owner */
#define S_IWUSR 0000200 /* W for owner */
#define S_IRGRP 0000040 /* R for group */
#define S_IWGRP 0000020 /* W for group */
#define S_IROTH 0000004 /* R for other */
#define S_IWOTH 0000002 /* W for other */
-#endif /* DB_WIN32 */
#endif
+#endif /* DB_WIN32 */
mode = 0;
if (perm[0] == 'r')
mode |= S_IRUSR;
diff --git a/bdb/os/os_open.c b/bdb/os/os_open.c
index cdc75cd737b..0a4dbadc6e8 100644
--- a/bdb/os/os_open.c
+++ b/bdb/os/os_open.c
@@ -1,14 +1,14 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1997, 1998, 1999, 2000
+ * Copyright (c) 1997-2002
* Sleepycat Software. All rights reserved.
*/
#include "db_config.h"
#ifndef lint
-static const char revid[] = "$Id: os_open.c,v 11.21 2001/01/11 18:19:53 bostic Exp $";
+static const char revid[] = "$Id: os_open.c,v 11.37 2002/06/21 20:35:16 sandstro Exp $";
#endif /* not lint */
#ifndef NO_SYSTEM_INCLUDES
@@ -42,6 +42,15 @@ __os_open(dbenv, name, flags, mode, fhp)
oflags = 0;
+#ifdef DIAGNOSTIC
+#define OKFLAGS \
+ (DB_OSO_CREATE | DB_OSO_DIRECT | DB_OSO_EXCL | DB_OSO_LOG | \
+ DB_OSO_RDONLY | DB_OSO_REGION | DB_OSO_SEQ | DB_OSO_TEMP | \
+ DB_OSO_TRUNC)
+ if ((ret = __db_fchk(dbenv, "__os_open", flags, OKFLAGS)) != 0)
+ return (ret);
+#endif
+
#if defined(O_BINARY)
/*
* If there's a binary-mode open flag, set it, we never want any
@@ -84,6 +93,11 @@ __os_open(dbenv, name, flags, mode, fhp)
if (LF_ISSET(DB_OSO_TRUNC))
oflags |= O_TRUNC;
+#ifdef HAVE_O_DIRECT
+ if (LF_ISSET(DB_OSO_DIRECT))
+ oflags |= O_DIRECT;
+#endif
+
#ifdef HAVE_QNX
if (LF_ISSET(DB_OSO_REGION))
return (__os_region_open(dbenv, name, oflags, mode, fhp));
@@ -92,6 +106,11 @@ __os_open(dbenv, name, flags, mode, fhp)
if ((ret = __os_openhandle(dbenv, name, oflags, mode, fhp)) != 0)
return (ret);
+#ifdef HAVE_DIRECTIO
+ if (LF_ISSET(DB_OSO_DIRECT))
+ (void)directio(fhp->fd, DIRECTIO_ON);
+#endif
+
/*
* Delete any temporary file.
*
@@ -102,8 +121,18 @@ __os_open(dbenv, name, flags, mode, fhp)
* reasonable way to avoid the race (playing signal games isn't worth
* the portability nightmare), so we just live with it.
*/
- if (LF_ISSET(DB_OSO_TEMP))
+ if (LF_ISSET(DB_OSO_TEMP)) {
+#if defined(HAVE_UNLINK_WITH_OPEN_FAILURE) || defined(CONFIG_TEST)
+ if ((ret = __os_strdup(dbenv, name, &fhp->name)) != 0) {
+ (void)__os_closehandle(dbenv, fhp);
+ (void)__os_unlink(dbenv, name);
+ return (ret);
+ }
+ F_SET(fhp, DB_FH_UNLINK);
+#else
(void)__os_unlink(dbenv, name);
+#endif
+ }
return (0);
}
@@ -136,7 +165,7 @@ __os_region_open(dbenv, name, oflags, mode, fhp)
if (fcntl(fhp->fd, F_SETFD, 1) == -1) {
ret = __os_get_errno();
__db_err(dbenv, "fcntl(F_SETFD): %s", strerror(ret));
- __os_closehandle(fhp);
+ __os_closehandle(dbenv, fhp);
} else
#endif
F_SET(fhp, DB_FH_VALID);
@@ -147,7 +176,7 @@ __os_region_open(dbenv, name, oflags, mode, fhp)
*/
err:
if (newname != NULL)
- __os_free(newname, 0);
+ __os_free(dbenv, newname);
return (ret);
}
@@ -155,7 +184,9 @@ err:
* __os_shmname --
* Translate a pathname into a shm_open memory object name.
*
+ * PUBLIC: #ifdef HAVE_QNX
* PUBLIC: int __os_shmname __P((DB_ENV *, const char *, char **));
+ * PUBLIC: #endif
*/
int
__os_shmname(dbenv, name, newnamep)
@@ -206,7 +237,7 @@ __os_shmname(dbenv, name, newnamep)
* If we have a path component, copy and return it.
*/
ret = __os_strdup(dbenv, p, newnamep);
- __os_free(tmpname, 0);
+ __os_free(dbenv, tmpname);
return (ret);
}
@@ -215,11 +246,11 @@ __os_shmname(dbenv, name, newnamep)
* Add a leading slash, and copy the remainder.
*/
size = strlen(tmpname) + 2;
- if ((ret = __os_malloc(dbenv, size, NULL, &p)) != 0)
+ if ((ret = __os_malloc(dbenv, size, &p)) != 0)
return (ret);
p[0] = '/';
memcpy(&p[1], tmpname, size-1);
- __os_free(tmpname, 0);
+ __os_free(dbenv, tmpname);
*newnamep = p;
return (0);
}
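
[Illustrative sketch, not part of the patch: on systems without the
HAVE_UNLINK_WITH_OPEN_FAILURE workaround, the DB_OSO_TEMP handling above
relies on the classic POSIX anonymous-temporary-file idiom -- unlink the name
right after a successful open, and the storage lives until the descriptor is
closed.  The path argument is illustrative only.]

#include <fcntl.h>
#include <unistd.h>

int
open_anonymous_temp(const char *path)
{
	int fd;

	if ((fd = open(path, O_CREAT | O_EXCL | O_RDWR, 0600)) == -1)
		return (-1);
	(void)unlink(path);	/* name gone; data stays until close(fd) */
	return (fd);
}
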
diff --git a/bdb/os/os_region.c b/bdb/os/os_region.c
index 1e36fc2cbe0..6529f708b2c 100644
--- a/bdb/os/os_region.c
+++ b/bdb/os/os_region.c
@@ -1,14 +1,14 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Copyright (c) 1996-2002
* Sleepycat Software. All rights reserved.
*/
#include "db_config.h"
#ifndef lint
-static const char revid[] = "$Id: os_region.c,v 11.9 2000/11/30 00:58:42 ubell Exp $";
+static const char revid[] = "$Id: os_region.c,v 11.15 2002/07/12 18:56:51 bostic Exp $";
#endif /* not lint */
#ifndef NO_SYSTEM_INCLUDES
@@ -17,7 +17,6 @@ static const char revid[] = "$Id: os_region.c,v 11.9 2000/11/30 00:58:42 ubell E
#endif
#include "db_int.h"
-#include "os_jump.h"
/*
* __os_r_attach --
@@ -70,7 +69,7 @@ __os_r_attach(dbenv, infop, rp)
}
#endif
if ((ret =
- __os_malloc(dbenv, rp->size, NULL, &infop->addr)) != 0)
+ __os_malloc(dbenv, rp->size, &infop->addr)) != 0)
return (ret);
#if defined(UMRW) && !defined(DIAGNOSTIC)
memset(infop->addr, CLEAR_BYTE, rp->size);
@@ -79,8 +78,8 @@ __os_r_attach(dbenv, infop, rp)
}
/* If the user replaced the map call, call through their interface. */
- if (__db_jump.j_map != NULL)
- return (__db_jump.j_map(infop->name,
+ if (DB_GLOBAL(j_map) != NULL)
+ return (DB_GLOBAL(j_map)(infop->name,
rp->size, 1, 0, &infop->addr));
return (__os_r_sysattach(dbenv, infop, rp));
@@ -104,13 +103,13 @@ __os_r_detach(dbenv, infop, destroy)
/* If a region is private, free the memory. */
if (F_ISSET(dbenv, DB_ENV_PRIVATE)) {
- __os_free(infop->addr, rp->size);
+ __os_free(dbenv, infop->addr);
return (0);
}
/* If the user replaced the map call, call through their interface. */
- if (__db_jump.j_unmap != NULL)
- return (__db_jump.j_unmap(infop->addr, rp->size));
+ if (DB_GLOBAL(j_unmap) != NULL)
+ return (DB_GLOBAL(j_unmap)(infop->addr, rp->size));
return (__os_r_sysdetach(dbenv, infop, destroy));
}
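
The j_map/j_unmap change above is part of a broader move from the old __db_jump structure to per-process DB_GLOBAL() function pointers that an application can override. A sketch of that hook pattern in isolation, with purely illustrative names (not the DB_GLOBAL machinery itself):

#include <stddef.h>
#include <stdlib.h>
#include <string.h>

/* User-replaceable allocation hook; NULL means use the default. */
static void *(*j_alloc)(size_t) = NULL;

void
set_alloc_hook(void *(*f)(size_t))
{
    j_alloc = f;
}

void *
region_alloc(size_t len)
{
    void *p;

    if (j_alloc != NULL)            /* the application replaced the call */
        return (j_alloc(len));
    if ((p = malloc(len)) != NULL)  /* default: private, zeroed memory */
        memset(p, 0, len);
    return (p);
}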
diff --git a/bdb/os/os_rename.c b/bdb/os/os_rename.c
index 8108bba67d9..2569a9c3186 100644
--- a/bdb/os/os_rename.c
+++ b/bdb/os/os_rename.c
@@ -1,14 +1,14 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1997, 1998, 1999, 2000
+ * Copyright (c) 1997-2002
* Sleepycat Software. All rights reserved.
*/
#include "db_config.h"
#ifndef lint
-static const char revid[] = "$Id: os_rename.c,v 11.6 2000/04/14 16:56:33 ubell Exp $";
+static const char revid[] = "$Id: os_rename.c,v 11.12 2002/07/12 18:56:52 bostic Exp $";
#endif /* not lint */
#ifndef NO_SYSTEM_INCLUDES
@@ -19,28 +19,29 @@ static const char revid[] = "$Id: os_rename.c,v 11.6 2000/04/14 16:56:33 ubell E
#endif
#include "db_int.h"
-#include "os_jump.h"
/*
* __os_rename --
- * Rename a file.
+ * Rename a file. If flags is non-zero, then errors are OK and we
+ * should not output an error message.
*
- * PUBLIC: int __os_rename __P((DB_ENV *, const char *, const char *));
+ * PUBLIC: int __os_rename __P((DB_ENV *,
+ * PUBLIC: const char *, const char *, u_int32_t));
*/
int
-__os_rename(dbenv, old, new)
+__os_rename(dbenv, old, new, flags)
DB_ENV *dbenv;
const char *old, *new;
+ u_int32_t flags;
{
int ret;
- ret = __db_jump.j_rename != NULL ?
- __db_jump.j_rename(old, new) : rename(old, new);
-
- if (ret == -1) {
- ret = __os_get_errno();
- __db_err(dbenv, "Rename %s %s: %s", old, new, strerror(ret));
- }
+ do {
+ ret = DB_GLOBAL(j_rename) != NULL ?
+ DB_GLOBAL(j_rename)(old, new) : rename(old, new);
+ } while (ret != 0 && (ret = __os_get_errno()) == EINTR);
+ if (ret != 0 && flags == 0)
+ __db_err(dbenv, "rename %s %s: %s", old, new, strerror(ret));
return (ret);
}
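
The rewritten __os_rename wraps the underlying rename(2) in a loop so that a signal arriving mid-call (EINTR) triggers a retry rather than a spurious failure, and the new flags argument lets callers suppress the error message. The same retry idiom as a self-contained helper (assuming POSIX; not the BDB routine itself):

#include <errno.h>
#include <stdio.h>

int
rename_retry(const char *oldpath, const char *newpath)
{
    int ret;

    do {
        ret = rename(oldpath, newpath) == -1 ? errno : 0;
    } while (ret == EINTR);     /* interrupted by a signal: try again */
    return (ret);
}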
diff --git a/bdb/os/os_root.c b/bdb/os/os_root.c
index 753285c1be6..cd5bfc352e9 100644
--- a/bdb/os/os_root.c
+++ b/bdb/os/os_root.c
@@ -1,14 +1,14 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1999, 2000
+ * Copyright (c) 1999-2002
* Sleepycat Software. All rights reserved.
*/
#include "db_config.h"
#ifndef lint
-static const char revid[] = "$Id: os_root.c,v 11.4 2000/02/14 03:00:05 bostic Exp $";
+static const char revid[] = "$Id: os_root.c,v 11.6 2002/01/11 15:53:01 bostic Exp $";
#endif /* not lint */
#ifndef NO_SYSTEM_INCLUDES
diff --git a/bdb/os/os_rpath.c b/bdb/os/os_rpath.c
index 75d394ef210..b9ccba01bd5 100644
--- a/bdb/os/os_rpath.c
+++ b/bdb/os/os_rpath.c
@@ -1,14 +1,14 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1997, 1998, 1999, 2000
+ * Copyright (c) 1997-2002
* Sleepycat Software. All rights reserved.
*/
#include "db_config.h"
#ifndef lint
-static const char revid[] = "$Id: os_rpath.c,v 11.5 2000/06/30 13:40:30 sue Exp $";
+static const char revid[] = "$Id: os_rpath.c,v 11.7 2002/01/11 15:53:01 bostic Exp $";
#endif /* not lint */
#ifndef NO_SYSTEM_INCLUDES
diff --git a/bdb/os/os_rw.c b/bdb/os/os_rw.c
index 7e8e1255d6b..9a79342c7b8 100644
--- a/bdb/os/os_rw.c
+++ b/bdb/os/os_rw.c
@@ -1,14 +1,14 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1997, 1998, 1999, 2000
+ * Copyright (c) 1997-2002
* Sleepycat Software. All rights reserved.
*/
#include "db_config.h"
#ifndef lint
-static const char revid[] = "$Id: os_rw.c,v 11.15 2000/11/15 19:25:39 sue Exp $";
+static const char revid[] = "$Id: os_rw.c,v 11.24 2002/07/12 18:56:52 bostic Exp $";
#endif /* not lint */
#ifndef NO_SYSTEM_INCLUDES
@@ -19,7 +19,6 @@ static const char revid[] = "$Id: os_rw.c,v 11.15 2000/11/15 19:25:39 sue Exp $"
#endif
#include "db_int.h"
-#include "os_jump.h"
/*
* __os_io --
@@ -39,13 +38,13 @@ __os_io(dbenv, db_iop, op, niop)
#if defined(HAVE_PREAD) && defined(HAVE_PWRITE)
switch (op) {
case DB_IO_READ:
- if (__db_jump.j_read != NULL)
+ if (DB_GLOBAL(j_read) != NULL)
goto slow;
*niop = pread(db_iop->fhp->fd, db_iop->buf,
db_iop->bytes, (off_t)db_iop->pgno * db_iop->pagesize);
break;
case DB_IO_WRITE:
- if (__db_jump.j_write != NULL)
+ if (DB_GLOBAL(j_write) != NULL)
goto slow;
*niop = pwrite(db_iop->fhp->fd, db_iop->buf,
db_iop->bytes, (off_t)db_iop->pgno * db_iop->pagesize);
@@ -98,10 +97,11 @@ __os_read(dbenv, fhp, addr, len, nrp)
for (taddr = addr,
offset = 0; offset < len; taddr += nr, offset += nr) {
- if ((nr = __db_jump.j_read != NULL ?
- __db_jump.j_read(fhp->fd, taddr, len - offset) :
+retry: if ((nr = DB_GLOBAL(j_read) != NULL ?
+ DB_GLOBAL(j_read)(fhp->fd, taddr, len - offset) :
read(fhp->fd, taddr, len - offset)) < 0) {
- ret = __os_get_errno();
+ if ((ret = __os_get_errno()) == EINTR)
+ goto retry;
__db_err(dbenv, "read: 0x%x, %lu: %s", taddr,
(u_long)len-offset, strerror(ret));
return (ret);
@@ -134,10 +134,11 @@ __os_write(dbenv, fhp, addr, len, nwp)
for (taddr = addr,
offset = 0; offset < len; taddr += nw, offset += nw)
- if ((nw = __db_jump.j_write != NULL ?
- __db_jump.j_write(fhp->fd, taddr, len - offset) :
+retry: if ((nw = DB_GLOBAL(j_write) != NULL ?
+ DB_GLOBAL(j_write)(fhp->fd, taddr, len - offset) :
write(fhp->fd, taddr, len - offset)) < 0) {
- ret = __os_get_errno();
+ if ((ret = __os_get_errno()) == EINTR)
+ goto retry;
__db_err(dbenv, "write: 0x%x, %lu: %s", taddr,
(u_long)len-offset, strerror(ret));
return (ret);
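
Both __os_read and __os_write above get the same treatment: the loop already handled short transfers, and now an EINTR from the underlying call restarts the attempt instead of aborting. A standalone version of that read loop, assuming plain POSIX read(2) (hypothetical helper, not the BDB function):

#include <errno.h>
#include <unistd.h>

ssize_t
read_fully(int fd, void *buf, size_t len)
{
    size_t off;
    ssize_t nr;
    char *p;

    for (p = buf, off = 0; off < len; p += nr, off += nr) {
        if ((nr = read(fd, p, len - off)) < 0) {
            if (errno == EINTR) {   /* interrupted: retry this chunk */
                nr = 0;
                continue;
            }
            return (-1);
        }
        if (nr == 0)                /* end of file */
            break;
    }
    return ((ssize_t)off);
}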
diff --git a/bdb/os/os_seek.c b/bdb/os/os_seek.c
index 1c4dc2238e1..5b2aa45d5dd 100644
--- a/bdb/os/os_seek.c
+++ b/bdb/os/os_seek.c
@@ -1,14 +1,14 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1997, 1998, 1999, 2000
+ * Copyright (c) 1997-2002
* Sleepycat Software. All rights reserved.
*/
#include "db_config.h"
#ifndef lint
-static const char revid[] = "$Id: os_seek.c,v 11.12 2000/11/30 00:58:42 ubell Exp $";
+static const char revid[] = "$Id: os_seek.c,v 11.18 2002/07/12 18:56:52 bostic Exp $";
#endif /* not lint */
#ifndef NO_SYSTEM_INCLUDES
@@ -20,7 +20,6 @@ static const char revid[] = "$Id: os_seek.c,v 11.12 2000/11/30 00:58:42 ubell Ex
#endif
#include "db_int.h"
-#include "os_jump.h"
/*
* __os_seek --
@@ -56,15 +55,17 @@ __os_seek(dbenv, fhp, pgsize, pageno, relative, isrewind, db_whence)
return (EINVAL);
}
- if (__db_jump.j_seek != NULL)
- ret = __db_jump.j_seek(fhp->fd,
+ if (DB_GLOBAL(j_seek) != NULL)
+ ret = DB_GLOBAL(j_seek)(fhp->fd,
pgsize, pageno, relative, isrewind, whence);
else {
offset = (off_t)pgsize * pageno + relative;
if (isrewind)
offset = -offset;
- ret =
- lseek(fhp->fd, offset, whence) == -1 ? __os_get_errno() : 0;
+ do {
+ ret = lseek(fhp->fd, offset, whence) == -1 ?
+ __os_get_errno() : 0;
+ } while (ret == EINTR);
}
if (ret != 0)
diff --git a/bdb/os/os_sleep.c b/bdb/os/os_sleep.c
index 137cd73b708..42d496dbae7 100644
--- a/bdb/os/os_sleep.c
+++ b/bdb/os/os_sleep.c
@@ -1,14 +1,14 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1997, 1998, 1999, 2000
+ * Copyright (c) 1997-2002
* Sleepycat Software. All rights reserved.
*/
#include "db_config.h"
#ifndef lint
-static const char revid[] = "$Id: os_sleep.c,v 11.7 2000/04/07 14:26:36 bostic Exp $";
+static const char revid[] = "$Id: os_sleep.c,v 11.15 2002/07/12 18:56:52 bostic Exp $";
#endif /* not lint */
#ifndef NO_SYSTEM_INCLUDES
@@ -21,6 +21,7 @@ static const char revid[] = "$Id: os_sleep.c,v 11.7 2000/04/07 14:26:36 bostic E
#ifdef HAVE_VXWORKS
#include <sys/times.h>
#include <time.h>
+#include <selectLib.h>
#else
#if TIME_WITH_SYS_TIME
#include <sys/time.h>
@@ -39,7 +40,6 @@ static const char revid[] = "$Id: os_sleep.c,v 11.7 2000/04/07 14:26:36 bostic E
#endif
#include "db_int.h"
-#include "os_jump.h"
/*
* __os_sleep --
@@ -59,8 +59,8 @@ __os_sleep(dbenv, secs, usecs)
for (; usecs >= 1000000; usecs -= 1000000)
++secs;
- if (__db_jump.j_sleep != NULL)
- return (__db_jump.j_sleep(secs, usecs));
+ if (DB_GLOBAL(j_sleep) != NULL)
+ return (DB_GLOBAL(j_sleep)(secs, usecs));
/*
* It's important that we yield the processor here so that other
@@ -68,7 +68,10 @@ __os_sleep(dbenv, secs, usecs)
*/
t.tv_sec = secs;
t.tv_usec = usecs;
- ret = select(0, NULL, NULL, NULL, &t) == -1 ? __os_get_errno() : 0;
+ do {
+ ret = select(0, NULL, NULL, NULL, &t) == -1 ?
+ __os_get_errno() : 0;
+ } while (ret == EINTR);
if (ret != 0)
__db_err(dbenv, "select: %s", strerror(ret));
diff --git a/bdb/os/os_spin.c b/bdb/os/os_spin.c
index b0800b98830..fb36977cb44 100644
--- a/bdb/os/os_spin.c
+++ b/bdb/os/os_spin.c
@@ -1,14 +1,14 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1997, 1998, 1999, 2000
+ * Copyright (c) 1997-2002
* Sleepycat Software. All rights reserved.
*/
#include "db_config.h"
#ifndef lint
-static const char revid[] = "$Id: os_spin.c,v 11.5 2000/03/30 01:46:42 ubell Exp $";
+static const char revid[] = "$Id: os_spin.c,v 11.13 2002/08/07 02:02:07 bostic Exp $";
#endif /* not lint */
#ifndef NO_SYSTEM_INCLUDES
@@ -22,9 +22,10 @@ static const char revid[] = "$Id: os_spin.c,v 11.5 2000/03/30 01:46:42 ubell Exp
#endif
#include "db_int.h"
-#include "os_jump.h"
#if defined(HAVE_PSTAT_GETDYNAMIC)
+static int __os_pstat_getdynamic __P((void));
+
/*
* __os_pstat_getdynamic --
* HP/UX.
@@ -40,6 +41,8 @@ __os_pstat_getdynamic()
#endif
#if defined(HAVE_SYSCONF) && defined(_SC_NPROCESSORS_ONLN)
+static int __os_sysconf __P((void));
+
/*
* __os_sysconf --
* Solaris, Linux.
@@ -47,9 +50,9 @@ __os_pstat_getdynamic()
static int
__os_sysconf()
{
- int nproc;
+ long nproc;
- return ((nproc = sysconf(_SC_NPROCESSORS_ONLN)) > 1 ? nproc : 1);
+ return ((nproc = sysconf(_SC_NPROCESSORS_ONLN)) > 1 ? (int)nproc : 1);
}
#endif
@@ -57,10 +60,11 @@ __os_sysconf()
* __os_spin --
* Return the number of default spins before blocking.
*
- * PUBLIC: int __os_spin __P((void));
+ * PUBLIC: int __os_spin __P((DB_ENV *));
*/
int
-__os_spin()
+__os_spin(dbenv)
+ DB_ENV *dbenv;
{
/*
* If the application specified a value or we've already figured it
@@ -71,25 +75,25 @@ __os_spin()
* it can be expensive (e.g., requiring multiple filesystem accesses
* under Debian Linux).
*/
- if (DB_GLOBAL(db_tas_spins) != 0)
- return (DB_GLOBAL(db_tas_spins));
+ if (dbenv->tas_spins != 0)
+ return (dbenv->tas_spins);
- DB_GLOBAL(db_tas_spins) = 1;
+ dbenv->tas_spins = 1;
#if defined(HAVE_PSTAT_GETDYNAMIC)
- DB_GLOBAL(db_tas_spins) = __os_pstat_getdynamic();
+ dbenv->tas_spins = __os_pstat_getdynamic();
#endif
#if defined(HAVE_SYSCONF) && defined(_SC_NPROCESSORS_ONLN)
- DB_GLOBAL(db_tas_spins) = __os_sysconf();
+ dbenv->tas_spins = __os_sysconf();
#endif
/*
* Spin 50 times per processor, we have anecdotal evidence that this
* is a reasonable value.
*/
- if (DB_GLOBAL(db_tas_spins) != 1)
- DB_GLOBAL(db_tas_spins) *= 50;
+ if (dbenv->tas_spins != 1)
+ dbenv->tas_spins *= 50;
- return (DB_GLOBAL(db_tas_spins));
+ return (dbenv->tas_spins);
}
/*
@@ -103,7 +107,7 @@ __os_yield(dbenv, usecs)
DB_ENV *dbenv;
u_long usecs;
{
- if (__db_jump.j_yield != NULL && __db_jump.j_yield() == 0)
+ if (DB_GLOBAL(j_yield) != NULL && DB_GLOBAL(j_yield)() == 0)
return;
- __os_sleep(dbenv, 0, usecs);
+ (void)__os_sleep(dbenv, 0, usecs);
}
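
__os_spin now stores the test-and-set spin count in the DB_ENV rather than a process-wide global, but the derivation is unchanged: one spin per online processor, scaled by 50, with a floor of one. The sysconf() path in isolation (HP-UX pstat_getdynamic omitted; helper name is illustrative):

#include <unistd.h>

int
default_spins(void)
{
    long nproc;
    int spins;

    spins = 1;
#if defined(_SC_NPROCESSORS_ONLN)
    if ((nproc = sysconf(_SC_NPROCESSORS_ONLN)) > 1)
        spins = (int)nproc;
#endif
    /* Roughly 50 spins per processor before blocking on the mutex. */
    return (spins == 1 ? 1 : spins * 50);
}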
diff --git a/bdb/os/os_stat.c b/bdb/os/os_stat.c
index 1590e8ecd77..c3510e36f5d 100644
--- a/bdb/os/os_stat.c
+++ b/bdb/os/os_stat.c
@@ -1,24 +1,24 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1997, 1998, 1999, 2000
+ * Copyright (c) 1997-2002
* Sleepycat Software. All rights reserved.
*/
#include "db_config.h"
#ifndef lint
-static const char revid[] = "$Id: os_stat.c,v 11.8 2000/10/27 20:32:02 dda Exp $";
+static const char revid[] = "$Id: os_stat.c,v 11.20 2002/07/12 18:56:53 bostic Exp $";
#endif /* not lint */
#ifndef NO_SYSTEM_INCLUDES
#include <sys/types.h>
#include <sys/stat.h>
+
#include <string.h>
#endif
#include "db_int.h"
-#include "os_jump.h"
/*
* __os_exists --
@@ -31,20 +31,29 @@ __os_exists(path, isdirp)
const char *path;
int *isdirp;
{
+ int ret;
struct stat sb;
- if (__db_jump.j_exists != NULL)
- return (__db_jump.j_exists(path, isdirp));
+ if (DB_GLOBAL(j_exists) != NULL)
+ return (DB_GLOBAL(j_exists)(path, isdirp));
+ do {
+ ret =
#ifdef HAVE_VXWORKS
- if (stat((char *)path, &sb) != 0)
+ stat((char *)path, &sb);
#else
- if (stat(path, &sb) != 0)
+ stat(path, &sb);
#endif
- return (__os_get_errno());
+ if (ret != 0)
+ ret = __os_get_errno();
+ } while (ret == EINTR);
+
+ if (ret != 0)
+ return (ret);
#if !defined(S_ISDIR) || defined(STAT_MACROS_BROKEN)
-#ifdef DB_WIN32
+#undef S_ISDIR
+#ifdef _S_IFDIR
#define S_ISDIR(m) (_S_IFDIR & (m))
#else
#define S_ISDIR(m) (((m) & 0170000) == 0040000)
@@ -74,21 +83,23 @@ __os_ioinfo(dbenv, path, fhp, mbytesp, bytesp, iosizep)
int ret;
struct stat sb;
- if (__db_jump.j_ioinfo != NULL)
- return (__db_jump.j_ioinfo(path,
+ if (DB_GLOBAL(j_ioinfo) != NULL)
+ return (DB_GLOBAL(j_ioinfo)(path,
fhp->fd, mbytesp, bytesp, iosizep));
+retry:
if (fstat(fhp->fd, &sb) == -1) {
- ret = __os_get_errno();
+ if ((ret = __os_get_errno()) == EINTR)
+ goto retry;
__db_err(dbenv, "fstat: %s", strerror(ret));
return (ret);
}
/* Return the size of the file. */
if (mbytesp != NULL)
- *mbytesp = sb.st_size / MEGABYTE;
+ *mbytesp = (u_int32_t)(sb.st_size / MEGABYTE);
if (bytesp != NULL)
- *bytesp = sb.st_size % MEGABYTE;
+ *bytesp = (u_int32_t)(sb.st_size % MEGABYTE);
/*
* Return the underlying filesystem blocksize, if available.
@@ -97,7 +108,7 @@ __os_ioinfo(dbenv, path, fhp, mbytesp, bytesp, iosizep)
* Check for a 0 size -- the HP MPE/iX architecture has st_blksize,
* but it's always 0.
*/
-#ifdef HAVE_ST_BLKSIZE
+#ifdef HAVE_STRUCT_STAT_ST_BLKSIZE
if (iosizep != NULL && (*iosizep = sb.st_blksize) == 0)
*iosizep = DB_DEF_IOSIZE;
#else
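
__os_ioinfo reports a file's size as megabytes plus a remainder and the filesystem's preferred I/O size; the diff adds an EINTR retry around fstat() and switches to the autoconf-style HAVE_STRUCT_STAT_ST_BLKSIZE guard. A compact sketch of the same bookkeeping, assuming a POSIX struct stat that has st_blksize:

#include <errno.h>
#include <sys/types.h>
#include <sys/stat.h>

#define MEGABYTE    (1024 * 1024)

int
file_io_info(int fd, unsigned long *mbytesp, unsigned long *bytesp,
    unsigned long *iosizep)
{
    struct stat sb;

    while (fstat(fd, &sb) == -1)        /* retry if a signal lands */
        if (errno != EINTR)
            return (errno);
    if (mbytesp != NULL)
        *mbytesp = (unsigned long)(sb.st_size / MEGABYTE);
    if (bytesp != NULL)
        *bytesp = (unsigned long)(sb.st_size % MEGABYTE);
    if (iosizep != NULL)                /* 8KB default if unreported */
        *iosizep = sb.st_blksize != 0 ?
            (unsigned long)sb.st_blksize : 8 * 1024;
    return (0);
}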
diff --git a/bdb/os/os_tmpdir.c b/bdb/os/os_tmpdir.c
index 0dff5c5b7f0..94645af5e71 100644
--- a/bdb/os/os_tmpdir.c
+++ b/bdb/os/os_tmpdir.c
@@ -1,14 +1,14 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1998, 1999, 2000
+ * Copyright (c) 1998-2002
* Sleepycat Software. All rights reserved.
*/
#include "db_config.h"
#ifndef lint
-static const char revid[] = "$Id: os_tmpdir.c,v 11.16 2001/01/08 20:42:06 bostic Exp $";
+static const char revid[] = "$Id: os_tmpdir.c,v 11.19 2002/01/11 15:53:02 bostic Exp $";
#endif /* not lint */
#ifndef NO_SYSTEM_INCLUDES
@@ -37,6 +37,8 @@ __os_tmpdir(dbenv, flags)
DB_ENV *dbenv;
u_int32_t flags;
{
+ int isdir;
+
/*
* !!!
* Don't change this to:
@@ -96,7 +98,7 @@ __os_tmpdir(dbenv, flags)
#endif
#ifdef DB_WIN32
/* Get the path to the temporary directory. */
- {int isdir, len;
+ {int len;
char *eos, temp[MAXPATHLEN + 1];
if ((len = GetTempPath(sizeof(temp) - 1, temp)) > 2) {
@@ -113,7 +115,7 @@ __os_tmpdir(dbenv, flags)
/* Step through the static list looking for a possibility. */
for (lp = list; *lp != NULL; ++lp)
- if (__os_exists(*lp, NULL) == 0)
+ if (__os_exists(*lp, &isdir) == 0 && isdir != 0)
return (__os_strdup(dbenv, *lp, &dbenv->db_tmp_dir));
return (0);
}
diff --git a/bdb/os/os_unlink.c b/bdb/os/os_unlink.c
index 56c401fe342..28b03afd1aa 100644
--- a/bdb/os/os_unlink.c
+++ b/bdb/os/os_unlink.c
@@ -1,14 +1,14 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1997, 1998, 1999, 2000
+ * Copyright (c) 1997-2002
* Sleepycat Software. All rights reserved.
*/
#include "db_config.h"
#ifndef lint
-static const char revid[] = "$Id: os_unlink.c,v 11.13 2000/11/30 00:58:42 ubell Exp $";
+static const char revid[] = "$Id: os_unlink.c,v 11.24 2002/07/12 18:56:53 bostic Exp $";
#endif /* not lint */
#ifndef NO_SYSTEM_INCLUDES
@@ -19,7 +19,42 @@ static const char revid[] = "$Id: os_unlink.c,v 11.13 2000/11/30 00:58:42 ubell
#endif
#include "db_int.h"
-#include "os_jump.h"
+
+/*
+ * __os_region_unlink --
+ * Remove a shared memory object file.
+ *
+ * PUBLIC: int __os_region_unlink __P((DB_ENV *, const char *));
+ */
+int
+__os_region_unlink(dbenv, path)
+ DB_ENV *dbenv;
+ const char *path;
+{
+#ifdef HAVE_QNX
+ int ret;
+ char *newname;
+
+ if ((ret = __os_shmname(dbenv, path, &newname)) != 0)
+ goto err;
+
+ if ((ret = shm_unlink(newname)) != 0) {
+ ret = __os_get_errno();
+ if (ret != ENOENT)
+ __db_err(dbenv, "shm_unlink: %s: %s",
+ newname, strerror(ret));
+ }
+err:
+ if (newname != NULL)
+ __os_free(dbenv, newname);
+ return (ret);
+#else
+ if (F_ISSET(dbenv, DB_ENV_OVERWRITE))
+ (void)__db_overwrite(dbenv, path);
+
+ return (__os_unlink(dbenv, path));
+#endif
+}
/*
* __os_unlink --
@@ -34,15 +69,16 @@ __os_unlink(dbenv, path)
{
int ret;
- ret = __db_jump.j_unlink != NULL ?
- __db_jump.j_unlink(path) :
+retry: ret = DB_GLOBAL(j_unlink) != NULL ?
+ DB_GLOBAL(j_unlink)(path) :
#ifdef HAVE_VXWORKS
unlink((char *)path);
#else
unlink(path);
#endif
if (ret == -1) {
- ret = __os_get_errno();
+ if ((ret = __os_get_errno()) == EINTR)
+ goto retry;
/*
* XXX
* We really shouldn't be looking at this value ourselves,
@@ -66,41 +102,8 @@ __os_unlink(dbenv, path)
/* FALLTHROUGH */
#endif
if (ret != ENOENT)
- __db_err(dbenv, "Unlink: %s: %s", path, strerror(ret));
+ __db_err(dbenv, "unlink: %s: %s", path, strerror(ret));
}
return (ret);
}
-
-/*
- * __os_region_unlink --
- * Remove a shared memory object file.
- *
- * PUBLIC: int __os_region_unlink __P((DB_ENV *, const char *));
- */
-int
-__os_region_unlink(dbenv, path)
- DB_ENV *dbenv;
- const char *path;
-{
-#ifdef HAVE_QNX
- int ret;
- char *newname;
-
- if ((ret = __os_shmname(dbenv, path, &newname)) != 0)
- goto err;
-
- if ((ret = shm_unlink(newname)) != 0) {
- ret = __os_get_errno();
- if (ret != ENOENT)
- __db_err(dbenv, "Shm_unlink: %s: %s",
- newname, strerror(ret));
- }
-err:
- if (newname != NULL)
- __os_free(newname, 0);
- return (ret);
-#else
- return (__os_unlink(dbenv, path));
-#endif
-}
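
The diff moves __os_region_unlink ahead of __os_unlink and teaches the non-QNX branch to overwrite the backing file first when DB_ENV_OVERWRITE is set; the QNX branch removes a POSIX shared memory object by name. The shm_unlink() part boiled down to a sketch (POSIX realtime extension; may need -lrt when linking):

#include <errno.h>
#include <stdio.h>
#include <sys/mman.h>

int
region_unlink(const char *name)
{
    int ret;

    if ((ret = shm_unlink(name)) != 0) {
        ret = errno;
        if (ret == ENOENT)      /* already gone: not an error */
            ret = 0;
        else
            fprintf(stderr, "shm_unlink %s: error %d\n", name, ret);
    }
    return (ret);
}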
diff --git a/bdb/os_vxworks/os_finit.c b/bdb/os_vxworks/os_finit.c
deleted file mode 100644
index 305f52f0996..00000000000
--- a/bdb/os_vxworks/os_finit.c
+++ /dev/null
@@ -1,57 +0,0 @@
-/*-
- * See the file LICENSE for redistribution information.
- *
- * Copyright (c) 1999, 2000
- * Sleepycat Software. All rights reserved.
- */
-
-#include "db_config.h"
-
-#ifndef lint
-static const char revid[] = "$Id: os_finit.c,v 1.1 2000/06/21 20:05:18 sue Exp $";
-#endif /* not lint */
-
-#include "db_int.h"
-
-/*
- * __os_fpinit --
- * Initialize a page in a regular file.
- *
- * PUBLIC: int __os_fpinit __P((DB_ENV *, DB_FH *, db_pgno_t, int, int));
- */
-int
-__os_fpinit(dbenv, fhp, pgno, pagecount, pagesize)
- DB_ENV *dbenv;
- DB_FH *fhp;
- db_pgno_t pgno;
- int pagecount, pagesize;
-{
- size_t nw, totalbytes, curbytes;
- int ret;
- char buf[1024];
-
- /*
- * Some VxWorks FS drivers do not zero-fill pages that were never
- * explicitly written to the file, they give you random garbage,
- * and that breaks DB.
- */
- if ((ret = __os_seek(dbenv,
- fhp, pagesize, pgno, 0, 0, DB_OS_SEEK_SET)) != 0)
- return (ret);
-
- memset(buf, 0, sizeof(buf));
- totalbytes = pagecount * pagesize;
-
- while (totalbytes > 0) {
- if (totalbytes > sizeof(buf))
- curbytes = sizeof(buf);
- else
- curbytes = totalbytes;
- if ((ret = __os_write(dbenv, fhp, buf, curbytes, &nw)) != 0)
- return (ret);
- if (nw != curbytes)
- return (EIO);
- totalbytes -= curbytes;
- }
- return (0);
-}
diff --git a/bdb/os_vxworks/os_abs.c b/bdb/os_vxworks/os_vx_abs.c
index 162a556d3fa..93e9be7269b 100644
--- a/bdb/os_vxworks/os_abs.c
+++ b/bdb/os_vxworks/os_vx_abs.c
@@ -1,14 +1,14 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1997, 1998, 1999, 2000
+ * Copyright (c) 1997-2002
* Sleepycat Software. All rights reserved.
*/
#include "db_config.h"
#ifndef lint
-static const char revid[] = "$Id: os_abs.c,v 1.4 2000/08/09 19:09:48 sue Exp $";
+static const char revid[] = "$Id: os_vx_abs.c,v 1.7 2002/01/11 15:53:02 bostic Exp $";
#endif /* not lint */
#include "db_int.h"
@@ -32,11 +32,11 @@ __os_abspath(path)
if ((dummy = iosDevFind((char *)path, &ptail)) == NULL)
return (0);
/*
- * If the routine used the default device, then we are not
- * an abs path.
+ * If the routine used a device, then ptail points to the
+ * rest and we are an abs path.
*/
- if (ptail == path)
- return (0);
+ if (ptail != path)
+ return (1);
/*
* If the path starts with a '/', then we are an absolute path,
* using the host machine, otherwise we are not.
diff --git a/bdb/os_vxworks/os_vx_config.c b/bdb/os_vxworks/os_vx_config.c
new file mode 100644
index 00000000000..810983b38ff
--- /dev/null
+++ b/bdb/os_vxworks/os_vx_config.c
@@ -0,0 +1,31 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1999-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: os_vx_config.c,v 1.4 2002/01/11 15:53:03 bostic Exp $";
+#endif /* not lint */
+
+#include "db_int.h"
+
+/*
+ * __os_fs_notzero --
+ * Return 1 if allocated filesystem blocks are not zeroed.
+ *
+ * PUBLIC: int __os_fs_notzero __P((void));
+ */
+int
+__os_fs_notzero()
+{
+ /*
+ * Some VxWorks FS drivers do not zero-fill pages that were never
+ * explicitly written to the file, they give you random garbage,
+ * and that breaks Berkeley DB.
+ */
+ return (1);
+}
diff --git a/bdb/os_vxworks/os_map.c b/bdb/os_vxworks/os_vx_map.c
index 7397995d1d1..8ad4f0765ce 100644
--- a/bdb/os_vxworks/os_map.c
+++ b/bdb/os_vxworks/os_vx_map.c
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1998, 1999, 2000
+ * Copyright (c) 1998-2002
* Sleepycat Software. All rights reserved.
*
* This code is derived from software contributed to Sleepycat Software by
@@ -11,7 +11,7 @@
#include "db_config.h"
#ifndef lint
-static const char revid[] = "$Id: os_map.c,v 1.14 2000/12/04 19:01:43 sue Exp $";
+static const char revid[] = "$Id: os_vx_map.c,v 1.21 2002/03/06 19:36:58 margo Exp $";
#endif /* not lint */
#ifndef NO_SYSTEM_INCLUDES
@@ -20,7 +20,6 @@ static const char revid[] = "$Id: os_map.c,v 1.14 2000/12/04 19:01:43 sue Exp $"
#endif
#include "db_int.h"
-#include "common_ext.h"
/*
* DB uses memory-mapped files for two things:
@@ -187,6 +186,7 @@ __os_unmapfile(dbenv, addr, len)
size_t len;
{
/* We cannot map in regular files in VxWorks. */
+ COMPQUIET(dbenv, NULL);
COMPQUIET(addr, NULL);
COMPQUIET(len, 0);
return (EINVAL);
@@ -226,10 +226,11 @@ __os_segdata_init(dbenv)
* Currently not called. This function should be called if the
* user creates a function to unload or shutdown.
*
- * PUBLIC: int __os_segdata_destroy __P((void));
+ * PUBLIC: int __os_segdata_destroy __P((DB_ENV *));
*/
int
-__os_segdata_destroy()
+__os_segdata_destroy(dbenv)
+ DB_ENV *dbenv;
{
os_segdata_t *p;
int i;
@@ -241,17 +242,17 @@ __os_segdata_destroy()
for (i = 0; i < __os_segdata_size; i++) {
p = &__os_segdata[i];
if (p->name != NULL) {
- __os_freestr(p->name);
+ __os_free(dbenv, p->name);
p->name = NULL;
}
if (p->segment != NULL) {
- __os_free(p->segment, p->size);
+ __os_free(dbenv, p->segment);
p->segment = NULL;
}
p->size = 0;
}
- __os_free(__os_segdata, __os_segdata_size * sizeof(os_segdata_t));
+ __os_free(dbenv, __os_segdata);
__os_segdata = NULL;
__os_segdata_size = 0;
DB_END_SINGLE_THREAD;
@@ -283,7 +284,7 @@ __os_segdata_allocate(dbenv, name, infop, rp)
if ((ret = __os_calloc(dbenv, 1, rp->size, &p->segment)) != 0)
return (ret);
if ((ret = __os_strdup(dbenv, name, &p->name)) != 0) {
- __os_free(p->segment, rp->size);
+ __os_free(dbenv, p->segment);
p->segment = NULL;
return (ret);
}
@@ -329,7 +330,7 @@ __os_segdata_new(dbenv, segidp)
*/
newsize = __os_segdata_size + OS_SEGDATA_INCREMENT;
if ((ret = __os_realloc(dbenv, newsize * sizeof(os_segdata_t),
- NULL, &__os_segdata)) != 0)
+ &__os_segdata)) != 0)
return (ret);
memset(&__os_segdata[__os_segdata_size],
0, OS_SEGDATA_INCREMENT * sizeof(os_segdata_t));
@@ -423,11 +424,11 @@ __os_segdata_release(dbenv, rp, is_locked)
DB_BEGIN_SINGLE_THREAD;
p = &__os_segdata[rp->segid];
if (p->name != NULL) {
- __os_freestr(p->name);
+ __os_free(dbenv, p->name);
p->name = NULL;
}
if (p->segment != NULL) {
- __os_free(p->segment, p->size);
+ __os_free(dbenv, p->segment);
p->segment = NULL;
}
p->size = 0;
diff --git a/bdb/os_win32/os_abs.c b/bdb/os_win32/os_abs.c
index 7b1e3fd05d5..c8bead83ec3 100644
--- a/bdb/os_win32/os_abs.c
+++ b/bdb/os_win32/os_abs.c
@@ -1,14 +1,14 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1997, 1998, 1999, 2000
+ * Copyright (c) 1997-2002
* Sleepycat Software. All rights reserved.
*/
#include "db_config.h"
#ifndef lint
-static const char revid[] = "$Id: os_abs.c,v 11.3 2000/02/14 03:00:06 bostic Exp $";
+static const char revid[] = "$Id: os_abs.c,v 11.5 2002/01/11 15:53:05 bostic Exp $";
#endif /* not lint */
#include "db_int.h"
diff --git a/bdb/os_win32/os_clock.c b/bdb/os_win32/os_clock.c
new file mode 100644
index 00000000000..1bf154f9da9
--- /dev/null
+++ b/bdb/os_win32/os_clock.c
@@ -0,0 +1,37 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2001-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: os_clock.c,v 1.7 2002/07/12 18:56:53 bostic Exp $";
+#endif /* not lint */
+
+#include <sys/types.h>
+#include <sys/timeb.h>
+#include <string.h>
+
+#include "db_int.h"
+
+/*
+ * __os_clock --
+ * Return the current time-of-day clock in seconds and microseconds.
+ */
+int
+__os_clock(dbenv, secsp, usecsp)
+ DB_ENV *dbenv;
+ u_int32_t *secsp, *usecsp; /* Seconds and microseconds. */
+{
+ struct _timeb now;
+
+ _ftime(&now);
+ if (secsp != NULL)
+ *secsp = (u_int32_t)now.time;
+ if (usecsp != NULL)
+ *usecsp = now.millitm * 1000;
+ return (0);
+}
diff --git a/bdb/os_win32/os_config.c b/bdb/os_win32/os_config.c
new file mode 100644
index 00000000000..a2c220daf1a
--- /dev/null
+++ b/bdb/os_win32/os_config.c
@@ -0,0 +1,29 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1999-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: os_config.c,v 11.13 2002/01/11 15:53:06 bostic Exp $";
+#endif /* not lint */
+
+#include "db_int.h"
+
+/*
+ * __os_fs_notzero --
+ * Return 1 if allocated filesystem blocks are not zeroed.
+ */
+int
+__os_fs_notzero()
+{
+ /*
+ * Windows/NT zero-fills pages that were never explicitly written to
+ * the file. Windows 95/98 gives you random garbage, and that breaks
+ * Berkeley DB.
+ */
+ return (__os_is_winnt() ? 0 : 1);
+}
diff --git a/bdb/os_win32/os_dir.c b/bdb/os_win32/os_dir.c
index d37b7601051..3f47c4960b0 100644
--- a/bdb/os_win32/os_dir.c
+++ b/bdb/os_win32/os_dir.c
@@ -1,18 +1,17 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1997, 1998, 1999, 2000
+ * Copyright (c) 1997-2002
* Sleepycat Software. All rights reserved.
*/
#include "db_config.h"
#ifndef lint
-static const char revid[] = "$Id: os_dir.c,v 11.4 2000/03/28 21:50:17 ubell Exp $";
+static const char revid[] = "$Id: os_dir.c,v 11.12 2002/07/12 18:56:54 bostic Exp $";
#endif /* not lint */
#include "db_int.h"
-#include "os_jump.h"
/*
* __os_dirlist --
@@ -26,12 +25,16 @@ __os_dirlist(dbenv, dir, namesp, cntp)
int *cntp;
{
struct _finddata_t fdata;
+#ifdef _WIN64
+ intptr_t dirhandle;
+#else
long dirhandle;
+#endif
int arraysz, cnt, finished, ret;
char **names, filespec[MAXPATHLEN];
- if (__db_jump.j_dirlist != NULL)
- return (__db_jump.j_dirlist(dir, namesp, cntp));
+ if (DB_GLOBAL(j_dirlist) != NULL)
+ return (DB_GLOBAL(j_dirlist)(dir, namesp, cntp));
(void)snprintf(filespec, sizeof(filespec), "%s/*", dir);
if ((dirhandle = _findfirst(filespec, &fdata)) == -1)
@@ -43,12 +46,12 @@ __os_dirlist(dbenv, dir, namesp, cntp)
if (cnt >= arraysz) {
arraysz += 100;
if ((ret = __os_realloc(dbenv,
- arraysz * sizeof(names[0]), NULL, &names)) != 0)
+ arraysz * sizeof(names[0]), &names)) != 0)
goto nomem;
}
if ((ret = __os_strdup(dbenv, fdata.name, &names[cnt])) != 0)
goto nomem;
- if (_findnext(dirhandle,&fdata) != 0)
+ if (_findnext(dirhandle, &fdata) != 0)
finished = 1;
}
_findclose(dirhandle);
@@ -58,7 +61,7 @@ __os_dirlist(dbenv, dir, namesp, cntp)
return (0);
nomem: if (names != NULL)
- __os_dirfree(names, cnt);
+ __os_dirfree(dbenv, names, cnt);
return (ret);
}
@@ -67,16 +70,17 @@ nomem: if (names != NULL)
* Free the list of files.
*/
void
-__os_dirfree(names, cnt)
+__os_dirfree(dbenv, names, cnt)
+ DB_ENV *dbenv;
char **names;
int cnt;
{
- if (__db_jump.j_dirfree != NULL) {
- __db_jump.j_dirfree(names, cnt);
+ if (DB_GLOBAL(j_dirfree) != NULL) {
+ DB_GLOBAL(j_dirfree)(names, cnt);
return;
}
while (cnt > 0)
- __os_free(names[--cnt], 0);
- __os_free(names, 0);
+ __os_free(dbenv, names[--cnt]);
+ __os_free(dbenv, names);
}
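
__os_dirlist builds its result with the classic grow-by-chunks realloc idiom, extending the name array 100 slots at a time and unwinding everything on allocation failure. The same shape against POSIX opendir()/readdir() rather than the Win32 _findfirst() family (an illustrative counterpart, not the code above):

#include <dirent.h>
#include <stdlib.h>
#include <string.h>

int
dir_list(const char *dir, char ***namesp, int *cntp)
{
    DIR *dp;
    struct dirent *ent;
    char **names, **tmp;
    int arraysz, cnt;

    if ((dp = opendir(dir)) == NULL)
        return (-1);
    names = NULL;
    for (arraysz = cnt = 0; (ent = readdir(dp)) != NULL; ++cnt) {
        if (cnt >= arraysz) {           /* grow 100 entries at a time */
            arraysz += 100;
            if ((tmp = realloc(names,
                arraysz * sizeof(names[0]))) == NULL)
                goto nomem;
            names = tmp;
        }
        if ((names[cnt] = strdup(ent->d_name)) == NULL)
            goto nomem;
    }
    closedir(dp);
    *namesp = names;
    *cntp = cnt;
    return (0);

nomem:  while (cnt > 0)
        free(names[--cnt]);
    free(names);
    closedir(dp);
    return (-1);
}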
diff --git a/bdb/os_win32/os_errno.c b/bdb/os_win32/os_errno.c
index 8324826b6f9..d6fac82e6f3 100644
--- a/bdb/os_win32/os_errno.c
+++ b/bdb/os_win32/os_errno.c
@@ -1,14 +1,14 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1999, 2000
+ * Copyright (c) 1999-2002
* Sleepycat Software. All rights reserved.
*/
#include "db_config.h"
#ifndef lint
-static const char revid[] = "$Id: os_errno.c,v 11.5 2000/11/30 00:58:43 ubell Exp $";
+static const char revid[] = "$Id: os_errno.c,v 11.10 2002/07/12 04:05:00 mjc Exp $";
#endif /* not lint */
#include "db_int.h"
@@ -58,13 +58,7 @@ __os_win32_errno(void)
DWORD last_error;
int ret;
- /*
- * It's possible that errno was set after the error.
- * The caller must take care to set it to 0 before
- * any system operation.
- */
- if (__os_get_errno() != 0)
- return (__os_get_errno());
+ /* Ignore errno - we used to check it here. */
last_error = GetLastError();
@@ -113,6 +107,7 @@ __os_win32_errno(void)
break;
case ERROR_FILE_EXISTS:
+ case ERROR_ALREADY_EXISTS:
ret = EEXIST;
break;
@@ -133,6 +128,10 @@ __os_win32_errno(void)
ret = EBUSY;
break;
+ case ERROR_RETRY:
+ ret = EINTR;
+ break;
+
case 0:
ret = EFAULT;
break;
diff --git a/bdb/os_win32/os_fid.c b/bdb/os_win32/os_fid.c
index c66ac52102b..1190ad26e81 100644
--- a/bdb/os_win32/os_fid.c
+++ b/bdb/os_win32/os_fid.c
@@ -1,14 +1,14 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Copyright (c) 1996-2002
* Sleepycat Software. All rights reserved.
*/
#include "db_config.h"
#ifndef lint
-static const char revid[] = "$Id: os_fid.c,v 11.7 2000/10/26 14:18:08 bostic Exp $";
+static const char revid[] = "$Id: os_fid.c,v 11.15 2002/08/26 14:37:39 margo Exp $";
#endif /* not lint */
#include "db_int.h"
@@ -40,10 +40,11 @@ __os_fileid(dbenv, fname, unique_okay, fidp)
* Can't think of a better solution right now.
*/
DB_FH fh;
- HANDLE handle;
BY_HANDLE_FILE_INFORMATION fi;
BOOL retval = FALSE;
+ DB_ASSERT(fname != NULL);
+
/* Clear the buffer. */
memset(fidp, 0, DB_FILE_ID_LEN);
@@ -62,7 +63,7 @@ __os_fileid(dbenv, fname, unique_okay, fidp)
* interesting properties in base 2.
*/
if (fid_serial == SERIAL_INIT)
- fid_serial = (u_int32_t)getpid();
+ __os_id(&fid_serial);
else
fid_serial += 100000;
@@ -74,15 +75,11 @@ __os_fileid(dbenv, fname, unique_okay, fidp)
return (ret);
/* File open, get its info */
- handle = (HANDLE)_get_osfhandle(fh.fd);
- if (handle == INVALID_HANDLE_VALUE)
+ if ((retval = GetFileInformationByHandle(fh.handle, &fi)) == FALSE)
ret = __os_win32_errno();
- else
- if ((retval = GetFileInformationByHandle(handle, &fi)) == FALSE)
- ret = __os_win32_errno();
- __os_closehandle(&fh);
+ __os_closehandle(dbenv, &fh);
- if (handle == INVALID_HANDLE_VALUE || retval == FALSE)
+ if (retval == FALSE)
return (ret);
/*
@@ -113,6 +110,7 @@ __os_fileid(dbenv, fname, unique_okay, fidp)
tmp = (u_int32_t)fi.nFileIndexHigh;
for (p = (u_int8_t *)&tmp, i = sizeof(u_int32_t); i > 0; --i)
*fidp++ = *p++;
+
if (unique_okay) {
/*
* Use the system time to try to get a unique value
diff --git a/bdb/os_win32/os_finit.c b/bdb/os_win32/os_finit.c
deleted file mode 100644
index 61d2a33c7b4..00000000000
--- a/bdb/os_win32/os_finit.c
+++ /dev/null
@@ -1,60 +0,0 @@
-/*-
- * See the file LICENSE for redistribution information.
- *
- * Copyright (c) 1999, 2000
- * Sleepycat Software. All rights reserved.
- */
-
-#include "db_config.h"
-
-#ifndef lint
-static const char revid[] = "$Id: os_finit.c,v 11.9 2000/03/29 20:50:52 ubell Exp $";
-#endif /* not lint */
-
-#include "db_int.h"
-
-/*
- * __os_fpinit --
- * Initialize a page in a regular file.
- *
- * PUBLIC: int __os_fpinit __P((DB_ENV *, DB_FH *, db_pgno_t, int, int));
- */
-int
-__os_fpinit(dbenv, fhp, pgno, pagecount, pagesize)
- DB_ENV *dbenv;
- DB_FH *fhp;
- db_pgno_t pgno;
- int pagecount, pagesize;
-{
- size_t nw, totalbytes, curbytes;
- int ret;
- char buf[1024];
-
- /*
- * Windows/NT zero-fills pages that were never explicitly written to
- * the file. Windows 95/98 gives you random garbage, and that breaks
- * DB.
- */
- if (__os_is_winnt())
- return (0);
-
- if ((ret = __os_seek(dbenv,
- fhp, pagesize, pgno, 0, 0, DB_OS_SEEK_SET)) != 0)
- return (ret);
-
- memset(buf, 0, sizeof(buf));
- totalbytes = pagecount * pagesize;
-
- while (totalbytes > 0) {
- if (totalbytes > sizeof(buf))
- curbytes = sizeof(buf);
- else
- curbytes = totalbytes;
- if ((ret = __os_write(dbenv, fhp, buf, curbytes, &nw)) != 0)
- return (ret);
- if (nw != curbytes)
- return (EIO);
- totalbytes -= curbytes;
- }
- return (0);
-}
diff --git a/bdb/os_win32/os_fsync.c b/bdb/os_win32/os_fsync.c
new file mode 100644
index 00000000000..6fd3e1dcdf4
--- /dev/null
+++ b/bdb/os_win32/os_fsync.c
@@ -0,0 +1,59 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: os_fsync.c,v 11.15 2002/07/12 18:56:54 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <fcntl.h> /* XXX: Required by __hp3000s900 */
+#include <unistd.h>
+#include <string.h>
+#endif
+
+#include "db_int.h"
+
+/*
+ * __os_fsync --
+ * Flush a file descriptor.
+ *
+ * PUBLIC: int __os_fsync __P((DB_ENV *, DB_FH *));
+ */
+int
+__os_fsync(dbenv, fhp)
+ DB_ENV *dbenv;
+ DB_FH *fhp;
+{
+ BOOL success;
+ int ret;
+
+ /*
+ * Do nothing if the file descriptor has been marked as not requiring
+ * any sync to disk.
+ */
+ if (F_ISSET(fhp, DB_FH_NOSYNC))
+ return (0);
+
+ ret = 0;
+ do {
+ if (DB_GLOBAL(j_fsync) != NULL)
+ success = (DB_GLOBAL(j_fsync)(fhp->fd) == 0);
+ else {
+ success = FlushFileBuffers(fhp->handle);
+ if (!success)
+ __os_set_errno(__os_win32_errno());
+ }
+ } while (!success && (ret = __os_get_errno()) == EINTR);
+
+ if (ret != 0)
+ __db_err(dbenv, "fsync %s", strerror(ret));
+ return (ret);
+}
diff --git a/bdb/os_win32/os_handle.c b/bdb/os_win32/os_handle.c
new file mode 100644
index 00000000000..7db9c3da977
--- /dev/null
+++ b/bdb/os_win32/os_handle.c
@@ -0,0 +1,126 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1998-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: os_handle.c,v 11.30 2002/07/12 18:56:54 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <fcntl.h>
+#include <string.h>
+#include <unistd.h>
+#endif
+
+#include "db_int.h"
+
+/*
+ * __os_openhandle --
+ * Open a file, using POSIX 1003.1 open flags.
+ *
+ * PUBLIC: int __os_openhandle __P((DB_ENV *, const char *, int, int, DB_FH *));
+ */
+int
+__os_openhandle(dbenv, name, flags, mode, fhp)
+ DB_ENV *dbenv;
+ const char *name;
+ int flags, mode;
+ DB_FH *fhp;
+{
+ int ret, nrepeat;
+
+ memset(fhp, 0, sizeof(*fhp));
+ fhp->handle = INVALID_HANDLE_VALUE;
+
+ /* If the application specified an interface, use it. */
+ if (DB_GLOBAL(j_open) != NULL) {
+ if ((fhp->fd = DB_GLOBAL(j_open)(name, flags, mode)) == -1)
+ return (__os_get_errno());
+ F_SET(fhp, DB_FH_VALID);
+ return (0);
+ }
+
+ for (nrepeat = 1; nrepeat < 4; ++nrepeat) {
+ ret = 0;
+ fhp->fd = open(name, flags, mode);
+
+ if (fhp->fd == -1) {
+ /*
+ * If it's a "temporary" error, we retry up to 3 times,
+ * waiting up to 12 seconds. While it's not a problem
+ * if we can't open a database, an inability to open a
+ * log file is cause for serious dismay.
+ */
+ ret = __os_get_errno();
+ if (ret == ENFILE || ret == EMFILE || ret == ENOSPC) {
+ (void)__os_sleep(dbenv, nrepeat * 2, 0);
+ continue;
+ }
+
+ /*
+ * If it was an EINTR it's reasonable to retry
+ * immediately, and arbitrarily often.
+ */
+ if (ret == EINTR) {
+ --nrepeat;
+ continue;
+ }
+ } else {
+ F_SET(fhp, DB_FH_VALID);
+ }
+ break;
+ }
+
+ return (ret);
+}
+
+/*
+ * __os_closehandle --
+ * Close a file.
+ *
+ * PUBLIC: int __os_closehandle __P((DB_ENV *, DB_FH *));
+ */
+int
+__os_closehandle(dbenv, fhp)
+ DB_ENV *dbenv;
+ DB_FH *fhp;
+{
+ BOOL success;
+ int ret;
+
+ COMPQUIET(dbenv, NULL);
+ /* Don't close file descriptors that were never opened. */
+ DB_ASSERT(F_ISSET(fhp, DB_FH_VALID) &&
+ ((fhp->fd != -1) || (fhp->handle != INVALID_HANDLE_VALUE)));
+
+ ret = 0;
+
+ do {
+ if (DB_GLOBAL(j_close) != NULL)
+ success = (DB_GLOBAL(j_close)(fhp->fd) == 0);
+ else if (fhp->handle != INVALID_HANDLE_VALUE) {
+ success = CloseHandle(fhp->handle);
+ if (!success)
+ __os_set_errno(__os_win32_errno());
+ }
+ else
+ success = (close(fhp->fd) == 0);
+ } while (!success && (ret = __os_get_errno()) == EINTR);
+
+ /*
+ * Smash the POSIX file descriptor -- it's never tested, but we want
+ * to catch any mistakes.
+ */
+ fhp->fd = -1;
+ fhp->handle = INVALID_HANDLE_VALUE;
+ F_CLR(fhp, DB_FH_VALID);
+
+ return (ret);
+}
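
The new __os_openhandle retries a failed open: immediately and indefinitely on EINTR, and up to three more times with a 2/4/6-second backoff when the failure is a transient resource shortage. The same policy against plain POSIX open(2), as a sketch (the helper name and schedule mirror the code above, nothing else is implied):

#include <errno.h>
#include <fcntl.h>
#include <unistd.h>

int
open_with_retry(const char *name, int flags, int mode)
{
    int fd, nrepeat;

    for (nrepeat = 1; nrepeat < 4; ++nrepeat) {
        if ((fd = open(name, flags, mode)) != -1)
            return (fd);
        if (errno == EINTR) {       /* retry immediately, forever */
            --nrepeat;
            continue;
        }
        if (errno == ENFILE || errno == EMFILE || errno == ENOSPC)
            sleep((unsigned)nrepeat * 2);   /* transient: back off */
        else
            break;                  /* hard failure: give up */
    }
    return (-1);
}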
diff --git a/bdb/os_win32/os_map.c b/bdb/os_win32/os_map.c
index d7b2839ed29..1f16c9fead4 100644
--- a/bdb/os_win32/os_map.c
+++ b/bdb/os_win32/os_map.c
@@ -1,22 +1,21 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Copyright (c) 1996-2002
* Sleepycat Software. All rights reserved.
*/
#include "db_config.h"
#ifndef lint
-static const char revid[] = "$Id: os_map.c,v 11.22 2000/10/26 14:18:08 bostic Exp $";
+static const char revid[] = "$Id: os_map.c,v 11.38 2002/09/10 02:35:48 bostic Exp $";
#endif /* not lint */
#include "db_int.h"
-#include "os_jump.h"
static int __os_map
__P((DB_ENV *, char *, REGINFO *, DB_FH *, size_t, int, int, int, void **));
-static int __os_unique_name __P((char *, int, char *));
+static int __os_unique_name __P((char *, HANDLE, char *, size_t));
/*
* __os_r_sysattach --
@@ -37,6 +36,7 @@ __os_r_sysattach(dbenv, infop, rp)
* properly ordered, our caller has already taken care of that.
*/
if ((ret = __os_open(dbenv, infop->name,
+ DB_OSO_DIRECT |
F_ISSET(infop, REGION_CREATE_OK) ? DB_OSO_CREATE: 0,
infop->mode, &fh)) != 0) {
__db_err(dbenv, "%s: %s", infop->name, db_strerror(ret));
@@ -63,7 +63,7 @@ __os_r_sysattach(dbenv, infop, rp)
if (ret == 0 && is_system == 1)
rp->segid = 1;
- (void)__os_closehandle(&fh);
+ (void)__os_closehandle(dbenv, &fh);
return (ret);
}
@@ -82,17 +82,19 @@ __os_r_sysdetach(dbenv, infop, destroy)
if (infop->wnt_handle != NULL) {
(void)CloseHandle(*((HANDLE*)(infop->wnt_handle)));
- __os_free(infop->wnt_handle, sizeof(HANDLE));
+ __os_free(dbenv, infop->wnt_handle);
}
- __os_set_errno(0);
ret = !UnmapViewOfFile(infop->addr) ? __os_win32_errno() : 0;
if (ret != 0)
__db_err(dbenv, "UnmapViewOfFile: %s", strerror(ret));
- if (F_ISSET(dbenv, DB_ENV_SYSTEM_MEM) && destroy &&
- (t_ret = __os_unlink(dbenv, infop->name)) != 0 && ret == 0)
- ret = t_ret;
+ if (!F_ISSET(dbenv, DB_ENV_SYSTEM_MEM) && destroy) {
+ if (F_ISSET(dbenv, DB_ENV_OVERWRITE))
+ (void)__db_overwrite(dbenv, infop->name);
+ if ((t_ret = __os_unlink(dbenv, infop->name)) != 0 && ret == 0)
+ ret = t_ret;
+ }
return (ret);
}
@@ -111,8 +113,8 @@ __os_mapfile(dbenv, path, fhp, len, is_rdonly, addr)
void **addr;
{
/* If the user replaced the map call, call through their interface. */
- if (__db_jump.j_map != NULL)
- return (__db_jump.j_map(path, len, 0, is_rdonly, addr));
+ if (DB_GLOBAL(j_map) != NULL)
+ return (DB_GLOBAL(j_map)(path, len, 0, is_rdonly, addr));
return (__os_map(dbenv, path, NULL, fhp, len, 0, 0, is_rdonly, addr));
}
@@ -128,10 +130,9 @@ __os_unmapfile(dbenv, addr, len)
size_t len;
{
/* If the user replaced the map call, call through their interface. */
- if (__db_jump.j_unmap != NULL)
- return (__db_jump.j_unmap(addr, len));
+ if (DB_GLOBAL(j_unmap) != NULL)
+ return (DB_GLOBAL(j_unmap)(addr, len));
- __os_set_errno(0);
return (!UnmapViewOfFile(addr) ? __os_win32_errno() : 0);
}
@@ -151,23 +152,55 @@ __os_unmapfile(dbenv, addr, len)
* foo.bar == Foo.Bar (FAT file system)
* foo.bar != Foo.Bar (NTFS)
*
- * The best solution is to use the identifying number in the file
+ * The best solution is to use the file index, found in the file
* information structure (similar to UNIX inode #).
+ *
+ * When a file is deleted, its file index may be reused,
+ * but if the unique name has not gone from its namespace,
+ * we may get a conflict. So to ensure some tie in to the
+ * original pathname, we also use the creation time and the
+ * file basename. This is not a perfect system, but it
+ * should work for all but anomalous test cases.
+ *
*/
static int
-__os_unique_name(orig_path, fd, result_path)
+__os_unique_name(orig_path, hfile, result_path, result_path_len)
char *orig_path, *result_path;
- int fd;
+ HANDLE hfile;
+ size_t result_path_len;
{
BY_HANDLE_FILE_INFORMATION fileinfo;
+ char *basename, *p;
- __os_set_errno(0);
- if (!GetFileInformationByHandle(
- (HANDLE)_get_osfhandle(fd), &fileinfo))
+ /*
+ * In Windows, pathname components are delimited by '/' or '\', and
+ * if neither is present, we need to strip off leading drive letter
+ * (e.g. c:foo.txt).
+ */
+ basename = strrchr(orig_path, '/');
+ p = strrchr(orig_path, '\\');
+ if (basename == NULL || (p != NULL && p > basename))
+ basename = p;
+ if (basename == NULL)
+ basename = strrchr(orig_path, ':');
+
+ if (basename == NULL)
+ basename = orig_path;
+ else
+ basename++;
+
+ if (!GetFileInformationByHandle(hfile, &fileinfo))
return (__os_win32_errno());
- (void)sprintf(result_path, "%ld.%ld.%ld",
+
+ (void)snprintf(result_path, result_path_len,
+ "__db_shmem.%8.8lx.%8.8lx.%8.8lx.%8.8lx.%8.8lx.%s",
fileinfo.dwVolumeSerialNumber,
- fileinfo.nFileIndexHigh, fileinfo.nFileIndexLow);
+ fileinfo.nFileIndexHigh,
+ fileinfo.nFileIndexLow,
+ fileinfo.ftCreationTime.dwHighDateTime,
+ fileinfo.ftCreationTime.dwLowDateTime,
+ basename);
+
return (0);
}
@@ -187,10 +220,9 @@ __os_map(dbenv, path, infop, fhp, len, is_region, is_system, is_rdonly, addr)
{
HANDLE hMemory;
REGENV *renv;
- int ret;
- void *pMemory;
+ int ret, use_pagefile;
char shmem_name[MAXPATHLEN];
- int use_pagefile;
+ void *pMemory;
ret = 0;
if (infop != NULL)
@@ -202,12 +234,9 @@ __os_map(dbenv, path, infop, fhp, len, is_region, is_system, is_rdonly, addr)
* If creating a region in system space, get a matching name in the
* paging file namespace.
*/
- if (use_pagefile) {
- (void)strcpy(shmem_name, "__db_shmem.");
- if ((ret = __os_unique_name(path, fhp->fd,
- &shmem_name[strlen(shmem_name)])) != 0)
- return (ret);
- }
+ if (use_pagefile && (ret = __os_unique_name(
+ path, fhp->handle, shmem_name, sizeof(shmem_name))) != 0)
+ return (ret);
/*
* XXX
@@ -235,7 +264,6 @@ __os_map(dbenv, path, infop, fhp, len, is_region, is_system, is_rdonly, addr)
* the section.
*/
hMemory = NULL;
- __os_set_errno(0);
if (use_pagefile)
hMemory = OpenFileMapping(
is_rdonly ? FILE_MAP_READ : FILE_MAP_ALL_ACCESS,
@@ -244,24 +272,23 @@ __os_map(dbenv, path, infop, fhp, len, is_region, is_system, is_rdonly, addr)
if (hMemory == NULL)
hMemory = CreateFileMapping(
- use_pagefile ?
- (HANDLE)0xFFFFFFFF : (HANDLE)_get_osfhandle(fhp->fd),
+ use_pagefile ? (HANDLE)-1 : fhp->handle,
0,
is_rdonly ? PAGE_READONLY : PAGE_READWRITE,
- 0, len,
+ 0, (DWORD)len,
use_pagefile ? shmem_name : NULL);
if (hMemory == NULL) {
- __db_err(dbenv,
- "OpenFileMapping: %s", strerror(__os_win32_errno()));
- return (__os_win32_errno());
+ ret = __os_win32_errno();
+ __db_err(dbenv, "OpenFileMapping: %s", strerror(ret));
+ return (ret);
}
pMemory = MapViewOfFile(hMemory,
(is_rdonly ? FILE_MAP_READ : FILE_MAP_ALL_ACCESS), 0, 0, len);
if (pMemory == NULL) {
- __db_err(dbenv,
- "MapViewOfFile: %s", strerror(__os_win32_errno()));
- return (__os_win32_errno());
+ ret = __os_win32_errno();
+ __db_err(dbenv, "MapViewOfFile: %s", strerror(ret));
+ return (ret);
}
/*
@@ -279,8 +306,8 @@ __os_map(dbenv, path, infop, fhp, len, is_region, is_system, is_rdonly, addr)
* errors, it just means we leak the memory.
*/
if (use_pagefile && infop != NULL) {
- if (__os_malloc(NULL,
- sizeof(HANDLE), NULL, &infop->wnt_handle) == 0)
+ if (__os_malloc(dbenv,
+ sizeof(HANDLE), &infop->wnt_handle) == 0)
memcpy(infop->wnt_handle, &hMemory, sizeof(HANDLE));
} else
CloseHandle(hMemory);
@@ -295,7 +322,7 @@ __os_map(dbenv, path, infop, fhp, len, is_region, is_system, is_rdonly, addr)
* the REGINFO structure so that they do so.
*/
renv = (REGENV *)pMemory;
- if (renv->magic == 0)
+ if (renv->magic == 0) {
if (F_ISSET(infop, REGION_CREATE_OK))
F_SET(infop, REGION_CREATE);
else {
@@ -303,6 +330,7 @@ __os_map(dbenv, path, infop, fhp, len, is_region, is_system, is_rdonly, addr)
pMemory = NULL;
ret = EAGAIN;
}
+ }
}
*addr = pMemory;
diff --git a/bdb/os_win32/os_open.c b/bdb/os_win32/os_open.c
index 7ecd96126df..c8bae54d585 100644
--- a/bdb/os_win32/os_open.c
+++ b/bdb/os_win32/os_open.c
@@ -1,14 +1,14 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1997, 1998, 1999, 2000
+ * Copyright (c) 1997-2002
* Sleepycat Software. All rights reserved.
*/
#include "db_config.h"
#ifndef lint
-static const char revid[] = "$Id: os_open.c,v 11.9 2000/11/30 00:58:43 ubell Exp $";
+static const char revid[] = "$Id: os_open.c,v 11.21 2002/07/12 18:56:55 bostic Exp $";
#endif /* not lint */
#ifndef NO_SYSTEM_INCLUDES
@@ -21,9 +21,6 @@ static const char revid[] = "$Id: os_open.c,v 11.9 2000/11/30 00:58:43 ubell Exp
#endif
#include "db_int.h"
-#include "os_jump.h"
-
-int __os_win32_errno __P((void));
/*
* __os_open --
@@ -38,17 +35,26 @@ __os_open(dbenv, name, flags, mode, fhp)
DB_FH *fhp;
{
DWORD bytesWritten;
- HANDLE wh;
- u_int32_t log_size;
+ u_int32_t log_size, pagesize, sectorsize;
int access, attr, oflags, share, createflag;
int ret, nrepeat;
+ char *drive, dbuf[4]; /* <letter><colon><slosh><nul> */
+
+#ifdef DIAGNOSTIC
+#define OKFLAGS \
+ (DB_OSO_CREATE | DB_OSO_DIRECT | DB_OSO_EXCL | DB_OSO_LOG | \
+ DB_OSO_RDONLY | DB_OSO_REGION | DB_OSO_SEQ | DB_OSO_TEMP | \
+ DB_OSO_TRUNC)
+ if ((ret = __db_fchk(dbenv, "__os_open", flags, OKFLAGS)) != 0)
+ return (ret);
+#endif
/*
* The "public" interface to the __os_open routine passes around POSIX
* 1003.1 flags, not DB flags. If the user has defined their own open
* interface, use the POSIX flags.
*/
- if (__db_jump.j_open != NULL) {
+ if (DB_GLOBAL(j_open) != NULL) {
oflags = O_BINARY | O_NOINHERIT;
if (LF_ISSET(DB_OSO_CREATE))
@@ -76,10 +82,15 @@ __os_open(dbenv, name, flags, mode, fhp)
return (__os_openhandle(dbenv, name, oflags, mode, fhp));
}
+ ret = 0;
+
if (LF_ISSET(DB_OSO_LOG))
log_size = fhp->log_size; /* XXX: Gag. */
+ pagesize = fhp->pagesize;
+
memset(fhp, 0, sizeof(*fhp));
+ fhp->fd = -1;
/*
* Otherwise, use the Windows/32 CreateFile interface so that we can
@@ -97,8 +108,6 @@ __os_open(dbenv, name, flags, mode, fhp)
* largely meaningless on FAT, the most common file system, which
* only has a "readable" and "writeable" flag, applying to all users.
*/
- wh = INVALID_HANDLE_VALUE;
-
access = GENERIC_READ;
if (!LF_ISSET(DB_OSO_RDONLY))
access |= GENERIC_WRITE;
@@ -134,11 +143,28 @@ __os_open(dbenv, name, flags, mode, fhp)
if (LF_ISSET(DB_OSO_TEMP))
attr |= FILE_FLAG_DELETE_ON_CLOSE;
- for (nrepeat = 1; nrepeat < 4; ++nrepeat) {
- ret = 0;
- __os_set_errno(0);
- wh = CreateFile(name, access, share, NULL, createflag, attr, 0);
- if (wh == INVALID_HANDLE_VALUE) {
+ /*
+ * We can turn filesystem buffering off if the page size is a
+ * multiple of the disk's sector size. To find the sector size,
+ * we call GetDiskFreeSpace, which expects a drive name like "d:\\"
+ * or NULL for the current disk (i.e., a relative path)
+ */
+ if (LF_ISSET(DB_OSO_DIRECT) && pagesize != 0 && name[0] != '\0') {
+ if (name[1] == ':') {
+ drive = dbuf;
+ snprintf(dbuf, sizeof(dbuf), "%c:\\", name[0]);
+ } else
+ drive = NULL;
+
+ if (GetDiskFreeSpace(drive, NULL, &sectorsize, NULL, NULL) &&
+ pagesize % sectorsize == 0)
+ attr |= FILE_FLAG_NO_BUFFERING;
+ }
+
+ for (nrepeat = 1;; ++nrepeat) {
+ fhp->handle =
+ CreateFile(name, access, share, NULL, createflag, attr, 0);
+ if (fhp->handle == INVALID_HANDLE_VALUE) {
/*
* If it's a "temporary" error, we retry up to 3 times,
* waiting up to 12 seconds. While it's not a problem
@@ -146,13 +172,13 @@ __os_open(dbenv, name, flags, mode, fhp)
* log file is cause for serious dismay.
*/
ret = __os_win32_errno();
- if (ret == ENFILE || ret == EMFILE || ret == ENOSPC) {
- (void)__os_sleep(dbenv, nrepeat * 2, 0);
- continue;
- }
- goto err;
- }
- break;
+ if ((ret != ENFILE && ret != EMFILE && ret != ENOSPC) ||
+ nrepeat > 3)
+ goto err;
+
+ (void)__os_sleep(dbenv, nrepeat * 2, 0);
+ } else
+ break;
}
/*
@@ -163,39 +189,29 @@ __os_open(dbenv, name, flags, mode, fhp)
* This strategy only works for Win/NT; Win/9X does not
* guarantee that the logs will be zero filled.
*/
- if (LF_ISSET(DB_OSO_LOG) && log_size != 0 &&
- __os_is_winnt()) {
- if (SetFilePointer(wh,
+ if (LF_ISSET(DB_OSO_LOG) && log_size != 0 && __os_is_winnt()) {
+ if (SetFilePointer(fhp->handle,
log_size - 1, NULL, FILE_BEGIN) == (DWORD)-1)
goto err;
- if (WriteFile(wh, "\x00", 1, &bytesWritten, NULL) == 0)
+ if (WriteFile(fhp->handle, "\x00", 1, &bytesWritten, NULL) == 0)
goto err;
if (bytesWritten != 1)
goto err;
- if (SetEndOfFile(wh) == 0)
+ if (SetEndOfFile(fhp->handle) == 0)
goto err;
- if (SetFilePointer(wh, 0, NULL, FILE_BEGIN) == (DWORD)-1)
+ if (SetFilePointer(
+ fhp->handle, 0, NULL, FILE_BEGIN) == (DWORD)-1)
goto err;
- if (FlushFileBuffers(wh) == 0)
+ if (FlushFileBuffers(fhp->handle) == 0)
goto err;
}
- /*
- * We acquire a POSIX file descriptor as this allows us to use the
- * general UNIX I/O routines instead of writing Windows specific
- * ones. Closing that file descriptor is sufficient to close the
- * Windows HANDLE.
- */
- fhp->fd =
- _open_osfhandle((long)wh, LF_ISSET(DB_OSO_RDONLY) ? O_RDONLY : 0);
- fhp->handle = wh;
F_SET(fhp, DB_FH_VALID);
-
return (0);
err: if (ret == 0)
ret = __os_win32_errno();
- if (wh != INVALID_HANDLE_VALUE)
- (void)CloseHandle(wh);
+ if (fhp->handle != INVALID_HANDLE_VALUE)
+ (void)CloseHandle(fhp->handle);
return (ret);
}
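
The Win32 __os_open now requests unbuffered I/O (FILE_FLAG_NO_BUFFERING) only when the database page size is a multiple of the volume's sector size, which it learns from GetDiskFreeSpace. That check in isolation, as a Windows-only sketch (uses the ANSI entry point explicitly; pass NULL for the current drive or a root such as "c:\\"):

#include <windows.h>

DWORD
direct_io_flag(const char *drive_root, DWORD pagesize)
{
    DWORD spc, bps, freec, totalc;

    if (pagesize != 0 &&
        GetDiskFreeSpaceA(drive_root, &spc, &bps, &freec, &totalc) &&
        bps != 0 && pagesize % bps == 0)
        return (FILE_FLAG_NO_BUFFERING);    /* safe to bypass the cache */
    return (0);
}

A caller would simply OR the result into the CreateFile attributes, e.g. attr |= direct_io_flag("c:\\", pagesize);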
diff --git a/bdb/os_win32/os_rename.c b/bdb/os_win32/os_rename.c
index cd53ec02022..ba14cb73bb0 100644
--- a/bdb/os_win32/os_rename.c
+++ b/bdb/os_win32/os_rename.c
@@ -1,57 +1,74 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1997, 1998, 1999, 2000
+ * Copyright (c) 1997-2002
* Sleepycat Software. All rights reserved.
*/
#include "db_config.h"
#ifndef lint
-static const char revid[] = "$Id: os_rename.c,v 1.2 2000/06/13 19:52:19 dda Exp $";
+static const char revid[] = "$Id: os_rename.c,v 1.12 2002/07/12 18:56:55 bostic Exp $";
#endif /* not lint */
#include "db_int.h"
-#include "os_jump.h"
/*
* __os_rename --
* Rename a file.
*/
int
-__os_rename(dbenv, old, new)
+__os_rename(dbenv, oldname, newname, flags)
DB_ENV *dbenv;
- const char *old, *new;
+ const char *oldname, *newname;
+ u_int32_t flags;
{
int ret;
+ char oldbuf[MAX_PATH], newbuf[MAX_PATH];
ret = 0;
- if (__db_jump.j_rename != NULL) {
- if (__db_jump.j_rename(old, new) == -1)
+ if (DB_GLOBAL(j_rename) != NULL) {
+ if (DB_GLOBAL(j_rename)(oldname, newname) == -1)
ret = __os_get_errno();
+ goto done;
}
- else {
- /* Normally we would use a single MoveFileEx call with
- * MOVEFILE_REPLACE_EXISTING flag to simulate Unix rename().
- * But if the target file exists, and the two files' 8.3
- * names are identical, a Windows bug causes the target file
- * to be deleted, but the original file will not be renamed,
- * and an ENOENT error will be returned. (See MSDN for a
- * description of the bug).
- *
- * After the failed call, a MoveFile seems to perform
- * the rename correctly (even another call to MoveFileEx
- * does not)! The expense of this extra call only occurs
- * on systems with the bug: Windows/98, for one, but
- * apparently not Windows/NT and Windows/2000.
- */
- if (MoveFileEx(old, new, MOVEFILE_REPLACE_EXISTING) != TRUE)
- ret = __os_win32_errno();
- if ((ret == ENOENT || ret == EIO) && MoveFile(old, new) == TRUE)
- ret = 0;
+
+ if (!MoveFile(oldname, newname))
+ ret = __os_win32_errno();
+
+ if (ret == EEXIST) {
+ ret = 0;
+ if (__os_is_winnt()) {
+ if (!MoveFileEx(
+ oldname, newname, MOVEFILE_REPLACE_EXISTING))
+ ret = __os_win32_errno();
+ } else {
+ /*
+ * There is no MoveFileEx for Win9x/Me, so we have to
+ * do the best we can.
+ */
+ if (!GetLongPathName(oldname, oldbuf, sizeof oldbuf) ||
+ !GetLongPathName(newname, newbuf, sizeof newbuf)) {
+ ret = __os_win32_errno();
+ goto done;
+ }
+
+ /*
+ * If the old and new names differ only in case, we're
+ * done.
+ */
+ if (strcasecmp(oldbuf, newbuf) == 0)
+ goto done;
+
+ (void)DeleteFile(newname);
+ if (!MoveFile(oldname, newname))
+ ret = __os_win32_errno();
+ }
}
- if (ret != 0)
- __db_err(dbenv, "Rename %s %s: %s", old, new, strerror(ret));
+
+done: if (ret != 0 && flags == 0)
+ __db_err(dbenv,
+ "Rename %s %s: %s", oldname, newname, strerror(ret));
return (ret);
}
diff --git a/bdb/os_win32/os_rw.c b/bdb/os_win32/os_rw.c
new file mode 100644
index 00000000000..63d1f715c53
--- /dev/null
+++ b/bdb/os_win32/os_rw.c
@@ -0,0 +1,182 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: os_rw.c,v 11.28 2002/08/06 04:56:19 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <string.h>
+#include <unistd.h>
+#endif
+
+#include "db_int.h"
+
+/*
+ * __os_io --
+ * Do an I/O.
+ *
+ * PUBLIC: int __os_io __P((DB_ENV *, DB_IO *, int, size_t *));
+ */
+int
+__os_io(dbenv, db_iop, op, niop)
+ DB_ENV *dbenv;
+ DB_IO *db_iop;
+ int op;
+ size_t *niop;
+{
+ int ret;
+
+ if (__os_is_winnt()) {
+ ULONG64 off = (ULONG64)db_iop->pagesize * db_iop->pgno;
+ OVERLAPPED over;
+ DWORD nbytes;
+ over.Offset = (DWORD)(off & 0xffffffff);
+ over.OffsetHigh = (DWORD)(off >> 32);
+ over.hEvent = 0; /* we don't want asynchronous notifications */
+
+ switch (op) {
+ case DB_IO_READ:
+ if (DB_GLOBAL(j_read) != NULL)
+ goto slow;
+ if (!ReadFile(db_iop->fhp->handle,
+ db_iop->buf, (DWORD)db_iop->bytes, &nbytes, &over))
+ goto slow;
+ break;
+ case DB_IO_WRITE:
+ if (DB_GLOBAL(j_write) != NULL)
+ goto slow;
+ if (!WriteFile(db_iop->fhp->handle,
+ db_iop->buf, (DWORD)db_iop->bytes, &nbytes, &over))
+ goto slow;
+ break;
+ }
+ if (nbytes == db_iop->bytes) {
+ *niop = (size_t)nbytes;
+ return (0);
+ }
+ }
+
+slow: MUTEX_THREAD_LOCK(dbenv, db_iop->mutexp);
+
+ if ((ret = __os_seek(dbenv, db_iop->fhp,
+ db_iop->pagesize, db_iop->pgno, 0, 0, DB_OS_SEEK_SET)) != 0)
+ goto err;
+
+ switch (op) {
+ case DB_IO_READ:
+ ret = __os_read(dbenv,
+ db_iop->fhp, db_iop->buf, db_iop->bytes, niop);
+ break;
+ case DB_IO_WRITE:
+ ret = __os_write(dbenv,
+ db_iop->fhp, db_iop->buf, db_iop->bytes, niop);
+ break;
+ }
+
+err: MUTEX_THREAD_UNLOCK(dbenv, db_iop->mutexp);
+
+ return (ret);
+}
+
+/*
+ * __os_read --
+ * Read from a file handle.
+ *
+ * PUBLIC: int __os_read __P((DB_ENV *, DB_FH *, void *, size_t, size_t *));
+ */
+int
+__os_read(dbenv, fhp, addr, len, nrp)
+ DB_ENV *dbenv;
+ DB_FH *fhp;
+ void *addr;
+ size_t len;
+ size_t *nrp;
+{
+ size_t offset;
+ DWORD nr;
+ int ret;
+ BOOL success;
+ u_int8_t *taddr;
+
+ for (taddr = addr,
+ offset = 0; offset < len; taddr += nr, offset += nr) {
+retry: if (DB_GLOBAL(j_read) != NULL) {
+ nr = (DWORD)DB_GLOBAL(j_read)(fhp->fd,
+ taddr, len - offset);
+ success = (nr >= 0);
+ } else {
+ success = ReadFile(fhp->handle,
+ taddr, (DWORD)(len - offset), &nr, NULL);
+ if (!success)
+ __os_set_errno(__os_win32_errno());
+ }
+
+ if (!success) {
+ if ((ret = __os_get_errno()) == EINTR)
+ goto retry;
+ __db_err(dbenv, "read: 0x%lx, %lu: %s",
+ P_TO_ULONG(taddr),
+ (u_long)len - offset, strerror(ret));
+ return (ret);
+ }
+ if (nr == 0)
+ break;
+ }
+ *nrp = taddr - (u_int8_t *)addr;
+ return (0);
+}
+
+/*
+ * __os_write --
+ * Write to a file handle.
+ *
+ * PUBLIC: int __os_write __P((DB_ENV *, DB_FH *, void *, size_t, size_t *));
+ */
+int
+__os_write(dbenv, fhp, addr, len, nwp)
+ DB_ENV *dbenv;
+ DB_FH *fhp;
+ void *addr;
+ size_t len;
+ size_t *nwp;
+{
+ size_t offset;
+ DWORD nw;
+ int ret;
+ BOOL success;
+ u_int8_t *taddr;
+
+ for (taddr = addr,
+ offset = 0; offset < len; taddr += nw, offset += nw) {
+retry: if (DB_GLOBAL(j_write) != NULL) {
+ nw = (DWORD)DB_GLOBAL(j_write)(fhp->fd,
+ taddr, len - offset);
+ success = (nw >= 0);
+ } else {
+ success = WriteFile(fhp->handle,
+ taddr, (DWORD)(len - offset), &nw, NULL);
+ if (!success)
+ __os_set_errno(__os_win32_errno());
+ }
+
+ if (!success) {
+ if ((ret = __os_get_errno()) == EINTR)
+ goto retry;
+ __db_err(dbenv, "write: 0x%x, %lu: %s", taddr,
+ (u_long)len-offset, strerror(ret));
+ return (ret);
+ }
+ }
+
+ *nwp = len;
+ return (0);
+}
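
The fast path in __os_io above uses positioned I/O: the 64-bit byte offset (pagesize * pgno) is split into the Offset/OffsetHigh halves of an OVERLAPPED structure, so no separate seek is needed and no mutex has to be held; it is attempted only on NT because the 9x line does not accept an OVERLAPPED pointer for ordinary files. A reduced sketch of that offset handling (read_page and the bare HANDLE argument are assumptions; the real code goes through DB_IO, honours the jump table, and falls back to the seek-and-read path on failure):

#include <windows.h>
#include <string.h>

/* Hypothetical helper: read one page at a 64-bit offset without seeking. */
static BOOL
read_page(HANDLE h, void *buf, DWORD pagesize, DWORD pgno, DWORD *nreadp)
{
    OVERLAPPED over;
    ULONGLONG off;

    off = (ULONGLONG)pagesize * pgno;          /* 64-bit byte offset of the page. */

    memset(&over, 0, sizeof(over));
    over.Offset = (DWORD)(off & 0xffffffff);   /* Low 32 bits. */
    over.OffsetHigh = (DWORD)(off >> 32);      /* High 32 bits. */
    over.hEvent = NULL;                        /* Synchronous: no completion event. */

    return (ReadFile(h, buf, pagesize, nreadp, &over));
}
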
diff --git a/bdb/os_win32/os_seek.c b/bdb/os_win32/os_seek.c
index 8cf3c98aa91..40140f51534 100644
--- a/bdb/os_win32/os_seek.c
+++ b/bdb/os_win32/os_seek.c
@@ -1,18 +1,17 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1997, 1998, 1999, 2000
+ * Copyright (c) 1997-2002
* Sleepycat Software. All rights reserved.
*/
#include "db_config.h"
#ifndef lint
-static const char revid[] = "$Id: os_seek.c,v 11.8 2000/05/17 19:30:19 bostic Exp $";
+static const char revid[] = "$Id: os_seek.c,v 11.17 2002/08/06 04:56:20 bostic Exp $";
#endif /* not lint */
#include "db_int.h"
-#include "os_jump.h"
/*
* __os_seek --
@@ -28,32 +27,56 @@ __os_seek(dbenv, fhp, pgsize, pageno, relative, isrewind, db_whence)
int isrewind;
DB_OS_SEEK db_whence;
{
- __int64 offset;
+ /* Yes, this really is how Microsoft have designed their API */
+ union {
+ __int64 bigint;
+ struct {
+ unsigned long low;
+ long high;
+ };
+ } offset;
int ret, whence;
+ DWORD from;
- switch (db_whence) {
- case DB_OS_SEEK_CUR:
- whence = SEEK_CUR;
- break;
- case DB_OS_SEEK_END:
- whence = SEEK_END;
- break;
- case DB_OS_SEEK_SET:
- whence = SEEK_SET;
- break;
- default:
- return (EINVAL);
- }
+ if (DB_GLOBAL(j_seek) != NULL) {
+ switch (db_whence) {
+ case DB_OS_SEEK_CUR:
+ whence = SEEK_CUR;
+ break;
+ case DB_OS_SEEK_END:
+ whence = SEEK_END;
+ break;
+ case DB_OS_SEEK_SET:
+ whence = SEEK_SET;
+ break;
+ default:
+ return (EINVAL);
+ }
- if (__db_jump.j_seek != NULL)
- ret = __db_jump.j_seek(fhp->fd, pgsize, pageno,
+ ret = DB_GLOBAL(j_seek)(fhp->fd, pgsize, pageno,
relative, isrewind, whence);
- else {
- offset = (__int64)pgsize * pageno + relative;
+ } else {
+ switch (db_whence) {
+ case DB_OS_SEEK_CUR:
+ from = FILE_CURRENT;
+ break;
+ case DB_OS_SEEK_END:
+ from = FILE_END;
+ break;
+ case DB_OS_SEEK_SET:
+ from = FILE_BEGIN;
+ break;
+ default:
+ return (EINVAL);
+ }
+
+ offset.bigint = (__int64)pgsize * pageno + relative;
if (isrewind)
- offset = -offset;
- ret = _lseeki64(
- fhp->fd, offset, whence) == -1 ? __os_get_errno() : 0;
+ offset.bigint = -offset.bigint;
+
+ ret = (SetFilePointer(fhp->handle,
+ offset.low, &offset.high, from) == (DWORD) - 1) ?
+ __os_win32_errno() : 0;
}
if (ret != 0)
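
SetFilePointer takes the low 32 bits of the offset by value and the high 32 bits through a pointer, which is what the __int64/low/high union above is working around; 0xffffffff is also a legitimate low half for large files, so it only signals an error when GetLastError() agrees. A small sketch of the same call pattern (seek_to is a hypothetical name, and the SetLastError(NO_ERROR) priming is the usual defensive idiom rather than something the DB code does):

#include <windows.h>

/* Hypothetical helper: position a file handle at a 64-bit absolute offset. */
static int
seek_to(HANDLE h, __int64 offset)
{
    LONG low, high;

    low = (LONG)(offset & 0xffffffff);   /* Passed by value. */
    high = (LONG)(offset >> 32);         /* Passed by address, updated on return. */

    SetLastError(NO_ERROR);
    if (SetFilePointer(h, low, &high, FILE_BEGIN) == (DWORD)-1 &&
        GetLastError() != NO_ERROR)
        return (-1);
    return (0);
}
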
diff --git a/bdb/os_win32/os_sleep.c b/bdb/os_win32/os_sleep.c
index f0248a583de..12b4a7dbc2d 100644
--- a/bdb/os_win32/os_sleep.c
+++ b/bdb/os_win32/os_sleep.c
@@ -1,18 +1,17 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1997, 1998, 1999, 2000
+ * Copyright (c) 1997-2002
* Sleepycat Software. All rights reserved.
*/
#include "db_config.h"
#ifndef lint
-static const char revid[] = "$Id: os_sleep.c,v 11.4 2000/03/30 01:46:43 ubell Exp $";
+static const char revid[] = "$Id: os_sleep.c,v 11.8 2002/07/12 18:56:56 bostic Exp $";
#endif /* not lint */
#include "db_int.h"
-#include "os_jump.h"
/*
* __os_sleep --
@@ -29,8 +28,8 @@ __os_sleep(dbenv, secs, usecs)
for (; usecs >= 1000000; ++secs, usecs -= 1000000)
;
- if (__db_jump.j_sleep != NULL)
- return (__db_jump.j_sleep(secs, usecs));
+ if (DB_GLOBAL(j_sleep) != NULL)
+ return (DB_GLOBAL(j_sleep)(secs, usecs));
/*
* It's important that we yield the processor here so that other
diff --git a/bdb/os_win32/os_spin.c b/bdb/os_win32/os_spin.c
index f250c523d14..eb50b3b53ff 100644
--- a/bdb/os_win32/os_spin.c
+++ b/bdb/os_win32/os_spin.c
@@ -1,25 +1,25 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1997, 1998, 1999, 2000
+ * Copyright (c) 1997-2002
* Sleepycat Software. All rights reserved.
*/
#include "db_config.h"
#ifndef lint
-static const char revid[] = "$Id: os_spin.c,v 11.6 2000/05/17 19:30:19 bostic Exp $";
+static const char revid[] = "$Id: os_spin.c,v 11.11 2002/07/12 18:56:56 bostic Exp $";
#endif /* not lint */
#include "db_int.h"
-#include "os_jump.h"
/*
* __os_spin --
* Return the number of default spins before blocking.
*/
int
-__os_spin()
+__os_spin(dbenv)
+ DB_ENV *dbenv;
{
SYSTEM_INFO SystemInfo;
@@ -27,8 +27,8 @@ __os_spin()
* If the application specified a value or we've already figured it
* out, return it.
*/
- if (DB_GLOBAL(db_tas_spins) != 0)
- return (DB_GLOBAL(db_tas_spins));
+ if (dbenv->tas_spins != 0)
+ return (dbenv->tas_spins);
/* Get the number of processors */
GetSystemInfo(&SystemInfo);
@@ -38,10 +38,10 @@ __os_spin()
* is a reasonable value.
*/
if (SystemInfo.dwNumberOfProcessors > 1)
- DB_GLOBAL(db_tas_spins) = 50 * SystemInfo.dwNumberOfProcessors;
+ dbenv->tas_spins = 50 * SystemInfo.dwNumberOfProcessors;
else
- DB_GLOBAL(db_tas_spins) = 1;
- return (DB_GLOBAL(db_tas_spins));
+ dbenv->tas_spins = 1;
+ return (dbenv->tas_spins);
}
/*
@@ -53,7 +53,7 @@ __os_yield(dbenv, usecs)
DB_ENV *dbenv;
u_long usecs;
{
- if (__db_jump.j_yield != NULL && __db_jump.j_yield() == 0)
+ if (DB_GLOBAL(j_yield) != NULL && DB_GLOBAL(j_yield)() == 0)
return;
__os_sleep(dbenv, 0, usecs);
}
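
The heuristic above amounts to: never spin on a uniprocessor (the lock holder cannot be running while we spin), otherwise allow 50 test-and-set spins per processor, and cache the answer in the environment after the first call. A freestanding sketch of the calculation (default_spins is a hypothetical name):

#include <windows.h>

/* Hypothetical helper: pick a default test-and-set spin count. */
static int
default_spins(void)
{
    SYSTEM_INFO info;

    GetSystemInfo(&info);

    /*
     * Spinning only helps if another processor can release the lock
     * while we spin; on a uniprocessor, give up immediately.
     */
    if (info.dwNumberOfProcessors > 1)
        return (50 * (int)info.dwNumberOfProcessors);
    return (1);
}
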
diff --git a/bdb/os_win32/os_stat.c b/bdb/os_win32/os_stat.c
new file mode 100644
index 00000000000..c1cba698bea
--- /dev/null
+++ b/bdb/os_win32/os_stat.c
@@ -0,0 +1,100 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: os_stat.c,v 11.22 2002/07/12 18:56:56 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+#include <sys/stat.h>
+
+#include <string.h>
+#endif
+
+#include "db_int.h"
+
+/*
+ * __os_exists --
+ * Return if the file exists.
+ *
+ * PUBLIC: int __os_exists __P((const char *, int *));
+ */
+int
+__os_exists(path, isdirp)
+ const char *path;
+ int *isdirp;
+{
+ int ret;
+ DWORD attrs;
+
+ if (DB_GLOBAL(j_exists) != NULL)
+ return (DB_GLOBAL(j_exists)(path, isdirp));
+
+ ret = 0;
+ do {
+ attrs = GetFileAttributes(path);
+ if (attrs == (DWORD)-1)
+ ret = __os_win32_errno();
+ } while (ret == EINTR);
+
+ if (ret != 0)
+ return (ret);
+
+ if (isdirp != NULL)
+ *isdirp = (attrs & FILE_ATTRIBUTE_DIRECTORY);
+
+ return (0);
+}
+
+/*
+ * __os_ioinfo --
+ * Return file size and I/O size; abstracted to make it easier
+ * to replace.
+ *
+ * PUBLIC: int __os_ioinfo __P((DB_ENV *, const char *,
+ * PUBLIC: DB_FH *, u_int32_t *, u_int32_t *, u_int32_t *));
+ */
+int
+__os_ioinfo(dbenv, path, fhp, mbytesp, bytesp, iosizep)
+ DB_ENV *dbenv;
+ const char *path;
+ DB_FH *fhp;
+ u_int32_t *mbytesp, *bytesp, *iosizep;
+{
+ int ret;
+ BY_HANDLE_FILE_INFORMATION bhfi;
+ unsigned __int64 filesize;
+
+ if (DB_GLOBAL(j_ioinfo) != NULL)
+ return (DB_GLOBAL(j_ioinfo)(path,
+ fhp->fd, mbytesp, bytesp, iosizep));
+
+retry: if (!GetFileInformationByHandle(fhp->handle, &bhfi)) {
+ if ((ret = __os_win32_errno()) == EINTR)
+ goto retry;
+ __db_err(dbenv,
+ "GetFileInformationByHandle: %s", strerror(ret));
+ return (ret);
+ }
+
+ filesize = ((unsigned __int64)bhfi.nFileSizeHigh << 32) +
+ bhfi.nFileSizeLow;
+
+ /* Return the size of the file. */
+ if (mbytesp != NULL)
+ *mbytesp = (u_int32_t)(filesize / MEGABYTE);
+ if (bytesp != NULL)
+ *bytesp = (u_int32_t)(filesize % MEGABYTE);
+
+ /* The filesystem blocksize is not easily available. */
+ if (iosizep != NULL)
+ *iosizep = DB_DEF_IOSIZE;
+ return (0);
+}
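
GetFileInformationByHandle reports the size as two 32-bit halves; the code above stitches them into a 64-bit value and then returns whole megabytes plus a remainder so 32-bit callers cannot overflow. A sketch of just that size handling (file_size_mb is a hypothetical name; MEGABYTE is assumed to be 1048576, as elsewhere in DB):

#include <windows.h>

#define MEGABYTE 1048576

/* Hypothetical helper: return file size as whole megabytes plus a remainder. */
static int
file_size_mb(HANDLE h, unsigned long *mbytesp, unsigned long *bytesp)
{
    BY_HANDLE_FILE_INFORMATION bhfi;
    unsigned __int64 filesize;

    if (!GetFileInformationByHandle(h, &bhfi))
        return (-1);

    /* Combine the two 32-bit halves into one 64-bit size. */
    filesize = ((unsigned __int64)bhfi.nFileSizeHigh << 32) + bhfi.nFileSizeLow;

    *mbytesp = (unsigned long)(filesize / MEGABYTE);   /* Whole megabytes. */
    *bytesp = (unsigned long)(filesize % MEGABYTE);    /* Remaining bytes. */
    return (0);
}
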
diff --git a/bdb/os_win32/os_type.c b/bdb/os_win32/os_type.c
index a82fc4b1d4e..583da0aaf1e 100644
--- a/bdb/os_win32/os_type.c
+++ b/bdb/os_win32/os_type.c
@@ -1,14 +1,14 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1998, 1999, 2000
+ * Copyright (c) 1998-2002
* Sleepycat Software. All rights reserved.
*/
#include "db_config.h"
#ifndef lint
-static const char revid[] = "$Id: os_type.c,v 11.3 2000/02/14 03:00:07 bostic Exp $";
+static const char revid[] = "$Id: os_type.c,v 11.6 2002/01/11 15:53:08 bostic Exp $";
#endif /* not lint */
/*
@@ -26,10 +26,11 @@ __os_is_winnt()
* The value of __os_type is computed only once, and cached to
* avoid the overhead of repeated calls to GetVersion().
*/
- if (__os_type == -1)
+ if (__os_type == -1) {
if ((GetVersion() & 0x80000000) == 0)
__os_type = 1;
else
__os_type = 0;
+ }
return (__os_type);
}
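
The NT test above keys off the top bit of GetVersion(): clear on the NT family, set on Win9x/Me, and the result is cached because it cannot change within a process. A freestanding sketch (is_winnt is a hypothetical name):

#include <windows.h>

/* Hypothetical helper: 1 on the Windows NT family, 0 on Win9x/Me. */
static int
is_winnt(void)
{
    static int os_type = -1;    /* Cached: GetVersion() never changes. */

    if (os_type == -1)
        os_type = (GetVersion() & 0x80000000) == 0 ? 1 : 0;
    return (os_type);
}
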
diff --git a/bdb/patches/log-corruption.patch b/bdb/patches/log-corruption.patch
deleted file mode 100644
index 16c37ff7c06..00000000000
--- a/bdb/patches/log-corruption.patch
+++ /dev/null
@@ -1,62 +0,0 @@
-# This patch fixes a bug that caused mysqld to get a core dump while
-# bdb tried to resolve the log file after mysqld was killed with kill -9.
-#
-# Author: Michael Ubell, Sleepycat Software
-# Mon, 26 Feb 2001 12:56:23 -0500 (EST)
-#
-
-*** log/log_rec.c 2001/02/08 03:05:01 11.50
---- log/log_rec.c 2001/02/24 00:42:46 11.51
-***************
-*** 50,55 ****
---- 50,56 ----
- #include "db_am.h"
- #include "log.h"
-
-+ static int __log_check_master __P((DB_ENV *, u_int8_t *, char *));
- static int __log_do_open __P((DB_ENV *, DB_LOG *,
- u_int8_t *, char *, DBTYPE, int32_t, db_pgno_t));
- static int __log_open_file __P((DB_ENV *, DB_LOG *, __log_register_args *));
-***************
-*** 341,346 ****
---- 342,350 ----
- * Verify that we are opening the same file that we were
- * referring to when we wrote this log record.
- */
-+ if (meta_pgno != PGNO_BASE_MD &&
-+ __log_check_master(dbenv, uid, name) != 0)
-+ goto not_right;
- if (memcmp(uid, dbp->fileid, DB_FILE_ID_LEN) != 0) {
- memset(zeroid, 0, DB_FILE_ID_LEN);
- if (memcmp(dbp->fileid, zeroid, DB_FILE_ID_LEN) != 0)
-***************
-*** 359,364 ****
---- 363,390 ----
- (void)__log_add_logid(dbenv, lp, NULL, ndx);
-
- return (ENOENT);
-+ }
-+
-+ static int
-+ __log_check_master(dbenv, uid, name)
-+ DB_ENV *dbenv;
-+ u_int8_t *uid;
-+ char *name;
-+ {
-+ DB *dbp;
-+ int ret;
-+
-+ ret = 0;
-+ if ((ret = db_create(&dbp, dbenv, 0)) != 0)
-+ return (ret);
-+ dbp->type = DB_BTREE;
-+ ret = __db_dbopen(dbp, name, 0, __db_omode("rw----"), PGNO_BASE_MD);
-+
-+ if (ret == 0 && memcmp(uid, dbp->fileid, DB_FILE_ID_LEN) != 0)
-+ ret = EINVAL;
-+
-+ (void) dbp->close(dbp, 0);
-+ return (ret);
- }
-
- /*
diff --git a/bdb/perl.BerkeleyDB/mkconsts b/bdb/perl.BerkeleyDB/mkconsts
deleted file mode 100644
index 24ef4fca7b2..00000000000
--- a/bdb/perl.BerkeleyDB/mkconsts
+++ /dev/null
@@ -1,211 +0,0 @@
-#!/usr/bin/perl
-
-%constants = (
- # Symbol 0 = define, 1 = enum
- DB_AFTER => 0,
- DB_APPEND => 0,
- DB_ARCH_ABS => 0,
- DB_ARCH_DATA => 0,
- DB_ARCH_LOG => 0,
- DB_BEFORE => 0,
- DB_BTREE => 1,
- DB_BTREEMAGIC => 0,
- DB_BTREEOLDVER => 0,
- DB_BTREEVERSION => 0,
- DB_CHECKPOINT => 0,
- DB_CONSUME => 0,
- DB_CREATE => 0,
- DB_CURLSN => 0,
- DB_CURRENT => 0,
- DB_DBT_MALLOC => 0,
- DB_DBT_PARTIAL => 0,
- DB_DBT_USERMEM => 0,
- DB_DELETED => 0,
- DB_DELIMITER => 0,
- DB_DUP => 0,
- DB_DUPSORT => 0,
- DB_ENV_APPINIT => 0,
- DB_ENV_STANDALONE => 0,
- DB_ENV_THREAD => 0,
- DB_EXCL => 0,
- DB_FILE_ID_LEN => 0,
- DB_FIRST => 0,
- DB_FIXEDLEN => 0,
- DB_FLUSH => 0,
- DB_FORCE => 0,
- DB_GET_BOTH => 0,
- DB_GET_RECNO => 0,
- DB_HASH => 1,
- DB_HASHMAGIC => 0,
- DB_HASHOLDVER => 0,
- DB_HASHVERSION => 0,
- DB_INCOMPLETE => 0,
- DB_INIT_CDB => 0,
- DB_INIT_LOCK => 0,
- DB_INIT_LOG => 0,
- DB_INIT_MPOOL => 0,
- DB_INIT_TXN => 0,
- DB_JOIN_ITEM => 0,
- DB_KEYEMPTY => 0,
- DB_KEYEXIST => 0,
- DB_KEYFIRST => 0,
- DB_KEYLAST => 0,
- DB_LAST => 0,
- DB_LOCK_CONFLICT => 0,
- DB_LOCK_DEADLOCK => 0,
- DB_LOCK_DEFAULT => 0,
- DB_LOCK_GET => 1,
- DB_LOCK_NORUN => 0,
- DB_LOCK_NOTGRANTED => 0,
- DB_LOCK_NOTHELD => 0,
- DB_LOCK_NOWAIT => 0,
- DB_LOCK_OLDEST => 0,
- DB_LOCK_RANDOM => 0,
- DB_LOCK_RIW_N => 0,
- DB_LOCK_RW_N => 0,
- DB_LOCK_YOUNGEST => 0,
- DB_LOCKMAGIC => 0,
- DB_LOCKVERSION => 0,
- DB_LOGMAGIC => 0,
- DB_LOGOLDVER => 0,
- DB_MAX_PAGES => 0,
- DB_MAX_RECORDS => 0,
- DB_MPOOL_CLEAN => 0,
- DB_MPOOL_CREATE => 0,
- DB_MPOOL_DIRTY => 0,
- DB_MPOOL_DISCARD => 0,
- DB_MPOOL_LAST => 0,
- DB_MPOOL_NEW => 0,
- DB_MPOOL_PRIVATE => 0,
- DB_MUTEXDEBUG => 0,
- DB_MUTEXLOCKS => 0,
- DB_NEEDSPLIT => 0,
- DB_NEXT => 0,
- DB_NEXT_DUP => 0,
- DB_NOMMAP => 0,
- DB_NOOVERWRITE => 0,
- DB_NOSYNC => 0,
- DB_NOTFOUND => 0,
- DB_PAD => 0,
- DB_PAGEYIELD => 0,
- DB_POSITION => 0,
- DB_PREV => 0,
- DB_PRIVATE => 0,
- DB_QUEUE => 1,
- DB_RDONLY => 0,
- DB_RECNO => 1,
- DB_RECNUM => 0,
- DB_RECORDCOUNT => 0,
- DB_RECOVER => 0,
- DB_RECOVER_FATAL => 0,
- DB_REGISTERED => 0,
- DB_RENUMBER => 0,
- DB_RMW => 0,
- DB_RUNRECOVERY => 0,
- DB_SEQUENTIAL => 0,
- DB_SET => 0,
- DB_SET_RANGE => 0,
- DB_SET_RECNO => 0,
- DB_SNAPSHOT => 0,
- DB_SWAPBYTES => 0,
- DB_TEMPORARY => 0,
- DB_THREAD => 0,
- DB_TRUNCATE => 0,
- DB_TXN_ABORT => 1,
- DB_TXN_BACKWARD_ROLL => 1,
- DB_TXN_CKP => 0,
- DB_TXN_FORWARD_ROLL => 1,
- DB_TXN_LOCK_2PL => 0,
- DB_TXN_LOCK_MASK => 0,
- DB_TXN_LOCK_OPTIMISTIC => 0,
- DB_TXN_LOG_MASK => 0,
- DB_TXN_LOG_REDO => 0,
- DB_TXN_LOG_UNDO => 0,
- DB_TXN_LOG_UNDOREDO => 0,
- DB_TXN_NOSYNC => 0,
- DB_TXN_NOWAIT => 0,
- DB_TXN_SYNC => 0,
- DB_TXN_OPENFILES => 1,
- DB_TXN_REDO => 0,
- DB_TXN_UNDO => 0,
- DB_TXNMAGIC => 0,
- DB_TXNVERSION => 0,
- DB_TXN_LOCK_OPTIMIST => 0,
- DB_UNKNOWN => 1,
- DB_USE_ENVIRON => 0,
- DB_USE_ENVIRON_ROOT => 0,
- DB_VERSION_MAJOR => 0,
- DB_VERSION_MINOR => 0,
- DB_VERSION_PATCH => 0,
- DB_WRITECURSOR => 0,
- ) ;
-
-sub OutputXS
-{
- # skip to the marker
- if (0) {
- while (<>)
- {
- last if /^MARKER/ ;
- print ;
- }
- }
-
- foreach my $key (sort keys %constants)
- {
- my $isEnum = $constants{$key} ;
-
- if ($isEnum) {
- print <<EOM
- if (strEQ(name, "$key"))
- return $key;
-EOM
- }
- else
- {
- print <<EOM
- if (strEQ(name, "$key"))
-#ifdef $key
- return $key;
-#else
- goto not_there;
-#endif
-EOM
- }
-
- }
-
- if (0) {
- while (<>)
- {
- print ;
- }
- }
-}
-
-sub OutputPM
-{
- # skip to the marker
- if (0) {
- while (<>)
- {
- last if /^MARKER/ ;
- print ;
- }
- }
-
- foreach my $key (sort keys %constants)
- {
- print "\t$key\n";
- }
-
- if (0) {
- while (<>)
- {
- print ;
- }
- }
-}
-
-OutputXS() if $ARGV[0] =~ /xs/i ;
-OutputPM() if $ARGV[0] =~ /pm/i ;
diff --git a/bdb/perl.BerkeleyDB/t/db-3.0.t b/bdb/perl.BerkeleyDB/t/db-3.0.t
deleted file mode 100644
index 9c324dc7bab..00000000000
--- a/bdb/perl.BerkeleyDB/t/db-3.0.t
+++ /dev/null
@@ -1,128 +0,0 @@
-#!./perl -w
-
-# ID: 1.2, 7/17/97
-
-use strict ;
-
-BEGIN {
- unless(grep /blib/, @INC) {
- chdir 't' if -d 't';
- @INC = '../lib' if -d '../lib';
- }
-}
-
-use BerkeleyDB;
-use File::Path qw(rmtree);
-
-BEGIN
-{
- if ($BerkeleyDB::db_version < 3) {
- print "1..0 # Skipped - this needs Berkeley DB 3.x or better\n" ;
- exit 0 ;
- }
-}
-
-print "1..14\n";
-
-
-{
- package LexFile ;
-
- sub new
- {
- my $self = shift ;
- unlink @_ ;
- bless [ @_ ], $self ;
- }
-
- sub DESTROY
- {
- my $self = shift ;
- unlink @{ $self } ;
- }
-}
-
-sub ok
-{
- my $no = shift ;
- my $result = shift ;
-
- print "not " unless $result ;
- print "ok $no\n" ;
-}
-
-sub docat
-{
- my $file = shift;
- local $/ = undef;
- open(CAT,$file) || die "Cannot open $file:$!";
- my $result = <CAT>;
- close(CAT);
- return $result;
-}
-
-
-my $Dfile = "dbhash.tmp";
-
-umask(0);
-
-{
- # set_mutexlocks
-
- my $home = "./fred" ;
- ok 1, -d $home ? chmod 0777, $home : mkdir($home, 0777) ;
- mkdir "./fred", 0777 ;
- chdir "./fred" ;
- ok 2, my $env = new BerkeleyDB::Env -Flags => DB_CREATE ;
- ok 3, $env->set_mutexlocks(0) == 0 ;
- chdir ".." ;
- undef $env ;
- rmtree $home ;
-}
-
-{
- # c_dup
-
-
- my $lex = new LexFile $Dfile ;
- my %hash ;
- my ($k, $v) ;
- ok 4, my $db = new BerkeleyDB::Hash -Filename => $Dfile,
- -Flags => DB_CREATE ;
-
- # create some data
- my %data = (
- "red" => 2,
- "green" => "house",
- "blue" => "sea",
- ) ;
-
- my $ret = 0 ;
- while (($k, $v) = each %data) {
- $ret += $db->db_put($k, $v) ;
- }
- ok 5, $ret == 0 ;
-
- # create a cursor
- ok 6, my $cursor = $db->db_cursor() ;
-
- # point to a specific k/v pair
- $k = "green" ;
- ok 7, $cursor->c_get($k, $v, DB_SET) == 0 ;
- ok 8, $v eq "house" ;
-
- # duplicate the cursor
- my $dup_cursor = $cursor->c_dup(DB_POSITION);
- ok 9, $dup_cursor ;
-
- # move original cursor off green/house
- $cursor->c_get($k, $v, DB_NEXT) ;
- ok 10, $k ne "green" ;
- ok 11, $v ne "house" ;
-
- # duplicate cursor should still be on green/house
- ok 12, $dup_cursor->c_get($k, $v, DB_CURRENT) == 0;
- ok 13, $k eq "green" ;
- ok 14, $v eq "house" ;
-
-}
diff --git a/bdb/perl.BerkeleyDB/t/db-3.1.t b/bdb/perl.BerkeleyDB/t/db-3.1.t
deleted file mode 100644
index 35076b6cd49..00000000000
--- a/bdb/perl.BerkeleyDB/t/db-3.1.t
+++ /dev/null
@@ -1,172 +0,0 @@
-#!./perl -w
-
-# ID: %I%, %G%
-
-use strict ;
-
-BEGIN {
- unless(grep /blib/, @INC) {
- chdir 't' if -d 't';
- @INC = '../lib' if -d '../lib';
- }
-}
-
-#use Config;
-#
-#BEGIN {
-# if(-d "lib" && -f "TEST") {
-# if ($Config{'extensions'} !~ /\bBerkeleyDB\b/ ) {
-# print "1..74\n";
-# exit 0;
-# }
-# }
-#}
-
-use BerkeleyDB;
-use File::Path qw(rmtree);
-
-BEGIN
-{
- if ($BerkeleyDB::db_version < 3.1) {
- print "1..0 # Skipping test, this needs Berkeley DB 3.1.x or better\n" ;
- exit 0 ;
- }
-}
-
-print "1..25\n";
-
-my %DB_errors = (
- 'DB_INCOMPLETE' => "DB_INCOMPLETE: Sync was unable to complete",
- 'DB_KEYEMPTY' => "DB_KEYEMPTY: Non-existent key/data pair",
- 'DB_KEYEXIST' => "DB_KEYEXIST: Key/data pair already exists",
- 'DB_LOCK_DEADLOCK' => "DB_LOCK_DEADLOCK: Locker killed to resolve a deadlock",
- 'DB_LOCK_NOTGRANTED' => "DB_LOCK_NOTGRANTED: Lock not granted",
- 'DB_NOTFOUND' => "DB_NOTFOUND: No matching key/data pair found",
- 'DB_OLD_VERSION' => "DB_OLDVERSION: Database requires a version upgrade",
- 'DB_RUNRECOVERY' => "DB_RUNRECOVERY: Fatal error, run database recovery",
-) ;
-
-{
- package LexFile ;
-
- sub new
- {
- my $self = shift ;
- unlink @_ ;
- bless [ @_ ], $self ;
- }
-
- sub DESTROY
- {
- my $self = shift ;
- unlink @{ $self } ;
- }
-}
-
-
-sub ok
-{
- my $no = shift ;
- my $result = shift ;
-
- print "not " unless $result ;
- print "ok $no\n" ;
-}
-
-my $Dfile = "dbhash.tmp";
-my $Dfile2 = "dbhash2.tmp";
-my $Dfile3 = "dbhash3.tmp";
-unlink $Dfile;
-
-umask(0) ;
-
-
-
-{
- # c_count
-
- my $lex = new LexFile $Dfile ;
- my %hash ;
- ok 1, my $db = tie %hash, 'BerkeleyDB::Hash', -Filename => $Dfile,
- -Property => DB_DUP,
- -Flags => DB_CREATE ;
-
- $hash{'Wall'} = 'Larry' ;
- $hash{'Wall'} = 'Stone' ;
- $hash{'Smith'} = 'John' ;
- $hash{'Wall'} = 'Brick' ;
- $hash{'Wall'} = 'Brick' ;
- $hash{'mouse'} = 'mickey' ;
-
- ok 2, keys %hash == 6 ;
-
- # create a cursor
- ok 3, my $cursor = $db->db_cursor() ;
-
- my $key = "Wall" ;
- my $value ;
- ok 4, $cursor->c_get($key, $value, DB_SET) == 0 ;
- ok 5, $key eq "Wall" && $value eq "Larry" ;
-
- my $count ;
- ok 6, $cursor->c_count($count) == 0 ;
- ok 7, $count == 4 ;
-
- $key = "Smith" ;
- ok 8, $cursor->c_get($key, $value, DB_SET) == 0 ;
- ok 9, $key eq "Smith" && $value eq "John" ;
-
- ok 10, $cursor->c_count($count) == 0 ;
- ok 11, $count == 1 ;
-
-
- undef $db ;
- undef $cursor ;
- untie %hash ;
-
-}
-
-{
- # db_key_range
-
- my $lex = new LexFile $Dfile ;
- my %hash ;
- ok 12, my $db = tie %hash, 'BerkeleyDB::Btree', -Filename => $Dfile,
- -Property => DB_DUP,
- -Flags => DB_CREATE ;
-
- $hash{'Wall'} = 'Larry' ;
- $hash{'Wall'} = 'Stone' ;
- $hash{'Smith'} = 'John' ;
- $hash{'Wall'} = 'Brick' ;
- $hash{'Wall'} = 'Brick' ;
- $hash{'mouse'} = 'mickey' ;
-
- ok 13, keys %hash == 6 ;
-
- my $key = "Wall" ;
- my ($less, $equal, $greater) ;
- ok 14, $db->db_key_range($key, $less, $equal, $greater) == 0 ;
-
- ok 15, $less != 0 ;
- ok 16, $equal != 0 ;
- ok 17, $greater != 0 ;
-
- $key = "Smith" ;
- ok 18, $db->db_key_range($key, $less, $equal, $greater) == 0 ;
-
- ok 19, $less == 0 ;
- ok 20, $equal != 0 ;
- ok 21, $greater != 0 ;
-
- $key = "NotThere" ;
- ok 22, $db->db_key_range($key, $less, $equal, $greater) == 0 ;
-
- ok 23, $less == 0 ;
- ok 24, $equal == 0 ;
- ok 25, $greater == 1 ;
-
- undef $db ;
- untie %hash ;
-
-}
diff --git a/bdb/perl.BerkeleyDB/t/db-3.2.t b/bdb/perl.BerkeleyDB/t/db-3.2.t
deleted file mode 100644
index 0cff248733c..00000000000
--- a/bdb/perl.BerkeleyDB/t/db-3.2.t
+++ /dev/null
@@ -1,90 +0,0 @@
-#!./perl -w
-
-# ID: %I%, %G%
-
-use strict ;
-
-BEGIN {
- unless(grep /blib/, @INC) {
- chdir 't' if -d 't';
- @INC = '../lib' if -d '../lib';
- }
-}
-
-#use Config;
-#
-#BEGIN {
-# if(-d "lib" && -f "TEST") {
-# if ($Config{'extensions'} !~ /\bBerkeleyDB\b/ ) {
-# print "1..74\n";
-# exit 0;
-# }
-# }
-#}
-
-use BerkeleyDB;
-use File::Path qw(rmtree);
-
-BEGIN
-{
- if ($BerkeleyDB::db_version < 3.2) {
- print "1..0 # Skipping test, this needs Berkeley DB 3.2.x or better\n" ;
- exit 0 ;
- }
-}
-
-print "1..1\n";
-
-my %DB_errors = (
- 'DB_INCOMPLETE' => "DB_INCOMPLETE: Sync was unable to complete",
- 'DB_KEYEMPTY' => "DB_KEYEMPTY: Non-existent key/data pair",
- 'DB_KEYEXIST' => "DB_KEYEXIST: Key/data pair already exists",
- 'DB_LOCK_DEADLOCK' => "DB_LOCK_DEADLOCK: Locker killed to resolve a deadlock",
- 'DB_LOCK_NOTGRANTED' => "DB_LOCK_NOTGRANTED: Lock not granted",
- 'DB_NOTFOUND' => "DB_NOTFOUND: No matching key/data pair found",
- 'DB_OLD_VERSION' => "DB_OLDVERSION: Database requires a version upgrade",
- 'DB_RUNRECOVERY' => "DB_RUNRECOVERY: Fatal error, run database recovery",
-) ;
-
-{
- package LexFile ;
-
- sub new
- {
- my $self = shift ;
- unlink @_ ;
- bless [ @_ ], $self ;
- }
-
- sub DESTROY
- {
- my $self = shift ;
- unlink @{ $self } ;
- }
-}
-
-
-sub ok
-{
- my $no = shift ;
- my $result = shift ;
-
- print "not " unless $result ;
- print "ok $no\n" ;
-}
-
-my $Dfile = "dbhash.tmp";
-my $Dfile2 = "dbhash2.tmp";
-my $Dfile3 = "dbhash3.tmp";
-unlink $Dfile;
-
-umask(0) ;
-
-
-
-{
- # set_q_extentsize
-
- ok 1, 1 ;
-}
-
diff --git a/bdb/perl.BerkeleyDB/t/mldbm.t b/bdb/perl.BerkeleyDB/t/mldbm.t
deleted file mode 100644
index eb6673b35f5..00000000000
--- a/bdb/perl.BerkeleyDB/t/mldbm.t
+++ /dev/null
@@ -1,166 +0,0 @@
-#!/usr/bin/perl -w
-
-BEGIN
-{
- if ($] < 5.005) {
- print "1..0 # This is Perl $], skipping test\n" ;
- exit 0 ;
- }
-
- eval { require Data::Dumper ; };
- if ($@) {
- print "1..0 # Data::Dumper is not installed on this system.\n";
- exit 0 ;
- }
- if ($Data::Dumper::VERSION < 2.08) {
- print "1..0 # Data::Dumper 2.08 or better required (found $Data::Dumper::VERSION).\n";
- exit 0 ;
- }
- eval { require MLDBM ; };
- if ($@) {
- print "1..0 # MLDBM is not installed on this system.\n";
- exit 0 ;
- }
-}
-
-sub ok
-{
- my $no = shift ;
- my $result = shift ;
-
- print "not " unless $result ;
- print "ok $no\n" ;
-}
-
-print "1..12\n";
-
-{
-package BTREE ;
-
-use BerkeleyDB ;
-use MLDBM qw(BerkeleyDB::Btree) ;
-use Data::Dumper;
-
-$filename = 'testmldbm' ;
-
-unlink $filename ;
-$MLDBM::UseDB = "BerkeleyDB::Btree" ;
-$db = tie %o, MLDBM, -Filename => $filename,
- -Flags => DB_CREATE
- or die $!;
-::ok 1, $db ;
-::ok 2, $db->type() == DB_BTREE ;
-
-$c = [\'c'];
-$b = {};
-$a = [1, $b, $c];
-$b->{a} = $a;
-$b->{b} = $a->[1];
-$b->{c} = $a->[2];
-@o{qw(a b c)} = ($a, $b, $c);
-$o{d} = "{once upon a time}";
-$o{e} = 1024;
-$o{f} = 1024.1024;
-$first = Data::Dumper->new([@o{qw(a b c)}], [qw(a b c)])->Quotekeys(0)->Dump;
-$second = <<'EOT';
-$a = [
- 1,
- {
- a => $a,
- b => $a->[1],
- c => [
- \'c'
- ]
- },
- $a->[1]{c}
- ];
-$b = {
- a => [
- 1,
- $b,
- [
- \'c'
- ]
- ],
- b => $b,
- c => $b->{a}[2]
- };
-$c = [
- \'c'
- ];
-EOT
-
-::ok 3, $first eq $second ;
-::ok 4, $o{d} eq "{once upon a time}" ;
-::ok 5, $o{e} == 1024 ;
-::ok 6, $o{f} eq 1024.1024 ;
-
-unlink $filename ;
-}
-
-{
-
-package HASH ;
-
-use BerkeleyDB ;
-use MLDBM qw(BerkeleyDB::Hash) ;
-use Data::Dumper;
-
-$filename = 'testmldbm' ;
-
-unlink $filename ;
-$MLDBM::UseDB = "BerkeleyDB::Hash" ;
-$db = tie %o, MLDBM, -Filename => $filename,
- -Flags => DB_CREATE
- or die $!;
-::ok 7, $db ;
-::ok 8, $db->type() == DB_HASH ;
-
-
-$c = [\'c'];
-$b = {};
-$a = [1, $b, $c];
-$b->{a} = $a;
-$b->{b} = $a->[1];
-$b->{c} = $a->[2];
-@o{qw(a b c)} = ($a, $b, $c);
-$o{d} = "{once upon a time}";
-$o{e} = 1024;
-$o{f} = 1024.1024;
-$first = Data::Dumper->new([@o{qw(a b c)}], [qw(a b c)])->Quotekeys(0)->Dump;
-$second = <<'EOT';
-$a = [
- 1,
- {
- a => $a,
- b => $a->[1],
- c => [
- \'c'
- ]
- },
- $a->[1]{c}
- ];
-$b = {
- a => [
- 1,
- $b,
- [
- \'c'
- ]
- ],
- b => $b,
- c => $b->{a}[2]
- };
-$c = [
- \'c'
- ];
-EOT
-
-::ok 9, $first eq $second ;
-::ok 10, $o{d} eq "{once upon a time}" ;
-::ok 11, $o{e} == 1024 ;
-::ok 12, $o{f} eq 1024.1024 ;
-
-unlink $filename ;
-
-}
diff --git a/bdb/perl.DB_File/Makefile.PL b/bdb/perl.DB_File/Makefile.PL
deleted file mode 100644
index 25e707df6ea..00000000000
--- a/bdb/perl.DB_File/Makefile.PL
+++ /dev/null
@@ -1,187 +0,0 @@
-#! perl -w
-use strict ;
-use ExtUtils::MakeMaker 5.16 ;
-use Config ;
-
-my $VER_INFO ;
-my $LIB_DIR ;
-my $INC_DIR ;
-my $DB_NAME ;
-my $LIBS ;
-my $COMPAT185 = "" ;
-
-my @files = ('DB_File.pm', glob "t/*.t") ;
-# See if warnings is available
-eval 'use warnings;';
-if ($@) {
- # not there, so write a dummy warnings.pm
- oldWarnings(@files) ;
-} else {
- # is there,
- newWarnings(@files) ;
-}
-
-ParseCONFIG() ;
-
-if (defined $DB_NAME)
- { $LIBS = $DB_NAME }
-else {
- if ($^O eq 'MSWin32')
- { $LIBS = '-llibdb' }
- else
- { $LIBS = '-ldb' }
-}
-
-# Solaris is special.
-#$LIBS .= " -lthread" if $^O eq 'solaris' ;
-
-# OS2 is a special case, so check for it now.
-my $OS2 = "" ;
-$OS2 = "-DOS2" if $Config{'osname'} eq 'os2' ;
-
-WriteMakefile(
- NAME => 'DB_File',
- LIBS => ["-L${LIB_DIR} $LIBS"],
- MAN3PODS => ' ', # Pods will be built by installman.
- INC => "-I$INC_DIR",
- VERSION_FROM => 'DB_File.pm',
- XSPROTOARG => '-noprototypes',
- DEFINE => "$OS2 $VER_INFO $COMPAT185",
- OBJECT => 'version$(OBJ_EXT) DB_File$(OBJ_EXT)',
- OPTIMIZE => '-g',
- 'macro' => { INSTALLDIRS => 'perl' },
- 'dist' => {COMPRESS=>'gzip', SUFFIX=>'gz'},
- );
-
-
-sub MY::postamble {
- '
-version$(OBJ_EXT): version.c
-
-$(NAME).xs: typemap
- @$(TOUCH) $(NAME).xs
-
-Makefile: config.in
-
-' ;
-}
-
-
-sub ParseCONFIG
-{
- my ($k, $v) ;
- my @badkey = () ;
- my %Info = () ;
- my @Options = qw( INCLUDE LIB PREFIX HASH DBNAME COMPAT185 ) ;
- my %ValidOption = map {$_, 1} @Options ;
- my %Parsed = %ValidOption ;
- my $CONFIG = 'config.in' ;
-
- print "Parsing $CONFIG...\n" ;
-
- # DBNAME & COMPAT185 are optional, so pretend they have
- # been parsed.
- delete $Parsed{'DBNAME'} ;
- delete $Parsed{'COMPAT185'} ;
- $Info{COMPAT185} = "No" ;
-
-
- open(F, "$CONFIG") or die "Cannot open file $CONFIG: $!\n" ;
- while (<F>) {
- s/^\s*|\s*$//g ;
- next if /^\s*$/ or /^\s*#/ ;
- s/\s*#\s*$// ;
-
- ($k, $v) = split(/\s+=\s+/, $_, 2) ;
- $k = uc $k ;
- if ($ValidOption{$k}) {
- delete $Parsed{$k} ;
- $Info{$k} = $v ;
- }
- else {
- push(@badkey, $k) ;
- }
- }
- close F ;
-
- print "Unknown keys in $CONFIG ignored [@badkey]\n"
- if @badkey ;
-
- # check parsed values
- my @missing = () ;
- die "The following keys are missing from $CONFIG file: [@missing]\n"
- if @missing = keys %Parsed ;
-
- $INC_DIR = $ENV{'DB_FILE_INCLUDE'} || $Info{'INCLUDE'} ;
- $LIB_DIR = $ENV{'DB_FILE_LIB'} || $Info{'LIB'} ;
- $DB_NAME = $Info{'DBNAME'} if defined $Info{'DBNAME'} ;
- $COMPAT185 = "-DCOMPAT185 -DDB_LIBRARY_COMPATIBILITY_API"
- if (defined $ENV{'DB_FILE_COMPAT185'} &&
- $ENV{'DB_FILE_COMPAT185'} =~ /^\s*(on|true|1)\s*$/i) ||
- $Info{'COMPAT185'} =~ /^\s*(on|true|1)\s*$/i ;
- my $PREFIX = $Info{'PREFIX'} ;
- my $HASH = $Info{'HASH'} ;
-
- $VER_INFO = "-DmDB_Prefix_t=${PREFIX} -DmDB_Hash_t=${HASH}" ;
-
- print <<EOM if 0 ;
- INCLUDE [$INC_DIR]
- LIB [$LIB_DIR]
- HASH [$HASH]
- PREFIX [$PREFIX]
- DBNAME [$DB_NAME]
-
-EOM
-
- print "Looks Good.\n" ;
-
-}
-
-sub oldWarnings
-{
- local ($^I) = ".bak" ;
- local (@ARGV) = @_ ;
-
- while (<>)
- {
- if (/^__END__/)
- {
- print ;
- my $this = $ARGV ;
- while (<>)
- {
- last if $ARGV ne $this ;
- print ;
- }
- }
-
- s/^(\s*)(no\s+warnings)/${1}local (\$^W) = 0; #$2/ ;
- s/^(\s*)(use\s+warnings)/${1}local (\$^W) = 1; #$2/ ;
- print ;
- }
-}
-
-sub newWarnings
-{
- local ($^I) = ".bak" ;
- local (@ARGV) = @_ ;
-
- while (<>)
- {
- if (/^__END__/)
- {
- my $this = $ARGV ;
- print ;
- while (<>)
- {
- last if $ARGV ne $this ;
- print ;
- }
- }
-
- s/^(\s*)local\s*\(\$\^W\)\s*=\s*\d+\s*;\s*#\s*((no|use)\s+warnings.*)/$1$2/ ;
- print ;
- }
-}
-
-# end of file Makefile.PL
diff --git a/bdb/perl.DB_File/t/db-recno.t b/bdb/perl.DB_File/t/db-recno.t
deleted file mode 100644
index c64d83b5916..00000000000
--- a/bdb/perl.DB_File/t/db-recno.t
+++ /dev/null
@@ -1,899 +0,0 @@
-#!./perl -w
-
-use warnings;
-use strict ;
-
-BEGIN {
- unless(grep /blib/, @INC) {
- chdir 't' if -d 't';
- @INC = '../lib' if -d '../lib';
- }
-}
-
-use Config;
-
-BEGIN {
- if(-d "lib" && -f "TEST") {
- if ($Config{'extensions'} !~ /\bDB_File\b/ ) {
- print "1..128\n";
- exit 0;
- }
- }
-}
-
-use DB_File;
-use Fcntl;
-use vars qw($dbh $Dfile $bad_ones $FA) ;
-
-# full tied array support started in Perl 5.004_57
-# Double check to see if it is available.
-
-{
- sub try::TIEARRAY { bless [], "try" }
- sub try::FETCHSIZE { $FA = 1 }
- $FA = 0 ;
- my @a ;
- tie @a, 'try' ;
- my $a = @a ;
-}
-
-
-sub ok
-{
- my $no = shift ;
- my $result = shift ;
-
- print "not " unless $result ;
- print "ok $no\n" ;
-
- return $result ;
-}
-
-{
- package Redirect ;
- use Symbol ;
-
- sub new
- {
- my $class = shift ;
- my $filename = shift ;
- my $fh = gensym ;
- open ($fh, ">$filename") || die "Cannot open $filename: $!" ;
- my $real_stdout = select($fh) ;
- return bless [$fh, $real_stdout ] ;
-
- }
- sub DESTROY
- {
- my $self = shift ;
- close $self->[0] ;
- select($self->[1]) ;
- }
-}
-
-sub docat
-{
- my $file = shift;
- local $/ = undef;
- open(CAT,$file) || die "Cannot open $file:$!";
- my $result = <CAT>;
- close(CAT);
- return $result;
-}
-
-sub docat_del
-{
- my $file = shift;
- local $/ = undef;
- open(CAT,$file) || die "Cannot open $file: $!";
- my $result = <CAT>;
- close(CAT);
- unlink $file ;
- return $result;
-}
-
-sub bad_one
-{
- print STDERR <<EOM unless $bad_ones++ ;
-#
-# Some older versions of Berkeley DB version 1 will fail tests 51,
-# 53 and 55.
-#
-# You can safely ignore the errors if you're never going to use the
-# broken functionality (recno databases with a modified bval).
-# Otherwise you'll have to upgrade your DB library.
-#
-# If you want to use Berkeley DB version 1, then 1.85 and 1.86 are the
-# last versions that were released. Berkeley DB version 2 is continually
-# being updated -- Check out http://www.sleepycat.com/ for more details.
-#
-EOM
-}
-
-print "1..128\n";
-
-my $Dfile = "recno.tmp";
-unlink $Dfile ;
-
-umask(0);
-
-# Check the interface to RECNOINFO
-
-my $dbh = new DB_File::RECNOINFO ;
-ok(1, ! defined $dbh->{bval}) ;
-ok(2, ! defined $dbh->{cachesize}) ;
-ok(3, ! defined $dbh->{psize}) ;
-ok(4, ! defined $dbh->{flags}) ;
-ok(5, ! defined $dbh->{lorder}) ;
-ok(6, ! defined $dbh->{reclen}) ;
-ok(7, ! defined $dbh->{bfname}) ;
-
-$dbh->{bval} = 3000 ;
-ok(8, $dbh->{bval} == 3000 );
-
-$dbh->{cachesize} = 9000 ;
-ok(9, $dbh->{cachesize} == 9000 );
-
-$dbh->{psize} = 400 ;
-ok(10, $dbh->{psize} == 400 );
-
-$dbh->{flags} = 65 ;
-ok(11, $dbh->{flags} == 65 );
-
-$dbh->{lorder} = 123 ;
-ok(12, $dbh->{lorder} == 123 );
-
-$dbh->{reclen} = 1234 ;
-ok(13, $dbh->{reclen} == 1234 );
-
-$dbh->{bfname} = 1234 ;
-ok(14, $dbh->{bfname} == 1234 );
-
-
-# Check that an invalid entry is caught both for store & fetch
-eval '$dbh->{fred} = 1234' ;
-ok(15, $@ =~ /^DB_File::RECNOINFO::STORE - Unknown element 'fred' at/ );
-eval 'my $q = $dbh->{fred}' ;
-ok(16, $@ =~ /^DB_File::RECNOINFO::FETCH - Unknown element 'fred' at/ );
-
-# Now check the interface to RECNOINFO
-
-my $X ;
-my @h ;
-ok(17, $X = tie @h, 'DB_File', $Dfile, O_RDWR|O_CREAT, 0640, $DB_RECNO ) ;
-
-ok(18, ((stat($Dfile))[2] & 0777) == ($^O eq 'os2' ? 0666 : 0640)
- || $^O eq 'MSWin32' || $^O eq 'amigaos') ;
-
-#my $l = @h ;
-my $l = $X->length ;
-ok(19, ($FA ? @h == 0 : !$l) );
-
-my @data = qw( a b c d ever f g h i j k longername m n o p) ;
-
-$h[0] = shift @data ;
-ok(20, $h[0] eq 'a' );
-
-my $ i;
-foreach (@data)
- { $h[++$i] = $_ }
-
-unshift (@data, 'a') ;
-
-ok(21, defined $h[1] );
-ok(22, ! defined $h[16] );
-ok(23, $FA ? @h == @data : $X->length == @data );
-
-
-# Overwrite an entry & check fetch it
-$h[3] = 'replaced' ;
-$data[3] = 'replaced' ;
-ok(24, $h[3] eq 'replaced' );
-
-#PUSH
-my @push_data = qw(added to the end) ;
-($FA ? push(@h, @push_data) : $X->push(@push_data)) ;
-push (@data, @push_data) ;
-ok(25, $h[++$i] eq 'added' );
-ok(26, $h[++$i] eq 'to' );
-ok(27, $h[++$i] eq 'the' );
-ok(28, $h[++$i] eq 'end' );
-
-# POP
-my $popped = pop (@data) ;
-my $value = ($FA ? pop @h : $X->pop) ;
-ok(29, $value eq $popped) ;
-
-# SHIFT
-$value = ($FA ? shift @h : $X->shift) ;
-my $shifted = shift @data ;
-ok(30, $value eq $shifted );
-
-# UNSHIFT
-
-# empty list
-($FA ? unshift @h : $X->unshift) ;
-ok(31, ($FA ? @h == @data : $X->length == @data ));
-
-my @new_data = qw(add this to the start of the array) ;
-$FA ? unshift (@h, @new_data) : $X->unshift (@new_data) ;
-unshift (@data, @new_data) ;
-ok(32, $FA ? @h == @data : $X->length == @data );
-ok(33, $h[0] eq "add") ;
-ok(34, $h[1] eq "this") ;
-ok(35, $h[2] eq "to") ;
-ok(36, $h[3] eq "the") ;
-ok(37, $h[4] eq "start") ;
-ok(38, $h[5] eq "of") ;
-ok(39, $h[6] eq "the") ;
-ok(40, $h[7] eq "array") ;
-ok(41, $h[8] eq $data[8]) ;
-
-# SPLICE
-
-# Now both arrays should be identical
-
-my $ok = 1 ;
-my $j = 0 ;
-foreach (@data)
-{
- $ok = 0, last if $_ ne $h[$j ++] ;
-}
-ok(42, $ok );
-
-# Negative subscripts
-
-# get the last element of the array
-ok(43, $h[-1] eq $data[-1] );
-ok(44, $h[-1] eq $h[ ($FA ? @h : $X->length) -1] );
-
-# get the first element using a negative subscript
-eval '$h[ - ( $FA ? @h : $X->length)] = "abcd"' ;
-ok(45, $@ eq "" );
-ok(46, $h[0] eq "abcd" );
-
-# now try to read before the start of the array
-eval '$h[ - (1 + ($FA ? @h : $X->length))] = 1234' ;
-ok(47, $@ =~ '^Modification of non-creatable array value attempted' );
-
-# IMPORTANT - $X must be undefined before the untie otherwise the
-# underlying DB close routine will not get called.
-undef $X ;
-untie(@h);
-
-unlink $Dfile;
-
-
-{
- # Check bval defaults to \n
-
- my @h = () ;
- my $dbh = new DB_File::RECNOINFO ;
- ok(48, tie @h, 'DB_File', $Dfile, O_RDWR|O_CREAT, 0640, $dbh ) ;
- $h[0] = "abc" ;
- $h[1] = "def" ;
- $h[3] = "ghi" ;
- untie @h ;
- my $x = docat($Dfile) ;
- unlink $Dfile;
- ok(49, $x eq "abc\ndef\n\nghi\n") ;
-}
-
-{
- # Change bval
-
- my @h = () ;
- my $dbh = new DB_File::RECNOINFO ;
- $dbh->{bval} = "-" ;
- ok(50, tie @h, 'DB_File', $Dfile, O_RDWR|O_CREAT, 0640, $dbh ) ;
- $h[0] = "abc" ;
- $h[1] = "def" ;
- $h[3] = "ghi" ;
- untie @h ;
- my $x = docat($Dfile) ;
- unlink $Dfile;
- my $ok = ($x eq "abc-def--ghi-") ;
- bad_one() unless $ok ;
- ok(51, $ok) ;
-}
-
-{
- # Check R_FIXEDLEN with default bval (space)
-
- my @h = () ;
- my $dbh = new DB_File::RECNOINFO ;
- $dbh->{flags} = R_FIXEDLEN ;
- $dbh->{reclen} = 5 ;
- ok(52, tie @h, 'DB_File', $Dfile, O_RDWR|O_CREAT, 0640, $dbh ) ;
- $h[0] = "abc" ;
- $h[1] = "def" ;
- $h[3] = "ghi" ;
- untie @h ;
- my $x = docat($Dfile) ;
- unlink $Dfile;
- my $ok = ($x eq "abc def ghi ") ;
- bad_one() unless $ok ;
- ok(53, $ok) ;
-}
-
-{
- # Check R_FIXEDLEN with user-defined bval
-
- my @h = () ;
- my $dbh = new DB_File::RECNOINFO ;
- $dbh->{flags} = R_FIXEDLEN ;
- $dbh->{bval} = "-" ;
- $dbh->{reclen} = 5 ;
- ok(54, tie @h, 'DB_File', $Dfile, O_RDWR|O_CREAT, 0640, $dbh ) ;
- $h[0] = "abc" ;
- $h[1] = "def" ;
- $h[3] = "ghi" ;
- untie @h ;
- my $x = docat($Dfile) ;
- unlink $Dfile;
- my $ok = ($x eq "abc--def-------ghi--") ;
- bad_one() unless $ok ;
- ok(55, $ok) ;
-}
-
-{
- # check that attempting to tie an associative array to a DB_RECNO will fail
-
- my $filename = "xyz" ;
- my %x ;
- eval { tie %x, 'DB_File', $filename, O_RDWR|O_CREAT, 0640, $DB_RECNO ; } ;
- ok(56, $@ =~ /^DB_File can only tie an array to a DB_RECNO database/) ;
- unlink $filename ;
-}
-
-{
- # sub-class test
-
- package Another ;
-
- use warnings ;
- use strict ;
-
- open(FILE, ">SubDB.pm") or die "Cannot open SubDB.pm: $!\n" ;
- print FILE <<'EOM' ;
-
- package SubDB ;
-
- use warnings ;
- use strict ;
- use vars qw( @ISA @EXPORT) ;
-
- require Exporter ;
- use DB_File;
- @ISA=qw(DB_File);
- @EXPORT = @DB_File::EXPORT ;
-
- sub STORE {
- my $self = shift ;
- my $key = shift ;
- my $value = shift ;
- $self->SUPER::STORE($key, $value * 2) ;
- }
-
- sub FETCH {
- my $self = shift ;
- my $key = shift ;
- $self->SUPER::FETCH($key) - 1 ;
- }
-
- sub put {
- my $self = shift ;
- my $key = shift ;
- my $value = shift ;
- $self->SUPER::put($key, $value * 3) ;
- }
-
- sub get {
- my $self = shift ;
- $self->SUPER::get($_[0], $_[1]) ;
- $_[1] -= 2 ;
- }
-
- sub A_new_method
- {
- my $self = shift ;
- my $key = shift ;
- my $value = $self->FETCH($key) ;
- return "[[$value]]" ;
- }
-
- 1 ;
-EOM
-
- close FILE ;
-
- BEGIN { push @INC, '.'; }
- eval 'use SubDB ; ';
- main::ok(57, $@ eq "") ;
- my @h ;
- my $X ;
- eval '
- $X = tie(@h, "SubDB","recno.tmp", O_RDWR|O_CREAT, 0640, $DB_RECNO );
- ' ;
-
- main::ok(58, $@ eq "") ;
-
- my $ret = eval '$h[3] = 3 ; return $h[3] ' ;
- main::ok(59, $@ eq "") ;
- main::ok(60, $ret == 5) ;
-
- my $value = 0;
- $ret = eval '$X->put(1, 4) ; $X->get(1, $value) ; return $value' ;
- main::ok(61, $@ eq "") ;
- main::ok(62, $ret == 10) ;
-
- $ret = eval ' R_NEXT eq main::R_NEXT ' ;
- main::ok(63, $@ eq "" ) ;
- main::ok(64, $ret == 1) ;
-
- $ret = eval '$X->A_new_method(1) ' ;
- main::ok(65, $@ eq "") ;
- main::ok(66, $ret eq "[[11]]") ;
-
- undef $X;
- untie(@h);
- unlink "SubDB.pm", "recno.tmp" ;
-
-}
-
-{
-
- # test $#
- my $self ;
- unlink $Dfile;
- ok(67, $self = tie @h, 'DB_File', $Dfile, O_RDWR|O_CREAT, 0640, $DB_RECNO ) ;
- $h[0] = "abc" ;
- $h[1] = "def" ;
- $h[2] = "ghi" ;
- $h[3] = "jkl" ;
- ok(68, $FA ? $#h == 3 : $self->length() == 4) ;
- undef $self ;
- untie @h ;
- my $x = docat($Dfile) ;
- ok(69, $x eq "abc\ndef\nghi\njkl\n") ;
-
- # $# sets array to same length
- ok(70, $self = tie @h, 'DB_File', $Dfile, O_RDWR, 0640, $DB_RECNO ) ;
- if ($FA)
- { $#h = 3 }
- else
- { $self->STORESIZE(4) }
- ok(71, $FA ? $#h == 3 : $self->length() == 4) ;
- undef $self ;
- untie @h ;
- $x = docat($Dfile) ;
- ok(72, $x eq "abc\ndef\nghi\njkl\n") ;
-
- # $# sets array to bigger
- ok(73, $self = tie @h, 'DB_File', $Dfile, O_RDWR, 0640, $DB_RECNO ) ;
- if ($FA)
- { $#h = 6 }
- else
- { $self->STORESIZE(7) }
- ok(74, $FA ? $#h == 6 : $self->length() == 7) ;
- undef $self ;
- untie @h ;
- $x = docat($Dfile) ;
- ok(75, $x eq "abc\ndef\nghi\njkl\n\n\n\n") ;
-
- # $# sets array smaller
- ok(76, $self = tie @h, 'DB_File', $Dfile, O_RDWR, 0640, $DB_RECNO ) ;
- if ($FA)
- { $#h = 2 }
- else
- { $self->STORESIZE(3) }
- ok(77, $FA ? $#h == 2 : $self->length() == 3) ;
- undef $self ;
- untie @h ;
- $x = docat($Dfile) ;
- ok(78, $x eq "abc\ndef\nghi\n") ;
-
- unlink $Dfile;
-
-
-}
-
-{
- # DBM Filter tests
- use warnings ;
- use strict ;
- my (@h, $db) ;
- my ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
- unlink $Dfile;
-
- sub checkOutput
- {
- my($fk, $sk, $fv, $sv) = @_ ;
- return
- $fetch_key eq $fk && $store_key eq $sk &&
- $fetch_value eq $fv && $store_value eq $sv &&
- $_ eq 'original' ;
- }
-
- ok(79, $db = tie(@h, 'DB_File', $Dfile, O_RDWR|O_CREAT, 0640, $DB_RECNO ) );
-
- $db->filter_fetch_key (sub { $fetch_key = $_ }) ;
- $db->filter_store_key (sub { $store_key = $_ }) ;
- $db->filter_fetch_value (sub { $fetch_value = $_}) ;
- $db->filter_store_value (sub { $store_value = $_ }) ;
-
- $_ = "original" ;
-
- $h[0] = "joe" ;
- # fk sk fv sv
- ok(80, checkOutput( "", 0, "", "joe")) ;
-
- ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
- ok(81, $h[0] eq "joe");
- # fk sk fv sv
- ok(82, checkOutput( "", 0, "joe", "")) ;
-
- ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
- ok(83, $db->FIRSTKEY() == 0) ;
- # fk sk fv sv
- ok(84, checkOutput( 0, "", "", "")) ;
-
- # replace the filters, but remember the previous set
- my ($old_fk) = $db->filter_fetch_key
- (sub { ++ $_ ; $fetch_key = $_ }) ;
- my ($old_sk) = $db->filter_store_key
- (sub { $_ *= 2 ; $store_key = $_ }) ;
- my ($old_fv) = $db->filter_fetch_value
- (sub { $_ = "[$_]"; $fetch_value = $_ }) ;
- my ($old_sv) = $db->filter_store_value
- (sub { s/o/x/g; $store_value = $_ }) ;
-
- ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
- $h[1] = "Joe" ;
- # fk sk fv sv
- ok(85, checkOutput( "", 2, "", "Jxe")) ;
-
- ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
- ok(86, $h[1] eq "[Jxe]");
- # fk sk fv sv
- ok(87, checkOutput( "", 2, "[Jxe]", "")) ;
-
- ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
- ok(88, $db->FIRSTKEY() == 1) ;
- # fk sk fv sv
- ok(89, checkOutput( 1, "", "", "")) ;
-
- # put the original filters back
- $db->filter_fetch_key ($old_fk);
- $db->filter_store_key ($old_sk);
- $db->filter_fetch_value ($old_fv);
- $db->filter_store_value ($old_sv);
-
- ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
- $h[0] = "joe" ;
- ok(90, checkOutput( "", 0, "", "joe")) ;
-
- ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
- ok(91, $h[0] eq "joe");
- ok(92, checkOutput( "", 0, "joe", "")) ;
-
- ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
- ok(93, $db->FIRSTKEY() == 0) ;
- ok(94, checkOutput( 0, "", "", "")) ;
-
- # delete the filters
- $db->filter_fetch_key (undef);
- $db->filter_store_key (undef);
- $db->filter_fetch_value (undef);
- $db->filter_store_value (undef);
-
- ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
- $h[0] = "joe" ;
- ok(95, checkOutput( "", "", "", "")) ;
-
- ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
- ok(96, $h[0] eq "joe");
- ok(97, checkOutput( "", "", "", "")) ;
-
- ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
- ok(98, $db->FIRSTKEY() == 0) ;
- ok(99, checkOutput( "", "", "", "")) ;
-
- undef $db ;
- untie @h;
- unlink $Dfile;
-}
-
-{
- # DBM Filter with a closure
-
- use warnings ;
- use strict ;
- my (@h, $db) ;
-
- unlink $Dfile;
- ok(100, $db = tie(@h, 'DB_File', $Dfile, O_RDWR|O_CREAT, 0640, $DB_RECNO ) );
-
- my %result = () ;
-
- sub Closure
- {
- my ($name) = @_ ;
- my $count = 0 ;
- my @kept = () ;
-
- return sub { ++$count ;
- push @kept, $_ ;
- $result{$name} = "$name - $count: [@kept]" ;
- }
- }
-
- $db->filter_store_key(Closure("store key")) ;
- $db->filter_store_value(Closure("store value")) ;
- $db->filter_fetch_key(Closure("fetch key")) ;
- $db->filter_fetch_value(Closure("fetch value")) ;
-
- $_ = "original" ;
-
- $h[0] = "joe" ;
- ok(101, $result{"store key"} eq "store key - 1: [0]");
- ok(102, $result{"store value"} eq "store value - 1: [joe]");
- ok(103, ! defined $result{"fetch key"} );
- ok(104, ! defined $result{"fetch value"} );
- ok(105, $_ eq "original") ;
-
- ok(106, $db->FIRSTKEY() == 0 ) ;
- ok(107, $result{"store key"} eq "store key - 1: [0]");
- ok(108, $result{"store value"} eq "store value - 1: [joe]");
- ok(109, $result{"fetch key"} eq "fetch key - 1: [0]");
- ok(110, ! defined $result{"fetch value"} );
- ok(111, $_ eq "original") ;
-
- $h[7] = "john" ;
- ok(112, $result{"store key"} eq "store key - 2: [0 7]");
- ok(113, $result{"store value"} eq "store value - 2: [joe john]");
- ok(114, $result{"fetch key"} eq "fetch key - 1: [0]");
- ok(115, ! defined $result{"fetch value"} );
- ok(116, $_ eq "original") ;
-
- ok(117, $h[0] eq "joe");
- ok(118, $result{"store key"} eq "store key - 3: [0 7 0]");
- ok(119, $result{"store value"} eq "store value - 2: [joe john]");
- ok(120, $result{"fetch key"} eq "fetch key - 1: [0]");
- ok(121, $result{"fetch value"} eq "fetch value - 1: [joe]");
- ok(122, $_ eq "original") ;
-
- undef $db ;
- untie @h;
- unlink $Dfile;
-}
-
-{
- # DBM Filter recursion detection
- use warnings ;
- use strict ;
- my (@h, $db) ;
- unlink $Dfile;
-
- ok(123, $db = tie(@h, 'DB_File', $Dfile, O_RDWR|O_CREAT, 0640, $DB_RECNO ) );
-
- $db->filter_store_key (sub { $_ = $h[0] }) ;
-
- eval '$h[1] = 1234' ;
- ok(124, $@ =~ /^recursion detected in filter_store_key at/ );
-
- undef $db ;
- untie @h;
- unlink $Dfile;
-}
-
-
-{
- # Examples from the POD
-
- my $file = "xyzt" ;
- {
- my $redirect = new Redirect $file ;
-
- use warnings FATAL => qw(all);
- use strict ;
- use DB_File ;
-
- my $filename = "text" ;
- unlink $filename ;
-
- my @h ;
- my $x = tie @h, "DB_File", $filename, O_RDWR|O_CREAT, 0640, $DB_RECNO
- or die "Cannot open file 'text': $!\n" ;
-
- # Add a few key/value pairs to the file
- $h[0] = "orange" ;
- $h[1] = "blue" ;
- $h[2] = "yellow" ;
-
- $FA ? push @h, "green", "black"
- : $x->push("green", "black") ;
-
- my $elements = $FA ? scalar @h : $x->length ;
- print "The array contains $elements entries\n" ;
-
- my $last = $FA ? pop @h : $x->pop ;
- print "popped $last\n" ;
-
- $FA ? unshift @h, "white"
- : $x->unshift("white") ;
- my $first = $FA ? shift @h : $x->shift ;
- print "shifted $first\n" ;
-
- # Check for existence of a key
- print "Element 1 Exists with value $h[1]\n" if $h[1] ;
-
- # use a negative index
- print "The last element is $h[-1]\n" ;
- print "The 2nd last element is $h[-2]\n" ;
-
- undef $x ;
- untie @h ;
-
- unlink $filename ;
- }
-
- ok(125, docat_del($file) eq <<'EOM') ;
-The array contains 5 entries
-popped black
-shifted white
-Element 1 Exists with value blue
-The last element is green
-The 2nd last element is yellow
-EOM
-
- my $save_output = "xyzt" ;
- {
- my $redirect = new Redirect $save_output ;
-
- use warnings FATAL => qw(all);
- use strict ;
- use vars qw(@h $H $file $i) ;
- use DB_File ;
- use Fcntl ;
-
- $file = "text" ;
-
- unlink $file ;
-
- $H = tie @h, "DB_File", $file, O_RDWR|O_CREAT, 0640, $DB_RECNO
- or die "Cannot open file $file: $!\n" ;
-
- # first create a text file to play with
- $h[0] = "zero" ;
- $h[1] = "one" ;
- $h[2] = "two" ;
- $h[3] = "three" ;
- $h[4] = "four" ;
-
-
- # Print the records in order.
- #
- # The length method is needed here because evaluating a tied
- # array in a scalar context does not return the number of
- # elements in the array.
-
- print "\nORIGINAL\n" ;
- foreach $i (0 .. $H->length - 1) {
- print "$i: $h[$i]\n" ;
- }
-
- # use the push & pop methods
- $a = $H->pop ;
- $H->push("last") ;
- print "\nThe last record was [$a]\n" ;
-
- # and the shift & unshift methods
- $a = $H->shift ;
- $H->unshift("first") ;
- print "The first record was [$a]\n" ;
-
- # Use the API to add a new record after record 2.
- $i = 2 ;
- $H->put($i, "Newbie", R_IAFTER) ;
-
- # and a new record before record 1.
- $i = 1 ;
- $H->put($i, "New One", R_IBEFORE) ;
-
- # delete record 3
- $H->del(3) ;
-
- # now print the records in reverse order
- print "\nREVERSE\n" ;
- for ($i = $H->length - 1 ; $i >= 0 ; -- $i)
- { print "$i: $h[$i]\n" }
-
- # same again, but use the API functions instead
- print "\nREVERSE again\n" ;
- my ($s, $k, $v) = (0, 0, 0) ;
- for ($s = $H->seq($k, $v, R_LAST) ;
- $s == 0 ;
- $s = $H->seq($k, $v, R_PREV))
- { print "$k: $v\n" }
-
- undef $H ;
- untie @h ;
-
- unlink $file ;
- }
-
- ok(126, docat_del($save_output) eq <<'EOM') ;
-
-ORIGINAL
-0: zero
-1: one
-2: two
-3: three
-4: four
-
-The last record was [four]
-The first record was [zero]
-
-REVERSE
-5: last
-4: three
-3: Newbie
-2: one
-1: New One
-0: first
-
-REVERSE again
-5: last
-4: three
-3: Newbie
-2: one
-1: New One
-0: first
-EOM
-
-}
-
-{
- # Bug ID 20001013.009
- #
- # test that $hash{KEY} = undef doesn't produce the warning
- # Use of uninitialized value in null operation
- use warnings ;
- use strict ;
- use DB_File ;
-
- unlink $Dfile;
- my @h ;
- my $a = "";
- local $SIG{__WARN__} = sub {$a = $_[0]} ;
-
- tie @h, 'DB_File', $Dfile, O_RDWR|O_CREAT, 0664, $DB_RECNO
- or die "Can't open file: $!\n" ;
- $h[0] = undef;
- ok(127, $a eq "") ;
- untie @h ;
- unlink $Dfile;
-}
-
-{
- # test that %hash = () doesn't produce the warning
- # Argument "" isn't numeric in entersub
- use warnings ;
- use strict ;
- use DB_File ;
- my $a = "";
- local $SIG{__WARN__} = sub {$a = $_[0]} ;
-
- unlink $Dfile;
- my @h ;
-
- tie @h, 'DB_File', $Dfile, O_RDWR|O_CREAT, 0664, $DB_RECNO
- or die "Can't open file: $!\n" ;
- @h = (); ;
- ok(128, $a eq "") ;
- untie @h ;
- unlink $Dfile;
-}
-
-exit ;
diff --git a/bdb/perl.BerkeleyDB/BerkeleyDB.pm b/bdb/perl/BerkeleyDB/BerkeleyDB.pm
index cc172a2bd22..c56390ba71f 100644
--- a/bdb/perl.BerkeleyDB/BerkeleyDB.pm
+++ b/bdb/perl/BerkeleyDB/BerkeleyDB.pm
@@ -2,7 +2,7 @@
package BerkeleyDB;
-# Copyright (c) 1997-2001 Paul Marquess. All rights reserved.
+# Copyright (c) 1997-2002 Paul Marquess. All rights reserved.
# This program is free software; you can redistribute it and/or
# modify it under the same terms as Perl itself.
#
@@ -14,54 +14,124 @@ BEGIN { require 5.004_04 }
use strict;
use Carp;
-use vars qw($VERSION @ISA @EXPORT $AUTOLOAD);
+use vars qw($VERSION @ISA @EXPORT $AUTOLOAD
+ $use_XSLoader);
-$VERSION = '0.13';
+$VERSION = '0.20';
require Exporter;
-require DynaLoader;
+#require DynaLoader;
require AutoLoader;
-use IO ;
+
+BEGIN {
+ $use_XSLoader = 1 ;
+ { local $SIG{__DIE__} ; eval { require XSLoader } ; }
+
+ if ($@) {
+ $use_XSLoader = 0 ;
+ require DynaLoader;
+ @ISA = qw(DynaLoader);
+ }
+}
@ISA = qw(Exporter DynaLoader);
# Items to export into callers namespace by default. Note: do not export
# names by default without a very good reason. Use EXPORT_OK instead.
# Do not simply export all your public functions/methods/constants.
-@EXPORT = qw(
+# NOTE -- Do not add to @EXPORT directly. It is written by mkconsts
+@EXPORT = qw(
DB_AFTER
+ DB_AGGRESSIVE
+ DB_ALREADY_ABORTED
DB_APPEND
+ DB_APPLY_LOGREG
+ DB_APP_INIT
DB_ARCH_ABS
DB_ARCH_DATA
DB_ARCH_LOG
+ DB_AUTO_COMMIT
DB_BEFORE
+ DB_BROADCAST_EID
DB_BTREE
DB_BTREEMAGIC
DB_BTREEOLDVER
DB_BTREEVERSION
+ DB_CACHED_COUNTS
+ DB_CDB_ALLDB
DB_CHECKPOINT
+ DB_CHKSUM_SHA1
+ DB_CLIENT
+ DB_CL_WRITER
+ DB_COMMIT
DB_CONSUME
+ DB_CONSUME_WAIT
DB_CREATE
DB_CURLSN
DB_CURRENT
- DB_DBT_MALLOC
- DB_DBT_PARTIAL
- DB_DBT_USERMEM
+ DB_CXX_NO_EXCEPTIONS
DB_DELETED
DB_DELIMITER
+ DB_DIRECT
+ DB_DIRECT_DB
+ DB_DIRECT_LOG
+ DB_DIRTY_READ
+ DB_DONOTINDEX
DB_DUP
+ DB_DUPCURSOR
DB_DUPSORT
+ DB_EID_BROADCAST
+ DB_EID_INVALID
+ DB_ENCRYPT
+ DB_ENCRYPT_AES
DB_ENV_APPINIT
+ DB_ENV_AUTO_COMMIT
+ DB_ENV_CDB
+ DB_ENV_CDB_ALLDB
+ DB_ENV_CREATE
+ DB_ENV_DBLOCAL
+ DB_ENV_DIRECT_DB
+ DB_ENV_DIRECT_LOG
+ DB_ENV_FATAL
+ DB_ENV_LOCKDOWN
+ DB_ENV_LOCKING
+ DB_ENV_LOGGING
+ DB_ENV_NOLOCKING
+ DB_ENV_NOMMAP
+ DB_ENV_NOPANIC
+ DB_ENV_OPEN_CALLED
+ DB_ENV_OVERWRITE
+ DB_ENV_PANIC_OK
+ DB_ENV_PRIVATE
+ DB_ENV_REGION_INIT
+ DB_ENV_REP_CLIENT
+ DB_ENV_REP_LOGSONLY
+ DB_ENV_REP_MASTER
+ DB_ENV_RPCCLIENT
+ DB_ENV_RPCCLIENT_GIVEN
DB_ENV_STANDALONE
+ DB_ENV_SYSTEM_MEM
DB_ENV_THREAD
+ DB_ENV_TXN
+ DB_ENV_TXN_NOSYNC
+ DB_ENV_TXN_WRITE_NOSYNC
+ DB_ENV_USER_ALLOC
+ DB_ENV_YIELDCPU
DB_EXCL
+ DB_EXTENT
+ DB_FAST_STAT
+ DB_FCNTL_LOCKING
DB_FILE_ID_LEN
DB_FIRST
DB_FIXEDLEN
DB_FLUSH
DB_FORCE
+ DB_GETREC
DB_GET_BOTH
+ DB_GET_BOTHC
+ DB_GET_BOTH_RANGE
DB_GET_RECNO
+ DB_HANDLE_LOCK
DB_HASH
DB_HASHMAGIC
DB_HASHOLDVER
@@ -72,77 +142,207 @@ use IO ;
DB_INIT_LOG
DB_INIT_MPOOL
DB_INIT_TXN
+ DB_INVALID_EID
+ DB_JAVA_CALLBACK
+ DB_JOINENV
DB_JOIN_ITEM
+ DB_JOIN_NOSORT
DB_KEYEMPTY
DB_KEYEXIST
DB_KEYFIRST
DB_KEYLAST
DB_LAST
+ DB_LOCKDOWN
DB_LOCKMAGIC
DB_LOCKVERSION
DB_LOCK_CONFLICT
DB_LOCK_DEADLOCK
DB_LOCK_DEFAULT
+ DB_LOCK_DUMP
+ DB_LOCK_EXPIRE
+ DB_LOCK_FREE_LOCKER
DB_LOCK_GET
+ DB_LOCK_GET_TIMEOUT
+ DB_LOCK_INHERIT
+ DB_LOCK_MAXLOCKS
+ DB_LOCK_MINLOCKS
+ DB_LOCK_MINWRITE
DB_LOCK_NORUN
+ DB_LOCK_NOTEXIST
DB_LOCK_NOTGRANTED
DB_LOCK_NOTHELD
DB_LOCK_NOWAIT
DB_LOCK_OLDEST
+ DB_LOCK_PUT
+ DB_LOCK_PUT_ALL
+ DB_LOCK_PUT_OBJ
+ DB_LOCK_PUT_READ
DB_LOCK_RANDOM
+ DB_LOCK_RECORD
+ DB_LOCK_REMOVE
DB_LOCK_RIW_N
DB_LOCK_RW_N
+ DB_LOCK_SET_TIMEOUT
+ DB_LOCK_SWITCH
+ DB_LOCK_TIMEOUT
+ DB_LOCK_TRADE
+ DB_LOCK_UPGRADE
+ DB_LOCK_UPGRADE_WRITE
DB_LOCK_YOUNGEST
+ DB_LOGC_BUF_SIZE
+ DB_LOGFILEID_INVALID
DB_LOGMAGIC
DB_LOGOLDVER
+ DB_LOGVERSION
+ DB_LOG_DISK
+ DB_LOG_LOCKED
+ DB_LOG_SILENT_ERR
DB_MAX_PAGES
DB_MAX_RECORDS
DB_MPOOL_CLEAN
DB_MPOOL_CREATE
DB_MPOOL_DIRTY
DB_MPOOL_DISCARD
+ DB_MPOOL_EXTENT
DB_MPOOL_LAST
DB_MPOOL_NEW
+ DB_MPOOL_NEW_GROUP
DB_MPOOL_PRIVATE
+ DB_MULTIPLE
+ DB_MULTIPLE_KEY
DB_MUTEXDEBUG
DB_MUTEXLOCKS
DB_NEEDSPLIT
DB_NEXT
DB_NEXT_DUP
+ DB_NEXT_NODUP
+ DB_NOCOPY
+ DB_NODUPDATA
+ DB_NOLOCKING
DB_NOMMAP
+ DB_NOORDERCHK
DB_NOOVERWRITE
+ DB_NOPANIC
+ DB_NORECURSE
+ DB_NOSERVER
+ DB_NOSERVER_HOME
+ DB_NOSERVER_ID
DB_NOSYNC
DB_NOTFOUND
+ DB_ODDFILESIZE
+ DB_OK_BTREE
+ DB_OK_HASH
+ DB_OK_QUEUE
+ DB_OK_RECNO
+ DB_OLD_VERSION
+ DB_OPEN_CALLED
+ DB_OPFLAGS_MASK
+ DB_ORDERCHKONLY
+ DB_OVERWRITE
DB_PAD
DB_PAGEYIELD
+ DB_PAGE_LOCK
+ DB_PAGE_NOTFOUND
+ DB_PANIC_ENVIRONMENT
+ DB_PERMANENT
DB_POSITION
+ DB_POSITIONI
DB_PREV
+ DB_PREV_NODUP
+ DB_PRINTABLE
+ DB_PRIORITY_DEFAULT
+ DB_PRIORITY_HIGH
+ DB_PRIORITY_LOW
+ DB_PRIORITY_VERY_HIGH
+ DB_PRIORITY_VERY_LOW
DB_PRIVATE
+ DB_PR_HEADERS
+ DB_PR_PAGE
+ DB_PR_RECOVERYTEST
+ DB_QAMMAGIC
+ DB_QAMOLDVER
+ DB_QAMVERSION
DB_QUEUE
DB_RDONLY
+ DB_RDWRMASTER
DB_RECNO
DB_RECNUM
DB_RECORDCOUNT
+ DB_RECORD_LOCK
DB_RECOVER
DB_RECOVER_FATAL
+ DB_REGION_ANON
+ DB_REGION_INIT
+ DB_REGION_MAGIC
+ DB_REGION_NAME
DB_REGISTERED
+ DB_RENAMEMAGIC
DB_RENUMBER
+ DB_REP_CLIENT
+ DB_REP_DUPMASTER
+ DB_REP_HOLDELECTION
+ DB_REP_LOGSONLY
+ DB_REP_MASTER
+ DB_REP_NEWMASTER
+ DB_REP_NEWSITE
+ DB_REP_OUTDATED
+ DB_REP_PERMANENT
+ DB_REP_UNAVAIL
+ DB_REVSPLITOFF
DB_RMW
+ DB_RPC_SERVERPROG
+ DB_RPC_SERVERVERS
DB_RUNRECOVERY
+ DB_SALVAGE
+ DB_SECONDARY_BAD
DB_SEQUENTIAL
DB_SET
+ DB_SET_LOCK_TIMEOUT
DB_SET_RANGE
DB_SET_RECNO
+ DB_SET_TXN_NOW
+ DB_SET_TXN_TIMEOUT
DB_SNAPSHOT
+ DB_STAT_CLEAR
+ DB_SURPRISE_KID
DB_SWAPBYTES
+ DB_SYSTEM_MEM
DB_TEMPORARY
+ DB_TEST_ELECTINIT
+ DB_TEST_ELECTSEND
+ DB_TEST_ELECTVOTE1
+ DB_TEST_ELECTVOTE2
+ DB_TEST_ELECTWAIT1
+ DB_TEST_ELECTWAIT2
+ DB_TEST_POSTDESTROY
+ DB_TEST_POSTEXTDELETE
+ DB_TEST_POSTEXTOPEN
+ DB_TEST_POSTEXTUNLINK
+ DB_TEST_POSTLOG
+ DB_TEST_POSTLOGMETA
+ DB_TEST_POSTOPEN
+ DB_TEST_POSTRENAME
+ DB_TEST_POSTSYNC
+ DB_TEST_PREDESTROY
+ DB_TEST_PREEXTDELETE
+ DB_TEST_PREEXTOPEN
+ DB_TEST_PREEXTUNLINK
+ DB_TEST_PREOPEN
+ DB_TEST_PRERENAME
+ DB_TEST_SUBDB_LOCKS
DB_THREAD
+ DB_TIMEOUT
DB_TRUNCATE
DB_TXNMAGIC
DB_TXNVERSION
+ DB_TXN_ABORT
+ DB_TXN_APPLY
+ DB_TXN_BACKWARD_ALLOC
DB_TXN_BACKWARD_ROLL
DB_TXN_CKP
DB_TXN_FORWARD_ROLL
+ DB_TXN_GETPGNOS
+ DB_TXN_LOCK
DB_TXN_LOCK_2PL
DB_TXN_LOCK_MASK
DB_TXN_LOCK_OPTIMIST
@@ -154,39 +354,55 @@ use IO ;
DB_TXN_NOSYNC
DB_TXN_NOWAIT
DB_TXN_OPENFILES
+ DB_TXN_POPENFILES
+ DB_TXN_PRINT
DB_TXN_REDO
DB_TXN_SYNC
DB_TXN_UNDO
+ DB_TXN_WRITE_NOSYNC
+ DB_UNKNOWN
+ DB_UNRESOLVED_CHILD
+ DB_UPDATE_SECONDARY
+ DB_UPGRADE
DB_USE_ENVIRON
DB_USE_ENVIRON_ROOT
+ DB_VERB_CHKPOINT
+ DB_VERB_DEADLOCK
+ DB_VERB_RECOVERY
+ DB_VERB_REPLICATION
+ DB_VERB_WAITSFOR
+ DB_VERIFY
+ DB_VERIFY_BAD
+ DB_VERIFY_FATAL
DB_VERSION_MAJOR
DB_VERSION_MINOR
DB_VERSION_PATCH
+ DB_VERSION_STRING
+ DB_VRFY_FLAGMASK
DB_WRITECURSOR
+ DB_WRITELOCK
+ DB_WRITEOPEN
+ DB_WRNOSYNC
+ DB_XA_CREATE
+ DB_XIDDATASIZE
+ DB_YIELDCPU
);
sub AUTOLOAD {
- # This AUTOLOAD is used to 'autoload' constants from the constant()
- # XS function. If a constant is not found then control is passed
- # to the AUTOLOAD in AutoLoader.
-
- my $constname;
+ my($constname);
($constname = $AUTOLOAD) =~ s/.*:://;
- my $val = constant($constname, @_ ? $_[0] : 0);
- if ($! != 0) {
- if ($! =~ /Invalid/) {
- $AutoLoader::AUTOLOAD = $AUTOLOAD;
- goto &AutoLoader::AUTOLOAD;
- }
- else {
- croak "Your vendor has not defined BerkeleyDB macro $constname";
- }
- }
- eval "sub $AUTOLOAD { $val }";
- goto &$AUTOLOAD;
-}
-
-bootstrap BerkeleyDB $VERSION;
+ my ($error, $val) = constant($constname);
+ Carp::croak $error if $error;
+ no strict 'refs';
+ *{$AUTOLOAD} = sub { $val };
+ goto &{$AUTOLOAD};
+}
+
+#bootstrap BerkeleyDB $VERSION;
+if ($use_XSLoader)
+ { XSLoader::load("BerkeleyDB", $VERSION)}
+else
+ { bootstrap BerkeleyDB $VERSION }
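A minimal sketch (editorial, not part of the patch) of how the reworked AUTOLOAD above behaves: the first call to an exported constant goes through the XS constant() helper, which returns either an error string or the value; the generated closure is then installed in the symbol table, so later calls bypass AUTOLOAD entirely.

    use BerkeleyDB ;
    my $flag = BerkeleyDB::DB_NEXT_NODUP() ;   # resolved via AUTOLOAD/constant()
    my $same = BerkeleyDB::DB_NEXT_NODUP() ;   # now a plain, cached sub call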
# Preloaded methods go here.
@@ -250,15 +466,6 @@ sub env_remove
Config => undef,
}, @_) ;
- if (defined $got->{ErrFile}) {
- if (!isaFilehandle($got->{ErrFile})) {
- my $handle = new IO::File ">$got->{ErrFile}"
- or croak "Cannot open file $got->{ErrFile}: $!\n" ;
- $got->{ErrFile} = $handle ;
- }
- }
-
-
if (defined $got->{Config}) {
croak("Config parameter must be a hash reference")
if ! ref $got->{Config} eq 'HASH' ;
@@ -295,6 +502,52 @@ sub db_remove
return _db_remove($got);
}
+sub db_rename
+{
+ my $got = BerkeleyDB::ParseParameters(
+ {
+ Filename => undef,
+ Subname => undef,
+ Newname => undef,
+ Flags => 0,
+ Env => undef,
+ }, @_) ;
+
+ croak("Env not of type BerkeleyDB::Env")
+ if defined $got->{Env} and ! isa($got->{Env},'BerkeleyDB::Env');
+
+ croak("Must specify a filename")
+ if ! defined $got->{Filename} ;
+
+ croak("Must specify a Subname")
+ if ! defined $got->{Subname} ;
+
+ croak("Must specify a Newname")
+ if ! defined $got->{Newname} ;
+
+ return _db_rename($got);
+}
+
+sub db_verify
+{
+ my $got = BerkeleyDB::ParseParameters(
+ {
+ Filename => undef,
+ Subname => undef,
+ Outfile => undef,
+ Flags => 0,
+ Env => undef,
+ }, @_) ;
+
+ croak("Env not of type BerkeleyDB::Env")
+ if defined $got->{Env} and ! isa($got->{Env},'BerkeleyDB::Env');
+
+ croak("Must specify a filename")
+ if ! defined $got->{Filename} ;
+
+ return _db_verify($got);
+}
+
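A hedged usage sketch for the two package-level functions added above (file and sub-database names are illustrative; as written, db_rename requires Filename, Subname and Newname, while db_verify requires only Filename; both need Berkeley DB 3.1 or better):

    my $status = BerkeleyDB::db_rename(
                     -Filename => "mydb.db",
                     -Subname  => "old_table",
                     -Newname  => "new_table") ;
    die "rename failed: $BerkeleyDB::Error\n" if $status ;

    $status = BerkeleyDB::db_verify(
                     -Filename => "mydb.db",
                     -Outfile  => "verify.out") ;
    die "verify failed: $BerkeleyDB::Error\n" if $status ;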
package BerkeleyDB::Env ;
use UNIVERSAL qw( isa ) ;
@@ -309,7 +562,8 @@ sub isaFilehandle
}
-%valid_config_keys = map { $_, 1 } qw( DB_DATA_DIR DB_LOG_DIR DB_TEMP_DIR ) ;
+%valid_config_keys = map { $_, 1 } qw( DB_DATA_DIR DB_LOG_DIR DB_TEMP_DIR
+DB_TMP_DIR ) ;
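An illustrative sketch (paths hypothetical) of the -Config handling that now also accepts DB_TMP_DIR as a synonym for DB_TEMP_DIR, matching the extended key list above:

    my $env = new BerkeleyDB::Env
        -Home   => "/var/db/home",
        -Flags  => DB_CREATE | DB_INIT_MPOOL,
        -Config => { DB_DATA_DIR => "/var/db/home/data",
                     DB_TMP_DIR  => "/var/db/home/tmp" }
      or die "cannot create environment: $BerkeleyDB::Error" ;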
sub new
{
@@ -319,9 +573,10 @@ sub new
# [ -Home => $path, ]
# [ -Mode => mode, ]
# [ -Config => { name => value, name => value }
- # [ -ErrFile => filename or filehandle, ]
+ # [ -ErrFile => filename, ]
# [ -ErrPrefix => "string", ]
# [ -Flags => DB_INIT_LOCK| ]
+ # [ -Set_Flags => $flags,]
# [ -Cachesize => number ]
# [ -LockDetect => ]
# [ -Verbose => boolean ]
@@ -335,6 +590,7 @@ sub new
ErrFile => undef,
ErrPrefix => undef,
Flags => 0,
+ SetFlags => 0,
Cachesize => 0,
LockDetect => 0,
Verbose => 0,
@@ -342,11 +598,13 @@ sub new
}, @_) ;
if (defined $got->{ErrFile}) {
- if (!isaFilehandle($got->{ErrFile})) {
- my $handle = new IO::File ">$got->{ErrFile}"
- or croak "Cannot open file $got->{ErrFile}: $!\n" ;
- $got->{ErrFile} = $handle ;
- }
+ croak("ErrFile parameter must be a file name")
+ if ref $got->{ErrFile} ;
+ #if (!isaFilehandle($got->{ErrFile})) {
+ # my $handle = new IO::File ">$got->{ErrFile}"
+# or croak "Cannot open file $got->{ErrFile}: $!\n" ;
+# $got->{ErrFile} = $handle ;
+# }
}
@@ -380,7 +638,7 @@ sub new
{ $obj->set_data_dir($v) }
elsif ($k eq 'DB_LOG_DIR')
{ $obj->set_lg_dir($v) }
- elsif ($k eq 'DB_TEMP_DIR')
+ elsif ($k eq 'DB_TEMP_DIR' || $k eq 'DB_TMP_DIR')
{ $obj->set_tmp_dir($v) }
else {
$BerkeleyDB::Error = "illegal name-value pair: $k $v\n" ;
@@ -469,7 +727,8 @@ sub new
if ($addr) {
$obj = bless [$addr] , $self ;
push @{ $obj }, $got->{Env} if $got->{Env} ;
- $obj->Txn($got->{Txn}) if $got->{Txn} ;
+ $obj->Txn($got->{Txn})
+ if $got->{Txn} ;
}
return $obj ;
}
@@ -524,7 +783,8 @@ sub new
if ($addr) {
$obj = bless [$addr] , $self ;
push @{ $obj }, $got->{Env} if $got->{Env} ;
- $obj->Txn($got->{Txn}) if $got->{Txn} ;
+ $obj->Txn($got->{Txn})
+ if $got->{Txn} ;
}
return $obj ;
}
@@ -586,7 +846,8 @@ sub new
if ($addr) {
$obj = bless [$addr] , $self ;
push @{ $obj }, $got->{Env} if $got->{Env} ;
- $obj->Txn($got->{Txn}) if $got->{Txn} ;
+ $obj->Txn($got->{Txn})
+ if $got->{Txn} ;
}
return $obj ;
}
@@ -639,19 +900,27 @@ sub new
croak("ArrayBase can only be 0 or 1, parsed $got->{ArrayBase}")
if $got->{ArrayBase} != 1 and $got->{ArrayBase} != 0 ;
+ $got->{Fname} = $got->{Filename} if defined $got->{Filename} ;
my ($addr) = _db_open_queue($self, $got);
my $obj ;
if ($addr) {
$obj = bless [$addr] , $self ;
push @{ $obj }, $got->{Env} if $got->{Env} ;
- $obj->Txn($got->{Txn}) if $got->{Txn} ;
+ $obj->Txn($got->{Txn})
+ if $got->{Txn} ;
}
return $obj ;
}
*BerkeleyDB::Queue::TIEARRAY = \&BerkeleyDB::Queue::new ;
+sub UNSHIFT
+{
+ my $self = shift;
+ croak "unshift is unsupported with Queue databases";
+}
+
## package BerkeleyDB::Text ;
##
## use vars qw(@ISA) ;
@@ -745,7 +1014,8 @@ sub new
if ($addr) {
$obj = bless [$addr], "BerkeleyDB::$type" ;
push @{ $obj }, $got->{Env} if $got->{Env} ;
- $obj->Txn($got->{Txn}) if $got->{Txn} ;
+ $obj->Txn($got->{Txn})
+ if $got->{Txn} ;
}
return $obj ;
}
@@ -934,13 +1204,12 @@ sub SHIFT
sub UNSHIFT
{
my $self = shift;
- croak "unshift is unsupported with Queue databases"
- if $self->type == BerkeleyDB::DB_QUEUE() ;
if (@_)
{
my ($key, $value) = (0, 0) ;
my $cursor = $self->db_cursor() ;
- if ($cursor->c_get($key, $value, BerkeleyDB::DB_FIRST()) == 0)
+ my $status = $cursor->c_get($key, $value, BerkeleyDB::DB_FIRST()) ;
+ if ($status == 0)
{
foreach $value (reverse @_)
{
@@ -948,6 +1217,14 @@ sub UNSHIFT
$cursor->c_put($key, $value, BerkeleyDB::DB_BEFORE()) ;
}
}
+ elsif ($status == BerkeleyDB::DB_NOTFOUND())
+ {
+ $key = 0 ;
+ foreach $value (@_)
+ {
+ $self->db_put($key++, $value) ;
+ }
+ }
}
}
@@ -956,14 +1233,16 @@ sub PUSH
my $self = shift;
if (@_)
{
- my ($key, $value) = (0, 0) ;
+ my ($key, $value) = (-1, 0) ;
my $cursor = $self->db_cursor() ;
- if ($cursor->c_get($key, $value, BerkeleyDB::DB_LAST()) == 0)
+ my $status = $cursor->c_get($key, $value, BerkeleyDB::DB_LAST()) ;
+ if ($status == 0 || $status == BerkeleyDB::DB_NOTFOUND())
{
+ $key = -1 if $status != 0 and $self->type != BerkeleyDB::DB_RECNO() ;
foreach $value (@_)
{
++ $key ;
- $self->db_put($key, $value) ;
+ $status = $self->db_put($key, $value) ;
}
}
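An editorial sketch of the tied-array path the UNSHIFT/PUSH changes above affect (file name illustrative): PUSH now also works when the database starts out empty (the DB_NOTFOUND branch), while unshift stays unsupported for Queue databases.

    tie my @list, 'BerkeleyDB::Recno',
        -Filename => "list.db",
        -Flags    => DB_CREATE
      or die "cannot tie: $BerkeleyDB::Error" ;

    push    @list, "first", "second" ;   # fine even on a freshly created, empty database
    unshift @list, "zeroth" ;            # allowed for Recno; croaks for BerkeleyDB::Queue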
@@ -1112,7 +1391,7 @@ sub db_join
my $db = shift ;
my ($addr) = $db->_db_join(@_) ;
my $obj ;
- $obj = bless [$addr, $db] , "BerkeleyDB::Cursor" if $addr ;
+ $obj = bless [$addr, $db, $_[0]] , "BerkeleyDB::Cursor" if $addr ;
return $obj ;
}
diff --git a/bdb/perl.BerkeleyDB/BerkeleyDB.pod b/bdb/perl/BerkeleyDB/BerkeleyDB.pod
index 2c5c3feb51e..60f30e2abfb 100644
--- a/bdb/perl.BerkeleyDB/BerkeleyDB.pod
+++ b/bdb/perl/BerkeleyDB/BerkeleyDB.pod
@@ -1,6 +1,6 @@
=head1 NAME
-BerkeleyDB - Perl extension for Berkeley DB version 2 or 3
+BerkeleyDB - Perl extension for Berkeley DB version 2, 3 or 4
=head1 SYNOPSIS
@@ -23,6 +23,8 @@ BerkeleyDB - Perl extension for Berkeley DB version 2 or 3
$db = new BerkeleyDB::Unknown [OPTIONS] ;
$status = BerkeleyDB::db_remove [OPTIONS]
+ $status = BerkeleyDB::db_rename [OPTIONS]
+ $status = BerkeleyDB::db_verify [OPTIONS]
$hash{$key} = $value ;
$value = $hash{$key} ;
@@ -35,11 +37,14 @@ BerkeleyDB - Perl extension for Berkeley DB version 2 or 3
$status = $db->db_del() ;
$status = $db->db_sync() ;
$status = $db->db_close() ;
+ $status = $db->db_pget()
$hash_ref = $db->db_stat() ;
$status = $db->db_key_range();
$type = $db->type() ;
$status = $db->status() ;
$boolean = $db->byteswapped() ;
+ $status = $db->truncate($count) ;
($flag, $old_offset, $old_length) = $db->partial_set($offset, $length) ;
($flag, $old_offset, $old_length) = $db->partial_clear() ;
@@ -50,6 +55,7 @@ BerkeleyDB - Perl extension for Berkeley DB version 2 or 3
$status = $cursor->c_put() ;
$status = $cursor->c_del() ;
$status = $cursor->c_count() ;
+ $status = $cursor->c_pget() ;
$status = $cursor->status() ;
$status = $cursor->c_close() ;
@@ -60,12 +66,24 @@ BerkeleyDB - Perl extension for Berkeley DB version 2 or 3
$status = $env->txn_checkpoint()
$hash_ref = $env->txn_stat()
$status = $env->setmutexlocks()
+ $status = $env->set_flags()
$txn = $env->txn_begin() ;
+ $db->Txn($txn);
+ $txn->Txn($db1, $db2,...);
$status = $txn->txn_prepare()
$status = $txn->txn_commit()
$status = $txn->txn_abort()
$status = $txn->txn_id()
+ $status = $txn->txn_discard()
+
+ $status = $env->set_lg_dir();
+ $status = $env->set_lg_bsize();
+ $status = $env->set_lg_max();
+
+ $status = $env->set_data_dir() ;
+ $status = $env->set_tmp_dir() ;
+ $status = $env->set_verbose() ;
$BerkeleyDB::Error
$BerkeleyDB::db_version
@@ -88,11 +106,12 @@ B<NOTE: This document is still under construction. Expect it to be
incomplete in places.>
This Perl module provides an interface to most of the functionality
-available in Berkeley DB versions 2 and 3. In general it is safe to assume
+available in Berkeley DB versions 2, 3 and 4. In general it is safe to assume
that the interface provided here is identical to the Berkeley DB
interface. The main changes have been to make the Berkeley DB API work
in a Perl way. Note that if you are using Berkeley DB 2.x, the new
-features available in Berkeley DB 3.x are not available via this module.
+features available in Berkeley DB 3.x or DB 4.x are not available via
+this module.
The reader is expected to be familiar with the Berkeley DB
documentation. Where the interface provided here is identical to the
@@ -108,7 +127,7 @@ classes.
The B<BerkeleyDB::Env> class provides an interface to the Berkeley DB
function B<db_appinit> in Berkeley DB 2.x or B<db_env_create> and
-B<DBENV-E<gt>open> in Berkeley DB 3.x. Its purpose is to initialise a
+B<DBENV-E<gt>open> in Berkeley DB 3.x/4.x. Its purpose is to initialise a
number of sub-systems that can then be used in a consistent way in all
the databases that make use of the environment.
@@ -122,9 +141,10 @@ shouldn't need to make use of B<BerkeleyDB::Env>.
[ -Server => $name, ]
[ -CacheSize => $number, ]
[ -Config => { name => value, name => value }, ]
- [ -ErrFile => filename or filehandle, ]
+ [ -ErrFile => filename, ]
[ -ErrPrefix => "string", ]
[ -Flags => number, ]
+ [ -SetFlags => bitmask, ]
[ -LockDetect => number, ]
[ -Verbose => boolean, ]
@@ -187,8 +207,8 @@ The code below shows an example of how it can be used.
=item -ErrFile
-Expects either the name of a file or a reference to a filehandle. Any
-errors generated internally by Berkeley DB will be logged to this file.
+Expects a filename. Any errors generated internally by Berkeley DB will
+be logged to this file.
=item -ErrPrefix
@@ -253,6 +273,13 @@ B<DB_USE_ENVIRON>
B<DB_USE_ENVIRON_ROOT>
+=item -SetFlags
+
+Calls DB_ENV->set_flags with the supplied bitmask. Use this when you need to make
+use of DB_ENV->set_flags before DB_ENV->open is called.
+
+Only valid when Berkeley DB 3.x or better is used.
+
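For example (a sketch; the flag choice is illustrative), to enable B<DB_TXN_NOSYNC> before the environment is opened:

    my $env = new BerkeleyDB::Env
        -Home     => $home,
        -Flags    => DB_CREATE | DB_INIT_TXN | DB_INIT_MPOOL |
                     DB_INIT_LOG | DB_INIT_LOCK,
        -SetFlags => DB_TXN_NOSYNC ;
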
=item -LockDetect
Specifies what to do when a lock conflict occurs. The value should be one of
@@ -282,6 +309,8 @@ The environment class has the following methods:
This method is identical to the B<-ErrPrefix> flag. It allows the
error prefix string to be changed dynamically.
+=item $env->set_flags(bitmask, 1|0);
+
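A sketch of the on/off convention (the second argument sets the flag with 1 and clears it with 0):

    $status = $env->set_flags(DB_TXN_NOSYNC, 1) ;   # set
    $status = $env->set_flags(DB_TXN_NOSYNC, 0) ;   # clear
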
=item $txn = $env->TxnMgr()
Constructor for creating a B<TxnMgr> object.
@@ -318,6 +347,12 @@ Berkeley DB 3.0 or 3.2 and better it calls B<DBENV-E<gt>set_mutexlocks>.
TODO.
+=head1 Global Classes
+
+ $status = BerkeleyDB::db_remove [OPTIONS]
+ $status = BerkeleyDB::db_rename [OPTIONS]
+ $status = BerkeleyDB::db_verify [OPTIONS]
+
=head1 THE DATABASE CLASSES
B<BerkeleyDB> supports the following database formats:
@@ -371,7 +406,7 @@ the next sections.
Equivalent to calling B<db_open> with type B<DB_HASH> in Berkeley DB 2.x and
calling B<db_create> followed by B<DB-E<gt>open> with type B<DB_HASH> in
-Berkeley DB 3.x.
+Berkeley DB 3.x or greater.
Two forms of constructor are supported:
@@ -680,7 +715,7 @@ TODO
Equivalent to calling B<db_open> with type B<DB_BTREE> in Berkeley DB 2.x and
calling B<db_create> followed by B<DB-E<gt>open> with type B<DB_BTREE> in
-Berkeley DB 3.x.
+Berkeley DB 3.x or greater.
Two forms of constructor are supported:
@@ -928,7 +963,7 @@ TODO
Equivalent to calling B<db_open> with type B<DB_RECNO> in Berkeley DB 2.x and
calling B<db_create> followed by B<DB-E<gt>open> with type B<DB_RECNO> in
-Berkeley DB 3.x.
+Berkeley DB 3.x or greater.
Two forms of constructor are supported:
@@ -1021,8 +1056,8 @@ Here is the output from the script:
=head1 BerkeleyDB::Queue
Equivalent to calling B<db_create> followed by B<DB-E<gt>open> with
-type B<DB_QUEUE> in Berkeley DB 3.x. This database format isn't available if
-you use Berkeley DB 2.x.
+type B<DB_QUEUE> in Berkeley DB 3.x or greater. This database format
+isn't available if you use Berkeley DB 2.x.
Two forms of constructor are supported:
@@ -1066,7 +1101,7 @@ This class is used to open an existing database.
Equivalent to calling B<db_open> with type B<DB_UNKNOWN> in Berkeley DB 2.x and
calling B<db_create> followed by B<DB-E<gt>open> with type B<DB_UNKNOWN> in
-Berkeley DB 3.x.
+Berkeley DB 3.x or greater.
The constructor looks like this:
@@ -1100,7 +1135,7 @@ be created and removed once the program terminates.
=item -Subname
Specifies the name of the sub-database to open.
-This option is only valid if you are using Berkeley DB 3.x.
+This option is only valid if you are using Berkeley DB 3.x or greater.
=item -Flags
@@ -1276,13 +1311,19 @@ equivalent field would be accessed as follows:
$version = $ref->{'bt_version'} ;
-If you are using Berkeley DB 3.x, this method will work will all database
-formats. When DB 2.x is used, it only works with B<BerkeleyDB::Btree>.
+If you are using Berkeley DB 3.x or better, this method will work with
+all database formats. When DB 2.x is used, it only works with
+B<BerkeleyDB::Btree>.
=head2 $status = $db->status()
Returns the status of the last C<$db> method called.
+=head2 $status = $db->truncate($count)
+
+Truncates the database and returns the number of records deleted
+in C<$count>.
+
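A short usage sketch (the record count comes back through the parameter):

    my $count = 0 ;
    $status = $db->truncate($count) ;
    print "deleted $count records\n" if $status == 0 ;
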
=head1 CURSORS
A cursor is used whenever you want to access the contents of a database
@@ -1690,10 +1731,10 @@ I get asked.
Before Berkeley DB 2.x was written there was only one Perl module that
interfaced to Berkeley DB. That module is called B<DB_File>. Although
-B<DB_File> can be build with Berkeley DB 1.x, 2.x or 3.x, it only provides
+B<DB_File> can be built with Berkeley DB 1.x, 2.x, 3.x or 4.x, it only provides
an interface to the functionality available in Berkeley DB 1.x. That
means that it doesn't support transactions, locking or any of the other
-new features available in DB 2.x or 3.x.
+new features available in DB 2.x or better.
=head2 How do I store Perl data structures with BerkeleyDB?
@@ -1713,7 +1754,7 @@ The official web site for Berkeley DB is F<http://www.sleepycat.com>.
=head1 COPYRIGHT
-Copyright (c) 1997-2001 Paul Marquess. All rights reserved. This program
+Copyright (c) 1997-2002 Paul Marquess. All rights reserved. This program
is free software; you can redistribute it and/or modify it under the
same terms as Perl itself.
diff --git a/bdb/perl.BerkeleyDB/BerkeleyDB.pod.P b/bdb/perl/BerkeleyDB/BerkeleyDB.pod.P
index 2bcff2d99d1..4a848f5388d 100644
--- a/bdb/perl.BerkeleyDB/BerkeleyDB.pod.P
+++ b/bdb/perl/BerkeleyDB/BerkeleyDB.pod.P
@@ -1,6 +1,6 @@
=head1 NAME
-BerkeleyDB - Perl extension for Berkeley DB version 2 or 3
+BerkeleyDB - Perl extension for Berkeley DB version 2, 3 or 4
=head1 SYNOPSIS
@@ -23,6 +23,8 @@ BerkeleyDB - Perl extension for Berkeley DB version 2 or 3
$db = new BerkeleyDB::Unknown [OPTIONS] ;
$status = BerkeleyDB::db_remove [OPTIONS]
+ $status = BerkeleyDB::db_rename [OPTIONS]
+ $status = BerkeleyDB::db_verify [OPTIONS]
$hash{$key} = $value ;
$value = $hash{$key} ;
@@ -35,11 +37,14 @@ BerkeleyDB - Perl extension for Berkeley DB version 2 or 3
$status = $db->db_del() ;
$status = $db->db_sync() ;
$status = $db->db_close() ;
+ $status = $db->db_pget()
$hash_ref = $db->db_stat() ;
$status = $db->db_key_range();
$type = $db->type() ;
$status = $db->status() ;
$boolean = $db->byteswapped() ;
+ $status = $db->truncate($count) ;
($flag, $old_offset, $old_length) = $db->partial_set($offset, $length) ;
($flag, $old_offset, $old_length) = $db->partial_clear() ;
@@ -50,6 +55,7 @@ BerkeleyDB - Perl extension for Berkeley DB version 2 or 3
$status = $cursor->c_put() ;
$status = $cursor->c_del() ;
$status = $cursor->c_count() ;
+ $status = $cursor->c_pget() ;
$status = $cursor->status() ;
$status = $cursor->c_close() ;
@@ -60,12 +66,24 @@ BerkeleyDB - Perl extension for Berkeley DB version 2 or 3
$status = $env->txn_checkpoint()
$hash_ref = $env->txn_stat()
$status = $env->setmutexlocks()
+ $status = $env->set_flags()
$txn = $env->txn_begin() ;
+ $db->Txn($txn);
+ $txn->Txn($db1, $db2,...);
$status = $txn->txn_prepare()
$status = $txn->txn_commit()
$status = $txn->txn_abort()
$status = $txn->txn_id()
+ $status = $txn->txn_discard()
+
+ $status = $env->set_lg_dir();
+ $status = $env->set_lg_bsize();
+ $status = $env->set_lg_max();
+
+ $status = $env->set_data_dir() ;
+ $status = $env->set_tmp_dir() ;
+ $status = $env->set_verbose() ;
$BerkeleyDB::Error
$BerkeleyDB::db_version
@@ -88,11 +106,12 @@ B<NOTE: This document is still under construction. Expect it to be
incomplete in places.>
This Perl module provides an interface to most of the functionality
-available in Berkeley DB versions 2 and 3. In general it is safe to assume
+available in Berkeley DB versions 2, 3 and 4. In general it is safe to assume
that the interface provided here is identical to the Berkeley DB
interface. The main changes have been to make the Berkeley DB API work
in a Perl way. Note that if you are using Berkeley DB 2.x, the new
-features available in Berkeley DB 3.x are not available via this module.
+features available in Berkeley DB 3.x or DB 4.x are not available via
+this module.
The reader is expected to be familiar with the Berkeley DB
documentation. Where the interface provided here is identical to the
@@ -108,7 +127,7 @@ classes.
The B<BerkeleyDB::Env> class provides an interface to the Berkeley DB
function B<db_appinit> in Berkeley DB 2.x or B<db_env_create> and
-B<DBENV-E<gt>open> in Berkeley DB 3.x. Its purpose is to initialise a
+B<DBENV-E<gt>open> in Berkeley DB 3.x/4.x. Its purpose is to initialise a
number of sub-systems that can then be used in a consistent way in all
the databases that make use of the environment.
@@ -122,9 +141,10 @@ shouldn't need to make use of B<BerkeleyDB::Env>.
[ -Server => $name, ]
[ -CacheSize => $number, ]
[ -Config => { name => value, name => value }, ]
- [ -ErrFile => filename or filehandle, ]
+ [ -ErrFile => filename, ]
[ -ErrPrefix => "string", ]
[ -Flags => number, ]
+ [ -SetFlags => bitmask, ]
[ -LockDetect => number, ]
[ -Verbose => boolean, ]
@@ -187,8 +207,8 @@ The code below shows an example of how it can be used.
=item -ErrFile
-Expects either the name of a file or a reference to a filehandle. Any
-errors generated internally by Berkeley DB will be logged to this file.
+Expects a filename. Any errors generated internally by Berkeley DB will
+be logged to this file.
=item -ErrPrefix
@@ -253,6 +273,13 @@ B<DB_USE_ENVIRON>
B<DB_USE_ENVIRON_ROOT>
+=item -SetFlags
+
+Calls DB_ENV->set_flags with the supplied bitmask. Use this when you need to make
+use of DB_ENV->set_flags before DB_ENV->open is called.
+
+Only valid when Berkeley DB 3.x or better is used.
+
=item -LockDetect
Specifies what to do when a lock conflict occurs. The value should be one of
@@ -282,6 +309,8 @@ The environment class has the following methods:
This method is identical to the B<-ErrPrefix> flag. It allows the
error prefix string to be changed dynamically.
+=item $env->set_flags(bitmask, 1|0);
+
=item $txn = $env->TxnMgr()
Constructor for creating a B<TxnMgr> object.
@@ -318,6 +347,12 @@ Berkeley DB 3.0 or 3.2 and better it calls B<DBENV-E<gt>set_mutexlocks>.
TODO.
+=head1 Global Classes
+
+ $status = BerkeleyDB::db_remove [OPTIONS]
+ $status = BerkeleyDB::db_rename [OPTIONS]
+ $status = BerkeleyDB::db_verify [OPTIONS]
+
=head1 THE DATABASE CLASSES
B<BerkeleyDB> supports the following database formats:
@@ -371,7 +406,7 @@ the next sections.
Equivalent to calling B<db_open> with type B<DB_HASH> in Berkeley DB 2.x and
calling B<db_create> followed by B<DB-E<gt>open> with type B<DB_HASH> in
-Berkeley DB 3.x.
+Berkeley DB 3.x or greater.
Two forms of constructor are supported:
@@ -572,7 +607,7 @@ TODO
Equivalent to calling B<db_open> with type B<DB_BTREE> in Berkeley DB 2.x and
calling B<db_create> followed by B<DB-E<gt>open> with type B<DB_BTREE> in
-Berkeley DB 3.x.
+Berkeley DB 3.x or greater.
Two forms of constructor are supported:
@@ -767,7 +802,7 @@ TODO
Equivalent to calling B<db_open> with type B<DB_RECNO> in Berkeley DB 2.x and
calling B<db_create> followed by B<DB-E<gt>open> with type B<DB_RECNO> in
-Berkeley DB 3.x.
+Berkeley DB 3.x or greater.
Two forms of constructor are supported:
@@ -827,8 +862,8 @@ Here is the output from the script:
=head1 BerkeleyDB::Queue
Equivalent to calling B<db_create> followed by B<DB-E<gt>open> with
-type B<DB_QUEUE> in Berkeley DB 3.x. This database format isn't available if
-you use Berkeley DB 2.x.
+type B<DB_QUEUE> in Berkeley DB 3.x or greater. This database format
+isn't available if you use Berkeley DB 2.x.
Two forms of constructor are supported:
@@ -872,7 +907,7 @@ This class is used to open an existing database.
Equivalent to calling B<db_open> with type B<DB_UNKNOWN> in Berkeley DB 2.x and
calling B<db_create> followed by B<DB-E<gt>open> with type B<DB_UNKNOWN> in
-Berkeley DB 3.x.
+Berkeley DB 3.x or greater.
The constructor looks like this:
@@ -906,7 +941,7 @@ be created and removed once the program terminates.
=item -Subname
Specifies the name of the sub-database to open.
-This option is only valid if you are using Berkeley DB 3.x.
+This option is only valid if you are using Berkeley DB 3.x or greater.
=item -Flags
@@ -1082,13 +1117,19 @@ equivalent field would be accessed as follows:
$version = $ref->{'bt_version'} ;
-If you are using Berkeley DB 3.x, this method will work will all database
-formats. When DB 2.x is used, it only works with B<BerkeleyDB::Btree>.
+If you are using Berkeley DB 3.x or better, this method will work with
+all database formats. When DB 2.x is used, it only works with
+B<BerkeleyDB::Btree>.
=head2 $status = $db->status()
Returns the status of the last C<$db> method called.
+=head2 $status = $db->truncate($count)
+
+Truncates the database and returns the number of records deleted
+in C<$count>.
+
=head1 CURSORS
A cursor is used whenever you want to access the contents of a database
@@ -1457,10 +1498,10 @@ I get asked.
Before Berkeley DB 2.x was written there was only one Perl module that
interfaced to Berkeley DB. That module is called B<DB_File>. Although
-B<DB_File> can be build with Berkeley DB 1.x, 2.x or 3.x, it only provides
+B<DB_File> can be built with Berkeley DB 1.x, 2.x, 3.x or 4.x, it only provides
an interface to the functionality available in Berkeley DB 1.x. That
means that it doesn't support transactions, locking or any of the other
-new features available in DB 2.x or 3.x.
+new features available in DB 2.x or better.
=head2 How do I store Perl data structures with BerkeleyDB?
@@ -1480,7 +1521,7 @@ The official web site for Berkeley DB is F<http://www.sleepycat.com>.
=head1 COPYRIGHT
-Copyright (c) 1997-2001 Paul Marquess. All rights reserved. This program
+Copyright (c) 1997-2002 Paul Marquess. All rights reserved. This program
is free software; you can redistribute it and/or modify it under the
same terms as Perl itself.
diff --git a/bdb/perl.BerkeleyDB/BerkeleyDB.xs b/bdb/perl/BerkeleyDB/BerkeleyDB.xs
index 19126c98b53..531b38a655f 100644
--- a/bdb/perl.BerkeleyDB/BerkeleyDB.xs
+++ b/bdb/perl/BerkeleyDB/BerkeleyDB.xs
@@ -6,7 +6,7 @@
All comments/suggestions/problems are welcome
- Copyright (c) 1997-2001 Paul Marquess. All rights reserved.
+ Copyright (c) 1997-2002 Paul Marquess. All rights reserved.
This program is free software; you can redistribute it and/or
modify it under the same terms as Perl itself.
@@ -23,10 +23,29 @@
#ifdef __cplusplus
extern "C" {
#endif
+
#define PERL_POLLUTE
#include "EXTERN.h"
#include "perl.h"
#include "XSUB.h"
+#include "ppport.h"
+
+
+/* XSUB.h defines a macro called abort */
+/* This clashes with the txn abort method in Berkeley DB 4.x */
+/* This is a problem with ActivePerl (at least) */
+
+#ifdef _WIN32
+# ifdef abort
+# undef abort
+# endif
+# ifdef fopen
+# undef fopen
+# endif
+# ifdef fclose
+# undef fclose
+# endif
+#endif
/* Being the Berkeley DB we prefer the <sys/cdefs.h> (which will be
* shortly #included by the <db.h>) __attribute__ to the possibly
@@ -34,25 +53,32 @@ extern "C" {
#undef __attribute__
-#ifndef PERL_VERSION
-# include "patchlevel.h"
-# define PERL_REVISION 5
-# define PERL_VERSION PATCHLEVEL
-# define PERL_SUBVERSION SUBVERSION
+#ifdef USE_PERLIO
+# define GetFILEptr(sv) PerlIO_findFILE(IoOFP(sv_2io(sv)))
+#else
+# define GetFILEptr(sv) IoOFP(sv_2io(sv))
#endif
-#if PERL_REVISION == 5 && (PERL_VERSION < 4 || (PERL_VERSION == 4 && PERL_SUBVERSION <= 75 ))
+#include <db.h>
-# define PL_sv_undef sv_undef
-# define PL_na na
-# define PL_dirty dirty
+/* Check the version of Berkeley DB */
+#ifndef DB_VERSION_MAJOR
+#ifdef HASHMAGIC
+#error db.h is from Berkeley DB 1.x - need at least Berkeley DB 2.6.4
+#else
+#error db.h is not for Berkeley DB at all.
+#endif
+#endif
+
+#if (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR < 6) ||\
+ (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR == 6 && DB_VERSION_PATCH < 4)
+# error db.h is from Berkeley DB 2.0-2.5 - need at least Berkeley DB 2.6.4
#endif
-#include <db.h>
#if (DB_VERSION_MAJOR == 3 && DB_VERSION_MINOR == 0)
-# define IS_DB_3_0
+# define IS_DB_3_0_x
#endif
#if DB_VERSION_MAJOR >= 3
@@ -67,28 +93,22 @@ extern "C" {
# define AT_LEAST_DB_3_2
#endif
-/* need to define DEFSV & SAVE_DEFSV for older version of Perl */
-#ifndef DEFSV
-# define DEFSV GvSV(defgv)
-#endif
-
-#ifndef SAVE_DEFSV
-# define SAVE_DEFSV SAVESPTR(GvSV(defgv))
+#if DB_VERSION_MAJOR > 3 || \
+ (DB_VERSION_MAJOR == 3 && DB_VERSION_MINOR > 2) ||\
+ (DB_VERSION_MAJOR == 3 && DB_VERSION_MINOR == 2 && DB_VERSION_PATCH >= 6)
+# define AT_LEAST_DB_3_2_6
#endif
-#ifndef pTHX
-# define pTHX
-# define pTHX_
-# define aTHX
-# define aTHX_
+#if DB_VERSION_MAJOR > 3 || (DB_VERSION_MAJOR == 3 && DB_VERSION_MINOR >= 3)
+# define AT_LEAST_DB_3_3
#endif
-#ifndef dTHR
-# define dTHR
+#if DB_VERSION_MAJOR >= 4
+# define AT_LEAST_DB_4
#endif
-#ifndef newSVpvn
-# define newSVpvn(a,b) newSVpv(a,b)
+#if DB_VERSION_MAJOR > 4 || (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR >= 1)
+# define AT_LEAST_DB_4_1
#endif
#ifdef __cplusplus
@@ -155,7 +175,7 @@ typedef struct {
int Status ;
/* char ErrBuff[1000] ; */
SV * ErrPrefix ;
- SV * ErrHandle ;
+ FILE * ErrHandle ;
DB_ENV * Env ;
int open_dbs ;
int TxnMgrStatus ;
@@ -171,9 +191,17 @@ typedef struct {
BerkeleyDB_ENV_type * parent_env ;
DB * dbp ;
SV * compare ;
+ bool in_compare ;
SV * dup_compare ;
+ bool in_dup_compare ;
SV * prefix ;
+ bool in_prefix ;
SV * hash ;
+ bool in_hash ;
+#ifdef AT_LEAST_DB_3_3
+ SV * associated ;
+ bool secondary_db ;
+#endif
int Status ;
DB_INFO * info ;
DBC * cursor ;
@@ -205,6 +233,10 @@ typedef struct {
SV * dup_compare ;
SV * prefix ;
SV * hash ;
+#ifdef AT_LEAST_DB_3_3
+ SV * associated ;
+ bool secondary_db ;
+#endif
int Status ;
DB_INFO * info ;
DBC * cursor ;
@@ -279,7 +311,7 @@ typedef PerlIO * IO_or_NULL ;
typedef int DualType ;
static void
-hash_delete(char * hash, IV key);
+hash_delete(char * hash, char * key);
#ifdef TRACE
# define Trace(x) printf x
@@ -305,26 +337,14 @@ hash_delete(char * hash, IV key);
# define flagSet(bitmask) ((flags & DB_OPFLAGS_MASK) == (bitmask))
#endif
-#ifdef DBM_FILTERING
-#define ckFilter(arg,type,name) \
- if (db->type) { \
- SV * save_defsv ; \
- /* printf("filtering %s\n", name) ;*/ \
- if (db->filtering) \
- softCrash("recursion detected in %s", name) ; \
- db->filtering = TRUE ; \
- save_defsv = newSVsv(DEFSV) ; \
- sv_setsv(DEFSV, arg) ; \
- PUSHMARK(sp) ; \
- (void) perl_call_sv(db->type, G_DISCARD|G_NOARGS); \
- sv_setsv(arg, DEFSV) ; \
- sv_setsv(DEFSV, save_defsv) ; \
- SvREFCNT_dec(save_defsv) ; \
- db->filtering = FALSE ; \
- /*printf("end of filtering %s\n", name) ;*/ \
- }
+#if DB_VERSION_MAJOR == 2
+# define BackRef internal
#else
-#define ckFilter(type, sv, name)
+# if DB_VERSION_MAJOR == 3 || (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR == 0)
+# define BackRef cj_internal
+# else
+# define BackRef api_internal
+# endif
#endif
#define ERR_BUFF "BerkeleyDB::Error"
@@ -335,9 +355,9 @@ hash_delete(char * hash, IV key);
#define DBT_clear(x) Zero(&x, 1, DBT) ;
#if 1
-#define getInnerObject(x) SvIV(*av_fetch((AV*)SvRV(x), 0, FALSE))
+#define getInnerObject(x) (*av_fetch((AV*)SvRV(x), 0, FALSE))
#else
-#define getInnerObject(x) SvIV((SV*)SvRV(sv))
+#define getInnerObject(x) ((SV*)SvRV(sv))
#endif
#define my_sv_setpvn(sv, d, s) (s ? sv_setpvn(sv, d, s) : sv_setpv(sv, "") )
@@ -345,7 +365,7 @@ hash_delete(char * hash, IV key);
#define SetValue_iv(i, k) if ((sv = readHash(hash, k)) && sv != &PL_sv_undef) \
i = SvIV(sv)
#define SetValue_io(i, k) if ((sv = readHash(hash, k)) && sv != &PL_sv_undef) \
- i = IoOFP(sv_2io(sv))
+ i = GetFILEptr(sv)
#define SetValue_sv(i, k) if ((sv = readHash(hash, k)) && sv != &PL_sv_undef) \
i = sv
#define SetValue_pv(i, k,t) if ((sv = readHash(hash, k)) && sv != &PL_sv_undef) \
@@ -353,20 +373,20 @@ hash_delete(char * hash, IV key);
#define SetValue_pvx(i, k, t) if ((sv = readHash(hash, k)) && sv != &PL_sv_undef) \
i = (t)SvPVX(sv)
#define SetValue_ov(i,k,t) if ((sv = readHash(hash, k)) && sv != &PL_sv_undef) {\
- IV tmp = getInnerObject(sv) ; \
- i = (t) tmp ; \
+ IV tmp = SvIV(getInnerObject(sv)) ; \
+ i = INT2PTR(t, tmp) ; \
}
#define SetValue_ovx(i,k,t) if ((sv = readHash(hash, k)) && sv != &PL_sv_undef) {\
HV * hv = (HV *)GetInternalObject(sv); \
SV ** svp = hv_fetch(hv, "db", 2, FALSE);\
IV tmp = SvIV(*svp); \
- i = (t) tmp ; \
+ i = INT2PTR(t, tmp) ; \
}
#define SetValue_ovX(i,k,t) if ((sv = readHash(hash, k)) && sv != &PL_sv_undef) {\
IV tmp = SvIV(GetInternalObject(sv));\
- i = (t) tmp ; \
+ i = INT2PTR(t, tmp) ; \
}
#define LastDBerror DB_RUNRECOVERY
@@ -379,7 +399,7 @@ hash_delete(char * hash, IV key);
#define OutputValue(arg, name) \
{ if (RETVAL == 0) { \
my_sv_setpvn(arg, name.data, name.size) ; \
- ckFilter(arg, filter_fetch_value,"filter_fetch_value") ; \
+ DBM_ckFilter(arg, filter_fetch_value,"filter_fetch_value") ; \
} \
}
@@ -392,7 +412,7 @@ hash_delete(char * hash, IV key);
else { \
my_sv_setpvn(arg, name.data, name.size) ; \
} \
- ckFilter(arg, filter_fetch_value, "filter_fetch_value"); \
+ DBM_ckFilter(arg, filter_fetch_value, "filter_fetch_value"); \
} \
}
@@ -404,7 +424,7 @@ hash_delete(char * hash, IV key);
} \
else \
sv_setiv(arg, (I32)*(I32*)name.data - RECNO_BASE); \
- ckFilter(arg, filter_fetch_key, "filter_fetch_key") ; \
+ DBM_ckFilter(arg, filter_fetch_key, "filter_fetch_key") ; \
} \
}
@@ -419,7 +439,7 @@ hash_delete(char * hash, IV key);
else { \
my_sv_setpvn(arg, name.data, name.size); \
} \
- ckFilter(arg, filter_fetch_key, "filter_fetch_key") ; \
+ DBM_ckFilter(arg, filter_fetch_key, "filter_fetch_key") ; \
} \
}
@@ -444,8 +464,29 @@ hash_delete(char * hash, IV key);
static db_recno_t Value ;
static db_recno_t zero = 0 ;
static BerkeleyDB CurrentDB ;
+
static DBTKEY empty ;
+#if 0
static char ErrBuff[1000] ;
+#endif
+
+#ifdef AT_LEAST_DB_3_3
+# if PERL_REVISION == 5 && PERL_VERSION <= 4
+
+/* saferealloc in perl5.004 will croak if it is given a NULL pointer*/
+void *
+MyRealloc(void * ptr, size_t size)
+{
+ if (ptr == NULL )
+ return safemalloc(size) ;
+ else
+ return saferealloc(ptr, size) ;
+}
+
+# else
+# define MyRealloc saferealloc
+# endif
+#endif
static char *
my_strdup(const char *s)
@@ -497,6 +538,8 @@ db_strerror(int err)
}
#endif /* DB_VERSION_MAJOR == 2 */
+#ifdef TRACE
+#if DB_VERSION_MAJOR > 2
static char *
my_db_strerror(int err)
{
@@ -509,6 +552,8 @@ my_db_strerror(int err)
}
return buffer;
}
+#endif
+#endif
static void
close_everything(void)
@@ -521,15 +566,19 @@ close_everything(void)
HE * he ;
I32 len ;
HV * hv = perl_get_hv("BerkeleyDB::Term::Txn", TRUE);
- I32 ret = hv_iterinit(hv) ;
int all = 0 ;
int closed = 0 ;
+ (void)hv_iterinit(hv) ;
Trace(("BerkeleyDB::Term::close_all_txns dirty=%d\n", PL_dirty)) ;
- while ( he = hv_iternext(hv) ) {
- tid = * (BerkeleyDB__Txn__Raw *) (IV) hv_iterkey(he, &len) ;
+ while ( (he = hv_iternext(hv)) ) {
+ tid = * (BerkeleyDB__Txn__Raw *) hv_iterkey(he, &len) ;
Trace((" Aborting Transaction [%d] in [%d] Active [%d]\n", tid->txn, tid, tid->active));
if (tid->active) {
+#ifdef AT_LEAST_DB_4
+ tid->txn->abort(tid->txn) ;
+#else
txn_abort(tid->txn);
+#endif
++ closed ;
}
tid->active = FALSE ;
@@ -544,12 +593,12 @@ close_everything(void)
HE * he ;
I32 len ;
HV * hv = perl_get_hv("BerkeleyDB::Term::Cursor", TRUE);
- I32 ret = hv_iterinit(hv) ;
int all = 0 ;
int closed = 0 ;
+ (void) hv_iterinit(hv) ;
Trace(("BerkeleyDB::Term::close_all_cursors \n")) ;
- while ( he = hv_iternext(hv) ) {
- db = * (BerkeleyDB__Cursor*) (IV) hv_iterkey(he, &len) ;
+ while ( (he = hv_iternext(hv)) ) {
+ db = * (BerkeleyDB__Cursor*) hv_iterkey(he, &len) ;
Trace((" Closing Cursor [%d] in [%d] Active [%d]\n", db->cursor, db, db->active));
if (db->active) {
((db->cursor)->c_close)(db->cursor) ;
@@ -567,12 +616,12 @@ close_everything(void)
HE * he ;
I32 len ;
HV * hv = perl_get_hv("BerkeleyDB::Term::Db", TRUE);
- I32 ret = hv_iterinit(hv) ;
int all = 0 ;
int closed = 0 ;
+ (void)hv_iterinit(hv) ;
Trace(("BerkeleyDB::Term::close_all_dbs\n" )) ;
- while ( he = hv_iternext(hv) ) {
- db = * (BerkeleyDB*) (IV) hv_iterkey(he, &len) ;
+ while ( (he = hv_iternext(hv)) ) {
+ db = * (BerkeleyDB*) hv_iterkey(he, &len) ;
Trace((" Closing Database [%d] in [%d] Active [%d]\n", db->dbp, db, db->active));
if (db->active) {
(db->dbp->close)(db->dbp, 0) ;
@@ -590,12 +639,12 @@ close_everything(void)
HE * he ;
I32 len ;
HV * hv = perl_get_hv("BerkeleyDB::Term::Env", TRUE);
- I32 ret = hv_iterinit(hv) ;
int all = 0 ;
int closed = 0 ;
+ (void)hv_iterinit(hv) ;
Trace(("BerkeleyDB::Term::close_all_envs\n")) ;
- while ( he = hv_iternext(hv) ) {
- env = * (BerkeleyDB__Env*) (IV) hv_iterkey(he, &len) ;
+ while ( (he = hv_iternext(hv)) ) {
+ env = * (BerkeleyDB__Env*) hv_iterkey(he, &len) ;
Trace((" Closing Environment [%d] in [%d] Active [%d]\n", env->Env, env, env->active));
if (env->active) {
#if DB_VERSION_MAJOR == 2
@@ -629,6 +678,10 @@ destroyDB(BerkeleyDB db)
SvREFCNT_dec(db->compare) ;
if (db->dup_compare)
SvREFCNT_dec(db->dup_compare) ;
+#ifdef AT_LEAST_DB_3_3
+ if (db->associated && !db->secondary_db)
+ SvREFCNT_dec(db->associated) ;
+#endif
if (db->prefix)
SvREFCNT_dec(db->prefix) ;
#ifdef DBM_FILTERING
@@ -641,13 +694,13 @@ destroyDB(BerkeleyDB db)
if (db->filter_store_value)
SvREFCNT_dec(db->filter_store_value) ;
#endif
- hash_delete("BerkeleyDB::Term::Db", (IV)db) ;
+ hash_delete("BerkeleyDB::Term::Db", (char *)db) ;
if (db->filename)
Safefree(db->filename) ;
Safefree(db) ;
}
-static void
+static int
softCrash(const char *pat, ...)
{
char buffer1 [500] ;
@@ -669,6 +722,7 @@ softCrash(const char *pat, ...)
/* NOTREACHED */
va_end(args);
+ return 1 ;
}
@@ -748,6 +802,7 @@ _GetRecnoKey(BerkeleyDB db, I32 value)
#endif /* 0 */
+#if 0
static SV *
GetInternalObject(SV * sv)
{
@@ -780,17 +835,19 @@ GetInternalObject(SV * sv)
Trace(("end of GetInternalObject %d\n", info)) ;
return info ;
}
+#endif
static int
btree_compare(DB_callback const DBT * key1, const DBT * key2 )
{
dSP ;
- void * data1, * data2 ;
+ char * data1, * data2 ;
int retval ;
int count ;
+ BerkeleyDB keepDB = CurrentDB ;
- data1 = key1->data ;
- data2 = key2->data ;
+ data1 = (char*) key1->data ;
+ data2 = (char*) key2->data ;
#ifndef newSVpvn
/* As newSVpv will assume that the data pointer is a null terminated C
@@ -824,6 +881,7 @@ btree_compare(DB_callback const DBT * key1, const DBT * key2 )
PUTBACK ;
FREETMPS ;
LEAVE ;
+ CurrentDB = keepDB ;
return (retval) ;
}
@@ -832,9 +890,10 @@ static int
dup_compare(DB_callback const DBT * key1, const DBT * key2 )
{
dSP ;
- void * data1, * data2 ;
+ char * data1, * data2 ;
int retval ;
int count ;
+ BerkeleyDB keepDB = CurrentDB ;
Trace(("In dup_compare \n")) ;
if (!CurrentDB)
@@ -842,8 +901,8 @@ dup_compare(DB_callback const DBT * key1, const DBT * key2 )
if (CurrentDB->dup_compare == NULL)
softCrash("in dup_compare: no callback specified for database '%s'", CurrentDB->filename) ;
- data1 = key1->data ;
- data2 = key2->data ;
+ data1 = (char*) key1->data ;
+ data2 = (char*) key2->data ;
#ifndef newSVpvn
/* As newSVpv will assume that the data pointer is a null terminated C
@@ -877,6 +936,7 @@ dup_compare(DB_callback const DBT * key1, const DBT * key2 )
PUTBACK ;
FREETMPS ;
LEAVE ;
+ CurrentDB = keepDB ;
return (retval) ;
}
@@ -885,12 +945,13 @@ static size_t
btree_prefix(DB_callback const DBT * key1, const DBT * key2 )
{
dSP ;
- void * data1, * data2 ;
+ char * data1, * data2 ;
int retval ;
int count ;
+ BerkeleyDB keepDB = CurrentDB ;
- data1 = key1->data ;
- data2 = key2->data ;
+ data1 = (char*) key1->data ;
+ data2 = (char*) key2->data ;
#ifndef newSVpvn
/* As newSVpv will assume that the data pointer is a null terminated C
@@ -924,6 +985,7 @@ btree_prefix(DB_callback const DBT * key1, const DBT * key2 )
PUTBACK ;
FREETMPS ;
LEAVE ;
+ CurrentDB = keepDB ;
return (retval) ;
}
@@ -934,6 +996,7 @@ hash_cb(DB_callback const void * data, u_int32_t size)
dSP ;
int retval ;
int count ;
+ BerkeleyDB keepDB = CurrentDB ;
#ifndef newSVpvn
if (size == 0)
@@ -960,10 +1023,84 @@ hash_cb(DB_callback const void * data, u_int32_t size)
PUTBACK ;
FREETMPS ;
LEAVE ;
+ CurrentDB = keepDB ;
return (retval) ;
}
+#ifdef AT_LEAST_DB_3_3
+
+static int
+associate_cb(DB_callback const DBT * pkey, const DBT * pdata, DBT * skey)
+{
+ dSP ;
+ char * pk_dat, * pd_dat, *sk_dat ;
+ int retval ;
+ int count ;
+ SV * skey_SV ;
+
+ Trace(("In associate_cb \n")) ;
+ if (((BerkeleyDB)db->BackRef)->associated == NULL){
+ Trace(("No Callback registered\n")) ;
+ return EINVAL ;
+ }
+
+ skey_SV = newSVpv("",0);
+
+
+ pk_dat = (char*) pkey->data ;
+ pd_dat = (char*) pdata->data ;
+
+#ifndef newSVpvn
+ /* As newSVpv will assume that the data pointer is a null terminated C
+ string if the size parameter is 0, make sure that data points to an
+ empty string if the length is 0
+ */
+ if (pkey->size == 0)
+ pk_dat = "" ;
+ if (pdata->size == 0)
+ pd_dat = "" ;
+#endif
+
+ ENTER ;
+ SAVETMPS;
+
+ PUSHMARK(SP) ;
+ EXTEND(SP,2) ;
+ PUSHs(sv_2mortal(newSVpvn(pk_dat,pkey->size)));
+ PUSHs(sv_2mortal(newSVpvn(pd_dat,pdata->size)));
+ PUSHs(sv_2mortal(skey_SV));
+ PUTBACK ;
+
+ Trace(("calling associated cb\n"));
+ count = perl_call_sv(((BerkeleyDB)db->BackRef)->associated, G_SCALAR);
+ Trace(("called associated cb\n"));
+
+ SPAGAIN ;
+
+ if (count != 1)
+	softCrash ("associate: expected 1 return value from associate callback, got %d", count) ;
+
+ retval = POPi ;
+
+ PUTBACK ;
+
+ /* retrieve the secondary key */
+ DBT_clear(*skey);
+ skey->flags = DB_DBT_APPMALLOC;
+ skey->size = SvCUR(skey_SV);
+ skey->data = (char*)safemalloc(skey->size);
+ memcpy(skey->data, SvPVX(skey_SV), skey->size);
+ Trace(("key is %d -- %.*s\n", skey->size, skey->size, skey->data));
+
+ FREETMPS ;
+ LEAVE ;
+
+ return (retval) ;
+}
+
+#endif /* AT_LEAST_DB_3_3 */
+
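An editorial sketch of the Perl-side contract that associate_cb above implements: the callback receives the primary key and value, writes the derived secondary key into its third argument, and returns 0 to have the record indexed. The Perl-level method name ($db->associate) is an assumption here rather than something shown in this hunk.

    $primary->associate($secondary, sub {
        my ($pkey, $pvalue) = @_ ;
        $_[2] = (split /\t/, $pvalue)[0] ;   # secondary key derived from the value
        return 0 ;                           # 0 => add this record to the secondary index
    }) ;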
static void
db_errcall_cb(const char * db_errpfx, char * buffer)
{
@@ -1004,17 +1141,17 @@ readHash(HV * hash, char * key)
}
static void
-hash_delete(char * hash, IV key)
+hash_delete(char * hash, char * key)
{
HV * hv = perl_get_hv(hash, TRUE);
(void) hv_delete(hv, (char*)&key, sizeof(key), G_DISCARD);
}
static void
-hash_store_iv(char * hash, IV key, IV value)
+hash_store_iv(char * hash, char * key, IV value)
{
HV * hv = perl_get_hv(hash, TRUE);
- SV ** ret = hv_store(hv, (char*)&key, sizeof(key), newSViv(value), 0);
+ (void)hv_store(hv, (char*)&key, sizeof(key), newSViv(value), 0);
/* printf("hv_store returned %d\n", ret) ; */
}
@@ -1030,6 +1167,7 @@ my_db_open(
SV * ref,
SV * ref_dbenv ,
BerkeleyDB__Env dbenv ,
+ BerkeleyDB__Txn txn,
const char * file,
const char * subname,
DBTYPE type,
@@ -1042,14 +1180,21 @@ my_db_open(
BerkeleyDB RETVAL = NULL ;
DB * dbp ;
int Status ;
+ DB_TXN* txnid = NULL ;
- Trace(("_db_open(dbenv[%lu] ref_dbenv [%lu] file[%s] subname [%s] type[%d] flags[%d] mode[%d]\n",
+ Trace(("_db_open(dbenv[%p] ref_dbenv [%p] file[%s] subname [%s] type[%d] flags[%d] mode[%d]\n",
dbenv, ref_dbenv, file, subname, type, flags, mode)) ;
CurrentDB = db ;
if (dbenv)
env = dbenv->Env ;
+ if (txn)
+ txnid = txn->txn;
+
+ Trace(("_db_open(dbenv[%p] ref_dbenv [%p] txn [%p] file[%s] subname [%s] type[%d] flags[%d] mode[%d]\n",
+ dbenv, ref_dbenv, txn, file, subname, type, flags, mode)) ;
+
#if DB_VERSION_MAJOR == 2
if (subname)
softCrash("Subname needs Berkeley DB 3 or better") ;
@@ -1061,6 +1206,13 @@ my_db_open(
if (Status)
return RETVAL ;
+#ifdef AT_LEAST_DB_3_3
+ if (! env) {
+ dbp->set_alloc(dbp, safemalloc, MyRealloc, safefree) ;
+ dbp->set_errcall(dbp, db_errcall_cb) ;
+ }
+#endif
+
if (info->re_source) {
Status = dbp->set_re_source(dbp, info->re_source) ;
Trace(("set_re_source [%s] returned %s\n",
@@ -1119,7 +1271,7 @@ my_db_open(
if (info->bt_compare) {
Status = dbp->set_bt_compare(dbp, info->bt_compare) ;
- Trace(("set_bt_compare [%d] returned %s\n",
+ Trace(("set_bt_compare [%p] returned %s\n",
info->bt_compare, my_db_strerror(Status)));
if (Status)
return RETVAL ;
@@ -1193,26 +1345,38 @@ my_db_open(
#endif
}
+#ifdef AT_LEAST_DB_4_1
+ if ((Status = (dbp->open)(dbp, txnid, file, subname, type, flags, mode)) == 0) {
+#else
if ((Status = (dbp->open)(dbp, file, subname, type, flags, mode)) == 0) {
+#endif /* AT_LEAST_DB_4_1 */
#else /* DB_VERSION_MAJOR == 2 */
if ((Status = db_open(file, type, flags, mode, env, info, &dbp)) == 0) {
#endif /* DB_VERSION_MAJOR == 2 */
- Trace(("db_opened\n"));
+ Trace(("db_opened ok\n"));
+#ifdef AT_LEAST_DB_3_3
+ dbp->BackRef = db;
+#endif
RETVAL = db ;
RETVAL->dbp = dbp ;
+ RETVAL->txn = txnid ;
#if DB_VERSION_MAJOR == 2
RETVAL->type = dbp->type ;
#else /* DB_VERSION_MAJOR > 2 */
+#ifdef AT_LEAST_DB_3_3
+ dbp->get_type(dbp, &RETVAL->type) ;
+#else /* DB 3.0 -> 3.2 */
RETVAL->type = dbp->get_type(dbp) ;
+#endif
#endif /* DB_VERSION_MAJOR > 2 */
RETVAL->recno_or_queue = (RETVAL->type == DB_RECNO ||
RETVAL->type == DB_QUEUE) ;
RETVAL->filename = my_strdup(file) ;
RETVAL->Status = Status ;
RETVAL->active = TRUE ;
- hash_store_iv("BerkeleyDB::Term::Db", (IV)RETVAL, 1) ;
- Trace((" storing %d %d in BerkeleyDB::Term::Db\n", RETVAL, dbp)) ;
+ hash_store_iv("BerkeleyDB::Term::Db", (char *)RETVAL, 1) ;
+ Trace((" storing %p %p in BerkeleyDB::Term::Db\n", RETVAL, dbp)) ;
if (dbenv) {
RETVAL->parent_env = dbenv ;
dbenv->Status = Status ;
@@ -1230,913 +1394,12 @@ my_db_open(
return RETVAL ;
}
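An editorial sketch of what the new txn parameter to my_db_open enables at the Perl level under Berkeley DB 4.1 (the -Txn constructor option and file name are illustrative assumptions):

    my $txn = $env->txn_begin() ;
    my $db  = new BerkeleyDB::Btree
        -Filename => "accounts.db",
        -Flags    => DB_CREATE,
        -Env      => $env,
        -Txn      => $txn
      or die "transactional open failed: $BerkeleyDB::Error" ;
    $txn->txn_commit() ;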
-static double
-constant(char * name, int arg)
-{
- errno = 0;
- switch (*name) {
- case 'A':
- break;
- case 'B':
- break;
- case 'C':
- break;
- case 'D':
- if (strEQ(name, "DB_AFTER"))
-#ifdef DB_AFTER
- return DB_AFTER;
-#else
- goto not_there;
-#endif
- if (strEQ(name, "DB_APPEND"))
-#ifdef DB_APPEND
- return DB_APPEND;
-#else
- goto not_there;
-#endif
- if (strEQ(name, "DB_ARCH_ABS"))
-#ifdef DB_ARCH_ABS
- return DB_ARCH_ABS;
-#else
- goto not_there;
-#endif
- if (strEQ(name, "DB_ARCH_DATA"))
-#ifdef DB_ARCH_DATA
- return DB_ARCH_DATA;
-#else
- goto not_there;
-#endif
- if (strEQ(name, "DB_ARCH_LOG"))
-#ifdef DB_ARCH_LOG
- return DB_ARCH_LOG;
-#else
- goto not_there;
-#endif
- if (strEQ(name, "DB_BEFORE"))
-#ifdef DB_BEFORE
- return DB_BEFORE;
-#else
- goto not_there;
-#endif
- if (strEQ(name, "DB_BTREE"))
- return DB_BTREE;
- if (strEQ(name, "DB_BTREEMAGIC"))
-#ifdef DB_BTREEMAGIC
- return DB_BTREEMAGIC;
-#else
- goto not_there;
-#endif
- if (strEQ(name, "DB_BTREEOLDVER"))
-#ifdef DB_BTREEOLDVER
- return DB_BTREEOLDVER;
-#else
- goto not_there;
-#endif
- if (strEQ(name, "DB_BTREEVERSION"))
-#ifdef DB_BTREEVERSION
- return DB_BTREEVERSION;
-#else
- goto not_there;
-#endif
- if (strEQ(name, "DB_CHECKPOINT"))
-#ifdef DB_CHECKPOINT
- return DB_CHECKPOINT;
-#else
- goto not_there;
-#endif
- if (strEQ(name, "DB_CONSUME"))
-#ifdef DB_CONSUME
- return DB_CONSUME;
-#else
- goto not_there;
-#endif
- if (strEQ(name, "DB_CREATE"))
-#ifdef DB_CREATE
- return DB_CREATE;
-#else
- goto not_there;
-#endif
- if (strEQ(name, "DB_CURLSN"))
-#ifdef DB_CURLSN
- return DB_CURLSN;
-#else
- goto not_there;
-#endif
- if (strEQ(name, "DB_CURRENT"))
-#ifdef DB_CURRENT
- return DB_CURRENT;
-#else
- goto not_there;
-#endif
- if (strEQ(name, "DB_DBT_MALLOC"))
-#ifdef DB_DBT_MALLOC
- return DB_DBT_MALLOC;
-#else
- goto not_there;
-#endif
- if (strEQ(name, "DB_DBT_PARTIAL"))
-#ifdef DB_DBT_PARTIAL
- return DB_DBT_PARTIAL;
-#else
- goto not_there;
-#endif
- if (strEQ(name, "DB_DBT_USERMEM"))
-#ifdef DB_DBT_USERMEM
- return DB_DBT_USERMEM;
-#else
- goto not_there;
-#endif
- if (strEQ(name, "DB_DELETED"))
-#ifdef DB_DELETED
- return DB_DELETED;
-#else
- goto not_there;
-#endif
- if (strEQ(name, "DB_DELIMITER"))
-#ifdef DB_DELIMITER
- return DB_DELIMITER;
-#else
- goto not_there;
-#endif
- if (strEQ(name, "DB_DUP"))
-#ifdef DB_DUP
- return DB_DUP;
-#else
- goto not_there;
-#endif
- if (strEQ(name, "DB_DUPSORT"))
-#ifdef DB_DUPSORT
- return DB_DUPSORT;
-#else
- goto not_there;
-#endif
- if (strEQ(name, "DB_ENV_APPINIT"))
-#ifdef DB_ENV_APPINIT
- return DB_ENV_APPINIT;
-#else
- goto not_there;
-#endif
- if (strEQ(name, "DB_ENV_STANDALONE"))
-#ifdef DB_ENV_STANDALONE
- return DB_ENV_STANDALONE;
-#else
- goto not_there;
-#endif
- if (strEQ(name, "DB_ENV_THREAD"))
-#ifdef DB_ENV_THREAD
- return DB_ENV_THREAD;
-#else
- goto not_there;
-#endif
- if (strEQ(name, "DB_EXCL"))
-#ifdef DB_EXCL
- return DB_EXCL;
-#else
- goto not_there;
-#endif
- if (strEQ(name, "DB_FILE_ID_LEN"))
-#ifdef DB_FILE_ID_LEN
- return DB_FILE_ID_LEN;
-#else
- goto not_there;
-#endif
- if (strEQ(name, "DB_FIRST"))
-#ifdef DB_FIRST
- return DB_FIRST;
-#else
- goto not_there;
-#endif
- if (strEQ(name, "DB_FIXEDLEN"))
-#ifdef DB_FIXEDLEN
- return DB_FIXEDLEN;
-#else
- goto not_there;
-#endif
- if (strEQ(name, "DB_FLUSH"))
-#ifdef DB_FLUSH
- return DB_FLUSH;
-#else
- goto not_there;
-#endif
- if (strEQ(name, "DB_FORCE"))
-#ifdef DB_FORCE
- return DB_FORCE;
-#else
- goto not_there;
-#endif
- if (strEQ(name, "DB_GET_BOTH"))
-#ifdef DB_GET_BOTH
- return DB_GET_BOTH;
-#else
- goto not_there;
-#endif
- if (strEQ(name, "DB_GET_RECNO"))
-#ifdef DB_GET_RECNO
- return DB_GET_RECNO;
-#else
- goto not_there;
-#endif
- if (strEQ(name, "DB_HASH"))
- return DB_HASH;
- if (strEQ(name, "DB_HASHMAGIC"))
-#ifdef DB_HASHMAGIC
- return DB_HASHMAGIC;
-#else
- goto not_there;
-#endif
- if (strEQ(name, "DB_HASHOLDVER"))
-#ifdef DB_HASHOLDVER
- return DB_HASHOLDVER;
-#else
- goto not_there;
-#endif
- if (strEQ(name, "DB_HASHVERSION"))
-#ifdef DB_HASHVERSION
- return DB_HASHVERSION;
-#else
- goto not_there;
-#endif
- if (strEQ(name, "DB_INCOMPLETE"))
-#ifdef DB_INCOMPLETE
- return DB_INCOMPLETE;
-#else
- goto not_there;
-#endif
- if (strEQ(name, "DB_INIT_CDB"))
-#ifdef DB_INIT_CDB
- return DB_INIT_CDB;
-#else
- goto not_there;
-#endif
- if (strEQ(name, "DB_INIT_LOCK"))
-#ifdef DB_INIT_LOCK
- return DB_INIT_LOCK;
-#else
- goto not_there;
-#endif
- if (strEQ(name, "DB_INIT_LOG"))
-#ifdef DB_INIT_LOG
- return DB_INIT_LOG;
-#else
- goto not_there;
-#endif
- if (strEQ(name, "DB_INIT_MPOOL"))
-#ifdef DB_INIT_MPOOL
- return DB_INIT_MPOOL;
-#else
- goto not_there;
-#endif
- if (strEQ(name, "DB_INIT_TXN"))
-#ifdef DB_INIT_TXN
- return DB_INIT_TXN;
-#else
- goto not_there;
-#endif
- if (strEQ(name, "DB_JOIN_ITEM"))
-#ifdef DB_JOIN_ITEM
- return DB_JOIN_ITEM;
-#else
- goto not_there;
-#endif
- if (strEQ(name, "DB_KEYEMPTY"))
-#ifdef DB_KEYEMPTY
- return DB_KEYEMPTY;
-#else
- goto not_there;
-#endif
- if (strEQ(name, "DB_KEYEXIST"))
-#ifdef DB_KEYEXIST
- return DB_KEYEXIST;
-#else
- goto not_there;
-#endif
- if (strEQ(name, "DB_KEYFIRST"))
-#ifdef DB_KEYFIRST
- return DB_KEYFIRST;
-#else
- goto not_there;
-#endif
- if (strEQ(name, "DB_KEYLAST"))
-#ifdef DB_KEYLAST
- return DB_KEYLAST;
-#else
- goto not_there;
-#endif
- if (strEQ(name, "DB_LAST"))
-#ifdef DB_LAST
- return DB_LAST;
-#else
- goto not_there;
-#endif
- if (strEQ(name, "DB_LOCKMAGIC"))
-#ifdef DB_LOCKMAGIC
- return DB_LOCKMAGIC;
-#else
- goto not_there;
-#endif
- if (strEQ(name, "DB_LOCKVERSION"))
-#ifdef DB_LOCKVERSION
- return DB_LOCKVERSION;
-#else
- goto not_there;
-#endif
- if (strEQ(name, "DB_LOCK_CONFLICT"))
-#ifdef DB_LOCK_CONFLICT
- return DB_LOCK_CONFLICT;
-#else
- goto not_there;
-#endif
- if (strEQ(name, "DB_LOCK_DEADLOCK"))
-#ifdef DB_LOCK_DEADLOCK
- return DB_LOCK_DEADLOCK;
-#else
- goto not_there;
-#endif
- if (strEQ(name, "DB_LOCK_DEFAULT"))
-#ifdef DB_LOCK_DEFAULT
- return DB_LOCK_DEFAULT;
-#else
- goto not_there;
-#endif
- if (strEQ(name, "DB_LOCK_GET"))
- return DB_LOCK_GET;
- if (strEQ(name, "DB_LOCK_NORUN"))
-#ifdef DB_LOCK_NORUN
- return DB_LOCK_NORUN;
-#else
- goto not_there;
-#endif
- if (strEQ(name, "DB_LOCK_NOTGRANTED"))
-#ifdef DB_LOCK_NOTGRANTED
- return DB_LOCK_NOTGRANTED;
-#else
- goto not_there;
-#endif
- if (strEQ(name, "DB_LOCK_NOTHELD"))
-#ifdef DB_LOCK_NOTHELD
- return DB_LOCK_NOTHELD;
-#else
- goto not_there;
-#endif
- if (strEQ(name, "DB_LOCK_NOWAIT"))
-#ifdef DB_LOCK_NOWAIT
- return DB_LOCK_NOWAIT;
-#else
- goto not_there;
-#endif
- if (strEQ(name, "DB_LOCK_OLDEST"))
-#ifdef DB_LOCK_OLDEST
- return DB_LOCK_OLDEST;
-#else
- goto not_there;
-#endif
- if (strEQ(name, "DB_LOCK_RANDOM"))
-#ifdef DB_LOCK_RANDOM
- return DB_LOCK_RANDOM;
-#else
- goto not_there;
-#endif
- if (strEQ(name, "DB_LOCK_RIW_N"))
-#ifdef DB_LOCK_RIW_N
- return DB_LOCK_RIW_N;
-#else
- goto not_there;
-#endif
- if (strEQ(name, "DB_LOCK_RW_N"))
-#ifdef DB_LOCK_RW_N
- return DB_LOCK_RW_N;
-#else
- goto not_there;
-#endif
- if (strEQ(name, "DB_LOCK_YOUNGEST"))
-#ifdef DB_LOCK_YOUNGEST
- return DB_LOCK_YOUNGEST;
-#else
- goto not_there;
-#endif
- if (strEQ(name, "DB_LOGMAGIC"))
-#ifdef DB_LOGMAGIC
- return DB_LOGMAGIC;
-#else
- goto not_there;
-#endif
- if (strEQ(name, "DB_LOGOLDVER"))
-#ifdef DB_LOGOLDVER
- return DB_LOGOLDVER;
-#else
- goto not_there;
-#endif
- if (strEQ(name, "DB_MAX_PAGES"))
-#ifdef DB_MAX_PAGES
- return DB_MAX_PAGES;
-#else
- goto not_there;
-#endif
- if (strEQ(name, "DB_MAX_RECORDS"))
-#ifdef DB_MAX_RECORDS
- return DB_MAX_RECORDS;
-#else
- goto not_there;
-#endif
- if (strEQ(name, "DB_MPOOL_CLEAN"))
-#ifdef DB_MPOOL_CLEAN
- return DB_MPOOL_CLEAN;
-#else
- goto not_there;
-#endif
- if (strEQ(name, "DB_MPOOL_CREATE"))
-#ifdef DB_MPOOL_CREATE
- return DB_MPOOL_CREATE;
-#else
- goto not_there;
-#endif
- if (strEQ(name, "DB_MPOOL_DIRTY"))
-#ifdef DB_MPOOL_DIRTY
- return DB_MPOOL_DIRTY;
-#else
- goto not_there;
-#endif
- if (strEQ(name, "DB_MPOOL_DISCARD"))
-#ifdef DB_MPOOL_DISCARD
- return DB_MPOOL_DISCARD;
-#else
- goto not_there;
-#endif
- if (strEQ(name, "DB_MPOOL_LAST"))
-#ifdef DB_MPOOL_LAST
- return DB_MPOOL_LAST;
-#else
- goto not_there;
-#endif
- if (strEQ(name, "DB_MPOOL_NEW"))
-#ifdef DB_MPOOL_NEW
- return DB_MPOOL_NEW;
-#else
- goto not_there;
-#endif
- if (strEQ(name, "DB_MPOOL_PRIVATE"))
-#ifdef DB_MPOOL_PRIVATE
- return DB_MPOOL_PRIVATE;
-#else
- goto not_there;
-#endif
- if (strEQ(name, "DB_MUTEXDEBUG"))
-#ifdef DB_MUTEXDEBUG
- return DB_MUTEXDEBUG;
-#else
- goto not_there;
-#endif
- if (strEQ(name, "DB_MUTEXLOCKS"))
-#ifdef DB_MUTEXLOCKS
- return DB_MUTEXLOCKS;
-#else
- goto not_there;
-#endif
- if (strEQ(name, "DB_NEEDSPLIT"))
-#ifdef DB_NEEDSPLIT
- return DB_NEEDSPLIT;
-#else
- goto not_there;
-#endif
- if (strEQ(name, "DB_NEXT"))
-#ifdef DB_NEXT
- return DB_NEXT;
-#else
- goto not_there;
-#endif
- if (strEQ(name, "DB_NEXT_DUP"))
-#ifdef DB_NEXT_DUP
- return DB_NEXT_DUP;
-#else
- goto not_there;
-#endif
- if (strEQ(name, "DB_NOMMAP"))
-#ifdef DB_NOMMAP
- return DB_NOMMAP;
-#else
- goto not_there;
-#endif
- if (strEQ(name, "DB_NOOVERWRITE"))
-#ifdef DB_NOOVERWRITE
- return DB_NOOVERWRITE;
-#else
- goto not_there;
-#endif
- if (strEQ(name, "DB_NOSYNC"))
-#ifdef DB_NOSYNC
- return DB_NOSYNC;
-#else
- goto not_there;
-#endif
- if (strEQ(name, "DB_NOTFOUND"))
-#ifdef DB_NOTFOUND
- return DB_NOTFOUND;
-#else
- goto not_there;
-#endif
- if (strEQ(name, "DB_PAD"))
-#ifdef DB_PAD
- return DB_PAD;
-#else
- goto not_there;
-#endif
- if (strEQ(name, "DB_PAGEYIELD"))
-#ifdef DB_PAGEYIELD
- return DB_PAGEYIELD;
-#else
- goto not_there;
-#endif
- if (strEQ(name, "DB_POSITION"))
-#ifdef DB_POSITION
- return DB_POSITION;
-#else
- goto not_there;
-#endif
- if (strEQ(name, "DB_PREV"))
-#ifdef DB_PREV
- return DB_PREV;
-#else
- goto not_there;
-#endif
- if (strEQ(name, "DB_PRIVATE"))
-#ifdef DB_PRIVATE
- return DB_PRIVATE;
-#else
- goto not_there;
-#endif
- if (strEQ(name, "DB_QUEUE"))
- return DB_QUEUE;
- if (strEQ(name, "DB_RDONLY"))
-#ifdef DB_RDONLY
- return DB_RDONLY;
-#else
- goto not_there;
-#endif
- if (strEQ(name, "DB_RECNO"))
- return DB_RECNO;
- if (strEQ(name, "DB_RECNUM"))
-#ifdef DB_RECNUM
- return DB_RECNUM;
-#else
- goto not_there;
-#endif
- if (strEQ(name, "DB_RECORDCOUNT"))
-#ifdef DB_RECORDCOUNT
- return DB_RECORDCOUNT;
-#else
- goto not_there;
-#endif
- if (strEQ(name, "DB_RECOVER"))
-#ifdef DB_RECOVER
- return DB_RECOVER;
-#else
- goto not_there;
-#endif
- if (strEQ(name, "DB_RECOVER_FATAL"))
-#ifdef DB_RECOVER_FATAL
- return DB_RECOVER_FATAL;
-#else
- goto not_there;
-#endif
- if (strEQ(name, "DB_REGISTERED"))
-#ifdef DB_REGISTERED
- return DB_REGISTERED;
-#else
- goto not_there;
-#endif
- if (strEQ(name, "DB_RENUMBER"))
-#ifdef DB_RENUMBER
- return DB_RENUMBER;
-#else
- goto not_there;
-#endif
- if (strEQ(name, "DB_RMW"))
-#ifdef DB_RMW
- return DB_RMW;
-#else
- goto not_there;
-#endif
- if (strEQ(name, "DB_RUNRECOVERY"))
-#ifdef DB_RUNRECOVERY
- return DB_RUNRECOVERY;
-#else
- goto not_there;
-#endif
- if (strEQ(name, "DB_SEQUENTIAL"))
-#ifdef DB_SEQUENTIAL
- return DB_SEQUENTIAL;
-#else
- goto not_there;
-#endif
- if (strEQ(name, "DB_SET"))
-#ifdef DB_SET
- return DB_SET;
-#else
- goto not_there;
-#endif
- if (strEQ(name, "DB_SET_RANGE"))
-#ifdef DB_SET_RANGE
- return DB_SET_RANGE;
-#else
- goto not_there;
-#endif
- if (strEQ(name, "DB_SET_RECNO"))
-#ifdef DB_SET_RECNO
- return DB_SET_RECNO;
-#else
- goto not_there;
-#endif
- if (strEQ(name, "DB_SNAPSHOT"))
-#ifdef DB_SNAPSHOT
- return DB_SNAPSHOT;
-#else
- goto not_there;
-#endif
- if (strEQ(name, "DB_SWAPBYTES"))
-#ifdef DB_SWAPBYTES
- return DB_SWAPBYTES;
-#else
- goto not_there;
-#endif
- if (strEQ(name, "DB_TEMPORARY"))
-#ifdef DB_TEMPORARY
- return DB_TEMPORARY;
-#else
- goto not_there;
-#endif
- if (strEQ(name, "DB_THREAD"))
-#ifdef DB_THREAD
- return DB_THREAD;
-#else
- goto not_there;
-#endif
- if (strEQ(name, "DB_TRUNCATE"))
-#ifdef DB_TRUNCATE
- return DB_TRUNCATE;
-#else
- goto not_there;
-#endif
- if (strEQ(name, "DB_TXNMAGIC"))
-#ifdef DB_TXNMAGIC
- return DB_TXNMAGIC;
-#else
- goto not_there;
-#endif
- if (strEQ(name, "DB_TXNVERSION"))
-#ifdef DB_TXNVERSION
- return DB_TXNVERSION;
-#else
- goto not_there;
-#endif
- if (strEQ(name, "DB_TXN_BACKWARD_ROLL"))
- return DB_TXN_BACKWARD_ROLL;
- if (strEQ(name, "DB_TXN_CKP"))
-#ifdef DB_TXN_CKP
- return DB_TXN_CKP;
-#else
- goto not_there;
-#endif
- if (strEQ(name, "DB_TXN_FORWARD_ROLL"))
- return DB_TXN_FORWARD_ROLL;
- if (strEQ(name, "DB_TXN_LOCK_2PL"))
-#ifdef DB_TXN_LOCK_2PL
- return DB_TXN_LOCK_2PL;
-#else
- goto not_there;
-#endif
- if (strEQ(name, "DB_TXN_LOCK_MASK"))
-#ifdef DB_TXN_LOCK_MASK
- return DB_TXN_LOCK_MASK;
-#else
- goto not_there;
-#endif
- if (strEQ(name, "DB_TXN_LOCK_OPTIMIST"))
-#ifdef DB_TXN_LOCK_OPTIMIST
- return DB_TXN_LOCK_OPTIMIST;
-#else
- goto not_there;
-#endif
- if (strEQ(name, "DB_TXN_LOCK_OPTIMISTIC"))
-#ifdef DB_TXN_LOCK_OPTIMISTIC
- return DB_TXN_LOCK_OPTIMISTIC;
-#else
- goto not_there;
-#endif
- if (strEQ(name, "DB_TXN_LOG_MASK"))
-#ifdef DB_TXN_LOG_MASK
- return DB_TXN_LOG_MASK;
-#else
- goto not_there;
-#endif
- if (strEQ(name, "DB_TXN_LOG_REDO"))
-#ifdef DB_TXN_LOG_REDO
- return DB_TXN_LOG_REDO;
-#else
- goto not_there;
-#endif
- if (strEQ(name, "DB_TXN_LOG_UNDO"))
-#ifdef DB_TXN_LOG_UNDO
- return DB_TXN_LOG_UNDO;
-#else
- goto not_there;
-#endif
- if (strEQ(name, "DB_TXN_LOG_UNDOREDO"))
-#ifdef DB_TXN_LOG_UNDOREDO
- return DB_TXN_LOG_UNDOREDO;
-#else
- goto not_there;
-#endif
- if (strEQ(name, "DB_TXN_NOSYNC"))
-#ifdef DB_TXN_NOSYNC
- return DB_TXN_NOSYNC;
-#else
- goto not_there;
-#endif
- if (strEQ(name, "DB_TXN_NOWAIT"))
-#ifdef DB_TXN_NOWAIT
- return DB_TXN_NOWAIT;
-#else
- goto not_there;
-#endif
- if (strEQ(name, "DB_TXN_OPENFILES"))
- return DB_TXN_OPENFILES;
- if (strEQ(name, "DB_TXN_REDO"))
-#ifdef DB_TXN_REDO
- return DB_TXN_REDO;
-#else
- goto not_there;
-#endif
- if (strEQ(name, "DB_TXN_SYNC"))
-#ifdef DB_TXN_SYNC
- return DB_TXN_SYNC;
-#else
- goto not_there;
-#endif
- if (strEQ(name, "DB_TXN_UNDO"))
-#ifdef DB_TXN_UNDO
- return DB_TXN_UNDO;
-#else
- goto not_there;
-#endif
- if (strEQ(name, "DB_UNKNOWN"))
- return DB_UNKNOWN;
- if (strEQ(name, "DB_USE_ENVIRON"))
-#ifdef DB_USE_ENVIRON
- return DB_USE_ENVIRON;
-#else
- goto not_there;
-#endif
- if (strEQ(name, "DB_USE_ENVIRON_ROOT"))
-#ifdef DB_USE_ENVIRON_ROOT
- return DB_USE_ENVIRON_ROOT;
-#else
- goto not_there;
-#endif
- if (strEQ(name, "DB_VERSION_MAJOR"))
-#ifdef DB_VERSION_MAJOR
- return DB_VERSION_MAJOR;
-#else
- goto not_there;
-#endif
- if (strEQ(name, "DB_VERSION_MINOR"))
-#ifdef DB_VERSION_MINOR
- return DB_VERSION_MINOR;
-#else
- goto not_there;
-#endif
- if (strEQ(name, "DB_VERSION_PATCH"))
-#ifdef DB_VERSION_PATCH
- return DB_VERSION_PATCH;
-#else
- goto not_there;
-#endif
- if (strEQ(name, "DB_WRITECURSOR"))
-#ifdef DB_WRITECURSOR
- return DB_WRITECURSOR;
-#else
- goto not_there;
-#endif
- break;
- case 'E':
- break;
- case 'F':
- break;
- case 'G':
- break;
- case 'H':
- break;
- case 'I':
- break;
- case 'J':
- break;
- case 'K':
- break;
- case 'L':
- break;
- case 'M':
- break;
- case 'N':
- break;
- case 'O':
- break;
- case 'P':
- break;
- case 'Q':
- break;
- case 'R':
- break;
- case 'S':
- break;
- case 'T':
- break;
- case 'U':
- break;
- case 'V':
- break;
- case 'W':
- break;
- case 'X':
- break;
- case 'Y':
- break;
- case 'Z':
- break;
- case 'a':
- break;
- case 'b':
- break;
- case 'c':
- break;
- case 'd':
- break;
- case 'e':
- break;
- case 'f':
- break;
- case 'g':
- break;
- case 'h':
- break;
- case 'i':
- break;
- case 'j':
- break;
- case 'k':
- break;
- case 'l':
- break;
- case 'm':
- break;
- case 'n':
- break;
- case 'o':
- break;
- case 'p':
- break;
- case 'q':
- break;
- case 'r':
- break;
- case 's':
- break;
- case 't':
- break;
- case 'u':
- break;
- case 'v':
- break;
- case 'w':
- break;
- case 'x':
- break;
- case 'y':
- break;
- case 'z':
- break;
- }
- errno = EINVAL;
- return 0;
-
-not_there:
- errno = ENOENT;
- return 0;
-}
+#include "constants.h"
MODULE = BerkeleyDB PACKAGE = BerkeleyDB PREFIX = env_
-char *
-DB_VERSION_STRING()
- CODE:
- RETVAL = DB_VERSION_STRING ;
- OUTPUT:
- RETVAL
-
-
-double
-constant(name,arg)
- char * name
- int arg
+INCLUDE: constants.xs
#define env_db_version(maj, min, patch) db_version(&maj, &min, &patch)
char *
@@ -2168,7 +1431,7 @@ _db_remove(ref)
HV * hash ;
DB * dbp ;
SV * sv ;
- const char * db ;
+ const char * db = NULL ;
const char * subdb = NULL ;
BerkeleyDB__Env env = NULL ;
DB_ENV * dbenv = NULL ;
@@ -2190,6 +1453,87 @@ _db_remove(ref)
OUTPUT:
RETVAL
+DualType
+_db_verify(ref)
+ SV * ref
+ CODE:
+ {
+#ifndef AT_LEAST_DB_3_1
+ softCrash("BerkeleyDB::db_verify needs Berkeley DB 3.1.x or better") ;
+#else
+ HV * hash ;
+ DB * dbp ;
+ SV * sv ;
+ const char * db = NULL ;
+ const char * subdb = NULL ;
+ const char * outfile = NULL ;
+ FILE * ofh = NULL;
+ BerkeleyDB__Env env = NULL ;
+ DB_ENV * dbenv = NULL ;
+ u_int32_t flags = 0 ;
+
+ hash = (HV*) SvRV(ref) ;
+ SetValue_pv(db, "Filename", char *) ;
+ SetValue_pv(subdb, "Subname", char *) ;
+ SetValue_pv(outfile, "Outfile", char *) ;
+ SetValue_iv(flags, "Flags") ;
+ SetValue_ov(env, "Env", BerkeleyDB__Env) ;
+ RETVAL = 0;
+ if (outfile){
+ ofh = fopen(outfile, "w");
+ if (! ofh)
+ RETVAL = errno;
+ }
+ if (! RETVAL) {
+ if (env)
+ dbenv = env->Env ;
+ RETVAL = db_create(&dbp, dbenv, 0) ;
+ if (RETVAL == 0) {
+ RETVAL = dbp->verify(dbp, db, subdb, ofh, flags) ;
+ }
+ if (outfile)
+ fclose(ofh);
+ }
+#endif
+ }
+ OUTPUT:
+ RETVAL
+
+DualType
+_db_rename(ref)
+ SV * ref
+ CODE:
+ {
+#ifndef AT_LEAST_DB_3_1
+ softCrash("BerkeleyDB::db_rename needs Berkeley DB 3.1.x or better") ;
+#else
+ HV * hash ;
+ DB * dbp ;
+ SV * sv ;
+ const char * db = NULL ;
+ const char * subdb = NULL ;
+ const char * newname = NULL ;
+ BerkeleyDB__Env env = NULL ;
+ DB_ENV * dbenv = NULL ;
+ u_int32_t flags = 0 ;
+
+ hash = (HV*) SvRV(ref) ;
+ SetValue_pv(db, "Filename", char *) ;
+ SetValue_pv(subdb, "Subname", char *) ;
+ SetValue_pv(newname, "Newname", char *) ;
+ SetValue_iv(flags, "Flags") ;
+ SetValue_ov(env, "Env", BerkeleyDB__Env) ;
+ if (env)
+ dbenv = env->Env ;
+ RETVAL = db_create(&dbp, dbenv, 0) ;
+ if (RETVAL == 0) {
+ RETVAL = dbp->rename(dbp, db, subdb, newname, flags) ;
+ }
+#endif
+ }
+ OUTPUT:
+ RETVAL
+
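For orientation, a hedged Perl-level sketch of the two class methods these new XSUBs back (file names are hypothetical; it assumes the -Filename/-Subname/-Outfile/-Newname option style used elsewhere in BerkeleyDB.pm and Berkeley DB 3.1 or better):

    use BerkeleyDB ;

    # check the integrity of a database file, writing a salvage dump to Outfile
    my $status = BerkeleyDB::db_verify(
                        -Filename => "mydb.db",
                        -Outfile  => "mydb.dump") ;
    warn "verify failed: $status\n" if $status ;

    # rename a database file (or a subdatabase inside it)
    $status = BerkeleyDB::db_rename(
                        -Filename => "mydb.db",
                        -Newname  => "archive.db") ;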
MODULE = BerkeleyDB::Env PACKAGE = BerkeleyDB::Env PREFIX = env_
@@ -2202,12 +1546,13 @@ _db_appinit(self, ref)
HV * hash ;
SV * sv ;
char * home = NULL ;
+ char * errfile = NULL ;
char * server = NULL ;
char ** config = NULL ;
int flags = 0 ;
+ int setflags = 0 ;
int cachesize = 0 ;
int lk_detect = 0 ;
- int mode = 0 ;
SV * errprefix = NULL;
DB_ENV * env ;
int status ;
@@ -2218,9 +1563,14 @@ _db_appinit(self, ref)
SetValue_pv(config, "Config", char **) ;
SetValue_sv(errprefix, "ErrPrefix") ;
SetValue_iv(flags, "Flags") ;
+ SetValue_iv(setflags, "SetFlags") ;
SetValue_pv(server, "Server", char *) ;
SetValue_iv(cachesize, "Cachesize") ;
SetValue_iv(lk_detect, "LockDetect") ;
+#ifndef AT_LEAST_DB_3_2
+ if (setflags)
+ softCrash("-SetFlags needs Berkeley DB 3.x or better") ;
+#endif /* ! AT_LEAST_DB_3 */
#ifndef AT_LEAST_DB_3_1
if (server)
softCrash("-Server needs Berkeley DB 3.1 or better") ;
@@ -2255,22 +1605,22 @@ _db_appinit(self, ref)
if (RETVAL->ErrPrefix)
RETVAL->Env->db_errpfx = SvPVX(RETVAL->ErrPrefix) ;
- if ((sv = readHash(hash, "ErrFile")) && sv != &PL_sv_undef) {
- env->db_errfile = IoOFP(sv_2io(sv)) ;
- RETVAL->ErrHandle = newRV(sv) ;
+ SetValue_pv(errfile, "ErrFile", char *) ;
+ if (errfile) {
+ RETVAL->ErrHandle = env->db_errfile = fopen(errfile, "w");
+ if (RETVAL->ErrHandle == NULL)
+ croak("Cannot open file %s: %s\n", errfile, Strerror(errno));
}
- /* SetValue_io(RETVAL->Env.db_errfile, "ErrFile") ; */
SetValue_iv(env->db_verbose, "Verbose") ;
- /* env->db_errbuf = RETVAL->ErrBuff ; */
env->db_errcall = db_errcall_cb ;
RETVAL->active = TRUE ;
status = db_appinit(home, config, env, flags) ;
Trace((" status = %d env %d Env %d\n", status, RETVAL, env)) ;
if (status == 0)
- hash_store_iv("BerkeleyDB::Term::Env", (IV)RETVAL, 1) ;
+ hash_store_iv("BerkeleyDB::Term::Env", (char *)RETVAL, 1) ;
else {
if (RETVAL->ErrHandle)
- SvREFCNT_dec(RETVAL->ErrHandle) ;
+ fclose(RETVAL->ErrHandle) ;
if (RETVAL->ErrPrefix)
SvREFCNT_dec(RETVAL->ErrPrefix) ;
Safefree(RETVAL->Env) ;
@@ -2286,18 +1636,30 @@ _db_appinit(self, ref)
Trace(("db_env_create flags = %d returned %s\n", flags,
my_db_strerror(status))) ;
env = RETVAL->Env ;
+#ifdef AT_LEAST_DB_3_3
+ env->set_alloc(env, safemalloc, MyRealloc, safefree) ;
+#endif
if (status == 0 && cachesize) {
status = env->set_cachesize(env, 0, cachesize, 0) ;
Trace(("set_cachesize [%d] returned %s\n",
cachesize, my_db_strerror(status)));
}
-
+
if (status == 0 && lk_detect) {
status = env->set_lk_detect(env, lk_detect) ;
Trace(("set_lk_detect [%d] returned %s\n",
lk_detect, my_db_strerror(status)));
}
-#ifdef AT_LEAST_DB_3_1
+#ifdef AT_LEAST_DB_4
+ /* set the server */
+ if (server && status == 0)
+ {
+ status = env->set_rpc_server(env, NULL, server, 0, 0, 0);
+ Trace(("ENV->set_rpc_server server = %s returned %s\n", server,
+ my_db_strerror(status))) ;
+ }
+#else
+# if defined(AT_LEAST_DB_3_1) && ! defined(AT_LEAST_DB_4)
/* set the server */
if (server && status == 0)
{
@@ -2305,9 +1667,19 @@ _db_appinit(self, ref)
Trace(("ENV->set_server server = %s returned %s\n", server,
my_db_strerror(status))) ;
}
+# endif
+#endif
+#ifdef AT_LEAST_DB_3_2
+ if (setflags && status == 0)
+ {
+ status = env->set_flags(env, setflags, 1);
+ Trace(("ENV->set_flags value = %d returned %s\n", setflags,
+ my_db_strerror(status))) ;
+ }
#endif
if (status == 0)
{
+ int mode = 0 ;
/* Take a copy of the error prefix */
if (errprefix) {
Trace(("copying errprefix\n" )) ;
@@ -2317,16 +1689,18 @@ _db_appinit(self, ref)
if (RETVAL->ErrPrefix)
env->set_errpfx(env, SvPVX(RETVAL->ErrPrefix)) ;
- if ((sv = readHash(hash, "ErrFile")) && sv != &PL_sv_undef) {
- env->set_errfile(env, IoOFP(sv_2io(sv))) ;
- RETVAL->ErrHandle = newRV(sv) ;
+ SetValue_pv(errfile, "ErrFile", char *) ;
+ if (errfile) {
+ RETVAL->ErrHandle = fopen(errfile, "w");
+ if (RETVAL->ErrHandle == NULL)
+ croak("Cannot open file %s: %s\n", errfile, Strerror(errno));
+ env->set_errfile(env, RETVAL->ErrHandle) ;
}
- /* SetValue_iv(RETVAL->Env.db_verbose, "Verbose") ; */ /* TODO */
+
SetValue_iv(mode, "Mode") ;
- /* RETVAL->Env.db_errbuf = RETVAL->ErrBuff ; */
env->set_errcall(env, db_errcall_cb) ;
RETVAL->active = TRUE ;
-#ifdef IS_DB_3_0
+#ifdef IS_DB_3_0_x
status = (env->open)(env, home, config, flags, mode) ;
#else /* > 3.0 */
status = (env->open)(env, home, flags, mode) ;
@@ -2335,11 +1709,11 @@ _db_appinit(self, ref)
}
if (status == 0)
- hash_store_iv("BerkeleyDB::Term::Env", (IV)RETVAL, 1) ;
+ hash_store_iv("BerkeleyDB::Term::Env", (char *)RETVAL, 1) ;
else {
(env->close)(env, 0) ;
if (RETVAL->ErrHandle)
- SvREFCNT_dec(RETVAL->ErrHandle) ;
+ fclose(RETVAL->ErrHandle) ;
if (RETVAL->ErrPrefix)
SvREFCNT_dec(RETVAL->ErrPrefix) ;
Safefree(RETVAL) ;
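Taken together, the _db_appinit changes above mean -ErrFile is now a file name that the XS code fopen()s itself, and the new -SetFlags option is forwarded to set_flags(). A minimal usage sketch, with hypothetical paths and illustrative flags:

    use BerkeleyDB ;

    my $env = new BerkeleyDB::Env
                  -Home     => "/path/to/dbhome",
                  -ErrFile  => "/path/to/dbhome/errors.log",  # a filename, no longer a filehandle
                  -SetFlags => DB_TXN_NOSYNC,                 # needs Berkeley DB 3.2.x or better
                  -Flags    => DB_CREATE | DB_INIT_MPOOL | DB_INIT_LOCK | DB_INIT_LOG | DB_INIT_TXN
        or die "cannot create environment: $BerkeleyDB::Error\n" ;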
@@ -2350,11 +1724,43 @@ _db_appinit(self, ref)
OUTPUT:
RETVAL
+void
+log_archive(env, flags=0)
+ u_int32_t flags
+ BerkeleyDB::Env env
+ PPCODE:
+ {
+ char ** list;
+ char ** file;
+ AV * av;
+#ifndef AT_LEAST_DB_3
+ softCrash("log_archive needs at least Berkeley DB 3.x.x");
+#else
+# ifdef AT_LEAST_DB_4
+ env->Status = env->Env->log_archive(env->Env, &list, flags) ;
+# else
+# ifdef AT_LEAST_DB_3_3
+ env->Status = log_archive(env->Env, &list, flags) ;
+# else
+ env->Status = log_archive(env->Env, &list, flags, safemalloc) ;
+# endif
+# endif
+ if (env->Status == 0 && list != NULL)
+ {
+ for (file = list; *file != NULL; ++file)
+ {
+ XPUSHs(sv_2mortal(newSVpv(*file, 0))) ;
+ }
+ safefree(list);
+ }
+#endif
+ }
+
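A hedged sketch of the matching Perl call; it assumes an already-open transactional environment, Berkeley DB 3.x or better, and that DB_ARCH_ABS is exported as usual:

    # list the log files that are no longer needed and could be archived or removed
    my @old_logs = $env->log_archive(DB_ARCH_ABS) ;
    print "removable: $_\n" for @old_logs ;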
BerkeleyDB::Txn::Raw
_txn_begin(env, pid=NULL, flags=0)
+ u_int32_t flags
BerkeleyDB::Env env
BerkeleyDB::Txn pid
- u_int32_t flags
CODE:
{
DB_TXN *txn ;
@@ -2372,14 +1778,18 @@ _txn_begin(env, pid=NULL, flags=0)
#if DB_VERSION_MAJOR == 2
txn_begin(env->Env->tx_info, p_id, &txn) ;
#else
+# ifdef AT_LEAST_DB_4
+ env->Env->txn_begin(env->Env, p_id, &txn, flags) ;
+# else
txn_begin(env->Env, p_id, &txn, flags) ;
+# endif
#endif
if (env->TxnMgrStatus == 0) {
ZMALLOC(RETVAL, BerkeleyDB_Txn_type) ;
RETVAL->txn = txn ;
RETVAL->active = TRUE ;
- Trace(("_txn_begin created txn [%d] in [%d]\n", txn, RETVAL));
- hash_store_iv("BerkeleyDB::Term::Txn", (IV)RETVAL, 1) ;
+ Trace(("_txn_begin created txn [%p] in [%p]\n", txn, RETVAL));
+ hash_store_iv("BerkeleyDB::Term::Txn", (char *)RETVAL, 1) ;
}
else
RETVAL = NULL ;
@@ -2389,19 +1799,24 @@ _txn_begin(env, pid=NULL, flags=0)
#if DB_VERSION_MAJOR == 2
-# define env_txn_checkpoint(e,k,m) txn_checkpoint(e->Env->tx_info, k, m)
+# define env_txn_checkpoint(e,k,m,f) txn_checkpoint(e->Env->tx_info, k, m)
#else /* DB 3.0 or better */
-# ifdef AT_LEAST_DB_3_1
-# define env_txn_checkpoint(e,k,m) txn_checkpoint(e->Env, k, m, 0)
+# ifdef AT_LEAST_DB_4
+# define env_txn_checkpoint(e,k,m,f) e->Env->txn_checkpoint(e->Env, k, m, f)
# else
-# define env_txn_checkpoint(e,k,m) txn_checkpoint(e->Env, k, m)
+# ifdef AT_LEAST_DB_3_1
+# define env_txn_checkpoint(e,k,m,f) txn_checkpoint(e->Env, k, m, 0)
+# else
+# define env_txn_checkpoint(e,k,m,f) txn_checkpoint(e->Env, k, m)
+# endif
# endif
#endif
DualType
-env_txn_checkpoint(env, kbyte, min)
+env_txn_checkpoint(env, kbyte, min, flags=0)
BerkeleyDB::Env env
long kbyte
long min
+ u_int32_t flags
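With the extra flags argument the Perl-level call stays backward compatible; an illustrative checkpoint (thresholds are arbitrary):

    # checkpoint once 64KB of log data or 5 minutes have accumulated since the last one
    my $status = $env->txn_checkpoint(64, 5) ;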
HV *
txn_stat(env)
@@ -2410,10 +1825,18 @@ txn_stat(env)
CODE:
{
DB_TXN_STAT * stat ;
-#if DB_VERSION_MAJOR == 2
- if(txn_stat(env->Env->tx_info, &stat, safemalloc) == 0) {
+#ifdef AT_LEAST_DB_4
+ if(env->Env->txn_stat(env->Env, &stat, 0) == 0) {
#else
+# ifdef AT_LEAST_DB_3_3
+ if(txn_stat(env->Env, &stat) == 0) {
+# else
+# if DB_VERSION_MAJOR == 2
+ if(txn_stat(env->Env->tx_info, &stat, safemalloc) == 0) {
+# else
if(txn_stat(env->Env, &stat, safemalloc) == 0) {
+# endif
+# endif
#endif
RETVAL = (HV*)sv_2mortal((SV*)newHV()) ;
hv_store_iv(RETVAL, "st_time_ckp", stat->st_time_ckp) ;
@@ -2500,6 +1923,7 @@ status(env)
DualType
db_appexit(env)
BerkeleyDB::Env env
+ ALIAS: close =1
INIT:
ckActive_Environment(env->active) ;
CODE:
@@ -2514,7 +1938,7 @@ db_appexit(env)
RETVAL = (env->Env->close)(env->Env, 0) ;
#endif
env->active = FALSE ;
- hash_delete("BerkeleyDB::Term::Env", (IV)env) ;
+ hash_delete("BerkeleyDB::Term::Env", (char *)env) ;
OUTPUT:
RETVAL
@@ -2533,14 +1957,14 @@ _DESTROY(env)
(env->Env->close)(env->Env, 0) ;
#endif
if (env->ErrHandle)
- SvREFCNT_dec(env->ErrHandle) ;
+ fclose(env->ErrHandle) ;
if (env->ErrPrefix)
SvREFCNT_dec(env->ErrPrefix) ;
#if DB_VERSION_MAJOR == 2
Safefree(env->Env) ;
#endif
Safefree(env) ;
- hash_delete("BerkeleyDB::Term::Env", (IV)env) ;
+ hash_delete("BerkeleyDB::Term::Env", (char *)env) ;
Trace(("End of BerkeleyDB::Env::DESTROY %d\n", RETVAL)) ;
BerkeleyDB::TxnMgr::Raw
@@ -2553,36 +1977,66 @@ _TxnMgr(env)
CODE:
ZMALLOC(RETVAL, BerkeleyDB_TxnMgr_type) ;
RETVAL->env = env ;
- /* hash_store_iv("BerkeleyDB::Term::TxnMgr", (IV)txn, 1) ; */
+ /* hash_store_iv("BerkeleyDB::Term::TxnMgr", (char *)txn, 1) ; */
OUTPUT:
RETVAL
int
-set_data_dir(env, dir)
+set_lg_dir(env, dir)
BerkeleyDB::Env env
char * dir
INIT:
ckActive_Database(env->active) ;
CODE:
#ifndef AT_LEAST_DB_3_1
- softCrash("$env->set_data_dir needs Berkeley DB 3.1 or better") ;
+ softCrash("$env->set_lg_dir needs Berkeley DB 3.1 or better") ;
#else
- RETVAL = env->Status = env->Env->set_data_dir(env->Env, dir);
+ RETVAL = env->Status = env->Env->set_lg_dir(env->Env, dir);
#endif
OUTPUT:
RETVAL
int
-set_lg_dir(env, dir)
+set_lg_bsize(env, bsize)
+ BerkeleyDB::Env env
+ u_int32_t bsize
+ INIT:
+ ckActive_Database(env->active) ;
+ CODE:
+#ifndef AT_LEAST_DB_3
+ softCrash("$env->set_lg_bsize needs Berkeley DB 3.0.55 or better") ;
+#else
+ RETVAL = env->Status = env->Env->set_lg_bsize(env->Env, bsize);
+#endif
+ OUTPUT:
+ RETVAL
+
+int
+set_lg_max(env, lg_max)
+ BerkeleyDB::Env env
+ u_int32_t lg_max
+ INIT:
+ ckActive_Database(env->active) ;
+ CODE:
+#ifndef AT_LEAST_DB_3
+ softCrash("$env->set_lg_max needs Berkeley DB 3.0.55 or better") ;
+#else
+ RETVAL = env->Status = env->Env->set_lg_max(env->Env, lg_max);
+#endif
+ OUTPUT:
+ RETVAL
+
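The two new log-tuning methods map directly onto the corresponding DB_ENV calls; a usage sketch with arbitrary sizes (the underlying library may insist these are configured before the environment is first used):

    my $status = $env->set_lg_bsize(64 * 1024) ;    # in-memory log buffer, DB 3.x or better
    $status    = $env->set_lg_max(1024 * 1024) ;    # maximum size of a single log file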
+int
+set_data_dir(env, dir)
BerkeleyDB::Env env
char * dir
INIT:
ckActive_Database(env->active) ;
CODE:
#ifndef AT_LEAST_DB_3_1
- softCrash("$env->set_lg_dir needs Berkeley DB 3.1 or better") ;
+ softCrash("$env->set_data_dir needs Berkeley DB 3.1 or better") ;
#else
- RETVAL = env->Status = env->Env->set_lg_dir(env->Env, dir);
+ RETVAL = env->Status = env->Env->set_data_dir(env->Env, dir);
#endif
OUTPUT:
RETVAL
@@ -2612,15 +2066,52 @@ set_mutexlocks(env, do_lock)
#ifndef AT_LEAST_DB_3
softCrash("$env->set_setmutexlocks needs Berkeley DB 3.0 or better") ;
#else
-#if defined(IS_DB_3_0) || defined(AT_LEAST_DB_3_2)
+# ifdef AT_LEAST_DB_4
+ RETVAL = env->Status = env->Env->set_flags(env->Env, DB_NOLOCKING, do_lock);
+# else
+# if defined(AT_LEAST_DB_3_2_6) || defined(IS_DB_3_0_x)
RETVAL = env->Status = env->Env->set_mutexlocks(env->Env, do_lock);
-#else /* DB 3.1 */
+# else /* DB 3.1 or 3.2.3 */
RETVAL = env->Status = db_env_set_mutexlocks(do_lock);
+# endif
+# endif
#endif
+ OUTPUT:
+ RETVAL
+
+int
+set_verbose(env, which, onoff)
+ BerkeleyDB::Env env
+ u_int32_t which
+ int onoff
+ INIT:
+ ckActive_Database(env->active) ;
+ CODE:
+#ifndef AT_LEAST_DB_3
+ softCrash("$env->set_verbose needs Berkeley DB 3.x or better") ;
+#else
+ RETVAL = env->Status = env->Env->set_verbose(env->Env, which, onoff);
#endif
OUTPUT:
RETVAL
+int
+set_flags(env, flags, onoff)
+ BerkeleyDB::Env env
+ u_int32_t flags
+ int onoff
+ INIT:
+ ckActive_Database(env->active) ;
+ CODE:
+#ifndef AT_LEAST_DB_3_2
+ softCrash("$env->set_flags needs Berkeley DB 3.2.x or better") ;
+#else
+ RETVAL = env->Status = env->Env->set_flags(env->Env, flags, onoff);
+#endif
+ OUTPUT:
+ RETVAL
+
+
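A sketch of the two new environment methods just added; the flag names are examples and assume the usual DB_* constants are exported by the module:

    $env->set_flags(DB_TXN_NOSYNC, 1) ;         # needs Berkeley DB 3.2.x or better
    $env->set_verbose(DB_VERB_DEADLOCK, 1) ;    # needs Berkeley DB 3.x or better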
MODULE = BerkeleyDB::Term PACKAGE = BerkeleyDB::Term
void
@@ -2649,11 +2140,13 @@ _db_open_hash(self, ref)
int flags = 0 ;
int mode = 0 ;
BerkeleyDB db ;
+ BerkeleyDB__Txn txn = NULL ;
Trace(("_db_open_hash start\n")) ;
hash = (HV*) SvRV(ref) ;
SetValue_pv(file, "Filename", char *) ;
SetValue_pv(subname, "Subname", char *) ;
+ SetValue_ov(txn, "Txn", BerkeleyDB__Txn) ;
SetValue_ov(dbenv, "Env", BerkeleyDB__Env) ;
ref_dbenv = sv ;
SetValue_iv(flags, "Flags") ;
@@ -2681,7 +2174,7 @@ _db_open_hash(self, ref)
croak("DupCompare needs Berkeley DB 2.5.9 or later") ;
#endif
}
- RETVAL = my_db_open(db, ref, ref_dbenv, dbenv, file, subname, DB_HASH, flags, mode, &info) ;
+ RETVAL = my_db_open(db, ref, ref_dbenv, dbenv, txn, file, subname, DB_HASH, flags, mode, &info) ;
Trace(("_db_open_hash end\n")) ;
}
OUTPUT:
@@ -2690,8 +2183,8 @@ _db_open_hash(self, ref)
HV *
db_stat(db, flags=0)
- BerkeleyDB::Common db
int flags
+ BerkeleyDB::Common db
HV * RETVAL = NULL ;
INIT:
ckActive_Database(db->active) ;
@@ -2701,7 +2194,11 @@ db_stat(db, flags=0)
softCrash("$db->db_stat for a Hash needs Berkeley DB 3.x or better") ;
#else
DB_HASH_STAT * stat ;
+#ifdef AT_LEAST_DB_3_3
+ db->Status = ((db->dbp)->stat)(db->dbp, &stat, flags) ;
+#else
db->Status = ((db->dbp)->stat)(db->dbp, &stat, safemalloc, flags) ;
+#endif
if (db->Status == 0) {
RETVAL = (HV*)sv_2mortal((SV*)newHV()) ;
hv_store_iv(RETVAL, "hash_magic", stat->hash_magic) ;
@@ -2713,7 +2210,9 @@ db_stat(db, flags=0)
#else
hv_store_iv(RETVAL, "hash_nrecs", stat->hash_nrecs);
#endif
+#ifndef AT_LEAST_DB_3_1
hv_store_iv(RETVAL, "hash_nelem", stat->hash_nelem);
+#endif
hv_store_iv(RETVAL, "hash_ffactor", stat->hash_ffactor);
hv_store_iv(RETVAL, "hash_buckets", stat->hash_buckets);
hv_store_iv(RETVAL, "hash_free", stat->hash_free);
@@ -2753,11 +2252,13 @@ _db_open_unknown(ref)
int mode = 0 ;
BerkeleyDB db ;
BerkeleyDB RETVAL ;
+ BerkeleyDB__Txn txn = NULL ;
static char * Names[] = {"", "Btree", "Hash", "Recno"} ;
hash = (HV*) SvRV(ref) ;
SetValue_pv(file, "Filename", char *) ;
SetValue_pv(subname, "Subname", char *) ;
+ SetValue_ov(txn, "Txn", BerkeleyDB__Txn) ;
SetValue_ov(dbenv, "Env", BerkeleyDB__Env) ;
ref_dbenv = sv ;
SetValue_iv(flags, "Flags") ;
@@ -2772,8 +2273,8 @@ _db_open_unknown(ref)
SetValue_iv(info.flags, "Property") ;
ZMALLOC(db, BerkeleyDB_type) ;
- RETVAL = my_db_open(db, ref, ref_dbenv, dbenv, file, subname, DB_UNKNOWN, flags, mode, &info) ;
- XPUSHs(sv_2mortal(newSViv((IV)RETVAL)));
+ RETVAL = my_db_open(db, ref, ref_dbenv, dbenv, txn, file, subname, DB_UNKNOWN, flags, mode, &info) ;
+ XPUSHs(sv_2mortal(newSViv(PTR2IV(RETVAL))));
if (RETVAL)
XPUSHs(sv_2mortal(newSVpv(Names[RETVAL->type], 0))) ;
else
@@ -2800,10 +2301,13 @@ _db_open_btree(self, ref)
int flags = 0 ;
int mode = 0 ;
BerkeleyDB db ;
+ BerkeleyDB__Txn txn = NULL ;
+ Trace(("In _db_open_btree\n"));
hash = (HV*) SvRV(ref) ;
SetValue_pv(file, "Filename", char*) ;
SetValue_pv(subname, "Subname", char *) ;
+ SetValue_ov(txn, "Txn", BerkeleyDB__Txn) ;
SetValue_ov(dbenv, "Env", BerkeleyDB__Env) ;
ref_dbenv = sv ;
SetValue_iv(flags, "Flags") ;
@@ -2817,12 +2321,14 @@ _db_open_btree(self, ref)
SetValue_iv(info.flags, "Property") ;
ZMALLOC(db, BerkeleyDB_type) ;
if ((sv = readHash(hash, "Compare")) && sv != &PL_sv_undef) {
+ Trace((" Parsed Compare callback\n"));
info.bt_compare = btree_compare ;
db->compare = newSVsv(sv) ;
}
/* DB_DUPSORT was introduced in DB 2.5.9 */
if ((sv = readHash(hash, "DupCompare")) && sv != &PL_sv_undef) {
#ifdef DB_DUPSORT
+ Trace((" Parsed DupCompare callback\n"));
info.dup_compare = dup_compare ;
db->dup_compare = newSVsv(sv) ;
info.flags |= DB_DUP|DB_DUPSORT ;
@@ -2831,11 +2337,12 @@ _db_open_btree(self, ref)
#endif
}
if ((sv = readHash(hash, "Prefix")) && sv != &PL_sv_undef) {
+ Trace((" Parsed Prefix callback\n"));
info.bt_prefix = btree_prefix ;
db->prefix = newSVsv(sv) ;
}
- RETVAL = my_db_open(db, ref, ref_dbenv, dbenv, file, subname, DB_BTREE, flags, mode, &info) ;
+ RETVAL = my_db_open(db, ref, ref_dbenv, dbenv, txn, file, subname, DB_BTREE, flags, mode, &info) ;
}
OUTPUT:
RETVAL
@@ -2843,15 +2350,19 @@ _db_open_btree(self, ref)
HV *
db_stat(db, flags=0)
- BerkeleyDB::Common db
int flags
+ BerkeleyDB::Common db
HV * RETVAL = NULL ;
INIT:
ckActive_Database(db->active) ;
CODE:
{
DB_BTREE_STAT * stat ;
+#ifdef AT_LEAST_DB_3_3
+ db->Status = ((db->dbp)->stat)(db->dbp, &stat, flags) ;
+#else
db->Status = ((db->dbp)->stat)(db->dbp, &stat, safemalloc, flags) ;
+#endif
if (db->Status == 0) {
RETVAL = (HV*)sv_2mortal((SV*)newHV()) ;
hv_store_iv(RETVAL, "bt_magic", stat->bt_magic);
@@ -2920,11 +2431,13 @@ _db_open_recno(self, ref)
int flags = 0 ;
int mode = 0 ;
BerkeleyDB db ;
+ BerkeleyDB__Txn txn = NULL ;
hash = (HV*) SvRV(ref) ;
SetValue_pv(file, "Fname", char*) ;
SetValue_ov(dbenv, "Env", BerkeleyDB__Env) ;
ref_dbenv = sv ;
+ SetValue_ov(txn, "Txn", BerkeleyDB__Txn) ;
SetValue_iv(flags, "Flags") ;
SetValue_iv(mode, "Mode") ;
@@ -2954,7 +2467,7 @@ _db_open_recno(self, ref)
db->array_base = (db->array_base == 0 ? 1 : 0) ;
#endif /* ALLOW_RECNO_OFFSET */
- RETVAL = my_db_open(db, ref, ref_dbenv, dbenv, file, subname, DB_RECNO, flags, mode, &info) ;
+ RETVAL = my_db_open(db, ref, ref_dbenv, dbenv, txn, file, subname, DB_RECNO, flags, mode, &info) ;
}
OUTPUT:
RETVAL
@@ -2981,11 +2494,13 @@ _db_open_queue(self, ref)
int flags = 0 ;
int mode = 0 ;
BerkeleyDB db ;
+ BerkeleyDB__Txn txn = NULL ;
hash = (HV*) SvRV(ref) ;
SetValue_pv(file, "Fname", char*) ;
SetValue_ov(dbenv, "Env", BerkeleyDB__Env) ;
ref_dbenv = sv ;
+ SetValue_ov(txn, "Txn", BerkeleyDB__Txn) ;
SetValue_iv(flags, "Flags") ;
SetValue_iv(mode, "Mode") ;
@@ -3000,7 +2515,7 @@ _db_open_queue(self, ref)
SetValue_iv(info.flags, "Property") ;
if ((sv = readHash(hash, "Len")) && sv != &PL_sv_undef) {
info.re_len = SvIV(sv) ; ;
- flagSet_DB2(info.flags, DB_PAD) ;
+ flagSet_DB2(info.flags, DB_FIXEDLEN) ;
}
if ((sv = readHash(hash, "Pad")) && sv != &PL_sv_undef) {
info.re_pad = (u_int32_t)SvPOK(sv) ? *SvPV(sv,PL_na) : SvIV(sv) ; ;
@@ -3012,7 +2527,7 @@ _db_open_queue(self, ref)
db->array_base = (db->array_base == 0 ? 1 : 0) ;
#endif /* ALLOW_RECNO_OFFSET */
- RETVAL = my_db_open(db, ref, ref_dbenv, dbenv, file, subname, DB_QUEUE, flags, mode, &info) ;
+ RETVAL = my_db_open(db, ref, ref_dbenv, dbenv, txn, file, subname, DB_QUEUE, flags, mode, &info) ;
#endif
}
OUTPUT:
@@ -3020,8 +2535,8 @@ _db_open_queue(self, ref)
HV *
db_stat(db, flags=0)
- BerkeleyDB::Common db
int flags
+ BerkeleyDB::Common db
HV * RETVAL = NULL ;
INIT:
ckActive_Database(db->active) ;
@@ -3031,7 +2546,11 @@ db_stat(db, flags=0)
softCrash("$db->db_stat for a Queue needs Berkeley DB 3.x or better") ;
#else /* Berkeley DB 3, or better */
DB_QUEUE_STAT * stat ;
+#ifdef AT_LEAST_DB_3_3
+ db->Status = ((db->dbp)->stat)(db->dbp, &stat, flags) ;
+#else
db->Status = ((db->dbp)->stat)(db->dbp, &stat, safemalloc, flags) ;
+#endif
if (db->Status == 0) {
RETVAL = (HV*)sv_2mortal((SV*)newHV()) ;
hv_store_iv(RETVAL, "qs_magic", stat->qs_magic) ;
@@ -3069,8 +2588,8 @@ MODULE = BerkeleyDB::Common PACKAGE = BerkeleyDB::Common PREFIX = dab_
DualType
db_close(db,flags=0)
- BerkeleyDB::Common db
int flags
+ BerkeleyDB::Common db
INIT:
ckActive_Database(db->active) ;
CurrentDB = db ;
@@ -3087,7 +2606,7 @@ db_close(db,flags=0)
if (db->parent_env && db->parent_env->open_dbs)
-- db->parent_env->open_dbs ;
db->active = FALSE ;
- hash_delete("BerkeleyDB::Term::Db", (IV)db) ;
+ hash_delete("BerkeleyDB::Term::Db", (char *)db) ;
-- db->open_cursors ;
Trace(("end of BerkeleyDB::Common::db_close\n"));
OUTPUT:
@@ -3109,8 +2628,8 @@ dab__DESTROY(db)
#endif
BerkeleyDB::Cursor::Raw
_db_cursor(db, flags=0)
- BerkeleyDB::Common db
u_int32_t flags
+ BerkeleyDB::Common db
BerkeleyDB::Cursor RETVAL = NULL ;
INIT:
ckActive_Database(db->active) ;
@@ -3124,11 +2643,16 @@ _db_cursor(db, flags=0)
RETVAL->parent_db = db ;
RETVAL->cursor = cursor ;
RETVAL->dbp = db->dbp ;
+ RETVAL->txn = db->txn ;
RETVAL->type = db->type ;
RETVAL->recno_or_queue = db->recno_or_queue ;
RETVAL->filename = my_strdup(db->filename) ;
RETVAL->compare = db->compare ;
RETVAL->dup_compare = db->dup_compare ;
+#ifdef AT_LEAST_DB_3_3
+ RETVAL->associated = db->associated ;
+ RETVAL->secondary_db = db->secondary_db;
+#endif
RETVAL->prefix = db->prefix ;
RETVAL->hash = db->hash ;
RETVAL->partial = db->partial ;
@@ -3146,7 +2670,7 @@ _db_cursor(db, flags=0)
RETVAL->filter_store_value = db->filter_store_value ;
#endif
/* RETVAL->info ; */
- hash_store_iv("BerkeleyDB::Term::Cursor", (IV)RETVAL, 1) ;
+ hash_store_iv("BerkeleyDB::Term::Cursor", (char *)RETVAL, 1) ;
}
}
OUTPUT:
@@ -3154,9 +2678,9 @@ _db_cursor(db, flags=0)
BerkeleyDB::Cursor::Raw
_db_join(db, cursors, flags=0)
+ u_int32_t flags
BerkeleyDB::Common db
AV * cursors
- u_int32_t flags
BerkeleyDB::Cursor RETVAL = NULL ;
INIT:
ckActive_Database(db->active) ;
@@ -3175,7 +2699,8 @@ _db_join(db, cursors, flags=0)
cursor_list = (DBC **)safemalloc(sizeof(DBC*) * (count + 1));
for (i = 0 ; i < count ; ++i) {
SV * obj = (SV*) * av_fetch(cursors, i, FALSE) ;
- BerkeleyDB__Cursor cur = (BerkeleyDB__Cursor) getInnerObject(obj) ;
+ IV tmp = SvIV(getInnerObject(obj)) ;
+ BerkeleyDB__Cursor cur = INT2PTR(BerkeleyDB__Cursor, tmp);
cursor_list[i] = cur->cursor ;
}
cursor_list[i] = NULL ;
@@ -3193,6 +2718,10 @@ _db_join(db, cursors, flags=0)
RETVAL->filename = my_strdup(db->filename) ;
RETVAL->compare = db->compare ;
RETVAL->dup_compare = db->dup_compare ;
+#ifdef AT_LEAST_DB_3_3
+ RETVAL->associated = db->associated ;
+ RETVAL->secondary_db = db->secondary_db;
+#endif
RETVAL->prefix = db->prefix ;
RETVAL->hash = db->hash ;
RETVAL->partial = db->partial ;
@@ -3210,7 +2739,7 @@ _db_join(db, cursors, flags=0)
RETVAL->filter_store_value = db->filter_store_value ;
#endif
/* RETVAL->info ; */
- hash_store_iv("BerkeleyDB::Term::Cursor", (IV)RETVAL, 1) ;
+ hash_store_iv("BerkeleyDB::Term::Cursor", (char *)RETVAL, 1) ;
}
safefree(cursor_list) ;
#endif /* Berkeley DB >= 2.5.2 */
@@ -3254,9 +2783,13 @@ byteswapped(db)
#if DB_VERSION_MAJOR == 2
RETVAL = db->dbp->byteswapped ;
#else
+#ifdef AT_LEAST_DB_3_3
+ db->dbp->get_byteswapped(db->dbp, &RETVAL) ;
+#else
RETVAL = db->dbp->get_byteswapped(db->dbp) ;
#endif
#endif
+#endif
OUTPUT:
RETVAL
@@ -3294,7 +2827,7 @@ filter_fetch_key(db, code)
SV * code
SV * RETVAL = &PL_sv_undef ;
CODE:
- setFilter(filter_fetch_key) ;
+ DBM_setFilter(db->filter_fetch_key, code) ;
SV *
filter_store_key(db, code)
@@ -3302,7 +2835,7 @@ filter_store_key(db, code)
SV * code
SV * RETVAL = &PL_sv_undef ;
CODE:
- setFilter(filter_store_key) ;
+ DBM_setFilter(db->filter_store_key, code) ;
SV *
filter_fetch_value(db, code)
@@ -3310,7 +2843,7 @@ filter_fetch_value(db, code)
SV * code
SV * RETVAL = &PL_sv_undef ;
CODE:
- setFilter(filter_fetch_value) ;
+ DBM_setFilter(db->filter_fetch_value, code) ;
SV *
filter_store_value(db, code)
@@ -3318,7 +2851,7 @@ filter_store_value(db, code)
SV * code
SV * RETVAL = &PL_sv_undef ;
CODE:
- setFilter(filter_store_value) ;
+ DBM_setFilter(db->filter_store_value, code) ;
#endif /* DBM_FILTERING */
@@ -3360,55 +2893,100 @@ partial_clear(db)
(db->Status = ((db->dbp)->del)(db->dbp, db->txn, &key, flags))
DualType
db_del(db, key, flags=0)
+ u_int flags
BerkeleyDB::Common db
DBTKEY key
- u_int flags
INIT:
+ Trace(("db_del db[%p] in [%p] txn[%p] key[%.*s] flags[%d]\n", db->dbp, db, db->txn, key.size, key.data, flags)) ;
ckActive_Database(db->active) ;
CurrentDB = db ;
+#ifdef AT_LEAST_DB_3
+# ifdef AT_LEAST_DB_3_2
+# define writeToKey() (flagSet(DB_CONSUME)||flagSet(DB_CONSUME_WAIT)||flagSet(DB_GET_BOTH)||flagSet(DB_SET_RECNO))
+# else
+# define writeToKey() (flagSet(DB_CONSUME)||flagSet(DB_GET_BOTH)||flagSet(DB_SET_RECNO))
+# endif
+#else
+#define writeToKey() (flagSet(DB_GET_BOTH)||flagSet(DB_SET_RECNO))
+#endif
#define db_get(db, key, data, flags) \
(db->Status = ((db->dbp)->get)(db->dbp, db->txn, &key, &data, flags))
DualType
db_get(db, key, data, flags=0)
+ u_int flags
BerkeleyDB::Common db
+ DBTKEY_B key
+ DBT_OPT data
+ CODE:
+ ckActive_Database(db->active) ;
+ CurrentDB = db ;
+ SetPartial(data,db) ;
+ Trace(("db_get db[%p] in [%p] txn[%p] key [%.*s] flags[%d]\n", db->dbp, db, db->txn, key.size, key.data, flags)) ;
+ RETVAL = db_get(db, key, data, flags);
+ Trace((" RETVAL %d\n", RETVAL));
+ OUTPUT:
+ RETVAL
+ key if (writeToKey()) OutputKey(ST(1), key) ;
+ data
+
+#define db_pget(db, key, pkey, data, flags) \
+ (db->Status = ((db->dbp)->pget)(db->dbp, db->txn, &key, &pkey, &data, flags))
+DualType
+db_pget(db, key, pkey, data, flags=0)
u_int flags
+ BerkeleyDB::Common db
DBTKEY_B key
+ DBTKEY_B pkey = NO_INIT
DBT_OPT data
- INIT:
+ CODE:
+#ifndef AT_LEAST_DB_3_3
+ softCrash("db_pget needs at least Berkeley DB 3.3");
+#else
+ Trace(("db_pget db [%p] in [%p] txn [%p] flags [%d]\n", db->dbp, db, db->txn, flags)) ;
ckActive_Database(db->active) ;
CurrentDB = db ;
SetPartial(data,db) ;
+ DBT_clear(pkey);
+ RETVAL = db_pget(db, key, pkey, data, flags);
+ Trace((" RETVAL %d\n", RETVAL));
+#endif
OUTPUT:
- key if (flagSet(DB_SET_RECNO)) OutputValue(ST(1), key) ;
+ RETVAL
+ key if (writeToKey()) OutputKey(ST(1), key) ;
+ pkey
data
#define db_put(db,key,data,flag) \
(db->Status = (db->dbp->put)(db->dbp,db->txn,&key,&data,flag))
DualType
db_put(db, key, data, flags=0)
+ u_int flags
BerkeleyDB::Common db
DBTKEY key
DBT data
- u_int flags
- INIT:
+ CODE:
ckActive_Database(db->active) ;
CurrentDB = db ;
/* SetPartial(data,db) ; */
+ Trace(("db_put db[%p] in [%p] txn[%p] key[%.*s] data [%.*s] flags[%d]\n", db->dbp, db, db->txn, key.size, key.data, data.size, data.data, flags)) ;
+ RETVAL = db_put(db, key, data, flags);
+ Trace((" RETVAL %d\n", RETVAL));
OUTPUT:
+ RETVAL
key if (flagSet(DB_APPEND)) OutputKey(ST(1), key) ;
#define db_key_range(db, key, range, flags) \
(db->Status = ((db->dbp)->key_range)(db->dbp, db->txn, &key, &range, flags))
DualType
db_key_range(db, key, less, equal, greater, flags=0)
+ u_int32_t flags
BerkeleyDB::Common db
DBTKEY_B key
- double less = NO_INIT
- double equal = NO_INIT
- double greater = NO_INIT
- u_int32_t flags
+ double less = 0.0 ;
+ double equal = 0.0 ;
+ double greater = 0.0 ;
CODE:
{
#ifndef AT_LEAST_DB_3_1
@@ -3449,8 +3027,8 @@ db_fd(db)
#define db_sync(db, fl) (db->Status = (db->dbp->sync)(db->dbp, fl))
DualType
db_sync(db, flags=0)
- BerkeleyDB::Common db
u_int flags
+ BerkeleyDB::Common db
INIT:
ckActive_Database(db->active) ;
CurrentDB = db ;
@@ -3463,24 +3041,72 @@ _Txn(db, txn=NULL)
ckActive_Database(db->active) ;
CODE:
if (txn) {
- Trace(("_Txn(%d in %d) active [%d]\n", txn->txn, txn, txn->active));
+ Trace(("_Txn[%p] in[%p] active [%d]\n", txn->txn, txn, txn->active));
ckActive_Transaction(txn->active) ;
db->txn = txn->txn ;
}
else {
- Trace(("_Txn(undef) \n"));
+ Trace(("_Txn[undef] \n"));
db->txn = NULL ;
}
+#define db_truncate(db, countp, flags) \
+ (db->Status = ((db->dbp)->truncate)(db->dbp, db->txn, &countp, flags))
+DualType
+truncate(db, countp, flags=0)
+ BerkeleyDB::Common db
+ u_int32_t countp
+ u_int32_t flags
+ INIT:
+ ckActive_Database(db->active) ;
+ CODE:
+#ifndef AT_LEAST_DB_3_3
+ softCrash("truncate needs Berkeley DB 3.3 or later") ;
+#else
+ CurrentDB = db ;
+ RETVAL = db_truncate(db, countp, flags);
+#endif
+ OUTPUT:
+ RETVAL
+ countp
+
+#ifdef AT_LEAST_DB_4_1
+# define db_associate(db, sec, cb, flags)\
+ (db->Status = ((db->dbp)->associate)(db->dbp, NULL, sec->dbp, &cb, flags))
+#else
+# define db_associate(db, sec, cb, flags)\
+ (db->Status = ((db->dbp)->associate)(db->dbp, sec->dbp, &cb, flags))
+#endif
+DualType
+associate(db, secondary, callback, flags=0)
+ BerkeleyDB::Common db
+ BerkeleyDB::Common secondary
+ SV* callback
+ u_int32_t flags
+ INIT:
+ ckActive_Database(db->active) ;
+ CODE:
+#ifndef AT_LEAST_DB_3_3
+ softCrash("associate needs Berkeley DB 3.3 or later") ;
+#else
+ CurrentDB = db ;
+ /* db->associated = newSVsv(callback) ; */
+ secondary->associated = newSVsv(callback) ;
+ /* secondary->dbp->app_private = secondary->associated ; */
+ secondary->secondary_db = TRUE;
+ RETVAL = db_associate(db, secondary, associate_cb, flags);
+#endif
+ OUTPUT:
+ RETVAL
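To show how the new secondary-index and truncate support above fits together, a hedged end-to-end sketch (the database names, record format, and key-extraction callback are all hypothetical; needs Berkeley DB 3.3 or better):

    use BerkeleyDB ;

    my $primary   = new BerkeleyDB::Hash -Filename => "people.db",   -Flags => DB_CREATE ;
    my $secondary = new BerkeleyDB::Hash -Filename => "surname.idx", -Flags => DB_CREATE ;

    # callback gets (primary key, primary value) and stores the secondary key in $_[2]
    $primary->associate($secondary, sub { $_[2] = (split /\|/, $_[1])[0] ; return 0 }) ;

    $primary->db_put("42", "Smith|Alice") ;

    my ($pkey, $value) = ("", "") ;
    $secondary->db_pget("Smith", $pkey, $value) ;   # $pkey becomes "42", $value "Smith|Alice"

    # truncate empties the database and reports how many records were dropped
    my $count ;
    $primary->truncate($count) ;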
MODULE = BerkeleyDB::Cursor PACKAGE = BerkeleyDB::Cursor PREFIX = cu_
BerkeleyDB::Cursor::Raw
_c_dup(db, flags=0)
- BerkeleyDB::Cursor db
u_int32_t flags
+ BerkeleyDB::Cursor db
BerkeleyDB::Cursor RETVAL = NULL ;
INIT:
CurrentDB = db->parent_db ;
@@ -3503,6 +3129,9 @@ _c_dup(db, flags=0)
RETVAL->filename = my_strdup(db->filename) ;
RETVAL->compare = db->compare ;
RETVAL->dup_compare = db->dup_compare ;
+#ifdef AT_LEAST_DB_3_3
+ RETVAL->associated = db->associated ;
+#endif
RETVAL->prefix = db->prefix ;
RETVAL->hash = db->hash ;
RETVAL->partial = db->partial ;
@@ -3520,7 +3149,7 @@ _c_dup(db, flags=0)
RETVAL->filter_store_value = db->filter_store_value ;
#endif /* DBM_FILTERING */
/* RETVAL->info ; */
- hash_store_iv("BerkeleyDB::Term::Cursor", (IV)RETVAL, 1) ;
+ hash_store_iv("BerkeleyDB::Term::Cursor", (char *)RETVAL, 1) ;
}
#endif
}
@@ -3533,7 +3162,7 @@ _c_close(db)
INIT:
CurrentDB = db->parent_db ;
ckActive_Cursor(db->active) ;
- hash_delete("BerkeleyDB::Term::Cursor", (IV)db) ;
+ hash_delete("BerkeleyDB::Term::Cursor", (char *)db) ;
CODE:
RETVAL = db->Status =
((db->cursor)->c_close)(db->cursor) ;
@@ -3549,7 +3178,7 @@ _DESTROY(db)
CODE:
CurrentDB = db->parent_db ;
Trace(("In BerkeleyDB::Cursor::_DESTROY db %d dirty=%d active=%d\n", db, PL_dirty, db->active));
- hash_delete("BerkeleyDB::Term::Cursor", (IV)db) ;
+ hash_delete("BerkeleyDB::Term::Cursor", (char *)db) ;
if (db->active)
((db->cursor)->c_close)(db->cursor) ;
if (db->parent_db->open_cursors)
@@ -3570,8 +3199,8 @@ status(db)
#define cu_c_del(c,f) (c->Status = ((c->cursor)->c_del)(c->cursor,f))
DualType
cu_c_del(db, flags=0)
- BerkeleyDB::Cursor db
int flags
+ BerkeleyDB::Cursor db
INIT:
CurrentDB = db->parent_db ;
ckActive_Cursor(db->active) ;
@@ -3582,12 +3211,12 @@ cu_c_del(db, flags=0)
#define cu_c_get(c,k,d,f) (c->Status = (c->cursor->c_get)(c->cursor,&k,&d,f))
DualType
cu_c_get(db, key, data, flags=0)
- BerkeleyDB::Cursor db
int flags
+ BerkeleyDB::Cursor db
DBTKEY_B key
DBT_B data
INIT:
- Trace(("c_get db [%d] flags [%d]\n", db, flags)) ;
+ Trace(("c_get db [%p] in [%p] flags [%d]\n", db->dbp, db, flags)) ;
CurrentDB = db->parent_db ;
ckActive_Cursor(db->active) ;
SetPartial(data,db) ;
@@ -3597,14 +3226,41 @@ cu_c_get(db, key, data, flags=0)
key
data if (! flagSet(DB_JOIN_ITEM)) OutputValue_B(ST(2), data) ;
+#define cu_c_pget(c,k,p,d,f) (c->Status = (c->secondary_db ? (c->cursor->c_pget)(c->cursor,&k,&p,&d,f) : EINVAL))
+DualType
+cu_c_pget(db, key, pkey, data, flags=0)
+ int flags
+ BerkeleyDB::Cursor db
+ DBTKEY_B key
+ DBTKEY_B pkey = NO_INIT
+ DBT_B data
+ CODE:
+#ifndef AT_LEAST_DB_3_3
+ softCrash("db_c_pget needs at least Berkeley DB 3.3");
+#else
+ Trace(("c_pget db [%d] flags [%d]\n", db, flags)) ;
+ CurrentDB = db->parent_db ;
+ ckActive_Cursor(db->active) ;
+ SetPartial(data,db) ;
+ DBT_clear(pkey);
+ RETVAL = cu_c_pget(db, key, pkey, data, flags);
+ Trace(("c_pget end\n")) ;
+#endif
+ OUTPUT:
+ RETVAL
+ key
+ pkey
+ data if (! flagSet(DB_JOIN_ITEM)) OutputValue_B(ST(2), data) ;
+
+
#define cu_c_put(c,k,d,f) (c->Status = (c->cursor->c_put)(c->cursor,&k,&d,f))
DualType
cu_c_put(db, key, data, flags=0)
+ int flags
BerkeleyDB::Cursor db
DBTKEY key
DBT data
- int flags
INIT:
CurrentDB = db->parent_db ;
ckActive_Cursor(db->active) ;
@@ -3615,9 +3271,9 @@ cu_c_put(db, key, data, flags=0)
#define cu_c_count(c,p,f) (c->Status = (c->cursor->c_count)(c->cursor,&p,f))
DualType
cu_c_count(db, count, flags=0)
+ int flags
BerkeleyDB::Cursor db
u_int32_t count = NO_INIT
- int flags
CODE:
#ifndef AT_LEAST_DB_3_1
softCrash("c_count needs at least Berkeley DB 3.1.x");
@@ -3636,9 +3292,9 @@ MODULE = BerkeleyDB::TxnMgr PACKAGE = BerkeleyDB::TxnMgr PREFIX = xx_
BerkeleyDB::Txn::Raw
_txn_begin(txnmgr, pid=NULL, flags=0)
+ u_int32_t flags
BerkeleyDB::TxnMgr txnmgr
BerkeleyDB::Txn pid
- u_int32_t flags
CODE:
{
DB_TXN *txn ;
@@ -3653,14 +3309,18 @@ _txn_begin(txnmgr, pid=NULL, flags=0)
#if DB_VERSION_MAJOR == 2
txn_begin(txnmgr->env->Env->tx_info, p_id, &txn) ;
#else
+# ifdef AT_LEAST_DB_4
+ txnmgr->env->Env->txn_begin(txnmgr->env->Env, p_id, &txn, flags) ;
+# else
txn_begin(txnmgr->env->Env, p_id, &txn, flags) ;
+# endif
#endif
if (txnmgr->env->TxnMgrStatus == 0) {
ZMALLOC(RETVAL, BerkeleyDB_Txn_type) ;
RETVAL->txn = txn ;
RETVAL->active = TRUE ;
Trace(("_txn_begin created txn [%d] in [%d]\n", txn, RETVAL));
- hash_store_iv("BerkeleyDB::Term::Txn", (IV)RETVAL, 1) ;
+ hash_store_iv("BerkeleyDB::Term::Txn", (char *)RETVAL, 1) ;
}
else
RETVAL = NULL ;
@@ -3693,19 +3353,24 @@ txn_close(txnp)
#if DB_VERSION_MAJOR == 2
-# define xx_txn_checkpoint(t,k,m) txn_checkpoint(t->env->Env->tx_info, k, m)
+# define xx_txn_checkpoint(t,k,m,f) txn_checkpoint(t->env->Env->tx_info, k, m)
#else
-# ifdef AT_LEAST_DB_3_1
-# define xx_txn_checkpoint(t,k,m) txn_checkpoint(t->env->Env, k, m, 0)
+# ifdef AT_LEAST_DB_4
+# define xx_txn_checkpoint(e,k,m,f) e->env->Env->txn_checkpoint(e->env->Env, k, m, f)
# else
-# define xx_txn_checkpoint(t,k,m) txn_checkpoint(t->env->Env, k, m)
+# ifdef AT_LEAST_DB_3_1
+# define xx_txn_checkpoint(t,k,m,f) txn_checkpoint(t->env->Env, k, m, 0)
+# else
+# define xx_txn_checkpoint(t,k,m,f) txn_checkpoint(t->env->Env, k, m)
+# endif
# endif
#endif
DualType
-xx_txn_checkpoint(txnp, kbyte, min)
+xx_txn_checkpoint(txnp, kbyte, min, flags=0)
BerkeleyDB::TxnMgr txnp
long kbyte
long min
+ u_int32_t flags
HV *
txn_stat(txnp)
@@ -3714,10 +3379,18 @@ txn_stat(txnp)
CODE:
{
DB_TXN_STAT * stat ;
-#if DB_VERSION_MAJOR == 2
- if(txn_stat(txnp->env->Env->tx_info, &stat, safemalloc) == 0) {
+#ifdef AT_LEAST_DB_4
+ if(txnp->env->Env->txn_stat(txnp->env->Env, &stat, 0) == 0) {
#else
+# ifdef AT_LEAST_DB_3_3
+ if(txn_stat(txnp->env->Env, &stat) == 0) {
+# else
+# if DB_VERSION_MAJOR == 2
+ if(txn_stat(txnp->env->Env->tx_info, &stat, safemalloc) == 0) {
+# else
if(txn_stat(txnp->env->Env, &stat, safemalloc) == 0) {
+# endif
+# endif
#endif
RETVAL = (HV*)sv_2mortal((SV*)newHV()) ;
hv_store_iv(RETVAL, "st_time_ckp", stat->st_time_ckp) ;
@@ -3742,8 +3415,8 @@ txn_stat(txnp)
BerkeleyDB::TxnMgr
txn_open(dir, flags, mode, dbenv)
- const char * dir
int flags
+ const char * dir
int mode
BerkeleyDB::Env dbenv
NOT_IMPLEMENTED_YET
@@ -3765,9 +3438,13 @@ _DESTROY(tid)
CODE:
Trace(("In BerkeleyDB::Txn::_DESTROY txn [%d] active [%d] dirty=%d\n", tid->txn, tid->active, PL_dirty)) ;
if (tid->active)
+#ifdef AT_LEAST_DB_4
+ tid->txn->abort(tid->txn) ;
+#else
txn_abort(tid->txn) ;
+#endif
RETVAL = (int)tid ;
- hash_delete("BerkeleyDB::Term::Txn", (IV)tid) ;
+ hash_delete("BerkeleyDB::Term::Txn", (char *)tid) ;
Safefree(tid) ;
Trace(("End of BerkeleyDB::Txn::DESTROY\n")) ;
OUTPUT:
@@ -3781,37 +3458,75 @@ xx_txn_unlink(dir, force, dbenv)
BerkeleyDB::Env dbenv
NOT_IMPLEMENTED_YET
-#define xx_txn_prepare(t) (t->Status = txn_prepare(t->txn))
+#ifdef AT_LEAST_DB_4
+# define xx_txn_prepare(t) (t->Status = t->txn->prepare(t->txn, 0))
+#else
+# ifdef AT_LEAST_DB_3_3
+# define xx_txn_prepare(t) (t->Status = txn_prepare(t->txn, 0))
+# else
+# define xx_txn_prepare(t) (t->Status = txn_prepare(t->txn))
+# endif
+#endif
DualType
xx_txn_prepare(tid)
BerkeleyDB::Txn tid
INIT:
ckActive_Transaction(tid->active) ;
-#if DB_VERSION_MAJOR == 2
-# define _txn_commit(t,flags) (t->Status = txn_commit(t->txn))
+#ifdef AT_LEAST_DB_4
+# define _txn_commit(t,flags) (t->Status = t->txn->commit(t->txn, flags))
#else
-# define _txn_commit(t, flags) (t->Status = txn_commit(t->txn, flags))
+# if DB_VERSION_MAJOR == 2
+# define _txn_commit(t,flags) (t->Status = txn_commit(t->txn))
+# else
+# define _txn_commit(t, flags) (t->Status = txn_commit(t->txn, flags))
+# endif
#endif
DualType
_txn_commit(tid, flags=0)
- BerkeleyDB::Txn tid
u_int32_t flags
+ BerkeleyDB::Txn tid
INIT:
ckActive_Transaction(tid->active) ;
- hash_delete("BerkeleyDB::Term::Txn", (IV)tid) ;
+ hash_delete("BerkeleyDB::Term::Txn", (char *)tid) ;
tid->active = FALSE ;
-#define _txn_abort(t) (t->Status = txn_abort(t->txn))
+#ifdef AT_LEAST_DB_4
+# define _txn_abort(t) (t->Status = t->txn->abort(t->txn))
+#else
+# define _txn_abort(t) (t->Status = txn_abort(t->txn))
+#endif
DualType
_txn_abort(tid)
BerkeleyDB::Txn tid
INIT:
ckActive_Transaction(tid->active) ;
- hash_delete("BerkeleyDB::Term::Txn", (IV)tid) ;
+ hash_delete("BerkeleyDB::Term::Txn", (char *)tid) ;
+ tid->active = FALSE ;
+
+#ifdef AT_LEAST_DB_4
+# define _txn_discard(t,f) (t->Status = t->txn->discard(t->txn, f))
+#else
+# ifdef AT_LEAST_DB_3_3_4
+# define _txn_discard(t,f) (t->Status = txn_discard(t->txn, f))
+# else
+# define _txn_discard(t,f) (int)softCrash("txn_discard needs Berkeley DB 3.3.4 or better") ;
+# endif
+#endif
+DualType
+_txn_discard(tid, flags=0)
+ BerkeleyDB::Txn tid
+ u_int32_t flags
+ INIT:
+ ckActive_Transaction(tid->active) ;
+ hash_delete("BerkeleyDB::Term::Txn", (char *)tid) ;
tid->active = FALSE ;
-#define xx_txn_id(t) txn_id(t->txn)
+#ifdef AT_LEAST_DB_4
+# define xx_txn_id(t) t->txn->id(t->txn)
+#else
+# define xx_txn_id(t) txn_id(t->txn)
+#endif
u_int32_t
xx_txn_id(tid)
BerkeleyDB::Txn tid
@@ -3861,12 +3576,13 @@ FIRSTKEY(db)
int
NEXTKEY(db, key)
BerkeleyDB::Common db
- DBTKEY key
+ DBTKEY key = NO_INIT
CODE:
{
DBT value ;
CurrentDB = db ;
+ DBT_clear(key) ;
DBT_clear(value) ;
key.flags = 0 ;
RETVAL = (db->Status) =
diff --git a/bdb/perl.BerkeleyDB/BerkeleyDB/Btree.pm b/bdb/perl/BerkeleyDB/BerkeleyDB/Btree.pm
index ba9a9c0085d..ba9a9c0085d 100644
--- a/bdb/perl.BerkeleyDB/BerkeleyDB/Btree.pm
+++ b/bdb/perl/BerkeleyDB/BerkeleyDB/Btree.pm
diff --git a/bdb/perl.BerkeleyDB/BerkeleyDB/Hash.pm b/bdb/perl/BerkeleyDB/BerkeleyDB/Hash.pm
index 8e7bc7e78c7..8e7bc7e78c7 100644
--- a/bdb/perl.BerkeleyDB/BerkeleyDB/Hash.pm
+++ b/bdb/perl/BerkeleyDB/BerkeleyDB/Hash.pm
diff --git a/bdb/perl.BerkeleyDB/Changes b/bdb/perl/BerkeleyDB/Changes
index dcaccd4d0c7..cbeb1a34d73 100644
--- a/bdb/perl.BerkeleyDB/Changes
+++ b/bdb/perl/BerkeleyDB/Changes
@@ -1,31 +1,116 @@
Revision history for Perl extension BerkeleyDB.
-0.01 23 October 1997
- * first alpha release as BerkDB.
+0.20 2nd September 2002
-0.02 30 October 1997
- * renamed module to BerkeleyDB
- * fixed a few bugs & added more tests
+ * More support for building with Berkeley DB 4.1.x
+ * db->get & db->pget used the wrong output macro for DBM filters
+ bug spotted by Aaron Ross.
+ * db_join didn't keep a reference to the cursors it was joining.
+ Spotted by Winton Davies.
-0.03 5 May 1998
- * fixed db_get with DB_SET_RECNO
- * fixed c_get with DB_SET_RECNO and DB_GET_RECNO
- * implemented BerkeleyDB::Unknown
- * implemented BerkeleyDB::Recno, including push, pop etc
- modified the txn support.
+0.19 5th June 2002
+ * Removed the targets that used mkconsts from Makefile.PL. They relied
+ on a module that is not available in all versions of Perl.
+ * added support for env->set_verbose
+ * added support for db->truncate
+ * added support for db->rename via BerkeleyDB::db_rename
+ * added support for db->verify via BerkeleyDB::db_verify
+ * added support for db->associate, db->pget & cursor->c_pget
+ * Builds with Berkeley DB 4.1.x
+
-0.04 19 May 1998
- * Define DEFSV & SAVE_DEFSV if not already defined. This allows
- the module to be built with Perl 5.004_04.
+0.18 6th January 2002
+ * Dropped support for ErrFile as a file handle. It was proving too
+ difficult to get at the underlying FILE * in XS.
+ Reported by Jonas Smedegaard (Debian powerpc) & Kenneth Olwing (Win32)
+ * Fixed problem with abort macro in XSUB.h clashing with txn abort
+ method in Berkeley DB 4.x -- patch supplied by Kenneth Olwing.
+ * DB->set_alloc was getting called too late in BerkeleyDB.xs.
+ This was causing problems with ActivePerl -- problem reported
+ by Kenneth Olwing.
+ * When opening a queue, the Len property set the DB_PAD flag.
+ Should have been DB_FIXEDLEN. Fix provided by Kenneth Olwing.
+ * Test harness fixes from Kenneth Olwing.
-0.05 9 November 1998
- * Added a note to README about how to build Berkeley DB 2.x
- when using HP-UX.
- * Minor modifications to get the module to build with DB 2.5.x
+0.17 23 September 2001
+ * Fixed a bug in BerkeleyDB::Recno - reported by Niklas Paulsson.
+ * Added log_archive - patch supplied by Benjamin Holzman
+ * Added txn_discard
+ * Builds with Berkeley DB 4.0.x
-0.06 19 December 1998
- * Minor modifications to get the module to build with DB 2.6.x
- * Added support for DB 2.6.x's Concurrent Access Method, DB_INIT_CDB.
+0.16 1 August 2001
+ * added support for Berkeley DB 3.3.x (but no support for any of the
+ new features just yet)
+
+0.15 26 April 2001
+ * Fixed a bug in the processing of the flags options in
+ db_key_range.
+ * added support for set_lg_max & set_lg_bsize
+ * allow DB_TMP_DIR and DB_TEMP_DIR
+ * the -Filename parameter to BerkeleyDB::Queue didn't work.
+ * added symbol DB_CONSUME_WAIT
+
+0.14 21st January 2001
+ * Silenced the warnings when built with a 64-bit Perl.
+ * Can now build with DB 3.2.3h (part of MySQL). The test harness
+ takes an age to do the queue test, but it does eventually pass.
+ * Mentioned the problems that occur when perl is built with sfio.
+
+0.13 15th January 2001
+ * Added support to allow this module to build with Berkeley DB 3.2
+ * Updated dbinfo to support Berkeley DB 3.1 & 3.2 file format
+ changes.
+ * Documented the Solaris 2.7 core dump problem in README.
+ * Tidied up the test harness to fix a problem on Solaris where the
+ "fred" directory wasn't being deleted when it should have been.
+ * two calls to "open" clashed with a win32 macro.
+ * size argument for hash_cb is different for Berkeley DB 3.x
+ * Documented the issue of building on Linux.
+ * Added -Server, -CacheSize & -LockDetect options
+ [original patch supplied by Graham Barr]
+ * Added support for set_mutexlocks, c_count, set_q_extentsize,
+ key_range, c_dup
+ * Dropped the "attempted to close a Cursor with an open transaction"
+ error in c_close. The correct behaviour is that the cursor
+ should be closed before committing/aborting the transaction.
+
+0.12 2nd August 2000
+ * Serious bug with get fixed. Spotted by Sleepycat.
+ * Added hints file for Solaris & Irix (courtesy of Albert Chin-A-Young)
+
+0.11 4th June 2000
+ * When built with Berkeley DB 3.x there can be a clash with the close
+ macro.
+ * Typo in the definition of DB_WRITECURSOR
+ * The flags parameter wasn't getting sent to db_cursor
+ * Plugged small memory leak in db_cursor (DESTROY wasn't freeing
+ memory)
+ * Can be built with Berkeley DB 3.1
+
+0.10 8th December 1999
+ * The DESTROY method was missing for BerkeleyDB::Env. This resulted in
+ a memory leak. Fixed.
+ * If opening an environment or database failed, there was a small
+ memory leak. This has been fixed.
+ * With a thread-enabled Perl, it could core when a database was closed.
+ Problem traced to the strdup function.
+
+0.09 29th November 1999
+ * the queue.t & subdb.t test harnesses were outputting a few
+ spurious warnings. This has been fixed.
+
+0.08 28th November 1999
+ * More documentation updates
+ * Changed reference to files in /tmp in examples.t
+ * Fixed a typo in softCrash that caused problems when building
+ with a thread-enabled Perl.
+ * BerkeleyDB::Error wasn't initialised properly.
+ * ANSI-ified all the static C functions in BerkeleyDB.xs
+ * Added support for the following DB 3.x features:
+ + The Queue database type
+ + db_remove
+ + subdatabases
+ + db_stat for Hash & Queue
0.07 21st September 1999
* Numerous small bug fixes.
@@ -53,60 +138,30 @@ Revision history for Perl extension BerkeleyDB.
* Deprecated the TxnMgr class. As with Berkeley DB version 3,
txn_begin etc are now accessed via the environment object.
-0.08 28nd November 1999
- * More documentation updates
- * Changed reference to files in /tmp in examples.t
- * Fixed a typo in softCrash that caused problems when building
- with a thread-enabled Perl.
- * BerkeleyDB::Error wasn't initialised properly.
- * ANSI-ified all the static C functions in BerkeleyDB.xs
- * Added support for the following DB 3.x features:
- + The Queue database type
- + db_remove
- + subdatabases
- + db_stat for Hash & Queue
+0.06 19 December 1998
+ * Minor modifications to get the module to build with DB 2.6.x
+ * Added support for DB 2.6.x's Concurrent Access Method, DB_INIT_CDB.
-0.09 29th November 1999
- * the queue.t & subdb.t test harnesses were outputting a few
- spurious warnings. This has been fixed.
+0.05 9 November 1998
+ * Added a note to README about how to build Berkeley DB 2.x
+ when using HP-UX.
+ * Minor modifications to get the module to build with DB 2.5.x
-0.10 8th December 1999
- * The DESTROY method was missing for BerkeleyDB::Env. This resulted in
- a memory leak. Fixed.
- * If opening an environment or database failed, there was a small
- memory leak. This has been fixed.
- * A thread-enabled Perl it could core when a database was closed.
- Problem traced to the strdup function.
+0.04 19 May 1998
+ * Define DEFSV & SAVE_DEFSV if not already defined. This allows
+ the module to be built with Perl 5.004_04.
-0.11 4th June 2000
- * When built with Berkeley Db 3.x there can be a clash with the close
- macro.
- * Typo in the definition of DB_WRITECURSOR
- * The flags parameter wasn't getting sent to db_cursor
- * Plugged small memory leak in db_cursor (DESTROY wasn't freeing
- memory)
- * Can be built with Berkeley DB 3.1
-
+0.03 5 May 1998
+ * fixed db_get with DB_SET_RECNO
+ * fixed c_get with DB_SET_RECNO and DB_GET_RECNO
+ * implemented BerkeleyDB::Unknown
+ * implemented BerkeleyDB::Recno, including push, pop etc
+ modified the txn support.
-0.12 2nd August 2000
- * Serious bug with get fixed. Spotted by Sleepycat.
- * Added hints file for Solaris & Irix (courtesy of Albert Chin-A-Young)
+0.02 30 October 1997
+ * renamed module to BerkeleyDB
+ * fixed a few bugs & added more tests
-0.13 15th January 2001
- * Added support to allow this module to build with Berkeley DB 3.2
- * Updated dbinfo to support Berkeley DB 3.1 & 3.2 file format
- changes.
- * Documented the Solaris 2.7 core dump problem in README.
- * Tidied up the test harness to fix a problem on Solaris where the
- "fred" directory wasn't being deleted when it should have been.
- * two calls to "open" clashed with a win32 macro.
- * size argument for hash_cb is different for Berkeley DB 3.x
- * Documented the issue of building on Linux.
- * Added -Server, -CacheSize & -LockDetect options
- [original patch supplied by Graham Barr]
- * Added support for set_mutexlocks, c_count, set_q_extentsize,
- key_range, c_dup
- * Dropped the "attempted to close a Cursor with an open transaction"
- error in c_close. The correct behaviour is that the cursor
- should be closed before committing/aborting the transaction.
+0.01 23 October 1997
+ * first alpha release as BerkDB.
diff --git a/bdb/perl.BerkeleyDB/MANIFEST b/bdb/perl/BerkeleyDB/MANIFEST
index 3b8a820d56e..7da51ef7d7c 100644
--- a/bdb/perl.BerkeleyDB/MANIFEST
+++ b/bdb/perl/BerkeleyDB/MANIFEST
@@ -6,18 +6,23 @@ BerkeleyDB/Btree.pm
BerkeleyDB/Hash.pm
Changes
config.in
+constants.h
+constants.xs
dbinfo
+hints/dec_osf.pl
hints/solaris.pl
hints/irix_6_5.pl
Makefile.PL
MANIFEST
mkconsts
mkpod
+ppport.h
README
t/btree.t
t/db-3.0.t
t/db-3.1.t
t/db-3.2.t
+t/db-3.3.t
t/destroy.t
t/env.t
t/examples.t
@@ -34,6 +39,7 @@ t/strict.t
t/subdb.t
t/txn.t
t/unknown.t
+t/util.pm
Todo
typemap
patches/5.004
@@ -47,3 +53,4 @@ patches/5.005_01
patches/5.005_02
patches/5.005_03
patches/5.6.0
+scan
diff --git a/bdb/perl.BerkeleyDB/Makefile.PL b/bdb/perl/BerkeleyDB/Makefile.PL
index 399a6761886..86da9a845af 100644
--- a/bdb/perl.BerkeleyDB/Makefile.PL
+++ b/bdb/perl/BerkeleyDB/Makefile.PL
@@ -8,6 +8,17 @@ BEGIN { die "BerkeleyDB needs Perl 5.004_04 or greater" if $] < 5.004_04 ; }
use strict ;
use ExtUtils::MakeMaker ;
+use Config ;
+
+# Check for the presence of sfio
+if ($Config{'d_sfio'}) {
+ print <<EOM;
+
+WARNING: Perl seems to have been built with SFIO support enabled.
+ Please read the SFIO Notes in the README file.
+
+EOM
+}
my $LIB_DIR ;
my $INC_DIR ;
@@ -32,7 +43,7 @@ $OS2 = "-DOS2" if $^O eq 'os2' ;
WriteMakefile(
NAME => 'BerkeleyDB',
LIBS => ["-L${LIB_DIR} $LIBS"],
- MAN3PODS => ' ', # Pods will be built by installman.
+ #MAN3PODS => {}, # Pods will be built by installman.
INC => "-I$INC_DIR",
VERSION_FROM => 'BerkeleyDB.pm',
XSPROTOARG => '-noprototypes',
@@ -53,7 +64,7 @@ $(NAME).pod: $(NAME).pod.P t/examples.t.T t/examples3.t.T mkpod
perl ./mkpod
$(NAME).xs: typemap
- @$(TOUCH) $(NAME).xs
+ $(TOUCH) $(NAME).xs
Makefile: config.in
diff --git a/bdb/perl.BerkeleyDB/README b/bdb/perl/BerkeleyDB/README
index aa905fa8011..a600e313193 100644
--- a/bdb/perl.BerkeleyDB/README
+++ b/bdb/perl/BerkeleyDB/README
@@ -1,10 +1,10 @@
BerkeleyDB
- Version 0.13
+ Version 0.20
- 15th Jan 2001
+ 2nd Sept 2002
- Copyright (c) 1997-2001 Paul Marquess. All rights reserved. This
+ Copyright (c) 1997-2002 Paul Marquess. All rights reserved. This
program is free software; you can redistribute it and/or modify
it under the same terms as Perl itself.
@@ -13,8 +13,9 @@ DESCRIPTION
-----------
BerkeleyDB is a module which allows Perl programs to make use of the
-facilities provided by Berkeley DB version 2 or 3. (Note: if you want
-to use version 1 of Berkeley DB with Perl you need the DB_File module).
+facilities provided by Berkeley DB version 2 or greater. (Note: if
+you want to use version 1 of Berkeley DB with Perl you need the DB_File
+module).
Berkeley DB is a C library which provides a consistent interface to a
number of database formats. BerkeleyDB provides an interface to all
@@ -105,37 +106,56 @@ This symptom can imply:
Solution: Edit config.in and set the LIB and INCLUDE variables to point
to the directories where libdb.a and db.h are installed.
-Wrong db.h
-----------
+#error db.h is not for Berkeley DB at all.
+------------------------------------------
-If you get an error like this when building this module:
+If you get the error above when building this module it means that there
+is a file called "db.h" on your system that isn't the one that comes
+with Berkeley DB.
- cc -c -I./libraries/ -Dbool=char -DHAS_BOOL -I/usr/local/include -O2
- -DVERSION=\"0.07\" -DXS_VERSION=\"0.07\" -fpic
- -I/usr/local/lib/perl5/5.00503/i586-linux/CORE BerkeleyDB.c
- BerkeleyDB.xs:93: parse error before `DB_INFO'
- BerkeleyDB.xs:93: warning: no semicolon at end of struct or union
- BerkeleyDB.xs:94: warning: data definition has no type or storage class
- BerkeleyDB.xs:95: parse error before `0x80000000'
- BerkeleyDB.xs:110: parse error before `}'
- BerkeleyDB.xs:110: warning: data definition has no type or storage class
- BerkeleyDB.xs:117: parse error before `DB_ENV'
- ...
+Options:
+
+ 1. You don't have Berkeley DB installed on your system at all.
+ Solution: get & install Berkeley DB.
+
+ 2. Edit config.in and make sure the INCLUDE variable points to the
+ directory where the Berkeley DB file db.h is installed.
+
+ 3. If option 2 doesn't work, try temporarily renaming the db.h file
+ that is causing the error.
-This error usually happens when if you only have Berkeley DB version 1
-on your system or you have both version 1 and version 2 (or 3) of Berkeley
-DB installed on your system. When building BerkeleyDB it attempts
-to use the db.h for Berkeley DB version 1. This perl module can only
-be built with Berkeley DB version 2 or 3.
+#error db.h is for Berkeley DB 1.x - need at least Berkeley DB 2.6.4
+--------------------------------------------------------------------
+
+The error above will occur if there is a copy of the Berkeley DB 1.x
+file db.h on your system.
+
+This error will happen when
+
+ 1. you only have Berkeley DB version 1 on your system.
+ Solution: get & install a newer version of Berkeley DB.
+
+ 2. you have both version 1 and a later version of Berkeley DB
+ installed on your system. When building BerkeleyDB it attempts to
+ use the db.h for Berkeley DB version 1.
+ Solution: Edit config.in and set the LIB and INCLUDE variables
+ to point to the directories where libdb.a and db.h are
+ installed.
+
+
+#error db.h is for Berkeley DB 2.0-2.5 - need at least Berkeley DB 2.6.4
+------------------------------------------------------------------------
+
+The error above will occur if there is a copy of the file db.h for
+Berkeley DB 2.0 to 2.5 on your system.
This symptom can imply:
- 1. You don't have Berkeley DB version 2 or 3 installed on your system
- at all.
- Solution: get & install Berkeley DB.
+ 1. You don't have a new enough version of Berkeley DB.
+ Solution: get & install a newer version of Berkeley DB.
- 2. You do have Berkeley DB 2 or 3 installed, but it isn't in a standard
- place.
+ 2. You have the correct version of Berkeley DB installed, but it isn't
+ in a standard place.
Solution: Edit config.in and set the LIB and INCLUDE variables
to point to the directories where libdb.a and db.h are
installed.
@@ -157,11 +177,11 @@ when you run the test harness:
at /usr/local/lib/perl5/5.00503/i586-linux/DynaLoader.pm line 169.
...
-This error usually happens when you have both version 1 and version
-2 (or 3) of Berkeley DB installed on your system and BerkeleyDB attempts
-to build using the db.h for Berkeley DB version 2/3 and the version 1
+This error usually happens when you have both version 1 and a newer version
+of Berkeley DB installed on your system. BerkeleyDB attempts
+to build using the db.h for Berkeley DB version 2/3/4 and the version 1
library. Unfortunately the two versions aren't compatible with each
-other. BerkeleyDB can only be built with Berkeley DB version 2 or 3.
+other. BerkeleyDB can only be built with Berkeley DB version 2, 3 or 4.
Solution: Setting the LIB & INCLUDE variables in config.in to point to the
correct directories can sometimes be enough to fix this
@@ -301,7 +321,7 @@ be used to build this module. Follow the instructions in "BUILDING THE
MODULE", remembering to set the INCLUDE and LIB variables in config.in.
-The second approach will work with both Berkeley DB 2.x and 3.x.
+The second approach will work with Berkeley DB 2.x or better.
Start by building Berkeley DB as a shared library. This is from
the Berkeley DB build instructions:
@@ -419,7 +439,7 @@ To help me help you, I need of the following information:
2. The version of BerkeleyDB you have. If you have successfully
installed BerkeleyDB, this one-liner will tell you:
- perl -MBerkeleyDB -e 'print "BerkeleyDB ver $BerkeleyDB::VERSION\n"'
+ perl -MBerkeleyDB -e 'print qq{BerkeleyDB ver $BerkeleyDB::VERSION\n}'
If you haven't installed BerkeleyDB then search BerkeleyDB.pm for a
line like this:
@@ -429,7 +449,7 @@ To help me help you, I need of the following information:
3. The version of Berkeley DB you have installed. If you have
successfully installed BerkeleyDB, this one-liner will tell you:
- perl -MBerkeleyDB -e 'print BerkeleyDB::DB_VERSION_STRING."\n"'
+ perl -MBerkeleyDB -e 'print BerkeleyDB::DB_VERSION_STRING.qq{\n}'
If you haven't installed BerkeleyDB then search db.h for a line
like this:
diff --git a/bdb/perl.BerkeleyDB/Todo b/bdb/perl/BerkeleyDB/Todo
index 12d53bcf91c..12d53bcf91c 100644
--- a/bdb/perl.BerkeleyDB/Todo
+++ b/bdb/perl/BerkeleyDB/Todo
diff --git a/bdb/perl.BerkeleyDB/config.in b/bdb/perl/BerkeleyDB/config.in
index c23e6689cb3..fd1bb1caede 100644
--- a/bdb/perl.BerkeleyDB/config.in
+++ b/bdb/perl/BerkeleyDB/config.in
@@ -7,24 +7,16 @@
# Change the path below to point to the directory where db.h is
# installed on your system.
-#INCLUDE = /usr/local/include
+INCLUDE = /usr/local/include
#INCLUDE = /usr/local/BerkeleyDB/include
-#INCLUDE = ./libraries/2.7.5
-#INCLUDE = ./libraries/3.0.55
-#INCLUDE = ./libraries/3.1.17
-INCLUDE = ./libraries/3.2.7
# 2. Where is libdb?
#
# Change the path below to point to the directory where libdb is
# installed on your system.
-#LIB = /usr/local/lib
+LIB = /usr/local/lib
#LIB = /usr/local/BerkeleyDB/lib
-#LIB = ./libraries/2.7.5
-#LIB = ./libraries/3.0.55
-#LIB = ./libraries/3.1.17
-LIB = ./libraries/3.2.7
# 3. Is the library called libdb?
#
diff --git a/bdb/perl/BerkeleyDB/constants.h b/bdb/perl/BerkeleyDB/constants.h
new file mode 100644
index 00000000000..d86cef15513
--- /dev/null
+++ b/bdb/perl/BerkeleyDB/constants.h
@@ -0,0 +1,4046 @@
+#define PERL_constant_NOTFOUND 1
+#define PERL_constant_NOTDEF 2
+#define PERL_constant_ISIV 3
+#define PERL_constant_ISNO 4
+#define PERL_constant_ISNV 5
+#define PERL_constant_ISPV 6
+#define PERL_constant_ISPVN 7
+#define PERL_constant_ISSV 8
+#define PERL_constant_ISUNDEF 9
+#define PERL_constant_ISUV 10
+#define PERL_constant_ISYES 11
+
+#ifndef NVTYPE
+typedef double NV; /* 5.6 and later define NVTYPE, and typedef NV to it. */
+#endif
+#ifndef aTHX_
+#define aTHX_ /* 5.6 or later define this for threading support. */
+#endif
+#ifndef pTHX_
+#define pTHX_ /* 5.6 or later define this for threading support. */
+#endif
+
+static int
+constant_6 (pTHX_ const char *name, IV *iv_return) {
+ /* When generated this function returned values for the list of names given
+ here. However, subsequent manual editing may have added or removed some.
+ DB_DUP DB_PAD DB_RMW DB_SET */
+ /* Offset 3 gives the best switch position. */
+ switch (name[3]) {
+ case 'D':
+ if (memEQ(name, "DB_DUP", 6)) {
+ /* ^ */
+#ifdef DB_DUP
+ *iv_return = DB_DUP;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'P':
+ if (memEQ(name, "DB_PAD", 6)) {
+ /* ^ */
+#ifdef DB_PAD
+ *iv_return = DB_PAD;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'R':
+ if (memEQ(name, "DB_RMW", 6)) {
+ /* ^ */
+#ifdef DB_RMW
+ *iv_return = DB_RMW;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'S':
+ if (memEQ(name, "DB_SET", 6)) {
+ /* ^ */
+#ifdef DB_SET
+ *iv_return = DB_SET;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ }
+ return PERL_constant_NOTFOUND;
+}
+
+static int
+constant_7 (pTHX_ const char *name, IV *iv_return) {
+ /* When generated this function returned values for the list of names given
+ here. However, subsequent manual editing may have added or removed some.
+ DB_EXCL DB_HASH DB_LAST DB_NEXT DB_PREV */
+ /* Offset 3 gives the best switch position. */
+ switch (name[3]) {
+ case 'E':
+ if (memEQ(name, "DB_EXCL", 7)) {
+ /* ^ */
+#ifdef DB_EXCL
+ *iv_return = DB_EXCL;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'H':
+ if (memEQ(name, "DB_HASH", 7)) {
+ /* ^ */
+#if (DB_VERSION_MAJOR > 2) || \
+ (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR > 0) || \
+ (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR == 0 && \
+ DB_VERSION_PATCH >= 0)
+ *iv_return = DB_HASH;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'L':
+ if (memEQ(name, "DB_LAST", 7)) {
+ /* ^ */
+#ifdef DB_LAST
+ *iv_return = DB_LAST;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'N':
+ if (memEQ(name, "DB_NEXT", 7)) {
+ /* ^ */
+#ifdef DB_NEXT
+ *iv_return = DB_NEXT;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'P':
+ if (memEQ(name, "DB_PREV", 7)) {
+ /* ^ */
+#ifdef DB_PREV
+ *iv_return = DB_PREV;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ }
+ return PERL_constant_NOTFOUND;
+}
+
+static int
+constant_8 (pTHX_ const char *name, IV *iv_return) {
+ /* When generated this function returned values for the list of names given
+ here. However, subsequent manual editing may have added or removed some.
+ DB_AFTER DB_BTREE DB_FIRST DB_FLUSH DB_FORCE DB_QUEUE DB_RECNO */
+ /* Offset 4 gives the best switch position. */
+ switch (name[4]) {
+ case 'E':
+ if (memEQ(name, "DB_RECNO", 8)) {
+ /* ^ */
+#if (DB_VERSION_MAJOR > 2) || \
+ (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR > 0) || \
+ (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR == 0 && \
+ DB_VERSION_PATCH >= 0)
+ *iv_return = DB_RECNO;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'F':
+ if (memEQ(name, "DB_AFTER", 8)) {
+ /* ^ */
+#ifdef DB_AFTER
+ *iv_return = DB_AFTER;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'I':
+ if (memEQ(name, "DB_FIRST", 8)) {
+ /* ^ */
+#ifdef DB_FIRST
+ *iv_return = DB_FIRST;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'L':
+ if (memEQ(name, "DB_FLUSH", 8)) {
+ /* ^ */
+#ifdef DB_FLUSH
+ *iv_return = DB_FLUSH;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'O':
+ if (memEQ(name, "DB_FORCE", 8)) {
+ /* ^ */
+#ifdef DB_FORCE
+ *iv_return = DB_FORCE;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'T':
+ if (memEQ(name, "DB_BTREE", 8)) {
+ /* ^ */
+#if (DB_VERSION_MAJOR > 2) || \
+ (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR > 0) || \
+ (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR == 0 && \
+ DB_VERSION_PATCH >= 0)
+ *iv_return = DB_BTREE;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'U':
+ if (memEQ(name, "DB_QUEUE", 8)) {
+ /* ^ */
+#if (DB_VERSION_MAJOR > 3) || \
+ (DB_VERSION_MAJOR == 3 && DB_VERSION_MINOR > 0) || \
+ (DB_VERSION_MAJOR == 3 && DB_VERSION_MINOR == 0 && \
+ DB_VERSION_PATCH >= 55)
+ *iv_return = DB_QUEUE;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ }
+ return PERL_constant_NOTFOUND;
+}
+
+static int
+constant_9 (pTHX_ const char *name, IV *iv_return) {
+ /* When generated this function returned values for the list of names given
+ here. However, subsequent manual editing may have added or removed some.
+ DB_APPEND DB_BEFORE DB_CLIENT DB_COMMIT DB_CREATE DB_CURLSN DB_DIRECT
+ DB_EXTENT DB_GETREC DB_NOCOPY DB_NOMMAP DB_NOSYNC DB_RDONLY DB_RECNUM
+ DB_THREAD DB_VERIFY */
+ /* Offset 7 gives the best switch position. */
+ switch (name[7]) {
+ case 'A':
+ if (memEQ(name, "DB_NOMMAP", 9)) {
+ /* ^ */
+#ifdef DB_NOMMAP
+ *iv_return = DB_NOMMAP;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_THREAD", 9)) {
+ /* ^ */
+#ifdef DB_THREAD
+ *iv_return = DB_THREAD;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'C':
+ if (memEQ(name, "DB_DIRECT", 9)) {
+ /* ^ */
+#ifdef DB_DIRECT
+ *iv_return = DB_DIRECT;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'E':
+ if (memEQ(name, "DB_GETREC", 9)) {
+ /* ^ */
+#ifdef DB_GETREC
+ *iv_return = DB_GETREC;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'F':
+ if (memEQ(name, "DB_VERIFY", 9)) {
+ /* ^ */
+#ifdef DB_VERIFY
+ *iv_return = DB_VERIFY;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'I':
+ if (memEQ(name, "DB_COMMIT", 9)) {
+ /* ^ */
+#ifdef DB_COMMIT
+ *iv_return = DB_COMMIT;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'L':
+ if (memEQ(name, "DB_RDONLY", 9)) {
+ /* ^ */
+#ifdef DB_RDONLY
+ *iv_return = DB_RDONLY;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'N':
+ if (memEQ(name, "DB_APPEND", 9)) {
+ /* ^ */
+#ifdef DB_APPEND
+ *iv_return = DB_APPEND;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_CLIENT", 9)) {
+ /* ^ */
+#ifdef DB_CLIENT
+ *iv_return = DB_CLIENT;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_EXTENT", 9)) {
+ /* ^ */
+#ifdef DB_EXTENT
+ *iv_return = DB_EXTENT;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_NOSYNC", 9)) {
+ /* ^ */
+#ifdef DB_NOSYNC
+ *iv_return = DB_NOSYNC;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'P':
+ if (memEQ(name, "DB_NOCOPY", 9)) {
+ /* ^ */
+#ifdef DB_NOCOPY
+ *iv_return = DB_NOCOPY;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'R':
+ if (memEQ(name, "DB_BEFORE", 9)) {
+ /* ^ */
+#ifdef DB_BEFORE
+ *iv_return = DB_BEFORE;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'S':
+ if (memEQ(name, "DB_CURLSN", 9)) {
+ /* ^ */
+#ifdef DB_CURLSN
+ *iv_return = DB_CURLSN;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'T':
+ if (memEQ(name, "DB_CREATE", 9)) {
+ /* ^ */
+#ifdef DB_CREATE
+ *iv_return = DB_CREATE;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'U':
+ if (memEQ(name, "DB_RECNUM", 9)) {
+ /* ^ */
+#ifdef DB_RECNUM
+ *iv_return = DB_RECNUM;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ }
+ return PERL_constant_NOTFOUND;
+}
+
+static int
+constant_10 (pTHX_ const char *name, IV *iv_return) {
+ /* When generated this function returned values for the list of names given
+ here. However, subsequent manual editing may have added or removed some.
+ DB_CONSUME DB_CURRENT DB_DELETED DB_DUPSORT DB_ENCRYPT DB_ENV_CDB
+ DB_ENV_TXN DB_JOINENV DB_KEYLAST DB_NOPANIC DB_OK_HASH DB_PRIVATE
+ DB_PR_PAGE DB_RECOVER DB_SALVAGE DB_TIMEOUT DB_TXN_CKP DB_UNKNOWN
+ DB_UPGRADE */
+ /* Offset 8 gives the best switch position. */
+ switch (name[8]) {
+ case 'D':
+ if (memEQ(name, "DB_ENV_CDB", 10)) {
+ /* ^ */
+#ifdef DB_ENV_CDB
+ *iv_return = DB_ENV_CDB;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_UPGRADE", 10)) {
+ /* ^ */
+#ifdef DB_UPGRADE
+ *iv_return = DB_UPGRADE;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'E':
+ if (memEQ(name, "DB_DELETED", 10)) {
+ /* ^ */
+#ifdef DB_DELETED
+ *iv_return = DB_DELETED;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_RECOVER", 10)) {
+ /* ^ */
+#ifdef DB_RECOVER
+ *iv_return = DB_RECOVER;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'G':
+ if (memEQ(name, "DB_PR_PAGE", 10)) {
+ /* ^ */
+#ifdef DB_PR_PAGE
+ *iv_return = DB_PR_PAGE;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_SALVAGE", 10)) {
+ /* ^ */
+#ifdef DB_SALVAGE
+ *iv_return = DB_SALVAGE;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'I':
+ if (memEQ(name, "DB_NOPANIC", 10)) {
+ /* ^ */
+#ifdef DB_NOPANIC
+ *iv_return = DB_NOPANIC;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'K':
+ if (memEQ(name, "DB_TXN_CKP", 10)) {
+ /* ^ */
+#ifdef DB_TXN_CKP
+ *iv_return = DB_TXN_CKP;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'M':
+ if (memEQ(name, "DB_CONSUME", 10)) {
+ /* ^ */
+#ifdef DB_CONSUME
+ *iv_return = DB_CONSUME;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'N':
+ if (memEQ(name, "DB_CURRENT", 10)) {
+ /* ^ */
+#ifdef DB_CURRENT
+ *iv_return = DB_CURRENT;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_JOINENV", 10)) {
+ /* ^ */
+#ifdef DB_JOINENV
+ *iv_return = DB_JOINENV;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'P':
+ if (memEQ(name, "DB_ENCRYPT", 10)) {
+ /* ^ */
+#ifdef DB_ENCRYPT
+ *iv_return = DB_ENCRYPT;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'R':
+ if (memEQ(name, "DB_DUPSORT", 10)) {
+ /* ^ */
+#ifdef DB_DUPSORT
+ *iv_return = DB_DUPSORT;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'S':
+ if (memEQ(name, "DB_KEYLAST", 10)) {
+ /* ^ */
+#ifdef DB_KEYLAST
+ *iv_return = DB_KEYLAST;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_OK_HASH", 10)) {
+ /* ^ */
+#ifdef DB_OK_HASH
+ *iv_return = DB_OK_HASH;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'T':
+ if (memEQ(name, "DB_PRIVATE", 10)) {
+ /* ^ */
+#ifdef DB_PRIVATE
+ *iv_return = DB_PRIVATE;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'U':
+ if (memEQ(name, "DB_TIMEOUT", 10)) {
+ /* ^ */
+#ifdef DB_TIMEOUT
+ *iv_return = DB_TIMEOUT;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'W':
+ if (memEQ(name, "DB_UNKNOWN", 10)) {
+ /* ^ */
+#if (DB_VERSION_MAJOR > 2) || \
+ (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR > 0) || \
+ (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR == 0 && \
+ DB_VERSION_PATCH >= 0)
+ *iv_return = DB_UNKNOWN;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'X':
+ if (memEQ(name, "DB_ENV_TXN", 10)) {
+ /* ^ */
+#ifdef DB_ENV_TXN
+ *iv_return = DB_ENV_TXN;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ }
+ return PERL_constant_NOTFOUND;
+}
+
+static int
+constant_11 (pTHX_ const char *name, IV *iv_return) {
+ /* When generated this function returned values for the list of names given
+ here. However, subsequent manual editing may have added or removed some.
+ DB_APP_INIT DB_ARCH_ABS DB_ARCH_LOG DB_FIXEDLEN DB_GET_BOTH DB_INIT_CDB
+ DB_INIT_LOG DB_INIT_TXN DB_KEYEMPTY DB_KEYEXIST DB_KEYFIRST DB_LOCKDOWN
+ DB_LOCK_GET DB_LOCK_PUT DB_LOGMAGIC DB_LOG_DISK DB_MULTIPLE DB_NEXT_DUP
+ DB_NOSERVER DB_NOTFOUND DB_OK_BTREE DB_OK_QUEUE DB_OK_RECNO DB_POSITION
+ DB_QAMMAGIC DB_RENUMBER DB_SNAPSHOT DB_TRUNCATE DB_TXNMAGIC DB_TXN_LOCK
+ DB_TXN_REDO DB_TXN_SYNC DB_TXN_UNDO DB_WRNOSYNC DB_YIELDCPU */
+ /* Offset 8 gives the best switch position. */
+ switch (name[8]) {
+ case 'A':
+ if (memEQ(name, "DB_ARCH_ABS", 11)) {
+ /* ^ */
+#ifdef DB_ARCH_ABS
+ *iv_return = DB_ARCH_ABS;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_TRUNCATE", 11)) {
+ /* ^ */
+#ifdef DB_TRUNCATE
+ *iv_return = DB_TRUNCATE;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'B':
+ if (memEQ(name, "DB_RENUMBER", 11)) {
+ /* ^ */
+#ifdef DB_RENUMBER
+ *iv_return = DB_RENUMBER;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'C':
+ if (memEQ(name, "DB_INIT_CDB", 11)) {
+ /* ^ */
+#ifdef DB_INIT_CDB
+ *iv_return = DB_INIT_CDB;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_OK_RECNO", 11)) {
+ /* ^ */
+#ifdef DB_OK_RECNO
+ *iv_return = DB_OK_RECNO;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_YIELDCPU", 11)) {
+ /* ^ */
+#ifdef DB_YIELDCPU
+ *iv_return = DB_YIELDCPU;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'D':
+ if (memEQ(name, "DB_NEXT_DUP", 11)) {
+ /* ^ */
+#ifdef DB_NEXT_DUP
+ *iv_return = DB_NEXT_DUP;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'E':
+ if (memEQ(name, "DB_OK_QUEUE", 11)) {
+ /* ^ */
+#ifdef DB_OK_QUEUE
+ *iv_return = DB_OK_QUEUE;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_TXN_REDO", 11)) {
+ /* ^ */
+#ifdef DB_TXN_REDO
+ *iv_return = DB_TXN_REDO;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'G':
+ if (memEQ(name, "DB_LOCK_GET", 11)) {
+ /* ^ */
+#if (DB_VERSION_MAJOR > 2) || \
+ (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR > 0) || \
+ (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR == 0 && \
+ DB_VERSION_PATCH >= 0)
+ *iv_return = DB_LOCK_GET;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_LOGMAGIC", 11)) {
+ /* ^ */
+#ifdef DB_LOGMAGIC
+ *iv_return = DB_LOGMAGIC;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_QAMMAGIC", 11)) {
+ /* ^ */
+#ifdef DB_QAMMAGIC
+ *iv_return = DB_QAMMAGIC;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_TXNMAGIC", 11)) {
+ /* ^ */
+#ifdef DB_TXNMAGIC
+ *iv_return = DB_TXNMAGIC;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'H':
+ if (memEQ(name, "DB_SNAPSHOT", 11)) {
+ /* ^ */
+#ifdef DB_SNAPSHOT
+ *iv_return = DB_SNAPSHOT;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'I':
+ if (memEQ(name, "DB_KEYEXIST", 11)) {
+ /* ^ */
+#ifdef DB_KEYEXIST
+ *iv_return = DB_KEYEXIST;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_LOG_DISK", 11)) {
+ /* ^ */
+#ifdef DB_LOG_DISK
+ *iv_return = DB_LOG_DISK;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_POSITION", 11)) {
+ /* ^ */
+#ifdef DB_POSITION
+ *iv_return = DB_POSITION;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'L':
+ if (memEQ(name, "DB_ARCH_LOG", 11)) {
+ /* ^ */
+#ifdef DB_ARCH_LOG
+ *iv_return = DB_ARCH_LOG;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_FIXEDLEN", 11)) {
+ /* ^ */
+#ifdef DB_FIXEDLEN
+ *iv_return = DB_FIXEDLEN;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_INIT_LOG", 11)) {
+ /* ^ */
+#ifdef DB_INIT_LOG
+ *iv_return = DB_INIT_LOG;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'N':
+ if (memEQ(name, "DB_APP_INIT", 11)) {
+ /* ^ */
+#ifdef DB_APP_INIT
+ *iv_return = DB_APP_INIT;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_TXN_UNDO", 11)) {
+ /* ^ */
+#ifdef DB_TXN_UNDO
+ *iv_return = DB_TXN_UNDO;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'O':
+ if (memEQ(name, "DB_GET_BOTH", 11)) {
+ /* ^ */
+#ifdef DB_GET_BOTH
+ *iv_return = DB_GET_BOTH;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_LOCKDOWN", 11)) {
+ /* ^ */
+#ifdef DB_LOCKDOWN
+ *iv_return = DB_LOCKDOWN;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_TXN_LOCK", 11)) {
+ /* ^ */
+#ifdef DB_TXN_LOCK
+ *iv_return = DB_TXN_LOCK;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'P':
+ if (memEQ(name, "DB_KEYEMPTY", 11)) {
+ /* ^ */
+#ifdef DB_KEYEMPTY
+ *iv_return = DB_KEYEMPTY;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_LOCK_PUT", 11)) {
+ /* ^ */
+#if (DB_VERSION_MAJOR > 2) || \
+ (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR > 0) || \
+ (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR == 0 && \
+ DB_VERSION_PATCH >= 0)
+ *iv_return = DB_LOCK_PUT;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_MULTIPLE", 11)) {
+ /* ^ */
+#ifdef DB_MULTIPLE
+ *iv_return = DB_MULTIPLE;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'R':
+ if (memEQ(name, "DB_KEYFIRST", 11)) {
+ /* ^ */
+#ifdef DB_KEYFIRST
+ *iv_return = DB_KEYFIRST;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_OK_BTREE", 11)) {
+ /* ^ */
+#ifdef DB_OK_BTREE
+ *iv_return = DB_OK_BTREE;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'T':
+ if (memEQ(name, "DB_INIT_TXN", 11)) {
+ /* ^ */
+#ifdef DB_INIT_TXN
+ *iv_return = DB_INIT_TXN;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'U':
+ if (memEQ(name, "DB_NOTFOUND", 11)) {
+ /* ^ */
+#ifdef DB_NOTFOUND
+ *iv_return = DB_NOTFOUND;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'V':
+ if (memEQ(name, "DB_NOSERVER", 11)) {
+ /* ^ */
+#ifdef DB_NOSERVER
+ *iv_return = DB_NOSERVER;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'Y':
+ if (memEQ(name, "DB_TXN_SYNC", 11)) {
+ /* ^ */
+#ifdef DB_TXN_SYNC
+ *iv_return = DB_TXN_SYNC;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_WRNOSYNC", 11)) {
+ /* ^ */
+#ifdef DB_WRNOSYNC
+ *iv_return = DB_WRNOSYNC;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ }
+ return PERL_constant_NOTFOUND;
+}
+
+static int
+constant_12 (pTHX_ const char *name, IV *iv_return) {
+ /* When generated this function returned values for the list of names given
+ here. However, subsequent manual editing may have added or removed some.
+ DB_ARCH_DATA DB_CDB_ALLDB DB_CL_WRITER DB_DELIMITER DB_DIRECT_DB
+ DB_DUPCURSOR DB_ENV_FATAL DB_FAST_STAT DB_GET_BOTHC DB_GET_RECNO
+ DB_HASHMAGIC DB_INIT_LOCK DB_JOIN_ITEM DB_LOCKMAGIC DB_LOCK_DUMP
+ DB_LOCK_RW_N DB_LOGOLDVER DB_MAX_PAGES DB_MPOOL_NEW DB_NEEDSPLIT
+ DB_NODUPDATA DB_NOLOCKING DB_NORECURSE DB_OVERWRITE DB_PAGEYIELD
+ DB_PAGE_LOCK DB_PERMANENT DB_POSITIONI DB_PRINTABLE DB_QAMOLDVER
+ DB_SET_RANGE DB_SET_RECNO DB_SWAPBYTES DB_TEMPORARY DB_TXN_ABORT
+ DB_TXN_APPLY DB_TXN_PRINT DB_WRITELOCK DB_WRITEOPEN DB_XA_CREATE */
+ /* Offset 3 gives the best switch position. */
+ switch (name[3]) {
+ case 'A':
+ if (memEQ(name, "DB_ARCH_DATA", 12)) {
+ /* ^ */
+#ifdef DB_ARCH_DATA
+ *iv_return = DB_ARCH_DATA;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'C':
+ if (memEQ(name, "DB_CDB_ALLDB", 12)) {
+ /* ^ */
+#ifdef DB_CDB_ALLDB
+ *iv_return = DB_CDB_ALLDB;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_CL_WRITER", 12)) {
+ /* ^ */
+#ifdef DB_CL_WRITER
+ *iv_return = DB_CL_WRITER;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'D':
+ if (memEQ(name, "DB_DELIMITER", 12)) {
+ /* ^ */
+#ifdef DB_DELIMITER
+ *iv_return = DB_DELIMITER;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_DIRECT_DB", 12)) {
+ /* ^ */
+#ifdef DB_DIRECT_DB
+ *iv_return = DB_DIRECT_DB;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_DUPCURSOR", 12)) {
+ /* ^ */
+#ifdef DB_DUPCURSOR
+ *iv_return = DB_DUPCURSOR;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'E':
+ if (memEQ(name, "DB_ENV_FATAL", 12)) {
+ /* ^ */
+#ifdef DB_ENV_FATAL
+ *iv_return = DB_ENV_FATAL;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'F':
+ if (memEQ(name, "DB_FAST_STAT", 12)) {
+ /* ^ */
+#ifdef DB_FAST_STAT
+ *iv_return = DB_FAST_STAT;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'G':
+ if (memEQ(name, "DB_GET_BOTHC", 12)) {
+ /* ^ */
+#ifdef DB_GET_BOTHC
+ *iv_return = DB_GET_BOTHC;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_GET_RECNO", 12)) {
+ /* ^ */
+#ifdef DB_GET_RECNO
+ *iv_return = DB_GET_RECNO;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'H':
+ if (memEQ(name, "DB_HASHMAGIC", 12)) {
+ /* ^ */
+#ifdef DB_HASHMAGIC
+ *iv_return = DB_HASHMAGIC;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'I':
+ if (memEQ(name, "DB_INIT_LOCK", 12)) {
+ /* ^ */
+#ifdef DB_INIT_LOCK
+ *iv_return = DB_INIT_LOCK;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'J':
+ if (memEQ(name, "DB_JOIN_ITEM", 12)) {
+ /* ^ */
+#ifdef DB_JOIN_ITEM
+ *iv_return = DB_JOIN_ITEM;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'L':
+ if (memEQ(name, "DB_LOCKMAGIC", 12)) {
+ /* ^ */
+#ifdef DB_LOCKMAGIC
+ *iv_return = DB_LOCKMAGIC;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_LOCK_DUMP", 12)) {
+ /* ^ */
+#if (DB_VERSION_MAJOR > 2) || \
+ (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR > 0) || \
+ (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR == 0 && \
+ DB_VERSION_PATCH >= 0)
+ *iv_return = DB_LOCK_DUMP;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_LOCK_RW_N", 12)) {
+ /* ^ */
+#ifdef DB_LOCK_RW_N
+ *iv_return = DB_LOCK_RW_N;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_LOGOLDVER", 12)) {
+ /* ^ */
+#ifdef DB_LOGOLDVER
+ *iv_return = DB_LOGOLDVER;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'M':
+ if (memEQ(name, "DB_MAX_PAGES", 12)) {
+ /* ^ */
+#ifdef DB_MAX_PAGES
+ *iv_return = DB_MAX_PAGES;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_MPOOL_NEW", 12)) {
+ /* ^ */
+#ifdef DB_MPOOL_NEW
+ *iv_return = DB_MPOOL_NEW;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'N':
+ if (memEQ(name, "DB_NEEDSPLIT", 12)) {
+ /* ^ */
+#ifdef DB_NEEDSPLIT
+ *iv_return = DB_NEEDSPLIT;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_NODUPDATA", 12)) {
+ /* ^ */
+#ifdef DB_NODUPDATA
+ *iv_return = DB_NODUPDATA;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_NOLOCKING", 12)) {
+ /* ^ */
+#ifdef DB_NOLOCKING
+ *iv_return = DB_NOLOCKING;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_NORECURSE", 12)) {
+ /* ^ */
+#ifdef DB_NORECURSE
+ *iv_return = DB_NORECURSE;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'O':
+ if (memEQ(name, "DB_OVERWRITE", 12)) {
+ /* ^ */
+#ifdef DB_OVERWRITE
+ *iv_return = DB_OVERWRITE;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'P':
+ if (memEQ(name, "DB_PAGEYIELD", 12)) {
+ /* ^ */
+#ifdef DB_PAGEYIELD
+ *iv_return = DB_PAGEYIELD;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_PAGE_LOCK", 12)) {
+ /* ^ */
+#ifdef DB_PAGE_LOCK
+ *iv_return = DB_PAGE_LOCK;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_PERMANENT", 12)) {
+ /* ^ */
+#ifdef DB_PERMANENT
+ *iv_return = DB_PERMANENT;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_POSITIONI", 12)) {
+ /* ^ */
+#ifdef DB_POSITIONI
+ *iv_return = DB_POSITIONI;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_PRINTABLE", 12)) {
+ /* ^ */
+#ifdef DB_PRINTABLE
+ *iv_return = DB_PRINTABLE;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'Q':
+ if (memEQ(name, "DB_QAMOLDVER", 12)) {
+ /* ^ */
+#ifdef DB_QAMOLDVER
+ *iv_return = DB_QAMOLDVER;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'S':
+ if (memEQ(name, "DB_SET_RANGE", 12)) {
+ /* ^ */
+#ifdef DB_SET_RANGE
+ *iv_return = DB_SET_RANGE;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_SET_RECNO", 12)) {
+ /* ^ */
+#ifdef DB_SET_RECNO
+ *iv_return = DB_SET_RECNO;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_SWAPBYTES", 12)) {
+ /* ^ */
+#ifdef DB_SWAPBYTES
+ *iv_return = DB_SWAPBYTES;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'T':
+ if (memEQ(name, "DB_TEMPORARY", 12)) {
+ /* ^ */
+#ifdef DB_TEMPORARY
+ *iv_return = DB_TEMPORARY;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_TXN_ABORT", 12)) {
+ /* ^ */
+#if (DB_VERSION_MAJOR > 3) || \
+ (DB_VERSION_MAJOR == 3 && DB_VERSION_MINOR > 1) || \
+ (DB_VERSION_MAJOR == 3 && DB_VERSION_MINOR == 1 && \
+ DB_VERSION_PATCH >= 12)
+ *iv_return = DB_TXN_ABORT;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_TXN_APPLY", 12)) {
+ /* ^ */
+#if (DB_VERSION_MAJOR > 4) || \
+ (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR > 0) || \
+ (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR == 0 && \
+ DB_VERSION_PATCH >= 7)
+ *iv_return = DB_TXN_APPLY;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_TXN_PRINT", 12)) {
+ /* ^ */
+#if (DB_VERSION_MAJOR > 4) || \
+ (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR > 1) || \
+ (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR == 1 && \
+ DB_VERSION_PATCH >= 17)
+ *iv_return = DB_TXN_PRINT;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'W':
+ if (memEQ(name, "DB_WRITELOCK", 12)) {
+ /* ^ */
+#ifdef DB_WRITELOCK
+ *iv_return = DB_WRITELOCK;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_WRITEOPEN", 12)) {
+ /* ^ */
+#ifdef DB_WRITEOPEN
+ *iv_return = DB_WRITEOPEN;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'X':
+ if (memEQ(name, "DB_XA_CREATE", 12)) {
+ /* ^ */
+#ifdef DB_XA_CREATE
+ *iv_return = DB_XA_CREATE;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ }
+ return PERL_constant_NOTFOUND;
+}
+
+static int
+constant_13 (pTHX_ const char *name, IV *iv_return) {
+ /* When generated this function returned values for the list of names given
+ here. However, subsequent manual editing may have added or removed some.
+ DB_AGGRESSIVE DB_BTREEMAGIC DB_CHECKPOINT DB_DIRECT_LOG DB_DIRTY_READ
+ DB_DONOTINDEX DB_ENV_CREATE DB_ENV_NOMMAP DB_ENV_THREAD DB_HASHOLDVER
+ DB_INCOMPLETE DB_INIT_MPOOL DB_LOCK_NORUN DB_LOCK_RIW_N DB_LOCK_TRADE
+ DB_LOGVERSION DB_LOG_LOCKED DB_MPOOL_LAST DB_MUTEXDEBUG DB_MUTEXLOCKS
+ DB_NEXT_NODUP DB_NOORDERCHK DB_PREV_NODUP DB_PR_HEADERS DB_QAMVERSION
+ DB_RDWRMASTER DB_REGISTERED DB_REP_CLIENT DB_REP_MASTER DB_SEQUENTIAL
+ DB_STAT_CLEAR DB_SYSTEM_MEM DB_TXNVERSION DB_TXN_NOSYNC DB_TXN_NOWAIT
+ DB_VERIFY_BAD */
+ /* Offset 5 gives the best switch position. */
+ switch (name[5]) {
+ case 'A':
+ if (memEQ(name, "DB_STAT_CLEAR", 13)) {
+ /* ^ */
+#ifdef DB_STAT_CLEAR
+ *iv_return = DB_STAT_CLEAR;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'C':
+ if (memEQ(name, "DB_INCOMPLETE", 13)) {
+ /* ^ */
+#ifdef DB_INCOMPLETE
+ *iv_return = DB_INCOMPLETE;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_LOCK_NORUN", 13)) {
+ /* ^ */
+#ifdef DB_LOCK_NORUN
+ *iv_return = DB_LOCK_NORUN;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_LOCK_RIW_N", 13)) {
+ /* ^ */
+#ifdef DB_LOCK_RIW_N
+ *iv_return = DB_LOCK_RIW_N;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_LOCK_TRADE", 13)) {
+ /* ^ */
+#if (DB_VERSION_MAJOR > 4) || \
+ (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR > 1) || \
+ (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR == 1 && \
+ DB_VERSION_PATCH >= 17)
+ *iv_return = DB_LOCK_TRADE;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'E':
+ if (memEQ(name, "DB_CHECKPOINT", 13)) {
+ /* ^ */
+#ifdef DB_CHECKPOINT
+ *iv_return = DB_CHECKPOINT;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_PREV_NODUP", 13)) {
+ /* ^ */
+#ifdef DB_PREV_NODUP
+ *iv_return = DB_PREV_NODUP;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'G':
+ if (memEQ(name, "DB_AGGRESSIVE", 13)) {
+ /* ^ */
+#ifdef DB_AGGRESSIVE
+ *iv_return = DB_AGGRESSIVE;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_LOGVERSION", 13)) {
+ /* ^ */
+#ifdef DB_LOGVERSION
+ *iv_return = DB_LOGVERSION;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_LOG_LOCKED", 13)) {
+ /* ^ */
+#ifdef DB_LOG_LOCKED
+ *iv_return = DB_LOG_LOCKED;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_REGISTERED", 13)) {
+ /* ^ */
+#ifdef DB_REGISTERED
+ *iv_return = DB_REGISTERED;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'I':
+ if (memEQ(name, "DB_INIT_MPOOL", 13)) {
+ /* ^ */
+#ifdef DB_INIT_MPOOL
+ *iv_return = DB_INIT_MPOOL;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'M':
+ if (memEQ(name, "DB_QAMVERSION", 13)) {
+ /* ^ */
+#ifdef DB_QAMVERSION
+ *iv_return = DB_QAMVERSION;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'N':
+ if (memEQ(name, "DB_DONOTINDEX", 13)) {
+ /* ^ */
+#ifdef DB_DONOTINDEX
+ *iv_return = DB_DONOTINDEX;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_TXNVERSION", 13)) {
+ /* ^ */
+#ifdef DB_TXNVERSION
+ *iv_return = DB_TXNVERSION;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_TXN_NOSYNC", 13)) {
+ /* ^ */
+#ifdef DB_TXN_NOSYNC
+ *iv_return = DB_TXN_NOSYNC;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_TXN_NOWAIT", 13)) {
+ /* ^ */
+#ifdef DB_TXN_NOWAIT
+ *iv_return = DB_TXN_NOWAIT;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'O':
+ if (memEQ(name, "DB_MPOOL_LAST", 13)) {
+ /* ^ */
+#ifdef DB_MPOOL_LAST
+ *iv_return = DB_MPOOL_LAST;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_NOORDERCHK", 13)) {
+ /* ^ */
+#ifdef DB_NOORDERCHK
+ *iv_return = DB_NOORDERCHK;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'P':
+ if (memEQ(name, "DB_REP_CLIENT", 13)) {
+ /* ^ */
+#ifdef DB_REP_CLIENT
+ *iv_return = DB_REP_CLIENT;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_REP_MASTER", 13)) {
+ /* ^ */
+#ifdef DB_REP_MASTER
+ *iv_return = DB_REP_MASTER;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'Q':
+ if (memEQ(name, "DB_SEQUENTIAL", 13)) {
+ /* ^ */
+#ifdef DB_SEQUENTIAL
+ *iv_return = DB_SEQUENTIAL;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'R':
+ if (memEQ(name, "DB_BTREEMAGIC", 13)) {
+ /* ^ */
+#ifdef DB_BTREEMAGIC
+ *iv_return = DB_BTREEMAGIC;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_DIRECT_LOG", 13)) {
+ /* ^ */
+#ifdef DB_DIRECT_LOG
+ *iv_return = DB_DIRECT_LOG;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_DIRTY_READ", 13)) {
+ /* ^ */
+#ifdef DB_DIRTY_READ
+ *iv_return = DB_DIRTY_READ;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_VERIFY_BAD", 13)) {
+ /* ^ */
+#ifdef DB_VERIFY_BAD
+ *iv_return = DB_VERIFY_BAD;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'S':
+ if (memEQ(name, "DB_HASHOLDVER", 13)) {
+ /* ^ */
+#ifdef DB_HASHOLDVER
+ *iv_return = DB_HASHOLDVER;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_SYSTEM_MEM", 13)) {
+ /* ^ */
+#ifdef DB_SYSTEM_MEM
+ *iv_return = DB_SYSTEM_MEM;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'T':
+ if (memEQ(name, "DB_MUTEXDEBUG", 13)) {
+ /* ^ */
+#ifdef DB_MUTEXDEBUG
+ *iv_return = DB_MUTEXDEBUG;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_MUTEXLOCKS", 13)) {
+ /* ^ */
+#ifdef DB_MUTEXLOCKS
+ *iv_return = DB_MUTEXLOCKS;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'V':
+ if (memEQ(name, "DB_ENV_CREATE", 13)) {
+ /* ^ */
+#ifdef DB_ENV_CREATE
+ *iv_return = DB_ENV_CREATE;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_ENV_NOMMAP", 13)) {
+ /* ^ */
+#ifdef DB_ENV_NOMMAP
+ *iv_return = DB_ENV_NOMMAP;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_ENV_THREAD", 13)) {
+ /* ^ */
+#ifdef DB_ENV_THREAD
+ *iv_return = DB_ENV_THREAD;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'W':
+ if (memEQ(name, "DB_RDWRMASTER", 13)) {
+ /* ^ */
+#ifdef DB_RDWRMASTER
+ *iv_return = DB_RDWRMASTER;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'X':
+ if (memEQ(name, "DB_NEXT_NODUP", 13)) {
+ /* ^ */
+#ifdef DB_NEXT_NODUP
+ *iv_return = DB_NEXT_NODUP;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case '_':
+ if (memEQ(name, "DB_PR_HEADERS", 13)) {
+ /* ^ */
+#ifdef DB_PR_HEADERS
+ *iv_return = DB_PR_HEADERS;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ }
+ return PERL_constant_NOTFOUND;
+}
+
+static int
+constant_14 (pTHX_ const char *name, IV *iv_return) {
+ /* When generated this function returned values for the list of names given
+ here. However, subsequent manual editing may have added or removed some.
+ DB_AUTO_COMMIT DB_BTREEOLDVER DB_CHKSUM_SHA1 DB_EID_INVALID DB_ENCRYPT_AES
+ DB_ENV_APPINIT DB_ENV_DBLOCAL DB_ENV_LOCKING DB_ENV_LOGGING DB_ENV_NOPANIC
+ DB_ENV_PRIVATE DB_FILE_ID_LEN DB_HANDLE_LOCK DB_HASHVERSION DB_INVALID_EID
+ DB_JOIN_NOSORT DB_LOCKVERSION DB_LOCK_EXPIRE DB_LOCK_NOWAIT DB_LOCK_OLDEST
+ DB_LOCK_RANDOM DB_LOCK_RECORD DB_LOCK_REMOVE DB_LOCK_SWITCH DB_MAX_RECORDS
+ DB_MPOOL_CLEAN DB_MPOOL_DIRTY DB_NOOVERWRITE DB_NOSERVER_ID DB_ODDFILESIZE
+ DB_OLD_VERSION DB_OPEN_CALLED DB_RECORDCOUNT DB_RECORD_LOCK DB_REGION_ANON
+ DB_REGION_INIT DB_REGION_NAME DB_RENAMEMAGIC DB_REP_NEWSITE DB_REP_UNAVAIL
+ DB_REVSPLITOFF DB_RUNRECOVERY DB_SET_TXN_NOW DB_USE_ENVIRON DB_WRITECURSOR
+ DB_XIDDATASIZE */
+ /* Offset 9 gives the best switch position. */
+ switch (name[9]) {
+ case 'A':
+ if (memEQ(name, "DB_LOCK_RANDOM", 14)) {
+ /* ^ */
+#ifdef DB_LOCK_RANDOM
+ *iv_return = DB_LOCK_RANDOM;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_OPEN_CALLED", 14)) {
+ /* ^ */
+#ifdef DB_OPEN_CALLED
+ *iv_return = DB_OPEN_CALLED;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_REP_UNAVAIL", 14)) {
+ /* ^ */
+#ifdef DB_REP_UNAVAIL
+ *iv_return = DB_REP_UNAVAIL;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_XIDDATASIZE", 14)) {
+ /* ^ */
+#ifdef DB_XIDDATASIZE
+ *iv_return = DB_XIDDATASIZE;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'C':
+ if (memEQ(name, "DB_ENV_LOCKING", 14)) {
+ /* ^ */
+#ifdef DB_ENV_LOCKING
+ *iv_return = DB_ENV_LOCKING;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_MAX_RECORDS", 14)) {
+ /* ^ */
+#ifdef DB_MAX_RECORDS
+ *iv_return = DB_MAX_RECORDS;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_MPOOL_CLEAN", 14)) {
+ /* ^ */
+#ifdef DB_MPOOL_CLEAN
+ *iv_return = DB_MPOOL_CLEAN;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_RECORDCOUNT", 14)) {
+ /* ^ */
+#ifdef DB_RECORDCOUNT
+ *iv_return = DB_RECORDCOUNT;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'D':
+ if (memEQ(name, "DB_FILE_ID_LEN", 14)) {
+ /* ^ */
+#ifdef DB_FILE_ID_LEN
+ *iv_return = DB_FILE_ID_LEN;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_INVALID_EID", 14)) {
+ /* ^ */
+#ifdef DB_INVALID_EID
+ *iv_return = DB_INVALID_EID;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_MPOOL_DIRTY", 14)) {
+ /* ^ */
+#ifdef DB_MPOOL_DIRTY
+ *iv_return = DB_MPOOL_DIRTY;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'E':
+ if (memEQ(name, "DB_LOCK_RECORD", 14)) {
+ /* ^ */
+#ifdef DB_LOCK_RECORD
+ *iv_return = DB_LOCK_RECORD;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_LOCK_REMOVE", 14)) {
+ /* ^ */
+#ifdef DB_LOCK_REMOVE
+ *iv_return = DB_LOCK_REMOVE;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_NOSERVER_ID", 14)) {
+ /* ^ */
+#ifdef DB_NOSERVER_ID
+ *iv_return = DB_NOSERVER_ID;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_ODDFILESIZE", 14)) {
+ /* ^ */
+#ifdef DB_ODDFILESIZE
+ *iv_return = DB_ODDFILESIZE;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'G':
+ if (memEQ(name, "DB_ENV_LOGGING", 14)) {
+ /* ^ */
+#ifdef DB_ENV_LOGGING
+ *iv_return = DB_ENV_LOGGING;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'I':
+ if (memEQ(name, "DB_ENV_PRIVATE", 14)) {
+ /* ^ */
+#ifdef DB_ENV_PRIVATE
+ *iv_return = DB_ENV_PRIVATE;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_REVSPLITOFF", 14)) {
+ /* ^ */
+#ifdef DB_REVSPLITOFF
+ *iv_return = DB_REVSPLITOFF;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'L':
+ if (memEQ(name, "DB_BTREEOLDVER", 14)) {
+ /* ^ */
+#ifdef DB_BTREEOLDVER
+ *iv_return = DB_BTREEOLDVER;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_ENV_DBLOCAL", 14)) {
+ /* ^ */
+#ifdef DB_ENV_DBLOCAL
+ *iv_return = DB_ENV_DBLOCAL;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_LOCK_OLDEST", 14)) {
+ /* ^ */
+#ifdef DB_LOCK_OLDEST
+ *iv_return = DB_LOCK_OLDEST;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'M':
+ if (memEQ(name, "DB_RENAMEMAGIC", 14)) {
+ /* ^ */
+#ifdef DB_RENAMEMAGIC
+ *iv_return = DB_RENAMEMAGIC;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'N':
+ if (memEQ(name, "DB_SET_TXN_NOW", 14)) {
+ /* ^ */
+#ifdef DB_SET_TXN_NOW
+ *iv_return = DB_SET_TXN_NOW;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'O':
+ if (memEQ(name, "DB_AUTO_COMMIT", 14)) {
+ /* ^ */
+#ifdef DB_AUTO_COMMIT
+ *iv_return = DB_AUTO_COMMIT;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_JOIN_NOSORT", 14)) {
+ /* ^ */
+#ifdef DB_JOIN_NOSORT
+ *iv_return = DB_JOIN_NOSORT;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_LOCK_NOWAIT", 14)) {
+ /* ^ */
+#ifdef DB_LOCK_NOWAIT
+ *iv_return = DB_LOCK_NOWAIT;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_RUNRECOVERY", 14)) {
+ /* ^ */
+#ifdef DB_RUNRECOVERY
+ *iv_return = DB_RUNRECOVERY;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'P':
+ if (memEQ(name, "DB_ENV_APPINIT", 14)) {
+ /* ^ */
+#ifdef DB_ENV_APPINIT
+ *iv_return = DB_ENV_APPINIT;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_ENV_NOPANIC", 14)) {
+ /* ^ */
+#ifdef DB_ENV_NOPANIC
+ *iv_return = DB_ENV_NOPANIC;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'R':
+ if (memEQ(name, "DB_HASHVERSION", 14)) {
+ /* ^ */
+#ifdef DB_HASHVERSION
+ *iv_return = DB_HASHVERSION;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_LOCKVERSION", 14)) {
+ /* ^ */
+#ifdef DB_LOCKVERSION
+ *iv_return = DB_LOCKVERSION;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_OLD_VERSION", 14)) {
+ /* ^ */
+#ifdef DB_OLD_VERSION
+ *iv_return = DB_OLD_VERSION;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'T':
+ if (memEQ(name, "DB_ENCRYPT_AES", 14)) {
+ /* ^ */
+#ifdef DB_ENCRYPT_AES
+ *iv_return = DB_ENCRYPT_AES;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'U':
+ if (memEQ(name, "DB_WRITECURSOR", 14)) {
+ /* ^ */
+#ifdef DB_WRITECURSOR
+ *iv_return = DB_WRITECURSOR;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'V':
+ if (memEQ(name, "DB_EID_INVALID", 14)) {
+ /* ^ */
+#ifdef DB_EID_INVALID
+ *iv_return = DB_EID_INVALID;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_USE_ENVIRON", 14)) {
+ /* ^ */
+#ifdef DB_USE_ENVIRON
+ *iv_return = DB_USE_ENVIRON;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'W':
+ if (memEQ(name, "DB_LOCK_SWITCH", 14)) {
+ /* ^ */
+#ifdef DB_LOCK_SWITCH
+ *iv_return = DB_LOCK_SWITCH;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_NOOVERWRITE", 14)) {
+ /* ^ */
+#ifdef DB_NOOVERWRITE
+ *iv_return = DB_NOOVERWRITE;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_REP_NEWSITE", 14)) {
+ /* ^ */
+#ifdef DB_REP_NEWSITE
+ *iv_return = DB_REP_NEWSITE;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'X':
+ if (memEQ(name, "DB_LOCK_EXPIRE", 14)) {
+ /* ^ */
+#ifdef DB_LOCK_EXPIRE
+ *iv_return = DB_LOCK_EXPIRE;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case '_':
+ if (memEQ(name, "DB_CHKSUM_SHA1", 14)) {
+ /* ^ */
+#ifdef DB_CHKSUM_SHA1
+ *iv_return = DB_CHKSUM_SHA1;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_HANDLE_LOCK", 14)) {
+ /* ^ */
+#ifdef DB_HANDLE_LOCK
+ *iv_return = DB_HANDLE_LOCK;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_RECORD_LOCK", 14)) {
+ /* ^ */
+#ifdef DB_RECORD_LOCK
+ *iv_return = DB_RECORD_LOCK;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_REGION_ANON", 14)) {
+ /* ^ */
+#ifdef DB_REGION_ANON
+ *iv_return = DB_REGION_ANON;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_REGION_INIT", 14)) {
+ /* ^ */
+#ifdef DB_REGION_INIT
+ *iv_return = DB_REGION_INIT;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_REGION_NAME", 14)) {
+ /* ^ */
+#ifdef DB_REGION_NAME
+ *iv_return = DB_REGION_NAME;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ }
+ return PERL_constant_NOTFOUND;
+}
+
+static int
+constant_15 (pTHX_ const char *name, IV *iv_return) {
+ /* When generated this function returned values for the list of names given
+ here. However, subsequent manual editing may have added or removed some.
+ DB_APPLY_LOGREG DB_BTREEVERSION DB_CONSUME_WAIT DB_ENV_LOCKDOWN
+ DB_ENV_PANIC_OK DB_ENV_YIELDCPU DB_LOCK_DEFAULT DB_LOCK_INHERIT
+ DB_LOCK_NOTHELD DB_LOCK_PUT_ALL DB_LOCK_PUT_OBJ DB_LOCK_TIMEOUT
+ DB_LOCK_UPGRADE DB_MPOOL_CREATE DB_MPOOL_EXTENT DB_MULTIPLE_KEY
+ DB_OPFLAGS_MASK DB_ORDERCHKONLY DB_PRIORITY_LOW DB_REGION_MAGIC
+ DB_REP_LOGSONLY DB_REP_OUTDATED DB_SURPRISE_KID DB_TEST_POSTLOG
+ DB_TEST_PREOPEN DB_TXN_GETPGNOS DB_TXN_LOCK_2PL DB_TXN_LOG_MASK
+ DB_TXN_LOG_REDO DB_TXN_LOG_UNDO DB_VERIFY_FATAL */
+ /* Offset 10 gives the best switch position. */
+ switch (name[10]) {
+ case 'D':
+ if (memEQ(name, "DB_REP_OUTDATED", 15)) {
+ /* ^ */
+#ifdef DB_REP_OUTDATED
+ *iv_return = DB_REP_OUTDATED;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'E':
+ if (memEQ(name, "DB_MULTIPLE_KEY", 15)) {
+ /* ^ */
+#ifdef DB_MULTIPLE_KEY
+ *iv_return = DB_MULTIPLE_KEY;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_SURPRISE_KID", 15)) {
+ /* ^ */
+#ifdef DB_SURPRISE_KID
+ *iv_return = DB_SURPRISE_KID;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_TEST_PREOPEN", 15)) {
+ /* ^ */
+#ifdef DB_TEST_PREOPEN
+ *iv_return = DB_TEST_PREOPEN;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'F':
+ if (memEQ(name, "DB_LOCK_DEFAULT", 15)) {
+ /* ^ */
+#ifdef DB_LOCK_DEFAULT
+ *iv_return = DB_LOCK_DEFAULT;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_VERIFY_FATAL", 15)) {
+ /* ^ */
+#ifdef DB_VERIFY_FATAL
+ *iv_return = DB_VERIFY_FATAL;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'G':
+ if (memEQ(name, "DB_LOCK_UPGRADE", 15)) {
+ /* ^ */
+#ifdef DB_LOCK_UPGRADE
+ *iv_return = DB_LOCK_UPGRADE;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'H':
+ if (memEQ(name, "DB_LOCK_INHERIT", 15)) {
+ /* ^ */
+#if (DB_VERSION_MAJOR > 2) || \
+ (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR > 7) || \
+ (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR == 7 && \
+ DB_VERSION_PATCH >= 1)
+ *iv_return = DB_LOCK_INHERIT;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'I':
+ if (memEQ(name, "DB_ENV_PANIC_OK", 15)) {
+ /* ^ */
+#ifdef DB_ENV_PANIC_OK
+ *iv_return = DB_ENV_PANIC_OK;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'K':
+ if (memEQ(name, "DB_ENV_LOCKDOWN", 15)) {
+ /* ^ */
+#ifdef DB_ENV_LOCKDOWN
+ *iv_return = DB_ENV_LOCKDOWN;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_ORDERCHKONLY", 15)) {
+ /* ^ */
+#ifdef DB_ORDERCHKONLY
+ *iv_return = DB_ORDERCHKONLY;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_TXN_LOCK_2PL", 15)) {
+ /* ^ */
+#ifdef DB_TXN_LOCK_2PL
+ *iv_return = DB_TXN_LOCK_2PL;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'L':
+ if (memEQ(name, "DB_ENV_YIELDCPU", 15)) {
+ /* ^ */
+#ifdef DB_ENV_YIELDCPU
+ *iv_return = DB_ENV_YIELDCPU;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'M':
+ if (memEQ(name, "DB_LOCK_TIMEOUT", 15)) {
+ /* ^ */
+#if (DB_VERSION_MAJOR > 4) || \
+ (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR > 0) || \
+ (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR == 0 && \
+ DB_VERSION_PATCH >= 7)
+ *iv_return = DB_LOCK_TIMEOUT;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_REGION_MAGIC", 15)) {
+ /* ^ */
+#ifdef DB_REGION_MAGIC
+ *iv_return = DB_REGION_MAGIC;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'O':
+ if (memEQ(name, "DB_APPLY_LOGREG", 15)) {
+ /* ^ */
+#ifdef DB_APPLY_LOGREG
+ *iv_return = DB_APPLY_LOGREG;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'P':
+ if (memEQ(name, "DB_TXN_GETPGNOS", 15)) {
+ /* ^ */
+#if (DB_VERSION_MAJOR > 4) || \
+ (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR > 1) || \
+ (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR == 1 && \
+ DB_VERSION_PATCH >= 17)
+ *iv_return = DB_TXN_GETPGNOS;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'R':
+ if (memEQ(name, "DB_BTREEVERSION", 15)) {
+ /* ^ */
+#ifdef DB_BTREEVERSION
+ *iv_return = DB_BTREEVERSION;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_MPOOL_CREATE", 15)) {
+ /* ^ */
+#ifdef DB_MPOOL_CREATE
+ *iv_return = DB_MPOOL_CREATE;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'S':
+ if (memEQ(name, "DB_REP_LOGSONLY", 15)) {
+ /* ^ */
+#ifdef DB_REP_LOGSONLY
+ *iv_return = DB_REP_LOGSONLY;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_TEST_POSTLOG", 15)) {
+ /* ^ */
+#ifdef DB_TEST_POSTLOG
+ *iv_return = DB_TEST_POSTLOG;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'T':
+ if (memEQ(name, "DB_LOCK_NOTHELD", 15)) {
+ /* ^ */
+#ifdef DB_LOCK_NOTHELD
+ *iv_return = DB_LOCK_NOTHELD;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_LOCK_PUT_ALL", 15)) {
+ /* ^ */
+#if (DB_VERSION_MAJOR > 2) || \
+ (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR > 0) || \
+ (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR == 0 && \
+ DB_VERSION_PATCH >= 0)
+ *iv_return = DB_LOCK_PUT_ALL;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_LOCK_PUT_OBJ", 15)) {
+ /* ^ */
+#if (DB_VERSION_MAJOR > 2) || \
+ (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR > 0) || \
+ (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR == 0 && \
+ DB_VERSION_PATCH >= 0)
+ *iv_return = DB_LOCK_PUT_OBJ;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'X':
+ if (memEQ(name, "DB_MPOOL_EXTENT", 15)) {
+ /* ^ */
+#ifdef DB_MPOOL_EXTENT
+ *iv_return = DB_MPOOL_EXTENT;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'Y':
+ if (memEQ(name, "DB_PRIORITY_LOW", 15)) {
+ /* ^ */
+#if (DB_VERSION_MAJOR > 4) || \
+ (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR > 1) || \
+ (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR == 1 && \
+ DB_VERSION_PATCH >= 17)
+ *iv_return = DB_PRIORITY_LOW;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case '_':
+ if (memEQ(name, "DB_CONSUME_WAIT", 15)) {
+ /* ^ */
+#ifdef DB_CONSUME_WAIT
+ *iv_return = DB_CONSUME_WAIT;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_OPFLAGS_MASK", 15)) {
+ /* ^ */
+#ifdef DB_OPFLAGS_MASK
+ *iv_return = DB_OPFLAGS_MASK;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_TXN_LOG_MASK", 15)) {
+ /* ^ */
+#ifdef DB_TXN_LOG_MASK
+ *iv_return = DB_TXN_LOG_MASK;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_TXN_LOG_REDO", 15)) {
+ /* ^ */
+#ifdef DB_TXN_LOG_REDO
+ *iv_return = DB_TXN_LOG_REDO;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_TXN_LOG_UNDO", 15)) {
+ /* ^ */
+#ifdef DB_TXN_LOG_UNDO
+ *iv_return = DB_TXN_LOG_UNDO;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ }
+ return PERL_constant_NOTFOUND;
+}
+
+static int
+constant_16 (pTHX_ const char *name, IV *iv_return) {
+ /* When generated this function returned values for the list of names given
+ here. However, subsequent manual editing may have added or removed some.
+ DB_BROADCAST_EID DB_CACHED_COUNTS DB_EID_BROADCAST DB_ENV_CDB_ALLDB
+ DB_ENV_DIRECT_DB DB_ENV_NOLOCKING DB_ENV_OVERWRITE DB_ENV_RPCCLIENT
+ DB_FCNTL_LOCKING DB_JAVA_CALLBACK DB_LOCK_CONFLICT DB_LOCK_DEADLOCK
+ DB_LOCK_MAXLOCKS DB_LOCK_MINLOCKS DB_LOCK_MINWRITE DB_LOCK_NOTEXIST
+ DB_LOCK_PUT_READ DB_LOCK_YOUNGEST DB_LOGC_BUF_SIZE DB_MPOOL_DISCARD
+ DB_MPOOL_PRIVATE DB_NOSERVER_HOME DB_PAGE_NOTFOUND DB_PRIORITY_HIGH
+ DB_RECOVER_FATAL DB_REP_DUPMASTER DB_REP_NEWMASTER DB_REP_PERMANENT
+ DB_SECONDARY_BAD DB_TEST_POSTOPEN DB_TEST_POSTSYNC DB_TXN_LOCK_MASK
+ DB_TXN_OPENFILES DB_VERB_CHKPOINT DB_VERB_DEADLOCK DB_VERB_RECOVERY
+ DB_VERB_WAITSFOR DB_VERSION_MAJOR DB_VERSION_MINOR DB_VERSION_PATCH
+ DB_VRFY_FLAGMASK */
+ /* Offset 12 gives the best switch position. */
+ switch (name[12]) {
+ case 'A':
+ if (memEQ(name, "DB_RECOVER_FATAL", 16)) {
+ /* ^ */
+#ifdef DB_RECOVER_FATAL
+ *iv_return = DB_RECOVER_FATAL;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_VERSION_MAJOR", 16)) {
+ /* ^ */
+#ifdef DB_VERSION_MAJOR
+ *iv_return = DB_VERSION_MAJOR;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_VERSION_PATCH", 16)) {
+ /* ^ */
+#ifdef DB_VERSION_PATCH
+ *iv_return = DB_VERSION_PATCH;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'B':
+ if (memEQ(name, "DB_JAVA_CALLBACK", 16)) {
+ /* ^ */
+#ifdef DB_JAVA_CALLBACK
+ *iv_return = DB_JAVA_CALLBACK;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'C':
+ if (memEQ(name, "DB_EID_BROADCAST", 16)) {
+ /* ^ */
+#ifdef DB_EID_BROADCAST
+ *iv_return = DB_EID_BROADCAST;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_MPOOL_DISCARD", 16)) {
+ /* ^ */
+#ifdef DB_MPOOL_DISCARD
+ *iv_return = DB_MPOOL_DISCARD;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'G':
+ if (memEQ(name, "DB_LOCK_YOUNGEST", 16)) {
+ /* ^ */
+#ifdef DB_LOCK_YOUNGEST
+ *iv_return = DB_LOCK_YOUNGEST;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'H':
+ if (memEQ(name, "DB_NOSERVER_HOME", 16)) {
+ /* ^ */
+#ifdef DB_NOSERVER_HOME
+ *iv_return = DB_NOSERVER_HOME;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_PRIORITY_HIGH", 16)) {
+ /* ^ */
+#if (DB_VERSION_MAJOR > 4) || \
+ (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR > 1) || \
+ (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR == 1 && \
+ DB_VERSION_PATCH >= 17)
+ *iv_return = DB_PRIORITY_HIGH;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'I':
+ if (memEQ(name, "DB_ENV_RPCCLIENT", 16)) {
+ /* ^ */
+#ifdef DB_ENV_RPCCLIENT
+ *iv_return = DB_ENV_RPCCLIENT;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_TXN_OPENFILES", 16)) {
+ /* ^ */
+#if (DB_VERSION_MAJOR > 3) || \
+ (DB_VERSION_MAJOR == 3 && DB_VERSION_MINOR > 1) || \
+ (DB_VERSION_MAJOR == 3 && DB_VERSION_MINOR == 1 && \
+ DB_VERSION_PATCH >= 12)
+ *iv_return = DB_TXN_OPENFILES;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_VERSION_MINOR", 16)) {
+ /* ^ */
+#ifdef DB_VERSION_MINOR
+ *iv_return = DB_VERSION_MINOR;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'K':
+ if (memEQ(name, "DB_ENV_NOLOCKING", 16)) {
+ /* ^ */
+#ifdef DB_ENV_NOLOCKING
+ *iv_return = DB_ENV_NOLOCKING;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_FCNTL_LOCKING", 16)) {
+ /* ^ */
+#ifdef DB_FCNTL_LOCKING
+ *iv_return = DB_FCNTL_LOCKING;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'L':
+ if (memEQ(name, "DB_ENV_CDB_ALLDB", 16)) {
+ /* ^ */
+#ifdef DB_ENV_CDB_ALLDB
+ *iv_return = DB_ENV_CDB_ALLDB;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_LOCK_CONFLICT", 16)) {
+ /* ^ */
+#ifdef DB_LOCK_CONFLICT
+ *iv_return = DB_LOCK_CONFLICT;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_LOCK_DEADLOCK", 16)) {
+ /* ^ */
+#ifdef DB_LOCK_DEADLOCK
+ *iv_return = DB_LOCK_DEADLOCK;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_VERB_DEADLOCK", 16)) {
+ /* ^ */
+#ifdef DB_VERB_DEADLOCK
+ *iv_return = DB_VERB_DEADLOCK;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'M':
+ if (memEQ(name, "DB_TXN_LOCK_MASK", 16)) {
+ /* ^ */
+#ifdef DB_TXN_LOCK_MASK
+ *iv_return = DB_TXN_LOCK_MASK;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_VRFY_FLAGMASK", 16)) {
+ /* ^ */
+#ifdef DB_VRFY_FLAGMASK
+ *iv_return = DB_VRFY_FLAGMASK;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'N':
+ if (memEQ(name, "DB_REP_PERMANENT", 16)) {
+ /* ^ */
+#ifdef DB_REP_PERMANENT
+ *iv_return = DB_REP_PERMANENT;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'O':
+ if (memEQ(name, "DB_LOCK_MAXLOCKS", 16)) {
+ /* ^ */
+#ifdef DB_LOCK_MAXLOCKS
+ *iv_return = DB_LOCK_MAXLOCKS;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_LOCK_MINLOCKS", 16)) {
+ /* ^ */
+#ifdef DB_LOCK_MINLOCKS
+ *iv_return = DB_LOCK_MINLOCKS;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_PAGE_NOTFOUND", 16)) {
+ /* ^ */
+#ifdef DB_PAGE_NOTFOUND
+ *iv_return = DB_PAGE_NOTFOUND;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_TEST_POSTOPEN", 16)) {
+ /* ^ */
+#ifdef DB_TEST_POSTOPEN
+ *iv_return = DB_TEST_POSTOPEN;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_VERB_CHKPOINT", 16)) {
+ /* ^ */
+#ifdef DB_VERB_CHKPOINT
+ *iv_return = DB_VERB_CHKPOINT;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'R':
+ if (memEQ(name, "DB_ENV_OVERWRITE", 16)) {
+ /* ^ */
+#ifdef DB_ENV_OVERWRITE
+ *iv_return = DB_ENV_OVERWRITE;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_LOCK_MINWRITE", 16)) {
+ /* ^ */
+#ifdef DB_LOCK_MINWRITE
+ *iv_return = DB_LOCK_MINWRITE;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_LOCK_PUT_READ", 16)) {
+ /* ^ */
+#if (DB_VERSION_MAJOR > 4) || \
+ (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR > 0) || \
+ (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR == 0 && \
+ DB_VERSION_PATCH >= 7)
+ *iv_return = DB_LOCK_PUT_READ;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'S':
+ if (memEQ(name, "DB_LOGC_BUF_SIZE", 16)) {
+ /* ^ */
+#ifdef DB_LOGC_BUF_SIZE
+ *iv_return = DB_LOGC_BUF_SIZE;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_REP_DUPMASTER", 16)) {
+ /* ^ */
+#ifdef DB_REP_DUPMASTER
+ *iv_return = DB_REP_DUPMASTER;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_REP_NEWMASTER", 16)) {
+ /* ^ */
+#ifdef DB_REP_NEWMASTER
+ *iv_return = DB_REP_NEWMASTER;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_TEST_POSTSYNC", 16)) {
+ /* ^ */
+#ifdef DB_TEST_POSTSYNC
+ *iv_return = DB_TEST_POSTSYNC;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_VERB_WAITSFOR", 16)) {
+ /* ^ */
+#ifdef DB_VERB_WAITSFOR
+ *iv_return = DB_VERB_WAITSFOR;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'T':
+ if (memEQ(name, "DB_ENV_DIRECT_DB", 16)) {
+ /* ^ */
+#ifdef DB_ENV_DIRECT_DB
+ *iv_return = DB_ENV_DIRECT_DB;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'U':
+ if (memEQ(name, "DB_CACHED_COUNTS", 16)) {
+ /* ^ */
+#ifdef DB_CACHED_COUNTS
+ *iv_return = DB_CACHED_COUNTS;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'V':
+ if (memEQ(name, "DB_MPOOL_PRIVATE", 16)) {
+ /* ^ */
+#ifdef DB_MPOOL_PRIVATE
+ *iv_return = DB_MPOOL_PRIVATE;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_VERB_RECOVERY", 16)) {
+ /* ^ */
+#ifdef DB_VERB_RECOVERY
+ *iv_return = DB_VERB_RECOVERY;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'X':
+ if (memEQ(name, "DB_LOCK_NOTEXIST", 16)) {
+ /* ^ */
+#ifdef DB_LOCK_NOTEXIST
+ *iv_return = DB_LOCK_NOTEXIST;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case '_':
+ if (memEQ(name, "DB_BROADCAST_EID", 16)) {
+ /* ^ */
+#ifdef DB_BROADCAST_EID
+ *iv_return = DB_BROADCAST_EID;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_SECONDARY_BAD", 16)) {
+ /* ^ */
+#ifdef DB_SECONDARY_BAD
+ *iv_return = DB_SECONDARY_BAD;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ }
+ return PERL_constant_NOTFOUND;
+}
+
+static int
+constant_17 (pTHX_ const char *name, IV *iv_return, const char **pv_return) {
+ /* When generated this function returned values for the list of names given
+ here. However, subsequent manual editing may have added or removed some.
+ DB_ENV_DIRECT_LOG DB_ENV_REP_CLIENT DB_ENV_REP_MASTER DB_ENV_STANDALONE
+ DB_ENV_SYSTEM_MEM DB_ENV_TXN_NOSYNC DB_ENV_USER_ALLOC DB_GET_BOTH_RANGE
+ DB_LOG_SILENT_ERR DB_RPC_SERVERPROG DB_RPC_SERVERVERS DB_TEST_ELECTINIT
+ DB_TEST_ELECTSEND DB_TEST_PRERENAME DB_TXN_POPENFILES DB_VERSION_STRING */
+ /* Offset 14 gives the best switch position. */
+ switch (name[14]) {
+ case 'A':
+ if (memEQ(name, "DB_TEST_PRERENAME", 17)) {
+ /* ^ */
+#ifdef DB_TEST_PRERENAME
+ *iv_return = DB_TEST_PRERENAME;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'E':
+ if (memEQ(name, "DB_ENV_REP_CLIENT", 17)) {
+ /* ^ */
+#ifdef DB_ENV_REP_CLIENT
+ *iv_return = DB_ENV_REP_CLIENT;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_LOG_SILENT_ERR", 17)) {
+ /* ^ */
+#ifdef DB_LOG_SILENT_ERR
+ *iv_return = DB_LOG_SILENT_ERR;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_RPC_SERVERVERS", 17)) {
+ /* ^ */
+#ifdef DB_RPC_SERVERVERS
+ *iv_return = DB_RPC_SERVERVERS;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_TEST_ELECTSEND", 17)) {
+ /* ^ */
+#ifdef DB_TEST_ELECTSEND
+ *iv_return = DB_TEST_ELECTSEND;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'I':
+ if (memEQ(name, "DB_VERSION_STRING", 17)) {
+ /* ^ */
+#ifdef DB_VERSION_STRING
+ *pv_return = DB_VERSION_STRING;
+ return PERL_constant_ISPV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'L':
+ if (memEQ(name, "DB_ENV_DIRECT_LOG", 17)) {
+ /* ^ */
+#ifdef DB_ENV_DIRECT_LOG
+ *iv_return = DB_ENV_DIRECT_LOG;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_ENV_USER_ALLOC", 17)) {
+ /* ^ */
+#ifdef DB_ENV_USER_ALLOC
+ *iv_return = DB_ENV_USER_ALLOC;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_TXN_POPENFILES", 17)) {
+ /* ^ */
+#if (DB_VERSION_MAJOR > 3) || \
+ (DB_VERSION_MAJOR == 3 && DB_VERSION_MINOR > 3) || \
+ (DB_VERSION_MAJOR == 3 && DB_VERSION_MINOR == 3 && \
+ DB_VERSION_PATCH >= 4)
+ *iv_return = DB_TXN_POPENFILES;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'M':
+ if (memEQ(name, "DB_ENV_SYSTEM_MEM", 17)) {
+ /* ^ */
+#ifdef DB_ENV_SYSTEM_MEM
+ *iv_return = DB_ENV_SYSTEM_MEM;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'N':
+ if (memEQ(name, "DB_GET_BOTH_RANGE", 17)) {
+ /* ^ */
+#ifdef DB_GET_BOTH_RANGE
+ *iv_return = DB_GET_BOTH_RANGE;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_TEST_ELECTINIT", 17)) {
+ /* ^ */
+#ifdef DB_TEST_ELECTINIT
+ *iv_return = DB_TEST_ELECTINIT;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'O':
+ if (memEQ(name, "DB_ENV_STANDALONE", 17)) {
+ /* ^ */
+#ifdef DB_ENV_STANDALONE
+ *iv_return = DB_ENV_STANDALONE;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'R':
+ if (memEQ(name, "DB_RPC_SERVERPROG", 17)) {
+ /* ^ */
+#ifdef DB_RPC_SERVERPROG
+ *iv_return = DB_RPC_SERVERPROG;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'T':
+ if (memEQ(name, "DB_ENV_REP_MASTER", 17)) {
+ /* ^ */
+#ifdef DB_ENV_REP_MASTER
+ *iv_return = DB_ENV_REP_MASTER;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'Y':
+ if (memEQ(name, "DB_ENV_TXN_NOSYNC", 17)) {
+ /* ^ */
+#ifdef DB_ENV_TXN_NOSYNC
+ *iv_return = DB_ENV_TXN_NOSYNC;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ }
+ return PERL_constant_NOTFOUND;
+}
+
+static int
+constant_18 (pTHX_ const char *name, IV *iv_return) {
+ /* When generated this function returned values for the list of names given
+ here. However, subsequent manual editing may have added or removed some.
+ DB_ALREADY_ABORTED DB_ENV_AUTO_COMMIT DB_ENV_OPEN_CALLED
+ DB_ENV_REGION_INIT DB_LOCK_NOTGRANTED DB_MPOOL_NEW_GROUP
+ DB_PR_RECOVERYTEST DB_SET_TXN_TIMEOUT DB_TEST_ELECTVOTE1
+ DB_TEST_ELECTVOTE2 DB_TEST_ELECTWAIT1 DB_TEST_ELECTWAIT2
+ DB_TEST_POSTRENAME DB_TEST_PREDESTROY DB_TEST_PREEXTOPEN */
+ /* Offset 13 gives the best switch position. */
+ switch (name[13]) {
+ case 'A':
+ if (memEQ(name, "DB_ENV_OPEN_CALLED", 18)) {
+ /* ^ */
+#ifdef DB_ENV_OPEN_CALLED
+ *iv_return = DB_ENV_OPEN_CALLED;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_LOCK_NOTGRANTED", 18)) {
+ /* ^ */
+#ifdef DB_LOCK_NOTGRANTED
+ *iv_return = DB_LOCK_NOTGRANTED;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'E':
+ if (memEQ(name, "DB_TEST_POSTRENAME", 18)) {
+ /* ^ */
+#ifdef DB_TEST_POSTRENAME
+ *iv_return = DB_TEST_POSTRENAME;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'G':
+ if (memEQ(name, "DB_MPOOL_NEW_GROUP", 18)) {
+ /* ^ */
+#ifdef DB_MPOOL_NEW_GROUP
+ *iv_return = DB_MPOOL_NEW_GROUP;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'M':
+ if (memEQ(name, "DB_SET_TXN_TIMEOUT", 18)) {
+ /* ^ */
+#ifdef DB_SET_TXN_TIMEOUT
+ *iv_return = DB_SET_TXN_TIMEOUT;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'O':
+ if (memEQ(name, "DB_ALREADY_ABORTED", 18)) {
+ /* ^ */
+#ifdef DB_ALREADY_ABORTED
+ *iv_return = DB_ALREADY_ABORTED;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_ENV_AUTO_COMMIT", 18)) {
+ /* ^ */
+#ifdef DB_ENV_AUTO_COMMIT
+ *iv_return = DB_ENV_AUTO_COMMIT;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'S':
+ if (memEQ(name, "DB_TEST_PREDESTROY", 18)) {
+ /* ^ */
+#ifdef DB_TEST_PREDESTROY
+ *iv_return = DB_TEST_PREDESTROY;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'T':
+ if (memEQ(name, "DB_TEST_PREEXTOPEN", 18)) {
+ /* ^ */
+#ifdef DB_TEST_PREEXTOPEN
+ *iv_return = DB_TEST_PREEXTOPEN;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'V':
+ if (memEQ(name, "DB_TEST_ELECTVOTE1", 18)) {
+ /* ^ */
+#ifdef DB_TEST_ELECTVOTE1
+ *iv_return = DB_TEST_ELECTVOTE1;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_TEST_ELECTVOTE2", 18)) {
+ /* ^ */
+#ifdef DB_TEST_ELECTVOTE2
+ *iv_return = DB_TEST_ELECTVOTE2;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'W':
+ if (memEQ(name, "DB_TEST_ELECTWAIT1", 18)) {
+ /* ^ */
+#ifdef DB_TEST_ELECTWAIT1
+ *iv_return = DB_TEST_ELECTWAIT1;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_TEST_ELECTWAIT2", 18)) {
+ /* ^ */
+#ifdef DB_TEST_ELECTWAIT2
+ *iv_return = DB_TEST_ELECTWAIT2;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'Y':
+ if (memEQ(name, "DB_PR_RECOVERYTEST", 18)) {
+ /* ^ */
+#ifdef DB_PR_RECOVERYTEST
+ *iv_return = DB_PR_RECOVERYTEST;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case '_':
+ if (memEQ(name, "DB_ENV_REGION_INIT", 18)) {
+ /* ^ */
+#ifdef DB_ENV_REGION_INIT
+ *iv_return = DB_ENV_REGION_INIT;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ }
+ return PERL_constant_NOTFOUND;
+}
+
+static int
+constant_19 (pTHX_ const char *name, IV *iv_return) {
+ /* When generated this function returned values for the list of names given
+ here. However, subsequent manual editing may have added or removed some.
+ DB_ENV_REP_LOGSONLY DB_LOCK_FREE_LOCKER DB_LOCK_GET_TIMEOUT
+ DB_LOCK_SET_TIMEOUT DB_PRIORITY_DEFAULT DB_REP_HOLDELECTION
+ DB_SET_LOCK_TIMEOUT DB_TEST_POSTDESTROY DB_TEST_POSTEXTOPEN
+ DB_TEST_POSTLOGMETA DB_TEST_SUBDB_LOCKS DB_TXN_FORWARD_ROLL
+ DB_TXN_LOG_UNDOREDO DB_TXN_WRITE_NOSYNC DB_UNRESOLVED_CHILD
+ DB_UPDATE_SECONDARY DB_USE_ENVIRON_ROOT DB_VERB_REPLICATION */
+ /* Offset 9 gives the best switch position. */
+ switch (name[9]) {
+ case 'C':
+ if (memEQ(name, "DB_SET_LOCK_TIMEOUT", 19)) {
+ /* ^ */
+#ifdef DB_SET_LOCK_TIMEOUT
+ *iv_return = DB_SET_LOCK_TIMEOUT;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'E':
+ if (memEQ(name, "DB_LOCK_GET_TIMEOUT", 19)) {
+ /* ^ */
+#if (DB_VERSION_MAJOR > 4) || \
+ (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR > 0) || \
+ (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR == 0 && \
+ DB_VERSION_PATCH >= 7)
+ *iv_return = DB_LOCK_GET_TIMEOUT;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_LOCK_SET_TIMEOUT", 19)) {
+ /* ^ */
+#ifdef DB_LOCK_SET_TIMEOUT
+ *iv_return = DB_LOCK_SET_TIMEOUT;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_VERB_REPLICATION", 19)) {
+ /* ^ */
+#ifdef DB_VERB_REPLICATION
+ *iv_return = DB_VERB_REPLICATION;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'G':
+ if (memEQ(name, "DB_TXN_LOG_UNDOREDO", 19)) {
+ /* ^ */
+#ifdef DB_TXN_LOG_UNDOREDO
+ *iv_return = DB_TXN_LOG_UNDOREDO;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'I':
+ if (memEQ(name, "DB_TXN_WRITE_NOSYNC", 19)) {
+ /* ^ */
+#ifdef DB_TXN_WRITE_NOSYNC
+ *iv_return = DB_TXN_WRITE_NOSYNC;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'L':
+ if (memEQ(name, "DB_REP_HOLDELECTION", 19)) {
+ /* ^ */
+#ifdef DB_REP_HOLDELECTION
+ *iv_return = DB_REP_HOLDELECTION;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_UNRESOLVED_CHILD", 19)) {
+ /* ^ */
+#ifdef DB_UNRESOLVED_CHILD
+ *iv_return = DB_UNRESOLVED_CHILD;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'O':
+ if (memEQ(name, "DB_TEST_POSTDESTROY", 19)) {
+ /* ^ */
+#ifdef DB_TEST_POSTDESTROY
+ *iv_return = DB_TEST_POSTDESTROY;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_TEST_POSTEXTOPEN", 19)) {
+ /* ^ */
+#ifdef DB_TEST_POSTEXTOPEN
+ *iv_return = DB_TEST_POSTEXTOPEN;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_TEST_POSTLOGMETA", 19)) {
+ /* ^ */
+#ifdef DB_TEST_POSTLOGMETA
+ *iv_return = DB_TEST_POSTLOGMETA;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'P':
+ if (memEQ(name, "DB_ENV_REP_LOGSONLY", 19)) {
+ /* ^ */
+#ifdef DB_ENV_REP_LOGSONLY
+ *iv_return = DB_ENV_REP_LOGSONLY;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'R':
+ if (memEQ(name, "DB_LOCK_FREE_LOCKER", 19)) {
+ /* ^ */
+#ifdef DB_LOCK_FREE_LOCKER
+ *iv_return = DB_LOCK_FREE_LOCKER;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_TXN_FORWARD_ROLL", 19)) {
+ /* ^ */
+#if (DB_VERSION_MAJOR > 3) || \
+ (DB_VERSION_MAJOR == 3 && DB_VERSION_MINOR > 1) || \
+ (DB_VERSION_MAJOR == 3 && DB_VERSION_MINOR == 1 && \
+ DB_VERSION_PATCH >= 12)
+ *iv_return = DB_TXN_FORWARD_ROLL;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'T':
+ if (memEQ(name, "DB_PRIORITY_DEFAULT", 19)) {
+ /* ^ */
+#if (DB_VERSION_MAJOR > 4) || \
+ (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR > 1) || \
+ (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR == 1 && \
+ DB_VERSION_PATCH >= 17)
+ *iv_return = DB_PRIORITY_DEFAULT;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'U':
+ if (memEQ(name, "DB_TEST_SUBDB_LOCKS", 19)) {
+ /* ^ */
+#ifdef DB_TEST_SUBDB_LOCKS
+ *iv_return = DB_TEST_SUBDB_LOCKS;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'V':
+ if (memEQ(name, "DB_USE_ENVIRON_ROOT", 19)) {
+ /* ^ */
+#ifdef DB_USE_ENVIRON_ROOT
+ *iv_return = DB_USE_ENVIRON_ROOT;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case '_':
+ if (memEQ(name, "DB_UPDATE_SECONDARY", 19)) {
+ /* ^ */
+#ifdef DB_UPDATE_SECONDARY
+ *iv_return = DB_UPDATE_SECONDARY;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ }
+ return PERL_constant_NOTFOUND;
+}
+
+static int
+constant_20 (pTHX_ const char *name, IV *iv_return) {
+ /* When generated this function returned values for the list of names given
+ here. However, subsequent manual editing may have added or removed some.
+ DB_CXX_NO_EXCEPTIONS DB_LOGFILEID_INVALID DB_PANIC_ENVIRONMENT
+ DB_PRIORITY_VERY_LOW DB_TEST_PREEXTDELETE DB_TEST_PREEXTUNLINK
+ DB_TXN_BACKWARD_ROLL DB_TXN_LOCK_OPTIMIST */
+ /* Offset 14 gives the best switch position. */
+ switch (name[14]) {
+ case 'D':
+ if (memEQ(name, "DB_TEST_PREEXTDELETE", 20)) {
+ /* ^ */
+#ifdef DB_TEST_PREEXTDELETE
+ *iv_return = DB_TEST_PREEXTDELETE;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ if (memEQ(name, "DB_TXN_BACKWARD_ROLL", 20)) {
+ /* ^ */
+#if (DB_VERSION_MAJOR > 3) || \
+ (DB_VERSION_MAJOR == 3 && DB_VERSION_MINOR > 1) || \
+ (DB_VERSION_MAJOR == 3 && DB_VERSION_MINOR == 1 && \
+ DB_VERSION_PATCH >= 12)
+ *iv_return = DB_TXN_BACKWARD_ROLL;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'N':
+ if (memEQ(name, "DB_LOGFILEID_INVALID", 20)) {
+ /* ^ */
+#ifdef DB_LOGFILEID_INVALID
+ *iv_return = DB_LOGFILEID_INVALID;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'O':
+ if (memEQ(name, "DB_PANIC_ENVIRONMENT", 20)) {
+ /* ^ */
+#ifdef DB_PANIC_ENVIRONMENT
+ *iv_return = DB_PANIC_ENVIRONMENT;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'P':
+ if (memEQ(name, "DB_CXX_NO_EXCEPTIONS", 20)) {
+ /* ^ */
+#ifdef DB_CXX_NO_EXCEPTIONS
+ *iv_return = DB_CXX_NO_EXCEPTIONS;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'R':
+ if (memEQ(name, "DB_PRIORITY_VERY_LOW", 20)) {
+ /* ^ */
+#if (DB_VERSION_MAJOR > 4) || \
+ (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR > 1) || \
+ (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR == 1 && \
+ DB_VERSION_PATCH >= 17)
+ *iv_return = DB_PRIORITY_VERY_LOW;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'T':
+ if (memEQ(name, "DB_TXN_LOCK_OPTIMIST", 20)) {
+ /* ^ */
+#ifdef DB_TXN_LOCK_OPTIMIST
+ *iv_return = DB_TXN_LOCK_OPTIMIST;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'U':
+ if (memEQ(name, "DB_TEST_PREEXTUNLINK", 20)) {
+ /* ^ */
+#ifdef DB_TEST_PREEXTUNLINK
+ *iv_return = DB_TEST_PREEXTUNLINK;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ }
+ return PERL_constant_NOTFOUND;
+}
+
+static int
+constant_21 (pTHX_ const char *name, IV *iv_return) {
+ /* When generated this function returned values for the list of names given
+ here. However, subsequent manual editing may have added or removed some.
+ DB_LOCK_UPGRADE_WRITE DB_PRIORITY_VERY_HIGH DB_TEST_POSTEXTDELETE
+ DB_TEST_POSTEXTUNLINK DB_TXN_BACKWARD_ALLOC */
+ /* Offset 16 gives the best switch position. */
+ switch (name[16]) {
+ case 'A':
+ if (memEQ(name, "DB_TXN_BACKWARD_ALLOC", 21)) {
+ /* ^ */
+#if (DB_VERSION_MAJOR > 4) || \
+ (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR > 1) || \
+ (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR == 1 && \
+ DB_VERSION_PATCH >= 17)
+ *iv_return = DB_TXN_BACKWARD_ALLOC;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'E':
+ if (memEQ(name, "DB_TEST_POSTEXTDELETE", 21)) {
+ /* ^ */
+#ifdef DB_TEST_POSTEXTDELETE
+ *iv_return = DB_TEST_POSTEXTDELETE;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'N':
+ if (memEQ(name, "DB_TEST_POSTEXTUNLINK", 21)) {
+ /* ^ */
+#ifdef DB_TEST_POSTEXTUNLINK
+ *iv_return = DB_TEST_POSTEXTUNLINK;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'W':
+ if (memEQ(name, "DB_LOCK_UPGRADE_WRITE", 21)) {
+ /* ^ */
+#if (DB_VERSION_MAJOR > 3) || \
+ (DB_VERSION_MAJOR == 3 && DB_VERSION_MINOR > 3) || \
+ (DB_VERSION_MAJOR == 3 && DB_VERSION_MINOR == 3 && \
+ DB_VERSION_PATCH >= 4)
+ *iv_return = DB_LOCK_UPGRADE_WRITE;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case '_':
+ if (memEQ(name, "DB_PRIORITY_VERY_HIGH", 21)) {
+ /* ^ */
+#if (DB_VERSION_MAJOR > 4) || \
+ (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR > 1) || \
+ (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR == 1 && \
+ DB_VERSION_PATCH >= 17)
+ *iv_return = DB_PRIORITY_VERY_HIGH;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ }
+ return PERL_constant_NOTFOUND;
+}
+
+static int
+constant (pTHX_ const char *name, STRLEN len, IV *iv_return, const char **pv_return) {
+ /* Initially switch on the length of the name. */
+ /* When generated this function returned values for the list of names given
+ in this section of perl code. Rather than manually editing these functions
+ to add or remove constants, which would result in this comment and section
+ of code becoming inaccurate, we recommend that you edit this section of
+ code, and use it to regenerate a new set of constant functions which you
+ then use to replace the originals.
+
+ Regenerate these constant functions by feeding this entire source file to
+ perl -x
+
+#!/home/paul/perl/install/redhat6.1/5.8.0/bin/perl5.8.0 -w
+use ExtUtils::Constant qw (constant_types C_constant XS_constant);
+
+my $types = {map {($_, 1)} qw(IV PV)};
+my @names = (qw(DB_AFTER DB_AGGRESSIVE DB_ALREADY_ABORTED DB_APPEND
+ DB_APPLY_LOGREG DB_APP_INIT DB_ARCH_ABS DB_ARCH_DATA DB_ARCH_LOG
+ DB_AUTO_COMMIT DB_BEFORE DB_BROADCAST_EID DB_BTREEMAGIC
+ DB_BTREEOLDVER DB_BTREEVERSION DB_CACHED_COUNTS DB_CDB_ALLDB
+ DB_CHECKPOINT DB_CHKSUM_SHA1 DB_CLIENT DB_CL_WRITER DB_COMMIT
+ DB_CONSUME DB_CONSUME_WAIT DB_CREATE DB_CURLSN DB_CURRENT
+ DB_CXX_NO_EXCEPTIONS DB_DELETED DB_DELIMITER DB_DIRECT
+ DB_DIRECT_DB DB_DIRECT_LOG DB_DIRTY_READ DB_DONOTINDEX DB_DUP
+ DB_DUPCURSOR DB_DUPSORT DB_EID_BROADCAST DB_EID_INVALID
+ DB_ENCRYPT DB_ENCRYPT_AES DB_ENV_APPINIT DB_ENV_AUTO_COMMIT
+ DB_ENV_CDB DB_ENV_CDB_ALLDB DB_ENV_CREATE DB_ENV_DBLOCAL
+ DB_ENV_DIRECT_DB DB_ENV_DIRECT_LOG DB_ENV_FATAL DB_ENV_LOCKDOWN
+ DB_ENV_LOCKING DB_ENV_LOGGING DB_ENV_NOLOCKING DB_ENV_NOMMAP
+ DB_ENV_NOPANIC DB_ENV_OPEN_CALLED DB_ENV_OVERWRITE
+ DB_ENV_PANIC_OK DB_ENV_PRIVATE DB_ENV_REGION_INIT
+ DB_ENV_REP_CLIENT DB_ENV_REP_LOGSONLY DB_ENV_REP_MASTER
+ DB_ENV_RPCCLIENT DB_ENV_RPCCLIENT_GIVEN DB_ENV_STANDALONE
+ DB_ENV_SYSTEM_MEM DB_ENV_THREAD DB_ENV_TXN DB_ENV_TXN_NOSYNC
+ DB_ENV_TXN_WRITE_NOSYNC DB_ENV_USER_ALLOC DB_ENV_YIELDCPU
+ DB_EXCL DB_EXTENT DB_FAST_STAT DB_FCNTL_LOCKING DB_FILE_ID_LEN
+ DB_FIRST DB_FIXEDLEN DB_FLUSH DB_FORCE DB_GETREC DB_GET_BOTH
+ DB_GET_BOTHC DB_GET_BOTH_RANGE DB_GET_RECNO DB_HANDLE_LOCK
+ DB_HASHMAGIC DB_HASHOLDVER DB_HASHVERSION DB_INCOMPLETE
+ DB_INIT_CDB DB_INIT_LOCK DB_INIT_LOG DB_INIT_MPOOL DB_INIT_TXN
+ DB_INVALID_EID DB_JAVA_CALLBACK DB_JOINENV DB_JOIN_ITEM
+ DB_JOIN_NOSORT DB_KEYEMPTY DB_KEYEXIST DB_KEYFIRST DB_KEYLAST
+ DB_LAST DB_LOCKDOWN DB_LOCKMAGIC DB_LOCKVERSION DB_LOCK_CONFLICT
+ DB_LOCK_DEADLOCK DB_LOCK_DEFAULT DB_LOCK_EXPIRE
+ DB_LOCK_FREE_LOCKER DB_LOCK_MAXLOCKS DB_LOCK_MINLOCKS
+ DB_LOCK_MINWRITE DB_LOCK_NORUN DB_LOCK_NOTEXIST
+ DB_LOCK_NOTGRANTED DB_LOCK_NOTHELD DB_LOCK_NOWAIT DB_LOCK_OLDEST
+ DB_LOCK_RANDOM DB_LOCK_RECORD DB_LOCK_REMOVE DB_LOCK_RIW_N
+ DB_LOCK_RW_N DB_LOCK_SET_TIMEOUT DB_LOCK_SWITCH DB_LOCK_UPGRADE
+ DB_LOCK_YOUNGEST DB_LOGC_BUF_SIZE DB_LOGFILEID_INVALID
+ DB_LOGMAGIC DB_LOGOLDVER DB_LOGVERSION DB_LOG_DISK DB_LOG_LOCKED
+ DB_LOG_SILENT_ERR DB_MAX_PAGES DB_MAX_RECORDS DB_MPOOL_CLEAN
+ DB_MPOOL_CREATE DB_MPOOL_DIRTY DB_MPOOL_DISCARD DB_MPOOL_EXTENT
+ DB_MPOOL_LAST DB_MPOOL_NEW DB_MPOOL_NEW_GROUP DB_MPOOL_PRIVATE
+ DB_MULTIPLE DB_MULTIPLE_KEY DB_MUTEXDEBUG DB_MUTEXLOCKS
+ DB_NEEDSPLIT DB_NEXT DB_NEXT_DUP DB_NEXT_NODUP DB_NOCOPY
+ DB_NODUPDATA DB_NOLOCKING DB_NOMMAP DB_NOORDERCHK DB_NOOVERWRITE
+ DB_NOPANIC DB_NORECURSE DB_NOSERVER DB_NOSERVER_HOME
+ DB_NOSERVER_ID DB_NOSYNC DB_NOTFOUND DB_ODDFILESIZE DB_OK_BTREE
+ DB_OK_HASH DB_OK_QUEUE DB_OK_RECNO DB_OLD_VERSION DB_OPEN_CALLED
+ DB_OPFLAGS_MASK DB_ORDERCHKONLY DB_OVERWRITE DB_PAD DB_PAGEYIELD
+ DB_PAGE_LOCK DB_PAGE_NOTFOUND DB_PANIC_ENVIRONMENT DB_PERMANENT
+ DB_POSITION DB_POSITIONI DB_PREV DB_PREV_NODUP DB_PRINTABLE
+ DB_PRIVATE DB_PR_HEADERS DB_PR_PAGE DB_PR_RECOVERYTEST
+ DB_QAMMAGIC DB_QAMOLDVER DB_QAMVERSION DB_RDONLY DB_RDWRMASTER
+ DB_RECNUM DB_RECORDCOUNT DB_RECORD_LOCK DB_RECOVER
+ DB_RECOVER_FATAL DB_REGION_ANON DB_REGION_INIT DB_REGION_MAGIC
+ DB_REGION_NAME DB_REGISTERED DB_RENAMEMAGIC DB_RENUMBER
+ DB_REP_CLIENT DB_REP_DUPMASTER DB_REP_HOLDELECTION
+ DB_REP_LOGSONLY DB_REP_MASTER DB_REP_NEWMASTER DB_REP_NEWSITE
+ DB_REP_OUTDATED DB_REP_PERMANENT DB_REP_UNAVAIL DB_REVSPLITOFF
+ DB_RMW DB_RPC_SERVERPROG DB_RPC_SERVERVERS DB_RUNRECOVERY
+ DB_SALVAGE DB_SECONDARY_BAD DB_SEQUENTIAL DB_SET
+ DB_SET_LOCK_TIMEOUT DB_SET_RANGE DB_SET_RECNO DB_SET_TXN_NOW
+ DB_SET_TXN_TIMEOUT DB_SNAPSHOT DB_STAT_CLEAR DB_SURPRISE_KID
+ DB_SWAPBYTES DB_SYSTEM_MEM DB_TEMPORARY DB_TEST_ELECTINIT
+ DB_TEST_ELECTSEND DB_TEST_ELECTVOTE1 DB_TEST_ELECTVOTE2
+ DB_TEST_ELECTWAIT1 DB_TEST_ELECTWAIT2 DB_TEST_POSTDESTROY
+ DB_TEST_POSTEXTDELETE DB_TEST_POSTEXTOPEN DB_TEST_POSTEXTUNLINK
+ DB_TEST_POSTLOG DB_TEST_POSTLOGMETA DB_TEST_POSTOPEN
+ DB_TEST_POSTRENAME DB_TEST_POSTSYNC DB_TEST_PREDESTROY
+ DB_TEST_PREEXTDELETE DB_TEST_PREEXTOPEN DB_TEST_PREEXTUNLINK
+ DB_TEST_PREOPEN DB_TEST_PRERENAME DB_TEST_SUBDB_LOCKS DB_THREAD
+ DB_TIMEOUT DB_TRUNCATE DB_TXNMAGIC DB_TXNVERSION DB_TXN_CKP
+ DB_TXN_LOCK DB_TXN_LOCK_2PL DB_TXN_LOCK_MASK
+ DB_TXN_LOCK_OPTIMIST DB_TXN_LOCK_OPTIMISTIC DB_TXN_LOG_MASK
+ DB_TXN_LOG_REDO DB_TXN_LOG_UNDO DB_TXN_LOG_UNDOREDO
+ DB_TXN_NOSYNC DB_TXN_NOWAIT DB_TXN_REDO DB_TXN_SYNC DB_TXN_UNDO
+ DB_TXN_WRITE_NOSYNC DB_UNRESOLVED_CHILD DB_UPDATE_SECONDARY
+ DB_UPGRADE DB_USE_ENVIRON DB_USE_ENVIRON_ROOT DB_VERB_CHKPOINT
+ DB_VERB_DEADLOCK DB_VERB_RECOVERY DB_VERB_REPLICATION
+ DB_VERB_WAITSFOR DB_VERIFY DB_VERIFY_BAD DB_VERIFY_FATAL
+ DB_VERSION_MAJOR DB_VERSION_MINOR DB_VERSION_PATCH
+ DB_VRFY_FLAGMASK DB_WRITECURSOR DB_WRITELOCK DB_WRITEOPEN
+ DB_WRNOSYNC DB_XA_CREATE DB_XIDDATASIZE DB_YIELDCPU),
+ {name=>"DB_BTREE", type=>"IV", macro=>["#if (DB_VERSION_MAJOR > 2) || \\\n (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR > 0) || \\\n (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR == 0 && \\\n DB_VERSION_PATCH >= 0)\n", "#endif\n"]},
+ {name=>"DB_HASH", type=>"IV", macro=>["#if (DB_VERSION_MAJOR > 2) || \\\n (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR > 0) || \\\n (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR == 0 && \\\n DB_VERSION_PATCH >= 0)\n", "#endif\n"]},
+ {name=>"DB_LOCK_DUMP", type=>"IV", macro=>["#if (DB_VERSION_MAJOR > 2) || \\\n (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR > 0) || \\\n (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR == 0 && \\\n DB_VERSION_PATCH >= 0)\n", "#endif\n"]},
+ {name=>"DB_LOCK_GET", type=>"IV", macro=>["#if (DB_VERSION_MAJOR > 2) || \\\n (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR > 0) || \\\n (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR == 0 && \\\n DB_VERSION_PATCH >= 0)\n", "#endif\n"]},
+ {name=>"DB_LOCK_GET_TIMEOUT", type=>"IV", macro=>["#if (DB_VERSION_MAJOR > 4) || \\\n (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR > 0) || \\\n (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR == 0 && \\\n DB_VERSION_PATCH >= 7)\n", "#endif\n"]},
+ {name=>"DB_LOCK_INHERIT", type=>"IV", macro=>["#if (DB_VERSION_MAJOR > 2) || \\\n (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR > 7) || \\\n (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR == 7 && \\\n DB_VERSION_PATCH >= 1)\n", "#endif\n"]},
+ {name=>"DB_LOCK_PUT", type=>"IV", macro=>["#if (DB_VERSION_MAJOR > 2) || \\\n (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR > 0) || \\\n (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR == 0 && \\\n DB_VERSION_PATCH >= 0)\n", "#endif\n"]},
+ {name=>"DB_LOCK_PUT_ALL", type=>"IV", macro=>["#if (DB_VERSION_MAJOR > 2) || \\\n (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR > 0) || \\\n (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR == 0 && \\\n DB_VERSION_PATCH >= 0)\n", "#endif\n"]},
+ {name=>"DB_LOCK_PUT_OBJ", type=>"IV", macro=>["#if (DB_VERSION_MAJOR > 2) || \\\n (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR > 0) || \\\n (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR == 0 && \\\n DB_VERSION_PATCH >= 0)\n", "#endif\n"]},
+ {name=>"DB_LOCK_PUT_READ", type=>"IV", macro=>["#if (DB_VERSION_MAJOR > 4) || \\\n (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR > 0) || \\\n (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR == 0 && \\\n DB_VERSION_PATCH >= 7)\n", "#endif\n"]},
+ {name=>"DB_LOCK_TIMEOUT", type=>"IV", macro=>["#if (DB_VERSION_MAJOR > 4) || \\\n (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR > 0) || \\\n (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR == 0 && \\\n DB_VERSION_PATCH >= 7)\n", "#endif\n"]},
+ {name=>"DB_LOCK_TRADE", type=>"IV", macro=>["#if (DB_VERSION_MAJOR > 4) || \\\n (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR > 1) || \\\n (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR == 1 && \\\n DB_VERSION_PATCH >= 17)\n", "#endif\n"]},
+ {name=>"DB_LOCK_UPGRADE_WRITE", type=>"IV", macro=>["#if (DB_VERSION_MAJOR > 3) || \\\n (DB_VERSION_MAJOR == 3 && DB_VERSION_MINOR > 3) || \\\n (DB_VERSION_MAJOR == 3 && DB_VERSION_MINOR == 3 && \\\n DB_VERSION_PATCH >= 4)\n", "#endif\n"]},
+ {name=>"DB_PRIORITY_DEFAULT", type=>"IV", macro=>["#if (DB_VERSION_MAJOR > 4) || \\\n (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR > 1) || \\\n (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR == 1 && \\\n DB_VERSION_PATCH >= 17)\n", "#endif\n"]},
+ {name=>"DB_PRIORITY_HIGH", type=>"IV", macro=>["#if (DB_VERSION_MAJOR > 4) || \\\n (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR > 1) || \\\n (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR == 1 && \\\n DB_VERSION_PATCH >= 17)\n", "#endif\n"]},
+ {name=>"DB_PRIORITY_LOW", type=>"IV", macro=>["#if (DB_VERSION_MAJOR > 4) || \\\n (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR > 1) || \\\n (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR == 1 && \\\n DB_VERSION_PATCH >= 17)\n", "#endif\n"]},
+ {name=>"DB_PRIORITY_VERY_HIGH", type=>"IV", macro=>["#if (DB_VERSION_MAJOR > 4) || \\\n (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR > 1) || \\\n (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR == 1 && \\\n DB_VERSION_PATCH >= 17)\n", "#endif\n"]},
+ {name=>"DB_PRIORITY_VERY_LOW", type=>"IV", macro=>["#if (DB_VERSION_MAJOR > 4) || \\\n (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR > 1) || \\\n (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR == 1 && \\\n DB_VERSION_PATCH >= 17)\n", "#endif\n"]},
+ {name=>"DB_QUEUE", type=>"IV", macro=>["#if (DB_VERSION_MAJOR > 3) || \\\n (DB_VERSION_MAJOR == 3 && DB_VERSION_MINOR > 0) || \\\n (DB_VERSION_MAJOR == 3 && DB_VERSION_MINOR == 0 && \\\n DB_VERSION_PATCH >= 55)\n", "#endif\n"]},
+ {name=>"DB_RECNO", type=>"IV", macro=>["#if (DB_VERSION_MAJOR > 2) || \\\n (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR > 0) || \\\n (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR == 0 && \\\n DB_VERSION_PATCH >= 0)\n", "#endif\n"]},
+ {name=>"DB_TXN_ABORT", type=>"IV", macro=>["#if (DB_VERSION_MAJOR > 3) || \\\n (DB_VERSION_MAJOR == 3 && DB_VERSION_MINOR > 1) || \\\n (DB_VERSION_MAJOR == 3 && DB_VERSION_MINOR == 1 && \\\n DB_VERSION_PATCH >= 12)\n", "#endif\n"]},
+ {name=>"DB_TXN_APPLY", type=>"IV", macro=>["#if (DB_VERSION_MAJOR > 4) || \\\n (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR > 0) || \\\n (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR == 0 && \\\n DB_VERSION_PATCH >= 7)\n", "#endif\n"]},
+ {name=>"DB_TXN_BACKWARD_ALLOC", type=>"IV", macro=>["#if (DB_VERSION_MAJOR > 4) || \\\n (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR > 1) || \\\n (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR == 1 && \\\n DB_VERSION_PATCH >= 17)\n", "#endif\n"]},
+ {name=>"DB_TXN_BACKWARD_ROLL", type=>"IV", macro=>["#if (DB_VERSION_MAJOR > 3) || \\\n (DB_VERSION_MAJOR == 3 && DB_VERSION_MINOR > 1) || \\\n (DB_VERSION_MAJOR == 3 && DB_VERSION_MINOR == 1 && \\\n DB_VERSION_PATCH >= 12)\n", "#endif\n"]},
+ {name=>"DB_TXN_FORWARD_ROLL", type=>"IV", macro=>["#if (DB_VERSION_MAJOR > 3) || \\\n (DB_VERSION_MAJOR == 3 && DB_VERSION_MINOR > 1) || \\\n (DB_VERSION_MAJOR == 3 && DB_VERSION_MINOR == 1 && \\\n DB_VERSION_PATCH >= 12)\n", "#endif\n"]},
+ {name=>"DB_TXN_GETPGNOS", type=>"IV", macro=>["#if (DB_VERSION_MAJOR > 4) || \\\n (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR > 1) || \\\n (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR == 1 && \\\n DB_VERSION_PATCH >= 17)\n", "#endif\n"]},
+ {name=>"DB_TXN_OPENFILES", type=>"IV", macro=>["#if (DB_VERSION_MAJOR > 3) || \\\n (DB_VERSION_MAJOR == 3 && DB_VERSION_MINOR > 1) || \\\n (DB_VERSION_MAJOR == 3 && DB_VERSION_MINOR == 1 && \\\n DB_VERSION_PATCH >= 12)\n", "#endif\n"]},
+ {name=>"DB_TXN_POPENFILES", type=>"IV", macro=>["#if (DB_VERSION_MAJOR > 3) || \\\n (DB_VERSION_MAJOR == 3 && DB_VERSION_MINOR > 3) || \\\n (DB_VERSION_MAJOR == 3 && DB_VERSION_MINOR == 3 && \\\n DB_VERSION_PATCH >= 4)\n", "#endif\n"]},
+ {name=>"DB_TXN_PRINT", type=>"IV", macro=>["#if (DB_VERSION_MAJOR > 4) || \\\n (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR > 1) || \\\n (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR == 1 && \\\n DB_VERSION_PATCH >= 17)\n", "#endif\n"]},
+ {name=>"DB_UNKNOWN", type=>"IV", macro=>["#if (DB_VERSION_MAJOR > 2) || \\\n (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR > 0) || \\\n (DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR == 0 && \\\n DB_VERSION_PATCH >= 0)\n", "#endif\n"]},
+ {name=>"DB_VERSION_STRING", type=>"PV"});
+
+print constant_types(); # macro defs
+foreach (C_constant ("BerkeleyDB", 'constant', 'IV', $types, undef, 3, @names) ) {
+ print $_, "\n"; # C constant subs
+}
+print "#### XS Section:\n";
+print XS_constant ("BerkeleyDB", $types);
+__END__
+ */
+
+ switch (len) {
+ case 6:
+ return constant_6 (aTHX_ name, iv_return);
+ break;
+ case 7:
+ return constant_7 (aTHX_ name, iv_return);
+ break;
+ case 8:
+ return constant_8 (aTHX_ name, iv_return);
+ break;
+ case 9:
+ return constant_9 (aTHX_ name, iv_return);
+ break;
+ case 10:
+ return constant_10 (aTHX_ name, iv_return);
+ break;
+ case 11:
+ return constant_11 (aTHX_ name, iv_return);
+ break;
+ case 12:
+ return constant_12 (aTHX_ name, iv_return);
+ break;
+ case 13:
+ return constant_13 (aTHX_ name, iv_return);
+ break;
+ case 14:
+ return constant_14 (aTHX_ name, iv_return);
+ break;
+ case 15:
+ return constant_15 (aTHX_ name, iv_return);
+ break;
+ case 16:
+ return constant_16 (aTHX_ name, iv_return);
+ break;
+ case 17:
+ return constant_17 (aTHX_ name, iv_return, pv_return);
+ break;
+ case 18:
+ return constant_18 (aTHX_ name, iv_return);
+ break;
+ case 19:
+ return constant_19 (aTHX_ name, iv_return);
+ break;
+ case 20:
+ return constant_20 (aTHX_ name, iv_return);
+ break;
+ case 21:
+ return constant_21 (aTHX_ name, iv_return);
+ break;
+ case 22:
+ /* Names all of length 22. */
+ /* DB_ENV_RPCCLIENT_GIVEN DB_TXN_LOCK_OPTIMISTIC */
+ /* Offset 8 gives the best switch position. */
+ switch (name[8]) {
+ case 'O':
+ if (memEQ(name, "DB_TXN_LOCK_OPTIMISTIC", 22)) {
+ /* ^ */
+#ifdef DB_TXN_LOCK_OPTIMISTIC
+ *iv_return = DB_TXN_LOCK_OPTIMISTIC;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'P':
+ if (memEQ(name, "DB_ENV_RPCCLIENT_GIVEN", 22)) {
+ /* ^ */
+#ifdef DB_ENV_RPCCLIENT_GIVEN
+ *iv_return = DB_ENV_RPCCLIENT_GIVEN;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ }
+ break;
+ case 23:
+ if (memEQ(name, "DB_ENV_TXN_WRITE_NOSYNC", 23)) {
+#ifdef DB_ENV_TXN_WRITE_NOSYNC
+ *iv_return = DB_ENV_TXN_WRITE_NOSYNC;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ }
+ return PERL_constant_NOTFOUND;
+}
+
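The generated constant() dispatcher above switches first on the length of the requested name, each per-length helper then switches on a single discriminating character before committing to a full memEQ(), and every hit compiles down to either a plain #ifdef test or an explicit DB_VERSION_MAJOR/MINOR/PATCH comparison. The embedded "perl -x" script in the comment above is the authoritative recipe for regenerating these tables; as a condensed, hedged sketch, a similar result can be produced through ExtUtils::Constant's WriteConstants interface (the one used by the mkconsts script later in this patch). The file names and the trimmed name list below are illustrative assumptions, not what mkconsts actually passes:

    #!/usr/bin/perl
    # Hedged sketch only: regenerate a length/character-switch constant
    # lookup like the one above.  File names and the abbreviated name list
    # are assumptions; the embedded "perl -x" recipe above is authoritative.
    use ExtUtils::Constant qw(WriteConstants);

    WriteConstants(
        NAME    => 'BerkeleyDB',
        NAMES   => [ qw(DB_CREATE DB_RDONLY DB_TXN_NOSYNC),
                     { name => 'DB_VERSION_STRING', type => 'PV' } ],
        C_FILE  => 'constants.h',    # assumption: mirrors this module's layout
        XS_FILE => 'constants.xs',
    );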
diff --git a/bdb/perl/BerkeleyDB/constants.xs b/bdb/perl/BerkeleyDB/constants.xs
new file mode 100644
index 00000000000..1b2c8b2c3c8
--- /dev/null
+++ b/bdb/perl/BerkeleyDB/constants.xs
@@ -0,0 +1,87 @@
+void
+constant(sv)
+ PREINIT:
+#ifdef dXSTARG
+ dXSTARG; /* Faster if we have it. */
+#else
+ dTARGET;
+#endif
+ STRLEN len;
+ int type;
+ IV iv;
+ /* NV nv; Uncomment this if you need to return NVs */
+ const char *pv;
+ INPUT:
+ SV * sv;
+ const char * s = SvPV(sv, len);
+ PPCODE:
+ /* Change this to constant(aTHX_ s, len, &iv, &nv);
+ if you need to return both NVs and IVs */
+ type = constant(aTHX_ s, len, &iv, &pv);
+ /* Return 1 or 2 items. First is error message, or undef if no error.
+ Second, if present, is found value */
+ switch (type) {
+ case PERL_constant_NOTFOUND:
+ sv = sv_2mortal(newSVpvf("%s is not a valid BerkeleyDB macro", s));
+ PUSHs(sv);
+ break;
+ case PERL_constant_NOTDEF:
+ sv = sv_2mortal(newSVpvf(
+ "Your vendor has not defined BerkeleyDB macro %s, used", s));
+ PUSHs(sv);
+ break;
+ case PERL_constant_ISIV:
+ EXTEND(SP, 1);
+ PUSHs(&PL_sv_undef);
+ PUSHi(iv);
+ break;
+ /* Uncomment this if you need to return NOs
+ case PERL_constant_ISNO:
+ EXTEND(SP, 1);
+ PUSHs(&PL_sv_undef);
+ PUSHs(&PL_sv_no);
+ break; */
+ /* Uncomment this if you need to return NVs
+ case PERL_constant_ISNV:
+ EXTEND(SP, 1);
+ PUSHs(&PL_sv_undef);
+ PUSHn(nv);
+ break; */
+ case PERL_constant_ISPV:
+ EXTEND(SP, 1);
+ PUSHs(&PL_sv_undef);
+ PUSHp(pv, strlen(pv));
+ break;
+ /* Uncomment this if you need to return PVNs
+ case PERL_constant_ISPVN:
+ EXTEND(SP, 1);
+ PUSHs(&PL_sv_undef);
+ PUSHp(pv, iv);
+ break; */
+ /* Uncomment this if you need to return SVs
+ case PERL_constant_ISSV:
+ EXTEND(SP, 1);
+ PUSHs(&PL_sv_undef);
+ PUSHs(sv);
+ break; */
+ /* Uncomment this if you need to return UNDEFs
+ case PERL_constant_ISUNDEF:
+ break; */
+ /* Uncomment this if you need to return UVs
+ case PERL_constant_ISUV:
+ EXTEND(SP, 1);
+ PUSHs(&PL_sv_undef);
+ PUSHu((UV)iv);
+ break; */
+ /* Uncomment this if you need to return YESs
+ case PERL_constant_ISYES:
+ EXTEND(SP, 1);
+ PUSHs(&PL_sv_undef);
+ PUSHs(&PL_sv_yes);
+ break; */
+ default:
+ sv = sv_2mortal(newSVpvf(
+ "Unexpected return type %d while processing BerkeleyDB macro %s, used",
+ type, s));
+ PUSHs(sv);
+ }
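The constant() XSUB above returns either a single error message (for PERL_constant_NOTFOUND and PERL_constant_NOTDEF) or undef followed by the found IV or PV. On the Perl side, names are conventionally resolved through the standard ExtUtils::Constant AUTOLOAD idiom; BerkeleyDB.pm itself is not part of this hunk, so the wrapper below is a hedged sketch of that convention rather than the module's actual code:

    # Hedged sketch of the conventional Perl-side caller; assumes Carp is
    # loaded for croak().  BerkeleyDB.pm's real AUTOLOAD may differ.
    sub AUTOLOAD {
        my $constname;
        our $AUTOLOAD;
        ($constname = $AUTOLOAD) =~ s/.*:://;
        croak "&BerkeleyDB::constant not defined" if $constname eq 'constant';
        my ($error, $val) = constant($constname);
        croak $error if $error;        # NOTFOUND/NOTDEF message from the XS side
        no strict 'refs';
        *$AUTOLOAD = sub { $val };     # memoize so later calls skip the XS lookup
        goto &$AUTOLOAD;
    }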
diff --git a/bdb/perl.BerkeleyDB/dbinfo b/bdb/perl/BerkeleyDB/dbinfo
index 415411aff8e..af2c45facf5 100755
--- a/bdb/perl.BerkeleyDB/dbinfo
+++ b/bdb/perl/BerkeleyDB/dbinfo
@@ -7,7 +7,7 @@
# Version: 1.03
# Date 17th September 2000
#
-# Copyright (c) 1998-2001 Paul Marquess. All rights reserved.
+# Copyright (c) 1998-2002 Paul Marquess. All rights reserved.
# This program is free software; you can redistribute it and/or
# modify it under the same terms as Perl itself.
@@ -29,7 +29,8 @@ my %Data =
5 => "2.0.0 -> 2.3.0",
6 => "2.3.1 -> 2.7.7",
7 => "3.0.x",
- 8 => "3.1.x or greater",
+ 8 => "3.1.x -> 4.0.x",
+ 9 => "4.1.x or greater",
}
},
0x061561 => {
@@ -42,7 +43,8 @@ my %Data =
4 => "2.0.0 -> 2.1.0",
5 => "2.2.6 -> 2.7.7",
6 => "3.0.x",
- 7 => "3.1.x or greater",
+ 7 => "3.1.x -> 4.0.x",
+ 8 => "4.1.x or greater",
}
},
0x042253 => {
@@ -51,7 +53,8 @@ my %Data =
{
1 => "3.0.x",
2 => "3.1.x",
- 3 => "3.2.x or greater",
+ 3 => "3.2.x -> 4.0.x",
+ 4 => "4.1.x or greater",
}
},
) ;
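The dbinfo changes above extend its magic-number tables so that files written by DB 4.1 (btree format version 9, hash version 8, queue version 4) are reported by name instead of falling into the old "or greater" buckets. For orientation only, here is a hedged sketch of the kind of header probe such a tool performs; the byte offsets 12 (magic) and 16 (version) are assumptions about the generic DBMETA page layout, and the unpack format assumes a little-endian file, neither of which is taken from this patch:

    # Hedged sketch: peek at the metadata header of a Berkeley DB file.
    # Offsets and byte order are assumptions; dbinfo is the authoritative tool.
    open my $fh, '<', $ARGV[0] or die "cannot open $ARGV[0]: $!\n";
    binmode $fh;
    read($fh, my $meta, 20) == 20 or die "short read on $ARGV[0]\n";
    my ($magic, $version) = unpack 'V V', substr($meta, 12, 8);
    printf "magic 0x%06x, on-disk format version %d\n", $magic, $version;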
diff --git a/bdb/perl/BerkeleyDB/hints/dec_osf.pl b/bdb/perl/BerkeleyDB/hints/dec_osf.pl
new file mode 100644
index 00000000000..6d7faeed2e2
--- /dev/null
+++ b/bdb/perl/BerkeleyDB/hints/dec_osf.pl
@@ -0,0 +1 @@
+$self->{LIBS} = [ "@{$self->{LIBS}} -lpthreads" ];
diff --git a/bdb/perl.BerkeleyDB/hints/irix_6_5.pl b/bdb/perl/BerkeleyDB/hints/irix_6_5.pl
index b531673e6e0..b531673e6e0 100644
--- a/bdb/perl.BerkeleyDB/hints/irix_6_5.pl
+++ b/bdb/perl/BerkeleyDB/hints/irix_6_5.pl
diff --git a/bdb/perl.BerkeleyDB/hints/solaris.pl b/bdb/perl/BerkeleyDB/hints/solaris.pl
index ddd941d634a..ddd941d634a 100644
--- a/bdb/perl.BerkeleyDB/hints/solaris.pl
+++ b/bdb/perl/BerkeleyDB/hints/solaris.pl
diff --git a/bdb/perl/BerkeleyDB/mkconsts b/bdb/perl/BerkeleyDB/mkconsts
new file mode 100644
index 00000000000..7e0964333cc
--- /dev/null
+++ b/bdb/perl/BerkeleyDB/mkconsts
@@ -0,0 +1,770 @@
+#!/usr/bin/perl
+
+use ExtUtils::Constant qw(WriteConstants);
+
+use constant DEFINE => 'define' ;
+use constant STRING => 'string' ;
+use constant IGNORE => 'ignore' ;
+
+%constants = (
+
+ #########
+ # 2.0.0
+ #########
+
+ DBM_INSERT => IGNORE,
+ DBM_REPLACE => IGNORE,
+ DBM_SUFFIX => IGNORE,
+ DB_AFTER => DEFINE,
+ DB_AM_DUP => IGNORE,
+ DB_AM_INMEM => IGNORE,
+ DB_AM_LOCKING => IGNORE,
+ DB_AM_LOGGING => IGNORE,
+ DB_AM_MLOCAL => IGNORE,
+ DB_AM_PGDEF => IGNORE,
+ DB_AM_RDONLY => IGNORE,
+ DB_AM_RECOVER => IGNORE,
+ DB_AM_SWAP => IGNORE,
+ DB_AM_TXN => IGNORE,
+ DB_APP_INIT => DEFINE,
+ DB_BEFORE => DEFINE,
+ DB_BTREEMAGIC => DEFINE,
+ DB_BTREEVERSION => DEFINE,
+ DB_BT_DELIMITER => IGNORE,
+ DB_BT_EOF => IGNORE,
+ DB_BT_FIXEDLEN => IGNORE,
+ DB_BT_PAD => IGNORE,
+ DB_BT_SNAPSHOT => IGNORE,
+ DB_CHECKPOINT => DEFINE,
+ DB_CREATE => DEFINE,
+ DB_CURRENT => DEFINE,
+ DB_DBT_INTERNAL => IGNORE,
+ DB_DBT_MALLOC => IGNORE,
+ DB_DBT_PARTIAL => IGNORE,
+ DB_DBT_USERMEM => IGNORE,
+ DB_DELETED => DEFINE,
+ DB_DELIMITER => DEFINE,
+ DB_DUP => DEFINE,
+ DB_EXCL => DEFINE,
+ DB_FIRST => DEFINE,
+ DB_FIXEDLEN => DEFINE,
+ DB_FLUSH => DEFINE,
+ DB_HASHMAGIC => DEFINE,
+ DB_HASHVERSION => DEFINE,
+ DB_HS_DIRTYMETA => IGNORE,
+ DB_INCOMPLETE => DEFINE,
+ DB_INIT_LOCK => DEFINE,
+ DB_INIT_LOG => DEFINE,
+ DB_INIT_MPOOL => DEFINE,
+ DB_INIT_TXN => DEFINE,
+ DB_KEYEXIST => DEFINE,
+ DB_KEYFIRST => DEFINE,
+ DB_KEYLAST => DEFINE,
+ DB_LAST => DEFINE,
+ DB_LOCKMAGIC => DEFINE,
+ DB_LOCKVERSION => DEFINE,
+ DB_LOCK_DEADLOCK => DEFINE,
+ DB_LOCK_NOTGRANTED => DEFINE,
+ DB_LOCK_NOTHELD => DEFINE,
+ DB_LOCK_NOWAIT => DEFINE,
+ DB_LOCK_RIW_N => DEFINE,
+ DB_LOCK_RW_N => DEFINE,
+ DB_LOGMAGIC => DEFINE,
+ DB_LOGVERSION => DEFINE,
+ DB_MAX_PAGES => DEFINE,
+ DB_MAX_RECORDS => DEFINE,
+ DB_MPOOL_CLEAN => DEFINE,
+ DB_MPOOL_CREATE => DEFINE,
+ DB_MPOOL_DIRTY => DEFINE,
+ DB_MPOOL_DISCARD => DEFINE,
+ DB_MPOOL_LAST => DEFINE,
+ DB_MPOOL_NEW => DEFINE,
+ DB_MPOOL_PRIVATE => DEFINE,
+ DB_MUTEXDEBUG => DEFINE,
+ DB_NEEDSPLIT => DEFINE,
+ DB_NEXT => DEFINE,
+ DB_NOOVERWRITE => DEFINE,
+ DB_NORECURSE => DEFINE,
+ DB_NOSYNC => DEFINE,
+ DB_NOTFOUND => DEFINE,
+ DB_PAD => DEFINE,
+ DB_PREV => DEFINE,
+ DB_RDONLY => DEFINE,
+ DB_REGISTERED => DEFINE,
+ DB_RE_MODIFIED => IGNORE,
+ DB_SET => DEFINE,
+ DB_SET_RANGE => DEFINE,
+ DB_SNAPSHOT => DEFINE,
+ DB_SWAPBYTES => DEFINE,
+ DB_TRUNCATE => DEFINE,
+ DB_TXNMAGIC => DEFINE,
+ DB_TXNVERSION => DEFINE,
+ DB_TXN_BACKWARD_ROLL => DEFINE,
+ DB_TXN_FORWARD_ROLL => DEFINE,
+ DB_TXN_LOCK_2PL => DEFINE,
+ DB_TXN_LOCK_MASK => DEFINE,
+ DB_TXN_LOCK_OPTIMISTIC => DEFINE,
+ DB_TXN_LOG_MASK => DEFINE,
+ DB_TXN_LOG_REDO => DEFINE,
+ DB_TXN_LOG_UNDO => DEFINE,
+ DB_TXN_LOG_UNDOREDO => DEFINE,
+ DB_TXN_OPENFILES => DEFINE,
+ DB_TXN_REDO => DEFINE,
+ DB_TXN_UNDO => DEFINE,
+ DB_USE_ENVIRON => DEFINE,
+ DB_USE_ENVIRON_ROOT => DEFINE,
+ DB_VERSION_MAJOR => DEFINE,
+ DB_VERSION_MINOR => DEFINE,
+ DB_VERSION_PATCH => DEFINE,
+ DB_VERSION_STRING => STRING,
+ _DB_H_ => IGNORE,
+ __BIT_TYPES_DEFINED__ => IGNORE,
+ const => IGNORE,
+
+ # enum DBTYPE
+ DB_BTREE => '2.0.0',
+ DB_HASH => '2.0.0',
+ DB_RECNO => '2.0.0',
+ DB_UNKNOWN => '2.0.0',
+
+ # enum db_lockop_t
+ DB_LOCK_DUMP => '2.0.0',
+ DB_LOCK_GET => '2.0.0',
+ DB_LOCK_PUT => '2.0.0',
+ DB_LOCK_PUT_ALL => '2.0.0',
+ DB_LOCK_PUT_OBJ => '2.0.0',
+
+ # enum db_lockmode_t
+ DB_LOCK_NG => IGNORE, # 2.0.0
+ DB_LOCK_READ => IGNORE, # 2.0.0
+ DB_LOCK_WRITE => IGNORE, # 2.0.0
+ DB_LOCK_IREAD => IGNORE, # 2.0.0
+ DB_LOCK_IWRITE => IGNORE, # 2.0.0
+ DB_LOCK_IWR => IGNORE, # 2.0.0
+
+ # enum ACTION
+ FIND => IGNORE, # 2.0.0
+ ENTER => IGNORE, # 2.0.0
+
+ #########
+ # 2.0.3
+ #########
+
+ DB_SEQUENTIAL => DEFINE,
+ DB_TEMPORARY => DEFINE,
+
+ #########
+ # 2.1.0
+ #########
+
+ DB_NOMMAP => DEFINE,
+
+ #########
+ # 2.2.6
+ #########
+
+ DB_AM_THREAD => IGNORE,
+ DB_ARCH_ABS => DEFINE,
+ DB_ARCH_DATA => DEFINE,
+ DB_ARCH_LOG => DEFINE,
+ DB_LOCK_CONFLICT => DEFINE,
+ DB_LOCK_DEFAULT => DEFINE,
+ DB_LOCK_NORUN => DEFINE,
+ DB_LOCK_OLDEST => DEFINE,
+ DB_LOCK_RANDOM => DEFINE,
+ DB_LOCK_YOUNGEST => DEFINE,
+ DB_RECOVER => DEFINE,
+ DB_RECOVER_FATAL => DEFINE,
+ DB_THREAD => DEFINE,
+ DB_TXN_NOSYNC => DEFINE,
+
+ #########
+ # 2.3.0
+ #########
+
+ DB_BTREEOLDVER => DEFINE,
+ DB_BT_RECNUM => IGNORE,
+ DB_FILE_ID_LEN => DEFINE,
+ DB_GETREC => DEFINE,
+ DB_HASHOLDVER => DEFINE,
+ DB_KEYEMPTY => DEFINE,
+ DB_LOGOLDVER => DEFINE,
+ DB_RECNUM => DEFINE,
+ DB_RECORDCOUNT => DEFINE,
+ DB_RENUMBER => DEFINE,
+ DB_RE_DELIMITER => IGNORE,
+ DB_RE_FIXEDLEN => IGNORE,
+ DB_RE_PAD => IGNORE,
+ DB_RE_RENUMBER => IGNORE,
+ DB_RE_SNAPSHOT => IGNORE,
+
+ #########
+ # 2.3.1
+ #########
+
+ DB_GET_RECNO => DEFINE,
+ DB_SET_RECNO => DEFINE,
+
+ #########
+ # 2.3.3
+ #########
+
+ DB_APPEND => DEFINE,
+
+ #########
+ # 2.3.6
+ #########
+
+ DB_TXN_CKP => DEFINE,
+
+ #########
+ # 2.3.11
+ #########
+
+ DB_ENV_APPINIT => DEFINE,
+ DB_ENV_STANDALONE => DEFINE,
+ DB_ENV_THREAD => DEFINE,
+
+ #########
+ # 2.3.12
+ #########
+
+ DB_FUNC_CALLOC => IGNORE,
+ DB_FUNC_CLOSE => IGNORE,
+ DB_FUNC_DIRFREE => IGNORE,
+ DB_FUNC_DIRLIST => IGNORE,
+ DB_FUNC_EXISTS => IGNORE,
+ DB_FUNC_FREE => IGNORE,
+ DB_FUNC_FSYNC => IGNORE,
+ DB_FUNC_IOINFO => IGNORE,
+ DB_FUNC_MALLOC => IGNORE,
+ DB_FUNC_MAP => IGNORE,
+ DB_FUNC_OPEN => IGNORE,
+ DB_FUNC_READ => IGNORE,
+ DB_FUNC_REALLOC => IGNORE,
+ DB_FUNC_SEEK => IGNORE,
+ DB_FUNC_SLEEP => IGNORE,
+ DB_FUNC_STRDUP => IGNORE,
+ DB_FUNC_UNLINK => IGNORE,
+ DB_FUNC_UNMAP => IGNORE,
+ DB_FUNC_WRITE => IGNORE,
+ DB_FUNC_YIELD => IGNORE,
+
+ #########
+ # 2.3.14
+ #########
+
+ DB_TSL_SPINS => IGNORE,
+
+ #########
+ # 2.3.16
+ #########
+
+ DB_DBM_HSEARCH => IGNORE,
+ firstkey => IGNORE,
+ hdestroy => IGNORE,
+
+ #########
+ # 2.4.10
+ #########
+
+ DB_CURLSN => DEFINE,
+ DB_FUNC_RUNLINK => IGNORE,
+ DB_REGION_ANON => DEFINE,
+ DB_REGION_INIT => DEFINE,
+ DB_REGION_NAME => DEFINE,
+ DB_TXN_LOCK_OPTIMIST => DEFINE,
+ __CURRENTLY_UNUSED => IGNORE,
+
+ # enum db_status_t
+ DB_LSTAT_ABORTED => IGNORE, # 2.4.10
+ DB_LSTAT_ERR => IGNORE, # 2.4.10
+ DB_LSTAT_FREE => IGNORE, # 2.4.10
+ DB_LSTAT_HELD => IGNORE, # 2.4.10
+ DB_LSTAT_NOGRANT => IGNORE, # 2.4.10
+ DB_LSTAT_PENDING => IGNORE, # 2.4.10
+ DB_LSTAT_WAITING => IGNORE, # 2.4.10
+
+ #########
+ # 2.4.14
+ #########
+
+ DB_MUTEXLOCKS => DEFINE,
+ DB_PAGEYIELD => DEFINE,
+ __UNUSED_100 => IGNORE,
+ __UNUSED_4000 => IGNORE,
+
+ #########
+ # 2.5.2
+ #########
+
+ DBC_CONTINUE => IGNORE,
+ DBC_KEYSET => IGNORE,
+ DBC_RECOVER => IGNORE,
+ DBC_RMW => IGNORE,
+ DB_DBM_ERROR => IGNORE,
+ DB_GET_BOTH => DEFINE,
+ DB_NEXT_DUP => DEFINE,
+ DB_OPFLAGS_MASK => DEFINE,
+ DB_RMW => DEFINE,
+ DB_RUNRECOVERY => DEFINE,
+ dbmclose => IGNORE,
+
+ #########
+ # 2.5.9
+ #########
+
+ DB_DUPSORT => DEFINE,
+ DB_JOIN_ITEM => DEFINE,
+
+ #########
+ # 2.6.4
+ #########
+
+ DBC_WRITER => IGNORE,
+ DB_AM_CDB => IGNORE,
+ DB_ENV_CDB => DEFINE,
+ DB_INIT_CDB => DEFINE,
+ DB_LOCK_UPGRADE => DEFINE,
+ DB_WRITELOCK => DEFINE,
+
+ #########
+ # 2.7.1
+ #########
+
+
+ # enum db_lockop_t
+ DB_LOCK_INHERIT => '2.7.1',
+
+ #########
+ # 2.7.7
+ #########
+
+ DB_FCNTL_LOCKING => DEFINE,
+
+ #########
+ # 3.0.55
+ #########
+
+ DBC_WRITECURSOR => IGNORE,
+ DB_AM_DISCARD => IGNORE,
+ DB_AM_SUBDB => IGNORE,
+ DB_BT_REVSPLIT => IGNORE,
+ DB_CONSUME => DEFINE,
+ DB_CXX_NO_EXCEPTIONS => DEFINE,
+ DB_DBT_REALLOC => IGNORE,
+ DB_DUPCURSOR => DEFINE,
+ DB_ENV_CREATE => DEFINE,
+ DB_ENV_DBLOCAL => DEFINE,
+ DB_ENV_LOCKDOWN => DEFINE,
+ DB_ENV_LOCKING => DEFINE,
+ DB_ENV_LOGGING => DEFINE,
+ DB_ENV_NOMMAP => DEFINE,
+ DB_ENV_OPEN_CALLED => DEFINE,
+ DB_ENV_PRIVATE => DEFINE,
+ DB_ENV_SYSTEM_MEM => DEFINE,
+ DB_ENV_TXN => DEFINE,
+ DB_ENV_TXN_NOSYNC => DEFINE,
+ DB_ENV_USER_ALLOC => DEFINE,
+ DB_FORCE => DEFINE,
+ DB_LOCKDOWN => DEFINE,
+ DB_LOCK_RECORD => DEFINE,
+ DB_LOGFILEID_INVALID => DEFINE,
+ DB_MPOOL_NEW_GROUP => DEFINE,
+ DB_NEXT_NODUP => DEFINE,
+ DB_OK_BTREE => DEFINE,
+ DB_OK_HASH => DEFINE,
+ DB_OK_QUEUE => DEFINE,
+ DB_OK_RECNO => DEFINE,
+ DB_OLD_VERSION => DEFINE,
+ DB_OPEN_CALLED => DEFINE,
+ DB_PAGE_LOCK => DEFINE,
+ DB_POSITION => DEFINE,
+ DB_POSITIONI => DEFINE,
+ DB_PRIVATE => DEFINE,
+ DB_QAMMAGIC => DEFINE,
+ DB_QAMOLDVER => DEFINE,
+ DB_QAMVERSION => DEFINE,
+ DB_RECORD_LOCK => DEFINE,
+ DB_REVSPLITOFF => DEFINE,
+ DB_SYSTEM_MEM => DEFINE,
+ DB_TEST_POSTLOG => DEFINE,
+ DB_TEST_POSTLOGMETA => DEFINE,
+ DB_TEST_POSTOPEN => DEFINE,
+ DB_TEST_POSTRENAME => DEFINE,
+ DB_TEST_POSTSYNC => DEFINE,
+ DB_TEST_PREOPEN => DEFINE,
+ DB_TEST_PRERENAME => DEFINE,
+ DB_TXN_NOWAIT => DEFINE,
+ DB_TXN_SYNC => DEFINE,
+ DB_UPGRADE => DEFINE,
+ DB_VERB_CHKPOINT => DEFINE,
+ DB_VERB_DEADLOCK => DEFINE,
+ DB_VERB_RECOVERY => DEFINE,
+ DB_VERB_WAITSFOR => DEFINE,
+ DB_WRITECURSOR => DEFINE,
+ DB_XA_CREATE => DEFINE,
+
+ # enum DBTYPE
+ DB_QUEUE => '3.0.55',
+
+ #########
+ # 3.1.12
+ #########
+
+ DBC_ACTIVE => IGNORE,
+ DBC_OPD => IGNORE,
+ DBC_TRANSIENT => IGNORE,
+ DBC_WRITEDUP => IGNORE,
+ DB_AGGRESSIVE => DEFINE,
+ DB_AM_DUPSORT => IGNORE,
+ DB_CACHED_COUNTS => DEFINE,
+ DB_CLIENT => DEFINE,
+ DB_DBT_DUPOK => IGNORE,
+ DB_DBT_ISSET => IGNORE,
+ DB_ENV_RPCCLIENT => DEFINE,
+ DB_GET_BOTHC => DEFINE,
+ DB_JOIN_NOSORT => DEFINE,
+ DB_NODUPDATA => DEFINE,
+ DB_NOORDERCHK => DEFINE,
+ DB_NOSERVER => DEFINE,
+ DB_NOSERVER_HOME => DEFINE,
+ DB_NOSERVER_ID => DEFINE,
+ DB_ODDFILESIZE => DEFINE,
+ DB_ORDERCHKONLY => DEFINE,
+ DB_PREV_NODUP => DEFINE,
+ DB_PR_HEADERS => DEFINE,
+ DB_PR_PAGE => DEFINE,
+ DB_PR_RECOVERYTEST => DEFINE,
+ DB_RDWRMASTER => DEFINE,
+ DB_SALVAGE => DEFINE,
+ DB_VERIFY_BAD => DEFINE,
+ DB_VERIFY_FATAL => DEFINE,
+ DB_VRFY_FLAGMASK => DEFINE,
+
+ # enum db_recops
+ DB_TXN_ABORT => '3.1.12',
+ DB_TXN_BACKWARD_ROLL => '3.1.12',
+ DB_TXN_FORWARD_ROLL => '3.1.12',
+ DB_TXN_OPENFILES => '3.1.12',
+
+ #########
+ # 3.2.3
+ #########
+
+ DBC_COMPENSATE => IGNORE,
+ DB_AM_VERIFYING => IGNORE,
+ DB_CDB_ALLDB => DEFINE,
+ DB_ENV_CDB_ALLDB => DEFINE,
+ DB_EXTENT => DEFINE,
+ DB_JOINENV => DEFINE,
+ DB_LOCK_SWITCH => DEFINE,
+ DB_MPOOL_EXTENT => DEFINE,
+ DB_REGION_MAGIC => DEFINE,
+ DB_UNRESOLVED_CHILD => DEFINE,
+ DB_VERIFY => DEFINE,
+
+ # enum db_notices
+ DB_NOTICE_LOGFILE_CHANGED => IGNORE, # 3.2.3
+
+ #########
+ # 3.2.6
+ #########
+
+ DB_ALREADY_ABORTED => DEFINE,
+ DB_CONSUME_WAIT => DEFINE,
+ DB_JAVA_CALLBACK => DEFINE,
+ DB_TEST_POSTEXTDELETE => DEFINE,
+ DB_TEST_POSTEXTOPEN => DEFINE,
+ DB_TEST_POSTEXTUNLINK => DEFINE,
+ DB_TEST_PREEXTDELETE => DEFINE,
+ DB_TEST_PREEXTOPEN => DEFINE,
+ DB_TEST_PREEXTUNLINK => DEFINE,
+
+ # enum db_lockmode_t
+ DB_LOCK_WAIT => IGNORE, # 3.2.6
+
+ #########
+ # 3.3.4
+ #########
+
+ DBC_DIRTY_READ => IGNORE,
+ DBC_MULTIPLE => IGNORE,
+ DBC_MULTIPLE_KEY => IGNORE,
+ DB_AM_DIRTY => IGNORE,
+ DB_AM_SECONDARY => IGNORE,
+ DB_COMMIT => DEFINE,
+ DB_DBT_APPMALLOC => IGNORE,
+ DB_DIRTY_READ => DEFINE,
+ DB_DONOTINDEX => DEFINE,
+ DB_ENV_PANIC_OK => DEFINE,
+ DB_ENV_RPCCLIENT_GIVEN => DEFINE,
+ DB_FAST_STAT => DEFINE,
+ DB_LOCK_MAXLOCKS => DEFINE,
+ DB_LOCK_MINLOCKS => DEFINE,
+ DB_LOCK_MINWRITE => DEFINE,
+ DB_MULTIPLE => DEFINE,
+ DB_MULTIPLE_KEY => DEFINE,
+ DB_PAGE_NOTFOUND => DEFINE,
+ DB_RPC_SERVERPROG => DEFINE,
+ DB_RPC_SERVERVERS => DEFINE,
+ DB_UPDATE_SECONDARY => DEFINE,
+ DB_XIDDATASIZE => DEFINE,
+
+ # enum db_recops
+ DB_TXN_POPENFILES => '3.3.4',
+
+ # enum db_lockop_t
+ DB_LOCK_UPGRADE_WRITE => '3.3.4',
+
+ # enum db_lockmode_t
+ DB_LOCK_DIRTY => IGNORE, # 3.3.4
+ DB_LOCK_WWRITE => IGNORE, # 3.3.4
+
+ #########
+ # 3.3.11
+ #########
+
+ DB_SECONDARY_BAD => DEFINE,
+ DB_SURPRISE_KID => DEFINE,
+ DB_TEST_POSTDESTROY => DEFINE,
+ DB_TEST_PREDESTROY => DEFINE,
+
+ #########
+ # 4.0.7
+ #########
+
+ DB_APPLY_LOGREG => DEFINE,
+ DB_BROADCAST_EID => DEFINE,
+ DB_CL_WRITER => DEFINE,
+ DB_ENV_NOLOCKING => DEFINE,
+ DB_ENV_NOPANIC => DEFINE,
+ DB_ENV_REGION_INIT => DEFINE,
+ DB_ENV_REP_CLIENT => DEFINE,
+ DB_ENV_REP_LOGSONLY => DEFINE,
+ DB_ENV_REP_MASTER => DEFINE,
+ DB_ENV_YIELDCPU => DEFINE,
+ DB_GET_BOTH_RANGE => DEFINE,
+ DB_INVALID_EID => DEFINE,
+ DB_LOCK_EXPIRE => DEFINE,
+ DB_LOCK_FREE_LOCKER => DEFINE,
+ DB_LOCK_SET_TIMEOUT => DEFINE,
+ DB_LOGC_BUF_SIZE => DEFINE,
+ DB_LOG_DISK => DEFINE,
+ DB_LOG_LOCKED => DEFINE,
+ DB_LOG_SILENT_ERR => DEFINE,
+ DB_NOLOCKING => DEFINE,
+ DB_NOPANIC => DEFINE,
+ DB_PANIC_ENVIRONMENT => DEFINE,
+ DB_REP_CLIENT => DEFINE,
+ DB_REP_DUPMASTER => DEFINE,
+ DB_REP_HOLDELECTION => DEFINE,
+ DB_REP_LOGSONLY => DEFINE,
+ DB_REP_MASTER => DEFINE,
+ DB_REP_NEWMASTER => DEFINE,
+ DB_REP_NEWSITE => DEFINE,
+ DB_REP_OUTDATED => DEFINE,
+ DB_REP_PERMANENT => DEFINE,
+ DB_REP_UNAVAIL => DEFINE,
+ DB_SET_LOCK_TIMEOUT => DEFINE,
+ DB_SET_TXN_NOW => DEFINE,
+ DB_SET_TXN_TIMEOUT => DEFINE,
+ DB_STAT_CLEAR => DEFINE,
+ DB_TIMEOUT => DEFINE,
+ DB_YIELDCPU => DEFINE,
+ MP_FLUSH => IGNORE,
+ MP_OPEN_CALLED => IGNORE,
+ MP_READONLY => IGNORE,
+ MP_UPGRADE => IGNORE,
+ MP_UPGRADE_FAIL => IGNORE,
+ TXN_CHILDCOMMIT => IGNORE,
+ TXN_COMPENSATE => IGNORE,
+ TXN_DIRTY_READ => IGNORE,
+ TXN_LOCKTIMEOUT => IGNORE,
+ TXN_MALLOC => IGNORE,
+ TXN_NOSYNC => IGNORE,
+ TXN_NOWAIT => IGNORE,
+ TXN_SYNC => IGNORE,
+
+ # enum db_recops
+ DB_TXN_APPLY => '4.0.7',
+
+ # enum db_lockop_t
+ DB_LOCK_GET_TIMEOUT => '4.0.7',
+ DB_LOCK_PUT_READ => '4.0.7',
+ DB_LOCK_TIMEOUT => '4.0.7',
+
+ # enum db_status_t
+ DB_LSTAT_EXPIRED => IGNORE, # 4.0.7
+
+ #########
+ # 4.0.14
+ #########
+
+ DB_EID_BROADCAST => DEFINE,
+ DB_EID_INVALID => DEFINE,
+ DB_VERB_REPLICATION => DEFINE,
+
+ #########
+ # 4.1.17
+ #########
+
+ DBC_OWN_LID => IGNORE,
+ DB_AM_CHKSUM => IGNORE,
+ DB_AM_CL_WRITER => IGNORE,
+ DB_AM_COMPENSATE => IGNORE,
+ DB_AM_CREATED => IGNORE,
+ DB_AM_CREATED_MSTR => IGNORE,
+ DB_AM_DBM_ERROR => IGNORE,
+ DB_AM_DELIMITER => IGNORE,
+ DB_AM_ENCRYPT => IGNORE,
+ DB_AM_FIXEDLEN => IGNORE,
+ DB_AM_IN_RENAME => IGNORE,
+ DB_AM_OPEN_CALLED => IGNORE,
+ DB_AM_PAD => IGNORE,
+ DB_AM_RECNUM => IGNORE,
+ DB_AM_RENUMBER => IGNORE,
+ DB_AM_REVSPLITOFF => IGNORE,
+ DB_AM_SNAPSHOT => IGNORE,
+ DB_AUTO_COMMIT => DEFINE,
+ DB_CHKSUM_SHA1 => DEFINE,
+ DB_DIRECT => DEFINE,
+ DB_DIRECT_DB => DEFINE,
+ DB_DIRECT_LOG => DEFINE,
+ DB_ENCRYPT => DEFINE,
+ DB_ENCRYPT_AES => DEFINE,
+ DB_ENV_AUTO_COMMIT => DEFINE,
+ DB_ENV_DIRECT_DB => DEFINE,
+ DB_ENV_DIRECT_LOG => DEFINE,
+ DB_ENV_FATAL => DEFINE,
+ DB_ENV_OVERWRITE => DEFINE,
+ DB_ENV_TXN_WRITE_NOSYNC => DEFINE,
+ DB_HANDLE_LOCK => DEFINE,
+ DB_LOCK_NOTEXIST => DEFINE,
+ DB_LOCK_REMOVE => DEFINE,
+ DB_NOCOPY => DEFINE,
+ DB_OVERWRITE => DEFINE,
+ DB_PERMANENT => DEFINE,
+ DB_PRINTABLE => DEFINE,
+ DB_RENAMEMAGIC => DEFINE,
+ DB_TEST_ELECTINIT => DEFINE,
+ DB_TEST_ELECTSEND => DEFINE,
+ DB_TEST_ELECTVOTE1 => DEFINE,
+ DB_TEST_ELECTVOTE2 => DEFINE,
+ DB_TEST_ELECTWAIT1 => DEFINE,
+ DB_TEST_ELECTWAIT2 => DEFINE,
+ DB_TEST_SUBDB_LOCKS => DEFINE,
+ DB_TXN_LOCK => DEFINE,
+ DB_TXN_WRITE_NOSYNC => DEFINE,
+ DB_WRITEOPEN => DEFINE,
+ DB_WRNOSYNC => DEFINE,
+ _DB_EXT_PROT_IN_ => IGNORE,
+
+ # enum db_lockop_t
+ DB_LOCK_TRADE => '4.1.17',
+
+ # enum db_status_t
+ DB_LSTAT_NOTEXIST => IGNORE, # 4.1.17
+
+ # enum DB_CACHE_PRIORITY
+ DB_PRIORITY_VERY_LOW => '4.1.17',
+ DB_PRIORITY_LOW => '4.1.17',
+ DB_PRIORITY_DEFAULT => '4.1.17',
+ DB_PRIORITY_HIGH => '4.1.17',
+ DB_PRIORITY_VERY_HIGH => '4.1.17',
+
+ # enum db_recops
+ DB_TXN_BACKWARD_ALLOC => '4.1.17',
+ DB_TXN_GETPGNOS => '4.1.17',
+ DB_TXN_PRINT => '4.1.17',
+
+ ) ;
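# How the values above are used (see OutputXS and OutputPM below): IGNORE
# entries are skipped, DEFINE entries become plain numeric constants,
# STRING entries become PV constants, and version-string entries such as
# '3.1.12' are wrapped in a DB_VERSION_MAJOR/MINOR/PATCH guard built by
# enum_Macro().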
+
+sub enum_Macro
+{
+ my $str = shift ;
+ my ($major, $minor, $patch) = split /\./, $str ;
+
+ my $macro =
+ "#if (DB_VERSION_MAJOR > $major) || \\\n" .
+ " (DB_VERSION_MAJOR == $major && DB_VERSION_MINOR > $minor) || \\\n" .
+ " (DB_VERSION_MAJOR == $major && DB_VERSION_MINOR == $minor && \\\n" .
+ " DB_VERSION_PATCH >= $patch)\n" ;
+
+ return $macro;
+
+}
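# A sketch of what enum_Macro() produces (illustrative): for a constant
# tagged '3.1.12' above, enum_Macro('3.1.12') returns
#
#     #if (DB_VERSION_MAJOR > 3) || \
#         (DB_VERSION_MAJOR == 3 && DB_VERSION_MINOR > 1) || \
#         (DB_VERSION_MAJOR == 3 && DB_VERSION_MINOR == 1 && \
#          DB_VERSION_PATCH >= 12)
#
# which OutputXS() pairs with a closing "#endif\n" when handing the name to
# WriteConstants().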
+
+sub OutputXS
+{
+
+ my @names = () ;
+
+ foreach my $key (sort keys %constants)
+ {
+ my $val = $constants{$key} ;
+ next if $val eq IGNORE;
+
+ if ($val eq STRING)
+ { push @names, { name => $key, type => "PV" } }
+ elsif ($val eq DEFINE)
+ { push @names, $key }
+ else
+ { push @names, { name => $key, macro => [enum_Macro($val), "#endif\n"] } }
+ }
+
+ warn "Updating constants.xs & constants.h...\n";
+ WriteConstants(
+ NAME => BerkeleyDB,
+ NAMES => \@names,
+ C_FILE => 'constants.h',
+ XS_FILE => 'constants.xs',
+ ) ;
+}
+
+sub OutputPM
+{
+ my $filename = 'BerkeleyDB.pm';
+ warn "Updating $filename...\n";
+    open IN, "<$filename" or die "Cannot open $filename: $!\n";
+    open OUT, ">$filename.tmp" or die "Cannot open $filename.tmp: $!\n";
+
+ my $START = '@EXPORT = qw(' ;
+ my $START_re = quotemeta $START ;
+ my $END = ');';
+ my $END_re = quotemeta $END ;
+
+ # skip to the @EXPORT declaration
+ OUTER: while (<IN>)
+ {
+ if ( /^\s*$START_re/ )
+ {
+ # skip to the end marker.
+ while (<IN>)
+ { last OUTER if /^\s*$END_re/ }
+ }
+ print OUT ;
+ }
+
+ print OUT "$START\n";
+ foreach my $key (sort keys %constants)
+ {
+ next if $constants{$key} eq IGNORE;
+ print OUT "\t$key\n";
+ }
+ print OUT "\t$END\n";
+
+ while (<IN>)
+ {
+ print OUT ;
+ }
+
+ close IN;
+ close OUT;
+
+    rename $filename, "$filename.bak" or die "Cannot rename $filename: $!\n" ;
+    rename "$filename.tmp", $filename or die "Cannot rename $filename.tmp: $!\n" ;
+}
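# A sketch of OutputPM()'s effect (entries shown are examples from the
# table above): everything between the '@EXPORT = qw(' and ');' markers in
# BerkeleyDB.pm is replaced with one line per constant whose value is not
# IGNORE, e.g.
#
#     @EXPORT = qw(
#         DB_AUTO_COMMIT
#         DB_ENCRYPT_AES
#         ...
#         );
#
# and the previous file is kept as BerkeleyDB.pm.bak.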
+
+OutputXS() ;
+OutputPM() ;
diff --git a/bdb/perl.BerkeleyDB/mkpod b/bdb/perl/BerkeleyDB/mkpod
index 44bbf3fbf4f..44bbf3fbf4f 100755
--- a/bdb/perl.BerkeleyDB/mkpod
+++ b/bdb/perl/BerkeleyDB/mkpod
diff --git a/bdb/perl.BerkeleyDB/patches/5.004 b/bdb/perl/BerkeleyDB/patches/5.004
index 143ec95afbc..143ec95afbc 100644
--- a/bdb/perl.BerkeleyDB/patches/5.004
+++ b/bdb/perl/BerkeleyDB/patches/5.004
diff --git a/bdb/perl.BerkeleyDB/patches/5.004_01 b/bdb/perl/BerkeleyDB/patches/5.004_01
index 1b05eb4e02b..1b05eb4e02b 100644
--- a/bdb/perl.BerkeleyDB/patches/5.004_01
+++ b/bdb/perl/BerkeleyDB/patches/5.004_01
diff --git a/bdb/perl.BerkeleyDB/patches/5.004_02 b/bdb/perl/BerkeleyDB/patches/5.004_02
index 238f8737941..238f8737941 100644
--- a/bdb/perl.BerkeleyDB/patches/5.004_02
+++ b/bdb/perl/BerkeleyDB/patches/5.004_02
diff --git a/bdb/perl.BerkeleyDB/patches/5.004_03 b/bdb/perl/BerkeleyDB/patches/5.004_03
index 06331eac922..06331eac922 100644
--- a/bdb/perl.BerkeleyDB/patches/5.004_03
+++ b/bdb/perl/BerkeleyDB/patches/5.004_03
diff --git a/bdb/perl.BerkeleyDB/patches/5.004_04 b/bdb/perl/BerkeleyDB/patches/5.004_04
index a227dc700d9..a227dc700d9 100644
--- a/bdb/perl.BerkeleyDB/patches/5.004_04
+++ b/bdb/perl/BerkeleyDB/patches/5.004_04
diff --git a/bdb/perl.BerkeleyDB/patches/5.004_05 b/bdb/perl/BerkeleyDB/patches/5.004_05
index 51c8bf35009..51c8bf35009 100644
--- a/bdb/perl.BerkeleyDB/patches/5.004_05
+++ b/bdb/perl/BerkeleyDB/patches/5.004_05
diff --git a/bdb/perl.BerkeleyDB/patches/5.005 b/bdb/perl/BerkeleyDB/patches/5.005
index effee3e8275..effee3e8275 100644
--- a/bdb/perl.BerkeleyDB/patches/5.005
+++ b/bdb/perl/BerkeleyDB/patches/5.005
diff --git a/bdb/perl.BerkeleyDB/patches/5.005_01 b/bdb/perl/BerkeleyDB/patches/5.005_01
index 2a05dd545f6..2a05dd545f6 100644
--- a/bdb/perl.BerkeleyDB/patches/5.005_01
+++ b/bdb/perl/BerkeleyDB/patches/5.005_01
diff --git a/bdb/perl.BerkeleyDB/patches/5.005_02 b/bdb/perl/BerkeleyDB/patches/5.005_02
index 5dd57ddc03f..5dd57ddc03f 100644
--- a/bdb/perl.BerkeleyDB/patches/5.005_02
+++ b/bdb/perl/BerkeleyDB/patches/5.005_02
diff --git a/bdb/perl.BerkeleyDB/patches/5.005_03 b/bdb/perl/BerkeleyDB/patches/5.005_03
index 115f9f5b909..115f9f5b909 100644
--- a/bdb/perl.BerkeleyDB/patches/5.005_03
+++ b/bdb/perl/BerkeleyDB/patches/5.005_03
diff --git a/bdb/perl.BerkeleyDB/patches/5.6.0 b/bdb/perl/BerkeleyDB/patches/5.6.0
index 1f9b3b620de..1f9b3b620de 100644
--- a/bdb/perl.BerkeleyDB/patches/5.6.0
+++ b/bdb/perl/BerkeleyDB/patches/5.6.0
diff --git a/bdb/perl/BerkeleyDB/ppport.h b/bdb/perl/BerkeleyDB/ppport.h
new file mode 100644
index 00000000000..0887c2159a9
--- /dev/null
+++ b/bdb/perl/BerkeleyDB/ppport.h
@@ -0,0 +1,329 @@
+/* This file is based on output from
+ * Perl/Pollution/Portability Version 2.0000 */
+
+#ifndef _P_P_PORTABILITY_H_
+#define _P_P_PORTABILITY_H_
+
+#ifndef PERL_REVISION
+# ifndef __PATCHLEVEL_H_INCLUDED__
+# include "patchlevel.h"
+# endif
+# ifndef PERL_REVISION
+# define PERL_REVISION (5)
+ /* Replace: 1 */
+# define PERL_VERSION PATCHLEVEL
+# define PERL_SUBVERSION SUBVERSION
+ /* Replace PERL_PATCHLEVEL with PERL_VERSION */
+ /* Replace: 0 */
+# endif
+#endif
+
+#define PERL_BCDVERSION ((PERL_REVISION * 0x1000000L) + (PERL_VERSION * 0x1000L) + PERL_SUBVERSION)
+
+#ifndef ERRSV
+# define ERRSV perl_get_sv("@",FALSE)
+#endif
+
+#if (PERL_VERSION < 4) || ((PERL_VERSION == 4) && (PERL_SUBVERSION <= 5))
+/* Replace: 1 */
+# define PL_Sv Sv
+# define PL_compiling compiling
+# define PL_copline copline
+# define PL_curcop curcop
+# define PL_curstash curstash
+# define PL_defgv defgv
+# define PL_dirty dirty
+# define PL_hints hints
+# define PL_na na
+# define PL_perldb perldb
+# define PL_rsfp_filters rsfp_filters
+# define PL_rsfp rsfp
+# define PL_stdingv stdingv
+# define PL_sv_no sv_no
+# define PL_sv_undef sv_undef
+# define PL_sv_yes sv_yes
+/* Replace: 0 */
+#endif
+
+#ifndef pTHX
+# define pTHX
+# define pTHX_
+# define aTHX
+# define aTHX_
+#endif
+
+#ifndef PTR2IV
+# define PTR2IV(d) (IV)(d)
+#endif
+
+#ifndef INT2PTR
+# define INT2PTR(any,d) (any)(d)
+#endif
+
+#ifndef dTHR
+# ifdef WIN32
+# define dTHR extern int Perl___notused
+# else
+# define dTHR extern int errno
+# endif
+#endif
+
+#ifndef boolSV
+# define boolSV(b) ((b) ? &PL_sv_yes : &PL_sv_no)
+#endif
+
+#ifndef gv_stashpvn
+# define gv_stashpvn(str,len,flags) gv_stashpv(str,flags)
+#endif
+
+#ifndef newSVpvn
+# define newSVpvn(data,len) ((len) ? newSVpv ((data), (len)) : newSVpv ("", 0))
+#endif
+
+#ifndef newRV_inc
+/* Replace: 1 */
+# define newRV_inc(sv) newRV(sv)
+/* Replace: 0 */
+#endif
+
+/* DEFSV appears first in 5.004_56 */
+#ifndef DEFSV
+# define DEFSV GvSV(PL_defgv)
+#endif
+
+#ifndef SAVE_DEFSV
+# define SAVE_DEFSV SAVESPTR(GvSV(PL_defgv))
+#endif
+
+#ifndef newRV_noinc
+# ifdef __GNUC__
+# define newRV_noinc(sv) \
+ ({ \
+ SV *nsv = (SV*)newRV(sv); \
+ SvREFCNT_dec(sv); \
+ nsv; \
+ })
+# else
+# if defined(CRIPPLED_CC) || defined(USE_THREADS)
+static SV * newRV_noinc (SV * sv)
+{
+ SV *nsv = (SV*)newRV(sv);
+ SvREFCNT_dec(sv);
+ return nsv;
+}
+# else
+# define newRV_noinc(sv) \
+          ((PL_Sv=(SV*)newRV(sv), SvREFCNT_dec(sv), (SV*)PL_Sv))
+# endif
+# endif
+#endif
+
+/* Provide: newCONSTSUB */
+
+/* newCONSTSUB from IO.xs is in the core starting with 5.004_63 */
+#if (PERL_VERSION < 4) || ((PERL_VERSION == 4) && (PERL_SUBVERSION < 63))
+
+#if defined(NEED_newCONSTSUB)
+static
+#else
+extern void newCONSTSUB _((HV * stash, char * name, SV *sv));
+#endif
+
+#if defined(NEED_newCONSTSUB) || defined(NEED_newCONSTSUB_GLOBAL)
+void
+newCONSTSUB(stash,name,sv)
+HV *stash;
+char *name;
+SV *sv;
+{
+ U32 oldhints = PL_hints;
+ HV *old_cop_stash = PL_curcop->cop_stash;
+ HV *old_curstash = PL_curstash;
+ line_t oldline = PL_curcop->cop_line;
+ PL_curcop->cop_line = PL_copline;
+
+ PL_hints &= ~HINT_BLOCK_SCOPE;
+ if (stash)
+ PL_curstash = PL_curcop->cop_stash = stash;
+
+ newSUB(
+
+#if (PERL_VERSION < 3) || ((PERL_VERSION == 3) && (PERL_SUBVERSION < 22))
+ /* before 5.003_22 */
+ start_subparse(),
+#else
+# if (PERL_VERSION == 3) && (PERL_SUBVERSION == 22)
+ /* 5.003_22 */
+ start_subparse(0),
+# else
+ /* 5.003_23 onwards */
+ start_subparse(FALSE, 0),
+# endif
+#endif
+
+ newSVOP(OP_CONST, 0, newSVpv(name,0)),
+ newSVOP(OP_CONST, 0, &PL_sv_no), /* SvPV(&PL_sv_no) == "" -- GMB */
+ newSTATEOP(0, Nullch, newSVOP(OP_CONST, 0, sv))
+ );
+
+ PL_hints = oldhints;
+ PL_curcop->cop_stash = old_cop_stash;
+ PL_curstash = old_curstash;
+ PL_curcop->cop_line = oldline;
+}
+#endif
+
+#endif /* newCONSTSUB */
+
+
+#ifndef START_MY_CXT
+
+/*
+ * Boilerplate macros for initializing and accessing interpreter-local
+ * data from C. All statics in extensions should be reworked to use
+ * this, if you want to make the extension thread-safe. See ext/re/re.xs
+ * for an example of the use of these macros.
+ *
+ * Code that uses these macros is responsible for the following:
+ * 1. #define MY_CXT_KEY to a unique string, e.g. "DynaLoader_guts"
+ * 2. Declare a typedef named my_cxt_t that is a structure that contains
+ * all the data that needs to be interpreter-local.
+ * 3. Use the START_MY_CXT macro after the declaration of my_cxt_t.
+ * 4. Use the MY_CXT_INIT macro such that it is called exactly once
+ * (typically put in the BOOT: section).
+ * 5. Use the members of the my_cxt_t structure everywhere as
+ * MY_CXT.member.
+ * 6. Use the dMY_CXT macro (a declaration) in all the functions that
+ * access MY_CXT.
+ */
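/* A minimal sketch of the protocol above, using a hypothetical extension
 * "MyExt" (the key string and struct member are illustrative only, not
 * taken from any real module):
 *
 *     #define MY_CXT_KEY "MyExt::_guts"
 *
 *     typedef struct {
 *         int count;                    (interpreter-local data)
 *     } my_cxt_t;
 *
 *     START_MY_CXT
 *
 *     In the BOOT: section (exactly once):   MY_CXT_INIT;
 *     In any function that uses the data:    dMY_CXT;
 *                                            MY_CXT.count++;
 */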
+
+#if defined(MULTIPLICITY) || defined(PERL_OBJECT) || \
+ defined(PERL_CAPI) || defined(PERL_IMPLICIT_CONTEXT)
+
+/* This must appear in all extensions that define a my_cxt_t structure,
+ * right after the definition (i.e. at file scope). The non-threads
+ * case below uses it to declare the data as static. */
+#define START_MY_CXT
+
+#if PERL_REVISION == 5 && \
+ (PERL_VERSION < 4 || (PERL_VERSION == 4 && PERL_SUBVERSION < 68 ))
+/* Fetches the SV that keeps the per-interpreter data. */
+#define dMY_CXT_SV \
+ SV *my_cxt_sv = perl_get_sv(MY_CXT_KEY, FALSE)
+#else /* >= perl5.004_68 */
+#define dMY_CXT_SV \
+ SV *my_cxt_sv = *hv_fetch(PL_modglobal, MY_CXT_KEY, \
+ sizeof(MY_CXT_KEY)-1, TRUE)
+#endif /* < perl5.004_68 */
+
+/* This declaration should be used within all functions that use the
+ * interpreter-local data. */
+#define dMY_CXT \
+ dMY_CXT_SV; \
+ my_cxt_t *my_cxtp = INT2PTR(my_cxt_t*,SvUV(my_cxt_sv))
+
+/* Creates and zeroes the per-interpreter data.
+ * (We allocate my_cxtp in a Perl SV so that it will be released when
+ * the interpreter goes away.) */
+#define MY_CXT_INIT \
+ dMY_CXT_SV; \
+ /* newSV() allocates one more than needed */ \
+ my_cxt_t *my_cxtp = (my_cxt_t*)SvPVX(newSV(sizeof(my_cxt_t)-1));\
+ Zero(my_cxtp, 1, my_cxt_t); \
+ sv_setuv(my_cxt_sv, PTR2UV(my_cxtp))
+
+/* This macro must be used to access members of the my_cxt_t structure.
+ * e.g. MY_CXT.some_data */
+#define MY_CXT (*my_cxtp)
+
+/* Judicious use of these macros can reduce the number of times dMY_CXT
+ * is used. Use is similar to pTHX, aTHX etc. */
+#define pMY_CXT my_cxt_t *my_cxtp
+#define pMY_CXT_ pMY_CXT,
+#define _pMY_CXT ,pMY_CXT
+#define aMY_CXT my_cxtp
+#define aMY_CXT_ aMY_CXT,
+#define _aMY_CXT ,aMY_CXT
+
+#else /* single interpreter */
+
+#ifndef NOOP
+# define NOOP (void)0
+#endif
+
+#ifdef HASATTRIBUTE
+# define PERL_UNUSED_DECL __attribute__((unused))
+#else
+# define PERL_UNUSED_DECL
+#endif
+
+#ifndef dNOOP
+# define dNOOP extern int Perl___notused PERL_UNUSED_DECL
+#endif
+
+#define START_MY_CXT static my_cxt_t my_cxt;
+#define dMY_CXT_SV dNOOP
+#define dMY_CXT dNOOP
+#define MY_CXT_INIT NOOP
+#define MY_CXT my_cxt
+
+#define pMY_CXT void
+#define pMY_CXT_
+#define _pMY_CXT
+#define aMY_CXT
+#define aMY_CXT_
+#define _aMY_CXT
+
+#endif
+
+#endif /* START_MY_CXT */
+
+
+#ifndef DBM_setFilter
+
+/*
+ The DBM_setFilter & DBM_ckFilter macros are only used by
+ the *DB*_File modules
+*/
+
+#define DBM_setFilter(db_type,code) \
+ { \
+ if (db_type) \
+ RETVAL = sv_mortalcopy(db_type) ; \
+ ST(0) = RETVAL ; \
+ if (db_type && (code == &PL_sv_undef)) { \
+ SvREFCNT_dec(db_type) ; \
+ db_type = NULL ; \
+ } \
+ else if (code) { \
+ if (db_type) \
+ sv_setsv(db_type, code) ; \
+ else \
+ db_type = newSVsv(code) ; \
+ } \
+ }
+
+#define DBM_ckFilter(arg,type,name) \
+ if (db->type) { \
+ if (db->filtering) { \
+ croak("recursion detected in %s", name) ; \
+ } \
+ ENTER ; \
+ SAVETMPS ; \
+ SAVEINT(db->filtering) ; \
+ db->filtering = TRUE ; \
+ SAVESPTR(DEFSV) ; \
+ DEFSV = arg ; \
+ SvTEMP_off(arg) ; \
+ PUSHMARK(SP) ; \
+ PUTBACK ; \
+ (void) perl_call_sv(db->type, G_DISCARD); \
+ SPAGAIN ; \
+ PUTBACK ; \
+ FREETMPS ; \
+ LEAVE ; \
+ }
+
+#endif /* DBM_setFilter */
+
+#endif /* _P_P_PORTABILITY_H_ */
diff --git a/bdb/perl/BerkeleyDB/scan b/bdb/perl/BerkeleyDB/scan
new file mode 100644
index 00000000000..eb064950b2e
--- /dev/null
+++ b/bdb/perl/BerkeleyDB/scan
@@ -0,0 +1,229 @@
+#!/usr/local/bin/perl
+
+my $ignore_re = '^(' . join("|",
+ qw(
+ _
+ [a-z]
+ DBM
+ DBC
+ DB_AM_
+ DB_BT_
+ DB_RE_
+ DB_HS_
+ DB_FUNC_
+ DB_DBT_
+ DB_DBM
+ DB_TSL
+ MP
+ TXN
+ )) . ')' ;
+
+my %ignore_def = map {$_, 1} qw() ;
+
+%ignore_enums = map {$_, 1} qw( ACTION db_status_t db_notices db_lockmode_t ) ;
+
+my $filler = ' ' x 26 ;
+
+chdir "libraries" or die "Cannot chdir into './libraries': $!\n";
+
+foreach my $name (sort tuple glob "[2-9]*")
+{
+ my $inc = "$name/include/db.h" ;
+ next unless -f $inc ;
+
+ my $file = readFile($inc) ;
+ StripCommentsAndStrings($file) ;
+ my $result = scan($name, $file) ;
+ print "\n\t#########\n\t# $name\n\t#########\n\n$result"
+ if $result;
+}
+exit ;
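# The output printed above has the same shape as the %constants table that
# drives the XS/PM constant generation: a banner per db.h version followed
# by one padded "NAME => CLASS," line per newly seen symbol (plus
# "# enum ..." groups), for example:
#
#     #########
#     # 3.0.55
#     #########
#
#     DB_CONSUME                 => DEFINE,
#     DB_DBT_REALLOC             => IGNORE,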
+
+
+sub scan
+{
+ my $version = shift ;
+ my $file = shift ;
+
+ my %seen_define = () ;
+ my $result = "" ;
+
+ if (1) {
+ # Preprocess all tri-graphs
+ # including things stuck in quoted string constants.
+ $file =~ s/\?\?=/#/g; # | ??=| #|
+ $file =~ s/\?\?\!/|/g; # | ??!| ||
+ $file =~ s/\?\?'/^/g; # | ??'| ^|
+ $file =~ s/\?\?\(/[/g; # | ??(| [|
+ $file =~ s/\?\?\)/]/g; # | ??)| ]|
+ $file =~ s/\?\?\-/~/g; # | ??-| ~|
+ $file =~ s/\?\?\//\\/g; # | ??/| \|
+ $file =~ s/\?\?</{/g; # | ??<| {|
+ $file =~ s/\?\?>/}/g; # | ??>| }|
+ }
+
+ while ( $file =~ /^\s*#\s*define\s+([\$\w]+)\b(?!\()\s*(.*)/gm )
+ {
+ my $def = $1;
+ my $rest = $2;
+ my $ignore = 0 ;
+
+ $ignore = 1 if $ignore_def{$def} || $def =~ /$ignore_re/o ;
+
+ # Cannot do: (-1) and ((LHANDLE)3) are OK:
+ #print("Skip non-wordy $def => $rest\n"),
+
+ $rest =~ s/\s*$//;
+ #next if $rest =~ /[^\w\$]/;
+
+ #print "Matched $_ ($def)\n" ;
+
+ next if $before{$def} ++ ;
+
+ if ($ignore)
+ { $seen_define{$def} = 'IGNORE' }
+ elsif ($rest =~ /"/)
+ { $seen_define{$def} = 'STRING' }
+ else
+ { $seen_define{$def} = 'DEFINE' }
+ }
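# For example, following the rules just above: a plain numeric definition
# such as DB_RMW is recorded as DEFINE, a definition whose value contains a
# double quote (e.g. DB_VERSION_STRING) as STRING, and names matching
# $ignore_re (DB_AM_*, DB_DBT_*, DBC*, lower-case names, ...) or listed in
# %ignore_def as IGNORE.  Function-like macros never match at all, because
# of the (?!\() in the pattern.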
+
+ foreach $define (sort keys %seen_define)
+ {
+ my $out = $filler ;
+ substr($out,0, length $define) = $define;
+ $result .= "\t$out => $seen_define{$define},\n" ;
+ }
+
+ while ($file =~ /\btypedef\s+enum\s*{(.*?)}\s*(\w+)/gs )
+ {
+ my $enum = $1 ;
+ my $name = $2 ;
+ my $ignore = 0 ;
+
+ $ignore = 1 if $ignore_enums{$name} ;
+
+ #$enum =~ s/\s*=\s*\S+\s*(,?)\s*\n/$1/g;
+ $enum =~ s/^\s*//;
+ $enum =~ s/\s*$//;
+
+ my @tokens = map { s/\s*=.*// ; $_} split /\s*,\s*/, $enum ;
+ my @new = grep { ! $Enums{$_}++ } @tokens ;
+ if (@new)
+ {
+ my $value ;
+ if ($ignore)
+ { $value = "IGNORE, # $version" }
+ else
+ { $value = "'$version'," }
+
+ $result .= "\n\t# enum $name\n";
+ my $out = $filler ;
+ foreach $name (@new)
+ {
+ $out = $filler ;
+ substr($out,0, length $name) = $name;
+ $result .= "\t$out => $value\n" ;
+ }
+ }
+ }
+
+ return $result ;
+}
+
+
+sub StripCommentsAndStrings
+{
+
+    # Strip C & C++ comments
+ # From the perlfaq
+ $_[0] =~
+
+ s{
+ /\* ## Start of /* ... */ comment
+ [^*]*\*+ ## Non-* followed by 1-or-more *'s
+ (
+ [^/*][^*]*\*+
+ )* ## 0-or-more things which don't start with /
+ ## but do end with '*'
+ / ## End of /* ... */ comment
+
+ | ## OR C++ Comment
+ // ## Start of C++ comment //
+ [^\n]* ## followed by 0-or-more non end of line characters
+
+ | ## OR various things which aren't comments:
+
+ (
+ " ## Start of " ... " string
+ (
+ \\. ## Escaped char
+ | ## OR
+ [^"\\] ## Non "\
+ )*
+ " ## End of " ... " string
+
+ | ## OR
+
+ ' ## Start of ' ... ' string
+ (
+ \\. ## Escaped char
+ | ## OR
+ [^'\\] ## Non '\
+ )*
+ ' ## End of ' ... ' string
+
+ | ## OR
+
+         .           ## Any other char
+         [^/"'\\]*   ## Chars which don't start a comment, string or escape
+ )
+ }{$2}gxs;
+
+
+
+ # Remove double-quoted strings.
+ #$_[0] =~ s#"(\\.|[^"\\])*"##g;
+
+ # Remove single-quoted strings.
+ #$_[0] =~ s#'(\\.|[^'\\])*'##g;
+
+ # Remove leading whitespace.
+ $_[0] =~ s/\A\s+//m ;
+
+ # Remove trailing whitespace.
+ $_[0] =~ s/\s+\Z//m ;
+
+ # Replace all multiple whitespace by a single space.
+ #$_[0] =~ s/\s+/ /g ;
+}
+
+
+sub readFile
+{
+ my $filename = shift ;
+    open F, "<$filename" or die "Cannot open $filename: $!\n" ;
+ local $/ ;
+ my $x = <F> ;
+ close F ;
+ return $x ;
+}
+
+sub tuple
+{
+ my (@a) = split(/\./, $a) ;
+ my (@b) = split(/\./, $b) ;
+ if (@a != @b) {
+ my $diff = @a - @b ;
+        push @b, (0) x $diff if $diff > 0 ;
+        push @a, (0) x -$diff if $diff < 0 ;
+ }
+ foreach $A (@a) {
+ $B = shift @b ;
+ $A == $B or return $A <=> $B ;
+ }
+ return 0;
+}
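# tuple() compares the version directory names component by component as
# numbers, so "sort tuple" places 4.0.7 before 4.0.14, where a plain string
# sort would put "4.0.14" first.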
+
+__END__
+
diff --git a/bdb/perl.BerkeleyDB/t/btree.t b/bdb/perl/BerkeleyDB/t/btree.t
index 97bb3257c97..fd6ed8f1268 100644
--- a/bdb/perl.BerkeleyDB/t/btree.t
+++ b/bdb/perl/BerkeleyDB/t/btree.t
@@ -11,58 +11,10 @@ BEGIN {
}
}
-#use Config;
-#
-#BEGIN {
-# if(-d "lib" && -f "TEST") {
-# if ($Config{'extensions'} !~ /\bBerkeleyDB\b/ ) {
-# print "1..74\n";
-# exit 0;
-# }
-# }
-#}
-
use BerkeleyDB;
-use File::Path qw(rmtree);
-
-print "1..243\n";
-
-my %DB_errors = (
- 'DB_INCOMPLETE' => "DB_INCOMPLETE: Sync was unable to complete",
- 'DB_KEYEMPTY' => "DB_KEYEMPTY: Non-existent key/data pair",
- 'DB_KEYEXIST' => "DB_KEYEXIST: Key/data pair already exists",
- 'DB_LOCK_DEADLOCK' => "DB_LOCK_DEADLOCK: Locker killed to resolve a deadlock",
- 'DB_LOCK_NOTGRANTED' => "DB_LOCK_NOTGRANTED: Lock not granted",
- 'DB_NOTFOUND' => "DB_NOTFOUND: No matching key/data pair found",
- 'DB_OLD_VERSION' => "DB_OLDVERSION: Database requires a version upgrade",
- 'DB_RUNRECOVERY' => "DB_RUNRECOVERY: Fatal error, run database recovery",
-) ;
-
-{
- package LexFile ;
-
- sub new
- {
- my $self = shift ;
- unlink @_ ;
- bless [ @_ ], $self ;
- }
-
- sub DESTROY
- {
- my $self = shift ;
- unlink @{ $self } ;
- }
-}
+use t::util ;
-sub ok
-{
- my $no = shift ;
- my $result = shift ;
-
- print "not " unless $result ;
- print "ok $no\n" ;
-}
+print "1..244\n";
my $Dfile = "dbhash.tmp";
my $Dfile2 = "dbhash2.tmp";
@@ -149,7 +101,7 @@ umask(0) ;
my $lex = new LexFile $Dfile ;
my $home = "./fred" ;
- ok 27, -d $home ? chmod 0777, $home : mkdir($home, 0777) ;
+ ok 27, my $lexD = new LexDir($home) ;
ok 28, my $env = new BerkeleyDB::Env -Flags => DB_CREATE|DB_INIT_MPOOL,
-Home => $home ;
@@ -164,7 +116,6 @@ umask(0) ;
ok 32, $value eq "some value" ;
undef $db ;
undef $env ;
- rmtree $home ;
}
@@ -679,8 +630,7 @@ umask(0) ;
my $value ;
my $home = "./fred" ;
- rmtree $home if -e $home ;
- ok 177, mkdir($home, 0777) ;
+ ok 177, my $lexD = new LexDir($home) ;
ok 178, my $env = new BerkeleyDB::Env -Home => $home,
-Flags => DB_CREATE|DB_INIT_TXN|
DB_INIT_MPOOL|DB_INIT_LOCK ;
@@ -690,6 +640,9 @@ umask(0) ;
-Env => $env,
-Txn => $txn ;
+ ok 181, (my $Z = $txn->txn_commit()) == 0 ;
+ ok 182, $txn = $env->txn_begin() ;
+ $db1->Txn($txn);
# create some data
my %data = (
@@ -702,39 +655,38 @@ umask(0) ;
while (my ($k, $v) = each %data) {
$ret += $db1->db_put($k, $v) ;
}
- ok 181, $ret == 0 ;
+ ok 183, $ret == 0 ;
# should be able to see all the records
- ok 182, my $cursor = $db1->db_cursor() ;
+ ok 184, my $cursor = $db1->db_cursor() ;
my ($k, $v) = ("", "") ;
my $count = 0 ;
# sequence forwards
while ($cursor->c_get($k, $v, DB_NEXT) == 0) {
++ $count ;
}
- ok 183, $count == 3 ;
+ ok 185, $count == 3 ;
undef $cursor ;
# now abort the transaction
#ok 151, $txn->txn_abort() == 0 ;
- ok 184, (my $Z = $txn->txn_abort()) == 0 ;
+ ok 186, ($Z = $txn->txn_abort()) == 0 ;
# there shouldn't be any records in the database
$count = 0 ;
# sequence forwards
- ok 185, $cursor = $db1->db_cursor() ;
+ ok 187, $cursor = $db1->db_cursor() ;
while ($cursor->c_get($k, $v, DB_NEXT) == 0) {
++ $count ;
}
- ok 186, $count == 0 ;
+ ok 188, $count == 0 ;
undef $txn ;
undef $cursor ;
undef $db1 ;
undef $env ;
untie %hash ;
- rmtree $home ;
}
{
@@ -742,7 +694,7 @@ umask(0) ;
my $lex = new LexFile $Dfile ;
my %hash ;
- ok 187, my $db = tie %hash, 'BerkeleyDB::Btree', -Filename => $Dfile,
+ ok 189, my $db = tie %hash, 'BerkeleyDB::Btree', -Filename => $Dfile,
-Property => DB_DUP,
-Flags => DB_CREATE ;
@@ -753,24 +705,25 @@ umask(0) ;
$hash{'Wall'} = 'Brick' ;
$hash{'mouse'} = 'mickey' ;
- ok 188, keys %hash == 6 ;
+ ok 190, keys %hash == 6 ;
# create a cursor
- ok 189, my $cursor = $db->db_cursor() ;
+ ok 191, my $cursor = $db->db_cursor() ;
my $key = "Wall" ;
my $value ;
- ok 190, $cursor->c_get($key, $value, DB_SET) == 0 ;
- ok 191, $key eq "Wall" && $value eq "Larry" ;
- ok 192, $cursor->c_get($key, $value, DB_NEXT) == 0 ;
- ok 193, $key eq "Wall" && $value eq "Stone" ;
+ ok 192, $cursor->c_get($key, $value, DB_SET) == 0 ;
+ ok 193, $key eq "Wall" && $value eq "Larry" ;
ok 194, $cursor->c_get($key, $value, DB_NEXT) == 0 ;
- ok 195, $key eq "Wall" && $value eq "Brick" ;
+ ok 195, $key eq "Wall" && $value eq "Stone" ;
ok 196, $cursor->c_get($key, $value, DB_NEXT) == 0 ;
ok 197, $key eq "Wall" && $value eq "Brick" ;
+ ok 198, $cursor->c_get($key, $value, DB_NEXT) == 0 ;
+ ok 199, $key eq "Wall" && $value eq "Brick" ;
- my $ref = $db->db_stat() ;
- ok 198, ($ref->{bt_flags} | DB_DUP) == DB_DUP ;
+ #my $ref = $db->db_stat() ;
+ #ok 200, ($ref->{bt_flags} | DB_DUP) == DB_DUP ;
+#print "bt_flags " . $ref->{bt_flags} . " DB_DUP " . DB_DUP ."\n";
undef $db ;
undef $cursor ;
@@ -785,16 +738,16 @@ umask(0) ;
my $recs = ($BerkeleyDB::db_version >= 3.1 ? "bt_ndata" : "bt_nrecs") ;
my %hash ;
my ($k, $v) ;
- ok 199, my $db = new BerkeleyDB::Btree -Filename => $Dfile,
+ ok 200, my $db = new BerkeleyDB::Btree -Filename => $Dfile,
-Flags => DB_CREATE,
-Minkey =>3 ,
-Pagesize => 2 **12
;
my $ref = $db->db_stat() ;
- ok 200, $ref->{$recs} == 0;
- ok 201, $ref->{'bt_minkey'} == 3;
- ok 202, $ref->{'bt_pagesize'} == 2 ** 12;
+ ok 201, $ref->{$recs} == 0;
+ ok 202, $ref->{'bt_minkey'} == 3;
+ ok 203, $ref->{'bt_pagesize'} == 2 ** 12;
# create some data
my %data = (
@@ -807,10 +760,10 @@ umask(0) ;
while (($k, $v) = each %data) {
$ret += $db->db_put($k, $v) ;
}
- ok 203, $ret == 0 ;
+ ok 204, $ret == 0 ;
$ref = $db->db_stat() ;
- ok 204, $ref->{$recs} == 3;
+ ok 205, $ref->{$recs} == 3;
}
{
@@ -861,7 +814,7 @@ EOM
BEGIN { push @INC, '.'; }
eval 'use SubDB ; ';
- main::ok 205, $@ eq "" ;
+ main::ok 206, $@ eq "" ;
my %h ;
my $X ;
eval '
@@ -870,25 +823,27 @@ EOM
-Mode => 0640 );
' ;
- main::ok 206, $@ eq "" ;
+ main::ok 207, $@ eq "" && $X ;
my $ret = eval '$h{"fred"} = 3 ; return $h{"fred"} ' ;
- main::ok 207, $@ eq "" ;
- main::ok 208, $ret == 7 ;
+ main::ok 208, $@ eq "" ;
+ main::ok 209, $ret == 7 ;
my $value = 0;
$ret = eval '$X->db_put("joe", 4) ; $X->db_get("joe", $value) ; return $value' ;
- main::ok 209, $@ eq "" ;
- main::ok 210, $ret == 10 ;
+ main::ok 210, $@ eq "" ;
+ main::ok 211, $ret == 10 ;
$ret = eval ' DB_NEXT eq main::DB_NEXT ' ;
- main::ok 211, $@ eq "" ;
- main::ok 212, $ret == 1 ;
+ main::ok 212, $@ eq "" ;
+ main::ok 213, $ret == 1 ;
$ret = eval '$X->A_new_method("joe") ' ;
- main::ok 213, $@ eq "" ;
- main::ok 214, $ret eq "[[10]]" ;
+ main::ok 214, $@ eq "" ;
+ main::ok 215, $ret eq "[[10]]" ;
+ undef $X;
+ untie %h;
unlink "SubDB.pm", "dbbtree.tmp" ;
}
@@ -899,7 +854,7 @@ EOM
my $lex = new LexFile $Dfile ;
my %hash ;
my ($k, $v) = ("", "");
- ok 215, my $db = new BerkeleyDB::Btree
+ ok 216, my $db = new BerkeleyDB::Btree
-Filename => $Dfile,
-Flags => DB_CREATE,
-Property => DB_RECNUM ;
@@ -920,57 +875,57 @@ EOM
$ret += $db->db_put($_, $ix) ;
++ $ix ;
}
- ok 216, $ret == 0 ;
+ ok 217, $ret == 0 ;
# db_get & DB_SET_RECNO
$k = 1 ;
- ok 217, $db->db_get($k, $v, DB_SET_RECNO) == 0;
- ok 218, $k eq "B one" && $v == 1 ;
+ ok 218, $db->db_get($k, $v, DB_SET_RECNO) == 0;
+ ok 219, $k eq "B one" && $v == 1 ;
$k = 3 ;
- ok 219, $db->db_get($k, $v, DB_SET_RECNO) == 0;
- ok 220, $k eq "D three" && $v == 3 ;
+ ok 220, $db->db_get($k, $v, DB_SET_RECNO) == 0;
+ ok 221, $k eq "D three" && $v == 3 ;
$k = 4 ;
- ok 221, $db->db_get($k, $v, DB_SET_RECNO) == 0;
- ok 222, $k eq "E four" && $v == 4 ;
+ ok 222, $db->db_get($k, $v, DB_SET_RECNO) == 0;
+ ok 223, $k eq "E four" && $v == 4 ;
$k = 0 ;
- ok 223, $db->db_get($k, $v, DB_SET_RECNO) == 0;
- ok 224, $k eq "A zero" && $v == 0 ;
+ ok 224, $db->db_get($k, $v, DB_SET_RECNO) == 0;
+ ok 225, $k eq "A zero" && $v == 0 ;
# cursor & DB_SET_RECNO
# create the cursor
- ok 225, my $cursor = $db->db_cursor() ;
+ ok 226, my $cursor = $db->db_cursor() ;
$k = 2 ;
- ok 226, $db->db_get($k, $v, DB_SET_RECNO) == 0;
- ok 227, $k eq "C two" && $v == 2 ;
+ ok 227, $db->db_get($k, $v, DB_SET_RECNO) == 0;
+ ok 228, $k eq "C two" && $v == 2 ;
$k = 0 ;
- ok 228, $cursor->c_get($k, $v, DB_SET_RECNO) == 0;
- ok 229, $k eq "A zero" && $v == 0 ;
+ ok 229, $cursor->c_get($k, $v, DB_SET_RECNO) == 0;
+ ok 230, $k eq "A zero" && $v == 0 ;
$k = 3 ;
- ok 230, $db->db_get($k, $v, DB_SET_RECNO) == 0;
- ok 231, $k eq "D three" && $v == 3 ;
+ ok 231, $db->db_get($k, $v, DB_SET_RECNO) == 0;
+ ok 232, $k eq "D three" && $v == 3 ;
# cursor & DB_GET_RECNO
- ok 232, $cursor->c_get($k, $v, DB_FIRST) == 0 ;
- ok 233, $k eq "A zero" && $v == 0 ;
- ok 234, $cursor->c_get($k, $v, DB_GET_RECNO) == 0;
- ok 235, $v == 0 ;
-
- ok 236, $cursor->c_get($k, $v, DB_NEXT) == 0 ;
- ok 237, $k eq "B one" && $v == 1 ;
- ok 238, $cursor->c_get($k, $v, DB_GET_RECNO) == 0;
- ok 239, $v == 1 ;
-
- ok 240, $cursor->c_get($k, $v, DB_LAST) == 0 ;
- ok 241, $k eq "E four" && $v == 4 ;
- ok 242, $cursor->c_get($k, $v, DB_GET_RECNO) == 0;
- ok 243, $v == 4 ;
+ ok 233, $cursor->c_get($k, $v, DB_FIRST) == 0 ;
+ ok 234, $k eq "A zero" && $v == 0 ;
+ ok 235, $cursor->c_get($k, $v, DB_GET_RECNO) == 0;
+ ok 236, $v == 0 ;
+
+ ok 237, $cursor->c_get($k, $v, DB_NEXT) == 0 ;
+ ok 238, $k eq "B one" && $v == 1 ;
+ ok 239, $cursor->c_get($k, $v, DB_GET_RECNO) == 0;
+ ok 240, $v == 1 ;
+
+ ok 241, $cursor->c_get($k, $v, DB_LAST) == 0 ;
+ ok 242, $k eq "E four" && $v == 4 ;
+ ok 243, $cursor->c_get($k, $v, DB_GET_RECNO) == 0;
+ ok 244, $v == 4 ;
}
diff --git a/bdb/perl.BerkeleyDB/t/destroy.t b/bdb/perl/BerkeleyDB/t/destroy.t
index e3a1e2a97c6..7457d36c583 100644
--- a/bdb/perl.BerkeleyDB/t/destroy.t
+++ b/bdb/perl/BerkeleyDB/t/destroy.t
@@ -10,47 +10,9 @@ BEGIN {
}
use BerkeleyDB;
-use File::Path qw(rmtree);
-
-print "1..13\n";
-
-
-{
- package LexFile ;
-
- sub new
- {
- my $self = shift ;
- unlink @_ ;
- bless [ @_ ], $self ;
- }
-
- sub DESTROY
- {
- my $self = shift ;
- unlink @{ $self } ;
- }
-}
-
-sub ok
-{
- my $no = shift ;
- my $result = shift ;
-
- print "not " unless $result ;
- print "ok $no\n" ;
-}
-
-sub docat
-{
- my $file = shift;
- local $/ = undef;
- open(CAT,$file) || die "Cannot open $file:$!";
- my $result = <CAT>;
- close(CAT);
- return $result;
-}
+use t::util ;
+print "1..15\n";
my $Dfile = "dbhash.tmp";
my $home = "./fred" ;
@@ -58,14 +20,13 @@ my $home = "./fred" ;
umask(0);
{
- # let object destroction kill everything
+ # let object destruction kill everything
my $lex = new LexFile $Dfile ;
my %hash ;
my $value ;
- rmtree $home if -e $home ;
- ok 1, mkdir($home, 0777) ;
+ ok 1, my $lexD = new LexDir($home) ;
ok 2, my $env = new BerkeleyDB::Env -Home => $home,
-Flags => DB_CREATE|DB_INIT_TXN|
DB_INIT_MPOOL|DB_INIT_LOCK ;
@@ -75,6 +36,9 @@ umask(0);
-Env => $env,
-Txn => $txn ;
+ ok 5, $txn->txn_commit() == 0 ;
+ ok 6, $txn = $env->txn_begin() ;
+ $db1->Txn($txn);
# create some data
my %data = (
@@ -87,31 +51,31 @@ umask(0);
while (my ($k, $v) = each %data) {
$ret += $db1->db_put($k, $v) ;
}
- ok 5, $ret == 0 ;
+ ok 7, $ret == 0 ;
# should be able to see all the records
- ok 6, my $cursor = $db1->db_cursor() ;
+ ok 8, my $cursor = $db1->db_cursor() ;
my ($k, $v) = ("", "") ;
my $count = 0 ;
# sequence forwards
while ($cursor->c_get($k, $v, DB_NEXT) == 0) {
++ $count ;
}
- ok 7, $count == 3 ;
+ ok 9, $count == 3 ;
undef $cursor ;
# now abort the transaction
- ok 8, $txn->txn_abort() == 0 ;
+ ok 10, $txn->txn_abort() == 0 ;
# there shouldn't be any records in the database
$count = 0 ;
# sequence forwards
- ok 9, $cursor = $db1->db_cursor() ;
+ ok 11, $cursor = $db1->db_cursor() ;
while ($cursor->c_get($k, $v, DB_NEXT) == 0) {
++ $count ;
}
- ok 10, $count == 0 ;
+ ok 12, $count == 0 ;
#undef $txn ;
#undef $cursor ;
@@ -120,22 +84,22 @@ umask(0);
#untie %hash ;
}
+
{
my $lex = new LexFile $Dfile ;
my %hash ;
my $cursor ;
my ($k, $v) = ("", "") ;
- ok 11, my $db1 = tie %hash, 'BerkeleyDB::Hash',
+ ok 13, my $db1 = tie %hash, 'BerkeleyDB::Hash',
-Filename => $Dfile,
-Flags => DB_CREATE ;
my $count = 0 ;
# sequence forwards
- ok 12, $cursor = $db1->db_cursor() ;
+ ok 14, $cursor = $db1->db_cursor() ;
while ($cursor->c_get($k, $v, DB_NEXT) == 0) {
++ $count ;
}
- ok 13, $count == 0 ;
+ ok 15, $count == 0 ;
}
-rmtree $home ;
diff --git a/bdb/perl.BerkeleyDB/t/env.t b/bdb/perl/BerkeleyDB/t/env.t
index 5d0197f85c0..3905abfae43 100644
--- a/bdb/perl.BerkeleyDB/t/env.t
+++ b/bdb/perl/BerkeleyDB/t/env.t
@@ -12,47 +12,9 @@ BEGIN {
}
use BerkeleyDB;
-use File::Path qw(rmtree);
-
-print "1..52\n";
-
-
-{
- package LexFile ;
-
- sub new
- {
- my $self = shift ;
- unlink @_ ;
- bless [ @_ ], $self ;
- }
-
- sub DESTROY
- {
- my $self = shift ;
- unlink @{ $self } ;
- }
-}
-
-sub ok
-{
- my $no = shift ;
- my $result = shift ;
-
- print "not " unless $result ;
- print "ok $no\n" ;
-}
-
-sub docat
-{
- my $file = shift;
- local $/ = undef;
- open(CAT,$file) || die "Cannot open $file:$!";
- my $result = <CAT>;
- close(CAT);
- return $result;
-}
+use t::util ;
+print "1..47\n";
my $Dfile = "dbhash.tmp";
@@ -87,24 +49,21 @@ umask(0);
{
# create a very simple environment
my $home = "./fred" ;
- ok 11, -d $home ? chmod 0777, $home : mkdir($home, 0777) ;
- mkdir "./fred", 0777 ;
+ ok 11, my $lexD = new LexDir($home) ;
chdir "./fred" ;
ok 12, my $env = new BerkeleyDB::Env -Flags => DB_CREATE ;
chdir ".." ;
undef $env ;
- rmtree $home ;
}
{
# create an environment with a Home
my $home = "./fred" ;
- ok 13, -d $home ? chmod 0777, $home : mkdir($home, 0777) ;
+ ok 13, my $lexD = new LexDir($home) ;
ok 14, my $env = new BerkeleyDB::Env -Home => $home,
-Flags => DB_CREATE ;
undef $env ;
- rmtree $home ;
}
{
@@ -115,7 +74,7 @@ umask(0);
my $env = new BerkeleyDB::Env -Home => $home,
-Flags => DB_INIT_LOCK ;
ok 16, ! $env ;
- ok 17, $! != 0 ;
+ ok 17, $! != 0 || $^E != 0 ;
rmtree $home ;
}
@@ -128,7 +87,7 @@ umask(0);
my $data_dir = "$home/data_dir" ;
my $log_dir = "$home/log_dir" ;
my $data_file = "data.db" ;
- ok 18, -d $home ? chmod 0777, $home : mkdir($home, 0777) ;
+ ok 18, my $lexD = new LexDir($home) ;
ok 19, -d $data_dir ? chmod 0777, $data_dir : mkdir($data_dir, 0777) ;
ok 20, -d $log_dir ? chmod 0777, $log_dir : mkdir($log_dir, 0777) ;
my $env = new BerkeleyDB::Env -Home => $home,
@@ -156,14 +115,13 @@ umask(0);
undef $txn ;
undef $env ;
- rmtree $home ;
}
{
# -ErrFile with a filename
my $errfile = "./errfile" ;
my $home = "./fred" ;
- ok 24, -d $home ? chmod 0777, $home : mkdir($home, 0777) ;
+ ok 24, my $lexD = new LexDir($home) ;
my $lex = new LexFile $errfile ;
ok 25, my $env = new BerkeleyDB::Env( -ErrFile => $errfile,
-Flags => DB_CREATE,
@@ -180,72 +138,53 @@ umask(0);
ok 29, $BerkeleyDB::Error eq $contents ;
undef $env ;
- rmtree $home ;
}
{
- # -ErrFile with a filehandle
- use IO ;
+ # -ErrFile with a filehandle/reference -- should fail
my $home = "./fred" ;
- ok 30, -d $home ? chmod 0777, $home : mkdir($home, 0777) ;
- my $errfile = "./errfile" ;
- my $lex = new LexFile $errfile ;
- ok 31, my $ef = new IO::File ">$errfile" ;
- ok 32, my $env = new BerkeleyDB::Env( -ErrFile => $ef ,
+ ok 30, my $lexD = new LexDir($home) ;
+ eval { my $env = new BerkeleyDB::Env( -ErrFile => [],
-Flags => DB_CREATE,
- -Home => $home) ;
- my $db = new BerkeleyDB::Hash -Filename => $Dfile,
- -Env => $env,
- -Flags => -1;
- ok 33, !$db ;
-
- ok 34, $BerkeleyDB::Error =~ /^illegal flag specified to (db_open|DB->open)/;
- $ef->close() ;
- ok 35, -e $errfile ;
- my $contents = "" ;
- $contents = docat($errfile) ;
- chomp $contents ;
- ok 36, $BerkeleyDB::Error eq $contents ;
- undef $env ;
- rmtree $home ;
+ -Home => $home) ; };
+ ok 31, $@ =~ /ErrFile parameter must be a file name/;
}
{
# -ErrPrefix
use IO ;
my $home = "./fred" ;
- ok 37, -d $home ? chmod 0777, $home : mkdir($home, 0777) ;
+ ok 32, my $lexD = new LexDir($home) ;
my $errfile = "./errfile" ;
my $lex = new LexFile $errfile ;
- ok 38, my $env = new BerkeleyDB::Env( -ErrFile => $errfile,
+ ok 33, my $env = new BerkeleyDB::Env( -ErrFile => $errfile,
-ErrPrefix => "PREFIX",
-Flags => DB_CREATE,
-Home => $home) ;
my $db = new BerkeleyDB::Hash -Filename => $Dfile,
-Env => $env,
-Flags => -1;
- ok 39, !$db ;
+ ok 34, !$db ;
- ok 40, $BerkeleyDB::Error =~ /^PREFIX: illegal flag specified to (db_open|DB->open)/;
- ok 41, -e $errfile ;
+ ok 35, $BerkeleyDB::Error =~ /^PREFIX: illegal flag specified to (db_open|DB->open)/;
+ ok 36, -e $errfile ;
my $contents = docat($errfile) ;
chomp $contents ;
- ok 42, $BerkeleyDB::Error eq $contents ;
+ ok 37, $BerkeleyDB::Error eq $contents ;
# change the prefix on the fly
my $old = $env->errPrefix("NEW ONE") ;
- ok 43, $old eq "PREFIX" ;
+ ok 38, $old eq "PREFIX" ;
$db = new BerkeleyDB::Hash -Filename => $Dfile,
-Env => $env,
-Flags => -1;
- ok 44, !$db ;
- ok 45, $BerkeleyDB::Error =~ /^NEW ONE: illegal flag specified to (db_open|DB->open)/;
+ ok 39, !$db ;
+ ok 40, $BerkeleyDB::Error =~ /^NEW ONE: illegal flag specified to (db_open|DB->open)/;
$contents = docat($errfile) ;
chomp $contents ;
- ok 46, $contents =~ /$BerkeleyDB::Error$/ ;
+ ok 41, $contents =~ /$BerkeleyDB::Error$/ ;
undef $env ;
- rmtree $home ;
}
{
@@ -256,22 +195,21 @@ umask(0);
my $data_dir = "$home/data_dir" ;
my $log_dir = "$home/log_dir" ;
my $data_file = "data.db" ;
- ok 47, -d $home ? chmod 0777, $home : mkdir($home, 0777) ;
- ok 48, -d $data_dir ? chmod 0777, $data_dir : mkdir($data_dir, 0777) ;
- ok 49, -d $log_dir ? chmod 0777, $log_dir : mkdir($log_dir, 0777) ;
+ ok 42, my $lexD = new LexDir($home);
+ ok 43, -d $data_dir ? chmod 0777, $data_dir : mkdir($data_dir, 0777) ;
+ ok 44, -d $log_dir ? chmod 0777, $log_dir : mkdir($log_dir, 0777) ;
my $env = new BerkeleyDB::Env -Home => $home,
-Config => { DB_DATA_DIR => $data_dir,
DB_LOG_DIR => $log_dir
},
-Flags => DB_CREATE|DB_INIT_TXN|DB_INIT_LOG|
DB_INIT_MPOOL|DB_INIT_LOCK ;
- ok 50, $env ;
+ ok 45, $env ;
- ok 51, my $txn_mgr = $env->TxnMgr() ;
+ ok 46, my $txn_mgr = $env->TxnMgr() ;
- ok 52, $env->db_appexit() == 0 ;
+ ok 47, $env->db_appexit() == 0 ;
- #rmtree $home ;
}
# test -Verbose
diff --git a/bdb/perl.BerkeleyDB/t/examples.t b/bdb/perl/BerkeleyDB/t/examples.t
index 4b6702d540a..69b7f8ff8c5 100644
--- a/bdb/perl.BerkeleyDB/t/examples.t
+++ b/bdb/perl/BerkeleyDB/t/examples.t
@@ -10,91 +10,10 @@ BEGIN {
}
use BerkeleyDB;
-use File::Path qw(rmtree);
+use t::util;
print "1..7\n";
-my $FA = 0 ;
-
-{
- sub try::TIEARRAY { bless [], "try" }
- sub try::FETCHSIZE { $FA = 1 }
- $FA = 0 ;
- my @a ;
- tie @a, 'try' ;
- my $a = @a ;
-}
-
-{
- package LexFile ;
-
- sub new
- {
- my $self = shift ;
- unlink @_ ;
- bless [ @_ ], $self ;
- }
-
- sub DESTROY
- {
- my $self = shift ;
- unlink @{ $self } ;
- }
-}
-
-
-sub ok
-{
- my $no = shift ;
- my $result = shift ;
-
- print "not " unless $result ;
- print "ok $no\n" ;
-}
-
-{
- package Redirect ;
- use Symbol ;
-
- sub new
- {
- my $class = shift ;
- my $filename = shift ;
- my $fh = gensym ;
- open ($fh, ">$filename") || die "Cannot open $filename: $!" ;
- my $real_stdout = select($fh) ;
- return bless [$fh, $real_stdout ] ;
-
- }
- sub DESTROY
- {
- my $self = shift ;
- close $self->[0] ;
- select($self->[1]) ;
- }
-}
-
-sub docat
-{
- my $file = shift;
- local $/ = undef;
- open(CAT,$file) || die "Cannot open $file:$!";
- my $result = <CAT> || "" ;
- close(CAT);
- return $result;
-}
-
-sub docat_del
-{
- my $file = shift;
- local $/ = undef;
- open(CAT,$file) || die "Cannot open $file: $!";
- my $result = <CAT> || "" ;
- close(CAT);
- unlink $file ;
- return $result;
-}
-
my $Dfile = "dbhash.tmp";
my $Dfile2 = "dbhash2.tmp";
my $Dfile3 = "dbhash3.tmp";
diff --git a/bdb/perl.BerkeleyDB/t/examples.t.T b/bdb/perl/BerkeleyDB/t/examples.t.T
index fe0922318ca..fe9bdf76b06 100644
--- a/bdb/perl.BerkeleyDB/t/examples.t.T
+++ b/bdb/perl/BerkeleyDB/t/examples.t.T
@@ -10,91 +10,10 @@ BEGIN {
}
use BerkeleyDB;
-use File::Path qw(rmtree);
+use t::util;
print "1..7\n";
-my $FA = 0 ;
-
-{
- sub try::TIEARRAY { bless [], "try" }
- sub try::FETCHSIZE { $FA = 1 }
- $FA = 0 ;
- my @a ;
- tie @a, 'try' ;
- my $a = @a ;
-}
-
-{
- package LexFile ;
-
- sub new
- {
- my $self = shift ;
- unlink @_ ;
- bless [ @_ ], $self ;
- }
-
- sub DESTROY
- {
- my $self = shift ;
- unlink @{ $self } ;
- }
-}
-
-
-sub ok
-{
- my $no = shift ;
- my $result = shift ;
-
- print "not " unless $result ;
- print "ok $no\n" ;
-}
-
-{
- package Redirect ;
- use Symbol ;
-
- sub new
- {
- my $class = shift ;
- my $filename = shift ;
- my $fh = gensym ;
- open ($fh, ">$filename") || die "Cannot open $filename: $!" ;
- my $real_stdout = select($fh) ;
- return bless [$fh, $real_stdout ] ;
-
- }
- sub DESTROY
- {
- my $self = shift ;
- close $self->[0] ;
- select($self->[1]) ;
- }
-}
-
-sub docat
-{
- my $file = shift;
- local $/ = undef;
- open(CAT,$file) || die "Cannot open $file:$!";
- my $result = <CAT> || "" ;
- close(CAT);
- return $result;
-}
-
-sub docat_del
-{
- my $file = shift;
- local $/ = undef;
- open(CAT,$file) || die "Cannot open $file: $!";
- my $result = <CAT> || "" ;
- close(CAT);
- unlink $file ;
- return $result;
-}
-
my $Dfile = "dbhash.tmp";
my $Dfile2 = "dbhash2.tmp";
my $Dfile3 = "dbhash3.tmp";
diff --git a/bdb/perl.BerkeleyDB/t/examples3.t b/bdb/perl/BerkeleyDB/t/examples3.t
index 9cc1fa72c29..22e94b770e1 100644
--- a/bdb/perl.BerkeleyDB/t/examples3.t
+++ b/bdb/perl/BerkeleyDB/t/examples3.t
@@ -10,7 +10,7 @@ BEGIN {
}
use BerkeleyDB;
-use File::Path qw(rmtree);
+use t::util;
BEGIN
{
@@ -23,87 +23,6 @@ BEGIN
print "1..2\n";
-my $FA = 0 ;
-
-{
- sub try::TIEARRAY { bless [], "try" }
- sub try::FETCHSIZE { $FA = 1 }
- $FA = 0 ;
- my @a ;
- tie @a, 'try' ;
- my $a = @a ;
-}
-
-{
- package LexFile ;
-
- sub new
- {
- my $self = shift ;
- unlink @_ ;
- bless [ @_ ], $self ;
- }
-
- sub DESTROY
- {
- my $self = shift ;
- unlink @{ $self } ;
- }
-}
-
-
-sub ok
-{
- my $no = shift ;
- my $result = shift ;
-
- print "not " unless $result ;
- print "ok $no\n" ;
-}
-
-{
- package Redirect ;
- use Symbol ;
-
- sub new
- {
- my $class = shift ;
- my $filename = shift ;
- my $fh = gensym ;
- open ($fh, ">$filename") || die "Cannot open $filename: $!" ;
- my $real_stdout = select($fh) ;
- return bless [$fh, $real_stdout ] ;
-
- }
- sub DESTROY
- {
- my $self = shift ;
- close $self->[0] ;
- select($self->[1]) ;
- }
-}
-
-sub docat
-{
- my $file = shift;
- local $/ = undef;
- open(CAT,$file) || die "Cannot open $file:$!";
- my $result = <CAT> || "" ;
- close(CAT);
- return $result;
-}
-
-sub docat_del
-{
- my $file = shift;
- local $/ = undef;
- open(CAT,$file) || die "Cannot open $file: $!";
- my $result = <CAT> || "" ;
- close(CAT);
- unlink $file ;
- return $result;
-}
-
my $Dfile = "dbhash.tmp";
my $Dfile2 = "dbhash2.tmp";
my $Dfile3 = "dbhash3.tmp";
diff --git a/bdb/perl.BerkeleyDB/t/examples3.t.T b/bdb/perl/BerkeleyDB/t/examples3.t.T
index 573c04903e3..5eeaa14d00c 100644
--- a/bdb/perl.BerkeleyDB/t/examples3.t.T
+++ b/bdb/perl/BerkeleyDB/t/examples3.t.T
@@ -10,7 +10,7 @@ BEGIN {
}
use BerkeleyDB;
-use File::Path qw(rmtree);
+use t::util;
BEGIN
{
@@ -23,87 +23,6 @@ BEGIN
print "1..2\n";
-my $FA = 0 ;
-
-{
- sub try::TIEARRAY { bless [], "try" }
- sub try::FETCHSIZE { $FA = 1 }
- $FA = 0 ;
- my @a ;
- tie @a, 'try' ;
- my $a = @a ;
-}
-
-{
- package LexFile ;
-
- sub new
- {
- my $self = shift ;
- unlink @_ ;
- bless [ @_ ], $self ;
- }
-
- sub DESTROY
- {
- my $self = shift ;
- unlink @{ $self } ;
- }
-}
-
-
-sub ok
-{
- my $no = shift ;
- my $result = shift ;
-
- print "not " unless $result ;
- print "ok $no\n" ;
-}
-
-{
- package Redirect ;
- use Symbol ;
-
- sub new
- {
- my $class = shift ;
- my $filename = shift ;
- my $fh = gensym ;
- open ($fh, ">$filename") || die "Cannot open $filename: $!" ;
- my $real_stdout = select($fh) ;
- return bless [$fh, $real_stdout ] ;
-
- }
- sub DESTROY
- {
- my $self = shift ;
- close $self->[0] ;
- select($self->[1]) ;
- }
-}
-
-sub docat
-{
- my $file = shift;
- local $/ = undef;
- open(CAT,$file) || die "Cannot open $file:$!";
- my $result = <CAT> || "" ;
- close(CAT);
- return $result;
-}
-
-sub docat_del
-{
- my $file = shift;
- local $/ = undef;
- open(CAT,$file) || die "Cannot open $file: $!";
- my $result = <CAT> || "" ;
- close(CAT);
- unlink $file ;
- return $result;
-}
-
my $Dfile = "dbhash.tmp";
my $Dfile2 = "dbhash2.tmp";
my $Dfile3 = "dbhash3.tmp";
diff --git a/bdb/perl.BerkeleyDB/t/filter.t b/bdb/perl/BerkeleyDB/t/filter.t
index 8bcdc7f3f90..47a7c107acf 100644
--- a/bdb/perl.BerkeleyDB/t/filter.t
+++ b/bdb/perl/BerkeleyDB/t/filter.t
@@ -12,37 +12,10 @@ BEGIN {
}
use BerkeleyDB;
-use File::Path qw(rmtree);
+use t::util ;
print "1..46\n";
-{
- package LexFile ;
-
- sub new
- {
- my $self = shift ;
- unlink @_ ;
- bless [ @_ ], $self ;
- }
-
- sub DESTROY
- {
- my $self = shift ;
- unlink @{ $self } ;
- }
-}
-
-
-sub ok
-{
- my $no = shift ;
- my $result = shift ;
-
- print "not " unless $result ;
- print "ok $no\n" ;
-}
-
my $Dfile = "dbhash.tmp";
unlink $Dfile;
diff --git a/bdb/perl.BerkeleyDB/t/hash.t b/bdb/perl/BerkeleyDB/t/hash.t
index 1a42c60acb2..0e683851c3d 100644
--- a/bdb/perl.BerkeleyDB/t/hash.t
+++ b/bdb/perl/BerkeleyDB/t/hash.t
@@ -11,59 +11,10 @@ BEGIN {
}
}
-#use Config;
-#
-#BEGIN {
-# if(-d "lib" && -f "TEST") {
-# if ($Config{'extensions'} !~ /\bBerkeleyDB\b/ ) {
-# print "1..74\n";
-# exit 0;
-# }
-# }
-#}
-
use BerkeleyDB;
-use File::Path qw(rmtree);
-
-print "1..210\n";
-
-my %DB_errors = (
- 'DB_INCOMPLETE' => "DB_INCOMPLETE: Sync was unable to complete",
- 'DB_KEYEMPTY' => "DB_KEYEMPTY: Non-existent key/data pair",
- 'DB_KEYEXIST' => "DB_KEYEXIST: Key/data pair already exists",
- 'DB_LOCK_DEADLOCK' => "DB_LOCK_DEADLOCK: Locker killed to resolve a deadlock",
- 'DB_LOCK_NOTGRANTED' => "DB_LOCK_NOTGRANTED: Lock not granted",
- 'DB_NOTFOUND' => "DB_NOTFOUND: No matching key/data pair found",
- 'DB_OLD_VERSION' => "DB_OLDVERSION: Database requires a version upgrade",
- 'DB_RUNRECOVERY' => "DB_RUNRECOVERY: Fatal error, run database recovery",
-) ;
-
-{
- package LexFile ;
-
- sub new
- {
- my $self = shift ;
- unlink @_ ;
- bless [ @_ ], $self ;
- }
-
- sub DESTROY
- {
- my $self = shift ;
- unlink @{ $self } ;
- }
-}
-
+use t::util ;
-sub ok
-{
- my $no = shift ;
- my $result = shift ;
-
- print "not " unless $result ;
- print "ok $no\n" ;
-}
+print "1..212\n";
my $Dfile = "dbhash.tmp";
my $Dfile2 = "dbhash2.tmp";
@@ -150,7 +101,7 @@ umask(0) ;
my $lex = new LexFile $Dfile ;
my $home = "./fred" ;
- ok 28, -d $home ? chmod 0777, $home : mkdir($home, 0777) ;
+ ok 28, my $lexD = new LexDir($home);
ok 29, my $env = new BerkeleyDB::Env -Flags => DB_CREATE| DB_INIT_MPOOL,
-Home => $home ;
@@ -165,7 +116,6 @@ umask(0) ;
ok 33, $value eq "some value" ;
undef $db ;
undef $env ;
- rmtree $home ;
}
{
@@ -486,8 +436,7 @@ umask(0) ;
my $value ;
my $home = "./fred" ;
- rmtree $home if -e $home ;
- ok 146, mkdir($home, 0777) ;
+ ok 146, my $lexD = new LexDir($home);
ok 147, my $env = new BerkeleyDB::Env -Home => $home,
-Flags => DB_CREATE|DB_INIT_TXN|
DB_INIT_MPOOL|DB_INIT_LOCK ;
@@ -498,6 +447,9 @@ umask(0) ;
-Txn => $txn ;
+ ok 150, $txn->txn_commit() == 0 ;
+ ok 151, $txn = $env->txn_begin() ;
+ $db1->Txn($txn);
# create some data
my %data = (
"red" => "boat",
@@ -509,38 +461,37 @@ umask(0) ;
while (my ($k, $v) = each %data) {
$ret += $db1->db_put($k, $v) ;
}
- ok 150, $ret == 0 ;
+ ok 152, $ret == 0 ;
# should be able to see all the records
- ok 151, my $cursor = $db1->db_cursor() ;
+ ok 153, my $cursor = $db1->db_cursor() ;
my ($k, $v) = ("", "") ;
my $count = 0 ;
# sequence forwards
while ($cursor->c_get($k, $v, DB_NEXT) == 0) {
++ $count ;
}
- ok 152, $count == 3 ;
+ ok 154, $count == 3 ;
undef $cursor ;
# now abort the transaction
- ok 153, $txn->txn_abort() == 0 ;
+ ok 155, $txn->txn_abort() == 0 ;
# there shouldn't be any records in the database
$count = 0 ;
# sequence forwards
- ok 154, $cursor = $db1->db_cursor() ;
+ ok 156, $cursor = $db1->db_cursor() ;
while ($cursor->c_get($k, $v, DB_NEXT) == 0) {
++ $count ;
}
- ok 155, $count == 0 ;
+ ok 157, $count == 0 ;
undef $txn ;
undef $cursor ;
undef $db1 ;
undef $env ;
untie %hash ;
- rmtree $home ;
}
@@ -549,7 +500,7 @@ umask(0) ;
my $lex = new LexFile $Dfile ;
my %hash ;
- ok 156, my $db = tie %hash, 'BerkeleyDB::Hash', -Filename => $Dfile,
+ ok 158, my $db = tie %hash, 'BerkeleyDB::Hash', -Filename => $Dfile,
-Property => DB_DUP,
-Flags => DB_CREATE ;
@@ -560,36 +511,36 @@ umask(0) ;
$hash{'Wall'} = 'Brick' ;
$hash{'mouse'} = 'mickey' ;
- ok 157, keys %hash == 6 ;
+ ok 159, keys %hash == 6 ;
# create a cursor
- ok 158, my $cursor = $db->db_cursor() ;
+ ok 160, my $cursor = $db->db_cursor() ;
my $key = "Wall" ;
my $value ;
- ok 159, $cursor->c_get($key, $value, DB_SET) == 0 ;
- ok 160, $key eq "Wall" && $value eq "Larry" ;
- ok 161, $cursor->c_get($key, $value, DB_NEXT) == 0 ;
- ok 162, $key eq "Wall" && $value eq "Stone" ;
+ ok 161, $cursor->c_get($key, $value, DB_SET) == 0 ;
+ ok 162, $key eq "Wall" && $value eq "Larry" ;
ok 163, $cursor->c_get($key, $value, DB_NEXT) == 0 ;
- ok 164, $key eq "Wall" && $value eq "Brick" ;
+ ok 164, $key eq "Wall" && $value eq "Stone" ;
ok 165, $cursor->c_get($key, $value, DB_NEXT) == 0 ;
ok 166, $key eq "Wall" && $value eq "Brick" ;
+ ok 167, $cursor->c_get($key, $value, DB_NEXT) == 0 ;
+ ok 168, $key eq "Wall" && $value eq "Brick" ;
#my $ref = $db->db_stat() ;
#ok 143, $ref->{bt_flags} | DB_DUP ;
# test DB_DUP_NEXT
my ($k, $v) = ("Wall", "") ;
- ok 167, $cursor->c_get($k, $v, DB_SET) == 0 ;
- ok 168, $k eq "Wall" && $v eq "Larry" ;
- ok 169, $cursor->c_get($k, $v, DB_NEXT_DUP) == 0 ;
- ok 170, $k eq "Wall" && $v eq "Stone" ;
+ ok 169, $cursor->c_get($k, $v, DB_SET) == 0 ;
+ ok 170, $k eq "Wall" && $v eq "Larry" ;
ok 171, $cursor->c_get($k, $v, DB_NEXT_DUP) == 0 ;
- ok 172, $k eq "Wall" && $v eq "Brick" ;
+ ok 172, $k eq "Wall" && $v eq "Stone" ;
ok 173, $cursor->c_get($k, $v, DB_NEXT_DUP) == 0 ;
ok 174, $k eq "Wall" && $v eq "Brick" ;
- ok 175, $cursor->c_get($k, $v, DB_NEXT_DUP) == DB_NOTFOUND ;
+ ok 175, $cursor->c_get($k, $v, DB_NEXT_DUP) == 0 ;
+ ok 176, $k eq "Wall" && $v eq "Brick" ;
+ ok 177, $cursor->c_get($k, $v, DB_NEXT_DUP) == DB_NOTFOUND ;
undef $db ;
@@ -606,12 +557,12 @@ umask(0) ;
my @Keys = qw( 0123 9 12 -1234 9 987654321 9 def ) ;
my @Values = qw( 1 11 3 dd x abc 2 0 ) ;
- ok 176, tie %h, "BerkeleyDB::Hash", -Filename => $Dfile,
+ ok 178, tie %h, "BerkeleyDB::Hash", -Filename => $Dfile,
-DupCompare => sub { $_[0] cmp $_[1] },
-Property => DB_DUP|DB_DUPSORT,
-Flags => DB_CREATE ;
- ok 177, tie %g, 'BerkeleyDB::Hash', -Filename => $Dfile2,
+ ok 179, tie %g, 'BerkeleyDB::Hash', -Filename => $Dfile2,
-DupCompare => sub { $_[0] <=> $_[1] },
-Property => DB_DUP|DB_DUPSORT,
-Flags => DB_CREATE ;
@@ -623,23 +574,23 @@ umask(0) ;
$g{$_} = $value ;
}
- ok 178, my $cursor = (tied %h)->db_cursor() ;
+ ok 180, my $cursor = (tied %h)->db_cursor() ;
$key = 9 ; $value = "";
- ok 179, $cursor->c_get($key, $value, DB_SET) == 0 ;
- ok 180, $key == 9 && $value eq 11 ;
- ok 181, $cursor->c_get($key, $value, DB_NEXT) == 0 ;
- ok 182, $key == 9 && $value == 2 ;
+ ok 181, $cursor->c_get($key, $value, DB_SET) == 0 ;
+ ok 182, $key == 9 && $value eq 11 ;
ok 183, $cursor->c_get($key, $value, DB_NEXT) == 0 ;
- ok 184, $key == 9 && $value eq "x" ;
+ ok 184, $key == 9 && $value == 2 ;
+ ok 185, $cursor->c_get($key, $value, DB_NEXT) == 0 ;
+ ok 186, $key == 9 && $value eq "x" ;
$cursor = (tied %g)->db_cursor() ;
$key = 9 ;
- ok 185, $cursor->c_get($key, $value, DB_SET) == 0 ;
- ok 186, $key == 9 && $value eq "x" ;
- ok 187, $cursor->c_get($key, $value, DB_NEXT) == 0 ;
- ok 188, $key == 9 && $value == 2 ;
+ ok 187, $cursor->c_get($key, $value, DB_SET) == 0 ;
+ ok 188, $key == 9 && $value eq "x" ;
ok 189, $cursor->c_get($key, $value, DB_NEXT) == 0 ;
- ok 190, $key == 9 && $value == 11 ;
+ ok 190, $key == 9 && $value == 2 ;
+ ok 191, $cursor->c_get($key, $value, DB_NEXT) == 0 ;
+ ok 192, $key == 9 && $value == 11 ;
}
@@ -649,7 +600,7 @@ umask(0) ;
my $lex = new LexFile $Dfile;
my %hh ;
- ok 191, my $YY = tie %hh, "BerkeleyDB::Hash", -Filename => $Dfile,
+ ok 193, my $YY = tie %hh, "BerkeleyDB::Hash", -Filename => $Dfile,
-DupCompare => sub { $_[0] cmp $_[1] },
-Property => DB_DUP,
-Flags => DB_CREATE ;
@@ -661,34 +612,34 @@ umask(0) ;
$hh{'mouse'} = 'mickey' ;
# first work in scalar context
- ok 192, scalar $YY->get_dup('Unknown') == 0 ;
- ok 193, scalar $YY->get_dup('Smith') == 1 ;
- ok 194, scalar $YY->get_dup('Wall') == 3 ;
+ ok 194, scalar $YY->get_dup('Unknown') == 0 ;
+ ok 195, scalar $YY->get_dup('Smith') == 1 ;
+ ok 196, scalar $YY->get_dup('Wall') == 3 ;
# now in list context
my @unknown = $YY->get_dup('Unknown') ;
- ok 195, "@unknown" eq "" ;
+ ok 197, "@unknown" eq "" ;
my @smith = $YY->get_dup('Smith') ;
- ok 196, "@smith" eq "John" ;
+ ok 198, "@smith" eq "John" ;
{
my @wall = $YY->get_dup('Wall') ;
my %wall ;
@wall{@wall} = @wall ;
- ok 197, (@wall == 3 && $wall{'Larry'}
+ ok 199, (@wall == 3 && $wall{'Larry'}
&& $wall{'Stone'} && $wall{'Brick'});
}
# hash
my %unknown = $YY->get_dup('Unknown', 1) ;
- ok 198, keys %unknown == 0 ;
+ ok 200, keys %unknown == 0 ;
my %smith = $YY->get_dup('Smith', 1) ;
- ok 199, keys %smith == 1 && $smith{'John'} ;
+ ok 201, keys %smith == 1 && $smith{'John'} ;
my %wall = $YY->get_dup('Wall', 1) ;
- ok 200, keys %wall == 3 && $wall{'Larry'} == 1 && $wall{'Stone'} == 1
+ ok 202, keys %wall == 3 && $wall{'Larry'} == 1 && $wall{'Stone'} == 1
&& $wall{'Brick'} == 1 ;
undef $YY ;
@@ -744,7 +695,7 @@ EOM
BEGIN { push @INC, '.'; }
eval 'use SubDB ; ';
- main::ok 201, $@ eq "" ;
+ main::ok 203, $@ eq "" ;
my %h ;
my $X ;
eval '
@@ -753,24 +704,24 @@ EOM
-Mode => 0640 );
' ;
- main::ok 202, $@ eq "" ;
+ main::ok 204, $@ eq "" ;
my $ret = eval '$h{"fred"} = 3 ; return $h{"fred"} ' ;
- main::ok 203, $@ eq "" ;
- main::ok 204, $ret == 7 ;
+ main::ok 205, $@ eq "" ;
+ main::ok 206, $ret == 7 ;
my $value = 0;
$ret = eval '$X->db_put("joe", 4) ; $X->db_get("joe", $value) ; return $value' ;
- main::ok 205, $@ eq "" ;
- main::ok 206, $ret == 10 ;
+ main::ok 207, $@ eq "" ;
+ main::ok 208, $ret == 10 ;
$ret = eval ' DB_NEXT eq main::DB_NEXT ' ;
- main::ok 207, $@ eq "" ;
- main::ok 208, $ret == 1 ;
+ main::ok 209, $@ eq "" ;
+ main::ok 210, $ret == 1 ;
$ret = eval '$X->A_new_method("joe") ' ;
- main::ok 209, $@ eq "" ;
- main::ok 210, $ret eq "[[10]]" ;
+ main::ok 211, $@ eq "" ;
+ main::ok 212, $ret eq "[[10]]" ;
unlink "SubDB.pm", "dbhash.tmp" ;
diff --git a/bdb/perl.BerkeleyDB/t/join.t b/bdb/perl/BerkeleyDB/t/join.t
index f986d76f734..ed9b6a269cb 100644
--- a/bdb/perl.BerkeleyDB/t/join.t
+++ b/bdb/perl/BerkeleyDB/t/join.t
@@ -12,7 +12,7 @@ BEGIN {
}
use BerkeleyDB;
-use File::Path qw(rmtree);
+use t::util ;
if ($BerkeleyDB::db_ver < 2.005002)
{
@@ -23,33 +23,6 @@ if ($BerkeleyDB::db_ver < 2.005002)
print "1..37\n";
-{
- package LexFile ;
-
- sub new
- {
- my $self = shift ;
- unlink @_ ;
- bless [ @_ ], $self ;
- }
-
- sub DESTROY
- {
- my $self = shift ;
- unlink @{ $self } ;
- }
-}
-
-
-sub ok
-{
- my $no = shift ;
- my $result = shift ;
-
- print "not " unless $result ;
- print "ok $no\n" ;
-}
-
my $Dfile1 = "dbhash1.tmp";
my $Dfile2 = "dbhash2.tmp";
my $Dfile3 = "dbhash3.tmp";
@@ -57,22 +30,6 @@ unlink $Dfile1, $Dfile2, $Dfile3 ;
umask(0) ;
-sub addData
-{
- my $db = shift ;
- my @data = @_ ;
- die "addData odd data\n" unless @data /2 != 0 ;
- my ($k, $v) ;
- my $ret = 0 ;
- while (@data) {
- $k = shift @data ;
- $v = shift @data ;
- $ret += $db->db_put($k, $v) ;
- }
-
- return ($ret == 0) ;
-}
-
{
# error cases
my $lex = new LexFile $Dfile1, $Dfile2, $Dfile3 ;
@@ -115,8 +72,7 @@ sub addData
my $status ;
my $home = "./fred" ;
- rmtree $home if -e $home ;
- ok 6, mkdir($home, 0777) ;
+ ok 6, my $lexD = new LexDir($home);
ok 7, my $env = new BerkeleyDB::Env -Home => $home,
-Flags => DB_CREATE|DB_INIT_TXN
|DB_INIT_MPOOL;
@@ -265,6 +221,5 @@ sub addData
untie %hash1 ;
untie %hash2 ;
untie %hash3 ;
- rmtree $home ;
}
-
+print "# at the end\n";
diff --git a/bdb/perl/BerkeleyDB/t/mldbm.t b/bdb/perl/BerkeleyDB/t/mldbm.t
new file mode 100644
index 00000000000..d35f7e15895
--- /dev/null
+++ b/bdb/perl/BerkeleyDB/t/mldbm.t
@@ -0,0 +1,161 @@
+#!/usr/bin/perl -w
+
+use strict ;
+
+BEGIN
+{
+ if ($] < 5.005) {
+ print "1..0 # This is Perl $], skipping test\n" ;
+ exit 0 ;
+ }
+
+ eval { require Data::Dumper ; };
+ if ($@) {
+ print "1..0 # Data::Dumper is not installed on this system.\n";
+ exit 0 ;
+ }
+ if ($Data::Dumper::VERSION < 2.08) {
+ print "1..0 # Data::Dumper 2.08 or better required (found $Data::Dumper::VERSION).\n";
+ exit 0 ;
+ }
+ eval { require MLDBM ; };
+ if ($@) {
+ print "1..0 # MLDBM is not installed on this system.\n";
+ exit 0 ;
+ }
+}
+
+use t::util ;
+
+print "1..12\n";
+
+{
+ package BTREE ;
+
+ use BerkeleyDB ;
+ use MLDBM qw(BerkeleyDB::Btree) ;
+ use Data::Dumper;
+
+ my $filename = "";
+ my $lex = new LexFile $filename;
+
+ $MLDBM::UseDB = "BerkeleyDB::Btree" ;
+ my %o ;
+ my $db = tie %o, 'MLDBM', -Filename => $filename,
+ -Flags => DB_CREATE
+ or die $!;
+ ::ok 1, $db ;
+ ::ok 2, $db->type() == DB_BTREE ;
+
+ my $c = [\'c'];
+ my $b = {};
+ my $a = [1, $b, $c];
+ $b->{a} = $a;
+ $b->{b} = $a->[1];
+ $b->{c} = $a->[2];
+ @o{qw(a b c)} = ($a, $b, $c);
+ $o{d} = "{once upon a time}";
+ $o{e} = 1024;
+ $o{f} = 1024.1024;
+ my $first = Data::Dumper->new([@o{qw(a b c)}], [qw(a b c)])->Quotekeys(0)->Dump;
+ my $second = <<'EOT';
+$a = [
+ 1,
+ {
+ a => $a,
+ b => $a->[1],
+ c => [
+ \'c'
+ ]
+ },
+ $a->[1]{c}
+ ];
+$b = {
+ a => [
+ 1,
+ $b,
+ [
+ \'c'
+ ]
+ ],
+ b => $b,
+ c => $b->{a}[2]
+ };
+$c = [
+ \'c'
+ ];
+EOT
+
+ ::ok 3, $first eq $second ;
+ ::ok 4, $o{d} eq "{once upon a time}" ;
+ ::ok 5, $o{e} == 1024 ;
+ ::ok 6, $o{f} eq 1024.1024 ;
+
+}
+
+{
+
+ package HASH ;
+
+ use BerkeleyDB ;
+ use MLDBM qw(BerkeleyDB::Hash) ;
+ use Data::Dumper;
+
+ my $filename = "";
+ my $lex = new LexFile $filename;
+
+ unlink $filename ;
+ $MLDBM::UseDB = "BerkeleyDB::Hash" ;
+ my %o ;
+ my $db = tie %o, 'MLDBM', -Filename => $filename,
+ -Flags => DB_CREATE
+ or die $!;
+ ::ok 7, $db ;
+ ::ok 8, $db->type() == DB_HASH ;
+
+
+ my $c = [\'c'];
+ my $b = {};
+ my $a = [1, $b, $c];
+ $b->{a} = $a;
+ $b->{b} = $a->[1];
+ $b->{c} = $a->[2];
+ @o{qw(a b c)} = ($a, $b, $c);
+ $o{d} = "{once upon a time}";
+ $o{e} = 1024;
+ $o{f} = 1024.1024;
+ my $first = Data::Dumper->new([@o{qw(a b c)}], [qw(a b c)])->Quotekeys(0)->Dump;
+ my $second = <<'EOT';
+$a = [
+ 1,
+ {
+ a => $a,
+ b => $a->[1],
+ c => [
+ \'c'
+ ]
+ },
+ $a->[1]{c}
+ ];
+$b = {
+ a => [
+ 1,
+ $b,
+ [
+ \'c'
+ ]
+ ],
+ b => $b,
+ c => $b->{a}[2]
+ };
+$c = [
+ \'c'
+ ];
+EOT
+
+ ::ok 9, $first eq $second ;
+ ::ok 10, $o{d} eq "{once upon a time}" ;
+ ::ok 11, $o{e} == 1024 ;
+ ::ok 12, $o{f} eq 1024.1024 ;
+
+}
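The new mldbm.t above layers MLDBM over BerkeleyDB::Btree so that nested Perl structures can be stored as values. A condensed sketch of the same idea, with an invented filename and data, might look like this:

    # Sketch only: nested data through MLDBM on top of BerkeleyDB::Btree.
    use strict;
    use BerkeleyDB;
    use MLDBM qw(BerkeleyDB::Btree);   # values are serialised via Data::Dumper

    my $file = "mldbm-demo.db";
    unlink $file;

    my %o;
    my $db = tie %o, 'MLDBM',
        -Filename => $file,
        -Flags    => DB_CREATE
        or die $!;

    # Whole nested structures are frozen on STORE and thawed on FETCH.
    $o{config} = { host => "localhost", ports => [ 8080, 8443 ] };
    print $o{config}{ports}[1], "\n";   # 8443

    # Note: changes made inside a fetched structure are not written back
    # automatically; re-assign the top-level value to persist them.
    undef $db;
    untie %o;
    unlink $file;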
diff --git a/bdb/perl.BerkeleyDB/t/queue.t b/bdb/perl/BerkeleyDB/t/queue.t
index 0f459a43a69..86add129ca4 100644
--- a/bdb/perl.BerkeleyDB/t/queue.t
+++ b/bdb/perl/BerkeleyDB/t/queue.t
@@ -12,104 +12,17 @@ BEGIN {
}
use BerkeleyDB;
-use File::Path qw(rmtree);
+use t::util ;
BEGIN
{
- if ($BerkeleyDB::db_version < 3) {
- print "1..0 # Skipping test, Queue needs Berkeley DB 3.x or better\n" ;
+ if ($BerkeleyDB::db_version < 3.3) {
+ print "1..0 # Skipping test, Queue needs Berkeley DB 3.3.x or better\n" ;
exit 0 ;
}
}
-print "1..197\n";
-
-my %DB_errors = (
- 'DB_INCOMPLETE' => "DB_INCOMPLETE: Sync was unable to complete",
- 'DB_KEYEMPTY' => "DB_KEYEMPTY: Non-existent key/data pair",
- 'DB_KEYEXIST' => "DB_KEYEXIST: Key/data pair already exists",
- 'DB_LOCK_DEADLOCK' => "DB_LOCK_DEADLOCK: Locker killed to resolve a deadlock",
- 'DB_LOCK_NOTGRANTED' => "DB_LOCK_NOTGRANTED: Lock not granted",
- 'DB_NOTFOUND' => "DB_NOTFOUND: No matching key/data pair found",
- 'DB_OLD_VERSION'=> "DB_OLDVERSION: Database requires a version upgrade",
- 'DB_RUNRECOVERY'=> "DB_RUNRECOVERY: Fatal error, run database recovery",
- ) ;
-
-{
- package LexFile ;
-
- sub new
- {
- my $self = shift ;
- unlink @_ ;
- bless [ @_ ], $self ;
- }
-
- sub DESTROY
- {
- my $self = shift ;
- unlink @{ $self } ;
- }
-}
-
-
-sub ok
-{
- my $no = shift ;
- my $result = shift ;
-
- print "not " unless $result ;
- print "ok $no\n" ;
-}
-
-sub docat
-{
- my $file = shift;
- local $/ = undef;
- open(CAT,$file) || die "Cannot open $file:$!";
- my $result = <CAT>;
- close(CAT);
- return $result;
-}
-
-sub touch
-{
- my $file = shift ;
- open(CAT,">$file") || die "Cannot open $file:$!";
- close(CAT);
-}
-
-sub joiner
-{
- my $db = shift ;
- my $sep = shift ;
- my ($k, $v) = (0, "") ;
- my @data = () ;
-
- my $cursor = $db->db_cursor() or return () ;
- for ( my $status = $cursor->c_get($k, $v, DB_FIRST) ;
- $status == 0 ;
- $status = $cursor->c_get($k, $v, DB_NEXT)) {
- push @data, $v ;
- }
-
- (scalar(@data), join($sep, @data)) ;
-}
-
-sub countRecords
-{
- my $db = shift ;
- my ($k, $v) = (0,0) ;
- my ($count) = 0 ;
- my ($cursor) = $db->db_cursor() ;
- #for ($status = $cursor->c_get($k, $v, DB_FIRST) ;
-# $status == 0 ;
-# $status = $cursor->c_get($k, $v, DB_NEXT) )
- while ($cursor->c_get($k, $v, DB_NEXT) == 0)
- { ++ $count }
-
- return $count ;
-}
+print "1..201\n";
sub fillout
{
@@ -207,11 +120,10 @@ umask(0) ;
my $home = "./fred" ;
my $rec_len = 11 ;
- ok 27, -d $home ? chmod 0777, $home : mkdir($home, 0777) ;
+ ok 27, my $lexD = new LexDir($home);
ok 28, my $env = new BerkeleyDB::Env -Flags => DB_CREATE|DB_INIT_MPOOL,
-Home => $home ;
-
ok 29, my $db = new BerkeleyDB::Queue -Filename => $Dfile,
-Env => $env,
-Flags => DB_CREATE,
@@ -224,7 +136,6 @@ umask(0) ;
ok 32, $value eq fillout("some value", $rec_len) ;
undef $db ;
undef $env ;
- rmtree $home ;
}
@@ -299,17 +210,6 @@ umask(0) ;
{
# Tied Array interface
- # full tied array support started in Perl 5.004_57
- # just double check.
- my $FA = 0 ;
- {
- sub try::TIEARRAY { bless [], "try" }
- sub try::FETCHSIZE { $FA = 1 }
- my @a ;
- tie @a, 'try' ;
- my $a = @a ;
- }
-
my $lex = new LexFile $Dfile ;
my @array ;
my $db ;
@@ -618,8 +518,7 @@ umask(0) ;
my $value ;
my $home = "./fred" ;
- rmtree $home if -e $home ;
- ok 169, mkdir($home, 0777) ;
+ ok 169, my $lexD = new LexDir($home);
my $rec_len = 9 ;
ok 170, my $env = new BerkeleyDB::Env -Home => $home,
-Flags => DB_CREATE|DB_INIT_TXN|
@@ -635,6 +534,10 @@ umask(0) ;
-Pad => " " ;
+ ok 173, $txn->txn_commit() == 0 ;
+ ok 174, $txn = $env->txn_begin() ;
+ $db1->Txn($txn);
+
# create some data
my @data = (
"boat",
@@ -647,38 +550,37 @@ umask(0) ;
for ($i = 0 ; $i < @data ; ++$i) {
$ret += $db1->db_put($i, $data[$i]) ;
}
- ok 173, $ret == 0 ;
+ ok 175, $ret == 0 ;
# should be able to see all the records
- ok 174, my $cursor = $db1->db_cursor() ;
+ ok 176, my $cursor = $db1->db_cursor() ;
my ($k, $v) = (0, "") ;
my $count = 0 ;
# sequence forwards
while ($cursor->c_get($k, $v, DB_NEXT) == 0) {
++ $count ;
}
- ok 175, $count == 3 ;
+ ok 177, $count == 3 ;
undef $cursor ;
# now abort the transaction
- ok 176, $txn->txn_abort() == 0 ;
+ ok 178, $txn->txn_abort() == 0 ;
# there shouldn't be any records in the database
$count = 0 ;
# sequence forwards
- ok 177, $cursor = $db1->db_cursor() ;
+ ok 179, $cursor = $db1->db_cursor() ;
while ($cursor->c_get($k, $v, DB_NEXT) == 0) {
++ $count ;
}
- ok 178, $count == 0 ;
+ ok 180, $count == 0 ;
undef $txn ;
undef $cursor ;
undef $db1 ;
undef $env ;
untie @array ;
- rmtree $home ;
}
@@ -690,7 +592,7 @@ umask(0) ;
my @array ;
my ($k, $v) ;
my $rec_len = 7 ;
- ok 179, my $db = new BerkeleyDB::Queue -Filename => $Dfile,
+ ok 181, my $db = new BerkeleyDB::Queue -Filename => $Dfile,
-Flags => DB_CREATE,
-Pagesize => 4 * 1024,
-Len => $rec_len,
@@ -698,8 +600,8 @@ umask(0) ;
;
my $ref = $db->db_stat() ;
- ok 180, $ref->{$recs} == 0;
- ok 181, $ref->{'qs_pagesize'} == 4 * 1024;
+ ok 182, $ref->{$recs} == 0;
+ ok 183, $ref->{'qs_pagesize'} == 4 * 1024;
# create some data
my @data = (
@@ -713,10 +615,10 @@ umask(0) ;
for ($i = $db->ArrayOffset ; @data ; ++$i) {
$ret += $db->db_put($i, shift @data) ;
}
- ok 182, $ret == 0 ;
+ ok 184, $ret == 0 ;
$ref = $db->db_stat() ;
- ok 183, $ref->{$recs} == 3;
+ ok 185, $ref->{$recs} == 3;
}
{
@@ -767,12 +669,12 @@ EOM
BEGIN { push @INC, '.'; }
eval 'use SubDB ; ';
- main::ok 184, $@ eq "" ;
+ main::ok 186, $@ eq "" ;
my @h ;
my $X ;
my $rec_len = 34 ;
eval '
- $X = tie(@h, "SubDB", -Filename => "dbbtree.tmp",
+ $X = tie(@h, "SubDB", -Filename => "dbqueue.tmp",
-Flags => DB_CREATE,
-Mode => 0640 ,
-Len => $rec_len,
@@ -780,26 +682,28 @@ EOM
);
' ;
- main::ok 185, $@ eq "" ;
+ main::ok 187, $@ eq "" ;
my $ret = eval '$h[1] = 3 ; return $h[1] ' ;
- main::ok 186, $@ eq "" ;
- main::ok 187, $ret == 7 ;
+ main::ok 188, $@ eq "" ;
+ main::ok 189, $ret == 7 ;
my $value = 0;
$ret = eval '$X->db_put(1, 4) ; $X->db_get(1, $value) ; return $value' ;
- main::ok 188, $@ eq "" ;
- main::ok 189, $ret == 10 ;
+ main::ok 190, $@ eq "" ;
+ main::ok 191, $ret == 10 ;
$ret = eval ' DB_NEXT eq main::DB_NEXT ' ;
- main::ok 190, $@ eq "" ;
- main::ok 191, $ret == 1 ;
+ main::ok 192, $@ eq "" ;
+ main::ok 193, $ret == 1 ;
$ret = eval '$X->A_new_method(1) ' ;
- main::ok 192, $@ eq "" ;
- main::ok 193, $ret eq "[[10]]" ;
+ main::ok 194, $@ eq "" ;
+ main::ok 195, $ret eq "[[10]]" ;
- unlink "SubDB.pm", "dbbtree.tmp" ;
+ undef $X ;
+ untie @h ;
+ unlink "SubDB.pm", "dbqueue.tmp" ;
}
@@ -810,7 +714,7 @@ EOM
my @array ;
my $value ;
my $rec_len = 21 ;
- ok 194, my $db = tie @array, 'BerkeleyDB::Queue',
+ ok 196, my $db = tie @array, 'BerkeleyDB::Queue',
-Filename => $Dfile,
-Flags => DB_CREATE ,
-Len => $rec_len,
@@ -821,14 +725,36 @@ EOM
$array[3] = "ghi" ;
my $k = 0 ;
- ok 195, $db->db_put($k, "fred", DB_APPEND) == 0 ;
- ok 196, $k == 4 ;
- ok 197, $array[4] eq fillout("fred", $rec_len) ;
+ ok 197, $db->db_put($k, "fred", DB_APPEND) == 0 ;
+ ok 198, $k == 4 ;
+ ok 199, $array[4] eq fillout("fred", $rec_len) ;
undef $db ;
untie @array ;
}
+{
+ # 23 Sept 2001 -- push into an empty array
+ my $lex = new LexFile $Dfile ;
+ my @array ;
+ my $db ;
+ my $rec_len = 21 ;
+ ok 200, $db = tie @array, 'BerkeleyDB::Queue',
+ -Flags => DB_CREATE ,
+ -ArrayBase => 0,
+ -Len => $rec_len,
+ -Pad => " " ,
+ -Filename => $Dfile ;
+ $FA ? push @array, "first"
+ : $db->push("first") ;
+
+ ok 201, ($FA ? pop @array : $db->pop()) eq fillout("first", $rec_len) ;
+
+ undef $db;
+ untie @array ;
+
+}
+
__END__
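The queue tests above rely on fixed-length records and DB_APPEND. A stripped-down sketch of that usage (record length and filename chosen arbitrarily) is:

    # Sketch only: BerkeleyDB::Queue stores fixed-length records, padding
    # every value to -Len with the -Pad character.
    use strict;
    use BerkeleyDB;

    my $file    = "queue-demo.db";
    my $rec_len = 11;
    unlink $file;

    my @queue;
    my $db = tie @queue, 'BerkeleyDB::Queue',
        -Filename => $file,
        -Len      => $rec_len,
        -Pad      => " ",
        -Flags    => DB_CREATE
        or die "cannot open $file: $BerkeleyDB::Error";

    # DB_APPEND allocates the next record number and writes it back into $k.
    my $k = 0;
    $db->db_put($k, "boat",  DB_APPEND);   # $k becomes 1 (record numbers start at 1)
    $db->db_put($k, "house", DB_APPEND);   # $k becomes 2

    # Reads come back padded with spaces to the fixed record length.
    my $v;
    $db->db_get(1, $v);
    print "[$v]\n";                        # "boat" padded to 11 bytes

    undef $db;
    untie @queue;
    unlink $file;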
diff --git a/bdb/perl.BerkeleyDB/t/recno.t b/bdb/perl/BerkeleyDB/t/recno.t
index 0f210f540c3..64b1803f736 100644
--- a/bdb/perl.BerkeleyDB/t/recno.t
+++ b/bdb/perl/BerkeleyDB/t/recno.t
@@ -12,96 +12,9 @@ BEGIN {
}
use BerkeleyDB;
-use File::Path qw(rmtree);
+use t::util ;
-print "1..218\n";
-
-my %DB_errors = (
- 'DB_INCOMPLETE' => "DB_INCOMPLETE: Sync was unable to complete",
- 'DB_KEYEMPTY' => "DB_KEYEMPTY: Non-existent key/data pair",
- 'DB_KEYEXIST' => "DB_KEYEXIST: Key/data pair already exists",
- 'DB_LOCK_DEADLOCK' => "DB_LOCK_DEADLOCK: Locker killed to resolve a deadlock",
- 'DB_LOCK_NOTGRANTED' => "DB_LOCK_NOTGRANTED: Lock not granted",
- 'DB_NOTFOUND' => "DB_NOTFOUND: No matching key/data pair found",
- 'DB_OLD_VERSION' => "DB_OLDVERSION: Database requires a version upgrade",
- 'DB_RUNRECOVERY' => "DB_RUNRECOVERY: Fatal error, run database recovery",
-) ;
-
-{
- package LexFile ;
-
- sub new
- {
- my $self = shift ;
- unlink @_ ;
- bless [ @_ ], $self ;
- }
-
- sub DESTROY
- {
- my $self = shift ;
- unlink @{ $self } ;
- }
-}
-
-
-sub ok
-{
- my $no = shift ;
- my $result = shift ;
-
- print "not " unless $result ;
- print "ok $no\n" ;
-}
-
-sub docat
-{
- my $file = shift;
- local $/ = undef;
- open(CAT,$file) || die "Cannot open $file:$!";
- my $result = <CAT>;
- close(CAT);
- return $result;
-}
-
-sub touch
-{
- my $file = shift ;
- open(CAT,">$file") || die "Cannot open $file:$!";
- close(CAT);
-}
-
-sub joiner
-{
- my $db = shift ;
- my $sep = shift ;
- my ($k, $v) = (0, "") ;
- my @data = () ;
-
- my $cursor = $db->db_cursor() or return () ;
- for ( my $status = $cursor->c_get($k, $v, DB_FIRST) ;
- $status == 0 ;
- $status = $cursor->c_get($k, $v, DB_NEXT)) {
- push @data, $v ;
- }
-
- (scalar(@data), join($sep, @data)) ;
-}
-
-sub countRecords
-{
- my $db = shift ;
- my ($k, $v) = (0,0) ;
- my ($count) = 0 ;
- my ($cursor) = $db->db_cursor() ;
- #for ($status = $cursor->c_get($k, $v, DB_FIRST) ;
-# $status == 0 ;
-# $status = $cursor->c_get($k, $v, DB_NEXT) )
- while ($cursor->c_get($k, $v, DB_NEXT) == 0)
- { ++ $count }
-
- return $count ;
-}
+print "1..226\n";
my $Dfile = "dbhash.tmp";
my $Dfile2 = "dbhash2.tmp";
@@ -110,7 +23,6 @@ unlink $Dfile;
umask(0) ;
-
# Check for invalid parameters
{
# Check for invalid parameters
@@ -183,7 +95,7 @@ umask(0) ;
my $lex = new LexFile $Dfile ;
my $home = "./fred" ;
- ok 27, -d $home ? chmod 0777, $home : mkdir($home, 0777) ;
+ ok 27, my $lexD = new LexDir($home);
ok 28, my $env = new BerkeleyDB::Env -Flags => DB_CREATE|DB_INIT_MPOOL,
-Home => $home ;
@@ -199,7 +111,6 @@ umask(0) ;
ok 32, $value eq "some value" ;
undef $db ;
undef $env ;
- rmtree $home ;
}
@@ -272,16 +183,6 @@ umask(0) ;
{
# Tied Array interface
- # full tied array support started in Perl 5.004_57
- # just double check.
- my $FA = 0 ;
- {
- sub try::TIEARRAY { bless [], "try" }
- sub try::FETCHSIZE { $FA = 1 }
- my @a ;
- tie @a, 'try' ;
- my $a = @a ;
- }
my $lex = new LexFile $Dfile ;
my @array ;
@@ -563,8 +464,7 @@ umask(0) ;
my $value ;
my $home = "./fred" ;
- rmtree $home if -e $home ;
- ok 167, mkdir($home, 0777) ;
+ ok 167, my $lexD = new LexDir($home);
ok 168, my $env = new BerkeleyDB::Env -Home => $home,
-Flags => DB_CREATE|DB_INIT_TXN|
DB_INIT_MPOOL|DB_INIT_LOCK ;
@@ -577,6 +477,10 @@ umask(0) ;
-Txn => $txn ;
+ ok 171, $txn->txn_commit() == 0 ;
+ ok 172, $txn = $env->txn_begin() ;
+ $db1->Txn($txn);
+
# create some data
my @data = (
"boat",
@@ -589,38 +493,37 @@ umask(0) ;
for ($i = 0 ; $i < @data ; ++$i) {
$ret += $db1->db_put($i, $data[$i]) ;
}
- ok 171, $ret == 0 ;
+ ok 173, $ret == 0 ;
# should be able to see all the records
- ok 172, my $cursor = $db1->db_cursor() ;
+ ok 174, my $cursor = $db1->db_cursor() ;
my ($k, $v) = (0, "") ;
my $count = 0 ;
# sequence forwards
while ($cursor->c_get($k, $v, DB_NEXT) == 0) {
++ $count ;
}
- ok 173, $count == 3 ;
+ ok 175, $count == 3 ;
undef $cursor ;
# now abort the transaction
- ok 174, $txn->txn_abort() == 0 ;
+ ok 176, $txn->txn_abort() == 0 ;
# there shouldn't be any records in the database
$count = 0 ;
# sequence forwards
- ok 175, $cursor = $db1->db_cursor() ;
+ ok 177, $cursor = $db1->db_cursor() ;
while ($cursor->c_get($k, $v, DB_NEXT) == 0) {
++ $count ;
}
- ok 176, $count == 0 ;
+ ok 178, $count == 0 ;
undef $txn ;
undef $cursor ;
undef $db1 ;
undef $env ;
untie @array ;
- rmtree $home ;
}
@@ -631,14 +534,14 @@ umask(0) ;
my $recs = ($BerkeleyDB::db_version >= 3.1 ? "bt_ndata" : "bt_nrecs") ;
my @array ;
my ($k, $v) ;
- ok 177, my $db = new BerkeleyDB::Recno -Filename => $Dfile,
+ ok 179, my $db = new BerkeleyDB::Recno -Filename => $Dfile,
-Flags => DB_CREATE,
-Pagesize => 4 * 1024,
;
my $ref = $db->db_stat() ;
- ok 178, $ref->{$recs} == 0;
- ok 179, $ref->{'bt_pagesize'} == 4 * 1024;
+ ok 180, $ref->{$recs} == 0;
+ ok 181, $ref->{'bt_pagesize'} == 4 * 1024;
# create some data
my @data = (
@@ -652,10 +555,10 @@ umask(0) ;
for ($i = $db->ArrayOffset ; @data ; ++$i) {
$ret += $db->db_put($i, shift @data) ;
}
- ok 180, $ret == 0 ;
+ ok 182, $ret == 0 ;
$ref = $db->db_stat() ;
- ok 181, $ref->{$recs} == 3;
+ ok 183, $ref->{$recs} == 3;
}
{
@@ -706,35 +609,37 @@ EOM
BEGIN { push @INC, '.'; }
eval 'use SubDB ; ';
- main::ok 182, $@ eq "" ;
+ main::ok 184, $@ eq "" ;
my @h ;
my $X ;
eval '
- $X = tie(@h, "SubDB", -Filename => "dbbtree.tmp",
+ $X = tie(@h, "SubDB", -Filename => "dbrecno.tmp",
-Flags => DB_CREATE,
-Mode => 0640 );
' ;
- main::ok 183, $@ eq "" ;
+ main::ok 185, $@ eq "" ;
my $ret = eval '$h[1] = 3 ; return $h[1] ' ;
- main::ok 184, $@ eq "" ;
- main::ok 185, $ret == 7 ;
+ main::ok 186, $@ eq "" ;
+ main::ok 187, $ret == 7 ;
my $value = 0;
$ret = eval '$X->db_put(1, 4) ; $X->db_get(1, $value) ; return $value' ;
- main::ok 186, $@ eq "" ;
- main::ok 187, $ret == 10 ;
+ main::ok 188, $@ eq "" ;
+ main::ok 189, $ret == 10 ;
$ret = eval ' DB_NEXT eq main::DB_NEXT ' ;
- main::ok 188, $@ eq "" ;
- main::ok 189, $ret == 1 ;
+ main::ok 190, $@ eq "" ;
+ main::ok 191, $ret == 1 ;
$ret = eval '$X->A_new_method(1) ' ;
- main::ok 190, $@ eq "" ;
- main::ok 191, $ret eq "[[10]]" ;
+ main::ok 192, $@ eq "" ;
+ main::ok 193, $ret eq "[[10]]" ;
- unlink "SubDB.pm", "dbbtree.tmp" ;
+ undef $X;
+ untie @h;
+ unlink "SubDB.pm", "dbrecno.tmp" ;
}
@@ -745,7 +650,7 @@ EOM
touch $Dfile2 ;
my @array ;
my $value ;
- ok 192, tie @array, 'BerkeleyDB::Recno', -Filename => $Dfile,
+ ok 194, tie @array, 'BerkeleyDB::Recno', -Filename => $Dfile,
-ArrayBase => 0,
-Flags => DB_CREATE ,
-Source => $Dfile2 ;
@@ -755,7 +660,7 @@ EOM
untie @array ;
my $x = docat($Dfile2) ;
- ok 193, $x eq "abc\ndef\n\nghi\n" ;
+ ok 195, $x eq "abc\ndef\n\nghi\n" ;
}
{
@@ -765,7 +670,7 @@ EOM
touch $Dfile2 ;
my @array ;
my $value ;
- ok 194, tie @array, 'BerkeleyDB::Recno', -Filename => $Dfile,
+ ok 196, tie @array, 'BerkeleyDB::Recno', -Filename => $Dfile,
-ArrayBase => 0,
-Flags => DB_CREATE ,
-Source => $Dfile2 ,
@@ -776,7 +681,7 @@ EOM
untie @array ;
my $x = docat($Dfile2) ;
- ok 195, $x eq "abc-def--ghi-";
+ ok 197, $x eq "abc-def--ghi-";
}
{
@@ -786,7 +691,7 @@ EOM
touch $Dfile2 ;
my @array ;
my $value ;
- ok 196, tie @array, 'BerkeleyDB::Recno', -Filename => $Dfile,
+ ok 198, tie @array, 'BerkeleyDB::Recno', -Filename => $Dfile,
-ArrayBase => 0,
-Flags => DB_CREATE ,
-Len => 5,
@@ -797,7 +702,7 @@ EOM
untie @array ;
my $x = docat($Dfile2) ;
- ok 197, $x eq "abc def ghi " ;
+ ok 199, $x eq "abc def ghi " ;
}
{
@@ -807,7 +712,7 @@ EOM
touch $Dfile2 ;
my @array ;
my $value ;
- ok 198, tie @array, 'BerkeleyDB::Recno', -Filename => $Dfile,
+ ok 200, tie @array, 'BerkeleyDB::Recno', -Filename => $Dfile,
-ArrayBase => 0,
-Flags => DB_CREATE ,
-Len => 5,
@@ -819,7 +724,7 @@ EOM
untie @array ;
my $x = docat($Dfile2) ;
- ok 199, $x eq "abc--def-------ghi--" ;
+ ok 201, $x eq "abc--def-------ghi--" ;
}
{
@@ -828,7 +733,7 @@ EOM
my $lex = new LexFile $Dfile;
my @array ;
my $value ;
- ok 200, my $db = tie @array, 'BerkeleyDB::Recno', -Filename => $Dfile,
+ ok 202, my $db = tie @array, 'BerkeleyDB::Recno', -Filename => $Dfile,
-Property => DB_RENUMBER,
-ArrayBase => 0,
-Flags => DB_CREATE ;
@@ -837,14 +742,14 @@ EOM
$array[1] = "def" ;
$array[3] = "ghi" ;
- ok 201, my ($length, $joined) = joiner($db, "|") ;
- ok 202, $length == 3 ;
- ok 203, $joined eq "abc|def|ghi";
+ ok 203, my ($length, $joined) = joiner($db, "|") ;
+ ok 204, $length == 3 ;
+ ok 205, $joined eq "abc|def|ghi";
- ok 204, $db->db_del(1) == 0 ;
- ok 205, ($length, $joined) = joiner($db, "|") ;
- ok 206, $length == 2 ;
- ok 207, $joined eq "abc|ghi";
+ ok 206, $db->db_del(1) == 0 ;
+ ok 207, ($length, $joined) = joiner($db, "|") ;
+ ok 208, $length == 2 ;
+ ok 209, $joined eq "abc|ghi";
undef $db ;
untie @array ;
@@ -857,7 +762,7 @@ EOM
my $lex = new LexFile $Dfile;
my @array ;
my $value ;
- ok 208, my $db = tie @array, 'BerkeleyDB::Recno',
+ ok 210, my $db = tie @array, 'BerkeleyDB::Recno',
-Filename => $Dfile,
-Flags => DB_CREATE ;
@@ -866,8 +771,8 @@ EOM
$array[3] = "ghi" ;
my $k = 0 ;
- ok 209, $db->db_put($k, "fred", DB_APPEND) == 0 ;
- ok 210, $k == 4 ;
+ ok 211, $db->db_put($k, "fred", DB_APPEND) == 0 ;
+ ok 212, $k == 4 ;
undef $db ;
untie @array ;
@@ -880,7 +785,7 @@ EOM
touch $Dfile2 ;
my @array ;
my $value ;
- ok 211, tie @array, 'BerkeleyDB::Recno', -Source => $Dfile2 ,
+ ok 213, tie @array, 'BerkeleyDB::Recno', -Source => $Dfile2 ,
-ArrayBase => 0,
-Property => DB_RENUMBER,
-Flags => DB_CREATE ;
@@ -890,7 +795,7 @@ EOM
untie @array ;
my $x = docat($Dfile2) ;
- ok 212, $x eq "abc\ndef\n\nghi\n" ;
+ ok 214, $x eq "abc\ndef\n\nghi\n" ;
}
{
@@ -900,7 +805,7 @@ EOM
touch $Dfile2 ;
my @array ;
my $value ;
- ok 213, tie @array, 'BerkeleyDB::Recno',
+ ok 215, tie @array, 'BerkeleyDB::Recno',
-ArrayBase => 0,
-Flags => DB_CREATE ,
-Source => $Dfile2 ,
@@ -912,7 +817,7 @@ EOM
untie @array ;
my $x = docat($Dfile2) ;
- ok 214, $x eq "abc-def--ghi-";
+ ok 216, $x eq "abc-def--ghi-";
}
{
@@ -922,7 +827,7 @@ EOM
touch $Dfile2 ;
my @array ;
my $value ;
- ok 215, tie @array, 'BerkeleyDB::Recno', -ArrayBase => 0,
+ ok 217, tie @array, 'BerkeleyDB::Recno', -ArrayBase => 0,
-Flags => DB_CREATE ,
-Property => DB_RENUMBER,
-Len => 5,
@@ -933,7 +838,7 @@ EOM
untie @array ;
my $x = docat($Dfile2) ;
- ok 216, $x eq "abc def ghi " ;
+ ok 218, $x eq "abc def ghi " ;
}
{
@@ -943,7 +848,7 @@ EOM
touch $Dfile2 ;
my @array ;
my $value ;
- ok 217, tie @array, 'BerkeleyDB::Recno',
+ ok 219, tie @array, 'BerkeleyDB::Recno',
-ArrayBase => 0,
-Flags => DB_CREATE ,
-Property => DB_RENUMBER,
@@ -956,9 +861,50 @@ EOM
untie @array ;
my $x = docat($Dfile2) ;
- ok 218, $x eq "abc--def-------ghi--" ;
+ ok 220, $x eq "abc--def-------ghi--" ;
+}
+
+{
+ # 23 Sept 2001 -- push into an empty array
+ my $lex = new LexFile $Dfile ;
+ my @array ;
+ my $db ;
+ ok 221, $db = tie @array, 'BerkeleyDB::Recno',
+ -ArrayBase => 0,
+ -Flags => DB_CREATE ,
+ -Property => DB_RENUMBER,
+ -Filename => $Dfile ;
+ $FA ? push @array, "first"
+ : $db->push("first") ;
+
+ ok 222, $array[0] eq "first" ;
+ ok 223, ($FA ? pop @array : $db->pop()) eq "first" ;
+
+ undef $db;
+ untie @array ;
+
}
+{
+ # 23 Sept 2001 -- unshift into an empty array
+ my $lex = new LexFile $Dfile ;
+ my @array ;
+ my $db ;
+ ok 224, $db = tie @array, 'BerkeleyDB::Recno',
+ -ArrayBase => 0,
+ -Flags => DB_CREATE ,
+ -Property => DB_RENUMBER,
+ -Filename => $Dfile ;
+ $FA ? unshift @array, "first"
+ : $db->unshift("first") ;
+
+ ok 225, $array[0] eq "first" ;
+ ok 226, ($FA ? shift @array : $db->shift()) eq "first" ;
+
+ undef $db;
+ untie @array ;
+
+}
__END__
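Several of the renumbered recno tests write the database back to a flat text file through -Source. The pattern, reduced to a sketch with invented filenames, is roughly:

    # Sketch only: a BerkeleyDB::Recno database backed by a plain text file,
    # one record per line.
    use strict;
    use BerkeleyDB;

    my $dbfile  = "recno-demo.db";
    my $txtfile = "recno-demo.txt";
    unlink $dbfile, $txtfile;

    # The -Source file must exist before the tie.
    open my $fh, '>', $txtfile or die "cannot create $txtfile: $!";
    close $fh;

    my @lines;
    tie @lines, 'BerkeleyDB::Recno',
        -Filename  => $dbfile,
        -ArrayBase => 0,            # make the tied array 0-based
        -Source    => $txtfile,     # flat-file backing store
        -Flags     => DB_CREATE
        or die "cannot open $dbfile: $BerkeleyDB::Error";

    @lines[0, 1, 3] = ("abc", "def", "ghi");   # index 2 is left empty

    # Untying flushes the records back to the text file, newline separated,
    # so $txtfile should now hold "abc\ndef\n\nghi\n".
    untie @lines;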
diff --git a/bdb/perl.BerkeleyDB/t/strict.t b/bdb/perl/BerkeleyDB/t/strict.t
index 0a856bbb1c6..ab41d44cb41 100644
--- a/bdb/perl.BerkeleyDB/t/strict.t
+++ b/bdb/perl/BerkeleyDB/t/strict.t
@@ -10,48 +10,10 @@ BEGIN {
}
use BerkeleyDB;
-use File::Path qw(rmtree);
+use t::util ;
print "1..44\n";
-
-{
- package LexFile ;
-
- sub new
- {
- my $self = shift ;
- unlink @_ ;
- bless [ @_ ], $self ;
- }
-
- sub DESTROY
- {
- my $self = shift ;
- unlink @{ $self } ;
- }
-}
-
-sub ok
-{
- my $no = shift ;
- my $result = shift ;
-
- print "not " unless $result ;
- print "ok $no\n" ;
-}
-
-sub docat
-{
- my $file = shift;
- local $/ = undef;
- open(CAT,$file) || die "Cannot open $file:$!";
- my $result = <CAT>;
- close(CAT);
- return $result;
-}
-
-
my $Dfile = "dbhash.tmp";
my $home = "./fred" ;
@@ -63,8 +25,7 @@ umask(0);
my %hash ;
my $status ;
- rmtree $home if -e $home ;
- ok 1, mkdir($home, 0777) ;
+ ok 1, my $lexD = new LexDir($home);
ok 2, my $env = new BerkeleyDB::Env -Home => $home,
-Flags => DB_CREATE|DB_INIT_TXN|
DB_INIT_MPOOL|DB_INIT_LOCK ;
@@ -80,7 +41,6 @@ umask(0);
ok 6, $@ eq "" ;
#print "[$@]\n" ;
- rmtree $home if -e $home ;
}
{
@@ -88,8 +48,7 @@ umask(0);
my $lex = new LexFile $Dfile ;
my %hash ;
- rmtree $home if -e $home ;
- ok 7, mkdir($home, 0777) ;
+ ok 7, my $lexD = new LexDir($home);
ok 8, my $env = new BerkeleyDB::Env -Home => $home,
-Flags => DB_CREATE|DB_INIT_TXN|
DB_INIT_MPOOL|DB_INIT_LOCK ;
@@ -105,7 +64,6 @@ umask(0);
undef $db1 ;
untie %hash ;
undef $env ;
- rmtree $home if -e $home ;
}
{
@@ -114,8 +72,7 @@ umask(0);
my %hash ;
my $status ;
- rmtree $home if -e $home ;
- ok 11, mkdir($home, 0777) ;
+ ok 11, my $lexD = new LexDir($home);
ok 12, my $env = new BerkeleyDB::Env -Home => $home,
-Flags => DB_CREATE|DB_INIT_TXN|
DB_INIT_MPOOL|DB_INIT_LOCK ;
@@ -130,6 +87,7 @@ umask(0);
eval { $status = $db->db_close() ; } ;
ok 16, $status == 0 ;
ok 17, $@ eq "" ;
+ #print "[$@]\n" ;
eval { $status = $env->db_appexit() ; } ;
ok 18, $status == 0 ;
ok 19, $@ eq "" ;
@@ -141,8 +99,7 @@ umask(0);
my $lex = new LexFile $Dfile ;
my %hash ;
- rmtree $home if -e $home ;
- ok 20, mkdir($home, 0777) ;
+ ok 20, my $lexD = new LexDir($home);
ok 21, my $env = new BerkeleyDB::Env -Home => $home,
-Flags => DB_CREATE|DB_INIT_TXN|
DB_INIT_MPOOL|DB_INIT_LOCK ;
@@ -171,7 +128,6 @@ umask(0);
ok 28, $status == 0 ;
ok 29, $@ eq "" ;
#print "[$@]\n" ;
- rmtree $home if -e $home ;
}
{
@@ -184,7 +140,6 @@ umask(0);
eval { $db->db_close() ; } ;
ok 32, $@ =~ /\QBerkeleyDB Aborting: attempted to close a database with 1 open cursor(s) at/;
#print "[$@]\n" ;
- rmtree $home if -e $home ;
}
{
@@ -193,8 +148,7 @@ umask(0);
my %hash ;
my $status ;
- rmtree $home if -e $home ;
- ok 33, mkdir($home, 0777) ;
+ ok 33, my $lexD = new LexDir($home);
ok 34, my $env = new BerkeleyDB::Env -Home => $home,
-Flags => DB_CREATE|DB_INIT_TXN|
DB_INIT_MPOOL|DB_INIT_LOCK ;
@@ -211,10 +165,10 @@ umask(0);
eval { $status = $db->db_close() ; } ;
ok 41, $status == 0 ;
ok 42, $@ eq "" ;
+ #print "[$@]\n" ;
eval { $status = $env->db_appexit() ; } ;
ok 43, $status == 0 ;
ok 44, $@ eq "" ;
#print "[$@]\n" ;
- rmtree $home if -e $home ;
}
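strict.t checks that handles are released in the right order, and the renumbered tests above abort when a database is closed while a cursor is still open. As a reminder of the sequence being enforced, a hedged sketch (names are made up) is:

    # Sketch only: release BerkeleyDB handles innermost-first -- cursor,
    # then database and its tie, then environment.
    use strict;
    use BerkeleyDB;

    my $home = "./demo-env";
    mkdir $home, 0777 unless -d $home;

    my $env = new BerkeleyDB::Env
        -Home  => $home,
        -Flags => DB_CREATE | DB_INIT_MPOOL
        or die "cannot create environment: $BerkeleyDB::Error";

    my %h;
    my $db = tie %h, 'BerkeleyDB::Hash',
        -Filename => "demo.db",
        -Env      => $env,
        -Flags    => DB_CREATE
        or die "cannot open database: $BerkeleyDB::Error";

    my $cursor = $db->db_cursor();
    # ... use the cursor ...

    # Tear down in reverse order of creation.
    undef $cursor;
    undef $db;
    untie %h;
    undef $env;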
diff --git a/bdb/perl.BerkeleyDB/t/subdb.t b/bdb/perl/BerkeleyDB/t/subdb.t
index 290e5d691e4..23016d6463f 100644
--- a/bdb/perl.BerkeleyDB/t/subdb.t
+++ b/bdb/perl/BerkeleyDB/t/subdb.t
@@ -10,7 +10,7 @@ BEGIN {
}
use BerkeleyDB;
-use File::Path qw(rmtree);
+use t::util ;
BEGIN
{
@@ -22,60 +22,6 @@ BEGIN
print "1..43\n";
-my %DB_errors = (
- 'DB_INCOMPLETE' => "DB_INCOMPLETE: Sync was unable to complete",
- 'DB_KEYEMPTY' => "DB_KEYEMPTY: Non-existent key/data pair",
- 'DB_KEYEXIST' => "DB_KEYEXIST: Key/data pair already exists",
- 'DB_LOCK_DEADLOCK' => "DB_LOCK_DEADLOCK: Locker killed to resolve a deadlock",
- 'DB_LOCK_NOTGRANTED' => "DB_LOCK_NOTGRANTED: Lock not granted",
- 'DB_NOTFOUND' => "DB_NOTFOUND: No matching key/data pair found",
- 'DB_OLD_VERSION' => "DB_OLDVERSION: Database requires a version upgrade",
- 'DB_RUNRECOVERY' => "DB_RUNRECOVERY: Fatal error, run database recovery",
- ) ;
-
-{
- package LexFile ;
-
- sub new
- {
- my $self = shift ;
- unlink @_ ;
- bless [ @_ ], $self ;
- }
-
- sub DESTROY
- {
- my $self = shift ;
- unlink @{ $self } ;
- }
-}
-
-
-sub ok
-{
- my $no = shift ;
- my $result = shift ;
-
- print "not " unless $result ;
- print "ok $no\n" ;
-}
-
-sub addData
-{
- my $db = shift ;
- my @data = @_ ;
- die "addData odd data\n" unless @data /2 != 0 ;
- my ($k, $v) ;
- my $ret = 0 ;
- while (@data) {
- $k = shift @data ;
- $v = shift @data ;
- $ret += $db->db_put($k, $v) ;
- }
-
- return ($ret == 0) ;
-}
-
my $Dfile = "dbhash.tmp";
my $Dfile2 = "dbhash2.tmp";
my $Dfile3 = "dbhash3.tmp";
@@ -286,6 +232,7 @@ umask(0) ;
ok 38, $status == DB_NOTFOUND ;
ok 39, @dbnames == 0 ;
undef $db ;
+ undef $cursor ;
ok 40, -e $Dfile ;
ok 41, BerkeleyDB::db_remove(-Filename => $Dfile) == 0 ;
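The extra "undef $cursor" added to subdb.t above matters because db_remove refuses to act while any handle on the file is still open. A minimal sketch of named subdatabases in one file follows; the -Subname values and filename are assumptions for illustration, not taken from the test:

    # Sketch only: two named subdatabases inside one physical file.
    use strict;
    use BerkeleyDB;

    my $file = "subdb-demo.db";
    unlink $file;

    my $fruit = new BerkeleyDB::Hash
        -Filename => $file,
        -Subname  => "fruit",
        -Flags    => DB_CREATE
        or die "cannot open fruit: $BerkeleyDB::Error";

    my $veg = new BerkeleyDB::Hash
        -Filename => $file,
        -Subname  => "vegetable",
        -Flags    => DB_CREATE
        or die "cannot open vegetable: $BerkeleyDB::Error";

    $fruit->db_put("apple",  "red");
    $veg->db_put("carrot", "orange");

    # Every handle (including cursors) must be closed before the file can
    # be removed.
    undef $fruit;
    undef $veg;
    BerkeleyDB::db_remove(-Filename => $file) == 0
        or die "db_remove failed: $BerkeleyDB::Error";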
diff --git a/bdb/perl.BerkeleyDB/t/txn.t b/bdb/perl/BerkeleyDB/t/txn.t
index 6bef1887ea3..ba6b636cdc8 100644
--- a/bdb/perl.BerkeleyDB/t/txn.t
+++ b/bdb/perl/BerkeleyDB/t/txn.t
@@ -10,47 +10,9 @@ BEGIN {
}
use BerkeleyDB;
-use File::Path qw(rmtree);
-
-print "1..50\n";
-
-
-{
- package LexFile ;
-
- sub new
- {
- my $self = shift ;
- unlink @_ ;
- bless [ @_ ], $self ;
- }
-
- sub DESTROY
- {
- my $self = shift ;
- unlink @{ $self } ;
- }
-}
-
-sub ok
-{
- my $no = shift ;
- my $result = shift ;
-
- print "not " unless $result ;
- print "ok $no\n" ;
-}
-
-sub docat
-{
- my $file = shift;
- local $/ = undef;
- open(CAT,$file) || die "Cannot open $file:$!";
- my $result = <CAT>;
- close(CAT);
- return $result;
-}
+use t::util ;
+print "1..58\n";
my $Dfile = "dbhash.tmp";
@@ -64,8 +26,7 @@ umask(0);
my $value ;
my $home = "./fred" ;
- rmtree $home if -e $home ;
- ok 1, mkdir($home, 0777) ;
+ ok 1, my $lexD = new LexDir($home);
ok 2, my $env = new BerkeleyDB::Env -Home => $home,
-Flags => DB_CREATE| DB_INIT_MPOOL;
eval { $env->txn_begin() ; } ;
@@ -74,7 +35,6 @@ umask(0);
eval { my $txn_mgr = $env->TxnMgr() ; } ;
ok 4, $@ =~ /^BerkeleyDB Aborting: Transaction Manager not enabled at/ ;
undef $env ;
- rmtree $home ;
}
@@ -86,8 +46,7 @@ umask(0);
my $value ;
my $home = "./fred" ;
- rmtree $home if -e $home ;
- ok 5, mkdir($home, 0777) ;
+ ok 5, my $lexD = new LexDir($home);
ok 6, my $env = new BerkeleyDB::Env -Home => $home,
-Flags => DB_CREATE|DB_INIT_TXN|
DB_INIT_MPOOL|DB_INIT_LOCK ;
@@ -98,6 +57,10 @@ umask(0);
-Txn => $txn ;
+ ok 9, $txn->txn_commit() == 0 ;
+ ok 10, $txn = $env->txn_begin() ;
+ $db1->Txn($txn);
+
# create some data
my %data = (
"red" => "boat",
@@ -109,41 +72,40 @@ umask(0);
while (my ($k, $v) = each %data) {
$ret += $db1->db_put($k, $v) ;
}
- ok 9, $ret == 0 ;
+ ok 11, $ret == 0 ;
# should be able to see all the records
- ok 10, my $cursor = $db1->db_cursor() ;
+ ok 12, my $cursor = $db1->db_cursor() ;
my ($k, $v) = ("", "") ;
my $count = 0 ;
# sequence forwards
while ($cursor->c_get($k, $v, DB_NEXT) == 0) {
++ $count ;
}
- ok 11, $count == 3 ;
+ ok 13, $count == 3 ;
undef $cursor ;
# now abort the transaction
- ok 12, $txn->txn_abort() == 0 ;
+ ok 14, $txn->txn_abort() == 0 ;
# there shouldn't be any records in the database
$count = 0 ;
# sequence forwards
- ok 13, $cursor = $db1->db_cursor() ;
+ ok 15, $cursor = $db1->db_cursor() ;
while ($cursor->c_get($k, $v, DB_NEXT) == 0) {
++ $count ;
}
- ok 14, $count == 0 ;
+ ok 16, $count == 0 ;
my $stat = $env->txn_stat() ;
- ok 15, $stat->{'st_naborts'} == 1 ;
+ ok 17, $stat->{'st_naborts'} == 1 ;
undef $txn ;
undef $cursor ;
undef $db1 ;
undef $env ;
untie %hash ;
- rmtree $home ;
}
{
@@ -154,18 +116,20 @@ umask(0);
my $value ;
my $home = "./fred" ;
- rmtree $home if -e $home ;
- ok 16, mkdir($home, 0777) ;
- ok 17, my $env = new BerkeleyDB::Env -Home => $home,
+ ok 18, my $lexD = new LexDir($home);
+ ok 19, my $env = new BerkeleyDB::Env -Home => $home,
-Flags => DB_CREATE|DB_INIT_TXN|
DB_INIT_MPOOL|DB_INIT_LOCK ;
- ok 18, my $txn_mgr = $env->TxnMgr() ;
- ok 19, my $txn = $txn_mgr->txn_begin() ;
- ok 20, my $db1 = tie %hash, 'BerkeleyDB::Hash', -Filename => $Dfile,
+ ok 20, my $txn_mgr = $env->TxnMgr() ;
+ ok 21, my $txn = $txn_mgr->txn_begin() ;
+ ok 22, my $db1 = tie %hash, 'BerkeleyDB::Hash', -Filename => $Dfile,
-Flags => DB_CREATE ,
-Env => $env,
-Txn => $txn ;
+ ok 23, $txn->txn_commit() == 0 ;
+ ok 24, $txn = $env->txn_begin() ;
+ $db1->Txn($txn);
# create some data
my %data = (
@@ -178,34 +142,34 @@ umask(0);
while (my ($k, $v) = each %data) {
$ret += $db1->db_put($k, $v) ;
}
- ok 21, $ret == 0 ;
+ ok 25, $ret == 0 ;
# should be able to see all the records
- ok 22, my $cursor = $db1->db_cursor() ;
+ ok 26, my $cursor = $db1->db_cursor() ;
my ($k, $v) = ("", "") ;
my $count = 0 ;
# sequence forwards
while ($cursor->c_get($k, $v, DB_NEXT) == 0) {
++ $count ;
}
- ok 23, $count == 3 ;
+ ok 27, $count == 3 ;
undef $cursor ;
# now abort the transaction
- ok 24, $txn->txn_abort() == 0 ;
+ ok 28, $txn->txn_abort() == 0 ;
# there shouldn't be any records in the database
$count = 0 ;
# sequence forwards
- ok 25, $cursor = $db1->db_cursor() ;
+ ok 29, $cursor = $db1->db_cursor() ;
while ($cursor->c_get($k, $v, DB_NEXT) == 0) {
++ $count ;
}
- ok 26, $count == 0 ;
+ ok 30, $count == 0 ;
my $stat = $txn_mgr->txn_stat() ;
- ok 27, $stat->{'st_naborts'} == 1 ;
+ ok 31, $stat->{'st_naborts'} == 1 ;
undef $txn ;
undef $cursor ;
@@ -213,7 +177,6 @@ umask(0);
undef $txn_mgr ;
undef $env ;
untie %hash ;
- rmtree $home ;
}
{
@@ -224,18 +187,21 @@ umask(0);
my $value ;
my $home = "./fred" ;
- rmtree $home if -e $home ;
- ok 28, mkdir($home, 0777) ;
- ok 29, my $env = new BerkeleyDB::Env -Home => $home,
+ ok 32, my $lexD = new LexDir($home);
+ ok 33, my $env = new BerkeleyDB::Env -Home => $home,
-Flags => DB_CREATE|DB_INIT_TXN|
DB_INIT_MPOOL|DB_INIT_LOCK ;
- ok 30, my $txn = $env->txn_begin() ;
- ok 31, my $db1 = tie %hash, 'BerkeleyDB::Hash', -Filename => $Dfile,
+ ok 34, my $txn = $env->txn_begin() ;
+ ok 35, my $db1 = tie %hash, 'BerkeleyDB::Hash', -Filename => $Dfile,
-Flags => DB_CREATE ,
-Env => $env,
-Txn => $txn ;
+ ok 36, $txn->txn_commit() == 0 ;
+ ok 37, $txn = $env->txn_begin() ;
+ $db1->Txn($txn);
+
# create some data
my %data = (
"red" => "boat",
@@ -247,40 +213,39 @@ umask(0);
while (my ($k, $v) = each %data) {
$ret += $db1->db_put($k, $v) ;
}
- ok 32, $ret == 0 ;
+ ok 38, $ret == 0 ;
# should be able to see all the records
- ok 33, my $cursor = $db1->db_cursor() ;
+ ok 39, my $cursor = $db1->db_cursor() ;
my ($k, $v) = ("", "") ;
my $count = 0 ;
# sequence forwards
while ($cursor->c_get($k, $v, DB_NEXT) == 0) {
++ $count ;
}
- ok 34, $count == 3 ;
+ ok 40, $count == 3 ;
undef $cursor ;
# now commit the transaction
- ok 35, $txn->txn_commit() == 0 ;
+ ok 41, $txn->txn_commit() == 0 ;
$count = 0 ;
# sequence forwards
- ok 36, $cursor = $db1->db_cursor() ;
+ ok 42, $cursor = $db1->db_cursor() ;
while ($cursor->c_get($k, $v, DB_NEXT) == 0) {
++ $count ;
}
- ok 37, $count == 3 ;
+ ok 43, $count == 3 ;
my $stat = $env->txn_stat() ;
- ok 38, $stat->{'st_naborts'} == 0 ;
+ ok 44, $stat->{'st_naborts'} == 0 ;
undef $txn ;
undef $cursor ;
undef $db1 ;
undef $env ;
untie %hash ;
- rmtree $home ;
}
{
@@ -291,18 +256,20 @@ umask(0);
my $value ;
my $home = "./fred" ;
- rmtree $home if -e $home ;
- ok 39, mkdir($home, 0777) ;
- ok 40, my $env = new BerkeleyDB::Env -Home => $home,
+ ok 45, my $lexD = new LexDir($home);
+ ok 46, my $env = new BerkeleyDB::Env -Home => $home,
-Flags => DB_CREATE|DB_INIT_TXN|
DB_INIT_MPOOL|DB_INIT_LOCK ;
- ok 41, my $txn_mgr = $env->TxnMgr() ;
- ok 42, my $txn = $txn_mgr->txn_begin() ;
- ok 43, my $db1 = tie %hash, 'BerkeleyDB::Hash', -Filename => $Dfile,
+ ok 47, my $txn_mgr = $env->TxnMgr() ;
+ ok 48, my $txn = $txn_mgr->txn_begin() ;
+ ok 49, my $db1 = tie %hash, 'BerkeleyDB::Hash', -Filename => $Dfile,
-Flags => DB_CREATE ,
-Env => $env,
-Txn => $txn ;
+ ok 50, $txn->txn_commit() == 0 ;
+ ok 51, $txn = $env->txn_begin() ;
+ $db1->Txn($txn);
# create some data
my %data = (
@@ -315,33 +282,33 @@ umask(0);
while (my ($k, $v) = each %data) {
$ret += $db1->db_put($k, $v) ;
}
- ok 44, $ret == 0 ;
+ ok 52, $ret == 0 ;
# should be able to see all the records
- ok 45, my $cursor = $db1->db_cursor() ;
+ ok 53, my $cursor = $db1->db_cursor() ;
my ($k, $v) = ("", "") ;
my $count = 0 ;
# sequence forwards
while ($cursor->c_get($k, $v, DB_NEXT) == 0) {
++ $count ;
}
- ok 46, $count == 3 ;
+ ok 54, $count == 3 ;
undef $cursor ;
# now commit the transaction
- ok 47, $txn->txn_commit() == 0 ;
+ ok 55, $txn->txn_commit() == 0 ;
$count = 0 ;
# sequence forwards
- ok 48, $cursor = $db1->db_cursor() ;
+ ok 56, $cursor = $db1->db_cursor() ;
while ($cursor->c_get($k, $v, DB_NEXT) == 0) {
++ $count ;
}
- ok 49, $count == 3 ;
+ ok 57, $count == 3 ;
my $stat = $txn_mgr->txn_stat() ;
- ok 50, $stat->{'st_naborts'} == 0 ;
+ ok 58, $stat->{'st_naborts'} == 0 ;
undef $txn ;
undef $cursor ;
@@ -349,6 +316,5 @@ umask(0);
undef $txn_mgr ;
undef $env ;
untie %hash ;
- rmtree $home ;
}
diff --git a/bdb/perl.BerkeleyDB/t/unknown.t b/bdb/perl/BerkeleyDB/t/unknown.t
index e72021f0b18..f2630b585c0 100644
--- a/bdb/perl.BerkeleyDB/t/unknown.t
+++ b/bdb/perl/BerkeleyDB/t/unknown.t
@@ -12,46 +12,10 @@ BEGIN {
}
use BerkeleyDB;
-use File::Path qw(rmtree);
+use t::util ;
print "1..41\n";
-{
- package LexFile ;
-
- sub new
- {
- my $self = shift ;
- unlink @_ ;
- bless [ @_ ], $self ;
- }
-
- sub DESTROY
- {
- my $self = shift ;
- unlink @{ $self } ;
- }
-}
-
-
-sub ok
-{
- my $no = shift ;
- my $result = shift ;
-
- print "not " unless $result ;
- print "ok $no\n" ;
-}
-
-sub writeFile
-{
- my $name = shift ;
- open(FH, ">$name") or return 0 ;
- print FH @_ ;
- close FH ;
- return 1 ;
-}
-
my $Dfile = "dbhash.tmp";
unlink $Dfile;
diff --git a/bdb/perl/BerkeleyDB/t/util.pm b/bdb/perl/BerkeleyDB/t/util.pm

new file mode 100644
index 00000000000..1a1449751eb
--- /dev/null
+++ b/bdb/perl/BerkeleyDB/t/util.pm
@@ -0,0 +1,220 @@
+package util ;
+
+package main ;
+
+use strict ;
+use BerkeleyDB ;
+use File::Path qw(rmtree);
+use vars qw(%DB_errors $FA) ;
+
+$| = 1;
+
+%DB_errors = (
+ 'DB_INCOMPLETE' => "DB_INCOMPLETE: Sync was unable to complete",
+ 'DB_KEYEMPTY' => "DB_KEYEMPTY: Non-existent key/data pair",
+ 'DB_KEYEXIST' => "DB_KEYEXIST: Key/data pair already exists",
+ 'DB_LOCK_DEADLOCK' => "DB_LOCK_DEADLOCK: Locker killed to resolve a deadlock",
+ 'DB_LOCK_NOTGRANTED' => "DB_LOCK_NOTGRANTED: Lock not granted",
+ 'DB_NOTFOUND' => "DB_NOTFOUND: No matching key/data pair found",
+ 'DB_OLD_VERSION' => "DB_OLDVERSION: Database requires a version upgrade",
+ 'DB_RUNRECOVERY' => "DB_RUNRECOVERY: Fatal error, run database recovery",
+) ;
+
+# full tied array support started in Perl 5.004_57
+# just double check.
+$FA = 0 ;
+{
+ sub try::TIEARRAY { bless [], "try" }
+ sub try::FETCHSIZE { $FA = 1 }
+ my @a ;
+ tie @a, 'try' ;
+ my $a = @a ;
+}
+
+{
+ package LexFile ;
+
+ use vars qw( $basename @files ) ;
+ $basename = "db0000" ;
+
+ sub new
+ {
+ my $self = shift ;
+ #my @files = () ;
+ foreach (@_)
+ {
+ $_ = $basename ;
+ unlink $basename ;
+ push @files, $basename ;
+ ++ $basename ;
+ }
+ bless [ @files ], $self ;
+ }
+
+ sub DESTROY
+ {
+ my $self = shift ;
+ #unlink @{ $self } ;
+ }
+
+ END
+ {
+ foreach (@files) { unlink $_ }
+ }
+}
+
+
+{
+ package LexDir ;
+
+ use File::Path qw(rmtree);
+
+ use vars qw( $basename %dirs ) ;
+
+ sub new
+ {
+ my $self = shift ;
+ my $dir = shift ;
+
+ rmtree $dir if -e $dir ;
+
+ mkdir $dir, 0777 or return undef ;
+
+ return bless [ $dir ], $self ;
+ }
+
+ sub DESTROY
+ {
+ my $self = shift ;
+ my $dir = $self->[0];
+ #rmtree $dir;
+ $dirs{$dir} ++ ;
+ }
+
+ END
+ {
+ foreach (keys %dirs) {
+ rmtree $_ if -d $_ ;
+ }
+ }
+
+}
+
+{
+ package Redirect ;
+ use Symbol ;
+
+ sub new
+ {
+ my $class = shift ;
+ my $filename = shift ;
+ my $fh = gensym ;
+ open ($fh, ">$filename") || die "Cannot open $filename: $!" ;
+ my $real_stdout = select($fh) ;
+ return bless [$fh, $real_stdout ] ;
+
+ }
+ sub DESTROY
+ {
+ my $self = shift ;
+ close $self->[0] ;
+ select($self->[1]) ;
+ }
+}
+
+sub docat
+{
+ my $file = shift;
+ local $/ = undef;
+ open(CAT,$file) || die "Cannot open $file:$!";
+ my $result = <CAT>;
+ close(CAT);
+ return $result;
+}
+
+sub docat_del
+{
+ my $file = shift;
+ local $/ = undef;
+ open(CAT,$file) || die "Cannot open $file: $!";
+ my $result = <CAT> || "" ;
+ close(CAT);
+ unlink $file ;
+ return $result;
+}
+
+sub writeFile
+{
+ my $name = shift ;
+ open(FH, ">$name") or return 0 ;
+ print FH @_ ;
+ close FH ;
+ return 1 ;
+}
+
+sub touch
+{
+ my $file = shift ;
+ open(CAT,">$file") || die "Cannot open $file:$!";
+ close(CAT);
+}
+
+sub joiner
+{
+ my $db = shift ;
+ my $sep = shift ;
+ my ($k, $v) = (0, "") ;
+ my @data = () ;
+
+ my $cursor = $db->db_cursor() or return () ;
+ for ( my $status = $cursor->c_get($k, $v, DB_FIRST) ;
+ $status == 0 ;
+ $status = $cursor->c_get($k, $v, DB_NEXT)) {
+ push @data, $v ;
+ }
+
+ (scalar(@data), join($sep, @data)) ;
+}
+
+sub countRecords
+{
+ my $db = shift ;
+ my ($k, $v) = (0,0) ;
+ my ($count) = 0 ;
+ my ($cursor) = $db->db_cursor() ;
+ #for ($status = $cursor->c_get($k, $v, DB_FIRST) ;
+# $status == 0 ;
+# $status = $cursor->c_get($k, $v, DB_NEXT) )
+ while ($cursor->c_get($k, $v, DB_NEXT) == 0)
+ { ++ $count }
+
+ return $count ;
+}
+
+sub addData
+{
+ my $db = shift ;
+ my @data = @_ ;
+ die "addData odd data\n" if @data % 2 != 0 ;
+ my ($k, $v) ;
+ my $ret = 0 ;
+ while (@data) {
+ $k = shift @data ;
+ $v = shift @data ;
+ $ret += $db->db_put($k, $v) ;
+ }
+
+ return ($ret == 0) ;
+}
+
+sub ok
+{
+ my $no = shift ;
+ my $result = shift ;
+
+ print "not " unless $result ;
+ print "ok $no\n" ;
+}
+
+
+1;
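With the helpers collected into t/util.pm, a new test script only needs something along these lines. The test body below is a made-up example, not one of the real tests, and assumes it is run from the module's top-level directory so that t::util is found:

    #!/usr/bin/perl -w
    # Sketch only: LexFile picks temporary database names and removes them
    # at END time, LexDir does the same for an environment directory, and
    # ok() prints TAP-style results.
    use strict;
    use BerkeleyDB;
    use t::util;

    print "1..3\n";

    my $Dfile = "";
    my $lex  = new LexFile $Dfile;       # $Dfile is rewritten to a temp name
    my $home = "./fred";
    ok 1, my $lexD = new LexDir($home);  # creates ./fred, cleaned up at END

    ok 2, my $db = new BerkeleyDB::Hash
                        -Filename => $Dfile,
                        -Flags    => DB_CREATE;

    ok 3, addData($db, "red" => "boat", "green" => "house");

    undef $db;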
diff --git a/bdb/perl.BerkeleyDB/typemap b/bdb/perl/BerkeleyDB/typemap
index d6c4c7647ce..81ead2c36d9 100644
--- a/bdb/perl.BerkeleyDB/typemap
+++ b/bdb/perl/BerkeleyDB/typemap
@@ -72,7 +72,7 @@ T_AV
croak(\"$var is not an array reference\")
T_RAW
- $var = ($type)SvIV($arg)
+ $var = INT2PTR($type,SvIV($arg))
T_U_INT
$var = SvUV($arg)
@@ -82,7 +82,7 @@ T_SV_REF_NULL
$var = NULL ;
else if (sv_derived_from($arg, \"${ntype}\")) {
IV tmp = SvIV((SV *)GetInternalObject($arg));
- $var = ($type) tmp;
+ $var = INT2PTR($type, tmp);
}
else
croak(\"$var is not of type ${ntype}\")
@@ -94,7 +94,7 @@ T_HV_REF_NULL
HV * hv = (HV *)GetInternalObject($arg);
SV ** svp = hv_fetch(hv, \"db\", 2, FALSE);
IV tmp = SvIV(*svp);
- $var = ($type) tmp;
+ $var = INT2PTR($type, tmp);
}
else
croak(\"$var is not of type ${ntype}\")
@@ -104,7 +104,7 @@ T_HV_REF
HV * hv = (HV *)GetInternalObject($arg);
SV ** svp = hv_fetch(hv, \"db\", 2, FALSE);
IV tmp = SvIV(*svp);
- $var = ($type) tmp;
+ $var = INT2PTR($type, tmp);
}
else
croak(\"$var is not of type ${ntype}\")
@@ -113,7 +113,7 @@ T_HV_REF
T_P_REF
if (sv_derived_from($arg, \"${ntype}\")) {
IV tmp = SvIV((SV*)SvRV($arg));
- $var = ($type) tmp;
+ $var = INT2PTR($type, tmp);
}
else
croak(\"$var is not of type ${ntype}\")
@@ -124,7 +124,7 @@ T_INNER
HV * hv = (HV *)SvRV($arg);
SV ** svp = hv_fetch(hv, \"db\", 2, FALSE);
IV tmp = SvIV(*svp);
- $var = ($type) tmp;
+ $var = INT2PTR($type, tmp);
}
T_PV_NULL
@@ -147,7 +147,7 @@ T_PTROBJ_NULL
$var = NULL ;
else if (sv_derived_from($arg, \"${ntype}\")) {
IV tmp = SvIV((SV*)SvRV($arg));
- $var = ($type) tmp;
+ $var = INT2PTR($type, tmp);
}
else
croak(\"$var is not of type ${ntype}\")
@@ -157,7 +157,7 @@ T_PTROBJ_SELF
$var = NULL ;
else if (sv_derived_from($arg, \"${ntype}\")) {
IV tmp = SvIV((SV*)SvRV($arg));
- $var = ($type) tmp;
+ $var = INT2PTR($type, tmp);
}
else
croak(\"$var is not of type ${ntype}\")
@@ -166,14 +166,14 @@ T_PTROBJ_AV
if ($arg == &PL_sv_undef || $arg == NULL)
$var = NULL ;
else if (sv_derived_from($arg, \"${ntype}\")) {
- IV tmp = getInnerObject($arg) ;
- $var = ($type) tmp;
+ IV tmp = SvIV(getInnerObject($arg)) ;
+ $var = INT2PTR($type, tmp);
}
else
croak(\"$var is not of type ${ntype}\")
T_dbtkeydatum
- ckFilter($arg, filter_store_key, \"filter_store_key\");
+ DBM_ckFilter($arg, filter_store_key, \"filter_store_key\");
DBT_clear($var) ;
if (db->recno_or_queue) {
Value = GetRecnoKey(db, SvIV($arg)) ;
@@ -186,7 +186,7 @@ T_dbtkeydatum
}
T_dbtkeydatum_btree
- ckFilter($arg, filter_store_key, \"filter_store_key\");
+ DBM_ckFilter($arg, filter_store_key, \"filter_store_key\");
DBT_clear($var) ;
if (db->recno_or_queue ||
(db->type == DB_BTREE && flagSet(DB_SET_RECNO))) {
@@ -200,7 +200,7 @@ T_dbtkeydatum_btree
}
T_dbtdatum
- ckFilter($arg, filter_store_value, \"filter_store_value\");
+ DBM_ckFilter($arg, filter_store_value, \"filter_store_value\");
DBT_clear($var) ;
$var.data = SvPV($arg, PL_na);
$var.size = (int)PL_na;
@@ -211,7 +211,7 @@ T_dbtdatum
T_dbtdatum_opt
DBT_clear($var) ;
if (flagSet(DB_GET_BOTH)) {
- ckFilter($arg, filter_store_value, \"filter_store_value\");
+ DBM_ckFilter($arg, filter_store_value, \"filter_store_value\");
$var.data = SvPV($arg, PL_na);
$var.size = (int)PL_na;
$var.flags = db->partial ;
@@ -222,7 +222,7 @@ T_dbtdatum_opt
T_dbtdatum_btree
DBT_clear($var) ;
if (flagSet(DB_GET_BOTH)) {
- ckFilter($arg, filter_store_value, \"filter_store_value\");
+ DBM_ckFilter($arg, filter_store_value, \"filter_store_value\");
$var.data = SvPV($arg, PL_na);
$var.size = (int)PL_na;
$var.flags = db->partial ;
@@ -234,19 +234,19 @@ T_dbtdatum_btree
OUTPUT
T_RAW
- sv_setiv($arg, (IV)$var);
+ sv_setiv($arg, PTR2IV($var));
T_SV_REF_NULL
- sv_setiv($arg, (IV)$var);
+ sv_setiv($arg, PTR2IV($var));
T_HV_REF_NULL
- sv_setiv($arg, (IV)$var);
+ sv_setiv($arg, PTR2IV($var));
T_HV_REF
- sv_setiv($arg, (IV)$var);
+ sv_setiv($arg, PTR2IV($var));
T_P_REF
- sv_setiv($arg, (IV)$var);
+ sv_setiv($arg, PTR2IV($var));
T_DUAL
setDUALerrno($arg, $var) ;
diff --git a/bdb/perl.DB_File/Changes b/bdb/perl/DB_File/Changes
index b8684cac3de..7883cbdfef0 100644
--- a/bdb/perl.DB_File/Changes
+++ b/bdb/perl/DB_File/Changes
@@ -1,196 +1,234 @@
-0.1
+1.805 1st September 2002
- First Release.
+ * Added support to allow DB_File to build with Berkeley DB 4.1.X
-0.2
+ * Tightened up the test harness to test that calls to untie don't generate
+ the "untie attempted while %d inner references still exist" warning.
- When DB_File is opening a database file it no longer terminates the
- process if dbopen returned an error. This allows file protection
- errors to be caught at run time. Thanks to Judith Grass
- <grass@cybercash.com> for spotting the bug.
+ * added code to guard against calling the callbacks (compare, hash & prefix)
+ recursively.
-0.3
+ * passing undef for the flags and/or mode when opening a database could cause
+ a "Use of uninitialized value in subroutine entry" warning. Now silenced.
- Added prototype support for multiple btree compare callbacks.
+ * DBM filter code beefed up to cope with read-only $_.
-1.0
+1.804 2nd June 2002
- DB_File has been in use for over a year. To reflect that, the
- version number has been incremented to 1.0.
+ * Perl core patch 14939 added a new warning to "splice". This broke the
+ db-recno test harness. Fixed.
- Added complete support for multiple concurrent callbacks.
+ * merged core patches 16502 & 16540.
- Using the push method on an empty list didn't work properly. This
- has been fixed.
+1.803 1st March 2002
-1.01
+ * Fixed a problem with db-btree.t where it complained about an "our"
+ variable redeclaration.
- Fixed a core dump problem with SunOS.
+ * FETCH, STORE & DELETE don't map the flags parameter into the
+ equivalent Berkeley DB function anymore.
- The return value from TIEHASH wasn't set to NULL when dbopen
- returned an error.
+1.802 6th January 2002
-1.02
+ * The message about some test failing in db-recno.t had the wrong test
+ numbers. Fixed.
- Merged OS/2 specific code into DB_File.xs
+ * merged core patch 13942.
- Removed some redundant code in DB_File.xs.
+1.801 26th November 2001
- Documentation update.
+ * Fixed typo in Makefile.PL
- Allow negative subscripts with RECNO interface.
+ * Added "clean" attribute to Makefile.PL
+
+1.800 23rd November 2001
- Changed the default flags from O_RDWR to O_CREAT|O_RDWR.
+ * use ppport.h for perl backward compatibility code.
- The example code which showed how to lock a database needed a call
- to sync added. Without it the resultant database file was empty.
+ * use new ExtUtils::Constant module to generate XS constants.
- Added get_dup method.
+ * upgrade Makefile.PL upgrade/downgrade code to toggle "our" with
+ "use vars"
-1.03
+1.79 22nd October 2001
- Documentation update.
+ * Added a "local $SIG{__DIE__}" inside the eval that checks for
+ the presence of XSLoader, as suggested by Andrew Hryckowin.
- DB_File now imports the constants (O_RDWR, O_CREAT etc.) from Fcntl
- automatically.
+ * merged core patch 12277.
- The standard hash function exists is now supported.
+ * Changed NEXTKEY to not initialise the input key. It isn't used anyway.
- Modified the behavior of get_dup. When it returns an associative
- array, the value is the count of the number of matching BTREE
- values.
+1.79 22nd October 2001
-1.04
+ * Fixed test harness for cygwin
- Minor documentation changes.
+1.78 30th July 2001
- Fixed a bug in hash_cb. Patches supplied by Dave Hammen,
- <hammen@gothamcity.jsc.nasa.govt>.
+ * the test in Makefile.PL for AIX used -plthreads. Should have been
+ -lpthreads
- Fixed a bug with the constructors for DB_File::HASHINFO,
- DB_File::BTREEINFO and DB_File::RECNOINFO. Also tidied up the
- constructors to make them -w clean.
+ * merged Core patches
+ 10372, 10335, 10372, 10534, 10549, 10643, 11051, 11194, 11432
- Reworked part of the test harness to be more locale friendly.
+ * added documentation patch regarding duplicate keys from Andrew Johnson
-1.05
- Made all scripts in the documentation strict and -w clean.
+1.77 26th April 2001
- Added logic to DB_File.xs to allow the module to be built after
- Perl is installed.
+ * AIX is reported to need -lpthreads, so Makefile.PL now checks for
+ AIX and adds it to the link options.
-1.06
+ * Minor documentation updates.
- Minor namespace cleanup: Localized PrintBtree.
+ * Merged Core patch 9176
-1.07
+ * Added a patch from Edward Avis that adds support for splice with
+ recno databases.
- Fixed bug with RECNO, where bval wasn't defaulting to "\n".
+ * Modified Makefile.PL to only enable the warnings pragma if using perl
+ 5.6.1 or better.
-1.08
+1.76 15th January 2001
- Documented operation of bval.
+ * Added instructions for using LD_PRELOAD to get Berkeley DB 2.x to work
+ with DB_File on Linux. Thanks to Norbert Bollow for sending details of
+ this approach.
-1.09
- Minor bug fix in DB_File::HASHINFO, DB_File::RECNOINFO and
- DB_File::BTREEINFO.
+1.75 17th December 2000
- Changed default mode to 0666.
+ * Fixed perl core patch 7703
-1.10
+ * Added suppport to allow DB_File to be built with Berkeley DB 3.2 --
+ btree_compare, btree_prefix and hash_cb needed to be changed.
- Fixed fd method so that it still returns -1 for in-memory files
- when db 1.86 is used.
+ * Updated dbinfo to support Berkeley DB 3.2 file format changes.
-1.11
- Documented the untie gotcha.
+1.74 10th December 2000
-1.12
+ * A "close" call in DB_File.xs needed parenthesised to stop win32 from
+ thinking it was one of its macros.
- Documented the incompatibility with version 2 of Berkeley DB.
+ * Updated dbinfo to support Berkeley DB 3.1 file format changes.
-1.13
+ * DB_File.pm & the test hasness now use the warnings pragma (when
+ available).
- Minor changes to DB_FIle.xs and DB_File.pm
+ * Included Perl core patch 7703 -- size argument for hash_cb is different
+ for Berkeley DB 3.x
-1.14
+ * Included Perl core patch 7801 -- Give __getBerkeleyDBInfo the ANSI C
+ treatment.
- Made it illegal to tie an associative array to a RECNO database and
- an ordinary array to a HASH or BTREE database.
+ * @a = () produced the warning 'Argument "" isn't numeric in entersub'
+ This has been fixed. Thanks to Edward Avis for spotting this bug.
-1.15
+ * Added note about building under Linux. Included patches.
- Patch from Gisle Aas <gisle@aas.no> to suppress "use of undefined
- value" warning with db_get and db_seq.
+ * Included Perl core patch 8068 -- fix for bug 20001013.009
+ When run with warnings enabled "$hash{XX} = undef " produced an
+ "Uninitialized value" warning. This has been fixed.
- Patch from Gisle Aas <gisle@aas.no> to make DB_File export only the
- O_* constants from Fcntl.
+1.73 31st May 2000
- Removed the DESTROY method from the DB_File::HASHINFO module.
+ * Added support in version.c for building with threaded Perl.
- Previously DB_File hard-wired the class name of any object that it
- created to "DB_File". This makes sub-classing difficult. Now
- DB_File creats objects in the namespace of the package it has been
- inherited into.
+ * Berkeley DB 3.1 has reenabled support for null keys. The test
+ harness has been updated to reflect this.
+1.72 16th January 2000
-1.16
+ * Added hints/sco.pl
- A harmless looking tab was causing Makefile.PL to fail on AIX 3.2.5
+ * The module will now use XSLoader when it is available. When it
+ isn't it will use DynaLoader.
- Small fix for the AIX strict C compiler XLC which doesn't like
- __attribute__ being defined via proto.h and redefined via db.h. Fix
- courtesy of Jarkko Hietaniemi.
+ * The locking section in DB_File.pm has been discredited. Many thanks
+ to David Harris for spotting the underlying problem, contributing
+ the updates to the documentation and writing DB_File::Lock (available
+ on CPAN).
-1.50
+1.71 7th September 1999
- DB_File can now build with either DB 1.x or 2.x, but not both at
- the same time.
+ * Fixed a bug that prevented 1.70 from compiling under win32
-1.51
+ * Updated to support Berkeley DB 3.x
- Fixed the test harness so that it doesn't expect DB_File to have
- been installed by the main Perl build.
+ * Updated dbinfo for Berkeley DB 3.x file formats.
+1.70 4th August 1999
- Fixed a bug in mapping 1.x O_RDONLY flag to 2.x DB_RDONLY equivalent
+ * Initialise $DB_File::db_ver and $DB_File::db_version with
+ GV_ADD|GV_ADDMULT -- bug spotted by Nick Ing-Simmons.
-1.52
+ * Added a BOOT check to test for equivalent versions of db.h &
+ libdb.a/so.
- Patch from Nick Ing-Simmons now allows DB_File to build on NT.
- Merged 1.15 patch.
+1.69 3rd August 1999
-1.53
+ * fixed a bug in push -- DB_APPEND wasn't working properly.
- Added DB_RENUMBER to flags for recno.
+ * Fixed the R_SETCURSOR bug introduced in 1.68
-1.54
+ * Added a new Perl variable $DB_File::db_ver
+
+1.68 22nd July 1999
- Fixed a small bug in the test harness when run under win32
- The emulation of fd when useing DB 2.x was busted.
+ * Merged changes from 5.005_58
-1.55
- Merged 1.16 changes.
+ * Fixed a bug in R_IBEFORE & R_IAFTER processing in Berkeley DB
+ 2 databases.
-1.56
- Documented the Solaris 2.5 mutex bug
+ * Added some of the examples in the POD into the test harness.
-1.57
- If Perl has been compiled with Threads support,the symbol op will be
- defined. This clashes with a field name in db.h, so it needs to be
- #undef'ed before db.h is included.
+1.67 6th June 1999
-1.58
- Tied Array support was enhanced in Perl 5.004_57. DB_File now
- supports PUSH,POP,SHIFT,UNSHIFT & STORESIZE.
+ * Added DBM Filter documentation to DB_File.pm
- Fixed a problem with the use of sv_setpvn. When the size is
- specified as 0, it does a strlen on the data. This was ok for DB
- 1.x, but isn't for DB 2.x.
+ * Fixed DBM Filter code to work with 5.004
+
+ * A few instances of newSVpvn were used in 1.66. This isn't available in
+ Perl 5.004_04 or earlier. Replaced with newSVpv.
+
+1.66 15th March 1999
+
+ * Added DBM Filter code
+
+1.65 6th March 1999
+
+ * Fixed a bug in the recno PUSH logic.
+ * The BOOT version check now needs 2.3.4 when using Berkeley DB version 2
+
+1.64 21st February 1999
+
+ * Tidied the 1.x to 2.x flag mapping code.
+ * Added a patch from Mark Kettenis <kettenis@wins.uva.nl> to fix a flag
+ mapping problem with O_RDONLY on the Hurd
+ * Updated the message that db-recno.t prints when tests 51, 53 or 55 fail.
+
+1.63 19th December 1998
+
+ * Fix to allow DB 2.6.x to build with DB_File
+ * Documentation updated to use push,pop etc in the RECNO example &
+ to include the find_dup & del_dup methods.
+
+1.62 30th November 1998
+
+ Added hints/dynixptx.pl.
+ Fixed typemap -- 1.61 used PL_na instead of na
+
+1.61 19th November 1998
+
+ Added a note to README about how to build Berkeley DB 2.x when
+ using HP-UX.
+ Minor modifications to get the module to build with DB 2.5.x
+ Fixed a typo in the definition of O_RDONLY, courtesy of Mark Kettenis.
+
+1.60
+ Changed the test to check for full tied array support
1.59
Updated the license section.
@@ -201,143 +239,196 @@
Added dbinfo to the distribution.
-1.60
- Changed the test to check for full tied array support
+1.58
+ Tied Array support was enhanced in Perl 5.004_57. DB_File now
+ supports PUSH,POP,SHIFT,UNSHIFT & STORESIZE.
-1.61 19th November 1998
+ Fixed a problem with the use of sv_setpvn. When the size is
+ specified as 0, it does a strlen on the data. This was ok for DB
+ 1.x, but isn't for DB 2.x.
- Added a note to README about how to build Berkeley DB 2.x when
- using HP-UX.
- Minor modifications to get the module to build with DB 2.5.x
- Fixed a typo in the definition of O_RDONLY, courtesy of Mark Kettenis.
+1.57
+ If Perl has been compiled with Threads support,the symbol op will be
+ defined. This clashes with a field name in db.h, so it needs to be
+ #undef'ed before db.h is included.
-1.62 30th November 1998
+1.56
+ Documented the Solaris 2.5 mutex bug
- Added hints/dynixptx.pl.
- Fixed typemap -- 1.61 used PL_na instead of na
+1.55
+ Merged 1.16 changes.
-1.63 19th December 1998
+1.54
- * Fix to allow DB 2.6.x to build with DB_File
- * Documentation updated to use push,pop etc in the RECNO example &
- to include the find_dup & del_dup methods.
+ Fixed a small bug in the test harness when run under win32
+ The emulation of fd when using DB 2.x was busted.
-1.64 21st February 1999
+1.53
- * Tidied the 1.x to 2.x flag mapping code.
- * Added a patch from Mark Kettenis <kettenis@wins.uva.nl> to fix a flag
- mapping problem with O_RDONLY on the Hurd
- * Updated the message that db-recno.t prints when tests 51, 53 or 55 fail.
+ Added DB_RENUMBER to flags for recno.
-1.65 6th March 1999
+1.52
- * Fixed a bug in the recno PUSH logic.
- * The BOOT version check now needs 2.3.4 when using Berkeley DB version 2
+ Patch from Nick Ing-Simmons now allows DB_File to build on NT.
+ Merged 1.15 patch.
-1.66 15th March 1999
+1.51
- * Added DBM Filter code
+ Fixed the test harness so that it doesn't expect DB_File to have
+ been installed by the main Perl build.
-1.67 6th June 1999
- * Added DBM Filter documentation to DB_File.pm
+ Fixed a bug in mapping 1.x O_RDONLY flag to 2.x DB_RDONLY equivalent
- * Fixed DBM Filter code to work with 5.004
+1.50
- * A few instances of newSVpvn were used in 1.66. This isn't available in
- Perl 5.004_04 or earlier. Replaced with newSVpv.
+ DB_File can now build with either DB 1.x or 2.x, but not both at
+ the same time.
-1.68 22nd July 1999
+1.16
- * Merged changes from 5.005_58
+ A harmless looking tab was causing Makefile.PL to fail on AIX 3.2.5
- * Fixed a bug in R_IBEFORE & R_IAFTER procesing in Berkeley DB
- 2 databases.
+ Small fix for the AIX strict C compiler XLC which doesn't like
+ __attribute__ being defined via proto.h and redefined via db.h. Fix
+ courtesy of Jarkko Hietaniemi.
- * Added some of the examples in the POD into the test harness.
+1.15
-1.69 3rd August 1999
+ Patch from Gisle Aas <gisle@aas.no> to suppress "use of undefined
+ value" warning with db_get and db_seq.
- * fixed a bug in push -- DB_APPEND wasn't working properly.
+ Patch from Gisle Aas <gisle@aas.no> to make DB_File export only the
+ O_* constants from Fcntl.
- * Fixed the R_SETCURSOR bug introduced in 1.68
+ Removed the DESTROY method from the DB_File::HASHINFO module.
- * Added a new Perl variable $DB_File::db_ver
-
-1.70 4th August 1999
+ Previously DB_File hard-wired the class name of any object that it
+ created to "DB_File". This makes sub-classing difficult. Now
+ DB_File creates objects in the namespace of the package it has been
+ inherited into.
- * Initialise $DB_File::db_ver and $DB_File::db_version with
- GV_ADD|GV_ADDMULT -- bug spotted by Nick Ing-Simmons.
- * Added a BOOT check to test for equivalent versions of db.h &
- libdb.a/so.
+1.14
-1.71 7th September 1999
+ Made it illegal to tie an associative array to a RECNO database and
+ an ordinary array to a HASH or BTREE database.
- * Fixed a bug that prevented 1.70 from compiling under win32
+1.13
- * Updated to support Berkeley DB 3.x
+ Minor changes to DB_File.xs and DB_File.pm
- * Updated dbinfo for Berkeley DB 3.x file formats.
+1.12
-1.72 16th January 2000
+ Documented the incompatibility with version 2 of Berkeley DB.
- * Added hints/sco.pl
+1.11
- * The module will now use XSLoader when it is available. When it
- isn't it will use DynaLoader.
+ Documented the untie gotcha.
- * The locking section in DB_File.pm has been discredited. Many thanks
- to David Harris for spotting the underlying problem, contributing
- the updates to the documentation and writing DB_File::Lock (available
- on CPAN).
+1.10
-1.73 31st May 2000
+ Fixed fd method so that it still returns -1 for in-memory files
+ when db 1.86 is used.
- * Added support in version.c for building with threaded Perl.
+1.09
- * Berkeley DB 3.1 has reenabled support for null keys. The test
- harness has been updated to reflect this.
+ Minor bug fix in DB_File::HASHINFO, DB_File::RECNOINFO and
+ DB_File::BTREEINFO.
-1.74 10th December 2000
+ Changed default mode to 0666.
- * A "close" call in DB_File.xs needed to be parenthesised to stop win32 from
- thinking it was one of its macros.
+1.08
- * Updated dbinfo to support Berkeley DB 3.1 file format changes.
+ Documented operation of bval.
- * DB_File.pm & the test harness now use the warnings pragma (when
- available).
+1.07
- * Included Perl core patch 7703 -- size argument for hash_cb is different
- for Berkeley DB 3.x
+ Fixed bug with RECNO, where bval wasn't defaulting to "\n".
- * Included Perl core patch 7801 -- Give __getBerkeleyDBInfo the ANSI C
- treatment.
+1.06
- * @a = () produced the warning 'Argument "" isn't numeric in entersub'
- This has been fixed. Thanks to Edward Avis for spotting this bug.
+ Minor namespace cleanup: Localized PrintBtree.
- * Added note about building under Linux. Included patches.
+1.05
- * Included Perl core patch 8068 -- fix for bug 20001013.009
- When run with warnings enabled "$hash{XX} = undef " produced an
- "Uninitialized value" warning. This has been fixed.
+ Made all scripts in the documentation strict and -w clean.
-1.75 17th December 2000
+ Added logic to DB_File.xs to allow the module to be built after
+ Perl is installed.
- * Fixed perl core patch 7703
+1.04
- * Added support to allow DB_File to be built with Berkeley DB 3.2 --
- btree_compare, btree_prefix and hash_cb needed to be changed.
+ Minor documentation changes.
- * Updated dbinfo to support Berkeley DB 3.2 file format changes.
+ Fixed a bug in hash_cb. Patches supplied by Dave Hammen,
+ <hammen@gothamcity.jsc.nasa.govt>.
+ Fixed a bug with the constructors for DB_File::HASHINFO,
+ DB_File::BTREEINFO and DB_File::RECNOINFO. Also tidied up the
+ constructors to make them -w clean.
-1.76 15th January 2001
+ Reworked part of the test harness to be more locale friendly.
- * Added instructions for using LD_PRELOAD to get Berkeley DB 2.x to work
- with DB_File on Linux. Thanks to Norbert Bollow for sending details of
- this approach.
+1.03
+
+ Documentation update.
+
+ DB_File now imports the constants (O_RDWR, O_CREAT etc.) from Fcntl
+ automatically.
+ The standard hash function exists is now supported.
+
+ Modified the behavior of get_dup. When it returns an associative
+ array, the value is the count of the number of matching BTREE
+ values.
+
+1.02
+
+ Merged OS/2 specific code into DB_File.xs
+
+ Removed some redundant code in DB_File.xs.
+
+ Documentation update.
+
+ Allow negative subscripts with RECNO interface.
+
+ Changed the default flags from O_RDWR to O_CREAT|O_RDWR.
+
+ The example code which showed how to lock a database needed a call
+ to sync added. Without it the resultant database file was empty.
+
+ Added get_dup method.
+
+1.01
+
+ Fixed a core dump problem with SunOS.
+
+ The return value from TIEHASH wasn't set to NULL when dbopen
+ returned an error.
+
+1.0
+
+ DB_File has been in use for over a year. To reflect that, the
+ version number has been incremented to 1.0.
+
+ Added complete support for multiple concurrent callbacks.
+
+ Using the push method on an empty list didn't work properly. This
+ has been fixed.
+
+0.3
+
+ Added prototype support for multiple btree compare callbacks.
+
+0.2
+
+ When DB_File is opening a database file, it no longer terminates the
+ process if dbopen returns an error. This allows file protection
+ errors to be caught at run time. Thanks to Judith Grass
+ <grass@cybercash.com> for spotting the bug.
+
+0.1
+
+ First Release.
diff --git a/bdb/perl.DB_File/DB_File.pm b/bdb/perl/DB_File/DB_File.pm
index e9b6a40d7e3..49004ffa148 100644
--- a/bdb/perl.DB_File/DB_File.pm
+++ b/bdb/perl/DB_File/DB_File.pm
@@ -1,17 +1,17 @@
# DB_File.pm -- Perl 5 interface to Berkeley DB
#
# written by Paul Marquess (Paul.Marquess@btinternet.com)
-# last modified 15th January 2001
-# version 1.76
+# last modified 1st September 2002
+# version 1.805
#
-# Copyright (c) 1995-2001 Paul Marquess. All rights reserved.
+# Copyright (c) 1995-2002 Paul Marquess. All rights reserved.
# This program is free software; you can redistribute it and/or
# modify it under the same terms as Perl itself.
package DB_File::HASHINFO ;
-require 5.003 ;
+require 5.00404;
use warnings;
use strict;
@@ -32,8 +32,13 @@ sub TIEHASH
{
my $pkg = shift ;
- bless { VALID => { map {$_, 1}
- qw( bsize ffactor nelem cachesize hash lorder)
+ bless { VALID => {
+ bsize => 1,
+ ffactor => 1,
+ nelem => 1,
+ cachesize => 1,
+ hash => 2,
+ lorder => 1,
},
GOT => {}
}, $pkg ;
@@ -58,8 +63,12 @@ sub STORE
my $key = shift ;
my $value = shift ;
- if ( exists $self->{VALID}{$key} )
+ my $type = $self->{VALID}{$key};
+
+ if ( $type )
{
+ croak "Key '$key' not associated with a code reference"
+ if $type == 2 && !ref $value && ref $value ne 'CODE';
$self->{GOT}{$key} = $value ;
return ;
}
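The reworked STORE above marks the callback options with type 2 (hash here; compare and prefix in the BTREEINFO table further down) and croaks when something other than a code reference is assigned to them; the other *INFO packages reuse this STORE through inheritance. A small sketch of the effect from the caller's side, using only the exported $DB_BTREE object:

    use warnings;
    use strict;
    use DB_File;

    # A code reference is accepted for a type-2 option such as 'compare' ...
    $DB_BTREE->{'compare'} = sub { lc $_[0] cmp lc $_[1] };

    # ... while anything else now croaks from STORE.
    eval { $DB_BTREE->{'compare'} = "not a code reference" };
    print $@ if $@;    # Key 'compare' not associated with a code reference ...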
@@ -132,9 +141,15 @@ sub TIEHASH
{
my $pkg = shift ;
- bless { VALID => { map {$_, 1}
- qw( flags cachesize maxkeypage minkeypage psize
- compare prefix lorder )
+ bless { VALID => {
+ flags => 1,
+ cachesize => 1,
+ maxkeypage => 1,
+ minkeypage => 1,
+ psize => 1,
+ compare => 2,
+ prefix => 2,
+ lorder => 1,
},
GOT => {},
}, $pkg ;
@@ -145,13 +160,19 @@ package DB_File ;
use warnings;
use strict;
-use vars qw($VERSION @ISA @EXPORT $AUTOLOAD $DB_BTREE $DB_HASH $DB_RECNO
- $db_version $use_XSLoader
- ) ;
+our ($VERSION, @ISA, @EXPORT, $AUTOLOAD, $DB_BTREE, $DB_HASH, $DB_RECNO);
+our ($db_version, $use_XSLoader, $splice_end_array);
use Carp;
-$VERSION = "1.76" ;
+$VERSION = "1.805" ;
+
+{
+ local $SIG{__WARN__} = sub {$splice_end_array = "@_";};
+ my @a =(1); splice(@a, 3);
+ $splice_end_array =
+ ($splice_end_array =~ /^splice\(\) offset past end of array at /);
+}
#typedef enum { DB_BTREE, DB_HASH, DB_RECNO } DBTYPE;
$DB_BTREE = new DB_File::BTREEINFO ;
@@ -163,7 +184,7 @@ require Exporter;
use AutoLoader;
BEGIN {
$use_XSLoader = 1 ;
- eval { require XSLoader } ;
+ { local $SIG{__DIE__} ; eval { require XSLoader } ; }
if ($@) {
$use_XSLoader = 0 ;
@@ -210,21 +231,12 @@ push @ISA, qw(Tie::Hash Exporter);
sub AUTOLOAD {
my($constname);
($constname = $AUTOLOAD) =~ s/.*:://;
- my $val = constant($constname, @_ ? $_[0] : 0);
- if ($! != 0) {
- if ($! =~ /Invalid/ || $!{EINVAL}) {
- $AutoLoader::AUTOLOAD = $AUTOLOAD;
- goto &AutoLoader::AUTOLOAD;
- }
- else {
- my($pack,$file,$line) = caller;
- croak "Your vendor has not defined DB macro $constname, used at $file line $line.
-";
- }
- }
- eval "sub $AUTOLOAD { $val }";
- goto &$AUTOLOAD;
-}
+ my ($error, $val) = constant($constname);
+ Carp::croak $error if $error;
+ no strict 'refs';
+ *{$AUTOLOAD} = sub { $val };
+ goto &{$AUTOLOAD};
+}
eval {
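The rewritten AUTOLOAD above fetches an (error, value) pair from the new constant() XSUB and then installs a real sub, so each constant is looked up only once. A minimal, self-contained sketch of the same memoising pattern, with a hypothetical My::Constants package and a pure-Perl _constant() stand-in instead of DB_File's generated XS code:

    use warnings;
    use strict;

    package My::Constants;
    use Carp;
    our $AUTOLOAD;

    # Hypothetical stand-in for the generated constant() routine: it
    # returns an (error, value) pair, like the XS call used above.
    my %TABLE = (ANSWER => 42);

    sub _constant {
        my $name = shift;
        return exists $TABLE{$name}
            ? (undef, $TABLE{$name})
            : ("$name is not a valid My::Constants constant", undef);
    }

    sub AUTOLOAD {
        my ($constname) = $AUTOLOAD =~ /([^:]+)$/;
        my ($error, $val) = _constant($constname);
        croak $error if $error;
        no strict 'refs';
        *{$AUTOLOAD} = sub { $val };    # install once; later calls bypass AUTOLOAD
        goto &{$AUTOLOAD};
    }

    package main;
    print My::Constants::ANSWER(), "\n";    # 42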
@@ -251,6 +263,9 @@ sub tie_hash_or_array
$arg[4] = tied %{ $arg[4] }
if @arg >= 5 && ref $arg[4] && $arg[4] =~ /=HASH/ && tied %{ $arg[4] } ;
+ $arg[2] = O_CREAT()|O_RDWR() if @arg >=3 && ! defined $arg[2];
+ $arg[3] = 0666 if @arg >=4 && ! defined $arg[3];
+
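With the two lines added above, a caller may pass undef for the open flags or the file mode and get O_CREAT|O_RDWR and 0666 respectively. A small sketch (the file name is illustrative only):

    use warnings;
    use strict;
    use DB_File;

    # undef flags and mode now fall back to O_CREAT|O_RDWR and 0666.
    my %h;
    tie %h, 'DB_File', "defaults.db", undef, undef, $DB_HASH
        or die "Cannot open 'defaults.db': $!\n";

    $h{'key'} = 'value';
    untie %h;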
# make recno in Berkeley DB version 2 work like recno in version 1.
if ($db_version > 1 and defined $arg[4] and $arg[4] =~ /RECNO/ and
$arg[1] and ! -e $arg[1]) {
@@ -307,6 +322,173 @@ sub STORESIZE
}
}
+
+sub SPLICE
+{
+ my $self = shift;
+ my $offset = shift;
+ if (not defined $offset) {
+ warnings::warnif('uninitialized', 'Use of uninitialized value in splice');
+ $offset = 0;
+ }
+
+ my $length = @_ ? shift : 0;
+ # Carping about definedness comes _after_ the OFFSET sanity check.
+ # This is so we get the same error messages as Perl's splice().
+ #
+
+ my @list = @_;
+
+ my $size = $self->FETCHSIZE();
+
+ # 'If OFFSET is negative then it start that far from the end of
+ # the array.'
+ #
+ if ($offset < 0) {
+ my $new_offset = $size + $offset;
+ if ($new_offset < 0) {
+ die "Modification of non-creatable array value attempted, "
+ . "subscript $offset";
+ }
+ $offset = $new_offset;
+ }
+
+ if (not defined $length) {
+ warnings::warnif('uninitialized', 'Use of uninitialized value in splice');
+ $length = 0;
+ }
+
+ if ($offset > $size) {
+ $offset = $size;
+ warnings::warnif('misc', 'splice() offset past end of array')
+ if $splice_end_array;
+ }
+
+ # 'If LENGTH is omitted, removes everything from OFFSET onward.'
+ if (not defined $length) {
+ $length = $size - $offset;
+ }
+
+ # 'If LENGTH is negative, leave that many elements off the end of
+ # the array.'
+ #
+ if ($length < 0) {
+ $length = $size - $offset + $length;
+
+ if ($length < 0) {
+ # The user must have specified a length bigger than the
+ # length of the array passed in. But perl's splice()
+ # doesn't catch this, it just behaves as for length=0.
+ #
+ $length = 0;
+ }
+ }
+
+ if ($length > $size - $offset) {
+ $length = $size - $offset;
+ }
+
+ # $num_elems holds the current number of elements in the database.
+ my $num_elems = $size;
+
+ # 'Removes the elements designated by OFFSET and LENGTH from an
+ # array,'...
+ #
+ my @removed = ();
+ foreach (0 .. $length - 1) {
+ my $old;
+ my $status = $self->get($offset, $old);
+ if ($status != 0) {
+ my $msg = "error from Berkeley DB on get($offset, \$old)";
+ if ($status == 1) {
+ $msg .= ' (no such element?)';
+ }
+ else {
+ $msg .= ": error status $status";
+ if (defined $! and $! ne '') {
+ $msg .= ", message $!";
+ }
+ }
+ die $msg;
+ }
+ push @removed, $old;
+
+ $status = $self->del($offset);
+ if ($status != 0) {
+ my $msg = "error from Berkeley DB on del($offset)";
+ if ($status == 1) {
+ $msg .= ' (no such element?)';
+ }
+ else {
+ $msg .= ": error status $status";
+ if (defined $! and $! ne '') {
+ $msg .= ", message $!";
+ }
+ }
+ die $msg;
+ }
+
+ -- $num_elems;
+ }
+
+ # ...'and replaces them with the elements of LIST, if any.'
+ my $pos = $offset;
+ while (defined (my $elem = shift @list)) {
+ my $old_pos = $pos;
+ my $status;
+ if ($pos >= $num_elems) {
+ $status = $self->put($pos, $elem);
+ }
+ else {
+ $status = $self->put($pos, $elem, $self->R_IBEFORE);
+ }
+
+ if ($status != 0) {
+ my $msg = "error from Berkeley DB on put($pos, $elem, ...)";
+ if ($status == 1) {
+ $msg .= ' (no such element?)';
+ }
+ else {
+ $msg .= ", error status $status";
+ if (defined $! and $! ne '') {
+ $msg .= ", message $!";
+ }
+ }
+ die $msg;
+ }
+
+ die "pos unexpectedly changed from $old_pos to $pos with R_IBEFORE"
+ if $old_pos != $pos;
+
+ ++ $pos;
+ ++ $num_elems;
+ }
+
+ if (wantarray) {
+ # 'In list context, returns the elements removed from the
+ # array.'
+ #
+ return @removed;
+ }
+ elsif (defined wantarray and not wantarray) {
+ # 'In scalar context, returns the last element removed, or
+ # undef if no elements are removed.'
+ #
+ if (@removed) {
+ my $last = pop @removed;
+ return "$last";
+ }
+ else {
+ return undef;
+ }
+ }
+ elsif (not defined wantarray) {
+ # Void context
+ }
+ else { die }
+}
+sub ::DB_File::splice { &SPLICE }
+
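The SPLICE method above, together with the ::DB_File::splice alias just before this point, lets Perl's built-in splice operate on an array tied to a RECNO database. A short sketch of how it might be exercised on a reasonably recent perl; the file name "text" just follows the nearby POD examples:

    use warnings;
    use strict;
    use DB_File;

    my $file = "text";
    unlink $file;
    tie my @lines, 'DB_File', $file, O_RDWR|O_CREAT, 0666, $DB_RECNO
        or die "Cannot open file '$file': $!\n";

    push @lines, qw(zero one two three four);

    # remove "one" and "two", insert a single replacement record
    my @removed = splice(@lines, 1, 2, "middle");
    print "removed [@removed]\n";    # removed [one two]
    print "kept    [@lines]\n";      # kept    [zero middle three four]

    untie @lines;
    unlink $file;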
sub find_dup
{
croak "Usage: \$db->find_dup(key,value)\n"
@@ -388,8 +570,8 @@ DB_File - Perl5 access to Berkeley DB version 1.x
=head1 SYNOPSIS
- use DB_File ;
-
+ use DB_File;
+
[$X =] tie %hash, 'DB_File', [$filename, $flags, $mode, $DB_HASH] ;
[$X =] tie %hash, 'DB_File', $filename, $flags, $mode, $DB_BTREE ;
[$X =] tie @array, 'DB_File', $filename, $flags, $mode, $DB_RECNO ;
@@ -414,6 +596,7 @@ DB_File - Perl5 access to Berkeley DB version 1.x
$X->push(list);
$a = $X->shift;
$X->unshift(list);
+ @r = $X->splice(offset, length, elements);
# DBM Filters
$old_filter = $db->filter_store_key ( sub { ... } ) ;
@@ -428,7 +611,7 @@ DB_File - Perl5 access to Berkeley DB version 1.x
B<DB_File> is a module which allows Perl programs to make use of the
facilities provided by Berkeley DB version 1.x (if you have a newer
-version of DB, see L<Using DB_File with Berkeley DB version 2 or 3>).
+version of DB, see L<Using DB_File with Berkeley DB version 2 or greater>).
It is assumed that you have a copy of the Berkeley DB manual pages at
hand when reading this documentation. The interface defined here
mirrors the Berkeley DB interface closely.
@@ -472,27 +655,27 @@ number.
=back
-=head2 Using DB_File with Berkeley DB version 2 or 3
+=head2 Using DB_File with Berkeley DB version 2 or greater
Although B<DB_File> is intended to be used with Berkeley DB version 1,
-it can also be used with version 2.or 3 In this case the interface is
+it can also be used with version 2, 3 or 4. In this case the interface is
limited to the functionality provided by Berkeley DB 1.x. Anywhere the
-version 2 or 3 interface differs, B<DB_File> arranges for it to work
+version 2 or greater interface differs, B<DB_File> arranges for it to work
like version 1. This feature allows B<DB_File> scripts that were built
-with version 1 to be migrated to version 2 or 3 without any changes.
+with version 1 to be migrated to version 2 or greater without any changes.
If you want to make use of the new features available in Berkeley DB
2.x or greater, use the Perl module B<BerkeleyDB> instead.
-B<Note:> The database file format has changed in both Berkeley DB
-version 2 and 3. If you cannot recreate your databases, you must dump
-any existing databases with the C<db_dump185> utility that comes with
-Berkeley DB.
-Once you have rebuilt DB_File to use Berkeley DB version 2 or 3, your
-databases can be recreated using C<db_load>. Refer to the Berkeley DB
+B<Note:> The database file format has changed multiple times in Berkeley
+DB version 2, 3 and 4. If you cannot recreate your databases, you
+must dump any existing databases with either the C<db_dump> or the
+C<db_dump185> utility that comes with Berkeley DB.
+Once you have rebuilt DB_File to use Berkeley DB version 2 or greater,
+your databases can be recreated using C<db_load>. Refer to the Berkeley DB
documentation for further details.
-Please read L<"COPYRIGHT"> before using version 2.x or 3.x of Berkeley
+Please read L<"COPYRIGHT"> before using version 2.x or greater of Berkeley
DB with DB_File.
=head2 Interface to Berkeley DB
@@ -672,10 +855,10 @@ contents of the database.
use warnings ;
use strict ;
use DB_File ;
- use vars qw( %h $k $v ) ;
+ our (%h, $k, $v) ;
unlink "fruit" ;
- tie %h, "DB_File", "fruit", O_RDWR|O_CREAT, 0640, $DB_HASH
+ tie %h, "DB_File", "fruit", O_RDWR|O_CREAT, 0666, $DB_HASH
or die "Cannot open file 'fruit': $!\n";
# Add a few key/value pairs to the file
@@ -699,7 +882,7 @@ contents of the database.
here is the output:
Banana Exists
-
+
orange -> orange
tomato -> red
banana -> yellow
@@ -736,7 +919,7 @@ insensitive compare function will be used.
$DB_BTREE->{'compare'} = \&Compare ;
unlink "tree" ;
- tie %h, "DB_File", "tree", O_RDWR|O_CREAT, 0640, $DB_BTREE
+ tie %h, "DB_File", "tree", O_RDWR|O_CREAT, 0666, $DB_BTREE
or die "Cannot open file 'tree': $!\n" ;
# Add a key/value pair to the file
@@ -777,6 +960,35 @@ You cannot change the ordering once the database has been created. Thus
you must use the same compare function every time you access the
database.
+=item 3
+
+Duplicate keys are entirely defined by the comparison function.
+In the case-insensitive example above, the keys: 'KEY' and 'key'
+would be considered duplicates, and assigning to the second one
+would overwrite the first. If duplicates are allowed (with the
+R_DUP flag discussed below), only a single copy of duplicate keys
+is stored in the database --- so (again with the example above) assigning
+three values to the keys: 'KEY', 'Key', and 'key' would leave just
+the first key: 'KEY' in the database with three values. For some
+situations this results in information loss, so care should be taken
+to provide fully qualified comparison functions when necessary.
+For example, the above comparison routine could be modified to
+additionally compare case-sensitively if two keys are equal in the
+case insensitive comparison:
+
+ sub compare {
+ my($key1, $key2) = @_;
+ lc $key1 cmp lc $key2 ||
+ $key1 cmp $key2;
+ }
+
+And now you will only have duplicates when the keys themselves
+are truly the same; a short usage sketch follows this list.
+(Note: in versions of the db library prior to about November 1996,
+such duplicate keys were retained, so it was possible to recover
+the original keys in sets of keys that compared as equal.)
+
+
=back
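As promised in item 3, a short usage sketch of the fully qualified comparison in action, reusing the "tree" file name from the surrounding examples:

    use warnings;
    use strict;
    use DB_File;

    # Case-insensitive ordering with a case-sensitive tie-break, as in
    # the compare sub shown in item 3 above.
    $DB_BTREE->{'compare'} = sub { lc $_[0] cmp lc $_[1] || $_[0] cmp $_[1] };

    unlink "tree";
    tie my %h, 'DB_File', "tree", O_RDWR|O_CREAT, 0666, $DB_BTREE
        or die "Cannot open 'tree': $!\n";

    $h{'KEY'} = 1;
    $h{'Key'} = 2;
    $h{'key'} = 3;

    # All three keys survive, because keys only count as duplicates when
    # the fallback case-sensitive comparison also says they are equal.
    my @keys = keys %h;
    print scalar(@keys), "\n";    # 3

    untie %h;
    unlink "tree";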
=head2 Handling Duplicate Keys
@@ -793,17 +1005,17 @@ code:
use strict ;
use DB_File ;
- use vars qw($filename %h ) ;
+ my ($filename, %h) ;
$filename = "tree" ;
unlink $filename ;
-
+
# Enable duplicate records
$DB_BTREE->{'flags'} = R_DUP ;
-
- tie %h, "DB_File", $filename, O_RDWR|O_CREAT, 0640, $DB_BTREE
+
+ tie %h, "DB_File", $filename, O_RDWR|O_CREAT, 0666, $DB_BTREE
or die "Cannot open $filename: $!\n";
-
+
# Add some key/value pairs to the file
$h{'Wall'} = 'Larry' ;
$h{'Wall'} = 'Brick' ; # Note the duplicate key
@@ -847,25 +1059,25 @@ Here is the script above rewritten using the C<seq> API method.
use warnings ;
use strict ;
use DB_File ;
-
- use vars qw($filename $x %h $status $key $value) ;
+
+ my ($filename, $x, %h, $status, $key, $value) ;
$filename = "tree" ;
unlink $filename ;
-
+
# Enable duplicate records
$DB_BTREE->{'flags'} = R_DUP ;
-
- $x = tie %h, "DB_File", $filename, O_RDWR|O_CREAT, 0640, $DB_BTREE
+
+ $x = tie %h, "DB_File", $filename, O_RDWR|O_CREAT, 0666, $DB_BTREE
or die "Cannot open $filename: $!\n";
-
+
# Add some key/value pairs to the file
$h{'Wall'} = 'Larry' ;
$h{'Wall'} = 'Brick' ; # Note the duplicate key
$h{'Wall'} = 'Brick' ; # Note the duplicate key and value
$h{'Smith'} = 'John' ;
$h{'mouse'} = 'mickey' ;
-
+
# iterate through the btree using seq
# and print each key/value pair.
$key = $value = 0 ;
@@ -873,7 +1085,7 @@ Here is the script above rewritten using the C<seq> API method.
$status == 0 ;
$status = $x->seq($key, $value, R_NEXT) )
{ print "$key -> $value\n" }
-
+
undef $x ;
untie %h ;
@@ -919,15 +1131,15 @@ this:
use warnings ;
use strict ;
use DB_File ;
-
- use vars qw($filename $x %h ) ;
+
+ my ($filename, $x, %h) ;
$filename = "tree" ;
-
+
# Enable duplicate records
$DB_BTREE->{'flags'} = R_DUP ;
-
- $x = tie %h, "DB_File", $filename, O_RDWR|O_CREAT, 0640, $DB_BTREE
+
+ $x = tie %h, "DB_File", $filename, O_RDWR|O_CREAT, 0666, $DB_BTREE
or die "Cannot open $filename: $!\n";
my $cnt = $x->get_dup("Wall") ;
@@ -942,7 +1154,7 @@ this:
@list = $x->get_dup("Smith") ;
print "Smith => [@list]\n" ;
-
+
@list = $x->get_dup("Dog") ;
print "Dog => [@list]\n" ;
@@ -969,23 +1181,23 @@ Assuming the database from the previous example:
use warnings ;
use strict ;
use DB_File ;
-
- use vars qw($filename $x %h $found) ;
- my $filename = "tree" ;
-
+ my ($filename, $x, %h, $found) ;
+
+ $filename = "tree" ;
+
# Enable duplicate records
$DB_BTREE->{'flags'} = R_DUP ;
-
- $x = tie %h, "DB_File", $filename, O_RDWR|O_CREAT, 0640, $DB_BTREE
+
+ $x = tie %h, "DB_File", $filename, O_RDWR|O_CREAT, 0666, $DB_BTREE
or die "Cannot open $filename: $!\n";
$found = ( $x->find_dup("Wall", "Larry") == 0 ? "" : "not") ;
print "Larry Wall is $found there\n" ;
-
+
$found = ( $x->find_dup("Wall", "Harry") == 0 ? "" : "not") ;
print "Harry Wall is $found there\n" ;
-
+
undef $x ;
untie %h ;
@@ -1008,22 +1220,22 @@ Again assuming the existence of the C<tree> database
use warnings ;
use strict ;
use DB_File ;
-
- use vars qw($filename $x %h $found) ;
- my $filename = "tree" ;
-
+ my ($filename, $x, %h, $found) ;
+
+ $filename = "tree" ;
+
# Enable duplicate records
$DB_BTREE->{'flags'} = R_DUP ;
-
- $x = tie %h, "DB_File", $filename, O_RDWR|O_CREAT, 0640, $DB_BTREE
+
+ $x = tie %h, "DB_File", $filename, O_RDWR|O_CREAT, 0666, $DB_BTREE
or die "Cannot open $filename: $!\n";
$x->del_dup("Wall", "Larry") ;
$found = ( $x->find_dup("Wall", "Larry") == 0 ? "" : "not") ;
print "Larry Wall is $found there\n" ;
-
+
undef $x ;
untie %h ;
@@ -1055,7 +1267,7 @@ and print the first matching key/value pair given a partial key.
use DB_File ;
use Fcntl ;
- use vars qw($filename $x %h $st $key $value) ;
+ my ($filename, $x, %h, $st, $key, $value) ;
sub match
{
@@ -1069,24 +1281,24 @@ and print the first matching key/value pair given a partial key.
$filename = "tree" ;
unlink $filename ;
- $x = tie %h, "DB_File", $filename, O_RDWR|O_CREAT, 0640, $DB_BTREE
+ $x = tie %h, "DB_File", $filename, O_RDWR|O_CREAT, 0666, $DB_BTREE
or die "Cannot open $filename: $!\n";
-
+
# Add some key/value pairs to the file
$h{'mouse'} = 'mickey' ;
$h{'Wall'} = 'Larry' ;
$h{'Walls'} = 'Brick' ;
$h{'Smith'} = 'John' ;
-
+
$key = $value = 0 ;
print "IN ORDER\n" ;
for ($st = $x->seq($key, $value, R_FIRST) ;
$st == 0 ;
$st = $x->seq($key, $value, R_NEXT) )
-
+
{ print "$key -> $value\n" }
-
+
print "\nPARTIAL MATCH\n" ;
match "Wa" ;
@@ -1149,6 +1361,9 @@ That means that you can specify other options (e.g. cachesize) and
still have bval default to C<"\n"> for variable length records, and
space for fixed length records.
+Also note that the bval option only allows you to specify a single byte
+as a delimiter.
+
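Since bval accepts only a single byte, a different record separator is requested by assigning one character before tying. A small sketch using a fresh RECNOINFO object rather than the global $DB_RECNO (the file name "records" is illustrative only):

    use warnings;
    use strict;
    use DB_File;

    my $info = new DB_File::RECNOINFO ;
    $info->{'bval'} = "\0";        # one byte only: NUL-separated records

    tie my @recs, 'DB_File', "records", O_RDWR|O_CREAT, 0666, $info
        or die "Cannot open 'records': $!\n";

    $recs[0] = "first";
    $recs[1] = "second";

    untie @recs;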
=head2 A Simple Example
Here is a simple example that uses RECNO (if you are using a version
@@ -1163,7 +1378,7 @@ L<Extra RECNO Methods> for a workaround).
unlink $filename ;
my @h ;
- tie @h, "DB_File", $filename, O_RDWR|O_CREAT, 0640, $DB_RECNO
+ tie @h, "DB_File", $filename, O_RDWR|O_CREAT, 0666, $DB_RECNO
or die "Cannot open file 'text': $!\n" ;
# Add a few key/value pairs to the file
@@ -1237,6 +1452,10 @@ Pushes the elements of C<list> to the start of the array.
Returns the number of elements in the array.
+=item B<$X-E<gt>splice(offset, length, elements);>
+
+Returns a splice of the array.
+
=back
=head2 Another Example
@@ -1247,17 +1466,17 @@ L<THE API INTERFACE>).
use warnings ;
use strict ;
- use vars qw(@h $H $file $i) ;
+ my (@h, $H, $file, $i) ;
use DB_File ;
use Fcntl ;
-
+
$file = "text" ;
unlink $file ;
- $H = tie @h, "DB_File", $file, O_RDWR|O_CREAT, 0640, $DB_RECNO
+ $H = tie @h, "DB_File", $file, O_RDWR|O_CREAT, 0666, $DB_RECNO
or die "Cannot open file $file: $!\n" ;
-
+
# first create a text file to play with
$h[0] = "zero" ;
$h[1] = "one" ;
@@ -1265,7 +1484,7 @@ L<THE API INTERFACE>).
$h[3] = "three" ;
$h[4] = "four" ;
-
+
# Print the records in order.
#
# The length method is needed here because evaluating a tied
@@ -1675,7 +1894,7 @@ peril!
The locking technique went like this.
- $db = tie(%db, 'DB_File', '/tmp/foo.db', O_CREAT|O_RDWR, 0644)
+ $db = tie(%db, 'DB_File', '/tmp/foo.db', O_CREAT|O_RDWR, 0666)
|| die "dbcreat /tmp/foo.db $!";
$fd = $db->fd;
open(DB_FH, "+<&=$fd") || die "dup $!";
@@ -1812,7 +2031,7 @@ F<authors/id/TOMC/scripts/nshist.gz>).
use DB_File ;
use Fcntl ;
- use vars qw( $dotdir $HISTORY %hist_db $href $binary_time $date ) ;
+ my ($dotdir, $HISTORY, %hist_db, $href, $binary_time, $date) ;
$dotdir = $ENV{HOME} || $ENV{LOGNAME};
$HISTORY = "$dotdir/.netscape/history.db";
@@ -1967,7 +2186,7 @@ Consider this script:
use warnings ;
use strict ;
use DB_File ;
- use vars qw(%x) ;
+ my %x ;
tie %x, DB_File, "filename" ;
Running it produces the error in question:
@@ -2033,7 +2252,7 @@ compile properly on IRIX 5.3.
=head1 COPYRIGHT
-Copyright (c) 1995-1999 Paul Marquess. All rights reserved. This program
+Copyright (c) 1995-2002 Paul Marquess. All rights reserved. This program
is free software; you can redistribute it and/or modify it under the
same terms as Perl itself.
diff --git a/bdb/perl.DB_File/DB_File.xs b/bdb/perl/DB_File/DB_File.xs
index 6811342066d..fba8dede791 100644
--- a/bdb/perl.DB_File/DB_File.xs
+++ b/bdb/perl/DB_File/DB_File.xs
@@ -3,12 +3,12 @@
DB_File.xs -- Perl 5 interface to Berkeley DB
written by Paul Marquess <Paul.Marquess@btinternet.com>
- last modified 15th January 2001
- version 1.76
+ last modified 1st September 2002
+ version 1.805
All comments/suggestions/problems are welcome
- Copyright (c) 1995-2001 Paul Marquess. All rights reserved.
+ Copyright (c) 1995-2002 Paul Marquess. All rights reserved.
This program is free software; you can redistribute it and/or
modify it under the same terms as Perl itself.
@@ -91,43 +91,42 @@
Berkeley DB 3.2 -- btree_compare, btree_prefix and hash_cb
needed to be changed.
1.76 - No change to DB_File.xs
+ 1.77 - Tidied up a few types used in calling newSVpvn.
+ 1.78 - Core patch 10335, 10372, 10534, 10549, 11051 included.
+ 1.79 - NEXTKEY ignores the input key.
+ Added lots of casts
+ 1.800 - Moved backward compatibility code into ppport.h.
+ Use the new constants code.
+ 1.801 - No change to DB_File.xs
+ 1.802 - No change to DB_File.xs
+ 1.803 - FETCH, STORE & DELETE don't map the flags parameter
+ into the equivalent Berkeley DB function anymore.
+ 1.804 - no change.
+ 1.805 - recursion detection added to the callbacks
+ Support for 4.1.X added.
+ Filter code can now cope with read-only $_
*/
+#define PERL_NO_GET_CONTEXT
#include "EXTERN.h"
#include "perl.h"
#include "XSUB.h"
-#ifndef PERL_VERSION
-# include "patchlevel.h"
-# define PERL_REVISION 5
-# define PERL_VERSION PATCHLEVEL
-# define PERL_SUBVERSION SUBVERSION
+#ifdef _NOT_CORE
+# include "ppport.h"
#endif
-#if PERL_REVISION == 5 && (PERL_VERSION < 4 || (PERL_VERSION == 4 && PERL_SUBVERSION <= 75 ))
-
-# define PL_sv_undef sv_undef
-# define PL_na na
-
-#endif
-
-/* DEFSV appears first in 5.004_56 */
-#ifndef DEFSV
-# define DEFSV GvSV(defgv)
-#endif
+/* Mention DB_VERSION_MAJOR_CFG, DB_VERSION_MINOR_CFG, and
+ DB_VERSION_PATCH_CFG here so that Configure pulls them all in. */
/* Being the Berkeley DB we prefer the <sys/cdefs.h> (which will be
* shortly #included by the <db.h>) __attribute__ to the possibly
* already defined __attribute__, for example by GNUC or by Perl. */
-#undef __attribute__
-
-/* If Perl has been compiled with Threads support,the symbol op will
- be defined here. This clashes with a field name in db.h, so get rid of it.
- */
-#ifdef op
-# undef op
+/* #if DB_VERSION_MAJOR_CFG < 2 */
+#ifndef DB_VERSION_MAJOR
+# undef __attribute__
#endif
#ifdef COMPAT185
@@ -136,25 +135,37 @@
# include <db.h>
#endif
-#ifdef CAN_PROTOTYPE
-extern void __getBerkeleyDBInfo(void);
-#endif
+/* Wall starts with 5.7.x */
-#ifndef pTHX
-# define pTHX
-# define pTHX_
-# define aTHX
-# define aTHX_
-#endif
+#if PERL_REVISION > 5 || (PERL_REVISION == 5 && PERL_VERSION >= 7)
-#ifndef newSVpvn
-# define newSVpvn(a,b) newSVpv(a,b)
-#endif
+/* Since we dropped the gccish definition of __attribute__ we will want
+ * to redefine dNOOP, however (so that dTHX continues to work). Yes,
+ * all this means that we can't do attribute checking on the DB_File,
+ * boo, hiss. */
+# ifndef DB_VERSION_MAJOR
+
+# undef dNOOP
+# define dNOOP extern int Perl___notused
+
+ /* Ditto for dXSARGS. */
+# undef dXSARGS
+# define dXSARGS \
+ dSP; dMARK; \
+ I32 ax = mark - PL_stack_base + 1; \
+ I32 items = sp - mark
+
+# endif
+
+/* avoid -Wall; DB_File xsubs never make use of `ix' setup for ALIASes */
+# undef dXSI32
+# define dXSI32 dNOOP
+
+#endif /* Perl >= 5.7 */
#include <fcntl.h>
/* #define TRACE */
-#define DBM_FILTERING
#ifdef TRACE
# define Trace(x) printf x
@@ -175,6 +186,10 @@ extern void __getBerkeleyDBInfo(void);
# define AT_LEAST_DB_3_2
#endif
+#if DB_VERSION_MAJOR > 4 || (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR >= 1)
+# define AT_LEAST_DB_4_1
+#endif
+
/* map version 2 features & constants onto their version 1 equivalent */
#ifdef DB_Prefix_t
@@ -319,16 +334,16 @@ typedef union INFO {
-#define db_DELETE(db, key, flags) ((db->dbp)->del)(db->dbp, TXN &key, flags)
-#define db_STORE(db, key, value, flags) ((db->dbp)->put)(db->dbp, TXN &key, &value, flags)
-#define db_FETCH(db, key, flags) ((db->dbp)->get)(db->dbp, TXN &key, &value, flags)
+#define db_DELETE(db, key, flags) ((db->dbp)->del)(db->dbp, TXN &key, 0)
+#define db_STORE(db, key, value, flags) ((db->dbp)->put)(db->dbp, TXN &key, &value, 0)
+#define db_FETCH(db, key, flags) ((db->dbp)->get)(db->dbp, TXN &key, &value, 0)
#define db_sync(db, flags) ((db->dbp)->sync)(db->dbp, flags)
#define db_get(db, key, value, flags) ((db->dbp)->get)(db->dbp, TXN &key, &value, flags)
#ifdef DB_VERSION_MAJOR
-#define db_DESTROY(db) ( db->cursor->c_close(db->cursor),\
- (db->dbp->close)(db->dbp, 0) )
+#define db_DESTROY(db) (!db->aborted && ( db->cursor->c_close(db->cursor),\
+ (db->dbp->close)(db->dbp, 0) ))
#define db_close(db) ((db->dbp)->close)(db->dbp, 0)
#define db_del(db, key, flags) (flagSet(flags, R_CURSOR) \
? ((db->cursor)->c_del)(db->cursor, 0) \
@@ -336,7 +351,7 @@ typedef union INFO {
#else /* ! DB_VERSION_MAJOR */
-#define db_DESTROY(db) ((db->dbp)->close)(db->dbp)
+#define db_DESTROY(db) (!db->aborted && ((db->dbp)->close)(db->dbp))
#define db_close(db) ((db->dbp)->close)(db->dbp)
#define db_del(db, key, flags) ((db->dbp)->del)(db->dbp, &key, flags)
#define db_put(db, key, value, flags) ((db->dbp)->put)(db->dbp, &key, &value, flags)
@@ -350,8 +365,12 @@ typedef struct {
DBTYPE type ;
DB * dbp ;
SV * compare ;
+ bool in_compare ;
SV * prefix ;
+ bool in_prefix ;
SV * hash ;
+ bool in_hash ;
+ bool aborted ;
int in_memory ;
#ifdef BERKELEY_DB_1_OR_2
INFO info ;
@@ -359,51 +378,25 @@ typedef struct {
#ifdef DB_VERSION_MAJOR
DBC * cursor ;
#endif
-#ifdef DBM_FILTERING
SV * filter_fetch_key ;
SV * filter_store_key ;
SV * filter_fetch_value ;
SV * filter_store_value ;
int filtering ;
-#endif /* DBM_FILTERING */
} DB_File_type;
typedef DB_File_type * DB_File ;
typedef DBT DBTKEY ;
-#ifdef DBM_FILTERING
-
-#define ckFilter(arg,type,name) \
- if (db->type) { \
- SV * save_defsv ; \
- /* printf("filtering %s\n", name) ;*/ \
- if (db->filtering) \
- croak("recursion detected in %s", name) ; \
- db->filtering = TRUE ; \
- save_defsv = newSVsv(DEFSV) ; \
- sv_setsv(DEFSV, arg) ; \
- PUSHMARK(sp) ; \
- (void) perl_call_sv(db->type, G_DISCARD|G_NOARGS); \
- sv_setsv(arg, DEFSV) ; \
- sv_setsv(DEFSV, save_defsv) ; \
- SvREFCNT_dec(save_defsv) ; \
- db->filtering = FALSE ; \
- /*printf("end of filtering %s\n", name) ;*/ \
- }
-
-#else
-
-#define ckFilter(arg,type, name)
-
-#endif /* DBM_FILTERING */
-
#define my_sv_setpvn(sv, d, s) sv_setpvn(sv, (s ? d : (void*)""), s)
#define OutputValue(arg, name) \
{ if (RETVAL == 0) { \
my_sv_setpvn(arg, name.data, name.size) ; \
- ckFilter(arg, filter_fetch_value,"filter_fetch_value") ; \
+ TAINT; \
+ SvTAINTED_on(arg); \
+ DBM_ckFilter(arg, filter_fetch_value,"filter_fetch_value") ; \
} \
}
@@ -415,16 +408,37 @@ typedef DBT DBTKEY ;
} \
else \
sv_setiv(arg, (I32)*(I32*)name.data - 1); \
- ckFilter(arg, filter_fetch_key,"filter_fetch_key") ; \
+ TAINT; \
+ SvTAINTED_on(arg); \
+ DBM_ckFilter(arg, filter_fetch_key,"filter_fetch_key") ; \
} \
}
+#define my_SvUV32(sv) ((u_int32_t)SvUV(sv))
+
+#ifdef CAN_PROTOTYPE
+extern void __getBerkeleyDBInfo(void);
+#endif
/* Internal Global Data */
-static recno_t Value ;
-static recno_t zero = 0 ;
-static DB_File CurrentDB ;
-static DBTKEY empty ;
+
+#define MY_CXT_KEY "DB_File::_guts" XS_VERSION
+
+typedef struct {
+ recno_t x_Value;
+ recno_t x_zero;
+ DB_File x_CurrentDB;
+ DBTKEY x_empty;
+} my_cxt_t;
+
+START_MY_CXT
+
+#define Value (MY_CXT.x_Value)
+#define zero (MY_CXT.x_zero)
+#define CurrentDB (MY_CXT.x_CurrentDB)
+#define empty (MY_CXT.x_empty)
+
+#define ERR_BUFF "DB_File::Error"
#ifdef DB_VERSION_MAJOR
@@ -488,6 +502,13 @@ u_int flags ;
#endif /* DB_VERSION_MAJOR */
+static void
+tidyUp(DB_File db)
+{
+ /* db_DESTROY(db); */
+ db->aborted = TRUE ;
+}
+
static int
#ifdef AT_LEAST_DB_3_2
@@ -518,12 +539,20 @@ const DBT * key2 ;
dTHX;
#endif
dSP ;
+ dMY_CXT ;
void * data1, * data2 ;
int retval ;
int count ;
+ DB_File keep_CurrentDB = CurrentDB;
- data1 = key1->data ;
- data2 = key2->data ;
+
+ if (CurrentDB->in_compare) {
+ tidyUp(CurrentDB);
+ croak ("DB_File btree_compare: recursion detected\n") ;
+ }
+
+ data1 = (char *) key1->data ;
+ data2 = (char *) key2->data ;
#ifndef newSVpvn
/* As newSVpv will assume that the data pointer is a null terminated C
@@ -545,18 +574,26 @@ const DBT * key2 ;
PUSHs(sv_2mortal(newSVpvn(data2,key2->size)));
PUTBACK ;
+ CurrentDB->in_compare = TRUE;
+
count = perl_call_sv(CurrentDB->compare, G_SCALAR);
+ CurrentDB = keep_CurrentDB;
+ CurrentDB->in_compare = FALSE;
+
SPAGAIN ;
- if (count != 1)
+ if (count != 1){
+ tidyUp(CurrentDB);
croak ("DB_File btree_compare: expected 1 return value from compare sub, got %d\n", count) ;
+ }
retval = POPi ;
PUTBACK ;
FREETMPS ;
LEAVE ;
+
return (retval) ;
}
@@ -589,12 +626,19 @@ const DBT * key2 ;
dTHX;
#endif
dSP ;
- void * data1, * data2 ;
+ dMY_CXT ;
+ char * data1, * data2 ;
int retval ;
int count ;
+ DB_File keep_CurrentDB = CurrentDB;
- data1 = key1->data ;
- data2 = key2->data ;
+ if (CurrentDB->in_prefix){
+ tidyUp(CurrentDB);
+ croak ("DB_File btree_prefix: recursion detected\n") ;
+ }
+
+ data1 = (char *) key1->data ;
+ data2 = (char *) key2->data ;
#ifndef newSVpvn
/* As newSVpv will assume that the data pointer is a null terminated C
@@ -616,12 +660,19 @@ const DBT * key2 ;
PUSHs(sv_2mortal(newSVpvn(data2,key2->size)));
PUTBACK ;
+ CurrentDB->in_prefix = TRUE;
+
count = perl_call_sv(CurrentDB->prefix, G_SCALAR);
+ CurrentDB = keep_CurrentDB;
+ CurrentDB->in_prefix = FALSE;
+
SPAGAIN ;
- if (count != 1)
+ if (count != 1){
+ tidyUp(CurrentDB);
croak ("DB_File btree_prefix: expected 1 return value from prefix sub, got %d\n", count) ;
+ }
retval = POPi ;
@@ -667,8 +718,15 @@ HASH_CB_SIZE_TYPE size ;
dTHX;
#endif
dSP ;
+ dMY_CXT;
int retval ;
int count ;
+ DB_File keep_CurrentDB = CurrentDB;
+
+ if (CurrentDB->in_hash){
+ tidyUp(CurrentDB);
+ croak ("DB_File hash callback: recursion detected\n") ;
+ }
#ifndef newSVpvn
if (size == 0)
@@ -684,12 +742,19 @@ HASH_CB_SIZE_TYPE size ;
XPUSHs(sv_2mortal(newSVpvn((char*)data,size)));
PUTBACK ;
+ keep_CurrentDB->in_hash = TRUE;
+
count = perl_call_sv(CurrentDB->hash, G_SCALAR);
+ CurrentDB = keep_CurrentDB;
+ CurrentDB->in_hash = FALSE;
+
SPAGAIN ;
- if (count != 1)
+ if (count != 1){
+ tidyUp(CurrentDB);
croak ("DB_File hash_cb: expected 1 return value from hash sub, got %d\n", count) ;
+ }
retval = POPi ;
@@ -700,6 +765,23 @@ HASH_CB_SIZE_TYPE size ;
return (retval) ;
}
+static void
+#ifdef CAN_PROTOTYPE
+db_errcall_cb(const char * db_errpfx, char * buffer)
+#else
+db_errcall_cb(db_errpfx, buffer)
+const char * db_errpfx;
+char * buffer;
+#endif
+{
+ SV * sv = perl_get_sv(ERR_BUFF, FALSE) ;
+ if (sv) {
+ if (db_errpfx)
+ sv_setpvf(sv, "%s: %s", db_errpfx, buffer) ;
+ else
+ sv_setpv(sv, buffer) ;
+ }
+}
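db_errcall_cb above, together with the ERR_BUFF definition and the set_errcall call later in this file, copies Berkeley DB's own error text into the Perl variable $DB_File::Error. A hedged sketch of how a caller might inspect it; the hook is only installed after a successful open and only when the module is built against Berkeley DB 2.x or later, so whether anything is captured for a given failure depends on the library version:

    use warnings;
    use strict;
    use DB_File;

    my %h;
    tie %h, 'DB_File', "error_demo.db", O_RDWR|O_CREAT, 0666, $DB_HASH
        or die "Cannot open 'error_demo.db': $!\n";

    # ... database operations ...

    # If the Berkeley DB library reported an error through its errcall
    # hook, the text ends up here:
    warn "last Berkeley DB error: $DB_File::Error\n" if $DB_File::Error;

    untie %h;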
#if defined(TRACE) && defined(BERKELEY_DB_1_OR_2)
@@ -809,8 +891,10 @@ I32 value ;
I32 length = GetArrayLength(aTHX_ db) ;
/* check for attempt to write before start of array */
- if (length + value + 1 <= 0)
+ if (length + value + 1 <= 0) {
+ tidyUp(db);
croak("Modification of non-creatable array value attempted, subscript %ld", (long)value) ;
+ }
value = length + value + 1 ;
}
@@ -842,16 +926,15 @@ SV * sv ;
void * openinfo = NULL ;
INFO * info = &RETVAL->info ;
STRLEN n_a;
+ dMY_CXT;
/* printf("In ParseOpenInfo name=[%s] flags=[%d] mode = [%d]\n", name, flags, mode) ; */
Zero(RETVAL, 1, DB_File_type) ;
/* Default to HASH */
-#ifdef DBM_FILTERING
RETVAL->filtering = 0 ;
RETVAL->filter_fetch_key = RETVAL->filter_store_key =
RETVAL->filter_fetch_value = RETVAL->filter_store_value =
-#endif /* DBM_FILTERING */
RETVAL->hash = RETVAL->compare = RETVAL->prefix = NULL ;
RETVAL->type = DB_HASH ;
@@ -1115,16 +1198,15 @@ SV * sv ;
DB * dbp ;
STRLEN n_a;
int status ;
+ dMY_CXT;
/* printf("In ParseOpenInfo name=[%s] flags=[%d] mode = [%d]\n", name, flags, mode) ; */
Zero(RETVAL, 1, DB_File_type) ;
/* Default to HASH */
-#ifdef DBM_FILTERING
RETVAL->filtering = 0 ;
RETVAL->filter_fetch_key = RETVAL->filter_store_key =
RETVAL->filter_fetch_value = RETVAL->filter_store_value =
-#endif /* DBM_FILTERING */
RETVAL->hash = RETVAL->compare = RETVAL->prefix = NULL ;
RETVAL->type = DB_HASH ;
@@ -1171,23 +1253,23 @@ SV * sv ;
svp = hv_fetch(action, "ffactor", 7, FALSE);
if (svp)
- (void)dbp->set_h_ffactor(dbp, SvIV(*svp)) ;
+ (void)dbp->set_h_ffactor(dbp, my_SvUV32(*svp)) ;
svp = hv_fetch(action, "nelem", 5, FALSE);
if (svp)
- (void)dbp->set_h_nelem(dbp, SvIV(*svp)) ;
+ (void)dbp->set_h_nelem(dbp, my_SvUV32(*svp)) ;
svp = hv_fetch(action, "bsize", 5, FALSE);
if (svp)
- (void)dbp->set_pagesize(dbp, SvIV(*svp));
+ (void)dbp->set_pagesize(dbp, my_SvUV32(*svp));
svp = hv_fetch(action, "cachesize", 9, FALSE);
if (svp)
- (void)dbp->set_cachesize(dbp, 0, SvIV(*svp), 0) ;
+ (void)dbp->set_cachesize(dbp, 0, my_SvUV32(*svp), 0) ;
svp = hv_fetch(action, "lorder", 6, FALSE);
if (svp)
- (void)dbp->set_lorder(dbp, SvIV(*svp)) ;
+ (void)dbp->set_lorder(dbp, (int)SvIV(*svp)) ;
PrintHash(info) ;
}
@@ -1214,19 +1296,19 @@ SV * sv ;
svp = hv_fetch(action, "flags", 5, FALSE);
if (svp)
- (void)dbp->set_flags(dbp, SvIV(*svp)) ;
+ (void)dbp->set_flags(dbp, my_SvUV32(*svp)) ;
svp = hv_fetch(action, "cachesize", 9, FALSE);
if (svp)
- (void)dbp->set_cachesize(dbp, 0, SvIV(*svp), 0) ;
+ (void)dbp->set_cachesize(dbp, 0, my_SvUV32(*svp), 0) ;
svp = hv_fetch(action, "psize", 5, FALSE);
if (svp)
- (void)dbp->set_pagesize(dbp, SvIV(*svp)) ;
+ (void)dbp->set_pagesize(dbp, my_SvUV32(*svp)) ;
svp = hv_fetch(action, "lorder", 6, FALSE);
if (svp)
- (void)dbp->set_lorder(dbp, SvIV(*svp)) ;
+ (void)dbp->set_lorder(dbp, (int)SvIV(*svp)) ;
PrintBtree(info) ;
@@ -1252,17 +1334,17 @@ SV * sv ;
svp = hv_fetch(action, "cachesize", 9, FALSE);
if (svp) {
- status = dbp->set_cachesize(dbp, 0, SvIV(*svp), 0) ;
+ status = dbp->set_cachesize(dbp, 0, my_SvUV32(*svp), 0) ;
}
svp = hv_fetch(action, "psize", 5, FALSE);
if (svp) {
- status = dbp->set_pagesize(dbp, SvIV(*svp)) ;
+ status = dbp->set_pagesize(dbp, my_SvUV32(*svp)) ;
}
svp = hv_fetch(action, "lorder", 6, FALSE);
if (svp) {
- status = dbp->set_lorder(dbp, SvIV(*svp)) ;
+ status = dbp->set_lorder(dbp, (int)SvIV(*svp)) ;
}
svp = hv_fetch(action, "bval", 4, FALSE);
@@ -1272,7 +1354,7 @@ SV * sv ;
if (SvPOK(*svp))
value = (int)*SvPV(*svp, n_a) ;
else
- value = SvIV(*svp) ;
+ value = (int)SvIV(*svp) ;
if (fixed) {
status = dbp->set_re_pad(dbp, value) ;
@@ -1286,7 +1368,7 @@ SV * sv ;
if (fixed) {
svp = hv_fetch(action, "reclen", 6, FALSE);
if (svp) {
- u_int32_t len = (u_int32_t)SvIV(*svp) ;
+ u_int32_t len = my_SvUV32(*svp) ;
status = dbp->set_re_len(dbp, len) ;
}
}
@@ -1305,10 +1387,10 @@ SV * sv ;
name = NULL ;
- status = dbp->set_flags(dbp, DB_RENUMBER) ;
+ status = dbp->set_flags(dbp, (u_int32_t)DB_RENUMBER) ;
if (flags){
- (void)dbp->set_flags(dbp, flags) ;
+ (void)dbp->set_flags(dbp, (u_int32_t)flags) ;
}
PrintRecno(info) ;
}
@@ -1317,7 +1399,7 @@ SV * sv ;
}
{
- int Flags = 0 ;
+ u_int32_t Flags = 0 ;
int status ;
/* Map 1.x flags to 3.x flags */
@@ -1336,14 +1418,22 @@ SV * sv ;
Flags |= DB_TRUNCATE ;
#endif
+#ifdef AT_LEAST_DB_4_1
+ status = (RETVAL->dbp->open)(RETVAL->dbp, NULL, name, NULL, RETVAL->type,
+ Flags, mode) ;
+#else
status = (RETVAL->dbp->open)(RETVAL->dbp, name, NULL, RETVAL->type,
Flags, mode) ;
+#endif
/* printf("open returned %d %s\n", status, db_strerror(status)) ; */
- if (status == 0)
+ if (status == 0) {
+ RETVAL->dbp->set_errcall(RETVAL->dbp, db_errcall_cb) ;
+
status = (RETVAL->dbp->cursor)(RETVAL->dbp, NULL, &RETVAL->cursor,
0) ;
- /* printf("cursor returned %d %s\n", status, db_strerror(status)) ; */
+ /* printf("cursor returned %d %s\n", status, db_strerror(status)) ; */
+ }
if (status)
RETVAL->dbp = NULL ;
@@ -1357,246 +1447,16 @@ SV * sv ;
} /* ParseOpenInfo */
-static double
-#ifdef CAN_PROTOTYPE
-constant(char *name, int arg)
-#else
-constant(name, arg)
-char *name;
-int arg;
-#endif
-{
- errno = 0;
- switch (*name) {
- case 'A':
- break;
- case 'B':
- if (strEQ(name, "BTREEMAGIC"))
-#ifdef BTREEMAGIC
- return BTREEMAGIC;
-#else
- goto not_there;
-#endif
- if (strEQ(name, "BTREEVERSION"))
-#ifdef BTREEVERSION
- return BTREEVERSION;
-#else
- goto not_there;
-#endif
- break;
- case 'C':
- break;
- case 'D':
- if (strEQ(name, "DB_LOCK"))
-#ifdef DB_LOCK
- return DB_LOCK;
-#else
- goto not_there;
-#endif
- if (strEQ(name, "DB_SHMEM"))
-#ifdef DB_SHMEM
- return DB_SHMEM;
-#else
- goto not_there;
-#endif
- if (strEQ(name, "DB_TXN"))
-#ifdef DB_TXN
- return (U32)DB_TXN;
-#else
- goto not_there;
-#endif
- break;
- case 'E':
- break;
- case 'F':
- break;
- case 'G':
- break;
- case 'H':
- if (strEQ(name, "HASHMAGIC"))
-#ifdef HASHMAGIC
- return HASHMAGIC;
-#else
- goto not_there;
-#endif
- if (strEQ(name, "HASHVERSION"))
-#ifdef HASHVERSION
- return HASHVERSION;
-#else
- goto not_there;
-#endif
- break;
- case 'I':
- break;
- case 'J':
- break;
- case 'K':
- break;
- case 'L':
- break;
- case 'M':
- if (strEQ(name, "MAX_PAGE_NUMBER"))
-#ifdef MAX_PAGE_NUMBER
- return (U32)MAX_PAGE_NUMBER;
-#else
- goto not_there;
-#endif
- if (strEQ(name, "MAX_PAGE_OFFSET"))
-#ifdef MAX_PAGE_OFFSET
- return MAX_PAGE_OFFSET;
-#else
- goto not_there;
-#endif
- if (strEQ(name, "MAX_REC_NUMBER"))
-#ifdef MAX_REC_NUMBER
- return (U32)MAX_REC_NUMBER;
-#else
- goto not_there;
-#endif
- break;
- case 'N':
- break;
- case 'O':
- break;
- case 'P':
- break;
- case 'Q':
- break;
- case 'R':
- if (strEQ(name, "RET_ERROR"))
-#ifdef RET_ERROR
- return RET_ERROR;
-#else
- goto not_there;
-#endif
- if (strEQ(name, "RET_SPECIAL"))
-#ifdef RET_SPECIAL
- return RET_SPECIAL;
-#else
- goto not_there;
-#endif
- if (strEQ(name, "RET_SUCCESS"))
-#ifdef RET_SUCCESS
- return RET_SUCCESS;
-#else
- goto not_there;
-#endif
- if (strEQ(name, "R_CURSOR"))
-#ifdef R_CURSOR
- return R_CURSOR;
-#else
- goto not_there;
-#endif
- if (strEQ(name, "R_DUP"))
-#ifdef R_DUP
- return R_DUP;
-#else
- goto not_there;
-#endif
- if (strEQ(name, "R_FIRST"))
-#ifdef R_FIRST
- return R_FIRST;
-#else
- goto not_there;
-#endif
- if (strEQ(name, "R_FIXEDLEN"))
-#ifdef R_FIXEDLEN
- return R_FIXEDLEN;
-#else
- goto not_there;
-#endif
- if (strEQ(name, "R_IAFTER"))
-#ifdef R_IAFTER
- return R_IAFTER;
-#else
- goto not_there;
-#endif
- if (strEQ(name, "R_IBEFORE"))
-#ifdef R_IBEFORE
- return R_IBEFORE;
-#else
- goto not_there;
-#endif
- if (strEQ(name, "R_LAST"))
-#ifdef R_LAST
- return R_LAST;
-#else
- goto not_there;
-#endif
- if (strEQ(name, "R_NEXT"))
-#ifdef R_NEXT
- return R_NEXT;
-#else
- goto not_there;
-#endif
- if (strEQ(name, "R_NOKEY"))
-#ifdef R_NOKEY
- return R_NOKEY;
-#else
- goto not_there;
-#endif
- if (strEQ(name, "R_NOOVERWRITE"))
-#ifdef R_NOOVERWRITE
- return R_NOOVERWRITE;
-#else
- goto not_there;
-#endif
- if (strEQ(name, "R_PREV"))
-#ifdef R_PREV
- return R_PREV;
-#else
- goto not_there;
-#endif
- if (strEQ(name, "R_RECNOSYNC"))
-#ifdef R_RECNOSYNC
- return R_RECNOSYNC;
-#else
- goto not_there;
-#endif
- if (strEQ(name, "R_SETCURSOR"))
-#ifdef R_SETCURSOR
- return R_SETCURSOR;
-#else
- goto not_there;
-#endif
- if (strEQ(name, "R_SNAPSHOT"))
-#ifdef R_SNAPSHOT
- return R_SNAPSHOT;
-#else
- goto not_there;
-#endif
- break;
- case 'S':
- break;
- case 'T':
- break;
- case 'U':
- break;
- case 'V':
- break;
- case 'W':
- break;
- case 'X':
- break;
- case 'Y':
- break;
- case 'Z':
- break;
- case '_':
- break;
- }
- errno = EINVAL;
- return 0;
-
-not_there:
- errno = ENOENT;
- return 0;
-}
+#include "constants.h"
MODULE = DB_File PACKAGE = DB_File PREFIX = db_
+INCLUDE: constants.xs
+
BOOT:
{
+ SV * sv_err = perl_get_sv(ERR_BUFF, GV_ADD|GV_ADDMULTI) ;
+ MY_CXT_INIT;
__getBerkeleyDBInfo() ;
DBT_clear(empty) ;
@@ -1604,10 +1464,6 @@ BOOT:
empty.size = sizeof(recno_t) ;
}
-double
-constant(name,arg)
- char * name
- int arg
DB_File
@@ -1638,16 +1494,19 @@ db_DoTie_(isHASH, dbtype, name=undef, flags=O_CREAT|O_RDWR, mode=0666, type=DB_H
int
db_DESTROY(db)
DB_File db
+ PREINIT:
+ dMY_CXT;
INIT:
CurrentDB = db ;
+ Trace(("DESTROY %p\n", db));
CLEANUP:
+ Trace(("DESTROY %p done\n", db));
if (db->hash)
SvREFCNT_dec(db->hash) ;
if (db->compare)
SvREFCNT_dec(db->compare) ;
if (db->prefix)
SvREFCNT_dec(db->prefix) ;
-#ifdef DBM_FILTERING
if (db->filter_fetch_key)
SvREFCNT_dec(db->filter_fetch_key) ;
if (db->filter_store_key)
@@ -1656,7 +1515,6 @@ db_DESTROY(db)
SvREFCNT_dec(db->filter_fetch_value) ;
if (db->filter_store_value)
SvREFCNT_dec(db->filter_store_value) ;
-#endif /* DBM_FILTERING */
safefree(db) ;
#ifdef DB_VERSION_MAJOR
if (RETVAL > 0)
@@ -1669,6 +1527,8 @@ db_DELETE(db, key, flags=0)
DB_File db
DBTKEY key
u_int flags
+ PREINIT:
+ dMY_CXT;
INIT:
CurrentDB = db ;
@@ -1677,6 +1537,8 @@ int
db_EXISTS(db, key)
DB_File db
DBTKEY key
+ PREINIT:
+ dMY_CXT;
CODE:
{
DBT value ;
@@ -1688,18 +1550,20 @@ db_EXISTS(db, key)
OUTPUT:
RETVAL
-int
+void
db_FETCH(db, key, flags=0)
DB_File db
DBTKEY key
u_int flags
+ PREINIT:
+ dMY_CXT ;
+ int RETVAL ;
CODE:
{
DBT value ;
DBT_clear(value) ;
CurrentDB = db ;
- /* RETVAL = ((db->dbp)->get)(db->dbp, TXN &key, &value, flags) ; */
RETVAL = db_get(db, key, value, flags) ;
ST(0) = sv_newmortal();
OutputValue(ST(0), value)
@@ -1711,13 +1575,18 @@ db_STORE(db, key, value, flags=0)
DBTKEY key
DBT value
u_int flags
+ PREINIT:
+ dMY_CXT;
INIT:
CurrentDB = db ;
-int
+void
db_FIRSTKEY(db)
DB_File db
+ PREINIT:
+ dMY_CXT ;
+ int RETVAL ;
CODE:
{
DBTKEY key ;
@@ -1731,14 +1600,18 @@ db_FIRSTKEY(db)
OutputKey(ST(0), key) ;
}
-int
+void
db_NEXTKEY(db, key)
DB_File db
- DBTKEY key
+ DBTKEY key = NO_INIT
+ PREINIT:
+ dMY_CXT ;
+ int RETVAL ;
CODE:
{
DBT value ;
+ DBT_clear(key) ;
DBT_clear(value) ;
CurrentDB = db ;
RETVAL = do_SEQ(db, key, value, R_NEXT) ;
@@ -1754,13 +1627,14 @@ int
unshift(db, ...)
DB_File db
ALIAS: UNSHIFT = 1
+ PREINIT:
+ dMY_CXT;
CODE:
{
DBTKEY key ;
DBT value ;
int i ;
int One ;
- DB * Db = db->dbp ;
STRLEN n_a;
DBT_clear(key) ;
@@ -1783,7 +1657,7 @@ unshift(db, ...)
#ifdef DB_VERSION_MAJOR
RETVAL = (db->cursor->c_put)(db->cursor, &key, &value, DB_BEFORE) ;
#else
- RETVAL = (Db->put)(Db, &key, &value, R_IBEFORE) ;
+ RETVAL = (db->dbp->put)(db->dbp, &key, &value, R_IBEFORE) ;
#endif
if (RETVAL != 0)
break;
@@ -1792,10 +1666,14 @@ unshift(db, ...)
OUTPUT:
RETVAL
-I32
+void
pop(db)
DB_File db
+ PREINIT:
+ dMY_CXT;
ALIAS: POP = 1
+ PREINIT:
+ I32 RETVAL;
CODE:
{
DBTKEY key ;
@@ -1819,10 +1697,14 @@ pop(db)
}
}
-I32
+void
shift(db)
DB_File db
+ PREINIT:
+ dMY_CXT;
ALIAS: SHIFT = 1
+ PREINIT:
+ I32 RETVAL;
CODE:
{
DBT value ;
@@ -1849,6 +1731,8 @@ shift(db)
I32
push(db, ...)
DB_File db
+ PREINIT:
+ dMY_CXT;
ALIAS: PUSH = 1
CODE:
{
@@ -1891,6 +1775,8 @@ push(db, ...)
I32
length(db)
DB_File db
+ PREINIT:
+ dMY_CXT;
ALIAS: FETCHSIZE = 1
CODE:
CurrentDB = db ;
@@ -1908,6 +1794,8 @@ db_del(db, key, flags=0)
DB_File db
DBTKEY key
u_int flags
+ PREINIT:
+ dMY_CXT;
CODE:
CurrentDB = db ;
RETVAL = db_del(db, key, flags) ;
@@ -1927,6 +1815,8 @@ db_get(db, key, value, flags=0)
DBTKEY key
DBT value = NO_INIT
u_int flags
+ PREINIT:
+ dMY_CXT;
CODE:
CurrentDB = db ;
DBT_clear(value) ;
@@ -1947,6 +1837,8 @@ db_put(db, key, value, flags=0)
DBTKEY key
DBT value
u_int flags
+ PREINIT:
+ dMY_CXT;
CODE:
CurrentDB = db ;
RETVAL = db_put(db, key, value, flags) ;
@@ -1963,16 +1855,20 @@ db_put(db, key, value, flags=0)
int
db_fd(db)
DB_File db
- int status = 0 ;
+ PREINIT:
+ dMY_CXT ;
CODE:
CurrentDB = db ;
#ifdef DB_VERSION_MAJOR
RETVAL = -1 ;
- status = (db->in_memory
- ? -1
- : ((db->dbp)->fd)(db->dbp, &RETVAL) ) ;
- if (status != 0)
- RETVAL = -1 ;
+ {
+ int status = 0 ;
+ status = (db->in_memory
+ ? -1
+ : ((db->dbp)->fd)(db->dbp, &RETVAL) ) ;
+ if (status != 0)
+ RETVAL = -1 ;
+ }
#else
RETVAL = (db->in_memory
? -1
@@ -1985,6 +1881,8 @@ int
db_sync(db, flags=0)
DB_File db
u_int flags
+ PREINIT:
+ dMY_CXT;
CODE:
CurrentDB = db ;
RETVAL = db_sync(db, flags) ;
@@ -2002,6 +1900,8 @@ db_seq(db, key, value, flags)
DBTKEY key
DBT value = NO_INIT
u_int flags
+ PREINIT:
+ dMY_CXT;
CODE:
CurrentDB = db ;
DBT_clear(value) ;
@@ -2017,33 +1917,13 @@ db_seq(db, key, value, flags)
key
value
-#ifdef DBM_FILTERING
-
-#define setFilter(type) \
- { \
- if (db->type) \
- RETVAL = sv_mortalcopy(db->type) ; \
- ST(0) = RETVAL ; \
- if (db->type && (code == &PL_sv_undef)) { \
- SvREFCNT_dec(db->type) ; \
- db->type = NULL ; \
- } \
- else if (code) { \
- if (db->type) \
- sv_setsv(db->type, code) ; \
- else \
- db->type = newSVsv(code) ; \
- } \
- }
-
-
SV *
filter_fetch_key(db, code)
DB_File db
SV * code
SV * RETVAL = &PL_sv_undef ;
CODE:
- setFilter(filter_fetch_key) ;
+ DBM_setFilter(db->filter_fetch_key, code) ;
SV *
filter_store_key(db, code)
@@ -2051,7 +1931,7 @@ filter_store_key(db, code)
SV * code
SV * RETVAL = &PL_sv_undef ;
CODE:
- setFilter(filter_store_key) ;
+ DBM_setFilter(db->filter_store_key, code) ;
SV *
filter_fetch_value(db, code)
@@ -2059,7 +1939,7 @@ filter_fetch_value(db, code)
SV * code
SV * RETVAL = &PL_sv_undef ;
CODE:
- setFilter(filter_fetch_value) ;
+ DBM_setFilter(db->filter_fetch_value, code) ;
SV *
filter_store_value(db, code)
@@ -2067,6 +1947,5 @@ filter_store_value(db, code)
SV * code
SV * RETVAL = &PL_sv_undef ;
CODE:
- setFilter(filter_store_value) ;
+ DBM_setFilter(db->filter_store_value, code) ;
-#endif /* DBM_FILTERING */
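The four filter XSUBs above now delegate to the shared DBM_setFilter macro instead of the module's private setFilter; the Perl-level interface is unchanged. As a reminder, the usual NUL-termination filters look like this (file name illustrative only):

    use warnings;
    use strict;
    use DB_File;

    my %h;
    my $db = tie %h, 'DB_File', "filtered.db", O_RDWR|O_CREAT, 0666, $DB_HASH
        or die "Cannot open 'filtered.db': $!\n";

    # Add a trailing NUL going in and strip it coming out -- the classic
    # filters for sharing a file with C code that stores "key\0".
    $db->filter_store_key  (sub { $_ .= "\0" });
    $db->filter_store_value(sub { $_ .= "\0" });
    $db->filter_fetch_key  (sub { s/\0$//    });
    $db->filter_fetch_value(sub { s/\0$//    });

    $h{'abc'} = 'def';
    untie %h;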
diff --git a/bdb/perl.DB_File/DB_File_BS b/bdb/perl/DB_File/DB_File_BS
index 9282c498811..9282c498811 100644
--- a/bdb/perl.DB_File/DB_File_BS
+++ b/bdb/perl/DB_File/DB_File_BS
diff --git a/bdb/perl.DB_File/MANIFEST b/bdb/perl/DB_File/MANIFEST
index 0cc30dbfb47..b3e1a7bd85b 100644
--- a/bdb/perl.DB_File/MANIFEST
+++ b/bdb/perl/DB_File/MANIFEST
@@ -1,19 +1,16 @@
-Makefile.PL
+Changes
DB_File.pm
DB_File.xs
DB_File_BS
-Changes
+MANIFEST
+Makefile.PL
+README
config.in
dbinfo
+fallback.h
+fallback.xs
hints/dynixptx.pl
hints/sco.pl
-MANIFEST
-README
-typemap
-t/db-btree.t
-t/db-hash.t
-t/db-recno.t
-version.c
patches/5.004
patches/5.004_01
patches/5.004_02
@@ -25,3 +22,9 @@ patches/5.005_01
patches/5.005_02
patches/5.005_03
patches/5.6.0
+ppport.h
+t/db-btree.t
+t/db-hash.t
+t/db-recno.t
+typemap
+version.c
diff --git a/bdb/perl/DB_File/Makefile.PL b/bdb/perl/DB_File/Makefile.PL
new file mode 100644
index 00000000000..4c1565d8d01
--- /dev/null
+++ b/bdb/perl/DB_File/Makefile.PL
@@ -0,0 +1,330 @@
+#! perl -w
+
+use strict ;
+use ExtUtils::MakeMaker 5.16 ;
+use Config ;
+
+die "DB_File needs Perl 5.004_05 or better. This is $]\n"
+ if $] <= 5.00404;
+
+my $VER_INFO ;
+my $LIB_DIR ;
+my $INC_DIR ;
+my $DB_NAME ;
+my $LIBS ;
+my $COMPAT185 = "" ;
+
+ParseCONFIG() ;
+
+my @files = ('DB_File.pm', glob "t/*.t") ;
+UpDowngrade(@files);
+
+if (defined $DB_NAME)
+ { $LIBS = $DB_NAME }
+else {
+ if ($^O eq 'MSWin32')
+ { $LIBS = '-llibdb' }
+ else
+ { $LIBS = '-ldb' }
+}
+
+# Solaris is special.
+#$LIBS .= " -lthread" if $^O eq 'solaris' ;
+
+# AIX is special.
+$LIBS .= " -lpthread" if $^O eq 'aix' ;
+
+# OS2 is a special case, so check for it now.
+my $OS2 = "" ;
+$OS2 = "-DOS2" if $Config{'osname'} eq 'os2' ;
+
+WriteMakefile(
+ NAME => 'DB_File',
+ LIBS => ["-L${LIB_DIR} $LIBS"],
+ #MAN3PODS => {}, # Pods will be built by installman.
+ INC => "-I$INC_DIR",
+ VERSION_FROM => 'DB_File.pm',
+ XSPROTOARG => '-noprototypes',
+ DEFINE => "-D_NOT_CORE $OS2 $VER_INFO $COMPAT185",
+ OBJECT => 'version$(OBJ_EXT) DB_File$(OBJ_EXT)',
+ #OPTIMIZE => '-g',
+ 'depend' => { 'Makefile' => 'config.in',
+ 'version$(OBJ_EXT)' => 'version.c'},
+ 'clean' => { FILES => 'constants.h constants.xs' },
+ 'macro' => { INSTALLDIRS => 'perl', my_files => "@files" },
+ 'dist' => { COMPRESS => 'gzip', SUFFIX => 'gz',
+ DIST_DEFAULT => 'MyDoubleCheck tardist'},
+ );
+
+
+my @names = qw(
+ BTREEMAGIC
+ BTREEVERSION
+ DB_LOCK
+ DB_SHMEM
+ DB_TXN
+ HASHMAGIC
+ HASHVERSION
+ MAX_PAGE_NUMBER
+ MAX_PAGE_OFFSET
+ MAX_REC_NUMBER
+ RET_ERROR
+ RET_SPECIAL
+ RET_SUCCESS
+ R_CURSOR
+ R_DUP
+ R_FIRST
+ R_FIXEDLEN
+ R_IAFTER
+ R_IBEFORE
+ R_LAST
+ R_NEXT
+ R_NOKEY
+ R_NOOVERWRITE
+ R_PREV
+ R_RECNOSYNC
+ R_SETCURSOR
+ R_SNAPSHOT
+ __R_UNUSED
+ );
+
+if (eval {require ExtUtils::Constant; 1}) {
+ # Check the constants above all appear in @EXPORT in DB_File.pm
+ my %names = map { $_, 1} @names;
+ open F, "<DB_File.pm" or die "Cannot open DB_File.pm: $!\n";
+ while (<F>)
+ {
+ last if /^\s*\@EXPORT\s+=\s+qw\(/ ;
+ }
+
+ while (<F>)
+ {
+ last if /^\s*\)/ ;
+ /(\S+)/ ;
+ delete $names{$1} if defined $1 ;
+ }
+ close F ;
+
+ if ( keys %names )
+ {
+ my $missing = join ("\n\t", sort keys %names) ;
+ die "The following names are missing from \@EXPORT in DB_File.pm\n" .
+ "\t$missing\n" ;
+ }
+
+
+ ExtUtils::Constant::WriteConstants(
+ NAME => 'DB_File',
+ NAMES => \@names,
+ C_FILE => 'constants.h',
+ XS_FILE => 'constants.xs',
+
+ );
+}
+else {
+ use File::Copy;
+ copy ('fallback.h', 'constants.h')
+ or die "Can't copy fallback.h to constants.h: $!";
+ copy ('fallback.xs', 'constants.xs')
+ or die "Can't copy fallback.xs to constants.xs: $!";
+}
+
+exit;
+
+
+sub MY::postamble { <<'EOM' } ;
+
+MyDoubleCheck:
+ @echo Checking config.in is setup for a release
+ @(grep "^LIB.*/usr/local/BerkeleyDB" config.in && \
+ grep "^INCLUDE.*/usr/local/BerkeleyDB" config.in && \
+ grep "^#DBNAME.*" config.in) >/dev/null || \
+ (echo config.in needs fixing ; exit 1)
+ @echo config.in is ok
+ @echo
+ @echo Checking DB_File.xs is ok for a release.
+ @(perl -ne ' exit 1 if /^\s*#\s*define\s+TRACE/ ; ' DB_File.xs || \
+ (echo DB_File.xs needs fixing ; exit 1))
+ @echo DB_File.xs is ok
+ @echo
+ @echo Checking for $$^W in files: $(my_files)
+ @perl -ne ' \
+ exit 1 if /^\s*local\s*\(\s*\$$\^W\s*\)/;' $(my_files) || \
+ (echo found unexpected $$^W ; exit 1)
+ @echo No $$^W found.
+ @echo
+ @echo Checking for 'use vars' in files: $(my_files)
+ @perl -ne ' \
+ exit 0 if /^__(DATA|END)__/; \
+ exit 1 if /^\s*use\s+vars/;' $(my_files) || \
+ (echo found unexpected "use vars"; exit 1)
+ @echo No 'use vars' found.
+ @echo
+ @echo All files are OK for a release.
+ @echo
+
+EOM
+
+
+
+sub ParseCONFIG
+{
+ my ($k, $v) ;
+ my @badkey = () ;
+ my %Info = () ;
+ my @Options = qw( INCLUDE LIB PREFIX HASH DBNAME COMPAT185 ) ;
+ my %ValidOption = map {$_, 1} @Options ;
+ my %Parsed = %ValidOption ;
+ my $CONFIG = 'config.in' ;
+
+ print "Parsing $CONFIG...\n" ;
+
+ # DBNAME & COMPAT185 are optional, so pretend they have
+ # been parsed.
+ delete $Parsed{'DBNAME'} ;
+ delete $Parsed{'COMPAT185'} ;
+ $Info{COMPAT185} = "No" ;
+
+
+ open(F, "$CONFIG") or die "Cannot open file $CONFIG: $!\n" ;
+ while (<F>) {
+ s/^\s*|\s*$//g ;
+ next if /^\s*$/ or /^\s*#/ ;
+ s/\s*#\s*$// ;
+
+ ($k, $v) = split(/\s+=\s+/, $_, 2) ;
+ $k = uc $k ;
+ if ($ValidOption{$k}) {
+ delete $Parsed{$k} ;
+ $Info{$k} = $v ;
+ }
+ else {
+ push(@badkey, $k) ;
+ }
+ }
+ close F ;
+
+ print "Unknown keys in $CONFIG ignored [@badkey]\n"
+ if @badkey ;
+
+ # check parsed values
+ my @missing = () ;
+ die "The following keys are missing from $CONFIG file: [@missing]\n"
+ if @missing = keys %Parsed ;
+
+ $INC_DIR = $ENV{'DB_FILE_INCLUDE'} || $Info{'INCLUDE'} ;
+ $LIB_DIR = $ENV{'DB_FILE_LIB'} || $Info{'LIB'} ;
+ $DB_NAME = $Info{'DBNAME'} if defined $Info{'DBNAME'} ;
+ $COMPAT185 = "-DCOMPAT185 -DDB_LIBRARY_COMPATIBILITY_API"
+ if (defined $ENV{'DB_FILE_COMPAT185'} &&
+ $ENV{'DB_FILE_COMPAT185'} =~ /^\s*(on|true|1)\s*$/i) ||
+ $Info{'COMPAT185'} =~ /^\s*(on|true|1)\s*$/i ;
+ my $PREFIX = $Info{'PREFIX'} ;
+ my $HASH = $Info{'HASH'} ;
+
+ $VER_INFO = "-DmDB_Prefix_t=${PREFIX} -DmDB_Hash_t=${HASH}" ;
+
+ print <<EOM if 0 ;
+ INCLUDE [$INC_DIR]
+ LIB [$LIB_DIR]
+ HASH [$HASH]
+ PREFIX [$PREFIX]
+ DBNAME [$DB_NAME]
+
+EOM
+
+ print "Looks Good.\n" ;
+
+}
+
+sub UpDowngrade
+{
+ my @files = @_ ;
+
+ # our is stable from 5.6.0 onward
+ # warnings is stable from 5.6.1 onward
+
+ # Note: this code assumes that each statement it modifies is not
+ # split across multiple lines.
+
+
+ my $warn_sub ;
+ my $our_sub ;
+
+ if ($] < 5.006001) {
+ # From: use|no warnings "blah"
+ # To: local ($^W) = 1; # use|no warnings "blah"
+ #
+ # and
+ #
+ # From: warnings::warnif(x,y);
+ # To: $^W && carp(y); # warnif -- x
+ $warn_sub = sub {
+ s/^(\s*)(no\s+warnings)/${1}local (\$^W) = 0; #$2/ ;
+ s/^(\s*)(use\s+warnings)/${1}local (\$^W) = 1; #$2/ ;
+
+ s/^(\s*)warnings::warnif\s*\((.*?)\s*,\s*(.*?)\)\s*;/${1}\$^W && carp($3); # warnif - $2/ ;
+ };
+ }
+ else {
+ # From: local ($^W) = 1; # use|no warnings "blah"
+ # To: use|no warnings "blah"
+ #
+ # and
+ #
+ # From: $^W && carp(y); # warnif -- x
+ # To: warnings::warnif(x,y);
+ $warn_sub = sub {
+ s/^(\s*)local\s*\(\$\^W\)\s*=\s*\d+\s*;\s*#\s*((no|use)\s+warnings.*)/$1$2/ ;
+ s/^(\s*)\$\^W\s+\&\&\s*carp\s*\((.*?)\)\s*;\s*#\s*warnif\s*-\s*(.*)/${1}warnings::warnif($3, $2);/ ;
+ };
+ }
+
+ if ($] < 5.006000) {
+ $our_sub = sub {
+ if ( /^(\s*)our\s+\(\s*([^)]+\s*)\)/ ) {
+ my $indent = $1;
+ my $vars = join ' ', split /\s*,\s*/, $2;
+ $_ = "${indent}use vars qw($vars);\n";
+ }
+ };
+ }
+ else {
+ $our_sub = sub {
+ if ( /^(\s*)use\s+vars\s+qw\((.*?)\)/ ) {
+ my $indent = $1;
+ my $vars = join ', ', split ' ', $2;
+ $_ = "${indent}our ($vars);\n";
+ }
+ };
+ }
+
+ foreach (@files)
+ { doUpDown($our_sub, $warn_sub, $_) }
+}
+
+
+sub doUpDown
+{
+ my $our_sub = shift;
+ my $warn_sub = shift;
+
+ local ($^I) = ".bak" ;
+ local (@ARGV) = shift;
+
+ while (<>)
+ {
+ print, last if /^__(END|DATA)__/ ;
+
+ &{ $our_sub }();
+ &{ $warn_sub }();
+ print ;
+ }
+
+ return if eof ;
+
+ while (<>)
+ { print }
+}
+
+# end of file Makefile.PL
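
For illustration, the warnings downgrade that UpDowngrade() applies when building on a perl older than 5.6.1 can be seen on a single sample line; this sketch is not part of the patch and the sample line is hypothetical:

    # Minimal sketch of the pre-5.6.1 rewrite performed by UpDowngrade() above.
    my $line = "    use warnings;\n";
    $line =~ s/^(\s*)(use\s+warnings)/${1}local (\$^W) = 1; #$2/;
    print $line;   # prints:     local ($^W) = 1; #use warnings;
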
diff --git a/bdb/perl.DB_File/README b/bdb/perl/DB_File/README
index e780111b2e9..b09aa9d8aee 100644
--- a/bdb/perl.DB_File/README
+++ b/bdb/perl/DB_File/README
@@ -1,10 +1,10 @@
DB_File
- Version 1.76
+ Version 1.805
- 15th January 2001
+ 1st Sep 2002
- Copyright (c) 1995-2001 Paul Marquess. All rights reserved. This
+ Copyright (c) 1995-2002 Paul Marquess. All rights reserved. This
program is free software; you can redistribute it and/or modify
it under the same terms as Perl itself.
@@ -22,11 +22,11 @@ DESCRIPTION
DB_File is a module which allows Perl programs to make use of the
facilities provided by Berkeley DB version 1. (DB_File can be built
-version 2 or 3 of Berkeley DB, but it will only support the 1.x
+version 2, 3 or 4 of Berkeley DB, but it will only support the 1.x
features),
If you want to make use of the new features available in Berkeley DB
-2.x or 3.x, use the Perl module BerkeleyDB instead.
+2.x, 3.x or 4.x, use the Perl module BerkeleyDB instead.
Berkeley DB is a C library which provides a consistent interface to a
number of database formats. DB_File provides an interface to all three
@@ -42,7 +42,7 @@ PREREQUISITES
Before you can build DB_File you must have the following installed on
your system:
- * Perl 5.004 or greater.
+ * Perl 5.004_05 or greater.
* Berkeley DB.
@@ -57,8 +57,8 @@ your system:
compatible version of Berkeley DB.
If you want to use Berkeley DB 2.x, you must have version 2.3.4
- or greater. If you want to use Berkeley DB 3.x, any version will
- do. For Berkeley DB 1.x, use either version 1.85 or 1.86.
+ or greater. If you want to use Berkeley DB 3.x or 4.x, any version
+ will do. For Berkeley DB 1.x, use either version 1.85 or 1.86.
BUILDING THE MODULE
@@ -68,7 +68,7 @@ Assuming you have met all the prerequisites, building the module should
be relatively straightforward.
Step 1 : If you are running either Solaris 2.5 or HP-UX 10 and want
- to use Berkeley DB version 2 or 3, read either the Solaris Notes
+ to use Berkeley DB version 2, 3 or 4, read either the Solaris Notes
or HP-UX Notes sections below. If you are running Linux please
read the Linux Notes section before proceeding.
@@ -162,6 +162,35 @@ Solution: Setting the LIB & INCLUDE variables in config.in to point to the
problem is to either delete or temporarily rename the copies
of db.h and libdb.a that you don't want DB_File to use.
+
+Undefined symbol dbopen
+-----------------------
+
+DB_File seems to have built correctly, but you get an error like this
+when you run the test harness:
+
+ ...
+ t/db-btree..........Can't load 'blib/arch/auto/DB_File/DB_File.so' for
+ module DB_File: blib/arch/auto/DB_File/DB_File.so: undefined symbol:
+ dbopen at /usr/local/lib/perl5/5.6.1/i586-linux/DynaLoader.pm line 206.
+ at t/db-btree.t line 23
+ Compilation failed in require at t/db-btree.t line 23.
+ ...
+
+This error usually happens when you have both version 1 and a more recent
+version of Berkeley DB installed on your system and DB_File attempts
+to build using the db.h for Berkeley DB version 1 and the newer version
+library. Unfortunately the two versions aren't compatible with each
+other. The undefined symbol error arises because versions
+of Berkeley DB newer than version 1 don't have the symbol dbopen.
+
+Solution: Setting the LIB & INCLUDE variables in config.in to point to the
+ correct directories can sometimes be enough to fix this
+ problem. If that doesn't work the easiest way to fix the
+ problem is to either delete or temporarily rename the copies
+ of db.h and libdb.a that you don't want DB_File to use.
+
+
Incompatible versions of db.h and libdb
---------------------------------------
@@ -298,8 +327,8 @@ To find out if you have the patch installed, the command "showrev -p"
will display the patches that are currently installed on your system.
-HP-UX Notes
------------
+HP-UX 10 Notes
+--------------
Some people running HP-UX 10 have reported getting an error like this
when building DB_File with the native HP-UX compiler.
@@ -324,6 +353,22 @@ following steps should do the trick:
3: Build and install the Berkeley DB distribution as usual.
+HP-UX 11 Notes
+--------------
+
+Some people running the combination of HP-UX 11 and Berkeley DB 2.7.7 have
+reported getting this error when they run the test harness for DB_File:
+
+ ...
+ lib/db-btree.........Can't call method "DELETE" on an undefined value at lib/db-btree.t line 216.
+ FAILED at test 26
+ lib/db-hash..........Can't call method "DELETE" on an undefined value at lib/db-hash.t line 183.
+ FAILED at test 22
+ ...
+
+The fix for this is to rebuild and install Berkeley DB with the bigfile
+option disabled.
+
IRIX NOTES
----------
@@ -338,21 +383,23 @@ FEEDBACK
How to report a problem with DB_File.
-To help me help you, I need the following information:
+When reporting any problem, I need the information requested below.
+
+ 1. The *complete* output from running this
- 1. The version of Perl and the operating system name and version you
- are running. The *complete* output from running "perl -V" will
- tell me all I need to know. Don't edit the output in any way. Note,
- I want you to run "perl -V" and NOT "perl -v".
+ perl -V
- If your perl does not understand the "-V" option it is too old. DB_File
- needs Perl version 5.004 or better.
+ Do not edit the output in any way.
+ Note, I want you to run "perl -V" and NOT "perl -v".
+
+ If your perl does not understand the "-V" option it is too
+ old. DB_File needs Perl version 5.00405 or better.
2. The version of DB_File you have.
If you have successfully installed DB_File, this one-liner will
tell you:
- perl -e 'use DB_File; print "DB_File ver $DB_File::VERSION\n"'
+ perl -e 'use DB_File; print qq{DB_File ver $DB_File::VERSION\n}'
If you haven't installed DB_File then search DB_File.pm for a line
like this:
@@ -367,12 +414,27 @@ To help me help you, I need the following information:
If you have successfully installed DB_File, this command will display
the version of Berkeley DB it was built with:
- perl -e 'use DB_File; print "Berkeley DB ver $DB_File::db_ver\n"'
+ perl -e 'use DB_File; print qq{Berkeley DB ver $DB_File::db_ver\n}'
+
+ 4. A copy of the file config.in from the DB_File main source directory.
+
+ 5. A listing of directories where Berkeley DB is installed.
+ For example, if Berkeley DB is installed in /usr/BerkeleyDB/lib and
+ /usr/BerkeleyDB/include, I need the output from running this
+
+ ls -l /usr/BerkeleyDB/lib
+ ls -l /usr/BerkeleyDB/include
+
+ 6. If you are having problems building DB_File, send me a complete log
+ of what happened. Start by unpacking the DB_File module into a fresh
+ directory and keep a log of all the steps
- 4. If you are having problems building DB_File, send me a complete log
- of what happened.
+ [edit config.in, if necessary]
+ perl Makefile.PL
+ make
+ make test TEST_VERBOSE=1
- 5. Now the difficult one. If you think you have found a bug in DB_File
+ 7. Now the difficult one. If you think you have found a bug in DB_File
and you want me to fix it, you will *greatly* enhance the chances
of me being able to track it down by sending me a small
self-contained Perl script that illustrates the problem you are
diff --git a/bdb/perl.DB_File/config.in b/bdb/perl/DB_File/config.in
index 5bda4a66762..292b09a5fb3 100644
--- a/bdb/perl.DB_File/config.in
+++ b/bdb/perl/DB_File/config.in
@@ -9,20 +9,18 @@
# Change the path below to point to the directory where db.h is
# installed on your system.
+INCLUDE = /usr/local/BerkeleyDB/include
#INCLUDE = /usr/local/include
-#INCLUDE = /usr/local/BerkeleyDB/include
#INCLUDE = /usr/include
-INCLUDE = ./libraries/3.2.7
# 2. Where is libdb?
#
# Change the path below to point to the directory where libdb is
# installed on your system.
+LIB = /usr/local/BerkeleyDB/lib
#LIB = /usr/local/lib
-#LIB = /usr/local/BerkeleyDB/lib
#LIB = /usr/lib
-LIB = ./libraries/3.2.7
# 3. What version of Berkeley DB have you got?
#
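
The INCLUDE and LIB settings above can also be supplied through the environment: ParseCONFIG() in Makefile.PL gives DB_FILE_INCLUDE and DB_FILE_LIB precedence over config.in. A minimal sketch of a build driver relying on that hook (the script name and paths are hypothetical):

    # build_dbfile.pl -- hypothetical driver; the environment variables
    # below are read by ParseCONFIG() and override INCLUDE/LIB in config.in.
    $ENV{DB_FILE_INCLUDE} = '/opt/BerkeleyDB.4.1/include';
    $ENV{DB_FILE_LIB}     = '/opt/BerkeleyDB.4.1/lib';
    system($^X, 'Makefile.PL') == 0 or die "Makefile.PL failed: $?";
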
diff --git a/bdb/perl.DB_File/dbinfo b/bdb/perl/DB_File/dbinfo
index 5a4df15907e..af2c45facf5 100644
--- a/bdb/perl.DB_File/dbinfo
+++ b/bdb/perl/DB_File/dbinfo
@@ -7,7 +7,7 @@
# Version: 1.03
# Date 17th September 2000
#
-# Copyright (c) 1998-2000 Paul Marquess. All rights reserved.
+# Copyright (c) 1998-2002 Paul Marquess. All rights reserved.
# This program is free software; you can redistribute it and/or
# modify it under the same terms as Perl itself.
@@ -29,7 +29,8 @@ my %Data =
5 => "2.0.0 -> 2.3.0",
6 => "2.3.1 -> 2.7.7",
7 => "3.0.x",
- 8 => "3.1.x or greater",
+ 8 => "3.1.x -> 4.0.x",
+ 9 => "4.1.x or greater",
}
},
0x061561 => {
@@ -42,7 +43,8 @@ my %Data =
4 => "2.0.0 -> 2.1.0",
5 => "2.2.6 -> 2.7.7",
6 => "3.0.x",
- 7 => "3.1.x or greater",
+ 7 => "3.1.x -> 4.0.x",
+ 8 => "4.1.x or greater",
}
},
0x042253 => {
@@ -51,7 +53,8 @@ my %Data =
{
1 => "3.0.x",
2 => "3.1.x",
- 3 => "3.2.x or greater",
+ 3 => "3.2.x -> 4.0.x",
+ 4 => "4.1.x or greater",
}
},
) ;
diff --git a/bdb/perl/DB_File/fallback.h b/bdb/perl/DB_File/fallback.h
new file mode 100644
index 00000000000..0213308a0ee
--- /dev/null
+++ b/bdb/perl/DB_File/fallback.h
@@ -0,0 +1,455 @@
+#define PERL_constant_NOTFOUND 1
+#define PERL_constant_NOTDEF 2
+#define PERL_constant_ISIV 3
+#define PERL_constant_ISNO 4
+#define PERL_constant_ISNV 5
+#define PERL_constant_ISPV 6
+#define PERL_constant_ISPVN 7
+#define PERL_constant_ISSV 8
+#define PERL_constant_ISUNDEF 9
+#define PERL_constant_ISUV 10
+#define PERL_constant_ISYES 11
+
+#ifndef NVTYPE
+typedef double NV; /* 5.6 and later define NVTYPE, and typedef NV to it. */
+#endif
+#ifndef aTHX_
+#define aTHX_ /* 5.6 or later define this for threading support. */
+#endif
+#ifndef pTHX_
+#define pTHX_ /* 5.6 or later define this for threading support. */
+#endif
+
+static int
+constant_6 (pTHX_ const char *name, IV *iv_return) {
+ /* When generated this function returned values for the list of names given
+ here. However, subsequent manual editing may have added or removed some.
+ DB_TXN R_LAST R_NEXT R_PREV */
+ /* Offset 2 gives the best switch position. */
+ switch (name[2]) {
+ case 'L':
+ if (memEQ(name, "R_LAST", 6)) {
+ /* ^ */
+#ifdef R_LAST
+ *iv_return = R_LAST;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'N':
+ if (memEQ(name, "R_NEXT", 6)) {
+ /* ^ */
+#ifdef R_NEXT
+ *iv_return = R_NEXT;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'P':
+ if (memEQ(name, "R_PREV", 6)) {
+ /* ^ */
+#ifdef R_PREV
+ *iv_return = R_PREV;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case '_':
+ if (memEQ(name, "DB_TXN", 6)) {
+ /* ^ */
+#ifdef DB_TXN
+ *iv_return = DB_TXN;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ }
+ return PERL_constant_NOTFOUND;
+}
+
+static int
+constant_7 (pTHX_ const char *name, IV *iv_return) {
+ /* When generated this function returned values for the list of names given
+ here. However, subsequent manual editing may have added or removed some.
+ DB_LOCK R_FIRST R_NOKEY */
+ /* Offset 3 gives the best switch position. */
+ switch (name[3]) {
+ case 'I':
+ if (memEQ(name, "R_FIRST", 7)) {
+ /* ^ */
+#ifdef R_FIRST
+ *iv_return = R_FIRST;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'L':
+ if (memEQ(name, "DB_LOCK", 7)) {
+ /* ^ */
+#ifdef DB_LOCK
+ *iv_return = DB_LOCK;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'O':
+ if (memEQ(name, "R_NOKEY", 7)) {
+ /* ^ */
+#ifdef R_NOKEY
+ *iv_return = R_NOKEY;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ }
+ return PERL_constant_NOTFOUND;
+}
+
+static int
+constant_8 (pTHX_ const char *name, IV *iv_return) {
+ /* When generated this function returned values for the list of names given
+ here. However, subsequent manual editing may have added or removed some.
+ DB_SHMEM R_CURSOR R_IAFTER */
+ /* Offset 5 gives the best switch position. */
+ switch (name[5]) {
+ case 'M':
+ if (memEQ(name, "DB_SHMEM", 8)) {
+ /* ^ */
+#ifdef DB_SHMEM
+ *iv_return = DB_SHMEM;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'S':
+ if (memEQ(name, "R_CURSOR", 8)) {
+ /* ^ */
+#ifdef R_CURSOR
+ *iv_return = R_CURSOR;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'T':
+ if (memEQ(name, "R_IAFTER", 8)) {
+ /* ^ */
+#ifdef R_IAFTER
+ *iv_return = R_IAFTER;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ }
+ return PERL_constant_NOTFOUND;
+}
+
+static int
+constant_9 (pTHX_ const char *name, IV *iv_return) {
+ /* When generated this function returned values for the list of names given
+ here. However, subsequent manual editing may have added or removed some.
+ HASHMAGIC RET_ERROR R_IBEFORE */
+ /* Offset 7 gives the best switch position. */
+ switch (name[7]) {
+ case 'I':
+ if (memEQ(name, "HASHMAGIC", 9)) {
+ /* ^ */
+#ifdef HASHMAGIC
+ *iv_return = HASHMAGIC;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'O':
+ if (memEQ(name, "RET_ERROR", 9)) {
+ /* ^ */
+#ifdef RET_ERROR
+ *iv_return = RET_ERROR;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'R':
+ if (memEQ(name, "R_IBEFORE", 9)) {
+ /* ^ */
+#ifdef R_IBEFORE
+ *iv_return = R_IBEFORE;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ }
+ return PERL_constant_NOTFOUND;
+}
+
+static int
+constant_10 (pTHX_ const char *name, IV *iv_return) {
+ /* When generated this function returned values for the list of names given
+ here. However, subsequent manual editing may have added or removed some.
+ BTREEMAGIC R_FIXEDLEN R_SNAPSHOT __R_UNUSED */
+ /* Offset 5 gives the best switch position. */
+ switch (name[5]) {
+ case 'E':
+ if (memEQ(name, "R_FIXEDLEN", 10)) {
+ /* ^ */
+#ifdef R_FIXEDLEN
+ *iv_return = R_FIXEDLEN;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'M':
+ if (memEQ(name, "BTREEMAGIC", 10)) {
+ /* ^ */
+#ifdef BTREEMAGIC
+ *iv_return = BTREEMAGIC;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'N':
+ if (memEQ(name, "__R_UNUSED", 10)) {
+ /* ^ */
+#ifdef __R_UNUSED
+ *iv_return = __R_UNUSED;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'P':
+ if (memEQ(name, "R_SNAPSHOT", 10)) {
+ /* ^ */
+#ifdef R_SNAPSHOT
+ *iv_return = R_SNAPSHOT;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ }
+ return PERL_constant_NOTFOUND;
+}
+
+static int
+constant_11 (pTHX_ const char *name, IV *iv_return) {
+ /* When generated this function returned values for the list of names given
+ here. However, subsequent manual editing may have added or removed some.
+ HASHVERSION RET_SPECIAL RET_SUCCESS R_RECNOSYNC R_SETCURSOR */
+ /* Offset 10 gives the best switch position. */
+ switch (name[10]) {
+ case 'C':
+ if (memEQ(name, "R_RECNOSYNC", 11)) {
+ /* ^ */
+#ifdef R_RECNOSYNC
+ *iv_return = R_RECNOSYNC;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'L':
+ if (memEQ(name, "RET_SPECIAL", 11)) {
+ /* ^ */
+#ifdef RET_SPECIAL
+ *iv_return = RET_SPECIAL;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'N':
+ if (memEQ(name, "HASHVERSION", 11)) {
+ /* ^ */
+#ifdef HASHVERSION
+ *iv_return = HASHVERSION;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'R':
+ if (memEQ(name, "R_SETCURSOR", 11)) {
+ /* ^ */
+#ifdef R_SETCURSOR
+ *iv_return = R_SETCURSOR;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'S':
+ if (memEQ(name, "RET_SUCCESS", 11)) {
+ /* ^ */
+#ifdef RET_SUCCESS
+ *iv_return = RET_SUCCESS;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ }
+ return PERL_constant_NOTFOUND;
+}
+
+static int
+constant (pTHX_ const char *name, STRLEN len, IV *iv_return) {
+ /* Initially switch on the length of the name. */
+ /* When generated this function returned values for the list of names given
+ in this section of perl code. Rather than manually editing these functions
+ to add or remove constants, which would result in this comment and section
+ of code becoming inaccurate, we recommend that you edit this section of
+ code, and use it to regenerate a new set of constant functions which you
+ then use to replace the originals.
+
+ Regenerate these constant functions by feeding this entire source file to
+ perl -x
+
+#!bleedperl -w
+use ExtUtils::Constant qw (constant_types C_constant XS_constant);
+
+my $types = {map {($_, 1)} qw(IV)};
+my @names = (qw(BTREEMAGIC BTREEVERSION DB_LOCK DB_SHMEM DB_TXN HASHMAGIC
+ HASHVERSION MAX_PAGE_NUMBER MAX_PAGE_OFFSET MAX_REC_NUMBER
+ RET_ERROR RET_SPECIAL RET_SUCCESS R_CURSOR R_DUP R_FIRST
+ R_FIXEDLEN R_IAFTER R_IBEFORE R_LAST R_NEXT R_NOKEY
+ R_NOOVERWRITE R_PREV R_RECNOSYNC R_SETCURSOR R_SNAPSHOT
+ __R_UNUSED));
+
+print constant_types(); # macro defs
+foreach (C_constant ("DB_File", 'constant', 'IV', $types, undef, 3, @names) ) {
+ print $_, "\n"; # C constant subs
+}
+print "#### XS Section:\n";
+print XS_constant ("DB_File", $types);
+__END__
+ */
+
+ switch (len) {
+ case 5:
+ if (memEQ(name, "R_DUP", 5)) {
+#ifdef R_DUP
+ *iv_return = R_DUP;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 6:
+ return constant_6 (aTHX_ name, iv_return);
+ break;
+ case 7:
+ return constant_7 (aTHX_ name, iv_return);
+ break;
+ case 8:
+ return constant_8 (aTHX_ name, iv_return);
+ break;
+ case 9:
+ return constant_9 (aTHX_ name, iv_return);
+ break;
+ case 10:
+ return constant_10 (aTHX_ name, iv_return);
+ break;
+ case 11:
+ return constant_11 (aTHX_ name, iv_return);
+ break;
+ case 12:
+ if (memEQ(name, "BTREEVERSION", 12)) {
+#ifdef BTREEVERSION
+ *iv_return = BTREEVERSION;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 13:
+ if (memEQ(name, "R_NOOVERWRITE", 13)) {
+#ifdef R_NOOVERWRITE
+ *iv_return = R_NOOVERWRITE;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 14:
+ if (memEQ(name, "MAX_REC_NUMBER", 14)) {
+#ifdef MAX_REC_NUMBER
+ *iv_return = MAX_REC_NUMBER;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 15:
+ /* Names all of length 15. */
+ /* MAX_PAGE_NUMBER MAX_PAGE_OFFSET */
+ /* Offset 9 gives the best switch position. */
+ switch (name[9]) {
+ case 'N':
+ if (memEQ(name, "MAX_PAGE_NUMBER", 15)) {
+ /* ^ */
+#ifdef MAX_PAGE_NUMBER
+ *iv_return = MAX_PAGE_NUMBER;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ case 'O':
+ if (memEQ(name, "MAX_PAGE_OFFSET", 15)) {
+ /* ^ */
+#ifdef MAX_PAGE_OFFSET
+ *iv_return = MAX_PAGE_OFFSET;
+ return PERL_constant_ISIV;
+#else
+ return PERL_constant_NOTDEF;
+#endif
+ }
+ break;
+ }
+ break;
+ }
+ return PERL_constant_NOTFOUND;
+}
+
diff --git a/bdb/perl/DB_File/fallback.xs b/bdb/perl/DB_File/fallback.xs
new file mode 100644
index 00000000000..8650cdf7646
--- /dev/null
+++ b/bdb/perl/DB_File/fallback.xs
@@ -0,0 +1,88 @@
+void
+constant(sv)
+ PREINIT:
+#ifdef dXSTARG
+ dXSTARG; /* Faster if we have it. */
+#else
+ dTARGET;
+#endif
+ STRLEN len;
+ int type;
+ IV iv;
+ /* NV nv; Uncomment this if you need to return NVs */
+ /* const char *pv; Uncomment this if you need to return PVs */
+ INPUT:
+ SV * sv;
+ const char * s = SvPV(sv, len);
+ PPCODE:
+ /* Change this to constant(aTHX_ s, len, &iv, &nv);
+ if you need to return both NVs and IVs */
+ type = constant(aTHX_ s, len, &iv);
+ /* Return 1 or 2 items. First is error message, or undef if no error.
+ Second, if present, is found value */
+ switch (type) {
+ case PERL_constant_NOTFOUND:
+ sv = sv_2mortal(newSVpvf("%s is not a valid DB_File macro", s));
+ PUSHs(sv);
+ break;
+ case PERL_constant_NOTDEF:
+ sv = sv_2mortal(newSVpvf(
+ "Your vendor has not defined DB_File macro %s, used", s));
+ PUSHs(sv);
+ break;
+ case PERL_constant_ISIV:
+ EXTEND(SP, 1);
+ PUSHs(&PL_sv_undef);
+ PUSHi(iv);
+ break;
+ /* Uncomment this if you need to return NOs
+ case PERL_constant_ISNO:
+ EXTEND(SP, 1);
+ PUSHs(&PL_sv_undef);
+ PUSHs(&PL_sv_no);
+ break; */
+ /* Uncomment this if you need to return NVs
+ case PERL_constant_ISNV:
+ EXTEND(SP, 1);
+ PUSHs(&PL_sv_undef);
+ PUSHn(nv);
+ break; */
+ /* Uncomment this if you need to return PVs
+ case PERL_constant_ISPV:
+ EXTEND(SP, 1);
+ PUSHs(&PL_sv_undef);
+ PUSHp(pv, strlen(pv));
+ break; */
+ /* Uncomment this if you need to return PVNs
+ case PERL_constant_ISPVN:
+ EXTEND(SP, 1);
+ PUSHs(&PL_sv_undef);
+ PUSHp(pv, iv);
+ break; */
+ /* Uncomment this if you need to return SVs
+ case PERL_constant_ISSV:
+ EXTEND(SP, 1);
+ PUSHs(&PL_sv_undef);
+ PUSHs(sv);
+ break; */
+ /* Uncomment this if you need to return UNDEFs
+ case PERL_constant_ISUNDEF:
+ break; */
+ /* Uncomment this if you need to return UVs
+ case PERL_constant_ISUV:
+ EXTEND(SP, 1);
+ PUSHs(&PL_sv_undef);
+ PUSHu((UV)iv);
+ break; */
+ /* Uncomment this if you need to return YESs
+ case PERL_constant_ISYES:
+ EXTEND(SP, 1);
+ PUSHs(&PL_sv_undef);
+ PUSHs(&PL_sv_yes);
+ break; */
+ default:
+ sv = sv_2mortal(newSVpvf(
+ "Unexpected return type %d while processing DB_File macro %s, used",
+ type, s));
+ PUSHs(sv);
+ }
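
fallback.h and fallback.xs together reproduce what ExtUtils::Constant would generate, so the flag and magic-number names stay available as exported constants even when that module is not installed. From the Perl side the result is simply (a sketch, assuming DB_File itself is installed and the constants are defined by the db headers):

    use DB_File;    # exports the names checked against @EXPORT in Makefile.PL
    print "R_DUP         = ", R_DUP, "\n";
    print "R_NOOVERWRITE = ", R_NOOVERWRITE, "\n";
    print "RET_SUCCESS   = ", RET_SUCCESS, "\n";
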
diff --git a/bdb/perl.DB_File/hints/dynixptx.pl b/bdb/perl/DB_File/hints/dynixptx.pl
index bb5ffa56e6b..bb5ffa56e6b 100644
--- a/bdb/perl.DB_File/hints/dynixptx.pl
+++ b/bdb/perl/DB_File/hints/dynixptx.pl
diff --git a/bdb/perl.DB_File/hints/sco.pl b/bdb/perl/DB_File/hints/sco.pl
index ff604409496..ff604409496 100644
--- a/bdb/perl.DB_File/hints/sco.pl
+++ b/bdb/perl/DB_File/hints/sco.pl
diff --git a/bdb/perl.DB_File/patches/5.004 b/bdb/perl/DB_File/patches/5.004
index 143ec95afbc..143ec95afbc 100644
--- a/bdb/perl.DB_File/patches/5.004
+++ b/bdb/perl/DB_File/patches/5.004
diff --git a/bdb/perl.DB_File/patches/5.004_01 b/bdb/perl/DB_File/patches/5.004_01
index 1b05eb4e02b..1b05eb4e02b 100644
--- a/bdb/perl.DB_File/patches/5.004_01
+++ b/bdb/perl/DB_File/patches/5.004_01
diff --git a/bdb/perl.DB_File/patches/5.004_02 b/bdb/perl/DB_File/patches/5.004_02
index 238f8737941..238f8737941 100644
--- a/bdb/perl.DB_File/patches/5.004_02
+++ b/bdb/perl/DB_File/patches/5.004_02
diff --git a/bdb/perl.DB_File/patches/5.004_03 b/bdb/perl/DB_File/patches/5.004_03
index 06331eac922..06331eac922 100644
--- a/bdb/perl.DB_File/patches/5.004_03
+++ b/bdb/perl/DB_File/patches/5.004_03
diff --git a/bdb/perl.DB_File/patches/5.004_04 b/bdb/perl/DB_File/patches/5.004_04
index a227dc700d9..a227dc700d9 100644
--- a/bdb/perl.DB_File/patches/5.004_04
+++ b/bdb/perl/DB_File/patches/5.004_04
diff --git a/bdb/perl.DB_File/patches/5.004_05 b/bdb/perl/DB_File/patches/5.004_05
index 51c8bf35009..51c8bf35009 100644
--- a/bdb/perl.DB_File/patches/5.004_05
+++ b/bdb/perl/DB_File/patches/5.004_05
diff --git a/bdb/perl.DB_File/patches/5.005 b/bdb/perl/DB_File/patches/5.005
index effee3e8275..effee3e8275 100644
--- a/bdb/perl.DB_File/patches/5.005
+++ b/bdb/perl/DB_File/patches/5.005
diff --git a/bdb/perl.DB_File/patches/5.005_01 b/bdb/perl/DB_File/patches/5.005_01
index 2a05dd545f6..2a05dd545f6 100644
--- a/bdb/perl.DB_File/patches/5.005_01
+++ b/bdb/perl/DB_File/patches/5.005_01
diff --git a/bdb/perl.DB_File/patches/5.005_02 b/bdb/perl/DB_File/patches/5.005_02
index 5dd57ddc03f..5dd57ddc03f 100644
--- a/bdb/perl.DB_File/patches/5.005_02
+++ b/bdb/perl/DB_File/patches/5.005_02
diff --git a/bdb/perl.DB_File/patches/5.005_03 b/bdb/perl/DB_File/patches/5.005_03
index 115f9f5b909..115f9f5b909 100644
--- a/bdb/perl.DB_File/patches/5.005_03
+++ b/bdb/perl/DB_File/patches/5.005_03
diff --git a/bdb/perl.DB_File/patches/5.6.0 b/bdb/perl/DB_File/patches/5.6.0
index 1f9b3b620de..1f9b3b620de 100644
--- a/bdb/perl.DB_File/patches/5.6.0
+++ b/bdb/perl/DB_File/patches/5.6.0
diff --git a/bdb/perl/DB_File/ppport.h b/bdb/perl/DB_File/ppport.h
new file mode 100644
index 00000000000..0887c2159a9
--- /dev/null
+++ b/bdb/perl/DB_File/ppport.h
@@ -0,0 +1,329 @@
+/* This file is based on output from
+ * Perl/Pollution/Portability Version 2.0000 */
+
+#ifndef _P_P_PORTABILITY_H_
+#define _P_P_PORTABILITY_H_
+
+#ifndef PERL_REVISION
+# ifndef __PATCHLEVEL_H_INCLUDED__
+# include "patchlevel.h"
+# endif
+# ifndef PERL_REVISION
+# define PERL_REVISION (5)
+ /* Replace: 1 */
+# define PERL_VERSION PATCHLEVEL
+# define PERL_SUBVERSION SUBVERSION
+ /* Replace PERL_PATCHLEVEL with PERL_VERSION */
+ /* Replace: 0 */
+# endif
+#endif
+
+#define PERL_BCDVERSION ((PERL_REVISION * 0x1000000L) + (PERL_VERSION * 0x1000L) + PERL_SUBVERSION)
+
+#ifndef ERRSV
+# define ERRSV perl_get_sv("@",FALSE)
+#endif
+
+#if (PERL_VERSION < 4) || ((PERL_VERSION == 4) && (PERL_SUBVERSION <= 5))
+/* Replace: 1 */
+# define PL_Sv Sv
+# define PL_compiling compiling
+# define PL_copline copline
+# define PL_curcop curcop
+# define PL_curstash curstash
+# define PL_defgv defgv
+# define PL_dirty dirty
+# define PL_hints hints
+# define PL_na na
+# define PL_perldb perldb
+# define PL_rsfp_filters rsfp_filters
+# define PL_rsfp rsfp
+# define PL_stdingv stdingv
+# define PL_sv_no sv_no
+# define PL_sv_undef sv_undef
+# define PL_sv_yes sv_yes
+/* Replace: 0 */
+#endif
+
+#ifndef pTHX
+# define pTHX
+# define pTHX_
+# define aTHX
+# define aTHX_
+#endif
+
+#ifndef PTR2IV
+# define PTR2IV(d) (IV)(d)
+#endif
+
+#ifndef INT2PTR
+# define INT2PTR(any,d) (any)(d)
+#endif
+
+#ifndef dTHR
+# ifdef WIN32
+# define dTHR extern int Perl___notused
+# else
+# define dTHR extern int errno
+# endif
+#endif
+
+#ifndef boolSV
+# define boolSV(b) ((b) ? &PL_sv_yes : &PL_sv_no)
+#endif
+
+#ifndef gv_stashpvn
+# define gv_stashpvn(str,len,flags) gv_stashpv(str,flags)
+#endif
+
+#ifndef newSVpvn
+# define newSVpvn(data,len) ((len) ? newSVpv ((data), (len)) : newSVpv ("", 0))
+#endif
+
+#ifndef newRV_inc
+/* Replace: 1 */
+# define newRV_inc(sv) newRV(sv)
+/* Replace: 0 */
+#endif
+
+/* DEFSV appears first in 5.004_56 */
+#ifndef DEFSV
+# define DEFSV GvSV(PL_defgv)
+#endif
+
+#ifndef SAVE_DEFSV
+# define SAVE_DEFSV SAVESPTR(GvSV(PL_defgv))
+#endif
+
+#ifndef newRV_noinc
+# ifdef __GNUC__
+# define newRV_noinc(sv) \
+ ({ \
+ SV *nsv = (SV*)newRV(sv); \
+ SvREFCNT_dec(sv); \
+ nsv; \
+ })
+# else
+# if defined(CRIPPLED_CC) || defined(USE_THREADS)
+static SV * newRV_noinc (SV * sv)
+{
+ SV *nsv = (SV*)newRV(sv);
+ SvREFCNT_dec(sv);
+ return nsv;
+}
+# else
+# define newRV_noinc(sv) \
+ ((PL_Sv=(SV*)newRV(sv), SvREFCNT_dec(sv), (SV*)PL_Sv)
+# endif
+# endif
+#endif
+
+/* Provide: newCONSTSUB */
+
+/* newCONSTSUB from IO.xs is in the core starting with 5.004_63 */
+#if (PERL_VERSION < 4) || ((PERL_VERSION == 4) && (PERL_SUBVERSION < 63))
+
+#if defined(NEED_newCONSTSUB)
+static
+#else
+extern void newCONSTSUB _((HV * stash, char * name, SV *sv));
+#endif
+
+#if defined(NEED_newCONSTSUB) || defined(NEED_newCONSTSUB_GLOBAL)
+void
+newCONSTSUB(stash,name,sv)
+HV *stash;
+char *name;
+SV *sv;
+{
+ U32 oldhints = PL_hints;
+ HV *old_cop_stash = PL_curcop->cop_stash;
+ HV *old_curstash = PL_curstash;
+ line_t oldline = PL_curcop->cop_line;
+ PL_curcop->cop_line = PL_copline;
+
+ PL_hints &= ~HINT_BLOCK_SCOPE;
+ if (stash)
+ PL_curstash = PL_curcop->cop_stash = stash;
+
+ newSUB(
+
+#if (PERL_VERSION < 3) || ((PERL_VERSION == 3) && (PERL_SUBVERSION < 22))
+ /* before 5.003_22 */
+ start_subparse(),
+#else
+# if (PERL_VERSION == 3) && (PERL_SUBVERSION == 22)
+ /* 5.003_22 */
+ start_subparse(0),
+# else
+ /* 5.003_23 onwards */
+ start_subparse(FALSE, 0),
+# endif
+#endif
+
+ newSVOP(OP_CONST, 0, newSVpv(name,0)),
+ newSVOP(OP_CONST, 0, &PL_sv_no), /* SvPV(&PL_sv_no) == "" -- GMB */
+ newSTATEOP(0, Nullch, newSVOP(OP_CONST, 0, sv))
+ );
+
+ PL_hints = oldhints;
+ PL_curcop->cop_stash = old_cop_stash;
+ PL_curstash = old_curstash;
+ PL_curcop->cop_line = oldline;
+}
+#endif
+
+#endif /* newCONSTSUB */
+
+
+#ifndef START_MY_CXT
+
+/*
+ * Boilerplate macros for initializing and accessing interpreter-local
+ * data from C. All statics in extensions should be reworked to use
+ * this, if you want to make the extension thread-safe. See ext/re/re.xs
+ * for an example of the use of these macros.
+ *
+ * Code that uses these macros is responsible for the following:
+ * 1. #define MY_CXT_KEY to a unique string, e.g. "DynaLoader_guts"
+ * 2. Declare a typedef named my_cxt_t that is a structure that contains
+ * all the data that needs to be interpreter-local.
+ * 3. Use the START_MY_CXT macro after the declaration of my_cxt_t.
+ * 4. Use the MY_CXT_INIT macro such that it is called exactly once
+ * (typically put in the BOOT: section).
+ * 5. Use the members of the my_cxt_t structure everywhere as
+ * MY_CXT.member.
+ * 6. Use the dMY_CXT macro (a declaration) in all the functions that
+ * access MY_CXT.
+ */
+
+#if defined(MULTIPLICITY) || defined(PERL_OBJECT) || \
+ defined(PERL_CAPI) || defined(PERL_IMPLICIT_CONTEXT)
+
+/* This must appear in all extensions that define a my_cxt_t structure,
+ * right after the definition (i.e. at file scope). The non-threads
+ * case below uses it to declare the data as static. */
+#define START_MY_CXT
+
+#if PERL_REVISION == 5 && \
+ (PERL_VERSION < 4 || (PERL_VERSION == 4 && PERL_SUBVERSION < 68 ))
+/* Fetches the SV that keeps the per-interpreter data. */
+#define dMY_CXT_SV \
+ SV *my_cxt_sv = perl_get_sv(MY_CXT_KEY, FALSE)
+#else /* >= perl5.004_68 */
+#define dMY_CXT_SV \
+ SV *my_cxt_sv = *hv_fetch(PL_modglobal, MY_CXT_KEY, \
+ sizeof(MY_CXT_KEY)-1, TRUE)
+#endif /* < perl5.004_68 */
+
+/* This declaration should be used within all functions that use the
+ * interpreter-local data. */
+#define dMY_CXT \
+ dMY_CXT_SV; \
+ my_cxt_t *my_cxtp = INT2PTR(my_cxt_t*,SvUV(my_cxt_sv))
+
+/* Creates and zeroes the per-interpreter data.
+ * (We allocate my_cxtp in a Perl SV so that it will be released when
+ * the interpreter goes away.) */
+#define MY_CXT_INIT \
+ dMY_CXT_SV; \
+ /* newSV() allocates one more than needed */ \
+ my_cxt_t *my_cxtp = (my_cxt_t*)SvPVX(newSV(sizeof(my_cxt_t)-1));\
+ Zero(my_cxtp, 1, my_cxt_t); \
+ sv_setuv(my_cxt_sv, PTR2UV(my_cxtp))
+
+/* This macro must be used to access members of the my_cxt_t structure.
+ * e.g. MYCXT.some_data */
+#define MY_CXT (*my_cxtp)
+
+/* Judicious use of these macros can reduce the number of times dMY_CXT
+ * is used. Use is similar to pTHX, aTHX etc. */
+#define pMY_CXT my_cxt_t *my_cxtp
+#define pMY_CXT_ pMY_CXT,
+#define _pMY_CXT ,pMY_CXT
+#define aMY_CXT my_cxtp
+#define aMY_CXT_ aMY_CXT,
+#define _aMY_CXT ,aMY_CXT
+
+#else /* single interpreter */
+
+#ifndef NOOP
+# define NOOP (void)0
+#endif
+
+#ifdef HASATTRIBUTE
+# define PERL_UNUSED_DECL __attribute__((unused))
+#else
+# define PERL_UNUSED_DECL
+#endif
+
+#ifndef dNOOP
+# define dNOOP extern int Perl___notused PERL_UNUSED_DECL
+#endif
+
+#define START_MY_CXT static my_cxt_t my_cxt;
+#define dMY_CXT_SV dNOOP
+#define dMY_CXT dNOOP
+#define MY_CXT_INIT NOOP
+#define MY_CXT my_cxt
+
+#define pMY_CXT void
+#define pMY_CXT_
+#define _pMY_CXT
+#define aMY_CXT
+#define aMY_CXT_
+#define _aMY_CXT
+
+#endif
+
+#endif /* START_MY_CXT */
+
+
+#ifndef DBM_setFilter
+
+/*
+ The DBM_setFilter & DBM_ckFilter macros are only used by
+ the *DB*_File modules
+*/
+
+#define DBM_setFilter(db_type,code) \
+ { \
+ if (db_type) \
+ RETVAL = sv_mortalcopy(db_type) ; \
+ ST(0) = RETVAL ; \
+ if (db_type && (code == &PL_sv_undef)) { \
+ SvREFCNT_dec(db_type) ; \
+ db_type = NULL ; \
+ } \
+ else if (code) { \
+ if (db_type) \
+ sv_setsv(db_type, code) ; \
+ else \
+ db_type = newSVsv(code) ; \
+ } \
+ }
+
+#define DBM_ckFilter(arg,type,name) \
+ if (db->type) { \
+ if (db->filtering) { \
+ croak("recursion detected in %s", name) ; \
+ } \
+ ENTER ; \
+ SAVETMPS ; \
+ SAVEINT(db->filtering) ; \
+ db->filtering = TRUE ; \
+ SAVESPTR(DEFSV) ; \
+ DEFSV = arg ; \
+ SvTEMP_off(arg) ; \
+ PUSHMARK(SP) ; \
+ PUTBACK ; \
+ (void) perl_call_sv(db->type, G_DISCARD); \
+ SPAGAIN ; \
+ PUTBACK ; \
+ FREETMPS ; \
+ LEAVE ; \
+ }
+
+#endif /* DBM_setFilter */
+
+#endif /* _P_P_PORTABILITY_H_ */
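
The DBM_setFilter and DBM_ckFilter macros above are the C side of DB_File's filter_store_*/filter_fetch_* methods. A minimal sketch of how those filters look from Perl (the file name is hypothetical):

    use DB_File;
    use Fcntl;

    my %h;
    my $db = tie(%h, 'DB_File', 'filtered.db', O_RDWR|O_CREAT, 0640, $DB_BTREE)
        or die "Cannot tie filtered.db: $!";
    # Store keys and values NUL-terminated (e.g. for C readers) and strip
    # the terminator again on the way back out.
    $db->filter_store_key  (sub { $_ .= "\0" });
    $db->filter_store_value(sub { $_ .= "\0" });
    $db->filter_fetch_key  (sub { s/\0$// });
    $db->filter_fetch_value(sub { s/\0$// });
    $h{"key"} = "value";
    untie %h;
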
diff --git a/bdb/perl.DB_File/t/db-btree.t b/bdb/perl/DB_File/t/db-btree.t
index 377cfe45c9b..a990a5c4ba5 100644
--- a/bdb/perl.DB_File/t/db-btree.t
+++ b/bdb/perl/DB_File/t/db-btree.t
@@ -1,8 +1,5 @@
#!./perl -w
-use warnings;
-use strict;
-
BEGIN {
unless(grep /blib/, @INC) {
chdir 't' if -d 't';
@@ -10,21 +7,34 @@ BEGIN {
}
}
+use warnings;
+use strict;
use Config;
BEGIN {
if(-d "lib" && -f "TEST") {
if ($Config{'extensions'} !~ /\bDB_File\b/ ) {
- print "1..157\n";
+ print "1..0 # Skip: DB_File was not built\n";
exit 0;
}
}
+ if ($^O eq 'darwin'
+ && $Config{db_version_major} == 1
+ && $Config{db_version_minor} == 0
+ && $Config{db_version_patch} == 0) {
+ warn <<EOM;
+#
+# This test is known to crash in Mac OS X versions 10.1.4 (or earlier)
+# because of the buggy Berkeley DB version included with the OS.
+#
+EOM
+ }
}
use DB_File;
use Fcntl;
-print "1..157\n";
+print "1..177\n";
sub ok
{
@@ -75,24 +85,41 @@ sub lexical
sub docat
{
my $file = shift;
- #local $/ = undef unless wantarray ;
+ local $/ = undef ;
open(CAT,$file) || die "Cannot open $file: $!";
- my @result = <CAT>;
+ my $result = <CAT>;
close(CAT);
- wantarray ? @result : join("", @result) ;
+ $result = normalise($result) ;
+ return $result ;
}
sub docat_del
{
my $file = shift;
- #local $/ = undef unless wantarray ;
- open(CAT,$file) || die "Cannot open $file: $!";
- my @result = <CAT>;
- close(CAT);
+ my $result = docat($file);
unlink $file ;
- wantarray ? @result : join("", @result) ;
+ return $result ;
}
+sub normalise
+{
+ my $data = shift ;
+ $data =~ s#\r\n#\n#g
+ if $^O eq 'cygwin' ;
+
+ return $data ;
+}
+
+sub safeUntie
+{
+ my $hashref = shift ;
+ my $no_inner = 1;
+ local $SIG{__WARN__} = sub {-- $no_inner } ;
+ untie %$hashref;
+ return $no_inner;
+}
+
+
my $db185mode = ($DB_File::db_version == 1 && ! $DB_File::db_185_compat) ;
my $null_keys_allowed = ($DB_File::db_ver < 2.004010
@@ -133,40 +160,39 @@ ok(13, $dbh->{minkeypage} == 123) ;
$dbh->{maxkeypage} = 1234 ;
ok(14, $dbh->{maxkeypage} == 1234 );
-$dbh->{compare} = 1234 ;
-ok(15, $dbh->{compare} == 1234) ;
-
-$dbh->{prefix} = 1234 ;
-ok(16, $dbh->{prefix} == 1234 );
-
# Check that an invalid entry is caught both for store & fetch
eval '$dbh->{fred} = 1234' ;
-ok(17, $@ =~ /^DB_File::BTREEINFO::STORE - Unknown element 'fred' at/ ) ;
+ok(15, $@ =~ /^DB_File::BTREEINFO::STORE - Unknown element 'fred' at/ ) ;
eval 'my $q = $dbh->{fred}' ;
-ok(18, $@ =~ /^DB_File::BTREEINFO::FETCH - Unknown element 'fred' at/ ) ;
+ok(16, $@ =~ /^DB_File::BTREEINFO::FETCH - Unknown element 'fred' at/ ) ;
# Now check the interface to BTREE
my ($X, %h) ;
-ok(19, $X = tie(%h, 'DB_File',$Dfile, O_RDWR|O_CREAT, 0640, $DB_BTREE )) ;
+ok(17, $X = tie(%h, 'DB_File',$Dfile, O_RDWR|O_CREAT, 0640, $DB_BTREE )) ;
+die "Could not tie: $!" unless $X;
my ($dev,$ino,$mode,$nlink,$uid,$gid,$rdev,$size,$atime,$mtime,$ctime,
$blksize,$blocks) = stat($Dfile);
-ok(20, ($mode & 0777) == ($^O eq 'os2' ? 0666 : 0640) || $^O eq 'amigaos' || $^O eq 'MSWin32');
+
+my %noMode = map { $_, 1} qw( amigaos MSWin32 NetWare cygwin ) ;
+
+ok(18, ($mode & 0777) == (($^O eq 'os2' || $^O eq 'MacOS') ? 0666 : 0640)
+ || $noMode{$^O} );
my ($key, $value, $i);
while (($key,$value) = each(%h)) {
$i++;
}
-ok(21, !$i ) ;
+ok(19, !$i ) ;
$h{'goner1'} = 'snork';
$h{'abc'} = 'ABC';
-ok(22, $h{'abc'} eq 'ABC' );
-ok(23, ! defined $h{'jimmy'} ) ;
-ok(24, ! exists $h{'jimmy'} ) ;
-ok(25, defined $h{'abc'} ) ;
+ok(20, $h{'abc'} eq 'ABC' );
+ok(21, ! defined $h{'jimmy'} ) ;
+ok(22, ! exists $h{'jimmy'} ) ;
+ok(23, defined $h{'abc'} ) ;
$h{'def'} = 'DEF';
$h{'jkl','mno'} = "JKL\034MNO";
@@ -197,7 +223,7 @@ undef $X ;
untie(%h);
# tie to the same file again
-ok(26, $X = tie(%h,'DB_File',$Dfile, O_RDWR, 0640, $DB_BTREE)) ;
+ok(24, $X = tie(%h,'DB_File',$Dfile, O_RDWR, 0640, $DB_BTREE)) ;
# Modify an entry from the previous tie
$h{'g'} = 'G';
@@ -228,7 +254,7 @@ $X->DELETE('goner3');
my @keys = keys(%h);
my @values = values(%h);
-ok(27, $#keys == 29 && $#values == 29) ;
+ok(25, $#keys == 29 && $#values == 29) ;
$i = 0 ;
while (($key,$value) = each(%h)) {
@@ -238,18 +264,18 @@ while (($key,$value) = each(%h)) {
}
}
-ok(28, $i == 30) ;
+ok(26, $i == 30) ;
@keys = ('blurfl', keys(%h), 'dyick');
-ok(29, $#keys == 31) ;
+ok(27, $#keys == 31) ;
#Check that the keys can be retrieved in order
my @b = keys %h ;
my @c = sort lexical @b ;
-ok(30, ArrayCompare(\@b, \@c)) ;
+ok(28, ArrayCompare(\@b, \@c)) ;
$h{'foo'} = '';
-ok(31, $h{'foo'} eq '' ) ;
+ok(29, $h{'foo'} eq '' ) ;
# Berkeley DB from version 2.4.10 to 3.0 does not allow null keys.
# This feature was reenabled in version 3.1 of Berkeley DB.
@@ -260,21 +286,21 @@ if ($null_keys_allowed) {
}
else
{ $result = 1 }
-ok(32, $result) ;
+ok(30, $result) ;
# check cache overflow and numeric keys and contents
my $ok = 1;
for ($i = 1; $i < 200; $i++) { $h{$i + 0} = $i + 0; }
for ($i = 1; $i < 200; $i++) { $ok = 0 unless $h{$i} == $i; }
-ok(33, $ok);
+ok(31, $ok);
($dev,$ino,$mode,$nlink,$uid,$gid,$rdev,$size,$atime,$mtime,$ctime,
$blksize,$blocks) = stat($Dfile);
-ok(34, $size > 0 );
+ok(32, $size > 0 );
@h{0..200} = 200..400;
my @foo = @h{0..200};
-ok(35, join(':',200..400) eq join(':',@foo) );
+ok(33, join(':',200..400) eq join(':',@foo) );
# Now check all the non-tie specific stuff
@@ -283,57 +309,57 @@ ok(35, join(':',200..400) eq join(':',@foo) );
# an existing record.
my $status = $X->put( 'x', 'newvalue', R_NOOVERWRITE) ;
-ok(36, $status == 1 );
+ok(34, $status == 1 );
# check that the value of the key 'x' has not been changed by the
# previous test
-ok(37, $h{'x'} eq 'X' );
+ok(35, $h{'x'} eq 'X' );
# standard put
$status = $X->put('key', 'value') ;
-ok(38, $status == 0 );
+ok(36, $status == 0 );
#check that previous put can be retrieved
$value = 0 ;
$status = $X->get('key', $value) ;
-ok(39, $status == 0 );
-ok(40, $value eq 'value' );
+ok(37, $status == 0 );
+ok(38, $value eq 'value' );
# Attempting to delete an existing key should work
$status = $X->del('q') ;
-ok(41, $status == 0 );
+ok(39, $status == 0 );
if ($null_keys_allowed) {
$status = $X->del('') ;
} else {
$status = 0 ;
}
-ok(42, $status == 0 );
+ok(40, $status == 0 );
# Make sure that the key deleted, cannot be retrieved
-ok(43, ! defined $h{'q'}) ;
-ok(44, ! defined $h{''}) ;
+ok(41, ! defined $h{'q'}) ;
+ok(42, ! defined $h{''}) ;
undef $X ;
untie %h ;
-ok(45, $X = tie(%h, 'DB_File',$Dfile, O_RDWR, 0640, $DB_BTREE ));
+ok(43, $X = tie(%h, 'DB_File',$Dfile, O_RDWR, 0640, $DB_BTREE ));
# Attempting to delete a non-existant key should fail
$status = $X->del('joe') ;
-ok(46, $status == 1 );
+ok(44, $status == 1 );
# Check the get interface
# First a non-existing key
$status = $X->get('aaaa', $value) ;
-ok(47, $status == 1 );
+ok(45, $status == 1 );
# Next an existing key
$status = $X->get('a', $value) ;
-ok(48, $status == 0 );
-ok(49, $value eq 'A' );
+ok(46, $status == 0 );
+ok(47, $value eq 'A' );
# seq
# ###
@@ -342,15 +368,15 @@ ok(49, $value eq 'A' );
$key = 'ke' ;
$value = '' ;
$status = $X->seq($key, $value, R_CURSOR) ;
-ok(50, $status == 0 );
-ok(51, $key eq 'key' );
-ok(52, $value eq 'value' );
+ok(48, $status == 0 );
+ok(49, $key eq 'key' );
+ok(50, $value eq 'value' );
# seq when the key does not match
$key = 'zzz' ;
$value = '' ;
$status = $X->seq($key, $value, R_CURSOR) ;
-ok(53, $status == 1 );
+ok(51, $status == 1 );
# use seq to set the cursor, then delete the record @ the cursor.
@@ -358,36 +384,36 @@ ok(53, $status == 1 );
$key = 'x' ;
$value = '' ;
$status = $X->seq($key, $value, R_CURSOR) ;
-ok(54, $status == 0 );
-ok(55, $key eq 'x' );
-ok(56, $value eq 'X' );
+ok(52, $status == 0 );
+ok(53, $key eq 'x' );
+ok(54, $value eq 'X' );
$status = $X->del(0, R_CURSOR) ;
-ok(57, $status == 0 );
+ok(55, $status == 0 );
$status = $X->get('x', $value) ;
-ok(58, $status == 1 );
+ok(56, $status == 1 );
# ditto, but use put to replace the key/value pair.
$key = 'y' ;
$value = '' ;
$status = $X->seq($key, $value, R_CURSOR) ;
-ok(59, $status == 0 );
-ok(60, $key eq 'y' );
-ok(61, $value eq 'Y' );
+ok(57, $status == 0 );
+ok(58, $key eq 'y' );
+ok(59, $value eq 'Y' );
$key = "replace key" ;
$value = "replace value" ;
$status = $X->put($key, $value, R_CURSOR) ;
-ok(62, $status == 0 );
-ok(63, $key eq 'replace key' );
-ok(64, $value eq 'replace value' );
+ok(60, $status == 0 );
+ok(61, $key eq 'replace key' );
+ok(62, $value eq 'replace value' );
$status = $X->get('y', $value) ;
-ok(65, 1) ; # hard-wire to always pass. the previous test ($status == 1)
+ok(63, 1) ; # hard-wire to always pass. the previous test ($status == 1)
# only worked because of a bug in 1.85/6
# use seq to walk forwards through a file
$status = $X->seq($key, $value, R_FIRST) ;
-ok(66, $status == 0 );
+ok(64, $status == 0 );
my $previous = $key ;
$ok = 1 ;
@@ -396,12 +422,12 @@ while (($status = $X->seq($key, $value, R_NEXT)) == 0)
($ok = 0), last if ($previous cmp $key) == 1 ;
}
-ok(67, $status == 1 );
-ok(68, $ok == 1 );
+ok(65, $status == 1 );
+ok(66, $ok == 1 );
# use seq to walk backwards through a file
$status = $X->seq($key, $value, R_LAST) ;
-ok(69, $status == 0 );
+ok(67, $status == 0 );
$previous = $key ;
$ok = 1 ;
@@ -411,8 +437,8 @@ while (($status = $X->seq($key, $value, R_PREV)) == 0)
#print "key = [$key] value = [$value]\n" ;
}
-ok(70, $status == 1 );
-ok(71, $ok == 1 );
+ok(68, $status == 1 );
+ok(69, $ok == 1 );
# check seq FIRST/LAST
@@ -421,14 +447,14 @@ ok(71, $ok == 1 );
# ####
$status = $X->sync ;
-ok(72, $status == 0 );
+ok(70, $status == 0 );
# fd
# ##
$status = $X->fd ;
-ok(73, $status != 0 );
+ok(71, $status != 0 );
undef $X ;
@@ -438,11 +464,11 @@ unlink $Dfile;
# Now try an in memory file
my $Y;
-ok(74, $Y = tie(%h, 'DB_File',undef, O_RDWR|O_CREAT, 0640, $DB_BTREE ));
+ok(72, $Y = tie(%h, 'DB_File',undef, O_RDWR|O_CREAT, 0640, $DB_BTREE ));
# fd with an in memory file should return failure
$status = $Y->fd ;
-ok(75, $status == -1 );
+ok(73, $status == -1 );
undef $Y ;
@@ -452,7 +478,7 @@ untie %h ;
my $bt = new DB_File::BTREEINFO ;
$bt->{flags} = R_DUP ;
my ($YY, %hh);
-ok(76, $YY = tie(%hh, 'DB_File', $Dfile, O_RDWR|O_CREAT, 0640, $bt )) ;
+ok(74, $YY = tie(%hh, 'DB_File', $Dfile, O_RDWR|O_CREAT, 0640, $bt )) ;
$hh{'Wall'} = 'Larry' ;
$hh{'Wall'} = 'Stone' ; # Note the duplicate key
@@ -462,33 +488,33 @@ $hh{'Smith'} = 'John' ;
$hh{'mouse'} = 'mickey' ;
# first work in scalar context
-ok(77, scalar $YY->get_dup('Unknown') == 0 );
-ok(78, scalar $YY->get_dup('Smith') == 1 );
-ok(79, scalar $YY->get_dup('Wall') == 4 );
+ok(75, scalar $YY->get_dup('Unknown') == 0 );
+ok(76, scalar $YY->get_dup('Smith') == 1 );
+ok(77, scalar $YY->get_dup('Wall') == 4 );
# now in list context
my @unknown = $YY->get_dup('Unknown') ;
-ok(80, "@unknown" eq "" );
+ok(78, "@unknown" eq "" );
my @smith = $YY->get_dup('Smith') ;
-ok(81, "@smith" eq "John" );
+ok(79, "@smith" eq "John" );
{
my @wall = $YY->get_dup('Wall') ;
my %wall ;
@wall{@wall} = @wall ;
-ok(82, (@wall == 4 && $wall{'Larry'} && $wall{'Stone'} && $wall{'Brick'}) );
+ok(80, (@wall == 4 && $wall{'Larry'} && $wall{'Stone'} && $wall{'Brick'}) );
}
# hash
my %unknown = $YY->get_dup('Unknown', 1) ;
-ok(83, keys %unknown == 0 );
+ok(81, keys %unknown == 0 );
my %smith = $YY->get_dup('Smith', 1) ;
-ok(84, keys %smith == 1 && $smith{'John'}) ;
+ok(82, keys %smith == 1 && $smith{'John'}) ;
my %wall = $YY->get_dup('Wall', 1) ;
-ok(85, keys %wall == 3 && $wall{'Larry'} == 1 && $wall{'Stone'} == 1
+ok(83, keys %wall == 3 && $wall{'Larry'} == 1 && $wall{'Stone'} == 1
&& $wall{'Brick'} == 2);
undef $YY ;
@@ -514,9 +540,9 @@ $dbh3->{compare} = sub { length $_[0] <=> length $_[1] } ;
my (%g, %k);
-tie(%h, 'DB_File',$Dfile1, O_RDWR|O_CREAT, 0640, $dbh1 ) ;
-tie(%g, 'DB_File',$Dfile2, O_RDWR|O_CREAT, 0640, $dbh2 ) ;
-tie(%k, 'DB_File',$Dfile3, O_RDWR|O_CREAT, 0640, $dbh3 ) ;
+tie(%h, 'DB_File',$Dfile1, O_RDWR|O_CREAT, 0640, $dbh1 ) or die $!;
+tie(%g, 'DB_File',$Dfile2, O_RDWR|O_CREAT, 0640, $dbh2 ) or die $!;
+tie(%k, 'DB_File',$Dfile3, O_RDWR|O_CREAT, 0640, $dbh3 ) or die $!;
my @Keys = qw( 0123 12 -1234 9 987654321 def ) ;
my (@srt_1, @srt_2, @srt_3);
@@ -547,9 +573,9 @@ sub ArrayCompare
1 ;
}
-ok(86, ArrayCompare (\@srt_1, [keys %h]) );
-ok(87, ArrayCompare (\@srt_2, [keys %g]) );
-ok(88, ArrayCompare (\@srt_3, [keys %k]) );
+ok(84, ArrayCompare (\@srt_1, [keys %h]) );
+ok(85, ArrayCompare (\@srt_2, [keys %g]) );
+ok(86, ArrayCompare (\@srt_3, [keys %k]) );
untie %h ;
untie %g ;
@@ -559,7 +585,7 @@ unlink $Dfile1, $Dfile2, $Dfile3 ;
# clear
# #####
-ok(89, tie(%h, 'DB_File', $Dfile1, O_RDWR|O_CREAT, 0640, $DB_BTREE ) );
+ok(87, tie(%h, 'DB_File', $Dfile1, O_RDWR|O_CREAT, 0640, $DB_BTREE ) );
foreach (1 .. 10)
{ $h{$_} = $_ * 100 }
@@ -568,7 +594,7 @@ $i = 0 ;
while (($key,$value) = each(%h)) {
$i++;
}
-ok(90, $i == 10);
+ok(88, $i == 10);
# now clear the hash
%h = () ;
@@ -578,7 +604,7 @@ $i = 0 ;
while (($key,$value) = each(%h)) {
$i++;
}
-ok(91, $i == 0);
+ok(89, $i == 0);
untie %h ;
unlink $Dfile1 ;
@@ -589,7 +615,7 @@ unlink $Dfile1 ;
my $filename = "xyz" ;
my @x ;
eval { tie @x, 'DB_File', $filename, O_RDWR|O_CREAT, 0640, $DB_BTREE ; } ;
- ok(92, $@ =~ /^DB_File can only tie an associative array to a DB_BTREE database/) ;
+ ok(90, $@ =~ /^DB_File can only tie an associative array to a DB_BTREE database/) ;
unlink $filename ;
}
@@ -608,7 +634,7 @@ unlink $Dfile1 ;
use warnings ;
use strict ;
- use vars qw( @ISA @EXPORT) ;
+ our (@ISA, @EXPORT);
require Exporter ;
use DB_File;
@@ -656,31 +682,31 @@ EOM
BEGIN { push @INC, '.'; }
eval 'use SubDB ; ';
- main::ok(93, $@ eq "") ;
+ main::ok(91, $@ eq "") ;
my %h ;
my $X ;
eval '
$X = tie(%h, "SubDB","dbbtree.tmp", O_RDWR|O_CREAT, 0640, $DB_BTREE );
' ;
- main::ok(94, $@ eq "") ;
+ main::ok(92, $@ eq "") ;
my $ret = eval '$h{"fred"} = 3 ; return $h{"fred"} ' ;
- main::ok(95, $@ eq "") ;
- main::ok(96, $ret == 5) ;
+ main::ok(93, $@ eq "") ;
+ main::ok(94, $ret == 5) ;
my $value = 0;
$ret = eval '$X->put("joe", 4) ; $X->get("joe", $value) ; return $value' ;
- main::ok(97, $@ eq "") ;
- main::ok(98, $ret == 10) ;
+ main::ok(95, $@ eq "") ;
+ main::ok(96, $ret == 10) ;
$ret = eval ' R_NEXT eq main::R_NEXT ' ;
- main::ok(99, $@ eq "" ) ;
- main::ok(100, $ret == 1) ;
+ main::ok(97, $@ eq "" ) ;
+ main::ok(98, $ret == 1) ;
$ret = eval '$X->A_new_method("joe") ' ;
- main::ok(101, $@ eq "") ;
- main::ok(102, $ret eq "[[11]]") ;
+ main::ok(99, $@ eq "") ;
+ main::ok(100, $ret eq "[[11]]") ;
undef $X;
untie(%h);
@@ -705,7 +731,7 @@ EOM
$_ eq 'original' ;
}
- ok(103, $db = tie(%h, 'DB_File', $Dfile, O_RDWR|O_CREAT, 0640, $DB_BTREE ) );
+ ok(101, $db = tie(%h, 'DB_File', $Dfile, O_RDWR|O_CREAT, 0640, $DB_BTREE ) );
$db->filter_fetch_key (sub { $fetch_key = $_ }) ;
$db->filter_store_key (sub { $store_key = $_ }) ;
@@ -716,17 +742,17 @@ EOM
$h{"fred"} = "joe" ;
# fk sk fv sv
- ok(104, checkOutput( "", "fred", "", "joe")) ;
+ ok(102, checkOutput( "", "fred", "", "joe")) ;
($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
- ok(105, $h{"fred"} eq "joe");
+ ok(103, $h{"fred"} eq "joe");
# fk sk fv sv
- ok(106, checkOutput( "", "fred", "joe", "")) ;
+ ok(104, checkOutput( "", "fred", "joe", "")) ;
($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
- ok(107, $db->FIRSTKEY() eq "fred") ;
+ ok(105, $db->FIRSTKEY() eq "fred") ;
# fk sk fv sv
- ok(108, checkOutput( "fred", "", "", "")) ;
+ ok(106, checkOutput( "fred", "", "", "")) ;
# replace the filters, but remember the previous set
my ($old_fk) = $db->filter_fetch_key
@@ -741,17 +767,17 @@ EOM
($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
$h{"Fred"} = "Joe" ;
# fk sk fv sv
- ok(109, checkOutput( "", "fred", "", "Jxe")) ;
+ ok(107, checkOutput( "", "fred", "", "Jxe")) ;
($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
- ok(110, $h{"Fred"} eq "[Jxe]");
+ ok(108, $h{"Fred"} eq "[Jxe]");
# fk sk fv sv
- ok(111, checkOutput( "", "fred", "[Jxe]", "")) ;
+ ok(109, checkOutput( "", "fred", "[Jxe]", "")) ;
($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
- ok(112, $db->FIRSTKEY() eq "FRED") ;
+ ok(110, $db->FIRSTKEY() eq "FRED") ;
# fk sk fv sv
- ok(113, checkOutput( "FRED", "", "", "")) ;
+ ok(111, checkOutput( "FRED", "", "", "")) ;
# put the original filters back
$db->filter_fetch_key ($old_fk);
@@ -761,15 +787,15 @@ EOM
($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
$h{"fred"} = "joe" ;
- ok(114, checkOutput( "", "fred", "", "joe")) ;
+ ok(112, checkOutput( "", "fred", "", "joe")) ;
($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
- ok(115, $h{"fred"} eq "joe");
- ok(116, checkOutput( "", "fred", "joe", "")) ;
+ ok(113, $h{"fred"} eq "joe");
+ ok(114, checkOutput( "", "fred", "joe", "")) ;
($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
- ok(117, $db->FIRSTKEY() eq "fred") ;
- ok(118, checkOutput( "fred", "", "", "")) ;
+ ok(115, $db->FIRSTKEY() eq "fred") ;
+ ok(116, checkOutput( "fred", "", "", "")) ;
# delete the filters
$db->filter_fetch_key (undef);
@@ -779,15 +805,15 @@ EOM
($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
$h{"fred"} = "joe" ;
- ok(119, checkOutput( "", "", "", "")) ;
+ ok(117, checkOutput( "", "", "", "")) ;
($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
- ok(120, $h{"fred"} eq "joe");
- ok(121, checkOutput( "", "", "", "")) ;
+ ok(118, $h{"fred"} eq "joe");
+ ok(119, checkOutput( "", "", "", "")) ;
($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
- ok(122, $db->FIRSTKEY() eq "fred") ;
- ok(123, checkOutput( "", "", "", "")) ;
+ ok(120, $db->FIRSTKEY() eq "fred") ;
+ ok(121, checkOutput( "", "", "", "")) ;
undef $db ;
untie %h;
@@ -802,7 +828,7 @@ EOM
my (%h, $db) ;
unlink $Dfile;
- ok(124, $db = tie(%h, 'DB_File', $Dfile, O_RDWR|O_CREAT, 0640, $DB_BTREE ) );
+ ok(122, $db = tie(%h, 'DB_File', $Dfile, O_RDWR|O_CREAT, 0640, $DB_BTREE ) );
my %result = () ;
@@ -826,32 +852,32 @@ EOM
$_ = "original" ;
$h{"fred"} = "joe" ;
- ok(125, $result{"store key"} eq "store key - 1: [fred]");
- ok(126, $result{"store value"} eq "store value - 1: [joe]");
- ok(127, ! defined $result{"fetch key"} );
- ok(128, ! defined $result{"fetch value"} );
- ok(129, $_ eq "original") ;
-
- ok(130, $db->FIRSTKEY() eq "fred") ;
- ok(131, $result{"store key"} eq "store key - 1: [fred]");
- ok(132, $result{"store value"} eq "store value - 1: [joe]");
- ok(133, $result{"fetch key"} eq "fetch key - 1: [fred]");
- ok(134, ! defined $result{"fetch value"} );
- ok(135, $_ eq "original") ;
+ ok(123, $result{"store key"} eq "store key - 1: [fred]");
+ ok(124, $result{"store value"} eq "store value - 1: [joe]");
+ ok(125, ! defined $result{"fetch key"} );
+ ok(126, ! defined $result{"fetch value"} );
+ ok(127, $_ eq "original") ;
+
+ ok(128, $db->FIRSTKEY() eq "fred") ;
+ ok(129, $result{"store key"} eq "store key - 1: [fred]");
+ ok(130, $result{"store value"} eq "store value - 1: [joe]");
+ ok(131, $result{"fetch key"} eq "fetch key - 1: [fred]");
+ ok(132, ! defined $result{"fetch value"} );
+ ok(133, $_ eq "original") ;
$h{"jim"} = "john" ;
- ok(136, $result{"store key"} eq "store key - 2: [fred jim]");
- ok(137, $result{"store value"} eq "store value - 2: [joe john]");
- ok(138, $result{"fetch key"} eq "fetch key - 1: [fred]");
- ok(139, ! defined $result{"fetch value"} );
- ok(140, $_ eq "original") ;
-
- ok(141, $h{"fred"} eq "joe");
- ok(142, $result{"store key"} eq "store key - 3: [fred jim fred]");
- ok(143, $result{"store value"} eq "store value - 2: [joe john]");
- ok(144, $result{"fetch key"} eq "fetch key - 1: [fred]");
- ok(145, $result{"fetch value"} eq "fetch value - 1: [joe]");
- ok(146, $_ eq "original") ;
+ ok(134, $result{"store key"} eq "store key - 2: [fred jim]");
+ ok(135, $result{"store value"} eq "store value - 2: [joe john]");
+ ok(136, $result{"fetch key"} eq "fetch key - 1: [fred]");
+ ok(137, ! defined $result{"fetch value"} );
+ ok(138, $_ eq "original") ;
+
+ ok(139, $h{"fred"} eq "joe");
+ ok(140, $result{"store key"} eq "store key - 3: [fred jim fred]");
+ ok(141, $result{"store value"} eq "store value - 2: [joe john]");
+ ok(142, $result{"fetch key"} eq "fetch key - 1: [fred]");
+ ok(143, $result{"fetch value"} eq "fetch value - 1: [joe]");
+ ok(144, $_ eq "original") ;
undef $db ;
untie %h;
@@ -865,12 +891,12 @@ EOM
my (%h, $db) ;
unlink $Dfile;
- ok(147, $db = tie(%h, 'DB_File', $Dfile, O_RDWR|O_CREAT, 0640, $DB_BTREE ) );
+ ok(145, $db = tie(%h, 'DB_File', $Dfile, O_RDWR|O_CREAT, 0640, $DB_BTREE ) );
$db->filter_store_key (sub { $_ = $h{$_} }) ;
eval '$h{1} = 1234' ;
- ok(148, $@ =~ /^recursion detected in filter_store_key at/ );
+ ok(146, $@ =~ /^recursion detected in filter_store_key at/ );
undef $db ;
untie %h;
@@ -930,7 +956,7 @@ EOM
delete $DB_BTREE->{'compare'} ;
- ok(149, docat_del($file) eq <<'EOM') ;
+ ok(147, docat_del($file) eq <<'EOM') ;
mouse
Smith
Wall
@@ -946,7 +972,7 @@ EOM
use strict ;
use DB_File ;
- use vars qw($filename %h ) ;
+ my ($filename, %h);
$filename = "tree" ;
unlink $filename ;
@@ -974,7 +1000,7 @@ EOM
unlink $filename ;
}
- ok(150, docat_del($file) eq ($db185mode ? <<'EOM' : <<'EOM') ) ;
+ ok(148, docat_del($file) eq ($db185mode ? <<'EOM' : <<'EOM') ) ;
Smith -> John
Wall -> Brick
Wall -> Brick
@@ -998,7 +1024,7 @@ EOM
use strict ;
use DB_File ;
- use vars qw($filename $x %h $status $key $value) ;
+ my ($filename, $x, %h, $status, $key, $value);
$filename = "tree" ;
unlink $filename ;
@@ -1029,7 +1055,7 @@ EOM
untie %h ;
}
- ok(151, docat_del($file) eq ($db185mode == 1 ? <<'EOM' : <<'EOM') ) ;
+ ok(149, docat_del($file) eq ($db185mode == 1 ? <<'EOM' : <<'EOM') ) ;
Smith -> John
Wall -> Brick
Wall -> Brick
@@ -1054,7 +1080,7 @@ EOM
use strict ;
use DB_File ;
- use vars qw($filename $x %h ) ;
+ my ($filename, $x, %h);
$filename = "tree" ;
@@ -1084,7 +1110,7 @@ EOM
untie %h ;
}
- ok(152, docat_del($file) eq <<'EOM') ;
+ ok(150, docat_del($file) eq <<'EOM') ;
Wall occurred 3 times
Larry is there
There are 2 Brick Walls
@@ -1103,9 +1129,9 @@ EOM
use strict ;
use DB_File ;
- use vars qw($filename $x %h $found) ;
+ my ($filename, $x, %h, $found);
- my $filename = "tree" ;
+ $filename = "tree" ;
# Enable duplicate records
$DB_BTREE->{'flags'} = R_DUP ;
@@ -1123,7 +1149,7 @@ EOM
untie %h ;
}
- ok(153, docat_del($file) eq <<'EOM') ;
+ ok(151, docat_del($file) eq <<'EOM') ;
Larry Wall is there
Harry Wall is not there
EOM
@@ -1138,9 +1164,9 @@ EOM
use strict ;
use DB_File ;
- use vars qw($filename $x %h $found) ;
+ my ($filename, $x, %h, $found);
- my $filename = "tree" ;
+ $filename = "tree" ;
# Enable duplicate records
$DB_BTREE->{'flags'} = R_DUP ;
@@ -1159,7 +1185,7 @@ EOM
unlink $filename ;
}
- ok(154, docat_del($file) eq <<'EOM') ;
+ ok(152, docat_del($file) eq <<'EOM') ;
Larry Wall is not there
EOM
@@ -1174,7 +1200,7 @@ EOM
use DB_File ;
use Fcntl ;
- use vars qw($filename $x %h $st $key $value) ;
+ my ($filename, $x, %h, $st, $key, $value);
sub match
{
@@ -1219,7 +1245,7 @@ EOM
}
- ok(155, docat_del($file) eq <<'EOM') ;
+ ok(153, docat_del($file) eq <<'EOM') ;
IN ORDER
Smith -> John
Wall -> Larry
@@ -1278,7 +1304,7 @@ EOM
tie %h, 'DB_File', $Dfile, O_RDWR|O_CREAT, 0664, $DB_BTREE
or die "Can't open file: $!\n" ;
$h{ABC} = undef;
- ok(156, $a eq "") ;
+ ok(154, $a eq "") ;
untie %h ;
unlink $Dfile;
}
@@ -1298,9 +1324,166 @@ EOM
tie %h, 'DB_File', $Dfile, O_RDWR|O_CREAT, 0664, $DB_BTREE
or die "Can't open file: $!\n" ;
%h = (); ;
- ok(157, $a eq "") ;
+ ok(155, $a eq "") ;
+ untie %h ;
+ unlink $Dfile;
+}
+
+{
+ # When iterating over a tied hash using "each", the key passed to FETCH
+ # will be recycled and passed to NEXTKEY. If a DBM Filter modifies the
+ # key in FETCH via a filter_fetch_key method, we need to check that the
+ # modified key doesn't get passed to NEXTKEY.
+ # Also test "keys" & "values" while we are at it.
+
+ use warnings ;
+ use strict ;
+ use DB_File ;
+
+ unlink $Dfile;
+ my $bad_key = 0 ;
+ my %h = () ;
+ my $db ;
+ ok(156, $db = tie(%h, 'DB_File', $Dfile, O_RDWR|O_CREAT, 0640, $DB_BTREE ) );
+ $db->filter_fetch_key (sub { $_ =~ s/^Beta_/Alpha_/ if defined $_}) ;
+ $db->filter_store_key (sub { $bad_key = 1 if /^Beta_/ ; $_ =~ s/^Alpha_/Beta_/}) ;
+
+ $h{'Alpha_ABC'} = 2 ;
+ $h{'Alpha_DEF'} = 5 ;
+
+ ok(157, $h{'Alpha_ABC'} == 2);
+ ok(158, $h{'Alpha_DEF'} == 5);
+
+ my ($k, $v) = ("","");
+ while (($k, $v) = each %h) {}
+ ok(159, $bad_key == 0);
+
+ $bad_key = 0 ;
+ foreach $k (keys %h) {}
+ ok(160, $bad_key == 0);
+
+ $bad_key = 0 ;
+ foreach $v (values %h) {}
+ ok(161, $bad_key == 0);
+
+ undef $db ;
untie %h ;
unlink $Dfile;
}
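# A minimal sketch of the filter pairing exercised in the block above:
# store/fetch key filters that are exact inverses, so iteration via
# each/keys/values never exposes the on-disk form.  The file name
# "filter-demo.tmp" and the "k:" prefix are illustrative only.
{
    use DB_File;
    use Fcntl;
    my %fh;
    my $fdb = tie(%fh, 'DB_File', "filter-demo.tmp", O_RDWR|O_CREAT, 0640, $DB_BTREE)
        or die "tie failed: $!";
    $fdb->filter_store_key(sub { $_ = "k:$_" });            # Perl key -> disk key
    $fdb->filter_fetch_key(sub { s/^k:// if defined $_ });  # disk key -> Perl key
    $fh{apple} = 1;
    print join(",", keys %fh), "\n";   # prints "apple", never "k:apple"
    undef $fdb;
    untie %fh;
    unlink "filter-demo.tmp";
}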
+{
+ # now an error to pass 'compare' a non-code reference
+ my $dbh = new DB_File::BTREEINFO ;
+
+ eval { $dbh->{compare} = 2 };
+ ok(162, $@ =~ /^Key 'compare' not associated with a code reference at/);
+
+ eval { $dbh->{prefix} = 2 };
+ ok(163, $@ =~ /^Key 'prefix' not associated with a code reference at/);
+
+}
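# For contrast with the two errors checked above, a sketch of a valid
# assignment: 'compare' (like 'prefix') must be given a code reference.
# The case-insensitive ordering is illustrative only.
{
    use DB_File;
    my $info = new DB_File::BTREEINFO ;
    $info->{compare} = sub { lc $_[0] cmp lc $_[1] };   # code ref: accepted
}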
+
+
+{
+ # recursion detection in btree
+ my %hash ;
+ unlink $Dfile;
+ my $dbh = new DB_File::BTREEINFO ;
+ $dbh->{compare} = sub { $hash{3} = 4 ; length $_[0] } ;
+
+
+ my (%h);
+ ok(164, tie(%hash, 'DB_File',$Dfile, O_RDWR|O_CREAT, 0640, $dbh ) );
+
+ eval { $hash{1} = 2;
+ $hash{4} = 5;
+ };
+
+ ok(165, $@ =~ /^DB_File btree_compare: recursion detected/);
+ {
+ no warnings;
+ untie %hash;
+ }
+ unlink $Dfile;
+}
+
+{
+ # Check that two callbacks don't interact
+ my %hash1 ;
+ my %hash2 ;
+ my $h1_count = 0;
+ my $h2_count = 0;
+ unlink $Dfile, $Dfile2;
+ my $dbh1 = new DB_File::BTREEINFO ;
+ $dbh1->{compare} = sub { ++ $h1_count ; $_[0] cmp $_[1] } ;
+
+ my $dbh2 = new DB_File::BTREEINFO ;
+ $dbh2->{compare} = sub { ++ $h2_count ; $_[0] cmp $_[1] } ;
+
+
+
+ my (%h);
+ ok(166, tie(%hash1, 'DB_File',$Dfile, O_RDWR|O_CREAT, 0640, $dbh1 ) );
+ ok(167, tie(%hash2, 'DB_File',$Dfile2, O_RDWR|O_CREAT, 0640, $dbh2 ) );
+
+ $hash1{DEFG} = 5;
+ $hash1{XYZ} = 2;
+ $hash1{ABCDE} = 5;
+
+ $hash2{defg} = 5;
+ $hash2{xyz} = 2;
+ $hash2{abcde} = 5;
+
+ ok(168, $h1_count > 0);
+ ok(169, $h1_count == $h2_count);
+
+ ok(170, safeUntie \%hash1);
+ ok(171, safeUntie \%hash2);
+ unlink $Dfile, $Dfile2;
+}
+
+{
+ # Check that DBM Filter can cope with read-only $_
+
+ use warnings ;
+ use strict ;
+ my (%h, $db) ;
+ unlink $Dfile;
+
+ ok(172, $db = tie(%h, 'DB_File', $Dfile, O_RDWR|O_CREAT, 0640, $DB_BTREE ) );
+
+ $db->filter_fetch_key (sub { }) ;
+ $db->filter_store_key (sub { }) ;
+ $db->filter_fetch_value (sub { }) ;
+ $db->filter_store_value (sub { }) ;
+
+ $_ = "original" ;
+
+ $h{"fred"} = "joe" ;
+ ok(173, $h{"fred"} eq "joe");
+
+ eval { grep { $h{$_} } (1, 2, 3) };
+ ok (174, ! $@);
+
+
+ # delete the filters
+ $db->filter_fetch_key (undef);
+ $db->filter_store_key (undef);
+ $db->filter_fetch_value (undef);
+ $db->filter_store_value (undef);
+
+ $h{"fred"} = "joe" ;
+
+ ok(175, $h{"fred"} eq "joe");
+
+ ok(176, $db->FIRSTKEY() eq "fred") ;
+
+ eval { grep { $h{$_} } (1, 2, 3) };
+ ok (177, ! $@);
+
+ undef $db ;
+ untie %h;
+ unlink $Dfile;
+}
+
exit ;
diff --git a/bdb/perl.DB_File/t/db-hash.t b/bdb/perl/DB_File/t/db-hash.t
index a6efd981004..10623cc82a7 100644
--- a/bdb/perl.DB_File/t/db-hash.t
+++ b/bdb/perl/DB_File/t/db-hash.t
@@ -1,8 +1,5 @@
#!./perl
-use warnings ;
-use strict ;
-
BEGIN {
unless(grep /blib/, @INC) {
chdir 't' if -d 't';
@@ -10,12 +7,14 @@ BEGIN {
}
}
+use warnings;
+use strict;
use Config;
BEGIN {
if(-d "lib" && -f "TEST") {
if ($Config{'extensions'} !~ /\bDB_File\b/ ) {
- print "1..111\n";
+ print "1..0 # Skip: DB_File was not built\n";
exit 0;
}
}
@@ -24,7 +23,7 @@ BEGIN {
use DB_File;
use Fcntl;
-print "1..111\n";
+print "1..143\n";
sub ok
{
@@ -64,11 +63,31 @@ sub docat_del
open(CAT,$file) || die "Cannot open $file: $!";
my $result = <CAT>;
close(CAT);
+ $result = normalise($result) ;
unlink $file ;
return $result;
}
+sub normalise
+{
+ my $data = shift ;
+ $data =~ s#\r\n#\n#g
+ if $^O eq 'cygwin' ;
+ return $data ;
+}
+
+sub safeUntie
+{
+ my $hashref = shift ;
+ my $no_inner = 1;
+ local $SIG{__WARN__} = sub {-- $no_inner } ;
+ untie %$hashref;
+ return $no_inner;
+}
+
+
my $Dfile = "dbhash.tmp";
+my $Dfile2 = "dbhash2.tmp";
my $null_keys_allowed = ($DB_File::db_ver < 2.004010
|| $DB_File::db_ver >= 3.1 );
@@ -99,8 +118,9 @@ ok(9, $dbh->{nelem} == 400 );
$dbh->{cachesize} = 65 ;
ok(10, $dbh->{cachesize} == 65 );
-$dbh->{hash} = "abc" ;
-ok(11, $dbh->{hash} eq "abc" );
+my $some_sub = sub {} ;
+$dbh->{hash} = $some_sub;
+ok(11, $dbh->{hash} eq $some_sub );
$dbh->{lorder} = 1234 ;
ok(12, $dbh->{lorder} == 1234 );
@@ -115,10 +135,15 @@ ok(14, $@ =~ /^DB_File::HASHINFO::FETCH - Unknown element 'fred' at/ );
# Now check the interface to HASH
my ($X, %h);
ok(15, $X = tie(%h, 'DB_File',$Dfile, O_RDWR|O_CREAT, 0640, $DB_HASH ) );
+die "Could not tie: $!" unless $X;
my ($dev,$ino,$mode,$nlink,$uid,$gid,$rdev,$size,$atime,$mtime,$ctime,
$blksize,$blocks) = stat($Dfile);
-ok(16, ($mode & 0777) == ($^O eq 'os2' ? 0666 : 0640) || $^O eq 'amigaos' || $^O eq 'MSWin32');
+
+my %noMode = map { $_, 1} qw( amigaos MSWin32 NetWare cygwin ) ;
+
+ok(16, ($mode & 0777) == (($^O eq 'os2' || $^O eq 'MacOS') ? 0666 : 0640) ||
+ $noMode{$^O} );
my ($key, $value, $i);
while (($key,$value) = each(%h)) {
@@ -393,7 +418,7 @@ untie %h ;
use warnings ;
use strict ;
- use vars qw( @ISA @EXPORT) ;
+ our (@ISA, @EXPORT);
require Exporter ;
use DB_File;
@@ -483,9 +508,22 @@ EOM
sub checkOutput
{
+ no warnings 'uninitialized';
my($fk, $sk, $fv, $sv) = @_ ;
+
+ print "# Fetch Key : expected '$fk' got '$fetch_key'\n"
+ if $fetch_key ne $fk ;
+ print "# Fetch Value : expected '$fv' got '$fetch_value'\n"
+ if $fetch_value ne $fv ;
+ print "# Store Key : expected '$sk' got '$store_key'\n"
+ if $store_key ne $sk ;
+ print "# Store Value : expected '$sv' got '$store_value'\n"
+ if $store_value ne $sv ;
+ print "# \$_ : expected 'original' got '$_'\n"
+ if $_ ne 'original' ;
+
return
- $fetch_key eq $fk && $store_key eq $sk &&
+ $fetch_key eq $fk && $store_key eq $sk &&
$fetch_value eq $fv && $store_value eq $sv &&
$_ eq 'original' ;
}
@@ -509,9 +547,13 @@ EOM
ok(66, checkOutput( "", "fred", "joe", "")) ;
($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
- ok(67, $db->FIRSTKEY() eq "fred") ;
+ my ($k, $v) ;
+ $k = 'fred';
+ ok(67, ! $db->seq($k, $v, R_FIRST) ) ;
+ ok(68, $k eq "fred") ;
+ ok(69, $v eq "joe") ;
# fk sk fv sv
- ok(68, checkOutput( "fred", "", "", "")) ;
+ ok(70, checkOutput( "fred", "fred", "joe", "")) ;
# replace the filters, but remember the previous set
my ($old_fk) = $db->filter_fetch_key
@@ -526,17 +568,20 @@ EOM
($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
$h{"Fred"} = "Joe" ;
# fk sk fv sv
- ok(69, checkOutput( "", "fred", "", "Jxe")) ;
+ ok(71, checkOutput( "", "fred", "", "Jxe")) ;
($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
- ok(70, $h{"Fred"} eq "[Jxe]");
+ ok(72, $h{"Fred"} eq "[Jxe]");
# fk sk fv sv
- ok(71, checkOutput( "", "fred", "[Jxe]", "")) ;
+ ok(73, checkOutput( "", "fred", "[Jxe]", "")) ;
($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
- ok(72, $db->FIRSTKEY() eq "FRED") ;
+ $k = 'Fred'; $v ='';
+ ok(74, ! $db->seq($k, $v, R_FIRST) ) ;
+ ok(75, $k eq "FRED") ;
+ ok(76, $v eq "[Jxe]") ;
# fk sk fv sv
- ok(73, checkOutput( "FRED", "", "", "")) ;
+ ok(77, checkOutput( "FRED", "fred", "[Jxe]", "")) ;
# put the original filters back
$db->filter_fetch_key ($old_fk);
@@ -546,15 +591,20 @@ EOM
($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
$h{"fred"} = "joe" ;
- ok(74, checkOutput( "", "fred", "", "joe")) ;
+ ok(78, checkOutput( "", "fred", "", "joe")) ;
($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
- ok(75, $h{"fred"} eq "joe");
- ok(76, checkOutput( "", "fred", "joe", "")) ;
+ ok(79, $h{"fred"} eq "joe");
+ ok(80, checkOutput( "", "fred", "joe", "")) ;
($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
- ok(77, $db->FIRSTKEY() eq "fred") ;
- ok(78, checkOutput( "fred", "", "", "")) ;
+ #ok(77, $db->FIRSTKEY() eq "fred") ;
+ $k = 'fred';
+ ok(81, ! $db->seq($k, $v, R_FIRST) ) ;
+ ok(82, $k eq "fred") ;
+ ok(83, $v eq "joe") ;
+ # fk sk fv sv
+ ok(84, checkOutput( "fred", "fred", "joe", "")) ;
# delete the filters
$db->filter_fetch_key (undef);
@@ -564,15 +614,18 @@ EOM
($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
$h{"fred"} = "joe" ;
- ok(79, checkOutput( "", "", "", "")) ;
+ ok(85, checkOutput( "", "", "", "")) ;
($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
- ok(80, $h{"fred"} eq "joe");
- ok(81, checkOutput( "", "", "", "")) ;
+ ok(86, $h{"fred"} eq "joe");
+ ok(87, checkOutput( "", "", "", "")) ;
($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
- ok(82, $db->FIRSTKEY() eq "fred") ;
- ok(83, checkOutput( "", "", "", "")) ;
+ $k = 'fred';
+ ok(88, ! $db->seq($k, $v, R_FIRST) ) ;
+ ok(89, $k eq "fred") ;
+ ok(90, $v eq "joe") ;
+ ok(91, checkOutput( "", "", "", "")) ;
undef $db ;
untie %h;
@@ -587,7 +640,7 @@ EOM
my (%h, $db) ;
unlink $Dfile;
- ok(84, $db = tie(%h, 'DB_File', $Dfile, O_RDWR|O_CREAT, 0640, $DB_HASH ) );
+ ok(92, $db = tie(%h, 'DB_File', $Dfile, O_RDWR|O_CREAT, 0640, $DB_HASH ) );
my %result = () ;
@@ -611,32 +664,32 @@ EOM
$_ = "original" ;
$h{"fred"} = "joe" ;
- ok(85, $result{"store key"} eq "store key - 1: [fred]");
- ok(86, $result{"store value"} eq "store value - 1: [joe]");
- ok(87, ! defined $result{"fetch key"} );
- ok(88, ! defined $result{"fetch value"} );
- ok(89, $_ eq "original") ;
-
- ok(90, $db->FIRSTKEY() eq "fred") ;
- ok(91, $result{"store key"} eq "store key - 1: [fred]");
- ok(92, $result{"store value"} eq "store value - 1: [joe]");
- ok(93, $result{"fetch key"} eq "fetch key - 1: [fred]");
- ok(94, ! defined $result{"fetch value"} );
- ok(95, $_ eq "original") ;
+ ok(93, $result{"store key"} eq "store key - 1: [fred]");
+ ok(94, $result{"store value"} eq "store value - 1: [joe]");
+ ok(95, ! defined $result{"fetch key"} );
+ ok(96, ! defined $result{"fetch value"} );
+ ok(97, $_ eq "original") ;
+
+ ok(98, $db->FIRSTKEY() eq "fred") ;
+ ok(99, $result{"store key"} eq "store key - 1: [fred]");
+ ok(100, $result{"store value"} eq "store value - 1: [joe]");
+ ok(101, $result{"fetch key"} eq "fetch key - 1: [fred]");
+ ok(102, ! defined $result{"fetch value"} );
+ ok(103, $_ eq "original") ;
$h{"jim"} = "john" ;
- ok(96, $result{"store key"} eq "store key - 2: [fred jim]");
- ok(97, $result{"store value"} eq "store value - 2: [joe john]");
- ok(98, $result{"fetch key"} eq "fetch key - 1: [fred]");
- ok(99, ! defined $result{"fetch value"} );
- ok(100, $_ eq "original") ;
-
- ok(101, $h{"fred"} eq "joe");
- ok(102, $result{"store key"} eq "store key - 3: [fred jim fred]");
- ok(103, $result{"store value"} eq "store value - 2: [joe john]");
- ok(104, $result{"fetch key"} eq "fetch key - 1: [fred]");
- ok(105, $result{"fetch value"} eq "fetch value - 1: [joe]");
- ok(106, $_ eq "original") ;
+ ok(104, $result{"store key"} eq "store key - 2: [fred jim]");
+ ok(105, $result{"store value"} eq "store value - 2: [joe john]");
+ ok(106, $result{"fetch key"} eq "fetch key - 1: [fred]");
+ ok(107, ! defined $result{"fetch value"} );
+ ok(108, $_ eq "original") ;
+
+ ok(109, $h{"fred"} eq "joe");
+ ok(110, $result{"store key"} eq "store key - 3: [fred jim fred]");
+ ok(111, $result{"store value"} eq "store value - 2: [joe john]");
+ ok(112, $result{"fetch key"} eq "fetch key - 1: [fred]");
+ ok(113, $result{"fetch value"} eq "fetch value - 1: [joe]");
+ ok(114, $_ eq "original") ;
undef $db ;
untie %h;
@@ -650,12 +703,12 @@ EOM
my (%h, $db) ;
unlink $Dfile;
- ok(107, $db = tie(%h, 'DB_File', $Dfile, O_RDWR|O_CREAT, 0640, $DB_HASH ) );
+ ok(115, $db = tie(%h, 'DB_File', $Dfile, O_RDWR|O_CREAT, 0640, $DB_HASH ) );
$db->filter_store_key (sub { $_ = $h{$_} }) ;
eval '$h{1} = 1234' ;
- ok(108, $@ =~ /^recursion detected in filter_store_key at/ );
+ ok(116, $@ =~ /^recursion detected in filter_store_key at/ );
undef $db ;
untie %h;
@@ -673,7 +726,7 @@ EOM
use warnings FATAL => qw(all);
use strict ;
use DB_File ;
- use vars qw( %h $k $v ) ;
+ our (%h, $k, $v);
unlink "fruit" ;
tie %h, "DB_File", "fruit", O_RDWR|O_CREAT, 0640, $DB_HASH
@@ -700,7 +753,7 @@ EOM
unlink "fruit" ;
}
- ok(109, docat_del($file) eq <<'EOM') ;
+ ok(117, docat_del($file) eq <<'EOM') ;
Banana Exists
orange -> orange
@@ -726,7 +779,7 @@ EOM
tie %h, 'DB_File', $Dfile or die "Can't open file: $!\n" ;
$h{ABC} = undef;
- ok(110, $a eq "") ;
+ ok(118, $a eq "") ;
untie %h ;
unlink $Dfile;
}
@@ -745,9 +798,184 @@ EOM
tie %h, 'DB_File', $Dfile or die "Can't open file: $!\n" ;
%h = (); ;
- ok(111, $a eq "") ;
+ ok(119, $a eq "") ;
untie %h ;
unlink $Dfile;
}
+{
+ # When iterating over a tied hash using "each", the key passed to FETCH
+ # will be recycled and passed to NEXTKEY. If a DBM Filter modifies the
+ # key in FETCH via a filter_fetch_key method, we need to check that the
+ # modified key doesn't get passed to NEXTKEY.
+ # Also test "keys" & "values" while we are at it.
+
+ use warnings ;
+ use strict ;
+ use DB_File ;
+
+ unlink $Dfile;
+ my $bad_key = 0 ;
+ my %h = () ;
+ my $db ;
+ ok(120, $db = tie(%h, 'DB_File', $Dfile, O_RDWR|O_CREAT, 0640, $DB_HASH ) );
+ $db->filter_fetch_key (sub { $_ =~ s/^Beta_/Alpha_/ if defined $_}) ;
+ $db->filter_store_key (sub { $bad_key = 1 if /^Beta_/ ; $_ =~ s/^Alpha_/Beta_/}) ;
+
+ $h{'Alpha_ABC'} = 2 ;
+ $h{'Alpha_DEF'} = 5 ;
+
+ ok(121, $h{'Alpha_ABC'} == 2);
+ ok(122, $h{'Alpha_DEF'} == 5);
+
+ my ($k, $v) = ("","");
+ while (($k, $v) = each %h) {}
+ ok(123, $bad_key == 0);
+
+ $bad_key = 0 ;
+ foreach $k (keys %h) {}
+ ok(124, $bad_key == 0);
+
+ $bad_key = 0 ;
+ foreach $v (values %h) {}
+ ok(125, $bad_key == 0);
+
+ undef $db ;
+ untie %h ;
+ unlink $Dfile;
+}
+
+{
+ # now an error to pass 'hash' a non-code reference
+ my $dbh = new DB_File::HASHINFO ;
+
+ eval { $dbh->{hash} = 2 };
+ ok(126, $@ =~ /^Key 'hash' not associated with a code reference at/);
+
+}
+
+{
+ # recursion detection in hash
+ my %hash ;
+ unlink $Dfile;
+ my $dbh = new DB_File::HASHINFO ;
+ $dbh->{hash} = sub { $hash{3} = 4 ; length $_[0] } ;
+
+
+ my (%h);
+ ok(127, tie(%hash, 'DB_File',$Dfile, O_RDWR|O_CREAT, 0640, $dbh ) );
+
+ eval { $hash{1} = 2;
+ $hash{4} = 5;
+ };
+
+ ok(128, $@ =~ /^DB_File hash callback: recursion detected/);
+ {
+ no warnings;
+ untie %hash;
+ }
+ unlink $Dfile;
+}
+
+{
+ # Check that two hash callbacks don't interact
+ my %hash1 ;
+ my %hash2 ;
+ my $h1_count = 0;
+ my $h2_count = 0;
+ unlink $Dfile, $Dfile2;
+ my $dbh1 = new DB_File::HASHINFO ;
+ $dbh1->{hash} = sub { ++ $h1_count ; length $_[0] } ;
+
+ my $dbh2 = new DB_File::HASHINFO ;
+ $dbh2->{hash} = sub { ++ $h2_count ; length $_[0] } ;
+
+
+
+ my (%h);
+ ok(129, tie(%hash1, 'DB_File',$Dfile, O_RDWR|O_CREAT, 0640, $dbh1 ) );
+ ok(130, tie(%hash2, 'DB_File',$Dfile2, O_RDWR|O_CREAT, 0640, $dbh2 ) );
+
+ $hash1{DEFG} = 5;
+ $hash1{XYZ} = 2;
+ $hash1{ABCDE} = 5;
+
+ $hash2{defg} = 5;
+ $hash2{xyz} = 2;
+ $hash2{abcde} = 5;
+
+ ok(131, $h1_count > 0);
+ ok(132, $h1_count == $h2_count);
+
+ ok(133, safeUntie \%hash1);
+ ok(134, safeUntie \%hash2);
+ unlink $Dfile, $Dfile2;
+}
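# Sketch of supplying a hash callback for ordinary use (the counting
# callbacks above exist only to prove the two files stay independent).
# The file name "dbhash-demo.tmp" and the toy 33*h+ord hash are
# illustrative, not anything DB_File requires.
{
    use DB_File;
    use Fcntl;
    my $info = new DB_File::HASHINFO ;
    $info->{hash} = sub { my $v = 0; $v = ($v * 33 + ord) % 2**31 for split //, $_[0]; $v };
    tie my %demo, 'DB_File', "dbhash-demo.tmp", O_RDWR|O_CREAT, 0640, $info
        or die "tie failed: $!";
    $demo{perl} = 1;
    untie %demo;
    unlink "dbhash-demo.tmp";
}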
+
+{
+ # Passing undef for flags and/or mode when calling tie could cause
+ # Use of uninitialized value in subroutine entry
+
+
+ my $warn_count = 0 ;
+ #local $SIG{__WARN__} = sub { ++ $warn_count };
+ my %hash1;
+ unlink $Dfile;
+
+ tie %hash1, 'DB_File',$Dfile, undef;
+ ok(135, $warn_count == 0);
+ $warn_count = 0;
+ tie %hash1, 'DB_File',$Dfile, O_RDWR|O_CREAT, undef;
+ ok(136, $warn_count == 0);
+ tie %hash1, 'DB_File',$Dfile, undef, undef;
+ ok(137, $warn_count == 0);
+ $warn_count = 0;
+
+ unlink $Dfile;
+}
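# The defaults being relied on above: DB_File fills in trailing tie()
# arguments itself, so the short form below should behave like the fully
# spelled-out one and emit no "uninitialized value" warnings (the exact
# default values are taken from the DB_File docs and are an assumption of
# this sketch, not something the test asserts).
{
    use DB_File;
    use Fcntl;
    tie my %short, 'DB_File', "dbhash-defaults.tmp";
    untie %short;
    tie my %long, 'DB_File', "dbhash-defaults.tmp", O_CREAT|O_RDWR, 0666, $DB_HASH;
    untie %long;
    unlink "dbhash-defaults.tmp";
}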
+
+{
+ # Check that DBM Filter can cope with read-only $_
+
+ use warnings ;
+ use strict ;
+ my (%h, $db) ;
+ unlink $Dfile;
+
+ ok(138, $db = tie(%h, 'DB_File', $Dfile, O_RDWR|O_CREAT, 0640, $DB_HASH ) );
+
+ $db->filter_fetch_key (sub { }) ;
+ $db->filter_store_key (sub { }) ;
+ $db->filter_fetch_value (sub { }) ;
+ $db->filter_store_value (sub { }) ;
+
+ $_ = "original" ;
+
+ $h{"fred"} = "joe" ;
+ ok(139, $h{"fred"} eq "joe");
+
+ eval { grep { $h{$_} } (1, 2, 3) };
+ ok (140, ! $@);
+
+
+ # delete the filters
+ $db->filter_fetch_key (undef);
+ $db->filter_store_key (undef);
+ $db->filter_fetch_value (undef);
+ $db->filter_store_value (undef);
+
+ $h{"fred"} = "joe" ;
+
+ ok(141, $h{"fred"} eq "joe");
+
+ ok(142, $db->FIRSTKEY() eq "fred") ;
+
+ eval { grep { $h{$_} } (1, 2, 3) };
+ ok (143, ! $@);
+
+ undef $db ;
+ untie %h;
+ unlink $Dfile;
+}
+
exit ;
diff --git a/bdb/perl/DB_File/t/db-recno.t b/bdb/perl/DB_File/t/db-recno.t
new file mode 100644
index 00000000000..5390b549376
--- /dev/null
+++ b/bdb/perl/DB_File/t/db-recno.t
@@ -0,0 +1,1428 @@
+#!./perl -w
+
+BEGIN {
+ unless(grep /blib/, @INC) {
+ chdir 't' if -d 't';
+ @INC = '../lib' if -d '../lib';
+ }
+}
+
+use warnings;
+use strict;
+use Config;
+
+BEGIN {
+ if(-d "lib" && -f "TEST") {
+ if ($Config{'extensions'} !~ /\bDB_File\b/ ) {
+ print "1..0 # Skip: DB_File was not built\n";
+ exit 0;
+ }
+ }
+}
+
+use DB_File;
+use Fcntl;
+our ($dbh, $Dfile, $bad_ones, $FA);
+
+# full tied array support started in Perl 5.004_57
+# Double check to see if it is available.
+
+{
+ sub try::TIEARRAY { bless [], "try" }
+ sub try::FETCHSIZE { $FA = 1 }
+ $FA = 0 ;
+ my @a ;
+ tie @a, 'try' ;
+ my $a = @a ;
+}
+
+
+sub ok
+{
+ my $no = shift ;
+ my $result = shift ;
+
+ print "not " unless $result ;
+ print "ok $no\n" ;
+
+ return $result ;
+}
+
+{
+ package Redirect ;
+ use Symbol ;
+
+ sub new
+ {
+ my $class = shift ;
+ my $filename = shift ;
+ my $fh = gensym ;
+ open ($fh, ">$filename") || die "Cannot open $filename: $!" ;
+ my $real_stdout = select($fh) ;
+ return bless [$fh, $real_stdout ] ;
+
+ }
+ sub DESTROY
+ {
+ my $self = shift ;
+ close $self->[0] ;
+ select($self->[1]) ;
+ }
+}
+
+sub docat
+{
+ my $file = shift;
+ local $/ = undef;
+ open(CAT,$file) || die "Cannot open $file:$!";
+ my $result = <CAT>;
+ close(CAT);
+ normalise($result) ;
+ return $result;
+}
+
+sub docat_del
+{
+ my $file = shift;
+ my $result = docat($file);
+ unlink $file ;
+ return $result;
+}
+
+sub safeUntie
+{
+ my $hashref = shift ;
+ my $no_inner = 1;
+ local $SIG{__WARN__} = sub {-- $no_inner } ;
+ untie @$hashref;
+ return $no_inner;
+}
+
+sub bad_one
+{
+ unless ($bad_ones++) {
+ print STDERR <<EOM ;
+#
+# Some older versions of Berkeley DB version 1 will fail db-recno
+# tests 61, 63 and 65.
+EOM
+ if ($^O eq 'darwin'
+ && $Config{db_version_major} == 1
+ && $Config{db_version_minor} == 0
+ && $Config{db_version_patch} == 0) {
+ print STDERR <<EOM ;
+#
+# For example Mac OS X 10.1.4 (or earlier) has such an old
+# version of Berkeley DB.
+EOM
+ }
+
+ print STDERR <<EOM ;
+#
+# You can safely ignore the errors if you're never going to use the
+# broken functionality (recno databases with a modified bval).
+# Otherwise you'll have to upgrade your DB library.
+#
+# If you want to use Berkeley DB version 1, then 1.85 and 1.86 are the
+# last versions that were released. Berkeley DB version 2 is continually
+# being updated -- Check out http://www.sleepycat.com/ for more details.
+#
+EOM
+ }
+}
+
+sub normalise
+{
+ return unless $^O eq 'cygwin' ;
+ foreach (@_)
+ { s#\r\n#\n#g }
+}
+
+BEGIN
+{
+ {
+ local $SIG{__DIE__} ;
+ eval { require Data::Dumper ; import Data::Dumper } ;
+ }
+
+ if ($@) {
+ *Dumper = sub { my $a = shift; return "[ @{ $a } ]" } ;
+ }
+}
+
+my $splice_tests = 10 + 12 + 1; # ten regressions, 12 warning checks, plus the random batch
+my $total_tests = 158 ;
+$total_tests += $splice_tests if $FA ;
+print "1..$total_tests\n";
+
+$Dfile = "recno.tmp";
+unlink $Dfile ;
+
+umask(0);
+
+# Check the interface to RECNOINFO
+
+$dbh = new DB_File::RECNOINFO ;
+ok(1, ! defined $dbh->{bval}) ;
+ok(2, ! defined $dbh->{cachesize}) ;
+ok(3, ! defined $dbh->{psize}) ;
+ok(4, ! defined $dbh->{flags}) ;
+ok(5, ! defined $dbh->{lorder}) ;
+ok(6, ! defined $dbh->{reclen}) ;
+ok(7, ! defined $dbh->{bfname}) ;
+
+$dbh->{bval} = 3000 ;
+ok(8, $dbh->{bval} == 3000 );
+
+$dbh->{cachesize} = 9000 ;
+ok(9, $dbh->{cachesize} == 9000 );
+
+$dbh->{psize} = 400 ;
+ok(10, $dbh->{psize} == 400 );
+
+$dbh->{flags} = 65 ;
+ok(11, $dbh->{flags} == 65 );
+
+$dbh->{lorder} = 123 ;
+ok(12, $dbh->{lorder} == 123 );
+
+$dbh->{reclen} = 1234 ;
+ok(13, $dbh->{reclen} == 1234 );
+
+$dbh->{bfname} = 1234 ;
+ok(14, $dbh->{bfname} == 1234 );
+
+
+# Check that an invalid entry is caught both for store & fetch
+eval '$dbh->{fred} = 1234' ;
+ok(15, $@ =~ /^DB_File::RECNOINFO::STORE - Unknown element 'fred' at/ );
+eval 'my $q = $dbh->{fred}' ;
+ok(16, $@ =~ /^DB_File::RECNOINFO::FETCH - Unknown element 'fred' at/ );
+
+# Now check the interface to RECNOINFO
+
+my $X ;
+my @h ;
+ok(17, $X = tie @h, 'DB_File', $Dfile, O_RDWR|O_CREAT, 0640, $DB_RECNO ) ;
+
+my %noMode = map { $_, 1} qw( amigaos MSWin32 NetWare cygwin ) ;
+
+ok(18, ((stat($Dfile))[2] & 0777) == (($^O eq 'os2' || $^O eq 'MacOS') ? 0666 : 0640)
+ || $noMode{$^O} );
+
+#my $l = @h ;
+my $l = $X->length ;
+ok(19, ($FA ? @h == 0 : !$l) );
+
+my @data = qw( a b c d ever f g h i j k longername m n o p) ;
+
+$h[0] = shift @data ;
+ok(20, $h[0] eq 'a' );
+
+my $i;
+foreach (@data)
+ { $h[++$i] = $_ }
+
+unshift (@data, 'a') ;
+
+ok(21, defined $h[1] );
+ok(22, ! defined $h[16] );
+ok(23, $FA ? @h == @data : $X->length == @data );
+
+
+# Overwrite an entry & check we can fetch it
+$h[3] = 'replaced' ;
+$data[3] = 'replaced' ;
+ok(24, $h[3] eq 'replaced' );
+
+#PUSH
+my @push_data = qw(added to the end) ;
+($FA ? push(@h, @push_data) : $X->push(@push_data)) ;
+push (@data, @push_data) ;
+ok(25, $h[++$i] eq 'added' );
+ok(26, $h[++$i] eq 'to' );
+ok(27, $h[++$i] eq 'the' );
+ok(28, $h[++$i] eq 'end' );
+
+# POP
+my $popped = pop (@data) ;
+my $value = ($FA ? pop @h : $X->pop) ;
+ok(29, $value eq $popped) ;
+
+# SHIFT
+$value = ($FA ? shift @h : $X->shift) ;
+my $shifted = shift @data ;
+ok(30, $value eq $shifted );
+
+# UNSHIFT
+
+# empty list
+($FA ? unshift @h,() : $X->unshift) ;
+ok(31, ($FA ? @h == @data : $X->length == @data ));
+
+my @new_data = qw(add this to the start of the array) ;
+$FA ? unshift (@h, @new_data) : $X->unshift (@new_data) ;
+unshift (@data, @new_data) ;
+ok(32, $FA ? @h == @data : $X->length == @data );
+ok(33, $h[0] eq "add") ;
+ok(34, $h[1] eq "this") ;
+ok(35, $h[2] eq "to") ;
+ok(36, $h[3] eq "the") ;
+ok(37, $h[4] eq "start") ;
+ok(38, $h[5] eq "of") ;
+ok(39, $h[6] eq "the") ;
+ok(40, $h[7] eq "array") ;
+ok(41, $h[8] eq $data[8]) ;
+
+# Brief test for SPLICE - more thorough 'soak test' is later.
+my @old;
+if ($FA) {
+ @old = splice(@h, 1, 2, qw(bananas just before));
+}
+else {
+ @old = $X->splice(1, 2, qw(bananas just before));
+}
+ok(42, $h[0] eq "add") ;
+ok(43, $h[1] eq "bananas") ;
+ok(44, $h[2] eq "just") ;
+ok(45, $h[3] eq "before") ;
+ok(46, $h[4] eq "the") ;
+ok(47, $h[5] eq "start") ;
+ok(48, $h[6] eq "of") ;
+ok(49, $h[7] eq "the") ;
+ok(50, $h[8] eq "array") ;
+ok(51, $h[9] eq $data[8]) ;
+$FA ? splice(@h, 1, 3, @old) : $X->splice(1, 3, @old);
+
+# Now both arrays should be identical
+
+my $ok = 1 ;
+my $j = 0 ;
+foreach (@data)
+{
+ $ok = 0, last if $_ ne $h[$j ++] ;
+}
+ok(52, $ok );
+
+# Negative subscripts
+
+# get the last element of the array
+ok(53, $h[-1] eq $data[-1] );
+ok(54, $h[-1] eq $h[ ($FA ? @h : $X->length) -1] );
+
+# get the first element using a negative subscript
+eval '$h[ - ( $FA ? @h : $X->length)] = "abcd"' ;
+ok(55, $@ eq "" );
+ok(56, $h[0] eq "abcd" );
+
+# now try to read before the start of the array
+eval '$h[ - (1 + ($FA ? @h : $X->length))] = 1234' ;
+ok(57, $@ =~ '^Modification of non-creatable array value attempted' );
+
+# IMPORTANT - $X must be undefined before the untie otherwise the
+# underlying DB close routine will not get called.
+undef $X ;
+ok(58, safeUntie \@h);
+
+unlink $Dfile;
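# The ordering the IMPORTANT comment above insists on, as a standalone
# sketch (file name illustrative): drop the extra reference to the tie
# object before untie(), otherwise the underlying DB close routine is
# deferred and warnings report something like "untie attempted while 1
# inner references still exist".
{
    use DB_File;
    use Fcntl;
    my @recs;
    my $obj = tie @recs, 'DB_File', "recno-demo.tmp", O_RDWR|O_CREAT, 0640, $DB_RECNO
        or die "tie failed: $!";
    $recs[0] = "first";
    undef $obj;      # release the object reference first ...
    untie @recs;     # ... then untie, so the database really closes
    unlink "recno-demo.tmp";
}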
+
+
+{
+ # Check bval defaults to \n
+
+ my @h = () ;
+ my $dbh = new DB_File::RECNOINFO ;
+ ok(59, tie @h, 'DB_File', $Dfile, O_RDWR|O_CREAT, 0640, $dbh ) ;
+ $h[0] = "abc" ;
+ $h[1] = "def" ;
+ $h[3] = "ghi" ;
+ ok(60, safeUntie \@h);
+ my $x = docat($Dfile) ;
+ unlink $Dfile;
+ ok(61, $x eq "abc\ndef\n\nghi\n") ;
+}
+
+{
+ # Change bval
+
+ my @h = () ;
+ my $dbh = new DB_File::RECNOINFO ;
+ $dbh->{bval} = "-" ;
+ ok(62, tie @h, 'DB_File', $Dfile, O_RDWR|O_CREAT, 0640, $dbh ) ;
+ $h[0] = "abc" ;
+ $h[1] = "def" ;
+ $h[3] = "ghi" ;
+ ok(63, safeUntie \@h);
+ my $x = docat($Dfile) ;
+ unlink $Dfile;
+ my $ok = ($x eq "abc-def--ghi-") ;
+ bad_one() unless $ok ;
+ ok(64, $ok) ;
+}
+
+{
+ # Check R_FIXEDLEN with default bval (space)
+
+ my @h = () ;
+ my $dbh = new DB_File::RECNOINFO ;
+ $dbh->{flags} = R_FIXEDLEN ;
+ $dbh->{reclen} = 5 ;
+ ok(65, tie @h, 'DB_File', $Dfile, O_RDWR|O_CREAT, 0640, $dbh ) ;
+ $h[0] = "abc" ;
+ $h[1] = "def" ;
+ $h[3] = "ghi" ;
+ ok(66, safeUntie \@h);
+ my $x = docat($Dfile) ;
+ unlink $Dfile;
+ my $ok = ($x eq "abc def ghi ") ;
+ bad_one() unless $ok ;
+ ok(67, $ok) ;
+}
+
+{
+ # Check R_FIXEDLEN with user-defined bval
+
+ my @h = () ;
+ my $dbh = new DB_File::RECNOINFO ;
+ $dbh->{flags} = R_FIXEDLEN ;
+ $dbh->{bval} = "-" ;
+ $dbh->{reclen} = 5 ;
+ ok(68, tie @h, 'DB_File', $Dfile, O_RDWR|O_CREAT, 0640, $dbh ) ;
+ $h[0] = "abc" ;
+ $h[1] = "def" ;
+ $h[3] = "ghi" ;
+ ok(69, safeUntie \@h);
+ my $x = docat($Dfile) ;
+ unlink $Dfile;
+ my $ok = ($x eq "abc--def-------ghi--") ;
+ bad_one() unless $ok ;
+ ok(70, $ok) ;
+}
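# Putting the four bval/reclen variants above side by side (expected file
# contents copied from the tests; record 2 is the index that is never
# assigned, so it appears as an empty record):
#   defaults                       -> "abc\ndef\n\nghi\n"      (bval "\n" terminates each record)
#   bval "-"                       -> "abc-def--ghi-"
#   R_FIXEDLEN, reclen 5           -> "abc  def       ghi  "   (padded with the default bval, a space)
#   R_FIXEDLEN, reclen 5, bval "-" -> "abc--def-------ghi--"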
+
+{
+ # check that attempting to tie an associative array to a DB_RECNO will fail
+
+ my $filename = "xyz" ;
+ my %x ;
+ eval { tie %x, 'DB_File', $filename, O_RDWR|O_CREAT, 0640, $DB_RECNO ; } ;
+ ok(71, $@ =~ /^DB_File can only tie an array to a DB_RECNO database/) ;
+ unlink $filename ;
+}
+
+{
+ # sub-class test
+
+ package Another ;
+
+ use warnings ;
+ use strict ;
+
+ open(FILE, ">SubDB.pm") or die "Cannot open SubDB.pm: $!\n" ;
+ print FILE <<'EOM' ;
+
+ package SubDB ;
+
+ use warnings ;
+ use strict ;
+ our (@ISA, @EXPORT);
+
+ require Exporter ;
+ use DB_File;
+ @ISA=qw(DB_File);
+ @EXPORT = @DB_File::EXPORT ;
+
+ sub STORE {
+ my $self = shift ;
+ my $key = shift ;
+ my $value = shift ;
+ $self->SUPER::STORE($key, $value * 2) ;
+ }
+
+ sub FETCH {
+ my $self = shift ;
+ my $key = shift ;
+ $self->SUPER::FETCH($key) - 1 ;
+ }
+
+ sub put {
+ my $self = shift ;
+ my $key = shift ;
+ my $value = shift ;
+ $self->SUPER::put($key, $value * 3) ;
+ }
+
+ sub get {
+ my $self = shift ;
+ $self->SUPER::get($_[0], $_[1]) ;
+ $_[1] -= 2 ;
+ }
+
+ sub A_new_method
+ {
+ my $self = shift ;
+ my $key = shift ;
+ my $value = $self->FETCH($key) ;
+ return "[[$value]]" ;
+ }
+
+ 1 ;
+EOM
+
+ close FILE or die "Could not close: $!";
+
+ BEGIN { push @INC, '.'; }
+ eval 'use SubDB ; ';
+ main::ok(72, $@ eq "") ;
+ my @h ;
+ my $X ;
+ eval '
+ $X = tie(@h, "SubDB","recno.tmp", O_RDWR|O_CREAT, 0640, $DB_RECNO );
+ ' ;
+ die "Could not tie: $!" unless $X;
+
+ main::ok(73, $@ eq "") ;
+
+ my $ret = eval '$h[3] = 3 ; return $h[3] ' ;
+ main::ok(74, $@ eq "") ;
+ main::ok(75, $ret == 5) ;
+
+ my $value = 0;
+ $ret = eval '$X->put(1, 4) ; $X->get(1, $value) ; return $value' ;
+ main::ok(76, $@ eq "") ;
+ main::ok(77, $ret == 10) ;
+
+ $ret = eval ' R_NEXT eq main::R_NEXT ' ;
+ main::ok(78, $@ eq "" ) ;
+ main::ok(79, $ret == 1) ;
+
+ $ret = eval '$X->A_new_method(1) ' ;
+ main::ok(80, $@ eq "") ;
+ main::ok(81, $ret eq "[[11]]") ;
+
+ undef $X;
+ main::ok(82, main::safeUntie \@h);
+ unlink "SubDB.pm", "recno.tmp" ;
+
+}
+
+{
+
+ # test $#
+ my $self ;
+ unlink $Dfile;
+ ok(83, $self = tie @h, 'DB_File', $Dfile, O_RDWR|O_CREAT, 0640, $DB_RECNO ) ;
+ $h[0] = "abc" ;
+ $h[1] = "def" ;
+ $h[2] = "ghi" ;
+ $h[3] = "jkl" ;
+ ok(84, $FA ? $#h == 3 : $self->length() == 4) ;
+ undef $self ;
+ ok(85, safeUntie \@h);
+ my $x = docat($Dfile) ;
+ ok(86, $x eq "abc\ndef\nghi\njkl\n") ;
+
+ # $# sets array to same length
+ ok(87, $self = tie @h, 'DB_File', $Dfile, O_RDWR, 0640, $DB_RECNO ) ;
+ if ($FA)
+ { $#h = 3 }
+ else
+ { $self->STORESIZE(4) }
+ ok(88, $FA ? $#h == 3 : $self->length() == 4) ;
+ undef $self ;
+ ok(89, safeUntie \@h);
+ $x = docat($Dfile) ;
+ ok(90, $x eq "abc\ndef\nghi\njkl\n") ;
+
+ # $# sets array bigger
+ ok(91, $self = tie @h, 'DB_File', $Dfile, O_RDWR, 0640, $DB_RECNO ) ;
+ if ($FA)
+ { $#h = 6 }
+ else
+ { $self->STORESIZE(7) }
+ ok(92, $FA ? $#h == 6 : $self->length() == 7) ;
+ undef $self ;
+ ok(93, safeUntie \@h);
+ $x = docat($Dfile) ;
+ ok(94, $x eq "abc\ndef\nghi\njkl\n\n\n\n") ;
+
+ # $# sets array smaller
+ ok(95, $self = tie @h, 'DB_File', $Dfile, O_RDWR, 0640, $DB_RECNO ) ;
+ if ($FA)
+ { $#h = 2 }
+ else
+ { $self->STORESIZE(3) }
+ ok(96, $FA ? $#h == 2 : $self->length() == 3) ;
+ undef $self ;
+ ok(97, safeUntie \@h);
+ $x = docat($Dfile) ;
+ ok(98, $x eq "abc\ndef\nghi\n") ;
+
+ unlink $Dfile;
+
+
+}
+
+{
+ # DBM Filter tests
+ use warnings ;
+ use strict ;
+ my (@h, $db) ;
+ my ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
+ unlink $Dfile;
+
+ sub checkOutput
+ {
+ my($fk, $sk, $fv, $sv) = @_ ;
+
+ print "# Fetch Key : expected '$fk' got '$fetch_key'\n"
+ if $fetch_key ne $fk ;
+ print "# Fetch Value : expected '$fv' got '$fetch_value'\n"
+ if $fetch_value ne $fv ;
+ print "# Store Key : expected '$sk' got '$store_key'\n"
+ if $store_key ne $sk ;
+ print "# Store Value : expected '$sv' got '$store_value'\n"
+ if $store_value ne $sv ;
+ print "# \$_ : expected 'original' got '$_'\n"
+ if $_ ne 'original' ;
+
+ return
+ $fetch_key eq $fk && $store_key eq $sk &&
+ $fetch_value eq $fv && $store_value eq $sv &&
+ $_ eq 'original' ;
+ }
+
+ ok(99, $db = tie(@h, 'DB_File', $Dfile, O_RDWR|O_CREAT, 0640, $DB_RECNO ) );
+
+ $db->filter_fetch_key (sub { $fetch_key = $_ }) ;
+ $db->filter_store_key (sub { $store_key = $_ }) ;
+ $db->filter_fetch_value (sub { $fetch_value = $_}) ;
+ $db->filter_store_value (sub { $store_value = $_ }) ;
+
+ $_ = "original" ;
+
+ $h[0] = "joe" ;
+ # fk sk fv sv
+ ok(100, checkOutput( "", 0, "", "joe")) ;
+
+ ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
+ ok(101, $h[0] eq "joe");
+ # fk sk fv sv
+ ok(102, checkOutput( "", 0, "joe", "")) ;
+
+ ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
+ ok(103, $db->FIRSTKEY() == 0) ;
+ # fk sk fv sv
+ ok(104, checkOutput( 0, "", "", "")) ;
+
+ # replace the filters, but remember the previous set
+ my ($old_fk) = $db->filter_fetch_key
+ (sub { ++ $_ ; $fetch_key = $_ }) ;
+ my ($old_sk) = $db->filter_store_key
+ (sub { $_ *= 2 ; $store_key = $_ }) ;
+ my ($old_fv) = $db->filter_fetch_value
+ (sub { $_ = "[$_]"; $fetch_value = $_ }) ;
+ my ($old_sv) = $db->filter_store_value
+ (sub { s/o/x/g; $store_value = $_ }) ;
+
+ ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
+ $h[1] = "Joe" ;
+ # fk sk fv sv
+ ok(105, checkOutput( "", 2, "", "Jxe")) ;
+
+ ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
+ ok(106, $h[1] eq "[Jxe]");
+ # fk sk fv sv
+ ok(107, checkOutput( "", 2, "[Jxe]", "")) ;
+
+ ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
+ ok(108, $db->FIRSTKEY() == 1) ;
+ # fk sk fv sv
+ ok(109, checkOutput( 1, "", "", "")) ;
+
+ # put the original filters back
+ $db->filter_fetch_key ($old_fk);
+ $db->filter_store_key ($old_sk);
+ $db->filter_fetch_value ($old_fv);
+ $db->filter_store_value ($old_sv);
+
+ ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
+ $h[0] = "joe" ;
+ ok(110, checkOutput( "", 0, "", "joe")) ;
+
+ ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
+ ok(111, $h[0] eq "joe");
+ ok(112, checkOutput( "", 0, "joe", "")) ;
+
+ ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
+ ok(113, $db->FIRSTKEY() == 0) ;
+ ok(114, checkOutput( 0, "", "", "")) ;
+
+ # delete the filters
+ $db->filter_fetch_key (undef);
+ $db->filter_store_key (undef);
+ $db->filter_fetch_value (undef);
+ $db->filter_store_value (undef);
+
+ ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
+ $h[0] = "joe" ;
+ ok(115, checkOutput( "", "", "", "")) ;
+
+ ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
+ ok(116, $h[0] eq "joe");
+ ok(117, checkOutput( "", "", "", "")) ;
+
+ ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
+ ok(118, $db->FIRSTKEY() == 0) ;
+ ok(119, checkOutput( "", "", "", "")) ;
+
+ undef $db ;
+ ok(120, safeUntie \@h);
+ unlink $Dfile;
+}
+
+{
+ # DBM Filter with a closure
+
+ use warnings ;
+ use strict ;
+ my (@h, $db) ;
+
+ unlink $Dfile;
+ ok(121, $db = tie(@h, 'DB_File', $Dfile, O_RDWR|O_CREAT, 0640, $DB_RECNO ) );
+
+ my %result = () ;
+
+ sub Closure
+ {
+ my ($name) = @_ ;
+ my $count = 0 ;
+ my @kept = () ;
+
+ return sub { ++$count ;
+ push @kept, $_ ;
+ $result{$name} = "$name - $count: [@kept]" ;
+ }
+ }
+
+ $db->filter_store_key(Closure("store key")) ;
+ $db->filter_store_value(Closure("store value")) ;
+ $db->filter_fetch_key(Closure("fetch key")) ;
+ $db->filter_fetch_value(Closure("fetch value")) ;
+
+ $_ = "original" ;
+
+ $h[0] = "joe" ;
+ ok(122, $result{"store key"} eq "store key - 1: [0]");
+ ok(123, $result{"store value"} eq "store value - 1: [joe]");
+ ok(124, ! defined $result{"fetch key"} );
+ ok(125, ! defined $result{"fetch value"} );
+ ok(126, $_ eq "original") ;
+
+ ok(127, $db->FIRSTKEY() == 0 ) ;
+ ok(128, $result{"store key"} eq "store key - 1: [0]");
+ ok(129, $result{"store value"} eq "store value - 1: [joe]");
+ ok(130, $result{"fetch key"} eq "fetch key - 1: [0]");
+ ok(131, ! defined $result{"fetch value"} );
+ ok(132, $_ eq "original") ;
+
+ $h[7] = "john" ;
+ ok(133, $result{"store key"} eq "store key - 2: [0 7]");
+ ok(134, $result{"store value"} eq "store value - 2: [joe john]");
+ ok(135, $result{"fetch key"} eq "fetch key - 1: [0]");
+ ok(136, ! defined $result{"fetch value"} );
+ ok(137, $_ eq "original") ;
+
+ ok(138, $h[0] eq "joe");
+ ok(139, $result{"store key"} eq "store key - 3: [0 7 0]");
+ ok(140, $result{"store value"} eq "store value - 2: [joe john]");
+ ok(141, $result{"fetch key"} eq "fetch key - 1: [0]");
+ ok(142, $result{"fetch value"} eq "fetch value - 1: [joe]");
+ ok(143, $_ eq "original") ;
+
+ undef $db ;
+ ok(144, safeUntie \@h);
+ unlink $Dfile;
+}
+
+{
+ # DBM Filter recursion detection
+ use warnings ;
+ use strict ;
+ my (@h, $db) ;
+ unlink $Dfile;
+
+ ok(145, $db = tie(@h, 'DB_File', $Dfile, O_RDWR|O_CREAT, 0640, $DB_RECNO ) );
+
+ $db->filter_store_key (sub { $_ = $h[0] }) ;
+
+ eval '$h[1] = 1234' ;
+ ok(146, $@ =~ /^recursion detected in filter_store_key at/ );
+
+ undef $db ;
+ ok(147, safeUntie \@h);
+ unlink $Dfile;
+}
+
+
+{
+ # Examples from the POD
+
+ my $file = "xyzt" ;
+ {
+ my $redirect = new Redirect $file ;
+
+ use warnings FATAL => qw(all);
+ use strict ;
+ use DB_File ;
+
+ my $filename = "text" ;
+ unlink $filename ;
+
+ my @h ;
+ my $x = tie @h, "DB_File", $filename, O_RDWR|O_CREAT, 0640, $DB_RECNO
+ or die "Cannot open file 'text': $!\n" ;
+
+ # Add a few key/value pairs to the file
+ $h[0] = "orange" ;
+ $h[1] = "blue" ;
+ $h[2] = "yellow" ;
+
+ $FA ? push @h, "green", "black"
+ : $x->push("green", "black") ;
+
+ my $elements = $FA ? scalar @h : $x->length ;
+ print "The array contains $elements entries\n" ;
+
+ my $last = $FA ? pop @h : $x->pop ;
+ print "popped $last\n" ;
+
+ $FA ? unshift @h, "white"
+ : $x->unshift("white") ;
+ my $first = $FA ? shift @h : $x->shift ;
+ print "shifted $first\n" ;
+
+ # Check for existence of a key
+ print "Element 1 Exists with value $h[1]\n" if $h[1] ;
+
+ # use a negative index
+ print "The last element is $h[-1]\n" ;
+ print "The 2nd last element is $h[-2]\n" ;
+
+ undef $x ;
+ untie @h ;
+
+ unlink $filename ;
+ }
+
+ ok(148, docat_del($file) eq <<'EOM') ;
+The array contains 5 entries
+popped black
+shifted white
+Element 1 Exists with value blue
+The last element is green
+The 2nd last element is yellow
+EOM
+
+ my $save_output = "xyzt" ;
+ {
+ my $redirect = new Redirect $save_output ;
+
+ use warnings FATAL => qw(all);
+ use strict ;
+ our (@h, $H, $file, $i);
+ use DB_File ;
+ use Fcntl ;
+
+ $file = "text" ;
+
+ unlink $file ;
+
+ $H = tie @h, "DB_File", $file, O_RDWR|O_CREAT, 0640, $DB_RECNO
+ or die "Cannot open file $file: $!\n" ;
+
+ # first create a text file to play with
+ $h[0] = "zero" ;
+ $h[1] = "one" ;
+ $h[2] = "two" ;
+ $h[3] = "three" ;
+ $h[4] = "four" ;
+
+
+ # Print the records in order.
+ #
+ # The length method is needed here because evaluating a tied
+ # array in a scalar context does not return the number of
+ # elements in the array.
+
+ print "\nORIGINAL\n" ;
+ foreach $i (0 .. $H->length - 1) {
+ print "$i: $h[$i]\n" ;
+ }
+
+ # use the push & pop methods
+ $a = $H->pop ;
+ $H->push("last") ;
+ print "\nThe last record was [$a]\n" ;
+
+ # and the shift & unshift methods
+ $a = $H->shift ;
+ $H->unshift("first") ;
+ print "The first record was [$a]\n" ;
+
+ # Use the API to add a new record after record 2.
+ $i = 2 ;
+ $H->put($i, "Newbie", R_IAFTER) ;
+
+ # and a new record before record 1.
+ $i = 1 ;
+ $H->put($i, "New One", R_IBEFORE) ;
+
+ # delete record 3
+ $H->del(3) ;
+
+ # now print the records in reverse order
+ print "\nREVERSE\n" ;
+ for ($i = $H->length - 1 ; $i >= 0 ; -- $i)
+ { print "$i: $h[$i]\n" }
+
+ # same again, but use the API functions instead
+ print "\nREVERSE again\n" ;
+ my ($s, $k, $v) = (0, 0, 0) ;
+ for ($s = $H->seq($k, $v, R_LAST) ;
+ $s == 0 ;
+ $s = $H->seq($k, $v, R_PREV))
+ { print "$k: $v\n" }
+
+ undef $H ;
+ untie @h ;
+
+ unlink $file ;
+ }
+
+ ok(149, docat_del($save_output) eq <<'EOM') ;
+
+ORIGINAL
+0: zero
+1: one
+2: two
+3: three
+4: four
+
+The last record was [four]
+The first record was [zero]
+
+REVERSE
+5: last
+4: three
+3: Newbie
+2: one
+1: New One
+0: first
+
+REVERSE again
+5: last
+4: three
+3: Newbie
+2: one
+1: New One
+0: first
+EOM
+
+}
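# The point of the "length method is needed" note in the example above, as
# a sketch (file name "recno-count.tmp" is illustrative): $H->length always
# reports the record count, while "scalar @array" only does so on Perls
# with the full tied-array support that $FA probes for at the top of this
# script.
{
    use DB_File;
    use Fcntl;
    my @lines;
    my $H = tie @lines, 'DB_File', "recno-count.tmp", O_RDWR|O_CREAT, 0640, $DB_RECNO
        or die "tie failed: $!";
    $lines[0] = "zero"; $lines[1] = "one"; $lines[2] = "two";
    printf "length() sees %d records\n", $H->length;    # 3 everywhere
    printf "scalar sees %d records\n", scalar @lines;   # 3 only with FETCHSIZE support
    undef $H;
    untie @lines;
    unlink "recno-count.tmp";
}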
+
+{
+ # Bug ID 20001013.009
+ #
+ # test that $hash{KEY} = undef doesn't produce the warning
+ # Use of uninitialized value in null operation
+ use warnings ;
+ use strict ;
+ use DB_File ;
+
+ unlink $Dfile;
+ my @h ;
+ my $a = "";
+ local $SIG{__WARN__} = sub {$a = $_[0]} ;
+
+ tie @h, 'DB_File', $Dfile, O_RDWR|O_CREAT, 0664, $DB_RECNO
+ or die "Can't open file: $!\n" ;
+ $h[0] = undef;
+ ok(150, $a eq "") ;
+ ok(151, safeUntie \@h);
+ unlink $Dfile;
+}
+
+{
+ # test that %hash = () doesn't produce the warning
+ # Argument "" isn't numeric in entersub
+ use warnings ;
+ use strict ;
+ use DB_File ;
+ my $a = "";
+ local $SIG{__WARN__} = sub {$a = $_[0]} ;
+
+ unlink $Dfile;
+ my @h ;
+
+ tie @h, 'DB_File', $Dfile, O_RDWR|O_CREAT, 0664, $DB_RECNO
+ or die "Can't open file: $!\n" ;
+ @h = (); ;
+ ok(152, $a eq "") ;
+ ok(153, safeUntie \@h);
+ unlink $Dfile;
+}
+
+{
+ # Check that DBM Filter can cope with read-only $_
+
+ use warnings ;
+ use strict ;
+ my (@h, $db) ;
+ unlink $Dfile;
+
+ ok(154, $db = tie(@h, 'DB_File', $Dfile, O_RDWR|O_CREAT, 0640, $DB_RECNO ) );
+
+ $db->filter_fetch_key (sub { }) ;
+ $db->filter_store_key (sub { }) ;
+ $db->filter_fetch_value (sub { }) ;
+ $db->filter_store_value (sub { }) ;
+
+ $_ = "original" ;
+
+ $h[0] = "joe" ;
+ ok(155, $h[0] eq "joe");
+
+ eval { grep { $h[$_] } (1, 2, 3) };
+ ok (156, ! $@);
+
+
+ # delete the filters
+ $db->filter_fetch_key (undef);
+ $db->filter_store_key (undef);
+ $db->filter_fetch_value (undef);
+ $db->filter_store_value (undef);
+
+ $h[1] = "joe" ;
+
+ ok(157, $h[1] eq "joe");
+
+ eval { grep { $h[$_] } (1, 2, 3) };
+ ok (158, ! $@);
+
+ undef $db ;
+ untie @h;
+ unlink $Dfile;
+}
+
+# Only test splice if this is a newish version of Perl
+exit unless $FA ;
+
+# Test SPLICE
+
+{
+ # check that the splice warnings are under the same lexical control
+ # as their non-tied counterparts.
+
+ use warnings;
+ use strict;
+
+ my $a = '';
+ my @a = (1);
+ local $SIG{__WARN__} = sub {$a = $_[0]} ;
+
+ unlink $Dfile;
+ my @tied ;
+
+ tie @tied, 'DB_File', $Dfile, O_RDWR|O_CREAT, 0664, $DB_RECNO
+ or die "Can't open file: $!\n" ;
+
+ # uninitialized offset
+ use warnings;
+ my $offset ;
+ $a = '';
+ splice(@a, $offset);
+ ok(159, $a =~ /^Use of uninitialized value /);
+ $a = '';
+ splice(@tied, $offset);
+ ok(160, $a =~ /^Use of uninitialized value in splice/);
+
+ no warnings 'uninitialized';
+ $a = '';
+ splice(@a, $offset);
+ ok(161, $a eq '');
+ $a = '';
+ splice(@tied, $offset);
+ ok(162, $a eq '');
+
+ # uninitialized length
+ use warnings;
+ my $length ;
+ $a = '';
+ splice(@a, 0, $length);
+ ok(163, $a =~ /^Use of uninitialized value /);
+ $a = '';
+ splice(@tied, 0, $length);
+ ok(164, $a =~ /^Use of uninitialized value in splice/);
+
+ no warnings 'uninitialized';
+ $a = '';
+ splice(@a, 0, $length);
+ ok(165, $a eq '');
+ $a = '';
+ splice(@tied, 0, $length);
+ ok(166, $a eq '');
+
+ # offset past end of array
+ use warnings;
+ $a = '';
+ splice(@a, 3);
+ my $splice_end_array = ($a =~ /^splice\(\) offset past end of array/);
+ $a = '';
+ splice(@tied, 3);
+ ok(167, !$splice_end_array || $a =~ /^splice\(\) offset past end of array/);
+
+ no warnings 'misc';
+ $a = '';
+ splice(@a, 3);
+ ok(168, $a eq '');
+ $a = '';
+ splice(@tied, 3);
+ ok(169, $a eq '');
+
+ ok(170, safeUntie \@tied);
+ unlink $Dfile;
+}
+
+#
+# These are a few regression tests: bundles of five arguments to pass
+# to test_splice(). The first four arguments correspond to those
+# given to splice(), and the last says which context to call it in
+# (scalar, list or void).
+#
+# The expected result is not needed because we get that by running
+# Perl's built-in splice().
+#
+my @tests = ([ [ 'falsely', 'dinosaur', 'remedy', 'commotion',
+ 'rarely', 'paleness' ],
+ -4, -2,
+ [ 'redoubled', 'Taylorize', 'Zoe', 'halogen' ],
+ 'void' ],
+
+ [ [ 'a' ], -2, 1, [ 'B' ], 'void' ],
+
+ [ [ 'Hartley', 'Islandia', 'assents', 'wishful' ],
+ 0, -4,
+ [ 'maids' ],
+ 'void' ],
+
+ [ [ 'visibility', 'pocketful', 'rectangles' ],
+ -10, 0,
+ [ 'garbages' ],
+ 'void' ],
+
+ [ [ 'sleeplessly' ],
+ 8, -4,
+ [ 'Margery', 'clearing', 'repercussion', 'clubs',
+ 'arise' ],
+ 'void' ],
+
+ [ [ 'chastises', 'recalculates' ],
+ 0, 0,
+ [ 'momentariness', 'mediates', 'accents', 'toils',
+ 'regaled' ],
+ 'void' ],
+
+ [ [ 'b', '' ],
+ 9, 8,
+ [ 'otrb', 'stje', 'ixrpw', 'vxfx', 'lhhf' ],
+ 'scalar' ],
+
+ [ [ 'b', '' ],
+ undef, undef,
+ [ 'otrb', 'stje', 'ixrpw', 'vxfx', 'lhhf' ],
+ 'scalar' ],
+
+ [ [ 'riheb' ], -8, undef, [], 'void' ],
+
+ [ [ 'uft', 'qnxs', '' ],
+ 6, -2,
+ [ 'znp', 'mhnkh', 'bn' ],
+ 'void' ],
+ );
+
+my $testnum = 171;
+my $failed = 0;
+require POSIX; my $tmp = POSIX::tmpnam();
+foreach my $test (@tests) {
+ my $err = test_splice(@$test);
+ if (defined $err) {
+ print STDERR "# failed: ", Dumper($test);
+ print STDERR "# error: $err\n";
+ $failed = 1;
+ ok($testnum++, 0);
+ }
+ else { ok($testnum++, 1) }
+}
+
+if ($failed) {
+ # Not worth running the random ones
+ print STDERR '# skipping ', $testnum++, "\n";
+}
+else {
+ # A thousand randomly-generated tests
+ $failed = 0;
+ srand(0);
+ foreach (0 .. 1000 - 1) {
+ my $test = rand_test();
+ my $err = test_splice(@$test);
+ if (defined $err) {
+ print STDERR "# failed: ", Dumper($test);
+ print STDERR "# error: $err\n";
+ $failed = 1;
+ print STDERR "# skipping any remaining random tests\n";
+ last;
+ }
+ }
+
+ ok($testnum++, not $failed);
+}
+
+die "testnum ($testnum) != total_tests ($total_tests) + 1"
+ if $testnum != $total_tests + 1;
+
+exit ;
+
+# Subroutines for SPLICE testing
+
+# test_splice()
+#
+# Test the new splice() against Perl's built-in one. The first four
+# parameters are those passed to splice(), except that the lists must
+# be (explicitly) passed by reference, and are not actually modified.
+# (It's just a test!) The last argument specifies the context in
+# which to call the functions: 'list', 'scalar', or 'void'.
+#
+# Returns:
+# undef, if the two splices give the same results for the given
+# arguments and context;
+#
+# an error message showing the difference, otherwise.
+#
+# Reads global variable $tmp.
+#
+sub test_splice {
+ die 'usage: test_splice(array, offset, length, list, context)' if @_ != 5;
+ my ($array, $offset, $length, $list, $context) = @_;
+ my @array = @$array;
+ my @list = @$list;
+
+ unlink $tmp;
+
+ my @h;
+ my $H = tie @h, 'DB_File', $tmp, O_CREAT|O_RDWR, 0644, $DB_RECNO
+ or die "cannot open $tmp: $!";
+
+ my $i = 0;
+ foreach ( @array ) { $h[$i++] = $_ }
+
+ return "basic DB_File sanity check failed"
+ if list_diff(\@array, \@h);
+
+ # Output from splice():
+ # Returned value (munged a bit), error msg, warnings
+ #
+ my ($s_r, $s_error, @s_warnings);
+
+ my $gather_warning = sub { push @s_warnings, $_[0] };
+ if ($context eq 'list') {
+ my @r;
+ eval {
+ local $SIG{__WARN__} = $gather_warning;
+ @r = splice @array, $offset, $length, @list;
+ };
+ $s_error = $@;
+ $s_r = \@r;
+ }
+ elsif ($context eq 'scalar') {
+ my $r;
+ eval {
+ local $SIG{__WARN__} = $gather_warning;
+ $r = splice @array, $offset, $length, @list;
+ };
+ $s_error = $@;
+ $s_r = [ $r ];
+ }
+ elsif ($context eq 'void') {
+ eval {
+ local $SIG{__WARN__} = $gather_warning;
+ splice @array, $offset, $length, @list;
+ };
+ $s_error = $@;
+ $s_r = [];
+ }
+ else {
+ die "bad context $context";
+ }
+
+ foreach ($s_error, @s_warnings) {
+ chomp;
+ s/ at \S+ line \d+\.$//;
+ }
+
+ # Now do the same for DB_File's version of splice
+ my ($ms_r, $ms_error, @ms_warnings);
+ $gather_warning = sub { push @ms_warnings, $_[0] };
+ if ($context eq 'list') {
+ my @r;
+ eval {
+ local $SIG{__WARN__} = $gather_warning;
+ @r = splice @h, $offset, $length, @list;
+ };
+ $ms_error = $@;
+ $ms_r = \@r;
+ }
+ elsif ($context eq 'scalar') {
+ my $r;
+ eval {
+ local $SIG{__WARN__} = $gather_warning;
+ $r = splice @h, $offset, $length, @list;
+ };
+ $ms_error = $@;
+ $ms_r = [ $r ];
+ }
+ elsif ($context eq 'void') {
+ eval {
+ local $SIG{__WARN__} = $gather_warning;
+ splice @h, $offset, $length, @list;
+ };
+ $ms_error = $@;
+ $ms_r = [];
+ }
+ else {
+ die "bad context $context";
+ }
+
+ foreach ($ms_error, @ms_warnings) {
+ chomp;
+ s/ at \S+ line \d+\.?.*//s;
+ }
+
+ return "different errors: '$s_error' vs '$ms_error'"
+ if $s_error ne $ms_error;
+ return('different return values: ' . Dumper($s_r) . ' vs ' . Dumper($ms_r))
+ if list_diff($s_r, $ms_r);
+ return('different changed list: ' . Dumper(\@array) . ' vs ' . Dumper(\@h))
+ if list_diff(\@array, \@h);
+
+ if ((scalar @s_warnings) != (scalar @ms_warnings)) {
+ return 'different number of warnings';
+ }
+
+ while (@s_warnings) {
+ my $sw = shift @s_warnings;
+ my $msw = shift @ms_warnings;
+
+ if (defined $sw and defined $msw) {
+ $msw =~ s/ \(.+\)$//;
+ $msw =~ s/ in splice$// if $] < 5.006;
+ if ($sw ne $msw) {
+ return "different warning: '$sw' vs '$msw'";
+ }
+ }
+ elsif (not defined $sw and not defined $msw) {
+ # Okay.
+ }
+ else {
+ return "one warning defined, another undef";
+ }
+ }
+
+ undef $H;
+ untie @h;
+
+ open(TEXT, $tmp) or die "cannot open $tmp: $!";
+ @h = <TEXT>; normalise @h; chomp @h;
+ close TEXT or die "cannot close $tmp: $!";
+ return('list is different when re-read from disk: '
+ . Dumper(\@array) . ' vs ' . Dumper(\@h))
+ if list_diff(\@array, \@h);
+
+ return undef; # success
+}
+
+
+# list_diff()
+#
+# Do two lists differ?
+#
+# Parameters:
+# reference to first list
+# reference to second list
+#
+# Returns true iff they differ. Only works for lists of (string or
+# undef).
+#
+# Surely there is a better way to do this?
+#
+sub list_diff {
+ die 'usage: list_diff(ref to first list, ref to second list)'
+ if @_ != 2;
+ my ($a, $b) = @_;
+ my @a = @$a; my @b = @$b;
+ return 1 if (scalar @a) != (scalar @b);
+ for (my $i = 0; $i < @a; $i++) {
+ my ($ae, $be) = ($a[$i], $b[$i]);
+ if (defined $ae and defined $be) {
+ return 1 if $ae ne $be;
+ }
+ elsif (not defined $ae and not defined $be) {
+ # Two undefined values are 'equal'
+ }
+ else {
+ return 1;
+ }
+ }
+ return 0;
+}
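# One possible answer to the "better way?" question above, kept to the same
# contract (lists of strings or undef; true iff they differ).  A sketch only;
# nothing in the tests calls it.
sub list_diff_alt {
    my ($x, $y) = @_;
    return 1 if @$x != @$y;
    for my $i (0 .. $#$x) {
        my ($p, $q) = ($x->[$i], $y->[$i]);
        return 1 unless defined($p) == defined($q);
        next unless defined $p;
        return 1 if $p ne $q;
    }
    return 0;
}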
+
+
+# rand_test()
+#
+# Think up a random ARRAY, OFFSET, LENGTH, LIST, and context.
+# ARRAY or LIST might be empty, and OFFSET or LENGTH might be
+# undefined. Return a 'test' - a listref of these five things.
+#
+sub rand_test {
+ die 'usage: rand_test()' if @_;
+ my @contexts = qw<list scalar void>;
+ my $context = $contexts[int(rand @contexts)];
+ return [ rand_list(),
+ (rand() < 0.5) ? (int(rand(20)) - 10) : undef,
+ (rand() < 0.5) ? (int(rand(20)) - 10) : undef,
+ rand_list(),
+ $context ];
+}
+
+
+sub rand_list {
+ die 'usage: rand_list()' if @_;
+ my @r;
+
+ while (rand() > 0.1 * (scalar @r + 1)) {
+ push @r, rand_word();
+ }
+ return \@r;
+}
+
+
+sub rand_word {
+ die 'usage: rand_word()' if @_;
+ my $r = '';
+ my @chars = qw<a b c d e f g h i j k l m n o p q r s t u v w x y z>;
+ while (rand() > 0.1 * (length($r) + 1)) {
+ $r .= $chars[int(rand(scalar @chars))];
+ }
+ return $r;
+}
+
+
diff --git a/bdb/perl.DB_File/typemap b/bdb/perl/DB_File/typemap
index 55439ee76d9..8ad7b1282dc 100644
--- a/bdb/perl.DB_File/typemap
+++ b/bdb/perl/DB_File/typemap
@@ -15,19 +15,21 @@ DBTKEY T_dbtkeydatum
INPUT
T_dbtkeydatum
- ckFilter($arg, filter_store_key, \"filter_store_key\");
+ DBM_ckFilter($arg, filter_store_key, \"filter_store_key\");
DBT_clear($var) ;
- if (db->type != DB_RECNO) {
- $var.data = SvPV($arg, PL_na);
- $var.size = (int)PL_na;
- }
- else {
- Value = GetRecnoKey(aTHX_ db, SvIV($arg)) ;
- $var.data = & Value;
- $var.size = (int)sizeof(recno_t);
+ if (SvOK($arg)){
+ if (db->type != DB_RECNO) {
+ $var.data = SvPV($arg, PL_na);
+ $var.size = (int)PL_na;
+ }
+ else {
+ Value = GetRecnoKey(aTHX_ db, SvIV($arg)) ;
+ $var.data = & Value;
+ $var.size = (int)sizeof(recno_t);
+ }
}
T_dbtdatum
- ckFilter($arg, filter_store_value, \"filter_store_value\");
+ DBM_ckFilter($arg, filter_store_value, \"filter_store_value\");
DBT_clear($var) ;
if (SvOK($arg)) {
$var.data = SvPV($arg, PL_na);
diff --git a/bdb/perl.DB_File/version.c b/bdb/perl/DB_File/version.c
index 82b3e8b27b9..03b17c18e60 100644
--- a/bdb/perl.DB_File/version.c
+++ b/bdb/perl/DB_File/version.c
@@ -3,12 +3,12 @@
version.c -- Perl 5 interface to Berkeley DB
written by Paul Marquess <Paul.Marquess@btinternet.com>
- last modified 16th January 2000
- version 1.73
+ last modified 2nd Jan 2002
+ version 1.802
All comments/suggestions/problems are welcome
- Copyright (c) 1995-2001 Paul Marquess. All rights reserved.
+ Copyright (c) 1995-2002 Paul Marquess. All rights reserved.
This program is free software; you can redistribute it and/or
modify it under the same terms as Perl itself.
@@ -22,6 +22,7 @@
*/
+#define PERL_NO_GET_CONTEXT
#include "EXTERN.h"
#include "perl.h"
#include "XSUB.h"
diff --git a/bdb/qam/qam.c b/bdb/qam/qam.c
index 0c9f453044f..b10f8743439 100644
--- a/bdb/qam/qam.c
+++ b/bdb/qam/qam.c
@@ -1,14 +1,14 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1999, 2000
+ * Copyright (c) 1999-2002
* Sleepycat Software. All rights reserved.
*/
#include "db_config.h"
#ifndef lint
-static const char revid[] = "$Id: qam.c,v 11.72 2001/01/16 20:10:55 ubell Exp $";
+static const char revid[] = "$Id: qam.c,v 11.134 2002/08/13 20:46:08 ubell Exp $";
#endif /* not lint */
#ifndef NO_SYSTEM_INCLUDES
@@ -18,20 +18,20 @@ static const char revid[] = "$Id: qam.c,v 11.72 2001/01/16 20:10:55 ubell Exp $"
#endif
#include "db_int.h"
-#include "db_page.h"
-#include "db_shash.h"
-#include "db_am.h"
-#include "mp.h"
-#include "lock.h"
-#include "log.h"
-#include "btree.h"
-#include "qam.h"
-
+#include "dbinc/db_page.h"
+#include "dbinc/db_shash.h"
+#include "dbinc/btree.h"
+#include "dbinc/lock.h"
+#include "dbinc/log.h"
+#include "dbinc/qam.h"
+
+static int __qam_bulk __P((DBC *, DBT *, u_int32_t));
static int __qam_c_close __P((DBC *, db_pgno_t, int *));
static int __qam_c_del __P((DBC *));
static int __qam_c_destroy __P((DBC *));
static int __qam_c_get __P((DBC *, DBT *, DBT *, u_int32_t, db_pgno_t *));
static int __qam_c_put __P((DBC *, DBT *, DBT *, u_int32_t, db_pgno_t *));
+static int __qam_consume __P((DBC *, QMETA *, db_recno_t));
static int __qam_getno __P((DB *, const DBT *, db_recno_t *));
/*
@@ -61,17 +61,16 @@ __qam_position(dbc, recnop, mode, exactp)
pg = QAM_RECNO_PAGE(dbp, *recnop);
if ((ret = __db_lget(dbc, 0, pg, mode == QAM_READ ?
- DB_LOCK_READ : DB_LOCK_WRITE, 0, &cp->lock)) != 0)
+ DB_LOCK_READ : DB_LOCK_WRITE, 0, &cp->lock)) != 0)
return (ret);
cp->page = NULL;
*exactp = 0;
if ((ret = __qam_fget(dbp, &pg,
- mode == QAM_WRITE ? DB_MPOOL_CREATE : 0,
- &cp->page)) != 0) {
+ mode == QAM_WRITE ? DB_MPOOL_CREATE : 0, &cp->page)) != 0) {
/* We did not fetch it, we can release the lock. */
(void)__LPUT(dbc, cp->lock);
- cp->lock.off = LOCK_INVALID;
- if (mode != QAM_WRITE && (ret == EINVAL || ret == ENOENT))
+ if (mode != QAM_WRITE &&
+ (ret == DB_PAGE_NOTFOUND || ret == ENOENT))
return (0);
return (ret);
}
@@ -88,7 +87,7 @@ __qam_position(dbc, recnop, mode, exactp)
}
qp = QAM_GET_RECORD(dbp, cp->page, cp->indx);
- *exactp = F_ISSET(qp, QAM_VALID);
+ *exactp = F_ISSET(qp, QAM_VALID) ? 1 : 0;
return (ret);
}
@@ -116,9 +115,9 @@ __qam_pitem(dbc, pagep, indx, recno, data)
DBT olddata, pdata, *datap;
QAMDATA *qp;
QUEUE *t;
- u_int32_t size;
+ u_int32_t alloced;
u_int8_t *dest, *p;
- int alloced, ret;
+ int ret;
alloced = ret = 0;
@@ -131,7 +130,6 @@ __qam_pitem(dbc, pagep, indx, recno, data)
qp = QAM_GET_RECORD(dbp, pagep, indx);
p = qp->data;
- size = data->size;
datap = data;
if (F_ISSET(data, DB_DBT_PARTIAL)) {
if (data->doff + data->dlen > t->re_len) {
@@ -159,12 +157,12 @@ len_err: __db_err(dbp->dbenv,
* to log so that both this and the recovery code is simpler.
*/
- if (DB_LOGGING(dbc) || !F_ISSET(qp, QAM_VALID)) {
+ if (DBC_LOGGING(dbc) || !F_ISSET(qp, QAM_VALID)) {
datap = &pdata;
memset(datap, 0, sizeof(*datap));
if ((ret = __os_malloc(dbp->dbenv,
- t->re_len, NULL, &datap->data)) != 0)
+ t->re_len, &datap->data)) != 0)
return (ret);
alloced = 1;
datap->size = t->re_len;
@@ -188,14 +186,14 @@ len_err: __db_err(dbp->dbenv,
}
no_partial:
- if (DB_LOGGING(dbc)) {
+ if (DBC_LOGGING(dbc)) {
olddata.size = 0;
if (F_ISSET(qp, QAM_SET)) {
olddata.data = qp->data;
olddata.size = t->re_len;
}
- if ((ret = __qam_add_log(dbp->dbenv, dbc->txn, &LSN(pagep),
- 0, dbp->log_fileid, &LSN(pagep), pagep->pgno,
+ if ((ret = __qam_add_log(dbp, dbc->txn, &LSN(pagep),
+ 0, &LSN(pagep), pagep->pgno,
indx, recno, datap, qp->flags,
olddata.size == 0 ? NULL : &olddata)) != 0)
goto err;
@@ -207,7 +205,7 @@ no_partial:
memset(p + datap->size, t->re_pad, t->re_len - datap->size);
err: if (alloced)
- __os_free(datap->data, t->re_len);
+ __os_free(dbp->dbenv, datap->data);
return (ret);
}
@@ -223,23 +221,37 @@ __qam_c_put(dbc, key, data, flags, pgnop)
u_int32_t flags;
db_pgno_t *pgnop;
{
- QUEUE_CURSOR *cp;
DB *dbp;
DB_LOCK lock;
+ DB_MPOOLFILE *mpf;
QMETA *meta;
+ QUEUE_CURSOR *cp;
db_pgno_t pg;
db_recno_t new_cur, new_first;
u_int32_t opcode;
int exact, ret, t_ret;
- COMPQUIET(key, NULL);
-
dbp = dbc->dbp;
+ mpf = dbp->mpf;
if (pgnop != NULL)
*pgnop = PGNO_INVALID;
cp = (QUEUE_CURSOR *)dbc->internal;
+ switch (flags) {
+ case DB_KEYFIRST:
+ case DB_KEYLAST:
+ if ((ret = __qam_getno(dbp, key, &cp->recno)) != 0)
+ return (ret);
+ /* FALLTHROUGH */
+ case DB_CURRENT:
+ break;
+ default:
+ /* The interface shouldn't let anything else through. */
+ DB_ASSERT(0);
+ return (__db_ferr(dbp->dbenv, "__qam_c_put", flags));
+ }
+
/* Write lock the record. */
if ((ret = __db_lget(dbc,
0, cp->recno, DB_LOCK_WRITE, DB_LOCK_RECORD, &lock)) != 0)
@@ -252,29 +264,14 @@ __qam_c_put(dbc, key, data, flags, pgnop)
return (ret);
}
- if (exact && flags == DB_NOOVERWRITE) {
- ret = __TLPUT(dbc, lock);
- /* Doing record locking, release the page lock */
- if ((t_ret = __LPUT(dbc, cp->lock)) == 0)
- cp->lock.off = LOCK_INVALID;
- else
- if (ret == 0)
- ret = t_ret;
- if ((t_ret =
- __qam_fput(dbp, cp->pgno, cp->page, 0)) != 0 && ret == 0)
- ret = t_ret;
- cp->page = NULL;
- return (ret == 0 ? DB_KEYEXIST : ret);
- }
-
/* Put the item on the page. */
ret = __qam_pitem(dbc, (QPAGE *)cp->page, cp->indx, cp->recno, data);
/* Doing record locking, release the page lock */
if ((t_ret = __LPUT(dbc, cp->lock)) != 0 && ret == 0)
ret = t_ret;
- if ((t_ret =
- __qam_fput(dbp, cp->pgno, cp->page, DB_MPOOL_DIRTY)) && ret == 0)
+ if ((t_ret = __qam_fput(
+ dbp, cp->pgno, cp->page, DB_MPOOL_DIRTY)) != 0 && ret == 0)
ret = t_ret;
cp->page = NULL;
cp->lock = lock;
@@ -284,11 +281,15 @@ __qam_c_put(dbc, key, data, flags, pgnop)
/* We may need to reset the head or tail of the queue. */
pg = ((QUEUE *)dbp->q_internal)->q_meta;
- if ((ret = __db_lget(dbc, 0, pg, DB_LOCK_WRITE, 0, &lock)) != 0)
+
+ /*
+ * Get the meta page first, we don't want to write lock it while
+ * trying to pin it.
+ */
+ if ((ret = mpf->get(mpf, &pg, 0, &meta)) != 0)
return (ret);
- if ((ret = memp_fget(dbp->mpf, &pg, 0, &meta)) != 0) {
- /* We did not fetch it, we can release the lock. */
- (void)__LPUT(dbc, lock);
+ if ((ret = __db_lget(dbc, 0, pg, DB_LOCK_WRITE, 0, &lock)) != 0) {
+ (void)mpf->put(mpf, meta, 0);
return (ret);
}
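
This hunk (and the matching changes in the functions below) reorders meta-page access: the page is pinned through the mpool handle first, then the page lock is taken, and the pin is dropped if the lock cannot be acquired. A small sketch of that ordering, pulled out for clarity; it uses the internal names from this file and is illustrative only:

    static int
    pin_then_lock_meta(DBC *dbc, DB_MPOOLFILE *mpf, db_pgno_t pg,
        QMETA **metap, DB_LOCK *lockp)
    {
            int ret;

            /* Pin first, so we never wait for the buffer pool while
             * already holding the write lock on the page. */
            if ((ret = mpf->get(mpf, &pg, 0, metap)) != 0)
                    return (ret);
            if ((ret = __db_lget(dbc, 0, pg, DB_LOCK_WRITE, 0, lockp)) != 0)
                    (void)mpf->put(mpf, *metap, 0);
            return (ret);
    }
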
@@ -313,7 +314,8 @@ __qam_c_put(dbc, key, data, flags, pgnop)
} else {
if (QAM_BEFORE_FIRST(meta, cp->recno) &&
(meta->first_recno <= meta->cur_recno ||
- meta->first_recno - cp->recno < cp->recno - meta->cur_recno)) {
+ meta->first_recno - cp->recno <
+ cp->recno - meta->cur_recno)) {
new_first = cp->recno;
opcode |= QAM_SETFIRST;
}
@@ -321,7 +323,8 @@ __qam_c_put(dbc, key, data, flags, pgnop)
if (meta->cur_recno == cp->recno ||
(QAM_AFTER_CURRENT(meta, cp->recno) &&
(meta->first_recno <= meta->cur_recno ||
- cp->recno - meta->cur_recno <= meta->first_recno - cp->recno))) {
+ cp->recno - meta->cur_recno <=
+ meta->first_recno - cp->recno))) {
new_cur = cp->recno + 1;
if (new_cur == RECNO_OOB)
new_cur++;
@@ -329,10 +332,12 @@ __qam_c_put(dbc, key, data, flags, pgnop)
}
}
- if (opcode != 0 && DB_LOGGING(dbc)) {
- ret = __qam_mvptr_log(dbp->dbenv, dbc->txn, &meta->dbmeta.lsn,
- 0, opcode, dbp->log_fileid, meta->first_recno, new_first,
- meta->cur_recno, new_cur, &meta->dbmeta.lsn);
+ if (opcode != 0 && DBC_LOGGING(dbc)) {
+ ret = __qam_mvptr_log(dbp, dbc->txn, &meta->dbmeta.lsn,
+ 0, opcode, meta->first_recno, new_first,
+ meta->cur_recno, new_cur, &meta->dbmeta.lsn, PGNO_BASE_MD);
+ if (ret != 0)
+ opcode = 0;
}
if (opcode & QAM_SETCUR)
@@ -340,9 +345,8 @@ __qam_c_put(dbc, key, data, flags, pgnop)
if (opcode & QAM_SETFIRST)
meta->first_recno = new_first;
- if ((t_ret =
- memp_fput(dbp->mpf, meta, opcode != 0 ? DB_MPOOL_DIRTY : 0)) != 0 &&
- ret == 0)
+ if ((t_ret = mpf->put(
+ mpf, meta, opcode != 0 ? DB_MPOOL_DIRTY : 0)) != 0 && ret == 0)
ret = t_ret;
/* Don't hold the meta page long term. */
@@ -352,70 +356,42 @@ __qam_c_put(dbc, key, data, flags, pgnop)
}
/*
- * __qam_put --
- * Add a record to the queue.
- * If we are doing anything but appending, just call qam_c_put to do the
- * work. Otherwise we fast path things here.
+ * __qam_append --
+ * Perform a put(DB_APPEND) in queue.
*
- * PUBLIC: int __qam_put __P((DB *, DB_TXN *, DBT *, DBT *, u_int32_t));
+ * PUBLIC: int __qam_append __P((DBC *, DBT *, DBT *));
*/
int
-__qam_put(dbp, txn, key, data, flags)
- DB *dbp;
- DB_TXN *txn;
+__qam_append(dbc, key, data)
+ DBC *dbc;
DBT *key, *data;
- u_int32_t flags;
{
- QUEUE_CURSOR *cp;
- DBC *dbc;
+ DB *dbp;
DB_LOCK lock;
+ DB_MPOOLFILE *mpf;
QMETA *meta;
QPAGE *page;
QUEUE *qp;
+ QUEUE_CURSOR *cp;
db_pgno_t pg;
db_recno_t recno;
int ret, t_ret;
- PANIC_CHECK(dbp->dbenv);
- DB_CHECK_TXN(dbp, txn);
-
- /* Allocate a cursor. */
- if ((ret = dbp->cursor(dbp, txn, &dbc, DB_WRITELOCK)) != 0)
- return (ret);
-
- DEBUG_LWRITE(dbc, dbc->txn, "qam_put", key, data, flags);
-
+ dbp = dbc->dbp;
+ mpf = dbp->mpf;
cp = (QUEUE_CURSOR *)dbc->internal;
- /* Check for invalid flags. */
- if ((ret = __db_putchk(dbp,
- key, data, flags, F_ISSET(dbp, DB_AM_RDONLY), 0)) != 0)
- goto done;
-
- /* If not appending, then just call the cursor routine */
- if (flags != DB_APPEND) {
- if ((ret = __qam_getno(dbp, key, &cp->recno)) != 0)
- goto done;
-
- ret = __qam_c_put(dbc, NULL, data, flags, NULL);
- goto done;
- }
-
- /* Write lock the meta page. */
pg = ((QUEUE *)dbp->q_internal)->q_meta;
- if ((ret = __db_lget(dbc, 0, pg, DB_LOCK_WRITE, 0, &lock)) != 0)
- goto done;
- if ((ret = memp_fget(dbp->mpf, &pg, 0, &meta)) != 0) {
- /* We did not fetch it, we can release the lock. */
- (void)__LPUT(dbc, lock);
- goto done;
- }
-
- /* Record that we are going to allocate a record. */
- if (DB_LOGGING(dbc)) {
- __qam_inc_log(dbp->dbenv,
- dbc->txn, &meta->dbmeta.lsn,
- 0, dbp->log_fileid, &meta->dbmeta.lsn);
+ /*
+ * Get the meta page first, we don't want to write lock it while
+ * trying to pin it.
+ */
+ if ((ret = mpf->get(mpf, &pg, 0, &meta)) != 0)
+ return (ret);
+ /* Write lock the meta page. */
+ if ((ret = __db_lget(dbc, 0, pg, DB_LOCK_WRITE, 0, &lock)) != 0) {
+ (void)mpf->put(mpf, meta, 0);
+ return (ret);
}
/* Get the next record number. */
@@ -436,15 +412,17 @@ __qam_put(dbp, txn, key, data, flags)
meta->first_recno = recno;
/* Lock the record and release meta page lock. */
- if ((ret = __db_lget(dbc,
- 1, recno, DB_LOCK_WRITE, DB_LOCK_RECORD, &lock)) != 0)
+ if ((ret = __db_lget(dbc, LCK_COUPLE_ALWAYS,
+ recno, DB_LOCK_WRITE, DB_LOCK_RECORD, &lock)) != 0) {
+ (void)__LPUT(dbc, lock);
goto err;
+ }
/*
* The application may modify the data based on the selected record
* number.
*/
- if (flags == DB_APPEND && dbc->dbp->db_append_recno != NULL &&
+ if (dbc->dbp->db_append_recno != NULL &&
(ret = dbc->dbp->db_append_recno(dbc->dbp, data, recno)) != 0) {
(void)__LPUT(dbc, lock);
goto err;
@@ -484,16 +462,20 @@ __qam_put(dbp, txn, key, data, flags)
/* Return the record number to the user. */
if (ret == 0)
- ret = __db_retcopy(dbp, key,
- &recno, sizeof(recno), &dbc->rkey.data, &dbc->rkey.ulen);
+ ret = __db_retcopy(dbp->dbenv, key,
+ &recno, sizeof(recno), &dbc->rkey->data, &dbc->rkey->ulen);
+
+ /* Position the cursor on this record. */
+ cp->recno = recno;
/* See if we are leaving the extent. */
qp = (QUEUE *) dbp->q_internal;
- if (qp->page_ext != 0
- && (recno % (qp->page_ext * qp->rec_page) == 0
- || recno == UINT32_T_MAX)) {
- if ((ret =
- __db_lget(dbc, 0, pg, DB_LOCK_WRITE, 0, &lock)) != 0)
+ if (qp->page_ext != 0 &&
+ (recno % (qp->page_ext * qp->rec_page) == 0 ||
+ recno == UINT32_T_MAX)) {
+ if ((ret = __db_lget(dbc,
+ 0, ((QUEUE *)dbp->q_internal)->q_meta,
+ DB_LOCK_WRITE, 0, &lock)) != 0)
goto err;
if (!QAM_AFTER_CURRENT(meta, recno))
ret = __qam_fclose(dbp, pg);
@@ -502,13 +484,7 @@ __qam_put(dbp, txn, key, data, flags)
err:
/* Release the meta page. */
- if ((t_ret
- = memp_fput(dbp->mpf, meta, DB_MPOOL_DIRTY)) != 0 && ret == 0)
- ret = t_ret;
-
-done:
- /* Discard the cursor. */
- if ((t_ret = dbc->c_close(dbc)) != 0 && ret == 0)
+ if ((t_ret = mpf->put(mpf, meta, DB_MPOOL_DIRTY)) != 0 && ret == 0)
ret = t_ret;
return (ret);
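
__qam_append is the fast path behind DB->put with DB_APPEND. An application-side sketch of driving it through the public API (assumes an already-open Queue handle `dbp`; the stamp_recno callback is hypothetical and would be registered with dbp->set_append_recno() before open):

    #include <stdio.h>
    #include <string.h>
    #include <db.h>

    /* Optional db_append_recno callback: write the allocated record number
     * into the record itself.  Assumes re_len >= sizeof(db_recno_t). */
    static int
    stamp_recno(DB *dbp, DBT *data, db_recno_t recno)
    {
            memcpy(data->data, &recno, sizeof(recno));
            return (0);
    }

    static int
    queue_append(DB *dbp, DB_TXN *txn, void *buf, u_int32_t len)
    {
            DBT key, data;
            db_recno_t recno;
            int ret;

            memset(&key, 0, sizeof(key));
            memset(&data, 0, sizeof(data));
            key.data = &recno;              /* DB_APPEND returns the new recno here */
            key.ulen = sizeof(recno);
            key.flags = DB_DBT_USERMEM;
            data.data = buf;
            data.size = len;

            if ((ret = dbp->put(dbp, txn, &key, &data, DB_APPEND)) == 0)
                    printf("appended record %lu\n", (unsigned long)recno);
            return (ret);
    }
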
@@ -522,50 +498,57 @@ static int
__qam_c_del(dbc)
DBC *dbc;
{
- QUEUE_CURSOR *cp;
DB *dbp;
DBT data;
DB_LOCK lock;
+ DB_MPOOLFILE *mpf;
PAGE *pagep;
QAMDATA *qp;
QMETA *meta;
+ QUEUE_CURSOR *cp;
db_pgno_t pg;
+ db_recno_t first;
int exact, ret, t_ret;
dbp = dbc->dbp;
+ mpf = dbp->mpf;
cp = (QUEUE_CURSOR *)dbc->internal;
pg = ((QUEUE *)dbp->q_internal)->q_meta;
- if ((ret = __db_lget(dbc, 0, pg, DB_LOCK_READ, 0, &lock)) != 0)
+ /*
+ * Get the meta page first, we don't want to write lock it while
+ * trying to pin it.
+ */
+ if ((ret = mpf->get(mpf, &pg, 0, &meta)) != 0)
return (ret);
- if ((ret = memp_fget(dbp->mpf, &pg, 0, &meta)) != 0) {
- /* We did not fetch it, we can release the lock. */
- (void)__LPUT(dbc, lock);
+ /* Write lock the meta page. */
+ if ((ret = __db_lget(dbc, 0, pg, DB_LOCK_READ, 0, &lock)) != 0) {
+ (void)mpf->put(mpf, meta, 0);
return (ret);
}
if (QAM_NOT_VALID(meta, cp->recno))
ret = DB_NOTFOUND;
+ first = meta->first_recno;
+
/* Don't hold the meta page long term. */
if ((t_ret = __LPUT(dbc, lock)) != 0 && ret == 0)
ret = t_ret;
- if ((t_ret = memp_fput(dbp->mpf, meta, 0)) != 0 && ret == 0)
- ret = t_ret;
if (ret != 0)
- return (ret);
+ goto err1;
if ((ret = __db_lget(dbc,
0, cp->recno, DB_LOCK_WRITE, DB_LOCK_RECORD, &lock)) != 0)
- return (ret);
+ goto err1;
cp->lock_mode = DB_LOCK_WRITE;
/* Find the record ; delete only deletes exact matches. */
if ((ret = __qam_position(dbc,
&cp->recno, QAM_WRITE, &exact)) != 0) {
cp->lock = lock;
- return (ret);
+ goto err1;
}
if (!exact) {
ret = DB_NOTFOUND;
@@ -575,21 +558,18 @@ __qam_c_del(dbc)
pagep = cp->page;
qp = QAM_GET_RECORD(dbp, pagep, cp->indx);
- if (DB_LOGGING(dbc)) {
- if (((QUEUE *)dbp->q_internal)->page_ext == 0
- || ((QUEUE *)dbp->q_internal)->re_len == 0) {
- if ((ret =
- __qam_del_log(dbp->dbenv,
- dbc->txn, &LSN(pagep), 0,
- dbp->log_fileid, &LSN(pagep),
+ if (DBC_LOGGING(dbc)) {
+ if (((QUEUE *)dbp->q_internal)->page_ext == 0 ||
+ ((QUEUE *)dbp->q_internal)->re_len == 0) {
+ if ((ret = __qam_del_log(dbp,
+ dbc->txn, &LSN(pagep), 0, &LSN(pagep),
pagep->pgno, cp->indx, cp->recno)) != 0)
goto err1;
} else {
data.size = ((QUEUE *)dbp->q_internal)->re_len;
data.data = qp->data;
- if ((ret =
- __qam_delext_log(dbp->dbenv, dbc->txn,
- &LSN(pagep), 0, dbp->log_fileid, &LSN(pagep),
+ if ((ret = __qam_delext_log(dbp,
+ dbc->txn, &LSN(pagep), 0, &LSN(pagep),
pagep->pgno, cp->indx, cp->recno, &data)) != 0)
goto err1;
}
@@ -597,60 +577,28 @@ __qam_c_del(dbc)
F_CLR(qp, QAM_VALID);
-err1:
- if ((t_ret = __qam_fput(
- dbp, cp->pgno, cp->page, ret == 0 ? DB_MPOOL_DIRTY : 0)) != 0)
- return (ret ? ret : t_ret);
- cp->page = NULL;
- /* Doing record locking, release the page lock */
- if ((t_ret = __LPUT(dbc, cp->lock)) != 0) {
- cp->lock = lock;
- return (ret ? ret : t_ret);
+ if (cp->recno == first) {
+ pg = ((QUEUE *)dbp->q_internal)->q_meta;
+ if ((ret =
+ __db_lget(dbc, 0, pg, DB_LOCK_WRITE, 0, &lock)) != 0)
+ goto err1;
+ ret = __qam_consume(dbc, meta, first);
+ if ((t_ret = __LPUT(dbc, lock)) != 0 && ret == 0)
+ ret = t_ret;
}
- cp->lock = lock;
- return (ret);
-}
-/*
- * __qam_delete --
- * Queue db->del function.
- *
- * PUBLIC: int __qam_delete __P((DB *, DB_TXN *, DBT *, u_int32_t));
- */
-int
-__qam_delete(dbp, txn, key, flags)
- DB *dbp;
- DB_TXN *txn;
- DBT *key;
- u_int32_t flags;
-{
- QUEUE_CURSOR *cp;
- DBC *dbc;
- int ret, t_ret;
-
- PANIC_CHECK(dbp->dbenv);
- DB_CHECK_TXN(dbp, txn);
-
- /* Check for invalid flags. */
- if ((ret =
- __db_delchk(dbp, key, flags, F_ISSET(dbp, DB_AM_RDONLY))) != 0)
- return (ret);
-
- /* Acquire a cursor. */
- if ((ret = dbp->cursor(dbp, txn, &dbc, DB_WRITELOCK)) != 0)
- return (ret);
-
- DEBUG_LWRITE(dbc, txn, "qam_delete", key, NULL, flags);
-
- cp = (QUEUE_CURSOR *)dbc->internal;
- if ((ret = __qam_getno(dbp, key, &cp->recno)) != 0)
- goto err;
-
- ret = __qam_c_del(dbc);
+err1:
+ if ((t_ret = mpf->put(mpf, meta, 0)) != 0 && ret == 0)
+ ret = t_ret;
+ if (cp->page != NULL && (t_ret = __qam_fput(dbp, cp->pgno,
+ cp->page, ret == 0 ? DB_MPOOL_DIRTY : 0)) != 0 && ret == 0)
+ ret = t_ret;
+ cp->page = NULL;
- /* Release the cursor. */
-err: if ((t_ret = dbc->c_close(dbc)) != 0 && ret == 0)
+ /* Doing record locking, release the page lock */
+ if ((t_ret = __LPUT(dbc, cp->lock)) != 0 && ret == 0)
ret = t_ret;
+ cp->lock = lock;
return (ret);
}
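
With the private __qam_delete gone, DB->del on a Queue now funnels through the access-method cursor routine above, and deleting the current head record lets __qam_consume advance first_recno. An application-side sketch, assuming an open Queue handle `dbp`:

    #include <string.h>
    #include <db.h>

    static int
    queue_delete(DB *dbp, DB_TXN *txn, db_recno_t recno)
    {
            DBT key;
            int ret;

            memset(&key, 0, sizeof(key));
            key.data = &recno;
            key.size = sizeof(recno);

            /* Queue deletes are exact-match: a missing or already-consumed
             * record returns DB_NOTFOUND, which this caller tolerates. */
            if ((ret = dbp->del(dbp, txn, &key, 0)) == DB_NOTFOUND)
                    ret = 0;
            return (ret);
    }
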
@@ -671,39 +619,40 @@ __qam_c_get(dbc, key, data, flags, pgnop)
db_pgno_t *pgnop;
{
DB *dbp;
- DB_LOCK lock, pglock, metalock, save_lock;
+ DBC *dbcdup;
DBT tmp;
+ DB_ENV *dbenv;
+ DB_LOCK lock, pglock, metalock;
+ DB_MPOOLFILE *mpf;
PAGE *pg;
QAMDATA *qp;
QMETA *meta;
QUEUE *t;
QUEUE_CURSOR *cp;
- db_indx_t save_indx;
db_lockmode_t lock_mode;
- db_pgno_t metapno, save_page;
- db_recno_t current, first, save_recno;
+ db_pgno_t metapno;
+ db_recno_t first;
qam_position_mode mode;
- u_int32_t rec_extent;
int exact, is_first, locked, ret, t_ret, wait, with_delete;
- int put_mode, meta_dirty, retrying, skip_again, wrapped;
+ int put_mode, meta_dirty, retrying;
- cp = (QUEUE_CURSOR *)dbc->internal;
dbp = dbc->dbp;
+ dbenv = dbp->dbenv;
+ mpf = dbp->mpf;
+ cp = (QUEUE_CURSOR *)dbc->internal;
- PANIC_CHECK(dbp->dbenv);
+ PANIC_CHECK(dbenv);
wait = 0;
with_delete = 0;
retrying = 0;
- rec_extent = 0;
lock_mode = DB_LOCK_READ;
- mode = QAM_READ;
put_mode = 0;
t_ret = 0;
*pgnop = 0;
pg = NULL;
- skip_again = 0;
+ mode = QAM_READ;
if (F_ISSET(dbc, DBC_RMW)) {
lock_mode = DB_LOCK_WRITE;
mode = QAM_WRITE;
@@ -714,7 +663,9 @@ __qam_c_get(dbc, key, data, flags, pgnop)
flags = DB_CONSUME;
}
if (flags == DB_CONSUME) {
- DB_CHECK_TXN(dbp, dbc->txn);
+ if ((ret = __db_check_txn(dbp, dbc->txn, dbc->locker, 0)) != 0)
+ return (ret);
+
with_delete = 1;
flags = DB_FIRST;
lock_mode = DB_LOCK_WRITE;
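
DB_CONSUME turns the get into a destructive read of the queue head (DB_CONSUME_WAIT additionally blocks until a record is appended, and is mapped to DB_CONSUME in the code above). A minimal consumer sketch against the public API, assuming an open Queue handle `dbp` whose fixed record length fits in `buf`:

    #include <string.h>
    #include <db.h>

    static int
    queue_consume_one(DB *dbp, DB_TXN *txn,
        db_recno_t *recnop, void *buf, u_int32_t buflen, u_int32_t *lenp)
    {
            DBT key, data;
            int ret;

            memset(&key, 0, sizeof(key));
            memset(&data, 0, sizeof(data));
            key.data = recnop;
            key.ulen = sizeof(*recnop);
            key.flags = DB_DBT_USERMEM;
            data.data = buf;
            data.ulen = buflen;
            data.flags = DB_DBT_USERMEM;

            /* The returned record is deleted as part of the get;
             * DB_NOTFOUND means the queue is currently empty. */
            if ((ret = dbp->get(dbp, txn, &key, &data, DB_CONSUME)) == 0)
                    *lenp = data.size;
            return (ret);
    }
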
@@ -724,30 +675,30 @@ __qam_c_get(dbc, key, data, flags, pgnop)
DEBUG_LREAD(dbc, dbc->txn, "qam_c_get",
flags == DB_SET || flags == DB_SET_RANGE ? key : NULL, NULL, flags);
+ /* Make lint and friends happy. */
+ meta_dirty = 0;
+ locked = 0;
+
is_first = 0;
t = (QUEUE *)dbp->q_internal;
- /* get the meta page */
metapno = t->q_meta;
- if ((ret = __db_lget(dbc, 0, metapno, lock_mode, 0, &metalock)) != 0)
+
+ /*
+ * Get the meta page first, we don't want to write lock it while
+	 * trying to pin it. This is because someone may have it pinned
+ * but not locked.
+ */
+ if ((ret = mpf->get(mpf, &metapno, 0, &meta)) != 0)
return (ret);
+ if ((ret = __db_lget(dbc, 0, metapno, lock_mode, 0, &metalock)) != 0)
+ goto err;
locked = 1;
- if ((ret = memp_fget(dbp->mpf, &metapno, 0, &meta)) != 0) {
- /* We did not fetch it, we can release the lock. */
- (void)__LPUT(dbc, metalock);
- return (ret);
- }
first = 0;
- /* Make lint and friends happy. */
- meta_dirty = 0;
-
/* Release any previous lock if not in a transaction. */
- if (cp->lock.off != LOCK_INVALID) {
- (void)__TLPUT(dbc, cp->lock);
- cp->lock.off = LOCK_INVALID;
- }
+ (void)__TLPUT(dbc, cp->lock);
retry: /* Update the record number. */
switch (flags) {
@@ -778,8 +729,8 @@ retry: /* Update the record number. */
case DB_PREV:
case DB_PREV_NODUP:
if (cp->recno != RECNO_OOB) {
- if (QAM_BEFORE_FIRST(meta, cp->recno)
- || cp->recno == meta->first_recno) {
+ if (QAM_BEFORE_FIRST(meta, cp->recno) ||
+ cp->recno == meta->first_recno) {
ret = DB_NOTFOUND;
goto err;
}
@@ -799,14 +750,15 @@ retry: /* Update the record number. */
if (cp->recno == RECNO_OOB)
cp->recno--;
break;
- case DB_GET_BOTH:
case DB_SET:
case DB_SET_RANGE:
+ case DB_GET_BOTH:
+ case DB_GET_BOTH_RANGE:
if ((ret = __qam_getno(dbp, key, &cp->recno)) != 0)
goto err;
break;
default:
- ret = __db_unknown_flag(dbp->dbenv, "__qam_c_get", flags);
+ ret = __db_unknown_flag(dbenv, "__qam_c_get", flags);
goto err;
}
@@ -830,14 +782,16 @@ retry: /* Update the record number. */
retrying = 1;
goto retry;
}
- if (CDB_LOCKING(dbp->dbenv)) {
- if ((ret = lock_get(dbp->dbenv, dbc->locker,
+ if (CDB_LOCKING(dbenv)) {
+ if ((ret = dbenv->lock_get(
+ dbenv, dbc->locker,
DB_LOCK_SWITCH, &dbc->lock_dbt,
DB_LOCK_WAIT, &dbc->mylock)) != 0)
goto err;
- if ((ret = lock_get(dbp->dbenv, dbc->locker,
- DB_LOCK_UPGRADE, &dbc->lock_dbt, DB_LOCK_WRITE,
- &dbc->mylock)) != 0)
+ if ((ret = dbenv->lock_get(
+ dbenv, dbc->locker,
+ DB_LOCK_UPGRADE, &dbc->lock_dbt,
+ DB_LOCK_WRITE, &dbc->mylock)) != 0)
goto err;
goto retry;
}
@@ -859,7 +813,7 @@ retry: /* Update the record number. */
if ((ret = __db_lget(dbc, 0, metapno,
DB_LOCK_WAIT, DB_LOCK_SWITCH, &metalock)) != 0)
goto err;
- if ((ret = lock_get(dbp->dbenv, dbc->locker,
+ if ((ret = dbenv->lock_get(dbenv, dbc->locker,
DB_LOCK_UPGRADE, &dbc->lock_dbt, DB_LOCK_WRITE,
&metalock)) != 0)
goto err;
@@ -883,11 +837,15 @@ retry: /* Update the record number. */
DB_LOCK_NOWAIT | DB_LOCK_RECORD : DB_LOCK_RECORD,
&lock)) == DB_LOCK_NOTGRANTED && with_delete) {
#ifdef QDEBUG
- __db_logmsg(dbp->dbenv,
+ __db_logmsg(dbenv,
dbc->txn, "Queue S", 0, "%x %d %d %d",
dbc->locker, cp->recno, first, meta->first_recno);
#endif
first = 0;
+ if ((ret =
+ __db_lget(dbc, 0, metapno, lock_mode, 0, &metalock)) != 0)
+ goto err;
+ locked = 1;
goto retry;
}
@@ -929,9 +887,9 @@ retry: /* Update the record number. */
cp->lock_mode = lock_mode;
if (!exact) {
- if (flags == DB_NEXT || flags == DB_NEXT_NODUP
- || flags == DB_PREV || flags == DB_PREV_NODUP
- || flags == DB_LAST) {
+ if (flags == DB_NEXT || flags == DB_NEXT_NODUP ||
+ flags == DB_PREV || flags == DB_PREV_NODUP ||
+ flags == DB_LAST) {
/* Release locks and try again. */
if (pg != NULL)
(void)__qam_fput(dbp, cp->pgno, pg, 0);
@@ -951,18 +909,20 @@ retry: /* Update the record number. */
}
/* Return the key if the user didn't give us one. */
- if (key != NULL && flags != DB_SET && flags != DB_GET_BOTH &&
- (ret = __db_retcopy(dbp, key, &cp->recno, sizeof(cp->recno),
- &dbc->rkey.data, &dbc->rkey.ulen)) != 0)
- goto err1;
-
- if (key != NULL)
+ if (key != NULL) {
+ if (flags != DB_GET_BOTH && flags != DB_GET_BOTH_RANGE &&
+ flags != DB_SET && flags != DB_SET_RANGE &&
+ (ret = __db_retcopy(dbp->dbenv,
+ key, &cp->recno, sizeof(cp->recno),
+ &dbc->rkey->data, &dbc->rkey->ulen)) != 0)
+ goto err1;
F_SET(key, DB_DBT_ISSET);
+ }
qp = QAM_GET_RECORD(dbp, pg, cp->indx);
/* Return the data item. */
- if (flags == DB_GET_BOTH) {
+ if (flags == DB_GET_BOTH || flags == DB_GET_BOTH_RANGE) {
/*
* Need to compare
*/
@@ -973,8 +933,10 @@ retry: /* Update the record number. */
goto err1;
}
}
- if (data != NULL && (ret = __db_retcopy(dbp, data,
- qp->data, t->re_len, &dbc->rdata.data, &dbc->rdata.ulen)) != 0)
+ if (data != NULL &&
+ !F_ISSET(dbc, DBC_MULTIPLE|DBC_MULTIPLE_KEY) &&
+ (ret = __db_retcopy(dbp->dbenv, data,
+ qp->data, t->re_len, &dbc->rdata->data, &dbc->rdata->ulen)) != 0)
goto err1;
if (data != NULL)
@@ -982,18 +944,53 @@ retry: /* Update the record number. */
/* Finally, if we are doing DB_CONSUME mark the record. */
if (with_delete) {
- if (DB_LOGGING(dbc)) {
+ /*
+ * Assert that we're not a secondary index. Doing a DB_CONSUME
+ * on a secondary makes very little sense, since one can't
+ * DB_APPEND there; attempting one should be forbidden by
+ * the interface.
+ */
+ DB_ASSERT(!F_ISSET(dbp, DB_AM_SECONDARY));
+
+ /*
+ * Check and see if we *have* any secondary indices.
+ * If we do, we're a primary, so call __db_c_del_primary
+ * to delete the references to the item we're about to
+ * delete.
+ *
+ * Note that we work on a duplicated cursor, since the
+ * __db_ret work has already been done, so it's not safe
+ * to perform any additional ops on this cursor.
+ */
+ if (LIST_FIRST(&dbp->s_secondaries) != NULL) {
+ if ((ret = __db_c_idup(dbc,
+ &dbcdup, DB_POSITIONI)) != 0)
+ goto err1;
+
+ if ((ret = __db_c_del_primary(dbcdup)) != 0) {
+ /*
+ * The __db_c_del_primary return is more
+ * interesting.
+ */
+ (void)dbcdup->c_close(dbcdup);
+ goto err1;
+ }
+
+ if ((ret = dbcdup->c_close(dbcdup)) != 0)
+ goto err1;
+ }
+
+ if (DBC_LOGGING(dbc)) {
if (t->page_ext == 0 || t->re_len == 0) {
- if ((ret = __qam_del_log(dbp->dbenv, dbc->txn,
- &LSN(pg), 0, dbp->log_fileid, &LSN(pg),
+ if ((ret = __qam_del_log(dbp, dbc->txn,
+ &LSN(pg), 0, &LSN(pg),
pg->pgno, cp->indx, cp->recno)) != 0)
goto err1;
} else {
tmp.data = qp->data;
tmp.size = t->re_len;
- if ((ret =
- __qam_delext_log(dbp->dbenv, dbc->txn,
- &LSN(pg), 0, dbp->log_fileid, &LSN(pg),
+ if ((ret = __qam_delext_log(dbp,
+ dbc->txn, &LSN(pg), 0, &LSN(pg),
pg->pgno, cp->indx, cp->recno, &tmp)) != 0)
goto err1;
}
@@ -1003,7 +1000,7 @@ retry: /* Update the record number. */
put_mode = DB_MPOOL_DIRTY;
if ((ret = __LPUT(dbc, pglock)) != 0)
- goto err;
+ goto err1;
/*
* Now we need to update the metapage
@@ -1021,8 +1018,9 @@ retry: /* Update the record number. */
dbc, 0, metapno, lock_mode, 0, &metalock)) != 0)
goto err1;
locked = 1;
+
#ifdef QDEBUG
- __db_logmsg(dbp->dbenv,
+ __db_logmsg(dbenv,
dbc->txn, "Queue D", 0, "%x %d %d %d",
dbc->locker, cp->recno, first, meta->first_recno);
#endif
@@ -1037,190 +1035,380 @@ retry: /* Update the record number. */
if (first != meta->first_recno)
goto done;
- save_page = cp->pgno;
- save_indx = cp->indx;
- save_recno = cp->recno;
- save_lock = cp->lock;
+ if ((ret = __qam_consume(dbc, meta, first)) != 0)
+ goto err1;
+ }
- /*
- * If we skipped some deleted records, we need to
- * reposition on the first one. Get a lock
- * in case someone is trying to put it back.
- */
- if (first != cp->recno) {
- ret = __db_lget(dbc, 0, first, DB_LOCK_READ,
- DB_LOCK_NOWAIT | DB_LOCK_RECORD, &lock);
- if (ret == DB_LOCK_NOTGRANTED) {
- ret = 0;
- goto done;
- }
- if (ret != 0)
- goto err1;
- if ((ret =
- __qam_fput(dbp, cp->pgno, cp->page, put_mode)) != 0)
- goto err1;
- cp->page = NULL;
- put_mode = 0;
- if ((ret = __qam_position(dbc,
- &first, QAM_READ, &exact)) != 0 || exact != 0) {
- (void)__LPUT(dbc, lock);
- goto err1;
- }
- if ((ret =__LPUT(dbc, lock)) != 0)
- goto err1;
- if ((ret = __LPUT(dbc, cp->lock)) != 0)
- goto err1;
+done:
+err1: if (cp->page != NULL) {
+ t_ret = __qam_fput(dbp, cp->pgno, cp->page, put_mode);
+
+ if (!ret)
+ ret = t_ret;
+ /* Doing record locking, release the page lock */
+ t_ret = __LPUT(dbc, pglock);
+ cp->page = NULL;
+ }
+
+err: if (!ret)
+ ret = t_ret;
+ if (meta) {
+
+ /* release the meta page */
+ t_ret = mpf->put(mpf, meta, meta_dirty ? DB_MPOOL_DIRTY : 0);
+
+ if (!ret)
+ ret = t_ret;
+
+ /* Don't hold the meta page long term. */
+ if (locked)
+ t_ret = __LPUT(dbc, metalock);
+ }
+ DB_ASSERT(!LOCK_ISSET(metalock));
+
+ /*
+ * There is no need to keep the record locked if we are
+ * not in a transaction.
+ */
+ if (t_ret == 0)
+ t_ret = __TLPUT(dbc, cp->lock);
+
+ return (ret ? ret : t_ret);
+}
+
+/*
+ * __qam_consume -- try to reset the head of the queue.
+ *
+ */
+
+static int
+__qam_consume(dbc, meta, first)
+ DBC *dbc;
+ QMETA *meta;
+ db_recno_t first;
+{
+ DB *dbp;
+ DB_LOCK lock, save_lock;
+ DB_MPOOLFILE *mpf;
+ QUEUE_CURSOR *cp;
+ db_indx_t save_indx;
+ db_pgno_t save_page;
+ db_recno_t current, save_recno;
+ u_int32_t rec_extent;
+ int exact, put_mode, ret, t_ret, wrapped;
+
+ dbp = dbc->dbp;
+ mpf = dbp->mpf;
+ cp = (QUEUE_CURSOR *)dbc->internal;
+ put_mode = DB_MPOOL_DIRTY;
+ ret = t_ret = 0;
+
+ save_page = cp->pgno;
+ save_indx = cp->indx;
+ save_recno = cp->recno;
+ save_lock = cp->lock;
+
+ /*
+ * If we skipped some deleted records, we need to
+ * reposition on the first one. Get a lock
+ * in case someone is trying to put it back.
+ */
+ if (first != cp->recno) {
+ ret = __db_lget(dbc, 0, first, DB_LOCK_READ,
+ DB_LOCK_NOWAIT | DB_LOCK_RECORD, &lock);
+ if (ret == DB_LOCK_NOTGRANTED) {
+ ret = 0;
+ goto done;
}
+ if (ret != 0)
+ goto done;
+ if ((ret =
+ __qam_fput(dbp, cp->pgno, cp->page, put_mode)) != 0)
+ goto done;
+ cp->page = NULL;
+ put_mode = 0;
+ if ((ret = __qam_position(dbc,
+ &first, QAM_READ, &exact)) != 0 || exact != 0) {
+ (void)__LPUT(dbc, lock);
+ goto done;
+ }
+ if ((ret =__LPUT(dbc, lock)) != 0)
+ goto done;
+ if ((ret = __LPUT(dbc, cp->lock)) != 0)
+ goto done;
+ }
- current = meta->cur_recno;
- wrapped = 0;
- if (first > current)
- wrapped = 1;
- rec_extent = meta->page_ext * meta->rec_page;
+ current = meta->cur_recno;
+ wrapped = 0;
+ if (first > current)
+ wrapped = 1;
+ rec_extent = meta->page_ext * meta->rec_page;
- /* Loop until we find a record or hit current */
- for (;;) {
- /*
- * Check to see if we are moving off the extent
- * and remove the extent.
- * If we are moving off a page we need to
- * get rid of the buffer.
- * Wait for the lagging readers to move off the
- * page.
- */
- if (rec_extent != 0
- && ((exact = first % rec_extent == 0)
- || first % meta->rec_page == 0
- || first == UINT32_T_MAX)) {
- if (exact == 1 && (ret = __db_lget(dbc,
- 0, cp->pgno, DB_LOCK_WRITE, 0, &cp->lock)) != 0)
- break;
+ /* Loop until we find a record or hit current */
+ for (;;) {
+ /*
+ * Check to see if we are moving off the extent
+ * and remove the extent.
+ * If we are moving off a page we need to
+ * get rid of the buffer.
+ * Wait for the lagging readers to move off the
+ * page.
+ */
+ if (cp->page != NULL && rec_extent != 0 &&
+ ((exact = (first % rec_extent == 0)) ||
+ first % meta->rec_page == 0 ||
+ first == UINT32_T_MAX)) {
+ if (exact == 1 && (ret = __db_lget(dbc,
+ 0, cp->pgno, DB_LOCK_WRITE, 0, &cp->lock)) != 0)
+ break;
#ifdef QDEBUG
- __db_logmsg(dbp->dbenv,
- dbc->txn, "Queue R", 0, "%x %d %d %d",
- dbc->locker, cp->pgno, first, meta->first_recno);
+ __db_logmsg(dbp->dbenv,
+ dbc->txn, "Queue R", 0, "%x %d %d %d",
+ dbc->locker, cp->pgno, first, meta->first_recno);
#endif
- put_mode |= DB_MPOOL_DISCARD;
- if ((ret = __qam_fput(dbp,
- cp->pgno, cp->page, put_mode)) != 0)
- break;
- cp->page = NULL;
-
- if (exact == 1) {
- ret = __qam_fremove(dbp, cp->pgno);
- t_ret = __LPUT(dbc, cp->lock);
- }
- if (ret != 0)
- break;
- if (t_ret != 0) {
- ret = t_ret;
- break;
- }
- } else if ((ret =
- __qam_fput(dbp, cp->pgno, cp->page, put_mode)) != 0)
+ put_mode |= DB_MPOOL_DISCARD;
+ if ((ret = __qam_fput(dbp,
+ cp->pgno, cp->page, put_mode)) != 0)
break;
cp->page = NULL;
- first++;
- if (first == RECNO_OOB) {
- wrapped = 0;
- first++;
- }
-
- /*
- * LOOP EXIT when we come move to the current
- * pointer.
- */
- if (!wrapped && first >= current)
- break;
- ret = __db_lget(dbc, 0, first, DB_LOCK_READ,
- DB_LOCK_NOWAIT | DB_LOCK_RECORD, &lock);
- if (ret == DB_LOCK_NOTGRANTED) {
- ret = 0;
- break;
+ if (exact == 1) {
+ ret = __qam_fremove(dbp, cp->pgno);
+ t_ret = __LPUT(dbc, cp->lock);
}
if (ret != 0)
break;
-
- if ((ret = __qam_position(dbc,
- &first, QAM_READ, &exact)) != 0) {
- (void)__LPUT(dbc, lock);
- break;
- }
- put_mode = 0;
- if ((ret =__LPUT(dbc, lock)) != 0
- || (ret = __LPUT(dbc, cp->lock)) != 0 ||exact) {
- if ((t_ret = __qam_fput(dbp, cp->pgno,
- cp->page, put_mode)) != 0 && ret == 0)
- ret = t_ret;
- cp->page = NULL;
+ if (t_ret != 0) {
+ ret = t_ret;
break;
}
+ } else if (cp->page != NULL && (ret =
+ __qam_fput(dbp, cp->pgno, cp->page, put_mode)) != 0)
+ break;
+ cp->page = NULL;
+ first++;
+ if (first == RECNO_OOB) {
+ wrapped = 0;
+ first++;
}
- cp->pgno = save_page;
- cp->indx = save_indx;
- cp->recno = save_recno;
- cp->lock = save_lock;
-
/*
- * We have advanced as far as we can.
- * Advance first_recno to this point.
	 * LOOP EXIT when we come to the current
+ * pointer.
*/
- if (meta->first_recno != first) {
+ if (!wrapped && first >= current)
+ break;
+
+ ret = __db_lget(dbc, 0, first, DB_LOCK_READ,
+ DB_LOCK_NOWAIT | DB_LOCK_RECORD, &lock);
+ if (ret == DB_LOCK_NOTGRANTED) {
+ ret = 0;
+ break;
+ }
+ if (ret != 0)
+ break;
+
+ if ((ret = __qam_position(dbc,
+ &first, QAM_READ, &exact)) != 0) {
+ (void)__LPUT(dbc, lock);
+ break;
+ }
+ put_mode = 0;
+ if ((ret =__LPUT(dbc, lock)) != 0 ||
+ (ret = __LPUT(dbc, cp->lock)) != 0 || exact) {
+ if ((t_ret = __qam_fput(dbp, cp->pgno,
+ cp->page, put_mode)) != 0 && ret == 0)
+ ret = t_ret;
+ cp->page = NULL;
+ break;
+ }
+ }
+
+ cp->pgno = save_page;
+ cp->indx = save_indx;
+ cp->recno = save_recno;
+ cp->lock = save_lock;
+
+ /*
+ * We have advanced as far as we can.
+ * Advance first_recno to this point.
+ */
+ if (ret == 0 && meta->first_recno != first) {
#ifdef QDEBUG
__db_logmsg(dbp->dbenv, dbc->txn, "Queue M",
0, "%x %d %d %d", dbc->locker, cp->recno,
first, meta->first_recno);
#endif
- if (DB_LOGGING(dbc))
- if ((ret =
- __qam_incfirst_log(dbp->dbenv,
- dbc->txn, &meta->dbmeta.lsn, 0,
- dbp->log_fileid, cp->recno)) != 0)
- goto err;
- meta->first_recno = first;
- meta_dirty = 1;
- }
+ if (DBC_LOGGING(dbc))
+ if ((ret = __qam_incfirst_log(dbp,
+ dbc->txn, &meta->dbmeta.lsn, 0,
+ cp->recno, PGNO_BASE_MD)) != 0)
+ goto done;
+ meta->first_recno = first;
+ (void)mpf->set(mpf, meta, DB_MPOOL_DIRTY);
}
done:
-err1: if (cp->page != NULL) {
- t_ret = __qam_fput(dbp, cp->pgno, cp->page, put_mode);
+ return (ret);
+}
- if (!ret)
+static int
+__qam_bulk(dbc, data, flags)
+ DBC *dbc;
+ DBT *data;
+ u_int32_t flags;
+{
+ DB *dbp;
+ DB_LOCK metalock;
+ DB_MPOOLFILE *mpf;
+ PAGE *pg;
+ QMETA *meta;
+ QAMDATA *qp;
+ QUEUE_CURSOR *cp;
+ db_indx_t indx;
+ db_pgno_t metapno;
+ qam_position_mode mode;
+ int32_t *endp, *offp;
+ u_int8_t *dbuf, *dp, *np;
+ int exact, recs, re_len, ret, t_ret, valid;
+ int is_key, need_pg, pagesize, size, space;
+
+ dbp = dbc->dbp;
+ mpf = dbp->mpf;
+ cp = (QUEUE_CURSOR *)dbc->internal;
+
+ mode = QAM_READ;
+ if (F_ISSET(dbc, DBC_RMW))
+ mode = QAM_WRITE;
+
+ pagesize = dbp->pgsize;
+ re_len = ((QUEUE *)dbp->q_internal)->re_len;
+ recs = ((QUEUE *)dbp->q_internal)->rec_page;
+ metapno = ((QUEUE *)dbp->q_internal)->q_meta;
+
+ is_key = LF_ISSET(DB_MULTIPLE_KEY) ? 1 : 0;
+ size = 0;
+
+ if ((ret = __db_lget(dbc, 0, metapno, DB_LOCK_READ, 0, &metalock)) != 0)
+ return (ret);
+ if ((ret = mpf->get(mpf, &metapno, 0, &meta)) != 0) {
+ /* We did not fetch it, we can release the lock. */
+ (void)__LPUT(dbc, metalock);
+ return (ret);
+ }
+
+ dbuf = data->data;
+ np = dp = dbuf;
+
+	/* Keep track of space that is left. There is a termination entry */
+ space = data->ulen;
+ space -= sizeof(*offp);
+
+	/* Build the offset/size table from the end up. */
+ endp = (int32_t *) ((u_int8_t *)dbuf + data->ulen);
+ endp--;
+ offp = endp;
+
+next_pg:
+ if ((ret = __qam_position(dbc, &cp->recno, mode, &exact)) != 0)
+ goto done;
+
+ pg = cp->page;
+ indx = cp->indx;
+ need_pg = 1;
+
+ do {
+ /*
+ * If this page is a nonexistent page at the end of an
+ * extent, pg may be NULL. A NULL page has no valid records,
+ * so just keep looping as though qp exists and isn't QAM_VALID;
+ * calling QAM_GET_RECORD is unsafe.
+ */
+ valid = 0;
+
+ /* Wrap around, skipping zero. */
+ if (cp->recno == RECNO_OOB)
+ cp->recno++;
+ if (pg != NULL) {
+ qp = QAM_GET_RECORD(dbp, pg, indx);
+ if (F_ISSET(qp, QAM_VALID)) {
+ valid = 1;
+ space -= (is_key ? 3 : 2) * sizeof(*offp);
+ if (space < 0)
+ goto get_space;
+ if (need_pg) {
+ dp = np;
+ size = pagesize - QPAGE_SZ(dbp);
+ if (space < size) {
+get_space:
+ if (offp == endp) {
+ data->size =
+ ALIGN(size +
+ pagesize,
+ sizeof(u_int32_t));
+ ret = ENOMEM;
+ break;
+ }
+ if (indx != 0)
+ indx--;
+ cp->recno--;
+ break;
+ }
+ memcpy(dp,
+ (char *)pg + QPAGE_SZ(dbp), size);
+ need_pg = 0;
+ space -= size;
+ np += size;
+ }
+ if (is_key)
+ *offp-- = cp->recno;
+ *offp-- = (int32_t)((u_int8_t*)qp -
+ (u_int8_t*)pg - QPAGE_SZ(dbp) +
+ dp - dbuf + SSZA(QAMDATA, data));
+ *offp-- = re_len;
+ }
+ }
+ if (!valid && is_key == 0) {
+ *offp-- = 0;
+ *offp-- = 0;
+ }
+ cp->recno++;
+ } while (++indx < recs && indx != RECNO_OOB
+ && cp->recno != meta->cur_recno
+ && !QAM_AFTER_CURRENT(meta, cp->recno));
+
+ if ((t_ret = __TLPUT(dbc, cp->lock)) != 0 && ret == 0)
+ ret = t_ret;
+
+ if (cp->page != NULL) {
+ if ((t_ret =
+ __qam_fput(dbp, cp->pgno, cp->page, 0)) != 0 && ret == 0)
ret = t_ret;
- /* Doing record locking, release the page lock */
- t_ret = __LPUT(dbc, pglock);
cp->page = NULL;
}
-err: if (!ret)
- ret = t_ret;
- if (meta) {
+ if (ret == 0
+ && (indx >= recs || indx == RECNO_OOB)
+ && cp->recno != meta->cur_recno
+ && !QAM_AFTER_CURRENT(meta, cp->recno))
+ goto next_pg;
- /* release the meta page */
- t_ret = memp_fput(
- dbp->mpf, meta, meta_dirty ? DB_MPOOL_DIRTY : 0);
+ if (is_key == 1)
+ *offp = RECNO_OOB;
+ else
+ *offp = -1;
- if (!ret)
- ret = t_ret;
+done:
+ /* release the meta page */
+ t_ret = mpf->put(mpf, meta, 0);
- /* Don't hold the meta page long term. */
- if (locked)
- t_ret = __LPUT(dbc, metalock);
- }
- DB_ASSERT(metalock.off == LOCK_INVALID);
+ if (!ret)
+ ret = t_ret;
- /*
- * There is no need to keep the record locked if we are
- * not in a transaction.
- */
- if (t_ret == 0)
- t_ret = __TLPUT(dbc, cp->lock);
+ t_ret = __LPUT(dbc, metalock);
- return (ret ? ret : t_ret);
+ return (ret);
}
/*
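
The new __qam_bulk above fills a user buffer in the DB_MULTIPLE format: page data is copied in as needed, while an offset/length (plus record-number, for DB_MULTIPLE_KEY) table is built from the end of the buffer downward. A sketch of how an application walks such a buffer for a Queue database with the public bulk macros; the buffer size is an assumption (it must be a multiple of 1024 bytes and at least one database page):

    #include <sys/types.h>
    #include <stdio.h>
    #include <string.h>
    #include <db.h>

    #define BULK_LEN        (16 * 1024)

    static int
    queue_bulk_dump(DB *dbp)
    {
            DBC *dbc;
            DBT key, data;
            db_recno_t recno;
            u_int32_t bulk[BULK_LEN / sizeof(u_int32_t)], retdlen;
            void *p, *retdata;
            int ret, t_ret;

            if ((ret = dbp->cursor(dbp, NULL, &dbc, 0)) != 0)
                    return (ret);

            memset(&key, 0, sizeof(key));
            memset(&data, 0, sizeof(data));
            data.data = bulk;
            data.ulen = sizeof(bulk);
            data.flags = DB_DBT_USERMEM;

            /* Each c_get fills the buffer; the macros then walk it. */
            while ((ret = dbc->c_get(dbc,
                &key, &data, DB_MULTIPLE_KEY | DB_NEXT)) == 0)
                    for (DB_MULTIPLE_INIT(p, &data);;) {
                            DB_MULTIPLE_RECNO_NEXT(p, &data,
                                recno, retdata, retdlen);
                            if (p == NULL)
                                    break;
                            /* Records are assumed printable for the demo. */
                            printf("%lu: %.*s\n", (unsigned long)recno,
                                (int)retdlen, (char *)retdata);
                    }
            if (ret == DB_NOTFOUND)
                    ret = 0;

            if ((t_ret = dbc->c_close(dbc)) != 0 && ret == 0)
                    ret = t_ret;
            return (ret);
    }
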
@@ -1241,15 +1429,12 @@ __qam_c_close(dbc, root_pgno, rmroot)
cp = (QUEUE_CURSOR *)dbc->internal;
/* Discard any locks not acquired inside of a transaction. */
- if (cp->lock.off != LOCK_INVALID) {
- (void)__TLPUT(dbc, cp->lock);
- cp->lock.off = LOCK_INVALID;
- }
+ (void)__TLPUT(dbc, cp->lock);
+ LOCK_INIT(cp->lock);
cp->page = NULL;
cp->pgno = PGNO_INVALID;
cp->indx = 0;
- cp->lock.off = LOCK_INVALID;
cp->lock_mode = DB_LOCK_NG;
cp->recno = RECNO_OOB;
cp->flags = 0;
@@ -1277,7 +1462,7 @@ __qam_c_dup(orig_dbc, new_dbc)
/* reget the long term lock if we are not in a xact */
if (orig_dbc->txn != NULL ||
- !STD_LOCKING(orig_dbc) || orig->lock.off == LOCK_INVALID)
+ !STD_LOCKING(orig_dbc) || !LOCK_ISSET(orig->lock))
return (0);
return (__db_lget(new_dbc,
@@ -1313,8 +1498,10 @@ __qam_c_init(dbc)
dbc->c_count = __db_c_count;
dbc->c_del = __db_c_del;
dbc->c_dup = __db_c_dup;
- dbc->c_get = __db_c_get;
+ dbc->c_get = dbc->c_real_get = __db_c_get;
+ dbc->c_pget = __db_c_pget;
dbc->c_put = __db_c_put;
+ dbc->c_am_bulk = __qam_bulk;
dbc->c_am_close = __qam_c_close;
dbc->c_am_del = __qam_c_del;
dbc->c_am_destroy = __qam_c_destroy;
@@ -1334,7 +1521,7 @@ __qam_c_destroy(dbc)
DBC *dbc;
{
/* Discard the structures. */
- __os_free(dbc->internal, sizeof(QUEUE_CURSOR));
+ __os_free(dbc->dbp->dbenv, dbc->internal);
return (0);
}
@@ -1355,3 +1542,74 @@ __qam_getno(dbp, key, rep)
}
return (0);
}
+
+/*
+ * __qam_truncate --
+ * Truncate a queue database
+ *
+ * PUBLIC: int __qam_truncate __P((DB *, DB_TXN *, u_int32_t *));
+ */
+int
+__qam_truncate(dbp, txn, countp)
+ DB *dbp;
+ DB_TXN *txn;
+ u_int32_t *countp;
+{
+ DBC *dbc;
+ DB_LOCK metalock;
+ DB_MPOOLFILE *mpf;
+ QMETA *meta;
+ db_pgno_t metapno;
+ int count, ret, t_ret;
+
+ mpf = dbp->mpf;
+
+ /* Acquire a cursor. */
+ if ((ret = dbp->cursor(dbp, txn, &dbc, 0)) != 0)
+ return (ret);
+
+ /* Walk the queue, counting rows. */
+ count = 0;
+ while ((ret = __qam_c_get(dbc, NULL, NULL, DB_CONSUME, &metapno)) == 0)
+ count++;
+
+ if (ret == DB_NOTFOUND)
+ ret = 0;
+
+ /* Discard the cursor. */
+ if ((t_ret = dbc->c_close(dbc)) != 0 && ret == 0)
+ ret = t_ret;
+
+ if (ret != 0)
+ return (ret);
+
+ /* update the meta page */
+ /* get the meta page */
+ metapno = ((QUEUE *)dbp->q_internal)->q_meta;
+ if ((ret =
+ __db_lget(dbc, 0, metapno, DB_LOCK_WRITE, 0, &metalock)) != 0)
+ return (ret);
+
+ if ((ret = mpf->get(mpf, &metapno, 0, &meta)) != 0) {
+ /* We did not fetch it, we can release the lock. */
+ (void)__LPUT(dbc, metalock);
+ return (ret);
+ }
+ if (DBC_LOGGING(dbc)) {
+ ret = __qam_mvptr_log(dbp, dbc->txn, &meta->dbmeta.lsn, 0,
+ QAM_SETCUR | QAM_SETFIRST | QAM_TRUNCATE, meta->first_recno,
+ 1, meta->cur_recno, 1, &meta->dbmeta.lsn, PGNO_BASE_MD);
+ }
+ if (ret == 0)
+ meta->first_recno = meta->cur_recno = 1;
+
+ if ((t_ret =
+ mpf->put(mpf, meta, ret == 0 ? DB_MPOOL_DIRTY: 0)) != 0 && ret == 0)
+ ret = t_ret;
+ if ((t_ret = __LPUT(dbc, metalock)) != 0 && ret == 0)
+ ret = t_ret;
+
+ *countp = count;
+
+ return (ret);
+}
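
__qam_truncate implements DB->truncate for Queue: it consumes every record through the cursor, then resets first_recno and cur_recno on the meta page under a write lock, logging the move. Application-side the call is simply the following (sketch, assuming an open handle and an optional transaction):

    #include <stdio.h>
    #include <db.h>

    static int
    queue_truncate(DB *dbp, DB_TXN *txn)
    {
            u_int32_t count;
            int ret;

            /* Empties the queue and reports how many records were dropped. */
            if ((ret = dbp->truncate(dbp, txn, &count, 0)) == 0)
                    printf("discarded %lu records\n", (unsigned long)count);
            return (ret);
    }
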
diff --git a/bdb/qam/qam.src b/bdb/qam/qam.src
index 507d7a65229..f8bf4da4dd0 100644
--- a/bdb/qam/qam.src
+++ b/bdb/qam/qam.src
@@ -1,13 +1,14 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1999, 2000
+ * Copyright (c) 1999-2002
* Sleepycat Software. All rights reserved.
*
- * $Id: qam.src,v 11.15 2001/01/16 20:10:55 ubell Exp $
+ * $Id: qam.src,v 11.28 2002/04/17 19:03:13 krinsky Exp $
*/
-PREFIX qam
+PREFIX __qam
+DBPRIVATE
INCLUDE #include "db_config.h"
INCLUDE
@@ -15,61 +16,55 @@ INCLUDE #ifndef NO_SYSTEM_INCLUDES
INCLUDE #include <sys/types.h>
INCLUDE
INCLUDE #include <ctype.h>
-INCLUDE #include <errno.h>
INCLUDE #include <string.h>
INCLUDE #endif
INCLUDE
INCLUDE #include "db_int.h"
-INCLUDE #include "db_page.h"
-INCLUDE #include "db_dispatch.h"
-INCLUDE #include "db_am.h"
-INCLUDE #include "qam.h"
-INCLUDE #include "txn.h"
+INCLUDE #include "dbinc/crypto.h"
+INCLUDE #include "dbinc/db_page.h"
+INCLUDE #include "dbinc/db_dispatch.h"
+INCLUDE #include "dbinc/db_am.h"
+INCLUDE #include "dbinc/log.h"
+INCLUDE #include "dbinc/qam.h"
+INCLUDE #include "dbinc/rep.h"
+INCLUDE #include "dbinc/txn.h"
INCLUDE
/*
- * inc
- * Used when we increment a record number. These do not actually
- * tell you what record number you got, just that you incremented
- * the record number. These operations are never undone.
- */
-BEGIN inc 76
-ARG fileid int32_t ld
-POINTER lsn DB_LSN * lu
-END
-
-/*
* incfirst
* Used when we increment first_recno.
*/
-BEGIN incfirst 77
-ARG fileid int32_t ld
+BEGIN incfirst 84
+DB fileid int32_t ld
ARG recno db_recno_t lu
+WRLOCK meta_pgno db_pgno_t lu
END
/*
* mvptr
* Used when we change one or both of cur_recno and first_recno.
*/
-BEGIN mvptr 78
+BEGIN mvptr 85
ARG opcode u_int32_t lu
-ARG fileid int32_t ld
+DB fileid int32_t ld
ARG old_first db_recno_t lu
ARG new_first db_recno_t lu
ARG old_cur db_recno_t lu
ARG new_cur db_recno_t lu
POINTER metalsn DB_LSN * lu
+WRLOCK meta_pgno db_pgno_t lu
END
+
/*
* del
* Used when we delete a record.
* recno is the record that is being deleted.
*/
BEGIN del 79
-ARG fileid int32_t ld
+DB fileid int32_t ld
POINTER lsn DB_LSN * lu
-ARG pgno db_pgno_t lu
+WRLOCK pgno db_pgno_t lu
ARG indx u_int32_t lu
ARG recno db_recno_t lu
END
@@ -81,9 +76,9 @@ END
* data is the record itself.
*/
BEGIN add 80
-ARG fileid int32_t ld
+DB fileid int32_t ld
POINTER lsn DB_LSN * lu
-ARG pgno db_pgno_t lu
+WRLOCK pgno db_pgno_t lu
ARG indx u_int32_t lu
ARG recno db_recno_t lu
DBT data DBT s
@@ -92,30 +87,12 @@ DBT olddata DBT s
END
/*
- * delete
- * Used when we remove a Queue extent file.
- */
-BEGIN delete 81
-DBT name DBT s
-POINTER lsn DB_LSN * lu
-END
-
-/*
- * rename
- * Used when we rename a Queue extent file.
- */
-BEGIN rename 82
-DBT name DBT s
-DBT newname DBT s
-END
-
-/*
* delext
* Used when we delete a record in extent based queue.
* recno is the record that is being deleted.
*/
BEGIN delext 83
-ARG fileid int32_t ld
+DB fileid int32_t ld
POINTER lsn DB_LSN * lu
ARG pgno db_pgno_t lu
ARG indx u_int32_t lu
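
The switch from PREFIX qam/ARG fileid to PREFIX __qam/DBPRIVATE with DB- and WRLOCK-typed fields changes the generated logging interface: the DB field replaces the explicit log_fileid argument (the _log function now takes the DB handle itself), and the WRLOCK meta_pgno field adds a trailing page-number argument (PGNO_BASE_MD at the call sites). Reconstructed from the call sites earlier in this patch rather than copied from the generated qam_auto files, the incfirst record above now yields roughly this prototype:

    int __qam_incfirst_log __P((DB *, DB_TXN *, DB_LSN *, u_int32_t,
        db_recno_t, db_pgno_t));

    /* matching the call in __qam_consume():
     *      __qam_incfirst_log(dbp, dbc->txn, &meta->dbmeta.lsn, 0,
     *          cp->recno, PGNO_BASE_MD);
     */
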
diff --git a/bdb/qam/qam_conv.c b/bdb/qam/qam_conv.c
index 2eb1c7227e6..d89fe06b0cf 100644
--- a/bdb/qam/qam_conv.c
+++ b/bdb/qam/qam_conv.c
@@ -1,14 +1,14 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1999, 2000
+ * Copyright (c) 1999-2002
* Sleepycat Software. All rights reserved.
*/
#include "db_config.h"
#ifndef lint
-static const char revid[] = "$Id: qam_conv.c,v 11.6 2000/11/16 23:40:57 ubell Exp $";
+static const char revid[] = "$Id: qam_conv.c,v 11.14 2002/08/06 06:17:02 bostic Exp $";
#endif /* not lint */
#ifndef NO_SYSTEM_INCLUDES
@@ -16,10 +16,9 @@ static const char revid[] = "$Id: qam_conv.c,v 11.6 2000/11/16 23:40:57 ubell Ex
#endif
#include "db_int.h"
-#include "db_page.h"
-#include "qam.h"
-#include "db_swap.h"
-#include "db_am.h"
+#include "dbinc/db_page.h"
+#include "dbinc/db_swap.h"
+#include "dbinc/db_am.h"
/*
* __qam_mswap --
@@ -43,6 +42,8 @@ __qam_mswap(pg)
SWAP32(p); /* re_pad */
SWAP32(p); /* rec_page */
SWAP32(p); /* page_ext */
+ p += 91 * sizeof(u_int32_t); /* unused */
+ SWAP32(p); /* crypto_magic */
return (0);
}
@@ -68,7 +69,7 @@ __qam_pgin_out(dbenv, pg, pp, cookie)
COMPQUIET(pg, 0);
COMPQUIET(dbenv, NULL);
pginfo = (DB_PGINFO *)cookie->data;
- if (!pginfo->needswap)
+ if (!F_ISSET(pginfo, DB_AM_SWAP))
return (0);
h = pp;
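
__qam_mswap now steps over the 91 unused u_int32_t slots in the meta page before swapping the trailing crypto_magic field. For reference, the SWAP32 idiom it relies on byte-swaps a 32-bit value in place and advances the cursor; a stand-in (not the real macro from dbinc/db_swap.h) would look like:

    #include <sys/types.h>

    /* Illustrative stand-in for SWAP32: reverse the four bytes at *pp
     * and advance the pointer past them. */
    static void
    swap32_inplace(u_int8_t **pp)
    {
            u_int8_t *p, t;

            p = *pp;
            t = p[0]; p[0] = p[3]; p[3] = t;
            t = p[1]; p[1] = p[2]; p[2] = t;
            *pp = p + sizeof(u_int32_t);
    }
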
diff --git a/bdb/qam/qam_files.c b/bdb/qam/qam_files.c
index e53a3bf24c0..f15a88d546d 100644
--- a/bdb/qam/qam_files.c
+++ b/bdb/qam/qam_files.c
@@ -1,67 +1,65 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1999, 2000
+ * Copyright (c) 1999-2002
* Sleepycat Software. All rights reserved.
*/
#include "db_config.h"
#ifndef lint
-static const char revid[] = "$Id: qam_files.c,v 1.16 2001/01/19 18:01:59 bostic Exp $";
+static const char revid[] = "$Id: qam_files.c,v 1.52 2002/08/26 17:52:18 margo Exp $";
#endif /* not lint */
#ifndef NO_SYSTEM_INCLUDES
#include <sys/types.h>
+#include <stdlib.h>
#include <string.h>
#endif
#include "db_int.h"
-#include "db_page.h"
-#include "db_shash.h"
-#include "db_am.h"
-#include "lock.h"
-#include "btree.h"
-#include "qam.h"
-#include "mp.h"
+#include "dbinc/db_page.h"
+#include "dbinc/qam.h"
+#include "dbinc/db_am.h"
/*
- * __qam_fprobe -- calcluate and open extent
+ * __qam_fprobe -- calculate and open extent
*
- * Calculate which extent the page is in, open and create
- * if necessary.
+ * Calculate which extent the page is in, open and create if necessary.
*
- * PUBLIC: int __qam_fprobe __P((DB *, db_pgno_t, void *, qam_probe_mode, int));
+ * PUBLIC: int __qam_fprobe
+ * PUBLIC: __P((DB *, db_pgno_t, void *, qam_probe_mode, u_int32_t));
*/
-
int
__qam_fprobe(dbp, pgno, addrp, mode, flags)
DB *dbp;
db_pgno_t pgno;
void *addrp;
qam_probe_mode mode;
- int flags;
+ u_int32_t flags;
{
DB_ENV *dbenv;
DB_MPOOLFILE *mpf;
MPFARRAY *array;
QUEUE *qp;
- u_int32_t extid, maxext;
- char buf[256];
- int numext, offset, oldext, openflags, ret;
+ u_int8_t fid[DB_FILE_ID_LEN];
+ u_int32_t extid, maxext, openflags;
+ char buf[MAXPATHLEN];
+ int numext, offset, oldext, ret;
+ dbenv = dbp->dbenv;
qp = (QUEUE *)dbp->q_internal;
+ ret = 0;
+
if (qp->page_ext == 0) {
mpf = dbp->mpf;
- if (mode == QAM_PROBE_GET)
- return (memp_fget(mpf, &pgno, flags, addrp));
- return (memp_fput(mpf, addrp, flags));
+ return (mode == QAM_PROBE_GET ?
+ mpf->get(mpf, &pgno, flags, addrp) :
+ mpf->put(mpf, addrp, flags));
}
- dbenv = dbp->dbenv;
mpf = NULL;
- ret = 0;
/*
* Need to lock long enough to find the mpf or create the file.
@@ -92,36 +90,48 @@ __qam_fprobe(dbp, pgno, addrp, mode, flags)
/*
* Check to see if the requested extent is outside the range of
- * extents in the array. This is true by defualt if there are
+ * extents in the array. This is true by default if there are
* no extents here yet.
*/
if (offset < 0 || (unsigned) offset >= array->n_extent) {
oldext = array->n_extent;
- numext = array->hi_extent - array->low_extent + 1;
- if (offset < 0
- && (unsigned) -offset + numext <= array->n_extent) {
- /* If we can fit this one in, move the array up */
+ numext = array->hi_extent - array->low_extent + 1;
+ if (offset < 0 &&
+ (unsigned) -offset + numext <= array->n_extent) {
+ /*
+ * If we can fit this one into the existing array by
+ * shifting the existing entries then we do not have
+ * to allocate.
+ */
memmove(&array->mpfarray[-offset],
array->mpfarray, numext
* sizeof(array->mpfarray[0]));
memset(array->mpfarray, 0, -offset
- * sizeof(array->mpfarray[0]));
+ * sizeof(array->mpfarray[0]));
offset = 0;
} else if ((u_int32_t)offset == array->n_extent &&
mode != QAM_PROBE_MPF && array->mpfarray[0].pinref == 0) {
- /* We can close the bottom extent. */
+ /*
+ * If this is at the end of the array and the file at
+		 * the beginning has a zero pin count, we can close
+ * the bottom extent and put this one at the end.
+ */
mpf = array->mpfarray[0].mpf;
- if (mpf != NULL && (ret = memp_fclose(mpf)) != 0)
+ if (mpf != NULL && (ret = mpf->close(mpf, 0)) != 0)
goto err;
memmove(&array->mpfarray[0], &array->mpfarray[1],
- (array->n_extent - 1) * sizeof (array->mpfarray[0]));
+ (array->n_extent - 1) * sizeof(array->mpfarray[0]));
array->low_extent++;
array->hi_extent++;
offset--;
array->mpfarray[offset].mpf = NULL;
array->mpfarray[offset].pinref = 0;
} else {
- /* See if we have wrapped around the queue. */
+ /*
+ * See if we have wrapped around the queue.
+ * If it has then allocate the second array.
+ * Otherwise just expand the one we are using.
+ */
maxext = (u_int32_t) UINT32_T_MAX
/ (qp->page_ext * qp->rec_page);
if ((u_int32_t) abs(offset) >= maxext/2) {
@@ -143,51 +153,73 @@ __qam_fprobe(dbp, pgno, addrp, mode, flags)
alloc:
if ((ret = __os_realloc(dbenv,
array->n_extent * sizeof(struct __qmpf),
- NULL, &array->mpfarray)) != 0)
+ &array->mpfarray)) != 0)
goto err;
if (offset < 0) {
+ /*
+ * Move the array up and put the new one
+ * in the first slot.
+ */
offset = -offset;
- memmove(&array->mpfarray[offset], array->mpfarray,
+ memmove(&array->mpfarray[offset],
+ array->mpfarray,
numext * sizeof(array->mpfarray[0]));
memset(array->mpfarray, 0,
offset * sizeof(array->mpfarray[0]));
memset(&array->mpfarray[numext + offset], 0,
- (array->n_extent - (numext + offset))
- * sizeof(array->mpfarray[0]));
+ (array->n_extent - (numext + offset))
+ * sizeof(array->mpfarray[0]));
offset = 0;
}
else
+ /* Clear the new part of the array. */
memset(&array->mpfarray[oldext], 0,
(array->n_extent - oldext) *
sizeof(array->mpfarray[0]));
}
}
+ /* Update the low and hi range of saved extents. */
if (extid < array->low_extent)
array->low_extent = extid;
if (extid > array->hi_extent)
array->hi_extent = extid;
+
+ /* If the extent file is not yet open, open it. */
if (array->mpfarray[offset].mpf == NULL) {
- snprintf(buf,
- sizeof(buf), QUEUE_EXTENT, qp->dir, qp->name, extid);
+ snprintf(buf, sizeof(buf),
+ QUEUE_EXTENT, qp->dir, PATH_SEPARATOR[0], qp->name, extid);
+ if ((ret = dbenv->memp_fcreate(
+ dbenv, &array->mpfarray[offset].mpf, 0)) != 0)
+ goto err;
+ mpf = array->mpfarray[offset].mpf;
+ (void)mpf->set_lsn_offset(mpf, 0);
+ (void)mpf->set_pgcookie(mpf, &qp->pgcookie);
+
+ /* Set up the fileid for this extent. */
+ __qam_exid(dbp, fid, extid);
+ (void)mpf->set_fileid(mpf, fid);
openflags = DB_EXTENT;
if (LF_ISSET(DB_MPOOL_CREATE))
openflags |= DB_CREATE;
if (F_ISSET(dbp, DB_AM_RDONLY))
openflags |= DB_RDONLY;
- qp->finfo.fileid = NULL;
- if ((ret = __memp_fopen(dbenv->mp_handle,
- NULL, buf, openflags, qp->mode, dbp->pgsize,
- 1, &qp->finfo, &array->mpfarray[offset].mpf)) != 0)
+ if (F_ISSET(dbenv, DB_ENV_DIRECT_DB))
+ openflags |= DB_DIRECT;
+ if ((ret = mpf->open(
+ mpf, buf, openflags, qp->mode, dbp->pgsize)) != 0) {
+ array->mpfarray[offset].mpf = NULL;
+ (void)mpf->close(mpf, 0);
goto err;
+ }
}
mpf = array->mpfarray[offset].mpf;
if (mode == QAM_PROBE_GET)
array->mpfarray[offset].pinref++;
if (LF_ISSET(DB_MPOOL_CREATE))
- __memp_clear_unlink(mpf);
+ mpf->set_unlink(mpf, 0);
err:
MUTEX_THREAD_UNLOCK(dbenv, dbp->mutexp);
@@ -200,9 +232,8 @@ err:
pgno--;
pgno %= qp->page_ext;
if (mode == QAM_PROBE_GET)
- return (memp_fget(mpf,
- &pgno, flags | DB_MPOOL_EXTENT, addrp));
- ret = memp_fput(mpf, addrp, flags);
+ return (mpf->get(mpf, &pgno, flags, addrp));
+ ret = mpf->put(mpf, addrp, flags);
MUTEX_THREAD_LOCK(dbenv, dbp->mutexp);
array->mpfarray[offset].pinref--;
MUTEX_THREAD_UNLOCK(dbenv, dbp->mutexp);
@@ -218,7 +249,6 @@ err:
*
* PUBLIC: int __qam_fclose __P((DB *, db_pgno_t));
*/
-
int
__qam_fclose(dbp, pgnoaddr)
DB *dbp;
@@ -251,12 +281,13 @@ __qam_fclose(dbp, pgnoaddr)
mpf = array->mpfarray[offset].mpf;
array->mpfarray[offset].mpf = NULL;
- ret = memp_fclose(mpf);
+ ret = mpf->close(mpf, 0);
done:
MUTEX_THREAD_UNLOCK(dbenv, dbp->mutexp);
return (ret);
}
+
/*
* __qam_fremove -- remove an extent.
*
@@ -266,7 +297,6 @@ done:
*
* PUBLIC: int __qam_fremove __P((DB *, db_pgno_t));
*/
-
int
__qam_fremove(dbp, pgnoaddr)
DB *dbp;
@@ -278,7 +308,7 @@ __qam_fremove(dbp, pgnoaddr)
QUEUE *qp;
u_int32_t extid;
#if CONFIG_TEST
- char buf[256], *real_name;
+ char buf[MAXPATHLEN], *real_name;
#endif
int offset, ret;
@@ -300,22 +330,34 @@ __qam_fremove(dbp, pgnoaddr)
real_name = NULL;
/* Find the real name of the file. */
snprintf(buf, sizeof(buf),
- QUEUE_EXTENT, qp->dir, qp->name, extid);
+ QUEUE_EXTENT, qp->dir, PATH_SEPARATOR[0], qp->name, extid);
if ((ret = __db_appname(dbenv,
- DB_APP_DATA, NULL, buf, 0, NULL, &real_name)) != 0)
+ DB_APP_DATA, buf, 0, NULL, &real_name)) != 0)
goto err;
#endif
+ /*
+ * The log must be flushed before the file is deleted. We depend on
+ * the log record of the last delete to recreate the file if we crash.
+ */
+ if (LOGGING_ON(dbenv) && (ret = dbenv->log_flush(dbenv, NULL)) != 0)
+ goto err;
+
mpf = array->mpfarray[offset].mpf;
array->mpfarray[offset].mpf = NULL;
- __memp_set_unlink(mpf);
- if ((ret = memp_fclose(mpf)) != 0)
+ mpf->set_unlink(mpf, 1);
+ if ((ret = mpf->close(mpf, 0)) != 0)
goto err;
+ /*
+ * If the file is at the bottom of the array
+ * shift things down and adjust the end points.
+ */
if (offset == 0) {
memmove(array->mpfarray, &array->mpfarray[1],
(array->hi_extent - array->low_extent)
* sizeof(array->mpfarray[0]));
- array->mpfarray[array->hi_extent - array->low_extent].mpf = NULL;
+ array->mpfarray[
+ array->hi_extent - array->low_extent].mpf = NULL;
if (array->low_extent != array->hi_extent)
array->low_extent++;
} else {
@@ -327,7 +369,7 @@ err:
MUTEX_THREAD_UNLOCK(dbenv, dbp->mutexp);
#if CONFIG_TEST
if (real_name != NULL)
- __os_freestr(real_name);
+ __os_free(dbenv, real_name);
#endif
return (ret);
}
@@ -353,6 +395,7 @@ __qam_sync(dbp, flags)
int done, ret;
dbenv = dbp->dbenv;
+ mpf = dbp->mpf;
PANIC_CHECK(dbenv);
DB_ILLEGAL_BEFORE_OPEN(dbp, "DB->sync");
@@ -369,7 +412,7 @@ __qam_sync(dbp, flags)
return (0);
/* Flush any dirty pages from the cache to the backing file. */
- if ((ret = memp_fsync(dbp->mpf)) != 0)
+ if ((ret = mpf->sync(dbp->mpf)) != 0)
return (ret);
qp = (QUEUE *)dbp->q_internal;
@@ -383,7 +426,7 @@ __qam_sync(dbp, flags)
if (filelist == NULL)
return (0);
- __os_free(filelist, 0);
+ __os_free(dbp->dbenv, filelist);
done = 0;
qp = (QUEUE *)dbp->q_internal;
@@ -394,7 +437,7 @@ again:
mpfp = array->mpfarray;
for (i = array->low_extent; i <= array->hi_extent; i++, mpfp++)
if ((mpf = mpfp->mpf) != NULL) {
- if ((ret = memp_fsync(mpf)) != 0)
+ if ((ret = mpf->sync(mpf)) != 0)
goto err;
/*
* If we are the only ones with this file open
@@ -402,7 +445,7 @@ again:
*/
if (mpfp->pinref == 0) {
mpfp->mpf = NULL;
- if ((ret = memp_fclose(mpf)) != 0)
+ if ((ret = mpf->close(mpf, 0)) != 0)
goto err;
}
}
@@ -431,16 +474,19 @@ __qam_gen_filelist(dbp, filelistp)
QUEUE_FILELIST **filelistp;
{
DB_ENV *dbenv;
+ DB_MPOOLFILE *mpf;
QUEUE *qp;
QMETA *meta;
- db_pgno_t i, last, start, stop;
+ db_pgno_t i, last, start;
db_recno_t current, first;
QUEUE_FILELIST *fp;
int ret;
dbenv = dbp->dbenv;
+ mpf = dbp->mpf;
qp = (QUEUE *)dbp->q_internal;
*filelistp = NULL;
+
if (qp->page_ext == 0)
return (0);
@@ -450,18 +496,14 @@ __qam_gen_filelist(dbp, filelistp)
/* Find out the page number of the last page in the database. */
i = PGNO_BASE_MD;
- if ((ret = memp_fget(dbp->mpf, &i, 0, &meta)) != 0) {
- (void)dbp->close(dbp, 0);
+ if ((ret = mpf->get(mpf, &i, 0, &meta)) != 0)
return (ret);
- }
current = meta->cur_recno;
first = meta->first_recno;
- if ((ret = memp_fput(dbp->mpf, meta, 0)) != 0) {
- (void)dbp->close(dbp, 0);
+ if ((ret = mpf->put(mpf, meta, 0)) != 0)
return (ret);
- }
last = QAM_RECNO_PAGE(dbp, current);
start = QAM_RECNO_PAGE(dbp, first);
@@ -476,14 +518,10 @@ __qam_gen_filelist(dbp, filelistp)
return (ret);
fp = *filelistp;
i = start;
- if (last >= start)
- stop = last;
- else
- stop = QAM_RECNO_PAGE(dbp, UINT32_T_MAX);
-again:
- for (; i <= last; i += qp->page_ext) {
- if ((ret = __qam_fprobe(dbp,
- i, &fp->mpf, QAM_PROBE_MPF, 0)) != 0) {
+
+again: for (; i <= last; i += qp->page_ext) {
+ if ((ret =
+ __qam_fprobe(dbp, i, &fp->mpf, QAM_PROBE_MPF, 0)) != 0) {
if (ret == ENOENT)
continue;
return (ret);
@@ -494,10 +532,111 @@ again:
if (last < start) {
i = 1;
- stop = last;
start = 0;
goto again;
}
return (0);
}
+
+/*
+ * __qam_extent_names -- generate a list of extent files names.
+ *
+ * PUBLIC: int __qam_extent_names __P((DB_ENV *, char *, char ***));
+ */
+int
+__qam_extent_names(dbenv, name, namelistp)
+ DB_ENV *dbenv;
+ char *name;
+ char ***namelistp;
+{
+ DB *dbp;
+ QUEUE *qp;
+ QUEUE_FILELIST *filelist, *fp;
+ char buf[MAXPATHLEN], *dir, **cp, *freep;
+ int cnt, len, ret;
+
+ *namelistp = NULL;
+ filelist = NULL;
+ if ((ret = db_create(&dbp, dbenv, 0)) != 0)
+ return (ret);
+ if ((ret =
+ __db_open(dbp, NULL, name, NULL, DB_QUEUE, DB_RDONLY, 0)) != 0)
+ return (ret);
+ qp = dbp->q_internal;
+ if (qp->page_ext == 0)
+ goto done;
+
+ if ((ret = __qam_gen_filelist(dbp, &filelist)) != 0)
+ goto done;
+
+ if (filelist == NULL)
+ goto done;
+
+ cnt = 0;
+ for (fp = filelist; fp->mpf != NULL; fp++)
+ cnt++;
+ dir = ((QUEUE *)dbp->q_internal)->dir;
+ name = ((QUEUE *)dbp->q_internal)->name;
+
+ /* QUEUE_EXTENT contains extra chars, but add 6 anyway for the int. */
+ len = (u_int32_t)(cnt * (sizeof(**namelistp)
+ + strlen(QUEUE_EXTENT) + strlen(dir) + strlen(name) + 6));
+
+ if ((ret =
+ __os_malloc(dbp->dbenv, len, namelistp)) != 0)
+ goto done;
+ cp = *namelistp;
+ freep = (char *)(cp + cnt + 1);
+ for (fp = filelist; fp->mpf != NULL; fp++) {
+ snprintf(buf, sizeof(buf),
+ QUEUE_EXTENT, dir, PATH_SEPARATOR[0], name, fp->id);
+ len = (u_int32_t)strlen(buf);
+ *cp++ = freep;
+ strcpy(freep, buf);
+ freep += len + 1;
+ }
+ *cp = NULL;
+
+done:
+ if (filelist != NULL)
+ __os_free(dbp->dbenv, filelist);
+ (void)dbp->close(dbp, DB_NOSYNC);
+
+ return (ret);
+}
+
+/*
+ * __qam_exid --
+ * Generate a fileid for an extent based on the fileid of the main
+ * file. Since we do not log schema creates/deletes explicitly, the log
+ * never captures the fileid of an extent file. In order that masters and
+ * replicas have the same fileids (so they can explicitly delete them), we
+ * use computed fileids for the extent files of Queue files.
+ *
+ * An extent file id retains the low order 12 bytes of the file id and
+ * overwrites the dev/inode fields, placing a 0 in the inode field, and
+ * the extent number in the dev field.
+ *
+ * PUBLIC: void __qam_exid __P((DB *, u_int8_t *, u_int32_t));
+ */
+void
+__qam_exid(dbp, fidp, exnum)
+ DB *dbp;
+ u_int8_t *fidp;
+ u_int32_t exnum;
+{
+ int i;
+ u_int8_t *p;
+
+ /* Copy the fileid from the master. */
+ memcpy(fidp, dbp->fileid, DB_FILE_ID_LEN);
+
+ /* The first four bytes are the inode or the FileIndexLow; 0 it. */
+ for (i = sizeof(u_int32_t); i > 0; --i)
+ *fidp++ = 0;
+
+	/* The next four bytes are the dev/FileIndexHigh; insert the exnum. */
+ for (p = (u_int8_t *)&exnum, i = sizeof(u_int32_t); i > 0; --i)
+ *fidp++ = *p++;
+}
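
Throughout qam_files.c the patch drives extents through the DB_MPOOLFILE handle methods (memp_fcreate, open, get, put, close and the set_* configuration calls) instead of the old memp_f* functions. A condensed sketch of that handle lifecycle; `path', `mode' and `pagesize' are caller-supplied assumptions and error handling is abbreviated:

    #include <db.h>

    static int
    touch_first_page(DB_ENV *dbenv, char *path, int mode, size_t pagesize)
    {
            DB_MPOOLFILE *mpf;
            db_pgno_t pgno;
            void *page;
            int ret, t_ret;

            if ((ret = dbenv->memp_fcreate(dbenv, &mpf, 0)) != 0)
                    return (ret);
            if ((ret = mpf->open(mpf, path, DB_CREATE, mode, pagesize)) != 0)
                    goto err;

            /* Pin page 0, creating it if necessary, then mark it dirty. */
            pgno = 0;
            if ((ret = mpf->get(mpf, &pgno, DB_MPOOL_CREATE, &page)) == 0)
                    ret = mpf->put(mpf, page, DB_MPOOL_DIRTY);

    err:    if ((t_ret = mpf->close(mpf, 0)) != 0 && ret == 0)
                    ret = t_ret;
            return (ret);
    }
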
diff --git a/bdb/qam/qam_method.c b/bdb/qam/qam_method.c
index 1c94f4b8db0..5415fc5d00c 100644
--- a/bdb/qam/qam_method.c
+++ b/bdb/qam/qam_method.c
@@ -1,34 +1,32 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1999, 2000
+ * Copyright (c) 1999-2002
* Sleepycat Software. All rights reserved.
*/
#include "db_config.h"
#ifndef lint
-static const char revid[] = "$Id: qam_method.c,v 11.17 2001/01/10 04:50:54 ubell Exp $";
+static const char revid[] = "$Id: qam_method.c,v 11.55 2002/08/26 17:52:19 margo Exp $";
#endif /* not lint */
#ifndef NO_SYSTEM_INCLUDES
#include <sys/types.h>
+
#include <string.h>
#endif
#include "db_int.h"
-#include "db_page.h"
-#include "db_int.h"
-#include "db_shash.h"
-#include "db_am.h"
-#include "qam.h"
-#include "db.h"
-#include "mp.h"
-#include "lock.h"
-#include "log.h"
+#include "dbinc/db_page.h"
+#include "dbinc/db_shash.h"
+#include "dbinc/db_am.h"
+#include "dbinc/fop.h"
+#include "dbinc/lock.h"
+#include "dbinc/qam.h"
+#include "dbinc/txn.h"
static int __qam_set_extentsize __P((DB *, u_int32_t));
-static int __qam_remove_callback __P((DB *, void *));
struct __qam_cookie {
DB_LSN lsn;
@@ -77,7 +75,8 @@ __qam_db_close(dbp)
int ret, t_ret;
ret = 0;
- t = dbp->q_internal;
+ if ((t = dbp->q_internal) == NULL)
+ return (0);
array = &t->array1;
again:
@@ -88,10 +87,10 @@ again:
mpf = mpfp->mpf;
mpfp->mpf = NULL;
if (mpf != NULL &&
- (t_ret = memp_fclose(mpf)) != 0 && ret == 0)
+ (t_ret = mpf->close(mpf, 0)) != 0 && ret == 0)
ret = t_ret;
}
- __os_free(array->mpfarray, 0);
+ __os_free(dbp->dbenv, array->mpfarray);
}
if (t->array2.n_extent != 0) {
array = &t->array2;
@@ -100,8 +99,8 @@ again:
}
if (t->path != NULL)
- __os_free(t->path, 0);
- __os_free(t, sizeof(QUEUE));
+ __os_free(dbp->dbenv, t->path);
+ __os_free(dbp->dbenv, t);
dbp->q_internal = NULL;
return (ret);
@@ -115,7 +114,7 @@ __qam_set_extentsize(dbp, extentsize)
DB_ILLEGAL_AFTER_OPEN(dbp, "set_extentsize");
if (extentsize < 1) {
- __db_err(dbp->dbenv, "Extent size must be at least 1.");
+ __db_err(dbp->dbenv, "Extent size must be at least 1");
return (EINVAL);
}
@@ -128,29 +127,35 @@ __qam_set_extentsize(dbp, extentsize)
* __db_prqueue --
* Print out a queue
*
- * PUBLIC: int __db_prqueue __P((DB *, u_int32_t));
+ * PUBLIC: int __db_prqueue __P((DB *, FILE *, u_int32_t));
*/
int
-__db_prqueue(dbp, flags)
+__db_prqueue(dbp, fp, flags)
DB *dbp;
+ FILE *fp;
u_int32_t flags;
{
+ DB_MPOOLFILE *mpf;
PAGE *h;
QMETA *meta;
db_pgno_t first, i, last, pg_ext, stop;
- int ret;
+ int ret, t_ret;
+
+ mpf = dbp->mpf;
/* Find out the page number of the last page in the database. */
i = PGNO_BASE_MD;
- if ((ret = memp_fget(dbp->mpf, &i, 0, &meta)) != 0)
+ if ((ret = mpf->get(mpf, &i, 0, &meta)) != 0)
return (ret);
first = QAM_RECNO_PAGE(dbp, meta->first_recno);
last = QAM_RECNO_PAGE(dbp, meta->cur_recno);
- if ((ret = __db_prpage(dbp, (PAGE *)meta, flags)) != 0)
- return (ret);
- if ((ret = memp_fput(dbp->mpf, meta, 0)) != 0)
+ ret = __db_prpage(dbp, (PAGE *)meta, fp, flags);
+ if ((t_ret = mpf->put(mpf, meta, 0)) != 0 && ret == 0)
+ ret = t_ret;
+
+ if (ret != 0)
return (ret);
i = first;
@@ -162,20 +167,20 @@ __db_prqueue(dbp, flags)
/* Dump each page. */
begin:
for (; i <= stop; ++i) {
- if ((ret = __qam_fget(dbp, &i, DB_MPOOL_EXTENT, &h)) != 0) {
+ if ((ret = __qam_fget(dbp, &i, 0, &h)) != 0) {
pg_ext = ((QUEUE *)dbp->q_internal)->page_ext;
if (pg_ext == 0) {
- if (ret == EINVAL && first == last)
+ if (ret == DB_PAGE_NOTFOUND && first == last)
return (0);
return (ret);
}
- if (ret == ENOENT || ret == EINVAL) {
+ if (ret == ENOENT || ret == DB_PAGE_NOTFOUND) {
i += pg_ext - ((i - 1) % pg_ext) - 1;
continue;
}
return (ret);
}
- (void)__db_prpage(dbp, h, flags);
+ (void)__db_prpage(dbp, h, fp, flags);
if ((ret = __qam_fput(dbp, i, h, 0)) != 0)
return (ret);
}
@@ -193,31 +198,31 @@ begin:
* __qam_remove
* Remove method for a Queue.
*
- * PUBLIC: int __qam_remove __P((DB *, const char *,
- * PUBLIC: const char *, DB_LSN *, int (**)(DB *, void*), void **));
+ * PUBLIC: int __qam_remove __P((DB *,
+ * PUBLIC: DB_TXN *, const char *, const char *, DB_LSN *));
*/
int
-__qam_remove(dbp, name, subdb, lsnp, callbackp, cookiep)
+__qam_remove(dbp, txn, name, subdb, lsnp)
DB *dbp;
+ DB_TXN *txn;
const char *name, *subdb;
DB_LSN *lsnp;
- int (**callbackp) __P((DB *, void *));
- void **cookiep;
{
- DBT namedbt;
DB_ENV *dbenv;
- DB_LSN lsn;
+ DB *tmpdbp;
MPFARRAY *ap;
QUEUE *qp;
- int ret;
- char *backup, buf[256], *real_back, *real_name;
QUEUE_FILELIST *filelist, *fp;
- struct __qam_cookie *qam_cookie;
+ int ret, needclose, t_ret;
+ char buf[MAXPATHLEN];
+ u_int8_t fid[DB_FILE_ID_LEN];
+
+ COMPQUIET(lsnp, NULL);
dbenv = dbp->dbenv;
ret = 0;
- backup = real_back = real_name = NULL;
filelist = NULL;
+ needclose = 0;
PANIC_CHECK(dbenv);
@@ -226,148 +231,86 @@ __qam_remove(dbp, name, subdb, lsnp, callbackp, cookiep)
*/
if (subdb != NULL) {
__db_err(dbenv,
- "Queue does not support multiple databases per file.");
+ "Queue does not support multiple databases per file");
ret = EINVAL;
- goto done;
+ goto err;
+ }
+
+ /*
+ * Since regular remove no longer opens the database, we may have
+ * to do it here.
+ */
+ if (F_ISSET(dbp, DB_AM_OPEN_CALLED))
+ tmpdbp = dbp;
+ else {
+ if ((ret = db_create(&tmpdbp, dbenv, 0)) != 0)
+ return (ret);
+ /*
+ * We need to make sure we don't self-deadlock, so give
+ * this dbp the same locker as the incoming one.
+ */
+ tmpdbp->lid = dbp->lid;
+
+ /*
+ * If this is a transactional dbp and the open fails, then
+ * the transactional abort will close the dbp. If it's not
+ * a transactional open, then we always have to close it
+ * even if the open fails. Once the open has succeeded,
+ * then we will always want to close it.
+ */
+ if (txn == NULL)
+ needclose = 1;
+ if ((ret = tmpdbp->open(tmpdbp,
+ txn, name, NULL, DB_QUEUE, 0, 0)) != 0)
+ goto err;
+ needclose = 1;
}
- qp = (QUEUE *)dbp->q_internal;
+ qp = (QUEUE *)tmpdbp->q_internal;
if (qp->page_ext != 0 &&
- (ret = __qam_gen_filelist(dbp, &filelist)) != 0)
- goto done;
+ (ret = __qam_gen_filelist(tmpdbp, &filelist)) != 0)
+ goto err;
if (filelist == NULL)
- goto done;
+ goto err;
for (fp = filelist; fp->mpf != NULL; fp++) {
- snprintf(buf,
- sizeof(buf), QUEUE_EXTENT, qp->dir, qp->name, fp->id);
- if ((ret = __db_appname(dbenv,
- DB_APP_DATA, NULL, buf, 0, NULL, &real_name)) != 0)
- goto done;
- if (LOGGING_ON(dbenv)) {
- memset(&namedbt, 0, sizeof(namedbt));
- namedbt.data = (char *)buf;
- namedbt.size = strlen(buf) + 1;
-
- if ((ret =
- __qam_delete_log(dbenv, dbp->open_txn,
- &lsn, DB_FLUSH, &namedbt, lsnp)) != 0) {
- __db_err(dbenv,
- "%s: %s", name, db_strerror(ret));
- goto done;
- }
- }
- (void)__memp_fremove(fp->mpf);
- if ((ret = memp_fclose(fp->mpf)) != 0)
- goto done;
+ snprintf(buf, sizeof(buf),
+ QUEUE_EXTENT, qp->dir, PATH_SEPARATOR[0], qp->name, fp->id);
+ if ((ret = fp->mpf->close(fp->mpf, DB_MPOOL_DISCARD)) != 0)
+ goto err;
if (qp->array2.n_extent == 0 || qp->array2.low_extent > fp->id)
ap = &qp->array1;
else
ap = &qp->array2;
ap->mpfarray[fp->id - ap->low_extent].mpf = NULL;
- /* Create name for backup file. */
- if (TXN_ON(dbenv)) {
- if ((ret = __db_backup_name(dbenv,
- buf, &backup, lsnp)) != 0)
- goto done;
- if ((ret = __db_appname(dbenv, DB_APP_DATA,
- NULL, backup, 0, NULL, &real_back)) != 0)
- goto done;
- if ((ret = __os_rename(dbenv,
- real_name, real_back)) != 0)
- goto done;
- __os_freestr(real_back);
- real_back = NULL;
- }
- else
- if ((ret = __os_unlink(dbenv, real_name)) != 0)
- goto done;
- __os_freestr(real_name);
- real_name = NULL;
- }
- if ((ret= __os_malloc(dbenv,
- sizeof(struct __qam_cookie), NULL, &qam_cookie)) != 0)
- goto done;
- qam_cookie->lsn = *lsnp;
- qam_cookie->filelist = filelist;
- *cookiep = qam_cookie;
- *callbackp = __qam_remove_callback;
-
-done:
- if (ret != 0 && filelist != NULL)
- __os_free(filelist, 0);
- if (real_back != NULL)
- __os_freestr(real_back);
- if (real_name != NULL)
- __os_freestr(real_name);
- if (backup != NULL)
- __os_freestr(backup);
-
- return (ret);
-}
-
-static int
-__qam_remove_callback(dbp, cookie)
- DB *dbp;
- void *cookie;
-{
- DB_ENV *dbenv;
- DB_LSN *lsnp;
- QUEUE *qp;
- QUEUE_FILELIST *filelist, *fp;
- char *backup, buf[256], *real_back;
- int ret;
-
- qp = (QUEUE *)dbp->q_internal;
- if (qp->page_ext == 0)
- return (__os_unlink(dbp->dbenv, cookie));
-
- dbenv = dbp->dbenv;
- lsnp = &((struct __qam_cookie *)cookie)->lsn;
- filelist = fp = ((struct __qam_cookie *)cookie)->filelist;
- real_back = backup = NULL;
- if ((ret =
- __db_backup_name(dbenv, qp->name, &backup, lsnp)) != 0)
- goto err;
- if ((ret = __db_appname(dbenv,
- DB_APP_DATA, NULL, backup, 0, NULL, &real_back)) != 0)
- goto err;
- if ((ret = __os_unlink(dbp->dbenv, real_back)) != 0)
- goto err;
-
- __os_freestr(backup);
- __os_freestr(real_back);
-
- if (fp == NULL)
- return (0);
-
- for (; fp->mpf != NULL; fp++) {
- snprintf(buf,
- sizeof(buf), QUEUE_EXTENT, qp->dir, qp->name, fp->id);
- real_back = backup = NULL;
- if ((ret = __db_backup_name(dbenv, buf, &backup, lsnp)) != 0)
- goto err;
- if ((ret = __db_appname(dbenv,
- DB_APP_DATA, NULL, backup, 0, NULL, &real_back)) != 0)
+ /* Take care of object reclamation. */
+ __qam_exid(tmpdbp, fid, fp->id);
+ if ((ret = __fop_remove(dbenv,
+ txn, fid, buf, DB_APP_DATA)) != 0)
goto err;
- ret = __os_unlink(dbenv, real_back);
- __os_freestr(real_back);
- __os_freestr(backup);
}
- __os_free(filelist, 0);
- __os_free(cookie, sizeof (struct __qam_cookie));
- return (0);
-
-err:
- if (backup != NULL)
- __os_freestr(backup);
-
- if (real_back != NULL)
- __os_freestr(real_back);
+err: if (filelist != NULL)
+ __os_free(dbenv, filelist);
+ if (needclose) {
+ /*
+ * Since we copied the lid from the dbp, we'd better not
+ * free it here.
+ */
+ tmpdbp->lid = DB_LOCK_INVALIDID;
+
+ /* We need to remove the lockevent we associated with this. */
+ if (txn != NULL)
+ __txn_remlock(dbenv,
+ txn, &tmpdbp->handle_lock, DB_LOCK_INVALIDID);
+
+ if ((t_ret =
+ __db_close_i(tmpdbp, txn, DB_NOSYNC)) != 0 && ret == 0)
+ ret = t_ret;
+ }
return (ret);
}
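
The rewritten remove path follows a handle-ownership pattern worth calling out: reuse the caller's already-open handle when possible, otherwise open a private one, and close only what this function opened, on every exit path. A stripped-down sketch of that pattern with hypothetical types (none of the DB internals such as lid or __db_close_i appear here):

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical handle type standing in for an open database. */
struct handle {
        int is_open;
};

static int
open_handle(struct handle **hp)
{
        if ((*hp = calloc(1, sizeof(**hp))) == NULL)
                return (-1);
        (*hp)->is_open = 1;
        return (0);
}

static void
close_handle(struct handle *h)
{
        free(h);
}

/*
 * Work with 'given' if the caller already opened it; otherwise open a
 * temporary handle and remember (needclose) that we must close it on
 * every exit path, success or failure.
 */
static int
do_remove(struct handle *given)
{
        struct handle *h;
        int needclose, ret;

        needclose = 0;
        if (given != NULL && given->is_open)
                h = given;
        else {
                if ((ret = open_handle(&h)) != 0)
                        return (ret);
                needclose = 1;
        }

        ret = 0;                /* ... the actual removal work would go here ... */

        if (needclose)
                close_handle(h);        /* Only close what we opened. */
        return (ret);
}

int
main(void)
{
        return (do_remove(NULL));
}
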
@@ -376,97 +319,95 @@ err:
* __qam_rename
* Rename method for Queue.
*
- * PUBLIC: int __qam_rename __P((DB *,
+ * PUBLIC: int __qam_rename __P((DB *, DB_TXN *,
* PUBLIC: const char *, const char *, const char *));
*/
int
-__qam_rename(dbp, filename, subdb, newname)
+__qam_rename(dbp, txn, filename, subdb, newname)
DB *dbp;
+ DB_TXN *txn;
const char *filename, *subdb, *newname;
{
- DBT namedbt, newnamedbt;
DB_ENV *dbenv;
- DB_LSN newlsn;
+ DB *tmpdbp;
MPFARRAY *ap;
QUEUE *qp;
QUEUE_FILELIST *fp, *filelist;
- char buf[256], nbuf[256], *namep, *real_name, *real_newname;
- int ret;
+ char buf[MAXPATHLEN], nbuf[MAXPATHLEN];
+ char *namep;
+ int ret, needclose, t_ret;
+ u_int8_t fid[DB_FILE_ID_LEN], *fidp;
dbenv = dbp->dbenv;
ret = 0;
- real_name = real_newname = NULL;
filelist = NULL;
-
- qp = (QUEUE *)dbp->q_internal;
+ needclose = 0;
if (subdb != NULL) {
__db_err(dbenv,
- "Queue does not support multiple databases per file.");
+ "Queue does not support multiple databases per file");
ret = EINVAL;
goto err;
}
+
+ /*
+ * Since regular rename no longer opens the database, we may have
+ * to do it here.
+ */
+ if (F_ISSET(dbp, DB_AM_OPEN_CALLED))
+ tmpdbp = dbp;
+ else {
+ if ((ret = db_create(&tmpdbp, dbenv, 0)) != 0)
+ return (ret);
+ /* Copy the incoming locker so we don't self-deadlock. */
+ tmpdbp->lid = dbp->lid;
+ needclose = 1;
+ if ((ret = tmpdbp->open(tmpdbp, txn, filename, NULL,
+ DB_QUEUE, 0, 0)) != 0)
+ goto err;
+ }
+
+ qp = (QUEUE *)tmpdbp->q_internal;
+
if (qp->page_ext != 0 &&
- (ret = __qam_gen_filelist(dbp, &filelist)) != 0)
+ (ret = __qam_gen_filelist(tmpdbp, &filelist)) != 0)
goto err;
if ((namep = __db_rpath(newname)) != NULL)
newname = namep + 1;
+ fidp = fid;
for (fp = filelist; fp != NULL && fp->mpf != NULL; fp++) {
- if ((ret = __memp_fremove(fp->mpf)) != 0)
- goto err;
- if ((ret = memp_fclose(fp->mpf)) != 0)
+ fp->mpf->get_fileid(fp->mpf, fidp);
+ if ((ret = fp->mpf->close(fp->mpf, DB_MPOOL_DISCARD)) != 0)
goto err;
if (qp->array2.n_extent == 0 || qp->array2.low_extent > fp->id)
ap = &qp->array1;
else
ap = &qp->array2;
ap->mpfarray[fp->id - ap->low_extent].mpf = NULL;
- snprintf(buf,
- sizeof(buf), QUEUE_EXTENT, qp->dir, qp->name, fp->id);
- if ((ret = __db_appname(dbenv,
- DB_APP_DATA, NULL, buf, 0, NULL, &real_name)) != 0)
- goto err;
- snprintf(nbuf,
- sizeof(nbuf), QUEUE_EXTENT, qp->dir, newname, fp->id);
- if ((ret = __db_appname(dbenv,
- DB_APP_DATA, NULL, nbuf, 0, NULL, &real_newname)) != 0)
+ snprintf(buf, sizeof(buf),
+ QUEUE_EXTENT, qp->dir, PATH_SEPARATOR[0], qp->name, fp->id);
+ snprintf(nbuf, sizeof(nbuf),
+ QUEUE_EXTENT, qp->dir, PATH_SEPARATOR[0], newname, fp->id);
+ if ((ret = __fop_rename(dbenv,
+ txn, buf, nbuf, fidp, DB_APP_DATA)) != 0)
goto err;
- if (LOGGING_ON(dbenv)) {
- memset(&namedbt, 0, sizeof(namedbt));
- namedbt.data = (char *)buf;
- namedbt.size = strlen(buf) + 1;
-
- memset(&newnamedbt, 0, sizeof(namedbt));
- newnamedbt.data = (char *)nbuf;
- newnamedbt.size = strlen(nbuf) + 1;
-
- if ((ret =
- __qam_rename_log(dbenv,
- dbp->open_txn, &newlsn, 0,
- &namedbt, &newnamedbt)) != 0) {
- __db_err(dbenv, "%s: %s", filename, db_strerror(ret));
- goto err;
- }
-
- if ((ret = __log_filelist_update(dbenv, dbp,
- dbp->log_fileid, newname, NULL)) != 0)
- goto err;
- }
- if ((ret = __os_rename(dbenv, real_name, real_newname)) != 0)
- goto err;
- __os_freestr(real_name);
- __os_freestr(real_newname);
- real_name = real_newname = NULL;
}
-err:
- if (real_name != NULL)
- __os_freestr(real_name);
- if (real_newname != NULL)
- __os_freestr(real_newname);
- if (filelist != NULL)
- __os_free(filelist, 0);
+err: if (filelist != NULL)
+ __os_free(dbenv, filelist);
+ if (needclose) {
+ /* We copied this, so we mustn't free it. */
+ tmpdbp->lid = DB_LOCK_INVALIDID;
+ /* We need to remove the lockevent we associated with this. */
+ if (txn != NULL)
+ __txn_remlock(dbenv,
+ txn, &tmpdbp->handle_lock, DB_LOCK_INVALIDID);
+
+ if ((t_ret =
+ __db_close_i(tmpdbp, txn, DB_NOSYNC)) != 0 && ret == 0)
+ ret = t_ret;
+ }
return (ret);
}
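
Both the remove and rename loops build one path per extent from a printf-style template and hand it to the file-operations layer. A rough sketch of the per-extent rename, using plain rename(2) and an invented EXTENT_FMT in place of the real QUEUE_EXTENT template (which also takes the path separator as an argument); unlike __fop_rename this is not transactional and cannot be undone:

#include <stdio.h>

/* Illustrative stand-in for QUEUE_EXTENT: directory, base name, extent. */
#define EXTENT_FMT      "%s/__dbq.%s.%d"

/* Rename every numbered extent of "old" to the matching extent of "new". */
static int
rename_extents(const char *dir, const char *old, const char *new, int next)
{
        char obuf[1024], nbuf[1024];
        int i;

        for (i = 0; i < next; i++) {
                snprintf(obuf, sizeof(obuf), EXTENT_FMT, dir, old, i);
                snprintf(nbuf, sizeof(nbuf), EXTENT_FMT, dir, new, i);
                if (rename(obuf, nbuf) != 0)
                        return (-1);    /* Caller decides how to recover. */
        }
        return (0);
}

int
main(void)
{
        /* The names are invented; with no such files this simply fails. */
        (void)rename_extents("/tmp", "olddb", "newdb", 3);
        return (0);
}
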
diff --git a/bdb/qam/qam_open.c b/bdb/qam/qam_open.c
index 73346439fd6..efe4dfc540e 100644
--- a/bdb/qam/qam_open.c
+++ b/bdb/qam/qam_open.c
@@ -1,14 +1,14 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1999, 2000
+ * Copyright (c) 1999-2002
* Sleepycat Software. All rights reserved.
*/
#include "db_config.h"
#ifndef lint
-static const char revid[] = "$Id: qam_open.c,v 11.31 2000/12/20 17:59:29 ubell Exp $";
+static const char revid[] = "$Id: qam_open.c,v 11.55 2002/09/04 19:06:45 margo Exp $";
#endif /* not lint */
#ifndef NO_SYSTEM_INCLUDES
@@ -18,174 +18,95 @@ static const char revid[] = "$Id: qam_open.c,v 11.31 2000/12/20 17:59:29 ubell E
#endif
#include "db_int.h"
-#include "db_page.h"
-#include "db_shash.h"
-#include "db_swap.h"
-#include "db_am.h"
-#include "lock.h"
-#include "qam.h"
+#include "dbinc/crypto.h"
+#include "dbinc/db_page.h"
+#include "dbinc/db_shash.h"
+#include "dbinc/db_swap.h"
+#include "dbinc/db_am.h"
+#include "dbinc/lock.h"
+#include "dbinc/qam.h"
+#include "dbinc/fop.h"
+
+static int __qam_init_meta __P((DB *, QMETA *));
/*
* __qam_open
*
- * PUBLIC: int __qam_open __P((DB *, const char *, db_pgno_t, int, u_int32_t));
+ * PUBLIC: int __qam_open __P((DB *,
+ * PUBLIC: DB_TXN *, const char *, db_pgno_t, int, u_int32_t));
*/
int
-__qam_open(dbp, name, base_pgno, mode, flags)
+__qam_open(dbp, txn, name, base_pgno, mode, flags)
DB *dbp;
+ DB_TXN *txn;
const char *name;
db_pgno_t base_pgno;
int mode;
u_int32_t flags;
{
- QUEUE *t;
DBC *dbc;
+ DB_ENV *dbenv;
DB_LOCK metalock;
- DB_LSN orig_lsn;
+ DB_MPOOLFILE *mpf;
QMETA *qmeta;
- int locked;
+ QUEUE *t;
int ret, t_ret;
- ret = 0;
- locked = 0;
+ dbenv = dbp->dbenv;
+ mpf = dbp->mpf;
t = dbp->q_internal;
+ ret = 0;
+ qmeta = NULL;
- if (name == NULL && t->page_ext != 0) {
- __db_err(dbp->dbenv,
- "Extent size may not be specified for in-memory queue database.");
- return (EINVAL);
- }
/* Initialize the remaining fields/methods of the DB. */
- dbp->del = __qam_delete;
- dbp->put = __qam_put;
dbp->stat = __qam_stat;
dbp->sync = __qam_sync;
dbp->db_am_remove = __qam_remove;
dbp->db_am_rename = __qam_rename;
- metalock.off = LOCK_INVALID;
-
/*
* Get a cursor. If DB_CREATE is specified, we may be creating
* pages, and to do that safely in CDB we need a write cursor.
* In STD_LOCKING mode, we'll synchronize using the meta page
* lock instead.
*/
- if ((ret = dbp->cursor(dbp, dbp->open_txn,
- &dbc, LF_ISSET(DB_CREATE) && CDB_LOCKING(dbp->dbenv) ?
- DB_WRITECURSOR : 0)) != 0)
+ if ((ret = dbp->cursor(dbp, txn, &dbc,
+ LF_ISSET(DB_CREATE) && CDB_LOCKING(dbenv) ? DB_WRITECURSOR : 0))
+ != 0)
return (ret);
- /* Get, and optionally create the metadata page. */
+ /*
+ * Get the meta data page. It must exist, because creates of
+ * files/databases come in through the __qam_new_file interface
+ * and queue doesn't support subdatabases.
+ */
if ((ret =
__db_lget(dbc, 0, base_pgno, DB_LOCK_READ, 0, &metalock)) != 0)
goto err;
- if ((ret = memp_fget(
- dbp->mpf, &base_pgno, DB_MPOOL_CREATE, (PAGE **)&qmeta)) != 0)
+ if ((ret =
+ mpf->get(mpf, &base_pgno, 0, (PAGE **)&qmeta)) != 0)
goto err;
- /*
- * If the magic number is correct, we're not creating the tree.
- * Correct any fields that may not be right. Note, all of the
- * local flags were set by DB->open.
- */
-again: if (qmeta->dbmeta.magic == DB_QAMMAGIC) {
- t->re_pad = qmeta->re_pad;
- t->re_len = qmeta->re_len;
- t->rec_page = qmeta->rec_page;
- t->page_ext = qmeta->page_ext;
-
- (void)memp_fput(dbp->mpf, (PAGE *)qmeta, 0);
- goto done;
- }
-
- /* If we're doing CDB; we now have to get the write lock. */
- if (CDB_LOCKING(dbp->dbenv)) {
- DB_ASSERT(LF_ISSET(DB_CREATE));
- if ((ret = lock_get(dbp->dbenv, dbc->locker, DB_LOCK_UPGRADE,
- &dbc->lock_dbt, DB_LOCK_WRITE, &dbc->mylock)) != 0)
- goto err;
- }
-
- /*
- * If we are doing locking, relase the read lock
- * and get a write lock. We want to avoid deadlock.
- */
- if (locked == 0 && STD_LOCKING(dbc)) {
- if ((ret = __LPUT(dbc, metalock)) != 0)
- goto err;
- if ((ret = __db_lget(dbc,
- 0, base_pgno, DB_LOCK_WRITE, 0, &metalock)) != 0)
- goto err;
- locked = 1;
- goto again;
- }
- /* Initialize the tree structure metadata information. */
- orig_lsn = qmeta->dbmeta.lsn;
- memset(qmeta, 0, sizeof(QMETA));
- ZERO_LSN(qmeta->dbmeta.lsn);
- qmeta->dbmeta.pgno = base_pgno;
- qmeta->dbmeta.magic = DB_QAMMAGIC;
- qmeta->dbmeta.version = DB_QAMVERSION;
- qmeta->dbmeta.pagesize = dbp->pgsize;
- qmeta->dbmeta.type = P_QAMMETA;
- qmeta->re_pad = t->re_pad;
- qmeta->re_len = t->re_len;
- qmeta->rec_page = CALC_QAM_RECNO_PER_PAGE(dbp);
- qmeta->cur_recno = 1;
- qmeta->first_recno = 1;
- qmeta->page_ext = t->page_ext;
- t->rec_page = qmeta->rec_page;
- memcpy(qmeta->dbmeta.uid, dbp->fileid, DB_FILE_ID_LEN);
-
- /* Verify that we can fit at least one record per page. */
- if (QAM_RECNO_PER_PAGE(dbp) < 1) {
- __db_err(dbp->dbenv,
- "Record size of %lu too large for page size of %lu",
- (u_long)t->re_len, (u_long)dbp->pgsize);
- (void)memp_fput(dbp->mpf, (PAGE *)qmeta, 0);
+ /* If the magic number is incorrect, that's a fatal error. */
+ if (qmeta->dbmeta.magic != DB_QAMMAGIC) {
+ __db_err(dbenv, "%s: unexpected file type or format", name);
ret = EINVAL;
goto err;
}
- if ((ret = __db_log_page(dbp,
- name, &orig_lsn, base_pgno, (PAGE *)qmeta)) != 0)
- goto err;
-
- /* Release the metadata page. */
- if ((ret = memp_fput(dbp->mpf, (PAGE *)qmeta, DB_MPOOL_DIRTY)) != 0)
- goto err;
- DB_TEST_RECOVERY(dbp, DB_TEST_POSTLOG, ret, name);
-
- /*
- * Flush the metadata page to disk.
- *
- * !!!
- * It's not useful to return not-yet-flushed here -- convert it to
- * an error.
- */
- if ((ret = memp_fsync(dbp->mpf)) == DB_INCOMPLETE) {
- __db_err(dbp->dbenv, "Flush of metapage failed");
- ret = EINVAL;
- }
- DB_TEST_RECOVERY(dbp, DB_TEST_POSTSYNC, ret, name);
-
-done: t->q_meta = base_pgno;
- t->q_root = base_pgno + 1;
-
/* Setup information needed to open extents. */
- if (t->page_ext != 0) {
- t->finfo.pgcookie = &t->pgcookie;
- t->finfo.fileid = NULL;
- t->finfo.lsn_offset = 0;
+ t->page_ext = qmeta->page_ext;
+ if (t->page_ext != 0) {
t->pginfo.db_pagesize = dbp->pgsize;
- t->pginfo.needswap = F_ISSET(dbp, DB_AM_SWAP);
+ t->pginfo.flags =
+ F_ISSET(dbp, (DB_AM_CHKSUM | DB_AM_ENCRYPT | DB_AM_SWAP));
+ t->pginfo.type = dbp->type;
t->pgcookie.data = &t->pginfo;
t->pgcookie.size = sizeof(DB_PGINFO);
if ((ret = __os_strdup(dbp->dbenv, name, &t->path)) != 0)
- goto err;
+ return (ret);
t->dir = t->path;
if ((t->name = __db_rpath(t->path)) == NULL) {
t->name = t->path;
@@ -198,8 +119,22 @@ done: t->q_meta = base_pgno;
t->mode = mode;
}
-err:
-DB_TEST_RECOVERY_LABEL
+ if (name == NULL && t->page_ext != 0) {
+ __db_err(dbenv,
+ "Extent size may not be specified for in-memory queue database");
+ return (EINVAL);
+ }
+
+ t->re_pad = qmeta->re_pad;
+ t->re_len = qmeta->re_len;
+ t->rec_page = qmeta->rec_page;
+
+ t->q_meta = base_pgno;
+ t->q_root = base_pgno + 1;
+
+err: if (qmeta != NULL && (t_ret = mpf->put(mpf, qmeta, 0)) != 0 && ret == 0)
+ ret = t_ret;
+
/* Don't hold the meta page long term. */
(void)__LPUT(dbc, metalock);
@@ -225,6 +160,7 @@ __qam_metachk(dbp, name, qmeta)
int ret;
dbenv = dbp->dbenv;
+ ret = 0;
/*
* At this point, all we know is that the magic number is for a Queue.
@@ -241,6 +177,7 @@ __qam_metachk(dbp, name, qmeta)
name, (u_long)vers);
return (DB_OLD_VERSION);
case 3:
+ case 4:
break;
default:
__db_err(dbenv,
@@ -264,5 +201,131 @@ __qam_metachk(dbp, name, qmeta)
/* Copy the file's ID. */
memcpy(dbp->fileid, qmeta->dbmeta.uid, DB_FILE_ID_LEN);
+ /* Set up AM-specific methods that do not require an open. */
+ dbp->db_am_rename = __qam_rename;
+ dbp->db_am_remove = __qam_remove;
+
+ return (ret);
+}
+
+/*
+ * __qam_init_meta --
+ * Initialize the meta-data for a Queue database.
+ */
+static int
+__qam_init_meta(dbp, meta)
+ DB *dbp;
+ QMETA *meta;
+{
+ QUEUE *t;
+
+ t = dbp->q_internal;
+
+ memset(meta, 0, sizeof(QMETA));
+ LSN_NOT_LOGGED(meta->dbmeta.lsn);
+ meta->dbmeta.pgno = PGNO_BASE_MD;
+ meta->dbmeta.last_pgno = 0;
+ meta->dbmeta.magic = DB_QAMMAGIC;
+ meta->dbmeta.version = DB_QAMVERSION;
+ meta->dbmeta.pagesize = dbp->pgsize;
+ if (F_ISSET(dbp, DB_AM_CHKSUM))
+ FLD_SET(meta->dbmeta.metaflags, DBMETA_CHKSUM);
+ if (F_ISSET(dbp, DB_AM_ENCRYPT)) {
+ meta->dbmeta.encrypt_alg =
+ ((DB_CIPHER *)dbp->dbenv->crypto_handle)->alg;
+ DB_ASSERT(meta->dbmeta.encrypt_alg != 0);
+ meta->crypto_magic = meta->dbmeta.magic;
+ }
+ meta->dbmeta.type = P_QAMMETA;
+ meta->re_pad = t->re_pad;
+ meta->re_len = t->re_len;
+ meta->rec_page = CALC_QAM_RECNO_PER_PAGE(dbp);
+ meta->cur_recno = 1;
+ meta->first_recno = 1;
+ meta->page_ext = t->page_ext;
+ t->rec_page = meta->rec_page;
+ memcpy(meta->dbmeta.uid, dbp->fileid, DB_FILE_ID_LEN);
+
+ /* Verify that we can fit at least one record per page. */
+ if (QAM_RECNO_PER_PAGE(dbp) < 1) {
+ __db_err(dbp->dbenv,
+ "Record size of %lu too large for page size of %lu",
+ (u_long)t->re_len, (u_long)dbp->pgsize);
+ return (EINVAL);
+ }
+
return (0);
}
+
+/*
+ * __qam_new_file --
+ * Create the necessary pages to begin a new queue database file.
+ *
+ * This code appears more complex than it is because of the two cases (named
+ * and unnamed). The way to read the code is that for each page being created,
+ * there are three parts: 1) a "get page" chunk (which either uses malloc'd
+ * memory or calls mpf->get), 2) the initialization, and 3) the "put page"
+ * chunk which either does a fop write or an mpf->put.
+ *
+ * PUBLIC: int __qam_new_file __P((DB *, DB_TXN *, DB_FH *, const char *));
+ */
+int
+__qam_new_file(dbp, txn, fhp, name)
+ DB *dbp;
+ DB_TXN *txn;
+ DB_FH *fhp;
+ const char *name;
+{
+ QMETA *meta;
+ DB_ENV *dbenv;
+ DB_MPOOLFILE *mpf;
+ DB_PGINFO pginfo;
+ DBT pdbt;
+ db_pgno_t pgno;
+ int ret;
+ void *buf;
+
+ dbenv = dbp->dbenv;
+ mpf = dbp->mpf;
+ buf = NULL;
+ meta = NULL;
+
+ /* Build meta-data page. */
+
+ if (name == NULL) {
+ pgno = PGNO_BASE_MD;
+ ret = mpf->get(mpf, &pgno, DB_MPOOL_CREATE, &meta);
+ } else {
+ ret = __os_calloc(dbp->dbenv, 1, dbp->pgsize, &buf);
+ meta = (QMETA *)buf;
+ }
+ if (ret != 0)
+ return (ret);
+
+ if ((ret = __qam_init_meta(dbp, meta)) != 0)
+ goto err;
+
+ if (name == NULL)
+ ret = mpf->put(mpf, meta, DB_MPOOL_DIRTY);
+ else {
+ pginfo.db_pagesize = dbp->pgsize;
+ pginfo.flags =
+ F_ISSET(dbp, (DB_AM_CHKSUM | DB_AM_ENCRYPT | DB_AM_SWAP));
+ pginfo.type = DB_QUEUE;
+ pdbt.data = &pginfo;
+ pdbt.size = sizeof(pginfo);
+ if ((ret = __db_pgout(dbenv, PGNO_BASE_MD, meta, &pdbt)) != 0)
+ goto err;
+ ret = __fop_write(dbenv,
+ txn, name, DB_APP_DATA, fhp, 0, buf, dbp->pgsize, 1);
+ }
+ if (ret != 0)
+ goto err;
+ meta = NULL;
+
+err: if (name != NULL)
+ __os_free(dbenv, buf);
+ else if (meta != NULL)
+ (void)mpf->put(mpf, meta, 0);
+ return (ret);
+}
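
As the comment explains, each page goes through a get/initialize/put cycle, and only the get and put halves differ between the unnamed (mpool) and named (private buffer plus a physical write) cases. The skeleton below mirrors that shape with stub cache and write steps standing in for mpf->get/put and __fop_write; the names and the toy page size are ours:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define PGSIZE  512                     /* Toy page size for the sketch. */

/* Shared initialization: the part both paths have in common. */
static void
init_meta(unsigned char *pg)
{
        memset(pg, 0, PGSIZE);
        pg[0] = 0x42;                   /* Pretend magic/type byte. */
}

/*
 * Skeleton of the named/unnamed split: "get" a page (cache page or
 * private buffer), initialize it, then "put" it back dirty or write it
 * straight to the new file.  The cache and write steps are stubs.
 */
static int
new_file(const char *name)
{
        static unsigned char cache_page[PGSIZE];        /* stand-in cache */
        unsigned char *pg, *buf;
        int ret;

        buf = NULL;
        if (name == NULL)
                pg = cache_page;                        /* "get" from cache */
        else {
                if ((buf = calloc(1, PGSIZE)) == NULL)
                        return (-1);
                pg = buf;                               /* private buffer */
        }

        init_meta(pg);                                  /* common init */

        if (name == NULL)
                printf("put cache page back dirty\n");  /* "put" */
        else
                printf("write %d bytes to %s\n", PGSIZE, name); /* "write" */
        ret = 0;

        free(buf);                      /* no-op for the unnamed case */
        return (ret);
}

int
main(void)
{
        return (new_file("queue.db") != 0 || new_file(NULL) != 0);
}
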
diff --git a/bdb/qam/qam_rec.c b/bdb/qam/qam_rec.c
index 4d330f58651..2c0f1227752 100644
--- a/bdb/qam/qam_rec.c
+++ b/bdb/qam/qam_rec.c
@@ -1,14 +1,14 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1999, 2000
+ * Copyright (c) 1999-2002
* Sleepycat Software. All rights reserved.
*/
#include "db_config.h"
#ifndef lint
-static const char revid[] = "$Id: qam_rec.c,v 11.34 2001/01/19 18:01:59 bostic Exp $";
+static const char revid[] = "$Id: qam_rec.c,v 11.69 2002/08/06 06:17:10 bostic Exp $";
#endif /* not lint */
#ifndef NO_SYSTEM_INCLUDES
@@ -18,91 +18,12 @@ static const char revid[] = "$Id: qam_rec.c,v 11.34 2001/01/19 18:01:59 bostic E
#endif
#include "db_int.h"
-#include "db_page.h"
-#include "db_shash.h"
-#include "lock.h"
-#include "db_am.h"
-#include "qam.h"
-#include "log.h"
-
-/*
- * __qam_inc_recover --
- * Recovery function for inc.
- *
- * PUBLIC: int __qam_inc_recover __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
- */
-int
-__qam_inc_recover(dbenv, dbtp, lsnp, op, info)
- DB_ENV *dbenv;
- DBT *dbtp;
- DB_LSN *lsnp;
- db_recops op;
- void *info;
-{
- __qam_inc_args *argp;
- DB *file_dbp;
- DBC *dbc;
- DB_LOCK lock;
- DB_MPOOLFILE *mpf;
- QMETA *meta;
- db_pgno_t metapg;
- int cmp_p, modified, ret;
-
- COMPQUIET(info, NULL);
- REC_PRINT(__qam_inc_print);
- REC_INTRO(__qam_inc_read, 1);
-
- metapg = ((QUEUE *)file_dbp->q_internal)->q_meta;
-
- if ((ret = __db_lget(dbc,
- LCK_ROLLBACK, metapg, DB_LOCK_WRITE, 0, &lock)) != 0)
- goto done;
- if ((ret = memp_fget(mpf, &metapg, 0, &meta)) != 0) {
- if (DB_REDO(op)) {
- if ((ret = memp_fget(mpf,
- &metapg, DB_MPOOL_CREATE, &meta)) != 0) {
- (void)__LPUT(dbc, lock);
- goto out;
- }
- meta->dbmeta.pgno = metapg;
- meta->dbmeta.type = P_QAMMETA;
-
- } else {
- *lsnp = argp->prev_lsn;
- ret = 0;
- (void)__LPUT(dbc, lock);
- goto out;
- }
- }
-
- modified = 0;
- cmp_p = log_compare(&LSN(meta), &argp->lsn);
- CHECK_LSN(op, cmp_p, &LSN(meta), &argp->lsn);
-
- /*
- * The cur_recno never goes backwards. It is a point of
- * contention among appenders. If one fails cur_recno will
- * most likely be beyond that one when it aborts.
- * We move it ahead on either an abort or a commit
- * and make the LSN reflect that fact.
- */
- if (cmp_p == 0) {
- modified = 1;
- meta->cur_recno++;
- if (meta->cur_recno == RECNO_OOB)
- meta->cur_recno++;
- meta->dbmeta.lsn = *lsnp;
- }
- if ((ret = memp_fput(mpf, meta, modified ? DB_MPOOL_DIRTY : 0)))
- goto out;
-
- (void)__LPUT(dbc, lock);
-
-done: *lsnp = argp->prev_lsn;
- ret = 0;
-
-out: REC_CLOSE;
-}
+#include "dbinc/db_page.h"
+#include "dbinc/db_shash.h"
+#include "dbinc/db_am.h"
+#include "dbinc/lock.h"
+#include "dbinc/log.h"
+#include "dbinc/qam.h"
/*
* __qam_incfirst_recover --
@@ -138,9 +59,9 @@ __qam_incfirst_recover(dbenv, dbtp, lsnp, op, info)
if ((ret = __db_lget(dbc,
LCK_ROLLBACK, metapg, DB_LOCK_WRITE, 0, &lock)) != 0)
goto done;
- if ((ret = memp_fget(mpf, &metapg, 0, &meta)) != 0) {
+ if ((ret = mpf->get(mpf, &metapg, 0, &meta)) != 0) {
if (DB_REDO(op)) {
- if ((ret = memp_fget(mpf,
+ if ((ret = mpf->get(mpf,
&metapg, DB_MPOOL_CREATE, &meta)) != 0) {
(void)__LPUT(dbc, lock);
goto out;
@@ -178,11 +99,11 @@ __qam_incfirst_recover(dbenv, dbtp, lsnp, op, info)
cp = (QUEUE_CURSOR *)dbc->internal;
if (meta->first_recno == RECNO_OOB)
meta->first_recno++;
- while (meta->first_recno != meta->cur_recno
- && !QAM_BEFORE_FIRST(meta, argp->recno + 1)) {
+ while (meta->first_recno != meta->cur_recno &&
+ !QAM_BEFORE_FIRST(meta, argp->recno + 1)) {
if ((ret = __qam_position(dbc,
&meta->first_recno, QAM_READ, &exact)) != 0)
- goto out;
+ goto err;
if (cp->page != NULL)
__qam_fput(file_dbp, cp->pgno, cp->page, 0);
@@ -192,7 +113,7 @@ __qam_incfirst_recover(dbenv, dbtp, lsnp, op, info)
rec_ext != 0 && meta->first_recno % rec_ext == 0)
if ((ret =
__qam_fremove(file_dbp, cp->pgno)) != 0)
- goto out;
+ goto err;
meta->first_recno++;
if (meta->first_recno == RECNO_OOB)
meta->first_recno++;
@@ -200,14 +121,19 @@ __qam_incfirst_recover(dbenv, dbtp, lsnp, op, info)
}
}
- if ((ret = memp_fput(mpf, meta, modified ? DB_MPOOL_DIRTY : 0)))
- goto out;
+ if ((ret = mpf->put(mpf, meta, modified ? DB_MPOOL_DIRTY : 0)) != 0)
+ goto err1;
(void)__LPUT(dbc, lock);
done: *lsnp = argp->prev_lsn;
ret = 0;
+ if (0) {
+err: (void)mpf->put(mpf, meta, 0);
+err1: (void)__LPUT(dbc, lock);
+ }
+
out: REC_CLOSE;
}
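
The rewritten error handling relies on the "if (0) { err: ... }" idiom: the block is unreachable on the success path, but its labels are goto targets, so failure paths drop into the staged cleanups and then share the common exit. A small self-contained example of the same shape, using malloc/free instead of mpool and lock calls:

#include <stdio.h>
#include <stdlib.h>

static int
work(int failpoint)
{
        char *a, *b;
        int ret;

        a = b = NULL;
        if ((a = malloc(16)) == NULL)
                return (-1);
        if (failpoint == 1) {
                ret = -1;
                goto err1;              /* only 'a' is held */
        }
        if ((b = malloc(16)) == NULL) {
                ret = -1;
                goto err1;
        }
        if (failpoint == 2) {
                ret = -1;
                goto err;               /* both are held */
        }

        /* Success path: release in the normal flow, then skip the block. */
        free(b);
        free(a);
        ret = 0;

        if (0) {                        /* reachable only via goto */
err:            free(b);
err1:           free(a);
        }
        return (ret);
}

int
main(void)
{
        return (work(0) == 0 && work(1) != 0 && work(2) != 0 ? 0 : 1);
}
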
@@ -233,7 +159,7 @@ __qam_mvptr_recover(dbenv, dbtp, lsnp, op, info)
DB_MPOOLFILE *mpf;
QMETA *meta;
db_pgno_t metapg;
- int cmp_p, modified, ret;
+ int cmp_n, cmp_p, modified, ret;
COMPQUIET(info, NULL);
REC_PRINT(__qam_mvptr_print);
@@ -244,9 +170,9 @@ __qam_mvptr_recover(dbenv, dbtp, lsnp, op, info)
if ((ret = __db_lget(dbc,
LCK_ROLLBACK, metapg, DB_LOCK_WRITE, 0, &lock)) != 0)
goto done;
- if ((ret = memp_fget(mpf, &metapg, 0, &meta)) != 0) {
+ if ((ret = mpf->get(mpf, &metapg, 0, &meta)) != 0) {
if (DB_REDO(op)) {
- if ((ret = memp_fget(mpf,
+ if ((ret = mpf->get(mpf,
&metapg, DB_MPOOL_CREATE, &meta)) != 0) {
(void)__LPUT(dbc, lock);
goto out;
@@ -262,13 +188,24 @@ __qam_mvptr_recover(dbenv, dbtp, lsnp, op, info)
}
modified = 0;
- cmp_p = log_compare(&meta->dbmeta.lsn, &argp->metalsn);
+ cmp_n = log_compare(lsnp, &LSN(meta));
+ cmp_p = log_compare(&LSN(meta), &argp->metalsn);
/*
- * We never undo a movement of one of the pointers.
- * Just move them along regardless of abort/commit.
+ * Under normal circumstances, we never undo a movement of one of
+ * the pointers. Just move them along regardless of abort/commit.
+ *
+ * If we're undoing a truncate, we need to reset the pointers to
+ * their state before the truncate.
*/
- if (cmp_p == 0) {
+ if (DB_UNDO(op) && (argp->opcode & QAM_TRUNCATE)) {
+ if (cmp_n == 0) {
+ meta->first_recno = argp->old_first;
+ meta->cur_recno = argp->old_cur;
+ modified = 1;
+ meta->dbmeta.lsn = argp->metalsn;
+ }
+ } else if (cmp_p == 0) {
if (argp->opcode & QAM_SETFIRST)
meta->first_recno = argp->new_first;
@@ -279,7 +216,7 @@ __qam_mvptr_recover(dbenv, dbtp, lsnp, op, info)
meta->dbmeta.lsn = *lsnp;
}
- if ((ret = memp_fput(mpf, meta, modified ? DB_MPOOL_DIRTY : 0)))
+ if ((ret = mpf->put(mpf, meta, modified ? DB_MPOOL_DIRTY : 0)) != 0)
goto out;
(void)__LPUT(dbc, lock);
@@ -289,6 +226,7 @@ done: *lsnp = argp->prev_lsn;
out: REC_CLOSE;
}
+
/*
* __qam_del_recover --
* Recovery function for del.
@@ -321,7 +259,7 @@ __qam_del_recover(dbenv, dbtp, lsnp, op, info)
REC_INTRO(__qam_del_read, 1);
if ((ret = __qam_fget(file_dbp,
- &argp->pgno, DB_MPOOL_CREATE, &pagep)) != 0)
+ &argp->pgno, DB_MPOOL_CREATE, &pagep)) != 0)
goto out;
modified = 0;
@@ -338,20 +276,20 @@ __qam_del_recover(dbenv, dbtp, lsnp, op, info)
metapg = ((QUEUE *)file_dbp->q_internal)->q_meta;
if ((ret = __db_lget(dbc,
LCK_ROLLBACK, metapg, DB_LOCK_WRITE, 0, &lock)) != 0)
- return (ret);
- if ((ret = memp_fget(file_dbp->mpf, &metapg, 0, &meta)) != 0) {
+ goto err;
+ if ((ret = mpf->get(mpf, &metapg, 0, &meta)) != 0) {
(void)__LPUT(dbc, lock);
- goto done;
+ goto err;
}
if (meta->first_recno == RECNO_OOB ||
- (QAM_BEFORE_FIRST(meta, argp->recno)
- && (meta->first_recno <= meta->cur_recno
- || meta->first_recno -
+ (QAM_BEFORE_FIRST(meta, argp->recno) &&
+ (meta->first_recno <= meta->cur_recno ||
+ meta->first_recno -
argp->recno < argp->recno - meta->cur_recno))) {
meta->first_recno = argp->recno;
- (void)memp_fput(file_dbp->mpf, meta, DB_MPOOL_DIRTY);
+ (void)mpf->put(mpf, meta, DB_MPOOL_DIRTY);
} else
- (void)memp_fput(file_dbp->mpf, meta, 0);
+ (void)mpf->put(mpf, meta, 0);
(void)__LPUT(dbc, lock);
/* Need to undo delete - mark the record as present */
@@ -366,7 +304,7 @@ __qam_del_recover(dbenv, dbtp, lsnp, op, info)
* is harmless in queue except when we're determining
* what we need to roll forward during recovery. [#2588]
*/
- if (op == DB_TXN_BACKWARD_ROLL && cmp_n < 0)
+ if (op == DB_TXN_BACKWARD_ROLL && cmp_n <= 0)
LSN(pagep) = argp->lsn;
modified = 1;
} else if (cmp_n > 0 && DB_REDO(op)) {
@@ -377,14 +315,18 @@ __qam_del_recover(dbenv, dbtp, lsnp, op, info)
modified = 1;
}
if ((ret = __qam_fput(file_dbp,
- argp->pgno, pagep, modified ? DB_MPOOL_DIRTY : 0)))
+ argp->pgno, pagep, modified ? DB_MPOOL_DIRTY : 0)) != 0)
goto out;
done: *lsnp = argp->prev_lsn;
ret = 0;
+ if (0) {
+err: (void)__qam_fput(file_dbp, argp->pgno, pagep, 0);
+ }
out: REC_CLOSE;
}
+
/*
* __qam_delext_recover --
* Recovery function for del in an extent based queue.
@@ -415,9 +357,19 @@ __qam_delext_recover(dbenv, dbtp, lsnp, op, info)
REC_PRINT(__qam_delext_print);
REC_INTRO(__qam_delext_read, 1);
- if ((ret = __qam_fget(file_dbp,
- &argp->pgno, DB_MPOOL_CREATE, &pagep)) != 0)
- goto out;
+ if ((ret = __qam_fget(file_dbp, &argp->pgno, 0, &pagep)) != 0) {
+ if (ret != DB_PAGE_NOTFOUND && ret != ENOENT)
+ goto out;
+ /*
+ * If we are redoing a delete and the page is not there
+ * we are done.
+ */
+ if (DB_REDO(op))
+ goto done;
+ if ((ret = __qam_fget(file_dbp,
+ &argp->pgno, DB_MPOOL_CREATE, &pagep)) != 0)
+ goto out;
+ }
modified = 0;
if (pagep->pgno == PGNO_INVALID) {
@@ -433,25 +385,25 @@ __qam_delext_recover(dbenv, dbtp, lsnp, op, info)
metapg = ((QUEUE *)file_dbp->q_internal)->q_meta;
if ((ret = __db_lget(dbc,
LCK_ROLLBACK, metapg, DB_LOCK_WRITE, 0, &lock)) != 0)
- return (ret);
- if ((ret = memp_fget(file_dbp->mpf, &metapg, 0, &meta)) != 0) {
+ goto err;
+ if ((ret = mpf->get(mpf, &metapg, 0, &meta)) != 0) {
(void)__LPUT(dbc, lock);
- goto done;
+ goto err;
}
if (meta->first_recno == RECNO_OOB ||
- (QAM_BEFORE_FIRST(meta, argp->recno)
- && (meta->first_recno <= meta->cur_recno
- || meta->first_recno -
+ (QAM_BEFORE_FIRST(meta, argp->recno) &&
+ (meta->first_recno <= meta->cur_recno ||
+ meta->first_recno -
argp->recno < argp->recno - meta->cur_recno))) {
meta->first_recno = argp->recno;
- (void)memp_fput(file_dbp->mpf, meta, DB_MPOOL_DIRTY);
+ (void)mpf->put(mpf, meta, DB_MPOOL_DIRTY);
} else
- (void)memp_fput(file_dbp->mpf, meta, 0);
+ (void)mpf->put(mpf, meta, 0);
(void)__LPUT(dbc, lock);
if ((ret = __qam_pitem(dbc, pagep,
argp->indx, argp->recno, &argp->data)) != 0)
- goto done;
+ goto err;
/*
* Move the LSN back to this point; do not move it forward.
@@ -461,7 +413,7 @@ __qam_delext_recover(dbenv, dbtp, lsnp, op, info)
* is harmless in queue except when we're determining
* what we need to roll forward during recovery. [#2588]
*/
- if (op == DB_TXN_BACKWARD_ROLL && cmp_n < 0)
+ if (op == DB_TXN_BACKWARD_ROLL && cmp_n <= 0)
LSN(pagep) = argp->lsn;
modified = 1;
} else if (cmp_n > 0 && DB_REDO(op)) {
@@ -472,12 +424,15 @@ __qam_delext_recover(dbenv, dbtp, lsnp, op, info)
modified = 1;
}
if ((ret = __qam_fput(file_dbp,
- argp->pgno, pagep, modified ? DB_MPOOL_DIRTY : 0)))
+ argp->pgno, pagep, modified ? DB_MPOOL_DIRTY : 0)) != 0)
goto out;
done: *lsnp = argp->prev_lsn;
ret = 0;
+ if (0) {
+err: (void)__qam_fput(file_dbp, argp->pgno, pagep, 0);
+ }
out: REC_CLOSE;
}
@@ -485,7 +440,8 @@ out: REC_CLOSE;
* __qam_add_recover --
* Recovery function for add.
*
- * PUBLIC: int __qam_add_recover __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ * PUBLIC: int __qam_add_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
*/
int
__qam_add_recover(dbenv, dbtp, lsnp, op, info)
@@ -503,16 +459,26 @@ __qam_add_recover(dbenv, dbtp, lsnp, op, info)
QMETA *meta;
QPAGE *pagep;
db_pgno_t metapg;
- int cmp_n, modified, ret;
+ int cmp_n, meta_dirty, modified, ret;
COMPQUIET(info, NULL);
REC_PRINT(__qam_add_print);
REC_INTRO(__qam_add_read, 1);
modified = 0;
- if ((ret = __qam_fget(file_dbp,
- &argp->pgno, DB_MPOOL_CREATE, &pagep)) != 0)
- goto out;
+ if ((ret = __qam_fget(file_dbp, &argp->pgno, 0, &pagep)) != 0) {
+ if (ret != DB_PAGE_NOTFOUND && ret != ENOENT)
+ goto out;
+ /*
+ * If we are undoing an append and the page is not there
+ * we are done.
+ */
+ if (DB_UNDO(op))
+ goto done;
+ if ((ret = __qam_fget(file_dbp,
+ &argp->pgno, DB_MPOOL_CREATE, &pagep)) != 0)
+ goto out;
+ }
if (pagep->pgno == PGNO_INVALID) {
pagep->pgno = argp->pgno;
@@ -522,25 +488,36 @@ __qam_add_recover(dbenv, dbtp, lsnp, op, info)
cmp_n = log_compare(lsnp, &LSN(pagep));
- if (cmp_n > 0 && DB_REDO(op)) {
- /* Need to redo add - put the record on page */
- if ((ret = __qam_pitem(dbc, pagep, argp->indx, argp->recno,
- &argp->data)) != 0)
- goto err;
- LSN(pagep) = *lsnp;
- modified = 1;
- /* Make sure first pointer includes this record. */
+ if (DB_REDO(op)) {
+ /* Fix meta-data page. */
metapg = ((QUEUE *)file_dbp->q_internal)->q_meta;
- if ((ret = memp_fget(mpf, &metapg, 0, &meta)) != 0)
+ if ((ret = mpf->get(mpf, &metapg, 0, &meta)) != 0)
goto err;
+ meta_dirty = 0;
if (QAM_BEFORE_FIRST(meta, argp->recno)) {
meta->first_recno = argp->recno;
- if ((ret = memp_fput(mpf, meta, DB_MPOOL_DIRTY)) != 0)
- goto err;
- } else
- if ((ret = memp_fput(mpf, meta, 0)) != 0)
- goto err;
+ meta_dirty = 1;
+ }
+ if (argp->recno == meta->cur_recno ||
+ QAM_AFTER_CURRENT(meta, argp->recno)) {
+ meta->cur_recno = argp->recno + 1;
+ meta_dirty = 1;
+ }
+ if ((ret =
+ mpf->put(mpf, meta, meta_dirty? DB_MPOOL_DIRTY : 0)) != 0)
+ goto err;
+ /* Now update the actual page if necessary. */
+ if (cmp_n > 0) {
+ /* Need to redo add - put the record on page */
+ if ((ret = __qam_pitem(dbc,
+ pagep, argp->indx, argp->recno, &argp->data)) != 0)
+ goto err;
+ LSN(pagep) = *lsnp;
+ modified = 1;
+ /* Make sure pointers include this record. */
+ metapg = ((QUEUE *)file_dbp->q_internal)->q_meta;
+ }
} else if (DB_UNDO(op)) {
/*
* Need to undo add
@@ -572,161 +549,20 @@ __qam_add_recover(dbenv, dbtp, lsnp, op, info)
* is harmless in queue except when we're determining
* what we need to roll forward during recovery. [#2588]
*/
- if (op == DB_TXN_BACKWARD_ROLL && cmp_n < 0)
+ if (op == DB_TXN_BACKWARD_ROLL && cmp_n <= 0)
LSN(pagep) = argp->lsn;
}
-err: if ((ret = __qam_fput(file_dbp,
- argp->pgno, pagep, modified ? DB_MPOOL_DIRTY : 0)))
+ if ((ret = __qam_fput(file_dbp,
+ argp->pgno, pagep, modified ? DB_MPOOL_DIRTY : 0)) != 0)
goto out;
done: *lsnp = argp->prev_lsn;
ret = 0;
-out: REC_CLOSE;
-}
-/*
- * __qam_delete_recover --
- * Recovery function for delete of an extent.
- *
- * PUBLIC: int __qam_delete_recover
- * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
- */
-int
-__qam_delete_recover(dbenv, dbtp, lsnp, op, info)
- DB_ENV *dbenv;
- DBT *dbtp;
- DB_LSN *lsnp;
- db_recops op;
- void *info;
-{
- __qam_delete_args *argp;
- int ret;
- char *backup, *real_back, *real_name;
-
- COMPQUIET(info, NULL);
-
- REC_PRINT(__qam_delete_print);
-
- backup = real_back = real_name = NULL;
- if ((ret = __qam_delete_read(dbenv, dbtp->data, &argp)) != 0)
- goto out;
-
- if (DB_REDO(op)) {
- /*
- * On a recovery, as we recreate what was going on, we
- * recreate the creation of the file. And so, even though
- * it committed, we need to delete it. Try to delete it,
- * but it is not an error if that delete fails.
- */
- if ((ret = __db_appname(dbenv, DB_APP_DATA,
- NULL, argp->name.data, 0, NULL, &real_name)) != 0)
- goto out;
- if (__os_exists(real_name, NULL) == 0) {
- if ((ret = __os_unlink(dbenv, real_name)) != 0)
- goto out;
- }
- } else if (DB_UNDO(op)) {
- /*
- * Trying to undo. File may or may not have been deleted.
- * Try to move the backup to the original. If the backup
- * exists, then this is right. If it doesn't exist, then
- * nothing will happen and that's OK.
- */
- if ((ret = __db_backup_name(dbenv, argp->name.data,
- &backup, &argp->lsn)) != 0)
- goto out;
- if ((ret = __db_appname(dbenv,
- DB_APP_DATA, NULL, backup, 0, NULL, &real_back)) != 0)
- goto out;
- if ((ret = __db_appname(dbenv, DB_APP_DATA,
- NULL, argp->name.data, 0, NULL, &real_name)) != 0)
- goto out;
- if (__os_exists(real_back, NULL) == 0)
- if ((ret =
- __os_rename(dbenv, real_back, real_name)) != 0)
- goto out;
- }
- *lsnp = argp->prev_lsn;
- ret = 0;
-
-out: if (argp != NULL)
- __os_free(argp, 0);
- if (backup != NULL)
- __os_freestr(backup);
- if (real_back != NULL)
- __os_freestr(real_back);
- if (real_name != NULL)
- __os_freestr(real_name);
- return (ret);
-}
-/*
- * __qam_rename_recover --
- * Recovery function for rename.
- *
- * PUBLIC: int __qam_rename_recover
- * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
- */
-int
-__qam_rename_recover(dbenv, dbtp, lsnp, op, info)
- DB_ENV *dbenv;
- DBT *dbtp;
- DB_LSN *lsnp;
- db_recops op;
- void *info;
-{
- __qam_rename_args *argp;
- char *new_name, *real_name;
- int ret;
-
- COMPQUIET(info, NULL);
-
- REC_PRINT(__qam_rename_print);
-
- new_name = real_name = NULL;
-
- if ((ret = __qam_rename_read(dbenv, dbtp->data, &argp)) != 0)
- goto out;
-
- if (DB_REDO(op)) {
- if ((ret = __db_appname(dbenv, DB_APP_DATA,
- NULL, argp->name.data, 0, NULL, &real_name)) != 0)
- goto out;
- if (__os_exists(real_name, NULL) == 0) {
- if ((ret = __db_appname(dbenv,
- DB_APP_DATA, NULL, argp->newname.data,
- 0, NULL, &new_name)) != 0)
- goto out;
- if ((ret = __os_rename(dbenv,
- real_name, new_name)) != 0)
- goto out;
- }
- } else {
- if ((ret = __db_appname(dbenv, DB_APP_DATA,
- NULL, argp->newname.data, 0, NULL, &new_name)) != 0)
- goto out;
- if (__os_exists(new_name, NULL) == 0) {
- if ((ret = __db_appname(dbenv,
- DB_APP_DATA, NULL, argp->name.data,
- 0, NULL, &real_name)) != 0)
- goto out;
- if ((ret = __os_rename(dbenv,
- new_name, real_name)) != 0)
- goto out;
- }
+ if (0) {
+err: (void)__qam_fput(file_dbp, argp->pgno, pagep, 0);
}
- *lsnp = argp->prev_lsn;
- ret = 0;
-
-out: if (argp != NULL)
- __os_free(argp, 0);
-
- if (new_name != NULL)
- __os_free(new_name, 0);
-
- if (real_name != NULL)
- __os_free(real_name, 0);
-
- return (ret);
+out: REC_CLOSE;
}
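
All of these recovery functions make their redo/undo decision by comparing LSNs: redo only when the page still carries the LSN recorded before the change (cmp_p == 0), undo only when the page carries exactly this record's LSN (cmp_n == 0). The toy example below reproduces that decision for a single "set value" record; the struct names and the two-field LSN are simplifications, not the library's types:

#include <stdio.h>
#include <stdint.h>

/* Toy log sequence number and comparison (file, then offset). */
struct lsn { uint32_t file, off; };

static int
lsn_cmp(const struct lsn *a, const struct lsn *b)
{
        if (a->file != b->file)
                return (a->file < b->file ? -1 : 1);
        if (a->off != b->off)
                return (a->off < b->off ? -1 : 1);
        return (0);
}

/* Toy page and log record for a "set value" operation. */
struct page { struct lsn lsn; int value; };
struct rec  { struct lsn lsn;           /* page LSN before the change */
              struct lsn self;          /* LSN of this record */
              int oldv, newv; };

/*
 * Redo only if the page still carries the pre-change LSN; undo only if
 * the page carries exactly this record's LSN; otherwise do nothing.
 */
static void
recover(struct page *pg, const struct rec *rp, int redo)
{
        int cmp_p = lsn_cmp(&pg->lsn, &rp->lsn);
        int cmp_n = lsn_cmp(&rp->self, &pg->lsn);

        if (redo && cmp_p == 0) {               /* page is one step behind */
                pg->value = rp->newv;
                pg->lsn = rp->self;             /* roll the page LSN forward */
        } else if (!redo && cmp_n == 0) {       /* page reflects this record */
                pg->value = rp->oldv;
                pg->lsn = rp->lsn;              /* roll the page LSN back */
        }
}

int
main(void)
{
        struct page pg = { { 1, 100 }, 7 };
        struct rec  rp = { { 1, 100 }, { 1, 200 }, 7, 8 };

        recover(&pg, &rp, 1);                   /* redo: value becomes 8 */
        printf("after redo: %d\n", pg.value);
        recover(&pg, &rp, 0);                   /* undo: back to 7 */
        printf("after undo: %d\n", pg.value);
        return (0);
}
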
diff --git a/bdb/qam/qam_stat.c b/bdb/qam/qam_stat.c
index 865f477c1eb..57c67da4292 100644
--- a/bdb/qam/qam_stat.c
+++ b/bdb/qam/qam_stat.c
@@ -1,14 +1,14 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1999, 2000
+ * Copyright (c) 1999-2002
* Sleepycat Software. All rights reserved.
*/
#include "db_config.h"
#ifndef lint
-static const char revid[] = "$Id: qam_stat.c,v 11.16 2001/01/10 04:50:54 ubell Exp $";
+static const char revid[] = "$Id: qam_stat.c,v 11.32 2002/05/11 13:40:11 bostic Exp $";
#endif /* not lint */
#ifndef NO_SYSTEM_INCLUDES
@@ -18,32 +18,33 @@ static const char revid[] = "$Id: qam_stat.c,v 11.16 2001/01/10 04:50:54 ubell E
#endif
#include "db_int.h"
-#include "db_page.h"
-#include "db_shash.h"
-#include "db_am.h"
-#include "lock.h"
-#include "qam.h"
+#include "dbinc/db_page.h"
+#include "dbinc/db_shash.h"
+#include "dbinc/db_am.h"
+#include "dbinc/lock.h"
+#include "dbinc/log.h"
+#include "dbinc/qam.h"
/*
* __qam_stat --
* Gather/print the qam statistics
*
- * PUBLIC: int __qam_stat __P((DB *, void *, void *(*)(size_t), u_int32_t));
+ * PUBLIC: int __qam_stat __P((DB *, void *, u_int32_t));
*/
int
-__qam_stat(dbp, spp, db_malloc, flags)
+__qam_stat(dbp, spp, flags)
DB *dbp;
void *spp;
- void *(*db_malloc) __P((size_t));
u_int32_t flags;
{
- QUEUE *t;
DBC *dbc;
DB_LOCK lock;
+ DB_MPOOLFILE *mpf;
DB_QUEUE_STAT *sp;
PAGE *h;
QAMDATA *qp, *ep;
QMETA *meta;
+ QUEUE *t;
db_indx_t indx;
db_pgno_t first, last, pgno, pg_ext, stop;
u_int32_t re_len;
@@ -52,9 +53,10 @@ __qam_stat(dbp, spp, db_malloc, flags)
PANIC_CHECK(dbp->dbenv);
DB_ILLEGAL_BEFORE_OPEN(dbp, "DB->stat");
- t = dbp->q_internal;
+ LOCK_INIT(lock);
+ mpf = dbp->mpf;
sp = NULL;
- lock.off = LOCK_INVALID;
+ t = dbp->q_internal;
/* Check for invalid flags. */
if ((ret = __db_statchk(dbp, flags)) != 0)
@@ -70,35 +72,29 @@ __qam_stat(dbp, spp, db_malloc, flags)
DEBUG_LWRITE(dbc, NULL, "qam_stat", NULL, NULL, flags);
/* Allocate and clear the structure. */
- if ((ret = __os_malloc(dbp->dbenv, sizeof(*sp), db_malloc, &sp)) != 0)
+ if ((ret = __os_umalloc(dbp->dbenv, sizeof(*sp), &sp)) != 0)
goto err;
memset(sp, 0, sizeof(*sp));
re_len = ((QUEUE *)dbp->q_internal)->re_len;
- if (flags == DB_CACHED_COUNTS) {
- if ((ret = __db_lget(dbc,
- 0, t->q_meta, DB_LOCK_READ, 0, &lock)) != 0)
- goto err;
- if ((ret =
- memp_fget(dbp->mpf, &t->q_meta, 0, (PAGE **)&meta)) != 0)
- goto err;
- sp->qs_nkeys = meta->dbmeta.key_count;
- sp->qs_ndata = meta->dbmeta.record_count;
-
- goto done;
- }
/* Determine the last page of the database. */
if ((ret = __db_lget(dbc,
0, t->q_meta, DB_LOCK_READ, 0, &lock)) != 0)
goto err;
- if ((ret = memp_fget(dbp->mpf, &t->q_meta, 0, (PAGE **)&meta)) != 0)
+ if ((ret = mpf->get(mpf, &t->q_meta, 0, (PAGE **)&meta)) != 0)
goto err;
+ if (flags == DB_FAST_STAT || flags == DB_CACHED_COUNTS) {
+ sp->qs_nkeys = meta->dbmeta.key_count;
+ sp->qs_ndata = meta->dbmeta.record_count;
+ goto meta_only;
+ }
+
first = QAM_RECNO_PAGE(dbp, meta->first_recno);
last = QAM_RECNO_PAGE(dbp, meta->cur_recno);
- if ((ret = memp_fput(dbp->mpf, meta, 0)) != 0)
+ if ((ret = mpf->put(mpf, meta, 0)) != 0)
goto err;
(void)__LPUT(dbc, lock);
@@ -114,20 +110,23 @@ begin:
/* Walk through the pages and count. */
for (; pgno <= stop; ++pgno) {
if ((ret =
- __db_lget(dbc,
- 0, pgno, DB_LOCK_READ, 0, &lock)) != 0)
+ __db_lget(dbc, 0, pgno, DB_LOCK_READ, 0, &lock)) != 0)
goto err;
- ret = __qam_fget(dbp, &pgno, DB_MPOOL_EXTENT, &h);
+ ret = __qam_fget(dbp, &pgno, 0, &h);
if (ret == ENOENT) {
pgno += pg_ext - 1;
continue;
}
- if (ret == EINVAL) {
+ if (ret == DB_PAGE_NOTFOUND) {
+ if (pg_ext == 0) {
+ if (pgno != stop && first != last)
+ goto err;
+ ret = 0;
+ break;
+ }
pgno += pg_ext - ((pgno - 1) % pg_ext) - 1;
continue;
}
- if (ret == EIO && first == last && pg_ext == 0)
- break;
if (ret != 0)
goto err;
@@ -147,6 +146,8 @@ begin:
goto err;
(void)__LPUT(dbc, lock);
}
+
+ (void)__LPUT(dbc, lock);
if (first > last) {
pgno = 1;
stop = last;
@@ -159,26 +160,28 @@ begin:
0, t->q_meta, F_ISSET(dbp, DB_AM_RDONLY) ?
DB_LOCK_READ : DB_LOCK_WRITE, 0, &lock)) != 0)
goto err;
- if ((ret = memp_fget(dbp->mpf, &t->q_meta, 0, (PAGE **)&meta)) != 0)
+ if ((ret = mpf->get(mpf, &t->q_meta, 0, (PAGE **)&meta)) != 0)
goto err;
+ if (!F_ISSET(dbp, DB_AM_RDONLY))
+ meta->dbmeta.key_count =
+ meta->dbmeta.record_count = sp->qs_ndata;
+ sp->qs_nkeys = sp->qs_ndata;
+
+meta_only:
/* Get the metadata fields. */
sp->qs_magic = meta->dbmeta.magic;
sp->qs_version = meta->dbmeta.version;
sp->qs_metaflags = meta->dbmeta.flags;
sp->qs_pagesize = meta->dbmeta.pagesize;
+ sp->qs_extentsize = meta->page_ext;
sp->qs_re_len = meta->re_len;
sp->qs_re_pad = meta->re_pad;
sp->qs_first_recno = meta->first_recno;
sp->qs_cur_recno = meta->cur_recno;
- sp->qs_nkeys = sp->qs_ndata;
- if (!F_ISSET(dbp, DB_AM_RDONLY))
- meta->dbmeta.key_count =
- meta->dbmeta.record_count = sp->qs_ndata;
-done:
/* Discard the meta-data page. */
- if ((ret = memp_fput(dbp->mpf,
+ if ((ret = mpf->put(mpf,
meta, F_ISSET(dbp, DB_AM_RDONLY) ? 0 : DB_MPOOL_DIRTY)) != 0)
goto err;
(void)__LPUT(dbc, lock);
@@ -188,11 +191,10 @@ done:
if (0) {
err: if (sp != NULL)
- __os_free(sp, sizeof(*sp));
+ __os_ufree(dbp->dbenv, sp);
}
- if (lock.off != LOCK_INVALID)
- (void)__LPUT(dbc, lock);
+ (void)__LPUT(dbc, lock);
if ((t_ret = dbc->c_close(dbc)) != 0 && ret == 0)
ret = t_ret;
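
The statistics walk, like __db_prqueue earlier in the patch, traverses a circular page range: when first > last the live records wrap past the end of the file, so the scan runs first..max and then restarts at page 1. A compact sketch of that two-leg traversal (the page numbers and the max bound are invented for the demo):

#include <stdio.h>

/*
 * Walk a circular page range: pages first..last, where first > last
 * means the range has wrapped, so the scan covers first..max and then
 * 1..last.
 */
static void
walk(unsigned first, unsigned last, unsigned max)
{
        unsigned pgno, stop;
        int wrapped;

        wrapped = 0;
        pgno = first;
        stop = first > last ? max : last;
again:
        for (; pgno <= stop; ++pgno)
                printf("visit page %u\n", pgno);

        if (first > last && !wrapped) {         /* second leg after the wrap */
                wrapped = 1;
                pgno = 1;
                stop = last;
                goto again;
        }
}

int
main(void)
{
        walk(3, 5, 6);          /* plain range: pages 3, 4, 5 */
        walk(5, 2, 6);          /* wrapped range: 5, 6 then 1, 2 */
        return (0);
}
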
diff --git a/bdb/qam/qam_upgrade.c b/bdb/qam/qam_upgrade.c
index f49bfe88d90..6bd79fc948a 100644
--- a/bdb/qam/qam_upgrade.c
+++ b/bdb/qam/qam_upgrade.c
@@ -1,13 +1,13 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Copyright (c) 1996-2002
* Sleepycat Software. All rights reserved.
*/
#include "db_config.h"
#ifndef lint
-static const char revid[] = "$Id: qam_upgrade.c,v 11.7 2000/11/30 00:58:44 ubell Exp $";
+static const char revid[] = "$Id: qam_upgrade.c,v 11.12 2002/03/29 20:46:48 bostic Exp $";
#endif /* not lint */
#ifndef NO_SYSTEM_INCLUDES
@@ -18,10 +18,7 @@ static const char revid[] = "$Id: qam_upgrade.c,v 11.7 2000/11/30 00:58:44 ubell
#endif
#include "db_int.h"
-#include "db_page.h"
-#include "db_swap.h"
-#include "db_am.h"
-#include "db_upgrade.h"
+#include "dbinc/db_upgrade.h"
/*
* __qam_31_qammeta --
diff --git a/bdb/qam/qam_verify.c b/bdb/qam/qam_verify.c
index a9a467d6785..5b020c2c335 100644
--- a/bdb/qam/qam_verify.c
+++ b/bdb/qam/qam_verify.c
@@ -1,14 +1,14 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1999, 2000
+ * Copyright (c) 1999-2002
* Sleepycat Software. All rights reserved.
*/
#include "db_config.h"
#ifndef lint
-static const char revid[] = "$Id: qam_verify.c,v 1.17 2000/12/12 17:39:35 bostic Exp $";
+static const char revid[] = "$Id: qam_verify.c,v 1.30 2002/06/26 20:49:27 bostic Exp $";
#endif /* not lint */
#ifndef NO_SYSTEM_INCLUDES
@@ -17,10 +17,10 @@ static const char revid[] = "$Id: qam_verify.c,v 1.17 2000/12/12 17:39:35 bostic
#endif
#include "db_int.h"
-#include "db_page.h"
-#include "db_verify.h"
-#include "qam.h"
-#include "db_ext.h"
+#include "dbinc/db_page.h"
+#include "dbinc/db_verify.h"
+#include "dbinc/qam.h"
+#include "dbinc/db_am.h"
/*
* __qam_vrfy_meta --
@@ -49,7 +49,9 @@ __qam_vrfy_meta(dbp, vdp, meta, pgno, flags)
* something very odd is going on.
*/
if (!F_ISSET(pip, VRFY_INCOMPLETE))
- EPRINT((dbp->dbenv, "Queue databases must be one-per-file."));
+ EPRINT((dbp->dbenv,
+ "Page %lu: queue databases must be one-per-file",
+ (u_long)pgno));
/*
* cur_recno/rec_page
@@ -59,8 +61,9 @@ __qam_vrfy_meta(dbp, vdp, meta, pgno, flags)
if (vdp->last_pgno > 0 && meta->cur_recno > 0 &&
meta->cur_recno - 1 > meta->rec_page * vdp->last_pgno) {
EPRINT((dbp->dbenv,
- "Current recno %lu references record past last page number %lu",
- meta->cur_recno, vdp->last_pgno));
+ "Page %lu: current recno %lu references record past last page number %lu",
+ (u_long)pgno,
+ (u_long)meta->cur_recno, (u_long)vdp->last_pgno));
isbad = 1;
}
@@ -69,10 +72,10 @@ __qam_vrfy_meta(dbp, vdp, meta, pgno, flags)
* return DB_VERIFY_FATAL
*/
if (ALIGN(meta->re_len + sizeof(QAMDATA) - 1, sizeof(u_int32_t)) *
- meta->rec_page + sizeof(QPAGE) > dbp->pgsize) {
+ meta->rec_page + QPAGE_SZ(dbp) > dbp->pgsize) {
EPRINT((dbp->dbenv,
- "Queue record length %lu impossibly high for page size and records per page",
- meta->re_len));
+ "Page %lu: queue record length %lu too high for page size and recs/page",
+ (u_long)pgno, (u_long)meta->re_len));
ret = DB_VERIFY_FATAL;
goto err;
} else {
@@ -80,7 +83,8 @@ __qam_vrfy_meta(dbp, vdp, meta, pgno, flags)
vdp->rec_page = meta->rec_page;
}
-err: if ((t_ret = __db_vrfy_putpageinfo(vdp, pip)) != 0 && ret == 0)
+err: if ((t_ret =
+ __db_vrfy_putpageinfo(dbp->dbenv, vdp, pip)) != 0 && ret == 0)
ret = t_ret;
return (ret == 0 && isbad == 1 ? DB_VERIFY_BAD : ret);
}
@@ -114,14 +118,15 @@ __qam_vrfy_data(dbp, vdp, h, pgno, flags)
* some gross games to fake it out.
*/
fakedb.q_internal = &fakeq;
+ fakedb.flags = dbp->flags;
fakeq.re_len = vdp->re_len;
for (i = 0; i < vdp->rec_page; i++) {
qp = QAM_GET_RECORD(&fakedb, h, i);
if ((u_int8_t *)qp >= (u_int8_t *)h + dbp->pgsize) {
EPRINT((dbp->dbenv,
- "Queue record %lu extends past end of page %lu",
- i, pgno));
+ "Page %lu: queue record %lu extends past end of page",
+ (u_long)pgno, (u_long)i));
return (DB_VERIFY_BAD);
}
@@ -129,8 +134,8 @@ __qam_vrfy_data(dbp, vdp, h, pgno, flags)
qflags &= !(QAM_VALID | QAM_SET);
if (qflags != 0) {
EPRINT((dbp->dbenv,
- "Queue record %lu on page %lu has bad flags",
- i, pgno));
+ "Page %lu: queue record %lu has bad flags",
+ (u_long)pgno, (u_long)i));
return (DB_VERIFY_BAD);
}
}
@@ -161,7 +166,8 @@ __qam_vrfy_structure(dbp, vdp, flags)
if (pip->type != P_QAMMETA) {
EPRINT((dbp->dbenv,
- "Queue database has no meta page"));
+ "Page %lu: queue database has no meta page",
+ (u_long)PGNO_BASE_MD));
isbad = 1;
goto err;
}
@@ -174,21 +180,21 @@ __qam_vrfy_structure(dbp, vdp, flags)
if (!LF_ISSET(DB_SALVAGE))
__db_vrfy_struct_feedback(dbp, vdp);
- if ((ret = __db_vrfy_putpageinfo(vdp, pip)) != 0 ||
+ if ((ret = __db_vrfy_putpageinfo(dbp->dbenv, vdp, pip)) != 0 ||
(ret = __db_vrfy_getpageinfo(vdp, i, &pip)) != 0)
return (ret);
if (!F_ISSET(pip, VRFY_IS_ALLZEROES) &&
pip->type != P_QAMDATA) {
EPRINT((dbp->dbenv,
- "Queue database page %lu of incorrect type %lu",
- i, pip->type));
+ "Page %lu: queue database page of incorrect type %lu",
+ (u_long)i, (u_long)pip->type));
isbad = 1;
goto err;
} else if ((ret = __db_vrfy_pgset_inc(vdp->pgset, i)) != 0)
goto err;
}
-err: if ((ret = __db_vrfy_putpageinfo(vdp, pip)) != 0)
+err: if ((ret = __db_vrfy_putpageinfo(dbp->dbenv, vdp, pip)) != 0)
return (ret);
return (isbad == 1 ? DB_VERIFY_BAD : 0);
}
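
__qam_vrfy_data cannot use a fully opened queue handle, so it builds fakedb/fakeq on the stack with just the fields the record macro reads -- the "gross games" the comment mentions. The sketch below shows the same trick with toy types and a GET_RECORD macro standing in for QAM_GET_RECORD:

#include <stdio.h>
#include <string.h>

/* Toy stand-ins: a per-database struct and the handle that points at it. */
struct qinfo { unsigned re_len; };
struct dbh   { void *q_internal; };

/* A macro that, like QAM_GET_RECORD, needs a handle to locate record i. */
#define GET_RECORD(dbh, page, i)                                        \
        ((char *)(page) + (i) * ((struct qinfo *)(dbh)->q_internal)->re_len)

int
main(void)
{
        char page[64];
        struct qinfo fakeq;
        struct dbh fakedb;
        char *rec;

        memset(page, 0, sizeof(page));

        /*
         * No open handle is available, so fill in only the fields the
         * macro actually reads and point a stack handle at them.
         */
        fakeq.re_len = 16;
        fakedb.q_internal = &fakeq;

        rec = GET_RECORD(&fakedb, page, 2);
        printf("record 2 starts at offset %ld\n", (long)(rec - page));
        return (0);
}
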
diff --git a/bdb/rep/rep_method.c b/bdb/rep/rep_method.c
new file mode 100644
index 00000000000..6773a537f4f
--- /dev/null
+++ b/bdb/rep/rep_method.c
@@ -0,0 +1,1144 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2001-2002
+ * Sleepycat Software. All rights reserved.
+ */
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: rep_method.c,v 1.78 2002/09/10 12:58:07 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#ifdef HAVE_RPC
+#include <rpc/rpc.h>
+#endif
+
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_page.h"
+#include "dbinc/db_am.h"
+#include "dbinc/log.h"
+#include "dbinc/rep.h"
+#include "dbinc/txn.h"
+
+#ifdef HAVE_RPC
+#include "dbinc_auto/db_server.h"
+#include "dbinc_auto/rpc_client_ext.h"
+#endif
+
+static int __rep_abort_prepared __P((DB_ENV *));
+static int __rep_bt_cmp __P((DB *, const DBT *, const DBT *));
+static int __rep_client_dbinit __P((DB_ENV *, int));
+static int __rep_elect __P((DB_ENV *, int, int, u_int32_t, int *));
+static int __rep_elect_init __P((DB_ENV *, DB_LSN *, int, int, int, int *));
+static int __rep_flush __P((DB_ENV *));
+static int __rep_restore_prepared __P((DB_ENV *));
+static int __rep_set_limit __P((DB_ENV *, u_int32_t, u_int32_t));
+static int __rep_set_request __P((DB_ENV *, u_int32_t, u_int32_t));
+static int __rep_set_rep_transport __P((DB_ENV *, int,
+ int (*)(DB_ENV *, const DBT *, const DBT *, int, u_int32_t)));
+static int __rep_start __P((DB_ENV *, DBT *, u_int32_t));
+static int __rep_stat __P((DB_ENV *, DB_REP_STAT **, u_int32_t));
+static int __rep_wait __P((DB_ENV *, u_int32_t, int *, u_int32_t));
+
+/*
+ * __rep_dbenv_create --
+ * Replication-specific initialization of the DB_ENV structure.
+ *
+ * PUBLIC: int __rep_dbenv_create __P((DB_ENV *));
+ */
+int
+__rep_dbenv_create(dbenv)
+ DB_ENV *dbenv;
+{
+ DB_REP *db_rep;
+ int ret;
+
+#ifdef HAVE_RPC
+ if (F_ISSET(dbenv, DB_ENV_RPCCLIENT)) {
+ COMPQUIET(db_rep, NULL);
+ COMPQUIET(ret, 0);
+ dbenv->rep_elect = __dbcl_rep_elect;
+ dbenv->rep_flush = __dbcl_rep_flush;
+ dbenv->rep_process_message = __dbcl_rep_process_message;
+ dbenv->rep_start = __dbcl_rep_start;
+ dbenv->rep_stat = __dbcl_rep_stat;
+ dbenv->set_rep_limit = __dbcl_rep_set_limit;
+ dbenv->set_rep_request = __dbcl_rep_set_request;
+ dbenv->set_rep_transport = __dbcl_rep_set_rep_transport;
+
+ } else
+#endif
+ {
+ dbenv->rep_elect = __rep_elect;
+ dbenv->rep_flush = __rep_flush;
+ dbenv->rep_process_message = __rep_process_message;
+ dbenv->rep_start = __rep_start;
+ dbenv->rep_stat = __rep_stat;
+ dbenv->set_rep_limit = __rep_set_limit;
+ dbenv->set_rep_request = __rep_set_request;
+ dbenv->set_rep_transport = __rep_set_rep_transport;
+ /*
+ * !!!
+ * Our caller has not yet had the opportunity to reset the panic
+ * state or turn off mutex locking, and so we can neither check
+ * the panic state nor acquire a mutex in the DB_ENV create path.
+ */
+
+ if ((ret = __os_calloc(dbenv, 1, sizeof(DB_REP), &db_rep)) != 0)
+ return (ret);
+ dbenv->rep_handle = db_rep;
+
+ /* Initialize the per-process replication structure. */
+ db_rep->rep_send = NULL;
+ }
+
+ return (0);
+}
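
__rep_dbenv_create wires up the environment's replication entry points once, at create time, choosing RPC client stubs or the local implementations according to how the handle was created. A minimal sketch of that method-table selection with invented names (one entry point instead of the full set):

#include <stdio.h>

/* Toy environment with one replication entry point. */
struct env {
        int use_rpc;
        int (*rep_start)(struct env *, int flags);
};

static int
local_rep_start(struct env *e, int flags)
{
        (void)e;
        printf("local rep_start, flags=%d\n", flags);
        return (0);
}

static int
rpc_rep_start(struct env *e, int flags)
{
        (void)e;
        printf("forward rep_start over RPC, flags=%d\n", flags);
        return (0);
}

/* Pick the method table once, at create time, based on configuration. */
static void
env_create(struct env *e, int use_rpc)
{
        e->use_rpc = use_rpc;
        e->rep_start = use_rpc ? rpc_rep_start : local_rep_start;
}

int
main(void)
{
        struct env local, client;

        env_create(&local, 0);
        env_create(&client, 1);
        (void)local.rep_start(&local, 0);
        (void)client.rep_start(&client, 0);
        return (0);
}
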
+
+/*
+ * __rep_start --
+ * Become a master or client, and start sending messages to participate
+ * in the replication environment. Must be called after the environment
+ * is open.
+ */
+static int
+__rep_start(dbenv, dbt, flags)
+ DB_ENV *dbenv;
+ DBT *dbt;
+ u_int32_t flags;
+{
+ DB_LOG *dblp;
+ DB_LSN lsn;
+ DB_REP *db_rep;
+ REP *rep;
+ int announce, init_db, redo_prepared, ret;
+
+ PANIC_CHECK(dbenv);
+ ENV_ILLEGAL_BEFORE_OPEN(dbenv, "rep_start");
+ ENV_REQUIRES_CONFIG(dbenv, dbenv->tx_handle, "rep_stat", DB_INIT_TXN);
+
+ db_rep = dbenv->rep_handle;
+ rep = db_rep->region;
+
+ if ((ret = __db_fchk(dbenv, "DB_ENV->rep_start", flags,
+ DB_REP_CLIENT | DB_REP_LOGSONLY | DB_REP_MASTER)) != 0)
+ return (ret);
+
+ /* Exactly one of CLIENT and MASTER must be specified. */
+ if ((ret = __db_fcchk(dbenv,
+ "DB_ENV->rep_start", flags, DB_REP_CLIENT, DB_REP_MASTER)) != 0)
+ return (ret);
+ if (!LF_ISSET(DB_REP_CLIENT | DB_REP_MASTER | DB_REP_LOGSONLY)) {
+ __db_err(dbenv,
+ "DB_ENV->rep_start: replication mode must be specified");
+ return (EINVAL);
+ }
+
+ /* Masters can't be logs-only. */
+ if ((ret = __db_fcchk(dbenv,
+ "DB_ENV->rep_start", flags, DB_REP_LOGSONLY, DB_REP_MASTER)) != 0)
+ return (ret);
+
+ /* We need a transport function. */
+ if (db_rep->rep_send == NULL) {
+ __db_err(dbenv,
+ "DB_ENV->set_rep_transport must be called before DB_ENV->rep_start");
+ return (EINVAL);
+ }
+
+ /* We'd better not have any logged files open if we are a client. */
+ if (LF_ISSET(DB_REP_CLIENT) && (ret = __dbreg_nofiles(dbenv)) != 0) {
+ __db_err(dbenv, "DB_ENV->rep_start called with open files");
+ return (ret);
+ }
+
+ MUTEX_LOCK(dbenv, db_rep->mutexp);
+ if (rep->eid == DB_EID_INVALID)
+ rep->eid = dbenv->rep_eid;
+
+ if (LF_ISSET(DB_REP_MASTER)) {
+ if (F_ISSET(dbenv, DB_ENV_REP_CLIENT)) {
+ /*
+ * If we're upgrading from having been a client,
+ * preclose, so that we close our temporary database.
+ *
+ * Do not close files that we may have opened while
+ * doing a rep_apply; they'll get closed when we
+ * finally close the environment, but for now, leave
+ * them open, as we don't want to recycle their
+ * fileids, and we may need the handles again if
+ * we become a client and the original master
+ * that opened them becomes a master again.
+ */
+ if ((ret = __rep_preclose(dbenv, 0)) != 0)
+ return (ret);
+
+ /*
+ * Now write a __txn_recycle record so that
+ * clients don't get confused with our txnids
+ * and txnids of previous masters.
+ */
+ F_CLR(dbenv, DB_ENV_REP_CLIENT);
+ if ((ret = __txn_reset(dbenv)) != 0)
+ return (ret);
+ }
+
+ redo_prepared = 0;
+ if (!F_ISSET(rep, REP_F_MASTER)) {
+ /* Master is not yet set. */
+ if (F_ISSET(rep, REP_ISCLIENT)) {
+ F_CLR(rep, REP_ISCLIENT);
+ rep->gen = ++rep->w_gen;
+ redo_prepared = 1;
+ } else if (rep->gen == 0)
+ rep->gen = 1;
+ }
+
+ F_SET(rep, REP_F_MASTER);
+ F_SET(dbenv, DB_ENV_REP_MASTER);
+ MUTEX_UNLOCK(dbenv, db_rep->mutexp);
+ dblp = (DB_LOG *)dbenv->lg_handle;
+ R_LOCK(dbenv, &dblp->reginfo);
+ lsn = ((LOG *)dblp->reginfo.primary)->lsn;
+ R_UNLOCK(dbenv, &dblp->reginfo);
+
+ /*
+ * Send the NEWMASTER message, then restore prepared txns
+ * if and only if we just upgraded from being a client.
+ */
+ if ((ret = __rep_send_message(dbenv,
+ DB_EID_BROADCAST, REP_NEWMASTER, &lsn, NULL, 0)) == 0 &&
+ redo_prepared)
+ ret = __rep_restore_prepared(dbenv);
+ } else {
+ F_CLR(dbenv, DB_ENV_REP_MASTER);
+ F_SET(dbenv, DB_ENV_REP_CLIENT);
+ if (LF_ISSET(DB_REP_LOGSONLY))
+ F_SET(dbenv, DB_ENV_REP_LOGSONLY);
+
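+ /*
+ * Announce ourselves (the NEWCLIENT broadcast below) only if
+ * we were not already acting as a client or we have no known
+ * master.
+ */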
+ announce = !F_ISSET(rep, REP_ISCLIENT) ||
+ rep->master_id == DB_EID_INVALID;
+ init_db = 0;
+ if (!F_ISSET(rep, REP_ISCLIENT)) {
+ F_CLR(rep, REP_F_MASTER);
+ if (LF_ISSET(DB_REP_LOGSONLY))
+ F_SET(rep, REP_F_LOGSONLY);
+ else
+ F_SET(rep, REP_F_UPGRADE);
+
+ /*
+ * We initialize the client's generation number to 0.
+ * Upon startup, it looks for a master and updates the
+ * generation number as necessary, exactly as it does
+ * during normal operation and a master failure.
+ */
+ rep->gen = 0;
+ rep->master_id = DB_EID_INVALID;
+ init_db = 1;
+ }
+ MUTEX_UNLOCK(dbenv, db_rep->mutexp);
+
+ /*
+ * Abort any prepared transactions that were restored
+ * by recovery. We won't be able to create any txns of
+ * our own until they're resolved, but we can't resolve
+ * them ourselves; the master has to. If any get
+ * resolved as commits, we'll redo them when commit
+ * records come in. Aborts will simply be ignored.
+ */
+ if ((ret = __rep_abort_prepared(dbenv)) != 0)
+ return (ret);
+
+ if ((ret = __rep_client_dbinit(dbenv, init_db)) != 0)
+ return (ret);
+
+ /*
+ * If this client created a newly replicated environment,
+ * then announce the existence of this client. The master
+ * should respond with a message that will tell this client
+ * the current generation number and the current LSN. This
+ * will allow the client to either perform recovery or
+ * simply join in.
+ */
+ if (announce)
+ ret = __rep_send_message(dbenv,
+ DB_EID_BROADCAST, REP_NEWCLIENT, NULL, dbt, 0);
+ }
+ return (ret);
+}
+
+/*
+ * __rep_client_dbinit --
+ *
+ * Initialize the LSN database on the client side. This is called from the
+ * client initialization code. The startup flag value indicates if
+ * this is the first thread/process starting up and therefore should create
+ * the LSN database. This routine must be called once by each process acting
+ * as a client.
+ */
+static int
+__rep_client_dbinit(dbenv, startup)
+ DB_ENV *dbenv;
+ int startup;
+{
+ DB_REP *db_rep;
+ DB *dbp;
+ int ret, t_ret;
+ u_int32_t flags;
+
+ PANIC_CHECK(dbenv);
+ db_rep = dbenv->rep_handle;
+ dbp = NULL;
+
+#define REPDBNAME "__db.rep.db"
+
+ /* Check if this has already been called on this environment. */
+ if (db_rep->rep_db != NULL)
+ return (0);
+
+ MUTEX_LOCK(dbenv, db_rep->db_mutexp);
+
+ if (startup) {
+ if ((ret = db_create(&dbp, dbenv, 0)) != 0)
+ goto err;
+ /*
+ * Ignore errors, because if the file doesn't exist, this
+ * is perfectly OK.
+ */
+ (void)dbp->remove(dbp, REPDBNAME, NULL, 0);
+ }
+
+ if ((ret = db_create(&dbp, dbenv, 0)) != 0)
+ goto err;
+ if ((ret = dbp->set_bt_compare(dbp, __rep_bt_cmp)) != 0)
+ goto err;
+
+ /* Allow writes to this database on a client. */
+ F_SET(dbp, DB_AM_CL_WRITER);
+
+ flags = (F_ISSET(dbenv, DB_ENV_THREAD) ? DB_THREAD : 0) |
+ (startup ? DB_CREATE : 0);
+ if ((ret = dbp->open(dbp, NULL,
+ "__db.rep.db", NULL, DB_BTREE, flags, 0)) != 0)
+ goto err;
+
+ db_rep->rep_db = dbp;
+
+ if (0) {
+err: if (dbp != NULL &&
+ (t_ret = dbp->close(dbp, DB_NOSYNC)) != 0 && ret == 0)
+ ret = t_ret;
+ db_rep->rep_db = NULL;
+ }
+
+ MUTEX_UNLOCK(dbenv, db_rep->db_mutexp);
+
+ return (ret);
+}
+
+/*
+ * __rep_bt_cmp --
+ *
+ * Comparison function for the LSN table. We use the entire control
+ * structure as a key (for simplicity, so we don't have to merge the
+ * other fields in the control with the data field), but really only
+ * care about the LSNs.
+ */
+static int
+__rep_bt_cmp(dbp, dbt1, dbt2)
+ DB *dbp;
+ const DBT *dbt1, *dbt2;
+{
+ DB_LSN lsn1, lsn2;
+ REP_CONTROL *rp1, *rp2;
+
+ COMPQUIET(dbp, NULL);
+
+ rp1 = dbt1->data;
+ rp2 = dbt2->data;
+
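+ /*
+ * The data in the DBTs may not be properly aligned, so copy
+ * the LSNs out with __ua_memcpy before comparing them.
+ */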
+ __ua_memcpy(&lsn1, &rp1->lsn, sizeof(DB_LSN));
+ __ua_memcpy(&lsn2, &rp2->lsn, sizeof(DB_LSN));
+
+ if (lsn1.file > lsn2.file)
+ return (1);
+
+ if (lsn1.file < lsn2.file)
+ return (-1);
+
+ if (lsn1.offset > lsn2.offset)
+ return (1);
+
+ if (lsn1.offset < lsn2.offset)
+ return (-1);
+
+ return (0);
+}
+
+/*
+ * __rep_abort_prepared --
+ * Abort any prepared transactions that recovery restored.
+ *
+ * This is used by clients that have just run recovery, since
+ * they cannot/should not call txn_recover and handle prepared transactions
+ * themselves.
+ */
+static int
+__rep_abort_prepared(dbenv)
+ DB_ENV *dbenv;
+{
+#define PREPLISTSIZE 50
+ DB_PREPLIST prep[PREPLISTSIZE], *p;
+ DB_TXNMGR *mgr;
+ DB_TXNREGION *region;
+ int do_aborts, ret;
+ long count, i;
+ u_int32_t op;
+
+ mgr = dbenv->tx_handle;
+ region = mgr->reginfo.primary;
+
+ do_aborts = 0;
+ R_LOCK(dbenv, &mgr->reginfo);
+ if (region->stat.st_nrestores != 0)
+ do_aborts = 1;
+ R_UNLOCK(dbenv, &mgr->reginfo);
+
+ if (do_aborts) {
+ op = DB_FIRST;
+ do {
+ if ((ret = dbenv->txn_recover(dbenv,
+ prep, PREPLISTSIZE, &count, op)) != 0)
+ return (ret);
+ for (i = 0; i < count; i++) {
+ p = &prep[i];
+ if ((ret = p->txn->abort(p->txn)) != 0)
+ return (ret);
+ }
+ op = DB_NEXT;
+ } while (count == PREPLISTSIZE);
+ }
+
+ return (0);
+}
+
+/*
+ * __rep_restore_prepared --
+ * Restore to a prepared state any prepared but not yet committed
+ * transactions.
+ *
+ * This performs, in effect, a "mini-recovery"; it is called from
+ * __rep_start by newly upgraded masters. There may be transactions that an
+ * old master prepared but did not resolve, which we need to restore to an
+ * active state.
+ */
+static int
+__rep_restore_prepared(dbenv)
+ DB_ENV *dbenv;
+{
+ DB_LOGC *logc;
+ DB_LSN ckp_lsn, lsn;
+ DBT rec;
+ __txn_ckp_args *ckp_args;
+ __txn_regop_args *regop_args;
+ __txn_xa_regop_args *prep_args;
+ int ret, t_ret;
+ u_int32_t hi_txn, low_txn, rectype;
+ void *txninfo;
+
+ txninfo = NULL;
+ ckp_args = NULL;
+ prep_args = NULL;
+ regop_args = NULL;
+ ZERO_LSN(ckp_lsn);
+ ZERO_LSN(lsn);
+
+ if ((ret = dbenv->log_cursor(dbenv, &logc, 0)) != 0)
+ return (ret);
+
+ /*
+ * We need to consider the set of records between the most recent
+ * checkpoint LSN and the end of the log; any txn in that
+ * range, and only txns in that range, could still have been
+ * active, and thus prepared but not yet committed (PBNYC),
+ * when the old master died.
+ *
+ * Find the most recent checkpoint LSN, and get the record there.
+ * If there is no checkpoint in the log, start off by getting
+ * the very first record in the log instead.
+ */
+ memset(&rec, 0, sizeof(DBT));
+ if ((ret = __txn_getckp(dbenv, &lsn)) == 0) {
+ if ((ret = logc->get(logc, &lsn, &rec, DB_SET)) != 0) {
+ __db_err(dbenv,
+ "Checkpoint record at LSN [%lu][%lu] not found",
+ (u_long)lsn.file, (u_long)lsn.offset);
+ goto err;
+ }
+
+ if ((ret = __txn_ckp_read(dbenv, rec.data, &ckp_args)) != 0) {
+ __db_err(dbenv,
+ "Invalid checkpoint record at [%lu][%lu]",
+ (u_long)lsn.file, (u_long)lsn.offset);
+ goto err;
+ }
+
+ ckp_lsn = ckp_args->ckp_lsn;
+ __os_free(dbenv, ckp_args);
+
+ if ((ret = logc->get(logc, &ckp_lsn, &rec, DB_SET)) != 0) {
+ __db_err(dbenv,
+ "Checkpoint LSN record [%lu][%lu] not found",
+ (u_long)ckp_lsn.file, (u_long)ckp_lsn.offset);
+ goto err;
+ }
+ } else if ((ret = logc->get(logc, &lsn, &rec, DB_FIRST)) != 0) {
+ if (ret == DB_NOTFOUND) {
+ /* An empty log means no PBNYC txns. */
+ ret = 0;
+ goto done;
+ }
+ __db_err(dbenv, "Attempt to get first log record failed");
+ goto err;
+ }
+
+ /*
+ * We use the same txnlist infrastructure that recovery does;
+ * it demands an estimate of the high and low txnids for
+ * initialization.
+ *
+ * First, the low txnid.
+ */
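+ /* Records with no transaction carry a txnid of 0; skip them. */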
+ do {
+ /* txnid is after rectype, which is a u_int32. */
+ memcpy(&low_txn,
+ (u_int8_t *)rec.data + sizeof(u_int32_t), sizeof(low_txn));
+ if (low_txn != 0)
+ break;
+ } while ((ret = logc->get(logc, &lsn, &rec, DB_NEXT)) == 0);
+
+ /* If there are no txns, there are no PBNYC txns. */
+ if (ret == DB_NOTFOUND) {
+ ret = 0;
+ goto done;
+ } else if (ret != 0)
+ goto err;
+
+ /* Now, the high txnid. */
+ if ((ret = logc->get(logc, &lsn, &rec, DB_LAST)) != 0) {
+ /*
+ * Note that DB_NOTFOUND is unacceptable here because we
+ * had to have looked at some log record to get this far.
+ */
+ __db_err(dbenv, "Final log record not found");
+ goto err;
+ }
+ do {
+ /* txnid is after rectype, which is a u_int32. */
+ memcpy(&hi_txn,
+ (u_int8_t *)rec.data + sizeof(u_int32_t), sizeof(hi_txn));
+ if (hi_txn != 0)
+ break;
+ } while ((ret = logc->get(logc, &lsn, &rec, DB_PREV)) == 0);
+ if (ret == DB_NOTFOUND) {
+ ret = 0;
+ goto done;
+ } else if (ret != 0)
+ goto err;
+
+ /* We have a high and low txnid. Initialise the txn list. */
+ if ((ret =
+ __db_txnlist_init(dbenv, low_txn, hi_txn, NULL, &txninfo)) != 0)
+ goto err;
+
+ /*
+ * Now, walk backward from the end of the log to ckp_lsn. Any
+ * prepares that we hit without first hitting a commit or
+ * abort belong to PBNYC txns, and we need to apply them and
+ * restore them to a prepared state.
+ *
+ * Note that we wind up applying transactions out of order.
+ * Since all PBNYC txns still held locks on the old master and
+ * were isolated, this should be safe.
+ */
+ for (ret = logc->get(logc, &lsn, &rec, DB_LAST);
+ ret == 0 && log_compare(&lsn, &ckp_lsn) > 0;
+ ret = logc->get(logc, &lsn, &rec, DB_PREV)) {
+ memcpy(&rectype, rec.data, sizeof(rectype));
+ switch (rectype) {
+ case DB___txn_regop:
+ /*
+ * It's a commit or abort--but we don't care
+ * which! Just add it to the list of txns
+ * that are resolved.
+ */
+ if ((ret = __txn_regop_read(dbenv, rec.data,
+ &regop_args)) != 0)
+ goto err;
+
+ ret = __db_txnlist_find(dbenv,
+ txninfo, regop_args->txnid->txnid);
+ if (ret == DB_NOTFOUND)
+ ret = __db_txnlist_add(dbenv, txninfo,
+ regop_args->txnid->txnid,
+ regop_args->opcode, &lsn);
+ __os_free(dbenv, regop_args);
+ break;
+ case DB___txn_xa_regop:
+ /*
+ * It's a prepare. If we haven't put the
+ * txn on our list yet, it hasn't been
+ * resolved, so apply and restore it.
+ */
+ if ((ret = __txn_xa_regop_read(dbenv, rec.data,
+ &prep_args)) != 0)
+ goto err;
+ ret = __db_txnlist_find(dbenv, txninfo,
+ prep_args->txnid->txnid);
+ if (ret == DB_NOTFOUND)
+ if ((ret = __rep_process_txn(dbenv, &rec)) == 0)
+ ret = __txn_restore_txn(dbenv,
+ &lsn, prep_args);
+ __os_free(dbenv, prep_args);
+ break;
+ default:
+ continue;
+ }
+ }
+
+ /* It's not an error to have hit the beginning of the log. */
+ if (ret == DB_NOTFOUND)
+ ret = 0;
+
+done:
+err: t_ret = logc->close(logc, 0);
+
+ if (txninfo != NULL)
+ __db_txnlist_end(dbenv, txninfo);
+
+ return (ret == 0 ? t_ret : ret);
+}
+
+/*
+ * __rep_set_limit --
+ * Set a limit on the amount of data that will be sent during a single
+ * invocation of __rep_process_message.
+ */
+static int
+__rep_set_limit(dbenv, gbytes, bytes)
+ DB_ENV *dbenv;
+ u_int32_t gbytes;
+ u_int32_t bytes;
+{
+ DB_REP *db_rep;
+ REP *rep;
+
+ PANIC_CHECK(dbenv);
+
+ if ((db_rep = dbenv->rep_handle) == NULL) {
+ __db_err(dbenv,
+ "DB_ENV->set_rep_limit: database environment not properly initialized");
+ return (__db_panic(dbenv, EINVAL));
+ }
+ rep = db_rep->region;
+ MUTEX_LOCK(dbenv, db_rep->mutexp);
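+ /* Normalize the limit so bytes is always less than a gigabyte. */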
+ if (bytes > GIGABYTE) {
+ gbytes += bytes / GIGABYTE;
+ bytes = bytes % GIGABYTE;
+ }
+ rep->gbytes = gbytes;
+ rep->bytes = bytes;
+ MUTEX_UNLOCK(dbenv, db_rep->mutexp);
+
+ return (0);
+}
+
+/*
+ * __rep_set_request --
+ * Set the minimum and maximum number of log records we wait
+ * before requesting retransmission of a missing record.
+ * UNDOCUMENTED.
+ */
+static int
+__rep_set_request(dbenv, min, max)
+ DB_ENV *dbenv;
+ u_int32_t min;
+ u_int32_t max;
+{
+ LOG *lp;
+ DB_LOG *dblp;
+ DB_REP *db_rep;
+ REP *rep;
+
+ PANIC_CHECK(dbenv);
+
+ if ((db_rep = dbenv->rep_handle) == NULL) {
+ __db_err(dbenv,
+ "DB_ENV->set_rep_request: database environment not properly initialized");
+ return (__db_panic(dbenv, EINVAL));
+ }
+ rep = db_rep->region;
+ MUTEX_LOCK(dbenv, db_rep->mutexp);
+ rep->request_gap = min;
+ rep->max_gap = max;
+ MUTEX_UNLOCK(dbenv, db_rep->mutexp);
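+
+ /*
+ * Reset the log-request counters kept in the log region so the
+ * new gap values take effect on the next missing record.
+ */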
+ dblp = dbenv->lg_handle;
+ if (dblp != NULL && (lp = dblp->reginfo.primary) != NULL) {
+ R_LOCK(dbenv, &dblp->reginfo);
+ lp->wait_recs = 0;
+ lp->rcvd_recs = 0;
+ R_UNLOCK(dbenv, &dblp->reginfo);
+ }
+
+ return (0);
+}
+
+/*
+ * __rep_set_rep_transport --
+ * Set the transport function for replication.
+ */
+static int
+__rep_set_rep_transport(dbenv, eid, f_send)
+ DB_ENV *dbenv;
+ int eid;
+ int (*f_send) __P((DB_ENV *, const DBT *, const DBT *, int, u_int32_t));
+{
+ DB_REP *db_rep;
+
+ PANIC_CHECK(dbenv);
+
+ if ((db_rep = dbenv->rep_handle) == NULL) {
+ __db_err(dbenv,
+ "DB_ENV->set_rep_transport: database environment not properly initialized");
+ return (__db_panic(dbenv, EINVAL));
+ }
+
+ if (f_send == NULL) {
+ __db_err(dbenv,
+ "DB_ENV->set_rep_transport: no send function specified");
+ return (EINVAL);
+ }
+
+ if (eid < 0) {
+ __db_err(dbenv,
+ "DB_ENV->set_rep_transport: eid must be greater than or equal to 0");
+ return (EINVAL);
+ }
+
+ db_rep->rep_send = f_send;
+
+ dbenv->rep_eid = eid;
+ return (0);
+}
+
+/*
+ * __rep_elect --
+ * Called after master failure to hold/participate in an election for
+ * a new master.
+ */
+static int
+__rep_elect(dbenv, nsites, priority, timeout, eidp)
+ DB_ENV *dbenv;
+ int nsites, priority;
+ u_int32_t timeout;
+ int *eidp;
+{
+ DB_LOG *dblp;
+ DB_LSN lsn;
+ DB_REP *db_rep;
+ REP *rep;
+ int in_progress, ret, send_vote, tiebreaker;
+ u_int32_t pid, sec, usec;
+
+ PANIC_CHECK(dbenv);
+ ENV_REQUIRES_CONFIG(dbenv, dbenv->tx_handle, "rep_elect", DB_INIT_TXN);
+
+ /* Error checking. */
+ if (nsites <= 0) {
+ __db_err(dbenv,
+ "DB_ENV->rep_elect: nsites must be greater than 0");
+ return (EINVAL);
+ }
+ if (priority < 0) {
+ __db_err(dbenv,
+ "DB_ENV->rep_elect: priority may not be negative");
+ return (EINVAL);
+ }
+
+ db_rep = dbenv->rep_handle;
+ rep = db_rep->region;
+ dblp = dbenv->lg_handle;
+
+ R_LOCK(dbenv, &dblp->reginfo);
+ lsn = ((LOG *)dblp->reginfo.primary)->lsn;
+ R_UNLOCK(dbenv, &dblp->reginfo);
+
+ /* Generate a randomized tiebreaker value. */
+ __os_id(&pid);
+ if ((ret = __os_clock(dbenv, &sec, &usec)) != 0)
+ return (ret);
+ tiebreaker = pid ^ sec ^ usec ^ (u_int)rand() ^ P_TO_UINT32(&pid);
+
+ if ((ret = __rep_elect_init(dbenv,
+ &lsn, nsites, priority, tiebreaker, &in_progress)) != 0) {
+ if (ret == DB_REP_NEWMASTER) {
+ ret = 0;
+ *eidp = dbenv->rep_eid;
+ }
+ return (ret);
+ }
+
+ if (!in_progress) {
+#ifdef DIAGNOSTIC
+ if (FLD_ISSET(dbenv->verbose, DB_VERB_REPLICATION))
+ __db_err(dbenv, "Beginning an election");
+#endif
+ if ((ret = __rep_send_message(dbenv,
+ DB_EID_BROADCAST, REP_ELECT, NULL, NULL, 0)) != 0)
+ goto err;
+ DB_ENV_TEST_RECOVERY(dbenv, DB_TEST_ELECTSEND, ret, NULL);
+ }
+
+ /* Now send vote */
+ if ((ret =
+ __rep_send_vote(dbenv, &lsn, nsites, priority, tiebreaker)) != 0)
+ goto err;
+ DB_ENV_TEST_RECOVERY(dbenv, DB_TEST_ELECTVOTE1, ret, NULL);
+
+ ret = __rep_wait(dbenv, timeout, eidp, REP_F_EPHASE1);
+ DB_ENV_TEST_RECOVERY(dbenv, DB_TEST_ELECTWAIT1, ret, NULL);
+ switch (ret) {
+ case 0:
+ /* Check if election complete or phase complete. */
+ if (*eidp != DB_EID_INVALID)
+ return (0);
+ goto phase2;
+ case DB_TIMEOUT:
+ break;
+ default:
+ goto err;
+ }
+ /*
+ * If we got here, we haven't heard from everyone, but we've
+ * run out of time, so it's time to decide if we have enough
+ * votes to pick a winner and if so, to send out a vote to
+ * the winner.
+ */
+ MUTEX_LOCK(dbenv, db_rep->mutexp);
+ send_vote = DB_EID_INVALID;
+ if (rep->sites > rep->nsites / 2) {
+ /* We think we've seen enough to cast a vote. */
+ send_vote = rep->winner;
+ if (rep->winner == rep->eid)
+ rep->votes++;
+ F_CLR(rep, REP_F_EPHASE1);
+ F_SET(rep, REP_F_EPHASE2);
+ }
+ MUTEX_UNLOCK(dbenv, db_rep->mutexp);
+ if (send_vote == DB_EID_INVALID) {
+ /* We do not have enough votes to elect. */
+#ifdef DIAGNOSTIC
+ if (FLD_ISSET(dbenv->verbose, DB_VERB_REPLICATION))
+ __db_err(dbenv,
+ "Not enough votes to elect: received %d of %d",
+ rep->sites, rep->nsites);
+#endif
+ ret = DB_REP_UNAVAIL;
+ goto err;
+
+ }
+#ifdef DIAGNOSTIC
+ if (FLD_ISSET(dbenv->verbose, DB_VERB_REPLICATION) &&
+ send_vote != rep->eid)
+ __db_err(dbenv, "Sending vote");
+#endif
+
+ if (send_vote != rep->eid && (ret = __rep_send_message(dbenv,
+ send_vote, REP_VOTE2, NULL, NULL, 0)) != 0)
+ goto err;
+ DB_ENV_TEST_RECOVERY(dbenv, DB_TEST_ELECTVOTE2, ret, NULL);
+
+phase2: ret = __rep_wait(dbenv, timeout, eidp, REP_F_EPHASE2);
+ DB_ENV_TEST_RECOVERY(dbenv, DB_TEST_ELECTWAIT2, ret, NULL);
+ switch (ret) {
+ case 0:
+ return (0);
+ case DB_TIMEOUT:
+ ret = DB_REP_UNAVAIL;
+ break;
+ default:
+ goto err;
+ }
+
+DB_TEST_RECOVERY_LABEL
+err: MUTEX_LOCK(dbenv, db_rep->mutexp);
+ ELECTION_DONE(rep);
+ MUTEX_UNLOCK(dbenv, db_rep->mutexp);
+
+#ifdef DIAGNOSTIC
+ if (FLD_ISSET(dbenv->verbose, DB_VERB_REPLICATION))
+ __db_err(dbenv, "Ended election with %d", ret);
+#endif
+ return (ret);
+}
+
+/*
+ * __rep_elect_init --
+ * Initialize an election. Sets beginp non-zero if the election is
+ * already in progress; makes it 0 otherwise.
+ */
+static int
+__rep_elect_init(dbenv, lsnp, nsites, priority, tiebreaker, beginp)
+ DB_ENV *dbenv;
+ DB_LSN *lsnp;
+ int nsites, priority, tiebreaker, *beginp;
+{
+ DB_REP *db_rep;
+ REP *rep;
+ int ret, *tally;
+
+ db_rep = dbenv->rep_handle;
+ rep = db_rep->region;
+
+ ret = 0;
+
+ /* We may miscount, as we don't hold the replication mutex here. */
+ rep->stat.st_elections++;
+
+ /* If we are already a master, simply broadcast that fact and return. */
+ if (F_ISSET(dbenv, DB_ENV_REP_MASTER)) {
+ (void)__rep_send_message(dbenv,
+ DB_EID_BROADCAST, REP_NEWMASTER, lsnp, NULL, 0);
+ rep->stat.st_elections_won++;
+ return (DB_REP_NEWMASTER);
+ }
+
+ MUTEX_LOCK(dbenv, db_rep->mutexp);
+ *beginp = IN_ELECTION(rep);
+ if (!*beginp) {
+ /*
+ * Make sure that we always initialize all the election fields
+ * before putting ourselves in an election state. That means
+ * issuing calls that can fail (allocation) before setting all
+ * the variables.
+ */
+ if (nsites > rep->asites &&
+ (ret = __rep_grow_sites(dbenv, nsites)) != 0)
+ goto err;
+ DB_ENV_TEST_RECOVERY(dbenv, DB_TEST_ELECTINIT, ret, NULL);
+ rep->nsites = nsites;
+ rep->priority = priority;
+ rep->votes = 0;
+ rep->master_id = DB_EID_INVALID;
+ F_SET(rep, REP_F_EPHASE1);
+
+ /* We have always heard from ourselves. */
+ rep->sites = 1;
+ tally = R_ADDR((REGINFO *)dbenv->reginfo, rep->tally_off);
+ tally[0] = rep->eid;
+
+ if (priority != 0) {
+ /* Make ourselves the winner to start. */
+ rep->winner = rep->eid;
+ rep->w_priority = priority;
+ rep->w_gen = rep->gen;
+ rep->w_lsn = *lsnp;
+ rep->w_tiebreaker = tiebreaker;
+ } else {
+ rep->winner = DB_EID_INVALID;
+ rep->w_priority = 0;
+ rep->w_gen = 0;
+ ZERO_LSN(rep->w_lsn);
+ rep->w_tiebreaker = 0;
+ }
+ }
+DB_TEST_RECOVERY_LABEL
+err: MUTEX_UNLOCK(dbenv, db_rep->mutexp);
+ return (ret);
+}
+
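+/*
+ * __rep_wait --
+ * Wait for an election to complete.  Sleep in short increments until
+ * either the given election-phase flag clears and a master has been
+ * chosen (return 0 with the master's ID in *eidp), or the timeout
+ * expires (return DB_TIMEOUT).
+ */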
+static int
+__rep_wait(dbenv, timeout, eidp, flags)
+ DB_ENV *dbenv;
+ u_int32_t timeout;
+ int *eidp;
+ u_int32_t flags;
+{
+ DB_REP *db_rep;
+ REP *rep;
+ int done, ret;
+ u_int32_t sleeptime;
+
+ done = 0;
+ db_rep = dbenv->rep_handle;
+ rep = db_rep->region;
+
+ /*
+ * The user specifies an overall timeout for this call, but checking
+ * is cheap and the timeout may be a generous upper bound.
+ * Sleep repeatedly for the smaller of .5s and timeout/10.
+ */
+ sleeptime = (timeout > 5000000) ? 500000 : timeout / 10;
+ if (sleeptime == 0)
+ sleeptime++;
+ while (timeout > 0) {
+ if ((ret = __os_sleep(dbenv, 0, sleeptime)) != 0)
+ return (ret);
+ MUTEX_LOCK(dbenv, db_rep->mutexp);
+ done = !F_ISSET(rep, flags) && rep->master_id != DB_EID_INVALID;
+
+ *eidp = rep->master_id;
+ MUTEX_UNLOCK(dbenv, db_rep->mutexp);
+
+ if (done)
+ return (0);
+
+ if (timeout > sleeptime)
+ timeout -= sleeptime;
+ else
+ timeout = 0;
+ }
+ return (DB_TIMEOUT);
+}
+
+/*
+ * __rep_flush --
+ * Re-push the last log record to all clients, in case they've lost
+ * messages and don't know it.
+ */
+static int
+__rep_flush(dbenv)
+ DB_ENV *dbenv;
+{
+ DBT rec;
+ DB_LOGC *logc;
+ DB_LSN lsn;
+ int ret, t_ret;
+
+ PANIC_CHECK(dbenv);
+ ENV_REQUIRES_CONFIG(dbenv, dbenv->tx_handle, "rep_flush", DB_INIT_TXN);
+
+ if ((ret = dbenv->log_cursor(dbenv, &logc, 0)) != 0)
+ return (ret);
+
+ memset(&rec, 0, sizeof(rec));
+ memset(&lsn, 0, sizeof(lsn));
+
+ if ((ret = logc->get(logc, &lsn, &rec, DB_LAST)) != 0)
+ goto err;
+
+ ret = __rep_send_message(dbenv,
+ DB_EID_BROADCAST, REP_LOG, &lsn, &rec, 0);
+
+err: if ((t_ret = logc->close(logc, 0)) != 0 && ret == 0)
+ ret = t_ret;
+ return (ret);
+}
+
+/*
+ * __rep_stat --
+ * Fetch replication statistics.
+ */
+static int
+__rep_stat(dbenv, statp, flags)
+ DB_ENV *dbenv;
+ DB_REP_STAT **statp;
+ u_int32_t flags;
+{
+ DB_LOG *dblp;
+ DB_REP *db_rep;
+ DB_REP_STAT *stats;
+ LOG *lp;
+ REP *rep;
+ u_int32_t queued;
+ int ret;
+
+ PANIC_CHECK(dbenv);
+ ENV_REQUIRES_CONFIG(dbenv, dbenv->tx_handle, "rep_stat", DB_INIT_TXN);
+
+ db_rep = dbenv->rep_handle;
+ rep = db_rep->region;
+ dblp = dbenv->lg_handle;
+ lp = dblp->reginfo.primary;
+
+ *statp = NULL;
+ if ((ret = __db_fchk(dbenv,
+ "DB_ENV->rep_stat", flags, DB_STAT_CLEAR)) != 0)
+ return (ret);
+
+ /* Allocate a stat struct to return to the user. */
+ if ((ret = __os_umalloc(dbenv, sizeof(DB_REP_STAT), &stats)) != 0)
+ return (ret);
+
+ MUTEX_LOCK(dbenv, db_rep->mutexp);
+ memcpy(stats, &rep->stat, sizeof(*stats));
+
+ /* Copy out election stats. */
+ if (IN_ELECTION(rep)) {
+ if (F_ISSET(rep, REP_F_EPHASE1))
+ stats->st_election_status = 1;
+ else if (F_ISSET(rep, REP_F_EPHASE2))
+ stats->st_election_status = 2;
+
+ stats->st_election_nsites = rep->sites;
+ stats->st_election_cur_winner = rep->winner;
+ stats->st_election_priority = rep->w_priority;
+ stats->st_election_gen = rep->w_gen;
+ stats->st_election_lsn = rep->w_lsn;
+ stats->st_election_votes = rep->votes;
+ stats->st_election_tiebreaker = rep->w_tiebreaker;
+ }
+
+ /* Copy out other info that's protected by the rep mutex. */
+ stats->st_env_id = rep->eid;
+ stats->st_env_priority = rep->priority;
+ stats->st_nsites = rep->nsites;
+ stats->st_master = rep->master_id;
+ stats->st_gen = rep->gen;
+
+ if (F_ISSET(rep, REP_F_MASTER))
+ stats->st_status = DB_REP_MASTER;
+ else if (F_ISSET(rep, REP_F_LOGSONLY))
+ stats->st_status = DB_REP_LOGSONLY;
+ else if (F_ISSET(rep, REP_F_UPGRADE))
+ stats->st_status = DB_REP_CLIENT;
+ else
+ stats->st_status = 0;
+
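+ /*
+ * st_log_queued tracks records currently sitting in the
+ * client's temporary database, so preserve it across the
+ * clear and reset the total/max counters to the current
+ * queue length.
+ */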
+ if (LF_ISSET(DB_STAT_CLEAR)) {
+ queued = rep->stat.st_log_queued;
+ memset(&rep->stat, 0, sizeof(rep->stat));
+ rep->stat.st_log_queued = rep->stat.st_log_queued_total =
+ rep->stat.st_log_queued_max = queued;
+ }
+ MUTEX_UNLOCK(dbenv, db_rep->mutexp);
+
+ /*
+ * Log-related replication info is stored in the log system and
+ * protected by the log region lock.
+ */
+ R_LOCK(dbenv, &dblp->reginfo);
+ if (F_ISSET(rep, REP_ISCLIENT)) {
+ stats->st_next_lsn = lp->ready_lsn;
+ stats->st_waiting_lsn = lp->waiting_lsn;
+ } else {
+ if (F_ISSET(rep, REP_F_MASTER))
+ stats->st_next_lsn = lp->lsn;
+ else
+ ZERO_LSN(stats->st_next_lsn);
+ ZERO_LSN(stats->st_waiting_lsn);
+ }
+ R_UNLOCK(dbenv, &dblp->reginfo);
+
+ *statp = stats;
+ return (0);
+}
diff --git a/bdb/rep/rep_record.c b/bdb/rep/rep_record.c
new file mode 100644
index 00000000000..e725008786c
--- /dev/null
+++ b/bdb/rep/rep_record.c
@@ -0,0 +1,1510 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2001-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: rep_record.c,v 1.111 2002/09/11 19:39:11 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <stdlib.h>
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_page.h"
+#include "dbinc/db_am.h"
+#include "dbinc/log.h"
+#include "dbinc/rep.h"
+#include "dbinc/txn.h"
+
+static int __rep_apply __P((DB_ENV *, REP_CONTROL *, DBT *));
+static int __rep_collect_txn __P((DB_ENV *, DB_LSN *, LSN_COLLECTION *));
+static int __rep_lsn_cmp __P((const void *, const void *));
+static int __rep_newfile __P((DB_ENV *, REP_CONTROL *, DBT *, DB_LSN *));
+
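+/*
+ * A log record is "simple" if the client can just write it to the log
+ * without further processing; commit, checkpoint and dbreg-register
+ * records are the exceptions and get special handling in __rep_apply.
+ */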
+#define IS_SIMPLE(R) ((R) != DB___txn_regop && \
+ (R) != DB___txn_ckp && (R) != DB___dbreg_register)
+
+/*
+ * __rep_process_message --
+ *
+ * This routine takes an incoming message and processes it.
+ *
+ * control: contains the control fields from the record
+ * rec: contains the actual record
+ * eidp: contains the machine id of the sender of the message;
+ * in the case of a DB_NEWMASTER message, returns the eid
+ * of the new master.
+ *
+ * PUBLIC: int __rep_process_message __P((DB_ENV *, DBT *, DBT *, int *));
+ */
+int
+__rep_process_message(dbenv, control, rec, eidp)
+ DB_ENV *dbenv;
+ DBT *control, *rec;
+ int *eidp;
+{
+ DB_LOG *dblp;
+ DB_LOGC *logc;
+ DB_LSN init_lsn, lsn, newfilelsn, oldfilelsn;
+ DB_REP *db_rep;
+ DBT *d, data_dbt, lsndbt, mylog;
+ LOG *lp;
+ REP *rep;
+ REP_CONTROL *rp;
+ REP_VOTE_INFO *vi;
+ u_int32_t bytes, gen, gbytes, type, unused;
+ int check_limit, cmp, done, do_req, i;
+ int master, old, recovering, ret, t_ret, *tally;
+
+ PANIC_CHECK(dbenv);
+ ENV_REQUIRES_CONFIG(dbenv,
+     dbenv->tx_handle, "rep_process_message", DB_INIT_TXN);
+
+ /* The control argument must be non-NULL. */
+ if (control == NULL || control->size == 0) {
+ __db_err(dbenv,
+ "DB_ENV->rep_process_message: control argument must be specified");
+ return (EINVAL);
+ }
+
+ ret = 0;
+ db_rep = dbenv->rep_handle;
+ rep = db_rep->region;
+ dblp = dbenv->lg_handle;
+ lp = dblp->reginfo.primary;
+
+ MUTEX_LOCK(dbenv, db_rep->mutexp);
+ gen = rep->gen;
+ recovering = F_ISSET(rep, REP_F_RECOVER);
+
+ rep->stat.st_msgs_processed++;
+ MUTEX_UNLOCK(dbenv, db_rep->mutexp);
+
+ rp = (REP_CONTROL *)control->data;
+
+#if 0
+ __rep_print_message(dbenv, *eidp, rp, "rep_process_message");
+#endif
+
+ /* Complain if we see an improper version number. */
+ if (rp->rep_version != DB_REPVERSION) {
+ __db_err(dbenv,
+ "unexpected replication message version %d, expected %d",
+ rp->rep_version, DB_REPVERSION);
+ return (EINVAL);
+ }
+ if (rp->log_version != DB_LOGVERSION) {
+ __db_err(dbenv,
+ "unexpected log record version %d, expected %d",
+ rp->log_version, DB_LOGVERSION);
+ return (EINVAL);
+ }
+
+ /*
+ * Check for generation number matching. Ignore any old messages
+ * except requests that are indicative of a new client that needs
+ * to get in sync.
+ */
+ if (rp->gen < gen && rp->rectype != REP_ALIVE_REQ &&
+ rp->rectype != REP_NEWCLIENT && rp->rectype != REP_MASTER_REQ) {
+ /*
+ * We don't hold the rep mutex, and could miscount if we race.
+ */
+ rep->stat.st_msgs_badgen++;
+ return (0);
+ }
+ if (rp->gen > gen && rp->rectype != REP_ALIVE &&
+ rp->rectype != REP_NEWMASTER)
+ return (__rep_send_message(dbenv,
+ DB_EID_BROADCAST, REP_MASTER_REQ, NULL, NULL, 0));
+
+ /*
+ * If we're in recovery, ignore all messages except the ones that
+ * track the master and elections (ALIVE, ALIVE_REQ, ELECT,
+ * NEWCLIENT, NEWMASTER, NEWSITE, VERIFY, VOTE1 and VOTE2);
+ * the master might fail while we are recovering.
+ */
+ if (recovering)
+ switch(rp->rectype) {
+ case REP_ALIVE:
+ case REP_ALIVE_REQ:
+ case REP_ELECT:
+ case REP_NEWCLIENT:
+ case REP_NEWMASTER:
+ case REP_NEWSITE:
+ case REP_VERIFY:
+ R_LOCK(dbenv, &dblp->reginfo);
+ cmp = log_compare(&lp->verify_lsn, &rp->lsn);
+ R_UNLOCK(dbenv, &dblp->reginfo);
+ if (cmp != 0)
+ goto skip;
+ /* FALLTHROUGH */
+ case REP_VOTE1:
+ case REP_VOTE2:
+ break;
+ default:
+skip: /*
+ * We don't hold the rep mutex, and could
+ * miscount if we race.
+ */
+ rep->stat.st_msgs_recover++;
+
+ /* Check for need to retransmit. */
+ R_LOCK(dbenv, &dblp->reginfo);
+ do_req = *eidp == rep->master_id &&
+ ++lp->rcvd_recs >= lp->wait_recs;
+ if (do_req) {
+ lp->wait_recs *= 2;
+ if (lp->wait_recs > rep->max_gap)
+ lp->wait_recs = rep->max_gap;
+ lp->rcvd_recs = 0;
+ lsn = lp->verify_lsn;
+ }
+ R_UNLOCK(dbenv, &dblp->reginfo);
+ if (do_req)
+ ret = __rep_send_message(dbenv, *eidp,
+ REP_VERIFY_REQ, &lsn, NULL, 0);
+
+ return (ret);
+ }
+
+ switch(rp->rectype) {
+ case REP_ALIVE:
+ ANYSITE(dbenv);
+ if (rp->gen > gen && rp->flags)
+ return (__rep_new_master(dbenv, rp, *eidp));
+ break;
+ case REP_ALIVE_REQ:
+ ANYSITE(dbenv);
+ dblp = dbenv->lg_handle;
+ R_LOCK(dbenv, &dblp->reginfo);
+ lsn = ((LOG *)dblp->reginfo.primary)->lsn;
+ R_UNLOCK(dbenv, &dblp->reginfo);
+ return (__rep_send_message(dbenv,
+ *eidp, REP_ALIVE, &lsn, NULL,
+ F_ISSET(dbenv, DB_ENV_REP_MASTER) ? 1 : 0));
+ case REP_ALL_REQ:
+ MASTER_ONLY(dbenv);
+ gbytes = bytes = 0;
+ MUTEX_LOCK(dbenv, db_rep->mutexp);
+ gbytes = rep->gbytes;
+ bytes = rep->bytes;
+ MUTEX_UNLOCK(dbenv, db_rep->mutexp);
+ check_limit = gbytes != 0 || bytes != 0;
+ if ((ret = dbenv->log_cursor(dbenv, &logc, 0)) != 0)
+ return (ret);
+ memset(&data_dbt, 0, sizeof(data_dbt));
+ oldfilelsn = lsn = rp->lsn;
+ type = REP_LOG;
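+ /*
+ * Walk the log from the requested LSN to the end of the log,
+ * sending each record as a REP_LOG message; if the configured
+ * byte limit runs out first, the last record goes out as
+ * REP_LOG_MORE and we stop.
+ */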
+ for (ret = logc->get(logc, &rp->lsn, &data_dbt, DB_SET);
+ ret == 0 && type == REP_LOG;
+ ret = logc->get(logc, &lsn, &data_dbt, DB_NEXT)) {
+ /*
+ * lsn.offset will only be 0 if this is the
+ * beginning of the log; DB_SET, but not DB_NEXT,
+ * can set the log cursor to [n][0].
+ */
+ if (lsn.offset == 0)
+ ret = __rep_send_message(dbenv, *eidp,
+ REP_NEWFILE, &lsn, NULL, 0);
+ else {
+ /*
+ * DB_NEXT will never run into offsets
+ * of 0; thus, when a log file changes,
+ * we'll have a real log record with
+ * some lsn [n][m], and we'll also want to send
+ * a NEWFILE message with lsn [n][0].
+ * So that the client can detect gaps,
+ * send in the rec parameter the
+ * last LSN in the old file.
+ */
+ if (lsn.file != oldfilelsn.file) {
+ newfilelsn.file = lsn.file;
+ newfilelsn.offset = 0;
+
+ memset(&lsndbt, 0, sizeof(DBT));
+ lsndbt.size = sizeof(DB_LSN);
+ lsndbt.data = &oldfilelsn;
+
+ if ((ret = __rep_send_message(dbenv,
+ *eidp, REP_NEWFILE, &newfilelsn,
+ &lsndbt, 0)) != 0)
+ break;
+ }
+ if (check_limit) {
+ /*
+ * data_dbt.size is only the size of
+ * the log record; it doesn't count
+ * the size of the control structure.
+ * Factor that in as well so we're
+ * not off by a lot if our log
+ * records are small.
+ */
+ while (bytes < data_dbt.size +
+ sizeof(REP_CONTROL)) {
+ if (gbytes > 0) {
+ bytes += GIGABYTE;
+ --gbytes;
+ continue;
+ }
+ /*
+ * We don't hold the rep mutex,
+ * and may miscount.
+ */
+ rep->stat.st_nthrottles++;
+ type = REP_LOG_MORE;
+ goto send;
+ }
+ bytes -= (data_dbt.size +
+ sizeof(REP_CONTROL));
+ }
+send: ret = __rep_send_message(dbenv, *eidp,
+ type, &lsn, &data_dbt, 0);
+ }
+
+ /*
+ * In case we're about to change files and need it
+ * for a NEWFILE message, save the current LSN.
+ */
+ oldfilelsn = lsn;
+ }
+
+ if (ret == DB_NOTFOUND)
+ ret = 0;
+ if ((t_ret = logc->close(logc, 0)) != 0 && ret == 0)
+ ret = t_ret;
+ return (ret);
+ case REP_ELECT:
+ if (F_ISSET(dbenv, DB_ENV_REP_MASTER)) {
+ R_LOCK(dbenv, &dblp->reginfo);
+ lsn = lp->lsn;
+ R_UNLOCK(dbenv, &dblp->reginfo);
+ MUTEX_LOCK(dbenv, db_rep->mutexp);
+ rep->gen++;
+ MUTEX_UNLOCK(dbenv, db_rep->mutexp);
+ return (__rep_send_message(dbenv,
+ *eidp, REP_NEWMASTER, &lsn, NULL, 0));
+ }
+ MUTEX_LOCK(dbenv, db_rep->mutexp);
+ ret = IN_ELECTION(rep) ? 0 : DB_REP_HOLDELECTION;
+ MUTEX_UNLOCK(dbenv, db_rep->mutexp);
+ return (ret);
+#ifdef NOTYET
+ case REP_FILE: /* TODO */
+ CLIENT_ONLY(dbenv);
+ break;
+ case REP_FILE_REQ:
+ MASTER_ONLY(dbenv);
+ return (__rep_send_file(dbenv, rec, *eidp));
+ break;
+#endif
+ case REP_LOG:
+ case REP_LOG_MORE:
+ CLIENT_ONLY(dbenv);
+ if ((ret = __rep_apply(dbenv, rp, rec)) != 0)
+ return (ret);
+ if (rp->rectype == REP_LOG_MORE) {
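+ /*
+ * The master stopped sending because it hit its
+ * transfer limit; request the rest of the log from
+ * our current end of log.
+ */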
+ MUTEX_LOCK(dbenv, db_rep->db_mutexp);
+ master = rep->master_id;
+ MUTEX_UNLOCK(dbenv, db_rep->db_mutexp);
+ R_LOCK(dbenv, &dblp->reginfo);
+ lsn = lp->lsn;
+ R_UNLOCK(dbenv, &dblp->reginfo);
+ ret = __rep_send_message(dbenv, master,
+ REP_ALL_REQ, &lsn, NULL, 0);
+ }
+ return (ret);
+ case REP_LOG_REQ:
+ MASTER_ONLY(dbenv);
+ if ((ret = dbenv->log_cursor(dbenv, &logc, 0)) != 0)
+ return (ret);
+ memset(&data_dbt, 0, sizeof(data_dbt));
+ lsn = rp->lsn;
+
+ /*
+ * There are three different cases here.
+ * 1. We asked for a particular LSN and got it.
+ * 2. We asked for an LSN of X,0 which is invalid and got the
+ * first log record in a particular file.
+ * 3. We asked for an LSN and it's not found because it is
+ * beyond the end of a log file and we need a NEWFILE msg.
+ */
+ ret = logc->get(logc, &rp->lsn, &data_dbt, DB_SET);
+ cmp = log_compare(&lsn, &rp->lsn);
+
+ if (ret == 0 && cmp == 0) /* Case 1 */
+ ret = __rep_send_message(dbenv, *eidp,
+ REP_LOG, &rp->lsn, &data_dbt, 0);
+ else if (ret == DB_NOTFOUND ||
+ (ret == 0 && cmp < 0 && rp->lsn.offset == 0))
+ /* Cases 2 and 3: Send a NEWFILE message. */
+ ret = __rep_send_message(dbenv, *eidp,
+ REP_NEWFILE, &lsn, NULL, 0);
+
+ if ((t_ret = logc->close(logc, 0)) != 0 && ret == 0)
+ ret = t_ret;
+ return (ret);
+ case REP_NEWSITE:
+ /* We don't hold the rep mutex, and may miscount. */
+ rep->stat.st_newsites++;
+
+ /* This is a rebroadcast; simply tell the application. */
+ if (F_ISSET(dbenv, DB_ENV_REP_MASTER)) {
+ dblp = dbenv->lg_handle;
+ lp = dblp->reginfo.primary;
+ R_LOCK(dbenv, &dblp->reginfo);
+ lsn = lp->lsn;
+ R_UNLOCK(dbenv, &dblp->reginfo);
+ (void)__rep_send_message(dbenv,
+ *eidp, REP_NEWMASTER, &lsn, NULL, 0);
+ }
+ return (DB_REP_NEWSITE);
+ case REP_NEWCLIENT:
+ /*
+ * This message was received and should have resulted in the
+ * application entering the machine ID in its machine table.
+ * We respond to this with an ALIVE to send relevant information
+ * to the new client. But first, broadcast the new client's
+ * record to all the clients.
+ */
+ if ((ret = __rep_send_message(dbenv,
+ DB_EID_BROADCAST, REP_NEWSITE, &rp->lsn, rec, 0)) != 0)
+ return (ret);
+
+ if (F_ISSET(dbenv, DB_ENV_REP_CLIENT))
+ return (0);
+
+ /* FALLTHROUGH */
+ case REP_MASTER_REQ:
+ ANYSITE(dbenv);
+ if (F_ISSET(dbenv, DB_ENV_REP_CLIENT))
+ return (0);
+ dblp = dbenv->lg_handle;
+ lp = dblp->reginfo.primary;
+ R_LOCK(dbenv, &dblp->reginfo);
+ lsn = lp->lsn;
+ R_UNLOCK(dbenv, &dblp->reginfo);
+ return (__rep_send_message(dbenv,
+ *eidp, REP_NEWMASTER, &lsn, NULL, 0));
+ case REP_NEWFILE:
+ CLIENT_ONLY(dbenv);
+ return (__rep_apply(dbenv, rp, rec));
+ case REP_NEWMASTER:
+ ANYSITE(dbenv);
+ if (F_ISSET(dbenv, DB_ENV_REP_MASTER) &&
+ *eidp != dbenv->rep_eid) {
+ /* We don't hold the rep mutex, and may miscount. */
+ rep->stat.st_dupmasters++;
+ return (DB_REP_DUPMASTER);
+ }
+ return (__rep_new_master(dbenv, rp, *eidp));
+ case REP_PAGE: /* TODO */
+ CLIENT_ONLY(dbenv);
+ break;
+ case REP_PAGE_REQ: /* TODO */
+ MASTER_ONLY(dbenv);
+ break;
+ case REP_PLIST: /* TODO */
+ CLIENT_ONLY(dbenv);
+ break;
+ case REP_PLIST_REQ: /* TODO */
+ MASTER_ONLY(dbenv);
+ break;
+ case REP_VERIFY:
+ CLIENT_ONLY(dbenv);
+ DB_ASSERT((F_ISSET(rep, REP_F_RECOVER) &&
+ !IS_ZERO_LSN(lp->verify_lsn)) ||
+ (!F_ISSET(rep, REP_F_RECOVER) &&
+ IS_ZERO_LSN(lp->verify_lsn)));
+ if (IS_ZERO_LSN(lp->verify_lsn))
+ return (0);
+
+ if ((ret = dbenv->log_cursor(dbenv, &logc, 0)) != 0)
+ return (ret);
+ memset(&mylog, 0, sizeof(mylog));
+ if ((ret = logc->get(logc, &rp->lsn, &mylog, DB_SET)) != 0)
+ goto rep_verify_err;
+ if (mylog.size == rec->size &&
+ memcmp(mylog.data, rec->data, rec->size) == 0) {
+ /*
+ * If we're a logs-only client, we can simply truncate
+ * the log to the point where it last agreed with the
+ * master's; otherwise, recover to that point.
+ */
+ R_LOCK(dbenv, &dblp->reginfo);
+ ZERO_LSN(lp->verify_lsn);
+ R_UNLOCK(dbenv, &dblp->reginfo);
+ if (F_ISSET(dbenv, DB_ENV_REP_LOGSONLY)) {
+ INIT_LSN(init_lsn);
+ if ((ret = dbenv->log_flush(dbenv,
+ &rp->lsn)) != 0 ||
+ (ret = __log_vtruncate(dbenv,
+ &rp->lsn, &init_lsn)) != 0)
+ goto rep_verify_err;
+ } else if ((ret = __db_apprec(dbenv, &rp->lsn, 0)) != 0)
+ goto rep_verify_err;
+
+ /*
+ * The log has been truncated (either by __db_apprec or
+ * directly). We want to make sure we're waiting for
+ * the LSN at the new end-of-log, not some later point.
+ */
+ R_LOCK(dbenv, &dblp->reginfo);
+ lp->ready_lsn = lp->lsn;
+ ZERO_LSN(lp->waiting_lsn);
+ R_UNLOCK(dbenv, &dblp->reginfo);
+
+ /*
+ * Discard any log records we have queued; we're
+ * about to re-request them, and can't trust the
+ * ones in the queue.
+ */
+ MUTEX_LOCK(dbenv, db_rep->db_mutexp);
+ if ((ret = db_rep->rep_db->truncate(db_rep->rep_db,
+ NULL, &unused, 0)) != 0) {
+ MUTEX_UNLOCK(dbenv, db_rep->db_mutexp);
+ goto rep_verify_err;
+ }
+ rep->stat.st_log_queued = 0;
+ MUTEX_UNLOCK(dbenv, db_rep->db_mutexp);
+
+ MUTEX_LOCK(dbenv, db_rep->mutexp);
+ F_CLR(rep, REP_F_RECOVER);
+
+ /*
+ * If the master_id is invalid, this means that since
+ * the last record was sent, somebody declared an
+ * election and we may not have a master to request
+ * things of.
+ *
+ * This is not an error; when we find a new master,
+ * we'll re-negotiate where the end of the log is and
+ * try to bring ourselves up to date again anyway.
+ */
+ if ((master = rep->master_id) == DB_EID_INVALID) {
+ DB_ASSERT(IN_ELECTION(rep));
+ MUTEX_UNLOCK(dbenv, db_rep->mutexp);
+ ret = 0;
+ } else {
+ MUTEX_UNLOCK(dbenv, db_rep->mutexp);
+ ret = __rep_send_message(dbenv, master,
+ REP_ALL_REQ, &rp->lsn, NULL, 0);
+ }
+ } else if ((ret =
+ logc->get(logc, &lsn, &mylog, DB_PREV)) == 0) {
+ R_LOCK(dbenv, &dblp->reginfo);
+ lp->verify_lsn = lsn;
+ lp->rcvd_recs = 0;
+ lp->wait_recs = rep->request_gap;
+ R_UNLOCK(dbenv, &dblp->reginfo);
+ ret = __rep_send_message(dbenv,
+ *eidp, REP_VERIFY_REQ, &lsn, NULL, 0);
+ }
+
+rep_verify_err: if ((t_ret = logc->close(logc, 0)) != 0 && ret == 0)
+ ret = t_ret;
+ return (ret);
+ case REP_VERIFY_FAIL:
+ rep->stat.st_outdated++;
+ return (DB_REP_OUTDATED);
+ case REP_VERIFY_REQ:
+ MASTER_ONLY(dbenv);
+ type = REP_VERIFY;
+ if ((ret = dbenv->log_cursor(dbenv, &logc, 0)) != 0)
+ return (ret);
+ d = &data_dbt;
+ memset(d, 0, sizeof(data_dbt));
+ F_SET(logc, DB_LOG_SILENT_ERR);
+ ret = logc->get(logc, &rp->lsn, d, DB_SET);
+ /*
+ * If the LSN was invalid, we might get DB_NOTFOUND, EIO or
+ * almost anything else.  If we get DB_NOTFOUND and the LSN
+ * comes before the first log file we still have, send a
+ * REP_VERIFY_FAIL so that the client can return
+ * DB_REP_OUTDATED.
+ */
+ if (ret == DB_NOTFOUND &&
+ __log_is_outdated(dbenv, rp->lsn.file, &old) == 0 &&
+ old != 0)
+ type = REP_VERIFY_FAIL;
+
+ if (ret != 0)
+ d = NULL;
+
+ ret = __rep_send_message(dbenv, *eidp, type, &rp->lsn, d, 0);
+ if ((t_ret = logc->close(logc, 0)) != 0 && ret == 0)
+ ret = t_ret;
+ return (ret);
+ case REP_VOTE1:
+ if (F_ISSET(dbenv, DB_ENV_REP_MASTER)) {
+#ifdef DIAGNOSTIC
+ if (FLD_ISSET(dbenv->verbose, DB_VERB_REPLICATION))
+ __db_err(dbenv, "Master received vote");
+#endif
+ R_LOCK(dbenv, &dblp->reginfo);
+ lsn = lp->lsn;
+ R_UNLOCK(dbenv, &dblp->reginfo);
+ return (__rep_send_message(dbenv,
+ *eidp, REP_NEWMASTER, &lsn, NULL, 0));
+ }
+
+ vi = (REP_VOTE_INFO *)rec->data;
+ MUTEX_LOCK(dbenv, db_rep->mutexp);
+
+ /*
+ * If you get a vote and you're not in an election, simply
+ * return an indicator to hold an election which will trigger
+ * this site to send its vote again.
+ */
+ if (!IN_ELECTION(rep)) {
+#ifdef DIAGNOSTIC
+ if (FLD_ISSET(dbenv->verbose, DB_VERB_REPLICATION))
+ __db_err(dbenv,
+ "Not in election, but received vote1");
+#endif
+ ret = DB_REP_HOLDELECTION;
+ goto unlock;
+ }
+
+ if (F_ISSET(rep, REP_F_EPHASE2))
+ goto unlock;
+
+ /* Check if this site knows about more sites than we do. */
+ if (vi->nsites > rep->nsites)
+ rep->nsites = vi->nsites;
+
+ /* Check if we've heard from this site already. */
+ tally = R_ADDR((REGINFO *)dbenv->reginfo, rep->tally_off);
+ for (i = 0; i < rep->sites; i++) {
+ if (tally[i] == *eidp)
+ /* Duplicate vote. */
+ goto unlock;
+ }
+
+ /*
+ * We are keeping vote, let's see if that changes our count of
+ * the number of sites.
+ */
+ if (rep->sites + 1 > rep->nsites)
+ rep->nsites = rep->sites + 1;
+ if (rep->nsites > rep->asites &&
+ (ret = __rep_grow_sites(dbenv, rep->nsites)) != 0)
+ goto unlock;
+
+ tally[rep->sites] = *eidp;
+ rep->sites++;
+
+ /*
+ * Change winners if the incoming record has a higher
+ * priority, or an equal priority but a larger LSN, or
+ * an equal priority and LSN but higher "tiebreaker" value.
+ */
+#ifdef DIAGNOSTIC
+ if (FLD_ISSET(dbenv->verbose, DB_VERB_REPLICATION)) {
+ __db_err(dbenv,
+ "%s(eid)%d (pri)%d (gen)%d (sites)%d [%d,%d]",
+ "Existing vote: ",
+ rep->winner, rep->w_priority, rep->w_gen,
+ rep->sites, rep->w_lsn.file, rep->w_lsn.offset);
+ __db_err(dbenv,
+ "Incoming vote: (eid)%d (pri)%d (gen)%d [%d,%d]",
+ *eidp, vi->priority, rp->gen, rp->lsn.file,
+ rp->lsn.offset);
+ }
+#endif
+ cmp = log_compare(&rp->lsn, &rep->w_lsn);
+ if (vi->priority > rep->w_priority ||
+ (vi->priority != 0 && vi->priority == rep->w_priority &&
+ (cmp > 0 ||
+ (cmp == 0 && vi->tiebreaker > rep->w_tiebreaker)))) {
+#ifdef DIAGNOSTIC
+ if (FLD_ISSET(dbenv->verbose, DB_VERB_REPLICATION))
+ __db_err(dbenv, "Accepting new vote");
+#endif
+ rep->winner = *eidp;
+ rep->w_priority = vi->priority;
+ rep->w_lsn = rp->lsn;
+ rep->w_gen = rp->gen;
+ }
+ master = rep->winner;
+ lsn = rep->w_lsn;
+ done = rep->sites == rep->nsites && rep->w_priority != 0;
+ if (done) {
+#ifdef DIAGNOSTIC
+ if (FLD_ISSET(dbenv->verbose, DB_VERB_REPLICATION)) {
+ __db_err(dbenv, "Phase1 election done");
+ __db_err(dbenv, "Voting for %d%s",
+ master, master == rep->eid ? "(self)" : "");
+ }
+#endif
+ F_CLR(rep, REP_F_EPHASE1);
+ F_SET(rep, REP_F_EPHASE2);
+ }
+
+ if (done && master == rep->eid) {
+ rep->votes++;
+ MUTEX_UNLOCK(dbenv, db_rep->mutexp);
+ return (0);
+ }
+ MUTEX_UNLOCK(dbenv, db_rep->mutexp);
+
+ /* Vote for someone else. */
+ if (done)
+ return (__rep_send_message(dbenv,
+ master, REP_VOTE2, NULL, NULL, 0));
+
+ /* Election is still going on. */
+ break;
+ case REP_VOTE2:
+#ifdef DIAGNOSTIC
+ if (FLD_ISSET(dbenv->verbose, DB_VERB_REPLICATION))
+ __db_err(dbenv, "We received a vote%s",
+ F_ISSET(dbenv, DB_ENV_REP_MASTER) ?
+ " (master)" : "");
+#endif
+ if (F_ISSET(dbenv, DB_ENV_REP_MASTER)) {
+ R_LOCK(dbenv, &dblp->reginfo);
+ lsn = lp->lsn;
+ R_UNLOCK(dbenv, &dblp->reginfo);
+ rep->stat.st_elections_won++;
+ return (__rep_send_message(dbenv,
+ *eidp, REP_NEWMASTER, &lsn, NULL, 0));
+ }
+
+ MUTEX_LOCK(dbenv, db_rep->mutexp);
+
+ /* If we have priority 0, we should never get a vote. */
+ DB_ASSERT(rep->priority != 0);
+
+ if (!IN_ELECTION(rep)) {
+#ifdef DIAGNOSTIC
+ if (FLD_ISSET(dbenv->verbose, DB_VERB_REPLICATION))
+ __db_err(dbenv, "Not in election, got vote");
+#endif
+ MUTEX_UNLOCK(dbenv, db_rep->mutexp);
+ return (DB_REP_HOLDELECTION);
+ }
+ /* avoid counting duplicates. */
+ rep->votes++;
+ done = rep->votes > rep->nsites / 2;
+ if (done) {
+ rep->master_id = rep->eid;
+ rep->gen = rep->w_gen + 1;
+ ELECTION_DONE(rep);
+ F_CLR(rep, REP_F_UPGRADE);
+ F_SET(rep, REP_F_MASTER);
+ *eidp = rep->master_id;
+#ifdef DIAGNOSTIC
+ if (FLD_ISSET(dbenv->verbose, DB_VERB_REPLICATION))
+ __db_err(dbenv,
+ "Got enough votes to win; election done; winner is %d",
+ rep->master_id);
+#endif
+ }
+ MUTEX_UNLOCK(dbenv, db_rep->mutexp);
+ if (done) {
+ R_LOCK(dbenv, &dblp->reginfo);
+ lsn = lp->lsn;
+ R_UNLOCK(dbenv, &dblp->reginfo);
+
+ /* Declare me the winner. */
+#ifdef DIAGNOSTIC
+ if (FLD_ISSET(dbenv->verbose, DB_VERB_REPLICATION))
+ __db_err(dbenv, "I won, sending NEWMASTER");
+#endif
+ rep->stat.st_elections_won++;
+ if ((ret = __rep_send_message(dbenv, DB_EID_BROADCAST,
+ REP_NEWMASTER, &lsn, NULL, 0)) != 0)
+ break;
+ return (DB_REP_NEWMASTER);
+ }
+ break;
+ default:
+ __db_err(dbenv,
+ "DB_ENV->rep_process_message: unknown replication message: type %lu",
+ (u_long)rp->rectype);
+ return (EINVAL);
+ }
+
+ return (0);
+
+unlock: MUTEX_UNLOCK(dbenv, db_rep->mutexp);
+ return (ret);
+}
+
+/*
+ * __rep_apply --
+ *
+ * Handle incoming log records on a client, applying when possible and
+ * entering into the bookkeeping table otherwise. This is the guts of
+ * the routine that handles the state machine that describes how we
+ * process and manage incoming log records.
+ */
+static int
+__rep_apply(dbenv, rp, rec)
+ DB_ENV *dbenv;
+ REP_CONTROL *rp;
+ DBT *rec;
+{
+ __dbreg_register_args dbreg_args;
+ __txn_ckp_args ckp_args;
+ DB_REP *db_rep;
+ DBT control_dbt, key_dbt, lsn_dbt, nextrec_dbt, rec_dbt;
+ DB *dbp;
+ DBC *dbc;
+ DB_LOG *dblp;
+ DB_LSN ckp_lsn, lsn, newfile_lsn, next_lsn, waiting_lsn;
+ LOG *lp;
+ REP *rep;
+ REP_CONTROL lsn_rc;
+ u_int32_t rectype, txnid;
+ int cmp, do_req, eid, have_mutex, ret, t_ret;
+
+ db_rep = dbenv->rep_handle;
+ rep = db_rep->region;
+ dbp = db_rep->rep_db;
+ dbc = NULL;
+ have_mutex = ret = 0;
+ memset(&control_dbt, 0, sizeof(control_dbt));
+ memset(&rec_dbt, 0, sizeof(rec_dbt));
+
+ /*
+ * If this is a log record and it's the next one in line, simply
+ * write it to the log. If it's a "normal" log record, i.e., not
+ * a COMMIT or CHECKPOINT or something that needs immediate processing,
+ * just return. If it's a COMMIT, CHECKPOINT or LOG_REGISTER (i.e.,
+ * not SIMPLE), handle it now. If it's a NEWFILE record, then we
+ * have to be prepared to deal with a logfile change.
+ */
+ dblp = dbenv->lg_handle;
+ R_LOCK(dbenv, &dblp->reginfo);
+ lp = dblp->reginfo.primary;
+ cmp = log_compare(&rp->lsn, &lp->ready_lsn);
+
+ /*
+ * This is written to assume that you don't end up with a lot of
+ * records after a hole. That is, it optimizes for the case where
+ * there is only a record or two after a hole. If you have a lot
+ * of records after a hole, what you'd really want to do is write
+ * all of them and then process all the commits, checkpoints, etc.
+ * together. That is more complicated processing that we can add
+ * later if necessary.
+ *
+ * That said, I really don't want to do db operations holding the
+ * log mutex, so the synchronization here is tricky.
+ */
+ if (cmp == 0) {
+ /* We got the log record that we are expecting. */
+ if (rp->rectype == REP_NEWFILE) {
+newfile: ret = __rep_newfile(dbenv, rp, rec, &lp->ready_lsn);
+
+ /* Make this evaluate to a simple rectype. */
+ rectype = 0;
+ } else {
+ DB_ASSERT(log_compare(&rp->lsn, &lp->lsn) == 0);
+ ret = __log_rep_put(dbenv, &rp->lsn, rec);
+ lp->ready_lsn = lp->lsn;
+ memcpy(&rectype, rec->data, sizeof(rectype));
+ if (ret == 0)
+ /*
+ * We may miscount if we race, since we
+ * don't currently hold the rep mutex.
+ */
+ rep->stat.st_log_records++;
+ }
+ while (ret == 0 && IS_SIMPLE(rectype) &&
+ log_compare(&lp->ready_lsn, &lp->waiting_lsn) == 0) {
+ /*
+ * We just filled in a gap in the log record stream.
+ * Write subsequent records to the log.
+ */
+gap_check: lp->wait_recs = 0;
+ lp->rcvd_recs = 0;
+ R_UNLOCK(dbenv, &dblp->reginfo);
+ if (have_mutex == 0) {
+ MUTEX_LOCK(dbenv, db_rep->db_mutexp);
+ have_mutex = 1;
+ }
+ if (dbc == NULL &&
+ (ret = dbp->cursor(dbp, NULL, &dbc, 0)) != 0)
+ goto err;
+
+ /* The DBTs need to persist through another call. */
+ F_SET(&control_dbt, DB_DBT_REALLOC);
+ F_SET(&rec_dbt, DB_DBT_REALLOC);
+ if ((ret = dbc->c_get(dbc,
+ &control_dbt, &rec_dbt, DB_RMW | DB_FIRST)) != 0)
+ goto err;
+
+ rp = (REP_CONTROL *)control_dbt.data;
+ rec = &rec_dbt;
+ memcpy(&rectype, rec->data, sizeof(rectype));
+ R_LOCK(dbenv, &dblp->reginfo);
+ /*
+ * We need to check again, because it's possible that
+ * some other thread of control changed the waiting_lsn
+ * or removed that record from the database.
+ */
+ if (log_compare(&lp->ready_lsn, &rp->lsn) == 0) {
+ if (rp->rectype != REP_NEWFILE) {
+ DB_ASSERT(log_compare
+ (&rp->lsn, &lp->lsn) == 0);
+ ret = __log_rep_put(dbenv,
+ &rp->lsn, rec);
+ lp->ready_lsn = lp->lsn;
+
+ /*
+ * We may miscount if we race, since we
+ * don't currently hold the rep mutex.
+ */
+ if (ret == 0)
+ rep->stat.st_log_records++;
+ } else {
+ ret = __rep_newfile(dbenv,
+ rp, rec, &lp->ready_lsn);
+ rectype = 0;
+ }
+ waiting_lsn = lp->waiting_lsn;
+ R_UNLOCK(dbenv, &dblp->reginfo);
+ if ((ret = dbc->c_del(dbc, 0)) != 0)
+ goto err;
+
+ /*
+ * We may miscount, as we don't hold the rep
+ * mutex.
+ */
+ --rep->stat.st_log_queued;
+
+ /*
+ * Update waiting_lsn. We need to move it
+ * forward to the LSN of the next record
+ * in the queue.
+ */
+ memset(&lsn_dbt, 0, sizeof(lsn_dbt));
+ F_SET(&lsn_dbt, DB_DBT_USERMEM);
+ lsn_dbt.data = &lsn_rc;
+ lsn_dbt.ulen = sizeof(lsn_rc);
+ memset(&lsn_rc, 0, sizeof(lsn_rc));
+
+ /*
+ * If the next item in the database is a log
+ * record--the common case--we're not
+ * interested in its contents, just in its LSN.
+ * If it's a newfile message, though, the
+ * data field may be the LSN of the last
+ * record in the old file, and we need to use
+ * that to determine whether or not there's
+ * a gap.
+ *
+ * Optimize both these cases by doing a partial
+ * get of the data item. If it's a newfile
+ * record, we'll get the whole LSN, and if
+ * it's not, we won't waste time allocating.
+ */
+ memset(&nextrec_dbt, 0, sizeof(nextrec_dbt));
+ F_SET(&nextrec_dbt,
+ DB_DBT_USERMEM | DB_DBT_PARTIAL);
+ nextrec_dbt.ulen =
+ nextrec_dbt.dlen = sizeof(newfile_lsn);
+ ZERO_LSN(newfile_lsn);
+ nextrec_dbt.data = &newfile_lsn;
+
+ ret = dbc->c_get(dbc,
+ &lsn_dbt, &nextrec_dbt, DB_NEXT);
+ if (ret != DB_NOTFOUND && ret != 0)
+ goto err;
+
+ R_LOCK(dbenv, &dblp->reginfo);
+ if (ret == DB_NOTFOUND) {
+ /*
+ * Do a quick double-check to make
+ * sure waiting_lsn hasn't changed.
+ * It's possible that between the
+ * DB_NOTFOUND return and the R_LOCK,
+ * some record was added to the
+ * database, and we don't want to lose
+ * sight of the fact that it's there.
+ */
+ if (log_compare(&waiting_lsn,
+ &lp->waiting_lsn) == 0)
+ ZERO_LSN(
+ lp->waiting_lsn);
+
+ /*
+ * Whether or not the current record is
+ * simple, there's no next one, and
+ * therefore we haven't got anything
+ * else to do right now. Break out.
+ */
+ break;
+ }
+
+ DB_ASSERT(lsn_dbt.size == sizeof(lsn_rc));
+
+ /*
+ * NEWFILE records have somewhat convoluted
+ * semantics, so there are five cases
+ * pertaining to what the newly-gotten record
+ * is and what we want to do about it.
+ *
+ * 1) This isn't a NEWFILE record. Advance
+ * waiting_lsn and proceed.
+ *
+ * 2) NEWFILE, no LSN stored as the datum,
+ * lsn_rc.lsn == ready_lsn. The NEWFILE
+ * record is next, so set waiting_lsn =
+ * ready_lsn.
+ *
+ * 3) NEWFILE, no LSN stored as the datum, but
+ * lsn_rc.lsn > ready_lsn. There's still a
+ * gap; set waiting_lsn = lsn_rc.lsn.
+ *
+ * 4) NEWFILE, newfile_lsn in datum, and it's <
+ * ready_lsn. (If the datum is non-empty,
+ * it's the LSN of the last record in a log
+ * file, not the end of the log, and
+ * lsn_rc.lsn is the LSN of the start of
+ * the new file--we didn't have the end of
+ * the old log handy when we sent the
+ * record.) No gap--we're ready to
+ * proceed. Set both waiting and ready_lsn
+ * to lsn_rc.lsn.
+ *
+ * 5) NEWFILE, newfile_lsn in datum, and it's >=
+ * ready_lsn. We're still missing at
+ * least one record; set waiting_lsn,
+ * but not ready_lsn, to lsn_rc.lsn.
+ */
+ if (lsn_rc.rectype == REP_NEWFILE &&
+ nextrec_dbt.size > 0 && log_compare(
+ &newfile_lsn, &lp->ready_lsn) < 0)
+ /* Case 4. */
+ lp->ready_lsn =
+ lp->waiting_lsn = lsn_rc.lsn;
+ else {
+ /* Cases 1, 2, 3, and 5. */
+ DB_ASSERT(log_compare(&lsn_rc.lsn,
+ &lp->ready_lsn) >= 0);
+ lp->waiting_lsn = lsn_rc.lsn;
+ }
+
+ /*
+ * If the current rectype is simple, we're
+ * done with it, and we should check and see
+ * whether the next record queued is the next
+ * one we're ready for. This is just the loop
+ * condition, so we continue.
+ *
+ * Otherwise, we need to break out of this loop
+ * and process this record first.
+ */
+ if (!IS_SIMPLE(rectype))
+ break;
+ }
+ }
+
+ /*
+ * Check if we're at a gap in the table and if so, whether we
+ * need to ask for any records.
+ */
+ do_req = 0;
+ if (!IS_ZERO_LSN(lp->waiting_lsn) &&
+ log_compare(&lp->ready_lsn, &lp->waiting_lsn) != 0) {
+ next_lsn = lp->ready_lsn;
+ do_req = ++lp->rcvd_recs >= lp->wait_recs;
+ if (do_req) {
+ lp->wait_recs = rep->request_gap;
+ lp->rcvd_recs = 0;
+ }
+ }
+
+ R_UNLOCK(dbenv, &dblp->reginfo);
+ if (dbc != NULL) {
+ if ((ret = dbc->c_close(dbc)) != 0)
+ goto err;
+ MUTEX_UNLOCK(dbenv, db_rep->db_mutexp);
+ have_mutex = 0;
+ }
+ dbc = NULL;
+
+ if (do_req) {
+ MUTEX_LOCK(dbenv, db_rep->mutexp);
+ eid = db_rep->region->master_id;
+ MUTEX_UNLOCK(dbenv, db_rep->mutexp);
+ if (eid != DB_EID_INVALID) {
+ rep->stat.st_log_requested++;
+ if ((ret = __rep_send_message(dbenv,
+ eid, REP_LOG_REQ, &next_lsn, NULL, 0)) != 0)
+ goto err;
+ }
+ }
+ } else if (cmp > 0) {
+ /*
+ * The LSN is higher than the one we were waiting for.
+ * If it is a NEWFILE message, this may not mean that
+ * there's a gap; in some cases, NEWFILE messages contain
+ * the LSN of the beginning of the new file instead
+ * of the end of the old.
+ *
+ * In these cases, the rec DBT will contain the last LSN
+ * of the old file, so we can tell whether there's a gap.
+ */
+ if (rp->rectype == REP_NEWFILE &&
+ rp->lsn.file == lp->ready_lsn.file + 1 &&
+ rp->lsn.offset == 0) {
+ DB_ASSERT(rec != NULL && rec->data != NULL &&
+ rec->size == sizeof(DB_LSN));
+ memcpy(&lsn, rec->data, sizeof(DB_LSN));
+ if (log_compare(&lp->ready_lsn, &lsn) > 0)
+ /*
+ * The last LSN in the old file is smaller
+ * than the one we're expecting, so there's
+ * no gap--the one we're expecting just
+ * doesn't exist.
+ */
+ goto newfile;
+ }
+
+ /*
+ * This record isn't in sequence; add it to the table and
+ * update waiting_lsn if necessary.
+ */
+ memset(&key_dbt, 0, sizeof(key_dbt));
+ key_dbt.data = rp;
+ key_dbt.size = sizeof(*rp);
+ next_lsn = lp->lsn;
+ do_req = 0;
+ if (lp->wait_recs == 0) {
+ /*
+ * This is a new gap. Initialize the number of
+ * records that we should wait before requesting
+ * that it be resent. We grab the limits out of
+ * the rep without the mutex.
+ */
+ lp->wait_recs = rep->request_gap;
+ lp->rcvd_recs = 0;
+ }
+
+ if (++lp->rcvd_recs >= lp->wait_recs) {
+ /*
+ * If we've waited long enough, request the record
+ * and double the wait interval.
+ */
+ do_req = 1;
+ lp->wait_recs <<= 1;
+ lp->rcvd_recs = 0;
+ if (lp->wait_recs > rep->max_gap)
+ lp->wait_recs = rep->max_gap;
+ }
+ R_UNLOCK(dbenv, &dblp->reginfo);
+
+ MUTEX_LOCK(dbenv, db_rep->db_mutexp);
+ ret = dbp->put(dbp, NULL, &key_dbt, rec, 0);
+ rep->stat.st_log_queued++;
+ rep->stat.st_log_queued_total++;
+ if (rep->stat.st_log_queued_max < rep->stat.st_log_queued)
+ rep->stat.st_log_queued_max = rep->stat.st_log_queued;
+ MUTEX_UNLOCK(dbenv, db_rep->db_mutexp);
+
+ if (ret != 0)
+ return (ret);
+
+ R_LOCK(dbenv, &dblp->reginfo);
+ if (IS_ZERO_LSN(lp->waiting_lsn) ||
+ log_compare(&rp->lsn, &lp->waiting_lsn) < 0)
+ lp->waiting_lsn = rp->lsn;
+ R_UNLOCK(dbenv, &dblp->reginfo);
+
+ if (do_req) {
+ /* Request the LSN we are still waiting for. */
+ MUTEX_LOCK(dbenv, db_rep->mutexp);
+
+ /* May as well do this after we grab the mutex. */
+ eid = db_rep->region->master_id;
+
+ /*
+ * If the master_id is invalid, this means that since
+ * the last record was sent, somebody declared an
+ * election and we may not have a master to request
+ * things of.
+ *
+ * This is not an error; when we find a new master,
+ * we'll re-negotiate where the end of the log is and
+ * try to bring ourselves up to date again anyway.
+ */
+ if (eid != DB_EID_INVALID) {
+ rep->stat.st_log_requested++;
+ MUTEX_UNLOCK(dbenv, db_rep->mutexp);
+ ret = __rep_send_message(dbenv,
+ eid, REP_LOG_REQ, &next_lsn, NULL, 0);
+ } else
+ MUTEX_UNLOCK(dbenv, db_rep->mutexp);
+ }
+ return (ret);
+ } else {
+ R_UNLOCK(dbenv, &dblp->reginfo);
+
+ /*
+ * We may miscount if we race, since we
+ * don't currently hold the rep mutex.
+ */
+ rep->stat.st_log_duplicated++;
+ }
+ if (ret != 0 || cmp < 0 || (cmp == 0 && IS_SIMPLE(rectype)))
+ goto done;
+
+ /*
+ * If we got here, then we've got a log record in rp and rec that
+ * we need to process.
+ */
+ switch(rectype) {
+ case DB___dbreg_register:
+ /*
+ * DB opens occur in the context of a transaction, so we can
+ * simply handle them when we process the transaction. Closes,
+ * however, are not transaction-protected, so we have to
+ * handle them here.
+ *
+ * Note that it should be unsafe for the master to do a close
+ * of a file that was opened in an active transaction, so we
+ * should be guaranteed to get the ordering right.
+ */
+ memcpy(&txnid, (u_int8_t *)rec->data +
+ ((u_int8_t *)&dbreg_args.txnid - (u_int8_t *)&dbreg_args),
+ sizeof(u_int32_t));
+ if (txnid == TXN_INVALID &&
+ !F_ISSET(dbenv, DB_ENV_REP_LOGSONLY))
+ ret = __db_dispatch(dbenv, dbenv->recover_dtab,
+ dbenv->recover_dtab_size, rec, &rp->lsn,
+ DB_TXN_APPLY, NULL);
+ break;
+ case DB___txn_ckp:
+ /* Sync the memory pool. */
+ memcpy(&ckp_lsn, (u_int8_t *)rec->data +
+ ((u_int8_t *)&ckp_args.ckp_lsn - (u_int8_t *)&ckp_args),
+ sizeof(DB_LSN));
+ if (!F_ISSET(dbenv, DB_ENV_REP_LOGSONLY))
+ ret = dbenv->memp_sync(dbenv, &ckp_lsn);
+ else
+ /*
+ * We ought to make sure the logs on a logs-only
+ * replica get flushed now and again.
+ */
+ ret = dbenv->log_flush(dbenv, &ckp_lsn);
+ break;
+ case DB___txn_regop:
+ if (!F_ISSET(dbenv, DB_ENV_REP_LOGSONLY))
+ do {
+ /*
+ * If an application is doing app-specific
+ * recovery and acquires locks while applying
+ * a transaction, it can deadlock. Any other
+ * locks held by this thread should have been
+ * discarded in the __rep_process_txn error
+ * path, so if we simply retry, we should
+ * eventually succeed.
+ */
+ ret = __rep_process_txn(dbenv, rec);
+ } while (ret == DB_LOCK_DEADLOCK);
+ break;
+ default:
+ goto err;
+ }
+
+ /* Check if we need to go back into the table. */
+ if (ret == 0) {
+ R_LOCK(dbenv, &dblp->reginfo);
+ if (log_compare(&lp->ready_lsn, &lp->waiting_lsn) == 0)
+ goto gap_check;
+ R_UNLOCK(dbenv, &dblp->reginfo);
+ }
+
+done:
+err: if (dbc != NULL && (t_ret = dbc->c_close(dbc)) != 0 && ret == 0)
+ ret = t_ret;
+ if (have_mutex)
+ MUTEX_UNLOCK(dbenv, db_rep->db_mutexp);
+
+ if (control_dbt.data != NULL)
+ __os_ufree(dbenv, control_dbt.data);
+ if (rec_dbt.data != NULL)
+ __os_ufree(dbenv, rec_dbt.data);
+
+ return (ret);
+}
+
+/*
+ * __rep_process_txn --
+ *
+ * This is the routine that actually gets a transaction ready for
+ * processing.
+ *
+ * PUBLIC: int __rep_process_txn __P((DB_ENV *, DBT *));
+ */
+int
+__rep_process_txn(dbenv, rec)
+ DB_ENV *dbenv;
+ DBT *rec;
+{
+ DBT data_dbt;
+ DB_LOCKREQ req, *lvp;
+ DB_LOGC *logc;
+ DB_LSN prev_lsn, *lsnp;
+ DB_REP *db_rep;
+ LSN_COLLECTION lc;
+ REP *rep;
+ __txn_regop_args *txn_args;
+ __txn_xa_regop_args *prep_args;
+ u_int32_t lockid, op, rectype;
+ int i, ret, t_ret;
+ int (**dtab)__P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ size_t dtabsize;
+ void *txninfo;
+
+ db_rep = dbenv->rep_handle;
+ rep = db_rep->region;
+
+ logc = NULL;
+ txninfo = NULL;
+ memset(&data_dbt, 0, sizeof(data_dbt));
+ if (F_ISSET(dbenv, DB_ENV_THREAD))
+ F_SET(&data_dbt, DB_DBT_REALLOC);
+
+ /*
+ * There are two phases: First, we have to traverse
+ * backwards through the log records gathering the list
+ * of all LSNs in the transaction. Once we have this information,
+ * we can loop through, acquire the locks we need for each record,
+ * and then apply it.
+ */
+ dtab = NULL;
+
+ /*
+ * We may be passed a prepare (if we're restoring a prepare
+ * on upgrade) instead of a commit (the common case).
+ * Check which and behave appropriately.
+ */
+ memcpy(&rectype, rec->data, sizeof(rectype));
+ memset(&lc, 0, sizeof(lc));
+ if (rectype == DB___txn_regop) {
+ /*
+ * We're the end of a transaction. Make sure this is
+ * really a commit and not an abort!
+ */
+ if ((ret = __txn_regop_read(dbenv, rec->data, &txn_args)) != 0)
+ return (ret);
+ op = txn_args->opcode;
+ prev_lsn = txn_args->prev_lsn;
+ __os_free(dbenv, txn_args);
+ if (op != TXN_COMMIT)
+ return (0);
+ } else {
+ /* We're a prepare. */
+ DB_ASSERT(rectype == DB___txn_xa_regop);
+
+ if ((ret =
+ __txn_xa_regop_read(dbenv, rec->data, &prep_args)) != 0)
+ return (ret);
+ prev_lsn = prep_args->prev_lsn;
+ __os_free(dbenv, prep_args);
+ }
+
+ /* Phase 1. Get a list of the LSNs in this transaction, and sort it. */
+ if ((ret = __rep_collect_txn(dbenv, &prev_lsn, &lc)) != 0)
+ return (ret);
+ qsort(lc.array, lc.nlsns, sizeof(DB_LSN), __rep_lsn_cmp);
+
+ if ((ret = dbenv->lock_id(dbenv, &lockid)) != 0)
+ goto err;
+
+ /* Initialize the getpgno dispatch table. */
+ if ((ret = __rep_lockpgno_init(dbenv, &dtab, &dtabsize)) != 0)
+ goto err;
+
+ /*
+ * The set of records for a transaction may include dbreg_register
+ * records. Create a txnlist so that they can keep track of file
+ * state between records.
+ */
+ if ((ret = __db_txnlist_init(dbenv, 0, 0, NULL, &txninfo)) != 0)
+ goto err;
+
+ /* Phase 2: Apply updates. */
+ if ((ret = dbenv->log_cursor(dbenv, &logc, 0)) != 0)
+ goto err;
+ for (lsnp = &lc.array[0], i = 0; i < lc.nlsns; i++, lsnp++) {
+ if ((ret = __rep_lockpages(dbenv,
+ dtab, dtabsize, lsnp, NULL, NULL, lockid)) != 0)
+ goto err;
+ if ((ret = logc->get(logc, lsnp, &data_dbt, DB_SET)) != 0)
+ goto err;
+ if ((ret = __db_dispatch(dbenv, dbenv->recover_dtab,
+ dbenv->recover_dtab_size, &data_dbt, lsnp,
+ DB_TXN_APPLY, txninfo)) != 0)
+ goto err;
+ }
+
+err: memset(&req, 0, sizeof(req));
+ req.op = DB_LOCK_PUT_ALL;
+ if ((t_ret = dbenv->lock_vec(dbenv, lockid,
+ DB_LOCK_FREE_LOCKER, &req, 1, &lvp)) != 0 && ret == 0)
+ ret = t_ret;
+
+ if (lc.nalloc != 0)
+ __os_free(dbenv, lc.array);
+
+ if ((t_ret =
+ dbenv->lock_id_free(dbenv, lockid)) != 0 && ret == 0)
+ ret = t_ret;
+
+ if (logc != NULL && (t_ret = logc->close(logc, 0)) != 0 && ret == 0)
+ ret = t_ret;
+
+ if (txninfo != NULL)
+ __db_txnlist_end(dbenv, txninfo);
+
+ if (F_ISSET(&data_dbt, DB_DBT_REALLOC) && data_dbt.data != NULL)
+ __os_ufree(dbenv, data_dbt.data);
+
+ if (dtab != NULL)
+ __os_free(dbenv, dtab);
+
+ if (ret == 0)
+ /*
+ * We don't hold the rep mutex, and could miscount if we race.
+ */
+ rep->stat.st_txns_applied++;
+
+ return (ret);
+}
+
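A standalone toy (not part of the imported source) of the two-phase shape __rep_process_txn describes above: collect the transaction's LSNs by walking backwards through prev links, sort them into log order, then apply forward. The record layout and "apply" step below are invented; only the collect/sort/apply structure mirrors the code.

#include <stdio.h>
#include <stdlib.h>

struct toy_lsn {
	unsigned int file, offset;
};

struct toy_rec {
	struct toy_lsn lsn;
	int prev;		/* index of previous record, -1 at the start */
};

static int
toy_lsn_cmp(const void *a, const void *b)
{
	const struct toy_lsn *x = a, *y = b;

	if (x->file != y->file)
		return (x->file < y->file ? -1 : 1);
	if (x->offset != y->offset)
		return (x->offset < y->offset ? -1 : 1);
	return (0);
}

int
main(void)
{
	/* A three-record "transaction", most recent record last. */
	struct toy_rec log[] = {
		{ {1, 100}, -1 }, { {1, 180}, 0 }, { {1, 260}, 1 }
	};
	struct toy_lsn lc[3];
	int i, n;

	/* Phase 1: collect LSNs by following prev links from the commit. */
	for (n = 0, i = 2; i != -1; i = log[i].prev)
		lc[n++] = log[i].lsn;

	/* Sort into ascending log order, as qsort does with __rep_lsn_cmp. */
	qsort(lc, n, sizeof(lc[0]), toy_lsn_cmp);

	/* Phase 2: apply in order. */
	for (i = 0; i < n; i++)
		printf("apply [%u][%u]\n", lc[i].file, lc[i].offset);
	return (0);
}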
+/*
+ * __rep_collect_txn
+ * Recursive function that will let us visit every entry in a transaction
+ * chain including all child transactions so that we can then apply
+ * the entire transaction family at once.
+ */
+static int
+__rep_collect_txn(dbenv, lsnp, lc)
+ DB_ENV *dbenv;
+ DB_LSN *lsnp;
+ LSN_COLLECTION *lc;
+{
+ __txn_child_args *argp;
+ DB_LOGC *logc;
+ DB_LSN c_lsn;
+ DBT data;
+ u_int32_t rectype;
+ int nalloc, ret, t_ret;
+
+ memset(&data, 0, sizeof(data));
+ F_SET(&data, DB_DBT_REALLOC);
+
+ if ((ret = dbenv->log_cursor(dbenv, &logc, 0)) != 0)
+ return (ret);
+
+ while (!IS_ZERO_LSN(*lsnp) &&
+ (ret = logc->get(logc, lsnp, &data, DB_SET)) == 0) {
+ memcpy(&rectype, data.data, sizeof(rectype));
+ if (rectype == DB___txn_child) {
+ if ((ret = __txn_child_read(dbenv,
+ data.data, &argp)) != 0)
+ goto err;
+ c_lsn = argp->c_lsn;
+ *lsnp = argp->prev_lsn;
+ __os_free(dbenv, argp);
+ ret = __rep_collect_txn(dbenv, &c_lsn, lc);
+ } else {
+ if (lc->nalloc < lc->nlsns + 1) {
+ nalloc = lc->nalloc == 0 ? 20 : lc->nalloc * 2;
+ if ((ret = __os_realloc(dbenv,
+ nalloc * sizeof(DB_LSN), &lc->array)) != 0)
+ goto err;
+ lc->nalloc = nalloc;
+ }
+ lc->array[lc->nlsns++] = *lsnp;
+
+ /*
+ * Explicitly copy the previous lsn. The record
+ * starts with a u_int32_t record type, a u_int32_t
+ * txn id, and then the DB_LSN (prev_lsn) that we
+ * want. We copy explicitly because we have no idea
+ * what kind of record this is.
+ */
+ memcpy(lsnp, (u_int8_t *)data.data +
+ sizeof(u_int32_t) + sizeof(u_int32_t),
+ sizeof(DB_LSN));
+ }
+
+ if (ret != 0)
+ goto err;
+ }
+
+err: if ((t_ret = logc->close(logc, 0)) != 0 && ret == 0)
+ ret = t_ret;
+ if (data.data != NULL)
+ __os_ufree(dbenv, data.data);
+ return (ret);
+}
+
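A standalone toy of the traversal in __rep_collect_txn: follow prev pointers backwards through a transaction's records and, whenever a child record is met, recurse down the child's chain before carrying on with the parent. The node layout below is invented for the sketch.

#include <stdio.h>

struct toy_node {
	const char *name;
	struct toy_node *prev;		/* earlier record in the same txn */
	struct toy_node *child;		/* child txn chain, or NULL */
};

static void
collect(struct toy_node *np)
{
	for (; np != NULL; np = np->prev) {
		if (np->child != NULL)
			collect(np->child);	/* gather the child family */
		else
			printf("collected %s\n", np->name);
	}
}

int
main(void)
{
	struct toy_node c1 = { "child-put", NULL, NULL };
	struct toy_node p1 = { "parent-put", NULL, NULL };
	struct toy_node p2 = { "txn_child", &p1, &c1 };
	struct toy_node p3 = { "parent-del", &p2, NULL };

	collect(&p3);		/* start from the record before the commit */
	return (0);
}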
+/*
+ * __rep_lsn_cmp --
+ * qsort-type-compatible wrapper for log_compare.
+ */
+static int
+__rep_lsn_cmp(lsn1, lsn2)
+ const void *lsn1, *lsn2;
+{
+
+ return (log_compare((DB_LSN *)lsn1, (DB_LSN *)lsn2));
+}
+
+/*
+ * __rep_newfile --
+ * NEWFILE messages can contain either the last LSN of the old file
+ * or the first LSN of the new one, depending on which we have available
+ * when the message is sent. When applying a NEWFILE message, make sure
+ * we haven't already swapped files, as it's possible (given the right sequence
+ * of out-of-order messages) to wind up with a NEWFILE message of each
+ * variety, and __rep_apply won't detect the two as duplicates of each other.
+ */
+static int
+__rep_newfile(dbenv, rc, msgdbt, lsnp)
+ DB_ENV *dbenv;
+ REP_CONTROL *rc;
+ DBT *msgdbt;
+ DB_LSN *lsnp;
+{
+ DB_LOG *dblp;
+ LOG *lp;
+ u_int32_t newfile;
+
+ dblp = dbenv->lg_handle;
+ lp = dblp->reginfo.primary;
+
+ /*
+ * A NEWFILE message containing the old file's LSN will be
+ * accompanied by a NULL rec DBT; one containing the new one's LSN
+ * will need to supply the last record in the old file by
+ * sending it in the rec DBT.
+ */
+ if (msgdbt == NULL || msgdbt->size == 0)
+ newfile = rc->lsn.file + 1;
+ else
+ newfile = rc->lsn.file;
+
+ if (newfile > lp->lsn.file)
+ return (__log_newfile(dblp, lsnp));
+ else {
+ /* We've already applied this NEWFILE. Just ignore it. */
+ *lsnp = lp->lsn;
+ return (0);
+ }
+}
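A small standalone sketch of the decision __rep_newfile makes: a NEWFILE message either names the old file (no record attached, so the new file is lsn.file + 1) or names the new file directly (a record is attached). The message struct here is hypothetical; only the file-number decision mirrors the code above.

#include <stdio.h>

struct toy_newfile {
	unsigned int lsn_file;	/* file number carried in the message */
	int has_rec;		/* nonzero if a record DBT came with it */
};

static unsigned int
newfile_number(const struct toy_newfile *m)
{
	return (m->has_rec ? m->lsn_file : m->lsn_file + 1);
}

int
main(void)
{
	struct toy_newfile old_style = { 7, 0 };	/* names old file 7 */
	struct toy_newfile new_style = { 8, 1 };	/* names new file 8 */

	/* Both variants resolve to the same new file number. */
	printf("%u %u\n", newfile_number(&old_style),
	    newfile_number(&new_style));
	return (0);
}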
diff --git a/bdb/rep/rep_region.c b/bdb/rep/rep_region.c
new file mode 100644
index 00000000000..1ac3fb8a20c
--- /dev/null
+++ b/bdb/rep/rep_region.c
@@ -0,0 +1,187 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2001-2002
+ * Sleepycat Software. All rights reserved.
+ */
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: rep_region.c,v 1.29 2002/08/06 04:50:36 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#endif
+
+#include <string.h>
+
+#include "db_int.h"
+#include "dbinc/rep.h"
+#include "dbinc/log.h"
+
+/*
+ * __rep_region_init --
+ * Initialize the shared memory state for the replication system.
+ *
+ * PUBLIC: int __rep_region_init __P((DB_ENV *));
+ */
+int
+__rep_region_init(dbenv)
+ DB_ENV *dbenv;
+{
+ REGENV *renv;
+ REGINFO *infop;
+ DB_MUTEX *db_mutexp;
+ DB_REP *db_rep;
+ REP *rep;
+ int ret;
+
+ db_rep = dbenv->rep_handle;
+ infop = dbenv->reginfo;
+ renv = infop->primary;
+ ret = 0;
+
+ MUTEX_LOCK(dbenv, &renv->mutex);
+ if (renv->rep_off == INVALID_ROFF) {
+ /* Must create the region. */
+ if ((ret = __db_shalloc(infop->addr,
+ sizeof(REP), MUTEX_ALIGN, &rep)) != 0)
+ goto err;
+ memset(rep, 0, sizeof(*rep));
+ rep->tally_off = INVALID_ROFF;
+ renv->rep_off = R_OFFSET(infop, rep);
+
+ if ((ret = __db_mutex_setup(dbenv, infop, &rep->mutex,
+ MUTEX_NO_RECORD)) != 0)
+ goto err;
+
+ /*
+ * We must create a place for the db_mutex separately;
+ * mutexes have to be aligned to MUTEX_ALIGN, and the only way
+ * to guarantee that is to make sure they're at the beginning
+ * of a shalloc'ed chunk.
+ */
+ if ((ret = __db_shalloc(infop->addr, sizeof(DB_MUTEX),
+ MUTEX_ALIGN, &db_mutexp)) != 0)
+ goto err;
+ rep->db_mutex_off = R_OFFSET(infop, db_mutexp);
+
+ /*
+ * Because we have no way to prevent deadlocks and cannot log
+ * changes made to it, we single-thread access to the client
+ * bookkeeping database. This is suboptimal, but it only gets
+ * accessed when messages arrive out-of-order, so it should
+ * stay small and not be used in a high-performance app.
+ */
+ if ((ret = __db_mutex_setup(dbenv, infop, db_mutexp,
+ MUTEX_NO_RECORD)) != 0)
+ goto err;
+
+ /* We have the region; fill in the values. */
+ rep->eid = DB_EID_INVALID;
+ rep->master_id = DB_EID_INVALID;
+ rep->gen = 0;
+
+ /*
+ * Set default values for the min and max log records that we
+ * wait before requesting a missing log record.
+ */
+ rep->request_gap = DB_REP_REQUEST_GAP;
+ rep->max_gap = DB_REP_MAX_GAP;
+ } else
+ rep = R_ADDR(infop, renv->rep_off);
+ MUTEX_UNLOCK(dbenv, &renv->mutex);
+
+ db_rep->mutexp = &rep->mutex;
+ db_rep->db_mutexp = R_ADDR(infop, rep->db_mutex_off);
+ db_rep->region = rep;
+
+ return (0);
+
+err: MUTEX_UNLOCK(dbenv, &renv->mutex);
+ return (ret);
+}
+
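A standalone sketch of why the DB_MUTEX gets its own shalloc'ed chunk above: an allocator that aligns chunks to a boundary guarantees that alignment only for the first byte of each chunk. The check below is generic; TOY_ALIGN stands in for a MUTEX_ALIGN-style boundary and aligned_alloc (C11) stands in for the shared-region allocator.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define TOY_ALIGN	16	/* stand-in for a MUTEX_ALIGN-style boundary */

int
main(void)
{
	void *chunk;

	if ((chunk = aligned_alloc(TOY_ALIGN, TOY_ALIGN * 4)) == NULL)
		return (1);

	printf("chunk start aligned: %s\n",
	    ((uintptr_t)chunk % TOY_ALIGN) == 0 ? "yes" : "no");
	printf("chunk start + 4 aligned: %s\n",
	    (((uintptr_t)chunk + 4) % TOY_ALIGN) == 0 ? "yes" : "no");

	free(chunk);
	return (0);
}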
+/*
+ * __rep_region_destroy --
+ * Destroy any system resources allocated in the replication region.
+ *
+ * PUBLIC: int __rep_region_destroy __P((DB_ENV *));
+ */
+int
+__rep_region_destroy(dbenv)
+ DB_ENV *dbenv;
+{
+ DB_REP *db_rep;
+ int ret, t_ret;
+
+ ret = t_ret = 0;
+ db_rep = (DB_REP *)dbenv->rep_handle;
+
+ if (db_rep != NULL) {
+ if (db_rep->mutexp != NULL)
+ ret = __db_mutex_destroy(db_rep->mutexp);
+ if (db_rep->db_mutexp != NULL)
+ t_ret = __db_mutex_destroy(db_rep->db_mutexp);
+ }
+
+ return (ret == 0 ? t_ret : ret);
+}
+
+/*
+ * __rep_dbenv_close --
+ * Replication-specific destruction of the DB_ENV structure.
+ *
+ * PUBLIC: int __rep_dbenv_close __P((DB_ENV *));
+ */
+int
+__rep_dbenv_close(dbenv)
+ DB_ENV *dbenv;
+{
+ DB_REP *db_rep;
+
+ db_rep = (DB_REP *)dbenv->rep_handle;
+
+ if (db_rep != NULL) {
+ __os_free(dbenv, db_rep);
+ dbenv->rep_handle = NULL;
+ }
+
+ return (0);
+}
+
+/*
+ * __rep_preclose --
+ * If we are a client, shut down our client database and, if we're
+ * actually closing the environment, close all databases we've opened
+ * while applying messages.
+ *
+ * PUBLIC: int __rep_preclose __P((DB_ENV *, int));
+ */
+int
+__rep_preclose(dbenv, do_closefiles)
+ DB_ENV *dbenv;
+ int do_closefiles;
+{
+ DB *dbp;
+ DB_REP *db_rep;
+ int ret, t_ret;
+
+ ret = t_ret = 0;
+
+ /* If replication is not initialized, we have nothing to do. */
+ if ((db_rep = (DB_REP *)dbenv->rep_handle) == NULL)
+ return (0);
+
+ if ((dbp = db_rep->rep_db) != NULL) {
+ MUTEX_LOCK(dbenv, db_rep->db_mutexp);
+ ret = dbp->close(dbp, 0);
+ db_rep->rep_db = NULL;
+ MUTEX_UNLOCK(dbenv, db_rep->db_mutexp);
+ }
+
+ if (do_closefiles)
+ t_ret = __dbreg_close_files(dbenv);
+
+ return (ret == 0 ? t_ret : ret);
+}
diff --git a/bdb/rep/rep_util.c b/bdb/rep/rep_util.c
new file mode 100644
index 00000000000..9c99d33ed4a
--- /dev/null
+++ b/bdb/rep/rep_util.c
@@ -0,0 +1,867 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2001-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: rep_util.c,v 1.51 2002/09/05 02:30:00 margo Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <stdlib.h>
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_page.h"
+#include "dbinc/btree.h"
+#include "dbinc/fop.h"
+#include "dbinc/hash.h"
+#include "dbinc/log.h"
+#include "dbinc/qam.h"
+#include "dbinc/rep.h"
+#include "dbinc/txn.h"
+
+/*
+ * rep_util.c:
+ * Miscellaneous replication-related utility functions, including
+ * those called by other subsystems.
+ */
+static int __rep_cmp_bylsn __P((const void *, const void *));
+static int __rep_cmp_bypage __P((const void *, const void *));
+
+#ifdef REP_DIAGNOSTIC
+static void __rep_print_logmsg __P((DB_ENV *, const DBT *, DB_LSN *));
+#endif
+
+/*
+ * __rep_check_alloc --
+ * Make sure the array of TXN_REC entries is of at least size n.
+ * (This function is called by the __*_getpgnos() functions in
+ * *.src.)
+ *
+ * PUBLIC: int __rep_check_alloc __P((DB_ENV *, TXN_RECS *, int));
+ */
+int
+__rep_check_alloc(dbenv, r, n)
+ DB_ENV *dbenv;
+ TXN_RECS *r;
+ int n;
+{
+ int nalloc, ret;
+
+ while (r->nalloc < r->npages + n) {
+ nalloc = r->nalloc == 0 ? 20 : r->nalloc * 2;
+
+ if ((ret = __os_realloc(dbenv, nalloc * sizeof(LSN_PAGE),
+ &r->array)) != 0)
+ return (ret);
+
+ r->nalloc = nalloc;
+ }
+
+ return (0);
+}
+
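A standalone sketch of the growth policy __rep_check_alloc uses (and __rep_collect_txn above uses the same idiom): start at 20 slots, double on each growth, and realloc so the array only ever grows geometrically. The entry type is a placeholder for LSN_PAGE.

#include <stdio.h>
#include <stdlib.h>

struct toy_entry {
	int v;
};

struct toy_recs {
	struct toy_entry *array;
	int npages, nalloc;
};

static int
toy_check_alloc(struct toy_recs *r, int n)
{
	struct toy_entry *tmp;
	int nalloc;

	while (r->nalloc < r->npages + n) {
		nalloc = r->nalloc == 0 ? 20 : r->nalloc * 2;
		if ((tmp = realloc(r->array,
		    nalloc * sizeof(struct toy_entry))) == NULL)
			return (1);
		r->array = tmp;
		r->nalloc = nalloc;
	}
	return (0);
}

int
main(void)
{
	struct toy_recs r = { NULL, 0, 0 };
	int i;

	for (i = 0; i < 100; i++) {
		if (toy_check_alloc(&r, 1) != 0)
			return (1);
		r.array[r.npages++].v = i;
	}
	printf("npages %d nalloc %d\n", r.npages, r.nalloc);	/* 100 160 */
	free(r.array);
	return (0);
}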
+/*
+ * __rep_send_message --
+ * This is a wrapper for sending a message. It takes care of constructing
+ * the REP_CONTROL structure and calling the user's specified send function.
+ *
+ * PUBLIC: int __rep_send_message __P((DB_ENV *, int,
+ * PUBLIC: u_int32_t, DB_LSN *, const DBT *, u_int32_t));
+ */
+int
+__rep_send_message(dbenv, eid, rtype, lsnp, dbtp, flags)
+ DB_ENV *dbenv;
+ int eid;
+ u_int32_t rtype;
+ DB_LSN *lsnp;
+ const DBT *dbtp;
+ u_int32_t flags;
+{
+ DB_REP *db_rep;
+ REP *rep;
+ DBT cdbt, scrap_dbt;
+ REP_CONTROL cntrl;
+ u_int32_t send_flags;
+ int ret;
+
+ db_rep = dbenv->rep_handle;
+ rep = db_rep->region;
+
+ /* Set up control structure. */
+ memset(&cntrl, 0, sizeof(cntrl));
+ if (lsnp == NULL)
+ ZERO_LSN(cntrl.lsn);
+ else
+ cntrl.lsn = *lsnp;
+ cntrl.rectype = rtype;
+ cntrl.flags = flags;
+ cntrl.rep_version = DB_REPVERSION;
+ cntrl.log_version = DB_LOGVERSION;
+ MUTEX_LOCK(dbenv, db_rep->mutexp);
+ cntrl.gen = rep->gen;
+ MUTEX_UNLOCK(dbenv, db_rep->mutexp);
+
+ memset(&cdbt, 0, sizeof(cdbt));
+ cdbt.data = &cntrl;
+ cdbt.size = sizeof(cntrl);
+
+ /* Don't assume the send function will be tolerant of NULL records. */
+ if (dbtp == NULL) {
+ memset(&scrap_dbt, 0, sizeof(DBT));
+ dbtp = &scrap_dbt;
+ }
+
+ send_flags = (LF_ISSET(DB_PERMANENT) ? DB_REP_PERMANENT : 0);
+
+#if 0
+ __rep_print_message(dbenv, eid, &cntrl, "rep_send_message");
+#endif
+#ifdef REP_DIAGNOSTIC
+ if (rtype == REP_LOG)
+ __rep_print_logmsg(dbenv, dbtp, lsnp);
+#endif
+ ret = db_rep->rep_send(dbenv, &cdbt, dbtp, eid, send_flags);
+
+ /*
+ * We don't hold the rep lock, so this could miscount if we race.
+ * I don't think it's worth grabbing the mutex for that bit of
+ * extra accuracy.
+ */
+ if (ret == 0)
+ rep->stat.st_msgs_sent++;
+ else
+ rep->stat.st_msgs_send_failures++;
+
+ return (ret);
+}
+
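A hypothetical application transport callback, shaped to match the db_rep->rep_send() call above (control DBT, record DBT, destination eid, flags); in this release such a callback is registered through DB_ENV->set_rep_transport(). send_to_site() is an invented helper standing in for whatever wire protocol the application uses; this is a compile-only sketch, not a complete transport.

#include <sys/types.h>
#include <errno.h>
#include <stdio.h>
#include <db.h>

static int
send_to_site(int eid, const void *buf, size_t len, int flush)
{
	/* Stand-in: a real transport would write buf to a socket here. */
	(void)buf;
	printf("to %d: %lu bytes%s\n", eid, (unsigned long)len,
	    flush ? " (flush)" : "");
	return (0);
}

static int
my_rep_send(DB_ENV *dbenv, const DBT *control, const DBT *rec,
    int eid, u_int32_t flags)
{
	int flush;

	flush = (flags & DB_REP_PERMANENT) ? 1 : 0;

	/* Control structure first, then the record (possibly empty). */
	if (send_to_site(eid, control->data, control->size, 0) != 0 ||
	    send_to_site(eid, rec->data, rec->size, flush) != 0)
		return (EIO);	/* any nonzero return reports failure */

	(void)dbenv;	/* environment handle unused in this sketch */
	return (0);
}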
+#ifdef REP_DIAGNOSTIC
+
+/*
+ * __rep_print_logmsg --
+ * This is a debugging routine for printing out log records that
+ * we are about to transmit to a client.
+ */
+
+static void
+__rep_print_logmsg(dbenv, logdbt, lsnp)
+ DB_ENV *dbenv;
+ const DBT *logdbt;
+ DB_LSN *lsnp;
+{
+ /* Static structures to hold the printing functions. */
+ static int (**ptab)__P((DB_ENV *,
+ DBT *, DB_LSN *, db_recops, void *)) = NULL;
+ static size_t ptabsize = 0;
+
+ if (ptabsize == 0) {
+ /* Initialize the table. */
+ (void)__bam_init_print(dbenv, &ptab, &ptabsize);
+ (void)__crdel_init_print(dbenv, &ptab, &ptabsize);
+ (void)__db_init_print(dbenv, &ptab, &ptabsize);
+ (void)__dbreg_init_print(dbenv, &ptab, &ptabsize);
+ (void)__fop_init_print(dbenv, &ptab, &ptabsize);
+ (void)__qam_init_print(dbenv, &ptab, &ptabsize);
+ (void)__ham_init_print(dbenv, &ptab, &ptabsize);
+ (void)__txn_init_print(dbenv, &ptab, &ptabsize);
+ }
+
+ (void)__db_dispatch(dbenv,
+ ptab, ptabsize, (DBT *)logdbt, lsnp, DB_TXN_PRINT, NULL);
+}
+
+#endif
+/*
+ * __rep_new_master --
+ * Called after a master election to sync back up with a new master.
+ * It's possible that we already know of this new master in which case
+ * we don't need to do anything.
+ *
+ * This is written assuming that this message came from the master; we
+ * need to enforce that in __rep_process_record, but right now, we have
+ * no way to identify the master.
+ *
+ * PUBLIC: int __rep_new_master __P((DB_ENV *, REP_CONTROL *, int));
+ */
+int
+__rep_new_master(dbenv, cntrl, eid)
+ DB_ENV *dbenv;
+ REP_CONTROL *cntrl;
+ int eid;
+{
+ DB_LOG *dblp;
+ DB_LOGC *logc;
+ DB_LSN last_lsn, lsn;
+ DB_REP *db_rep;
+ DBT dbt;
+ LOG *lp;
+ REP *rep;
+ int change, ret, t_ret;
+
+ db_rep = dbenv->rep_handle;
+ rep = db_rep->region;
+ MUTEX_LOCK(dbenv, db_rep->mutexp);
+ ELECTION_DONE(rep);
+ change = rep->gen != cntrl->gen || rep->master_id != eid;
+ if (change) {
+ rep->gen = cntrl->gen;
+ rep->master_id = eid;
+ F_SET(rep, REP_F_RECOVER);
+ rep->stat.st_master_changes++;
+ }
+ MUTEX_UNLOCK(dbenv, db_rep->mutexp);
+
+ if (!change)
+ return (0);
+
+ /*
+ * If the master changed, we need to start the process of
+ * figuring out what our last valid log record is. However,
+ * if both the master and we agree that the max LSN is 0,0,
+ * then there is no recovery to be done. If we are at 0 and
+ * the master is not, then we just need to request all the log
+ * records from the master.
+ */
+ dblp = dbenv->lg_handle;
+ lp = dblp->reginfo.primary;
+ R_LOCK(dbenv, &dblp->reginfo);
+ last_lsn = lsn = lp->lsn;
+ if (last_lsn.offset > sizeof(LOGP))
+ last_lsn.offset -= lp->len;
+ R_UNLOCK(dbenv, &dblp->reginfo);
+ if (IS_INIT_LSN(lsn) || IS_ZERO_LSN(lsn)) {
+empty: MUTEX_LOCK(dbenv, db_rep->mutexp);
+ F_CLR(rep, REP_F_RECOVER);
+ MUTEX_UNLOCK(dbenv, db_rep->mutexp);
+
+ if (IS_INIT_LSN(cntrl->lsn))
+ ret = 0;
+ else
+ ret = __rep_send_message(dbenv, rep->master_id,
+ REP_ALL_REQ, &lsn, NULL, 0);
+
+ if (ret == 0)
+ ret = DB_REP_NEWMASTER;
+ return (ret);
+ } else if (last_lsn.offset <= sizeof(LOGP)) {
+ /*
+ * We have just changed log files and need to set lastlsn
+ * to the last record in the previous log files.
+ */
+ if ((ret = dbenv->log_cursor(dbenv, &logc, 0)) != 0)
+ return (ret);
+ memset(&dbt, 0, sizeof(dbt));
+ ret = logc->get(logc, &last_lsn, &dbt, DB_LAST);
+ if ((t_ret = logc->close(logc, 0)) != 0 && ret == 0)
+ ret = t_ret;
+ if (ret == DB_NOTFOUND)
+ goto empty;
+ if (ret != 0)
+ return (ret);
+ }
+
+ R_LOCK(dbenv, &dblp->reginfo);
+ lp->verify_lsn = last_lsn;
+ R_UNLOCK(dbenv, &dblp->reginfo);
+ if ((ret = __rep_send_message(dbenv,
+ eid, REP_VERIFY_REQ, &last_lsn, NULL, 0)) != 0)
+ return (ret);
+
+ return (DB_REP_NEWMASTER);
+}
+
+/*
+ * __rep_lockpgno_init
+ * Create a dispatch table for acquiring locks on each log record.
+ *
+ * PUBLIC: int __rep_lockpgno_init __P((DB_ENV *,
+ * PUBLIC: int (***)(DB_ENV *, DBT *, DB_LSN *, db_recops, void *),
+ * PUBLIC: size_t *));
+ */
+int
+__rep_lockpgno_init(dbenv, dtabp, dtabsizep)
+ DB_ENV *dbenv;
+ int (***dtabp)__P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ size_t *dtabsizep;
+{
+ int ret;
+
+ /* Initialize dispatch table. */
+ *dtabsizep = 0;
+ *dtabp = NULL;
+ if ((ret = __bam_init_getpgnos(dbenv, dtabp, dtabsizep)) != 0 ||
+ (ret = __crdel_init_getpgnos(dbenv, dtabp, dtabsizep)) != 0 ||
+ (ret = __db_init_getpgnos(dbenv, dtabp, dtabsizep)) != 0 ||
+ (ret = __dbreg_init_getpgnos(dbenv, dtabp, dtabsizep)) != 0 ||
+ (ret = __fop_init_getpgnos(dbenv, dtabp, dtabsizep)) != 0 ||
+ (ret = __qam_init_getpgnos(dbenv, dtabp, dtabsizep)) != 0 ||
+ (ret = __ham_init_getpgnos(dbenv, dtabp, dtabsizep)) != 0 ||
+ (ret = __txn_init_getpgnos(dbenv, dtabp, dtabsizep)) != 0)
+ return (ret);
+
+ return (0);
+}
+
+/*
+ * __rep_unlockpages --
+ * Unlock the pages locked in __rep_lockpages.
+ *
+ * PUBLIC: int __rep_unlockpages __P((DB_ENV *, u_int32_t));
+ */
+int
+__rep_unlockpages(dbenv, lid)
+ DB_ENV *dbenv;
+ u_int32_t lid;
+{
+ DB_LOCKREQ req, *lvp;
+
+ req.op = DB_LOCK_PUT_ALL;
+ return (dbenv->lock_vec(dbenv, lid, 0, &req, 1, &lvp));
+}
+
+/*
+ * __rep_lockpages --
+ * Called to gather and lock pages in preparation for both
+ * single transaction apply as well as client synchronization
+ * with a new master. A non-NULL key_lsn means that we're locking
+ * in order to apply a single log record during client recovery
+ * to the joint LSN. A non-NULL max_lsn means that we are applying
+ * a transaction whose commit is at max_lsn.
+ *
+ * PUBLIC: int __rep_lockpages __P((DB_ENV *,
+ * PUBLIC: int (**)(DB_ENV *, DBT *, DB_LSN *, db_recops, void *),
+ * PUBLIC: size_t, DB_LSN *, DB_LSN *, TXN_RECS *, u_int32_t));
+ */
+int
+__rep_lockpages(dbenv, dtab, dtabsize, key_lsn, max_lsn, recs, lid)
+ DB_ENV *dbenv;
+ int (**dtab)__P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ size_t dtabsize;
+ DB_LSN *key_lsn, *max_lsn;
+ TXN_RECS *recs;
+ u_int32_t lid;
+{
+ DBT data_dbt, lo;
+ DB_LOCK l;
+ DB_LOCKREQ *lvp;
+ DB_LOGC *logc;
+ DB_LSN tmp_lsn;
+ TXN_RECS tmp, *t;
+ db_pgno_t cur_pgno;
+ linfo_t locks;
+ int i, ret, t_ret, unique;
+ u_int32_t cur_fid;
+
+ /*
+ * There are two phases: First, we have to traverse backwards through
+ * the log records gathering the list of all the pages accessed. Once
+ * we have this information we can acquire all the locks we need.
+ */
+
+ /* Initialization */
+ memset(&locks, 0, sizeof(locks));
+ ret = 0;
+
+ t = recs != NULL ? recs : &tmp;
+ t->npages = t->nalloc = 0;
+ t->array = NULL;
+
+ /*
+ * We've got to be in one mode or the other; else life will either
+ * be excessively boring or overly exciting.
+ */
+ DB_ASSERT(key_lsn != NULL || max_lsn != NULL);
+ DB_ASSERT(key_lsn == NULL || max_lsn == NULL);
+
+ /*
+ * Phase 1: Fill in the pgno array.
+ */
+ memset(&data_dbt, 0, sizeof(data_dbt));
+ if (F_ISSET(dbenv, DB_ENV_THREAD))
+ F_SET(&data_dbt, DB_DBT_REALLOC);
+
+ /* Single transaction apply. */
+ if (max_lsn != NULL) {
+ DB_ASSERT(0); /* XXX */
+ /*
+ tmp_lsn = *max_lsn;
+ if ((ret = __rep_apply_thread(dbenv, dtab, dtabsize,
+ &data_dbt, &tmp_lsn, t)) != 0)
+ goto err;
+ */
+ }
+
+ /* In recovery. */
+ if (key_lsn != NULL) {
+ if ((ret = dbenv->log_cursor(dbenv, &logc, 0)) != 0)
+ goto err;
+ ret = logc->get(logc, key_lsn, &data_dbt, DB_SET);
+
+ /* Save lsn values, since dispatch functions can change them. */
+ tmp_lsn = *key_lsn;
+ ret = __db_dispatch(dbenv,
+ dtab, dtabsize, &data_dbt, &tmp_lsn, DB_TXN_GETPGNOS, t);
+
+ if ((t_ret = logc->close(logc, 0)) != 0 && ret == 0)
+ ret = t_ret;
+
+ /*
+ * If ret == DB_DELETED, this record refers to a temporary
+ * file and there's nothing to apply.
+ */
+ if (ret == DB_DELETED) {
+ ret = 0;
+ goto out;
+ } else if (ret != 0)
+ goto err;
+ }
+
+ if (t->npages == 0)
+ goto out;
+
+ /* Phase 2: Write lock all the pages. */
+
+ /* Sort the entries in the array by page number. */
+ qsort(t->array, t->npages, sizeof(LSN_PAGE), __rep_cmp_bypage);
+
+ /* Count the number of unique pages. */
+ cur_fid = DB_LOGFILEID_INVALID;
+ cur_pgno = PGNO_INVALID;
+ unique = 0;
+ for (i = 0; i < t->npages; i++) {
+ if (F_ISSET(&t->array[i], LSN_PAGE_NOLOCK))
+ continue;
+ if (t->array[i].pgdesc.pgno != cur_pgno ||
+ t->array[i].fid != cur_fid) {
+ cur_pgno = t->array[i].pgdesc.pgno;
+ cur_fid = t->array[i].fid;
+ unique++;
+ }
+ }
+
+ if (unique == 0)
+ goto out;
+
+ /* Handle single lock case specially, else allocate space for locks. */
+ if (unique == 1) {
+ memset(&lo, 0, sizeof(lo));
+ lo.data = &t->array[0].pgdesc;
+ lo.size = sizeof(t->array[0].pgdesc);
+ ret = dbenv->lock_get(dbenv, lid, 0, &lo, DB_LOCK_WRITE, &l);
+ goto out2;
+ }
+
+ /* Multi-lock case. */
+ locks.n = unique;
+ if ((ret = __os_calloc(dbenv,
+ unique, sizeof(DB_LOCKREQ), &locks.reqs)) != 0)
+ goto err;
+ if ((ret = __os_calloc(dbenv, unique, sizeof(DBT), &locks.objs)) != 0)
+ goto err;
+
+ unique = 0;
+ cur_fid = DB_LOGFILEID_INVALID;
+ cur_pgno = PGNO_INVALID;
+ for (i = 0; i < t->npages; i++) {
+ if (F_ISSET(&t->array[i], LSN_PAGE_NOLOCK))
+ continue;
+ if (t->array[i].pgdesc.pgno != cur_pgno ||
+ t->array[i].fid != cur_fid) {
+ cur_pgno = t->array[i].pgdesc.pgno;
+ cur_fid = t->array[i].fid;
+ locks.reqs[unique].op = DB_LOCK_GET;
+ locks.reqs[unique].mode = DB_LOCK_WRITE;
+ locks.reqs[unique].obj = &locks.objs[unique];
+ locks.objs[unique].data = &t->array[i].pgdesc;
+ locks.objs[unique].size = sizeof(t->array[i].pgdesc);
+ unique++;
+ }
+ }
+
+ /* Finally, get the locks. */
+ if ((ret =
+ dbenv->lock_vec(dbenv, lid, 0, locks.reqs, unique, &lvp)) != 0) {
+ /*
+ * If we were unsuccessful, unlock any locks we acquired before
+ * the error and return the original error value.
+ */
+ (void)__rep_unlockpages(dbenv, lid);
+ }
+
+err:
+out: if (locks.objs != NULL)
+ __os_free(dbenv, locks.objs);
+ if (locks.reqs != NULL)
+ __os_free(dbenv, locks.reqs);
+
+ /*
+ * Before we return, sort by LSN so that we apply records in the
+ * right order.
+ */
+ qsort(t->array, t->npages, sizeof(LSN_PAGE), __rep_cmp_bylsn);
+
+out2: if ((ret != 0 || recs == NULL) && t->nalloc != 0) {
+ __os_free(dbenv, t->array);
+ t->array = NULL;
+ t->npages = t->nalloc = 0;
+ }
+
+ if (F_ISSET(&data_dbt, DB_DBT_REALLOC) && data_dbt.data != NULL)
+ __os_ufree(dbenv, data_dbt.data);
+
+ return (ret);
+}
+
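A standalone toy of the phase-2 bookkeeping in __rep_lockpages: sort the gathered (file id, page number) pairs, then count how many distinct pages there are by comparing each entry with the one before it. The pair type is a placeholder for LSN_PAGE.

#include <stdio.h>
#include <stdlib.h>

struct toy_page {
	unsigned int fid, pgno;
};

static int
toy_cmp_bypage(const void *a, const void *b)
{
	const struct toy_page *x = a, *y = b;

	if (x->fid != y->fid)
		return (x->fid < y->fid ? -1 : 1);
	if (x->pgno != y->pgno)
		return (x->pgno < y->pgno ? -1 : 1);
	return (0);
}

int
main(void)
{
	struct toy_page pages[] = {
		{1, 4}, {1, 2}, {2, 4}, {1, 4}, {1, 2}, {2, 4}, {1, 7}
	};
	int i, n = (int)(sizeof(pages) / sizeof(pages[0]));
	int unique = 0;

	qsort(pages, n, sizeof(pages[0]), toy_cmp_bypage);
	for (i = 0; i < n; i++)
		if (i == 0 || toy_cmp_bypage(&pages[i - 1], &pages[i]) != 0)
			unique++;	/* one lock request per distinct page */

	printf("unique pages: %d\n", unique);	/* 4 */
	return (0);
}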
+/*
+ * __rep_cmp_bypage and __rep_cmp_bylsn --
+ * Sort functions for qsort. "bypage" sorts first by page numbers and
+ * then by the LSN. "bylsn" sorts first by the LSN, then by page numbers.
+ */
+static int
+__rep_cmp_bypage(a, b)
+ const void *a, *b;
+{
+ LSN_PAGE *ap, *bp;
+
+ ap = (LSN_PAGE *)a;
+ bp = (LSN_PAGE *)b;
+
+ if (ap->fid < bp->fid)
+ return (-1);
+
+ if (ap->fid > bp->fid)
+ return (1);
+
+ if (ap->pgdesc.pgno < bp->pgdesc.pgno)
+ return (-1);
+
+ if (ap->pgdesc.pgno > bp->pgdesc.pgno)
+ return (1);
+
+ if (ap->lsn.file < bp->lsn.file)
+ return (-1);
+
+ if (ap->lsn.file > bp->lsn.file)
+ return (1);
+
+ if (ap->lsn.offset < bp->lsn.offset)
+ return (-1);
+
+ if (ap->lsn.offset > bp->lsn.offset)
+ return (1);
+
+ return (0);
+}
+
+static int
+__rep_cmp_bylsn(a, b)
+ const void *a, *b;
+{
+ LSN_PAGE *ap, *bp;
+
+ ap = (LSN_PAGE *)a;
+ bp = (LSN_PAGE *)b;
+
+ if (ap->lsn.file < bp->lsn.file)
+ return (-1);
+
+ if (ap->lsn.file > bp->lsn.file)
+ return (1);
+
+ if (ap->lsn.offset < bp->lsn.offset)
+ return (-1);
+
+ if (ap->lsn.offset > bp->lsn.offset)
+ return (1);
+
+ if (ap->fid < bp->fid)
+ return (-1);
+
+ if (ap->fid > bp->fid)
+ return (1);
+
+ if (ap->pgdesc.pgno < bp->pgdesc.pgno)
+ return (-1);
+
+ if (ap->pgdesc.pgno > bp->pgdesc.pgno)
+ return (1);
+
+ return (0);
+}
+
+/*
+ * __rep_is_client
+ * Used by other subsystems to figure out if this is a replication
+ * client site.
+ *
+ * PUBLIC: int __rep_is_client __P((DB_ENV *));
+ */
+int
+__rep_is_client(dbenv)
+ DB_ENV *dbenv;
+{
+ DB_REP *db_rep;
+ REP *rep;
+ int ret;
+
+ if ((db_rep = dbenv->rep_handle) == NULL)
+ return (0);
+ rep = db_rep->region;
+
+ MUTEX_LOCK(dbenv, db_rep->mutexp);
+ ret = F_ISSET(rep, REP_F_UPGRADE | REP_F_LOGSONLY);
+ MUTEX_UNLOCK(dbenv, db_rep->mutexp);
+ return (ret);
+}
+
+/*
+ * __rep_send_vote
+ * Send this site's vote for the election.
+ *
+ * PUBLIC: int __rep_send_vote __P((DB_ENV *, DB_LSN *, int, int, int));
+ */
+int
+__rep_send_vote(dbenv, lsnp, nsites, pri, tiebreaker)
+ DB_ENV *dbenv;
+ DB_LSN *lsnp;
+ int nsites, pri, tiebreaker;
+{
+ DBT vote_dbt;
+ REP_VOTE_INFO vi;
+
+ memset(&vi, 0, sizeof(vi));
+
+ vi.priority = pri;
+ vi.nsites = nsites;
+ vi.tiebreaker = tiebreaker;
+
+ memset(&vote_dbt, 0, sizeof(vote_dbt));
+ vote_dbt.data = &vi;
+ vote_dbt.size = sizeof(vi);
+
+ return (__rep_send_message(dbenv,
+ DB_EID_BROADCAST, REP_VOTE1, lsnp, &vote_dbt, 0));
+}
+
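A compile-only sketch of the packing pattern in __rep_send_vote: zero a local struct, fill in its fields, and point a DBT's data/size at it for the duration of the send. The vote struct below is an invented stand-in for the internal REP_VOTE_INFO.

#include <string.h>
#include <db.h>

struct toy_vote {
	int priority, nsites, tiebreaker;
};

static void
pack_vote(DBT *dbtp, struct toy_vote *vp, int pri, int nsites, int tie)
{
	memset(vp, 0, sizeof(*vp));	/* zero any padding bytes too */
	vp->priority = pri;
	vp->nsites = nsites;
	vp->tiebreaker = tie;

	memset(dbtp, 0, sizeof(*dbtp));
	dbtp->data = vp;		/* valid only while *vp is live */
	dbtp->size = sizeof(*vp);
}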
+/*
+ * __rep_grow_sites --
+ * Called to allocate more space in the election tally information.
+ * Called with the rep mutex held. We need to acquire the region mutex, so
+ * we need to make sure that we *never* acquire those mutexes in the
+ * opposite order.
+ *
+ * PUBLIC: int __rep_grow_sites __P((DB_ENV *dbenv, int nsites));
+ */
+int
+__rep_grow_sites(dbenv, nsites)
+ DB_ENV *dbenv;
+ int nsites;
+{
+ REGENV *renv;
+ REGINFO *infop;
+ REP *rep;
+ int nalloc, ret, *tally;
+
+ rep = ((DB_REP *)dbenv->rep_handle)->region;
+
+ /*
+ * Allocate either twice the current allocation or nsites,
+ * whichever is more.
+ */
+
+ nalloc = 2 * rep->asites;
+ if (nalloc < nsites)
+ nalloc = nsites;
+
+ infop = dbenv->reginfo;
+ renv = infop->primary;
+ MUTEX_LOCK(dbenv, &renv->mutex);
+ if ((ret = __db_shalloc(infop->addr,
+ nalloc * sizeof(int), sizeof(int), &tally)) == 0) {
+ if (rep->tally_off != INVALID_ROFF)
+ __db_shalloc_free(infop->addr,
+ R_ADDR(infop, rep->tally_off));
+ rep->asites = nalloc;
+ rep->nsites = nsites;
+ rep->tally_off = R_OFFSET(infop, tally);
+ }
+ MUTEX_UNLOCK(dbenv, &renv->mutex);
+ return (ret);
+}
+
+#ifdef NOTYET
+static int __rep_send_file __P((DB_ENV *, DBT *, u_int32_t));
+/*
+ * __rep_send_file --
+ * Send an entire file, one block at a time.
+ */
+static int
+__rep_send_file(dbenv, rec, eid)
+ DB_ENV *dbenv;
+ DBT *rec;
+ u_int32_t eid;
+{
+ DB *dbp;
+ DB_LOCK lk;
+ DB_MPOOLFILE *mpf;
+ DBC *dbc;
+ DBT rec_dbt;
+ PAGE *pagep;
+ db_pgno_t last_pgno, pgno;
+ int ret, t_ret;
+
+ dbp = NULL;
+ dbc = NULL;
+ pagep = NULL;
+ mpf = NULL;
+ LOCK_INIT(lk);
+
+ if ((ret = db_create(&dbp, dbenv, 0)) != 0)
+ goto err;
+
+ if ((ret = dbp->open(dbp, rec->data, NULL, DB_UNKNOWN, 0, 0)) != 0)
+ goto err;
+
+ if ((ret = dbp->cursor(dbp, NULL, &dbc, 0)) != 0)
+ goto err;
+ /*
+ * Force last_pgno to some value that will let us read the meta-data
+ * page in the following loop.
+ */
+ memset(&rec_dbt, 0, sizeof(rec_dbt));
+ last_pgno = 1;
+ for (pgno = 0; pgno <= last_pgno; pgno++) {
+ if ((ret = __db_lget(dbc, 0, pgno, DB_LOCK_READ, 0, &lk)) != 0)
+ goto err;
+
+ if ((ret = mpf->get(mpf, &pgno, 0, &pagep)) != 0)
+ goto err;
+
+ if (pgno == 0)
+ last_pgno = ((DBMETA *)pagep)->last_pgno;
+
+ rec_dbt.data = pagep;
+ rec_dbt.size = dbp->pgsize;
+ if ((ret = __rep_send_message(dbenv, eid,
+ REP_FILE, NULL, &rec_dbt, pgno == last_pgno)) != 0)
+ goto err;
+ ret = mpf->put(mpf, pagep, 0);
+ pagep = NULL;
+ if (ret != 0)
+ goto err;
+ ret = __LPUT(dbc, lk);
+ LOCK_INIT(lk);
+ if (ret != 0)
+ goto err;
+ }
+
+err: if (LOCK_ISSET(lk) && (t_ret = __LPUT(dbc, lk)) != 0 && ret == 0)
+ ret = t_ret;
+ if (dbc != NULL && (t_ret = dbc->c_close(dbc)) != 0 && ret == 0)
+ ret = t_ret;
+ if (pagep != NULL && (t_ret = mpf->put(mpf, pagep, 0)) != 0 && ret == 0)
+ ret = t_ret;
+ if (dbp != NULL && (t_ret = dbp->close(dbp, 0)) != 0 && ret == 0)
+ ret = t_ret;
+ return (ret);
+}
+#endif
+
+#if 0
+/*
+ * PUBLIC: void __rep_print_message __P((DB_ENV *, int, REP_CONTROL *, char *));
+ */
+void
+__rep_print_message(dbenv, eid, rp, str)
+ DB_ENV *dbenv;
+ int eid;
+ REP_CONTROL *rp;
+ char *str;
+{
+ char *type;
+ switch (rp->rectype) {
+ case REP_ALIVE:
+ type = "alive";
+ break;
+ case REP_ALIVE_REQ:
+ type = "alive_req";
+ break;
+ case REP_ALL_REQ:
+ type = "all_req";
+ break;
+ case REP_ELECT:
+ type = "elect";
+ break;
+ case REP_FILE:
+ type = "file";
+ break;
+ case REP_FILE_REQ:
+ type = "file_req";
+ break;
+ case REP_LOG:
+ type = "log";
+ break;
+ case REP_LOG_MORE:
+ type = "log_more";
+ break;
+ case REP_LOG_REQ:
+ type = "log_req";
+ break;
+ case REP_MASTER_REQ:
+ type = "master_req";
+ break;
+ case REP_NEWCLIENT:
+ type = "newclient";
+ break;
+ case REP_NEWFILE:
+ type = "newfile";
+ break;
+ case REP_NEWMASTER:
+ type = "newmaster";
+ break;
+ case REP_NEWSITE:
+ type = "newsite";
+ break;
+ case REP_PAGE:
+ type = "page";
+ break;
+ case REP_PAGE_REQ:
+ type = "page_req";
+ break;
+ case REP_PLIST:
+ type = "plist";
+ break;
+ case REP_PLIST_REQ:
+ type = "plist_req";
+ break;
+ case REP_VERIFY:
+ type = "verify";
+ break;
+ case REP_VERIFY_FAIL:
+ type = "verify_fail";
+ break;
+ case REP_VERIFY_REQ:
+ type = "verify_req";
+ break;
+ case REP_VOTE1:
+ type = "vote1";
+ break;
+ case REP_VOTE2:
+ type = "vote2";
+ break;
+ default:
+ type = "NOTYPE";
+ break;
+ }
+ printf("%s %s: gen = %d eid %d, type %s, LSN [%u][%u]\n",
+ dbenv->db_home, str, rp->gen, eid, type, rp->lsn.file,
+ rp->lsn.offset);
+}
+#endif
diff --git a/bdb/rpc_client/client.c b/bdb/rpc_client/client.c
index 70744f54b4c..b6367e21449 100644
--- a/bdb/rpc_client/client.c
+++ b/bdb/rpc_client/client.c
@@ -1,20 +1,23 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Copyright (c) 1996-2002
* Sleepycat Software. All rights reserved.
*/
#include "db_config.h"
#ifndef lint
-static const char revid[] = "$Id: client.c,v 1.21 2000/11/30 00:58:44 ubell Exp $";
+static const char revid[] = "$Id: client.c,v 1.51 2002/08/06 06:18:15 bostic Exp $";
#endif /* not lint */
#ifdef HAVE_RPC
#ifndef NO_SYSTEM_INCLUDES
#include <sys/types.h>
+#ifdef HAVE_VXWORKS
+#include <rpcLib.h>
+#endif
#include <rpc/rpc.h>
#include <ctype.h>
@@ -22,71 +25,124 @@ static const char revid[] = "$Id: client.c,v 1.21 2000/11/30 00:58:44 ubell Exp
#include <string.h>
#include <unistd.h>
#endif
-#include "db_server.h"
#include "db_int.h"
-#include "txn.h"
-#include "gen_client_ext.h"
-#include "rpc_client_ext.h"
+#include "dbinc/db_page.h"
+#include "dbinc/db_am.h"
+#include "dbinc/txn.h"
+
+#include "dbinc_auto/db_server.h"
+#include "dbinc_auto/rpc_client_ext.h"
+
+static int __dbcl_c_destroy __P((DBC *));
+static int __dbcl_txn_close __P((DB_ENV *));
/*
- * __dbclenv_server --
+ * __dbcl_envrpcserver --
* Initialize an environment's server.
*
- * PUBLIC: int __dbcl_envserver __P((DB_ENV *, char *, long, long, u_int32_t));
+ * PUBLIC: int __dbcl_envrpcserver
+ * PUBLIC: __P((DB_ENV *, void *, const char *, long, long, u_int32_t));
*/
int
-__dbcl_envserver(dbenv, host, tsec, ssec, flags)
+__dbcl_envrpcserver(dbenv, clnt, host, tsec, ssec, flags)
DB_ENV *dbenv;
- char *host;
+ void *clnt;
+ const char *host;
long tsec, ssec;
u_int32_t flags;
{
CLIENT *cl;
- __env_create_msg req;
- __env_create_reply *replyp;
struct timeval tp;
- int ret;
COMPQUIET(flags, 0);
#ifdef HAVE_VXWORKS
- if ((ret = rpcTaskInit()) != 0) {
+ if (rpcTaskInit() != 0) {
__db_err(dbenv, "Could not initialize VxWorks RPC");
return (ERROR);
}
#endif
- if ((cl =
- clnt_create(host, DB_SERVERPROG, DB_SERVERVERS, "tcp")) == NULL) {
- __db_err(dbenv, clnt_spcreateerror(host));
- return (DB_NOSERVER);
+ if (RPC_ON(dbenv)) {
+ __db_err(dbenv, "Already set an RPC handle");
+ return (EINVAL);
}
- dbenv->cl_handle = cl;
-
- if (tsec != 0) {
- tp.tv_sec = tsec;
- tp.tv_usec = 0;
- (void)clnt_control(cl, CLSET_TIMEOUT, (char *)&tp);
- }
-
- req.timeout = ssec;
/*
- * CALL THE SERVER
+ * Only create the client and set its timeout if the user
+ * did not pass us a client structure to begin with.
*/
- if ((replyp = __db_env_create_1(&req, cl)) == NULL) {
- __db_err(dbenv, clnt_sperror(cl, "Berkeley DB"));
- return (DB_NOSERVER);
+ if (clnt == NULL) {
+ if ((cl = clnt_create((char *)host, DB_RPC_SERVERPROG,
+ DB_RPC_SERVERVERS, "tcp")) == NULL) {
+ __db_err(dbenv, clnt_spcreateerror((char *)host));
+ return (DB_NOSERVER);
+ }
+ if (tsec != 0) {
+ tp.tv_sec = tsec;
+ tp.tv_usec = 0;
+ (void)clnt_control(cl, CLSET_TIMEOUT, (char *)&tp);
+ }
+ } else {
+ cl = (CLIENT *)clnt;
+ F_SET(dbenv, DB_ENV_RPCCLIENT_GIVEN);
}
+ dbenv->cl_handle = cl;
- /*
- * Process reply and free up our space from request
- * SUCCESS: Store ID from server.
- */
- if ((ret = replyp->status) != 0)
+ return (__dbcl_env_create(dbenv, ssec));
+}
+
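A hedged sketch of how an application reaches __dbcl_envrpcserver: create the environment as an RPC client and name the server before opening it. This assumes the DB_RPCCLIENT flag to db_env_create() and the DB_ENV->set_rpc_server() method of this release; "dbhost" and the zero timeouts are placeholders. DB_THREAD is deliberately omitted, since the wrapper below rejects it for RPC clients.

#include <stdio.h>
#include <db.h>

int
rpc_client_open(DB_ENV **dbenvp, const char *home)
{
	DB_ENV *dbenv;
	int ret;

	if ((ret = db_env_create(&dbenv, DB_RPCCLIENT)) != 0)
		return (ret);

	/* NULL client: let DB create the TCP connection itself. */
	if ((ret = dbenv->set_rpc_server(dbenv,
	    NULL, "dbhost", 0, 0, 0)) != 0 ||
	    (ret = dbenv->open(dbenv, home, DB_CREATE | DB_INIT_LOCK |
	    DB_INIT_LOG | DB_INIT_MPOOL | DB_INIT_TXN, 0)) != 0) {
		(void)dbenv->close(dbenv, 0);
		return (ret);
	}

	*dbenvp = dbenv;
	return (0);
}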
+/*
+ * __dbcl_env_open_wrap --
+ * Wrapper function for DB_ENV->open function for clients.
+ * We need a wrapper function to deal with DB_USE_ENVIRON* flags
+ * and we don't want to complicate the generated code for env_open.
+ *
+ * PUBLIC: int __dbcl_env_open_wrap
+ * PUBLIC: __P((DB_ENV *, const char *, u_int32_t, int));
+ */
+int
+__dbcl_env_open_wrap(dbenv, home, flags, mode)
+ DB_ENV * dbenv;
+ const char * home;
+ u_int32_t flags;
+ int mode;
+{
+ int ret;
+
+ if (LF_ISSET(DB_THREAD)) {
+ __db_err(dbenv, "DB_THREAD not allowed on RPC clients");
+ return (EINVAL);
+ }
+ if ((ret = __db_home(dbenv, home, flags)) != 0)
return (ret);
+ return (__dbcl_env_open(dbenv, dbenv->db_home, flags, mode));
+}
- dbenv->cl_id = replyp->envcl_id;
- return (0);
+/*
+ * __dbcl_db_open_wrap --
+ * Wrapper function for DB->open function for clients.
+ * We need a wrapper function to error on the DB_THREAD flag,
+ * and we don't want to complicate the generated code.
+ *
+ * PUBLIC: int __dbcl_db_open_wrap
+ * PUBLIC: __P((DB *, DB_TXN *, const char *, const char *,
+ * PUBLIC: DBTYPE, u_int32_t, int));
+ */
+int
+__dbcl_db_open_wrap(dbp, txnp, name, subdb, type, flags, mode)
+ DB * dbp;
+ DB_TXN * txnp;
+ const char * name;
+ const char * subdb;
+ DBTYPE type;
+ u_int32_t flags;
+ int mode;
+{
+ if (LF_ISSET(DB_THREAD)) {
+ __db_err(dbp->dbenv, "DB_THREAD not allowed on RPC clients");
+ return (EINVAL);
+ }
+ return (__dbcl_db_open(dbp, txnp, name, subdb, type, flags, mode));
}
/*
@@ -114,17 +170,50 @@ __dbcl_refresh(dbenv)
ret = __dbcl_txn_close(dbenv);
dbenv->tx_handle = NULL;
}
- if (cl != NULL)
+ if (!F_ISSET(dbenv, DB_ENV_RPCCLIENT_GIVEN) && cl != NULL)
clnt_destroy(cl);
dbenv->cl_handle = NULL;
+ if (dbenv->db_home != NULL) {
+ __os_free(dbenv, dbenv->db_home);
+ dbenv->db_home = NULL;
+ }
+ return (ret);
+}
+
+/*
+ * __dbcl_retcopy --
+ * Copy the returned data into the user's DBT, handling allocation flags,
+ * but not DB_DBT_PARTIAL.
+ *
+ * PUBLIC: int __dbcl_retcopy __P((DB_ENV *, DBT *,
+ * PUBLIC: void *, u_int32_t, void **, u_int32_t *));
+ */
+int
+__dbcl_retcopy(dbenv, dbt, data, len, memp, memsize)
+ DB_ENV *dbenv;
+ DBT *dbt;
+ void *data;
+ u_int32_t len;
+ void **memp;
+ u_int32_t *memsize;
+{
+ int ret;
+ u_int32_t orig_flags;
+
+ /*
+ * The RPC server handles DB_DBT_PARTIAL, so we mask it out here to
+ * avoid the handling of partials in __db_retcopy.
+ */
+ orig_flags = dbt->flags;
+ F_CLR(dbt, DB_DBT_PARTIAL);
+ ret = __db_retcopy(dbenv, dbt, data, len, memp, memsize);
+ dbt->flags = orig_flags;
return (ret);
}
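A standalone sketch of the save/clear/restore pattern __dbcl_retcopy uses on dbt->flags: mask out one behavior bit around a call, then put the caller's flags back untouched. The flag values and struct below are made up stand-ins for DB_DBT_PARTIAL and the DBT.

#include <stdio.h>

#define FLAG_PARTIAL	0x01	/* hypothetical stand-in for DB_DBT_PARTIAL */
#define FLAG_MALLOC	0x02

struct fake_dbt {
	unsigned int flags;
};

static void
copy_without_partial(struct fake_dbt *dbt)
{
	unsigned int orig_flags;

	orig_flags = dbt->flags;
	dbt->flags &= ~FLAG_PARTIAL;	/* the callee never sees PARTIAL */
	/* ... call the copy routine here ... */
	dbt->flags = orig_flags;	/* caller's flags are preserved */
}

int
main(void)
{
	struct fake_dbt dbt = { FLAG_PARTIAL | FLAG_MALLOC };

	copy_without_partial(&dbt);
	printf("flags restored: %#x\n", dbt.flags);
	return (0);
}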
/*
* __dbcl_txn_close --
* Clean up an environment's transactions.
- *
- * PUBLIC: int __dbcl_txn_close __P((DB_ENV *));
*/
int
__dbcl_txn_close(dbenv)
@@ -147,7 +236,7 @@ __dbcl_txn_close(dbenv)
while ((txnp = TAILQ_FIRST(&tmgrp->txn_chain)) != NULL)
__dbcl_txn_end(txnp);
- __os_free(tmgrp, sizeof(*tmgrp));
+ __os_free(dbenv, tmgrp);
return (ret);
}
@@ -187,18 +276,57 @@ __dbcl_txn_end(txnp)
if (txnp->parent != NULL)
TAILQ_REMOVE(&txnp->parent->kids, txnp, klinks);
TAILQ_REMOVE(&mgr->txn_chain, txnp, links);
- __os_free(txnp, sizeof(*txnp));
+ __os_free(dbenv, txnp);
+}
- return;
+/*
+ * __dbcl_txn_setup --
+ * Setup a client transaction structure.
+ *
+ * PUBLIC: void __dbcl_txn_setup __P((DB_ENV *, DB_TXN *, DB_TXN *, u_int32_t));
+ */
+void
+__dbcl_txn_setup(dbenv, txn, parent, id)
+ DB_ENV *dbenv;
+ DB_TXN *txn;
+ DB_TXN *parent;
+ u_int32_t id;
+{
+ txn->mgrp = dbenv->tx_handle;
+ txn->parent = parent;
+ txn->txnid = id;
+
+ /*
+ * XXX
+ * In the DB library the txn_chain is protected by the mgrp->mutexp.
+ * However, that mutex is implemented in the environment's shared
+ * memory region. The client library does not support the region
+ * code; region operations just get forwarded to the server.
+ * Therefore, the chain is unprotected here, but properly protected
+ * on the server.
+ */
+ TAILQ_INSERT_TAIL(&txn->mgrp->txn_chain, txn, links);
+
+ TAILQ_INIT(&txn->kids);
+
+ if (parent != NULL)
+ TAILQ_INSERT_HEAD(&parent->kids, txn, klinks);
+
+ txn->abort = __dbcl_txn_abort;
+ txn->commit = __dbcl_txn_commit;
+ txn->discard = __dbcl_txn_discard;
+ txn->id = __txn_id;
+ txn->prepare = __dbcl_txn_prepare;
+ txn->set_timeout = __dbcl_txn_timeout;
+
+ txn->flags = TXN_MALLOC;
}
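A standalone sketch of the <sys/queue.h> TAILQ idiom __dbcl_txn_setup relies on for the txn_chain and kids lists, assuming a platform that ships sys/queue.h; the element type here is invented.

#include <stdio.h>
#include <sys/queue.h>

struct toy_txn {
	int id;
	TAILQ_ENTRY(toy_txn) links;	/* chain linkage lives in the element */
};

TAILQ_HEAD(toy_chain, toy_txn);

int
main(void)
{
	struct toy_chain chain;
	struct toy_txn a = { 1 }, b = { 2 }, *t;

	TAILQ_INIT(&chain);
	TAILQ_INSERT_TAIL(&chain, &a, links);
	TAILQ_INSERT_TAIL(&chain, &b, links);

	TAILQ_FOREACH(t, &chain, links)
		printf("txn %d\n", t->id);

	TAILQ_REMOVE(&chain, &a, links);
	printf("first now %d\n", TAILQ_FIRST(&chain)->id);
	return (0);
}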
/*
* __dbcl_c_destroy --
* Destroy a cursor.
- *
- * PUBLIC: int __dbcl_c_destroy __P((DBC *));
*/
-int
+static int
__dbcl_c_destroy(dbc)
DBC *dbc;
{
@@ -207,7 +335,14 @@ __dbcl_c_destroy(dbc)
dbp = dbc->dbp;
TAILQ_REMOVE(&dbp->free_queue, dbc, links);
- __os_free(dbc, sizeof(*dbc));
+ /* Discard any memory used to store returned data. */
+ if (dbc->my_rskey.data != NULL)
+ __os_free(dbc->dbp->dbenv, dbc->my_rskey.data);
+ if (dbc->my_rkey.data != NULL)
+ __os_free(dbc->dbp->dbenv, dbc->my_rkey.data);
+ if (dbc->my_rdata.data != NULL)
+ __os_free(dbc->dbp->dbenv, dbc->my_rdata.data);
+ __os_free(NULL, dbc);
return (0);
}
@@ -219,24 +354,23 @@ __dbcl_c_destroy(dbc)
* PUBLIC: void __dbcl_c_refresh __P((DBC *));
*/
void
-__dbcl_c_refresh(dbcp)
- DBC *dbcp;
+__dbcl_c_refresh(dbc)
+ DBC *dbc;
{
DB *dbp;
- dbp = dbcp->dbp;
- dbcp->flags = 0;
- dbcp->cl_id = 0;
+ dbp = dbc->dbp;
+ dbc->flags = 0;
+ dbc->cl_id = 0;
/*
* If dbp->cursor fails locally, we use a local dbc so that
* we can close it. In that case, dbp will be NULL.
*/
if (dbp != NULL) {
- TAILQ_REMOVE(&dbp->active_queue, dbcp, links);
- TAILQ_INSERT_TAIL(&dbp->free_queue, dbcp, links);
+ TAILQ_REMOVE(&dbp->active_queue, dbc, links);
+ TAILQ_INSERT_TAIL(&dbp->free_queue, dbc, links);
}
- return;
}
/*
@@ -246,13 +380,13 @@ __dbcl_c_refresh(dbcp)
* PUBLIC: int __dbcl_c_setup __P((long, DB *, DBC **));
*/
int
-__dbcl_c_setup(cl_id, dbp, dbcpp)
+__dbcl_c_setup(cl_id, dbp, dbcp)
long cl_id;
DB *dbp;
- DBC **dbcpp;
+ DBC **dbcp;
{
DBC *dbc, tmpdbc;
- int ret, t_ret;
+ int ret;
if ((dbc = TAILQ_FIRST(&dbp->free_queue)) != NULL)
TAILQ_REMOVE(&dbp->free_queue, dbc, links);
@@ -260,12 +394,12 @@ __dbcl_c_setup(cl_id, dbp, dbcpp)
if ((ret =
__os_calloc(dbp->dbenv, 1, sizeof(DBC), &dbc)) != 0) {
/*
- * If we die here, set up a tmp dbc to call the
- * server to shut down that cursor.
- */
+ * If we die here, set up a tmp dbc to call the
+ * server to shut down that cursor.
+ */
tmpdbc.dbp = NULL;
tmpdbc.cl_id = cl_id;
- t_ret = __dbcl_dbc_close(&tmpdbc);
+ (void)__dbcl_dbc_close(&tmpdbc);
return (ret);
}
dbc->c_close = __dbcl_dbc_close;
@@ -273,62 +407,14 @@ __dbcl_c_setup(cl_id, dbp, dbcpp)
dbc->c_del = __dbcl_dbc_del;
dbc->c_dup = __dbcl_dbc_dup;
dbc->c_get = __dbcl_dbc_get;
+ dbc->c_pget = __dbcl_dbc_pget;
dbc->c_put = __dbcl_dbc_put;
dbc->c_am_destroy = __dbcl_c_destroy;
}
dbc->cl_id = cl_id;
dbc->dbp = dbp;
TAILQ_INSERT_TAIL(&dbp->active_queue, dbc, links);
- *dbcpp = dbc;
- return (0);
-}
-
-/*
- * __dbcl_retcopy --
- * Copy the returned data into the user's DBT, handling special flags
- * as they apply to a client. Modeled after __db_retcopy().
- *
- * PUBLIC: int __dbcl_retcopy __P((DB_ENV *, DBT *, void *, u_int32_t));
- */
-int
-__dbcl_retcopy(dbenv, dbt, data, len)
- DB_ENV *dbenv;
- DBT *dbt;
- void *data;
- u_int32_t len;
-{
- int ret;
-
- /*
- * No need to handle DB_DBT_PARTIAL here, server already did.
- */
- dbt->size = len;
-
- /*
- * Allocate memory to be owned by the application: DB_DBT_MALLOC
- * and DB_DBT_REALLOC. Always allocate even if we're copying 0 bytes.
- * Or use memory specified by application: DB_DBT_USERMEM.
- */
- if (F_ISSET(dbt, DB_DBT_MALLOC)) {
- if ((ret = __os_malloc(dbenv, len, NULL, &dbt->data)) != 0)
- return (ret);
- } else if (F_ISSET(dbt, DB_DBT_REALLOC)) {
- if ((ret = __os_realloc(dbenv, len, NULL, &dbt->data)) != 0)
- return (ret);
- } else if (F_ISSET(dbt, DB_DBT_USERMEM)) {
- if (len != 0 && (dbt->data == NULL || dbt->ulen < len))
- return (ENOMEM);
- } else {
- /*
- * If no user flags, then set the DBT to point to the
- * returned data pointer and return.
- */
- dbt->data = data;
- return (0);
- }
-
- if (len != 0)
- memcpy(dbt->data, data, len);
+ *dbcp = dbc;
return (0);
}
@@ -363,9 +449,16 @@ __dbcl_dbclose_common(dbp)
TAILQ_INIT(&dbp->free_queue);
TAILQ_INIT(&dbp->active_queue);
+ /* Discard any memory used to store returned data. */
+ if (dbp->my_rskey.data != NULL)
+ __os_free(dbp->dbenv, dbp->my_rskey.data);
+ if (dbp->my_rkey.data != NULL)
+ __os_free(dbp->dbenv, dbp->my_rkey.data);
+ if (dbp->my_rdata.data != NULL)
+ __os_free(dbp->dbenv, dbp->my_rdata.data);
memset(dbp, CLEAR_BYTE, sizeof(*dbp));
- __os_free(dbp, sizeof(*dbp));
+ __os_free(NULL, dbp);
return (ret);
}
#endif /* HAVE_RPC */
diff --git a/bdb/rpc_client/gen_client_ret.c b/bdb/rpc_client/gen_client_ret.c
index 17e3f195fc3..f35589738cd 100644
--- a/bdb/rpc_client/gen_client_ret.c
+++ b/bdb/rpc_client/gen_client_ret.c
@@ -1,14 +1,14 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2000
- * Sleepycat Software. All rights reserved.
+ * Copyright (c) 2000-2002
+ * Sleepycat Software. All rights reserved.
*/
#include "db_config.h"
#ifndef lint
-static const char revid[] = "$Id: gen_client_ret.c,v 1.29 2000/12/31 19:26:23 bostic Exp $";
+static const char revid[] = "$Id: gen_client_ret.c,v 1.57 2002/08/06 06:18:37 bostic Exp $";
#endif /* not lint */
#ifdef HAVE_RPC
@@ -19,17 +19,19 @@ static const char revid[] = "$Id: gen_client_ret.c,v 1.29 2000/12/31 19:26:23 bo
#include <string.h>
#endif
-#include "db_server.h"
#include "db_int.h"
-#include "db_page.h"
-#include "txn.h"
-#include "db_ext.h"
-#include "rpc_client_ext.h"
+#include "dbinc/db_page.h"
+#include "dbinc/db_am.h"
+#include "dbinc/txn.h"
-static void __db_db_stat_statsfree __P((u_int32_t *));
-static int __db_db_stat_statslist __P((__db_stat_statsreplist *, u_int32_t **));
+#include "dbinc_auto/db_server.h"
+#include "dbinc_auto/rpc_client_ext.h"
+/*
+ * PUBLIC: int __dbcl_env_close_ret
+ * PUBLIC: __P((DB_ENV *, u_int32_t, __env_close_reply *));
+ */
int
__dbcl_env_close_ret(dbenv, flags, replyp)
DB_ENV *dbenv;
@@ -41,13 +43,36 @@ __dbcl_env_close_ret(dbenv, flags, replyp)
COMPQUIET(flags, 0);
ret = __dbcl_refresh(dbenv);
- __os_free(dbenv, sizeof(*dbenv));
+ __os_free(NULL, dbenv);
if (replyp->status == 0 && ret != 0)
return (ret);
else
return (replyp->status);
}
+/*
+ * PUBLIC: int __dbcl_env_create_ret
+ * PUBLIC: __P((DB_ENV *, long, __env_create_reply *));
+ */
+int
+__dbcl_env_create_ret(dbenv, timeout, replyp)
+ DB_ENV * dbenv;
+ long timeout;
+ __env_create_reply *replyp;
+{
+
+ COMPQUIET(timeout, 0);
+
+ if (replyp->status != 0)
+ return (replyp->status);
+ dbenv->cl_id = replyp->envcl_id;
+ return (replyp->status);
+}
+
+/*
+ * PUBLIC: int __dbcl_env_open_ret __P((DB_ENV *,
+ * PUBLIC: const char *, u_int32_t, int, __env_open_reply *));
+ */
int
__dbcl_env_open_ret(dbenv, home, flags, mode, replyp)
DB_ENV *dbenv;
@@ -68,6 +93,7 @@ __dbcl_env_open_ret(dbenv, home, flags, mode, replyp)
if (replyp->status != 0)
return (replyp->status);
+ dbenv->cl_id = replyp->envcl_id;
/*
* If the user requested transactions, then we have some
* local client-side setup to do also.
@@ -84,6 +110,10 @@ __dbcl_env_open_ret(dbenv, home, flags, mode, replyp)
return (replyp->status);
}
+/*
+ * PUBLIC: int __dbcl_env_remove_ret
+ * PUBLIC: __P((DB_ENV *, const char *, u_int32_t, __env_remove_reply *));
+ */
int
__dbcl_env_remove_ret(dbenv, home, flags, replyp)
DB_ENV *dbenv;
@@ -97,13 +127,16 @@ __dbcl_env_remove_ret(dbenv, home, flags, replyp)
COMPQUIET(flags, 0);
ret = __dbcl_refresh(dbenv);
- __os_free(dbenv, sizeof(*dbenv));
+ __os_free(NULL, dbenv);
if (replyp->status == 0 && ret != 0)
return (ret);
else
return (replyp->status);
}
+/*
+ * PUBLIC: int __dbcl_txn_abort_ret __P((DB_TXN *, __txn_abort_reply *));
+ */
int
__dbcl_txn_abort_ret(txnp, replyp)
DB_TXN *txnp;
@@ -113,6 +146,10 @@ __dbcl_txn_abort_ret(txnp, replyp)
return (replyp->status);
}
+/*
+ * PUBLIC: int __dbcl_txn_begin_ret __P((DB_ENV *,
+ * PUBLIC: DB_TXN *, DB_TXN **, u_int32_t, __txn_begin_reply *));
+ */
int
__dbcl_txn_begin_ret(envp, parent, txnpp, flags, replyp)
DB_ENV *envp;
@@ -130,29 +167,15 @@ __dbcl_txn_begin_ret(envp, parent, txnpp, flags, replyp)
if ((ret = __os_calloc(envp, 1, sizeof(DB_TXN), &txn)) != 0)
return (ret);
- txn->txnid = replyp->txnidcl_id;
- txn->mgrp = envp->tx_handle;
- txn->parent = parent;
- TAILQ_INIT(&txn->kids);
- txn->flags = TXN_MALLOC;
- if (parent != NULL)
- TAILQ_INSERT_HEAD(&parent->kids, txn, klinks);
-
- /*
- * XXX
- * In DB library the txn_chain is protected by the mgrp->mutexp.
- * However, that mutex is implemented in the environments shared
- * memory region. The client library does not support all of the
- * region - that just get forwarded to the server. Therefore,
- * the chain is unprotected here, but properly protected on the
- * server.
- */
- TAILQ_INSERT_TAIL(&txn->mgrp->txn_chain, txn, links);
-
+ __dbcl_txn_setup(envp, txn, parent, replyp->txnidcl_id);
*txnpp = txn;
return (replyp->status);
}
+/*
+ * PUBLIC: int __dbcl_txn_commit_ret
+ * PUBLIC: __P((DB_TXN *, u_int32_t, __txn_commit_reply *));
+ */
int
__dbcl_txn_commit_ret(txnp, flags, replyp)
DB_TXN *txnp;
@@ -165,6 +188,83 @@ __dbcl_txn_commit_ret(txnp, flags, replyp)
return (replyp->status);
}
+/*
+ * PUBLIC: int __dbcl_txn_discard_ret __P((DB_TXN *, u_int32_t,
+ * PUBLIC: __txn_discard_reply *));
+ */
+int
+__dbcl_txn_discard_ret(txnp, flags, replyp)
+ DB_TXN * txnp;
+ u_int32_t flags;
+ __txn_discard_reply *replyp;
+{
+ COMPQUIET(flags, 0);
+
+ __dbcl_txn_end(txnp);
+ return (replyp->status);
+}
+
+/*
+ * PUBLIC: int __dbcl_txn_recover_ret __P((DB_ENV *, DB_PREPLIST *, long,
+ * PUBLIC: long *, u_int32_t, __txn_recover_reply *));
+ */
+int
+__dbcl_txn_recover_ret(dbenv, preplist, count, retp, flags, replyp)
+ DB_ENV * dbenv;
+ DB_PREPLIST * preplist;
+ long count;
+ long * retp;
+ u_int32_t flags;
+ __txn_recover_reply *replyp;
+{
+ DB_PREPLIST *prep;
+ DB_TXN *txnarray, *txn;
+ u_int32_t i, *txnid;
+ int ret;
+ u_int8_t *gid;
+
+ COMPQUIET(flags, 0);
+ COMPQUIET(count, 0);
+
+ if (replyp->status != 0)
+ return (replyp->status);
+
+ *retp = (long) replyp->retcount;
+
+ if (replyp->retcount == 0)
+ return (replyp->status);
+
+ if ((ret = __os_calloc(dbenv, replyp->retcount, sizeof(DB_TXN),
+ &txnarray)) != 0)
+ return (ret);
+ /*
+ * We have a bunch of arrays that need to iterate in
+ * lockstep with each other.
+ */
+ i = 0;
+ txn = txnarray;
+ txnid = (u_int32_t *)replyp->txn.txn_val;
+ gid = (u_int8_t *)replyp->gid.gid_val;
+ prep = preplist;
+ while (i++ < replyp->retcount) {
+ __dbcl_txn_setup(dbenv, txn, NULL, *txnid);
+ prep->txn = txn;
+ memcpy(&prep->gid, gid, DB_XIDDATASIZE);
+ /*
+ * Now increment all our array pointers.
+ */
+ txn++;
+ gid += DB_XIDDATASIZE;
+ txnid++;
+ prep++;
+ }
+
+ return (0);
+}
+
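A standalone toy of the lockstep walk in __dbcl_txn_recover_ret: several parallel arrays, one element per recovered transaction, advanced together with a single index. The arrays below are invented stand-ins for the txn id and global id arrays in the reply, and GIDSIZE stands in for DB_XIDDATASIZE.

#include <stdio.h>

#define NTXNS	3
#define GIDSIZE	4	/* stand-in for DB_XIDDATASIZE */

int
main(void)
{
	unsigned int txnid[NTXNS] = { 101, 102, 103 };
	char gid[NTXNS * GIDSIZE] = {
		'a','a','a','a', 'b','b','b','b', 'c','c','c','c'
	};
	unsigned int i, *idp = txnid;
	char *gp = gid;

	/* One index, all pointers advanced together. */
	for (i = 0; i < NTXNS; i++, idp++, gp += GIDSIZE)
		printf("txn %u gid %.4s\n", *idp, gp);
	return (0);
}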
+/*
+ * PUBLIC: int __dbcl_db_close_ret __P((DB *, u_int32_t, __db_close_reply *));
+ */
int
__dbcl_db_close_ret(dbp, flags, replyp)
DB *dbp;
@@ -183,6 +283,30 @@ __dbcl_db_close_ret(dbp, flags, replyp)
return (ret);
}
+/*
+ * PUBLIC: int __dbcl_db_create_ret
+ * PUBLIC: __P((DB *, DB_ENV *, u_int32_t, __db_create_reply *));
+ */
+int
+__dbcl_db_create_ret(dbp, dbenv, flags, replyp)
+ DB * dbp;
+ DB_ENV * dbenv;
+ u_int32_t flags;
+ __db_create_reply *replyp;
+{
+ COMPQUIET(dbenv, NULL);
+ COMPQUIET(flags, 0);
+
+ if (replyp->status != 0)
+ return (replyp->status);
+ dbp->cl_id = replyp->dbcl_id;
+ return (replyp->status);
+}
+
+/*
+ * PUBLIC: int __dbcl_db_get_ret
+ * PUBLIC: __P((DB *, DB_TXN *, DBT *, DBT *, u_int32_t, __db_get_reply *));
+ */
int
__dbcl_db_get_ret(dbp, txnp, key, data, flags, replyp)
DB *dbp;
@@ -206,20 +330,26 @@ __dbcl_db_get_ret(dbp, txnp, key, data, flags, replyp)
oldkey = key->data;
ret = __dbcl_retcopy(dbenv, key, replyp->keydata.keydata_val,
- replyp->keydata.keydata_len);
+ replyp->keydata.keydata_len, &dbp->my_rkey.data,
+ &dbp->my_rkey.ulen);
if (ret)
return (ret);
ret = __dbcl_retcopy(dbenv, data, replyp->datadata.datadata_val,
- replyp->datadata.datadata_len);
+ replyp->datadata.datadata_len, &dbp->my_rdata.data,
+ &dbp->my_rdata.ulen);
/*
* If an error on copying 'data' and we allocated for 'key'
* free it before returning the error.
*/
if (ret && oldkey != NULL)
- __os_free(key->data, key->size);
+ __os_free(dbenv, key->data);
return (ret);
}
+/*
+ * PUBLIC: int __dbcl_db_key_range_ret __P((DB *, DB_TXN *,
+ * PUBLIC: DBT *, DB_KEY_RANGE *, u_int32_t, __db_key_range_reply *));
+ */
int
__dbcl_db_key_range_ret(dbp, txnp, key, range, flags, replyp)
DB *dbp;
@@ -242,32 +372,114 @@ __dbcl_db_key_range_ret(dbp, txnp, key, range, flags, replyp)
return (replyp->status);
}
+/*
+ * PUBLIC: int __dbcl_db_open_ret __P((DB *, DB_TXN *, const char *,
+ * PUBLIC: const char *, DBTYPE, u_int32_t, int, __db_open_reply *));
+ */
int
-__dbcl_db_open_ret(dbp, name, subdb, type, flags, mode, replyp)
+__dbcl_db_open_ret(dbp, txn, name, subdb, type, flags, mode, replyp)
DB *dbp;
+ DB_TXN *txn;
const char *name, *subdb;
DBTYPE type;
u_int32_t flags;
int mode;
__db_open_reply *replyp;
{
+ COMPQUIET(txn, NULL);
COMPQUIET(name, NULL);
COMPQUIET(subdb, NULL);
COMPQUIET(type, 0);
COMPQUIET(flags, 0);
COMPQUIET(mode, 0);
- dbp->type = replyp->type;
+ if (replyp->status == 0) {
+ dbp->cl_id = replyp->dbcl_id;
+ dbp->type = replyp->type;
+ /*
+ * We get back the database's byteorder on the server.
+ * Determine if our byteorder is the same or not by
+ * calling __db_set_lorder.
+ *
+ * XXX
+ * This MUST come before we set the flags because
+ * __db_set_lorder checks that it is called before
+ * the open flag is set.
+ */
+ (void)__db_set_lorder(dbp, replyp->lorder);
+
+ /*
+ * XXX
+ * This is only for Tcl which peeks at the dbp flags.
+ * When dbp->get_flags exists, this should go away.
+ */
+ dbp->flags = replyp->dbflags;
+ }
+ return (replyp->status);
+}
+
+/*
+ * PUBLIC: int __dbcl_db_pget_ret __P((DB *, DB_TXN *, DBT *, DBT *, DBT *,
+ * PUBLIC: u_int32_t, __db_pget_reply *));
+ */
+int
+__dbcl_db_pget_ret(dbp, txnp, skey, pkey, data, flags, replyp)
+ DB * dbp;
+ DB_TXN * txnp;
+ DBT * skey;
+ DBT * pkey;
+ DBT * data;
+ u_int32_t flags;
+ __db_pget_reply *replyp;
+{
+ DB_ENV *dbenv;
+ int ret;
+ void *oldskey, *oldpkey;
+
+ COMPQUIET(txnp, NULL);
+ COMPQUIET(flags, 0);
+ ret = 0;
+ if (replyp->status != 0)
+ return (replyp->status);
+
+ dbenv = dbp->dbenv;
+
+ oldskey = skey->data;
+ ret = __dbcl_retcopy(dbenv, skey, replyp->skeydata.skeydata_val,
+ replyp->skeydata.skeydata_len, &dbp->my_rskey.data,
+ &dbp->my_rskey.ulen);
+ if (ret)
+ return (ret);
+
+ oldpkey = pkey->data;
+ ret = __dbcl_retcopy(dbenv, pkey, replyp->pkeydata.pkeydata_val,
+ replyp->pkeydata.pkeydata_len, &dbp->my_rkey.data,
+ &dbp->my_rkey.ulen);
+ if (ret && oldskey != NULL) {
+ __os_free(dbenv, skey->data);
+ return (ret);
+ }
+ ret = __dbcl_retcopy(dbenv, data, replyp->datadata.datadata_val,
+ replyp->datadata.datadata_len, &dbp->my_rdata.data,
+ &dbp->my_rdata.ulen);
/*
- * XXX
- * This is only for Tcl which peeks at the dbp flags.
- * When dbp->get_flags exists, this should go away.
+	 * If an error occurred while copying 'data' and we allocated
+	 * memory for 'skey' or 'pkey', free them before returning the error.
*/
- dbp->flags = replyp->dbflags;
- return (replyp->status);
+ if (ret) {
+ if (oldskey != NULL)
+ __os_free(dbenv, skey->data);
+ if (oldpkey != NULL)
+ __os_free(dbenv, pkey->data);
+ }
+ return (ret);
}
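
The error handling above follows a simple unwind rule: the secondary key, primary key and data are copied in order, and a failure at a later step frees whatever the earlier steps allocated. Below is a minimal standalone sketch of that rule, with illustrative names and plain malloc in place of the DB memory routines.

#include <stdlib.h>
#include <string.h>

static int
copy_one(void **dst, const void *src, size_t len)
{
	if ((*dst = malloc(len)) == NULL)
		return (-1);
	memcpy(*dst, src, len);
	return (0);
}

/* Fill three buffers in order; on failure, undo the earlier allocations. */
static int
copy_three(void **skey, void **pkey, void **data,
    const void *s, size_t slen,
    const void *p, size_t plen,
    const void *d, size_t dlen)
{
	if (copy_one(skey, s, slen) != 0)
		return (-1);
	if (copy_one(pkey, p, plen) != 0) {
		free(*skey);		/* undo step one */
		return (-1);
	}
	if (copy_one(data, d, dlen) != 0) {
		free(*skey);		/* undo steps one and two */
		free(*pkey);
		return (-1);
	}
	return (0);
}
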
+/*
+ * PUBLIC: int __dbcl_db_put_ret
+ * PUBLIC: __P((DB *, DB_TXN *, DBT *, DBT *, u_int32_t, __db_put_reply *));
+ */
int
__dbcl_db_put_ret(dbp, txnp, key, data, flags, replyp)
DB *dbp;
@@ -289,6 +501,10 @@ __dbcl_db_put_ret(dbp, txnp, key, data, flags, replyp)
return (ret);
}
+/*
+ * PUBLIC: int __dbcl_db_remove_ret __P((DB *,
+ * PUBLIC: const char *, const char *, u_int32_t, __db_remove_reply *));
+ */
int
__dbcl_db_remove_ret(dbp, name, subdb, flags, replyp)
DB *dbp;
@@ -310,12 +526,16 @@ __dbcl_db_remove_ret(dbp, name, subdb, flags, replyp)
return (ret);
}
+/*
+ * PUBLIC: int __dbcl_db_rename_ret __P((DB *, const char *,
+ * PUBLIC: const char *, const char *, u_int32_t, __db_rename_reply *));
+ */
int
__dbcl_db_rename_ret(dbp, name, subdb, newname, flags, replyp)
DB *dbp;
const char *name, *subdb, *newname;
u_int32_t flags;
- __db_remove_reply *replyp;
+ __db_rename_reply *replyp;
{
int ret;
@@ -332,82 +552,66 @@ __dbcl_db_rename_ret(dbp, name, subdb, newname, flags, replyp)
return (ret);
}
+/*
+ * PUBLIC: int __dbcl_db_stat_ret
+ * PUBLIC: __P((DB *, void *, u_int32_t, __db_stat_reply *));
+ */
int
-__dbcl_db_stat_ret(dbp, sp, func, flags, replyp)
+__dbcl_db_stat_ret(dbp, sp, flags, replyp)
DB *dbp;
void *sp;
- void *(*func) __P((size_t));
u_int32_t flags;
__db_stat_reply *replyp;
{
- int ret;
- u_int32_t *__db_statslist;
+ int len, ret;
+ u_int32_t i, *q, *p, *retsp;
- COMPQUIET(dbp, NULL);
- COMPQUIET(func, NULL);
COMPQUIET(flags, 0);
- if (replyp->status != 0)
+ if (replyp->status != 0 || sp == NULL)
return (replyp->status);
- if ((ret =
- __db_db_stat_statslist(replyp->statslist, &__db_statslist)) != 0)
- return (ret);
-
- if (sp == NULL)
- __db_db_stat_statsfree(__db_statslist);
- else
- *(u_int32_t **)sp = __db_statslist;
- return (replyp->status);
-}
-
-static int
-__db_db_stat_statslist(locp, ppp)
- __db_stat_statsreplist *locp;
- u_int32_t **ppp;
-{
- u_int32_t *pp;
- int cnt, ret, size;
- __db_stat_statsreplist *nl;
-
- for (cnt = 0, nl = locp; nl != NULL; cnt++, nl = nl->next)
- ;
-
- if (cnt == 0) {
- *ppp = NULL;
- return (0);
- }
- size = sizeof(*pp) * cnt;
- if ((ret = __os_malloc(NULL, size, NULL, ppp)) != 0)
+ len = replyp->stats.stats_len * sizeof(u_int32_t);
+ if ((ret = __os_umalloc(dbp->dbenv, len, &retsp)) != 0)
return (ret);
- memset(*ppp, 0, size);
- for (pp = *ppp, nl = locp; nl != NULL; nl = nl->next, pp++) {
- *pp = *(u_int32_t *)nl->ent.ent_val;
- }
+ for (i = 0, q = retsp, p = (u_int32_t *)replyp->stats.stats_val;
+ i < replyp->stats.stats_len; i++, q++, p++)
+ *q = *p;
+ *(u_int32_t **)sp = retsp;
return (0);
}
-static void
-__db_db_stat_statsfree(pp)
- u_int32_t *pp;
+/*
+ * PUBLIC: int __dbcl_db_truncate_ret __P((DB *, DB_TXN *, u_int32_t *,
+ * PUBLIC: u_int32_t, __db_truncate_reply *));
+ */
+int
+__dbcl_db_truncate_ret(dbp, txnp, countp, flags, replyp)
+ DB *dbp;
+ DB_TXN *txnp;
+ u_int32_t *countp, flags;
+ __db_truncate_reply *replyp;
{
- size_t size;
- u_int32_t *p;
+ COMPQUIET(dbp, NULL);
+ COMPQUIET(txnp, NULL);
+ COMPQUIET(flags, 0);
- if (pp == NULL)
- return;
- size = sizeof(*p);
- for (p = pp; *p != 0; p++)
- size += sizeof(*p);
+ if (replyp->status != 0)
+ return (replyp->status);
+ *countp = replyp->count;
- __os_free(pp, size);
+ return (replyp->status);
}
+/*
+ * PUBLIC: int __dbcl_db_cursor_ret
+ * PUBLIC: __P((DB *, DB_TXN *, DBC **, u_int32_t, __db_cursor_reply *));
+ */
int
-__dbcl_db_cursor_ret(dbp, txnp, dbcpp, flags, replyp)
+__dbcl_db_cursor_ret(dbp, txnp, dbcp, flags, replyp)
DB *dbp;
DB_TXN *txnp;
- DBC **dbcpp;
+ DBC **dbcp;
u_int32_t flags;
__db_cursor_reply *replyp;
{
@@ -417,13 +621,17 @@ __dbcl_db_cursor_ret(dbp, txnp, dbcpp, flags, replyp)
if (replyp->status != 0)
return (replyp->status);
- return (__dbcl_c_setup(replyp->dbcidcl_id, dbp, dbcpp));
+ return (__dbcl_c_setup(replyp->dbcidcl_id, dbp, dbcp));
}
+/*
+ * PUBLIC: int __dbcl_db_join_ret
+ * PUBLIC: __P((DB *, DBC **, DBC **, u_int32_t, __db_join_reply *));
+ */
int
-__dbcl_db_join_ret(dbp, curs, dbcpp, flags, replyp)
+__dbcl_db_join_ret(dbp, curs, dbcp, flags, replyp)
DB *dbp;
- DBC **curs, **dbcpp;
+ DBC **curs, **dbcp;
u_int32_t flags;
__db_join_reply *replyp;
{
@@ -440,21 +648,25 @@ __dbcl_db_join_ret(dbp, curs, dbcpp, flags, replyp)
* client-side cursor/db relationship to know what cursors
* are open in the db, and to store their ID. Nothing else.
*/
- return (__dbcl_c_setup(replyp->dbcidcl_id, dbp, dbcpp));
+ return (__dbcl_c_setup(replyp->dbcidcl_id, dbp, dbcp));
}
+/*
+ * PUBLIC: int __dbcl_dbc_close_ret __P((DBC *, __dbc_close_reply *));
+ */
int
-__dbcl_dbc_close_ret(dbcp, replyp)
- DBC *dbcp;
+__dbcl_dbc_close_ret(dbc, replyp)
+ DBC *dbc;
__dbc_close_reply *replyp;
{
- DB *dbp;
-
- dbp = dbcp->dbp;
- __dbcl_c_refresh(dbcp);
+ __dbcl_c_refresh(dbc);
return (replyp->status);
}
+/*
+ * PUBLIC: int __dbcl_dbc_count_ret
+ * PUBLIC: __P((DBC *, db_recno_t *, u_int32_t, __dbc_count_reply *));
+ */
int
__dbcl_dbc_count_ret(dbc, countp, flags, replyp)
DBC *dbc;
@@ -472,9 +684,13 @@ __dbcl_dbc_count_ret(dbc, countp, flags, replyp)
return (replyp->status);
}
+/*
+ * PUBLIC: int __dbcl_dbc_dup_ret
+ * PUBLIC: __P((DBC *, DBC **, u_int32_t, __dbc_dup_reply *));
+ */
int
-__dbcl_dbc_dup_ret(dbcp, dbcpp, flags, replyp)
- DBC *dbcp, **dbcpp;
+__dbcl_dbc_dup_ret(dbc, dbcp, flags, replyp)
+ DBC *dbc, **dbcp;
u_int32_t flags;
__dbc_dup_reply *replyp;
{
@@ -483,12 +699,16 @@ __dbcl_dbc_dup_ret(dbcp, dbcpp, flags, replyp)
if (replyp->status != 0)
return (replyp->status);
- return (__dbcl_c_setup(replyp->dbcidcl_id, dbcp->dbp, dbcpp));
+ return (__dbcl_c_setup(replyp->dbcidcl_id, dbc->dbp, dbcp));
}
+/*
+ * PUBLIC: int __dbcl_dbc_get_ret
+ * PUBLIC: __P((DBC *, DBT *, DBT *, u_int32_t, __dbc_get_reply *));
+ */
int
-__dbcl_dbc_get_ret(dbcp, key, data, flags, replyp)
- DBC *dbcp;
+__dbcl_dbc_get_ret(dbc, key, data, flags, replyp)
+ DBC *dbc;
DBT *key, *data;
u_int32_t flags;
__dbc_get_reply *replyp;
@@ -503,27 +723,89 @@ __dbcl_dbc_get_ret(dbcp, key, data, flags, replyp)
if (replyp->status != 0)
return (replyp->status);
- dbenv = dbcp->dbp->dbenv;
+ dbenv = dbc->dbp->dbenv;
oldkey = key->data;
ret = __dbcl_retcopy(dbenv, key, replyp->keydata.keydata_val,
- replyp->keydata.keydata_len);
+ replyp->keydata.keydata_len, &dbc->my_rkey.data,
+ &dbc->my_rkey.ulen);
if (ret)
return (ret);
ret = __dbcl_retcopy(dbenv, data, replyp->datadata.datadata_val,
- replyp->datadata.datadata_len);
+ replyp->datadata.datadata_len, &dbc->my_rdata.data,
+ &dbc->my_rdata.ulen);
/*
 	 * If an error occurred while copying 'data' and we allocated
 	 * memory for 'key', free it before returning the error.
*/
if (ret && oldkey != NULL)
- __os_free(key->data, key->size);
+ __os_free(dbenv, key->data);
return (ret);
}
+/*
+ * PUBLIC: int __dbcl_dbc_pget_ret __P((DBC *, DBT *, DBT *, DBT *, u_int32_t,
+ * PUBLIC: __dbc_pget_reply *));
+ */
int
-__dbcl_dbc_put_ret(dbcp, key, data, flags, replyp)
- DBC *dbcp;
+__dbcl_dbc_pget_ret(dbc, skey, pkey, data, flags, replyp)
+ DBC * dbc;
+ DBT * skey;
+ DBT * pkey;
+ DBT * data;
+ u_int32_t flags;
+ __dbc_pget_reply *replyp;
+{
+ DB_ENV *dbenv;
+ int ret;
+ void *oldskey, *oldpkey;
+
+ COMPQUIET(flags, 0);
+
+ ret = 0;
+ if (replyp->status != 0)
+ return (replyp->status);
+
+ dbenv = dbc->dbp->dbenv;
+
+ oldskey = skey->data;
+ ret = __dbcl_retcopy(dbenv, skey, replyp->skeydata.skeydata_val,
+ replyp->skeydata.skeydata_len, &dbc->my_rskey.data,
+ &dbc->my_rskey.ulen);
+ if (ret)
+ return (ret);
+
+ oldpkey = pkey->data;
+ ret = __dbcl_retcopy(dbenv, pkey, replyp->pkeydata.pkeydata_val,
+ replyp->pkeydata.pkeydata_len, &dbc->my_rkey.data,
+ &dbc->my_rkey.ulen);
+ if (ret && oldskey != NULL) {
+ __os_free(dbenv, skey->data);
+ return (ret);
+ }
+ ret = __dbcl_retcopy(dbenv, data, replyp->datadata.datadata_val,
+ replyp->datadata.datadata_len, &dbc->my_rdata.data,
+ &dbc->my_rdata.ulen);
+ /*
+	 * If an error occurred while copying 'data' and we allocated
+	 * memory for 'skey' or 'pkey', free them before returning the error.
+ */
+ if (ret) {
+ if (oldskey != NULL)
+ __os_free(dbenv, skey->data);
+ if (oldpkey != NULL)
+ __os_free(dbenv, pkey->data);
+ }
+ return (ret);
+}
+
+/*
+ * PUBLIC: int __dbcl_dbc_put_ret
+ * PUBLIC: __P((DBC *, DBT *, DBT *, u_int32_t, __dbc_put_reply *));
+ */
+int
+__dbcl_dbc_put_ret(dbc, key, data, flags, replyp)
+ DBC *dbc;
DBT *key, *data;
u_int32_t flags;
__dbc_put_reply *replyp;
@@ -533,7 +815,7 @@ __dbcl_dbc_put_ret(dbcp, key, data, flags, replyp)
if (replyp->status != 0)
return (replyp->status);
- if (replyp->status == 0 && dbcp->dbp->type == DB_RECNO &&
+ if (replyp->status == 0 && dbc->dbp->type == DB_RECNO &&
(flags == DB_AFTER || flags == DB_BEFORE))
*(db_recno_t *)key->data =
*(db_recno_t *)replyp->keydata.keydata_val;
diff --git a/bdb/rpc_server/c/db_server_proc.c b/bdb/rpc_server/c/db_server_proc.c
new file mode 100644
index 00000000000..d5d1f49508a
--- /dev/null
+++ b/bdb/rpc_server/c/db_server_proc.c
@@ -0,0 +1,2500 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifdef HAVE_RPC
+#ifndef lint
+static const char revid[] = "$Id: db_server_proc.c,v 1.92 2002/07/29 15:21:20 sue Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <rpc/rpc.h>
+
+#include <string.h>
+#endif
+#include "dbinc_auto/db_server.h"
+
+#include "db_int.h"
+#include "dbinc/db_server_int.h"
+#include "dbinc_auto/rpc_server_ext.h"
+
+/* BEGIN __env_cachesize_proc */
+/*
+ * PUBLIC: void __env_cachesize_proc __P((long, u_int32_t, u_int32_t,
+ * PUBLIC: u_int32_t, __env_cachesize_reply *));
+ */
+void
+__env_cachesize_proc(dbenvcl_id, gbytes, bytes,
+ ncache, replyp)
+ long dbenvcl_id;
+ u_int32_t gbytes;
+ u_int32_t bytes;
+ u_int32_t ncache;
+ __env_cachesize_reply *replyp;
+/* END __env_cachesize_proc */
+{
+ DB_ENV *dbenv;
+ ct_entry *dbenv_ctp;
+ int ret;
+
+ ACTIVATE_CTP(dbenv_ctp, dbenvcl_id, CT_ENV);
+ dbenv = (DB_ENV *)dbenv_ctp->ct_anyp;
+
+ ret = dbenv->set_cachesize(dbenv, gbytes, bytes, ncache);
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __env_close_proc */
+/*
+ * PUBLIC: void __env_close_proc __P((long, u_int32_t, __env_close_reply *));
+ */
+void
+__env_close_proc(dbenvcl_id, flags, replyp)
+ long dbenvcl_id;
+ u_int32_t flags;
+ __env_close_reply *replyp;
+/* END __env_close_proc */
+{
+ ct_entry *dbenv_ctp;
+
+ ACTIVATE_CTP(dbenv_ctp, dbenvcl_id, CT_ENV);
+ replyp->status = __dbenv_close_int(dbenvcl_id, flags, 0);
+ return;
+}
+
+/* BEGIN __env_create_proc */
+/*
+ * PUBLIC: void __env_create_proc __P((u_int32_t, __env_create_reply *));
+ */
+void
+__env_create_proc(timeout, replyp)
+ u_int32_t timeout;
+ __env_create_reply *replyp;
+/* END __env_create_proc */
+{
+ DB_ENV *dbenv;
+ ct_entry *ctp;
+ int ret;
+
+ ctp = new_ct_ent(&replyp->status);
+ if (ctp == NULL)
+ return;
+ if ((ret = db_env_create(&dbenv, 0)) == 0) {
+ ctp->ct_envp = dbenv;
+ ctp->ct_type = CT_ENV;
+ ctp->ct_parent = NULL;
+ ctp->ct_envparent = ctp;
+ __dbsrv_settimeout(ctp, timeout);
+ __dbsrv_active(ctp);
+ replyp->envcl_id = ctp->ct_id;
+ } else
+ __dbclear_ctp(ctp);
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __env_dbremove_proc */
+/*
+ * PUBLIC: void __env_dbremove_proc __P((long, long, char *, char *, u_int32_t,
+ * PUBLIC: __env_dbremove_reply *));
+ */
+void
+__env_dbremove_proc(dbenvcl_id, txnpcl_id, name,
+ subdb, flags, replyp)
+ long dbenvcl_id;
+ long txnpcl_id;
+ char *name;
+ char *subdb;
+ u_int32_t flags;
+ __env_dbremove_reply *replyp;
+/* END __env_dbremove_proc */
+{
+ int ret;
+ DB_ENV * dbenv;
+ ct_entry *dbenv_ctp;
+ DB_TXN * txnp;
+ ct_entry *txnp_ctp;
+
+ ACTIVATE_CTP(dbenv_ctp, dbenvcl_id, CT_ENV);
+ dbenv = (DB_ENV *)dbenv_ctp->ct_anyp;
+
+ if (txnpcl_id != 0) {
+ ACTIVATE_CTP(txnp_ctp, txnpcl_id, CT_TXN);
+ txnp = (DB_TXN *)txnp_ctp->ct_anyp;
+ } else
+ txnp = NULL;
+
+ ret = dbenv->dbremove(dbenv, txnp, name, subdb, flags);
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __env_dbrename_proc */
+/*
+ * PUBLIC: void __env_dbrename_proc __P((long, long, char *, char *, char *,
+ * PUBLIC: u_int32_t, __env_dbrename_reply *));
+ */
+void
+__env_dbrename_proc(dbenvcl_id, txnpcl_id, name,
+ subdb, newname, flags, replyp)
+ long dbenvcl_id;
+ long txnpcl_id;
+ char *name;
+ char *subdb;
+ char *newname;
+ u_int32_t flags;
+ __env_dbrename_reply *replyp;
+/* END __env_dbrename_proc */
+{
+ int ret;
+ DB_ENV * dbenv;
+ ct_entry *dbenv_ctp;
+ DB_TXN * txnp;
+ ct_entry *txnp_ctp;
+
+ ACTIVATE_CTP(dbenv_ctp, dbenvcl_id, CT_ENV);
+ dbenv = (DB_ENV *)dbenv_ctp->ct_anyp;
+
+ if (txnpcl_id != 0) {
+ ACTIVATE_CTP(txnp_ctp, txnpcl_id, CT_TXN);
+ txnp = (DB_TXN *)txnp_ctp->ct_anyp;
+ } else
+ txnp = NULL;
+
+ ret = dbenv->dbrename(dbenv, txnp, name, subdb, newname, flags);
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __env_encrypt_proc */
+/*
+ * PUBLIC: void __env_encrypt_proc __P((long, char *, u_int32_t,
+ * PUBLIC: __env_encrypt_reply *));
+ */
+void
+__env_encrypt_proc(dbenvcl_id, passwd, flags, replyp)
+ long dbenvcl_id;
+ char *passwd;
+ u_int32_t flags;
+ __env_encrypt_reply *replyp;
+/* END __env_encrypt_proc */
+{
+ int ret;
+ DB_ENV * dbenv;
+ ct_entry *dbenv_ctp;
+
+ ACTIVATE_CTP(dbenv_ctp, dbenvcl_id, CT_ENV);
+ dbenv = (DB_ENV *)dbenv_ctp->ct_anyp;
+
+ ret = dbenv->set_encrypt(dbenv, passwd, flags);
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __env_flags_proc */
+/*
+ * PUBLIC: void __env_flags_proc __P((long, u_int32_t, u_int32_t,
+ * PUBLIC: __env_flags_reply *));
+ */
+void
+__env_flags_proc(dbenvcl_id, flags, onoff, replyp)
+ long dbenvcl_id;
+ u_int32_t flags;
+ u_int32_t onoff;
+ __env_flags_reply *replyp;
+/* END __env_flags_proc */
+{
+ DB_ENV *dbenv;
+ ct_entry *dbenv_ctp;
+ int ret;
+
+ ACTIVATE_CTP(dbenv_ctp, dbenvcl_id, CT_ENV);
+ dbenv = (DB_ENV *)dbenv_ctp->ct_anyp;
+
+ ret = dbenv->set_flags(dbenv, flags, onoff);
+ if (onoff)
+ dbenv_ctp->ct_envdp.onflags = flags;
+ else
+ dbenv_ctp->ct_envdp.offflags = flags;
+
+ replyp->status = ret;
+ return;
+}
+/* BEGIN __env_open_proc */
+/*
+ * PUBLIC: void __env_open_proc __P((long, char *, u_int32_t, u_int32_t,
+ * PUBLIC: __env_open_reply *));
+ */
+void
+__env_open_proc(dbenvcl_id, home, flags,
+ mode, replyp)
+ long dbenvcl_id;
+ char *home;
+ u_int32_t flags;
+ u_int32_t mode;
+ __env_open_reply *replyp;
+/* END __env_open_proc */
+{
+ DB_ENV *dbenv;
+ ct_entry *dbenv_ctp, *new_ctp;
+ u_int32_t newflags, shareflags;
+ int ret;
+ home_entry *fullhome;
+
+ ACTIVATE_CTP(dbenv_ctp, dbenvcl_id, CT_ENV);
+ dbenv = (DB_ENV *)dbenv_ctp->ct_anyp;
+ fullhome = get_home(home);
+ if (fullhome == NULL) {
+ ret = DB_NOSERVER_HOME;
+ goto out;
+ }
+
+ /*
+ * If they are using locking do deadlock detection for them,
+ * internally.
+ */
+ if ((flags & DB_INIT_LOCK) &&
+ (ret = dbenv->set_lk_detect(dbenv, DB_LOCK_DEFAULT)) != 0)
+ goto out;
+
+ if (__dbsrv_verbose) {
+ dbenv->set_errfile(dbenv, stderr);
+ dbenv->set_errpfx(dbenv, fullhome->home);
+ }
+
+ /*
+ * Mask off flags we ignore
+ */
+ newflags = (flags & ~DB_SERVER_FLAGMASK);
+ shareflags = (newflags & DB_SERVER_ENVFLAGS);
+ /*
+ * Check now whether we can share a handle for this env.
+ */
+ replyp->envcl_id = dbenvcl_id;
+ if ((new_ctp = __dbsrv_shareenv(dbenv_ctp, fullhome, shareflags))
+ != NULL) {
+ /*
+ * We can share, clean up old ID, set new one.
+ */
+ if (__dbsrv_verbose)
+ printf("Sharing env ID %ld\n", new_ctp->ct_id);
+ replyp->envcl_id = new_ctp->ct_id;
+ ret = __dbenv_close_int(dbenvcl_id, 0, 0);
+ } else {
+ ret = dbenv->open(dbenv, fullhome->home, newflags, mode);
+ dbenv_ctp->ct_envdp.home = fullhome;
+ dbenv_ctp->ct_envdp.envflags = shareflags;
+ }
+out: replyp->status = ret;
+ return;
+}
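
A minimal sketch of the handle-sharing check performed above: an already-open environment is reused when its home directory and the sharable subset of its open flags match the request. The registry layout and the CLIENT_ONLY_MASK constant here are assumptions for illustration, not BDB definitions.

#include <string.h>

#define CLIENT_ONLY_MASK	0xff000000u	/* assumed mask of flags we ignore */

struct env_entry {
	const char	*home;		/* home directory it was opened with */
	unsigned int	 flags;		/* sharable flags it was opened with */
	void		*handle;	/* the open environment */
};

static void *
find_shared_env(struct env_entry *tab, int n,
    const char *home, unsigned int open_flags)
{
	unsigned int share;
	int i;

	share = open_flags & ~CLIENT_ONLY_MASK;	/* mask off flags we ignore */
	for (i = 0; i < n; i++)
		if (strcmp(tab[i].home, home) == 0 && tab[i].flags == share)
			return (tab[i].handle);	/* reuse this handle */
	return (NULL);				/* caller must open a new one */
}
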
+
+/* BEGIN __env_remove_proc */
+/*
+ * PUBLIC: void __env_remove_proc __P((long, char *, u_int32_t,
+ * PUBLIC: __env_remove_reply *));
+ */
+void
+__env_remove_proc(dbenvcl_id, home, flags, replyp)
+ long dbenvcl_id;
+ char *home;
+ u_int32_t flags;
+ __env_remove_reply *replyp;
+/* END __env_remove_proc */
+{
+ DB_ENV *dbenv;
+ ct_entry *dbenv_ctp;
+ int ret;
+ home_entry *fullhome;
+
+ ACTIVATE_CTP(dbenv_ctp, dbenvcl_id, CT_ENV);
+ dbenv = (DB_ENV *)dbenv_ctp->ct_anyp;
+
+ fullhome = get_home(home);
+ if (fullhome == NULL) {
+ replyp->status = DB_NOSERVER_HOME;
+ return;
+ }
+
+ ret = dbenv->remove(dbenv, fullhome->home, flags);
+ __dbdel_ctp(dbenv_ctp);
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __txn_abort_proc */
+/*
+ * PUBLIC: void __txn_abort_proc __P((long, __txn_abort_reply *));
+ */
+void
+__txn_abort_proc(txnpcl_id, replyp)
+ long txnpcl_id;
+ __txn_abort_reply *replyp;
+/* END __txn_abort_proc */
+{
+ DB_TXN *txnp;
+ ct_entry *txnp_ctp;
+ int ret;
+
+ ACTIVATE_CTP(txnp_ctp, txnpcl_id, CT_TXN);
+ txnp = (DB_TXN *)txnp_ctp->ct_anyp;
+
+ ret = txnp->abort(txnp);
+ __dbdel_ctp(txnp_ctp);
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __txn_begin_proc */
+/*
+ * PUBLIC: void __txn_begin_proc __P((long, long, u_int32_t,
+ * PUBLIC: __txn_begin_reply *));
+ */
+void
+__txn_begin_proc(dbenvcl_id, parentcl_id,
+ flags, replyp)
+ long dbenvcl_id;
+ long parentcl_id;
+ u_int32_t flags;
+ __txn_begin_reply *replyp;
+/* END __txn_begin_proc */
+{
+ DB_ENV *dbenv;
+ DB_TXN *parent, *txnp;
+ ct_entry *ctp, *dbenv_ctp, *parent_ctp;
+ int ret;
+
+ ACTIVATE_CTP(dbenv_ctp, dbenvcl_id, CT_ENV);
+ dbenv = (DB_ENV *)dbenv_ctp->ct_anyp;
+ parent_ctp = NULL;
+
+ ctp = new_ct_ent(&replyp->status);
+ if (ctp == NULL)
+ return;
+
+ if (parentcl_id != 0) {
+ ACTIVATE_CTP(parent_ctp, parentcl_id, CT_TXN);
+ parent = (DB_TXN *)parent_ctp->ct_anyp;
+ ctp->ct_activep = parent_ctp->ct_activep;
+ } else
+ parent = NULL;
+
+ ret = dbenv->txn_begin(dbenv, parent, &txnp, flags);
+ if (ret == 0) {
+ ctp->ct_txnp = txnp;
+ ctp->ct_type = CT_TXN;
+ ctp->ct_parent = parent_ctp;
+ ctp->ct_envparent = dbenv_ctp;
+ replyp->txnidcl_id = ctp->ct_id;
+ __dbsrv_settimeout(ctp, dbenv_ctp->ct_timeout);
+ __dbsrv_active(ctp);
+ } else
+ __dbclear_ctp(ctp);
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __txn_commit_proc */
+/*
+ * PUBLIC: void __txn_commit_proc __P((long, u_int32_t,
+ * PUBLIC: __txn_commit_reply *));
+ */
+void
+__txn_commit_proc(txnpcl_id, flags, replyp)
+ long txnpcl_id;
+ u_int32_t flags;
+ __txn_commit_reply *replyp;
+/* END __txn_commit_proc */
+{
+ DB_TXN *txnp;
+ ct_entry *txnp_ctp;
+ int ret;
+
+ ACTIVATE_CTP(txnp_ctp, txnpcl_id, CT_TXN);
+ txnp = (DB_TXN *)txnp_ctp->ct_anyp;
+
+ ret = txnp->commit(txnp, flags);
+ __dbdel_ctp(txnp_ctp);
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __txn_discard_proc */
+/*
+ * PUBLIC: void __txn_discard_proc __P((long, u_int32_t,
+ * PUBLIC: __txn_discard_reply *));
+ */
+void
+__txn_discard_proc(txnpcl_id, flags, replyp)
+ long txnpcl_id;
+ u_int32_t flags;
+ __txn_discard_reply *replyp;
+/* END __txn_discard_proc */
+{
+ DB_TXN *txnp;
+ ct_entry *txnp_ctp;
+ int ret;
+
+ ACTIVATE_CTP(txnp_ctp, txnpcl_id, CT_TXN);
+ txnp = (DB_TXN *)txnp_ctp->ct_anyp;
+
+ ret = txnp->discard(txnp, flags);
+ __dbdel_ctp(txnp_ctp);
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __txn_prepare_proc */
+/*
+ * PUBLIC: void __txn_prepare_proc __P((long, u_int8_t *,
+ * PUBLIC: __txn_prepare_reply *));
+ */
+void
+__txn_prepare_proc(txnpcl_id, gid, replyp)
+ long txnpcl_id;
+ u_int8_t *gid;
+ __txn_prepare_reply *replyp;
+/* END __txn_prepare_proc */
+{
+ DB_TXN *txnp;
+ ct_entry *txnp_ctp;
+ int ret;
+
+ ACTIVATE_CTP(txnp_ctp, txnpcl_id, CT_TXN);
+ txnp = (DB_TXN *)txnp_ctp->ct_anyp;
+
+ ret = txnp->prepare(txnp, gid);
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __txn_recover_proc */
+/*
+ * PUBLIC: void __txn_recover_proc __P((long, u_int32_t, u_int32_t,
+ * PUBLIC: __txn_recover_reply *, int *));
+ */
+void
+__txn_recover_proc(dbenvcl_id, count,
+ flags, replyp, freep)
+ long dbenvcl_id;
+ u_int32_t count;
+ u_int32_t flags;
+ __txn_recover_reply *replyp;
+ int * freep;
+/* END __txn_recover_proc */
+{
+ DB_ENV *dbenv;
+ DB_PREPLIST *dbprep, *p;
+ ct_entry *dbenv_ctp, *ctp;
+ long erri, i, retcount;
+ u_int32_t *txnidp;
+ int ret;
+ u_int8_t *gid;
+
+ ACTIVATE_CTP(dbenv_ctp, dbenvcl_id, CT_ENV);
+ dbenv = (DB_ENV *)dbenv_ctp->ct_anyp;
+ dbprep = NULL;
+ *freep = 0;
+
+ if ((ret =
+ __os_malloc(dbenv, count * sizeof(DB_PREPLIST), &dbprep)) != 0)
+ goto out;
+ if ((ret =
+ dbenv->txn_recover(dbenv, dbprep, count, &retcount, flags)) != 0)
+ goto out;
+	/*
+	 * If there is nothing to recover, the call succeeds with
+	 * empty reply arrays.
+	 */
+ replyp->retcount = retcount;
+	if (retcount == 0) {
+		replyp->txn.txn_val = NULL;
+		replyp->txn.txn_len = 0;
+		replyp->gid.gid_val = NULL;
+		replyp->gid.gid_len = 0;
+		goto out;
+	}
+
+ /*
+ * We have our txn list. Now we need to allocate the space for
+ * the txn ID array and the GID array and set them up.
+ */
+ if ((ret = __os_calloc(dbenv, retcount, sizeof(u_int32_t),
+ &replyp->txn.txn_val)) != 0)
+ goto out;
+ replyp->txn.txn_len = retcount * sizeof(u_int32_t);
+ if ((ret = __os_calloc(dbenv, retcount, DB_XIDDATASIZE,
+ &replyp->gid.gid_val)) != 0) {
+ __os_free(dbenv, replyp->txn.txn_val);
+ goto out;
+ }
+ replyp->gid.gid_len = retcount * DB_XIDDATASIZE;
+
+ /*
+ * Now walk through our results, creating parallel arrays
+ * to send back. For each entry we need to create a new
+ * txn ctp and then fill in the array info.
+ */
+ i = 0;
+ p = dbprep;
+ gid = replyp->gid.gid_val;
+ txnidp = replyp->txn.txn_val;
+ while (i++ < retcount) {
+ ctp = new_ct_ent(&ret);
+ if (ret != 0) {
+ i--;
+ goto out2;
+ }
+ ctp->ct_txnp = p->txn;
+ ctp->ct_type = CT_TXN;
+ ctp->ct_parent = NULL;
+ ctp->ct_envparent = dbenv_ctp;
+ __dbsrv_settimeout(ctp, dbenv_ctp->ct_timeout);
+ __dbsrv_active(ctp);
+
+ *txnidp = ctp->ct_id;
+ memcpy(gid, p->gid, DB_XIDDATASIZE);
+
+ p++;
+ txnidp++;
+ gid += DB_XIDDATASIZE;
+ }
+ /*
+ * If we get here, we have success and we have to set freep
+ * so it'll get properly freed next time.
+ */
+ *freep = 1;
+out:
+ if (dbprep != NULL)
+ __os_free(dbenv, dbprep);
+ replyp->status = ret;
+ return;
+out2:
+ /*
+ * We had an error in the middle of creating our new txn
+ * ct entries. We have to unwind all that we have done. Ugh.
+ */
+ for (txnidp = replyp->txn.txn_val, erri = 0;
+ erri < i; erri++, txnidp++) {
+ ctp = get_tableent(*txnidp);
+ __dbclear_ctp(ctp);
+ }
+ __os_free(dbenv, replyp->txn.txn_val);
+ __os_free(dbenv, replyp->gid.gid_val);
+ __os_free(dbenv, dbprep);
+ replyp->status = ret;
+ return;
+}
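
The reply built above carries two parallel arrays: one u_int32_t ID per recovered transaction and one DB_XIDDATASIZE-byte global ID per transaction, packed back to back. The sketch below shows how such parallel arrays line up on the receiving side; XIDSIZE is a stand-in constant, not the real DB_XIDDATASIZE definition.

#include <stdio.h>
#include <stdint.h>

#define XIDSIZE	128		/* assumed stand-in for DB_XIDDATASIZE */

static void
walk_prepared(const uint32_t *ids, const unsigned char *gids, long count)
{
	const unsigned char *gid;
	long i;

	/* Entry i's ID is ids[i]; its global ID is the XIDSIZE-byte slice at gids + i * XIDSIZE. */
	for (i = 0, gid = gids; i < count; i++, gid += XIDSIZE)
		printf("prepared txn id %lu, gid starts with byte 0x%02x\n",
		    (unsigned long)ids[i], gid[0]);
}
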
+
+/* BEGIN __db_bt_maxkey_proc */
+/*
+ * PUBLIC: void __db_bt_maxkey_proc __P((long, u_int32_t,
+ * PUBLIC: __db_bt_maxkey_reply *));
+ */
+void
+__db_bt_maxkey_proc(dbpcl_id, maxkey, replyp)
+ long dbpcl_id;
+ u_int32_t maxkey;
+ __db_bt_maxkey_reply *replyp;
+/* END __db_bt_maxkey_proc */
+{
+ DB *dbp;
+ ct_entry *dbp_ctp;
+ int ret;
+
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (DB *)dbp_ctp->ct_anyp;
+
+ ret = dbp->set_bt_maxkey(dbp, maxkey);
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __db_associate_proc */
+/*
+ * PUBLIC: void __db_associate_proc __P((long, long, long, u_int32_t,
+ * PUBLIC: __db_associate_reply *));
+ */
+void
+__db_associate_proc(dbpcl_id, txnpcl_id, sdbpcl_id,
+ flags, replyp)
+ long dbpcl_id;
+ long txnpcl_id;
+ long sdbpcl_id;
+ u_int32_t flags;
+ __db_associate_reply *replyp;
+/* END __db_associate_proc */
+{
+ DB *dbp, *sdbp;
+ DB_TXN *txnp;
+ ct_entry *dbp_ctp, *sdbp_ctp, *txnp_ctp;
+ int ret;
+
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (DB *)dbp_ctp->ct_anyp;
+ ACTIVATE_CTP(sdbp_ctp, sdbpcl_id, CT_DB);
+ sdbp = (DB *)sdbp_ctp->ct_anyp;
+ if (txnpcl_id != 0) {
+ ACTIVATE_CTP(txnp_ctp, txnpcl_id, CT_TXN);
+ txnp = (DB_TXN *)txnp_ctp->ct_anyp;
+ } else
+ txnp = NULL;
+
+ /*
+ * We do not support DB_CREATE for associate. Users
+ * can only access secondary indices on a read-only basis,
+ * so whatever they are looking for needs to be there already.
+ */
+ if (flags != 0)
+ ret = EINVAL;
+ else
+ ret = dbp->associate(dbp, txnp, sdbp, NULL, flags);
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __db_bt_minkey_proc */
+/*
+ * PUBLIC: void __db_bt_minkey_proc __P((long, u_int32_t,
+ * PUBLIC: __db_bt_minkey_reply *));
+ */
+void
+__db_bt_minkey_proc(dbpcl_id, minkey, replyp)
+ long dbpcl_id;
+ u_int32_t minkey;
+ __db_bt_minkey_reply *replyp;
+/* END __db_bt_minkey_proc */
+{
+ DB *dbp;
+ ct_entry *dbp_ctp;
+ int ret;
+
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (DB *)dbp_ctp->ct_anyp;
+
+ ret = dbp->set_bt_minkey(dbp, minkey);
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __db_close_proc */
+/*
+ * PUBLIC: void __db_close_proc __P((long, u_int32_t, __db_close_reply *));
+ */
+void
+__db_close_proc(dbpcl_id, flags, replyp)
+ long dbpcl_id;
+ u_int32_t flags;
+ __db_close_reply *replyp;
+/* END __db_close_proc */
+{
+ ct_entry *dbp_ctp;
+
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ replyp->status = __db_close_int(dbpcl_id, flags);
+ return;
+}
+
+/* BEGIN __db_create_proc */
+/*
+ * PUBLIC: void __db_create_proc __P((long, u_int32_t, __db_create_reply *));
+ */
+void
+__db_create_proc(dbenvcl_id, flags, replyp)
+ long dbenvcl_id;
+ u_int32_t flags;
+ __db_create_reply *replyp;
+/* END __db_create_proc */
+{
+ DB *dbp;
+ DB_ENV *dbenv;
+ ct_entry *dbenv_ctp, *dbp_ctp;
+ int ret;
+
+ ACTIVATE_CTP(dbenv_ctp, dbenvcl_id, CT_ENV);
+ dbenv = (DB_ENV *)dbenv_ctp->ct_anyp;
+
+ dbp_ctp = new_ct_ent(&replyp->status);
+ if (dbp_ctp == NULL)
+		return;
+	/*
+	 * We require an environment for every database. The client
+	 * should have caught this, but check here just in case.
+	 */
+ DB_ASSERT(dbenv != NULL);
+ if ((ret = db_create(&dbp, dbenv, flags)) == 0) {
+ dbp_ctp->ct_dbp = dbp;
+ dbp_ctp->ct_type = CT_DB;
+ dbp_ctp->ct_parent = dbenv_ctp;
+ dbp_ctp->ct_envparent = dbenv_ctp;
+ replyp->dbcl_id = dbp_ctp->ct_id;
+ } else
+ __dbclear_ctp(dbp_ctp);
+ replyp->status = ret;
+ return;
+}
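
Every proc in this file follows the pattern seen in __db_create_proc: allocate a table entry, fill it in and return its small integer ID to the client on success, or clear it on failure, so clients never see raw handle pointers. Below is a toy standalone version of such an ID table; the names and sizes are illustrative, not the server's ct_entry machinery.

#include <stddef.h>

#define MAXENT	64

struct ent {
	long	 id;		/* 0 means "free slot" */
	void	*handle;	/* DB, DB_ENV, DB_TXN or DBC handle */
};

static struct ent table[MAXENT];
static long next_id = 1;

static struct ent *
new_ent(void *handle)
{
	int i;

	for (i = 0; i < MAXENT; i++)
		if (table[i].id == 0) {
			table[i].id = next_id++;
			table[i].handle = handle;
			return (&table[i]);
		}
	return (NULL);			/* table full */
}

static void *
lookup_ent(long id)
{
	int i;

	for (i = 0; i < MAXENT; i++)
		if (table[i].id == id)
			return (table[i].handle);
	return (NULL);			/* unknown client ID */
}
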
+
+/* BEGIN __db_del_proc */
+/*
+ * PUBLIC: void __db_del_proc __P((long, long, u_int32_t, u_int32_t, u_int32_t,
+ * PUBLIC: u_int32_t, void *, u_int32_t, u_int32_t, __db_del_reply *));
+ */
+void
+__db_del_proc(dbpcl_id, txnpcl_id, keydlen,
+ keydoff, keyulen, keyflags, keydata,
+ keysize, flags, replyp)
+ long dbpcl_id;
+ long txnpcl_id;
+ u_int32_t keydlen;
+ u_int32_t keydoff;
+ u_int32_t keyulen;
+ u_int32_t keyflags;
+ void *keydata;
+ u_int32_t keysize;
+ u_int32_t flags;
+ __db_del_reply *replyp;
+/* END __db_del_proc */
+{
+ DB *dbp;
+ DBT key;
+ DB_TXN *txnp;
+ ct_entry *dbp_ctp, *txnp_ctp;
+ int ret;
+
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (DB *)dbp_ctp->ct_anyp;
+ if (txnpcl_id != 0) {
+ ACTIVATE_CTP(txnp_ctp, txnpcl_id, CT_TXN);
+ txnp = (DB_TXN *)txnp_ctp->ct_anyp;
+ } else
+ txnp = NULL;
+
+ memset(&key, 0, sizeof(key));
+
+ /* Set up key DBT */
+ key.dlen = keydlen;
+ key.ulen = keyulen;
+ key.doff = keydoff;
+ key.flags = keyflags;
+ key.size = keysize;
+ key.data = keydata;
+
+ ret = dbp->del(dbp, txnp, &key, flags);
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __db_encrypt_proc */
+/*
+ * PUBLIC: void __db_encrypt_proc __P((long, char *, u_int32_t,
+ * PUBLIC: __db_encrypt_reply *));
+ */
+void
+__db_encrypt_proc(dbpcl_id, passwd, flags, replyp)
+ long dbpcl_id;
+ char *passwd;
+ u_int32_t flags;
+ __db_encrypt_reply *replyp;
+/* END __db_encrypt_proc */
+{
+ int ret;
+ DB * dbp;
+ ct_entry *dbp_ctp;
+
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (DB *)dbp_ctp->ct_anyp;
+
+ ret = dbp->set_encrypt(dbp, passwd, flags);
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __db_extentsize_proc */
+/*
+ * PUBLIC: void __db_extentsize_proc __P((long, u_int32_t,
+ * PUBLIC: __db_extentsize_reply *));
+ */
+void
+__db_extentsize_proc(dbpcl_id, extentsize, replyp)
+ long dbpcl_id;
+ u_int32_t extentsize;
+ __db_extentsize_reply *replyp;
+/* END __db_extentsize_proc */
+{
+ DB *dbp;
+ ct_entry *dbp_ctp;
+ int ret;
+
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (DB *)dbp_ctp->ct_anyp;
+
+ ret = dbp->set_q_extentsize(dbp, extentsize);
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __db_flags_proc */
+/*
+ * PUBLIC: void __db_flags_proc __P((long, u_int32_t, __db_flags_reply *));
+ */
+void
+__db_flags_proc(dbpcl_id, flags, replyp)
+ long dbpcl_id;
+ u_int32_t flags;
+ __db_flags_reply *replyp;
+/* END __db_flags_proc */
+{
+ DB *dbp;
+ ct_entry *dbp_ctp;
+ int ret;
+
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (DB *)dbp_ctp->ct_anyp;
+
+ ret = dbp->set_flags(dbp, flags);
+ dbp_ctp->ct_dbdp.setflags |= flags;
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __db_get_proc */
+/*
+ * PUBLIC: void __db_get_proc __P((long, long, u_int32_t, u_int32_t, u_int32_t,
+ * PUBLIC: u_int32_t, void *, u_int32_t, u_int32_t, u_int32_t, u_int32_t, u_int32_t, void *,
+ * PUBLIC: u_int32_t, u_int32_t, __db_get_reply *, int *));
+ */
+void
+__db_get_proc(dbpcl_id, txnpcl_id, keydlen,
+ keydoff, keyulen, keyflags, keydata,
+ keysize, datadlen, datadoff, dataulen,
+ dataflags, datadata, datasize, flags, replyp, freep)
+ long dbpcl_id;
+ long txnpcl_id;
+ u_int32_t keydlen;
+ u_int32_t keydoff;
+ u_int32_t keyulen;
+ u_int32_t keyflags;
+ void *keydata;
+ u_int32_t keysize;
+ u_int32_t datadlen;
+ u_int32_t datadoff;
+ u_int32_t dataulen;
+ u_int32_t dataflags;
+ void *datadata;
+ u_int32_t datasize;
+ u_int32_t flags;
+ __db_get_reply *replyp;
+ int * freep;
+/* END __db_get_proc */
+{
+ DB *dbp;
+ DBT key, data;
+ DB_TXN *txnp;
+ ct_entry *dbp_ctp, *txnp_ctp;
+ int key_alloc, bulk_alloc, ret;
+
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (DB *)dbp_ctp->ct_anyp;
+ if (txnpcl_id != 0) {
+ ACTIVATE_CTP(txnp_ctp, txnpcl_id, CT_TXN);
+ txnp = (DB_TXN *)txnp_ctp->ct_anyp;
+ } else
+ txnp = NULL;
+
+ *freep = 0;
+ bulk_alloc = 0;
+ memset(&key, 0, sizeof(key));
+ memset(&data, 0, sizeof(data));
+
+ /* Set up key and data DBT */
+ key.dlen = keydlen;
+ key.doff = keydoff;
+ /*
+ * Ignore memory related flags on server.
+ */
+ key.flags = DB_DBT_MALLOC;
+ if (keyflags & DB_DBT_PARTIAL)
+ key.flags |= DB_DBT_PARTIAL;
+ key.size = keysize;
+ key.ulen = keyulen;
+ key.data = keydata;
+
+ data.dlen = datadlen;
+ data.doff = datadoff;
+ data.ulen = dataulen;
+ /*
+ * Ignore memory related flags on server.
+ */
+ data.size = datasize;
+ data.data = datadata;
+ if (flags & DB_MULTIPLE) {
+ if (data.data == 0) {
+ ret = __os_umalloc(dbp->dbenv,
+ data.ulen, &data.data);
+ if (ret != 0)
+ goto err;
+ bulk_alloc = 1;
+ }
+ data.flags |= DB_DBT_USERMEM;
+ } else
+ data.flags |= DB_DBT_MALLOC;
+ if (dataflags & DB_DBT_PARTIAL)
+ data.flags |= DB_DBT_PARTIAL;
+
+ /* Got all our stuff, now do the get */
+ ret = dbp->get(dbp, txnp, &key, &data, flags);
+	/*
+	 * On success, set up the key and data in the reply;
+	 * otherwise only the status is returned.
+	 */
+ if (ret == 0) {
+ /*
+ * XXX
+ * We need to xdr_free whatever we are returning, next time.
+ * However, DB does not allocate a new key if one was given
+		 * and we'd be freeing up space allocated in the request.
+ * So, allocate a new key/data pointer if it is the same one
+ * as in the request.
+ */
+ *freep = 1;
+ /*
+ * Key
+ */
+ key_alloc = 0;
+ if (key.data == keydata) {
+ ret = __os_umalloc(dbp->dbenv,
+ key.size, &replyp->keydata.keydata_val);
+ if (ret != 0) {
+ __os_ufree(dbp->dbenv, key.data);
+ __os_ufree(dbp->dbenv, data.data);
+ goto err;
+ }
+ key_alloc = 1;
+ memcpy(replyp->keydata.keydata_val, key.data, key.size);
+ } else
+ replyp->keydata.keydata_val = key.data;
+
+ replyp->keydata.keydata_len = key.size;
+
+ /*
+ * Data
+ */
+ if (data.data == datadata) {
+ ret = __os_umalloc(dbp->dbenv,
+ data.size, &replyp->datadata.datadata_val);
+ if (ret != 0) {
+ __os_ufree(dbp->dbenv, key.data);
+ __os_ufree(dbp->dbenv, data.data);
+ if (key_alloc)
+ __os_ufree(dbp->dbenv,
+ replyp->keydata.keydata_val);
+ goto err;
+ }
+ memcpy(replyp->datadata.datadata_val, data.data,
+ data.size);
+ } else
+ replyp->datadata.datadata_val = data.data;
+ replyp->datadata.datadata_len = data.size;
+ } else {
+err: replyp->keydata.keydata_val = NULL;
+ replyp->keydata.keydata_len = 0;
+ replyp->datadata.datadata_val = NULL;
+ replyp->datadata.datadata_len = 0;
+ *freep = 0;
+ if (bulk_alloc)
+ __os_ufree(dbp->dbenv, data.data);
+ }
+ replyp->status = ret;
+ return;
+}
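
The XXX comment above is about pointer aliasing: the reply may only carry memory the RPC layer is allowed to free later, so a result that still points at the request buffer has to be duplicated first. Here is a minimal sketch of that check, using plain malloc in place of __os_umalloc; the function name is illustrative.

#include <stdlib.h>
#include <string.h>

static int
own_result(void **reply_val, void *result, size_t size, const void *reqbuf)
{
	if (result == reqbuf) {		/* DB reused the request buffer */
		if ((*reply_val = malloc(size)) == NULL)
			return (-1);
		memcpy(*reply_val, result, size);
	} else				/* DB allocated it; hand it on */
		*reply_val = result;
	return (0);
}
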
+
+/* BEGIN __db_h_ffactor_proc */
+/*
+ * PUBLIC: void __db_h_ffactor_proc __P((long, u_int32_t,
+ * PUBLIC: __db_h_ffactor_reply *));
+ */
+void
+__db_h_ffactor_proc(dbpcl_id, ffactor, replyp)
+ long dbpcl_id;
+ u_int32_t ffactor;
+ __db_h_ffactor_reply *replyp;
+/* END __db_h_ffactor_proc */
+{
+ DB *dbp;
+ ct_entry *dbp_ctp;
+ int ret;
+
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (DB *)dbp_ctp->ct_anyp;
+
+ ret = dbp->set_h_ffactor(dbp, ffactor);
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __db_h_nelem_proc */
+/*
+ * PUBLIC: void __db_h_nelem_proc __P((long, u_int32_t,
+ * PUBLIC: __db_h_nelem_reply *));
+ */
+void
+__db_h_nelem_proc(dbpcl_id, nelem, replyp)
+ long dbpcl_id;
+ u_int32_t nelem;
+ __db_h_nelem_reply *replyp;
+/* END __db_h_nelem_proc */
+{
+ DB *dbp;
+ ct_entry *dbp_ctp;
+ int ret;
+
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (DB *)dbp_ctp->ct_anyp;
+
+ ret = dbp->set_h_nelem(dbp, nelem);
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __db_key_range_proc */
+/*
+ * PUBLIC: void __db_key_range_proc __P((long, long, u_int32_t, u_int32_t,
+ * PUBLIC: u_int32_t, u_int32_t, void *, u_int32_t, u_int32_t, __db_key_range_reply *));
+ */
+void
+__db_key_range_proc(dbpcl_id, txnpcl_id, keydlen,
+ keydoff, keyulen, keyflags, keydata,
+ keysize, flags, replyp)
+ long dbpcl_id;
+ long txnpcl_id;
+ u_int32_t keydlen;
+ u_int32_t keydoff;
+ u_int32_t keyulen;
+ u_int32_t keyflags;
+ void *keydata;
+ u_int32_t keysize;
+ u_int32_t flags;
+ __db_key_range_reply *replyp;
+/* END __db_key_range_proc */
+{
+ DB *dbp;
+ DBT key;
+ DB_KEY_RANGE range;
+ DB_TXN *txnp;
+ ct_entry *dbp_ctp, *txnp_ctp;
+ int ret;
+
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (DB *)dbp_ctp->ct_anyp;
+ if (txnpcl_id != 0) {
+ ACTIVATE_CTP(txnp_ctp, txnpcl_id, CT_TXN);
+ txnp = (DB_TXN *)txnp_ctp->ct_anyp;
+ } else
+ txnp = NULL;
+
+ memset(&key, 0, sizeof(key));
+ /* Set up key and data DBT */
+ key.dlen = keydlen;
+ key.ulen = keyulen;
+ key.doff = keydoff;
+ key.size = keysize;
+ key.data = keydata;
+ key.flags = keyflags;
+
+ ret = dbp->key_range(dbp, txnp, &key, &range, flags);
+
+ replyp->status = ret;
+ replyp->less = range.less;
+ replyp->equal = range.equal;
+ replyp->greater = range.greater;
+ return;
+}
+
+/* BEGIN __db_lorder_proc */
+/*
+ * PUBLIC: void __db_lorder_proc __P((long, u_int32_t, __db_lorder_reply *));
+ */
+void
+__db_lorder_proc(dbpcl_id, lorder, replyp)
+ long dbpcl_id;
+ u_int32_t lorder;
+ __db_lorder_reply *replyp;
+/* END __db_lorder_proc */
+{
+ DB *dbp;
+ ct_entry *dbp_ctp;
+ int ret;
+
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (DB *)dbp_ctp->ct_anyp;
+
+ ret = dbp->set_lorder(dbp, lorder);
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __db_open_proc */
+/*
+ * PUBLIC: void __db_open_proc __P((long, long, char *, char *, u_int32_t,
+ * PUBLIC: u_int32_t, u_int32_t, __db_open_reply *));
+ */
+void
+__db_open_proc(dbpcl_id, txnpcl_id, name,
+ subdb, type, flags, mode, replyp)
+ long dbpcl_id;
+ long txnpcl_id;
+ char *name;
+ char *subdb;
+ u_int32_t type;
+ u_int32_t flags;
+ u_int32_t mode;
+ __db_open_reply *replyp;
+/* END __db_open_proc */
+{
+ DB *dbp;
+ DB_TXN *txnp;
+ DBTYPE dbtype;
+ ct_entry *dbp_ctp, *new_ctp, *txnp_ctp;
+ int isswapped, ret;
+
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (DB *)dbp_ctp->ct_anyp;
+
+ if (txnpcl_id != 0) {
+ ACTIVATE_CTP(txnp_ctp, txnpcl_id, CT_TXN);
+ txnp = (DB_TXN *)txnp_ctp->ct_anyp;
+ } else
+ txnp = NULL;
+
+ replyp->dbcl_id = dbpcl_id;
+ if ((new_ctp = __dbsrv_sharedb(dbp_ctp, name, subdb, type, flags))
+ != NULL) {
+ /*
+ * We can share, clean up old ID, set new one.
+ */
+ if (__dbsrv_verbose)
+ printf("Sharing db ID %ld\n", new_ctp->ct_id);
+ replyp->dbcl_id = new_ctp->ct_id;
+ ret = __db_close_int(dbpcl_id, 0);
+ goto out;
+ }
+ ret = dbp->open(dbp, txnp, name, subdb, (DBTYPE)type, flags, mode);
+ if (ret == 0) {
+ (void)dbp->get_type(dbp, &dbtype);
+ replyp->type = dbtype;
+		/*
+		 * XXX
+ * Tcl needs to peek at dbp->flags for DB_AM_DUP. Send
+ * this dbp's flags back.
+ */
+ replyp->dbflags = (int) dbp->flags;
+ /*
+ * We need to determine the byte order of the database
+ * and send it back to the client. Determine it by
+ * the server's native order and the swapped value of
+ * the DB itself.
+ */
+ (void)dbp->get_byteswapped(dbp, &isswapped);
+ if (__db_byteorder(NULL, 1234) == 0) {
+ if (isswapped == 0)
+ replyp->lorder = 1234;
+ else
+ replyp->lorder = 4321;
+ } else {
+ if (isswapped == 0)
+ replyp->lorder = 4321;
+ else
+ replyp->lorder = 1234;
+ }
+ dbp_ctp->ct_dbdp.type = dbtype;
+ dbp_ctp->ct_dbdp.dbflags = LF_ISSET(DB_SERVER_DBFLAGS);
+ if (name == NULL)
+ dbp_ctp->ct_dbdp.db = NULL;
+ else if ((ret = __os_strdup(dbp->dbenv, name,
+ &dbp_ctp->ct_dbdp.db)) != 0)
+ goto out;
+ if (subdb == NULL)
+ dbp_ctp->ct_dbdp.subdb = NULL;
+ else if ((ret = __os_strdup(dbp->dbenv, subdb,
+ &dbp_ctp->ct_dbdp.subdb)) != 0)
+ goto out;
+ }
+out:
+ replyp->status = ret;
+ return;
+}
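
The byte-order block above reports 1234 or 4321 depending on the server's native order and whether the database is byte-swapped relative to it. The same calculation as a standalone sketch; the endianness probe is an assumption standing in for __db_byteorder.

static int
native_lorder(void)
{
	int one = 1;

	/* Little-endian hosts store the low byte first. */
	return (*(char *)&one == 1 ? 1234 : 4321);
}

static int
db_lorder(int is_swapped)
{
	int native = native_lorder();

	/* A swapped database has the opposite order from the host. */
	if (is_swapped)
		return (native == 1234 ? 4321 : 1234);
	return (native);
}
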
+
+/* BEGIN __db_pagesize_proc */
+/*
+ * PUBLIC: void __db_pagesize_proc __P((long, u_int32_t,
+ * PUBLIC: __db_pagesize_reply *));
+ */
+void
+__db_pagesize_proc(dbpcl_id, pagesize, replyp)
+ long dbpcl_id;
+ u_int32_t pagesize;
+ __db_pagesize_reply *replyp;
+/* END __db_pagesize_proc */
+{
+ DB *dbp;
+ ct_entry *dbp_ctp;
+ int ret;
+
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (DB *)dbp_ctp->ct_anyp;
+
+ ret = dbp->set_pagesize(dbp, pagesize);
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __db_pget_proc */
+/*
+ * PUBLIC: void __db_pget_proc __P((long, long, u_int32_t, u_int32_t,
+ * PUBLIC: u_int32_t, u_int32_t, void *, u_int32_t, u_int32_t, u_int32_t, u_int32_t,
+ * PUBLIC: u_int32_t, void *, u_int32_t, u_int32_t, u_int32_t, u_int32_t, u_int32_t, void *,
+ * PUBLIC: u_int32_t, u_int32_t, __db_pget_reply *, int *));
+ */
+void
+__db_pget_proc(dbpcl_id, txnpcl_id, skeydlen,
+ skeydoff, skeyulen, skeyflags, skeydata,
+ skeysize, pkeydlen, pkeydoff, pkeyulen,
+ pkeyflags, pkeydata, pkeysize, datadlen,
+ datadoff, dataulen, dataflags, datadata,
+ datasize, flags, replyp, freep)
+ long dbpcl_id;
+ long txnpcl_id;
+ u_int32_t skeydlen;
+ u_int32_t skeydoff;
+ u_int32_t skeyulen;
+ u_int32_t skeyflags;
+ void *skeydata;
+ u_int32_t skeysize;
+ u_int32_t pkeydlen;
+ u_int32_t pkeydoff;
+ u_int32_t pkeyulen;
+ u_int32_t pkeyflags;
+ void *pkeydata;
+ u_int32_t pkeysize;
+ u_int32_t datadlen;
+ u_int32_t datadoff;
+ u_int32_t dataulen;
+ u_int32_t dataflags;
+ void *datadata;
+ u_int32_t datasize;
+ u_int32_t flags;
+ __db_pget_reply *replyp;
+ int * freep;
+/* END __db_pget_proc */
+{
+ DB *dbp;
+ DBT skey, pkey, data;
+ DB_TXN *txnp;
+ ct_entry *dbp_ctp, *txnp_ctp;
+ int key_alloc, ret;
+
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (DB *)dbp_ctp->ct_anyp;
+ if (txnpcl_id != 0) {
+ ACTIVATE_CTP(txnp_ctp, txnpcl_id, CT_TXN);
+ txnp = (DB_TXN *)txnp_ctp->ct_anyp;
+ } else
+ txnp = NULL;
+
+ *freep = 0;
+ memset(&skey, 0, sizeof(skey));
+ memset(&pkey, 0, sizeof(pkey));
+ memset(&data, 0, sizeof(data));
+
+ /*
+ * Ignore memory related flags on server.
+ */
+ /* Set up key and data DBT */
+ skey.flags = DB_DBT_MALLOC;
+ skey.dlen = skeydlen;
+ skey.ulen = skeyulen;
+ skey.doff = skeydoff;
+ if (skeyflags & DB_DBT_PARTIAL)
+ skey.flags |= DB_DBT_PARTIAL;
+ skey.size = skeysize;
+ skey.data = skeydata;
+
+ pkey.flags = DB_DBT_MALLOC;
+ pkey.dlen = pkeydlen;
+ pkey.ulen = pkeyulen;
+ pkey.doff = pkeydoff;
+ if (pkeyflags & DB_DBT_PARTIAL)
+ pkey.flags |= DB_DBT_PARTIAL;
+ pkey.size = pkeysize;
+ pkey.data = pkeydata;
+
+ data.flags = DB_DBT_MALLOC;
+ data.dlen = datadlen;
+ data.ulen = dataulen;
+ data.doff = datadoff;
+ if (dataflags & DB_DBT_PARTIAL)
+ data.flags |= DB_DBT_PARTIAL;
+ data.size = datasize;
+ data.data = datadata;
+
+ /* Got all our stuff, now do the get */
+ ret = dbp->pget(dbp, txnp, &skey, &pkey, &data, flags);
+	/*
+	 * On success, set up the keys and data in the reply;
+	 * otherwise only the status is returned.
+	 */
+ if (ret == 0) {
+ /*
+ * XXX
+ * We need to xdr_free whatever we are returning, next time.
+ * However, DB does not allocate a new key if one was given
+		 * and we'd be freeing up space allocated in the request.
+ * So, allocate a new key/data pointer if it is the same one
+ * as in the request.
+ */
+ *freep = 1;
+ /*
+ * Key
+ */
+ key_alloc = 0;
+ if (skey.data == skeydata) {
+ ret = __os_umalloc(dbp->dbenv,
+ skey.size, &replyp->skeydata.skeydata_val);
+ if (ret != 0) {
+ __os_ufree(dbp->dbenv, skey.data);
+ __os_ufree(dbp->dbenv, pkey.data);
+ __os_ufree(dbp->dbenv, data.data);
+ goto err;
+ }
+ key_alloc = 1;
+ memcpy(replyp->skeydata.skeydata_val, skey.data,
+ skey.size);
+ } else
+ replyp->skeydata.skeydata_val = skey.data;
+
+ replyp->skeydata.skeydata_len = skey.size;
+
+ /*
+ * Primary key
+ */
+ if (pkey.data == pkeydata) {
+ ret = __os_umalloc(dbp->dbenv,
+ pkey.size, &replyp->pkeydata.pkeydata_val);
+ if (ret != 0) {
+ __os_ufree(dbp->dbenv, skey.data);
+ __os_ufree(dbp->dbenv, pkey.data);
+ __os_ufree(dbp->dbenv, data.data);
+ if (key_alloc)
+ __os_ufree(dbp->dbenv,
+ replyp->skeydata.skeydata_val);
+ goto err;
+ }
+ /*
+ * We can set it to 2, because they cannot send the
+ * pkey over without sending the skey over too.
+ * So if they did send a pkey, they must have sent
+ * the skey as well.
+ */
+ key_alloc = 2;
+ memcpy(replyp->pkeydata.pkeydata_val, pkey.data,
+ pkey.size);
+ } else
+ replyp->pkeydata.pkeydata_val = pkey.data;
+ replyp->pkeydata.pkeydata_len = pkey.size;
+
+ /*
+ * Data
+ */
+ if (data.data == datadata) {
+ ret = __os_umalloc(dbp->dbenv,
+ data.size, &replyp->datadata.datadata_val);
+ if (ret != 0) {
+ __os_ufree(dbp->dbenv, skey.data);
+ __os_ufree(dbp->dbenv, pkey.data);
+ __os_ufree(dbp->dbenv, data.data);
+ /*
+				 * If key_alloc is 1, only skey needs to be
+				 * freed; if key_alloc is 2, both skey and
+				 * pkey need to be freed.
+ */
+ if (key_alloc--)
+ __os_ufree(dbp->dbenv,
+ replyp->skeydata.skeydata_val);
+ if (key_alloc)
+ __os_ufree(dbp->dbenv,
+ replyp->pkeydata.pkeydata_val);
+ goto err;
+ }
+ memcpy(replyp->datadata.datadata_val, data.data,
+ data.size);
+ } else
+ replyp->datadata.datadata_val = data.data;
+ replyp->datadata.datadata_len = data.size;
+ } else {
+err: replyp->skeydata.skeydata_val = NULL;
+ replyp->skeydata.skeydata_len = 0;
+ replyp->pkeydata.pkeydata_val = NULL;
+ replyp->pkeydata.pkeydata_len = 0;
+ replyp->datadata.datadata_val = NULL;
+ replyp->datadata.datadata_len = 0;
+ *freep = 0;
+ }
+ replyp->status = ret;
+ return;
+}
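
The key_alloc counter above encodes how many reply key buffers this function allocated itself: 0 for none, 1 for just the secondary key, 2 for both keys, and the post-decrement in the cleanup path frees exactly that set. A compact standalone sketch of the same trick (names are illustrative, plain free in place of __os_ufree):

#include <stdlib.h>

static void
free_key_copies(int key_alloc, void *skey_copy, void *pkey_copy)
{
	if (key_alloc--)		/* 1 or 2: free the skey copy */
		free(skey_copy);
	if (key_alloc)			/* only 2: also free the pkey copy */
		free(pkey_copy);
}
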
+
+/* BEGIN __db_put_proc */
+/*
+ * PUBLIC: void __db_put_proc __P((long, long, u_int32_t, u_int32_t, u_int32_t,
+ * PUBLIC: u_int32_t, void *, u_int32_t, u_int32_t, u_int32_t, u_int32_t, u_int32_t, void *,
+ * PUBLIC: u_int32_t, u_int32_t, __db_put_reply *, int *));
+ */
+void
+__db_put_proc(dbpcl_id, txnpcl_id, keydlen,
+ keydoff, keyulen, keyflags, keydata,
+ keysize, datadlen, datadoff, dataulen,
+ dataflags, datadata, datasize, flags, replyp, freep)
+ long dbpcl_id;
+ long txnpcl_id;
+ u_int32_t keydlen;
+ u_int32_t keydoff;
+ u_int32_t keyulen;
+ u_int32_t keyflags;
+ void *keydata;
+ u_int32_t keysize;
+ u_int32_t datadlen;
+ u_int32_t datadoff;
+ u_int32_t dataulen;
+ u_int32_t dataflags;
+ void *datadata;
+ u_int32_t datasize;
+ u_int32_t flags;
+ __db_put_reply *replyp;
+ int * freep;
+/* END __db_put_proc */
+{
+ DB *dbp;
+ DBT key, data;
+ DB_TXN *txnp;
+ ct_entry *dbp_ctp, *txnp_ctp;
+ int ret;
+
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (DB *)dbp_ctp->ct_anyp;
+ if (txnpcl_id != 0) {
+ ACTIVATE_CTP(txnp_ctp, txnpcl_id, CT_TXN);
+ txnp = (DB_TXN *)txnp_ctp->ct_anyp;
+ } else
+ txnp = NULL;
+
+ *freep = 0;
+ memset(&key, 0, sizeof(key));
+ memset(&data, 0, sizeof(data));
+
+ /* Set up key and data DBT */
+ key.dlen = keydlen;
+ key.ulen = keyulen;
+ key.doff = keydoff;
+ /*
+ * Ignore memory related flags on server.
+ */
+ key.flags = DB_DBT_MALLOC;
+ if (keyflags & DB_DBT_PARTIAL)
+ key.flags |= DB_DBT_PARTIAL;
+ key.size = keysize;
+ key.data = keydata;
+
+ data.dlen = datadlen;
+ data.ulen = dataulen;
+ data.doff = datadoff;
+ data.flags = dataflags;
+ data.size = datasize;
+ data.data = datadata;
+
+ /* Got all our stuff, now do the put */
+ ret = dbp->put(dbp, txnp, &key, &data, flags);
+ /*
+ * If the client did a DB_APPEND, set up key in reply.
+ * Otherwise just status.
+ */
+ if (ret == 0 && (flags == DB_APPEND)) {
+ /*
+ * XXX
+ * We need to xdr_free whatever we are returning, next time.
+ * However, DB does not allocate a new key if one was given
+		 * and we'd be freeing up space allocated in the request.
+ * So, allocate a new key/data pointer if it is the same one
+ * as in the request.
+ */
+ *freep = 1;
+ /*
+ * Key
+ */
+ if (key.data == keydata) {
+ ret = __os_umalloc(dbp->dbenv,
+ key.size, &replyp->keydata.keydata_val);
+ if (ret != 0) {
+ __os_ufree(dbp->dbenv, key.data);
+ goto err;
+ }
+ memcpy(replyp->keydata.keydata_val, key.data, key.size);
+ } else
+ replyp->keydata.keydata_val = key.data;
+
+ replyp->keydata.keydata_len = key.size;
+ } else {
+err: replyp->keydata.keydata_val = NULL;
+ replyp->keydata.keydata_len = 0;
+ *freep = 0;
+ }
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __db_re_delim_proc */
+/*
+ * PUBLIC: void __db_re_delim_proc __P((long, u_int32_t,
+ * PUBLIC: __db_re_delim_reply *));
+ */
+void
+__db_re_delim_proc(dbpcl_id, delim, replyp)
+ long dbpcl_id;
+ u_int32_t delim;
+ __db_re_delim_reply *replyp;
+/* END __db_re_delim_proc */
+{
+ DB *dbp;
+ ct_entry *dbp_ctp;
+ int ret;
+
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (DB *)dbp_ctp->ct_anyp;
+
+ ret = dbp->set_re_delim(dbp, delim);
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __db_re_len_proc */
+/*
+ * PUBLIC: void __db_re_len_proc __P((long, u_int32_t, __db_re_len_reply *));
+ */
+void
+__db_re_len_proc(dbpcl_id, len, replyp)
+ long dbpcl_id;
+ u_int32_t len;
+ __db_re_len_reply *replyp;
+/* END __db_re_len_proc */
+{
+ DB *dbp;
+ ct_entry *dbp_ctp;
+ int ret;
+
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (DB *)dbp_ctp->ct_anyp;
+
+ ret = dbp->set_re_len(dbp, len);
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __db_re_pad_proc */
+/*
+ * PUBLIC: void __db_re_pad_proc __P((long, u_int32_t, __db_re_pad_reply *));
+ */
+void
+__db_re_pad_proc(dbpcl_id, pad, replyp)
+ long dbpcl_id;
+ u_int32_t pad;
+ __db_re_pad_reply *replyp;
+/* END __db_re_pad_proc */
+{
+ DB *dbp;
+ ct_entry *dbp_ctp;
+ int ret;
+
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (DB *)dbp_ctp->ct_anyp;
+
+ ret = dbp->set_re_pad(dbp, pad);
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __db_remove_proc */
+/*
+ * PUBLIC: void __db_remove_proc __P((long, char *, char *, u_int32_t,
+ * PUBLIC: __db_remove_reply *));
+ */
+void
+__db_remove_proc(dbpcl_id, name, subdb,
+ flags, replyp)
+ long dbpcl_id;
+ char *name;
+ char *subdb;
+ u_int32_t flags;
+ __db_remove_reply *replyp;
+/* END __db_remove_proc */
+{
+ DB *dbp;
+ ct_entry *dbp_ctp;
+ int ret;
+
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (DB *)dbp_ctp->ct_anyp;
+
+ ret = dbp->remove(dbp, name, subdb, flags);
+ __dbdel_ctp(dbp_ctp);
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __db_rename_proc */
+/*
+ * PUBLIC: void __db_rename_proc __P((long, char *, char *, char *, u_int32_t,
+ * PUBLIC: __db_rename_reply *));
+ */
+void
+__db_rename_proc(dbpcl_id, name, subdb,
+ newname, flags, replyp)
+ long dbpcl_id;
+ char *name;
+ char *subdb;
+ char *newname;
+ u_int32_t flags;
+ __db_rename_reply *replyp;
+/* END __db_rename_proc */
+{
+ DB *dbp;
+ ct_entry *dbp_ctp;
+ int ret;
+
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (DB *)dbp_ctp->ct_anyp;
+
+ ret = dbp->rename(dbp, name, subdb, newname, flags);
+ __dbdel_ctp(dbp_ctp);
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __db_stat_proc */
+/*
+ * PUBLIC: void __db_stat_proc __P((long, u_int32_t, __db_stat_reply *,
+ * PUBLIC: int *));
+ */
+void
+__db_stat_proc(dbpcl_id, flags, replyp, freep)
+ long dbpcl_id;
+ u_int32_t flags;
+ __db_stat_reply *replyp;
+ int * freep;
+/* END __db_stat_proc */
+{
+ DB *dbp;
+ DBTYPE type;
+ ct_entry *dbp_ctp;
+ u_int32_t *q, *p, *retsp;
+ int i, len, ret;
+ void *sp;
+
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (DB *)dbp_ctp->ct_anyp;
+
+ ret = dbp->stat(dbp, &sp, flags);
+ replyp->status = ret;
+ if (ret != 0)
+ return;
+	/*
+	 * If we get here, the call succeeded. Flatten the stat structure
+	 * into an array of u_int32_t for the reply, then free the
+	 * structure returned by DB.
+	 */
+ /*
+ * XXX This assumes that all elements of all stat structures
+ * are u_int32_t fields. They are, currently.
+ */
+ (void)dbp->get_type(dbp, &type);
+ if (type == DB_HASH)
+ len = sizeof(DB_HASH_STAT);
+ else if (type == DB_QUEUE)
+ len = sizeof(DB_QUEUE_STAT);
+ else /* BTREE or RECNO are same stats */
+ len = sizeof(DB_BTREE_STAT);
+ replyp->stats.stats_len = len / sizeof(u_int32_t);
+
+	if ((ret = __os_umalloc(dbp->dbenv,
+	    replyp->stats.stats_len * sizeof(u_int32_t), &retsp)) != 0)
+		goto out;
+	for (i = 0, q = retsp, p = sp;
+	    i < (int)replyp->stats.stats_len; i++, q++, p++)
+		*q = *p;
+ replyp->stats.stats_val = retsp;
+ __os_ufree(dbp->dbenv, sp);
+ if (ret == 0)
+ *freep = 1;
+out:
+ replyp->status = ret;
+ return;
+}
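
The word copy above depends on the XXX assumption that every field of a DB stat structure is a u_int32_t, so the structure can be shipped as a flat array of 32-bit words and overlaid on the same layout at the other end. A standalone sketch of that round trip with a toy structure (toy_stat is illustrative, not a DB type):

#include <stdio.h>
#include <stdint.h>
#include <string.h>

struct toy_stat {			/* stands in for DB_BTREE_STAT etc. */
	uint32_t	nkeys;
	uint32_t	ndata;
	uint32_t	pagesize;
};

int
main(void)
{
	struct toy_stat st = { 10, 20, 4096 }, back;
	uint32_t words[sizeof(st) / sizeof(uint32_t)];
	const uint32_t *p;
	size_t i;

	/* Flatten: copy the struct one 32-bit word at a time. */
	for (i = 0, p = (const uint32_t *)&st;
	    i < sizeof(st) / sizeof(uint32_t); i++)
		words[i] = p[i];

	/* Rebuild on the receiving side by overlaying the same layout. */
	memcpy(&back, words, sizeof(back));
	printf("pagesize round-tripped as %u\n", (unsigned)back.pagesize);
	return (0);
}
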
+
+/* BEGIN __db_sync_proc */
+/*
+ * PUBLIC: void __db_sync_proc __P((long, u_int32_t, __db_sync_reply *));
+ */
+void
+__db_sync_proc(dbpcl_id, flags, replyp)
+ long dbpcl_id;
+ u_int32_t flags;
+ __db_sync_reply *replyp;
+/* END __db_sync_proc */
+{
+ DB *dbp;
+ ct_entry *dbp_ctp;
+ int ret;
+
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (DB *)dbp_ctp->ct_anyp;
+
+ ret = dbp->sync(dbp, flags);
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __db_truncate_proc */
+/*
+ * PUBLIC: void __db_truncate_proc __P((long, long, u_int32_t,
+ * PUBLIC: __db_truncate_reply *));
+ */
+void
+__db_truncate_proc(dbpcl_id, txnpcl_id,
+ flags, replyp)
+ long dbpcl_id;
+ long txnpcl_id;
+ u_int32_t flags;
+ __db_truncate_reply *replyp;
+/* END __db_truncate_proc */
+{
+ DB *dbp;
+ DB_TXN *txnp;
+ ct_entry *dbp_ctp, *txnp_ctp;
+ u_int32_t count;
+ int ret;
+
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (DB *)dbp_ctp->ct_anyp;
+ if (txnpcl_id != 0) {
+ ACTIVATE_CTP(txnp_ctp, txnpcl_id, CT_TXN);
+ txnp = (DB_TXN *)txnp_ctp->ct_anyp;
+ } else
+ txnp = NULL;
+
+ ret = dbp->truncate(dbp, txnp, &count, flags);
+ replyp->status = ret;
+ if (ret == 0)
+ replyp->count = count;
+ return;
+}
+
+/* BEGIN __db_cursor_proc */
+/*
+ * PUBLIC: void __db_cursor_proc __P((long, long, u_int32_t,
+ * PUBLIC: __db_cursor_reply *));
+ */
+void
+__db_cursor_proc(dbpcl_id, txnpcl_id,
+ flags, replyp)
+ long dbpcl_id;
+ long txnpcl_id;
+ u_int32_t flags;
+ __db_cursor_reply *replyp;
+/* END __db_cursor_proc */
+{
+ DB *dbp;
+ DBC *dbc;
+ DB_TXN *txnp;
+ ct_entry *dbc_ctp, *env_ctp, *dbp_ctp, *txnp_ctp;
+ int ret;
+
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (DB *)dbp_ctp->ct_anyp;
+ dbc_ctp = new_ct_ent(&replyp->status);
+ if (dbc_ctp == NULL)
+ return;
+
+ if (txnpcl_id != 0) {
+ ACTIVATE_CTP(txnp_ctp, txnpcl_id, CT_TXN);
+ txnp = (DB_TXN *)txnp_ctp->ct_anyp;
+ dbc_ctp->ct_activep = txnp_ctp->ct_activep;
+ } else
+ txnp = NULL;
+
+ if ((ret = dbp->cursor(dbp, txnp, &dbc, flags)) == 0) {
+ dbc_ctp->ct_dbc = dbc;
+ dbc_ctp->ct_type = CT_CURSOR;
+ dbc_ctp->ct_parent = dbp_ctp;
+ env_ctp = dbp_ctp->ct_envparent;
+ dbc_ctp->ct_envparent = env_ctp;
+ __dbsrv_settimeout(dbc_ctp, env_ctp->ct_timeout);
+ __dbsrv_active(dbc_ctp);
+ replyp->dbcidcl_id = dbc_ctp->ct_id;
+ } else
+ __dbclear_ctp(dbc_ctp);
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __db_join_proc */
+/*
+ * PUBLIC: void __db_join_proc __P((long, u_int32_t *, u_int32_t, u_int32_t,
+ * PUBLIC: __db_join_reply *));
+ */
+void
+__db_join_proc(dbpcl_id, curs, curslen,
+ flags, replyp)
+ long dbpcl_id;
+ u_int32_t * curs;
+ u_int32_t curslen;
+ u_int32_t flags;
+ __db_join_reply *replyp;
+/* END __db_join_proc */
+{
+ DB *dbp;
+ DBC **jcurs, **c;
+ DBC *dbc;
+ ct_entry *dbc_ctp, *ctp, *dbp_ctp;
+ size_t size;
+ u_int32_t *cl, i;
+ int ret;
+
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (DB *)dbp_ctp->ct_anyp;
+
+ dbc_ctp = new_ct_ent(&replyp->status);
+ if (dbc_ctp == NULL)
+ return;
+
+ size = (curslen + 1) * sizeof(DBC *);
+ if ((ret = __os_calloc(dbp->dbenv,
+ curslen + 1, sizeof(DBC *), &jcurs)) != 0) {
+ replyp->status = ret;
+ __dbclear_ctp(dbc_ctp);
+ return;
+ }
+ /*
+ * If our curslist has a parent txn, we need to use it too
+ * for the activity timeout. All cursors must be part of
+ * the same transaction, so just check the first.
+ */
+ ctp = get_tableent(*curs);
+ DB_ASSERT(ctp->ct_type == CT_CURSOR);
+ /*
+ * If we are using a transaction, set the join activity timer
+ * to point to the parent transaction.
+ */
+ if (ctp->ct_activep != &ctp->ct_active)
+ dbc_ctp->ct_activep = ctp->ct_activep;
+ for (i = 0, cl = curs, c = jcurs; i < curslen; i++, cl++, c++) {
+ ctp = get_tableent(*cl);
+ if (ctp == NULL) {
+ replyp->status = DB_NOSERVER_ID;
+ goto out;
+ }
+ /*
+ * If we are using a txn, the join cursor points to the
+ * transaction timeout. If we are not using a transaction,
+ * then all the curslist cursors must point to the join
+	 * cursor's timeout so that we do not time out any of the
+	 * curslist cursors while the join cursor is active.
+ * Change the type of the curslist ctps to CT_JOIN so that
+ * we know they are part of a join list and we can distinguish
+ * them and later restore them when the join cursor is closed.
+ */
+ DB_ASSERT(ctp->ct_type == CT_CURSOR);
+ ctp->ct_type |= CT_JOIN;
+ ctp->ct_origp = ctp->ct_activep;
+ /*
+ * Setting this to the ct_active field of the dbc_ctp is
+ * really just a way to distinguish which join dbc this
+ * cursor is part of. The ct_activep of this cursor is
+ * not used at all during its lifetime as part of a join
+ * cursor.
+ */
+ ctp->ct_activep = &dbc_ctp->ct_active;
+ *c = ctp->ct_dbc;
+ }
+ *c = NULL;
+ if ((ret = dbp->join(dbp, jcurs, &dbc, flags)) == 0) {
+ dbc_ctp->ct_dbc = dbc;
+ dbc_ctp->ct_type = (CT_JOINCUR | CT_CURSOR);
+ dbc_ctp->ct_parent = dbp_ctp;
+ dbc_ctp->ct_envparent = dbp_ctp->ct_envparent;
+ __dbsrv_settimeout(dbc_ctp, dbp_ctp->ct_envparent->ct_timeout);
+ __dbsrv_active(dbc_ctp);
+ replyp->dbcidcl_id = dbc_ctp->ct_id;
+ } else {
+ __dbclear_ctp(dbc_ctp);
+ /*
+ * If we get an error, undo what we did above to any cursors.
+ */
+ for (cl = curs; *cl != 0; cl++) {
+ ctp = get_tableent(*cl);
+ ctp->ct_type = CT_CURSOR;
+ ctp->ct_activep = ctp->ct_origp;
+ }
+ }
+
+ replyp->status = ret;
+out:
+ __os_free(dbp->dbenv, jcurs);
+ return;
+}
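
The join logic above keeps constituent cursors alive by pointing each one's activity pointer at the join cursor's own timestamp, saving the original pointer so it can be restored when the join cursor closes. A standalone sketch of that retargeting; struct cursor here is illustrative, not the server's ct_entry.

#include <time.h>

struct cursor {
	time_t	 active;	/* this cursor's own last-active timestamp */
	time_t	*activep;	/* where liveness checks actually look */
	time_t	*origp;		/* saved pointer for restore */
};

static void
join_cursors(struct cursor *join, struct cursor **members, int n)
{
	int i;

	for (i = 0; i < n; i++) {
		members[i]->origp = members[i]->activep;
		members[i]->activep = &join->active;	/* share join's timer */
	}
}

static void
unjoin_cursors(struct cursor **members, int n)
{
	int i;

	for (i = 0; i < n; i++)
		members[i]->activep = members[i]->origp;
}
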
+
+/* BEGIN __dbc_close_proc */
+/*
+ * PUBLIC: void __dbc_close_proc __P((long, __dbc_close_reply *));
+ */
+void
+__dbc_close_proc(dbccl_id, replyp)
+ long dbccl_id;
+ __dbc_close_reply *replyp;
+/* END __dbc_close_proc */
+{
+ ct_entry *dbc_ctp;
+
+ ACTIVATE_CTP(dbc_ctp, dbccl_id, CT_CURSOR);
+ replyp->status = __dbc_close_int(dbc_ctp);
+ return;
+}
+
+/* BEGIN __dbc_count_proc */
+/*
+ * PUBLIC: void __dbc_count_proc __P((long, u_int32_t, __dbc_count_reply *));
+ */
+void
+__dbc_count_proc(dbccl_id, flags, replyp)
+ long dbccl_id;
+ u_int32_t flags;
+ __dbc_count_reply *replyp;
+/* END __dbc_count_proc */
+{
+ DBC *dbc;
+ ct_entry *dbc_ctp;
+ db_recno_t num;
+ int ret;
+
+ ACTIVATE_CTP(dbc_ctp, dbccl_id, CT_CURSOR);
+ dbc = (DBC *)dbc_ctp->ct_anyp;
+
+ ret = dbc->c_count(dbc, &num, flags);
+ replyp->status = ret;
+ if (ret == 0)
+ replyp->dupcount = num;
+ return;
+}
+
+/* BEGIN __dbc_del_proc */
+/*
+ * PUBLIC: void __dbc_del_proc __P((long, u_int32_t, __dbc_del_reply *));
+ */
+void
+__dbc_del_proc(dbccl_id, flags, replyp)
+ long dbccl_id;
+ u_int32_t flags;
+ __dbc_del_reply *replyp;
+/* END __dbc_del_proc */
+{
+ DBC *dbc;
+ ct_entry *dbc_ctp;
+ int ret;
+
+ ACTIVATE_CTP(dbc_ctp, dbccl_id, CT_CURSOR);
+ dbc = (DBC *)dbc_ctp->ct_anyp;
+
+ ret = dbc->c_del(dbc, flags);
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __dbc_dup_proc */
+/*
+ * PUBLIC: void __dbc_dup_proc __P((long, u_int32_t, __dbc_dup_reply *));
+ */
+void
+__dbc_dup_proc(dbccl_id, flags, replyp)
+ long dbccl_id;
+ u_int32_t flags;
+ __dbc_dup_reply *replyp;
+/* END __dbc_dup_proc */
+{
+ DBC *dbc, *newdbc;
+ ct_entry *dbc_ctp, *new_ctp;
+ int ret;
+
+ ACTIVATE_CTP(dbc_ctp, dbccl_id, CT_CURSOR);
+ dbc = (DBC *)dbc_ctp->ct_anyp;
+
+ new_ctp = new_ct_ent(&replyp->status);
+ if (new_ctp == NULL)
+ return;
+
+ if ((ret = dbc->c_dup(dbc, &newdbc, flags)) == 0) {
+ new_ctp->ct_dbc = newdbc;
+ new_ctp->ct_type = CT_CURSOR;
+ new_ctp->ct_parent = dbc_ctp->ct_parent;
+ new_ctp->ct_envparent = dbc_ctp->ct_envparent;
+ /*
+ * If our cursor has a parent txn, we need to use it too.
+ */
+ if (dbc_ctp->ct_activep != &dbc_ctp->ct_active)
+ new_ctp->ct_activep = dbc_ctp->ct_activep;
+ __dbsrv_settimeout(new_ctp, dbc_ctp->ct_timeout);
+ __dbsrv_active(new_ctp);
+ replyp->dbcidcl_id = new_ctp->ct_id;
+ } else
+ __dbclear_ctp(new_ctp);
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __dbc_get_proc */
+/*
+ * PUBLIC: void __dbc_get_proc __P((long, u_int32_t, u_int32_t, u_int32_t,
+ * PUBLIC: u_int32_t, void *, u_int32_t, u_int32_t, u_int32_t, u_int32_t, u_int32_t, void *,
+ * PUBLIC: u_int32_t, u_int32_t, __dbc_get_reply *, int *));
+ */
+void
+__dbc_get_proc(dbccl_id, keydlen, keydoff,
+ keyulen, keyflags, keydata, keysize,
+ datadlen, datadoff, dataulen, dataflags,
+ datadata, datasize, flags, replyp, freep)
+ long dbccl_id;
+ u_int32_t keydlen;
+ u_int32_t keydoff;
+ u_int32_t keyulen;
+ u_int32_t keyflags;
+ void *keydata;
+ u_int32_t keysize;
+ u_int32_t datadlen;
+ u_int32_t datadoff;
+ u_int32_t dataulen;
+ u_int32_t dataflags;
+ void *datadata;
+ u_int32_t datasize;
+ u_int32_t flags;
+ __dbc_get_reply *replyp;
+ int * freep;
+/* END __dbc_get_proc */
+{
+ DBC *dbc;
+ DBT key, data;
+ DB_ENV *dbenv;
+ ct_entry *dbc_ctp;
+ int key_alloc, bulk_alloc, ret;
+
+ ACTIVATE_CTP(dbc_ctp, dbccl_id, CT_CURSOR);
+ dbc = (DBC *)dbc_ctp->ct_anyp;
+ dbenv = dbc->dbp->dbenv;
+
+ *freep = 0;
+ bulk_alloc = 0;
+ memset(&key, 0, sizeof(key));
+ memset(&data, 0, sizeof(data));
+
+ /* Set up key and data DBT */
+ key.dlen = keydlen;
+ key.ulen = keyulen;
+ key.doff = keydoff;
+ /*
+ * Ignore memory related flags on server.
+ */
+ key.flags = DB_DBT_MALLOC;
+ if (keyflags & DB_DBT_PARTIAL)
+ key.flags |= DB_DBT_PARTIAL;
+ key.size = keysize;
+ key.data = keydata;
+
+ data.dlen = datadlen;
+ data.ulen = dataulen;
+ data.doff = datadoff;
+ data.size = datasize;
+ data.data = datadata;
+ if (flags & DB_MULTIPLE || flags & DB_MULTIPLE_KEY) {
+ if (data.data == 0) {
+ ret = __os_umalloc(dbenv, data.ulen, &data.data);
+ if (ret != 0)
+ goto err;
+ bulk_alloc = 1;
+ }
+ data.flags |= DB_DBT_USERMEM;
+ } else
+ data.flags |= DB_DBT_MALLOC;
+ if (dataflags & DB_DBT_PARTIAL)
+ data.flags |= DB_DBT_PARTIAL;
+
+ /* Got all our stuff, now do the get */
+ ret = dbc->c_get(dbc, &key, &data, flags);
+
+ /*
+ * On success, return the key and data; otherwise the reply carries only the status.
+ */
+ if (ret == 0) {
+ /*
+ * XXX
+ * We need to xdr_free whatever we are returning, next time.
+ * However, DB does not allocate a new key if one was given
+ * and we'd be free'ing up space allocated in the request.
+ * So, allocate a new key/data pointer if it is the same one
+ * as in the request.
+ */
+ *freep = 1;
+ /*
+ * Key
+ */
+ key_alloc = 0;
+ if (key.data == keydata) {
+ ret = __os_umalloc(dbenv, key.size,
+ &replyp->keydata.keydata_val);
+ if (ret != 0) {
+ __os_ufree(dbenv, key.data);
+ __os_ufree(dbenv, data.data);
+ goto err;
+ }
+ key_alloc = 1;
+ memcpy(replyp->keydata.keydata_val, key.data, key.size);
+ } else
+ replyp->keydata.keydata_val = key.data;
+
+ replyp->keydata.keydata_len = key.size;
+
+ /*
+ * Data
+ */
+ if (data.data == datadata) {
+ ret = __os_umalloc(dbenv, data.size,
+ &replyp->datadata.datadata_val);
+ if (ret != 0) {
+ __os_ufree(dbenv, key.data);
+ __os_ufree(dbenv, data.data);
+ if (key_alloc)
+ __os_ufree(dbenv, replyp->keydata.keydata_val);
+ goto err;
+ }
+ memcpy(replyp->datadata.datadata_val, data.data,
+ data.size);
+ } else
+ replyp->datadata.datadata_val = data.data;
+ replyp->datadata.datadata_len = data.size;
+ } else {
+err: replyp->keydata.keydata_val = NULL;
+ replyp->keydata.keydata_len = 0;
+ replyp->datadata.datadata_val = NULL;
+ replyp->datadata.datadata_len = 0;
+ *freep = 0;
+ if (bulk_alloc)
+ __os_ufree(dbenv, data.data);
+ }
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __dbc_pget_proc */
+/*
+ * PUBLIC: void __dbc_pget_proc __P((long, u_int32_t, u_int32_t, u_int32_t,
+ * PUBLIC: u_int32_t, void *, u_int32_t, u_int32_t, u_int32_t, u_int32_t, u_int32_t, void *,
+ * PUBLIC: u_int32_t, u_int32_t, u_int32_t, u_int32_t, u_int32_t, void *, u_int32_t,
+ * PUBLIC: u_int32_t, __dbc_pget_reply *, int *));
+ */
+void
+__dbc_pget_proc(dbccl_id, skeydlen, skeydoff,
+ skeyulen, skeyflags, skeydata, skeysize,
+ pkeydlen, pkeydoff, pkeyulen, pkeyflags,
+ pkeydata, pkeysize, datadlen, datadoff,
+ dataulen, dataflags, datadata, datasize,
+ flags, replyp, freep)
+ long dbccl_id;
+ u_int32_t skeydlen;
+ u_int32_t skeydoff;
+ u_int32_t skeyulen;
+ u_int32_t skeyflags;
+ void *skeydata;
+ u_int32_t skeysize;
+ u_int32_t pkeydlen;
+ u_int32_t pkeydoff;
+ u_int32_t pkeyulen;
+ u_int32_t pkeyflags;
+ void *pkeydata;
+ u_int32_t pkeysize;
+ u_int32_t datadlen;
+ u_int32_t datadoff;
+ u_int32_t dataulen;
+ u_int32_t dataflags;
+ void *datadata;
+ u_int32_t datasize;
+ u_int32_t flags;
+ __dbc_pget_reply *replyp;
+ int * freep;
+/* END __dbc_pget_proc */
+{
+ DBC *dbc;
+ DBT skey, pkey, data;
+ DB_ENV *dbenv;
+ ct_entry *dbc_ctp;
+ int key_alloc, ret;
+
+ ACTIVATE_CTP(dbc_ctp, dbccl_id, CT_CURSOR);
+ dbc = (DBC *)dbc_ctp->ct_anyp;
+ dbenv = dbc->dbp->dbenv;
+
+ *freep = 0;
+ memset(&skey, 0, sizeof(skey));
+ memset(&pkey, 0, sizeof(pkey));
+ memset(&data, 0, sizeof(data));
+
+ /*
+ * Ignore memory related flags on server.
+ */
+ /* Set up key and data DBT */
+ skey.flags = DB_DBT_MALLOC;
+ skey.dlen = skeydlen;
+ skey.ulen = skeyulen;
+ skey.doff = skeydoff;
+ if (skeyflags & DB_DBT_PARTIAL)
+ skey.flags |= DB_DBT_PARTIAL;
+ skey.size = skeysize;
+ skey.data = skeydata;
+
+ pkey.flags = DB_DBT_MALLOC;
+ pkey.dlen = pkeydlen;
+ pkey.ulen = pkeyulen;
+ pkey.doff = pkeydoff;
+ if (pkeyflags & DB_DBT_PARTIAL)
+ pkey.flags |= DB_DBT_PARTIAL;
+ pkey.size = pkeysize;
+ pkey.data = pkeydata;
+
+ data.flags = DB_DBT_MALLOC;
+ data.dlen = datadlen;
+ data.ulen = dataulen;
+ data.doff = datadoff;
+ if (dataflags & DB_DBT_PARTIAL)
+ data.flags |= DB_DBT_PARTIAL;
+ data.size = datasize;
+ data.data = datadata;
+
+ /* Got all our stuff, now do the get */
+ ret = dbc->c_pget(dbc, &skey, &pkey, &data, flags);
+ /*
+ * On success, return the keys and data; otherwise the reply carries only the status.
+ */
+ if (ret == 0) {
+ /*
+ * XXX
+ * We need to xdr_free whatever we are returning, next time.
+ * However, DB does not allocate a new key if one was given
+ * and we'd be free'ing up space allocated in the request.
+ * So, allocate a new key/data pointer if it is the same one
+ * as in the request.
+ */
+ *freep = 1;
+ /*
+ * Key
+ */
+ key_alloc = 0;
+ if (skey.data == skeydata) {
+ ret = __os_umalloc(dbenv,
+ skey.size, &replyp->skeydata.skeydata_val);
+ if (ret != 0) {
+ __os_ufree(dbenv, skey.data);
+ __os_ufree(dbenv, pkey.data);
+ __os_ufree(dbenv, data.data);
+ goto err;
+ }
+ key_alloc = 1;
+ memcpy(replyp->skeydata.skeydata_val, skey.data,
+ skey.size);
+ } else
+ replyp->skeydata.skeydata_val = skey.data;
+ replyp->skeydata.skeydata_len = skey.size;
+
+ /*
+ * Primary key
+ */
+ if (pkey.data == pkeydata) {
+ ret = __os_umalloc(dbenv,
+ pkey.size, &replyp->pkeydata.pkeydata_val);
+ if (ret != 0) {
+ __os_ufree(dbenv, skey.data);
+ __os_ufree(dbenv, pkey.data);
+ __os_ufree(dbenv, data.data);
+ if (key_alloc)
+ __os_ufree(dbenv,
+ replyp->skeydata.skeydata_val);
+ goto err;
+ }
+ /*
+ * We can set it to 2, because they cannot send the
+ * pkey over without sending the skey over too.
+ * So if they did send a pkey, they must have sent
+ * the skey as well.
+ */
+ key_alloc = 2;
+ memcpy(replyp->pkeydata.pkeydata_val, pkey.data,
+ pkey.size);
+ } else
+ replyp->pkeydata.pkeydata_val = pkey.data;
+ replyp->pkeydata.pkeydata_len = pkey.size;
+
+ /*
+ * Data
+ */
+ if (data.data == datadata) {
+ ret = __os_umalloc(dbenv,
+ data.size, &replyp->datadata.datadata_val);
+ if (ret != 0) {
+ __os_ufree(dbenv, skey.data);
+ __os_ufree(dbenv, pkey.data);
+ __os_ufree(dbenv, data.data);
+ /*
+ * If key_alloc is 1, just skey needs to be
+ * freed, if key_alloc is 2, both skey and pkey
+ * need to be freed.
+ */
+ if (key_alloc--)
+ __os_ufree(dbenv,
+ replyp->skeydata.skeydata_val);
+ if (key_alloc)
+ __os_ufree(dbenv,
+ replyp->pkeydata.pkeydata_val);
+ goto err;
+ }
+ memcpy(replyp->datadata.datadata_val, data.data,
+ data.size);
+ } else
+ replyp->datadata.datadata_val = data.data;
+ replyp->datadata.datadata_len = data.size;
+ } else {
+err: replyp->skeydata.skeydata_val = NULL;
+ replyp->skeydata.skeydata_len = 0;
+ replyp->pkeydata.pkeydata_val = NULL;
+ replyp->pkeydata.pkeydata_len = 0;
+ replyp->datadata.datadata_val = NULL;
+ replyp->datadata.datadata_len = 0;
+ *freep = 0;
+ }
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __dbc_put_proc */
+/*
+ * PUBLIC: void __dbc_put_proc __P((long, u_int32_t, u_int32_t, u_int32_t,
+ * PUBLIC: u_int32_t, void *, u_int32_t, u_int32_t, u_int32_t, u_int32_t, u_int32_t, void *,
+ * PUBLIC: u_int32_t, u_int32_t, __dbc_put_reply *, int *));
+ */
+void
+__dbc_put_proc(dbccl_id, keydlen, keydoff,
+ keyulen, keyflags, keydata, keysize,
+ datadlen, datadoff, dataulen, dataflags,
+ datadata, datasize, flags, replyp, freep)
+ long dbccl_id;
+ u_int32_t keydlen;
+ u_int32_t keydoff;
+ u_int32_t keyulen;
+ u_int32_t keyflags;
+ void *keydata;
+ u_int32_t keysize;
+ u_int32_t datadlen;
+ u_int32_t datadoff;
+ u_int32_t dataulen;
+ u_int32_t dataflags;
+ void *datadata;
+ u_int32_t datasize;
+ u_int32_t flags;
+ __dbc_put_reply *replyp;
+ int * freep;
+/* END __dbc_put_proc */
+{
+ DB *dbp;
+ DBC *dbc;
+ DBT key, data;
+ ct_entry *dbc_ctp;
+ int ret;
+
+ ACTIVATE_CTP(dbc_ctp, dbccl_id, CT_CURSOR);
+ dbc = (DBC *)dbc_ctp->ct_anyp;
+ dbp = (DB *)dbc_ctp->ct_parent->ct_anyp;
+
+ memset(&key, 0, sizeof(key));
+ memset(&data, 0, sizeof(data));
+
+ /* Set up key and data DBT */
+ key.dlen = keydlen;
+ key.ulen = keyulen;
+ key.doff = keydoff;
+ /*
+ * Ignore memory related flags on server.
+ */
+ key.flags = 0;
+ if (keyflags & DB_DBT_PARTIAL)
+ key.flags |= DB_DBT_PARTIAL;
+ key.size = keysize;
+ key.data = keydata;
+
+ data.dlen = datadlen;
+ data.ulen = dataulen;
+ data.doff = datadoff;
+ data.flags = dataflags;
+ data.size = datasize;
+ data.data = datadata;
+
+ /* Got all our stuff, now do the put */
+ ret = dbc->c_put(dbc, &key, &data, flags);
+
+ *freep = 0;
+ if (ret == 0 && (flags == DB_AFTER || flags == DB_BEFORE) &&
+ dbp->type == DB_RECNO) {
+ /*
+ * We need to xdr_free whatever we are returning, next time.
+ */
+ replyp->keydata.keydata_val = key.data;
+ replyp->keydata.keydata_len = key.size;
+ } else {
+ replyp->keydata.keydata_val = NULL;
+ replyp->keydata.keydata_len = 0;
+ }
+ replyp->status = ret;
+ return;
+}
+#endif /* HAVE_RPC */
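
The get/pget reply handling above follows one rule: the reply is later released with xdr_free(), so it must never carry a pointer that still belongs to the request. When DB hands back the very buffer that arrived with the call, the server copies it into freshly allocated memory; when DB_DBT_MALLOC produced new memory, that pointer is adopted directly. Below is a minimal sketch of that copy-or-adopt rule, using plain malloc()/memcpy() instead of the __os_umalloc()/__os_ufree() wrappers; the names adopt_or_copy, reply_val and reply_len are hypothetical stand-ins for the reply's *_val/*_len fields.

/*
 * Illustrative sketch only -- not part of the patch.  A simplified form of
 * the rule used by __dbc_get_proc() and __dbc_pget_proc() above; the names
 * are hypothetical and plain malloc()/memcpy() stand in for the DB wrappers.
 */
#include <stdlib.h>
#include <string.h>

static int
adopt_or_copy(request_buf, result_buf, result_len, reply_val, reply_len)
	void *request_buf, *result_buf;
	size_t result_len;
	void **reply_val;
	unsigned int *reply_len;
{
	if (result_buf == request_buf) {
		/*
		 * DB returned the buffer that came in with the request, so
		 * the reply needs its own copy: xdr_free() will release the
		 * reply later and must not free request memory.
		 */
		if ((*reply_val = malloc(result_len)) == NULL)
			return (-1);
		memcpy(*reply_val, result_buf, result_len);
	} else
		/* DB_DBT_MALLOC allocated fresh memory; adopt it as-is. */
		*reply_val = result_buf;
	*reply_len = result_len;
	return (0);
}
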
diff --git a/bdb/rpc_server/c/db_server_proc.sed b/bdb/rpc_server/c/db_server_proc.sed
new file mode 100644
index 00000000000..e11b2c33cfe
--- /dev/null
+++ b/bdb/rpc_server/c/db_server_proc.sed
@@ -0,0 +1,772 @@
+/^\/\* BEGIN __env_cachesize_proc/,/^\/\* END __env_cachesize_proc/c\
+/* BEGIN __env_cachesize_proc */\
+/*\
+\ * PUBLIC: void __env_cachesize_proc __P((long, u_int32_t, u_int32_t,\
+\ * PUBLIC: u_int32_t, __env_cachesize_reply *));\
+\ */\
+void\
+__env_cachesize_proc(dbenvcl_id, gbytes, bytes,\
+\ \ ncache, replyp)\
+\ long dbenvcl_id;\
+\ u_int32_t gbytes;\
+\ u_int32_t bytes;\
+\ u_int32_t ncache;\
+\ __env_cachesize_reply *replyp;\
+/* END __env_cachesize_proc */
+/^\/\* BEGIN __env_close_proc/,/^\/\* END __env_close_proc/c\
+/* BEGIN __env_close_proc */\
+/*\
+\ * PUBLIC: void __env_close_proc __P((long, u_int32_t, __env_close_reply *));\
+\ */\
+void\
+__env_close_proc(dbenvcl_id, flags, replyp)\
+\ long dbenvcl_id;\
+\ u_int32_t flags;\
+\ __env_close_reply *replyp;\
+/* END __env_close_proc */
+/^\/\* BEGIN __env_create_proc/,/^\/\* END __env_create_proc/c\
+/* BEGIN __env_create_proc */\
+/*\
+\ * PUBLIC: void __env_create_proc __P((u_int32_t, __env_create_reply *));\
+\ */\
+void\
+__env_create_proc(timeout, replyp)\
+\ u_int32_t timeout;\
+\ __env_create_reply *replyp;\
+/* END __env_create_proc */
+/^\/\* BEGIN __env_dbremove_proc/,/^\/\* END __env_dbremove_proc/c\
+/* BEGIN __env_dbremove_proc */\
+/*\
+\ * PUBLIC: void __env_dbremove_proc __P((long, long, char *, char *, u_int32_t,\
+\ * PUBLIC: __env_dbremove_reply *));\
+\ */\
+void\
+__env_dbremove_proc(dbenvcl_id, txnpcl_id, name,\
+\ \ subdb, flags, replyp)\
+\ long dbenvcl_id;\
+\ long txnpcl_id;\
+\ char *name;\
+\ char *subdb;\
+\ u_int32_t flags;\
+\ __env_dbremove_reply *replyp;\
+/* END __env_dbremove_proc */
+/^\/\* BEGIN __env_dbrename_proc/,/^\/\* END __env_dbrename_proc/c\
+/* BEGIN __env_dbrename_proc */\
+/*\
+\ * PUBLIC: void __env_dbrename_proc __P((long, long, char *, char *, char *,\
+\ * PUBLIC: u_int32_t, __env_dbrename_reply *));\
+\ */\
+void\
+__env_dbrename_proc(dbenvcl_id, txnpcl_id, name,\
+\ \ subdb, newname, flags, replyp)\
+\ long dbenvcl_id;\
+\ long txnpcl_id;\
+\ char *name;\
+\ char *subdb;\
+\ char *newname;\
+\ u_int32_t flags;\
+\ __env_dbrename_reply *replyp;\
+/* END __env_dbrename_proc */
+/^\/\* BEGIN __env_encrypt_proc/,/^\/\* END __env_encrypt_proc/c\
+/* BEGIN __env_encrypt_proc */\
+/*\
+\ * PUBLIC: void __env_encrypt_proc __P((long, char *, u_int32_t,\
+\ * PUBLIC: __env_encrypt_reply *));\
+\ */\
+void\
+__env_encrypt_proc(dbenvcl_id, passwd, flags, replyp)\
+\ long dbenvcl_id;\
+\ char *passwd;\
+\ u_int32_t flags;\
+\ __env_encrypt_reply *replyp;\
+/* END __env_encrypt_proc */
+/^\/\* BEGIN __env_flags_proc/,/^\/\* END __env_flags_proc/c\
+/* BEGIN __env_flags_proc */\
+/*\
+\ * PUBLIC: void __env_flags_proc __P((long, u_int32_t, u_int32_t,\
+\ * PUBLIC: __env_flags_reply *));\
+\ */\
+void\
+__env_flags_proc(dbenvcl_id, flags, onoff, replyp)\
+\ long dbenvcl_id;\
+\ u_int32_t flags;\
+\ u_int32_t onoff;\
+\ __env_flags_reply *replyp;\
+/* END __env_flags_proc */
+/^\/\* BEGIN __env_open_proc/,/^\/\* END __env_open_proc/c\
+/* BEGIN __env_open_proc */\
+/*\
+\ * PUBLIC: void __env_open_proc __P((long, char *, u_int32_t, u_int32_t,\
+\ * PUBLIC: __env_open_reply *));\
+\ */\
+void\
+__env_open_proc(dbenvcl_id, home, flags,\
+\ \ mode, replyp)\
+\ long dbenvcl_id;\
+\ char *home;\
+\ u_int32_t flags;\
+\ u_int32_t mode;\
+\ __env_open_reply *replyp;\
+/* END __env_open_proc */
+/^\/\* BEGIN __env_remove_proc/,/^\/\* END __env_remove_proc/c\
+/* BEGIN __env_remove_proc */\
+/*\
+\ * PUBLIC: void __env_remove_proc __P((long, char *, u_int32_t,\
+\ * PUBLIC: __env_remove_reply *));\
+\ */\
+void\
+__env_remove_proc(dbenvcl_id, home, flags, replyp)\
+\ long dbenvcl_id;\
+\ char *home;\
+\ u_int32_t flags;\
+\ __env_remove_reply *replyp;\
+/* END __env_remove_proc */
+/^\/\* BEGIN __txn_abort_proc/,/^\/\* END __txn_abort_proc/c\
+/* BEGIN __txn_abort_proc */\
+/*\
+\ * PUBLIC: void __txn_abort_proc __P((long, __txn_abort_reply *));\
+\ */\
+void\
+__txn_abort_proc(txnpcl_id, replyp)\
+\ long txnpcl_id;\
+\ __txn_abort_reply *replyp;\
+/* END __txn_abort_proc */
+/^\/\* BEGIN __txn_begin_proc/,/^\/\* END __txn_begin_proc/c\
+/* BEGIN __txn_begin_proc */\
+/*\
+\ * PUBLIC: void __txn_begin_proc __P((long, long, u_int32_t,\
+\ * PUBLIC: __txn_begin_reply *));\
+\ */\
+void\
+__txn_begin_proc(dbenvcl_id, parentcl_id,\
+\ \ flags, replyp)\
+\ long dbenvcl_id;\
+\ long parentcl_id;\
+\ u_int32_t flags;\
+\ __txn_begin_reply *replyp;\
+/* END __txn_begin_proc */
+/^\/\* BEGIN __txn_commit_proc/,/^\/\* END __txn_commit_proc/c\
+/* BEGIN __txn_commit_proc */\
+/*\
+\ * PUBLIC: void __txn_commit_proc __P((long, u_int32_t,\
+\ * PUBLIC: __txn_commit_reply *));\
+\ */\
+void\
+__txn_commit_proc(txnpcl_id, flags, replyp)\
+\ long txnpcl_id;\
+\ u_int32_t flags;\
+\ __txn_commit_reply *replyp;\
+/* END __txn_commit_proc */
+/^\/\* BEGIN __txn_discard_proc/,/^\/\* END __txn_discard_proc/c\
+/* BEGIN __txn_discard_proc */\
+/*\
+\ * PUBLIC: void __txn_discard_proc __P((long, u_int32_t,\
+\ * PUBLIC: __txn_discard_reply *));\
+\ */\
+void\
+__txn_discard_proc(txnpcl_id, flags, replyp)\
+\ long txnpcl_id;\
+\ u_int32_t flags;\
+\ __txn_discard_reply *replyp;\
+/* END __txn_discard_proc */
+/^\/\* BEGIN __txn_prepare_proc/,/^\/\* END __txn_prepare_proc/c\
+/* BEGIN __txn_prepare_proc */\
+/*\
+\ * PUBLIC: void __txn_prepare_proc __P((long, u_int8_t *,\
+\ * PUBLIC: __txn_prepare_reply *));\
+\ */\
+void\
+__txn_prepare_proc(txnpcl_id, gid, replyp)\
+\ long txnpcl_id;\
+\ u_int8_t *gid;\
+\ __txn_prepare_reply *replyp;\
+/* END __txn_prepare_proc */
+/^\/\* BEGIN __txn_recover_proc/,/^\/\* END __txn_recover_proc/c\
+/* BEGIN __txn_recover_proc */\
+/*\
+\ * PUBLIC: void __txn_recover_proc __P((long, u_int32_t, u_int32_t,\
+\ * PUBLIC: __txn_recover_reply *, int *));\
+\ */\
+void\
+__txn_recover_proc(dbenvcl_id, count,\
+\ \ flags, replyp, freep)\
+\ long dbenvcl_id;\
+\ u_int32_t count;\
+\ u_int32_t flags;\
+\ __txn_recover_reply *replyp;\
+\ int * freep;\
+/* END __txn_recover_proc */
+/^\/\* BEGIN __db_associate_proc/,/^\/\* END __db_associate_proc/c\
+/* BEGIN __db_associate_proc */\
+/*\
+\ * PUBLIC: void __db_associate_proc __P((long, long, long, u_int32_t,\
+\ * PUBLIC: __db_associate_reply *));\
+\ */\
+void\
+__db_associate_proc(dbpcl_id, txnpcl_id, sdbpcl_id,\
+\ \ flags, replyp)\
+\ long dbpcl_id;\
+\ long txnpcl_id;\
+\ long sdbpcl_id;\
+\ u_int32_t flags;\
+\ __db_associate_reply *replyp;\
+/* END __db_associate_proc */
+/^\/\* BEGIN __db_bt_maxkey_proc/,/^\/\* END __db_bt_maxkey_proc/c\
+/* BEGIN __db_bt_maxkey_proc */\
+/*\
+\ * PUBLIC: void __db_bt_maxkey_proc __P((long, u_int32_t,\
+\ * PUBLIC: __db_bt_maxkey_reply *));\
+\ */\
+void\
+__db_bt_maxkey_proc(dbpcl_id, maxkey, replyp)\
+\ long dbpcl_id;\
+\ u_int32_t maxkey;\
+\ __db_bt_maxkey_reply *replyp;\
+/* END __db_bt_maxkey_proc */
+/^\/\* BEGIN __db_bt_minkey_proc/,/^\/\* END __db_bt_minkey_proc/c\
+/* BEGIN __db_bt_minkey_proc */\
+/*\
+\ * PUBLIC: void __db_bt_minkey_proc __P((long, u_int32_t,\
+\ * PUBLIC: __db_bt_minkey_reply *));\
+\ */\
+void\
+__db_bt_minkey_proc(dbpcl_id, minkey, replyp)\
+\ long dbpcl_id;\
+\ u_int32_t minkey;\
+\ __db_bt_minkey_reply *replyp;\
+/* END __db_bt_minkey_proc */
+/^\/\* BEGIN __db_close_proc/,/^\/\* END __db_close_proc/c\
+/* BEGIN __db_close_proc */\
+/*\
+\ * PUBLIC: void __db_close_proc __P((long, u_int32_t, __db_close_reply *));\
+\ */\
+void\
+__db_close_proc(dbpcl_id, flags, replyp)\
+\ long dbpcl_id;\
+\ u_int32_t flags;\
+\ __db_close_reply *replyp;\
+/* END __db_close_proc */
+/^\/\* BEGIN __db_create_proc/,/^\/\* END __db_create_proc/c\
+/* BEGIN __db_create_proc */\
+/*\
+\ * PUBLIC: void __db_create_proc __P((long, u_int32_t, __db_create_reply *));\
+\ */\
+void\
+__db_create_proc(dbenvcl_id, flags, replyp)\
+\ long dbenvcl_id;\
+\ u_int32_t flags;\
+\ __db_create_reply *replyp;\
+/* END __db_create_proc */
+/^\/\* BEGIN __db_del_proc/,/^\/\* END __db_del_proc/c\
+/* BEGIN __db_del_proc */\
+/*\
+\ * PUBLIC: void __db_del_proc __P((long, long, u_int32_t, u_int32_t, u_int32_t,\
+\ * PUBLIC: u_int32_t, void *, u_int32_t, u_int32_t, __db_del_reply *));\
+\ */\
+void\
+__db_del_proc(dbpcl_id, txnpcl_id, keydlen,\
+\ \ keydoff, keyulen, keyflags, keydata,\
+\ \ keysize, flags, replyp)\
+\ long dbpcl_id;\
+\ long txnpcl_id;\
+\ u_int32_t keydlen;\
+\ u_int32_t keydoff;\
+\ u_int32_t keyulen;\
+\ u_int32_t keyflags;\
+\ void *keydata;\
+\ u_int32_t keysize;\
+\ u_int32_t flags;\
+\ __db_del_reply *replyp;\
+/* END __db_del_proc */
+/^\/\* BEGIN __db_encrypt_proc/,/^\/\* END __db_encrypt_proc/c\
+/* BEGIN __db_encrypt_proc */\
+/*\
+\ * PUBLIC: void __db_encrypt_proc __P((long, char *, u_int32_t,\
+\ * PUBLIC: __db_encrypt_reply *));\
+\ */\
+void\
+__db_encrypt_proc(dbpcl_id, passwd, flags, replyp)\
+\ long dbpcl_id;\
+\ char *passwd;\
+\ u_int32_t flags;\
+\ __db_encrypt_reply *replyp;\
+/* END __db_encrypt_proc */
+/^\/\* BEGIN __db_extentsize_proc/,/^\/\* END __db_extentsize_proc/c\
+/* BEGIN __db_extentsize_proc */\
+/*\
+\ * PUBLIC: void __db_extentsize_proc __P((long, u_int32_t,\
+\ * PUBLIC: __db_extentsize_reply *));\
+\ */\
+void\
+__db_extentsize_proc(dbpcl_id, extentsize, replyp)\
+\ long dbpcl_id;\
+\ u_int32_t extentsize;\
+\ __db_extentsize_reply *replyp;\
+/* END __db_extentsize_proc */
+/^\/\* BEGIN __db_flags_proc/,/^\/\* END __db_flags_proc/c\
+/* BEGIN __db_flags_proc */\
+/*\
+\ * PUBLIC: void __db_flags_proc __P((long, u_int32_t, __db_flags_reply *));\
+\ */\
+void\
+__db_flags_proc(dbpcl_id, flags, replyp)\
+\ long dbpcl_id;\
+\ u_int32_t flags;\
+\ __db_flags_reply *replyp;\
+/* END __db_flags_proc */
+/^\/\* BEGIN __db_get_proc/,/^\/\* END __db_get_proc/c\
+/* BEGIN __db_get_proc */\
+/*\
+\ * PUBLIC: void __db_get_proc __P((long, long, u_int32_t, u_int32_t, u_int32_t,\
+\ * PUBLIC: u_int32_t, void *, u_int32_t, u_int32_t, u_int32_t, u_int32_t, u_int32_t, void *,\
+\ * PUBLIC: u_int32_t, u_int32_t, __db_get_reply *, int *));\
+\ */\
+void\
+__db_get_proc(dbpcl_id, txnpcl_id, keydlen,\
+\ \ keydoff, keyulen, keyflags, keydata,\
+\ \ keysize, datadlen, datadoff, dataulen,\
+\ \ dataflags, datadata, datasize, flags, replyp, freep)\
+\ long dbpcl_id;\
+\ long txnpcl_id;\
+\ u_int32_t keydlen;\
+\ u_int32_t keydoff;\
+\ u_int32_t keyulen;\
+\ u_int32_t keyflags;\
+\ void *keydata;\
+\ u_int32_t keysize;\
+\ u_int32_t datadlen;\
+\ u_int32_t datadoff;\
+\ u_int32_t dataulen;\
+\ u_int32_t dataflags;\
+\ void *datadata;\
+\ u_int32_t datasize;\
+\ u_int32_t flags;\
+\ __db_get_reply *replyp;\
+\ int * freep;\
+/* END __db_get_proc */
+/^\/\* BEGIN __db_h_ffactor_proc/,/^\/\* END __db_h_ffactor_proc/c\
+/* BEGIN __db_h_ffactor_proc */\
+/*\
+\ * PUBLIC: void __db_h_ffactor_proc __P((long, u_int32_t,\
+\ * PUBLIC: __db_h_ffactor_reply *));\
+\ */\
+void\
+__db_h_ffactor_proc(dbpcl_id, ffactor, replyp)\
+\ long dbpcl_id;\
+\ u_int32_t ffactor;\
+\ __db_h_ffactor_reply *replyp;\
+/* END __db_h_ffactor_proc */
+/^\/\* BEGIN __db_h_nelem_proc/,/^\/\* END __db_h_nelem_proc/c\
+/* BEGIN __db_h_nelem_proc */\
+/*\
+\ * PUBLIC: void __db_h_nelem_proc __P((long, u_int32_t,\
+\ * PUBLIC: __db_h_nelem_reply *));\
+\ */\
+void\
+__db_h_nelem_proc(dbpcl_id, nelem, replyp)\
+\ long dbpcl_id;\
+\ u_int32_t nelem;\
+\ __db_h_nelem_reply *replyp;\
+/* END __db_h_nelem_proc */
+/^\/\* BEGIN __db_key_range_proc/,/^\/\* END __db_key_range_proc/c\
+/* BEGIN __db_key_range_proc */\
+/*\
+\ * PUBLIC: void __db_key_range_proc __P((long, long, u_int32_t, u_int32_t,\
+\ * PUBLIC: u_int32_t, u_int32_t, void *, u_int32_t, u_int32_t, __db_key_range_reply *));\
+\ */\
+void\
+__db_key_range_proc(dbpcl_id, txnpcl_id, keydlen,\
+\ \ keydoff, keyulen, keyflags, keydata,\
+\ \ keysize, flags, replyp)\
+\ long dbpcl_id;\
+\ long txnpcl_id;\
+\ u_int32_t keydlen;\
+\ u_int32_t keydoff;\
+\ u_int32_t keyulen;\
+\ u_int32_t keyflags;\
+\ void *keydata;\
+\ u_int32_t keysize;\
+\ u_int32_t flags;\
+\ __db_key_range_reply *replyp;\
+/* END __db_key_range_proc */
+/^\/\* BEGIN __db_lorder_proc/,/^\/\* END __db_lorder_proc/c\
+/* BEGIN __db_lorder_proc */\
+/*\
+\ * PUBLIC: void __db_lorder_proc __P((long, u_int32_t, __db_lorder_reply *));\
+\ */\
+void\
+__db_lorder_proc(dbpcl_id, lorder, replyp)\
+\ long dbpcl_id;\
+\ u_int32_t lorder;\
+\ __db_lorder_reply *replyp;\
+/* END __db_lorder_proc */
+/^\/\* BEGIN __db_open_proc/,/^\/\* END __db_open_proc/c\
+/* BEGIN __db_open_proc */\
+/*\
+\ * PUBLIC: void __db_open_proc __P((long, long, char *, char *, u_int32_t,\
+\ * PUBLIC: u_int32_t, u_int32_t, __db_open_reply *));\
+\ */\
+void\
+__db_open_proc(dbpcl_id, txnpcl_id, name,\
+\ \ subdb, type, flags, mode, replyp)\
+\ long dbpcl_id;\
+\ long txnpcl_id;\
+\ char *name;\
+\ char *subdb;\
+\ u_int32_t type;\
+\ u_int32_t flags;\
+\ u_int32_t mode;\
+\ __db_open_reply *replyp;\
+/* END __db_open_proc */
+/^\/\* BEGIN __db_pagesize_proc/,/^\/\* END __db_pagesize_proc/c\
+/* BEGIN __db_pagesize_proc */\
+/*\
+\ * PUBLIC: void __db_pagesize_proc __P((long, u_int32_t,\
+\ * PUBLIC: __db_pagesize_reply *));\
+\ */\
+void\
+__db_pagesize_proc(dbpcl_id, pagesize, replyp)\
+\ long dbpcl_id;\
+\ u_int32_t pagesize;\
+\ __db_pagesize_reply *replyp;\
+/* END __db_pagesize_proc */
+/^\/\* BEGIN __db_pget_proc/,/^\/\* END __db_pget_proc/c\
+/* BEGIN __db_pget_proc */\
+/*\
+\ * PUBLIC: void __db_pget_proc __P((long, long, u_int32_t, u_int32_t,\
+\ * PUBLIC: u_int32_t, u_int32_t, void *, u_int32_t, u_int32_t, u_int32_t, u_int32_t,\
+\ * PUBLIC: u_int32_t, void *, u_int32_t, u_int32_t, u_int32_t, u_int32_t, u_int32_t, void *,\
+\ * PUBLIC: u_int32_t, u_int32_t, __db_pget_reply *, int *));\
+\ */\
+void\
+__db_pget_proc(dbpcl_id, txnpcl_id, skeydlen,\
+\ \ skeydoff, skeyulen, skeyflags, skeydata,\
+\ \ skeysize, pkeydlen, pkeydoff, pkeyulen,\
+\ \ pkeyflags, pkeydata, pkeysize, datadlen,\
+\ \ datadoff, dataulen, dataflags, datadata,\
+\ \ datasize, flags, replyp, freep)\
+\ long dbpcl_id;\
+\ long txnpcl_id;\
+\ u_int32_t skeydlen;\
+\ u_int32_t skeydoff;\
+\ u_int32_t skeyulen;\
+\ u_int32_t skeyflags;\
+\ void *skeydata;\
+\ u_int32_t skeysize;\
+\ u_int32_t pkeydlen;\
+\ u_int32_t pkeydoff;\
+\ u_int32_t pkeyulen;\
+\ u_int32_t pkeyflags;\
+\ void *pkeydata;\
+\ u_int32_t pkeysize;\
+\ u_int32_t datadlen;\
+\ u_int32_t datadoff;\
+\ u_int32_t dataulen;\
+\ u_int32_t dataflags;\
+\ void *datadata;\
+\ u_int32_t datasize;\
+\ u_int32_t flags;\
+\ __db_pget_reply *replyp;\
+\ int * freep;\
+/* END __db_pget_proc */
+/^\/\* BEGIN __db_put_proc/,/^\/\* END __db_put_proc/c\
+/* BEGIN __db_put_proc */\
+/*\
+\ * PUBLIC: void __db_put_proc __P((long, long, u_int32_t, u_int32_t, u_int32_t,\
+\ * PUBLIC: u_int32_t, void *, u_int32_t, u_int32_t, u_int32_t, u_int32_t, u_int32_t, void *,\
+\ * PUBLIC: u_int32_t, u_int32_t, __db_put_reply *, int *));\
+\ */\
+void\
+__db_put_proc(dbpcl_id, txnpcl_id, keydlen,\
+\ \ keydoff, keyulen, keyflags, keydata,\
+\ \ keysize, datadlen, datadoff, dataulen,\
+\ \ dataflags, datadata, datasize, flags, replyp, freep)\
+\ long dbpcl_id;\
+\ long txnpcl_id;\
+\ u_int32_t keydlen;\
+\ u_int32_t keydoff;\
+\ u_int32_t keyulen;\
+\ u_int32_t keyflags;\
+\ void *keydata;\
+\ u_int32_t keysize;\
+\ u_int32_t datadlen;\
+\ u_int32_t datadoff;\
+\ u_int32_t dataulen;\
+\ u_int32_t dataflags;\
+\ void *datadata;\
+\ u_int32_t datasize;\
+\ u_int32_t flags;\
+\ __db_put_reply *replyp;\
+\ int * freep;\
+/* END __db_put_proc */
+/^\/\* BEGIN __db_re_delim_proc/,/^\/\* END __db_re_delim_proc/c\
+/* BEGIN __db_re_delim_proc */\
+/*\
+\ * PUBLIC: void __db_re_delim_proc __P((long, u_int32_t,\
+\ * PUBLIC: __db_re_delim_reply *));\
+\ */\
+void\
+__db_re_delim_proc(dbpcl_id, delim, replyp)\
+\ long dbpcl_id;\
+\ u_int32_t delim;\
+\ __db_re_delim_reply *replyp;\
+/* END __db_re_delim_proc */
+/^\/\* BEGIN __db_re_len_proc/,/^\/\* END __db_re_len_proc/c\
+/* BEGIN __db_re_len_proc */\
+/*\
+\ * PUBLIC: void __db_re_len_proc __P((long, u_int32_t, __db_re_len_reply *));\
+\ */\
+void\
+__db_re_len_proc(dbpcl_id, len, replyp)\
+\ long dbpcl_id;\
+\ u_int32_t len;\
+\ __db_re_len_reply *replyp;\
+/* END __db_re_len_proc */
+/^\/\* BEGIN __db_re_pad_proc/,/^\/\* END __db_re_pad_proc/c\
+/* BEGIN __db_re_pad_proc */\
+/*\
+\ * PUBLIC: void __db_re_pad_proc __P((long, u_int32_t, __db_re_pad_reply *));\
+\ */\
+void\
+__db_re_pad_proc(dbpcl_id, pad, replyp)\
+\ long dbpcl_id;\
+\ u_int32_t pad;\
+\ __db_re_pad_reply *replyp;\
+/* END __db_re_pad_proc */
+/^\/\* BEGIN __db_remove_proc/,/^\/\* END __db_remove_proc/c\
+/* BEGIN __db_remove_proc */\
+/*\
+\ * PUBLIC: void __db_remove_proc __P((long, char *, char *, u_int32_t,\
+\ * PUBLIC: __db_remove_reply *));\
+\ */\
+void\
+__db_remove_proc(dbpcl_id, name, subdb,\
+\ \ flags, replyp)\
+\ long dbpcl_id;\
+\ char *name;\
+\ char *subdb;\
+\ u_int32_t flags;\
+\ __db_remove_reply *replyp;\
+/* END __db_remove_proc */
+/^\/\* BEGIN __db_rename_proc/,/^\/\* END __db_rename_proc/c\
+/* BEGIN __db_rename_proc */\
+/*\
+\ * PUBLIC: void __db_rename_proc __P((long, char *, char *, char *, u_int32_t,\
+\ * PUBLIC: __db_rename_reply *));\
+\ */\
+void\
+__db_rename_proc(dbpcl_id, name, subdb,\
+\ \ newname, flags, replyp)\
+\ long dbpcl_id;\
+\ char *name;\
+\ char *subdb;\
+\ char *newname;\
+\ u_int32_t flags;\
+\ __db_rename_reply *replyp;\
+/* END __db_rename_proc */
+/^\/\* BEGIN __db_stat_proc/,/^\/\* END __db_stat_proc/c\
+/* BEGIN __db_stat_proc */\
+/*\
+\ * PUBLIC: void __db_stat_proc __P((long, u_int32_t, __db_stat_reply *,\
+\ * PUBLIC: int *));\
+\ */\
+void\
+__db_stat_proc(dbpcl_id, flags, replyp, freep)\
+\ long dbpcl_id;\
+\ u_int32_t flags;\
+\ __db_stat_reply *replyp;\
+\ int * freep;\
+/* END __db_stat_proc */
+/^\/\* BEGIN __db_sync_proc/,/^\/\* END __db_sync_proc/c\
+/* BEGIN __db_sync_proc */\
+/*\
+\ * PUBLIC: void __db_sync_proc __P((long, u_int32_t, __db_sync_reply *));\
+\ */\
+void\
+__db_sync_proc(dbpcl_id, flags, replyp)\
+\ long dbpcl_id;\
+\ u_int32_t flags;\
+\ __db_sync_reply *replyp;\
+/* END __db_sync_proc */
+/^\/\* BEGIN __db_truncate_proc/,/^\/\* END __db_truncate_proc/c\
+/* BEGIN __db_truncate_proc */\
+/*\
+\ * PUBLIC: void __db_truncate_proc __P((long, long, u_int32_t,\
+\ * PUBLIC: __db_truncate_reply *));\
+\ */\
+void\
+__db_truncate_proc(dbpcl_id, txnpcl_id,\
+\ \ flags, replyp)\
+\ long dbpcl_id;\
+\ long txnpcl_id;\
+\ u_int32_t flags;\
+\ __db_truncate_reply *replyp;\
+/* END __db_truncate_proc */
+/^\/\* BEGIN __db_cursor_proc/,/^\/\* END __db_cursor_proc/c\
+/* BEGIN __db_cursor_proc */\
+/*\
+\ * PUBLIC: void __db_cursor_proc __P((long, long, u_int32_t,\
+\ * PUBLIC: __db_cursor_reply *));\
+\ */\
+void\
+__db_cursor_proc(dbpcl_id, txnpcl_id,\
+\ \ flags, replyp)\
+\ long dbpcl_id;\
+\ long txnpcl_id;\
+\ u_int32_t flags;\
+\ __db_cursor_reply *replyp;\
+/* END __db_cursor_proc */
+/^\/\* BEGIN __db_join_proc/,/^\/\* END __db_join_proc/c\
+/* BEGIN __db_join_proc */\
+/*\
+\ * PUBLIC: void __db_join_proc __P((long, u_int32_t *, u_int32_t, u_int32_t,\
+\ * PUBLIC: __db_join_reply *));\
+\ */\
+void\
+__db_join_proc(dbpcl_id, curs, curslen,\
+\ \ flags, replyp)\
+\ long dbpcl_id;\
+\ u_int32_t * curs;\
+\ u_int32_t curslen;\
+\ u_int32_t flags;\
+\ __db_join_reply *replyp;\
+/* END __db_join_proc */
+/^\/\* BEGIN __dbc_close_proc/,/^\/\* END __dbc_close_proc/c\
+/* BEGIN __dbc_close_proc */\
+/*\
+\ * PUBLIC: void __dbc_close_proc __P((long, __dbc_close_reply *));\
+\ */\
+void\
+__dbc_close_proc(dbccl_id, replyp)\
+\ long dbccl_id;\
+\ __dbc_close_reply *replyp;\
+/* END __dbc_close_proc */
+/^\/\* BEGIN __dbc_count_proc/,/^\/\* END __dbc_count_proc/c\
+/* BEGIN __dbc_count_proc */\
+/*\
+\ * PUBLIC: void __dbc_count_proc __P((long, u_int32_t, __dbc_count_reply *));\
+\ */\
+void\
+__dbc_count_proc(dbccl_id, flags, replyp)\
+\ long dbccl_id;\
+\ u_int32_t flags;\
+\ __dbc_count_reply *replyp;\
+/* END __dbc_count_proc */
+/^\/\* BEGIN __dbc_del_proc/,/^\/\* END __dbc_del_proc/c\
+/* BEGIN __dbc_del_proc */\
+/*\
+\ * PUBLIC: void __dbc_del_proc __P((long, u_int32_t, __dbc_del_reply *));\
+\ */\
+void\
+__dbc_del_proc(dbccl_id, flags, replyp)\
+\ long dbccl_id;\
+\ u_int32_t flags;\
+\ __dbc_del_reply *replyp;\
+/* END __dbc_del_proc */
+/^\/\* BEGIN __dbc_dup_proc/,/^\/\* END __dbc_dup_proc/c\
+/* BEGIN __dbc_dup_proc */\
+/*\
+\ * PUBLIC: void __dbc_dup_proc __P((long, u_int32_t, __dbc_dup_reply *));\
+\ */\
+void\
+__dbc_dup_proc(dbccl_id, flags, replyp)\
+\ long dbccl_id;\
+\ u_int32_t flags;\
+\ __dbc_dup_reply *replyp;\
+/* END __dbc_dup_proc */
+/^\/\* BEGIN __dbc_get_proc/,/^\/\* END __dbc_get_proc/c\
+/* BEGIN __dbc_get_proc */\
+/*\
+\ * PUBLIC: void __dbc_get_proc __P((long, u_int32_t, u_int32_t, u_int32_t,\
+\ * PUBLIC: u_int32_t, void *, u_int32_t, u_int32_t, u_int32_t, u_int32_t, u_int32_t, void *,\
+\ * PUBLIC: u_int32_t, u_int32_t, __dbc_get_reply *, int *));\
+\ */\
+void\
+__dbc_get_proc(dbccl_id, keydlen, keydoff,\
+\ \ keyulen, keyflags, keydata, keysize,\
+\ \ datadlen, datadoff, dataulen, dataflags,\
+\ \ datadata, datasize, flags, replyp, freep)\
+\ long dbccl_id;\
+\ u_int32_t keydlen;\
+\ u_int32_t keydoff;\
+\ u_int32_t keyulen;\
+\ u_int32_t keyflags;\
+\ void *keydata;\
+\ u_int32_t keysize;\
+\ u_int32_t datadlen;\
+\ u_int32_t datadoff;\
+\ u_int32_t dataulen;\
+\ u_int32_t dataflags;\
+\ void *datadata;\
+\ u_int32_t datasize;\
+\ u_int32_t flags;\
+\ __dbc_get_reply *replyp;\
+\ int * freep;\
+/* END __dbc_get_proc */
+/^\/\* BEGIN __dbc_pget_proc/,/^\/\* END __dbc_pget_proc/c\
+/* BEGIN __dbc_pget_proc */\
+/*\
+\ * PUBLIC: void __dbc_pget_proc __P((long, u_int32_t, u_int32_t, u_int32_t,\
+\ * PUBLIC: u_int32_t, void *, u_int32_t, u_int32_t, u_int32_t, u_int32_t, u_int32_t, void *,\
+\ * PUBLIC: u_int32_t, u_int32_t, u_int32_t, u_int32_t, u_int32_t, void *, u_int32_t,\
+\ * PUBLIC: u_int32_t, __dbc_pget_reply *, int *));\
+\ */\
+void\
+__dbc_pget_proc(dbccl_id, skeydlen, skeydoff,\
+\ \ skeyulen, skeyflags, skeydata, skeysize,\
+\ \ pkeydlen, pkeydoff, pkeyulen, pkeyflags,\
+\ \ pkeydata, pkeysize, datadlen, datadoff,\
+\ \ dataulen, dataflags, datadata, datasize,\
+\ \ flags, replyp, freep)\
+\ long dbccl_id;\
+\ u_int32_t skeydlen;\
+\ u_int32_t skeydoff;\
+\ u_int32_t skeyulen;\
+\ u_int32_t skeyflags;\
+\ void *skeydata;\
+\ u_int32_t skeysize;\
+\ u_int32_t pkeydlen;\
+\ u_int32_t pkeydoff;\
+\ u_int32_t pkeyulen;\
+\ u_int32_t pkeyflags;\
+\ void *pkeydata;\
+\ u_int32_t pkeysize;\
+\ u_int32_t datadlen;\
+\ u_int32_t datadoff;\
+\ u_int32_t dataulen;\
+\ u_int32_t dataflags;\
+\ void *datadata;\
+\ u_int32_t datasize;\
+\ u_int32_t flags;\
+\ __dbc_pget_reply *replyp;\
+\ int * freep;\
+/* END __dbc_pget_proc */
+/^\/\* BEGIN __dbc_put_proc/,/^\/\* END __dbc_put_proc/c\
+/* BEGIN __dbc_put_proc */\
+/*\
+\ * PUBLIC: void __dbc_put_proc __P((long, u_int32_t, u_int32_t, u_int32_t,\
+\ * PUBLIC: u_int32_t, void *, u_int32_t, u_int32_t, u_int32_t, u_int32_t, u_int32_t, void *,\
+\ * PUBLIC: u_int32_t, u_int32_t, __dbc_put_reply *, int *));\
+\ */\
+void\
+__dbc_put_proc(dbccl_id, keydlen, keydoff,\
+\ \ keyulen, keyflags, keydata, keysize,\
+\ \ datadlen, datadoff, dataulen, dataflags,\
+\ \ datadata, datasize, flags, replyp, freep)\
+\ long dbccl_id;\
+\ u_int32_t keydlen;\
+\ u_int32_t keydoff;\
+\ u_int32_t keyulen;\
+\ u_int32_t keyflags;\
+\ void *keydata;\
+\ u_int32_t keysize;\
+\ u_int32_t datadlen;\
+\ u_int32_t datadoff;\
+\ u_int32_t dataulen;\
+\ u_int32_t dataflags;\
+\ void *datadata;\
+\ u_int32_t datasize;\
+\ u_int32_t flags;\
+\ __dbc_put_reply *replyp;\
+\ int * freep;\
+/* END __dbc_put_proc */
diff --git a/bdb/rpc_server/c/db_server_svc.c b/bdb/rpc_server/c/db_server_svc.c
new file mode 100644
index 00000000000..96dd959ec8c
--- /dev/null
+++ b/bdb/rpc_server/c/db_server_svc.c
@@ -0,0 +1,435 @@
+/*
+ * Please do not edit this file.
+ * It was generated using rpcgen.
+ */
+
+#include "db_config.h"
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <rpc/rpc.h>
+#include <rpc/pmap_clnt.h>
+#include <stdio.h>
+#include <stdlib.h> /* getenv, exit */
+#include <memory.h>
+#include <sys/socket.h>
+#include <netinet/in.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc_auto/db_server.h"
+#include "dbinc/db_server_int.h"
+#include "dbinc_auto/rpc_server_ext.h"
+
+#ifdef DEBUG
+#define RPC_SVC_FG
+#endif
+
+static void
+db_rpc_serverprog_4001(rqstp, transp)
+ struct svc_req *rqstp;
+ register SVCXPRT *transp;
+{
+ union {
+ __env_cachesize_msg __db_env_cachesize_4001_arg;
+ __env_close_msg __db_env_close_4001_arg;
+ __env_create_msg __db_env_create_4001_arg;
+ __env_dbremove_msg __db_env_dbremove_4001_arg;
+ __env_dbrename_msg __db_env_dbrename_4001_arg;
+ __env_encrypt_msg __db_env_encrypt_4001_arg;
+ __env_flags_msg __db_env_flags_4001_arg;
+ __env_open_msg __db_env_open_4001_arg;
+ __env_remove_msg __db_env_remove_4001_arg;
+ __txn_abort_msg __db_txn_abort_4001_arg;
+ __txn_begin_msg __db_txn_begin_4001_arg;
+ __txn_commit_msg __db_txn_commit_4001_arg;
+ __txn_discard_msg __db_txn_discard_4001_arg;
+ __txn_prepare_msg __db_txn_prepare_4001_arg;
+ __txn_recover_msg __db_txn_recover_4001_arg;
+ __db_associate_msg __db_db_associate_4001_arg;
+ __db_bt_maxkey_msg __db_db_bt_maxkey_4001_arg;
+ __db_bt_minkey_msg __db_db_bt_minkey_4001_arg;
+ __db_close_msg __db_db_close_4001_arg;
+ __db_create_msg __db_db_create_4001_arg;
+ __db_del_msg __db_db_del_4001_arg;
+ __db_encrypt_msg __db_db_encrypt_4001_arg;
+ __db_extentsize_msg __db_db_extentsize_4001_arg;
+ __db_flags_msg __db_db_flags_4001_arg;
+ __db_get_msg __db_db_get_4001_arg;
+ __db_h_ffactor_msg __db_db_h_ffactor_4001_arg;
+ __db_h_nelem_msg __db_db_h_nelem_4001_arg;
+ __db_key_range_msg __db_db_key_range_4001_arg;
+ __db_lorder_msg __db_db_lorder_4001_arg;
+ __db_open_msg __db_db_open_4001_arg;
+ __db_pagesize_msg __db_db_pagesize_4001_arg;
+ __db_pget_msg __db_db_pget_4001_arg;
+ __db_put_msg __db_db_put_4001_arg;
+ __db_re_delim_msg __db_db_re_delim_4001_arg;
+ __db_re_len_msg __db_db_re_len_4001_arg;
+ __db_re_pad_msg __db_db_re_pad_4001_arg;
+ __db_remove_msg __db_db_remove_4001_arg;
+ __db_rename_msg __db_db_rename_4001_arg;
+ __db_stat_msg __db_db_stat_4001_arg;
+ __db_sync_msg __db_db_sync_4001_arg;
+ __db_truncate_msg __db_db_truncate_4001_arg;
+ __db_cursor_msg __db_db_cursor_4001_arg;
+ __db_join_msg __db_db_join_4001_arg;
+ __dbc_close_msg __db_dbc_close_4001_arg;
+ __dbc_count_msg __db_dbc_count_4001_arg;
+ __dbc_del_msg __db_dbc_del_4001_arg;
+ __dbc_dup_msg __db_dbc_dup_4001_arg;
+ __dbc_get_msg __db_dbc_get_4001_arg;
+ __dbc_pget_msg __db_dbc_pget_4001_arg;
+ __dbc_put_msg __db_dbc_put_4001_arg;
+ } argument;
+ char *result;
+ bool_t (*xdr_argument)(), (*xdr_result)();
+ char *(*local)();
+
+ switch (rqstp->rq_proc) {
+ case NULLPROC:
+ (void) svc_sendreply(transp, (xdrproc_t)xdr_void,
+ (char *)NULL);
+ return;
+
+ case __DB_env_cachesize:
+ xdr_argument = xdr___env_cachesize_msg;
+ xdr_result = xdr___env_cachesize_reply;
+ local = (char *(*)()) __db_env_cachesize_4001;
+ break;
+
+ case __DB_env_close:
+ xdr_argument = xdr___env_close_msg;
+ xdr_result = xdr___env_close_reply;
+ local = (char *(*)()) __db_env_close_4001;
+ break;
+
+ case __DB_env_create:
+ xdr_argument = xdr___env_create_msg;
+ xdr_result = xdr___env_create_reply;
+ local = (char *(*)()) __db_env_create_4001;
+ break;
+
+ case __DB_env_dbremove:
+ xdr_argument = xdr___env_dbremove_msg;
+ xdr_result = xdr___env_dbremove_reply;
+ local = (char *(*)()) __db_env_dbremove_4001;
+ break;
+
+ case __DB_env_dbrename:
+ xdr_argument = xdr___env_dbrename_msg;
+ xdr_result = xdr___env_dbrename_reply;
+ local = (char *(*)()) __db_env_dbrename_4001;
+ break;
+
+ case __DB_env_encrypt:
+ xdr_argument = xdr___env_encrypt_msg;
+ xdr_result = xdr___env_encrypt_reply;
+ local = (char *(*)()) __db_env_encrypt_4001;
+ break;
+
+ case __DB_env_flags:
+ xdr_argument = xdr___env_flags_msg;
+ xdr_result = xdr___env_flags_reply;
+ local = (char *(*)()) __db_env_flags_4001;
+ break;
+
+ case __DB_env_open:
+ xdr_argument = xdr___env_open_msg;
+ xdr_result = xdr___env_open_reply;
+ local = (char *(*)()) __db_env_open_4001;
+ break;
+
+ case __DB_env_remove:
+ xdr_argument = xdr___env_remove_msg;
+ xdr_result = xdr___env_remove_reply;
+ local = (char *(*)()) __db_env_remove_4001;
+ break;
+
+ case __DB_txn_abort:
+ xdr_argument = xdr___txn_abort_msg;
+ xdr_result = xdr___txn_abort_reply;
+ local = (char *(*)()) __db_txn_abort_4001;
+ break;
+
+ case __DB_txn_begin:
+ xdr_argument = xdr___txn_begin_msg;
+ xdr_result = xdr___txn_begin_reply;
+ local = (char *(*)()) __db_txn_begin_4001;
+ break;
+
+ case __DB_txn_commit:
+ xdr_argument = xdr___txn_commit_msg;
+ xdr_result = xdr___txn_commit_reply;
+ local = (char *(*)()) __db_txn_commit_4001;
+ break;
+
+ case __DB_txn_discard:
+ xdr_argument = xdr___txn_discard_msg;
+ xdr_result = xdr___txn_discard_reply;
+ local = (char *(*)()) __db_txn_discard_4001;
+ break;
+
+ case __DB_txn_prepare:
+ xdr_argument = xdr___txn_prepare_msg;
+ xdr_result = xdr___txn_prepare_reply;
+ local = (char *(*)()) __db_txn_prepare_4001;
+ break;
+
+ case __DB_txn_recover:
+ xdr_argument = xdr___txn_recover_msg;
+ xdr_result = xdr___txn_recover_reply;
+ local = (char *(*)()) __db_txn_recover_4001;
+ break;
+
+ case __DB_db_associate:
+ xdr_argument = xdr___db_associate_msg;
+ xdr_result = xdr___db_associate_reply;
+ local = (char *(*)()) __db_db_associate_4001;
+ break;
+
+ case __DB_db_bt_maxkey:
+ xdr_argument = xdr___db_bt_maxkey_msg;
+ xdr_result = xdr___db_bt_maxkey_reply;
+ local = (char *(*)()) __db_db_bt_maxkey_4001;
+ break;
+
+ case __DB_db_bt_minkey:
+ xdr_argument = xdr___db_bt_minkey_msg;
+ xdr_result = xdr___db_bt_minkey_reply;
+ local = (char *(*)()) __db_db_bt_minkey_4001;
+ break;
+
+ case __DB_db_close:
+ xdr_argument = xdr___db_close_msg;
+ xdr_result = xdr___db_close_reply;
+ local = (char *(*)()) __db_db_close_4001;
+ break;
+
+ case __DB_db_create:
+ xdr_argument = xdr___db_create_msg;
+ xdr_result = xdr___db_create_reply;
+ local = (char *(*)()) __db_db_create_4001;
+ break;
+
+ case __DB_db_del:
+ xdr_argument = xdr___db_del_msg;
+ xdr_result = xdr___db_del_reply;
+ local = (char *(*)()) __db_db_del_4001;
+ break;
+
+ case __DB_db_encrypt:
+ xdr_argument = xdr___db_encrypt_msg;
+ xdr_result = xdr___db_encrypt_reply;
+ local = (char *(*)()) __db_db_encrypt_4001;
+ break;
+
+ case __DB_db_extentsize:
+ xdr_argument = xdr___db_extentsize_msg;
+ xdr_result = xdr___db_extentsize_reply;
+ local = (char *(*)()) __db_db_extentsize_4001;
+ break;
+
+ case __DB_db_flags:
+ xdr_argument = xdr___db_flags_msg;
+ xdr_result = xdr___db_flags_reply;
+ local = (char *(*)()) __db_db_flags_4001;
+ break;
+
+ case __DB_db_get:
+ xdr_argument = xdr___db_get_msg;
+ xdr_result = xdr___db_get_reply;
+ local = (char *(*)()) __db_db_get_4001;
+ break;
+
+ case __DB_db_h_ffactor:
+ xdr_argument = xdr___db_h_ffactor_msg;
+ xdr_result = xdr___db_h_ffactor_reply;
+ local = (char *(*)()) __db_db_h_ffactor_4001;
+ break;
+
+ case __DB_db_h_nelem:
+ xdr_argument = xdr___db_h_nelem_msg;
+ xdr_result = xdr___db_h_nelem_reply;
+ local = (char *(*)()) __db_db_h_nelem_4001;
+ break;
+
+ case __DB_db_key_range:
+ xdr_argument = xdr___db_key_range_msg;
+ xdr_result = xdr___db_key_range_reply;
+ local = (char *(*)()) __db_db_key_range_4001;
+ break;
+
+ case __DB_db_lorder:
+ xdr_argument = xdr___db_lorder_msg;
+ xdr_result = xdr___db_lorder_reply;
+ local = (char *(*)()) __db_db_lorder_4001;
+ break;
+
+ case __DB_db_open:
+ xdr_argument = xdr___db_open_msg;
+ xdr_result = xdr___db_open_reply;
+ local = (char *(*)()) __db_db_open_4001;
+ break;
+
+ case __DB_db_pagesize:
+ xdr_argument = xdr___db_pagesize_msg;
+ xdr_result = xdr___db_pagesize_reply;
+ local = (char *(*)()) __db_db_pagesize_4001;
+ break;
+
+ case __DB_db_pget:
+ xdr_argument = xdr___db_pget_msg;
+ xdr_result = xdr___db_pget_reply;
+ local = (char *(*)()) __db_db_pget_4001;
+ break;
+
+ case __DB_db_put:
+ xdr_argument = xdr___db_put_msg;
+ xdr_result = xdr___db_put_reply;
+ local = (char *(*)()) __db_db_put_4001;
+ break;
+
+ case __DB_db_re_delim:
+ xdr_argument = xdr___db_re_delim_msg;
+ xdr_result = xdr___db_re_delim_reply;
+ local = (char *(*)()) __db_db_re_delim_4001;
+ break;
+
+ case __DB_db_re_len:
+ xdr_argument = xdr___db_re_len_msg;
+ xdr_result = xdr___db_re_len_reply;
+ local = (char *(*)()) __db_db_re_len_4001;
+ break;
+
+ case __DB_db_re_pad:
+ xdr_argument = xdr___db_re_pad_msg;
+ xdr_result = xdr___db_re_pad_reply;
+ local = (char *(*)()) __db_db_re_pad_4001;
+ break;
+
+ case __DB_db_remove:
+ xdr_argument = xdr___db_remove_msg;
+ xdr_result = xdr___db_remove_reply;
+ local = (char *(*)()) __db_db_remove_4001;
+ break;
+
+ case __DB_db_rename:
+ xdr_argument = xdr___db_rename_msg;
+ xdr_result = xdr___db_rename_reply;
+ local = (char *(*)()) __db_db_rename_4001;
+ break;
+
+ case __DB_db_stat:
+ xdr_argument = xdr___db_stat_msg;
+ xdr_result = xdr___db_stat_reply;
+ local = (char *(*)()) __db_db_stat_4001;
+ break;
+
+ case __DB_db_sync:
+ xdr_argument = xdr___db_sync_msg;
+ xdr_result = xdr___db_sync_reply;
+ local = (char *(*)()) __db_db_sync_4001;
+ break;
+
+ case __DB_db_truncate:
+ xdr_argument = xdr___db_truncate_msg;
+ xdr_result = xdr___db_truncate_reply;
+ local = (char *(*)()) __db_db_truncate_4001;
+ break;
+
+ case __DB_db_cursor:
+ xdr_argument = xdr___db_cursor_msg;
+ xdr_result = xdr___db_cursor_reply;
+ local = (char *(*)()) __db_db_cursor_4001;
+ break;
+
+ case __DB_db_join:
+ xdr_argument = xdr___db_join_msg;
+ xdr_result = xdr___db_join_reply;
+ local = (char *(*)()) __db_db_join_4001;
+ break;
+
+ case __DB_dbc_close:
+ xdr_argument = xdr___dbc_close_msg;
+ xdr_result = xdr___dbc_close_reply;
+ local = (char *(*)()) __db_dbc_close_4001;
+ break;
+
+ case __DB_dbc_count:
+ xdr_argument = xdr___dbc_count_msg;
+ xdr_result = xdr___dbc_count_reply;
+ local = (char *(*)()) __db_dbc_count_4001;
+ break;
+
+ case __DB_dbc_del:
+ xdr_argument = xdr___dbc_del_msg;
+ xdr_result = xdr___dbc_del_reply;
+ local = (char *(*)()) __db_dbc_del_4001;
+ break;
+
+ case __DB_dbc_dup:
+ xdr_argument = xdr___dbc_dup_msg;
+ xdr_result = xdr___dbc_dup_reply;
+ local = (char *(*)()) __db_dbc_dup_4001;
+ break;
+
+ case __DB_dbc_get:
+ xdr_argument = xdr___dbc_get_msg;
+ xdr_result = xdr___dbc_get_reply;
+ local = (char *(*)()) __db_dbc_get_4001;
+ break;
+
+ case __DB_dbc_pget:
+ xdr_argument = xdr___dbc_pget_msg;
+ xdr_result = xdr___dbc_pget_reply;
+ local = (char *(*)()) __db_dbc_pget_4001;
+ break;
+
+ case __DB_dbc_put:
+ xdr_argument = xdr___dbc_put_msg;
+ xdr_result = xdr___dbc_put_reply;
+ local = (char *(*)()) __db_dbc_put_4001;
+ break;
+
+ default:
+ svcerr_noproc(transp);
+ return;
+ }
+ (void) memset((char *)&argument, 0, sizeof (argument));
+ if (!svc_getargs(transp, (xdrproc_t)xdr_argument, (char *)&argument)) {
+ svcerr_decode(transp);
+ return;
+ }
+ result = (*local)(&argument, rqstp);
+ if (result != NULL && !svc_sendreply(transp, (xdrproc_t)xdr_result, result)) {
+ svcerr_systemerr(transp);
+ }
+ if (!svc_freeargs(transp, (xdrproc_t)xdr_argument, (char *)&argument)) {
+ fprintf(stderr, "unable to free arguments");
+ exit(1);
+ }
+ __dbsrv_timeout(0);
+ return;
+}
+
+void __dbsrv_main()
+{
+ register SVCXPRT *transp;
+
+ (void) pmap_unset(DB_RPC_SERVERPROG, DB_RPC_SERVERVERS);
+
+ transp = svctcp_create(RPC_ANYSOCK, 0, 0);
+ if (transp == NULL) {
+ fprintf(stderr, "cannot create tcp service.");
+ exit(1);
+ }
+ if (!svc_register(transp, DB_RPC_SERVERPROG, DB_RPC_SERVERVERS, db_rpc_serverprog_4001, IPPROTO_TCP)) {
+ fprintf(stderr, "unable to register (DB_RPC_SERVERPROG, DB_RPC_SERVERVERS, tcp).");
+ exit(1);
+ }
+
+ svc_run();
+ fprintf(stderr, "svc_run returned");
+ exit(1);
+ /* NOTREACHED */
+}
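
db_rpc_serverprog_4001() above treats every call the same way: decode the argument union with xdr_argument, invoke the routine stored in local, and, if it returns a non-NULL pointer, encode that result with xdr_result via svc_sendreply(). That only works if each generated wrapper hands back a reply that stays valid after it returns, which is why a static reply object is the usual shape. The sketch below mirrors what such a wrapper for the __DB_dbc_count case is expected to look like; the function name example_dbc_count_4001 is hypothetical, and the __dbc_count_msg field names (dbccl_id, flags) and the generated types from dbinc_auto/db_server.h are assumptions, since the real wrappers are not part of this hunk.

/*
 * Illustrative sketch only -- not generated code.  It shows the presumed
 * shape of a handler reached through "local" in db_rpc_serverprog_4001():
 * take the already-decoded message, call the matching *_proc routine, and
 * return a pointer that outlives the call (hence the static reply).  The
 * message field names dbccl_id and flags are assumptions.
 */
__dbc_count_reply *
example_dbc_count_4001(msg, rqstp)
	__dbc_count_msg *msg;
	struct svc_req *rqstp;
{
	static __dbc_count_reply reply;

	(void)rqstp;	/* The proc routine does not need the transport request. */

	__dbc_count_proc((long)msg->dbccl_id, msg->flags, &reply);
	return (&reply);
}
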
diff --git a/bdb/rpc_server/db_server_util.c b/bdb/rpc_server/c/db_server_util.c
index 862bbd05efb..2ea270c2d19 100644
--- a/bdb/rpc_server/db_server_util.c
+++ b/bdb/rpc_server/c/db_server_util.c
@@ -1,14 +1,14 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 2000
+ * Copyright (c) 2000-2002
* Sleepycat Software. All rights reserved.
*/
#include "db_config.h"
#ifndef lint
-static const char revid[] = "$Id: db_server_util.c,v 1.32 2001/01/18 18:36:59 bostic Exp $";
+static const char revid[] = "$Id: db_server_util.c,v 1.59 2002/03/27 04:32:50 bostic Exp $";
#endif /* not lint */
#ifndef NO_SYSTEM_INCLUDES
@@ -34,16 +34,17 @@ static const char revid[] = "$Id: db_server_util.c,v 1.32 2001/01/18 18:36:59 bo
#include <string.h>
#include <unistd.h>
#endif
-#include "db_server.h"
+#include "dbinc_auto/db_server.h"
#include "db_int.h"
-#include "clib_ext.h"
-#include "db_server_int.h"
-#include "rpc_server_ext.h"
-#include "common_ext.h"
+#include "dbinc_auto/clib_ext.h"
+#include "dbinc/db_server_int.h"
+#include "dbinc_auto/rpc_server_ext.h"
+#include "dbinc_auto/common_ext.h"
extern int __dbsrv_main __P((void));
static int add_home __P((char *));
+static int add_passwd __P((char *));
static int env_recover __P((char *));
static void __dbclear_child __P((ct_entry *));
@@ -66,14 +67,15 @@ main(argc, argv)
char **argv;
{
extern char *optarg;
- extern int optind;
CLIENT *cl;
int ch, ret;
+ char *passwd;
prog = argv[0];
version_check();
+ ret = 0;
/*
* Check whether another server is running or not. There
* is a race condition where two servers could be racing to
@@ -87,37 +89,54 @@ main(argc, argv)
* time on the same environments.
*/
if ((cl = clnt_create("localhost",
- DB_SERVERPROG, DB_SERVERVERS, "tcp")) != NULL) {
+ DB_RPC_SERVERPROG, DB_RPC_SERVERVERS, "tcp")) != NULL) {
fprintf(stderr,
"%s: Berkeley DB RPC server already running.\n", prog);
clnt_destroy(cl);
- exit(1);
+ return (EXIT_FAILURE);
}
LIST_INIT(&__dbsrv_home);
- while ((ch = getopt(argc, argv, "h:I:L:t:T:Vv")) != EOF)
+ while ((ch = getopt(argc, argv, "h:I:L:P:t:T:Vv")) != EOF)
switch (ch) {
case 'h':
(void)add_home(optarg);
break;
case 'I':
- (void)__db_getlong(NULL, prog, optarg, 1,
- LONG_MAX, &__dbsrv_idleto);
+ if (__db_getlong(NULL, prog,
+ optarg, 1, LONG_MAX, &__dbsrv_idleto))
+ return (EXIT_FAILURE);
break;
case 'L':
logfile = optarg;
break;
+ case 'P':
+ passwd = strdup(optarg);
+ memset(optarg, 0, strlen(optarg));
+ if (passwd == NULL) {
+ fprintf(stderr, "%s: strdup: %s\n",
+ prog, strerror(errno));
+ return (EXIT_FAILURE);
+ }
+ if ((ret = add_passwd(passwd)) != 0) {
+ fprintf(stderr, "%s: strdup: %s\n",
+ prog, strerror(ret));
+ return (EXIT_FAILURE);
+ }
+ break;
case 't':
- (void)__db_getlong(NULL, prog, optarg, 1,
- LONG_MAX, &__dbsrv_defto);
+ if (__db_getlong(NULL, prog,
+ optarg, 1, LONG_MAX, &__dbsrv_defto))
+ return (EXIT_FAILURE);
break;
case 'T':
- (void)__db_getlong(NULL, prog, optarg, 1,
- LONG_MAX, &__dbsrv_maxto);
+ if (__db_getlong(NULL, prog,
+ optarg, 1, LONG_MAX, &__dbsrv_maxto))
+ return (EXIT_FAILURE);
break;
case 'V':
printf("%s\n", db_version(NULL, NULL, NULL));
- exit(0);
+ return (EXIT_SUCCESS);
case 'v':
__dbsrv_verbose = 1;
break;
@@ -135,7 +154,8 @@ main(argc, argv)
* It would be bad to timeout environments sooner than txns.
*/
if (__dbsrv_defto > __dbsrv_idleto)
-printf("%s: WARNING: Idle timeout %ld is less than resource timeout %ld\n",
+ fprintf(stderr,
+ "%s: WARNING: Idle timeout %ld is less than resource timeout %ld\n",
prog, __dbsrv_idleto, __dbsrv_defto);
LIST_INIT(&__dbsrv_head);
@@ -149,14 +169,14 @@ printf("%s: WARNING: Idle timeout %ld is less than resource timeout %ld\n",
#endif
if (logfile != NULL && __db_util_logset("berkeley_db_svc", logfile))
- exit(1);
+ return (EXIT_FAILURE);
/*
* Now that we are ready to start, run recovery on all the
* environments specified.
*/
- if ((ret = env_recover(prog)) != 0)
- exit(1);
+ if (env_recover(prog) != 0)
+ return (EXIT_FAILURE);
/*
* We've done our setup, now call the generated server loop
@@ -174,9 +194,9 @@ usage(prog)
char *prog;
{
fprintf(stderr, "usage: %s %s\n\t%s\n", prog,
- "[-Vv] [-h home]",
+ "[-Vv] [-h home] [-P passwd]",
"[-I idletimeout] [-L logfile] [-t def_timeout] [-T maxtimeout]");
- exit(1);
+ exit(EXIT_FAILURE);
}
static void
@@ -192,7 +212,7 @@ version_check()
"%s: version %d.%d.%d doesn't match library version %d.%d.%d\n",
prog, DB_VERSION_MAJOR, DB_VERSION_MINOR,
DB_VERSION_PATCH, v_major, v_minor, v_patch);
- exit (1);
+ exit(EXIT_FAILURE);
}
}
@@ -220,7 +240,6 @@ __dbsrv_timeout(force)
int force;
{
static long to_hint = -1;
- DBC *dbcp;
time_t t;
long to;
ct_entry *ctp, *nextctp;
@@ -258,7 +277,8 @@ __dbsrv_timeout(force)
if (__dbsrv_verbose)
printf("Timing out txn id %ld\n",
ctp->ct_id);
- (void)txn_abort((DB_TXN *)ctp->ct_anyp);
+ (void)((DB_TXN *)ctp->ct_anyp)->
+ abort((DB_TXN *)ctp->ct_anyp);
__dbdel_ctp(ctp);
/*
 * If we timed out a txn, we may have closed
@@ -278,7 +298,6 @@ __dbsrv_timeout(force)
if (__dbsrv_verbose)
printf("Timing out cursor %ld\n",
ctp->ct_id);
- dbcp = (DBC *)ctp->ct_anyp;
(void)__dbc_close_int(ctp);
/*
* Start over with a guaranteed good ctp.
@@ -305,7 +324,7 @@ __dbsrv_timeout(force)
if (to < t || force) {
if (__dbsrv_verbose)
printf("Timing out env id %ld\n", ctp->ct_id);
- (void)__dbenv_close_int(ctp->ct_id, 0);
+ (void)__dbenv_close_int(ctp->ct_id, 0, 1);
/*
* If we timed out an env, we may have closed
* all sorts of ctp's (maybe even all of them.
@@ -352,7 +371,7 @@ __dbclear_ctp(ctp)
ct_entry *ctp;
{
LIST_REMOVE(ctp, entries);
- __os_free(ctp, sizeof(ct_entry));
+ __os_free(NULL, ctp);
}
/*
@@ -367,20 +386,21 @@ __dbdel_ctp(parent)
}
/*
- * PUBLIC: ct_entry *new_ct_ent __P((u_int32_t *));
+ * PUBLIC: ct_entry *new_ct_ent __P((int *));
*/
ct_entry *
new_ct_ent(errp)
- u_int32_t *errp;
+ int *errp;
{
time_t t;
ct_entry *ctp, *octp;
int ret;
- if ((ret = __os_malloc(NULL, sizeof(ct_entry), NULL, &ctp)) != 0) {
+ if ((ret = __os_malloc(NULL, sizeof(ct_entry), &ctp)) != 0) {
*errp = ret;
return (NULL);
}
+ memset(ctp, 0, sizeof(ct_entry));
/*
* Get the time as ID. We may service more than one request per
* second however. If we are, then increment id value until we
@@ -389,8 +409,8 @@ new_ct_ent(errp)
* we know for certain that we can use our entry.
*/
if ((t = time(NULL)) == -1) {
- *errp = t;
- __os_free(ctp, sizeof(ct_entry));
+ *errp = __os_get_errno();
+ __os_free(NULL, ctp);
return (NULL);
}
octp = LIST_FIRST(&__dbsrv_head);
@@ -400,6 +420,7 @@ new_ct_ent(errp)
ctp->ct_idle = __dbsrv_idleto;
ctp->ct_activep = &ctp->ct_active;
ctp->ct_origp = NULL;
+ ctp->ct_refcount = 1;
LIST_INSERT_HEAD(&__dbsrv_head, ctp, entries);
return (ctp);
@@ -422,6 +443,116 @@ get_tableent(id)
}
/*
+ * PUBLIC: ct_entry *__dbsrv_sharedb __P((ct_entry *, const char *,
+ * PUBLIC: const char *, DBTYPE, u_int32_t));
+ */
+ct_entry *
+__dbsrv_sharedb(db_ctp, name, subdb, type, flags)
+ ct_entry *db_ctp;
+ const char *name, *subdb;
+ DBTYPE type;
+ u_int32_t flags;
+{
+ ct_entry *ctp;
+
+ /*
+ * Check if we can share a db handle. Criteria for sharing are:
+ * If any of the non-sharable flags are set, we cannot share.
+ * Must be a db ctp, obviously.
+ * Must share the same env parent.
+ * Must be the same type, or current one DB_UNKNOWN.
+ * Must be same byteorder, or current one must not care.
+ * All flags must match.
+ * Must be same name, but don't share in-memory databases.
+ * Must be same subdb name.
+ */
+ if (flags & DB_SERVER_DBNOSHARE)
+ return (NULL);
+ for (ctp = LIST_FIRST(&__dbsrv_head); ctp != NULL;
+ ctp = LIST_NEXT(ctp, entries)) {
+ /*
+ * Skip ourselves.
+ */
+ if (ctp == db_ctp)
+ continue;
+ if (ctp->ct_type != CT_DB)
+ continue;
+ if (ctp->ct_envparent != db_ctp->ct_envparent)
+ continue;
+ if (type != DB_UNKNOWN && ctp->ct_dbdp.type != type)
+ continue;
+ if (ctp->ct_dbdp.dbflags != LF_ISSET(DB_SERVER_DBFLAGS))
+ continue;
+ if (db_ctp->ct_dbdp.setflags != 0 &&
+ ctp->ct_dbdp.setflags != db_ctp->ct_dbdp.setflags)
+ continue;
+ if (name == NULL || ctp->ct_dbdp.db == NULL ||
+ strcmp(name, ctp->ct_dbdp.db) != 0)
+ continue;
+ if (subdb != ctp->ct_dbdp.subdb &&
+ (subdb == NULL || ctp->ct_dbdp.subdb == NULL ||
+ strcmp(subdb, ctp->ct_dbdp.subdb) != 0))
+ continue;
+ /*
+ * If we get here, then we match.
+ */
+ ctp->ct_refcount++;
+ return (ctp);
+ }
+
+ return (NULL);
+}
+
+/*
+ * PUBLIC: ct_entry *__dbsrv_shareenv __P((ct_entry *, home_entry *, u_int32_t));
+ */
+ct_entry *
+__dbsrv_shareenv(env_ctp, home, flags)
+ ct_entry *env_ctp;
+ home_entry *home;
+ u_int32_t flags;
+{
+ ct_entry *ctp;
+
+ /*
+ * Check if we can share an env. Criteria for sharing are:
+ * Must be an env ctp, obviously.
+ * Must share the same home env.
+ * All flags must match.
+ */
+ for (ctp = LIST_FIRST(&__dbsrv_head); ctp != NULL;
+ ctp = LIST_NEXT(ctp, entries)) {
+ /*
+ * Skip ourselves.
+ */
+ if (ctp == env_ctp)
+ continue;
+ if (ctp->ct_type != CT_ENV)
+ continue;
+ if (ctp->ct_envdp.home != home)
+ continue;
+ if (ctp->ct_envdp.envflags != flags)
+ continue;
+ if (ctp->ct_envdp.onflags != env_ctp->ct_envdp.onflags)
+ continue;
+ if (ctp->ct_envdp.offflags != env_ctp->ct_envdp.offflags)
+ continue;
+ /*
+ * If we get here, then we match. The only thing left to
+ * check is the timeout. Since the server timeout set by
+ * the client is a hint, for sharing we'll give them the
+ * benefit of the doubt and grant them the longer timeout.
+ */
+ if (ctp->ct_timeout < env_ctp->ct_timeout)
+ ctp->ct_timeout = env_ctp->ct_timeout;
+ ctp->ct_refcount++;
+ return (ctp);
+ }
+
+ return (NULL);
+}
+
+/*
* PUBLIC: void __dbsrv_active __P((ct_entry *));
*/
void
@@ -443,6 +574,37 @@ __dbsrv_active(ctp)
}
/*
+ * PUBLIC: int __db_close_int __P((long, u_int32_t));
+ */
+int
+__db_close_int(id, flags)
+ long id;
+ u_int32_t flags;
+{
+ DB *dbp;
+ int ret;
+ ct_entry *ctp;
+
+ ret = 0;
+ ctp = get_tableent(id);
+ if (ctp == NULL)
+ return (DB_NOSERVER_ID);
+ DB_ASSERT(ctp->ct_type == CT_DB);
+ if (__dbsrv_verbose && ctp->ct_refcount != 1)
+ printf("Deref'ing dbp id %ld, refcount %d\n",
+ id, ctp->ct_refcount);
+ if (--ctp->ct_refcount != 0)
+ return (ret);
+ dbp = ctp->ct_dbp;
+ if (__dbsrv_verbose)
+ printf("Closing dbp id %ld\n", id);
+
+ ret = dbp->close(dbp, flags);
+ __dbdel_ctp(ctp);
+ return (ret);
+}
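
The sharing routines above and the close paths form one reference-counting scheme: a successful __dbsrv_sharedb or __dbsrv_shareenv match bumps ct_refcount on the existing entry, and __db_close_int / __dbenv_close_int only release the underlying handle once that count reaches zero (or a timeout forces it). The stand-alone sketch below shows the same pattern in miniature; the types and names are illustrative only, not part of this patch.

    /* Minimal sketch of refcount-based handle sharing (illustrative names,
     * not BDB code): share bumps the count, close frees at zero. */
    #include <stdlib.h>
    #include <string.h>

    struct handle {
    	char name[64];		/* key used to decide whether two opens match */
    	int refcount;		/* how many logical opens point at this entry */
    	struct handle *next;
    };

    static struct handle *head;

    /* Return an existing matching handle with its refcount bumped, or NULL. */
    struct handle *
    share_handle(const char *name)
    {
    	struct handle *h;

    	for (h = head; h != NULL; h = h->next)
    		if (strcmp(h->name, name) == 0) {
    			h->refcount++;
    			return (h);
    		}
    	return (NULL);
    }

    /* Drop one reference; unlink and free only when the last user closes. */
    int
    close_handle(struct handle *h)
    {
    	struct handle **hp;

    	if (--h->refcount != 0)
    		return (0);
    	for (hp = &head; *hp != NULL; hp = &(*hp)->next)
    		if (*hp == h) {
    			*hp = h->next;
    			break;
    		}
    	free(h);
    	return (0);
    }
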
+
+/*
* PUBLIC: int __dbc_close_int __P((ct_entry *));
*/
int
@@ -480,22 +642,35 @@ __dbc_close_int(dbc_ctp)
}
/*
- * PUBLIC: int __dbenv_close_int __P((long, int));
+ * PUBLIC: int __dbenv_close_int __P((long, u_int32_t, int));
*/
int
-__dbenv_close_int(id, flags)
+__dbenv_close_int(id, flags, force)
long id;
- int flags;
+ u_int32_t flags;
+ int force;
{
DB_ENV *dbenv;
int ret;
ct_entry *ctp;
+ ret = 0;
ctp = get_tableent(id);
if (ctp == NULL)
return (DB_NOSERVER_ID);
DB_ASSERT(ctp->ct_type == CT_ENV);
+ if (__dbsrv_verbose && ctp->ct_refcount != 1)
+ printf("Deref'ing env id %ld, refcount %d\n",
+ id, ctp->ct_refcount);
+ /*
+ * If we are timing out, we need to force the close, no matter
+ * what the refcount.
+ */
+ if (--ctp->ct_refcount != 0 && !force)
+ return (ret);
dbenv = ctp->ct_envp;
+ if (__dbsrv_verbose)
+ printf("Closing env id %ld\n", id);
ret = dbenv->close(dbenv, flags);
__dbdel_ctp(ctp);
@@ -509,12 +684,13 @@ add_home(home)
home_entry *hp, *homep;
int ret;
- if ((ret = __os_malloc(NULL, sizeof(home_entry), NULL, &hp)) != 0)
+ if ((ret = __os_malloc(NULL, sizeof(home_entry), &hp)) != 0)
return (ret);
- if ((ret = __os_malloc(NULL, strlen(home)+1, NULL, &hp->home)) != 0)
+ if ((ret = __os_malloc(NULL, strlen(home)+1, &hp->home)) != 0)
return (ret);
memcpy(hp->home, home, strlen(home)+1);
hp->dir = home;
+ hp->passwd = NULL;
/*
* This loop is to remove any trailing path separators,
* to assure hp->name points to the last component.
@@ -536,6 +712,8 @@ add_home(home)
if (strcmp(homep->name, hp->name) == 0) {
printf("Already added home name %s, at directory %s\n",
hp->name, homep->dir);
+ __os_free(NULL, hp->home);
+ __os_free(NULL, hp);
return (-1);
}
LIST_INSERT_HEAD(&__dbsrv_home, hp, entries);
@@ -544,10 +722,32 @@ add_home(home)
return (0);
}
+static int
+add_passwd(passwd)
+ char *passwd;
+{
+ home_entry *hp;
+
+ /*
+ * We add the passwd to the last given home dir. If there
+ * isn't a home dir, or the most recent one already has a
+ * passwd, then there is a user error.
+ */
+ hp = LIST_FIRST(&__dbsrv_home);
+ if (hp == NULL || hp->passwd != NULL)
+ return (EINVAL);
+ /*
+ * We've already strdup'ed the passwd above, so we don't need
+ * to malloc new space, just point to it.
+ */
+ hp->passwd = passwd;
+ return (0);
+}
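
Note the ordering dependence add_passwd() creates for the server's command line: each password must directly follow the home directory it protects, because the function always attaches to the head of the home list, i.e. the most recently added entry. Assuming the usual berkeley_db_svc flags (-h for a home directory, -P for its password — flag names are an assumption here, the pairing rule is what the code enforces), an invocation would look like:

    berkeley_db_svc -h /var/db/env1 -P secret1 -h /var/db/env2 -P secret2

with each -P applying to the -h immediately before it.
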
+
/*
- * PUBLIC: char *get_home __P((char *));
+ * PUBLIC: home_entry *get_home __P((char *));
*/
-char *
+home_entry *
get_home(name)
char *name;
{
@@ -556,7 +756,7 @@ get_home(name)
for (hp = LIST_FIRST(&__dbsrv_home); hp != NULL;
hp = LIST_NEXT(hp, entries))
if (strcmp(name, hp->name) == 0)
- return (hp->home);
+ return (hp);
return (NULL);
}
@@ -575,7 +775,7 @@ env_recover(progname)
if ((ret = db_env_create(&dbenv, 0)) != 0) {
fprintf(stderr, "%s: db_env_create: %s\n",
progname, db_strerror(ret));
- exit(1);
+ exit(EXIT_FAILURE);
}
if (__dbsrv_verbose == 1) {
(void)dbenv->set_verbose(dbenv, DB_VERB_RECOVERY, 1);
@@ -583,6 +783,9 @@ env_recover(progname)
}
dbenv->set_errfile(dbenv, stderr);
dbenv->set_errpfx(dbenv, progname);
+ if (hp->passwd != NULL)
+ (void)dbenv->set_encrypt(dbenv, hp->passwd,
+ DB_ENCRYPT_AES);
/*
* Initialize the env with DB_RECOVER. That is all we
@@ -591,9 +794,9 @@ env_recover(progname)
if (__dbsrv_verbose)
printf("Running recovery on %s\n", hp->home);
flags = DB_CREATE | DB_INIT_LOCK | DB_INIT_LOG | DB_INIT_MPOOL |
- DB_INIT_TXN | DB_PRIVATE | DB_USE_ENVIRON | DB_RECOVER;
+ DB_INIT_TXN | DB_USE_ENVIRON | DB_RECOVER;
if ((ret = dbenv->open(dbenv, hp->home, flags, 0)) != 0) {
- dbenv->err(dbenv, ret, "DBENV->open");
+ dbenv->err(dbenv, ret, "DB_ENV->open");
goto error;
}
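
For reference, the recovery sequence this hunk adjusts (set_encrypt before open, DB_RECOVER without DB_PRIVATE) is the standard DB_ENV bootstrap. A minimal stand-alone version using the public DB 4.1 API, with error handling trimmed and illustrative names, looks like:

    /* Minimal recovery sketch: standard db_env_create/open/close calls,
     * optional AES password; error handling trimmed for brevity. */
    #include <stdio.h>
    #include <db.h>

    int
    run_recovery(const char *home, const char *passwd)
    {
    	DB_ENV *dbenv;
    	u_int32_t flags;
    	int ret;

    	if ((ret = db_env_create(&dbenv, 0)) != 0)
    		return (ret);
    	dbenv->set_errfile(dbenv, stderr);
    	if (passwd != NULL)
    		(void)dbenv->set_encrypt(dbenv, passwd, DB_ENCRYPT_AES);
    	flags = DB_CREATE | DB_INIT_LOCK | DB_INIT_LOG |
    	    DB_INIT_MPOOL | DB_INIT_TXN | DB_USE_ENVIRON | DB_RECOVER;
    	if ((ret = dbenv->open(dbenv, home, flags, 0)) != 0)
    		dbenv->err(dbenv, ret, "DB_ENV->open");
    	(void)dbenv->close(dbenv, 0);
    	return (ret);
    }
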
diff --git a/bdb/rpc_server/c/db_server_xdr.c b/bdb/rpc_server/c/db_server_xdr.c
new file mode 100644
index 00000000000..bfe2b6c09c7
--- /dev/null
+++ b/bdb/rpc_server/c/db_server_xdr.c
@@ -0,0 +1,1512 @@
+#include "db_config.h"
+
+#ifdef HAVE_RPC
+/*
+ * Please do not edit this file.
+ * It was generated using rpcgen.
+ */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <rpc/rpc.h>
+
+#include <strings.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc_auto/db_server.h"
+
+bool_t
+xdr___env_cachesize_msg(xdrs, objp)
+ register XDR *xdrs;
+ __env_cachesize_msg *objp;
+{
+
+ if (!xdr_u_int(xdrs, &objp->dbenvcl_id))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->gbytes))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->bytes))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->ncache))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___env_cachesize_reply(xdrs, objp)
+ register XDR *xdrs;
+ __env_cachesize_reply *objp;
+{
+
+ if (!xdr_int(xdrs, &objp->status))
+ return (FALSE);
+ return (TRUE);
+}
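
Everything from here to the end of this file is rpcgen output: one XDR filter per message and per reply, each just a fixed sequence of xdr_* calls over the struct's fields. The same routine serializes on one side of the wire and deserializes on the other, depending on the XDR stream's operation. A stand-alone sketch of that round trip, using an illustrative struct rather than the generated __env_cachesize_msg, follows:

    /* Sketch of how such a filter is driven: the same routine encodes and
     * decodes, selected by the XDR stream's op. Illustrative types only. */
    #include <rpc/rpc.h>
    #include <string.h>

    struct cachesize_msg {
    	u_int id, gbytes, bytes, ncache;
    };

    static bool_t
    xdr_cachesize_msg(XDR *xdrs, struct cachesize_msg *objp)
    {
    	if (!xdr_u_int(xdrs, &objp->id))
    		return (FALSE);
    	if (!xdr_u_int(xdrs, &objp->gbytes))
    		return (FALSE);
    	if (!xdr_u_int(xdrs, &objp->bytes))
    		return (FALSE);
    	if (!xdr_u_int(xdrs, &objp->ncache))
    		return (FALSE);
    	return (TRUE);
    }

    int
    roundtrip(void)
    {
    	char buf[64];
    	XDR xenc, xdec;
    	struct cachesize_msg in = { 1, 0, 1048576, 1 }, out;

    	memset(&out, 0, sizeof(out));
    	xdrmem_create(&xenc, buf, sizeof(buf), XDR_ENCODE);	/* serialize */
    	if (!xdr_cachesize_msg(&xenc, &in))
    		return (-1);
    	xdrmem_create(&xdec, buf, sizeof(buf), XDR_DECODE);	/* deserialize */
    	if (!xdr_cachesize_msg(&xdec, &out))
    		return (-1);
    	return (out.bytes == in.bytes ? 0 : -1);
    }
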
+
+bool_t
+xdr___env_close_msg(xdrs, objp)
+ register XDR *xdrs;
+ __env_close_msg *objp;
+{
+
+ if (!xdr_u_int(xdrs, &objp->dbenvcl_id))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->flags))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___env_close_reply(xdrs, objp)
+ register XDR *xdrs;
+ __env_close_reply *objp;
+{
+
+ if (!xdr_int(xdrs, &objp->status))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___env_create_msg(xdrs, objp)
+ register XDR *xdrs;
+ __env_create_msg *objp;
+{
+
+ if (!xdr_u_int(xdrs, &objp->timeout))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___env_create_reply(xdrs, objp)
+ register XDR *xdrs;
+ __env_create_reply *objp;
+{
+
+ if (!xdr_int(xdrs, &objp->status))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->envcl_id))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___env_dbremove_msg(xdrs, objp)
+ register XDR *xdrs;
+ __env_dbremove_msg *objp;
+{
+
+ if (!xdr_u_int(xdrs, &objp->dbenvcl_id))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->txnpcl_id))
+ return (FALSE);
+ if (!xdr_string(xdrs, &objp->name, ~0))
+ return (FALSE);
+ if (!xdr_string(xdrs, &objp->subdb, ~0))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->flags))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___env_dbremove_reply(xdrs, objp)
+ register XDR *xdrs;
+ __env_dbremove_reply *objp;
+{
+
+ if (!xdr_int(xdrs, &objp->status))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___env_dbrename_msg(xdrs, objp)
+ register XDR *xdrs;
+ __env_dbrename_msg *objp;
+{
+
+ if (!xdr_u_int(xdrs, &objp->dbenvcl_id))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->txnpcl_id))
+ return (FALSE);
+ if (!xdr_string(xdrs, &objp->name, ~0))
+ return (FALSE);
+ if (!xdr_string(xdrs, &objp->subdb, ~0))
+ return (FALSE);
+ if (!xdr_string(xdrs, &objp->newname, ~0))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->flags))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___env_dbrename_reply(xdrs, objp)
+ register XDR *xdrs;
+ __env_dbrename_reply *objp;
+{
+
+ if (!xdr_int(xdrs, &objp->status))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___env_encrypt_msg(xdrs, objp)
+ register XDR *xdrs;
+ __env_encrypt_msg *objp;
+{
+
+ if (!xdr_u_int(xdrs, &objp->dbenvcl_id))
+ return (FALSE);
+ if (!xdr_string(xdrs, &objp->passwd, ~0))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->flags))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___env_encrypt_reply(xdrs, objp)
+ register XDR *xdrs;
+ __env_encrypt_reply *objp;
+{
+
+ if (!xdr_int(xdrs, &objp->status))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___env_flags_msg(xdrs, objp)
+ register XDR *xdrs;
+ __env_flags_msg *objp;
+{
+
+ if (!xdr_u_int(xdrs, &objp->dbenvcl_id))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->flags))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->onoff))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___env_flags_reply(xdrs, objp)
+ register XDR *xdrs;
+ __env_flags_reply *objp;
+{
+
+ if (!xdr_int(xdrs, &objp->status))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___env_open_msg(xdrs, objp)
+ register XDR *xdrs;
+ __env_open_msg *objp;
+{
+
+ if (!xdr_u_int(xdrs, &objp->dbenvcl_id))
+ return (FALSE);
+ if (!xdr_string(xdrs, &objp->home, ~0))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->flags))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->mode))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___env_open_reply(xdrs, objp)
+ register XDR *xdrs;
+ __env_open_reply *objp;
+{
+
+ if (!xdr_int(xdrs, &objp->status))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->envcl_id))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___env_remove_msg(xdrs, objp)
+ register XDR *xdrs;
+ __env_remove_msg *objp;
+{
+
+ if (!xdr_u_int(xdrs, &objp->dbenvcl_id))
+ return (FALSE);
+ if (!xdr_string(xdrs, &objp->home, ~0))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->flags))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___env_remove_reply(xdrs, objp)
+ register XDR *xdrs;
+ __env_remove_reply *objp;
+{
+
+ if (!xdr_int(xdrs, &objp->status))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___txn_abort_msg(xdrs, objp)
+ register XDR *xdrs;
+ __txn_abort_msg *objp;
+{
+
+ if (!xdr_u_int(xdrs, &objp->txnpcl_id))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___txn_abort_reply(xdrs, objp)
+ register XDR *xdrs;
+ __txn_abort_reply *objp;
+{
+
+ if (!xdr_int(xdrs, &objp->status))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___txn_begin_msg(xdrs, objp)
+ register XDR *xdrs;
+ __txn_begin_msg *objp;
+{
+
+ if (!xdr_u_int(xdrs, &objp->dbenvcl_id))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->parentcl_id))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->flags))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___txn_begin_reply(xdrs, objp)
+ register XDR *xdrs;
+ __txn_begin_reply *objp;
+{
+
+ if (!xdr_int(xdrs, &objp->status))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->txnidcl_id))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___txn_commit_msg(xdrs, objp)
+ register XDR *xdrs;
+ __txn_commit_msg *objp;
+{
+
+ if (!xdr_u_int(xdrs, &objp->txnpcl_id))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->flags))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___txn_commit_reply(xdrs, objp)
+ register XDR *xdrs;
+ __txn_commit_reply *objp;
+{
+
+ if (!xdr_int(xdrs, &objp->status))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___txn_discard_msg(xdrs, objp)
+ register XDR *xdrs;
+ __txn_discard_msg *objp;
+{
+
+ if (!xdr_u_int(xdrs, &objp->txnpcl_id))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->flags))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___txn_discard_reply(xdrs, objp)
+ register XDR *xdrs;
+ __txn_discard_reply *objp;
+{
+
+ if (!xdr_int(xdrs, &objp->status))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___txn_prepare_msg(xdrs, objp)
+ register XDR *xdrs;
+ __txn_prepare_msg *objp;
+{
+
+ if (!xdr_u_int(xdrs, &objp->txnpcl_id))
+ return (FALSE);
+ if (!xdr_opaque(xdrs, objp->gid, 128))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___txn_prepare_reply(xdrs, objp)
+ register XDR *xdrs;
+ __txn_prepare_reply *objp;
+{
+
+ if (!xdr_int(xdrs, &objp->status))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___txn_recover_msg(xdrs, objp)
+ register XDR *xdrs;
+ __txn_recover_msg *objp;
+{
+
+ if (!xdr_u_int(xdrs, &objp->dbenvcl_id))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->count))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->flags))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___txn_recover_reply(xdrs, objp)
+ register XDR *xdrs;
+ __txn_recover_reply *objp;
+{
+
+ if (!xdr_int(xdrs, &objp->status))
+ return (FALSE);
+ if (!xdr_array(xdrs, (char **)&objp->txn.txn_val, (u_int *) &objp->txn.txn_len, ~0,
+ sizeof (u_int), (xdrproc_t) xdr_u_int))
+ return (FALSE);
+ if (!xdr_bytes(xdrs, (char **)&objp->gid.gid_val, (u_int *) &objp->gid.gid_len, ~0))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->retcount))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___db_associate_msg(xdrs, objp)
+ register XDR *xdrs;
+ __db_associate_msg *objp;
+{
+
+ if (!xdr_u_int(xdrs, &objp->dbpcl_id))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->txnpcl_id))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->sdbpcl_id))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->flags))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___db_associate_reply(xdrs, objp)
+ register XDR *xdrs;
+ __db_associate_reply *objp;
+{
+
+ if (!xdr_int(xdrs, &objp->status))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___db_bt_maxkey_msg(xdrs, objp)
+ register XDR *xdrs;
+ __db_bt_maxkey_msg *objp;
+{
+
+ if (!xdr_u_int(xdrs, &objp->dbpcl_id))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->maxkey))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___db_bt_maxkey_reply(xdrs, objp)
+ register XDR *xdrs;
+ __db_bt_maxkey_reply *objp;
+{
+
+ if (!xdr_int(xdrs, &objp->status))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___db_bt_minkey_msg(xdrs, objp)
+ register XDR *xdrs;
+ __db_bt_minkey_msg *objp;
+{
+
+ if (!xdr_u_int(xdrs, &objp->dbpcl_id))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->minkey))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___db_bt_minkey_reply(xdrs, objp)
+ register XDR *xdrs;
+ __db_bt_minkey_reply *objp;
+{
+
+ if (!xdr_int(xdrs, &objp->status))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___db_close_msg(xdrs, objp)
+ register XDR *xdrs;
+ __db_close_msg *objp;
+{
+
+ if (!xdr_u_int(xdrs, &objp->dbpcl_id))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->flags))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___db_close_reply(xdrs, objp)
+ register XDR *xdrs;
+ __db_close_reply *objp;
+{
+
+ if (!xdr_int(xdrs, &objp->status))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___db_create_msg(xdrs, objp)
+ register XDR *xdrs;
+ __db_create_msg *objp;
+{
+
+ if (!xdr_u_int(xdrs, &objp->dbenvcl_id))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->flags))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___db_create_reply(xdrs, objp)
+ register XDR *xdrs;
+ __db_create_reply *objp;
+{
+
+ if (!xdr_int(xdrs, &objp->status))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->dbcl_id))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___db_del_msg(xdrs, objp)
+ register XDR *xdrs;
+ __db_del_msg *objp;
+{
+
+ if (!xdr_u_int(xdrs, &objp->dbpcl_id))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->txnpcl_id))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->keydlen))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->keydoff))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->keyulen))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->keyflags))
+ return (FALSE);
+ if (!xdr_bytes(xdrs, (char **)&objp->keydata.keydata_val, (u_int *) &objp->keydata.keydata_len, ~0))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->flags))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___db_del_reply(xdrs, objp)
+ register XDR *xdrs;
+ __db_del_reply *objp;
+{
+
+ if (!xdr_int(xdrs, &objp->status))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___db_encrypt_msg(xdrs, objp)
+ register XDR *xdrs;
+ __db_encrypt_msg *objp;
+{
+
+ if (!xdr_u_int(xdrs, &objp->dbpcl_id))
+ return (FALSE);
+ if (!xdr_string(xdrs, &objp->passwd, ~0))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->flags))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___db_encrypt_reply(xdrs, objp)
+ register XDR *xdrs;
+ __db_encrypt_reply *objp;
+{
+
+ if (!xdr_int(xdrs, &objp->status))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___db_extentsize_msg(xdrs, objp)
+ register XDR *xdrs;
+ __db_extentsize_msg *objp;
+{
+
+ if (!xdr_u_int(xdrs, &objp->dbpcl_id))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->extentsize))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___db_extentsize_reply(xdrs, objp)
+ register XDR *xdrs;
+ __db_extentsize_reply *objp;
+{
+
+ if (!xdr_int(xdrs, &objp->status))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___db_flags_msg(xdrs, objp)
+ register XDR *xdrs;
+ __db_flags_msg *objp;
+{
+
+ if (!xdr_u_int(xdrs, &objp->dbpcl_id))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->flags))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___db_flags_reply(xdrs, objp)
+ register XDR *xdrs;
+ __db_flags_reply *objp;
+{
+
+ if (!xdr_int(xdrs, &objp->status))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___db_get_msg(xdrs, objp)
+ register XDR *xdrs;
+ __db_get_msg *objp;
+{
+
+ if (!xdr_u_int(xdrs, &objp->dbpcl_id))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->txnpcl_id))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->keydlen))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->keydoff))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->keyulen))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->keyflags))
+ return (FALSE);
+ if (!xdr_bytes(xdrs, (char **)&objp->keydata.keydata_val, (u_int *) &objp->keydata.keydata_len, ~0))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->datadlen))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->datadoff))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->dataulen))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->dataflags))
+ return (FALSE);
+ if (!xdr_bytes(xdrs, (char **)&objp->datadata.datadata_val, (u_int *) &objp->datadata.datadata_len, ~0))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->flags))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___db_get_reply(xdrs, objp)
+ register XDR *xdrs;
+ __db_get_reply *objp;
+{
+
+ if (!xdr_int(xdrs, &objp->status))
+ return (FALSE);
+ if (!xdr_bytes(xdrs, (char **)&objp->keydata.keydata_val, (u_int *) &objp->keydata.keydata_len, ~0))
+ return (FALSE);
+ if (!xdr_bytes(xdrs, (char **)&objp->datadata.datadata_val, (u_int *) &objp->datadata.datadata_len, ~0))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___db_h_ffactor_msg(xdrs, objp)
+ register XDR *xdrs;
+ __db_h_ffactor_msg *objp;
+{
+
+ if (!xdr_u_int(xdrs, &objp->dbpcl_id))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->ffactor))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___db_h_ffactor_reply(xdrs, objp)
+ register XDR *xdrs;
+ __db_h_ffactor_reply *objp;
+{
+
+ if (!xdr_int(xdrs, &objp->status))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___db_h_nelem_msg(xdrs, objp)
+ register XDR *xdrs;
+ __db_h_nelem_msg *objp;
+{
+
+ if (!xdr_u_int(xdrs, &objp->dbpcl_id))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->nelem))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___db_h_nelem_reply(xdrs, objp)
+ register XDR *xdrs;
+ __db_h_nelem_reply *objp;
+{
+
+ if (!xdr_int(xdrs, &objp->status))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___db_key_range_msg(xdrs, objp)
+ register XDR *xdrs;
+ __db_key_range_msg *objp;
+{
+
+ if (!xdr_u_int(xdrs, &objp->dbpcl_id))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->txnpcl_id))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->keydlen))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->keydoff))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->keyulen))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->keyflags))
+ return (FALSE);
+ if (!xdr_bytes(xdrs, (char **)&objp->keydata.keydata_val, (u_int *) &objp->keydata.keydata_len, ~0))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->flags))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___db_key_range_reply(xdrs, objp)
+ register XDR *xdrs;
+ __db_key_range_reply *objp;
+{
+
+ if (!xdr_int(xdrs, &objp->status))
+ return (FALSE);
+ if (!xdr_double(xdrs, &objp->less))
+ return (FALSE);
+ if (!xdr_double(xdrs, &objp->equal))
+ return (FALSE);
+ if (!xdr_double(xdrs, &objp->greater))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___db_lorder_msg(xdrs, objp)
+ register XDR *xdrs;
+ __db_lorder_msg *objp;
+{
+
+ if (!xdr_u_int(xdrs, &objp->dbpcl_id))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->lorder))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___db_lorder_reply(xdrs, objp)
+ register XDR *xdrs;
+ __db_lorder_reply *objp;
+{
+
+ if (!xdr_int(xdrs, &objp->status))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___db_open_msg(xdrs, objp)
+ register XDR *xdrs;
+ __db_open_msg *objp;
+{
+
+ if (!xdr_u_int(xdrs, &objp->dbpcl_id))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->txnpcl_id))
+ return (FALSE);
+ if (!xdr_string(xdrs, &objp->name, ~0))
+ return (FALSE);
+ if (!xdr_string(xdrs, &objp->subdb, ~0))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->type))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->flags))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->mode))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___db_open_reply(xdrs, objp)
+ register XDR *xdrs;
+ __db_open_reply *objp;
+{
+
+ if (!xdr_int(xdrs, &objp->status))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->dbcl_id))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->type))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->dbflags))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->lorder))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___db_pagesize_msg(xdrs, objp)
+ register XDR *xdrs;
+ __db_pagesize_msg *objp;
+{
+
+ if (!xdr_u_int(xdrs, &objp->dbpcl_id))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->pagesize))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___db_pagesize_reply(xdrs, objp)
+ register XDR *xdrs;
+ __db_pagesize_reply *objp;
+{
+
+ if (!xdr_int(xdrs, &objp->status))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___db_pget_msg(xdrs, objp)
+ register XDR *xdrs;
+ __db_pget_msg *objp;
+{
+
+ if (!xdr_u_int(xdrs, &objp->dbpcl_id))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->txnpcl_id))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->skeydlen))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->skeydoff))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->skeyulen))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->skeyflags))
+ return (FALSE);
+ if (!xdr_bytes(xdrs, (char **)&objp->skeydata.skeydata_val, (u_int *) &objp->skeydata.skeydata_len, ~0))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->pkeydlen))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->pkeydoff))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->pkeyulen))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->pkeyflags))
+ return (FALSE);
+ if (!xdr_bytes(xdrs, (char **)&objp->pkeydata.pkeydata_val, (u_int *) &objp->pkeydata.pkeydata_len, ~0))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->datadlen))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->datadoff))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->dataulen))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->dataflags))
+ return (FALSE);
+ if (!xdr_bytes(xdrs, (char **)&objp->datadata.datadata_val, (u_int *) &objp->datadata.datadata_len, ~0))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->flags))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___db_pget_reply(xdrs, objp)
+ register XDR *xdrs;
+ __db_pget_reply *objp;
+{
+
+ if (!xdr_int(xdrs, &objp->status))
+ return (FALSE);
+ if (!xdr_bytes(xdrs, (char **)&objp->skeydata.skeydata_val, (u_int *) &objp->skeydata.skeydata_len, ~0))
+ return (FALSE);
+ if (!xdr_bytes(xdrs, (char **)&objp->pkeydata.pkeydata_val, (u_int *) &objp->pkeydata.pkeydata_len, ~0))
+ return (FALSE);
+ if (!xdr_bytes(xdrs, (char **)&objp->datadata.datadata_val, (u_int *) &objp->datadata.datadata_len, ~0))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___db_put_msg(xdrs, objp)
+ register XDR *xdrs;
+ __db_put_msg *objp;
+{
+
+ if (!xdr_u_int(xdrs, &objp->dbpcl_id))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->txnpcl_id))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->keydlen))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->keydoff))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->keyulen))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->keyflags))
+ return (FALSE);
+ if (!xdr_bytes(xdrs, (char **)&objp->keydata.keydata_val, (u_int *) &objp->keydata.keydata_len, ~0))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->datadlen))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->datadoff))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->dataulen))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->dataflags))
+ return (FALSE);
+ if (!xdr_bytes(xdrs, (char **)&objp->datadata.datadata_val, (u_int *) &objp->datadata.datadata_len, ~0))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->flags))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___db_put_reply(xdrs, objp)
+ register XDR *xdrs;
+ __db_put_reply *objp;
+{
+
+ if (!xdr_int(xdrs, &objp->status))
+ return (FALSE);
+ if (!xdr_bytes(xdrs, (char **)&objp->keydata.keydata_val, (u_int *) &objp->keydata.keydata_len, ~0))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___db_re_delim_msg(xdrs, objp)
+ register XDR *xdrs;
+ __db_re_delim_msg *objp;
+{
+
+ if (!xdr_u_int(xdrs, &objp->dbpcl_id))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->delim))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___db_re_delim_reply(xdrs, objp)
+ register XDR *xdrs;
+ __db_re_delim_reply *objp;
+{
+
+ if (!xdr_int(xdrs, &objp->status))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___db_re_len_msg(xdrs, objp)
+ register XDR *xdrs;
+ __db_re_len_msg *objp;
+{
+
+ if (!xdr_u_int(xdrs, &objp->dbpcl_id))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->len))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___db_re_len_reply(xdrs, objp)
+ register XDR *xdrs;
+ __db_re_len_reply *objp;
+{
+
+ if (!xdr_int(xdrs, &objp->status))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___db_re_pad_msg(xdrs, objp)
+ register XDR *xdrs;
+ __db_re_pad_msg *objp;
+{
+
+ if (!xdr_u_int(xdrs, &objp->dbpcl_id))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->pad))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___db_re_pad_reply(xdrs, objp)
+ register XDR *xdrs;
+ __db_re_pad_reply *objp;
+{
+
+ if (!xdr_int(xdrs, &objp->status))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___db_remove_msg(xdrs, objp)
+ register XDR *xdrs;
+ __db_remove_msg *objp;
+{
+
+ if (!xdr_u_int(xdrs, &objp->dbpcl_id))
+ return (FALSE);
+ if (!xdr_string(xdrs, &objp->name, ~0))
+ return (FALSE);
+ if (!xdr_string(xdrs, &objp->subdb, ~0))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->flags))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___db_remove_reply(xdrs, objp)
+ register XDR *xdrs;
+ __db_remove_reply *objp;
+{
+
+ if (!xdr_int(xdrs, &objp->status))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___db_rename_msg(xdrs, objp)
+ register XDR *xdrs;
+ __db_rename_msg *objp;
+{
+
+ if (!xdr_u_int(xdrs, &objp->dbpcl_id))
+ return (FALSE);
+ if (!xdr_string(xdrs, &objp->name, ~0))
+ return (FALSE);
+ if (!xdr_string(xdrs, &objp->subdb, ~0))
+ return (FALSE);
+ if (!xdr_string(xdrs, &objp->newname, ~0))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->flags))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___db_rename_reply(xdrs, objp)
+ register XDR *xdrs;
+ __db_rename_reply *objp;
+{
+
+ if (!xdr_int(xdrs, &objp->status))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___db_stat_msg(xdrs, objp)
+ register XDR *xdrs;
+ __db_stat_msg *objp;
+{
+
+ if (!xdr_u_int(xdrs, &objp->dbpcl_id))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->flags))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___db_stat_reply(xdrs, objp)
+ register XDR *xdrs;
+ __db_stat_reply *objp;
+{
+
+ if (!xdr_int(xdrs, &objp->status))
+ return (FALSE);
+ if (!xdr_array(xdrs, (char **)&objp->stats.stats_val, (u_int *) &objp->stats.stats_len, ~0,
+ sizeof (u_int), (xdrproc_t) xdr_u_int))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___db_sync_msg(xdrs, objp)
+ register XDR *xdrs;
+ __db_sync_msg *objp;
+{
+
+ if (!xdr_u_int(xdrs, &objp->dbpcl_id))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->flags))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___db_sync_reply(xdrs, objp)
+ register XDR *xdrs;
+ __db_sync_reply *objp;
+{
+
+ if (!xdr_int(xdrs, &objp->status))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___db_truncate_msg(xdrs, objp)
+ register XDR *xdrs;
+ __db_truncate_msg *objp;
+{
+
+ if (!xdr_u_int(xdrs, &objp->dbpcl_id))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->txnpcl_id))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->flags))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___db_truncate_reply(xdrs, objp)
+ register XDR *xdrs;
+ __db_truncate_reply *objp;
+{
+
+ if (!xdr_int(xdrs, &objp->status))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->count))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___db_cursor_msg(xdrs, objp)
+ register XDR *xdrs;
+ __db_cursor_msg *objp;
+{
+
+ if (!xdr_u_int(xdrs, &objp->dbpcl_id))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->txnpcl_id))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->flags))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___db_cursor_reply(xdrs, objp)
+ register XDR *xdrs;
+ __db_cursor_reply *objp;
+{
+
+ if (!xdr_int(xdrs, &objp->status))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->dbcidcl_id))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___db_join_msg(xdrs, objp)
+ register XDR *xdrs;
+ __db_join_msg *objp;
+{
+
+ if (!xdr_u_int(xdrs, &objp->dbpcl_id))
+ return (FALSE);
+ if (!xdr_array(xdrs, (char **)&objp->curs.curs_val, (u_int *) &objp->curs.curs_len, ~0,
+ sizeof (u_int), (xdrproc_t) xdr_u_int))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->flags))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___db_join_reply(xdrs, objp)
+ register XDR *xdrs;
+ __db_join_reply *objp;
+{
+
+ if (!xdr_int(xdrs, &objp->status))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->dbcidcl_id))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___dbc_close_msg(xdrs, objp)
+ register XDR *xdrs;
+ __dbc_close_msg *objp;
+{
+
+ if (!xdr_u_int(xdrs, &objp->dbccl_id))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___dbc_close_reply(xdrs, objp)
+ register XDR *xdrs;
+ __dbc_close_reply *objp;
+{
+
+ if (!xdr_int(xdrs, &objp->status))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___dbc_count_msg(xdrs, objp)
+ register XDR *xdrs;
+ __dbc_count_msg *objp;
+{
+
+ if (!xdr_u_int(xdrs, &objp->dbccl_id))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->flags))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___dbc_count_reply(xdrs, objp)
+ register XDR *xdrs;
+ __dbc_count_reply *objp;
+{
+
+ if (!xdr_int(xdrs, &objp->status))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->dupcount))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___dbc_del_msg(xdrs, objp)
+ register XDR *xdrs;
+ __dbc_del_msg *objp;
+{
+
+ if (!xdr_u_int(xdrs, &objp->dbccl_id))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->flags))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___dbc_del_reply(xdrs, objp)
+ register XDR *xdrs;
+ __dbc_del_reply *objp;
+{
+
+ if (!xdr_int(xdrs, &objp->status))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___dbc_dup_msg(xdrs, objp)
+ register XDR *xdrs;
+ __dbc_dup_msg *objp;
+{
+
+ if (!xdr_u_int(xdrs, &objp->dbccl_id))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->flags))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___dbc_dup_reply(xdrs, objp)
+ register XDR *xdrs;
+ __dbc_dup_reply *objp;
+{
+
+ if (!xdr_int(xdrs, &objp->status))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->dbcidcl_id))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___dbc_get_msg(xdrs, objp)
+ register XDR *xdrs;
+ __dbc_get_msg *objp;
+{
+
+ if (!xdr_u_int(xdrs, &objp->dbccl_id))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->keydlen))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->keydoff))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->keyulen))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->keyflags))
+ return (FALSE);
+ if (!xdr_bytes(xdrs, (char **)&objp->keydata.keydata_val, (u_int *) &objp->keydata.keydata_len, ~0))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->datadlen))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->datadoff))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->dataulen))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->dataflags))
+ return (FALSE);
+ if (!xdr_bytes(xdrs, (char **)&objp->datadata.datadata_val, (u_int *) &objp->datadata.datadata_len, ~0))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->flags))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___dbc_get_reply(xdrs, objp)
+ register XDR *xdrs;
+ __dbc_get_reply *objp;
+{
+
+ if (!xdr_int(xdrs, &objp->status))
+ return (FALSE);
+ if (!xdr_bytes(xdrs, (char **)&objp->keydata.keydata_val, (u_int *) &objp->keydata.keydata_len, ~0))
+ return (FALSE);
+ if (!xdr_bytes(xdrs, (char **)&objp->datadata.datadata_val, (u_int *) &objp->datadata.datadata_len, ~0))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___dbc_pget_msg(xdrs, objp)
+ register XDR *xdrs;
+ __dbc_pget_msg *objp;
+{
+
+ if (!xdr_u_int(xdrs, &objp->dbccl_id))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->skeydlen))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->skeydoff))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->skeyulen))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->skeyflags))
+ return (FALSE);
+ if (!xdr_bytes(xdrs, (char **)&objp->skeydata.skeydata_val, (u_int *) &objp->skeydata.skeydata_len, ~0))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->pkeydlen))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->pkeydoff))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->pkeyulen))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->pkeyflags))
+ return (FALSE);
+ if (!xdr_bytes(xdrs, (char **)&objp->pkeydata.pkeydata_val, (u_int *) &objp->pkeydata.pkeydata_len, ~0))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->datadlen))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->datadoff))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->dataulen))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->dataflags))
+ return (FALSE);
+ if (!xdr_bytes(xdrs, (char **)&objp->datadata.datadata_val, (u_int *) &objp->datadata.datadata_len, ~0))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->flags))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___dbc_pget_reply(xdrs, objp)
+ register XDR *xdrs;
+ __dbc_pget_reply *objp;
+{
+
+ if (!xdr_int(xdrs, &objp->status))
+ return (FALSE);
+ if (!xdr_bytes(xdrs, (char **)&objp->skeydata.skeydata_val, (u_int *) &objp->skeydata.skeydata_len, ~0))
+ return (FALSE);
+ if (!xdr_bytes(xdrs, (char **)&objp->pkeydata.pkeydata_val, (u_int *) &objp->pkeydata.pkeydata_len, ~0))
+ return (FALSE);
+ if (!xdr_bytes(xdrs, (char **)&objp->datadata.datadata_val, (u_int *) &objp->datadata.datadata_len, ~0))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___dbc_put_msg(xdrs, objp)
+ register XDR *xdrs;
+ __dbc_put_msg *objp;
+{
+
+ if (!xdr_u_int(xdrs, &objp->dbccl_id))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->keydlen))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->keydoff))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->keyulen))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->keyflags))
+ return (FALSE);
+ if (!xdr_bytes(xdrs, (char **)&objp->keydata.keydata_val, (u_int *) &objp->keydata.keydata_len, ~0))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->datadlen))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->datadoff))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->dataulen))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->dataflags))
+ return (FALSE);
+ if (!xdr_bytes(xdrs, (char **)&objp->datadata.datadata_val, (u_int *) &objp->datadata.datadata_len, ~0))
+ return (FALSE);
+ if (!xdr_u_int(xdrs, &objp->flags))
+ return (FALSE);
+ return (TRUE);
+}
+
+bool_t
+xdr___dbc_put_reply(xdrs, objp)
+ register XDR *xdrs;
+ __dbc_put_reply *objp;
+{
+
+ if (!xdr_int(xdrs, &objp->status))
+ return (FALSE);
+ if (!xdr_bytes(xdrs, (char **)&objp->keydata.keydata_val, (u_int *) &objp->keydata.keydata_len, ~0))
+ return (FALSE);
+ return (TRUE);
+}
+#endif /* HAVE_RPC */
diff --git a/bdb/rpc_server/c/gen_db_server.c b/bdb/rpc_server/c/gen_db_server.c
new file mode 100644
index 00000000000..0181fb06dce
--- /dev/null
+++ b/bdb/rpc_server/c/gen_db_server.c
@@ -0,0 +1,1169 @@
+/* Do not edit: automatically built by gen_rpc.awk. */
+#include "db_config.h"
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <rpc/rpc.h>
+#include <rpc/xdr.h>
+
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc_auto/db_server.h"
+#include "dbinc/db_server_int.h"
+#include "dbinc_auto/rpc_server_ext.h"
+
+/*
+ * PUBLIC: __env_cachesize_reply *__db_env_cachesize_4001
+ * PUBLIC: __P((__env_cachesize_msg *, struct svc_req *));
+ */
+__env_cachesize_reply *
+__db_env_cachesize_4001(msg, req)
+ __env_cachesize_msg *msg;
+ struct svc_req *req;
+{
+ static __env_cachesize_reply reply; /* must be static */
+ COMPQUIET(req, NULL);
+
+ __env_cachesize_proc(msg->dbenvcl_id,
+ msg->gbytes,
+ msg->bytes,
+ msg->ncache,
+ &reply);
+
+ return (&reply);
+}
+
+/*
+ * PUBLIC: __env_close_reply *__db_env_close_4001 __P((__env_close_msg *,
+ * PUBLIC: struct svc_req *));
+ */
+__env_close_reply *
+__db_env_close_4001(msg, req)
+ __env_close_msg *msg;
+ struct svc_req *req;
+{
+ static __env_close_reply reply; /* must be static */
+ COMPQUIET(req, NULL);
+
+ __env_close_proc(msg->dbenvcl_id,
+ msg->flags,
+ &reply);
+
+ return (&reply);
+}
+
+/*
+ * PUBLIC: __env_create_reply *__db_env_create_4001 __P((__env_create_msg *,
+ * PUBLIC: struct svc_req *));
+ */
+__env_create_reply *
+__db_env_create_4001(msg, req)
+ __env_create_msg *msg;
+ struct svc_req *req;
+{
+ static __env_create_reply reply; /* must be static */
+ COMPQUIET(req, NULL);
+
+ __env_create_proc(msg->timeout,
+ &reply);
+
+ return (&reply);
+}
+
+/*
+ * PUBLIC: __env_dbremove_reply *__db_env_dbremove_4001
+ * PUBLIC: __P((__env_dbremove_msg *, struct svc_req *));
+ */
+__env_dbremove_reply *
+__db_env_dbremove_4001(msg, req)
+ __env_dbremove_msg *msg;
+ struct svc_req *req;
+{
+ static __env_dbremove_reply reply; /* must be static */
+ COMPQUIET(req, NULL);
+
+ __env_dbremove_proc(msg->dbenvcl_id,
+ msg->txnpcl_id,
+ (*msg->name == '\0') ? NULL : msg->name,
+ (*msg->subdb == '\0') ? NULL : msg->subdb,
+ msg->flags,
+ &reply);
+
+ return (&reply);
+}
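
These wrappers also show the convention for optional string arguments: xdr_string cannot encode a NULL pointer, so the client stub presumably sends an empty string in its place, and the dispatch code maps it back to NULL before calling the _proc routine. The same test is repeated below for every name, subdb, newname, home, and passwd argument:

    const char *name = (*msg->name == '\0') ? NULL : msg->name;
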
+
+/*
+ * PUBLIC: __env_dbrename_reply *__db_env_dbrename_4001
+ * PUBLIC: __P((__env_dbrename_msg *, struct svc_req *));
+ */
+__env_dbrename_reply *
+__db_env_dbrename_4001(msg, req)
+ __env_dbrename_msg *msg;
+ struct svc_req *req;
+{
+ static __env_dbrename_reply reply; /* must be static */
+ COMPQUIET(req, NULL);
+
+ __env_dbrename_proc(msg->dbenvcl_id,
+ msg->txnpcl_id,
+ (*msg->name == '\0') ? NULL : msg->name,
+ (*msg->subdb == '\0') ? NULL : msg->subdb,
+ (*msg->newname == '\0') ? NULL : msg->newname,
+ msg->flags,
+ &reply);
+
+ return (&reply);
+}
+
+/*
+ * PUBLIC: __env_encrypt_reply *__db_env_encrypt_4001
+ * PUBLIC: __P((__env_encrypt_msg *, struct svc_req *));
+ */
+__env_encrypt_reply *
+__db_env_encrypt_4001(msg, req)
+ __env_encrypt_msg *msg;
+ struct svc_req *req;
+{
+ static __env_encrypt_reply reply; /* must be static */
+ COMPQUIET(req, NULL);
+
+ __env_encrypt_proc(msg->dbenvcl_id,
+ (*msg->passwd == '\0') ? NULL : msg->passwd,
+ msg->flags,
+ &reply);
+
+ return (&reply);
+}
+
+/*
+ * PUBLIC: __env_flags_reply *__db_env_flags_4001 __P((__env_flags_msg *,
+ * PUBLIC: struct svc_req *));
+ */
+__env_flags_reply *
+__db_env_flags_4001(msg, req)
+ __env_flags_msg *msg;
+ struct svc_req *req;
+{
+ static __env_flags_reply reply; /* must be static */
+ COMPQUIET(req, NULL);
+
+ __env_flags_proc(msg->dbenvcl_id,
+ msg->flags,
+ msg->onoff,
+ &reply);
+
+ return (&reply);
+}
+
+/*
+ * PUBLIC: __env_open_reply *__db_env_open_4001 __P((__env_open_msg *,
+ * PUBLIC: struct svc_req *));
+ */
+__env_open_reply *
+__db_env_open_4001(msg, req)
+ __env_open_msg *msg;
+ struct svc_req *req;
+{
+ static __env_open_reply reply; /* must be static */
+ COMPQUIET(req, NULL);
+
+ __env_open_proc(msg->dbenvcl_id,
+ (*msg->home == '\0') ? NULL : msg->home,
+ msg->flags,
+ msg->mode,
+ &reply);
+
+ return (&reply);
+}
+
+/*
+ * PUBLIC: __env_remove_reply *__db_env_remove_4001 __P((__env_remove_msg *,
+ * PUBLIC: struct svc_req *));
+ */
+__env_remove_reply *
+__db_env_remove_4001(msg, req)
+ __env_remove_msg *msg;
+ struct svc_req *req;
+{
+ static __env_remove_reply reply; /* must be static */
+ COMPQUIET(req, NULL);
+
+ __env_remove_proc(msg->dbenvcl_id,
+ (*msg->home == '\0') ? NULL : msg->home,
+ msg->flags,
+ &reply);
+
+ return (&reply);
+}
+
+/*
+ * PUBLIC: __txn_abort_reply *__db_txn_abort_4001 __P((__txn_abort_msg *,
+ * PUBLIC: struct svc_req *));
+ */
+__txn_abort_reply *
+__db_txn_abort_4001(msg, req)
+ __txn_abort_msg *msg;
+ struct svc_req *req;
+{
+ static __txn_abort_reply reply; /* must be static */
+ COMPQUIET(req, NULL);
+
+ __txn_abort_proc(msg->txnpcl_id,
+ &reply);
+
+ return (&reply);
+}
+
+/*
+ * PUBLIC: __txn_begin_reply *__db_txn_begin_4001 __P((__txn_begin_msg *,
+ * PUBLIC: struct svc_req *));
+ */
+__txn_begin_reply *
+__db_txn_begin_4001(msg, req)
+ __txn_begin_msg *msg;
+ struct svc_req *req;
+{
+ static __txn_begin_reply reply; /* must be static */
+ COMPQUIET(req, NULL);
+
+ __txn_begin_proc(msg->dbenvcl_id,
+ msg->parentcl_id,
+ msg->flags,
+ &reply);
+
+ return (&reply);
+}
+
+/*
+ * PUBLIC: __txn_commit_reply *__db_txn_commit_4001 __P((__txn_commit_msg *,
+ * PUBLIC: struct svc_req *));
+ */
+__txn_commit_reply *
+__db_txn_commit_4001(msg, req)
+ __txn_commit_msg *msg;
+ struct svc_req *req;
+{
+ static __txn_commit_reply reply; /* must be static */
+ COMPQUIET(req, NULL);
+
+ __txn_commit_proc(msg->txnpcl_id,
+ msg->flags,
+ &reply);
+
+ return (&reply);
+}
+
+/*
+ * PUBLIC: __txn_discard_reply *__db_txn_discard_4001
+ * PUBLIC: __P((__txn_discard_msg *, struct svc_req *));
+ */
+__txn_discard_reply *
+__db_txn_discard_4001(msg, req)
+ __txn_discard_msg *msg;
+ struct svc_req *req;
+{
+ static __txn_discard_reply reply; /* must be static */
+ COMPQUIET(req, NULL);
+
+ __txn_discard_proc(msg->txnpcl_id,
+ msg->flags,
+ &reply);
+
+ return (&reply);
+}
+
+/*
+ * PUBLIC: __txn_prepare_reply *__db_txn_prepare_4001
+ * PUBLIC: __P((__txn_prepare_msg *, struct svc_req *));
+ */
+__txn_prepare_reply *
+__db_txn_prepare_4001(msg, req)
+ __txn_prepare_msg *msg;
+ struct svc_req *req;
+{
+ static __txn_prepare_reply reply; /* must be static */
+ COMPQUIET(req, NULL);
+
+ __txn_prepare_proc(msg->txnpcl_id,
+ msg->gid,
+ &reply);
+
+ return (&reply);
+}
+
+/*
+ * PUBLIC: __txn_recover_reply *__db_txn_recover_4001
+ * PUBLIC: __P((__txn_recover_msg *, struct svc_req *));
+ */
+__txn_recover_reply *
+__db_txn_recover_4001(msg, req)
+ __txn_recover_msg *msg;
+ struct svc_req *req;
+{
+ static __txn_recover_reply reply; /* must be static */
+ static int __txn_recover_free = 0; /* must be static */
+
+ COMPQUIET(req, NULL);
+ if (__txn_recover_free)
+ xdr_free((xdrproc_t)xdr___txn_recover_reply, (void *)&reply);
+ __txn_recover_free = 0;
+
+ /* Reinitialize allocated fields */
+ reply.txn.txn_val = NULL;
+ reply.gid.gid_val = NULL;
+
+ __txn_recover_proc(msg->dbenvcl_id,
+ msg->count,
+ msg->flags,
+ &reply,
+ &__txn_recover_free);
+ return (&reply);
+}
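
The txn_recover wrapper above, and the get/pget/put/stat wrappers later in this file, all follow the same memory discipline: the reply is static, and any buffers the _proc routine attached to it are released only at the top of the *next* call, via xdr_free, because the RPC layer still references the reply while transmitting it. A self-contained sketch of that discipline, with illustrative names only:

    /* Sketch of the deferred-free pattern for replies carrying allocated
     * data: freed at the start of the next invocation, not before return. */
    #include <rpc/rpc.h>
    #include <stdlib.h>

    struct my_reply {
    	u_int len;
    	char *val;
    };

    static bool_t
    xdr_my_reply(XDR *xdrs, struct my_reply *objp)
    {
    	return (xdr_bytes(xdrs, &objp->val, &objp->len, ~0));
    }

    struct my_reply *
    my_dispatch(void)
    {
    	static struct my_reply reply;	/* must be static */
    	static int reply_free = 0;	/* must be static */

    	if (reply_free)			/* free the previous call's data */
    		xdr_free((xdrproc_t)xdr_my_reply, (void *)&reply);
    	reply_free = 0;

    	reply.val = malloc(16);		/* stands in for the _proc work */
    	reply.len = reply.val == NULL ? 0 : 16;
    	reply_free = reply.val != NULL;

    	return (&reply);		/* freed on the next invocation */
    }
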
+
+/*
+ * PUBLIC: __db_associate_reply *__db_db_associate_4001
+ * PUBLIC: __P((__db_associate_msg *, struct svc_req *));
+ */
+__db_associate_reply *
+__db_db_associate_4001(msg, req)
+ __db_associate_msg *msg;
+ struct svc_req *req;
+{
+ static __db_associate_reply reply; /* must be static */
+ COMPQUIET(req, NULL);
+
+ __db_associate_proc(msg->dbpcl_id,
+ msg->txnpcl_id,
+ msg->sdbpcl_id,
+ msg->flags,
+ &reply);
+
+ return (&reply);
+}
+
+/*
+ * PUBLIC: __db_bt_maxkey_reply *__db_db_bt_maxkey_4001
+ * PUBLIC: __P((__db_bt_maxkey_msg *, struct svc_req *));
+ */
+__db_bt_maxkey_reply *
+__db_db_bt_maxkey_4001(msg, req)
+ __db_bt_maxkey_msg *msg;
+ struct svc_req *req;
+{
+ static __db_bt_maxkey_reply reply; /* must be static */
+ COMPQUIET(req, NULL);
+
+ __db_bt_maxkey_proc(msg->dbpcl_id,
+ msg->maxkey,
+ &reply);
+
+ return (&reply);
+}
+
+/*
+ * PUBLIC: __db_bt_minkey_reply *__db_db_bt_minkey_4001
+ * PUBLIC: __P((__db_bt_minkey_msg *, struct svc_req *));
+ */
+__db_bt_minkey_reply *
+__db_db_bt_minkey_4001(msg, req)
+ __db_bt_minkey_msg *msg;
+ struct svc_req *req;
+{
+ static __db_bt_minkey_reply reply; /* must be static */
+ COMPQUIET(req, NULL);
+
+ __db_bt_minkey_proc(msg->dbpcl_id,
+ msg->minkey,
+ &reply);
+
+ return (&reply);
+}
+
+/*
+ * PUBLIC: __db_close_reply *__db_db_close_4001 __P((__db_close_msg *,
+ * PUBLIC: struct svc_req *));
+ */
+__db_close_reply *
+__db_db_close_4001(msg, req)
+ __db_close_msg *msg;
+ struct svc_req *req;
+{
+ static __db_close_reply reply; /* must be static */
+ COMPQUIET(req, NULL);
+
+ __db_close_proc(msg->dbpcl_id,
+ msg->flags,
+ &reply);
+
+ return (&reply);
+}
+
+/*
+ * PUBLIC: __db_create_reply *__db_db_create_4001 __P((__db_create_msg *,
+ * PUBLIC: struct svc_req *));
+ */
+__db_create_reply *
+__db_db_create_4001(msg, req)
+ __db_create_msg *msg;
+ struct svc_req *req;
+{
+ static __db_create_reply reply; /* must be static */
+ COMPQUIET(req, NULL);
+
+ __db_create_proc(msg->dbenvcl_id,
+ msg->flags,
+ &reply);
+
+ return (&reply);
+}
+
+/*
+ * PUBLIC: __db_del_reply *__db_db_del_4001 __P((__db_del_msg *,
+ * PUBLIC: struct svc_req *));
+ */
+__db_del_reply *
+__db_db_del_4001(msg, req)
+ __db_del_msg *msg;
+ struct svc_req *req;
+{
+ static __db_del_reply reply; /* must be static */
+ COMPQUIET(req, NULL);
+
+ __db_del_proc(msg->dbpcl_id,
+ msg->txnpcl_id,
+ msg->keydlen,
+ msg->keydoff,
+ msg->keyulen,
+ msg->keyflags,
+ msg->keydata.keydata_val,
+ msg->keydata.keydata_len,
+ msg->flags,
+ &reply);
+
+ return (&reply);
+}
+
+/*
+ * PUBLIC: __db_encrypt_reply *__db_db_encrypt_4001 __P((__db_encrypt_msg *,
+ * PUBLIC: struct svc_req *));
+ */
+__db_encrypt_reply *
+__db_db_encrypt_4001(msg, req)
+ __db_encrypt_msg *msg;
+ struct svc_req *req;
+{
+ static __db_encrypt_reply reply; /* must be static */
+ COMPQUIET(req, NULL);
+
+ __db_encrypt_proc(msg->dbpcl_id,
+ (*msg->passwd == '\0') ? NULL : msg->passwd,
+ msg->flags,
+ &reply);
+
+ return (&reply);
+}
+
+/*
+ * PUBLIC: __db_extentsize_reply *__db_db_extentsize_4001
+ * PUBLIC: __P((__db_extentsize_msg *, struct svc_req *));
+ */
+__db_extentsize_reply *
+__db_db_extentsize_4001(msg, req)
+ __db_extentsize_msg *msg;
+ struct svc_req *req;
+{
+ static __db_extentsize_reply reply; /* must be static */
+ COMPQUIET(req, NULL);
+
+ __db_extentsize_proc(msg->dbpcl_id,
+ msg->extentsize,
+ &reply);
+
+ return (&reply);
+}
+
+/*
+ * PUBLIC: __db_flags_reply *__db_db_flags_4001 __P((__db_flags_msg *,
+ * PUBLIC: struct svc_req *));
+ */
+__db_flags_reply *
+__db_db_flags_4001(msg, req)
+ __db_flags_msg *msg;
+ struct svc_req *req;
+{
+ static __db_flags_reply reply; /* must be static */
+ COMPQUIET(req, NULL);
+
+ __db_flags_proc(msg->dbpcl_id,
+ msg->flags,
+ &reply);
+
+ return (&reply);
+}
+
+/*
+ * PUBLIC: __db_get_reply *__db_db_get_4001 __P((__db_get_msg *,
+ * PUBLIC: struct svc_req *));
+ */
+__db_get_reply *
+__db_db_get_4001(msg, req)
+ __db_get_msg *msg;
+ struct svc_req *req;
+{
+ static __db_get_reply reply; /* must be static */
+ static int __db_get_free = 0; /* must be static */
+
+ COMPQUIET(req, NULL);
+ if (__db_get_free)
+ xdr_free((xdrproc_t)xdr___db_get_reply, (void *)&reply);
+ __db_get_free = 0;
+
+ /* Reinitialize allocated fields */
+ reply.keydata.keydata_val = NULL;
+ reply.datadata.datadata_val = NULL;
+
+ __db_get_proc(msg->dbpcl_id,
+ msg->txnpcl_id,
+ msg->keydlen,
+ msg->keydoff,
+ msg->keyulen,
+ msg->keyflags,
+ msg->keydata.keydata_val,
+ msg->keydata.keydata_len,
+ msg->datadlen,
+ msg->datadoff,
+ msg->dataulen,
+ msg->dataflags,
+ msg->datadata.datadata_val,
+ msg->datadata.datadata_len,
+ msg->flags,
+ &reply,
+ &__db_get_free);
+ return (&reply);
+}
+
+/*
+ * PUBLIC: __db_h_ffactor_reply *__db_db_h_ffactor_4001
+ * PUBLIC: __P((__db_h_ffactor_msg *, struct svc_req *));
+ */
+__db_h_ffactor_reply *
+__db_db_h_ffactor_4001(msg, req)
+ __db_h_ffactor_msg *msg;
+ struct svc_req *req;
+{
+ static __db_h_ffactor_reply reply; /* must be static */
+ COMPQUIET(req, NULL);
+
+ __db_h_ffactor_proc(msg->dbpcl_id,
+ msg->ffactor,
+ &reply);
+
+ return (&reply);
+}
+
+/*
+ * PUBLIC: __db_h_nelem_reply *__db_db_h_nelem_4001 __P((__db_h_nelem_msg *,
+ * PUBLIC: struct svc_req *));
+ */
+__db_h_nelem_reply *
+__db_db_h_nelem_4001(msg, req)
+ __db_h_nelem_msg *msg;
+ struct svc_req *req;
+{
+ static __db_h_nelem_reply reply; /* must be static */
+ COMPQUIET(req, NULL);
+
+ __db_h_nelem_proc(msg->dbpcl_id,
+ msg->nelem,
+ &reply);
+
+ return (&reply);
+}
+
+/*
+ * PUBLIC: __db_key_range_reply *__db_db_key_range_4001
+ * PUBLIC: __P((__db_key_range_msg *, struct svc_req *));
+ */
+__db_key_range_reply *
+__db_db_key_range_4001(msg, req)
+ __db_key_range_msg *msg;
+ struct svc_req *req;
+{
+ static __db_key_range_reply reply; /* must be static */
+ COMPQUIET(req, NULL);
+
+ __db_key_range_proc(msg->dbpcl_id,
+ msg->txnpcl_id,
+ msg->keydlen,
+ msg->keydoff,
+ msg->keyulen,
+ msg->keyflags,
+ msg->keydata.keydata_val,
+ msg->keydata.keydata_len,
+ msg->flags,
+ &reply);
+
+ return (&reply);
+}
+
+/*
+ * PUBLIC: __db_lorder_reply *__db_db_lorder_4001 __P((__db_lorder_msg *,
+ * PUBLIC: struct svc_req *));
+ */
+__db_lorder_reply *
+__db_db_lorder_4001(msg, req)
+ __db_lorder_msg *msg;
+ struct svc_req *req;
+{
+ static __db_lorder_reply reply; /* must be static */
+ COMPQUIET(req, NULL);
+
+ __db_lorder_proc(msg->dbpcl_id,
+ msg->lorder,
+ &reply);
+
+ return (&reply);
+}
+
+/*
+ * PUBLIC: __db_open_reply *__db_db_open_4001 __P((__db_open_msg *,
+ * PUBLIC: struct svc_req *));
+ */
+__db_open_reply *
+__db_db_open_4001(msg, req)
+ __db_open_msg *msg;
+ struct svc_req *req;
+{
+ static __db_open_reply reply; /* must be static */
+ COMPQUIET(req, NULL);
+
+ __db_open_proc(msg->dbpcl_id,
+ msg->txnpcl_id,
+ (*msg->name == '\0') ? NULL : msg->name,
+ (*msg->subdb == '\0') ? NULL : msg->subdb,
+ msg->type,
+ msg->flags,
+ msg->mode,
+ &reply);
+
+ return (&reply);
+}
+
+/*
+ * PUBLIC: __db_pagesize_reply *__db_db_pagesize_4001
+ * PUBLIC: __P((__db_pagesize_msg *, struct svc_req *));
+ */
+__db_pagesize_reply *
+__db_db_pagesize_4001(msg, req)
+ __db_pagesize_msg *msg;
+ struct svc_req *req;
+{
+ static __db_pagesize_reply reply; /* must be static */
+ COMPQUIET(req, NULL);
+
+ __db_pagesize_proc(msg->dbpcl_id,
+ msg->pagesize,
+ &reply);
+
+ return (&reply);
+}
+
+/*
+ * PUBLIC: __db_pget_reply *__db_db_pget_4001 __P((__db_pget_msg *,
+ * PUBLIC: struct svc_req *));
+ */
+__db_pget_reply *
+__db_db_pget_4001(msg, req)
+ __db_pget_msg *msg;
+ struct svc_req *req;
+{
+ static __db_pget_reply reply; /* must be static */
+ static int __db_pget_free = 0; /* must be static */
+
+ COMPQUIET(req, NULL);
+ if (__db_pget_free)
+ xdr_free((xdrproc_t)xdr___db_pget_reply, (void *)&reply);
+ __db_pget_free = 0;
+
+ /* Reinitialize allocated fields */
+ reply.skeydata.skeydata_val = NULL;
+ reply.pkeydata.pkeydata_val = NULL;
+ reply.datadata.datadata_val = NULL;
+
+ __db_pget_proc(msg->dbpcl_id,
+ msg->txnpcl_id,
+ msg->skeydlen,
+ msg->skeydoff,
+ msg->skeyulen,
+ msg->skeyflags,
+ msg->skeydata.skeydata_val,
+ msg->skeydata.skeydata_len,
+ msg->pkeydlen,
+ msg->pkeydoff,
+ msg->pkeyulen,
+ msg->pkeyflags,
+ msg->pkeydata.pkeydata_val,
+ msg->pkeydata.pkeydata_len,
+ msg->datadlen,
+ msg->datadoff,
+ msg->dataulen,
+ msg->dataflags,
+ msg->datadata.datadata_val,
+ msg->datadata.datadata_len,
+ msg->flags,
+ &reply,
+ &__db_pget_free);
+ return (&reply);
+}
+
+/*
+ * PUBLIC: __db_put_reply *__db_db_put_4001 __P((__db_put_msg *,
+ * PUBLIC: struct svc_req *));
+ */
+__db_put_reply *
+__db_db_put_4001(msg, req)
+ __db_put_msg *msg;
+ struct svc_req *req;
+{
+ static __db_put_reply reply; /* must be static */
+ static int __db_put_free = 0; /* must be static */
+
+ COMPQUIET(req, NULL);
+ if (__db_put_free)
+ xdr_free((xdrproc_t)xdr___db_put_reply, (void *)&reply);
+ __db_put_free = 0;
+
+ /* Reinitialize allocated fields */
+ reply.keydata.keydata_val = NULL;
+
+ __db_put_proc(msg->dbpcl_id,
+ msg->txnpcl_id,
+ msg->keydlen,
+ msg->keydoff,
+ msg->keyulen,
+ msg->keyflags,
+ msg->keydata.keydata_val,
+ msg->keydata.keydata_len,
+ msg->datadlen,
+ msg->datadoff,
+ msg->dataulen,
+ msg->dataflags,
+ msg->datadata.datadata_val,
+ msg->datadata.datadata_len,
+ msg->flags,
+ &reply,
+ &__db_put_free);
+ return (&reply);
+}
+
+/*
+ * PUBLIC: __db_re_delim_reply *__db_db_re_delim_4001
+ * PUBLIC: __P((__db_re_delim_msg *, struct svc_req *));
+ */
+__db_re_delim_reply *
+__db_db_re_delim_4001(msg, req)
+ __db_re_delim_msg *msg;
+ struct svc_req *req;
+{
+ static __db_re_delim_reply reply; /* must be static */
+ COMPQUIET(req, NULL);
+
+ __db_re_delim_proc(msg->dbpcl_id,
+ msg->delim,
+ &reply);
+
+ return (&reply);
+}
+
+/*
+ * PUBLIC: __db_re_len_reply *__db_db_re_len_4001 __P((__db_re_len_msg *,
+ * PUBLIC: struct svc_req *));
+ */
+__db_re_len_reply *
+__db_db_re_len_4001(msg, req)
+ __db_re_len_msg *msg;
+ struct svc_req *req;
+{
+ static __db_re_len_reply reply; /* must be static */
+ COMPQUIET(req, NULL);
+
+ __db_re_len_proc(msg->dbpcl_id,
+ msg->len,
+ &reply);
+
+ return (&reply);
+}
+
+/*
+ * PUBLIC: __db_re_pad_reply *__db_db_re_pad_4001 __P((__db_re_pad_msg *,
+ * PUBLIC: struct svc_req *));
+ */
+__db_re_pad_reply *
+__db_db_re_pad_4001(msg, req)
+ __db_re_pad_msg *msg;
+ struct svc_req *req;
+{
+ static __db_re_pad_reply reply; /* must be static */
+ COMPQUIET(req, NULL);
+
+ __db_re_pad_proc(msg->dbpcl_id,
+ msg->pad,
+ &reply);
+
+ return (&reply);
+}
+
+/*
+ * PUBLIC: __db_remove_reply *__db_db_remove_4001 __P((__db_remove_msg *,
+ * PUBLIC: struct svc_req *));
+ */
+__db_remove_reply *
+__db_db_remove_4001(msg, req)
+ __db_remove_msg *msg;
+ struct svc_req *req;
+{
+ static __db_remove_reply reply; /* must be static */
+ COMPQUIET(req, NULL);
+
+ __db_remove_proc(msg->dbpcl_id,
+ (*msg->name == '\0') ? NULL : msg->name,
+ (*msg->subdb == '\0') ? NULL : msg->subdb,
+ msg->flags,
+ &reply);
+
+ return (&reply);
+}
+
+/*
+ * PUBLIC: __db_rename_reply *__db_db_rename_4001 __P((__db_rename_msg *,
+ * PUBLIC: struct svc_req *));
+ */
+__db_rename_reply *
+__db_db_rename_4001(msg, req)
+ __db_rename_msg *msg;
+ struct svc_req *req;
+{
+ static __db_rename_reply reply; /* must be static */
+ COMPQUIET(req, NULL);
+
+ __db_rename_proc(msg->dbpcl_id,
+ (*msg->name == '\0') ? NULL : msg->name,
+ (*msg->subdb == '\0') ? NULL : msg->subdb,
+ (*msg->newname == '\0') ? NULL : msg->newname,
+ msg->flags,
+ &reply);
+
+ return (&reply);
+}
+
+/*
+ * PUBLIC: __db_stat_reply *__db_db_stat_4001 __P((__db_stat_msg *,
+ * PUBLIC: struct svc_req *));
+ */
+__db_stat_reply *
+__db_db_stat_4001(msg, req)
+ __db_stat_msg *msg;
+ struct svc_req *req;
+{
+ static __db_stat_reply reply; /* must be static */
+ static int __db_stat_free = 0; /* must be static */
+
+ COMPQUIET(req, NULL);
+ if (__db_stat_free)
+ xdr_free((xdrproc_t)xdr___db_stat_reply, (void *)&reply);
+ __db_stat_free = 0;
+
+ /* Reinitialize allocated fields */
+ reply.stats.stats_val = NULL;
+
+ __db_stat_proc(msg->dbpcl_id,
+ msg->flags,
+ &reply,
+ &__db_stat_free);
+ return (&reply);
+}
+
+/*
+ * PUBLIC: __db_sync_reply *__db_db_sync_4001 __P((__db_sync_msg *,
+ * PUBLIC: struct svc_req *));
+ */
+__db_sync_reply *
+__db_db_sync_4001(msg, req)
+ __db_sync_msg *msg;
+ struct svc_req *req;
+{
+ static __db_sync_reply reply; /* must be static */
+ COMPQUIET(req, NULL);
+
+ __db_sync_proc(msg->dbpcl_id,
+ msg->flags,
+ &reply);
+
+ return (&reply);
+}
+
+/*
+ * PUBLIC: __db_truncate_reply *__db_db_truncate_4001
+ * PUBLIC: __P((__db_truncate_msg *, struct svc_req *));
+ */
+__db_truncate_reply *
+__db_db_truncate_4001(msg, req)
+ __db_truncate_msg *msg;
+ struct svc_req *req;
+{
+ static __db_truncate_reply reply; /* must be static */
+ COMPQUIET(req, NULL);
+
+ __db_truncate_proc(msg->dbpcl_id,
+ msg->txnpcl_id,
+ msg->flags,
+ &reply);
+
+ return (&reply);
+}
+
+/*
+ * PUBLIC: __db_cursor_reply *__db_db_cursor_4001 __P((__db_cursor_msg *,
+ * PUBLIC: struct svc_req *));
+ */
+__db_cursor_reply *
+__db_db_cursor_4001(msg, req)
+ __db_cursor_msg *msg;
+ struct svc_req *req;
+{
+ static __db_cursor_reply reply; /* must be static */
+ COMPQUIET(req, NULL);
+
+ __db_cursor_proc(msg->dbpcl_id,
+ msg->txnpcl_id,
+ msg->flags,
+ &reply);
+
+ return (&reply);
+}
+
+/*
+ * PUBLIC: __db_join_reply *__db_db_join_4001 __P((__db_join_msg *,
+ * PUBLIC: struct svc_req *));
+ */
+__db_join_reply *
+__db_db_join_4001(msg, req)
+ __db_join_msg *msg;
+ struct svc_req *req;
+{
+ static __db_join_reply reply; /* must be static */
+ COMPQUIET(req, NULL);
+
+ __db_join_proc(msg->dbpcl_id,
+ msg->curs.curs_val,
+ msg->curs.curs_len,
+ msg->flags,
+ &reply);
+
+ return (&reply);
+}
+
+/*
+ * PUBLIC: __dbc_close_reply *__db_dbc_close_4001 __P((__dbc_close_msg *,
+ * PUBLIC: struct svc_req *));
+ */
+__dbc_close_reply *
+__db_dbc_close_4001(msg, req)
+ __dbc_close_msg *msg;
+ struct svc_req *req;
+{
+ static __dbc_close_reply reply; /* must be static */
+ COMPQUIET(req, NULL);
+
+ __dbc_close_proc(msg->dbccl_id,
+ &reply);
+
+ return (&reply);
+}
+
+/*
+ * PUBLIC: __dbc_count_reply *__db_dbc_count_4001 __P((__dbc_count_msg *,
+ * PUBLIC: struct svc_req *));
+ */
+__dbc_count_reply *
+__db_dbc_count_4001(msg, req)
+ __dbc_count_msg *msg;
+ struct svc_req *req;
+{
+ static __dbc_count_reply reply; /* must be static */
+ COMPQUIET(req, NULL);
+
+ __dbc_count_proc(msg->dbccl_id,
+ msg->flags,
+ &reply);
+
+ return (&reply);
+}
+
+/*
+ * PUBLIC: __dbc_del_reply *__db_dbc_del_4001 __P((__dbc_del_msg *,
+ * PUBLIC: struct svc_req *));
+ */
+__dbc_del_reply *
+__db_dbc_del_4001(msg, req)
+ __dbc_del_msg *msg;
+ struct svc_req *req;
+{
+ static __dbc_del_reply reply; /* must be static */
+ COMPQUIET(req, NULL);
+
+ __dbc_del_proc(msg->dbccl_id,
+ msg->flags,
+ &reply);
+
+ return (&reply);
+}
+
+/*
+ * PUBLIC: __dbc_dup_reply *__db_dbc_dup_4001 __P((__dbc_dup_msg *,
+ * PUBLIC: struct svc_req *));
+ */
+__dbc_dup_reply *
+__db_dbc_dup_4001(msg, req)
+ __dbc_dup_msg *msg;
+ struct svc_req *req;
+{
+ static __dbc_dup_reply reply; /* must be static */
+ COMPQUIET(req, NULL);
+
+ __dbc_dup_proc(msg->dbccl_id,
+ msg->flags,
+ &reply);
+
+ return (&reply);
+}
+
+/*
+ * PUBLIC: __dbc_get_reply *__db_dbc_get_4001 __P((__dbc_get_msg *,
+ * PUBLIC: struct svc_req *));
+ */
+__dbc_get_reply *
+__db_dbc_get_4001(msg, req)
+ __dbc_get_msg *msg;
+ struct svc_req *req;
+{
+ static __dbc_get_reply reply; /* must be static */
+ static int __dbc_get_free = 0; /* must be static */
+
+ COMPQUIET(req, NULL);
+ if (__dbc_get_free)
+ xdr_free((xdrproc_t)xdr___dbc_get_reply, (void *)&reply);
+ __dbc_get_free = 0;
+
+ /* Reinitialize allocated fields */
+ reply.keydata.keydata_val = NULL;
+ reply.datadata.datadata_val = NULL;
+
+ __dbc_get_proc(msg->dbccl_id,
+ msg->keydlen,
+ msg->keydoff,
+ msg->keyulen,
+ msg->keyflags,
+ msg->keydata.keydata_val,
+ msg->keydata.keydata_len,
+ msg->datadlen,
+ msg->datadoff,
+ msg->dataulen,
+ msg->dataflags,
+ msg->datadata.datadata_val,
+ msg->datadata.datadata_len,
+ msg->flags,
+ &reply,
+ &__dbc_get_free);
+ return (&reply);
+}
+
+/*
+ * PUBLIC: __dbc_pget_reply *__db_dbc_pget_4001 __P((__dbc_pget_msg *,
+ * PUBLIC: struct svc_req *));
+ */
+__dbc_pget_reply *
+__db_dbc_pget_4001(msg, req)
+ __dbc_pget_msg *msg;
+ struct svc_req *req;
+{
+ static __dbc_pget_reply reply; /* must be static */
+ static int __dbc_pget_free = 0; /* must be static */
+
+ COMPQUIET(req, NULL);
+ if (__dbc_pget_free)
+ xdr_free((xdrproc_t)xdr___dbc_pget_reply, (void *)&reply);
+ __dbc_pget_free = 0;
+
+ /* Reinitialize allocated fields */
+ reply.skeydata.skeydata_val = NULL;
+ reply.pkeydata.pkeydata_val = NULL;
+ reply.datadata.datadata_val = NULL;
+
+ __dbc_pget_proc(msg->dbccl_id,
+ msg->skeydlen,
+ msg->skeydoff,
+ msg->skeyulen,
+ msg->skeyflags,
+ msg->skeydata.skeydata_val,
+ msg->skeydata.skeydata_len,
+ msg->pkeydlen,
+ msg->pkeydoff,
+ msg->pkeyulen,
+ msg->pkeyflags,
+ msg->pkeydata.pkeydata_val,
+ msg->pkeydata.pkeydata_len,
+ msg->datadlen,
+ msg->datadoff,
+ msg->dataulen,
+ msg->dataflags,
+ msg->datadata.datadata_val,
+ msg->datadata.datadata_len,
+ msg->flags,
+ &reply,
+ &__dbc_pget_free);
+ return (&reply);
+}
+
+/*
+ * PUBLIC: __dbc_put_reply *__db_dbc_put_4001 __P((__dbc_put_msg *,
+ * PUBLIC: struct svc_req *));
+ */
+__dbc_put_reply *
+__db_dbc_put_4001(msg, req)
+ __dbc_put_msg *msg;
+ struct svc_req *req;
+{
+ static __dbc_put_reply reply; /* must be static */
+ static int __dbc_put_free = 0; /* must be static */
+
+ COMPQUIET(req, NULL);
+ if (__dbc_put_free)
+ xdr_free((xdrproc_t)xdr___dbc_put_reply, (void *)&reply);
+ __dbc_put_free = 0;
+
+ /* Reinitialize allocated fields */
+ reply.keydata.keydata_val = NULL;
+
+ __dbc_put_proc(msg->dbccl_id,
+ msg->keydlen,
+ msg->keydoff,
+ msg->keyulen,
+ msg->keyflags,
+ msg->keydata.keydata_val,
+ msg->keydata.keydata_len,
+ msg->datadlen,
+ msg->datadoff,
+ msg->dataulen,
+ msg->dataflags,
+ msg->datadata.datadata_val,
+ msg->datadata.datadata_len,
+ msg->flags,
+ &reply,
+ &__dbc_put_free);
+ return (&reply);
+}
+
diff --git a/bdb/rpc_server/clsrv.html b/bdb/rpc_server/clsrv.html
index ae089c4b382..599ad56f557 100644
--- a/bdb/rpc_server/clsrv.html
+++ b/bdb/rpc_server/clsrv.html
@@ -1,52 +1,52 @@
-<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
-<HTML>
-<HEAD>
- <META HTTP-EQUIV="Content-Type" CONTENT="text/html; charset=iso-8859-1">
- <META NAME="GENERATOR" CONTENT="Mozilla/4.08 [en] (X11; I; FreeBSD 3.3-RELEASE i386) [Netscape]">
-</HEAD>
-<BODY>
-
-<CENTER>
-<H1>
-Client/Server Interface for Berkeley DB</H1></CENTER>
-
-<CENTER><I>Susan LoVerso</I>
-<BR><I>sue@sleepycat.com</I>
-<BR><I>Rev 1.3</I>
-<BR><I>1999 Nov 29</I></CENTER>
-
-<P>We provide an interface allowing client/server access to Berkeley DB.&nbsp;&nbsp;
+<!doctype html public "-//w3c//dtd html 4.0 transitional//en">
+<html>
+<head>
+ <meta http-equiv="Content-Type" content="text/html; charset=iso-8859-1">
+ <meta name="GENERATOR" content="Mozilla/4.76 [en] (X11; U; FreeBSD 4.3-RELEASE i386) [Netscape]">
+</head>
+<body>
+
+<center>
+<h1>
+&nbsp;Client/Server Interface for Berkeley DB</h1></center>
+
+<center><i>Susan LoVerso</i>
+<br><i>sue@sleepycat.com</i>
+<br><i>Rev 1.3</i>
+<br><i>1999 Nov 29</i></center>
+
+<p>We provide an interface allowing client/server access to Berkeley DB.&nbsp;&nbsp;
Our goal is to provide a client and server library to allow users to separate
the functionality of their applications yet still have access to the full
benefits of Berkeley DB.&nbsp; The goal is to provide a totally seamless
interface with minimal modification to existing applications as well.
-<P>The client/server interface for Berkeley DB can be broken up into several
+<p>The client/server interface for Berkeley DB can be broken up into several
layers.&nbsp; At the lowest level there is the transport mechanism to send
out the messages over the network.&nbsp; Above that layer is the messaging
layer to interpret what comes over the wire, and bundle/unbundle message
contents.&nbsp; The next layer is Berkeley DB itself.
-<P>The transport layer uses ONC RPC (RFC 1831) and XDR (RFC 1832).&nbsp;
+<p>The transport layer uses ONC RPC (RFC 1831) and XDR (RFC 1832).&nbsp;
We declare our message types and operations supported by our program and
the RPC library and utilities pretty much take care of the rest.&nbsp;
The
-<I>rpcgen</I> program generates all of the low level code needed.&nbsp;
+<i>rpcgen</i> program generates all of the low level code needed.&nbsp;
We need to define both sides of the RPC.
-<BR>&nbsp;
-<H2>
-<A NAME="DB Modifications"></A>DB Modifications</H2>
+<br>&nbsp;
+<h2>
+<a NAME="DB Modifications"></a>DB Modifications</h2>
To achieve the goal of a seamless interface, it is necessary to impose
a constraint on the application. That constraint is simply that all database
access must be done through an open environment.&nbsp; I.e. this model
does not support standalone databases.&nbsp; The reason for this constraint
is so that we have an environment structure internally to store our connection
to the server.&nbsp; Imposing this constraint means that we can provide
-the seamless interface just by adding a single environment method: <A HREF="../docs/api_c/env_set_server.html">DBENV->set_server()</A>.
-<P>The planned interface for this method is:
-<PRE>DBENV->set_server(dbenv,&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; /* DB_ENV structure */
+the seamless interface just by adding a single environment method: <a href="../docs/api_c/env_set_rpc_server.html">DBENV->set_rpc_server()</a>.
+<p>The planned interface for this method is:
+<pre>DBENV->set_rpc_server(dbenv,&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; /* DB_ENV structure */
&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; hostname&nbsp;&nbsp;&nbsp; /* Host of server */
&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; cl_timeout, /* Client timeout (sec) */
&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; srv_timeout,/* Server timeout (sec) */
-&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; flags);&nbsp;&nbsp;&nbsp;&nbsp; /* Flags: unused */</PRE>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; flags);&nbsp;&nbsp;&nbsp;&nbsp; /* Flags: unused */</pre>
This new method takes the hostname of the server, establishes our connection
and an environment on the server.&nbsp; If a server timeout is specified,
then we send that to the server as well (and the server may or may not
@@ -61,30 +61,30 @@ is currently unused, but exists because we always need to have a placeholder
for flags and it would be used for specifying authentication desired (were
we to provide an authentication scheme at some point) or other uses not
thought of yet!
-<P>This client code is part of the monolithic DB library.&nbsp; The user
-accesses the client functions via a new flag to <A HREF="../docs/api_c/db_env_create.html">db_env_create()</A>.&nbsp;
+<p>This client code is part of the monolithic DB library.&nbsp; The user
+accesses the client functions via a new flag to <a href="../docs/api_c/db_env_create.html">db_env_create()</a>.&nbsp;
That flag is DB_CLIENT.&nbsp; By using this flag the user indicates they
want to have the client methods rather than the standard methods for the
environment.&nbsp; Also by issuing this flag, the user needs to connect
-to the server via the <A HREF="../docs/api_c/env_set_server.html">DBENV->set_server()</A>
+to the server via the <a href="../docs/api_c/env_set_rpc_server.html">DBENV->set_rpc_server()</a>
method.
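+<p>A minimal sketch of that client-side setup, following only the DB_CLIENT
+flag and the planned interface shown above (the host name, home directory and
+open flags here are placeholders, and error handling is abbreviated):
+<pre>#include &lt;db.h>
+
+int
+client_setup(void)
+{
+	DB_ENV *dbenv;
+	int ret;
+
+	/* Ask for the client methods rather than the standard ones. */
+	if ((ret = db_env_create(&dbenv, DB_CLIENT)) != 0)
+		return (ret);
+
+	/*
+	 * Connect to the server, per the planned interface above;
+	 * timeouts of 0 mean "use the defaults".
+	 */
+	if ((ret = dbenv->set_rpc_server(dbenv, "myhost", 0, 0, 0)) != 0)
+		return (ret);
+
+	/* From here on the remote environment is used as usual. */
+	return (dbenv->open(dbenv, "/home/db", DB_CREATE | DB_INIT_MPOOL, 0));
+}</pre>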
-<P>We need two new fields in the <I>DB_ENV </I>structure.&nbsp; One is
+<p>We need two new fields in the <i>DB_ENV </i>structure.&nbsp; One is
the socket descriptor to communicate to the server, the other field is
-the client identifier the server gives to us.&nbsp; The <I>DB, </I>and<I>
-DBC </I>only need one additional field, the client identifier.&nbsp; The
-<I>DB_TXN</I>
-structure does not need modification, we are overloading the <I>txn_id
-</I>field.
-<H2>
-Issues</H2>
+the client identifier the server gives to us.&nbsp; The <i>DB, </i>and<i>
+DBC </i>only need one additional field, the client identifier.&nbsp; The
+<i>DB_TXN</i>
+structure does not need modification, we are overloading the <i>txn_id
+</i>field.
+<h2>
+Issues</h2>
We need to figure out what to do in case of client and server crashes.&nbsp;
Both the client library and the server program are stateful.&nbsp; They
both consume local resources during the lifetime of the connection.&nbsp;
Should one end drop that connection, the other side needs to release those
resources.
-<P>If the server crashes, then the client will get an error back.&nbsp;
+<p>If the server crashes, then the client will get an error back.&nbsp;
I have chosen to implement time-outs on the client side, using a default
-or allowing the application to specify one through the <A HREF="../docs/api_c/env_set_server.html">DBENV->set_server()</A>
+or allowing the application to specify one through the <a href="../docs/api_c/env_set_rpc_server.html">DBENV->set_rpc_server()</a>
method.&nbsp; Either the current operation will time-out waiting for the
reply or the next operation called will time out (or get back some other
kind of error regarding the server's non-existence).&nbsp; In any case,
@@ -102,65 +102,65 @@ on recover.&nbsp; The client can then re-establish its connection and begin
again.&nbsp; This is effectively like beginning over.&nbsp; The client
cannot use ID's from its previous connection to the server.&nbsp; However,
if recovery is run, then consistency is assured.
-<P>If the client crashes, the server needs to somehow figure this out.&nbsp;
+<p>If the client crashes, the server needs to somehow figure this out.&nbsp;
The server is just sitting there waiting for a request to come in.&nbsp;
A server must be able to time-out a client.&nbsp; Similar to ftpd, if a
connection is idle for N seconds, then the server decides the client is
dead and releases that client's resources, aborting any open transactions,
closing any open databases and environments.&nbsp;&nbsp; The server timing
out a client is not a trivial issue however.&nbsp; The generated function
-for the server just calls <I>svc_run()</I>.&nbsp; The server code I write
+for the server just calls <i>svc_run()</i>.&nbsp; The server code I write
contains procedures to do specific things.&nbsp; We do not have access
-to the code calling <I>select()</I>.&nbsp; Timing out the select is not
+to the code calling <i>select()</i>.&nbsp; Timing out the select is not
good enough even if we could do so.&nbsp; We want to time-out idle environments,
not simply cause a time-out if the server is idle a while.&nbsp; See the
-discussion of the <A HREF="#The Server Program">server program</A> for
+discussion of the <a href="#The Server Program">server program</a> for
a description of how we accomplish this.
-<P>Since rpcgen generates the main() function of the server, I do not yet
+<p>Since rpcgen generates the main() function of the server, I do not yet
know how we are going to have the server multi-threaded or multi-process
without changing the generated code.&nbsp; The RPC book indicates that
the only way to accomplish this is through modifying the generated code
-in the server.&nbsp; <B>For the moment we will ignore this issue while
-we get the core server working, as it is only a performance issue.</B>
-<P>We do not do any security or authentication.&nbsp; Someone could get
+in the server.&nbsp; <b>For the moment we will ignore this issue while
+we get the core server working, as it is only a performance issue.</b>
+<p>We do not do any security or authentication.&nbsp; Someone could get
the code and modify it to spoof messages, trick the server, etc.&nbsp;
RPC has some amount of authentication built into it.&nbsp; I haven't yet
looked into it much to know if we want to use it or just point a user at
it.&nbsp; The changes to the client code are fairly minor, the changes
to our server procs are fairly minor.&nbsp; We would have to add code to
-a <I>sed</I> script or <I>awk</I> script to change the generated server
+a <i>sed</i> script or <i>awk</i> script to change the generated server
code (yet again) in the dispatch routine to perform authentication.
-<P>We will need to get an official program number from Sun.&nbsp; We can
-get this by sending mail to <I>rpc@sun.com</I> and presumably at some point
+<p>We will need to get an official program number from Sun.&nbsp; We can
+get this by sending mail to <i>rpc@sun.com</i> and presumably at some point
they will send us back a program number that we will encode into our XDR
description file.&nbsp; Until we release this we can use a program number
in the "user defined" number space.
-<BR>&nbsp;
-<H2>
-<A NAME="The Server Program"></A>The Server Program</H2>
+<br>&nbsp;
+<h2>
+<a NAME="The Server Program"></a>The Server Program</h2>
The server is a standalone program that the user builds and runs, probably
as a daemon like process.&nbsp; This program is linked against the Berkeley
DB library and the RPC library (which is part of the C library on my FreeBSD
-machine, others may have/need <I>-lrpclib</I>).&nbsp; The server basically
+machine, others may have/need <i>-lrpclib</i>).&nbsp; The server basically
is a slave to the client process.&nbsp; All messages from the client are
synchronous and two-way.&nbsp; The server handles messages one at a time,
and sends a reply back before getting another message.&nbsp; There are
no asynchronous messages generated by the server to the client.
-<P>We have made a choice to modify the generated code for the server.&nbsp;
+<p>We have made a choice to modify the generated code for the server.&nbsp;
The changes will be minimal, generally calling functions we write, that
are in other source files.&nbsp; The first change is adding a call to our
time-out function as described below.&nbsp; The second change is changing
-the name of the generated <I>main()</I> function to <I>__dbsrv_main()</I>,
-and adding our own <I>main()</I> function so that we can parse options,
-and set up other initialization we require.&nbsp; I have a <I>sed</I> script
+the name of the generated <i>main()</i> function to <i>__dbsrv_main()</i>,
+and adding our own <i>main()</i> function so that we can parse options,
+and set up other initialization we require.&nbsp; I have a <i>sed</i> script
that is run from the distribution scripts that massages the generated code
to make these minor changes.
-<P>Primarily the code needed for the server is the collection of the specified
+<p>Primarily the code needed for the server is the collection of the specified
RPC functions.&nbsp; Each function receives the structure indicated, and
our code takes out what it needs and passes the information into DB itself.&nbsp;
The server needs to maintain a translation table for identifiers that we
pass back to the client for the environment, transaction and database handles.
-<P>The table that the server maintains, assuming one client per server
+<p>The table that the server maintains, assuming one client per server
process/thread, should contain the handle to the environment, database
or transaction, a link to maintain parent/child relationships between transactions,
or databases and cursors, this handle's identifier, a type so that we can
@@ -169,7 +169,7 @@ handle's environment entry (for time out/activity purposes).&nbsp; The
table contains, in entries used by environments, a time-out value and an
activity time stamp.&nbsp; Its use is described below for timing out idle
clients.
-<P>Here is how we time out clients in the server.&nbsp; We have to modify
+<p>Here is how we time out clients in the server.&nbsp; We have to modify
the generated server code, but only to add one line during the dispatch
function to run the time-out function.&nbsp; The call is made right before
the return of the dispatch function, after the reply is sent to the client,
@@ -181,16 +181,16 @@ we know we do not need to run through the list of open handles.&nbsp; If
the hint is expired, then we go through the list of open environment handles,
and if they are past their expiration, then we close them and clean up.&nbsp;
If they are not, we set up the hint for the next time.
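+<p>The following sketch shows only the shape of that sweep; the entry and
+hint names are placeholders rather than the real server structures:
+<pre>#include &lt;time.h>
+
+struct env_entry {			/* placeholder for the table entry */
+	time_t last_active;		/* updated by every op in this env */
+	long timeout;			/* per-environment idle timeout (sec) */
+	struct env_entry *next;
+};
+
+static time_t sweep_hint;		/* earliest time anything can expire */
+
+void
+timeout_idle_clients(struct env_entry *envs)
+{
+	struct env_entry *ep;
+	time_t now, next_hint;
+
+	now = time(NULL);
+	if (sweep_hint > now)		/* nothing can have expired yet */
+		return;
+
+	next_hint = 0;
+	for (ep = envs; ep != NULL; ep = ep->next) {
+		if (now - ep->last_active >= ep->timeout) {
+			/* Close this environment and clean up its handles. */
+		} else if (next_hint == 0 ||
+		    next_hint > ep->last_active + ep->timeout)
+			next_hint = ep->last_active + ep->timeout;
+	}
+	sweep_hint = next_hint;
+}</pre>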
-<P>Each entry in the open handle table has a pointer back to its environment's
+<p>Each entry in the open handle table has a pointer back to its environment's
entry.&nbsp; Every operation within this environment can then update the
single environment activity record.&nbsp; Every environment can have a
-different time-out.&nbsp; The <A HREF="../docs/api_c/env_set_server.html">DBENV->set_server
-</A>call
+different time-out.&nbsp; The <a href="../docs/api_c/env_set_rpc_server.html">DBENV->set_rpc_server
+</a>call
takes a server time-out value.&nbsp; If this value is 0 then a default
(currently 5 minutes) is used.&nbsp; This time-out value is only a hint
to the server.&nbsp; It may choose to disregard this value or set the time-out
based on its own implementation.
-<P>For completeness, the flaws of this time-out implementation should be
+<p>For completeness, the flaws of this time-out implementation should be
pointed out.&nbsp; First, it is possible that a client could crash with
open handles, and no other requests come in to the server.&nbsp; Therefore
the time-out function never gets run and those resources are not released
@@ -205,222 +205,222 @@ of 1 minute.&nbsp; If this environment becomes idle (and other operations
are going on), the time-out function will not release that environment
until the original 5 minute hint expires.&nbsp; This is not a problem since
the resources will eventually be released.
-<P>On a similar note, if a client crashes during an RPC, our reply generates
-a SIGPIPE, and our server crashes unless we catch it.&nbsp; Using <I>signal(SIGPIPE,
-SIG_IGN) </I>we can ignore it, and the server will go on.&nbsp; This is
-a call&nbsp; in our <I>main()</I> function that we write.&nbsp; Eventually
+<p>On a similar note, if a client crashes during an RPC, our reply generates
+a SIGPIPE, and our server crashes unless we catch it.&nbsp; Using <i>signal(SIGPIPE,
+SIG_IGN) </i>we can ignore it, and the server will go on.&nbsp; This is
+a call&nbsp; in our <i>main()</i> function that we write.&nbsp; Eventually
this client's handles would be timed out as described above.&nbsp; We need
this only for the unfortunate window of a client crashing during the RPC.
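+<p>That call is a single line; a sketch of it in the main() we write,
+assuming only the standard signal() interface (option parsing and server
+startup would follow):
+<pre>#include &lt;signal.h>
+
+int
+main(void)
+{
+	/*
+	 * A client dying mid-RPC makes our reply raise SIGPIPE;
+	 * ignore it so the server keeps running and the dead
+	 * client's handles are reclaimed later by the time-out.
+	 */
+	(void)signal(SIGPIPE, SIG_IGN);
+
+	/* ... parse options, then hand off to the generated server loop. */
+	return (0);
+}</pre>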
-<P>The options below are primarily for control of the program itself,.&nbsp;
+<p>The options below are primarily for control of the program itself.&nbsp;
Details relating to databases and environments should be passed from the
client to the server, since the server can serve many clients, many environments
and many databases.&nbsp; Therefore it makes more sense for the client
to set the cache size of its own environment, rather than setting a default
cachesize on the server that applies as a blanket to any environment it
may be called upon to open.&nbsp; Options are:
-<UL>
-<LI>
-<B>-t&nbsp;</B> to set the default time-out given to an environment.</LI>
+<ul>
+<li>
+<b>-t&nbsp;</b> to set the default time-out given to an environment.</li>
-<LI>
-<B>-T</B> to set the maximum time-out allowed for the server.</LI>
+<li>
+<b>-T</b> to set the maximum time-out allowed for the server.</li>
-<LI>
-<B>-L</B> to log the execution of the server process to a specified file.</LI>
+<li>
+<b>-L</b> to log the execution of the server process to a specified file.</li>
-<LI>
-<B>-v</B> to run in verbose mode.</LI>
+<li>
+<b>-v</b> to run in verbose mode.</li>
-<LI>
-<B>-M</B>&nbsp; to specify the maximum number of outstanding child server
+<li>
+<b>-M</b>&nbsp; to specify the maximum number of outstanding child server
processes/threads we can have at any given time.&nbsp; The default is 10.
-<B>[We
-are not yet doing multiple threads/processes.]</B></LI>
-</UL>
+<b>[We
+are not yet doing multiple threads/processes.]</b></li>
+</ul>
-<H2>
-The Client Code</H2>
+<h2>
+The Client Code</h2>
The client code contains all of the supported functions and methods used
-in this model.&nbsp; There are several methods in the <I>__db_env
-</I>and
-<I>__db</I>
+in this model.&nbsp; There are several methods in the <i>__db_env
+</i>and
+<i>__db</i>
structures that currently do not apply, such as the callbacks.&nbsp; Those
fields that are not applicable to the client model point to NULL to notify
the user of their error.&nbsp; Some method functions remain unchanged,
as well such as the error calls.
-<P>The client code contains each method function that goes along with the
-<A HREF="#Remote Procedure Calls">RPC
-calls</A> described elsewhere.&nbsp; The client library also contains its
-own version of <A HREF="../docs/api_c/env_create.html">db_env_create()</A>,
+<p>The client code contains each method function that goes along with the
+<a href="#Remote Procedure Calls">RPC
+calls</a> described elsewhere.&nbsp; The client library also contains its
+own version of <a href="../docs/api_c/env_create.html">db_env_create()</a>,
which does not result in any messages going over to the server (since we
do not yet know what server we are talking to).&nbsp; This function sets
up the pointers to the correct client functions.
-<P>All of the method functions that handle the messaging have a basic flow
+<p>All of the method functions that handle the messaging have a basic flow
similar to this:
-<UL>
-<LI>
-Local arg parsing that may be needed</LI>
+<ul>
+<li>
+Local arg parsing that may be needed</li>
-<LI>
+<li>
Marshalling the message header and the arguments we need to send to the
-server</LI>
+server</li>
-<LI>
-Sending the message</LI>
+<li>
+Sending the message</li>
-<LI>
-Receiving a reply</LI>
+<li>
+Receiving a reply</li>
-<LI>
-Unmarshalling the reply</LI>
+<li>
+Unmarshalling the reply</li>
-<LI>
-Local results processing that may be needed</LI>
-</UL>
+<li>
+Local results processing that may be needed</li>
+</ul>
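+<p>Schematically (the message, reply and function names below are
+placeholders, not the generated ones), each such client method looks like:
+<pre>/* Hypothetical stand-ins for the rpcgen-generated types and stub. */
+typedef struct { long dbenvcl_id; unsigned int flags; } __example_msg;
+typedef struct { int status; } __example_reply;
+
+static __example_reply *
+__db_example_4001(__example_msg *msg)	/* stand-in for the RPC stub */
+{
+	static __example_reply reply;
+	reply.status = (msg == NULL) ? -1 : 0;
+	return (&reply);
+}
+
+int
+__dbcl_example(long dbenvcl_id, unsigned int flags)
+{
+	__example_msg msg;
+	__example_reply *replyp;
+
+	/* Local argument parsing would go here. */
+
+	/* Marshal the arguments into the request message. */
+	msg.dbenvcl_id = dbenvcl_id;
+	msg.flags = flags;
+
+	/* Send the message and receive the reply: one RPC round trip. */
+	replyp = __db_example_4001(&msg);
+	if (replyp == NULL)
+		return (-1);	/* e.g. DB_NOSERVER in the real client code */
+
+	/* Unmarshal the reply; local results processing would go here. */
+	return (replyp->status);
+}</pre>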
-<H2>
-Generated Code</H2>
+<h2>
+Generated Code</h2>
Almost all of the code is generated from a source file describing the interface
-and an <I>awk</I> script.&nbsp;&nbsp; This awk script generates six (6)
+and an <i>awk</i> script.&nbsp;&nbsp; This awk script generates six (6)
files for us.&nbsp; It also modifies one.&nbsp; The files are:
-<OL>
-<LI>
-Client file - The C source file created containing the client code.</LI>
+<ol>
+<li>
+Client file - The C source file created containing the client code.</li>
-<LI>
+<li>
Client template file - The C template source file created containing interfaces
for handling client-local issues such as resource allocation, but with
-a consistent interface with the client code generated.</LI>
+a consistent interface with the client code generated.</li>
-<LI>
-Server file - The C source file created containing the server code.</LI>
+<li>
+Server file - The C source file created containing the server code.</li>
-<LI>
+<li>
Server template file - The C template source file created containing interfaces
for handling server-local issues such as resource allocation, calling into
-the DB library but with a consistent interface with the server code generated.</LI>
+the DB library but with a consistent interface with the server code generated.</li>
-<LI>
-XDR file - The XDR message description file created.</LI>
+<li>
+XDR file - The XDR message description file created.</li>
-<LI>
+<li>
Server sed file - A sed script that contains commands to apply to the server
procedure file (i.e. the real source file that the server template file
becomes) so that minor interface changes can be consistently and easily
-applied to the real code.</LI>
+applied to the real code.</li>
-<LI>
+<li>
Server procedure file - This is the file that is modified by the sed script
-generated.&nbsp; It originated from the server template file.</LI>
-</OL>
-The awk script reads a source file, <I>db_server/rpc.src </I>that describes
+generated.&nbsp; It originated from the server template file.</li>
+</ol>
+The awk script reads a source file, <i>db_server/rpc.src </i>that describes
each operation and what sorts of arguments it takes and what it returns
from the server.&nbsp; The syntax of the source file describes the interface
to that operation.&nbsp; There are four (4) parts to the syntax:
-<OL>
-<LI>
-<B>BEGIN</B> <B><I>function version# codetype</I></B> - begins a new functional
-interface for the given <B><I>function</I></B>.&nbsp; Each function has
-a <B><I>version number</I></B>, currently all of them are at version number
-one (1).&nbsp; The <B><I>code type</I></B> indicates to the awk script
-what kind of code to generate.&nbsp; The choices are:</LI>
-
-<UL>
-<LI>
-<B>CODE </B>- Generate all code, and return a status value.&nbsp; If specified,
+<ol>
+<li>
+<b>BEGIN</b> <b><i>function version# codetype</i></b> - begins a new functional
+interface for the given <b><i>function</i></b>.&nbsp; Each function has
+a <b><i>version number</i></b>, currently all of them are at version number
+one (1).&nbsp; The <b><i>code type</i></b> indicates to the awk script
+what kind of code to generate.&nbsp; The choices are:</li>
+
+<ul>
+<li>
+<b>CODE </b>- Generate all code, and return a status value.&nbsp; If specified,
the client code will simply return the status to the user upon completion
-of the RPC call.</LI>
+of the RPC call.</li>
-<LI>
-<B>RETCODE </B>- Generate all code and call a return function in the client
+<li>
+<b>RETCODE </b>- Generate all code and call a return function in the client
template file to deal with client issues or with other returned items.&nbsp;
If specified, the client code generated will call a function of the form
-<I>__dbcl_&lt;name>_ret()
-</I>where
+<i>__dbcl_&lt;name>_ret()
+</i>where
&lt;name> is replaced with the function name given here.&nbsp; This function
is placed in the template file because this indicates that something special
must occur on return.&nbsp; The arguments to this function are the same
as those for the client function, with the addition of the reply message
-structure.</LI>
+structure.</li>
-<LI>
-<B>NOCLNTCODE - </B>Generate XDR and server code, but no corresponding
+<li>
+<b>NOCLNTCODE - </b>Generate XDR and server code, but no corresponding
client code. (This is used for functions that are not named the same thing
on both sides.&nbsp; The only use of this at the moment is db_env_create
and db_create.&nbsp; The environment create call to the server is actually
-called from the <A HREF="../docs/api_c/env_set_server.html">DBENV->set_server()</A>
+called from the <a href="../docs/api_c/env_set_rpc_server.html">DBENV->set_rpc_server()</a>
method.&nbsp; The db_create code exists elsewhere in the library and we
-modify that code for the client call.)</LI>
-</UL>
-
-<LI>
-<B>ARG <I>RPC-type C-type varname [list-type]</I></B>- each line of this
-describes an argument to the function.&nbsp; The argument is called <B><I>varname</I></B>.&nbsp;
-The <B><I>C-type</I></B> given is what it should look like in the C code
-generated, such as <B>DB *, u_int32_t, const char *</B>.&nbsp; The
-<B><I>RPC-type</I></B>
+modify that code for the client call.)</li>
+</ul>
+
+<li>
+<b>ARG <i>RPC-type C-type varname [list-type]</i></b>- each line of this
+describes an argument to the function.&nbsp; The argument is called <b><i>varname</i></b>.&nbsp;
+The <b><i>C-type</i></b> given is what it should look like in the C code
+generated, such as <b>DB *, u_int32_t, const char *</b>.&nbsp; The
+<b><i>RPC-type</i></b>
is an indication about how the RPC request message should be constructed.&nbsp;
-The RPC-types allowed are described below.</LI>
+The RPC-types allowed are described below.</li>
-<LI>
-<B>RET <I>RPC-type C-type varname [list-type]</I></B>- each line of this
+<li>
+<b>RET <i>RPC-type C-type varname [list-type]</i></b>- each line of this
describes what the server should return from this procedure call (in addition
to a status, which is always returned and should not be specified).&nbsp;
-The argument is called <B><I>varname</I></B>.&nbsp; The <B><I>C-type</I></B>
-given is what it should look like in the C code generated, such as <B>DB
-*, u_int32_t, const char *</B>.&nbsp; The <B><I>RPC-type</I></B> is an
+The argument is called <b><i>varname</i></b>.&nbsp; The <b><i>C-type</i></b>
+given is what it should look like in the C code generated, such as <b>DB
+*, u_int32_t, const char *</b>.&nbsp; The <b><i>RPC-type</i></b> is an
indication about how the RPC reply message should be constructed.&nbsp;
-The RPC-types are described below.</LI>
-
-<LI>
-<B>END </B>- End the description of this function.&nbsp; The result is
-that when the awk script encounters the <B>END</B> tag, it now has all
-the information it needs to construct the generated code for this function.</LI>
-</OL>
-The <B><I>RPC-type</I></B> must be one of the following:
-<UL>
-<LI>
-<B>IGNORE </B>- This argument is not passed to the server and should be
-ignored when constructing the XDR code.&nbsp; <B>Only allowed for an ARG
-specfication.</B></LI>
-
-<LI>
-<B>STRING</B> - This argument is a string.</LI>
-
-<LI>
-<B>INT </B>- This argument is an integer of some sort.</LI>
-
-<LI>
-<B>DBT </B>- This argument is a DBT, resulting in its decomposition into
-the request message.</LI>
-
-<LI>
-<B>LIST</B> - This argument is an opaque list passed to the server (NULL-terminated).&nbsp;
-If an argument of this type is given, it must have a <B><I>list-type</I></B>
-specified that is one of:</LI>
-
-<UL>
-<LI>
-<B>STRING</B></LI>
-
-<LI>
-<B>INT</B></LI>
-
-<LI>
-<B>ID</B>.</LI>
-</UL>
-
-<LI>
-<B>ID</B> - This argument is an identifier.</LI>
-</UL>
+The RPC-types are described below.</li>
+
+<li>
+<b>END </b>- End the description of this function.&nbsp; The result is
+that when the awk script encounters the <b>END</b> tag, it now has all
+the information it needs to construct the generated code for this function.</li>
+</ol>
+The <b><i>RPC-type</i></b> must be one of the following:
+<ul>
+<li>
+<b>IGNORE </b>- This argument is not passed to the server and should be
+ignored when constructing the XDR code.&nbsp; <b>Only allowed for an ARG
+specification.</b></li>
+
+<li>
+<b>STRING</b> - This argument is a string.</li>
+
+<li>
+<b>INT </b>- This argument is an integer of some sort.</li>
+
+<li>
+<b>DBT </b>- This argument is a DBT, resulting in its decomposition into
+the request message.</li>
+
+<li>
+<b>LIST</b> - This argument is an opaque list passed to the server (NULL-terminated).&nbsp;
+If an argument of this type is given, it must have a <b><i>list-type</i></b>
+specified that is one of:</li>
+
+<ul>
+<li>
+<b>STRING</b></li>
+
+<li>
+<b>INT</b></li>
+
+<li>
+<b>ID</b>.</li>
+</ul>
+
+<li>
+<b>ID</b> - This argument is an identifier.</li>
+</ul>
So, for example, the source for the DB->join RPC call looks like:
-<PRE>BEGIN&nbsp;&nbsp; dbjoin&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; 1&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; RETCODE
+<pre>BEGIN&nbsp;&nbsp; dbjoin&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; 1&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; RETCODE
ARG&nbsp;&nbsp;&nbsp;&nbsp; ID&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; DB *&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; dbp&nbsp;
ARG&nbsp;&nbsp;&nbsp;&nbsp; LIST&nbsp;&nbsp;&nbsp; DBC **&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; curs&nbsp;&nbsp;&nbsp; ID
ARG&nbsp;&nbsp;&nbsp;&nbsp; IGNORE&nbsp; DBC **&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; dbcpp&nbsp;
ARG&nbsp;&nbsp;&nbsp;&nbsp; INT&nbsp;&nbsp;&nbsp;&nbsp; u_int32_t&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; flags
RET&nbsp;&nbsp;&nbsp;&nbsp; ID&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; long&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; dbcid
-END</PRE>
+END</pre>
Our first line tells us we are writing the dbjoin function.&nbsp; It requires
special code on the client so we indicate that with the RETCODE.&nbsp;
This method takes four arguments.&nbsp; For the RPC request we need the
@@ -429,17 +429,17 @@ the cursor list, we ignore the argument to return the cursor handle to
the user, and we pass along the flags.&nbsp; On the return, the reply contains
a status, by default, and additionally, it contains the ID of the newly
created cursor.
-<H2>
-Building and Installing</H2>
+<h2>
+Building and Installing</h2>
I need to verify with Don Anderson, but I believe we should just build
the server program, just like we do for db_stat, db_checkpoint, etc.&nbsp;
Basically it can be treated as a utility program from the building and
installation perspective.
-<P>As mentioned early on, in the section on <A HREF="#DB Modifications">DB
-Modifications</A>, we have a single library, but allowing the user to access
-the client portion by sending a flag to <A HREF="../docs/api_c/env_create.html">db_env_create()</A>.&nbsp;
+<p>As mentioned early on, in the section on <a href="#DB Modifications">DB
+Modifications</a>, we have a single library, but allowing the user to access
+the client portion by sending a flag to <a href="../docs/api_c/env_create.html">db_env_create()</a>.&nbsp;
The Makefile is modified to include the new files.
-<P>Testing is performed in two ways.&nbsp; First I have a new example program,
+<p>Testing is performed in two ways.&nbsp; First I have a new example program,
that should become part of the example directory.&nbsp; It is basically
a merging of ex_access.c and ex_env.c.&nbsp; This example is adequate to
test basic functionality, as it just does database put/get calls and
@@ -449,5 +449,5 @@ I am going to modify the Tcl interface to accept the server information.&nbsp;
Nothing else should need to change in Tcl.&nbsp; Then we can either write
our own test modules or use a subset of the existing ones to test functionality
on a regular basis.
-</BODY>
-</HTML>
+</body>
+</html>
diff --git a/bdb/rpc_server/cxx/db_server_cxxproc.cpp b/bdb/rpc_server/cxx/db_server_cxxproc.cpp
new file mode 100644
index 00000000000..25278273555
--- /dev/null
+++ b/bdb/rpc_server/cxx/db_server_cxxproc.cpp
@@ -0,0 +1,2200 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2001-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifdef HAVE_RPC
+#ifndef lint
+static const char revid[] = "$Id: db_server_cxxproc.cpp,v 1.12 2002/08/09 01:56:08 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <rpc/rpc.h>
+
+#include <string.h>
+#endif
+#include "dbinc_auto/db_server.h"
+
+#include "db_int.h"
+#include "db_cxx.h"
+
+extern "C" {
+#include "dbinc/db_server_int.h"
+#include "dbinc_auto/rpc_server_ext.h"
+}
+
+/* BEGIN __env_cachesize_proc */
+extern "C" void
+__env_cachesize_proc(
+ long dbenvcl_id,
+ u_int32_t gbytes,
+ u_int32_t bytes,
+ u_int32_t ncache,
+ __env_cachesize_reply *replyp)
+/* END __env_cachesize_proc */
+{
+ DbEnv *dbenv;
+ ct_entry *dbenv_ctp;
+ int ret;
+
+ ACTIVATE_CTP(dbenv_ctp, dbenvcl_id, CT_ENV);
+ dbenv = (DbEnv *)dbenv_ctp->ct_anyp;
+
+ ret = dbenv->set_cachesize(gbytes, bytes, ncache);
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __env_close_proc */
+extern "C" void
+__env_close_proc(
+ long dbenvcl_id,
+ u_int32_t flags,
+ __env_close_reply *replyp)
+/* END __env_close_proc */
+{
+ ct_entry *dbenv_ctp;
+
+ ACTIVATE_CTP(dbenv_ctp, dbenvcl_id, CT_ENV);
+ replyp->status = __dbenv_close_int(dbenvcl_id, flags, 0);
+ return;
+}
+
+/* BEGIN __env_create_proc */
+extern "C" void
+__env_create_proc(
+ u_int32_t timeout,
+ __env_create_reply *replyp)
+/* END __env_create_proc */
+{
+ DbEnv *dbenv;
+ ct_entry *ctp;
+
+ ctp = new_ct_ent(&replyp->status);
+ if (ctp == NULL)
+ return;
+
+ dbenv = new DbEnv(DB_CXX_NO_EXCEPTIONS);
+ ctp->ct_envp = dbenv;
+ ctp->ct_type = CT_ENV;
+ ctp->ct_parent = NULL;
+ ctp->ct_envparent = ctp;
+ __dbsrv_settimeout(ctp, timeout);
+ __dbsrv_active(ctp);
+ replyp->envcl_id = ctp->ct_id;
+
+ replyp->status = 0;
+ return;
+}
+
+/* BEGIN __env_dbremove_proc */
+extern "C" void
+__env_dbremove_proc(
+ long dbenvcl_id,
+ long txnpcl_id,
+ char *name,
+ char *subdb,
+ u_int32_t flags,
+ __env_dbremove_reply *replyp)
+/* END __env_dbremove_proc */
+{
+ int ret;
+ DbEnv *dbenv;
+ DbTxn *txnp;
+ ct_entry *dbenv_ctp, *txnp_ctp;
+
+ ACTIVATE_CTP(dbenv_ctp, dbenvcl_id, CT_ENV);
+ dbenv = (DbEnv *)dbenv_ctp->ct_anyp;
+
+ if (txnpcl_id != 0) {
+ ACTIVATE_CTP(txnp_ctp, txnpcl_id, CT_TXN);
+ txnp = (DbTxn *)txnp_ctp->ct_anyp;
+ } else
+ txnp = NULL;
+
+ ret = dbenv->dbremove(txnp, name, subdb, flags);
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __env_dbrename_proc */
+void
+__env_dbrename_proc(
+ long dbenvcl_id,
+ long txnpcl_id,
+ char *name,
+ char *subdb,
+ char *newname,
+ u_int32_t flags,
+ __env_dbrename_reply *replyp)
+/* END __env_dbrename_proc */
+{
+ int ret;
+ DbEnv *dbenv;
+ DbTxn *txnp;
+ ct_entry *dbenv_ctp, *txnp_ctp;
+
+ ACTIVATE_CTP(dbenv_ctp, dbenvcl_id, CT_ENV);
+ dbenv = (DbEnv *)dbenv_ctp->ct_anyp;
+
+ if (txnpcl_id != 0) {
+ ACTIVATE_CTP(txnp_ctp, txnpcl_id, CT_TXN);
+ txnp = (DbTxn *)txnp_ctp->ct_anyp;
+ } else
+ txnp = NULL;
+
+ ret = dbenv->dbrename(txnp, name, subdb, newname, flags);
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __env_encrypt_proc */
+extern "C" void
+__env_encrypt_proc(
+ long dbenvcl_id,
+ char *passwd,
+ u_int32_t flags,
+ __env_encrypt_reply *replyp)
+/* END __env_encrypt_proc */
+{
+ DbEnv *dbenv;
+ ct_entry *dbenv_ctp;
+ int ret;
+
+ ACTIVATE_CTP(dbenv_ctp, dbenvcl_id, CT_ENV);
+ dbenv = (DbEnv *)dbenv_ctp->ct_anyp;
+
+ ret = dbenv->set_encrypt(passwd, flags);
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __env_flags_proc */
+extern "C" void
+__env_flags_proc(
+ long dbenvcl_id,
+ u_int32_t flags,
+ u_int32_t onoff,
+ __env_flags_reply *replyp)
+/* END __env_flags_proc */
+{
+ DbEnv *dbenv;
+ ct_entry *dbenv_ctp;
+ int ret;
+
+ ACTIVATE_CTP(dbenv_ctp, dbenvcl_id, CT_ENV);
+ dbenv = (DbEnv *)dbenv_ctp->ct_anyp;
+
+ ret = dbenv->set_flags(flags, onoff);
+ if (onoff)
+ dbenv_ctp->ct_envdp.onflags = flags;
+ else
+ dbenv_ctp->ct_envdp.offflags = flags;
+
+ replyp->status = ret;
+ return;
+}
+/* BEGIN __env_open_proc */
+extern "C" void
+__env_open_proc(
+ long dbenvcl_id,
+ char *home,
+ u_int32_t flags,
+ u_int32_t mode,
+ __env_open_reply *replyp)
+/* END __env_open_proc */
+{
+ DbEnv *dbenv;
+ ct_entry *dbenv_ctp, *new_ctp;
+ u_int32_t newflags, shareflags;
+ int ret;
+ home_entry *fullhome;
+
+ ACTIVATE_CTP(dbenv_ctp, dbenvcl_id, CT_ENV);
+ dbenv = (DbEnv *)dbenv_ctp->ct_anyp;
+ fullhome = get_home(home);
+ if (fullhome == NULL) {
+ ret = DB_NOSERVER_HOME;
+ goto out;
+ }
+
+ /*
+ * If they are using locking do deadlock detection for them,
+ * internally.
+ */
+ if ((flags & DB_INIT_LOCK) &&
+ (ret = dbenv->set_lk_detect(DB_LOCK_DEFAULT)) != 0)
+ goto out;
+
+ if (__dbsrv_verbose) {
+ dbenv->set_errfile(stderr);
+ dbenv->set_errpfx(fullhome->home);
+ }
+
+ /*
+ * Mask off flags we ignore
+ */
+ newflags = (flags & ~DB_SERVER_FLAGMASK);
+ shareflags = (newflags & DB_SERVER_ENVFLAGS);
+ /*
+ * Check now whether we can share a handle for this env.
+ */
+ replyp->envcl_id = dbenvcl_id;
+ if ((new_ctp = __dbsrv_shareenv(dbenv_ctp, fullhome, shareflags))
+ != NULL) {
+ /*
+ * We can share, clean up old ID, set new one.
+ */
+ if (__dbsrv_verbose)
+ printf("Sharing env ID %ld\n", new_ctp->ct_id);
+ replyp->envcl_id = new_ctp->ct_id;
+ ret = __dbenv_close_int(dbenvcl_id, 0, 0);
+ } else {
+ ret = dbenv->open(fullhome->home, newflags, mode);
+ dbenv_ctp->ct_envdp.home = fullhome;
+ dbenv_ctp->ct_envdp.envflags = shareflags;
+ }
+out: replyp->status = ret;
+ return;
+}
+
+/* BEGIN __env_remove_proc */
+extern "C" void
+__env_remove_proc(
+ long dbenvcl_id,
+ char *home,
+ u_int32_t flags,
+ __env_remove_reply *replyp)
+/* END __env_remove_proc */
+{
+ DbEnv *dbenv;
+ ct_entry *dbenv_ctp;
+ int ret;
+ home_entry *fullhome;
+
+ ACTIVATE_CTP(dbenv_ctp, dbenvcl_id, CT_ENV);
+ dbenv = (DbEnv *)dbenv_ctp->ct_anyp;
+ fullhome = get_home(home);
+ if (fullhome == NULL) {
+ replyp->status = DB_NOSERVER_HOME;
+ return;
+ }
+
+ ret = dbenv->remove(fullhome->home, flags);
+ __dbdel_ctp(dbenv_ctp);
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __txn_abort_proc */
+extern "C" void
+__txn_abort_proc(
+ long txnpcl_id,
+ __txn_abort_reply *replyp)
+/* END __txn_abort_proc */
+{
+ DbTxn *txnp;
+ ct_entry *txnp_ctp;
+ int ret;
+
+ ACTIVATE_CTP(txnp_ctp, txnpcl_id, CT_TXN);
+ txnp = (DbTxn *)txnp_ctp->ct_anyp;
+
+ ret = txnp->abort();
+ __dbdel_ctp(txnp_ctp);
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __txn_begin_proc */
+extern "C" void
+__txn_begin_proc(
+ long dbenvcl_id,
+ long parentcl_id,
+ u_int32_t flags,
+ __txn_begin_reply *replyp)
+/* END __txn_begin_proc */
+{
+ DbEnv *dbenv;
+ DbTxn *parent, *txnp;
+ ct_entry *ctp, *dbenv_ctp, *parent_ctp;
+ int ret;
+
+ ACTIVATE_CTP(dbenv_ctp, dbenvcl_id, CT_ENV);
+ dbenv = (DbEnv *)dbenv_ctp->ct_anyp;
+ parent_ctp = NULL;
+
+ ctp = new_ct_ent(&replyp->status);
+ if (ctp == NULL)
+ return;
+
+ if (parentcl_id != 0) {
+ ACTIVATE_CTP(parent_ctp, parentcl_id, CT_TXN);
+ parent = (DbTxn *)parent_ctp->ct_anyp;
+ ctp->ct_activep = parent_ctp->ct_activep;
+ } else
+ parent = NULL;
+
+ ret = dbenv->txn_begin(parent, &txnp, flags);
+ if (ret == 0) {
+ ctp->ct_txnp = txnp;
+ ctp->ct_type = CT_TXN;
+ ctp->ct_parent = parent_ctp;
+ ctp->ct_envparent = dbenv_ctp;
+ replyp->txnidcl_id = ctp->ct_id;
+ __dbsrv_settimeout(ctp, dbenv_ctp->ct_timeout);
+ __dbsrv_active(ctp);
+ } else
+ __dbclear_ctp(ctp);
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __txn_commit_proc */
+extern "C" void
+__txn_commit_proc(
+ long txnpcl_id,
+ u_int32_t flags,
+ __txn_commit_reply *replyp)
+/* END __txn_commit_proc */
+{
+ DbTxn *txnp;
+ ct_entry *txnp_ctp;
+ int ret;
+
+ ACTIVATE_CTP(txnp_ctp, txnpcl_id, CT_TXN);
+ txnp = (DbTxn *)txnp_ctp->ct_anyp;
+
+ ret = txnp->commit(flags);
+ __dbdel_ctp(txnp_ctp);
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __txn_discard_proc */
+extern "C" void
+__txn_discard_proc(
+ long txnpcl_id,
+ u_int32_t flags,
+ __txn_discard_reply *replyp)
+/* END __txn_discard_proc */
+{
+ DbTxn *txnp;
+ ct_entry *txnp_ctp;
+ int ret;
+
+ ACTIVATE_CTP(txnp_ctp, txnpcl_id, CT_TXN);
+ txnp = (DbTxn *)txnp_ctp->ct_anyp;
+
+ ret = txnp->discard(flags);
+ __dbdel_ctp(txnp_ctp);
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __txn_prepare_proc */
+extern "C" void
+__txn_prepare_proc(
+ long txnpcl_id,
+ u_int8_t *gid,
+ __txn_prepare_reply *replyp)
+/* END __txn_prepare_proc */
+{
+ DbTxn *txnp;
+ ct_entry *txnp_ctp;
+ int ret;
+
+ ACTIVATE_CTP(txnp_ctp, txnpcl_id, CT_TXN);
+ txnp = (DbTxn *)txnp_ctp->ct_anyp;
+
+ ret = txnp->prepare(gid);
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __txn_recover_proc */
+extern "C" void
+__txn_recover_proc(
+ long dbenvcl_id,
+ u_int32_t count,
+ u_int32_t flags,
+ __txn_recover_reply *replyp,
+ int * freep)
+/* END __txn_recover_proc */
+{
+ DbEnv *dbenv;
+ DbPreplist *dbprep, *p;
+ ct_entry *dbenv_ctp, *ctp;
+ long erri, i, retcount;
+ u_int32_t *txnidp;
+ int ret;
+ char *gid;
+
+ ACTIVATE_CTP(dbenv_ctp, dbenvcl_id, CT_ENV);
+ dbenv = (DbEnv *)dbenv_ctp->ct_anyp;
+ *freep = 0;
+
+ if ((ret =
+ __os_malloc(dbenv->get_DB_ENV(), count * sizeof(DbPreplist), &dbprep)) != 0)
+ goto out;
+ if ((ret =
+ dbenv->txn_recover(dbprep, count, &retcount, flags)) != 0)
+ goto out;
+ /*
+ * If there is nothing, success, but it's easy.
+ */
+ replyp->retcount = retcount; // TODO: fix C++ txn_recover
+ if (retcount == 0) {
+ replyp->txn.txn_val = NULL;
+ replyp->txn.txn_len = 0;
+ replyp->gid.gid_val = NULL;
+ replyp->gid.gid_len = 0;
+ }
+
+ /*
+ * We have our txn list. Now we need to allocate the space for
+ * the txn ID array and the GID array and set them up.
+ */
+ if ((ret = __os_calloc(dbenv->get_DB_ENV(), retcount, sizeof(u_int32_t),
+ &replyp->txn.txn_val)) != 0)
+ goto out;
+ replyp->txn.txn_len = retcount * sizeof(u_int32_t);
+ if ((ret = __os_calloc(dbenv->get_DB_ENV(), retcount, DB_XIDDATASIZE,
+ &replyp->gid.gid_val)) != 0) {
+ __os_free(dbenv->get_DB_ENV(), replyp->txn.txn_val);
+ goto out;
+ }
+ replyp->gid.gid_len = retcount * DB_XIDDATASIZE;
+
+ /*
+ * Now walk through our results, creating parallel arrays
+ * to send back. For each entry we need to create a new
+ * txn ctp and then fill in the array info.
+ */
+ i = 0;
+ p = dbprep;
+ gid = replyp->gid.gid_val;
+ txnidp = replyp->txn.txn_val;
+ while (i++ < retcount) {
+ ctp = new_ct_ent(&ret);
+ if (ret != 0) {
+ i--;
+ goto out2;
+ }
+ ctp->ct_txnp = p->txn;
+ ctp->ct_type = CT_TXN;
+ ctp->ct_parent = NULL;
+ ctp->ct_envparent = dbenv_ctp;
+ __dbsrv_settimeout(ctp, dbenv_ctp->ct_timeout);
+ __dbsrv_active(ctp);
+
+ *txnidp = ctp->ct_id;
+ memcpy(gid, p->gid, DB_XIDDATASIZE);
+
+ p++;
+ txnidp++;
+ gid += DB_XIDDATASIZE;
+ }
+ /*
+ * If we get here, we have success and we have to set freep
+ * so it'll get properly freed next time.
+ */
+ *freep = 1;
+out:
+ if (dbprep != NULL)
+ __os_free(dbenv->get_DB_ENV(), dbprep);
+ replyp->status = ret;
+ return;
+out2:
+ /*
+ * We had an error in the middle of creating our new txn
+ * ct entries. We have to unwind all that we have done. Ugh.
+ */
+ for (txnidp = replyp->txn.txn_val, erri = 0;
+ erri < i; erri++, txnidp++) {
+ ctp = get_tableent(*txnidp);
+ __dbclear_ctp(ctp);
+ }
+ __os_free(dbenv->get_DB_ENV(), replyp->txn.txn_val);
+ __os_free(dbenv->get_DB_ENV(), replyp->gid.gid_val);
+ __os_free(dbenv->get_DB_ENV(), dbprep);
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __db_bt_maxkey_proc */
+extern "C" void
+__db_bt_maxkey_proc(
+ long dbpcl_id,
+ u_int32_t maxkey,
+ __db_bt_maxkey_reply *replyp)
+/* END __db_bt_maxkey_proc */
+{
+ Db *dbp;
+ ct_entry *dbp_ctp;
+ int ret;
+
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (Db *)dbp_ctp->ct_anyp;
+
+ ret = dbp->set_bt_maxkey(maxkey);
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __db_associate_proc */
+extern "C" void
+__db_associate_proc(
+ long dbpcl_id,
+ long txnpcl_id,
+ long sdbpcl_id,
+ u_int32_t flags,
+ __db_associate_reply *replyp)
+/* END __db_associate_proc */
+{
+ Db *dbp, *sdbp;
+ DbTxn *txnp;
+ ct_entry *dbp_ctp, *sdbp_ctp, *txnp_ctp;
+ int ret;
+
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (Db *)dbp_ctp->ct_anyp;
+ ACTIVATE_CTP(sdbp_ctp, sdbpcl_id, CT_DB);
+ sdbp = (Db *)sdbp_ctp->ct_anyp;
+ if (txnpcl_id != 0) {
+ ACTIVATE_CTP(txnp_ctp, txnpcl_id, CT_TXN);
+ txnp = (DbTxn *)txnp_ctp->ct_anyp;
+ } else
+ txnp = NULL;
+
+ /*
+ * We do not support DB_CREATE for associate. Users
+ * can only access secondary indices on a read-only basis,
+ * so whatever they are looking for needs to be there already.
+ */
+ if (flags != 0)
+ ret = EINVAL;
+ else
+ ret = dbp->associate(txnp, sdbp, NULL, flags);
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __db_bt_minkey_proc */
+extern "C" void
+__db_bt_minkey_proc(
+ long dbpcl_id,
+ u_int32_t minkey,
+ __db_bt_minkey_reply *replyp)
+/* END __db_bt_minkey_proc */
+{
+ Db *dbp;
+ ct_entry *dbp_ctp;
+ int ret;
+
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (Db *)dbp_ctp->ct_anyp;
+
+ ret = dbp->set_bt_minkey(minkey);
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __db_close_proc */
+extern "C" void
+__db_close_proc(
+ long dbpcl_id,
+ u_int32_t flags,
+ __db_close_reply *replyp)
+/* END __db_close_proc */
+{
+ ct_entry *dbp_ctp;
+
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ replyp->status = __db_close_int(dbpcl_id, flags);
+ return;
+}
+
+/* BEGIN __db_create_proc */
+extern "C" void
+__db_create_proc(
+ long dbenvcl_id,
+ u_int32_t flags,
+ __db_create_reply *replyp)
+/* END __db_create_proc */
+{
+ Db *dbp;
+ DbEnv *dbenv;
+ ct_entry *dbenv_ctp, *dbp_ctp;
+
+ ACTIVATE_CTP(dbenv_ctp, dbenvcl_id, CT_ENV);
+ dbenv = (DbEnv *)dbenv_ctp->ct_anyp;
+
+ dbp_ctp = new_ct_ent(&replyp->status);
+ if (dbp_ctp == NULL)
+ return ;
+ /*
+ * We actually require env's for databases. The client should
+ * have caught it, but just in case.
+ */
+ DB_ASSERT(dbenv != NULL);
+ dbp = new Db(dbenv, flags);
+ dbp_ctp->ct_dbp = dbp;
+ dbp_ctp->ct_type = CT_DB;
+ dbp_ctp->ct_parent = dbenv_ctp;
+ dbp_ctp->ct_envparent = dbenv_ctp;
+ replyp->dbcl_id = dbp_ctp->ct_id;
+ replyp->status = 0;
+ return;
+}
+
+/* BEGIN __db_del_proc */
+extern "C" void
+__db_del_proc(
+ long dbpcl_id,
+ long txnpcl_id,
+ u_int32_t keydlen,
+ u_int32_t keydoff,
+ u_int32_t keyulen,
+ u_int32_t keyflags,
+ void *keydata,
+ u_int32_t keysize,
+ u_int32_t flags,
+ __db_del_reply *replyp)
+/* END __db_del_proc */
+{
+ Db *dbp;
+ DbTxn *txnp;
+ ct_entry *dbp_ctp, *txnp_ctp;
+ int ret;
+
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (Db *)dbp_ctp->ct_anyp;
+ if (txnpcl_id != 0) {
+ ACTIVATE_CTP(txnp_ctp, txnpcl_id, CT_TXN);
+ txnp = (DbTxn *)txnp_ctp->ct_anyp;
+ } else
+ txnp = NULL;
+
+ /* Set up key */
+ Dbt key(keydata, keysize);
+ key.set_dlen(keydlen);
+ key.set_ulen(keyulen);
+ key.set_doff(keydoff);
+ key.set_flags(keyflags);
+
+ ret = dbp->del(txnp, &key, flags);
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __db_encrypt_proc */
+extern "C" void
+__db_encrypt_proc(
+ long dbpcl_id,
+ char *passwd,
+ u_int32_t flags,
+ __db_encrypt_reply *replyp)
+/* END __db_encrypt_proc */
+{
+ Db *dbp;
+ ct_entry *dbp_ctp;
+ int ret;
+
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (Db *)dbp_ctp->ct_anyp;
+
+ ret = dbp->set_encrypt(passwd, flags);
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __db_extentsize_proc */
+extern "C" void
+__db_extentsize_proc(
+ long dbpcl_id,
+ u_int32_t extentsize,
+ __db_extentsize_reply *replyp)
+/* END __db_extentsize_proc */
+{
+ Db *dbp;
+ ct_entry *dbp_ctp;
+ int ret;
+
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (Db *)dbp_ctp->ct_anyp;
+
+ ret = dbp->set_q_extentsize(extentsize);
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __db_flags_proc */
+extern "C" void
+__db_flags_proc(
+ long dbpcl_id,
+ u_int32_t flags,
+ __db_flags_reply *replyp)
+/* END __db_flags_proc */
+{
+ Db *dbp;
+ ct_entry *dbp_ctp;
+ int ret;
+
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (Db *)dbp_ctp->ct_anyp;
+
+ ret = dbp->set_flags(flags);
+ dbp_ctp->ct_dbdp.setflags = flags;
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __db_get_proc */
+extern "C" void
+__db_get_proc(
+ long dbpcl_id,
+ long txnpcl_id,
+ u_int32_t keydlen,
+ u_int32_t keydoff,
+ u_int32_t keyulen,
+ u_int32_t keyflags,
+ void *keydata,
+ u_int32_t keysize,
+ u_int32_t datadlen,
+ u_int32_t datadoff,
+ u_int32_t dataulen,
+ u_int32_t dataflags,
+ void *datadata,
+ u_int32_t datasize,
+ u_int32_t flags,
+ __db_get_reply *replyp,
+ int * freep)
+/* END __db_get_proc */
+{
+ Db *dbp;
+ DbTxn *txnp;
+ ct_entry *dbp_ctp, *txnp_ctp;
+ int key_alloc, bulk_alloc, ret;
+ void *tmpdata;
+
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (Db *)dbp_ctp->ct_anyp;
+ if (txnpcl_id != 0) {
+ ACTIVATE_CTP(txnp_ctp, txnpcl_id, CT_TXN);
+ txnp = (DbTxn *)txnp_ctp->ct_anyp;
+ } else
+ txnp = NULL;
+
+ *freep = 0;
+ bulk_alloc = 0;
+
+ /* Set up key and data */
+ Dbt key(keydata, keysize);
+ key.set_dlen(keydlen);
+ key.set_ulen(keyulen);
+ key.set_doff(keydoff);
+ /*
+ * Ignore memory related flags on server.
+ */
+ key.set_flags(DB_DBT_MALLOC | (keyflags & DB_DBT_PARTIAL));
+
+ Dbt data(datadata, datasize);
+ data.set_dlen(datadlen);
+ data.set_ulen(dataulen);
+ data.set_doff(datadoff);
+ /*
+ * Ignore memory related flags on server.
+ */
+ dataflags &= DB_DBT_PARTIAL;
+ if (flags & DB_MULTIPLE) {
+ if (data.get_data() == 0) {
+ ret = __os_umalloc(dbp->get_DB()->dbenv,
+ dataulen, &tmpdata);
+ if (ret != 0)
+ goto err;
+ data.set_data(tmpdata);
+ bulk_alloc = 1;
+ }
+ dataflags |= DB_DBT_USERMEM;
+ } else
+ dataflags |= DB_DBT_MALLOC;
+ data.set_flags(dataflags);
+
+ /* Got all our stuff, now do the get */
+ ret = dbp->get(txnp, &key, &data, flags);
+ /*
+ * Otherwise just status.
+ */
+ if (ret == 0) {
+ /*
+ * XXX
+ * We need to xdr_free whatever we are returning, next time.
+ * However, DB does not allocate a new key if one was given
+ * and we'd be free'ing up space allocated in the request.
+ * So, allocate a new key/data pointer if it is the same one
+ * as in the request.
+ */
+ *freep = 1;
+ /*
+ * Key
+ */
+ key_alloc = 0;
+ if (key.get_data() == keydata) {
+ ret = __os_umalloc(dbp->get_DB()->dbenv,
+ key.get_size(), &replyp->keydata.keydata_val);
+ if (ret != 0) {
+ __os_ufree(dbp->get_DB()->dbenv, key.get_data());
+ __os_ufree(dbp->get_DB()->dbenv, data.get_data());
+ goto err;
+ }
+ key_alloc = 1;
+ memcpy(replyp->keydata.keydata_val, key.get_data(), key.get_size());
+ } else
+ replyp->keydata.keydata_val = (char *)key.get_data();
+
+ replyp->keydata.keydata_len = key.get_size();
+
+ /*
+ * Data
+ */
+ if (data.get_data() == datadata) {
+ ret = __os_umalloc(dbp->get_DB()->dbenv,
+ data.get_size(), &replyp->datadata.datadata_val);
+ if (ret != 0) {
+ __os_ufree(dbp->get_DB()->dbenv, key.get_data());
+ __os_ufree(dbp->get_DB()->dbenv, data.get_data());
+ if (key_alloc)
+ __os_ufree(dbp->get_DB()->dbenv,
+ replyp->keydata.keydata_val);
+ goto err;
+ }
+ memcpy(replyp->datadata.datadata_val, data.get_data(),
+ data.get_size());
+ } else
+ replyp->datadata.datadata_val = (char *)data.get_data();
+ replyp->datadata.datadata_len = data.get_size();
+ } else {
+err: replyp->keydata.keydata_val = NULL;
+ replyp->keydata.keydata_len = 0;
+ replyp->datadata.datadata_val = NULL;
+ replyp->datadata.datadata_len = 0;
+ *freep = 0;
+ if (bulk_alloc)
+ __os_ufree(dbp->get_DB()->dbenv, data.get_data());
+ }
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __db_h_ffactor_proc */
+extern "C" void
+__db_h_ffactor_proc(
+ long dbpcl_id,
+ u_int32_t ffactor,
+ __db_h_ffactor_reply *replyp)
+/* END __db_h_ffactor_proc */
+{
+ Db *dbp;
+ ct_entry *dbp_ctp;
+ int ret;
+
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (Db *)dbp_ctp->ct_anyp;
+
+ ret = dbp->set_h_ffactor(ffactor);
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __db_h_nelem_proc */
+extern "C" void
+__db_h_nelem_proc(
+ long dbpcl_id,
+ u_int32_t nelem,
+ __db_h_nelem_reply *replyp)
+/* END __db_h_nelem_proc */
+{
+ Db *dbp;
+ ct_entry *dbp_ctp;
+ int ret;
+
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (Db *)dbp_ctp->ct_anyp;
+
+ ret = dbp->set_h_nelem(nelem);
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __db_key_range_proc */
+extern "C" void
+__db_key_range_proc(
+ long dbpcl_id,
+ long txnpcl_id,
+ u_int32_t keydlen,
+ u_int32_t keydoff,
+ u_int32_t keyulen,
+ u_int32_t keyflags,
+ void *keydata,
+ u_int32_t keysize,
+ u_int32_t flags,
+ __db_key_range_reply *replyp)
+/* END __db_key_range_proc */
+{
+ Db *dbp;
+ DB_KEY_RANGE range;
+ DbTxn *txnp;
+ ct_entry *dbp_ctp, *txnp_ctp;
+ int ret;
+
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (Db *)dbp_ctp->ct_anyp;
+ if (txnpcl_id != 0) {
+ ACTIVATE_CTP(txnp_ctp, txnpcl_id, CT_TXN);
+ txnp = (DbTxn *)txnp_ctp->ct_anyp;
+ } else
+ txnp = NULL;
+
+ /* Set up key */
+ Dbt key(keydata, keysize);
+ key.set_dlen(keydlen);
+ key.set_ulen(keyulen);
+ key.set_doff(keydoff);
+ key.set_flags(keyflags);
+
+ ret = dbp->key_range(txnp, &key, &range, flags);
+
+ replyp->status = ret;
+ replyp->less = range.less;
+ replyp->equal = range.equal;
+ replyp->greater = range.greater;
+ return;
+}
+
+/* BEGIN __db_lorder_proc */
+extern "C" void
+__db_lorder_proc(
+ long dbpcl_id,
+ u_int32_t lorder,
+ __db_lorder_reply *replyp)
+/* END __db_lorder_proc */
+{
+ Db *dbp;
+ ct_entry *dbp_ctp;
+ int ret;
+
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (Db *)dbp_ctp->ct_anyp;
+
+ ret = dbp->set_lorder(lorder);
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __db_open_proc */
+extern "C" void
+__db_open_proc(
+ long dbpcl_id,
+ long txnpcl_id,
+ char *name,
+ char *subdb,
+ u_int32_t type,
+ u_int32_t flags,
+ u_int32_t mode,
+ __db_open_reply *replyp)
+/* END __db_open_proc */
+{
+ Db *dbp;
+ DbTxn *txnp;
+ DBTYPE dbtype;
+ ct_entry *dbp_ctp, *new_ctp, *txnp_ctp;
+ int isswapped, ret;
+
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (Db *)dbp_ctp->ct_anyp;
+ if (txnpcl_id != 0) {
+ ACTIVATE_CTP(txnp_ctp, txnpcl_id, CT_TXN);
+ txnp = (DbTxn *)txnp_ctp->ct_anyp;
+ } else
+ txnp = NULL;
+
+ replyp->dbcl_id = dbpcl_id;
+ if ((new_ctp = __dbsrv_sharedb(dbp_ctp, name, subdb, (DBTYPE)type, flags))
+ != NULL) {
+ /*
+ * We can share, clean up old ID, set new one.
+ */
+ if (__dbsrv_verbose)
+ printf("Sharing db ID %ld\n", new_ctp->ct_id);
+ replyp->dbcl_id = new_ctp->ct_id;
+ ret = __db_close_int(dbpcl_id, 0);
+ goto out;
+ }
+ ret = dbp->open(txnp, name, subdb, (DBTYPE)type, flags, mode);
+ if (ret == 0) {
+ (void)dbp->get_type(&dbtype);
+ replyp->type = dbtype;
+ /* XXX
+ * Tcl needs to peek at dbp->flags for DB_AM_DUP. Send
+ * this dbp's flags back.
+ */
+ replyp->dbflags = (int) dbp->get_DB()->flags;
+ /*
+ * We need to determine the byte order of the database
+ * and send it back to the client. Determine it by
+ * the server's native order and the swapped value of
+ * the DB itself.
+ */
+ (void)dbp->get_byteswapped(&isswapped);
+ if (__db_byteorder(NULL, 1234) == 0) {
+ if (isswapped == 0)
+ replyp->lorder = 1234;
+ else
+ replyp->lorder = 4321;
+ } else {
+ if (isswapped == 0)
+ replyp->lorder = 4321;
+ else
+ replyp->lorder = 1234;
+ }
+ dbp_ctp->ct_dbdp.type = dbtype;
+ dbp_ctp->ct_dbdp.dbflags = LF_ISSET(DB_SERVER_DBFLAGS);
+ if (name == NULL)
+ dbp_ctp->ct_dbdp.db = NULL;
+ else if ((ret = __os_strdup(dbp->get_DB()->dbenv, name,
+ &dbp_ctp->ct_dbdp.db)) != 0)
+ goto out;
+ if (subdb == NULL)
+ dbp_ctp->ct_dbdp.subdb = NULL;
+ else if ((ret = __os_strdup(dbp->get_DB()->dbenv, subdb,
+ &dbp_ctp->ct_dbdp.subdb)) != 0)
+ goto out;
+ }
+out:
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __db_pagesize_proc */
+extern "C" void
+__db_pagesize_proc(
+ long dbpcl_id,
+ u_int32_t pagesize,
+ __db_pagesize_reply *replyp)
+/* END __db_pagesize_proc */
+{
+ Db *dbp;
+ ct_entry *dbp_ctp;
+ int ret;
+
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (Db *)dbp_ctp->ct_anyp;
+
+ ret = dbp->set_pagesize(pagesize);
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __db_pget_proc */
+extern "C" void
+__db_pget_proc(
+ long dbpcl_id,
+ long txnpcl_id,
+ u_int32_t skeydlen,
+ u_int32_t skeydoff,
+ u_int32_t skeyulen,
+ u_int32_t skeyflags,
+ void *skeydata,
+ u_int32_t skeysize,
+ u_int32_t pkeydlen,
+ u_int32_t pkeydoff,
+ u_int32_t pkeyulen,
+ u_int32_t pkeyflags,
+ void *pkeydata,
+ u_int32_t pkeysize,
+ u_int32_t datadlen,
+ u_int32_t datadoff,
+ u_int32_t dataulen,
+ u_int32_t dataflags,
+ void *datadata,
+ u_int32_t datasize,
+ u_int32_t flags,
+ __db_pget_reply *replyp,
+ int * freep)
+/* END __db_pget_proc */
+{
+ Db *dbp;
+ DbTxn *txnp;
+ ct_entry *dbp_ctp, *txnp_ctp;
+ int key_alloc, ret;
+
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (Db *)dbp_ctp->ct_anyp;
+ if (txnpcl_id != 0) {
+ ACTIVATE_CTP(txnp_ctp, txnpcl_id, CT_TXN);
+ txnp = (DbTxn *)txnp_ctp->ct_anyp;
+ } else
+ txnp = NULL;
+
+ *freep = 0;
+
+ /*
+ * Ignore memory related flags on server.
+ */
+ /* Set up key and data */
+ Dbt skey(skeydata, skeysize);
+ skey.set_dlen(skeydlen);
+ skey.set_ulen(skeyulen);
+ skey.set_doff(skeydoff);
+ skey.set_flags(DB_DBT_MALLOC | (skeyflags & DB_DBT_PARTIAL));
+
+ Dbt pkey(pkeydata, pkeysize);
+ pkey.set_dlen(pkeydlen);
+ pkey.set_ulen(pkeyulen);
+ pkey.set_doff(pkeydoff);
+ pkey.set_flags(DB_DBT_MALLOC | (pkeyflags & DB_DBT_PARTIAL));
+
+ Dbt data(datadata, datasize);
+ data.set_dlen(datadlen);
+ data.set_ulen(dataulen);
+ data.set_doff(datadoff);
+ data.set_flags(DB_DBT_MALLOC | (dataflags & DB_DBT_PARTIAL));
+
+ /* Got all our stuff, now do the get */
+ ret = dbp->pget(txnp, &skey, &pkey, &data, flags);
+	/*
+	 * If the get succeeded, set up the keys and data in the reply.
+	 * Otherwise just return the status.
+	 */
+ if (ret == 0) {
+ /*
+ * XXX
+ * We need to xdr_free whatever we are returning, next time.
+ * However, DB does not allocate a new key if one was given
+ * and we'd be free'ing up space allocated in the request.
+ * So, allocate a new key/data pointer if it is the same one
+ * as in the request.
+ */
+ *freep = 1;
+ /*
+ * Key
+ */
+ key_alloc = 0;
+ if (skey.get_data() == skeydata) {
+ ret = __os_umalloc(dbp->get_DB()->dbenv,
+ skey.get_size(), &replyp->skeydata.skeydata_val);
+ if (ret != 0) {
+ __os_ufree(dbp->get_DB()->dbenv, skey.get_data());
+ __os_ufree(dbp->get_DB()->dbenv, pkey.get_data());
+ __os_ufree(dbp->get_DB()->dbenv, data.get_data());
+ goto err;
+ }
+ key_alloc = 1;
+ memcpy(replyp->skeydata.skeydata_val, skey.get_data(),
+ skey.get_size());
+ } else
+ replyp->skeydata.skeydata_val = (char *)skey.get_data();
+
+ replyp->skeydata.skeydata_len = skey.get_size();
+
+ /*
+ * Primary key
+ */
+ if (pkey.get_data() == pkeydata) {
+ ret = __os_umalloc(dbp->get_DB()->dbenv,
+ pkey.get_size(), &replyp->pkeydata.pkeydata_val);
+ if (ret != 0) {
+ __os_ufree(dbp->get_DB()->dbenv, skey.get_data());
+ __os_ufree(dbp->get_DB()->dbenv, pkey.get_data());
+ __os_ufree(dbp->get_DB()->dbenv, data.get_data());
+ if (key_alloc)
+ __os_ufree(dbp->get_DB()->dbenv,
+ replyp->skeydata.skeydata_val);
+ goto err;
+ }
+ /*
+ * We can set it to 2, because they cannot send the
+ * pkey over without sending the skey over too.
+ * So if they did send a pkey, they must have sent
+ * the skey as well.
+ */
+ key_alloc = 2;
+ memcpy(replyp->pkeydata.pkeydata_val, pkey.get_data(),
+ pkey.get_size());
+ } else
+ replyp->pkeydata.pkeydata_val = (char *)pkey.get_data();
+ replyp->pkeydata.pkeydata_len = pkey.get_size();
+
+ /*
+ * Data
+ */
+ if (data.get_data() == datadata) {
+ ret = __os_umalloc(dbp->get_DB()->dbenv,
+ data.get_size(), &replyp->datadata.datadata_val);
+ if (ret != 0) {
+ __os_ufree(dbp->get_DB()->dbenv, skey.get_data());
+ __os_ufree(dbp->get_DB()->dbenv, pkey.get_data());
+ __os_ufree(dbp->get_DB()->dbenv, data.get_data());
+ /*
+ * If key_alloc is 1, just skey needs to be
+ * freed, if key_alloc is 2, both skey and pkey
+ * need to be freed.
+ */
+ if (key_alloc--)
+ __os_ufree(dbp->get_DB()->dbenv,
+ replyp->skeydata.skeydata_val);
+ if (key_alloc)
+ __os_ufree(dbp->get_DB()->dbenv,
+ replyp->pkeydata.pkeydata_val);
+ goto err;
+ }
+ memcpy(replyp->datadata.datadata_val, data.get_data(),
+ data.get_size());
+ } else
+ replyp->datadata.datadata_val = (char *)data.get_data();
+ replyp->datadata.datadata_len = data.get_size();
+ } else {
+err: replyp->skeydata.skeydata_val = NULL;
+ replyp->skeydata.skeydata_len = 0;
+ replyp->pkeydata.pkeydata_val = NULL;
+ replyp->pkeydata.pkeydata_len = 0;
+ replyp->datadata.datadata_val = NULL;
+ replyp->datadata.datadata_len = 0;
+ *freep = 0;
+ }
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __db_put_proc */
+extern "C" void
+__db_put_proc(
+ long dbpcl_id,
+ long txnpcl_id,
+ u_int32_t keydlen,
+ u_int32_t keydoff,
+ u_int32_t keyulen,
+ u_int32_t keyflags,
+ void *keydata,
+ u_int32_t keysize,
+ u_int32_t datadlen,
+ u_int32_t datadoff,
+ u_int32_t dataulen,
+ u_int32_t dataflags,
+ void *datadata,
+ u_int32_t datasize,
+ u_int32_t flags,
+ __db_put_reply *replyp,
+ int * freep)
+/* END __db_put_proc */
+{
+ Db *dbp;
+ DbTxn *txnp;
+ ct_entry *dbp_ctp, *txnp_ctp;
+ int ret;
+
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (Db *)dbp_ctp->ct_anyp;
+ if (txnpcl_id != 0) {
+ ACTIVATE_CTP(txnp_ctp, txnpcl_id, CT_TXN);
+ txnp = (DbTxn *)txnp_ctp->ct_anyp;
+ } else
+ txnp = NULL;
+
+ *freep = 0;
+
+ /* Set up key and data */
+ Dbt key(keydata, keysize);
+ key.set_dlen(keydlen);
+ key.set_ulen(keyulen);
+ key.set_doff(keydoff);
+ key.set_flags(DB_DBT_MALLOC | (keyflags & DB_DBT_PARTIAL));
+
+ Dbt data(datadata, datasize);
+ data.set_dlen(datadlen);
+ data.set_ulen(dataulen);
+ data.set_doff(datadoff);
+ data.set_flags(dataflags);
+
+ /* Got all our stuff, now do the put */
+ ret = dbp->put(txnp, &key, &data, flags);
+ /*
+ * If the client did a DB_APPEND, set up key in reply.
+ * Otherwise just status.
+ */
+ if (ret == 0 && (flags == DB_APPEND)) {
+ /*
+ * XXX
+ * We need to xdr_free whatever we are returning, next time.
+ * However, DB does not allocate a new key if one was given
+ * and we'd be free'ing up space allocated in the request.
+ * So, allocate a new key/data pointer if it is the same one
+ * as in the request.
+ */
+ *freep = 1;
+ /*
+ * Key
+ */
+ if (key.get_data() == keydata) {
+ ret = __os_umalloc(dbp->get_DB()->dbenv,
+ key.get_size(), &replyp->keydata.keydata_val);
+ if (ret != 0) {
+ __os_ufree(dbp->get_DB()->dbenv, key.get_data());
+ goto err;
+ }
+ memcpy(replyp->keydata.keydata_val, key.get_data(), key.get_size());
+ } else
+ replyp->keydata.keydata_val = (char *)key.get_data();
+
+ replyp->keydata.keydata_len = key.get_size();
+ } else {
+err: replyp->keydata.keydata_val = NULL;
+ replyp->keydata.keydata_len = 0;
+ *freep = 0;
+ }
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __db_re_delim_proc */
+extern "C" void
+__db_re_delim_proc(
+ long dbpcl_id,
+ u_int32_t delim,
+ __db_re_delim_reply *replyp)
+/* END __db_re_delim_proc */
+{
+ Db *dbp;
+ ct_entry *dbp_ctp;
+ int ret;
+
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (Db *)dbp_ctp->ct_anyp;
+
+ ret = dbp->set_re_delim(delim);
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __db_re_len_proc */
+extern "C" void
+__db_re_len_proc(
+ long dbpcl_id,
+ u_int32_t len,
+ __db_re_len_reply *replyp)
+/* END __db_re_len_proc */
+{
+ Db *dbp;
+ ct_entry *dbp_ctp;
+ int ret;
+
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (Db *)dbp_ctp->ct_anyp;
+
+ ret = dbp->set_re_len(len);
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __db_re_pad_proc */
+extern "C" void
+__db_re_pad_proc(
+ long dbpcl_id,
+ u_int32_t pad,
+ __db_re_pad_reply *replyp)
+/* END __db_re_pad_proc */
+{
+ Db *dbp;
+ ct_entry *dbp_ctp;
+ int ret;
+
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (Db *)dbp_ctp->ct_anyp;
+
+ ret = dbp->set_re_pad(pad);
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __db_remove_proc */
+extern "C" void
+__db_remove_proc(
+ long dbpcl_id,
+ char *name,
+ char *subdb,
+ u_int32_t flags,
+ __db_remove_reply *replyp)
+/* END __db_remove_proc */
+{
+ Db *dbp;
+ ct_entry *dbp_ctp;
+ int ret;
+
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (Db *)dbp_ctp->ct_anyp;
+
+ ret = dbp->remove(name, subdb, flags);
+ __dbdel_ctp(dbp_ctp);
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __db_rename_proc */
+extern "C" void
+__db_rename_proc(
+ long dbpcl_id,
+ char *name,
+ char *subdb,
+ char *newname,
+ u_int32_t flags,
+ __db_rename_reply *replyp)
+/* END __db_rename_proc */
+{
+ Db *dbp;
+ ct_entry *dbp_ctp;
+ int ret;
+
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (Db *)dbp_ctp->ct_anyp;
+
+ ret = dbp->rename(name, subdb, newname, flags);
+ __dbdel_ctp(dbp_ctp);
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __db_stat_proc */
+extern "C" void
+__db_stat_proc(
+ long dbpcl_id,
+ u_int32_t flags,
+ __db_stat_reply *replyp,
+ int * freep)
+/* END __db_stat_proc */
+{
+ Db *dbp;
+ DBTYPE type;
+ ct_entry *dbp_ctp;
+ u_int32_t *q, *p, *retsp;
+ int i, len, ret;
+ void *sp;
+
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (Db *)dbp_ctp->ct_anyp;
+
+ ret = dbp->stat(&sp, flags);
+ replyp->status = ret;
+ if (ret != 0)
+ return;
+ /*
+	 * If we get here, we have success.  Allocate an array so that
+ * we can use the list generator. Generate the reply, free
+ * up the space.
+ */
+ /*
+ * XXX This assumes that all elements of all stat structures
+ * are u_int32_t fields. They are, currently.
+ */
+ (void)dbp->get_type(&type);
+ if (type == DB_HASH)
+ len = sizeof(DB_HASH_STAT);
+ else if (type == DB_QUEUE)
+ len = sizeof(DB_QUEUE_STAT);
+ else /* BTREE or RECNO are same stats */
+ len = sizeof(DB_BTREE_STAT);
+ replyp->stats.stats_len = len / sizeof(u_int32_t);
+
+ if ((ret = __os_umalloc(dbp->get_DB()->dbenv,
+ len * replyp->stats.stats_len, &retsp)) != 0)
+ goto out;
+ for (i = 0, q = retsp, p = (u_int32_t *)sp; i < len;
+ i++, q++, p++)
+ *q = *p;
+ replyp->stats.stats_val = retsp;
+ __os_ufree(dbp->get_DB()->dbenv, sp);
+ if (ret == 0)
+ *freep = 1;
+out:
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __db_sync_proc */
+extern "C" void
+__db_sync_proc(
+ long dbpcl_id,
+ u_int32_t flags,
+ __db_sync_reply *replyp)
+/* END __db_sync_proc */
+{
+ Db *dbp;
+ ct_entry *dbp_ctp;
+ int ret;
+
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (Db *)dbp_ctp->ct_anyp;
+
+ ret = dbp->sync(flags);
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __db_truncate_proc */
+extern "C" void
+__db_truncate_proc(
+ long dbpcl_id,
+ long txnpcl_id,
+ u_int32_t flags,
+ __db_truncate_reply *replyp)
+/* END __db_truncate_proc */
+{
+ Db *dbp;
+ DbTxn *txnp;
+ ct_entry *dbp_ctp, *txnp_ctp;
+ u_int32_t count;
+ int ret;
+
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (Db *)dbp_ctp->ct_anyp;
+ if (txnpcl_id != 0) {
+ ACTIVATE_CTP(txnp_ctp, txnpcl_id, CT_TXN);
+ txnp = (DbTxn *)txnp_ctp->ct_anyp;
+ } else
+ txnp = NULL;
+
+ ret = dbp->truncate(txnp, &count, flags);
+ replyp->status = ret;
+ if (ret == 0)
+ replyp->count = count;
+ return;
+}
+
+/* BEGIN __db_cursor_proc */
+extern "C" void
+__db_cursor_proc(
+ long dbpcl_id,
+ long txnpcl_id,
+ u_int32_t flags,
+ __db_cursor_reply *replyp)
+/* END __db_cursor_proc */
+{
+ Db *dbp;
+ Dbc *dbc;
+ DbTxn *txnp;
+ ct_entry *dbc_ctp, *env_ctp, *dbp_ctp, *txnp_ctp;
+ int ret;
+
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (Db *)dbp_ctp->ct_anyp;
+ dbc_ctp = new_ct_ent(&replyp->status);
+ if (dbc_ctp == NULL)
+ return;
+
+ if (txnpcl_id != 0) {
+ ACTIVATE_CTP(txnp_ctp, txnpcl_id, CT_TXN);
+ txnp = (DbTxn *)txnp_ctp->ct_anyp;
+ dbc_ctp->ct_activep = txnp_ctp->ct_activep;
+ } else
+ txnp = NULL;
+
+ if ((ret = dbp->cursor(txnp, &dbc, flags)) == 0) {
+ dbc_ctp->ct_dbc = dbc;
+ dbc_ctp->ct_type = CT_CURSOR;
+ dbc_ctp->ct_parent = dbp_ctp;
+ env_ctp = dbp_ctp->ct_envparent;
+ dbc_ctp->ct_envparent = env_ctp;
+ __dbsrv_settimeout(dbc_ctp, env_ctp->ct_timeout);
+ __dbsrv_active(dbc_ctp);
+ replyp->dbcidcl_id = dbc_ctp->ct_id;
+ } else
+ __dbclear_ctp(dbc_ctp);
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __db_join_proc */
+extern "C" void
+__db_join_proc(
+ long dbpcl_id,
+ u_int32_t *curs,
+ u_int32_t curslen,
+ u_int32_t flags,
+ __db_join_reply *replyp)
+/* END __db_join_proc */
+{
+ Db *dbp;
+ Dbc **jcurs, **c;
+ Dbc *dbc;
+ ct_entry *dbc_ctp, *ctp, *dbp_ctp;
+ size_t size;
+ u_int32_t *cl, i;
+ int ret;
+
+ ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
+ dbp = (Db *)dbp_ctp->ct_anyp;
+
+ dbc_ctp = new_ct_ent(&replyp->status);
+ if (dbc_ctp == NULL)
+ return;
+
+ size = (curslen + 1) * sizeof(Dbc *);
+ if ((ret = __os_calloc(dbp->get_DB()->dbenv,
+ curslen + 1, sizeof(Dbc *), &jcurs)) != 0) {
+ replyp->status = ret;
+ __dbclear_ctp(dbc_ctp);
+ return;
+ }
+ /*
+ * If our curslist has a parent txn, we need to use it too
+ * for the activity timeout. All cursors must be part of
+ * the same transaction, so just check the first.
+ */
+ ctp = get_tableent(*curs);
+ DB_ASSERT(ctp->ct_type == CT_CURSOR);
+ /*
+ * If we are using a transaction, set the join activity timer
+ * to point to the parent transaction.
+ */
+ if (ctp->ct_activep != &ctp->ct_active)
+ dbc_ctp->ct_activep = ctp->ct_activep;
+ for (i = 0, cl = curs, c = jcurs; i < curslen; i++, cl++, c++) {
+ ctp = get_tableent(*cl);
+ if (ctp == NULL) {
+ replyp->status = DB_NOSERVER_ID;
+ goto out;
+ }
+ /*
+ * If we are using a txn, the join cursor points to the
+ * transaction timeout. If we are not using a transaction,
+ * then all the curslist cursors must point to the join
+ * cursor's timeout so that we do not timeout any of the
+		 * curslist cursors while the join cursor is active.
+ * Change the type of the curslist ctps to CT_JOIN so that
+ * we know they are part of a join list and we can distinguish
+ * them and later restore them when the join cursor is closed.
+ */
+ DB_ASSERT(ctp->ct_type == CT_CURSOR);
+ ctp->ct_type |= CT_JOIN;
+ ctp->ct_origp = ctp->ct_activep;
+ /*
+ * Setting this to the ct_active field of the dbc_ctp is
+ * really just a way to distinguish which join dbc this
+ * cursor is part of. The ct_activep of this cursor is
+ * not used at all during its lifetime as part of a join
+ * cursor.
+ */
+ ctp->ct_activep = &dbc_ctp->ct_active;
+ *c = ctp->ct_dbc;
+ }
+ *c = NULL;
+ if ((ret = dbp->join(jcurs, &dbc, flags)) == 0) {
+ dbc_ctp->ct_dbc = dbc;
+ dbc_ctp->ct_type = (CT_JOINCUR | CT_CURSOR);
+ dbc_ctp->ct_parent = dbp_ctp;
+ dbc_ctp->ct_envparent = dbp_ctp->ct_envparent;
+ __dbsrv_settimeout(dbc_ctp, dbp_ctp->ct_envparent->ct_timeout);
+ __dbsrv_active(dbc_ctp);
+ replyp->dbcidcl_id = dbc_ctp->ct_id;
+ } else {
+ __dbclear_ctp(dbc_ctp);
+ /*
+ * If we get an error, undo what we did above to any cursors.
+ */
+ for (cl = curs; *cl != 0; cl++) {
+ ctp = get_tableent(*cl);
+ ctp->ct_type = CT_CURSOR;
+ ctp->ct_activep = ctp->ct_origp;
+ }
+ }
+
+ replyp->status = ret;
+out:
+ __os_free(dbp->get_DB()->dbenv, jcurs);
+ return;
+}
+
+/* BEGIN __dbc_close_proc */
+extern "C" void
+__dbc_close_proc(
+ long dbccl_id,
+ __dbc_close_reply *replyp)
+/* END __dbc_close_proc */
+{
+ ct_entry *dbc_ctp;
+
+ ACTIVATE_CTP(dbc_ctp, dbccl_id, CT_CURSOR);
+ replyp->status = __dbc_close_int(dbc_ctp);
+ return;
+}
+
+/* BEGIN __dbc_count_proc */
+extern "C" void
+__dbc_count_proc(
+ long dbccl_id,
+ u_int32_t flags,
+ __dbc_count_reply *replyp)
+/* END __dbc_count_proc */
+{
+ Dbc *dbc;
+ ct_entry *dbc_ctp;
+ db_recno_t num;
+ int ret;
+
+ ACTIVATE_CTP(dbc_ctp, dbccl_id, CT_CURSOR);
+ dbc = (Dbc *)dbc_ctp->ct_anyp;
+
+ ret = dbc->count(&num, flags);
+ replyp->status = ret;
+ if (ret == 0)
+ replyp->dupcount = num;
+ return;
+}
+
+/* BEGIN __dbc_del_proc */
+extern "C" void
+__dbc_del_proc(
+ long dbccl_id,
+ u_int32_t flags,
+ __dbc_del_reply *replyp)
+/* END __dbc_del_proc */
+{
+ Dbc *dbc;
+ ct_entry *dbc_ctp;
+ int ret;
+
+ ACTIVATE_CTP(dbc_ctp, dbccl_id, CT_CURSOR);
+ dbc = (Dbc *)dbc_ctp->ct_anyp;
+
+ ret = dbc->del(flags);
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __dbc_dup_proc */
+extern "C" void
+__dbc_dup_proc(
+ long dbccl_id,
+ u_int32_t flags,
+ __dbc_dup_reply *replyp)
+/* END __dbc_dup_proc */
+{
+ Dbc *dbc, *newdbc;
+ ct_entry *dbc_ctp, *new_ctp;
+ int ret;
+
+ ACTIVATE_CTP(dbc_ctp, dbccl_id, CT_CURSOR);
+ dbc = (Dbc *)dbc_ctp->ct_anyp;
+
+ new_ctp = new_ct_ent(&replyp->status);
+ if (new_ctp == NULL)
+ return;
+
+ if ((ret = dbc->dup(&newdbc, flags)) == 0) {
+ new_ctp->ct_dbc = newdbc;
+ new_ctp->ct_type = CT_CURSOR;
+ new_ctp->ct_parent = dbc_ctp->ct_parent;
+ new_ctp->ct_envparent = dbc_ctp->ct_envparent;
+ /*
+ * If our cursor has a parent txn, we need to use it too.
+ */
+ if (dbc_ctp->ct_activep != &dbc_ctp->ct_active)
+ new_ctp->ct_activep = dbc_ctp->ct_activep;
+ __dbsrv_settimeout(new_ctp, dbc_ctp->ct_timeout);
+ __dbsrv_active(new_ctp);
+ replyp->dbcidcl_id = new_ctp->ct_id;
+ } else
+ __dbclear_ctp(new_ctp);
+
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __dbc_get_proc */
+extern "C" void
+__dbc_get_proc(
+ long dbccl_id,
+ u_int32_t keydlen,
+ u_int32_t keydoff,
+ u_int32_t keyulen,
+ u_int32_t keyflags,
+ void *keydata,
+ u_int32_t keysize,
+ u_int32_t datadlen,
+ u_int32_t datadoff,
+ u_int32_t dataulen,
+ u_int32_t dataflags,
+ void *datadata,
+ u_int32_t datasize,
+ u_int32_t flags,
+ __dbc_get_reply *replyp,
+ int * freep)
+/* END __dbc_get_proc */
+{
+ Dbc *dbc;
+ DbEnv *dbenv;
+ ct_entry *dbc_ctp;
+ int key_alloc, bulk_alloc, ret;
+ void *tmpdata;
+
+ ACTIVATE_CTP(dbc_ctp, dbccl_id, CT_CURSOR);
+ dbc = (Dbc *)dbc_ctp->ct_anyp;
+ dbenv = DbEnv::get_DbEnv(((DBC *)dbc)->dbp->dbenv);
+
+ *freep = 0;
+ bulk_alloc = 0;
+
+ /* Set up key and data */
+ Dbt key(keydata, keysize);
+ key.set_dlen(keydlen);
+ key.set_ulen(keyulen);
+ key.set_doff(keydoff);
+ key.set_flags(DB_DBT_MALLOC | (keyflags & DB_DBT_PARTIAL));
+
+ Dbt data(datadata, datasize);
+ data.set_dlen(datadlen);
+ data.set_ulen(dataulen);
+ data.set_doff(datadoff);
+ dataflags &= DB_DBT_PARTIAL;
+ if (flags & DB_MULTIPLE || flags & DB_MULTIPLE_KEY) {
+ if (data.get_data() == NULL) {
+ ret = __os_umalloc(dbenv->get_DB_ENV(),
+ data.get_ulen(), &tmpdata);
+ if (ret != 0)
+ goto err;
+ data.set_data(tmpdata);
+ bulk_alloc = 1;
+ }
+ dataflags |= DB_DBT_USERMEM;
+ } else
+ dataflags |= DB_DBT_MALLOC;
+ data.set_flags(dataflags);
+
+ /* Got all our stuff, now do the get */
+ ret = dbc->get(&key, &data, flags);
+
+	/*
+	 * If the get succeeded, set up the key and data in the reply.
+	 * Otherwise just return the status.
+	 */
+ if (ret == 0) {
+ /*
+ * XXX
+ * We need to xdr_free whatever we are returning, next time.
+ * However, DB does not allocate a new key if one was given
+ * and we'd be free'ing up space allocated in the request.
+ * So, allocate a new key/data pointer if it is the same one
+ * as in the request.
+ */
+ *freep = 1;
+ /*
+ * Key
+ */
+ key_alloc = 0;
+ if (key.get_data() == keydata) {
+ ret = __os_umalloc(dbenv->get_DB_ENV(), key.get_size(),
+ &replyp->keydata.keydata_val);
+ if (ret != 0) {
+ __os_ufree(dbenv->get_DB_ENV(), key.get_data());
+ __os_ufree(dbenv->get_DB_ENV(), data.get_data());
+ goto err;
+ }
+ key_alloc = 1;
+ memcpy(replyp->keydata.keydata_val, key.get_data(), key.get_size());
+ } else
+ replyp->keydata.keydata_val = (char *)key.get_data();
+
+ replyp->keydata.keydata_len = key.get_size();
+
+ /*
+ * Data
+ */
+ if (data.get_data() == datadata) {
+ ret = __os_umalloc(dbenv->get_DB_ENV(), data.get_size(),
+ &replyp->datadata.datadata_val);
+ if (ret != 0) {
+ __os_ufree(dbenv->get_DB_ENV(), key.get_data());
+ __os_ufree(dbenv->get_DB_ENV(), data.get_data());
+ if (key_alloc)
+ __os_ufree(dbenv->get_DB_ENV(),
+ replyp->keydata.keydata_val);
+ goto err;
+ }
+ memcpy(replyp->datadata.datadata_val, data.get_data(),
+ data.get_size());
+ } else
+ replyp->datadata.datadata_val = (char *)data.get_data();
+ replyp->datadata.datadata_len = data.get_size();
+ } else {
+err: replyp->keydata.keydata_val = NULL;
+ replyp->keydata.keydata_len = 0;
+ replyp->datadata.datadata_val = NULL;
+ replyp->datadata.datadata_len = 0;
+ *freep = 0;
+ if (bulk_alloc)
+ __os_ufree(dbenv->get_DB_ENV(), data.get_data());
+ }
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __dbc_pget_proc */
+extern "C" void
+__dbc_pget_proc(
+ long dbccl_id,
+ u_int32_t skeydlen,
+ u_int32_t skeydoff,
+ u_int32_t skeyulen,
+ u_int32_t skeyflags,
+ void *skeydata,
+ u_int32_t skeysize,
+ u_int32_t pkeydlen,
+ u_int32_t pkeydoff,
+ u_int32_t pkeyulen,
+ u_int32_t pkeyflags,
+ void *pkeydata,
+ u_int32_t pkeysize,
+ u_int32_t datadlen,
+ u_int32_t datadoff,
+ u_int32_t dataulen,
+ u_int32_t dataflags,
+ void *datadata,
+ u_int32_t datasize,
+ u_int32_t flags,
+ __dbc_pget_reply *replyp,
+ int * freep)
+/* END __dbc_pget_proc */
+{
+ Dbc *dbc;
+ DbEnv *dbenv;
+ ct_entry *dbc_ctp;
+ int key_alloc, ret;
+
+ ACTIVATE_CTP(dbc_ctp, dbccl_id, CT_CURSOR);
+ dbc = (Dbc *)dbc_ctp->ct_anyp;
+ dbenv = DbEnv::get_DbEnv(((DBC *)dbc)->dbp->dbenv);
+
+ *freep = 0;
+
+ /*
+ * Ignore memory related flags on server.
+ */
+ /* Set up key and data */
+ Dbt skey(skeydata, skeysize);
+ skey.set_dlen(skeydlen);
+ skey.set_ulen(skeyulen);
+ skey.set_doff(skeydoff);
+ skey.set_flags(DB_DBT_MALLOC | (skeyflags & DB_DBT_PARTIAL));
+
+ Dbt pkey(pkeydata, pkeysize);
+ pkey.set_dlen(pkeydlen);
+ pkey.set_ulen(pkeyulen);
+ pkey.set_doff(pkeydoff);
+ pkey.set_flags(DB_DBT_MALLOC | (pkeyflags & DB_DBT_PARTIAL));
+
+ Dbt data(datadata, datasize);
+ data.set_dlen(datadlen);
+ data.set_ulen(dataulen);
+ data.set_doff(datadoff);
+ data.set_flags(DB_DBT_MALLOC | (dataflags & DB_DBT_PARTIAL));
+
+ /* Got all our stuff, now do the get */
+ ret = dbc->pget(&skey, &pkey, &data, flags);
+	/*
+	 * If the get succeeded, set up the keys and data in the reply.
+	 * Otherwise just return the status.
+	 */
+ if (ret == 0) {
+ /*
+ * XXX
+ * We need to xdr_free whatever we are returning, next time.
+ * However, DB does not allocate a new key if one was given
+ * and we'd be free'ing up space allocated in the request.
+ * So, allocate a new key/data pointer if it is the same one
+ * as in the request.
+ */
+ *freep = 1;
+ /*
+ * Key
+ */
+ key_alloc = 0;
+ if (skey.get_data() == skeydata) {
+ ret = __os_umalloc(dbenv->get_DB_ENV(),
+ skey.get_size(), &replyp->skeydata.skeydata_val);
+ if (ret != 0) {
+ __os_ufree(dbenv->get_DB_ENV(), skey.get_data());
+ __os_ufree(dbenv->get_DB_ENV(), pkey.get_data());
+ __os_ufree(dbenv->get_DB_ENV(), data.get_data());
+ goto err;
+ }
+ key_alloc = 1;
+ memcpy(replyp->skeydata.skeydata_val, skey.get_data(),
+ skey.get_size());
+ } else
+ replyp->skeydata.skeydata_val = (char *)skey.get_data();
+ replyp->skeydata.skeydata_len = skey.get_size();
+
+ /*
+ * Primary key
+ */
+ if (pkey.get_data() == pkeydata) {
+ ret = __os_umalloc(dbenv->get_DB_ENV(),
+ pkey.get_size(), &replyp->pkeydata.pkeydata_val);
+ if (ret != 0) {
+ __os_ufree(dbenv->get_DB_ENV(), skey.get_data());
+ __os_ufree(dbenv->get_DB_ENV(), pkey.get_data());
+ __os_ufree(dbenv->get_DB_ENV(), data.get_data());
+ if (key_alloc)
+ __os_ufree(dbenv->get_DB_ENV(),
+ replyp->skeydata.skeydata_val);
+ goto err;
+ }
+ /*
+ * We can set it to 2, because they cannot send the
+ * pkey over without sending the skey over too.
+ * So if they did send a pkey, they must have sent
+ * the skey as well.
+ */
+ key_alloc = 2;
+ memcpy(replyp->pkeydata.pkeydata_val, pkey.get_data(),
+ pkey.get_size());
+ } else
+ replyp->pkeydata.pkeydata_val = (char *)pkey.get_data();
+ replyp->pkeydata.pkeydata_len = pkey.get_size();
+
+ /*
+ * Data
+ */
+ if (data.get_data() == datadata) {
+ ret = __os_umalloc(dbenv->get_DB_ENV(),
+ data.get_size(), &replyp->datadata.datadata_val);
+ if (ret != 0) {
+ __os_ufree(dbenv->get_DB_ENV(), skey.get_data());
+ __os_ufree(dbenv->get_DB_ENV(), pkey.get_data());
+ __os_ufree(dbenv->get_DB_ENV(), data.get_data());
+ /*
+ * If key_alloc is 1, just skey needs to be
+ * freed, if key_alloc is 2, both skey and pkey
+ * need to be freed.
+ */
+ if (key_alloc--)
+ __os_ufree(dbenv->get_DB_ENV(),
+ replyp->skeydata.skeydata_val);
+ if (key_alloc)
+ __os_ufree(dbenv->get_DB_ENV(),
+ replyp->pkeydata.pkeydata_val);
+ goto err;
+ }
+ memcpy(replyp->datadata.datadata_val, data.get_data(),
+ data.get_size());
+ } else
+ replyp->datadata.datadata_val = (char *)data.get_data();
+ replyp->datadata.datadata_len = data.get_size();
+ } else {
+err: replyp->skeydata.skeydata_val = NULL;
+ replyp->skeydata.skeydata_len = 0;
+ replyp->pkeydata.pkeydata_val = NULL;
+ replyp->pkeydata.pkeydata_len = 0;
+ replyp->datadata.datadata_val = NULL;
+ replyp->datadata.datadata_len = 0;
+ *freep = 0;
+ }
+ replyp->status = ret;
+ return;
+}
+
+/* BEGIN __dbc_put_proc */
+extern "C" void
+__dbc_put_proc(
+ long dbccl_id,
+ u_int32_t keydlen,
+ u_int32_t keydoff,
+ u_int32_t keyulen,
+ u_int32_t keyflags,
+ void *keydata,
+ u_int32_t keysize,
+ u_int32_t datadlen,
+ u_int32_t datadoff,
+ u_int32_t dataulen,
+ u_int32_t dataflags,
+ void *datadata,
+ u_int32_t datasize,
+ u_int32_t flags,
+ __dbc_put_reply *replyp,
+ int * freep)
+/* END __dbc_put_proc */
+{
+ Db *dbp;
+ Dbc *dbc;
+ ct_entry *dbc_ctp;
+ int ret;
+ DBTYPE dbtype;
+
+ ACTIVATE_CTP(dbc_ctp, dbccl_id, CT_CURSOR);
+ dbc = (Dbc *)dbc_ctp->ct_anyp;
+ dbp = (Db *)dbc_ctp->ct_parent->ct_anyp;
+
+ /* Set up key and data */
+ Dbt key(keydata, keysize);
+ key.set_dlen(keydlen);
+ key.set_ulen(keyulen);
+ key.set_doff(keydoff);
+ /*
+ * Ignore memory related flags on server.
+ */
+ key.set_flags(DB_DBT_MALLOC | (keyflags & DB_DBT_PARTIAL));
+
+ Dbt data(datadata, datasize);
+ data.set_dlen(datadlen);
+ data.set_ulen(dataulen);
+ data.set_doff(datadoff);
+ data.set_flags(dataflags);
+
+ /* Got all our stuff, now do the put */
+ ret = dbc->put(&key, &data, flags);
+
+ *freep = 0;
+ replyp->keydata.keydata_val = NULL;
+ replyp->keydata.keydata_len = 0;
+ if (ret == 0 && (flags == DB_AFTER || flags == DB_BEFORE)) {
+ ret = dbp->get_type(&dbtype);
+ if (ret == 0 && dbtype == DB_RECNO) {
+ /*
+ * We need to xdr_free whatever we are returning, next time.
+ */
+ replyp->keydata.keydata_val = (char *)key.get_data();
+ replyp->keydata.keydata_len = key.get_size();
+ }
+ }
+ replyp->status = ret;
+ return;
+}
+#endif /* HAVE_RPC */
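
The get, pget, and put procedures above all apply the same reply-ownership rule: when Berkeley DB hands back the very buffer that arrived in the request, the server deep-copies it into freshly allocated memory so that the later xdr_free() of the reply cannot release request storage; when DB malloc'd a new buffer, the reply takes it over directly. A minimal sketch of that rule, using a hypothetical helper name (copy_reply_field) in place of the repeated keydata/datadata handling, might look like this:

	/*
	 * Illustrative sketch only -- copy_reply_field is a hypothetical
	 * helper, not part of this patch.  It mirrors the key/data
	 * handling in the get, pget, and put routines above.
	 */
	static int
	copy_reply_field(DB_ENV *dbenv, Dbt *dbt, void *reqbuf,
	    char **valp, u_int32_t *lenp)
	{
		int ret;

		if (dbt->get_data() == reqbuf) {
			/* DB reused the request buffer: copy it for the reply. */
			if ((ret = __os_umalloc(dbenv, dbt->get_size(), valp)) != 0)
				return (ret);
			memcpy(*valp, dbt->get_data(), dbt->get_size());
		} else
			/* DB allocated fresh memory: the reply owns it as-is. */
			*valp = (char *)dbt->get_data();
		*lenp = dbt->get_size();
		return (0);
	}

On an allocation failure the callers above fall into their err: labels, free whatever DB returned, and NULL the reply fields so nothing is handed to XDR.
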
diff --git a/bdb/rpc_server/cxx/db_server_cxxutil.cpp b/bdb/rpc_server/cxx/db_server_cxxutil.cpp
new file mode 100644
index 00000000000..60865264c00
--- /dev/null
+++ b/bdb/rpc_server/cxx/db_server_cxxutil.cpp
@@ -0,0 +1,746 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: db_server_cxxutil.cpp,v 1.8 2002/05/23 07:49:34 mjc Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#if TIME_WITH_SYS_TIME
+#include <sys/time.h>
+#include <time.h>
+#else
+#if HAVE_SYS_TIME_H
+#include <sys/time.h>
+#else
+#include <time.h>
+#endif
+#endif
+
+#include <rpc/rpc.h>
+
+#include <limits.h>
+#include <signal.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#endif
+#include "dbinc_auto/db_server.h"
+
+#include "db_int.h"
+#include "db_cxx.h"
+#include "dbinc_auto/clib_ext.h"
+
+extern "C" {
+#include "dbinc/db_server_int.h"
+#include "dbinc_auto/rpc_server_ext.h"
+#include "dbinc_auto/common_ext.h"
+
+extern int __dbsrv_main __P((void));
+}
+
+static int add_home __P((char *));
+static int add_passwd __P((char *));
+static int env_recover __P((char *));
+static void __dbclear_child __P((ct_entry *));
+
+static LIST_HEAD(cthead, ct_entry) __dbsrv_head;
+static LIST_HEAD(homehead, home_entry) __dbsrv_home;
+static long __dbsrv_defto = DB_SERVER_TIMEOUT;
+static long __dbsrv_maxto = DB_SERVER_MAXTIMEOUT;
+static long __dbsrv_idleto = DB_SERVER_IDLETIMEOUT;
+static char *logfile = NULL;
+static char *prog;
+
+static void usage __P((char *));
+static void version_check __P((void));
+
+int __dbsrv_verbose = 0;
+
+int
+main(
+ int argc,
+ char **argv)
+{
+ extern char *optarg;
+ CLIENT *cl;
+ int ch, ret;
+ char *passwd;
+
+ prog = argv[0];
+
+ version_check();
+
+ /*
+ * Check whether another server is running or not. There
+ * is a race condition where two servers could be racing to
+ * register with the portmapper. The goal of this check is to
+ * forbid running additional servers (like those started from
+ * the test suite) if the user is already running one.
+ *
+ * XXX
+	 * This does not prevent two servers from being
+ * started at the same time and running recovery at the same
+ * time on the same environments.
+ */
+ if ((cl = clnt_create("localhost",
+ DB_RPC_SERVERPROG, DB_RPC_SERVERVERS, "tcp")) != NULL) {
+ fprintf(stderr,
+ "%s: Berkeley DB RPC server already running.\n", prog);
+ clnt_destroy(cl);
+ return (EXIT_FAILURE);
+ }
+
+ LIST_INIT(&__dbsrv_home);
+ while ((ch = getopt(argc, argv, "h:I:L:P:t:T:Vv")) != EOF)
+ switch (ch) {
+ case 'h':
+ (void)add_home(optarg);
+ break;
+ case 'I':
+ if (__db_getlong(NULL, prog,
+ optarg, 1, LONG_MAX, &__dbsrv_idleto))
+ return (EXIT_FAILURE);
+ break;
+ case 'L':
+ logfile = optarg;
+ break;
+ case 'P':
+ passwd = strdup(optarg);
+ memset(optarg, 0, strlen(optarg));
+ if (passwd == NULL) {
+ fprintf(stderr, "%s: strdup: %s\n",
+ prog, strerror(errno));
+ return (EXIT_FAILURE);
+ }
+ if ((ret = add_passwd(passwd)) != 0) {
+ fprintf(stderr, "%s: strdup: %s\n",
+ prog, strerror(ret));
+ return (EXIT_FAILURE);
+ }
+ break;
+ case 't':
+ if (__db_getlong(NULL, prog,
+ optarg, 1, LONG_MAX, &__dbsrv_defto))
+ return (EXIT_FAILURE);
+ break;
+ case 'T':
+ if (__db_getlong(NULL, prog,
+ optarg, 1, LONG_MAX, &__dbsrv_maxto))
+ return (EXIT_FAILURE);
+ break;
+ case 'V':
+ printf("%s\n", db_version(NULL, NULL, NULL));
+ return (EXIT_SUCCESS);
+ case 'v':
+ __dbsrv_verbose = 1;
+ break;
+ default:
+ usage(prog);
+ }
+ /*
+ * Check default timeout against maximum timeout
+ */
+ if (__dbsrv_defto > __dbsrv_maxto)
+ __dbsrv_defto = __dbsrv_maxto;
+
+ /*
+	 * Check default timeout against idle timeout.
+ * It would be bad to timeout environments sooner than txns.
+ */
+ if (__dbsrv_defto > __dbsrv_idleto)
+ fprintf(stderr,
+ "%s: WARNING: Idle timeout %ld is less than resource timeout %ld\n",
+ prog, __dbsrv_idleto, __dbsrv_defto);
+
+ LIST_INIT(&__dbsrv_head);
+
+ /*
+ * If a client crashes during an RPC, our reply to it
+ * generates a SIGPIPE. Ignore SIGPIPE so we don't exit unnecessarily.
+ */
+#ifdef SIGPIPE
+ signal(SIGPIPE, SIG_IGN);
+#endif
+
+ if (logfile != NULL && __db_util_logset("berkeley_db_svc", logfile))
+ return (EXIT_FAILURE);
+
+ /*
+ * Now that we are ready to start, run recovery on all the
+ * environments specified.
+ */
+ if (env_recover(prog) != 0)
+ return (EXIT_FAILURE);
+
+ /*
+ * We've done our setup, now call the generated server loop
+ */
+ if (__dbsrv_verbose)
+ printf("%s: Ready to receive requests\n", prog);
+ __dbsrv_main();
+
+ /* NOTREACHED */
+ abort();
+}
+
+static void
+usage(char *prog)
+{
+ fprintf(stderr, "usage: %s %s\n\t%s\n", prog,
+ "[-Vv] [-h home] [-P passwd]",
+ "[-I idletimeout] [-L logfile] [-t def_timeout] [-T maxtimeout]");
+ exit(EXIT_FAILURE);
+}
+
+static void
+version_check()
+{
+ int v_major, v_minor, v_patch;
+
+ /* Make sure we're loaded with the right version of the DB library. */
+ (void)db_version(&v_major, &v_minor, &v_patch);
+ if (v_major != DB_VERSION_MAJOR ||
+ v_minor != DB_VERSION_MINOR || v_patch != DB_VERSION_PATCH) {
+ fprintf(stderr,
+ "%s: version %d.%d.%d doesn't match library version %d.%d.%d\n",
+ prog, DB_VERSION_MAJOR, DB_VERSION_MINOR,
+ DB_VERSION_PATCH, v_major, v_minor, v_patch);
+ exit(EXIT_FAILURE);
+ }
+}
+
+extern "C" void
+__dbsrv_settimeout(
+ ct_entry *ctp,
+ u_int32_t to)
+{
+ if (to > (u_int32_t)__dbsrv_maxto)
+ ctp->ct_timeout = __dbsrv_maxto;
+ else if (to <= 0)
+ ctp->ct_timeout = __dbsrv_defto;
+ else
+ ctp->ct_timeout = to;
+}
+
+extern "C" void
+__dbsrv_timeout(int force)
+{
+ static long to_hint = -1;
+ time_t t;
+ long to;
+ ct_entry *ctp, *nextctp;
+
+ if ((t = time(NULL)) == -1)
+ return;
+
+ /*
+ * Check hint. If hint is further in the future
+ * than now, no work to do.
+ */
+ if (!force && to_hint > 0 && t < to_hint)
+ return;
+ to_hint = -1;
+ /*
+ * Timeout transactions or cursors holding DB resources.
+ * Do this before timing out envs to properly release resources.
+ *
+ * !!!
+ * We can just loop through this list looking for cursors and txns.
+ * We do not need to verify txn and cursor relationships at this
+ * point because we maintain the list in LIFO order *and* we
+ * maintain activity in the ultimate txn parent of any cursor
+ * so either everything in a txn is timing out, or nothing.
+ * So, since we are LIFO, we will correctly close/abort all the
+ * appropriate handles, in the correct order.
+ */
+ for (ctp = LIST_FIRST(&__dbsrv_head); ctp != NULL; ctp = nextctp) {
+ nextctp = LIST_NEXT(ctp, entries);
+ switch (ctp->ct_type) {
+ case CT_TXN:
+ to = *(ctp->ct_activep) + ctp->ct_timeout;
+ /* TIMEOUT */
+ if (to < t) {
+ if (__dbsrv_verbose)
+ printf("Timing out txn id %ld\n",
+ ctp->ct_id);
+ (void)((DbTxn *)ctp->ct_anyp)->abort();
+ __dbdel_ctp(ctp);
+ /*
+				 * If we timed out a txn, we may have closed
+ * all sorts of ctp's.
+ * So start over with a guaranteed good ctp.
+ */
+ nextctp = LIST_FIRST(&__dbsrv_head);
+ } else if ((to_hint > 0 && to_hint > to) ||
+ to_hint == -1)
+ to_hint = to;
+ break;
+ case CT_CURSOR:
+ case (CT_JOINCUR | CT_CURSOR):
+ to = *(ctp->ct_activep) + ctp->ct_timeout;
+ /* TIMEOUT */
+ if (to < t) {
+ if (__dbsrv_verbose)
+ printf("Timing out cursor %ld\n",
+ ctp->ct_id);
+ (void)__dbc_close_int(ctp);
+ /*
+ * Start over with a guaranteed good ctp.
+ */
+ nextctp = LIST_FIRST(&__dbsrv_head);
+ } else if ((to_hint > 0 && to_hint > to) ||
+ to_hint == -1)
+ to_hint = to;
+ break;
+ default:
+ break;
+ }
+ }
+ /*
+ * Timeout idle handles.
+ * If we are forcing a timeout, we'll close all env handles.
+ */
+ for (ctp = LIST_FIRST(&__dbsrv_head); ctp != NULL; ctp = nextctp) {
+ nextctp = LIST_NEXT(ctp, entries);
+ if (ctp->ct_type != CT_ENV)
+ continue;
+ to = *(ctp->ct_activep) + ctp->ct_idle;
+ /* TIMEOUT */
+ if (to < t || force) {
+ if (__dbsrv_verbose)
+ printf("Timing out env id %ld\n", ctp->ct_id);
+ (void)__dbenv_close_int(ctp->ct_id, 0, 1);
+ /*
+ * If we timed out an env, we may have closed
+			 * all sorts of ctp's (maybe even all of them).
+ * So start over with a guaranteed good ctp.
+ */
+ nextctp = LIST_FIRST(&__dbsrv_head);
+ }
+ }
+}
+
+/*
+ * RECURSIVE FUNCTION. We need to clear/free any number of levels of nested
+ * layers.
+ */
+static void
+__dbclear_child(ct_entry *parent)
+{
+ ct_entry *ctp, *nextctp;
+
+ for (ctp = LIST_FIRST(&__dbsrv_head); ctp != NULL;
+ ctp = nextctp) {
+ nextctp = LIST_NEXT(ctp, entries);
+ if (ctp->ct_type == 0)
+ continue;
+ if (ctp->ct_parent == parent) {
+ __dbclear_child(ctp);
+ /*
+ * Need to do this here because le_next may
+ * have changed with the recursive call and we
+ * don't want to point to a removed entry.
+ */
+ nextctp = LIST_NEXT(ctp, entries);
+ __dbclear_ctp(ctp);
+ }
+ }
+}
+
+extern "C" void
+__dbclear_ctp(ct_entry *ctp)
+{
+ LIST_REMOVE(ctp, entries);
+ __os_free(NULL, ctp);
+}
+
+extern "C" void
+__dbdel_ctp(ct_entry *parent)
+{
+ __dbclear_child(parent);
+ __dbclear_ctp(parent);
+}
+
+extern "C" ct_entry *
+new_ct_ent(int *errp)
+{
+ time_t t;
+ ct_entry *ctp, *octp;
+ int ret;
+
+ if ((ret = __os_malloc(NULL, sizeof(ct_entry), &ctp)) != 0) {
+ *errp = ret;
+ return (NULL);
+ }
+ memset(ctp, 0, sizeof(ct_entry));
+	/*
+	 * Get the time as the ID.  We may service more than one request
+	 * per second, however; if we do, increment the ID value until
+	 * we find an unused one.  New entries are inserted at the head
+	 * of the list (LIFO order), so if the head entry doesn't match,
+	 * we know for certain that we can use our entry.
+	 */
+ if ((t = time(NULL)) == -1) {
+ *errp = __os_get_errno();
+ __os_free(NULL, ctp);
+ return (NULL);
+ }
+ octp = LIST_FIRST(&__dbsrv_head);
+ if (octp != NULL && octp->ct_id >= t)
+ t = octp->ct_id + 1;
+ ctp->ct_id = t;
+ ctp->ct_idle = __dbsrv_idleto;
+ ctp->ct_activep = &ctp->ct_active;
+ ctp->ct_origp = NULL;
+ ctp->ct_refcount = 1;
+
+ LIST_INSERT_HEAD(&__dbsrv_head, ctp, entries);
+ return (ctp);
+}
+
+extern "C" ct_entry *
+get_tableent(long id)
+{
+ ct_entry *ctp;
+
+ for (ctp = LIST_FIRST(&__dbsrv_head); ctp != NULL;
+ ctp = LIST_NEXT(ctp, entries))
+ if (ctp->ct_id == id)
+ return (ctp);
+ return (NULL);
+}
+
+extern "C" ct_entry *
+__dbsrv_sharedb(ct_entry *db_ctp, const char *name, const char *subdb, DBTYPE type, u_int32_t flags)
+{
+ ct_entry *ctp;
+
+ /*
+ * Check if we can share a db handle. Criteria for sharing are:
+ * If any of the non-sharable flags are set, we cannot share.
+ * Must be a db ctp, obviously.
+ * Must share the same env parent.
+ * Must be the same type, or current one DB_UNKNOWN.
+ * Must be same byteorder, or current one must not care.
+ * All flags must match.
+ * Must be same name, but don't share in-memory databases.
+ * Must be same subdb name.
+ */
+ if (flags & DB_SERVER_DBNOSHARE)
+ return (NULL);
+ for (ctp = LIST_FIRST(&__dbsrv_head); ctp != NULL;
+ ctp = LIST_NEXT(ctp, entries)) {
+ /*
+ * Skip ourselves.
+ */
+ if (ctp == db_ctp)
+ continue;
+ if (ctp->ct_type != CT_DB)
+ continue;
+ if (ctp->ct_envparent != db_ctp->ct_envparent)
+ continue;
+ if (type != DB_UNKNOWN && ctp->ct_dbdp.type != type)
+ continue;
+ if (ctp->ct_dbdp.dbflags != LF_ISSET(DB_SERVER_DBFLAGS))
+ continue;
+ if (db_ctp->ct_dbdp.setflags != 0 &&
+ ctp->ct_dbdp.setflags != db_ctp->ct_dbdp.setflags)
+ continue;
+ if (name == NULL || ctp->ct_dbdp.db == NULL ||
+ strcmp(name, ctp->ct_dbdp.db) != 0)
+ continue;
+ if (subdb != ctp->ct_dbdp.subdb &&
+ (subdb == NULL || ctp->ct_dbdp.subdb == NULL ||
+ strcmp(subdb, ctp->ct_dbdp.subdb) != 0))
+ continue;
+ /*
+ * If we get here, then we match.
+ */
+ ctp->ct_refcount++;
+ return (ctp);
+ }
+
+ return (NULL);
+}
+
+extern "C" ct_entry *
+__dbsrv_shareenv(ct_entry *env_ctp, home_entry *home, u_int32_t flags)
+{
+ ct_entry *ctp;
+
+ /*
+ * Check if we can share an env. Criteria for sharing are:
+ * Must be an env ctp, obviously.
+ * Must share the same home env.
+ * All flags must match.
+ */
+ for (ctp = LIST_FIRST(&__dbsrv_head); ctp != NULL;
+ ctp = LIST_NEXT(ctp, entries)) {
+ /*
+ * Skip ourselves.
+ */
+ if (ctp == env_ctp)
+ continue;
+ if (ctp->ct_type != CT_ENV)
+ continue;
+ if (ctp->ct_envdp.home != home)
+ continue;
+ if (ctp->ct_envdp.envflags != flags)
+ continue;
+ if (ctp->ct_envdp.onflags != env_ctp->ct_envdp.onflags)
+ continue;
+ if (ctp->ct_envdp.offflags != env_ctp->ct_envdp.offflags)
+ continue;
+ /*
+ * If we get here, then we match. The only thing left to
+ * check is the timeout. Since the server timeout set by
+ * the client is a hint, for sharing we'll give them the
+ * benefit of the doubt and grant them the longer timeout.
+ */
+ if (ctp->ct_timeout < env_ctp->ct_timeout)
+ ctp->ct_timeout = env_ctp->ct_timeout;
+ ctp->ct_refcount++;
+ return (ctp);
+ }
+
+ return (NULL);
+}
+
+extern "C" void
+__dbsrv_active(ct_entry *ctp)
+{
+ time_t t;
+ ct_entry *envctp;
+
+ if (ctp == NULL)
+ return;
+ if ((t = time(NULL)) == -1)
+ return;
+ *(ctp->ct_activep) = t;
+ if ((envctp = ctp->ct_envparent) == NULL)
+ return;
+ *(envctp->ct_activep) = t;
+ return;
+}
+
+extern "C" int
+__db_close_int(long id, u_int32_t flags)
+{
+ Db *dbp;
+ int ret;
+ ct_entry *ctp;
+
+ ret = 0;
+ ctp = get_tableent(id);
+ if (ctp == NULL)
+ return (DB_NOSERVER_ID);
+ DB_ASSERT(ctp->ct_type == CT_DB);
+ if (__dbsrv_verbose && ctp->ct_refcount != 1)
+ printf("Deref'ing dbp id %ld, refcount %d\n",
+ id, ctp->ct_refcount);
+ if (--ctp->ct_refcount != 0)
+ return (ret);
+ dbp = ctp->ct_dbp;
+ if (__dbsrv_verbose)
+ printf("Closing dbp id %ld\n", id);
+
+ ret = dbp->close(flags);
+ __dbdel_ctp(ctp);
+ return (ret);
+}
+
+extern "C" int
+__dbc_close_int(ct_entry *dbc_ctp)
+{
+ Dbc *dbc;
+ int ret;
+ ct_entry *ctp;
+
+ dbc = (Dbc *)dbc_ctp->ct_anyp;
+
+ ret = dbc->close();
+ /*
+ * If this cursor is a join cursor then we need to fix up the
+ * cursors that it was joined from so that they are independent again.
+ */
+ if (dbc_ctp->ct_type & CT_JOINCUR)
+ for (ctp = LIST_FIRST(&__dbsrv_head); ctp != NULL;
+ ctp = LIST_NEXT(ctp, entries)) {
+ /*
+ * Test if it is a join cursor, and if it is part
+ * of this one.
+ */
+ if ((ctp->ct_type & CT_JOIN) &&
+ ctp->ct_activep == &dbc_ctp->ct_active) {
+ ctp->ct_type &= ~CT_JOIN;
+ ctp->ct_activep = ctp->ct_origp;
+ __dbsrv_active(ctp);
+ }
+ }
+ __dbclear_ctp(dbc_ctp);
+ return (ret);
+
+}
+
+extern "C" int
+__dbenv_close_int(long id, u_int32_t flags, int force)
+{
+ DbEnv *dbenv;
+ int ret;
+ ct_entry *ctp;
+
+ ret = 0;
+ ctp = get_tableent(id);
+ if (ctp == NULL)
+ return (DB_NOSERVER_ID);
+ DB_ASSERT(ctp->ct_type == CT_ENV);
+ if (__dbsrv_verbose && ctp->ct_refcount != 1)
+ printf("Deref'ing env id %ld, refcount %d\n",
+ id, ctp->ct_refcount);
+ /*
+ * If we are timing out, we need to force the close, no matter
+ * what the refcount.
+ */
+ if (--ctp->ct_refcount != 0 && !force)
+ return (ret);
+ dbenv = ctp->ct_envp;
+ if (__dbsrv_verbose)
+ printf("Closing env id %ld\n", id);
+
+ ret = dbenv->close(flags);
+ __dbdel_ctp(ctp);
+ return (ret);
+}
+
+static int
+add_home(char *home)
+{
+ home_entry *hp, *homep;
+ int ret;
+
+ if ((ret = __os_malloc(NULL, sizeof(home_entry), &hp)) != 0)
+ return (ret);
+ if ((ret = __os_malloc(NULL, strlen(home)+1, &hp->home)) != 0)
+ return (ret);
+ memcpy(hp->home, home, strlen(home)+1);
+ hp->dir = home;
+ hp->passwd = NULL;
+ /*
+ * This loop is to remove any trailing path separators,
+ * to assure hp->name points to the last component.
+ */
+ hp->name = __db_rpath(home);
+ *(hp->name) = '\0';
+ hp->name++;
+ while (*(hp->name) == '\0') {
+ hp->name = __db_rpath(home);
+ *(hp->name) = '\0';
+ hp->name++;
+ }
+	/*
+	 * Before adding it to the list, make sure there are no
+	 * identical names.
+	 */
+ for (homep = LIST_FIRST(&__dbsrv_home); homep != NULL;
+ homep = LIST_NEXT(homep, entries))
+ if (strcmp(homep->name, hp->name) == 0) {
+ printf("Already added home name %s, at directory %s\n",
+ hp->name, homep->dir);
+ return (-1);
+ }
+ LIST_INSERT_HEAD(&__dbsrv_home, hp, entries);
+ if (__dbsrv_verbose)
+ printf("Added home %s in dir %s\n", hp->name, hp->dir);
+ return (0);
+}
+
+static int
+add_passwd(char *passwd)
+{
+ home_entry *hp;
+
+ /*
+ * We add the passwd to the last given home dir. If there
+ * isn't a home dir, or the most recent one already has a
+ * passwd, then there is a user error.
+ */
+ hp = LIST_FIRST(&__dbsrv_home);
+ if (hp == NULL || hp->passwd != NULL)
+ return (EINVAL);
+ /*
+ * We've already strdup'ed the passwd above, so we don't need
+ * to malloc new space, just point to it.
+ */
+ hp->passwd = passwd;
+ return (0);
+}
+
+extern "C" home_entry *
+get_home(char *name)
+{
+ home_entry *hp;
+
+ for (hp = LIST_FIRST(&__dbsrv_home); hp != NULL;
+ hp = LIST_NEXT(hp, entries))
+ if (strcmp(name, hp->name) == 0)
+ return (hp);
+ return (NULL);
+}
+
+static int
+env_recover(char *progname)
+{
+ DbEnv *dbenv;
+ home_entry *hp;
+ u_int32_t flags;
+ int exitval, ret;
+
+ for (hp = LIST_FIRST(&__dbsrv_home); hp != NULL;
+ hp = LIST_NEXT(hp, entries)) {
+ exitval = 0;
+ dbenv = new DbEnv(DB_CXX_NO_EXCEPTIONS);
+ if (__dbsrv_verbose == 1) {
+ (void)dbenv->set_verbose(DB_VERB_RECOVERY, 1);
+ (void)dbenv->set_verbose(DB_VERB_CHKPOINT, 1);
+ }
+ dbenv->set_errfile(stderr);
+ dbenv->set_errpfx(progname);
+ if (hp->passwd != NULL)
+ (void)dbenv->set_encrypt(hp->passwd, DB_ENCRYPT_AES);
+
+ /*
+ * Initialize the env with DB_RECOVER. That is all we
+ * have to do to run recovery.
+ */
+ if (__dbsrv_verbose)
+ printf("Running recovery on %s\n", hp->home);
+ flags = DB_CREATE | DB_INIT_LOCK | DB_INIT_LOG | DB_INIT_MPOOL |
+ DB_INIT_TXN | DB_USE_ENVIRON | DB_RECOVER;
+ if ((ret = dbenv->open(hp->home, flags, 0)) != 0) {
+ dbenv->err(ret, "DbEnv->open");
+ goto error;
+ }
+
+ if (0) {
+error: exitval = 1;
+ }
+ if ((ret = dbenv->close(0)) != 0) {
+ exitval = 1;
+ fprintf(stderr, "%s: dbenv->close: %s\n",
+ progname, db_strerror(ret));
+ }
+ if (exitval)
+ return (exitval);
+ }
+ return (0);
+}
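
One detail worth noting from new_ct_ent() above: handle IDs are derived from the current time, and because new entries are inserted at the head of the list, only the head needs to be examined to keep IDs unique when several requests arrive within the same second. A tiny sketch of that rule (next_ct_id and head_id are illustrative names, not part of the patch):

	/*
	 * Sketch only: condenses the ID selection in new_ct_ent() above.
	 * Assumes the list is non-empty; new_ct_ent() simply uses the
	 * current time when the list is empty.
	 */
	static long
	next_ct_id(long head_id, time_t now)
	{
		/* The newest entry sits at the head, so one comparison suffices. */
		return (head_id >= (long)now ? head_id + 1 : (long)now);
	}
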
diff --git a/bdb/rpc_server/db_server.sed b/bdb/rpc_server/db_server.sed
deleted file mode 100644
index f028f778e21..00000000000
--- a/bdb/rpc_server/db_server.sed
+++ /dev/null
@@ -1,5 +0,0 @@
-1i\
-\#include "db_config.h"\
-\#ifdef HAVE_RPC
-$a\
-\#endif /* HAVE_RPC */
diff --git a/bdb/rpc_server/db_server_proc.c b/bdb/rpc_server/db_server_proc.c
deleted file mode 100644
index 108a00fb371..00000000000
--- a/bdb/rpc_server/db_server_proc.c
+++ /dev/null
@@ -1,1546 +0,0 @@
-/*-
- * See the file LICENSE for redistribution information.
- *
- * Copyright (c) 2000
- * Sleepycat Software. All rights reserved.
- */
-
-#include "db_config.h"
-
-#ifdef HAVE_RPC
-#ifndef lint
-static const char revid[] = "$Id: db_server_proc.c,v 1.48 2001/01/06 16:08:01 sue Exp $";
-#endif /* not lint */
-
-#ifndef NO_SYSTEM_INCLUDES
-#include <sys/types.h>
-
-#include <rpc/rpc.h>
-
-#include <string.h>
-#endif
-#include "db_server.h"
-
-#include "db_int.h"
-#include "db_server_int.h"
-#include "rpc_server_ext.h"
-
-static int __db_stats_list __P((DB_ENV *,
- __db_stat_statsreplist **, u_int32_t *, int));
-
-/* BEGIN __env_cachesize_1_proc */
-void
-__env_cachesize_1_proc(dbenvcl_id, gbytes, bytes,
- ncache, replyp)
- long dbenvcl_id;
- u_int32_t gbytes;
- u_int32_t bytes;
- u_int32_t ncache;
- __env_cachesize_reply *replyp;
-/* END __env_cachesize_1_proc */
-{
- int ret;
- DB_ENV * dbenv;
- ct_entry *dbenv_ctp;
-
- ACTIVATE_CTP(dbenv_ctp, dbenvcl_id, CT_ENV);
- dbenv = (DB_ENV *)dbenv_ctp->ct_anyp;
-
- ret = dbenv->set_cachesize(dbenv, gbytes, bytes, ncache);
-
- replyp->status = ret;
- return;
-}
-
-/* BEGIN __env_close_1_proc */
-void
-__env_close_1_proc(dbenvcl_id, flags, replyp)
- long dbenvcl_id;
- u_int32_t flags;
- __env_close_reply *replyp;
-/* END __env_close_1_proc */
-{
- replyp->status = __dbenv_close_int(dbenvcl_id, flags);
- return;
-}
-
-/* BEGIN __env_create_1_proc */
-void
-__env_create_1_proc(timeout, replyp)
- u_int32_t timeout;
- __env_create_reply *replyp;
-/* END __env_create_1_proc */
-{
- int ret;
- DB_ENV *dbenv;
- ct_entry *ctp;
-
- ctp = new_ct_ent(&replyp->status);
- if (ctp == NULL)
- return;
- if ((ret = db_env_create(&dbenv, 0)) == 0) {
- ctp->ct_envp = dbenv;
- ctp->ct_type = CT_ENV;
- ctp->ct_parent = NULL;
- ctp->ct_envparent = ctp;
- __dbsrv_settimeout(ctp, timeout);
- __dbsrv_active(ctp);
- replyp->envcl_id = ctp->ct_id;
- } else
- __dbclear_ctp(ctp);
-
- replyp->status = ret;
- return;
-}
-
-/* BEGIN __env_flags_1_proc */
-void
-__env_flags_1_proc(dbenvcl_id, flags, onoff, replyp)
- long dbenvcl_id;
- u_int32_t flags;
- u_int32_t onoff;
- __env_flags_reply *replyp;
-/* END __env_flags_1_proc */
-{
- int ret;
- DB_ENV * dbenv;
- ct_entry *dbenv_ctp;
-
- ACTIVATE_CTP(dbenv_ctp, dbenvcl_id, CT_ENV);
- dbenv = (DB_ENV *)dbenv_ctp->ct_anyp;
-
- ret = dbenv->set_flags(dbenv, flags, onoff);
-
- replyp->status = ret;
- return;
-}
-/* BEGIN __env_open_1_proc */
-void
-__env_open_1_proc(dbenvcl_id, home, flags,
- mode, replyp)
- long dbenvcl_id;
- char *home;
- u_int32_t flags;
- u_int32_t mode;
- __env_open_reply *replyp;
-/* END __env_open_1_proc */
-{
- int ret;
- DB_ENV * dbenv;
- ct_entry *dbenv_ctp;
- char *fullhome;
-
- ACTIVATE_CTP(dbenv_ctp, dbenvcl_id, CT_ENV);
- dbenv = (DB_ENV *)dbenv_ctp->ct_anyp;
- fullhome = get_home(home);
- if (fullhome == NULL) {
- replyp->status = DB_NOSERVER_HOME;
- return;
- }
-
- ret = dbenv->open(dbenv, fullhome, flags, mode);
- replyp->status = ret;
- return;
-}
-
-/* BEGIN __env_remove_1_proc */
-void
-__env_remove_1_proc(dbenvcl_id, home, flags, replyp)
- long dbenvcl_id;
- char *home;
- u_int32_t flags;
- __env_remove_reply *replyp;
-/* END __env_remove_1_proc */
-{
- int ret;
- DB_ENV * dbenv;
- ct_entry *dbenv_ctp;
- char *fullhome;
-
- ACTIVATE_CTP(dbenv_ctp, dbenvcl_id, CT_ENV);
- dbenv = (DB_ENV *)dbenv_ctp->ct_anyp;
- fullhome = get_home(home);
- if (fullhome == NULL) {
- replyp->status = DB_NOSERVER_HOME;
- return;
- }
-
- ret = dbenv->remove(dbenv, fullhome, flags);
- __dbdel_ctp(dbenv_ctp);
- replyp->status = ret;
- return;
-}
-
-/* BEGIN __txn_abort_1_proc */
-void
-__txn_abort_1_proc(txnpcl_id, replyp)
- long txnpcl_id;
- __txn_abort_reply *replyp;
-/* END __txn_abort_1_proc */
-{
- DB_TXN * txnp;
- ct_entry *txnp_ctp;
- int ret;
-
- ACTIVATE_CTP(txnp_ctp, txnpcl_id, CT_TXN);
- txnp = (DB_TXN *)txnp_ctp->ct_anyp;
-
- ret = txn_abort(txnp);
- __dbdel_ctp(txnp_ctp);
- replyp->status = ret;
- return;
-}
-
-/* BEGIN __txn_begin_1_proc */
-void
-__txn_begin_1_proc(envpcl_id, parentcl_id,
- flags, replyp)
- long envpcl_id;
- long parentcl_id;
- u_int32_t flags;
- __txn_begin_reply *replyp;
-/* END __txn_begin_1_proc */
-{
- int ret;
- DB_ENV * envp;
- ct_entry *envp_ctp;
- DB_TXN * parent;
- ct_entry *parent_ctp;
- DB_TXN *txnp;
- ct_entry *ctp;
-
- ACTIVATE_CTP(envp_ctp, envpcl_id, CT_ENV);
- envp = (DB_ENV *)envp_ctp->ct_anyp;
- parent_ctp = NULL;
-
- ctp = new_ct_ent(&replyp->status);
- if (ctp == NULL)
- return;
-
- if (parentcl_id != 0) {
- ACTIVATE_CTP(parent_ctp, parentcl_id, CT_TXN);
- parent = (DB_TXN *)parent_ctp->ct_anyp;
- ctp->ct_activep = parent_ctp->ct_activep;
- } else
- parent = NULL;
-
- ret = txn_begin(envp, parent, &txnp, flags);
- if (ret == 0) {
- ctp->ct_txnp = txnp;
- ctp->ct_type = CT_TXN;
- ctp->ct_parent = parent_ctp;
- ctp->ct_envparent = envp_ctp;
- replyp->txnidcl_id = ctp->ct_id;
- __dbsrv_settimeout(ctp, envp_ctp->ct_timeout);
- __dbsrv_active(ctp);
- } else
- __dbclear_ctp(ctp);
-
- replyp->status = ret;
- return;
-}
-
-/* BEGIN __txn_commit_1_proc */
-void
-__txn_commit_1_proc(txnpcl_id, flags, replyp)
- long txnpcl_id;
- u_int32_t flags;
- __txn_commit_reply *replyp;
-/* END __txn_commit_1_proc */
-{
- int ret;
- DB_TXN * txnp;
- ct_entry *txnp_ctp;
-
- ACTIVATE_CTP(txnp_ctp, txnpcl_id, CT_TXN);
- txnp = (DB_TXN *)txnp_ctp->ct_anyp;
-
- ret = txn_commit(txnp, flags);
- __dbdel_ctp(txnp_ctp);
-
- replyp->status = ret;
- return;
-}
-
-/* BEGIN __db_bt_maxkey_1_proc */
-void
-__db_bt_maxkey_1_proc(dbpcl_id, maxkey, replyp)
- long dbpcl_id;
- u_int32_t maxkey;
- __db_bt_maxkey_reply *replyp;
-/* END __db_bt_maxkey_1_proc */
-{
- int ret;
- DB * dbp;
- ct_entry *dbp_ctp;
-
- ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
- dbp = (DB *)dbp_ctp->ct_anyp;
-
- ret = dbp->set_bt_maxkey(dbp, maxkey);
-
- replyp->status = ret;
- return;
-}
-
-/* BEGIN __db_bt_minkey_1_proc */
-void
-__db_bt_minkey_1_proc(dbpcl_id, minkey, replyp)
- long dbpcl_id;
- u_int32_t minkey;
- __db_bt_minkey_reply *replyp;
-/* END __db_bt_minkey_1_proc */
-{
- int ret;
- DB * dbp;
- ct_entry *dbp_ctp;
-
- ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
- dbp = (DB *)dbp_ctp->ct_anyp;
-
- ret = dbp->set_bt_minkey(dbp, minkey);
-
- replyp->status = ret;
- return;
-}
-
-/* BEGIN __db_close_1_proc */
-void
-__db_close_1_proc(dbpcl_id, flags, replyp)
- long dbpcl_id;
- u_int32_t flags;
- __db_close_reply *replyp;
-/* END __db_close_1_proc */
-{
- int ret;
- DB * dbp;
- ct_entry *dbp_ctp;
-
- ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
- dbp = (DB *)dbp_ctp->ct_anyp;
-
- ret = dbp->close(dbp, flags);
- __dbdel_ctp(dbp_ctp);
-
- replyp-> status= ret;
- return;
-}
-
-/* BEGIN __db_create_1_proc */
-void
-__db_create_1_proc(flags, envpcl_id, replyp)
- u_int32_t flags;
- long envpcl_id;
- __db_create_reply *replyp;
-/* END __db_create_1_proc */
-{
- int ret;
- DB_ENV * envp;
- DB *dbp;
- ct_entry *envp_ctp, *dbp_ctp;
-
- ACTIVATE_CTP(envp_ctp, envpcl_id, CT_ENV);
- envp = (DB_ENV *)envp_ctp->ct_anyp;
-
- dbp_ctp = new_ct_ent(&replyp->status);
- if (dbp_ctp == NULL)
- return ;
- /*
- * We actually require env's for databases. The client should
- * have caught it, but just in case.
- */
- DB_ASSERT(envp != NULL);
- if ((ret = db_create(&dbp, envp, flags)) == 0) {
- dbp_ctp->ct_dbp = dbp;
- dbp_ctp->ct_type = CT_DB;
- dbp_ctp->ct_parent = envp_ctp;
- dbp_ctp->ct_envparent = envp_ctp;
- replyp->dbpcl_id = dbp_ctp->ct_id;
- } else
- __dbclear_ctp(dbp_ctp);
- replyp->status = ret;
- return;
-}
-
-/* BEGIN __db_del_1_proc */
-void
-__db_del_1_proc(dbpcl_id, txnpcl_id, keydlen,
- keydoff, keyflags, keydata, keysize,
- flags, replyp)
- long dbpcl_id;
- long txnpcl_id;
- u_int32_t keydlen;
- u_int32_t keydoff;
- u_int32_t keyflags;
- void *keydata;
- u_int32_t keysize;
- u_int32_t flags;
- __db_del_reply *replyp;
-/* END __db_del_1_proc */
-{
- int ret;
- DB * dbp;
- ct_entry *dbp_ctp;
- DB_TXN * txnp;
- ct_entry *txnp_ctp;
- DBT key;
-
- ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
- dbp = (DB *)dbp_ctp->ct_anyp;
- if (txnpcl_id != 0) {
- ACTIVATE_CTP(txnp_ctp, txnpcl_id, CT_TXN);
- txnp = (DB_TXN *)txnp_ctp->ct_anyp;
- } else
- txnp = NULL;
-
- memset(&key, 0, sizeof(key));
-
- /* Set up key DBT */
- key.dlen = keydlen;
- key.doff = keydoff;
- key.flags = keyflags;
- key.size = keysize;
- key.data = keydata;
-
- ret = dbp->del(dbp, txnp, &key, flags);
-
- replyp->status = ret;
- return;
-}
-
-/* BEGIN __db_extentsize_1_proc */
-void
-__db_extentsize_1_proc(dbpcl_id, extentsize, replyp)
- long dbpcl_id;
- u_int32_t extentsize;
- __db_extentsize_reply *replyp;
-/* END __db_extentsize_1_proc */
-{
- int ret;
- DB * dbp;
- ct_entry *dbp_ctp;
-
- ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
- dbp = (DB *)dbp_ctp->ct_anyp;
-
- ret = dbp->set_q_extentsize(dbp, extentsize);
-
- replyp->status = ret;
- return;
-}
-
-/* BEGIN __db_flags_1_proc */
-void
-__db_flags_1_proc(dbpcl_id, flags, replyp)
- long dbpcl_id;
- u_int32_t flags;
- __db_flags_reply *replyp;
-/* END __db_flags_1_proc */
-{
- int ret;
- DB * dbp;
- ct_entry *dbp_ctp;
-
- ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
- dbp = (DB *)dbp_ctp->ct_anyp;
-
- ret = dbp->set_flags(dbp, flags);
-
- replyp->status = ret;
- return;
-}
-
-/* BEGIN __db_get_1_proc */
-void
-__db_get_1_proc(dbpcl_id, txnpcl_id, keydlen,
- keydoff, keyflags, keydata, keysize,
- datadlen, datadoff, dataflags, datadata,
- datasize, flags, replyp, freep)
- long dbpcl_id;
- long txnpcl_id;
- u_int32_t keydlen;
- u_int32_t keydoff;
- u_int32_t keyflags;
- void *keydata;
- u_int32_t keysize;
- u_int32_t datadlen;
- u_int32_t datadoff;
- u_int32_t dataflags;
- void *datadata;
- u_int32_t datasize;
- u_int32_t flags;
- __db_get_reply *replyp;
- int * freep;
-/* END __db_get_1_proc */
-{
- int key_alloc, ret;
- DB * dbp;
- ct_entry *dbp_ctp;
- DB_TXN * txnp;
- ct_entry *txnp_ctp;
- DBT key, data;
-
- ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
- dbp = (DB *)dbp_ctp->ct_anyp;
- if (txnpcl_id != 0) {
- ACTIVATE_CTP(txnp_ctp, txnpcl_id, CT_TXN);
- txnp = (DB_TXN *)txnp_ctp->ct_anyp;
- } else
- txnp = NULL;
-
- *freep = 0;
- memset(&key, 0, sizeof(key));
- memset(&data, 0, sizeof(data));
-
- /* Set up key and data DBT */
- key.dlen = keydlen;
- key.doff = keydoff;
- /*
- * Ignore memory related flags on server.
- */
- key.flags = DB_DBT_MALLOC;
- if (keyflags & DB_DBT_PARTIAL)
- key.flags |= DB_DBT_PARTIAL;
- key.size = keysize;
- key.data = keydata;
-
- data.dlen = datadlen;
- data.doff = datadoff;
- /*
- * Ignore memory related flags on server.
- */
- data.flags = DB_DBT_MALLOC;
- if (dataflags & DB_DBT_PARTIAL)
- data.flags |= DB_DBT_PARTIAL;
- data.size = datasize;
- data.data = datadata;
-
- /* Got all our stuff, now do the get */
- ret = dbp->get(dbp, txnp, &key, &data, flags);
-	/*
-	 * On success, return the key and data; otherwise just the status.
-	 */
- if (ret == 0) {
- /*
- * XXX
- * We need to xdr_free whatever we are returning, next time.
- * However, DB does not allocate a new key if one was given
- * and we'd be free'ing up space allocated in the request.
- * So, allocate a new key/data pointer if it is the same one
- * as in the request.
- */
- *freep = 1;
- /*
- * Key
- */
- key_alloc = 0;
- if (key.data == keydata) {
- ret = __os_malloc(dbp->dbenv,
- key.size, NULL, &replyp->keydata.keydata_val);
- if (ret != 0) {
- __os_free(key.data, key.size);
- __os_free(data.data, data.size);
- goto err;
- }
- key_alloc = 1;
- memcpy(replyp->keydata.keydata_val, key.data, key.size);
- } else
- replyp->keydata.keydata_val = key.data;
-
- replyp->keydata.keydata_len = key.size;
-
- /*
- * Data
- */
- if (data.data == datadata) {
- ret = __os_malloc(dbp->dbenv,
- data.size, NULL, &replyp->datadata.datadata_val);
- if (ret != 0) {
- __os_free(key.data, key.size);
- __os_free(data.data, data.size);
- if (key_alloc)
- __os_free(replyp->keydata.keydata_val,
- key.size);
- goto err;
- }
- memcpy(replyp->datadata.datadata_val, data.data,
- data.size);
- } else
- replyp->datadata.datadata_val = data.data;
- replyp->datadata.datadata_len = data.size;
- } else {
-err: replyp->keydata.keydata_val = NULL;
- replyp->keydata.keydata_len = 0;
- replyp->datadata.datadata_val = NULL;
- replyp->datadata.datadata_len = 0;
- *freep = 0;
- }
- replyp->status = ret;
- return;
-}
-
-/* BEGIN __db_h_ffactor_1_proc */
-void
-__db_h_ffactor_1_proc(dbpcl_id, ffactor, replyp)
- long dbpcl_id;
- u_int32_t ffactor;
- __db_h_ffactor_reply *replyp;
-/* END __db_h_ffactor_1_proc */
-{
- int ret;
- DB * dbp;
- ct_entry *dbp_ctp;
-
- ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
- dbp = (DB *)dbp_ctp->ct_anyp;
-
- ret = dbp->set_h_ffactor(dbp, ffactor);
-
- replyp->status = ret;
- return;
-}
-
-/* BEGIN __db_h_nelem_1_proc */
-void
-__db_h_nelem_1_proc(dbpcl_id, nelem, replyp)
- long dbpcl_id;
- u_int32_t nelem;
- __db_h_nelem_reply *replyp;
-/* END __db_h_nelem_1_proc */
-{
- int ret;
- DB * dbp;
- ct_entry *dbp_ctp;
-
- ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
- dbp = (DB *)dbp_ctp->ct_anyp;
-
- ret = dbp->set_h_nelem(dbp, nelem);
-
- replyp->status = ret;
- return;
-}
-
-/* BEGIN __db_key_range_1_proc */
-void
-__db_key_range_1_proc(dbpcl_id, txnpcl_id, keydlen,
- keydoff, keyflags, keydata, keysize,
- flags, replyp)
- long dbpcl_id;
- long txnpcl_id;
- u_int32_t keydlen;
- u_int32_t keydoff;
- u_int32_t keyflags;
- void *keydata;
- u_int32_t keysize;
- u_int32_t flags;
- __db_key_range_reply *replyp;
-/* END __db_key_range_1_proc */
-{
- int ret;
- DB * dbp;
- ct_entry *dbp_ctp;
- DB_TXN * txnp;
- ct_entry *txnp_ctp;
- DBT key;
- DB_KEY_RANGE range;
-
- ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
- dbp = (DB *)dbp_ctp->ct_anyp;
- if (txnpcl_id != 0) {
- ACTIVATE_CTP(txnp_ctp, txnpcl_id, CT_TXN);
- txnp = (DB_TXN *)txnp_ctp->ct_anyp;
- } else
- txnp = NULL;
-
- memset(&key, 0, sizeof(key));
- /* Set up key and data DBT */
- key.dlen = keydlen;
- key.doff = keydoff;
- key.size = keysize;
- key.data = keydata;
- key.flags = keyflags;
-
- ret = dbp->key_range(dbp, txnp, &key, &range, flags);
-
- replyp->status = ret;
- replyp->less = range.less;
- replyp->equal = range.equal;
- replyp->greater = range.greater;
- return;
-}
-
-/* BEGIN __db_lorder_1_proc */
-void
-__db_lorder_1_proc(dbpcl_id, lorder, replyp)
- long dbpcl_id;
- u_int32_t lorder;
- __db_lorder_reply *replyp;
-/* END __db_lorder_1_proc */
-{
- int ret;
- DB * dbp;
- ct_entry *dbp_ctp;
-
- ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
- dbp = (DB *)dbp_ctp->ct_anyp;
-
- ret = dbp->set_lorder(dbp, lorder);
-
- replyp->status = ret;
- return;
-}
-
-/* BEGIN __dbopen_1_proc */
-void
-__db_open_1_proc(dbpcl_id, name, subdb,
- type, flags, mode, replyp)
- long dbpcl_id;
- char *name;
- char *subdb;
- u_int32_t type;
- u_int32_t flags;
- u_int32_t mode;
- __db_open_reply *replyp;
-/* END __db_open_1_proc */
-{
- int ret;
- DB * dbp;
- ct_entry *dbp_ctp;
-
- ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
- dbp = (DB *)dbp_ctp->ct_anyp;
-
- ret = dbp->open(dbp, name, subdb, (DBTYPE)type, flags, mode);
- if (ret == 0) {
- replyp->type = (int) dbp->get_type(dbp);
- /* XXX
- * Tcl needs to peek at dbp->flags for DB_AM_DUP. Send
- * this dbp's flags back.
- */
- replyp->dbflags = (int) dbp->flags;
- }
- replyp->status = ret;
- return;
-}
-
-/* BEGIN __db_pagesize_1_proc */
-void
-__db_pagesize_1_proc(dbpcl_id, pagesize, replyp)
- long dbpcl_id;
- u_int32_t pagesize;
- __db_pagesize_reply *replyp;
-/* END __db_pagesize_1_proc */
-{
- int ret;
- DB * dbp;
- ct_entry *dbp_ctp;
-
- ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
- dbp = (DB *)dbp_ctp->ct_anyp;
-
- ret = dbp->set_pagesize(dbp, pagesize);
-
- replyp->status = ret;
- return;
-}
-
-/* BEGIN __db_put_1_proc */
-void
-__db_put_1_proc(dbpcl_id, txnpcl_id, keydlen,
- keydoff, keyflags, keydata, keysize,
- datadlen, datadoff, dataflags, datadata,
- datasize, flags, replyp, freep)
- long dbpcl_id;
- long txnpcl_id;
- u_int32_t keydlen;
- u_int32_t keydoff;
- u_int32_t keyflags;
- void *keydata;
- u_int32_t keysize;
- u_int32_t datadlen;
- u_int32_t datadoff;
- u_int32_t dataflags;
- void *datadata;
- u_int32_t datasize;
- u_int32_t flags;
- __db_put_reply *replyp;
- int * freep;
-/* END __db_put_1_proc */
-{
- int ret;
- DB * dbp;
- ct_entry *dbp_ctp;
- DB_TXN * txnp;
- ct_entry *txnp_ctp;
- DBT key, data;
-
- ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
- dbp = (DB *)dbp_ctp->ct_anyp;
- if (txnpcl_id != 0) {
- ACTIVATE_CTP(txnp_ctp, txnpcl_id, CT_TXN);
- txnp = (DB_TXN *)txnp_ctp->ct_anyp;
- } else
- txnp = NULL;
-
- *freep = 0;
- memset(&key, 0, sizeof(key));
- memset(&data, 0, sizeof(data));
-
- /* Set up key and data DBT */
- key.dlen = keydlen;
- key.doff = keydoff;
- /*
- * Ignore memory related flags on server.
- */
- key.flags = DB_DBT_MALLOC;
- if (keyflags & DB_DBT_PARTIAL)
- key.flags |= DB_DBT_PARTIAL;
- key.size = keysize;
- key.data = keydata;
-
- data.dlen = datadlen;
- data.doff = datadoff;
- data.flags = dataflags;
- data.size = datasize;
- data.data = datadata;
-
- /* Got all our stuff, now do the put */
- ret = dbp->put(dbp, txnp, &key, &data, flags);
- /*
- * If the client did a DB_APPEND, set up key in reply.
- * Otherwise just status.
- */
- if (ret == 0 && (flags == DB_APPEND)) {
- /*
- * XXX
- * We need to xdr_free whatever we are returning, next time.
- * However, DB does not allocate a new key if one was given
- * and we'd be free'ing up space allocated in the request.
- * So, allocate a new key/data pointer if it is the same one
- * as in the request.
- */
- *freep = 1;
- /*
- * Key
- */
- if (key.data == keydata) {
- ret = __os_malloc(dbp->dbenv,
- key.size, NULL, &replyp->keydata.keydata_val);
- if (ret != 0) {
- __os_free(key.data, key.size);
- goto err;
- }
- memcpy(replyp->keydata.keydata_val, key.data, key.size);
- } else
- replyp->keydata.keydata_val = key.data;
-
- replyp->keydata.keydata_len = key.size;
- } else {
-err: replyp->keydata.keydata_val = NULL;
- replyp->keydata.keydata_len = 0;
- *freep = 0;
- }
- replyp->status = ret;
- return;
-}
-
-/* BEGIN __db_re_delim_1_proc */
-void
-__db_re_delim_1_proc(dbpcl_id, delim, replyp)
- long dbpcl_id;
- u_int32_t delim;
- __db_re_delim_reply *replyp;
-/* END __db_re_delim_1_proc */
-{
- int ret;
- DB * dbp;
- ct_entry *dbp_ctp;
-
- ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
- dbp = (DB *)dbp_ctp->ct_anyp;
-
- ret = dbp->set_re_delim(dbp, delim);
-
- replyp->status = ret;
- return;
-}
-
-/* BEGIN __db_re_len_1_proc */
-void
-__db_re_len_1_proc(dbpcl_id, len, replyp)
- long dbpcl_id;
- u_int32_t len;
- __db_re_len_reply *replyp;
-/* END __db_re_len_1_proc */
-{
- int ret;
- DB * dbp;
- ct_entry *dbp_ctp;
-
- ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
- dbp = (DB *)dbp_ctp->ct_anyp;
-
- ret = dbp->set_re_len(dbp, len);
-
- replyp->status = ret;
- return;
-}
-
-/* BEGIN __db_re_pad_1_proc */
-void
-__db_re_pad_1_proc(dbpcl_id, pad, replyp)
- long dbpcl_id;
- u_int32_t pad;
- __db_re_pad_reply *replyp;
-/* END __db_re_pad_1_proc */
-{
- int ret;
- DB * dbp;
- ct_entry *dbp_ctp;
-
- ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
- dbp = (DB *)dbp_ctp->ct_anyp;
-
- ret = dbp->set_re_pad(dbp, pad);
-
- replyp->status = ret;
- return;
-}
-
-/* BEGIN __db_remove_1_proc */
-void
-__db_remove_1_proc(dbpcl_id, name, subdb,
- flags, replyp)
- long dbpcl_id;
- char *name;
- char *subdb;
- u_int32_t flags;
- __db_remove_reply *replyp;
-/* END __db_remove_1_proc */
-{
- int ret;
- DB * dbp;
- ct_entry *dbp_ctp;
-
- ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
- dbp = (DB *)dbp_ctp->ct_anyp;
-
- ret = dbp->remove(dbp, name, subdb, flags);
- __dbdel_ctp(dbp_ctp);
-
- replyp->status = ret;
- return;
-}
-
-/* BEGIN __db_rename_1_proc */
-void
-__db_rename_1_proc(dbpcl_id, name, subdb,
- newname, flags, replyp)
- long dbpcl_id;
- char *name;
- char *subdb;
- char *newname;
- u_int32_t flags;
- __db_rename_reply *replyp;
-/* END __db_rename_1_proc */
-{
- int ret;
- DB * dbp;
- ct_entry *dbp_ctp;
-
- ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
- dbp = (DB *)dbp_ctp->ct_anyp;
-
- ret = dbp->rename(dbp, name, subdb, newname, flags);
- __dbdel_ctp(dbp_ctp);
-
- replyp->status = ret;
- return;
-}
-
-/* BEGIN __db_stat_1_proc */
-void
-__db_stat_1_proc(dbpcl_id,
- flags, replyp, freep)
- long dbpcl_id;
- u_int32_t flags;
- __db_stat_reply *replyp;
- int * freep;
-/* END __db_stat_1_proc */
-{
- int ret;
- DB * dbp;
- ct_entry *dbp_ctp;
- DBTYPE type;
- void *sp;
- int len;
-
- ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
- dbp = (DB *)dbp_ctp->ct_anyp;
-
- ret = dbp->stat(dbp, &sp, NULL, flags);
- replyp->status = ret;
- if (ret != 0)
- return;
-	/*
-	 * If we get here, we have success.  Allocate an array so that
-	 * we can use the list generator.  Generate the reply, then free
-	 * up the space.
-	 */
- /*
- * XXX This assumes that all elements of all stat structures
- * are u_int32_t fields. They are, currently.
- */
- type = dbp->get_type(dbp);
- if (type == DB_HASH)
- len = sizeof(DB_HASH_STAT) / sizeof(u_int32_t);
- else if (type == DB_QUEUE)
- len = sizeof(DB_QUEUE_STAT) / sizeof(u_int32_t);
- else /* BTREE or RECNO are same stats */
- len = sizeof(DB_BTREE_STAT) / sizeof(u_int32_t);
- /*
- * Set up our list of stats.
- */
- ret = __db_stats_list(dbp->dbenv,
- &replyp->statslist, (u_int32_t*)sp, len);
-
- __os_free(sp, 0);
- if (ret == 0)
- *freep = 1;
- replyp->status = ret;
- return;
-}
-
-int
-__db_stats_list(dbenv, locp, pp, len)
- DB_ENV *dbenv;
- __db_stat_statsreplist **locp;
- u_int32_t *pp;
- int len;
-{
- u_int32_t *p, *q;
- int i, ret;
- __db_stat_statsreplist *nl, **nlp;
-
- nlp = locp;
- for (i = 0; i < len; i++) {
- p = pp+i;
- if ((ret = __os_malloc(dbenv, sizeof(*nl), NULL, nlp)) != 0)
- goto out;
- nl = *nlp;
- nl->next = NULL;
- if ((ret = __os_malloc(dbenv,
- sizeof(u_int32_t), NULL, &nl->ent.ent_val)) != 0)
- goto out;
- q = (u_int32_t *)nl->ent.ent_val;
- *q = *p;
- nl->ent.ent_len = sizeof(u_int32_t);
- nlp = &nl->next;
- }
- return (0);
-out:
- __db_stats_freelist(locp);
- return (ret);
-}
-
-/*
- * PUBLIC: void __db_stats_freelist __P((__db_stat_statsreplist **));
- */
-void
-__db_stats_freelist(locp)
- __db_stat_statsreplist **locp;
-{
- __db_stat_statsreplist *nl, *nl1;
-
- for (nl = *locp; nl != NULL; nl = nl1) {
- nl1 = nl->next;
- if (nl->ent.ent_val)
- __os_free(nl->ent.ent_val, nl->ent.ent_len);
- __os_free(nl, sizeof(*nl));
- }
- *locp = NULL;
-}
-
-/* BEGIN __db_swapped_1_proc */
-void
-__db_swapped_1_proc(dbpcl_id, replyp)
- long dbpcl_id;
- __db_swapped_reply *replyp;
-/* END __db_swapped_1_proc */
-{
- int ret;
- DB * dbp;
- ct_entry *dbp_ctp;
-
- ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
- dbp = (DB *)dbp_ctp->ct_anyp;
-
- ret = dbp->get_byteswapped(dbp);
-
- replyp->status = ret;
- return;
-}
-
-/* BEGIN __db_sync_1_proc */
-void
-__db_sync_1_proc(dbpcl_id, flags, replyp)
- long dbpcl_id;
- u_int32_t flags;
- __db_sync_reply *replyp;
-/* END __db_sync_1_proc */
-{
- int ret;
- DB * dbp;
- ct_entry *dbp_ctp;
-
- ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
- dbp = (DB *)dbp_ctp->ct_anyp;
-
- ret = dbp->sync(dbp, flags);
-
- replyp->status = ret;
- return;
-}
-
-/* BEGIN __db_cursor_1_proc */
-void
-__db_cursor_1_proc(dbpcl_id, txnpcl_id,
- flags, replyp)
- long dbpcl_id;
- long txnpcl_id;
- u_int32_t flags;
- __db_cursor_reply *replyp;
-/* END __db_cursor_1_proc */
-{
- int ret;
- DB * dbp;
- ct_entry *dbp_ctp;
- DB_TXN * txnp;
- ct_entry *txnp_ctp;
- DBC *dbc;
- ct_entry *dbc_ctp, *env_ctp;
-
- ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
- dbp = (DB *)dbp_ctp->ct_anyp;
- dbc_ctp = new_ct_ent(&replyp->status);
- if (dbc_ctp == NULL)
- return;
-
- if (txnpcl_id != 0) {
- ACTIVATE_CTP(txnp_ctp, txnpcl_id, CT_TXN);
- txnp = (DB_TXN *)txnp_ctp->ct_anyp;
- dbc_ctp->ct_activep = txnp_ctp->ct_activep;
- } else
- txnp = NULL;
-
- if ((ret = dbp->cursor(dbp, txnp, &dbc, flags)) == 0) {
- dbc_ctp->ct_dbc = dbc;
- dbc_ctp->ct_type = CT_CURSOR;
- dbc_ctp->ct_parent = dbp_ctp;
- env_ctp = dbp_ctp->ct_envparent;
- dbc_ctp->ct_envparent = env_ctp;
- __dbsrv_settimeout(dbc_ctp, env_ctp->ct_timeout);
- __dbsrv_active(dbc_ctp);
- replyp->dbcidcl_id = dbc_ctp->ct_id;
- } else
- __dbclear_ctp(dbc_ctp);
-
- replyp->status = ret;
- return;
-}
-
-/* BEGIN __db_join_1_proc */
-void
-__db_join_1_proc(dbpcl_id, curslist,
- flags, replyp)
- long dbpcl_id;
- u_int32_t * curslist;
- u_int32_t flags;
- __db_join_reply *replyp;
-/* END __db_join_1_proc */
-{
- DB * dbp;
- ct_entry *dbp_ctp;
- DBC *dbc;
- DBC **jcurs, **c;
- ct_entry *dbc_ctp, *ctp;
- size_t size;
- int ret;
- u_int32_t *cl;
-
- ACTIVATE_CTP(dbp_ctp, dbpcl_id, CT_DB);
- dbp = (DB *)dbp_ctp->ct_anyp;
-
- dbc_ctp = new_ct_ent(&replyp->status);
- if (dbc_ctp == NULL)
- return;
-
- for (size = sizeof(DBC *), cl = curslist; *cl != 0; size += sizeof(DBC *), cl++)
- ;
- if ((ret = __os_malloc(dbp->dbenv, size, NULL, &jcurs)) != 0) {
- replyp->status = ret;
- __dbclear_ctp(dbc_ctp);
- return;
- }
- /*
- * If our curslist has a parent txn, we need to use it too
- * for the activity timeout. All cursors must be part of
- * the same transaction, so just check the first.
- */
- ctp = get_tableent(*curslist);
- DB_ASSERT(ctp->ct_type == CT_CURSOR);
- /*
- * If we are using a transaction, set the join activity timer
- * to point to the parent transaction.
- */
- if (ctp->ct_activep != &ctp->ct_active)
- dbc_ctp->ct_activep = ctp->ct_activep;
- for (cl = curslist, c = jcurs; *cl != 0; cl++, c++) {
- ctp = get_tableent(*cl);
- if (ctp == NULL) {
- replyp->status = DB_NOSERVER_ID;
- goto out;
- }
- /*
- * If we are using a txn, the join cursor points to the
- * transaction timeout. If we are not using a transaction,
- * then all the curslist cursors must point to the join
-	 * cursor's timeout so that we do not time out any of the
-	 * curslist cursors while the join cursor is active.
- * Change the type of the curslist ctps to CT_JOIN so that
- * we know they are part of a join list and we can distinguish
- * them and later restore them when the join cursor is closed.
- */
- DB_ASSERT(ctp->ct_type == CT_CURSOR);
- ctp->ct_type |= CT_JOIN;
- ctp->ct_origp = ctp->ct_activep;
- /*
- * Setting this to the ct_active field of the dbc_ctp is
- * really just a way to distinguish which join dbc this
- * cursor is part of. The ct_activep of this cursor is
- * not used at all during its lifetime as part of a join
- * cursor.
- */
- ctp->ct_activep = &dbc_ctp->ct_active;
- *c = ctp->ct_dbc;
- }
- *c = NULL;
- if ((ret = dbp->join(dbp, jcurs, &dbc, flags)) == 0) {
- dbc_ctp->ct_dbc = dbc;
- dbc_ctp->ct_type = (CT_JOINCUR | CT_CURSOR);
- dbc_ctp->ct_parent = dbp_ctp;
- dbc_ctp->ct_envparent = dbp_ctp->ct_envparent;
- __dbsrv_settimeout(dbc_ctp, dbp_ctp->ct_envparent->ct_timeout);
- __dbsrv_active(dbc_ctp);
- replyp->dbcidcl_id = dbc_ctp->ct_id;
- } else {
- __dbclear_ctp(dbc_ctp);
- /*
- * If we get an error, undo what we did above to any cursors.
- */
- for (cl = curslist; *cl != 0; cl++) {
- ctp = get_tableent(*cl);
- ctp->ct_type = CT_CURSOR;
- ctp->ct_activep = ctp->ct_origp;
- }
- }
-
- replyp->status = ret;
-out:
- __os_free(jcurs, size);
- return;
-}
-
-/* BEGIN __dbc_close_1_proc */
-void
-__dbc_close_1_proc(dbccl_id, replyp)
- long dbccl_id;
- __dbc_close_reply *replyp;
-/* END __dbc_close_1_proc */
-{
- ct_entry *dbc_ctp;
-
- ACTIVATE_CTP(dbc_ctp, dbccl_id, CT_CURSOR);
- replyp->status = __dbc_close_int(dbc_ctp);
- return;
-}
-
-/* BEGIN __dbc_count_1_proc */
-void
-__dbc_count_1_proc(dbccl_id, flags, replyp)
- long dbccl_id;
- u_int32_t flags;
- __dbc_count_reply *replyp;
-/* END __dbc_count_1_proc */
-{
- int ret;
- DBC * dbc;
- ct_entry *dbc_ctp;
- db_recno_t num;
-
- ACTIVATE_CTP(dbc_ctp, dbccl_id, CT_CURSOR);
- dbc = (DBC *)dbc_ctp->ct_anyp;
-
- ret = dbc->c_count(dbc, &num, flags);
- replyp->status = ret;
- if (ret == 0)
- replyp->dupcount = num;
- return;
-}
-
-/* BEGIN __dbc_del_1_proc */
-void
-__dbc_del_1_proc(dbccl_id, flags, replyp)
- long dbccl_id;
- u_int32_t flags;
- __dbc_del_reply *replyp;
-/* END __dbc_del_1_proc */
-{
- int ret;
- DBC * dbc;
- ct_entry *dbc_ctp;
-
- ACTIVATE_CTP(dbc_ctp, dbccl_id, CT_CURSOR);
- dbc = (DBC *)dbc_ctp->ct_anyp;
-
- ret = dbc->c_del(dbc, flags);
-
- replyp->status = ret;
- return;
-}
-
-/* BEGIN __dbc_dup_1_proc */
-void
-__dbc_dup_1_proc(dbccl_id, flags, replyp)
- long dbccl_id;
- u_int32_t flags;
- __dbc_dup_reply *replyp;
-/* END __dbc_dup_1_proc */
-{
- int ret;
- DBC * dbc;
- ct_entry *dbc_ctp;
- DBC *newdbc;
- ct_entry *new_ctp;
-
- ACTIVATE_CTP(dbc_ctp, dbccl_id, CT_CURSOR);
- dbc = (DBC *)dbc_ctp->ct_anyp;
-
- new_ctp = new_ct_ent(&replyp->status);
- if (new_ctp == NULL)
- return;
-
- if ((ret = dbc->c_dup(dbc, &newdbc, flags)) == 0) {
- new_ctp->ct_dbc = newdbc;
- new_ctp->ct_type = CT_CURSOR;
- new_ctp->ct_parent = dbc_ctp->ct_parent;
- new_ctp->ct_envparent = dbc_ctp->ct_envparent;
- /*
- * If our cursor has a parent txn, we need to use it too.
- */
- if (dbc_ctp->ct_activep != &dbc_ctp->ct_active)
- new_ctp->ct_activep = dbc_ctp->ct_activep;
- __dbsrv_settimeout(new_ctp, dbc_ctp->ct_timeout);
- __dbsrv_active(new_ctp);
- replyp->dbcidcl_id = new_ctp->ct_id;
- } else
- __dbclear_ctp(new_ctp);
-
- replyp->status = ret;
- return;
-}
-
-/* BEGIN __dbc_get_1_proc */
-void
-__dbc_get_1_proc(dbccl_id, keydlen, keydoff,
- keyflags, keydata, keysize, datadlen,
- datadoff, dataflags, datadata, datasize,
- flags, replyp, freep)
- long dbccl_id;
- u_int32_t keydlen;
- u_int32_t keydoff;
- u_int32_t keyflags;
- void *keydata;
- u_int32_t keysize;
- u_int32_t datadlen;
- u_int32_t datadoff;
- u_int32_t dataflags;
- void *datadata;
- u_int32_t datasize;
- u_int32_t flags;
- __dbc_get_reply *replyp;
- int * freep;
-/* END __dbc_get_1_proc */
-{
- DB_ENV *dbenv;
- DBC *dbc;
- DBT key, data;
- ct_entry *dbc_ctp;
- int key_alloc, ret;
-
- ACTIVATE_CTP(dbc_ctp, dbccl_id, CT_CURSOR);
- dbc = (DBC *)dbc_ctp->ct_anyp;
- dbenv = dbc->dbp->dbenv;
-
- *freep = 0;
- memset(&key, 0, sizeof(key));
- memset(&data, 0, sizeof(data));
-
- /* Set up key and data DBT */
- key.dlen = keydlen;
- key.doff = keydoff;
- /*
- * Ignore memory related flags on server.
- */
- key.flags = DB_DBT_MALLOC;
- if (keyflags & DB_DBT_PARTIAL)
- key.flags |= DB_DBT_PARTIAL;
- key.size = keysize;
- key.data = keydata;
-
- data.dlen = datadlen;
- data.doff = datadoff;
- data.flags = DB_DBT_MALLOC;
- if (dataflags & DB_DBT_PARTIAL)
- data.flags |= DB_DBT_PARTIAL;
- data.size = datasize;
- data.data = datadata;
-
- /* Got all our stuff, now do the get */
- ret = dbc->c_get(dbc, &key, &data, flags);
-
-	/*
-	 * On success, return the key and data; otherwise just the status.
-	 */
- if (ret == 0) {
- /*
- * XXX
- * We need to xdr_free whatever we are returning, next time.
- * However, DB does not allocate a new key if one was given
- * and we'd be free'ing up space allocated in the request.
- * So, allocate a new key/data pointer if it is the same one
- * as in the request.
- */
- *freep = 1;
- /*
- * Key
- */
- key_alloc = 0;
- if (key.data == keydata) {
- ret = __os_malloc(dbenv, key.size, NULL,
- &replyp->keydata.keydata_val);
- if (ret != 0) {
- __os_free(key.data, key.size);
- __os_free(data.data, data.size);
- goto err;
- }
- key_alloc = 1;
- memcpy(replyp->keydata.keydata_val, key.data, key.size);
- } else
- replyp->keydata.keydata_val = key.data;
-
- replyp->keydata.keydata_len = key.size;
-
- /*
- * Data
- */
- if (data.data == datadata) {
- ret = __os_malloc(dbenv, data.size, NULL,
- &replyp->datadata.datadata_val);
- if (ret != 0) {
- __os_free(key.data, key.size);
- __os_free(data.data, data.size);
- if (key_alloc)
- __os_free(replyp->keydata.keydata_val,
- key.size);
- goto err;
- }
- memcpy(replyp->datadata.datadata_val, data.data,
- data.size);
- } else
- replyp->datadata.datadata_val = data.data;
- replyp->datadata.datadata_len = data.size;
- } else {
-err: replyp->keydata.keydata_val = NULL;
- replyp->keydata.keydata_len = 0;
- replyp->datadata.datadata_val = NULL;
- replyp->datadata.datadata_len = 0;
- *freep = 0;
- }
- replyp->status = ret;
- return;
-}
-
-/* BEGIN __dbc_put_1_proc */
-void
-__dbc_put_1_proc(dbccl_id, keydlen, keydoff,
- keyflags, keydata, keysize, datadlen,
- datadoff, dataflags, datadata, datasize,
- flags, replyp, freep)
- long dbccl_id;
- u_int32_t keydlen;
- u_int32_t keydoff;
- u_int32_t keyflags;
- void *keydata;
- u_int32_t keysize;
- u_int32_t datadlen;
- u_int32_t datadoff;
- u_int32_t dataflags;
- void *datadata;
- u_int32_t datasize;
- u_int32_t flags;
- __dbc_put_reply *replyp;
- int * freep;
-/* END __dbc_put_1_proc */
-{
- int ret;
- DBC * dbc;
- DB *dbp;
- ct_entry *dbc_ctp;
- DBT key, data;
-
- ACTIVATE_CTP(dbc_ctp, dbccl_id, CT_CURSOR);
- dbc = (DBC *)dbc_ctp->ct_anyp;
- dbp = (DB *)dbc_ctp->ct_parent->ct_anyp;
-
- memset(&key, 0, sizeof(key));
- memset(&data, 0, sizeof(data));
-
- /* Set up key and data DBT */
- key.dlen = keydlen;
- key.doff = keydoff;
- /*
- * Ignore memory related flags on server.
- */
- key.flags = 0;
- if (keyflags & DB_DBT_PARTIAL)
- key.flags |= DB_DBT_PARTIAL;
- key.size = keysize;
- key.data = keydata;
-
- data.dlen = datadlen;
- data.doff = datadoff;
- data.flags = dataflags;
- data.size = datasize;
- data.data = datadata;
-
- /* Got all our stuff, now do the put */
- ret = dbc->c_put(dbc, &key, &data, flags);
-
- *freep = 0;
- if (ret == 0 && (flags == DB_AFTER || flags == DB_BEFORE) &&
- dbp->type == DB_RECNO) {
- /*
- * We need to xdr_free whatever we are returning, next time.
- */
- replyp->keydata.keydata_val = key.data;
- replyp->keydata.keydata_len = key.size;
- } else {
- replyp->keydata.keydata_val = NULL;
- replyp->keydata.keydata_len = 0;
- }
- replyp->status = ret;
- return;
-}
-#endif /* HAVE_RPC */
diff --git a/bdb/rpc_server/db_server_svc.sed b/bdb/rpc_server/db_server_svc.sed
deleted file mode 100644
index 9d540e51af6..00000000000
--- a/bdb/rpc_server/db_server_svc.sed
+++ /dev/null
@@ -1,5 +0,0 @@
-/^#include <netinet.in.h>/a\
-\extern void __dbsrv_timeout();
-/^ return;/i\
-\ __dbsrv_timeout(0);
-s/^main/void __dbsrv_main/
diff --git a/bdb/rpc_server/java/DbDispatcher.java b/bdb/rpc_server/java/DbDispatcher.java
new file mode 100644
index 00000000000..5c5e63fc2ad
--- /dev/null
+++ b/bdb/rpc_server/java/DbDispatcher.java
@@ -0,0 +1,590 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2001-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: DbDispatcher.java,v 1.5 2002/08/09 01:56:08 bostic Exp $
+ */
+
+package com.sleepycat.db.rpcserver;
+
+import com.sleepycat.db.*;
+import java.io.IOException;
+import org.acplt.oncrpc.OncRpcException;
+
+/**
+ * Dispatcher for RPC messages for the Java RPC server.
+ * These are hooks that translate between RPC msg/reply structures and
+ * DB calls, which keeps the real implementation code in Rpc* classes cleaner.
+ */
+public abstract class DbDispatcher extends DbServerStub
+{
+ abstract int addEnv(RpcDbEnv rdbenv);
+ abstract int addDb(RpcDb rdb);
+ abstract int addTxn(RpcDbTxn rtxn);
+ abstract int addCursor(RpcDbc rdbc);
+ abstract void delEnv(RpcDbEnv rdbenv);
+ abstract void delDb(RpcDb rdb);
+ abstract void delTxn(RpcDbTxn rtxn);
+ abstract void delCursor(RpcDbc rdbc);
+ abstract RpcDbEnv getEnv(int envid);
+ abstract RpcDb getDb(int dbid);
+ abstract RpcDbTxn getTxn(int txnbid);
+ abstract RpcDbc getCursor(int dbcid);
+
+ public DbDispatcher() throws IOException, OncRpcException
+ {
+ super();
+ }
+
+ //// Db methods
+
+ public __db_associate_reply __DB_db_associate_4001(__db_associate_msg args)
+ {
+ __db_associate_reply reply = new __db_associate_reply();
+ RpcDb rdb = getDb(args.dbpcl_id);
+ if (rdb == null)
+ reply.status = Db.DB_NOSERVER_ID;
+ else
+ rdb.associate(this, args, reply);
+ return reply;
+ }
+
+ public __db_bt_maxkey_reply __DB_db_bt_maxkey_4001(__db_bt_maxkey_msg args)
+ {
+ __db_bt_maxkey_reply reply = new __db_bt_maxkey_reply();
+ RpcDb rdb = getDb(args.dbpcl_id);
+ if (rdb == null)
+ reply.status = Db.DB_NOSERVER_ID;
+ else
+ rdb.set_bt_maxkey(this, args, reply);
+ return reply;
+ }
+
+ public __db_bt_minkey_reply __DB_db_bt_minkey_4001(__db_bt_minkey_msg args)
+ {
+ __db_bt_minkey_reply reply = new __db_bt_minkey_reply();
+ RpcDb rdb = getDb(args.dbpcl_id);
+ if (rdb == null)
+ reply.status = Db.DB_NOSERVER_ID;
+ else
+ rdb.set_bt_minkey(this, args, reply);
+ return reply;
+ }
+
+ public __db_close_reply __DB_db_close_4001(__db_close_msg args)
+ {
+ __db_close_reply reply = new __db_close_reply();
+ RpcDb rdb = getDb(args.dbpcl_id);
+ if (rdb == null)
+ reply.status = Db.DB_NOSERVER_ID;
+ else
+ rdb.close(this, args, reply);
+ return reply;
+ }
+
+ public __db_create_reply __DB_db_create_4001(__db_create_msg args)
+ {
+ __db_create_reply reply = new __db_create_reply();
+ RpcDb rdb = new RpcDb(getEnv(args.dbenvcl_id));
+ if (rdb == null)
+ reply.status = Db.DB_NOSERVER_ID;
+ else
+ rdb.create(this, args, reply);
+ return reply;
+ }
+
+ public __db_cursor_reply __DB_db_cursor_4001(__db_cursor_msg args)
+ {
+ __db_cursor_reply reply = new __db_cursor_reply();
+ RpcDb rdb = getDb(args.dbpcl_id);
+ if (rdb == null)
+ reply.status = Db.DB_NOSERVER_ID;
+ else
+ rdb.cursor(this, args, reply);
+ return reply;
+ }
+
+ public __db_del_reply __DB_db_del_4001(__db_del_msg args)
+ {
+ __db_del_reply reply = new __db_del_reply();
+ RpcDb rdb = getDb(args.dbpcl_id);
+ if (rdb == null)
+ reply.status = Db.DB_NOSERVER_ID;
+ else
+ rdb.del(this, args, reply);
+ return reply;
+ }
+
+ public __db_encrypt_reply __DB_db_encrypt_4001(__db_encrypt_msg args)
+ {
+ __db_encrypt_reply reply = new __db_encrypt_reply();
+ RpcDb rdb = getDb(args.dbpcl_id);
+ if (rdb == null)
+ reply.status = Db.DB_NOSERVER_ID;
+ else
+ rdb.set_encrypt(this, args, reply);
+ return reply;
+ }
+
+ public __db_extentsize_reply __DB_db_extentsize_4001(__db_extentsize_msg args)
+ {
+ __db_extentsize_reply reply = new __db_extentsize_reply();
+ RpcDb rdb = getDb(args.dbpcl_id);
+ if (rdb == null)
+ reply.status = Db.DB_NOSERVER_ID;
+ else
+ rdb.set_q_extentsize(this, args, reply);
+ return reply;
+ }
+
+ public __db_flags_reply __DB_db_flags_4001(__db_flags_msg args)
+ {
+ __db_flags_reply reply = new __db_flags_reply();
+ RpcDb rdb = getDb(args.dbpcl_id);
+ if (rdb == null)
+ reply.status = Db.DB_NOSERVER_ID;
+ else
+ rdb.set_flags(this, args, reply);
+ return reply;
+ }
+
+ public __db_get_reply __DB_db_get_4001(__db_get_msg args)
+ {
+ __db_get_reply reply = new __db_get_reply();
+ RpcDb rdb = getDb(args.dbpcl_id);
+ if (rdb == null)
+ reply.status = Db.DB_NOSERVER_ID;
+ else
+ rdb.get(this, args, reply);
+ return reply;
+ }
+
+ public __db_h_ffactor_reply __DB_db_h_ffactor_4001(__db_h_ffactor_msg args)
+ {
+ __db_h_ffactor_reply reply = new __db_h_ffactor_reply();
+ RpcDb rdb = getDb(args.dbpcl_id);
+ if (rdb == null)
+ reply.status = Db.DB_NOSERVER_ID;
+ else
+ rdb.set_h_ffactor(this, args, reply);
+ return reply;
+ }
+
+ public __db_h_nelem_reply __DB_db_h_nelem_4001(__db_h_nelem_msg args)
+ {
+ __db_h_nelem_reply reply = new __db_h_nelem_reply();
+ RpcDb rdb = getDb(args.dbpcl_id);
+ if (rdb == null)
+ reply.status = Db.DB_NOSERVER_ID;
+ else
+ rdb.set_h_nelem(this, args, reply);
+ return reply;
+ }
+
+ public __db_join_reply __DB_db_join_4001(__db_join_msg args)
+ {
+ __db_join_reply reply = new __db_join_reply();
+ RpcDb rdb = getDb(args.dbpcl_id);
+ if (rdb == null)
+ reply.status = Db.DB_NOSERVER_ID;
+ else
+ rdb.join(this, args, reply);
+ return reply;
+ }
+
+ public __db_key_range_reply __DB_db_key_range_4001(__db_key_range_msg args)
+ {
+ __db_key_range_reply reply = new __db_key_range_reply();
+ RpcDb rdb = getDb(args.dbpcl_id);
+ if (rdb == null)
+ reply.status = Db.DB_NOSERVER_ID;
+ else
+ rdb.key_range(this, args, reply);
+ return reply;
+ }
+
+ public __db_lorder_reply __DB_db_lorder_4001(__db_lorder_msg args)
+ {
+ __db_lorder_reply reply = new __db_lorder_reply();
+ RpcDb rdb = getDb(args.dbpcl_id);
+ if (rdb == null)
+ reply.status = Db.DB_NOSERVER_ID;
+ else
+ rdb.set_lorder(this, args, reply);
+ return reply;
+ }
+
+ public __db_open_reply __DB_db_open_4001(__db_open_msg args)
+ {
+ __db_open_reply reply = new __db_open_reply();
+ RpcDb rdb = getDb(args.dbpcl_id);
+ if (rdb == null)
+ reply.status = Db.DB_NOSERVER_ID;
+ else
+ rdb.open(this, args, reply);
+ return reply;
+ }
+
+ public __db_pagesize_reply __DB_db_pagesize_4001(__db_pagesize_msg args)
+ {
+ __db_pagesize_reply reply = new __db_pagesize_reply();
+ RpcDb rdb = getDb(args.dbpcl_id);
+ if (rdb == null)
+ reply.status = Db.DB_NOSERVER_ID;
+ else
+ rdb.set_pagesize(this, args, reply);
+ return reply;
+ }
+
+ public __db_pget_reply __DB_db_pget_4001(__db_pget_msg args)
+ {
+ __db_pget_reply reply = new __db_pget_reply();
+ RpcDb rdb = getDb(args.dbpcl_id);
+ if (rdb == null)
+ reply.status = Db.DB_NOSERVER_ID;
+ else
+ rdb.pget(this, args, reply);
+ return reply;
+ }
+
+ public __db_put_reply __DB_db_put_4001(__db_put_msg args)
+ {
+ __db_put_reply reply = new __db_put_reply();
+ RpcDb rdb = getDb(args.dbpcl_id);
+ if (rdb == null)
+ reply.status = Db.DB_NOSERVER_ID;
+ else
+ rdb.put(this, args, reply);
+ return reply;
+ }
+
+ public __db_remove_reply __DB_db_remove_4001(__db_remove_msg args)
+ {
+ __db_remove_reply reply = new __db_remove_reply();
+ RpcDb rdb = getDb(args.dbpcl_id);
+ if (rdb == null)
+ reply.status = Db.DB_NOSERVER_ID;
+ else
+ rdb.remove(this, args, reply);
+ return reply;
+ }
+
+ public __db_rename_reply __DB_db_rename_4001(__db_rename_msg args)
+ {
+ __db_rename_reply reply = new __db_rename_reply();
+ RpcDb rdb = getDb(args.dbpcl_id);
+ if (rdb == null)
+ reply.status = Db.DB_NOSERVER_ID;
+ else
+ rdb.rename(this, args, reply);
+ return reply;
+ }
+
+ public __db_re_delim_reply __DB_db_re_delim_4001(__db_re_delim_msg args)
+ {
+ __db_re_delim_reply reply = new __db_re_delim_reply();
+ RpcDb rdb = getDb(args.dbpcl_id);
+ if (rdb == null)
+ reply.status = Db.DB_NOSERVER_ID;
+ else
+ rdb.set_re_delim(this, args, reply);
+ return reply;
+ }
+
+ public __db_re_len_reply __DB_db_re_len_4001(__db_re_len_msg args)
+ {
+ __db_re_len_reply reply = new __db_re_len_reply();
+ RpcDb rdb = getDb(args.dbpcl_id);
+ if (rdb == null)
+ reply.status = Db.DB_NOSERVER_ID;
+ else
+ rdb.set_re_len(this, args, reply);
+ return reply;
+ }
+
+ public __db_re_pad_reply __DB_db_re_pad_4001(__db_re_pad_msg args)
+ {
+ __db_re_pad_reply reply = new __db_re_pad_reply();
+ RpcDb rdb = getDb(args.dbpcl_id);
+ if (rdb == null)
+ reply.status = Db.DB_NOSERVER_ID;
+ else
+ rdb.set_re_pad(this, args, reply);
+ return reply;
+ }
+
+ public __db_stat_reply __DB_db_stat_4001(__db_stat_msg args)
+ {
+ __db_stat_reply reply = new __db_stat_reply();
+ RpcDb rdb = getDb(args.dbpcl_id);
+ if (rdb == null)
+ reply.status = Db.DB_NOSERVER_ID;
+ else
+ rdb.stat(this, args, reply);
+ return reply;
+ }
+
+ public __db_sync_reply __DB_db_sync_4001(__db_sync_msg args)
+ {
+ __db_sync_reply reply = new __db_sync_reply();
+ RpcDb rdb = getDb(args.dbpcl_id);
+ if (rdb == null)
+ reply.status = Db.DB_NOSERVER_ID;
+ else
+ rdb.sync(this, args, reply);
+ return reply;
+ }
+
+ public __db_truncate_reply __DB_db_truncate_4001(__db_truncate_msg args)
+ {
+ __db_truncate_reply reply = new __db_truncate_reply();
+ RpcDb rdb = getDb(args.dbpcl_id);
+ if (rdb == null)
+ reply.status = Db.DB_NOSERVER_ID;
+ else
+ rdb.truncate(this, args, reply);
+ return reply;
+ }
+
+ //// Cursor methods
+
+ public __dbc_close_reply __DB_dbc_close_4001(__dbc_close_msg args)
+ {
+ __dbc_close_reply reply = new __dbc_close_reply();
+ RpcDbc rdbc = getCursor(args.dbccl_id);
+ if (rdbc == null)
+ reply.status = Db.DB_NOSERVER_ID;
+ else
+ rdbc.close(this, args, reply);
+ return reply;
+ }
+
+ public __dbc_count_reply __DB_dbc_count_4001(__dbc_count_msg args)
+ {
+ __dbc_count_reply reply = new __dbc_count_reply();
+ RpcDbc rdbc = getCursor(args.dbccl_id);
+ if (rdbc == null)
+ reply.status = Db.DB_NOSERVER_ID;
+ else
+ rdbc.count(this, args, reply);
+ return reply;
+ }
+
+ public __dbc_del_reply __DB_dbc_del_4001(__dbc_del_msg args)
+ {
+ __dbc_del_reply reply = new __dbc_del_reply();
+ RpcDbc rdbc = getCursor(args.dbccl_id);
+ if (rdbc == null)
+ reply.status = Db.DB_NOSERVER_ID;
+ else
+ rdbc.del(this, args, reply);
+ return reply;
+ }
+
+ public __dbc_dup_reply __DB_dbc_dup_4001(__dbc_dup_msg args)
+ {
+ __dbc_dup_reply reply = new __dbc_dup_reply();
+ RpcDbc rdbc = getCursor(args.dbccl_id);
+ if (rdbc == null)
+ reply.status = Db.DB_NOSERVER_ID;
+ else
+ rdbc.dup(this, args, reply);
+ return reply;
+ }
+
+ public __dbc_get_reply __DB_dbc_get_4001(__dbc_get_msg args)
+ {
+ __dbc_get_reply reply = new __dbc_get_reply();
+ RpcDbc rdbc = getCursor(args.dbccl_id);
+ if (rdbc == null)
+ reply.status = Db.DB_NOSERVER_ID;
+ else
+ rdbc.get(this, args, reply);
+ return reply;
+ }
+
+ public __dbc_pget_reply __DB_dbc_pget_4001(__dbc_pget_msg args) {
+ __dbc_pget_reply reply = new __dbc_pget_reply();
+ RpcDbc rdbc = getCursor(args.dbccl_id);
+ if (rdbc == null)
+ reply.status = Db.DB_NOSERVER_ID;
+ else
+ rdbc.pget(this, args, reply);
+ return reply;
+ }
+
+ public __dbc_put_reply __DB_dbc_put_4001(__dbc_put_msg args) {
+ __dbc_put_reply reply = new __dbc_put_reply();
+ RpcDbc rdbc = getCursor(args.dbccl_id);
+ if (rdbc == null)
+ reply.status = Db.DB_NOSERVER_ID;
+ else
+ rdbc.put(this, args, reply);
+ return reply;
+ }
+
+ //// Environment methods
+
+ public __env_cachesize_reply __DB_env_cachesize_4001(__env_cachesize_msg args)
+ {
+ __env_cachesize_reply reply = new __env_cachesize_reply();
+ RpcDbEnv rdbenv = getEnv(args.dbenvcl_id);
+ if (rdbenv == null)
+ reply.status = Db.DB_NOSERVER_ID;
+ else
+ rdbenv.set_cachesize(this, args, reply);
+ return reply;
+ }
+
+ public __env_close_reply __DB_env_close_4001(__env_close_msg args)
+ {
+ __env_close_reply reply = new __env_close_reply();
+ RpcDbEnv rdbenv = getEnv(args.dbenvcl_id);
+ if (rdbenv == null)
+ reply.status = Db.DB_NOSERVER_ID;
+ else
+ rdbenv.close(this, args, reply);
+ return reply;
+ }
+
+ public __env_create_reply __DB_env_create_4001(__env_create_msg args)
+ {
+ __env_create_reply reply = new __env_create_reply();
+ RpcDbEnv rdbenv = new RpcDbEnv();
+ rdbenv.create(this, args, reply);
+ return reply;
+ }
+
+ public __env_dbremove_reply __DB_env_dbremove_4001(__env_dbremove_msg args)
+ {
+ __env_dbremove_reply reply = new __env_dbremove_reply();
+ RpcDbEnv rdbenv = getEnv(args.dbenvcl_id);
+ if (rdbenv == null)
+ reply.status = Db.DB_NOSERVER_ID;
+ else
+ rdbenv.dbremove(this, args, reply);
+ return reply;
+ }
+
+ public __env_dbrename_reply __DB_env_dbrename_4001(__env_dbrename_msg args)
+ {
+ __env_dbrename_reply reply = new __env_dbrename_reply();
+ RpcDbEnv rdbenv = getEnv(args.dbenvcl_id);
+ if (rdbenv == null)
+ reply.status = Db.DB_NOSERVER_ID;
+ else
+ rdbenv.dbrename(this, args, reply);
+ return reply;
+ }
+
+ public __env_encrypt_reply __DB_env_encrypt_4001(__env_encrypt_msg args)
+ {
+ __env_encrypt_reply reply = new __env_encrypt_reply();
+ RpcDbEnv rdbenv = getEnv(args.dbenvcl_id);
+ if (rdbenv == null)
+ reply.status = Db.DB_NOSERVER_ID;
+ else
+ rdbenv.set_encrypt(this, args, reply);
+ return reply;
+ }
+
+ public __env_flags_reply __DB_env_flags_4001(__env_flags_msg args)
+ {
+ __env_flags_reply reply = new __env_flags_reply();
+ RpcDbEnv rdbenv = getEnv(args.dbenvcl_id);
+ if (rdbenv == null)
+ reply.status = Db.DB_NOSERVER_ID;
+ else
+ rdbenv.set_flags(this, args, reply);
+ return reply;
+ }
+
+ public __env_open_reply __DB_env_open_4001(__env_open_msg args)
+ {
+ __env_open_reply reply = new __env_open_reply();
+ RpcDbEnv rdbenv = getEnv(args.dbenvcl_id);
+ if (rdbenv == null)
+ reply.status = Db.DB_NOSERVER_ID;
+ else
+ rdbenv.open(this, args, reply);
+ return reply;
+ }
+
+ public __env_remove_reply __DB_env_remove_4001(__env_remove_msg args)
+ {
+ __env_remove_reply reply = new __env_remove_reply();
+ RpcDbEnv rdbenv = getEnv(args.dbenvcl_id);
+ if (rdbenv == null)
+ reply.status = Db.DB_NOSERVER_ID;
+ else
+ rdbenv.remove(this, args, reply);
+ return reply;
+ }
+
+ //// Transaction methods
+
+ public __txn_abort_reply __DB_txn_abort_4001(__txn_abort_msg args)
+ {
+ __txn_abort_reply reply = new __txn_abort_reply();
+ RpcDbTxn rdbtxn = getTxn(args.txnpcl_id);
+ if (rdbtxn == null)
+ reply.status = Db.DB_NOSERVER_ID;
+ else
+ rdbtxn.abort(this, args, reply);
+ return reply;
+ }
+
+ public __txn_begin_reply __DB_txn_begin_4001(__txn_begin_msg args)
+ {
+ __txn_begin_reply reply = new __txn_begin_reply();
+ RpcDbTxn rdbtxn = new RpcDbTxn(getEnv(args.dbenvcl_id), null);
+ rdbtxn.begin(this, args, reply);
+ return reply;
+ }
+
+ public __txn_commit_reply __DB_txn_commit_4001(__txn_commit_msg args)
+ {
+ __txn_commit_reply reply = new __txn_commit_reply();
+ RpcDbTxn rdbtxn = getTxn(args.txnpcl_id);
+ if (rdbtxn == null)
+ reply.status = Db.DB_NOSERVER_ID;
+ else
+ rdbtxn.commit(this, args, reply);
+ return reply;
+ }
+
+ public __txn_discard_reply __DB_txn_discard_4001(__txn_discard_msg args)
+ {
+ __txn_discard_reply reply = new __txn_discard_reply();
+ RpcDbTxn rdbtxn = getTxn(args.txnpcl_id);
+ if (rdbtxn == null)
+ reply.status = Db.DB_NOSERVER_ID;
+ else
+ rdbtxn.discard(this, args, reply);
+ return reply;
+ }
+
+ public __txn_prepare_reply __DB_txn_prepare_4001(__txn_prepare_msg args)
+ {
+ __txn_prepare_reply reply = new __txn_prepare_reply();
+ RpcDbTxn rdbtxn = getTxn(args.txnpcl_id);
+ if (rdbtxn == null)
+ reply.status = Db.DB_NOSERVER_ID;
+ else
+ rdbtxn.prepare(this, args, reply);
+ return reply;
+ }
+
+ public __txn_recover_reply __DB_txn_recover_4001(__txn_recover_msg args)
+ {
+ __txn_recover_reply reply = new __txn_recover_reply();
+ RpcDbEnv rdbenv = getEnv(args.dbenvcl_id);
+ if (rdbenv == null)
+ reply.status = Db.DB_NOSERVER_ID;
+ else
+ rdbenv.txn_recover(this, args, reply);
+ return reply;
+ }
+}
diff --git a/bdb/rpc_server/java/DbServer.java b/bdb/rpc_server/java/DbServer.java
new file mode 100644
index 00000000000..9b20becbcdc
--- /dev/null
+++ b/bdb/rpc_server/java/DbServer.java
@@ -0,0 +1,301 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2001-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: DbServer.java,v 1.5 2002/08/09 01:56:09 bostic Exp $
+ */
+
+package com.sleepycat.db.rpcserver;
+
+import com.sleepycat.db.*;
+import java.io.*;
+import java.util.*;
+import org.acplt.oncrpc.OncRpcException;
+import org.acplt.oncrpc.server.OncRpcCallInformation;
+
+/**
+ * Main entry point for the Java version of the Berkeley DB RPC server
+ */
+public class DbServer extends DbDispatcher
+{
+	public static long idleto = 10 * 60 * 1000;	// 10 minutes
+ public static long defto = 5 * 60 * 1000; // 5 minutes
+ public static long maxto = 60 * 60 * 1000; // 1 hour
+ public static String passwd = null;
+ public static PrintWriter err;
+
+ long now, hint; // updated each operation
+ FreeList env_list = new FreeList();
+ FreeList db_list = new FreeList();
+ FreeList txn_list = new FreeList();
+ FreeList cursor_list = new FreeList();
+
+ public DbServer() throws IOException, OncRpcException
+ {
+ super();
+ init_lists();
+ }
+
+ public void dispatchOncRpcCall(OncRpcCallInformation call, int program,
+ int version, int procedure) throws OncRpcException, IOException
+ {
+ long newnow = System.currentTimeMillis();
+ // DbServer.err.println("Dispatching RPC call " + procedure + " after delay of " + (newnow - now));
+ now = newnow;
+ // DbServer.err.flush();
+ super.dispatchOncRpcCall(call, program, version, procedure);
+
+ try {
+ doTimeouts();
+ } catch(Throwable t) {
+ System.err.println("Caught " + t + " during doTimeouts()");
+ t.printStackTrace(System.err);
+ }
+ }
+
+ // Internal methods to track context
+ private void init_lists()
+ {
+ // We do this so that getEnv/Db/etc(0) == null
+ env_list.add(null);
+ db_list.add(null);
+ txn_list.add(null);
+ cursor_list.add(null);
+ }
+
+ int addEnv(RpcDbEnv rdbenv)
+ {
+ rdbenv.timer.last_access = now;
+ int id = env_list.add(rdbenv);
+ return id;
+ }
+
+ int addDb(RpcDb rdb)
+ {
+ int id = db_list.add(rdb);
+ return id;
+ }
+
+ int addTxn(RpcDbTxn rtxn)
+ {
+ rtxn.timer.last_access = now;
+ int id = txn_list.add(rtxn);
+ return id;
+ }
+
+ int addCursor(RpcDbc rdbc)
+ {
+ rdbc.timer.last_access = now;
+ int id = cursor_list.add(rdbc);
+ return id;
+ }
+
+ void delEnv(RpcDbEnv rdbenv)
+ {
+ // cursors and transactions will already have been cleaned up
+ for(LocalIterator i = db_list.iterator(); i.hasNext(); ) {
+ RpcDb rdb = (RpcDb)i.next();
+ if (rdb != null && rdb.rdbenv == rdbenv)
+ delDb(rdb);
+ }
+
+ env_list.del(rdbenv);
+ rdbenv.dispose();
+ }
+
+ void delDb(RpcDb rdb)
+ {
+ db_list.del(rdb);
+ rdb.dispose();
+
+ for(LocalIterator i = cursor_list.iterator(); i.hasNext(); ) {
+ RpcDbc rdbc = (RpcDbc)i.next();
+ if (rdbc != null && rdbc.timer == rdb)
+ i.remove();
+ }
+ }
+
+ void delTxn(RpcDbTxn rtxn)
+ {
+ txn_list.del(rtxn);
+ rtxn.dispose();
+
+ for(LocalIterator i = cursor_list.iterator(); i.hasNext(); ) {
+ RpcDbc rdbc = (RpcDbc)i.next();
+ if (rdbc != null && rdbc.timer == rtxn)
+ i.remove();
+ }
+
+ for(LocalIterator i = txn_list.iterator(); i.hasNext(); ) {
+ RpcDbTxn rtxn_child = (RpcDbTxn)i.next();
+ if (rtxn_child != null && rtxn_child.timer == rtxn)
+ i.remove();
+ }
+ }
+
+ void delCursor(RpcDbc rdbc)
+ {
+ cursor_list.del(rdbc);
+ rdbc.dispose();
+ }
+
+ RpcDbEnv getEnv(int envid)
+ {
+ RpcDbEnv rdbenv = (RpcDbEnv)env_list.get(envid);
+ if (rdbenv != null)
+ rdbenv.timer.last_access = now;
+ return rdbenv;
+ }
+
+ RpcDb getDb(int dbid)
+ {
+ RpcDb rdb = (RpcDb)db_list.get(dbid);
+ if (rdb != null)
+ rdb.rdbenv.timer.last_access = now;
+ return rdb;
+ }
+
+ RpcDbTxn getTxn(int txnid)
+ {
+ RpcDbTxn rtxn = (RpcDbTxn)txn_list.get(txnid);
+ if (rtxn != null)
+ rtxn.timer.last_access = rtxn.rdbenv.timer.last_access = now;
+ return rtxn;
+ }
+
+ RpcDbc getCursor(int dbcid)
+ {
+ RpcDbc rdbc = (RpcDbc)cursor_list.get(dbcid);
+ if (rdbc != null)
+ rdbc.last_access = rdbc.timer.last_access = rdbc.rdbenv.timer.last_access = now;
+ return rdbc;
+ }
+
+ void doTimeouts()
+ {
+ if (now < hint) {
+ // DbServer.err.println("Skipping cleaner sweep - now = " + now + ", hint = " + hint);
+ return;
+ }
+
+ // DbServer.err.println("Starting a cleaner sweep");
+ hint = now + DbServer.maxto;
+
+ for(LocalIterator i = cursor_list.iterator(); i.hasNext(); ) {
+ RpcDbc rdbc = (RpcDbc)i.next();
+ if (rdbc == null)
+ continue;
+
+ long end_time = rdbc.timer.last_access + rdbc.rdbenv.timeout;
+ // DbServer.err.println("Examining " + rdbc + ", time left = " + (end_time - now));
+ if (end_time < now) {
+ DbServer.err.println("Cleaning up " + rdbc);
+ delCursor(rdbc);
+ } else if (end_time < hint)
+ hint = end_time;
+ }
+
+ for(LocalIterator i = txn_list.iterator(); i.hasNext(); ) {
+ RpcDbTxn rtxn = (RpcDbTxn)i.next();
+ if (rtxn == null)
+ continue;
+
+ long end_time = rtxn.timer.last_access + rtxn.rdbenv.timeout;
+ // DbServer.err.println("Examining " + rtxn + ", time left = " + (end_time - now));
+ if (end_time < now) {
+ DbServer.err.println("Cleaning up " + rtxn);
+ delTxn(rtxn);
+ } else if (end_time < hint)
+ hint = end_time;
+ }
+
+ for(LocalIterator i = env_list.iterator(); i.hasNext(); ) {
+ RpcDbEnv rdbenv = (RpcDbEnv)i.next();
+ if (rdbenv == null)
+ continue;
+
+ long end_time = rdbenv.timer.last_access + rdbenv.idletime;
+ // DbServer.err.println("Examining " + rdbenv + ", time left = " + (end_time - now));
+ if (end_time < now) {
+ DbServer.err.println("Cleaning up " + rdbenv);
+ delEnv(rdbenv);
+ }
+ }
+
+ // if we didn't find anything, reset the hint
+ if (hint == now + DbServer.maxto)
+ hint = 0;
+
+ // DbServer.err.println("Finishing a cleaner sweep");
+ }
+
+ // Some constants that aren't available elsewhere
+ static final int DB_SERVER_FLAGMASK = Db.DB_LOCKDOWN |
+ Db.DB_PRIVATE | Db.DB_RECOVER | Db.DB_RECOVER_FATAL |
+ Db.DB_SYSTEM_MEM | Db.DB_USE_ENVIRON |
+ Db.DB_USE_ENVIRON_ROOT;
+ static final int DB_SERVER_ENVFLAGS = Db.DB_INIT_CDB |
+ Db.DB_INIT_LOCK | Db.DB_INIT_LOG | Db.DB_INIT_MPOOL |
+ Db.DB_INIT_TXN | Db.DB_JOINENV;
+ static final int DB_SERVER_DBFLAGS = Db.DB_DIRTY_READ |
+ Db.DB_NOMMAP | Db.DB_RDONLY;
+ static final int DB_SERVER_DBNOSHARE = Db.DB_EXCL | Db.DB_TRUNCATE;
+
+ public static void main(String[] args)
+ {
+ System.out.println("Starting DbServer...");
+ for (int i = 0; i < args.length; i++) {
+ if (args[i].charAt(0) != '-')
+ usage();
+
+ switch (args[i].charAt(1)) {
+ case 'h':
+ ++i; // add_home(args[++i]);
+ break;
+ case 'I':
+ idleto = Long.parseLong(args[++i]) * 1000L;
+ break;
+ case 'P':
+ passwd = args[++i];
+ break;
+ case 't':
+ defto = Long.parseLong(args[++i]) * 1000L;
+ break;
+ case 'T':
+ maxto = Long.parseLong(args[++i]) * 1000L;
+ break;
+ case 'V':
+ // version;
+ break;
+ case 'v':
+ // verbose
+ break;
+ default:
+ usage();
+ }
+ }
+
+ try {
+ DbServer.err = new PrintWriter(new FileOutputStream("JavaRPCServer.trace", true));
+ DbServer server = new DbServer();
+ server.run();
+ } catch (Throwable e) {
+ System.out.println("DbServer exception:");
+ e.printStackTrace(DbServer.err);
+ } finally {
+ if (DbServer.err != null)
+ DbServer.err.close();
+ }
+
+ System.out.println("DbServer stopped.");
+ }
+
+ static void usage()
+ {
+ System.err.println("usage: java com.sleepycat.db.rpcserver.DbServer \\");
+ System.err.println("[-Vv] [-h home] [-P passwd] [-I idletimeout] [-L logfile] [-t def_timeout] [-T maxtimeout]");
+ System.exit(1);
+ }
+}
diff --git a/bdb/rpc_server/java/FreeList.java b/bdb/rpc_server/java/FreeList.java
new file mode 100644
index 00000000000..e831c466137
--- /dev/null
+++ b/bdb/rpc_server/java/FreeList.java
@@ -0,0 +1,102 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2001-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: FreeList.java,v 1.3 2002/08/09 01:56:09 bostic Exp $
+ */
+
+package com.sleepycat.db.rpcserver;
+
+import java.util.*;
+
+/**
+ * Keep track of a list of objects by id with a free list.
+ * Intentionally package-protected exposure.
+ */
+class FreeList
+{
+ class FreeIndex {
+ int index;
+ FreeIndex(int index) { this.index = index; }
+ int getIndex() { return index; }
+ }
+
+ Vector items = new Vector();
+ FreeIndex free_head = null;
+
+ public synchronized int add(Object obj) {
+ int pos;
+ if (free_head == null) {
+ pos = items.size();
+ items.addElement(obj);
+ if (pos % 1000 == 0)
+ DbServer.err.println(this + " grew to size " + pos);
+ } else {
+ pos = free_head.getIndex();
+ free_head = (FreeIndex)items.elementAt(pos);
+ items.setElementAt(obj, pos);
+ }
+ return pos;
+ }
+
+ public synchronized void del(int pos) {
+ Object obj = items.elementAt(pos);
+ if (obj != null && obj instanceof FreeIndex)
+ throw new NoSuchElementException("index " + pos + " has already been freed");
+ items.setElementAt(free_head, pos);
+ free_head = new FreeIndex(pos);
+ }
+
+ public void del(Object obj) {
+ del(items.indexOf(obj));
+ }
+
+ public Object get(int pos) {
+ Object obj = items.elementAt(pos);
+ if (obj instanceof FreeIndex)
+ obj = null;
+ return obj;
+ }
+
+ public LocalIterator iterator() {
+ return new FreeListIterator();
+ }
+
+ /**
+ * Iterator for a FreeList. Note that this class doesn't implement
+ * java.util.Iterator to maintain compatibility with Java 1.1
+ * Intentionally package-protected exposure.
+ */
+ class FreeListIterator implements LocalIterator {
+ int current;
+
+ FreeListIterator() { current = findNext(-1); }
+
+ private int findNext(int start) {
+ int next = start;
+ while (++next < items.size()) {
+ Object obj = items.elementAt(next);
+ if (obj == null || !(obj instanceof FreeIndex))
+ break;
+ }
+ return next;
+ }
+
+ public boolean hasNext() {
+ return (findNext(current) < items.size());
+ }
+
+ public Object next() {
+ current = findNext(current);
+ if (current == items.size())
+ throw new NoSuchElementException("enumerated past end of FreeList");
+ return items.elementAt(current);
+ }
+
+ public void remove() {
+ del(current);
+ }
+ }
+}
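The FreeList class above is the id table that DbServer builds its env/db/txn/cursor tables on: add() returns the slot index that is handed back to clients as an id, get() returns null for slot 0 and for freed slots, and del() pushes the slot onto the free list for reuse. The following is a minimal, hypothetical usage sketch, not part of this patch; the demo class name and the string handles are invented for illustration, and it must live in the com.sleepycat.db.rpcserver package because FreeList is package-private.

    package com.sleepycat.db.rpcserver;

    import java.io.PrintWriter;

    // Hypothetical demo, not part of the patch: exercises the FreeList id table.
    class FreeListDemo {
        public static void main(String[] args) {
            DbServer.err = new PrintWriter(System.err, true); // FreeList.add() logs growth via DbServer.err
            FreeList handles = new FreeList();
            handles.add(null);                   // reserve slot 0 so get(0) == null, as DbServer.init_lists() does

            int id = handles.add("some handle"); // returned index doubles as the client-visible id
            System.out.println(handles.get(id)); // prints "some handle"
            handles.del(id);                     // slot goes onto the free list
            int id2 = handles.add("reused");     // the freed slot is recycled
            System.out.println(id2 == id);       // prints "true"
        }
    }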
diff --git a/bdb/rpc_server/java/LocalIterator.java b/bdb/rpc_server/java/LocalIterator.java
new file mode 100644
index 00000000000..eecb0b5e78d
--- /dev/null
+++ b/bdb/rpc_server/java/LocalIterator.java
@@ -0,0 +1,23 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2001-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: LocalIterator.java,v 1.2 2002/08/09 01:56:09 bostic Exp $
+ */
+
+package com.sleepycat.db.rpcserver;
+
+import java.util.*;
+
+/**
+ * Iterator interface. Note that this matches java.util.Iterator
+ * but maintains compatibility with Java 1.1
+ * Intentionally package-protected exposure.
+ */
+interface LocalIterator {
+ boolean hasNext();
+ Object next();
+ void remove();
+}
diff --git a/bdb/rpc_server/java/README b/bdb/rpc_server/java/README
new file mode 100644
index 00000000000..c2d8f3abd57
--- /dev/null
+++ b/bdb/rpc_server/java/README
@@ -0,0 +1,24 @@
+Berkeley DB Java RPC server, copyright (C) 2002 Sleepycat Software
+
+The Java implementation of the Berkeley DB RPC server is intended
+primarily for testing purposes. It provides the same interface
+as the C and C++ RPC servers, but is implemented via the Java API
+rather than the C or C++ APIs. This allows the existing Tcl test
+suite to exercise the Java API without modification.
+
+The Java RPC server relies on a Java version of rpcgen to
+automatically generate appropriate Java classes from the RPC
+interface specification (../db_server.x). We use jrpcgen, which
+is part of the Remote Tea for Java project:
+ acplt.plt.rwth-aachen.de/ks/english/remotetea.html
+
+To rebuild the Java stubs from db_server.x, you will need to
+download the full Remote Tea package, but if you just want to
+compile the Java sources and run the Java RPC server, the runtime
+component of Remote Tea is included in oncrpc.jar. Building
+the Java RPC server is automatic when Berkeley DB is configured
+with both --enable-rpc and --enable-java.
+
+All of the Remote Tea project is licensed under the GNU Library
+General Public License, and we have made no modifications to their
+released code.
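For readers unfamiliar with jrpcgen output, the following hand-written approximation (not part of this patch, and not the actual generated code) suggests the rough shape of one request/reply pair produced from db_server.x. The field names are taken from their use in DbDispatcher and RpcDb above; everything else, including the ONC RPC/XDR (de)serialization plumbing the real generated classes carry, is assumed and omitted.

    package com.sleepycat.db.rpcserver;

    // Hand-written sketch only; the real jrpcgen output is public, one class per
    // file, and includes XDR encode/decode support.  Field names mirror the uses
    // seen in DbDispatcher.__DB_db_del_4001() and RpcDb.del() in this patch.
    class __db_del_msg {
        int dbpcl_id;                        // client id of the Db handle (looked up via getDb)
        int txnpcl_id;                       // client id of the enclosing txn, or 0 for none
        int keydlen, keydoff, keyulen, keyflags;
        byte[] keydata;                      // the key bytes for the delete
        int flags;                           // flags passed through to Db.del()
    }

    class __db_del_reply {
        int status;                          // 0 on success, a DB error, or Db.DB_NOSERVER_ID
    }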
diff --git a/bdb/rpc_server/java/RpcDb.java b/bdb/rpc_server/java/RpcDb.java
new file mode 100644
index 00000000000..59da9be67dc
--- /dev/null
+++ b/bdb/rpc_server/java/RpcDb.java
@@ -0,0 +1,694 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2001-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: RpcDb.java,v 1.8 2002/08/09 01:56:09 bostic Exp $
+ */
+
+package com.sleepycat.db.rpcserver;
+
+import com.sleepycat.db.*;
+import java.io.IOException;
+import java.io.*;
+import java.util.*;
+
+/**
+ * RPC wrapper around a db object for the Java RPC server.
+ */
+public class RpcDb extends Timer
+{
+ static final byte[] empty = new byte[0];
+ Db db;
+ RpcDbEnv rdbenv;
+ int refcount = 1;
+ String dbname, subdbname;
+ int type, setflags, openflags;
+
+ public RpcDb(RpcDbEnv rdbenv)
+ {
+ this.rdbenv = rdbenv;
+ }
+
+ void dispose()
+ {
+ if (db != null) {
+ try {
+ db.close(0);
+ } catch(DbException e) {
+ e.printStackTrace(DbServer.err);
+ }
+ db = null;
+ }
+ }
+
+ public void associate(DbDispatcher server,
+ __db_associate_msg args, __db_associate_reply reply)
+ {
+ try {
+ RpcDbTxn rtxn = server.getTxn(args.txnpcl_id);
+ DbTxn txn = (rtxn != null) ? rtxn.txn : null;
+ /*
+ * We do not support DB_CREATE for associate. Users
+ * can only access secondary indices on a read-only basis,
+ * so whatever they are looking for needs to be there already.
+ */
+ db.associate(txn, server.getDb(args.sdbpcl_id).db, null, args.flags);
+ reply.status = 0;
+ } catch(DbException e) {
+ e.printStackTrace(DbServer.err);
+ reply.status = e.get_errno();
+ }
+ }
+
+ public void close(DbDispatcher server,
+ __db_close_msg args, __db_close_reply reply)
+ {
+ if (--refcount != 0) {
+ reply.status = 0;
+ return;
+ }
+
+ try {
+ db.close(args.flags);
+ db = null;
+ reply.status = 0;
+ } catch(DbException e) {
+ e.printStackTrace(DbServer.err);
+ reply.status = e.get_errno();
+ } finally {
+ server.delDb(this);
+ }
+ }
+
+ public void create(DbDispatcher server,
+ __db_create_msg args, __db_create_reply reply)
+ {
+ try {
+ db = new Db(server.getEnv(args.dbenvcl_id).dbenv, args.flags);
+ reply.dbcl_id = server.addDb(this);
+ reply.status = 0;
+ } catch(DbException e) {
+ e.printStackTrace(DbServer.err);
+ reply.status = e.get_errno();
+ }
+ }
+
+ public void cursor(DbDispatcher server,
+ __db_cursor_msg args, __db_cursor_reply reply)
+ {
+ try {
+ RpcDbTxn rtxn = server.getTxn(args.txnpcl_id);
+ DbTxn txn = (rtxn != null) ? rtxn.txn : null;
+ Dbc dbc = db.cursor(txn, args.flags);
+ RpcDbc rdbc = new RpcDbc(this, dbc, false);
+ rdbc.timer = (rtxn != null) ? rtxn.timer : this;
+ reply.dbcidcl_id = server.addCursor(rdbc);
+ reply.status = 0;
+ } catch(DbException e) {
+ e.printStackTrace(DbServer.err);
+ reply.status = e.get_errno();
+ }
+ }
+
+ public void del(DbDispatcher server,
+ __db_del_msg args, __db_del_reply reply)
+ {
+ try {
+ RpcDbTxn rtxn = server.getTxn(args.txnpcl_id);
+ DbTxn txn = (rtxn != null) ? rtxn.txn : null;
+ Dbt key = new Dbt(args.keydata);
+ key.set_dlen(args.keydlen);
+ key.set_doff(args.keydoff);
+ key.set_ulen(args.keyulen);
+ key.set_flags(args.keyflags);
+
+ db.del(txn, key, args.flags);
+ reply.status = 0;
+ } catch(DbException e) {
+ e.printStackTrace(DbServer.err);
+ reply.status = e.get_errno();
+ }
+ }
+
+ public void get(DbDispatcher server,
+ __db_get_msg args, __db_get_reply reply)
+ {
+ try {
+ RpcDbTxn rtxn = server.getTxn(args.txnpcl_id);
+ DbTxn txn = (rtxn != null) ? rtxn.txn : null;
+ Dbt key = new Dbt(args.keydata);
+ key.set_dlen(args.keydlen);
+ key.set_doff(args.keydoff);
+ key.set_ulen(args.keyulen);
+ key.set_flags(Db.DB_DBT_MALLOC |
+ (args.keyflags & Db.DB_DBT_PARTIAL));
+
+ Dbt data = new Dbt(args.datadata);
+ data.set_dlen(args.datadlen);
+ data.set_doff(args.datadoff);
+ data.set_ulen(args.dataulen);
+ if ((args.flags & Db.DB_MULTIPLE) != 0) {
+ if (data.get_data().length == 0)
+ data.set_data(new byte[data.get_ulen()]);
+ data.set_flags(Db.DB_DBT_USERMEM |
+ (args.dataflags & Db.DB_DBT_PARTIAL));
+ } else
+ data.set_flags(Db.DB_DBT_MALLOC |
+ (args.dataflags & Db.DB_DBT_PARTIAL));
+
+ reply.status = db.get(txn, key, data, args.flags);
+
+ if (key.get_data() == args.keydata ||
+ key.get_data().length != key.get_size()) {
+ reply.keydata = new byte[key.get_size()];
+ System.arraycopy(key.get_data(), 0, reply.keydata, 0, key.get_size());
+ } else
+ reply.keydata = key.get_data();
+
+ if (data.get_data() == args.datadata ||
+ data.get_data().length != data.get_size()) {
+ reply.datadata = new byte[data.get_size()];
+ System.arraycopy(data.get_data(), 0, reply.datadata, 0, data.get_size());
+ } else
+ reply.datadata = data.get_data();
+ } catch(DbException e) {
+ e.printStackTrace(DbServer.err);
+ reply.status = e.get_errno();
+ reply.keydata = reply.datadata = empty;
+ }
+ }
+
+ public void join(DbDispatcher server,
+ __db_join_msg args, __db_join_reply reply)
+ {
+ try {
+ Dbc[] cursors = new Dbc[args.curs.length + 1];
+ for(int i = 0; i < args.curs.length; i++) {
+ RpcDbc rdbc = server.getCursor(args.curs[i]);
+ if (rdbc == null) {
+ reply.status = Db.DB_NOSERVER_ID;
+ return;
+ }
+ cursors[i] = rdbc.dbc;
+ }
+ cursors[args.curs.length] = null;
+
+ Dbc jdbc = db.join(cursors, args.flags);
+
+ RpcDbc rjdbc = new RpcDbc(this, jdbc, true);
+ /*
+ * If our curslist has a parent txn, we need to use it too
+ * for the activity timeout. All cursors must be part of
+ * the same transaction, so just check the first.
+ */
+ RpcDbc rdbc0 = server.getCursor(args.curs[0]);
+ if (rdbc0.timer != rdbc0)
+ rjdbc.timer = rdbc0.timer;
+
+ /*
+ * All of the curslist cursors must point to the join
+ * cursor's timeout so that we do not timeout any of the
+ * curslist cursors while the join cursor is active.
+ */
+ for(int i = 0; i < args.curs.length; i++) {
+ RpcDbc rdbc = server.getCursor(args.curs[i]);
+ rdbc.orig_timer = rdbc.timer;
+ rdbc.timer = rjdbc;
+ }
+ reply.dbcidcl_id = server.addCursor(rjdbc);
+ reply.status = 0;
+ } catch(DbException e) {
+ e.printStackTrace(DbServer.err);
+ reply.status = e.get_errno();
+ }
+ }
+
+ public void key_range(DbDispatcher server,
+ __db_key_range_msg args, __db_key_range_reply reply)
+ {
+ try {
+ RpcDbTxn rtxn = server.getTxn(args.txnpcl_id);
+ DbTxn txn = (rtxn != null) ? rtxn.txn : null;
+ Dbt key = new Dbt(args.keydata);
+ key.set_dlen(args.keydlen);
+ key.set_doff(args.keydoff);
+ key.set_ulen(args.keyulen);
+ key.set_flags(args.keyflags);
+
+ DbKeyRange range = new DbKeyRange();
+
+ db.key_range(txn, key, range, args.flags);
+ reply.status = 0;
+ reply.less = range.less;
+ reply.equal = range.equal;
+ reply.greater = range.greater;
+ } catch(DbException e) {
+ e.printStackTrace(DbServer.err);
+ reply.status = e.get_errno();
+ }
+ }
+
+ private boolean findSharedDb(DbDispatcher server, __db_open_reply reply)
+ throws DbException
+ {
+ RpcDb rdb = null;
+ boolean matchFound = false;
+ LocalIterator i = ((DbServer)server).db_list.iterator();
+
+ while (!matchFound && i.hasNext()) {
+ rdb = (RpcDb)i.next();
+ if (rdb != null && rdb != this && rdb.rdbenv == rdbenv &&
+ (type == Db.DB_UNKNOWN || rdb.type == type) &&
+ openflags == rdb.openflags &&
+ setflags == rdb.setflags &&
+ dbname != null && rdb.dbname != null &&
+ dbname.equals(rdb.dbname) &&
+ (subdbname == rdb.subdbname ||
+ (subdbname != null && rdb.subdbname != null &&
+ subdbname.equals(rdb.subdbname))))
+ matchFound = true;
+ }
+
+ if (matchFound) {
+ ++rdb.refcount;
+ reply.dbcl_id = ((FreeList.FreeListIterator)i).current;
+ reply.type = rdb.db.get_type();
+ reply.dbflags = rdb.db.get_flags_raw();
+ // FIXME: not possible to work out byteorder from Java?
+ reply.lorder = rdb.db.get_byteswapped() ? 4321 : 1234;
+ reply.status = 0;
+
+ DbServer.err.println("Sharing Db: " + reply.dbcl_id);
+ }
+
+ return matchFound;
+ }
+
+ public void open(DbDispatcher server,
+ __db_open_msg args, __db_open_reply reply)
+ {
+ try {
+ dbname = (args.name.length() > 0) ? args.name : null;
+ subdbname = (args.subdb.length() > 0) ? args.subdb : null;
+ type = args.type;
+ openflags = args.flags & DbServer.DB_SERVER_DBFLAGS;
+
+ if (findSharedDb(server, reply)) {
+ db.close(0);
+ db = null;
+ server.delDb(this);
+ } else {
+ DbServer.err.println("Calling db.open(" + null + ", " + dbname + ", " + subdbname + ", " + args.type + ", " + Integer.toHexString(args.flags) + ", " + args.mode + ")");
+ db.open(null, dbname, subdbname, args.type, args.flags, args.mode);
+
+ reply.dbcl_id = args.dbpcl_id;
+ reply.type = this.type = db.get_type();
+ reply.dbflags = db.get_flags_raw();
+ // FIXME: not possible to work out byteorder from Java?
+ reply.lorder = db.get_byteswapped() ? 4321 : 1234;
+ reply.status = 0;
+ }
+ } catch(DbException e) {
+ e.printStackTrace(DbServer.err);
+ reply.status = e.get_errno();
+ } catch(FileNotFoundException e) {
+ e.printStackTrace(DbServer.err);
+ reply.status = Db.DB_NOTFOUND;
+ }
+
+ // System.err.println("Db.open: reply.status = " + reply.status + ", reply.dbcl_id = " + reply.dbcl_id);
+ }
+
+ public void pget(DbDispatcher server,
+ __db_pget_msg args, __db_pget_reply reply)
+ {
+ try {
+ RpcDbTxn rtxn = server.getTxn(args.txnpcl_id);
+ DbTxn txn = (rtxn != null) ? rtxn.txn : null;
+ Dbt skey = new Dbt(args.skeydata);
+ skey.set_dlen(args.skeydlen);
+ skey.set_doff(args.skeydoff);
+ skey.set_ulen(args.skeyulen);
+ skey.set_flags(Db.DB_DBT_MALLOC |
+ (args.skeyflags & Db.DB_DBT_PARTIAL));
+
+ Dbt pkey = new Dbt(args.pkeydata);
+ pkey.set_dlen(args.pkeydlen);
+ pkey.set_doff(args.pkeydoff);
+ pkey.set_ulen(args.pkeyulen);
+ pkey.set_flags(Db.DB_DBT_MALLOC |
+ (args.pkeyflags & Db.DB_DBT_PARTIAL));
+
+ Dbt data = new Dbt(args.datadata);
+ data.set_dlen(args.datadlen);
+ data.set_doff(args.datadoff);
+ data.set_ulen(args.dataulen);
+ data.set_flags(Db.DB_DBT_MALLOC |
+ (args.dataflags & Db.DB_DBT_PARTIAL));
+
+ db.pget(txn, skey, pkey, data, args.flags);
+
+ if (skey.get_data() == args.skeydata ||
+ skey.get_data().length != skey.get_size()) {
+ reply.skeydata = new byte[skey.get_size()];
+ System.arraycopy(skey.get_data(), 0, reply.skeydata, 0, skey.get_size());
+ } else
+ reply.skeydata = skey.get_data();
+
+ if (pkey.get_data() == args.pkeydata ||
+ pkey.get_data().length != pkey.get_size()) {
+ reply.pkeydata = new byte[pkey.get_size()];
+ System.arraycopy(pkey.get_data(), 0, reply.pkeydata, 0, pkey.get_size());
+ } else
+ reply.pkeydata = pkey.get_data();
+
+ if (data.get_data() == args.datadata ||
+ data.get_data().length != data.get_size()) {
+ reply.datadata = new byte[data.get_size()];
+ System.arraycopy(data.get_data(), 0, reply.datadata, 0, data.get_size());
+ } else
+ reply.datadata = data.get_data();
+
+ reply.status = 0;
+ } catch(DbException e) {
+ e.printStackTrace(DbServer.err);
+ reply.status = e.get_errno();
+ reply.skeydata = reply.pkeydata = reply.datadata = empty;
+ }
+ }
+
+ public void put(DbDispatcher server,
+ __db_put_msg args, __db_put_reply reply)
+ {
+ try {
+ RpcDbTxn rtxn = server.getTxn(args.txnpcl_id);
+ DbTxn txn = (rtxn != null) ? rtxn.txn : null;
+
+ Dbt key = new Dbt(args.keydata);
+ key.set_dlen(args.keydlen);
+ key.set_doff(args.keydoff);
+ key.set_ulen(args.keyulen);
+ key.set_flags(Db.DB_DBT_MALLOC |
+ (args.keyflags & Db.DB_DBT_PARTIAL));
+
+ Dbt data = new Dbt(args.datadata);
+ data.set_dlen(args.datadlen);
+ data.set_doff(args.datadoff);
+ data.set_ulen(args.dataulen);
+ data.set_flags(args.dataflags);
+
+ reply.status = db.put(txn, key, data, args.flags);
+
+ /*
+ * If the client did a DB_APPEND, return the newly generated
+ * key in the reply.  Otherwise return just the status.
+ */
+ if ((args.flags & Db.DB_APPEND) != 0) {
+ if (key.get_data() == args.keydata ||
+ key.get_data().length != key.get_size()) {
+ reply.keydata = new byte[key.get_size()];
+ System.arraycopy(key.get_data(), 0, reply.keydata, 0, key.get_size());
+ } else
+ reply.keydata = key.get_data();
+ } else
+ reply.keydata = empty;
+ } catch(DbException e) {
+ reply.keydata = empty;
+ reply.status = e.get_errno();
+ DbServer.err.println("Exception, setting status to " + reply.status);
+ e.printStackTrace(DbServer.err);
+ }
+ }
+
+ public void remove(DbDispatcher server,
+ __db_remove_msg args, __db_remove_reply reply)
+ {
+ try {
+ args.name = (args.name.length() > 0) ? args.name : null;
+ args.subdb = (args.subdb.length() > 0) ? args.subdb : null;
+ db.remove(args.name, args.subdb, args.flags);
+ db = null;
+ reply.status = 0;
+ } catch(DbException e) {
+ e.printStackTrace(DbServer.err);
+ reply.status = e.get_errno();
+ } catch(FileNotFoundException e) {
+ e.printStackTrace(DbServer.err);
+ reply.status = Db.DB_NOTFOUND;
+ } finally {
+ server.delDb(this);
+ }
+ }
+
+ public void rename(DbDispatcher server,
+ __db_rename_msg args, __db_rename_reply reply)
+ {
+ try {
+ args.name = (args.name.length() > 0) ? args.name : null;
+ args.subdb = (args.subdb.length() > 0) ? args.subdb : null;
+ args.newname = (args.newname.length() > 0) ? args.newname : null;
+ db.rename(args.name, args.subdb, args.newname, args.flags);
+ db = null;
+ reply.status = 0;
+ } catch(DbException e) {
+ e.printStackTrace(DbServer.err);
+ reply.status = e.get_errno();
+ } catch(FileNotFoundException e) {
+ e.printStackTrace(DbServer.err);
+ reply.status = Db.DB_NOTFOUND;
+ } finally {
+ server.delDb(this);
+ }
+ }
+
+ public void set_bt_maxkey(DbDispatcher server,
+ __db_bt_maxkey_msg args, __db_bt_maxkey_reply reply)
+ {
+ try {
+ db.set_bt_maxkey(args.maxkey);
+ reply.status = 0;
+ } catch(DbException e) {
+ e.printStackTrace(DbServer.err);
+ reply.status = e.get_errno();
+ }
+ }
+
+ public void set_bt_minkey(DbDispatcher server,
+ __db_bt_minkey_msg args, __db_bt_minkey_reply reply)
+ {
+ try {
+ db.set_bt_minkey(args.minkey);
+ reply.status = 0;
+ } catch(DbException e) {
+ e.printStackTrace(DbServer.err);
+ reply.status = e.get_errno();
+ }
+ }
+
+ public void set_encrypt(DbDispatcher server,
+ __db_encrypt_msg args, __db_encrypt_reply reply)
+ {
+ try {
+ db.set_encrypt(args.passwd, args.flags);
+ reply.status = 0;
+ } catch(DbException e) {
+ e.printStackTrace(DbServer.err);
+ reply.status = e.get_errno();
+ }
+ }
+
+ public void set_flags(DbDispatcher server,
+ __db_flags_msg args, __db_flags_reply reply)
+ {
+ try {
+ // DbServer.err.println("Calling db.setflags(" + Integer.toHexString(args.flags) + ")");
+ db.set_flags(args.flags);
+ setflags |= args.flags;
+ reply.status = 0;
+ } catch(DbException e) {
+ e.printStackTrace(DbServer.err);
+ reply.status = e.get_errno();
+ }
+ }
+
+ public void set_h_ffactor(DbDispatcher server,
+ __db_h_ffactor_msg args, __db_h_ffactor_reply reply)
+ {
+ try {
+ db.set_h_ffactor(args.ffactor);
+ reply.status = 0;
+ } catch(DbException e) {
+ e.printStackTrace(DbServer.err);
+ reply.status = e.get_errno();
+ }
+ }
+
+ public void set_h_nelem(DbDispatcher server,
+ __db_h_nelem_msg args, __db_h_nelem_reply reply)
+ {
+ try {
+ db.set_h_nelem(args.nelem);
+ reply.status = 0;
+ } catch(DbException e) {
+ e.printStackTrace(DbServer.err);
+ reply.status = e.get_errno();
+ }
+ }
+
+ public void set_lorder(DbDispatcher server,
+ __db_lorder_msg args, __db_lorder_reply reply)
+ {
+ try {
+ db.set_lorder(args.lorder);
+ reply.status = 0;
+ } catch(DbException e) {
+ e.printStackTrace(DbServer.err);
+ reply.status = e.get_errno();
+ }
+ }
+
+ public void set_pagesize(DbDispatcher server,
+ __db_pagesize_msg args, __db_pagesize_reply reply)
+ {
+ try {
+ db.set_pagesize(args.pagesize);
+ reply.status = 0;
+ } catch(DbException e) {
+ e.printStackTrace(DbServer.err);
+ reply.status = e.get_errno();
+ }
+ }
+
+ public void set_q_extentsize(DbDispatcher server,
+ __db_extentsize_msg args, __db_extentsize_reply reply)
+ {
+ try {
+ db.set_q_extentsize(args.extentsize);
+ reply.status = 0;
+ } catch(DbException e) {
+ e.printStackTrace(DbServer.err);
+ reply.status = e.get_errno();
+ }
+ }
+
+ public void set_re_delim(DbDispatcher server,
+ __db_re_delim_msg args, __db_re_delim_reply reply)
+ {
+ try {
+ db.set_re_delim(args.delim);
+ reply.status = 0;
+ } catch(DbException e) {
+ e.printStackTrace(DbServer.err);
+ reply.status = e.get_errno();
+ }
+ }
+
+ public void set_re_len(DbDispatcher server,
+ __db_re_len_msg args, __db_re_len_reply reply)
+ {
+ try {
+ db.set_re_len(args.len);
+ reply.status = 0;
+ } catch(DbException e) {
+ e.printStackTrace(DbServer.err);
+ reply.status = e.get_errno();
+ }
+ }
+
+ public void set_re_pad(DbDispatcher server,
+ __db_re_pad_msg args, __db_re_pad_reply reply)
+ {
+ try {
+ db.set_re_pad(args.pad);
+ reply.status = 0;
+ } catch(DbException e) {
+ e.printStackTrace(DbServer.err);
+ reply.status = e.get_errno();
+ }
+ }
+
+ public void stat(DbDispatcher server,
+ __db_stat_msg args, __db_stat_reply reply)
+ {
+ try {
+ Object raw_stat = db.stat(args.flags);
+
+ if (raw_stat instanceof DbHashStat) {
+ DbHashStat hs = (DbHashStat)raw_stat;
+ int[] raw_stats = {
+ hs.hash_magic, hs.hash_version,
+ hs.hash_metaflags, hs.hash_nkeys,
+ hs.hash_ndata, hs.hash_pagesize,
+ hs.hash_ffactor, hs.hash_buckets,
+ hs.hash_free, hs.hash_bfree,
+ hs.hash_bigpages, hs.hash_big_bfree,
+ hs.hash_overflows, hs.hash_ovfl_free,
+ hs.hash_dup, hs.hash_dup_free
+ };
+ reply.stats = raw_stats;
+ } else if (raw_stat instanceof DbQueueStat) {
+ DbQueueStat qs = (DbQueueStat)raw_stat;
+ int[] raw_stats = {
+ qs.qs_magic, qs.qs_version,
+ qs.qs_metaflags, qs.qs_nkeys,
+ qs.qs_ndata, qs.qs_pagesize,
+ qs.qs_extentsize, qs.qs_pages,
+ qs.qs_re_len, qs.qs_re_pad,
+ qs.qs_pgfree, qs.qs_first_recno,
+ qs.qs_cur_recno
+ };
+ reply.stats = raw_stats;
+ } else if (raw_stat instanceof DbBtreeStat) {
+ DbBtreeStat bs = (DbBtreeStat)raw_stat;
+ int[] raw_stats = {
+ bs.bt_magic, bs.bt_version,
+ bs.bt_metaflags, bs.bt_nkeys,
+ bs.bt_ndata, bs.bt_pagesize,
+ bs.bt_maxkey, bs.bt_minkey,
+ bs.bt_re_len, bs.bt_re_pad,
+ bs.bt_levels, bs.bt_int_pg,
+ bs.bt_leaf_pg, bs.bt_dup_pg,
+ bs.bt_over_pg, bs.bt_free,
+ bs.bt_int_pgfree, bs.bt_leaf_pgfree,
+ bs.bt_dup_pgfree, bs.bt_over_pgfree
+ };
+ reply.stats = raw_stats;
+ } else
+ throw new DbException("Invalid return type from db.stat()", Db.DB_NOTFOUND);
+
+ reply.status = 0;
+ } catch(DbException e) {
+ e.printStackTrace(DbServer.err);
+ reply.status = e.get_errno();
+ reply.stats = new int[0];
+ }
+ }
+
+ public void sync(DbDispatcher server,
+ __db_sync_msg args, __db_sync_reply reply)
+ {
+ try {
+ db.sync(args.flags);
+ reply.status = 0;
+ } catch(DbException e) {
+ e.printStackTrace(DbServer.err);
+ reply.status = e.get_errno();
+ }
+ }
+
+ public void truncate(DbDispatcher server,
+ __db_truncate_msg args, __db_truncate_reply reply)
+ {
+ try {
+ RpcDbTxn rtxn = server.getTxn(args.txnpcl_id);
+ DbTxn txn = (rtxn != null) ? rtxn.txn : null;
+ reply.count = db.truncate(txn, args.flags);
+ reply.status = 0;
+ } catch(DbException e) {
+ e.printStackTrace(DbServer.err);
+ reply.status = e.get_errno();
+ }
+ }
+}
diff --git a/bdb/rpc_server/java/RpcDbEnv.java b/bdb/rpc_server/java/RpcDbEnv.java
new file mode 100644
index 00000000000..9d9f1ba4324
--- /dev/null
+++ b/bdb/rpc_server/java/RpcDbEnv.java
@@ -0,0 +1,269 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2001-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: RpcDbEnv.java,v 1.6 2002/08/23 08:45:59 mjc Exp $
+ */
+
+package com.sleepycat.db.rpcserver;
+
+import com.sleepycat.db.*;
+import java.io.IOException;
+import java.io.*;
+import java.util.*;
+
+/**
+ * RPC wrapper around a dbenv for the Java RPC server.
+ */
+public class RpcDbEnv extends Timer
+{
+ DbEnv dbenv;
+ String home;
+ long idletime, timeout;
+ int openflags, onflags, offflags;
+ int refcount = 1;
+
+ void dispose()
+ {
+ if (dbenv != null) {
+ try {
+ dbenv.close(0);
+ } catch(DbException e) {
+ e.printStackTrace(DbServer.err);
+ }
+ dbenv = null;
+ }
+ }
+
+ public void close(DbDispatcher server,
+ __env_close_msg args, __env_close_reply reply)
+ {
+ if (--refcount != 0) {
+ reply.status = 0;
+ return;
+ }
+
+ try {
+ dbenv.close(args.flags);
+ dbenv = null;
+ reply.status = 0;
+ } catch(DbException e) {
+ e.printStackTrace(DbServer.err);
+ reply.status = e.get_errno();
+ } finally {
+ server.delEnv(this);
+ }
+ }
+
+ public void create(DbDispatcher server,
+ __env_create_msg args, __env_create_reply reply)
+ {
+ this.idletime = (args.timeout != 0) ? args.timeout : DbServer.idleto;
+ this.timeout = DbServer.defto;
+ try {
+ dbenv = new DbEnv(0);
+ reply.envcl_id = server.addEnv(this);
+ reply.status = 0;
+ } catch(DbException e) {
+ e.printStackTrace(DbServer.err);
+ reply.status = e.get_errno();
+ }
+ }
+
+ public void dbremove(DbDispatcher server,
+ __env_dbremove_msg args, __env_dbremove_reply reply)
+ {
+ try {
+ args.name = (args.name.length() > 0) ? args.name : null;
+ args.subdb = (args.subdb.length() > 0) ? args.subdb : null;
+
+ RpcDbTxn rtxn = server.getTxn(args.txnpcl_id);
+ DbTxn txn = (rtxn != null) ? rtxn.txn : null;
+ dbenv.dbremove(txn, args.name, args.subdb, args.flags);
+ reply.status = 0;
+ } catch(DbException e) {
+ e.printStackTrace(DbServer.err);
+ reply.status = e.get_errno();
+ }
+ }
+
+ public void dbrename(DbDispatcher server,
+ __env_dbrename_msg args, __env_dbrename_reply reply)
+ {
+ try {
+ args.name = (args.name.length() > 0) ? args.name : null;
+ args.subdb = (args.subdb.length() > 0) ? args.subdb : null;
+ args.newname = (args.newname.length() > 0) ? args.newname : null;
+
+ RpcDbTxn rtxn = server.getTxn(args.txnpcl_id);
+ DbTxn txn = (rtxn != null) ? rtxn.txn : null;
+ dbenv.dbrename(txn, args.name, args.subdb, args.newname, args.flags);
+ reply.status = 0;
+ } catch(DbException e) {
+ e.printStackTrace(DbServer.err);
+ reply.status = e.get_errno();
+ }
+ }
+
+ private boolean findSharedDbEnv(DbDispatcher server, __env_open_reply reply)
+ throws DbException
+ {
+ RpcDbEnv rdbenv = null;
+ boolean matchFound = false;
+ LocalIterator i = ((DbServer)server).env_list.iterator();
+
+ while (!matchFound && i.hasNext()) {
+ rdbenv = (RpcDbEnv)i.next();
+ if (rdbenv != null && rdbenv != this &&
+ (home == rdbenv.home ||
+ (home != null && home.equals(rdbenv.home))) &&
+ openflags == rdbenv.openflags &&
+ onflags == rdbenv.onflags &&
+ offflags == rdbenv.offflags)
+ matchFound = true;
+ }
+
+ if (matchFound) {
+ /*
+ * The only thing left to check is the timeout.
+ * Since the server timeout set by the client is only a hint, give
+ * shared handles the benefit of the doubt and keep the longer of
+ * the two timeouts.
+ */
+ if (rdbenv.timeout < timeout)
+ rdbenv.timeout = timeout;
+
+ ++rdbenv.refcount;
+ reply.envcl_id = ((FreeList.FreeListIterator)i).current;
+ reply.status = 0;
+
+ DbServer.err.println("Sharing DbEnv: " + reply.envcl_id);
+ }
+
+ return matchFound;
+ }
+
+ public void open(DbDispatcher server,
+ __env_open_msg args, __env_open_reply reply)
+ {
+ try {
+ home = (args.home.length() > 0) ? args.home : null;
+
+ /*
+ * If they are using locking, do deadlock detection for them
+ * internally.
+ */
+ if ((args.flags & Db.DB_INIT_LOCK) != 0)
+ dbenv.set_lk_detect(Db.DB_LOCK_DEFAULT);
+
+ // adjust flags for RPC
+ int newflags = (args.flags & ~DbServer.DB_SERVER_FLAGMASK);
+ openflags = (newflags & DbServer.DB_SERVER_ENVFLAGS);
+
+ if (findSharedDbEnv(server, reply)) {
+ dbenv.close(0);
+ dbenv = null;
+ server.delEnv(this);
+ } else {
+ // TODO: check home?
+ dbenv.open(home, newflags, args.mode);
+ reply.status = 0;
+ reply.envcl_id = args.dbenvcl_id;
+ }
+ } catch(DbException e) {
+ e.printStackTrace(DbServer.err);
+ reply.status = e.get_errno();
+ } catch(FileNotFoundException e) {
+ reply.status = Db.DB_NOTFOUND;
+ }
+
+ // System.err.println("DbEnv.open: reply.status = " + reply.status + ", reply.envcl_id = " + reply.envcl_id);
+ }
+
+ public void remove(DbDispatcher server,
+ __env_remove_msg args, __env_remove_reply reply)
+ {
+ try {
+ args.home = (args.home.length() > 0) ? args.home : null;
+ // TODO: check home?
+
+ dbenv.remove(args.home, args.flags);
+ dbenv = null;
+ reply.status = 0;
+ } catch(DbException e) {
+ e.printStackTrace(DbServer.err);
+ reply.status = e.get_errno();
+ } catch(FileNotFoundException e) {
+ reply.status = Db.DB_NOTFOUND;
+ } finally {
+ server.delEnv(this);
+ }
+ }
+
+ public void set_cachesize(DbDispatcher server,
+ __env_cachesize_msg args, __env_cachesize_reply reply)
+ {
+ try {
+ dbenv.set_cachesize(args.gbytes, args.bytes, args.ncache);
+ reply.status = 0;
+ } catch(DbException e) {
+ e.printStackTrace(DbServer.err);
+ reply.status = e.get_errno();
+ }
+ }
+
+ public void set_encrypt(DbDispatcher server,
+ __env_encrypt_msg args, __env_encrypt_reply reply)
+ {
+ try {
+ dbenv.set_encrypt(args.passwd, args.flags);
+ reply.status = 0;
+ } catch(DbException e) {
+ e.printStackTrace(DbServer.err);
+ reply.status = e.get_errno();
+ }
+ }
+
+ public void set_flags(DbDispatcher server,
+ __env_flags_msg args, __env_flags_reply reply)
+ {
+ try {
+ dbenv.set_flags(args.flags, args.onoff != 0);
+ if (args.onoff != 0)
+ onflags |= args.flags;
+ else
+ offflags |= args.flags;
+ reply.status = 0;
+ } catch(DbException e) {
+ e.printStackTrace(DbServer.err);
+ reply.status = e.get_errno();
+ }
+ }
+
+ // txn_recover implementation
+ public void txn_recover(DbDispatcher server,
+ __txn_recover_msg args, __txn_recover_reply reply)
+ {
+ try {
+ DbPreplist[] prep_list = dbenv.txn_recover(args.count, args.flags);
+ if (prep_list != null && prep_list.length > 0) {
+ int count = prep_list.length;
+ reply.retcount = count;
+ reply.txn = new int[count];
+ reply.gid = new byte[count * Db.DB_XIDDATASIZE];
+
+ for(int i = 0; i < count; i++) {
+ reply.txn[i] = server.addTxn(new RpcDbTxn(this, prep_list[i].txn));
+ System.arraycopy(prep_list[i].gid, 0, reply.gid, i * Db.DB_XIDDATASIZE, Db.DB_XIDDATASIZE);
+ }
+ }
+
+ reply.status = 0;
+ } catch(DbException e) {
+ e.printStackTrace(DbServer.err);
+ reply.status = e.get_errno();
+ }
+ }
+}
diff --git a/bdb/rpc_server/java/RpcDbTxn.java b/bdb/rpc_server/java/RpcDbTxn.java
new file mode 100644
index 00000000000..a3207b5e35d
--- /dev/null
+++ b/bdb/rpc_server/java/RpcDbTxn.java
@@ -0,0 +1,123 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2001-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: RpcDbTxn.java,v 1.2 2002/08/09 01:56:10 bostic Exp $
+ */
+
+package com.sleepycat.db.rpcserver;
+
+import com.sleepycat.db.*;
+import java.io.IOException;
+import java.io.*;
+import java.util.*;
+
+/**
+ * RPC wrapper around a txn object for the Java RPC server.
+ */
+public class RpcDbTxn extends Timer
+{
+ RpcDbEnv rdbenv;
+ DbTxn txn;
+
+ public RpcDbTxn(RpcDbEnv rdbenv, DbTxn txn)
+ {
+ this.rdbenv = rdbenv;
+ this.txn = txn;
+ }
+
+ void dispose()
+ {
+ if (txn != null) {
+ try {
+ txn.abort();
+ } catch(DbException e) {
+ e.printStackTrace(DbServer.err);
+ }
+ txn = null;
+ }
+ }
+
+ public void abort(DbDispatcher server,
+ __txn_abort_msg args, __txn_abort_reply reply)
+ {
+ try {
+ txn.abort();
+ txn = null;
+ reply.status = 0;
+ } catch(DbException e) {
+ e.printStackTrace(DbServer.err);
+ reply.status = e.get_errno();
+ } finally {
+ server.delTxn(this);
+ }
+ }
+
+ public void begin(DbDispatcher server,
+ __txn_begin_msg args, __txn_begin_reply reply)
+ {
+ try {
+ if (rdbenv == null) {
+ reply.status = Db.DB_NOSERVER_ID;
+ return;
+ }
+ DbEnv dbenv = rdbenv.dbenv;
+ RpcDbTxn rparent = server.getTxn(args.parentcl_id);
+ DbTxn parent = (rparent != null) ? rparent.txn : null;
+
+ txn = dbenv.txn_begin(parent, args.flags);
+
+ if (rparent != null)
+ timer = rparent.timer;
+ reply.txnidcl_id = server.addTxn(this);
+ reply.status = 0;
+ } catch(DbException e) {
+ e.printStackTrace(DbServer.err);
+ reply.status = e.get_errno();
+ }
+ }
+
+ public void commit(DbDispatcher server,
+ __txn_commit_msg args, __txn_commit_reply reply)
+ {
+ try {
+ txn.commit(args.flags);
+ txn = null;
+ reply.status = 0;
+ } catch(DbException e) {
+ e.printStackTrace(DbServer.err);
+ reply.status = e.get_errno();
+ } finally {
+ server.delTxn(this);
+ }
+ }
+
+ public void discard(DbDispatcher server,
+ __txn_discard_msg args, __txn_discard_reply reply)
+ {
+ try {
+ txn.discard(args.flags);
+ txn = null;
+ reply.status = 0;
+ } catch(DbException e) {
+ e.printStackTrace(DbServer.err);
+ reply.status = e.get_errno();
+ } finally {
+ server.delTxn(this);
+ }
+ }
+
+ public void prepare(DbDispatcher server,
+ __txn_prepare_msg args, __txn_prepare_reply reply)
+ {
+ try {
+ txn.prepare(args.gid);
+ reply.status = 0;
+ } catch(DbException e) {
+ e.printStackTrace(DbServer.err);
+ reply.status = e.get_errno();
+ }
+ }
+}
diff --git a/bdb/rpc_server/java/RpcDbc.java b/bdb/rpc_server/java/RpcDbc.java
new file mode 100644
index 00000000000..a37b4ee4896
--- /dev/null
+++ b/bdb/rpc_server/java/RpcDbc.java
@@ -0,0 +1,238 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2001-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: RpcDbc.java,v 1.3 2002/08/09 01:56:10 bostic Exp $
+ */
+
+package com.sleepycat.db.rpcserver;
+
+import com.sleepycat.db.*;
+import java.io.IOException;
+import java.io.*;
+import java.util.*;
+
+/**
+ * RPC wrapper around a dbc object for the Java RPC server.
+ */
+public class RpcDbc extends Timer
+{
+ static final byte[] empty = new byte[0];
+ RpcDbEnv rdbenv;
+ RpcDb rdb;
+ Dbc dbc;
+ Timer orig_timer;
+ boolean isJoin;
+
+ public RpcDbc(RpcDb rdb, Dbc dbc, boolean isJoin)
+ {
+ this.rdb = rdb;
+ this.rdbenv = rdb.rdbenv;
+ this.dbc = dbc;
+ this.isJoin = isJoin;
+ }
+
+ void dispose()
+ {
+ if (dbc != null) {
+ try {
+ dbc.close();
+ } catch(DbException e) {
+ e.printStackTrace(DbServer.err);
+ }
+ dbc = null;
+ }
+ }
+
+ public void close(DbDispatcher server,
+ __dbc_close_msg args, __dbc_close_reply reply)
+ {
+ try {
+ dbc.close();
+ dbc = null;
+
+ if (isJoin)
+ for(LocalIterator i = ((DbServer)server).cursor_list.iterator(); i.hasNext(); ) {
+ RpcDbc rdbc = (RpcDbc)i.next();
+ // Unjoin the cursors that were joined to create this join cursor
+ if (rdbc != null && rdbc.timer == this)
+ rdbc.timer = rdbc.orig_timer;
+ }
+
+ reply.status = 0;
+ } catch(DbException e) {
+ e.printStackTrace(DbServer.err);
+ reply.status = e.get_errno();
+ } finally {
+ server.delCursor(this);
+ }
+ }
+
+ public void count(DbDispatcher server,
+ __dbc_count_msg args, __dbc_count_reply reply)
+ {
+ try {
+ reply.dupcount = dbc.count(args.flags);
+ reply.status = 0;
+ } catch(DbException e) {
+ e.printStackTrace(DbServer.err);
+ reply.status = e.get_errno();
+ }
+ }
+
+ public void del(DbDispatcher server,
+ __dbc_del_msg args, __dbc_del_reply reply)
+ {
+ try {
+ reply.status = dbc.del(args.flags);
+ } catch(DbException e) {
+ e.printStackTrace(DbServer.err);
+ reply.status = e.get_errno();
+ }
+ }
+
+ public void dup(DbDispatcher server,
+ __dbc_dup_msg args, __dbc_dup_reply reply)
+ {
+ try {
+ Dbc newdbc = dbc.dup(args.flags);
+ RpcDbc rdbc = new RpcDbc(rdb, newdbc, false);
+ /* If this cursor has a parent txn, we need to use it too. */
+ if (timer != this)
+ rdbc.timer = timer;
+ reply.dbcidcl_id = server.addCursor(rdbc);
+ reply.status = 0;
+ } catch(DbException e) {
+ e.printStackTrace(DbServer.err);
+ reply.status = e.get_errno();
+ }
+ }
+
+ public void get(DbDispatcher server,
+ __dbc_get_msg args, __dbc_get_reply reply)
+ {
+ try {
+ Dbt key = new Dbt(args.keydata);
+ key.set_dlen(args.keydlen);
+ key.set_ulen(args.keyulen);
+ key.set_doff(args.keydoff);
+ key.set_flags(Db.DB_DBT_MALLOC |
+ (args.keyflags & Db.DB_DBT_PARTIAL));
+
+ Dbt data = new Dbt(args.datadata);
+ data.set_dlen(args.datadlen);
+ data.set_ulen(args.dataulen);
+ data.set_doff(args.datadoff);
+ if ((args.flags & Db.DB_MULTIPLE) != 0 ||
+ (args.flags & Db.DB_MULTIPLE_KEY) != 0) {
+ if (data.get_data().length == 0)
+ data.set_data(new byte[data.get_ulen()]);
+ data.set_flags(Db.DB_DBT_USERMEM |
+ (args.dataflags & Db.DB_DBT_PARTIAL));
+ } else
+ data.set_flags(Db.DB_DBT_MALLOC |
+ (args.dataflags & Db.DB_DBT_PARTIAL));
+
+ reply.status = dbc.get(key, data, args.flags);
+
+ if (key.get_data() == args.keydata) {
+ reply.keydata = new byte[key.get_size()];
+ System.arraycopy(key.get_data(), 0, reply.keydata, 0, key.get_size());
+ } else
+ reply.keydata = key.get_data();
+
+ if (data.get_data() == args.datadata) {
+ reply.datadata = new byte[data.get_size()];
+ System.arraycopy(data.get_data(), 0, reply.datadata, 0, data.get_size());
+ } else
+ reply.datadata = data.get_data();
+ } catch(DbException e) {
+ e.printStackTrace(DbServer.err);
+ reply.status = e.get_errno();
+ reply.keydata = reply.datadata = empty;
+ }
+ }
+
+ public void pget(DbDispatcher server,
+ __dbc_pget_msg args, __dbc_pget_reply reply)
+ {
+ try {
+ Dbt skey = new Dbt(args.skeydata);
+ skey.set_dlen(args.skeydlen);
+ skey.set_doff(args.skeydoff);
+ skey.set_ulen(args.skeyulen);
+ skey.set_flags(Db.DB_DBT_MALLOC |
+ (args.skeyflags & Db.DB_DBT_PARTIAL));
+
+ Dbt pkey = new Dbt(args.pkeydata);
+ pkey.set_dlen(args.pkeydlen);
+ pkey.set_doff(args.pkeydoff);
+ pkey.set_ulen(args.pkeyulen);
+ pkey.set_flags(Db.DB_DBT_MALLOC |
+ (args.pkeyflags & Db.DB_DBT_PARTIAL));
+
+ Dbt data = new Dbt(args.datadata);
+ data.set_dlen(args.datadlen);
+ data.set_doff(args.datadoff);
+ data.set_ulen(args.dataulen);
+ data.set_flags(Db.DB_DBT_MALLOC |
+ (args.dataflags & Db.DB_DBT_PARTIAL));
+
+ reply.status = dbc.pget(skey, pkey, data, args.flags);
+
+ if (skey.get_data() == args.skeydata) {
+ reply.skeydata = new byte[skey.get_size()];
+ System.arraycopy(skey.get_data(), 0, reply.skeydata, 0, skey.get_size());
+ } else
+ reply.skeydata = skey.get_data();
+
+ if (pkey.get_data() == args.pkeydata) {
+ reply.pkeydata = new byte[pkey.get_size()];
+ System.arraycopy(pkey.get_data(), 0, reply.pkeydata, 0, pkey.get_size());
+ } else
+ reply.pkeydata = pkey.get_data();
+
+ if (data.get_data() == args.datadata) {
+ reply.datadata = new byte[data.get_size()];
+ System.arraycopy(data.get_data(), 0, reply.datadata, 0, data.get_size());
+ } else
+ reply.datadata = data.get_data();
+ } catch(DbException e) {
+ e.printStackTrace(DbServer.err);
+ reply.status = e.get_errno();
+ }
+ }
+
+ public void put(DbDispatcher server,
+ __dbc_put_msg args, __dbc_put_reply reply)
+ {
+ try {
+ Dbt key = new Dbt(args.keydata);
+ key.set_dlen(args.keydlen);
+ key.set_ulen(args.keyulen);
+ key.set_doff(args.keydoff);
+ key.set_flags(args.keyflags & Db.DB_DBT_PARTIAL);
+
+ Dbt data = new Dbt(args.datadata);
+ data.set_dlen(args.datadlen);
+ data.set_ulen(args.dataulen);
+ data.set_doff(args.datadoff);
+ data.set_flags(args.dataflags);
+
+ reply.status = dbc.put(key, data, args.flags);
+
+ if (reply.status == 0 &&
+ (args.flags == Db.DB_AFTER || args.flags == Db.DB_BEFORE) &&
+ rdb.db.get_type() == Db.DB_RECNO)
+ reply.keydata = key.get_data();
+ else
+ reply.keydata = empty;
+ } catch(DbException e) {
+ e.printStackTrace(DbServer.err);
+ reply.status = e.get_errno();
+ reply.keydata = empty;
+ }
+ }
+}
diff --git a/bdb/rpc_server/java/Timer.java b/bdb/rpc_server/java/Timer.java
new file mode 100644
index 00000000000..e16f3084f95
--- /dev/null
+++ b/bdb/rpc_server/java/Timer.java
@@ -0,0 +1,22 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2001-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: Timer.java,v 1.1 2002/01/03 02:59:39 mjc Exp $
+ */
+
+package com.sleepycat.db.rpcserver;
+
+/**
+ * Class to keep track of access times. It is slightly devious: it keeps
+ * both its own last_access time and a reference to another Timer that
+ * can be used to group/share access times. This is done to keep the Java code
+ * close to the canonical C implementation of the RPC server.
+ */
+public class Timer
+{
+ Timer timer = this;
+ long last_access;
+}
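
To make the indirection above concrete: handles that belong to the same transaction all point their timer field at one shared Timer, so refreshing or expiring any member of the group goes through a single last_access value (see rdbc.timer = rtxn.timer in RpcDb.cursor() above). Below is a minimal sketch of that usage, assuming it lives in the same package as Timer; the helper names (touch, isExpired, idleMillis) are illustrative, not BDB or Remote Tea APIs.

    // Sketch only: illustrates how the shared-Timer indirection is used.
    // (Would live in com.sleepycat.db.rpcserver next to Timer.)
    class TimerUsageSketch {
        // Refresh the activity clock for a handle and everything grouped with it.
        static void touch(Timer t) {
            t.timer.last_access = System.currentTimeMillis();
        }

        // True if the handle's group has been idle for longer than idleMillis.
        static boolean isExpired(Timer t, long idleMillis) {
            return System.currentTimeMillis() - t.timer.last_access > idleMillis;
        }
    }
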
diff --git a/bdb/rpc_server/java/gen/DbServerStub.java b/bdb/rpc_server/java/gen/DbServerStub.java
new file mode 100644
index 00000000000..90fc13a6d9c
--- /dev/null
+++ b/bdb/rpc_server/java/gen/DbServerStub.java
@@ -0,0 +1,495 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 3/19/02 10:30 AM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+import org.acplt.oncrpc.server.*;
+
+/**
+ */
+public abstract class DbServerStub extends OncRpcServerStub implements OncRpcDispatchable {
+
+ public DbServerStub()
+ throws OncRpcException, IOException {
+ this(0);
+ }
+
+ public DbServerStub(int port)
+ throws OncRpcException, IOException {
+ info = new OncRpcServerTransportRegistrationInfo [] {
+ new OncRpcServerTransportRegistrationInfo(db_server.DB_RPC_SERVERPROG, 4001),
+ };
+ transports = new OncRpcServerTransport [] {
+ new OncRpcUdpServerTransport(this, port, info, 32768),
+ new OncRpcTcpServerTransport(this, port, info, 32768)
+ };
+ }
+
+ public void dispatchOncRpcCall(OncRpcCallInformation call, int program, int version, int procedure)
+ throws OncRpcException, IOException {
+ if ( version == 4001 ) {
+ switch ( procedure ) {
+ case 1: {
+ __env_cachesize_msg args$ = new __env_cachesize_msg();
+ call.retrieveCall(args$);
+ __env_cachesize_reply result$ = __DB_env_cachesize_4001(args$);
+ call.reply(result$);
+ break;
+ }
+ case 2: {
+ __env_close_msg args$ = new __env_close_msg();
+ call.retrieveCall(args$);
+ __env_close_reply result$ = __DB_env_close_4001(args$);
+ call.reply(result$);
+ break;
+ }
+ case 3: {
+ __env_create_msg args$ = new __env_create_msg();
+ call.retrieveCall(args$);
+ __env_create_reply result$ = __DB_env_create_4001(args$);
+ call.reply(result$);
+ break;
+ }
+ case 4: {
+ __env_dbremove_msg args$ = new __env_dbremove_msg();
+ call.retrieveCall(args$);
+ __env_dbremove_reply result$ = __DB_env_dbremove_4001(args$);
+ call.reply(result$);
+ break;
+ }
+ case 5: {
+ __env_dbrename_msg args$ = new __env_dbrename_msg();
+ call.retrieveCall(args$);
+ __env_dbrename_reply result$ = __DB_env_dbrename_4001(args$);
+ call.reply(result$);
+ break;
+ }
+ case 6: {
+ __env_encrypt_msg args$ = new __env_encrypt_msg();
+ call.retrieveCall(args$);
+ __env_encrypt_reply result$ = __DB_env_encrypt_4001(args$);
+ call.reply(result$);
+ break;
+ }
+ case 7: {
+ __env_flags_msg args$ = new __env_flags_msg();
+ call.retrieveCall(args$);
+ __env_flags_reply result$ = __DB_env_flags_4001(args$);
+ call.reply(result$);
+ break;
+ }
+ case 8: {
+ __env_open_msg args$ = new __env_open_msg();
+ call.retrieveCall(args$);
+ __env_open_reply result$ = __DB_env_open_4001(args$);
+ call.reply(result$);
+ break;
+ }
+ case 9: {
+ __env_remove_msg args$ = new __env_remove_msg();
+ call.retrieveCall(args$);
+ __env_remove_reply result$ = __DB_env_remove_4001(args$);
+ call.reply(result$);
+ break;
+ }
+ case 10: {
+ __txn_abort_msg args$ = new __txn_abort_msg();
+ call.retrieveCall(args$);
+ __txn_abort_reply result$ = __DB_txn_abort_4001(args$);
+ call.reply(result$);
+ break;
+ }
+ case 11: {
+ __txn_begin_msg args$ = new __txn_begin_msg();
+ call.retrieveCall(args$);
+ __txn_begin_reply result$ = __DB_txn_begin_4001(args$);
+ call.reply(result$);
+ break;
+ }
+ case 12: {
+ __txn_commit_msg args$ = new __txn_commit_msg();
+ call.retrieveCall(args$);
+ __txn_commit_reply result$ = __DB_txn_commit_4001(args$);
+ call.reply(result$);
+ break;
+ }
+ case 13: {
+ __txn_discard_msg args$ = new __txn_discard_msg();
+ call.retrieveCall(args$);
+ __txn_discard_reply result$ = __DB_txn_discard_4001(args$);
+ call.reply(result$);
+ break;
+ }
+ case 14: {
+ __txn_prepare_msg args$ = new __txn_prepare_msg();
+ call.retrieveCall(args$);
+ __txn_prepare_reply result$ = __DB_txn_prepare_4001(args$);
+ call.reply(result$);
+ break;
+ }
+ case 15: {
+ __txn_recover_msg args$ = new __txn_recover_msg();
+ call.retrieveCall(args$);
+ __txn_recover_reply result$ = __DB_txn_recover_4001(args$);
+ call.reply(result$);
+ break;
+ }
+ case 16: {
+ __db_associate_msg args$ = new __db_associate_msg();
+ call.retrieveCall(args$);
+ __db_associate_reply result$ = __DB_db_associate_4001(args$);
+ call.reply(result$);
+ break;
+ }
+ case 17: {
+ __db_bt_maxkey_msg args$ = new __db_bt_maxkey_msg();
+ call.retrieveCall(args$);
+ __db_bt_maxkey_reply result$ = __DB_db_bt_maxkey_4001(args$);
+ call.reply(result$);
+ break;
+ }
+ case 18: {
+ __db_bt_minkey_msg args$ = new __db_bt_minkey_msg();
+ call.retrieveCall(args$);
+ __db_bt_minkey_reply result$ = __DB_db_bt_minkey_4001(args$);
+ call.reply(result$);
+ break;
+ }
+ case 19: {
+ __db_close_msg args$ = new __db_close_msg();
+ call.retrieveCall(args$);
+ __db_close_reply result$ = __DB_db_close_4001(args$);
+ call.reply(result$);
+ break;
+ }
+ case 20: {
+ __db_create_msg args$ = new __db_create_msg();
+ call.retrieveCall(args$);
+ __db_create_reply result$ = __DB_db_create_4001(args$);
+ call.reply(result$);
+ break;
+ }
+ case 21: {
+ __db_del_msg args$ = new __db_del_msg();
+ call.retrieveCall(args$);
+ __db_del_reply result$ = __DB_db_del_4001(args$);
+ call.reply(result$);
+ break;
+ }
+ case 22: {
+ __db_encrypt_msg args$ = new __db_encrypt_msg();
+ call.retrieveCall(args$);
+ __db_encrypt_reply result$ = __DB_db_encrypt_4001(args$);
+ call.reply(result$);
+ break;
+ }
+ case 23: {
+ __db_extentsize_msg args$ = new __db_extentsize_msg();
+ call.retrieveCall(args$);
+ __db_extentsize_reply result$ = __DB_db_extentsize_4001(args$);
+ call.reply(result$);
+ break;
+ }
+ case 24: {
+ __db_flags_msg args$ = new __db_flags_msg();
+ call.retrieveCall(args$);
+ __db_flags_reply result$ = __DB_db_flags_4001(args$);
+ call.reply(result$);
+ break;
+ }
+ case 25: {
+ __db_get_msg args$ = new __db_get_msg();
+ call.retrieveCall(args$);
+ __db_get_reply result$ = __DB_db_get_4001(args$);
+ call.reply(result$);
+ break;
+ }
+ case 26: {
+ __db_h_ffactor_msg args$ = new __db_h_ffactor_msg();
+ call.retrieveCall(args$);
+ __db_h_ffactor_reply result$ = __DB_db_h_ffactor_4001(args$);
+ call.reply(result$);
+ break;
+ }
+ case 27: {
+ __db_h_nelem_msg args$ = new __db_h_nelem_msg();
+ call.retrieveCall(args$);
+ __db_h_nelem_reply result$ = __DB_db_h_nelem_4001(args$);
+ call.reply(result$);
+ break;
+ }
+ case 28: {
+ __db_key_range_msg args$ = new __db_key_range_msg();
+ call.retrieveCall(args$);
+ __db_key_range_reply result$ = __DB_db_key_range_4001(args$);
+ call.reply(result$);
+ break;
+ }
+ case 29: {
+ __db_lorder_msg args$ = new __db_lorder_msg();
+ call.retrieveCall(args$);
+ __db_lorder_reply result$ = __DB_db_lorder_4001(args$);
+ call.reply(result$);
+ break;
+ }
+ case 30: {
+ __db_open_msg args$ = new __db_open_msg();
+ call.retrieveCall(args$);
+ __db_open_reply result$ = __DB_db_open_4001(args$);
+ call.reply(result$);
+ break;
+ }
+ case 31: {
+ __db_pagesize_msg args$ = new __db_pagesize_msg();
+ call.retrieveCall(args$);
+ __db_pagesize_reply result$ = __DB_db_pagesize_4001(args$);
+ call.reply(result$);
+ break;
+ }
+ case 32: {
+ __db_pget_msg args$ = new __db_pget_msg();
+ call.retrieveCall(args$);
+ __db_pget_reply result$ = __DB_db_pget_4001(args$);
+ call.reply(result$);
+ break;
+ }
+ case 33: {
+ __db_put_msg args$ = new __db_put_msg();
+ call.retrieveCall(args$);
+ __db_put_reply result$ = __DB_db_put_4001(args$);
+ call.reply(result$);
+ break;
+ }
+ case 34: {
+ __db_re_delim_msg args$ = new __db_re_delim_msg();
+ call.retrieveCall(args$);
+ __db_re_delim_reply result$ = __DB_db_re_delim_4001(args$);
+ call.reply(result$);
+ break;
+ }
+ case 35: {
+ __db_re_len_msg args$ = new __db_re_len_msg();
+ call.retrieveCall(args$);
+ __db_re_len_reply result$ = __DB_db_re_len_4001(args$);
+ call.reply(result$);
+ break;
+ }
+ case 36: {
+ __db_re_pad_msg args$ = new __db_re_pad_msg();
+ call.retrieveCall(args$);
+ __db_re_pad_reply result$ = __DB_db_re_pad_4001(args$);
+ call.reply(result$);
+ break;
+ }
+ case 37: {
+ __db_remove_msg args$ = new __db_remove_msg();
+ call.retrieveCall(args$);
+ __db_remove_reply result$ = __DB_db_remove_4001(args$);
+ call.reply(result$);
+ break;
+ }
+ case 38: {
+ __db_rename_msg args$ = new __db_rename_msg();
+ call.retrieveCall(args$);
+ __db_rename_reply result$ = __DB_db_rename_4001(args$);
+ call.reply(result$);
+ break;
+ }
+ case 39: {
+ __db_stat_msg args$ = new __db_stat_msg();
+ call.retrieveCall(args$);
+ __db_stat_reply result$ = __DB_db_stat_4001(args$);
+ call.reply(result$);
+ break;
+ }
+ case 40: {
+ __db_sync_msg args$ = new __db_sync_msg();
+ call.retrieveCall(args$);
+ __db_sync_reply result$ = __DB_db_sync_4001(args$);
+ call.reply(result$);
+ break;
+ }
+ case 41: {
+ __db_truncate_msg args$ = new __db_truncate_msg();
+ call.retrieveCall(args$);
+ __db_truncate_reply result$ = __DB_db_truncate_4001(args$);
+ call.reply(result$);
+ break;
+ }
+ case 42: {
+ __db_cursor_msg args$ = new __db_cursor_msg();
+ call.retrieveCall(args$);
+ __db_cursor_reply result$ = __DB_db_cursor_4001(args$);
+ call.reply(result$);
+ break;
+ }
+ case 43: {
+ __db_join_msg args$ = new __db_join_msg();
+ call.retrieveCall(args$);
+ __db_join_reply result$ = __DB_db_join_4001(args$);
+ call.reply(result$);
+ break;
+ }
+ case 44: {
+ __dbc_close_msg args$ = new __dbc_close_msg();
+ call.retrieveCall(args$);
+ __dbc_close_reply result$ = __DB_dbc_close_4001(args$);
+ call.reply(result$);
+ break;
+ }
+ case 45: {
+ __dbc_count_msg args$ = new __dbc_count_msg();
+ call.retrieveCall(args$);
+ __dbc_count_reply result$ = __DB_dbc_count_4001(args$);
+ call.reply(result$);
+ break;
+ }
+ case 46: {
+ __dbc_del_msg args$ = new __dbc_del_msg();
+ call.retrieveCall(args$);
+ __dbc_del_reply result$ = __DB_dbc_del_4001(args$);
+ call.reply(result$);
+ break;
+ }
+ case 47: {
+ __dbc_dup_msg args$ = new __dbc_dup_msg();
+ call.retrieveCall(args$);
+ __dbc_dup_reply result$ = __DB_dbc_dup_4001(args$);
+ call.reply(result$);
+ break;
+ }
+ case 48: {
+ __dbc_get_msg args$ = new __dbc_get_msg();
+ call.retrieveCall(args$);
+ __dbc_get_reply result$ = __DB_dbc_get_4001(args$);
+ call.reply(result$);
+ break;
+ }
+ case 49: {
+ __dbc_pget_msg args$ = new __dbc_pget_msg();
+ call.retrieveCall(args$);
+ __dbc_pget_reply result$ = __DB_dbc_pget_4001(args$);
+ call.reply(result$);
+ break;
+ }
+ case 50: {
+ __dbc_put_msg args$ = new __dbc_put_msg();
+ call.retrieveCall(args$);
+ __dbc_put_reply result$ = __DB_dbc_put_4001(args$);
+ call.reply(result$);
+ break;
+ }
+ default:
+ call.failProcedureUnavailable();
+ }
+ } else {
+ call.failProcedureUnavailable();
+ }
+ }
+
+ public abstract __env_cachesize_reply __DB_env_cachesize_4001(__env_cachesize_msg arg1);
+
+ public abstract __env_close_reply __DB_env_close_4001(__env_close_msg arg1);
+
+ public abstract __env_create_reply __DB_env_create_4001(__env_create_msg arg1);
+
+ public abstract __env_dbremove_reply __DB_env_dbremove_4001(__env_dbremove_msg arg1);
+
+ public abstract __env_dbrename_reply __DB_env_dbrename_4001(__env_dbrename_msg arg1);
+
+ public abstract __env_encrypt_reply __DB_env_encrypt_4001(__env_encrypt_msg arg1);
+
+ public abstract __env_flags_reply __DB_env_flags_4001(__env_flags_msg arg1);
+
+ public abstract __env_open_reply __DB_env_open_4001(__env_open_msg arg1);
+
+ public abstract __env_remove_reply __DB_env_remove_4001(__env_remove_msg arg1);
+
+ public abstract __txn_abort_reply __DB_txn_abort_4001(__txn_abort_msg arg1);
+
+ public abstract __txn_begin_reply __DB_txn_begin_4001(__txn_begin_msg arg1);
+
+ public abstract __txn_commit_reply __DB_txn_commit_4001(__txn_commit_msg arg1);
+
+ public abstract __txn_discard_reply __DB_txn_discard_4001(__txn_discard_msg arg1);
+
+ public abstract __txn_prepare_reply __DB_txn_prepare_4001(__txn_prepare_msg arg1);
+
+ public abstract __txn_recover_reply __DB_txn_recover_4001(__txn_recover_msg arg1);
+
+ public abstract __db_associate_reply __DB_db_associate_4001(__db_associate_msg arg1);
+
+ public abstract __db_bt_maxkey_reply __DB_db_bt_maxkey_4001(__db_bt_maxkey_msg arg1);
+
+ public abstract __db_bt_minkey_reply __DB_db_bt_minkey_4001(__db_bt_minkey_msg arg1);
+
+ public abstract __db_close_reply __DB_db_close_4001(__db_close_msg arg1);
+
+ public abstract __db_create_reply __DB_db_create_4001(__db_create_msg arg1);
+
+ public abstract __db_del_reply __DB_db_del_4001(__db_del_msg arg1);
+
+ public abstract __db_encrypt_reply __DB_db_encrypt_4001(__db_encrypt_msg arg1);
+
+ public abstract __db_extentsize_reply __DB_db_extentsize_4001(__db_extentsize_msg arg1);
+
+ public abstract __db_flags_reply __DB_db_flags_4001(__db_flags_msg arg1);
+
+ public abstract __db_get_reply __DB_db_get_4001(__db_get_msg arg1);
+
+ public abstract __db_h_ffactor_reply __DB_db_h_ffactor_4001(__db_h_ffactor_msg arg1);
+
+ public abstract __db_h_nelem_reply __DB_db_h_nelem_4001(__db_h_nelem_msg arg1);
+
+ public abstract __db_key_range_reply __DB_db_key_range_4001(__db_key_range_msg arg1);
+
+ public abstract __db_lorder_reply __DB_db_lorder_4001(__db_lorder_msg arg1);
+
+ public abstract __db_open_reply __DB_db_open_4001(__db_open_msg arg1);
+
+ public abstract __db_pagesize_reply __DB_db_pagesize_4001(__db_pagesize_msg arg1);
+
+ public abstract __db_pget_reply __DB_db_pget_4001(__db_pget_msg arg1);
+
+ public abstract __db_put_reply __DB_db_put_4001(__db_put_msg arg1);
+
+ public abstract __db_re_delim_reply __DB_db_re_delim_4001(__db_re_delim_msg arg1);
+
+ public abstract __db_re_len_reply __DB_db_re_len_4001(__db_re_len_msg arg1);
+
+ public abstract __db_re_pad_reply __DB_db_re_pad_4001(__db_re_pad_msg arg1);
+
+ public abstract __db_remove_reply __DB_db_remove_4001(__db_remove_msg arg1);
+
+ public abstract __db_rename_reply __DB_db_rename_4001(__db_rename_msg arg1);
+
+ public abstract __db_stat_reply __DB_db_stat_4001(__db_stat_msg arg1);
+
+ public abstract __db_sync_reply __DB_db_sync_4001(__db_sync_msg arg1);
+
+ public abstract __db_truncate_reply __DB_db_truncate_4001(__db_truncate_msg arg1);
+
+ public abstract __db_cursor_reply __DB_db_cursor_4001(__db_cursor_msg arg1);
+
+ public abstract __db_join_reply __DB_db_join_4001(__db_join_msg arg1);
+
+ public abstract __dbc_close_reply __DB_dbc_close_4001(__dbc_close_msg arg1);
+
+ public abstract __dbc_count_reply __DB_dbc_count_4001(__dbc_count_msg arg1);
+
+ public abstract __dbc_del_reply __DB_dbc_del_4001(__dbc_del_msg arg1);
+
+ public abstract __dbc_dup_reply __DB_dbc_dup_4001(__dbc_dup_msg arg1);
+
+ public abstract __dbc_get_reply __DB_dbc_get_4001(__dbc_get_msg arg1);
+
+ public abstract __dbc_pget_reply __DB_dbc_pget_4001(__dbc_pget_msg arg1);
+
+ public abstract __dbc_put_reply __DB_dbc_put_4001(__dbc_put_msg arg1);
+
+}
+// End of DbServerStub.java
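
The generated stub above only decodes each ONC/RPC procedure into its message class and forwards it to the matching abstract __DB_*_4001 method; a concrete server subclass supplies those methods by creating or looking up the wrapper objects (RpcDbEnv, RpcDb, RpcDbTxn, RpcDbc) and delegating to them. The sketch below shows the shape of two such handlers under stated assumptions: the dispatcher field stands in for the real DbDispatcher implementation added elsewhere in this change, and the dbenvcl_id field on __env_close_msg is assumed by analogy with the other generated message classes.

    // Sketch only: how a concrete subclass of the generated stub typically
    // bridges to the wrapper classes above.  (Would live in
    // com.sleepycat.db.rpcserver; only two of the abstract methods are
    // implemented, so the class stays abstract.)
    abstract class StubUsageSketch extends DbServerStub {
        protected DbDispatcher dispatcher;   // assumed: the real DbServer implements DbDispatcher

        StubUsageSketch() throws org.acplt.oncrpc.OncRpcException, java.io.IOException {
            super();
        }

        public __env_create_reply __DB_env_create_4001(__env_create_msg args) {
            __env_create_reply reply = new __env_create_reply();
            // RpcDbEnv.create() allocates the DbEnv, registers it with the
            // dispatcher and fills in reply.envcl_id and reply.status.
            new RpcDbEnv().create(dispatcher, args, reply);
            return reply;
        }

        public __env_close_reply __DB_env_close_4001(__env_close_msg args) {
            __env_close_reply reply = new __env_close_reply();
            // The client names the environment by its server-side id; the
            // args.dbenvcl_id field is assumed here, following the naming of
            // the other generated *_msg classes.
            RpcDbEnv rdbenv = dispatcher.getEnv(args.dbenvcl_id);
            rdbenv.close(dispatcher, args, reply);
            return reply;
        }
    }
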
diff --git a/bdb/rpc_server/java/gen/__db_associate_msg.java b/bdb/rpc_server/java/gen/__db_associate_msg.java
new file mode 100644
index 00000000000..8977303b99a
--- /dev/null
+++ b/bdb/rpc_server/java/gen/__db_associate_msg.java
@@ -0,0 +1,41 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 4/25/02 11:01 AM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __db_associate_msg implements XdrAble {
+ public int dbpcl_id;
+ public int txnpcl_id;
+ public int sdbpcl_id;
+ public int flags;
+
+ public __db_associate_msg() {
+ }
+
+ public __db_associate_msg(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(dbpcl_id);
+ xdr.xdrEncodeInt(txnpcl_id);
+ xdr.xdrEncodeInt(sdbpcl_id);
+ xdr.xdrEncodeInt(flags);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ dbpcl_id = xdr.xdrDecodeInt();
+ txnpcl_id = xdr.xdrDecodeInt();
+ sdbpcl_id = xdr.xdrDecodeInt();
+ flags = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __db_associate_msg.java
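
Every jrpcgen-generated message and reply class in this directory repeats the XdrAble pattern shown above: fields are written and read in declaration order, ints as 4-byte big-endian XDR integers and byte arrays as XDR opaque data, so the Java field order is also the wire order. A small sketch, with made-up handle ids:

    // Sketch only: building an associate request by hand.  The id values are
    // illustrative; real ones come from earlier create/open replies.
    class AssociateMsgSketch {
        static __db_associate_msg example() {
            __db_associate_msg msg = new __db_associate_msg();
            msg.dbpcl_id  = 3;   // server-side id of the primary Db handle
            msg.txnpcl_id = 0;   // 0 = no enclosing transaction
            msg.sdbpcl_id = 4;   // server-side id of the secondary Db handle
            msg.flags     = 0;   // RpcDb.associate() above notes DB_CREATE is unsupported
            return msg;          // xdrEncode() writes the four ints in this order
        }
    }
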
diff --git a/bdb/rpc_server/java/gen/__db_associate_reply.java b/bdb/rpc_server/java/gen/__db_associate_reply.java
new file mode 100644
index 00000000000..476d0868b33
--- /dev/null
+++ b/bdb/rpc_server/java/gen/__db_associate_reply.java
@@ -0,0 +1,32 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __db_associate_reply implements XdrAble {
+ public int status;
+
+ public __db_associate_reply() {
+ }
+
+ public __db_associate_reply(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(status);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ status = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __db_associate_reply.java
diff --git a/bdb/rpc_server/java/gen/__db_bt_maxkey_msg.java b/bdb/rpc_server/java/gen/__db_bt_maxkey_msg.java
new file mode 100644
index 00000000000..007ce16a974
--- /dev/null
+++ b/bdb/rpc_server/java/gen/__db_bt_maxkey_msg.java
@@ -0,0 +1,35 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __db_bt_maxkey_msg implements XdrAble {
+ public int dbpcl_id;
+ public int maxkey;
+
+ public __db_bt_maxkey_msg() {
+ }
+
+ public __db_bt_maxkey_msg(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(dbpcl_id);
+ xdr.xdrEncodeInt(maxkey);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ dbpcl_id = xdr.xdrDecodeInt();
+ maxkey = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __db_bt_maxkey_msg.java
diff --git a/bdb/rpc_server/java/gen/__db_bt_maxkey_reply.java b/bdb/rpc_server/java/gen/__db_bt_maxkey_reply.java
new file mode 100644
index 00000000000..855573271b3
--- /dev/null
+++ b/bdb/rpc_server/java/gen/__db_bt_maxkey_reply.java
@@ -0,0 +1,32 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __db_bt_maxkey_reply implements XdrAble {
+ public int status;
+
+ public __db_bt_maxkey_reply() {
+ }
+
+ public __db_bt_maxkey_reply(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(status);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ status = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __db_bt_maxkey_reply.java
diff --git a/bdb/rpc_server/java/gen/__db_bt_minkey_msg.java b/bdb/rpc_server/java/gen/__db_bt_minkey_msg.java
new file mode 100644
index 00000000000..c86ec382456
--- /dev/null
+++ b/bdb/rpc_server/java/gen/__db_bt_minkey_msg.java
@@ -0,0 +1,35 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __db_bt_minkey_msg implements XdrAble {
+ public int dbpcl_id;
+ public int minkey;
+
+ public __db_bt_minkey_msg() {
+ }
+
+ public __db_bt_minkey_msg(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(dbpcl_id);
+ xdr.xdrEncodeInt(minkey);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ dbpcl_id = xdr.xdrDecodeInt();
+ minkey = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __db_bt_minkey_msg.java
diff --git a/bdb/rpc_server/java/gen/__db_bt_minkey_reply.java b/bdb/rpc_server/java/gen/__db_bt_minkey_reply.java
new file mode 100644
index 00000000000..4d944b6bf33
--- /dev/null
+++ b/bdb/rpc_server/java/gen/__db_bt_minkey_reply.java
@@ -0,0 +1,32 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __db_bt_minkey_reply implements XdrAble {
+ public int status;
+
+ public __db_bt_minkey_reply() {
+ }
+
+ public __db_bt_minkey_reply(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(status);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ status = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __db_bt_minkey_reply.java
diff --git a/bdb/rpc_server/java/gen/__db_close_msg.java b/bdb/rpc_server/java/gen/__db_close_msg.java
new file mode 100644
index 00000000000..ce8d213701b
--- /dev/null
+++ b/bdb/rpc_server/java/gen/__db_close_msg.java
@@ -0,0 +1,35 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __db_close_msg implements XdrAble {
+ public int dbpcl_id;
+ public int flags;
+
+ public __db_close_msg() {
+ }
+
+ public __db_close_msg(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(dbpcl_id);
+ xdr.xdrEncodeInt(flags);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ dbpcl_id = xdr.xdrDecodeInt();
+ flags = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __db_close_msg.java
diff --git a/bdb/rpc_server/java/gen/__db_close_reply.java b/bdb/rpc_server/java/gen/__db_close_reply.java
new file mode 100644
index 00000000000..a9380e9c053
--- /dev/null
+++ b/bdb/rpc_server/java/gen/__db_close_reply.java
@@ -0,0 +1,32 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __db_close_reply implements XdrAble {
+ public int status;
+
+ public __db_close_reply() {
+ }
+
+ public __db_close_reply(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(status);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ status = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __db_close_reply.java
diff --git a/bdb/rpc_server/java/gen/__db_create_msg.java b/bdb/rpc_server/java/gen/__db_create_msg.java
new file mode 100644
index 00000000000..d21ca50f807
--- /dev/null
+++ b/bdb/rpc_server/java/gen/__db_create_msg.java
@@ -0,0 +1,35 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __db_create_msg implements XdrAble {
+ public int dbenvcl_id;
+ public int flags;
+
+ public __db_create_msg() {
+ }
+
+ public __db_create_msg(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(dbenvcl_id);
+ xdr.xdrEncodeInt(flags);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ dbenvcl_id = xdr.xdrDecodeInt();
+ flags = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __db_create_msg.java
diff --git a/bdb/rpc_server/java/gen/__db_create_reply.java b/bdb/rpc_server/java/gen/__db_create_reply.java
new file mode 100644
index 00000000000..e3dcbbab14e
--- /dev/null
+++ b/bdb/rpc_server/java/gen/__db_create_reply.java
@@ -0,0 +1,35 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __db_create_reply implements XdrAble {
+ public int status;
+ public int dbcl_id;
+
+ public __db_create_reply() {
+ }
+
+ public __db_create_reply(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(status);
+ xdr.xdrEncodeInt(dbcl_id);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ status = xdr.xdrDecodeInt();
+ dbcl_id = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __db_create_reply.java
diff --git a/bdb/rpc_server/java/gen/__db_cursor_msg.java b/bdb/rpc_server/java/gen/__db_cursor_msg.java
new file mode 100644
index 00000000000..60e09db6ebb
--- /dev/null
+++ b/bdb/rpc_server/java/gen/__db_cursor_msg.java
@@ -0,0 +1,38 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __db_cursor_msg implements XdrAble {
+ public int dbpcl_id;
+ public int txnpcl_id;
+ public int flags;
+
+ public __db_cursor_msg() {
+ }
+
+ public __db_cursor_msg(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(dbpcl_id);
+ xdr.xdrEncodeInt(txnpcl_id);
+ xdr.xdrEncodeInt(flags);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ dbpcl_id = xdr.xdrDecodeInt();
+ txnpcl_id = xdr.xdrDecodeInt();
+ flags = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __db_cursor_msg.java
diff --git a/bdb/rpc_server/java/gen/__db_cursor_reply.java b/bdb/rpc_server/java/gen/__db_cursor_reply.java
new file mode 100644
index 00000000000..bafd2817c67
--- /dev/null
+++ b/bdb/rpc_server/java/gen/__db_cursor_reply.java
@@ -0,0 +1,35 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __db_cursor_reply implements XdrAble {
+ public int status;
+ public int dbcidcl_id;
+
+ public __db_cursor_reply() {
+ }
+
+ public __db_cursor_reply(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(status);
+ xdr.xdrEncodeInt(dbcidcl_id);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ status = xdr.xdrDecodeInt();
+ dbcidcl_id = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __db_cursor_reply.java
diff --git a/bdb/rpc_server/java/gen/__db_del_msg.java b/bdb/rpc_server/java/gen/__db_del_msg.java
new file mode 100644
index 00000000000..fdf47907dd6
--- /dev/null
+++ b/bdb/rpc_server/java/gen/__db_del_msg.java
@@ -0,0 +1,53 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __db_del_msg implements XdrAble {
+ public int dbpcl_id;
+ public int txnpcl_id;
+ public int keydlen;
+ public int keydoff;
+ public int keyulen;
+ public int keyflags;
+ public byte [] keydata;
+ public int flags;
+
+ public __db_del_msg() {
+ }
+
+ public __db_del_msg(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(dbpcl_id);
+ xdr.xdrEncodeInt(txnpcl_id);
+ xdr.xdrEncodeInt(keydlen);
+ xdr.xdrEncodeInt(keydoff);
+ xdr.xdrEncodeInt(keyulen);
+ xdr.xdrEncodeInt(keyflags);
+ xdr.xdrEncodeDynamicOpaque(keydata);
+ xdr.xdrEncodeInt(flags);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ dbpcl_id = xdr.xdrDecodeInt();
+ txnpcl_id = xdr.xdrDecodeInt();
+ keydlen = xdr.xdrDecodeInt();
+ keydoff = xdr.xdrDecodeInt();
+ keyulen = xdr.xdrDecodeInt();
+ keyflags = xdr.xdrDecodeInt();
+ keydata = xdr.xdrDecodeDynamicOpaque();
+ flags = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __db_del_msg.java
diff --git a/bdb/rpc_server/java/gen/__db_del_reply.java b/bdb/rpc_server/java/gen/__db_del_reply.java
new file mode 100644
index 00000000000..8a55445944f
--- /dev/null
+++ b/bdb/rpc_server/java/gen/__db_del_reply.java
@@ -0,0 +1,32 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __db_del_reply implements XdrAble {
+ public int status;
+
+ public __db_del_reply() {
+ }
+
+ public __db_del_reply(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(status);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ status = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __db_del_reply.java
diff --git a/bdb/rpc_server/java/gen/__db_encrypt_msg.java b/bdb/rpc_server/java/gen/__db_encrypt_msg.java
new file mode 100644
index 00000000000..46d9f8ee7e8
--- /dev/null
+++ b/bdb/rpc_server/java/gen/__db_encrypt_msg.java
@@ -0,0 +1,38 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 2/13/02 1:05 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __db_encrypt_msg implements XdrAble {
+ public int dbpcl_id;
+ public String passwd;
+ public int flags;
+
+ public __db_encrypt_msg() {
+ }
+
+ public __db_encrypt_msg(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(dbpcl_id);
+ xdr.xdrEncodeString(passwd);
+ xdr.xdrEncodeInt(flags);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ dbpcl_id = xdr.xdrDecodeInt();
+ passwd = xdr.xdrDecodeString();
+ flags = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __db_encrypt_msg.java
diff --git a/bdb/rpc_server/java/gen/__db_encrypt_reply.java b/bdb/rpc_server/java/gen/__db_encrypt_reply.java
new file mode 100644
index 00000000000..a97cc98c90b
--- /dev/null
+++ b/bdb/rpc_server/java/gen/__db_encrypt_reply.java
@@ -0,0 +1,32 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 2/13/02 1:05 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __db_encrypt_reply implements XdrAble {
+ public int status;
+
+ public __db_encrypt_reply() {
+ }
+
+ public __db_encrypt_reply(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(status);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ status = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __db_encrypt_reply.java
diff --git a/bdb/rpc_server/java/gen/__db_extentsize_msg.java b/bdb/rpc_server/java/gen/__db_extentsize_msg.java
new file mode 100644
index 00000000000..41a51cff9c4
--- /dev/null
+++ b/bdb/rpc_server/java/gen/__db_extentsize_msg.java
@@ -0,0 +1,35 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __db_extentsize_msg implements XdrAble {
+ public int dbpcl_id;
+ public int extentsize;
+
+ public __db_extentsize_msg() {
+ }
+
+ public __db_extentsize_msg(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(dbpcl_id);
+ xdr.xdrEncodeInt(extentsize);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ dbpcl_id = xdr.xdrDecodeInt();
+ extentsize = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __db_extentsize_msg.java
diff --git a/bdb/rpc_server/java/gen/__db_extentsize_reply.java b/bdb/rpc_server/java/gen/__db_extentsize_reply.java
new file mode 100644
index 00000000000..409625486c7
--- /dev/null
+++ b/bdb/rpc_server/java/gen/__db_extentsize_reply.java
@@ -0,0 +1,32 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __db_extentsize_reply implements XdrAble {
+ public int status;
+
+ public __db_extentsize_reply() {
+ }
+
+ public __db_extentsize_reply(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(status);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ status = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __db_extentsize_reply.java
diff --git a/bdb/rpc_server/java/gen/__db_flags_msg.java b/bdb/rpc_server/java/gen/__db_flags_msg.java
new file mode 100644
index 00000000000..d8752e2e4dd
--- /dev/null
+++ b/bdb/rpc_server/java/gen/__db_flags_msg.java
@@ -0,0 +1,35 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __db_flags_msg implements XdrAble {
+ public int dbpcl_id;
+ public int flags;
+
+ public __db_flags_msg() {
+ }
+
+ public __db_flags_msg(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(dbpcl_id);
+ xdr.xdrEncodeInt(flags);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ dbpcl_id = xdr.xdrDecodeInt();
+ flags = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __db_flags_msg.java
diff --git a/bdb/rpc_server/java/gen/__db_flags_reply.java b/bdb/rpc_server/java/gen/__db_flags_reply.java
new file mode 100644
index 00000000000..c4ec253db83
--- /dev/null
+++ b/bdb/rpc_server/java/gen/__db_flags_reply.java
@@ -0,0 +1,32 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __db_flags_reply implements XdrAble {
+ public int status;
+
+ public __db_flags_reply() {
+ }
+
+ public __db_flags_reply(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(status);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ status = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __db_flags_reply.java
diff --git a/bdb/rpc_server/java/gen/__db_get_msg.java b/bdb/rpc_server/java/gen/__db_get_msg.java
new file mode 100644
index 00000000000..3dfe8e9d86e
--- /dev/null
+++ b/bdb/rpc_server/java/gen/__db_get_msg.java
@@ -0,0 +1,68 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __db_get_msg implements XdrAble {
+ public int dbpcl_id;
+ public int txnpcl_id;
+ public int keydlen;
+ public int keydoff;
+ public int keyulen;
+ public int keyflags;
+ public byte [] keydata;
+ public int datadlen;
+ public int datadoff;
+ public int dataulen;
+ public int dataflags;
+ public byte [] datadata;
+ public int flags;
+
+ public __db_get_msg() {
+ }
+
+ public __db_get_msg(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(dbpcl_id);
+ xdr.xdrEncodeInt(txnpcl_id);
+ xdr.xdrEncodeInt(keydlen);
+ xdr.xdrEncodeInt(keydoff);
+ xdr.xdrEncodeInt(keyulen);
+ xdr.xdrEncodeInt(keyflags);
+ xdr.xdrEncodeDynamicOpaque(keydata);
+ xdr.xdrEncodeInt(datadlen);
+ xdr.xdrEncodeInt(datadoff);
+ xdr.xdrEncodeInt(dataulen);
+ xdr.xdrEncodeInt(dataflags);
+ xdr.xdrEncodeDynamicOpaque(datadata);
+ xdr.xdrEncodeInt(flags);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ dbpcl_id = xdr.xdrDecodeInt();
+ txnpcl_id = xdr.xdrDecodeInt();
+ keydlen = xdr.xdrDecodeInt();
+ keydoff = xdr.xdrDecodeInt();
+ keyulen = xdr.xdrDecodeInt();
+ keyflags = xdr.xdrDecodeInt();
+ keydata = xdr.xdrDecodeDynamicOpaque();
+ datadlen = xdr.xdrDecodeInt();
+ datadoff = xdr.xdrDecodeInt();
+ dataulen = xdr.xdrDecodeInt();
+ dataflags = xdr.xdrDecodeInt();
+ datadata = xdr.xdrDecodeDynamicOpaque();
+ flags = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __db_get_msg.java
diff --git a/bdb/rpc_server/java/gen/__db_get_reply.java b/bdb/rpc_server/java/gen/__db_get_reply.java
new file mode 100644
index 00000000000..64ce525728a
--- /dev/null
+++ b/bdb/rpc_server/java/gen/__db_get_reply.java
@@ -0,0 +1,38 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __db_get_reply implements XdrAble {
+ public int status;
+ public byte [] keydata;
+ public byte [] datadata;
+
+ public __db_get_reply() {
+ }
+
+ public __db_get_reply(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(status);
+ xdr.xdrEncodeDynamicOpaque(keydata);
+ xdr.xdrEncodeDynamicOpaque(datadata);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ status = xdr.xdrDecodeInt();
+ keydata = xdr.xdrDecodeDynamicOpaque();
+ datadata = xdr.xdrDecodeDynamicOpaque();
+ }
+
+}
+// End of __db_get_reply.java
diff --git a/bdb/rpc_server/java/gen/__db_h_ffactor_msg.java b/bdb/rpc_server/java/gen/__db_h_ffactor_msg.java
new file mode 100644
index 00000000000..8d2ed1b1c0b
--- /dev/null
+++ b/bdb/rpc_server/java/gen/__db_h_ffactor_msg.java
@@ -0,0 +1,35 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __db_h_ffactor_msg implements XdrAble {
+ public int dbpcl_id;
+ public int ffactor;
+
+ public __db_h_ffactor_msg() {
+ }
+
+ public __db_h_ffactor_msg(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(dbpcl_id);
+ xdr.xdrEncodeInt(ffactor);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ dbpcl_id = xdr.xdrDecodeInt();
+ ffactor = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __db_h_ffactor_msg.java
diff --git a/bdb/rpc_server/java/gen/__db_h_ffactor_reply.java b/bdb/rpc_server/java/gen/__db_h_ffactor_reply.java
new file mode 100644
index 00000000000..1885ec50240
--- /dev/null
+++ b/bdb/rpc_server/java/gen/__db_h_ffactor_reply.java
@@ -0,0 +1,32 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __db_h_ffactor_reply implements XdrAble {
+ public int status;
+
+ public __db_h_ffactor_reply() {
+ }
+
+ public __db_h_ffactor_reply(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(status);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ status = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __db_h_ffactor_reply.java
diff --git a/bdb/rpc_server/java/gen/__db_h_nelem_msg.java b/bdb/rpc_server/java/gen/__db_h_nelem_msg.java
new file mode 100644
index 00000000000..7d084351755
--- /dev/null
+++ b/bdb/rpc_server/java/gen/__db_h_nelem_msg.java
@@ -0,0 +1,35 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __db_h_nelem_msg implements XdrAble {
+ public int dbpcl_id;
+ public int nelem;
+
+ public __db_h_nelem_msg() {
+ }
+
+ public __db_h_nelem_msg(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(dbpcl_id);
+ xdr.xdrEncodeInt(nelem);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ dbpcl_id = xdr.xdrDecodeInt();
+ nelem = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __db_h_nelem_msg.java
diff --git a/bdb/rpc_server/java/gen/__db_h_nelem_reply.java b/bdb/rpc_server/java/gen/__db_h_nelem_reply.java
new file mode 100644
index 00000000000..20c5c774e69
--- /dev/null
+++ b/bdb/rpc_server/java/gen/__db_h_nelem_reply.java
@@ -0,0 +1,32 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __db_h_nelem_reply implements XdrAble {
+ public int status;
+
+ public __db_h_nelem_reply() {
+ }
+
+ public __db_h_nelem_reply(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(status);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ status = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __db_h_nelem_reply.java
diff --git a/bdb/rpc_server/java/gen/__db_join_msg.java b/bdb/rpc_server/java/gen/__db_join_msg.java
new file mode 100644
index 00000000000..88c72dbd6ba
--- /dev/null
+++ b/bdb/rpc_server/java/gen/__db_join_msg.java
@@ -0,0 +1,38 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __db_join_msg implements XdrAble {
+ public int dbpcl_id;
+ public int [] curs;
+ public int flags;
+
+ public __db_join_msg() {
+ }
+
+ public __db_join_msg(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(dbpcl_id);
+ xdr.xdrEncodeIntVector(curs);
+ xdr.xdrEncodeInt(flags);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ dbpcl_id = xdr.xdrDecodeInt();
+ curs = xdr.xdrDecodeIntVector();
+ flags = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __db_join_msg.java
diff --git a/bdb/rpc_server/java/gen/__db_join_reply.java b/bdb/rpc_server/java/gen/__db_join_reply.java
new file mode 100644
index 00000000000..80980e23d6c
--- /dev/null
+++ b/bdb/rpc_server/java/gen/__db_join_reply.java
@@ -0,0 +1,35 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __db_join_reply implements XdrAble {
+ public int status;
+ public int dbcidcl_id;
+
+ public __db_join_reply() {
+ }
+
+ public __db_join_reply(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(status);
+ xdr.xdrEncodeInt(dbcidcl_id);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ status = xdr.xdrDecodeInt();
+ dbcidcl_id = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __db_join_reply.java
diff --git a/bdb/rpc_server/java/gen/__db_key_range_msg.java b/bdb/rpc_server/java/gen/__db_key_range_msg.java
new file mode 100644
index 00000000000..233077e0964
--- /dev/null
+++ b/bdb/rpc_server/java/gen/__db_key_range_msg.java
@@ -0,0 +1,53 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __db_key_range_msg implements XdrAble {
+ public int dbpcl_id;
+ public int txnpcl_id;
+ public int keydlen;
+ public int keydoff;
+ public int keyulen;
+ public int keyflags;
+ public byte [] keydata;
+ public int flags;
+
+ public __db_key_range_msg() {
+ }
+
+ public __db_key_range_msg(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(dbpcl_id);
+ xdr.xdrEncodeInt(txnpcl_id);
+ xdr.xdrEncodeInt(keydlen);
+ xdr.xdrEncodeInt(keydoff);
+ xdr.xdrEncodeInt(keyulen);
+ xdr.xdrEncodeInt(keyflags);
+ xdr.xdrEncodeDynamicOpaque(keydata);
+ xdr.xdrEncodeInt(flags);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ dbpcl_id = xdr.xdrDecodeInt();
+ txnpcl_id = xdr.xdrDecodeInt();
+ keydlen = xdr.xdrDecodeInt();
+ keydoff = xdr.xdrDecodeInt();
+ keyulen = xdr.xdrDecodeInt();
+ keyflags = xdr.xdrDecodeInt();
+ keydata = xdr.xdrDecodeDynamicOpaque();
+ flags = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __db_key_range_msg.java
diff --git a/bdb/rpc_server/java/gen/__db_key_range_reply.java b/bdb/rpc_server/java/gen/__db_key_range_reply.java
new file mode 100644
index 00000000000..09244c13d1d
--- /dev/null
+++ b/bdb/rpc_server/java/gen/__db_key_range_reply.java
@@ -0,0 +1,41 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __db_key_range_reply implements XdrAble {
+ public int status;
+ public double less;
+ public double equal;
+ public double greater;
+
+ public __db_key_range_reply() {
+ }
+
+ public __db_key_range_reply(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(status);
+ xdr.xdrEncodeDouble(less);
+ xdr.xdrEncodeDouble(equal);
+ xdr.xdrEncodeDouble(greater);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ status = xdr.xdrDecodeInt();
+ less = xdr.xdrDecodeDouble();
+ equal = xdr.xdrDecodeDouble();
+ greater = xdr.xdrDecodeDouble();
+ }
+
+}
+// End of __db_key_range_reply.java
diff --git a/bdb/rpc_server/java/gen/__db_lorder_msg.java b/bdb/rpc_server/java/gen/__db_lorder_msg.java
new file mode 100644
index 00000000000..3399ad8daf0
--- /dev/null
+++ b/bdb/rpc_server/java/gen/__db_lorder_msg.java
@@ -0,0 +1,35 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __db_lorder_msg implements XdrAble {
+ public int dbpcl_id;
+ public int lorder;
+
+ public __db_lorder_msg() {
+ }
+
+ public __db_lorder_msg(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(dbpcl_id);
+ xdr.xdrEncodeInt(lorder);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ dbpcl_id = xdr.xdrDecodeInt();
+ lorder = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __db_lorder_msg.java
diff --git a/bdb/rpc_server/java/gen/__db_lorder_reply.java b/bdb/rpc_server/java/gen/__db_lorder_reply.java
new file mode 100644
index 00000000000..cdcda4d4f43
--- /dev/null
+++ b/bdb/rpc_server/java/gen/__db_lorder_reply.java
@@ -0,0 +1,32 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __db_lorder_reply implements XdrAble {
+ public int status;
+
+ public __db_lorder_reply() {
+ }
+
+ public __db_lorder_reply(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(status);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ status = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __db_lorder_reply.java
diff --git a/bdb/rpc_server/java/gen/__db_open_msg.java b/bdb/rpc_server/java/gen/__db_open_msg.java
new file mode 100644
index 00000000000..14dbd9e3b0c
--- /dev/null
+++ b/bdb/rpc_server/java/gen/__db_open_msg.java
@@ -0,0 +1,50 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 2/13/02 1:05 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __db_open_msg implements XdrAble {
+ public int dbpcl_id;
+ public int txnpcl_id;
+ public String name;
+ public String subdb;
+ public int type;
+ public int flags;
+ public int mode;
+
+ public __db_open_msg() {
+ }
+
+ public __db_open_msg(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(dbpcl_id);
+ xdr.xdrEncodeInt(txnpcl_id);
+ xdr.xdrEncodeString(name);
+ xdr.xdrEncodeString(subdb);
+ xdr.xdrEncodeInt(type);
+ xdr.xdrEncodeInt(flags);
+ xdr.xdrEncodeInt(mode);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ dbpcl_id = xdr.xdrDecodeInt();
+ txnpcl_id = xdr.xdrDecodeInt();
+ name = xdr.xdrDecodeString();
+ subdb = xdr.xdrDecodeString();
+ type = xdr.xdrDecodeInt();
+ flags = xdr.xdrDecodeInt();
+ mode = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __db_open_msg.java
diff --git a/bdb/rpc_server/java/gen/__db_open_reply.java b/bdb/rpc_server/java/gen/__db_open_reply.java
new file mode 100644
index 00000000000..d90c3754c2f
--- /dev/null
+++ b/bdb/rpc_server/java/gen/__db_open_reply.java
@@ -0,0 +1,44 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __db_open_reply implements XdrAble {
+ public int status;
+ public int dbcl_id;
+ public int type;
+ public int dbflags;
+ public int lorder;
+
+ public __db_open_reply() {
+ }
+
+ public __db_open_reply(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(status);
+ xdr.xdrEncodeInt(dbcl_id);
+ xdr.xdrEncodeInt(type);
+ xdr.xdrEncodeInt(dbflags);
+ xdr.xdrEncodeInt(lorder);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ status = xdr.xdrDecodeInt();
+ dbcl_id = xdr.xdrDecodeInt();
+ type = xdr.xdrDecodeInt();
+ dbflags = xdr.xdrDecodeInt();
+ lorder = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __db_open_reply.java
diff --git a/bdb/rpc_server/java/gen/__db_pagesize_msg.java b/bdb/rpc_server/java/gen/__db_pagesize_msg.java
new file mode 100644
index 00000000000..a452ea4e381
--- /dev/null
+++ b/bdb/rpc_server/java/gen/__db_pagesize_msg.java
@@ -0,0 +1,35 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __db_pagesize_msg implements XdrAble {
+ public int dbpcl_id;
+ public int pagesize;
+
+ public __db_pagesize_msg() {
+ }
+
+ public __db_pagesize_msg(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(dbpcl_id);
+ xdr.xdrEncodeInt(pagesize);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ dbpcl_id = xdr.xdrDecodeInt();
+ pagesize = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __db_pagesize_msg.java
diff --git a/bdb/rpc_server/java/gen/__db_pagesize_reply.java b/bdb/rpc_server/java/gen/__db_pagesize_reply.java
new file mode 100644
index 00000000000..830b2078b34
--- /dev/null
+++ b/bdb/rpc_server/java/gen/__db_pagesize_reply.java
@@ -0,0 +1,32 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __db_pagesize_reply implements XdrAble {
+ public int status;
+
+ public __db_pagesize_reply() {
+ }
+
+ public __db_pagesize_reply(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(status);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ status = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __db_pagesize_reply.java
diff --git a/bdb/rpc_server/java/gen/__db_pget_msg.java b/bdb/rpc_server/java/gen/__db_pget_msg.java
new file mode 100644
index 00000000000..11d27ca9e46
--- /dev/null
+++ b/bdb/rpc_server/java/gen/__db_pget_msg.java
@@ -0,0 +1,83 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __db_pget_msg implements XdrAble {
+ public int dbpcl_id;
+ public int txnpcl_id;
+ public int skeydlen;
+ public int skeydoff;
+ public int skeyulen;
+ public int skeyflags;
+ public byte [] skeydata;
+ public int pkeydlen;
+ public int pkeydoff;
+ public int pkeyulen;
+ public int pkeyflags;
+ public byte [] pkeydata;
+ public int datadlen;
+ public int datadoff;
+ public int dataulen;
+ public int dataflags;
+ public byte [] datadata;
+ public int flags;
+
+ public __db_pget_msg() {
+ }
+
+ public __db_pget_msg(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(dbpcl_id);
+ xdr.xdrEncodeInt(txnpcl_id);
+ xdr.xdrEncodeInt(skeydlen);
+ xdr.xdrEncodeInt(skeydoff);
+ xdr.xdrEncodeInt(skeyulen);
+ xdr.xdrEncodeInt(skeyflags);
+ xdr.xdrEncodeDynamicOpaque(skeydata);
+ xdr.xdrEncodeInt(pkeydlen);
+ xdr.xdrEncodeInt(pkeydoff);
+ xdr.xdrEncodeInt(pkeyulen);
+ xdr.xdrEncodeInt(pkeyflags);
+ xdr.xdrEncodeDynamicOpaque(pkeydata);
+ xdr.xdrEncodeInt(datadlen);
+ xdr.xdrEncodeInt(datadoff);
+ xdr.xdrEncodeInt(dataulen);
+ xdr.xdrEncodeInt(dataflags);
+ xdr.xdrEncodeDynamicOpaque(datadata);
+ xdr.xdrEncodeInt(flags);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ dbpcl_id = xdr.xdrDecodeInt();
+ txnpcl_id = xdr.xdrDecodeInt();
+ skeydlen = xdr.xdrDecodeInt();
+ skeydoff = xdr.xdrDecodeInt();
+ skeyulen = xdr.xdrDecodeInt();
+ skeyflags = xdr.xdrDecodeInt();
+ skeydata = xdr.xdrDecodeDynamicOpaque();
+ pkeydlen = xdr.xdrDecodeInt();
+ pkeydoff = xdr.xdrDecodeInt();
+ pkeyulen = xdr.xdrDecodeInt();
+ pkeyflags = xdr.xdrDecodeInt();
+ pkeydata = xdr.xdrDecodeDynamicOpaque();
+ datadlen = xdr.xdrDecodeInt();
+ datadoff = xdr.xdrDecodeInt();
+ dataulen = xdr.xdrDecodeInt();
+ dataflags = xdr.xdrDecodeInt();
+ datadata = xdr.xdrDecodeDynamicOpaque();
+ flags = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __db_pget_msg.java
diff --git a/bdb/rpc_server/java/gen/__db_pget_reply.java b/bdb/rpc_server/java/gen/__db_pget_reply.java
new file mode 100644
index 00000000000..86c9c2111b9
--- /dev/null
+++ b/bdb/rpc_server/java/gen/__db_pget_reply.java
@@ -0,0 +1,41 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __db_pget_reply implements XdrAble {
+ public int status;
+ public byte [] skeydata;
+ public byte [] pkeydata;
+ public byte [] datadata;
+
+ public __db_pget_reply() {
+ }
+
+ public __db_pget_reply(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(status);
+ xdr.xdrEncodeDynamicOpaque(skeydata);
+ xdr.xdrEncodeDynamicOpaque(pkeydata);
+ xdr.xdrEncodeDynamicOpaque(datadata);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ status = xdr.xdrDecodeInt();
+ skeydata = xdr.xdrDecodeDynamicOpaque();
+ pkeydata = xdr.xdrDecodeDynamicOpaque();
+ datadata = xdr.xdrDecodeDynamicOpaque();
+ }
+
+}
+// End of __db_pget_reply.java
diff --git a/bdb/rpc_server/java/gen/__db_put_msg.java b/bdb/rpc_server/java/gen/__db_put_msg.java
new file mode 100644
index 00000000000..b6159cff3a8
--- /dev/null
+++ b/bdb/rpc_server/java/gen/__db_put_msg.java
@@ -0,0 +1,68 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __db_put_msg implements XdrAble {
+ public int dbpcl_id;
+ public int txnpcl_id;
+ public int keydlen;
+ public int keydoff;
+ public int keyulen;
+ public int keyflags;
+ public byte [] keydata;
+ public int datadlen;
+ public int datadoff;
+ public int dataulen;
+ public int dataflags;
+ public byte [] datadata;
+ public int flags;
+
+ public __db_put_msg() {
+ }
+
+ public __db_put_msg(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(dbpcl_id);
+ xdr.xdrEncodeInt(txnpcl_id);
+ xdr.xdrEncodeInt(keydlen);
+ xdr.xdrEncodeInt(keydoff);
+ xdr.xdrEncodeInt(keyulen);
+ xdr.xdrEncodeInt(keyflags);
+ xdr.xdrEncodeDynamicOpaque(keydata);
+ xdr.xdrEncodeInt(datadlen);
+ xdr.xdrEncodeInt(datadoff);
+ xdr.xdrEncodeInt(dataulen);
+ xdr.xdrEncodeInt(dataflags);
+ xdr.xdrEncodeDynamicOpaque(datadata);
+ xdr.xdrEncodeInt(flags);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ dbpcl_id = xdr.xdrDecodeInt();
+ txnpcl_id = xdr.xdrDecodeInt();
+ keydlen = xdr.xdrDecodeInt();
+ keydoff = xdr.xdrDecodeInt();
+ keyulen = xdr.xdrDecodeInt();
+ keyflags = xdr.xdrDecodeInt();
+ keydata = xdr.xdrDecodeDynamicOpaque();
+ datadlen = xdr.xdrDecodeInt();
+ datadoff = xdr.xdrDecodeInt();
+ dataulen = xdr.xdrDecodeInt();
+ dataflags = xdr.xdrDecodeInt();
+ datadata = xdr.xdrDecodeDynamicOpaque();
+ flags = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __db_put_msg.java
diff --git a/bdb/rpc_server/java/gen/__db_put_reply.java b/bdb/rpc_server/java/gen/__db_put_reply.java
new file mode 100644
index 00000000000..fc89ae1c3bd
--- /dev/null
+++ b/bdb/rpc_server/java/gen/__db_put_reply.java
@@ -0,0 +1,35 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __db_put_reply implements XdrAble {
+ public int status;
+ public byte [] keydata;
+
+ public __db_put_reply() {
+ }
+
+ public __db_put_reply(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(status);
+ xdr.xdrEncodeDynamicOpaque(keydata);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ status = xdr.xdrDecodeInt();
+ keydata = xdr.xdrDecodeDynamicOpaque();
+ }
+
+}
+// End of __db_put_reply.java
diff --git a/bdb/rpc_server/java/gen/__db_re_delim_msg.java b/bdb/rpc_server/java/gen/__db_re_delim_msg.java
new file mode 100644
index 00000000000..c386bddd256
--- /dev/null
+++ b/bdb/rpc_server/java/gen/__db_re_delim_msg.java
@@ -0,0 +1,35 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __db_re_delim_msg implements XdrAble {
+ public int dbpcl_id;
+ public int delim;
+
+ public __db_re_delim_msg() {
+ }
+
+ public __db_re_delim_msg(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(dbpcl_id);
+ xdr.xdrEncodeInt(delim);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ dbpcl_id = xdr.xdrDecodeInt();
+ delim = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __db_re_delim_msg.java
diff --git a/bdb/rpc_server/java/gen/__db_re_delim_reply.java b/bdb/rpc_server/java/gen/__db_re_delim_reply.java
new file mode 100644
index 00000000000..aa8a797f53d
--- /dev/null
+++ b/bdb/rpc_server/java/gen/__db_re_delim_reply.java
@@ -0,0 +1,32 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __db_re_delim_reply implements XdrAble {
+ public int status;
+
+ public __db_re_delim_reply() {
+ }
+
+ public __db_re_delim_reply(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(status);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ status = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __db_re_delim_reply.java
diff --git a/bdb/rpc_server/java/gen/__db_re_len_msg.java b/bdb/rpc_server/java/gen/__db_re_len_msg.java
new file mode 100644
index 00000000000..664de5c899c
--- /dev/null
+++ b/bdb/rpc_server/java/gen/__db_re_len_msg.java
@@ -0,0 +1,35 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __db_re_len_msg implements XdrAble {
+ public int dbpcl_id;
+ public int len;
+
+ public __db_re_len_msg() {
+ }
+
+ public __db_re_len_msg(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(dbpcl_id);
+ xdr.xdrEncodeInt(len);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ dbpcl_id = xdr.xdrDecodeInt();
+ len = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __db_re_len_msg.java
diff --git a/bdb/rpc_server/java/gen/__db_re_len_reply.java b/bdb/rpc_server/java/gen/__db_re_len_reply.java
new file mode 100644
index 00000000000..dda27c8c123
--- /dev/null
+++ b/bdb/rpc_server/java/gen/__db_re_len_reply.java
@@ -0,0 +1,32 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __db_re_len_reply implements XdrAble {
+ public int status;
+
+ public __db_re_len_reply() {
+ }
+
+ public __db_re_len_reply(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(status);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ status = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __db_re_len_reply.java
diff --git a/bdb/rpc_server/java/gen/__db_re_pad_msg.java b/bdb/rpc_server/java/gen/__db_re_pad_msg.java
new file mode 100644
index 00000000000..2c1290b6e74
--- /dev/null
+++ b/bdb/rpc_server/java/gen/__db_re_pad_msg.java
@@ -0,0 +1,35 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __db_re_pad_msg implements XdrAble {
+ public int dbpcl_id;
+ public int pad;
+
+ public __db_re_pad_msg() {
+ }
+
+ public __db_re_pad_msg(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(dbpcl_id);
+ xdr.xdrEncodeInt(pad);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ dbpcl_id = xdr.xdrDecodeInt();
+ pad = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __db_re_pad_msg.java
diff --git a/bdb/rpc_server/java/gen/__db_re_pad_reply.java b/bdb/rpc_server/java/gen/__db_re_pad_reply.java
new file mode 100644
index 00000000000..f0aaa9a3a70
--- /dev/null
+++ b/bdb/rpc_server/java/gen/__db_re_pad_reply.java
@@ -0,0 +1,32 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __db_re_pad_reply implements XdrAble {
+ public int status;
+
+ public __db_re_pad_reply() {
+ }
+
+ public __db_re_pad_reply(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(status);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ status = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __db_re_pad_reply.java
diff --git a/bdb/rpc_server/java/gen/__db_remove_msg.java b/bdb/rpc_server/java/gen/__db_remove_msg.java
new file mode 100644
index 00000000000..dfa9066a7ec
--- /dev/null
+++ b/bdb/rpc_server/java/gen/__db_remove_msg.java
@@ -0,0 +1,41 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __db_remove_msg implements XdrAble {
+ public int dbpcl_id;
+ public String name;
+ public String subdb;
+ public int flags;
+
+ public __db_remove_msg() {
+ }
+
+ public __db_remove_msg(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(dbpcl_id);
+ xdr.xdrEncodeString(name);
+ xdr.xdrEncodeString(subdb);
+ xdr.xdrEncodeInt(flags);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ dbpcl_id = xdr.xdrDecodeInt();
+ name = xdr.xdrDecodeString();
+ subdb = xdr.xdrDecodeString();
+ flags = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __db_remove_msg.java
diff --git a/bdb/rpc_server/java/gen/__db_remove_reply.java b/bdb/rpc_server/java/gen/__db_remove_reply.java
new file mode 100644
index 00000000000..a2b86c04985
--- /dev/null
+++ b/bdb/rpc_server/java/gen/__db_remove_reply.java
@@ -0,0 +1,32 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __db_remove_reply implements XdrAble {
+ public int status;
+
+ public __db_remove_reply() {
+ }
+
+ public __db_remove_reply(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(status);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ status = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __db_remove_reply.java
diff --git a/bdb/rpc_server/java/gen/__db_rename_msg.java b/bdb/rpc_server/java/gen/__db_rename_msg.java
new file mode 100644
index 00000000000..12b434e3375
--- /dev/null
+++ b/bdb/rpc_server/java/gen/__db_rename_msg.java
@@ -0,0 +1,44 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __db_rename_msg implements XdrAble {
+ public int dbpcl_id;
+ public String name;
+ public String subdb;
+ public String newname;
+ public int flags;
+
+ public __db_rename_msg() {
+ }
+
+ public __db_rename_msg(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(dbpcl_id);
+ xdr.xdrEncodeString(name);
+ xdr.xdrEncodeString(subdb);
+ xdr.xdrEncodeString(newname);
+ xdr.xdrEncodeInt(flags);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ dbpcl_id = xdr.xdrDecodeInt();
+ name = xdr.xdrDecodeString();
+ subdb = xdr.xdrDecodeString();
+ newname = xdr.xdrDecodeString();
+ flags = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __db_rename_msg.java
diff --git a/bdb/rpc_server/java/gen/__db_rename_reply.java b/bdb/rpc_server/java/gen/__db_rename_reply.java
new file mode 100644
index 00000000000..4e4a22be570
--- /dev/null
+++ b/bdb/rpc_server/java/gen/__db_rename_reply.java
@@ -0,0 +1,32 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __db_rename_reply implements XdrAble {
+ public int status;
+
+ public __db_rename_reply() {
+ }
+
+ public __db_rename_reply(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(status);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ status = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __db_rename_reply.java
diff --git a/bdb/rpc_server/java/gen/__db_stat_msg.java b/bdb/rpc_server/java/gen/__db_stat_msg.java
new file mode 100644
index 00000000000..af536b5f707
--- /dev/null
+++ b/bdb/rpc_server/java/gen/__db_stat_msg.java
@@ -0,0 +1,35 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __db_stat_msg implements XdrAble {
+ public int dbpcl_id;
+ public int flags;
+
+ public __db_stat_msg() {
+ }
+
+ public __db_stat_msg(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(dbpcl_id);
+ xdr.xdrEncodeInt(flags);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ dbpcl_id = xdr.xdrDecodeInt();
+ flags = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __db_stat_msg.java
diff --git a/bdb/rpc_server/java/gen/__db_stat_reply.java b/bdb/rpc_server/java/gen/__db_stat_reply.java
new file mode 100644
index 00000000000..8df1460149a
--- /dev/null
+++ b/bdb/rpc_server/java/gen/__db_stat_reply.java
@@ -0,0 +1,35 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __db_stat_reply implements XdrAble {
+ public int status;
+ public int [] stats;
+
+ public __db_stat_reply() {
+ }
+
+ public __db_stat_reply(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(status);
+ xdr.xdrEncodeIntVector(stats);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ status = xdr.xdrDecodeInt();
+ stats = xdr.xdrDecodeIntVector();
+ }
+
+}
+// End of __db_stat_reply.java
diff --git a/bdb/rpc_server/java/gen/__db_sync_msg.java b/bdb/rpc_server/java/gen/__db_sync_msg.java
new file mode 100644
index 00000000000..c6594670fc6
--- /dev/null
+++ b/bdb/rpc_server/java/gen/__db_sync_msg.java
@@ -0,0 +1,35 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __db_sync_msg implements XdrAble {
+ public int dbpcl_id;
+ public int flags;
+
+ public __db_sync_msg() {
+ }
+
+ public __db_sync_msg(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(dbpcl_id);
+ xdr.xdrEncodeInt(flags);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ dbpcl_id = xdr.xdrDecodeInt();
+ flags = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __db_sync_msg.java
diff --git a/bdb/rpc_server/java/gen/__db_sync_reply.java b/bdb/rpc_server/java/gen/__db_sync_reply.java
new file mode 100644
index 00000000000..d0a8bc8b196
--- /dev/null
+++ b/bdb/rpc_server/java/gen/__db_sync_reply.java
@@ -0,0 +1,32 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __db_sync_reply implements XdrAble {
+ public int status;
+
+ public __db_sync_reply() {
+ }
+
+ public __db_sync_reply(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(status);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ status = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __db_sync_reply.java
diff --git a/bdb/rpc_server/java/gen/__db_truncate_msg.java b/bdb/rpc_server/java/gen/__db_truncate_msg.java
new file mode 100644
index 00000000000..38810d65660
--- /dev/null
+++ b/bdb/rpc_server/java/gen/__db_truncate_msg.java
@@ -0,0 +1,38 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __db_truncate_msg implements XdrAble {
+ public int dbpcl_id;
+ public int txnpcl_id;
+ public int flags;
+
+ public __db_truncate_msg() {
+ }
+
+ public __db_truncate_msg(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(dbpcl_id);
+ xdr.xdrEncodeInt(txnpcl_id);
+ xdr.xdrEncodeInt(flags);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ dbpcl_id = xdr.xdrDecodeInt();
+ txnpcl_id = xdr.xdrDecodeInt();
+ flags = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __db_truncate_msg.java
diff --git a/bdb/rpc_server/java/gen/__db_truncate_reply.java b/bdb/rpc_server/java/gen/__db_truncate_reply.java
new file mode 100644
index 00000000000..c4f68869007
--- /dev/null
+++ b/bdb/rpc_server/java/gen/__db_truncate_reply.java
@@ -0,0 +1,35 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __db_truncate_reply implements XdrAble {
+ public int status;
+ public int count;
+
+ public __db_truncate_reply() {
+ }
+
+ public __db_truncate_reply(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(status);
+ xdr.xdrEncodeInt(count);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ status = xdr.xdrDecodeInt();
+ count = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __db_truncate_reply.java
diff --git a/bdb/rpc_server/java/gen/__dbc_close_msg.java b/bdb/rpc_server/java/gen/__dbc_close_msg.java
new file mode 100644
index 00000000000..eb1ca7f7e17
--- /dev/null
+++ b/bdb/rpc_server/java/gen/__dbc_close_msg.java
@@ -0,0 +1,32 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __dbc_close_msg implements XdrAble {
+ public int dbccl_id;
+
+ public __dbc_close_msg() {
+ }
+
+ public __dbc_close_msg(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(dbccl_id);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ dbccl_id = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __dbc_close_msg.java
diff --git a/bdb/rpc_server/java/gen/__dbc_close_reply.java b/bdb/rpc_server/java/gen/__dbc_close_reply.java
new file mode 100644
index 00000000000..47459aace36
--- /dev/null
+++ b/bdb/rpc_server/java/gen/__dbc_close_reply.java
@@ -0,0 +1,32 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __dbc_close_reply implements XdrAble {
+ public int status;
+
+ public __dbc_close_reply() {
+ }
+
+ public __dbc_close_reply(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(status);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ status = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __dbc_close_reply.java
diff --git a/bdb/rpc_server/java/gen/__dbc_count_msg.java b/bdb/rpc_server/java/gen/__dbc_count_msg.java
new file mode 100644
index 00000000000..5f554e18a1b
--- /dev/null
+++ b/bdb/rpc_server/java/gen/__dbc_count_msg.java
@@ -0,0 +1,35 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __dbc_count_msg implements XdrAble {
+ public int dbccl_id;
+ public int flags;
+
+ public __dbc_count_msg() {
+ }
+
+ public __dbc_count_msg(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(dbccl_id);
+ xdr.xdrEncodeInt(flags);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ dbccl_id = xdr.xdrDecodeInt();
+ flags = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __dbc_count_msg.java
diff --git a/bdb/rpc_server/java/gen/__dbc_count_reply.java b/bdb/rpc_server/java/gen/__dbc_count_reply.java
new file mode 100644
index 00000000000..4daecdd2296
--- /dev/null
+++ b/bdb/rpc_server/java/gen/__dbc_count_reply.java
@@ -0,0 +1,35 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __dbc_count_reply implements XdrAble {
+ public int status;
+ public int dupcount;
+
+ public __dbc_count_reply() {
+ }
+
+ public __dbc_count_reply(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(status);
+ xdr.xdrEncodeInt(dupcount);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ status = xdr.xdrDecodeInt();
+ dupcount = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __dbc_count_reply.java
diff --git a/bdb/rpc_server/java/gen/__dbc_del_msg.java b/bdb/rpc_server/java/gen/__dbc_del_msg.java
new file mode 100644
index 00000000000..bc4bd05f573
--- /dev/null
+++ b/bdb/rpc_server/java/gen/__dbc_del_msg.java
@@ -0,0 +1,35 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __dbc_del_msg implements XdrAble {
+ public int dbccl_id;
+ public int flags;
+
+ public __dbc_del_msg() {
+ }
+
+ public __dbc_del_msg(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(dbccl_id);
+ xdr.xdrEncodeInt(flags);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ dbccl_id = xdr.xdrDecodeInt();
+ flags = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __dbc_del_msg.java
diff --git a/bdb/rpc_server/java/gen/__dbc_del_reply.java b/bdb/rpc_server/java/gen/__dbc_del_reply.java
new file mode 100644
index 00000000000..e55ac9ffaf6
--- /dev/null
+++ b/bdb/rpc_server/java/gen/__dbc_del_reply.java
@@ -0,0 +1,32 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __dbc_del_reply implements XdrAble {
+ public int status;
+
+ public __dbc_del_reply() {
+ }
+
+ public __dbc_del_reply(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(status);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ status = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __dbc_del_reply.java
diff --git a/bdb/rpc_server/java/gen/__dbc_dup_msg.java b/bdb/rpc_server/java/gen/__dbc_dup_msg.java
new file mode 100644
index 00000000000..9a3894e6158
--- /dev/null
+++ b/bdb/rpc_server/java/gen/__dbc_dup_msg.java
@@ -0,0 +1,35 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __dbc_dup_msg implements XdrAble {
+ public int dbccl_id;
+ public int flags;
+
+ public __dbc_dup_msg() {
+ }
+
+ public __dbc_dup_msg(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(dbccl_id);
+ xdr.xdrEncodeInt(flags);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ dbccl_id = xdr.xdrDecodeInt();
+ flags = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __dbc_dup_msg.java
diff --git a/bdb/rpc_server/java/gen/__dbc_dup_reply.java b/bdb/rpc_server/java/gen/__dbc_dup_reply.java
new file mode 100644
index 00000000000..6b942f1a61a
--- /dev/null
+++ b/bdb/rpc_server/java/gen/__dbc_dup_reply.java
@@ -0,0 +1,35 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __dbc_dup_reply implements XdrAble {
+ public int status;
+ public int dbcidcl_id;
+
+ public __dbc_dup_reply() {
+ }
+
+ public __dbc_dup_reply(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(status);
+ xdr.xdrEncodeInt(dbcidcl_id);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ status = xdr.xdrDecodeInt();
+ dbcidcl_id = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __dbc_dup_reply.java
diff --git a/bdb/rpc_server/java/gen/__dbc_get_msg.java b/bdb/rpc_server/java/gen/__dbc_get_msg.java
new file mode 100644
index 00000000000..672ace43fdd
--- /dev/null
+++ b/bdb/rpc_server/java/gen/__dbc_get_msg.java
@@ -0,0 +1,65 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __dbc_get_msg implements XdrAble {
+ public int dbccl_id;
+ public int keydlen;
+ public int keydoff;
+ public int keyulen;
+ public int keyflags;
+ public byte [] keydata;
+ public int datadlen;
+ public int datadoff;
+ public int dataulen;
+ public int dataflags;
+ public byte [] datadata;
+ public int flags;
+
+ public __dbc_get_msg() {
+ }
+
+ public __dbc_get_msg(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(dbccl_id);
+ xdr.xdrEncodeInt(keydlen);
+ xdr.xdrEncodeInt(keydoff);
+ xdr.xdrEncodeInt(keyulen);
+ xdr.xdrEncodeInt(keyflags);
+ xdr.xdrEncodeDynamicOpaque(keydata);
+ xdr.xdrEncodeInt(datadlen);
+ xdr.xdrEncodeInt(datadoff);
+ xdr.xdrEncodeInt(dataulen);
+ xdr.xdrEncodeInt(dataflags);
+ xdr.xdrEncodeDynamicOpaque(datadata);
+ xdr.xdrEncodeInt(flags);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ dbccl_id = xdr.xdrDecodeInt();
+ keydlen = xdr.xdrDecodeInt();
+ keydoff = xdr.xdrDecodeInt();
+ keyulen = xdr.xdrDecodeInt();
+ keyflags = xdr.xdrDecodeInt();
+ keydata = xdr.xdrDecodeDynamicOpaque();
+ datadlen = xdr.xdrDecodeInt();
+ datadoff = xdr.xdrDecodeInt();
+ dataulen = xdr.xdrDecodeInt();
+ dataflags = xdr.xdrDecodeInt();
+ datadata = xdr.xdrDecodeDynamicOpaque();
+ flags = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __dbc_get_msg.java
diff --git a/bdb/rpc_server/java/gen/__dbc_get_reply.java b/bdb/rpc_server/java/gen/__dbc_get_reply.java
new file mode 100644
index 00000000000..8671fec6335
--- /dev/null
+++ b/bdb/rpc_server/java/gen/__dbc_get_reply.java
@@ -0,0 +1,38 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __dbc_get_reply implements XdrAble {
+ public int status;
+ public byte [] keydata;
+ public byte [] datadata;
+
+ public __dbc_get_reply() {
+ }
+
+ public __dbc_get_reply(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(status);
+ xdr.xdrEncodeDynamicOpaque(keydata);
+ xdr.xdrEncodeDynamicOpaque(datadata);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ status = xdr.xdrDecodeInt();
+ keydata = xdr.xdrDecodeDynamicOpaque();
+ datadata = xdr.xdrDecodeDynamicOpaque();
+ }
+
+}
+// End of __dbc_get_reply.java
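
The request/reply pair above mirrors a Berkeley DB cursor get: the *dlen/*doff/*ulen/*flags integers carry the DBT partial and user-memory settings, and the *data byte arrays carry the key and data payloads as XDR dynamic opaques. The following is only an illustrative sketch, not part of the patch; the class name DbcGetSketch, the handle id, and the flag values are made-up examples showing how a caller might fill in the generated fields.

    // Illustrative sketch only -- not part of the BDB 4.1.24 patch.
    // The cursor id and flag values below are invented for demonstration.
    package com.sleepycat.db.rpcserver;

    public class DbcGetSketch {
        public static void main(String[] args) {
            __dbc_get_msg req = new __dbc_get_msg();
            req.dbccl_id = 7;                       // cursor handle id (example value)
            req.keydata = "somekey".getBytes();     // key DBT payload
            req.keyflags = 0;                       // key DBT flags
            req.datadata = new byte[0];             // data DBT left empty for a get
            req.dataflags = 0;
            req.flags = 0;                          // a DB_SET-style cursor flag would go here

            // A Remote Tea stub would serialize req with req.xdrEncode(stream)
            // and fill a __dbc_get_reply from the server's answer via xdrDecode().
            __dbc_get_reply reply = new __dbc_get_reply();
            System.out.println("reply status before any call: " + reply.status);
        }
    }
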
diff --git a/bdb/rpc_server/java/gen/__dbc_pget_msg.java b/bdb/rpc_server/java/gen/__dbc_pget_msg.java
new file mode 100644
index 00000000000..8ca3c6171a1
--- /dev/null
+++ b/bdb/rpc_server/java/gen/__dbc_pget_msg.java
@@ -0,0 +1,80 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __dbc_pget_msg implements XdrAble {
+ public int dbccl_id;
+ public int skeydlen;
+ public int skeydoff;
+ public int skeyulen;
+ public int skeyflags;
+ public byte [] skeydata;
+ public int pkeydlen;
+ public int pkeydoff;
+ public int pkeyulen;
+ public int pkeyflags;
+ public byte [] pkeydata;
+ public int datadlen;
+ public int datadoff;
+ public int dataulen;
+ public int dataflags;
+ public byte [] datadata;
+ public int flags;
+
+ public __dbc_pget_msg() {
+ }
+
+ public __dbc_pget_msg(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(dbccl_id);
+ xdr.xdrEncodeInt(skeydlen);
+ xdr.xdrEncodeInt(skeydoff);
+ xdr.xdrEncodeInt(skeyulen);
+ xdr.xdrEncodeInt(skeyflags);
+ xdr.xdrEncodeDynamicOpaque(skeydata);
+ xdr.xdrEncodeInt(pkeydlen);
+ xdr.xdrEncodeInt(pkeydoff);
+ xdr.xdrEncodeInt(pkeyulen);
+ xdr.xdrEncodeInt(pkeyflags);
+ xdr.xdrEncodeDynamicOpaque(pkeydata);
+ xdr.xdrEncodeInt(datadlen);
+ xdr.xdrEncodeInt(datadoff);
+ xdr.xdrEncodeInt(dataulen);
+ xdr.xdrEncodeInt(dataflags);
+ xdr.xdrEncodeDynamicOpaque(datadata);
+ xdr.xdrEncodeInt(flags);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ dbccl_id = xdr.xdrDecodeInt();
+ skeydlen = xdr.xdrDecodeInt();
+ skeydoff = xdr.xdrDecodeInt();
+ skeyulen = xdr.xdrDecodeInt();
+ skeyflags = xdr.xdrDecodeInt();
+ skeydata = xdr.xdrDecodeDynamicOpaque();
+ pkeydlen = xdr.xdrDecodeInt();
+ pkeydoff = xdr.xdrDecodeInt();
+ pkeyulen = xdr.xdrDecodeInt();
+ pkeyflags = xdr.xdrDecodeInt();
+ pkeydata = xdr.xdrDecodeDynamicOpaque();
+ datadlen = xdr.xdrDecodeInt();
+ datadoff = xdr.xdrDecodeInt();
+ dataulen = xdr.xdrDecodeInt();
+ dataflags = xdr.xdrDecodeInt();
+ datadata = xdr.xdrDecodeDynamicOpaque();
+ flags = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __dbc_pget_msg.java
diff --git a/bdb/rpc_server/java/gen/__dbc_pget_reply.java b/bdb/rpc_server/java/gen/__dbc_pget_reply.java
new file mode 100644
index 00000000000..16cc795878d
--- /dev/null
+++ b/bdb/rpc_server/java/gen/__dbc_pget_reply.java
@@ -0,0 +1,41 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __dbc_pget_reply implements XdrAble {
+ public int status;
+ public byte [] skeydata;
+ public byte [] pkeydata;
+ public byte [] datadata;
+
+ public __dbc_pget_reply() {
+ }
+
+ public __dbc_pget_reply(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(status);
+ xdr.xdrEncodeDynamicOpaque(skeydata);
+ xdr.xdrEncodeDynamicOpaque(pkeydata);
+ xdr.xdrEncodeDynamicOpaque(datadata);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ status = xdr.xdrDecodeInt();
+ skeydata = xdr.xdrDecodeDynamicOpaque();
+ pkeydata = xdr.xdrDecodeDynamicOpaque();
+ datadata = xdr.xdrDecodeDynamicOpaque();
+ }
+
+}
+// End of __dbc_pget_reply.java
diff --git a/bdb/rpc_server/java/gen/__dbc_put_msg.java b/bdb/rpc_server/java/gen/__dbc_put_msg.java
new file mode 100644
index 00000000000..98d12423dc5
--- /dev/null
+++ b/bdb/rpc_server/java/gen/__dbc_put_msg.java
@@ -0,0 +1,65 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __dbc_put_msg implements XdrAble {
+ public int dbccl_id;
+ public int keydlen;
+ public int keydoff;
+ public int keyulen;
+ public int keyflags;
+ public byte [] keydata;
+ public int datadlen;
+ public int datadoff;
+ public int dataulen;
+ public int dataflags;
+ public byte [] datadata;
+ public int flags;
+
+ public __dbc_put_msg() {
+ }
+
+ public __dbc_put_msg(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(dbccl_id);
+ xdr.xdrEncodeInt(keydlen);
+ xdr.xdrEncodeInt(keydoff);
+ xdr.xdrEncodeInt(keyulen);
+ xdr.xdrEncodeInt(keyflags);
+ xdr.xdrEncodeDynamicOpaque(keydata);
+ xdr.xdrEncodeInt(datadlen);
+ xdr.xdrEncodeInt(datadoff);
+ xdr.xdrEncodeInt(dataulen);
+ xdr.xdrEncodeInt(dataflags);
+ xdr.xdrEncodeDynamicOpaque(datadata);
+ xdr.xdrEncodeInt(flags);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ dbccl_id = xdr.xdrDecodeInt();
+ keydlen = xdr.xdrDecodeInt();
+ keydoff = xdr.xdrDecodeInt();
+ keyulen = xdr.xdrDecodeInt();
+ keyflags = xdr.xdrDecodeInt();
+ keydata = xdr.xdrDecodeDynamicOpaque();
+ datadlen = xdr.xdrDecodeInt();
+ datadoff = xdr.xdrDecodeInt();
+ dataulen = xdr.xdrDecodeInt();
+ dataflags = xdr.xdrDecodeInt();
+ datadata = xdr.xdrDecodeDynamicOpaque();
+ flags = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __dbc_put_msg.java
diff --git a/bdb/rpc_server/java/gen/__dbc_put_reply.java b/bdb/rpc_server/java/gen/__dbc_put_reply.java
new file mode 100644
index 00000000000..385f9f783fb
--- /dev/null
+++ b/bdb/rpc_server/java/gen/__dbc_put_reply.java
@@ -0,0 +1,35 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __dbc_put_reply implements XdrAble {
+ public int status;
+ public byte [] keydata;
+
+ public __dbc_put_reply() {
+ }
+
+ public __dbc_put_reply(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(status);
+ xdr.xdrEncodeDynamicOpaque(keydata);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ status = xdr.xdrDecodeInt();
+ keydata = xdr.xdrDecodeDynamicOpaque();
+ }
+
+}
+// End of __dbc_put_reply.java
diff --git a/bdb/rpc_server/java/gen/__env_cachesize_msg.java b/bdb/rpc_server/java/gen/__env_cachesize_msg.java
new file mode 100644
index 00000000000..d1fce1ffa35
--- /dev/null
+++ b/bdb/rpc_server/java/gen/__env_cachesize_msg.java
@@ -0,0 +1,41 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __env_cachesize_msg implements XdrAble {
+ public int dbenvcl_id;
+ public int gbytes;
+ public int bytes;
+ public int ncache;
+
+ public __env_cachesize_msg() {
+ }
+
+ public __env_cachesize_msg(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(dbenvcl_id);
+ xdr.xdrEncodeInt(gbytes);
+ xdr.xdrEncodeInt(bytes);
+ xdr.xdrEncodeInt(ncache);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ dbenvcl_id = xdr.xdrDecodeInt();
+ gbytes = xdr.xdrDecodeInt();
+ bytes = xdr.xdrDecodeInt();
+ ncache = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __env_cachesize_msg.java
diff --git a/bdb/rpc_server/java/gen/__env_cachesize_reply.java b/bdb/rpc_server/java/gen/__env_cachesize_reply.java
new file mode 100644
index 00000000000..193f8355d71
--- /dev/null
+++ b/bdb/rpc_server/java/gen/__env_cachesize_reply.java
@@ -0,0 +1,32 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __env_cachesize_reply implements XdrAble {
+ public int status;
+
+ public __env_cachesize_reply() {
+ }
+
+ public __env_cachesize_reply(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(status);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ status = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __env_cachesize_reply.java
diff --git a/bdb/rpc_server/java/gen/__env_close_msg.java b/bdb/rpc_server/java/gen/__env_close_msg.java
new file mode 100644
index 00000000000..5e657bacfa5
--- /dev/null
+++ b/bdb/rpc_server/java/gen/__env_close_msg.java
@@ -0,0 +1,35 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __env_close_msg implements XdrAble {
+ public int dbenvcl_id;
+ public int flags;
+
+ public __env_close_msg() {
+ }
+
+ public __env_close_msg(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(dbenvcl_id);
+ xdr.xdrEncodeInt(flags);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ dbenvcl_id = xdr.xdrDecodeInt();
+ flags = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __env_close_msg.java
diff --git a/bdb/rpc_server/java/gen/__env_close_reply.java b/bdb/rpc_server/java/gen/__env_close_reply.java
new file mode 100644
index 00000000000..11e61f7c8c3
--- /dev/null
+++ b/bdb/rpc_server/java/gen/__env_close_reply.java
@@ -0,0 +1,32 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __env_close_reply implements XdrAble {
+ public int status;
+
+ public __env_close_reply() {
+ }
+
+ public __env_close_reply(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(status);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ status = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __env_close_reply.java
diff --git a/bdb/rpc_server/java/gen/__env_create_msg.java b/bdb/rpc_server/java/gen/__env_create_msg.java
new file mode 100644
index 00000000000..dbe546ae23a
--- /dev/null
+++ b/bdb/rpc_server/java/gen/__env_create_msg.java
@@ -0,0 +1,32 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __env_create_msg implements XdrAble {
+ public int timeout;
+
+ public __env_create_msg() {
+ }
+
+ public __env_create_msg(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(timeout);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ timeout = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __env_create_msg.java
diff --git a/bdb/rpc_server/java/gen/__env_create_reply.java b/bdb/rpc_server/java/gen/__env_create_reply.java
new file mode 100644
index 00000000000..5427fc4bc1e
--- /dev/null
+++ b/bdb/rpc_server/java/gen/__env_create_reply.java
@@ -0,0 +1,35 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __env_create_reply implements XdrAble {
+ public int status;
+ public int envcl_id;
+
+ public __env_create_reply() {
+ }
+
+ public __env_create_reply(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(status);
+ xdr.xdrEncodeInt(envcl_id);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ status = xdr.xdrDecodeInt();
+ envcl_id = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __env_create_reply.java
diff --git a/bdb/rpc_server/java/gen/__env_dbremove_msg.java b/bdb/rpc_server/java/gen/__env_dbremove_msg.java
new file mode 100644
index 00000000000..9730a92c590
--- /dev/null
+++ b/bdb/rpc_server/java/gen/__env_dbremove_msg.java
@@ -0,0 +1,44 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 3/19/02 10:30 AM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __env_dbremove_msg implements XdrAble {
+ public int dbenvcl_id;
+ public int txnpcl_id;
+ public String name;
+ public String subdb;
+ public int flags;
+
+ public __env_dbremove_msg() {
+ }
+
+ public __env_dbremove_msg(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(dbenvcl_id);
+ xdr.xdrEncodeInt(txnpcl_id);
+ xdr.xdrEncodeString(name);
+ xdr.xdrEncodeString(subdb);
+ xdr.xdrEncodeInt(flags);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ dbenvcl_id = xdr.xdrDecodeInt();
+ txnpcl_id = xdr.xdrDecodeInt();
+ name = xdr.xdrDecodeString();
+ subdb = xdr.xdrDecodeString();
+ flags = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __env_dbremove_msg.java
diff --git a/bdb/rpc_server/java/gen/__env_dbremove_reply.java b/bdb/rpc_server/java/gen/__env_dbremove_reply.java
new file mode 100644
index 00000000000..75cc5a940cc
--- /dev/null
+++ b/bdb/rpc_server/java/gen/__env_dbremove_reply.java
@@ -0,0 +1,32 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 3/19/02 10:30 AM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __env_dbremove_reply implements XdrAble {
+ public int status;
+
+ public __env_dbremove_reply() {
+ }
+
+ public __env_dbremove_reply(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(status);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ status = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __env_dbremove_reply.java
diff --git a/bdb/rpc_server/java/gen/__env_dbrename_msg.java b/bdb/rpc_server/java/gen/__env_dbrename_msg.java
new file mode 100644
index 00000000000..0bbda262b64
--- /dev/null
+++ b/bdb/rpc_server/java/gen/__env_dbrename_msg.java
@@ -0,0 +1,47 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 3/19/02 10:30 AM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __env_dbrename_msg implements XdrAble {
+ public int dbenvcl_id;
+ public int txnpcl_id;
+ public String name;
+ public String subdb;
+ public String newname;
+ public int flags;
+
+ public __env_dbrename_msg() {
+ }
+
+ public __env_dbrename_msg(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(dbenvcl_id);
+ xdr.xdrEncodeInt(txnpcl_id);
+ xdr.xdrEncodeString(name);
+ xdr.xdrEncodeString(subdb);
+ xdr.xdrEncodeString(newname);
+ xdr.xdrEncodeInt(flags);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ dbenvcl_id = xdr.xdrDecodeInt();
+ txnpcl_id = xdr.xdrDecodeInt();
+ name = xdr.xdrDecodeString();
+ subdb = xdr.xdrDecodeString();
+ newname = xdr.xdrDecodeString();
+ flags = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __env_dbrename_msg.java
diff --git a/bdb/rpc_server/java/gen/__env_dbrename_reply.java b/bdb/rpc_server/java/gen/__env_dbrename_reply.java
new file mode 100644
index 00000000000..0cc8882305d
--- /dev/null
+++ b/bdb/rpc_server/java/gen/__env_dbrename_reply.java
@@ -0,0 +1,32 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 3/19/02 10:30 AM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __env_dbrename_reply implements XdrAble {
+ public int status;
+
+ public __env_dbrename_reply() {
+ }
+
+ public __env_dbrename_reply(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(status);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ status = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __env_dbrename_reply.java
diff --git a/bdb/rpc_server/java/gen/__env_encrypt_msg.java b/bdb/rpc_server/java/gen/__env_encrypt_msg.java
new file mode 100644
index 00000000000..84e9a36d372
--- /dev/null
+++ b/bdb/rpc_server/java/gen/__env_encrypt_msg.java
@@ -0,0 +1,38 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 2/13/02 1:05 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __env_encrypt_msg implements XdrAble {
+ public int dbenvcl_id;
+ public String passwd;
+ public int flags;
+
+ public __env_encrypt_msg() {
+ }
+
+ public __env_encrypt_msg(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(dbenvcl_id);
+ xdr.xdrEncodeString(passwd);
+ xdr.xdrEncodeInt(flags);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ dbenvcl_id = xdr.xdrDecodeInt();
+ passwd = xdr.xdrDecodeString();
+ flags = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __env_encrypt_msg.java
diff --git a/bdb/rpc_server/java/gen/__env_encrypt_reply.java b/bdb/rpc_server/java/gen/__env_encrypt_reply.java
new file mode 100644
index 00000000000..e202a3089d0
--- /dev/null
+++ b/bdb/rpc_server/java/gen/__env_encrypt_reply.java
@@ -0,0 +1,32 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 2/13/02 1:05 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __env_encrypt_reply implements XdrAble {
+ public int status;
+
+ public __env_encrypt_reply() {
+ }
+
+ public __env_encrypt_reply(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(status);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ status = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __env_encrypt_reply.java
diff --git a/bdb/rpc_server/java/gen/__env_flags_msg.java b/bdb/rpc_server/java/gen/__env_flags_msg.java
new file mode 100644
index 00000000000..25cd5f85f6d
--- /dev/null
+++ b/bdb/rpc_server/java/gen/__env_flags_msg.java
@@ -0,0 +1,38 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __env_flags_msg implements XdrAble {
+ public int dbenvcl_id;
+ public int flags;
+ public int onoff;
+
+ public __env_flags_msg() {
+ }
+
+ public __env_flags_msg(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(dbenvcl_id);
+ xdr.xdrEncodeInt(flags);
+ xdr.xdrEncodeInt(onoff);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ dbenvcl_id = xdr.xdrDecodeInt();
+ flags = xdr.xdrDecodeInt();
+ onoff = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __env_flags_msg.java
diff --git a/bdb/rpc_server/java/gen/__env_flags_reply.java b/bdb/rpc_server/java/gen/__env_flags_reply.java
new file mode 100644
index 00000000000..d348a9224ea
--- /dev/null
+++ b/bdb/rpc_server/java/gen/__env_flags_reply.java
@@ -0,0 +1,32 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __env_flags_reply implements XdrAble {
+ public int status;
+
+ public __env_flags_reply() {
+ }
+
+ public __env_flags_reply(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(status);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ status = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __env_flags_reply.java
diff --git a/bdb/rpc_server/java/gen/__env_open_msg.java b/bdb/rpc_server/java/gen/__env_open_msg.java
new file mode 100644
index 00000000000..e4649b41f9e
--- /dev/null
+++ b/bdb/rpc_server/java/gen/__env_open_msg.java
@@ -0,0 +1,41 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __env_open_msg implements XdrAble {
+ public int dbenvcl_id;
+ public String home;
+ public int flags;
+ public int mode;
+
+ public __env_open_msg() {
+ }
+
+ public __env_open_msg(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(dbenvcl_id);
+ xdr.xdrEncodeString(home);
+ xdr.xdrEncodeInt(flags);
+ xdr.xdrEncodeInt(mode);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ dbenvcl_id = xdr.xdrDecodeInt();
+ home = xdr.xdrDecodeString();
+ flags = xdr.xdrDecodeInt();
+ mode = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __env_open_msg.java
diff --git a/bdb/rpc_server/java/gen/__env_open_reply.java b/bdb/rpc_server/java/gen/__env_open_reply.java
new file mode 100644
index 00000000000..1994afb4cf2
--- /dev/null
+++ b/bdb/rpc_server/java/gen/__env_open_reply.java
@@ -0,0 +1,35 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __env_open_reply implements XdrAble {
+ public int status;
+ public int envcl_id;
+
+ public __env_open_reply() {
+ }
+
+ public __env_open_reply(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(status);
+ xdr.xdrEncodeInt(envcl_id);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ status = xdr.xdrDecodeInt();
+ envcl_id = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __env_open_reply.java
diff --git a/bdb/rpc_server/java/gen/__env_remove_msg.java b/bdb/rpc_server/java/gen/__env_remove_msg.java
new file mode 100644
index 00000000000..b32d758f0f5
--- /dev/null
+++ b/bdb/rpc_server/java/gen/__env_remove_msg.java
@@ -0,0 +1,38 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __env_remove_msg implements XdrAble {
+ public int dbenvcl_id;
+ public String home;
+ public int flags;
+
+ public __env_remove_msg() {
+ }
+
+ public __env_remove_msg(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(dbenvcl_id);
+ xdr.xdrEncodeString(home);
+ xdr.xdrEncodeInt(flags);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ dbenvcl_id = xdr.xdrDecodeInt();
+ home = xdr.xdrDecodeString();
+ flags = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __env_remove_msg.java
diff --git a/bdb/rpc_server/java/gen/__env_remove_reply.java b/bdb/rpc_server/java/gen/__env_remove_reply.java
new file mode 100644
index 00000000000..19e4d52f662
--- /dev/null
+++ b/bdb/rpc_server/java/gen/__env_remove_reply.java
@@ -0,0 +1,32 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __env_remove_reply implements XdrAble {
+ public int status;
+
+ public __env_remove_reply() {
+ }
+
+ public __env_remove_reply(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(status);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ status = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __env_remove_reply.java
diff --git a/bdb/rpc_server/java/gen/__txn_abort_msg.java b/bdb/rpc_server/java/gen/__txn_abort_msg.java
new file mode 100644
index 00000000000..ff44c534e46
--- /dev/null
+++ b/bdb/rpc_server/java/gen/__txn_abort_msg.java
@@ -0,0 +1,32 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __txn_abort_msg implements XdrAble {
+ public int txnpcl_id;
+
+ public __txn_abort_msg() {
+ }
+
+ public __txn_abort_msg(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(txnpcl_id);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ txnpcl_id = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __txn_abort_msg.java
diff --git a/bdb/rpc_server/java/gen/__txn_abort_reply.java b/bdb/rpc_server/java/gen/__txn_abort_reply.java
new file mode 100644
index 00000000000..58f275c1a8f
--- /dev/null
+++ b/bdb/rpc_server/java/gen/__txn_abort_reply.java
@@ -0,0 +1,32 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __txn_abort_reply implements XdrAble {
+ public int status;
+
+ public __txn_abort_reply() {
+ }
+
+ public __txn_abort_reply(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(status);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ status = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __txn_abort_reply.java
diff --git a/bdb/rpc_server/java/gen/__txn_begin_msg.java b/bdb/rpc_server/java/gen/__txn_begin_msg.java
new file mode 100644
index 00000000000..877031e8d3a
--- /dev/null
+++ b/bdb/rpc_server/java/gen/__txn_begin_msg.java
@@ -0,0 +1,38 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __txn_begin_msg implements XdrAble {
+ public int dbenvcl_id;
+ public int parentcl_id;
+ public int flags;
+
+ public __txn_begin_msg() {
+ }
+
+ public __txn_begin_msg(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(dbenvcl_id);
+ xdr.xdrEncodeInt(parentcl_id);
+ xdr.xdrEncodeInt(flags);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ dbenvcl_id = xdr.xdrDecodeInt();
+ parentcl_id = xdr.xdrDecodeInt();
+ flags = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __txn_begin_msg.java
diff --git a/bdb/rpc_server/java/gen/__txn_begin_reply.java b/bdb/rpc_server/java/gen/__txn_begin_reply.java
new file mode 100644
index 00000000000..65a0c4016c2
--- /dev/null
+++ b/bdb/rpc_server/java/gen/__txn_begin_reply.java
@@ -0,0 +1,35 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __txn_begin_reply implements XdrAble {
+ public int status;
+ public int txnidcl_id;
+
+ public __txn_begin_reply() {
+ }
+
+ public __txn_begin_reply(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(status);
+ xdr.xdrEncodeInt(txnidcl_id);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ status = xdr.xdrDecodeInt();
+ txnidcl_id = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __txn_begin_reply.java
diff --git a/bdb/rpc_server/java/gen/__txn_commit_msg.java b/bdb/rpc_server/java/gen/__txn_commit_msg.java
new file mode 100644
index 00000000000..4b988d0c282
--- /dev/null
+++ b/bdb/rpc_server/java/gen/__txn_commit_msg.java
@@ -0,0 +1,35 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __txn_commit_msg implements XdrAble {
+ public int txnpcl_id;
+ public int flags;
+
+ public __txn_commit_msg() {
+ }
+
+ public __txn_commit_msg(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(txnpcl_id);
+ xdr.xdrEncodeInt(flags);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ txnpcl_id = xdr.xdrDecodeInt();
+ flags = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __txn_commit_msg.java
diff --git a/bdb/rpc_server/java/gen/__txn_commit_reply.java b/bdb/rpc_server/java/gen/__txn_commit_reply.java
new file mode 100644
index 00000000000..b26937b82dd
--- /dev/null
+++ b/bdb/rpc_server/java/gen/__txn_commit_reply.java
@@ -0,0 +1,32 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __txn_commit_reply implements XdrAble {
+ public int status;
+
+ public __txn_commit_reply() {
+ }
+
+ public __txn_commit_reply(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(status);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ status = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __txn_commit_reply.java
diff --git a/bdb/rpc_server/java/gen/__txn_discard_msg.java b/bdb/rpc_server/java/gen/__txn_discard_msg.java
new file mode 100644
index 00000000000..87f5d4f77a7
--- /dev/null
+++ b/bdb/rpc_server/java/gen/__txn_discard_msg.java
@@ -0,0 +1,35 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __txn_discard_msg implements XdrAble {
+ public int txnpcl_id;
+ public int flags;
+
+ public __txn_discard_msg() {
+ }
+
+ public __txn_discard_msg(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(txnpcl_id);
+ xdr.xdrEncodeInt(flags);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ txnpcl_id = xdr.xdrDecodeInt();
+ flags = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __txn_discard_msg.java
diff --git a/bdb/rpc_server/java/gen/__txn_discard_reply.java b/bdb/rpc_server/java/gen/__txn_discard_reply.java
new file mode 100644
index 00000000000..9792211afcc
--- /dev/null
+++ b/bdb/rpc_server/java/gen/__txn_discard_reply.java
@@ -0,0 +1,32 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __txn_discard_reply implements XdrAble {
+ public int status;
+
+ public __txn_discard_reply() {
+ }
+
+ public __txn_discard_reply(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(status);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ status = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __txn_discard_reply.java
diff --git a/bdb/rpc_server/java/gen/__txn_prepare_msg.java b/bdb/rpc_server/java/gen/__txn_prepare_msg.java
new file mode 100644
index 00000000000..6e09f2c7771
--- /dev/null
+++ b/bdb/rpc_server/java/gen/__txn_prepare_msg.java
@@ -0,0 +1,35 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __txn_prepare_msg implements XdrAble {
+ public int txnpcl_id;
+ public byte [] gid;
+
+ public __txn_prepare_msg() {
+ }
+
+ public __txn_prepare_msg(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(txnpcl_id);
+ xdr.xdrEncodeOpaque(gid, 128);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ txnpcl_id = xdr.xdrDecodeInt();
+ gid = xdr.xdrDecodeOpaque(128);
+ }
+
+}
+// End of __txn_prepare_msg.java
diff --git a/bdb/rpc_server/java/gen/__txn_prepare_reply.java b/bdb/rpc_server/java/gen/__txn_prepare_reply.java
new file mode 100644
index 00000000000..d7590117952
--- /dev/null
+++ b/bdb/rpc_server/java/gen/__txn_prepare_reply.java
@@ -0,0 +1,32 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __txn_prepare_reply implements XdrAble {
+ public int status;
+
+ public __txn_prepare_reply() {
+ }
+
+ public __txn_prepare_reply(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(status);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ status = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __txn_prepare_reply.java
diff --git a/bdb/rpc_server/java/gen/__txn_recover_msg.java b/bdb/rpc_server/java/gen/__txn_recover_msg.java
new file mode 100644
index 00000000000..65153334403
--- /dev/null
+++ b/bdb/rpc_server/java/gen/__txn_recover_msg.java
@@ -0,0 +1,38 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __txn_recover_msg implements XdrAble {
+ public int dbenvcl_id;
+ public int count;
+ public int flags;
+
+ public __txn_recover_msg() {
+ }
+
+ public __txn_recover_msg(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(dbenvcl_id);
+ xdr.xdrEncodeInt(count);
+ xdr.xdrEncodeInt(flags);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ dbenvcl_id = xdr.xdrDecodeInt();
+ count = xdr.xdrDecodeInt();
+ flags = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __txn_recover_msg.java
diff --git a/bdb/rpc_server/java/gen/__txn_recover_reply.java b/bdb/rpc_server/java/gen/__txn_recover_reply.java
new file mode 100644
index 00000000000..0161ec949da
--- /dev/null
+++ b/bdb/rpc_server/java/gen/__txn_recover_reply.java
@@ -0,0 +1,41 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 12/18/01 7:23 PM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+public class __txn_recover_reply implements XdrAble {
+ public int status;
+ public int [] txn;
+ public byte [] gid;
+ public int retcount;
+
+ public __txn_recover_reply() {
+ }
+
+ public __txn_recover_reply(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ xdrDecode(xdr);
+ }
+
+ public void xdrEncode(XdrEncodingStream xdr)
+ throws OncRpcException, IOException {
+ xdr.xdrEncodeInt(status);
+ xdr.xdrEncodeIntVector(txn);
+ xdr.xdrEncodeDynamicOpaque(gid);
+ xdr.xdrEncodeInt(retcount);
+ }
+
+ public void xdrDecode(XdrDecodingStream xdr)
+ throws OncRpcException, IOException {
+ status = xdr.xdrDecodeInt();
+ txn = xdr.xdrDecodeIntVector();
+ gid = xdr.xdrDecodeDynamicOpaque();
+ retcount = xdr.xdrDecodeInt();
+ }
+
+}
+// End of __txn_recover_reply.java
diff --git a/bdb/rpc_server/java/gen/db_server.java b/bdb/rpc_server/java/gen/db_server.java
new file mode 100644
index 00000000000..a14a77028a2
--- /dev/null
+++ b/bdb/rpc_server/java/gen/db_server.java
@@ -0,0 +1,67 @@
+/*
+ * Automatically generated by jrpcgen 0.95.1 on 3/19/02 10:30 AM
+ * jrpcgen is part of the "Remote Tea" ONC/RPC package for Java
+ * See http://acplt.org/ks/remotetea.html for details
+ */
+package com.sleepycat.db.rpcserver;
+import org.acplt.oncrpc.*;
+import java.io.IOException;
+
+/**
+ * A collection of constants used by the "db_server" ONC/RPC program.
+ */
+public interface db_server {
+ public static final int __DB_db_close_4001 = 19;
+ public static final int __DB_db_flags_4001 = 24;
+ public static final int __DB_dbc_dup_4001 = 47;
+ public static final int __DB_db_encrypt_4001 = 22;
+ public static final int __DB_env_dbrename_4001 = 5;
+ public static final int __DB_env_remove_4001 = 9;
+ public static final int __DB_dbc_pget_4001 = 49;
+ public static final int __DB_env_cachesize_4001 = 1;
+ public static final int __DB_db_lorder_4001 = 29;
+ public static final int __DB_db_key_range_4001 = 28;
+ public static final int __DB_db_bt_minkey_4001 = 18;
+ public static final int __DB_db_sync_4001 = 40;
+ public static final int __DB_dbc_close_4001 = 44;
+ public static final int __DB_db_join_4001 = 43;
+ public static final int __DB_db_pagesize_4001 = 31;
+ public static final int DB_RPC_SERVERVERS = 4001;
+ public static final int __DB_db_open_4001 = 30;
+ public static final int __DB_dbc_get_4001 = 48;
+ public static final int __DB_db_cursor_4001 = 42;
+ public static final int __DB_txn_commit_4001 = 12;
+ public static final int __DB_dbc_del_4001 = 46;
+ public static final int __DB_env_create_4001 = 3;
+ public static final int __DB_env_open_4001 = 8;
+ public static final int __DB_txn_prepare_4001 = 14;
+ public static final int __DB_db_pget_4001 = 32;
+ public static final int __DB_db_stat_4001 = 39;
+ public static final int __DB_db_h_nelem_4001 = 27;
+ public static final int __DB_db_remove_4001 = 37;
+ public static final int __DB_db_re_delim_4001 = 34;
+ public static final int __DB_db_re_pad_4001 = 36;
+ public static final int __DB_txn_abort_4001 = 10;
+ public static final int __DB_txn_recover_4001 = 15;
+ public static final int __DB_db_get_4001 = 25;
+ public static final int __DB_db_extentsize_4001 = 23;
+ public static final int DB_RPC_SERVERPROG = 351457;
+ public static final int __DB_dbc_put_4001 = 50;
+ public static final int __DB_db_truncate_4001 = 41;
+ public static final int __DB_db_del_4001 = 21;
+ public static final int __DB_db_bt_maxkey_4001 = 17;
+ public static final int __DB_env_dbremove_4001 = 4;
+ public static final int __DB_txn_discard_4001 = 13;
+ public static final int __DB_db_re_len_4001 = 35;
+ public static final int __DB_env_close_4001 = 2;
+ public static final int __DB_env_flags_4001 = 7;
+ public static final int __DB_db_rename_4001 = 38;
+ public static final int __DB_db_associate_4001 = 16;
+ public static final int __DB_txn_begin_4001 = 11;
+ public static final int __DB_env_encrypt_4001 = 6;
+ public static final int __DB_db_h_ffactor_4001 = 26;
+ public static final int __DB_db_put_4001 = 33;
+ public static final int __DB_db_create_4001 = 20;
+ public static final int __DB_dbc_count_4001 = 45;
+}
+// End of db_server.java
diff --git a/bdb/rpc_server/java/jrpcgen.jar b/bdb/rpc_server/java/jrpcgen.jar
new file mode 100644
index 00000000000..338825b848d
--- /dev/null
+++ b/bdb/rpc_server/java/jrpcgen.jar
Binary files differ
diff --git a/bdb/rpc_server/java/oncrpc.jar b/bdb/rpc_server/java/oncrpc.jar
new file mode 100644
index 00000000000..e0f5cfa6966
--- /dev/null
+++ b/bdb/rpc_server/java/oncrpc.jar
Binary files differ
diff --git a/bdb/rpc_server/java/s_jrpcgen b/bdb/rpc_server/java/s_jrpcgen
new file mode 100644
index 00000000000..fed8cbf56bb
--- /dev/null
+++ b/bdb/rpc_server/java/s_jrpcgen
@@ -0,0 +1,3 @@
+#!/bin/sh
+
+java -jar jrpcgen.jar -d gen -noclient -nobackup -p com.sleepycat.db.rpcserver -s DbServerStub ../db_server.x
diff --git a/bdb/rpc_server/rpc.src b/bdb/rpc_server/rpc.src
index 5dd25205136..7afee49b066 100644
--- a/bdb/rpc_server/rpc.src
+++ b/bdb/rpc_server/rpc.src
@@ -1,17 +1,17 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: rpc.src,v 1.75 2002/07/18 02:57:19 margo Exp $
#
-# $Id: rpc.src,v 1.30 2000/12/20 21:53:05 ubell Exp $
# Syntax:
-# BEGIN function_name RPC # {CODE | NOCLNTCODE | RETCODE | NOFUNC}
+# BEGIN function_name {CODE | RETCODE | NOFUNC}
# CODE: generate XDR and client code, return status
# Used for functions that just return a status and nothing else.
# RETCODE:generate XDR and client code, call return function
# (generate template return function)
# Used for functions that returns data.
-# NOCLNTCODE: generate only XDR and server functions
-# Used for functions that are "different" on the client.
-# Primarily used for envcreate (which is called from
-# the dbenv->set_server method on the client side) and
-# dbcreate, which is called from non-generated code.
# NOFUNC: generate a client "unsupported function" with right args
# Used for unsupported functions.
#
@@ -19,9 +19,10 @@
# IGNORE: not passed to server
# STRING: string passed to server
# DBT: DBT arg passed to server
-# LIST: opaque list passed to server (NULL-terminated opaque list)
+# LIST: list passed to server (NULL-terminated list of something)
# INT: integer passed to server
# ID: cl_id from arg passed to server
+# GID: global id passed to server
# CONST: do not generate COMPQUIET (for NOFUNC only)
# FUNCPROT prototype
# FUNCARG functiontype
@@ -38,11 +39,11 @@
# All messages automatically return "status" and return that from
# the call to the function. RET's are additional things the server
# may return. RET is like ARG but does not need the IGNORE option.
-# RET {STRING | INT | DBT | LIST | ID} varname [STRING | INT | ID]
+# RET {STRING | INT | DBT | LIST | ID} varname [GID | INT | ID]
# STRING: string from server
# DBT: DBT arg from server
-# LIST: opaque list from server (NULL-terminated opaque list)
-# Must have list type of STRING, ID or INT specified
+# LIST: list from server (NULL-terminated list)
+# Must have list type of GID, ID or INT specified
# INT: integer from server
# ID: id from server stored in cl_id
# END function end.
@@ -50,122 +51,157 @@
#
# Environment functions
#
-BEGIN env_cachesize 1 CODE
+BEGIN env_alloc NOFUNC
+ARG ID DB_ENV * dbenv
+FUNCPROT void *(*)(size_t)
+FUNCARG void *(*func0) __P((size_t))
+FUNCPROT void *(*)(void *, size_t)
+FUNCARG void *(*func1) __P((void *, size_t))
+FUNCPROT void (*)(void *)
+FUNCARG void (*func2) __P((void *))
+END
+BEGIN set_app_dispatch NOFUNC
+ARG ID DB_ENV * dbenv
+FUNCPROT int (*)(DB_ENV *, DBT *, DB_LSN *, db_recops)
+FUNCARG int (*func0) __P((DB_ENV *, DBT *, DB_LSN *, db_recops))
+END
+BEGIN env_cachesize CODE
ARG ID DB_ENV * dbenv
ARG INT u_int32_t gbytes
ARG INT u_int32_t bytes
ARG INT int ncache
END
-BEGIN env_close 1 RETCODE
+BEGIN env_close RETCODE
ARG ID DB_ENV * dbenv
ARG INT u_int32_t flags
END
-BEGIN env_create 1 NOCLNTCODE
+BEGIN env_create RETCODE
+ARG IGNORE DB_ENV * dbenv
ARG INT long timeout
RET ID long env
END
-BEGIN set_data_dir 1 NOFUNC
+BEGIN set_data_dir NOFUNC
ARG ID DB_ENV * dbenv
ARG STRING const char * dir
END
-BEGIN env_set_feedback 1 NOFUNC
+BEGIN env_dbremove CODE
+ARG ID DB_ENV * dbenv
+ARG ID DB_TXN * txnp
+ARG STRING const char * name
+ARG STRING const char * subdb
+ARG INT u_int32_t flags
+END
+BEGIN env_dbrename CODE
+ARG ID DB_ENV * dbenv
+ARG ID DB_TXN * txnp
+ARG STRING const char * name
+ARG STRING const char * subdb
+ARG STRING const char * newname
+ARG INT u_int32_t flags
+END
+BEGIN env_encrypt CODE
+ARG ID DB_ENV * dbenv
+ARG STRING const char * passwd
+ARG INT u_int32_t flags
+END
+BEGIN env_set_feedback NOFUNC
ARG ID DB_ENV * dbenv
FUNCPROT void (*)(DB_ENV *, int, int)
FUNCARG void (*func0) __P((DB_ENV *, int, int))
END
-BEGIN env_flags 1 CODE
+BEGIN env_flags CODE
ARG ID DB_ENV * dbenv
ARG INT u_int32_t flags
ARG INT int onoff
END
-BEGIN set_lg_bsize 1 NOFUNC
+BEGIN set_lg_bsize NOFUNC
ARG ID DB_ENV * dbenv
ARG INT u_int32_t bsize
END
-BEGIN set_lg_dir 1 NOFUNC
+BEGIN set_lg_dir NOFUNC
ARG ID DB_ENV * dbenv
ARG STRING const char * dir
END
-BEGIN set_lg_max 1 NOFUNC
+BEGIN set_lg_max NOFUNC
ARG ID DB_ENV * dbenv
ARG INT u_int32_t max
END
-BEGIN set_lk_conflict 1 NOFUNC
+BEGIN set_lg_regionmax NOFUNC
+ARG ID DB_ENV * dbenv
+ARG INT u_int32_t max
+END
+BEGIN set_lk_conflict NOFUNC
ARG ID DB_ENV * dbenv
ARG INT u_int8_t * conflicts
ARG INT int modes
END
-BEGIN set_lk_detect 1 NOFUNC
+BEGIN set_lk_detect NOFUNC
ARG ID DB_ENV * dbenv
ARG INT u_int32_t detect
END
-BEGIN set_lk_max 1 NOFUNC
+BEGIN set_lk_max NOFUNC
ARG ID DB_ENV * dbenv
ARG INT u_int32_t max
END
-BEGIN set_lk_max_locks 1 NOFUNC
+BEGIN set_lk_max_locks NOFUNC
ARG ID DB_ENV * dbenv
ARG INT u_int32_t max
END
-BEGIN set_lk_max_lockers 1 NOFUNC
+BEGIN set_lk_max_lockers NOFUNC
ARG ID DB_ENV * dbenv
ARG INT u_int32_t max
END
-BEGIN set_lk_max_objects 1 NOFUNC
+BEGIN set_lk_max_objects NOFUNC
ARG ID DB_ENV * dbenv
ARG INT u_int32_t max
END
-BEGIN set_mp_mmapsize 1 NOFUNC
+BEGIN set_mp_mmapsize NOFUNC
ARG ID DB_ENV * dbenv
ARG INT size_t mmapsize
END
-BEGIN set_mutex_locks 1 NOFUNC
-ARG ID DB_ENV * dbenv
-ARG INT int do_lock
-END
-BEGIN env_open 1 RETCODE
+BEGIN env_open RETCODE
ARG ID DB_ENV * dbenv
ARG STRING const char * home
ARG INT u_int32_t flags
ARG INT int mode
+RET ID long env
END
-BEGIN env_paniccall 1 NOFUNC
+BEGIN env_paniccall NOFUNC
ARG ID DB_ENV * dbenv
FUNCPROT void (*)(DB_ENV *, int)
FUNCARG void (*func0) __P((DB_ENV *, int))
END
-BEGIN set_recovery_init 1 NOFUNC
-ARG ID DB_ENV * dbenv
-FUNCPROT int (*)(DB_ENV *)
-FUNCARG int (*func0) __P((DB_ENV *))
-END
-BEGIN env_remove 1 RETCODE
+BEGIN env_remove RETCODE
ARG ID DB_ENV * dbenv
ARG STRING const char * home
ARG INT u_int32_t flags
END
-BEGIN set_shm_key 1 NOFUNC
+BEGIN set_shm_key NOFUNC
ARG ID DB_ENV * dbenv
ARG INT long shm_key
END
-BEGIN set_tmp_dir 1 NOFUNC
+BEGIN set_tas_spins NOFUNC
ARG ID DB_ENV * dbenv
-ARG STRING const char * dir
+ARG INT u_int32_t tas_spins
END
-BEGIN set_tx_recover 1 NOFUNC
+BEGIN set_timeout NOFUNC
ARG ID DB_ENV * dbenv
-FUNCPROT int (*)(DB_ENV *, DBT *, DB_LSN *, db_recops)
-FUNCARG int (*func0) __P((DB_ENV *, DBT *, DB_LSN *, db_recops))
+ARG INT u_int32_t timeout
+ARG INT u_int32_t flags
+END
+BEGIN set_tmp_dir NOFUNC
+ARG ID DB_ENV * dbenv
+ARG STRING const char * dir
END
-BEGIN set_tx_max 1 NOFUNC
+BEGIN set_tx_max NOFUNC
ARG ID DB_ENV * dbenv
ARG INT u_int32_t max
END
-BEGIN set_tx_timestamp 1 NOFUNC
+BEGIN set_tx_timestamp NOFUNC
ARG ID DB_ENV * dbenv
ARG INT time_t * max
END
-BEGIN set_verbose 1 NOFUNC
+BEGIN set_verbose NOFUNC
ARG ID DB_ENV * dbenv
ARG INT u_int32_t which
ARG INT int onoff
@@ -173,100 +209,197 @@ END
#
# Transaction functions
#
-BEGIN txn_abort 1 RETCODE
+BEGIN txn_abort RETCODE
ARG ID DB_TXN * txnp
END
-BEGIN txn_begin 1 RETCODE
-ARG ID DB_ENV * envp
+BEGIN txn_begin RETCODE
+ARG ID DB_ENV * dbenv
ARG ID DB_TXN * parent
ARG IGNORE DB_TXN ** txnpp
ARG INT u_int32_t flags
RET ID long txnid
END
-BEGIN txn_checkpoint 1 NOFUNC
+BEGIN txn_checkpoint NOFUNC
ARG ID DB_ENV * dbenv
ARG INT u_int32_t kbyte
ARG INT u_int32_t min
+ARG INT u_int32_t flags
+END
+BEGIN txn_commit RETCODE
+ARG ID DB_TXN * txnp
+ARG INT u_int32_t flags
END
-BEGIN txn_commit 1 RETCODE
+BEGIN txn_discard RETCODE
ARG ID DB_TXN * txnp
ARG INT u_int32_t flags
END
-BEGIN txn_prepare 1 NOFUNC
+BEGIN txn_prepare CODE
ARG ID DB_TXN * txnp
+ARG GID u_int8_t * gid
END
-BEGIN txn_stat 1 NOFUNC
+BEGIN txn_recover RETCODE
+ARG ID DB_ENV * dbenv
+ARG IGNORE DB_PREPLIST * preplist
+ARG INT long count
+ARG IGNORE long * retp
+ARG INT u_int32_t flags
+RET LIST DB_TXN * txn ID
+RET LIST u_int8_t * gid GID
+RET INT long retcount
+END
+BEGIN txn_stat NOFUNC
ARG ID DB_ENV * dbenv
ARG IGNORE DB_TXN_STAT ** statp
-FUNCPROT void *(*)(size_t)
-FUNCARG void *(*func0) __P((size_t))
+ARG INT u_int32_t flags
+END
+BEGIN txn_timeout NOFUNC
+ARG ID DB_TXN * txnp
+ARG INT u_int32_t timeout
+ARG INT u_int32_t flags
+END
+#
+# Replication functions
+#
+BEGIN rep_elect NOFUNC
+ARG ID DB_ENV * dbenv
+ARG INT int nsites
+ARG INT int pri
+ARG INT u_int32_t timeout
+ARG IGNORE int * idp
+END
+BEGIN rep_flush NOFUNC
+ARG ID DB_ENV * dbenv
+END
+BEGIN rep_process_message NOFUNC
+ARG ID DB_ENV * dbenv
+ARG DBT DBT * rec
+ARG DBT DBT * control
+ARG IGNORE int * idp
+END
+BEGIN rep_set_limit NOFUNC
+ARG ID DB_ENV * dbenv
+ARG INT u_int32_t mbytes
+ARG INT u_int32_t bytes
+END
+BEGIN rep_set_request NOFUNC
+ARG ID DB_ENV * dbenv
+ARG INT u_int32_t min
+ARG INT u_int32_t max
+END
+BEGIN rep_set_rep_transport NOFUNC
+ARG ID DB_ENV * dbenv
+ARG INT int id
+FUNCPROT int (*)(DB_ENV *, const DBT *, const DBT *, int, u_int32_t)
+FUNCARG int (*func0) __P((DB_ENV *, const DBT *, const DBT *, int, u_int32_t))
+END
+BEGIN rep_start NOFUNC
+ARG ID DB_ENV * dbenv
+ARG DBT DBT * cdata
+ARG INT u_int32_t flags
+END
+BEGIN rep_stat NOFUNC
+ARG ID DB_ENV * dbenv
+ARG IGNORE DB_REP_STAT ** statp
+ARG INT u_int32_t flags
END
#
# Database functions
#
-BEGIN db_bt_compare 1 NOFUNC
+BEGIN db_alloc NOFUNC
+ARG ID DB * dbp
+FUNCPROT void *(*)(size_t)
+FUNCARG void *(*func0) __P((size_t))
+FUNCPROT void *(*)(void *, size_t)
+FUNCARG void *(*func1) __P((void *, size_t))
+FUNCPROT void (*)(void *)
+FUNCARG void (*func2) __P((void *))
+END
+BEGIN db_associate CODE
+ARG ID DB * dbp
+ARG ID DB_TXN * txnp
+ARG ID DB * sdbp
+FUNCPROT int (*)(DB *, const DBT *, const DBT *, DBT *)
+FUNCARG int (*func0) __P((DB *, const DBT *, const DBT *, DBT *))
+ARG INT u_int32_t flags
+END
+BEGIN db_bt_compare NOFUNC
ARG ID DB * dbp
FUNCPROT int (*)(DB *, const DBT *, const DBT *)
FUNCARG int (*func0) __P((DB *, const DBT *, const DBT *))
END
-BEGIN db_bt_maxkey 1 CODE
+BEGIN db_bt_maxkey CODE
ARG ID DB * dbp
ARG INT u_int32_t maxkey
END
-BEGIN db_bt_minkey 1 CODE
+BEGIN db_bt_minkey CODE
ARG ID DB * dbp
ARG INT u_int32_t minkey
END
-BEGIN db_bt_prefix 1 NOFUNC
+BEGIN db_bt_prefix NOFUNC
ARG ID DB * dbp
FUNCPROT size_t(*)(DB *, const DBT *, const DBT *)
FUNCARG size_t (*func0) __P((DB *, const DBT *, const DBT *))
END
-BEGIN db_set_append_recno 1 NOFUNC
+BEGIN db_set_append_recno NOFUNC
ARG ID DB * dbp
FUNCPROT int (*)(DB *, DBT *, db_recno_t)
FUNCARG int (*func0) __P((DB *, DBT *, db_recno_t))
END
-BEGIN db_cachesize 1 NOFUNC
+BEGIN db_cache_priority NOFUNC
+ARG ID DB * dbp
+ARG INT DB_CACHE_PRIORITY priority
+END
+BEGIN db_cachesize NOFUNC
ARG ID DB * dbp
ARG INT u_int32_t gbytes
ARG INT u_int32_t bytes
ARG INT int ncache
END
-BEGIN db_close 1 RETCODE
+BEGIN db_close RETCODE
ARG ID DB * dbp
ARG INT u_int32_t flags
END
-BEGIN db_create 1 NOCLNTCODE
+BEGIN db_create RETCODE
+ARG IGNORE DB * dbp
+ARG ID DB_ENV * dbenv
ARG INT u_int32_t flags
-ARG ID DB_ENV * envp
-RET ID long dbp
+RET ID long db
END
-BEGIN db_del 1 CODE
+BEGIN db_del CODE
ARG ID DB * dbp
ARG ID DB_TXN * txnp
ARG DBT DBT * key
ARG INT u_int32_t flags
END
-BEGIN db_extentsize 1 CODE
+BEGIN db_dup_compare NOFUNC
+ARG ID DB * dbp
+FUNCPROT int (*)(DB *, const DBT *, const DBT *)
+FUNCARG int (*func0) __P((DB *, const DBT *, const DBT *))
+END
+BEGIN db_encrypt CODE
+ARG ID DB * dbp
+ARG STRING const char * passwd
+ARG INT u_int32_t flags
+END
+BEGIN db_extentsize CODE
ARG ID DB * dbp
ARG INT u_int32_t extentsize
END
-BEGIN db_fd 1 NOFUNC
+BEGIN db_fd NOFUNC
ARG ID DB * dbp
ARG IGNORE int * fdp
END
-BEGIN db_feedback 1 NOFUNC
+BEGIN db_feedback NOFUNC
ARG ID DB * dbp
FUNCPROT void (*)(DB *, int, int)
FUNCARG void (*func0) __P((DB *, int, int))
END
-BEGIN db_flags 1 CODE
+BEGIN db_flags CODE
ARG ID DB * dbp
ARG INT u_int32_t flags
END
-BEGIN db_get 1 RETCODE
+BEGIN db_get RETCODE
ARG ID DB * dbp
ARG ID DB_TXN * txnp
ARG DBT DBT * key
@@ -275,20 +408,20 @@ ARG INT u_int32_t flags
RET DBT DBT * key
RET DBT DBT * data
END
-BEGIN db_h_ffactor 1 CODE
+BEGIN db_h_ffactor CODE
ARG ID DB * dbp
ARG INT u_int32_t ffactor
END
-BEGIN db_h_hash 1 NOFUNC
+BEGIN db_h_hash NOFUNC
ARG ID DB * dbp
FUNCPROT u_int32_t(*)(DB *, const void *, u_int32_t)
FUNCARG u_int32_t (*func0) __P((DB *, const void *, u_int32_t))
END
-BEGIN db_h_nelem 1 CODE
+BEGIN db_h_nelem CODE
ARG ID DB * dbp
ARG INT u_int32_t nelem
END
-BEGIN db_key_range 1 RETCODE
+BEGIN db_key_range RETCODE
ARG ID DB * dbp
ARG ID DB_TXN * txnp
ARG DBT DBT * key
@@ -298,139 +431,152 @@ RET DBL double less
RET DBL double equal
RET DBL double greater
END
-BEGIN db_lorder 1 CODE
+BEGIN db_lorder CODE
ARG ID DB * dbp
ARG INT int lorder
END
-BEGIN db_malloc 1 NOFUNC
-ARG ID DB * dbp
-FUNCPROT void *(*)(size_t)
-FUNCARG void *(*func0) __P((size_t))
-END
# XXX
# The line:
# RET INT u_int32_t dbflags
# should go away when a get_flags method exists. It is
# needed now because Tcl looks at dbp->flags.
#
-BEGIN db_open 1 RETCODE
+BEGIN db_open RETCODE
ARG ID DB * dbp
+ARG ID DB_TXN * txnp
ARG STRING const char * name
ARG STRING const char * subdb
ARG INT DBTYPE type
ARG INT u_int32_t flags
ARG INT int mode
+RET ID long db
RET INT DBTYPE type
RET INT u_int32_t dbflags
+RET INT int lorder
END
-BEGIN db_pagesize 1 CODE
+BEGIN db_pagesize CODE
ARG ID DB * dbp
ARG INT u_int32_t pagesize
END
-BEGIN db_panic 1 NOFUNC
+BEGIN db_panic NOFUNC
ARG ID DB * dbp
FUNCPROT void (*)(DB_ENV *, int)
FUNCARG void (*func0) __P((DB_ENV *, int))
END
-BEGIN db_put 1 RETCODE
+BEGIN db_pget RETCODE
ARG ID DB * dbp
ARG ID DB_TXN * txnp
-ARG DBT DBT * key
+ARG DBT DBT * skey
+ARG DBT DBT * pkey
ARG DBT DBT * data
ARG INT u_int32_t flags
-RET DBT DBT * key
+RET DBT DBT * skey
+RET DBT DBT * pkey
+RET DBT DBT * data
END
-BEGIN db_realloc 1 NOFUNC
+BEGIN db_put RETCODE
ARG ID DB * dbp
-FUNCPROT void *(*)(void *, size_t)
-FUNCARG void *(*func0) __P((void *, size_t))
+ARG ID DB_TXN * txnp
+ARG DBT DBT * key
+ARG DBT DBT * data
+ARG INT u_int32_t flags
+RET DBT DBT * key
END
-BEGIN db_re_delim 1 CODE
+BEGIN db_re_delim CODE
ARG ID DB * dbp
ARG INT int delim
END
-BEGIN db_re_len 1 CODE
+BEGIN db_re_len CODE
ARG ID DB * dbp
ARG INT u_int32_t len
END
-BEGIN db_re_pad 1 CODE
+BEGIN db_re_pad CODE
ARG ID DB * dbp
ARG INT int pad
END
-BEGIN db_re_source 1 NOFUNC
+BEGIN db_re_source NOFUNC
ARG ID DB * dbp
ARG STRING const char * re_source
END
-BEGIN db_remove 1 RETCODE
+BEGIN db_remove RETCODE
ARG ID DB * dbp
ARG STRING const char * name
ARG STRING const char * subdb
ARG INT u_int32_t flags
END
-BEGIN db_rename 1 RETCODE
+BEGIN db_rename RETCODE
ARG ID DB * dbp
ARG STRING const char * name
ARG STRING const char * subdb
ARG STRING const char * newname
ARG INT u_int32_t flags
END
-BEGIN db_stat 1 RETCODE
+BEGIN db_stat RETCODE
ARG ID DB * dbp
ARG IGNORE void * sp
-FUNCPROT void *(*)(size_t)
-FUNCARG void *(*func0) __P((size_t))
ARG INT u_int32_t flags
RET LIST u_int32_t * stats INT
END
-BEGIN db_swapped 1 CODE
+BEGIN db_sync CODE
ARG ID DB * dbp
+ARG INT u_int32_t flags
+END
+BEGIN db_truncate RETCODE
+ARG ID DB * dbp
+ARG ID DB_TXN * txnp
+ARG IGNORE u_int32_t * countp
+ARG INT u_int32_t flags
+RET INT u_int32_t count
END
-BEGIN db_sync 1 CODE
+BEGIN db_upgrade NOFUNC
ARG ID DB * dbp
+ARG STRING const char * fname
ARG INT u_int32_t flags
END
-BEGIN db_upgrade 1 NOFUNC
+BEGIN db_verify NOFUNC
ARG ID DB * dbp
ARG STRING const char * fname
+ARG STRING const char * subdb
+ARG IGNORE FILE * outfile
ARG INT u_int32_t flags
END
#
# Cursor functions
#
-BEGIN db_cursor 1 RETCODE
+BEGIN db_cursor RETCODE
ARG ID DB * dbp
ARG ID DB_TXN * txnp
ARG IGNORE DBC ** dbcpp
ARG INT u_int32_t flags
RET ID long dbcid
END
-BEGIN db_join 1 RETCODE
+BEGIN db_join RETCODE
ARG ID DB * dbp
ARG LIST DBC ** curs ID
ARG IGNORE DBC ** dbcp
ARG INT u_int32_t flags
RET ID long dbcid
END
-BEGIN dbc_close 1 RETCODE
+BEGIN dbc_close RETCODE
ARG ID DBC * dbc
END
-BEGIN dbc_count 1 RETCODE
+BEGIN dbc_count RETCODE
ARG ID DBC * dbc
ARG IGNORE db_recno_t * countp
ARG INT u_int32_t flags
RET INT db_recno_t dupcount
END
-BEGIN dbc_del 1 CODE
+BEGIN dbc_del CODE
ARG ID DBC * dbc
ARG INT u_int32_t flags
END
-BEGIN dbc_dup 1 RETCODE
+BEGIN dbc_dup RETCODE
ARG ID DBC * dbc
ARG IGNORE DBC ** dbcp
ARG INT u_int32_t flags
RET ID long dbcid
END
-BEGIN dbc_get 1 RETCODE
+BEGIN dbc_get RETCODE
ARG ID DBC * dbc
ARG DBT DBT * key
ARG DBT DBT * data
@@ -438,7 +584,17 @@ ARG INT u_int32_t flags
RET DBT DBT * key
RET DBT DBT * data
END
-BEGIN dbc_put 1 RETCODE
+BEGIN dbc_pget RETCODE
+ARG ID DBC * dbc
+ARG DBT DBT * skey
+ARG DBT DBT * pkey
+ARG DBT DBT * data
+ARG INT u_int32_t flags
+RET DBT DBT * skey
+RET DBT DBT * pkey
+RET DBT DBT * data
+END
+BEGIN dbc_put RETCODE
ARG ID DBC * dbc
ARG DBT DBT * key
ARG DBT DBT * data
@@ -452,13 +608,13 @@ END
#
# Locking subsystem
#
-BEGIN lock_detect 1 NOFUNC
+BEGIN lock_detect NOFUNC
ARG ID DB_ENV * dbenv
ARG INT u_int32_t flags
ARG INT u_int32_t atype
ARG IGNORE int * aborted
END
-BEGIN lock_get 1 NOFUNC
+BEGIN lock_get NOFUNC
ARG ID DB_ENV * dbenv
ARG INT u_int32_t locker
ARG INT u_int32_t flags
@@ -466,21 +622,24 @@ ARG CONST const DBT * obj
ARG INT db_lockmode_t mode
ARG IGNORE DB_LOCK * lock
END
-BEGIN lock_id 1 NOFUNC
+BEGIN lock_id NOFUNC
ARG ID DB_ENV * dbenv
ARG INT u_int32_t * idp
END
-BEGIN lock_put 1 NOFUNC
+BEGIN lock_id_free NOFUNC
+ARG ID DB_ENV * dbenv
+ARG INT u_int32_t id
+END
+BEGIN lock_put NOFUNC
ARG ID DB_ENV * dbenv
ARG ID DB_LOCK * lock
END
-BEGIN lock_stat 1 NOFUNC
+BEGIN lock_stat NOFUNC
ARG ID DB_ENV * dbenv
ARG IGNORE DB_LOCK_STAT ** statp
-FUNCPROT void *(*)(size_t)
-FUNCARG void *(*func0) __P((size_t))
+ARG INT u_int32_t flags
END
-BEGIN lock_vec 1 NOFUNC
+BEGIN lock_vec NOFUNC
ARG ID DB_ENV * dbenv
ARG INT u_int32_t locker
ARG INT u_int32_t flags
@@ -491,89 +650,50 @@ END
#
# Logging subsystem
#
-BEGIN log_archive 1 NOFUNC
+BEGIN log_archive NOFUNC
ARG ID DB_ENV * dbenv
ARG IGNORE char *** listp
ARG INT u_int32_t flags
-FUNCPROT void *(*)(size_t)
-FUNCARG void *(*func0) __P((size_t))
+END
+BEGIN log_cursor NOFUNC
+ARG ID DB_ENV * dbenv
+ARG IGNORE DB_LOGC ** logcp
+ARG INT u_int32_t flags
END
#
# Don't do log_compare. It doesn't have an env we can get at,
# and it doesn't manipulate DB internal information.
#
-BEGIN log_file 1 NOFUNC
+BEGIN log_file NOFUNC
ARG ID DB_ENV * dbenv
ARG CONST const DB_LSN * lsn
ARG STRING char * namep
ARG INT size_t len
END
-BEGIN log_flush 1 NOFUNC
+BEGIN log_flush NOFUNC
ARG ID DB_ENV * dbenv
ARG CONST const DB_LSN * lsn
END
-BEGIN log_get 1 NOFUNC
-ARG ID DB_ENV * dbenv
-ARG IGNORE DB_LSN * lsn
-ARG DBT DBT * data
-ARG INT u_int32_t flags
-END
-BEGIN log_put 1 NOFUNC
+BEGIN log_put NOFUNC
ARG ID DB_ENV * dbenv
ARG IGNORE DB_LSN * lsn
-ARG CONST const DBT * data
+ARG DBT const DBT * data
ARG INT u_int32_t flags
END
-BEGIN log_register 1 NOFUNC
-ARG ID DB_ENV * dbenv
-ARG ID DB * dbp
-ARG CONST const char * namep
-END
-BEGIN log_stat 1 NOFUNC
+BEGIN log_stat NOFUNC
ARG ID DB_ENV * dbenv
ARG IGNORE DB_LOG_STAT ** statp
-FUNCPROT void *(*)(size_t)
-FUNCARG void *(*func0) __P((size_t))
-END
-BEGIN log_unregister 1 NOFUNC
-ARG ID DB_ENV * dbenv
-ARG ID DB * dbp
+ARG INT u_int32_t flags
END
#
# Mpool Subsystem
#
-BEGIN memp_fclose 1 NOFUNC
-ARG ID DB_MPOOLFILE * mpf
-END
-BEGIN memp_fget 1 NOFUNC
-ARG ID DB_MPOOLFILE * mpf
-ARG IGNORE db_pgno_t * pgno
-ARG INT u_int32_t flags
-ARG IGNORE void ** pagep
-END
-BEGIN memp_fopen 1 NOFUNC
+BEGIN memp_fcreate NOFUNC
ARG ID DB_ENV * dbenv
-ARG CONST const char * file
-ARG INT u_int32_t flags
-ARG INT int mode
-ARG INT size_t pagesize
-ARG IGNORE DB_MPOOL_FINFO * finfop
ARG IGNORE DB_MPOOLFILE ** mpf
+ARG IGNORE u_int32_t flags
END
-BEGIN memp_fput 1 NOFUNC
-ARG ID DB_MPOOLFILE * mpf
-ARG IGNORE void * pgaddr
-ARG INT u_int32_t flags
-END
-BEGIN memp_fset 1 NOFUNC
-ARG ID DB_MPOOLFILE * mpf
-ARG IGNORE void * pgaddr
-ARG INT u_int32_t flags
-END
-BEGIN memp_fsync 1 NOFUNC
-ARG ID DB_MPOOLFILE * mpf
-END
-BEGIN memp_register 1 NOFUNC
+BEGIN memp_register NOFUNC
ARG ID DB_ENV * dbenv
ARG INT int ftype
FUNCPROT int (*)(DB_ENV *, db_pgno_t, void *, DBT *)
@@ -581,18 +701,17 @@ FUNCARG int (*func0) __P((DB_ENV *, db_pgno_t, void *, DBT *))
FUNCPROT int (*)(DB_ENV *, db_pgno_t, void *, DBT *)
FUNCARG int (*func1) __P((DB_ENV *, db_pgno_t, void *, DBT *))
END
-BEGIN memp_stat 1 NOFUNC
+BEGIN memp_stat NOFUNC
ARG ID DB_ENV * dbenv
ARG IGNORE DB_MPOOL_STAT ** gstatp
ARG IGNORE DB_MPOOL_FSTAT *** fstatp
-FUNCPROT void *(*)(size_t)
-FUNCARG void *(*func0) __P((size_t))
+ARG INT u_int32_t flags
END
-BEGIN memp_sync 1 NOFUNC
+BEGIN memp_sync NOFUNC
ARG ID DB_ENV * dbenv
ARG IGNORE DB_LSN * lsn
END
-BEGIN memp_trickle 1 NOFUNC
+BEGIN memp_trickle NOFUNC
ARG ID DB_ENV * dbenv
ARG INT int pct
ARG IGNORE int * nwrotep
diff --git a/bdb/tcl/docs/db.html b/bdb/tcl/docs/db.html
index c75ab6ecf4f..4f04c2c4f96 100644
--- a/bdb/tcl/docs/db.html
+++ b/bdb/tcl/docs/db.html
@@ -1,4 +1,5 @@
-<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<!--Copyright 1999-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
<HTML>
<HEAD>
<META HTTP-EQUIV="Content-Type" CONTENT="text/html; charset=iso-8859-1">
@@ -8,184 +9,154 @@
<H2>
<A NAME="Database Commands"></A>Database Commands</H2>
-The database commands provide a conduit into the DB method functions.&nbsp;
-They are all fairly straightforward and I describe them in terms of their
-DB functions briefly here, with a link to the DB page where appropriate.&nbsp;
-The first set of commands are those I believe will be the primary functions
-used by most databases.&nbsp; Some are directly related to their DB counterparts,
-and some are higher level functions that are useful to provide the user.
-<P><B>> berkdb open [-env <I>env</I>]</B>
-<BR><B>&nbsp;&nbsp;&nbsp; [-btree|-hash|-recno|-queue|-unknown]</B>
-<BR><B>&nbsp;&nbsp;&nbsp; [-create] [-excl] [-nommap] [-rdonly] [-truncate]
-[-mode
-<I>mode</I>] [-errfile <I>filename</I>]</B>
-<BR><B>&nbsp;&nbsp;&nbsp; [-dup] [-dupsort] [-recnum] [-renumber] [-revsplitoff]
-[-snapshot]</B>
-<BR><B>&nbsp;&nbsp;&nbsp; [-extent <I>size</I>]</B>
-<BR><B>&nbsp;&nbsp;&nbsp; [-ffactor <I>density</I>]</B>
-<BR><B>&nbsp;&nbsp;&nbsp; [-nelem <I>size</I>]</B>
-<BR><B>&nbsp;&nbsp;&nbsp; [-lorder <I>order</I>]</B>
-<BR><B>&nbsp;&nbsp;&nbsp; [-delim <I>delim</I>]</B>
-<BR><B>&nbsp;&nbsp;&nbsp; [-len <I>len</I>]</B>
-<BR><B>&nbsp;&nbsp;&nbsp; [-pad <I>pad</I>]</B>
-<BR><B>&nbsp;&nbsp;&nbsp; [-source <I>file</I>]</B>
-<BR><B>&nbsp;&nbsp;&nbsp; [-minkey <I>minkey</I>]</B>
-<BR><B>&nbsp;&nbsp;&nbsp; [-cachesize {<I>gbytes bytes ncaches</I>}]</B>
-<BR><B>&nbsp;&nbsp;&nbsp; [-pagesize <I>pagesize</I>]</B>
-<BR><B>&nbsp;&nbsp;&nbsp; [--]</B>
-<BR><B>&nbsp;&nbsp;&nbsp; [<I>filename </I>[<I>subdbname</I>]]</B>
-<P>This command will invoke the <A HREF="../../docs/api_c/db_create.html">db_create</A>
-function.&nbsp; If the command is given the <B>-env</B> option, then we
-will accordingly creating the database within the context of that environment.&nbsp;
-After it successfully gets a handle to a database, we bind it to a new
-Tcl command of the form <B><I>dbX, </I></B>where X is an integer starting
-at 0 (e.g. <B>db0, db1, </B>etc).&nbsp; We use the <I>Tcl_CreateObjCommand()&nbsp;</I>
-to create the top level database function.&nbsp; It is through this handle
-that the user can access all of the commands described in the <A HREF="#Database Commands">Database
-Commands</A> section.&nbsp; Internally, the database handle is sent as
-the <I>ClientData</I> portion of the new command set so that all future
-database calls access the appropriate handle.
-<P>After parsing all of the optional arguments affecting the setup of the
-database and making the appropriate calls to DB to manipulate those values,
-we open the database for the user. It&nbsp; translates to the
-<A HREF="../../docs/api_c/db_open.html">DB->open</A>
-method call after parsing all of the various optional arguments.&nbsp;
-We automatically set the DB_THREAD flag.&nbsp; The arguments are:
-<UL>
-<LI>
-<B>-- </B>- Terminate the list of options and use remaining arguments as
-the file or subdb names (thus allowing the use of filenames beginning with
-a dash '-')</LI>
-
-<LI>
-<B>-btree</B> - DB_BTREE database</LI>
-
-<LI>
-<B>-hash</B> -&nbsp; DB_HASH database</LI>
-
-<LI>
-<B>-recno&nbsp;</B> - DB_RECNO database</LI>
-
-<LI>
-<B>-queue</B> - DB_QUEUE database</LI>
-
-<LI>
-<B>-create</B> selects the DB_CREATE flag&nbsp; to create underlying files</LI>
-
-<LI>
-<B>-excl</B> selects the DB_EXCL flag&nbsp; to exclusively create underlying
-files</LI>
-
-<LI>
-<B>-nommap</B> selects the DB_NOMMAP flag to forbid mmaping of files</LI>
-
-<LI>
-<B>-rdonly</B> selects the DB_RDONLY flag for opening in read-only mode</LI>
-
-<LI>
-<B>-truncate</B> selects the DB_TRUNCATE flag to truncate the database</LI>
-
-<LI>
-<B>-mode<I> mode</I></B> specifies the mode for created files</LI>
-
-<LI>
-<B>-errfile </B>specifies the error file to use for this environment to
-<B><I>filename</I></B>
-by calling <A HREF="../../docs/api_c/db_set_errfile.html">DB->set_errfile</A><B><I>.
-</I></B>If
-the file already exists then we will append to the end of the file</LI>
-
-<LI>
-<B>-dup </B>selects the DB_DUP flag to permit duplicates in the database</LI>
-
-<LI>
-<B>-dupsort</B> selects the DB_DUPSORT flag to support sorted duplicates</LI>
-
-<LI>
-<B>-recnum</B> selects the DB_RECNUM flag to support record numbers in
-btrees</LI>
-
-<LI>
-<B>-renumber </B>selects the DB_RENUMBER flag to support mutable record
-numbers</LI>
+The database commands provide a fairly straightforward mapping to the
+DB method functions.
-<LI>
-<B>-revsplitoff </B>selects the DB_REVSPLITOFF flag to suppress reverse
-splitting of pages on deletion</LI>
-
-<LI>
-<B>-snapshot </B>selects the DB_SNAPSHOT flag to support database snapshots</LI>
-
-<LI>
-<B>-extent </B>sets the size of a Queue database extent to the given <B><I>size
-</I></B>using
-the <A HREF="../../docs/api_c/db_set_q_extentsize.html">DB->set_q_extentsize</A>
-method</LI>
-
-<LI>
-<B>-ffactor</B> sets the hash table key density to the given <B><I>density
-</I></B>using
-the <A HREF="../../docs/api_c/db_set_h_ffactor.html">DB->set_h_ffactor</A>
-method</LI>
-
-<LI>
-<B>-nelem </B>sets the hash table size estimate to the given <B><I>size
-</I></B>using
-the <A HREF="../../docs/api_c/db_set_h_nelem.html">DB->set_h_nelem</A>
-method</LI>
-
-<LI>
-<B>-lorder </B>sets the byte order for integers stored in the database
-meta-data to the given <B><I>order</I></B> using the <A HREF="../../docs/api_c/db_set_lorder.html">DB->set_lorder</A>
-method</LI>
-
-<LI>
-<B>-delim </B>sets the delimiting byte for variable length records to
-<B><I>delim</I></B>
-using the <A HREF="../../docs/api_c/db_set_re_delim.html">DB->set_re_delim</A>
-method</LI>
-
-<LI>
-<B>-len </B>sets the length of fixed-length records to <B><I>len</I></B>
-using the <A HREF="../../docs/api_c/db_set_re_len.html">DB->set_re_len</A>
-method</LI>
-
-<LI>
-<B>-pad </B>sets the pad character used for fixed length records to
-<B><I>pad</I></B>&nbsp;
-using the <A HREF="../../docs/db_set_re_pad.html">DB->set_re_pad</A> method</LI>
-
-<LI>
-<B>-source </B>sets the backing source file name to <B><I>file</I></B>
-using the <A HREF="../../docs/api_c/db_set_re_source.html">DB->set_re_source</A>
-method</LI>
-
-<LI>
-<B>-minkey </B>sets the minimum number of keys per Btree page to <B><I>minkey</I></B>
-using the <A HREF="../../docs/api_c/db_set_bt_minkey.html">DB->set_bt_minkey</A>
-method</LI>
-
-<LI>
-<B>-cachesize </B>sets the size of the database cache to the size&nbsp;
-specified by <B><I>gbytes </I></B>and <B><I>bytes, </I></B>broken up into
-<B><I>ncaches</I></B>
-number of caches using the <A HREF="../../docs/api_c/db_set_cachesize.html">DB->set_cachesize</A>
-method</LI>
-
-<LI>
-<B>-pagesize </B>sets the size of the database page to <B><I>pagesize </I></B>using
-the <A HREF="../../docs/api_c/db_set_pagesize.html">DB->set_pagesize</A>
-method</LI>
-
-<LI>
-<B><I>filename</I></B> indicates the name of the database</LI>
-
-<LI>
-<B><I>subdbname</I></B> indicate the name of the sub-database</LI>
-</UL>
+<P>
+<B>> berkdb open</B>
+<dl>
+
+<dt><B>[-btcompare <I>proc</I>]</B><dd>
+Sets the Btree comparison function to the Tcl procedure named
+<I>proc</I> using the
+<A HREF="../../docs/api_c/db_set_bt_compare.html">DB->set_bt_compare</A>
+method.
+
+<dt><B>[-btree|-hash|-recno|-queue|-unknown]</B><dd>
+Selects the database type:<br>
+DB_BTREE, DB_HASH, DB_RECNO, DB_QUEUE or DB_UNKNOWN.
+
+
+<dt><B>[-cachesize {<I>gbytes bytes ncaches</I>}]</B><dd>
+Sets the size of the database cache to the size specified by
+<I>gbytes</I> and <I>bytes</I>, broken up into <I>ncaches</I> number of
+caches using the
+<A HREF="../../docs/api_c/db_set_cachesize.html">DB->set_cachesize</A>
+method.
+
+<dt><B>[-create]</B><dd>
+Selects the DB_CREATE flag to create underlying files.
+
+<dt><B>[-delim <I>delim</I>]</B><dd>
+Sets the delimiting byte for variable length records to <I>delim</I>
+using the
+<A HREF="../../docs/api_c/db_set_re_delim.html">DB->set_re_delim</A>
+method.
+
+<dt><B>[-dup]</B><dd>
+Selects the DB_DUP flag to permit duplicates in the database.
+
+<dt><B>[-dupcompare <I>proc</I>]</B><dd>
+Sets the duplicate data comparison function to the Tcl procedure named
+<I>proc</I> using the
+<A HREF="../../docs/api_c/db_set_dup_compare.html">DB->set_dup_compare</A>
+method.
+
+<dt><B>[-dupsort]</B><dd>
+Selects the DB_DUPSORT flag to support sorted duplicates.
+
+<dt><B>[-env <I>env</I>]</B><dd>
+The database environment.
+
+<dt><B>[-errfile <I>filename</I>]</B><dd>
+Specifies the error file to use for this environment to <I>filename</I>
+by calling
+<A HREF="../../docs/api_c/db_set_errfile.html">DB->set_errfile</A>.
+If the file already exists then we will append to the end of the file.
+
+<dt><B>[-excl]</B><dd>
+Selects the DB_EXCL flag to exclusively create underlying files.
+
+<dt><B>[-extent <I>size</I>]</B><dd>
+Sets the size of a Queue database extent to the given <I>size</I> using
+the
+<A HREF="../../docs/api_c/db_set_q_extentsize.html">DB->set_q_extentsize</A>
+method.
+
+<dt><B>[-ffactor <I>density</I>]</B><dd>
+Sets the hash table key density to the given <I>density</I> using the
+<A HREF="../../docs/api_c/db_set_h_ffactor.html">DB->set_h_ffactor</A>
+method.
+
+<dt><B>[-hashproc <I>proc</I>]</B><dd>
+Sets a user-defined hash function to the Tcl procedure named <I>proc</I>
+using the
+<A HREF="../../docs/api_c/db_set_h_hash.html">DB->set_h_hash</A> method.
+
+<dt><B>[-len <I>len</I>]</B><dd>
+Sets the length of fixed-length records to <I>len</I> using the
+<A HREF="../../docs/api_c/db_set_re_len.html">DB->set_re_len</A>
+method.
+
+<dt><B>[-lorder <I>order</I>]</B><dd>
+Sets the byte order for integers stored in the database meta-data to
+the given <I>order</I> using the
+<A HREF="../../docs/api_c/db_set_lorder.html">DB->set_lorder</A>
+method.
+
+<dt><B>[-minkey <I>minkey</I>]</B><dd>
+Sets the minimum number of keys per Btree page to <I>minkey</I> using
+the
+<A HREF="../../docs/api_c/db_set_bt_minkey.html">DB->set_bt_minkey</A>
+method.
+
+<dt><B>[-mode <I>mode</I>]</B><dd>
+Specifies the mode for created files.
+
+<dt><B>[-nelem <I>size</I>]</B><dd>
+Sets the hash table size estimate to the given <I>size</I> using the
+<A HREF="../../docs/api_c/db_set_h_nelem.html">DB->set_h_nelem</A>
+method.
+
+<dt><B>[-nommap]</B><dd>
+Selects the DB_NOMMAP flag to forbid mmaping of files.
+
+<dt><B>[-pad <I>pad</I>]</B><dd>
+Sets the pad character used for fixed length records to <I>pad</I> using
+the
+<A HREF="../../docs/db_set_re_pad.html">DB->set_re_pad</A> method.
+
+<dt><B>[-pagesize <I>pagesize</I>]</B><dd>
+Sets the size of the database page to <I>pagesize</I> using the
+<A HREF="../../docs/api_c/db_set_pagesize.html">DB->set_pagesize</A>
+method.
+
+<dt><B>[-rdonly]</B><dd>
+Selects the DB_RDONLY flag for opening in read-only mode.
+
+<dt><B>[-recnum]</B><dd>
+Selects the DB_RECNUM flag to support record numbers in Btrees.
+
+<dt><B>[-renumber]</B><dd>
+Selects the DB_RENUMBER flag to support mutable record numbers.
+
+<dt><B>[-revsplitoff]</B><dd>
+Selects the DB_REVSPLITOFF flag to suppress reverse splitting of pages
+on deletion.
+
+<dt><B>[-snapshot]</B><dd>
+Selects the DB_SNAPSHOT flag to support database snapshots.
+
+<dt><B>[-source <I>file</I>]</B><dd>
+Sets the backing source file name to <I>file</I> using the
+<A HREF="../../docs/api_c/db_set_re_source.html">DB->set_re_source</A>
+method.
+
+<dt><B>[-truncate]</B><dd>
+Selects the DB_TRUNCATE flag to truncate the database.
+
+<dt><B>[--]</B><dd>
+Terminate the list of options and use remaining arguments as the file
+or subdb names (thus allowing the use of filenames beginning with a dash
+'-').
+
+<dt><B>[<I>filename </I>[<I>subdbname</I>]]</B><dd>
+The names of the database and sub-database.
+</dl>
<HR WIDTH="100%">
-<BR><B>&nbsp;berkdb upgrade [-dupsort] [-env <I>env</I>] [--] [<I>filename</I>]</B>
+<B>> berkdb upgrade [-dupsort] [-env <I>env</I>] [--] [<I>filename</I>]</B>
<P>This command will invoke the <A HREF="../../docs/api_c/db_upgrade.html">DB->upgrade</A>
function.&nbsp; If the command is given the <B>-env</B> option, then we
will accordingly upgrade the database filename within the context of that
@@ -193,14 +164,21 @@ environment. The <B>-dupsort</B> option selects the DB_DUPSORT flag for
upgrading. The use of --<B> </B>terminates the list of options, thus allowing
filenames beginning with a dash.
<P>
-<HR WIDTH="100%"><B>> berkdb verify [-env <I>env</I>] [--] [<I>filename</I>]</B>
+
+<HR WIDTH="100%">
+<B>> berkdb verify [-env <I>env</I>] [--] [<I>filename</I>]</B>
<P>This command will invoke the <A HREF="../../docs/api_c/db_verify.html">DB->verify</A>
function.&nbsp; If the command is given the <B>-env</B> option, then we
will accordingly verify the database filename within the context of that
environment.&nbsp; The use of --<B> </B>terminates the list of options,
thus allowing filenames beginning with a dash.
<P>
-<HR WIDTH="100%"><B>> <I>db</I> join [-nosort] <I>db0.c0 db1.c0</I> ...</B>
+
+<HR WIDTH="100%"><B>> <I>db</I> del</B>
+<P>There are no undocumented options.
+
+<HR WIDTH="100%">
+<B>> <I>db</I> join [-nosort] <I>db0.c0 db1.c0</I> ...</B>
<P>This command will invoke the <A HREF="../../docs/api_c/db_join.html">db_join</A>
function.&nbsp; After it successfully joins a database, we bind it to a
new Tcl command of the form <B><I>dbN.cX, </I></B>where X is an integer
@@ -215,7 +193,33 @@ number of data items they reference.&nbsp; It results in the DB_JOIN_NOSORT
flag being set.</LI>
</UL>
-<HR WIDTH="100%"><B>> <I>db</I> get_join [-nosort] {db key} {db key} ...</B>
+<P>
+This command will invoke the
+<A HREF="../../docs/api_c/db_create.html">db_create</A> function. If
+the command is given the <B>-env</B> option, then we will accordingly
+create the database within the context of that environment. After it
+successfully gets a handle to a database, we bind it to a new Tcl
+command of the form <B><I>dbX, </I></B>where X is an integer starting
+at 0 (e.g. <B>db0, db1, </B>etc).
+
+<p>
+We use the <I>Tcl_CreateObjCommand()</I> to create the top level
+database function. It is through this handle that the user can access
+all of the commands described in the <A HREF="#Database Commands">
+Database Commands</A> section. Internally, the database handle
+is sent as the <I>ClientData</I> portion of the new command set so that
+all future database calls access the appropriate handle.
+
+<P>
+After parsing all of the optional arguments affecting the setup of the
+database and making the appropriate calls to DB to manipulate those
+values, we open the database for the user. It translates to the
+<A HREF="../../docs/api_c/db_open.html">DB->open</A> method call after
+parsing all of the various optional arguments. We automatically set the
+DB_THREAD flag.
+
+<HR WIDTH="100%">
+<B>> <I>db</I> get_join [-nosort] {db key} {db key} ...</B>
<P>This command performs a join operation on the keys specified and returns
a list of the joined {key data} pairs.
<P>The options are:
@@ -226,41 +230,34 @@ number of data items they reference.&nbsp; It results in the DB_JOIN_NOSORT
flag being set.</LI>
</UL>
-<HR WIDTH="100%"><B>> <I>db</I> keyrange [-txn <I>id</I>] key</B>
+<HR WIDTH="100%">
+<B>> <I>db</I> keyrange [-txn <I>id</I>] key</B>
<P>This command returns the range for the given <B>key</B>.&nbsp; It returns
a list of 3 double elements of the form {<B><I>less equal greater</I></B>}
where <B><I>less</I></B> is the percentage of keys less than the given
key, <B><I>equal</I></B> is the percentage equal to the given key and <B><I>greater</I></B>
is the percentage greater than the given key.&nbsp; If the -txn option
is specified it performs this operation under transaction protection.
-<BR>
-<HR WIDTH="100%"><B>> <I>db</I> put</B>
-<P>The <B>undocumented</B> options are:
-<UL>
-<LI>
-<B>-nodupdata</B> This flag causes DB not to insert the key/data pair if
-it already exists, that is, both the key and data items are already in
-the database. The -nodupdata flag may only be specified if the underlying
-database has been configured to support sorted duplicates.</LI>
-</UL>
-<HR WIDTH="100%"><B>> <I>db</I> stat</B>
+<HR WIDTH="100%"><B>> <I>db</I> put</B>
<P>The <B>undocumented</B> options are:
-<UL>
-<LI>
-<B>-cachedcounts</B> This flag causes DB to return the cached key/record
-counts, similar to the DB_CACHED_COUNTS flags to DB->stat.</LI>
-</UL>
+<dl>
+<dt><B>-nodupdata</B><dd>
+This flag causes DB not to insert the key/data pair if it already
+exists, that is, both the key and data items are already in the
+database. The -nodupdata flag may only be specified if the underlying
+database has been configured to support sorted duplicates.
+</dl>
<HR WIDTH="100%"><B>> <I>dbc</I> put</B>
<P>The <B>undocumented</B> options are:
-<UL>
-<LI>
-<B>-nodupdata</B> This flag causes DB not to insert the key/data pair if
-it already exists, that is, both the key and data items are already in
-the database. The -nodupdata flag may only be specified if the underlying
-database has been configured to support sorted duplicates.</LI>
-</UL>
+<dl>
+<dt><B>-nodupdata</B><dd>
+This flag causes DB not to insert the key/data pair if it already
+exists, that is, both the key and data items are already in the
+database. The -nodupdata flag may only be specified if the underlying
+database has been configured to support sorted duplicates.
+</dl>
</BODY>
</HTML>
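
To make the reference material above concrete, here is a minimal Tcl sketch of the db.html commands; it assumes a Tcl shell with the Berkeley DB package loaded, and the file name, page size, cache geometry, and key are invented for illustration (the db close subcommand used at the end is documented elsewhere in db.html):

    # Create and open a Btree database with an 8KB page size and a single
    # 1MB cache, per the "berkdb open" options documented above.
    set db [berkdb open -create -btree -pagesize 8192 \
        -cachesize {0 1048576 1} example.db]

    # keyrange reports where a key falls in the key space as a list of
    # three doubles: {less equal greater}.
    set range [$db keyrange somekey]

    # Release the handle when finished.
    $db close
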
diff --git a/bdb/tcl/docs/env.html b/bdb/tcl/docs/env.html
index a1bd08fd163..79c349841ac 100644
--- a/bdb/tcl/docs/env.html
+++ b/bdb/tcl/docs/env.html
@@ -1,303 +1,354 @@
-<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
-<HTML>
-<HEAD>
- <META HTTP-EQUIV="Content-Type" CONTENT="text/html; charset=iso-8859-1">
- <META NAME="GENERATOR" CONTENT="Mozilla/4.08 [en] (X11; I; FreeBSD 3.3-RELEASE i386) [Netscape]">
-</HEAD>
-<BODY>
-
-<H2>
-Environment Commands</H2>
+<!--Copyright 1999-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+ <meta http-equiv="Content-Type" content="text/html; charset=iso-8859-1">
+ <meta name="GENERATOR" content="Mozilla/4.75 [en] (X11; U; Linux 2.2.16-22 i686) [Netscape]">
+</head>
+<body>
+
+<h2>
+Environment Commands</h2>
Environments provide a structure for creating a consistent environment
for processes using one or more of the features of Berkeley DB.&nbsp; Unlike
some of the database commands, the environment commands are very low level.
-<BR>
-<HR WIDTH="100%">
-<P>The user may create and open a new DB environment&nbsp; by invoking:
-<P><B>> berkdb env</B>
-<BR><B>&nbsp;&nbsp;&nbsp; [-cdb] [-cdb_alldb] [-lock] [-log] [-txn [nosync]]</B>
-<BR><B>&nbsp;&nbsp;&nbsp; [-create] [-home<I> directory</I>] [-mode <I>mode</I>]</B>
-<BR><B>&nbsp;&nbsp;&nbsp; [-data_dir <I>directory</I>] [-log_dir <I>directory</I>]
-[-tmp_dir <I>directory</I>]</B>
-<BR><B>&nbsp;&nbsp;&nbsp; [-nommap] [-private] [-recover] [-recover_fatal]
-[-system_mem] [-errfile <I>filename</I>]</B>
-<BR><B>&nbsp;&nbsp;&nbsp; [-use_environ] [-use_environ_root] [-verbose
-{<I>which </I>on|off}]</B>
-<BR><B>&nbsp;&nbsp;&nbsp; [-region_init]</B>
-<BR><B>&nbsp;&nbsp;&nbsp; [-cachesize {<I>gbytes bytes ncaches</I>}]</B>
-<BR><B>&nbsp;&nbsp;&nbsp; [-mmapsize<I> size</I>]</B>
-<BR><B>&nbsp;&nbsp;&nbsp; [-log_max <I>max</I>]</B>
-<BR><B>&nbsp;&nbsp;&nbsp; [-log_buffer <I>size</I>]</B>
-<BR><B>&nbsp;&nbsp;&nbsp; [-lock_conflict {<I>nmodes </I>{<I>matrix</I>}}]</B>
-<BR><B>&nbsp;&nbsp;&nbsp; [-lock_detect default|oldest|random|youngest]</B>
-<BR><B>&nbsp;&nbsp;&nbsp; [-lock_max <I>max</I>]</B>
-<BR><B>&nbsp;&nbsp;&nbsp; [-lock_max_locks <I>max</I>]</B>
-<BR><B>&nbsp;&nbsp;&nbsp; [-lock_max_lockers <I>max</I>]</B>
-<BR><B>&nbsp;&nbsp;&nbsp; [-lock_max_objects <I>max</I>]</B>
-<BR><B>&nbsp;&nbsp;&nbsp; [-txn_max <I>max</I>]</B>
-<BR><B>&nbsp;&nbsp;&nbsp; [-client_timeout <I>seconds</I>]</B>
-<BR><B>&nbsp;&nbsp;&nbsp; [-server_timeout <I>seconds</I>]</B>
-<BR><B>&nbsp;&nbsp;&nbsp; [-server <I>hostname</I>]</B>
-<BR>&nbsp;
-<P>This command opens up an environment.&nbsp;&nbsp; We automatically set
+<br>
+<hr WIDTH="100%">
+<p>The user may create and open a new DB environment&nbsp; by invoking:
+<p><b>> berkdb env</b>
+<br><b>&nbsp;&nbsp;&nbsp; [-cdb] [-cdb_alldb] [-lock] [-log] [-txn [nosync]]</b>
+<br><b>&nbsp;&nbsp;&nbsp; [-create] [-home<i> directory</i>] [-mode <i>mode</i>]</b>
+<br><b>&nbsp;&nbsp;&nbsp; [-data_dir <i>directory</i>] [-log_dir <i>directory</i>]
+[-tmp_dir <i>directory</i>]</b>
+<br><b>&nbsp;&nbsp;&nbsp; [-nommap] [-private] [-recover] [-recover_fatal]
+[-system_mem] [-errfile <i>filename</i>]</b>
+<br><b>&nbsp;&nbsp;&nbsp; [-use_environ] [-use_environ_root] [-verbose
+{<i>which </i>on|off}]</b>
+<br><b>&nbsp;&nbsp;&nbsp; [-region_init]</b>
+<br><b>&nbsp;&nbsp;&nbsp; [-cachesize {<i>gbytes bytes ncaches</i>}]</b>
+<br><b>&nbsp;&nbsp;&nbsp; [-mmapsize<i> size</i>]</b>
+<br><b>&nbsp;&nbsp;&nbsp; [-log_max <i>max</i>]</b>
+<br><b>&nbsp;&nbsp;&nbsp; [-log_buffer <i>size</i>]</b>
+<br><b>&nbsp;&nbsp;&nbsp; [-lock_conflict {<i>nmodes </i>{<i>matrix</i>}}]</b>
+<br><b>&nbsp;&nbsp;&nbsp; [-lock_detect default|oldest|random|youngest]</b>
+<br><b>&nbsp;&nbsp;&nbsp; [-lock_max <i>max</i>]</b>
+<br><b>&nbsp;&nbsp;&nbsp; [-lock_max_locks <i>max</i>]</b>
+<br><b>&nbsp;&nbsp;&nbsp; [-lock_max_lockers <i>max</i>]</b>
+<br><b>&nbsp;&nbsp;&nbsp; [-lock_max_objects <i>max</i>]</b>
+<br><b>&nbsp;&nbsp;&nbsp; [-lock_timeout <i>timeout</i>]</b>
+<br><b>&nbsp;&nbsp;&nbsp; [-overwrite]</b>
+<br><b>&nbsp;&nbsp;&nbsp; [-txn_max <i>max</i>]</b>
+<br><b>&nbsp;&nbsp;&nbsp; [-txn_timeout <i>timeout</i>]</b>
+<br><b>&nbsp;&nbsp;&nbsp; [-client_timeout <i>seconds</i>]</b>
+<br><b>&nbsp;&nbsp;&nbsp; [-server_timeout <i>seconds</i>]</b>
+<br><b>&nbsp;&nbsp;&nbsp; [-server <i>hostname</i>]</b>
+<br><b>&nbsp;&nbsp;&nbsp; [-rep_master] [-rep_client]</b>
+<br><b>&nbsp;&nbsp;&nbsp; [-rep_transport <i>{ machineid sendproc }</i>]</b>
+<br>&nbsp;
+<p>This command opens up an environment.&nbsp;&nbsp; We automatically set
the DB_THREAD and the DB_INIT_MPOOL flags.&nbsp; The arguments are:
-<UL>
-<LI>
-<B>-cdb</B> selects the DB_INIT_CDB flag for Concurrent Data Store</LI>
-
-<LI>
-<B>-cdb_alldb</B> selects the DB_CDB_ALLDB flag for Concurrent Data Store</LI>
-
-<LI>
-<B>-lock</B> selects the DB_INIT_LOCK flag for the locking subsystem</LI>
-
-<LI>
-<B>-log</B> selects the DB_INIT_LOG flag for the logging subsystem</LI>
-
-<LI>
-<B>-txn</B> selects the DB_INIT_TXN, DB_INIT_LOCK and DB_INIT_LOG flags
-for the transaction subsystem.&nbsp; If <B>nosync</B> is specified, then
-it will also select DB_TXN_NOSYNC to indicate no flushes of log on commits</LI>
-
-<LI>
-<B>-create </B>selects the DB_CREATE flag to create underlying files</LI>
-
-<LI>
-<B>-home <I>directory </I></B>selects the home directory of the environment</LI>
-
-<LI>
-<B>-data_dir <I>directory </I></B>selects the data file directory of the
-environment by calling <A HREF="../../docs/api_c/env_set_data_dir.html">DBENV->set_data_dir</A>.</LI>
-
-<LI>
-<B>-log_dir <I>directory </I></B>selects the log file directory of the
-environment&nbsp; by calling <A HREF="../../docs/api_c/env_set_lg_dir.html">DBENV->set_lg_dir</A>.</LI>
-
-<LI>
-<B>-tmp_dir <I>directory </I></B>selects the temporary file directory of
-the environment&nbsp; by calling <A HREF="../../docs/api_c/env_set_tmp_dir.so">DBENV->set_tmp_dir</A>.</LI>
-
-<LI>
-<B>-mode <I>mode </I></B>sets the permissions of created files to <B><I>mode</I></B></LI>
-
-<LI>
-<B>-nommap</B> selects the DB_NOMMAP flag to disallow using mmap'ed files</LI>
-
-<LI>
-<B>-private</B> selects the DB_PRIVATE flag for a private environment</LI>
-
-<LI>
-<B>-recover</B> selects the DB_RECOVER flag for recovery</LI>
-
-<LI>
-<B>-recover_fatal</B> selects the DB_RECOVER_FATAL flag for catastrophic
-recovery</LI>
-
-<LI>
-<B>-system_mem</B> selects the DB_SYSTEM_MEM flag to use system memory</LI>
-
-<LI>
-<B>-errfile </B>specifies the error file to use for this environment to
-<B><I>filename</I></B>
-by calling <A HREF="../../docs/api_c/env_set_errfile.html">DBENV->set_errfile</A><B><I>.
-</I></B>If
-the file already exists then we will append to the end of the file</LI>
-
-<LI>
-<B>-use_environ</B> selects the DB_USE_ENVIRON flag to affect file naming</LI>
-
-<LI>
-<B>-use_environ_root</B> selects the DB_USE_ENVIRON_ROOT flag to have the
-root environment affect file naming</LI>
-
-<LI>
-<B>-verbose</B> produces verbose error output for the given which subsystem,
-using the <A HREF="../../docs/api_c/dbenv_set_verbose.html">DBENV->set_verbose</A>
-method.&nbsp;&nbsp; See the description of <A HREF="#> <env> verbose which on|off">verbose</A>
-below for valid <B><I>which </I></B>values</LI>
-
-<LI>
-<B>-region_init </B>specifies that the user wants to page fault the region
-in on startup using the <A HREF="../../docs/api_c/env_set_region_init.html">DBENV->set_region_init</A>
-method call</LI>
-
-<LI>
-<B>-cachesize </B>sets the size of the database cache to the size&nbsp;
-specified by <B><I>gbytes </I></B>and <B><I>bytes, </I></B>broken up into
-<B><I>ncaches</I></B>
-number of caches using the <A HREF="../../docs/api_c/env_set_cachesize.html">DBENV->set_cachesize</A>
-method</LI>
-
-<LI>
-<B>-mmapsize </B>sets the size of the database page to <B><I>size </I></B>using
-the <A HREF="../../docs/api_c/env_set_mp_mmapsize.html">DBENV->set_mp_mmapsize</A>
-method</LI>
-
-<LI>
-<B>-log_max </B>sets the maximum size of the log file to <B><I>max</I></B>
-using the <A HREF="../../docs/api_c/env_set_lg_max.html">DBENV->set_lg_max</A>
-call</LI>
-
-<LI>
-<B>-log_buffer </B>sets the size of the log file in bytes to <B><I>size</I></B>
-using the <A HREF="../../docs/api_c/env_set_lg_bsize.html">DBENV->set_lg_bsize</A>
-call</LI>
-
-<LI>
-<B>-lock_conflict </B>sets the number of lock modes to <B><I>nmodes</I></B>
-and sets the locking policy for those modes to the <B><I>conflict_matrix</I></B>
-given using the <A HREF="../../docs/api_c/env_set_lk_conflict.html">DBENV->set_lk_conflict</A>
-method call</LI>
-
-<LI>
-<B>-lock_detect </B>sets the deadlock detection policy to the given policy
-using the <A HREF="../../docs/env_set_lk_detect.html">DBENV->set_lk_detect</A>
-method call.&nbsp; The policy choices are:</LI>
-
-<UL>
-<LI>
-<B>default</B> selects the DB_LOCK_DEFAULT policy for default detection</LI>
-
-<LI>
-<B>oldest </B>selects DB_LOCK_OLDEST to abort the oldest locker on a deadlock</LI>
-
-<LI>
-<B>random</B> selects DB_LOCK_RANDOM to abort a random locker on a deadlock</LI>
-
-<LI>
-<B>youngest</B> selects DB_LOCK_YOUNGEST to abort the youngest locker on
-a deadlock</LI>
-</UL>
-
-<LI>
-<B>-lock_max </B>sets the maximum size of the lock table to <B><I>max </I></B>using
-the <A HREF="../../docs/api_c/env_set_lk_max.html">DBENV->set_lk_max</A>
-method call</LI>
-
-<LI>
-<B>-lock_max_locks </B>sets the maximum number of locks to <B><I>max </I></B>using
-the <A HREF="../../docs/api_c/env_set_lk_max_locks.html">DBENV->set_lk_max_locks</A>
-method call</LI>
-
-<LI>
-<B>-lock_max_lockers </B>sets the maximum number of locking entities to
-<B><I>max </I></B>using the <A HREF="../../docs/api_c/env_set_lk_max_lockers.html">DBENV->set_lk_max_lockers</A>
-method call</LI>
-
-<LI>
-<B>-lock_max_objects </B>sets the maximum number of simultaneously locked
-objects to <B><I>max </I></B>using the <A HREF="../../docs/api_c/env_set_lk_max_objects.html">DBENV->set_lk_max_objects</A>
-method call</LI>
-
-<LI>
-<B>-txn_max </B>sets the maximum size of the transaction table to <B><I>max</I></B>
-using the <A HREF="../../docs/api_c/env_set_txn_max.html">DBENV->set_txn_max</A>
-method call</LI>
-
-<LI>
-<B>-client_timeout</B> sets the timeout value for the client waiting for
-a reply from the server for RPC operations to <B><I>seconds</I></B>.</LI>
-
-<LI>
-<B>-server_timeout</B> sets the timeout value for the server to determine
-an idle client is gone to <B><I>seconds</I></B>.</LI>
-
-<LI>
-<B>&nbsp;-server </B>specifies the <B><I>hostname</I></B> of the server
-to connect to in the <A HREF="../../docs/api_c/env_set_server.html">DBENV->set_server</A>
-call.</LI>
-</UL>
-This command will invoke the <A HREF="../../docs/api_c/env_create.html">db_env_create</A>
+<ul>
+<li>
+<b>-cdb</b> selects the DB_INIT_CDB flag for Concurrent Data Store</li>
+
+<li>
+<b>-cdb_alldb</b> selects the DB_CDB_ALLDB flag for Concurrent Data Store</li>
+
+<li>
+<b>-lock</b> selects the DB_INIT_LOCK flag for the locking subsystem</li>
+
+<li>
+<b>-log</b> selects the DB_INIT_LOG flag for the logging subsystem</li>
+
+<li>
+<b>-txn</b> selects the DB_INIT_TXN, DB_INIT_LOCK and DB_INIT_LOG flags
+for the transaction subsystem.&nbsp; If <b>nosync</b> is specified, then
+it will also select DB_TXN_NOSYNC to indicate no flushes of log on commits</li>
+
+<li>
+<b>-create </b>selects the DB_CREATE flag to create underlying files</li>
+
+<li>
+<b>-home <i>directory </i></b>selects the home directory of the environment</li>
+
+<li>
+<b>-data_dir <i>directory </i></b>selects the data file directory of the
+environment by calling <a href="../../docs/api_c/env_set_data_dir.html">DBENV->set_data_dir</a>.</li>
+
+<li>
+<b>-log_dir <i>directory </i></b>selects the log file directory of the
+environment&nbsp; by calling <a href="../../docs/api_c/env_set_lg_dir.html">DBENV->set_lg_dir</a>.</li>
+
+<li>
+<b>-tmp_dir <i>directory </i></b>selects the temporary file directory of
+the environment&nbsp; by calling <a href="../../docs/api_c/env_set_tmp_dir.so">DBENV->set_tmp_dir</a>.</li>
+
+<li>
+<b>-mode <i>mode </i></b>sets the permissions of created files to <b><i>mode</i></b></li>
+
+<li>
+<b>-nommap</b> selects the DB_NOMMAP flag to disallow using mmap'ed files</li>
+
+<li>
+<b>-private</b> selects the DB_PRIVATE flag for a private environment</li>
+
+<li>
+<b>-recover</b> selects the DB_RECOVER flag for recovery</li>
+
+<li>
+<b>-recover_fatal</b> selects the DB_RECOVER_FATAL flag for catastrophic
+recovery</li>
+
+<li>
+<b>-system_mem</b> selects the DB_SYSTEM_MEM flag to use system memory</li>
+
+<li>
+<b>-errfile </b>specifies the error file to use for this environment to
+<b><i>filename</i></b>
+by calling <a href="../../docs/api_c/env_set_errfile.html">DBENV->set_errfile</a><b><i>.
+</i></b>If
+the file already exists then we will append to the end of the file</li>
+
+<li>
+<b>-use_environ</b> selects the DB_USE_ENVIRON flag to affect file naming</li>
+
+<li>
+<b>-use_environ_root</b> selects the DB_USE_ENVIRON_ROOT flag to have the
+root environment affect file naming</li>
+
+<li>
+<b>-verbose</b> produces verbose error output for the given <b><i>which</i></b> subsystem,
+using the <a href="../../docs/api_c/dbenv_set_verbose.html">DBENV->set_verbose</a>
+method.&nbsp;&nbsp; See the description of <a href="#> <env> verbose which on|off">verbose</a>
+below for valid <b><i>which </i></b>values</li>
+
+<li>
+<b>-region_init </b>specifies that the user wants to page fault the region
+in on startup using the <a href="../../docs/api_c/env_set_region_init.html">DBENV->set_region_init</a>
+method call</li>
+
+<li>
+<b>-cachesize </b>sets the size of the database cache to the size&nbsp;
+specified by <b><i>gbytes </i></b>and <b><i>bytes, </i></b>broken up into
+<b><i>ncaches</i></b>
+number of caches using the <a href="../../docs/api_c/env_set_cachesize.html">DBENV->set_cachesize</a>
+method</li>
+
+<li>
+<b>-mmapsize </b>sets the size of the database page to <b><i>size </i></b>using
+the <a href="../../docs/api_c/env_set_mp_mmapsize.html">DBENV->set_mp_mmapsize</a>
+method</li>
+
+<li>
+<b>-log_max </b>sets the maximum size of the log file to <b><i>max</i></b>
+using the <a href="../../docs/api_c/env_set_lg_max.html">DBENV->set_lg_max</a>
+call</li>
+
+<li>
+<b>-log_regionmax </b>sets the size of the log region to <b><i>max</i></b>
+using the <a href="../../docs/api_c/env_set_lg_regionmax.html">DBENV->set_lg_regionmax</a>
+call</li>
+
+<li>
+<b>-log_buffer </b>sets the size of the log file in bytes to <b><i>size</i></b>
+using the <a href="../../docs/api_c/env_set_lg_bsize.html">DBENV->set_lg_bsize</a>
+call</li>
+
+<li>
+<b>-lock_conflict </b>sets the number of lock modes to <b><i>nmodes</i></b>
+and sets the locking policy for those modes to the <b><i>conflict_matrix</i></b>
+given using the <a href="../../docs/api_c/env_set_lk_conflict.html">DBENV->set_lk_conflict</a>
+method call</li>
+
+<li>
+<b>-lock_detect </b>sets the deadlock detection policy to the given policy
+using the <a href="../../docs/env_set_lk_detect.html">DBENV->set_lk_detect</a>
+method call.&nbsp; The policy choices are:</li>
+
+<ul>
+<li>
+<b>default</b> selects the DB_LOCK_DEFAULT policy for default detection</li>
+
+<li>
+<b>oldest </b>selects DB_LOCK_OLDEST to abort the oldest locker on a deadlock</li>
+
+<li>
+<b>random</b> selects DB_LOCK_RANDOM to abort a random locker on a deadlock</li>
+
+<li>
+<b>youngest</b> selects DB_LOCK_YOUNGEST to abort the youngest locker on
+a deadlock</li>
+</ul>
+
+<li>
+<b>-lock_max </b>sets the maximum size of the lock table to <b><i>max </i></b>using
+the <a href="../../docs/api_c/env_set_lk_max.html">DBENV->set_lk_max</a>
+method call</li>
+
+<li>
+<b>-lock_max_locks </b>sets the maximum number of locks to <b><i>max </i></b>using
+the <a href="../../docs/api_c/env_set_lk_max_locks.html">DBENV->set_lk_max_locks</a>
+method call</li>
+
+<li>
+<b>-lock_max_lockers </b>sets the maximum number of locking entities to
+<b><i>max
+</i></b>using the <a href="../../docs/api_c/env_set_lk_max_lockers.html">DBENV->set_lk_max_lockers</a>
+method call</li>
+
+<li>
+<b>-lock_max_objects </b>sets the maximum number of simultaneously locked
+objects to <b><i>max </i></b>using the <a href="../../docs/api_c/env_set_lk_max_objects.html">DBENV->set_lk_max_objects</a>
+method call</li>
+
+<li>
+<b>-lock_timeout </b>sets the timeout for locks in the environment</li>
+
+<li>
+<b>-overwrite </b>selects the DB_OVERWRITE flag</li>
+
+<li>
+<b>-txn_max </b>sets the maximum size of the transaction table to <b><i>max</i></b>
+using the <a href="../../docs/api_c/env_set_txn_max.html">DBENV->set_txn_max</a>
+method call</li>
+
+<li>
+<b>-txn_timeout </b>sets the timeout for transactions in the environment</li>
+
+<li>
+<b>-client_timeout</b> sets to <b><i>seconds</i></b> the timeout value the client
+uses when waiting for a reply from the server for RPC operations.</li>
+
+<li>
+<b>-server_timeout</b> sets to <b><i>seconds</i></b> the timeout value the server
+uses to determine that an idle client is gone.</li>
+
+<li>
+<b>-server </b>specifies the <b><i>hostname</i></b> of the server
+to connect to in the <a href="../../docs/api_c/env_set_server.html">DBENV->set_server</a>
+call.</li>
+
+<li>
+<b>-rep_client </b>sets the newly created environment to be a
+replication client, using the <a href="../../docs/api_c/rep_client.html">
+DBENV->rep_client</a> call.</li>
+
+<li>
+<b>-rep_master </b>sets the newly created environment to be a
+replication master, using the <a href="../../docs/api_c/rep_master.html">
+DBENV->rep_master</a> call.</li>
+
+<li>
+<b>-rep_transport </b>specifies the replication transport function,
+using the
+<a href="../../docs/api_c/rep_transport.html">DBENV->set_rep_transport</a>
+call. This site's machine ID is set to <b><i>machineid</i></b> and
+the send function, a Tcl proc, is set to <b><i>sendproc</i></b>.</li>
+
+</ul>
+
+This command will invoke the <a href="../../docs/api_c/env_create.html">db_env_create</a>
function.&nbsp; After it successfully gets a handle to an environment,
-we bind it to a new Tcl command of the form <B><I>envX</I></B>, where X
-is an integer starting at&nbsp; 0 (e.g. <B>env0, env1, </B>etc).&nbsp;
-We use the <I>Tcl_CreateObjCommand()</I> to create the top level environment
+we bind it to a new Tcl command of the form <b><i>envX</i></b>, where X
+is an integer starting at&nbsp; 0 (e.g. <b>env0, env1, </b>etc).&nbsp;
+We use the <i>Tcl_CreateObjCommand()</i> to create the top level environment
command function.&nbsp; It is through this handle that the user can access
-all the commands described in the <A HREF="#Environment Commands">Environment
-Commands</A> section.&nbsp; Internally, the handle we get back from DB
-will be stored as the <I>ClientData</I> portion of the new command set
+all the commands described in the <a href="#Environment Commands">Environment
+Commands</a> section.&nbsp; Internally, the handle we get back from DB
+will be stored as the <i>ClientData</i> portion of the new command set
so that all future environment calls will have that handle readily available.&nbsp;
-Then we call the <A HREF="../../docs/api_c/env_open.html">DBENV->open</A>
+Then we call the <a href="../../docs/api_c/env_open.html">DBENV->open</a>
method call and possibly some number of setup calls as described above.
-<P>
-<HR WIDTH="100%">
-<BR><A NAME="> <env> verbose which on|off"></A><B>> &lt;env> verbose <I>which</I>
-on|off</B>
-<P>This command controls the use of debugging output for the environment.&nbsp;
-This command directly translates to a call to the <A HREF="../../docs/api_c/dbenv_set_verbose.html">DBENV->set_verbose</A>
+<p>
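+For example, a hypothetical invocation (the flags and home directory shown
+here are only illustrative) that creates an environment and receives its
+handle:
+<pre>
+% berkdb env -create -home /tmp/dbhome
+env0
+</pre>
+All of the per-environment commands below are then invoked through the
+returned <b>env0</b> handle.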
+<hr WIDTH="100%">
+<br><a NAME="> <env> verbose which on|off"></a><b>> &lt;env> verbose <i>which</i>
+on|off</b>
+<p>This command controls the use of debugging output for the environment.&nbsp;
+This command directly translates to a call to the <a href="../../docs/api_c/dbenv_set_verbose.html">DBENV->set_verbose</a>
method call.&nbsp; It returns either a 0 (for success), a DB error message
or it throws a Tcl error with a system message.&nbsp; The user specifies
-<B><I>which</I></B>
+<b><i>which</i></b>
subsystem to control, and indicates whether debug messages should be turned
-<B>on</B>
-or <B>off</B> for that subsystem.&nbsp; The value of <B><I>which</I></B>
+<b>on</b>
+or <b>off</b> for that subsystem.&nbsp; The value of <b><i>which</i></b>
must be one of the following:
-<UL>
-<LI>
-<B>chkpt</B> - Chooses the checkpointing code by using the DB_VERB_CHKPOINT
-value</LI>
-
-<LI>
-<B>deadlock </B>- Chooses the deadlocking code by using the DB_VERB_DEADLOCK
-value</LI>
-
-<LI>
-<B>recovery </B>- Chooses the recovery code by using the DB_VERB_RECOVERY
-value</LI>
-
-<LI>
-<B>wait </B>- Chooses the waitsfor code by using the DB_VERB_WAITSFOR value</LI>
-</UL>
-
-<HR WIDTH="100%">
-<P><A NAME="> <env> close"></A><B>> &lt;env> close</B>
-<P>This command closes an environment and deletes the handle.&nbsp; This
-command directly translates to a call to the <A HREF="../../docs/api_c/env_close.html">DBENV->close</A>
+<ul>
+<li>
+<b>chkpt</b> - Chooses the checkpointing code by using the DB_VERB_CHKPOINT
+value</li>
+
+<li>
+<b>deadlock </b>- Chooses the deadlocking code by using the DB_VERB_DEADLOCK
+value</li>
+
+<li>
+<b>recovery </b>- Chooses the recovery code by using the DB_VERB_RECOVERY
+value</li>
+
+<li>
+<b>wait </b>- Chooses the waitsfor code by using the DB_VERB_WAITSFOR value</li>
+</ul>
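+
+<p>For example, on a hypothetical open handle <b>env0</b>, deadlock
+debugging output could be toggled with:
+<pre>
+% env0 verbose deadlock on
+0
+% env0 verbose deadlock off
+0
+</pre>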
+
+<hr WIDTH="100%">
+<p><a NAME="> <env> close"></a><b>> &lt;env> close</b>
+<p>This command closes an environment and deletes the handle.&nbsp; This
+command directly translates to a call to the <a href="../../docs/api_c/env_close.html">DBENV->close</a>
method call.&nbsp; It returns either a 0 (for success), a DB error message
or it throws a Tcl error with a system message.
-<P>Additionally, since the handle is no longer valid, we will call <I>Tcl_DeleteCommand()
-</I>so
+<p>Additionally, since the handle is no longer valid, we will call <i>Tcl_DeleteCommand()
+</i>so
that further uses of the handle will be dealt with properly by Tcl itself.
-<P>Also, the close command will automatically abort any <A HREF="txn.html">transactions</A>
-and close any <A HREF="mpool.html">mpool</A> memory files.&nbsp; As such
+<p>Also, the close command will automatically abort any <a href="txn.html">transactions</a>
+and close any <a href="mpool.html">mpool</a> memory files.&nbsp; As such
we must maintain a list of open transaction and mpool handles so that we
-can call <I>Tcl_DeleteCommand</I> on those as well.
-<P>
-<HR WIDTH="100%">
-<BR><B>> berkdb envremove [-data_dir <I>directory</I>] [-force] [-home
-<I>directory</I>]
--log_dir <I>directory</I>] [-tmp_dir <I>directory</I>] [-use_environ] [-use_environ_root]</B>
-<P>This command removes the environment if it is not in use and deletes
-the handle.&nbsp; This command directly translates to a call to the <A HREF="../../docs/api_c/env_remove.html">DBENV->remove</A>
+can call <i>Tcl_DeleteCommand</i> on those as well.
+<p>
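+For example, closing a hypothetical handle <b>env0</b>:
+<pre>
+% env0 close
+0
+</pre>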
+<hr WIDTH="100%">
+
+<b>> berkdb envremove<br>
+[-data_dir <i>directory</i>]<br>
+[-force]<br>
+[-home <i>directory</i>]<br>
+[-log_dir <i>directory</i>]<br>
+[-overwrite]<br>
+[-tmp_dir <i>directory</i>]<br>
+[-use_environ]<br>
+[-use_environ_root]</b>
+
+<p>This command removes the environment if it is not in use and deletes
+the handle.&nbsp; This command directly translates to a call to the <a href="../../docs/api_c/env_remove.html">DBENV->remove</a>
method call.&nbsp; It returns either a 0 (for success), a DB error message
or it throws a Tcl error with a system message.&nbsp; The arguments are:
-<UL>
-<LI>
-<B>-force</B> selects the DB_FORCE flag to remove even if other processes
-have the environment open</LI>
+<ul>
+<li>
+<b>-force</b> selects the DB_FORCE flag to remove even if other processes
+have the environment open</li>
+
+<li>
+<b>-home <i>directory</i> </b>specifies the home directory of the environment</li>
-<LI>
-<B>-home <I>directory</I> </B>specifies the home directory of the environment</LI>
+<li>
+<b>-data_dir <i>directory </i></b>selects the data file directory of the
+environment by calling <a href="../../docs/api_c/env_set_data_dir.html">DBENV->set_data_dir</a>.</li>
-<LI>
-<B>-data_dir <I>directory </I></B>selects the data file directory of the
-environment by calling <A HREF="../../docs/api_c/env_set_data_dir.html">DBENV->set_data_dir</A>.</LI>
+<li>
+<b>-log_dir <i>directory </i></b>selects the log file directory of the
+environment&nbsp; by calling <a href="../../docs/api_c/env_set_lg_dir.html">DBENV->set_lg_dir</a>.</li>
-<LI>
-<B>-log_dir <I>directory </I></B>selects the log file directory of the
-environment&nbsp; by calling <A HREF="../../docs/api_c/env_set_lg_dir.html">DBENV->set_lg_dir</A>.</LI>
+<li>
+<b>-overwrite </b>sets the DB_OVERWRITE flag</li>
-<LI>
-<B>-tmp_dir <I>directory </I></B>selects the temporary file directory of
-the environment&nbsp; by calling <A HREF="../../docs/api_c/env_set_tmp_dir.so">DBENV->set_tmp_dir</A>.</LI>
+<li>
+<b>-tmp_dir <i>directory </i></b>selects the temporary file directory of
+the environment&nbsp; by calling <a href="../../docs/api_c/env_set_tmp_dir.html">DBENV->set_tmp_dir</a>.</li>
-<LI>
-<B>-use_environ </B>selects the DB_USE_ENVIRON flag to affect file naming</LI>
+<li>
+<b>-use_environ </b>selects the DB_USE_ENVIRON flag to affect file naming</li>
-<LI>
-<B>-use_environ_root</B> selects the DB_USE_ENVIRON_ROOT flag to affect
-file naming</LI>
-</UL>
+<li>
+<b>-use_environ_root</b> selects the DB_USE_ENVIRON_ROOT flag to affect
+file naming</li>
+</ul>
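+
+<p>For example, forcibly removing an unused environment whose home
+directory is (hypothetically) <b>/tmp/dbhome</b>:
+<pre>
+% berkdb envremove -home /tmp/dbhome -force
+0
+</pre>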
-</BODY>
-</HTML>
+</body>
+</html>
diff --git a/bdb/tcl/docs/historic.html b/bdb/tcl/docs/historic.html
index 216dc456b72..85f474fbc0f 100644
--- a/bdb/tcl/docs/historic.html
+++ b/bdb/tcl/docs/historic.html
@@ -1,4 +1,5 @@
-<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<!--Copyright 1999-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
<HTML>
<HEAD>
<META HTTP-EQUIV="Content-Type" CONTENT="text/html; charset=iso-8859-1">
diff --git a/bdb/tcl/docs/index.html b/bdb/tcl/docs/index.html
index 2866c1e23db..845b6ca81e2 100644
--- a/bdb/tcl/docs/index.html
+++ b/bdb/tcl/docs/index.html
@@ -1,4 +1,5 @@
-<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<!--Copyright 1999-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
<HTML>
<HEAD>
<META HTTP-EQUIV="Content-Type" CONTENT="text/html; charset=iso-8859-1">
@@ -29,6 +30,9 @@ Complete Tcl Interface for Berkeley DB</H1></CENTER>
<A HREF="./mpool.html">Memory Pool commands</A></LI>
<LI>
+<A HREF="./rep.html">Replication commands</A></LI>
+
+<LI>
<A HREF="./txn.html">Transaction commands</A></LI>
</UL>
diff --git a/bdb/tcl/docs/library.html b/bdb/tcl/docs/library.html
index abd656d8e5d..bfb1588c3f2 100644
--- a/bdb/tcl/docs/library.html
+++ b/bdb/tcl/docs/library.html
@@ -1,4 +1,5 @@
-<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<!--Copyright 1999-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
<HTML>
<HEAD>
<META HTTP-EQUIV="Content-Type" CONTENT="text/html; charset=iso-8859-1">
diff --git a/bdb/tcl/docs/lock.html b/bdb/tcl/docs/lock.html
index 87a20e9a6bf..d65142b798b 100644
--- a/bdb/tcl/docs/lock.html
+++ b/bdb/tcl/docs/lock.html
@@ -1,187 +1,207 @@
-<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
-<HTML>
-<HEAD>
- <META HTTP-EQUIV="Content-Type" CONTENT="text/html; charset=iso-8859-1">
- <META NAME="GENERATOR" CONTENT="Mozilla/4.08 [en] (X11; I; FreeBSD 2.2.8-19990120-SNAP i386) [Netscape]">
-</HEAD>
-<BODY>
-
-<H2>
-<A NAME="Locking Commands"></A>Locking Commands</H2>
+<!--Copyright 1999-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+ <meta http-equiv="Content-Type" content="text/html; charset=iso-8859-1">
+ <meta name="GENERATOR" content="Mozilla/4.75 [en] (X11; U; Linux 2.2.16-22 i686) [Netscape]">
+</head>
+<body>
+
+<h2>
+<a NAME="Locking Commands"></a>Locking Commands</h2>
Most locking commands work with the environment handle.&nbsp; However,
when a user gets a lock we create a new lock handle that they then use,
in a manner similar to all the other handles, to release the lock.&nbsp;
We present the general locking functions first, and then those that manipulate
locks.
-<P><B>> &lt;env> lock_detect [-lock_conflict] [default|oldest|youngest|random]</B>
-<P>This command runs the deadlock detector.&nbsp; It directly translates
-to the <A HREF="../../docs/api_c/lock_detect.html">lock_detect</A> DB call.&nbsp;
+<p><b>> &lt;env> lock_detect [default|oldest|youngest|random]</b>
+<p>This command runs the deadlock detector.&nbsp; It directly translates
+to the <a href="../../docs/api_c/lock_detect.html">lock_detect</a> DB call.&nbsp;
It returns either a 0 (for success), a DB error message or it throws a
Tcl error with a system message.&nbsp; The first argument sets the policy
for deadlock as follows:
-<UL>
-<LI>
-<B>default</B> selects the DB_LOCK_DEFAULT policy for default detection
-(default if not specified)</LI>
-
-<LI>
-<B>oldest </B>selects DB_LOCK_OLDEST to abort the oldest locker on a deadlock</LI>
-
-<LI>
-<B>random</B> selects DB_LOCK_RANDOM to abort a random locker on a deadlock</LI>
-
-<LI>
-<B>youngest</B> selects DB_LOCK_YOUNGEST to abort the youngest locker on
-a deadlock</LI>
-</UL>
-The second argument, <B>-lock_conflict</B>, selects the DB_LOCK_CONFLICT
-flag to only run the detector if a lock conflict has occurred since the
-last time the detector was run.
-<HR WIDTH="100%">
-<BR><B>> &lt;env> lock_stat</B>
-<P>This command returns a list of name/value pairs where the names correspond
+<ul>
+<li>
+<b>default</b> selects the DB_LOCK_DEFAULT policy for default detection
+(default if not specified)</li>
+
+<li>
+<b>oldest </b>selects DB_LOCK_OLDEST to abort the oldest locker on a deadlock</li>
+
+<li>
+<b>random</b> selects DB_LOCK_RANDOM to abort a random locker on a deadlock</li>
+
+<li>
+<b>youngest</b> selects DB_LOCK_YOUNGEST to abort the youngest locker on
+a deadlock</li>
+</ul>
+
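+<p>For example, running the detector with the default policy on a
+hypothetical environment handle <b>env0</b>:
+<pre>
+% env0 lock_detect default
+0
+</pre>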
+<hr WIDTH="100%">
+<br><b>> &lt;env> lock_stat</b>
+<p>This command returns a list of name/value pairs where the names correspond
to the C-structure field names of DB_LOCK_STAT and the values are the data
-returned.&nbsp; This command is a direct translation of the <A HREF="../../docs/api_c/lock_stat.html">lock_stat</A>
+returned.&nbsp; This command is a direct translation of the <a href="../../docs/api_c/lock_stat.html">lock_stat</a>
DB call.
-<HR WIDTH="100%">
-<BR><A NAME="> <env> lock_id"></A><B>> &lt;env> lock_id</B>
-<P>This command returns a unique locker ID value.&nbsp; It directly translates
-to the <A HREF="../../docs/api_c/lock_id.html">lock_id</A> DB call.
-<HR WIDTH="100%">
-<BR><A NAME="> <env> lock_get"></A><B>> &lt;env> lock_get [-nowait]<I>lockmode
-locker obj</I></B>
-<P>This command gets a lock. It will invoke the <A HREF="../../docs/api_c/lock_get.html">lock_get</A>
+<hr WIDTH="100%">
+<br><a NAME="> <env> lock_id"></a><b>> &lt;env> lock_id</b>
+<p>This command returns a unique locker ID value.&nbsp; It directly translates
+to the <a href="../../docs/api_c/lock_id.html">lock_id</a> DB call.
+<br>
+<hr WIDTH="100%">
+<br><a NAME="> <env> lock_id"></a><b>> &lt;env> lock_id_free&nbsp; </b><i>locker</i>
+<p>This command frees the locker allocated by the lock_id call. It directly
+translates to the <a href="../../docs/api_c/lock_id.html">lock_id_free
+</a>DB
+call.
+<hr WIDTH="100%">
+<br><a NAME="> <env> lock_id"></a><b>> &lt;env> lock_id_set&nbsp; </b><i>current
+max</i>
+<p>This is a diagnostic command to set the locker id that will get
+allocated next and the maximum id that will trigger the id reclaim
+algorithm.
+<hr WIDTH="100%">
+<br><a NAME="> <env> lock_get"></a><b>> &lt;env> lock_get [-nowait] <i>lockmode
+locker obj</i></b>
+<p>This command gets a lock. It will invoke the <a href="../../docs/api_c/lock_get.html">lock_get</a>
function.&nbsp; After it successfully gets a handle to a lock, we bind
-it to a new Tcl command of the form <B><I>$env.lockX</I></B>, where X is
-an integer starting at&nbsp; 0 (e.g. <B>$env.lock0, $env.lock1, </B>etc).&nbsp;
-We use the <I>Tcl_CreateObjCommand()</I> to create the top level locking
+it to a new Tcl command of the form <b><i>$env.lockX</i></b>, where X is
+an integer starting at&nbsp; 0 (e.g. <b>$env.lock0, $env.lock1, </b>etc).&nbsp;
+We use the <i>Tcl_CreateObjCommand()</i> to create the top level locking
command function.&nbsp; It is through this handle that the user can release
the lock.&nbsp; Internally, the handle we get back from DB will be stored
-as the <I>ClientData</I> portion of the new command set so that future
+as the <i>ClientData</i> portion of the new command set so that future
locking calls will have that handle readily available.
-<P>The arguments are:
-<UL>
-<LI>
-<B><I>locker</I></B> specifies the locker ID returned from the <A HREF="#> <env> lock_id">lock_id</A>
-command</LI>
+<p>The arguments are:
+<ul>
+<li>
+<b><i>locker</i></b> specifies the locker ID returned from the <a href="#> <env> lock_id">lock_id</a>
+command</li>
-<LI>
-<B><I>obj</I></B> specifies an object to lock</LI>
+<li>
+<b><i>obj</i></b> specifies an object to lock</li>
-<LI>
-the <B><I>lock mode</I></B> is specified as one of the following:</LI>
+<li>
+the <b><i>lock mode</i></b> is specified as one of the following:</li>
-<UL>
-<LI>
-<B>ng </B>specifies DB_LOCK_NG for not granted (always 0)</LI>
+<ul>
+<li>
+<b>ng </b>specifies DB_LOCK_NG for not granted (always 0)</li>
-<LI>
-<B>read</B> specifies DB_LOCK_READ for a read (shared) lock</LI>
+<li>
+<b>read</b> specifies DB_LOCK_READ for a read (shared) lock</li>
-<LI>
-<B>write</B> specifies DB_LOCK_WRITE for an exclusive write lock</LI>
+<li>
+<b>write</b> specifies DB_LOCK_WRITE for an exclusive write lock</li>
-<LI>
-<B>iwrite </B>specifies DB_LOCK_IWRITE for intent for exclusive write lock</LI>
+<li>
+<b>iwrite </b>specifies DB_LOCK_IWRITE for intent for exclusive write lock</li>
-<LI>
-<B>iread </B>specifies DB_LOCK_IREAD for intent for shared read lock</LI>
+<li>
+<b>iread </b>specifies DB_LOCK_IREAD for intent for shared read lock</li>
-<LI>
-<B>iwr </B>specifies DB_LOCK_IWR for intent for eread and write lock</LI>
-</UL>
+<li>
+<b>iwr </b>specifies DB_LOCK_IWR for intent for read and write lock</li>
+</ul>
-<LI>
-<B>-nowait</B> selects the DB_LOCK_NOWAIT to indicate that we do not want
-to wait on the lock</LI>
-</UL>
+<li>
+<b>-nowait</b> selects the DB_LOCK_NOWAIT to indicate that we do not want
+to wait on the lock</li>
+</ul>
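+
+<p>A minimal, hypothetical sequence that allocates a locker ID, acquires a
+read lock on an object, and releases it through the returned lock handle
+(the locker ID and handle name shown are only illustrative):
+<pre>
+% set locker [env0 lock_id]
+1
+% set lock [env0 lock_get read $locker my_object]
+env0.lock0
+% $lock put
+0
+</pre>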
-<HR WIDTH="100%">
-<BR><B>> &lt;lock> put</B>
-<P>This command releases the lock referenced by the command.&nbsp; It is
-a direct translation of the <A HREF="../../docs/api_c/lock_put.html">lock_put</A>
+<hr WIDTH="100%">
+<br><b>> &lt;lock> put</b>
+<p>This command releases the lock referenced by the command.&nbsp; It is
+a direct translation of the <a href="../../docs/api_c/lock_put.html">lock_put</a>
function.&nbsp; It returns either a 0 (for success), a DB error message
or it throws a Tcl error with a system message.&nbsp; Additionally, since
the handle is no longer valid, we will call
-<I>Tcl_DeleteCommand()
-</I>so
+<i>Tcl_DeleteCommand()
+</i>so
that further uses of the handle will be dealt with properly by Tcl itself.
-<BR>
-<HR WIDTH="100%">
-<BR><A NAME="> <env> lock_vec"></A><B>> &lt;env> lock_vec [-nowait] <I>locker
-</I>{get|put|put_all|put_obj
-[<I>obj</I>] [<I>lockmode</I>] [<I>lock</I>]} ...</B>
-<P>This command performs a series of lock calls.&nbsp; It is a direct translation
-of the <A HREF="../../docs/api_c/lock_vec.html">lock_vec</A> function.&nbsp;
+<br>
+<hr WIDTH="100%">
+<br><a NAME="> <env> lock_vec"></a><b>> &lt;env> lock_vec [-nowait] <i>locker
+</i>{get|put|put_all|put_obj
+[<i>obj</i>] [<i>lockmode</i>] [<i>lock</i>]} ...</b>
+<p>This command performs a series of lock calls.&nbsp; It is a direct translation
+of the <a href="../../docs/api_c/lock_vec.html">lock_vec</a> function.&nbsp;
This command will return a list of the return values from each operation
specified in the argument list.&nbsp; For the 'put' operations the entry
in the return value list is either a 0 (for success) or an error.&nbsp;
-For the 'get' operation, the entry is the lock widget handle, <B>$env.lockN</B>
-(as described above in <A HREF="#> <env> lock_get">&lt;env> lock_get</A>)
+For the 'get' operation, the entry is the lock widget handle, <b>$env.lockN</b>
+(as described above in <a href="#> <env> lock_get">&lt;env> lock_get</a>)
or an error.&nbsp; If an error occurs, the return list will contain the
return values for all the successful operations up to the erroneous one and
the error code for that operation.&nbsp; Subsequent operations will be
ignored.
-<P>As for the other operations, if we are doing a 'get' we will create
+<p>As for the other operations, if we are doing a 'get' we will create
the commands and if we are doing a 'put' we will have to delete the commands.&nbsp;
Additionally, we will have to do this after the call to the DB lock_vec
and iterate over the results, creating and/or deleting Tcl commands.&nbsp;
It is possible that we may return a lock widget from a get operation that
-is considered invalid, if, for instance, there was a <B>put_all</B> operation
+is considered invalid, if, for instance, there was a <b>put_all</b> operation
performed later in the vector of operations.&nbsp; The arguments are:
-<UL>
-<LI>
-<B><I>locker</I></B> specifies the locker ID returned from the <A HREF="#> <env> lock_id">lock_id</A>
-command</LI>
+<ul>
+<li>
+<b><i>locker</i></b> specifies the locker ID returned from the <a href="#> <env> lock_id">lock_id</a>
+command</li>
-<LI>
-<B>-nowait</B> selects the DB_LOCK_NOWAIT to indicate that we do not want
-to wait on the lock</LI>
+<li>
+<b>-nowait</b> selects the DB_LOCK_NOWAIT to indicate that we do not want
+to wait on the lock</li>
-<LI>
+<li>
the lock vectors are tuples consisting of {an operation, lock object, lock
-mode, lock handle} where what is required is based on the operation desired:</LI>
-
-<UL>
-<LI>
-<B>get</B> specifes DB_LOCK_GET to get a lock.&nbsp; Requires a tuple <B>{get
-<I>obj</I>
-<I>mode</I>}
-</B>where
-<B><I>mode</I></B>
-is:</LI>
-
-<UL>
-<LI>
-<B>ng </B>specifies DB_LOCK_NG for not granted (always 0)</LI>
-
-<LI>
-<B>read</B> specifies DB_LOCK_READ for a read (shared) lock</LI>
-
-<LI>
-<B>write</B> specifies DB_LOCK_WRITE for an exclusive write lock</LI>
-
-<LI>
-<B>iwrite </B>specifies DB_LOCK_IWRITE for intent for exclusive write lock</LI>
-
-<LI>
-<B>iread </B>specifies DB_LOCK_IREAD for intent for shared read lock</LI>
-
-<LI>
-<B>iwr </B>specifies DB_LOCK_IWR for intent for eread and write lock</LI>
-</UL>
-
-<LI>
-<B>put</B> specifies DB_LOCK_PUT to release a <B><I>lock</I></B>.&nbsp;
-Requires a tuple <B>{put <I>lock}</I></B></LI>
-
-<LI>
-<B>put_all </B>specifies DB_LOCK_PUT_ALL to release all locks held by <B><I>locker</I></B>.&nbsp;
-Requires a tuple <B>{put_all}</B></LI>
-
-<LI>
-<B>put_obj</B> specifies DB_LOCK_PUT_OBJ to release all locks held by <B><I>locker</I></B>
-associated with the given <B><I>obj</I></B>.&nbsp; Requires a tuple <B>{put_obj
-<I>obj</I>}</B></LI>
-</UL>
-</UL>
+mode, lock handle} where what is required is based on the operation desired:</li>
+
+<ul>
+<li>
+<b>get</b> specifies DB_LOCK_GET to get a lock.&nbsp; Requires a tuple <b>{get
+<i>obj mode</i>}
+</b>where
+<b><i>mode</i></b>
+is:</li>
+
+<ul>
+<li>
+<b>ng </b>specifies DB_LOCK_NG for not granted (always 0)</li>
+
+<li>
+<b>read</b> specifies DB_LOCK_READ for a read (shared) lock</li>
+
+<li>
+<b>write</b> specifies DB_LOCK_WRITE for an exclusive write lock</li>
+
+<li>
+<b>iwrite </b>specifies DB_LOCK_IWRITE for intent for exclusive write lock</li>
+
+<li>
+<b>iread </b>specifies DB_LOCK_IREAD for intent for shared read lock</li>
+
+<li>
+<b>iwr </b>specifies DB_LOCK_IWR for intent for read and write lock</li>
+</ul>
+
+<li>
+<b>put</b> specifies DB_LOCK_PUT to release a <b><i>lock</i></b>.&nbsp;
+Requires a tuple <b>{put <i>lock}</i></b></li>
+
+<li>
+<b>put_all </b>specifies DB_LOCK_PUT_ALL to release all locks held by <b><i>locker</i></b>.&nbsp;
+Requires a tuple <b>{put_all}</b></li>
+
+<li>
+<b>put_obj</b> specifies DB_LOCK_PUT_OBJ to release all locks held by <b><i>locker</i></b>
+associated with the given <b><i>obj</i></b>.&nbsp; Requires a tuple <b>{put_obj
+<i>obj}</i></b></li>
+</ul>
+</ul>
+
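+<p>For instance, a hypothetical vector that gets a read lock on one object
+and then releases all locks held by the locker (the returned handle name
+is only illustrative):
+<pre>
+% env0 lock_vec $locker {get my_object read} {put_all}
+env0.lock1 0
+</pre>
+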
+<hr WIDTH="100%">
+<br><a NAME="> <env> lock_timeout"></a><b>> &lt;env> lock_timeout <i>timeout</i></b>
+<p>This command sets the lock timeout for all future locks in this environment.&nbsp;
+The timeout is in microseconds.
+<br>&nbsp;
+<br>&nbsp;
+</body>
+</html>
diff --git a/bdb/tcl/docs/log.html b/bdb/tcl/docs/log.html
index 35ecfc2f5f5..49f2f0ad2e0 100644
--- a/bdb/tcl/docs/log.html
+++ b/bdb/tcl/docs/log.html
@@ -1,4 +1,5 @@
-<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<!--Copyright 1999-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
<HTML>
<HEAD>
<META HTTP-EQUIV="Content-Type" CONTENT="text/html; charset=iso-8859-1">
@@ -100,7 +101,7 @@ given <B><I>lsn</I></B></LI>
<HR WIDTH="100%">
<BR><A NAME="> <env> log_put"></A><B>> &lt;env> log_put<I> </I>[-checkpoint]
-[-curlsn] [-flush] <I>record</I></B>
+[-flush] <I>record</I></B>
<P>This command stores a <B><I>record</I></B> into the log and returns
the LSN of the log record.&nbsp; It is a direct call to the <A HREF="../../docs/api_c/log_put.html">log_put</A>
function.&nbsp; It returns either an LSN or it throws a Tcl error with
@@ -110,29 +111,10 @@ a system message.&nbsp;<B> </B>The arguments are:
<B>-checkpoint </B>selects the DB_CHECKPOINT flag</LI>
<LI>
-<B>-curlsn</B> selects the DB_CURLSN flag to return the LSN of the next
-record</LI>
-
-<LI>
<B>-flush </B>selects the DB_FLUSH flag to flush the log to disk.</LI>
</UL>
<HR WIDTH="100%">
-<BR><A NAME="> <env> log_register"></A><B>> &lt;env> log_register <I>db</I>
-<I>file</I></B>
-<P>This command registers a <B><I>file</I></B> and <B><I>db</I></B> with
-the log manager.&nbsp; It is a direct call to the <A HREF="../../docs/api_c/log_register.html">log_register</A>
-function.&nbsp; It returns either a 0 (for success), a DB error message
-or it throws a Tcl error with a system message.
-<BR>
-<HR WIDTH="100%">
-<BR><A NAME="> <env> log_unregister"></A><B>> &lt;env> log_unregister <I>db</I></B>
-<P>This command unregisters the file specified by the database handle <B><I>db
-</I></B>from the log manager.&nbsp; It is a direct call to the <A HREF="../../docs/api_c/log_unregister.html">log_unregister</A>
-function.&nbsp; It returns either a 0 (for success), a DB error message
-or it throws a Tcl error with a system message.
-<BR>
-<HR WIDTH="100%">
<BR><B>> &lt;env> log_stat</B>
<P>This command returns&nbsp; the statistics associated with the logging
subsystem.&nbsp; It is a direct call to the <A HREF="../../docs/api_c/log_stat.html">log_stat</A>
diff --git a/bdb/tcl/docs/mpool.html b/bdb/tcl/docs/mpool.html
index 666219306ca..7f2359b36e9 100644
--- a/bdb/tcl/docs/mpool.html
+++ b/bdb/tcl/docs/mpool.html
@@ -1,4 +1,5 @@
-<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<!--Copyright 1999-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
<HTML>
<HEAD>
<META HTTP-EQUIV="Content-Type" CONTENT="text/html; charset=iso-8859-1">
diff --git a/bdb/tcl/docs/rep.html b/bdb/tcl/docs/rep.html
new file mode 100644
index 00000000000..079fe443a63
--- /dev/null
+++ b/bdb/tcl/docs/rep.html
@@ -0,0 +1,51 @@
+<!--Copyright 1999-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+ <meta http-equiv="Content-Type" content="text/html; charset=iso-8859-1">
+ <title>Replication commands</title>
+</head>
+<body>
+
+<h2>
+<a NAME="Replication Commands"></a>Replication Commands</h2>
+Replication commands are invoked from the environment handle, after
+it has been opened with the appropriate flags defined
+<a href="./env.html">here</a>.<br>
+<hr WIDTH="100%">
+<p><b>> &lt;env> rep_process_message <i>machid</i> <i>control</i>
+<i>rec</i></b>
+<p>This command processes a single incoming replication message.&nbsp; It
+is a direct translation of the <a
+href="../../docs/api_c/rep_process_message.html">rep_process_message</a>
+function.&nbsp;
+It returns either a 0 (for success), a DB error message or it throws a
+Tcl error with a system message.&nbsp; The arguments are:
+<ul>
+<li>
+<b>machid </b>is the machine ID of the machine that <i>sent</i> this
+message.</li>
+
+<li>
+<b>control</b> is a binary string containing the exact contents of the
+<b><i>control</i></b> argument to the <b><i>sendproc</i></b> function
+that was passed this message on another site.</li>
+
+<li>
+<b>rec</b> is a binary string containing the exact contents of the
+<b><i>rec</i></b> argument to the <b><i>sendproc</i></b> function
+that was passed this message on another site.</li>
+</ul>
+
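+<p>A sketch of handing an incoming message to the local environment (the
+variables holding the sending site's machine ID and the opaque
+<b><i>control</i></b> and <b><i>rec</i></b> strings are hypothetical):
+<pre>
+% env0 rep_process_message $fromid $control $rec
+0
+</pre>
+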
+<hr WIDTH="100%">
+<br><b>> &lt;env> rep_elect <i>nsites</i> <i>pri</i> <i>wait</i>
+<i>sleep</i></b>
+<p>This command causes a replication election.&nbsp; It is a direct translation
+of the <a href="../../docs/api_c/rep_elect.html">rep_elect</a> function.&nbsp;
+Its arguments, all integers, correspond exactly to that C function's
+parameters.
+It will return a list containing two integers, which contain,
+respectively, the integer values returned in the C function's
+<i><b>midp</b></i> and <i><b>selfp</b></i> parameters.
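+<p>A sketch of a call for a hypothetical three-site group with priority
+100 (all argument and return values here are only illustrative):
+<pre>
+% env0 rep_elect 3 100 2 2
+1 0
+</pre>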
+</body>
+</html>
diff --git a/bdb/tcl/docs/test.html b/bdb/tcl/docs/test.html
index 10cf09efba7..603ae56a51e 100644
--- a/bdb/tcl/docs/test.html
+++ b/bdb/tcl/docs/test.html
@@ -1,4 +1,5 @@
-<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
+<!--Copyright 1999-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
<HTML>
<HEAD>
<META HTTP-EQUIV="Content-Type" CONTENT="text/html; charset=iso-8859-1">
diff --git a/bdb/tcl/docs/txn.html b/bdb/tcl/docs/txn.html
index 863c9a875e6..07c88c0fe1d 100644
--- a/bdb/tcl/docs/txn.html
+++ b/bdb/tcl/docs/txn.html
@@ -1,56 +1,67 @@
-<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
-<HTML>
-<HEAD>
- <META HTTP-EQUIV="Content-Type" CONTENT="text/html; charset=iso-8859-1">
- <META NAME="GENERATOR" CONTENT="Mozilla/4.08 [en] (X11; I; FreeBSD 2.2.8-19990120-SNAP i386) [Netscape]">
-</HEAD>
-<BODY>
+<!--Copyright 1999-2002 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+ <meta http-equiv="Content-Type" content="text/html; charset=iso-8859-1">
+ <meta name="GENERATOR" content="Mozilla/4.75 [en] (X11; U; Linux 2.2.16-22 i686) [Netscape]">
+</head>
+<body>
-<H2>
-<A NAME="Transaction Commands"></A>Transaction Commands</H2>
+<h2>
+<a NAME="Transaction Commands"></a>Transaction Commands</h2>
Transactions are used in a manner similar to the other subsystems.&nbsp;
We create a handle to the transaction and&nbsp; then use it for a variety
of operations.&nbsp; Some of the transaction commands use the environment
instead.&nbsp; Those are presented first.&nbsp; The transaction command
handle returned is the handle used by the various commands that can be
-transaction protected, such as <A HREF="../../docs/api_tcl/db_cursor.html">cursors</A>.<BR>
-
-<HR WIDTH="100%">
-<P><B>> &lt;env> txn_checkpoint [-kbyte <I>kb</I>] [-min <I>min</I>]</B>
-<P>This command causes a checkpoint of the transaction region.&nbsp; It
-is a direct translation of the <A HREF="../../docs/api_c/txn_checkpoint.html">txn_checkpoint
-</A>function.&nbsp;
+transaction protected, such as <a href="../../docs/api_tcl/db_cursor.html">cursors</a>.
+<br>
+<hr WIDTH="100%">
+<p><b>> &lt;env> txn_checkpoint [-kbyte <i>kb</i>] [-min <i>min</i>]</b>
+<p>This command causes a checkpoint of the transaction region.&nbsp; It
+is a direct translation of the <a href="../../docs/api_c/txn_checkpoint.html">txn_checkpoint
+</a>function.&nbsp;
It returns either a 0 (for success), a DB error message or it throws a
Tcl error with a system message.&nbsp; The arguments are:
-<UL>
-<LI>
-<B>-kbyte </B>causes the checkpoint to occur only if <B><I>kb</I></B> kilobytes
-of log data has been written since the last checkpoint</LI>
+<ul>
+<li>
+<b>-kbyte </b>causes the checkpoint to occur only if <b><i>kb</i></b> kilobytes
+of log data has been written since the last checkpoint</li>
-<LI>
-<B>-min</B> causes the checkpoint to occur only if <B><I>min</I></B> minutes
-have passed since the last checkpoint</LI>
-</UL>
+<li>
+<b>-min</b> causes the checkpoint to occur only if <b><i>min</i></b> minutes
+have passed since the last checkpoint</li>
+</ul>
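+
+<p>For example, requesting a checkpoint with no <b>-kbyte</b> or
+<b>-min</b> restriction on a hypothetical handle <b>env0</b>:
+<pre>
+% env0 txn_checkpoint
+0
+</pre>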
-<HR WIDTH="100%">
-<BR><B>> &lt;env> txn_stat</B>
-<P>This command returns transaction statistics.&nbsp; It is a direct translation
-of the <A HREF="../../docs/api_c/txn_stat.html">txn_stat</A> function.&nbsp;
+<hr WIDTH="100%">
+<br><b>> &lt;env> txn_stat</b>
+<p>This command returns transaction statistics.&nbsp; It is a direct translation
+of the <a href="../../docs/api_c/txn_stat.html">txn_stat</a> function.&nbsp;
It will return a list of name/value pairs that correspond to the DB_TXN_STAT
structure.
-<HR WIDTH="100%">
-<BR><B>>&nbsp; &lt;txn> id</B>
-<P>This command returns the transaction id.&nbsp; It is a direct call to
-the <A HREF="../../docs/api_c/txn_id.html">txn_id</A> function.&nbsp; The
-typical use of this identifier is as the <B><I>locker</I></B> value for
-the <A HREF="lock.html">lock_get</A> and <A HREF="lock.html">lock_vec</A>
+<hr WIDTH="100%">
+<br><b>> &lt;env> txn_id_set&nbsp;</b><i> current max</i>
+<p>This is a diagnostic command that sets the next transaction id to be
+allocated and the maximum transaction id, which is the point at which the
+reclamation algorithm is triggered.
+<hr WIDTH="100%">
+<br><b>>&nbsp; &lt;txn> id</b>
+<p>This command returns the transaction id.&nbsp; It is a direct call to
+the <a href="../../docs/api_c/txn_id.html">txn_id</a> function.&nbsp; The
+typical use of this identifier is as the <b><i>locker</i></b> value for
+the <a href="lock.html">lock_get</a> and <a href="lock.html">lock_vec</a>
calls.
-<HR WIDTH="100%">
-<BR><B>> &lt;txn> prepare</B>
-<P>This command initiates a two-phase commit.&nbsp; It is a direct call
-to the <A HREF="../../docs/api_c/txn_prepare.html">txn_prepare</A> function.&nbsp;
+<hr WIDTH="100%">
+<br><b>> &lt;txn> prepare</b>
+<p>This command initiates a two-phase commit.&nbsp; It is a direct call
+to the <a href="../../docs/api_c/txn_prepare.html">txn_prepare</a> function.&nbsp;
It returns either a 0 (for success), a DB error message or it throws a
Tcl error with a system message.
-<HR WIDTH="100%">
-</BODY>
-</HTML>
+<hr WIDTH="100%"><a NAME="> <env> txn_timeout"></a><b>> &lt;env> txn_timeout
+<i>timeout</i></b>
+<p>This command sets the transaction timeout for transactions started in
+the future in this environment.&nbsp; The timeout is in microseconds.
+<br>&nbsp;
+<br>&nbsp;
+</body>
+</html>
diff --git a/bdb/tcl/tcl_compat.c b/bdb/tcl/tcl_compat.c
index 41caee95cc7..e77bc32aedf 100644
--- a/bdb/tcl/tcl_compat.c
+++ b/bdb/tcl/tcl_compat.c
@@ -1,16 +1,18 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1999, 2000
+ * Copyright (c) 1999-2001
* Sleepycat Software. All rights reserved.
*/
#include "db_config.h"
#ifndef lint
-static const char revid[] = "$Id: tcl_compat.c,v 11.22 2001/01/11 18:19:55 bostic Exp $";
+static const char revid[] = "$Id: tcl_compat.c,v 11.39 2002/08/15 14:05:38 bostic Exp $";
#endif /* not lint */
+#if CONFIG_TEST
+
#ifndef NO_SYSTEM_INCLUDES
#include <sys/types.h>
@@ -23,12 +25,7 @@ static const char revid[] = "$Id: tcl_compat.c,v 11.22 2001/01/11 18:19:55 bosti
#define DB_DBM_HSEARCH 1
#include "db_int.h"
-#include "tcl_db.h"
-
-/*
- * Prototypes for procedures defined later in this file:
- */
-static int mutex_Cmd __P((ClientData, Tcl_Interp *, int, Tcl_Obj * CONST*));
+#include "dbinc/tcl_db.h"
/*
* bdb_HCommand --
@@ -91,7 +88,7 @@ bdb_HCommand(interp, objc, objv)
if (result == TCL_OK) {
_debug_check();
ret = hcreate(nelem) == 0 ? 1: 0;
- _ReturnSetup(interp, ret, "hcreate");
+ _ReturnSetup(interp, ret, DB_RETOK_STD(ret), "hcreate");
}
break;
case HHSEARCH:
@@ -104,17 +101,17 @@ bdb_HCommand(interp, objc, objv)
}
item.key = Tcl_GetStringFromObj(objv[2], NULL);
item.data = Tcl_GetStringFromObj(objv[3], NULL);
- action = 0;
if (Tcl_GetIndexFromObj(interp, objv[4], srchacts,
"action", TCL_EXACT, &actindex) != TCL_OK)
return (IS_HELP(objv[4]));
switch ((enum srchacts)actindex) {
- case ACT_FIND:
- action = FIND;
- break;
case ACT_ENTER:
action = ENTER;
break;
+ default:
+ case ACT_FIND:
+ action = FIND;
+ break;
}
_debug_check();
hres = hsearch(item, action);
@@ -182,7 +179,7 @@ bdb_NdbmOpen(interp, objc, objv, dbpp)
};
u_int32_t open_flags;
- int endarg, i, mode, optindex, read_only, result;
+ int endarg, i, mode, optindex, read_only, result, ret;
char *arg, *db;
result = TCL_OK;
@@ -281,7 +278,9 @@ bdb_NdbmOpen(interp, objc, objv, dbpp)
open_flags |= O_RDWR;
_debug_check();
if ((*dbpp = dbm_open(db, open_flags, mode)) == NULL) {
- result = _ReturnSetup(interp, Tcl_GetErrno(), "db open");
+ ret = Tcl_GetErrno();
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "db open");
goto error;
}
return (TCL_OK);
@@ -335,10 +334,13 @@ bdb_DbmCommand(interp, objc, objv, flag, dbm)
STINSERT, STREPLACE
};
datum key, data;
- int cmdindex, stindex, result, ret;
+ void *dtmp, *ktmp;
+ u_int32_t size;
+ int cmdindex, freedata, freekey, stindex, result, ret;
char *name, *t;
result = TCL_OK;
+ freekey = freedata = 0;
/*
* Get the command name index from the object based on the cmds
* defined above. This SHOULD NOT fail because we already checked
@@ -365,7 +367,7 @@ bdb_DbmCommand(interp, objc, objv, flag, dbm)
"Bad interface flag for command", TCL_STATIC);
return (TCL_ERROR);
}
- _ReturnSetup(interp, ret, "dbmclose");
+ _ReturnSetup(interp, ret, DB_RETOK_STD(ret), "dbmclose");
break;
case DBMINIT:
/*
@@ -383,7 +385,7 @@ bdb_DbmCommand(interp, objc, objv, flag, dbm)
TCL_STATIC);
return (TCL_ERROR);
}
- _ReturnSetup(interp, ret, "dbminit");
+ _ReturnSetup(interp, ret, DB_RETOK_STD(ret), "dbminit");
break;
case DBMFETCH:
/*
@@ -393,7 +395,14 @@ bdb_DbmCommand(interp, objc, objv, flag, dbm)
Tcl_WrongNumArgs(interp, 2, objv, "key");
return (TCL_ERROR);
}
- key.dptr = (char *)Tcl_GetByteArrayFromObj(objv[2], &key.dsize);
+ if ((ret = _CopyObjBytes(
+ interp, objv[2], &ktmp, &size, &freekey)) != 0) {
+ result = _ReturnSetup(interp, ret,
+ DB_RETOK_STD(ret), "dbm fetch");
+ goto out;
+ }
+ key.dsize = size;
+ key.dptr = (char *)ktmp;
_debug_check();
if (flag == DBTCL_DBM)
data = fetch(key);
@@ -402,16 +411,17 @@ bdb_DbmCommand(interp, objc, objv, flag, dbm)
else {
Tcl_SetResult(interp,
"Bad interface flag for command", TCL_STATIC);
- return (TCL_ERROR);
+ result = TCL_ERROR;
+ goto out;
}
if (data.dptr == NULL ||
- (ret = __os_malloc(NULL, data.dsize + 1, NULL, &t)) != 0)
+ (ret = __os_malloc(NULL, data.dsize + 1, &t)) != 0)
Tcl_SetResult(interp, "-1", TCL_STATIC);
else {
memcpy(t, data.dptr, data.dsize);
t[data.dsize] = '\0';
Tcl_SetResult(interp, t, TCL_VOLATILE);
- __os_free(t, data.dsize + 1);
+ __os_free(NULL, t);
}
break;
case DBMSTORE:
@@ -426,9 +436,22 @@ bdb_DbmCommand(interp, objc, objv, flag, dbm)
Tcl_WrongNumArgs(interp, 2, objv, "key data action");
return (TCL_ERROR);
}
- key.dptr = (char *)Tcl_GetByteArrayFromObj(objv[2], &key.dsize);
- data.dptr =
- (char *)Tcl_GetByteArrayFromObj(objv[3], &data.dsize);
+ if ((ret = _CopyObjBytes(
+ interp, objv[2], &ktmp, &size, &freekey)) != 0) {
+ result = _ReturnSetup(interp, ret,
+ DB_RETOK_STD(ret), "dbm fetch");
+ goto out;
+ }
+ key.dsize = size;
+ key.dptr = (char *)ktmp;
+ if ((ret = _CopyObjBytes(
+ interp, objv[3], &dtmp, &size, &freedata)) != 0) {
+ result = _ReturnSetup(interp, ret,
+ DB_RETOK_STD(ret), "dbm fetch");
+ goto out;
+ }
+ data.dsize = size;
+ data.dptr = (char *)dtmp;
_debug_check();
if (flag == DBTCL_DBM)
ret = store(key, data);
@@ -450,7 +473,7 @@ bdb_DbmCommand(interp, objc, objv, flag, dbm)
"Bad interface flag for command", TCL_STATIC);
return (TCL_ERROR);
}
- _ReturnSetup(interp, ret, "store");
+ _ReturnSetup(interp, ret, DB_RETOK_STD(ret), "store");
break;
case DBMDELETE:
/*
@@ -460,7 +483,14 @@ bdb_DbmCommand(interp, objc, objv, flag, dbm)
Tcl_WrongNumArgs(interp, 2, objv, "key");
return (TCL_ERROR);
}
- key.dptr = (char *)Tcl_GetByteArrayFromObj(objv[2], &key.dsize);
+ if ((ret = _CopyObjBytes(
+ interp, objv[2], &ktmp, &size, &freekey)) != 0) {
+ result = _ReturnSetup(interp, ret,
+ DB_RETOK_STD(ret), "dbm fetch");
+ goto out;
+ }
+ key.dsize = size;
+ key.dptr = (char *)ktmp;
_debug_check();
if (flag == DBTCL_DBM)
ret = delete(key);
@@ -471,7 +501,7 @@ bdb_DbmCommand(interp, objc, objv, flag, dbm)
"Bad interface flag for command", TCL_STATIC);
return (TCL_ERROR);
}
- _ReturnSetup(interp, ret, "delete");
+ _ReturnSetup(interp, ret, DB_RETOK_STD(ret), "delete");
break;
case DBMFIRST:
/*
@@ -492,13 +522,13 @@ bdb_DbmCommand(interp, objc, objv, flag, dbm)
return (TCL_ERROR);
}
if (key.dptr == NULL ||
- (ret = __os_malloc(NULL, key.dsize + 1, NULL, &t)) != 0)
+ (ret = __os_malloc(NULL, key.dsize + 1, &t)) != 0)
Tcl_SetResult(interp, "-1", TCL_STATIC);
else {
memcpy(t, key.dptr, key.dsize);
t[key.dsize] = '\0';
Tcl_SetResult(interp, t, TCL_VOLATILE);
- __os_free(t, key.dsize + 1);
+ __os_free(NULL, t);
}
break;
case DBMNEXT:
@@ -511,8 +541,14 @@ bdb_DbmCommand(interp, objc, objv, flag, dbm)
Tcl_WrongNumArgs(interp, 2, objv, NULL);
return (TCL_ERROR);
}
- key.dptr = (char *)
- Tcl_GetByteArrayFromObj(objv[2], &key.dsize);
+ if ((ret = _CopyObjBytes(
+ interp, objv[2], &ktmp, &size, &freekey)) != 0) {
+ result = _ReturnSetup(interp, ret,
+ DB_RETOK_STD(ret), "dbm fetch");
+ goto out;
+ }
+ key.dsize = size;
+ key.dptr = (char *)ktmp;
data = nextkey(key);
} else if (flag == DBTCL_NDBM) {
if (objc != 2) {
@@ -526,16 +562,21 @@ bdb_DbmCommand(interp, objc, objv, flag, dbm)
return (TCL_ERROR);
}
if (data.dptr == NULL ||
- (ret = __os_malloc(NULL, data.dsize + 1, NULL, &t)) != 0)
+ (ret = __os_malloc(NULL, data.dsize + 1, &t)) != 0)
Tcl_SetResult(interp, "-1", TCL_STATIC);
else {
memcpy(t, data.dptr, data.dsize);
t[data.dsize] = '\0';
Tcl_SetResult(interp, t, TCL_VOLATILE);
- __os_free(t, data.dsize + 1);
+ __os_free(NULL, t);
}
break;
}
+out:
+ if (freedata)
+ (void)__os_free(NULL, dtmp);
+ if (freekey)
+ (void)__os_free(NULL, ktmp);
return (result);
}
@@ -636,7 +677,8 @@ ndbm_Cmd(clientData, interp, objc, objv)
_debug_check();
ret = dbm_clearerr(dbp);
if (ret)
- _ReturnSetup(interp, ret, "clearerr");
+ _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "clearerr");
else
res = Tcl_NewIntObj(ret);
break;
@@ -688,7 +730,7 @@ ndbm_Cmd(clientData, interp, objc, objv)
_debug_check();
ret = dbm_rdonly(dbp);
if (ret)
- _ReturnSetup(interp, ret, "rdonly");
+ _ReturnSetup(interp, ret, DB_RETOK_STD(ret), "rdonly");
else
res = Tcl_NewIntObj(ret);
break;
@@ -701,355 +743,4 @@ ndbm_Cmd(clientData, interp, objc, objv)
Tcl_SetObjResult(interp, res);
return (result);
}
-
-/*
- * bdb_RandCommand --
- * Implements rand* functions.
- *
- * PUBLIC: int bdb_RandCommand __P((Tcl_Interp *, int, Tcl_Obj * CONST*));
- */
-int
-bdb_RandCommand(interp, objc, objv)
- Tcl_Interp *interp; /* Interpreter */
- int objc; /* How many arguments? */
- Tcl_Obj *CONST objv[]; /* The argument objects */
-{
- static char *rcmds[] = {
- "rand", "random_int", "srand",
- NULL
- };
- enum rcmds {
- RRAND, RRAND_INT, RSRAND
- };
- long t;
- int cmdindex, hi, lo, result, ret;
- Tcl_Obj *res;
- char msg[MSG_SIZE];
-
- result = TCL_OK;
- /*
- * Get the command name index from the object based on the cmds
- * defined above. This SHOULD NOT fail because we already checked
- * in the 'berkdb' command.
- */
- if (Tcl_GetIndexFromObj(interp,
- objv[1], rcmds, "command", TCL_EXACT, &cmdindex) != TCL_OK)
- return (IS_HELP(objv[1]));
-
- res = NULL;
- switch ((enum rcmds)cmdindex) {
- case RRAND:
- /*
- * Must be 0 args. Error if different.
- */
- if (objc != 2) {
- Tcl_WrongNumArgs(interp, 2, objv, NULL);
- return (TCL_ERROR);
- }
- ret = rand();
- res = Tcl_NewIntObj(ret);
- break;
- case RRAND_INT:
- /*
- * Must be 4 args. Error if different.
- */
- if (objc != 4) {
- Tcl_WrongNumArgs(interp, 2, objv, "lo hi");
- return (TCL_ERROR);
- }
- result = Tcl_GetIntFromObj(interp, objv[2], &lo);
- if (result != TCL_OK)
- break;
- result = Tcl_GetIntFromObj(interp, objv[3], &hi);
- if (result == TCL_OK) {
-#ifndef RAND_MAX
-#define RAND_MAX 0x7fffffff
-#endif
- t = rand();
- if (t > RAND_MAX) {
- snprintf(msg, MSG_SIZE,
- "Max random is higher than %ld\n",
- (long)RAND_MAX);
- Tcl_SetResult(interp, msg, TCL_VOLATILE);
- result = TCL_ERROR;
- break;
- }
- _debug_check();
- ret = (int)(((double)t / ((double)(RAND_MAX) + 1)) *
- (hi - lo + 1));
- ret += lo;
- res = Tcl_NewIntObj(ret);
- }
- break;
- case RSRAND:
- /*
- * Must be 1 arg. Error if different.
- */
- if (objc != 3) {
- Tcl_WrongNumArgs(interp, 2, objv, "seed");
- return (TCL_ERROR);
- }
- result = Tcl_GetIntFromObj(interp, objv[2], &lo);
- if (result == TCL_OK) {
- srand((u_int)lo);
- res = Tcl_NewIntObj(0);
- }
- break;
- }
- /*
- * Only set result if we have a res. Otherwise, lower
- * functions have already done so.
- */
- if (result == TCL_OK && res)
- Tcl_SetObjResult(interp, res);
- return (result);
-}
-
-/*
- *
- * tcl_Mutex --
- * Opens an env mutex.
- *
- * PUBLIC: int tcl_Mutex __P((Tcl_Interp *, int, Tcl_Obj * CONST*, DB_ENV *,
- * PUBLIC: DBTCL_INFO *));
- */
-int
-tcl_Mutex(interp, objc, objv, envp, envip)
- Tcl_Interp *interp; /* Interpreter */
- int objc; /* How many arguments? */
- Tcl_Obj *CONST objv[]; /* The argument objects */
- DB_ENV *envp; /* Environment pointer */
- DBTCL_INFO *envip; /* Info pointer */
-{
- DBTCL_INFO *ip;
- Tcl_Obj *res;
- _MUTEX_DATA *md;
- int i, mode, nitems, result, ret;
- char newname[MSG_SIZE];
-
- md = NULL;
- result = TCL_OK;
- mode = nitems = ret = 0;
- memset(newname, 0, MSG_SIZE);
-
- if (objc != 4) {
- Tcl_WrongNumArgs(interp, 2, objv, "mode nitems");
- return (TCL_ERROR);
- }
- result = Tcl_GetIntFromObj(interp, objv[2], &mode);
- if (result != TCL_OK)
- return (TCL_ERROR);
- result = Tcl_GetIntFromObj(interp, objv[3], &nitems);
- if (result != TCL_OK)
- return (TCL_ERROR);
-
- snprintf(newname, sizeof(newname),
- "%s.mutex%d", envip->i_name, envip->i_envmutexid);
- ip = _NewInfo(interp, NULL, newname, I_MUTEX);
- if (ip == NULL) {
- Tcl_SetResult(interp, "Could not set up info",
- TCL_STATIC);
- return (TCL_ERROR);
- }
- /*
- * Set up mutex.
- */
- /*
- * Map in the region.
- *
- * XXX
- * We don't bother doing this "right", i.e., using the shalloc
- * functions, just grab some memory knowing that it's correctly
- * aligned.
- */
- _debug_check();
- if (__os_calloc(NULL, 1, sizeof(_MUTEX_DATA), &md) != 0)
- goto posixout;
- md->env = envp;
- md->n_mutex = nitems;
- md->size = sizeof(_MUTEX_ENTRY) * nitems;
-
- md->reginfo.type = REGION_TYPE_MUTEX;
- md->reginfo.id = INVALID_REGION_TYPE;
- md->reginfo.mode = mode;
- md->reginfo.flags = REGION_CREATE_OK | REGION_JOIN_OK;
- if ((ret = __db_r_attach(envp, &md->reginfo, md->size)) != 0)
- goto posixout;
- md->marray = md->reginfo.addr;
-
- /* Initialize a created region. */
- if (F_ISSET(&md->reginfo, REGION_CREATE))
- for (i = 0; i < nitems; i++) {
- md->marray[i].val = 0;
- if ((ret =
- __db_mutex_init(envp, &md->marray[i].m, i, 0)) != 0)
- goto posixout;
- }
- R_UNLOCK(envp, &md->reginfo);
-
- /*
- * Success. Set up return. Set up new info
- * and command widget for this mutex.
- */
- envip->i_envmutexid++;
- ip->i_parent = envip;
- _SetInfoData(ip, md);
- Tcl_CreateObjCommand(interp, newname,
- (Tcl_ObjCmdProc *)mutex_Cmd, (ClientData)md, NULL);
- res = Tcl_NewStringObj(newname, strlen(newname));
- Tcl_SetObjResult(interp, res);
-
- return (TCL_OK);
-
-posixout:
- if (ret > 0)
- Tcl_PosixError(interp);
- result = _ReturnSetup(interp, ret, "mutex");
- _DeleteInfo(ip);
-
- if (md != NULL) {
- if (md->reginfo.addr != NULL)
- (void)__db_r_detach(md->env,
- &md->reginfo, F_ISSET(&md->reginfo, REGION_CREATE));
- __os_free(md, sizeof(*md));
- }
- return (result);
-}
-
-/*
- * mutex_Cmd --
- * Implements the "mutex" widget.
- */
-static int
-mutex_Cmd(clientData, interp, objc, objv)
- ClientData clientData; /* Mutex handle */
- Tcl_Interp *interp; /* Interpreter */
- int objc; /* How many arguments? */
- Tcl_Obj *CONST objv[]; /* The argument objects */
-{
- static char *mxcmds[] = {
- "close",
- "get",
- "getval",
- "release",
- "setval",
- NULL
- };
- enum mxcmds {
- MXCLOSE,
- MXGET,
- MXGETVAL,
- MXRELE,
- MXSETVAL
- };
- DB_ENV *dbenv;
- DBTCL_INFO *envip, *mpip;
- _MUTEX_DATA *mp;
- Tcl_Obj *res;
- int cmdindex, id, result, newval;
-
- Tcl_ResetResult(interp);
- mp = (_MUTEX_DATA *)clientData;
- mpip = _PtrToInfo((void *)mp);
- envip = mpip->i_parent;
- dbenv = envip->i_envp;
- result = TCL_OK;
-
- if (mp == NULL) {
- Tcl_SetResult(interp, "NULL mp pointer", TCL_STATIC);
- return (TCL_ERROR);
- }
- if (mpip == NULL) {
- Tcl_SetResult(interp, "NULL mp info pointer", TCL_STATIC);
- return (TCL_ERROR);
- }
-
- /*
- * Get the command name index from the object based on the dbcmds
- * defined above.
- */
- if (Tcl_GetIndexFromObj(interp,
- objv[1], mxcmds, "command", TCL_EXACT, &cmdindex) != TCL_OK)
- return (IS_HELP(objv[1]));
-
- res = NULL;
- switch ((enum mxcmds)cmdindex) {
- case MXCLOSE:
- if (objc != 2) {
- Tcl_WrongNumArgs(interp, 1, objv, NULL);
- return (TCL_ERROR);
- }
- _debug_check();
- (void)__db_r_detach(mp->env, &mp->reginfo, 0);
- res = Tcl_NewIntObj(0);
- (void)Tcl_DeleteCommand(interp, mpip->i_name);
- _DeleteInfo(mpip);
- __os_free(mp, sizeof(*mp));
- break;
- case MXRELE:
- /*
- * Check for 1 arg. Error if different.
- */
- if (objc != 3) {
- Tcl_WrongNumArgs(interp, 2, objv, "id");
- return (TCL_ERROR);
- }
- result = Tcl_GetIntFromObj(interp, objv[2], &id);
- if (result != TCL_OK)
- break;
- MUTEX_UNLOCK(dbenv, &mp->marray[id].m);
- res = Tcl_NewIntObj(0);
- break;
- case MXGET:
- /*
- * Check for 1 arg. Error if different.
- */
- if (objc != 3) {
- Tcl_WrongNumArgs(interp, 2, objv, "id");
- return (TCL_ERROR);
- }
- result = Tcl_GetIntFromObj(interp, objv[2], &id);
- if (result != TCL_OK)
- break;
- MUTEX_LOCK(dbenv, &mp->marray[id].m, mp->env->lockfhp);
- res = Tcl_NewIntObj(0);
- break;
- case MXGETVAL:
- /*
- * Check for 1 arg. Error if different.
- */
- if (objc != 3) {
- Tcl_WrongNumArgs(interp, 2, objv, "id");
- return (TCL_ERROR);
- }
- result = Tcl_GetIntFromObj(interp, objv[2], &id);
- if (result != TCL_OK)
- break;
- res = Tcl_NewIntObj(mp->marray[id].val);
- break;
- case MXSETVAL:
- /*
- * Check for 2 args. Error if different.
- */
- if (objc != 4) {
- Tcl_WrongNumArgs(interp, 2, objv, "id val");
- return (TCL_ERROR);
- }
- result = Tcl_GetIntFromObj(interp, objv[2], &id);
- if (result != TCL_OK)
- break;
- result = Tcl_GetIntFromObj(interp, objv[3], &newval);
- if (result != TCL_OK)
- break;
- mp->marray[id].val = newval;
- res = Tcl_NewIntObj(0);
- break;
- }
- /*
- * Only set result if we have a res. Otherwise, lower
- * functions have already done so.
- */
- if (result == TCL_OK && res)
- Tcl_SetObjResult(interp, res);
- return (result);
-}
+#endif /* CONFIG_TEST */
diff --git a/bdb/tcl/tcl_db.c b/bdb/tcl/tcl_db.c
index 8e7215a272a..7df2e48311c 100644
--- a/bdb/tcl/tcl_db.c
+++ b/bdb/tcl/tcl_db.c
@@ -1,14 +1,14 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1999, 2000
+ * Copyright (c) 1999-2002
* Sleepycat Software. All rights reserved.
*/
#include "db_config.h"
#ifndef lint
-static const char revid[] = "$Id: tcl_db.c,v 11.55 2000/11/28 20:12:31 bostic Exp $";
+static const char revid[] = "$Id: tcl_db.c,v 11.107 2002/08/06 06:20:31 bostic Exp $";
#endif /* not lint */
#ifndef NO_SYSTEM_INCLUDES
@@ -20,24 +20,61 @@ static const char revid[] = "$Id: tcl_db.c,v 11.55 2000/11/28 20:12:31 bostic Ex
#endif
#include "db_int.h"
-#include "tcl_db.h"
+#include "dbinc/db_page.h"
+#include "dbinc/db_am.h"
+#include "dbinc/tcl_db.h"
/*
* Prototypes for procedures defined later in this file:
*/
+static int tcl_DbAssociate __P((Tcl_Interp *,
+ int, Tcl_Obj * CONST*, DB *));
static int tcl_DbClose __P((Tcl_Interp *,
int, Tcl_Obj * CONST*, DB *, DBTCL_INFO *));
static int tcl_DbDelete __P((Tcl_Interp *, int, Tcl_Obj * CONST*, DB *));
-static int tcl_DbGet __P((Tcl_Interp *, int, Tcl_Obj * CONST*, DB *));
+static int tcl_DbGet __P((Tcl_Interp *, int, Tcl_Obj * CONST*, DB *, int));
static int tcl_DbKeyRange __P((Tcl_Interp *, int, Tcl_Obj * CONST*, DB *));
static int tcl_DbPut __P((Tcl_Interp *, int, Tcl_Obj * CONST*, DB *));
static int tcl_DbStat __P((Tcl_Interp *, int, Tcl_Obj * CONST*, DB *));
+static int tcl_DbTruncate __P((Tcl_Interp *, int, Tcl_Obj * CONST*, DB *));
static int tcl_DbCursor __P((Tcl_Interp *,
int, Tcl_Obj * CONST*, DB *, DBC **));
static int tcl_DbJoin __P((Tcl_Interp *,
int, Tcl_Obj * CONST*, DB *, DBC **));
static int tcl_DbGetjoin __P((Tcl_Interp *, int, Tcl_Obj * CONST*, DB *));
static int tcl_DbCount __P((Tcl_Interp *, int, Tcl_Obj * CONST*, DB *));
+static int tcl_second_call __P((DB *, const DBT *, const DBT *, DBT *));
+
+/*
+ * _DbInfoDelete --
+ *
+ * PUBLIC: void _DbInfoDelete __P((Tcl_Interp *, DBTCL_INFO *));
+ */
+void
+_DbInfoDelete(interp, dbip)
+ Tcl_Interp *interp;
+ DBTCL_INFO *dbip;
+{
+ DBTCL_INFO *nextp, *p;
+ /*
+ * First we have to close any open cursors. Then we close
+ * our db.
+ */
+ for (p = LIST_FIRST(&__db_infohead); p != NULL; p = nextp) {
+ nextp = LIST_NEXT(p, entries);
+ /*
+ * Check if this is a cursor info structure and if
+ * it is, if it belongs to this DB. If so, remove
+ * its commands and info structure.
+ */
+ if (p->i_parent == dbip && p->i_type == I_DBC) {
+ (void)Tcl_DeleteCommand(interp, p->i_name);
+ _DeleteInfo(p);
+ }
+ }
+ (void)Tcl_DeleteCommand(interp, dbip->i_name);
+ _DeleteInfo(dbip);
+}
/*
*
@@ -54,6 +91,13 @@ db_Cmd(clientData, interp, objc, objv)
Tcl_Obj *CONST objv[]; /* The argument objects */
{
static char *dbcmds[] = {
+#if CONFIG_TEST
+ "keyrange",
+ "pget",
+ "rpcid",
+ "test",
+#endif
+ "associate",
"close",
"count",
"cursor",
@@ -63,16 +107,20 @@ db_Cmd(clientData, interp, objc, objv)
"get_type",
"is_byteswapped",
"join",
- "keyrange",
"put",
"stat",
"sync",
-#if CONFIG_TEST
- "test",
-#endif
+ "truncate",
NULL
};
enum dbcmds {
+#if CONFIG_TEST
+ DBKEYRANGE,
+ DBPGET,
+ DBRPCID,
+ DBTEST,
+#endif
+ DBASSOCIATE,
DBCLOSE,
DBCOUNT,
DBCURSOR,
@@ -82,20 +130,18 @@ db_Cmd(clientData, interp, objc, objv)
DBGETTYPE,
DBSWAPPED,
DBJOIN,
- DBKEYRANGE,
DBPUT,
DBSTAT,
- DBSYNC
-#if CONFIG_TEST
- , DBTEST
-#endif
+ DBSYNC,
+ DBTRUNCATE
};
DB *dbp;
DBC *dbc;
DBTCL_INFO *dbip;
DBTCL_INFO *ip;
+ DBTYPE type;
Tcl_Obj *res;
- int cmdindex, result, ret;
+ int cmdindex, isswapped, result, ret;
char newname[MSG_SIZE];
Tcl_ResetResult(interp);
@@ -126,6 +172,34 @@ db_Cmd(clientData, interp, objc, objv)
res = NULL;
switch ((enum dbcmds)cmdindex) {
+#if CONFIG_TEST
+ case DBKEYRANGE:
+ result = tcl_DbKeyRange(interp, objc, objv, dbp);
+ break;
+ case DBPGET:
+ result = tcl_DbGet(interp, objc, objv, dbp, 1);
+ break;
+ case DBRPCID:
+ /*
+ * No args for this. Error if there are some.
+ */
+ if (objc > 2) {
+ Tcl_WrongNumArgs(interp, 2, objv, NULL);
+ return (TCL_ERROR);
+ }
+ /*
+ * !!! Retrieve the client ID from the dbp handle directly.
+ * This is for testing purposes only. It is dbp-private data.
+ */
+ res = Tcl_NewLongObj(dbp->cl_id);
+ break;
+ case DBTEST:
+ result = tcl_EnvTest(interp, objc, objv, dbp->dbenv);
+ break;
+#endif
+ case DBASSOCIATE:
+ result = tcl_DbAssociate(interp, objc, objv, dbp);
+ break;
case DBCLOSE:
result = tcl_DbClose(interp, objc, objv, dbp, dbip);
break;
@@ -133,10 +207,7 @@ db_Cmd(clientData, interp, objc, objv)
result = tcl_DbDelete(interp, objc, objv, dbp);
break;
case DBGET:
- result = tcl_DbGet(interp, objc, objv, dbp);
- break;
- case DBKEYRANGE:
- result = tcl_DbKeyRange(interp, objc, objv, dbp);
+ result = tcl_DbGet(interp, objc, objv, dbp, 0);
break;
case DBPUT:
result = tcl_DbPut(interp, objc, objv, dbp);
@@ -153,8 +224,8 @@ db_Cmd(clientData, interp, objc, objv)
return (TCL_ERROR);
}
_debug_check();
- ret = dbp->get_byteswapped(dbp);
- res = Tcl_NewIntObj(ret);
+ ret = dbp->get_byteswapped(dbp, &isswapped);
+ res = Tcl_NewIntObj(isswapped);
break;
case DBGETTYPE:
/*
@@ -165,14 +236,14 @@ db_Cmd(clientData, interp, objc, objv)
return (TCL_ERROR);
}
_debug_check();
- ret = dbp->get_type(dbp);
- if (ret == DB_BTREE)
+ ret = dbp->get_type(dbp, &type);
+ if (type == DB_BTREE)
res = Tcl_NewStringObj("btree", strlen("btree"));
- else if (ret == DB_HASH)
+ else if (type == DB_HASH)
res = Tcl_NewStringObj("hash", strlen("hash"));
- else if (ret == DB_RECNO)
+ else if (type == DB_RECNO)
res = Tcl_NewStringObj("recno", strlen("recno"));
- else if (ret == DB_QUEUE)
+ else if (type == DB_QUEUE)
res = Tcl_NewStringObj("queue", strlen("queue"));
else {
Tcl_SetResult(interp,
@@ -248,11 +319,9 @@ db_Cmd(clientData, interp, objc, objv)
case DBGETJOIN:
result = tcl_DbGetjoin(interp, objc, objv, dbp);
break;
-#if CONFIG_TEST
- case DBTEST:
- result = tcl_EnvTest(interp, objc, objv, dbp->dbenv);
+ case DBTRUNCATE:
+ result = tcl_DbTruncate(interp, objc, objv, dbp);
break;
-#endif
}
/*
* Only set result if we have a res. Otherwise, lower
@@ -277,7 +346,7 @@ tcl_DbStat(interp, objc, objv, dbp)
DB_HASH_STAT *hsp;
DB_QUEUE_STAT *qsp;
void *sp;
- Tcl_Obj *res;
+ Tcl_Obj *res, *flaglist, *myobjv[2];
DBTYPE type;
u_int32_t flag;
int result, ret;
@@ -287,16 +356,14 @@ tcl_DbStat(interp, objc, objv, dbp)
flag = 0;
if (objc > 3) {
- Tcl_WrongNumArgs(interp, 2, objv, "?-recordcount?");
+ Tcl_WrongNumArgs(interp, 2, objv, "?-faststat?");
return (TCL_ERROR);
}
if (objc == 3) {
arg = Tcl_GetStringFromObj(objv[2], NULL);
- if (strcmp(arg, "-recordcount") == 0)
- flag = DB_RECORDCOUNT;
- else if (strcmp(arg, "-cachedcounts") == 0)
- flag = DB_CACHED_COUNTS;
+ if (strcmp(arg, "-faststat") == 0)
+ flag = DB_FAST_STAT;
else {
Tcl_SetResult(interp,
"db stat: unknown arg", TCL_STATIC);
@@ -305,17 +372,18 @@ tcl_DbStat(interp, objc, objv, dbp)
}
_debug_check();
- ret = dbp->stat(dbp, &sp, NULL, flag);
- result = _ReturnSetup(interp, ret, "db stat");
+ ret = dbp->stat(dbp, &sp, flag);
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), "db stat");
if (result == TCL_ERROR)
return (result);
- type = dbp->get_type(dbp);
+ (void)dbp->get_type(dbp, &type);
/*
* Have our stats, now construct the name value
* list pairs and free up the memory.
*/
res = Tcl_NewObj();
+
/*
* MAKE_STAT_LIST assumes 'res' and 'error' label.
*/
@@ -326,42 +394,48 @@ tcl_DbStat(interp, objc, objv, dbp)
MAKE_STAT_LIST("Page size", hsp->hash_pagesize);
MAKE_STAT_LIST("Number of keys", hsp->hash_nkeys);
MAKE_STAT_LIST("Number of records", hsp->hash_ndata);
- MAKE_STAT_LIST("Estim. number of elements", hsp->hash_nelem);
MAKE_STAT_LIST("Fill factor", hsp->hash_ffactor);
MAKE_STAT_LIST("Buckets", hsp->hash_buckets);
- MAKE_STAT_LIST("Free pages", hsp->hash_free);
- MAKE_STAT_LIST("Bytes free", hsp->hash_bfree);
- MAKE_STAT_LIST("Number of big pages", hsp->hash_bigpages);
- MAKE_STAT_LIST("Big pages bytes free", hsp->hash_big_bfree);
- MAKE_STAT_LIST("Overflow pages", hsp->hash_overflows);
- MAKE_STAT_LIST("Overflow bytes free", hsp->hash_ovfl_free);
- MAKE_STAT_LIST("Duplicate pages", hsp->hash_dup);
- MAKE_STAT_LIST("Duplicate pages bytes free",
- hsp->hash_dup_free);
+ if (flag != DB_FAST_STAT) {
+ MAKE_STAT_LIST("Free pages", hsp->hash_free);
+ MAKE_STAT_LIST("Bytes free", hsp->hash_bfree);
+ MAKE_STAT_LIST("Number of big pages",
+ hsp->hash_bigpages);
+ MAKE_STAT_LIST("Big pages bytes free",
+ hsp->hash_big_bfree);
+ MAKE_STAT_LIST("Overflow pages", hsp->hash_overflows);
+ MAKE_STAT_LIST("Overflow bytes free",
+ hsp->hash_ovfl_free);
+ MAKE_STAT_LIST("Duplicate pages", hsp->hash_dup);
+ MAKE_STAT_LIST("Duplicate pages bytes free",
+ hsp->hash_dup_free);
+ }
} else if (type == DB_QUEUE) {
qsp = (DB_QUEUE_STAT *)sp;
MAKE_STAT_LIST("Magic", qsp->qs_magic);
MAKE_STAT_LIST("Version", qsp->qs_version);
MAKE_STAT_LIST("Page size", qsp->qs_pagesize);
- MAKE_STAT_LIST("Number of records", qsp->qs_ndata);
- MAKE_STAT_LIST("Number of pages", qsp->qs_pages);
- MAKE_STAT_LIST("Bytes free", qsp->qs_pgfree);
+ MAKE_STAT_LIST("Extent size", qsp->qs_extentsize);
+ MAKE_STAT_LIST("Number of records", qsp->qs_nkeys);
MAKE_STAT_LIST("Record length", qsp->qs_re_len);
MAKE_STAT_LIST("Record pad", qsp->qs_re_pad);
MAKE_STAT_LIST("First record number", qsp->qs_first_recno);
MAKE_STAT_LIST("Last record number", qsp->qs_cur_recno);
+ if (flag != DB_FAST_STAT) {
+ MAKE_STAT_LIST("Number of pages", qsp->qs_pages);
+ MAKE_STAT_LIST("Bytes free", qsp->qs_pgfree);
+ }
} else { /* BTREE and RECNO are same stats */
bsp = (DB_BTREE_STAT *)sp;
+ MAKE_STAT_LIST("Magic", bsp->bt_magic);
+ MAKE_STAT_LIST("Version", bsp->bt_version);
MAKE_STAT_LIST("Number of keys", bsp->bt_nkeys);
MAKE_STAT_LIST("Number of records", bsp->bt_ndata);
- if (flag != DB_RECORDCOUNT) {
- MAKE_STAT_LIST("Magic", bsp->bt_magic);
- MAKE_STAT_LIST("Version", bsp->bt_version);
- MAKE_STAT_LIST("Flags", bsp->bt_metaflags);
- MAKE_STAT_LIST("Minimum keys per page", bsp->bt_minkey);
- MAKE_STAT_LIST("Fixed record length", bsp->bt_re_len);
- MAKE_STAT_LIST("Record pad", bsp->bt_re_pad);
- MAKE_STAT_LIST("Page size", bsp->bt_pagesize);
+ MAKE_STAT_LIST("Minimum keys per page", bsp->bt_minkey);
+ MAKE_STAT_LIST("Fixed record length", bsp->bt_re_len);
+ MAKE_STAT_LIST("Record pad", bsp->bt_re_pad);
+ MAKE_STAT_LIST("Page size", bsp->bt_pagesize);
+ if (flag != DB_FAST_STAT) {
MAKE_STAT_LIST("Levels", bsp->bt_levels);
MAKE_STAT_LIST("Internal pages", bsp->bt_int_pg);
MAKE_STAT_LIST("Leaf pages", bsp->bt_leaf_pg);
@@ -378,9 +452,27 @@ tcl_DbStat(interp, objc, objv, dbp)
bsp->bt_over_pgfree);
}
}
+
+ /*
+ * Construct a {name {flag1 flag2 ... flagN}} list for the
+ * dbp flags. These aren't access-method dependent, but they
+ * include all the interesting flags, and the integer value
+ * isn't useful from Tcl--return the strings instead.
+ */
+ myobjv[0] = Tcl_NewStringObj("Flags", strlen("Flags"));
+ myobjv[1] = _GetFlagsList(interp, dbp->flags, __db_inmemdbflags);
+ flaglist = Tcl_NewListObj(2, myobjv);
+ if (flaglist == NULL) {
+ result = TCL_ERROR;
+ goto error;
+ }
+ if ((result =
+ Tcl_ListObjAppendElement(interp, res, flaglist)) != TCL_OK)
+ goto error;
+
Tcl_SetObjResult(interp, res);
error:
- __os_free(sp, 0);
+ free(sp);
return (result);
}
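
The -faststat change above tracks the 4.1 DB->stat interface, which dropped the separate malloc argument and replaced DB_RECORDCOUNT/DB_CACHED_COUNTS with DB_FAST_STAT. A rough sketch of the underlying call, assuming an open btree handle named dbp (names and output are illustrative):

    #include <stdio.h>
    #include <stdlib.h>
    #include <db.h>

    /* Sketch only: print the cheap counters DB_FAST_STAT provides. */
    static int
    print_fast_stat(DB *dbp)
    {
            DB_BTREE_STAT *sp;
            int ret;

            if ((ret = dbp->stat(dbp, &sp, DB_FAST_STAT)) != 0)
                    return (ret);
            printf("keys %lu, records %lu\n",
                (unsigned long)sp->bt_nkeys, (unsigned long)sp->bt_ndata);
            free(sp);        /* the stat structure is allocated by the library */
            return (0);
    }
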
@@ -395,50 +487,62 @@ tcl_DbClose(interp, objc, objv, dbp, dbip)
DB *dbp; /* Database pointer */
DBTCL_INFO *dbip; /* Info pointer */
{
- DBTCL_INFO *p, *nextp;
+ static char *dbclose[] = {
+ "-nosync", "--", NULL
+ };
+ enum dbclose {
+ TCL_DBCLOSE_NOSYNC,
+ TCL_DBCLOSE_ENDARG
+ };
u_int32_t flag;
- int result, ret;
+ int endarg, i, optindex, result, ret;
char *arg;
result = TCL_OK;
+ endarg = 0;
flag = 0;
- if (objc > 3) {
+ if (objc > 4) {
Tcl_WrongNumArgs(interp, 2, objv, "?-nosync?");
return (TCL_ERROR);
}
- if (objc == 3) {
- arg = Tcl_GetStringFromObj(objv[2], NULL);
- if (strcmp(arg, "-nosync") == 0)
+ i = 2;
+ while (i < objc) {
+ if (Tcl_GetIndexFromObj(interp, objv[i], dbclose,
+ "option", TCL_EXACT, &optindex) != TCL_OK) {
+ arg = Tcl_GetStringFromObj(objv[i], NULL);
+ if (arg[0] == '-')
+ return (IS_HELP(objv[i]));
+ else
+ Tcl_ResetResult(interp);
+ break;
+ }
+ i++;
+ switch ((enum dbclose)optindex) {
+ case TCL_DBCLOSE_NOSYNC:
flag = DB_NOSYNC;
- else {
- Tcl_SetResult(interp,
- "dbclose: unknown arg", TCL_STATIC);
- return (TCL_ERROR);
+ break;
+ case TCL_DBCLOSE_ENDARG:
+ endarg = 1;
+ break;
}
- }
-
- /*
- * First we have to close any open cursors. Then we close
- * our db.
- */
- for (p = LIST_FIRST(&__db_infohead); p != NULL; p = nextp) {
- nextp = LIST_NEXT(p, entries);
/*
- * Check if this is a cursor info structure and if
- * it is, if it belongs to this DB. If so, remove
- * its commands and info structure.
+ * If, at any time, parsing the args we get an error,
+ * bail out and return.
*/
- if (p->i_parent == dbip && p->i_type == I_DBC) {
- (void)Tcl_DeleteCommand(interp, p->i_name);
- _DeleteInfo(p);
- }
+ if (result != TCL_OK)
+ return (result);
+ if (endarg)
+ break;
}
- (void)Tcl_DeleteCommand(interp, dbip->i_name);
- _DeleteInfo(dbip);
+ _DbInfoDelete(interp, dbip);
_debug_check();
+
+ /* Paranoia. */
+ dbp->api_internal = NULL;
+
ret = (dbp)->close(dbp, flag);
- result = _ReturnSetup(interp, ret, "db close");
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), "db close");
return (result);
}
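
The rewritten argument loop above is the usual Tcl C idiom: Tcl_GetIndexFromObj() against a NULL-terminated option table, with "--" acting as an explicit end-of-options marker. A stripped-down sketch of that pattern (the option names and function are hypothetical, not the patch's code):

    #include <tcl.h>

    /* Sketch only: option-table parsing with an explicit "--" terminator. */
    static int
    parse_flags(Tcl_Interp *interp, int objc, Tcl_Obj *CONST objv[], int *nosyncp)
    {
            static char *opts[] = { "-nosync", "--", NULL };
            enum opts { OPT_NOSYNC, OPT_ENDARG };
            int i, optindex;

            *nosyncp = 0;
            for (i = 2; i < objc; i++) {
                    if (Tcl_GetIndexFromObj(interp, objv[i],
                        opts, "option", TCL_EXACT, &optindex) != TCL_OK)
                            return (TCL_ERROR);
                    if ((enum opts)optindex == OPT_ENDARG)
                            break;            /* "--": stop option processing */
                    *nosyncp = 1;             /* OPT_NOSYNC */
            }
            return (TCL_OK);
    }
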
@@ -453,16 +557,22 @@ tcl_DbPut(interp, objc, objv, dbp)
DB *dbp; /* Database pointer */
{
static char *dbputopts[] = {
- "-append",
+#if CONFIG_TEST
"-nodupdata",
+#endif
+ "-append",
+ "-auto_commit",
"-nooverwrite",
"-partial",
"-txn",
NULL
};
enum dbputopts {
- DBPUT_APPEND,
+#if CONFIG_TEST
DBGET_NODUPDATA,
+#endif
+ DBPUT_APPEND,
+ DBPUT_AUTO_COMMIT,
DBPUT_NOOVER,
DBPUT_PART,
DBPUT_TXN
@@ -475,9 +585,11 @@ tcl_DbPut(interp, objc, objv, dbp)
DBTYPE type;
DB_TXN *txn;
Tcl_Obj **elemv, *res;
+ void *dtmp, *ktmp;
db_recno_t recno;
u_int32_t flag;
- int elemc, end, i, itmp, optindex, result, ret;
+ int auto_commit, elemc, end, freekey, freedata;
+ int i, optindex, result, ret;
char *arg, msg[MSG_SIZE];
txn = NULL;
@@ -488,6 +600,7 @@ tcl_DbPut(interp, objc, objv, dbp)
return (TCL_ERROR);
}
+ freekey = freedata = 0;
memset(&key, 0, sizeof(key));
memset(&data, 0, sizeof(data));
@@ -496,7 +609,7 @@ tcl_DbPut(interp, objc, objv, dbp)
* and must be setup up to contain a db_recno_t. Otherwise the
	 * and must be set up to contain a db_recno_t. Otherwise the
* key is a "string".
*/
- type = dbp->get_type(dbp);
+ (void)dbp->get_type(dbp, &type);
/*
* We need to determine where the end of required args are. If we
@@ -527,12 +640,19 @@ tcl_DbPut(interp, objc, objv, dbp)
* defined above.
*/
i = 2;
+ auto_commit = 0;
while (i < end) {
if (Tcl_GetIndexFromObj(interp, objv[i],
dbputopts, "option", TCL_EXACT, &optindex) != TCL_OK)
return (IS_HELP(objv[i]));
i++;
switch ((enum dbputopts)optindex) {
+#if CONFIG_TEST
+ case DBGET_NODUPDATA:
+ FLAG_CHECK(flag);
+ flag = DB_NODUPDATA;
+ break;
+#endif
case DBPUT_TXN:
if (i > (end - 1)) {
Tcl_WrongNumArgs(interp, 2, objv, "?-txn id?");
@@ -548,14 +668,13 @@ tcl_DbPut(interp, objc, objv, dbp)
result = TCL_ERROR;
}
break;
+ case DBPUT_AUTO_COMMIT:
+ auto_commit = 1;
+ break;
case DBPUT_APPEND:
FLAG_CHECK(flag);
flag = DB_APPEND;
break;
- case DBGET_NODUPDATA:
- FLAG_CHECK(flag);
- flag = DB_NODUPDATA;
- break;
case DBPUT_NOOVER:
FLAG_CHECK(flag);
flag = DB_NOOVERWRITE;
@@ -579,12 +698,10 @@ tcl_DbPut(interp, objc, objv, dbp)
break;
}
data.flags = DB_DBT_PARTIAL;
- result = Tcl_GetIntFromObj(interp, elemv[0], &itmp);
- data.doff = itmp;
+ result = _GetUInt32(interp, elemv[0], &data.doff);
if (result != TCL_OK)
break;
- result = Tcl_GetIntFromObj(interp, elemv[1], &itmp);
- data.dlen = itmp;
+ result = _GetUInt32(interp, elemv[1], &data.dlen);
/*
* NOTE: We don't check result here because all we'd
* do is break anyway, and we are doing that. If you
@@ -597,6 +714,8 @@ tcl_DbPut(interp, objc, objv, dbp)
if (result != TCL_OK)
break;
}
+ if (auto_commit)
+ flag |= DB_AUTO_COMMIT;
if (result == TCL_ERROR)
return (result);
@@ -612,40 +731,41 @@ tcl_DbPut(interp, objc, objv, dbp)
if (flag == DB_APPEND)
recno = 0;
else {
- result = Tcl_GetIntFromObj(interp, objv[objc-2], &itmp);
- recno = itmp;
+ result = _GetUInt32(interp, objv[objc-2], &recno);
if (result != TCL_OK)
return (result);
}
} else {
- key.data = Tcl_GetByteArrayFromObj(objv[objc-2], &itmp);
- key.size = itmp;
+ ret = _CopyObjBytes(interp, objv[objc-2], &ktmp,
+ &key.size, &freekey);
+ if (ret != 0) {
+ result = _ReturnSetup(interp, ret,
+ DB_RETOK_DBPUT(ret), "db put");
+ return (result);
+ }
+ key.data = ktmp;
}
- /*
- * XXX
- * Tcl 8.1 Tcl_GetByteArrayFromObj/Tcl_GetIntFromObj bug.
- *
- * This line (and the line for key.data above) were moved from
- * the beginning of the function to here.
- *
- * There is a bug in Tcl 8.1 and byte arrays in that if it happens
- * to use an object as both a byte array and something else like
- * an int, and you've done a Tcl_GetByteArrayFromObj, then you
- * do a Tcl_GetIntFromObj, your memory is deleted.
- *
- * Workaround is to make sure all Tcl_GetByteArrayFromObj calls
- * are done last.
- */
- data.data = Tcl_GetByteArrayFromObj(objv[objc-1], &itmp);
- data.size = itmp;
+ ret = _CopyObjBytes(interp, objv[objc-1], &dtmp,
+ &data.size, &freedata);
+ if (ret != 0) {
+ result = _ReturnSetup(interp, ret,
+ DB_RETOK_DBPUT(ret), "db put");
+ goto out;
+ }
+ data.data = dtmp;
_debug_check();
ret = dbp->put(dbp, txn, &key, &data, flag);
- result = _ReturnSetup(interp, ret, "db put");
+ result = _ReturnSetup(interp, ret, DB_RETOK_DBPUT(ret), "db put");
if (ret == 0 &&
(type == DB_RECNO || type == DB_QUEUE) && flag == DB_APPEND) {
- res = Tcl_NewIntObj(recno);
+ res = Tcl_NewLongObj((long)recno);
Tcl_SetObjResult(interp, res);
}
+out:
+ if (freedata)
+ (void)__os_free(dbp->dbenv, dtmp);
+ if (freekey)
+ (void)__os_free(dbp->dbenv, ktmp);
return (result);
}
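
The new -auto_commit handling above simply ORs DB_AUTO_COMMIT into the put flags, which makes the single operation run in its own implicit transaction. A minimal sketch of that call, assuming dbp is open in a transactional environment (key and data contents are illustrative):

    #include <string.h>
    #include <db.h>

    /* Sketch only: one put wrapped in its own transaction via DB_AUTO_COMMIT. */
    static int
    put_one(DB *dbp)
    {
            DBT key, data;

            memset(&key, 0, sizeof(key));
            memset(&data, 0, sizeof(data));
            key.data = "fruit";
            key.size = (u_int32_t)strlen("fruit");
            data.data = "apple";
            data.size = (u_int32_t)strlen("apple");

            return (dbp->put(dbp, NULL, &key, &data, DB_AUTO_COMMIT));
    }
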
@@ -653,13 +773,18 @@ tcl_DbPut(interp, objc, objv, dbp)
* tcl_db_get --
*/
static int
-tcl_DbGet(interp, objc, objv, dbp)
+tcl_DbGet(interp, objc, objv, dbp, ispget)
Tcl_Interp *interp; /* Interpreter */
int objc; /* How many arguments? */
Tcl_Obj *CONST objv[]; /* The argument objects */
DB *dbp; /* Database pointer */
+ int ispget; /* 1 for pget, 0 for get */
{
static char *dbgetopts[] = {
+#if CONFIG_TEST
+ "-dirty",
+ "-multi",
+#endif
"-consume",
"-consume_wait",
"-get_both",
@@ -668,9 +793,14 @@ tcl_DbGet(interp, objc, objv, dbp)
"-recno",
"-rmw",
"-txn",
+ "--",
NULL
};
enum dbgetopts {
+#if CONFIG_TEST
+ DBGET_DIRTY,
+ DBGET_MULTI,
+#endif
DBGET_CONSUME,
DBGET_CONSUME_WAIT,
DBGET_BOTH,
@@ -678,21 +808,25 @@ tcl_DbGet(interp, objc, objv, dbp)
DBGET_PART,
DBGET_RECNO,
DBGET_RMW,
- DBGET_TXN
+ DBGET_TXN,
+ DBGET_ENDARG
};
DBC *dbc;
- DBT key, data, save;
+ DBT key, pkey, data, save;
DBTYPE type;
DB_TXN *txn;
Tcl_Obj **elemv, *retlist;
- db_recno_t recno;
- u_int32_t flag, cflag, isdup, rmw;
- int elemc, end, i, itmp, optindex, result, ret, useglob, userecno;
+ void *dtmp, *ktmp;
+ u_int32_t flag, cflag, isdup, mflag, rmw;
+ int bufsize, elemc, end, endarg, freekey, freedata, i;
+ int optindex, result, ret, useglob, useprecno, userecno;
char *arg, *pattern, *prefix, msg[MSG_SIZE];
+ db_recno_t precno, recno;
result = TCL_OK;
- cflag = flag = rmw = 0;
- useglob = userecno = 0;
+ freekey = freedata = 0;
+ cflag = endarg = flag = mflag = rmw = 0;
+ useglob = userecno = useprecno = 0;
txn = NULL;
pattern = prefix = NULL;
@@ -705,23 +839,41 @@ tcl_DbGet(interp, objc, objv, dbp)
memset(&data, 0, sizeof(data));
memset(&save, 0, sizeof(save));
+ /* For the primary key in a pget call. */
+ memset(&pkey, 0, sizeof(pkey));
+
/*
* Get the command name index from the object based on the options
* defined above.
*/
i = 2;
- type = dbp->get_type(dbp);
+ (void)dbp->get_type(dbp, &type);
end = objc;
while (i < end) {
if (Tcl_GetIndexFromObj(interp, objv[i], dbgetopts, "option",
TCL_EXACT, &optindex) != TCL_OK) {
- if (IS_HELP(objv[i]) == TCL_OK)
- return (TCL_OK);
- Tcl_ResetResult(interp);
+ arg = Tcl_GetStringFromObj(objv[i], NULL);
+ if (arg[0] == '-') {
+ result = IS_HELP(objv[i]);
+ goto out;
+ } else
+ Tcl_ResetResult(interp);
break;
}
i++;
switch ((enum dbgetopts)optindex) {
+#if CONFIG_TEST
+ case DBGET_DIRTY:
+ rmw |= DB_DIRTY_READ;
+ break;
+ case DBGET_MULTI:
+ mflag |= DB_MULTIPLE;
+ result = Tcl_GetIntFromObj(interp, objv[i], &bufsize);
+ if (result != TCL_OK)
+ goto out;
+ i++;
+ break;
+#endif
case DBGET_BOTH:
/*
* Change 'end' and make sure we aren't already past
@@ -738,7 +890,7 @@ tcl_DbGet(interp, objc, objv, dbp)
flag = DB_GET_BOTH;
break;
case DBGET_TXN:
- if (i == end - 1) {
+ if (i >= end) {
Tcl_WrongNumArgs(interp, 2, objv, "?-txn id?");
result = TCL_ERROR;
break;
@@ -773,7 +925,7 @@ tcl_DbGet(interp, objc, objv, dbp)
}
break;
case DBGET_RMW:
- rmw = DB_RMW;
+ rmw |= DB_RMW;
break;
case DBGET_PART:
end = objc - 1;
@@ -795,12 +947,10 @@ tcl_DbGet(interp, objc, objv, dbp)
break;
}
save.flags = DB_DBT_PARTIAL;
- result = Tcl_GetIntFromObj(interp, elemv[0], &itmp);
- save.doff = itmp;
+ result = _GetUInt32(interp, elemv[0], &save.doff);
if (result != TCL_OK)
break;
- result = Tcl_GetIntFromObj(interp, elemv[1], &itmp);
- save.dlen = itmp;
+ result = _GetUInt32(interp, elemv[1], &save.dlen);
/*
* NOTE: We don't check result here because all we'd
* do is break anyway, and we are doing that. If you
@@ -809,15 +959,54 @@ tcl_DbGet(interp, objc, objv, dbp)
* lines above and copy that.)
*/
break;
- }
+ case DBGET_ENDARG:
+ endarg = 1;
+ break;
+ } /* switch */
if (result != TCL_OK)
break;
+ if (endarg)
+ break;
}
if (result != TCL_OK)
goto out;
if (type == DB_RECNO || type == DB_QUEUE)
userecno = 1;
+
+ /*
+ * Check args we have left versus the flags we were given.
+ * We might have 0, 1 or 2 left. If we have 0, it must
+ * be DB_CONSUME*, if 2, then DB_GET_BOTH, all others should
+ * be 1.
+ */
+ if (((flag == DB_CONSUME || flag == DB_CONSUME_WAIT) && i != objc) ||
+ (flag == DB_GET_BOTH && i != objc - 2)) {
+ Tcl_SetResult(interp,
+ "Wrong number of key/data given based on flags specified\n",
+ TCL_STATIC);
+ result = TCL_ERROR;
+ goto out;
+ } else if (flag == 0 && i != objc - 1) {
+ Tcl_SetResult(interp,
+ "Wrong number of key/data given\n", TCL_STATIC);
+ result = TCL_ERROR;
+ goto out;
+ }
+
+ /*
+ * XXX
+ * We technically shouldn't be looking inside the dbp like this,
+ * but this is the only way to figure out whether the primary
+ * key should also be a recno.
+ */
+ if (ispget) {
+ if (dbp->s_primary != NULL &&
+ (dbp->s_primary->type == DB_RECNO ||
+ dbp->s_primary->type == DB_QUEUE))
+ useprecno = 1;
+ }
+
/*
* Check for illegal combos of options.
*/
@@ -862,93 +1051,189 @@ tcl_DbGet(interp, objc, objv, dbp)
* ops that don't require returning multiple items, use DB->get
* instead of a cursor operation.
*/
- if (pattern == NULL && (isdup == 0 ||
+ if (pattern == NULL && (isdup == 0 || mflag != 0 ||
flag == DB_SET_RECNO || flag == DB_GET_BOTH ||
flag == DB_CONSUME || flag == DB_CONSUME_WAIT)) {
if (flag == DB_GET_BOTH) {
if (userecno) {
- result = Tcl_GetIntFromObj(interp,
- objv[(objc - 2)], &itmp);
- recno = itmp;
+ result = _GetUInt32(interp,
+ objv[(objc - 2)], &recno);
if (result == TCL_OK) {
key.data = &recno;
key.size = sizeof(db_recno_t);
} else
- return (result);
+ goto out;
} else {
- key.data =
- Tcl_GetByteArrayFromObj(objv[objc-2],
- &itmp);
- key.size = itmp;
+ /*
+ * Some get calls (SET_*) can change the
+ * key pointers. So, we need to store
+ * the allocated key space in a tmp.
+ */
+ ret = _CopyObjBytes(interp, objv[objc-2],
+ &ktmp, &key.size, &freekey);
+ if (ret != 0) {
+ result = _ReturnSetup(interp, ret,
+ DB_RETOK_DBGET(ret), "db get");
+ goto out;
+ }
+ key.data = ktmp;
}
/*
* Already checked args above. Fill in key and save.
* Save is used in the dbp->get call below to fill in
* data.
+ *
+ * If the "data" here is really a primary key--that
+ * is, if we're in a pget--and that primary key
+ * is a recno, treat it appropriately as an int.
*/
- save.data =
- Tcl_GetByteArrayFromObj(objv[objc-1], &itmp);
- save.size = itmp;
+ if (useprecno) {
+ result = _GetUInt32(interp,
+ objv[objc - 1], &precno);
+ if (result == TCL_OK) {
+ save.data = &precno;
+ save.size = sizeof(db_recno_t);
+ } else
+ goto out;
+ } else {
+ ret = _CopyObjBytes(interp, objv[objc-1],
+ &dtmp, &save.size, &freedata);
+ if (ret != 0) {
+ result = _ReturnSetup(interp, ret,
+ DB_RETOK_DBGET(ret), "db get");
+ goto out;
+ }
+ save.data = dtmp;
+ }
} else if (flag != DB_CONSUME && flag != DB_CONSUME_WAIT) {
if (userecno) {
- result = Tcl_GetIntFromObj(
- interp, objv[(objc - 1)], &itmp);
- recno = itmp;
+ result = _GetUInt32(
+ interp, objv[(objc - 1)], &recno);
if (result == TCL_OK) {
key.data = &recno;
key.size = sizeof(db_recno_t);
} else
- return (result);
+ goto out;
} else {
- key.data = Tcl_GetByteArrayFromObj(objv[objc-1], &itmp);
- key.size = itmp;
+ /*
+ * Some get calls (SET_*) can change the
+ * key pointers. So, we need to store
+ * the allocated key space in a tmp.
+ */
+ ret = _CopyObjBytes(interp, objv[objc-1],
+ &ktmp, &key.size, &freekey);
+ if (ret != 0) {
+ result = _ReturnSetup(interp, ret,
+ DB_RETOK_DBGET(ret), "db get");
+ goto out;
+ }
+ key.data = ktmp;
+ }
+ if (mflag & DB_MULTIPLE) {
+ if ((ret = __os_malloc(dbp->dbenv,
+ bufsize, &save.data)) != 0) {
+ Tcl_SetResult(interp,
+ db_strerror(ret), TCL_STATIC);
+ goto out;
+ }
+ save.ulen = bufsize;
+ F_CLR(&save, DB_DBT_MALLOC);
+ F_SET(&save, DB_DBT_USERMEM);
}
}
- memset(&data, 0, sizeof(data));
data = save;
- _debug_check();
-
- ret = dbp->get(dbp, txn, &key, &data, flag | rmw);
- result = _ReturnSetup(interp, ret, "db get");
+ if (ispget) {
+ if (flag == DB_GET_BOTH) {
+ pkey.data = save.data;
+ pkey.size = save.size;
+ data.data = NULL;
+ data.size = 0;
+ }
+ F_SET(&pkey, DB_DBT_MALLOC);
+ _debug_check();
+ ret = dbp->pget(dbp,
+ txn, &key, &pkey, &data, flag | rmw);
+ } else {
+ _debug_check();
+ ret = dbp->get(dbp,
+ txn, &key, &data, flag | rmw | mflag);
+ }
+ result = _ReturnSetup(interp, ret, DB_RETOK_DBGET(ret),
+ "db get");
if (ret == 0) {
/*
* Success. Return a list of the form {name value}
* If it was a recno in key.data, we need to convert
* into a string/object representation of that recno.
*/
- if (type == DB_RECNO || type == DB_QUEUE)
- result = _SetListRecnoElem(interp, retlist,
- *(db_recno_t *)key.data, data.data,
- data.size);
- else
- result = _SetListElem(interp, retlist,
- key.data, key.size, data.data, data.size);
- /*
- * Free space from DB_DBT_MALLOC
- */
- __os_free(data.data, data.size);
+ if (mflag & DB_MULTIPLE)
+ result = _SetMultiList(interp,
+ retlist, &key, &data, type, flag);
+ else if (type == DB_RECNO || type == DB_QUEUE)
+ if (ispget)
+ result = _Set3DBTList(interp,
+ retlist, &key, 1, &pkey,
+ useprecno, &data);
+ else
+ result = _SetListRecnoElem(interp,
+ retlist, *(db_recno_t *)key.data,
+ data.data, data.size);
+ else {
+ if (ispget)
+ result = _Set3DBTList(interp,
+ retlist, &key, 0, &pkey,
+ useprecno, &data);
+ else
+ result = _SetListElem(interp, retlist,
+ key.data, key.size,
+ data.data, data.size);
+ }
}
+ /*
+ * Free space from DBT.
+ *
+ * If we set DB_DBT_MALLOC, we need to free the space if
+ * and only if we succeeded (and thus if DB allocated
+ * anything). If DB_DBT_MALLOC is not set, this is a bulk
+ * get buffer, and needs to be freed no matter what.
+ */
+ if (F_ISSET(&data, DB_DBT_MALLOC) && ret == 0)
+ __os_ufree(dbp->dbenv, data.data);
+ else if (!F_ISSET(&data, DB_DBT_MALLOC))
+ __os_free(dbp->dbenv, data.data);
+ if (ispget && ret == 0)
+ __os_ufree(dbp->dbenv, pkey.data);
if (result == TCL_OK)
Tcl_SetObjResult(interp, retlist);
goto out;
}
if (userecno) {
- result = Tcl_GetIntFromObj(interp, objv[(objc - 1)], &itmp);
- recno = itmp;
+ result = _GetUInt32(interp, objv[(objc - 1)], &recno);
if (result == TCL_OK) {
key.data = &recno;
key.size = sizeof(db_recno_t);
} else
- return (result);
+ goto out;
} else {
- key.data = Tcl_GetByteArrayFromObj(objv[objc-1], &itmp);
- key.size = itmp;
+ /*
+ * Some get calls (SET_*) can change the
+ * key pointers. So, we need to store
+ * the allocated key space in a tmp.
+ */
+ ret = _CopyObjBytes(interp, objv[objc-1], &ktmp,
+ &key.size, &freekey);
+ if (ret != 0) {
+ result = _ReturnSetup(interp, ret,
+ DB_RETOK_DBGET(ret), "db get");
+ return (result);
+ }
+ key.data = ktmp;
}
ret = dbp->cursor(dbp, txn, &dbc, 0);
- result = _ReturnSetup(interp, ret, "db cursor");
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), "db cursor");
if (result == TCL_ERROR)
goto out;
@@ -988,11 +1273,26 @@ tcl_DbGet(interp, objc, objv, dbp)
cflag = DB_SET_RANGE;
} else
cflag = DB_SET;
- _debug_check();
- ret = dbc->c_get(dbc, &key, &data, cflag | rmw);
- result = _ReturnSetup(interp, ret, "db get (cursor)");
+ if (ispget) {
+ _debug_check();
+ F_SET(&pkey, DB_DBT_MALLOC);
+ ret = dbc->c_pget(dbc, &key, &pkey, &data, cflag | rmw);
+ } else {
+ _debug_check();
+ ret = dbc->c_get(dbc, &key, &data, cflag | rmw);
+ }
+ result = _ReturnSetup(interp, ret, DB_RETOK_DBCGET(ret),
+ "db get (cursor)");
if (result == TCL_ERROR)
goto out1;
+ if (ret == 0 && pattern &&
+ memcmp(key.data, prefix, strlen(prefix)) != 0) {
+ /*
+ * Free space from DB_DBT_MALLOC
+ */
+ free(data.data);
+ goto out1;
+ }
if (pattern)
cflag = DB_NEXT;
else
@@ -1002,36 +1302,46 @@ tcl_DbGet(interp, objc, objv, dbp)
/*
* Build up our {name value} sublist
*/
- result = _SetListElem(interp, retlist,
- key.data, key.size,
- data.data, data.size);
+ if (ispget)
+ result = _Set3DBTList(interp, retlist, &key, 0,
+ &pkey, useprecno, &data);
+ else
+ result = _SetListElem(interp, retlist,
+ key.data, key.size, data.data, data.size);
/*
* Free space from DB_DBT_MALLOC
*/
- __os_free(data.data, data.size);
+ if (ispget)
+ free(pkey.data);
+ free(data.data);
if (result != TCL_OK)
break;
/*
* Append {name value} to return list
*/
memset(&key, 0, sizeof(key));
+ memset(&pkey, 0, sizeof(pkey));
memset(&data, 0, sizeof(data));
/*
* Restore any "partial" info we have saved.
*/
data = save;
- ret = dbc->c_get(dbc, &key, &data, cflag | rmw);
+ if (ispget) {
+ F_SET(&pkey, DB_DBT_MALLOC);
+ ret = dbc->c_pget(dbc, &key, &pkey, &data, cflag | rmw);
+ } else
+ ret = dbc->c_get(dbc, &key, &data, cflag | rmw);
if (ret == 0 && pattern &&
memcmp(key.data, prefix, strlen(prefix)) != 0) {
/*
* Free space from DB_DBT_MALLOC
*/
- __os_free(data.data, data.size);
+ free(data.data);
break;
}
}
- dbc->c_close(dbc);
out1:
+ dbc->c_close(dbc);
if (result == TCL_OK)
Tcl_SetObjResult(interp, retlist);
out:
@@ -1041,7 +1351,11 @@ out:
* have multiple nuls at the end, so we free using __os_free().
*/
if (prefix != NULL)
- __os_free(prefix,0);
+ __os_free(dbp->dbenv, prefix);
+ if (freedata)
+ (void)__os_free(dbp->dbenv, dtmp);
+ if (freekey)
+ (void)__os_free(dbp->dbenv, ktmp);
return (result);
}
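
The -multi path above allocates a user-owned buffer, sets DB_DBT_USERMEM, and passes DB_MULTIPLE to DB->get; the Tcl layer then unpacks the buffer. A hand-written sketch of the same bulk pattern (the key string, buffer size and handle are assumptions for illustration, not values from the patch):

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>
    #include <db.h>

    /* Sketch only: fetch all duplicates of one key in a single bulk call. */
    static int
    bulk_get_dups(DB *dbp, char *keystr, u_int32_t bufsize)
    {
            DBT key, data;
            void *p, *retdata;
            u_int32_t retdlen;
            int ret;

            memset(&key, 0, sizeof(key));
            memset(&data, 0, sizeof(data));
            key.data = keystr;
            key.size = (u_int32_t)strlen(keystr);

            if ((data.data = malloc(bufsize)) == NULL)
                    return (ENOMEM);
            data.ulen = bufsize;
            data.flags = DB_DBT_USERMEM;

            if ((ret = dbp->get(dbp, NULL, &key, &data, DB_MULTIPLE)) == 0) {
                    /* Walk the buffer: one entry per duplicate data item. */
                    DB_MULTIPLE_INIT(p, &data);
                    for (;;) {
                            DB_MULTIPLE_NEXT(p, &data, retdata, retdlen);
                            if (p == NULL)
                                    break;
                            printf("%.*s\n", (int)retdlen, (char *)retdata);
                    }
            }
            free(data.data);
            return (ret);
    }
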
@@ -1056,11 +1370,13 @@ tcl_DbDelete(interp, objc, objv, dbp)
DB *dbp; /* Database pointer */
{
static char *dbdelopts[] = {
+ "-auto_commit",
"-glob",
"-txn",
NULL
};
enum dbdelopts {
+ DBDEL_AUTO_COMMIT,
DBDEL_GLOB,
DBDEL_TXN
};
@@ -1068,12 +1384,14 @@ tcl_DbDelete(interp, objc, objv, dbp)
DBT key, data;
DBTYPE type;
DB_TXN *txn;
+ void *ktmp;
db_recno_t recno;
- int i, itmp, optindex, result, ret;
+ int freekey, i, optindex, result, ret;
u_int32_t flag;
char *arg, *pattern, *prefix, msg[MSG_SIZE];
result = TCL_OK;
+ freekey = 0;
flag = 0;
pattern = prefix = NULL;
txn = NULL;
@@ -1084,17 +1402,17 @@ tcl_DbDelete(interp, objc, objv, dbp)
memset(&key, 0, sizeof(key));
/*
- * The first arg must be -txn, -glob or a list of keys.
+ * The first arg must be -auto_commit, -glob, -txn or a list of keys.
*/
i = 2;
while (i < objc) {
if (Tcl_GetIndexFromObj(interp, objv[i], dbdelopts, "option",
TCL_EXACT, &optindex) != TCL_OK) {
/*
- * If we don't have a -glob or -txn, then the
- * remaining args must be exact keys.
- * Reset the result so we don't get
- * an errant error message if there is another error.
+ * If we don't have a -auto_commit, -glob or -txn,
+ * then the remaining args must be exact keys.
+ * Reset the result so we don't get an errant error
+ * message if there is another error.
*/
if (IS_HELP(objv[i]) == TCL_OK)
return (TCL_OK);
@@ -1121,6 +1439,9 @@ tcl_DbDelete(interp, objc, objv, dbp)
result = TCL_ERROR;
}
break;
+ case DBDEL_AUTO_COMMIT:
+ flag |= DB_AUTO_COMMIT;
+ break;
case DBDEL_GLOB:
/*
* Get the pattern. Get the prefix and use cursors to
@@ -1143,17 +1464,6 @@ tcl_DbDelete(interp, objc, objv, dbp)
if (result != TCL_OK)
goto out;
-
- /*
- * If we have a pattern AND more keys to process, then there
- * is an error. Either we have some number of exact keys,
- * or we have a pattern.
- */
- if (pattern != NULL && i != objc) {
- Tcl_WrongNumArgs(interp, 2, objv, "?args? -glob pattern | key");
- result = TCL_ERROR;
- goto out;
- }
/*
* XXX
* For consistency with get, we have decided for the moment, to
@@ -1163,11 +1473,33 @@ tcl_DbDelete(interp, objc, objv, dbp)
* than one, and at that time we'd make delete be consistent. In
* any case, the code is already here and there is no need to remove,
* just check that we only have one arg left.
+ *
+ * If we have a pattern AND more keys to process, there is an error.
+ * Either we have some number of exact keys, or we have a pattern.
+ *
+ * If we have a pattern and an auto commit flag, there is an error.
*/
- if (pattern == NULL && i != (objc - 1)) {
- Tcl_WrongNumArgs(interp, 2, objv, "?args? -glob pattern | key");
- result = TCL_ERROR;
- goto out;
+ if (pattern == NULL) {
+ if (i != (objc - 1)) {
+ Tcl_WrongNumArgs(
+ interp, 2, objv, "?args? -glob pattern | key");
+ result = TCL_ERROR;
+ goto out;
+ }
+ } else {
+ if (i != objc) {
+ Tcl_WrongNumArgs(
+ interp, 2, objv, "?args? -glob pattern | key");
+ result = TCL_ERROR;
+ goto out;
+ }
+ if (flag & DB_AUTO_COMMIT) {
+ Tcl_SetResult(interp,
+ "Cannot use -auto_commit and patterns.\n",
+ TCL_STATIC);
+ result = TCL_ERROR;
+ goto out;
+ }
}
/*
@@ -1177,32 +1509,39 @@ tcl_DbDelete(interp, objc, objv, dbp)
* If it is a RECNO database, the key is a record number and must be
* setup up to contain a db_recno_t. Otherwise the key is a "string".
	 * set up to contain a db_recno_t. Otherwise the key is a "string".
*/
- type = dbp->get_type(dbp);
+ (void)dbp->get_type(dbp, &type);
ret = 0;
while (i < objc && ret == 0) {
memset(&key, 0, sizeof(key));
if (type == DB_RECNO || type == DB_QUEUE) {
- result = Tcl_GetIntFromObj(interp, objv[i++], &itmp);
- recno = itmp;
+ result = _GetUInt32(interp, objv[i++], &recno);
if (result == TCL_OK) {
key.data = &recno;
key.size = sizeof(db_recno_t);
} else
return (result);
} else {
- key.data = Tcl_GetByteArrayFromObj(objv[i++], &itmp);
- key.size = itmp;
+ ret = _CopyObjBytes(interp, objv[i++], &ktmp,
+ &key.size, &freekey);
+ if (ret != 0) {
+ result = _ReturnSetup(interp, ret,
+ DB_RETOK_DBDEL(ret), "db del");
+ return (result);
+ }
+ key.data = ktmp;
}
_debug_check();
- ret = dbp->del(dbp, txn, &key, 0);
+ ret = dbp->del(dbp, txn, &key, flag);
/*
* If we have any error, set up return result and stop
* processing keys.
*/
+ if (freekey)
+ (void)__os_free(dbp->dbenv, ktmp);
if (ret != 0)
break;
}
- result = _ReturnSetup(interp, ret, "db del");
+ result = _ReturnSetup(interp, ret, DB_RETOK_DBDEL(ret), "db del");
/*
* At this point we've either finished or, if we have a pattern,
@@ -1212,7 +1551,8 @@ tcl_DbDelete(interp, objc, objv, dbp)
if (pattern) {
ret = dbp->cursor(dbp, txn, &dbc, 0);
if (ret != 0) {
- result = _ReturnSetup(interp, ret, "db cursor");
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "db cursor");
goto out;
}
/*
@@ -1244,7 +1584,8 @@ tcl_DbDelete(interp, objc, objv, dbp)
_debug_check();
ret = dbc->c_del(dbc, 0);
if (ret != 0) {
- result = _ReturnSetup(interp, ret, "db c_del");
+ result = _ReturnSetup(interp, ret,
+ DB_RETOK_DBCDEL(ret), "db c_del");
break;
}
/*
@@ -1262,9 +1603,9 @@ tcl_DbDelete(interp, objc, objv, dbp)
* by copying and condensing another string. Thus prefix may
* have multiple nuls at the end, so we free using __os_free().
*/
- __os_free(prefix,0);
+ __os_free(dbp->dbenv, prefix);
dbc->c_close(dbc);
- result = _ReturnSetup(interp, ret, "db del");
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), "db del");
}
out:
return (result);
@@ -1282,11 +1623,19 @@ tcl_DbCursor(interp, objc, objv, dbp, dbcp)
DBC **dbcp; /* Return cursor pointer */
{
static char *dbcuropts[] = {
- "-txn", "-update",
+#if CONFIG_TEST
+ "-dirty",
+ "-update",
+#endif
+ "-txn",
NULL
};
enum dbcuropts {
- DBCUR_TXN, DBCUR_UPDATE
+#if CONFIG_TEST
+ DBCUR_DIRTY,
+ DBCUR_UPDATE,
+#endif
+ DBCUR_TXN
};
DB_TXN *txn;
u_int32_t flag;
@@ -1296,11 +1645,6 @@ tcl_DbCursor(interp, objc, objv, dbp, dbcp)
result = TCL_OK;
flag = 0;
txn = NULL;
- /*
- * If the user asks for -glob or -recno, it MUST be the second
- * last arg given. If it isn't given, then we must check if
- * they gave us a correct key.
- */
i = 2;
while (i < objc) {
if (Tcl_GetIndexFromObj(interp, objv[i], dbcuropts, "option",
@@ -1310,6 +1654,14 @@ tcl_DbCursor(interp, objc, objv, dbp, dbcp)
}
i++;
switch ((enum dbcuropts)optindex) {
+#if CONFIG_TEST
+ case DBCUR_DIRTY:
+ flag |= DB_DIRTY_READ;
+ break;
+ case DBCUR_UPDATE:
+ flag |= DB_WRITECURSOR;
+ break;
+#endif
case DBCUR_TXN:
if (i == objc) {
Tcl_WrongNumArgs(interp, 2, objv, "?-txn id?");
@@ -1325,9 +1677,6 @@ tcl_DbCursor(interp, objc, objv, dbp, dbcp)
result = TCL_ERROR;
}
break;
- case DBCUR_UPDATE:
- flag = DB_WRITECURSOR;
- break;
}
if (result != TCL_OK)
break;
@@ -1344,6 +1693,192 @@ out:
}
/*
+ * tcl_DbAssociate --
+ * Call DB->associate().
+ */
+static int
+tcl_DbAssociate(interp, objc, objv, dbp)
+ Tcl_Interp *interp;
+ int objc;
+ Tcl_Obj *CONST objv[];
+ DB *dbp;
+{
+ static char *dbaopts[] = {
+ "-auto_commit",
+ "-create",
+ "-txn",
+ NULL
+ };
+ enum dbaopts {
+ DBA_AUTO_COMMIT,
+ DBA_CREATE,
+ DBA_TXN
+ };
+ DB *sdbp;
+ DB_TXN *txn;
+ DBTCL_INFO *sdbip;
+ int i, optindex, result, ret;
+ char *arg, msg[MSG_SIZE];
+ u_int32_t flag;
+
+ txn = NULL;
+ result = TCL_OK;
+ flag = 0;
+ if (objc < 2) {
+ Tcl_WrongNumArgs(interp, 2, objv, "[callback] secondary");
+ return (TCL_ERROR);
+ }
+
+ i = 2;
+ while (i < objc) {
+ if (Tcl_GetIndexFromObj(interp, objv[i], dbaopts, "option",
+ TCL_EXACT, &optindex) != TCL_OK) {
+ result = IS_HELP(objv[i]);
+ if (result == TCL_OK)
+ return (result);
+ result = TCL_OK;
+ Tcl_ResetResult(interp);
+ break;
+ }
+ i++;
+ switch ((enum dbaopts)optindex) {
+ case DBA_AUTO_COMMIT:
+ flag |= DB_AUTO_COMMIT;
+ break;
+ case DBA_CREATE:
+ flag |= DB_CREATE;
+ break;
+ case DBA_TXN:
+ if (i > (objc - 1)) {
+ Tcl_WrongNumArgs(interp, 2, objv, "?-txn id?");
+ result = TCL_ERROR;
+ break;
+ }
+ arg = Tcl_GetStringFromObj(objv[i++], NULL);
+ txn = NAME_TO_TXN(arg);
+ if (txn == NULL) {
+ snprintf(msg, MSG_SIZE,
+ "Associate: Invalid txn: %s\n", arg);
+ Tcl_SetResult(interp, msg, TCL_VOLATILE);
+ result = TCL_ERROR;
+ }
+ break;
+ }
+ }
+ if (result != TCL_OK)
+ return (result);
+
+ /*
+ * Better be 1 or 2 args left. The last arg must be the sdb
+ * handle. If 2 args then objc-2 is the callback proc, else
+ * we have a NULL callback.
+ */
+ /* Get the secondary DB handle. */
+ arg = Tcl_GetStringFromObj(objv[objc - 1], NULL);
+ sdbp = NAME_TO_DB(arg);
+ if (sdbp == NULL) {
+ snprintf(msg, MSG_SIZE,
+ "Associate: Invalid database handle: %s\n", arg);
+ Tcl_SetResult(interp, msg, TCL_VOLATILE);
+ return (TCL_ERROR);
+ }
+
+ /*
+ * The callback is simply a Tcl object containing the name
+ * of the callback proc, which is the second-to-last argument.
+ *
+ * Note that the callback needs to go in the *secondary* DB handle's
+ * info struct; we may have multiple secondaries with different
+ * callbacks.
+ */
+ sdbip = (DBTCL_INFO *)sdbp->api_internal;
+ if (i != objc - 1) {
+ /*
+ * We have 2 args, get the callback.
+ */
+ sdbip->i_second_call = objv[objc - 2];
+ Tcl_IncrRefCount(sdbip->i_second_call);
+
+ /* Now call associate. */
+ _debug_check();
+ ret = dbp->associate(dbp, txn, sdbp, tcl_second_call, flag);
+ } else {
+ /*
+ * We have a NULL callback.
+ */
+ sdbip->i_second_call = NULL;
+ ret = dbp->associate(dbp, txn, sdbp, NULL, flag);
+ }
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), "associate");
+
+ return (result);
+}
+
+/*
+ * tcl_second_call --
+ * Callback function for secondary indices. Get the callback
+ * out of ip->i_second_call and call it.
+ */
+static int
+tcl_second_call(dbp, pkey, data, skey)
+ DB *dbp;
+ const DBT *pkey, *data;
+ DBT *skey;
+{
+ DBTCL_INFO *ip;
+ Tcl_Interp *interp;
+ Tcl_Obj *pobj, *dobj, *objv[3];
+ int len, result, ret;
+ void *retbuf, *databuf;
+
+ ip = (DBTCL_INFO *)dbp->api_internal;
+ interp = ip->i_interp;
+ objv[0] = ip->i_second_call;
+
+ /*
+ * Create two ByteArray objects, with the contents of the pkey
+ * and data DBTs that are our inputs.
+ */
+ pobj = Tcl_NewByteArrayObj(pkey->data, pkey->size);
+ Tcl_IncrRefCount(pobj);
+ dobj = Tcl_NewByteArrayObj(data->data, data->size);
+ Tcl_IncrRefCount(dobj);
+
+ objv[1] = pobj;
+ objv[2] = dobj;
+
+ result = Tcl_EvalObjv(interp, 3, objv, 0);
+
+ Tcl_DecrRefCount(pobj);
+ Tcl_DecrRefCount(dobj);
+
+ if (result != TCL_OK) {
+ __db_err(dbp->dbenv,
+ "Tcl callback function failed with code %d", result);
+ return (EINVAL);
+ }
+
+ retbuf =
+ Tcl_GetByteArrayFromObj(Tcl_GetObjResult(interp), &len);
+
+ /*
+ * retbuf is owned by Tcl; copy it into malloc'ed memory.
+	 * We need to use __os_umalloc rather than __os_malloc because this will
+ * be freed by DB using __os_ufree--the DB_DBT_APPMALLOC flag
+ * tells DB to free application-allocated memory.
+ */
+ if ((ret = __os_umalloc(dbp->dbenv, len, &databuf)) != 0)
+ return (ret);
+ memcpy(databuf, retbuf, len);
+
+ skey->data = databuf;
+ skey->size = len;
+ F_SET(skey, DB_DBT_APPMALLOC);
+
+ return (0);
+}
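
tcl_DbAssociate and tcl_second_call above wire a Tcl proc in as the secondary-key callback. In plain C the same machinery looks roughly like the sketch below; the "secondary key is the first 10 bytes of the data" rule is an assumption for illustration only, and the function names are hypothetical:

    #include <string.h>
    #include <db.h>

    /* Sketch only: derive a secondary key directly from the primary data. */
    static int
    first_bytes_callback(DB *sdbp, const DBT *pkey, const DBT *pdata, DBT *skey)
    {
            (void)sdbp;
            (void)pkey;
            memset(skey, 0, sizeof(DBT));
            skey->data = pdata->data;            /* no copy, so no APPMALLOC */
            skey->size = pdata->size < 10 ? pdata->size : 10;
            return (0);
    }

    static int
    open_secondary(DB *primary, DB *secondary)
    {
            /* 4.1: associate takes an optional transaction handle. */
            return (primary->associate(primary, NULL, secondary,
                first_bytes_callback, DB_CREATE));
    }

    static int
    lookup_by_secondary(DB *secondary, char *skeystr)
    {
            DBT skey, pkey, data;

            memset(&skey, 0, sizeof(skey));
            memset(&pkey, 0, sizeof(pkey));
            memset(&data, 0, sizeof(data));
            skey.data = skeystr;
            skey.size = (u_int32_t)strlen(skeystr);

            /* pget returns the primary key alongside the data. */
            return (secondary->pget(secondary, NULL, &skey, &pkey, &data, 0));
    }
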
+
+/*
* tcl_db_join --
*/
static int
@@ -1399,7 +1934,7 @@ tcl_DbJoin(interp, objc, objv, dbp, dbcp)
* Allocate one more for NULL ptr at end of list.
*/
size = sizeof(DBC *) * ((objc - adj) + 1);
- ret = __os_malloc(dbp->dbenv, size, NULL, &listp);
+ ret = __os_malloc(dbp->dbenv, size, &listp);
if (ret != 0) {
Tcl_SetResult(interp, db_strerror(ret), TCL_STATIC);
return (TCL_ERROR);
@@ -1420,10 +1955,10 @@ tcl_DbJoin(interp, objc, objv, dbp, dbcp)
listp[j] = NULL;
_debug_check();
ret = dbp->join(dbp, listp, dbcp, flag);
- result = _ReturnSetup(interp, ret, "db join");
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), "db join");
out:
- __os_free(listp, size);
+ __os_free(dbp->dbenv, listp);
return (result);
}
@@ -1438,12 +1973,16 @@ tcl_DbGetjoin(interp, objc, objv, dbp)
DB *dbp; /* Database pointer */
{
static char *dbgetjopts[] = {
+#if CONFIG_TEST
"-nosort",
+#endif
"-txn",
NULL
};
enum dbgetjopts {
+#if CONFIG_TEST
DBGETJ_NOSORT,
+#endif
DBGETJ_TXN
};
DB_TXN *txn;
@@ -1452,12 +1991,14 @@ tcl_DbGetjoin(interp, objc, objv, dbp)
DBC *dbc;
DBT key, data;
Tcl_Obj **elemv, *retlist;
+ void *ktmp;
u_int32_t flag;
- int adj, elemc, i, itmp, j, optindex, result, ret, size;
+ int adj, elemc, freekey, i, j, optindex, result, ret, size;
char *arg, msg[MSG_SIZE];
result = TCL_OK;
flag = 0;
+ freekey = 0;
if (objc < 3) {
Tcl_WrongNumArgs(interp, 2, objv, "{db1 key1} {db2 key2} ...");
return (TCL_ERROR);
@@ -1478,10 +2019,12 @@ tcl_DbGetjoin(interp, objc, objv, dbp)
}
i++;
switch ((enum dbgetjopts)optindex) {
+#if CONFIG_TEST
case DBGETJ_NOSORT:
flag |= DB_JOIN_NOSORT;
adj++;
break;
+#endif
case DBGETJ_TXN:
if (i == objc) {
Tcl_WrongNumArgs(interp, 2, objv, "?-txn id?");
@@ -1503,7 +2046,7 @@ tcl_DbGetjoin(interp, objc, objv, dbp)
if (result != TCL_OK)
return (result);
size = sizeof(DBC *) * ((objc - adj) + 1);
- ret = __os_malloc(NULL, size, NULL, &listp);
+ ret = __os_malloc(NULL, size, &listp);
if (ret != 0) {
Tcl_SetResult(interp, db_strerror(ret), TCL_STATIC);
return (TCL_ERROR);
@@ -1535,22 +2078,28 @@ tcl_DbGetjoin(interp, objc, objv, dbp)
goto out;
}
ret = elemdbp->cursor(elemdbp, txn, &listp[j], 0);
- if ((result = _ReturnSetup(interp, ret, "db cursor")) ==
- TCL_ERROR)
+ if ((result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "db cursor")) == TCL_ERROR)
goto out;
memset(&key, 0, sizeof(key));
memset(&data, 0, sizeof(data));
- key.data = Tcl_GetByteArrayFromObj(elemv[elemc-1], &itmp);
- key.size = itmp;
+ ret = _CopyObjBytes(interp, elemv[elemc-1], &ktmp,
+ &key.size, &freekey);
+ if (ret != 0) {
+ result = _ReturnSetup(interp, ret,
+ DB_RETOK_STD(ret), "db join");
+ goto out;
+ }
+ key.data = ktmp;
ret = (listp[j])->c_get(listp[j], &key, &data, DB_SET);
- if ((result = _ReturnSetup(interp, ret, "db cget")) ==
- TCL_ERROR)
+ if ((result = _ReturnSetup(interp, ret, DB_RETOK_DBCGET(ret),
+ "db cget")) == TCL_ERROR)
goto out;
}
listp[j] = NULL;
_debug_check();
ret = dbp->join(dbp, listp, &dbc, flag);
- result = _ReturnSetup(interp, ret, "db join");
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), "db join");
if (result == TCL_ERROR)
goto out;
@@ -1568,20 +2117,22 @@ tcl_DbGetjoin(interp, objc, objv, dbp)
result = _SetListElem(interp, retlist,
key.data, key.size,
data.data, data.size);
- __os_free(key.data, key.size);
- __os_free(data.data, data.size);
+ free(key.data);
+ free(data.data);
}
}
dbc->c_close(dbc);
if (result == TCL_OK)
Tcl_SetObjResult(interp, retlist);
out:
+ if (freekey)
+ (void)__os_free(dbp->dbenv, ktmp);
while (j) {
if (listp[j])
(listp[j])->c_close(listp[j]);
j--;
}
- __os_free(listp, size);
+ __os_free(dbp->dbenv, listp);
return (result);
}
@@ -1598,11 +2149,13 @@ tcl_DbCount(interp, objc, objv, dbp)
Tcl_Obj *res;
DBC *dbc;
DBT key, data;
+ void *ktmp;
db_recno_t count, recno;
- int itmp, len, result, ret;
+ int freekey, result, ret;
result = TCL_OK;
count = 0;
+ freekey = 0;
res = NULL;
if (objc != 3) {
Tcl_WrongNumArgs(interp, 2, objv, "key");
@@ -1624,21 +2177,27 @@ tcl_DbCount(interp, objc, objv, dbp)
* treat the key as a recno rather than as a byte string.
*/
if (dbp->type == DB_RECNO || dbp->type == DB_QUEUE) {
- result = Tcl_GetIntFromObj(interp, objv[2], &itmp);
- recno = itmp;
+ result = _GetUInt32(interp, objv[2], &recno);
if (result == TCL_OK) {
key.data = &recno;
key.size = sizeof(db_recno_t);
} else
return (result);
} else {
- key.data = Tcl_GetByteArrayFromObj(objv[2], &len);
- key.size = len;
+ ret = _CopyObjBytes(interp, objv[2], &ktmp,
+ &key.size, &freekey);
+ if (ret != 0) {
+ result = _ReturnSetup(interp, ret,
+ DB_RETOK_STD(ret), "db count");
+ return (result);
+ }
+ key.data = ktmp;
}
_debug_check();
ret = dbp->cursor(dbp, NULL, &dbc, 0);
if (ret != 0) {
- result = _ReturnSetup(interp, ret, "db cursor");
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "db cursor");
goto out;
}
/*
@@ -1650,16 +2209,21 @@ tcl_DbCount(interp, objc, objv, dbp)
else {
ret = dbc->c_count(dbc, &count, 0);
if (ret != 0) {
- result = _ReturnSetup(interp, ret, "db cursor");
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "db c count");
goto out;
}
}
- res = Tcl_NewIntObj(count);
+ res = Tcl_NewLongObj((long)count);
Tcl_SetObjResult(interp, res);
out:
+ if (freekey)
+ (void)__os_free(dbp->dbenv, ktmp);
+ (void)dbc->c_close(dbc);
return (result);
}
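
The "db count" path above is a thin wrapper over the cursor-count pattern: position a cursor on the key with DB_SET, then ask DBC->c_count how many duplicate data items share it. A minimal sketch, assuming an open handle and a caller-filled key DBT:

    #include <string.h>
    #include <db.h>

    /* Sketch only: count the duplicates stored under one key. */
    static int
    count_dups(DB *dbp, DBT *key, db_recno_t *countp)
    {
            DBC *dbc;
            DBT data;
            int ret, t_ret;

            memset(&data, 0, sizeof(data));
            if ((ret = dbp->cursor(dbp, NULL, &dbc, 0)) != 0)
                    return (ret);
            if ((ret = dbc->c_get(dbc, key, &data, DB_SET)) == 0)
                    ret = dbc->c_count(dbc, countp, 0);
            if ((t_ret = dbc->c_close(dbc)) != 0 && ret == 0)
                    ret = t_ret;
            return (ret);
    }
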
+#if CONFIG_TEST
/*
* tcl_DbKeyRange --
*/
@@ -1682,13 +2246,15 @@ tcl_DbKeyRange(interp, objc, objv, dbp)
DBT key;
DBTYPE type;
Tcl_Obj *myobjv[3], *retlist;
+ void *ktmp;
db_recno_t recno;
u_int32_t flag;
- int i, itmp, myobjc, optindex, result, ret;
+ int freekey, i, myobjc, optindex, result, ret;
char *arg, msg[MSG_SIZE];
result = TCL_OK;
flag = 0;
+ freekey = 0;
if (objc < 3) {
Tcl_WrongNumArgs(interp, 2, objv, "?-txn id? key");
return (TCL_ERROR);
@@ -1727,7 +2293,7 @@ tcl_DbKeyRange(interp, objc, objv, dbp)
}
if (result != TCL_OK)
return (result);
- type = dbp->get_type(dbp);
+ (void)dbp->get_type(dbp, &type);
ret = 0;
/*
* Make sure we have a key.
@@ -1739,20 +2305,25 @@ tcl_DbKeyRange(interp, objc, objv, dbp)
}
memset(&key, 0, sizeof(key));
if (type == DB_RECNO || type == DB_QUEUE) {
- result = Tcl_GetIntFromObj(interp, objv[i], &itmp);
- recno = itmp;
+ result = _GetUInt32(interp, objv[i], &recno);
if (result == TCL_OK) {
key.data = &recno;
key.size = sizeof(db_recno_t);
} else
return (result);
} else {
- key.data = Tcl_GetByteArrayFromObj(objv[i++], &itmp);
- key.size = itmp;
+ ret = _CopyObjBytes(interp, objv[i++], &ktmp,
+ &key.size, &freekey);
+ if (ret != 0) {
+ result = _ReturnSetup(interp, ret,
+ DB_RETOK_STD(ret), "db keyrange");
+ return (result);
+ }
+ key.data = ktmp;
}
_debug_check();
ret = dbp->key_range(dbp, txn, &key, &range, flag);
- result = _ReturnSetup(interp, ret, "db join");
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), "db keyrange");
if (result == TCL_ERROR)
goto out;
@@ -1767,5 +2338,84 @@ tcl_DbKeyRange(interp, objc, objv, dbp)
if (result == TCL_OK)
Tcl_SetObjResult(interp, retlist);
out:
+ if (freekey)
+ (void)__os_free(dbp->dbenv, ktmp);
+ return (result);
+}
+#endif
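
tcl_DbKeyRange above is test-only glue over DB->key_range, which for a btree estimates the proportion of keys before, at and after a given key. A sketch of the direct call, assuming the handle and key already exist (output formatting is illustrative):

    #include <stdio.h>
    #include <db.h>

    /* Sketch only: report where a key falls within a btree. */
    static int
    show_key_position(DB *dbp, DBT *key)
    {
            DB_KEY_RANGE range;
            int ret;

            if ((ret = dbp->key_range(dbp, NULL, key, &range, 0)) != 0)
                    return (ret);
            printf("less %.2f  equal %.2f  greater %.2f\n",
                range.less, range.equal, range.greater);
            return (0);
    }
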
+
+/*
+ * tcl_DbTruncate --
+ */
+static int
+tcl_DbTruncate(interp, objc, objv, dbp)
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+ DB *dbp; /* Database pointer */
+{
+ static char *dbcuropts[] = {
+ "-auto_commit",
+ "-txn",
+ NULL
+ };
+ enum dbcuropts {
+ DBTRUNC_AUTO_COMMIT,
+ DBTRUNC_TXN
+ };
+ DB_TXN *txn;
+ Tcl_Obj *res;
+ u_int32_t count, flag;
+ int i, optindex, result, ret;
+ char *arg, msg[MSG_SIZE];
+
+ txn = NULL;
+ flag = 0;
+ result = TCL_OK;
+
+ i = 2;
+ while (i < objc) {
+ if (Tcl_GetIndexFromObj(interp, objv[i], dbcuropts, "option",
+ TCL_EXACT, &optindex) != TCL_OK) {
+ result = IS_HELP(objv[i]);
+ goto out;
+ }
+ i++;
+ switch ((enum dbcuropts)optindex) {
+ case DBTRUNC_AUTO_COMMIT:
+ flag |= DB_AUTO_COMMIT;
+ break;
+ case DBTRUNC_TXN:
+ if (i == objc) {
+ Tcl_WrongNumArgs(interp, 2, objv, "?-txn id?");
+ result = TCL_ERROR;
+ break;
+ }
+ arg = Tcl_GetStringFromObj(objv[i++], NULL);
+ txn = NAME_TO_TXN(arg);
+ if (txn == NULL) {
+ snprintf(msg, MSG_SIZE,
+ "Truncate: Invalid txn: %s\n", arg);
+ Tcl_SetResult(interp, msg, TCL_VOLATILE);
+ result = TCL_ERROR;
+ }
+ break;
+ }
+ if (result != TCL_OK)
+ break;
+ }
+ if (result != TCL_OK)
+ goto out;
+
+ _debug_check();
+ ret = dbp->truncate(dbp, txn, &count, flag);
+ if (ret != 0)
+ result = _ErrorSetup(interp, ret, "db truncate");
+
+ else {
+ res = Tcl_NewLongObj((long)count);
+ Tcl_SetObjResult(interp, res);
+ }
+out:
return (result);
}
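
tcl_DbTruncate above wraps the 4.1 DB->truncate call, which discards every record and reports how many were removed. A minimal sketch, assuming an open handle in a transactional environment (the DB_AUTO_COMMIT choice is illustrative):

    #include <stdio.h>
    #include <db.h>

    /* Sketch only: empty a database and report the record count. */
    static int
    empty_database(DB *dbp)
    {
            u_int32_t count;
            int ret;

            if ((ret = dbp->truncate(dbp, NULL, &count, DB_AUTO_COMMIT)) != 0)
                    return (ret);
            printf("discarded %lu records\n", (unsigned long)count);
            return (0);
    }
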
diff --git a/bdb/tcl/tcl_db_pkg.c b/bdb/tcl/tcl_db_pkg.c
index f83b5a7d2a9..ce37598dc1a 100644
--- a/bdb/tcl/tcl_db_pkg.c
+++ b/bdb/tcl/tcl_db_pkg.c
@@ -1,14 +1,14 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1999, 2000
+ * Copyright (c) 1999-2002
* Sleepycat Software. All rights reserved.
*/
#include "db_config.h"
#ifndef lint
-static const char revid[] = "$Id: tcl_db_pkg.c,v 11.76 2001/01/19 18:02:36 bostic Exp $";
+static const char revid[] = "$Id: tcl_db_pkg.c,v 11.141 2002/08/14 20:15:47 bostic Exp $";
#endif /* not lint */
#ifndef NO_SYSTEM_INCLUDES
@@ -19,10 +19,17 @@ static const char revid[] = "$Id: tcl_db_pkg.c,v 11.76 2001/01/19 18:02:36 bosti
#include <tcl.h>
#endif
+#if CONFIG_TEST
#define DB_DBM_HSEARCH 1
+#endif
#include "db_int.h"
-#include "tcl_db.h"
+#include "dbinc/db_page.h"
+#include "dbinc/hash.h"
+#include "dbinc/tcl_db.h"
+
+/* XXX we must declare global data in just one place */
+DBTCL_GLOBAL __dbtcl_global;
/*
* Prototypes for procedures defined later in this file:
@@ -40,6 +47,20 @@ static int bdb_DbVerify __P((Tcl_Interp *, int, Tcl_Obj * CONST*));
static int bdb_Version __P((Tcl_Interp *, int, Tcl_Obj * CONST*));
static int bdb_Handles __P((Tcl_Interp *, int, Tcl_Obj * CONST*));
+static int tcl_bt_compare __P((DB *, const DBT *, const DBT *));
+static int tcl_compare_callback __P((DB *, const DBT *, const DBT *,
+ Tcl_Obj *, char *));
+static int tcl_dup_compare __P((DB *, const DBT *, const DBT *));
+static u_int32_t tcl_h_hash __P((DB *, const void *, u_int32_t));
+static int tcl_rep_send __P((DB_ENV *,
+ const DBT *, const DBT *, int, u_int32_t));
+
+#ifdef TEST_ALLOC
+static void * tcl_db_malloc __P((size_t));
+static void * tcl_db_realloc __P((void *, size_t));
+static void tcl_db_free __P((void *));
+#endif
+
/*
* Db_tcl_Init --
*
@@ -96,20 +117,24 @@ berkdb_Cmd(notused, interp, objc, objv)
Tcl_Obj *CONST objv[]; /* The argument objects */
{
static char *berkdbcmds[] = {
+#if CONFIG_TEST
+ "dbverify",
+ "handles",
+ "upgrade",
+#endif
"dbremove",
"dbrename",
- "dbverify",
"env",
"envremove",
- "handles",
"open",
- "upgrade",
"version",
+#if CONFIG_TEST
/* All below are compatibility functions */
"hcreate", "hsearch", "hdestroy",
"dbminit", "fetch", "store",
"delete", "firstkey", "nextkey",
"ndbm_open", "dbmclose",
+#endif
/* All below are convenience functions */
"rand", "random_int", "srand",
"debug_check",
@@ -119,28 +144,34 @@ berkdb_Cmd(notused, interp, objc, objv)
* All commands enums below ending in X are compatibility
*/
enum berkdbcmds {
+#if CONFIG_TEST
+ BDB_DBVERIFY,
+ BDB_HANDLES,
+ BDB_UPGRADE,
+#endif
BDB_DBREMOVE,
BDB_DBRENAME,
- BDB_DBVERIFY,
BDB_ENV,
BDB_ENVREMOVE,
- BDB_HANDLES,
BDB_OPEN,
- BDB_UPGRADE,
BDB_VERSION,
+#if CONFIG_TEST
BDB_HCREATEX, BDB_HSEARCHX, BDB_HDESTROYX,
BDB_DBMINITX, BDB_FETCHX, BDB_STOREX,
BDB_DELETEX, BDB_FIRSTKEYX, BDB_NEXTKEYX,
BDB_NDBMOPENX, BDB_DBMCLOSEX,
+#endif
BDB_RANDX, BDB_RAND_INTX, BDB_SRANDX,
BDB_DBGCKX
};
static int env_id = 0;
static int db_id = 0;
- static int ndbm_id = 0;
DB *dbp;
+#if CONFIG_TEST
DBM *ndbmp;
+ static int ndbm_id = 0;
+#endif
DBTCL_INFO *ip;
DB_ENV *envp;
Tcl_Obj *res;
@@ -166,13 +197,21 @@ berkdb_Cmd(notused, interp, objc, objv)
return (IS_HELP(objv[1]));
res = NULL;
switch ((enum berkdbcmds)cmdindex) {
- case BDB_VERSION:
- _debug_check();
- result = bdb_Version(interp, objc, objv);
+#if CONFIG_TEST
+ case BDB_DBVERIFY:
+ result = bdb_DbVerify(interp, objc, objv);
break;
case BDB_HANDLES:
result = bdb_Handles(interp, objc, objv);
break;
+ case BDB_UPGRADE:
+ result = bdb_DbUpgrade(interp, objc, objv);
+ break;
+#endif
+ case BDB_VERSION:
+ _debug_check();
+ result = bdb_Version(interp, objc, objv);
+ break;
case BDB_ENV:
snprintf(newname, sizeof(newname), "env%d", env_id);
ip = _NewInfo(interp, NULL, newname, I_ENV);
@@ -201,12 +240,6 @@ berkdb_Cmd(notused, interp, objc, objv)
case BDB_DBRENAME:
result = bdb_DbRename(interp, objc, objv);
break;
- case BDB_UPGRADE:
- result = bdb_DbUpgrade(interp, objc, objv);
- break;
- case BDB_DBVERIFY:
- result = bdb_DbVerify(interp, objc, objv);
- break;
case BDB_ENVREMOVE:
result = tcl_EnvRemove(interp, objc, objv, NULL, NULL);
break;
@@ -232,6 +265,7 @@ berkdb_Cmd(notused, interp, objc, objv)
result = TCL_ERROR;
}
break;
+#if CONFIG_TEST
case BDB_HCREATEX:
case BDB_HSEARCHX:
case BDB_HDESTROYX:
@@ -268,6 +302,7 @@ berkdb_Cmd(notused, interp, objc, objv)
result = TCL_ERROR;
}
break;
+#endif
case BDB_RANDX:
case BDB_RAND_INTX:
case BDB_SRANDX:
@@ -296,7 +331,7 @@ berkdb_Cmd(notused, interp, objc, objv)
* 1. Call db_env_create to create the env handle.
* 2. Parse args tracking options.
* 3. Make any pre-open setup calls necessary.
- * 4. Call DBENV->open to open the env.
+ * 4. Call DB_ENV->open to open the env.
* 5. Return env widget handle to user.
*/
static int
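
The numbered sequence in the comment above (create the handle, configure it, open it, hand it back) looks roughly like this in plain C; the cache size, open flags and home directory below are illustrative choices rather than what the Tcl layer computes:

    #include <db.h>

    /* Sketch only: create/configure/open a transactional environment. */
    static int
    open_env(const char *home, DB_ENV **envpp)
    {
            DB_ENV *dbenv;
            int ret;

            if ((ret = db_env_create(&dbenv, 0)) != 0)
                    return (ret);
            (void)dbenv->set_cachesize(dbenv, 0, 1024 * 1024, 1);
            if ((ret = dbenv->open(dbenv, home,
                DB_CREATE | DB_INIT_LOCK | DB_INIT_LOG |
                DB_INIT_MPOOL | DB_INIT_TXN, 0)) != 0) {
                    (void)dbenv->close(dbenv, 0);
                    return (ret);
            }
            *envpp = dbenv;
            return (0);
    }
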
@@ -308,15 +343,11 @@ bdb_EnvOpen(interp, objc, objv, ip, env)
DB_ENV **env; /* Environment pointer */
{
static char *envopen[] = {
- "-cachesize",
+#if CONFIG_TEST
+ "-auto_commit",
"-cdb",
"-cdb_alldb",
"-client_timeout",
- "-create",
- "-data_dir",
- "-errfile",
- "-errpfx",
- "-home",
"-lock",
"-lock_conflict",
"-lock_detect",
@@ -324,28 +355,46 @@ bdb_EnvOpen(interp, objc, objv, ip, env)
"-lock_max_locks",
"-lock_max_lockers",
"-lock_max_objects",
+ "-lock_timeout",
"-log",
"-log_buffer",
- "-log_dir",
"-log_max",
+ "-log_regionmax",
"-mmapsize",
- "-mode",
"-nommap",
- "-private",
- "-recover",
- "-recover_fatal",
+ "-overwrite",
"-region_init",
+ "-rep_client",
+ "-rep_logsonly",
+ "-rep_master",
+ "-rep_transport",
"-server",
"-server_timeout",
+ "-txn_timeout",
+ "-txn_timestamp",
+ "-verbose",
+ "-wrnosync",
+#endif
+ "-cachesize",
+ "-create",
+ "-data_dir",
+ "-encryptaes",
+ "-encryptany",
+ "-errfile",
+ "-errpfx",
+ "-home",
+ "-log_dir",
+ "-mode",
+ "-private",
+ "-recover",
+ "-recover_fatal",
"-shm_key",
"-system_mem",
"-tmp_dir",
"-txn",
"-txn_max",
- "-txn_timestamp",
"-use_environ",
"-use_environ_root",
- "-verbose",
NULL
};
/*
@@ -354,15 +403,11 @@ bdb_EnvOpen(interp, objc, objv, ip, env)
* which is close to but not quite alphabetical.
*/
enum envopen {
- ENV_CACHESIZE,
+#if CONFIG_TEST
+ ENV_AUTO_COMMIT,
ENV_CDB,
ENV_CDB_ALLDB,
ENV_CLIENT_TO,
- ENV_CREATE,
- ENV_DATA_DIR,
- ENV_ERRFILE,
- ENV_ERRPFX,
- ENV_HOME,
ENV_LOCK,
ENV_CONFLICT,
ENV_DETECT,
@@ -370,52 +415,82 @@ bdb_EnvOpen(interp, objc, objv, ip, env)
ENV_LOCK_MAX_LOCKS,
ENV_LOCK_MAX_LOCKERS,
ENV_LOCK_MAX_OBJECTS,
+ ENV_LOCK_TIMEOUT,
ENV_LOG,
ENV_LOG_BUFFER,
- ENV_LOG_DIR,
ENV_LOG_MAX,
+ ENV_LOG_REGIONMAX,
ENV_MMAPSIZE,
- ENV_MODE,
ENV_NOMMAP,
- ENV_PRIVATE,
- ENV_RECOVER,
- ENV_RECOVER_FATAL,
+ ENV_OVERWRITE,
ENV_REGION_INIT,
+ ENV_REP_CLIENT,
+ ENV_REP_LOGSONLY,
+ ENV_REP_MASTER,
+ ENV_REP_TRANSPORT,
ENV_SERVER,
ENV_SERVER_TO,
+ ENV_TXN_TIMEOUT,
+ ENV_TXN_TIME,
+ ENV_VERBOSE,
+ ENV_WRNOSYNC,
+#endif
+ ENV_CACHESIZE,
+ ENV_CREATE,
+ ENV_DATA_DIR,
+ ENV_ENCRYPT_AES,
+ ENV_ENCRYPT_ANY,
+ ENV_ERRFILE,
+ ENV_ERRPFX,
+ ENV_HOME,
+ ENV_LOG_DIR,
+ ENV_MODE,
+ ENV_PRIVATE,
+ ENV_RECOVER,
+ ENV_RECOVER_FATAL,
ENV_SHM_KEY,
ENV_SYSTEM_MEM,
ENV_TMP_DIR,
ENV_TXN,
ENV_TXN_MAX,
- ENV_TXN_TIME,
ENV_USE_ENVIRON,
- ENV_USE_ENVIRON_ROOT,
- ENV_VERBOSE
+ ENV_USE_ENVIRON_ROOT
};
Tcl_Obj **myobjv, **myobjv1;
- time_t time;
- u_int32_t detect, gbytes, bytes, ncaches, open_flags, set_flag, size;
+ time_t timestamp;
+ u_int32_t detect, gbytes, bytes, ncaches, logbufset, logmaxset;
+ u_int32_t open_flags, rep_flags, set_flags, size, uintarg;
u_int8_t *conflicts;
- int i, intarg, itmp, j, logbufset, logmaxset;
- int mode, myobjc, nmodes, optindex, result, ret, temp;
+ int i, intarg, j, mode, myobjc, nmodes, optindex;
+ int result, ret, temp;
long client_to, server_to, shm;
- char *arg, *home, *server;
+ char *arg, *home, *passwd, *server;
result = TCL_OK;
mode = 0;
- set_flag = 0;
+ rep_flags = set_flags = 0;
home = NULL;
+
/*
* XXX
* If/when our Tcl interface becomes thread-safe, we should enable
- * DB_THREAD here. Note that DB_THREAD currently does not work
- * with log_get -next, -prev; if we wish to enable DB_THREAD,
- * those must either be made thread-safe first or we must come up with
- * a workaround. (We used to specify DB_THREAD if and only if
- * logging was not configured.)
+ * DB_THREAD here in all cases. For now, turn it on only when testing
+ * so that we exercise MUTEX_THREAD_LOCK cases.
+ *
+ * Historically, a key stumbling block was the log_get interface,
+ * which could only do relative operations in a non-threaded
+ * environment. This is no longer an issue, thanks to log cursors,
+ * but we need to look at making sure DBTCL_INFO structs
+ * are safe to share across threads (they're not mutex-protected)
+ * before we declare the Tcl interface thread-safe. Meanwhile,
+ * there's no strong reason to enable DB_THREAD.
*/
- open_flags = DB_JOINENV;
+ open_flags = DB_JOINENV |
+#ifdef TEST_THREAD
+ DB_THREAD;
+#else
+ 0;
+#endif
logmaxset = logbufset = 0;
if (objc <= 2) {
@@ -436,6 +511,7 @@ bdb_EnvOpen(interp, objc, objv, ip, env)
continue;
}
switch ((enum envopen)optindex) {
+#if CONFIG_TEST
case ENV_SERVER:
if (i >= objc) {
Tcl_WrongNumArgs(interp, 2, objv,
@@ -465,6 +541,7 @@ bdb_EnvOpen(interp, objc, objv, ip, env)
result = Tcl_GetLongFromObj(interp, objv[i++],
&client_to);
break;
+#endif
default:
break;
}
@@ -472,10 +549,11 @@ bdb_EnvOpen(interp, objc, objv, ip, env)
if (server != NULL) {
ret = db_env_create(env, DB_CLIENT);
if (ret)
- return (_ReturnSetup(interp, ret, "db_env_create"));
+ return (_ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "db_env_create"));
(*env)->set_errpfx((*env), ip->i_name);
(*env)->set_errcall((*env), _ErrorFunc);
- if ((ret = (*env)->set_server((*env), server,
+ if ((ret = (*env)->set_rpc_server((*env), NULL, server,
client_to, server_to, 0)) != 0) {
result = TCL_ERROR;
goto error;
@@ -487,17 +565,30 @@ bdb_EnvOpen(interp, objc, objv, ip, env)
*/
ret = db_env_create(env, 0);
if (ret)
- return (_ReturnSetup(interp, ret, "db_env_create"));
+ return (_ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "db_env_create"));
(*env)->set_errpfx((*env), ip->i_name);
(*env)->set_errcall((*env), _ErrorFunc);
}
+ /* Hang our info pointer on the env handle, so we can do callbacks. */
+ (*env)->app_private = ip;
+
+ /*
+ * Use a Tcl-local alloc and free function so that we're sure to
+ * test whether we use umalloc/ufree in the right places.
+ */
+#ifdef TEST_ALLOC
+ (*env)->set_alloc(*env, tcl_db_malloc, tcl_db_realloc, tcl_db_free);
+#endif
+
/*
* Get the command name index from the object based on the bdbcmds
* defined above.
*/
i = 2;
while (i < objc) {
+ Tcl_ResetResult(interp);
if (Tcl_GetIndexFromObj(interp, objv[i], envopen, "option",
TCL_EXACT, &optindex) != TCL_OK) {
result = IS_HELP(objv[i]);
@@ -505,6 +596,7 @@ bdb_EnvOpen(interp, objc, objv, ip, env)
}
i++;
switch ((enum envopen)optindex) {
+#if CONFIG_TEST
case ENV_SERVER:
case ENV_SERVER_TO:
case ENV_CLIENT_TO:
@@ -513,208 +605,20 @@ bdb_EnvOpen(interp, objc, objv, ip, env)
*/
i++;
break;
+ case ENV_AUTO_COMMIT:
+ FLD_SET(set_flags, DB_AUTO_COMMIT);
+ break;
case ENV_CDB:
FLD_SET(open_flags, DB_INIT_CDB | DB_INIT_MPOOL);
FLD_CLR(open_flags, DB_JOINENV);
break;
case ENV_CDB_ALLDB:
- FLD_SET(set_flag, DB_CDB_ALLDB);
+ FLD_SET(set_flags, DB_CDB_ALLDB);
break;
case ENV_LOCK:
FLD_SET(open_flags, DB_INIT_LOCK | DB_INIT_MPOOL);
FLD_CLR(open_flags, DB_JOINENV);
break;
- case ENV_LOG:
- FLD_SET(open_flags, DB_INIT_LOG | DB_INIT_MPOOL);
- FLD_CLR(open_flags, DB_JOINENV);
- break;
- case ENV_TXN:
- FLD_SET(open_flags, DB_INIT_LOCK |
- DB_INIT_LOG | DB_INIT_MPOOL | DB_INIT_TXN);
- FLD_CLR(open_flags, DB_JOINENV);
- /* Make sure we have an arg to check against! */
- if (i < objc) {
- arg = Tcl_GetStringFromObj(objv[i], NULL);
- if (strcmp(arg, "nosync") == 0) {
- FLD_SET(set_flag, DB_TXN_NOSYNC);
- i++;
- }
- }
- break;
- case ENV_CREATE:
- FLD_SET(open_flags, DB_CREATE | DB_INIT_MPOOL);
- FLD_CLR(open_flags, DB_JOINENV);
- break;
- case ENV_HOME:
- /* Make sure we have an arg to check against! */
- if (i >= objc) {
- Tcl_WrongNumArgs(interp, 2, objv,
- "?-home dir?");
- result = TCL_ERROR;
- break;
- }
- home = Tcl_GetStringFromObj(objv[i++], NULL);
- break;
- case ENV_MODE:
- if (i >= objc) {
- Tcl_WrongNumArgs(interp, 2, objv,
- "?-mode mode?");
- result = TCL_ERROR;
- break;
- }
- /*
- * Don't need to check result here because
- * if TCL_ERROR, the error message is already
- * set up, and we'll bail out below. If ok,
- * the mode is set and we go on.
- */
- result = Tcl_GetIntFromObj(interp, objv[i++], &mode);
- break;
- case ENV_NOMMAP:
- FLD_SET(set_flag, DB_NOMMAP);
- break;
- case ENV_PRIVATE:
- FLD_SET(open_flags, DB_PRIVATE | DB_INIT_MPOOL);
- FLD_CLR(open_flags, DB_JOINENV);
- break;
- case ENV_RECOVER:
- FLD_SET(open_flags, DB_RECOVER);
- break;
- case ENV_RECOVER_FATAL:
- FLD_SET(open_flags, DB_RECOVER_FATAL);
- break;
- case ENV_SYSTEM_MEM:
- FLD_SET(open_flags, DB_SYSTEM_MEM);
- break;
- case ENV_USE_ENVIRON_ROOT:
- FLD_SET(open_flags, DB_USE_ENVIRON_ROOT);
- break;
- case ENV_USE_ENVIRON:
- FLD_SET(open_flags, DB_USE_ENVIRON);
- break;
- case ENV_VERBOSE:
- result = Tcl_ListObjGetElements(interp, objv[i],
- &myobjc, &myobjv);
- if (result == TCL_OK)
- i++;
- else
- break;
- if (myobjc != 2) {
- Tcl_WrongNumArgs(interp, 2, objv,
- "?-verbose {which on|off}?");
- result = TCL_ERROR;
- break;
- }
- result = tcl_EnvVerbose(interp, *env,
- myobjv[0], myobjv[1]);
- break;
- case ENV_REGION_INIT:
- _debug_check();
- ret = db_env_set_region_init(1);
- result = _ReturnSetup(interp, ret, "region_init");
- break;
- case ENV_CACHESIZE:
- result = Tcl_ListObjGetElements(interp, objv[i],
- &myobjc, &myobjv);
- if (result == TCL_OK)
- i++;
- else
- break;
- j = 0;
- if (myobjc != 3) {
- Tcl_WrongNumArgs(interp, 2, objv,
- "?-cachesize {gbytes bytes ncaches}?");
- result = TCL_ERROR;
- break;
- }
- result = Tcl_GetIntFromObj(interp, myobjv[0], &itmp);
- gbytes = itmp;
- if (result != TCL_OK)
- break;
- result = Tcl_GetIntFromObj(interp, myobjv[1], &itmp);
- bytes = itmp;
- if (result != TCL_OK)
- break;
- result = Tcl_GetIntFromObj(interp, myobjv[2], &itmp);
- ncaches = itmp;
- if (result != TCL_OK)
- break;
- _debug_check();
- ret = (*env)->set_cachesize(*env, gbytes, bytes,
- ncaches);
- result = _ReturnSetup(interp, ret, "set_cachesize");
- break;
- case ENV_MMAPSIZE:
- if (i >= objc) {
- Tcl_WrongNumArgs(interp, 2, objv,
- "?-mmapsize size?");
- result = TCL_ERROR;
- break;
- }
- result = Tcl_GetIntFromObj(interp, objv[i++], &intarg);
- if (result == TCL_OK) {
- _debug_check();
- ret = (*env)->set_mp_mmapsize(*env,
- (size_t)intarg);
- result = _ReturnSetup(interp, ret, "mmapsize");
- }
- break;
- case ENV_SHM_KEY:
- if (i >= objc) {
- Tcl_WrongNumArgs(interp, 2, objv,
- "?-shm_key key?");
- result = TCL_ERROR;
- break;
- }
- result = Tcl_GetLongFromObj(interp, objv[i++], &shm);
- if (result == TCL_OK) {
- _debug_check();
- ret = (*env)->set_shm_key(*env, shm);
- result = _ReturnSetup(interp, ret, "shm_key");
- }
- break;
- case ENV_LOG_MAX:
- if (i >= objc) {
- Tcl_WrongNumArgs(interp, 2, objv,
- "?-log_max max?");
- result = TCL_ERROR;
- break;
- }
- result = Tcl_GetIntFromObj(interp, objv[i++], &intarg);
- if (result == TCL_OK && logbufset) {
- _debug_check();
- ret = (*env)->set_lg_max(*env,
- (u_int32_t)intarg);
- result = _ReturnSetup(interp, ret, "log_max");
- logbufset = 0;
- } else
- logmaxset = intarg;
- break;
- case ENV_LOG_BUFFER:
- if (i >= objc) {
- Tcl_WrongNumArgs(interp, 2, objv,
- "?-log_buffer size?");
- result = TCL_ERROR;
- break;
- }
- result = Tcl_GetIntFromObj(interp, objv[i++], &intarg);
- if (result == TCL_OK) {
- _debug_check();
- ret = (*env)->set_lg_bsize(*env,
- (u_int32_t)intarg);
- result = _ReturnSetup(interp, ret, "log_bsize");
- logbufset = 1;
- if (logmaxset) {
- _debug_check();
- ret = (*env)->set_lg_max(*env,
- (u_int32_t)logmaxset);
- result = _ReturnSetup(interp, ret,
- "log_max");
- logmaxset = 0;
- logbufset = 0;
- }
- }
- break;
case ENV_CONFLICT:
/*
* Get conflict list. List is:
@@ -747,7 +651,7 @@ bdb_EnvOpen(interp, objc, objv, ip, env)
break;
}
size = sizeof(u_int8_t) * nmodes*nmodes;
- ret = __os_malloc(*env, size, NULL, &conflicts);
+ ret = __os_malloc(*env, size, &conflicts);
if (ret != 0) {
result = TCL_ERROR;
break;
@@ -757,15 +661,16 @@ bdb_EnvOpen(interp, objc, objv, ip, env)
&temp);
conflicts[j] = temp;
if (result != TCL_OK) {
- __os_free(conflicts, size);
+ __os_free(NULL, conflicts);
break;
}
}
_debug_check();
ret = (*env)->set_lk_conflicts(*env,
(u_int8_t *)conflicts, nmodes);
- __os_free(conflicts, size);
- result = _ReturnSetup(interp, ret, "set_lk_conflicts");
+ __os_free(NULL, conflicts);
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "set_lk_conflicts");
break;
case ENV_DETECT:
if (i >= objc) {
@@ -777,6 +682,14 @@ bdb_EnvOpen(interp, objc, objv, ip, env)
arg = Tcl_GetStringFromObj(objv[i++], NULL);
if (strcmp(arg, "default") == 0)
detect = DB_LOCK_DEFAULT;
+ else if (strcmp(arg, "expire") == 0)
+ detect = DB_LOCK_EXPIRE;
+ else if (strcmp(arg, "maxlocks") == 0)
+ detect = DB_LOCK_MAXLOCKS;
+ else if (strcmp(arg, "minlocks") == 0)
+ detect = DB_LOCK_MINLOCKS;
+ else if (strcmp(arg, "minwrites") == 0)
+ detect = DB_LOCK_MINWRITE;
else if (strcmp(arg, "oldest") == 0)
detect = DB_LOCK_OLDEST;
else if (strcmp(arg, "youngest") == 0)
@@ -791,7 +704,8 @@ bdb_EnvOpen(interp, objc, objv, ip, env)
}
_debug_check();
ret = (*env)->set_lk_detect(*env, detect);
- result = _ReturnSetup(interp, ret, "lock_detect");
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "lock_detect");
break;
case ENV_LOCK_MAX:
case ENV_LOCK_MAX_LOCKS:
@@ -803,61 +717,373 @@ bdb_EnvOpen(interp, objc, objv, ip, env)
result = TCL_ERROR;
break;
}
- result = Tcl_GetIntFromObj(interp, objv[i++], &intarg);
+ result = _GetUInt32(interp, objv[i++], &uintarg);
if (result == TCL_OK) {
_debug_check();
switch ((enum envopen)optindex) {
case ENV_LOCK_MAX:
ret = (*env)->set_lk_max(*env,
- (u_int32_t)intarg);
+ uintarg);
break;
case ENV_LOCK_MAX_LOCKS:
ret = (*env)->set_lk_max_locks(*env,
- (u_int32_t)intarg);
+ uintarg);
break;
case ENV_LOCK_MAX_LOCKERS:
ret = (*env)->set_lk_max_lockers(*env,
- (u_int32_t)intarg);
+ uintarg);
break;
case ENV_LOCK_MAX_OBJECTS:
ret = (*env)->set_lk_max_objects(*env,
- (u_int32_t)intarg);
+ uintarg);
break;
default:
break;
}
- result = _ReturnSetup(interp, ret, "lock_max");
+ result = _ReturnSetup(interp, ret,
+ DB_RETOK_STD(ret), "lock_max");
}
break;
- case ENV_TXN_MAX:
+ case ENV_TXN_TIME:
+ case ENV_TXN_TIMEOUT:
+ case ENV_LOCK_TIMEOUT:
if (i >= objc) {
Tcl_WrongNumArgs(interp, 2, objv,
- "?-txn_max max?");
+ "?-txn_timestamp time?");
+ result = TCL_ERROR;
+ break;
+ }
+ result = Tcl_GetLongFromObj(interp, objv[i++],
+ (long *)&timestamp);
+ if (result == TCL_OK) {
+ _debug_check();
+ if (optindex == ENV_TXN_TIME)
+ ret = (*env)->
+ set_tx_timestamp(*env, &timestamp);
+ else
+ ret = (*env)->set_timeout(*env,
+ (db_timeout_t)timestamp,
+ optindex == ENV_TXN_TIMEOUT ?
+ DB_SET_TXN_TIMEOUT :
+ DB_SET_LOCK_TIMEOUT);
+ result = _ReturnSetup(interp, ret,
+ DB_RETOK_STD(ret), "txn_timestamp");
+ }
+ break;
+ case ENV_LOG:
+ FLD_SET(open_flags, DB_INIT_LOG | DB_INIT_MPOOL);
+ FLD_CLR(open_flags, DB_JOINENV);
+ break;
+ case ENV_LOG_BUFFER:
+ if (i >= objc) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "?-log_buffer size?");
+ result = TCL_ERROR;
+ break;
+ }
+ result = _GetUInt32(interp, objv[i++], &uintarg);
+ if (result == TCL_OK) {
+ _debug_check();
+ ret = (*env)->set_lg_bsize(*env, uintarg);
+ result = _ReturnSetup(interp, ret,
+ DB_RETOK_STD(ret), "log_bsize");
+ logbufset = 1;
+ if (logmaxset) {
+ _debug_check();
+ ret = (*env)->set_lg_max(*env,
+ logmaxset);
+ result = _ReturnSetup(interp, ret,
+ DB_RETOK_STD(ret), "log_max");
+ logmaxset = 0;
+ logbufset = 0;
+ }
+ }
+ break;
+ case ENV_LOG_MAX:
+ if (i >= objc) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "?-log_max max?");
+ result = TCL_ERROR;
+ break;
+ }
+ result = _GetUInt32(interp, objv[i++], &uintarg);
+ if (result == TCL_OK && logbufset) {
+ _debug_check();
+ ret = (*env)->set_lg_max(*env, uintarg);
+ result = _ReturnSetup(interp, ret,
+ DB_RETOK_STD(ret), "log_max");
+ logbufset = 0;
+ } else
+ logmaxset = uintarg;
+ break;
+ case ENV_LOG_REGIONMAX:
+ if (i >= objc) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "?-log_regionmax size?");
+ result = TCL_ERROR;
+ break;
+ }
+ result = _GetUInt32(interp, objv[i++], &uintarg);
+ if (result == TCL_OK) {
+ _debug_check();
+ ret = (*env)->set_lg_regionmax(*env, uintarg);
+ result =
+ _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "log_regionmax");
+ }
+ break;
+ case ENV_MMAPSIZE:
+ if (i >= objc) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "?-mmapsize size?");
result = TCL_ERROR;
break;
}
result = Tcl_GetIntFromObj(interp, objv[i++], &intarg);
if (result == TCL_OK) {
_debug_check();
- ret = (*env)->set_tx_max(*env,
- (u_int32_t)intarg);
- result = _ReturnSetup(interp, ret, "txn_max");
+ ret = (*env)->set_mp_mmapsize(*env,
+ (size_t)intarg);
+ result = _ReturnSetup(interp, ret,
+ DB_RETOK_STD(ret), "mmapsize");
}
break;
- case ENV_TXN_TIME:
+ case ENV_NOMMAP:
+ FLD_SET(set_flags, DB_NOMMAP);
+ break;
+ case ENV_OVERWRITE:
+ FLD_SET(set_flags, DB_OVERWRITE);
+ break;
+ case ENV_REGION_INIT:
+ _debug_check();
+ ret = (*env)->set_flags(*env, DB_REGION_INIT, 1);
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "region_init");
+ break;
+ case ENV_REP_CLIENT:
+ rep_flags = DB_REP_CLIENT;
+ break;
+ case ENV_REP_LOGSONLY:
+ rep_flags = DB_REP_LOGSONLY;
+ break;
+ case ENV_REP_MASTER:
+ rep_flags = DB_REP_MASTER;
+ break;
+ case ENV_REP_TRANSPORT:
if (i >= objc) {
Tcl_WrongNumArgs(interp, 2, objv,
- "?-txn_timestamp time?");
+ "-rep_transport {envid sendproc}");
result = TCL_ERROR;
break;
}
- result = Tcl_GetLongFromObj(interp, objv[i++],
- (long *)&time);
+
+ /*
+ * Store the objects containing the machine ID
+ * and the procedure name. We don't need to crack
+ * the send procedure out now, but we do convert the
+ * machine ID to an int, since set_rep_transport needs
+ * it. Even so, it'll be easier later to deal with
+ * the Tcl_Obj *, so we save that, not the int.
+ *
+ * Note that we Tcl_IncrRefCount both objects
+ * independently; Tcl is free to discard the list
+ * that they're bundled into.
+ */
+ result = Tcl_ListObjGetElements(interp, objv[i++],
+ &myobjc, &myobjv);
+ if (myobjc != 2) {
+ Tcl_SetResult(interp,
+ "List must be {envid sendproc}",
+ TCL_STATIC);
+ result = TCL_ERROR;
+ break;
+ }
+
+ /*
+ * Check that the machine ID is an int. Note that
+ * we do want to use GetIntFromObj; the machine
+ * ID is explicitly an int, not a u_int32_t.
+ */
+ ip->i_rep_eid = myobjv[0];
+ Tcl_IncrRefCount(ip->i_rep_eid);
+ result = Tcl_GetIntFromObj(interp,
+ ip->i_rep_eid, &intarg);
+ if (result != TCL_OK)
+ break;
+
+ ip->i_rep_send = myobjv[1];
+ Tcl_IncrRefCount(ip->i_rep_send);
+ _debug_check();
+ ret = (*env)->set_rep_transport(*env,
+ intarg, tcl_rep_send);
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "set_rep_transport");
+ break;
+ case ENV_VERBOSE:
+ result = Tcl_ListObjGetElements(interp, objv[i],
+ &myobjc, &myobjv);
+ if (result == TCL_OK)
+ i++;
+ else
+ break;
+ if (myobjc != 2) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "?-verbose {which on|off}?");
+ result = TCL_ERROR;
+ break;
+ }
+ result = tcl_EnvVerbose(interp, *env,
+ myobjv[0], myobjv[1]);
+ break;
+ case ENV_WRNOSYNC:
+ FLD_SET(set_flags, DB_TXN_WRITE_NOSYNC);
+ break;
+#endif
+ case ENV_TXN:
+ FLD_SET(open_flags, DB_INIT_LOCK |
+ DB_INIT_LOG | DB_INIT_MPOOL | DB_INIT_TXN);
+ FLD_CLR(open_flags, DB_JOINENV);
+ /* Make sure we have an arg to check against! */
+ if (i < objc) {
+ arg = Tcl_GetStringFromObj(objv[i], NULL);
+ if (strcmp(arg, "nosync") == 0) {
+ FLD_SET(set_flags, DB_TXN_NOSYNC);
+ i++;
+ }
+ }
+ break;
+ case ENV_CREATE:
+ FLD_SET(open_flags, DB_CREATE | DB_INIT_MPOOL);
+ FLD_CLR(open_flags, DB_JOINENV);
+ break;
+ case ENV_ENCRYPT_AES:
+ /* Make sure we have an arg to check against! */
+ if (i >= objc) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "?-encryptaes passwd?");
+ result = TCL_ERROR;
+ break;
+ }
+ passwd = Tcl_GetStringFromObj(objv[i++], NULL);
+ _debug_check();
+ ret = (*env)->set_encrypt(*env, passwd, DB_ENCRYPT_AES);
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "set_encrypt");
+ break;
+ case ENV_ENCRYPT_ANY:
+ /* Make sure we have an arg to check against! */
+ if (i >= objc) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "?-encryptany passwd?");
+ result = TCL_ERROR;
+ break;
+ }
+ passwd = Tcl_GetStringFromObj(objv[i++], NULL);
+ _debug_check();
+ ret = (*env)->set_encrypt(*env, passwd, 0);
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "set_encrypt");
+ break;
+ case ENV_HOME:
+ /* Make sure we have an arg to check against! */
+ if (i >= objc) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "?-home dir?");
+ result = TCL_ERROR;
+ break;
+ }
+ home = Tcl_GetStringFromObj(objv[i++], NULL);
+ break;
+ case ENV_MODE:
+ if (i >= objc) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "?-mode mode?");
+ result = TCL_ERROR;
+ break;
+ }
+ /*
+ * Don't need to check result here because
+ * if TCL_ERROR, the error message is already
+ * set up, and we'll bail out below. If ok,
+ * the mode is set and we go on.
+ */
+ result = Tcl_GetIntFromObj(interp, objv[i++], &mode);
+ break;
+ case ENV_PRIVATE:
+ FLD_SET(open_flags, DB_PRIVATE | DB_INIT_MPOOL);
+ FLD_CLR(open_flags, DB_JOINENV);
+ break;
+ case ENV_RECOVER:
+ FLD_SET(open_flags, DB_RECOVER);
+ break;
+ case ENV_RECOVER_FATAL:
+ FLD_SET(open_flags, DB_RECOVER_FATAL);
+ break;
+ case ENV_SYSTEM_MEM:
+ FLD_SET(open_flags, DB_SYSTEM_MEM);
+ break;
+ case ENV_USE_ENVIRON_ROOT:
+ FLD_SET(open_flags, DB_USE_ENVIRON_ROOT);
+ break;
+ case ENV_USE_ENVIRON:
+ FLD_SET(open_flags, DB_USE_ENVIRON);
+ break;
+ case ENV_CACHESIZE:
+ result = Tcl_ListObjGetElements(interp, objv[i],
+ &myobjc, &myobjv);
+ if (result == TCL_OK)
+ i++;
+ else
+ break;
+ if (myobjc != 3) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "?-cachesize {gbytes bytes ncaches}?");
+ result = TCL_ERROR;
+ break;
+ }
+ result = _GetUInt32(interp, myobjv[0], &gbytes);
+ if (result != TCL_OK)
+ break;
+ result = _GetUInt32(interp, myobjv[1], &bytes);
+ if (result != TCL_OK)
+ break;
+ result = _GetUInt32(interp, myobjv[2], &ncaches);
+ if (result != TCL_OK)
+ break;
+ _debug_check();
+ ret = (*env)->set_cachesize(*env, gbytes, bytes,
+ ncaches);
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "set_cachesize");
+ break;
+ case ENV_SHM_KEY:
+ if (i >= objc) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "?-shm_key key?");
+ result = TCL_ERROR;
+ break;
+ }
+ result = Tcl_GetLongFromObj(interp, objv[i++], &shm);
if (result == TCL_OK) {
_debug_check();
- ret = (*env)->set_tx_timestamp(*env, &time);
+ ret = (*env)->set_shm_key(*env, shm);
+ result = _ReturnSetup(interp, ret,
+ DB_RETOK_STD(ret), "shm_key");
+ }
+ break;
+ case ENV_TXN_MAX:
+ if (i >= objc) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "?-txn_max max?");
+ result = TCL_ERROR;
+ break;
+ }
+ result = _GetUInt32(interp, objv[i++], &uintarg);
+ if (result == TCL_OK) {
+ _debug_check();
+ ret = (*env)->set_tx_max(*env, uintarg);
result = _ReturnSetup(interp, ret,
- "txn_timestamp");
+ DB_RETOK_STD(ret), "txn_max");
}
break;
case ENV_ERRFILE:
@@ -891,11 +1117,11 @@ bdb_EnvOpen(interp, objc, objv, ip, env)
* If the user already set one, free it.
*/
if (ip->i_errpfx != NULL)
- __os_freestr(ip->i_errpfx);
+ __os_free(NULL, ip->i_errpfx);
if ((ret =
__os_strdup(*env, arg, &ip->i_errpfx)) != 0) {
result = _ReturnSetup(interp, ret,
- "__os_strdup");
+ DB_RETOK_STD(ret), "__os_strdup");
break;
}
if (ip->i_errpfx != NULL) {
@@ -913,7 +1139,8 @@ bdb_EnvOpen(interp, objc, objv, ip, env)
arg = Tcl_GetStringFromObj(objv[i++], NULL);
_debug_check();
ret = (*env)->set_data_dir(*env, arg);
- result = _ReturnSetup(interp, ret, "set_data_dir");
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "set_data_dir");
break;
case ENV_LOG_DIR:
if (i >= objc) {
@@ -925,7 +1152,8 @@ bdb_EnvOpen(interp, objc, objv, ip, env)
arg = Tcl_GetStringFromObj(objv[i++], NULL);
_debug_check();
ret = (*env)->set_lg_dir(*env, arg);
- result = _ReturnSetup(interp, ret, "set_lg_dir");
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "set_lg_dir");
break;
case ENV_TMP_DIR:
if (i >= objc) {
@@ -937,7 +1165,8 @@ bdb_EnvOpen(interp, objc, objv, ip, env)
arg = Tcl_GetStringFromObj(objv[i++], NULL);
_debug_check();
ret = (*env)->set_tmp_dir(*env, arg);
- result = _ReturnSetup(interp, ret, "set_tmp_dir");
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "set_tmp_dir");
break;
}
/*
@@ -959,15 +1188,17 @@ bdb_EnvOpen(interp, objc, objv, ip, env)
if (logmaxset) {
_debug_check();
ret = (*env)->set_lg_max(*env, (u_int32_t)logmaxset);
- result = _ReturnSetup(interp, ret, "log_max");
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "log_max");
}
if (result != TCL_OK)
goto error;
- if (set_flag) {
- ret = (*env)->set_flags(*env, set_flag, 1);
- result = _ReturnSetup(interp, ret, "set_flags");
+ if (set_flags) {
+ ret = (*env)->set_flags(*env, set_flags, 1);
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "set_flags");
if (result == TCL_ERROR)
goto error;
/*
@@ -985,10 +1216,16 @@ bdb_EnvOpen(interp, objc, objv, ip, env)
*/
_debug_check();
ret = (*env)->open(*env, home, open_flags, mode);
- result = _ReturnSetup(interp, ret, "env open");
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), "env open");
-error:
- if (result == TCL_ERROR) {
+ if (rep_flags != 0 && result == TCL_OK) {
+ _debug_check();
+ ret = (*env)->rep_start(*env, NULL, rep_flags);
+ result = _ReturnSetup(interp,
+ ret, DB_RETOK_STD(ret), "rep_start");
+ }
+
+error: if (result == TCL_ERROR) {
if (ip->i_err) {
fclose(ip->i_err);
ip->i_err = NULL;
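
For orientation, a minimal Tcl sketch of driving the environment-open path above (directory name and password are hypothetical; it assumes the usual berkdb env entry point with CONFIG_TEST built in for the test-only options):

	# -txn pulls in lock/log/mpool/txn; -encryptaes and -auto_commit
	# exercise the new ENV_ENCRYPT_AES and ENV_AUTO_COMMIT cases.
	set env [berkdb env -create -home $testdir -txn \
	    -encryptaes mypasswd -auto_commit]
	# ... work with the environment ...
	$env close
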
@@ -1027,12 +1264,28 @@ bdb_DbOpen(interp, objc, objv, ip, dbp)
TCL_DB_ENV0
};
static char *bdbopen[] = {
+#if CONFIG_TEST
+ "-btcompare",
+ "-dirty",
+ "-dupcompare",
+ "-hashproc",
+ "-lorder",
+ "-minkey",
+ "-nommap",
+ "-revsplitoff",
+ "-test",
+#endif
+ "-auto_commit",
"-btree",
"-cachesize",
+ "-chksum",
"-create",
"-delim",
"-dup",
"-dupsort",
+ "-encrypt",
+ "-encryptaes",
+ "-encryptany",
"-env",
"-errfile",
"-errpfx",
@@ -1041,11 +1294,8 @@ bdb_DbOpen(interp, objc, objv, ip, dbp)
"-ffactor",
"-hash",
"-len",
- "-lorder",
- "-minkey",
"-mode",
"-nelem",
- "-nommap",
"-pad",
"-pagesize",
"-queue",
@@ -1053,22 +1303,37 @@ bdb_DbOpen(interp, objc, objv, ip, dbp)
"-recno",
"-recnum",
"-renumber",
- "-revsplitoff",
"-snapshot",
"-source",
"-truncate",
- "-test",
+ "-txn",
"-unknown",
"--",
NULL
};
enum bdbopen {
+#if CONFIG_TEST
+ TCL_DB_BTCOMPARE,
+ TCL_DB_DIRTY,
+ TCL_DB_DUPCOMPARE,
+ TCL_DB_HASHPROC,
+ TCL_DB_LORDER,
+ TCL_DB_MINKEY,
+ TCL_DB_NOMMAP,
+ TCL_DB_REVSPLIT,
+ TCL_DB_TEST,
+#endif
+ TCL_DB_AUTO_COMMIT,
TCL_DB_BTREE,
TCL_DB_CACHESIZE,
+ TCL_DB_CHKSUM,
TCL_DB_CREATE,
TCL_DB_DELIM,
TCL_DB_DUP,
TCL_DB_DUPSORT,
+ TCL_DB_ENCRYPT,
+ TCL_DB_ENCRYPT_AES,
+ TCL_DB_ENCRYPT_ANY,
TCL_DB_ENV,
TCL_DB_ERRFILE,
TCL_DB_ERRPFX,
@@ -1077,11 +1342,8 @@ bdb_DbOpen(interp, objc, objv, ip, dbp)
TCL_DB_FFACTOR,
TCL_DB_HASH,
TCL_DB_LEN,
- TCL_DB_LORDER,
- TCL_DB_MINKEY,
TCL_DB_MODE,
TCL_DB_NELEM,
- TCL_DB_NOMMAP,
TCL_DB_PAD,
TCL_DB_PAGESIZE,
TCL_DB_QUEUE,
@@ -1089,28 +1351,27 @@ bdb_DbOpen(interp, objc, objv, ip, dbp)
TCL_DB_RECNO,
TCL_DB_RECNUM,
TCL_DB_RENUMBER,
- TCL_DB_REVSPLIT,
TCL_DB_SNAPSHOT,
TCL_DB_SOURCE,
TCL_DB_TRUNCATE,
- TCL_DB_TEST,
+ TCL_DB_TXN,
TCL_DB_UNKNOWN,
TCL_DB_ENDARG
};
DBTCL_INFO *envip, *errip;
+ DB_TXN *txn;
DBTYPE type;
DB_ENV *envp;
Tcl_Obj **myobjv;
- u_int32_t gbytes, bytes, ncaches, open_flags;
- int endarg, i, intarg, itmp, j, mode, myobjc;
- int optindex, result, ret, set_err, set_flag, set_pfx, subdblen;
+ u_int32_t gbytes, bytes, ncaches, open_flags, uintarg;
+ int endarg, i, intarg, mode, myobjc;
+ int optindex, result, ret, set_err, set_flags, set_pfx, subdblen;
u_char *subdbtmp;
- char *arg, *db, *subdb;
- extern u_int32_t __ham_test __P((DB *, const void *, u_int32_t));
+ char *arg, *db, *passwd, *subdb, msg[MSG_SIZE];
type = DB_UNKNOWN;
- endarg = mode = set_err = set_flag = set_pfx = 0;
+ endarg = mode = set_err = set_flags = set_pfx = 0;
result = TCL_OK;
subdbtmp = NULL;
db = subdb = NULL;
@@ -1118,10 +1379,18 @@ bdb_DbOpen(interp, objc, objv, ip, dbp)
/*
* XXX
* If/when our Tcl interface becomes thread-safe, we should enable
- * DB_THREAD here. See comment in bdb_EnvOpen().
+ * DB_THREAD here in all cases. See comment in bdb_EnvOpen().
+ * For now, just turn it on when testing so that we exercise
+ * MUTEX_THREAD_LOCK cases.
*/
- open_flags = 0;
+ open_flags =
+#ifdef TEST_THREAD
+ DB_THREAD;
+#else
+ 0;
+#endif
envp = NULL;
+ txn = NULL;
if (objc < 2) {
Tcl_WrongNumArgs(interp, 2, objv, "?args?");
@@ -1162,7 +1431,11 @@ bdb_DbOpen(interp, objc, objv, ip, dbp)
*/
ret = db_create(dbp, envp, 0);
if (ret)
- return (_ReturnSetup(interp, ret, "db_create"));
+ return (_ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "db_create"));
+
+ /* Hang our info pointer on the DB handle, so we can do callbacks. */
+ (*dbp)->api_internal = ip;
/*
* XXX Remove restriction when err stuff is not tied to env.
@@ -1193,6 +1466,7 @@ bdb_DbOpen(interp, objc, objv, ip, dbp)
*/
i = 2;
while (i < objc) {
+ Tcl_ResetResult(interp);
if (Tcl_GetIndexFromObj(interp, objv[i], bdbopen, "option",
TCL_EXACT, &optindex) != TCL_OK) {
arg = Tcl_GetStringFromObj(objv[i], NULL);
@@ -1205,12 +1479,134 @@ bdb_DbOpen(interp, objc, objv, ip, dbp)
}
i++;
switch ((enum bdbopen)optindex) {
+#if CONFIG_TEST
+ case TCL_DB_BTCOMPARE:
+ if (i >= objc) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "-btcompare compareproc");
+ result = TCL_ERROR;
+ break;
+ }
+
+ /*
+ * Store the object containing the procedure name.
+ * We don't need to crack it out now--we'll want
+ * to bundle it up to pass into Tcl_EvalObjv anyway.
+ * Tcl's object refcounting will--I hope--take care
+ * of the memory management here.
+ */
+ ip->i_btcompare = objv[i++];
+ Tcl_IncrRefCount(ip->i_btcompare);
+ _debug_check();
+ ret = (*dbp)->set_bt_compare(*dbp, tcl_bt_compare);
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "set_bt_compare");
+ break;
+ case TCL_DB_DIRTY:
+ open_flags |= DB_DIRTY_READ;
+ break;
+ case TCL_DB_DUPCOMPARE:
+ if (i >= objc) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "-dupcompare compareproc");
+ result = TCL_ERROR;
+ break;
+ }
+
+ /*
+ * Store the object containing the procedure name.
+ * See TCL_DB_BTCOMPARE.
+ */
+ ip->i_dupcompare = objv[i++];
+ Tcl_IncrRefCount(ip->i_dupcompare);
+ _debug_check();
+ ret = (*dbp)->set_dup_compare(*dbp, tcl_dup_compare);
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "set_dup_compare");
+ break;
+ case TCL_DB_HASHPROC:
+ if (i >= objc) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "-hashproc hashproc");
+ result = TCL_ERROR;
+ break;
+ }
+
+ /*
+ * Store the object containing the procedure name.
+ * See TCL_DB_BTCOMPARE.
+ */
+ ip->i_hashproc = objv[i++];
+ Tcl_IncrRefCount(ip->i_hashproc);
+ _debug_check();
+ ret = (*dbp)->set_h_hash(*dbp, tcl_h_hash);
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "set_h_hash");
+ break;
+ case TCL_DB_LORDER:
+ if (i >= objc) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "-lorder 1234|4321");
+ result = TCL_ERROR;
+ break;
+ }
+ result = _GetUInt32(interp, objv[i++], &uintarg);
+ if (result == TCL_OK) {
+ _debug_check();
+ ret = (*dbp)->set_lorder(*dbp, uintarg);
+ result = _ReturnSetup(interp, ret,
+ DB_RETOK_STD(ret), "set_lorder");
+ }
+ break;
+ case TCL_DB_MINKEY:
+ if (i >= objc) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "-minkey minkey");
+ result = TCL_ERROR;
+ break;
+ }
+ result = _GetUInt32(interp, objv[i++], &uintarg);
+ if (result == TCL_OK) {
+ _debug_check();
+ ret = (*dbp)->set_bt_minkey(*dbp, uintarg);
+ result = _ReturnSetup(interp, ret,
+ DB_RETOK_STD(ret), "set_bt_minkey");
+ }
+ break;
+ case TCL_DB_NOMMAP:
+ open_flags |= DB_NOMMAP;
+ break;
+ case TCL_DB_REVSPLIT:
+ set_flags |= DB_REVSPLITOFF;
+ break;
+ case TCL_DB_TEST:
+ (*dbp)->set_h_hash(*dbp, __ham_test);
+ break;
+#endif
+ case TCL_DB_AUTO_COMMIT:
+ open_flags |= DB_AUTO_COMMIT;
+ break;
case TCL_DB_ENV:
/*
* Already parsed this, skip it and the env pointer.
*/
i++;
continue;
+ case TCL_DB_TXN:
+ if (i > (objc - 1)) {
+ Tcl_WrongNumArgs(interp, 2, objv, "?-txn id?");
+ result = TCL_ERROR;
+ break;
+ }
+ arg = Tcl_GetStringFromObj(objv[i++], NULL);
+ txn = NAME_TO_TXN(arg);
+ if (txn == NULL) {
+ snprintf(msg, MSG_SIZE,
+ "Put: Invalid txn: %s\n", arg);
+ Tcl_SetResult(interp, msg, TCL_VOLATILE);
+ result = TCL_ERROR;
+ }
+ break;
case TCL_DB_BTREE:
if (type != DB_UNKNOWN) {
Tcl_SetResult(interp,
@@ -1267,9 +1663,6 @@ bdb_DbOpen(interp, objc, objv, ip, dbp)
case TCL_DB_TRUNCATE:
open_flags |= DB_TRUNCATE;
break;
- case TCL_DB_TEST:
- (*dbp)->set_h_hash(*dbp, __ham_test);
- break;
case TCL_DB_MODE:
if (i >= objc) {
Tcl_WrongNumArgs(interp, 2, objv,
@@ -1285,73 +1678,83 @@ bdb_DbOpen(interp, objc, objv, ip, dbp)
*/
result = Tcl_GetIntFromObj(interp, objv[i++], &mode);
break;
- case TCL_DB_NOMMAP:
- open_flags |= DB_NOMMAP;
- break;
case TCL_DB_DUP:
- set_flag |= DB_DUP;
+ set_flags |= DB_DUP;
break;
case TCL_DB_DUPSORT:
- set_flag |= DB_DUPSORT;
+ set_flags |= DB_DUPSORT;
break;
case TCL_DB_RECNUM:
- set_flag |= DB_RECNUM;
+ set_flags |= DB_RECNUM;
break;
case TCL_DB_RENUMBER:
- set_flag |= DB_RENUMBER;
- break;
- case TCL_DB_REVSPLIT:
- set_flag |= DB_REVSPLITOFF;
+ set_flags |= DB_RENUMBER;
break;
case TCL_DB_SNAPSHOT:
- set_flag |= DB_SNAPSHOT;
+ set_flags |= DB_SNAPSHOT;
break;
- case TCL_DB_FFACTOR:
+ case TCL_DB_CHKSUM:
+ set_flags |= DB_CHKSUM_SHA1;
+ break;
+ case TCL_DB_ENCRYPT:
+ set_flags |= DB_ENCRYPT;
+ break;
+ case TCL_DB_ENCRYPT_AES:
+ /* Make sure we have an arg to check against! */
if (i >= objc) {
Tcl_WrongNumArgs(interp, 2, objv,
- "-ffactor density");
+ "?-encryptaes passwd?");
result = TCL_ERROR;
break;
}
- result = Tcl_GetIntFromObj(interp, objv[i++], &intarg);
- if (result == TCL_OK) {
- _debug_check();
- ret = (*dbp)->set_h_ffactor(*dbp,
- (u_int32_t)intarg);
- result = _ReturnSetup(interp, ret,
- "set_h_ffactor");
+ passwd = Tcl_GetStringFromObj(objv[i++], NULL);
+ _debug_check();
+ ret = (*dbp)->set_encrypt(*dbp, passwd, DB_ENCRYPT_AES);
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "set_encrypt");
+ break;
+ case TCL_DB_ENCRYPT_ANY:
+ /* Make sure we have an arg to check against! */
+ if (i >= objc) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "?-encryptany passwd?");
+ result = TCL_ERROR;
+ break;
}
+ passwd = Tcl_GetStringFromObj(objv[i++], NULL);
+ _debug_check();
+ ret = (*dbp)->set_encrypt(*dbp, passwd, 0);
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "set_encrypt");
break;
- case TCL_DB_NELEM:
+ case TCL_DB_FFACTOR:
if (i >= objc) {
Tcl_WrongNumArgs(interp, 2, objv,
- "-nelem nelem");
+ "-ffactor density");
result = TCL_ERROR;
break;
}
- result = Tcl_GetIntFromObj(interp, objv[i++], &intarg);
+ result = _GetUInt32(interp, objv[i++], &uintarg);
if (result == TCL_OK) {
_debug_check();
- ret = (*dbp)->set_h_nelem(*dbp,
- (u_int32_t)intarg);
+ ret = (*dbp)->set_h_ffactor(*dbp, uintarg);
result = _ReturnSetup(interp, ret,
- "set_h_nelem");
+ DB_RETOK_STD(ret), "set_h_ffactor");
}
break;
- case TCL_DB_LORDER:
+ case TCL_DB_NELEM:
if (i >= objc) {
Tcl_WrongNumArgs(interp, 2, objv,
- "-lorder 1234|4321");
+ "-nelem nelem");
result = TCL_ERROR;
break;
}
- result = Tcl_GetIntFromObj(interp, objv[i++], &intarg);
+ result = _GetUInt32(interp, objv[i++], &uintarg);
if (result == TCL_OK) {
_debug_check();
- ret = (*dbp)->set_lorder(*dbp,
- (u_int32_t)intarg);
+ ret = (*dbp)->set_h_nelem(*dbp, uintarg);
result = _ReturnSetup(interp, ret,
- "set_lorder");
+ DB_RETOK_STD(ret), "set_h_nelem");
}
break;
case TCL_DB_DELIM:
@@ -1366,7 +1769,7 @@ bdb_DbOpen(interp, objc, objv, ip, dbp)
_debug_check();
ret = (*dbp)->set_re_delim(*dbp, intarg);
result = _ReturnSetup(interp, ret,
- "set_re_delim");
+ DB_RETOK_STD(ret), "set_re_delim");
}
break;
case TCL_DB_LEN:
@@ -1376,13 +1779,12 @@ bdb_DbOpen(interp, objc, objv, ip, dbp)
result = TCL_ERROR;
break;
}
- result = Tcl_GetIntFromObj(interp, objv[i++], &intarg);
+ result = _GetUInt32(interp, objv[i++], &uintarg);
if (result == TCL_OK) {
_debug_check();
- ret = (*dbp)->set_re_len(*dbp,
- (u_int32_t)intarg);
+ ret = (*dbp)->set_re_len(*dbp, uintarg);
result = _ReturnSetup(interp, ret,
- "set_re_len");
+ DB_RETOK_STD(ret), "set_re_len");
}
break;
case TCL_DB_PAD:
@@ -1397,7 +1799,7 @@ bdb_DbOpen(interp, objc, objv, ip, dbp)
_debug_check();
ret = (*dbp)->set_re_pad(*dbp, intarg);
result = _ReturnSetup(interp, ret,
- "set_re_pad");
+ DB_RETOK_STD(ret), "set_re_pad");
}
break;
case TCL_DB_SOURCE:
@@ -1410,7 +1812,8 @@ bdb_DbOpen(interp, objc, objv, ip, dbp)
arg = Tcl_GetStringFromObj(objv[i++], NULL);
_debug_check();
ret = (*dbp)->set_re_source(*dbp, arg);
- result = _ReturnSetup(interp, ret, "set_re_source");
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "set_re_source");
break;
case TCL_DB_EXTENT:
if (i >= objc) {
@@ -1419,28 +1822,12 @@ bdb_DbOpen(interp, objc, objv, ip, dbp)
result = TCL_ERROR;
break;
}
- result = Tcl_GetIntFromObj(interp, objv[i++], &intarg);
- if (result == TCL_OK) {
- _debug_check();
- ret = (*dbp)->set_q_extentsize(*dbp,
- (u_int32_t)intarg);
- result = _ReturnSetup(interp, ret,
- "set_q_extentsize");
- }
- break;
- case TCL_DB_MINKEY:
- if (i >= objc) {
- Tcl_WrongNumArgs(interp, 2, objv,
- "-minkey minkey");
- result = TCL_ERROR;
- break;
- }
- result = Tcl_GetIntFromObj(interp, objv[i++], &intarg);
+ result = _GetUInt32(interp, objv[i++], &uintarg);
if (result == TCL_OK) {
_debug_check();
- ret = (*dbp)->set_bt_minkey(*dbp, intarg);
+ ret = (*dbp)->set_q_extentsize(*dbp, uintarg);
result = _ReturnSetup(interp, ret,
- "set_bt_minkey");
+ DB_RETOK_STD(ret), "set_q_extentsize");
}
break;
case TCL_DB_CACHESIZE:
@@ -1448,30 +1835,26 @@ bdb_DbOpen(interp, objc, objv, ip, dbp)
&myobjc, &myobjv);
if (result != TCL_OK)
break;
- j = 0;
if (myobjc != 3) {
Tcl_WrongNumArgs(interp, 2, objv,
"?-cachesize {gbytes bytes ncaches}?");
result = TCL_ERROR;
break;
}
- result = Tcl_GetIntFromObj(interp, myobjv[0], &itmp);
- gbytes = itmp;
+ result = _GetUInt32(interp, myobjv[0], &gbytes);
if (result != TCL_OK)
break;
- result = Tcl_GetIntFromObj(interp, myobjv[1], &itmp);
- bytes = itmp;
+ result = _GetUInt32(interp, myobjv[1], &bytes);
if (result != TCL_OK)
break;
- result = Tcl_GetIntFromObj(interp, myobjv[2], &itmp);
- ncaches = itmp;
+ result = _GetUInt32(interp, myobjv[2], &ncaches);
if (result != TCL_OK)
break;
_debug_check();
ret = (*dbp)->set_cachesize(*dbp, gbytes, bytes,
ncaches);
result = _ReturnSetup(interp, ret,
- "set_cachesize");
+ DB_RETOK_STD(ret), "set_cachesize");
break;
case TCL_DB_PAGESIZE:
if (i >= objc) {
@@ -1486,7 +1869,7 @@ bdb_DbOpen(interp, objc, objv, ip, dbp)
ret = (*dbp)->set_pagesize(*dbp,
(size_t)intarg);
result = _ReturnSetup(interp, ret,
- "set pagesize");
+ DB_RETOK_STD(ret), "set pagesize");
}
break;
case TCL_DB_ERRFILE:
@@ -1521,11 +1904,11 @@ bdb_DbOpen(interp, objc, objv, ip, dbp)
* If the user already set one, free it.
*/
if (errip->i_errpfx != NULL)
- __os_freestr(errip->i_errpfx);
+ __os_free(NULL, errip->i_errpfx);
if ((ret = __os_strdup((*dbp)->dbenv,
arg, &errip->i_errpfx)) != 0) {
result = _ReturnSetup(interp, ret,
- "__os_strdup");
+ DB_RETOK_STD(ret), "__os_strdup");
break;
}
if (errip->i_errpfx != NULL) {
@@ -1567,7 +1950,7 @@ bdb_DbOpen(interp, objc, objv, ip, dbp)
subdbtmp =
Tcl_GetByteArrayFromObj(objv[i++], &subdblen);
if ((ret = __os_malloc(envp,
- subdblen + 1, NULL, &subdb)) != 0) {
+ subdblen + 1, &subdb)) != 0) {
Tcl_SetResult(interp, db_strerror(ret),
TCL_STATIC);
return (0);
@@ -1576,9 +1959,10 @@ bdb_DbOpen(interp, objc, objv, ip, dbp)
subdb[subdblen] = '\0';
}
}
- if (set_flag) {
- ret = (*dbp)->set_flags(*dbp, set_flag);
- result = _ReturnSetup(interp, ret, "set_flags");
+ if (set_flags) {
+ ret = (*dbp)->set_flags(*dbp, set_flags);
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "set_flags");
if (result == TCL_ERROR)
goto error;
/*
@@ -1596,13 +1980,14 @@ bdb_DbOpen(interp, objc, objv, ip, dbp)
_debug_check();
/* Open the database. */
- ret = (*dbp)->open(*dbp, db, subdb, type, open_flags, mode);
- result = _ReturnSetup(interp, ret, "db open");
+ ret = (*dbp)->open(*dbp, txn, db, subdb, type, open_flags, mode);
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), "db open");
error:
if (subdb)
- __os_free(subdb, subdblen + 1);
+ __os_free(envp, subdb);
if (result == TCL_ERROR) {
+ (void)(*dbp)->close(*dbp, 0);
/*
* If we opened and set up the error file in the environment
* on this open, but we failed for some other reason, clean
@@ -1619,10 +2004,9 @@ error:
errip->i_err = NULL;
}
if (set_pfx && errip && errip->i_errpfx != NULL) {
- __os_freestr(errip->i_errpfx);
+ __os_free(envp, errip->i_errpfx);
errip->i_errpfx = NULL;
}
- (void)(*dbp)->close(*dbp, 0);
*dbp = NULL;
}
return (result);
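
As a usage sketch for the new -txn handling in bdb_DbOpen (environment, transaction, and file names are hypothetical; it assumes the environment was opened with transactions enabled):

	set t [$env txn]
	# DB->open is now transaction-protected: the -txn handle is
	# passed straight through to (*dbp)->open.
	set db [berkdb open -env $env -txn $t -create -btree test001.db]
	$t commit
	$db close
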
@@ -1630,7 +2014,7 @@ error:
/*
* bdb_DbRemove --
- * Implements the DB->remove command.
+ * Implements the DB_ENV->remove and DB->remove command.
*/
static int
bdb_DbRemove(interp, objc, objv)
@@ -1639,24 +2023,41 @@ bdb_DbRemove(interp, objc, objv)
Tcl_Obj *CONST objv[]; /* The argument objects */
{
static char *bdbrem[] = {
- "-env", "--", NULL
+ "-auto_commit",
+ "-encrypt",
+ "-encryptaes",
+ "-encryptany",
+ "-env",
+ "-txn",
+ "--",
+ NULL
};
enum bdbrem {
+ TCL_DBREM_AUTOCOMMIT,
+ TCL_DBREM_ENCRYPT,
+ TCL_DBREM_ENCRYPT_AES,
+ TCL_DBREM_ENCRYPT_ANY,
TCL_DBREM_ENV,
+ TCL_DBREM_TXN,
TCL_DBREM_ENDARG
};
- DB_ENV *envp;
DB *dbp;
+ DB_ENV *envp;
+ DB_TXN *txn;
int endarg, i, optindex, result, ret, subdblen;
+ u_int32_t enc_flag, iflags, set_flags;
u_char *subdbtmp;
- char *arg, *db, *subdb;
+ char *arg, *db, msg[MSG_SIZE], *passwd, *subdb;
- envp = NULL;
+ db = subdb = NULL;
dbp = NULL;
+ endarg = 0;
+ envp = NULL;
+ iflags = enc_flag = set_flags = 0;
+ passwd = NULL;
result = TCL_OK;
subdbtmp = NULL;
- db = subdb = NULL;
- endarg = 0;
+ txn = NULL;
if (objc < 2) {
Tcl_WrongNumArgs(interp, 2, objv, "?args? filename ?database?");
@@ -1681,6 +2082,36 @@ bdb_DbRemove(interp, objc, objv)
}
i++;
switch ((enum bdbrem)optindex) {
+ case TCL_DBREM_AUTOCOMMIT:
+ iflags |= DB_AUTO_COMMIT;
+ _debug_check();
+ break;
+ case TCL_DBREM_ENCRYPT:
+ set_flags |= DB_ENCRYPT;
+ _debug_check();
+ break;
+ case TCL_DBREM_ENCRYPT_AES:
+ /* Make sure we have an arg to check against! */
+ if (i >= objc) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "?-encryptaes passwd?");
+ result = TCL_ERROR;
+ break;
+ }
+ passwd = Tcl_GetStringFromObj(objv[i++], NULL);
+ enc_flag = DB_ENCRYPT_AES;
+ break;
+ case TCL_DBREM_ENCRYPT_ANY:
+ /* Make sure we have an arg to check against! */
+ if (i >= objc) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "?-encryptany passwd?");
+ result = TCL_ERROR;
+ break;
+ }
+ passwd = Tcl_GetStringFromObj(objv[i++], NULL);
+ enc_flag = 0;
+ break;
case TCL_DBREM_ENV:
arg = Tcl_GetStringFromObj(objv[i++], NULL);
envp = NAME_TO_ENV(arg);
@@ -1694,6 +2125,21 @@ bdb_DbRemove(interp, objc, objv)
case TCL_DBREM_ENDARG:
endarg = 1;
break;
+ case TCL_DBREM_TXN:
+ if (i >= objc) {
+ Tcl_WrongNumArgs(interp, 2, objv, "?-txn id?");
+ result = TCL_ERROR;
+ break;
+ }
+ arg = Tcl_GetStringFromObj(objv[i++], NULL);
+ txn = NAME_TO_TXN(arg);
+ if (txn == NULL) {
+ snprintf(msg, MSG_SIZE,
+ "Put: Invalid txn: %s\n", arg);
+ Tcl_SetResult(interp, msg, TCL_VOLATILE);
+ result = TCL_ERROR;
+ }
+ break;
}
/*
* If, at any time, parsing the args we get an error,
@@ -1721,7 +2167,7 @@ bdb_DbRemove(interp, objc, objv)
subdbtmp =
Tcl_GetByteArrayFromObj(objv[i++], &subdblen);
if ((ret = __os_malloc(envp, subdblen + 1,
- NULL, &subdb)) != 0) { Tcl_SetResult(interp,
+ &subdb)) != 0) { Tcl_SetResult(interp,
db_strerror(ret), TCL_STATIC);
return (0);
}
@@ -1733,28 +2179,48 @@ bdb_DbRemove(interp, objc, objv)
result = TCL_ERROR;
goto error;
}
- ret = db_create(&dbp, envp, 0);
- if (ret) {
- result = _ReturnSetup(interp, ret, "db_create");
- goto error;
+ if (envp == NULL) {
+ ret = db_create(&dbp, envp, 0);
+ if (ret) {
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "db_create");
+ goto error;
+ }
+
+ if (passwd != NULL) {
+ ret = dbp->set_encrypt(dbp, passwd, enc_flag);
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "set_encrypt");
+ }
+ if (set_flags != 0) {
+ ret = dbp->set_flags(dbp, set_flags);
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "set_flags");
+ }
}
+
/*
* No matter what, we NULL out dbp after this call.
*/
- ret = dbp->remove(dbp, db, subdb, 0);
- result = _ReturnSetup(interp, ret, "db remove");
+ _debug_check();
+ if (dbp == NULL)
+ ret = envp->dbremove(envp, txn, db, subdb, iflags);
+ else
+ ret = dbp->remove(dbp, db, subdb, 0);
+
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), "db remove");
dbp = NULL;
error:
if (subdb)
- __os_free(subdb, subdblen + 1);
- if (result == TCL_ERROR && dbp)
+ __os_free(envp, subdb);
+ if (result == TCL_ERROR && dbp != NULL)
(void)dbp->close(dbp, 0);
return (result);
}
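
A sketch of the environment-level removal path added above (names hypothetical); when -env and -txn are supplied, the call is routed to DB_ENV->dbremove rather than DB->remove:

	set t [$env txn]
	berkdb dbremove -env $env -txn $t test001.db
	$t commit
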
/*
* bdb_DbRename --
- * Implements the DB->rename command.
+ * Implements the DBENV->dbrename and DB->rename commands.
*/
static int
bdb_DbRename(interp, objc, objv)
@@ -1763,24 +2229,41 @@ bdb_DbRename(interp, objc, objv)
Tcl_Obj *CONST objv[]; /* The argument objects */
{
static char *bdbmv[] = {
- "-env", "--", NULL
+ "-auto_commit",
+ "-encrypt",
+ "-encryptaes",
+ "-encryptany",
+ "-env",
+ "-txn",
+ "--",
+ NULL
};
enum bdbmv {
+ TCL_DBMV_AUTOCOMMIT,
+ TCL_DBMV_ENCRYPT,
+ TCL_DBMV_ENCRYPT_AES,
+ TCL_DBMV_ENCRYPT_ANY,
TCL_DBMV_ENV,
+ TCL_DBMV_TXN,
TCL_DBMV_ENDARG
};
- DB_ENV *envp;
DB *dbp;
+ DB_ENV *envp;
+ DB_TXN *txn;
+ u_int32_t enc_flag, iflags, set_flags;
int endarg, i, newlen, optindex, result, ret, subdblen;
u_char *subdbtmp;
- char *arg, *db, *newname, *subdb;
+ char *arg, *db, msg[MSG_SIZE], *newname, *passwd, *subdb;
- envp = NULL;
+ db = newname = subdb = NULL;
dbp = NULL;
+ endarg = 0;
+ envp = NULL;
+ iflags = enc_flag = set_flags = 0;
+ passwd = NULL;
result = TCL_OK;
subdbtmp = NULL;
- db = newname = subdb = NULL;
- endarg = 0;
+ txn = NULL;
if (objc < 2) {
Tcl_WrongNumArgs(interp,
@@ -1806,6 +2289,36 @@ bdb_DbRename(interp, objc, objv)
}
i++;
switch ((enum bdbmv)optindex) {
+ case TCL_DBMV_AUTOCOMMIT:
+ iflags |= DB_AUTO_COMMIT;
+ _debug_check();
+ break;
+ case TCL_DBMV_ENCRYPT:
+ set_flags |= DB_ENCRYPT;
+ _debug_check();
+ break;
+ case TCL_DBMV_ENCRYPT_AES:
+ /* Make sure we have an arg to check against! */
+ if (i >= objc) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "?-encryptaes passwd?");
+ result = TCL_ERROR;
+ break;
+ }
+ passwd = Tcl_GetStringFromObj(objv[i++], NULL);
+ enc_flag = DB_ENCRYPT_AES;
+ break;
+ case TCL_DBMV_ENCRYPT_ANY:
+ /* Make sure we have an arg to check against! */
+ if (i >= objc) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "?-encryptany passwd?");
+ result = TCL_ERROR;
+ break;
+ }
+ passwd = Tcl_GetStringFromObj(objv[i++], NULL);
+ enc_flag = 0;
+ break;
case TCL_DBMV_ENV:
arg = Tcl_GetStringFromObj(objv[i++], NULL);
envp = NAME_TO_ENV(arg);
@@ -1819,6 +2332,21 @@ bdb_DbRename(interp, objc, objv)
case TCL_DBMV_ENDARG:
endarg = 1;
break;
+ case TCL_DBMV_TXN:
+ if (i >= objc) {
+ Tcl_WrongNumArgs(interp, 2, objv, "?-txn id?");
+ result = TCL_ERROR;
+ break;
+ }
+ arg = Tcl_GetStringFromObj(objv[i++], NULL);
+ txn = NAME_TO_TXN(arg);
+ if (txn == NULL) {
+ snprintf(msg, MSG_SIZE,
+ "Put: Invalid txn: %s\n", arg);
+ Tcl_SetResult(interp, msg, TCL_VOLATILE);
+ result = TCL_ERROR;
+ }
+ break;
}
/*
* If, at any time, parsing the args we get an error,
@@ -1846,7 +2374,7 @@ bdb_DbRename(interp, objc, objv)
subdbtmp =
Tcl_GetByteArrayFromObj(objv[i++], &subdblen);
if ((ret = __os_malloc(envp, subdblen + 1,
- NULL, &subdb)) != 0) {
+ &subdb)) != 0) {
Tcl_SetResult(interp,
db_strerror(ret), TCL_STATIC);
return (0);
@@ -1857,7 +2385,7 @@ bdb_DbRename(interp, objc, objv)
subdbtmp =
Tcl_GetByteArrayFromObj(objv[i++], &newlen);
if ((ret = __os_malloc(envp, newlen + 1,
- NULL, &newname)) != 0) {
+ &newname)) != 0) {
Tcl_SetResult(interp,
db_strerror(ret), TCL_STATIC);
return (0);
@@ -1865,31 +2393,50 @@ bdb_DbRename(interp, objc, objv)
memcpy(newname, subdbtmp, newlen);
newname[newlen] = '\0';
} else {
- Tcl_WrongNumArgs(interp, 3, objv, "?args? filename ?database? ?newname?");
+ Tcl_WrongNumArgs(
+ interp, 3, objv, "?args? filename ?database? ?newname?");
result = TCL_ERROR;
goto error;
}
- ret = db_create(&dbp, envp, 0);
- if (ret) {
- result = _ReturnSetup(interp, ret, "db_create");
- goto error;
+ if (envp == NULL) {
+ ret = db_create(&dbp, envp, 0);
+ if (ret) {
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "db_create");
+ goto error;
+ }
+ if (passwd != NULL) {
+ ret = dbp->set_encrypt(dbp, passwd, enc_flag);
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "set_encrypt");
+ }
+ if (set_flags != 0) {
+ ret = dbp->set_flags(dbp, set_flags);
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "set_flags");
+ }
}
+
/*
* No matter what, we NULL out dbp after this call.
*/
- ret = dbp->rename(dbp, db, subdb, newname, 0);
- result = _ReturnSetup(interp, ret, "db rename");
+ if (dbp == NULL)
+ ret = envp->dbrename(envp, txn, db, subdb, newname, iflags);
+ else
+ ret = dbp->rename(dbp, db, subdb, newname, 0);
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), "db rename");
dbp = NULL;
error:
if (subdb)
- __os_free(subdb, subdblen + 1);
+ __os_free(envp, subdb);
if (newname)
- __os_free(newname, newlen + 1);
- if (result == TCL_ERROR && dbp)
+ __os_free(envp, newname);
+ if (result == TCL_ERROR && dbp != NULL)
(void)dbp->close(dbp, 0);
return (result);
}
+#if CONFIG_TEST
/*
* bdb_DbVerify --
* Implements the DB->verify command.
@@ -1901,9 +2448,19 @@ bdb_DbVerify(interp, objc, objv)
Tcl_Obj *CONST objv[]; /* The argument objects */
{
static char *bdbverify[] = {
- "-env", "-errfile", "-errpfx", "--", NULL
+ "-encrypt",
+ "-encryptaes",
+ "-encryptany",
+ "-env",
+ "-errfile",
+ "-errpfx",
+ "--",
+ NULL
};
enum bdbvrfy {
+ TCL_DBVRFY_ENCRYPT,
+ TCL_DBVRFY_ENCRYPT_AES,
+ TCL_DBVRFY_ENCRYPT_ANY,
TCL_DBVRFY_ENV,
TCL_DBVRFY_ERRFILE,
TCL_DBVRFY_ERRPFX,
@@ -1912,15 +2469,18 @@ bdb_DbVerify(interp, objc, objv)
DB_ENV *envp;
DB *dbp;
FILE *errf;
- int endarg, i, optindex, result, ret, flags;
- char *arg, *db, *errpfx;
+ u_int32_t enc_flag, flags, set_flags;
+ int endarg, i, optindex, result, ret;
+ char *arg, *db, *errpfx, *passwd;
envp = NULL;
dbp = NULL;
+ passwd = NULL;
result = TCL_OK;
db = errpfx = NULL;
errf = NULL;
flags = endarg = 0;
+ enc_flag = set_flags = 0;
if (objc < 2) {
Tcl_WrongNumArgs(interp, 2, objv, "?args? filename");
@@ -1945,6 +2505,32 @@ bdb_DbVerify(interp, objc, objv)
}
i++;
switch ((enum bdbvrfy)optindex) {
+ case TCL_DBVRFY_ENCRYPT:
+ set_flags |= DB_ENCRYPT;
+ _debug_check();
+ break;
+ case TCL_DBVRFY_ENCRYPT_AES:
+ /* Make sure we have an arg to check against! */
+ if (i >= objc) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "?-encryptaes passwd?");
+ result = TCL_ERROR;
+ break;
+ }
+ passwd = Tcl_GetStringFromObj(objv[i++], NULL);
+ enc_flag = DB_ENCRYPT_AES;
+ break;
+ case TCL_DBVRFY_ENCRYPT_ANY:
+ /* Make sure we have an arg to check against! */
+ if (i >= objc) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "?-encryptany passwd?");
+ result = TCL_ERROR;
+ break;
+ }
+ passwd = Tcl_GetStringFromObj(objv[i++], NULL);
+ enc_flag = 0;
+ break;
case TCL_DBVRFY_ENV:
arg = Tcl_GetStringFromObj(objv[i++], NULL);
envp = NAME_TO_ENV(arg);
@@ -1983,10 +2569,10 @@ bdb_DbVerify(interp, objc, objv)
* If the user already set one, free it.
*/
if (errpfx != NULL)
- __os_freestr(errpfx);
+ __os_free(envp, errpfx);
if ((ret = __os_strdup(NULL, arg, &errpfx)) != 0) {
result = _ReturnSetup(interp, ret,
- "__os_strdup");
+ DB_RETOK_STD(ret), "__os_strdup");
break;
}
break;
@@ -2017,26 +2603,39 @@ bdb_DbVerify(interp, objc, objv)
}
ret = db_create(&dbp, envp, 0);
if (ret) {
- result = _ReturnSetup(interp, ret, "db_create");
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "db_create");
goto error;
}
+ if (passwd != NULL) {
+ ret = dbp->set_encrypt(dbp, passwd, enc_flag);
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "set_encrypt");
+ }
+
+ if (set_flags != 0) {
+ ret = dbp->set_flags(dbp, set_flags);
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "set_flags");
+ }
if (errf != NULL)
dbp->set_errfile(dbp, errf);
if (errpfx != NULL)
dbp->set_errpfx(dbp, errpfx);
ret = dbp->verify(dbp, db, NULL, NULL, flags);
- result = _ReturnSetup(interp, ret, "db verify");
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), "db verify");
error:
if (errf != NULL)
fclose(errf);
if (errpfx != NULL)
- __os_freestr(errpfx);
+ __os_free(envp, errpfx);
if (dbp)
(void)dbp->close(dbp, 0);
return (result);
}
+#endif
/*
* bdb_Version --
@@ -2113,6 +2712,7 @@ error:
return (result);
}
+#if CONFIG_TEST
/*
* bdb_Handles --
* Implements the handles command.
@@ -2144,7 +2744,9 @@ bdb_Handles(interp, objc, objv)
Tcl_SetObjResult(interp, res);
return (TCL_OK);
}
+#endif
+#if CONFIG_TEST
/*
* bdb_DbUpgrade --
* Implements the DB->upgrade command.
@@ -2165,7 +2767,8 @@ bdb_DbUpgrade(interp, objc, objv)
};
DB_ENV *envp;
DB *dbp;
- int endarg, i, optindex, result, ret, flags;
+ u_int32_t flags;
+ int endarg, i, optindex, result, ret;
char *arg, *db;
envp = NULL;
@@ -2233,14 +2836,282 @@ bdb_DbUpgrade(interp, objc, objv)
}
ret = db_create(&dbp, envp, 0);
if (ret) {
- result = _ReturnSetup(interp, ret, "db_create");
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "db_create");
goto error;
}
ret = dbp->upgrade(dbp, db, flags);
- result = _ReturnSetup(interp, ret, "db upgrade");
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), "db upgrade");
error:
if (dbp)
(void)dbp->close(dbp, 0);
return (result);
}
+#endif
+
+/*
+ * tcl_bt_compare and tcl_dup_compare --
+ * These two are basically identical internally, so may as well
+ * share code. The only differences are the name used in error
+ * reporting and the Tcl_Obj representing their respective procs.
+ */
+static int
+tcl_bt_compare(dbp, dbta, dbtb)
+ DB *dbp;
+ const DBT *dbta, *dbtb;
+{
+ return (tcl_compare_callback(dbp, dbta, dbtb,
+ ((DBTCL_INFO *)dbp->api_internal)->i_btcompare, "bt_compare"));
+}
+
+static int
+tcl_dup_compare(dbp, dbta, dbtb)
+ DB *dbp;
+ const DBT *dbta, *dbtb;
+{
+ return (tcl_compare_callback(dbp, dbta, dbtb,
+ ((DBTCL_INFO *)dbp->api_internal)->i_dupcompare, "dup_compare"));
+}
+
+/*
+ * tcl_compare_callback --
+ * Tcl callback for set_bt_compare and set_dup_compare. What this
+ * function does is stuff the data fields of the two DBTs into Tcl ByteArray
+ * objects, then call the procedure stored in ip->i_btcompare on the two
+ * objects. Then we return that procedure's result as the comparison.
+ */
+static int
+tcl_compare_callback(dbp, dbta, dbtb, procobj, errname)
+ DB *dbp;
+ const DBT *dbta, *dbtb;
+ Tcl_Obj *procobj;
+ char *errname;
+{
+ DBTCL_INFO *ip;
+ Tcl_Interp *interp;
+ Tcl_Obj *a, *b, *resobj, *objv[3];
+ int result, cmp;
+
+ ip = (DBTCL_INFO *)dbp->api_internal;
+ interp = ip->i_interp;
+ objv[0] = procobj;
+
+ /*
+ * Create two ByteArray objects, with the two data we've been passed.
+ * This will involve a copy, which is unpleasantly slow, but there's
+ * little we can do to avoid this (I think).
+ */
+ a = Tcl_NewByteArrayObj(dbta->data, dbta->size);
+ Tcl_IncrRefCount(a);
+ b = Tcl_NewByteArrayObj(dbtb->data, dbtb->size);
+ Tcl_IncrRefCount(b);
+
+ objv[1] = a;
+ objv[2] = b;
+
+ result = Tcl_EvalObjv(interp, 3, objv, 0);
+ if (result != TCL_OK) {
+ /*
+ * XXX
+ * If this or the next Tcl call fails, we're doomed.
+ * There's no way to return an error from comparison functions,
+ * no way to determine what the correct sort order is, and
+ * so no way to avoid corrupting the database if we proceed.
+ * We could play some games stashing return values on the
+ * DB handle, but it's not worth the trouble--no one with
+ * any sense is going to be using this other than for testing,
+ * and failure typically means that the bt_compare proc
+ * had a syntax error in it or something similarly dumb.
+ *
+ * So, drop core. If we're not running with diagnostic
+ * mode, panic--and always return a negative number. :-)
+ */
+panic: __db_err(dbp->dbenv, "Tcl %s callback failed", errname);
+ DB_ASSERT(0);
+ return (__db_panic(dbp->dbenv, DB_RUNRECOVERY));
+ }
+
+ resobj = Tcl_GetObjResult(interp);
+ result = Tcl_GetIntFromObj(interp, resobj, &cmp);
+ if (result != TCL_OK)
+ goto panic;
+
+ Tcl_DecrRefCount(a);
+ Tcl_DecrRefCount(b);
+ return (cmp);
+}
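
For reference, a sketch of what a comparison proc wired up through -btcompare or -dupcompare might look like on the Tcl side (proc and file names are hypothetical); the proc receives the two keys as byte arrays and must return a negative, zero, or positive integer:

	proc lexcmp {a b} {
		# Any total order works, but it must stay consistent
		# for the life of the database.
		return [string compare $a $b]
	}
	set db [berkdb open -create -btree -btcompare lexcmp test001.db]
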
+
+/*
+ * tcl_h_hash --
+ * Tcl callback for the hashing function. See tcl_compare_callback--
+ * this works much the same way, only we're given a buffer and a length
+ * instead of two DBTs.
+ */
+static u_int32_t
+tcl_h_hash(dbp, buf, len)
+ DB *dbp;
+ const void *buf;
+ u_int32_t len;
+{
+ DBTCL_INFO *ip;
+ Tcl_Interp *interp;
+ Tcl_Obj *objv[2];
+ int result, hval;
+
+ ip = (DBTCL_INFO *)dbp->api_internal;
+ interp = ip->i_interp;
+ objv[0] = ip->i_hashproc;
+
+ /*
+ * Create a ByteArray for the buffer.
+ */
+ objv[1] = Tcl_NewByteArrayObj((void *)buf, len);
+ Tcl_IncrRefCount(objv[1]);
+ result = Tcl_EvalObjv(interp, 2, objv, 0);
+ if (result != TCL_OK) {
+ /*
+ * XXX
+ * We drop core on error. See the comment in
+ * tcl_compare_callback.
+ */
+panic: __db_err(dbp->dbenv, "Tcl h_hash callback failed");
+ DB_ASSERT(0);
+ return (__db_panic(dbp->dbenv, DB_RUNRECOVERY));
+ }
+
+ result = Tcl_GetIntFromObj(interp, Tcl_GetObjResult(interp), &hval);
+ if (result != TCL_OK)
+ goto panic;
+
+ Tcl_DecrRefCount(objv[1]);
+ return (hval);
+}
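
Similarly, a hypothetical -hashproc callback; it is handed the key as a byte array and must return an integer hash value:

	proc sumhash {buf} {
		set h 0
		foreach c [split $buf ""] {
			scan $c %c v
			set h [expr {($h * 31 + $v) % 0x7fffffff}]
		}
		return $h
	}
	set db [berkdb open -create -hash -hashproc sumhash test002.db]
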
+
+/*
+ * tcl_rep_send --
+ * Replication send callback.
+ */
+static int
+tcl_rep_send(dbenv, control, rec, eid, flags)
+ DB_ENV *dbenv;
+ const DBT *control, *rec;
+ int eid;
+ u_int32_t flags;
+{
+ DBTCL_INFO *ip;
+ Tcl_Interp *interp;
+ Tcl_Obj *control_o, *eid_o, *origobj, *rec_o, *resobj, *objv[5];
+ int result, ret;
+
+ COMPQUIET(flags, 0);
+
+ ip = (DBTCL_INFO *)dbenv->app_private;
+ interp = ip->i_interp;
+ objv[0] = ip->i_rep_send;
+
+ control_o = Tcl_NewByteArrayObj(control->data, control->size);
+ Tcl_IncrRefCount(control_o);
+
+ rec_o = Tcl_NewByteArrayObj(rec->data, rec->size);
+ Tcl_IncrRefCount(rec_o);
+
+ eid_o = Tcl_NewIntObj(eid);
+ Tcl_IncrRefCount(eid_o);
+
+ objv[1] = control_o;
+ objv[2] = rec_o;
+ objv[3] = ip->i_rep_eid; /* From ID */
+ objv[4] = eid_o; /* To ID */
+
+ /*
+ * We really want to return the original result to the
+ * user. So, save the result obj here, and then after
+ * we've taken care of the Tcl_EvalObjv, set the result
+ * back to this original result.
+ */
+ origobj = Tcl_GetObjResult(interp);
+ Tcl_IncrRefCount(origobj);
+ result = Tcl_EvalObjv(interp, 5, objv, 0);
+ if (result != TCL_OK) {
+ /*
+ * XXX
+ * This probably isn't the right error behavior, but
+ * this error should only happen if the Tcl callback is
+ * somehow invalid, which is a fatal scripting bug.
+ */
+err: __db_err(dbenv, "Tcl rep_send failure");
+ return (EINVAL);
+ }
+
+ resobj = Tcl_GetObjResult(interp);
+ result = Tcl_GetIntFromObj(interp, resobj, &ret);
+ if (result != TCL_OK)
+ goto err;
+
+ Tcl_SetObjResult(interp, origobj);
+ Tcl_DecrRefCount(origobj);
+ Tcl_DecrRefCount(control_o);
+ Tcl_DecrRefCount(rec_o);
+ Tcl_DecrRefCount(eid_o);
+
+ return (ret);
+}
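
A sketch of the transport hookup on the Tcl side (machine IDs, directory, and proc name are hypothetical); the send proc is invoked with the control and rec byte arrays plus the sending and receiving environment IDs, and should return 0 on success:

	proc mysend {control rec fromid toid} {
		# A real test harness would hand the message to the peer's
		# rep_process_message; here we simply queue it.
		lappend ::msgqueue [list $control $rec $fromid $toid]
		return 0
	}
	set masterenv [berkdb env -create -txn -home $masterdir \
	    -rep_master -rep_transport [list 1 mysend]]
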
+
+#ifdef TEST_ALLOC
+/*
+ * tcl_db_malloc, tcl_db_realloc, tcl_db_free --
+ * Tcl-local malloc, realloc, and free functions to use for user data
+ * to exercise umalloc/urealloc/ufree. Allocate the memory as a Tcl object
+ * so we're sure to exacerbate and catch any shared-library issues.
+ */
+static void *
+tcl_db_malloc(size)
+ size_t size;
+{
+ Tcl_Obj *obj;
+ void *buf;
+
+ obj = Tcl_NewObj();
+ if (obj == NULL)
+ return (NULL);
+ Tcl_IncrRefCount(obj);
+
+ Tcl_SetObjLength(obj, size + sizeof(Tcl_Obj *));
+ buf = Tcl_GetString(obj);
+ memcpy(buf, &obj, sizeof(&obj));
+
+ buf = (Tcl_Obj **)buf + 1;
+ return (buf);
+}
+
+static void *
+tcl_db_realloc(ptr, size)
+ void *ptr;
+ size_t size;
+{
+ Tcl_Obj *obj;
+
+ if (ptr == NULL)
+ return (tcl_db_malloc(size));
+
+ obj = *(Tcl_Obj **)((Tcl_Obj **)ptr - 1);
+ Tcl_SetObjLength(obj, size + sizeof(Tcl_Obj *));
+
+ ptr = Tcl_GetString(obj);
+ memcpy(ptr, &obj, sizeof(&obj));
+
+ ptr = (Tcl_Obj **)ptr + 1;
+ return (ptr);
+}
+
+static void
+tcl_db_free(ptr)
+ void *ptr;
+{
+ Tcl_Obj *obj;
+
+ obj = *(Tcl_Obj **)((Tcl_Obj **)ptr - 1);
+ Tcl_DecrRefCount(obj);
+}
+#endif
diff --git a/bdb/tcl/tcl_dbcursor.c b/bdb/tcl/tcl_dbcursor.c
index 26e7b58c64a..fb426e53f48 100644
--- a/bdb/tcl/tcl_dbcursor.c
+++ b/bdb/tcl/tcl_dbcursor.c
@@ -1,14 +1,14 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1999, 2000
+ * Copyright (c) 1999-2001
* Sleepycat Software. All rights reserved.
*/
#include "db_config.h"
#ifndef lint
-static const char revid[] = "$Id: tcl_dbcursor.c,v 11.26 2001/01/11 18:19:55 bostic Exp $";
+static const char revid[] = "$Id: tcl_dbcursor.c,v 11.51 2002/08/06 06:20:59 bostic Exp $";
#endif /* not lint */
#ifndef NO_SYSTEM_INCLUDES
@@ -20,14 +20,14 @@ static const char revid[] = "$Id: tcl_dbcursor.c,v 11.26 2001/01/11 18:19:55 bos
#endif
#include "db_int.h"
-#include "tcl_db.h"
+#include "dbinc/tcl_db.h"
/*
* Prototypes for procedures defined later in this file:
*/
-static int tcl_DbcDup __P((Tcl_Interp *, int, Tcl_Obj * CONST*, DBC *));
-static int tcl_DbcGet __P((Tcl_Interp *, int, Tcl_Obj * CONST*, DBC *));
-static int tcl_DbcPut __P((Tcl_Interp *, int, Tcl_Obj * CONST*, DBC *));
+static int tcl_DbcDup __P((Tcl_Interp *, int, Tcl_Obj * CONST*, DBC *));
+static int tcl_DbcGet __P((Tcl_Interp *, int, Tcl_Obj * CONST*, DBC *, int));
+static int tcl_DbcPut __P((Tcl_Interp *, int, Tcl_Obj * CONST*, DBC *));
/*
* PUBLIC: int dbc_Cmd __P((ClientData, Tcl_Interp *, int, Tcl_Obj * CONST*));
@@ -37,12 +37,15 @@ static int tcl_DbcPut __P((Tcl_Interp *, int, Tcl_Obj * CONST*, DBC *));
*/
int
dbc_Cmd(clientData, interp, objc, objv)
- ClientData clientData; /* Cursor handle */
- Tcl_Interp *interp; /* Interpreter */
- int objc; /* How many arguments? */
- Tcl_Obj *CONST objv[]; /* The argument objects */
+ ClientData clientData; /* Cursor handle */
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
{
static char *dbccmds[] = {
+#if CONFIG_TEST
+ "pget",
+#endif
"close",
"del",
"dup",
@@ -51,6 +54,9 @@ dbc_Cmd(clientData, interp, objc, objv)
NULL
};
enum dbccmds {
+#if CONFIG_TEST
+ DBCPGET,
+#endif
DBCCLOSE,
DBCDELETE,
DBCDUP,
@@ -87,6 +93,11 @@ dbc_Cmd(clientData, interp, objc, objv)
TCL_EXACT, &cmdindex) != TCL_OK)
return (IS_HELP(objv[1]));
switch ((enum dbccmds)cmdindex) {
+#if CONFIG_TEST
+ case DBCPGET:
+ result = tcl_DbcGet(interp, objc, objv, dbc, 1);
+ break;
+#endif
case DBCCLOSE:
/*
* No args for this. Error if there are some.
@@ -97,7 +108,8 @@ dbc_Cmd(clientData, interp, objc, objv)
}
_debug_check();
ret = dbc->c_close(dbc);
- result = _ReturnSetup(interp, ret, "dbc close");
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "dbc close");
if (result == TCL_OK) {
(void)Tcl_DeleteCommand(interp, dbip->i_name);
_DeleteInfo(dbip);
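
The new pget subcommand parallels get and is meant for cursors opened on secondary indices; a hypothetical sketch (the return layout is assumed to follow the existing get conventions, with the primary key added):

	set sdbc [$sdb cursor]
	set triple [$sdbc pget -first]
	$sdbc close
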
@@ -113,13 +125,14 @@ dbc_Cmd(clientData, interp, objc, objv)
}
_debug_check();
ret = dbc->c_del(dbc, 0);
- result = _ReturnSetup(interp, ret, "dbc delete");
+ result = _ReturnSetup(interp, ret, DB_RETOK_DBCDEL(ret),
+ "dbc delete");
break;
case DBCDUP:
result = tcl_DbcDup(interp, objc, objv, dbc);
break;
case DBCGET:
- result = tcl_DbcGet(interp, objc, objv, dbc);
+ result = tcl_DbcGet(interp, objc, objv, dbc, 0);
break;
case DBCPUT:
result = tcl_DbcPut(interp, objc, objv, dbc);
@@ -139,14 +152,26 @@ tcl_DbcPut(interp, objc, objv, dbc)
DBC *dbc; /* Cursor pointer */
{
static char *dbcutopts[] = {
- "-after", "-before", "-current",
- "-keyfirst", "-keylast", "-nodupdata",
+#if CONFIG_TEST
+ "-nodupdata",
+#endif
+ "-after",
+ "-before",
+ "-current",
+ "-keyfirst",
+ "-keylast",
"-partial",
NULL
};
enum dbcutopts {
- DBCPUT_AFTER, DBCPUT_BEFORE, DBCPUT_CURRENT,
- DBCPUT_KEYFIRST,DBCPUT_KEYLAST, DBCPUT_NODUPDATA,
+#if CONFIG_TEST
+ DBCPUT_NODUPDATA,
+#endif
+ DBCPUT_AFTER,
+ DBCPUT_BEFORE,
+ DBCPUT_CURRENT,
+ DBCPUT_KEYFIRST,
+ DBCPUT_KEYLAST,
DBCPUT_PART
};
DB *thisdbp;
@@ -154,12 +179,14 @@ tcl_DbcPut(interp, objc, objv, dbc)
DBTCL_INFO *dbcip, *dbip;
DBTYPE type;
Tcl_Obj **elemv, *res;
+ void *dtmp, *ktmp;
db_recno_t recno;
u_int32_t flag;
- int elemc, i, itmp, optindex, result, ret;
+ int elemc, freekey, freedata, i, optindex, result, ret;
result = TCL_OK;
flag = 0;
+ freekey = freedata = 0;
if (objc < 2) {
Tcl_WrongNumArgs(interp, 2, objv, "?-args? ?key?");
@@ -190,6 +217,12 @@ tcl_DbcPut(interp, objc, objv, dbc)
}
i++;
switch ((enum dbcutopts)optindex) {
+#if CONFIG_TEST
+ case DBCPUT_NODUPDATA:
+ FLAG_CHECK(flag);
+ flag = DB_NODUPDATA;
+ break;
+#endif
case DBCPUT_AFTER:
FLAG_CHECK(flag);
flag = DB_AFTER;
@@ -210,10 +243,6 @@ tcl_DbcPut(interp, objc, objv, dbc)
FLAG_CHECK(flag);
flag = DB_KEYLAST;
break;
- case DBCPUT_NODUPDATA:
- FLAG_CHECK(flag);
- flag = DB_NODUPDATA;
- break;
case DBCPUT_PART:
if (i > (objc - 2)) {
Tcl_WrongNumArgs(interp, 2, objv,
@@ -233,12 +262,10 @@ tcl_DbcPut(interp, objc, objv, dbc)
break;
}
data.flags |= DB_DBT_PARTIAL;
- result = Tcl_GetIntFromObj(interp, elemv[0], &itmp);
- data.doff = itmp;
+ result = _GetUInt32(interp, elemv[0], &data.doff);
if (result != TCL_OK)
break;
- result = Tcl_GetIntFromObj(interp, elemv[1], &itmp);
- data.dlen = itmp;
+ result = _GetUInt32(interp, elemv[1], &data.dlen);
/*
* NOTE: We don't check result here because all we'd
* do is break anyway, and we are doing that. If you
@@ -269,7 +296,7 @@ tcl_DbcPut(interp, objc, objv, dbc)
return (result);
}
thisdbp = dbip->i_dbp;
- type = thisdbp->get_type(thisdbp);
+ (void)thisdbp->get_type(thisdbp, &type);
}
/*
* When we get here, we better have:
@@ -300,29 +327,45 @@ tcl_DbcPut(interp, objc, objv, dbc)
goto out;
}
if (type == DB_RECNO || type == DB_QUEUE) {
- result = Tcl_GetIntFromObj(interp, objv[objc-2], &itmp);
- recno = itmp;
+ result = _GetUInt32(interp, objv[objc-2], &recno);
if (result == TCL_OK) {
key.data = &recno;
key.size = sizeof(db_recno_t);
} else
return (result);
} else {
- key.data = Tcl_GetByteArrayFromObj(objv[objc-2], &itmp);
- key.size = itmp;
+ ret = _CopyObjBytes(interp, objv[objc-2], &ktmp,
+ &key.size, &freekey);
+ if (ret != 0) {
+ result = _ReturnSetup(interp, ret,
+ DB_RETOK_DBCPUT(ret), "dbc put");
+ return (result);
+ }
+ key.data = ktmp;
}
}
- data.data = Tcl_GetByteArrayFromObj(objv[objc-1], &itmp);
- data.size = itmp;
+ ret = _CopyObjBytes(interp, objv[objc-1], &dtmp,
+ &data.size, &freedata);
+ data.data = dtmp;
+ if (ret != 0) {
+ result = _ReturnSetup(interp, ret,
+ DB_RETOK_DBCPUT(ret), "dbc put");
+ goto out;
+ }
_debug_check();
ret = dbc->c_put(dbc, &key, &data, flag);
- result = _ReturnSetup(interp, ret, "dbc put");
- if (ret == 0 && (flag == DB_AFTER || flag == DB_BEFORE)
- && type == DB_RECNO) {
- res = Tcl_NewIntObj(*(db_recno_t *)key.data);
+ result = _ReturnSetup(interp, ret, DB_RETOK_DBCPUT(ret),
+ "dbc put");
+ if (ret == 0 &&
+ (flag == DB_AFTER || flag == DB_BEFORE) && type == DB_RECNO) {
+ res = Tcl_NewLongObj((long)*(db_recno_t *)key.data);
Tcl_SetObjResult(interp, res);
}
out:
+ if (freedata)
+ (void)__os_free(NULL, dtmp);
+ if (freekey)
+ (void)__os_free(NULL, ktmp);
return (result);
}
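
For reference, a minimal standalone sketch of the cursor behavior the command above wraps (illustrative only, not part of this patch): on a DB_RECNO database with mutable record numbers (DB_RENUMBER), a c_put() with DB_AFTER or DB_BEFORE hands the newly created record number back through the key DBT, which is what the Tcl layer converts with Tcl_NewLongObj. The helper name and the already-positioned cursor are assumptions.

#include <string.h>
#include <db.h>

/*
 * Illustrative sketch: insert a record after the current cursor
 * position in a renumbering DB_RECNO database and report the record
 * number the library returns through the key DBT.
 */
int
recno_put_after(DBC *dbc, char *val, db_recno_t *newrecp)
{
    DBT key, data;
    int ret;

    memset(&key, 0, sizeof(key));
    memset(&data, 0, sizeof(data));
    data.data = val;
    data.size = (u_int32_t)strlen(val) + 1;

    if ((ret = dbc->c_put(dbc, &key, &data, DB_AFTER)) == 0)
        *newrecp = *(db_recno_t *)key.data;
    return (ret);
}
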
@@ -330,13 +373,20 @@ out:
* tcl_dbc_get --
*/
static int
-tcl_DbcGet(interp, objc, objv, dbc)
+tcl_DbcGet(interp, objc, objv, dbc, ispget)
Tcl_Interp *interp; /* Interpreter */
int objc; /* How many arguments? */
Tcl_Obj *CONST objv[]; /* The argument objects */
DBC *dbc; /* Cursor pointer */
+ int ispget; /* 1 for pget, 0 for get */
{
static char *dbcgetopts[] = {
+#if CONFIG_TEST
+ "-dirty",
+ "-get_both_range",
+ "-multi",
+ "-multi_key",
+#endif
"-current",
"-first",
"-get_both",
@@ -356,6 +406,12 @@ tcl_DbcGet(interp, objc, objv, dbc)
NULL
};
enum dbcgetopts {
+#if CONFIG_TEST
+ DBCGET_DIRTY,
+ DBCGET_BOTH_RANGE,
+ DBCGET_MULTI,
+ DBCGET_MULTI_KEY,
+#endif
DBCGET_CURRENT,
DBCGET_FIRST,
DBCGET_BOTH,
@@ -374,16 +430,18 @@ tcl_DbcGet(interp, objc, objv, dbc)
DBCGET_SETRECNO
};
DB *thisdbp;
- DBT key, data;
+ DBT key, data, pdata;
DBTCL_INFO *dbcip, *dbip;
- DBTYPE type;
+ DBTYPE ptype, type;
Tcl_Obj **elemv, *myobj, *retlist;
- db_recno_t recno;
- u_int32_t flag;
- int elemc, i, itmp, optindex, result, ret;
+ void *dtmp, *ktmp;
+ db_recno_t precno, recno;
+ u_int32_t flag, op;
+ int bufsize, elemc, freekey, freedata, i, optindex, result, ret;
result = TCL_OK;
flag = 0;
+ freekey = freedata = 0;
if (objc < 2) {
Tcl_WrongNumArgs(interp, 2, objv, "?-args? ?key?");
@@ -413,63 +471,101 @@ tcl_DbcGet(interp, objc, objv, dbc)
}
i++;
switch ((enum dbcgetopts)optindex) {
+#if CONFIG_TEST
+ case DBCGET_DIRTY:
+ flag |= DB_DIRTY_READ;
+ break;
+ case DBCGET_BOTH_RANGE:
+ FLAG_CHECK2(flag,
+ DB_RMW|DB_MULTIPLE|DB_MULTIPLE_KEY|DB_DIRTY_READ);
+ flag |= DB_GET_BOTH_RANGE;
+ break;
+ case DBCGET_MULTI:
+ flag |= DB_MULTIPLE;
+ result = Tcl_GetIntFromObj(interp, objv[i], &bufsize);
+ if (result != TCL_OK)
+ goto out;
+ i++;
+ break;
+ case DBCGET_MULTI_KEY:
+ flag |= DB_MULTIPLE_KEY;
+ result = Tcl_GetIntFromObj(interp, objv[i], &bufsize);
+ if (result != TCL_OK)
+ goto out;
+ i++;
+ break;
+#endif
case DBCGET_RMW:
flag |= DB_RMW;
break;
case DBCGET_CURRENT:
- FLAG_CHECK2(flag, DB_RMW);
+ FLAG_CHECK2(flag,
+ DB_RMW|DB_MULTIPLE|DB_MULTIPLE_KEY|DB_DIRTY_READ);
flag |= DB_CURRENT;
break;
case DBCGET_FIRST:
- FLAG_CHECK2(flag, DB_RMW);
+ FLAG_CHECK2(flag,
+ DB_RMW|DB_MULTIPLE|DB_MULTIPLE_KEY|DB_DIRTY_READ);
flag |= DB_FIRST;
break;
case DBCGET_LAST:
- FLAG_CHECK2(flag, DB_RMW);
+ FLAG_CHECK2(flag,
+ DB_RMW|DB_MULTIPLE|DB_MULTIPLE_KEY|DB_DIRTY_READ);
flag |= DB_LAST;
break;
case DBCGET_NEXT:
- FLAG_CHECK2(flag, DB_RMW);
+ FLAG_CHECK2(flag,
+ DB_RMW|DB_MULTIPLE|DB_MULTIPLE_KEY|DB_DIRTY_READ);
flag |= DB_NEXT;
break;
case DBCGET_PREV:
- FLAG_CHECK2(flag, DB_RMW);
+ FLAG_CHECK2(flag,
+ DB_RMW|DB_MULTIPLE|DB_MULTIPLE_KEY|DB_DIRTY_READ);
flag |= DB_PREV;
break;
case DBCGET_PREVNODUP:
- FLAG_CHECK2(flag, DB_RMW);
+ FLAG_CHECK2(flag,
+ DB_RMW|DB_MULTIPLE|DB_MULTIPLE_KEY|DB_DIRTY_READ);
flag |= DB_PREV_NODUP;
break;
case DBCGET_NEXTNODUP:
- FLAG_CHECK2(flag, DB_RMW);
+ FLAG_CHECK2(flag,
+ DB_RMW|DB_MULTIPLE|DB_MULTIPLE_KEY|DB_DIRTY_READ);
flag |= DB_NEXT_NODUP;
break;
case DBCGET_NEXTDUP:
- FLAG_CHECK2(flag, DB_RMW);
+ FLAG_CHECK2(flag,
+ DB_RMW|DB_MULTIPLE|DB_MULTIPLE_KEY|DB_DIRTY_READ);
flag |= DB_NEXT_DUP;
break;
case DBCGET_BOTH:
- FLAG_CHECK2(flag, DB_RMW);
+ FLAG_CHECK2(flag,
+ DB_RMW|DB_MULTIPLE|DB_MULTIPLE_KEY|DB_DIRTY_READ);
flag |= DB_GET_BOTH;
break;
case DBCGET_RECNO:
- FLAG_CHECK2(flag, DB_RMW);
+ FLAG_CHECK2(flag,
+ DB_RMW|DB_MULTIPLE|DB_MULTIPLE_KEY|DB_DIRTY_READ);
flag |= DB_GET_RECNO;
break;
case DBCGET_JOIN:
- FLAG_CHECK2(flag, DB_RMW);
+ FLAG_CHECK2(flag,
+ DB_RMW|DB_MULTIPLE|DB_MULTIPLE_KEY|DB_DIRTY_READ);
flag |= DB_JOIN_ITEM;
break;
case DBCGET_SET:
- FLAG_CHECK2(flag, DB_RMW);
+ FLAG_CHECK2(flag,
+ DB_RMW|DB_MULTIPLE|DB_MULTIPLE_KEY|DB_DIRTY_READ);
flag |= DB_SET;
break;
case DBCGET_SETRANGE:
- FLAG_CHECK2(flag, DB_RMW);
+ FLAG_CHECK2(flag,
+ DB_RMW|DB_MULTIPLE|DB_MULTIPLE_KEY|DB_DIRTY_READ);
flag |= DB_SET_RANGE;
break;
case DBCGET_SETRECNO:
- FLAG_CHECK2(flag, DB_RMW);
+ FLAG_CHECK2(flag,
+ DB_RMW|DB_MULTIPLE|DB_MULTIPLE_KEY|DB_DIRTY_READ);
flag |= DB_SET_RECNO;
break;
case DBCGET_PART:
@@ -491,12 +587,10 @@ tcl_DbcGet(interp, objc, objv, dbc)
break;
}
data.flags |= DB_DBT_PARTIAL;
- result = Tcl_GetIntFromObj(interp, elemv[0], &itmp);
- data.doff = itmp;
+ result = _GetUInt32(interp, elemv[0], &data.doff);
if (result != TCL_OK)
break;
- result = Tcl_GetIntFromObj(interp, elemv[1], &itmp);
- data.dlen = itmp;
+ result = _GetUInt32(interp, elemv[1], &data.dlen);
/*
* NOTE: We don't check result here because all we'd
* do is break anyway, and we are doing that. If you
@@ -518,9 +612,10 @@ tcl_DbcGet(interp, objc, objv, dbc)
* a string.
*/
dbcip = _PtrToInfo(dbc);
- if (dbcip == NULL)
+ if (dbcip == NULL) {
type = DB_UNKNOWN;
- else {
+ ptype = DB_UNKNOWN;
+ } else {
dbip = dbcip->i_parent;
if (dbip == NULL) {
Tcl_SetResult(interp, "Cursor without parent database",
@@ -529,15 +624,25 @@ tcl_DbcGet(interp, objc, objv, dbc)
goto out;
}
thisdbp = dbip->i_dbp;
- type = thisdbp->get_type(thisdbp);
+ (void)thisdbp->get_type(thisdbp, &type);
+ if (ispget && thisdbp->s_primary != NULL)
+ (void)thisdbp->
+ s_primary->get_type(thisdbp->s_primary, &ptype);
+ else
+ ptype = DB_UNKNOWN;
}
/*
* When we get here, we better have:
- * 2 args, key and data if GET_BOTH was specified.
+ * 2 args, key and data if GET_BOTH/GET_BOTH_RANGE was specified.
* 1 arg if -set, -set_range or -set_recno
* 0 in all other cases.
*/
- if ((flag & DB_OPFLAGS_MASK) == DB_GET_BOTH) {
+ op = flag & DB_OPFLAGS_MASK;
+ switch (op) {
+ case DB_GET_BOTH:
+#if CONFIG_TEST
+ case DB_GET_BOTH_RANGE:
+#endif
if (i != (objc - 2)) {
Tcl_WrongNumArgs(interp, 2, objv,
"?-args? -get_both key data");
@@ -545,82 +650,158 @@ tcl_DbcGet(interp, objc, objv, dbc)
goto out;
} else {
if (type == DB_RECNO || type == DB_QUEUE) {
- result = Tcl_GetIntFromObj(
- interp, objv[objc-2], &itmp);
- recno = itmp;
+ result = _GetUInt32(
+ interp, objv[objc-2], &recno);
if (result == TCL_OK) {
key.data = &recno;
key.size = sizeof(db_recno_t);
} else
goto out;
} else {
- key.data = Tcl_GetByteArrayFromObj(
- objv[objc - 2], &itmp);
- key.size = itmp;
+ /*
+ * Some get calls (SET_*) can change the
+ * key pointers. So, we need to store
+ * the allocated key space in a tmp.
+ */
+ ret = _CopyObjBytes(interp, objv[objc-2],
+ &ktmp, &key.size, &freekey);
+ if (ret != 0) {
+ result = _ReturnSetup(interp, ret,
+ DB_RETOK_DBCGET(ret), "dbc get");
+ return (result);
+ }
+ key.data = ktmp;
+ }
+ if (ptype == DB_RECNO || ptype == DB_QUEUE) {
+ result = _GetUInt32(
+ interp, objv[objc-1], &precno);
+ if (result == TCL_OK) {
+ data.data = &precno;
+ data.size = sizeof(db_recno_t);
+ } else
+ goto out;
+ } else {
+ ret = _CopyObjBytes(interp, objv[objc-1],
+ &dtmp, &data.size, &freedata);
+ if (ret != 0) {
+ result = _ReturnSetup(interp, ret,
+ DB_RETOK_DBCGET(ret), "dbc get");
+ goto out;
+ }
+ data.data = dtmp;
}
- data.data =
- Tcl_GetByteArrayFromObj(objv[objc - 1], &itmp);
- data.size = itmp;
}
- } else if ((flag & DB_OPFLAGS_MASK) == DB_SET ||
- (flag & DB_OPFLAGS_MASK) == DB_SET_RANGE ||
- (flag & DB_OPFLAGS_MASK) == DB_SET_RECNO) {
+ break;
+ case DB_SET:
+ case DB_SET_RANGE:
+ case DB_SET_RECNO:
if (i != (objc - 1)) {
Tcl_WrongNumArgs(interp, 2, objv, "?-args? key");
result = TCL_ERROR;
goto out;
}
- data.flags |= DB_DBT_MALLOC;
- if ((flag & DB_OPFLAGS_MASK) == DB_SET_RECNO ||
+ if (flag & (DB_MULTIPLE|DB_MULTIPLE_KEY)) {
+ (void)__os_malloc(NULL, bufsize, &data.data);
+ data.ulen = bufsize;
+ data.flags |= DB_DBT_USERMEM;
+ } else
+ data.flags |= DB_DBT_MALLOC;
+ if (op == DB_SET_RECNO ||
type == DB_RECNO || type == DB_QUEUE) {
- result = Tcl_GetIntFromObj(interp,
- objv[objc - 1], (int *)&recno);
+ result = _GetUInt32(interp, objv[objc - 1], &recno);
key.data = &recno;
key.size = sizeof(db_recno_t);
} else {
- key.data =
- Tcl_GetByteArrayFromObj(objv[objc - 1], &itmp);
- key.size = itmp;
+ /*
+ * Some get calls (SET_*) can change the
+ * key pointers. So, we need to store
+ * the allocated key space in a tmp.
+ */
+ ret = _CopyObjBytes(interp, objv[objc-1],
+ &ktmp, &key.size, &freekey);
+ if (ret != 0) {
+ result = _ReturnSetup(interp, ret,
+ DB_RETOK_DBCGET(ret), "dbc get");
+ return (result);
+ }
+ key.data = ktmp;
}
- } else {
+ break;
+ default:
if (i != objc) {
Tcl_WrongNumArgs(interp, 2, objv, "?-args?");
result = TCL_ERROR;
goto out;
}
key.flags |= DB_DBT_MALLOC;
- data.flags |= DB_DBT_MALLOC;
+ if (flag & (DB_MULTIPLE|DB_MULTIPLE_KEY)) {
+ (void)__os_malloc(NULL, bufsize, &data.data);
+ data.ulen = bufsize;
+ data.flags |= DB_DBT_USERMEM;
+ } else
+ data.flags |= DB_DBT_MALLOC;
}
_debug_check();
- ret = dbc->c_get(dbc, &key, &data, flag);
- result = _ReturnSetup(interp, ret, "dbc get");
+ memset(&pdata, 0, sizeof(DBT));
+ if (ispget) {
+ F_SET(&pdata, DB_DBT_MALLOC);
+ ret = dbc->c_pget(dbc, &key, &data, &pdata, flag);
+ } else
+ ret = dbc->c_get(dbc, &key, &data, flag);
+ result = _ReturnSetup(interp, ret, DB_RETOK_DBCGET(ret), "dbc get");
if (result == TCL_ERROR)
goto out;
retlist = Tcl_NewListObj(0, NULL);
if (ret == DB_NOTFOUND)
goto out1;
- if ((flag & DB_OPFLAGS_MASK) == DB_GET_RECNO) {
+ if (op == DB_GET_RECNO) {
recno = *((db_recno_t *)data.data);
- myobj = Tcl_NewIntObj((int)recno);
+ myobj = Tcl_NewLongObj((long)recno);
result = Tcl_ListObjAppendElement(interp, retlist, myobj);
} else {
- if ((type == DB_RECNO || type == DB_QUEUE) && key.data != NULL)
- result = _SetListRecnoElem(interp, retlist,
- *(db_recno_t *)key.data, data.data, data.size);
- else
- result = _SetListElem(interp, retlist,
- key.data, key.size, data.data, data.size);
+ if (flag & (DB_MULTIPLE|DB_MULTIPLE_KEY))
+ result = _SetMultiList(interp,
+ retlist, &key, &data, type, flag);
+ else if ((type == DB_RECNO || type == DB_QUEUE) &&
+ key.data != NULL) {
+ if (ispget)
+ result = _Set3DBTList(interp, retlist, &key, 1,
+ &data,
+ (ptype == DB_RECNO || ptype == DB_QUEUE),
+ &pdata);
+ else
+ result = _SetListRecnoElem(interp, retlist,
+ *(db_recno_t *)key.data,
+ data.data, data.size);
+ } else {
+ if (ispget)
+ result = _Set3DBTList(interp, retlist, &key, 0,
+ &data,
+ (ptype == DB_RECNO || ptype == DB_QUEUE),
+ &pdata);
+ else
+ result = _SetListElem(interp, retlist,
+ key.data, key.size, data.data, data.size);
+ }
}
- if (key.flags & DB_DBT_MALLOC)
- __os_free(key.data, key.size);
- if (data.flags & DB_DBT_MALLOC)
- __os_free(data.data, data.size);
+ if (key.data != NULL && F_ISSET(&key, DB_DBT_MALLOC))
+ __os_ufree(dbc->dbp->dbenv, key.data);
+ if (data.data != NULL && F_ISSET(&data, DB_DBT_MALLOC))
+ __os_ufree(dbc->dbp->dbenv, data.data);
+ if (pdata.data != NULL && F_ISSET(&pdata, DB_DBT_MALLOC))
+ __os_ufree(dbc->dbp->dbenv, pdata.data);
out1:
if (result == TCL_OK)
Tcl_SetObjResult(interp, retlist);
out:
+ if (data.data != NULL && flag & (DB_MULTIPLE|DB_MULTIPLE_KEY))
+ __os_free(dbc->dbp->dbenv, data.data);
+ if (freedata)
+ (void)__os_free(NULL, dtmp);
+ if (freekey)
+ (void)__os_free(NULL, ktmp);
return (result);
}
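
The rewritten tcl_DbcGet above mixes three memory regimes: DB_DBT_MALLOC results freed after the Tcl list is built, a DB_DBT_USERMEM buffer for the -multi/-multi_key bulk paths, and copied key bytes from _CopyObjBytes. A minimal sketch of the plain, non-bulk regime (illustrative only, not part of this patch; in the public API, DB_DBT_MALLOC memory is released by the caller with free() unless DB_ENV->set_alloc() installed another allocator):

#include <stdlib.h>
#include <string.h>
#include <db.h>

/* Illustrative sketch: fetch the first key/data pair through a cursor. */
int
cursor_get_first(DBC *dbc)
{
    DBT key, data;
    int ret;

    memset(&key, 0, sizeof(key));
    memset(&data, 0, sizeof(data));
    key.flags = DB_DBT_MALLOC;      /* library allocates, caller frees */
    data.flags = DB_DBT_MALLOC;

    if ((ret = dbc->c_get(dbc, &key, &data, DB_FIRST)) == 0) {
        /* ... use key.data/key.size and data.data/data.size ... */
        free(key.data);
        free(data.data);
    }
    return (ret);
}
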
@@ -642,7 +823,6 @@ tcl_DbcDup(interp, objc, objv, dbc)
enum dbcdupopts {
DBCDUP_POS
};
- DB *thisdbp;
DBC *newdbc;
DBTCL_INFO *dbcip, *newdbcip, *dbip;
Tcl_Obj *res;
@@ -709,7 +889,6 @@ tcl_DbcDup(interp, objc, objv, dbc)
result = TCL_ERROR;
goto out;
}
- thisdbp = dbip->i_dbp;
}
/*
* Now duplicate the cursor. If successful, we need to create
@@ -731,7 +910,8 @@ tcl_DbcDup(interp, objc, objv, dbc)
_SetInfoData(newdbcip, newdbc);
Tcl_SetObjResult(interp, res);
} else {
- result = _ReturnSetup(interp, ret, "db dup");
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "db dup");
_DeleteInfo(newdbcip);
}
} else {
diff --git a/bdb/tcl/tcl_env.c b/bdb/tcl/tcl_env.c
index cb7b0d9744d..cdf4890e9fc 100644
--- a/bdb/tcl/tcl_env.c
+++ b/bdb/tcl/tcl_env.c
@@ -1,30 +1,33 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1999, 2000
+ * Copyright (c) 1999-2002
* Sleepycat Software. All rights reserved.
*/
#include "db_config.h"
#ifndef lint
-static const char revid[] = "$Id: tcl_env.c,v 11.33 2001/01/11 18:19:55 bostic Exp $";
+static const char revid[] = "$Id: tcl_env.c,v 11.84 2002/08/06 06:21:03 bostic Exp $";
#endif /* not lint */
#ifndef NO_SYSTEM_INCLUDES
#include <sys/types.h>
#include <stdlib.h>
+#include <string.h>
#include <tcl.h>
#endif
#include "db_int.h"
-#include "tcl_db.h"
+#include "dbinc/tcl_db.h"
/*
* Prototypes for procedures defined later in this file:
*/
-static void _EnvInfoDelete __P((Tcl_Interp *, DBTCL_INFO *));
+static void _EnvInfoDelete __P((Tcl_Interp *, DBTCL_INFO *));
+static int env_DbRemove __P((Tcl_Interp *, int, Tcl_Obj * CONST*, DB_ENV *));
+static int env_DbRename __P((Tcl_Interp *, int, Tcl_Obj * CONST*, DB_ENV *));
/*
* PUBLIC: int env_Cmd __P((ClientData, Tcl_Interp *, int, Tcl_Obj * CONST*));
@@ -34,86 +37,124 @@ static void _EnvInfoDelete __P((Tcl_Interp *, DBTCL_INFO *));
*/
int
env_Cmd(clientData, interp, objc, objv)
- ClientData clientData; /* Env handle */
- Tcl_Interp *interp; /* Interpreter */
- int objc; /* How many arguments? */
- Tcl_Obj *CONST objv[]; /* The argument objects */
+ ClientData clientData; /* Env handle */
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
{
static char *envcmds[] = {
- "close",
+#if CONFIG_TEST
+ "attributes",
"lock_detect",
"lock_id",
+ "lock_id_free",
+ "lock_id_set",
"lock_get",
"lock_stat",
+ "lock_timeout",
"lock_vec",
"log_archive",
"log_compare",
+ "log_cursor",
"log_file",
"log_flush",
"log_get",
"log_put",
- "log_register",
"log_stat",
- "log_unregister",
"mpool",
"mpool_stat",
"mpool_sync",
"mpool_trickle",
"mutex",
-#if CONFIG_TEST
+ "rep_elect",
+ "rep_flush",
+ "rep_limit",
+ "rep_process_message",
+ "rep_request",
+ "rep_start",
+ "rep_stat",
+ "rpcid",
"test",
-#endif
- "txn",
"txn_checkpoint",
+ "txn_id_set",
+ "txn_recover",
"txn_stat",
+ "txn_timeout",
"verbose",
+#endif
+ "close",
+ "dbremove",
+ "dbrename",
+ "txn",
NULL
};
enum envcmds {
- ENVCLOSE,
+#if CONFIG_TEST
+ ENVATTR,
ENVLKDETECT,
ENVLKID,
+ ENVLKFREEID,
+ ENVLKSETID,
ENVLKGET,
ENVLKSTAT,
+ ENVLKTIMEOUT,
ENVLKVEC,
ENVLOGARCH,
ENVLOGCMP,
+ ENVLOGCURSOR,
ENVLOGFILE,
ENVLOGFLUSH,
ENVLOGGET,
ENVLOGPUT,
- ENVLOGREG,
ENVLOGSTAT,
- ENVLOGUNREG,
ENVMP,
ENVMPSTAT,
ENVMPSYNC,
ENVTRICKLE,
ENVMUTEX,
-#if CONFIG_TEST
+ ENVREPELECT,
+ ENVREPFLUSH,
+ ENVREPLIMIT,
+ ENVREPPROCMESS,
+ ENVREPREQUEST,
+ ENVREPSTART,
+ ENVREPSTAT,
+ ENVRPCID,
ENVTEST,
-#endif
- ENVTXN,
ENVTXNCKP,
+ ENVTXNSETID,
+ ENVTXNRECOVER,
ENVTXNSTAT,
- ENVVERB
+ ENVTXNTIMEOUT,
+ ENVVERB,
+#endif
+ ENVCLOSE,
+ ENVDBREMOVE,
+ ENVDBRENAME,
+ ENVTXN
};
- DBTCL_INFO *envip;
- DB_ENV *envp;
+ DBTCL_INFO *envip, *logcip;
+ DB_ENV *dbenv;
+ DB_LOGC *logc;
Tcl_Obj *res;
- u_int32_t newval;
+ char newname[MSG_SIZE];
int cmdindex, result, ret;
+ u_int32_t newval;
+#if CONFIG_TEST
+ u_int32_t otherval;
+#endif
Tcl_ResetResult(interp);
- envp = (DB_ENV *)clientData;
- envip = _PtrToInfo((void *)envp);
+ dbenv = (DB_ENV *)clientData;
+ envip = _PtrToInfo((void *)dbenv);
result = TCL_OK;
+ memset(newname, 0, MSG_SIZE);
if (objc <= 1) {
Tcl_WrongNumArgs(interp, 1, objv, "command cmdargs");
return (TCL_ERROR);
}
- if (envp == NULL) {
+ if (dbenv == NULL) {
Tcl_SetResult(interp, "NULL env pointer", TCL_STATIC);
return (TCL_ERROR);
}
@@ -131,33 +172,15 @@ env_Cmd(clientData, interp, objc, objv)
return (IS_HELP(objv[1]));
res = NULL;
switch ((enum envcmds)cmdindex) {
- case ENVCLOSE:
- /*
- * No args for this. Error if there are some.
- */
- if (objc > 2) {
- Tcl_WrongNumArgs(interp, 2, objv, NULL);
- return (TCL_ERROR);
- }
- /*
- * Any transactions will be aborted, and an mpools
- * closed automatically. We must delete any txn
- * and mp widgets we have here too for this env.
- * NOTE: envip is freed when we come back from
- * this function. Set it to NULL to make sure no
- * one tries to use it later.
- */
- _EnvInfoDelete(interp, envip);
- envip = NULL;
- _debug_check();
- ret = envp->close(envp, 0);
- result = _ReturnSetup(interp, ret, "env close");
- break;
+#if CONFIG_TEST
case ENVLKDETECT:
- result = tcl_LockDetect(interp, objc, objv, envp);
+ result = tcl_LockDetect(interp, objc, objv, dbenv);
break;
case ENVLKSTAT:
- result = tcl_LockStat(interp, objc, objv, envp);
+ result = tcl_LockStat(interp, objc, objv, dbenv);
+ break;
+ case ENVLKTIMEOUT:
+ result = tcl_LockTimeout(interp, objc, objv, dbenv);
break;
case ENVLKID:
/*
@@ -168,73 +191,180 @@ env_Cmd(clientData, interp, objc, objv)
return (TCL_ERROR);
}
_debug_check();
- ret = lock_id(envp, &newval);
- result = _ReturnSetup(interp, ret, "lock_id");
+ ret = dbenv->lock_id(dbenv, &newval);
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "lock_id");
if (result == TCL_OK)
- res = Tcl_NewIntObj((int)newval);
+ res = Tcl_NewLongObj((long)newval);
+ break;
+ case ENVLKFREEID:
+ if (objc != 3) {
+ Tcl_WrongNumArgs(interp, 3, objv, NULL);
+ return (TCL_ERROR);
+ }
+ result = Tcl_GetLongFromObj(interp, objv[2], (long *)&newval);
+ if (result != TCL_OK)
+ return (result);
+ ret = dbenv->lock_id_free(dbenv, newval);
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "lock id_free");
+ break;
+ case ENVLKSETID:
+ if (objc != 4) {
+ Tcl_WrongNumArgs(interp, 4, objv, "current max");
+ return (TCL_ERROR);
+ }
+ result = Tcl_GetLongFromObj(interp, objv[2], (long *)&newval);
+ if (result != TCL_OK)
+ return (result);
+ result = Tcl_GetLongFromObj(interp, objv[3], (long *)&otherval);
+ if (result != TCL_OK)
+ return (result);
+ ret = dbenv->lock_id_set(dbenv, newval, otherval);
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "lock id_free");
break;
case ENVLKGET:
- result = tcl_LockGet(interp, objc, objv, envp);
+ result = tcl_LockGet(interp, objc, objv, dbenv);
break;
case ENVLKVEC:
- result = tcl_LockVec(interp, objc, objv, envp);
+ result = tcl_LockVec(interp, objc, objv, dbenv);
break;
case ENVLOGARCH:
- result = tcl_LogArchive(interp, objc, objv, envp);
+ result = tcl_LogArchive(interp, objc, objv, dbenv);
break;
case ENVLOGCMP:
result = tcl_LogCompare(interp, objc, objv);
break;
+ case ENVLOGCURSOR:
+ snprintf(newname, sizeof(newname),
+ "%s.logc%d", envip->i_name, envip->i_envlogcid);
+ logcip = _NewInfo(interp, NULL, newname, I_LOGC);
+ if (logcip != NULL) {
+ ret = dbenv->log_cursor(dbenv, &logc, 0);
+ if (ret == 0) {
+ result = TCL_OK;
+ envip->i_envlogcid++;
+ /*
+ * We do NOT want to set i_parent to
+ * envip here because log cursors are
+ * not "tied" to the env. That is, they
+ * are NOT closed if the env is closed.
+ */
+ Tcl_CreateObjCommand(interp, newname,
+ (Tcl_ObjCmdProc *)logc_Cmd,
+ (ClientData)logc, NULL);
+ res =
+ Tcl_NewStringObj(newname, strlen(newname));
+ _SetInfoData(logcip, logc);
+ } else {
+ _DeleteInfo(logcip);
+ result = _ErrorSetup(interp, ret, "log cursor");
+ }
+ } else {
+ Tcl_SetResult(interp,
+ "Could not set up info", TCL_STATIC);
+ result = TCL_ERROR;
+ }
+ break;
case ENVLOGFILE:
- result = tcl_LogFile(interp, objc, objv, envp);
+ result = tcl_LogFile(interp, objc, objv, dbenv);
break;
case ENVLOGFLUSH:
- result = tcl_LogFlush(interp, objc, objv, envp);
+ result = tcl_LogFlush(interp, objc, objv, dbenv);
break;
case ENVLOGGET:
- result = tcl_LogGet(interp, objc, objv, envp);
+ result = tcl_LogGet(interp, objc, objv, dbenv);
break;
case ENVLOGPUT:
- result = tcl_LogPut(interp, objc, objv, envp);
- break;
- case ENVLOGREG:
- result = tcl_LogRegister(interp, objc, objv, envp);
- break;
- case ENVLOGUNREG:
- result = tcl_LogUnregister(interp, objc, objv, envp);
+ result = tcl_LogPut(interp, objc, objv, dbenv);
break;
case ENVLOGSTAT:
- result = tcl_LogStat(interp, objc, objv, envp);
+ result = tcl_LogStat(interp, objc, objv, dbenv);
break;
case ENVMPSTAT:
- result = tcl_MpStat(interp, objc, objv, envp);
+ result = tcl_MpStat(interp, objc, objv, dbenv);
break;
case ENVMPSYNC:
- result = tcl_MpSync(interp, objc, objv, envp);
+ result = tcl_MpSync(interp, objc, objv, dbenv);
break;
case ENVTRICKLE:
- result = tcl_MpTrickle(interp, objc, objv, envp);
+ result = tcl_MpTrickle(interp, objc, objv, dbenv);
break;
case ENVMP:
- result = tcl_Mp(interp, objc, objv, envp, envip);
+ result = tcl_Mp(interp, objc, objv, dbenv, envip);
+ break;
+ case ENVREPELECT:
+ result = tcl_RepElect(interp, objc, objv, dbenv);
+ break;
+ case ENVREPFLUSH:
+ result = tcl_RepFlush(interp, objc, objv, dbenv);
+ break;
+ case ENVREPLIMIT:
+ result = tcl_RepLimit(interp, objc, objv, dbenv);
+ break;
+ case ENVREPPROCMESS:
+ result = tcl_RepProcessMessage(interp, objc, objv, dbenv);
+ break;
+ case ENVREPREQUEST:
+ result = tcl_RepRequest(interp, objc, objv, dbenv);
+ break;
+ case ENVREPSTART:
+ result = tcl_RepStart(interp, objc, objv, dbenv);
+ break;
+ case ENVREPSTAT:
+ result = tcl_RepStat(interp, objc, objv, dbenv);
+ break;
+ case ENVRPCID:
+ /*
+ * No args for this. Error if there are some.
+ */
+ if (objc > 2) {
+ Tcl_WrongNumArgs(interp, 2, objv, NULL);
+ return (TCL_ERROR);
+ }
+ /*
+ * !!! Retrieve the client ID from the dbenv handle directly.
+ * This is for testing purposes only. It is dbenv-private data.
+ */
+ res = Tcl_NewLongObj(dbenv->cl_id);
break;
case ENVTXNCKP:
- result = tcl_TxnCheckpoint(interp, objc, objv, envp);
+ result = tcl_TxnCheckpoint(interp, objc, objv, dbenv);
+ break;
+ case ENVTXNSETID:
+ if (objc != 4) {
+ Tcl_WrongNumArgs(interp, 4, objv, "current max");
+ return (TCL_ERROR);
+ }
+ result = Tcl_GetLongFromObj(interp, objv[2], (long *)&newval);
+ if (result != TCL_OK)
+ return (result);
+ result = Tcl_GetLongFromObj(interp, objv[3], (long *)&otherval);
+ if (result != TCL_OK)
+ return (result);
+ ret = dbenv->txn_id_set(dbenv, newval, otherval);
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "lock id_free");
+ break;
+ case ENVTXNRECOVER:
+ result = tcl_TxnRecover(interp, objc, objv, dbenv, envip);
break;
case ENVTXNSTAT:
- result = tcl_TxnStat(interp, objc, objv, envp);
+ result = tcl_TxnStat(interp, objc, objv, dbenv);
break;
- case ENVTXN:
- result = tcl_Txn(interp, objc, objv, envp, envip);
+ case ENVTXNTIMEOUT:
+ result = tcl_TxnTimeout(interp, objc, objv, dbenv);
break;
case ENVMUTEX:
- result = tcl_Mutex(interp, objc, objv, envp, envip);
+ result = tcl_Mutex(interp, objc, objv, dbenv, envip);
+ break;
+ case ENVATTR:
+ result = tcl_EnvAttr(interp, objc, objv, dbenv);
break;
-#if CONFIG_TEST
case ENVTEST:
- result = tcl_EnvTest(interp, objc, objv, envp);
+ result = tcl_EnvTest(interp, objc, objv, dbenv);
break;
-#endif
case ENVVERB:
/*
* Two args for this. Error if different.
@@ -243,7 +373,40 @@ env_Cmd(clientData, interp, objc, objv)
Tcl_WrongNumArgs(interp, 2, objv, NULL);
return (TCL_ERROR);
}
- result = tcl_EnvVerbose(interp, envp, objv[2], objv[3]);
+ result = tcl_EnvVerbose(interp, dbenv, objv[2], objv[3]);
+ break;
+#endif
+ case ENVCLOSE:
+ /*
+ * No args for this. Error if there are some.
+ */
+ if (objc > 2) {
+ Tcl_WrongNumArgs(interp, 2, objv, NULL);
+ return (TCL_ERROR);
+ }
+ /*
+ * Any transactions will be aborted, and any mpools
+ * closed automatically. We must delete any txn
+ * and mp widgets we have here too for this env.
+ * NOTE: envip is freed when we come back from
+ * this function. Set it to NULL to make sure no
+ * one tries to use it later.
+ */
+ _debug_check();
+ ret = dbenv->close(dbenv, 0);
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "env close");
+ _EnvInfoDelete(interp, envip);
+ envip = NULL;
+ break;
+ case ENVDBREMOVE:
+ result = env_DbRemove(interp, objc, objv, dbenv);
+ break;
+ case ENVDBRENAME:
+ result = env_DbRename(interp, objc, objv, dbenv);
+ break;
+ case ENVTXN:
+ result = tcl_Txn(interp, objc, objv, dbenv, envip);
break;
}
/*
@@ -262,44 +425,56 @@ env_Cmd(clientData, interp, objc, objv)
* tcl_EnvRemove --
*/
int
-tcl_EnvRemove(interp, objc, objv, envp, envip)
+tcl_EnvRemove(interp, objc, objv, dbenv, envip)
Tcl_Interp *interp; /* Interpreter */
int objc; /* How many arguments? */
Tcl_Obj *CONST objv[]; /* The argument objects */
- DB_ENV *envp; /* Env pointer */
+ DB_ENV *dbenv; /* Env pointer */
DBTCL_INFO *envip; /* Info pointer */
{
static char *envremopts[] = {
+#if CONFIG_TEST
+ "-overwrite",
+ "-server",
+#endif
"-data_dir",
+ "-encryptaes",
+ "-encryptany",
"-force",
"-home",
"-log_dir",
- "-server",
"-tmp_dir",
"-use_environ",
"-use_environ_root",
NULL
};
enum envremopts {
+#if CONFIG_TEST
+ ENVREM_OVERWRITE,
+ ENVREM_SERVER,
+#endif
ENVREM_DATADIR,
+ ENVREM_ENCRYPT_AES,
+ ENVREM_ENCRYPT_ANY,
ENVREM_FORCE,
ENVREM_HOME,
ENVREM_LOGDIR,
- ENVREM_SERVER,
ENVREM_TMPDIR,
ENVREM_USE_ENVIRON,
ENVREM_USE_ENVIRON_ROOT
};
DB_ENV *e;
- u_int32_t cflag, flag, forceflag;
+ u_int32_t cflag, enc_flag, flag, forceflag, sflag;
int i, optindex, result, ret;
- char *datadir, *home, *logdir, *server, *tmpdir;
+ char *datadir, *home, *logdir, *passwd, *server, *tmpdir;
result = TCL_OK;
- cflag = flag = forceflag = 0;
+ cflag = flag = forceflag = sflag = 0;
home = NULL;
+ passwd = NULL;
datadir = logdir = tmpdir = NULL;
server = NULL;
+ enc_flag = 0;
if (objc < 2) {
Tcl_WrongNumArgs(interp, 2, objv, "?args?");
@@ -315,30 +490,59 @@ tcl_EnvRemove(interp, objc, objv, envp, envip)
}
i++;
switch ((enum envremopts)optindex) {
- case ENVREM_FORCE:
- forceflag |= DB_FORCE;
+#if CONFIG_TEST
+ case ENVREM_SERVER:
+ /* Make sure we have an arg to check against! */
+ if (i >= objc) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "?-server name?");
+ result = TCL_ERROR;
+ break;
+ }
+ server = Tcl_GetStringFromObj(objv[i++], NULL);
+ cflag = DB_CLIENT;
break;
- case ENVREM_HOME:
+#endif
+ case ENVREM_ENCRYPT_AES:
/* Make sure we have an arg to check against! */
if (i >= objc) {
Tcl_WrongNumArgs(interp, 2, objv,
- "?-home dir?");
+ "?-encryptaes passwd?");
result = TCL_ERROR;
break;
}
- home = Tcl_GetStringFromObj(objv[i++], NULL);
+ passwd = Tcl_GetStringFromObj(objv[i++], NULL);
+ enc_flag = DB_ENCRYPT_AES;
break;
- case ENVREM_SERVER:
+ case ENVREM_ENCRYPT_ANY:
/* Make sure we have an arg to check against! */
if (i >= objc) {
Tcl_WrongNumArgs(interp, 2, objv,
- "?-server name?");
+ "?-encryptany passwd?");
result = TCL_ERROR;
break;
}
- server = Tcl_GetStringFromObj(objv[i++], NULL);
- cflag = DB_CLIENT;
+ passwd = Tcl_GetStringFromObj(objv[i++], NULL);
+ enc_flag = 0;
+ break;
+ case ENVREM_FORCE:
+ forceflag |= DB_FORCE;
+ break;
+ case ENVREM_HOME:
+ /* Make sure we have an arg to check against! */
+ if (i >= objc) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "?-home dir?");
+ result = TCL_ERROR;
+ break;
+ }
+ home = Tcl_GetStringFromObj(objv[i++], NULL);
+ break;
+#if CONFIG_TEST
+ case ENVREM_OVERWRITE:
+ sflag |= DB_OVERWRITE;
break;
+#endif
case ENVREM_USE_ENVIRON:
flag |= DB_USE_ENVIRON;
break;
@@ -382,38 +586,56 @@ tcl_EnvRemove(interp, objc, objv, envp, envip)
}
/*
- * If envp is NULL, we don't have an open env and we need to open
+ * If dbenv is NULL, we don't have an open env and we need to open
* one for the user. Don't bother with the info stuff.
*/
- if (envp == NULL) {
+ if (dbenv == NULL) {
if ((ret = db_env_create(&e, cflag)) != 0) {
- result = _ReturnSetup(interp, ret, "db_env_create");
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "db_env_create");
goto error;
}
if (server != NULL) {
- ret = e->set_server(e, server, 0, 0, 0);
- result = _ReturnSetup(interp, ret, "set_server");
+ _debug_check();
+ ret = e->set_rpc_server(e, NULL, server, 0, 0, 0);
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "set_rpc_server");
if (result != TCL_OK)
goto error;
}
if (datadir != NULL) {
_debug_check();
ret = e->set_data_dir(e, datadir);
- result = _ReturnSetup(interp, ret, "set_data_dir");
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "set_data_dir");
if (result != TCL_OK)
goto error;
}
if (logdir != NULL) {
_debug_check();
ret = e->set_lg_dir(e, logdir);
- result = _ReturnSetup(interp, ret, "set_log_dir");
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "set_log_dir");
if (result != TCL_OK)
goto error;
}
if (tmpdir != NULL) {
_debug_check();
ret = e->set_tmp_dir(e, tmpdir);
- result = _ReturnSetup(interp, ret, "set_tmp_dir");
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "set_tmp_dir");
+ if (result != TCL_OK)
+ goto error;
+ }
+ if (passwd != NULL) {
+ ret = e->set_encrypt(e, passwd, enc_flag);
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "set_encrypt");
+ }
+ if (sflag != 0 && (ret = e->set_flags(e, sflag, 1)) != 0) {
+ _debug_check();
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "set_flags");
if (result != TCL_OK)
goto error;
}
@@ -425,7 +647,7 @@ tcl_EnvRemove(interp, objc, objv, envp, envip)
*/
_EnvInfoDelete(interp, envip);
envip = NULL;
- e = envp;
+ e = dbenv;
}
flag |= forceflag;
@@ -435,7 +657,8 @@ tcl_EnvRemove(interp, objc, objv, envp, envip)
*/
_debug_check();
ret = e->remove(e, home, flag);
- result = _ReturnSetup(interp, ret, "env remove");
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "env remove");
error:
return (result);
}
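
tcl_EnvRemove now threads -encryptaes/-encryptany and -overwrite through set_encrypt() and set_flags() before the final DB_ENV->remove(). A rough standalone equivalent of the call sequence for the AES case (illustrative only, not part of this patch; the helper name and the DB_FORCE flag are assumptions):

#include <db.h>

/* Illustrative sketch: remove an environment protected by AES encryption. */
int
remove_encrypted_env(const char *home, const char *passwd)
{
    DB_ENV *dbenv;
    int ret;

    if ((ret = db_env_create(&dbenv, 0)) != 0)
        return (ret);
    if ((ret = dbenv->set_encrypt(dbenv, passwd, DB_ENCRYPT_AES)) != 0) {
        (void)dbenv->close(dbenv, 0);
        return (ret);
    }
    /* The handle is discarded by remove() whether or not it succeeds. */
    return (dbenv->remove(dbenv, home, DB_FORCE));
}
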
@@ -452,7 +675,7 @@ _EnvInfoDelete(interp, envip)
* any open subsystems in this env. We will:
* 1. Abort any transactions (which aborts any nested txns).
* 2. Close any mpools (which will put any pages itself).
- * 3. Put any locks.
+ * 3. Put any locks and close log cursors.
* 4. Close the error file.
*/
for (p = LIST_FIRST(&__db_infohead); p != NULL; p = nextp) {
@@ -461,6 +684,11 @@ _EnvInfoDelete(interp, envip)
* env. If so, remove its commands and info structure.
* We do not close/abort/whatever here, because we
* don't want to replicate DB behavior.
+ *
+ * NOTE: Only those types that can nest need to be
+ * itemized in the switch below. That is, txns and mps.
+ * Other types like log cursors and locks will just
+ * get cleaned up here.
*/
if (p->i_parent == envip) {
switch (p->i_type) {
@@ -486,6 +714,7 @@ _EnvInfoDelete(interp, envip)
_DeleteInfo(envip);
}
+#if CONFIG_TEST
/*
* PUBLIC: int tcl_EnvVerbose __P((Tcl_Interp *, DB_ENV *, Tcl_Obj *,
* PUBLIC: Tcl_Obj *));
@@ -493,9 +722,9 @@ _EnvInfoDelete(interp, envip)
* tcl_EnvVerbose --
*/
int
-tcl_EnvVerbose(interp, envp, which, onoff)
+tcl_EnvVerbose(interp, dbenv, which, onoff)
Tcl_Interp *interp; /* Interpreter */
- DB_ENV *envp; /* Env pointer */
+ DB_ENV *dbenv; /* Env pointer */
Tcl_Obj *which; /* Which subsystem */
Tcl_Obj *onoff; /* On or off */
{
@@ -503,6 +732,7 @@ tcl_EnvVerbose(interp, envp, which, onoff)
"chkpt",
"deadlock",
"recovery",
+ "rep",
"wait",
NULL
};
@@ -510,6 +740,7 @@ tcl_EnvVerbose(interp, envp, which, onoff)
ENVVERB_CHK,
ENVVERB_DEAD,
ENVVERB_REC,
+ ENVVERB_REP,
ENVVERB_WAIT
};
static char *verbonoff[] = {
@@ -538,6 +769,9 @@ tcl_EnvVerbose(interp, envp, which, onoff)
case ENVVERB_REC:
wh = DB_VERB_RECOVERY;
break;
+ case ENVVERB_REP:
+ wh = DB_VERB_REPLICATION;
+ break;
case ENVVERB_WAIT:
wh = DB_VERB_WAITSFOR;
break;
@@ -557,22 +791,107 @@ tcl_EnvVerbose(interp, envp, which, onoff)
default:
return (TCL_ERROR);
}
- ret = envp->set_verbose(envp, wh, on);
- return (_ReturnSetup(interp, ret, "env set verbose"));
+ ret = dbenv->set_verbose(dbenv, wh, on);
+ return (_ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "env set verbose"));
}
+#endif
#if CONFIG_TEST
/*
+ * PUBLIC: int tcl_EnvAttr __P((Tcl_Interp *, int, Tcl_Obj * CONST*, DB_ENV *));
+ *
+ * tcl_EnvAttr --
+ * Return a list of the env's attributes
+ */
+int
+tcl_EnvAttr(interp, objc, objv, dbenv)
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+ DB_ENV *dbenv; /* Env pointer */
+{
+ int result;
+ Tcl_Obj *myobj, *retlist;
+
+ result = TCL_OK;
+
+ if (objc > 2) {
+ Tcl_WrongNumArgs(interp, 2, objv, NULL);
+ return (TCL_ERROR);
+ }
+ retlist = Tcl_NewListObj(0, NULL);
+ /*
+ * XXX
+ * We peek at the dbenv to determine what subsystems
+ * we have available in this env.
+ */
+ myobj = Tcl_NewStringObj("-home", strlen("-home"));
+ if ((result = Tcl_ListObjAppendElement(interp,
+ retlist, myobj)) != TCL_OK)
+ goto err;
+ myobj = Tcl_NewStringObj(dbenv->db_home, strlen(dbenv->db_home));
+ if ((result = Tcl_ListObjAppendElement(interp,
+ retlist, myobj)) != TCL_OK)
+ goto err;
+ if (CDB_LOCKING(dbenv)) {
+ myobj = Tcl_NewStringObj("-cdb", strlen("-cdb"));
+ if ((result = Tcl_ListObjAppendElement(interp,
+ retlist, myobj)) != TCL_OK)
+ goto err;
+ }
+ if (CRYPTO_ON(dbenv)) {
+ myobj = Tcl_NewStringObj("-crypto", strlen("-crypto"));
+ if ((result = Tcl_ListObjAppendElement(interp,
+ retlist, myobj)) != TCL_OK)
+ goto err;
+ }
+ if (LOCKING_ON(dbenv)) {
+ myobj = Tcl_NewStringObj("-lock", strlen("-lock"));
+ if ((result = Tcl_ListObjAppendElement(interp,
+ retlist, myobj)) != TCL_OK)
+ goto err;
+ }
+ if (LOGGING_ON(dbenv)) {
+ myobj = Tcl_NewStringObj("-log", strlen("-log"));
+ if ((result = Tcl_ListObjAppendElement(interp,
+ retlist, myobj)) != TCL_OK)
+ goto err;
+ }
+ if (MPOOL_ON(dbenv)) {
+ myobj = Tcl_NewStringObj("-mpool", strlen("-mpool"));
+ if ((result = Tcl_ListObjAppendElement(interp,
+ retlist, myobj)) != TCL_OK)
+ goto err;
+ }
+ if (RPC_ON(dbenv)) {
+ myobj = Tcl_NewStringObj("-rpc", strlen("-rpc"));
+ if ((result = Tcl_ListObjAppendElement(interp,
+ retlist, myobj)) != TCL_OK)
+ goto err;
+ }
+ if (TXN_ON(dbenv)) {
+ myobj = Tcl_NewStringObj("-txn", strlen("-txn"));
+ if ((result = Tcl_ListObjAppendElement(interp,
+ retlist, myobj)) != TCL_OK)
+ goto err;
+ }
+ Tcl_SetObjResult(interp, retlist);
+err:
+ return (result);
+}
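
tcl_EnvAttr builds its reply with the standard Tcl C list idiom (empty list object, append, set result). The same pattern in isolation, for readers less familiar with that API (illustrative only; the helper name is an assumption):

#include <string.h>
#include <tcl.h>

/* Illustrative sketch: return a two-element list as the command result. */
int
set_pair_result(Tcl_Interp *interp, const char *name, const char *value)
{
    Tcl_Obj *list;

    list = Tcl_NewListObj(0, NULL);
    if (Tcl_ListObjAppendElement(interp, list,
        Tcl_NewStringObj(name, (int)strlen(name))) != TCL_OK)
        return (TCL_ERROR);
    if (Tcl_ListObjAppendElement(interp, list,
        Tcl_NewStringObj(value, (int)strlen(value))) != TCL_OK)
        return (TCL_ERROR);
    Tcl_SetObjResult(interp, list);
    return (TCL_OK);
}
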
+
+/*
* PUBLIC: int tcl_EnvTest __P((Tcl_Interp *, int, Tcl_Obj * CONST*, DB_ENV *));
*
* tcl_EnvTest --
*/
int
-tcl_EnvTest(interp, objc, objv, envp)
+tcl_EnvTest(interp, objc, objv, dbenv)
Tcl_Interp *interp; /* Interpreter */
int objc; /* How many arguments? */
Tcl_Obj *CONST objv[]; /* The argument objects */
- DB_ENV *envp; /* Env pointer */
+ DB_ENV *dbenv; /* Env pointer */
{
static char *envtestcmd[] = {
"abort",
@@ -584,29 +903,44 @@ tcl_EnvTest(interp, objc, objv, envp)
ENVTEST_COPY
};
static char *envtestat[] = {
+ "electinit",
+ "electsend",
+ "electvote1",
+ "electvote2",
+ "electwait1",
+ "electwait2",
"none",
+ "predestroy",
"preopen",
- "prerename",
+ "postdestroy",
"postlog",
"postlogmeta",
"postopen",
- "postrename",
"postsync",
+ "subdb_lock",
NULL
};
enum envtestat {
+ ENVTEST_ELECTINIT,
+ ENVTEST_ELECTSEND,
+ ENVTEST_ELECTVOTE1,
+ ENVTEST_ELECTVOTE2,
+ ENVTEST_ELECTWAIT1,
+ ENVTEST_ELECTWAIT2,
ENVTEST_NONE,
+ ENVTEST_PREDESTROY,
ENVTEST_PREOPEN,
- ENVTEST_PRERENAME,
+ ENVTEST_POSTDESTROY,
ENVTEST_POSTLOG,
ENVTEST_POSTLOGMETA,
ENVTEST_POSTOPEN,
- ENVTEST_POSTRENAME,
- ENVTEST_POSTSYNC
+ ENVTEST_POSTSYNC,
+ ENVTEST_SUBDB_LOCKS
};
int *loc, optindex, result, testval;
result = TCL_OK;
+ loc = NULL;
if (objc != 4) {
Tcl_WrongNumArgs(interp, 2, objv, "abort|copy location");
@@ -623,10 +957,10 @@ tcl_EnvTest(interp, objc, objv, envp)
}
switch ((enum envtestcmd)optindex) {
case ENVTEST_ABORT:
- loc = &envp->test_abort;
+ loc = &dbenv->test_abort;
break;
case ENVTEST_COPY:
- loc = &envp->test_copy;
+ loc = &dbenv->test_copy;
break;
default:
Tcl_SetResult(interp, "Illegal store location", TCL_STATIC);
@@ -642,14 +976,38 @@ tcl_EnvTest(interp, objc, objv, envp)
return (result);
}
switch ((enum envtestat)optindex) {
+ case ENVTEST_ELECTINIT:
+ DB_ASSERT(loc == &dbenv->test_abort);
+ testval = DB_TEST_ELECTINIT;
+ break;
+ case ENVTEST_ELECTSEND:
+ DB_ASSERT(loc == &dbenv->test_abort);
+ testval = DB_TEST_ELECTSEND;
+ break;
+ case ENVTEST_ELECTVOTE1:
+ DB_ASSERT(loc == &dbenv->test_abort);
+ testval = DB_TEST_ELECTVOTE1;
+ break;
+ case ENVTEST_ELECTVOTE2:
+ DB_ASSERT(loc == &dbenv->test_abort);
+ testval = DB_TEST_ELECTVOTE2;
+ break;
+ case ENVTEST_ELECTWAIT1:
+ DB_ASSERT(loc == &dbenv->test_abort);
+ testval = DB_TEST_ELECTWAIT1;
+ break;
+ case ENVTEST_ELECTWAIT2:
+ DB_ASSERT(loc == &dbenv->test_abort);
+ testval = DB_TEST_ELECTWAIT2;
+ break;
case ENVTEST_NONE:
testval = 0;
break;
case ENVTEST_PREOPEN:
testval = DB_TEST_PREOPEN;
break;
- case ENVTEST_PRERENAME:
- testval = DB_TEST_PRERENAME;
+ case ENVTEST_PREDESTROY:
+ testval = DB_TEST_PREDESTROY;
break;
case ENVTEST_POSTLOG:
testval = DB_TEST_POSTLOG;
@@ -660,12 +1018,16 @@ tcl_EnvTest(interp, objc, objv, envp)
case ENVTEST_POSTOPEN:
testval = DB_TEST_POSTOPEN;
break;
- case ENVTEST_POSTRENAME:
- testval = DB_TEST_POSTRENAME;
+ case ENVTEST_POSTDESTROY:
+ testval = DB_TEST_POSTDESTROY;
break;
case ENVTEST_POSTSYNC:
testval = DB_TEST_POSTSYNC;
break;
+ case ENVTEST_SUBDB_LOCKS:
+ DB_ASSERT(loc == &dbenv->test_abort);
+ testval = DB_TEST_SUBDB_LOCKS;
+ break;
default:
Tcl_SetResult(interp, "Illegal test location", TCL_STATIC);
return (TCL_ERROR);
@@ -676,3 +1038,273 @@ tcl_EnvTest(interp, objc, objv, envp)
return (result);
}
#endif
+
+/*
+ * env_DbRemove --
+ * Implements the ENV->dbremove command.
+ */
+static int
+env_DbRemove(interp, objc, objv, dbenv)
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+ DB_ENV *dbenv;
+{
+ static char *envdbrem[] = {
+ "-auto_commit",
+ "-txn",
+ "--",
+ NULL
+ };
+ enum envdbrem {
+ TCL_EDBREM_COMMIT,
+ TCL_EDBREM_TXN,
+ TCL_EDBREM_ENDARG
+ };
+ DB_TXN *txn;
+ u_int32_t flag;
+ int endarg, i, optindex, result, ret, subdblen;
+ u_char *subdbtmp;
+ char *arg, *db, *subdb, msg[MSG_SIZE];
+
+ txn = NULL;
+ result = TCL_OK;
+ subdbtmp = NULL;
+ db = subdb = NULL;
+ endarg = 0;
+ flag = 0;
+
+ if (objc < 2) {
+ Tcl_WrongNumArgs(interp, 2, objv, "?args? filename ?database?");
+ return (TCL_ERROR);
+ }
+
+ /*
+ * Parse the option flags and any transaction handle first;
+ * then call DB_ENV->dbremove with the name arguments.
+ */
+ i = 2;
+ while (i < objc) {
+ if (Tcl_GetIndexFromObj(interp, objv[i], envdbrem,
+ "option", TCL_EXACT, &optindex) != TCL_OK) {
+ arg = Tcl_GetStringFromObj(objv[i], NULL);
+ if (arg[0] == '-') {
+ result = IS_HELP(objv[i]);
+ goto error;
+ } else
+ Tcl_ResetResult(interp);
+ break;
+ }
+ i++;
+ switch ((enum envdbrem)optindex) {
+ case TCL_EDBREM_COMMIT:
+ flag |= DB_AUTO_COMMIT;
+ break;
+ case TCL_EDBREM_TXN:
+ if (i >= objc) {
+ Tcl_WrongNumArgs(interp, 2, objv, "?-txn id?");
+ result = TCL_ERROR;
+ break;
+ }
+ arg = Tcl_GetStringFromObj(objv[i++], NULL);
+ txn = NAME_TO_TXN(arg);
+ if (txn == NULL) {
+ snprintf(msg, MSG_SIZE,
+ "env dbremove: Invalid txn %s\n", arg);
+ Tcl_SetResult(interp, msg, TCL_VOLATILE);
+ return (TCL_ERROR);
+ }
+ break;
+ case TCL_EDBREM_ENDARG:
+ endarg = 1;
+ break;
+ }
+ /*
+ * If, at any time while parsing the args, we get an error,
+ * bail out and return.
+ */
+ if (result != TCL_OK)
+ goto error;
+ if (endarg)
+ break;
+ }
+ if (result != TCL_OK)
+ goto error;
+ /*
+ * Any args we have left (should be 1 or 2) are file
+ * names: if there is 1, a db name; if 2, a db and subdb name.
+ */
+ if (i == (objc - 1) || i == (objc - 2)) {
+ /*
+ * Dbs must be NULL terminated file names, but subdbs can
+ * be anything. Use Strings for the db name and byte
+ * arrays for the subdb.
+ */
+ db = Tcl_GetStringFromObj(objv[i++], NULL);
+ if (i != objc) {
+ subdbtmp =
+ Tcl_GetByteArrayFromObj(objv[i++], &subdblen);
+ if ((ret = __os_malloc(dbenv, subdblen + 1,
+ &subdb)) != 0) {
+ Tcl_SetResult(interp,
+ db_strerror(ret), TCL_STATIC);
+ return (0);
+ }
+ memcpy(subdb, subdbtmp, subdblen);
+ subdb[subdblen] = '\0';
+ }
+ } else {
+ Tcl_WrongNumArgs(interp, 2, objv, "?args? filename ?database?");
+ result = TCL_ERROR;
+ goto error;
+ }
+ ret = dbenv->dbremove(dbenv, txn, db, subdb, flag);
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "env dbremove");
+error:
+ if (subdb)
+ __os_free(dbenv, subdb);
+ return (result);
+}
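
env_DbRemove forwards to the DB_ENV->dbremove() method introduced with this import. A minimal sketch of the direct C call, wrapped in its own transaction (illustrative only, not part of this patch; the helper name is an assumption):

#include <db.h>

/* Illustrative sketch: transactionally remove one subdatabase. */
int
remove_subdb(DB_ENV *dbenv, const char *file, const char *subdb)
{
    DB_TXN *txn;
    int ret;

    if ((ret = dbenv->txn_begin(dbenv, NULL, &txn, 0)) != 0)
        return (ret);
    if ((ret = dbenv->dbremove(dbenv, txn, file, subdb, 0)) != 0) {
        (void)txn->abort(txn);
        return (ret);
    }
    return (txn->commit(txn, 0));
}
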
+
+/*
+ * env_DbRename --
+ * Implements the ENV->dbrename command.
+ */
+static int
+env_DbRename(interp, objc, objv, dbenv)
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+ DB_ENV *dbenv;
+{
+ static char *envdbmv[] = {
+ "-auto_commit",
+ "-txn",
+ "--",
+ NULL
+ };
+ enum envdbmv {
+ TCL_EDBMV_COMMIT,
+ TCL_EDBMV_TXN,
+ TCL_EDBMV_ENDARG
+ };
+ DB_TXN *txn;
+ u_int32_t flag;
+ int endarg, i, newlen, optindex, result, ret, subdblen;
+ u_char *subdbtmp;
+ char *arg, *db, *newname, *subdb, msg[MSG_SIZE];
+
+ txn = NULL;
+ result = TCL_OK;
+ subdbtmp = NULL;
+ db = newname = subdb = NULL;
+ endarg = 0;
+ flag = 0;
+
+ if (objc < 2) {
+ Tcl_WrongNumArgs(interp, 3, objv,
+ "?args? filename ?database? ?newname?");
+ return (TCL_ERROR);
+ }
+
+ /*
+ * Parse the option flags and any transaction handle first;
+ * then call DB_ENV->dbrename with the name arguments.
+ */
+ i = 2;
+ while (i < objc) {
+ if (Tcl_GetIndexFromObj(interp, objv[i], envdbmv,
+ "option", TCL_EXACT, &optindex) != TCL_OK) {
+ arg = Tcl_GetStringFromObj(objv[i], NULL);
+ if (arg[0] == '-') {
+ result = IS_HELP(objv[i]);
+ goto error;
+ } else
+ Tcl_ResetResult(interp);
+ break;
+ }
+ i++;
+ switch ((enum envdbmv)optindex) {
+ case TCL_EDBMV_COMMIT:
+ flag |= DB_AUTO_COMMIT;
+ break;
+ case TCL_EDBMV_TXN:
+ if (i >= objc) {
+ Tcl_WrongNumArgs(interp, 2, objv, "?-txn id?");
+ result = TCL_ERROR;
+ break;
+ }
+ arg = Tcl_GetStringFromObj(objv[i++], NULL);
+ txn = NAME_TO_TXN(arg);
+ if (txn == NULL) {
+ snprintf(msg, MSG_SIZE,
+ "env dbrename: Invalid txn %s\n", arg);
+ Tcl_SetResult(interp, msg, TCL_VOLATILE);
+ return (TCL_ERROR);
+ }
+ break;
+ case TCL_EDBMV_ENDARG:
+ endarg = 1;
+ break;
+ }
+ /*
+ * If, at any time while parsing the args, we get an error,
+ * bail out and return.
+ */
+ if (result != TCL_OK)
+ goto error;
+ if (endarg)
+ break;
+ }
+ if (result != TCL_OK)
+ goto error;
+ /*
+ * Any args we have left (should be 2 or 3) are file names:
+ * if 2, a db name and new name; if 3, a db, subdb, and new name.
+ */
+ if (i == (objc - 2) || i == (objc - 3)) {
+ /*
+ * Dbs must be NULL terminated file names, but subdbs can
+ * be anything. Use Strings for the db name and byte
+ * arrays for the subdb.
+ */
+ db = Tcl_GetStringFromObj(objv[i++], NULL);
+ if (i == objc - 2) {
+ subdbtmp =
+ Tcl_GetByteArrayFromObj(objv[i++], &subdblen);
+ if ((ret = __os_malloc(dbenv, subdblen + 1,
+ &subdb)) != 0) {
+ Tcl_SetResult(interp,
+ db_strerror(ret), TCL_STATIC);
+ return (0);
+ }
+ memcpy(subdb, subdbtmp, subdblen);
+ subdb[subdblen] = '\0';
+ }
+ subdbtmp =
+ Tcl_GetByteArrayFromObj(objv[i++], &newlen);
+ if ((ret = __os_malloc(dbenv, newlen + 1,
+ &newname)) != 0) {
+ Tcl_SetResult(interp,
+ db_strerror(ret), TCL_STATIC);
+ return (0);
+ }
+ memcpy(newname, subdbtmp, newlen);
+ newname[newlen] = '\0';
+ } else {
+ Tcl_WrongNumArgs(interp, 3, objv,
+ "?args? filename ?database? ?newname?");
+ result = TCL_ERROR;
+ goto error;
+ }
+ ret = dbenv->dbrename(dbenv, txn, db, subdb, newname, flag);
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "env dbrename");
+error:
+ if (subdb)
+ __os_free(dbenv, subdb);
+ if (newname)
+ __os_free(dbenv, newname);
+ return (result);
+}
diff --git a/bdb/tcl/tcl_internal.c b/bdb/tcl/tcl_internal.c
index bdab60f4ad6..2d6ad4df444 100644
--- a/bdb/tcl/tcl_internal.c
+++ b/bdb/tcl/tcl_internal.c
@@ -1,14 +1,14 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1999, 2000
+ * Copyright (c) 1999-2001
* Sleepycat Software. All rights reserved.
*/
#include "db_config.h"
#ifndef lint
-static const char revid[] = "$Id: tcl_internal.c,v 11.27 2000/05/22 18:36:51 sue Exp $";
+static const char revid[] = "$Id: tcl_internal.c,v 11.54 2002/08/15 02:47:46 bostic Exp $";
#endif /* not lint */
#ifndef NO_SYSTEM_INCLUDES
@@ -20,10 +20,10 @@ static const char revid[] = "$Id: tcl_internal.c,v 11.27 2000/05/22 18:36:51 sue
#endif
#include "db_int.h"
-#include "tcl_db.h"
-#include "db_page.h"
-#include "db_am.h"
-#include "db_ext.h"
+#include "dbinc/tcl_db.h"
+#include "dbinc/db_page.h"
+#include "dbinc/db_am.h"
+#include "dbinc_auto/db_ext.h"
/*
*
@@ -46,6 +46,16 @@ static const char revid[] = "$Id: tcl_internal.c,v 11.27 2000/05/22 18:36:51 sue
/*
* Prototypes for procedures defined later in this file:
*/
+static void tcl_flag_callback __P((u_int32_t, const FN *, void *));
+
+/*
+ * Private structure type used to pass both an interp and an object into
+ * a callback's single void *.
+ */
+struct __tcl_callback_bundle {
+ Tcl_Interp *interp;
+ Tcl_Obj *obj;
+};
#define GLOB_CHAR(c) ((c) == '*' || (c) == '?')
@@ -68,14 +78,14 @@ _NewInfo(interp, anyp, name, type)
DBTCL_INFO *p;
int i, ret;
- if ((ret = __os_malloc(NULL, sizeof(DBTCL_INFO), NULL, &p)) != 0) {
+ if ((ret = __os_malloc(NULL, sizeof(DBTCL_INFO), &p)) != 0) {
Tcl_SetResult(interp, db_strerror(ret), TCL_STATIC);
return (NULL);
}
if ((ret = __os_strdup(NULL, name, &p->i_name)) != 0) {
Tcl_SetResult(interp, db_strerror(ret), TCL_STATIC);
- __os_free(p, sizeof(DBTCL_INFO));
+ __os_free(NULL, p);
return (NULL);
}
p->i_interp = interp;
@@ -87,6 +97,12 @@ _NewInfo(interp, anyp, name, type)
p->i_err = NULL;
p->i_errpfx = NULL;
p->i_lockobj.data = NULL;
+ p->i_btcompare = NULL;
+ p->i_dupcompare = NULL;
+ p->i_hashproc = NULL;
+ p->i_second_call = NULL;
+ p->i_rep_eid = NULL;
+ p->i_rep_send = NULL;
for (i = 0; i < MAX_ID; i++)
p->i_otherid[i] = 0;
@@ -111,22 +127,6 @@ _NameToPtr(name)
}
/*
- * PUBLIC: char *_PtrToName __P((CONST void *));
- */
-char *
-_PtrToName(ptr)
- CONST void *ptr;
-{
- DBTCL_INFO *p;
-
- for (p = LIST_FIRST(&__db_infohead); p != NULL;
- p = LIST_NEXT(p, entries))
- if (p->i_anyp == ptr)
- return (p->i_name);
- return (NULL);
-}
-
-/*
* PUBLIC: DBTCL_INFO *_PtrToInfo __P((CONST void *));
*/
DBTCL_INFO *
@@ -183,15 +183,27 @@ _DeleteInfo(p)
return;
LIST_REMOVE(p, entries);
if (p->i_lockobj.data != NULL)
- __os_free(p->i_lockobj.data, p->i_lockobj.size);
+ __os_free(NULL, p->i_lockobj.data);
if (p->i_err != NULL) {
fclose(p->i_err);
p->i_err = NULL;
}
if (p->i_errpfx != NULL)
- __os_freestr(p->i_errpfx);
- __os_freestr(p->i_name);
- __os_free(p, sizeof(DBTCL_INFO));
+ __os_free(NULL, p->i_errpfx);
+ if (p->i_btcompare != NULL)
+ Tcl_DecrRefCount(p->i_btcompare);
+ if (p->i_dupcompare != NULL)
+ Tcl_DecrRefCount(p->i_dupcompare);
+ if (p->i_hashproc != NULL)
+ Tcl_DecrRefCount(p->i_hashproc);
+ if (p->i_second_call != NULL)
+ Tcl_DecrRefCount(p->i_second_call);
+ if (p->i_rep_eid != NULL)
+ Tcl_DecrRefCount(p->i_rep_eid);
+ if (p->i_rep_send != NULL)
+ Tcl_DecrRefCount(p->i_rep_send);
+ __os_free(NULL, p->i_name);
+ __os_free(NULL, p);
return;
}
@@ -258,7 +270,7 @@ _SetListRecnoElem(interp, list, elem1, elem2, e2size)
int myobjc;
myobjc = 2;
- myobjv[0] = Tcl_NewIntObj(elem1);
+ myobjv[0] = Tcl_NewLongObj((long)elem1);
myobjv[1] = Tcl_NewByteArrayObj(elem2, e2size);
thislist = Tcl_NewListObj(myobjc, myobjv);
if (thislist == NULL)
@@ -268,6 +280,107 @@ _SetListRecnoElem(interp, list, elem1, elem2, e2size)
}
/*
+ * _Set3DBTList --
+ * This is really analogous to both _SetListElem and
+ * _SetListRecnoElem--it's used for three-DBT lists returned by
+ * DB->pget and DBC->pget(). We'd need a family of four functions
+ * to handle all the recno/non-recno cases, however, so we make
+ * this a little more aware of the internals and do the logic inside.
+ *
+ * XXX
+ * One of these days all these functions should probably be cleaned up
+ * to eliminate redundancy and bring them into the standard DB
+ * function namespace.
+ *
+ * PUBLIC: int _Set3DBTList __P((Tcl_Interp *, Tcl_Obj *, DBT *, int,
+ * PUBLIC: DBT *, int, DBT *));
+ */
+int
+_Set3DBTList(interp, list, elem1, is1recno, elem2, is2recno, elem3)
+ Tcl_Interp *interp;
+ Tcl_Obj *list;
+ DBT *elem1, *elem2, *elem3;
+ int is1recno, is2recno;
+{
+
+ Tcl_Obj *myobjv[3], *thislist;
+
+ if (is1recno)
+ myobjv[0] = Tcl_NewLongObj((long)*(db_recno_t *)elem1->data);
+ else
+ myobjv[0] =
+ Tcl_NewByteArrayObj((u_char *)elem1->data, elem1->size);
+
+ if (is2recno)
+ myobjv[1] = Tcl_NewLongObj((long)*(db_recno_t *)elem2->data);
+ else
+ myobjv[1] =
+ Tcl_NewByteArrayObj((u_char *)elem2->data, elem2->size);
+
+ myobjv[2] = Tcl_NewByteArrayObj((u_char *)elem3->data, elem3->size);
+
+ thislist = Tcl_NewListObj(3, myobjv);
+
+ if (thislist == NULL)
+ return (TCL_ERROR);
+ return (Tcl_ListObjAppendElement(interp, list, thislist));
+}
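
_Set3DBTList packages the three DBTs produced by the new pget interfaces. For context, a minimal sketch of the cursor call that produces them (illustrative only, not part of this patch; assumes an open cursor on a secondary index): c_pget() fills in the secondary key, the primary key, and the primary data.

#include <string.h>
#include <db.h>

/* Illustrative sketch: step through a secondary index with DBC->c_pget. */
int
pget_next(DBC *sdbc)
{
    DBT skey, pkey, data;
    int ret;

    memset(&skey, 0, sizeof(skey));
    memset(&pkey, 0, sizeof(pkey));
    memset(&data, 0, sizeof(data));

    if ((ret = sdbc->c_pget(sdbc, &skey, &pkey, &data, DB_NEXT)) == 0) {
        /* skey/pkey/data reference library-owned memory at this point. */
    }
    return (ret);
}
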
+
+/*
+ * _SetMultiList -- build a list for return from multiple get.
+ *
+ * PUBLIC: int _SetMultiList __P((Tcl_Interp *,
+ * PUBLIC: Tcl_Obj *, DBT *, DBT*, int, int));
+ */
+int
+_SetMultiList(interp, list, key, data, type, flag)
+ Tcl_Interp *interp;
+ Tcl_Obj *list;
+ DBT *key, *data;
+ int type, flag;
+{
+ db_recno_t recno;
+ u_int32_t dlen, klen;
+ int result;
+ void *pointer, *dp, *kp;
+
+ recno = 0;
+ dlen = 0;
+ kp = NULL;
+
+ DB_MULTIPLE_INIT(pointer, data);
+ result = TCL_OK;
+
+ if (type == DB_RECNO || type == DB_QUEUE)
+ recno = *(db_recno_t *) key->data;
+ else
+ kp = key->data;
+ klen = key->size;
+ do {
+ if (flag & DB_MULTIPLE_KEY) {
+ if (type == DB_RECNO || type == DB_QUEUE)
+ DB_MULTIPLE_RECNO_NEXT(pointer,
+ data, recno, dp, dlen);
+ else
+ DB_MULTIPLE_KEY_NEXT(pointer,
+ data, kp, klen, dp, dlen);
+ } else
+ DB_MULTIPLE_NEXT(pointer, data, dp, dlen);
+
+ if (pointer == NULL)
+ break;
+
+ if (type == DB_RECNO || type == DB_QUEUE) {
+ result =
+ _SetListRecnoElem(interp, list, recno, dp, dlen);
+ recno++;
+ } else
+ result = _SetListElem(interp, list, kp, klen, dp, dlen);
+ } while (result == TCL_OK);
+
+ return (result);
+}
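
_SetMultiList walks a bulk-retrieval buffer with the DB_MULTIPLE_* macros. A compact standalone sketch of the same loop for a btree or hash database (illustrative only, not part of this patch; bufsize is assumed to be a multiple of 1024 and at least the database page size, as the bulk interface requires):

#include <errno.h>
#include <stdlib.h>
#include <string.h>
#include <db.h>

/* Illustrative sketch: bulk-read key/data pairs with DB_MULTIPLE_KEY. */
int
bulk_dump(DBC *dbc, u_int32_t bufsize)
{
    DBT key, data;
    void *buf, *p, *kp, *dp;
    u_int32_t klen, dlen;
    int ret;

    if ((buf = malloc(bufsize)) == NULL)
        return (ENOMEM);
    memset(&key, 0, sizeof(key));
    memset(&data, 0, sizeof(data));
    data.data = buf;
    data.ulen = bufsize;
    data.flags = DB_DBT_USERMEM;

    while ((ret = dbc->c_get(dbc,
        &key, &data, DB_MULTIPLE_KEY | DB_NEXT)) == 0) {
        DB_MULTIPLE_INIT(p, &data);
        while (p != NULL) {
            DB_MULTIPLE_KEY_NEXT(p, &data, kp, klen, dp, dlen);
            if (p == NULL)
                break;
            /* kp/klen and dp/dlen point into buf. */
        }
    }
    free(buf);
    return (ret == DB_NOTFOUND ? 0 : ret);
}
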
+/*
* PUBLIC: int _GetGlobPrefix __P((char *, char **));
*/
int
@@ -299,12 +412,12 @@ _GetGlobPrefix(pattern, prefix)
}
/*
- * PUBLIC: int _ReturnSetup __P((Tcl_Interp *, int, char *));
+ * PUBLIC: int _ReturnSetup __P((Tcl_Interp *, int, int, char *));
*/
int
-_ReturnSetup(interp, ret, errmsg)
+_ReturnSetup(interp, ret, ok, errmsg)
Tcl_Interp *interp;
- int ret;
+ int ret, ok;
char *errmsg;
{
char *msg;
@@ -327,12 +440,9 @@ _ReturnSetup(interp, ret, errmsg)
msg = db_strerror(ret);
Tcl_AppendResult(interp, msg, NULL);
- switch (ret) {
- case DB_NOTFOUND:
- case DB_KEYEXIST:
- case DB_KEYEMPTY:
+ if (ok)
return (TCL_OK);
- default:
+ else {
Tcl_SetErrorCode(interp, "BerkeleyDB", msg, NULL);
return (TCL_ERROR);
}
@@ -375,7 +485,7 @@ _ErrorFunc(pfx, msg)
* If we cannot allocate enough to put together the prefix
* and message then give them just the message.
*/
- if (__os_malloc(NULL, size, NULL, &err) != 0) {
+ if (__os_malloc(NULL, size, &err) != 0) {
Tcl_AddErrorInfo(interp, msg);
Tcl_AppendResult(interp, msg, "\n", NULL);
return;
@@ -383,7 +493,7 @@ _ErrorFunc(pfx, msg)
snprintf(err, size, "%s: %s", pfx, msg);
Tcl_AddErrorInfo(interp, err);
Tcl_AppendResult(interp, err, "\n", NULL);
- __os_free(err, size);
+ __os_free(NULL, err);
return;
}
@@ -399,8 +509,9 @@ _GetLsn(interp, obj, lsn)
DB_LSN *lsn;
{
Tcl_Obj **myobjv;
- int itmp, myobjc, result;
char msg[MSG_SIZE];
+ int myobjc, result;
+ u_int32_t tmp;
result = Tcl_ListObjGetElements(interp, obj, &myobjc, &myobjv);
if (result == TCL_ERROR)
@@ -411,15 +522,125 @@ _GetLsn(interp, obj, lsn)
Tcl_SetResult(interp, msg, TCL_VOLATILE);
return (result);
}
- result = Tcl_GetIntFromObj(interp, myobjv[0], &itmp);
+ result = _GetUInt32(interp, myobjv[0], &tmp);
if (result == TCL_ERROR)
return (result);
- lsn->file = itmp;
- result = Tcl_GetIntFromObj(interp, myobjv[1], &itmp);
- lsn->offset = itmp;
+ lsn->file = tmp;
+ result = _GetUInt32(interp, myobjv[1], &tmp);
+ lsn->offset = tmp;
return (result);
}
+/*
+ * _GetUInt32 --
+ * Get a u_int32_t from a Tcl object. Tcl_GetIntFromObj does the
+ * right thing most of the time, but on machines where a long is 8 bytes
+ * and an int is 4 bytes, it errors on integers between the maximum
+ * int32_t and the maximum u_int32_t. This is correct, but we generally
+ * want a u_int32_t in the end anyway, so we use Tcl_GetLongFromObj and do
+ * the bounds checking ourselves.
+ *
+ * This code looks much like Tcl_GetIntFromObj, only with a different
+ * bounds check. It's essentially Tcl_GetUnsignedIntFromObj, which
+ * unfortunately doesn't exist.
+ *
+ * PUBLIC: int _GetUInt32 __P((Tcl_Interp *, Tcl_Obj *, u_int32_t *));
+ */
+int
+_GetUInt32(interp, obj, resp)
+ Tcl_Interp *interp;
+ Tcl_Obj *obj;
+ u_int32_t *resp;
+{
+ int result;
+ long ltmp;
+
+ result = Tcl_GetLongFromObj(interp, obj, &ltmp);
+ if (result != TCL_OK)
+ return (result);
+
+ if ((unsigned long)ltmp != (u_int32_t)ltmp) {
+ if (interp != NULL) {
+ Tcl_ResetResult(interp);
+ Tcl_AppendToObj(Tcl_GetObjResult(interp),
+ "integer value too large for u_int32_t", -1);
+ }
+ return (TCL_ERROR);
+ }
+
+ *resp = (u_int32_t)ltmp;
+ return (TCL_OK);
+}
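
Stripped of the Tcl plumbing, the check in _GetUInt32 is just a round-trip comparison before narrowing the long (illustrative only; standard C99 types are used for this standalone version):

#include <stdint.h>

/* Illustrative sketch: the same range check without the Tcl calls. */
static int
long_to_uint32(long lval, uint32_t *out)
{
    if ((unsigned long)lval != (uint32_t)lval)
        return (-1);            /* does not fit in 32 unsigned bits */
    *out = (uint32_t)lval;
    return (0);
}
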
+
+/*
+ * tcl_flag_callback --
+ * Callback for db_pr.c functions that contain the FN struct mapping
+ * flag values to meaningful strings. This function appends a Tcl_Obj
+ * containing each pertinent flag string to the specified Tcl list.
+ */
+static void
+tcl_flag_callback(flags, fn, vtcbp)
+ u_int32_t flags;
+ const FN *fn;
+ void *vtcbp;
+{
+ const FN *fnp;
+ Tcl_Interp *interp;
+ Tcl_Obj *newobj, *listobj;
+ int result;
+ struct __tcl_callback_bundle *tcbp;
+
+ tcbp = (struct __tcl_callback_bundle *)vtcbp;
+ interp = tcbp->interp;
+ listobj = tcbp->obj;
+
+ for (fnp = fn; fnp->mask != 0; ++fnp)
+ if (LF_ISSET(fnp->mask)) {
+ newobj = Tcl_NewStringObj(fnp->name, strlen(fnp->name));
+ result =
+ Tcl_ListObjAppendElement(interp, listobj, newobj);
+
+ /*
+ * Tcl_ListObjAppendElement is defined to return TCL_OK
+ * unless listobj isn't actually a list (or convertible
+ * into one). If this is the case, we screwed up badly
+ * somehow.
+ */
+ DB_ASSERT(result == TCL_OK);
+ }
+}
+
+/*
+ * _GetFlagsList --
+ * Get a new Tcl object, containing a list of the string values
+ * associated with a particular set of flag values, given a function
+ * that can extract the right names for the right flags.
+ *
+ * PUBLIC: Tcl_Obj *_GetFlagsList __P((Tcl_Interp *, u_int32_t,
+ * PUBLIC: void (*)(u_int32_t, void *,
+ * PUBLIC: void (*)(u_int32_t, const FN *, void *))));
+ */
+Tcl_Obj *
+_GetFlagsList(interp, flags, func)
+ Tcl_Interp *interp;
+ u_int32_t flags;
+ void (*func)
+ __P((u_int32_t, void *, void (*)(u_int32_t, const FN *, void *)));
+{
+ Tcl_Obj *newlist;
+ struct __tcl_callback_bundle tcb;
+
+ newlist = Tcl_NewObj();
+
+ memset(&tcb, 0, sizeof(tcb));
+ tcb.interp = interp;
+ tcb.obj = newlist;
+
+ func(flags, &tcb, tcl_flag_callback);
+
+ return (newlist);
+}
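
tcl_flag_callback and _GetFlagsList depend on bundling an interpreter and a list object into one struct so both survive the trip through a single void * callback argument. The pattern in isolation (illustrative only; generic names):

#include <stdio.h>

/* Illustrative sketch: pass two values through one void * callback arg. */
struct bundle {
    const char *prefix;
    int count;
};

static void
visit(void *varg)
{
    struct bundle *b = varg;

    printf("%s: %d\n", b->prefix, b->count);
}

int
main(void)
{
    struct bundle b;

    b.prefix = "flags";
    b.count = 3;
    visit(&b);          /* callee unpacks both fields from one pointer */
    return (0);
}
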
+
int __debug_stop, __debug_on, __debug_print, __debug_test;
/*
@@ -432,9 +653,65 @@ _debug_check()
return;
if (__debug_print != 0) {
- printf("\r%6d:", __debug_on);
+ printf("\r%7d:", __debug_on);
fflush(stdout);
}
if (__debug_on++ == __debug_test || __debug_stop)
__db_loadme();
}
+
+/*
+ * XXX
+ * Tcl 8.1+ Tcl_GetByteArrayFromObj/Tcl_GetIntFromObj bug.
+ *
+ * There is a bug in Tcl 8.1+ and byte arrays in that if it happens
+ * to use an object as both a byte array and something else like
+ * an int, and you've done a Tcl_GetByteArrayFromObj, then you
+ * do a Tcl_GetIntFromObj, your memory is deleted.
+ *
+ * Workaround is for all byte arrays we want to use, if it can be
+ * represented as an integer, we copy it so that we don't lose the
+ * memory.
+ */
+/*
+ * PUBLIC: int _CopyObjBytes __P((Tcl_Interp *, Tcl_Obj *obj, void **,
+ * PUBLIC: u_int32_t *, int *));
+ */
+int
+_CopyObjBytes(interp, obj, newp, sizep, freep)
+ Tcl_Interp *interp;
+ Tcl_Obj *obj;
+ void **newp;
+ u_int32_t *sizep;
+ int *freep;
+{
+ void *tmp, *new;
+ int i, len, ret;
+
+ /*
+ * If the object is not an int, then just return the byte
+ * array because it won't be transformed out from under us.
+ * If it is a number, we need to copy it.
+ */
+ *freep = 0;
+ ret = Tcl_GetIntFromObj(interp, obj, &i);
+ tmp = Tcl_GetByteArrayFromObj(obj, &len);
+ *sizep = len;
+ if (ret == TCL_ERROR) {
+ Tcl_ResetResult(interp);
+ *newp = tmp;
+ return (0);
+ }
+
+ /*
+ * If we get here, we have an integer that might be reused
+ * at some other point so we cannot count on GetByteArray
+ * keeping our pointer valid.
+ */
+ if ((ret = __os_malloc(NULL, len, &new)) != 0)
+ return (ret);
+ memcpy(new, tmp, len);
+ *newp = new;
+ *freep = 1;
+ return (0);
+}
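
The hazard _CopyObjBytes works around is Tcl's internal-representation "shimmering": converting an object to a different internal type can free the representation a previously returned pointer referred to. A minimal sketch of the safe order of operations (illustrative only, not part of this patch; the helper name is an assumption):

#include <stdlib.h>
#include <string.h>
#include <tcl.h>

/*
 * Illustrative sketch: copy the byte-array representation before any
 * call that may convert the object's internal type.
 */
int
copy_bytes_first(Tcl_Obj *obj, void **copyp, int *lenp)
{
    unsigned char *bytes;
    void *copy;
    int ival;

    bytes = Tcl_GetByteArrayFromObj(obj, lenp);
    if ((copy = malloc((size_t)*lenp + 1)) == NULL)
        return (TCL_ERROR);
    memcpy(copy, bytes, (size_t)*lenp);
    *copyp = copy;

    /* This conversion may invalidate "bytes", but "copy" is safe. */
    (void)Tcl_GetIntFromObj(NULL, obj, &ival);
    return (TCL_OK);
}
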
diff --git a/bdb/tcl/tcl_lock.c b/bdb/tcl/tcl_lock.c
index 89f6eeb2b39..6cb96dbb0da 100644
--- a/bdb/tcl/tcl_lock.c
+++ b/bdb/tcl/tcl_lock.c
@@ -1,14 +1,14 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1999, 2000
+ * Copyright (c) 1999-2001
* Sleepycat Software. All rights reserved.
*/
#include "db_config.h"
#ifndef lint
-static const char revid[] = "$Id: tcl_lock.c,v 11.21 2001/01/11 18:19:55 bostic Exp $";
+static const char revid[] = "$Id: tcl_lock.c,v 11.47 2002/08/08 15:27:10 bostic Exp $";
#endif /* not lint */
#ifndef NO_SYSTEM_INCLUDES
@@ -20,7 +20,7 @@ static const char revid[] = "$Id: tcl_lock.c,v 11.21 2001/01/11 18:19:55 bostic
#endif
#include "db_int.h"
-#include "tcl_db.h"
+#include "dbinc/tcl_db.h"
/*
* Prototypes for procedures defined later in this file:
@@ -31,15 +31,23 @@ static int _GetThisLock __P((Tcl_Interp *, DB_ENV *, u_int32_t,
u_int32_t, DBT *, db_lockmode_t, char *));
static void _LockPutInfo __P((Tcl_Interp *, db_lockop_t, DB_LOCK *,
u_int32_t, DBT *));
-
+#if CONFIG_TEST
static char *lkmode[] = {
- "ng", "read", "write",
- "iwrite", "iread", "iwr",
+ "ng",
+ "read",
+ "write",
+ "iwrite",
+ "iread",
+ "iwr",
NULL
};
enum lkmode {
- LK_NG, LK_READ, LK_WRITE,
- LK_IWRITE, LK_IREAD, LK_IWR
+ LK_NG,
+ LK_READ,
+ LK_WRITE,
+ LK_IWRITE,
+ LK_IREAD,
+ LK_IWR
};
/*
@@ -56,16 +64,22 @@ tcl_LockDetect(interp, objc, objv, envp)
DB_ENV *envp; /* Environment pointer */
{
static char *ldopts[] = {
- "-lock_conflict",
+ "expire",
"default",
+ "maxlocks",
+ "minlocks",
+ "minwrites",
"oldest",
"random",
"youngest",
NULL
};
enum ldopts {
- LD_CONFLICT,
+ LD_EXPIRE,
LD_DEFAULT,
+ LD_MAXLOCKS,
+ LD_MINLOCKS,
+ LD_MINWRITES,
LD_OLDEST,
LD_RANDOM,
LD_YOUNGEST
@@ -82,10 +96,26 @@ tcl_LockDetect(interp, objc, objv, envp)
return (IS_HELP(objv[i]));
i++;
switch ((enum ldopts)optindex) {
+ case LD_EXPIRE:
+ FLAG_CHECK(policy);
+ policy = DB_LOCK_EXPIRE;
+ break;
case LD_DEFAULT:
FLAG_CHECK(policy);
policy = DB_LOCK_DEFAULT;
break;
+ case LD_MAXLOCKS:
+ FLAG_CHECK(policy);
+ policy = DB_LOCK_MAXLOCKS;
+ break;
+ case LD_MINWRITES:
+ FLAG_CHECK(policy);
+ policy = DB_LOCK_MINWRITE;
+ break;
+ case LD_MINLOCKS:
+ FLAG_CHECK(policy);
+ policy = DB_LOCK_MINLOCKS;
+ break;
case LD_OLDEST:
FLAG_CHECK(policy);
policy = DB_LOCK_OLDEST;
@@ -98,15 +128,12 @@ tcl_LockDetect(interp, objc, objv, envp)
FLAG_CHECK(policy);
policy = DB_LOCK_RANDOM;
break;
- case LD_CONFLICT:
- flag |= DB_LOCK_CONFLICT;
- break;
}
}
_debug_check();
- ret = lock_detect(envp, flag, policy, NULL);
- result = _ReturnSetup(interp, ret, "lock detect");
+ ret = envp->lock_detect(envp, flag, policy, NULL);
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), "lock detect");
return (result);
}
@@ -132,12 +159,14 @@ tcl_LockGet(interp, objc, objv, envp)
};
DBT obj;
Tcl_Obj *res;
+ void *otmp;
db_lockmode_t mode;
u_int32_t flag, lockid;
- int itmp, optindex, result;
+ int freeobj, optindex, result, ret;
char newname[MSG_SIZE];
result = TCL_OK;
+ freeobj = 0;
memset(newname, 0, MSG_SIZE);
if (objc != 5 && objc != 6) {
Tcl_WrongNumArgs(interp, 2, objv, "?-nowait? mode id obj");
@@ -152,28 +181,19 @@ tcl_LockGet(interp, objc, objv, envp)
memset(&obj, 0, sizeof(obj));
if ((result =
- Tcl_GetIntFromObj(interp, objv[objc-2], &itmp)) != TCL_OK)
+ _GetUInt32(interp, objv[objc-2], &lockid)) != TCL_OK)
return (result);
- lockid = itmp;
- /*
- * XXX
- * Tcl 8.1 Tcl_GetByteArrayFromObj/Tcl_GetIntFromObj bug.
- *
- * The line below was originally before the Tcl_GetIntFromObj.
- *
- * There is a bug in Tcl 8.1 and byte arrays in that if it happens
- * to use an object as both a byte array and something else like
- * an int, and you've done a Tcl_GetByteArrayFromObj, then you
- * do a Tcl_GetIntFromObj, your memory is deleted.
- *
- * Workaround is to make sure all Tcl_GetByteArrayFromObj calls
- * are done last.
- */
- obj.data = Tcl_GetByteArrayFromObj(objv[objc-1], &itmp);
- obj.size = itmp;
- if ((result = _LockMode(interp, objv[(objc - 3)], &mode)) != TCL_OK)
+ ret = _CopyObjBytes(interp, objv[objc-1], &otmp,
+ &obj.size, &freeobj);
+ if (ret != 0) {
+ result = _ReturnSetup(interp, ret,
+ DB_RETOK_STD(ret), "lock get");
return (result);
+ }
+ obj.data = otmp;
+ if ((result = _LockMode(interp, objv[(objc - 3)], &mode)) != TCL_OK)
+ goto out;
/*
* Any left over arg is the flag.
@@ -195,6 +215,9 @@ tcl_LockGet(interp, objc, objv, envp)
res = Tcl_NewStringObj(newname, strlen(newname));
Tcl_SetObjResult(interp, res);
}
+out:
+ if (freeobj)
+ (void)__os_free(envp, otmp);
return (result);
}
@@ -224,8 +247,8 @@ tcl_LockStat(interp, objc, objv, envp)
return (TCL_ERROR);
}
_debug_check();
- ret = lock_stat(envp, &sp, NULL);
- result = _ReturnSetup(interp, ret, "lock stat");
+ ret = envp->lock_stat(envp, &sp, 0);
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), "lock stat");
if (result == TCL_ERROR)
return (result);
/*
@@ -237,9 +260,11 @@ tcl_LockStat(interp, objc, objv, envp)
* MAKE_STAT_LIST assumes 'res' and 'error' label.
*/
MAKE_STAT_LIST("Region size", sp->st_regsize);
- MAKE_STAT_LIST("Max locks", sp->st_maxlocks);
- MAKE_STAT_LIST("Max lockers", sp->st_maxlockers);
- MAKE_STAT_LIST("Max objects", sp->st_maxobjects);
+ MAKE_STAT_LIST("Last allocated locker ID", sp->st_id);
+ MAKE_STAT_LIST("Current maximum unused locker ID", sp->st_cur_maxid);
+ MAKE_STAT_LIST("Maximum locks", sp->st_maxlocks);
+ MAKE_STAT_LIST("Maximum lockers", sp->st_maxlockers);
+ MAKE_STAT_LIST("Maximum objects", sp->st_maxobjects);
MAKE_STAT_LIST("Lock modes", sp->st_nmodes);
MAKE_STAT_LIST("Current number of locks", sp->st_nlocks);
MAKE_STAT_LIST("Maximum number of locks so far", sp->st_maxnlocks);
@@ -250,12 +275,49 @@ tcl_LockStat(interp, objc, objv, envp)
MAKE_STAT_LIST("Number of conflicts", sp->st_nconflicts);
MAKE_STAT_LIST("Lock requests", sp->st_nrequests);
MAKE_STAT_LIST("Lock releases", sp->st_nreleases);
+ MAKE_STAT_LIST("Lock requests that would have waited", sp->st_nnowaits);
MAKE_STAT_LIST("Deadlocks detected", sp->st_ndeadlocks);
MAKE_STAT_LIST("Number of region lock waits", sp->st_region_wait);
MAKE_STAT_LIST("Number of region lock nowaits", sp->st_region_nowait);
+ MAKE_STAT_LIST("Lock timeout value", sp->st_locktimeout);
+ MAKE_STAT_LIST("Number of lock timeouts", sp->st_nlocktimeouts);
+ MAKE_STAT_LIST("Transaction timeout value", sp->st_txntimeout);
+ MAKE_STAT_LIST("Number of transaction timeouts", sp->st_ntxntimeouts);
Tcl_SetObjResult(interp, res);
error:
- __os_free(sp, sizeof(*sp));
+ free(sp);
+ return (result);
+}
+
+/*
+ * tcl_LockTimeout --
+ *
+ * PUBLIC: int tcl_LockTimeout __P((Tcl_Interp *, int,
+ * PUBLIC: Tcl_Obj * CONST*, DB_ENV *));
+ */
+int
+tcl_LockTimeout(interp, objc, objv, envp)
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+ DB_ENV *envp; /* Environment pointer */
+{
+ long timeout;
+ int result, ret;
+
+ /*
+ * One arg, the timeout.
+ */
+ if (objc != 3) {
+ Tcl_WrongNumArgs(interp, 2, objv, "?timeout?");
+ return (TCL_ERROR);
+ }
+ result = Tcl_GetLongFromObj(interp, objv[2], &timeout);
+ if (result != TCL_OK)
+ return (result);
+ _debug_check();
+ ret = envp->set_timeout(envp, (u_int32_t)timeout, DB_SET_LOCK_TIMEOUT);
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), "lock timeout");
return (result);
}
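Both tcl_LockTimeout here and tcl_TxnTimeout in tcl_txn.c reduce to a single DB_ENV->set_timeout() call with a different flag. A hedged sketch of the equivalent direct C calls (the timeout values, in microseconds, are illustrative only):

#include <db.h>

/* Set a 100ms lock timeout and a 1s transaction timeout on an open
 * environment handle, as the Tcl wrappers ultimately do. */
static int
set_env_timeouts(DB_ENV *dbenv)
{
	int ret;

	if ((ret = dbenv->set_timeout(dbenv,
	    100000, DB_SET_LOCK_TIMEOUT)) != 0)
		return (ret);
	return (dbenv->set_timeout(dbenv, 1000000, DB_SET_TXN_TIMEOUT));
}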
@@ -265,10 +327,10 @@ error:
*/
static int
lock_Cmd(clientData, interp, objc, objv)
- ClientData clientData; /* Lock handle */
- Tcl_Interp *interp; /* Interpreter */
- int objc; /* How many arguments? */
- Tcl_Obj *CONST objv[]; /* The argument objects */
+ ClientData clientData; /* Lock handle */
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
{
static char *lkcmds[] = {
"put",
@@ -315,11 +377,12 @@ lock_Cmd(clientData, interp, objc, objv)
switch ((enum lkcmds)cmdindex) {
case LKPUT:
_debug_check();
- ret = lock_put(env, lock);
- result = _ReturnSetup(interp, ret, "lock put");
+ ret = env->lock_put(env, lock);
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "lock put");
(void)Tcl_DeleteCommand(interp, lkip->i_name);
_DeleteInfo(lkip);
- __os_free(lock, sizeof(DB_LOCK));
+ __os_free(env, lock);
break;
}
return (result);
@@ -332,9 +395,9 @@ lock_Cmd(clientData, interp, objc, objv)
*/
int
tcl_LockVec(interp, objc, objv, envp)
- Tcl_Interp *interp; /* Interpreter */
- int objc; /* How many arguments? */
- Tcl_Obj *CONST objv[]; /* The argument objects */
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
DB_ENV *envp; /* environment pointer */
{
static char *lvopts[] = {
@@ -345,25 +408,34 @@ tcl_LockVec(interp, objc, objv, envp)
LVNOWAIT
};
static char *lkops[] = {
- "get", "put", "put_all", "put_obj",
+ "get",
+ "put",
+ "put_all",
+ "put_obj",
+ "timeout",
NULL
};
enum lkops {
- LKGET, LKPUT, LKPUTALL, LKPUTOBJ
+ LKGET,
+ LKPUT,
+ LKPUTALL,
+ LKPUTOBJ,
+ LKTIMEOUT
};
DB_LOCK *lock;
DB_LOCKREQ list;
DBT obj;
Tcl_Obj **myobjv, *res, *thisop;
- db_lockmode_t mode;
+ void *otmp;
u_int32_t flag, lockid;
- int i, itmp, myobjc, optindex, result, ret;
+ int freeobj, i, myobjc, optindex, result, ret;
char *lockname, msg[MSG_SIZE], newname[MSG_SIZE];
result = TCL_OK;
memset(newname, 0, MSG_SIZE);
flag = 0;
- mode = 0;
+ freeobj = 0;
+
/*
* If -nowait is given, it MUST be first arg.
*/
@@ -385,10 +457,9 @@ tcl_LockVec(interp, objc, objv, envp)
/*
* Our next arg MUST be the locker ID.
*/
- result = Tcl_GetIntFromObj(interp, objv[i++], &itmp);
+ result = _GetUInt32(interp, objv[i++], &lockid);
if (result != TCL_OK)
return (result);
- lockid = itmp;
/*
* All other remaining args are operation tuples.
@@ -429,26 +500,19 @@ tcl_LockVec(interp, objc, objv, envp)
result = _LockMode(interp, myobjv[2], &list.mode);
if (result != TCL_OK)
goto error;
- /*
- * XXX
- * Tcl 8.1 Tcl_GetByteArrayFromObj/Tcl_GetIntFromObj
- * bug.
- *
- * There is a bug in Tcl 8.1 and byte arrays in that if
- * it happens to use an object as both a byte array and
- * something else like an int, and you've done a
- * Tcl_GetByteArrayFromObj, then you do a
- * Tcl_GetIntFromObj, your memory is deleted.
- *
- * Workaround is to make sure all
- * Tcl_GetByteArrayFromObj calls are done last.
- */
- obj.data = Tcl_GetByteArrayFromObj(myobjv[1], &itmp);
- obj.size = itmp;
+ ret = _CopyObjBytes(interp, myobjv[1], &otmp,
+ &obj.size, &freeobj);
+ if (ret != 0) {
+ result = _ReturnSetup(interp, ret,
+ DB_RETOK_STD(ret), "lock vec");
+ return (result);
+ }
+ obj.data = otmp;
ret = _GetThisLock(interp, envp, lockid, flag,
&obj, list.mode, newname);
if (ret != 0) {
- result = _ReturnSetup(interp, ret, "lock vec");
+ result = _ReturnSetup(interp, ret,
+ DB_RETOK_STD(ret), "lock vec");
thisop = Tcl_NewIntObj(ret);
(void)Tcl_ListObjAppendElement(interp, res,
thisop);
@@ -456,6 +520,10 @@ tcl_LockVec(interp, objc, objv, envp)
}
thisop = Tcl_NewStringObj(newname, strlen(newname));
(void)Tcl_ListObjAppendElement(interp, res, thisop);
+ if (freeobj) {
+ (void)__os_free(envp, otmp);
+ freeobj = 0;
+ }
continue;
case LKPUT:
if (myobjc != 2) {
@@ -493,17 +561,27 @@ tcl_LockVec(interp, objc, objv, envp)
goto error;
}
list.op = DB_LOCK_PUT_OBJ;
- obj.data = Tcl_GetByteArrayFromObj(myobjv[1], &itmp);
- obj.size = itmp;
+ ret = _CopyObjBytes(interp, myobjv[1], &otmp,
+ &obj.size, &freeobj);
+ if (ret != 0) {
+ result = _ReturnSetup(interp, ret,
+ DB_RETOK_STD(ret), "lock vec");
+ return (result);
+ }
+ obj.data = otmp;
list.obj = &obj;
break;
+ case LKTIMEOUT:
+ list.op = DB_LOCK_TIMEOUT;
+ break;
+
}
/*
* We get here, we have set up our request, now call
* lock_vec.
*/
_debug_check();
- ret = lock_vec(envp, lockid, flag, &list, 1, NULL);
+ ret = envp->lock_vec(envp, lockid, flag, &list, 1, NULL);
/*
* Now deal with whether or not the operation succeeded.
	 * Gets were done above; all of these are only puts.
@@ -511,7 +589,12 @@ tcl_LockVec(interp, objc, objv, envp)
thisop = Tcl_NewIntObj(ret);
result = Tcl_ListObjAppendElement(interp, res, thisop);
if (ret != 0 && result == TCL_OK)
- result = _ReturnSetup(interp, ret, "lock put");
+ result = _ReturnSetup(interp, ret,
+ DB_RETOK_STD(ret), "lock put");
+ if (freeobj) {
+ (void)__os_free(envp, otmp);
+ freeobj = 0;
+ }
/*
* We did a put of some kind. Since we did that,
* we have to delete the commands associated with
@@ -581,7 +664,7 @@ _LockPutInfo(interp, op, lock, lockid, objp)
found = 1;
if (found) {
(void)Tcl_DeleteCommand(interp, p->i_name);
- __os_free(p->i_lock, sizeof(DB_LOCK));
+ __os_free(NULL, p->i_lock);
_DeleteInfo(p);
}
}
@@ -615,16 +698,16 @@ _GetThisLock(interp, envp, lockid, flag, objp, mode, newname)
TCL_STATIC);
return (TCL_ERROR);
}
- ret = __os_malloc(envp, sizeof(DB_LOCK), NULL, &lock);
+ ret = __os_malloc(envp, sizeof(DB_LOCK), &lock);
if (ret != 0) {
Tcl_SetResult(interp, db_strerror(ret), TCL_STATIC);
return (TCL_ERROR);
}
_debug_check();
- ret = lock_get(envp, lockid, flag, objp, mode, lock);
- result = _ReturnSetup(interp, ret, "lock get");
+ ret = envp->lock_get(envp, lockid, flag, objp, mode, lock);
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), "lock get");
if (result == TCL_ERROR) {
- __os_free(lock, sizeof(DB_LOCK));
+ __os_free(envp, lock);
_DeleteInfo(ip);
return (result);
}
@@ -632,12 +715,12 @@ _GetThisLock(interp, envp, lockid, flag, objp, mode, newname)
* Success. Set up return. Set up new info
* and command widget for this lock.
*/
- ret = __os_malloc(envp, objp->size, NULL, &ip->i_lockobj.data);
+ ret = __os_malloc(envp, objp->size, &ip->i_lockobj.data);
if (ret != 0) {
Tcl_SetResult(interp, "Could not duplicate obj",
TCL_STATIC);
- (void)lock_put(envp, lock);
- __os_free(lock, sizeof(DB_LOCK));
+ (void)envp->lock_put(envp, lock);
+ __os_free(envp, lock);
_DeleteInfo(ip);
result = TCL_ERROR;
goto error;
@@ -653,3 +736,4 @@ _GetThisLock(interp, envp, lockid, flag, objp, mode, newname)
error:
return (result);
}
+#endif
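For reference, the method-style locking calls this file now uses (DB_ENV->lock_get/lock_put in place of the old lock_get/lock_put functions) look roughly like the following sketch; the lock object name is illustrative and error handling is minimal:

#include <string.h>
#include <db.h>

/* Allocate a locker id, take a write lock on a named object, release it. */
static int
lock_once(DB_ENV *dbenv)
{
	DB_LOCK lock;
	DBT obj;
	u_int32_t locker;
	int ret;
	static char objname[] = "example-object";

	if ((ret = dbenv->lock_id(dbenv, &locker)) != 0)
		return (ret);

	memset(&obj, 0, sizeof(obj));
	obj.data = objname;
	obj.size = sizeof(objname) - 1;

	if ((ret = dbenv->lock_get(dbenv,
	    locker, 0, &obj, DB_LOCK_WRITE, &lock)) != 0)
		return (ret);
	return (dbenv->lock_put(dbenv, &lock));
}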
diff --git a/bdb/tcl/tcl_log.c b/bdb/tcl/tcl_log.c
index 20f8e8c0277..be6eebfb013 100644
--- a/bdb/tcl/tcl_log.c
+++ b/bdb/tcl/tcl_log.c
@@ -1,14 +1,14 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1999, 2000
+ * Copyright (c) 1999-2002
* Sleepycat Software. All rights reserved.
*/
#include "db_config.h"
#ifndef lint
-static const char revid[] = "$Id: tcl_log.c,v 11.21 2000/11/30 00:58:45 ubell Exp $";
+static const char revid[] = "$Id: tcl_log.c,v 11.52 2002/08/14 20:11:57 bostic Exp $";
#endif /* not lint */
#ifndef NO_SYSTEM_INCLUDES
@@ -20,7 +20,12 @@ static const char revid[] = "$Id: tcl_log.c,v 11.21 2000/11/30 00:58:45 ubell Ex
#endif
#include "db_int.h"
-#include "tcl_db.h"
+#include "dbinc/log.h"
+#include "dbinc/tcl_db.h"
+#include "dbinc/txn.h"
+
+#ifdef CONFIG_TEST
+static int tcl_LogcGet __P((Tcl_Interp *, int, Tcl_Obj * CONST*, DB_LOGC *));
/*
* tcl_LogArchive --
@@ -73,8 +78,8 @@ tcl_LogArchive(interp, objc, objv, envp)
}
_debug_check();
list = NULL;
- ret = log_archive(envp, &list, flag, NULL);
- result = _ReturnSetup(interp, ret, "log archive");
+ ret = envp->log_archive(envp, &list, flag);
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), "log archive");
if (result == TCL_OK) {
res = Tcl_NewListObj(0, NULL);
for (file = list; file != NULL && *file != NULL; file++) {
@@ -86,7 +91,7 @@ tcl_LogArchive(interp, objc, objv, envp)
Tcl_SetObjResult(interp, res);
}
if (list != NULL)
- __os_free(list, 0);
+ __os_ufree(envp, list);
return (result);
}
@@ -166,24 +171,24 @@ tcl_LogFile(interp, objc, objv, envp)
name = NULL;
while (ret == ENOMEM) {
if (name != NULL)
- __os_free(name, len/2);
- ret = __os_malloc(envp, len, NULL, &name);
+ __os_free(envp, name);
+ ret = __os_malloc(envp, len, &name);
if (ret != 0) {
Tcl_SetResult(interp, db_strerror(ret), TCL_STATIC);
break;
}
_debug_check();
- ret = log_file(envp, &lsn, name, len);
+ ret = envp->log_file(envp, &lsn, name, len);
len *= 2;
}
- result = _ReturnSetup(interp, ret, "log_file");
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), "log_file");
if (ret == 0) {
res = Tcl_NewStringObj(name, strlen(name));
Tcl_SetObjResult(interp, res);
}
if (name != NULL)
- __os_free(name, len/2);
+ __os_free(envp, name);
return (result);
}
@@ -222,8 +227,8 @@ tcl_LogFlush(interp, objc, objv, envp)
lsnp = NULL;
_debug_check();
- ret = log_flush(envp, lsnp);
- result = _ReturnSetup(interp, ret, "log_flush");
+ ret = envp->log_flush(envp, lsnp);
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), "log_flush");
return (result);
}
@@ -240,111 +245,13 @@ tcl_LogGet(interp, objc, objv, envp)
Tcl_Obj *CONST objv[]; /* The argument objects */
DB_ENV *envp; /* Environment pointer */
{
- static char *loggetopts[] = {
- "-checkpoint", "-current", "-first",
- "-last", "-next", "-prev",
- "-set",
- NULL
- };
- enum loggetopts {
- LOGGET_CKP, LOGGET_CUR, LOGGET_FIRST,
- LOGGET_LAST, LOGGET_NEXT, LOGGET_PREV,
- LOGGET_SET
- };
- DB_LSN lsn;
- DBT data;
- Tcl_Obj *dataobj, *lsnlist, *myobjv[2], *res;
- u_int32_t flag;
- int i, myobjc, optindex, result, ret;
-
- result = TCL_OK;
- flag = 0;
- if (objc < 3) {
- Tcl_WrongNumArgs(interp, 2, objv, "?-args? lsn");
- return (TCL_ERROR);
- }
-
- /*
- * Get the command name index from the object based on the options
- * defined above.
- */
- i = 2;
- while (i < objc) {
- if (Tcl_GetIndexFromObj(interp, objv[i],
- loggetopts, "option", TCL_EXACT, &optindex) != TCL_OK)
- return (IS_HELP(objv[i]));
- i++;
- switch ((enum loggetopts)optindex) {
- case LOGGET_CKP:
- FLAG_CHECK(flag);
- flag |= DB_CHECKPOINT;
- break;
- case LOGGET_CUR:
- FLAG_CHECK(flag);
- flag |= DB_CURRENT;
- break;
- case LOGGET_FIRST:
- FLAG_CHECK(flag);
- flag |= DB_FIRST;
- break;
- case LOGGET_LAST:
- FLAG_CHECK(flag);
- flag |= DB_LAST;
- break;
- case LOGGET_NEXT:
- FLAG_CHECK(flag);
- flag |= DB_NEXT;
- break;
- case LOGGET_PREV:
- FLAG_CHECK(flag);
- flag |= DB_PREV;
- break;
- case LOGGET_SET:
- FLAG_CHECK(flag);
- flag |= DB_SET;
- if (i == objc) {
- Tcl_WrongNumArgs(interp, 2, objv, "?-set lsn?");
- result = TCL_ERROR;
- break;
- }
- result = _GetLsn(interp, objv[i++], &lsn);
- break;
- }
- }
- if (result == TCL_ERROR)
- return (result);
-
- memset(&data, 0, sizeof(data));
- data.flags |= DB_DBT_MALLOC;
- _debug_check();
- ret = log_get(envp, &lsn, &data, flag);
- res = Tcl_NewListObj(0, NULL);
- result = _ReturnSetup(interp, ret, "log_get");
- if (ret == 0) {
- /*
- * Success. Set up return list as {LSN data} where LSN
- * is a sublist {file offset}.
- */
- myobjc = 2;
- myobjv[0] = Tcl_NewIntObj(lsn.file);
- myobjv[1] = Tcl_NewIntObj(lsn.offset);
- lsnlist = Tcl_NewListObj(myobjc, myobjv);
- if (lsnlist == NULL) {
- if (data.data != NULL)
- __os_free(data.data, data.size);
- return (TCL_ERROR);
- }
- result = Tcl_ListObjAppendElement(interp, res, lsnlist);
- dataobj = Tcl_NewStringObj(data.data, data.size);
- result = Tcl_ListObjAppendElement(interp, res, dataobj);
- }
- if (data.data != NULL)
- __os_free(data.data, data.size);
+ COMPQUIET(objv, NULL);
+ COMPQUIET(objc, 0);
+ COMPQUIET(envp, NULL);
- if (result == TCL_OK)
- Tcl_SetObjResult(interp, res);
- return (result);
+ Tcl_SetResult(interp, "FAIL: log_get deprecated\n", TCL_STATIC);
+ return (TCL_ERROR);
}
/*
@@ -361,20 +268,22 @@ tcl_LogPut(interp, objc, objv, envp)
DB_ENV *envp; /* Environment pointer */
{
static char *logputopts[] = {
- "-checkpoint", "-curlsn", "-flush",
+ "-flush",
NULL
};
enum logputopts {
- LOGPUT_CKP, LOGPUT_CUR, LOGPUT_FLUSH
+ LOGPUT_FLUSH
};
DB_LSN lsn;
DBT data;
Tcl_Obj *intobj, *res;
+ void *dtmp;
u_int32_t flag;
- int itmp, optindex, result, ret;
+ int freedata, optindex, result, ret;
result = TCL_OK;
flag = 0;
+ freedata = 0;
if (objc < 3) {
Tcl_WrongNumArgs(interp, 2, objv, "?-args? record");
return (TCL_ERROR);
@@ -384,8 +293,14 @@ tcl_LogPut(interp, objc, objv, envp)
* Data/record must be the last arg.
*/
memset(&data, 0, sizeof(data));
- data.data = Tcl_GetByteArrayFromObj(objv[objc-1], &itmp);
- data.size = itmp;
+ ret = _CopyObjBytes(interp, objv[objc-1], &dtmp,
+ &data.size, &freedata);
+ if (ret != 0) {
+ result = _ReturnSetup(interp, ret,
+ DB_RETOK_STD(ret), "log put");
+ return (result);
+ }
+ data.data = dtmp;
/*
* Get the command name index from the object based on the options
@@ -397,12 +312,6 @@ tcl_LogPut(interp, objc, objv, envp)
return (IS_HELP(objv[2]));
}
switch ((enum logputopts)optindex) {
- case LOGPUT_CKP:
- flag = DB_CHECKPOINT;
- break;
- case LOGPUT_CUR:
- flag = DB_CURLSN;
- break;
case LOGPUT_FLUSH:
flag = DB_FLUSH;
break;
@@ -413,69 +322,20 @@ tcl_LogPut(interp, objc, objv, envp)
return (result);
_debug_check();
- ret = log_put(envp, &lsn, &data, flag);
- result = _ReturnSetup(interp, ret, "log_put");
+ ret = envp->log_put(envp, &lsn, &data, flag);
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), "log_put");
if (result == TCL_ERROR)
return (result);
res = Tcl_NewListObj(0, NULL);
- intobj = Tcl_NewIntObj(lsn.file);
+ intobj = Tcl_NewLongObj((long)lsn.file);
result = Tcl_ListObjAppendElement(interp, res, intobj);
- intobj = Tcl_NewIntObj(lsn.offset);
+ intobj = Tcl_NewLongObj((long)lsn.offset);
result = Tcl_ListObjAppendElement(interp, res, intobj);
Tcl_SetObjResult(interp, res);
+ if (freedata)
+ (void)__os_free(NULL, dtmp);
return (result);
}
-
-/*
- * tcl_LogRegister --
- *
- * PUBLIC: int tcl_LogRegister __P((Tcl_Interp *, int,
- * PUBLIC: Tcl_Obj * CONST*, DB_ENV *));
- */
-int
-tcl_LogRegister(interp, objc, objv, envp)
- Tcl_Interp *interp; /* Interpreter */
- int objc; /* How many arguments? */
- Tcl_Obj *CONST objv[]; /* The argument objects */
- DB_ENV *envp; /* Environment pointer */
-{
- DB *dbp;
- Tcl_Obj *res;
- int result, ret;
- char *arg, msg[MSG_SIZE];
-
- result = TCL_OK;
- if (objc != 4) {
- Tcl_WrongNumArgs(interp, 2, objv, "db filename");
- return (TCL_ERROR);
- }
- /*
- * First comes the DB.
- */
- arg = Tcl_GetStringFromObj(objv[2], NULL);
- dbp = NAME_TO_DB(arg);
- if (dbp == NULL) {
- snprintf(msg, MSG_SIZE,
- "LogRegister: Invalid db: %s\n", arg);
- Tcl_SetResult(interp, msg, TCL_VOLATILE);
- return (TCL_ERROR);
- }
-
- /*
- * Next is the filename.
- */
- arg = Tcl_GetStringFromObj(objv[3], NULL);
-
- _debug_check();
- ret = log_register(envp, dbp, arg);
- result = _ReturnSetup(interp, ret, "log_register");
- if (result == TCL_OK) {
- res = Tcl_NewIntObj((int)dbp->log_fileid);
- Tcl_SetObjResult(interp, res);
- }
- return (result);
-}
-
/*
* tcl_LogStat --
*
@@ -502,8 +362,8 @@ tcl_LogStat(interp, objc, objv, envp)
return (TCL_ERROR);
}
_debug_check();
- ret = log_stat(envp, &sp, NULL);
- result = _ReturnSetup(interp, ret, "log stat");
+ ret = envp->log_stat(envp, &sp, 0);
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), "log stat");
if (result == TCL_ERROR)
return (result);
@@ -520,7 +380,7 @@ tcl_LogStat(interp, objc, objv, envp)
MAKE_STAT_LIST("Region size", sp->st_regsize);
MAKE_STAT_LIST("Log file mode", sp->st_mode);
MAKE_STAT_LIST("Log record cache size", sp->st_lg_bsize);
- MAKE_STAT_LIST("Maximum log file size", sp->st_lg_max);
+ MAKE_STAT_LIST("Current log file size", sp->st_lg_size);
MAKE_STAT_LIST("Mbytes written", sp->st_w_mbytes);
MAKE_STAT_LIST("Bytes written (over Mb)", sp->st_w_bytes);
MAKE_STAT_LIST("Mbytes written since checkpoint", sp->st_wc_mbytes);
@@ -532,50 +392,219 @@ tcl_LogStat(interp, objc, objv, envp)
MAKE_STAT_LIST("Times log flushed", sp->st_scount);
MAKE_STAT_LIST("Current log file number", sp->st_cur_file);
MAKE_STAT_LIST("Current log file offset", sp->st_cur_offset);
+ MAKE_STAT_LIST("On-disk log file number", sp->st_disk_file);
+ MAKE_STAT_LIST("On-disk log file offset", sp->st_disk_offset);
+ MAKE_STAT_LIST("Max commits in a log flush", sp->st_maxcommitperflush);
+ MAKE_STAT_LIST("Min commits in a log flush", sp->st_mincommitperflush);
MAKE_STAT_LIST("Number of region lock waits", sp->st_region_wait);
MAKE_STAT_LIST("Number of region lock nowaits", sp->st_region_nowait);
Tcl_SetObjResult(interp, res);
error:
- __os_free(sp, sizeof(*sp));
+ free(sp);
return (result);
}
/*
- * tcl_LogUnregister --
+ * logc_Cmd --
+ * Implements the log cursor command.
*
- * PUBLIC: int tcl_LogUnregister __P((Tcl_Interp *, int,
- * PUBLIC: Tcl_Obj * CONST*, DB_ENV *));
+ * PUBLIC: int logc_Cmd __P((ClientData, Tcl_Interp *, int, Tcl_Obj * CONST*));
*/
int
-tcl_LogUnregister(interp, objc, objv, envp)
+logc_Cmd(clientData, interp, objc, objv)
+ ClientData clientData; /* Cursor handle */
Tcl_Interp *interp; /* Interpreter */
int objc; /* How many arguments? */
Tcl_Obj *CONST objv[]; /* The argument objects */
- DB_ENV *envp; /* Environment pointer */
{
- DB *dbp;
- char *arg, msg[MSG_SIZE];
- int result, ret;
+ static char *logccmds[] = {
+ "close",
+ "get",
+ NULL
+ };
+ enum logccmds {
+ LOGCCLOSE,
+ LOGCGET
+ };
+ DB_LOGC *logc;
+ DBTCL_INFO *logcip;
+ int cmdindex, result, ret;
+ Tcl_ResetResult(interp);
+ logc = (DB_LOGC *)clientData;
+ logcip = _PtrToInfo((void *)logc);
result = TCL_OK;
+
+ if (objc <= 1) {
+ Tcl_WrongNumArgs(interp, 1, objv, "command cmdargs");
+ return (TCL_ERROR);
+ }
+ if (logc == NULL) {
+ Tcl_SetResult(interp, "NULL logc pointer", TCL_STATIC);
+ return (TCL_ERROR);
+ }
+ if (logcip == NULL) {
+ Tcl_SetResult(interp, "NULL logc info pointer", TCL_STATIC);
+ return (TCL_ERROR);
+ }
+
/*
- * 1 arg for this. Error if more or less.
+	 * Get the command name index from the object based on the logccmds
+	 * defined above.
*/
- if (objc != 3) {
- Tcl_WrongNumArgs(interp, 2, objv, NULL);
- return (TCL_ERROR);
+ if (Tcl_GetIndexFromObj(interp, objv[1], logccmds, "command",
+ TCL_EXACT, &cmdindex) != TCL_OK)
+ return (IS_HELP(objv[1]));
+ switch ((enum logccmds)cmdindex) {
+ case LOGCCLOSE:
+ /*
+ * No args for this. Error if there are some.
+ */
+ if (objc > 2) {
+ Tcl_WrongNumArgs(interp, 2, objv, NULL);
+ return (TCL_ERROR);
+ }
+ _debug_check();
+ ret = logc->close(logc, 0);
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "logc close");
+ if (result == TCL_OK) {
+ (void)Tcl_DeleteCommand(interp, logcip->i_name);
+ _DeleteInfo(logcip);
+ }
+ break;
+ case LOGCGET:
+ result = tcl_LogcGet(interp, objc, objv, logc);
+ break;
}
- arg = Tcl_GetStringFromObj(objv[2], NULL);
- dbp = NAME_TO_DB(arg);
- if (dbp == NULL) {
- snprintf(msg, MSG_SIZE,
- "log_unregister: Invalid db identifier: %s\n", arg);
- Tcl_SetResult(interp, msg, TCL_VOLATILE);
+ return (result);
+}
+
+static int
+tcl_LogcGet(interp, objc, objv, logc)
+ Tcl_Interp *interp;
+ int objc;
+ Tcl_Obj * CONST *objv;
+ DB_LOGC *logc;
+{
+ static char *logcgetopts[] = {
+ "-current",
+ "-first",
+ "-last",
+ "-next",
+ "-prev",
+ "-set",
+ NULL
+ };
+ enum logcgetopts {
+ LOGCGET_CURRENT,
+ LOGCGET_FIRST,
+ LOGCGET_LAST,
+ LOGCGET_NEXT,
+ LOGCGET_PREV,
+ LOGCGET_SET
+ };
+ DB_LSN lsn;
+ DBT data;
+ Tcl_Obj *dataobj, *lsnlist, *myobjv[2], *res;
+ u_int32_t flag;
+ int i, myobjc, optindex, result, ret;
+
+ result = TCL_OK;
+ res = NULL;
+ flag = 0;
+
+ if (objc < 3) {
+ Tcl_WrongNumArgs(interp, 2, objv, "?-args? lsn");
return (TCL_ERROR);
}
+
+ /*
+ * Get the command name index from the object based on the options
+ * defined above.
+ */
+ i = 2;
+ while (i < objc) {
+ if (Tcl_GetIndexFromObj(interp, objv[i],
+ logcgetopts, "option", TCL_EXACT, &optindex) != TCL_OK)
+ return (IS_HELP(objv[i]));
+ i++;
+ switch ((enum logcgetopts)optindex) {
+ case LOGCGET_CURRENT:
+ FLAG_CHECK(flag);
+ flag |= DB_CURRENT;
+ break;
+ case LOGCGET_FIRST:
+ FLAG_CHECK(flag);
+ flag |= DB_FIRST;
+ break;
+ case LOGCGET_LAST:
+ FLAG_CHECK(flag);
+ flag |= DB_LAST;
+ break;
+ case LOGCGET_NEXT:
+ FLAG_CHECK(flag);
+ flag |= DB_NEXT;
+ break;
+ case LOGCGET_PREV:
+ FLAG_CHECK(flag);
+ flag |= DB_PREV;
+ break;
+ case LOGCGET_SET:
+ FLAG_CHECK(flag);
+ flag |= DB_SET;
+ if (i == objc) {
+ Tcl_WrongNumArgs(interp, 2, objv, "?-set lsn?");
+ result = TCL_ERROR;
+ break;
+ }
+ result = _GetLsn(interp, objv[i++], &lsn);
+ break;
+ }
+ }
+
+ if (result == TCL_ERROR)
+ return (result);
+
+ memset(&data, 0, sizeof(data));
+
_debug_check();
- ret = log_unregister(envp, dbp);
- result = _ReturnSetup(interp, ret, "log_unregister");
+ ret = logc->get(logc, &lsn, &data, flag);
+
+ res = Tcl_NewListObj(0, NULL);
+ if (res == NULL)
+ goto memerr;
+
+ if (ret == 0) {
+ /*
+ * Success. Set up return list as {LSN data} where LSN
+ * is a sublist {file offset}.
+ */
+ myobjc = 2;
+ myobjv[0] = Tcl_NewLongObj((long)lsn.file);
+ myobjv[1] = Tcl_NewLongObj((long)lsn.offset);
+ lsnlist = Tcl_NewListObj(myobjc, myobjv);
+ if (lsnlist == NULL)
+ goto memerr;
+
+ result = Tcl_ListObjAppendElement(interp, res, lsnlist);
+ dataobj = Tcl_NewStringObj(data.data, data.size);
+ if (dataobj == NULL) {
+ goto memerr;
+ }
+ result = Tcl_ListObjAppendElement(interp, res, dataobj);
+ } else
+ result = _ReturnSetup(interp, ret, DB_RETOK_LGGET(ret),
+ "DB_LOGC->get");
+
+ Tcl_SetObjResult(interp, res);
+
+ if (0) {
+memerr: if (res != NULL)
+ Tcl_DecrRefCount(res);
+ Tcl_SetResult(interp, "allocation failed", TCL_STATIC);
+ }
return (result);
}
+#endif
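The new logc widget wraps the DB_LOGC cursor interface that replaced log_get. A sketch of the same sequence from the C side (assuming the standard DB 4.1 log-cursor API; the output format is arbitrary):

#include <stdio.h>
#include <string.h>
#include <db.h>

/* Walk the whole log forward, printing each record's LSN and size. */
static int
dump_log(DB_ENV *dbenv)
{
	DB_LOGC *logc;
	DB_LSN lsn;
	DBT data;
	int ret, t_ret;

	if ((ret = dbenv->log_cursor(dbenv, &logc, 0)) != 0)
		return (ret);

	memset(&data, 0, sizeof(data));
	while ((ret = logc->get(logc, &lsn, &data, DB_NEXT)) == 0)
		printf("[%lu][%lu]: %lu bytes\n", (unsigned long)lsn.file,
		    (unsigned long)lsn.offset, (unsigned long)data.size);
	if (ret == DB_NOTFOUND)		/* Running off the end is expected. */
		ret = 0;

	if ((t_ret = logc->close(logc, 0)) != 0 && ret == 0)
		ret = t_ret;
	return (ret);
}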
diff --git a/bdb/tcl/tcl_mp.c b/bdb/tcl/tcl_mp.c
index b424deea242..0c4411cb58a 100644
--- a/bdb/tcl/tcl_mp.c
+++ b/bdb/tcl/tcl_mp.c
@@ -1,14 +1,14 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1999, 2000
+ * Copyright (c) 1999-2001
* Sleepycat Software. All rights reserved.
*/
#include "db_config.h"
#ifndef lint
-static const char revid[] = "$Id: tcl_mp.c,v 11.24 2001/01/09 16:13:59 sue Exp $";
+static const char revid[] = "$Id: tcl_mp.c,v 11.39 2002/08/06 06:21:27 bostic Exp $";
#endif /* not lint */
#ifndef NO_SYSTEM_INCLUDES
@@ -20,7 +20,7 @@ static const char revid[] = "$Id: tcl_mp.c,v 11.24 2001/01/09 16:13:59 sue Exp $
#endif
#include "db_int.h"
-#include "tcl_db.h"
+#include "dbinc/tcl_db.h"
/*
* Prototypes for procedures defined later in this file:
@@ -45,7 +45,7 @@ static int tcl_PgIsset __P((Tcl_Interp *, int, Tcl_Obj * CONST*,
*/
void
_MpInfoDelete(interp, mpip)
- Tcl_Interp *interp; /* Interpreter */
+ Tcl_Interp *interp; /* Interpreter */
DBTCL_INFO *mpip; /* Info for mp */
{
DBTCL_INFO *nextp, *p;
@@ -63,6 +63,7 @@ _MpInfoDelete(interp, mpip)
}
}
+#if CONFIG_TEST
/*
* tcl_MpSync --
*
@@ -76,25 +77,28 @@ tcl_MpSync(interp, objc, objv, envp)
DB_ENV *envp; /* Environment pointer */
{
- DB_LSN lsn;
+ DB_LSN lsn, *lsnp;
int result, ret;
result = TCL_OK;
+ lsnp = NULL;
/*
* No flags, must be 3 args.
*/
- if (objc != 3) {
+ if (objc == 3) {
+ result = _GetLsn(interp, objv[2], &lsn);
+ if (result == TCL_ERROR)
+ return (result);
+ lsnp = &lsn;
+ }
+ else if (objc != 2) {
Tcl_WrongNumArgs(interp, 2, objv, "lsn");
return (TCL_ERROR);
}
- result = _GetLsn(interp, objv[2], &lsn);
- if (result == TCL_ERROR)
- return (result);
-
_debug_check();
- ret = memp_sync(envp, &lsn);
- result = _ReturnSetup(interp, ret, "memp sync");
+ ret = envp->memp_sync(envp, lsnp);
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), "memp sync");
return (result);
}
@@ -132,8 +136,8 @@ tcl_MpTrickle(interp, objc, objv, envp)
return (result);
_debug_check();
- ret = memp_trickle(envp, percent, &pages);
- result = _ReturnSetup(interp, ret, "memp trickle");
+ ret = envp->memp_trickle(envp, percent, &pages);
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), "memp trickle");
if (result == TCL_ERROR)
return (result);
@@ -264,29 +268,39 @@ tcl_Mp(interp, objc, objv, envp, envip)
TCL_STATIC);
return (TCL_ERROR);
}
+
+ _debug_check();
+ if ((ret = envp->memp_fcreate(envp, &mpf, 0)) != 0) {
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), "mpool");
+ _DeleteInfo(ip);
+ goto error;
+ }
+
/*
- * XXX finfop is NULL here. Interface currently doesn't
- * have all the stuff. Should expand interface.
+ * XXX
+ * Interface doesn't currently support DB_MPOOLFILE configuration.
*/
- _debug_check();
- ret = memp_fopen(envp, file, flag, mode, (size_t)pgsize, NULL, &mpf);
- if (ret != 0) {
- result = _ReturnSetup(interp, ret, "mpool");
+ if ((ret = mpf->open(mpf, file, flag, mode, (size_t)pgsize)) != 0) {
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), "mpool");
_DeleteInfo(ip);
- } else {
- /*
- * Success. Set up return. Set up new info
- * and command widget for this mpool.
- */
- envip->i_envmpid++;
- ip->i_parent = envip;
- ip->i_pgsz = pgsize;
- _SetInfoData(ip, mpf);
- Tcl_CreateObjCommand(interp, newname,
- (Tcl_ObjCmdProc *)mp_Cmd, (ClientData)mpf, NULL);
- res = Tcl_NewStringObj(newname, strlen(newname));
- Tcl_SetObjResult(interp, res);
+
+ (void)mpf->close(mpf, 0);
+ goto error;
}
+
+ /*
+ * Success. Set up return. Set up new info and command widget for
+ * this mpool.
+ */
+ envip->i_envmpid++;
+ ip->i_parent = envip;
+ ip->i_pgsz = pgsize;
+ _SetInfoData(ip, mpf);
+ Tcl_CreateObjCommand(interp, newname,
+ (Tcl_ObjCmdProc *)mp_Cmd, (ClientData)mpf, NULL);
+ res = Tcl_NewStringObj(newname, strlen(newname));
+ Tcl_SetObjResult(interp, res);
+
error:
return (result);
}
@@ -320,8 +334,8 @@ tcl_MpStat(interp, objc, objv, envp)
return (TCL_ERROR);
}
_debug_check();
- ret = memp_stat(envp, &sp, &fsp, NULL);
- result = _ReturnSetup(interp, ret, "memp stat");
+ ret = envp->memp_stat(envp, &sp, &fsp, 0);
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), "memp stat");
if (result == TCL_ERROR)
return (result);
@@ -333,35 +347,48 @@ tcl_MpStat(interp, objc, objv, envp)
/*
* MAKE_STAT_LIST assumes 'res' and 'error' label.
*/
- MAKE_STAT_LIST("Region size", sp->st_regsize);
MAKE_STAT_LIST("Cache size (gbytes)", sp->st_gbytes);
MAKE_STAT_LIST("Cache size (bytes)", sp->st_bytes);
- MAKE_STAT_LIST("Cache hits", sp->st_cache_hit);
- MAKE_STAT_LIST("Cache misses", sp->st_cache_miss);
MAKE_STAT_LIST("Number of caches", sp->st_ncache);
+ MAKE_STAT_LIST("Region size", sp->st_regsize);
MAKE_STAT_LIST("Pages mapped into address space", sp->st_map);
+ MAKE_STAT_LIST("Cache hits", sp->st_cache_hit);
+ MAKE_STAT_LIST("Cache misses", sp->st_cache_miss);
MAKE_STAT_LIST("Pages created", sp->st_page_create);
MAKE_STAT_LIST("Pages read in", sp->st_page_in);
MAKE_STAT_LIST("Pages written", sp->st_page_out);
MAKE_STAT_LIST("Clean page evictions", sp->st_ro_evict);
MAKE_STAT_LIST("Dirty page evictions", sp->st_rw_evict);
+ MAKE_STAT_LIST("Dirty pages trickled", sp->st_page_trickle);
+ MAKE_STAT_LIST("Cached pages", sp->st_pages);
+ MAKE_STAT_LIST("Cached clean pages", sp->st_page_clean);
+ MAKE_STAT_LIST("Cached dirty pages", sp->st_page_dirty);
MAKE_STAT_LIST("Hash buckets", sp->st_hash_buckets);
MAKE_STAT_LIST("Hash lookups", sp->st_hash_searches);
MAKE_STAT_LIST("Longest hash chain found", sp->st_hash_longest);
MAKE_STAT_LIST("Hash elements examined", sp->st_hash_examined);
- MAKE_STAT_LIST("Cached clean pages", sp->st_page_clean);
- MAKE_STAT_LIST("Cached dirty pages", sp->st_page_dirty);
- MAKE_STAT_LIST("Dirty pages trickled", sp->st_page_trickle);
- MAKE_STAT_LIST("Number of region lock waits", sp->st_region_wait);
+ MAKE_STAT_LIST("Number of hash bucket nowaits", sp->st_hash_nowait);
+ MAKE_STAT_LIST("Number of hash bucket waits", sp->st_hash_wait);
+ MAKE_STAT_LIST("Maximum number of hash bucket waits",
+ sp->st_hash_max_wait);
MAKE_STAT_LIST("Number of region lock nowaits", sp->st_region_nowait);
+ MAKE_STAT_LIST("Number of region lock waits", sp->st_region_wait);
+ MAKE_STAT_LIST("Page allocations", sp->st_alloc);
+ MAKE_STAT_LIST("Buckets examined during allocation",
+ sp->st_alloc_buckets);
+ MAKE_STAT_LIST("Maximum buckets examined during allocation",
+ sp->st_alloc_max_buckets);
+ MAKE_STAT_LIST("Pages examined during allocation", sp->st_alloc_pages);
+ MAKE_STAT_LIST("Maximum pages examined during allocation",
+ sp->st_alloc_max_pages);
+
/*
* Save global stat list as res1. The MAKE_STAT_LIST
* macro assumes 'res' so we'll use that to build up
* our per-file sublist.
*/
res1 = res;
- savefsp = fsp;
- for (; fsp != NULL && *fsp != NULL; fsp++) {
+ for (savefsp = fsp; fsp != NULL && *fsp != NULL; fsp++) {
res = Tcl_NewObj();
result = _SetListElem(interp, res, "File Name",
strlen("File Name"), (*fsp)->file_name,
@@ -369,16 +396,16 @@ tcl_MpStat(interp, objc, objv, envp)
if (result != TCL_OK)
goto error;
MAKE_STAT_LIST("Page size", (*fsp)->st_pagesize);
- MAKE_STAT_LIST("Cache Hits", (*fsp)->st_cache_hit);
- MAKE_STAT_LIST("Cache Misses", (*fsp)->st_cache_miss);
MAKE_STAT_LIST("Pages mapped into address space",
(*fsp)->st_map);
+ MAKE_STAT_LIST("Cache hits", (*fsp)->st_cache_hit);
+ MAKE_STAT_LIST("Cache misses", (*fsp)->st_cache_miss);
MAKE_STAT_LIST("Pages created", (*fsp)->st_page_create);
MAKE_STAT_LIST("Pages read in", (*fsp)->st_page_in);
MAKE_STAT_LIST("Pages written", (*fsp)->st_page_out);
/*
- * Now that we have a complete "per-file" stat
- * list, append that to the other list.
+ * Now that we have a complete "per-file" stat list, append
+ * that to the other list.
*/
result = Tcl_ListObjAppendElement(interp, res1, res);
if (result != TCL_OK)
@@ -386,9 +413,9 @@ tcl_MpStat(interp, objc, objv, envp)
}
Tcl_SetObjResult(interp, res1);
error:
- __os_free(sp, sizeof(*sp));
+ free(sp);
if (savefsp != NULL)
- __os_free(savefsp, 0);
+ free(savefsp);
return (result);
}
@@ -398,17 +425,21 @@ error:
*/
static int
mp_Cmd(clientData, interp, objc, objv)
- ClientData clientData; /* Mp handle */
- Tcl_Interp *interp; /* Interpreter */
- int objc; /* How many arguments? */
- Tcl_Obj *CONST objv[]; /* The argument objects */
+ ClientData clientData; /* Mp handle */
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
{
static char *mpcmds[] = {
- "close", "fsync", "get",
+ "close",
+ "fsync",
+ "get",
NULL
};
enum mpcmds {
- MPCLOSE, MPFSYNC, MPGET
+ MPCLOSE,
+ MPFSYNC,
+ MPGET
};
DB_MPOOLFILE *mp;
int cmdindex, length, result, ret;
@@ -447,8 +478,9 @@ mp_Cmd(clientData, interp, objc, objv)
return (TCL_ERROR);
}
_debug_check();
- ret = memp_fclose(mp);
- result = _ReturnSetup(interp, ret, "mp close");
+ ret = mp->close(mp, 0);
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "mp close");
_MpInfoDelete(interp, mpip);
(void)Tcl_DeleteCommand(interp, mpip->i_name);
_DeleteInfo(mpip);
@@ -459,7 +491,7 @@ mp_Cmd(clientData, interp, objc, objv)
return (TCL_ERROR);
}
_debug_check();
- ret = memp_fsync(mp);
+ ret = mp->sync(mp);
res = Tcl_NewIntObj(ret);
break;
case MPGET:
@@ -487,11 +519,15 @@ tcl_MpGet(interp, objc, objv, mp, mpip)
DBTCL_INFO *mpip; /* mp info pointer */
{
static char *mpget[] = {
- "-create", "-last", "-new",
+ "-create",
+ "-last",
+ "-new",
NULL
};
enum mpget {
- MPGET_CREATE, MPGET_LAST, MPGET_NEW
+ MPGET_CREATE,
+ MPGET_LAST,
+ MPGET_NEW
};
DBTCL_INFO *ip;
@@ -559,8 +595,8 @@ tcl_MpGet(interp, objc, objv, mp, mpip)
}
_debug_check();
pgno = ipgno;
- ret = memp_fget(mp, &pgno, flag, &page);
- result = _ReturnSetup(interp, ret, "mpool get");
+ ret = mp->get(mp, &pgno, flag, &page);
+ result = _ReturnSetup(interp, ret, DB_RETOK_MPGET(ret), "mpool get");
if (result == TCL_ERROR)
_DeleteInfo(ip);
else {
@@ -588,10 +624,10 @@ error:
*/
static int
pg_Cmd(clientData, interp, objc, objv)
- ClientData clientData; /* Page handle */
- Tcl_Interp *interp; /* Interpreter */
- int objc; /* How many arguments? */
- Tcl_Obj *CONST objv[]; /* The argument objects */
+ ClientData clientData; /* Page handle */
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
{
static char *pgcmds[] = {
"init",
@@ -648,7 +684,7 @@ pg_Cmd(clientData, interp, objc, objv)
res = NULL;
switch ((enum pgcmds)cmdindex) {
case PGNUM:
- res = Tcl_NewIntObj(pgip->i_pgno);
+ res = Tcl_NewLongObj((long)pgip->i_pgno);
break;
case PGSIZE:
res = Tcl_NewLongObj(pgip->i_pgsz);
@@ -685,11 +721,15 @@ tcl_Pg(interp, objc, objv, page, mp, pgip, putop)
int putop; /* Operation */
{
static char *pgopt[] = {
- "-clean", "-dirty", "-discard",
+ "-clean",
+ "-dirty",
+ "-discard",
NULL
};
enum pgopt {
- PGCLEAN, PGDIRTY, PGDISCARD
+ PGCLEAN,
+ PGDIRTY,
+ PGDISCARD
};
u_int32_t flag;
int i, optindex, result, ret;
@@ -717,11 +757,11 @@ tcl_Pg(interp, objc, objv, page, mp, pgip, putop)
_debug_check();
if (putop)
- ret = memp_fput(mp, page, flag);
+ ret = mp->put(mp, page, flag);
else
- ret = memp_fset(mp, page, flag);
+ ret = mp->set(mp, page, flag);
- result = _ReturnSetup(interp, ret, "page");
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), "page");
if (putop) {
(void)Tcl_DeleteCommand(interp, pgip->i_name);
@@ -756,7 +796,8 @@ tcl_PgInit(interp, objc, objv, page, pgip)
s = Tcl_GetByteArrayFromObj(objv[2], &length);
if (s == NULL)
return (TCL_ERROR);
- memcpy(page, s, ((size_t)length < pgsz) ? length : pgsz);
+ memcpy(page, s,
+ ((size_t)length < pgsz) ? (size_t)length : pgsz);
result = TCL_OK;
} else {
p = (long *)page;
@@ -795,8 +836,8 @@ tcl_PgIsset(interp, objc, objv, page, pgip)
return (TCL_ERROR);
result = TCL_OK;
- if (memcmp(page,
- s, ((size_t)length < pgsz) ? length : pgsz ) != 0) {
+ if (memcmp(page, s,
+ ((size_t)length < pgsz) ? (size_t)length : pgsz ) != 0) {
res = Tcl_NewIntObj(0);
Tcl_SetObjResult(interp, res);
return (result);
@@ -820,3 +861,4 @@ tcl_PgIsset(interp, objc, objv, page, pgip)
Tcl_SetObjResult(interp, res);
return (result);
}
+#endif
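The mpool hunk above switches from memp_fopen() to the handle-based DB_MPOOLFILE interface (memp_fcreate followed by mpf->open). A sketch of that open sequence in plain C (assuming the DB 4.1 API; the file name, mode, and page size are illustrative):

#include <db.h>

/* Create a DB_MPOOLFILE handle and open a backing file under it. */
static int
open_mpf(DB_ENV *dbenv, DB_MPOOLFILE **mpfp)
{
	DB_MPOOLFILE *mpf;
	int ret;

	if ((ret = dbenv->memp_fcreate(dbenv, &mpf, 0)) != 0)
		return (ret);

	/* 1KB pages, mode 0644; close the handle if the open fails. */
	if ((ret = mpf->open(mpf, "mpool.db", DB_CREATE, 0644, 1024)) != 0) {
		(void)mpf->close(mpf, 0);
		return (ret);
	}
	*mpfp = mpf;
	return (0);
}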
diff --git a/bdb/tcl/tcl_rep.c b/bdb/tcl/tcl_rep.c
new file mode 100644
index 00000000000..c72c9971338
--- /dev/null
+++ b/bdb/tcl/tcl_rep.c
@@ -0,0 +1,405 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1999-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: tcl_rep.c,v 11.85 2002/08/06 04:45:44 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <stdlib.h>
+#include <string.h>
+#include <tcl.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/tcl_db.h"
+
+#if CONFIG_TEST
+/*
+ * tcl_RepElect --
+ * Call DB_ENV->rep_elect().
+ *
+ * PUBLIC: int tcl_RepElect
+ * PUBLIC: __P((Tcl_Interp *, int, Tcl_Obj * CONST *, DB_ENV *));
+ */
+int
+tcl_RepElect(interp, objc, objv, dbenv)
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+ DB_ENV *dbenv; /* Environment pointer */
+{
+ int eid, nsites, pri, result, ret;
+ u_int32_t timeout;
+
+ if (objc != 5) {
+ Tcl_WrongNumArgs(interp, 5, objv, "nsites pri timeout");
+ return (TCL_ERROR);
+ }
+
+ if ((result = Tcl_GetIntFromObj(interp, objv[2], &nsites)) != TCL_OK)
+ return (result);
+ if ((result = Tcl_GetIntFromObj(interp, objv[3], &pri)) != TCL_OK)
+ return (result);
+ if ((result = _GetUInt32(interp, objv[4], &timeout)) != TCL_OK)
+ return (result);
+
+ _debug_check();
+ if ((ret = dbenv->rep_elect(dbenv, nsites, pri, timeout, &eid)) != 0)
+ return (_ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "env rep_elect"));
+
+ Tcl_SetObjResult(interp, Tcl_NewIntObj(eid));
+
+ return (TCL_OK);
+}
+#endif
+
+#if CONFIG_TEST
+/*
+ * tcl_RepFlush --
+ * Call DB_ENV->rep_flush().
+ *
+ * PUBLIC: int tcl_RepFlush
+ * PUBLIC: __P((Tcl_Interp *, int, Tcl_Obj * CONST *, DB_ENV *));
+ */
+int
+tcl_RepFlush(interp, objc, objv, dbenv)
+ Tcl_Interp *interp;
+ int objc;
+ Tcl_Obj *CONST objv[];
+ DB_ENV *dbenv;
+{
+ int ret;
+
+ if (objc != 2) {
+ Tcl_WrongNumArgs(interp, 2, objv, "");
+ return TCL_ERROR;
+ }
+
+ _debug_check();
+ ret = dbenv->rep_flush(dbenv);
+ return (_ReturnSetup(interp, ret, DB_RETOK_STD(ret), "env rep_flush"));
+}
+#endif
+#if CONFIG_TEST
+/*
+ * tcl_RepLimit --
+ * Call DB_ENV->set_rep_limit().
+ *
+ * PUBLIC: int tcl_RepLimit
+ * PUBLIC: __P((Tcl_Interp *, int, Tcl_Obj * CONST *, DB_ENV *));
+ */
+int
+tcl_RepLimit(interp, objc, objv, dbenv)
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+ DB_ENV *dbenv; /* Environment pointer */
+{
+ int result, ret;
+ u_int32_t bytes, gbytes;
+
+ if (objc != 4) {
+ Tcl_WrongNumArgs(interp, 4, objv, "gbytes bytes");
+ return (TCL_ERROR);
+ }
+
+ if ((result = _GetUInt32(interp, objv[2], &gbytes)) != TCL_OK)
+ return (result);
+ if ((result = _GetUInt32(interp, objv[3], &bytes)) != TCL_OK)
+ return (result);
+
+ _debug_check();
+ if ((ret = dbenv->set_rep_limit(dbenv, gbytes, bytes)) != 0)
+ return (_ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "env set_rep_limit"));
+
+ return (_ReturnSetup(interp,
+ ret, DB_RETOK_STD(ret), "env set_rep_limit"));
+}
+#endif
+
+#if CONFIG_TEST
+/*
+ * tcl_RepRequest --
+ * Call DB_ENV->set_rep_request().
+ *
+ * PUBLIC: int tcl_RepRequest
+ * PUBLIC: __P((Tcl_Interp *, int, Tcl_Obj * CONST *, DB_ENV *));
+ */
+int
+tcl_RepRequest(interp, objc, objv, dbenv)
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+ DB_ENV *dbenv; /* Environment pointer */
+{
+ int result, ret;
+ u_int32_t min, max;
+
+ if (objc != 4) {
+ Tcl_WrongNumArgs(interp, 4, objv, "min max");
+ return (TCL_ERROR);
+ }
+
+ if ((result = _GetUInt32(interp, objv[2], &min)) != TCL_OK)
+ return (result);
+ if ((result = _GetUInt32(interp, objv[3], &max)) != TCL_OK)
+ return (result);
+
+ _debug_check();
+ if ((ret = dbenv->set_rep_request(dbenv, min, max)) != 0)
+ return (_ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "env set_rep_request"));
+
+ return (_ReturnSetup(interp,
+ ret, DB_RETOK_STD(ret), "env set_rep_request"));
+}
+#endif
+
+#if CONFIG_TEST
+/*
+ * tcl_RepStart --
+ * Call DB_ENV->rep_start().
+ *
+ * PUBLIC: int tcl_RepStart
+ * PUBLIC: __P((Tcl_Interp *, int, Tcl_Obj * CONST *, DB_ENV *));
+ *
+ * Note that this normally can/should be achieved as an argument to
+ * berkdb env, but we need to test forcible upgrading of clients, which
+ * involves calling this on an open environment handle.
+ */
+int
+tcl_RepStart(interp, objc, objv, dbenv)
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+ DB_ENV *dbenv;
+{
+ static char *tclrpstrt[] = {
+ "-client",
+ "-master",
+ NULL
+ };
+ enum tclrpstrt {
+ TCL_RPSTRT_CLIENT,
+ TCL_RPSTRT_MASTER
+ };
+ char *arg;
+ int i, optindex, ret;
+ u_int32_t flag;
+
+ flag = 0;
+
+ if (objc != 3) {
+ Tcl_WrongNumArgs(interp, 3, objv, "[-master/-client]");
+ return (TCL_ERROR);
+ }
+
+ i = 2;
+ while (i < objc) {
+ if (Tcl_GetIndexFromObj(interp, objv[i], tclrpstrt,
+ "option", TCL_EXACT, &optindex) != TCL_OK) {
+ arg = Tcl_GetStringFromObj(objv[i], NULL);
+ if (arg[0] == '-')
+ return (IS_HELP(objv[i]));
+ else
+ Tcl_ResetResult(interp);
+ break;
+ }
+ i++;
+ switch ((enum tclrpstrt)optindex) {
+ case TCL_RPSTRT_CLIENT:
+ flag |= DB_REP_CLIENT;
+ break;
+ case TCL_RPSTRT_MASTER:
+ flag |= DB_REP_MASTER;
+ break;
+ }
+ }
+
+ _debug_check();
+ ret = dbenv->rep_start(dbenv, NULL, flag);
+ return (_ReturnSetup(interp, ret, DB_RETOK_STD(ret), "env rep_start"));
+}
+#endif
+
+#if CONFIG_TEST
+/*
+ * tcl_RepProcessMessage --
+ * Call DB_ENV->rep_process_message().
+ *
+ * PUBLIC: int tcl_RepProcessMessage
+ * PUBLIC: __P((Tcl_Interp *, int, Tcl_Obj * CONST *, DB_ENV *));
+ */
+int
+tcl_RepProcessMessage(interp, objc, objv, dbenv)
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+ DB_ENV *dbenv; /* Environment pointer */
+{
+ DBT control, rec;
+ Tcl_Obj *res;
+ void *ctmp, *rtmp;
+ int eid;
+ int freectl, freerec, result, ret;
+
+ if (objc != 5) {
+ Tcl_WrongNumArgs(interp, 5, objv, "id control rec");
+ return (TCL_ERROR);
+ }
+ freectl = freerec = 0;
+
+ memset(&control, 0, sizeof(control));
+ memset(&rec, 0, sizeof(rec));
+
+ if ((result = Tcl_GetIntFromObj(interp, objv[2], &eid)) != TCL_OK)
+ return (result);
+
+ ret = _CopyObjBytes(interp, objv[3], &ctmp,
+ &control.size, &freectl);
+ if (ret != 0) {
+ result = _ReturnSetup(interp, ret,
+ DB_RETOK_REPPMSG(ret), "rep_proc_msg");
+ return (result);
+ }
+ control.data = ctmp;
+ ret = _CopyObjBytes(interp, objv[4], &rtmp,
+ &rec.size, &freerec);
+ if (ret != 0) {
+ result = _ReturnSetup(interp, ret,
+ DB_RETOK_REPPMSG(ret), "rep_proc_msg");
+ goto out;
+ }
+ rec.data = rtmp;
+ _debug_check();
+ ret = dbenv->rep_process_message(dbenv, &control, &rec, &eid);
+ result = _ReturnSetup(interp, ret, DB_RETOK_REPPMSG(ret),
+ "env rep_process_message");
+
+ /*
+ * If we have a new master, return its environment ID.
+ *
+ * XXX
+ * We should do something prettier to differentiate success
+ * from an env ID, and figure out how to represent HOLDELECTION.
+ */
+ if (result == TCL_OK && ret == DB_REP_NEWMASTER) {
+ res = Tcl_NewIntObj(eid);
+ Tcl_SetObjResult(interp, res);
+ }
+out:
+ if (freectl)
+ (void)__os_free(NULL, ctmp);
+ if (freerec)
+ (void)__os_free(NULL, rtmp);
+
+ return (result);
+}
+#endif
+
+#if CONFIG_TEST
+/*
+ * tcl_RepStat --
+ * Call DB_ENV->rep_stat().
+ *
+ * PUBLIC: int tcl_RepStat
+ * PUBLIC: __P((Tcl_Interp *, int, Tcl_Obj * CONST *, DB_ENV *));
+ */
+int
+tcl_RepStat(interp, objc, objv, dbenv)
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+ DB_ENV *dbenv;
+{
+ DB_REP_STAT *sp;
+ Tcl_Obj *myobjv[2], *res, *thislist, *lsnlist;
+ u_int32_t flag;
+ int myobjc, result, ret;
+ char *arg;
+
+ result = TCL_OK;
+ flag = 0;
+
+ if (objc > 3) {
+ Tcl_WrongNumArgs(interp, 2, objv, NULL);
+ return (TCL_ERROR);
+ }
+ if (objc == 3) {
+ arg = Tcl_GetStringFromObj(objv[2], NULL);
+ if (strcmp(arg, "-clear") == 0)
+ flag = DB_STAT_CLEAR;
+ else {
+ Tcl_SetResult(interp,
+ "db stat: unknown arg", TCL_STATIC);
+ return (TCL_ERROR);
+ }
+ }
+
+ _debug_check();
+ ret = dbenv->rep_stat(dbenv, &sp, flag);
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "rep stat");
+ if (result == TCL_ERROR)
+ return (result);
+
+ /*
+ * Have our stats, now construct the name value
+ * list pairs and free up the memory.
+ */
+ res = Tcl_NewObj();
+ /*
+ * MAKE_STAT_* assumes 'res' and 'error' label.
+ */
+ MAKE_STAT_LSN("Next LSN expected", &sp->st_next_lsn);
+ MAKE_STAT_LSN("First missed LSN", &sp->st_waiting_lsn);
+ MAKE_STAT_LIST("Duplicate master conditions", sp->st_dupmasters);
+ MAKE_STAT_LIST("Environment ID", sp->st_env_id);
+ MAKE_STAT_LIST("Environment priority", sp->st_env_priority);
+ MAKE_STAT_LIST("Generation number", sp->st_gen);
+ MAKE_STAT_LIST("Duplicate log records received", sp->st_log_duplicated);
+ MAKE_STAT_LIST("Current log records queued", sp->st_log_queued);
+ MAKE_STAT_LIST("Maximum log records queued", sp->st_log_queued_max);
+ MAKE_STAT_LIST("Total log records queued", sp->st_log_queued_total);
+ MAKE_STAT_LIST("Log records received", sp->st_log_records);
+ MAKE_STAT_LIST("Log records requested", sp->st_log_requested);
+ MAKE_STAT_LIST("Master environment ID", sp->st_master);
+ MAKE_STAT_LIST("Master changes", sp->st_master_changes);
+ MAKE_STAT_LIST("Messages with bad generation number",
+ sp->st_msgs_badgen);
+ MAKE_STAT_LIST("Messages processed", sp->st_msgs_processed);
+ MAKE_STAT_LIST("Messages ignored for recovery", sp->st_msgs_recover);
+ MAKE_STAT_LIST("Message send failures", sp->st_msgs_send_failures);
+ MAKE_STAT_LIST("Messages sent", sp->st_msgs_sent);
+ MAKE_STAT_LIST("New site messages", sp->st_newsites);
+ MAKE_STAT_LIST("Transmission limited", sp->st_nthrottles);
+ MAKE_STAT_LIST("Outdated conditions", sp->st_outdated);
+ MAKE_STAT_LIST("Transactions applied", sp->st_txns_applied);
+ MAKE_STAT_LIST("Elections held", sp->st_elections);
+ MAKE_STAT_LIST("Elections won", sp->st_elections_won);
+ MAKE_STAT_LIST("Election phase", sp->st_election_status);
+ MAKE_STAT_LIST("Election winner", sp->st_election_cur_winner);
+ MAKE_STAT_LIST("Election generation number", sp->st_election_gen);
+ MAKE_STAT_LSN("Election max LSN", &sp->st_election_lsn);
+ MAKE_STAT_LIST("Election sites", sp->st_election_nsites);
+ MAKE_STAT_LIST("Election priority", sp->st_election_priority);
+ MAKE_STAT_LIST("Election tiebreaker", sp->st_election_tiebreaker);
+ MAKE_STAT_LIST("Election votes", sp->st_election_votes);
+
+ Tcl_SetObjResult(interp, res);
+error:
+ free(sp);
+ return (result);
+}
+#endif
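tcl_RepProcessMessage above maps the DB_ENV->rep_process_message() return values into Tcl results. A hedged sketch of the same logic on the C side (assuming the DB 4.1 replication API; how the control and rec DBTs arrive is left to the application's transport layer):

#include <db.h>

/* Feed one incoming replication message to the library and react to a
 * master change; other non-zero returns are passed back to the caller. */
static int
handle_rep_message(DB_ENV *dbenv, DBT *control, DBT *rec, int eid)
{
	int ret;

	switch (ret = dbenv->rep_process_message(dbenv, control, rec, &eid)) {
	case 0:
		return (0);
	case DB_REP_NEWMASTER:
		/* eid now identifies the new master's environment. */
		return (0);
	default:
		return (ret);
	}
}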
diff --git a/bdb/tcl/tcl_txn.c b/bdb/tcl/tcl_txn.c
index dfe6b6cf60f..b5fab637943 100644
--- a/bdb/tcl/tcl_txn.c
+++ b/bdb/tcl/tcl_txn.c
@@ -1,14 +1,14 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1999, 2000
+ * Copyright (c) 1999-2001
* Sleepycat Software. All rights reserved.
*/
#include "db_config.h"
#ifndef lint
-static const char revid[] = "$Id: tcl_txn.c,v 11.24 2000/12/31 19:26:23 bostic Exp $";
+static const char revid[] = "$Id: tcl_txn.c,v 11.57 2002/08/06 06:21:36 bostic Exp $";
#endif /* not lint */
#ifndef NO_SYSTEM_INCLUDES
@@ -20,13 +20,11 @@ static const char revid[] = "$Id: tcl_txn.c,v 11.24 2000/12/31 19:26:23 bostic E
#endif
#include "db_int.h"
-#include "tcl_db.h"
+#include "dbinc/tcl_db.h"
-/*
- * Prototypes for procedures defined later in this file:
- */
-static int tcl_TxnCommit __P((Tcl_Interp *, int, Tcl_Obj * CONST*,
- DB_TXN *, DBTCL_INFO *));
+static int tcl_TxnCommit __P((Tcl_Interp *,
+ int, Tcl_Obj * CONST *, DB_TXN *, DBTCL_INFO *));
+static int txn_Cmd __P((ClientData, Tcl_Interp *, int, Tcl_Obj * CONST *));
/*
* _TxnInfoDelete --
@@ -39,7 +37,7 @@ static int tcl_TxnCommit __P((Tcl_Interp *, int, Tcl_Obj * CONST*,
*/
void
_TxnInfoDelete(interp, txnip)
- Tcl_Interp *interp; /* Interpreter */
+ Tcl_Interp *interp; /* Interpreter */
DBTCL_INFO *txnip; /* Info for txn */
{
DBTCL_INFO *nextp, *p;
@@ -115,8 +113,9 @@ tcl_TxnCheckpoint(interp, objc, objv, envp)
}
}
_debug_check();
- ret = txn_checkpoint(envp, (u_int32_t)kb, (u_int32_t)min, 0);
- result = _ReturnSetup(interp, ret, "txn checkpoint");
+ ret = envp->txn_checkpoint(envp, (u_int32_t)kb, (u_int32_t)min, 0);
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "txn checkpoint");
return (result);
}
@@ -135,6 +134,11 @@ tcl_Txn(interp, objc, objv, envp, envip)
DBTCL_INFO *envip; /* Info pointer */
{
static char *txnopts[] = {
+#if CONFIG_TEST
+ "-dirty",
+ "-lock_timeout",
+ "-txn_timeout",
+#endif
"-nosync",
"-nowait",
"-parent",
@@ -142,16 +146,22 @@ tcl_Txn(interp, objc, objv, envp, envip)
NULL
};
enum txnopts {
- TXN_NOSYNC,
- TXN_NOWAIT,
- TXN_PARENT,
- TXN_SYNC
+#if CONFIG_TEST
+ TXNDIRTY,
+ TXN_LOCK_TIMEOUT,
+ TXN_TIMEOUT,
+#endif
+ TXNNOSYNC,
+ TXNNOWAIT,
+ TXNPARENT,
+ TXNSYNC
};
DBTCL_INFO *ip;
DB_TXN *parent;
DB_TXN *txn;
Tcl_Obj *res;
- u_int32_t flag;
+ db_timeout_t lk_time, tx_time;
+ u_int32_t flag, lk_timeflag, tx_timeflag;
int i, optindex, result, ret;
char *arg, msg[MSG_SIZE], newname[MSG_SIZE];
@@ -160,6 +170,7 @@ tcl_Txn(interp, objc, objv, envp, envip)
parent = NULL;
flag = 0;
+ lk_timeflag = tx_timeflag = 0;
i = 2;
while (i < objc) {
if (Tcl_GetIndexFromObj(interp, objv[i],
@@ -168,7 +179,37 @@ tcl_Txn(interp, objc, objv, envp, envip)
}
i++;
switch ((enum txnopts)optindex) {
- case TXN_PARENT:
+#ifdef CONFIG_TEST
+ case TXNDIRTY:
+ flag |= DB_DIRTY_READ;
+ break;
+ case TXN_LOCK_TIMEOUT:
+ lk_timeflag = DB_SET_LOCK_TIMEOUT;
+ goto getit;
+ case TXN_TIMEOUT:
+ tx_timeflag = DB_SET_TXN_TIMEOUT;
+getit:
+ if (i >= objc) {
+ Tcl_WrongNumArgs(interp, 2, objv,
+ "?-txn_timestamp time?");
+ return (TCL_ERROR);
+ }
+ result = Tcl_GetLongFromObj(interp, objv[i++],
+ (long *)(optindex == TXN_LOCK_TIMEOUT ?
+ &lk_time : &tx_time));
+ if (result != TCL_OK)
+ return (TCL_ERROR);
+ break;
+#endif
+ case TXNNOSYNC:
+ FLAG_CHECK2(flag, DB_DIRTY_READ);
+ flag |= DB_TXN_NOSYNC;
+ break;
+ case TXNNOWAIT:
+ FLAG_CHECK2(flag, DB_DIRTY_READ);
+ flag |= DB_TXN_NOWAIT;
+ break;
+ case TXNPARENT:
if (i == objc) {
Tcl_WrongNumArgs(interp, 2, objv,
"?-parent txn?");
@@ -185,18 +226,10 @@ tcl_Txn(interp, objc, objv, envp, envip)
return (TCL_ERROR);
}
break;
- case TXN_NOWAIT:
- FLAG_CHECK(flag);
- flag |= DB_TXN_NOWAIT;
- break;
- case TXN_SYNC:
- FLAG_CHECK(flag);
+ case TXNSYNC:
+ FLAG_CHECK2(flag, DB_DIRTY_READ);
flag |= DB_TXN_SYNC;
break;
- case TXN_NOSYNC:
- FLAG_CHECK(flag);
- flag |= DB_TXN_NOSYNC;
- break;
}
}
snprintf(newname, sizeof(newname), "%s.txn%d",
@@ -208,8 +241,9 @@ tcl_Txn(interp, objc, objv, envp, envip)
return (TCL_ERROR);
}
_debug_check();
- ret = txn_begin(envp, parent, &txn, flag);
- result = _ReturnSetup(interp, ret, "txn");
+ ret = envp->txn_begin(envp, parent, &txn, flag);
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "txn");
if (result == TCL_ERROR)
_DeleteInfo(ip);
else {
@@ -227,6 +261,24 @@ tcl_Txn(interp, objc, objv, envp, envip)
(Tcl_ObjCmdProc *)txn_Cmd, (ClientData)txn, NULL);
res = Tcl_NewStringObj(newname, strlen(newname));
Tcl_SetObjResult(interp, res);
+ if (tx_timeflag != 0) {
+ ret = txn->set_timeout(txn, tx_time, tx_timeflag);
+ if (ret != 0) {
+ result =
+ _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "set_timeout");
+ _DeleteInfo(ip);
+ }
+ }
+ if (lk_timeflag != 0) {
+ ret = txn->set_timeout(txn, lk_time, lk_timeflag);
+ if (ret != 0) {
+ result =
+ _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "set_timeout");
+ _DeleteInfo(ip);
+ }
+ }
}
return (result);
}
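The new -lock_timeout and -txn_timeout options above apply a per-transaction timeout right after txn_begin succeeds. A sketch of the equivalent direct calls (assuming the DB 4.1 API shown in this hunk; the flag and timeout value are illustrative):

#include <db.h>

/* Begin a transaction and give it its own lock timeout; abort on failure. */
static int
begin_with_lock_timeout(DB_ENV *dbenv, DB_TXN **txnp, db_timeout_t usec)
{
	DB_TXN *txn;
	int ret;

	if ((ret = dbenv->txn_begin(dbenv, NULL, &txn, 0)) != 0)
		return (ret);
	if ((ret = txn->set_timeout(txn, usec, DB_SET_LOCK_TIMEOUT)) != 0) {
		(void)txn->abort(txn);
		return (ret);
	}
	*txnp = txn;
	return (0);
}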
@@ -244,21 +296,6 @@ tcl_TxnStat(interp, objc, objv, envp)
Tcl_Obj *CONST objv[]; /* The argument objects */
DB_ENV *envp; /* Environment pointer */
{
-#define MAKE_STAT_LSN(s, lsn) \
-do { \
- myobjc = 2; \
- myobjv[0] = Tcl_NewIntObj((lsn)->file); \
- myobjv[1] = Tcl_NewIntObj((lsn)->offset); \
- lsnlist = Tcl_NewListObj(myobjc, myobjv); \
- myobjc = 2; \
- myobjv[0] = Tcl_NewStringObj((s), strlen(s)); \
- myobjv[1] = lsnlist; \
- thislist = Tcl_NewListObj(myobjc, myobjv); \
- result = Tcl_ListObjAppendElement(interp, res, thislist); \
- if (result != TCL_OK) \
- goto error; \
-} while (0);
-
DBTCL_INFO *ip;
DB_TXN_ACTIVE *p;
DB_TXN_STAT *sp;
@@ -275,8 +312,9 @@ do { \
return (TCL_ERROR);
}
_debug_check();
- ret = txn_stat(envp, &sp, NULL);
- result = _ReturnSetup(interp, ret, "txn stat");
+ ret = envp->txn_stat(envp, &sp, 0);
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "txn stat");
if (result == TCL_ERROR)
return (result);
@@ -290,14 +328,15 @@ do { \
*/
MAKE_STAT_LIST("Region size", sp->st_regsize);
MAKE_STAT_LSN("LSN of last checkpoint", &sp->st_last_ckp);
- MAKE_STAT_LSN("LSN of pending checkpoint", &sp->st_pending_ckp);
MAKE_STAT_LIST("Time of last checkpoint", sp->st_time_ckp);
MAKE_STAT_LIST("Last txn ID allocated", sp->st_last_txnid);
MAKE_STAT_LIST("Max Txns", sp->st_maxtxns);
MAKE_STAT_LIST("Number aborted txns", sp->st_naborts);
MAKE_STAT_LIST("Number active txns", sp->st_nactive);
+ MAKE_STAT_LIST("Maximum active txns", sp->st_maxnactive);
MAKE_STAT_LIST("Number txns begun", sp->st_nbegins);
MAKE_STAT_LIST("Number committed txns", sp->st_ncommits);
+ MAKE_STAT_LIST("Number restored txns", sp->st_nrestores);
MAKE_STAT_LIST("Number of region lock waits", sp->st_region_wait);
MAKE_STAT_LIST("Number of region lock nowaits", sp->st_region_nowait);
for (i = 0, p = sp->st_txnarray; i < sp->st_nactive; i++, p++)
@@ -306,7 +345,7 @@ do { \
if (ip->i_type != I_TXN)
continue;
if (ip->i_type == I_TXN &&
- (txn_id(ip->i_txnp) == p->txnid)) {
+ (ip->i_txnp->id(ip->i_txnp) == p->txnid)) {
MAKE_STAT_LSN(ip->i_name, &p->lsn);
if (p->parentid != 0)
MAKE_STAT_STRLIST("Parent",
@@ -318,40 +357,78 @@ do { \
}
Tcl_SetObjResult(interp, res);
error:
- __os_free(sp, sizeof(*sp));
+ free(sp);
return (result);
}
/*
- * txn_Cmd --
- * Implements the "txn" widget.
+ * tcl_TxnTimeout --
*
- * PUBLIC: int txn_Cmd __P((ClientData, Tcl_Interp *, int, Tcl_Obj * CONST*));
+ * PUBLIC: int tcl_TxnTimeout __P((Tcl_Interp *, int,
+ * PUBLIC: Tcl_Obj * CONST*, DB_ENV *));
*/
int
+tcl_TxnTimeout(interp, objc, objv, envp)
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+ DB_ENV *envp; /* Environment pointer */
+{
+ long timeout;
+ int result, ret;
+
+ /*
+ * One arg, the timeout.
+ */
+ if (objc != 3) {
+ Tcl_WrongNumArgs(interp, 2, objv, "?timeout?");
+ return (TCL_ERROR);
+ }
+ result = Tcl_GetLongFromObj(interp, objv[2], &timeout);
+ if (result != TCL_OK)
+ return (result);
+ _debug_check();
+ ret = envp->set_timeout(envp, (u_int32_t)timeout, DB_SET_TXN_TIMEOUT);
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+	    "txn timeout");
+ return (result);
+}
+
+/*
+ * txn_Cmd --
+ * Implements the "txn" widget.
+ */
+static int
txn_Cmd(clientData, interp, objc, objv)
- ClientData clientData; /* Txn handle */
- Tcl_Interp *interp; /* Interpreter */
- int objc; /* How many arguments? */
- Tcl_Obj *CONST objv[]; /* The argument objects */
+ ClientData clientData; /* Txn handle */
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
{
static char *txncmds[] = {
- "abort",
- "commit",
+#if CONFIG_TEST
+ "discard",
"id",
"prepare",
+#endif
+ "abort",
+ "commit",
NULL
};
enum txncmds {
- TXNABORT,
- TXNCOMMIT,
+#if CONFIG_TEST
+ TXNDISCARD,
TXNID,
- TXNPREPARE
+ TXNPREPARE,
+#endif
+ TXNABORT,
+ TXNCOMMIT
};
DBTCL_INFO *txnip;
DB_TXN *txnp;
Tcl_Obj *res;
int cmdindex, result, ret;
+ u_int8_t *gid;
Tcl_ResetResult(interp);
txnp = (DB_TXN *)clientData;
@@ -376,38 +453,64 @@ txn_Cmd(clientData, interp, objc, objv)
res = NULL;
switch ((enum txncmds)cmdindex) {
+#if CONFIG_TEST
+ case TXNDISCARD:
+ if (objc != 2) {
+ Tcl_WrongNumArgs(interp, 1, objv, NULL);
+ return (TCL_ERROR);
+ }
+ _debug_check();
+ ret = txnp->discard(txnp, 0);
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "txn discard");
+ _TxnInfoDelete(interp, txnip);
+ (void)Tcl_DeleteCommand(interp, txnip->i_name);
+ _DeleteInfo(txnip);
+ break;
case TXNID:
if (objc != 2) {
Tcl_WrongNumArgs(interp, 1, objv, NULL);
return (TCL_ERROR);
}
_debug_check();
- ret = txn_id(txnp);
+ ret = txnp->id(txnp);
res = Tcl_NewIntObj(ret);
break;
case TXNPREPARE:
- if (objc != 2) {
+ if (objc != 3) {
Tcl_WrongNumArgs(interp, 1, objv, NULL);
return (TCL_ERROR);
}
_debug_check();
- ret = txn_prepare(txnp);
- result = _ReturnSetup(interp, ret, "txn prepare");
- break;
- case TXNCOMMIT:
- result = tcl_TxnCommit(interp, objc, objv, txnp, txnip);
+ gid = (u_int8_t *)Tcl_GetByteArrayFromObj(objv[2], NULL);
+ ret = txnp->prepare(txnp, gid);
+ /*
+ * !!!
+ * DB_TXN->prepare commits all outstanding children. But it
+ * does NOT destroy the current txn handle. So, we must call
+ * _TxnInfoDelete to recursively remove all nested txn handles,
+	 * but we do not call _DeleteInfo on ourselves.
+ */
_TxnInfoDelete(interp, txnip);
- (void)Tcl_DeleteCommand(interp, txnip->i_name);
- _DeleteInfo(txnip);
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "txn prepare");
break;
+#endif
case TXNABORT:
if (objc != 2) {
Tcl_WrongNumArgs(interp, 1, objv, NULL);
return (TCL_ERROR);
}
_debug_check();
- ret = txn_abort(txnp);
- result = _ReturnSetup(interp, ret, "txn abort");
+ ret = txnp->abort(txnp);
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "txn abort");
+ _TxnInfoDelete(interp, txnip);
+ (void)Tcl_DeleteCommand(interp, txnip->i_name);
+ _DeleteInfo(txnip);
+ break;
+ case TXNCOMMIT:
+ result = tcl_TxnCommit(interp, objc, objv, txnp, txnip);
_TxnInfoDelete(interp, txnip);
(void)Tcl_DeleteCommand(interp, txnip->i_name);
_DeleteInfo(txnip);
@@ -424,9 +527,9 @@ txn_Cmd(clientData, interp, objc, objv)
static int
tcl_TxnCommit(interp, objc, objv, txnp, txnip)
- Tcl_Interp *interp; /* Interpreter */
- int objc; /* How many arguments? */
- Tcl_Obj *CONST objv[]; /* The argument objects */
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
DB_TXN *txnp; /* Transaction pointer */
DBTCL_INFO *txnip; /* Info pointer */
{
@@ -467,7 +570,88 @@ tcl_TxnCommit(interp, objc, objv, txnp, txnip)
}
_debug_check();
- ret = txn_commit(txnp, flag);
- result = _ReturnSetup(interp, ret, "txn commit");
+ ret = txnp->commit(txnp, flag);
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "txn commit");
+ return (result);
+}
+
+#if CONFIG_TEST
+/*
+ * tcl_TxnRecover --
+ *
+ * PUBLIC: int tcl_TxnRecover __P((Tcl_Interp *, int,
+ * PUBLIC: Tcl_Obj * CONST*, DB_ENV *, DBTCL_INFO *));
+ */
+int
+tcl_TxnRecover(interp, objc, objv, envp, envip)
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+ DB_ENV *envp; /* Environment pointer */
+ DBTCL_INFO *envip; /* Info pointer */
+{
+#define DO_PREPLIST(count) \
+for (i = 0; i < count; i++) { \
+ snprintf(newname, sizeof(newname), "%s.txn%d", \
+ envip->i_name, envip->i_envtxnid); \
+ ip = _NewInfo(interp, NULL, newname, I_TXN); \
+ if (ip == NULL) { \
+ Tcl_SetResult(interp, "Could not set up info", \
+ TCL_STATIC); \
+ return (TCL_ERROR); \
+ } \
+ envip->i_envtxnid++; \
+ ip->i_parent = envip; \
+ p = &prep[i]; \
+ _SetInfoData(ip, p->txn); \
+ Tcl_CreateObjCommand(interp, newname, \
+ (Tcl_ObjCmdProc *)txn_Cmd, (ClientData)p->txn, NULL); \
+ result = _SetListElem(interp, res, newname, strlen(newname), \
+ p->gid, DB_XIDDATASIZE); \
+ if (result != TCL_OK) \
+ goto error; \
+}
+
+ DBTCL_INFO *ip;
+ DB_PREPLIST prep[DBTCL_PREP], *p;
+ Tcl_Obj *res;
+ long count, i;
+ int result, ret;
+ char newname[MSG_SIZE];
+
+ result = TCL_OK;
+ /*
+ * No args for this. Error if there are some.
+ */
+ if (objc != 2) {
+ Tcl_WrongNumArgs(interp, 2, objv, NULL);
+ return (TCL_ERROR);
+ }
+ _debug_check();
+ ret = envp->txn_recover(envp, prep, DBTCL_PREP, &count, DB_FIRST);
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "txn recover");
+ if (result == TCL_ERROR)
+ return (result);
+ res = Tcl_NewObj();
+ DO_PREPLIST(count);
+
+ /*
+ * If count returned is the maximum size we have, then there
+ * might be more. Keep going until we get them all.
+ */
+ while (count == DBTCL_PREP) {
+ ret = envp->txn_recover(
+ envp, prep, DBTCL_PREP, &count, DB_NEXT);
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret),
+ "txn recover");
+ if (result == TCL_ERROR)
+ return (result);
+ DO_PREPLIST(count);
+ }
+ Tcl_SetObjResult(interp, res);
+error:
return (result);
}
+#endif
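
The hunks above move the Tcl transaction glue from the old txn_begin()/txn_commit()/txn_abort() function calls to the 4.1 handle methods and wire up the new -txn_timeout and -lock_timeout options. A minimal C sketch of how those handle methods are driven, assuming an already-opened transactional DB_ENV; the helper name and the timeout values are illustrative only and not part of the patch:

    #include <db.h>

    /*
     * Sketch only: begin a transaction with per-txn and per-lock timeouts,
     * mirroring the envp->txn_begin()/txn->set_timeout() calls made above.
     * Timeouts are in microseconds and chosen arbitrarily here.
     */
    static int
    begin_with_timeouts(DB_ENV *dbenv, DB_TXN **txnpp)
    {
    	DB_TXN *txn;
    	int ret;

    	/* txn_begin is now a method on the environment handle. */
    	if ((ret = dbenv->txn_begin(dbenv, NULL, &txn, DB_TXN_NOSYNC)) != 0)
    		return (ret);

    	/* Roughly what "-txn_timeout 1000000 -lock_timeout 500000" sets up. */
    	if ((ret = txn->set_timeout(txn, 1000000, DB_SET_TXN_TIMEOUT)) != 0 ||
    	    (ret = txn->set_timeout(txn, 500000, DB_SET_LOCK_TIMEOUT)) != 0) {
    		(void)txn->abort(txn);
    		return (ret);
    	}

    	*txnpp = txn;
    	return (0);
    }
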
diff --git a/bdb/tcl/tcl_util.c b/bdb/tcl/tcl_util.c
new file mode 100644
index 00000000000..3c0665f9e38
--- /dev/null
+++ b/bdb/tcl/tcl_util.c
@@ -0,0 +1,381 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1999-2001
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: tcl_util.c,v 11.35 2002/08/06 06:21:42 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <fcntl.h>
+#include <stdlib.h>
+#include <string.h>
+#include <tcl.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/tcl_db.h"
+
+/*
+ * Prototypes for procedures defined later in this file:
+ */
+static int mutex_Cmd __P((ClientData, Tcl_Interp *, int, Tcl_Obj * CONST*));
+
+/*
+ * bdb_RandCommand --
+ * Implements rand* functions.
+ *
+ * PUBLIC: int bdb_RandCommand __P((Tcl_Interp *, int, Tcl_Obj * CONST*));
+ */
+int
+bdb_RandCommand(interp, objc, objv)
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+{
+ static char *rcmds[] = {
+ "rand", "random_int", "srand",
+ NULL
+ };
+ enum rcmds {
+ RRAND, RRAND_INT, RSRAND
+ };
+ long t;
+ int cmdindex, hi, lo, result, ret;
+ Tcl_Obj *res;
+ char msg[MSG_SIZE];
+
+ result = TCL_OK;
+ /*
+ * Get the command name index from the object based on the cmds
+ * defined above. This SHOULD NOT fail because we already checked
+ * in the 'berkdb' command.
+ */
+ if (Tcl_GetIndexFromObj(interp,
+ objv[1], rcmds, "command", TCL_EXACT, &cmdindex) != TCL_OK)
+ return (IS_HELP(objv[1]));
+
+ res = NULL;
+ switch ((enum rcmds)cmdindex) {
+ case RRAND:
+ /*
+ * Must be 0 args. Error if different.
+ */
+ if (objc != 2) {
+ Tcl_WrongNumArgs(interp, 2, objv, NULL);
+ return (TCL_ERROR);
+ }
+ ret = rand();
+ res = Tcl_NewIntObj(ret);
+ break;
+ case RRAND_INT:
+ /*
+ * Must be 4 args. Error if different.
+ */
+ if (objc != 4) {
+ Tcl_WrongNumArgs(interp, 2, objv, "lo hi");
+ return (TCL_ERROR);
+ }
+ result = Tcl_GetIntFromObj(interp, objv[2], &lo);
+ if (result != TCL_OK)
+ break;
+ result = Tcl_GetIntFromObj(interp, objv[3], &hi);
+ if (result == TCL_OK) {
+#ifndef RAND_MAX
+#define RAND_MAX 0x7fffffff
+#endif
+ t = rand();
+ if (t > RAND_MAX) {
+ snprintf(msg, MSG_SIZE,
+ "Max random is higher than %ld\n",
+ (long)RAND_MAX);
+ Tcl_SetResult(interp, msg, TCL_VOLATILE);
+ result = TCL_ERROR;
+ break;
+ }
+ _debug_check();
+ ret = (int)(((double)t / ((double)(RAND_MAX) + 1)) *
+ (hi - lo + 1));
+ ret += lo;
+ res = Tcl_NewIntObj(ret);
+ }
+ break;
+ case RSRAND:
+ /*
+ * Must be 1 arg. Error if different.
+ */
+ if (objc != 3) {
+ Tcl_WrongNumArgs(interp, 2, objv, "seed");
+ return (TCL_ERROR);
+ }
+ result = Tcl_GetIntFromObj(interp, objv[2], &lo);
+ if (result == TCL_OK) {
+ srand((u_int)lo);
+ res = Tcl_NewIntObj(0);
+ }
+ break;
+ }
+ /*
+ * Only set result if we have a res. Otherwise, lower
+ * functions have already done so.
+ */
+ if (result == TCL_OK && res)
+ Tcl_SetObjResult(interp, res);
+ return (result);
+}
+
+/*
+ *
+ * tcl_Mutex --
+ * Opens an env mutex.
+ *
+ * PUBLIC: int tcl_Mutex __P((Tcl_Interp *, int, Tcl_Obj * CONST*, DB_ENV *,
+ * PUBLIC: DBTCL_INFO *));
+ */
+int
+tcl_Mutex(interp, objc, objv, envp, envip)
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+ DB_ENV *envp; /* Environment pointer */
+ DBTCL_INFO *envip; /* Info pointer */
+{
+ DBTCL_INFO *ip;
+ Tcl_Obj *res;
+ _MUTEX_DATA *md;
+ int i, mode, nitems, result, ret;
+ char newname[MSG_SIZE];
+
+ md = NULL;
+ result = TCL_OK;
+ mode = nitems = ret = 0;
+ memset(newname, 0, MSG_SIZE);
+
+ if (objc != 4) {
+ Tcl_WrongNumArgs(interp, 2, objv, "mode nitems");
+ return (TCL_ERROR);
+ }
+ result = Tcl_GetIntFromObj(interp, objv[2], &mode);
+ if (result != TCL_OK)
+ return (TCL_ERROR);
+ result = Tcl_GetIntFromObj(interp, objv[3], &nitems);
+ if (result != TCL_OK)
+ return (TCL_ERROR);
+
+ snprintf(newname, sizeof(newname),
+ "%s.mutex%d", envip->i_name, envip->i_envmutexid);
+ ip = _NewInfo(interp, NULL, newname, I_MUTEX);
+ if (ip == NULL) {
+ Tcl_SetResult(interp, "Could not set up info",
+ TCL_STATIC);
+ return (TCL_ERROR);
+ }
+ /*
+ * Set up mutex.
+ */
+ /*
+ * Map in the region.
+ *
+ * XXX
+ * We don't bother doing this "right", i.e., using the shalloc
+ * functions, just grab some memory knowing that it's correctly
+ * aligned.
+ */
+ _debug_check();
+ if (__os_calloc(NULL, 1, sizeof(_MUTEX_DATA), &md) != 0)
+ goto posixout;
+ md->env = envp;
+ md->n_mutex = nitems;
+ md->size = sizeof(_MUTEX_ENTRY) * nitems;
+
+ md->reginfo.type = REGION_TYPE_MUTEX;
+ md->reginfo.id = INVALID_REGION_TYPE;
+ md->reginfo.mode = mode;
+ md->reginfo.flags = REGION_CREATE_OK | REGION_JOIN_OK;
+ if ((ret = __db_r_attach(envp, &md->reginfo, md->size)) != 0)
+ goto posixout;
+ md->marray = md->reginfo.addr;
+
+ /* Initialize a created region. */
+ if (F_ISSET(&md->reginfo, REGION_CREATE))
+ for (i = 0; i < nitems; i++) {
+ md->marray[i].val = 0;
+ if ((ret = __db_mutex_init_int(envp,
+ &md->marray[i].m, i, 0)) != 0)
+ goto posixout;
+ }
+ R_UNLOCK(envp, &md->reginfo);
+
+ /*
+ * Success. Set up return. Set up new info
+ * and command widget for this mutex.
+ */
+ envip->i_envmutexid++;
+ ip->i_parent = envip;
+ _SetInfoData(ip, md);
+ Tcl_CreateObjCommand(interp, newname,
+ (Tcl_ObjCmdProc *)mutex_Cmd, (ClientData)md, NULL);
+ res = Tcl_NewStringObj(newname, strlen(newname));
+ Tcl_SetObjResult(interp, res);
+
+ return (TCL_OK);
+
+posixout:
+ if (ret > 0)
+ Tcl_PosixError(interp);
+ result = _ReturnSetup(interp, ret, DB_RETOK_STD(ret), "mutex");
+ _DeleteInfo(ip);
+
+ if (md != NULL) {
+ if (md->reginfo.addr != NULL)
+ (void)__db_r_detach(md->env,
+ &md->reginfo, F_ISSET(&md->reginfo, REGION_CREATE));
+ __os_free(md->env, md);
+ }
+ return (result);
+}
+
+/*
+ * mutex_Cmd --
+ * Implements the "mutex" widget.
+ */
+static int
+mutex_Cmd(clientData, interp, objc, objv)
+ ClientData clientData; /* Mutex handle */
+ Tcl_Interp *interp; /* Interpreter */
+ int objc; /* How many arguments? */
+ Tcl_Obj *CONST objv[]; /* The argument objects */
+{
+ static char *mxcmds[] = {
+ "close",
+ "get",
+ "getval",
+ "release",
+ "setval",
+ NULL
+ };
+ enum mxcmds {
+ MXCLOSE,
+ MXGET,
+ MXGETVAL,
+ MXRELE,
+ MXSETVAL
+ };
+ DB_ENV *dbenv;
+ DBTCL_INFO *envip, *mpip;
+ _MUTEX_DATA *mp;
+ Tcl_Obj *res;
+ int cmdindex, id, result, newval;
+
+ Tcl_ResetResult(interp);
+ mp = (_MUTEX_DATA *)clientData;
+ mpip = _PtrToInfo((void *)mp);
+ envip = mpip->i_parent;
+ dbenv = envip->i_envp;
+ result = TCL_OK;
+
+ if (mp == NULL) {
+ Tcl_SetResult(interp, "NULL mp pointer", TCL_STATIC);
+ return (TCL_ERROR);
+ }
+ if (mpip == NULL) {
+ Tcl_SetResult(interp, "NULL mp info pointer", TCL_STATIC);
+ return (TCL_ERROR);
+ }
+
+ /*
+	 * Get the command name index from the object based on the mxcmds
+ * defined above.
+ */
+ if (Tcl_GetIndexFromObj(interp,
+ objv[1], mxcmds, "command", TCL_EXACT, &cmdindex) != TCL_OK)
+ return (IS_HELP(objv[1]));
+
+ res = NULL;
+ switch ((enum mxcmds)cmdindex) {
+ case MXCLOSE:
+ if (objc != 2) {
+ Tcl_WrongNumArgs(interp, 1, objv, NULL);
+ return (TCL_ERROR);
+ }
+ _debug_check();
+ (void)__db_r_detach(mp->env, &mp->reginfo, 0);
+ res = Tcl_NewIntObj(0);
+ (void)Tcl_DeleteCommand(interp, mpip->i_name);
+ _DeleteInfo(mpip);
+ __os_free(mp->env, mp);
+ break;
+ case MXRELE:
+ /*
+ * Check for 1 arg. Error if different.
+ */
+ if (objc != 3) {
+ Tcl_WrongNumArgs(interp, 2, objv, "id");
+ return (TCL_ERROR);
+ }
+ result = Tcl_GetIntFromObj(interp, objv[2], &id);
+ if (result != TCL_OK)
+ break;
+ MUTEX_UNLOCK(dbenv, &mp->marray[id].m);
+ res = Tcl_NewIntObj(0);
+ break;
+ case MXGET:
+ /*
+ * Check for 1 arg. Error if different.
+ */
+ if (objc != 3) {
+ Tcl_WrongNumArgs(interp, 2, objv, "id");
+ return (TCL_ERROR);
+ }
+ result = Tcl_GetIntFromObj(interp, objv[2], &id);
+ if (result != TCL_OK)
+ break;
+ MUTEX_LOCK(dbenv, &mp->marray[id].m);
+ res = Tcl_NewIntObj(0);
+ break;
+ case MXGETVAL:
+ /*
+ * Check for 1 arg. Error if different.
+ */
+ if (objc != 3) {
+ Tcl_WrongNumArgs(interp, 2, objv, "id");
+ return (TCL_ERROR);
+ }
+ result = Tcl_GetIntFromObj(interp, objv[2], &id);
+ if (result != TCL_OK)
+ break;
+ res = Tcl_NewLongObj((long)mp->marray[id].val);
+ break;
+ case MXSETVAL:
+ /*
+ * Check for 2 args. Error if different.
+ */
+ if (objc != 4) {
+ Tcl_WrongNumArgs(interp, 2, objv, "id val");
+ return (TCL_ERROR);
+ }
+ result = Tcl_GetIntFromObj(interp, objv[2], &id);
+ if (result != TCL_OK)
+ break;
+ result = Tcl_GetIntFromObj(interp, objv[3], &newval);
+ if (result != TCL_OK)
+ break;
+ mp->marray[id].val = newval;
+ res = Tcl_NewIntObj(0);
+ break;
+ }
+ /*
+ * Only set result if we have a res. Otherwise, lower
+ * functions have already done so.
+ */
+ if (result == TCL_OK && res)
+ Tcl_SetObjResult(interp, res);
+ return (result);
+}
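
The random_int branch above maps rand() onto a caller-supplied [lo, hi] range by scaling t/(RAND_MAX + 1). A standalone sketch of that arithmetic follows; the helper name, seed, and range are illustrative and not part of the file:

    #include <stdio.h>
    #include <stdlib.h>

    /*
     * Sketch of the scaling used by "berkdb random_int lo hi" above:
     * t / (RAND_MAX + 1) lies in [0, 1), so multiplying by (hi - lo + 1)
     * and adding lo yields an integer in [lo, hi].
     */
    static int
    random_in_range(int lo, int hi)
    {
    	long t;

    	t = rand();
    	return ((int)(((double)t / ((double)RAND_MAX + 1)) * (hi - lo + 1)) + lo);
    }

    int
    main(void)
    {
    	srand(12345);		/* arbitrary seed, as "berkdb srand" would set */
    	printf("%d\n", random_in_range(1, 10));
    	return (0);
    }
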
diff --git a/bdb/test/TESTS b/bdb/test/TESTS
index a585bdddcde..eac6396b20c 100644
--- a/bdb/test/TESTS
+++ b/bdb/test/TESTS
@@ -1,448 +1,1437 @@
-# $Id: TESTS,v 11.34 2000/11/06 19:31:56 sue Exp $
+# Automatically built by dist/s_test; may require local editing.
=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
-Access method tests
+bigfile001
+ Create a database greater than 4 GB in size. Close, verify.
+ Grow the database somewhat. Close, reverify. Lather, rinse,
+ repeat. Since it will not work on all systems, this test is
+ not run by default.
+
=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
-test001 Small keys/data
- Put/get per key
- Dump file
- Close, reopen
- Dump file
+bigfile002
+ This one should be faster and not require so much disk space,
+ although it doesn't test as extensively. Create an mpool file
+ with 1K pages. Dirty page 6000000. Sync.
-test002 Small keys/medium data
- Put/get per key
- Dump file
- Close, reopen
- Dump file
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+dbm
+ Historic DBM interface test. Use the first 1000 entries from the
+ dictionary. Insert each with self as key and data; retrieve each.
+ After all are entered, retrieve all; compare output to original.
+ Then reopen the file, re-retrieve everything. Finally, delete
+ everything.
-test003 Small keys/large data
- Put/get per key
- Dump file
- Close, reopen
- Dump file
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+dead001
+ Use two different configurations to test deadlock detection among a
+ variable number of processes. One configuration has the processes
+ deadlocked in a ring. The other has the processes all deadlocked on
+ a single resource.
-test004 Small keys/medium data
- Put/get per key
- Sequential (cursor) get/delete
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+dead002
+ Same test as dead001, but use "detect on every collision" instead
+ of separate deadlock detector.
-test005 Small keys/medium data
- Put/get per key
- Close, reopen
- Sequential (cursor) get/delete
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+dead003
-test006 Small keys/medium data
- Put/get per key
- Keyed delete and verify
+ Same test as dead002, but explicitly specify DB_LOCK_OLDEST and
+ DB_LOCK_YOUNGEST. Verify the correct lock was aborted/granted.
-test007 Small keys/medium data
- Put/get per key
- Close, reopen
- Keyed delete
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+dead006
+	Use timeouts rather than the normal dd (deadlock detector) algorithm.
-test008 Small keys/large data
- Put/get per key
- Loop through keys by steps (which change)
- ... delete each key at step
- ... add each key back
- ... change step
- Confirm that overflow pages are getting reused
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+dead007
+	Use timeouts rather than the normal dd (deadlock detector) algorithm.
-test009 Small keys/large data
- Same as test008; close and reopen database
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+env001
+ Test of env remove interface (formerly env_remove).
-test010 Duplicate test
- Small key/data pairs.
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+env002
+ Test of DB_LOG_DIR and env name resolution.
+ With an environment path specified using -home, and then again
+ with it specified by the environment variable DB_HOME:
+ 1) Make sure that the set_lg_dir option is respected
+ a) as a relative pathname.
+ b) as an absolute pathname.
+ 2) Make sure that the DB_LOG_DIR db_config argument is respected,
+ again as relative and absolute pathnames.
+ 3) Make sure that if -both- db_config and a file are present,
+ only the file is respected (see doc/env/naming.html).
-test011 Duplicate test
- Small key/data pairs.
- Test DB_KEYFIRST, DB_KEYLAST, DB_BEFORE and DB_AFTER.
- To test off-page duplicates, run with small pagesize.
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+env003
+ Test DB_TMP_DIR and env name resolution
+ With an environment path specified using -home, and then again
+ with it specified by the environment variable DB_HOME:
+ 1) Make sure that the DB_TMP_DIR config file option is respected
+ a) as a relative pathname.
+ b) as an absolute pathname.
+ 2) Make sure that the -tmp_dir config option is respected,
+ again as relative and absolute pathnames.
+ 3) Make sure that if -both- -tmp_dir and a file are present,
+ only the file is respected (see doc/env/naming.html).
-test012 Large keys/small data
- Same as test003 except use big keys (source files and
- executables) and small data (the file/executable names).
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+env004
+ Test multiple data directories. Do a bunch of different opens
+ to make sure that the files are detected in different directories.
-test013 Partial put test
- Overwrite entire records using partial puts. Make sure
- that NOOVERWRITE flag works.
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+env005
+ Test that using subsystems without initializing them correctly
+ returns an error. Cannot test mpool, because it is assumed in
+ the Tcl code.
-test014 Exercise partial puts on short data
- Run 5 combinations of numbers of characters to replace,
- and number of times to increase the size by.
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+env006
+ Make sure that all the utilities exist and run.
-test015 Partial put test
- Partial put test where the key does not initially exist.
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+env007
+ Test various DB_CONFIG config file options.
+ 1) Make sure command line option is respected
+ 2) Make sure that config file option is respected
+ 3) Make sure that if -both- DB_CONFIG and the set_<whatever>
+ method is used, only the file is respected.
+ Then test all known config options.
-test016 Partial put test
- Partial put where the datum gets shorter as a result of
- the put.
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+env008
+ Test environments and subdirectories.
-test017 Basic offpage duplicate test.
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+env009
+ Test calls to all the various stat functions. We have several
+ sprinkled throughout the test suite, but this will ensure that
+ we run all of them at least once.
-test018 Offpage duplicate test
- Key_{first,last,before,after} offpage duplicates.
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+env010
+ Run recovery in an empty directory, and then make sure we can still
+ create a database in that directory.
-test019 Partial get test.
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+env011
+ Run with region overwrite flag.
-test020 In-Memory database tests.
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+jointest
+ Test duplicate assisted joins. Executes 1, 2, 3 and 4-way joins
+ with differing index orders and selectivity.
-test021 Btree range tests.
+ We'll test 2-way, 3-way, and 4-way joins and figure that if those
+ work, everything else does as well. We'll create test databases
+ called join1.db, join2.db, join3.db, and join4.db. The number on
+ the database describes the duplication -- duplicates are of the
+ form 0, N, 2N, 3N, ... where N is the number of the database.
+ Primary.db is the primary database, and null.db is the database
+ that has no matching duplicates.
-test022 Test of DB->getbyteswapped().
+ We should test this on all btrees, all hash, and a combination thereof
-test023 Duplicate test
- Exercise deletes and cursor operations within a
- duplicate set.
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+lock001
+ Make sure that the basic lock tests work. Do some simple gets
+ and puts for a single locker.
-test024 Record number retrieval test.
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+lock002
+ Exercise basic multi-process aspects of lock.
-test025 DB_APPEND flag test.
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+lock003
+ Exercise multi-process aspects of lock. Generate a bunch of parallel
+ testers that try to randomly obtain locks; make sure that the locks
+ correctly protect corresponding objects.
-test026 Small keys/medium data w/duplicates
- Put/get per key.
- Loop through keys -- delete each key
- ... test that cursors delete duplicates correctly
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+lock004
+	Test locker ids wrapping around.
-test027 Off-page duplicate test
- Test026 with parameters to force off-page duplicates.
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+lock005
+ Check that page locks are being released properly.
-test028 Cursor delete test
- Test put operations after deleting through a cursor.
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+log001
+ Read/write log records.
-test029 Record renumbering
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+log002
+ Tests multiple logs
+ Log truncation
+ LSN comparison and file functionality.
-test030 DB_NEXT_DUP functionality
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+log003
+ Verify that log_flush is flushing records correctly.
-test031 Duplicate sorting functionality
- Make sure DB_NODUPDATA works.
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+log004
+ Make sure that if we do PREVs on a log, but the beginning of the
+ log has been truncated, we do the right thing.
-test032 DB_GET_BOTH
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+log005
+ Check that log file sizes can change on the fly.
-test033 DB_GET_BOTH without comparison function
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+memp001
+ Randomly updates pages.
-test034 Test032 with off-page duplicates
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+memp002
+ Tests multiple processes accessing and modifying the same files.
-test035 Test033 with off-page duplicates
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+memp003
+ Test reader-only/writer process combinations; we use the access methods
+ for testing.
-test036 Test KEYFIRST and KEYLAST when the key doesn't exist
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+mutex001
+ Test basic mutex functionality
-test037 Test DB_RMW
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+mutex002
+ Test basic mutex synchronization
-test038 DB_GET_BOTH on deleted items
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+mutex003
+ Generate a bunch of parallel testers that try to randomly obtain locks.
-test039 DB_GET_BOTH on deleted items without comparison function
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+recd001
+ Per-operation recovery tests for non-duplicate, non-split
+ messages. Makes sure that we exercise redo, undo, and do-nothing
+ condition. Any test that appears with the message (change state)
+ indicates that we've already run the particular test, but we are
+ running it again so that we can change the state of the data base
+ to prepare for the next test (this applies to all other recovery
+ tests as well).
+
+ These are the most basic recovery tests. We do individual recovery
+ tests for each operation in the access method interface. First we
+ create a file and capture the state of the database (i.e., we copy
+	it). Then we run a transaction containing a single operation. In
+ one test, we abort the transaction and compare the outcome to the
+ original copy of the file. In the second test, we restore the
+ original copy of the database and then run recovery and compare
+ this against the actual database.
-test040 Test038 with off-page duplicates
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+recd002
+ Split recovery tests. For every known split log message, makes sure
+ that we exercise redo, undo, and do-nothing condition.
-test041 Test039 with off-page duplicates
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+recd003
+ Duplicate recovery tests. For every known duplicate log message,
+ makes sure that we exercise redo, undo, and do-nothing condition.
-test042 Concurrent Data Store test
+ Test all the duplicate log messages and recovery operations. We make
+ sure that we exercise all possible recovery actions: redo, undo, undo
+ but no fix necessary and redo but no fix necessary.
-test043 Recno renumbering and implicit creation test
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+recd004
+ Big key test where big key gets elevated to internal page.
-test044 Small system integration tests
- Test proper functioning of the checkpoint daemon,
- recovery, transactions, etc.
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+recd005
+ Verify reuse of file ids works on catastrophic recovery.
-test045 Small random tester
- Runs a number of random add/delete/retrieve operations.
- Tests both successful conditions and error conditions.
+ Make sure that we can do catastrophic recovery even if we open
+ files using the same log file id.
-test046 Overwrite test of small/big key/data with cursor checks.
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+recd006
+ Nested transactions.
-test047 Cursor get test with SET_RANGE option.
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+recd007
+ File create/delete tests.
-test048 Cursor stability across Btree splits.
+ This is a recovery test for create/delete of databases. We have
+ hooks in the database so that we can abort the process at various
+ points and make sure that the transaction doesn't commit. We
+ then need to recover and make sure the file is correctly existing
+ or not, as the case may be.
-test049 Cursor operations on unitialized cursors.
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+recd008
+ Test deeply nested transactions and many-child transactions.
-test050 Cursor overwrite test for Recno.
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+recd009
+ Verify record numbering across split/reverse splits and recovery.
-test051 Fixed-length record Recno test.
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+recd010
+ Test stability of btree duplicates across btree off-page dup splits
+ and reverse splits and across recovery.
-test052 Renumbering record Recno test.
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+recd011
+ Verify that recovery to a specific timestamp works.
-test053 DB_REVSPLITOFF flag test
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+recd012
+ Test of log file ID management. [#2288]
+ Test recovery handling of file opens and closes.
-test054 Cursor maintenance during key/data deletion.
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+recd013
+ Test of cursor adjustment on child transaction aborts. [#2373]
-test054 Basic cursor operations.
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+recd014
+ This is a recovery test for create/delete of queue extents. We
+ then need to recover and make sure the file is correctly existing
+ or not, as the case may be.
-test055 Cursor maintenance during key deletes.
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+recd015
+ This is a recovery test for testing lots of prepared txns.
+	This test forces txn_recover to be called with the
+	DB_FIRST flag and then DB_NEXT.
-test056 Cursor maintenance during deletes.
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+recd016
+	This is a recovery test for running recovery while
+ recovery is already running. While bad things may or may not
+ happen, if recovery is then run properly, things should be correct.
-test057 Cursor maintenance during key deletes.
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+recd017
+ Test recovery and security. This is basically a watered
+ down version of recd001 just to verify that encrypted environments
+ can be recovered.
-test058 Verify that deleting and reading duplicates results in
- correct ordering.
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+recd018
+ Test recover of closely interspersed checkpoints and commits.
-test059 Cursor ops work with a partial length of 0.
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+recd019
+ Test txn id wrap-around and recovery.
-test060 Test of the DB_EXCL flag to DB->open().
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+recd020
+ Test recovery after checksum error.
-test061 Test of txn abort and commit for in-memory databases.
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+rep001
+ Replication rename and forced-upgrade test.
-test062 Test of partial puts (using DB_CURRENT) onto duplicate pages.
+ Run a modified version of test001 in a replicated master environment;
+ verify that the database on the client is correct.
+ Next, remove the database, close the master, upgrade the
+ client, reopen the master, and make sure the new master can correctly
+ run test001 and propagate it in the other direction.
-test063 Test of the DB_RDONLY flag to DB->open
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+rep002
+ Basic replication election test.
-test064 Test of DB->get_type
+ Run a modified version of test001 in a replicated master environment;
+ hold an election among a group of clients to make sure they select
+ a proper master from amongst themselves, in various scenarios.
-test065 Test of DB->stat(DB_RECORDCOUNT)
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+rep003
+ Repeated shutdown/restart replication test
-test066 Test of cursor overwrites of DB_CURRENT w/ duplicates.
+ Run a quick put test in a replicated master environment; start up,
+ shut down, and restart client processes, with and without recovery.
+ To ensure that environment state is transient, use DB_PRIVATE.
-test067 Test of DB_CURRENT partial puts onto almost empty duplicate
- pages, with and without DB_DUP_SORT.
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+rep004
+ Test of DB_REP_LOGSONLY.
-test068 Test of DB_BEFORE and DB_AFTER with partial puts.
+ Run a quick put test in a master environment that has one logs-only
+ client. Shut down, then run catastrophic recovery in the logs-only
+ client and check that the database is present and populated.
-test069 Test of DB_CURRENT partial puts without duplicates--
- test067 w/ small ndups.
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+rep005
+ Replication election test with error handling.
-test070 Test of DB_CONSUME (Four consumers, 1000 items.)
+ Run a modified version of test001 in a replicated master environment;
+ hold an election among a group of clients to make sure they select
+ a proper master from amongst themselves, forcing errors at various
+ locations in the election path.
-test071 Test of DB_CONSUME (One consumer, 10000 items.)
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+rpc001
+ Test RPC server timeouts for cursor, txn and env handles.
+ Test RPC specifics, primarily that unsupported functions return
+ errors and such.
-test072 Cursor stability test when dups are moved off-page
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+rpc002
+ Test invalid RPC functions and make sure we error them correctly
-test073 Test of cursor stability on duplicate pages.
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+rpc004
+ Test RPC server and security
-test074 Test of DB_NEXT_NODUP.
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+rpc005
+ Test RPC server handle ID sharing
-test075 Test of DB->rename().
- (formerly test of DB_TRUNCATE cached page invalidation [#1487])
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+rsrc001
+ Recno backing file test. Try different patterns of adding
+ records and making sure that the corresponding file matches.
-test076 Test creation of many small databases in a single environment.
- [#1528].
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+rsrc002
+ Recno backing file test #2: test of set_re_delim. Specify a backing
+ file with colon-delimited records, and make sure they are correctly
+ interpreted.
-test077 Test of DB_GET_RECNO [#1206].
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+rsrc003
+ Recno backing file test. Try different patterns of adding
+ records and making sure that the corresponding file matches.
-test078 Test of DBC->c_count().
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+rsrc004
+ Recno backing file test for EOF-terminated records.
-test079 Test of deletes in large trees. (test006 w/ sm. pagesize).
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+scr###
+ The scr### directories are shell scripts that test a variety of
+ things, including things about the distribution itself. These
+ tests won't run on most systems, so don't even try to run them.
-test080 Test of DB->remove()
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+sdbtest001
+ Tests multiple access methods in one subdb
+ Open several subdbs, each with a different access method
+ Small keys, small data
+ Put/get per key per subdb
+ Dump file, verify per subdb
+ Close, reopen per subdb
+ Dump file, verify per subdb
+
+ Make several subdb's of different access methods all in one DB.
+ Rotate methods and repeat [#762].
+ Use the first 10,000 entries from the dictionary.
+ Insert each with self as key and data; retrieve each.
+ After all are entered, retrieve all; compare output to original.
+ Close file, reopen, do retrieve and re-verify.
-test081 Test off-page duplicates and overflow pages together with
- very large keys (key/data as file contents).
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+sdbtest002
+ Tests multiple access methods in one subdb access by multiple
+ processes.
+ Open several subdbs, each with a different access method
+ Small keys, small data
+ Put/get per key per subdb
+ Fork off several child procs to each delete selected
+ data from their subdb and then exit
+ Dump file, verify contents of each subdb is correct
+ Close, reopen per subdb
+ Dump file, verify per subdb
+
+ Make several subdb's of different access methods all in one DB.
+ Fork of some child procs to each manipulate one subdb and when
+ they are finished, verify the contents of the databases.
+ Use the first 10,000 entries from the dictionary.
+ Insert each with self as key and data; retrieve each.
+ After all are entered, retrieve all; compare output to original.
+ Close file, reopen, do retrieve and re-verify.
-test082 Test of DB_PREV_NODUP (uses test074).
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+sec001
+ Test of security interface
-test083 Test of DB->key_range.
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+sec002
+ Test of security interface and catching errors in the
+ face of attackers overwriting parts of existing files.
-test084 Sanity test of large (64K) pages.
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+sindex001
+ Basic secondary index put/delete test
-test085 Test of cursor behavior when a cursor is pointing to a deleted
- btree key which then has duplicates added. [#2473]
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+sindex002
+ Basic cursor-based secondary index put/delete test
-test086 Test of cursor stability across btree splits/rsplits with
- subtransaction aborts (a variant of test048). [#2373]
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+sindex003
+ sindex001 with secondaries created and closed mid-test
+ Basic secondary index put/delete test with secondaries
+ created mid-test.
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+sindex004
+ sindex002 with secondaries created and closed mid-test
+ Basic cursor-based secondary index put/delete test, with
+ secondaries created mid-test.
=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
-Cursor Join.
+sindex006
+ Basic secondary index put/delete test with transactions
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+subdb001 Tests mixing db and subdb operations
+ Tests mixing db and subdb operations
+ Create a db, add data, try to create a subdb.
+ Test naming db and subdb with a leading - for correct parsing
+ Existence check -- test use of -excl with subdbs
+
+ Test non-subdb and subdb operations
+ Test naming (filenames begin with -)
+ Test existence (cannot create subdb of same name with -excl)
+
=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
-jointest Test duplicate assisted joins.
- Executes 1, 2, 3 and 4-way joins with differing
- index orders and selectivity.
+subdb002
+ Tests basic subdb functionality
+ Small keys, small data
+ Put/get per key
+ Dump file
+ Close, reopen
+ Dump file
+
+ Use the first 10,000 entries from the dictionary.
+ Insert each with self as key and data; retrieve each.
+ After all are entered, retrieve all; compare output to original.
+ Close file, reopen, do retrieve and re-verify.
+ Then repeat using an environment.
=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
-Deadlock detection.
+subdb003
+ Tests many subdbs
+ Creates many subdbs and puts a small amount of
+ data in each (many defaults to 2000)
+
+ Use the first 10,000 entries from the dictionary as subdbnames.
+ Insert each with entry as name of subdatabase and a partial list
+ as key/data. After all are entered, retrieve all; compare output
+ to original. Close file, reopen, do retrieve and re-verify.
+
=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
-dead001 Use two different configurations to test deadlock
- detection among a variable number of processes. One
- configuration has the processes deadlocked in a ring.
- The other has the processes all deadlocked on a single
- resource.
+subdb004
+ Tests large subdb names
+ subdb name = filecontents,
+ key = filename, data = filecontents
+ Put/get per key
+ Dump file
+ Dump subdbs, verify data and subdb name match
+
+ Create 1 db with many large subdbs. Use the contents as subdb names.
+ Take the source files and dbtest executable and enter their names as
+ the key with their contents as data. After all are entered, retrieve
+ all; compare output to original. Close file, reopen, do retrieve and
+ re-verify.
-dead002 Same test as dead001, but use "detect on every collision"
- instead of separate deadlock detector.
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+subdb005
+ Tests cursor operations in subdbs
+ Put/get per key
+ Verify cursor operations work within subdb
+ Verify cursor operations do not work across subdbs
-dead003 Same test as dead002, but explicitly specify oldest or
- youngest. Verify the correct lock was aborted/granted.
=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
-Lock tests
+subdb006
+ Tests intra-subdb join
+
+ We'll test 2-way, 3-way, and 4-way joins and figure that if those work,
+ everything else does as well. We'll create test databases called
+ sub1.db, sub2.db, sub3.db, and sub4.db. The number on the database
+ describes the duplication -- duplicates are of the form 0, N, 2N, 3N,
+ ... where N is the number of the database. Primary.db is the primary
+ database, and sub0.db is the database that has no matching duplicates.
+ All of these are within a single database.
+
=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
-lock001 Basic lock test, gets/puts. Contention without waiting.
+subdb007
+ Tests page size difference errors between subdbs.
+ Test 3 different scenarios for page sizes.
+ 1. Create/open with a default page size, 2nd subdb create with
+ specified different one, should error.
+ 2. Create/open with specific page size, 2nd subdb create with
+ different one, should error.
+ 3. Create/open with specified page size, 2nd subdb create with
+ same specified size, should succeed.
+ (4th combo of using all defaults is a basic test, done elsewhere)
-lock002 Multi-process lock tests.
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+subdb008
+ Tests lorder difference errors between subdbs.
+ Test 3 different scenarios for lorder.
+ 1. Create/open with specific lorder, 2nd subdb create with
+ different one, should error.
+	2. Create/open with a default lorder, 2nd subdb create with
+ specified different one, should error.
+ 3. Create/open with specified lorder, 2nd subdb create with
+ same specified lorder, should succeed.
+ (4th combo of using all defaults is a basic test, done elsewhere)
-lock003 Multiprocess random lock test.
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+subdb009
+ Test DB->rename() method for subdbs
=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
-Logging test
+subdb010
+ Test DB->remove() method and DB->truncate() for subdbs
+
=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
-log001 Read/write log records.
+subdb011
+ Test deleting Subdbs with overflow pages
+ Create 1 db with many large subdbs.
+ Test subdatabases with overflow pages.
-log002 Tests multiple logs
- Log truncation
- lsn comparison and file functionality.
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+subdb012
+ Test subdbs with locking and transactions
+ Tests creating and removing subdbs while handles
+ are open works correctly, and in the face of txns.
-log003 Verify that log_flush is flushing records correctly.
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test001
+ Small keys/data
+ Put/get per key
+ Dump file
+ Close, reopen
+ Dump file
+
+ Use the first 10,000 entries from the dictionary.
+ Insert each with self as key and data; retrieve each.
+ After all are entered, retrieve all; compare output to original.
+ Close file, reopen, do retrieve and re-verify.
-log004 Prev on log when beginning of log has been truncated.
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test002
+ Small keys/medium data
+ Put/get per key
+ Dump file
+ Close, reopen
+ Dump file
+
+ Use the first 10,000 entries from the dictionary.
+ Insert each with self as key and a fixed, medium length data string;
+ retrieve each. After all are entered, retrieve all; compare output
+ to original. Close file, reopen, do retrieve and re-verify.
=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
-Mpool test
+test003
+ Small keys/large data
+ Put/get per key
+ Dump file
+ Close, reopen
+ Dump file
+
+ Take the source files and dbtest executable and enter their names
+ as the key with their contents as data. After all are entered,
+ retrieve all; compare output to original. Close file, reopen, do
+ retrieve and re-verify.
+
=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
-memp001 Randomly updates pages.
+test004
+ Small keys/medium data
+ Put/get per key
+ Sequential (cursor) get/delete
-memp002 Tests multiple processes accessing and modifying the same
- files.
+ Check that cursor operations work. Create a database.
+ Read through the database sequentially using cursors and
+ delete each element.
=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
-Recovery
+test005
+ Small keys/medium data
+ Put/get per key
+ Close, reopen
+ Sequential (cursor) get/delete
+
+ Check that cursor operations work. Create a database; close
+ it and reopen it. Then read through the database sequentially
+ using cursors and delete each element.
+
=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
-recd001 Per-operation recovery tests for non-duplicate, non-split
- messages. Makes sure that we exercise redo, undo, and
- do-nothing condition. Any test that appears with the
- message (change state) indicates that we've already run
- the particular test, but we are running it again so that
- we can change the state of the data base to prepare for
- the next test (this applies to all other recovery tests
- as well).
+test006
+ Small keys/medium data
+ Put/get per key
+ Keyed delete and verify
-recd002 Split recovery tests. For every known split log message,
- makes sure that we exercise redo, undo, and do-nothing
- condition.
+ Keyed delete test.
+ Create database.
+ Go through database, deleting all entries by key.
-recd003 Duplicate recovery tests. For every known duplicate log
- message, makes sure that we exercise redo, undo, and
- do-nothing condition.
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test007
+ Small keys/medium data
+ Put/get per key
+ Close, reopen
+ Keyed delete
+
+ Check that delete operations work. Create a database; close
+	database and reopen it. Then issue a delete by key for each
+ entry.
-recd004 Big key test where big key gets elevated to internal page.
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test008
+ Small keys/large data
+ Put/get per key
+ Loop through keys by steps (which change)
+ ... delete each key at step
+ ... add each key back
+ ... change step
+ Confirm that overflow pages are getting reused
+
+ Take the source files and dbtest executable and enter their names as
+ the key with their contents as data. After all are entered, begin
+	looping through the entries, deleting some pairs and then re-adding them.
-recd005 Verify reuse of file ids works on catastrophic recovery.
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test009
+ Small keys/large data
+ Same as test008; close and reopen database
-recd006 Nested transactions.
+ Check that we reuse overflow pages. Create database with lots of
+ big key/data pairs. Go through and delete and add keys back
+ randomly. Then close the DB and make sure that we have everything
+ we think we should.
-recd007 File create/delete tests.
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test010
+ Duplicate test
+ Small key/data pairs.
-recd008 Test deeply nested transactions.
+ Use the first 10,000 entries from the dictionary.
+ Insert each with self as key and data; add duplicate records for each.
+ After all are entered, retrieve all; verify output.
+ Close file, reopen, do retrieve and re-verify.
+ This does not work for recno
-recd009 Verify record numbering across split/reverse splits
- and recovery.
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test011
+ Duplicate test
+ Small key/data pairs.
+ Test DB_KEYFIRST, DB_KEYLAST, DB_BEFORE and DB_AFTER.
+ To test off-page duplicates, run with small pagesize.
-recd010 Verify duplicates across split/reverse splits
- and recovery.
+ Use the first 10,000 entries from the dictionary.
+ Insert each with self as key and data; add duplicate records for each.
+ Then do some key_first/key_last add_before, add_after operations.
+ This does not work for recno
-recd011 Verify that recovery to a specific timestamp works.
+ To test if dups work when they fall off the main page, run this with
+ a very tiny page size.
-recd012 Test of log file ID management. [#2288]
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test012
+ Large keys/small data
+ Same as test003 except use big keys (source files and
+ executables) and small data (the file/executable names).
-recd013 Test of cursor adjustment on child transaction aborts. [#2373]
+ Take the source files and dbtest executable and enter their contents
+ as the key with their names as data. After all are entered, retrieve
+ all; compare output to original. Close file, reopen, do retrieve and
+ re-verify.
=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
-Subdatabase tests
+test013
+ Partial put test
+ Overwrite entire records using partial puts.
+	Make sure that the NOOVERWRITE flag works.
+
+ 1. Insert 10000 keys and retrieve them (equal key/data pairs).
+ 2. Attempt to overwrite keys with NO_OVERWRITE set (expect error).
+ 3. Actually overwrite each one with its datum reversed.
+
+ No partial testing here.
+
=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
-subdb001 Tests mixing db and subdb operations
- Create a db, add data, try to create a subdb.
- Test naming db and subdb with a leading - for
- correct parsing
- Existence check -- test use of -excl with subdbs
+test014
+ Exercise partial puts on short data
+ Run 5 combinations of numbers of characters to replace,
+ and number of times to increase the size by.
+
+ Partial put test, small data, replacing with same size. The data set
+ consists of the first nentries of the dictionary. We will insert them
+ (and retrieve them) as we do in test 1 (equal key/data pairs). Then
+ we'll try to perform partial puts of some characters at the beginning,
+ some at the end, and some at the middle.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test015
+ Partial put test
+ Partial put test where the key does not initially exist.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test016
+ Partial put test
+ Partial put where the datum gets shorter as a result of the put.
+
+ Partial put test where partial puts make the record smaller.
+ Use the first 10,000 entries from the dictionary.
+ Insert each with self as key and a fixed, medium length data string;
+ retrieve each. After all are entered, go back and do partial puts,
+ replacing a random-length string with the key value.
+ Then verify.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test017
+ Basic offpage duplicate test.
+
+ Run duplicates with small page size so that we test off page duplicates.
+ Then after we have an off-page database, test with overflow pages too.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test018
+ Offpage duplicate test
+ Key_{first,last,before,after} offpage duplicates.
+ Run duplicates with small page size so that we test off page
+ duplicates.
-subdb002 Tests basic subdb functionality
- Small keys, small data
- Put/get per key
- Dump file
- Close, reopen
- Dump file
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test019
+ Partial get test.
-subdb003 Tests many subdbs
- Creates many subdbs and puts a small amount of
- data in each (many defaults to 2000)
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test020
+ In-Memory database tests.
-subdb004 Tests large subdb names
- subdb name = filecontents,
- key = filename, data = filecontents
- Put/get per key
- Dump file
- Dump subdbs, verify data and subdb name match
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test021
+ Btree range tests.
-subdb005 Tests cursor operations in subdbs
- Put/get per key
- Verify cursor operations work within subdb
- Verify cursor operations do not work across subdbs
+ Use the first 10,000 entries from the dictionary.
+ Insert each with self, reversed as key and self as data.
+ After all are entered, retrieve each using a cursor SET_RANGE, and
+ getting about 20 keys sequentially after it (in some cases we'll
+ run out towards the end of the file).
-subdb006 Tests intra-subdb join
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test022
+ Test of DB->getbyteswapped().
-subdb007 Tests page size differences between subdbs
- Open several subdbs, each with a different pagesize
- Small keys, small data
- Put/get per key per subdb
- Dump file, verify per subdb
- Close, reopen per subdb
- Dump file, verify per subdb
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test023
+ Duplicate test
+ Exercise deletes and cursor operations within a duplicate set.
+ Add a key with duplicates (first time on-page, second time off-page)
+ Number the dups.
+ Delete dups and make sure that CURRENT/NEXT/PREV work correctly.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test024
+ Record number retrieval test.
+ Test the Btree and Record number get-by-number functionality.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test025
+ DB_APPEND flag test.
-subdb008 Tests lorder differences between subdbs
- Open several subdbs, each with a different/random lorder
- Small keys, small data
- Put/get per key per subdb
- Dump file, verify per subdb
- Close, reopen per subdb
- Dump file, verify per subdb
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test026
+ Small keys/medium data w/duplicates
+ Put/get per key.
+ Loop through keys -- delete each key
+ ... test that cursors delete duplicates correctly
-subdb009 Test DB->rename() method for subdbs
+	Keyed delete test through a cursor. If ndups is small, this will
+ test on-page dups; if it's large, it will test off-page dups.
-subdb010 Test DB->remove() method for subdbs
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test027
+ Off-page duplicate test
+ Test026 with parameters to force off-page duplicates.
-subdbtest001 Tests multiple access methods in one subdb
- Open several subdbs, each with a different access method
- Small keys, small data
- Put/get per key per subdb
- Dump file, verify per subdb
- Close, reopen per subdb
- Dump file, verify per subdb
+	Check that delete operations work. Create a database; close
+	the database and reopen it. Then issue a delete by key for
+	each entry.
-subdbtest002 Tests multiple access methods in one subdb access by
- multiple processes
- Open several subdbs, each with a different access method
- Small keys, small data
- Put/get per key per subdb
- Fork off several child procs to each delete selected
- data from their subdb and then exit
- Dump file, verify contents of each subdb is correct
- Close, reopen per subdb
- Dump file, verify per subdb
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test028
+ Cursor delete test
+ Test put operations after deleting through a cursor.
=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
-Transaction tests
+test029
+ Test the Btree and Record number renumbering.
+
=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
-txn001 Begin, commit, abort testing.
+test030
+ Test DB_NEXT_DUP Functionality.
-txn002 Verify that read-only transactions do not write log records.
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test031
+ Duplicate sorting functionality
+ Make sure DB_NODUPDATA works.
+
+ Use the first 10,000 entries from the dictionary.
+ Insert each with self as key and "ndups" duplicates
+	For the data field, prepend random five-char strings (see test032)
+	so that we force the duplicate sorting code to do something.
+ Along the way, test that we cannot insert duplicate duplicates
+ using DB_NODUPDATA.
+
+	By setting ndups large, we can make this an off-page test.
+	After all are entered, retrieve all; verify output.
+	Close file, reopen, do retrieve and re-verify.
+	This does not work for recno.
=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
-Environment tests
+test032
+ DB_GET_BOTH, DB_GET_BOTH_RANGE
+
+ Use the first 10,000 entries from the dictionary. Insert each with
+ self as key and "ndups" duplicates. For the data field, prepend the
+ letters of the alphabet in a random order so we force the duplicate
+ sorting code to do something. By setting ndups large, we can make
+ this an off-page test.
+
+ Test the DB_GET_BOTH functionality by retrieving each dup in the file
+ explicitly. Test the DB_GET_BOTH_RANGE functionality by retrieving
+ the unique key prefix (cursor only). Finally test the failure case.
+
=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
-env001 Test of env remove interface (formerly env_remove).
+test033
+ DB_GET_BOTH without comparison function
+
+ Use the first 10,000 entries from the dictionary. Insert each with
+ self as key and data; add duplicate records for each. After all are
+ entered, retrieve all and verify output using DB_GET_BOTH (on DB and
+ DBC handles) and DB_GET_BOTH_RANGE (on a DBC handle) on existent and
+ nonexistent keys.
-env002 Test of DB_LOG_DIR and env name resolution.
+ XXX
+ This does not work for rbtree.
-env003 Test of DB_TMP_DIR and env name resolution.
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test034
+ test032 with off-page duplicates
+ DB_GET_BOTH, DB_GET_BOTH_RANGE functionality with off-page duplicates.
-env004 Multiple data directories test.
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test035
+ Test033 with off-page duplicates
+ DB_GET_BOTH functionality with off-page duplicates.
-env005 Test for using subsystems without initializing them correctly.
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test036
+ Test KEYFIRST and KEYLAST when the key doesn't exist
+ Put nentries key/data pairs (from the dictionary) using a cursor
+	and KEYFIRST and KEYLAST (this tests the case where we use cursor
+ put for non-existent keys).
-env006 Smoke test that the utilities all run.
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test037
+ Test DB_RMW
=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
-RPC tests
+test038
+ DB_GET_BOTH, DB_GET_BOTH_RANGE on deleted items
+
+ Use the first 10,000 entries from the dictionary. Insert each with
+ self as key and "ndups" duplicates. For the data field, prepend the
+ letters of the alphabet in a random order so we force the duplicate
+ sorting code to do something. By setting ndups large, we can make
+ this an off-page test
+
+ Test the DB_GET_BOTH and DB_GET_BOTH_RANGE functionality by retrieving
+ each dup in the file explicitly. Then remove each duplicate and try
+ the retrieval again.
+
=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
-[RPC tests also include running all Access Method tests for all methods
-via an RPC server]
+test039
+ DB_GET_BOTH/DB_GET_BOTH_RANGE on deleted items without comparison
+ function.
-rpc001 Test RPC server timeouts for cursor, txn and env handles.
+ Use the first 10,000 entries from the dictionary. Insert each with
+ self as key and "ndups" duplicates. For the data field, prepend the
+ letters of the alphabet in a random order so we force the duplicate
+ sorting code to do something. By setting ndups large, we can make
+ this an off-page test.
-rpc002 Test unsupported functions
+ Test the DB_GET_BOTH and DB_GET_BOTH_RANGE functionality by retrieving
+ each dup in the file explicitly. Then remove each duplicate and try
+ the retrieval again.
=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
-Recno backing file tests
+test040
+ Test038 with off-page duplicates
+ DB_GET_BOTH functionality with off-page duplicates.
+
=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
-rsrc001 Basic backing file test (put/get)
+test041
+ Test039 with off-page duplicates
+ DB_GET_BOTH functionality with off-page duplicates.
-rsrc002 Test of set_re_delim
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test042
+ Concurrent Data Store test (CDB)
+
+ Multiprocess DB test; verify that locking is working for the
+ concurrent access method product.
+
+ Use the first "nentries" words from the dictionary. Insert each with
+ self as key and a fixed, medium length data string. Then fire off
+ multiple processes that bang on the database. Each one should try to
+ read and write random keys. When they rewrite, they'll append their
+	pid to the data string (sometimes doing a rewrite, sometimes doing a
+ partial put). Some will use cursors to traverse through a few keys
+ before finding one to write.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test043
+ Recno renumbering and implicit creation test
+ Test the Record number implicit creation and renumbering options.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test044
+ Small system integration tests
+ Test proper functioning of the checkpoint daemon,
+ recovery, transactions, etc.
+
+ System integration DB test: verify that locking, recovery, checkpoint,
+ and all the other utilities basically work.
+
+ The test consists of $nprocs processes operating on $nfiles files. A
+ transaction consists of adding the same key/data pair to some random
+ number of these files. We generate a bimodal distribution in key size
+ with 70% of the keys being small (1-10 characters) and the remaining
+ 30% of the keys being large (uniform distribution about mean $key_avg).
+ If we generate a key, we first check to make sure that the key is not
+ already in the dataset. If it is, we do a lookup.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test045
+ Small random tester
+ Runs a number of random add/delete/retrieve operations.
+ Tests both successful conditions and error conditions.
+
+ Run the random db tester on the specified access method.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test046
+ Overwrite test of small/big key/data with cursor checks.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test047
+ DBcursor->c_get get test with SET_RANGE option.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test048
+ Cursor stability across Btree splits.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test049
+ Cursor operations on uninitialized cursors.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test050
+ Overwrite test of small/big key/data with cursor checks for Recno.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test051
+ Fixed-length record Recno test.
+ 0. Test various flags (legal and illegal) to open
+ 1. Test partial puts where dlen != size (should fail)
+ 2. Partial puts for existent record -- replaces at beg, mid, and
+ end of record, as well as full replace
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test052
+ Renumbering record Recno test.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test053
+ Test of the DB_REVSPLITOFF flag in the Btree and Btree-w-recnum
+ methods.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test054
+ Cursor maintenance during key/data deletion.
+
+ This test checks for cursor maintenance in the presence of deletes.
+	There are N different scenarios to test:
+ 1. No duplicates. Cursor A deletes a key, do a GET for the key.
+ 2. No duplicates. Cursor is positioned right before key K, Delete K,
+ do a next on the cursor.
+ 3. No duplicates. Cursor is positioned on key K, do a regular delete
+ of K, do a current get on K.
+ 4. Repeat 3 but do a next instead of current.
+ 5. Duplicates. Cursor A is on the first item of a duplicate set, A
+ does a delete. Then we do a non-cursor get.
+ 6. Duplicates. Cursor A is in a duplicate set and deletes the item.
+ do a delete of the entire Key. Test cursor current.
+ 7. Continue last test and try cursor next.
+ 8. Duplicates. Cursor A is in a duplicate set and deletes the item.
+ Cursor B is in the same duplicate set and deletes a different item.
+ Verify that the cursor is in the right place.
+	 9. Cursors A and B are in the same place in the same duplicate set. A
+ deletes its item. Do current on B.
+ 10. Continue 8 and do a next on B.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test055
+ Basic cursor operations.
+ This test checks basic cursor operations.
+	There are N different scenarios to test:
+ 1. (no dups) Set cursor, retrieve current.
+ 2. (no dups) Set cursor, retrieve next.
+ 3. (no dups) Set cursor, retrieve prev.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test056
+ Cursor maintenance during deletes.
+ Check if deleting a key when a cursor is on a duplicate of that
+ key works.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test057
+ Cursor maintenance during key deletes.
+ Check if we handle the case where we delete a key with the cursor on
+ it and then add the same key. The cursor should not get the new item
+ returned, but the item shouldn't disappear.
+	Run two tests, one where the overwriting put is done with a put and
+ one where it's done with a cursor put.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test058
+ Verify that deleting and reading duplicates results in correct ordering.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test059
+ Cursor ops work with a partial length of 0.
+ Make sure that we handle retrieves of zero-length data items correctly.
+	The following ops should allow a partial data retrieve of 0-length.
+ db_get
+ db_cget FIRST, NEXT, LAST, PREV, CURRENT, SET, SET_RANGE
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test060
+ Test of the DB_EXCL flag to DB->open().
+ 1) Attempt to open and create a nonexistent database; verify success.
+ 2) Attempt to reopen it; verify failure.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test061
+ Test of txn abort and commit for in-memory databases.
+ a) Put + abort: verify absence of data
+ b) Put + commit: verify presence of data
+ c) Overwrite + abort: verify that data is unchanged
+ d) Overwrite + commit: verify that data has changed
+ e) Delete + abort: verify that data is still present
+ f) Delete + commit: verify that data has been deleted
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test062
+ Test of partial puts (using DB_CURRENT) onto duplicate pages.
+ Insert the first 200 words into the dictionary 200 times each with
+ self as key and <random letter>:self as data. Use partial puts to
+ append self again to data; verify correctness.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test063
+ Test of the DB_RDONLY flag to DB->open
+ Attempt to both DB->put and DBC->c_put into a database
+ that has been opened DB_RDONLY, and check for failure.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test064
+ Test of DB->get_type
+ Create a database of type specified by method.
+ Make sure DB->get_type returns the right thing with both a normal
+ and DB_UNKNOWN open.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test065
+ Test of DB->stat(DB_FASTSTAT)
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test066
+ Test of cursor overwrites of DB_CURRENT w/ duplicates.
+
+ Make sure a cursor put to DB_CURRENT acts as an overwrite in a
+ database with duplicates.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test067
+ Test of DB_CURRENT partial puts onto almost empty duplicate
+ pages, with and without DB_DUP_SORT.
+
+ Test of DB_CURRENT partial puts on almost-empty duplicate pages.
+ This test was written to address the following issue, #2 in the
+ list of issues relating to bug #0820:
+
+ 2. DBcursor->put, DB_CURRENT flag, off-page duplicates, hash and btree:
+ In Btree, the DB_CURRENT overwrite of off-page duplicate records
+ first deletes the record and then puts the new one -- this could
+ be a problem if the removal of the record causes a reverse split.
+ Suggested solution is to acquire a cursor to lock down the current
+ record, put a new record after that record, and then delete using
+ the held cursor.
+
+ It also tests the following, #5 in the same list of issues:
+ 5. DBcursor->put, DB_AFTER/DB_BEFORE/DB_CURRENT flags, DB_DBT_PARTIAL
+ set, duplicate comparison routine specified.
+ The partial change does not change how data items sort, but the
+	record to be put isn't built yet, and the record supplied is the
+ one that's checked for ordering compatibility.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test068
+ Test of DB_BEFORE and DB_AFTER with partial puts.
+ Make sure DB_BEFORE and DB_AFTER work properly with partial puts, and
+ check that they return EINVAL if DB_DUPSORT is set or if DB_DUP is not.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test069
+ Test of DB_CURRENT partial puts without duplicates-- test067 w/
+ small ndups to ensure that partial puts to DB_CURRENT work
+ correctly in the absence of duplicate pages.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test070
+ Test of DB_CONSUME (Four consumers, 1000 items.)
+
+ Fork off six processes, four consumers and two producers.
+ The producers will each put 20000 records into a queue;
+ the consumers will each get 10000.
+ Then, verify that no record was lost or retrieved twice.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test071
+ Test of DB_CONSUME (One consumer, 10000 items.)
+	This is DB Test 70, with one consumer, one producer, and 10000 items.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test072
+ Test of cursor stability when duplicates are moved off-page.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test073
+ Test of cursor stability on duplicate pages.
+
+ Does the following:
+ a. Initialize things by DB->putting ndups dups and
+ setting a reference cursor to point to each.
+ b. c_put ndups dups (and correspondingly expanding
+ the set of reference cursors) after the last one, making sure
+ after each step that all the reference cursors still point to
+ the right item.
+ c. Ditto, but before the first one.
+ d. Ditto, but after each one in sequence first to last.
+ e. Ditto, but after each one in sequence from last to first.
+ occur relative to the new datum)
+ f. Ditto for the two sequence tests, only doing a
+ DBC->c_put(DB_CURRENT) of a larger datum instead of adding a
+ new one.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test074
+ Test of DB_NEXT_NODUP.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test075
+ Test of DB->rename().
+ (formerly test of DB_TRUNCATE cached page invalidation [#1487])
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test076
+ Test creation of many small databases in a single environment. [#1528].
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test077
+ Test of DB_GET_RECNO [#1206].
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test078
+ Test of DBC->c_count(). [#303]
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test079
+ Test of deletes in large trees. (test006 w/ sm. pagesize).
+
+ Check that delete operations work in large btrees. 10000 entries
+ and a pagesize of 512 push this out to a four-level btree, with a
+ small fraction of the entries going on overflow pages.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test080
+ Test of DB->remove()
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test081
+ Test off-page duplicates and overflow pages together with
+ very large keys (key/data as file contents).
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test082
+ Test of DB_PREV_NODUP (uses test074).
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test083
+ Test of DB->key_range.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test084
+ Basic sanity test (test001) with large (64K) pages.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test085
+ Test of cursor behavior when a cursor is pointing to a deleted
+ btree key which then has duplicates added. [#2473]
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test086
+ Test of cursor stability across btree splits/rsplits with
+ subtransaction aborts (a variant of test048). [#2373]
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test087
+ Test of cursor stability when converting to and modifying
+ off-page duplicate pages with subtransaction aborts. [#2373]
+
+ Does the following:
+ a. Initialize things by DB->putting ndups dups and
+ setting a reference cursor to point to each. Do each put twice,
+ first aborting, then committing, so we're sure to abort the move
+ to off-page dups at some point.
+ b. c_put ndups dups (and correspondingly expanding
+ the set of reference cursors) after the last one, making sure
+ after each step that all the reference cursors still point to
+ the right item.
+ c. Ditto, but before the first one.
+ d. Ditto, but after each one in sequence first to last.
+ e. Ditto, but after each one in sequence from last to first.
+ occur relative to the new datum)
+ f. Ditto for the two sequence tests, only doing a
+ DBC->c_put(DB_CURRENT) of a larger datum instead of adding a
+ new one.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test088
+ Test of cursor stability across btree splits with very
+ deep trees (a variant of test048). [#2514]
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test089
+ Concurrent Data Store test (CDB)
+
+ Enhanced CDB testing to test off-page dups, cursor dups and
+ cursor operations like c_del then c_get.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test090
+ Test for functionality near the end of the queue using test001.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test091
+ Test of DB_CONSUME_WAIT.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test092
+ Test of DB_DIRTY_READ [#3395]
+
+	We set up a database with nentries in it. We then open the
+	database read-only twice, once with dirty read and once without.
+ We open the database for writing and update some entries in it.
+ Then read those new entries via db->get (clean and dirty), and
+ via cursors (clean and dirty).
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test093
+ Test using set_bt_compare.
+
+ Use the first 10,000 entries from the dictionary.
+ Insert each with self as key and data; retrieve each.
+ After all are entered, retrieve all; compare output to original.
+ Close file, reopen, do retrieve and re-verify.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test094
+ Test using set_dup_compare.
+
+ Use the first 10,000 entries from the dictionary.
+ Insert each with self as key and data; retrieve each.
+ After all are entered, retrieve all; compare output to original.
+ Close file, reopen, do retrieve and re-verify.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test095
+ Bulk get test. [#2934]
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test096
+ Db->truncate test.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test097
+ Open up a large set of database files simultaneously.
+ Adjust for local file descriptor resource limits.
+ Then use the first 1000 entries from the dictionary.
+ Insert each with self as key and a fixed, medium length data string;
+ retrieve each. After all are entered, retrieve all; compare output
+ to original.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test098
+ Test of DB_GET_RECNO and secondary indices. Open a primary and
+ a secondary, and do a normal cursor get followed by a get_recno.
+ (This is a smoke test for "Bug #1" in [#5811].)
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test099
+
+ Test of DB->get and DBC->c_get with set_recno and get_recno.
+
+ Populate a small btree -recnum database.
+ After all are entered, retrieve each using -recno with DB->get.
+ Open a cursor and do the same for DBC->c_get with set_recno.
+ Verify that set_recno sets the record number position properly.
+ Verify that get_recno returns the correct record numbers.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test100
+ Test for functionality near the end of the queue
+ using test025 (DB_APPEND).
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+test101
+ Test for functionality near the end of the queue
+ using test070 (DB_CONSUME).
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+txn001
+ Begin, commit, abort testing.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+txn002
+ Verify that read-only transactions do not write log records.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+txn003
+ Test abort/commit/prepare of txns with outstanding child txns.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+txn004
+ Test of wraparound txnids (txn001)
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+txn005
+ Test transaction ID wraparound and recovery.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+txn008
+ Test of wraparound txnids (txn002)
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+txn009
+ Test of wraparound txnids (txn003)
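
The descriptions above share a common skeleton: load the first N dictionary words, then read them back directly or through a cursor. The following is an illustrative sketch only -- it is not part of this patch -- written against the berkdb Tcl API used throughout the scripts below (berkdb_open, $db put, $db cursor, error_check_good). The proc name, the demo.db file name, and the entry count are invented for the example.

    # Sketch: dictionary load plus a SET_RANGE cursor walk, in the style
    # the test021/test032 descriptions outline.  Illustrative only.
    proc sketch_range_walk { {nentries 100} } {
        source ./include.tcl
        set db [berkdb_open -create -btree $testdir/demo.db]
        error_check_good db_open [is_valid_db $db] TRUE
        set did [open $dict]
        set count 0
        while { [gets $did str] != -1 && $count < $nentries } {
            # Self as key and data, as most of the tests above do.
            error_check_good put:$str [$db put $str $str] 0
            incr count
        }
        close $did
        # Position with SET_RANGE, then read about 20 keys sequentially.
        set dbc [$db cursor]
        set pair [$dbc get -set_range "m"]
        for { set i 0 } { $i < 20 && [llength $pair] != 0 } { incr i } {
            set pair [$dbc get -next]
        }
        error_check_good dbc_close [$dbc close] 0
        error_check_good db_close [$db close] 0
    }
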
diff --git a/bdb/test/archive.tcl b/bdb/test/archive.tcl
index 9fdbe82d137..9b5e764b2b4 100644
--- a/bdb/test/archive.tcl
+++ b/bdb/test/archive.tcl
@@ -1,33 +1,14 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: archive.tcl,v 11.14 2000/10/27 13:23:55 sue Exp $
+# $Id: archive.tcl,v 11.20 2002/04/30 19:21:21 sue Exp $
#
# Options are:
# -checkrec <checkpoint frequency>
# -dir <dbhome directory>
# -maxfilesize <maxsize of log file>
-# -stat
-proc archive_usage {} {
- puts "archive -checkrec <checkpt freq> -dir <directory> \
- -maxfilesize <max size of log files>"
-}
-proc archive_command { args } {
- source ./include.tcl
-
- # Catch a list of files output by db_archive.
- catch { eval exec $util_path/db_archive $args } output
-
- if { $is_windows_test == 1 || 1 } {
- # On Windows, convert all filenames to use forward slashes.
- regsub -all {[\\]} $output / output
- }
-
- # Output the [possibly-transformed] list.
- return $output
-}
proc archive { args } {
global alphabet
source ./include.tcl
@@ -35,17 +16,16 @@ proc archive { args } {
# Set defaults
set maxbsize [expr 8 * 1024]
set maxfile [expr 32 * 1024]
- set dostat 0
set checkrec 500
for { set i 0 } { $i < [llength $args] } {incr i} {
switch -regexp -- [lindex $args $i] {
-c.* { incr i; set checkrec [lindex $args $i] }
-d.* { incr i; set testdir [lindex $args $i] }
-m.* { incr i; set maxfile [lindex $args $i] }
- -s.* { set dostat 1 }
default {
- puts -nonewline "FAIL:[timestamp] Usage: "
- archive_usage
+ puts "FAIL:[timestamp] archive usage"
+ puts "usage: archive -checkrec <checkpt freq> \
+ -dir <directory> -maxfilesize <max size of log files>"
return
}
@@ -53,16 +33,20 @@ proc archive { args } {
}
# Clean out old log if it existed
+ puts "Archive: Log archive test"
puts "Unlinking log: error message OK"
env_cleanup $testdir
# Now run the various functionality tests
set eflags "-create -txn -home $testdir \
-log_buffer $maxbsize -log_max $maxfile"
- set dbenv [eval {berkdb env} $eflags]
+ set dbenv [eval {berkdb_env} $eflags]
error_check_bad dbenv $dbenv NULL
error_check_good dbenv [is_substr $dbenv env] 1
+ set logc [$dbenv log_cursor]
+ error_check_good log_cursor [is_valid_logc $logc $dbenv] TRUE
+
# The basic test structure here is that we write a lot of log
# records (enough to fill up 100 log files; each log file it
# small). We take periodic checkpoints. Between each pair
@@ -75,7 +59,7 @@ proc archive { args } {
# open data file and CDx is close datafile.
set baserec "1:$alphabet:2:$alphabet:3:$alphabet:4:$alphabet"
- puts "Archive.a: Writing log records; checkpoint every $checkrec records"
+ puts "\tArchive.a: Writing log records; checkpoint every $checkrec records"
set nrecs $maxfile
set rec 0:$baserec
@@ -111,7 +95,7 @@ proc archive { args } {
if { [expr $i % $checkrec] == 0 } {
# Take a checkpoint
$dbenv txn_checkpoint
- set ckp_file [lindex [lindex [$dbenv log_get -last] 0] 0]
+ set ckp_file [lindex [lindex [$logc get -last] 0] 0]
catch { archive_command -h $testdir -a } res_log_full
if { [string first db_archive $res_log_full] == 0 } {
set res_log_full ""
@@ -125,7 +109,7 @@ proc archive { args } {
res_data_full
catch { archive_command -h $testdir -s } res_data
error_check_good nlogfiles [llength $res_alllog] \
- [lindex [lindex [$dbenv log_get -last] 0] 0]
+ [lindex [lindex [$logc get -last] 0] 0]
error_check_good logs_match [llength $res_log_full] \
[llength $res_log]
error_check_good data_match [llength $res_data_full] \
@@ -206,21 +190,35 @@ proc archive { args } {
}
}
# Commit any transactions still running.
- puts "Archive: Commit any transactions still running."
+ puts "\tArchive.b: Commit any transactions still running."
foreach t $txnlist {
error_check_good txn_commit:$t [$t commit] 0
}
# Close any files that are still open.
- puts "Archive: Close open files."
+ puts "\tArchive.c: Close open files."
foreach d $dblist {
error_check_good db_close:$db [$d close] 0
}
# Close and unlink the file
+ error_check_good log_cursor_close [$logc close] 0
reset_env $dbenv
+}
+
+proc archive_command { args } {
+ source ./include.tcl
+
+ # Catch a list of files output by db_archive.
+ catch { eval exec $util_path/db_archive $args } output
- puts "Archive: Complete."
+ if { $is_windows_test == 1 || 1 } {
+ # On Windows, convert all filenames to use forward slashes.
+ regsub -all {[\\]} $output / output
+ }
+
+ # Output the [possibly-transformed] list.
+ return $output
}
proc min { a b } {
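
The archive.tcl changes above replace the old "$dbenv log_get -last" calls with an explicit log cursor. A minimal standalone sketch of that idiom, using only the calls visible in the hunks above (the environment flags are arbitrary):

    # Sketch only: obtain the last log file number through a log cursor
    # instead of the removed "$dbenv log_get -last" form.
    set dbenv [berkdb_env -create -txn -home $testdir]
    set logc [$dbenv log_cursor]
    error_check_good log_cursor [is_valid_logc $logc $dbenv] TRUE
    # As used above, the first element of the returned record is the
    # {file offset} LSN pair.
    set last_lsn  [lindex [$logc get -last] 0]
    set last_file [lindex $last_lsn 0]
    error_check_good log_cursor_close [$logc close] 0
    error_check_good env_close [$dbenv close] 0
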
diff --git a/bdb/test/bigfile001.tcl b/bdb/test/bigfile001.tcl
new file mode 100644
index 00000000000..78dcd940f5e
--- /dev/null
+++ b/bdb/test/bigfile001.tcl
@@ -0,0 +1,85 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2001-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: bigfile001.tcl,v 11.7 2002/08/10 13:39:26 bostic Exp $
+#
+# TEST bigfile001
+# TEST Create a database greater than 4 GB in size. Close, verify.
+# TEST Grow the database somewhat. Close, reverify. Lather, rinse,
+# TEST repeat. Since it will not work on all systems, this test is
+# TEST not run by default.
+proc bigfile001 { method \
+ { itemsize 4096 } { nitems 1048576 } { growby 5000 } { growtms 2 } args } {
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ puts "Bigfile: $method ($args) $nitems * $itemsize bytes of data"
+
+ env_cleanup $testdir
+
+ # Create the database. Use 64K pages; we want a good fill
+ # factor, and page size doesn't matter much. Use a 50MB
+ # cache; that should be manageable, and will help
+ # performance.
+ set dbname $testdir/big.db
+
+ set db [eval {berkdb_open -create} {-pagesize 65536 \
+ -cachesize {0 50000000 0}} $omethod $args $dbname]
+ error_check_good db_open [is_valid_db $db] TRUE
+
+ puts -nonewline "\tBigfile.a: Creating database...0%..."
+ flush stdout
+
+ set data [string repeat z $itemsize]
+
+ set more_than_ten_already 0
+ for { set i 0 } { $i < $nitems } { incr i } {
+ set key key[format %08u $i]
+
+ error_check_good db_put($i) [$db put $key $data] 0
+
+ if { $i % 5000 == 0 } {
+ set pct [expr 100 * $i / $nitems]
+ puts -nonewline "\b\b\b\b\b"
+ if { $pct >= 10 } {
+ if { $more_than_ten_already } {
+ puts -nonewline "\b"
+ } else {
+ set more_than_ten_already 1
+ }
+ }
+
+ puts -nonewline "$pct%..."
+ flush stdout
+ }
+ }
+ puts "\b\b\b\b\b\b100%..."
+ error_check_good db_close [$db close] 0
+
+ puts "\tBigfile.b: Verifying database..."
+ error_check_good verify \
+ [verify_dir $testdir "\t\t" 0 0 1 50000000] 0
+
+ puts "\tBigfile.c: Grow database $growtms times by $growby items"
+
+ for { set j 0 } { $j < $growtms } { incr j } {
+ set db [eval {berkdb_open} {-cachesize {0 50000000 0}} $dbname]
+ error_check_good db_open [is_valid_db $db] TRUE
+ puts -nonewline "\t\tBigfile.c.1: Adding $growby items..."
+ flush stdout
+ for { set i 0 } { $i < $growby } { incr i } {
+ set key key[format %08u $i].$j
+ error_check_good db_put($j.$i) [$db put $key $data] 0
+ }
+ error_check_good db_close [$db close] 0
+ puts "done."
+
+ puts "\t\tBigfile.c.2: Verifying database..."
+ error_check_good verify($j) \
+ [verify_dir $testdir "\t\t\t" 0 0 1 50000000] 0
+ }
+}
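
A hedged usage note, not part of the patch: the proc takes an access method plus item size, item count, grow increment, and grow count, so an interactive run mirroring the defaults above might look like the following (the btree method choice is only an example).

    # Example invocation; values repeat the defaults in the proc header.
    # The path to test.tcl depends on the build directory layout.
    source ./test.tcl
    bigfile001 btree 4096 1048576 5000 2
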
diff --git a/bdb/test/bigfile002.tcl b/bdb/test/bigfile002.tcl
new file mode 100644
index 00000000000..f3e6defeaba
--- /dev/null
+++ b/bdb/test/bigfile002.tcl
@@ -0,0 +1,45 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2001-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: bigfile002.tcl,v 11.7 2002/08/10 13:39:26 bostic Exp $
+#
+# TEST bigfile002
+# TEST This one should be faster and not require so much disk space,
+# TEST although it doesn't test as extensively. Create an mpool file
+# TEST with 1K pages. Dirty page 6000000. Sync.
+proc bigfile002 { args } {
+ source ./include.tcl
+
+ puts -nonewline \
+ "Bigfile002: Creating large, sparse file through mpool..."
+ flush stdout
+
+ env_cleanup $testdir
+
+ # Create env.
+ set env [berkdb_env -create -home $testdir]
+ error_check_good valid_env [is_valid_env $env] TRUE
+
+ # Create the file.
+ set name big002.file
+ set file [$env mpool -create -pagesize 1024 $name]
+
+ # Dirty page 6000000
+ set pg [$file get -create 6000000]
+ error_check_good pg_init [$pg init A] 0
+ error_check_good pg_set [$pg is_setto A] 1
+
+ # Put page back.
+ error_check_good pg_put [$pg put -dirty] 0
+
+ # Fsync.
+ error_check_good fsync [$file fsync] 0
+
+ puts "succeeded."
+
+ # Close.
+ error_check_good fclose [$file close] 0
+ error_check_good env_close [$env close] 0
+}
diff --git a/bdb/test/byteorder.tcl b/bdb/test/byteorder.tcl
index d9e44e1d27d..823ca46270d 100644
--- a/bdb/test/byteorder.tcl
+++ b/bdb/test/byteorder.tcl
@@ -1,23 +1,34 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: byteorder.tcl,v 11.7 2000/11/16 23:56:18 ubell Exp $
+# $Id: byteorder.tcl,v 11.12 2002/07/29 18:09:25 sue Exp $
#
# Byte Order Test
# Use existing tests and run with both byte orders.
proc byteorder { method {nentries 1000} } {
+ source ./include.tcl
puts "Byteorder: $method $nentries"
- eval {test001 $method $nentries 0 "01" -lorder 1234}
- eval {test001 $method $nentries 0 "01" -lorder 4321}
+ eval {test001 $method $nentries 0 "01" 0 -lorder 1234}
+ eval {verify_dir $testdir}
+ eval {test001 $method $nentries 0 "01" 0 -lorder 4321}
+ eval {verify_dir $testdir}
eval {test003 $method -lorder 1234}
+ eval {verify_dir $testdir}
eval {test003 $method -lorder 4321}
+ eval {verify_dir $testdir}
eval {test010 $method $nentries 5 10 -lorder 1234}
+ eval {verify_dir $testdir}
eval {test010 $method $nentries 5 10 -lorder 4321}
+ eval {verify_dir $testdir}
eval {test011 $method $nentries 5 11 -lorder 1234}
+ eval {verify_dir $testdir}
eval {test011 $method $nentries 5 11 -lorder 4321}
+ eval {verify_dir $testdir}
eval {test018 $method $nentries -lorder 1234}
+ eval {verify_dir $testdir}
eval {test018 $method $nentries -lorder 4321}
+ eval {verify_dir $testdir}
}
diff --git a/bdb/test/conscript.tcl b/bdb/test/conscript.tcl
index 11d0eb58e7d..fd12c6e51a0 100644
--- a/bdb/test/conscript.tcl
+++ b/bdb/test/conscript.tcl
@@ -1,9 +1,9 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1999, 2000
+# Copyright (c) 1999-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: conscript.tcl,v 11.12 2000/12/01 04:28:36 ubell Exp $
+# $Id: conscript.tcl,v 11.17 2002/03/22 21:43:06 krinsky Exp $
#
# Script for DB_CONSUME test (test070.tcl).
# Usage: conscript dir file runtype nitems outputfile tnum args
@@ -28,17 +28,18 @@ proc consumescript_produce { db_cmd nitems tnum args } {
set ret 0
for { set ndx 0 } { $ndx < $nitems } { incr ndx } {
set oret $ret
+ if { 0xffffffff > 0 && $oret > 0x7fffffff } {
+ incr oret [expr 0 - 0x100000000]
+ }
set ret [$db put -append [chop_data q $mydata]]
error_check_good db_put \
[expr $ret > 0 ? $oret < $ret : \
$oret < 0 ? $oret < $ret : $oret > $ret] 1
}
- # XXX: We permit incomplete syncs because they seem to
- # be unavoidable and not damaging.
+
set ret [catch {$db close} res]
- error_check_good db_close:$pid [expr ($ret == 0) ||\
- ([is_substr $res DB_INCOMPLETE] == 1)] 1
+ error_check_good db_close:$pid $ret 0
puts "\t\tTest0$tnum: Producer $pid finished."
}
@@ -67,10 +68,9 @@ proc consumescript_consume { db_cmd nitems tnum outputfile mode args } {
}
error_check_good output_close:$pid [close $oid] ""
- # XXX: see above note.
+
set ret [catch {$db close} res]
- error_check_good db_close:$pid [expr ($ret == 0) ||\
- ([is_substr $res DB_INCOMPLETE] == 1)] 1
+ error_check_good db_close:$pid $ret 0
puts "\t\tTest0$tnum: Consumer $pid finished."
}
@@ -99,7 +99,7 @@ set args [lindex [lrange $argv 6 end] 0]
set mydata "consumer data"
# Open env
-set dbenv [berkdb env -home $dir ]
+set dbenv [berkdb_env -home $dir ]
error_check_good db_env_create [is_valid_env $dbenv] TRUE
# Figure out db opening command.
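
The producer hunk above adds a guard so that the monotonicity check on record numbers returned by "put -append" keeps passing once those numbers cross the signed 32-bit boundary. Pulled out as a standalone helper -- hedged, and not part of the patch -- the adjustment looks like this:

    # Sketch: the same arithmetic the producer applies to $oret.  On a
    # Tcl build with integers wider than 32 bits (0xffffffff > 0), a
    # previously returned record number above 0x7fffffff is shifted down
    # by 2^32 so it compares sensibly with a successor that has wrapped.
    proc normalize_recno { recno } {
        if { 0xffffffff > 0 && $recno > 0x7fffffff } {
            incr recno [expr 0 - 0x100000000]
        }
        return $recno
    }
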
diff --git a/bdb/test/dbm.tcl b/bdb/test/dbm.tcl
index 41a5da1f13a..a392c7a9f3a 100644
--- a/bdb/test/dbm.tcl
+++ b/bdb/test/dbm.tcl
@@ -1,16 +1,16 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: dbm.tcl,v 11.12 2000/08/25 14:21:50 sue Exp $
+# $Id: dbm.tcl,v 11.15 2002/01/11 15:53:19 bostic Exp $
#
-# Historic DBM interface test.
-# Use the first 1000 entries from the dictionary.
-# Insert each with self as key and data; retrieve each.
-# After all are entered, retrieve all; compare output to original.
-# Then reopen the file, re-retrieve everything.
-# Finally, delete everything.
+# TEST dbm
+# TEST Historic DBM interface test. Use the first 1000 entries from the
+# TEST dictionary. Insert each with self as key and data; retrieve each.
+# TEST After all are entered, retrieve all; compare output to original.
+# TEST Then reopen the file, re-retrieve everything. Finally, delete
+# TEST everything.
proc dbm { { nentries 1000 } } {
source ./include.tcl
diff --git a/bdb/test/dbscript.tcl b/bdb/test/dbscript.tcl
index 3a51b4363d4..5decc493e9e 100644
--- a/bdb/test/dbscript.tcl
+++ b/bdb/test/dbscript.tcl
@@ -1,12 +1,13 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: dbscript.tcl,v 11.10 2000/04/21 18:36:21 krinsky Exp $
+# $Id: dbscript.tcl,v 11.14 2002/04/01 16:28:16 bostic Exp $
#
# Random db tester.
# Usage: dbscript file numops min_del max_add key_avg data_avgdups
+# method: method (we pass this in so that fixed-length records work)
# file: db file on which to operate
# numops: number of operations to do
# ncurs: number of cursors
@@ -22,26 +23,25 @@ source ./include.tcl
source $test_path/test.tcl
source $test_path/testutils.tcl
-set alphabet "abcdefghijklmnopqrstuvwxyz"
-
set usage "dbscript file numops ncurs min_del max_add key_avg data_avg dups errpcnt"
# Verify usage
-if { $argc != 9 } {
+if { $argc != 10 } {
puts stderr "FAIL:[timestamp] Usage: $usage"
exit
}
# Initialize arguments
-set file [lindex $argv 0]
-set numops [ lindex $argv 1 ]
-set ncurs [ lindex $argv 2 ]
-set min_del [ lindex $argv 3 ]
-set max_add [ lindex $argv 4 ]
-set key_avg [ lindex $argv 5 ]
-set data_avg [ lindex $argv 6 ]
-set dups [ lindex $argv 7 ]
-set errpct [ lindex $argv 8 ]
+set method [lindex $argv 0]
+set file [lindex $argv 1]
+set numops [ lindex $argv 2 ]
+set ncurs [ lindex $argv 3 ]
+set min_del [ lindex $argv 4 ]
+set max_add [ lindex $argv 5 ]
+set key_avg [ lindex $argv 6 ]
+set data_avg [ lindex $argv 7 ]
+set dups [ lindex $argv 8 ]
+set errpct [ lindex $argv 9 ]
berkdb srand $rand_init
@@ -68,7 +68,7 @@ if {$cerr != 0} {
puts $cret
return
}
-set method [$db get_type]
+# set method [$db get_type]
set record_based [is_record_based $method]
# Initialize globals including data
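
Since the script now expects the access method as an explicit first argument (ten arguments in total), a hedged example of the new argv order follows; the concrete values are placeholders, and in practice the suite launches this script through wrap.tcl.

    # Hypothetical argument list for the updated dbscript.tcl:
    #   method file           numops ncurs min_del max_add key_avg data_avg dups errpct
    #   btree  $testdir/db.db 1000   5     10      20      5       20       0    10
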
diff --git a/bdb/test/ddoyscript.tcl b/bdb/test/ddoyscript.tcl
new file mode 100644
index 00000000000..5478a1a98e0
--- /dev/null
+++ b/bdb/test/ddoyscript.tcl
@@ -0,0 +1,172 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: ddoyscript.tcl,v 11.6 2002/02/20 16:35:18 sandstro Exp $
+#
+# Deadlock detector script tester.
+# Usage: ddoyscript dir lockerid numprocs
+# dir: DBHOME directory
+# lockerid: Lock id for this locker
+# numprocs: Total number of processes running
+# myid: id of this process --
+#	the order in which the processes are created is the same as the
+#	order in which their locker ids were allocated, so we know that
+#	the locker age relationship is isomorphic to the ordering of the
+#	myids.
+
+source ./include.tcl
+source $test_path/test.tcl
+source $test_path/testutils.tcl
+
+set usage "ddoyscript dir lockerid numprocs oldoryoung"
+
+# Verify usage
+if { $argc != 5 } {
+ puts stderr "FAIL:[timestamp] Usage: $usage"
+ exit
+}
+
+# Initialize arguments
+set dir [lindex $argv 0]
+set lockerid [ lindex $argv 1 ]
+set numprocs [ lindex $argv 2 ]
+set old_or_young [lindex $argv 3]
+set myid [lindex $argv 4]
+
+set myenv [berkdb_env -lock -home $dir -create -mode 0644]
+error_check_bad lock_open $myenv NULL
+error_check_good lock_open [is_substr $myenv "env"] 1
+
+# There are two cases here -- oldest/youngest or a ring locker.
+
+if { $myid == 0 || $myid == [expr $numprocs - 1] } {
+ set waitobj NULL
+ set ret 0
+
+ if { $myid == 0 } {
+ set objid 2
+ if { $old_or_young == "o" } {
+ set waitobj [expr $numprocs - 1]
+ }
+ } else {
+ if { $old_or_young == "y" } {
+ set waitobj 0
+ }
+ set objid 4
+ }
+
+ # Acquire own read lock
+ if {[catch {$myenv lock_get read $lockerid $myid} selflock] != 0} {
+ puts $errorInfo
+ } else {
+ error_check_good selfget:$objid [is_substr $selflock $myenv] 1
+ }
+
+ # Acquire read lock
+ if {[catch {$myenv lock_get read $lockerid $objid} lock1] != 0} {
+ puts $errorInfo
+ } else {
+ error_check_good lockget:$objid [is_substr $lock1 $myenv] 1
+ }
+
+ tclsleep 10
+
+ if { $waitobj == "NULL" } {
+ # Sleep for a good long while
+ tclsleep 90
+ } else {
+ # Acquire write lock
+ if {[catch {$myenv lock_get write $lockerid $waitobj} lock2]
+ != 0} {
+ puts $errorInfo
+ set ret ERROR
+ } else {
+ error_check_good lockget:$waitobj \
+ [is_substr $lock2 $myenv] 1
+
+ # Now release it
+ if {[catch {$lock2 put} err] != 0} {
+ puts $errorInfo
+ set ret ERROR
+ } else {
+ error_check_good lockput:oy:$objid $err 0
+ }
+ }
+
+ }
+
+ # Release self lock
+ if {[catch {$selflock put} err] != 0} {
+ puts $errorInfo
+ if { $ret == 0 } {
+ set ret ERROR
+ }
+ } else {
+ error_check_good selfput:oy:$myid $err 0
+ if { $ret == 0 } {
+ set ret 1
+ }
+ }
+
+ # Release first lock
+ if {[catch {$lock1 put} err] != 0} {
+ puts $errorInfo
+ if { $ret == 0 } {
+ set ret ERROR
+ }
+ } else {
+ error_check_good lockput:oy:$objid $err 0
+ if { $ret == 0 } {
+ set ret 1
+ }
+ }
+
+} else {
+ # Make sure that we succeed if we're locking the same object as
+ # oldest or youngest.
+ if { [expr $myid % 2] == 0 } {
+ set mode read
+ } else {
+ set mode write
+ }
+ # Obtain first lock (should always succeed).
+ if {[catch {$myenv lock_get $mode $lockerid $myid} lock1] != 0} {
+ puts $errorInfo
+ } else {
+ error_check_good lockget:$myid [is_substr $lock1 $myenv] 1
+ }
+
+ tclsleep 30
+
+ set nextobj [expr $myid + 1]
+ if { $nextobj == [expr $numprocs - 1] } {
+ set nextobj 1
+ }
+
+ set ret 1
+ if {[catch {$myenv lock_get write $lockerid $nextobj} lock2] != 0} {
+ if {[string match "*DEADLOCK*" $lock2] == 1} {
+ set ret DEADLOCK
+ } else {
+ set ret ERROR
+ }
+ } else {
+ error_check_good lockget:$nextobj [is_substr $lock2 $myenv] 1
+ }
+
+ # Now release the first lock
+ error_check_good lockput:$lock1 [$lock1 put] 0
+
+ if {$ret == 1} {
+ error_check_bad lockget:$nextobj $lock2 NULL
+ error_check_good lockget:$nextobj [is_substr $lock2 $myenv] 1
+ error_check_good lockput:$lock2 [$lock2 put] 0
+ }
+}
+
+puts $ret
+error_check_good lock_id_free [$myenv lock_id_free $lockerid] 0
+error_check_good envclose [$myenv close] 0
+exit
diff --git a/bdb/test/ddscript.tcl b/bdb/test/ddscript.tcl
index 9b139a4cbc6..621906233a9 100644
--- a/bdb/test/ddscript.tcl
+++ b/bdb/test/ddscript.tcl
@@ -1,9 +1,9 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: ddscript.tcl,v 11.7 2000/05/08 19:26:37 sue Exp $
+# $Id: ddscript.tcl,v 11.12 2002/02/20 16:35:18 sandstro Exp $
#
# Deadlock detector script tester.
# Usage: ddscript dir test lockerid objid numprocs
@@ -32,12 +32,13 @@ set lockerid [ lindex $argv 2 ]
set objid [ lindex $argv 3 ]
set numprocs [ lindex $argv 4 ]
-set myenv [berkdb env -lock -home $dir -create -mode 0644]
+set myenv [berkdb_env -lock -home $dir -create -mode 0644 ]
error_check_bad lock_open $myenv NULL
error_check_good lock_open [is_substr $myenv "env"] 1
puts [eval $tnum $myenv $lockerid $objid $numprocs]
+error_check_good lock_id_free [$myenv lock_id_free $lockerid] 0
error_check_good envclose [$myenv close] 0
exit
diff --git a/bdb/test/dead001.tcl b/bdb/test/dead001.tcl
index 9e7c71f6a58..e9853a87e53 100644
--- a/bdb/test/dead001.tcl
+++ b/bdb/test/dead001.tcl
@@ -1,56 +1,67 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: dead001.tcl,v 11.17 2000/11/05 14:23:55 dda Exp $
+# $Id: dead001.tcl,v 11.33 2002/09/05 17:23:05 sandstro Exp $
#
-# Deadlock Test 1.
-# We create various deadlock scenarios for different numbers of lockers
-# and see if we can get the world cleaned up suitably.
-proc dead001 { { procs "2 4 10" } {tests "ring clump" } } {
+# TEST dead001
+# TEST Use two different configurations to test deadlock detection among a
+# TEST variable number of processes. One configuration has the processes
+# TEST deadlocked in a ring. The other has the processes all deadlocked on
+# TEST a single resource.
+proc dead001 { { procs "2 4 10" } {tests "ring clump" } \
+ {timeout 0} {tnum "001"} } {
source ./include.tcl
+ global lock_curid
+ global lock_maxid
- puts "Dead001: Deadlock detector tests"
+ puts "Dead$tnum: Deadlock detector tests"
env_cleanup $testdir
# Create the environment.
- puts "\tDead001.a: creating environment"
- set env [berkdb env -create -mode 0644 -lock -home $testdir]
+ puts "\tDead$tnum.a: creating environment"
+ set env [berkdb_env -create \
+ -mode 0644 -lock -txn_timeout $timeout -home $testdir]
error_check_good lock_env:open [is_valid_env $env] TRUE
- error_check_good lock_env:close [$env close] 0
-
- set dpid [exec $util_path/db_deadlock -vw -h $testdir \
- >& $testdir/dd.out &]
-
foreach t $tests {
- set pidlist ""
foreach n $procs {
+ if {$timeout == 0 } {
+ set dpid [exec $util_path/db_deadlock -vw \
+ -h $testdir >& $testdir/dd.out &]
+ } else {
+ set dpid [exec $util_path/db_deadlock -vw \
+ -ae -h $testdir >& $testdir/dd.out &]
+ }
- sentinel_init
+ sentinel_init
+ set pidlist ""
+ set ret [$env lock_id_set $lock_curid $lock_maxid]
+ error_check_good lock_id_set $ret 0
# Fire off the tests
- puts "\tDead001: $n procs of test $t"
+ puts "\tDead$tnum: $n procs of test $t"
for { set i 0 } { $i < $n } { incr i } {
+ set locker [$env lock_id]
puts "$tclsh_path $test_path/wrap.tcl \
- $testdir/dead001.log.$i \
- ddscript.tcl $testdir $t $i $i $n"
+ $testdir/dead$tnum.log.$i \
+ ddscript.tcl $testdir $t $locker $i $n"
set p [exec $tclsh_path \
$test_path/wrap.tcl \
- ddscript.tcl $testdir/dead001.log.$i \
- $testdir $t $i $i $n &]
+ ddscript.tcl $testdir/dead$tnum.log.$i \
+ $testdir $t $locker $i $n &]
lappend pidlist $p
}
- watch_procs 5
+ watch_procs $pidlist 5
# Now check output
set dead 0
set clean 0
set other 0
for { set i 0 } { $i < $n } { incr i } {
- set did [open $testdir/dead001.log.$i]
+ set did [open $testdir/dead$tnum.log.$i]
while { [gets $did val] != -1 } {
switch $val {
DEADLOCK { incr dead }
@@ -60,17 +71,18 @@ proc dead001 { { procs "2 4 10" } {tests "ring clump" } } {
}
close $did
}
+ tclkill $dpid
puts "dead check..."
- dead_check $t $n $dead $clean $other
+ dead_check $t $n $timeout $dead $clean $other
}
}
- exec $KILL $dpid
# Windows needs files closed before deleting files, so pause a little
- tclsleep 2
+ tclsleep 3
fileremove -f $testdir/dd.out
# Remove log files
for { set i 0 } { $i < $n } { incr i } {
- fileremove -f $testdir/dead001.log.$i
+ fileremove -f $testdir/dead$tnum.log.$i
}
+ error_check_good lock_env:close [$env close] 0
}
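
The reworked driver above allocates a fresh locker id from the environment for every child instead of reusing the loop index. A compact sketch of that allocation pattern, using only calls that appear in the hunks (the child count of 4 and the lockers list are illustrative):

    # Sketch only: reset the locker-id space, then hand each child its
    # own id; lock_curid and lock_maxid come from the suite's globals.
    set env [berkdb_env -create -mode 0644 -lock -home $testdir]
    error_check_good env_open [is_valid_env $env] TRUE
    error_check_good lock_id_set [$env lock_id_set $lock_curid $lock_maxid] 0
    set lockers {}
    for { set i 0 } { $i < 4 } { incr i } {
        lappend lockers [$env lock_id]
    }
    # ... each child runs ddscript.tcl with its own locker id ...
    error_check_good env_close [$env close] 0
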
diff --git a/bdb/test/dead002.tcl b/bdb/test/dead002.tcl
index 83cc6c7d59b..bc19e7127e5 100644
--- a/bdb/test/dead002.tcl
+++ b/bdb/test/dead002.tcl
@@ -1,52 +1,58 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: dead002.tcl,v 11.15 2000/08/25 14:21:50 sue Exp $
+# $Id: dead002.tcl,v 11.23 2002/09/05 17:23:05 sandstro Exp $
#
-# Deadlock Test 2.
-# Identical to Test 1 except that instead of running a standalone deadlock
-# detector, we create the region with "detect on every wait"
-proc dead002 { { procs "2 4 10" } {tests "ring clump" } } {
+# TEST dead002
+# TEST Same test as dead001, but use "detect on every collision" instead
+# TEST of separate deadlock detector.
+proc dead002 { { procs "2 4 10" } {tests "ring clump" } \
+ {timeout 0} {tnum 002} } {
source ./include.tcl
- puts "Dead002: Deadlock detector tests"
+ puts "Dead$tnum: Deadlock detector tests"
env_cleanup $testdir
# Create the environment.
- puts "\tDead002.a: creating environment"
- set env [berkdb env \
- -create -mode 0644 -home $testdir -lock -lock_detect default]
+ puts "\tDead$tnum.a: creating environment"
+ set lmode "default"
+ if { $timeout != 0 } {
+ set lmode "expire"
+ }
+ set env [berkdb_env \
+ -create -mode 0644 -home $testdir \
+ -lock -txn_timeout $timeout -lock_detect $lmode]
error_check_good lock_env:open [is_valid_env $env] TRUE
- error_check_good lock_env:close [$env close] 0
foreach t $tests {
- set pidlist ""
foreach n $procs {
+ set pidlist ""
sentinel_init
# Fire off the tests
- puts "\tDead002: $n procs of test $t"
+ puts "\tDead$tnum: $n procs of test $t"
for { set i 0 } { $i < $n } { incr i } {
+ set locker [$env lock_id]
puts "$tclsh_path $test_path/wrap.tcl \
- $testdir/dead002.log.$i \
- ddscript.tcl $testdir $t $i $i $n"
+ $testdir/dead$tnum.log.$i \
+ ddscript.tcl $testdir $t $locker $i $n"
set p [exec $tclsh_path \
$test_path/wrap.tcl \
- ddscript.tcl $testdir/dead002.log.$i \
- $testdir $t $i $i $n &]
+ ddscript.tcl $testdir/dead$tnum.log.$i \
+ $testdir $t $locker $i $n &]
lappend pidlist $p
}
- watch_procs 5
+ watch_procs $pidlist 5
# Now check output
set dead 0
set clean 0
set other 0
for { set i 0 } { $i < $n } { incr i } {
- set did [open $testdir/dead002.log.$i]
+ set did [open $testdir/dead$tnum.log.$i]
while { [gets $did val] != -1 } {
switch $val {
DEADLOCK { incr dead }
@@ -56,13 +62,14 @@ proc dead002 { { procs "2 4 10" } {tests "ring clump" } } {
}
close $did
}
- dead_check $t $n $dead $clean $other
+ dead_check $t $n $timeout $dead $clean $other
}
}
fileremove -f $testdir/dd.out
# Remove log files
for { set i 0 } { $i < $n } { incr i } {
- fileremove -f $testdir/dead002.log.$i
+ fileremove -f $testdir/dead$tnum.log.$i
}
+ error_check_good lock_env:close [$env close] 0
}
diff --git a/bdb/test/dead003.tcl b/bdb/test/dead003.tcl
index 4075eb44f86..48088e1427c 100644
--- a/bdb/test/dead003.tcl
+++ b/bdb/test/dead003.tcl
@@ -1,16 +1,18 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: dead003.tcl,v 1.8 2000/08/25 14:21:50 sue Exp $
+# $Id: dead003.tcl,v 1.17 2002/09/05 17:23:05 sandstro Exp $
#
-# Deadlock Test 3.
-# Test DB_LOCK_OLDEST and DB_LOCK_YOUNGEST
-# Identical to Test 2 except that we create the region with "detect on
-# every wait" with first the "oldest" and then "youngest".
+# TEST dead003
+# TEST
+# TEST Same test as dead002, but explicitly specify DB_LOCK_OLDEST and
+# TEST DB_LOCK_YOUNGEST. Verify the correct lock was aborted/granted.
proc dead003 { { procs "2 4 10" } {tests "ring clump" } } {
source ./include.tcl
+ global lock_curid
+ global lock_maxid
set detects { oldest youngest }
puts "Dead003: Deadlock detector tests: $detects"
@@ -19,31 +21,34 @@ proc dead003 { { procs "2 4 10" } {tests "ring clump" } } {
foreach d $detects {
env_cleanup $testdir
puts "\tDead003.a: creating environment for $d"
- set env [berkdb env \
+ set env [berkdb_env \
-create -mode 0644 -home $testdir -lock -lock_detect $d]
error_check_good lock_env:open [is_valid_env $env] TRUE
- error_check_good lock_env:close [$env close] 0
foreach t $tests {
- set pidlist ""
foreach n $procs {
- sentinel_init
+ set pidlist ""
+ sentinel_init
+ set ret [$env lock_id_set \
+ $lock_curid $lock_maxid]
+ error_check_good lock_id_set $ret 0
# Fire off the tests
puts "\tDead003: $n procs of test $t"
for { set i 0 } { $i < $n } { incr i } {
+ set locker [$env lock_id]
puts "$tclsh_path\
test_path/ddscript.tcl $testdir \
- $t $i $i $n >& \
+ $t $locker $i $n >& \
$testdir/dead003.log.$i"
set p [exec $tclsh_path \
$test_path/wrap.tcl \
ddscript.tcl \
$testdir/dead003.log.$i $testdir \
- $t $i $i $n &]
+ $t $locker $i $n &]
lappend pidlist $p
}
- watch_procs 5
+ watch_procs $pidlist 5
# Now check output
set dead 0
@@ -60,7 +65,7 @@ proc dead003 { { procs "2 4 10" } {tests "ring clump" } } {
}
close $did
}
- dead_check $t $n $dead $clean $other
+ dead_check $t $n 0 $dead $clean $other
#
# If we get here we know we have the
# correct number of dead/clean procs, as
@@ -88,5 +93,6 @@ proc dead003 { { procs "2 4 10" } {tests "ring clump" } } {
for { set i 0 } { $i < $n } { incr i } {
fileremove -f $testdir/dead003.log.$i
}
+ error_check_good lock_env:close [$env close] 0
}
}
diff --git a/bdb/test/dead004.tcl b/bdb/test/dead004.tcl
new file mode 100644
index 00000000000..f5306a0d892
--- /dev/null
+++ b/bdb/test/dead004.tcl
@@ -0,0 +1,108 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: dead004.tcl,v 11.11 2002/09/05 17:23:05 sandstro Exp $
+#
+# Deadlock Test 4.
+# This test is designed to make sure that we handle youngest and oldest
+# deadlock detection even when the youngest and oldest transactions in the
+# system are not involved in the deadlock (that is, we want to abort the
+# youngest/oldest which is actually involved in the deadlock, not simply
+# the youngest/oldest in the system).
+# Since this is used for transaction systems, the locker ID is what we
+# use to identify age (smaller number is older).
+#
+# The set up is that we have a total of 6 processes. The oldest (locker 0)
+# and the youngest (locker 5) simply acquire a lock, hold it for a long time
+# and then release it. The rest form a ring, obtaining lock N and requesting
+# a lock on (N+1) mod 4. The deadlock detector ought to pick locker 1 or 4
+# to abort and not 0 or 5.
+
+proc dead004 { } {
+ source ./include.tcl
+ global lock_curid
+ global lock_maxid
+
+ foreach a { o y } {
+ puts "Dead004: Deadlock detector test -a $a"
+ env_cleanup $testdir
+
+ # Create the environment.
+ puts "\tDead004.a: creating environment"
+ set env [berkdb_env -create -mode 0644 -lock -home $testdir]
+ error_check_good lock_env:open [is_valid_env $env] TRUE
+
+ set dpid [exec $util_path/db_deadlock -v -t 5 -a $a \
+ -h $testdir >& $testdir/dd.out &]
+
+ set procs 6
+
+ foreach n $procs {
+
+ sentinel_init
+ set pidlist ""
+ set ret [$env lock_id_set $lock_curid $lock_maxid]
+ error_check_good lock_id_set $ret 0
+
+ # Fire off the tests
+ puts "\tDead004: $n procs"
+ for { set i 0 } { $i < $n } { incr i } {
+ set locker [$env lock_id]
+ puts "$tclsh_path $test_path/wrap.tcl \
+ $testdir/dead004.log.$i \
+ ddoyscript.tcl $testdir $locker $n $a $i"
+ set p [exec $tclsh_path \
+ $test_path/wrap.tcl \
+ ddoyscript.tcl $testdir/dead004.log.$i \
+ $testdir $locker $n $a $i &]
+ lappend pidlist $p
+ }
+ watch_procs $pidlist 5
+
+ }
+ # Now check output
+ set dead 0
+ set clean 0
+ set other 0
+ for { set i 0 } { $i < $n } { incr i } {
+ set did [open $testdir/dead004.log.$i]
+ while { [gets $did val] != -1 } {
+ switch $val {
+ DEADLOCK { incr dead }
+ 1 { incr clean }
+ default { incr other }
+ }
+ }
+ close $did
+ }
+ tclkill $dpid
+
+ puts "dead check..."
+ dead_check oldyoung $n 0 $dead $clean $other
+
+ # Now verify that neither the oldest nor the
+ # youngest were the deadlock.
+ set did [open $testdir/dead004.log.0]
+ error_check_bad file:young [gets $did val] -1
+ error_check_good read:young $val 1
+ close $did
+
+ set did [open $testdir/dead004.log.[expr $procs - 1]]
+ error_check_bad file:old [gets $did val] -1
+ error_check_good read:old $val 1
+ close $did
+
+ # Windows needs files closed before deleting files,
+ # so pause a little
+ tclsleep 2
+ fileremove -f $testdir/dd.out
+
+ # Remove log files
+ for { set i 0 } { $i < $n } { incr i } {
+ fileremove -f $testdir/dead004.log.$i
+ }
+ error_check_good lock_env:close [$env close] 0
+ }
+}
diff --git a/bdb/test/dead005.tcl b/bdb/test/dead005.tcl
new file mode 100644
index 00000000000..71be8b1713f
--- /dev/null
+++ b/bdb/test/dead005.tcl
@@ -0,0 +1,87 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: dead005.tcl,v 11.10 2002/09/05 17:23:05 sandstro Exp $
+#
+# Deadlock Test 5.
+# Test out the minlocks, maxlocks, and minwrites options
+# to the deadlock detector.
+proc dead005 { { procs "4 6 10" } {tests "maxlocks minwrites minlocks" } } {
+ source ./include.tcl
+
+ puts "Dead005: minlocks, maxlocks, and minwrites deadlock detection tests"
+ foreach t $tests {
+ puts "Dead005.$t: creating environment"
+ env_cleanup $testdir
+
+ # Create the environment.
+ set env [berkdb_env -create -mode 0644 -lock -home $testdir]
+ error_check_good lock_env:open [is_valid_env $env] TRUE
+ case $t {
+ minlocks { set to n }
+ maxlocks { set to m }
+ minwrites { set to w }
+ }
+ foreach n $procs {
+ set dpid [exec $util_path/db_deadlock -vw -h $testdir \
+ -a $to >& $testdir/dd.out &]
+ sentinel_init
+ set pidlist ""
+
+ # Fire off the tests
+ puts "\tDead005: $t test with $n procs"
+ for { set i 0 } { $i < $n } { incr i } {
+ set locker [$env lock_id]
+ puts "$tclsh_path $test_path/wrap.tcl \
+ $testdir/dead005.log.$i \
+ ddscript.tcl $testdir $t $locker $i $n"
+ set p [exec $tclsh_path \
+ $test_path/wrap.tcl \
+ ddscript.tcl $testdir/dead005.log.$i \
+ $testdir $t $locker $i $n &]
+ lappend pidlist $p
+ }
+ watch_procs $pidlist 5
+
+ # Now check output
+ set dead 0
+ set clean 0
+ set other 0
+ for { set i 0 } { $i < $n } { incr i } {
+ set did [open $testdir/dead005.log.$i]
+ while { [gets $did val] != -1 } {
+ switch $val {
+ DEADLOCK { incr dead }
+ 1 { incr clean }
+ default { incr other }
+ }
+ }
+ close $did
+ }
+ tclkill $dpid
+ puts "dead check..."
+ dead_check $t $n 0 $dead $clean $other
+ # Now verify that the correct participant
+ # got deadlocked.
+ switch $t {
+ minlocks {set f 0}
+ minwrites {set f 1}
+ maxlocks {set f [expr $n - 1]}
+ }
+ set did [open $testdir/dead005.log.$f]
+ error_check_bad file:$t [gets $did val] -1
+ error_check_good read($f):$t $val DEADLOCK
+ close $did
+ }
+ error_check_good lock_env:close [$env close] 0
+ # Windows needs files closed before deleting them, so pause
+ tclsleep 2
+ fileremove -f $testdir/dd.out
+ # Remove log files
+ for { set i 0 } { $i < $n } { incr i } {
+			fileremove -f $testdir/dead005.log.$i
+ }
+ }
+}
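
The two new deadlock tests above share one driver shape: start db_deadlock in the background with a victim-selection policy, spawn a set of locker scripts through wrap.tcl, wait for them, then classify each script's log as deadlocked or clean and hand the tallies to dead_check. The condensed sketch below shows that shape only; ddmyscript.tcl and the dd.log names are placeholders, $env is assumed to be an open -lock environment handle, and the harness procs (sentinel_init, watch_procs, dead_check, tclkill) are assumed to come from the suite's test.tcl.

proc dd_driver_sketch { env test policy n } {
	source ./include.tcl

	# Run the deadlock detector in the background with the chosen
	# victim-selection policy (db_deadlock's -a flag).
	set dpid [exec $util_path/db_deadlock -v -t 5 -a $policy \
	    -h $testdir >& $testdir/dd.out &]

	sentinel_init
	set pidlist ""
	for { set i 0 } { $i < $n } { incr i } {
		set locker [$env lock_id]
		lappend pidlist [exec $tclsh_path $test_path/wrap.tcl \
		    ddmyscript.tcl $testdir/dd.log.$i \
		    $testdir $locker $n $i &]
	}
	watch_procs $pidlist 5

	# Each locker script prints DEADLOCK if it was picked as the
	# victim and 1 if it completed cleanly; tally the outcomes.
	set dead 0
	set clean 0
	set other 0
	for { set i 0 } { $i < $n } { incr i } {
		set did [open $testdir/dd.log.$i]
		while { [gets $did val] != -1 } {
			switch $val {
				DEADLOCK { incr dead }
				1 { incr clean }
				default { incr other }
			}
		}
		close $did
	}
	tclkill $dpid
	dead_check $test $n 0 $dead $clean $other
}
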
diff --git a/bdb/test/dead006.tcl b/bdb/test/dead006.tcl
new file mode 100644
index 00000000000..b70e011fb74
--- /dev/null
+++ b/bdb/test/dead006.tcl
@@ -0,0 +1,16 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: dead006.tcl,v 1.4 2002/01/11 15:53:21 bostic Exp $
+#
+# TEST dead006
+# TEST use timeouts rather than the normal dd algorithm.
+proc dead006 { { procs "2 4 10" } {tests "ring clump" } \
+ {timeout 1000} {tnum 006} } {
+ source ./include.tcl
+
+ dead001 $procs $tests $timeout $tnum
+ dead002 $procs $tests $timeout $tnum
+}
diff --git a/bdb/test/dead007.tcl b/bdb/test/dead007.tcl
new file mode 100644
index 00000000000..2b6a78cb4b9
--- /dev/null
+++ b/bdb/test/dead007.tcl
@@ -0,0 +1,34 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: dead007.tcl,v 1.3 2002/01/11 15:53:22 bostic Exp $
+#
+# TEST dead007
+# TEST Test deadlock detection when the lock ids wrap around.
+proc dead007 { } {
+ source ./include.tcl
+ global lock_curid
+ global lock_maxid
+
+ set save_curid $lock_curid
+ set save_maxid $lock_maxid
+ puts "Dead007.a -- wrap around"
+ set lock_curid [expr $lock_maxid - 2]
+ dead001 "2 10"
+ ## Oldest/youngest breaks when the id wraps
+ # dead003 "4 10"
+ dead004
+
+ puts "Dead007.b -- extend space"
+ set lock_maxid [expr $lock_maxid - 3]
+ set lock_curid [expr $lock_maxid - 1]
+ dead001 "4 10"
+ ## Oldest/youngest breaks when the id wraps
+ # dead003 "10"
+ dead004
+
+ set lock_curid $save_curid
+ set lock_maxid $save_maxid
+}
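
dead007 works by repositioning the locker-id space before rerunning the earlier tests. Below is a minimal sketch of that mechanism, assuming only the harness in include.tcl; the printed ids are purely illustrative, and the point is that allocations made near lock_maxid exercise the wraparound path.

proc lockid_wrap_sketch { } {
	source ./include.tcl
	global lock_maxid

	env_cleanup $testdir
	set env [berkdb_env -create -lock -home $testdir]
	error_check_good env_open [is_valid_env $env] TRUE

	# Park the current id two below the maximum so the next few
	# allocations run off the end of the range.
	set curid [expr $lock_maxid - 2]
	error_check_good id_set [$env lock_id_set $curid $lock_maxid] 0

	set ids {}
	for { set i 0 } { $i < 4 } { incr i } {
		set locker [$env lock_id]
		puts "allocated locker id: $locker"
		lappend ids $locker
	}
	foreach locker $ids {
		error_check_good id_free [$env lock_id_free $locker] 0
	}
	error_check_good env_close [$env close] 0
}
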
diff --git a/bdb/test/env001.tcl b/bdb/test/env001.tcl
index 00837330193..781029f6a5c 100644
--- a/bdb/test/env001.tcl
+++ b/bdb/test/env001.tcl
@@ -1,11 +1,12 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1999, 2000
+# Copyright (c) 1999-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: env001.tcl,v 11.21 2000/11/09 19:24:08 sue Exp $
+# $Id: env001.tcl,v 11.26 2002/05/08 19:01:43 margo Exp $
#
-# Test of env remove interface.
+# TEST env001
+# TEST Test of env remove interface (formerly env_remove).
proc env001 { } {
global errorInfo
global errorCode
@@ -20,12 +21,12 @@ proc env001 { } {
# Try opening without Create flag should error
puts "\tEnv001.a: Open without create (should fail)."
- catch {set env [berkdb env -home $testdir]} ret
+ catch {set env [berkdb_env_noerr -home $testdir]} ret
error_check_good env:fail [is_substr $ret "no such file"] 1
# Now try opening with create
puts "\tEnv001.b: Open with create."
- set env [berkdb env -create -mode 0644 -home $testdir]
+ set env [berkdb_env -create -mode 0644 -home $testdir]
error_check_bad env:$testdir $env NULL
error_check_good env:$testdir [is_substr $env "env"] 1
@@ -40,7 +41,7 @@ proc env001 { } {
puts "\tEnv001.d: Remove on closed environments."
if { $is_windows_test != 1 } {
puts "\t\tEnv001.d.1: Verify re-open."
- set env [berkdb env -home $testdir]
+ set env [berkdb_env -home $testdir]
error_check_bad env:$testdir $env NULL
error_check_good env:$testdir [is_substr $env "env"] 1
@@ -56,7 +57,7 @@ proc env001 { } {
puts "\tEnv001.e: Remove on open environments."
puts "\t\tEnv001.e.1: Env is open by single proc,\
remove no force."
- set env [berkdb env -create -mode 0644 -home $testdir]
+ set env [berkdb_env -create -mode 0644 -home $testdir]
error_check_bad env:$testdir $env NULL
error_check_good env:$testdir [is_substr $env "env"] 1
set stat [catch {berkdb envremove -home $testdir} ret]
@@ -68,7 +69,7 @@ proc env001 { } {
"\t\tEnv001.e.2: Env is open by single proc, remove with force."
# Now that envremove doesn't do a close, this won't work on Windows.
if { $is_windows_test != 1 && $is_hp_test != 1} {
- set env [berkdb env -create -mode 0644 -home $testdir]
+ set env [berkdb_env_noerr -create -mode 0644 -home $testdir]
error_check_bad env:$testdir $env NULL
error_check_good env:$testdir [is_substr $env "env"] 1
set stat [catch {berkdb envremove -force -home $testdir} ret]
@@ -77,19 +78,22 @@ proc env001 { } {
# Even though the underlying env is gone, we need to close
# the handle.
#
- catch {$env close}
+ set stat [catch {$env close} ret]
+ error_check_bad env:close_after_remove $stat 0
+ error_check_good env:close_after_remove \
+ [is_substr $ret "recovery"] 1
}
puts "\t\tEnv001.e.3: Env is open by 2 procs, remove no force."
# should fail
- set env [berkdb env -create -mode 0644 -home $testdir]
+ set env [berkdb_env -create -mode 0644 -home $testdir]
error_check_bad env:$testdir $env NULL
error_check_good env:$testdir [is_substr $env "env"] 1
set f1 [open |$tclsh_path r+]
puts $f1 "source $test_path/test.tcl"
- set remote_env [send_cmd $f1 "berkdb env -home $testdir"]
+ set remote_env [send_cmd $f1 "berkdb_env_noerr -home $testdir"]
error_check_good remote:env_open [is_valid_env $remote_env] TRUE
# First close our env, but leave remote open
error_check_good env:close [$env close] 0
@@ -110,13 +114,13 @@ proc env001 { } {
# are open, so we skip this test for Windows. On UNIX, it should
# succeed
if { $is_windows_test != 1 && $is_hp_test != 1 } {
- set env [berkdb env -create -mode 0644 -home $testdir]
+ set env [berkdb_env_noerr -create -mode 0644 -home $testdir]
error_check_bad env:$testdir $env NULL
error_check_good env:$testdir [is_substr $env "env"] 1
set f1 [open |$tclsh_path r+]
puts $f1 "source $test_path/test.tcl"
- set remote_env [send_cmd $f1 "berkdb env -home $testdir"]
+ set remote_env [send_cmd $f1 "berkdb_env -home $testdir"]
error_check_good remote:env_open [is_valid_env $remote_env] TRUE
catch {berkdb envremove -force -home $testdir} ret
@@ -124,7 +128,10 @@ proc env001 { } {
#
# We still need to close our handle.
#
- catch {$env close} ret
+ set stat [catch {$env close} ret]
+ error_check_bad env:close_after_error $stat 0
+ error_check_good env:close_after_error \
+ [is_substr $ret recovery] 1
# Close down remote process
set err [catch { close $f1 } result]
@@ -137,7 +144,7 @@ proc env001 { } {
file mkdir $testdir/NEWDIR
}
set eflags "-create -home $testdir/NEWDIR -mode 0644"
- set env [eval {berkdb env} $eflags]
+ set env [eval {berkdb_env} $eflags]
error_check_bad env:open $env NULL
error_check_good env:close [$env close] 0
error_check_good berkdb:envremove \
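
Much of the churn in env001 is swapping berkdb_env for berkdb_env_noerr. The distinction is that the test wants expected failures to come back as a catchable error string rather than being reported, so the message can be inspected with is_substr. A small sketch of the two idioms, mirroring Env001.a and Env001.b:

source ./include.tcl
env_cleanup $testdir

# Expected failure: no region files exist and -create was not given,
# so open with the _noerr variant under catch and check the message.
catch {set env [berkdb_env_noerr -home $testdir]} ret
error_check_good env_fail [is_substr $ret "no such file"] 1

# Expected success: open with -create and validate the handle.
set env [berkdb_env -create -mode 0644 -home $testdir]
error_check_good env_valid [is_valid_env $env] TRUE
error_check_good env_close [$env close] 0
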
diff --git a/bdb/test/env002.tcl b/bdb/test/env002.tcl
index a37ddea17a9..89c44f63a12 100644
--- a/bdb/test/env002.tcl
+++ b/bdb/test/env002.tcl
@@ -1,21 +1,21 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1999, 2000
+# Copyright (c) 1999-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: env002.tcl,v 11.11 2000/08/25 14:21:50 sue Exp $
+# $Id: env002.tcl,v 11.15 2002/02/20 16:35:20 sandstro Exp $
#
-# Env Test 002
-# Test set_lg_dir and env name resolution
-# With an environment path specified using -home, and then again
-# with it specified by the environment variable DB_HOME:
-# 1) Make sure that the set_lg_dir option is respected
-# a) as a relative pathname.
-# b) as an absolute pathname.
-# 2) Make sure that the DB_LOG_DIR db_config argument is respected,
-# again as relative and absolute pathnames.
-# 3) Make sure that if -both- db_config and a file are present,
-# only the file is respected (see doc/env/naming.html).
+# TEST env002
+# TEST Test of DB_LOG_DIR and env name resolution.
+# TEST With an environment path specified using -home, and then again
+# TEST with it specified by the environment variable DB_HOME:
+# TEST 1) Make sure that the set_lg_dir option is respected
+# TEST a) as a relative pathname.
+# TEST b) as an absolute pathname.
+# TEST 2) Make sure that the DB_LOG_DIR db_config argument is respected,
+# TEST again as relative and absolute pathnames.
+# TEST 3) Make sure that if -both- db_config and a file are present,
+# TEST only the file is respected (see doc/env/naming.html).
proc env002 { } {
# env002 is essentially just a small driver that runs
# env002_body--formerly the entire test--twice; once, it
@@ -30,7 +30,7 @@ proc env002 { } {
puts "Env002: set_lg_dir test."
- puts "\tEnv002: Running with -home argument to berkdb env."
+ puts "\tEnv002: Running with -home argument to berkdb_env."
env002_body "-home $testdir"
puts "\tEnv002: Running with environment variable DB_HOME set."
@@ -125,8 +125,8 @@ proc env002_run_test { major minor msg env_args log_path} {
# Create an environment, with logging, and scribble some
# stuff in a [btree] database in it.
- # puts [concat {berkdb env -create -log -private} $env_args]
- set dbenv [eval {berkdb env -create -log -private} $env_args]
+ # puts [concat {berkdb_env -create -log -private} $env_args]
+ set dbenv [eval {berkdb_env -create -log -private} $env_args]
error_check_good env_open [is_valid_env $dbenv] TRUE
set db [berkdb_open -env $dbenv -create -btree -mode 0644 $testfile]
error_check_good db_open [is_valid_db $db] TRUE
diff --git a/bdb/test/env003.tcl b/bdb/test/env003.tcl
index 01e0b6188fc..c16b54dd5e0 100644
--- a/bdb/test/env003.tcl
+++ b/bdb/test/env003.tcl
@@ -1,21 +1,21 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1999, 2000
+# Copyright (c) 1999-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: env003.tcl,v 11.12 2000/08/25 14:21:50 sue Exp $
+# $Id: env003.tcl,v 11.21 2002/08/08 15:38:06 bostic Exp $
#
-# Env Test 003
-# Test DB_TMP_DIR and env name resolution
-# With an environment path specified using -home, and then again
-# with it specified by the environment variable DB_HOME:
-# 1) Make sure that the DB_TMP_DIR config file option is respected
-# a) as a relative pathname.
-# b) as an absolute pathname.
-# 2) Make sure that the DB_TMP_DIR db_config argument is respected,
-# again as relative and absolute pathnames.
-# 3) Make sure that if -both- db_config and a file are present,
-# only the file is respected (see doc/env/naming.html).
+# TEST env003
+# TEST Test DB_TMP_DIR and env name resolution
+# TEST With an environment path specified using -home, and then again
+# TEST with it specified by the environment variable DB_HOME:
+# TEST 1) Make sure that the DB_TMP_DIR config file option is respected
+# TEST a) as a relative pathname.
+# TEST b) as an absolute pathname.
+# TEST 2) Make sure that the -tmp_dir config option is respected,
+# TEST again as relative and absolute pathnames.
+# TEST 3) Make sure that if -both- -tmp_dir and a file are present,
+# TEST only the file is respected (see doc/env/naming.html).
proc env003 { } {
# env003 is essentially just a small driver that runs
# env003_body twice. First, it supplies a "home" argument
@@ -29,7 +29,7 @@ proc env003 { } {
puts "Env003: DB_TMP_DIR test."
- puts "\tEnv003: Running with -home argument to berkdb env."
+ puts "\tEnv003: Running with -home argument to berkdb_env."
env003_body "-home $testdir"
puts "\tEnv003: Running with environment variable DB_HOME set."
@@ -44,7 +44,6 @@ proc env003 { } {
set env(DB_HOME) $testdir/bogus_home
env003_body "-use_environ -home $testdir"
unset env(DB_HOME)
-
}
proc env003_body { home_arg } {
@@ -52,7 +51,6 @@ proc env003_body { home_arg } {
env_cleanup $testdir
set tmpdir "tmpfiles_in_here"
-
file mkdir $testdir/$tmpdir
# Set up full path to $tmpdir for when we test absolute paths.
@@ -61,63 +59,44 @@ proc env003_body { home_arg } {
set fulltmpdir [pwd]
cd $curdir
- # Run test with the temp dir. nonexistent--it checks for failure.
- env_cleanup $testdir
-
+ # Create DB_CONFIG
env003_make_config $tmpdir
# Run the meat of the test.
env003_run_test a 1 "relative path, config file" $home_arg \
$testdir/$tmpdir
- env_cleanup $testdir
-
env003_make_config $fulltmpdir
# Run the test again
env003_run_test a 2 "absolute path, config file" $home_arg \
$fulltmpdir
- env_cleanup $testdir
-
# Now we try without a config file, but instead with db_config
# relative paths
env003_run_test b 1 "relative path, db_config" "$home_arg \
-tmp_dir $tmpdir -data_dir ." \
$testdir/$tmpdir
- env_cleanup $testdir
-
- # absolute
+ # absolute paths
env003_run_test b 2 "absolute path, db_config" "$home_arg \
-tmp_dir $fulltmpdir -data_dir ." \
$fulltmpdir
- env_cleanup $testdir
-
# Now, set db_config -and- have a # DB_CONFIG file, and make
# sure only the latter is honored.
- # Make a temp directory that actually does exist to supply
- # as a bogus argument--the test checks for -nonexistent- temp
- # dirs., as success is harder to detect.
file mkdir $testdir/bogus
env003_make_config $tmpdir
- # note that we supply an -existent- tmp dir to db_config as
- # a red herring
env003_run_test c 1 "relative path, both db_config and file" \
"$home_arg -tmp_dir $testdir/bogus -data_dir ." \
$testdir/$tmpdir
- env_cleanup $testdir
- file mkdir $fulltmpdir
file mkdir $fulltmpdir/bogus
- env003_make_config $fulltmpdir/nonexistent
+ env003_make_config $fulltmpdir
- # note that we supply an -existent- tmp dir to db_config as
- # a red herring
- env003_run_test c 2 "relative path, both db_config and file" \
+ env003_run_test c 2 "absolute path, both db_config and file" \
"$home_arg -tmp_dir $fulltmpdir/bogus -data_dir ." \
$fulltmpdir
}
@@ -131,40 +110,33 @@ proc env003_run_test { major minor msg env_args tmp_path} {
# Create an environment and small-cached in-memory database to
# use.
- set dbenv [eval {berkdb env -create -home $testdir} $env_args \
- {-cachesize {0 40960 1}}]
+ set dbenv [eval {berkdb_env -create -home $testdir} $env_args \
+ {-cachesize {0 50000 1}}]
error_check_good env_open [is_valid_env $dbenv] TRUE
- set db [berkdb_open_noerr -env $dbenv -create -btree]
+
+ set db [berkdb_open -env $dbenv -create -btree]
error_check_good db_open [is_valid_db $db] TRUE
# Fill the database with more than its cache can fit.
- # !!!
- # This is actually trickier than it sounds. The tempfile
- # gets unlinked as soon as it's created, so there's no straightforward
- # way to check for its existence. Instead, we make sure
- # DB_TMP_DIR points somewhere bogus, and make sure that the temp
- # dir. does -not- exist. But to do this, we have to know
- # which call to DB->put is going to fail--the temp file is
- # created lazily, so the failure only occurs when the cache finally
- # overflows.
- # The data we've conjured up will fit nicely once, but the second
- # call will overflow the cache. Thus we check for success once,
- # then failure.
#
- set key1 "key1"
- set key2 "key2"
- set data [repeat $alphabet 1000]
-
- # First put should succeed.
- error_check_good db_put_1 [$db put $key1 $data] 0
+ # When CONFIG_TEST is defined, the tempfile is left linked so
+ # we can check for its existence. Size the data to overfill
+ # the cache--the temp file is created lazily, so it is created
+ # when the cache overflows.
+ #
+ set key "key"
+ set data [repeat $alphabet 2000]
+ error_check_good db_put [$db put $key $data] 0
- # Second one should return ENOENT.
- set errorCode NONE
- catch {$db put $key2 $data} res
- error_check_good db_put_2 [is_substr $errorCode ENOENT] 1
+ # Check for exactly one temp file.
+ set ret [glob -nocomplain $tmp_path/BDB*]
+ error_check_good temp_file_exists [llength $ret] 1
+ # Can't remove temp file until db is closed on Windows.
error_check_good db_close [$db close] 0
+ fileremove -f $ret
error_check_good env_close [$dbenv close] 0
+
}
proc env003_make_config { tmpdir } {
diff --git a/bdb/test/env004.tcl b/bdb/test/env004.tcl
index 82cc8dd25c7..e93a0d95308 100644
--- a/bdb/test/env004.tcl
+++ b/bdb/test/env004.tcl
@@ -1,13 +1,13 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: env004.tcl,v 11.14 2000/08/25 14:21:50 sue Exp $
+# $Id: env004.tcl,v 11.18 2002/02/20 17:08:21 sandstro Exp $
#
-# Env Test 4
-# Test multiple data directories. Do a bunch of different opens
-# to make sure that the files are detected in different directories.
+# TEST env004
+# TEST Test multiple data directories. Do a bunch of different opens
+# TEST to make sure that the files are detected in different directories.
proc env004 { } {
source ./include.tcl
@@ -38,19 +38,19 @@ proc env004 { } {
set fulldir [pwd]
cd $curdir
- set e [berkdb env -create -private -home $testdir]
+ set e [berkdb_env -create -private -home $testdir]
error_check_good dbenv [is_valid_env $e] TRUE
ddir_test $fulldir $method $e $args
error_check_good env_close [$e close] 0
- puts "\tEnv004.b: Multiple data directories in berkdb env call."
+ puts "\tEnv004.b: Multiple data directories in berkdb_env call."
env_cleanup $testdir
file mkdir $testdir/data1
file mkdir $testdir/data2
file mkdir $testdir/data3
# Now call dbenv with config specified
- set e [berkdb env -create -private \
+ set e [berkdb_env -create -private \
-data_dir . -data_dir data1 -data_dir data2 \
-data_dir data3 -home $testdir]
error_check_good dbenv [is_valid_env $e] TRUE
diff --git a/bdb/test/env005.tcl b/bdb/test/env005.tcl
index 4ad9419936f..03bb1b40b34 100644
--- a/bdb/test/env005.tcl
+++ b/bdb/test/env005.tcl
@@ -1,14 +1,14 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1999, 2000
+# Copyright (c) 1999-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: env005.tcl,v 11.8 2000/08/25 14:21:50 sue Exp $
+# $Id: env005.tcl,v 11.15 2002/02/22 14:28:37 sandstro Exp $
#
-# Env Test 5
-# Test that using subsystems without initializing them correctly
-# returns an error. Cannot test mpool, because it is assumed
-# in the Tcl code.
+# TEST env005
+# TEST Test that using subsystems without initializing them correctly
+# TEST returns an error. Cannot test mpool, because it is assumed in
+# TEST the Tcl code.
proc env005 { } {
source ./include.tcl
@@ -17,7 +17,7 @@ proc env005 { } {
env_cleanup $testdir
puts "\tEnv005.a: Creating env with no subsystems."
- set e [berkdb env -create -home $testdir]
+ set e [berkdb_env_noerr -create -home $testdir]
error_check_good dbenv [is_valid_env $e] TRUE
set db [berkdb_open -create -btree $testdir/env005.db]
error_check_good dbopen [is_valid_db $db] TRUE
@@ -27,17 +27,17 @@ proc env005 { } {
{ "lock_get read 1 1" "Env005.b1"}
{ "lock_id" "Env005.b2"}
{ "lock_stat" "Env005.b3"}
+ { "lock_timeout 100" "Env005.b4"}
{ "log_archive" "Env005.c0"}
- { "log_file {1 1}" "Env005.c1"}
- { "log_flush" "Env005.c2"}
- { "log_get -first" "Env005.c3"}
+ { "log_cursor" "Env005.c1"}
+ { "log_file {1 1}" "Env005.c2"}
+ { "log_flush" "Env005.c3"}
{ "log_put record" "Env005.c4"}
- { "log_register $db xxx" "Env005.c5"}
- { "log_stat" "Env005.c6"}
- { "log_unregister $db" "Env005.c7"}
+ { "log_stat" "Env005.c5"}
{ "txn" "Env005.d0"}
{ "txn_checkpoint" "Env005.d1"}
{ "txn_stat" "Env005.d2"}
+ { "txn_timeout 100" "Env005.d3"}
}
foreach pair $rlist {
diff --git a/bdb/test/env006.tcl b/bdb/test/env006.tcl
index 1a39886cafa..48fc6982772 100644
--- a/bdb/test/env006.tcl
+++ b/bdb/test/env006.tcl
@@ -1,14 +1,12 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1999, 2000
+# Copyright (c) 1999-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: env006.tcl,v 11.5 2000/10/27 13:23:55 sue Exp $
-#
-# Env Test 6
-# DB Utility Check
-# Make sure that all the utilities exist and run.
+# $Id: env006.tcl,v 11.8 2002/01/11 15:53:23 bostic Exp $
#
+# TEST env006
+# TEST Make sure that all the utilities exist and run.
proc env006 { } {
source ./include.tcl
@@ -23,6 +21,8 @@ proc env006 { } {
{ "db_printlog" "Env006.f"}
{ "db_recover" "Env006.g"}
{ "db_stat" "Env006.h"}
+ { "db_upgrade" "Env006.h"}
+ { "db_verify" "Env006.h"}
}
foreach pair $rlist {
set cmd [lindex $pair 0]
diff --git a/bdb/test/env007.tcl b/bdb/test/env007.tcl
index b8ddea75c91..5748d2dbc89 100644
--- a/bdb/test/env007.tcl
+++ b/bdb/test/env007.tcl
@@ -1,17 +1,20 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1999, 2000
+# Copyright (c) 1999-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: env007.tcl,v 11.5 2000/08/25 14:21:50 sue Exp $
+# $Id: env007.tcl,v 11.21 2002/08/12 20:49:36 sandstro Exp $
#
-# Env Test 007
-# Test various config file options.
-# 1) Make sure command line option is respected
-# 2) Make sure that config file option is respected
-# 3) Make sure that if -both- DB_CONFIG and the set_<whatever>
-# method is used, only the file is respected.
+# TEST env007
+# TEST Test various DB_CONFIG config file options.
+# TEST 1) Make sure command line option is respected
+# TEST 2) Make sure that config file option is respected
+# TEST 3) Make sure that if -both- DB_CONFIG and the set_<whatever>
+# TEST method is used, only the file is respected.
+# TEST Then test all known config options.
proc env007 { } {
+ global errorInfo
+
# env007 is essentially just a small driver that runs
# env007_body twice. First, it supplies a "set" argument
# to use with environment opens, and the second time it sets
@@ -29,15 +32,19 @@ proc env007 { } {
set rlist {
{ " -txn_max " "set_tx_max" "19" "31" "Env007.a: Txn Max"
"txn_stat" "Max Txns"}
- { " -lock_max " "set_lk_max" "19" "31" "Env007.b: Lock Max"
- "lock_stat" "Max locks"}
- { " -log_buffer " "set_lg_bsize" "65536" "131072" "Env007.c: Log Bsize"
+ { " -lock_max_locks " "set_lk_max_locks" "17" "29" "Env007.b: Lock Max"
+ "lock_stat" "Maximum locks"}
+ { " -lock_max_lockers " "set_lk_max_lockers" "1500" "2000"
+ "Env007.c: Max Lockers" "lock_stat" "Maximum lockers"}
+ { " -lock_max_objects " "set_lk_max_objects" "1500" "2000"
+ "Env007.d: Max Objects" "lock_stat" "Maximum objects"}
+ { " -log_buffer " "set_lg_bsize" "65536" "131072" "Env007.e: Log Bsize"
"log_stat" "Log record cache size"}
- { " -log_max " "set_lg_max" "8388608" "9437184" "Env007.d: Log Max"
- "log_stat" "Maximum log file size"}
+ { " -log_max " "set_lg_max" "8388608" "9437184" "Env007.f: Log Max"
+ "log_stat" "Current log file size"}
}
- set e "berkdb env -create -mode 0644 -home $testdir -log -lock -txn "
+ set e "berkdb_env -create -mode 0644 -home $testdir -log -lock -txn "
foreach item $rlist {
set envarg [lindex $item 0]
set configarg [lindex $item 1]
@@ -72,6 +79,122 @@ proc env007 { } {
env007_check $env $statcmd $statstr $configval
error_check_good envclose:2 [$env close] 0
}
+
+ #
+	# Test all options.  For each config option, write it out
+	# to the file and make sure we can open the env.  We cannot
+	# necessarily check via stat that it took effect, but this
+	# exercises the config file parsing code itself.
+ #
+ set cfglist {
+ { "set_cachesize" "0 1048576 0" }
+ { "set_data_dir" "." }
+ { "set_flags" "db_cdb_alldb" }
+ { "set_flags" "db_direct_db" }
+ { "set_flags" "db_direct_log" }
+ { "set_flags" "db_nolocking" }
+ { "set_flags" "db_nommap" }
+ { "set_flags" "db_nopanic" }
+ { "set_flags" "db_overwrite" }
+ { "set_flags" "db_region_init" }
+ { "set_flags" "db_txn_nosync" }
+ { "set_flags" "db_txn_write_nosync" }
+ { "set_flags" "db_yieldcpu" }
+ { "set_lg_bsize" "65536" }
+ { "set_lg_dir" "." }
+ { "set_lg_max" "8388608" }
+ { "set_lg_regionmax" "65536" }
+ { "set_lk_detect" "db_lock_default" }
+ { "set_lk_detect" "db_lock_expire" }
+ { "set_lk_detect" "db_lock_maxlocks" }
+ { "set_lk_detect" "db_lock_minlocks" }
+ { "set_lk_detect" "db_lock_minwrite" }
+ { "set_lk_detect" "db_lock_oldest" }
+ { "set_lk_detect" "db_lock_random" }
+ { "set_lk_detect" "db_lock_youngest" }
+ { "set_lk_max" "50" }
+ { "set_lk_max_lockers" "1500" }
+ { "set_lk_max_locks" "29" }
+ { "set_lk_max_objects" "1500" }
+ { "set_lock_timeout" "100" }
+ { "set_mp_mmapsize" "12582912" }
+ { "set_region_init" "1" }
+ { "set_shm_key" "15" }
+ { "set_tas_spins" "15" }
+ { "set_tmp_dir" "." }
+ { "set_tx_max" "31" }
+ { "set_txn_timeout" "100" }
+ { "set_verbose" "db_verb_chkpoint" }
+ { "set_verbose" "db_verb_deadlock" }
+ { "set_verbose" "db_verb_recovery" }
+ { "set_verbose" "db_verb_waitsfor" }
+ }
+
+ puts "\tEnv007.g: Config file settings"
+ set e "berkdb_env -create -mode 0644 -home $testdir -log -lock -txn "
+ foreach item $cfglist {
+ env_cleanup $testdir
+ set configarg [lindex $item 0]
+ set configval [lindex $item 1]
+
+ env007_make_config $configarg $configval
+
+ # verify using just config file
+ puts "\t\t $configarg $configval"
+ set env [eval $e]
+ error_check_good envvalid:1 [is_valid_env $env] TRUE
+ error_check_good envclose:1 [$env close] 0
+ }
+
+ set cfglist {
+ { "set_cachesize" "1048576" }
+ { "set_flags" "db_xxx" }
+ { "set_flags" "1" }
+ { "set_flags" "db_txn_nosync x" }
+ { "set_lg_bsize" "db_xxx" }
+ { "set_lg_max" "db_xxx" }
+ { "set_lg_regionmax" "db_xxx" }
+ { "set_lk_detect" "db_xxx" }
+ { "set_lk_detect" "1" }
+ { "set_lk_detect" "db_lock_youngest x" }
+ { "set_lk_max" "db_xxx" }
+ { "set_lk_max_locks" "db_xxx" }
+ { "set_lk_max_lockers" "db_xxx" }
+ { "set_lk_max_objects" "db_xxx" }
+ { "set_mp_mmapsize" "db_xxx" }
+ { "set_region_init" "db_xxx" }
+ { "set_shm_key" "db_xxx" }
+ { "set_tas_spins" "db_xxx" }
+ { "set_tx_max" "db_xxx" }
+ { "set_verbose" "db_xxx" }
+ { "set_verbose" "1" }
+ { "set_verbose" "db_verb_recovery x" }
+ }
+ puts "\tEnv007.h: Config value errors"
+ set e "berkdb_env_noerr -create -mode 0644 \
+ -home $testdir -log -lock -txn "
+ foreach item $cfglist {
+ set configarg [lindex $item 0]
+ set configval [lindex $item 1]
+
+ env007_make_config $configarg $configval
+
+ # verify using just config file
+ puts "\t\t $configarg $configval"
+ set stat [catch {eval $e} ret]
+ error_check_good envopen $stat 1
+ error_check_good error [is_substr $errorInfo \
+ "incorrect arguments for name-value pair"] 1
+ }
+
+ puts "\tEnv007.i: Config name error set_xxx"
+ set e "berkdb_env_noerr -create -mode 0644 \
+ -home $testdir -log -lock -txn "
+ env007_make_config "set_xxx" 1
+ set stat [catch {eval $e} ret]
+ error_check_good envopen $stat 1
+ error_check_good error [is_substr $errorInfo \
+ "unrecognized name-value pair"] 1
}
proc env007_check { env statcmd statstr testval } {
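
Env007.g and Env007.h only check that the environment opens or refuses to open. When a setting is visible through a stat call, it can also be verified the way env007_check does with the rlist entries above. A sketch of that check for set_lk_max_locks, reusing the -lock_max_locks flag and the "Maximum locks" stat field named in the test's own table:

source ./include.tcl
env_cleanup $testdir
set nlocks 29

# Open with the command-line form of the option ...
set env [berkdb_env -create -mode 0644 -home $testdir \
	-log -lock -txn -lock_max_locks $nlocks]
error_check_good env_open [is_valid_env $env] TRUE

# ... then confirm the value is reflected in lock_stat output.
set found 0
foreach pair [$env lock_stat] {
	if { [is_substr [lindex $pair 0] "Maximum locks"] != 0 } {
		error_check_good max_locks [lindex $pair 1] $nlocks
		set found 1
	}
}
error_check_good stat_field_seen $found 1
error_check_good env_close [$env close] 0
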
diff --git a/bdb/test/env008.tcl b/bdb/test/env008.tcl
index 645f07f63d6..dccdb41f612 100644
--- a/bdb/test/env008.tcl
+++ b/bdb/test/env008.tcl
@@ -1,11 +1,12 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1999, 2000
+# Copyright (c) 1999-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: env008.tcl,v 11.2 2000/10/30 19:00:38 sue Exp $
+# $Id: env008.tcl,v 11.6 2002/02/22 14:29:34 sandstro Exp $
#
-# Test of env and subdirs.
+# TEST env008
+# TEST Test environments and subdirectories.
proc env008 { } {
global errorInfo
global errorCode
@@ -21,9 +22,8 @@ proc env008 { } {
puts "Env008: Test of environments and subdirectories."
- # Try opening without Create flag should error
puts "\tEnv008.a: Create env and db."
- set env [berkdb env -create -mode 0644 -home $testdir -txn]
+ set env [berkdb_env -create -mode 0644 -home $testdir -txn]
error_check_good env [is_valid_env $env] TRUE
puts "\tEnv008.b: Remove db in subdir."
diff --git a/bdb/test/env009.tcl b/bdb/test/env009.tcl
new file mode 100644
index 00000000000..264d5e2dfec
--- /dev/null
+++ b/bdb/test/env009.tcl
@@ -0,0 +1,57 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: env009.tcl,v 11.5 2002/08/12 20:40:36 sandstro Exp $
+#
+# TEST env009
+# TEST Test calls to all the various stat functions. We have several
+# TEST sprinkled throughout the test suite, but this will ensure that
+# TEST we run all of them at least once.
+proc env009 { } {
+ source ./include.tcl
+
+ puts "Env009: Various stat function test."
+
+ env_cleanup $testdir
+ puts "\tEnv009.a: Setting up env and a database."
+
+ set e [berkdb_env -create -home $testdir -txn]
+ error_check_good dbenv [is_valid_env $e] TRUE
+ set dbbt [berkdb_open -create -btree $testdir/env009bt.db]
+ error_check_good dbopen [is_valid_db $dbbt] TRUE
+ set dbh [berkdb_open -create -hash $testdir/env009h.db]
+ error_check_good dbopen [is_valid_db $dbh] TRUE
+ set dbq [berkdb_open -create -btree $testdir/env009q.db]
+ error_check_good dbopen [is_valid_db $dbq] TRUE
+
+ set rlist {
+ { "lock_stat" "Maximum locks" "Env009.b"}
+ { "log_stat" "Magic" "Env009.c"}
+ { "mpool_stat" "Number of caches" "Env009.d"}
+ { "txn_stat" "Max Txns" "Env009.e"}
+ }
+
+ foreach pair $rlist {
+ set cmd [lindex $pair 0]
+ set str [lindex $pair 1]
+ set msg [lindex $pair 2]
+ puts "\t$msg: $cmd"
+ set ret [$e $cmd]
+ error_check_good $cmd [is_substr $ret $str] 1
+ }
+ puts "\tEnv009.f: btree stats"
+ set ret [$dbbt stat]
+ error_check_good $cmd [is_substr $ret "Magic"] 1
+ puts "\tEnv009.g: hash stats"
+ set ret [$dbh stat]
+ error_check_good $cmd [is_substr $ret "Magic"] 1
+	puts "\tEnv009.h: queue stats"
+ set ret [$dbq stat]
+ error_check_good $cmd [is_substr $ret "Magic"] 1
+ error_check_good dbclose [$dbbt close] 0
+ error_check_good dbclose [$dbh close] 0
+ error_check_good dbclose [$dbq close] 0
+ error_check_good envclose [$e close] 0
+}
diff --git a/bdb/test/env010.tcl b/bdb/test/env010.tcl
new file mode 100644
index 00000000000..4444e34e439
--- /dev/null
+++ b/bdb/test/env010.tcl
@@ -0,0 +1,49 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: env010.tcl,v 1.4 2002/02/20 17:08:21 sandstro Exp $
+#
+# TEST env010
+# TEST Run recovery in an empty directory, and then make sure we can still
+# TEST create a database in that directory.
+proc env010 { } {
+ source ./include.tcl
+
+ puts "Env010: Test of recovery in an empty directory."
+
+ # Create a new directory used only for this test
+
+ if { [file exists $testdir/EMPTYDIR] != 1 } {
+ file mkdir $testdir/EMPTYDIR
+ } else {
+ puts "\nDirectory already exists."
+ }
+
+ # Do the test twice, for regular recovery and catastrophic
+ # Open environment and recover, but don't create a database
+
+ foreach rmethod {recover recover_fatal} {
+
+ puts "\tEnv010: Creating env for $rmethod test."
+ env_cleanup $testdir/EMPTYDIR
+ set e [berkdb_env -create -home $testdir/EMPTYDIR -$rmethod]
+ error_check_good dbenv [is_valid_env $e] TRUE
+
+ # Open and close a database
+		# The method doesn't matter, so btree was picked arbitrarily
+
+ set db [eval {berkdb_open -env $e \
+ -btree -create -mode 0644} ]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ error_check_good db_close [$db close] 0
+
+ # Close environment
+
+ error_check_good envclose [$e close] 0
+ error_check_good berkdb:envremove \
+ [berkdb envremove -home $testdir/EMPTYDIR] 0
+ }
+ puts "\tEnv010 complete."
+}
diff --git a/bdb/test/env011.tcl b/bdb/test/env011.tcl
new file mode 100644
index 00000000000..4061bb3fe51
--- /dev/null
+++ b/bdb/test/env011.tcl
@@ -0,0 +1,39 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: env011.tcl,v 1.2 2002/02/20 17:08:21 sandstro Exp $
+#
+# TEST env011
+# TEST Run with region overwrite flag.
+proc env011 { } {
+ source ./include.tcl
+
+ puts "Env011: Test of region overwriting."
+ env_cleanup $testdir
+
+ puts "\tEnv011: Creating/closing env for open test."
+ set e [berkdb_env -create -overwrite -home $testdir -txn]
+ error_check_good dbenv [is_valid_env $e] TRUE
+ set db [eval \
+ {berkdb_open -auto_commit -env $e -btree -create -mode 0644} ]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ set ret [eval {$db put} -auto_commit "aaa" "data"]
+ error_check_good put $ret 0
+ set ret [eval {$db put} -auto_commit "bbb" "data"]
+ error_check_good put $ret 0
+ error_check_good db_close [$db close] 0
+ error_check_good envclose [$e close] 0
+
+ puts "\tEnv011: Opening the environment with overwrite set."
+ set e [berkdb_env -create -overwrite -home $testdir -txn -recover]
+ error_check_good dbenv [is_valid_env $e] TRUE
+ error_check_good envclose [$e close] 0
+
+ puts "\tEnv011: Removing the environment with overwrite set."
+ error_check_good berkdb:envremove \
+ [berkdb envremove -home $testdir -overwrite] 0
+
+ puts "\tEnv011 complete."
+}
diff --git a/bdb/test/hsearch.tcl b/bdb/test/hsearch.tcl
index 0afee7fb2de..afeed93f74e 100644
--- a/bdb/test/hsearch.tcl
+++ b/bdb/test/hsearch.tcl
@@ -1,9 +1,9 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: hsearch.tcl,v 11.7 2000/08/25 14:21:50 sue Exp $
+# $Id: hsearch.tcl,v 11.9 2002/01/11 15:53:24 bostic Exp $
#
# Historic Hsearch interface test.
# Use the first 1000 entries from the dictionary.
diff --git a/bdb/test/join.tcl b/bdb/test/join.tcl
index ebf33b8cdf3..87b0d1fae58 100644
--- a/bdb/test/join.tcl
+++ b/bdb/test/join.tcl
@@ -1,19 +1,23 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: join.tcl,v 11.17 2000/08/25 14:21:51 sue Exp $
+# $Id: join.tcl,v 11.21 2002/02/20 17:08:22 sandstro Exp $
#
-# We'll test 2-way, 3-way, and 4-way joins and figure that if those work,
-# everything else does as well. We'll create test databases called
-# join1.db, join2.db, join3.db, and join4.db. The number on the database
-# describes the duplication -- duplicates are of the form 0, N, 2N, 3N, ...
-# where N is the number of the database. Primary.db is the primary database,
-# and null.db is the database that has no matching duplicates.
-#
-# We should test this on all btrees, all hash, and a combination thereof
-# Join test.
+# TEST jointest
+# TEST Test duplicate assisted joins. Executes 1, 2, 3 and 4-way joins
+# TEST with differing index orders and selectivity.
+# TEST
+# TEST We'll test 2-way, 3-way, and 4-way joins and figure that if those
+# TEST work, everything else does as well. We'll create test databases
+# TEST called join1.db, join2.db, join3.db, and join4.db. The number on
+# TEST the database describes the duplication -- duplicates are of the
+# TEST form 0, N, 2N, 3N, ... where N is the number of the database.
+# TEST Primary.db is the primary database, and null.db is the database
+# TEST that has no matching duplicates.
+# TEST
+# TEST We should test this on all btrees, all hash, and a combination thereof
proc jointest { {psize 8192} {with_dup_dups 0} {flags 0} } {
global testdir
global rand_init
@@ -24,7 +28,7 @@ proc jointest { {psize 8192} {with_dup_dups 0} {flags 0} } {
# Use one environment for all database opens so we don't
# need oodles of regions.
- set env [berkdb env -create -home $testdir]
+ set env [berkdb_env -create -home $testdir]
error_check_good env_open [is_valid_env $env] TRUE
# With the new offpage duplicate code, we don't support
diff --git a/bdb/test/lock001.tcl b/bdb/test/lock001.tcl
index d571a987240..1afcc471fc1 100644
--- a/bdb/test/lock001.tcl
+++ b/bdb/test/lock001.tcl
@@ -1,67 +1,28 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: lock001.tcl,v 11.11 2000/08/25 14:21:51 sue Exp $
+# $Id: lock001.tcl,v 11.19 2002/04/25 19:30:28 sue Exp $
#
-# Test driver for lock tests.
-# General Multi Random
-# Options are:
-# -dir <directory in which to store mpool> Y Y Y
-# -iterations <iterations> Y N Y
-# -ldegree <number of locks per iteration> N N Y
-# -maxlocks <locks in table> Y Y Y
-# -objs <number of objects> N N Y
-# -procs <number of processes to run> N N Y
-# -reads <read ratio> N N Y
-# -seeds <list of seed values for processes> N N Y
-# -wait <wait interval after getting locks> N N Y
-# -conflicts <conflict matrix; a list of lists> Y Y Y
-proc lock_usage {} {
- puts stderr "randomlock\n\t-dir <dir>\n\t-iterations <iterations>"
- puts stderr "\t-conflicts <conflict matrix>"
- puts stderr "\t-ldegree <locks per iteration>\n\t-maxlocks <n>"
- puts stderr "\t-objs <objects>\n\t-procs <nprocs>\n\t-reads <%reads>"
- puts stderr "\t-seeds <list of seeds>\n\t-wait <max wait interval>"
- return
-}
-proc locktest { args } {
+# TEST lock001
+# TEST Make sure that the basic lock tests work. Do some simple gets
+# TEST and puts for a single locker.
+proc lock001 { {iterations 1000} {maxlocks 1000} } {
source ./include.tcl
+ global lock_curid
+ global lock_maxid
+
+ set save_curid $lock_curid
+ set save_maxid $lock_maxid
# Set defaults
# Adjusted to make exact match of isqrt
#set conflicts { 3 0 0 0 0 0 1 0 1 1}
#set conflicts { 3 0 0 0 0 1 0 1 1}
+
set conflicts { 0 0 0 0 0 1 0 1 1}
- set iterations 1000
- set ldegree 5
- set maxlocks 1000
- set objs 75
- set procs 5
- set reads 65
- set seeds {}
- set wait 5
- for { set i 0 } { $i < [llength $args] } {incr i} {
- switch -regexp -- [lindex $args $i] {
- -c.* { incr i; set conflicts [linkdex $args $i] }
- -d.* { incr i; set testdir [lindex $args $i] }
- -i.* { incr i; set iterations [lindex $args $i] }
- -l.* { incr i; set ldegree [lindex $args $i] }
- -m.* { incr i; set maxlocks [lindex $args $i] }
- -o.* { incr i; set objs [lindex $args $i] }
- -p.* { incr i; set procs [lindex $args $i] }
- -r.* { incr i; set reads [lindex $args $i] }
- -s.* { incr i; set seeds [lindex $args $i] }
- -w.* { incr i; set wait [lindex $args $i] }
- default {
- puts -nonewline "FAIL:[timestamp] Usage: "
- lock_usage
- return
- }
- }
- }
set nmodes [isqrt [llength $conflicts]]
# Cleanup
@@ -70,26 +31,15 @@ proc locktest { args } {
# Open the region we'll use for testing.
set eflags "-create -lock -home $testdir -mode 0644 \
-lock_max $maxlocks -lock_conflict {$nmodes {$conflicts}}"
- set env [eval {berkdb env} $eflags]
- lock001 $env $iterations $nmodes
- reset_env $env
- env_cleanup $testdir
-
- lock002 $maxlocks $conflicts
-
- lock003 $testdir $iterations \
- $maxlocks $procs $ldegree $objs $reads $wait $conflicts $seeds
-}
-
-# Make sure that the basic lock tests work. Do some simple gets and puts for
-# a single locker.
-proc lock001 {env iter nmodes} {
- source ./include.tcl
+ set env [eval {berkdb_env} $eflags]
+ error_check_good env [is_valid_env $env] TRUE
+ error_check_good lock_id_set \
+ [$env lock_id_set $lock_curid $lock_maxid] 0
puts "Lock001: test basic lock operations"
- set locker 999
+ set locker [$env lock_id]
# Get and release each type of lock
- puts "Lock001.a: get and release each type of lock"
+ puts "\tLock001.a: get and release each type of lock"
foreach m {ng write read} {
set obj obj$m
set lockp [$env lock_get $m $locker $obj]
@@ -101,7 +51,7 @@ proc lock001 {env iter nmodes} {
# Get a bunch of locks for the same locker; these should work
set obj OBJECT
- puts "Lock001.b: Get a bunch of locks for the same locker"
+ puts "\tLock001.b: Get a bunch of locks for the same locker"
foreach m {ng write read} {
set lockp [$env lock_get $m $locker $obj ]
lappend locklist $lockp
@@ -112,7 +62,7 @@ proc lock001 {env iter nmodes} {
set locklist {}
# Check that reference counted locks work
- puts "Lock001.c: reference counted locks."
+ puts "\tLock001.c: reference counted locks."
for {set i 0} { $i < 10 } {incr i} {
set lockp [$env lock_get -nowait write $locker $obj]
error_check_good lock_get:c [is_blocked $lockp] 0
@@ -131,10 +81,10 @@ proc lock001 {env iter nmodes} {
}
# Change the locker
- set locker [incr locker]
+ set locker [$env lock_id]
set blocklist {}
# Skip NO_LOCK lock.
- puts "Lock001.e: Change the locker, acquire read and write."
+ puts "\tLock001.d: Change the locker, acquire read and write."
foreach i {write read} {
catch {$env lock_get -nowait $i $locker $obj} ret
error_check_good lock_get:e [is_substr $ret "not granted"] 1
@@ -146,7 +96,7 @@ proc lock001 {env iter nmodes} {
# Now re-acquire blocking locks
set locklist {}
- puts "Lock001.f: Re-acquire blocking locks."
+ puts "\tLock001.e: Re-acquire blocking locks."
foreach i {write read} {
set lockp [$env lock_get -nowait $i $locker $obj ]
error_check_good lock_get:f [is_substr $lockp $env] 1
@@ -156,8 +106,10 @@ proc lock001 {env iter nmodes} {
# Now release new locks
release_list $locklist
+ error_check_good free_id [$env lock_id_free $locker] 0
+
+ error_check_good envclose [$env close] 0
- puts "Lock001 Complete."
}
# Blocked locks appear as lockmgrN.lockM\nBLOCKED
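
The cycle lock001 now exercises per locker is: allocate an id, take a lock, release it, free the id. A bare-bones sketch of one round trip; $env is assumed to be a handle opened with -lock as in the test, and the object name is arbitrary.

set locker [$env lock_id]
set lockp [$env lock_get read $locker some_object]
error_check_good lock_valid [is_valid_lock $lockp $env] TRUE

# ... work protected by the read lock ...

error_check_good lock_put [$lockp put] 0
error_check_good locker_free [$env lock_id_free $locker] 0
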
diff --git a/bdb/test/lock002.tcl b/bdb/test/lock002.tcl
index b433730b1e6..a1ad8760c9d 100644
--- a/bdb/test/lock002.tcl
+++ b/bdb/test/lock002.tcl
@@ -1,11 +1,12 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: lock002.tcl,v 11.10 2000/08/25 14:21:51 sue Exp $
+# $Id: lock002.tcl,v 11.19 2002/04/25 19:30:29 sue Exp $
#
-# Exercise basic multi-process aspects of lock.
+# TEST lock002
+# TEST Exercise basic multi-process aspects of lock.
proc lock002 { {maxlocks 1000} {conflicts {0 0 0 0 0 1 0 1 1} } } {
source ./include.tcl
@@ -24,22 +25,25 @@ proc lock002 { {maxlocks 1000} {conflicts {0 0 0 0 0 1 0 1 1} } } {
# detach from it, etc.
proc mlock_open { maxl nmodes conflicts } {
source ./include.tcl
+ global lock_curid
+ global lock_maxid
- puts "Lock002.a multi-process open/close test"
+ puts "\tLock002.a multi-process open/close test"
# Open/Create region here. Then close it and try to open from
# other test process.
- set env_cmd [concat "berkdb env -create -mode 0644 \
+ set env_cmd [concat "berkdb_env -create -mode 0644 \
-lock -lock_max $maxl -lock_conflict" \
[list [list $nmodes $conflicts]] "-home $testdir"]
set local_env [eval $env_cmd]
+ $local_env lock_id_set $lock_curid $lock_maxid
error_check_good env_open [is_valid_env $local_env] TRUE
set ret [$local_env close]
error_check_good env_close $ret 0
# Open from other test process
- set env_cmd "berkdb env -mode 0644 -home $testdir"
+ set env_cmd "berkdb_env -mode 0644 -home $testdir"
set f1 [open |$tclsh_path r+]
puts $f1 "source $test_path/test.tcl"
@@ -58,7 +62,7 @@ proc mlock_open { maxl nmodes conflicts } {
error_check_good remote:lock_close $ret 0
# Try opening for create. Will succeed because region exists.
- set env_cmd [concat "berkdb env -create -mode 0644 \
+ set env_cmd [concat "berkdb_env -create -mode 0644 \
-lock -lock_max $maxl -lock_conflict" \
[list [list $nmodes $conflicts]] "-home $testdir"]
set local_env [eval $env_cmd]
@@ -76,10 +80,10 @@ proc mlock_open { maxl nmodes conflicts } {
proc mlock_wait { } {
source ./include.tcl
- puts "Lock002.b multi-process get/put wait test"
+ puts "\tLock002.b multi-process get/put wait test"
# Open region locally
- set env_cmd "berkdb env -lock -home $testdir"
+ set env_cmd "berkdb_env -lock -home $testdir"
set local_env [eval $env_cmd]
error_check_good env_open [is_valid_env $local_env] TRUE
@@ -95,15 +99,15 @@ proc mlock_wait { } {
# remotely. We hold the locks for several seconds
# so that we can use timestamps to figure out if the
# other process waited.
- set locker 1
- set local_lock [$local_env lock_get write $locker object1]
+ set locker1 [$local_env lock_id]
+ set local_lock [$local_env lock_get write $locker1 object1]
error_check_good lock_get [is_valid_lock $local_lock $local_env] TRUE
# Now request a lock that we expect to hang; generate
# timestamps so we can tell if it actually hangs.
- set locker 2
+ set locker2 [send_cmd $f1 "$remote_env lock_id"]
set remote_lock [send_timed_cmd $f1 1 \
- "set lock \[$remote_env lock_get write $locker object1\]"]
+ "set lock \[$remote_env lock_get write $locker2 object1\]"]
# Now sleep before releasing lock
tclsleep 5
@@ -127,8 +131,7 @@ proc mlock_wait { } {
set ret [send_cmd $f1 "$remote_lock put"]
- set locker 1
- set local_lock [$local_env lock_get write $locker object1]
+ set local_lock [$local_env lock_get write $locker1 object1]
error_check_good lock_get:time \
[expr [expr [timestamp -r] - $start] > 2] 1
error_check_good lock_get:local \
@@ -139,6 +142,8 @@ proc mlock_wait { } {
error_check_good lock_put:remote $result 0
# Clean up remote
+ set result [send_cmd $f1 "$remote_env lock_id_free $locker2" ]
+ error_check_good remote_free_id $result 0
set ret [send_cmd $f1 "reset_env $remote_env"]
close $f1
@@ -146,6 +151,7 @@ proc mlock_wait { } {
# Now close up locally
set ret [$local_lock put]
error_check_good lock_put $ret 0
+ error_check_good lock_id_free [$local_env lock_id_free $locker1] 0
reset_env $local_env
}
diff --git a/bdb/test/lock003.tcl b/bdb/test/lock003.tcl
index 539b6d0ff66..91a8a2e90f6 100644
--- a/bdb/test/lock003.tcl
+++ b/bdb/test/lock003.tcl
@@ -1,48 +1,99 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: lock003.tcl,v 11.16 2000/08/25 14:21:51 sue Exp $
+# $Id: lock003.tcl,v 11.25 2002/09/05 17:23:06 sandstro Exp $
#
-# Exercise multi-process aspects of lock. Generate a bunch of parallel
-# testers that try to randomly obtain locks.
-proc lock003 { dir {iter 500} {max 1000} {procs 5} {ldegree 5} {objs 75} \
- {reads 65} {wait 1} {conflicts { 3 0 0 0 0 0 1 0 1 1}} {seeds {}} } {
+# TEST lock003
+# TEST Exercise multi-process aspects of lock. Generate a bunch of parallel
+# TEST testers that try to randomly obtain locks; make sure that the locks
+# TEST correctly protect corresponding objects.
+proc lock003 { {iter 500} {max 1000} {procs 5} } {
source ./include.tcl
+ global lock_curid
+ global lock_maxid
+
+ set ldegree 5
+ set objs 75
+ set reads 65
+ set wait 1
+ set conflicts { 0 0 0 0 0 1 0 1 1}
+ set seeds {}
puts "Lock003: Multi-process random lock test"
# Clean up after previous runs
- env_cleanup $dir
+ env_cleanup $testdir
# Open/create the lock region
- set e [berkdb env -create -lock -home $dir]
+ puts "\tLock003.a: Create environment"
+ set e [berkdb_env -create -lock -home $testdir]
error_check_good env_open [is_substr $e env] 1
+ $e lock_id_set $lock_curid $lock_maxid
- set ret [$e close]
- error_check_good env_close $ret 0
+ error_check_good env_close [$e close] 0
# Now spawn off processes
set pidlist {}
+
for { set i 0 } {$i < $procs} {incr i} {
if { [llength $seeds] == $procs } {
set s [lindex $seeds $i]
}
- puts "$tclsh_path\
- $test_path/wrap.tcl \
- lockscript.tcl $dir/$i.lockout\
- $dir $iter $objs $wait $ldegree $reads &"
+# puts "$tclsh_path\
+# $test_path/wrap.tcl \
+# lockscript.tcl $testdir/$i.lockout\
+# $testdir $iter $objs $wait $ldegree $reads &"
set p [exec $tclsh_path $test_path/wrap.tcl \
lockscript.tcl $testdir/lock003.$i.out \
- $dir $iter $objs $wait $ldegree $reads &]
+ $testdir $iter $objs $wait $ldegree $reads &]
lappend pidlist $p
}
- puts "Lock003: $procs independent processes now running"
- watch_procs 30 10800
+ puts "\tLock003.b: $procs independent processes now running"
+ watch_procs $pidlist 30 10800
+
+ # Check for test failure
+ set e [eval findfail [glob $testdir/lock003.*.out]]
+ error_check_good "FAIL: error message(s) in log files" $e 0
+
# Remove log files
for { set i 0 } {$i < $procs} {incr i} {
- fileremove -f $dir/$i.lockout
+ fileremove -f $testdir/lock003.$i.out
+ }
+}
+
+# Create and destroy flag files to show we have an object locked, and
+# verify that the correct files exist or don't exist given that we've
+# just read or write locked a file.
+proc lock003_create { rw obj } {
+ source ./include.tcl
+
+ set pref $testdir/L3FLAG
+ set f [open $pref.$rw.[pid].$obj w]
+ close $f
+}
+
+proc lock003_destroy { obj } {
+ source ./include.tcl
+
+ set pref $testdir/L3FLAG
+ set f [glob -nocomplain $pref.*.[pid].$obj]
+ error_check_good l3_destroy [llength $f] 1
+ fileremove $f
+}
+
+proc lock003_vrfy { rw obj } {
+ source ./include.tcl
+
+ set pref $testdir/L3FLAG
+ if { [string compare $rw "write"] == 0 } {
+ set fs [glob -nocomplain $pref.*.*.$obj]
+ error_check_good "number of other locks on $obj" [llength $fs] 0
+ } else {
+ set fs [glob -nocomplain $pref.write.*.$obj]
+ error_check_good "number of write locks on $obj" [llength $fs] 0
}
}
+
diff --git a/bdb/test/lock004.tcl b/bdb/test/lock004.tcl
new file mode 100644
index 00000000000..7fd51ee42f2
--- /dev/null
+++ b/bdb/test/lock004.tcl
@@ -0,0 +1,29 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: lock004.tcl,v 11.5 2002/04/25 19:30:30 sue Exp $
+#
+# TEST lock004
+# TEST Test locker ids wrapping around.
+
+proc lock004 {} {
+ source ./include.tcl
+ global lock_curid
+ global lock_maxid
+
+ set save_curid $lock_curid
+ set save_maxid $lock_maxid
+
+ set lock_curid [expr $lock_maxid - 1]
+ puts "Lock004: Locker id wraparound test"
+ puts "\tLock004.a: repeat lock001-lock003 with wraparound lockids"
+
+ lock001
+ lock002
+ lock003
+
+ set lock_curid $save_curid
+ set lock_maxid $save_maxid
+}
diff --git a/bdb/test/lock005.tcl b/bdb/test/lock005.tcl
new file mode 100644
index 00000000000..5afe7344d36
--- /dev/null
+++ b/bdb/test/lock005.tcl
@@ -0,0 +1,177 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2001
+# Sleepycat Software. All rights reserved.
+#
+# $Id: lock005.tcl,v 1.7 2002/08/08 15:38:07 bostic Exp $
+#
+# TEST lock005
+# TEST Check that page locks are being released properly.
+
+proc lock005 { } {
+ source ./include.tcl
+
+ puts "Lock005: Page lock release test"
+
+ # Clean up after previous runs
+ env_cleanup $testdir
+
+ # Open/create the lock region
+ set e [berkdb_env -create -lock -home $testdir -txn -log]
+ error_check_good env_open [is_valid_env $e] TRUE
+
+ # Open/create the database
+ set db [berkdb open -create -auto_commit -env $e -len 10 -queue q.db]
+ error_check_good dbopen [is_valid_db $db] TRUE
+	# Check that records are being locked by trying to
+	# fetch a record under the wrong transaction.
+ # fetch a record on the wrong transaction.
+ puts "\tLock005.a: Verify that we are locking"
+
+ # Start the first transaction
+ set txn1 [$e txn -nowait]
+ error_check_good txn_begin [is_valid_txn $txn1 $e] TRUE
+ set ret [catch {$db put -txn $txn1 -append record1} recno1]
+ error_check_good dbput_txn1 $ret 0
+
+ # Start second txn while the first is still running ...
+ set txn2 [$e txn -nowait]
+ error_check_good txn_begin [is_valid_txn $txn2 $e] TRUE
+
+ # ... and try to get a record from the first txn (should fail)
+ set ret [catch {$db get -txn $txn2 $recno1} res]
+ error_check_good dbget_wrong_record \
+ [is_substr $res "Lock not granted"] 1
+
+ # End transactions
+ error_check_good txn1commit [$txn1 commit] 0
+ how_many_locks 1 $e
+ error_check_good txn2commit [$txn2 commit] 0
+ # The number of locks stays the same here because the first
+ # lock is released and the second lock was never granted.
+ how_many_locks 1 $e
+
+ # Test lock behavior for both abort and commit
+ puts "\tLock005.b: Verify locks after abort or commit"
+ foreach endorder {forward reverse} {
+ end_order_test $db $e commit abort $endorder
+ end_order_test $db $e abort commit $endorder
+ end_order_test $db $e commit commit $endorder
+ end_order_test $db $e abort abort $endorder
+ }
+
+ # Clean up
+ error_check_good db_close [$db close] 0
+ error_check_good env_close [$e close] 0
+}
+
+proc end_order_test { db e txn1end txn2end endorder } {
+ # Start one transaction
+ set txn1 [$e txn -nowait]
+ error_check_good txn_begin [is_valid_txn $txn1 $e] TRUE
+ set ret [catch {$db put -txn $txn1 -append record1} recno1]
+ error_check_good dbput_txn1 $ret 0
+
+ # Check number of locks
+ how_many_locks 2 $e
+
+ # Start a second transaction while first is still running
+ set txn2 [$e txn -nowait]
+ error_check_good txn_begin [is_valid_txn $txn2 $e] TRUE
+ set ret [catch {$db put -txn $txn2 -append record2} recno2]
+ error_check_good dbput_txn2 $ret 0
+ how_many_locks 3 $e
+
+ # Now commit or abort one txn and make sure the other is okay
+ if {$endorder == "forward"} {
+ # End transaction 1 first
+ puts "\tLock005.b.1: $txn1end txn1 then $txn2end txn2"
+ error_check_good txn_$txn1end [$txn1 $txn1end] 0
+ how_many_locks 2 $e
+
+ # txn1 is now ended, but txn2 is still running
+ set ret1 [catch {$db get -txn $txn2 $recno1} res1]
+ set ret2 [catch {$db get -txn $txn2 $recno2} res2]
+ if { $txn1end == "commit" } {
+ error_check_good txn2_sees_txn1 $ret1 0
+ error_check_good txn2_sees_txn2 $ret2 0
+ } else {
+ # transaction 1 was aborted
+ error_check_good txn2_cantsee_txn1 [llength $res1] 0
+ }
+
+ # End transaction 2 second
+ error_check_good txn_$txn2end [$txn2 $txn2end] 0
+ how_many_locks 1 $e
+
+ # txn1 and txn2 should both now be invalid
+ # The get no longer needs to be transactional
+ set ret3 [catch {$db get $recno1} res3]
+ set ret4 [catch {$db get $recno2} res4]
+
+ if { $txn2end == "commit" } {
+ error_check_good txn2_sees_txn1 $ret3 0
+ error_check_good txn2_sees_txn2 $ret4 0
+ error_check_good txn2_has_record2 \
+ [is_substr $res4 "record2"] 1
+ } else {
+ # transaction 2 was aborted
+ error_check_good txn2_cantsee_txn1 $ret3 0
+ error_check_good txn2_aborted [llength $res4] 0
+ }
+
+ } elseif { $endorder == "reverse" } {
+ # End transaction 2 first
+ puts "\tLock005.b.2: $txn2end txn2 then $txn1end txn1"
+ error_check_good txn_$txn2end [$txn2 $txn2end] 0
+ how_many_locks 2 $e
+
+ # txn2 is ended, but txn1 is still running
+ set ret1 [catch {$db get -txn $txn1 $recno1} res1]
+ set ret2 [catch {$db get -txn $txn1 $recno2} res2]
+ if { $txn2end == "commit" } {
+ error_check_good txn1_sees_txn1 $ret1 0
+ error_check_good txn1_sees_txn2 $ret2 0
+ } else {
+ # transaction 2 was aborted
+ error_check_good txn1_cantsee_txn2 [llength $res2] 0
+ }
+
+ # End transaction 1 second
+ error_check_good txn_$txn1end [$txn1 $txn1end] 0
+ how_many_locks 1 $e
+
+ # txn1 and txn2 should both now be invalid
+ # The get no longer needs to be transactional
+ set ret3 [catch {$db get $recno1} res3]
+ set ret4 [catch {$db get $recno2} res4]
+
+ if { $txn1end == "commit" } {
+ error_check_good txn1_sees_txn1 $ret3 0
+ error_check_good txn1_sees_txn2 $ret4 0
+ error_check_good txn1_has_record1 \
+ [is_substr $res3 "record1"] 1
+ } else {
+ # transaction 1 was aborted
+ error_check_good txn1_cantsee_txn2 $ret4 0
+ error_check_good txn1_aborted [llength $res3] 0
+ }
+ }
+}
+
+proc how_many_locks { expected env } {
+ set stat [$env lock_stat]
+ set str "Current number of locks"
+ set checked 0
+ foreach statpair $stat {
+ if { $checked == 1 } {
+ break
+ }
+ if { [is_substr [lindex $statpair 0] $str] != 0} {
+ set checked 1
+ set nlocks [lindex $statpair 1]
+ error_check_good expected_nlocks $nlocks $expected
+ }
+ }
+ error_check_good checked $checked 1
+}
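
how_many_locks is hard-wired to one lock_stat field. The same pair-scanning idea generalizes to any stat field; the helper below is a hypothetical variant, not part of the suite, shown only to make the lookup pattern explicit.

# Hypothetical helper: return the value of the first stat pair whose
# name contains $field, or fail the test if no such pair exists.
proc stat_field_sketch { statlist field } {
	foreach statpair $statlist {
		if { [is_substr [lindex $statpair 0] $field] != 0 } {
			return [lindex $statpair 1]
		}
	}
	error_check_good "stat field ($field) present" 0 1
}

# Usage, mirroring how_many_locks:
#   set nlocks [stat_field_sketch [$e lock_stat] "Current number of locks"]
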
diff --git a/bdb/test/lockscript.tcl b/bdb/test/lockscript.tcl
index bd07d80b54b..812339a4a70 100644
--- a/bdb/test/lockscript.tcl
+++ b/bdb/test/lockscript.tcl
@@ -1,9 +1,9 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: lockscript.tcl,v 11.11 2000/03/24 19:53:39 krinsky Exp $
+# $Id: lockscript.tcl,v 11.17 2002/02/20 17:08:23 sandstro Exp $
#
# Random lock tester.
# Usage: lockscript dir numiters numobjs sleepint degree readratio
@@ -32,25 +32,28 @@ set numobjs [ lindex $argv 2 ]
set sleepint [ lindex $argv 3 ]
set degree [ lindex $argv 4 ]
set readratio [ lindex $argv 5 ]
-set locker [pid]
# Initialize random number generator
global rand_init
berkdb srand $rand_init
+
+catch { berkdb_env -create -lock -home $dir } e
+error_check_good env_open [is_substr $e env] 1
+catch { $e lock_id } locker
+error_check_good locker [is_valid_locker $locker] TRUE
+
puts -nonewline "Beginning execution for $locker: $numiters $numobjs "
puts "$sleepint $degree $readratio"
flush stdout
-set e [berkdb env -create -lock -home $dir]
-error_check_good env_open [is_substr $e env] 1
-
for { set iter 0 } { $iter < $numiters } { incr iter } {
set nlocks [berkdb random_int 1 $degree]
# We will always lock objects in ascending order to avoid
# deadlocks.
set lastobj 1
set locklist {}
+ set objlist {}
for { set lnum 0 } { $lnum < $nlocks } { incr lnum } {
# Pick lock parameters
set obj [berkdb random_int $lastobj $numobjs]
@@ -61,20 +64,46 @@ for { set iter 0 } { $iter < $numiters } { incr iter } {
} else {
set rw write
}
- puts "[timestamp] $locker $lnum: $rw $obj"
+ puts "[timestamp -c] $locker $lnum: $rw $obj"
# Do get; add to list
- set lockp [$e lock_get $rw $locker $obj]
+ catch {$e lock_get $rw $locker $obj} lockp
+ error_check_good lock_get [is_valid_lock $lockp $e] TRUE
+
+ # Create a file to flag that we have a lock of the given
+ # type, after first making sure that only other read locks
+ # exist (if we're read locking) or that no other locks exist
+ # (if we're writing).
+ lock003_vrfy $rw $obj
+ lock003_create $rw $obj
+ lappend objlist [list $obj $rw]
+
lappend locklist $lockp
if {$lastobj > $numobjs} {
break
}
}
# Pick sleep interval
- tclsleep [berkdb random_int 1 $sleepint]
+ puts "[timestamp -c] $locker sleeping"
+ # We used to sleep 1 to $sleepint seconds, which made the test
+ # run for hours.  Instead, sleep for 10 to $sleepint * 100
+ # milliseconds, a maximum of 0.5 s with the usual sleepint of 5.
+ after [berkdb random_int 10 [expr $sleepint * 100]]
+ puts "[timestamp -c] $locker awake"
# Now release locks
- puts "[timestamp] $locker released locks"
+ puts "[timestamp -c] $locker released locks"
+
+ # Delete our locking flag files, then reverify. (Note that the
+ # locking flag verification function assumes that our own lock
+ # is not currently flagged.)
+ foreach pair $objlist {
+ set obj [lindex $pair 0]
+ set rw [lindex $pair 1]
+ lock003_destroy $obj
+ lock003_vrfy $rw $obj
+ }
+
release_list $locklist
flush stdout
}
@@ -82,7 +111,7 @@ for { set iter 0 } { $iter < $numiters } { incr iter } {
set ret [$e close]
error_check_good env_close $ret 0
-puts "[timestamp] $locker Complete"
+puts "[timestamp -c] $locker Complete"
flush stdout
exit
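The sleep change in the hunk above trades tclsleep (whole seconds) for after (milliseconds). With the suite's usual sleepint of 5 the interval works out to 10..500 ms; a sketch of the same computation in plain Tcl:

set sleepint 5
set low 10
set high [expr $sleepint * 100]
# The script draws the delay with [berkdb random_int $low $high];
# expr rand() gives the same range without the berkdb extension.
set ms [expr $low + int(rand() * ($high - $low + 1))]
after $ms	;# block for $ms milliseconds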
diff --git a/bdb/test/log.tcl b/bdb/test/log.tcl
deleted file mode 100644
index c3802d0f971..00000000000
--- a/bdb/test/log.tcl
+++ /dev/null
@@ -1,337 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 1996, 1997, 1998, 1999, 2000
-# Sleepycat Software. All rights reserved.
-#
-# $Id: log.tcl,v 11.17 2000/11/30 20:09:19 dda Exp $
-#
-# Options are:
-# -dir <directory in which to store memp>
-# -maxfilesize <maxsize of log file>
-# -iterations <iterations>
-# -stat
-proc log_usage {} {
- puts "log -dir <directory> -iterations <number of ops> \
- -maxfilesize <max size of log files> -stat"
-}
-proc logtest { args } {
- source ./include.tcl
- global rand_init
-
- # Set defaults
- set iterations 1000
- set maxfile [expr 1024 * 128]
- set dostat 0
- for { set i 0 } { $i < [llength $args] } {incr i} {
- switch -regexp -- [lindex $args $i] {
- -d.* { incr i; set testdir [lindex $args $i] }
- -i.* { incr i; set iterations [lindex $args $i] }
- -m.* { incr i; set maxfile [lindex $args $i] }
- -s.* { set dostat 1 }
- default {
- puts -nonewline "FAIL:[timestamp] Usage: "
- log_usage
- return
- }
- }
- }
- set multi_log [expr 3 * $iterations]
-
- # Clean out old log if it existed
- puts "Unlinking log: error message OK"
- env_cleanup $testdir
-
- # Now run the various functionality tests
- berkdb srand $rand_init
-
- log001 $testdir $maxfile $iterations
- log001 $testdir $maxfile $multi_log
- log002 $testdir $maxfile
- log003 $testdir $maxfile
- log004 $testdir
-}
-
-proc log001 { dir max nrecs } {
- source ./include.tcl
-
- puts "Log001: Basic put/get test"
-
- env_cleanup $dir
-
- set env [berkdb env -log -create -home $dir \
- -mode 0644 -log_max $max]
- error_check_bad log_env:$dir $env NULL
- error_check_good log:$dir [is_substr $env "env"] 1
-
- # We will write records to the log and make sure we can
- # read them back correctly. We'll use a standard pattern
- # repeated some number of times for each record.
-
- set lsn_list {}
- set rec_list {}
- puts "Log001.a: Writing $nrecs log records"
- for { set i 0 } { $i < $nrecs } { incr i } {
- set rec ""
- for { set j 0 } { $j < [expr $i % 10 + 1] } {incr j} {
- set rec $rec$i:logrec:$i
- }
- set lsn [$env log_put $rec]
- error_check_bad log_put [is_substr $lsn log_cmd] 1
- lappend lsn_list $lsn
- lappend rec_list $rec
- }
- puts "Log001.b: Retrieving log records sequentially (forward)"
- set i 0
- for { set grec [$env log_get -first] } { [llength $grec] != 0 } {
- set grec [$env log_get -next]} {
- error_check_good log_get:seq [lindex $grec 1] \
- [lindex $rec_list $i]
- incr i
- }
-
- puts "Log001.c: Retrieving log records sequentially (backward)"
- set i [llength $rec_list]
- for { set grec [$env log_get -last] } { [llength $grec] != 0 } {
- set grec [$env log_get -prev] } {
- incr i -1
- error_check_good \
- log_get:seq [lindex $grec 1] [lindex $rec_list $i]
- }
-
- puts "Log001.d: Retrieving log records sequentially by LSN"
- set i 0
- foreach lsn $lsn_list {
- set grec [$env log_get -set $lsn]
- error_check_good \
- log_get:seq [lindex $grec 1] [lindex $rec_list $i]
- incr i
- }
-
- puts "Log001.e: Retrieving log records randomly by LSN"
- set m [expr [llength $lsn_list] - 1]
- for { set i 0 } { $i < $nrecs } { incr i } {
- set recno [berkdb random_int 0 $m ]
- set lsn [lindex $lsn_list $recno]
- set grec [$env log_get -set $lsn]
- error_check_good \
- log_get:seq [lindex $grec 1] [lindex $rec_list $recno]
- }
-
- # Close and unlink the file
- error_check_good env:close:$env [$env close] 0
- error_check_good envremove:$dir [berkdb envremove -home $dir] 0
-
- puts "Log001 Complete"
-}
-
-proc log002 { dir {max 32768} } {
- source ./include.tcl
-
- puts "Log002: Multiple log test w/trunc, file, compare functionality"
-
- env_cleanup $dir
-
- set env [berkdb env -create -home $dir -mode 0644 -log -log_max $max]
- error_check_bad log_env:$dir $env NULL
- error_check_good log:$dir [is_substr $env "env"] 1
-
- # We'll record every hundred'th record for later use
- set info_list {}
-
- set i 0
- puts "Log002.a: Writing log records"
-
- for {set s 0} { $s < [expr 3 * $max] } { incr s $len } {
- set rec [random_data 120 0 0]
- set len [string length $rec]
- set lsn [$env log_put $rec]
-
- if { [expr $i % 100 ] == 0 } {
- lappend info_list [list $lsn $rec]
- }
- incr i
- }
-
- puts "Log002.b: Checking log_compare"
- set last {0 0}
- foreach p $info_list {
- set l [lindex $p 0]
- if { [llength $last] != 0 } {
- error_check_good \
- log_compare [$env log_compare $l $last] 1
- error_check_good \
- log_compare [$env log_compare $last $l] -1
- error_check_good \
- log_compare [$env log_compare $l $l] 0
- }
- set last $l
- }
-
- puts "Log002.c: Checking log_file"
- set flist [glob $dir/log*]
- foreach p $info_list {
-
- set lsn [lindex $p 0]
- set f [$env log_file $lsn]
-
- # Change all backslash separators on Windows to forward slash
- # separators, which is what the rest of the test suite expects.
- regsub -all {\\} $f {/} f
-
- error_check_bad log_file:$f [lsearch $flist $f] -1
- }
-
- puts "Log002.d: Verifying records"
- for {set i [expr [llength $info_list] - 1] } { $i >= 0 } { incr i -1} {
- set p [lindex $info_list $i]
- set grec [$env log_get -set [lindex $p 0]]
- error_check_good log_get:$env [lindex $grec 1] [lindex $p 1]
- }
-
- # Close and unlink the file
- error_check_good env:close:$env [$env close] 0
- error_check_good envremove:$dir [berkdb envremove -home $dir] 0
-
- puts "Log002 Complete"
-}
-
-proc log003 { dir {max 32768} } {
- source ./include.tcl
-
- puts "Log003: Verify log_flush behavior"
-
- env_cleanup $dir
- set short_rec "abcdefghijklmnopqrstuvwxyz"
- set long_rec [repeat $short_rec 200]
- set very_long_rec [repeat $long_rec 4]
-
- foreach rec "$short_rec $long_rec $very_long_rec" {
- puts "Log003.a: Verify flush on [string length $rec] byte rec"
-
- set env [berkdb env -log -home $dir \
- -create -mode 0644 -log_max $max]
- error_check_bad log_env:$dir $env NULL
- error_check_good log:$dir [is_substr $env "env"] 1
-
- set lsn [$env log_put $rec]
- error_check_bad log_put [lindex $lsn 0] "ERROR:"
- set ret [$env log_flush $lsn]
- error_check_good log_flush $ret 0
-
- # Now, we want to crash the region and recheck. Closing the
- # log does not flush any records, so we'll use a close to
- # do the "crash"
- set ret [$env close]
- error_check_good log_env:close $ret 0
-
- # Now, remove the log region
- #set ret [berkdb envremove -home $dir]
- #error_check_good env:remove $ret 0
-
- # Re-open the log and try to read the record.
- set env [berkdb env -create -home $dir \
- -log -mode 0644 -log_max $max]
- error_check_bad log_env:$dir $env NULL
- error_check_good log:$dir [is_substr $env "env"] 1
-
- set gotrec [$env log_get -first]
- error_check_good lp_get [lindex $gotrec 1] $rec
-
- # Close and unlink the file
- error_check_good env:close:$env [$env close] 0
- error_check_good envremove:$dir [berkdb envremove -home $dir] 0
- log_cleanup $dir
- }
-
- foreach rec "$short_rec $long_rec $very_long_rec" {
- puts "Log003.b: \
- Verify flush on non-last record [string length $rec]"
- set env [berkdb env \
- -create -log -home $dir -mode 0644 -log_max $max]
- error_check_bad log_env:$dir $env NULL
- error_check_good log:$dir [is_substr $env "env"] 1
-
- # Put 10 random records
- for { set i 0 } { $i < 10 } { incr i} {
- set r [random_data 450 0 0]
- set lsn [$env log_put $r]
- error_check_bad log_put [lindex $lsn 0] "ERROR:"
- }
-
- # Put the record we are interested in
- set save_lsn [$env log_put $rec]
- error_check_bad log_put [lindex $save_lsn 0] "ERROR:"
-
- # Put 10 more random records
- for { set i 0 } { $i < 10 } { incr i} {
- set r [random_data 450 0 0]
- set lsn [$env log_put $r]
- error_check_bad log_put [lindex $lsn 0] "ERROR:"
- }
-
- # Now check the flush
- set ret [$env log_flush $save_lsn]
- error_check_good log_flush $ret 0
-
- # Now, we want to crash the region and recheck. Closing the
- # log does not flush any records, so we'll use a close to
- # do the "crash"
-
- #
- # Now, close and remove the log region
- error_check_good env:close:$env [$env close] 0
- set ret [berkdb envremove -home $dir]
- error_check_good env:remove $ret 0
-
- # Re-open the log and try to read the record.
- set env [berkdb env \
- -home $dir -create -log -mode 0644 -log_max $max]
- error_check_bad log_env:$dir $env NULL
- error_check_good log:$dir [is_substr $env "env"] 1
-
- set gotrec [$env log_get -set $save_lsn]
- error_check_good lp_get [lindex $gotrec 1] $rec
-
- # Close and unlink the file
- error_check_good env:close:$env [$env close] 0
- error_check_good envremove:$dir [berkdb envremove -home $dir] 0
- log_cleanup $dir
- }
-
- puts "Log003 Complete"
-}
-
-# Make sure that if we do PREVs on a log, but the beginning of the
-# log has been truncated, we do the right thing.
-proc log004 { dir } {
- source ./include.tcl
-
- puts "Log004: Prev on log when beginning of log has been truncated."
- # Use archive test to populate log
- env_cleanup $dir
- puts "Log004.a: Call archive to populate log."
- archive
-
- # Delete all log files under 100
- puts "Log004.b: Delete all log files under 100."
- set ret [catch { glob $dir/log.00000000* } result]
- if { $ret == 0 } {
- eval fileremove -f $result
- }
-
- # Now open the log and get the first record and try a prev
- puts "Log004.c: Open truncated log, attempt to access missing portion."
- set myenv [berkdb env -create -log -home $dir]
- error_check_good log_open [is_substr $myenv "env"] 1
-
- set ret [$myenv log_get -first]
- error_check_bad log_get [llength $ret] 0
-
- # This should give DB_NOTFOUND which is a ret of length 0
- catch {$myenv log_get -prev} ret
- error_check_good log_get_prev [string length $ret] 0
-
- puts "Log004.d: Close log and environment."
- error_check_good log_close [$myenv close] 0
- puts "Log004 complete."
-}
diff --git a/bdb/test/log001.tcl b/bdb/test/log001.tcl
new file mode 100644
index 00000000000..87df780cb5a
--- /dev/null
+++ b/bdb/test/log001.tcl
@@ -0,0 +1,120 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: log001.tcl,v 11.29 2002/04/30 20:27:56 sue Exp $
+#
+
+# TEST log001
+# TEST Read/write log records.
+proc log001 { } {
+ global passwd
+ global rand_init
+
+ berkdb srand $rand_init
+ set iter 1000
+ set max [expr 1024 * 128]
+ log001_body $max $iter 1
+ log001_body $max $iter 0
+ log001_body $max $iter 1 "-encryptaes $passwd"
+ log001_body $max $iter 0 "-encryptaes $passwd"
+ log001_body $max [expr $iter * 15] 1
+ log001_body $max [expr $iter * 15] 0
+ log001_body $max [expr $iter * 15] 1 "-encryptaes $passwd"
+ log001_body $max [expr $iter * 15] 0 "-encryptaes $passwd"
+}
+
+proc log001_body { max nrecs fixedlength {encargs ""} } {
+ source ./include.tcl
+
+ puts -nonewline "Log001: Basic put/get log records "
+ if { $fixedlength == 1 } {
+ puts "(fixed-length $encargs)"
+ } else {
+ puts "(variable-length $encargs)"
+ }
+
+ env_cleanup $testdir
+
+ set env [eval {berkdb_env -log -create -home $testdir -mode 0644} \
+ $encargs -log_max $max]
+ error_check_good envopen [is_valid_env $env] TRUE
+
+ # We will write records to the log and make sure we can
+ # read them back correctly. We'll use a standard pattern
+ # repeated some number of times for each record.
+ set lsn_list {}
+ set rec_list {}
+ puts "\tLog001.a: Writing $nrecs log records"
+ for { set i 0 } { $i < $nrecs } { incr i } {
+ set rec ""
+ for { set j 0 } { $j < [expr $i % 10 + 1] } {incr j} {
+ set rec $rec$i:logrec:$i
+ }
+ if { $fixedlength != 1 } {
+ set rec $rec:[random_data 237 0 0]
+ }
+ set lsn [$env log_put $rec]
+ error_check_bad log_put [is_substr $lsn log_cmd] 1
+ lappend lsn_list $lsn
+ lappend rec_list $rec
+ }
+
+ # Open a log cursor.
+ set logc [$env log_cursor]
+ error_check_good logc [is_valid_logc $logc $env] TRUE
+
+ puts "\tLog001.b: Retrieving log records sequentially (forward)"
+ set i 0
+ for { set grec [$logc get -first] } { [llength $grec] != 0 } {
+ set grec [$logc get -next]} {
+ error_check_good log_get:seq [lindex $grec 1] \
+ [lindex $rec_list $i]
+ incr i
+ }
+
+ puts "\tLog001.c: Retrieving log records sequentially (backward)"
+ set i [llength $rec_list]
+ for { set grec [$logc get -last] } { [llength $grec] != 0 } {
+ set grec [$logc get -prev] } {
+ incr i -1
+ error_check_good \
+ log_get:seq [lindex $grec 1] [lindex $rec_list $i]
+ }
+
+ puts "\tLog001.d: Retrieving log records sequentially by LSN"
+ set i 0
+ foreach lsn $lsn_list {
+ set grec [$logc get -set $lsn]
+ error_check_good \
+ log_get:seq [lindex $grec 1] [lindex $rec_list $i]
+ incr i
+ }
+
+ puts "\tLog001.e: Retrieving log records randomly by LSN"
+ set m [expr [llength $lsn_list] - 1]
+ for { set i 0 } { $i < $nrecs } { incr i } {
+ set recno [berkdb random_int 0 $m ]
+ set lsn [lindex $lsn_list $recno]
+ set grec [$logc get -set $lsn]
+ error_check_good \
+ log_get:seq [lindex $grec 1] [lindex $rec_list $recno]
+ }
+
+ puts "\tLog001.f: Retrieving first/current, last/current log record"
+ set grec [$logc get -first]
+ error_check_good log_get:seq [lindex $grec 1] [lindex $rec_list 0]
+ set grec [$logc get -current]
+ error_check_good log_get:seq [lindex $grec 1] [lindex $rec_list 0]
+ set i [expr [llength $rec_list] - 1]
+ set grec [$logc get -last]
+ error_check_good log_get:seq [lindex $grec 1] [lindex $rec_list $i]
+ set grec [$logc get -current]
+ error_check_good log_get:seq [lindex $grec 1] [lindex $rec_list $i]
+
+ # Close and unlink the file
+ error_check_good log_cursor:close:$logc [$logc close] 0
+ error_check_good env:close [$env close] 0
+ error_check_good envremove [berkdb envremove -home $testdir] 0
+}
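Log001 now reads the log through a cursor rather than the old $env log_get calls. The forward-traversal idiom it relies on, reduced to a sketch (assumes $env is an open environment with logging, as created above):

set logc [$env log_cursor]
error_check_good logc [is_valid_logc $logc $env] TRUE
set count 0
for { set r [$logc get -first] } { [llength $r] != 0 } \
    { set r [$logc get -next] } {
	# Each successful get returns a {lsn record} pair; an empty
	# list means the end of the log was reached.
	incr count
}
error_check_good logc_close [$logc close] 0
puts "walked $count log records"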
diff --git a/bdb/test/log002.tcl b/bdb/test/log002.tcl
new file mode 100644
index 00000000000..6e91f55398f
--- /dev/null
+++ b/bdb/test/log002.tcl
@@ -0,0 +1,85 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: log002.tcl,v 11.28 2002/04/29 20:07:54 sue Exp $
+#
+
+# TEST log002
+# TEST Tests multiple logs
+# TEST Log truncation
+# TEST LSN comparison and file functionality.
+proc log002 { } {
+ source ./include.tcl
+
+ puts "Log002: Multiple log test w/trunc, file, compare functionality"
+
+ env_cleanup $testdir
+
+ set max [expr 1024 * 128]
+ set env [berkdb_env -create -home $testdir -mode 0644 \
+ -log -log_max $max]
+ error_check_good envopen [is_valid_env $env] TRUE
+
+ # We'll record every hundredth record for later use
+ set info_list {}
+
+ puts "\tLog002.a: Writing log records"
+ set i 0
+ for {set s 0} { $s < [expr 3 * $max] } { incr s $len } {
+ set rec [random_data 120 0 0]
+ set len [string length $rec]
+ set lsn [$env log_put $rec]
+
+ if { [expr $i % 100 ] == 0 } {
+ lappend info_list [list $lsn $rec]
+ }
+ incr i
+ }
+
+ puts "\tLog002.b: Checking log_compare"
+ set last {0 0}
+ foreach p $info_list {
+ set l [lindex $p 0]
+ if { [llength $last] != 0 } {
+ error_check_good \
+ log_compare [$env log_compare $l $last] 1
+ error_check_good \
+ log_compare [$env log_compare $last $l] -1
+ error_check_good \
+ log_compare [$env log_compare $l $l] 0
+ }
+ set last $l
+ }
+
+ puts "\tLog002.c: Checking log_file"
+ set flist [glob $testdir/log*]
+ foreach p $info_list {
+
+ set lsn [lindex $p 0]
+ set f [$env log_file $lsn]
+
+ # Change all backslash separators on Windows to forward slash
+ # separators, which is what the rest of the test suite expects.
+ regsub -all {\\} $f {/} f
+
+ error_check_bad log_file:$f [lsearch $flist $f] -1
+ }
+
+ puts "\tLog002.d: Verifying records"
+
+ set logc [$env log_cursor]
+ error_check_good log_cursor [is_valid_logc $logc $env] TRUE
+
+ for {set i [expr [llength $info_list] - 1] } { $i >= 0 } { incr i -1} {
+ set p [lindex $info_list $i]
+ set grec [$logc get -set [lindex $p 0]]
+ error_check_good log_get:$env [lindex $grec 1] [lindex $p 1]
+ }
+
+ # Close and unlink the file
+ error_check_good log_cursor:close:$logc [$logc close] 0
+ error_check_good env:close [$env close] 0
+ error_check_good envremove [berkdb envremove -home $testdir] 0
+}
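The Log002.b loop exercises the three-way contract of $env log_compare: a later LSN compares as 1 against an earlier one, -1 the other way around, and 0 against itself. A tiny hypothetical helper built on that contract:

proc lsn_before { env lsn1 lsn2 } {
	# True if lsn1 was logged strictly before lsn2.
	return [expr {[$env log_compare $lsn1 $lsn2] == -1}]
}
# e.g.: error_check_good ordered [lsn_before $env $first_lsn $last_lsn] 1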
diff --git a/bdb/test/log003.tcl b/bdb/test/log003.tcl
new file mode 100644
index 00000000000..11297b59d50
--- /dev/null
+++ b/bdb/test/log003.tcl
@@ -0,0 +1,118 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: log003.tcl,v 11.28 2002/04/29 20:07:54 sue Exp $
+#
+
+# TEST log003
+# TEST Verify that log_flush is flushing records correctly.
+proc log003 { } {
+ source ./include.tcl
+
+ puts "Log003: Verify log_flush behavior"
+
+ set max [expr 1024 * 128]
+ env_cleanup $testdir
+ set short_rec "abcdefghijklmnopqrstuvwxyz"
+ set long_rec [repeat $short_rec 200]
+ set very_long_rec [repeat $long_rec 4]
+
+ foreach rec "$short_rec $long_rec $very_long_rec" {
+ puts "\tLog003.a: Verify flush on [string length $rec] byte rec"
+
+ set env [berkdb_env -log -home $testdir \
+ -create -mode 0644 -log_max $max]
+ error_check_good envopen [is_valid_env $env] TRUE
+
+ set lsn [$env log_put $rec]
+ error_check_bad log_put [lindex $lsn 0] "ERROR:"
+ set ret [$env log_flush $lsn]
+ error_check_good log_flush $ret 0
+
+ # Now, we want to crash the region and recheck. Closing the
+ # log does not flush any records, so we'll use a close to
+ # do the "crash"
+ set ret [$env close]
+ error_check_good log_env:close $ret 0
+
+ # Now, remove the log region
+ #set ret [berkdb envremove -home $testdir]
+ #error_check_good env:remove $ret 0
+
+ # Re-open the log and try to read the record.
+ set env [berkdb_env -create -home $testdir \
+ -log -mode 0644 -log_max $max]
+ error_check_good envopen [is_valid_env $env] TRUE
+
+ set logc [$env log_cursor]
+ error_check_good log_cursor [is_valid_logc $logc $env] TRUE
+
+ set gotrec [$logc get -first]
+ error_check_good lp_get [lindex $gotrec 1] $rec
+
+ # Close and unlink the file
+ error_check_good log_cursor:close:$logc [$logc close] 0
+ error_check_good env:close:$env [$env close] 0
+ error_check_good envremove [berkdb envremove -home $testdir] 0
+ log_cleanup $testdir
+ }
+
+ foreach rec "$short_rec $long_rec $very_long_rec" {
+ puts "\tLog003.b: \
+ Verify flush on non-last record [string length $rec]"
+ set env [berkdb_env \
+ -create -log -home $testdir -mode 0644 -log_max $max]
+ error_check_good envopen [is_valid_env $env] TRUE
+
+ # Put 10 random records
+ for { set i 0 } { $i < 10 } { incr i} {
+ set r [random_data 450 0 0]
+ set lsn [$env log_put $r]
+ error_check_bad log_put [lindex $lsn 0] "ERROR:"
+ }
+
+ # Put the record we are interested in
+ set save_lsn [$env log_put $rec]
+ error_check_bad log_put [lindex $save_lsn 0] "ERROR:"
+
+ # Put 10 more random records
+ for { set i 0 } { $i < 10 } { incr i} {
+ set r [random_data 450 0 0]
+ set lsn [$env log_put $r]
+ error_check_bad log_put [lindex $lsn 0] "ERROR:"
+ }
+
+ # Now check the flush
+ set ret [$env log_flush $save_lsn]
+ error_check_good log_flush $ret 0
+
+ # Now, we want to crash the region and recheck. Closing the
+ # log does not flush any records, so we'll use a close to
+ # do the "crash"
+
+ #
+ # Now, close and remove the log region
+ error_check_good env:close:$env [$env close] 0
+ set ret [berkdb envremove -home $testdir]
+ error_check_good env:remove $ret 0
+
+ # Re-open the log and try to read the record.
+ set env [berkdb_env \
+ -home $testdir -create -log -mode 0644 -log_max $max]
+ error_check_good envopen [is_valid_env $env] TRUE
+
+ set logc [$env log_cursor]
+ error_check_good log_cursor [is_valid_logc $logc $env] TRUE
+
+ set gotrec [$logc get -set $save_lsn]
+ error_check_good lp_get [lindex $gotrec 1] $rec
+
+ # Close and unlink the file
+ error_check_good log_cursor:close:$logc [$logc close] 0
+ error_check_good env:close:$env [$env close] 0
+ error_check_good envremove [berkdb envremove -home $testdir] 0
+ log_cleanup $testdir
+ }
+}
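Log003 depends on the fact that closing the environment does not flush unwritten log records, so a plain close stands in for a crash. The pattern at its core, as a sketch (assumes $testdir and $max as set above):

set env [berkdb_env -create -log -home $testdir -mode 0644 -log_max $max]
set lsn [$env log_put "record that must survive"]
error_check_good log_flush [$env log_flush $lsn] 0	;# force it to disk
error_check_good env_close [$env close] 0		;# close acts as the crash
# Re-open and confirm the flushed record is still readable.
set env [berkdb_env -create -log -home $testdir -mode 0644 -log_max $max]
set logc [$env log_cursor]
error_check_good reread [lindex [$logc get -first] 1] "record that must survive"
error_check_good logc_close [$logc close] 0
error_check_good env_close [$env close] 0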
diff --git a/bdb/test/log004.tcl b/bdb/test/log004.tcl
new file mode 100644
index 00000000000..66968a8c1b4
--- /dev/null
+++ b/bdb/test/log004.tcl
@@ -0,0 +1,46 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: log004.tcl,v 11.28 2002/04/29 20:07:54 sue Exp $
+#
+
+# TEST log004
+# TEST Make sure that if we do PREVs on a log, but the beginning of the
+# TEST log has been truncated, we do the right thing.
+proc log004 { } {
+ source ./include.tcl
+
+ puts "Log004: Prev on log when beginning of log has been truncated."
+ # Use archive test to populate log
+ env_cleanup $testdir
+ puts "\tLog004.a: Call archive to populate log."
+ archive
+
+ # Delete all log files under 100
+ puts "\tLog004.b: Delete all log files under 100."
+ set ret [catch { glob $testdir/log.00000000* } result]
+ if { $ret == 0 } {
+ eval fileremove -f $result
+ }
+
+ # Now open the log and get the first record and try a prev
+ puts "\tLog004.c: Open truncated log, attempt to access missing portion."
+ set env [berkdb_env -create -log -home $testdir]
+ error_check_good envopen [is_valid_env $env] TRUE
+
+ set logc [$env log_cursor]
+ error_check_good log_cursor [is_valid_logc $logc $env] TRUE
+
+ set ret [$logc get -first]
+ error_check_bad log_get [llength $ret] 0
+
+ # This should give DB_NOTFOUND which is a ret of length 0
+ catch {$logc get -prev} ret
+ error_check_good log_get_prev [string length $ret] 0
+
+ puts "\tLog004.d: Close log and environment."
+ error_check_good log_cursor_close [$logc close] 0
+ error_check_good log_close [$env close] 0
+}
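Log004.c leans on the Tcl API convention that a get which finds nothing reports DB_NOTFOUND as an empty result, so the test catches the command and checks for a zero-length $ret rather than treating it as a hard failure. That check in isolation, as a sketch (assumes $logc is an open log cursor positioned on the first surviving record):

if { [catch {$logc get -prev} ret] != 0 || [string length $ret] == 0 } {
	puts "no previous record (DB_NOTFOUND)"
} else {
	puts "got record at LSN [lindex $ret 0]"
}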
diff --git a/bdb/test/log005.tcl b/bdb/test/log005.tcl
new file mode 100644
index 00000000000..ab2ad703c55
--- /dev/null
+++ b/bdb/test/log005.tcl
@@ -0,0 +1,89 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: log005.tcl,v 11.1 2002/05/30 22:16:49 bostic Exp $
+#
+# TEST log005
+# TEST Check that log file sizes can change on the fly.
+proc log005 { } {
+ source ./include.tcl
+
+ puts "Log005: Check that log file sizes can change."
+ env_cleanup $testdir
+
+ # Open the environment, set and check the log file size.
+ puts "\tLog005.a: open, set and check the log file size."
+ set env [berkdb_env \
+ -create -home $testdir -log_buffer 10000 -log_max 1000000 -txn]
+ error_check_good envopen [is_valid_env $env] TRUE
+ set db [berkdb_open \
+ -env $env -create -mode 0644 -btree -auto_commit a.db]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ # Get the current log file maximum.
+ set max [log005_stat $env "Current log file size"]
+ error_check_good max_set $max 1000000
+
+ # Reset the log file size using a second open, and make sure
+ # it changes.
+ puts "\tLog005.b: reset during open, check the log file size."
+ set envtmp [berkdb_env -home $testdir -log_max 900000 -txn]
+ error_check_good envtmp_open [is_valid_env $envtmp] TRUE
+ error_check_good envtmp_close [$envtmp close] 0
+
+ set tmp [log005_stat $env "Current log file size"]
+ error_check_good max_changed 900000 $tmp
+
+ puts "\tLog005.c: fill in the current log file size."
+ # Fill in the current log file.
+ set new_lsn 0
+ set data [repeat "a" 1024]
+ for { set i 1 } \
+ { [log005_stat $env "Current log file number"] != 2 } \
+ { incr i } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set ret [$db put -txn $t $i $data]
+ error_check_good put $ret 0
+ error_check_good txn [$t commit] 0
+
+ set last_lsn $new_lsn
+ set new_lsn [log005_stat $env "Current log file offset"]
+ }
+
+ # The last LSN in the first file should be more than our new
+ # file size.
+ error_check_good "lsn check < 900000" [expr 900000 < $last_lsn] 1
+
+ # Close down the environment.
+ error_check_good db_close [$db close] 0
+ error_check_good env_close [$env close] 0
+
+ puts "\tLog005.d: check the log file size is unchanged after recovery."
+ # Open again, running recovery. Verify the log file size is as we
+ # left it.
+ set env [berkdb_env -create -home $testdir -recover -txn]
+ error_check_good env_open [is_valid_env $env] TRUE
+
+ set tmp [log005_stat $env "Current log file size"]
+ error_check_good after_recovery 900000 $tmp
+
+ error_check_good env_close [$env close] 0
+}
+
+# log005_stat --
+# Return the current log statistics.
+proc log005_stat { env s } {
+ set stat [$env log_stat]
+ foreach statpair $stat {
+ set statmsg [lindex $statpair 0]
+ set statval [lindex $statpair 1]
+ if {[is_substr $statmsg $s] != 0} {
+ return $statval
+ }
+ }
+ puts "FAIL: log005: stat string $s not found"
+ return 0
+}
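log005_stat is a plain string lookup over the pairs returned by [$env log_stat]; the descriptions it matches are exactly the ones the test queries. Usage sketch (assumes $env is an open environment with logging):

set fsize  [log005_stat $env "Current log file size"]
set fileno [log005_stat $env "Current log file number"]
set offset [log005_stat $env "Current log file offset"]
puts "log file $fileno: size $fsize bytes, offset $offset"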
diff --git a/bdb/test/logtrack.tcl b/bdb/test/logtrack.tcl
index cea4912e627..ad6b480b4e3 100644
--- a/bdb/test/logtrack.tcl
+++ b/bdb/test/logtrack.tcl
@@ -1,9 +1,9 @@
# See the file LICENSE for redistribution information
#
-# Copyright (c) 2000
+# Copyright (c) 2000-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: logtrack.tcl,v 11.6 2000/10/27 15:30:39 krinsky Exp $
+# $Id: logtrack.tcl,v 11.11 2002/09/03 16:44:37 sue Exp $
#
# logtrack.tcl: A collection of routines, formerly implemented in Perl
# as log.pl, to track which log record types the test suite hits.
@@ -35,20 +35,26 @@ proc logtrack_init { } {
# records were seen.
proc logtrack_read { dirname } {
global ltsname tmpname util_path
+ global encrypt passwd
set seendb [berkdb_open $ltsname]
error_check_good seendb_open [is_valid_db $seendb] TRUE
file delete -force $tmpname
- set ret [catch {exec $util_path/db_printlog -N \
- -h "$dirname" > $tmpname} res]
+ set pargs " -N -h $dirname "
+ if { $encrypt > 0 } {
+ append pargs " -P $passwd "
+ }
+ set ret [catch {eval exec $util_path/db_printlog $pargs > $tmpname} res]
error_check_good printlog $ret 0
error_check_good tmpfile_exists [file exists $tmpname] 1
set f [open $tmpname r]
while { [gets $f record] >= 0 } {
- regexp {\[[^\]]*\]\[[^\]]*\]([^\:]*)\:} $record whl name
- error_check_good seendb_put [$seendb put $name ""] 0
+ set r [regexp {\[[^\]]*\]\[[^\]]*\]([^\:]*)\:} $record whl name]
+ if { $r == 1 } {
+ error_check_good seendb_put [$seendb put $name ""] 0
+ }
}
close $f
file delete -force $tmpname
@@ -73,7 +79,7 @@ proc logtrack_summary { } {
set pref ""
while { [gets $f line] >= 0 } {
# Get the keyword, the first thing on the line:
- # BEGIN/DEPRECATED/PREFIX
+ # BEGIN/DEPRECATED/IGNORED/PREFIX
set keyword [lindex $line 0]
if { [string compare $keyword PREFIX] == 0 } {
@@ -92,7 +98,8 @@ proc logtrack_summary { } {
error_check_good exist_put [$existdb put \
${pref}_[lindex $line 1] ""] 0
- } elseif { [string compare $keyword DEPRECATED] == 0 } {
+ } elseif { [string compare $keyword DEPRECATED] == 0 ||
+ [string compare $keyword IGNORED] == 0 } {
error_check_good deprec_put [$deprecdb put \
${pref}_[lindex $line 1] ""] 0
}
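The regexp in logtrack_read extracts the record-type name from each db_printlog line, which begins with two bracketed LSN components followed by the type and a colon. A sketch against a made-up sample line (illustrative only, not captured from a real run):

set record {[1][28]crdel_metasub: rec: 142 txnid 80000002 prevlsn [1][56]}
if { [regexp {\[[^\]]*\]\[[^\]]*\]([^\:]*)\:} $record whl name] } {
	puts "record type: $name"	;# prints crdel_metasub
}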
diff --git a/bdb/test/mdbscript.tcl b/bdb/test/mdbscript.tcl
index 368aad371b2..9f3c971ee3c 100644
--- a/bdb/test/mdbscript.tcl
+++ b/bdb/test/mdbscript.tcl
@@ -1,9 +1,9 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: mdbscript.tcl,v 11.23 2000/10/09 02:26:11 krinsky Exp $
+# $Id: mdbscript.tcl,v 11.29 2002/03/22 21:43:06 krinsky Exp $
#
# Process script for the multi-process db tester.
@@ -78,12 +78,18 @@ puts "$procid process id"
puts "$procs processes"
set klock NOLOCK
+
+# Note: all I/O operations, and especially flush, are expensive
+# on Win2000, at least with Tcl version 8.3.2, so we avoid
+# flushes in the main part of the loop below.
flush stdout
-set dbenv [berkdb env -create -cdb -home $dir]
-#set dbenv [berkdb env -create -cdb -log -home $dir]
+set dbenv [berkdb_env -create -cdb -home $dir]
+#set dbenv [berkdb_env -create -cdb -log -home $dir]
error_check_good dbenv [is_valid_env $dbenv] TRUE
+set locker [ $dbenv lock_id ]
+
set db [berkdb_open -env $dbenv -create -mode 0644 $omethod $file]
error_check_good dbopen [is_valid_db $db] TRUE
@@ -96,6 +102,7 @@ tclsleep 5
proc get_lock { k } {
global dbenv
global procid
+ global locker
global klock
global DB_LOCK_WRITE
global DB_LOCK_NOWAIT
@@ -103,7 +110,7 @@ proc get_lock { k } {
global exception_handled
# Make sure that the key isn't in the middle of
# a delete operation
- if {[catch {$dbenv lock_get -nowait write $procid $k} klock] != 0 } {
+ if {[catch {$dbenv lock_get -nowait write $locker $k} klock] != 0 } {
set exception_handled 1
error_check_good \
@@ -136,7 +143,7 @@ set dlen [string length $datastr]
for { set i 0 } { $i < $iter } { incr i } {
set op [berkdb random_int 0 5]
puts "iteration $i operation $op"
- flush stdout
+ set close_cursor 0
if {[catch {
switch $op {
0 {
@@ -337,7 +344,6 @@ for { set i 0 } { $i < $iter } { incr i } {
set fnl [string first "\n" $errorInfo]
set theError [string range $errorInfo 0 [expr $fnl - 1]]
- flush stdout
if { [string compare $klock NOLOCK] != 0 } {
catch {$klock put}
}
@@ -348,11 +354,11 @@ for { set i 0 } { $i < $iter } { incr i } {
if {[string first FAIL $theError] == 0 && \
$exception_handled != 1} {
+ flush stdout
error "FAIL:[timestamp] test042: key $k: $theError"
}
set exception_handled 0
} else {
- flush stdout
if { [string compare $klock NOLOCK] != 0 } {
error_check_good "$klock put" [$klock put] 0
set klock NOLOCK
@@ -360,14 +366,11 @@ for { set i 0 } { $i < $iter } { incr i } {
}
}
-if {[catch {$db close} ret] != 0 } {
- error_check_good close [is_substr $errorInfo "DB_INCOMPLETE"] 1
- puts "Warning: sync incomplete on close ([pid])"
-} else {
- error_check_good close $ret 0
-}
-$dbenv close
+error_check_good db_close_catch [catch {$db close} ret] 0
+error_check_good db_close $ret 0
+error_check_good dbenv_close [$dbenv close] 0
+flush stdout
exit
puts "[timestamp] [pid] Complete"
diff --git a/bdb/test/memp001.tcl b/bdb/test/memp001.tcl
new file mode 100644
index 00000000000..c4bbf99b9b2
--- /dev/null
+++ b/bdb/test/memp001.tcl
@@ -0,0 +1,199 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: memp001.tcl,v 11.50 2002/08/07 16:46:28 bostic Exp $
+#
+
+# TEST memp001
+# TEST Randomly updates pages.
+proc memp001 { } {
+
+ memp001_body 1 ""
+ memp001_body 3 ""
+ memp001_body 1 -private
+ memp001_body 3 -private
+ memp001_body 1 "-system_mem -shm_key 1"
+ memp001_body 3 "-system_mem -shm_key 1"
+
+}
+
+proc memp001_body { ncache flags } {
+ source ./include.tcl
+ global rand_init
+
+ set nfiles 5
+ set iter 500
+ set psize 512
+ set cachearg "-cachesize {0 400000 $ncache}"
+
+ puts \
+"Memp001: { $flags } random update $iter iterations on $nfiles files."
+ #
+ # Check if this platform supports this set of flags
+ #
+ if { [mem_chk $flags] == 1 } {
+ return
+ }
+
+ env_cleanup $testdir
+ puts "\tMemp001.a: Create env with $ncache caches"
+ set env [eval {berkdb_env -create -mode 0644} \
+ $cachearg {-home $testdir} $flags]
+ error_check_good env_open [is_valid_env $env] TRUE
+
+ #
+ # Do a simple mpool_stat call to verify the number of caches
+ # just to exercise the stat code.
+ set stat [$env mpool_stat]
+ set str "Number of caches"
+ set checked 0
+ foreach statpair $stat {
+ if { $checked == 1 } {
+ break
+ }
+ if { [is_substr [lindex $statpair 0] $str] != 0} {
+ set checked 1
+ error_check_good ncache [lindex $statpair 1] $ncache
+ }
+ }
+ error_check_good checked $checked 1
+
+ # Open N memp files
+ puts "\tMemp001.b: Create $nfiles mpool files"
+ for {set i 1} {$i <= $nfiles} {incr i} {
+ set fname "data_file.$i"
+ file_create $testdir/$fname 50 $psize
+
+ set mpools($i) \
+ [$env mpool -create -pagesize $psize -mode 0644 $fname]
+ error_check_good mp_open [is_substr $mpools($i) $env.mp] 1
+ }
+
+ # Now, loop, picking files at random
+ berkdb srand $rand_init
+ puts "\tMemp001.c: Random page replacement loop"
+ for {set i 0} {$i < $iter} {incr i} {
+ set mpool $mpools([berkdb random_int 1 $nfiles])
+ set p(1) [get_range $mpool 10]
+ set p(2) [get_range $mpool 10]
+ set p(3) [get_range $mpool 10]
+ set p(1) [replace $mpool $p(1)]
+ set p(3) [replace $mpool $p(3)]
+ set p(4) [get_range $mpool 20]
+ set p(4) [replace $mpool $p(4)]
+ set p(5) [get_range $mpool 10]
+ set p(6) [get_range $mpool 20]
+ set p(7) [get_range $mpool 10]
+ set p(8) [get_range $mpool 20]
+ set p(5) [replace $mpool $p(5)]
+ set p(6) [replace $mpool $p(6)]
+ set p(9) [get_range $mpool 40]
+ set p(9) [replace $mpool $p(9)]
+ set p(10) [get_range $mpool 40]
+ set p(7) [replace $mpool $p(7)]
+ set p(8) [replace $mpool $p(8)]
+ set p(9) [replace $mpool $p(9)]
+ set p(10) [replace $mpool $p(10)]
+ #
+ # We now need to put back all the pages we got above, or
+ # else they stay pinned.
+ #
+ for {set x 1} { $x <= 10} {incr x} {
+ error_check_good pgput [$p($x) put] 0
+ }
+ }
+
+ # Close N memp files, close the environment.
+ puts "\tMemp001.d: Close mpools"
+ for {set i 1} {$i <= $nfiles} {incr i} {
+ error_check_good memp_close:$mpools($i) [$mpools($i) close] 0
+ }
+ error_check_good envclose [$env close] 0
+
+ for {set i 1} {$i <= $nfiles} {incr i} {
+ fileremove -f $testdir/data_file.$i
+ }
+}
+
+proc file_create { fname nblocks blocksize } {
+ set fid [open $fname w]
+ for {set i 0} {$i < $nblocks} {incr i} {
+ seek $fid [expr $i * $blocksize] start
+ puts -nonewline $fid $i
+ }
+ seek $fid [expr $nblocks * $blocksize - 1]
+
+ # We don't end the file with a newline, because some platforms (like
+ # Windows) emit CR/NL. There does not appear to be a BINARY open flag
+ # that prevents this.
+ puts -nonewline $fid "Z"
+ close $fid
+
+ # Make sure it worked
+ if { [file size $fname] != $nblocks * $blocksize } {
+ error "FAIL: file_create could not create correct file size"
+ }
+}
+
+proc get_range { mpool max } {
+ set pno [berkdb random_int 0 $max]
+ set p [$mpool get $pno]
+ error_check_good page [is_valid_page $p $mpool] TRUE
+ set got [$p pgnum]
+ if { $got != $pno } {
+ puts "Get_range: Page mismatch page |$pno| val |$got|"
+ }
+ set ret [$p init "Page is pinned by [pid]"]
+ error_check_good page_init $ret 0
+
+ return $p
+}
+
+proc replace { mpool p } {
+ set pgno [$p pgnum]
+
+ set ret [$p init "Page is unpinned by [pid]"]
+ error_check_good page_init $ret 0
+
+ set ret [$p put -dirty]
+ error_check_good page_put $ret 0
+
+ set p2 [$mpool get $pgno]
+ error_check_good page [is_valid_page $p2 $mpool] TRUE
+
+ return $p2
+}
+
+proc mem_chk { flags } {
+ source ./include.tcl
+ global errorCode
+
+ # Open the memp with region init specified
+ env_cleanup $testdir
+
+ set cachearg " -cachesize {0 400000 3}"
+ set ret [catch {eval {berkdb_env -create -mode 0644}\
+ $cachearg {-region_init -home $testdir} $flags} env]
+ if { $ret != 0 } {
+ # If the env open failed, it may be because we're on a platform
+ # such as HP-UX 10 that won't support mutexes in shmget memory.
+ # Or QNX, which doesn't support system memory at all.
+ # Verify that the return value was EINVAL or EOPNOTSUPP
+ # and bail gracefully.
+ error_check_good is_shm_test [is_substr $flags -system_mem] 1
+ error_check_good returned_error [expr \
+ [is_substr $errorCode EINVAL] || \
+ [is_substr $errorCode EOPNOTSUPP]] 1
+ puts "Warning:\
+ platform does not support mutexes in shmget memory."
+ puts "Skipping shared memory mpool test."
+ return 1
+ }
+ error_check_good env_open [is_valid_env $env] TRUE
+ error_check_good env_close [$env close] 0
+ env_cleanup $testdir
+
+ return 0
+}
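The comment in the Memp001.c loop states the invariant that matters: every page fetched from an mpool handle stays pinned until it is put back. Reduced to a single page, as a sketch (assumes $mpool is one of the handles opened with [$env mpool ...] above):

set p [$mpool get 0]				;# pins page 0
error_check_good valid_page [is_valid_page $p $mpool] TRUE
error_check_good page_init [$p init "scratch contents"] 0
error_check_good page_unpin [$p put -dirty] 0	;# marks dirty and unpins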
diff --git a/bdb/test/memp002.tcl b/bdb/test/memp002.tcl
new file mode 100644
index 00000000000..d55f2987f06
--- /dev/null
+++ b/bdb/test/memp002.tcl
@@ -0,0 +1,62 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: memp002.tcl,v 11.47 2002/09/05 17:23:06 sandstro Exp $
+#
+
+# TEST memp002
+# TEST Tests multiple processes accessing and modifying the same files.
+proc memp002 { } {
+ #
+ # Multiple processes not supported by private memory so don't
+ # run memp002_body with -private.
+ #
+ memp002_body ""
+ memp002_body "-system_mem -shm_key 1"
+}
+
+proc memp002_body { flags } {
+ source ./include.tcl
+
+ puts "Memp002: {$flags} Multiprocess mpool tester"
+
+ set procs 4
+ set psizes "512 1024 2048 4096 8192"
+ set iterations 500
+ set npages 100
+
+ # Check if this combination of flags is supported by this arch.
+ if { [mem_chk $flags] == 1 } {
+ return
+ }
+
+ set iter [expr $iterations / $procs]
+
+ # Clean up old stuff and create new.
+ env_cleanup $testdir
+
+ for { set i 0 } { $i < [llength $psizes] } { incr i } {
+ fileremove -f $testdir/file$i
+ }
+ set e [eval {berkdb_env -create -lock -home $testdir} $flags]
+ error_check_good dbenv [is_valid_env $e] TRUE
+
+ set pidlist {}
+ for { set i 0 } { $i < $procs } {incr i} {
+
+ puts "$tclsh_path\
+ $test_path/mpoolscript.tcl $testdir $i $procs \
+ $iter $psizes $npages 3 $flags > \
+ $testdir/memp002.$i.out &"
+ set p [exec $tclsh_path $test_path/wrap.tcl \
+ mpoolscript.tcl $testdir/memp002.$i.out $testdir $i $procs \
+ $iter $psizes $npages 3 $flags &]
+ lappend pidlist $p
+ }
+ puts "Memp002: $procs independent processes now running"
+ watch_procs $pidlist
+
+ reset_env $e
+}
diff --git a/bdb/test/memp003.tcl b/bdb/test/memp003.tcl
new file mode 100644
index 00000000000..31eb55b757c
--- /dev/null
+++ b/bdb/test/memp003.tcl
@@ -0,0 +1,153 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: memp003.tcl,v 11.46 2002/04/30 17:26:06 sue Exp $
+#
+
+# TEST memp003
+# TEST Test reader-only/writer process combinations; we use the access methods
+# TEST for testing.
+proc memp003 { } {
+ #
+ # Multiple processes not supported by private memory so don't
+ # run memp003_body with -private.
+ #
+ memp003_body ""
+ memp003_body "-system_mem -shm_key 1"
+}
+
+proc memp003_body { flags } {
+ global alphabet
+ source ./include.tcl
+
+ puts "Memp003: {$flags} Reader/Writer tests"
+
+ if { [mem_chk $flags] == 1 } {
+ return
+ }
+
+ env_cleanup $testdir
+ set psize 1024
+ set nentries 500
+ set testfile mpool.db
+ set t1 $testdir/t1
+
+ # Create an environment that the two processes can share, with
+ # 20 pages per cache.
+ set c [list 0 [expr $psize * 20 * 3] 3]
+ set dbenv [eval {berkdb_env \
+ -create -lock -home $testdir -cachesize $c} $flags]
+ error_check_good dbenv [is_valid_env $dbenv] TRUE
+
+ # First open and create the file.
+ set db [berkdb_open -env $dbenv -create -truncate \
+ -mode 0644 -pagesize $psize -btree $testfile]
+ error_check_good dbopen/RW [is_valid_db $db] TRUE
+
+ set did [open $dict]
+ set txn ""
+ set count 0
+
+ puts "\tMemp003.a: create database"
+ set keys ""
+ # Here is the loop where we put and get each key/data pair
+ while { [gets $did str] != -1 && $count < $nentries } {
+ lappend keys $str
+
+ set ret [eval {$db put} $txn {$str $str}]
+ error_check_good put $ret 0
+
+ set ret [eval {$db get} $txn {$str}]
+ error_check_good get $ret [list [list $str $str]]
+
+ incr count
+ }
+ close $did
+ error_check_good close [$db close] 0
+
+ # Now open the file for read-only
+ set db [berkdb_open -env $dbenv -rdonly $testfile]
+ error_check_good dbopen/RO [is_substr $db db] 1
+
+ puts "\tMemp003.b: verify a few keys"
+ # Read and verify a couple of keys; saving them to check later
+ set testset ""
+ for { set i 0 } { $i < 10 } { incr i } {
+ set ndx [berkdb random_int 0 [expr $nentries - 1]]
+ set key [lindex $keys $ndx]
+ if { [lsearch $testset $key] != -1 } {
+ incr i -1
+ continue;
+ }
+
+ # The remote process stuff is unhappy with
+ # zero-length keys; make sure we don't pick one.
+ if { [llength $key] == 0 } {
+ incr i -1
+ continue
+ }
+
+ lappend testset $key
+
+ set ret [eval {$db get} $txn {$key}]
+ error_check_good get/RO $ret [list [list $key $key]]
+ }
+
+ puts "\tMemp003.c: retrieve and modify keys in remote process"
+ # Now open remote process where we will open the file RW
+ set f1 [open |$tclsh_path r+]
+ puts $f1 "source $test_path/test.tcl"
+ puts $f1 "flush stdout"
+ flush $f1
+
+ set c [concat "{" [list 0 [expr $psize * 20 * 3] 3] "}" ]
+ set remote_env [send_cmd $f1 \
+ "berkdb_env -create -lock -home $testdir -cachesize $c $flags"]
+ error_check_good remote_dbenv [is_valid_env $remote_env] TRUE
+
+ set remote_db [send_cmd $f1 "berkdb_open -env $remote_env $testfile"]
+ error_check_good remote_dbopen [is_valid_db $remote_db] TRUE
+
+ foreach k $testset {
+ # Get the key
+ set ret [send_cmd $f1 "$remote_db get $k"]
+ error_check_good remote_get $ret [list [list $k $k]]
+
+ # Now replace the key
+ set ret [send_cmd $f1 "$remote_db put $k $k$k"]
+ error_check_good remote_put $ret 0
+ }
+
+ puts "\tMemp003.d: verify changes in local process"
+ foreach k $testset {
+ set ret [eval {$db get} $txn {$k}]
+ error_check_good get_verify/RO $ret [list [list $k $k$k]]
+ }
+
+ puts "\tMemp003.e: Fill up the cache with dirty buffers"
+ foreach k $testset {
+ # Now rewrite the keys with BIG data
+ set data [replicate $alphabet 32]
+ set ret [send_cmd $f1 "$remote_db put $k $data"]
+ error_check_good remote_put $ret 0
+ }
+
+ puts "\tMemp003.f: Get more pages for the read-only file"
+ dump_file $db $txn $t1 nop
+
+ puts "\tMemp003.g: Sync from the read-only file"
+ error_check_good db_sync [$db sync] 0
+ error_check_good db_close [$db close] 0
+
+ set ret [send_cmd $f1 "$remote_db close"]
+ error_check_good remote_get $ret 0
+
+ # Close the environment both remotely and locally.
+ set ret [send_cmd $f1 "$remote_env close"]
+ error_check_good remote:env_close $ret 0
+ close $f1
+
+ reset_env $dbenv
+}
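Memp003 drives its second process through the suite's send_cmd helper over a pipe to another tclsh. The skeleton of that pattern, as a sketch (assumes $tclsh_path, $test_path and $testdir from include.tcl):

set child [open |$tclsh_path r+]
puts $child "source $test_path/test.tcl"
flush $child
set renv [send_cmd $child "berkdb_env -create -lock -home $testdir"]
error_check_good remote_env [is_valid_env $renv] TRUE
error_check_good remote_close [send_cmd $child "$renv close"] 0
close $child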
diff --git a/bdb/test/mpool.tcl b/bdb/test/mpool.tcl
deleted file mode 100644
index b2eb2252037..00000000000
--- a/bdb/test/mpool.tcl
+++ /dev/null
@@ -1,420 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 1996, 1997, 1998, 1999, 2000
-# Sleepycat Software. All rights reserved.
-#
-# $Id: mpool.tcl,v 11.34 2001/01/18 04:58:07 krinsky Exp $
-#
-# Options are:
-# -cachesize {gbytes bytes ncache}
-# -nfiles <files>
-# -iterations <iterations>
-# -pagesize <page size in bytes>
-# -dir <directory in which to store memp>
-# -stat
-proc memp_usage {} {
- puts "memp -cachesize {gbytes bytes ncache}"
- puts "\t-nfiles <files>"
- puts "\t-iterations <iterations>"
- puts "\t-pagesize <page size in bytes>"
- puts "\t-dir <memp directory>"
- puts "\t-mem {private system}"
- return
-}
-
-proc mpool { args } {
- source ./include.tcl
- global errorCode
-
- puts "mpool {$args} running"
- # Set defaults
- set cachearg " -cachesize {0 200000 3}"
- set nfiles 5
- set iterations 500
- set pagesize "512 1024 2048 4096 8192"
- set npages 100
- set procs 4
- set seeds ""
- set shm_key 1
- set dostat 0
- set flags ""
- for { set i 0 } { $i < [llength $args] } {incr i} {
- switch -regexp -- [lindex $args $i] {
- -c.* {
- incr i
- set cachesize [lindex $args $i]
- set cachearg " -cachesize $cachesize"
- }
- -d.* { incr i; set testdir [lindex $args $i] }
- -i.* { incr i; set iterations [lindex $args $i] }
- -me.* {
- incr i
- if { [string \
- compare [lindex $args $i] private] == 0 } {
- set flags -private
- } elseif { [string \
- compare [lindex $args $i] system] == 0 } {
- #
- # We need to use a shm id. Use one
- # that is the same each time so that
- # we do not grow segments infinitely.
- set flags "-system_mem -shm_key $shm_key"
- } else {
- puts -nonewline \
- "FAIL:[timestamp] Usage: "
- memp_usage
- return
- }
- }
- -nf.* { incr i; set nfiles [lindex $args $i] }
- -np.* { incr i; set npages [lindex $args $i] }
- -pa.* { incr i; set pagesize [lindex $args $i] }
- -pr.* { incr i; set procs [lindex $args $i] }
- -se.* { incr i; set seeds [lindex $args $i] }
- -st.* { set dostat 1 }
- default {
- puts -nonewline "FAIL:[timestamp] Usage: "
- memp_usage
- return
- }
- }
- }
-
- # Clean out old directory
- env_cleanup $testdir
-
- # Open the memp with region init specified
- set ret [catch {eval {berkdb env -create -mode 0644}\
- $cachearg {-region_init -home $testdir} $flags} res]
- if { $ret == 0 } {
- set env $res
- } else {
- # If the env open failed, it may be because we're on a platform
- # such as HP-UX 10 that won't support mutexes in shmget memory.
- # Or QNX, which doesn't support system memory at all.
- # Verify that the return value was EINVAL or EOPNOTSUPP
- # and bail gracefully.
- error_check_good is_shm_test [is_substr $flags -system_mem] 1
- error_check_good returned_error [expr \
- [is_substr $errorCode EINVAL] || \
- [is_substr $errorCode EOPNOTSUPP]] 1
- puts "Warning:\
- platform does not support mutexes in shmget memory."
- puts "Skipping shared memory mpool test."
- return
- }
- error_check_good env_open [is_substr $env env] 1
-
- reset_env $env
- env_cleanup $testdir
-
- # Now open without region init
- set env [eval {berkdb env -create -mode 0644}\
- $cachearg {-home $testdir} $flags]
- error_check_good evn_open [is_substr $env env] 1
-
- memp001 $env \
- $testdir $nfiles $iterations [lindex $pagesize 0] $dostat $flags
- reset_env $env
- set ret [berkdb envremove -home $testdir]
- error_check_good env_remove $ret 0
- env_cleanup $testdir
-
- memp002 $testdir \
- $procs $pagesize $iterations $npages $seeds $dostat $flags
- set ret [berkdb envremove -home $testdir]
- error_check_good env_remove $ret 0
- env_cleanup $testdir
-
- memp003 $testdir $iterations $flags
- set ret [berkdb envremove -home $testdir]
- error_check_good env_remove $ret 0
-
- env_cleanup $testdir
-}
-
-proc memp001 {env dir n iter psize dostat flags} {
- source ./include.tcl
- global rand_init
-
- puts "Memp001: {$flags} random update $iter iterations on $n files."
-
- # Open N memp files
- for {set i 1} {$i <= $n} {incr i} {
- set fname "data_file.$i"
- file_create $dir/$fname 50 $psize
-
- set mpools($i) \
- [$env mpool -create -pagesize $psize -mode 0644 $fname]
- error_check_good mp_open [is_substr $mpools($i) $env.mp] 1
- }
-
- # Now, loop, picking files at random
- berkdb srand $rand_init
- for {set i 0} {$i < $iter} {incr i} {
- set mpool $mpools([berkdb random_int 1 $n])
- set p1 [get_range $mpool 10]
- set p2 [get_range $mpool 10]
- set p3 [get_range $mpool 10]
- set p1 [replace $mpool $p1]
- set p3 [replace $mpool $p3]
- set p4 [get_range $mpool 20]
- set p4 [replace $mpool $p4]
- set p5 [get_range $mpool 10]
- set p6 [get_range $mpool 20]
- set p7 [get_range $mpool 10]
- set p8 [get_range $mpool 20]
- set p5 [replace $mpool $p5]
- set p6 [replace $mpool $p6]
- set p9 [get_range $mpool 40]
- set p9 [replace $mpool $p9]
- set p10 [get_range $mpool 40]
- set p7 [replace $mpool $p7]
- set p8 [replace $mpool $p8]
- set p9 [replace $mpool $p9]
- set p10 [replace $mpool $p10]
- }
-
- if { $dostat == 1 } {
- puts [$env mpool_stat]
- for {set i 1} {$i <= $n} {incr i} {
- error_check_good mp_sync [$mpools($i) fsync] 0
- }
- }
-
- # Close N memp files
- for {set i 1} {$i <= $n} {incr i} {
- error_check_good memp_close:$mpools($i) [$mpools($i) close] 0
- fileremove -f $dir/data_file.$i
- }
-}
-
-proc file_create { fname nblocks blocksize } {
- set fid [open $fname w]
- for {set i 0} {$i < $nblocks} {incr i} {
- seek $fid [expr $i * $blocksize] start
- puts -nonewline $fid $i
- }
- seek $fid [expr $nblocks * $blocksize - 1]
-
- # We don't end the file with a newline, because some platforms (like
- # Windows) emit CR/NL. There does not appear to be a BINARY open flag
- # that prevents this.
- puts -nonewline $fid "Z"
- close $fid
-
- # Make sure it worked
- if { [file size $fname] != $nblocks * $blocksize } {
- error "FAIL: file_create could not create correct file size"
- }
-}
-
-proc get_range { mpool max } {
- set pno [berkdb random_int 0 $max]
- set p [$mpool get $pno]
- error_check_good page [is_valid_page $p $mpool] TRUE
- set got [$p pgnum]
- if { $got != $pno } {
- puts "Get_range: Page mismatch page |$pno| val |$got|"
- }
- set ret [$p init "Page is pinned by [pid]"]
- error_check_good page_init $ret 0
-
- return $p
-}
-
-proc replace { mpool p } {
- set pgno [$p pgnum]
-
- set ret [$p init "Page is unpinned by [pid]"]
- error_check_good page_init $ret 0
-
- set ret [$p put -dirty]
- error_check_good page_put $ret 0
-
- set p2 [$mpool get $pgno]
- error_check_good page [is_valid_page $p2 $mpool] TRUE
-
- return $p2
-}
-
-proc memp002 { dir procs psizes iterations npages seeds dostat flags } {
- source ./include.tcl
-
- puts "Memp002: {$flags} Multiprocess mpool tester"
-
- if { [is_substr $flags -private] != 0 } {
- puts "Memp002 skipping\
- multiple processes not supported by private memory"
- return
- }
- set iter [expr $iterations / $procs]
-
- # Clean up old stuff and create new.
- env_cleanup $dir
-
- for { set i 0 } { $i < [llength $psizes] } { incr i } {
- fileremove -f $dir/file$i
- }
- set e [eval {berkdb env -create -lock -home $dir} $flags]
- error_check_good dbenv [is_valid_widget $e env] TRUE
-
- set pidlist {}
- for { set i 0 } { $i < $procs } {incr i} {
- if { [llength $seeds] == $procs } {
- set seed [lindex $seeds $i]
- } else {
- set seed -1
- }
-
- puts "$tclsh_path\
- $test_path/mpoolscript.tcl $dir $i $procs \
- $iter $psizes $npages 3 $flags > \
- $dir/memp002.$i.out &"
- set p [exec $tclsh_path $test_path/wrap.tcl \
- mpoolscript.tcl $dir/memp002.$i.out $dir $i $procs \
- $iter $psizes $npages 3 $flags &]
- lappend pidlist $p
- }
- puts "Memp002: $procs independent processes now running"
- watch_procs
-
- reset_env $e
-}
-
-# Test reader-only/writer process combinations; we use the access methods
-# for testing.
-proc memp003 { dir {nentries 10000} flags } {
- global alphabet
- source ./include.tcl
-
- puts "Memp003: {$flags} Reader/Writer tests"
-
- if { [is_substr $flags -private] != 0 } {
- puts "Memp003 skipping\
- multiple processes not supported by private memory"
- return
- }
-
- env_cleanup $dir
- set psize 1024
- set testfile mpool.db
- set t1 $dir/t1
-
- # Create an environment that the two processes can share
- set c [list 0 [expr $psize * 10] 3]
- set dbenv [eval {berkdb env \
- -create -lock -home $dir -cachesize $c} $flags]
- error_check_good dbenv [is_valid_env $dbenv] TRUE
-
- # First open and create the file.
-
- set db [berkdb_open -env $dbenv -create -truncate \
- -mode 0644 -pagesize $psize -btree $testfile]
- error_check_good dbopen/RW [is_valid_db $db] TRUE
-
- set did [open $dict]
- set txn ""
- set count 0
-
- puts "\tMemp003.a: create database"
- set keys ""
- # Here is the loop where we put and get each key/data pair
- while { [gets $did str] != -1 && $count < $nentries } {
- lappend keys $str
-
- set ret [eval {$db put} $txn {$str $str}]
- error_check_good put $ret 0
-
- set ret [eval {$db get} $txn {$str}]
- error_check_good get $ret [list [list $str $str]]
-
- incr count
- }
- close $did
- error_check_good close [$db close] 0
-
- # Now open the file for read-only
- set db [berkdb_open -env $dbenv -rdonly $testfile]
- error_check_good dbopen/RO [is_substr $db db] 1
-
- puts "\tMemp003.b: verify a few keys"
- # Read and verify a couple of keys; saving them to check later
- set testset ""
- for { set i 0 } { $i < 10 } { incr i } {
- set ndx [berkdb random_int 0 [expr $nentries - 1]]
- set key [lindex $keys $ndx]
- if { [lsearch $testset $key] != -1 } {
- incr i -1
- continue;
- }
-
- # The remote process stuff is unhappy with
- # zero-length keys; make sure we don't pick one.
- if { [llength $key] == 0 } {
- incr i -1
- continue
- }
-
- lappend testset $key
-
- set ret [eval {$db get} $txn {$key}]
- error_check_good get/RO $ret [list [list $key $key]]
- }
-
- puts "\tMemp003.c: retrieve and modify keys in remote process"
- # Now open remote process where we will open the file RW
- set f1 [open |$tclsh_path r+]
- puts $f1 "source $test_path/test.tcl"
- puts $f1 "flush stdout"
- flush $f1
-
- set c [concat "{" [list 0 [expr $psize * 10] 3] "}" ]
- set remote_env [send_cmd $f1 \
- "berkdb env -create -lock -home $dir -cachesize $c $flags"]
- error_check_good remote_dbenv [is_valid_env $remote_env] TRUE
-
- set remote_db [send_cmd $f1 "berkdb_open -env $remote_env $testfile"]
- error_check_good remote_dbopen [is_valid_db $remote_db] TRUE
-
- foreach k $testset {
- # Get the key
- set ret [send_cmd $f1 "$remote_db get $k"]
- error_check_good remote_get $ret [list [list $k $k]]
-
- # Now replace the key
- set ret [send_cmd $f1 "$remote_db put $k $k$k"]
- error_check_good remote_put $ret 0
- }
-
- puts "\tMemp003.d: verify changes in local process"
- foreach k $testset {
- set ret [eval {$db get} $txn {$key}]
- error_check_good get_verify/RO $ret [list [list $key $key$key]]
- }
-
- puts "\tMemp003.e: Fill up the cache with dirty buffers"
- foreach k $testset {
- # Now rewrite the keys with BIG data
- set data [replicate $alphabet 32]
- set ret [send_cmd $f1 "$remote_db put $k $data"]
- error_check_good remote_put $ret 0
- }
-
- puts "\tMemp003.f: Get more pages for the read-only file"
- dump_file $db $txn $t1 nop
-
- puts "\tMemp003.g: Sync from the read-only file"
- error_check_good db_sync [$db sync] 0
- error_check_good db_close [$db close] 0
-
- set ret [send_cmd $f1 "$remote_db close"]
- error_check_good remote_get $ret 0
-
- # Close the environment both remotely and locally.
- set ret [send_cmd $f1 "$remote_env close"]
- error_check_good remote:env_close $ret 0
- close $f1
-
- reset_env $dbenv
-}
diff --git a/bdb/test/mpoolscript.tcl b/bdb/test/mpoolscript.tcl
index 8695254c257..c13f70eb945 100644
--- a/bdb/test/mpoolscript.tcl
+++ b/bdb/test/mpoolscript.tcl
@@ -1,9 +1,9 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: mpoolscript.tcl,v 11.12 2000/05/05 15:23:47 sue Exp $
+# $Id: mpoolscript.tcl,v 11.16 2002/04/29 14:47:16 sandstro Exp $
#
# Random multiple process mpool tester.
# Usage: mpoolscript dir id numiters numfiles numpages sleepint
@@ -61,7 +61,7 @@ foreach i $pgsizes {
}
set cache [list 0 [expr $maxprocs * ([lindex $pgsizes 0] + $max)] 1]
-set env_cmd {berkdb env -lock -cachesize $cache -home $dir}
+set env_cmd {berkdb_env -lock -cachesize $cache -home $dir}
set e [eval $env_cmd $flags]
error_check_good env_open [is_valid_env $e] TRUE
@@ -78,7 +78,8 @@ foreach psize $pgsizes {
puts "Establishing long-term pin on file 0 page $id for process $id"
# Set up the long-pin page
-set lock [$e lock_get write $id 0:$id]
+set locker [$e lock_id]
+set lock [$e lock_get write $locker 0:$id]
error_check_good lock_get [is_valid_lock $lock $e] TRUE
set mp [lindex $mpools 0]
@@ -109,7 +110,7 @@ for { set iter 0 } { $iter < $numiters } { incr iter } {
set mpf [lindex $mpools $fnum]
for { set p 0 } { $p < $numpages } { incr p } {
- set lock [$e lock_get write $id $fnum:$p]
+ set lock [$e lock_get write $locker $fnum:$p]
error_check_good lock_get:$fnum:$p \
[is_valid_lock $lock $e] TRUE
diff --git a/bdb/test/mutex.tcl b/bdb/test/mutex.tcl
deleted file mode 100644
index 5300fb0c4a3..00000000000
--- a/bdb/test/mutex.tcl
+++ /dev/null
@@ -1,225 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 1996, 1997, 1998, 1999, 2000
-# Sleepycat Software. All rights reserved.
-#
-# $Id: mutex.tcl,v 11.18 2000/09/01 19:24:59 krinsky Exp $
-#
-# Exercise mutex functionality.
-# Options are:
-# -dir <directory in which to store mpool>
-# -iter <iterations>
-# -mdegree <number of mutexes per iteration>
-# -nmutex <number of mutexes>
-# -procs <number of processes to run>
-# -wait <wait interval after getting locks>
-proc mutex_usage {} {
- puts stderr "mutex\n\t-dir <dir>\n\t-iter <iterations>"
- puts stderr "\t-mdegree <locks per iteration>\n\t-nmutex <n>"
- puts stderr "\t-procs <nprocs>"
- puts stderr "\n\t-wait <max wait interval>"
- return
-}
-
-proc mutex { args } {
- source ./include.tcl
-
- set dir db
- set iter 500
- set mdegree 3
- set nmutex 20
- set procs 5
- set wait 2
-
- for { set i 0 } { $i < [llength $args] } {incr i} {
- switch -regexp -- [lindex $args $i] {
- -d.* { incr i; set testdir [lindex $args $i] }
- -i.* { incr i; set iter [lindex $args $i] }
- -m.* { incr i; set mdegree [lindex $args $i] }
- -n.* { incr i; set nmutex [lindex $args $i] }
- -p.* { incr i; set procs [lindex $args $i] }
- -w.* { incr i; set wait [lindex $args $i] }
- default {
- puts -nonewline "FAIL:[timestamp] Usage: "
- mutex_usage
- return
- }
- }
- }
-
- if { [file exists $testdir/$dir] != 1 } {
- file mkdir $testdir/$dir
- } elseif { [file isdirectory $testdir/$dir ] != 1 } {
- error "$testdir/$dir is not a directory"
- }
-
- # Basic sanity tests
- mutex001 $testdir $nmutex
-
- # Basic synchronization tests
- mutex002 $testdir $nmutex
-
- # Multiprocess tests
- mutex003 $testdir $iter $nmutex $procs $mdegree $wait
-}
-
-proc mutex001 { dir nlocks } {
- source ./include.tcl
-
- puts "Mutex001: Basic functionality"
- env_cleanup $dir
-
- # Test open w/out create; should fail
- error_check_bad \
- env_open [catch {berkdb env -lock -home $dir} env] 0
-
- # Now open for real
- set env [berkdb env -create -mode 0644 -lock -home $dir]
- error_check_good env_open [is_valid_env $env] TRUE
-
- set m [$env mutex 0644 $nlocks]
- error_check_good mutex_init [is_valid_mutex $m $env] TRUE
-
- # Get, set each mutex; sleep, then get Release
- for { set i 0 } { $i < $nlocks } { incr i } {
- set r [$m get $i ]
- error_check_good mutex_get $r 0
-
- set r [$m setval $i $i]
- error_check_good mutex_setval $r 0
- }
- tclsleep 5
- for { set i 0 } { $i < $nlocks } { incr i } {
- set r [$m getval $i]
- error_check_good mutex_getval $r $i
-
- set r [$m release $i ]
- error_check_good mutex_get $r 0
- }
-
- error_check_good mutex_close [$m close] 0
- error_check_good env_close [$env close] 0
- puts "Mutex001: completed successfully."
-}
-
-# Test basic synchronization
-proc mutex002 { dir nlocks } {
- source ./include.tcl
-
- puts "Mutex002: Basic synchronization"
- env_cleanup $dir
-
- # Fork off child before we open any files.
- set f1 [open |$tclsh_path r+]
- puts $f1 "source $test_path/test.tcl"
- flush $f1
-
- # Open the environment and the mutex locally
- set local_env [berkdb env -create -mode 0644 -lock -home $dir]
- error_check_good env_open [is_valid_env $local_env] TRUE
-
- set local_mutex [$local_env mutex 0644 $nlocks]
- error_check_good \
- mutex_init [is_valid_mutex $local_mutex $local_env] TRUE
-
- # Open the environment and the mutex remotely
- set remote_env [send_cmd $f1 "berkdb env -lock -home $dir"]
- error_check_good remote:env_open [is_valid_env $remote_env] TRUE
-
- set remote_mutex [send_cmd $f1 "$remote_env mutex 0644 $nlocks"]
- error_check_good \
- mutex_init [is_valid_mutex $remote_mutex $remote_env] TRUE
-
- # Do a get here, then set the value to be pid.
- # On the remote side fire off a get and getval.
- set r [$local_mutex get 1]
- error_check_good lock_get $r 0
-
- set r [$local_mutex setval 1 [pid]]
- error_check_good lock_get $r 0
-
- # Now have the remote side request the lock and check its
- # value. Then wait 5 seconds, release the mutex and see
- # what the remote side returned.
- send_timed_cmd $f1 1 "$remote_mutex get 1"
- send_timed_cmd $f1 1 "set ret \[$remote_mutex getval 1\]"
-
- # Now sleep before resetting and releasing lock
- tclsleep 5
- set newv [expr [pid] - 1]
- set r [$local_mutex setval 1 $newv]
- error_check_good mutex_setval $r 0
-
- set r [$local_mutex release 1]
- error_check_good mutex_release $r 0
-
- # Now get the result from the other script
- # Timestamp
- set result [rcv_result $f1]
- error_check_good lock_get:remote_time [expr $result > 4] 1
-
- # Timestamp
- set result [rcv_result $f1]
-
- # Mutex value
- set result [send_cmd $f1 "puts \$ret"]
- error_check_good lock_get:remote_getval $result $newv
-
- # Close down the remote
- set ret [send_cmd $f1 "$remote_mutex close" 5]
- # Not sure why we need this, but we do... an extra blank line
- # someone gets output somewhere
- gets $f1 ret
- error_check_good remote:mutex_close $ret 0
-
- set ret [send_cmd $f1 "$remote_env close"]
- error_check_good remote:env_close $ret 0
-
- catch { close $f1 } result
-
- set ret [$local_mutex close]
- error_check_good local:mutex_close $ret 0
-
- set ret [$local_env close]
- error_check_good local:env_close $ret 0
-
- puts "Mutex002: completed successfully."
-}
-
-# Generate a bunch of parallel
-# testers that try to randomly obtain locks.
-proc mutex003 { dir iter nmutex procs mdegree wait } {
- source ./include.tcl
-
- puts "Mutex003: Multi-process random mutex test ($procs processes)"
-
- env_cleanup $dir
-
- # Now open the region we'll use for multiprocess testing.
- set env [berkdb env -create -mode 0644 -lock -home $dir]
- error_check_good env_open [is_valid_env $env] TRUE
-
- set mutex [$env mutex 0644 $nmutex]
- error_check_good mutex_init [is_valid_mutex $mutex $env] TRUE
-
- error_check_good mutex_close [$mutex close] 0
-
- # Now spawn off processes
- set proclist {}
- for { set i 0 } {$i < $procs} {incr i} {
- puts "$tclsh_path\
- $test_path/mutexscript.tcl $dir\
- $iter $nmutex $wait $mdegree > $testdir/$i.mutexout &"
- set p [exec $tclsh_path $test_path/wrap.tcl \
- mutexscript.tcl $testdir/$i.mutexout $dir\
- $iter $nmutex $wait $mdegree &]
- lappend proclist $p
- }
- puts "Mutex003: $procs independent processes now running"
- watch_procs
- error_check_good env_close [$env close] 0
- # Remove output files
- for { set i 0 } {$i < $procs} {incr i} {
- fileremove -f $dir/$i.mutexout
- }
-}
diff --git a/bdb/test/mutex001.tcl b/bdb/test/mutex001.tcl
new file mode 100644
index 00000000000..93f858993a5
--- /dev/null
+++ b/bdb/test/mutex001.tcl
@@ -0,0 +1,51 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: mutex001.tcl,v 11.23 2002/04/30 19:37:36 sue Exp $
+#
+
+# TEST mutex001
+# TEST Test basic mutex functionality
+proc mutex001 { } {
+ source ./include.tcl
+
+ puts "Mutex001: Basic functionality"
+ env_cleanup $testdir
+ set nlocks 20
+
+ # Test open w/out create; should fail
+ error_check_bad \
+ env_open [catch {berkdb_env -lock -home $testdir} env] 0
+
+ puts "\tMutex001.a: Create lock env"
+ # Now open for real
+ set env [berkdb_env -create -mode 0644 -lock -home $testdir]
+ error_check_good env_open [is_valid_env $env] TRUE
+
+ puts "\tMutex001.b: Create $nlocks mutexes"
+ set m [$env mutex 0644 $nlocks]
+ error_check_good mutex_init [is_valid_mutex $m $env] TRUE
+
+ # Get and set each mutex; sleep; then get and release each one
+ puts "\tMutex001.c: Get/set loop"
+ for { set i 0 } { $i < $nlocks } { incr i } {
+ set r [$m get $i ]
+ error_check_good mutex_get $r 0
+
+ set r [$m setval $i $i]
+ error_check_good mutex_setval $r 0
+ }
+ tclsleep 5
+ for { set i 0 } { $i < $nlocks } { incr i } {
+ set r [$m getval $i]
+ error_check_good mutex_getval $r $i
+
+ set r [$m release $i ]
+ error_check_good mutex_get $r 0
+ }
+
+ error_check_good mutex_close [$m close] 0
+ error_check_good env_close [$env close] 0
+}
diff --git a/bdb/test/mutex002.tcl b/bdb/test/mutex002.tcl
new file mode 100644
index 00000000000..193e600fe8b
--- /dev/null
+++ b/bdb/test/mutex002.tcl
@@ -0,0 +1,94 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: mutex002.tcl,v 11.23 2002/04/30 19:37:36 sue Exp $
+#
+
+# TEST mutex002
+# TEST Test basic mutex synchronization
+proc mutex002 { } {
+ source ./include.tcl
+
+ puts "Mutex002: Basic synchronization"
+ env_cleanup $testdir
+ set nlocks 20
+
+ # Fork off child before we open any files.
+ set f1 [open |$tclsh_path r+]
+ puts $f1 "source $test_path/test.tcl"
+ flush $f1
+
+ # Open the environment and the mutex locally
+ puts "\tMutex002.a: Open local and remote env"
+ set local_env [berkdb_env -create -mode 0644 -lock -home $testdir]
+ error_check_good env_open [is_valid_env $local_env] TRUE
+
+ set local_mutex [$local_env mutex 0644 $nlocks]
+ error_check_good \
+ mutex_init [is_valid_mutex $local_mutex $local_env] TRUE
+
+ # Open the environment and the mutex remotely
+ set remote_env [send_cmd $f1 "berkdb_env -lock -home $testdir"]
+ error_check_good remote:env_open [is_valid_env $remote_env] TRUE
+
+ set remote_mutex [send_cmd $f1 "$remote_env mutex 0644 $nlocks"]
+ error_check_good \
+ mutex_init [is_valid_mutex $remote_mutex $remote_env] TRUE
+
+ # Do a get here, then set the value to be pid.
+ # On the remote side fire off a get and getval.
+ puts "\tMutex002.b: Local and remote get/set"
+ set r [$local_mutex get 1]
+ error_check_good lock_get $r 0
+
+ set r [$local_mutex setval 1 [pid]]
+ error_check_good lock_get $r 0
+
+ # Now have the remote side request the lock and check its
+ # value. Then wait 5 seconds, release the mutex and see
+ # what the remote side returned.
+ send_timed_cmd $f1 1 "$remote_mutex get 1"
+ send_timed_cmd $f1 1 "set ret \[$remote_mutex getval 1\]"
+
+ # Now sleep before resetting and releasing lock
+ tclsleep 5
+ set newv [expr [pid] - 1]
+ set r [$local_mutex setval 1 $newv]
+ error_check_good mutex_setval $r 0
+
+ set r [$local_mutex release 1]
+ error_check_good mutex_release $r 0
+
+ # Now get the result from the other script
+ # Timestamp
+ set result [rcv_result $f1]
+ error_check_good lock_get:remote_time [expr $result > 4] 1
+
+ # Timestamp
+ set result [rcv_result $f1]
+
+ # Mutex value
+ set result [send_cmd $f1 "puts \$ret"]
+ error_check_good lock_get:remote_getval $result $newv
+
+ # Close down the remote
+ puts "\tMutex002.c: Close remote"
+ set ret [send_cmd $f1 "$remote_mutex close" 5]
+ # Not sure why we need this, but we do... an extra blank line
+ # someone gets output somewhere
+ gets $f1 ret
+ error_check_good remote:mutex_close $ret 0
+
+ set ret [send_cmd $f1 "$remote_env close"]
+ error_check_good remote:env_close $ret 0
+
+ catch { close $f1 } result
+
+ set ret [$local_mutex close]
+ error_check_good local:mutex_close $ret 0
+
+ set ret [$local_env close]
+ error_check_good local:env_close $ret 0
+}
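The timing assertion is what actually verifies synchronization in mutex002: the child's get of mutex 1 must block until the parent releases it roughly five seconds later. Schematically, with the helpers exactly as they are used above:

    # Ask the child to grab mutex 1 and read its value; send_timed_cmd
    # has the child report how long each command took.
    send_timed_cmd $f1 1 "$remote_mutex get 1"
    send_timed_cmd $f1 1 "set ret \[$remote_mutex getval 1\]"

    # Hold the mutex locally for five seconds, then release it.
    tclsleep 5
    error_check_good mutex_release [$local_mutex release 1] 0

    # The child's get should have blocked for (roughly) the full sleep.
    set elapsed [rcv_result $f1]
    error_check_good remote_blocked [expr $elapsed > 4] 1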
diff --git a/bdb/test/mutex003.tcl b/bdb/test/mutex003.tcl
new file mode 100644
index 00000000000..da35ac0d115
--- /dev/null
+++ b/bdb/test/mutex003.tcl
@@ -0,0 +1,52 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: mutex003.tcl,v 11.24 2002/09/05 17:23:06 sandstro Exp $
+#
+
+# TEST mutex003
+# TEST Generate a bunch of parallel testers that try to randomly obtain locks.
+proc mutex003 { } {
+ source ./include.tcl
+
+ set nmutex 20
+ set iter 500
+ set procs 5
+ set mdegree 3
+ set wait 2
+ puts "Mutex003: Multi-process random mutex test"
+
+ env_cleanup $testdir
+
+ puts "\tMutex003.a: Create environment"
+ # Now open the region we'll use for multiprocess testing.
+ set env [berkdb_env -create -mode 0644 -lock -home $testdir]
+ error_check_good env_open [is_valid_env $env] TRUE
+
+ set mutex [$env mutex 0644 $nmutex]
+ error_check_good mutex_init [is_valid_mutex $mutex $env] TRUE
+
+ error_check_good mutex_close [$mutex close] 0
+
+ # Now spawn off processes
+ puts "\tMutex003.b: Create $procs processes"
+ set pidlist {}
+ for { set i 0 } {$i < $procs} {incr i} {
+ puts "$tclsh_path\
+ $test_path/mutexscript.tcl $testdir\
+ $iter $nmutex $wait $mdegree > $testdir/$i.mutexout &"
+ set p [exec $tclsh_path $test_path/wrap.tcl \
+ mutexscript.tcl $testdir/$i.mutexout $testdir\
+ $iter $nmutex $wait $mdegree &]
+ lappend pidlist $p
+ }
+ puts "\tMutex003.c: $procs independent processes now running"
+ watch_procs $pidlist
+ error_check_good env_close [$env close] 0
+ # Remove output files
+ for { set i 0 } {$i < $procs} {incr i} {
+ fileremove -f $testdir/$i.mutexout
+ }
+}
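mutex003 uses the suite's standard fan-out idiom: each worker is started through wrap.tcl with its output redirected, and the parent waits on the collected pids. Roughly, with the helpers as used above:

    set pidlist {}
    for { set i 0 } { $i < $procs } { incr i } {
        # wrap.tcl runs the named script with the remaining arguments,
        # sending its output to the given file; & leaves it in background.
        set p [exec $tclsh_path $test_path/wrap.tcl \
            mutexscript.tcl $testdir/$i.mutexout $testdir \
            $iter $nmutex $wait $mdegree &]
        lappend pidlist $p
    }
    # Block until every spawned pid has exited.
    watch_procs $pidlist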
diff --git a/bdb/test/mutexscript.tcl b/bdb/test/mutexscript.tcl
index 9a49e471186..bc410f2716d 100644
--- a/bdb/test/mutexscript.tcl
+++ b/bdb/test/mutexscript.tcl
@@ -1,9 +1,9 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: mutexscript.tcl,v 11.12 2000/11/21 22:14:56 dda Exp $
+# $Id: mutexscript.tcl,v 11.16 2002/04/29 14:58:16 sandstro Exp $
#
# Random mutex tester.
# Usage: mutexscript dir numiters mlocks sleepint degree
@@ -43,7 +43,7 @@ puts " $numiters $nmutex $sleepint $degree"
flush stdout
# Open the environment and the mutex
-set e [berkdb env -create -mode 0644 -lock -home $dir]
+set e [berkdb_env -create -mode 0644 -lock -home $dir]
error_check_good evn_open [is_valid_env $e] TRUE
set mutex [$e mutex 0644 $nmutex]
@@ -73,8 +73,8 @@ for { set iter 0 } { $iter < $numiters } { incr iter } {
}
}
- # Pick sleep interval
- tclsleep [ berkdb random_int 1 $sleepint ]
+ # Sleep for 10 to (100*$sleepint) ms.
+ after [berkdb random_int 10 [expr $sleepint * 100]]
# Now release locks
foreach i $mlist {
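The sleep change above trades whole-second granularity for milliseconds, so lock hold times in the random tester can be much shorter. Side by side:

    # Old: hold the locks for 1 to $sleepint whole seconds.
    tclsleep [berkdb random_int 1 $sleepint]

    # New: hold the locks for 10 to (100 * $sleepint) milliseconds.
    after [berkdb random_int 10 [expr $sleepint * 100]]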
diff --git a/bdb/test/ndbm.tcl b/bdb/test/ndbm.tcl
index a6286de0266..0bf8e0cc87c 100644
--- a/bdb/test/ndbm.tcl
+++ b/bdb/test/ndbm.tcl
@@ -1,9 +1,9 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: ndbm.tcl,v 11.13 2000/08/25 14:21:51 sue Exp $
+# $Id: ndbm.tcl,v 11.16 2002/07/08 13:11:30 mjc Exp $
#
# Historic NDBM interface test.
# Use the first 1000 entries from the dictionary.
@@ -80,11 +80,14 @@ proc ndbm { { nentries 1000 } } {
error_check_good NDBM:diff($t3,$t2) \
[filecmp $t3 $t2] 0
- puts "\tNDBM.c: pagf/dirf test"
- set fd [$db pagfno]
- error_check_bad pagf $fd -1
- set fd [$db dirfno]
- error_check_bad dirf $fd -1
+ # File descriptor tests won't work under Windows.
+ if { $is_windows_test != 1 } {
+ puts "\tNDBM.c: pagf/dirf test"
+ set fd [$db pagfno]
+ error_check_bad pagf $fd -1
+ set fd [$db dirfno]
+ error_check_bad dirf $fd -1
+ }
puts "\tNDBM.d: close, open, and dump file"
diff --git a/bdb/test/parallel.tcl b/bdb/test/parallel.tcl
new file mode 100644
index 00000000000..4e101c088cb
--- /dev/null
+++ b/bdb/test/parallel.tcl
@@ -0,0 +1,295 @@
+# Code to load up the tests into the Queue database
+# $Id: parallel.tcl,v 11.28 2002/09/05 17:23:06 sandstro Exp $
+proc load_queue { file {dbdir RUNQUEUE} nitems } {
+
+ puts -nonewline "Loading run queue with $nitems items..."
+ flush stdout
+
+ set env [berkdb_env -create -lock -home $dbdir]
+ error_check_good dbenv [is_valid_env $env] TRUE
+
+ set db [eval {berkdb_open -env $env -create -truncate \
+ -mode 0644 -len 120 -queue queue.db} ]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set fid [open $file]
+
+ set count 0
+
+ while { [gets $fid str] != -1 } {
+ set testarr($count) $str
+ incr count
+ }
+
+ # Randomize array of tests.
+ set rseed [pid]
+ berkdb srand $rseed
+ puts -nonewline "randomizing..."
+ flush stdout
+ for { set i 0 } { $i < $count } { incr i } {
+ set j [berkdb random_int $i [expr $count - 1]]
+
+ set tmp $testarr($i)
+ set testarr($i) $testarr($j)
+ set testarr($j) $tmp
+ }
+
+ if { [string compare ALL $nitems] != 0 } {
+ set maxload $nitems
+ } else {
+ set maxload $count
+ }
+
+ puts "loading..."
+ flush stdout
+ for { set i 0 } { $i < $maxload } { incr i } {
+ set str $testarr($i)
+ set ret [eval {$db put -append $str} ]
+ error_check_good put:$db $ret [expr $i + 1]
+ }
+
+ puts "Loaded $maxload records (out of $count)."
+ close $fid
+ $db close
+ $env close
+}
+
+proc init_runqueue { {dbdir RUNQUEUE} nitems list} {
+
+ if { [file exists $dbdir] != 1 } {
+ file mkdir $dbdir
+ }
+ puts "Creating test list..."
+ $list -n
+ load_queue ALL.OUT $dbdir $nitems
+ file delete TEST.LIST
+ file rename ALL.OUT TEST.LIST
+# file delete ALL.OUT
+}
+
+proc run_parallel { nprocs {list run_all} {nitems ALL} } {
+ set basename ./PARALLEL_TESTDIR
+ set queuedir ./RUNQUEUE
+ source ./include.tcl
+
+ mkparalleldirs $nprocs $basename $queuedir
+
+ init_runqueue $queuedir $nitems $list
+
+ set basedir [pwd]
+ set pidlist {}
+ set queuedir ../../[string range $basedir \
+ [string last "/" $basedir] end]/$queuedir
+
+ for { set i 1 } { $i <= $nprocs } { incr i } {
+ fileremove -f ALL.OUT.$i
+ set ret [catch {
+ set p [exec $tclsh_path << \
+ "source $test_path/test.tcl;\
+ run_queue $i $basename.$i $queuedir $nitems" &]
+ lappend pidlist $p
+ set f [open $testdir/begin.$p w]
+ close $f
+ } res]
+ }
+ watch_procs $pidlist 300 360000
+
+ set failed 0
+ for { set i 1 } { $i <= $nprocs } { incr i } {
+ if { [check_failed_run ALL.OUT.$i] != 0 } {
+ set failed 1
+ puts "Regression tests failed in process $i."
+ }
+ }
+ if { $failed == 0 } {
+ puts "Regression tests succeeded."
+ }
+}
+
+proc run_queue { i rundir queuedir nitems } {
+ set builddir [pwd]
+ file delete $builddir/ALL.OUT.$i
+ cd $rundir
+
+ puts "Parallel run_queue process $i (pid [pid]) starting."
+
+ source ./include.tcl
+ global env
+
+ set dbenv [berkdb_env -create -lock -home $queuedir]
+ error_check_good dbenv [is_valid_env $dbenv] TRUE
+
+ set db [eval {berkdb_open -env $dbenv \
+ -mode 0644 -len 120 -queue queue.db} ]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set dbc [eval $db cursor]
+ error_check_good cursor [is_valid_cursor $dbc $db] TRUE
+
+ set count 0
+ set waitcnt 0
+
+ while { $waitcnt < 5 } {
+ set line [$db get -consume]
+ if { [ llength $line ] > 0 } {
+ set cmd [lindex [lindex $line 0] 1]
+ set num [lindex [lindex $line 0] 0]
+ set o [open $builddir/ALL.OUT.$i a]
+ puts $o "\nExecuting record $num ([timestamp -w]):\n"
+ set tdir "TESTDIR.$i"
+ regsub {TESTDIR} $cmd $tdir cmd
+ puts $o $cmd
+ close $o
+ if { [expr {$num % 10} == 0] } {
+ puts "Starting test $num of $nitems"
+ }
+ #puts "Process $i, record $num:\n$cmd"
+ set env(PURIFYOPTIONS) \
+ "-log-file=./test$num.%p -follow-child-processes -messages=first"
+ set env(PURECOVOPTIONS) \
+ "-counts-file=./cov.pcv -log-file=./cov.log -follow-child-processes"
+ if [catch {exec $tclsh_path \
+ << "source $test_path/test.tcl; $cmd" \
+ >>& $builddir/ALL.OUT.$i } res] {
+ set o [open $builddir/ALL.OUT.$i a]
+ puts $o "FAIL: '$cmd': $res"
+ close $o
+ }
+ env_cleanup $testdir
+ set o [open $builddir/ALL.OUT.$i a]
+ puts $o "\nEnding record $num ([timestamp])\n"
+ close $o
+ incr count
+ } else {
+ incr waitcnt
+ tclsleep 1
+ }
+ }
+
+ puts "Process $i: $count commands executed"
+
+ $dbc close
+ $db close
+ $dbenv close
+
+ #
+ # We need to put the pid file in the builddir's idea
+ # of testdir, not this child process' local testdir.
+ # Therefore source builddir's include.tcl to get its
+ # testdir.
+ # !!! This resets testdir, so don't do anything else
+ # local to the child after this.
+ source $builddir/include.tcl
+
+ set f [open $builddir/$testdir/end.[pid] w]
+ close $f
+}
+
+proc mkparalleldirs { nprocs basename queuedir } {
+ source ./include.tcl
+ set dir [pwd]
+
+ if { $is_windows_test != 1 } {
+ set EXE ""
+ } else {
+ set EXE ".exe"
+ }
+ for { set i 1 } { $i <= $nprocs } { incr i } {
+ set destdir $basename.$i
+ catch {file mkdir $destdir}
+ puts "Created $destdir"
+ if { $is_windows_test == 1 } {
+ catch {file mkdir $destdir/Debug}
+ catch {eval file copy \
+ [eval glob {$dir/Debug/*.dll}] $destdir/Debug}
+ }
+ catch {eval file copy \
+ [eval glob {$dir/{.libs,include.tcl}}] $destdir}
+ # catch {eval file copy $dir/$queuedir $destdir}
+ catch {eval file copy \
+ [eval glob {$dir/db_{checkpoint,deadlock}$EXE} \
+ {$dir/db_{dump,load,printlog,recover,stat,upgrade}$EXE} \
+ {$dir/db_{archive,verify}$EXE}] \
+ $destdir}
+
+ # Create modified copies of include.tcl in parallel
+ # directories so paths still work.
+
+ set infile [open ./include.tcl r]
+ set d [read $infile]
+ close $infile
+
+ regsub {test_path } $d {test_path ../} d
+ regsub {src_root } $d {src_root ../} d
+ set tdir "TESTDIR.$i"
+ regsub -all {TESTDIR} $d $tdir d
+ regsub {KILL \.} $d {KILL ..} d
+ set outfile [open $destdir/include.tcl w]
+ puts $outfile $d
+ close $outfile
+
+ global svc_list
+ foreach svc_exe $svc_list {
+ if { [file exists $dir/$svc_exe] } {
+ catch {eval file copy $dir/$svc_exe $destdir}
+ }
+ }
+ }
+}
+
+proc run_ptest { nprocs test args } {
+ global parms
+ set basename ./PARALLEL_TESTDIR
+ set queuedir NULL
+ source ./include.tcl
+
+ mkparalleldirs $nprocs $basename $queuedir
+
+ if { [info exists parms($test)] } {
+ foreach method \
+ "hash queue queueext recno rbtree frecno rrecno btree" {
+ if { [eval exec_ptest $nprocs $basename \
+ $test $method $args] != 0 } {
+ break
+ }
+ }
+ } else {
+ eval exec_ptest $nprocs $basename $test $args
+ }
+}
+
+proc exec_ptest { nprocs basename test args } {
+ source ./include.tcl
+
+ set basedir [pwd]
+ set pidlist {}
+ puts "Running $nprocs parallel runs of $test"
+ for { set i 1 } { $i <= $nprocs } { incr i } {
+ set outf ALL.OUT.$i
+ fileremove -f $outf
+ set ret [catch {
+ set p [exec $tclsh_path << \
+ "cd $basename.$i;\
+ source ../$test_path/test.tcl;\
+ $test $args" >& $outf &]
+ lappend pidlist $p
+ set f [open $testdir/begin.$p w]
+ close $f
+ } res]
+ }
+ watch_procs $pidlist 30 36000
+ set failed 0
+ for { set i 1 } { $i <= $nprocs } { incr i } {
+ if { [check_failed_run ALL.OUT.$i] != 0 } {
+ set failed 1
+ puts "Test $test failed in process $i."
+ }
+ }
+ if { $failed == 0 } {
+ puts "Test $test succeeded all processes"
+ return 0
+ } else {
+ puts "Test failed: stopping"
+ return 1
+ }
+}
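Typical entry points for the new parallel.tcl, assuming it is sourced along with the rest of test.tcl; the argument values are only illustrative:

    # Split the full run_all regression list across 4 worker directories.
    run_parallel 4

    # Load only 100 randomly chosen items from the run_all list.
    run_parallel 4 run_all 100

    # Run a single named test in 4 processes at once, across the methods
    # listed in run_ptest (the test name here is illustrative).
    run_ptest 4 test001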
diff --git a/bdb/test/recd001.tcl b/bdb/test/recd001.tcl
index bbf5159011b..bc7ac6d896a 100644
--- a/bdb/test/recd001.tcl
+++ b/bdb/test/recd001.tcl
@@ -1,19 +1,27 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: recd001.tcl,v 11.28 2000/12/07 19:13:46 sue Exp $
+# $Id: recd001.tcl,v 11.40 2002/05/08 19:36:18 sandstro Exp $
#
-# Recovery Test 1.
-# These are the most basic recovery tests. We do individual recovery
-# tests for each operation in the access method interface. First we
-# create a file and capture the state of the database (i.e., we copy
-# it. Then we run a transaction containing a single operation. In
-# one test, we abort the transaction and compare the outcome to the
-# original copy of the file. In the second test, we restore the
-# original copy of the database and then run recovery and compare
-# this against the actual database.
+# TEST recd001
+# TEST Per-operation recovery tests for non-duplicate, non-split
+# TEST messages. Makes sure that we exercise redo, undo, and do-nothing
+# TEST condition. Any test that appears with the message (change state)
+# TEST indicates that we've already run the particular test, but we are
+# TEST running it again so that we can change the state of the database
+# TEST to prepare for the next test (this applies to all other recovery
+# TEST tests as well).
+# TEST
+# TEST These are the most basic recovery tests. We do individual recovery
+# TEST tests for each operation in the access method interface. First we
+# TEST create a file and capture the state of the database (i.e., we copy
+# TEST it). Then we run a transaction containing a single operation. In
+# TEST one test, we abort the transaction and compare the outcome to the
+# TEST original copy of the file. In the second test, we restore the
+# TEST original copy of the database and then run recovery and compare
+# TEST this against the actual database.
proc recd001 { method {select 0} args} {
global fixed_len
source ./include.tcl
@@ -43,7 +51,7 @@ proc recd001 { method {select 0} args} {
set flags "-create -txn -home $testdir"
puts "\tRecd001.a.0: creating environment"
- set env_cmd "berkdb env $flags"
+ set env_cmd "berkdb_env $flags"
set dbenv [eval $env_cmd]
error_check_good dbenv [is_valid_env $dbenv] TRUE
@@ -124,6 +132,7 @@ proc recd001 { method {select 0} args} {
set newdata NEWrecd001_dataNEW
set off 3
set len 12
+
set partial_grow replacement_record_grow
set partial_shrink xxx
if { [is_fixed_length $method] == 1 } {
@@ -165,16 +174,69 @@ proc recd001 { method {select 0} args} {
# }
op_recover abort $testdir $env_cmd $testfile $cmd $msg
op_recover commit $testdir $env_cmd $testfile $cmd $msg
- op_recover prepare $testdir $env_cmd $testfile2 $cmd $msg
- op_recover prepare-abort $testdir $env_cmd $testfile2 $cmd $msg
- op_recover prepare-commit $testdir $env_cmd $testfile2 $cmd $msg
+ #
+ # Note that since prepare-discard ultimately aborts
+ # the txn, it must come before prepare-commit.
+ #
+ op_recover prepare-abort $testdir $env_cmd $testfile2 \
+ $cmd $msg
+ op_recover prepare-discard $testdir $env_cmd $testfile2 \
+ $cmd $msg
+ op_recover prepare-commit $testdir $env_cmd $testfile2 \
+ $cmd $msg
}
set fixed_len $orig_fixed_len
- puts "\tRecd001.o: Verify db_printlog can read logfile"
- set tmpfile $testdir/printlog.out
- set stat [catch {exec $util_path/db_printlog -h $testdir \
- > $tmpfile} ret]
- error_check_good db_printlog $stat 0
- fileremove $tmpfile
+ if { [is_fixed_length $method] == 1 } {
+ puts "Skipping remainder of test for fixed length methods"
+ return
+ }
+
+ #
+ # Check partial extensions. If we add a key/data to the database
+ # and then expand it using -partial, and then recover, recovery
+ # used to fail (#3944). Check that scenario here.
+ #
+ # !!!
+ # We loop here because on each iteration, we need to clean up
+ # the old env (i.e. this test does not depend on earlier runs).
+ # If we run it without cleaning up the env in between, we do not
+ # test the scenario of #3944.
+ #
+ set len [string length $data]
+ set len2 256
+ set part_data [replicate "abcdefgh" 32]
+ set p [list 0 $len]
+ set cmd [subst \
+ {DB put -txn TXNID -partial {$len $len2} $key $part_data}]
+ set msg "Recd001.o: partial put prepopulated/expanding"
+ foreach op {abort commit prepare-abort prepare-discard prepare-commit} {
+ env_cleanup $testdir
+
+ set dbenv [eval $env_cmd]
+ error_check_good dbenv [is_valid_env $dbenv] TRUE
+ set t [$dbenv txn]
+ error_check_good txn_begin [is_valid_txn $t $dbenv] TRUE
+ set oflags "-create $omethod -mode 0644 \
+ -env $dbenv -txn $t $opts $testfile"
+ set db [eval {berkdb_open} $oflags]
+ error_check_good db_open [is_valid_db $db] TRUE
+ set oflags "-create $omethod -mode 0644 \
+ -env $dbenv -txn $t $opts $testfile2"
+ set db2 [eval {berkdb_open} $oflags]
+ error_check_good db_open [is_valid_db $db2] TRUE
+
+ set ret [$db put -txn $t -partial $p $key $data]
+ error_check_good dbput $ret 0
+
+ set ret [$db2 put -txn $t -partial $p $key $data]
+ error_check_good dbput $ret 0
+ error_check_good txncommit [$t commit] 0
+ error_check_good dbclose [$db close] 0
+ error_check_good dbclose [$db2 close] 0
+ error_check_good dbenvclose [$dbenv close] 0
+
+ op_recover $op $testdir $env_cmd $testfile $cmd $msg
+ }
+ return
}
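The partial-put block added above funnels every recovery mode through the same op_recover helper; schematically, using only the calls that appear in the hunk:

    set cmd [subst \
        {DB put -txn TXNID -partial {$len $len2} $key $part_data}]
    foreach op {abort commit prepare-abort prepare-discard prepare-commit} {
        # A fresh environment per iteration, so each run reproduces the
        # #3944 starting state instead of inheriting the previous one.
        env_cleanup $testdir
        # ... populate $testfile and $testfile2 in one committed txn ...
        op_recover $op $testdir $env_cmd $testfile $cmd $msg
    }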
diff --git a/bdb/test/recd002.tcl b/bdb/test/recd002.tcl
index ffcec6527e8..ed579291283 100644
--- a/bdb/test/recd002.tcl
+++ b/bdb/test/recd002.tcl
@@ -1,11 +1,13 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: recd002.tcl,v 11.22 2000/12/11 17:24:54 sue Exp $
+# $Id: recd002.tcl,v 11.30 2002/02/25 16:44:24 sandstro Exp $
#
-# Recovery Test #2. Verify that splits can be recovered.
+# TEST recd002
+# TEST Split recovery tests. For every known split log message, makes sure
+# TEST that we exercise redo, undo, and do-nothing condition.
proc recd002 { method {select 0} args} {
source ./include.tcl
global rand_init
@@ -37,7 +39,7 @@ proc recd002 { method {select 0} args} {
"-create -txn -lock_max 2000 -home $testdir"
puts "\tRecd002.a: creating environment"
- set env_cmd "berkdb env $eflags"
+ set env_cmd "berkdb_env $eflags"
set dbenv [eval $env_cmd]
error_check_bad dbenv $dbenv NULL
@@ -80,9 +82,14 @@ proc recd002 { method {select 0} args} {
}
op_recover abort $testdir $env_cmd $testfile $cmd $msg
op_recover commit $testdir $env_cmd $testfile $cmd $msg
- op_recover prepare $testdir $env_cmd $testfile2 $cmd $msg
+ #
+ # Note that since prepare-discard ultimately aborts
+ # the txn, it must come before prepare-commit.
+ #
op_recover prepare-abort $testdir $env_cmd $testfile2 \
$cmd $msg
+ op_recover prepare-discard $testdir $env_cmd $testfile2 \
+ $cmd $msg
op_recover prepare-commit $testdir $env_cmd $testfile2 \
$cmd $msg
}
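The same reordering is applied to recd003 and recd004 below; the point is that prepare-discard ends with the transaction aborted by recovery, so it must run before prepare-commit, which leaves the file in its committed state. Each recovery test now walks the operations in this order:

    op_recover abort $testdir $env_cmd $testfile $cmd $msg
    op_recover commit $testdir $env_cmd $testfile $cmd $msg
    op_recover prepare-abort $testdir $env_cmd $testfile2 $cmd $msg
    op_recover prepare-discard $testdir $env_cmd $testfile2 $cmd $msg
    op_recover prepare-commit $testdir $env_cmd $testfile2 $cmd $msg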
diff --git a/bdb/test/recd003.tcl b/bdb/test/recd003.tcl
index af7097c8909..0fd054832ce 100644
--- a/bdb/test/recd003.tcl
+++ b/bdb/test/recd003.tcl
@@ -1,14 +1,17 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: recd003.tcl,v 11.22 2000/12/07 19:13:46 sue Exp $
+# $Id: recd003.tcl,v 11.30 2002/02/25 16:44:24 sandstro Exp $
#
-# Recovery Test 3.
-# Test all the duplicate log messages and recovery operations. We make
-# sure that we exercise all possible recovery actions: redo, undo, undo
-# but no fix necessary and redo but no fix necessary.
+# TEST recd003
+# TEST Duplicate recovery tests. For every known duplicate log message,
+# TEST makes sure that we exercise redo, undo, and do-nothing condition.
+# TEST
+# TEST Test all the duplicate log messages and recovery operations. We make
+# TEST sure that we exercise all possible recovery actions: redo, undo, undo
+# TEST but no fix necessary and redo but no fix necessary.
proc recd003 { method {select 0} args } {
source ./include.tcl
global rand_init
@@ -31,7 +34,7 @@ proc recd003 { method {select 0} args } {
set eflags "-create -txn -home $testdir"
puts "\tRecd003.a: creating environment"
- set env_cmd "berkdb env $eflags"
+ set env_cmd "berkdb_env $eflags"
set dbenv [eval $env_cmd]
error_check_bad dbenv $dbenv NULL
@@ -95,9 +98,14 @@ proc recd003 { method {select 0} args } {
}
op_recover abort $testdir $env_cmd $testfile $cmd $msg
op_recover commit $testdir $env_cmd $testfile $cmd $msg
- op_recover prepare $testdir $env_cmd $testfile2 $cmd $msg
+ #
+ # Note that since prepare-discard ultimately aborts
+ # the txn, it must come before prepare-commit.
+ #
op_recover prepare-abort $testdir $env_cmd $testfile2 \
$cmd $msg
+ op_recover prepare-discard $testdir $env_cmd $testfile2 \
+ $cmd $msg
op_recover prepare-commit $testdir $env_cmd $testfile2 \
$cmd $msg
}
diff --git a/bdb/test/recd004.tcl b/bdb/test/recd004.tcl
index 012dd80f6e5..74504ac3cd7 100644
--- a/bdb/test/recd004.tcl
+++ b/bdb/test/recd004.tcl
@@ -1,12 +1,12 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: recd004.tcl,v 11.21 2000/12/11 17:24:55 sue Exp $
+# $Id: recd004.tcl,v 11.29 2002/02/25 16:44:25 sandstro Exp $
#
-# Recovery Test #4.
-# Verify that we work correctly when big keys get elevated.
+# TEST recd004
+# TEST Big key test where big key gets elevated to internal page.
proc recd004 { method {select 0} args} {
source ./include.tcl
global rand_init
@@ -32,7 +32,7 @@ proc recd004 { method {select 0} args} {
set testfile2 recd004-2.db
set eflags "-create -txn -home $testdir"
puts "\tRecd004.a: creating environment"
- set env_cmd "berkdb env $eflags"
+ set env_cmd "berkdb_env $eflags"
set dbenv [eval $env_cmd]
error_check_bad dbenv $dbenv NULL
@@ -74,9 +74,14 @@ proc recd004 { method {select 0} args} {
}
op_recover abort $testdir $env_cmd $testfile $cmd $msg
op_recover commit $testdir $env_cmd $testfile $cmd $msg
- op_recover prepare $testdir $env_cmd $testfile2 $cmd $msg
+ #
+ # Note that since prepare-discard ultimately aborts
+ # the txn, it must come before prepare-commit.
+ #
op_recover prepare-abort $testdir $env_cmd $testfile2 \
$cmd $msg
+ op_recover prepare-discard $testdir $env_cmd $testfile2 \
+ $cmd $msg
op_recover prepare-commit $testdir $env_cmd $testfile2 \
$cmd $msg
}
diff --git a/bdb/test/recd005.tcl b/bdb/test/recd005.tcl
index 06a346f4484..7668c9e3be3 100644
--- a/bdb/test/recd005.tcl
+++ b/bdb/test/recd005.tcl
@@ -1,13 +1,15 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: recd005.tcl,v 11.27 2000/12/15 21:41:38 ubell Exp $
+# $Id: recd005.tcl,v 11.34 2002/05/22 15:42:39 sue Exp $
#
-# Recovery Test 5.
-# Make sure that we can do catastrophic recovery even if we open
-# files using the same log file id.
+# TEST recd005
+# TEST Verify reuse of file ids works on catastrophic recovery.
+# TEST
+# TEST Make sure that we can do catastrophic recovery even if we open
+# TEST files using the same log file id.
proc recd005 { method args} {
source ./include.tcl
global rand_init
@@ -15,7 +17,7 @@ proc recd005 { method args} {
set args [convert_args $method $args]
set omethod [convert_method $method]
- puts "Recd005: $method catastropic recovery"
+ puts "Recd005: $method catastrophic recovery"
berkdb srand $rand_init
@@ -38,7 +40,7 @@ proc recd005 { method args} {
puts "\tRecd005.$tnum: $s1 $s2 $op1 $op2"
puts "\tRecd005.$tnum.a: creating environment"
- set env_cmd "berkdb env $eflags"
+ set env_cmd "berkdb_env $eflags"
set dbenv [eval $env_cmd]
error_check_bad dbenv $dbenv NULL
@@ -147,12 +149,11 @@ proc do_one_file { dir method env env_cmd filename num op } {
# Save the initial file and open the environment and the first file
file copy -force $dir/$filename $dir/$filename.init
copy_extent_file $dir $filename init
- set oflags "-unknown -env $env"
+ set oflags "-auto_commit -unknown -env $env"
set db [eval {berkdb_open} $oflags $filename]
# Dump out file contents for initial case
- set tflags ""
- open_and_dump_file $filename $env $tflags $init_file nop \
+ open_and_dump_file $filename $env $init_file nop \
dump_file_direction "-first" "-next"
set txn [$env txn]
@@ -167,7 +168,7 @@ proc do_one_file { dir method env env_cmd filename num op } {
error_check_good sync:$db [$db sync] 0
file copy -force $dir/$filename $dir/$filename.afterop
copy_extent_file $dir $filename afterop
- open_and_dump_file $testdir/$filename.afterop NULL $tflags \
+ open_and_dump_file $testdir/$filename.afterop NULL \
$afterop_file nop dump_file_direction "-first" "-next"
error_check_good txn_$op:$txn [$txn $op] 0
@@ -179,7 +180,7 @@ proc do_one_file { dir method env env_cmd filename num op } {
# Dump out file and save a copy.
error_check_good sync:$db [$db sync] 0
- open_and_dump_file $testdir/$filename NULL $tflags $final_file nop \
+ open_and_dump_file $testdir/$filename NULL $final_file nop \
dump_file_direction "-first" "-next"
file copy -force $dir/$filename $dir/$filename.final
copy_extent_file $dir $filename final
@@ -211,8 +212,7 @@ proc check_file { dir env_cmd filename op } {
set afterop_file $dir/$filename.t2
set final_file $dir/$filename.t3
- set tflags ""
- open_and_dump_file $testdir/$filename NULL $tflags $final_file nop \
+ open_and_dump_file $testdir/$filename NULL $final_file nop \
dump_file_direction "-first" "-next"
if { $op == "abort" } {
filesort $init_file $init_file.sort
@@ -227,5 +227,4 @@ proc check_file { dir env_cmd filename op } {
diff(pre-commit,post-$op):diff($afterop_file,$final_file) \
[filecmp $afterop_file.sort $final_file.sort] 0
}
-
}
diff --git a/bdb/test/recd006.tcl b/bdb/test/recd006.tcl
index 14f01cc0b8f..fc35e755b08 100644
--- a/bdb/test/recd006.tcl
+++ b/bdb/test/recd006.tcl
@@ -1,12 +1,12 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: recd006.tcl,v 11.21 2000/12/07 19:13:46 sue Exp $
+# $Id: recd006.tcl,v 11.26 2002/03/15 16:30:53 sue Exp $
#
-# Recovery Test 6.
-# Test nested transactions.
+# TEST recd006
+# TEST Nested transactions.
proc recd006 { method {select 0} args} {
global kvals
source ./include.tcl
@@ -83,7 +83,7 @@ proc recd006 { method {select 0} args} {
set eflags "-create -txn -home $testdir"
puts "\tRecd006.b: creating environment"
- set env_cmd "berkdb env $eflags"
+ set env_cmd "berkdb_env $eflags"
set dbenv [eval $env_cmd]
error_check_bad dbenv $dbenv NULL
@@ -176,7 +176,7 @@ proc nesttest { db parent env do p1 p2 child1 child2} {
# OK, do child 1
set kid1 [$env txn -parent $parent]
- error_check_good kid1 [is_valid_widget $kid1 $env.txn] TRUE
+ error_check_good kid1 [is_valid_txn $kid1 $env] TRUE
# Reading write-locked parent object should be OK
#puts "\tRead write-locked parent object for kid1."
@@ -193,7 +193,7 @@ proc nesttest { db parent env do p1 p2 child1 child2} {
# Now start child2
#puts "\tBegin txn for kid2."
set kid2 [$env txn -parent $parent]
- error_check_good kid2 [is_valid_widget $kid2 $env.txn] TRUE
+ error_check_good kid2 [is_valid_txn $kid2 $env] TRUE
# Getting anything in the p1 set should deadlock, so let's
# work on the p2 set.
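The validity-check change above (is_valid_txn instead of is_valid_widget) goes with the nested-transaction pattern recd006 exercises. A minimal sketch, assuming an already-open transactional environment handle $env:

    set parent [$env txn]
    error_check_good parent [is_valid_txn $parent $env] TRUE

    # Children are created with -parent and, after this patch, validated
    # as transactions rather than as generic widgets.
    set kid1 [$env txn -parent $parent]
    error_check_good kid1 [is_valid_txn $kid1 $env] TRUE

    error_check_good kid1_commit [$kid1 commit] 0
    error_check_good parent_commit [$parent commit] 0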
diff --git a/bdb/test/recd007.tcl b/bdb/test/recd007.tcl
index d077ae19f2c..aeac3bea2c1 100644
--- a/bdb/test/recd007.tcl
+++ b/bdb/test/recd007.tcl
@@ -1,16 +1,18 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1999, 2000
+# Copyright (c) 1999-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: recd007.tcl,v 11.38 2000/12/20 21:39:23 krinsky Exp $
+# $Id: recd007.tcl,v 11.60 2002/08/08 15:38:07 bostic Exp $
#
-# Recovery Test 7.
-# This is a recovery test for create/delete of databases. We have
-# hooks in the database so that we can abort the process at various
-# points and make sure that the transaction doesn't commit. We
-# then need to recover and make sure the file is correctly existing
-# or not, as the case may be.
+# TEST recd007
+# TEST File create/delete tests.
+# TEST
+# TEST This is a recovery test for create/delete of databases. We have
+# TEST hooks in the database so that we can abort the process at various
+# TEST points and make sure that the transaction doesn't commit. We
+# TEST then need to recover and make sure the file is correctly existing
+# TEST or not, as the case may be.
proc recd007 { method args} {
global fixed_len
source ./include.tcl
@@ -28,10 +30,10 @@ proc recd007 { method args} {
set flags "-create -txn -home $testdir"
puts "\tRecd007.a: creating environment"
- set env_cmd "berkdb env $flags"
+ set env_cmd "berkdb_env $flags"
set env [eval $env_cmd]
- #
+
# We need to create a database to get the pagesize (either
# the default or whatever might have been specified).
# Then remove it so we can compute fixed_len and create the
@@ -54,7 +56,6 @@ proc recd007 { method args} {
# Convert the args again because fixed_len is now real.
set opts [convert_args $method ""]
- #
# List of recovery tests: {HOOKS MSG} pairs
# Where each HOOK is a list of {COPY ABORT}
#
@@ -89,25 +90,26 @@ proc recd007 { method args} {
}
set rlist {
- { {"none" "prerename"} "Recd007.l0: none/prerename"}
- { {"none" "postrename"} "Recd007.l1: none/postrename"}
- { {"prerename" "none"} "Recd007.m0: prerename/none"}
- { {"postrename" "none"} "Recd007.m1: postrename/none"}
- { {"prerename" "prerename"} "Recd007.n: prerename/prerename"}
- { {"prerename" "postrename"} "Recd007.o: prerename/postrename"}
- { {"postrename" "postrename"} "Recd007.p: postrename/postrename"}
- }
- foreach op { dbremove dbrename } {
+ { {"none" "predestroy"} "Recd007.l0: none/predestroy"}
+ { {"none" "postdestroy"} "Recd007.l1: none/postdestroy"}
+ { {"predestroy" "none"} "Recd007.m0: predestroy/none"}
+ { {"postdestroy" "none"} "Recd007.m1: postdestroy/none"}
+ { {"predestroy" "predestroy"} "Recd007.n: predestroy/predestroy"}
+ { {"predestroy" "postdestroy"} "Recd007.o: predestroy/postdestroy"}
+ { {"postdestroy" "postdestroy"} "Recd007.p: postdestroy/postdestroy"}
+ }
+ foreach op { dbremove dbrename dbtruncate } {
foreach pair $rlist {
set cmd [lindex $pair 0]
set msg [lindex $pair 1]
file_recover_delete $testdir $env_cmd $omethod \
- $opts $testfile $cmd $msg $op
+ $opts $testfile $cmd $msg $op
}
}
if { $is_windows_test != 1 } {
- do_file_recover_delmk $testdir $env_cmd $omethod $opts $testfile
+ set env_cmd "berkdb_env_noerr $flags"
+ do_file_recover_delmk $testdir $env_cmd $method $opts $testfile
}
puts "\tRecd007.r: Verify db_printlog can read logfile"
@@ -150,6 +152,7 @@ proc do_file_recover_create { dir env_cmd method opts dbfile sub cmd msg } {
}
env_cleanup $dir
+ set dflags "-dar"
# Open the environment and set the copy/abort locations
set env [eval $env_cmd]
set copy [lindex $cmd 0]
@@ -167,17 +170,16 @@ proc do_file_recover_create { dir env_cmd method opts dbfile sub cmd msg } {
return
}
- #
# Basically non-existence is our initial state. When we
# abort, it is also our final state.
#
switch $sub {
0 {
- set oflags "-create $method -mode 0644 \
+ set oflags "-create $method -auto_commit -mode 0644 \
-env $env $opts $dbfile"
}
1 {
- set oflags "-create $method -mode 0644 \
+ set oflags "-create $method -auto_commit -mode 0644 \
-env $env $opts $dbfile sub0"
}
2 {
@@ -185,14 +187,14 @@ proc do_file_recover_create { dir env_cmd method opts dbfile sub cmd msg } {
# If we are aborting here, then we need to
# create a first subdb, then create a second
#
- set oflags "-create $method -mode 0644 \
+ set oflags "-create $method -auto_commit -mode 0644 \
-env $env $opts $dbfile sub0"
set db [eval {berkdb_open} $oflags]
error_check_good db_open [is_valid_db $db] TRUE
error_check_good db_close [$db close] 0
set init_file $dir/$dbfile.init
catch { file copy -force $dir/$dbfile $init_file } res
- set oflags "-create $method -mode 0644 \
+ set oflags "-create $method -auto_commit -mode 0644 \
-env $env $opts $dbfile sub1"
}
default {
@@ -214,8 +216,7 @@ proc do_file_recover_create { dir env_cmd method opts dbfile sub cmd msg } {
# Sync the mpool so any changes to the file that are
# in mpool get written to the disk file before the
# diff.
- puts "\t\tSyncing"
- $env mpool_sync "0 0"
+ $env mpool_sync
#
# If we don't abort, then we expect success.
@@ -238,7 +239,7 @@ proc do_file_recover_create { dir env_cmd method opts dbfile sub cmd msg } {
} else {
error_check_good \
diff(init,postcreate):diff($init_file,$dir/$dbfile)\
- [dbdump_diff $init_file $dir/$dbfile] 0
+ [dbdump_diff $dflags $init_file $dir $dbfile] 0
}
} else {
#
@@ -289,7 +290,7 @@ proc do_file_recover_create { dir env_cmd method opts dbfile sub cmd msg } {
#
error_check_good \
diff(initial,post-recover1):diff($init_file,$dir/$dbfile) \
- [dbdump_diff $init_file $dir/$dbfile] 0
+ [dbdump_diff $dflags $init_file $dir $dbfile] 0
#
# Need a new copy to get the right LSN into the file.
#
@@ -300,7 +301,6 @@ proc do_file_recover_create { dir env_cmd method opts dbfile sub cmd msg } {
}
}
- #
# If we didn't make a copy, then we are done.
#
if {[string first "none" $copy] != -1} {
@@ -310,11 +310,7 @@ proc do_file_recover_create { dir env_cmd method opts dbfile sub cmd msg } {
#
# Now move the .afterop file to $dbfile. Run recovery again.
#
- file copy -force $dir/$dbfile.afterop $dir/$dbfile
-
- if { [is_queue $method] == 1 } {
- move_file_extent $dir $dbfile afterop copy
- }
+ copy_afterop $dir
berkdb debug_check
puts -nonewline "\t\tAbout to run recovery ... "
@@ -339,7 +335,7 @@ proc do_file_recover_create { dir env_cmd method opts dbfile sub cmd msg } {
#
error_check_good \
diff(initial,post-recover2):diff($init_file,$dir/$dbfile) \
- [dbdump_diff $init_file $dir/$dbfile] 0
+ [dbdump_diff $dflags $init_file $dir $dbfile] 0
}
}
@@ -384,43 +380,61 @@ proc do_file_recover_delete { dir env_cmd method opts dbfile sub cmd msg op } {
error_check_good abort_location [is_valid_delete_loc $abort] 1
if { [is_record_based $method] == 1 } {
- set key 1
+ set key1 1
+ set key2 2
} else {
- set key recd007_key
+ set key1 recd007_key1
+ set key2 recd007_key2
}
- set data1 recd007_data
- set data2 NEWrecd007_data2
+ set data1 recd007_data0
+ set data2 recd007_data1
+ set data3 NEWrecd007_data2
#
# Depending on what sort of subdb we want, if any, our
# args to the open call will be different (and if we
# want a 2nd subdb, we create the first here.
#
+ # XXX
+ # For dbtruncate, we want oflags to have "$env" in it,
+ # not have the value currently in 'env'. That is why
+ # the '$' is protected below. Later on we use oflags
+ # but with a new $env we just opened.
+ #
switch $sub {
0 {
- set oflags "-create $method -mode 0644 \
- -env $env $opts $dbfile"
+ set subdb ""
+ set new $dbfile.new
+ set dflags "-dar"
+ set oflags "-create $method -auto_commit -mode 0644 \
+ -env \$env $opts $dbfile"
}
1 {
- set oflags "-create $method -mode 0644 \
- -env $env $opts $dbfile sub0"
+ set subdb sub0
+ set new $subdb.new
+ set dflags ""
+ set oflags "-create $method -auto_commit -mode 0644 \
+ -env \$env $opts $dbfile $subdb"
}
2 {
#
# If we are aborting here, then we need to
# create a first subdb, then create a second
#
- set oflags "-create $method -mode 0644 \
- -env $env $opts $dbfile sub0"
+ set subdb sub1
+ set new $subdb.new
+ set dflags ""
+ set oflags "-create $method -auto_commit -mode 0644 \
+ -env \$env $opts $dbfile sub0"
set db [eval {berkdb_open} $oflags]
error_check_good db_open [is_valid_db $db] TRUE
set txn [$env txn]
- set ret [$db put -txn $txn $key $data2]
+ set ret [$db put -txn $txn $key1 $data1]
error_check_good db_put $ret 0
error_check_good commit [$txn commit] 0
error_check_good db_close [$db close] 0
- set oflags "-create $method -mode 0644 \
- -env $env $opts $dbfile sub1"
+ set oflags "-create $method -auto_commit -mode 0644 \
+ -env \$env $opts $dbfile $subdb"
}
default {
puts "\tBad value $sub for sub"
@@ -443,11 +457,15 @@ proc do_file_recover_delete { dir env_cmd method opts dbfile sub cmd msg op } {
set db [eval {berkdb_open} $oflags]
error_check_good db_open [is_valid_db $db] TRUE
set txn [$env txn]
- set ret [$db put -txn $txn $key $data1]
+ set ret [$db put -txn $txn $key1 $data1]
+ error_check_good db_put $ret 0
+ set ret [$db put -txn $txn $key2 $data2]
error_check_good db_put $ret 0
error_check_good commit [$txn commit] 0
error_check_good db_close [$db close] 0
+ $env mpool_sync
+
set init_file $dir/$dbfile.init
catch { file copy -force $dir/$dbfile $init_file } res
@@ -459,16 +477,51 @@ proc do_file_recover_delete { dir env_cmd method opts dbfile sub cmd msg op } {
# If we don't abort, then we expect success.
# If we abort, we expect no file removed.
#
- if { [string compare $op dbremove] == 0 } {
- set ret [catch { berkdb $op -env $env $dbfile } remret]
+ switch $op {
+ "dbrename" {
+ set ret [catch { eval {berkdb} $op -env $env -auto_commit \
+ $dbfile $subdb $new } remret]
+ }
+ "dbremove" {
+ set ret [catch { eval {berkdb} $op -env $env -auto_commit \
+ $dbfile $subdb } remret]
+ }
+ "dbtruncate" {
+ set txn [$env txn]
+ set db [eval {berkdb_open_noerr -env} \
+ $env -auto_commit $dbfile $subdb]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ error_check_good txnbegin [is_valid_txn $txn $env] TRUE
+ set ret [catch {$db truncate -txn $txn} remret]
+ }
+ }
+ $env mpool_sync
+ if { $abort == "none" } {
+ if { $op == "dbtruncate" } {
+ error_check_good txncommit [$txn commit] 0
+ error_check_good dbclose [$db close] 0
+ }
+ #
+ # Operation was committed, verify it.
+ #
+ puts "\t\tCommand executed and committed."
+ error_check_good $op $ret 0
+ #
+ # If a dbtruncate, check that truncate returned the number
+ # of items previously in the database.
+ #
+ if { [string compare $op "dbtruncate"] == 0 } {
+ error_check_good remret $remret 2
+ }
+ recd007_check $op $sub $dir $dbfile $subdb $new $env $oflags
} else {
- set ret [catch { berkdb $op -env $env $dbfile $dbfile.new } \
- remret]
- }
- if {[string first "none" $abort] == -1} {
#
# Operation was aborted, verify it did not change.
#
+ if { $op == "dbtruncate" } {
+ error_check_good txnabort [$txn abort] 0
+ error_check_good dbclose [$db close] 0
+ }
puts "\t\tCommand executed and aborted."
error_check_good $op $ret 1
@@ -479,30 +532,16 @@ proc do_file_recover_delete { dir env_cmd method opts dbfile sub cmd msg op } {
error_check_good post$op.1 [file exists $dir/$dbfile] 1
error_check_good \
diff(init,post$op.2):diff($init_file,$dir/$dbfile)\
- [dbdump_diff $init_file $dir/$dbfile] 0
- } else {
- #
- # Operation was committed, verify it does
- # not exist.
- #
- puts "\t\tCommand executed and committed."
- error_check_good $op $ret 0
- #
- # Check that the file does not exist or correct
- # file exists.
- #
- error_check_good $op [file exists $dir/$dbfile] 0
- if { [string compare $op dbrename] == 0 } {
- error_check_good $op [file exists $dir/$dbfile.new] 1
- }
+ [dbdump_diff $dflags $init_file $dir $dbfile] 0
}
+ $env mpool_sync
error_check_good env_close [$env close] 0
catch { file copy -force $dir/$dbfile $init_file } res
-
if { [is_queue $method] == 1} {
copy_extent_file $dir $dbfile init
}
+
#
# Run recovery here. Should be a no-op. Verify that
# the file still doesn't exist or change (depending on abort)
@@ -517,20 +556,24 @@ proc do_file_recover_delete { dir env_cmd method opts dbfile sub cmd msg op } {
error "FAIL: Recovery error: $result."
return
}
+
puts "complete"
- if { [string first "none" $abort] != -1} {
+
+ if { $abort == "none" } {
#
- # Operation was committed, verify it still does
- # not exist.
+ # Operation was committed.
#
- error_check_good after_recover1 [file exists $dir/$dbfile] 0
+ set env [eval $env_cmd]
+ recd007_check $op $sub $dir $dbfile $subdb $new $env $oflags
+ error_check_good env_close [$env close] 0
} else {
#
# Operation was aborted, verify it did not change.
#
+ berkdb debug_check
error_check_good \
diff(initial,post-recover1):diff($init_file,$dir/$dbfile) \
- [dbdump_diff $init_file $dir/$dbfile] 0
+ [dbdump_diff $dflags $init_file $dir $dbfile] 0
}
#
@@ -541,15 +584,10 @@ proc do_file_recover_delete { dir env_cmd method opts dbfile sub cmd msg op } {
}
#
- # Now move the .afterop file to $dbfile. Run recovery again.
+ # Now restore the .afterop file(s) to their original name.
+ # Run recovery again.
#
- set filecopy [glob $dir/*.afterop]
- set afterop [lindex $filecopy 0]
- file rename -force $afterop $dir/$dbfile
- set afterop [string range $afterop \
- [expr [string last "/" $afterop] + 1] \
- [string last "." $afterop]]
- move_file_extent $dir $dbfile afterop rename
+ copy_afterop $dir
berkdb debug_check
puts -nonewline "\t\tAbout to run recovery ... "
@@ -563,18 +601,16 @@ proc do_file_recover_delete { dir env_cmd method opts dbfile sub cmd msg op } {
puts "complete"
if { [string first "none" $abort] != -1} {
- #
- # Operation was committed, verify it still does
- # not exist.
- #
- error_check_good after_recover2 [file exists $dir/$dbfile] 0
+ set env [eval $env_cmd]
+ recd007_check $op $sub $dir $dbfile $subdb $new $env $oflags
+ error_check_good env_close [$env close] 0
} else {
#
# Operation was aborted, verify it did not change.
#
error_check_good \
diff(initial,post-recover2):diff($init_file,$dir/$dbfile) \
- [dbdump_diff $init_file $dir/$dbfile] 0
+ [dbdump_diff $dflags $init_file $dir $dbfile] 0
}
}
@@ -597,11 +633,13 @@ proc do_file_recover_delmk { dir env_cmd method opts dbfile } {
if { $log_log_record_types == 1} {
logtrack_read $dir
}
+ set omethod [convert_method $method]
puts "\tRecd007.q: Delete and recreate a database"
env_cleanup $dir
# Open the environment and set the copy/abort locations
set env [eval $env_cmd]
+ error_check_good env_open [is_valid_env $env] TRUE
if { [is_record_based $method] == 1 } {
set key 1
@@ -611,13 +649,14 @@ proc do_file_recover_delmk { dir env_cmd method opts dbfile } {
set data1 recd007_data
set data2 NEWrecd007_data2
- set oflags "-create $method -mode 0644 -env $env $opts $dbfile"
+ set oflags \
+ "-create $omethod -auto_commit -mode 0644 $opts $dbfile"
#
# Open our db, add some data, close and copy as our
# init file.
#
- set db [eval {berkdb_open} $oflags]
+ set db [eval {berkdb_open_noerr} -env $env $oflags]
error_check_good db_open [is_valid_db $db] TRUE
set txn [$env txn]
set ret [$db put -txn $txn $key $data1]
@@ -625,7 +664,9 @@ proc do_file_recover_delmk { dir env_cmd method opts dbfile } {
error_check_good commit [$txn commit] 0
error_check_good db_close [$db close] 0
- set ret [catch { berkdb dbremove -env $env $dbfile } remret]
+ set ret \
+ [catch { berkdb dbremove -env $env -auto_commit $dbfile } remret]
+
#
# Operation was committed, verify it does
# not exist.
@@ -637,10 +678,10 @@ proc do_file_recover_delmk { dir env_cmd method opts dbfile } {
#
# Now create a new db with the same name.
#
- set db [eval {berkdb_open} $oflags]
+ set db [eval {berkdb_open_noerr} -env $env $oflags]
error_check_good db_open [is_valid_db $db] TRUE
set txn [$env txn]
- set ret [$db put -txn $txn $key $data1]
+ set ret [$db put -txn $txn $key [chop_data $method $data2]]
error_check_good db_put $ret 0
error_check_good commit [$txn commit] 0
error_check_good db_sync [$db sync] 0
@@ -663,9 +704,29 @@ proc do_file_recover_delmk { dir env_cmd method opts dbfile } {
# up the Tcl widgets.
#
set stat [catch {$db close} ret]
+ error_check_bad dbclose_after_remove $stat 0
+ error_check_good dbclose_after_remove [is_substr $ret recovery] 1
set stat [catch {$env close} ret]
+ error_check_bad envclose_after_remove $stat 0
+ error_check_good envclose_after_remove [is_substr $ret recovery] 1
+ #
+ # Reopen env and db and verify 2nd database is there.
+ #
+ set env [eval $env_cmd]
+ error_check_good env_open [is_valid_env $env] TRUE
+ set db [eval {berkdb_open} -env $env $oflags]
+ error_check_good db_open [is_valid_db $db] TRUE
+ set ret [$db get $key]
+ error_check_good dbget [llength $ret] 1
+ set kd [lindex $ret 0]
+ error_check_good key [lindex $kd 0] $key
+ error_check_good data2 [lindex $kd 1] [pad_data $method $data2]
+
+ error_check_good dbclose [$db close] 0
+ error_check_good envclose [$env close] 0
}
+
proc is_valid_create_loc { loc } {
switch $loc {
none -
@@ -683,8 +744,8 @@ proc is_valid_create_loc { loc } {
proc is_valid_delete_loc { loc } {
switch $loc {
none -
- prerename -
- postrename -
+ predestroy -
+ postdestroy -
postremcall
{ return 1 }
default
@@ -697,23 +758,23 @@ proc is_valid_delete_loc { loc } {
# just a free/invalid page.
# Return 1 if they are different, 0 if logically the same (or identical).
#
-proc dbdump_diff { initfile dbfile } {
+proc dbdump_diff { flags initfile dir dbfile } {
source ./include.tcl
set initdump $initfile.dump
set dbdump $dbfile.dump
- set stat [catch {exec $util_path/db_dump -dar -f $initdump \
+ set stat [catch {eval {exec $util_path/db_dump} $flags -f $initdump \
$initfile} ret]
error_check_good dbdump.init $stat 0
# Do a dump without the freelist which should eliminate any
# recovery differences.
- set stat [catch {exec $util_path/db_dump -dar -f $dbdump $dbfile} \
- ret]
+ set stat [catch {eval {exec $util_path/db_dump} $flags -f $dir/$dbdump \
+ $dir/$dbfile} ret]
error_check_good dbdump.db $stat 0
- set stat [filecmp $dbdump $initdump]
+ set stat [filecmp $dir/$dbdump $initdump]
if {$stat == 0} {
return 0
@@ -721,3 +782,105 @@ proc dbdump_diff { initfile dbfile } {
puts "diff: $dbdump $initdump gives:\n$ret"
return 1
}
+
+proc recd007_check { op sub dir dbfile subdb new env oflags } {
+ #
+ # No matter how many subdbs we have, dbtruncate will always
+ # have a file, and if we open our particular db, it should
+ # have no entries.
+ #
+ if { $sub == 0 } {
+ if { $op == "dbremove" } {
+ error_check_good $op:not-exist \
+ [file exists $dir/$dbfile] 0
+ } elseif { $op == "dbrename"} {
+ error_check_good $op:exist \
+ [file exists $dir/$dbfile] 0
+ error_check_good $op:exist2 \
+ [file exists $dir/$dbfile.new] 1
+ } else {
+ error_check_good $op:exist \
+ [file exists $dir/$dbfile] 1
+ set db [eval {berkdb_open} $oflags]
+ error_check_good db_open [is_valid_db $db] TRUE
+ set dbc [$db cursor]
+ error_check_good dbc_open \
+ [is_valid_cursor $dbc $db] TRUE
+ set ret [$dbc get -first]
+ error_check_good dbget1 [llength $ret] 0
+ error_check_good dbc_close [$dbc close] 0
+ error_check_good db_close [$db close] 0
+ }
+ return
+ } else {
+ set t1 $dir/t1
+ #
+ # If we have subdbs, check that all but the last one
+ # are there, and the last one is correctly operated on.
+ #
+ set db [berkdb_open -rdonly -env $env $dbfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ set c [eval {$db cursor}]
+ error_check_good db_cursor [is_valid_cursor $c $db] TRUE
+ set d [$c get -last]
+ if { $op == "dbremove" } {
+ if { $sub == 1 } {
+ error_check_good subdb:rem [llength $d] 0
+ } else {
+ error_check_bad subdb:rem [llength $d] 0
+ set sdb [lindex [lindex $d 0] 0]
+ error_check_bad subdb:rem1 $sdb $subdb
+ }
+ } elseif { $op == "dbrename"} {
+ set sdb [lindex [lindex $d 0] 0]
+ error_check_good subdb:ren $sdb $new
+ if { $sub != 1 } {
+ set d [$c get -prev]
+ error_check_bad subdb:ren [llength $d] 0
+ set sdb [lindex [lindex $d 0] 0]
+ error_check_good subdb:ren1 \
+ [is_substr "new" $sdb] 0
+ }
+ } else {
+ set sdb [lindex [lindex $d 0] 0]
+ set dbt [berkdb_open -rdonly -env $env $dbfile $sdb]
+ error_check_good db_open [is_valid_db $dbt] TRUE
+ set dbc [$dbt cursor]
+ error_check_good dbc_open \
+ [is_valid_cursor $dbc $dbt] TRUE
+ set ret [$dbc get -first]
+ error_check_good dbget2 [llength $ret] 0
+ error_check_good dbc_close [$dbc close] 0
+ error_check_good db_close [$dbt close] 0
+ if { $sub != 1 } {
+ set d [$c get -prev]
+ error_check_bad subdb:ren [llength $d] 0
+ set sdb [lindex [lindex $d 0] 0]
+ set dbt [berkdb_open -rdonly -env $env \
+ $dbfile $sdb]
+ error_check_good db_open [is_valid_db $dbt] TRUE
+ set dbc [$db cursor]
+ error_check_good dbc_open \
+ [is_valid_cursor $dbc $db] TRUE
+ set ret [$dbc get -first]
+ error_check_bad dbget3 [llength $ret] 0
+ error_check_good dbc_close [$dbc close] 0
+ error_check_good db_close [$dbt close] 0
+ }
+ }
+ error_check_good dbcclose [$c close] 0
+ error_check_good db_close [$db close] 0
+ }
+}
+
+proc copy_afterop { dir } {
+ set r [catch { set filecopy [glob $dir/*.afterop] } res]
+ if { $r == 1 } {
+ return
+ }
+ foreach f $filecopy {
+ set orig [string range $f 0 \
+ [expr [string last "." $f] - 1]]
+ catch { file rename -force $f $orig} res
+ }
+}
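
A hedged usage note, not part of the patch: the reworked dbdump_diff now takes the db_dump flags and the target directory explicitly. A minimal sketch of a call site, using the $init_file, $dir and $dbfile names that the recovery tests below pass in, looks like this:

    # Compare the initial copy against the post-recovery file; 0 means the
    # two dumps (taken with the same db_dump flags) are logically equal.
    error_check_good \
        diff(initial,post-recover):diff($init_file,$dir/$dbfile) \
        [dbdump_diff "-dar" $init_file $dir $dbfile] 0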
diff --git a/bdb/test/recd008.tcl b/bdb/test/recd008.tcl
index b75605b0475..548813a403b 100644
--- a/bdb/test/recd008.tcl
+++ b/bdb/test/recd008.tcl
@@ -1,12 +1,12 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: recd008.tcl,v 1.22 2000/12/07 19:13:46 sue Exp $
+# $Id: recd008.tcl,v 1.26 2002/02/25 16:44:26 sandstro Exp $
#
-# Recovery Test 8.
-# Test deeply nested transactions and many-child transactions.
+# TEST recd008
+# TEST Test deeply nested transactions and many-child transactions.
proc recd008 { method {breadth 4} {depth 4} args} {
global kvals
source ./include.tcl
@@ -59,7 +59,7 @@ proc recd008 { method {breadth 4} {depth 4} args} {
set eflags "-mode 0644 -create -txn_max $txn_max \
-txn -home $testdir"
- set env_cmd "berkdb env $eflags"
+ set env_cmd "berkdb_env $eflags"
set dbenv [eval $env_cmd]
error_check_good env_open [is_valid_env $dbenv] TRUE
diff --git a/bdb/test/recd009.tcl b/bdb/test/recd009.tcl
index 2b49437346c..5538d2d7652 100644
--- a/bdb/test/recd009.tcl
+++ b/bdb/test/recd009.tcl
@@ -1,13 +1,12 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1999, 2000
+# Copyright (c) 1999-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: recd009.tcl,v 1.13 2000/12/07 19:13:46 sue Exp $
+# $Id: recd009.tcl,v 1.18 2002/04/01 20:11:44 krinsky Exp $
#
-# Recovery Test 9.
-# Test stability of record numbers across splits
-# and reverse splits and across recovery.
+# TEST recd009
+# TEST Verify record numbering across split/reverse splits and recovery.
proc recd009 { method {select 0} args} {
global fixed_len
source ./include.tcl
@@ -31,11 +30,11 @@ proc recd009 { method {select 0} args} {
puts "\tRecd009.a: Create $method environment and database."
set flags "-create -txn -home $testdir"
- set env_cmd "berkdb env $flags"
+ set env_cmd "berkdb_env $flags"
set dbenv [eval $env_cmd]
error_check_good dbenv [is_valid_env $dbenv] TRUE
- set oflags "-env $dbenv -create -mode 0644 $opts $method"
+ set oflags "-env $dbenv -pagesize 8192 -create -mode 0644 $opts $method"
set db [eval {berkdb_open} $oflags $testfile]
error_check_good dbopen [is_valid_db $db] TRUE
diff --git a/bdb/test/recd010.tcl b/bdb/test/recd010.tcl
index 4fd1aefbb60..2549e03a2c0 100644
--- a/bdb/test/recd010.tcl
+++ b/bdb/test/recd010.tcl
@@ -1,20 +1,15 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1999, 2000
+# Copyright (c) 1999-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: recd010.tcl,v 1.14 2000/12/11 17:24:55 sue Exp $
+# $Id: recd010.tcl,v 1.19 2002/03/15 19:05:07 sue Exp $
#
-# Recovery Test 10.
-# Test stability of btree duplicates across btree off-page dup splits
-# and reverse splits and across recovery.
+# TEST recd010
+# TEST Test stability of btree duplicates across btree off-page dup splits
+# TEST and reverse splits and across recovery.
proc recd010 { method {select 0} args} {
- global fixed_len
- global kvals
- global kvals_dups
- source ./include.tcl
-
- if { [is_dbtree $method] != 1 && [is_ddbtree $method] != 1} {
+ if { [is_btree $method] != 1 } {
puts "Recd010 skipping for method $method."
return
}
@@ -24,11 +19,24 @@ proc recd010 { method {select 0} args} {
puts "Recd010: skipping for specific pagesizes"
return
}
+ set largs $args
+ append largs " -dup "
+ recd010_main $method $select $largs
+ append largs " -dupsort "
+ recd010_main $method $select $largs
+}
- set opts [convert_args $method $args]
+proc recd010_main { method select largs } {
+ global fixed_len
+ global kvals
+ global kvals_dups
+ source ./include.tcl
+
+
+ set opts [convert_args $method $largs]
set method [convert_method $method]
- puts "\tRecd010 ($opts): Test duplicates across splits and recovery"
+ puts "Recd010 ($opts): Test duplicates across splits and recovery"
set testfile recd010.db
env_cleanup $testdir
@@ -41,10 +49,10 @@ proc recd010 { method {select 0} args} {
set data "data"
set key "recd010_key"
- puts "\tRecd010.a: Create $method environment and database."
+ puts "\tRecd010.a: Create environment and database."
set flags "-create -txn -home $testdir"
- set env_cmd "berkdb env $flags"
+ set env_cmd "berkdb_env $flags"
set dbenv [eval $env_cmd]
error_check_good dbenv [is_valid_env $dbenv] TRUE
@@ -69,17 +77,17 @@ proc recd010 { method {select 0} args} {
return
}
set rlist {
- { {recd010_split DB TXNID 1 $method 2 $mkeys}
+ { {recd010_split DB TXNID 1 2 $mkeys}
"Recd010.c: btree split 2 large dups"}
- { {recd010_split DB TXNID 0 $method 2 $mkeys}
+ { {recd010_split DB TXNID 0 2 $mkeys}
"Recd010.d: btree reverse split 2 large dups"}
- { {recd010_split DB TXNID 1 $method 10 $mkeys}
+ { {recd010_split DB TXNID 1 10 $mkeys}
"Recd010.e: btree split 10 dups"}
- { {recd010_split DB TXNID 0 $method 10 $mkeys}
+ { {recd010_split DB TXNID 0 10 $mkeys}
"Recd010.f: btree reverse split 10 dups"}
- { {recd010_split DB TXNID 1 $method 100 $mkeys}
+ { {recd010_split DB TXNID 1 100 $mkeys}
"Recd010.g: btree split 100 dups"}
- { {recd010_split DB TXNID 0 $method 100 $mkeys}
+ { {recd010_split DB TXNID 0 100 $mkeys}
"Recd010.h: btree reverse split 100 dups"}
}
@@ -100,7 +108,7 @@ proc recd010 { method {select 0} args} {
op_recover commit $testdir $env_cmd $testfile $cmd $msg
recd010_check $testdir $testfile $opts commit $reverse $firstkeys
}
- puts "\tRecd010.e: Verify db_printlog can read logfile"
+ puts "\tRecd010.i: Verify db_printlog can read logfile"
set tmpfile $testdir/printlog.out
set stat [catch {exec $util_path/db_printlog -h $testdir \
> $tmpfile} ret]
@@ -178,7 +186,14 @@ proc recd010_check { tdir testfile opts op reverse origdups } {
for {set d [$dbc get -set $key$ki]} { [llength $d] != 0 } {
set d [$dbc get -nextdup]} {
set thisdata [lindex [lindex $d 0] 1]
- error_check_good dup_check $thisdata $data$datacnt
+ if { $datacnt < 10 } {
+ set pdata $data.$ki.00$datacnt
+ } elseif { $datacnt < 100 } {
+ set pdata $data.$ki.0$datacnt
+ } else {
+ set pdata $data.$ki.$datacnt
+ }
+ error_check_good dup_check $thisdata $pdata
incr datacnt
}
error_check_good dup_count $datacnt $numdups
@@ -202,7 +217,7 @@ proc recd010_check { tdir testfile opts op reverse origdups } {
error_check_good db_close [$db close] 0
}
-proc recd010_split { db txn split method nkeys mkeys } {
+proc recd010_split { db txn split nkeys mkeys } {
global errorCode
global kvals
global kvals_dups
@@ -220,7 +235,14 @@ proc recd010_split { db txn split method nkeys mkeys } {
"\tRecd010_split: Add $nkeys keys, with $numdups duplicates each to force split."
for {set k 0} { $k < $nkeys } { incr k } {
for {set i 0} { $i < $numdups } { incr i } {
- set ret [$db put -txn $txn $key$k $data$i]
+ if { $i < 10 } {
+ set pdata $data.$k.00$i
+ } elseif { $i < 100 } {
+ set pdata $data.$k.0$i
+ } else {
+ set pdata $data.$k.$i
+ }
+ set ret [$db put -txn $txn $key$k $pdata]
error_check_good dbput:more $ret 0
}
}
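
An aside on the zero-padding added above: the if/elseif chain pads the duplicate counter to three digits by hand. A sketch of an equivalent, purely hypothetical helper using Tcl's standard format command (it would have to live in the same test file to be picked up):

    proc recd010_pad_data { data k i } {
        # e.g. recd010_pad_data data 3 7 returns "data.3.007", matching the
        # data.$k.00$i / data.$k.0$i / data.$k.$i cases in the hunks above.
        return $data.$k.[format %03d $i]
    }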
diff --git a/bdb/test/recd011.tcl b/bdb/test/recd011.tcl
index a6fc269741b..74108a30650 100644
--- a/bdb/test/recd011.tcl
+++ b/bdb/test/recd011.tcl
@@ -1,12 +1,12 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2000
+# Copyright (c) 2000-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: recd011.tcl,v 11.13 2000/12/06 17:09:54 sue Exp $
+# $Id: recd011.tcl,v 11.19 2002/02/25 16:44:26 sandstro Exp $
#
-# Recovery Test 11.
-# Test recovery to a specific timestamp.
+# TEST recd011
+# TEST Verify that recovery to a specific timestamp works.
proc recd011 { method {niter 200} {ckpt_freq 15} {sleep_time 1} args } {
source ./include.tcl
@@ -29,11 +29,11 @@ proc recd011 { method {niter 200} {ckpt_freq 15} {sleep_time 1} args } {
puts "\tRecd0$tnum.a: Create environment and database."
set flags "-create -txn -home $testdir"
- set env_cmd "berkdb env $flags"
+ set env_cmd "berkdb_env $flags"
set dbenv [eval $env_cmd]
error_check_good dbenv [is_valid_env $dbenv] TRUE
- set oflags "-env $dbenv -create -mode 0644 $args $omethod"
+ set oflags "-auto_commit -env $dbenv -create -mode 0644 $args $omethod"
set db [eval {berkdb_open} $oflags $testfile]
error_check_good dbopen [is_valid_db $db] TRUE
@@ -70,11 +70,11 @@ proc recd011 { method {niter 200} {ckpt_freq 15} {sleep_time 1} args } {
# Now, loop through and recover to each timestamp, verifying the
# expected increment.
puts "\tRecd0$tnum.c: Recover to each timestamp and check."
- for { set i 0 } { $i <= $niter } { incr i } {
+ for { set i $niter } { $i >= 0 } { incr i -1 } {
# Run db_recover.
- berkdb debug_check
set t [clock format $timeof($i) -format "%y%m%d%H%M.%S"]
+ berkdb debug_check
set ret [catch {exec $util_path/db_recover -h $testdir -t $t} r]
error_check_good db_recover($i,$t) $ret 0
@@ -91,7 +91,8 @@ proc recd011 { method {niter 200} {ckpt_freq 15} {sleep_time 1} args } {
# Finally, recover to a time well before the first timestamp
# and well after the last timestamp. The latter should
- # be just like the last timestamp; the former should fail.
+ # be just like the timestamp of the last test performed;
+ # the former should fail.
puts "\tRecd0$tnum.d: Recover to before the first timestamp."
set t [clock format [expr $timeof(0) - 1000] -format "%y%m%d%H%M.%S"]
set ret [catch {exec $util_path/db_recover -h $testdir -t $t} r]
@@ -108,8 +109,8 @@ proc recd011 { method {niter 200} {ckpt_freq 15} {sleep_time 1} args } {
error_check_good db_open(after) [is_valid_db $db] TRUE
set dbt [$db get $key]
- set datum [lindex [lindex $dbt 0] 1]
+ set datum2 [lindex [lindex $dbt 0] 1]
- error_check_good timestamp_recover $datum [pad_data $method $niter]
+ error_check_good timestamp_recover $datum2 $datum
error_check_good db_close [$db close] 0
}
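
A minimal sketch of the timestamp-recovery step recd011 exercises, assuming $timeof($i) holds a [clock seconds] value captured during the run and that the usual harness (include.tcl, testutils.tcl) is loaded:

    # Format the stored time the way db_recover's -t option expects,
    # then roll the environment back to that point.
    set t [clock format $timeof($i) -format "%y%m%d%H%M.%S"]
    set ret [catch {exec $util_path/db_recover -h $testdir -t $t} r]
    error_check_good db_recover($i,$t) $ret 0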
diff --git a/bdb/test/recd012.tcl b/bdb/test/recd012.tcl
index 19dd7b011d1..8231e648588 100644
--- a/bdb/test/recd012.tcl
+++ b/bdb/test/recd012.tcl
@@ -1,12 +1,13 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2000
+# Copyright (c) 2000-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: recd012.tcl,v 11.14 2000/12/11 17:24:55 sue Exp $
+# $Id: recd012.tcl,v 11.27 2002/05/10 00:48:07 margo Exp $
#
-# Recovery Test 12.
-# Test recovery handling of file opens and closes.
+# TEST recd012
+# TEST Test of log file ID management. [#2288]
+# TEST Test recovery handling of file opens and closes.
proc recd012 { method {start 0} \
{niter 49} {noutiter 25} {niniter 100} {ndbs 5} args } {
source ./include.tcl
@@ -24,9 +25,8 @@ proc recd012 { method {start 0} \
puts "Recd012: skipping for specific pagesizes"
return
}
-
+
for { set i $start } { $i <= $niter } { incr i } {
-
env_cleanup $testdir
# For repeatability, we pass in the iteration number
@@ -35,13 +35,13 @@ proc recd012 { method {start 0} \
# This lets us re-run a potentially failing iteration
# without having to start from the beginning and work
# our way to it.
- #
+ #
# The number of databases ranges from 4 to 8 and is
# a function of $niter
-# set ndbs [expr ($i % 5) + 4]
-
+ # set ndbs [expr ($i % 5) + 4]
+
recd012_body \
- $method $ndbs $i $noutiter $niniter $pagesize $tnum $args
+ $method $ndbs $i $noutiter $niniter $pagesize $tnum $args
}
}
@@ -55,8 +55,15 @@ proc recd012_body { method {ndbs 5} iter noutiter niniter psz tnum {largs ""} }
puts "\tRecd0$tnum $method ($largs): Iteration $iter"
puts "\t\tRecd0$tnum.a: Create environment and $ndbs databases."
+ # We run out of lockers during some of the recovery runs, so
+ # we need to make sure that we specify a DB_CONFIG that will
+ # give us enough lockers.
+ set f [open $testdir/DB_CONFIG w]
+ puts $f "set_lk_max_lockers 5000"
+ close $f
+
set flags "-create -txn -home $testdir"
- set env_cmd "berkdb env $flags"
+ set env_cmd "berkdb_env $flags"
error_check_good env_remove [berkdb envremove -home $testdir] 0
set dbenv [eval $env_cmd]
error_check_good dbenv [is_valid_env $dbenv] TRUE
@@ -67,9 +74,12 @@ proc recd012_body { method {ndbs 5} iter noutiter niniter psz tnum {largs ""} }
# Initialize database that keeps track of number of open files (so
# we don't run out of descriptors).
set ofname of.db
- set ofdb [berkdb_open -env $dbenv\
+ set txn [$dbenv txn]
+ error_check_good open_txn_begin [is_valid_txn $txn $dbenv] TRUE
+ set ofdb [berkdb_open -env $dbenv -txn $txn\
-create -dup -mode 0644 -btree -pagesize 512 $ofname]
error_check_good of_open [is_valid_db $ofdb] TRUE
+ error_check_good open_txn_commit [$txn commit] 0
set oftxn [$dbenv txn]
error_check_good of_txn [is_valid_txn $oftxn $dbenv] TRUE
error_check_good of_put [$ofdb put -txn $oftxn $recd012_ofkey 1] 0
@@ -80,9 +90,10 @@ proc recd012_body { method {ndbs 5} iter noutiter niniter psz tnum {largs ""} }
# Create ndbs databases to work in, and a file listing db names to
# pick from.
- set f [open TESTDIR/dblist w]
- set oflags \
- "-env $dbenv -create -mode 0644 -pagesize $psz $largs $omethod"
+ set f [open $testdir/dblist w]
+
+ set oflags "-auto_commit -env $dbenv \
+ -create -mode 0644 -pagesize $psz $largs $omethod"
for { set i 0 } { $i < $ndbs } { incr i } {
# 50-50 chance of being a subdb, unless we're a queue.
if { [berkdb random_int 0 1] || [is_queue $method] } {
@@ -96,18 +107,17 @@ proc recd012_body { method {ndbs 5} iter noutiter niniter psz tnum {largs ""} }
set db [eval berkdb_open $oflags $dbname]
error_check_good db($i) [is_valid_db $db] TRUE
error_check_good db($i)_close [$db close] 0
- }
+ }
close $f
-
error_check_good env_close [$dbenv close] 0
-
+
# Now we get to the meat of things. Our goal is to do some number
# of opens, closes, updates, and shutdowns (simulated here by a
# close of all open handles and a close/reopen of the environment,
# with or without an envremove), matching the regular expression
#
# ((O[OUC]+S)+R+V)
- #
+ #
# We'll repeat the inner + a random number up to $niniter times,
# and the outer + a random number up to $noutiter times.
#
@@ -116,23 +126,22 @@ proc recd012_body { method {ndbs 5} iter noutiter niniter psz tnum {largs ""} }
# all handles properly. The environment will be left lying around
# before we run recovery 50% of the time.
set out [berkdb random_int 1 $noutiter]
- puts "\t\tRecd0$tnum.b: Performing $out recoveries of up to $niniter\
- ops."
+ puts \
+ "\t\tRecd0$tnum.b: Performing $out recoveries of up to $niniter ops."
for { set i 0 } { $i < $out } { incr i } {
set child [open "|$tclsh_path" w]
-
- # For performance, don't source everything,
+
+ # For performance, don't source everything,
# just what we'll need.
puts $child "load $tcllib"
puts $child "set fixed_len $fixed_len"
- puts $child "source ../test/testutils.tcl"
- puts $child "source ../test/recd0$tnum.tcl"
+ puts $child "source $src_root/test/testutils.tcl"
+ puts $child "source $src_root/test/recd0$tnum.tcl"
set rnd [expr $iter * 10000 + $i * 100 + $rand_init]
# Go.
- # puts "recd012_dochild {$env_cmd} $rnd $i $niniter\
- # $ndbs $tnum $method $ofname $largs"
+ berkdb debug_check
puts $child "recd012_dochild {$env_cmd} $rnd $i $niniter\
$ndbs $tnum $method $ofname $largs"
close $child
@@ -140,35 +149,35 @@ proc recd012_body { method {ndbs 5} iter noutiter niniter psz tnum {largs ""} }
# Run recovery 0-3 times.
set nrecs [berkdb random_int 0 3]
for { set j 0 } { $j < $nrecs } { incr j } {
+ berkdb debug_check
set ret [catch {exec $util_path/db_recover \
-h $testdir} res]
- if { $ret != 0 } {
+ if { $ret != 0 } {
puts "FAIL: db_recover returned with nonzero\
exit status, output as follows:"
file mkdir /tmp/12out
set fd [open /tmp/12out/[pid] w]
- puts $fd $res
+ puts $fd $res
close $fd
}
error_check_good recover($j) $ret 0
}
-
}
- # Run recovery one final time; it doesn't make sense to
+ # Run recovery one final time; it doesn't make sense to
# check integrity if we do not.
set ret [catch {exec $util_path/db_recover -h $testdir} res]
- if { $ret != 0 } {
+ if { $ret != 0 } {
puts "FAIL: db_recover returned with nonzero\
exit status, output as follows:"
- puts $res
+ puts $res
}
# Make sure each datum is the correct filename.
puts "\t\tRecd0$tnum.c: Checking data integrity."
- set dbenv [berkdb env -create -private -home $testdir]
+ set dbenv [berkdb_env -create -private -home $testdir]
error_check_good env_open_integrity [is_valid_env $dbenv] TRUE
- set f [open TESTDIR/dblist r]
+ set f [open $testdir/dblist r]
set i 0
while { [gets $f dbinfo] > 0 } {
set db [eval berkdb_open -env $dbenv $dbinfo]
@@ -188,21 +197,21 @@ proc recd012_body { method {ndbs 5} iter noutiter niniter psz tnum {largs ""} }
close $f
error_check_good env_close_integrity [$dbenv close] 0
-
# Verify
- error_check_good verify [verify_dir $testdir "\t\tRecd0$tnum.d: "] 0
+ error_check_good verify \
+ [verify_dir $testdir "\t\tRecd0$tnum.d: " 0 0 1] 0
}
-
proc recd012_dochild { env_cmd rnd outiter niniter ndbs tnum method\
ofname args } {
global recd012_ofkey
+ source ./include.tcl
if { [is_record_based $method] } {
set keybase ""
} else {
set keybase .[repeat abcdefghijklmnopqrstuvwxyz 4]
}
-
+
# Initialize our random number generator, repeatably based on an arg.
berkdb srand $rnd
@@ -212,7 +221,11 @@ proc recd012_dochild { env_cmd rnd outiter niniter ndbs tnum method\
# Find out how many databases appear to be open in the log--we
# don't want recovery to run out of filehandles.
- set ofdb [berkdb_open -env $dbenv $ofname]
+ set txn [$dbenv txn]
+ error_check_good child_txn_begin [is_valid_txn $txn $dbenv] TRUE
+ set ofdb [berkdb_open -env $dbenv -txn $txn $ofname]
+ error_check_good child_txn_commit [$txn commit] 0
+
set oftxn [$dbenv txn]
error_check_good of_txn [is_valid_txn $oftxn $dbenv] TRUE
set dbt [$ofdb get -txn $oftxn $recd012_ofkey]
@@ -222,14 +235,14 @@ proc recd012_dochild { env_cmd rnd outiter niniter ndbs tnum method\
error_check_good of_commit [$oftxn commit] 0
# Read our dbnames
- set f [open TESTDIR/dblist r]
+ set f [open $testdir/dblist r]
set i 0
while { [gets $f dbname($i)] > 0 } {
incr i
}
close $f
- # We now have $ndbs extant databases.
+ # We now have $ndbs extant databases.
# Open one of them, just to get us started.
set opendbs {}
set oflags "-env $dbenv $args"
@@ -254,14 +267,13 @@ proc recd012_dochild { env_cmd rnd outiter niniter ndbs tnum method\
set num_open [llength $opendbs]
if { $num_open == 0 } {
# If none are open, do an open first.
-
recd012_open
}
set n [berkdb random_int 0 [expr $num_open - 1]]
set pair [lindex $opendbs $n]
set udb [lindex $pair 0]
set uname [lindex $pair 1]
-
+
set key [berkdb random_int 1000 1999]$keybase
set data [chop_data $method $uname]
error_check_good put($uname,$udb,$key,$data) \
@@ -273,12 +285,11 @@ proc recd012_dochild { env_cmd rnd outiter niniter ndbs tnum method\
[$curtxn commit] 0
set curtxn [$dbenv txn]
error_check_good txn_reopen \
- [is_valid_txn $curtxn $dbenv] TRUE
+ [is_valid_txn $curtxn $dbenv] TRUE
}
}
2 {
# Close.
-
if { [llength $opendbs] == 0 } {
# If none are open, open instead of closing.
recd012_open
@@ -286,28 +297,26 @@ proc recd012_dochild { env_cmd rnd outiter niniter ndbs tnum method\
}
# Commit curtxn first, lest we self-deadlock.
- error_check_good txn_recommit \
- [$curtxn commit] 0
+ error_check_good txn_recommit [$curtxn commit] 0
# Do it.
set which [berkdb random_int 0 \
[expr [llength $opendbs] - 1]]
-
+
set db [lindex [lindex $opendbs $which] 0]
error_check_good db_choice [is_valid_db $db] TRUE
global errorCode errorInfo
error_check_good db_close \
[[lindex [lindex $opendbs $which] 0] close] 0
+
set opendbs [lreplace $opendbs $which $which]
incr nopenfiles -1
-
-
+
# Reopen txn.
set curtxn [$dbenv txn]
error_check_good txn_reopen \
[is_valid_txn $curtxn $dbenv] TRUE
-
}
}
@@ -335,12 +344,12 @@ proc recd012_dochild { env_cmd rnd outiter niniter ndbs tnum method\
[$ofdb put -txn $oftxn $recd012_ofkey $nopenfiles] 0
error_check_good of_commit [$oftxn commit] 0
error_check_good ofdb_close [$ofdb close] 0
-}
+}
proc recd012_open { } {
- # This is basically an inline and has to modify curtxn,
+ # This is basically an inline and has to modify curtxn,
# so use upvars.
- upvar curtxn curtxn
+ upvar curtxn curtxn
upvar ndbs ndbs
upvar dbname dbname
upvar dbenv dbenv
@@ -361,21 +370,21 @@ proc recd012_open { } {
# Do it.
set which [berkdb random_int 0 [expr $ndbs - 1]]
- set db [eval berkdb_open \
- $oflags $dbname($which)]
+
+ set db [eval berkdb_open -auto_commit $oflags $dbname($which)]
+
lappend opendbs [list $db $dbname($which)]
# Reopen txn.
set curtxn [$dbenv txn]
- error_check_good txn_reopen \
- [is_valid_txn $curtxn $dbenv] TRUE
+ error_check_good txn_reopen [is_valid_txn $curtxn $dbenv] TRUE
incr nopenfiles
}
# Update the database containing the number of files that db_recover has
# to contend with--we want to avoid letting it run out of file descriptors.
-# We do this by keeping track of the number of unclosed opens since the
+# We do this by keeping track of the number of unclosed opens since the
# checkpoint before last.
# $recd012_ofkey stores this current value; the two dups available
# at $recd012_ofckptkey store the number of opens since the last checkpoint
@@ -399,7 +408,7 @@ proc recd012_nopenfiles_ckpt { env db nopenfiles } {
error_check_good del [$dbc del] 0
set nopenfiles [expr $nopenfiles - $discard]
-
+
# Get the next ckpt value
set dbt [$dbc get -nextdup]
error_check_good set2 [llength $dbt] 1
@@ -410,10 +419,10 @@ proc recd012_nopenfiles_ckpt { env db nopenfiles } {
# Put this new number at the end of the dup set.
error_check_good put [$dbc put -keylast $recd012_ofckptkey $sincelast] 0
-
+
# We should never deadlock since we're the only one in this db.
error_check_good dbc_close [$dbc close] 0
- error_check_good txn_commit [$txn commit] 0
+ error_check_good txn_commit [$txn commit] 0
return $nopenfiles
}
diff --git a/bdb/test/recd013.tcl b/bdb/test/recd013.tcl
index d134d487f1e..e08654f34e0 100644
--- a/bdb/test/recd013.tcl
+++ b/bdb/test/recd013.tcl
@@ -1,22 +1,22 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2000
+# Copyright (c) 2000-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: recd013.tcl,v 11.10 2000/12/11 17:24:55 sue Exp $
+# $Id: recd013.tcl,v 11.18 2002/02/25 16:44:27 sandstro Exp $
#
-# Recovery Test 13.
-# Smoke test of aborted cursor adjustments.
+# TEST recd013
+# TEST Test of cursor adjustment on child transaction aborts. [#2373]
#
# XXX
# Other tests that cover more specific variants of the same issue
# are in the access method tests for now. This is probably wrong; we
# put this one here because they're closely based on and intertwined
# with other, non-transactional cursor stability tests that are among
-# the access method tests, and because we need at least one test to
+# the access method tests, and because we need at least one test to
# fit under recd and keep logtrack from complaining. We'll sort out the mess
# later; the important thing, for now, is that everything that needs to gets
-# tested. (This really shouldn't be under recd at all, since it doesn't
+# tested. (This really shouldn't be under recd at all, since it doesn't
# run recovery!)
proc recd013 { method { nitems 100 } args } {
source ./include.tcl
@@ -48,11 +48,12 @@ proc recd013 { method { nitems 100 } args } {
Create environment, database, and parent transaction."
set flags "-create -txn -home $testdir"
- set env_cmd "berkdb env $flags"
+ set env_cmd "berkdb_env $flags"
set env [eval $env_cmd]
error_check_good dbenv [is_valid_env $env] TRUE
- set oflags "-env $env -create -mode 0644 -pagesize $pgsz $args $omethod"
+ set oflags \
+ "-auto_commit -env $env -create -mode 0644 -pagesize $pgsz $args $omethod"
set db [eval {berkdb_open} $oflags $testfile]
error_check_good dbopen [is_valid_db $db] TRUE
@@ -63,19 +64,44 @@ proc recd013 { method { nitems 100 } args } {
for { set i 1 } { $i <= 2 * $nitems } { incr i 2 } {
set key $keybase$i
set data [chop_data $method $i$alphabet]
+
+ # First, try to put the item in a child transaction,
+ # then abort and verify all the cursors we've done up until
+ # now.
+ set ctxn [$env txn -parent $txn]
+ error_check_good child_txn($i) [is_valid_txn $ctxn $env] TRUE
+ error_check_good fake_put($i) [$db put -txn $ctxn $key $data] 0
+ error_check_good ctxn_abort($i) [$ctxn abort] 0
+ for { set j 1 } { $j < $i } { incr j 2 } {
+ error_check_good dbc_get($j) [$dbc($j) get -current] \
+ [list [list $keybase$j \
+ [pad_data $method $j$alphabet]]]
+ }
+
+ # Then put for real.
error_check_good init_put($i) [$db put -txn $txn $key $data] 0
+
+ # Set a cursor of the parent txn to each item.
+ set dbc($i) [$db cursor -txn $txn]
+ error_check_good dbc_getset($i) \
+ [$dbc($i) get -set $key] \
+ [list [list $keybase$i [pad_data $method $i$alphabet]]]
+
+ # And verify all the cursors, including the one we just
+ # created.
+ for { set j 1 } { $j <= $i } { incr j 2 } {
+ error_check_good dbc_get($j) [$dbc($j) get -current] \
+ [list [list $keybase$j \
+ [pad_data $method $j$alphabet]]]
+ }
}
- error_check_good init_txn_commit [$txn commit] 0
- # Create an initial txn; set a cursor of that txn to each item.
- set txn [$env txn]
- error_check_good txn [is_valid_txn $txn $env] TRUE
+ puts "\t\tRecd0$tnum.a.1: Verify cursor stability after init."
for { set i 1 } { $i <= 2 * $nitems } { incr i 2 } {
- set dbc($i) [$db cursor -txn $txn]
- error_check_good dbc_getset($i) [$dbc($i) get -set $keybase$i] \
+ error_check_good dbc_get($i) [$dbc($i) get -current] \
[list [list $keybase$i [pad_data $method $i$alphabet]]]
}
-
+
puts "\tRecd0$tnum.b: Put test."
puts "\t\tRecd0$tnum.b.1: Put items."
set ctxn [$env txn -parent $txn]
@@ -99,7 +125,7 @@ proc recd013 { method { nitems 100 } args } {
error_check_good curs_close [$curs close] 0
}
}
-
+
puts "\t\tRecd0$tnum.b.2: Verify cursor stability after abort."
error_check_good ctxn_abort [$ctxn abort] 0
@@ -122,7 +148,7 @@ proc recd013 { method { nitems 100 } args } {
error_check_good db_verify \
[verify_dir $testdir "\t\tRecd0$tnum.b.3: "] 0
- # Now put back all the even records, this time in the parent.
+ # Now put back all the even records, this time in the parent.
# Commit and re-begin the transaction so we can abort and
# get back to a nice full database.
for { set i 2 } { $i <= 2 * $nitems } { incr i 2 } {
@@ -135,9 +161,9 @@ proc recd013 { method { nitems 100 } args } {
error_check_good txn [is_valid_txn $txn $env] TRUE
# Delete test. Set a cursor to each record. Delete the even ones
- # in the parent and check cursor stability. Then open a child
+ # in the parent and check cursor stability. Then open a child
# transaction, and delete the odd ones. Verify that the database
- # is empty
+ # is empty.
puts "\tRecd0$tnum.c: Delete test."
unset dbc
@@ -149,8 +175,9 @@ proc recd013 { method { nitems 100 } args } {
error_check_good dbc_getset($i) [$dbc($i) get -set $keybase$i] \
[list [list $keybase$i [pad_data $method $i$alphabet]]]
}
-
- puts "\t\tRecd0$tnum.c.1: Delete even items in parent txn."
+
+ puts "\t\tRecd0$tnum.c.1: Delete even items in child txn and abort."
+
if { [is_rrecno $method] != 1 } {
set init 2
set bound [expr 2 * $nitems]
@@ -162,9 +189,25 @@ proc recd013 { method { nitems 100 } args } {
set bound [expr $nitems + 1]
set step 1
}
+
+ set ctxn [$env txn -parent $txn]
for { set i $init } { $i <= $bound } { incr i $step } {
- error_check_good del($i) [$db del -txn $txn $keybase$i] 0
+ error_check_good del($i) [$db del -txn $ctxn $keybase$i] 0
}
+ error_check_good ctxn_abort [$ctxn abort] 0
+
+ # Verify that no items are deleted.
+ for { set i 1 } { $i <= 2 * $nitems } { incr i } {
+ error_check_good dbc_get($i) [$dbc($i) get -current] \
+ [list [list $keybase$i [pad_data $method $i$alphabet]]]
+ }
+
+ puts "\t\tRecd0$tnum.c.2: Delete even items in child txn and commit."
+ set ctxn [$env txn -parent $txn]
+ for { set i $init } { $i <= $bound } { incr i $step } {
+ error_check_good del($i) [$db del -txn $ctxn $keybase$i] 0
+ }
+ error_check_good ctxn_commit [$ctxn commit] 0
# Verify that even items are deleted and odd items are not.
for { set i 1 } { $i <= 2 * $nitems } { incr i 2 } {
@@ -181,10 +224,10 @@ proc recd013 { method { nitems 100 } args } {
[list [list "" ""]]
}
- puts "\t\tRecd0$tnum.c.2: Delete odd items in child txn."
+ puts "\t\tRecd0$tnum.c.3: Delete odd items in child txn."
set ctxn [$env txn -parent $txn]
-
+
for { set i 1 } { $i <= 2 * $nitems } { incr i 2 } {
if { [is_rrecno $method] != 1 } {
set j $i
@@ -196,14 +239,14 @@ proc recd013 { method { nitems 100 } args } {
}
error_check_good del($i) [$db del -txn $ctxn $keybase$j] 0
}
-
+
# Verify that everyone's deleted.
for { set i 1 } { $i <= 2 * $nitems } { incr i } {
error_check_good get_deleted($i) \
[llength [$db get -txn $ctxn $keybase$i]] 0
}
- puts "\t\tRecd0$tnum.c.3: Verify cursor stability after abort."
+ puts "\t\tRecd0$tnum.c.4: Verify cursor stability after abort."
error_check_good ctxn_abort [$ctxn abort] 0
# Verify that even items are deleted and odd items are not.
@@ -229,7 +272,7 @@ proc recd013 { method { nitems 100 } args } {
# Sync and verify.
error_check_good db_sync [$db sync] 0
error_check_good db_verify \
- [verify_dir $testdir "\t\tRecd0$tnum.c.4: "] 0
+ [verify_dir $testdir "\t\tRecd0$tnum.c.5: "] 0
puts "\tRecd0$tnum.d: Clean up."
error_check_good txn_commit [$txn commit] 0
@@ -238,7 +281,7 @@ proc recd013 { method { nitems 100 } args } {
error_check_good verify_dir \
[verify_dir $testdir "\t\tRecd0$tnum.d.1: "] 0
- if { $log_log_record_types == 1 } {
+ if { $log_log_record_types == 1 } {
logtrack_read $testdir
}
}
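
The cursor-stability pattern recd013 repeats throughout, pulled out as a sketch. The $oldkey, $olddata, $newkey and $newdata names are hypothetical placeholders, and the snippet assumes an open $env, $db and parent $txn as in the test:

    # Position a cursor owned by the parent transaction on existing data.
    set dbc [$db cursor -txn $txn]
    error_check_good dbc_set [$dbc get -set $oldkey] \
        [list [list $oldkey [pad_data $method $olddata]]]

    # Put in a child transaction, then abort the child.
    set ctxn [$env txn -parent $txn]
    error_check_good child_txn [is_valid_txn $ctxn $env] TRUE
    error_check_good fake_put [$db put -txn $ctxn $newkey $newdata] 0
    error_check_good ctxn_abort [$ctxn abort] 0

    # The parent's cursor must be undisturbed by the aborted child.
    error_check_good dbc_current [$dbc get -current] \
        [list [list $oldkey [pad_data $method $olddata]]]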
diff --git a/bdb/test/recd014.tcl b/bdb/test/recd014.tcl
index 83b3920de9b..6796341dca2 100644
--- a/bdb/test/recd014.tcl
+++ b/bdb/test/recd014.tcl
@@ -1,16 +1,14 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1999, 2000
+# Copyright (c) 1999-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: recd014.tcl,v 1.9 2001/01/11 17:16:04 sue Exp $
+# $Id: recd014.tcl,v 1.19 2002/08/15 19:21:24 sandstro Exp $
#
-# Recovery Test 14.
-# This is a recovery test for create/delete of queue extents. We have
-# hooks in the database so that we can abort the process at various
-# points and make sure that the extent file does or does not exist. We
-# then need to recover and make sure the file is correctly existing
-# or not, as the case may be.
+# TEST recd014
+# TEST This is a recovery test for create/delete of queue extents. We
+# TEST abort the operation at various points, then recover and make sure
+# TEST the extent file correctly exists or does not, as the case may be.
proc recd014 { method args} {
global fixed_len
source ./include.tcl
@@ -51,7 +49,7 @@ proc recd014 { method args} {
set flags "-create -txn -home $testdir"
puts "\tRecd014.a: creating environment"
- set env_cmd "berkdb env $flags"
+ set env_cmd "berkdb_env $flags"
puts "\tRecd014.b: Create test commit"
ext_recover_create $testdir $env_cmd $omethod \
@@ -61,21 +59,14 @@ proc recd014 { method args} {
$opts $testfile abort
puts "\tRecd014.c: Consume test commit"
- ext_recover_delete $testdir $env_cmd $omethod \
- $opts $testfile consume commit
+ ext_recover_consume $testdir $env_cmd $omethod \
+ $opts $testfile commit
puts "\tRecd014.c: Consume test abort"
- ext_recover_delete $testdir $env_cmd $omethod \
- $opts $testfile consume abort
-
- puts "\tRecd014.d: Delete test commit"
- ext_recover_delete $testdir $env_cmd $omethod \
- $opts $testfile delete commit
- puts "\tRecd014.d: Delete test abort"
- ext_recover_delete $testdir $env_cmd $omethod \
- $opts $testfile delete abort
+ ext_recover_consume $testdir $env_cmd $omethod \
+ $opts $testfile abort
set fixed_len $orig_fixed_len
- puts "\tRecd014.e: Verify db_printlog can read logfile"
+ puts "\tRecd014.d: Verify db_printlog can read logfile"
set tmpfile $testdir/printlog.out
set stat [catch {exec $util_path/db_printlog -h $testdir \
> $tmpfile} ret]
@@ -105,7 +96,11 @@ proc ext_recover_create { dir env_cmd method opts dbfile txncmd } {
set t [$env txn]
error_check_good txn_begin [is_valid_txn $t $env] TRUE
- set ret [catch {eval {berkdb_open} $oflags} db]
+ set ret [catch {eval {berkdb_open} -txn $t $oflags} db]
+ error_check_good txn_commit [$t commit] 0
+
+ set t [$env txn]
+ error_check_good txn_begin [is_valid_txn $t $env] TRUE
#
# The command to execute to create an extent is a put.
@@ -123,7 +118,7 @@ proc ext_recover_create { dir env_cmd method opts dbfile txncmd } {
puts "\t\tSyncing"
error_check_good db_sync [$db sync] 0
- catch { file copy -force $dir/$dbfile $dir/$dbfile.afterop } res
+ catch { file copy -force $dir/$dbfile $dir/$dbfile.afterop } res
copy_extent_file $dir $dbfile afterop
error_check_good txn_$txncmd:$t [$t $txncmd] 0
@@ -149,7 +144,10 @@ proc ext_recover_create { dir env_cmd method opts dbfile txncmd } {
catch { file copy -force $dir/$dbfile $init_file } res
copy_extent_file $dir $dbfile init
}
+ set t [$env txn]
+ error_check_good txn_begin [is_valid_txn $t $env] TRUE
error_check_good db_close [$db close] 0
+ error_check_good txn_commit [$t commit] 0
error_check_good env_close [$env close] 0
#
@@ -241,7 +239,7 @@ proc ext_create_check { dir txncmd init_file dbfile oflags putrecno } {
#
error_check_good \
diff(initial,post-recover2):diff($init_file,$dir/$dbfile) \
- [dbdump_diff $init_file $dir/$dbfile] 0
+ [dbdump_diff "-dar" $init_file $dir $dbfile] 0
} else {
#
# Operation aborted. The file is there, but make
@@ -255,8 +253,7 @@ proc ext_create_check { dir txncmd init_file dbfile oflags putrecno } {
}
}
-
-proc ext_recover_delete { dir env_cmd method opts dbfile op txncmd} {
+proc ext_recover_consume { dir env_cmd method opts dbfile txncmd} {
global log_log_record_types
global alphabet
source ./include.tcl
@@ -269,55 +266,52 @@ proc ext_recover_delete { dir env_cmd method opts dbfile op txncmd} {
env_cleanup $dir
# Open the environment and set the copy/abort locations
set env [eval $env_cmd]
-
- set oflags "-create $method -mode 0644 -pagesize 512 \
+
+ set oflags "-create -auto_commit $method -mode 0644 -pagesize 512 \
-env $env $opts $dbfile"
-
+
#
# Open our db, add some data, close and copy as our
# init file.
#
set db [eval {berkdb_open} $oflags]
error_check_good db_open [is_valid_db $db] TRUE
-
+
set extnum 0
set data [chop_data $method [replicate $alphabet 512]]
set txn [$env txn]
error_check_good txn_begin [is_valid_txn $txn $env] TRUE
- set putrecno [$db put -append $data]
+ set putrecno [$db put -txn $txn -append $data]
error_check_good db_put $putrecno 1
error_check_good commit [$txn commit] 0
error_check_good db_close [$db close] 0
-
+
puts "\t\tExecuting command"
-
+
set init_file $dir/$dbfile.init
catch { file copy -force $dir/$dbfile $init_file } res
copy_extent_file $dir $dbfile init
-
+
#
# If we don't abort, then we expect success.
# If we abort, we expect no file removed until recovery is run.
#
set db [eval {berkdb_open} $oflags]
error_check_good db_open [is_valid_db $db] TRUE
-
+
set t [$env txn]
error_check_good txn_begin [is_valid_txn $t $env] TRUE
- if { [string compare $op "delete"] == 0 } {
- set dbcmd "$db del -txn $t $putrecno"
- } else {
- set dbcmd "$db get -txn $t -consume"
- }
+ set dbcmd "$db get -txn $t -consume"
set ret [eval $dbcmd]
error_check_good db_sync [$db sync] 0
- catch { file copy -force $dir/$dbfile $dir/$dbfile.afterop } res
+ catch { file copy -force $dir/$dbfile $dir/$dbfile.afterop } res
copy_extent_file $dir $dbfile afterop
error_check_good txn_$txncmd:$t [$t $txncmd] 0
+ error_check_good db_sync [$db sync] 0
set dbq [make_ext_filename $dir $dbfile $extnum]
if {$txncmd == "abort"} {
#
@@ -330,20 +324,10 @@ proc ext_recover_delete { dir env_cmd method opts dbfile op txncmd} {
# Since we aborted the txn, we should be able
# to get to our original entry.
#
- error_check_good post$op.1 [file exists $dbq] 1
-
- set xdb [eval {berkdb_open} $oflags]
- error_check_good db_open [is_valid_db $xdb] TRUE
- set kd [$xdb get $putrecno]
- set key [lindex [lindex $kd 0] 0]
- error_check_good dbget_key $key $putrecno
- set retdata [lindex [lindex $kd 0] 1]
- error_check_good dbget_data $data $retdata
- error_check_good db_close [$xdb close] 0
-
+ error_check_good postconsume.1 [file exists $dbq] 1
error_check_good \
- diff(init,post$op.2):diff($init_file,$dir/$dbfile)\
- [dbdump_diff $init_file $dir/$dbfile] 0
+ diff(init,postconsume.2):diff($init_file,$dir/$dbfile)\
+ [dbdump_diff "-dar" $init_file $dir $dbfile] 0
} else {
#
# Operation was committed, verify it does
@@ -353,14 +337,8 @@ proc ext_recover_delete { dir env_cmd method opts dbfile op txncmd} {
#
# Check file existence. Consume operations remove
# the extent when we move off, which we should have
- # done. Delete operations won't remove the extent
- # until we run recovery.
- #
- if { [string compare $op "delete"] == 0 } {
- error_check_good ${op}_exists [file exists $dbq] 1
- } else {
- error_check_good ${op}_exists [file exists $dbq] 0
- }
+ # done.
+ error_check_good consume_exists [file exists $dbq] 0
}
error_check_good db_close [$db close] 0
error_check_good env_close [$env close] 0
@@ -384,7 +362,7 @@ proc ext_recover_delete { dir env_cmd method opts dbfile op txncmd} {
#
error_check_good \
diff(initial,post-recover1):diff($init_file,$dir/$dbfile) \
- [dbdump_diff $init_file $dir/$dbfile] 0
+ [dbdump_diff "-dar" $init_file $dir $dbfile] 0
} else {
#
# Operation was committed, verify it does
@@ -396,7 +374,7 @@ proc ext_recover_delete { dir env_cmd method opts dbfile op txncmd} {
#
# Run recovery here. Re-do the operation.
- # Verify that the file doesn't exist
+ # Verify that the file doesn't exist
# (if we committed) or change (if we aborted)
# when we are done.
#
@@ -418,14 +396,14 @@ proc ext_recover_delete { dir env_cmd method opts dbfile op txncmd} {
#
error_check_good \
diff(initial,post-recover1):diff($init_file,$dir/$dbfile) \
- [dbdump_diff $init_file $dir/$dbfile] 0
+ [dbdump_diff "-dar" $init_file $dir $dbfile] 0
} else {
#
# Operation was committed, verify it does
# not exist. Both operations should result
# in no file existing now that we've run recovery.
#
- error_check_good after_recover1 [file exists $dbq] 0
+ error_check_good after_recover2 [file exists $dbq] 0
}
#
@@ -456,12 +434,12 @@ proc ext_recover_delete { dir env_cmd method opts dbfile op txncmd} {
#
error_check_good \
diff(initial,post-recover2):diff($init_file,$dir/$dbfile) \
- [dbdump_diff $init_file $dir/$dbfile] 0
+ [dbdump_diff "-dar" $init_file $dir $dbfile] 0
} else {
#
# Operation was committed, verify it still does
# not exist.
#
- error_check_good after_recover2 [file exists $dbq] 0
+ error_check_good after_recover3 [file exists $dbq] 0
}
}
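
A condensed sketch of the extent-file check recd014 performs after the consume; make_ext_filename and the $dir, $dbfile, $extnum and $txncmd names come from the surrounding proc:

    # Extent 0 should survive an aborted consume but be gone after a
    # committed one, since the consume moves the head off the extent.
    set dbq [make_ext_filename $dir $dbfile $extnum]
    if { $txncmd == "abort" } {
        error_check_good postconsume.1 [file exists $dbq] 1
    } else {
        error_check_good consume_exists [file exists $dbq] 0
    }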
diff --git a/bdb/test/recd015.tcl b/bdb/test/recd015.tcl
new file mode 100644
index 00000000000..8c3ad612419
--- /dev/null
+++ b/bdb/test/recd015.tcl
@@ -0,0 +1,160 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: recd015.tcl,v 1.13 2002/09/05 17:23:06 sandstro Exp $
+#
+# TEST recd015
+# TEST This is a recovery test for testing lots of prepared txns.
+# TEST It forces txn_recover to be called first with the DB_FIRST
+# TEST flag and then with DB_NEXT.
+proc recd015 { method args } {
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ puts "Recd015: $method ($args) prepared txns test"
+
+ # Create the database and environment.
+
+ set numtxns 1
+ set testfile NULL
+
+ set env_cmd "berkdb_env -create -txn -home $testdir"
+ set msg "\tRecd015.a"
+ puts "$msg Simple test to prepare $numtxns txn "
+ foreach op { abort commit discard } {
+ env_cleanup $testdir
+ recd015_body $env_cmd $testfile $numtxns $msg $op
+ }
+
+ #
+ # Now test large numbers of prepared txns to test DB_NEXT
+ # on txn_recover.
+ #
+ set numtxns 250
+ set testfile recd015.db
+ set txnmax [expr $numtxns + 5]
+ #
+ # For this test we create our database ahead of time so that we
+ # don't need to send methods and args to the script.
+ #
+ env_cleanup $testdir
+ set env_cmd "berkdb_env -create -txn_max $txnmax -txn -home $testdir"
+ set env [eval $env_cmd]
+ error_check_good dbenv [is_valid_env $env] TRUE
+ set db [eval {berkdb_open -create} $omethod -env $env $args $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ error_check_good dbclose [$db close] 0
+ error_check_good envclose [$env close] 0
+
+ set msg "\tRecd015.b"
+ puts "$msg Large test to prepare $numtxns txn "
+ foreach op { abort commit discard } {
+ recd015_body $env_cmd $testfile $numtxns $msg $op
+ }
+
+ set stat [catch {exec $util_path/db_printlog -h $testdir \
+ > $testdir/LOG } ret]
+ error_check_good db_printlog $stat 0
+ fileremove $testdir/LOG
+}
+
+proc recd015_body { env_cmd testfile numtxns msg op } {
+ source ./include.tcl
+
+ sentinel_init
+ set gidf $testdir/gidfile
+ fileremove -f $gidf
+ set pidlist {}
+ puts "$msg.0: Executing child script to prepare txns"
+ berkdb debug_check
+ set p [exec $tclsh_path $test_path/wrap.tcl recd15scr.tcl \
+ $testdir/recdout $env_cmd $testfile $gidf $numtxns &]
+
+ lappend pidlist $p
+ watch_procs $pidlist 5
+ set f1 [open $testdir/recdout r]
+ set r [read $f1]
+ puts $r
+ close $f1
+ fileremove -f $testdir/recdout
+
+ berkdb debug_check
+ puts -nonewline "$msg.1: Running recovery ... "
+ flush stdout
+ berkdb debug_check
+ set env [eval $env_cmd -recover]
+ error_check_good dbenv-recover [is_valid_env $env] TRUE
+ puts "complete"
+
+ puts "$msg.2: getting txns from txn_recover"
+ set txnlist [$env txn_recover]
+ error_check_good txnlist_len [llength $txnlist] $numtxns
+
+ set gfd [open $gidf r]
+ set i 0
+ while { [gets $gfd gid] != -1 } {
+ set gids($i) $gid
+ incr i
+ }
+ close $gfd
+ #
+ # Make sure we have as many as we expect
+ error_check_good num_gids $i $numtxns
+
+ set i 0
+ puts "$msg.3: comparing GIDs and $op txns"
+ foreach tpair $txnlist {
+ set txn [lindex $tpair 0]
+ set gid [lindex $tpair 1]
+ error_check_good gidcompare $gid $gids($i)
+ error_check_good txn:$op [$txn $op] 0
+ incr i
+ }
+ if { $op != "discard" } {
+ error_check_good envclose [$env close] 0
+ return
+ }
+ #
+ # If we discarded, now do it again and randomly resolve some
+ # until all txns are resolved.
+ #
+ puts "$msg.4: resolving/discarding txns"
+ set txnlist [$env txn_recover]
+ set len [llength $txnlist]
+ set opval(1) "abort"
+ set opcnt(1) 0
+ set opval(2) "commit"
+ set opcnt(2) 0
+ set opval(3) "discard"
+ set opcnt(3) 0
+ while { $len != 0 } {
+ set opicnt(1) 0
+ set opicnt(2) 0
+ set opicnt(3) 0
+ #
+ # Abort/commit or discard them randomly until
+ # all are resolved.
+ #
+ for { set i 0 } { $i < $len } { incr i } {
+ set t [lindex $txnlist $i]
+ set txn [lindex $t 0]
+ set newop [berkdb random_int 1 3]
+ set ret [$txn $opval($newop)]
+ error_check_good txn_$opval($newop):$i $ret 0
+ incr opcnt($newop)
+ incr opicnt($newop)
+ }
+# puts "$opval(1): $opicnt(1) Total: $opcnt(1)"
+# puts "$opval(2): $opicnt(2) Total: $opcnt(2)"
+# puts "$opval(3): $opicnt(3) Total: $opcnt(3)"
+
+ set txnlist [$env txn_recover]
+ set len [llength $txnlist]
+ }
+
+ error_check_good envclose [$env close] 0
+}
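
The resolution loop at the core of recd015, as an isolated sketch; it assumes a recovered $env and an $op of abort, commit or discard, as in recd015_body:

    # txn_recover returns a list of {txn-handle global-id} pairs, one for
    # every transaction left in the prepared-but-unresolved state.
    set txnlist [$env txn_recover]
    foreach tpair $txnlist {
        set txn [lindex $tpair 0]
        set gid [lindex $tpair 1]
        # Resolve it; $gid is only needed for matching against the GIDs
        # the child script recorded.
        error_check_good txn:$op [$txn $op] 0
    }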
diff --git a/bdb/test/recd016.tcl b/bdb/test/recd016.tcl
new file mode 100644
index 00000000000..504aca09617
--- /dev/null
+++ b/bdb/test/recd016.tcl
@@ -0,0 +1,183 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: recd016.tcl,v 11.8 2002/09/05 17:23:07 sandstro Exp $
+#
+# TEST recd016
+# TEST This is a recovery test that runs recovery while another
+# TEST recovery is already running. While bad things may or may not
+# TEST happen, if recovery is then run properly, things should be correct.
+proc recd016 { method args } {
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ puts "Recd016: $method ($args) simultaneous recovery test"
+ puts "Recd016: Skipping; waiting on SR #6277"
+ return
+
+ # Create the database and environment.
+ set testfile recd016.db
+
+ #
+ # For this test we create our database ahead of time so that we
+ # don't need to send methods and args to the script.
+ #
+ cleanup $testdir NULL
+
+ #
+ # Use a smaller log to make more files and slow down recovery.
+ #
+ set gflags ""
+ set pflags ""
+ set log_max [expr 256 * 1024]
+ set nentries 10000
+ set nrec 6
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+ set t4 $testdir/t4
+ set t5 $testdir/t5
+ # Since we are using txns, we need at least 1 lock per
+ # record (for queue). So set lock_max accordingly.
+ set lkmax [expr $nentries * 2]
+
+ puts "\tRecd016.a: Create environment and database"
+ set env_cmd "berkdb_env -create -log_max $log_max \
+ -lock_max $lkmax -txn -home $testdir"
+ set env [eval $env_cmd]
+ error_check_good dbenv [is_valid_env $env] TRUE
+ set db [eval {berkdb_open -create} \
+ $omethod -auto_commit -env $env $args $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ set did [open $dict]
+ set abid [open $t4 w]
+
+ if { [is_record_based $method] == 1 } {
+ set checkfunc recd016_recno.check
+ append gflags " -recno"
+ } else {
+ set checkfunc recd016.check
+ }
+ puts "\tRecd016.b: put/get loop"
+ # Here is the loop where we put and get each key/data pair
+ set count 0
+ while { [gets $did str] != -1 && $count < $nentries } {
+ if { [is_record_based $method] == 1 } {
+ global kvals
+
+ set key [expr $count + 1]
+ if { 0xffffffff > 0 && $key > 0xffffffff } {
+ set key [expr $key - 0x100000000]
+ }
+ if { $key == 0 || $key - 0xffffffff == 1 } {
+ incr key
+ incr count
+ }
+ set kvals($key) [pad_data $method $str]
+ } else {
+ set key $str
+ set str [reverse $str]
+ }
+ #
+ # Start a transaction. Alternately abort and commit them.
+ # This will create a bigger log for recovery to collide.
+ #
+ set txn [$env txn]
+ set ret [eval \
+ {$db put} -txn $txn $pflags {$key [chop_data $method $str]}]
+ error_check_good put $ret 0
+
+ if {[expr $count % 2] == 0} {
+ set ret [$txn commit]
+ error_check_good txn_commit $ret 0
+ set ret [eval {$db get} $gflags {$key}]
+ error_check_good commit_get \
+ $ret [list [list $key [pad_data $method $str]]]
+ } else {
+ set ret [$txn abort]
+ error_check_good txn_abort $ret 0
+ set ret [eval {$db get} $gflags {$key}]
+ error_check_good abort_get [llength $ret] 0
+ puts $abid $key
+ }
+ incr count
+ }
+ close $did
+ close $abid
+ error_check_good dbclose [$db close] 0
+ error_check_good envclose [$env close] 0
+
+ set pidlist {}
+ puts "\tRecd016.c: Start up $nrec recovery processes at once"
+ for {set i 0} {$i < $nrec} {incr i} {
+ set p [exec $util_path/db_recover -h $testdir -c &]
+ lappend pidlist $p
+ }
+ watch_procs $pidlist 5
+ #
+	# Now that they are all done, run recovery correctly.
+ puts "\tRecd016.d: Run recovery process"
+ set stat [catch {exec $util_path/db_recover -h $testdir -c} result]
+ if { $stat == 1 } {
+ error "FAIL: Recovery error: $result."
+ }
+
+ puts "\tRecd016.e: Open, dump and check database"
+ # Now compare the keys to see if they match the dictionary (or ints)
+ if { [is_record_based $method] == 1 } {
+ set oid [open $t2 w]
+ for {set i 1} {$i <= $nentries} {incr i} {
+ set j $i
+ if { 0xffffffff > 0 && $j > 0xffffffff } {
+ set j [expr $j - 0x100000000]
+ }
+ if { $j == 0 } {
+ incr i
+ incr j
+ }
+ puts $oid $j
+ }
+ close $oid
+ } else {
+ set q q
+ filehead $nentries $dict $t2
+ }
+ filesort $t2 $t3
+ file rename -force $t3 $t2
+ filesort $t4 $t3
+ file rename -force $t3 $t4
+ fileextract $t2 $t4 $t3
+ file rename -force $t3 $t5
+
+ set env [eval $env_cmd]
+ error_check_good dbenv [is_valid_env $env] TRUE
+
+ open_and_dump_file $testfile $env $t1 $checkfunc \
+ dump_file_direction "-first" "-next"
+ filesort $t1 $t3
+ error_check_good envclose [$env close] 0
+
+ error_check_good Recd016:diff($t5,$t3) \
+ [filecmp $t5 $t3] 0
+
+ set stat [catch {exec $util_path/db_printlog -h $testdir \
+ > $testdir/LOG } ret]
+ error_check_good db_printlog $stat 0
+ fileremove $testdir/LOG
+}
+
+# Check function for recd016; keys and data are identical
+proc recd016.check { key data } {
+ error_check_good "key/data mismatch" $data [reverse $key]
+}
+
+proc recd016_recno.check { key data } {
+ global kvals
+
+ error_check_good key"$key"_exists [info exists kvals($key)] 1
+ error_check_good "key/data mismatch, key $key" $data $kvals($key)
+}
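
A sketch of the collision step recd016 is after (the test itself is currently skipped pending SR #6277); it uses only the db_recover invocations shown above:

    # Start several catastrophic recoveries at once, wait for them all,
    # then run a single clean recovery to put things right.
    set pidlist {}
    for {set i 0} {$i < $nrec} {incr i} {
        lappend pidlist [exec $util_path/db_recover -h $testdir -c &]
    }
    watch_procs $pidlist 5
    set stat [catch {exec $util_path/db_recover -h $testdir -c} result]
    error_check_good final_recover $stat 0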
diff --git a/bdb/test/recd017.tcl b/bdb/test/recd017.tcl
new file mode 100644
index 00000000000..9f8208c1b3e
--- /dev/null
+++ b/bdb/test/recd017.tcl
@@ -0,0 +1,151 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: recd017.tcl,v 11.4 2002/09/03 16:44:37 sue Exp $
+#
+# TEST recd017
+# TEST Test recovery and security. This is basically a watered
+# TEST down version of recd001 just to verify that encrypted environments
+# TEST can be recovered.
+proc recd017 { method {select 0} args} {
+ global fixed_len
+ global encrypt
+ global passwd
+ source ./include.tcl
+
+ set orig_fixed_len $fixed_len
+ set opts [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ puts "Recd017: $method operation/transaction tests"
+
+ # Create the database and environment.
+ env_cleanup $testdir
+
+ # The recovery tests were originally written to
+ # do a command, abort, do it again, commit, and then
+ # repeat the sequence with another command. Each command
+ # tends to require that the previous command succeeded and
+ # left the database a certain way. To avoid cluttering up the
+ # op_recover interface as well as the test code, we create two
+ # databases; one does abort and then commit for each op, the
+ # other does prepare, prepare-abort, and prepare-commit for each
+	# op. If all goes well, this allows each command to depend on
+	# exactly one successful iteration of the previous command.
+ set testfile recd017.db
+ set testfile2 recd017-2.db
+
+ set flags "-create -encryptaes $passwd -txn -home $testdir"
+
+ puts "\tRecd017.a.0: creating environment"
+ set env_cmd "berkdb_env $flags"
+ convert_encrypt $env_cmd
+ set dbenv [eval $env_cmd]
+ error_check_good dbenv [is_valid_env $dbenv] TRUE
+
+ #
+ # We need to create a database to get the pagesize (either
+ # the default or whatever might have been specified).
+ # Then remove it so we can compute fixed_len and create the
+ # real database.
+ set oflags "-create $omethod -mode 0644 \
+ -env $dbenv -encrypt $opts $testfile"
+ set db [eval {berkdb_open} $oflags]
+ error_check_good db_open [is_valid_db $db] TRUE
+ set stat [$db stat]
+ #
+ # Compute the fixed_len based on the pagesize being used.
+ # We want the fixed_len to be 1/4 the pagesize.
+ #
+ set pg [get_pagesize $stat]
+ error_check_bad get_pagesize $pg -1
+ set fixed_len [expr $pg / 4]
+ error_check_good db_close [$db close] 0
+ error_check_good dbremove [berkdb dbremove -env $dbenv $testfile] 0
+
+ # Convert the args again because fixed_len is now real.
+ # Create the databases and close the environment.
+ # cannot specify db truncate in txn protected env!!!
+ set opts [convert_args $method ""]
+ convert_encrypt $env_cmd
+ set omethod [convert_method $method]
+ set oflags "-create $omethod -mode 0644 \
+ -env $dbenv -encrypt $opts $testfile"
+ set db [eval {berkdb_open} $oflags]
+ error_check_good db_open [is_valid_db $db] TRUE
+ error_check_good db_close [$db close] 0
+
+ set oflags "-create $omethod -mode 0644 \
+ -env $dbenv -encrypt $opts $testfile2"
+ set db [eval {berkdb_open} $oflags]
+ error_check_good db_open [is_valid_db $db] TRUE
+ error_check_good db_close [$db close] 0
+
+ error_check_good env_close [$dbenv close] 0
+
+ puts "\tRecd017.a.1: Verify db_printlog can read logfile"
+ set tmpfile $testdir/printlog.out
+ set stat [catch {exec $util_path/db_printlog -h $testdir -P $passwd \
+ > $tmpfile} ret]
+ error_check_good db_printlog $stat 0
+ fileremove $tmpfile
+
+ # List of recovery tests: {CMD MSG} pairs.
+ set rlist {
+ { {DB put -txn TXNID $key $data} "Recd017.b: put"}
+ { {DB del -txn TXNID $key} "Recd017.c: delete"}
+ }
+
+ # These are all the data values that we're going to need to read
+ # through the operation table and run the recovery tests.
+
+ if { [is_record_based $method] == 1 } {
+ set key 1
+ } else {
+ set key recd017_key
+ }
+ set data recd017_data
+ foreach pair $rlist {
+ set cmd [subst [lindex $pair 0]]
+ set msg [lindex $pair 1]
+ if { $select != 0 } {
+ set tag [lindex $msg 0]
+ set tail [expr [string length $tag] - 2]
+ set tag [string range $tag $tail $tail]
+ if { [lsearch $select $tag] == -1 } {
+ continue
+ }
+ }
+
+ if { [is_queue $method] != 1 } {
+ if { [string first append $cmd] != -1 } {
+ continue
+ }
+ if { [string first consume $cmd] != -1 } {
+ continue
+ }
+ }
+
+# if { [is_fixed_length $method] == 1 } {
+# if { [string first partial $cmd] != -1 } {
+# continue
+# }
+# }
+ op_recover abort $testdir $env_cmd $testfile $cmd $msg
+ op_recover commit $testdir $env_cmd $testfile $cmd $msg
+ #
+ # Note that since prepare-discard ultimately aborts
+ # the txn, it must come before prepare-commit.
+ #
+ op_recover prepare-abort $testdir $env_cmd $testfile2 \
+ $cmd $msg
+ op_recover prepare-discard $testdir $env_cmd $testfile2 \
+ $cmd $msg
+ op_recover prepare-commit $testdir $env_cmd $testfile2 \
+ $cmd $msg
+ }
+ set fixed_len $orig_fixed_len
+ return
+}
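
The encryption-specific pieces of recd017, gathered into one hedged sketch; the flags and helpers are the ones used above, and $passwd comes from the test globals:

    # An -encryptaes environment, a database opened with -encrypt, and
    # db_printlog given the password so it can decrypt the log records.
    set env_cmd "berkdb_env -create -encryptaes $passwd -txn -home $testdir"
    set dbenv [eval $env_cmd]
    error_check_good dbenv [is_valid_env $dbenv] TRUE
    set db [eval {berkdb_open} -create $omethod -mode 0644 \
        -env $dbenv -encrypt $opts $testfile]
    error_check_good db_open [is_valid_db $db] TRUE
    error_check_good db_close [$db close] 0
    error_check_good env_close [$dbenv close] 0
    set stat [catch {exec $util_path/db_printlog -h $testdir -P $passwd \
        > $testdir/printlog.out} ret]
    error_check_good db_printlog $stat 0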
diff --git a/bdb/test/recd018.tcl b/bdb/test/recd018.tcl
new file mode 100644
index 00000000000..fb5a589d851
--- /dev/null
+++ b/bdb/test/recd018.tcl
@@ -0,0 +1,110 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2000-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: recd018.tcl,v 11.2 2002/03/13 21:04:20 sue Exp $
+#
+# TEST recd018
+# TEST Test recovery of closely interspersed checkpoints and commits.
+#
+# This test is based on the error case from #4230.
+#
+proc recd018 { method {ndbs 10} args } {
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+ set tnum 18
+
+ puts "Recd0$tnum ($args): $method recovery of checkpoints and commits."
+
+ set tname recd0$tnum.db
+ env_cleanup $testdir
+
+ set i 0
+ if { [is_record_based $method] == 1 } {
+ set key 1
+ set key2 2
+ } else {
+ set key KEY
+ set key2 KEY2
+ }
+
+ puts "\tRecd0$tnum.a: Create environment and database."
+ set flags "-create -txn -home $testdir"
+
+ set env_cmd "berkdb_env $flags"
+ set dbenv [eval $env_cmd]
+ error_check_good dbenv [is_valid_env $dbenv] TRUE
+
+ set oflags "-auto_commit -env $dbenv -create -mode 0644 $args $omethod"
+ for { set i 0 } { $i < $ndbs } { incr i } {
+ set testfile $tname.$i
+ set db($i) [eval {berkdb_open} $oflags $testfile]
+ error_check_good dbopen [is_valid_db $db($i)] TRUE
+ set file $testdir/$testfile.init
+ catch { file copy -force $testdir/$testfile $file} res
+ copy_extent_file $testdir $testfile init
+ }
+
+ # Main loop: Write a record or two to each database.
+ # Do a commit immediately followed by a checkpoint after each one.
+ error_check_good "Initial Checkpoint" [$dbenv txn_checkpoint] 0
+
+ puts "\tRecd0$tnum.b Put/Commit/Checkpoint to $ndbs databases"
+ for { set i 0 } { $i < $ndbs } { incr i } {
+ set testfile $tname.$i
+ set data $i
+
+ # Put, in a txn.
+ set txn [$dbenv txn]
+ error_check_good txn_begin [is_valid_txn $txn $dbenv] TRUE
+ error_check_good db_put \
+ [$db($i) put -txn $txn $key [chop_data $method $data]] 0
+ error_check_good txn_commit [$txn commit] 0
+ error_check_good txn_checkpt [$dbenv txn_checkpoint] 0
+ if { [expr $i % 2] == 0 } {
+ set txn [$dbenv txn]
+ error_check_good txn2 [is_valid_txn $txn $dbenv] TRUE
+ error_check_good db_put [$db($i) put \
+ -txn $txn $key2 [chop_data $method $data]] 0
+ error_check_good txn_commit [$txn commit] 0
+ error_check_good txn_checkpt [$dbenv txn_checkpoint] 0
+ }
+ error_check_good db_close [$db($i) close] 0
+ set file $testdir/$testfile.afterop
+ catch { file copy -force $testdir/$testfile $file} res
+ copy_extent_file $testdir $testfile afterop
+ }
+ error_check_good env_close [$dbenv close] 0
+
+ # Now run recovery: first as a no-op, then after restoring the
+ # initial database copies, then after restoring the after-op copies.
+ puts "\tRecd0$tnum.c: Run recovery (no-op)"
+ set ret [catch {exec $util_path/db_recover -h $testdir} r]
+ error_check_good db_recover $ret 0
+
+ puts "\tRecd0$tnum.d: Run recovery (initial file)"
+ for { set i 0 } {$i < $ndbs } { incr i } {
+ set testfile $tname.$i
+ set file $testdir/$testfile.init
+ catch { file copy -force $file $testdir/$testfile } res
+ move_file_extent $testdir $testfile init copy
+ }
+
+ set ret [catch {exec $util_path/db_recover -h $testdir} r]
+ error_check_good db_recover $ret 0
+
+ puts "\tRecd0$tnum.e: Run recovery (after file)"
+ for { set i 0 } {$i < $ndbs } { incr i } {
+ set testfile $tname.$i
+ set file $testdir/$testfile.afterop
+ catch { file copy -force $file $testdir/$testfile } res
+ move_file_extent $testdir $testfile afterop copy
+ }
+
+ set ret [catch {exec $util_path/db_recover -h $testdir} r]
+ error_check_good db_recover $ret 0
+
+}
diff --git a/bdb/test/recd019.tcl b/bdb/test/recd019.tcl
new file mode 100644
index 00000000000..dd67b7dcb2a
--- /dev/null
+++ b/bdb/test/recd019.tcl
@@ -0,0 +1,121 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: recd019.tcl,v 11.3 2002/08/08 15:38:07 bostic Exp $
+#
+# TEST recd019
+# TEST Test txn id wrap-around and recovery.
+proc recd019 { method {numid 50} args} {
+ global fixed_len
+ global txn_curid
+ global log_log_record_types
+ source ./include.tcl
+
+ set orig_fixed_len $fixed_len
+ set opts [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ puts "Recd019: $method txn id wrap-around test"
+
+ # Create the database and environment.
+ env_cleanup $testdir
+
+ set testfile recd019.db
+
+ set flags "-create -txn -home $testdir"
+
+ puts "\tRecd019.a: creating environment"
+ set env_cmd "berkdb_env $flags"
+ set dbenv [eval $env_cmd]
+ error_check_good dbenv [is_valid_env $dbenv] TRUE
+
+ # Test txn wrapping. Force a txn_recycle msg.
+ #
+ set new_curid $txn_curid
+ set new_maxid [expr $new_curid + $numid]
+ error_check_good txn_id_set [$dbenv txn_id_set $new_curid $new_maxid] 0
+
+ #
+ # We need to create a database to get the pagesize (either
+ # the default or whatever might have been specified).
+ # Then remove it so we can compute fixed_len and create the
+ # real database.
+ set oflags "-create $omethod -mode 0644 \
+ -env $dbenv $opts $testfile"
+ set db [eval {berkdb_open} $oflags]
+ error_check_good db_open [is_valid_db $db] TRUE
+ set stat [$db stat]
+ #
+ # Compute the fixed_len based on the pagesize being used.
+ # We want the fixed_len to be 1/4 the pagesize.
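+ # (For example, a 4096-byte page gives a fixed_len of 1024.)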
+ #
+ set pg [get_pagesize $stat]
+ error_check_bad get_pagesize $pg -1
+ set fixed_len [expr $pg / 4]
+ error_check_good db_close [$db close] 0
+ error_check_good dbremove [berkdb dbremove -env $dbenv $testfile] 0
+
+ # Convert the args again because fixed_len is now real.
+ # Create the databases and close the environment.
+ # Note: we cannot specify db truncate in a txn-protected env!
+ set opts [convert_args $method ""]
+ set omethod [convert_method $method]
+ set oflags "-create $omethod -mode 0644 \
+ -env $dbenv -auto_commit $opts $testfile"
+ set db [eval {berkdb_open} $oflags]
+ error_check_good db_open [is_valid_db $db] TRUE
+
+ #
+ # Force txn ids to wrap twice and then some.
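+ # (With the default numid of 50, nument below is 3 * 50 - 2 = 148
+ # puts against a 50-id window, so the ids wrap more than twice.)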
+ #
+ set nument [expr $numid * 3 - 2]
+ puts "\tRecd019.b: Wrapping txn ids after $numid"
+ set file $testdir/$testfile.init
+ catch { file copy -force $testdir/$testfile $file} res
+ copy_extent_file $testdir $testfile init
+ for { set i 1 } { $i <= $nument } { incr i } {
+ # Use 'i' as key so method doesn't matter
+ set key $i
+ set data $i
+
+ # Put, in a txn.
+ set txn [$dbenv txn]
+ error_check_good txn_begin [is_valid_txn $txn $dbenv] TRUE
+ error_check_good db_put \
+ [$db put -txn $txn $key [chop_data $method $data]] 0
+ error_check_good txn_commit [$txn commit] 0
+ }
+ error_check_good db_close [$db close] 0
+ set file $testdir/$testfile.afterop
+ catch { file copy -force $testdir/$testfile $file} res
+ copy_extent_file $testdir $testfile afterop
+ error_check_good env_close [$dbenv close] 0
+
+ # Keep track of the log types we've seen
+ if { $log_log_record_types == 1} {
+ logtrack_read $testdir
+ }
+
+ # Now, loop through and recover.
+ puts "\tRecd019.c: Run recovery (no-op)"
+ set ret [catch {exec $util_path/db_recover -h $testdir} r]
+ error_check_good db_recover $ret 0
+
+ puts "\tRecd019.d: Run recovery (initial file)"
+ set file $testdir/$testfile.init
+ catch { file copy -force $file $testdir/$testfile } res
+ move_file_extent $testdir $testfile init copy
+
+ set ret [catch {exec $util_path/db_recover -h $testdir} r]
+ error_check_good db_recover $ret 0
+
+ puts "\tRecd019.e: Run recovery (after file)"
+ set file $testdir/$testfile.afterop
+ catch { file copy -force $file $testdir/$testfile } res
+ move_file_extent $testdir $testfile afterop copy
+
+ set ret [catch {exec $util_path/db_recover -h $testdir} r]
+ error_check_good db_recover $ret 0
+}
diff --git a/bdb/test/recd020.tcl b/bdb/test/recd020.tcl
new file mode 100644
index 00000000000..93a89f32578
--- /dev/null
+++ b/bdb/test/recd020.tcl
@@ -0,0 +1,180 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: recd020.tcl,v 11.8 2002/08/08 15:38:08 bostic Exp $
+#
+# TEST recd020
+# TEST Test recovery after checksum error.
+proc recd020 { method args} {
+ global fixed_len
+ global log_log_record_types
+ global datastr
+ source ./include.tcl
+
+ set pgindex [lsearch -exact $args "-pagesize"]
+ if { $pgindex != -1 } {
+ puts "Recd020: skipping for specific pagesizes"
+ return
+ }
+ if { [is_queueext $method] == 1 } {
+ puts "Recd020: skipping for method $method"
+ return
+ }
+
+ puts "Recd020: $method recovery after checksum error"
+
+ # Create the database and environment.
+ env_cleanup $testdir
+
+ set testfile recd020.db
+ set flags "-create -txn -home $testdir"
+
+ puts "\tRecd020.a: creating environment"
+ set env_cmd "berkdb_env $flags"
+ set dbenv [eval $env_cmd]
+ error_check_good dbenv [is_valid_env $dbenv] TRUE
+
+ set pgsize 512
+ set orig_fixed_len $fixed_len
+ set fixed_len [expr $pgsize / 4]
+ set opts [convert_args $method $args]
+ set omethod [convert_method $method]
+ set oflags "-create $omethod -mode 0644 \
+ -auto_commit -chksum -pagesize $pgsize $opts $testfile"
+ set db [eval {berkdb_open} -env $dbenv $oflags]
+
+ #
+ # Put some data.
+ #
+ set nument 50
+ puts "\tRecd020.b: Put some data"
+ for { set i 1 } { $i <= $nument } { incr i } {
+ # Use 'i' as key so method doesn't matter
+ set key $i
+ set data $i$datastr
+
+ # Put, in a txn.
+ set txn [$dbenv txn]
+ error_check_good txn_begin [is_valid_txn $txn $dbenv] TRUE
+ error_check_good db_put \
+ [$db put -txn $txn $key [chop_data $method $data]] 0
+ error_check_good txn_commit [$txn commit] 0
+ }
+ error_check_good db_close [$db close] 0
+ error_check_good env_close [$dbenv close] 0
+ #
+ # We need to remove the env so that we don't get cached
+ # pages.
+ #
+ error_check_good env_remove [berkdb envremove -home $testdir] 0
+
+ puts "\tRecd020.c: Overwrite part of database"
+ #
+ # First just touch some bits in the file. We want to go
+ # through the paging system, so touch some data pages,
+ # like the middle of page 2.
+ # We should get a checksum error for the checksummed file.
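+ # (With pgsize 512 and pg 2 below, the byte we overwrite is at
+ # offset 512 * 2 + 200 = 1224.)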
+ #
+ set pg 2
+ set fid [open $testdir/$testfile r+]
+ fconfigure $fid -translation binary
+ set seeklen [expr $pgsize * $pg + 200]
+ seek $fid $seeklen start
+ set byte [read $fid 1]
+ binary scan $byte c val
+ set newval [expr ~$val]
+ set newbyte [binary format c $newval]
+ seek $fid $seeklen start
+ puts -nonewline $fid $newbyte
+ close $fid
+
+ #
+ # Verify we get the checksum error. When we get it, it should
+ # log the error as well, so when we run recovery we'll need to
+ # do catastrophic recovery. We do this in a sub-process so that
+ # the files are closed after the panic.
+ #
+ set f1 [open |$tclsh_path r+]
+ puts $f1 "source $test_path/test.tcl"
+
+ set env_cmd "berkdb_env_noerr $flags"
+ set dbenv [send_cmd $f1 $env_cmd]
+ error_check_good dbenv [is_valid_env $dbenv] TRUE
+
+ set db [send_cmd $f1 "{berkdb_open_noerr} -env $dbenv $oflags"]
+ error_check_good db [is_valid_db $db] TRUE
+
+ # We need to set non-blocking mode so that after each command
+ # we can read all of that command's output and know which output
+ # came from which command.
+ fconfigure $f1 -blocking 0
+ set ret [read $f1]
+ set got_err 0
+ for { set i 1 } { $i <= $nument } { incr i } {
+ set stat [send_cmd $f1 "catch {$db get $i} r"]
+ set getret [send_cmd $f1 "puts \$r"]
+ set ret [read $f1]
+ if { $stat == 1 } {
+ error_check_good dbget:fail [is_substr $getret \
+ "checksum error: catastrophic recovery required"] 1
+ set got_err 1
+ # Now verify that it was an error on the page we set.
+ error_check_good dbget:pg$pg [is_substr $ret \
+ "failed for page $pg"] 1
+ break
+ } else {
+ set key [lindex [lindex $getret 0] 0]
+ set data [lindex [lindex $getret 0] 1]
+ error_check_good keychk $key $i
+ error_check_good datachk $data \
+ [pad_data $method $i$datastr]
+ }
+ }
+ error_check_good got_chksum $got_err 1
+ set ret [send_cmd $f1 "$db close"]
+ set extra [read $f1]
+ error_check_good db:fail [is_substr $ret "run recovery"] 1
+
+ set ret [send_cmd $f1 "$dbenv close"]
+ error_check_good env_close:fail [is_substr $ret "run recovery"] 1
+ close $f1
+
+ # Keep track of the log types we've seen
+ if { $log_log_record_types == 1} {
+ logtrack_read $testdir
+ }
+
+ puts "\tRecd020.d: Run normal recovery"
+ set ret [catch {exec $util_path/db_recover -h $testdir} r]
+ error_check_good db_recover $ret 1
+ error_check_good dbrec:fail \
+ [is_substr $r "checksum error: catastrophic recovery required"] 1
+
+ catch {fileremove $testdir/$testfile} ret
+ puts "\tRecd020.e: Run catastrophic recovery"
+ set ret [catch {exec $util_path/db_recover -c -h $testdir} r]
+ error_check_good db_recover $ret 0
+
+ #
+ # Now verify the data was reconstructed correctly.
+ #
+ set env_cmd "berkdb_env_noerr $flags"
+ set dbenv [eval $env_cmd]
+ error_check_good dbenv [is_valid_env $dbenv] TRUE
+
+ set db [eval {berkdb_open} -env $dbenv $oflags]
+ error_check_good db [is_valid_db $db] TRUE
+
+ for { set i 1 } { $i <= $nument } { incr i } {
+ set stat [catch {$db get $i} ret]
+ error_check_good stat $stat 0
+ set key [lindex [lindex $ret 0] 0]
+ set data [lindex [lindex $ret 0] 1]
+ error_check_good keychk $key $i
+ error_check_good datachk $data [pad_data $method $i$datastr]
+ }
+ error_check_good db_close [$db close] 0
+ error_check_good env_close [$dbenv close] 0
+}
diff --git a/bdb/test/recd15scr.tcl b/bdb/test/recd15scr.tcl
new file mode 100644
index 00000000000..e1238907a71
--- /dev/null
+++ b/bdb/test/recd15scr.tcl
@@ -0,0 +1,74 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: recd15scr.tcl,v 1.5 2002/01/30 13:18:04 margo Exp $
+#
+# Recd15 - lots of txns - txn prepare script
+# Usage: recd15script envcmd dbfile gidfile numtxns
+# envcmd: command to open env
+# dbfile: name of database file
+# gidfile: name of global id file
+# numtxns: number of txns to start
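+#
+# Hypothetical argument values, for illustration only:
+#   envcmd  = {berkdb_env -create -txn -home TESTDIR}
+#   dbfile  = recd015.db (or NULL to skip opening a database)
+#   gidfile = TESTDIR/gidfile
+#   numtxns = 20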
+
+source ./include.tcl
+source $test_path/test.tcl
+source $test_path/testutils.tcl
+
+set usage "recd15script envcmd dbfile gidfile numtxns"
+
+# Verify usage
+if { $argc != 4 } {
+ puts stderr "FAIL:[timestamp] Usage: $usage"
+ exit
+}
+
+# Initialize arguments
+set envcmd [ lindex $argv 0 ]
+set dbfile [ lindex $argv 1 ]
+set gidfile [ lindex $argv 2 ]
+set numtxns [ lindex $argv 3 ]
+
+set txnmax [expr $numtxns + 5]
+set dbenv [eval $envcmd]
+error_check_good envopen [is_valid_env $dbenv] TRUE
+
+set usedb 0
+if { $dbfile != "NULL" } {
+ set usedb 1
+ set db [berkdb_open -auto_commit -env $dbenv $dbfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+}
+
+puts "\tRecd015script.a: Begin $numtxns txns"
+for {set i 0} {$i < $numtxns} {incr i} {
+ set t [$dbenv txn]
+ error_check_good txnbegin($i) [is_valid_txn $t $dbenv] TRUE
+ set txns($i) $t
+ if { $usedb } {
+ set dbc [$db cursor -txn $t]
+ error_check_good cursor($i) [is_valid_cursor $dbc $db] TRUE
+ set curs($i) $dbc
+ }
+}
+
+puts "\tRecd015script.b: Prepare $numtxns txns"
+set gfd [open $gidfile w+]
+for {set i 0} {$i < $numtxns} {incr i} {
+ if { $usedb } {
+ set dbc $curs($i)
+ error_check_good dbc_close [$dbc close] 0
+ }
+ set t $txns($i)
+ set gid [make_gid recd015script:$t]
+ puts $gfd $gid
+ error_check_good txn_prepare:$t [$t prepare $gid] 0
+}
+close $gfd
+
+#
+# We do not close the db or env, but exit with the txns outstanding.
+#
+puts "\tRecd015script completed successfully"
+flush stdout
diff --git a/bdb/test/recdscript.tcl b/bdb/test/recdscript.tcl
new file mode 100644
index 00000000000..a2afde46e4d
--- /dev/null
+++ b/bdb/test/recdscript.tcl
@@ -0,0 +1,37 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: recdscript.tcl,v 11.4 2002/01/11 15:53:32 bostic Exp $
+#
+# Recovery txn prepare script
+# Usage: recdscript op dir envcmd dbfile gidfile cmd
+# op: primary txn operation
+# dir: test directory
+# envcmd: command to open env
+# dbfile: name of database file
+# gidfile: name of global id file
+# cmd: db command to execute
+
+source ./include.tcl
+source $test_path/test.tcl
+
+set usage "recdscript op dir envcmd dbfile gidfile cmd"
+
+# Verify usage
+if { $argc != 6 } {
+ puts stderr "FAIL:[timestamp] Usage: $usage"
+ exit
+}
+
+# Initialize arguments
+set op [ lindex $argv 0 ]
+set dir [ lindex $argv 1 ]
+set envcmd [ lindex $argv 2 ]
+set dbfile [ lindex $argv 3 ]
+set gidfile [ lindex $argv 4 ]
+set cmd [ lindex $argv 5 ]
+
+op_recover_prep $op $dir $envcmd $dbfile $gidfile $cmd
+flush stdout
diff --git a/bdb/test/rep001.tcl b/bdb/test/rep001.tcl
new file mode 100644
index 00000000000..97a640029f5
--- /dev/null
+++ b/bdb/test/rep001.tcl
@@ -0,0 +1,249 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2001-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: rep001.tcl,v 1.16 2002/08/26 17:52:19 margo Exp $
+#
+# TEST rep001
+# TEST Replication rename and forced-upgrade test.
+# TEST
+# TEST Run a modified version of test001 in a replicated master environment;
+# TEST verify that the database on the client is correct.
+# TEST Next, remove the database, close the master, upgrade the
+# TEST client, reopen the master, and make sure the new master can correctly
+# TEST run test001 and propagate it in the other direction.
+
+proc rep001 { method { niter 1000 } { tnum "01" } args } {
+ global passwd
+
+ puts "Rep0$tnum: Replication sanity test."
+
+ set envargs ""
+ rep001_sub $method $niter $tnum $envargs $args
+
+ puts "Rep0$tnum: Replication and security sanity test."
+ append envargs " -encryptaes $passwd "
+ append args " -encrypt "
+ rep001_sub $method $niter $tnum $envargs $args
+}
+
+proc rep001_sub { method niter tnum envargs largs } {
+ source ./include.tcl
+ global testdir
+ global encrypt
+
+ env_cleanup $testdir
+
+ replsetup $testdir/MSGQUEUEDIR
+
+ set masterdir $testdir/MASTERDIR
+ set clientdir $testdir/CLIENTDIR
+
+ file mkdir $masterdir
+ file mkdir $clientdir
+
+ if { [is_record_based $method] == 1 } {
+ set checkfunc test001_recno.check
+ } else {
+ set checkfunc test001.check
+ }
+
+ # Open a master.
+ repladd 1
+ set masterenv \
+ [eval {berkdb_env -create -lock_max 2500 -log_max 1000000} \
+ $envargs {-home $masterdir -txn -rep_master -rep_transport \
+ [list 1 replsend]}]
+ error_check_good master_env [is_valid_env $masterenv] TRUE
+
+ # Open a client
+ repladd 2
+ set clientenv [eval {berkdb_env -create} $envargs -txn -lock_max 2500 \
+ {-home $clientdir -rep_client -rep_transport [list 2 replsend]}]
+ error_check_good client_env [is_valid_env $clientenv] TRUE
+
+ # Bring the client online by processing the startup messages.
+ set donenow 0
+ while { 1 } {
+ set nproced 0
+
+ incr nproced [replprocessqueue $masterenv 1]
+ incr nproced [replprocessqueue $clientenv 2]
+
+ if { $nproced == 0 } {
+ break
+ }
+ }
+
+ # Open a test database on the master (so we can test having handles
+ # open across an upgrade).
+ puts "\tRep0$tnum.a:\
+ Opening test database for post-upgrade client logging test."
+ set master_upg_db [berkdb_open \
+ -create -auto_commit -btree -env $masterenv rep0$tnum-upg.db]
+ set puttxn [$masterenv txn]
+ error_check_good master_upg_db_put \
+ [$master_upg_db put -txn $puttxn hello world] 0
+ error_check_good puttxn_commit [$puttxn commit] 0
+ error_check_good master_upg_db_close [$master_upg_db close] 0
+
+ # Run a modified test001 in the master (and update client).
+ puts "\tRep0$tnum.b: Running test001 in replicated env."
+ eval test001 $method $niter 0 $tnum 1 -env $masterenv $largs
+ set donenow 0
+ while { 1 } {
+ set nproced 0
+
+ incr nproced [replprocessqueue $masterenv 1]
+ incr nproced [replprocessqueue $clientenv 2]
+
+ if { $nproced == 0 } {
+ break
+ }
+ }
+
+ # Open the cross-upgrade database on the client and check its contents.
+ set client_upg_db [berkdb_open \
+ -create -auto_commit -btree -env $clientenv rep0$tnum-upg.db]
+ error_check_good client_upg_db_get [$client_upg_db get hello] \
+ [list [list hello world]]
+ # !!! We use this handle later. Don't close it here.
+
+ # Verify the database in the client dir.
+ puts "\tRep0$tnum.c: Verifying client database contents."
+ set testdir [get_home $masterenv]
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+ open_and_dump_file test0$tnum.db $clientenv $t1 \
+ $checkfunc dump_file_direction "-first" "-next"
+
+ # Remove the file (and update client).
+ puts "\tRep0$tnum.d: Remove the file on the master and close master."
+ error_check_good remove \
+ [$masterenv dbremove -auto_commit test0$tnum.db] 0
+ error_check_good masterenv_close [$masterenv close] 0
+ set donenow 0
+ while { 1 } {
+ set nproced 0
+
+ incr nproced [replprocessqueue $masterenv 1]
+ incr nproced [replprocessqueue $clientenv 2]
+
+ if { $nproced == 0 } {
+ break
+ }
+ }
+
+ # Rename the handle so we don't get confused in Tcl: the old client
+ # is about to become the new master.
+ puts "\tRep0$tnum.e: Upgrade client."
+ set newmasterenv $clientenv
+ error_check_good upgrade_client [$newmasterenv rep_start -master] 0
+
+ # Run test001 in the new master
+ puts "\tRep0$tnum.f: Running test001 in new master."
+ eval test001 $method $niter 0 $tnum 1 -env $newmasterenv $largs
+ set donenow 0
+ while { 1 } {
+ set nproced 0
+
+ incr nproced [replprocessqueue $newmasterenv 2]
+
+ if { $nproced == 0 } {
+ break
+ }
+ }
+
+ puts "\tRep0$tnum.g: Reopen old master as client and catch up."
+ # Throttle master so it can't send everything at once
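+ # (rep_limit takes gbytes and bytes, so 0 and 64 * 1024 should cap
+ # a single response at roughly 64KB.)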
+ $newmasterenv rep_limit 0 [expr 64 * 1024]
+ set newclientenv [eval {berkdb_env -create -recover} $envargs \
+ -txn -lock_max 2500 \
+ {-home $masterdir -rep_client -rep_transport [list 1 replsend]}]
+ error_check_good newclient_env [is_valid_env $newclientenv] TRUE
+ set donenow 0
+ while { 1 } {
+ set nproced 0
+
+ incr nproced [replprocessqueue $newclientenv 1]
+ incr nproced [replprocessqueue $newmasterenv 2]
+
+ if { $nproced == 0 } {
+ break
+ }
+ }
+ set stats [$newmasterenv rep_stat]
+ set nthrottles [getstats $stats {Transmission limited}]
+ error_check_bad nthrottles $nthrottles -1
+ error_check_bad nthrottles $nthrottles 0
+
+ # Run a modified test001 in the new master (and update client).
+ puts "\tRep0$tnum.h: Running test001 in new master."
+ eval test001 $method \
+ $niter $niter $tnum 1 -env $newmasterenv $largs
+ set donenow 0
+ while { 1 } {
+ set nproced 0
+
+ incr nproced [replprocessqueue $newclientenv 1]
+ incr nproced [replprocessqueue $newmasterenv 2]
+
+ if { $nproced == 0 } {
+ break
+ }
+ }
+
+ # Test put to the database handle we opened back when the new master
+ # was a client.
+ puts "\tRep0$tnum.i: Test put to handle opened before upgrade."
+ set puttxn [$newmasterenv txn]
+ error_check_good client_upg_db_put \
+ [$client_upg_db put -txn $puttxn hello there] 0
+ error_check_good puttxn_commit [$puttxn commit] 0
+ set donenow 0
+ while { 1 } {
+ set nproced 0
+
+ incr nproced [replprocessqueue $newclientenv 1]
+ incr nproced [replprocessqueue $newmasterenv 2]
+
+ if { $nproced == 0 } {
+ break
+ }
+ }
+
+ # Close the new master's handle for the upgrade-test database; we
+ # don't need it. Then check to make sure the client did in fact
+ # update the database.
+ error_check_good client_upg_db_close [$client_upg_db close] 0
+ set newclient_upg_db [berkdb_open -env $newclientenv rep0$tnum-upg.db]
+ error_check_good newclient_upg_db_get [$newclient_upg_db get hello] \
+ [list [list hello there]]
+ error_check_good newclient_upg_db_close [$newclient_upg_db close] 0
+
+ # Verify the database in the client dir.
+ puts "\tRep0$tnum.j: Verifying new client database contents."
+ set testdir [get_home $newmasterenv]
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+ open_and_dump_file test0$tnum.db $newclientenv $t1 \
+ $checkfunc dump_file_direction "-first" "-next"
+
+ if { [string compare [convert_method $method] -recno] != 0 } {
+ filesort $t1 $t3
+ }
+ error_check_good diff_files($t2,$t3) [filecmp $t2 $t3] 0
+
+
+ error_check_good newmasterenv_close [$newmasterenv close] 0
+ error_check_good newclientenv_close [$newclientenv close] 0
+
+ if { [lsearch $envargs "-encrypta*"] !=-1 } {
+ set encrypt 1
+ }
+ error_check_good verify \
+ [verify_dir $clientdir "\tRep0$tnum.k: " 0 0 1] 0
+ replclose $testdir/MSGQUEUEDIR
+}
diff --git a/bdb/test/rep002.tcl b/bdb/test/rep002.tcl
new file mode 100644
index 00000000000..68666b0d0f0
--- /dev/null
+++ b/bdb/test/rep002.tcl
@@ -0,0 +1,278 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: rep002.tcl,v 11.11 2002/08/08 18:13:12 sue Exp $
+#
+# TEST rep002
+# TEST Basic replication election test.
+# TEST
+# TEST Run a modified version of test001 in a replicated master environment;
+# TEST hold an election among a group of clients to make sure they select
+# TEST a proper master from amongst themselves, in various scenarios.
+
+proc rep002 { method { niter 10 } { nclients 3 } { tnum "02" } args } {
+ source ./include.tcl
+ global elect_timeout
+
+ set elect_timeout 1000000
+
+ if { [is_record_based $method] == 1 } {
+ puts "Rep002: Skipping for method $method."
+ return
+ }
+
+ env_cleanup $testdir
+
+ set qdir $testdir/MSGQUEUEDIR
+ replsetup $qdir
+
+ set masterdir $testdir/MASTERDIR
+ file mkdir $masterdir
+
+ for { set i 0 } { $i < $nclients } { incr i } {
+ set clientdir($i) $testdir/CLIENTDIR.$i
+ file mkdir $clientdir($i)
+ }
+
+ puts "Rep0$tnum: Replication election test with $nclients clients."
+
+ # Open a master.
+ repladd 1
+ set env_cmd(M) "berkdb_env -create -log_max 1000000 -home \
+ $masterdir -txn -rep_master -rep_transport \[list 1 replsend\]"
+ set masterenv [eval $env_cmd(M)]
+ error_check_good master_env [is_valid_env $masterenv] TRUE
+
+ # Open the clients.
+ for { set i 0 } { $i < $nclients } { incr i } {
+ set envid [expr $i + 2]
+ repladd $envid
+ set env_cmd($i) "berkdb_env -create -home $clientdir($i) \
+ -txn -rep_client -rep_transport \[list $envid replsend\]"
+ set clientenv($i) [eval $env_cmd($i)]
+ error_check_good \
+ client_env($i) [is_valid_env $clientenv($i)] TRUE
+ }
+
+ # Run a modified test001 in the master.
+ puts "\tRep0$tnum.a: Running test001 in replicated env."
+ eval test001 $method $niter 0 $tnum 0 -env $masterenv $args
+
+ # Loop, processing first the master's messages, then the client's,
+ # until both queues are empty.
+ while { 1 } {
+ set nproced 0
+
+ incr nproced [replprocessqueue $masterenv 1]
+
+ for { set i 0 } { $i < $nclients } { incr i } {
+ set envid [expr $i + 2]
+ incr nproced [replprocessqueue $clientenv($i) $envid]
+ }
+
+ if { $nproced == 0 } {
+ break
+ }
+ }
+
+ # Verify the database in the client dir.
+ for { set i 0 } { $i < $nclients } { incr i } {
+ puts "\tRep0$tnum.b: Verifying contents of client database $i."
+ set testdir [get_home $masterenv]
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+ open_and_dump_file test0$tnum.db $clientenv($i) $testdir/t1 \
+ test001.check dump_file_direction "-first" "-next"
+
+ if { [string compare [convert_method $method] -recno] != 0 } {
+ filesort $t1 $t3
+ }
+ error_check_good diff_files($t2,$t3) [filecmp $t2 $t3] 0
+
+ verify_dir $clientdir($i) "\tRep0$tnum.c: " 0 0 1
+ }
+
+ # Start an election in the first client.
+ puts "\tRep0$tnum.d: Starting election without dead master."
+
+ set elect_pipe(0) [start_election \
+ $qdir $env_cmd(0) [expr $nclients + 1] 20 $elect_timeout]
+
+ tclsleep 1
+
+ # We want to verify that all the clients except the one that declared
+ # the election get the election message.
+ # We also want to verify that the master declares the election
+ # over by fiat, even if everyone uses a priority lower than 20.
+ # Loop and process all messages, keeping track of which
+ # sites got a HOLDELECTION and checking that the returned newmaster,
+ # if any, is 1 (the master's replication ID).
+ set got_hold_elect(M) 0
+ for { set i 0 } { $i < $nclients } { incr i } {
+ set got_hold_elect($i) 0
+ }
+ while { 1 } {
+ set nproced 0
+ set he 0
+ set nm 0
+
+
+ incr nproced [replprocessqueue $masterenv 1 0 he nm]
+
+ if { $he == 1 } {
+ set elect_pipe(M) [start_election $qdir \
+ $env_cmd(M) [expr $nclients + 1] 0 $elect_timeout]
+ set got_hold_elect(M) 1
+ }
+ if { $nm != 0 } {
+ error_check_good newmaster_is_master $nm 1
+ }
+
+ for { set i 0 } { $i < $nclients } { incr i } {
+ set he 0
+ set envid [expr $i + 2]
+ incr nproced \
+ [replprocessqueue $clientenv($i) $envid 0 he nm]
+ if { $he == 1 } {
+ # error_check_bad client(0)_in_elect $i 0
+ set elect_pipe(M) [start_election $qdir \
+ $env_cmd($i) [expr $nclients + 1] 0 \
+ $elect_timeout]
+ set got_hold_elect($i) 1
+ }
+ if { $nm != 0 } {
+ error_check_good newmaster_is_master $nm 1
+ }
+ }
+
+ if { $nproced == 0 } {
+ break
+ }
+ }
+
+ error_check_good got_hold_elect(master) $got_hold_elect(M) 0
+ unset got_hold_elect(M)
+ # error_check_good got_hold_elect(0) $got_hold_elect(0) 0
+ unset got_hold_elect(0)
+ for { set i 1 } { $i < $nclients } { incr i } {
+ error_check_good got_hold_elect($i) $got_hold_elect($i) 1
+ unset got_hold_elect($i)
+ }
+
+ cleanup_elections
+
+ # We need multiple clients to proceed from here.
+ if { $nclients < 2 } {
+ puts "\tRep0$tnum: Skipping for less than two clients."
+ error_check_good masterenv_close [$masterenv close] 0
+ for { set i 0 } { $i < $nclients } { incr i } {
+ error_check_good clientenv_close($i) \
+ [$clientenv($i) close] 0
+ }
+ return
+ }
+
+ # Make sure all the clients are synced up and ready to be good
+ # voting citizens.
+ error_check_good master_flush [$masterenv rep_flush] 0
+ while { 1 } {
+ set nproced 0
+ incr nproced [replprocessqueue $masterenv 1 0]
+ for { set i 0 } { $i < $nclients } { incr i } {
+ incr nproced [replprocessqueue $clientenv($i) \
+ [expr $i + 2] 0]
+ }
+
+ if { $nproced == 0 } {
+ break
+ }
+ }
+
+ # Now hold another election in the first client, this time with
+ # a dead master.
+ puts "\tRep0$tnum.e: Starting election with dead master."
+ error_check_good masterenv_close [$masterenv close] 0
+
+ for { set i 0 } { $i < $nclients } { incr i } {
+ replclear [expr $i + 2]
+ }
+
+ set elect_pipe(0) [start_election \
+ $qdir $env_cmd(0) [expr $nclients + 1] 20 $elect_timeout]
+
+ tclsleep 1
+
+ # Process messages, and verify that the client with the highest
+ # priority--client #1--wins.
+ set got_newmaster 0
+ set tries 10
+ while { 1 } {
+ set nproced 0
+ set he 0
+ set nm 0
+
+ for { set i 0 } { $i < $nclients } { incr i } {
+ set he 0
+ set envid [expr $i + 2]
+ incr nproced \
+ [replprocessqueue $clientenv($i) $envid 0 he nm]
+ if { $he == 1 } {
+
+ # Client #1 has priority 100; everyone else
+ # has priority 10.
+ if { $i == 1 } {
+ set pri 100
+ } else {
+ set pri 10
+ }
+ # error_check_bad client(0)_in_elect $i 0
+ set elect_pipe(M) [start_election $qdir \
+ $env_cmd($i) [expr $nclients + 1] $pri \
+ $elect_timeout]
+ set got_hold_elect($i) 1
+ }
+ if { $nm != 0 } {
+ error_check_good newmaster_is_master $nm \
+ [expr 1 + 2]
+ set got_newmaster $nm
+
+ # If this env is the new master, it needs to
+ # configure itself as such--this is a different
+ # env handle from the one that performed the
+ # election.
+ if { $nm == $envid } {
+ error_check_good make_master($i) \
+ [$clientenv($i) rep_start -master] \
+ 0
+ }
+ }
+ }
+
+ # We need to wait around to make doubly sure that the
+ # election has finished...
+ if { $nproced == 0 } {
+ incr tries -1
+ if { $tries == 0 } {
+ break
+ } else {
+ tclsleep 1
+ }
+ }
+ }
+
+ # Verify that client #1 is actually the winner.
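+ # (Client 1's replication env id is 1 + 2 = 3, matching the envid
+ # offset used when the clients were opened above.)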
+ error_check_good "client 1 wins" $got_newmaster [expr 1 + 2]
+
+ cleanup_elections
+
+ for { set i 0 } { $i < $nclients } { incr i } {
+ error_check_good clientenv_close($i) [$clientenv($i) close] 0
+ }
+
+ replclose $testdir/MSGQUEUEDIR
+}
+
+proc reptwo { args } { eval rep002 $args }
diff --git a/bdb/test/rep003.tcl b/bdb/test/rep003.tcl
new file mode 100644
index 00000000000..7bb7e00ddbf
--- /dev/null
+++ b/bdb/test/rep003.tcl
@@ -0,0 +1,221 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: rep003.tcl,v 11.9 2002/08/09 02:23:50 margo Exp $
+#
+# TEST rep003
+# TEST Repeated shutdown/restart replication test
+# TEST
+# TEST Run a quick put test in a replicated master environment; start up,
+# TEST shut down, and restart client processes, with and without recovery.
+# TEST To ensure that environment state is transient, use DB_PRIVATE.
+
+proc rep003 { method { tnum "03" } args } {
+ source ./include.tcl
+ global testdir rep003_dbname rep003_omethod rep003_oargs
+
+ env_cleanup $testdir
+ set niter 10
+ set rep003_dbname rep003.db
+
+ if { [is_record_based $method] } {
+ puts "Rep0$tnum: Skipping for method $method"
+ return
+ }
+
+ set rep003_omethod [convert_method $method]
+ set rep003_oargs [convert_args $method $args]
+
+ replsetup $testdir/MSGQUEUEDIR
+
+ set masterdir $testdir/MASTERDIR
+ file mkdir $masterdir
+
+ set clientdir $testdir/CLIENTDIR
+ file mkdir $clientdir
+
+ puts "Rep0$tnum: Replication repeated-startup test"
+
+ # Open a master.
+ repladd 1
+ set masterenv [berkdb_env_noerr -create -log_max 1000000 \
+ -home $masterdir -txn -rep_master -rep_transport [list 1 replsend]]
+ error_check_good master_env [is_valid_env $masterenv] TRUE
+
+ puts "\tRep0$tnum.a: Simple client startup test."
+
+ # Put item one.
+ rep003_put $masterenv A1 a-one
+
+ # Open a client.
+ repladd 2
+ set clientenv [berkdb_env_noerr -create -private -home $clientdir -txn \
+ -rep_client -rep_transport [list 2 replsend]]
+ error_check_good client_env [is_valid_env $clientenv] TRUE
+
+ # Put another quick item.
+ rep003_put $masterenv A2 a-two
+
+ # Loop, processing first the master's messages, then the client's,
+ # until both queues are empty.
+ while { 1 } {
+ set nproced 0
+
+ incr nproced [replprocessqueue $masterenv 1]
+ incr nproced [replprocessqueue $clientenv 2]
+
+ if { $nproced == 0 } {
+ break
+ }
+ }
+
+ rep003_check $clientenv A1 a-one
+ rep003_check $clientenv A2 a-two
+
+ error_check_good clientenv_close [$clientenv close] 0
+ replclear 2
+
+ # Now reopen the client after doing another put.
+ puts "\tRep0$tnum.b: Client restart."
+ rep003_put $masterenv B1 b-one
+
+ unset clientenv
+ set clientenv [berkdb_env_noerr -create -private -home $clientdir -txn \
+ -rep_client -rep_transport [list 2 replsend]]
+ error_check_good client_env [is_valid_env $clientenv] TRUE
+
+ rep003_put $masterenv B2 b-two
+
+ # Loop, processing first the master's messages, then the client's,
+ # until both queues are empty.
+ while { 1 } {
+ set nproced 0
+
+ # The items from part A should be present at all times--
+ # if we roll them back, we've screwed up. [#5709]
+ rep003_check $clientenv A1 a-one
+ rep003_check $clientenv A2 a-two
+
+ incr nproced [replprocessqueue $masterenv 1]
+ incr nproced [replprocessqueue $clientenv 2]
+
+ if { $nproced == 0 } {
+ break
+ }
+ }
+
+ rep003_check $clientenv B1 b-one
+ rep003_check $clientenv B2 b-two
+
+ error_check_good clientenv_close [$clientenv close] 0
+
+ replclear 2
+
+ # Now reopen the client after a recovery.
+ puts "\tRep0$tnum.c: Client restart after recovery."
+ rep003_put $masterenv C1 c-one
+
+ unset clientenv
+ set clientenv [berkdb_env_noerr -create -private -home $clientdir -txn \
+ -recover -rep_client -rep_transport [list 2 replsend]]
+ error_check_good client_env [is_valid_env $clientenv] TRUE
+
+ rep003_put $masterenv C2 c-two
+
+ # Loop, processing first the master's messages, then the client's,
+ # until both queues are empty.
+ while { 1 } {
+ set nproced 0
+
+ # The items from part A should be present at all times--
+ # if we roll them back, we've screwed up. [#5709]
+ rep003_check $clientenv A1 a-one
+ rep003_check $clientenv A2 a-two
+ rep003_check $clientenv B1 b-one
+ rep003_check $clientenv B2 b-two
+
+ incr nproced [replprocessqueue $masterenv 1]
+ incr nproced [replprocessqueue $clientenv 2]
+
+ if { $nproced == 0 } {
+ break
+ }
+ }
+
+ rep003_check $clientenv C1 c-one
+ rep003_check $clientenv C2 c-two
+
+ error_check_good clientenv_close [$clientenv close] 0
+
+ replclear 2
+
+ # Now reopen the client after a catastrophic recovery.
+ puts "\tRep0$tnum.d: Client restart after catastrophic recovery."
+ rep003_put $masterenv D1 d-one
+
+ unset clientenv
+ set clientenv [berkdb_env_noerr -create -private -home $clientdir -txn \
+ -recover_fatal -rep_client -rep_transport [list 2 replsend]]
+ error_check_good client_env [is_valid_env $clientenv] TRUE
+
+ rep003_put $masterenv D2 d-two
+
+ # Loop, processing first the master's messages, then the client's,
+ # until both queues are empty.
+ while { 1 } {
+ set nproced 0
+
+ # The items from part A should be present at all times--
+ # if we roll them back, we've screwed up. [#5709]
+ rep003_check $clientenv A1 a-one
+ rep003_check $clientenv A2 a-two
+ rep003_check $clientenv B1 b-one
+ rep003_check $clientenv B2 b-two
+ rep003_check $clientenv C1 c-one
+ rep003_check $clientenv C2 c-two
+
+ incr nproced [replprocessqueue $masterenv 1]
+ incr nproced [replprocessqueue $clientenv 2]
+
+ if { $nproced == 0 } {
+ break
+ }
+ }
+
+ rep003_check $clientenv D1 d-one
+ rep003_check $clientenv D2 d-two
+
+ error_check_good clientenv_close [$clientenv close] 0
+
+ error_check_good masterenv_close [$masterenv close] 0
+ replclose $testdir/MSGQUEUEDIR
+}
+
+proc rep003_put { masterenv key data } {
+ global rep003_dbname rep003_omethod rep003_oargs
+
+ set db [eval {berkdb_open_noerr -create -env $masterenv -auto_commit} \
+ $rep003_omethod $rep003_oargs $rep003_dbname]
+ error_check_good rep3_put_open($key,$data) [is_valid_db $db] TRUE
+
+ set txn [$masterenv txn]
+ error_check_good rep3_put($key,$data) [$db put -txn $txn $key $data] 0
+ error_check_good rep3_put_txn_commit($key,$data) [$txn commit] 0
+
+ error_check_good rep3_put_close($key,$data) [$db close] 0
+}
+
+proc rep003_check { env key data } {
+ global rep003_dbname
+
+ set db [berkdb_open_noerr -rdonly -env $env $rep003_dbname]
+ error_check_good rep3_check_open($key,$data) [is_valid_db $db] TRUE
+
+ set dbt [$db get $key]
+ error_check_good rep3_check($key,$data) \
+ [lindex [lindex $dbt 0] 1] $data
+
+ error_check_good rep3_put_close($key,$data) [$db close] 0
+}
diff --git a/bdb/test/rep004.tcl b/bdb/test/rep004.tcl
new file mode 100644
index 00000000000..e1d4d3b65c7
--- /dev/null
+++ b/bdb/test/rep004.tcl
@@ -0,0 +1,198 @@
+#
+# Copyright (c) 2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: rep004.tcl,v 1.5 2002/08/08 18:13:12 sue Exp $
+#
+# TEST rep004
+# TEST Test of DB_REP_LOGSONLY.
+# TEST
+# TEST Run a quick put test in a master environment that has one logs-only
+# TEST client. Shut down, then run catastrophic recovery in the logs-only
+# TEST client and check that the database is present and populated.
+
+proc rep004 { method { nitems 10 } { tnum "04" } args } {
+ source ./include.tcl
+ global testdir
+
+ env_cleanup $testdir
+ set dbname rep0$tnum.db
+
+ set omethod [convert_method $method]
+ set oargs [convert_args $method $args]
+
+ puts "Rep0$tnum: Test of logs-only replication clients"
+
+ replsetup $testdir/MSGQUEUEDIR
+ set masterdir $testdir/MASTERDIR
+ file mkdir $masterdir
+ set clientdir $testdir/CLIENTDIR
+ file mkdir $clientdir
+ set logsonlydir $testdir/LOGSONLYDIR
+ file mkdir $logsonlydir
+
+ # Open a master, a logsonly replica, and a normal client.
+ repladd 1
+ set masterenv [berkdb_env -create -home $masterdir -txn -rep_master \
+ -rep_transport [list 1 replsend]]
+ error_check_good master_env [is_valid_env $masterenv] TRUE
+
+ repladd 2
+ set loenv [berkdb_env -create -home $logsonlydir -txn -rep_logsonly \
+ -rep_transport [list 2 replsend]]
+ error_check_good logsonly_env [is_valid_env $loenv] TRUE
+
+ repladd 3
+ set clientenv [berkdb_env -create -home $clientdir -txn -rep_client \
+ -rep_transport [list 3 replsend]]
+ error_check_good client_env [is_valid_env $clientenv] TRUE
+
+
+ puts "\tRep0$tnum.a: Populate database."
+
+ set db [eval {berkdb open -create -mode 0644 -auto_commit} \
+ -env $masterenv $oargs $omethod $dbname]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set did [open $dict]
+ set count 0
+ while { [gets $did str] != -1 && $count < $nitems } {
+ if { [is_record_based $method] == 1 } {
+ set key [expr $count + 1]
+ set data $str
+ } else {
+ set key $str
+ set data [reverse $str]
+ }
+ set kvals($count) $key
+ set dvals($count) [pad_data $method $data]
+
+ set txn [$masterenv txn]
+ error_check_good txn($count) [is_valid_txn $txn $masterenv] TRUE
+
+ set ret [eval \
+ {$db put} -txn $txn {$key [chop_data $method $data]}]
+ error_check_good put($count) $ret 0
+
+ error_check_good commit($count) [$txn commit] 0
+
+ incr count
+ }
+
+ puts "\tRep0$tnum.b: Sync up clients."
+ set donenow 0
+ while { 1 } {
+ set nproced 0
+
+ incr nproced [replprocessqueue $masterenv 1]
+ incr nproced [replprocessqueue $loenv 2]
+ incr nproced [replprocessqueue $clientenv 3]
+
+ if { $nproced == 0 } {
+ break
+ }
+ }
+
+
+ puts "\tRep0$tnum.c: Get master and logs-only client ahead."
+ set newcount 0
+ while { [gets $did str] != -1 && $newcount < $nitems } {
+ if { [is_record_based $method] == 1 } {
+ set key [expr $count + 1]
+ set data $str
+ } else {
+ set key $str
+ set data [reverse $str]
+ }
+ set kvals($count) $key
+ set dvals($count) [pad_data $method $data]
+
+ set txn [$masterenv txn]
+ error_check_good txn($count) [is_valid_txn $txn $masterenv] TRUE
+
+ set ret [eval \
+ {$db put} -txn $txn {$key [chop_data $method $data]}]
+ error_check_good put($count) $ret 0
+
+ error_check_good commit($count) [$txn commit] 0
+
+ incr count
+ incr newcount
+ }
+
+ error_check_good db_close [$db close] 0
+
+ puts "\tRep0$tnum.d: Sync up logs-only client only, then fail over."
+ set donenow 0
+ while { 1 } {
+ set nproced 0
+
+ incr nproced [replprocessqueue $masterenv 1]
+ incr nproced [replprocessqueue $loenv 2]
+
+ if { $nproced == 0 } {
+ break
+ }
+ }
+
+
+ # "Crash" the master, and fail over to the upgradeable client.
+ error_check_good masterenv_close [$masterenv close] 0
+ replclear 3
+
+ error_check_good upgrade_client [$clientenv rep_start -master] 0
+ set donenow 0
+ while { 1 } {
+ set nproced 0
+
+ incr nproced [replprocessqueue $clientenv 3]
+ incr nproced [replprocessqueue $loenv 2]
+
+ if { $nproced == 0 } {
+ break
+ }
+ }
+
+ error_check_good loenv_close [$loenv close] 0
+
+ puts "\tRep0$tnum.e: Run catastrophic recovery on logs-only client."
+ set loenv [berkdb_env -create -home $logsonlydir -txn -recover_fatal]
+
+ puts "\tRep0$tnum.f: Verify logs-only client contents."
+ set lodb [eval {berkdb open} -env $loenv $oargs $omethod $dbname]
+ set loc [$lodb cursor]
+
+ set cdb [eval {berkdb open} -env $clientenv $oargs $omethod $dbname]
+ set cc [$cdb cursor]
+
+ # Make sure new master and recovered logs-only replica match.
+ for { set cdbt [$cc get -first] } \
+ { [llength $cdbt] > 0 } { set cdbt [$cc get -next] } {
+ set lodbt [$loc get -next]
+
+ error_check_good newmaster_replica_match $cdbt $lodbt
+ }
+
+ # Reset new master cursor.
+ error_check_good cc_close [$cc close] 0
+ set cc [$cdb cursor]
+
+ for { set lodbt [$loc get -first] } \
+ { [llength $lodbt] > 0 } { set lodbt [$loc get -next] } {
+ set cdbt [$cc get -next]
+
+ error_check_good replica_newmaster_match $lodbt $cdbt
+ }
+
+ error_check_good loc_close [$loc close] 0
+ error_check_good lodb_close [$lodb close] 0
+ error_check_good loenv_close [$loenv close] 0
+
+ error_check_good cc_close [$cc close] 0
+ error_check_good cdb_close [$cdb close] 0
+ error_check_good clientenv_close [$clientenv close] 0
+
+ close $did
+
+ replclose $testdir/MSGQUEUEDIR
+}
diff --git a/bdb/test/rep005.tcl b/bdb/test/rep005.tcl
new file mode 100644
index 00000000000..e0515f1cd62
--- /dev/null
+++ b/bdb/test/rep005.tcl
@@ -0,0 +1,225 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: rep005.tcl,v 11.3 2002/08/08 18:13:13 sue Exp $
+#
+# TEST rep005
+# TEST Replication election test with error handling.
+# TEST
+# TEST Run a modified version of test001 in a replicated master environment;
+# TEST hold an election among a group of clients to make sure they select
+# TEST a proper master from amongst themselves, forcing errors at various
+# TEST locations in the election path.
+
+proc rep005 { method { niter 10 } { tnum "05" } args } {
+ source ./include.tcl
+
+ if { [is_record_based $method] == 1 } {
+ puts "Rep005: Skipping for method $method."
+ return
+ }
+
+ set nclients 3
+ env_cleanup $testdir
+
+ set qdir $testdir/MSGQUEUEDIR
+ replsetup $qdir
+
+ set masterdir $testdir/MASTERDIR
+ file mkdir $masterdir
+
+ for { set i 0 } { $i < $nclients } { incr i } {
+ set clientdir($i) $testdir/CLIENTDIR.$i
+ file mkdir $clientdir($i)
+ }
+
+ puts "Rep0$tnum: Replication election test with $nclients clients."
+
+ # Open a master.
+ repladd 1
+ set env_cmd(M) "berkdb_env -create -log_max 1000000 -home \
+ $masterdir -txn -rep_master -rep_transport \[list 1 replsend\]"
+ set masterenv [eval $env_cmd(M)]
+ error_check_good master_env [is_valid_env $masterenv] TRUE
+
+ # Open the clients.
+ for { set i 0 } { $i < $nclients } { incr i } {
+ set envid [expr $i + 2]
+ repladd $envid
+ set env_cmd($i) "berkdb_env -create -home $clientdir($i) \
+ -txn -rep_client -rep_transport \[list $envid replsend\]"
+ set clientenv($i) [eval $env_cmd($i)]
+ error_check_good \
+ client_env($i) [is_valid_env $clientenv($i)] TRUE
+ }
+
+ # Run a modified test001 in the master.
+ puts "\tRep0$tnum.a: Running test001 in replicated env."
+ eval test001 $method $niter 0 $tnum 0 -env $masterenv $args
+
+ # Loop, processing first the master's messages, then the client's,
+ # until both queues are empty.
+ while { 1 } {
+ set nproced 0
+
+ incr nproced [replprocessqueue $masterenv 1]
+
+ for { set i 0 } { $i < $nclients } { incr i } {
+ set envid [expr $i + 2]
+ incr nproced [replprocessqueue $clientenv($i) $envid]
+ }
+
+ if { $nproced == 0 } {
+ break
+ }
+ }
+
+ # Verify the database in the client dir.
+ for { set i 0 } { $i < $nclients } { incr i } {
+ puts "\tRep0$tnum.b: Verifying contents of client database $i."
+ set testdir [get_home $masterenv]
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+ open_and_dump_file test0$tnum.db $clientenv($i) $testdir/t1 \
+ test001.check dump_file_direction "-first" "-next"
+
+ if { [string compare [convert_method $method] -recno] != 0 } {
+ filesort $t1 $t3
+ }
+ error_check_good diff_files($t2,$t3) [filecmp $t2 $t3] 0
+
+ verify_dir $clientdir($i) "\tRep0$tnum.c: " 0 0 1
+ }
+
+ # Make sure all the clients are synced up and ready to be good
+ # voting citizens.
+ error_check_good master_flush [$masterenv rep_flush] 0
+ while { 1 } {
+ set nproced 0
+ incr nproced [replprocessqueue $masterenv 1 0]
+ for { set i 0 } { $i < $nclients } { incr i } {
+ incr nproced [replprocessqueue $clientenv($i) \
+ [expr $i + 2] 0]
+ }
+
+ if { $nproced == 0 } {
+ break
+ }
+ }
+
+ error_check_good masterenv_close [$masterenv close] 0
+
+ for { set i 0 } { $i < $nclients } { incr i } {
+ replclear [expr $i + 2]
+ }
+ #
+ # We set up the error list for each client. We know that the
+ # first client is the one calling the election, so we add the
+ # error location for sending the message (electsend) only to that
+ # client's list.
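+ # With the error lists below that works out to 6 * 5 * 5 = 150
+ # election attempts.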
+ set m "Rep0$tnum"
+ set count 0
+ foreach c0 { electinit electsend electvote1 electwait1 electvote2 \
+ electwait2 } {
+ foreach c1 { electinit electvote1 electwait1 electvote2 \
+ electwait2 } {
+ foreach c2 { electinit electvote1 electwait1 \
+ electvote2 electwait2 } {
+ set elist [list $c0 $c1 $c2]
+ rep005_elect env_cmd clientenv $qdir $m \
+ $count $elist
+ incr count
+ }
+ }
+ }
+
+ for { set i 0 } { $i < $nclients } { incr i } {
+ error_check_good clientenv_close($i) [$clientenv($i) close] 0
+ }
+
+ replclose $testdir/MSGQUEUEDIR
+}
+
+proc rep005_elect { ecmd cenv qdir msg count elist } {
+ global elect_timeout
+ upvar $ecmd env_cmd
+ upvar $cenv clientenv
+
+ set elect_timeout 1000000
+ set nclients [llength $elist]
+
+ for { set i 0 } { $i < $nclients } { incr i } {
+ set err_cmd($i) [lindex $elist $i]
+ }
+ puts "\t$msg.d.$count: Starting election with errors $elist"
+ set elect_pipe(0) [start_election $qdir $env_cmd(0) \
+ [expr $nclients + 1] 20 $elect_timeout $err_cmd(0)]
+
+ tclsleep 1
+
+ # Process messages, and verify that the client with the highest
+ # priority--client #1--wins.
+ set got_newmaster 0
+ set tries 10
+ while { 1 } {
+ set nproced 0
+ set he 0
+ set nm 0
+
+ for { set i 0 } { $i < $nclients } { incr i } {
+ set he 0
+ set envid [expr $i + 2]
+# puts "Processing queue for client $i"
+ incr nproced \
+ [replprocessqueue $clientenv($i) $envid 0 he nm]
+ if { $he == 1 } {
+ # Client #1 has priority 100; everyone else
+ # has priority 10.
+ if { $i == 1 } {
+ set pri 100
+ } else {
+ set pri 10
+ }
+ # error_check_bad client(0)_in_elect $i 0
+# puts "Starting election on client $i"
+ set elect_pipe($i) [start_election $qdir \
+ $env_cmd($i) [expr $nclients + 1] $pri \
+ $elect_timeout $err_cmd($i)]
+ set got_hold_elect($i) 1
+ }
+ if { $nm != 0 } {
+ error_check_good newmaster_is_master $nm \
+ [expr 1 + 2]
+ set got_newmaster $nm
+
+ # If this env is the new master, it needs to
+ # configure itself as such--this is a different
+ # env handle from the one that performed the
+ # election.
+ if { $nm == $envid } {
+ error_check_good make_master($i) \
+ [$clientenv($i) rep_start -master] \
+ 0
+ }
+ }
+ }
+
+ # We need to wait around to make doubly sure that the
+ # election has finished...
+ if { $nproced == 0 } {
+ incr tries -1
+ if { $tries == 0 } {
+ break
+ } else {
+ tclsleep 1
+ }
+ }
+ }
+
+ # Verify that client #1 is actually the winner.
+ error_check_good "client 1 wins" $got_newmaster [expr 1 + 2]
+
+ cleanup_elections
+
+}
diff --git a/bdb/test/reputils.tcl b/bdb/test/reputils.tcl
new file mode 100644
index 00000000000..340e359f26d
--- /dev/null
+++ b/bdb/test/reputils.tcl
@@ -0,0 +1,659 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2001-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: reputils.tcl,v 11.34 2002/08/12 17:54:18 sandstro Exp $
+#
+# Replication testing utilities
+
+# Environment handle for the env containing the replication "communications
+# structure" (a small transactional environment holding one message queue
+# database per site).
+
+# The test environment consists of a queue and a directory (environment)
+# per replication site. The queue is used to hold messages destined for a
+# particular site and the directory will contain the environment for the
+# site. So the environment looks like:
+#                          $testdir
+#           _____________________|______________________
+#          /              |             \               \
+#    MSGQUEUEDIR      MASTERDIR     CLIENTDIR.0  ...  CLIENTDIR.N-1
+#     |  |  ...  |
+#     1  2  ...  N+1
+#
+# The master is site 1 in the MSGQUEUEDIR and clients 1-N map to message
+# queues 2 - N+1.
+#
+# The global repenv array contains the environment handles for the sites:
+# repenv(master) is the master's environment and repenv(0 .. N-1) are the
+# clients' environments.
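+#
+# A minimal sketch of how a test typically drives these utilities.  The
+# proc names are the ones defined in this file; the workload step is just
+# a placeholder, and the exact calling sequence may vary per test:
+#
+#   set largs [repl_envsetup $envargs $largs $tnum $nclients]
+#   ... run a txn-protected workload here; $largs now includes the
+#       master's -env argument ...
+#   repl_envprocq $tnum $nclients          ;# drain the message queues
+#   repl_envver0 $tnum $method $nclients   ;# compare master and clients
+#   repl_envclose $tnum $envargs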
+
+global queueenv
+
+# Array of DB handles, one per machine ID, for the databases that contain
+# messages.
+global queuedbs
+global machids
+
+global elect_timeout
+set elect_timeout 50000000
+set drop 0
+
+# Create the directory structure for replication testing.
+# Open the master and client environments; store these in the global repenv
+# Return the master's environment: "-env masterenv"
+#
+proc repl_envsetup { envargs largs tnum {nclients 1} {droppct 0} { oob 0 } } {
+ source ./include.tcl
+ global clientdir
+ global drop drop_msg
+ global masterdir
+ global repenv
+ global testdir
+
+ env_cleanup $testdir
+
+ replsetup $testdir/MSGQUEUEDIR
+
+ set masterdir $testdir/MASTERDIR
+ file mkdir $masterdir
+ if { $droppct != 0 } {
+ set drop 1
+ set drop_msg [expr 100 / $droppct]
+ } else {
+ set drop 0
+ }
+
+ for { set i 0 } { $i < $nclients } { incr i } {
+ set clientdir($i) $testdir/CLIENTDIR.$i
+ file mkdir $clientdir($i)
+ }
+
+ # Open a master.
+ repladd 1
+ #
+ # Set log smaller than default to force changing files,
+ # but big enough so that the tests that use binary files
+ # as keys/data can run.
+ #
+ set lmax [expr 3 * 1024 * 1024]
+ set masterenv [eval {berkdb_env -create -log_max $lmax} $envargs \
+ {-home $masterdir -txn -rep_master -rep_transport \
+ [list 1 replsend]}]
+ error_check_good master_env [is_valid_env $masterenv] TRUE
+ set repenv(master) $masterenv
+
+ # Open clients
+ for { set i 0 } { $i < $nclients } { incr i } {
+ set envid [expr $i + 2]
+ repladd $envid
+ set clientenv [eval {berkdb_env -create} $envargs -txn \
+ {-cachesize { 0 10000000 0 }} -lock_max 10000 \
+ {-home $clientdir($i) -rep_client -rep_transport \
+ [list $envid replsend]}]
+ error_check_good client_env [is_valid_env $clientenv] TRUE
+ set repenv($i) $clientenv
+ }
+ set repenv($i) NULL
+ append largs " -env $masterenv "
+
+ # Process startup messages
+ repl_envprocq $tnum $nclients $oob
+
+ return $largs
+}
+
+# Process all incoming messages. Iterate until there are no messages left
+# in anyone's queue so that we capture all message exchanges. We verify that
+# the requested number of clients matches the number of client environments
+# we have. The oob parameter indicates whether we should process the queue
+# with out-of-order delivery. The replprocessqueue procedure actually does
+# the real work of processing the queue -- this routine simply iterates
+# over the various queues and does the initial setup.
+
+proc repl_envprocq { tnum { nclients 1 } { oob 0 }} {
+ global repenv
+ global drop
+
+ set masterenv $repenv(master)
+ for { set i 0 } { 1 } { incr i } {
+ if { $repenv($i) == "NULL"} {
+ break
+ }
+ }
+ error_check_good i_nclients $nclients $i
+
+ set name [format "Repl%03d" $tnum]
+ berkdb debug_check
+ puts -nonewline "\t$name: Processing master/$i client queues"
+ set rand_skip 0
+ if { $oob } {
+ puts " out-of-order"
+ } else {
+ puts " in order"
+ }
+ set do_check 1
+ set droprestore $drop
+ while { 1 } {
+ set nproced 0
+
+ if { $oob } {
+ set rand_skip [berkdb random_int 2 10]
+ }
+ incr nproced [replprocessqueue $masterenv 1 $rand_skip]
+ for { set i 0 } { $i < $nclients } { incr i } {
+ set envid [expr $i + 2]
+ if { $oob } {
+ set rand_skip [berkdb random_int 2 10]
+ }
+ set n [replprocessqueue $repenv($i) \
+ $envid $rand_skip]
+ incr nproced $n
+ }
+
+ if { $nproced == 0 } {
+ # Now that we delay requesting records until
+ # we've had a few records go by, we should always
+ # see that the number of requests is lower than the
+ # number of messages that were enqueued.
+ for { set i 0 } { $i < $nclients } { incr i } {
+ set clientenv $repenv($i)
+ set stats [$clientenv rep_stat]
+ set queued [getstats $stats \
+ {Total log records queued}]
+ error_check_bad queued_stats \
+ $queued -1
+ set requested [getstats $stats \
+ {Log records requested}]
+ error_check_bad requested_stats \
+ $requested -1
+ if { $queued != 0 && $do_check != 0 } {
+ error_check_good num_requested \
+ [expr $requested < $queued] 1
+ }
+
+ $clientenv rep_request 1 1
+ }
+
+ # If we were dropping messages, we might need
+ # to flush the log so that we get everything
+ # and end up in the right state.
+ if { $drop != 0 } {
+ set drop 0
+ set do_check 0
+ $masterenv rep_flush
+ berkdb debug_check
+ puts "\t$name: Flushing Master"
+ } else {
+ break
+ }
+ }
+ }
+
+ # Reset the clients back to the default state in case we
+ # have more processing to do.
+ for { set i 0 } { $i < $nclients } { incr i } {
+ set clientenv $repenv($i)
+ $clientenv rep_request 4 128
+ }
+ set drop $droprestore
+}
+
+# Verify that the directories in the master are exactly replicated in
+# each of the client environments.
+
+proc repl_envver0 { tnum method { nclients 1 } } {
+ global clientdir
+ global masterdir
+ global repenv
+
+ # Verify the database in the client dir.
+ # First dump the master.
+ set t1 $masterdir/t1
+ set t2 $masterdir/t2
+ set t3 $masterdir/t3
+ set omethod [convert_method $method]
+ set name [format "Repl%03d" $tnum]
+
+ #
+ # We are interested in the keys of whatever databases are present
+ # in the master environment, so we just call a no-op check function
+ # since we have no idea what the contents of these databases really are.
+ # We just need to walk the master and the clients and make sure they
+ # have the same contents.
+ #
+ set cwd [pwd]
+ cd $masterdir
+ set stat [catch {glob test*.db} dbs]
+ cd $cwd
+ if { $stat == 1 } {
+ return
+ }
+ foreach testfile $dbs {
+ open_and_dump_file $testfile $repenv(master) $masterdir/t2 \
+ repl_noop dump_file_direction "-first" "-next"
+
+ if { [string compare [convert_method $method] -recno] != 0 } {
+ filesort $t2 $t3
+ file rename -force $t3 $t2
+ }
+ for { set i 0 } { $i < $nclients } { incr i } {
+ puts "\t$name: Verifying client $i database \
+ $testfile contents."
+ open_and_dump_file $testfile $repenv($i) \
+ $t1 repl_noop dump_file_direction "-first" "-next"
+
+ if { [string compare $omethod "-recno"] != 0 } {
+ filesort $t1 $t3
+ } else {
+ catch {file copy -force $t1 $t3} ret
+ }
+ error_check_good diff_files($t2,$t3) [filecmp $t2 $t3] 0
+ }
+ }
+}
+
+# Remove all the elements from the master and verify that these
+# deletions properly propagated to the clients.
+
+proc repl_verdel { tnum method { nclients 1 } } {
+ global clientdir
+ global masterdir
+ global repenv
+
+ # Delete all items in the master.
+ set name [format "Repl%03d" $tnum]
+ set cwd [pwd]
+ cd $masterdir
+ set stat [catch {glob test*.db} dbs]
+ cd $cwd
+ if { $stat == 1 } {
+ return
+ }
+ foreach testfile $dbs {
+ puts "\t$name: Deleting all items from the master."
+ set txn [$repenv(master) txn]
+ error_check_good txn_begin [is_valid_txn $txn \
+ $repenv(master)] TRUE
+ set db [berkdb_open -txn $txn -env $repenv(master) $testfile]
+ error_check_good reopen_master [is_valid_db $db] TRUE
+ set dbc [$db cursor -txn $txn]
+ error_check_good reopen_master_cursor \
+ [is_valid_cursor $dbc $db] TRUE
+ for { set dbt [$dbc get -first] } { [llength $dbt] > 0 } \
+ { set dbt [$dbc get -next] } {
+ error_check_good del_item [$dbc del] 0
+ }
+ error_check_good dbc_close [$dbc close] 0
+ error_check_good txn_commit [$txn commit] 0
+ error_check_good db_close [$db close] 0
+
+ repl_envprocq $tnum $nclients
+
+ # Check clients.
+ for { set i 0 } { $i < $nclients } { incr i } {
+ puts "\t$name: Verifying emptiness of client database $i."
+
+ set db [berkdb_open -env $repenv($i) $testfile]
+ error_check_good reopen_client($i) \
+ [is_valid_db $db] TRUE
+ set dbc [$db cursor]
+ error_check_good reopen_client_cursor($i) \
+ [is_valid_cursor $dbc $db] TRUE
+
+ error_check_good client($i)_empty \
+ [llength [$dbc get -first]] 0
+
+ error_check_good dbc_close [$dbc close] 0
+ error_check_good db_close [$db close] 0
+ }
+ }
+}
+
+# Replication "check" function for the dump procs that expect to
+# be able to verify the keys and data.
+proc repl_noop { k d } {
+ return
+}
+
+# Close all the master and client environments in a replication test directory.
+proc repl_envclose { tnum envargs } {
+ source ./include.tcl
+ global clientdir
+ global encrypt
+ global masterdir
+ global repenv
+ global testdir
+
+	if { [lsearch $envargs "-encrypta*"] != -1 } {
+ set encrypt 1
+ }
+
+ # In order to make sure that we have fully-synced and ready-to-verify
+ # databases on all the clients, do a checkpoint on the master and
+ # process messages in order to flush all the clients.
+ set drop 0
+ set do_check 0
+ set name [format "Repl%03d" $tnum]
+ berkdb debug_check
+ puts "\t$name: Checkpointing master."
+ error_check_good masterenv_ckp [$repenv(master) txn_checkpoint] 0
+
+ # Count clients.
+ for { set ncli 0 } { 1 } { incr ncli } {
+ if { $repenv($ncli) == "NULL" } {
+ break
+ }
+ }
+ repl_envprocq $tnum $ncli
+
+ error_check_good masterenv_close [$repenv(master) close] 0
+ verify_dir $masterdir "\t$name: " 0 0 1
+ for { set i 0 } { $i < $ncli } { incr i } {
+ error_check_good client($i)_close [$repenv($i) close] 0
+ verify_dir $clientdir($i) "\t$name: " 0 0 1
+ }
+ replclose $testdir/MSGQUEUEDIR
+
+}
+
+# Close up a replication group
+proc replclose { queuedir } {
+ global queueenv queuedbs machids
+
+ foreach m $machids {
+ set db $queuedbs($m)
+ error_check_good dbr_close [$db close] 0
+ }
+ error_check_good qenv_close [$queueenv close] 0
+ set machids {}
+}
+
+# Create a replication group for testing.
+proc replsetup { queuedir } {
+ global queueenv queuedbs machids
+
+ file mkdir $queuedir
+ set queueenv \
+ [berkdb_env -create -txn -lock_max 20000 -home $queuedir]
+ error_check_good queueenv [is_valid_env $queueenv] TRUE
+
+ if { [info exists queuedbs] } {
+ unset queuedbs
+ }
+ set machids {}
+
+ return $queueenv
+}
+
+# Send function for replication.
+proc replsend { control rec fromid toid } {
+ global queuedbs queueenv machids
+ global drop drop_msg
+
+ #
+	# If we are testing with dropped messages, then we drop every
+	# $drop_msg'th message.  When we drop one, just return 0 and don't
+	# do anything else.
+ #
+ if { $drop != 0 } {
+ incr drop
+ if { $drop == $drop_msg } {
+ set drop 1
+ return 0
+ }
+ }
+ # XXX
+ # -1 is DB_BROADCAST_MID
+ if { $toid == -1 } {
+ set machlist $machids
+ } else {
+ if { [info exists queuedbs($toid)] != 1 } {
+ error "replsend: machid $toid not found"
+ }
+ set machlist [list $toid]
+ }
+
+ foreach m $machlist {
+		# XXX should a broadcast include "self"?
+ if { $m == $fromid } {
+ continue
+ }
+
+ set db $queuedbs($m)
+ set txn [$queueenv txn]
+ $db put -txn $txn -append [list $control $rec $fromid]
+ error_check_good replsend_commit [$txn commit] 0
+ }
+
+ return 0
+}
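+
+# Illustrative only (not called by the suite): one way a test might turn
+# on message dropping for replsend.  The drop and drop_msg globals are the
+# ones replsend consults; starting the counter at 1 mirrors the reset
+# value replsend itself uses, and the default interval here is arbitrary.
+proc repl_example_enable_drop { { interval 5 } } {
+	global drop drop_msg
+
+	set drop 1
+	set drop_msg $interval
+}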
+
+# Nuke all the pending messages for a particular site.
+proc replclear { machid } {
+ global queuedbs queueenv
+
+ if { [info exists queuedbs($machid)] != 1 } {
+ error "FAIL: replclear: machid $machid not found"
+ }
+
+ set db $queuedbs($machid)
+ set txn [$queueenv txn]
+ set dbc [$db cursor -txn $txn]
+ for { set dbt [$dbc get -rmw -first] } { [llength $dbt] > 0 } \
+ { set dbt [$dbc get -rmw -next] } {
+ error_check_good replclear($machid)_del [$dbc del] 0
+ }
+ error_check_good replclear($machid)_dbc_close [$dbc close] 0
+ error_check_good replclear($machid)_txn_commit [$txn commit] 0
+}
+
+# Add a machine to a replication environment.
+proc repladd { machid } {
+ global queueenv queuedbs machids
+
+ if { [info exists queuedbs($machid)] == 1 } {
+ error "FAIL: repladd: machid $machid already exists"
+ }
+
+ set queuedbs($machid) [berkdb open -auto_commit \
+ -env $queueenv -create -recno -renumber repqueue$machid.db]
+ error_check_good repqueue_create [is_valid_db $queuedbs($machid)] TRUE
+
+ lappend machids $machid
+}
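+
+# Illustrative only (not called by the suite): minimal wiring of the
+# message queue for a two-site group using the procs above.  The machine
+# ids 1 and 2, the control/record payloads and the queue directory are
+# arbitrary; real tests also hand replsend to their environments as the
+# replication transport callback.
+proc repl_example_queue_roundtrip { qdir } {
+	global drop
+
+	# replsend consults the drop global, so make sure dropping is off.
+	set drop 0
+
+	replsetup $qdir
+	repladd 1
+	repladd 2
+
+	# Queue a dummy record from site 1 to site 2, then throw it away.
+	replsend CONTROL RECORD 1 2
+	replclear 2
+
+	replclose $qdir
+}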
+
+# Process a queue of messages, skipping every "skip_interval"th entry.
+# We traverse the entire queue, but since we skip some messages, we
+# may end up leaving things in the queue, which should get picked up
+# on a later run.
+
+proc replprocessqueue { dbenv machid { skip_interval 0 } \
+ { hold_electp NONE } { newmasterp NONE } } {
+ global queuedbs queueenv errorCode
+
+ # hold_electp is a call-by-reference variable which lets our caller
+ # know we need to hold an election.
+ if { [string compare $hold_electp NONE] != 0 } {
+ upvar $hold_electp hold_elect
+ }
+ set hold_elect 0
+
+ # newmasterp is the same idea, only returning the ID of a master
+ # given in a DB_REP_NEWMASTER return.
+ if { [string compare $newmasterp NONE] != 0 } {
+ upvar $newmasterp newmaster
+ }
+ set newmaster 0
+
+ set nproced 0
+
+ set txn [$queueenv txn]
+ set dbc [$queuedbs($machid) cursor -txn $txn]
+
+ error_check_good process_dbc($machid) \
+ [is_valid_cursor $dbc $queuedbs($machid)] TRUE
+
+ for { set dbt [$dbc get -first] } \
+ { [llength $dbt] != 0 } \
+ { set dbt [$dbc get -next] } {
+ set data [lindex [lindex $dbt 0] 1]
+
+ # If skip_interval is nonzero, we want to process messages
+ # out of order. We do this in a simple but slimy way--
+ # continue walking with the cursor without processing the
+ # message or deleting it from the queue, but do increment
+ # "nproced". The way this proc is normally used, the
+ # precise value of nproced doesn't matter--we just don't
+ # assume the queues are empty if it's nonzero. Thus,
+ # if we contrive to make sure it's nonzero, we'll always
+ # come back to records we've skipped on a later call
+ # to replprocessqueue. (If there really are no records,
+ # we'll never get here.)
+ #
+ # Skip every skip_interval'th record (and use a remainder other
+ # than zero so that we're guaranteed to really process at least
+ # one record on every call).
+ if { $skip_interval != 0 } {
+ if { $nproced % $skip_interval == 1 } {
+ incr nproced
+ continue
+ }
+ }
+
+ # We have to play an ugly cursor game here: we currently
+ # hold a lock on the page of messages, but rep_process_message
+ # might need to lock the page with a different cursor in
+ # order to send a response. So save our recno, close
+ # the cursor, and then reopen and reset the cursor.
+ set recno [lindex [lindex $dbt 0] 0]
+ error_check_good dbc_process_close [$dbc close] 0
+ error_check_good txn_commit [$txn commit] 0
+ set ret [catch {$dbenv rep_process_message \
+ [lindex $data 2] [lindex $data 0] [lindex $data 1]} res]
+ set txn [$queueenv txn]
+ set dbc [$queuedbs($machid) cursor -txn $txn]
+ set dbt [$dbc get -set $recno]
+
+ if { $ret != 0 } {
+ if { [is_substr $res DB_REP_HOLDELECTION] } {
+ set hold_elect 1
+ } else {
+ error "FAIL:[timestamp]\
+ rep_process_message returned $res"
+ }
+ }
+
+ incr nproced
+
+ $dbc del
+
+ if { $ret == 0 && $res != 0 } {
+ if { [is_substr $res DB_REP_NEWSITE] } {
+ # NEWSITE; do nothing.
+ } else {
+ set newmaster $res
+ # Break as soon as we get a NEWMASTER message;
+ # our caller needs to handle it.
+ break
+ }
+ }
+
+ if { $hold_elect == 1 } {
+ # Break also on a HOLDELECTION, for the same reason.
+ break
+ }
+
+ }
+
+ error_check_good dbc_close [$dbc close] 0
+ error_check_good txn_commit [$txn commit] 0
+
+ # Return the number of messages processed.
+ return $nproced
+}
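+
+# Illustrative only (not called by the suite): callers usually drain the
+# queues by looping over every site and processing messages until a full
+# pass moves nothing, so that anything skipped or generated in response
+# gets handled on a later pass.  envlist is assumed to be a list of
+# {env machid} pairs for replication-configured environments.
+proc repl_example_drain { envlist } {
+	while { 1 } {
+		set nproced 0
+		foreach pair $envlist {
+			set env [lindex $pair 0]
+			set machid [lindex $pair 1]
+			incr nproced [replprocessqueue $env $machid]
+		}
+		if { $nproced == 0 } {
+			break
+		}
+	}
+}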
+
+set run_repl_flag "-run_repl"
+
+proc extract_repl_args { args } {
+ global run_repl_flag
+
+ for { set arg [lindex $args [set i 0]] } \
+ { [string length $arg] > 0 } \
+ { set arg [lindex $args [incr i]] } {
+ if { [string compare $arg $run_repl_flag] == 0 } {
+ return [lindex $args [expr $i + 1]]
+ }
+ }
+ return ""
+}
+
+proc delete_repl_args { args } {
+ global run_repl_flag
+
+ set ret {}
+
+ for { set arg [lindex $args [set i 0]] } \
+ { [string length $arg] > 0 } \
+ { set arg [lindex $args [incr i]] } {
+ if { [string compare $arg $run_repl_flag] != 0 } {
+ lappend ret $arg
+ } else {
+ incr i
+ }
+ }
+ return $ret
+}
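+
+# Illustrative only: assuming the arguments are passed as individual words
+# (which is what the loops above expect), a hypothetical argument list is
+# handled like this:
+#
+#	% extract_repl_args -cachesize 1000 -run_repl master -txn
+#	master
+#	% delete_repl_args -cachesize 1000 -run_repl master -txn
+#	-cachesize 1000 -txn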
+
+global elect_serial
+global elections_in_progress
+set elect_serial 0
+
+# Start an election in a sub-process.
+proc start_election { qdir envstring nsites pri timeout {err "none"}} {
+ source ./include.tcl
+ global elect_serial elect_timeout elections_in_progress machids
+
+ incr elect_serial
+
+ set t [open "|$tclsh_path >& $testdir/ELECTION_OUTPUT.$elect_serial" w]
+
+ puts $t "source $test_path/test.tcl"
+ puts $t "replsetup $qdir"
+ foreach i $machids { puts $t "repladd $i" }
+ puts $t "set env_cmd \{$envstring\}"
+ puts $t "set dbenv \[eval \$env_cmd -errfile \
+ $testdir/ELECTION_ERRFILE.$elect_serial -errpfx FAIL: \]"
+# puts "Start election err $err, env $envstring"
+ puts $t "\$dbenv test abort $err"
+ puts $t "set res \[catch \{\$dbenv rep_elect $nsites $pri \
+ $elect_timeout\} ret\]"
+ if { $err != "none" } {
+ puts $t "\$dbenv test abort none"
+ puts $t "set res \[catch \{\$dbenv rep_elect $nsites $pri \
+ $elect_timeout\} ret\]"
+ }
+ flush $t
+
+ set elections_in_progress($elect_serial) $t
+ return $elect_serial
+}
+
+proc close_election { i } {
+ global elections_in_progress
+ set t $elections_in_progress($i)
+ puts $t "\$dbenv close"
+ close $t
+ unset elections_in_progress($i)
+}
+
+proc cleanup_elections { } {
+ global elect_serial elections_in_progress
+
+ for { set i 0 } { $i <= $elect_serial } { incr i } {
+ if { [info exists elections_in_progress($i)] != 0 } {
+ close_election $i
+ }
+ }
+
+ set elect_serial 0
+}
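+
+# Illustrative only (not called by the suite): the shape of a caller that
+# kicks off an election in a child process and then shuts it down.  The
+# env_cmd argument is assumed to be a berkdb_env command string for one of
+# the sites, the timeout value is arbitrary, and the elect_timeout global
+# must be set because start_election reads it.
+proc repl_example_election { qdir env_cmd nsites pri } {
+	global elect_timeout
+
+	set elect_timeout 5000000
+	set eid [start_election $qdir $env_cmd $nsites $pri $elect_timeout]
+
+	# ... pump messages with replprocessqueue until the election
+	# resolves, then shut the child process down ...
+
+	close_election $eid
+}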
diff --git a/bdb/test/rpc001.tcl b/bdb/test/rpc001.tcl
index 331a18cfbf1..1b65639014f 100644
--- a/bdb/test/rpc001.tcl
+++ b/bdb/test/rpc001.tcl
@@ -1,17 +1,19 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: rpc001.tcl,v 11.23 2001/01/02 20:04:56 sue Exp $
-#
-# Test RPC specifics, primarily that unsupported functions return
-# errors and such.
+# $Id: rpc001.tcl,v 11.33 2002/07/25 22:57:32 mjc Exp $
#
+# TEST rpc001
+# TEST Test RPC server timeouts for cursor, txn and env handles.
+# TEST Test RPC specifics, primarily that unsupported functions return
+# TEST errors and such.
proc rpc001 { } {
global __debug_on
global __debug_print
global errorInfo
+ global rpc_svc
source ./include.tcl
#
@@ -21,10 +23,10 @@ proc rpc001 { } {
set itime 10
puts "Rpc001: Server timeouts: resource $ttime sec, idle $itime sec"
if { [string compare $rpc_server "localhost"] == 0 } {
- set dpid [exec $util_path/berkeley_db_svc \
+ set dpid [exec $util_path/$rpc_svc \
-h $rpc_testdir -t $ttime -I $itime &]
} else {
- set dpid [exec rsh $rpc_server $rpc_path/berkeley_db_svc \
+ set dpid [exec rsh $rpc_server $rpc_path/$rpc_svc \
-h $rpc_testdir -t $ttime -I $itime&]
}
puts "\tRpc001.a: Started server, pid $dpid"
@@ -36,14 +38,14 @@ proc rpc001 { } {
set testfile "rpc001.db"
set home [file tail $rpc_testdir]
- set env [eval {berkdb env -create -mode 0644 -home $home \
+ set env [eval {berkdb_env -create -mode 0644 -home $home \
-server $rpc_server -client_timeout 10000 -txn}]
error_check_good lock_env:open [is_valid_env $env] TRUE
puts "\tRpc001.c: Opening a database"
#
# NOTE: the type of database doesn't matter, just use btree.
- set db [eval {berkdb_open -create -btree -mode 0644} \
+ set db [eval {berkdb_open -auto_commit -create -btree -mode 0644} \
-env $env $testfile]
error_check_good dbopen [is_valid_db $db] TRUE
@@ -230,9 +232,10 @@ proc rpc001 { } {
#
# We need a 2nd env just to do an op to timeout the env.
+ # Make the flags different so we don't end up sharing a handle.
#
- set env1 [eval {berkdb env -create -mode 0644 -home $home \
- -server $rpc_server -client_timeout 10000 -txn}]
+ set env1 [eval {berkdb_env -create -mode 0644 -home $home \
+ -server $rpc_server -client_timeout 10000}]
error_check_good lock_env:open [is_valid_env $env1] TRUE
puts "\tRpc001.l: Timeout idle env handle"
@@ -247,7 +250,7 @@ proc rpc001 { } {
error_check_good env_timeout \
[is_substr $errorInfo "DB_NOSERVER_ID"] 1
- exec $KILL $dpid
+ tclkill $dpid
}
proc rpc_timeoutjoin {env msg sleeptime use_txn} {
@@ -257,8 +260,10 @@ proc rpc_timeoutjoin {env msg sleeptime use_txn} {
puts -nonewline "\t$msg: Test join cursors and timeouts"
if { $use_txn } {
puts " (using txns)"
+ set txnflag "-auto_commit"
} else {
puts " (without txns)"
+ set txnflag ""
}
#
# Set up a simple set of join databases
@@ -278,32 +283,32 @@ proc rpc_timeoutjoin {env msg sleeptime use_txn} {
{apple pie} {raspberry pie} {lemon pie}
}
set fdb [eval {berkdb_open -create -btree -mode 0644} \
- -env $env -dup fruit.db]
+ $txnflag -env $env -dup fruit.db]
error_check_good dbopen [is_valid_db $fdb] TRUE
set pdb [eval {berkdb_open -create -btree -mode 0644} \
- -env $env -dup price.db]
+ $txnflag -env $env -dup price.db]
error_check_good dbopen [is_valid_db $pdb] TRUE
set ddb [eval {berkdb_open -create -btree -mode 0644} \
- -env $env -dup dessert.db]
+ $txnflag -env $env -dup dessert.db]
error_check_good dbopen [is_valid_db $ddb] TRUE
foreach kd $fruit {
set k [lindex $kd 0]
set d [lindex $kd 1]
- set ret [$fdb put $k $d]
+ set ret [eval {$fdb put} $txnflag {$k $d}]
error_check_good fruit_put $ret 0
}
error_check_good sync [$fdb sync] 0
foreach kd $price {
set k [lindex $kd 0]
set d [lindex $kd 1]
- set ret [$pdb put $k $d]
+ set ret [eval {$pdb put} $txnflag {$k $d}]
error_check_good price_put $ret 0
}
error_check_good sync [$pdb sync] 0
foreach kd $dessert {
set k [lindex $kd 0]
set d [lindex $kd 1]
- set ret [$ddb put $k $d]
+ set ret [eval {$ddb put} $txnflag {$k $d}]
error_check_good dessert_put $ret 0
}
error_check_good sync [$ddb sync] 0
@@ -326,7 +331,7 @@ proc rpc_join {env msg sleep fdb pdb ddb use_txn op} {
#
set curs_list {}
set txn_list {}
- set msgnum [expr $op * 2 + 1]
+ set msgnum [expr $op * 2 + 1]
if { $use_txn } {
puts "\t$msg$msgnum: Set up txns and join cursor"
set txn [$env txn]
@@ -346,7 +351,7 @@ proc rpc_join {env msg sleep fdb pdb ddb use_txn op} {
#
# Start a cursor, (using txn child0 in the fruit and price dbs, if
- # needed). # Just pick something simple to join on.
+ # needed). # Just pick something simple to join on.
# Then call join on the dessert db.
#
set fkey yellow
@@ -372,7 +377,7 @@ proc rpc_join {env msg sleep fdb pdb ddb use_txn op} {
set ret [$jdbc get]
error_check_bad jget [llength $ret] 0
- set msgnum [expr $op * 2 + 2]
+ set msgnum [expr $op * 2 + 2]
if { $op == 1 } {
puts -nonewline "\t$msg$msgnum: Timeout all cursors"
if { $use_txn } {
diff --git a/bdb/test/rpc002.tcl b/bdb/test/rpc002.tcl
index 6b11914c2eb..4b69265bf3a 100644
--- a/bdb/test/rpc002.tcl
+++ b/bdb/test/rpc002.tcl
@@ -1,16 +1,17 @@
-# See the file LICENSE for redistribution information.
+# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1999, 2000
+# Copyright (c) 1999-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: rpc002.tcl,v 1.7 2000/10/27 13:23:56 sue Exp $
+# $Id: rpc002.tcl,v 1.17 2002/07/16 20:53:03 bostic Exp $
#
-# RPC Test 2
-# Test invalid RPC functions and make sure we error them correctly
+# TEST rpc002
+# TEST Test invalid RPC functions and make sure we error them correctly
proc rpc002 { } {
global __debug_on
global __debug_print
global errorInfo
+ global rpc_svc
source ./include.tcl
set testfile "rpc002.db"
@@ -20,9 +21,9 @@ proc rpc002 { } {
#
puts "Rpc002: Unsupported interface test"
if { [string compare $rpc_server "localhost"] == 0 } {
- set dpid [exec $util_path/berkeley_db_svc -h $rpc_testdir &]
+ set dpid [exec $util_path/$rpc_svc -h $rpc_testdir &]
} else {
- set dpid [exec rsh $rpc_server $rpc_path/berkeley_db_svc \
+ set dpid [exec rsh $rpc_server $rpc_path/$rpc_svc \
-h $rpc_testdir &]
}
puts "\tRpc002.a: Started server, pid $dpid"
@@ -32,7 +33,7 @@ proc rpc002 { } {
puts "\tRpc002.b: Unsupported env options"
#
# Test each "pre-open" option for env's. These need to be
- # tested on the 'berkdb env' line.
+ # tested on the 'berkdb_env' line.
#
set rlist {
{ "-data_dir $rpc_testdir" "Rpc002.b0"}
@@ -50,8 +51,8 @@ proc rpc002 { } {
{ "-verbose {recovery on}" "Rpc002.b13"}
}
- set e "berkdb env -create -mode 0644 -home $home -server $rpc_server \
- -client_timeout 10000 -txn"
+ set e "berkdb_env_noerr -create -mode 0644 -home $home \
+ -server $rpc_server -client_timeout 10000 -txn"
foreach pair $rlist {
set cmd [lindex $pair 0]
set msg [lindex $pair 1]
@@ -60,7 +61,7 @@ proc rpc002 { } {
set stat [catch {eval $e $cmd} ret]
error_check_good $cmd $stat 1
error_check_good $cmd.err \
- [is_substr $errorInfo "meaningless in RPC env"] 1
+ [is_substr $errorInfo "meaningless in an RPC env"] 1
}
#
@@ -68,7 +69,7 @@ proc rpc002 { } {
# the rest)
#
puts "\tRpc002.c: Unsupported env related interfaces"
- set env [eval {berkdb env -create -mode 0644 -home $home \
+ set env [eval {berkdb_env_noerr -create -mode 0644 -home $home \
-server $rpc_server -client_timeout 10000 -txn}]
error_check_good envopen [is_valid_env $env] TRUE
set dbcmd "berkdb_open_noerr -create -btree -mode 0644 -env $env \
@@ -89,16 +90,14 @@ proc rpc002 { } {
{ " log_archive" "Rpc002.c5"}
{ " log_file {0 0}" "Rpc002.c6"}
{ " log_flush" "Rpc002.c7"}
- { " log_get -current" "Rpc002.c8"}
- { " log_register $db $testfile" "Rpc002.c9"}
- { " log_stat" "Rpc002.c10"}
- { " log_unregister $db" "Rpc002.c11"}
- { " mpool -create -pagesize 512" "Rpc002.c12"}
- { " mpool_stat" "Rpc002.c13"}
- { " mpool_sync {0 0}" "Rpc002.c14"}
- { " mpool_trickle 50" "Rpc002.c15"}
- { " txn_checkpoint -min 1" "Rpc002.c16"}
- { " txn_stat" "Rpc002.c17"}
+ { " log_cursor" "Rpc002.c8"}
+ { " log_stat" "Rpc002.c9"}
+ { " mpool -create -pagesize 512" "Rpc002.c10"}
+ { " mpool_stat" "Rpc002.c11"}
+ { " mpool_sync {0 0}" "Rpc002.c12"}
+ { " mpool_trickle 50" "Rpc002.c13"}
+ { " txn_checkpoint -min 1" "Rpc002.c14"}
+ { " txn_stat" "Rpc002.c15"}
}
foreach pair $rlist {
@@ -109,7 +108,7 @@ proc rpc002 { } {
set stat [catch {eval $env $cmd} ret]
error_check_good $cmd $stat 1
error_check_good $cmd.err \
- [is_substr $errorInfo "meaningless in RPC env"] 1
+ [is_substr $errorInfo "meaningless in an RPC env"] 1
}
error_check_good dbclose [$db close] 0
@@ -128,7 +127,7 @@ proc rpc002 { } {
set stat [catch {eval $dbcmd} ret]
error_check_good dbopen_cache $stat 1
error_check_good dbopen_cache_err \
- [is_substr $errorInfo "meaningless in RPC env"] 1
+ [is_substr $errorInfo "meaningless in an RPC env"] 1
puts "\tRpc002.d1: Try to upgrade a database"
#
@@ -136,9 +135,9 @@ proc rpc002 { } {
set stat [catch {eval {berkdb upgrade -env} $env $testfile} ret]
error_check_good dbupgrade $stat 1
error_check_good dbupgrade_err \
- [is_substr $errorInfo "meaningless in RPC env"] 1
+ [is_substr $errorInfo "meaningless in an RPC env"] 1
error_check_good envclose [$env close] 0
- exec $KILL $dpid
+ tclkill $dpid
}
diff --git a/bdb/test/rpc003.tcl b/bdb/test/rpc003.tcl
new file mode 100644
index 00000000000..76f0dca6c07
--- /dev/null
+++ b/bdb/test/rpc003.tcl
@@ -0,0 +1,166 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2001-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: rpc003.tcl,v 11.9 2002/07/16 20:53:03 bostic Exp $
+#
+# Test RPC and secondary indices.
+proc rpc003 { } {
+ source ./include.tcl
+ global dict nsecondaries
+ global rpc_svc
+
+ #
+	# First set up the files.  Secondary indices only work read-only
+	# over RPC, so we need to create the databases first without RPC
+	# and then run the checks over RPC.
+ #
+ puts "Rpc003: Secondary indices over RPC"
+ if { [string compare $rpc_server "localhost"] != 0 } {
+ puts "Cannot run to non-local RPC server. Skipping."
+ return
+ }
+ cleanup $testdir NULL
+ puts "\tRpc003.a: Creating local secondary index databases"
+
+ # Primary method/args.
+ set pmethod btree
+ set pomethod [convert_method $pmethod]
+ set pargs ""
+ set methods {dbtree dbtree}
+ set argses [convert_argses $methods ""]
+ set omethods [convert_methods $methods]
+
+ set nentries 500
+
+ puts "\tRpc003.b: ($pmethod/$methods) $nentries equal key/data pairs"
+ set pname "primary003.db"
+ set snamebase "secondary003"
+
+ # Open an environment
+ # XXX if one is not supplied!
+ set env [berkdb_env -create -home $testdir]
+ error_check_good env_open [is_valid_env $env] TRUE
+
+ # Open the primary.
+ set pdb [eval {berkdb_open -create -env} $env $pomethod $pargs $pname]
+ error_check_good primary_open [is_valid_db $pdb] TRUE
+
+ # Open and associate the secondaries
+ set sdbs {}
+ for { set i 0 } { $i < [llength $omethods] } { incr i } {
+ set sdb [eval {berkdb_open -create -env} $env \
+ [lindex $omethods $i] [lindex $argses $i] $snamebase.$i.db]
+ error_check_good second_open($i) [is_valid_db $sdb] TRUE
+
+ error_check_good db_associate($i) \
+ [$pdb associate [callback_n $i] $sdb] 0
+ lappend sdbs $sdb
+ }
+
+ set did [open $dict]
+ for { set n 0 } { [gets $did str] != -1 && $n < $nentries } { incr n } {
+ if { [is_record_based $pmethod] == 1 } {
+ set key [expr $n + 1]
+ set datum $str
+ } else {
+ set key $str
+ gets $did datum
+ }
+ set keys($n) $key
+ set data($n) [pad_data $pmethod $datum]
+
+ set ret [eval {$pdb put} {$key [chop_data $pmethod $datum]}]
+ error_check_good put($n) $ret 0
+ }
+ close $did
+ foreach sdb $sdbs {
+ error_check_good secondary_close [$sdb close] 0
+ }
+ error_check_good primary_close [$pdb close] 0
+ error_check_good env_close [$env close] 0
+
+ #
+ # We have set up our databases, so now start the server and
+ # read them over RPC.
+ #
+ set dpid [exec $util_path/$rpc_svc -h $rpc_testdir &]
+ puts "\tRpc003.c: Started server, pid $dpid"
+ tclsleep 2
+
+ set home [file tail $rpc_testdir]
+ set env [eval {berkdb_env_noerr -create -mode 0644 -home $home \
+ -server $rpc_server}]
+ error_check_good lock_env:open [is_valid_env $env] TRUE
+
+ #
+ # Attempt to send in a NULL callback to associate. It will fail
+ # if the primary and secondary are not both read-only.
+ #
+ set msg "\tRpc003.d"
+ puts "$msg: Using r/w primary and r/w secondary"
+ set popen "berkdb_open_noerr -env $env $pomethod $pargs $pname"
+ set sopen "berkdb_open_noerr -create -env $env \
+ [lindex $omethods 0] [lindex $argses 0] $snamebase.0.db"
+ rpc003_assoc_err $popen $sopen $msg
+
+ set msg "\tRpc003.e"
+ puts "$msg: Using r/w primary and read-only secondary"
+ set popen "berkdb_open_noerr -env $env $pomethod $pargs $pname"
+ set sopen "berkdb_open_noerr -env $env -rdonly \
+ [lindex $omethods 0] [lindex $argses 0] $snamebase.0.db"
+ rpc003_assoc_err $popen $sopen $msg
+
+ set msg "\tRpc003.f"
+ puts "$msg: Using read-only primary and r/w secondary"
+ set popen "berkdb_open_noerr -env $env $pomethod -rdonly $pargs $pname"
+ set sopen "berkdb_open_noerr -create -env $env \
+ [lindex $omethods 0] [lindex $argses 0] $snamebase.0.db"
+ rpc003_assoc_err $popen $sopen $msg
+
+ # Open and associate the secondaries
+ puts "\tRpc003.g: Checking secondaries, both read-only"
+ set pdb [eval {berkdb_open_noerr -env} $env \
+ -rdonly $pomethod $pargs $pname]
+ error_check_good primary_open2 [is_valid_db $pdb] TRUE
+
+ set sdbs {}
+ for { set i 0 } { $i < [llength $omethods] } { incr i } {
+ set sdb [eval {berkdb_open -env} $env -rdonly \
+ [lindex $omethods $i] [lindex $argses $i] $snamebase.$i.db]
+ error_check_good second_open2($i) [is_valid_db $sdb] TRUE
+ error_check_good db_associate2($i) \
+ [eval {$pdb associate} "" $sdb] 0
+ lappend sdbs $sdb
+ }
+ check_secondaries $pdb $sdbs $nentries keys data "Rpc003.h"
+
+ foreach sdb $sdbs {
+ error_check_good secondary_close [$sdb close] 0
+ }
+ error_check_good primary_close [$pdb close] 0
+ error_check_good env_close [$env close] 0
+
+ tclkill $dpid
+}
+
+proc rpc003_assoc_err { popen sopen msg } {
+ set pdb [eval $popen]
+ error_check_good assoc_err_popen [is_valid_db $pdb] TRUE
+
+ puts "$msg.0: NULL callback"
+ set sdb [eval $sopen]
+ error_check_good assoc_err_sopen [is_valid_db $sdb] TRUE
+ set stat [catch {eval {$pdb associate} "" $sdb} ret]
+ error_check_good db_associate:rdonly $stat 1
+ error_check_good db_associate:inval [is_substr $ret invalid] 1
+
+ puts "$msg.1: non-NULL callback"
+ set stat [catch {eval $pdb associate [callback_n 0] $sdb} ret]
+ error_check_good db_associate:callback $stat 1
+ error_check_good db_associate:rpc \
+ [is_substr $ret "not supported in RPC"] 1
+ error_check_good assoc_sclose [$sdb close] 0
+ error_check_good assoc_pclose [$pdb close] 0
+}
diff --git a/bdb/test/rpc004.tcl b/bdb/test/rpc004.tcl
new file mode 100644
index 00000000000..ca1462f3a89
--- /dev/null
+++ b/bdb/test/rpc004.tcl
@@ -0,0 +1,76 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: rpc004.tcl,v 11.6 2002/07/16 20:53:03 bostic Exp $
+#
+# TEST rpc004
+# TEST Test RPC server and security
+proc rpc004 { } {
+ global __debug_on
+ global __debug_print
+ global errorInfo
+ global passwd
+ global rpc_svc
+ source ./include.tcl
+
+ puts "Rpc004: RPC server + security"
+ cleanup $testdir NULL
+ if { [string compare $rpc_server "localhost"] == 0 } {
+ set dpid [exec $util_path/$rpc_svc \
+ -h $rpc_testdir -P $passwd &]
+ } else {
+ set dpid [exec rsh $rpc_server $rpc_path/$rpc_svc \
+ -h $rpc_testdir -P $passwd &]
+ }
+ puts "\tRpc004.a: Started server, pid $dpid"
+
+ tclsleep 2
+ remote_cleanup $rpc_server $rpc_testdir $testdir
+ puts "\tRpc004.b: Creating environment"
+
+ set testfile "rpc004.db"
+ set testfile1 "rpc004a.db"
+ set home [file tail $rpc_testdir]
+
+ set env [eval {berkdb_env -create -mode 0644 -home $home \
+ -server $rpc_server -encryptaes $passwd -txn}]
+ error_check_good lock_env:open [is_valid_env $env] TRUE
+
+ puts "\tRpc004.c: Opening a non-encrypted database"
+ #
+ # NOTE: the type of database doesn't matter, just use btree.
+ set db [eval {berkdb_open -auto_commit -create -btree -mode 0644} \
+ -env $env $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ puts "\tRpc004.d: Opening an encrypted database"
+ set db1 [eval {berkdb_open -auto_commit -create -btree -mode 0644} \
+ -env $env -encrypt $testfile1]
+ error_check_good dbopen [is_valid_db $db1] TRUE
+
+ set txn [$env txn]
+ error_check_good txn [is_valid_txn $txn $env] TRUE
+ puts "\tRpc004.e: Put/get on both databases"
+ set key "key"
+ set data "data"
+
+ set ret [$db put -txn $txn $key $data]
+ error_check_good db_put $ret 0
+ set ret [$db get -txn $txn $key]
+ error_check_good db_get $ret [list [list $key $data]]
+ set ret [$db1 put -txn $txn $key $data]
+ error_check_good db1_put $ret 0
+ set ret [$db1 get -txn $txn $key]
+ error_check_good db1_get $ret [list [list $key $data]]
+
+ error_check_good txn_commit [$txn commit] 0
+ error_check_good db_close [$db close] 0
+ error_check_good db1_close [$db1 close] 0
+ error_check_good env_close [$env close] 0
+
+	# Clean up our environment because it's encrypted.
+ remote_cleanup $rpc_server $rpc_testdir $testdir
+ tclkill $dpid
+}
diff --git a/bdb/test/rpc005.tcl b/bdb/test/rpc005.tcl
new file mode 100644
index 00000000000..f46e7355e5a
--- /dev/null
+++ b/bdb/test/rpc005.tcl
@@ -0,0 +1,137 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: rpc005.tcl,v 11.4 2002/07/16 20:53:03 bostic Exp $
+#
+# TEST rpc005
+# TEST Test RPC server handle ID sharing
+proc rpc005 { } {
+ global __debug_on
+ global __debug_print
+ global errorInfo
+ global rpc_svc
+ source ./include.tcl
+
+ puts "Rpc005: RPC server handle sharing"
+ if { [string compare $rpc_server "localhost"] == 0 } {
+ set dpid [exec $util_path/$rpc_svc \
+ -h $rpc_testdir &]
+ } else {
+ set dpid [exec rsh $rpc_server $rpc_path/$rpc_svc \
+ -h $rpc_testdir &]
+ }
+ puts "\tRpc005.a: Started server, pid $dpid"
+
+ tclsleep 2
+ remote_cleanup $rpc_server $rpc_testdir $testdir
+ puts "\tRpc005.b: Creating environment"
+
+ set testfile "rpc005.db"
+ set testfile1 "rpc005a.db"
+ set subdb1 "subdb1"
+ set subdb2 "subdb2"
+ set home [file tail $rpc_testdir]
+
+ set env [eval {berkdb_env -create -mode 0644 -home $home \
+ -server $rpc_server -txn}]
+ error_check_good lock_env:open [is_valid_env $env] TRUE
+
+ puts "\tRpc005.c: Compare identical and different configured envs"
+ set env_ident [eval {berkdb_env -home $home \
+ -server $rpc_server -txn}]
+ error_check_good lock_env:open [is_valid_env $env_ident] TRUE
+
+ set env_diff [eval {berkdb_env -home $home \
+ -server $rpc_server -txn nosync}]
+ error_check_good lock_env:open [is_valid_env $env_diff] TRUE
+
+ error_check_good ident:id [$env rpcid] [$env_ident rpcid]
+ error_check_bad diff:id [$env rpcid] [$env_diff rpcid]
+
+ error_check_good envclose [$env_diff close] 0
+ error_check_good envclose [$env_ident close] 0
+
+ puts "\tRpc005.d: Opening a database"
+ set db [eval {berkdb_open -auto_commit -create -btree -mode 0644} \
+ -env $env $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ puts "\tRpc005.e: Compare identical and different configured dbs"
+ set db_ident [eval {berkdb_open -btree} -env $env $testfile]
+ error_check_good dbopen [is_valid_db $db_ident] TRUE
+
+ set db_diff [eval {berkdb_open -btree} -env $env -rdonly $testfile]
+ error_check_good dbopen [is_valid_db $db_diff] TRUE
+
+ set db_diff2 [eval {berkdb_open -btree} -env $env -rdonly $testfile]
+ error_check_good dbopen [is_valid_db $db_diff2] TRUE
+
+ error_check_good ident:id [$db rpcid] [$db_ident rpcid]
+ error_check_bad diff:id [$db rpcid] [$db_diff rpcid]
+ error_check_good ident2:id [$db_diff rpcid] [$db_diff2 rpcid]
+
+ error_check_good db_close [$db_ident close] 0
+ error_check_good db_close [$db_diff close] 0
+ error_check_good db_close [$db_diff2 close] 0
+ error_check_good db_close [$db close] 0
+
+ puts "\tRpc005.f: Compare with a database and subdatabases"
+ set db [eval {berkdb_open -auto_commit -create -btree -mode 0644} \
+ -env $env $testfile1 $subdb1]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ set dbid [$db rpcid]
+
+ set db2 [eval {berkdb_open -auto_commit -create -btree -mode 0644} \
+ -env $env $testfile1 $subdb2]
+ error_check_good dbopen [is_valid_db $db2] TRUE
+ set db2id [$db2 rpcid]
+ error_check_bad 2subdb:id $dbid $db2id
+
+ set db_ident [eval {berkdb_open -btree} -env $env $testfile1 $subdb1]
+ error_check_good dbopen [is_valid_db $db_ident] TRUE
+ set identid [$db_ident rpcid]
+
+ set db_ident2 [eval {berkdb_open -btree} -env $env $testfile1 $subdb2]
+ error_check_good dbopen [is_valid_db $db_ident2] TRUE
+ set ident2id [$db_ident2 rpcid]
+
+ set db_diff1 [eval {berkdb_open -btree} -env $env -rdonly \
+ $testfile1 $subdb1]
+ error_check_good dbopen [is_valid_db $db_diff1] TRUE
+ set diff1id [$db_diff1 rpcid]
+
+ set db_diff2 [eval {berkdb_open -btree} -env $env -rdonly \
+ $testfile1 $subdb2]
+ error_check_good dbopen [is_valid_db $db_diff2] TRUE
+ set diff2id [$db_diff2 rpcid]
+
+ set db_diff [eval {berkdb_open -unknown} -env $env -rdonly $testfile1]
+ error_check_good dbopen [is_valid_db $db_diff] TRUE
+ set diffid [$db_diff rpcid]
+
+ set db_diff2a [eval {berkdb_open -btree} -env $env -rdonly \
+ $testfile1 $subdb2]
+ error_check_good dbopen [is_valid_db $db_diff2a] TRUE
+ set diff2aid [$db_diff2a rpcid]
+
+ error_check_good ident:id $dbid $identid
+ error_check_good ident2:id $db2id $ident2id
+ error_check_bad diff:id $dbid $diffid
+ error_check_bad diff2:id $db2id $diffid
+ error_check_bad diff3:id $diff2id $diffid
+ error_check_bad diff4:id $diff1id $diffid
+ error_check_good diff2a:id $diff2id $diff2aid
+
+ error_check_good db_close [$db_ident close] 0
+ error_check_good db_close [$db_ident2 close] 0
+ error_check_good db_close [$db_diff close] 0
+ error_check_good db_close [$db_diff1 close] 0
+ error_check_good db_close [$db_diff2 close] 0
+ error_check_good db_close [$db_diff2a close] 0
+ error_check_good db_close [$db2 close] 0
+ error_check_good db_close [$db close] 0
+ error_check_good env_close [$env close] 0
+ tclkill $dpid
+}
diff --git a/bdb/test/rsrc001.tcl b/bdb/test/rsrc001.tcl
index 6d76044f454..1d57769fda2 100644
--- a/bdb/test/rsrc001.tcl
+++ b/bdb/test/rsrc001.tcl
@@ -1,13 +1,13 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: rsrc001.tcl,v 11.18 2001/01/18 06:41:03 krinsky Exp $
+# $Id: rsrc001.tcl,v 11.23 2002/01/11 15:53:33 bostic Exp $
#
-# Recno backing file test.
-# Try different patterns of adding records and making sure that the
-# corresponding file matches
+# TEST rsrc001
+# TEST Recno backing file test. Try different patterns of adding
+# TEST records and making sure that the corresponding file matches.
proc rsrc001 { } {
source ./include.tcl
@@ -47,7 +47,7 @@ proc rsrc001 { } {
# Now fill out the backing file and create the check file.
set oid1 [open $testdir/rsrc.txt a]
set oid2 [open $testdir/check.txt w]
-
+
# This one was already put into rsrc.txt.
puts $oid2 $rec1
@@ -154,15 +154,15 @@ proc rsrc001 { } {
set rec "Last record with reopen"
puts $oid $rec
- incr key
+ incr key
set ret [eval {$db put} $txn {$key $rec}]
error_check_good put_byno_with_reopen $ret 0
puts "\tRsrc001.g:\
- Put several beyond end of file, after reopen."
+ Put several beyond end of file, after reopen with snapshot."
error_check_good db_close [$db close] 0
set db [eval {berkdb_open -create -mode 0644\
- -recno -source $testdir/rsrc.txt} $testfile]
+ -snapshot -recno -source $testdir/rsrc.txt} $testfile]
error_check_good dbopen [is_valid_db $db] TRUE
set rec "Really really last record with reopen"
@@ -171,7 +171,7 @@ proc rsrc001 { } {
puts $oid ""
puts $oid $rec
- incr key
+ incr key
incr key
incr key
incr key
@@ -179,8 +179,6 @@ proc rsrc001 { } {
set ret [eval {$db put} $txn {$key $rec}]
error_check_good put_byno_with_reopen $ret 0
-
-
error_check_good db_sync [$db sync] 0
error_check_good db_sync [$db sync] 0
diff --git a/bdb/test/rsrc002.tcl b/bdb/test/rsrc002.tcl
index d3b45c9a7f3..0cb3cf752e6 100644
--- a/bdb/test/rsrc002.tcl
+++ b/bdb/test/rsrc002.tcl
@@ -1,13 +1,14 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1999, 2000
+# Copyright (c) 1999-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: rsrc002.tcl,v 11.11 2000/11/29 15:01:06 sue Exp $
+# $Id: rsrc002.tcl,v 11.14 2002/01/11 15:53:33 bostic Exp $
#
-# Recno backing file test #2: test of set_re_delim.
-# Specify a backing file with colon-delimited records,
-# and make sure they are correctly interpreted.
+# TEST rsrc002
+# TEST Recno backing file test #2: test of set_re_delim. Specify a backing
+# TEST file with colon-delimited records, and make sure they are correctly
+# TEST interpreted.
proc rsrc002 { } {
source ./include.tcl
diff --git a/bdb/test/rsrc003.tcl b/bdb/test/rsrc003.tcl
index c93b3bbde12..f357a1e7f80 100644
--- a/bdb/test/rsrc003.tcl
+++ b/bdb/test/rsrc003.tcl
@@ -1,13 +1,13 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: rsrc003.tcl,v 11.1 2000/11/29 18:28:49 sue Exp $
+# $Id: rsrc003.tcl,v 11.5 2002/01/11 15:53:33 bostic Exp $
#
-# Recno backing file test.
-# Try different patterns of adding records and making sure that the
-# corresponding file matches
+# TEST rsrc003
+# TEST Recno backing file test. Try different patterns of adding
+# TEST records and making sure that the corresponding file matches.
proc rsrc003 { } {
source ./include.tcl
global fixed_len
@@ -26,7 +26,7 @@ proc rsrc003 { } {
set bigrec3 [replicate "This is record 3 " 512]
set orig_fixed_len $fixed_len
- set rlist {
+ set rlist {
{{$rec1 $rec2 $rec3} "small records" }
{{$bigrec1 $bigrec2 $bigrec3} "large records" }}
@@ -65,26 +65,26 @@ proc rsrc003 { } {
puts \
"Rsrc003: Testing with disk-backed database with $msg."
}
-
+
puts -nonewline \
"\tRsrc003.a: Read file, rewrite last record;"
puts " write it out and diff"
set db [eval {berkdb_open -create -mode 0644 -recno \
-len $reclen -source $testdir/rsrc.txt} $testfile]
error_check_good dbopen [is_valid_db $db] TRUE
-
+
# Read the last record; replace it (don't change it).
# Then close the file and diff the two files.
set txn ""
set dbc [eval {$db cursor} $txn]
error_check_good db_cursor \
[is_valid_cursor $dbc $db] TRUE
-
+
set rec [$dbc get -last]
error_check_good get_last [llength [lindex $rec 0]] 2
set key [lindex [lindex $rec 0] 0]
set data [lindex [lindex $rec 0] 1]
-
+
# Get the last record from the text file
set oid [open $testdir/rsrc.txt]
set laststr ""
@@ -95,17 +95,17 @@ proc rsrc003 { } {
close $oid
set data [sanitize_record $data]
error_check_good getlast $data $laststr
-
+
set ret [eval {$db put} $txn {$key $data}]
error_check_good replace_last $ret 0
-
+
error_check_good curs_close [$dbc close] 0
error_check_good db_sync [$db sync] 0
error_check_good db_sync [$db sync] 0
error_check_good \
diff1($testdir/rsrc.txt,$testdir/check.txt) \
[filecmp $testdir/rsrc.txt $testdir/check.txt] 0
-
+
puts -nonewline "\tRsrc003.b: "
puts "Append some records in tree and verify in file."
set oid [open $testdir/check.txt a]
@@ -124,7 +124,7 @@ proc rsrc003 { } {
set ret [filecmp $testdir/rsrc.txt $testdir/check.txt]
error_check_good \
diff2($testdir/{rsrc.txt,check.txt}) $ret 0
-
+
puts "\tRsrc003.c: Append by record number"
set oid [open $testdir/check.txt a]
for {set i 1} {$i < 10} {incr i} {
@@ -136,14 +136,14 @@ proc rsrc003 { } {
set ret [eval {$db put} $txn {$key $rec}]
error_check_good put_byno $ret 0
}
-
+
error_check_good db_sync [$db sync] 0
error_check_good db_sync [$db sync] 0
close $oid
set ret [filecmp $testdir/rsrc.txt $testdir/check.txt]
error_check_good \
diff3($testdir/{rsrc.txt,check.txt}) $ret 0
-
+
puts \
"\tRsrc003.d: Verify proper syncing of changes on close."
error_check_good Rsrc003:db_close [$db close] 0
@@ -171,4 +171,3 @@ proc rsrc003 { } {
set fixed_len $orig_fixed_len
return
}
-
diff --git a/bdb/test/rsrc004.tcl b/bdb/test/rsrc004.tcl
new file mode 100644
index 00000000000..f6c2f997eb8
--- /dev/null
+++ b/bdb/test/rsrc004.tcl
@@ -0,0 +1,52 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2001-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: rsrc004.tcl,v 11.3 2002/01/11 15:53:33 bostic Exp $
+#
+# TEST rsrc004
+# TEST Recno backing file test for EOF-terminated records.
+proc rsrc004 { } {
+ source ./include.tcl
+
+ foreach isfixed { 0 1 } {
+ cleanup $testdir NULL
+
+ # Create the backing text file.
+ set oid1 [open $testdir/rsrc.txt w]
+ if { $isfixed == 1 } {
+ puts -nonewline $oid1 "record 1xxx"
+ puts -nonewline $oid1 "record 2xxx"
+ } else {
+ puts $oid1 "record 1xxx"
+ puts $oid1 "record 2xxx"
+ }
+ puts -nonewline $oid1 "record 3"
+ close $oid1
+
+ set args "-create -mode 0644 -recno -source $testdir/rsrc.txt"
+ if { $isfixed == 1 } {
+ append args " -len [string length "record 1xxx"]"
+ set match "record 3 "
+ puts "Rsrc004: EOF-terminated recs: fixed length"
+ } else {
+ puts "Rsrc004: EOF-terminated recs: variable length"
+ set match "record 3"
+ }
+
+ puts "\tRsrc004.a: Read file, verify correctness."
+ set db [eval berkdb_open $args "$testdir/rsrc004.db"]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ # Read the last record
+ set dbc [eval {$db cursor} ""]
+ error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE
+
+ set rec [$dbc get -last]
+ error_check_good get_last $rec [list [list 3 $match]]
+
+ error_check_good dbc_close [$dbc close] 0
+ error_check_good db_close [$db close] 0
+ }
+}
diff --git a/bdb/test/scr001/chk.code b/bdb/test/scr001/chk.code
new file mode 100644
index 00000000000..eb01d8614b3
--- /dev/null
+++ b/bdb/test/scr001/chk.code
@@ -0,0 +1,37 @@
+#!/bin/sh -
+#
+# $Id: chk.code,v 1.10 2002/02/04 16:03:26 bostic Exp $
+#
+# Check to make sure that the code samples in the documents build.
+
+d=../..
+
+[ -d $d/docs_src ] || {
+ echo 'FAIL: cannot find source distribution directory.'
+ exit 1
+}
+[ -f ../libdb.a ] || (cd .. && make libdb.a) || {
+ echo 'FAIL: unable to find or build libdb.a'
+ exit 1
+}
+
+for i in `find $d/docs_src -name '*.cs'`; do
+ echo " compiling $i"
+ sed -e 's/m4_include(\(.*\))/#include <\1>/g' \
+ -e 's/m4_[a-z]*[(\[)]*//g' \
+ -e 's/(\[//g' \
+ -e '/argv/!s/])//g' \
+ -e 's/dnl//g' \
+ -e 's/__GT__/>/g' \
+ -e 's/__LB__/[/g' \
+ -e 's/__LT__/</g' \
+ -e 's/__RB__/]/g' < $i > t.c
+ if cc -Wall -Werror -I.. t.c ../libdb.a -o t; then
+ :
+ else
+ echo "FAIL: unable to compile $i"
+ exit 1
+ fi
+done
+
+exit 0
diff --git a/bdb/test/scr002/chk.def b/bdb/test/scr002/chk.def
new file mode 100644
index 00000000000..7d5e6670f63
--- /dev/null
+++ b/bdb/test/scr002/chk.def
@@ -0,0 +1,64 @@
+#!/bin/sh -
+#
+# $Id: chk.def,v 1.9 2002/03/27 04:32:57 bostic Exp $
+#
+# Check to make sure we haven't forgotten to add any interfaces
+# to the Win32 libdb.def file.
+
+d=../..
+
+# Test must be run from the top-level directory, not from a test directory.
+[ -f $d/LICENSE ] || {
+ echo 'FAIL: cannot find source distribution directory.'
+ exit 1
+}
+
+f=$d/build_win32/libdb.def
+t1=__1
+t2=__2
+
+exitv=0
+
+sed '/; /d' $f |
+ egrep @ |
+ awk '{print $1}' |
+ sed -e '/db_xa_switch/d' \
+ -e '/^__/d' -e '/^;/d' |
+ sort > $t1
+
+egrep __P $d/dbinc_auto/ext_prot.in |
+ sed '/^[a-z]/!d' |
+ awk '{print $2}' |
+ sed 's/^\*//' |
+ sed '/^__/d' | sort > $t2
+
+if cmp -s $t1 $t2 ; then
+ :
+else
+ echo "<<< libdb.def >>> DB include files"
+ diff $t1 $t2
+ echo "FAIL: missing items in libdb.def file."
+ exitv=1
+fi
+
+# Check to make sure we don't have any extras in the libdb.def file.
+sed '/; /d' $f |
+ egrep @ |
+ awk '{print $1}' |
+ sed -e '/__db_global_values/d' > $t1
+
+for i in `cat $t1`; do
+ if egrep $i $d/*/*.c > /dev/null; then
+ :
+ else
+ echo "$f: $i not found in DB sources"
+ fi
+done > $t2
+
+test -s $t2 && {
+ cat $t2
+ echo "FAIL: found unnecessary items in libdb.def file."
+ exitv=1
+}
+
+exit $exitv
diff --git a/bdb/test/scr003/chk.define b/bdb/test/scr003/chk.define
new file mode 100644
index 00000000000..f73355eddf6
--- /dev/null
+++ b/bdb/test/scr003/chk.define
@@ -0,0 +1,77 @@
+#!/bin/sh -
+#
+# $Id: chk.define,v 1.21 2002/03/27 04:32:58 bostic Exp $
+#
+# Check to make sure that all #defines are actually used.
+
+d=../..
+
+[ -f $d/LICENSE ] || {
+ echo 'FAIL: cannot find source distribution directory.'
+ exit 1
+}
+
+exitv=0
+t1=__1
+t2=__2
+t3=__3
+
+egrep '^#define' $d/dbinc/*.h $d/dbinc/*.in |
+ sed -e '/db_185.in/d' -e '/xa.h/d' |
+ awk '{print $2}' |
+ sed -e '/^B_DELETE/d' \
+ -e '/^B_MAX/d' \
+ -e '/^CIRCLEQ_/d' \
+ -e '/^DB_BTREEOLDVER/d' \
+ -e '/^DB_HASHOLDVER/d' \
+ -e '/^DB_LOCKVERSION/d' \
+ -e '/^DB_MAX_PAGES/d' \
+ -e '/^DB_QAMOLDVER/d' \
+ -e '/^DB_TXNVERSION/d' \
+ -e '/^DB_UNUSED/d' \
+ -e '/^DEFINE_DB_CLASS/d' \
+ -e '/^HASH_UNUSED/d' \
+ -e '/^LIST_/d' \
+ -e '/^LOG_OP/d' \
+ -e '/^MINFILL/d' \
+ -e '/^MUTEX_FIELDS/d' \
+ -e '/^NCACHED2X/d' \
+ -e '/^NCACHED30/d' \
+ -e '/^PAIR_MASK/d' \
+ -e '/^P_16_COPY/d' \
+ -e '/^P_32_COPY/d' \
+ -e '/^P_32_SWAP/d' \
+ -e '/^P_TO_UINT16/d' \
+ -e '/^QPAGE_CHKSUM/d' \
+ -e '/^QPAGE_NORMAL/d' \
+ -e '/^QPAGE_SEC/d' \
+ -e '/^SH_CIRCLEQ_/d' \
+ -e '/^SH_LIST_/d' \
+ -e '/^SH_TAILQ_/d' \
+ -e '/^SIZEOF_PAGE/d' \
+ -e '/^TAILQ_/d' \
+ -e '/^WRAPPED_CLASS/d' \
+ -e '/^__BIT_TYPES_DEFINED__/d' \
+ -e '/^__DBC_INTERNAL/d' \
+ -e '/^i_/d' \
+ -e '/_H_/d' \
+ -e 's/(.*//' | sort > $t1
+
+find $d -name '*.c' -o -name '*.cpp' > $t2
+for i in `cat $t1`; do
+ if egrep -w $i `cat $t2` > /dev/null; then
+ :;
+ else
+ f=`egrep -l "#define.*$i" $d/dbinc/*.h $d/dbinc/*.in |
+ sed 's;\.\.\/\.\.\/dbinc/;;' | tr -s "[:space:]" " "`
+ echo "FAIL: $i: $f"
+ fi
+done | sort -k 2 > $t3
+
+test -s $t3 && {
+ cat $t3
+ echo "FAIL: found unused #defines"
+ exit 1
+}
+
+exit $exitv
diff --git a/bdb/test/scr004/chk.javafiles b/bdb/test/scr004/chk.javafiles
new file mode 100644
index 00000000000..d30c5e3e779
--- /dev/null
+++ b/bdb/test/scr004/chk.javafiles
@@ -0,0 +1,31 @@
+#!/bin/sh -
+#
+# $Id: chk.javafiles,v 1.5 2002/01/30 19:50:52 bostic Exp $
+#
+# Check to make sure we haven't forgotten to add any Java files to the list
+# of source files in the Makefile.
+
+d=../..
+
+[ -f $d/LICENSE ] || {
+ echo 'FAIL: cannot find source distribution directory.'
+ exit 1
+}
+
+f=$d/dist/Makefile.in
+j=$d/java/src/com/sleepycat
+
+t1=__1
+t2=__2
+
+find $j/db/ $j/examples $d/rpc_server/java -name \*.java -print |
+ sed -e 's/^.*\///' | sort > $t1
+tr ' \t' '\n' < $f | sed -e '/\.java$/!d' -e 's/^.*\///' | sort > $t2
+
+cmp $t1 $t2 > /dev/null || {
+ echo "<<< java source files >>> Makefile"
+ diff $t1 $t2
+ exit 1
+}
+
+exit 0
diff --git a/bdb/test/scr005/chk.nl b/bdb/test/scr005/chk.nl
new file mode 100644
index 00000000000..47c7ff74d4b
--- /dev/null
+++ b/bdb/test/scr005/chk.nl
@@ -0,0 +1,112 @@
+#!/bin/sh -
+#
+# $Id: chk.nl,v 1.6 2002/01/07 15:12:12 bostic Exp $
+#
+# Check to make sure that there are no trailing newlines in __db_err calls.
+
+d=../..
+
+[ -f $d/README ] || {
+ echo "FAIL: chk.nl can't find the source directory."
+ exit 1
+}
+
+cat << END_OF_CODE > t.c
+#include <sys/types.h>
+
+#include <errno.h>
+#include <stdio.h>
+
+int chk(FILE *, char *);
+
+int
+main(argc, argv)
+ int argc;
+ char *argv[];
+{
+ FILE *fp;
+ int exitv;
+
+ for (exitv = 0; *++argv != NULL;) {
+ if ((fp = fopen(*argv, "r")) == NULL) {
+ fprintf(stderr, "%s: %s\n", *argv, strerror(errno));
+ return (1);
+ }
+ if (chk(fp, *argv))
+ exitv = 1;
+ (void)fclose(fp);
+ }
+ return (exitv);
+}
+
+int
+chk(fp, name)
+ FILE *fp;
+ char *name;
+{
+ int ch, exitv, line, q;
+
+ exitv = 0;
+ for (ch = 'a', line = 1;;) {
+ if ((ch = getc(fp)) == EOF)
+ return (exitv);
+ if (ch == '\n') {
+ ++line;
+ continue;
+ }
+ if (ch != '_') continue;
+ if ((ch = getc(fp)) != '_') continue;
+ if ((ch = getc(fp)) != 'd') continue;
+ if ((ch = getc(fp)) != 'b') continue;
+ if ((ch = getc(fp)) != '_') continue;
+ if ((ch = getc(fp)) != 'e') continue;
+ if ((ch = getc(fp)) != 'r') continue;
+ if ((ch = getc(fp)) != 'r') continue;
+ while ((ch = getc(fp)) != '"') {
+ if (ch == EOF)
+ return (exitv);
+ if (ch == '\n')
+ ++line;
+ }
+ while ((ch = getc(fp)) != '"')
+ switch (ch) {
+ case EOF:
+ return (exitv);
+ case '\\n':
+ ++line;
+ break;
+ case '.':
+ if ((ch = getc(fp)) != '"')
+ ungetc(ch, fp);
+ else {
+ fprintf(stderr,
+ "%s: <period> at line %d\n", name, line);
+ exitv = 1;
+ }
+ break;
+ case '\\\\':
+ if ((ch = getc(fp)) != 'n')
+ ungetc(ch, fp);
+ else if ((ch = getc(fp)) != '"')
+ ungetc(ch, fp);
+ else {
+ fprintf(stderr,
+ "%s: <newline> at line %d\n", name, line);
+ exitv = 1;
+ }
+ break;
+ }
+ }
+ return (exitv);
+}
+END_OF_CODE
+
+cc t.c -o t
+if ./t $d/*/*.[ch] $d/*/*.cpp $d/*/*.in ; then
+ :
+else
+ echo "FAIL: found __db_err calls ending with periods/newlines."
+ exit 1
+fi
+
+exit 0
diff --git a/bdb/test/scr006/chk.offt b/bdb/test/scr006/chk.offt
new file mode 100644
index 00000000000..6800268d2a2
--- /dev/null
+++ b/bdb/test/scr006/chk.offt
@@ -0,0 +1,36 @@
+#!/bin/sh -
+#
+# $Id: chk.offt,v 1.9 2001/10/26 13:40:15 bostic Exp $
+#
+# Make sure that no off_t's have snuck into the release.
+
+d=../..
+
+[ -f $d/LICENSE ] || {
+ echo 'FAIL: cannot find source distribution directory.'
+ exit 1
+}
+
+t=__1
+
+egrep -w off_t $d/*/*.[ch] $d/*/*.in |
+sed -e "/#undef off_t/d" \
+ -e "/mp_fopen.c:.*can't use off_t's here/d" \
+ -e "/mp_fopen.c:.*size or type off_t's or/d" \
+ -e "/mp_fopen.c:.*where an off_t is 32-bits/d" \
+ -e "/mutex\/tm.c:/d" \
+ -e "/os_map.c:.*(off_t)0))/d" \
+ -e "/os_rw.c:.*(off_t)db_iop->pgno/d" \
+ -e "/os_seek.c:.*off_t offset;/d" \
+ -e "/os_seek.c:.*offset = /d" \
+ -e "/test_perf\/perf_misc.c:/d" \
+ -e "/test_server\/dbs.c:/d" \
+ -e "/test_vxworks\/vx_mutex.c:/d" > $t
+
+test -s $t && {
+ cat $t
+ echo "FAIL: found questionable off_t usage"
+ exit 1
+}
+
+exit 0
diff --git a/bdb/test/scr007/chk.proto b/bdb/test/scr007/chk.proto
new file mode 100644
index 00000000000..ae406fa23fe
--- /dev/null
+++ b/bdb/test/scr007/chk.proto
@@ -0,0 +1,45 @@
+#!/bin/sh -
+#
+# $Id: chk.proto,v 1.8 2002/03/27 04:32:59 bostic Exp $
+#
+# Check to make sure that prototypes are actually needed.
+
+d=../..
+
+[ -f $d/LICENSE ] || {
+ echo 'FAIL: cannot find source distribution directory.'
+ exit 1
+}
+
+t1=__1
+t2=__2
+t3=__3
+
+egrep '__P' $d/dbinc_auto/*.h |
+ sed -e 's/[ ][ ]*__P.*//' \
+ -e 's/^.*[ *]//' \
+ -e '/__db_cprint/d' \
+ -e '/__db_lprint/d' \
+ -e '/__db_noop_log/d' \
+ -e '/__db_prnpage/d' \
+ -e '/__db_txnlist_print/d' \
+ -e '/__db_util_arg/d' \
+ -e '/__ham_func2/d' \
+ -e '/__ham_func3/d' \
+ -e '/_getpgnos/d' \
+ -e '/_print$/d' \
+ -e '/_read$/d' > $t1
+
+find $d -name '*.in' -o -name '*.[ch]' -o -name '*.cpp' > $t2
+for i in `cat $t1`; do
+ c=$(egrep -low $i $(cat $t2) | wc -l)
+ echo "$i: $c"
+done | egrep ' 1$' > $t3
+
+test -s $t3 && {
+ cat $t3
+ echo "FAIL: found unnecessary prototypes."
+ exit 1
+}
+
+exit 0
diff --git a/bdb/test/scr008/chk.pubdef b/bdb/test/scr008/chk.pubdef
new file mode 100644
index 00000000000..4f59e831b25
--- /dev/null
+++ b/bdb/test/scr008/chk.pubdef
@@ -0,0 +1,179 @@
+#!/bin/sh -
+#
+# Reconcile the list of public defines with the man pages and the Java files.
+
+d=../..
+
+[ -f $d/LICENSE ] || {
+ echo 'FAIL: cannot find source distribution directory.'
+ exit 1
+}
+
+p=$d/dist/pubdef.in
+
+exitv=0
+
+# Check that pubdef.in has everything listed in m4.links.
+f=$d/docs_src/m4/m4.links
+sed -n \
+ -e 's/^\$1, \(DB_[^,]*\).*/\1/p' \
+ -e d < $f |
+while read name; do
+ if `egrep -w "$name" $p > /dev/null`; then
+ :
+ else
+ echo "$f: $name is missing from $p"
+ exitv=1
+ fi
+done
+
+# Check that m4.links has everything listed in pubdef.in.
+f=$d/docs_src/m4/m4.links
+sed '/^#/d' $p |
+while read name isdoc isinc isjava; do
+ if `egrep -w "^.1, $name" $f > /dev/null`; then
+ [ "X$isdoc" != "XD" ] && {
+ echo "$name should not appear in $f"
+ exitv=1
+ }
+ else
+ [ "X$isdoc" = "XD" ] && {
+ echo "$name does not appear in $f"
+ exitv=1;
+ }
+ fi
+done
+
+# Check that pubdef.in has everything listed in db.in.
+f=$d/dbinc/db.in
+sed -n \
+ -e 's/^#define[ ]*\(DB_[A-Z_0-9]*\).*/\1/p' \
+ -e 's/^[ ]*\(DB_[A-Z_]*\)=[0-9].*/\1/p' \
+ -e d < $f |
+while read name; do
+ if `egrep -w "$name" $p > /dev/null`; then
+ :
+ else
+ echo "$f: $name is missing from $p"
+ exitv=1
+ fi
+done
+
+# Check that db.in has everything listed in pubdef.in.
+f=$d/dbinc/db.in
+sed '/^#/d' $p |
+while read name isdoc isinc isjava; do
+ if `egrep -w "#define[ ]$name|[ ][ ]*$name=[0-9][0-9]*" \
+ $f > /dev/null`; then
+ [ "X$isinc" != "XI" ] && {
+ echo "$name should not appear in $f"
+ exitv=1
+ }
+ else
+ [ "X$isinc" = "XI" ] && {
+ echo "$name does not appear in $f"
+ exitv=1
+ }
+ fi
+done
+
+# Check that pubdef.in has everything listed in DbConstants.java.
+f=$d/java/src/com/sleepycat/db/DbConstants.java
+sed -n -e 's/.*static final int[ ]*\([^ ]*\).*/\1/p' < $f |
+while read name; do
+ if `egrep -w "$name" $p > /dev/null`; then
+ :
+ else
+ echo "$f: $name is missing from $p"
+ exitv=1
+ fi
+done
+
+# Check that DbConstants.java has everything listed in pubdef.in.
+f=$d/java/src/com/sleepycat/db/DbConstants.java
+sed '/^#/d' $p |
+while read name isdoc isinc isjava; do
+ if `egrep -w "static final int[ ]$name =" $f > /dev/null`; then
+ [ "X$isjava" != "XJ" ] && {
+ echo "$name should not appear in $f"
+ exitv=1
+ }
+ else
+ [ "X$isjava" = "XJ" ] && {
+ echo "$name does not appear in $f"
+ exitv=1
+ }
+ fi
+done
+
+# Check that pubdef.in has everything listed in Db.java.
+f=$d/java/src/com/sleepycat/db/Db.java
+sed -n -e 's/.*static final int[ ]*\([^ ;]*\).*/\1/p' < $f |
+while read name; do
+ if `egrep -w "$name" $p > /dev/null`; then
+ :
+ else
+ echo "$f: $name is missing from $p"
+ exitv=1;
+ fi
+done
+sed -n -e 's/^[ ]*\([^ ]*\) = DbConstants\..*/\1/p' < $f |
+while read name; do
+ if `egrep -w "$name" $p > /dev/null`; then
+ :
+ else
+ echo "$f: $name is missing from $p"
+ exitv=1
+ fi
+done
+
+# Check that Db.java has all of the Java case values listed in pubdef.in.
+# Any J entries should appear twice -- once as a static final int, with
+# no initialization value, and once assigned to the DbConstants value. Any
+# C entries should appear once as a static final int, with an initialization
+# value.
+f=$d/java/src/com/sleepycat/db/Db.java
+sed '/^#/d' $p |
+while read name isdoc isinc isjava; do
+ if `egrep -w "static final int[ ]$name;$" $f > /dev/null`; then
+ [ "X$isjava" != "XJ" ] && {
+ echo "$name should not appear in $f"
+ exitv=1
+ }
+ else
+ [ "X$isjava" = "XJ" ] && {
+ echo "$name does not appear in $f"
+ exitv=1
+ }
+ fi
+done
+sed '/^#/d' $p |
+while read name isdoc isinc isjava; do
+ if `egrep -w "= DbConstants.$name;" $f > /dev/null`; then
+ [ "X$isjava" != "XJ" ] && {
+ echo "$name should not appear in $f"
+ exitv=1
+ }
+ else
+ [ "X$isjava" = "XJ" ] && {
+ echo "$name does not appear in $f"
+ exitv=1
+ }
+ fi
+done
+sed '/^#/d' $p |
+while read name isdoc isinc isjava; do
+ if `egrep "static final int[ ]$name =.*;" $f > /dev/null`; then
+ [ "X$isjava" != "XC" ] && {
+ echo "$name should not appear in $f"
+ exitv=1
+ }
+ else
+ [ "X$isjava" = "XC" ] && {
+ echo "$name does not appear in $f"
+ exitv=1
+ }
+ fi
+done
+
+exit $exitv
diff --git a/bdb/test/scr009/chk.srcfiles b/bdb/test/scr009/chk.srcfiles
new file mode 100644
index 00000000000..4f09a2890f6
--- /dev/null
+++ b/bdb/test/scr009/chk.srcfiles
@@ -0,0 +1,39 @@
+#!/bin/sh -
+#
+# $Id: chk.srcfiles,v 1.10 2002/02/04 22:25:33 bostic Exp $
+#
+# Check to make sure we haven't forgotten to add any files to the list
+# of source files Win32 uses to build its dsp files.
+
+d=../..
+
+[ -f $d/LICENSE ] || {
+ echo 'FAIL: cannot find source distribution directory.'
+ exit 1
+}
+
+f=$d/dist/srcfiles.in
+t1=__1
+t2=__2
+
+sed -e '/^[ #]/d' \
+ -e '/^$/d' < $f |
+ awk '{print $1}' > $t1
+find $d -type f |
+ sed -e 's/^\.\.\/\.\.\///' \
+ -e '/^build[^_]/d' \
+ -e '/^test\//d' \
+ -e '/^test_server/d' \
+ -e '/^test_thread/d' \
+ -e '/^test_vxworks/d' |
+ egrep '\.c$|\.cpp$|\.def$|\.rc$' |
+ sed -e '/perl.DB_File\/version.c/d' |
+ sort > $t2
+
+cmp $t1 $t2 > /dev/null || {
+ echo "<<< srcfiles.in >>> existing files"
+ diff $t1 $t2
+ exit 1
+}
+
+exit 0
diff --git a/bdb/test/scr010/chk.str b/bdb/test/scr010/chk.str
new file mode 100644
index 00000000000..2b5698c0ff2
--- /dev/null
+++ b/bdb/test/scr010/chk.str
@@ -0,0 +1,31 @@
+#!/bin/sh -
+#
+# $Id: chk.str,v 1.5 2001/10/12 17:55:36 bostic Exp $
+#
+# Check spelling in quoted strings.
+
+d=../..
+
+[ -f $d/LICENSE ] || {
+ echo 'FAIL: cannot find source distribution directory.'
+ exit 1
+}
+
+t1=__t1
+
+sed -e '/^#include/d' \
+ -e '/revid/d' \
+ -e '/"/!d' \
+ -e 's/^[^"]*//' \
+ -e 's/%s/ /g' \
+ -e 's/[^"]*$//' \
+ -e 's/\\[nt]/ /g' $d/*/*.c $d/*/*.cpp |
+spell | sort | comm -23 /dev/stdin spell.ok > $t1
+
+test -s $t1 && {
+ cat $t1
+ echo "FAIL: found questionable spelling in strings."
+ exit 1
+}
+
+exit 0
diff --git a/bdb/test/scr010/spell.ok b/bdb/test/scr010/spell.ok
new file mode 100644
index 00000000000..18af8d1306d
--- /dev/null
+++ b/bdb/test/scr010/spell.ok
@@ -0,0 +1,825 @@
+AES
+AJVX
+ALLDB
+API
+APP
+AccessExample
+Acflmo
+Aclmop
+Ahlm
+Ahm
+BCFILprRsvVxX
+BCc
+BDBXXXXXX
+BH
+BI
+BII
+BINTERNAL
+BTREE
+Bc
+BerkeleyDB
+BtRecExample
+Btree
+CD
+CDB
+CDS
+CDdFILTVvX
+CFILpRsv
+CFLprsvVxX
+CFh
+CHKSUM
+CLpsvxX
+CONFIG
+CdFILTvX
+ClassNotFoundException
+Config
+DBC
+DBENV
+DBP
+DBS
+DBSDIR
+DBT
+DBTYPE
+DBcursor
+DONOTINDEX
+DS
+DUP
+DUPMASTER
+DUPSORT
+Db
+DbAppendRecno
+DbAttachImpl
+DbBtreeCompare
+DbBtreePrefix
+DbBtreeStat
+DbDeadlockException
+DbDupCompare
+DbEnv
+DbEnvFeedback
+DbErrcall
+DbException
+DbFeedback
+DbHash
+DbHashStat
+DbKeyRange
+DbLock
+DbLockNotGrantedException
+DbLockRequest
+DbLockStat
+DbLogStat
+DbLogc
+DbLsn
+DbMemoryException
+DbMpoolFStat
+DbMpoolFile
+DbMpoolStat
+DbPreplist
+DbQueueStat
+DbRecoveryInit
+DbRepStat
+DbRepTransport
+DbRunRecoveryException
+DbSecondaryKeyCreate
+DbTxn
+DbTxnRecover
+DbTxnStat
+DbUtil
+DbXAResource
+DbXid
+Dbc
+Dbt
+Dde
+Deref'ing
+EIO
+EIRT
+EIi
+ENV
+EnvExample
+EnvInfoDelete
+Exp
+FIXEDLEN
+Fd
+Ff
+Fh
+FileNotFoundException
+GetFileInformationByHandle
+GetJavaVM
+GetJoin
+HOFFSET
+HOLDELECTION
+Hashtable
+ILo
+ILprR
+INDX
+INIT
+IREAD
+ISSET
+IWR
+IWRITE
+Ik
+KEYEMPTY
+KEYEXIST
+KeyRange
+LBTREE
+LOCKDOWN
+LOGC
+LRECNO
+LRU
+LSN
+Lcom
+Ljava
+Ll
+LockExample
+LogRegister
+LpRsS
+LprRsS
+MEM
+MMDDhhmm
+MPOOL
+MPOOLFILE
+MapViewOfFile
+Maxid
+Mb
+Mbytes
+Metadata
+Metapage
+Mpool
+MpoolExample
+Mutex
+NEWMASTER
+NEWSITE
+NG
+NODUP
+NODUPDATA
+NOLOCKING
+NOMMAP
+NOMORE
+NOORDERCHK
+NOPANIC
+NOSERVER
+NOSYNC
+NOTFOUND
+NOTGRANTED
+NOTYPE
+NOWAIT
+NP
+NoP
+NoqV
+NqV
+NrV
+NsV
+OLDVERSION
+ORDERCHKONLY
+Offpage
+OpenFileMapping
+OutputStream
+PGNO
+PID
+PREV
+Pgno
+RECNO
+RECNOSYNC
+RECNUM
+RINTERNAL
+RMW
+RPC
+RT
+RUNRECOVERY
+Recno
+RepElectResult
+RepProcessMessage
+SERVERPROG
+SERVERVERS
+SETFD
+SHA
+SS
+Shm
+Sleepycat
+Subdatabase
+TDS
+TESTDIR
+TID
+TMP
+TMPDIR
+TODO
+TPS
+TXN
+TXNID
+TXNs
+Tcl
+TempFolder
+TestKeyRange
+TestLogc
+TpcbExample
+Tt
+Txn
+Txnid
+Txns
+UID
+UNAVAIL
+USERMEM
+Unencrypted
+UnmapViewOfFile
+VM
+VX
+Vv
+VvW
+VvXxZ
+Vvw
+Vx
+VxWorks
+Waitsfor
+XA
+XAException
+Xid
+XxZ
+YIELDCPU
+YY
+abc
+abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq
+abcdef
+abs
+addpage
+addr
+addrem
+adj
+afterop
+ahr
+alldb
+alloc
+alsVv
+amx
+anum
+appl
+appname
+archivedir
+arg
+args
+ata
+badkey
+berkdb
+berkeley
+bfname
+bfree
+bigpages
+bnum
+bostic
+bqual
+bsize
+bt
+btcompare
+btrec
+btree
+buf
+bylsn
+bypage
+byteswap
+byteswapped
+bytevalue
+cachesize
+cadjust
+callpgin
+cd
+cdb
+cdel
+ceVv
+ceh
+celmNrtVZ
+celmNtV
+celmNtVZ
+cget
+charkey
+charset
+chgpg
+chkpoint
+chkpt
+chksum
+ckp
+cksum
+clearerr
+clientrun
+cmdargs
+cnt
+compareproc
+compat
+conf
+config
+copypage
+cp
+crdel
+creat
+curadj
+curlsn
+datalen
+db
+dbc
+dbclient
+dbclose
+dbe
+dbenv
+dbkill
+dbm
+dbmclose
+dbminit
+dbobj
+dbopen
+dbp
+dbreg
+dbremove
+dbrename
+dbs
+dbt
+dbtruncate
+dbverify
+dd
+def
+del
+delext
+delim
+dev
+df
+dh
+dir
+dirfno
+dist
+dists
+dlen
+ds
+dsize
+dup
+dup'ed
+dupcompare
+dups
+dupset
+dupsort
+efh
+eid
+electinit
+electsend
+electvote
+electwait
+encryptaes
+encryptany
+endian
+env
+envid
+envremove
+eof
+errcall
+errfile
+errno
+errpfx
+excl
+extentsize
+faststat
+fclose
+fcntl
+fcreate
+fd
+ff
+ffactor
+fget
+fh
+fid
+fileid
+fileopen
+firstkey
+fiv
+flushcommit
+foo
+fopen
+formatID
+fput
+freelist
+fset
+fstat
+fsync
+ftype
+func
+fv
+gbytes
+gc'ed
+gen
+getBranchQualifier
+getFormatId
+getGlobalTransactionId
+gettime
+gettimeofday
+gettype
+getval
+gid
+groupalloc
+gtrid
+hashproc
+hcreate
+hdestroy
+hdr
+hostname
+hsearch
+icursor
+idletimeout
+ids
+idup
+iitem
+inc
+incfirst
+indx
+init
+inlen
+inp
+insdel
+int
+intValue
+io
+iread
+isdeleted
+itemorder
+iter
+iwr
+iwrite
+javax
+kb
+kbyte
+kbytes
+keyfirst
+keygroup
+keygroups
+keygrp
+keylast
+keyrange
+killinterval
+killiteration
+killtest
+klNpP
+klNprRV
+klNprRs
+krinsky
+lM
+lP
+lang
+lastid
+ld
+len
+lf
+lg
+libdb
+lk
+llsn
+localhost
+localtime
+lockid
+logc
+logclean
+logfile
+logflush
+logsonly
+lorder
+lpgno
+lsVv
+lsn
+lsynch
+lt
+lu
+luB
+luGB
+luKB
+luKb
+luM
+luMB
+luMb
+lx
+mNP
+mNs
+machid
+makedup
+malloc
+margo
+maxcommitperflush
+maxkey
+maxlockers
+maxlocks
+maxnactive
+maxnlockers
+maxnlocks
+maxnobjects
+maxobjects
+maxops
+maxtimeout
+maxtxns
+mbytes
+mem
+memp
+metadata
+metaflags
+metagroup
+metalsn
+metapage
+metasub
+methodID
+mincommitperflush
+minkey
+minlocks
+minwrite
+minwrites
+mis
+mjc
+mkdir
+mlock
+mmap
+mmapped
+mmapsize
+mmetalsn
+mmpgno
+mp
+mpf
+mpgno
+mpool
+msg
+munmap
+mutex
+mutexes
+mutexlocks
+mv
+mvptr
+mydrive
+mydrivexxx
+nO
+nP
+nTV
+nTt
+naborts
+nactive
+nbegins
+nbytes
+ncaches
+ncommits
+nconflicts
+ndata
+ndbm
+ndeadlocks
+ndx
+needswap
+nelem
+nevict
+newalloc
+newclient
+newfile
+newitem
+newmaster
+newname
+newpage
+newpgno
+newsite
+nextdup
+nextkey
+nextlsn
+nextnodup
+nextpgno
+ng
+nitems
+nkeys
+nlockers
+nlocks
+nlsn
+nmodes
+nnext
+nnextlsn
+nnowaits
+nobjects
+nodup
+nodupdata
+nogrant
+nolocking
+nommap
+noop
+nooverwrite
+nopanic
+nosort
+nosync
+notfound
+notgranted
+nowait
+nowaits
+npages
+npgno
+nrec
+nrecords
+nreleases
+nrequests
+nrestores
+nsites
+ntasks
+nthreads
+num
+numdup
+obj
+offpage
+ok
+olddata
+olditem
+oldname
+opd
+opflags
+opmods
+orig
+os
+osynch
+outlen
+ovfl
+ovflpoint
+ovflsize
+ovref
+pageimage
+pagelsn
+pageno
+pagesize
+pagesizes
+pagfno
+panic'ing
+paniccall
+panicstate
+parentid
+passwd
+perf
+perfdb
+pflag
+pg
+pgcookie
+pgdbt
+pget
+pgfree
+pgin
+pgno
+pgnum
+pgout
+pgsize
+pid
+pkey
+plist
+pn
+postdestroy
+postlog
+postlogmeta
+postopen
+postsync
+prR
+prec
+predestroy
+preopen
+prev
+prevlsn
+prevnodup
+prheader
+pri
+printlog
+proc
+procs
+pthread
+pthreads
+ptype
+pv
+qV
+qam
+qs
+qtest
+rRV
+rRs
+rV
+rand
+rcuradj
+rdonly
+readd
+readonly
+realloc
+rec
+reclength
+recno
+recnum
+recnums
+recs
+refcount
+regionmax
+regop
+regsize
+relink
+repl
+revsplitoff
+rf
+rkey
+rlsn
+rm
+rmid
+rmw
+ro
+rootent
+rootlsn
+rpc
+rpcid
+rs
+rsplit
+runlog
+rw
+rwrw
+rwrwrw
+sS
+sV
+sVv
+scount
+secon
+secs
+sendproc
+seq
+setto
+setval
+sh
+shalloc
+shm
+shmat
+shmctl
+shmdt
+shmem
+shmget
+shr
+sleepycat
+splitdata
+splitmeta
+srand
+stat
+str
+strcmp
+strdup
+strerror
+strlen
+subdatabase
+subdb
+sv
+svc
+tV
+tVZ
+tas
+tcl
+tcp
+thr
+threadID
+tid
+tiebreaker
+timestamp
+tlen
+tm
+tmp
+tmpdir
+tmutex
+tnum
+tp
+tpcb
+treeorder
+ttpcbddlk
+ttpcbi
+ttpcbr
+ttype
+tx
+txn
+txnarray
+txnid
+txns
+txt
+ubell
+ud
+uid
+ulen
+uncorrect
+undeleting
+unmap
+unpinned
+upd
+upi
+usec
+usecs
+usr
+util
+vVxXZ
+vZ
+val
+var
+vec
+ver
+vflag
+vrfy
+vw
+vx
+vxmutex
+vxtmp
+waitsfor
+walkdupint
+walkpages
+wb
+wc
+wcount
+wordlist
+writeable
+wrnosync
+wt
+xa
+xid
+xxx
+yieldcpu
diff --git a/bdb/test/scr011/chk.tags b/bdb/test/scr011/chk.tags
new file mode 100644
index 00000000000..14a3c4e011d
--- /dev/null
+++ b/bdb/test/scr011/chk.tags
@@ -0,0 +1,41 @@
+#!/bin/sh -
+#
+# $Id: chk.tags,v 1.10 2001/10/12 17:55:36 bostic Exp $
+#
+# Check to make sure we don't need any more symbolic links to tags files.
+
+d=../..
+
+# Test must be run from the top-level directory, not from a test directory.
+[ -f $d/LICENSE ] || {
+ echo 'FAIL: cannot find source distribution directory.'
+ exit 1
+}
+
+t1=__1
+t2=__2
+
+(cd $d && ls -F | egrep / | sort |
+ sed -e 's/\///' \
+ -e '/^CVS$/d' \
+ -e '/^build_vxworks$/d' \
+ -e '/^build_win32$/d' \
+ -e '/^docs$/d' \
+ -e '/^docs_book$/d' \
+ -e '/^docs_src$/d' \
+ -e '/^java$/d' \
+ -e '/^perl$/d' \
+ -e '/^test$/d' \
+ -e '/^test_cxx$/d' \
+ -e '/^test_purify$/d' \
+ -e '/^test_thread$/d' \
+ -e '/^test_vxworks$/d') > $t1
+
+(cd $d && ls */tags | sed 's/\/tags$//' | sort) > $t2
+if diff $t1 $t2 > /dev/null; then
+ exit 0
+else
+ echo "<<< source tree >>> tags files"
+ diff $t1 $t2
+ exit 1
+fi
diff --git a/bdb/test/scr012/chk.vx_code b/bdb/test/scr012/chk.vx_code
new file mode 100644
index 00000000000..8d7ca608f93
--- /dev/null
+++ b/bdb/test/scr012/chk.vx_code
@@ -0,0 +1,68 @@
+#!/bin/sh -
+#
+# $Id: chk.vx_code,v 1.6 2002/03/27 20:20:25 bostic Exp $
+#
+# Check to make sure the auto-generated utility code in the VxWorks build
+# directory compiles.
+
+d=../..
+
+[ -f $d/LICENSE ] || {
+ echo 'FAIL: cannot find source distribution directory.'
+ exit 1
+}
+[ -f ../libdb.a ] || (cd .. && make libdb.a) || {
+ echo 'FAIL: unable to find or build libdb.a'
+ exit 1
+}
+
+rm -f t.c t1.c t2.c
+
+header()
+{
+ echo "int"
+ echo "main(int argc, char *argv[])"
+ echo "{return ($1(argv[1]));}"
+}
+
+(echo "int"
+ echo "main(int argc, char *argv[])"
+ echo "{"
+ echo "int i;") > t1.c
+
+for i in db_archive db_checkpoint db_deadlock db_dump db_load \
+ db_printlog db_recover db_stat db_upgrade db_verify dbdemo; do
+ echo " compiling build_vxworks/$i"
+ (cat $d/build_vxworks/$i/$i.c; header $i) > t.c
+ if cc -Wall -I.. -I$d t.c \
+ $d/clib/getopt.c \
+ $d/common/util_arg.c \
+ $d/common/util_cache.c \
+ $d/common/util_log.c \
+ $d/common/util_sig.c ../libdb.a -o t; then
+ :
+ else
+ echo "FAIL: unable to compile $i"
+ exit 1
+ fi
+
+ cat $d/build_vxworks/$i/$i.c >> t2.c
+ echo "i = $i(argv[1]);" >> t1.c
+done
+
+(cat t2.c t1.c; echo "return (0); }") > t.c
+
+echo " compiling build_vxworks utility composite"
+if cc -Dlint -Wall -I.. -I$d t.c \
+ $d/clib/getopt.c \
+ $d/common/util_arg.c \
+ $d/common/util_cache.c \
+ $d/common/util_log.c \
+ $d/common/util_sig.c ../libdb.a -o t; then
+ :
+else
+ echo "FAIL: unable to compile utility composite"
+ exit 1
+fi
+
+exit 0
diff --git a/bdb/test/scr013/chk.stats b/bdb/test/scr013/chk.stats
new file mode 100644
index 00000000000..3a404699668
--- /dev/null
+++ b/bdb/test/scr013/chk.stats
@@ -0,0 +1,114 @@
+#!/bin/sh -
+#
+# $Id: chk.stats,v 1.6 2002/08/19 18:35:18 bostic Exp $
+#
+# Check to make sure all of the stat structure members are included in
+# all of the possible formats.
+
+# Top-level directory.
+d=../..
+
+# Path names are from a top-level directory.
+[ -f $d/README ] || {
+ echo 'FAIL: cannot find source distribution directory.'
+ exit 1
+}
+
+exitv=0
+t=__tmp
+
+# Extract the field names for a structure from the db.h file.
+inc_fields()
+{
+ sed -e "/struct $1 {/,/^};$/p" \
+ -e d < $d/dbinc/db.in |
+ sed -e 1d \
+ -e '$d' \
+ -e '/;/!d' \
+ -e 's/;.*//' \
+ -e 's/^[ ].*[ \*]//'
+}
+
+cat << END_OF_IGNORE > IGNORE
+bt_maxkey
+bt_metaflags
+hash_metaflags
+qs_metaflags
+qs_ndata
+END_OF_IGNORE
+
+# Check to make sure the elements of a structure from db.h appear in
+# the other files.
+inc()
+{
+ for i in `inc_fields $1`; do
+ if egrep -w $i IGNORE > /dev/null; then
+ echo " $1: ignoring $i"
+ continue
+ fi
+ for j in $2; do
+ if egrep -w $i $d/$j > /dev/null; then
+ :;
+ else
+ echo " $1: $i not found in $j."
+ exitv=1
+ fi
+ done
+ done
+}
+
+inc "__db_bt_stat" \
+ "tcl/tcl_db.c db_stat/db_stat.c docs_src/db/db_stat.so"
+inc "__db_h_stat" \
+ "tcl/tcl_db.c db_stat/db_stat.c docs_src/db/db_stat.so"
+inc "__db_qam_stat" \
+ "tcl/tcl_db.c db_stat/db_stat.c docs_src/db/db_stat.so"
+inc __db_lock_stat \
+ "tcl/tcl_lock.c db_stat/db_stat.c docs_src/lock/lock_stat.so"
+inc __db_log_stat \
+ "tcl/tcl_log.c db_stat/db_stat.c docs_src/log/log_stat.so"
+inc __db_mpool_stat \
+ "tcl/tcl_mp.c db_stat/db_stat.c docs_src/memp/memp_stat.so"
+inc __db_txn_stat \
+ "tcl/tcl_txn.c db_stat/db_stat.c docs_src/txn/txn_stat.so"
+
+# Check to make sure the elements from a man page appear in db.in.
+man()
+{
+ for i in `cat $t`; do
+ if egrep -w $i IGNORE > /dev/null; then
+ echo " $1: ignoring $i"
+ continue
+ fi
+ if egrep -w $i $d/dbinc/db.in > /dev/null; then
+ :;
+ else
+ echo " $1: $i not found in db.h."
+ exitv=1
+ fi
+ done
+}
+
+sed -e '/m4_stat(/!d' \
+ -e 's/.*m4_stat(\([^)]*\)).*/\1/' < $d/docs_src/db/db_stat.so > $t
+man "checking db_stat.so against db.h"
+
+sed -e '/m4_stat(/!d' \
+ -e 's/.*m4_stat(\([^)]*\)).*/\1/' \
+ -e 's/.* //' < $d/docs_src/lock/lock_stat.so > $t
+man "checking lock_stat.so against db.h"
+
+sed -e '/m4_stat[12](/!d' \
+ -e 's/.*m4_stat[12](\([^)]*\)).*/\1/' < $d/docs_src/log/log_stat.so > $t
+man "checking log_stat.so against db.h"
+
+sed -e '/m4_stat[123](/!d' \
+ -e 's/.*m4_stat[123](\([^)]*\)).*/\1/' < $d/docs_src/memp/memp_stat.so > $t
+man "checking memp_stat.so against db.h"
+
+sed -e '/m4_stat(/!d' \
+ -e 's/.*m4_stat(.*, \([^)]*\)).*/\1/' \
+ -e 's/__[LR]B__//g' < $d/docs_src/txn/txn_stat.so > $t
+man "checking txn_stat.so against db.h"
+
+exit $exitv
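
Both checks lean on the layout convention in dbinc/db.in: each statistics structure is written one member per line, terminated by a semicolon, with the member name as the last token, which is what lets inc_fields() recover the names with two sed passes. A self-contained sketch of that extraction on an invented structure (the struct and its members are made up for illustration, and the whitespace classes are spelled [[:space:]] here for readability where the real script uses literal space/tab classes):

    printf 'struct __db_xyz_stat {\n\tu_int32_t st_magic;\n\tu_int32_t st_nkeys;\n\tu_int32_t *st_ptr;\n};\n' |
    sed -e '/struct __db_xyz_stat {/,/^};$/p' -e d |
    sed -e 1d -e '$d' -e '/;/!d' -e 's/;.*//' \
        -e 's/^[[:space:]].*[[:space:]*]//'
    # prints st_magic, st_nkeys and st_ptr, one per line
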
diff --git a/bdb/test/scr014/chk.err b/bdb/test/scr014/chk.err
new file mode 100644
index 00000000000..72b4a62719f
--- /dev/null
+++ b/bdb/test/scr014/chk.err
@@ -0,0 +1,34 @@
+#!/bin/sh -
+#
+# $Id: chk.err,v 1.3 2002/03/27 04:33:05 bostic Exp $
+#
+# Check to make sure all of the error values have corresponding error
+# message strings in db_strerror().
+
+# Top-level directory.
+d=../..
+
+# Path names are from a top-level directory.
+[ -f $d/README ] || {
+ echo 'FAIL: cannot find source distribution directory.'
+ exit 1
+}
+
+t1=__tmp1
+t2=__tmp2
+
+egrep -- "define.*DB_.*-309" $d/dbinc/db.in | awk '{print $2}' > $t1
+sed -e '/^db_strerror/,/^}/{' \
+ -e '/ case DB_/{' \
+ -e 's/:.*//' \
+ -e 's/.* //' \
+ -e p \
+ -e '}' \
+ -e '}' \
+ -e d \
+ < $d/common/db_err.c > $t2
+
+cmp $t1 $t2 > /dev/null ||
+	{ echo "<<< db.h >>> db_strerror"; diff $t1 $t2; exit 1; }
+
+exit 0
diff --git a/bdb/test/scr015/README b/bdb/test/scr015/README
new file mode 100644
index 00000000000..75a356eea06
--- /dev/null
+++ b/bdb/test/scr015/README
@@ -0,0 +1,36 @@
+# $Id: README,v 1.1 2001/05/31 23:09:11 dda Exp $
+
+Use the scripts testall or testone to run all, or just one of the C++
+tests. You must be in this directory to run them. For example,
+
+ $ export LIBS="-L/usr/include/BerkeleyDB/lib"
+ $ export CXXFLAGS="-I/usr/include/BerkeleyDB/include"
+ $ export LD_LIBRARY_PATH="/usr/include/BerkeleyDB/lib"
+ $ ./testone TestAppendRecno
+ $ ./testall
+
+The scripts will use the c++ in your path.  Set the environment variable $CXX
+to override this.  They will also honor any $CXXFLAGS and $LIBS
+variables that are set, except that any -c option is silently removed from
+$CXXFLAGS (since we do the compilation in one step).
+
+To run successfully, you will probably need to set $LD_LIBRARY_PATH
+to the directory containing libdb_cxx-X.Y.so.
+
+As an alternative, use the --prefix=<DIR> option, a la configure,
+to set the top of the BerkeleyDB install directory.  This forces
+the proper options to be added to $LIBS, $CXXFLAGS and $LD_LIBRARY_PATH.
+For example,
+
+ $ ./testone --prefix=/usr/include/BerkeleyDB TestAppendRecno
+ $ ./testall --prefix=/usr/include/BerkeleyDB
+
+The test framework is pretty simple.  Any <name>.cpp file in this
+directory that is not mentioned in the 'ignore' file represents a
+test.  If the test does not compile successfully, the compiler output
+is left in <name>.compileout.  Otherwise, the program is run in
+a clean subdirectory using as input <name>.testin, or if that doesn't
+exist, /dev/null.  Output and error from the test run are put into
+<name>.out and <name>.err.  If <name>.testout and <name>.testerr exist,
+they are used as reference files and any differences are reported.
+If either of the reference files does not exist, /dev/null is used.
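
A rough manual equivalent of what testone does for a single test, reusing the install prefix from the README's own example (the prefix and the test name are examples only, and the harness additionally runs the binary in a clean subdirectory, which is omitted here):

    $ CXX=${CXX:-c++}
    $ $CXX -I/usr/include/BerkeleyDB/include -o TestSimpleAccess TestSimpleAccess.cpp \
          -L/usr/include/BerkeleyDB/lib -ldb_cxx
    $ LD_LIBRARY_PATH=/usr/include/BerkeleyDB/lib ./TestSimpleAccess \
          < /dev/null > TestSimpleAccess.out 2> TestSimpleAccess.err
    $ diff TestSimpleAccess.out TestSimpleAccess.testout
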
diff --git a/bdb/test/scr015/TestConstruct01.cpp b/bdb/test/scr015/TestConstruct01.cpp
new file mode 100644
index 00000000000..7ae328d458c
--- /dev/null
+++ b/bdb/test/scr015/TestConstruct01.cpp
@@ -0,0 +1,330 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: TestConstruct01.cpp,v 1.5 2002/01/23 14:26:40 bostic Exp $
+ */
+
+/*
+ * Do some regression tests for constructors.
+ * Run normally (without arguments) it is a simple regression test.
+ * Run with a numeric argument, it repeats the regression a number
+ * of times, to try to determine if there are memory leaks.
+ */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <iostream.h>
+#include <errno.h>
+#include <stdlib.h>
+#include <string.h>
+#ifndef _MSC_VER
+#include <unistd.h>
+#endif
+#endif
+
+#include <iomanip.h>
+#include <db_cxx.h>
+
+#define ERR(a) \
+ do { \
+ cout << "FAIL: " << (a) << "\n"; sysexit(1); \
+ } while (0)
+
+#define ERR2(a1,a2) \
+ do { \
+ cout << "FAIL: " << (a1) << ": " << (a2) << "\n"; sysexit(1); \
+ } while (0)
+
+#define ERR3(a1,a2,a3) \
+ do { \
+ cout << "FAIL: " << (a1) << ": " << (a2) << ": " << (a3) << "\n"; sysexit(1); \
+ } while (0)
+
+#define CHK(a) \
+ do { \
+ int _ret; \
+ if ((_ret = (a)) != 0) { \
+ ERR3("DB function " #a " has bad return", _ret, DbEnv::strerror(_ret)); \
+ } \
+ } while (0)
+
+#ifdef VERBOSE
+#define DEBUGOUT(a) cout << a << "\n"
+#else
+#define DEBUGOUT(a)
+#endif
+
+#define CONSTRUCT01_DBNAME "construct01.db"
+#define CONSTRUCT01_DBDIR "."
+#define CONSTRUCT01_DBFULLPATH (CONSTRUCT01_DBDIR "/" CONSTRUCT01_DBNAME)
+
+int itemcount; // count the number of items in the database
+
+// A good place to put a breakpoint...
+//
+void sysexit(int status)
+{
+ exit(status);
+}
+
+void check_file_removed(const char *name, int fatal)
+{
+ unlink(name);
+#if 0
+ if (access(name, 0) == 0) {
+ if (fatal)
+ cout << "FAIL: ";
+ cout << "File \"" << name << "\" still exists after run\n";
+ if (fatal)
+ sysexit(1);
+ }
+#endif
+}
+
+// Check that key/data for 0 - count-1 are already present,
+// and write a key/data for count. The key and data are
+// both "0123...N" where N == count-1.
+//
+// For some reason on Windows, we need to open using the full pathname
+// of the file when there is no environment, thus the 'has_env'
+// variable.
+//
+void rundb(Db *db, int count, int has_env)
+{
+ const char *name;
+
+ if (has_env)
+ name = CONSTRUCT01_DBNAME;
+ else
+ name = CONSTRUCT01_DBFULLPATH;
+
+ db->set_error_stream(&cerr);
+
+ // We don't really care about the pagesize, but we do want
+ // to make sure adjusting Db specific variables works before
+ // opening the db.
+ //
+ CHK(db->set_pagesize(1024));
+ CHK(db->open(NULL, name, NULL, DB_BTREE, count ? 0 : DB_CREATE, 0664));
+
+ // The bit map of keys we've seen
+ long bitmap = 0;
+
+ // The bit map of keys we expect to see
+ long expected = (1 << (count+1)) - 1;
+
+ char outbuf[10];
+ int i;
+ for (i=0; i<count; i++) {
+ outbuf[i] = '0' + i;
+ }
+ outbuf[i++] = '\0';
+ Dbt key(outbuf, i);
+ Dbt data(outbuf, i);
+
+ DEBUGOUT("Put: " << outbuf);
+ CHK(db->put(0, &key, &data, DB_NOOVERWRITE));
+
+ // Acquire a cursor for the table.
+ Dbc *dbcp;
+ CHK(db->cursor(NULL, &dbcp, 0));
+
+ // Walk through the table, checking
+ Dbt readkey;
+ Dbt readdata;
+ while (dbcp->get(&readkey, &readdata, DB_NEXT) == 0) {
+ char *key_string = (char *)readkey.get_data();
+ char *data_string = (char *)readdata.get_data();
+ DEBUGOUT("Got: " << key_string << ": " << data_string);
+ int len = strlen(key_string);
+ long bit = (1 << len);
+ if (len > count) {
+ ERR("reread length is bad");
+ }
+ else if (strcmp(data_string, key_string) != 0) {
+ ERR("key/data don't match");
+ }
+ else if ((bitmap & bit) != 0) {
+ ERR("key already seen");
+ }
+ else if ((expected & bit) == 0) {
+ ERR("key was not expected");
+ }
+ else {
+ bitmap |= bit;
+ expected &= ~(bit);
+ for (i=0; i<len; i++) {
+ if (key_string[i] != ('0' + i)) {
+ cout << " got " << key_string
+ << " (" << (int)key_string[i] << ")"
+ << ", wanted " << i
+ << " (" << (int)('0' + i) << ")"
+ << " at position " << i << "\n";
+ ERR("key is corrupt");
+ }
+ }
+ }
+ }
+ if (expected != 0) {
+ cout << " expected more keys, bitmap is: " << expected << "\n";
+ ERR("missing keys in database");
+ }
+ CHK(dbcp->close());
+ CHK(db->close(0));
+}
+
+void t1(int except_flag)
+{
+ cout << " Running test 1:\n";
+ Db db(0, except_flag);
+ rundb(&db, itemcount++, 0);
+ cout << " finished.\n";
+}
+
+void t2(int except_flag)
+{
+ cout << " Running test 2:\n";
+ Db db(0, except_flag);
+ rundb(&db, itemcount++, 0);
+ cout << " finished.\n";
+}
+
+void t3(int except_flag)
+{
+ cout << " Running test 3:\n";
+ Db db(0, except_flag);
+ rundb(&db, itemcount++, 0);
+ cout << " finished.\n";
+}
+
+void t4(int except_flag)
+{
+ cout << " Running test 4:\n";
+ DbEnv env(except_flag);
+ CHK(env.open(CONSTRUCT01_DBDIR, DB_CREATE | DB_INIT_MPOOL, 0));
+ Db db(&env, 0);
+ CHK(db.close(0));
+ CHK(env.close(0));
+ cout << " finished.\n";
+}
+
+void t5(int except_flag)
+{
+ cout << " Running test 5:\n";
+ DbEnv env(except_flag);
+ CHK(env.open(CONSTRUCT01_DBDIR, DB_CREATE | DB_INIT_MPOOL, 0));
+ Db db(&env, 0);
+ rundb(&db, itemcount++, 1);
+ // Note we cannot reuse the old Db!
+ Db anotherdb(&env, 0);
+
+ anotherdb.set_errpfx("test5");
+ rundb(&anotherdb, itemcount++, 1);
+ CHK(env.close(0));
+ cout << " finished.\n";
+}
+
+void t6(int except_flag)
+{
+ cout << " Running test 6:\n";
+
+ /* From user [#2939] */
+ int err;
+
+ DbEnv* penv = new DbEnv(DB_CXX_NO_EXCEPTIONS);
+ penv->set_cachesize(0, 32 * 1024, 0);
+ penv->open(CONSTRUCT01_DBDIR, DB_CREATE | DB_PRIVATE | DB_INIT_MPOOL, 0);
+
+ //LEAK: remove this block and leak disappears
+ Db* pdb = new Db(penv,0);
+ if ((err = pdb->close(0)) != 0) {
+ fprintf(stderr, "Error closing Db: %s\n", db_strerror(err));
+ }
+ delete pdb;
+ //LEAK: remove this block and leak disappears
+
+ if ((err = penv->close(0)) != 0) {
+ fprintf(stderr, "Error closing DbEnv: %s\n", db_strerror(err));
+ }
+ delete penv;
+
+ // Make sure we get a message from C++ layer reminding us to close.
+ cerr << "expected error: ";
+ {
+ DbEnv foo(DB_CXX_NO_EXCEPTIONS);
+ foo.open(CONSTRUCT01_DBDIR, DB_CREATE, 0);
+ }
+ cerr << "should have received error.\n";
+ cout << " finished.\n";
+}
+
+// remove any existing environment or database
+void removeall()
+{
+ {
+ DbEnv tmpenv(DB_CXX_NO_EXCEPTIONS);
+ (void)tmpenv.remove(CONSTRUCT01_DBDIR, DB_FORCE);
+ }
+
+ check_file_removed(CONSTRUCT01_DBFULLPATH, 1);
+ for (int i=0; i<8; i++) {
+ char buf[20];
+ sprintf(buf, "__db.00%d", i);
+ check_file_removed(buf, 1);
+ }
+}
+
+int doall(int except_flag)
+{
+ itemcount = 0;
+ try {
+ // before and after the run, removing any
+ // old environment/database.
+ //
+ removeall();
+ t1(except_flag);
+ t2(except_flag);
+ t3(except_flag);
+ t4(except_flag);
+ t5(except_flag);
+ t6(except_flag);
+
+ removeall();
+ return 0;
+ }
+ catch (DbException &dbe) {
+ ERR2("EXCEPTION RECEIVED", dbe.what());
+ }
+ return 1;
+}
+
+int main(int argc, char *argv[])
+{
+ int iterations = 1;
+ if (argc > 1) {
+ iterations = atoi(argv[1]);
+ if (iterations < 0) {
+ ERR("Usage: construct01 count");
+ }
+ }
+ for (int i=0; i<iterations; i++) {
+ if (iterations != 0) {
+ cout << "(" << i << "/" << iterations << ") ";
+ }
+ cout << "construct01 running:\n";
+ if (doall(DB_CXX_NO_EXCEPTIONS) != 0) {
+ ERR("SOME TEST FAILED FOR NO-EXCEPTION TEST");
+ }
+ else if (doall(0) != 0) {
+ ERR("SOME TEST FAILED FOR EXCEPTION TEST");
+ }
+ else {
+ cout << "\nALL TESTS SUCCESSFUL\n";
+ }
+ }
+ return 0;
+}
diff --git a/bdb/test/scr015/TestConstruct01.testerr b/bdb/test/scr015/TestConstruct01.testerr
new file mode 100644
index 00000000000..1ba627d103b
--- /dev/null
+++ b/bdb/test/scr015/TestConstruct01.testerr
@@ -0,0 +1,4 @@
+expected error: DbEnv::_destroy_check: open DbEnv object destroyed
+should have received error.
+expected error: DbEnv::_destroy_check: open DbEnv object destroyed
+should have received error.
diff --git a/bdb/test/scr015/TestConstruct01.testout b/bdb/test/scr015/TestConstruct01.testout
new file mode 100644
index 00000000000..9b840f9fcf4
--- /dev/null
+++ b/bdb/test/scr015/TestConstruct01.testout
@@ -0,0 +1,27 @@
+(0/1) construct01 running:
+ Running test 1:
+ finished.
+ Running test 2:
+ finished.
+ Running test 3:
+ finished.
+ Running test 4:
+ finished.
+ Running test 5:
+ finished.
+ Running test 6:
+ finished.
+ Running test 1:
+ finished.
+ Running test 2:
+ finished.
+ Running test 3:
+ finished.
+ Running test 4:
+ finished.
+ Running test 5:
+ finished.
+ Running test 6:
+ finished.
+
+ALL TESTS SUCCESSFUL
diff --git a/bdb/test/scr015/TestExceptInclude.cpp b/bdb/test/scr015/TestExceptInclude.cpp
new file mode 100644
index 00000000000..28bc498222f
--- /dev/null
+++ b/bdb/test/scr015/TestExceptInclude.cpp
@@ -0,0 +1,27 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: TestExceptInclude.cpp,v 1.4 2002/07/05 22:17:59 dda Exp $
+ */
+
+/* We should be able to include cxx_except.h without db_cxx.h,
+ * and use the DbException class. We do need db.h to get a few
+ * typedefs defined that the DbException classes use.
+ *
+ * This program does nothing, it's just here to make sure
+ * the compilation works.
+ */
+#include <db.h>
+#include <cxx_except.h>
+
+int main(int argc, char *argv[])
+{
+ DbException *dbe = new DbException("something");
+ DbMemoryException *dbme = new DbMemoryException("anything");
+
+ dbe = dbme;
+}
+
diff --git a/bdb/test/scr015/TestGetSetMethods.cpp b/bdb/test/scr015/TestGetSetMethods.cpp
new file mode 100644
index 00000000000..81ef914eac3
--- /dev/null
+++ b/bdb/test/scr015/TestGetSetMethods.cpp
@@ -0,0 +1,91 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: TestGetSetMethods.cpp,v 1.4 2002/01/11 15:53:59 bostic Exp $
+ */
+
+/*
+ * Do some regression tests for simple get/set access methods
+ * on DbEnv, DbTxn, Db. We don't currently test that they have
+ * the desired effect, only that they operate and return correctly.
+ */
+
+#include <db_cxx.h>
+#include <iostream.h>
+
+int main(int argc, char *argv[])
+{
+ try {
+ DbEnv *dbenv = new DbEnv(0);
+ DbTxn *dbtxn;
+ u_int8_t conflicts[10];
+
+ dbenv->set_error_stream(&cerr);
+ dbenv->set_timeout(0x90000000,
+ DB_SET_LOCK_TIMEOUT);
+ dbenv->set_lg_bsize(0x1000);
+ dbenv->set_lg_dir(".");
+ dbenv->set_lg_max(0x10000000);
+ dbenv->set_lg_regionmax(0x100000);
+ dbenv->set_lk_conflicts(conflicts, sizeof(conflicts));
+ dbenv->set_lk_detect(DB_LOCK_DEFAULT);
+ // exists, but is deprecated:
+ // dbenv->set_lk_max(0);
+ dbenv->set_lk_max_lockers(100);
+ dbenv->set_lk_max_locks(10);
+ dbenv->set_lk_max_objects(1000);
+ dbenv->set_mp_mmapsize(0x10000);
+ dbenv->set_tas_spins(1000);
+
+ // Need to open the environment so we
+ // can get a transaction.
+ //
+ dbenv->open(".", DB_CREATE | DB_INIT_TXN |
+ DB_INIT_LOCK | DB_INIT_LOG |
+ DB_INIT_MPOOL,
+ 0644);
+
+ dbenv->txn_begin(NULL, &dbtxn, DB_TXN_NOWAIT);
+ dbtxn->set_timeout(0xA0000000, DB_SET_TXN_TIMEOUT);
+ dbtxn->abort();
+
+ dbenv->close(0);
+
+ // We get a db, one for each type.
+ // That's because once we call (for instance)
+ // set_bt_maxkey, DB 'knows' that this is a
+ // Btree Db, and it cannot be used to try Hash
+ // or Recno functions.
+ //
+ Db *db_bt = new Db(NULL, 0);
+ db_bt->set_bt_maxkey(10000);
+ db_bt->set_bt_minkey(100);
+ db_bt->set_cachesize(0, 0x100000, 0);
+ db_bt->close(0);
+
+ Db *db_h = new Db(NULL, 0);
+ db_h->set_h_ffactor(0x10);
+ db_h->set_h_nelem(100);
+ db_h->set_lorder(0);
+ db_h->set_pagesize(0x10000);
+ db_h->close(0);
+
+ Db *db_re = new Db(NULL, 0);
+ db_re->set_re_delim('@');
+ db_re->set_re_pad(10);
+ db_re->set_re_source("re.in");
+ db_re->close(0);
+
+ Db *db_q = new Db(NULL, 0);
+ db_q->set_q_extentsize(200);
+ db_q->close(0);
+
+ }
+ catch (DbException &dbe) {
+ cerr << "Db Exception: " << dbe.what() << "\n";
+ }
+ return 0;
+}
diff --git a/bdb/test/scr015/TestKeyRange.cpp b/bdb/test/scr015/TestKeyRange.cpp
new file mode 100644
index 00000000000..980d2f518e0
--- /dev/null
+++ b/bdb/test/scr015/TestKeyRange.cpp
@@ -0,0 +1,171 @@
+/*NOTE: AccessExample changed to test Db.key_range.
+ * We made a global change of /AccessExample/TestKeyRange/;
+ * the only other changes are marked with comments
+ * notated as 'ADDED'.
+ */
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: TestKeyRange.cpp,v 1.4 2002/01/23 14:26:41 bostic Exp $
+ */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <iostream.h>
+#include <errno.h>
+#include <stdlib.h>
+#include <string.h>
+#ifndef _MSC_VER
+#include <unistd.h>
+#endif
+#endif
+
+#include <iomanip.h>
+#include <db_cxx.h>
+
+class TestKeyRange
+{
+public:
+ TestKeyRange();
+ void run();
+
+private:
+ static const char FileName[];
+
+ // no need for copy and assignment
+ TestKeyRange(const TestKeyRange &);
+ void operator = (const TestKeyRange &);
+};
+
+static void usage(); // forward
+
+int main(int argc, char *argv[])
+{
+ if (argc > 1) {
+ usage();
+ }
+
+ // Use a try block just to report any errors.
+ // An alternate approach to using exceptions is to
+ // use error models (see DbEnv::set_error_model()) so
+ // that error codes are returned for all Berkeley DB methods.
+ //
+ try {
+ TestKeyRange app;
+ app.run();
+ return 0;
+ }
+ catch (DbException &dbe) {
+ cerr << "TestKeyRange: " << dbe.what() << "\n";
+ return 1;
+ }
+}
+
+static void usage()
+{
+ cerr << "usage: TestKeyRange\n";
+ exit(1);
+}
+
+const char TestKeyRange::FileName[] = "access.db";
+
+TestKeyRange::TestKeyRange()
+{
+}
+
+void TestKeyRange::run()
+{
+ // Remove the previous database.
+ (void)unlink(FileName);
+
+ // Create the database object.
+ // There is no environment for this simple example.
+ Db db(0, 0);
+
+ db.set_error_stream(&cerr);
+ db.set_errpfx("TestKeyRange");
+ db.set_pagesize(1024); /* Page size: 1K. */
+ db.set_cachesize(0, 32 * 1024, 0);
+ db.open(NULL, FileName, NULL, DB_BTREE, DB_CREATE, 0664);
+
+ //
+ // Insert records into the database, where the key is the user
+ // input and the data is the user input in reverse order.
+ //
+ char buf[1024];
+ char rbuf[1024];
+ char *t;
+ char *p;
+ int ret;
+ int len;
+ Dbt *firstkey = NULL;
+ char firstbuf[1024];
+
+ for (;;) {
+ cout << "input>";
+ cout.flush();
+
+ cin.getline(buf, sizeof(buf));
+ if (cin.eof())
+ break;
+
+ if ((len = strlen(buf)) <= 0)
+ continue;
+ for (t = rbuf, p = buf + (len - 1); p >= buf;)
+ *t++ = *p--;
+ *t++ = '\0';
+
+ Dbt key(buf, len + 1);
+ Dbt data(rbuf, len + 1);
+ if (firstkey == NULL) {
+ strcpy(firstbuf, buf);
+ firstkey = new Dbt(firstbuf, len + 1);
+ }
+
+ ret = db.put(0, &key, &data, DB_NOOVERWRITE);
+ if (ret == DB_KEYEXIST) {
+ cout << "Key " << buf << " already exists.\n";
+ }
+ cout << "\n";
+ }
+
+ // We put a try block around this section of code
+ // to ensure that our database is properly closed
+ // in the event of an error.
+ //
+ try {
+ // Acquire a cursor for the table.
+ Dbc *dbcp;
+ db.cursor(NULL, &dbcp, 0);
+
+ /*ADDED...*/
+ DB_KEY_RANGE range;
+ memset(&range, 0, sizeof(range));
+
+ db.key_range(NULL, firstkey, &range, 0);
+ printf("less: %f\n", range.less);
+ printf("equal: %f\n", range.equal);
+ printf("greater: %f\n", range.greater);
+ /*end ADDED*/
+
+ Dbt key;
+ Dbt data;
+
+ // Walk through the table, printing the key/data pairs.
+ while (dbcp->get(&key, &data, DB_NEXT) == 0) {
+ char *key_string = (char *)key.get_data();
+ char *data_string = (char *)data.get_data();
+ cout << key_string << " : " << data_string << "\n";
+ }
+ dbcp->close();
+ }
+ catch (DbException &dbe) {
+ cerr << "TestKeyRange: " << dbe.what() << "\n";
+ }
+
+ db.close(0);
+}
diff --git a/bdb/test/scr015/TestKeyRange.testin b/bdb/test/scr015/TestKeyRange.testin
new file mode 100644
index 00000000000..a2b6bd74e7b
--- /dev/null
+++ b/bdb/test/scr015/TestKeyRange.testin
@@ -0,0 +1,8 @@
+first line is alphabetically somewhere in the middle.
+Blah blah
+let's have exactly eight lines of input.
+stuff
+more stuff
+and even more stuff
+lastly
+but not leastly.
diff --git a/bdb/test/scr015/TestKeyRange.testout b/bdb/test/scr015/TestKeyRange.testout
new file mode 100644
index 00000000000..25b2e1a835c
--- /dev/null
+++ b/bdb/test/scr015/TestKeyRange.testout
@@ -0,0 +1,19 @@
+input>
+input>
+input>
+input>
+input>
+input>
+input>
+input>
+input>less: 0.375000
+equal: 0.125000
+greater: 0.500000
+Blah blah : halb halB
+and even more stuff : ffuts erom neve dna
+but not leastly. : .yltsael ton tub
+first line is alphabetically somewhere in the middle. : .elddim eht ni erehwemos yllacitebahpla si enil tsrif
+lastly : yltsal
+let's have exactly eight lines of input. : .tupni fo senil thgie yltcaxe evah s'tel
+more stuff : ffuts erom
+stuff : ffuts
diff --git a/bdb/test/scr015/TestLogc.cpp b/bdb/test/scr015/TestLogc.cpp
new file mode 100644
index 00000000000..94fcfa0b3ec
--- /dev/null
+++ b/bdb/test/scr015/TestLogc.cpp
@@ -0,0 +1,101 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: TestLogc.cpp,v 1.6 2002/01/23 14:26:41 bostic Exp $
+ */
+
+/*
+ * A basic regression test for the Logc class.
+ */
+
+#include <db_cxx.h>
+#include <iostream.h>
+
+static void show_dbt(ostream &os, Dbt *dbt)
+{
+ int i;
+ int size = dbt->get_size();
+ unsigned char *data = (unsigned char *)dbt->get_data();
+
+ os << "size: " << size << " data: ";
+ for (i=0; i<size && i<10; i++) {
+ os << (int)data[i] << " ";
+ }
+ if (i<size)
+ os << "...";
+}
+
+int main(int argc, char *argv[])
+{
+ try {
+ DbEnv *env = new DbEnv(0);
+ env->open(".", DB_CREATE | DB_INIT_LOG | DB_INIT_MPOOL, 0);
+
+ // Do some database activity to get something into the log.
+ Db *db1 = new Db(env, 0);
+ db1->open(NULL, "first.db", NULL, DB_BTREE, DB_CREATE, 0);
+ Dbt *key = new Dbt((char *)"a", 1);
+ Dbt *data = new Dbt((char *)"b", 1);
+ db1->put(NULL, key, data, 0);
+ key->set_data((char *)"c");
+ data->set_data((char *)"d");
+ db1->put(NULL, key, data, 0);
+ db1->close(0);
+
+ Db *db2 = new Db(env, 0);
+ db2->open(NULL, "second.db", NULL, DB_BTREE, DB_CREATE, 0);
+ key->set_data((char *)"w");
+ data->set_data((char *)"x");
+ db2->put(NULL, key, data, 0);
+ key->set_data((char *)"y");
+ data->set_data((char *)"z");
+ db2->put(NULL, key, data, 0);
+ db2->close(0);
+
+ // Now get a log cursor and walk through.
+ DbLogc *logc;
+
+ env->log_cursor(&logc, 0);
+ int ret = 0;
+ DbLsn lsn;
+ Dbt *dbt = new Dbt();
+ u_int32_t flags = DB_FIRST;
+
+ int count = 0;
+ while ((ret = logc->get(&lsn, dbt, flags)) == 0) {
+
+ // We ignore the contents of the log record,
+ // it's not portable. Even the exact count
+			// may change when the underlying implementation
+			// changes; we'll just make sure at the end we saw
+ // 'enough'.
+ //
+ // cout << "logc.get: " << count;
+ // show_dbt(cout, dbt);
+ // cout << "\n";
+ //
+ count++;
+ flags = DB_NEXT;
+ }
+ if (ret != DB_NOTFOUND) {
+ cerr << "*** FAIL: logc.get returned: "
+ << DbEnv::strerror(ret) << "\n";
+ }
+ logc->close(0);
+
+		// There have to be at *least* four log records,
+ // since we did four separate database operations.
+ //
+ if (count < 4)
+ cerr << "*** FAIL: not enough log records\n";
+
+ cout << "TestLogc done.\n";
+ }
+ catch (DbException &dbe) {
+ cerr << "*** FAIL: " << dbe.what() <<"\n";
+ }
+ return 0;
+}
diff --git a/bdb/test/scr015/TestLogc.testout b/bdb/test/scr015/TestLogc.testout
new file mode 100644
index 00000000000..afac3af7eda
--- /dev/null
+++ b/bdb/test/scr015/TestLogc.testout
@@ -0,0 +1 @@
+TestLogc done.
diff --git a/bdb/test/scr015/TestSimpleAccess.cpp b/bdb/test/scr015/TestSimpleAccess.cpp
new file mode 100644
index 00000000000..2450b9b3030
--- /dev/null
+++ b/bdb/test/scr015/TestSimpleAccess.cpp
@@ -0,0 +1,67 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: TestSimpleAccess.cpp,v 1.5 2002/01/23 14:26:41 bostic Exp $
+ */
+
+/*
+ * Do some regression tests for constructors.
+ * Run normally (without arguments), it is a simple regression test.
+ * Run with a numeric argument, it repeats the regression a number
+ * of times, to try to determine if there are memory leaks.
+ */
+
+#include <db_cxx.h>
+#include <iostream.h>
+
+int main(int argc, char *argv[])
+{
+ try {
+ Db *db = new Db(NULL, 0);
+ db->open(NULL, "my.db", NULL, DB_BTREE, DB_CREATE, 0644);
+
+ // populate our massive database.
+ // all our strings include null for convenience.
+ // Note we have to cast for idiomatic
+ // usage, since newer gcc requires it.
+ Dbt *keydbt = new Dbt((char *)"key", 4);
+ Dbt *datadbt = new Dbt((char *)"data", 5);
+ db->put(NULL, keydbt, datadbt, 0);
+
+ // Now, retrieve. We could use keydbt over again,
+ // but that wouldn't be typical in an application.
+ Dbt *goodkeydbt = new Dbt((char *)"key", 4);
+ Dbt *badkeydbt = new Dbt((char *)"badkey", 7);
+ Dbt *resultdbt = new Dbt();
+ resultdbt->set_flags(DB_DBT_MALLOC);
+
+ int ret;
+
+ if ((ret = db->get(NULL, goodkeydbt, resultdbt, 0)) != 0) {
+ cout << "get: " << DbEnv::strerror(ret) << "\n";
+ }
+ else {
+ char *result = (char *)resultdbt->get_data();
+ cout << "got data: " << result << "\n";
+ }
+
+ if ((ret = db->get(NULL, badkeydbt, resultdbt, 0)) != 0) {
+ // We expect this...
+ cout << "get using bad key: "
+ << DbEnv::strerror(ret) << "\n";
+ }
+ else {
+ char *result = (char *)resultdbt->get_data();
+ cout << "*** got data using bad key!!: "
+ << result << "\n";
+ }
+ cout << "finished test\n";
+ }
+ catch (DbException &dbe) {
+ cerr << "Db Exception: " << dbe.what();
+ }
+ return 0;
+}
diff --git a/bdb/test/scr015/TestSimpleAccess.testout b/bdb/test/scr015/TestSimpleAccess.testout
new file mode 100644
index 00000000000..dc88d4788e4
--- /dev/null
+++ b/bdb/test/scr015/TestSimpleAccess.testout
@@ -0,0 +1,3 @@
+got data: data
+get using bad key: DB_NOTFOUND: No matching key/data pair found
+finished test
diff --git a/bdb/test/scr015/TestTruncate.cpp b/bdb/test/scr015/TestTruncate.cpp
new file mode 100644
index 00000000000..d5c0dc6de29
--- /dev/null
+++ b/bdb/test/scr015/TestTruncate.cpp
@@ -0,0 +1,84 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: TestTruncate.cpp,v 1.5 2002/01/23 14:26:41 bostic Exp $
+ */
+
+/*
+ * Do some regression tests for constructors.
+ * Run normally (without arguments), it is a simple regression test.
+ * Run with a numeric argument, it repeats the regression a number
+ * of times, to try to determine if there are memory leaks.
+ */
+
+#include <db_cxx.h>
+#include <iostream.h>
+
+int main(int argc, char *argv[])
+{
+ try {
+ Db *db = new Db(NULL, 0);
+ db->open(NULL, "my.db", NULL, DB_BTREE, DB_CREATE, 0644);
+
+ // populate our massive database.
+ // all our strings include null for convenience.
+ // Note we have to cast for idiomatic
+ // usage, since newer gcc requires it.
+ Dbt *keydbt = new Dbt((char*)"key", 4);
+ Dbt *datadbt = new Dbt((char*)"data", 5);
+ db->put(NULL, keydbt, datadbt, 0);
+
+ // Now, retrieve. We could use keydbt over again,
+ // but that wouldn't be typical in an application.
+ Dbt *goodkeydbt = new Dbt((char*)"key", 4);
+ Dbt *badkeydbt = new Dbt((char*)"badkey", 7);
+ Dbt *resultdbt = new Dbt();
+ resultdbt->set_flags(DB_DBT_MALLOC);
+
+ int ret;
+
+ if ((ret = db->get(NULL, goodkeydbt, resultdbt, 0)) != 0) {
+ cout << "get: " << DbEnv::strerror(ret) << "\n";
+ }
+ else {
+ char *result = (char *)resultdbt->get_data();
+ cout << "got data: " << result << "\n";
+ }
+
+ if ((ret = db->get(NULL, badkeydbt, resultdbt, 0)) != 0) {
+ // We expect this...
+ cout << "get using bad key: "
+ << DbEnv::strerror(ret) << "\n";
+ }
+ else {
+ char *result = (char *)resultdbt->get_data();
+ cout << "*** got data using bad key!!: "
+ << result << "\n";
+ }
+
+ // Now, truncate and make sure that it's really gone.
+ cout << "truncating data...\n";
+ u_int32_t nrecords;
+ db->truncate(NULL, &nrecords, 0);
+ cout << "truncate returns " << nrecords << "\n";
+ if ((ret = db->get(NULL, goodkeydbt, resultdbt, 0)) != 0) {
+ // We expect this...
+ cout << "after truncate get: "
+ << DbEnv::strerror(ret) << "\n";
+ }
+ else {
+ char *result = (char *)resultdbt->get_data();
+ cout << "got data: " << result << "\n";
+ }
+
+ db->close(0);
+ cout << "finished test\n";
+ }
+ catch (DbException &dbe) {
+ cerr << "Db Exception: " << dbe.what();
+ }
+ return 0;
+}
diff --git a/bdb/test/scr015/TestTruncate.testout b/bdb/test/scr015/TestTruncate.testout
new file mode 100644
index 00000000000..0a4bc98165d
--- /dev/null
+++ b/bdb/test/scr015/TestTruncate.testout
@@ -0,0 +1,6 @@
+got data: data
+get using bad key: DB_NOTFOUND: No matching key/data pair found
+truncating data...
+truncate returns 1
+after truncate get: DB_NOTFOUND: No matching key/data pair found
+finished test
diff --git a/bdb/test/scr015/chk.cxxtests b/bdb/test/scr015/chk.cxxtests
new file mode 100644
index 00000000000..5c21e27208c
--- /dev/null
+++ b/bdb/test/scr015/chk.cxxtests
@@ -0,0 +1,71 @@
+#!/bin/sh -
+#
+# $Id: chk.cxxtests,v 1.5 2002/07/05 22:17:59 dda Exp $
+#
+# Check to make sure that regression tests for C++ run.
+
+TEST_CXX_SRCDIR=../test/scr015 # must be a relative directory
+
+# All paths must be relative to a subdirectory of the build directory
+LIBS="-L.. -ldb -ldb_cxx"
+CXXFLAGS="-I.. -I../../dbinc"
+
+# Test must be run from a local build directory, not from a test
+# directory.
+cd ..
+[ -f db_config.h ] || {
+ echo 'FAIL: chk.cxxtests must be run from a local build directory.'
+ exit 1
+}
+[ -d ../docs_src ] || {
+ echo 'FAIL: chk.cxxtests must be run from a local build directory.'
+ exit 1
+}
+[ -f libdb.a ] || make libdb.a || {
+ echo 'FAIL: unable to build libdb.a'
+ exit 1
+}
+[ -f libdb_cxx.a ] || make libdb_cxx.a || {
+ echo 'FAIL: unable to build libdb_cxx.a'
+ exit 1
+}
+CXX=`sed -e '/^CXX=/!d' -e 's/^CXX=//' -e 's/.*mode=compile *//' Makefile`
+echo " ====== cxx tests using $CXX"
+testnames=`cd $TEST_CXX_SRCDIR; ls *.cpp | sed -e 's/\.cpp$//'`
+
+for testname in $testnames; do
+ if grep -x $testname $TEST_CXX_SRCDIR/ignore > /dev/null; then
+ echo " **** cxx test $testname ignored"
+ continue
+ fi
+
+ echo " ==== cxx test $testname"
+ rm -rf TESTCXX; mkdir TESTCXX
+ cd ./TESTCXX
+ testprefix=../$TEST_CXX_SRCDIR/$testname
+
+ ${CXX} ${CXXFLAGS} -o $testname $testprefix.cpp ${LIBS} > ../$testname.compileout 2>&1 || {
+ echo "FAIL: compilation of $testname failed, see ../$testname.compileout"
+ exit 1
+ }
+ rm -f ../$testname.compileout
+ infile=$testprefix.testin
+ [ -f $infile ] || infile=/dev/null
+ goodoutfile=$testprefix.testout
+ [ -f $goodoutfile ] || goodoutfile=/dev/null
+ gooderrfile=$testprefix.testerr
+ [ -f $gooderrfile ] || gooderrfile=/dev/null
+ ./$testname <$infile >../$testname.out 2>../$testname.err
+ cmp ../$testname.out $goodoutfile > /dev/null || {
+ echo "FAIL: $testname output differs: see ../$testname.out, $goodoutfile"
+ exit 1
+ }
+ cmp ../$testname.err $gooderrfile > /dev/null || {
+ echo "FAIL: $testname error differs: see ../$testname.err, $gooderrfile"
+ exit 1
+ }
+ cd ..
+ rm -f $testname.err $testname.out
+done
+rm -rf TESTCXX
+exit 0
diff --git a/bdb/test/scr015/ignore b/bdb/test/scr015/ignore
new file mode 100644
index 00000000000..55ce82ae372
--- /dev/null
+++ b/bdb/test/scr015/ignore
@@ -0,0 +1,4 @@
+#
+# $Id: ignore,v 1.3 2001/10/12 13:02:32 dda Exp $
+#
+# A list of tests to ignore
diff --git a/bdb/test/scr015/testall b/bdb/test/scr015/testall
new file mode 100644
index 00000000000..a2d493a8b22
--- /dev/null
+++ b/bdb/test/scr015/testall
@@ -0,0 +1,32 @@
+#!/bin/sh -
+# $Id: testall,v 1.3 2001/09/13 14:49:36 dda Exp $
+#
+# Run all the C++ regression tests
+
+ecode=0
+prefixarg=""
+stdinarg=""
+while :
+do
+ case "$1" in
+ --prefix=* )
+ prefixarg="$1"; shift;;
+ --stdin )
+ stdinarg="$1"; shift;;
+ * )
+ break
+ esac
+done
+files="`find . -name \*.cpp -print`"
+for file in $files; do
+ name=`echo $file | sed -e 's:^\./::' -e 's/\.cpp$//'`
+ if grep $name ignore > /dev/null; then
+ echo " **** cxx test $name ignored"
+ else
+ echo " ==== cxx test $name"
+ if ! sh ./testone $prefixarg $stdinarg $name; then
+ ecode=1
+ fi
+ fi
+done
+exit $ecode
diff --git a/bdb/test/scr015/testone b/bdb/test/scr015/testone
new file mode 100644
index 00000000000..3bbba3f90f0
--- /dev/null
+++ b/bdb/test/scr015/testone
@@ -0,0 +1,122 @@
+#!/bin/sh -
+# $Id: testone,v 1.5 2002/07/05 22:17:59 dda Exp $
+#
+# Run just one C++ regression test; the single argument
+# is the basename of the test, e.g. TestRpcServer
+
+error()
+{
+ echo '' >&2
+ echo "C++ regression error: $@" >&2
+ echo '' >&2
+ ecode=1
+}
+
+# compares the result against the good version,
+# reports differences, and removes the result file
+# if there are no differences.
+#
+compare_result()
+{
+ good="$1"
+ latest="$2"
+ if [ ! -e "$good" ]; then
+ echo "Note: $good does not exist"
+ return
+ fi
+ tmpout=/tmp/blddb$$.tmp
+ diff "$good" "$latest" > $tmpout
+ if [ -s $tmpout ]; then
+ nbad=`grep '^[0-9]' $tmpout | wc -l`
+ error "$good and $latest differ in $nbad places."
+ else
+ rm $latest
+ fi
+ rm -f $tmpout
+}
+
+ecode=0
+stdinflag=n
+gdbflag=n
+CXX=${CXX:-c++}
+LIBS=${LIBS:-}
+
+# remove any -c option in the CXXFLAGS
+CXXFLAGS="`echo " ${CXXFLAGS} " | sed -e 's/ -c //g'`"
+
+# determine the prefix of the install tree
+prefix=""
+while :
+do
+ case "$1" in
+ --prefix=* )
+ prefix="`echo $1 | sed -e 's/--prefix=//'`"; shift
+ LIBS="-L$prefix/lib -ldb_cxx $LIBS"
+ CXXFLAGS="-I$prefix/include $CXXFLAGS"
+ export LD_LIBRARY_PATH="$prefix/lib:$LD_LIBRARY_PATH"
+ ;;
+ --stdin )
+ stdinflag=y; shift
+ ;;
+ --gdb )
+ CXXFLAGS="-g $CXXFLAGS"
+ gdbflag=y; shift
+ ;;
+ * )
+ break
+ ;;
+ esac
+done
+
+if [ "$#" = 0 ]; then
+ echo 'Usage: testone [ --prefix=<dir> | --stdin ] TestName'
+ exit 1
+fi
+name="$1"
+
+# compile
+rm -rf TESTDIR; mkdir TESTDIR
+cd ./TESTDIR
+
+${CXX} ${CXXFLAGS} -o $name ../$name.cpp ${LIBS} > ../$name.compileout 2>&1
+if [ $? != 0 -o -s ../$name.compileout ]; then
+ error "compilation of $name failed, see $name.compileout"
+ exit 1
+fi
+rm -f ../$name.compileout
+
+# find input and error file
+infile=../$name.testin
+if [ ! -f $infile ]; then
+ infile=/dev/null
+fi
+
+# run and diff results
+rm -rf TESTDIR
+if [ "$gdbflag" = y ]; then
+ if [ -s $infile ]; then
+ echo "Input file is $infile"
+ fi
+ gdb ./$name
+ exit 0
+elif [ "$stdinflag" = y ]; then
+ ./$name >../$name.out 2>../$name.err
+else
+ ./$name <$infile >../$name.out 2>../$name.err
+fi
+cd ..
+
+testerr=$name.testerr
+if [ ! -f $testerr ]; then
+ testerr=/dev/null
+fi
+
+testout=$name.testout
+if [ ! -f $testout ]; then
+ testout=/dev/null
+fi
+
+compare_result $testout $name.out
+compare_result $testerr $name.err
+rm -rf TESTDIR
+exit $ecode
diff --git a/bdb/test/scr016/CallbackTest.java b/bdb/test/scr016/CallbackTest.java
new file mode 100644
index 00000000000..eede964a027
--- /dev/null
+++ b/bdb/test/scr016/CallbackTest.java
@@ -0,0 +1,83 @@
+package com.sleepycat.test;
+import com.sleepycat.db.*;
+
+public class CallbackTest
+{
+ public static void main(String args[])
+ {
+ try {
+ Db db = new Db(null, 0);
+ db.set_bt_compare(new BtreeCompare());
+ db.open(null, "test.db", "", Db.DB_BTREE, Db.DB_CREATE, 0666);
+ StringDbt[] keys = new StringDbt[10];
+ StringDbt[] datas = new StringDbt[10];
+ for (int i = 0; i<10; i++) {
+ int val = (i * 3) % 10;
+ keys[i] = new StringDbt("key" + val);
+ datas[i] = new StringDbt("data" + val);
+ System.out.println("put " + val);
+ db.put(null, keys[i], datas[i], 0);
+ }
+ }
+ catch (DbException dbe) {
+ System.err.println("FAIL: " + dbe);
+ }
+ catch (java.io.FileNotFoundException fnfe) {
+ System.err.println("FAIL: " + fnfe);
+ }
+
+ }
+
+
+}
+
+class BtreeCompare
+ implements DbBtreeCompare
+{
+ /* A weird comparator, for example.
+ * In fact, it may not be legal, since it's not monotonically increasing.
+ */
+ public int bt_compare(Db db, Dbt dbt1, Dbt dbt2)
+ {
+ System.out.println("compare function called");
+ byte b1[] = dbt1.get_data();
+ byte b2[] = dbt2.get_data();
+ System.out.println(" " + (new String(b1)) + ", " + (new String(b2)));
+ int len1 = b1.length;
+ int len2 = b2.length;
+ if (len1 != len2)
+ return (len1 < len2) ? 1 : -1;
+ int value = 1;
+ for (int i=0; i<len1; i++) {
+ if (b1[i] != b2[i])
+ return (b1[i] < b2[i]) ? value : -value;
+ value *= -1;
+ }
+ return 0;
+ }
+}
+
+class StringDbt extends Dbt
+{
+ StringDbt()
+ {
+ set_flags(Db.DB_DBT_MALLOC); // tell Db to allocate on retrieval
+ }
+
+ StringDbt(String value)
+ {
+ setString(value);
+ set_flags(Db.DB_DBT_MALLOC); // tell Db to allocate on retrieval
+ }
+
+ void setString(String value)
+ {
+ set_data(value.getBytes());
+ set_size(value.length());
+ }
+
+ String getString()
+ {
+ return new String(get_data(), 0, get_size());
+ }
+}
diff --git a/bdb/test/scr016/CallbackTest.testout b/bdb/test/scr016/CallbackTest.testout
new file mode 100644
index 00000000000..68797d4a2de
--- /dev/null
+++ b/bdb/test/scr016/CallbackTest.testout
@@ -0,0 +1,60 @@
+put 0
+put 3
+compare function called
+ key3, key0
+put 6
+compare function called
+ key6, key3
+put 9
+compare function called
+ key9, key6
+put 2
+compare function called
+ key2, key9
+compare function called
+ key2, key0
+compare function called
+ key2, key6
+compare function called
+ key2, key3
+compare function called
+ key2, key0
+put 5
+compare function called
+ key5, key3
+compare function called
+ key5, key9
+compare function called
+ key5, key6
+put 8
+compare function called
+ key8, key5
+compare function called
+ key8, key9
+compare function called
+ key8, key6
+put 1
+compare function called
+ key1, key9
+compare function called
+ key1, key0
+compare function called
+ key1, key5
+compare function called
+ key1, key2
+compare function called
+ key1, key0
+put 4
+compare function called
+ key4, key5
+compare function called
+ key4, key2
+compare function called
+ key4, key3
+put 7
+compare function called
+ key7, key4
+compare function called
+ key7, key8
+compare function called
+ key7, key6
diff --git a/bdb/test/scr016/README b/bdb/test/scr016/README
new file mode 100644
index 00000000000..226a8aa3b77
--- /dev/null
+++ b/bdb/test/scr016/README
@@ -0,0 +1,37 @@
+# $Id: README,v 1.2 2001/05/31 23:09:10 dda Exp $
+
+Use the scripts testall or testone to run all, or just one of the Java
+tests. You must be in this directory to run them. For example,
+
+ $ export LD_LIBRARY_PATH=/usr/local/Berkeley3.3/lib
+ $ ./testone TestAppendRecno
+ $ ./testall
+
+The scripts will use the javac and java in your path.  Set the environment
+variables $JAVAC and $JAVA to override this.  They will also honor
+any $CLASSPATH that is already set, prepending ../../../../classes to
+it, which is where the test .class files are put, and where the DB
+.class files can normally be found after a build on Unix and Windows.
+If none of these variables are set, everything will probably work
+with whatever java/javac is in your path.
+
+To run successfully, you will probably need to set $LD_LIBRARY_PATH
+to the directory containing libdb_java-X.Y.so.
+
+As an alternative, use the --prefix=<DIR> option, a la configure,
+to set the top of the BerkeleyDB install directory.  This forces
+the proper options to be added to $LD_LIBRARY_PATH.
+For example,
+
+ $ ./testone --prefix=/usr/include/BerkeleyDB TestAppendRecno
+ $ ./testall --prefix=/usr/include/BerkeleyDB
+
+The test framework is pretty simple.  Any <name>.java file in this
+directory that is not mentioned in the 'ignore' file represents a
+test.  If the test does not compile successfully, the compiler output
+is left in <name>.compileout.  Otherwise, the java program is run in
+a clean subdirectory using as input <name>.testin, or if that doesn't
+exist, /dev/null.  Output and error from the test run are put into
+<name>.out and <name>.err.  If <name>.testout and <name>.testerr exist,
+they are used as reference files and any differences are reported.
+If either of the reference files does not exist, /dev/null is used.
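
A rough manual equivalent of what testone does for a single Java test, reusing the library path from the README's own example and the ../../../../classes location it describes (both paths are examples; the harness also runs the test in a clean subdirectory, which is omitted here):

    $ export CLASSPATH=.:../../../../classes:$CLASSPATH
    $ export LD_LIBRARY_PATH=/usr/local/Berkeley3.3/lib
    $ javac -d . TestAppendRecno.java
    $ java com.sleepycat.test.TestAppendRecno < /dev/null > TestAppendRecno.out
    $ diff TestAppendRecno.out TestAppendRecno.testout
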
diff --git a/bdb/test/scr016/TestAppendRecno.java b/bdb/test/scr016/TestAppendRecno.java
new file mode 100644
index 00000000000..f4ea70ca084
--- /dev/null
+++ b/bdb/test/scr016/TestAppendRecno.java
@@ -0,0 +1,258 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: TestAppendRecno.java,v 1.4 2002/08/16 19:35:53 dda Exp $
+ */
+
+package com.sleepycat.test;
+
+import com.sleepycat.db.*;
+import java.io.File;
+import java.io.FileNotFoundException;
+import java.io.InputStreamReader;
+import java.io.IOException;
+import java.io.PrintStream;
+
+public class TestAppendRecno
+ implements DbAppendRecno
+{
+ private static final String FileName = "access.db";
+ int callback_count = 0;
+ Db table = null;
+
+ public TestAppendRecno()
+ {
+ }
+
+ private static void usage()
+ {
+ System.err.println("usage: TestAppendRecno\n");
+ System.exit(1);
+ }
+
+ public static void main(String argv[])
+ {
+ try
+ {
+ TestAppendRecno app = new TestAppendRecno();
+ app.run();
+ }
+ catch (DbException dbe)
+ {
+ System.err.println("TestAppendRecno: " + dbe.toString());
+ System.exit(1);
+ }
+ catch (FileNotFoundException fnfe)
+ {
+ System.err.println("TestAppendRecno: " + fnfe.toString());
+ System.exit(1);
+ }
+ System.exit(0);
+ }
+
+ public void run()
+ throws DbException, FileNotFoundException
+ {
+ // Remove the previous database.
+ new File(FileName).delete();
+
+ // Create the database object.
+ // There is no environment for this simple example.
+ table = new Db(null, 0);
+ table.set_error_stream(System.err);
+ table.set_errpfx("TestAppendRecno");
+ table.set_append_recno(this);
+
+ table.open(null, FileName, null, Db.DB_RECNO, Db.DB_CREATE, 0644);
+ for (int i=0; i<10; i++) {
+ System.out.println("\n*** Iteration " + i );
+ try {
+ RecnoDbt key = new RecnoDbt(77+i);
+ StringDbt data = new StringDbt("data" + i + "_xyz");
+ table.put(null, key, data, Db.DB_APPEND);
+ }
+ catch (DbException dbe) {
+ System.out.println("dbe: " + dbe);
+ }
+ }
+
+ // Acquire an iterator for the table.
+ Dbc iterator;
+ iterator = table.cursor(null, 0);
+
+ // Walk through the table, printing the key/data pairs.
+ // See class StringDbt defined below.
+ //
+ RecnoDbt key = new RecnoDbt();
+ StringDbt data = new StringDbt();
+ while (iterator.get(key, data, Db.DB_NEXT) == 0)
+ {
+ System.out.println(key.getRecno() + " : " + data.getString());
+ }
+ iterator.close();
+ table.close(0);
+ System.out.println("Test finished.");
+ }
+
+ public void db_append_recno(Db db, Dbt dbt, int recno)
+ throws DbException
+ {
+ int count = callback_count++;
+
+ System.out.println("====\ncallback #" + count);
+ System.out.println("db is table: " + (db == table));
+ System.out.println("recno = " + recno);
+
+ // This gives variable output.
+ //System.out.println("dbt = " + dbt);
+ if (dbt instanceof RecnoDbt) {
+ System.out.println("dbt = " +
+ ((RecnoDbt)dbt).getRecno());
+ }
+ else if (dbt instanceof StringDbt) {
+ System.out.println("dbt = " +
+ ((StringDbt)dbt).getString());
+ }
+ else {
+ // Note: the dbts are created out of whole
+ // cloth by Berkeley DB, not us!
+ System.out.println("internally created dbt: " +
+ new StringDbt(dbt) + ", size " +
+ dbt.get_size());
+ }
+
+ switch (count) {
+ case 0:
+ // nothing
+ break;
+
+ case 1:
+ dbt.set_size(dbt.get_size() - 1);
+ break;
+
+ case 2:
+ System.out.println("throwing...");
+ throw new DbException("append_recno thrown");
+ //not reached
+
+ case 3:
+ // Should result in an error (size unchanged).
+ dbt.set_offset(1);
+ break;
+
+ case 4:
+ dbt.set_offset(1);
+ dbt.set_size(dbt.get_size() - 1);
+ break;
+
+ case 5:
+ dbt.set_offset(1);
+ dbt.set_size(dbt.get_size() - 2);
+ break;
+
+ case 6:
+ dbt.set_data(new String("abc").getBytes());
+ dbt.set_size(3);
+ break;
+
+ case 7:
+ // Should result in an error.
+ dbt.set_data(null);
+ break;
+
+ case 8:
+ // Should result in an error.
+ dbt.set_data(new String("abc").getBytes());
+ dbt.set_size(4);
+ break;
+
+ default:
+ break;
+ }
+ }
+
+
+ // Here's an example of how you can extend a Dbt to store recno's.
+ //
+ static /*inner*/
+ class RecnoDbt extends Dbt
+ {
+ RecnoDbt()
+ {
+ this(0); // let other constructor do most of the work
+ }
+
+ RecnoDbt(int value)
+ {
+ set_flags(Db.DB_DBT_USERMEM); // do not allocate on retrieval
+ arr = new byte[4];
+ set_data(arr); // use our local array for data
+ set_ulen(4); // size of return storage
+ setRecno(value);
+ }
+
+ public String toString() /*override*/
+ {
+ return String.valueOf(getRecno());
+ }
+
+ void setRecno(int value)
+ {
+ set_recno_key_data(value);
+ set_size(arr.length);
+ }
+
+ int getRecno()
+ {
+ return get_recno_key_data();
+ }
+
+ byte arr[];
+ }
+
+ // Here's an example of how you can extend a Dbt in a straightforward
+ // way to allow easy storage/retrieval of strings, or whatever
+ // kind of data you wish. We've declared it as a static inner
+ // class, but it need not be.
+ //
+ static /*inner*/
+ class StringDbt extends Dbt
+ {
+ StringDbt(Dbt dbt)
+ {
+ set_data(dbt.get_data());
+ set_size(dbt.get_size());
+ }
+
+ StringDbt()
+ {
+ set_flags(Db.DB_DBT_MALLOC); // tell Db to allocate on retrieval
+ }
+
+ StringDbt(String value)
+ {
+ setString(value);
+ set_flags(Db.DB_DBT_MALLOC); // tell Db to allocate on retrieval
+ }
+
+ void setString(String value)
+ {
+ set_data(value.getBytes());
+ set_size(value.length());
+ }
+
+ String getString()
+ {
+ return new String(get_data(), 0, get_size());
+ }
+
+ public String toString() /*override*/
+ {
+ return getString();
+ }
+ }
+}
+
diff --git a/bdb/test/scr016/TestAppendRecno.testout b/bdb/test/scr016/TestAppendRecno.testout
new file mode 100644
index 00000000000..970174e7a96
--- /dev/null
+++ b/bdb/test/scr016/TestAppendRecno.testout
@@ -0,0 +1,82 @@
+
+*** Iteration 0
+====
+callback #0
+db is table: true
+recno = 1
+internally created dbt: data0_xyz, size 9
+
+*** Iteration 1
+====
+callback #1
+db is table: true
+recno = 2
+internally created dbt: data1_xyz, size 9
+
+*** Iteration 2
+====
+callback #2
+db is table: true
+recno = 3
+internally created dbt: data2_xyz, size 9
+throwing...
+dbe: com.sleepycat.db.DbException: append_recno thrown
+
+*** Iteration 3
+====
+callback #3
+db is table: true
+recno = 3
+internally created dbt: data3_xyz, size 9
+dbe: com.sleepycat.db.DbException: Dbt.size + Dbt.offset greater than array length
+
+*** Iteration 4
+====
+callback #4
+db is table: true
+recno = 3
+internally created dbt: data4_xyz, size 9
+
+*** Iteration 5
+====
+callback #5
+db is table: true
+recno = 4
+internally created dbt: data5_xyz, size 9
+
+*** Iteration 6
+====
+callback #6
+db is table: true
+recno = 5
+internally created dbt: data6_xyz, size 9
+
+*** Iteration 7
+====
+callback #7
+db is table: true
+recno = 6
+internally created dbt: data7_xyz, size 9
+dbe: com.sleepycat.db.DbException: Dbt.data is null
+
+*** Iteration 8
+====
+callback #8
+db is table: true
+recno = 6
+internally created dbt: data8_xyz, size 9
+dbe: com.sleepycat.db.DbException: Dbt.size + Dbt.offset greater than array length
+
+*** Iteration 9
+====
+callback #9
+db is table: true
+recno = 6
+internally created dbt: data9_xyz, size 9
+1 : data0_xyz
+2 : data1_xy
+3 : ata4_xyz
+4 : ata5_xy
+5 : abc
+6 : data9_xyz
+Test finished.
diff --git a/bdb/test/scr016/TestAssociate.java b/bdb/test/scr016/TestAssociate.java
new file mode 100644
index 00000000000..4105b9cb0a1
--- /dev/null
+++ b/bdb/test/scr016/TestAssociate.java
@@ -0,0 +1,333 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: TestAssociate.java,v 1.4 2002/08/16 19:35:54 dda Exp $
+ */
+
+package com.sleepycat.test;
+
+import com.sleepycat.db.*;
+import java.io.File;
+import java.io.FileNotFoundException;
+import java.io.Reader;
+import java.io.StringReader;
+import java.io.IOException;
+import java.io.PrintStream;
+import java.util.Hashtable;
+
+public class TestAssociate
+ implements DbDupCompare
+{
+ private static final String FileName = "access.db";
+ public static Db saveddb1 = null;
+ public static Db saveddb2 = null;
+
+ public TestAssociate()
+ {
+ }
+
+ private static void usage()
+ {
+ System.err.println("usage: TestAssociate\n");
+ System.exit(1);
+ }
+
+ public static void main(String argv[])
+ {
+ try
+ {
+ TestAssociate app = new TestAssociate();
+ app.run();
+ }
+ catch (DbException dbe)
+ {
+ System.err.println("TestAssociate: " + dbe.toString());
+ System.exit(1);
+ }
+ catch (FileNotFoundException fnfe)
+ {
+ System.err.println("TestAssociate: " + fnfe.toString());
+ System.exit(1);
+ }
+ System.exit(0);
+ }
+
+ public static int counter = 0;
+ public static String results[] = { "abc", "def", "ghi", "JKL", "MNO", null };
+
+ // Prompts for a line, and keeps prompting until a non-blank
+ // line is returned. Returns null on error.
+ //
+ static public String askForLine(Reader reader,
+ PrintStream out, String prompt)
+ {
+ /*
+ String result = "";
+ while (result != null && result.length() == 0) {
+ out.print(prompt);
+ out.flush();
+ result = getLine(reader);
+ }
+ return result;
+ */
+ return results[counter++];
+ }
+
+ // Not terribly efficient, but does the job.
+ // Works for reading a line from stdin or a file.
+ // Returns null on EOF. If EOF appears in the middle
+ // of a line, returns that line, then null on next call.
+ //
+ static public String getLine(Reader reader)
+ {
+ StringBuffer b = new StringBuffer();
+ int c;
+ try {
+ while ((c = reader.read()) != -1 && c != '\n') {
+ if (c != '\r')
+ b.append((char)c);
+ }
+ }
+ catch (IOException ioe) {
+ c = -1;
+ }
+
+ if (c == -1 && b.length() == 0)
+ return null;
+ else
+ return b.toString();
+ }
+
+ static public String shownull(Object o)
+ {
+ if (o == null)
+ return "null";
+ else
+ return "not null";
+ }
+
+ public void run()
+ throws DbException, FileNotFoundException
+ {
+ // Remove the previous database.
+ new File(FileName).delete();
+
+ // Create the database object.
+ // There is no environment for this simple example.
+ DbEnv dbenv = new DbEnv(0);
+ dbenv.open("./", Db.DB_CREATE|Db.DB_INIT_MPOOL, 0644);
+ (new java.io.File(FileName)).delete();
+ Db table = new Db(dbenv, 0);
+ Db table2 = new Db(dbenv, 0);
+ table2.set_dup_compare(this);
+ table2.set_flags(Db.DB_DUPSORT);
+ table.set_error_stream(System.err);
+ table2.set_error_stream(System.err);
+ table.set_errpfx("TestAssociate");
+ table2.set_errpfx("TestAssociate(table2)");
+ System.out.println("Primary database is " + shownull(table));
+ System.out.println("Secondary database is " + shownull(table2));
+ saveddb1 = table;
+ saveddb2 = table2;
+ table.open(null, FileName, null, Db.DB_BTREE, Db.DB_CREATE, 0644);
+ table2.open(null, FileName + "2", null,
+ Db.DB_BTREE, Db.DB_CREATE, 0644);
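+ // Associate table2 with the primary as a secondary index; from now on
+ // every put into the primary invokes Capitalize.secondary_key_create
+ // to derive the corresponding secondary key.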
+ table.associate(null, table2, new Capitalize(), 0);
+
+ //
+ // Insert records into the database, where the key is the user
+ // input and the data is the user input in reverse order.
+ //
+ Reader reader = new StringReader("abc\ndef\njhi");
+
+ for (;;) {
+ String line = askForLine(reader, System.out, "input> ");
+ if (line == null)
+ break;
+
+ String reversed = (new StringBuffer(line)).reverse().toString();
+
+ // See definition of StringDbt below
+ //
+ StringDbt key = new StringDbt(line);
+ StringDbt data = new StringDbt(reversed);
+
+ try
+ {
+ int err;
+ if ((err = table.put(null,
+ key, data, Db.DB_NOOVERWRITE)) == Db.DB_KEYEXIST) {
+ System.out.println("Key " + line + " already exists.");
+ }
+ }
+ catch (DbException dbe)
+ {
+ System.out.println(dbe.toString());
+ }
+ System.out.println("");
+ }
+
+ // Acquire an iterator for the table.
+ Dbc iterator;
+ iterator = table2.cursor(null, 0);
+
+ // Walk through the table, printing the key/data pairs.
+ // See class StringDbt defined below.
+ //
+ StringDbt key = new StringDbt();
+ StringDbt data = new StringDbt();
+ StringDbt pkey = new StringDbt();
+
+ while (iterator.get(key, data, Db.DB_NEXT) == 0)
+ {
+ System.out.println(key.getString() + " : " + data.getString());
+ }
+
+ key.setString("BC");
+ System.out.println("get BC returns " + table2.get(null, key, data, 0));
+ System.out.println(" values: " + key.getString() + " : " + data.getString());
+ System.out.println("pget BC returns " + table2.pget(null, key, pkey, data, 0));
+ System.out.println(" values: " + key.getString() + " : " + pkey.getString() + " : " + data.getString());
+ key.setString("KL");
+ System.out.println("get KL returns " + table2.get(null, key, data, 0));
+ System.out.println(" values: " + key.getString() + " : " + data.getString());
+ System.out.println("pget KL returns " + table2.pget(null, key, pkey, data, 0));
+ System.out.println(" values: " + key.getString() + " : " + pkey.getString() + " : " + data.getString());
+
+ iterator.close();
+ table.close(0);
+ }
+
+ // Here's an example of how you can extend a Dbt in a straightforward
+ // way to allow easy storage/retrieval of strings, or whatever
+ // kind of data you wish. We've declared it as a static inner
+ // class, but it need not be.
+ //
+ static /*inner*/
+ class StringDbt extends Dbt
+ {
+ StringDbt()
+ {
+ set_flags(Db.DB_DBT_MALLOC); // tell Db to allocate on retrieval
+ }
+
+ StringDbt(String value)
+ {
+ setString(value);
+ set_flags(Db.DB_DBT_MALLOC); // tell Db to allocate on retrieval
+ }
+
+ void setString(String value)
+ {
+ set_data(value.getBytes());
+ set_size(value.length());
+ }
+
+ String getString()
+ {
+ return new String(get_data(), 0, get_size());
+ }
+
+ public String toString()
+ {
+ return "StringDbt=" + getString();
+ }
+ }
+
+ /* Creates a deliberately simple-minded secondary index as follows:
+ For an N-letter key, we use the N-1 letters starting at
+ position 1. If those letters are already capitalized,
+ we return the old array, but with offset set to 1.
+ If they are not capitalized, we create a new,
+ capitalized array. This would be pointless in a real
+ application, but it exercises all the paths in the runtime.
+ */
+ public static class Capitalize implements DbSecondaryKeyCreate
+ {
+ public int secondary_key_create(Db secondary, Dbt key, Dbt value,
+ Dbt result)
+ throws DbException
+ {
+ String which = "unknown db";
+ if (saveddb1.equals(secondary)) {
+ which = "primary";
+ }
+ else if (saveddb2.equals(secondary)) {
+ which = "secondary";
+ }
+ System.out.println("secondary_key_create, Db: " + shownull(secondary) + "(" + which + "), key: " + show_dbt(key) + ", data: " + show_dbt(value));
+ int len = key.get_size();
+ byte[] arr = key.get_data();
+ boolean capped = true;
+
+ if (len < 1)
+ throw new DbException("bad key");
+
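+ // Returning DB_DONOTINDEX tells the library not to create a
+ // secondary entry for this record.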
+ if (len < 2)
+ return Db.DB_DONOTINDEX;
+
+ result.set_size(len - 1);
+ for (int i=1; capped && i<len; i++) {
+ if (!Character.isUpperCase((char)arr[i]))
+ capped = false;
+ }
+ if (capped) {
+ System.out.println(" creating key(1): " + new String(arr, 1, len-1));
+ result.set_data(arr);
+ result.set_offset(1);
+ }
+ else {
+ System.out.println(" creating key(2): " + (new String(arr)).substring(1).
+ toUpperCase());
+ result.set_data((new String(arr)).substring(1).
+ toUpperCase().getBytes());
+ }
+ return 0;
+ }
+ }
+
+ public int dup_compare(Db db, Dbt dbt1, Dbt dbt2)
+ {
+ System.out.println("compare");
+ int sz1 = dbt1.get_size();
+ int sz2 = dbt2.get_size();
+ if (sz1 < sz2)
+ return -1;
+ if (sz1 > sz2)
+ return 1;
+ byte[] data1 = dbt1.get_data();
+ byte[] data2 = dbt2.get_data();
+ for (int i=0; i<sz1; i++)
+ if (data1[i] != data2[i])
+ return (data1[i] < data2[i] ? -1 : 1);
+ return 0;
+ }
+
+ public static int nseen = 0;
+ public static Hashtable ht = new Hashtable();
+
+ public static String show_dbt(Dbt dbt)
+ {
+ String name;
+
+ if (dbt == null)
+ return "null dbt";
+
+ name = (String)ht.get(dbt);
+ if (name == null) {
+ name = "Dbt" + (nseen++);
+ ht.put(dbt, name);
+ }
+
+ byte[] value = dbt.get_data();
+ if (value == null)
+ return name + "(null)";
+ else
+ return name + "(\"" + new String(value) + "\")";
+ }
+}
+
+
diff --git a/bdb/test/scr016/TestAssociate.testout b/bdb/test/scr016/TestAssociate.testout
new file mode 100644
index 00000000000..34414b660d1
--- /dev/null
+++ b/bdb/test/scr016/TestAssociate.testout
@@ -0,0 +1,30 @@
+Primary database is not null
+Secondary database is not null
+secondary_key_create, Db: not null(secondary), key: Dbt0("abc"), data: Dbt1("cba")
+ creating key(2): BC
+
+secondary_key_create, Db: not null(secondary), key: Dbt2("def"), data: Dbt3("fed")
+ creating key(2): EF
+
+secondary_key_create, Db: not null(secondary), key: Dbt4("ghi"), data: Dbt5("ihg")
+ creating key(2): HI
+
+secondary_key_create, Db: not null(secondary), key: Dbt6("JKL"), data: Dbt7("LKJ")
+ creating key(1): KL
+
+secondary_key_create, Db: not null(secondary), key: Dbt8("MNO"), data: Dbt9("ONM")
+ creating key(1): NO
+
+BC : cba
+EF : fed
+HI : ihg
+KL : LKJ
+NO : ONM
+get BC returns 0
+ values: BC : cba
+pget BC returns 0
+ values: BC : abc : cba
+get KL returns 0
+ values: KL : LKJ
+pget KL returns 0
+ values: KL : JKL : LKJ
diff --git a/bdb/test/scr016/TestClosedDb.java b/bdb/test/scr016/TestClosedDb.java
new file mode 100644
index 00000000000..3bd6e5380f8
--- /dev/null
+++ b/bdb/test/scr016/TestClosedDb.java
@@ -0,0 +1,62 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: TestClosedDb.java,v 1.4 2002/01/23 14:29:51 bostic Exp $
+ */
+
+/*
+ * Close the Db, and make sure operations after that fail gracefully.
+ */
+
+package com.sleepycat.test;
+
+import com.sleepycat.db.*;
+import java.io.FileNotFoundException;
+
+public class TestClosedDb
+{
+ public static void main(String[] args)
+ {
+ try {
+ Db db = new Db(null, 0);
+ db.open(null, "my.db", null, Db.DB_BTREE, Db.DB_CREATE, 0644);
+
+ // populate our massive database.
+ Dbt keydbt = new Dbt("key".getBytes());
+ Dbt datadbt = new Dbt("data".getBytes());
+ db.put(null, keydbt, datadbt, 0);
+
+ // Now, retrieve. We could use keydbt over again,
+ // but that wouldn't be typical in an application.
+ Dbt goodkeydbt = new Dbt("key".getBytes());
+ Dbt badkeydbt = new Dbt("badkey".getBytes());
+ Dbt resultdbt = new Dbt();
+ resultdbt.set_flags(Db.DB_DBT_MALLOC);
+
+ int ret;
+
+ // Close the db - subsequent operations should fail
+ // by throwing an exception.
+ db.close(0);
+ try {
+ db.get(null, goodkeydbt, resultdbt, 0);
+ System.out.println("Error - did not expect to get this far.");
+ }
+ catch (DbException dbe) {
+ System.out.println("Got expected Db Exception: " + dbe);
+ }
+ System.out.println("finished test");
+ }
+ catch (DbException dbe) {
+ System.err.println("Db Exception: " + dbe);
+ }
+ catch (FileNotFoundException fnfe) {
+ System.err.println("FileNotFoundException: " + fnfe);
+ }
+
+ }
+
+}
diff --git a/bdb/test/scr016/TestClosedDb.testout b/bdb/test/scr016/TestClosedDb.testout
new file mode 100644
index 00000000000..ce13883f63a
--- /dev/null
+++ b/bdb/test/scr016/TestClosedDb.testout
@@ -0,0 +1,2 @@
+Got expected Db Exception: com.sleepycat.db.DbException: null object: Invalid argument
+finished test
diff --git a/bdb/test/scr016/TestConstruct01.java b/bdb/test/scr016/TestConstruct01.java
new file mode 100644
index 00000000000..b60073ebc0d
--- /dev/null
+++ b/bdb/test/scr016/TestConstruct01.java
@@ -0,0 +1,474 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: TestConstruct01.java,v 1.6 2002/01/23 14:29:51 bostic Exp $
+ */
+
+/*
+ * Do some regression tests for constructors.
+ * Run normally (without arguments) it is a simple regression test.
+ * Run with a numeric argument, it repeats the regression a number
+ * of times, to try to determine if there are memory leaks.
+ */
+
+package com.sleepycat.test;
+
+import com.sleepycat.db.*;
+import java.io.File;
+import java.io.IOException;
+import java.io.FileNotFoundException;
+
+public class TestConstruct01
+{
+ public static final String CONSTRUCT01_DBNAME = "construct01.db";
+ public static final String CONSTRUCT01_DBDIR = "/tmp";
+ public static final String CONSTRUCT01_DBFULLPATH =
+ CONSTRUCT01_DBDIR + "/" + CONSTRUCT01_DBNAME;
+
+ private int itemcount; // count the number of items in the database
+ public static boolean verbose_flag = false;
+
+ public static void ERR(String a)
+ {
+ System.out.println("FAIL: " + a);
+ System.err.println("FAIL: " + a);
+ sysexit(1);
+ }
+
+ public static void DEBUGOUT(String s)
+ {
+ System.out.println(s);
+ }
+
+ public static void VERBOSEOUT(String s)
+ {
+ if (verbose_flag)
+ System.out.println(s);
+ }
+
+ public static void sysexit(int code)
+ {
+ System.exit(code);
+ }
+
+ private static void check_file_removed(String name, boolean fatal,
+ boolean force_remove_first)
+ {
+ File f = new File(name);
+ if (force_remove_first) {
+ f.delete();
+ }
+ if (f.exists()) {
+ if (fatal)
+ System.out.print("FAIL: ");
+ System.out.print("File \"" + name + "\" still exists after run\n");
+ if (fatal)
+ sysexit(1);
+ }
+ }
+
+
+ // Check that key/data for 0 - count-1 are already present,
+ // and write a key/data for count. The key and data are
+ // both "0123...N" where N == count-1.
+ //
+ // For some reason on Windows, we need to open using the full pathname
+ // of the file when there is no environment, thus the 'has_env'
+ // variable.
+ //
+ void rundb(Db db, int count, boolean has_env, TestOptions options)
+ throws DbException, FileNotFoundException
+ {
+ String name;
+
+ if (has_env)
+ name = CONSTRUCT01_DBNAME;
+ else
+ name = CONSTRUCT01_DBFULLPATH;
+
+ db.set_error_stream(System.err);
+
+ // We don't really care about the pagesize, but we do want
+ // to make sure adjusting Db specific variables works before
+ // opening the db.
+ //
+ db.set_pagesize(1024);
+ db.open(null, name, null, Db.DB_BTREE,
+ (count != 0) ? 0 : Db.DB_CREATE, 0664);
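+ // Only the first pass (count == 0) passes DB_CREATE; later passes
+ // must find the database already on disk.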
+
+
+ // The bit map of keys we've seen
+ long bitmap = 0;
+
+ // The bit map of keys we expect to see
+ long expected = (1 << (count+1)) - 1;
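+ // i.e. bits 0..count set, one for each key length we expect to read back.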
+
+ byte outbuf[] = new byte[count+1];
+ int i;
+ for (i=0; i<count; i++) {
+ outbuf[i] = (byte)('0' + i);
+ //outbuf[i] = System.out.println((byte)('0' + i);
+ }
+ outbuf[i++] = (byte)'x';
+
+ /*
+ System.out.println("byte: " + ('0' + 0) + ", after: " +
+ (int)'0' + "=" + (int)('0' + 0) +
+ "," + (byte)outbuf[0]);
+ */
+
+ Dbt key = new Dbt(outbuf, 0, i);
+ Dbt data = new Dbt(outbuf, 0, i);
+
+ //DEBUGOUT("Put: " + (char)outbuf[0] + ": " + new String(outbuf));
+ db.put(null, key, data, Db.DB_NOOVERWRITE);
+
+ // Acquire a cursor for the table.
+ Dbc dbcp = db.cursor(null, 0);
+
+ // Walk through the table, checking
+ Dbt readkey = new Dbt();
+ Dbt readdata = new Dbt();
+ Dbt whoknows = new Dbt();
+
+ readkey.set_flags(options.dbt_alloc_flags);
+ readdata.set_flags(options.dbt_alloc_flags);
+
+ //DEBUGOUT("Dbc.get");
+ while (dbcp.get(readkey, readdata, Db.DB_NEXT) == 0) {
+ String key_string = new String(readkey.get_data());
+ String data_string = new String(readdata.get_data());
+ //DEBUGOUT("Got: " + key_string + ": " + data_string);
+ int len = key_string.length();
+ if (len <= 0 || key_string.charAt(len-1) != 'x') {
+ ERR("reread terminator is bad");
+ }
+ len--;
+ long bit = (1 << len);
+ if (len > count) {
+ ERR("reread length is bad: expect " + count + " got "+ len + " (" + key_string + ")" );
+ }
+ else if (!data_string.equals(key_string)) {
+ ERR("key/data don't match");
+ }
+ else if ((bitmap & bit) != 0) {
+ ERR("key already seen");
+ }
+ else if ((expected & bit) == 0) {
+ ERR("key was not expected");
+ }
+ else {
+ bitmap |= bit;
+ expected &= ~(bit);
+ for (i=0; i<len; i++) {
+ if (key_string.charAt(i) != ('0' + i)) {
+ System.out.print(" got " + key_string
+ + " (" + (int)key_string.charAt(i)
+ + "), wanted " + i
+ + " (" + (int)('0' + i)
+ + ") at position " + i + "\n");
+ ERR("key is corrupt");
+ }
+ }
+ }
+ }
+ if (expected != 0) {
+ System.out.print(" expected more keys, bitmap is: " + expected + "\n");
+ ERR("missing keys in database");
+ }
+ dbcp.close();
+ db.close(0);
+ }
+
+ void t1(TestOptions options)
+ throws DbException, FileNotFoundException
+ {
+ Db db = new Db(null, 0);
+ rundb(db, itemcount++, false, options);
+ }
+
+ void t2(TestOptions options)
+ throws DbException, FileNotFoundException
+ {
+ Db db = new Db(null, 0);
+ rundb(db, itemcount++, false, options);
+ // rundb(db, itemcount++, false, options);
+ // rundb(db, itemcount++, false, options);
+ }
+
+ void t3(TestOptions options)
+ throws DbException, FileNotFoundException
+ {
+ Db db = new Db(null, 0);
+ // rundb(db, itemcount++, false, options);
+ db.set_errpfx("test3");
+ for (int i=0; i<100; i++)
+ db.set_errpfx("str" + i);
+ rundb(db, itemcount++, false, options);
+ }
+
+ void t4(TestOptions options)
+ throws DbException, FileNotFoundException
+ {
+ DbEnv env = new DbEnv(0);
+ env.open(CONSTRUCT01_DBDIR, Db.DB_CREATE | Db.DB_INIT_MPOOL, 0);
+ Db db = new Db(env, 0);
+ /**/
+ //rundb(db, itemcount++, true, options);
+ db.set_errpfx("test4");
+ rundb(db, itemcount++, true, options);
+ /**/
+ env.close(0);
+ }
+
+ void t5(TestOptions options)
+ throws DbException, FileNotFoundException
+ {
+ DbEnv env = new DbEnv(0);
+ env.open(CONSTRUCT01_DBDIR, Db.DB_CREATE | Db.DB_INIT_MPOOL, 0);
+ Db db = new Db(env, 0);
+ // rundb(db, itemcount++, true, options);
+ db.set_errpfx("test5");
+ rundb(db, itemcount++, true, options);
+ /*
+ env.close(0);
+
+ // reopen the environment, don't recreate
+ env.open(CONSTRUCT01_DBDIR, Db.DB_INIT_MPOOL, 0);
+ // Note we cannot reuse the old Db!
+ */
+ Db anotherdb = new Db(env, 0);
+
+ // rundb(anotherdb, itemcount++, true, options);
+ anotherdb.set_errpfx("test5");
+ rundb(anotherdb, itemcount++, true, options);
+ env.close(0);
+ }
+
+ void t6(TestOptions options)
+ throws DbException, FileNotFoundException
+ {
+ Db db = new Db(null, 0);
+ DbEnv dbenv = new DbEnv(0);
+ db.close(0);
+ dbenv.close(0);
+
+ System.gc();
+ System.runFinalization();
+ }
+
+ // By design, t7 leaves a db and dbenv open; it should be detected.
+ void t7(TestOptions options)
+ throws DbException, FileNotFoundException
+ {
+ Db db = new Db(null, 0);
+ DbEnv dbenv = new DbEnv(0);
+
+ System.gc();
+ System.runFinalization();
+ }
+
+ // remove any existing environment or database
+ void removeall(boolean use_db)
+ {
+ {
+ if (use_db) {
+ try {
+ /**/
+ //memory leak for this:
+ Db tmpdb = new Db(null, 0);
+ tmpdb.remove(CONSTRUCT01_DBFULLPATH, null, 0);
+ /**/
+ DbEnv tmpenv = new DbEnv(0);
+ tmpenv.remove(CONSTRUCT01_DBDIR, Db.DB_FORCE);
+ }
+ catch (DbException dbe) {
+ System.err.println("error during remove: " + dbe);
+ }
+ catch (FileNotFoundException fnfe) {
+ //expected error:
+ // System.err.println("error during remove: " + fnfe);
+ }
+ }
+ }
+ check_file_removed(CONSTRUCT01_DBFULLPATH, true, !use_db);
+ for (int i=0; i<8; i++) {
+ String fname = "__db.00" + i;
+ check_file_removed(fname, true, !use_db);
+ }
+ }
+
+ boolean doall(TestOptions options)
+ {
+ itemcount = 0;
+ try {
+ removeall((options.testmask & 1) != 0);
+ for (int item=1; item<32; item++) {
+ if ((options.testmask & (1 << item)) != 0) {
+ VERBOSEOUT(" Running test " + item + ":");
+ switch (item) {
+ case 1:
+ t1(options);
+ break;
+ case 2:
+ t2(options);
+ break;
+ case 3:
+ t3(options);
+ break;
+ case 4:
+ t4(options);
+ break;
+ case 5:
+ t5(options);
+ break;
+ case 6:
+ t6(options);
+ break;
+ case 7:
+ t7(options);
+ break;
+ default:
+ ERR("unknown test case: " + item);
+ break;
+ }
+ VERBOSEOUT(" finished.\n");
+ }
+ }
+ removeall((options.testmask & 1) != 0);
+ options.successcounter++;
+ return true;
+ }
+ catch (DbException dbe) {
+ ERR("EXCEPTION RECEIVED: " + dbe);
+ }
+ catch (FileNotFoundException fnfe) {
+ ERR("EXCEPTION RECEIVED: " + fnfe);
+ }
+ return false;
+ }
+
+ public static void main(String args[])
+ {
+ int iterations = 200;
+ int mask = 0x7f;
+
+ // Make sure the database file is removed before we start.
+ check_file_removed(CONSTRUCT01_DBFULLPATH, true, true);
+
+ for (int argcnt=0; argcnt<args.length; argcnt++) {
+ String arg = args[argcnt];
+ if (arg.charAt(0) == '-') {
+ // keep on lower bit, which means to remove db between tests.
+ mask = 1;
+ for (int pos=1; pos<arg.length(); pos++) {
+ char ch = arg.charAt(pos);
+ if (ch >= '0' && ch <= '9') {
+ mask |= (1 << (ch - '0'));
+ }
+ else if (ch == 'v') {
+ verbose_flag = true;
+ }
+ else {
+ ERR("Usage: construct01 [-testdigits] count");
+ }
+ }
+ VERBOSEOUT("mask = " + mask);
+
+ }
+ else {
+ try {
+ iterations = Integer.parseInt(arg);
+ if (iterations < 0) {
+ ERR("Usage: construct01 [-testdigits] count");
+ }
+ }
+ catch (NumberFormatException nfe) {
+ ERR("EXCEPTION RECEIVED: " + nfe);
+ }
+ }
+ }
+
+ // Run GC before and after the test to give
+ // a baseline for any Java memory used.
+ //
+ System.gc();
+ System.runFinalization();
+ VERBOSEOUT("gc complete");
+ long starttotal = Runtime.getRuntime().totalMemory();
+ long startfree = Runtime.getRuntime().freeMemory();
+
+ TestConstruct01 con = new TestConstruct01();
+ int[] dbt_flags = { 0, Db.DB_DBT_MALLOC, Db.DB_DBT_REALLOC };
+ String[] dbt_flags_name = { "default", "malloc", "realloc" };
+
+ TestOptions options = new TestOptions();
+ options.testmask = mask;
+
+ for (int flagiter = 0; flagiter < dbt_flags.length; flagiter++) {
+ options.dbt_alloc_flags = dbt_flags[flagiter];
+
+ VERBOSEOUT("Running with DBT alloc flags: " +
+ dbt_flags_name[flagiter]);
+ for (int i=0; i<iterations; i++) {
+ if (iterations != 0) {
+ VERBOSEOUT("(" + i + "/" + iterations + ") ");
+ }
+ VERBOSEOUT("construct01 running:");
+ if (!con.doall(options)) {
+ ERR("SOME TEST FAILED");
+ }
+ else {
+ VERBOSEOUT("\nTESTS SUCCESSFUL");
+ }
+
+ // We continually run GC during the test to keep
+ // the Java memory usage low. That way we can
+ // monitor the total memory usage externally
+ // (e.g. via ps) and verify that we aren't leaking
+ // memory in the JNI or DB layer.
+ //
+ System.gc();
+ System.runFinalization();
+ VERBOSEOUT("gc complete");
+ }
+ }
+
+ if (options.successcounter == 600) {
+ System.out.println("ALL TESTS SUCCESSFUL");
+ }
+ else {
+ System.out.println("***FAIL: " + (600 - options.successcounter) +
+ " tests did not complete");
+ }
+ long endtotal = Runtime.getRuntime().totalMemory();
+ long endfree = Runtime.getRuntime().freeMemory();
+
+ System.out.println("delta for total mem: " + magnitude(endtotal - starttotal));
+ System.out.println("delta for free mem: " + magnitude(endfree - startfree));
+
+ return;
+ }
+
+ static String magnitude(long value)
+ {
+ final long max = 10000000;
+ for (long scale = 10; scale <= max; scale *= 10) {
+ if (value < scale && value > -scale)
+ return "<" + scale;
+ }
+ return ">" + max;
+ }
+
+}
+
+class TestOptions
+{
+ int testmask = 0; // which tests to run
+ int dbt_alloc_flags = 0; // DB_DBT_* flags to use
+ int successcounter =0;
+}
+
diff --git a/bdb/test/scr016/TestConstruct01.testerr b/bdb/test/scr016/TestConstruct01.testerr
new file mode 100644
index 00000000000..e69de29bb2d
--- /dev/null
+++ b/bdb/test/scr016/TestConstruct01.testerr
diff --git a/bdb/test/scr016/TestConstruct01.testout b/bdb/test/scr016/TestConstruct01.testout
new file mode 100644
index 00000000000..5d2041cd197
--- /dev/null
+++ b/bdb/test/scr016/TestConstruct01.testout
@@ -0,0 +1,3 @@
+ALL TESTS SUCCESSFUL
+delta for total mem: <10
+delta for free mem: <10000
diff --git a/bdb/test/scr016/TestConstruct02.java b/bdb/test/scr016/TestConstruct02.java
new file mode 100644
index 00000000000..5bbb55ccd56
--- /dev/null
+++ b/bdb/test/scr016/TestConstruct02.java
@@ -0,0 +1,326 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: TestConstruct02.java,v 1.6 2002/08/16 19:35:54 dda Exp $
+ */
+
+/*
+ * Do some regression tests for constructors.
+ * Run normally (without arguments) it is a simple regression test.
+ * Run with a numeric argument, it repeats the regression a number
+ * of times, to try to determine if there are memory leaks.
+ */
+
+package com.sleepycat.test;
+import com.sleepycat.db.*;
+import java.io.File;
+import java.io.IOException;
+import java.io.FileNotFoundException;
+
+public class TestConstruct02
+{
+ public static final String CONSTRUCT02_DBNAME = "construct02.db";
+ public static final String CONSTRUCT02_DBDIR = "./";
+ public static final String CONSTRUCT02_DBFULLPATH =
+ CONSTRUCT02_DBDIR + "/" + CONSTRUCT02_DBNAME;
+
+ private int itemcount; // count the number of items in the database
+ public static boolean verbose_flag = false;
+
+ private DbEnv dbenv = new DbEnv(0);
+
+ public TestConstruct02()
+ throws DbException, FileNotFoundException
+ {
+ dbenv.open(CONSTRUCT02_DBDIR, Db.DB_CREATE | Db.DB_INIT_MPOOL, 0666);
+ }
+
+ public void close()
+ {
+ try {
+ dbenv.close(0);
+ removeall(true, true);
+ }
+ catch (DbException dbe) {
+ ERR("DbException: " + dbe);
+ }
+ }
+
+ public static void ERR(String a)
+ {
+ System.out.println("FAIL: " + a);
+ sysexit(1);
+ }
+
+ public static void DEBUGOUT(String s)
+ {
+ System.out.println(s);
+ }
+
+ public static void VERBOSEOUT(String s)
+ {
+ if (verbose_flag)
+ System.out.println(s);
+ }
+
+ public static void sysexit(int code)
+ {
+ System.exit(code);
+ }
+
+ private static void check_file_removed(String name, boolean fatal,
+ boolean force_remove_first)
+ {
+ File f = new File(name);
+ if (force_remove_first) {
+ f.delete();
+ }
+ if (f.exists()) {
+ if (fatal)
+ System.out.print("FAIL: ");
+ System.out.print("File \"" + name + "\" still exists after run\n");
+ if (fatal)
+ sysexit(1);
+ }
+ }
+
+
+ // Check that key/data for 0 - count-1 are already present,
+ // and write a key/data for count. The key and data are
+ // both "0123...N" where N == count-1.
+ //
+ void rundb(Db db, int count)
+ throws DbException, FileNotFoundException
+ {
+ if (count >= 64)
+ throw new IllegalArgumentException("rundb count arg >= 64");
+
+ // The bit map of keys we've seen
+ long bitmap = 0;
+
+ // The bit map of keys we expect to see
+ long expected = (1 << (count+1)) - 1;
+
+ byte outbuf[] = new byte[count+1];
+ int i;
+ for (i=0; i<count; i++) {
+ outbuf[i] = (byte)('0' + i);
+ }
+ outbuf[i++] = (byte)'x';
+
+ Dbt key = new Dbt(outbuf, 0, i);
+ Dbt data = new Dbt(outbuf, 0, i);
+
+ db.put(null, key, data, Db.DB_NOOVERWRITE);
+
+ // Acquire a cursor for the table.
+ Dbc dbcp = db.cursor(null, 0);
+
+ // Walk through the table, checking
+ Dbt readkey = new Dbt();
+ Dbt readdata = new Dbt();
+ Dbt whoknows = new Dbt();
+
+ readkey.set_flags(Db.DB_DBT_MALLOC);
+ readdata.set_flags(Db.DB_DBT_MALLOC);
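+ // DB_DBT_MALLOC tells the library to allocate a fresh buffer for
+ // each key/data pair returned by the cursor.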
+
+ while (dbcp.get(readkey, readdata, Db.DB_NEXT) == 0) {
+ byte[] key_bytes = readkey.get_data();
+ byte[] data_bytes = readdata.get_data();
+
+ int len = key_bytes.length;
+ if (len != data_bytes.length) {
+ ERR("key and data are different");
+ }
+ for (i=0; i<len-1; i++) {
+ byte want = (byte)('0' + i);
+ if (key_bytes[i] != want || data_bytes[i] != want) {
+ System.out.println(" got " + new String(key_bytes) +
+ "/" + new String(data_bytes));
+ ERR("key or data is corrupt");
+ }
+ }
+ if (len <= 0 ||
+ key_bytes[len-1] != (byte)'x' ||
+ data_bytes[len-1] != (byte)'x') {
+ ERR("reread terminator is bad");
+ }
+ len--;
+ long bit = (1 << len);
+ if (len > count) {
+ ERR("reread length is bad: expect " + count + " got "+ len);
+ }
+ else if ((bitmap & bit) != 0) {
+ ERR("key already seen");
+ }
+ else if ((expected & bit) == 0) {
+ ERR("key was not expected");
+ }
+ bitmap |= bit;
+ expected &= ~(bit);
+ }
+ if (expected != 0) {
+ System.out.print(" expected more keys, bitmap is: " +
+ expected + "\n");
+ ERR("missing keys in database");
+ }
+ dbcp.close();
+ }
+
+ void t1()
+ throws DbException, FileNotFoundException
+ {
+ Db db = new Db(dbenv, 0);
+ db.set_error_stream(System.err);
+ db.set_pagesize(1024);
+ db.open(null, CONSTRUCT02_DBNAME, null, Db.DB_BTREE,
+ Db.DB_CREATE, 0664);
+
+ rundb(db, itemcount++);
+ rundb(db, itemcount++);
+ rundb(db, itemcount++);
+ rundb(db, itemcount++);
+ rundb(db, itemcount++);
+ rundb(db, itemcount++);
+ db.close(0);
+
+ // Reopen no longer allowed, so we create a new db.
+ db = new Db(dbenv, 0);
+ db.set_error_stream(System.err);
+ db.set_pagesize(1024);
+ db.open(null, CONSTRUCT02_DBNAME, null, Db.DB_BTREE,
+ Db.DB_CREATE, 0664);
+ rundb(db, itemcount++);
+ rundb(db, itemcount++);
+ rundb(db, itemcount++);
+ rundb(db, itemcount++);
+ db.close(0);
+ }
+
+ // remove any existing environment or database
+ void removeall(boolean use_db, boolean remove_env)
+ {
+ {
+ try {
+ if (remove_env) {
+ DbEnv tmpenv = new DbEnv(0);
+ tmpenv.remove(CONSTRUCT02_DBDIR, Db.DB_FORCE);
+ }
+ else if (use_db) {
+ /**/
+ //memory leak for this:
+ Db tmpdb = new Db(null, 0);
+ tmpdb.remove(CONSTRUCT02_DBFULLPATH, null, 0);
+ /**/
+ }
+ }
+ catch (DbException dbe) {
+ System.err.println("error during remove: " + dbe);
+ }
+ catch (FileNotFoundException dbe) {
+ System.err.println("error during remove: " + dbe);
+ }
+ }
+ check_file_removed(CONSTRUCT02_DBFULLPATH, true, !use_db);
+ if (remove_env) {
+ for (int i=0; i<8; i++) {
+ String fname = "__db.00" + i;
+ check_file_removed(fname, true, !use_db);
+ }
+ }
+ }
+
+ boolean doall()
+ {
+ itemcount = 0;
+ try {
+ VERBOSEOUT(" Running test 1:\n");
+ t1();
+ VERBOSEOUT(" finished.\n");
+ removeall(true, false);
+ return true;
+ }
+ catch (DbException dbe) {
+ ERR("EXCEPTION RECEIVED: " + dbe);
+ }
+ catch (FileNotFoundException fnfe) {
+ ERR("EXCEPTION RECEIVED: " + fnfe);
+ }
+ return false;
+ }
+
+ public static void main(String args[])
+ {
+ int iterations = 200;
+
+ for (int argcnt=0; argcnt<args.length; argcnt++) {
+ String arg = args[argcnt];
+ try {
+ iterations = Integer.parseInt(arg);
+ if (iterations < 0) {
+ ERR("Usage: construct02 [-testdigits] count");
+ }
+ }
+ catch (NumberFormatException nfe) {
+ ERR("EXCEPTION RECEIVED: " + nfe);
+ }
+ }
+
+ System.gc();
+ System.runFinalization();
+ VERBOSEOUT("gc complete");
+ long starttotal = Runtime.getRuntime().totalMemory();
+ long startfree = Runtime.getRuntime().freeMemory();
+ TestConstruct02 con = null;
+
+ try {
+ con = new TestConstruct02();
+ }
+ catch (DbException dbe) {
+ System.err.println("Exception: " + dbe);
+ System.exit(1);
+ }
+ catch (java.io.FileNotFoundException fnfe) {
+ System.err.println("Exception: " + fnfe);
+ System.exit(1);
+ }
+
+ for (int i=0; i<iterations; i++) {
+ if (iterations != 0) {
+ VERBOSEOUT("(" + i + "/" + iterations + ") ");
+ }
+ VERBOSEOUT("construct02 running:\n");
+ if (!con.doall()) {
+ ERR("SOME TEST FAILED");
+ }
+ System.gc();
+ System.runFinalization();
+ VERBOSEOUT("gc complete");
+
+ }
+ con.close();
+
+ System.out.print("ALL TESTS SUCCESSFUL\n");
+
+ long endtotal = Runtime.getRuntime().totalMemory();
+ long endfree = Runtime.getRuntime().freeMemory();
+
+ System.out.println("delta for total mem: " + magnitude(endtotal - starttotal));
+ System.out.println("delta for free mem: " + magnitude(endfree - startfree));
+
+ return;
+ }
+
+ static String magnitude(long value)
+ {
+ final long max = 10000000;
+ for (long scale = 10; scale <= max; scale *= 10) {
+ if (value < scale && value > -scale)
+ return "<" + scale;
+ }
+ return ">" + max;
+ }
+}
diff --git a/bdb/test/scr016/TestConstruct02.testout b/bdb/test/scr016/TestConstruct02.testout
new file mode 100644
index 00000000000..5d2041cd197
--- /dev/null
+++ b/bdb/test/scr016/TestConstruct02.testout
@@ -0,0 +1,3 @@
+ALL TESTS SUCCESSFUL
+delta for total mem: <10
+delta for free mem: <10000
diff --git a/bdb/test/scr016/TestDbtFlags.java b/bdb/test/scr016/TestDbtFlags.java
new file mode 100644
index 00000000000..98527e6b3e7
--- /dev/null
+++ b/bdb/test/scr016/TestDbtFlags.java
@@ -0,0 +1,241 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: TestDbtFlags.java,v 1.4 2002/08/16 19:35:54 dda Exp $
+ */
+
+package com.sleepycat.test;
+
+import com.sleepycat.db.*;
+import java.io.File;
+import java.io.FileNotFoundException;
+import java.io.InputStreamReader;
+import java.io.IOException;
+import java.io.PrintStream;
+
+public class TestDbtFlags
+{
+ private static final String FileName = "access.db";
+ private int flag_value;
+ private int buf_size;
+ private int cur_input_line = 0;
+
+ /* Test input: Puck's epilogue from A Midsummer Night's Dream. */
+ static final String[] input_lines = {
+ "If we shadows have offended",
+ "Think but this, and all is mended",
+ "That you have but slumber'd here",
+ "While these visions did appear",
+ "And this weak and idle theme",
+ "No more yielding but a dream",
+ "Gentles, do not reprehend",
+ "if you pardon, we will mend",
+ "And, as I am an honest Puck, if we have unearned luck",
+ "Now to 'scape the serpent's tongue, we will make amends ere long;",
+ "Else the Puck a liar call; so, good night unto you all.",
+ "Give me your hands, if we be friends, and Robin shall restore amends."
+ };
+
+ public TestDbtFlags(int flag_value, int buf_size)
+ {
+ this.flag_value = flag_value;
+ this.buf_size = buf_size;
+ }
+
+ public static void runWithFlags(int flag_value, int size)
+ {
+ String msg = "=-=-=-= Test with DBT flags " + flag_value +
+ " bufsize " + size;
+ System.out.println(msg);
+ System.err.println(msg);
+
+ try
+ {
+ TestDbtFlags app = new TestDbtFlags(flag_value, size);
+ app.run();
+ }
+ catch (DbException dbe)
+ {
+ System.err.println("TestDbtFlags: " + dbe.toString());
+ System.exit(1);
+ }
+ catch (FileNotFoundException fnfe)
+ {
+ System.err.println("TestDbtFlags: " + fnfe.toString());
+ System.exit(1);
+ }
+ }
+
+ public static void main(String argv[])
+ {
+ runWithFlags(Db.DB_DBT_MALLOC, -1);
+ runWithFlags(Db.DB_DBT_REALLOC, -1);
+ runWithFlags(Db.DB_DBT_USERMEM, 20);
+ runWithFlags(Db.DB_DBT_USERMEM, 50);
+ runWithFlags(Db.DB_DBT_USERMEM, 200);
+ runWithFlags(0, -1);
+
+ System.exit(0);
+ }
+
+ String get_input_line()
+ {
+ if (cur_input_line >= input_lines.length)
+ return null;
+ return input_lines[cur_input_line++];
+ }
+
+ public void run()
+ throws DbException, FileNotFoundException
+ {
+ // Remove the previous database.
+ new File(FileName).delete();
+
+ // Create the database object.
+ // There is no environment for this simple example.
+ Db table = new Db(null, 0);
+ table.set_error_stream(System.err);
+ table.set_errpfx("TestDbtFlags");
+ table.open(null, FileName, null, Db.DB_BTREE, Db.DB_CREATE, 0644);
+
+ //
+ // Insert records into the database, where the key is the user
+ // input and the data is the user input in reverse order.
+ //
+ for (;;) {
+ //System.err.println("input line " + cur_input_line);
+ String line = get_input_line();
+ if (line == null)
+ break;
+
+ String reversed = (new StringBuffer(line)).reverse().toString();
+
+ // See definition of StringDbt below
+ //
+ StringDbt key = new StringDbt(line, flag_value);
+ StringDbt data = new StringDbt(reversed, flag_value);
+
+ try
+ {
+ int err;
+ if ((err = table.put(null,
+ key, data, Db.DB_NOOVERWRITE)) == Db.DB_KEYEXIST) {
+ System.out.println("Key " + line + " already exists.");
+ }
+ key.check_flags();
+ data.check_flags();
+ }
+ catch (DbException dbe)
+ {
+ System.out.println(dbe.toString());
+ }
+ }
+
+ // Acquire an iterator for the table.
+ Dbc iterator;
+ iterator = table.cursor(null, 0);
+
+ // Walk through the table, printing the key/data pairs.
+ // See class StringDbt defined below.
+ //
+ StringDbt key = new StringDbt(flag_value, buf_size);
+ StringDbt data = new StringDbt(flag_value, buf_size);
+
+ int iteration_count = 0;
+ int dbreturn = 0;
+
+ while (dbreturn == 0) {
+ //System.err.println("iteration " + iteration_count);
+ try {
+ if ((dbreturn = iterator.get(key, data, Db.DB_NEXT)) == 0) {
+ System.out.println(key.get_string() + " : " + data.get_string());
+ }
+ }
+ catch (DbMemoryException dme) {
+ /* In a real application, we'd normally increase
+ * the size of the buffer. Since we've created
+ * this error condition for testing, we'll just report it.
+ * We still need to skip over this record, and we don't
+ * want to mess with our original Dbt's, since we want
+ * to see more errors. So create some temporary
+ * mallocing Dbts to get this record.
+ */
+ System.err.println("exception, iteration " + iteration_count +
+ ": " + dme);
+ System.err.println(" key size: " + key.get_size() +
+ " ulen: " + key.get_ulen());
+ System.err.println(" data size: " + key.get_size() +
+ " ulen: " + key.get_ulen());
+
+ dme.get_dbt().set_size(buf_size);
+ StringDbt tempkey = new StringDbt(Db.DB_DBT_MALLOC, -1);
+ StringDbt tempdata = new StringDbt(Db.DB_DBT_MALLOC, -1);
+ if ((dbreturn = iterator.get(tempkey, tempdata, Db.DB_NEXT)) != 0) {
+ System.err.println("cannot get expected next record");
+ return;
+ }
+ System.out.println(tempkey.get_string() + " : " +
+ tempdata.get_string());
+ }
+ iteration_count++;
+ }
+ key.check_flags();
+ data.check_flags();
+
+ iterator.close();
+ table.close(0);
+ }
+
+ // Here's an example of how you can extend a Dbt in a straightforward
+ // way to allow easy storage/retrieval of strings, or whatever
+ // kind of data you wish. We've declared it as a static inner
+ // class, but it need not be.
+ //
+ static /*inner*/
+ class StringDbt extends Dbt
+ {
+ int saved_flags;
+
+ StringDbt(int flags, int buf_size)
+ {
+ this.saved_flags = flags;
+ set_flags(saved_flags);
+ if (buf_size != -1) {
+ set_data(new byte[buf_size]);
+ set_ulen(buf_size);
+ }
+ }
+
+ StringDbt(String value, int flags)
+ {
+ this.saved_flags = flags;
+ set_flags(saved_flags);
+ set_string(value);
+ }
+
+ void set_string(String value)
+ {
+ set_data(value.getBytes());
+ set_size(value.length());
+ check_flags();
+ }
+
+ String get_string()
+ {
+ check_flags();
+ return new String(get_data(), 0, get_size());
+ }
+
+ void check_flags()
+ {
+ int actual_flags = get_flags();
+ if (actual_flags != saved_flags) {
+ System.err.println("flags botch: expected " + saved_flags +
+ ", got " + actual_flags);
+ }
+ }
+ }
+}
diff --git a/bdb/test/scr016/TestDbtFlags.testerr b/bdb/test/scr016/TestDbtFlags.testerr
new file mode 100644
index 00000000000..7666868ebd4
--- /dev/null
+++ b/bdb/test/scr016/TestDbtFlags.testerr
@@ -0,0 +1,54 @@
+=-=-=-= Test with DBT flags 4 bufsize -1
+=-=-=-= Test with DBT flags 16 bufsize -1
+=-=-=-= Test with DBT flags 32 bufsize 20
+exception, iteration 0: Dbt not large enough for available data
+ key size: 28 ulen: 20
+ data size: 28 ulen: 20
+exception, iteration 1: Dbt not large enough for available data
+ key size: 53 ulen: 20
+ data size: 53 ulen: 20
+exception, iteration 2: Dbt not large enough for available data
+ key size: 55 ulen: 20
+ data size: 55 ulen: 20
+exception, iteration 3: Dbt not large enough for available data
+ key size: 25 ulen: 20
+ data size: 25 ulen: 20
+exception, iteration 4: Dbt not large enough for available data
+ key size: 69 ulen: 20
+ data size: 69 ulen: 20
+exception, iteration 5: Dbt not large enough for available data
+ key size: 27 ulen: 20
+ data size: 27 ulen: 20
+exception, iteration 6: Dbt not large enough for available data
+ key size: 28 ulen: 20
+ data size: 28 ulen: 20
+exception, iteration 7: Dbt not large enough for available data
+ key size: 65 ulen: 20
+ data size: 65 ulen: 20
+exception, iteration 8: Dbt not large enough for available data
+ key size: 32 ulen: 20
+ data size: 32 ulen: 20
+exception, iteration 9: Dbt not large enough for available data
+ key size: 33 ulen: 20
+ data size: 33 ulen: 20
+exception, iteration 10: Dbt not large enough for available data
+ key size: 30 ulen: 20
+ data size: 30 ulen: 20
+exception, iteration 11: Dbt not large enough for available data
+ key size: 27 ulen: 20
+ data size: 27 ulen: 20
+=-=-=-= Test with DBT flags 32 bufsize 50
+exception, iteration 1: Dbt not large enough for available data
+ key size: 53 ulen: 50
+ data size: 53 ulen: 50
+exception, iteration 2: Dbt not large enough for available data
+ key size: 55 ulen: 50
+ data size: 55 ulen: 50
+exception, iteration 4: Dbt not large enough for available data
+ key size: 69 ulen: 50
+ data size: 69 ulen: 50
+exception, iteration 7: Dbt not large enough for available data
+ key size: 65 ulen: 50
+ data size: 65 ulen: 50
+=-=-=-= Test with DBT flags 32 bufsize 200
+=-=-=-= Test with DBT flags 0 bufsize -1
diff --git a/bdb/test/scr016/TestDbtFlags.testout b/bdb/test/scr016/TestDbtFlags.testout
new file mode 100644
index 00000000000..b8deb1bcc16
--- /dev/null
+++ b/bdb/test/scr016/TestDbtFlags.testout
@@ -0,0 +1,78 @@
+=-=-=-= Test with DBT flags 4 bufsize -1
+And this weak and idle theme : emeht eldi dna kaew siht dnA
+And, as I am an honest Puck, if we have unearned luck : kcul denraenu evah ew fi ,kcuP tsenoh na ma I sa ,dnA
+Else the Puck a liar call; so, good night unto you all. : .lla uoy otnu thgin doog ,os ;llac rail a kcuP eht eslE
+Gentles, do not reprehend : dneherper ton od ,seltneG
+Give me your hands, if we be friends, and Robin shall restore amends. : .sdnema erotser llahs niboR dna ,sdneirf eb ew fi ,sdnah ruoy em eviG
+If we shadows have offended : dedneffo evah swodahs ew fI
+No more yielding but a dream : maerd a tub gnidleiy erom oN
+Now to 'scape the serpent's tongue, we will make amends ere long; : ;gnol ere sdnema ekam lliw ew ,eugnot s'tnepres eht epacs' ot woN
+That you have but slumber'd here : ereh d'rebmuls tub evah uoy tahT
+Think but this, and all is mended : dednem si lla dna ,siht tub knihT
+While these visions did appear : raeppa did snoisiv eseht elihW
+if you pardon, we will mend : dnem lliw ew ,nodrap uoy fi
+=-=-=-= Test with DBT flags 16 bufsize -1
+And this weak and idle theme : emeht eldi dna kaew siht dnA
+And, as I am an honest Puck, if we have unearned luck : kcul denraenu evah ew fi ,kcuP tsenoh na ma I sa ,dnA
+Else the Puck a liar call; so, good night unto you all. : .lla uoy otnu thgin doog ,os ;llac rail a kcuP eht eslE
+Gentles, do not reprehend : dneherper ton od ,seltneG
+Give me your hands, if we be friends, and Robin shall restore amends. : .sdnema erotser llahs niboR dna ,sdneirf eb ew fi ,sdnah ruoy em eviG
+If we shadows have offended : dedneffo evah swodahs ew fI
+No more yielding but a dream : maerd a tub gnidleiy erom oN
+Now to 'scape the serpent's tongue, we will make amends ere long; : ;gnol ere sdnema ekam lliw ew ,eugnot s'tnepres eht epacs' ot woN
+That you have but slumber'd here : ereh d'rebmuls tub evah uoy tahT
+Think but this, and all is mended : dednem si lla dna ,siht tub knihT
+While these visions did appear : raeppa did snoisiv eseht elihW
+if you pardon, we will mend : dnem lliw ew ,nodrap uoy fi
+=-=-=-= Test with DBT flags 32 bufsize 20
+And this weak and idle theme : emeht eldi dna kaew siht dnA
+And, as I am an honest Puck, if we have unearned luck : kcul denraenu evah ew fi ,kcuP tsenoh na ma I sa ,dnA
+Else the Puck a liar call; so, good night unto you all. : .lla uoy otnu thgin doog ,os ;llac rail a kcuP eht eslE
+Gentles, do not reprehend : dneherper ton od ,seltneG
+Give me your hands, if we be friends, and Robin shall restore amends. : .sdnema erotser llahs niboR dna ,sdneirf eb ew fi ,sdnah ruoy em eviG
+If we shadows have offended : dedneffo evah swodahs ew fI
+No more yielding but a dream : maerd a tub gnidleiy erom oN
+Now to 'scape the serpent's tongue, we will make amends ere long; : ;gnol ere sdnema ekam lliw ew ,eugnot s'tnepres eht epacs' ot woN
+That you have but slumber'd here : ereh d'rebmuls tub evah uoy tahT
+Think but this, and all is mended : dednem si lla dna ,siht tub knihT
+While these visions did appear : raeppa did snoisiv eseht elihW
+if you pardon, we will mend : dnem lliw ew ,nodrap uoy fi
+=-=-=-= Test with DBT flags 32 bufsize 50
+And this weak and idle theme : emeht eldi dna kaew siht dnA
+And, as I am an honest Puck, if we have unearned luck : kcul denraenu evah ew fi ,kcuP tsenoh na ma I sa ,dnA
+Else the Puck a liar call; so, good night unto you all. : .lla uoy otnu thgin doog ,os ;llac rail a kcuP eht eslE
+Gentles, do not reprehend : dneherper ton od ,seltneG
+Give me your hands, if we be friends, and Robin shall restore amends. : .sdnema erotser llahs niboR dna ,sdneirf eb ew fi ,sdnah ruoy em eviG
+If we shadows have offended : dedneffo evah swodahs ew fI
+No more yielding but a dream : maerd a tub gnidleiy erom oN
+Now to 'scape the serpent's tongue, we will make amends ere long; : ;gnol ere sdnema ekam lliw ew ,eugnot s'tnepres eht epacs' ot woN
+That you have but slumber'd here : ereh d'rebmuls tub evah uoy tahT
+Think but this, and all is mended : dednem si lla dna ,siht tub knihT
+While these visions did appear : raeppa did snoisiv eseht elihW
+if you pardon, we will mend : dnem lliw ew ,nodrap uoy fi
+=-=-=-= Test with DBT flags 32 bufsize 200
+And this weak and idle theme : emeht eldi dna kaew siht dnA
+And, as I am an honest Puck, if we have unearned luck : kcul denraenu evah ew fi ,kcuP tsenoh na ma I sa ,dnA
+Else the Puck a liar call; so, good night unto you all. : .lla uoy otnu thgin doog ,os ;llac rail a kcuP eht eslE
+Gentles, do not reprehend : dneherper ton od ,seltneG
+Give me your hands, if we be friends, and Robin shall restore amends. : .sdnema erotser llahs niboR dna ,sdneirf eb ew fi ,sdnah ruoy em eviG
+If we shadows have offended : dedneffo evah swodahs ew fI
+No more yielding but a dream : maerd a tub gnidleiy erom oN
+Now to 'scape the serpent's tongue, we will make amends ere long; : ;gnol ere sdnema ekam lliw ew ,eugnot s'tnepres eht epacs' ot woN
+That you have but slumber'd here : ereh d'rebmuls tub evah uoy tahT
+Think but this, and all is mended : dednem si lla dna ,siht tub knihT
+While these visions did appear : raeppa did snoisiv eseht elihW
+if you pardon, we will mend : dnem lliw ew ,nodrap uoy fi
+=-=-=-= Test with DBT flags 0 bufsize -1
+And this weak and idle theme : emeht eldi dna kaew siht dnA
+And, as I am an honest Puck, if we have unearned luck : kcul denraenu evah ew fi ,kcuP tsenoh na ma I sa ,dnA
+Else the Puck a liar call; so, good night unto you all. : .lla uoy otnu thgin doog ,os ;llac rail a kcuP eht eslE
+Gentles, do not reprehend : dneherper ton od ,seltneG
+Give me your hands, if we be friends, and Robin shall restore amends. : .sdnema erotser llahs niboR dna ,sdneirf eb ew fi ,sdnah ruoy em eviG
+If we shadows have offended : dedneffo evah swodahs ew fI
+No more yielding but a dream : maerd a tub gnidleiy erom oN
+Now to 'scape the serpent's tongue, we will make amends ere long; : ;gnol ere sdnema ekam lliw ew ,eugnot s'tnepres eht epacs' ot woN
+That you have but slumber'd here : ereh d'rebmuls tub evah uoy tahT
+Think but this, and all is mended : dednem si lla dna ,siht tub knihT
+While these visions did appear : raeppa did snoisiv eseht elihW
+if you pardon, we will mend : dnem lliw ew ,nodrap uoy fi
diff --git a/bdb/test/scr016/TestGetSetMethods.java b/bdb/test/scr016/TestGetSetMethods.java
new file mode 100644
index 00000000000..a1b2722d8fd
--- /dev/null
+++ b/bdb/test/scr016/TestGetSetMethods.java
@@ -0,0 +1,99 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: TestGetSetMethods.java,v 1.3 2002/01/11 15:54:02 bostic Exp $
+ */
+
+/*
+ * Do some regression tests for simple get/set access methods
+ * on DbEnv, DbTxn, Db. We don't currently test that they have
+ * the desired effect, only that they operate and return correctly.
+ */
+package com.sleepycat.test;
+
+import com.sleepycat.db.*;
+import java.io.FileNotFoundException;
+
+public class TestGetSetMethods
+{
+ public void testMethods()
+ throws DbException, FileNotFoundException
+ {
+ DbEnv dbenv = new DbEnv(0);
+ DbTxn dbtxn;
+ byte[][] conflicts = new byte[10][10];
+
+ dbenv.set_timeout(0x90000000,
+ Db.DB_SET_LOCK_TIMEOUT);
+ dbenv.set_lg_bsize(0x1000);
+ dbenv.set_lg_dir(".");
+ dbenv.set_lg_max(0x10000000);
+ dbenv.set_lg_regionmax(0x100000);
+ dbenv.set_lk_conflicts(conflicts);
+ dbenv.set_lk_detect(Db.DB_LOCK_DEFAULT);
+ // exists, but is deprecated:
+ // dbenv.set_lk_max(0);
+ dbenv.set_lk_max_lockers(100);
+ dbenv.set_lk_max_locks(10);
+ dbenv.set_lk_max_objects(1000);
+ dbenv.set_mp_mmapsize(0x10000);
+ dbenv.set_tas_spins(1000);
+
+ // Need to open the environment so we
+ // can get a transaction.
+ //
+ dbenv.open(".", Db.DB_CREATE | Db.DB_INIT_TXN |
+ Db.DB_INIT_LOCK | Db.DB_INIT_LOG |
+ Db.DB_INIT_MPOOL,
+ 0644);
+
+ dbtxn = dbenv.txn_begin(null, Db.DB_TXN_NOWAIT);
+ dbtxn.set_timeout(0xA0000000, Db.DB_SET_TXN_TIMEOUT);
+ dbtxn.abort();
+
+ dbenv.close(0);
+
+ // We create a separate Db handle for each access method.
+ // That's because once we call (for instance)
+ // set_bt_maxkey, DB 'knows' that this is a
+ // Btree Db, and the same handle cannot then be used
+ // for Hash or Recno functions.
+ //
+ Db db_bt = new Db(null, 0);
+ db_bt.set_bt_maxkey(10000);
+ db_bt.set_bt_minkey(100);
+ db_bt.set_cachesize(0, 0x100000, 0);
+ db_bt.close(0);
+
+ Db db_h = new Db(null, 0);
+ db_h.set_h_ffactor(0x10);
+ db_h.set_h_nelem(100);
+ db_h.set_lorder(0);
+ db_h.set_pagesize(0x10000);
+ db_h.close(0);
+
+ Db db_re = new Db(null, 0);
+ db_re.set_re_delim('@');
+ db_re.set_re_pad(10);
+ db_re.set_re_source("re.in");
+ db_re.close(0);
+
+ Db db_q = new Db(null, 0);
+ db_q.set_q_extentsize(200);
+ db_q.close(0);
+ }
+
+ public static void main(String[] args)
+ {
+ try {
+ TestGetSetMethods tester = new TestGetSetMethods();
+ tester.testMethods();
+ }
+ catch (Exception e) {
+ System.err.println("TestGetSetMethods: Exception: " + e);
+ }
+ }
+}
diff --git a/bdb/test/scr016/TestKeyRange.java b/bdb/test/scr016/TestKeyRange.java
new file mode 100644
index 00000000000..8eda2de426f
--- /dev/null
+++ b/bdb/test/scr016/TestKeyRange.java
@@ -0,0 +1,203 @@
+/*NOTE: TestKeyRange is AccessExample changed to test Db.key_range.
+ * See comments with ADDED for specific areas of change.
+ */
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: TestKeyRange.java,v 1.4 2002/08/16 19:35:55 dda Exp $
+ */
+
+package com.sleepycat.test;
+
+import com.sleepycat.db.*;
+import java.io.File;
+import java.io.FileNotFoundException;
+import java.io.StringReader;
+import java.io.Reader;
+import java.io.IOException;
+import java.io.PrintStream;
+
+public class TestKeyRange
+{
+ private static final String FileName = "access.db";
+
+ public TestKeyRange()
+ {
+ }
+
+ private static void usage()
+ {
+ System.err.println("usage: TestKeyRange\n");
+ System.exit(1);
+ }
+
+ public static void main(String argv[])
+ {
+ try
+ {
+ TestKeyRange app = new TestKeyRange();
+ app.run();
+ }
+ catch (DbException dbe)
+ {
+ System.err.println("TestKeyRange: " + dbe.toString());
+ System.exit(1);
+ }
+ catch (FileNotFoundException fnfe)
+ {
+ System.err.println("TestKeyRange: " + fnfe.toString());
+ System.exit(1);
+ }
+ System.exit(0);
+ }
+
+ // Prompts for a line, and keeps prompting until a non-blank
+ // line is returned. Returns null on error.
+ //
+ static public String askForLine(Reader reader,
+ PrintStream out, String prompt)
+ {
+ String result = "";
+ while (result != null && result.length() == 0) {
+ out.print(prompt);
+ out.flush();
+ result = getLine(reader);
+ }
+ return result;
+ }
+
+ // Not terribly efficient, but does the job.
+ // Works for reading a line from stdin or a file.
+ // Returns null on EOF. If EOF appears in the middle
+ // of a line, returns that line, then null on next call.
+ //
+ static public String getLine(Reader reader)
+ {
+ StringBuffer b = new StringBuffer();
+ int c;
+ try {
+ while ((c = reader.read()) != -1 && c != '\n') {
+ if (c != '\r')
+ b.append((char)c);
+ }
+ }
+ catch (IOException ioe) {
+ c = -1;
+ }
+
+ if (c == -1 && b.length() == 0)
+ return null;
+ else
+ return b.toString();
+ }
+
+ public void run()
+ throws DbException, FileNotFoundException
+ {
+ // Remove the previous database.
+ new File(FileName).delete();
+
+ // Create the database object.
+ // There is no environment for this simple example.
+ Db table = new Db(null, 0);
+ table.set_error_stream(System.err);
+ table.set_errpfx("TestKeyRange");
+ table.open(null, FileName, null, Db.DB_BTREE, Db.DB_CREATE, 0644);
+
+ //
+ // Insert records into the database, where the key is the user
+ // input and the data is the user input in reverse order.
+ //
+ Reader reader = new StringReader("abc\nmiddle\nzend\nmoremiddle\nZED\nMAMAMIA");
+
+ int count= 0;/*ADDED*/
+ for (;;) {
+ String line = askForLine(reader, System.out, "input>");
+ if (line == null)
+ break;
+
+ String reversed = (new StringBuffer(line)).reverse().toString();
+
+ // See definition of StringDbt below
+ //
+ StringDbt key = new StringDbt(line);
+ StringDbt data = new StringDbt(reversed);
+
+ try
+ {
+ int err;
+ if ((err = table.put(null, key, data, 0)) == Db.DB_KEYEXIST) {
+ System.out.println("Key " + line + " already exists.");
+ }
+ }
+ catch (DbException dbe)
+ {
+ System.out.println(dbe.toString());
+ }
+ System.out.println("");
+
+ /*START ADDED*/
+ {
+ if (count++ > 0) {
+ DbKeyRange range = new DbKeyRange();
+ table.key_range(null, key, range, 0);
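+ // less/equal/greater are the estimated fractions of keys that sort
+ // before, equal to, and after the given key.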
+ System.out.println("less: " + range.less);
+ System.out.println("equal: " + range.equal);
+ System.out.println("greater: " + range.greater);
+ }
+ }
+ /*END ADDED*/
+
+ }
+
+ // Acquire an iterator for the table.
+ Dbc iterator;
+ iterator = table.cursor(null, 0);
+
+ // Walk through the table, printing the key/data pairs.
+ // See class StringDbt defined below.
+ //
+ StringDbt key = new StringDbt();
+ StringDbt data = new StringDbt();
+ while (iterator.get(key, data, Db.DB_NEXT) == 0)
+ {
+ System.out.println(key.getString() + " : " + data.getString());
+ }
+ iterator.close();
+ table.close(0);
+ }
+
+ // Here's an example of how you can extend a Dbt in a straightforward
+ // way to allow easy storage/retrieval of strings, or whatever
+ // kind of data you wish. We've declared it as a static inner
+ // class, but it need not be.
+ //
+ static /*inner*/
+ class StringDbt extends Dbt
+ {
+ StringDbt()
+ {
+ set_flags(Db.DB_DBT_MALLOC); // tell Db to allocate on retrieval
+ }
+
+ StringDbt(String value)
+ {
+ setString(value);
+ set_flags(Db.DB_DBT_MALLOC); // tell Db to allocate on retrieval
+ }
+
+ void setString(String value)
+ {
+ set_data(value.getBytes());
+ set_size(value.length());
+ }
+
+ String getString()
+ {
+ return new String(get_data(), 0, get_size());
+ }
+ }
+}
diff --git a/bdb/test/scr016/TestKeyRange.testout b/bdb/test/scr016/TestKeyRange.testout
new file mode 100644
index 00000000000..c265f3289fb
--- /dev/null
+++ b/bdb/test/scr016/TestKeyRange.testout
@@ -0,0 +1,27 @@
+input>
+input>
+less: 0.5
+equal: 0.5
+greater: 0.0
+input>
+less: 0.6666666666666666
+equal: 0.3333333333333333
+greater: 0.0
+input>
+less: 0.5
+equal: 0.25
+greater: 0.25
+input>
+less: 0.0
+equal: 0.2
+greater: 0.8
+input>
+less: 0.0
+equal: 0.16666666666666666
+greater: 0.8333333333333334
+input>MAMAMIA : AIMAMAM
+ZED : DEZ
+abc : cba
+middle : elddim
+moremiddle : elddimerom
+zend : dnez
diff --git a/bdb/test/scr016/TestLockVec.java b/bdb/test/scr016/TestLockVec.java
new file mode 100644
index 00000000000..ad48e9f2f9a
--- /dev/null
+++ b/bdb/test/scr016/TestLockVec.java
@@ -0,0 +1,249 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: TestLockVec.java,v 1.4 2002/08/16 19:35:55 dda Exp $
+ */
+
+/*
+ * test of DbEnv.lock_vec()
+ */
+
+package com.sleepycat.test;
+
+import com.sleepycat.db.*;
+import java.io.FileNotFoundException;
+
+public class TestLockVec
+{
+ public static int locker1;
+ public static int locker2;
+
+ public static void gdb_pause()
+ {
+ try {
+ System.err.println("attach gdb and type return...");
+ System.in.read(new byte[10]);
+ }
+ catch (java.io.IOException ie) {
+ }
+ }
+
+ public static void main(String[] args)
+ {
+ try {
+ DbEnv dbenv1 = new DbEnv(0);
+ DbEnv dbenv2 = new DbEnv(0);
+ dbenv1.open(".",
+ Db.DB_CREATE | Db.DB_INIT_LOCK | Db.DB_INIT_MPOOL, 0);
+ dbenv2.open(".",
+ Db.DB_CREATE | Db.DB_INIT_LOCK | Db.DB_INIT_MPOOL, 0);
+ locker1 = dbenv1.lock_id();
+ locker2 = dbenv1.lock_id();
+ Db db1 = new Db(dbenv1, 0);
+ db1.open(null, "my.db", null, Db.DB_BTREE, Db.DB_CREATE, 0);
+ Db db2 = new Db(dbenv2, 0);
+ db2.open(null, "my.db", null, Db.DB_BTREE, 0, 0);
+
+ // populate our database, just two elements.
+ Dbt Akey = new Dbt("A".getBytes());
+ Dbt Adata = new Dbt("Adata".getBytes());
+ Dbt Bkey = new Dbt("B".getBytes());
+ Dbt Bdata = new Dbt("Bdata".getBytes());
+
+ // We don't allow Dbts to be reused within the
+ // same method call, so we need some duplicates.
+ Dbt Akeyagain = new Dbt("A".getBytes());
+ Dbt Bkeyagain = new Dbt("B".getBytes());
+
+ db1.put(null, Akey, Adata, 0);
+ db1.put(null, Bkey, Bdata, 0);
+
+ Dbt notInDatabase = new Dbt("C".getBytes());
+
+ /* make sure our check mechanisms work */
+ int expectedErrs = 0;
+
+ lock_check_free(dbenv2, Akey);
+ try {
+ lock_check_held(dbenv2, Bkey, Db.DB_LOCK_READ);
+ }
+ catch (DbException dbe1) {
+ expectedErrs += 1;
+ }
+ DbLock tmplock = dbenv1.lock_get(locker1, Db.DB_LOCK_NOWAIT,
+ Akey, Db.DB_LOCK_READ);
+ lock_check_held(dbenv2, Akey, Db.DB_LOCK_READ);
+ try {
+ lock_check_free(dbenv2, Akey);
+ }
+ catch (DbException dbe2) {
+ expectedErrs += 2;
+ }
+ if (expectedErrs != 1+2) {
+ System.err.println("lock check mechanism is broken");
+ System.exit(1);
+ }
+ dbenv1.lock_put(tmplock);
+
+ /* Now on with the test, a series of lock_vec requests,
+ * with checks between each call.
+ */
+
+ System.out.println("get a few");
+ /* Request: get A(W), B(R), B(R) */
+ DbLockRequest[] reqs = new DbLockRequest[3];
+
+ reqs[0] = new DbLockRequest(Db.DB_LOCK_GET, Db.DB_LOCK_WRITE,
+ Akey, null);
+ reqs[1] = new DbLockRequest(Db.DB_LOCK_GET, Db.DB_LOCK_READ,
+ Bkey, null);
+ reqs[2] = new DbLockRequest(Db.DB_LOCK_GET, Db.DB_LOCK_READ,
+ Bkeyagain, null);
+
+ dbenv1.lock_vec(locker1, Db.DB_LOCK_NOWAIT, reqs, 0, 3);
+
+ /* Locks held: A(W), B(R), B(R) */
+ lock_check_held(dbenv2, Bkey, Db.DB_LOCK_READ);
+ lock_check_held(dbenv2, Akey, Db.DB_LOCK_WRITE);
+
+ System.out.println("put a couple");
+ /* Request: put A, B(first) */
+ reqs[0].set_op(Db.DB_LOCK_PUT);
+ reqs[1].set_op(Db.DB_LOCK_PUT);
+
+ dbenv1.lock_vec(locker1, Db.DB_LOCK_NOWAIT, reqs, 0, 2);
+
+ /* Locks held: B(R) */
+ lock_check_free(dbenv2, Akey);
+ lock_check_held(dbenv2, Bkey, Db.DB_LOCK_READ);
+
+ System.out.println("put one more, test index offset");
+ /* Request: put B(second) */
+ reqs[2].set_op(Db.DB_LOCK_PUT);
+
+ dbenv1.lock_vec(locker1, Db.DB_LOCK_NOWAIT, reqs, 2, 1);
+
+ /* Locks held: <none> */
+ lock_check_free(dbenv2, Akey);
+ lock_check_free(dbenv2, Bkey);
+
+ System.out.println("get a few");
+ /* Request: get A(R), A(R), B(R) */
+ reqs[0] = new DbLockRequest(Db.DB_LOCK_GET, Db.DB_LOCK_READ,
+ Akey, null);
+ reqs[1] = new DbLockRequest(Db.DB_LOCK_GET, Db.DB_LOCK_READ,
+ Akeyagain, null);
+ reqs[2] = new DbLockRequest(Db.DB_LOCK_GET, Db.DB_LOCK_READ,
+ Bkey, null);
+ dbenv1.lock_vec(locker1, Db.DB_LOCK_NOWAIT, reqs, 0, 3);
+
+ /* Locks held: A(R), B(R), B(R) */
+ lock_check_held(dbenv2, Akey, Db.DB_LOCK_READ);
+ lock_check_held(dbenv2, Bkey, Db.DB_LOCK_READ);
+
+ System.out.println("try putobj");
+ /* Request: get B(R), putobj A */
+ reqs[1] = new DbLockRequest(Db.DB_LOCK_GET, Db.DB_LOCK_READ,
+ Bkey, null);
+ reqs[2] = new DbLockRequest(Db.DB_LOCK_PUT_OBJ, 0,
+ Akey, null);
+ dbenv1.lock_vec(locker1, Db.DB_LOCK_NOWAIT, reqs, 1, 2);
+
+ /* Locks held: B(R), B(R) */
+ lock_check_free(dbenv2, Akey);
+ lock_check_held(dbenv2, Bkey, Db.DB_LOCK_READ);
+
+ System.out.println("get one more");
+ /* Request: get A(W) */
+ reqs[0] = new DbLockRequest(Db.DB_LOCK_GET, Db.DB_LOCK_WRITE,
+ Akey, null);
+ dbenv1.lock_vec(locker1, Db.DB_LOCK_NOWAIT, reqs, 0, 1);
+
+ /* Locks held: A(W), B(R), B(R) */
+ lock_check_held(dbenv2, Akey, Db.DB_LOCK_WRITE);
+ lock_check_held(dbenv2, Bkey, Db.DB_LOCK_READ);
+
+ System.out.println("putall");
+ /* Request: putall */
+ reqs[0] = new DbLockRequest(Db.DB_LOCK_PUT_ALL, 0,
+ null, null);
+ dbenv1.lock_vec(locker1, Db.DB_LOCK_NOWAIT, reqs, 0, 1);
+
+ lock_check_free(dbenv2, Akey);
+ lock_check_free(dbenv2, Bkey);
+ db1.close(0);
+ dbenv1.close(0);
+ db2.close(0);
+ dbenv2.close(0);
+ System.out.println("done");
+ }
+ catch (DbLockNotGrantedException nge) {
+ System.err.println("Db Exception: " + nge);
+ }
+ catch (DbException dbe) {
+ System.err.println("Db Exception: " + dbe);
+ }
+ catch (FileNotFoundException fnfe) {
+ System.err.println("FileNotFoundException: " + fnfe);
+ }
+
+ }
+
+ /* Verify that the lock is free, throw an exception if not.
+ * We do this by trying to grab a write lock (no wait).
+ */
+ static void lock_check_free(DbEnv dbenv, Dbt dbt)
+ throws DbException
+ {
+ DbLock tmplock = dbenv.lock_get(locker2, Db.DB_LOCK_NOWAIT,
+ dbt, Db.DB_LOCK_WRITE);
+ dbenv.lock_put(tmplock);
+ }
+
+ /* Verify that the lock is held with the given mode, throwing an exception if not.
+ * If we have a write lock, we should not be able to get the lock
+ * for reading. If we have a read lock, we should be able to get
+ * it for reading, but not writing.
+ */
+ static void lock_check_held(DbEnv dbenv, Dbt dbt, int mode)
+ throws DbException
+ {
+ DbLock never = null;
+
+ try {
+ if (mode == Db.DB_LOCK_WRITE) {
+ never = dbenv.lock_get(locker2, Db.DB_LOCK_NOWAIT,
+ dbt, Db.DB_LOCK_READ);
+ }
+ else if (mode == Db.DB_LOCK_READ) {
+ DbLock rlock = dbenv.lock_get(locker2, Db.DB_LOCK_NOWAIT,
+ dbt, Db.DB_LOCK_READ);
+ dbenv.lock_put(rlock);
+ never = dbenv.lock_get(locker2, Db.DB_LOCK_NOWAIT,
+ dbt, Db.DB_LOCK_WRITE);
+ }
+ else {
+ throw new DbException("lock_check_held bad mode");
+ }
+ }
+ catch (DbLockNotGrantedException nge) {
+ /* We expect this on our last lock_get call */
+ }
+
+ /* make sure we failed */
+ if (never != null) {
+ try {
+ dbenv.lock_put(never);
+ }
+ catch (DbException dbe2) {
+ System.err.println("Got some real troubles now");
+ System.exit(1);
+ }
+ throw new DbException("lock_check_held: lock was not held");
+ }
+ }
+
+}
diff --git a/bdb/test/scr016/TestLockVec.testout b/bdb/test/scr016/TestLockVec.testout
new file mode 100644
index 00000000000..1cf16c6ac4e
--- /dev/null
+++ b/bdb/test/scr016/TestLockVec.testout
@@ -0,0 +1,8 @@
+get a few
+put a couple
+put one more, test index offset
+get a few
+try putobj
+get one more
+putall
+done
diff --git a/bdb/test/scr016/TestLogc.java b/bdb/test/scr016/TestLogc.java
new file mode 100644
index 00000000000..ec9c373a93b
--- /dev/null
+++ b/bdb/test/scr016/TestLogc.java
@@ -0,0 +1,100 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: TestLogc.java,v 1.7 2002/08/16 19:35:55 dda Exp $
+ */
+
+/*
+ * A basic regression test for the Logc class.
+ */
+
+package com.sleepycat.test;
+
+import com.sleepycat.db.*;
+import java.io.FileNotFoundException;
+
+public class TestLogc
+{
+ public static void main(String[] args)
+ {
+ try {
+ DbEnv env = new DbEnv(0);
+ env.open(".", Db.DB_CREATE | Db.DB_INIT_LOG | Db.DB_INIT_MPOOL, 0);
+
+ // Do some database activity to get something into the log.
+ Db db1 = new Db(env, 0);
+ db1.open(null, "first.db", null, Db.DB_BTREE, Db.DB_CREATE, 0);
+ db1.put(null, new Dbt("a".getBytes()), new Dbt("b".getBytes()), 0);
+ db1.put(null, new Dbt("c".getBytes()), new Dbt("d".getBytes()), 0);
+ db1.close(0);
+
+ Db db2 = new Db(env, 0);
+ db2.open(null, "second.db", null, Db.DB_BTREE, Db.DB_CREATE, 0644);
+ db2.put(null, new Dbt("w".getBytes()), new Dbt("x".getBytes()), 0);
+ db2.put(null, new Dbt("y".getBytes()), new Dbt("z".getBytes()), 0);
+ db2.close(0);
+
+ // Now get a log cursor and walk through.
+ DbLogc logc = env.log_cursor(0);
+
+ int ret = 0;
+ DbLsn lsn = new DbLsn();
+ Dbt dbt = new Dbt();
+ int flags = Db.DB_FIRST;
+
+ int count = 0;
+ while ((ret = logc.get(lsn, dbt, flags)) == 0) {
+
+ // We ignore the contents of the log record;
+ // they are not portable. Even the exact count
+ // may change when the underlying implementation
+ // changes, so we'll just make sure at the end
+ // that we saw 'enough'.
+ //
+ // System.out.println("logc.get: " + count);
+ // System.out.println(showDbt(dbt));
+ //
+ count++;
+ flags = Db.DB_NEXT;
+ }
+ if (ret != Db.DB_NOTFOUND) {
+ System.err.println("*** FAIL: logc.get returned: " +
+ DbEnv.strerror(ret));
+ }
+ logc.close(0);
+
+ // There have to be at *least* four log records,
+ // since we did four separate database operations.
+ //
+ if (count < 4)
+ System.out.println("*** FAIL: not enough log records");
+
+ System.out.println("TestLogc done.");
+ }
+ catch (DbException dbe) {
+ System.err.println("*** FAIL: Db Exception: " + dbe);
+ }
+ catch (FileNotFoundException fnfe) {
+ System.err.println("*** FAIL: FileNotFoundException: " + fnfe);
+ }
+
+ }
+
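+ // Formats a Dbt for debugging as "size: N data: ...", showing at most
+ // the first ten bytes of the data.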
+ public static String showDbt(Dbt dbt)
+ {
+ StringBuffer sb = new StringBuffer();
+ int size = dbt.get_size();
+ byte[] data = dbt.get_data();
+ int i;
+ for (i=0; i<size && i<10; i++) {
+ sb.append(Byte.toString(data[i]));
+ sb.append(' ');
+ }
+ if (i<size)
+ sb.append("...");
+ return "size: " + size + " data: " + sb.toString();
+ }
+}
diff --git a/bdb/test/scr016/TestLogc.testout b/bdb/test/scr016/TestLogc.testout
new file mode 100644
index 00000000000..afac3af7eda
--- /dev/null
+++ b/bdb/test/scr016/TestLogc.testout
@@ -0,0 +1 @@
+TestLogc done.
diff --git a/bdb/test/scr016/TestOpenEmpty.java b/bdb/test/scr016/TestOpenEmpty.java
new file mode 100644
index 00000000000..ae92fd363d9
--- /dev/null
+++ b/bdb/test/scr016/TestOpenEmpty.java
@@ -0,0 +1,189 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: TestOpenEmpty.java,v 1.4 2002/08/16 19:35:55 dda Exp $
+ */
+
+package com.sleepycat.test;
+
+import com.sleepycat.db.*;
+import java.io.File;
+import java.io.FileNotFoundException;
+import java.io.InputStreamReader;
+import java.io.IOException;
+import java.io.PrintStream;
+
+public class TestOpenEmpty
+{
+ private static final String FileName = "access.db";
+
+ public TestOpenEmpty()
+ {
+ }
+
+ private static void usage()
+ {
+ System.err.println("usage: TestOpenEmpty\n");
+ System.exit(1);
+ }
+
+ public static void main(String argv[])
+ {
+ try
+ {
+ TestOpenEmpty app = new TestOpenEmpty();
+ app.run();
+ }
+ catch (DbException dbe)
+ {
+ System.err.println("TestOpenEmpty: " + dbe.toString());
+ System.exit(1);
+ }
+ catch (FileNotFoundException fnfe)
+ {
+ System.err.println("TestOpenEmpty: " + fnfe.toString());
+ System.exit(1);
+ }
+ System.exit(0);
+ }
+
+ // Prompts for a line, and keeps prompting until a non blank
+ // line is returned. Returns null on error.
+ //
+ static public String askForLine(InputStreamReader reader,
+ PrintStream out, String prompt)
+ {
+ String result = "";
+ while (result != null && result.length() == 0) {
+ out.print(prompt);
+ out.flush();
+ result = getLine(reader);
+ }
+ return result;
+ }
+
+ // Not terribly efficient, but does the job.
+ // Works for reading a line from stdin or a file.
+ // Returns null on EOF. If EOF appears in the middle
+ // of a line, returns that line, then null on next call.
+ //
+ static public String getLine(InputStreamReader reader)
+ {
+ StringBuffer b = new StringBuffer();
+ int c;
+ try {
+ while ((c = reader.read()) != -1 && c != '\n') {
+ if (c != '\r')
+ b.append((char)c);
+ }
+ }
+ catch (IOException ioe) {
+ c = -1;
+ }
+
+ if (c == -1 && b.length() == 0)
+ return null;
+ else
+ return b.toString();
+ }
+
+ public void run()
+ throws DbException, FileNotFoundException
+ {
+ // Remove the previous database.
+ new File(FileName).delete();
+
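+ // Create a zero-length file first; the open below is then expected
+ // to fail with "unexpected file type or format" (see the matching
+ // TestOpenEmpty.testerr output).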
+ try { (new java.io.FileOutputStream(FileName)).close(); }
+ catch (IOException ioe) { }
+
+ // Create the database object.
+ // There is no environment for this simple example.
+ Db table = new Db(null, 0);
+ table.set_error_stream(System.err);
+ table.set_errpfx("TestOpenEmpty");
+ table.open(null, FileName, null, Db.DB_BTREE, Db.DB_CREATE, 0644);
+
+ //
+ // Insert records into the database, where the key is the user
+ // input and the data is the user input in reverse order.
+ //
+ InputStreamReader reader = new InputStreamReader(System.in);
+
+ for (;;) {
+ String line = askForLine(reader, System.out, "input> ");
+ if (line == null)
+ break;
+
+ String reversed = (new StringBuffer(line)).reverse().toString();
+
+ // See definition of StringDbt below
+ //
+ StringDbt key = new StringDbt(line);
+ StringDbt data = new StringDbt(reversed);
+
+ try
+ {
+ int err;
+ if ((err = table.put(null,
+ key, data, Db.DB_NOOVERWRITE)) == Db.DB_KEYEXIST) {
+ System.out.println("Key " + line + " already exists.");
+ }
+ }
+ catch (DbException dbe)
+ {
+ System.out.println(dbe.toString());
+ }
+ System.out.println("");
+ }
+
+ // Acquire an iterator for the table.
+ Dbc iterator;
+ iterator = table.cursor(null, 0);
+
+ // Walk through the table, printing the key/data pairs.
+ // See class StringDbt defined below.
+ //
+ StringDbt key = new StringDbt();
+ StringDbt data = new StringDbt();
+ while (iterator.get(key, data, Db.DB_NEXT) == 0)
+ {
+ System.out.println(key.getString() + " : " + data.getString());
+ }
+ iterator.close();
+ table.close(0);
+ }
+
+ // Here's an example of how you can extend a Dbt in a straightforward
+ // way to allow easy storage/retrieval of strings, or whatever
+ // kind of data you wish. We've declared it as a static inner
+ // class, but it need not be.
+ //
+ static /*inner*/
+ class StringDbt extends Dbt
+ {
+ StringDbt()
+ {
+ set_flags(Db.DB_DBT_MALLOC); // tell Db to allocate on retrieval
+ }
+
+ StringDbt(String value)
+ {
+ setString(value);
+ set_flags(Db.DB_DBT_MALLOC); // tell Db to allocate on retrieval
+ }
+
+ void setString(String value)
+ {
+ set_data(value.getBytes());
+ set_size(value.length());
+ }
+
+ String getString()
+ {
+ return new String(get_data(), 0, get_size());
+ }
+ }
+}
diff --git a/bdb/test/scr016/TestOpenEmpty.testerr b/bdb/test/scr016/TestOpenEmpty.testerr
new file mode 100644
index 00000000000..dd3e01c7ab7
--- /dev/null
+++ b/bdb/test/scr016/TestOpenEmpty.testerr
@@ -0,0 +1,2 @@
+TestOpenEmpty: access.db: unexpected file type or format
+TestOpenEmpty: com.sleepycat.db.DbException: Invalid argument: Invalid argument
diff --git a/bdb/test/scr016/TestReplication.java b/bdb/test/scr016/TestReplication.java
new file mode 100644
index 00000000000..87cb683d60f
--- /dev/null
+++ b/bdb/test/scr016/TestReplication.java
@@ -0,0 +1,289 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: TestReplication.java,v 1.3 2002/01/23 14:29:51 bostic Exp $
+ */
+
+/*
+ * Simple test of replication, merely to exercise the individual
+ * methods in the API. Rather than use TCP/IP, our transport
+ * mechanism is just a Vector of byte arrays.
+ * It's managed like a queue, with access synchronized on
+ * the Vector object itself; the client polls it with a short sleep.
+ * It's not terribly extensible, but it's fine for a small test.
+ */
+
+package com.sleepycat.test;
+
+import com.sleepycat.db.*;
+import java.io.File;
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.util.Vector;
+
+public class TestReplication extends Thread
+ implements DbRepTransport
+{
+ public static final String MASTER_ENVDIR = "./master";
+ public static final String CLIENT_ENVDIR = "./client";
+
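+ // Messages travel through this queue as an Integer tag optionally
+ // followed by a copied byte array: 1 = control data follows,
+ // 2 = empty control, 3 = rec data follows, 4 = empty rec,
+ // -1 = stop the client loop.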
+ private Vector queue = new Vector();
+ private DbEnv master_env;
+ private DbEnv client_env;
+
+ private static void mkdir(String name)
+ throws IOException
+ {
+ (new File(name)).mkdir();
+ }
+
+
+ // The client thread runs this
+ public void run()
+ {
+ try {
+ System.err.println("c10");
+ client_env = new DbEnv(0);
+ System.err.println("c11");
+ client_env.set_rep_transport(1, this);
+ System.err.println("c12");
+ client_env.open(CLIENT_ENVDIR, Db.DB_CREATE | Db.DB_INIT_MPOOL, 0);
+ System.err.println("c13");
+ Dbt myid = new Dbt("master01".getBytes());
+ System.err.println("c14");
+ client_env.rep_start(myid, Db.DB_REP_CLIENT);
+ System.err.println("c15");
+ DbEnv.RepProcessMessage processMsg = new DbEnv.RepProcessMessage();
+ processMsg.envid = 2;
+ System.err.println("c20");
+ boolean running = true;
+
+ Dbt control = new Dbt();
+ Dbt rec = new Dbt();
+
+ while (running) {
+ int msgtype = 0;
+
+ System.err.println("c30");
+ synchronized (queue) {
+ if (queue.size() == 0) {
+ System.err.println("c40");
+ sleepShort();
+ }
+ else {
+ msgtype = ((Integer)queue.firstElement()).intValue();
+ queue.removeElementAt(0);
+ byte[] data;
+
+ System.err.println("c50 " + msgtype);
+
+ switch (msgtype) {
+ case -1:
+ running = false;
+ break;
+ case 1:
+ data = (byte[])queue.firstElement();
+ queue.removeElementAt(0);
+ control.set_data(data);
+ control.set_size(data.length);
+ break;
+ case 2:
+ control.set_data(null);
+ control.set_size(0);
+ break;
+ case 3:
+ data = (byte[])queue.firstElement();
+ queue.removeElementAt(0);
+ rec.set_data(data);
+ rec.set_size(data.length);
+ break;
+ case 4:
+ rec.set_data(null);
+ rec.set_size(0);
+ break;
+ }
+
+ }
+ }
+ System.err.println("c60");
+ if (msgtype == 3 || msgtype == 4) {
+ System.out.println("cLIENT: Got message");
+ client_env.rep_process_message(control, rec,
+ processMsg);
+ }
+ }
+ System.err.println("c70");
+ Db db = new Db(client_env, 0);
+ db.open(null, "x.db", null, Db.DB_BTREE, 0, 0);
+ Dbt data = new Dbt();
+ System.err.println("c80");
+ db.get(null, new Dbt("Hello".getBytes()), data, 0);
+ System.err.println("c90");
+ System.out.println("Hello " + new String(data.get_data(), data.get_offset(), data.get_size()));
+ System.err.println("c95");
+ client_env.close(0);
+ }
+ catch (Exception e) {
+ System.err.println("client exception: " + e);
+ }
+ }
+
+ // Implements DbTransport
+ public int send(DbEnv env, Dbt control, Dbt rec, int flags, int envid)
+ throws DbException
+ {
+ System.out.println("Send to " + envid);
+ if (envid == 1) {
+ System.err.println("Unexpected envid = " + envid);
+ return 0;
+ }
+
+ int nbytes = 0;
+
+ synchronized (queue) {
+ System.out.println("Sending message");
+ byte[] data = control.get_data();
+ if (data != null && data.length > 0) {
+ queue.addElement(new Integer(1));
+ nbytes += data.length;
+ byte[] newdata = new byte[data.length];
+ System.arraycopy(data, 0, newdata, 0, data.length);
+ queue.addElement(newdata);
+ }
+ else
+ {
+ queue.addElement(new Integer(2));
+ }
+
+ data = rec.get_data();
+ if (data != null && data.length > 0) {
+ queue.addElement(new Integer(3));
+ nbytes += data.length;
+ byte[] newdata = new byte[data.length];
+ System.arraycopy(data, 0, newdata, 0, data.length);
+ queue.addElement(newdata);
+ }
+ else
+ {
+ queue.addElement(new Integer(4));
+ }
+ System.out.println("MASTER: sent message");
+ }
+ return 0;
+ }
+
+ public void sleepShort()
+ {
+ try {
+ sleep(100);
+ }
+ catch (InterruptedException ie)
+ {
+ }
+ }
+
+ public void send_terminator()
+ {
+ synchronized (queue) {
+ queue.addElement(new Integer(-1));
+ }
+ }
+
+ public void master()
+ {
+ try {
+ master_env = new DbEnv(0);
+ master_env.set_rep_transport(2, this);
+ master_env.open(MASTER_ENVDIR, Db.DB_CREATE | Db.DB_INIT_MPOOL, 0644);
+ System.err.println("10");
+ Dbt myid = new Dbt("client01".getBytes());
+ master_env.rep_start(myid, Db.DB_REP_MASTER);
+ System.err.println("10");
+ Db db = new Db(master_env, 0);
+ System.err.println("20");
+ db.open(null, "x.db", null, Db.DB_BTREE, Db.DB_CREATE, 0644);
+ System.err.println("30");
+ db.put(null, new Dbt("Hello".getBytes()),
+ new Dbt("world".getBytes()), 0);
+ System.err.println("40");
+ //DbEnv.RepElectResult electionResult = new DbEnv.RepElectResult();
+ //master_env.rep_elect(2, 2, 3, 4, electionResult);
+ db.close(0);
+ System.err.println("50");
+ master_env.close(0);
+ send_terminator();
+ }
+ catch (Exception e) {
+ System.err.println("client exception: " + e);
+ }
+ }
+
+ public static void main(String[] args)
+ {
+ // The test should only take a few milliseconds;
+ // give it 10 seconds before bailing out.
+ TimelimitThread t = new TimelimitThread(1000*10);
+ t.start();
+
+ try {
+ mkdir(CLIENT_ENVDIR);
+ mkdir(MASTER_ENVDIR);
+
+ TestReplication rep = new TestReplication();
+
+ // Run the client as a separate thread.
+ rep.start();
+
+ // Run the master.
+ rep.master();
+
+ // Wait for the master to finish.
+ rep.join();
+ }
+ catch (Exception e)
+ {
+ System.err.println("Exception: " + e);
+ }
+ t.finished();
+ }
+
+}
+
+class TimelimitThread extends Thread
+{
+ long nmillis;
+ boolean finished = false;
+
+ TimelimitThread(long nmillis)
+ {
+ this.nmillis = nmillis;
+ }
+
+ public void finished()
+ {
+ finished = true;
+ }
+
+ public void run()
+ {
+ long targetTime = System.currentTimeMillis() + nmillis;
+ long curTime;
+
+ while (!finished &&
+ ((curTime = System.currentTimeMillis()) < targetTime)) {
+ long diff = targetTime - curTime;
+ if (diff > 100)
+ diff = 100;
+ try {
+ sleep(diff);
+ }
+ catch (InterruptedException ie) {
+ }
+ }
+ System.err.println("");
+ System.exit(1);
+ }
+}
diff --git a/bdb/test/scr016/TestRpcServer.java b/bdb/test/scr016/TestRpcServer.java
new file mode 100644
index 00000000000..ef325cef075
--- /dev/null
+++ b/bdb/test/scr016/TestRpcServer.java
@@ -0,0 +1,193 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: TestRpcServer.java,v 1.3 2002/01/11 15:54:03 bostic Exp $
+ */
+
+package com.sleepycat.test;
+
+import com.sleepycat.db.*;
+import java.io.File;
+import java.io.FileNotFoundException;
+import java.io.Reader;
+import java.io.StringReader;
+import java.io.IOException;
+import java.io.PrintStream;
+
+public class TestRpcServer
+{
+ private static final String FileName = "access.db";
+
+ public TestRpcServer()
+ {
+ }
+
+ private static void usage()
+ {
+ System.err.println("usage: TestRpcServer\n");
+ System.exit(1);
+ }
+
+ public static void main(String argv[])
+ {
+ try
+ {
+ TestRpcServer app = new TestRpcServer();
+ app.run();
+ }
+ catch (DbException dbe)
+ {
+ System.err.println("TestRpcServer: " + dbe.toString());
+ System.exit(1);
+ }
+ catch (FileNotFoundException fnfe)
+ {
+ System.err.println("TestRpcServer: " + fnfe.toString());
+ System.exit(1);
+ }
+ System.exit(0);
+ }
+
+ // Prompts for a line, and keeps prompting until a non blank
+ // line is returned. Returns null on error.
+ //
+ static public String askForLine(Reader reader,
+ PrintStream out, String prompt)
+ {
+ String result = "";
+ while (result != null && result.length() == 0) {
+ out.print(prompt);
+ out.flush();
+ result = getLine(reader);
+ }
+ return result;
+ }
+
+ // Not terribly efficient, but does the job.
+ // Works for reading a line from stdin or a file.
+ // Returns null on EOF. If EOF appears in the middle
+ // of a line, returns that line, then null on next call.
+ //
+ static public String getLine(Reader reader)
+ {
+ StringBuffer b = new StringBuffer();
+ int c;
+ try {
+ while ((c = reader.read()) != -1 && c != '\n') {
+ if (c != '\r')
+ b.append((char)c);
+ }
+ }
+ catch (IOException ioe) {
+ c = -1;
+ }
+
+ if (c == -1 && b.length() == 0)
+ return null;
+ else
+ return b.toString();
+ }
+
+ public void run()
+ throws DbException, FileNotFoundException
+ {
+ // Remove the previous database.
+ new File(FileName).delete();
+
+ DbEnv dbenv = new DbEnv(Db.DB_CLIENT);
+ dbenv.set_rpc_server(null, "localhost", 0, 0, 0);
+ dbenv.open(".", Db.DB_CREATE, 0644);
+ System.out.println("server connection set");
+
+ // Create the database object.
+ // There is no environment for this simple example.
+ Db table = new Db(dbenv, 0);
+ table.set_error_stream(System.err);
+ table.set_errpfx("TestRpcServer");
+ table.open(FileName, null, Db.DB_BTREE, Db.DB_CREATE, 0644);
+
+ //
+ // Insert records into the database, where the key is the user
+ // input and the data is the user input in reverse order.
+ //
+ Reader reader =
+ new StringReader("abc\nStuff\nmore Stuff\nlast line\n");
+
+ for (;;) {
+ String line = askForLine(reader, System.out, "input> ");
+ if (line == null)
+ break;
+
+ String reversed = (new StringBuffer(line)).reverse().toString();
+
+ // See definition of StringDbt below
+ //
+ StringDbt key = new StringDbt(line);
+ StringDbt data = new StringDbt(reversed);
+
+ try
+ {
+ int err;
+ if ((err = table.put(null,
+ key, data, Db.DB_NOOVERWRITE)) == Db.DB_KEYEXIST) {
+ System.out.println("Key " + line + " already exists.");
+ }
+ }
+ catch (DbException dbe)
+ {
+ System.out.println(dbe.toString());
+ }
+ System.out.println("");
+ }
+
+ // Acquire an iterator for the table.
+ Dbc iterator;
+ iterator = table.cursor(null, 0);
+
+ // Walk through the table, printing the key/data pairs.
+ // See class StringDbt defined below.
+ //
+ StringDbt key = new StringDbt();
+ StringDbt data = new StringDbt();
+ while (iterator.get(key, data, Db.DB_NEXT) == 0)
+ {
+ System.out.println(key.getString() + " : " + data.getString());
+ }
+ iterator.close();
+ table.close(0);
+ }
+
+ // Here's an example of how you can extend a Dbt in a straightforward
+ // way to allow easy storage/retrieval of strings, or whatever
+ // kind of data you wish. We've declared it as a static inner
+ // class, but it need not be.
+ //
+ static /*inner*/
+ class StringDbt extends Dbt
+ {
+ StringDbt()
+ {
+ set_flags(Db.DB_DBT_MALLOC); // tell Db to allocate on retrieval
+ }
+
+ StringDbt(String value)
+ {
+ setString(value);
+ set_flags(Db.DB_DBT_MALLOC); // tell Db to allocate on retrieval
+ }
+
+ void setString(String value)
+ {
+ set_data(value.getBytes());
+ set_size(value.length());
+ }
+
+ String getString()
+ {
+ return new String(get_data(), 0, get_size());
+ }
+ }
+}
diff --git a/bdb/test/scr016/TestSameDbt.java b/bdb/test/scr016/TestSameDbt.java
new file mode 100644
index 00000000000..9866ed49307
--- /dev/null
+++ b/bdb/test/scr016/TestSameDbt.java
@@ -0,0 +1,56 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: TestSameDbt.java,v 1.4 2002/01/23 14:29:51 bostic Exp $
+ */
+
+/*
+ * Simple test for get/put of specific values.
+ */
+
+package com.sleepycat.test;
+
+import com.sleepycat.db.*;
+import java.io.FileNotFoundException;
+
+public class TestSameDbt
+{
+ public static void main(String[] args)
+ {
+ try {
+ Db db = new Db(null, 0);
+ db.open(null, "my.db", null, Db.DB_BTREE, Db.DB_CREATE, 0644);
+
+ // try reusing the dbt
+ Dbt keydatadbt = new Dbt("stuff".getBytes());
+ int gotexcept = 0;
+
+ try {
+ db.put(null, keydatadbt, keydatadbt, 0);
+ }
+ catch (DbException dbe) {
+ System.out.println("got expected Db Exception: " + dbe);
+ gotexcept++;
+ }
+
+ if (gotexcept != 1) {
+ System.err.println("Missed exception");
+ System.out.println("** FAIL **");
+ }
+ else {
+ System.out.println("Test succeeded.");
+ }
+ }
+ catch (DbException dbe) {
+ System.err.println("Db Exception: " + dbe);
+ }
+ catch (FileNotFoundException fnfe) {
+ System.err.println("FileNotFoundException: " + fnfe);
+ }
+
+ }
+
+}
diff --git a/bdb/test/scr016/TestSameDbt.testout b/bdb/test/scr016/TestSameDbt.testout
new file mode 100644
index 00000000000..be4bbbe59e9
--- /dev/null
+++ b/bdb/test/scr016/TestSameDbt.testout
@@ -0,0 +1,2 @@
+got expected Db Exception: com.sleepycat.db.DbException: Dbt is already in use
+Test succeeded.
diff --git a/bdb/test/scr016/TestSimpleAccess.java b/bdb/test/scr016/TestSimpleAccess.java
new file mode 100644
index 00000000000..ba7390cada1
--- /dev/null
+++ b/bdb/test/scr016/TestSimpleAccess.java
@@ -0,0 +1,37 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: TestSimpleAccess.java,v 1.5 2002/08/16 19:35:55 dda Exp $
+ */
+
+/*
+ * Simple test for get/put of specific values.
+ */
+
+package com.sleepycat.test;
+
+import com.sleepycat.db.*;
+import java.io.FileNotFoundException;
+
+public class TestSimpleAccess
+{
+ public static void main(String[] args)
+ {
+ try {
+ Db db = new Db(null, 0);
+ db.open(null, "my.db", null, Db.DB_BTREE, Db.DB_CREATE, 0644);
+
+ TestUtil.populate(db);
+ System.out.println("finished test");
+ }
+ catch (DbException dbe) {
+ System.err.println("Db Exception: " + dbe);
+ }
+ catch (FileNotFoundException fnfe) {
+ System.err.println("FileNotFoundException: " + fnfe);
+ }
+ }
+}
diff --git a/bdb/test/scr016/TestSimpleAccess.testout b/bdb/test/scr016/TestSimpleAccess.testout
new file mode 100644
index 00000000000..dc88d4788e4
--- /dev/null
+++ b/bdb/test/scr016/TestSimpleAccess.testout
@@ -0,0 +1,3 @@
+got data: data
+get using bad key: DB_NOTFOUND: No matching key/data pair found
+finished test
diff --git a/bdb/test/scr016/TestStat.java b/bdb/test/scr016/TestStat.java
new file mode 100644
index 00000000000..55ba9823115
--- /dev/null
+++ b/bdb/test/scr016/TestStat.java
@@ -0,0 +1,57 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: TestStat.java,v 1.1 2002/08/16 19:35:56 dda Exp $
+ */
+
+/*
+ * Simple test for get/put of specific values.
+ */
+
+package com.sleepycat.test;
+
+import com.sleepycat.db.*;
+import java.io.FileNotFoundException;
+
+public class TestStat
+{
+ public static void main(String[] args)
+ {
+ int envflags =
+ Db.DB_INIT_MPOOL | Db.DB_INIT_LOCK |
+ Db.DB_INIT_LOG | Db.DB_INIT_TXN | Db.DB_CREATE;
+ try {
+ DbEnv dbenv = new DbEnv(0);
+ dbenv.open(".", envflags, 0);
+ Db db = new Db(dbenv, 0);
+ db.open(null, "my.db", null, Db.DB_BTREE, Db.DB_CREATE, 0);
+
+ TestUtil.populate(db);
+ System.out.println("BtreeStat:");
+ DbBtreeStat stat = (DbBtreeStat)db.stat(0);
+ System.out.println(" bt_magic: " + stat.bt_magic);
+
+ System.out.println("LogStat:");
+ DbLogStat logstat = dbenv.log_stat(0);
+ System.out.println(" st_magic: " + logstat.st_magic);
+ System.out.println(" st_cur_file: " + logstat.st_cur_file);
+
+ System.out.println("RepStat:");
+ DbRepStat repstat = dbenv.rep_stat(0);
+ System.out.println(" st_status: " + repstat.st_status);
+ System.out.println(" st_log_duplication: " +
+ repstat.st_log_duplicated);
+
+ System.out.println("finished test");
+ }
+ catch (DbException dbe) {
+ System.err.println("Db Exception: " + dbe);
+ }
+ catch (FileNotFoundException fnfe) {
+ System.err.println("FileNotFoundException: " + fnfe);
+ }
+ }
+}
diff --git a/bdb/test/scr016/TestStat.testout b/bdb/test/scr016/TestStat.testout
new file mode 100644
index 00000000000..caf9db1fb13
--- /dev/null
+++ b/bdb/test/scr016/TestStat.testout
@@ -0,0 +1,11 @@
+got data: data
+get using bad key: DB_NOTFOUND: No matching key/data pair found
+BtreeStat:
+ bt_magic: 340322
+LogStat:
+ st_magic: 264584
+ st_cur_file: 1
+RepStat:
+ st_status: 0
+ st_log_duplication: 0
+finished test
diff --git a/bdb/test/scr016/TestTruncate.java b/bdb/test/scr016/TestTruncate.java
new file mode 100644
index 00000000000..71377236246
--- /dev/null
+++ b/bdb/test/scr016/TestTruncate.java
@@ -0,0 +1,87 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: TestTruncate.java,v 1.4 2002/01/23 14:29:52 bostic Exp $
+ */
+
+/*
+ * Simple test for get/put of specific values.
+ */
+
+package com.sleepycat.test;
+
+import com.sleepycat.db.*;
+import java.io.FileNotFoundException;
+
+public class TestTruncate
+{
+ public static void main(String[] args)
+ {
+ try {
+ Db db = new Db(null, 0);
+ db.open(null, "my.db", null, Db.DB_BTREE, Db.DB_CREATE, 0644);
+
+ // populate our massive database.
+ Dbt keydbt = new Dbt("key".getBytes());
+ Dbt datadbt = new Dbt("data".getBytes());
+ db.put(null, keydbt, datadbt, 0);
+
+ // Now, retrieve. We could use keydbt over again,
+ // but that wouldn't be typical in an application.
+ Dbt goodkeydbt = new Dbt("key".getBytes());
+ Dbt badkeydbt = new Dbt("badkey".getBytes());
+ Dbt resultdbt = new Dbt();
+ resultdbt.set_flags(Db.DB_DBT_MALLOC);
+
+ int ret;
+
+ if ((ret = db.get(null, goodkeydbt, resultdbt, 0)) != 0) {
+ System.out.println("get: " + DbEnv.strerror(ret));
+ }
+ else {
+ String result =
+ new String(resultdbt.get_data(), 0, resultdbt.get_size());
+ System.out.println("got data: " + result);
+ }
+
+ if ((ret = db.get(null, badkeydbt, resultdbt, 0)) != 0) {
+ // We expect this...
+ System.out.println("get using bad key: " + DbEnv.strerror(ret));
+ }
+ else {
+ String result =
+ new String(resultdbt.get_data(), 0, resultdbt.get_size());
+ System.out.println("*** got data using bad key!!: " + result);
+ }
+
+ // Now, truncate and make sure that it's really gone.
+ System.out.println("truncating data...");
+ int nrecords = db.truncate(null, 0);
+ System.out.println("truncate returns " + nrecords);
+ if ((ret = db.get(null, goodkeydbt, resultdbt, 0)) != 0) {
+ // We expect this...
+ System.out.println("after trunctate get: " +
+ DbEnv.strerror(ret));
+ }
+ else {
+ String result =
+ new String(resultdbt.get_data(), 0, resultdbt.get_size());
+ System.out.println("got data: " + result);
+ }
+
+ db.close(0);
+ System.out.println("finished test");
+ }
+ catch (DbException dbe) {
+ System.err.println("Db Exception: " + dbe);
+ }
+ catch (FileNotFoundException fnfe) {
+ System.err.println("FileNotFoundException: " + fnfe);
+ }
+
+ }
+
+}
diff --git a/bdb/test/scr016/TestTruncate.testout b/bdb/test/scr016/TestTruncate.testout
new file mode 100644
index 00000000000..23f291df754
--- /dev/null
+++ b/bdb/test/scr016/TestTruncate.testout
@@ -0,0 +1,6 @@
+got data: data
+get using bad key: DB_NOTFOUND: No matching key/data pair found
+truncating data...
+truncate returns 1
+after truncate get: DB_NOTFOUND: No matching key/data pair found
+finished test
diff --git a/bdb/test/scr016/TestUtil.java b/bdb/test/scr016/TestUtil.java
new file mode 100644
index 00000000000..1bddfb0b014
--- /dev/null
+++ b/bdb/test/scr016/TestUtil.java
@@ -0,0 +1,57 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: TestUtil.java,v 1.1 2002/08/16 19:35:56 dda Exp $
+ */
+
+/*
+ * Utilities used by many tests.
+ */
+
+package com.sleepycat.test;
+
+import com.sleepycat.db.*;
+import java.io.FileNotFoundException;
+
+public class TestUtil
+{
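+ // Inserts a single key/data pair, then verifies get() with both a
+ // good key and a bad key (the bad key is expected to return DB_NOTFOUND).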
+ public static void populate(Db db)
+ throws DbException
+ {
+ // populate our massive database.
+ Dbt keydbt = new Dbt("key".getBytes());
+ Dbt datadbt = new Dbt("data".getBytes());
+ db.put(null, keydbt, datadbt, 0);
+
+ // Now, retrieve. We could use keydbt over again,
+ // but that wouldn't be typical in an application.
+ Dbt goodkeydbt = new Dbt("key".getBytes());
+ Dbt badkeydbt = new Dbt("badkey".getBytes());
+ Dbt resultdbt = new Dbt();
+ resultdbt.set_flags(Db.DB_DBT_MALLOC);
+
+ int ret;
+
+ if ((ret = db.get(null, goodkeydbt, resultdbt, 0)) != 0) {
+ System.out.println("get: " + DbEnv.strerror(ret));
+ }
+ else {
+ String result =
+ new String(resultdbt.get_data(), 0, resultdbt.get_size());
+ System.out.println("got data: " + result);
+ }
+
+ if ((ret = db.get(null, badkeydbt, resultdbt, 0)) != 0) {
+ // We expect this...
+ System.out.println("get using bad key: " + DbEnv.strerror(ret));
+ }
+ else {
+ String result =
+ new String(resultdbt.get_data(), 0, resultdbt.get_size());
+ System.out.println("*** got data using bad key!!: " + result);
+ }
+ }
+}
diff --git a/bdb/test/scr016/TestXAServlet.java b/bdb/test/scr016/TestXAServlet.java
new file mode 100644
index 00000000000..8b9fe57e261
--- /dev/null
+++ b/bdb/test/scr016/TestXAServlet.java
@@ -0,0 +1,313 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: TestXAServlet.java,v 1.1 2002/04/24 03:26:33 dda Exp $
+ */
+
+/*
+ * Simple test of XA, using WebLogic.
+ */
+
+package com.sleepycat.test;
+
+import com.sleepycat.db.*;
+import com.sleepycat.db.xa.*;
+import java.io.File;
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.io.PrintWriter;
+import java.util.Hashtable;
+import javax.servlet.*;
+import javax.servlet.http.*;
+import javax.transaction.*;
+import javax.transaction.xa.*;
+import javax.naming.Context;
+import javax.naming.InitialContext;
+import javax.naming.NamingException;
+import weblogic.transaction.TxHelper;
+import weblogic.transaction.TransactionManager;
+
+public class TestXAServlet extends HttpServlet
+{
+ public static final String ENV_HOME = "TESTXADIR";
+ public static final String DEFAULT_URL = "t3://localhost:7001";
+ public static String filesep = System.getProperty("file.separator");
+
+ private static TransactionManager tm;
+ private static DbXAResource xaresource;
+ private static boolean initialized = false;
+
+ /**
+ * Utility to remove files recursively.
+ */
+ public static void removeRecursive(File f)
+ {
+ if (f.isDirectory()) {
+ String[] sub = f.list();
+ for (int i=0; i<sub.length; i++)
+ removeRecursive(new File(f.getPath() + filesep + sub[i]));
+ }
+ f.delete();
+ }
+
+ /**
+ * Typically done only once, unless shutdown is invoked. This
+ * sets up directories, and removes any work files from previous
+ * runs. Also establishes a transaction manager that we'll use
+ * for various transactions. Each call opens/creates a new DB
+ * environment in our work directory.
+ */
+ public static synchronized void startup()
+ {
+ if (initialized)
+ return;
+
+ try {
+ File dir = new File(ENV_HOME);
+ removeRecursive(dir);
+ dir.mkdirs();
+
+ System.out.println("Getting context");
+ InitialContext ic = getInitialContext(DEFAULT_URL);
+ System.out.println("Creating XAResource");
+ xaresource = new DbXAResource(ENV_HOME, 77, 0);
+ System.out.println("Registering with transaction manager");
+ tm = TxHelper.getTransactionManager();
+ tm.registerStaticResource("DbXA", xaresource);
+ initialized = true;
+ }
+ catch (Exception e) {
+ System.err.println("Exception: " + e);
+ e.printStackTrace();
+ }
+ initialized = true;
+ }
+
+ /**
+ * Closes the XA resource manager.
+ */
+ public static synchronized void shutdown(PrintWriter out)
+ throws XAException
+ {
+ if (!initialized)
+ return;
+
+ out.println("Closing the resource.");
+ xaresource.close(0);
+ out.println("Shutdown complete.");
+ initialized = false;
+ }
+
+
+ /**
+ * Should be called once per chunk of major activity.
+ */
+ public void initialize()
+ {
+ startup();
+ }
+
+ private static int count = 1;
+ private static boolean debugInited = false;
+ private Xid bogusXid;
+
+ public static synchronized int incrCount()
+ {
+ return count++;
+ }
+
+ public void debugSetup(PrintWriter out)
+ throws ServletException, IOException
+ {
+ try {
+ Db.load_db();
+ }
+ catch (Exception e) {
+ out.println("got exception during load: " + e);
+ System.out.println("got exception during load: " + e);
+ }
+ out.println("The servlet has been restarted, and Berkeley DB is loaded");
+ out.println("<p>If you're debugging, you should now start the debugger and set breakpoints.");
+ }
+
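+ /**
+ * Performs a single put inside an XA transaction, commits or rolls it
+ * back depending on the requested operation, then dumps the current
+ * database contents back to the client.
+ */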
+ public void doXATransaction(PrintWriter out, String key, String value,
+ String operation)
+ throws ServletException, IOException
+ {
+ try {
+ int counter = incrCount();
+ if (key == null || key.equals(""))
+ key = "key" + counter;
+ if (value == null || value.equals(""))
+ value = "value" + counter;
+
+ out.println("Adding (\"" + key + "\", \"" + value + "\")");
+
+ System.out.println("XA transaction begin");
+ tm.begin();
+ System.out.println("getting XA transaction");
+ DbXAResource.DbAttach attach = DbXAResource.xa_attach(null, null);
+ DbTxn txn = attach.get_txn();
+ DbEnv env = attach.get_env();
+ Db db = new Db(env, 0);
+ db.open(txn, "my.db", null, Db.DB_BTREE, Db.DB_CREATE, 0644);
+ System.out.println("DB put " + key);
+ db.put(txn,
+ new Dbt(key.getBytes()),
+ new Dbt(value.getBytes()),
+ 0);
+
+ if (operation.equals("rollback")) {
+ out.println("<p>ROLLBACK");
+ System.out.println("XA transaction rollback");
+ tm.rollback();
+ System.out.println("XA rollback returned");
+
+ // The old db is no good after the rollback
+ // since the open was part of the transaction.
+ // Get another db for the cursor dump
+ //
+ db = new Db(env, 0);
+ db.open(null, "my.db", null, Db.DB_BTREE, Db.DB_CREATE, 0644);
+ }
+ else {
+ out.println("<p>COMMITTED");
+ System.out.println("XA transaction commit");
+ tm.commit();
+ }
+
+ // Show the current state of the database.
+ Dbc dbc = db.cursor(null, 0);
+ Dbt gotkey = new Dbt();
+ Dbt gotdata = new Dbt();
+
+ out.println("<p>Current database values:");
+ while (dbc.get(gotkey, gotdata, Db.DB_NEXT) == 0) {
+ out.println("<br> " + getDbtString(gotkey) + " : "
+ + getDbtString(gotdata));
+ }
+ dbc.close();
+ db.close(0);
+ }
+ catch (DbException dbe) {
+ System.err.println("Db Exception: " + dbe);
+ out.println(" *** Exception received: " + dbe);
+ dbe.printStackTrace();
+ }
+ catch (FileNotFoundException fnfe) {
+ System.err.println("FileNotFoundException: " + fnfe);
+ out.println(" *** Exception received: " + fnfe);
+ fnfe.printStackTrace();
+ }
+ // Includes SystemException, NotSupportedException, RollbackException
+ catch (Exception e) {
+ System.err.println("Exception: " + e);
+ out.println(" *** Exception received: " + e);
+ e.printStackTrace();
+ }
+ }
+
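+ /**
+ * Builds a placeholder Xid; it is only used to exercise forget() and
+ * prepare() directly, bypassing the transaction manager.
+ */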
+ private static Xid getBogusXid()
+ throws XAException
+ {
+ return new DbXid(1, "BOGUS_gtrid".getBytes(),
+ "BOGUS_bqual".getBytes());
+ }
+
+ private static String getDbtString(Dbt dbt)
+ {
+ return new String(dbt.get_data(), 0, dbt.get_size());
+ }
+
+ /**
+ * doGet is called as a result of invoking the servlet.
+ */
+ public void doGet(HttpServletRequest req, HttpServletResponse resp)
+ throws ServletException, IOException
+ {
+ try {
+ resp.setContentType("text/html");
+ PrintWriter out = resp.getWriter();
+
+ String key = req.getParameter("key");
+ String value = req.getParameter("value");
+ String operation = req.getParameter("operation");
+
+ out.println("<HTML>");
+ out.println("<HEAD>");
+ out.println("<TITLE>Berkeley DB with XA</TITLE>");
+ out.println("</HEAD><BODY>");
+ out.println("<a href=\"TestXAServlet" +
+ "\">Database put and commit</a><br>");
+ out.println("<a href=\"TestXAServlet?operation=rollback" +
+ "\">Database put and rollback</a><br>");
+ out.println("<a href=\"TestXAServlet?operation=close" +
+ "\">Close the XA resource manager</a><br>");
+ out.println("<a href=\"TestXAServlet?operation=forget" +
+ "\">Forget an operation (bypasses TM)</a><br>");
+ out.println("<a href=\"TestXAServlet?operation=prepare" +
+ "\">Prepare an operation (bypasses TM)</a><br>");
+ out.println("<br>");
+
+ if (!debugInited) {
+ // Don't initialize XA yet; give the user
+ // a chance to attach a debugger if necessary.
+ debugSetup(out);
+ debugInited = true;
+ }
+ else {
+ initialize();
+ if (operation == null)
+ operation = "commit";
+
+ if (operation.equals("close")) {
+ shutdown(out);
+ }
+ else if (operation.equals("forget")) {
+ // A bogus test, we just make sure the API is callable.
+ out.println("<p>FORGET");
+ System.out.println("XA forget bogus XID (bypass TM)");
+ xaresource.forget(getBogusXid());
+ }
+ else if (operation.equals("prepare")) {
+ // A bogus test, we just make sure the API is callable.
+ out.println("<p>PREPARE");
+ System.out.println("XA prepare bogus XID (bypass TM)");
+ xaresource.prepare(getBogusXid());
+ }
+ else {
+ // commit or rollback; close, forget and prepare were handled above
+ doXATransaction(out, key, value, operation);
+ }
+ }
+ out.println("</BODY></HTML>");
+
+ System.out.println("Finished.");
+ }
+ // Includes SystemException, NotSupportedException, RollbackException
+ catch (Exception e) {
+ System.err.println("Exception: " + e);
+ e.printStackTrace();
+ }
+
+ }
+
+
+ /**
+ * From weblogic's sample code:
+ * samples/examples/jta/jmsjdbc/Client.java
+ */
+ private static InitialContext getInitialContext(String url)
+ throws NamingException
+ {
+ Hashtable env = new Hashtable();
+ env.put(Context.INITIAL_CONTEXT_FACTORY,
+ "weblogic.jndi.WLInitialContextFactory");
+ env.put(Context.PROVIDER_URL, url);
+ return new InitialContext(env);
+ }
+
+}
diff --git a/bdb/test/scr016/chk.javatests b/bdb/test/scr016/chk.javatests
new file mode 100644
index 00000000000..34d7dfe78d7
--- /dev/null
+++ b/bdb/test/scr016/chk.javatests
@@ -0,0 +1,79 @@
+#!/bin/sh -
+#
+# $Id: chk.javatests,v 1.5 2002/08/16 19:35:56 dda Exp $
+#
+# Check to make sure that regression tests for Java run.
+
+TEST_JAVA_SRCDIR=../test/scr016 # must be a relative directory
+JAVA=${JAVA:-java}
+JAVAC=${JAVAC:-javac}
+
+# CLASSPATH is used by javac and java.
+# We use CLASSPATH rather than the -classpath command line option
+# because the latter behaves differently between JDK1.1 and JDK1.2
+export CLASSPATH="./classes:../db.jar"
+export LD_LIBRARY_PATH="../.libs"
+
+
+# All paths must be relative to a subdirectory of the build directory
+LIBS="-L.. -ldb -ldb_cxx"
+CXXFLAGS="-I.. -I../../dbinc"
+
+# Test must be run from a local build directory, not from a test
+# directory.
+cd ..
+[ -f db_config.h ] || {
+ echo 'FAIL: chk.javatests must be run from a local build directory.'
+ exit 1
+}
+[ -d ../docs_src ] || {
+ echo 'FAIL: chk.javatests must be run from a local build directory.'
+ exit 1
+}
+version=`sed -e 's/.* \([0-9]*\.[0-9]*\)\..*/\1/' -e q ../README `
+[ -f libdb_java-$version.la ] || make libdb_java-$version.la || {
+ echo "FAIL: unable to build libdb_java-$version.la"
+ exit 1
+}
+[ -f db.jar ] || make db.jar || {
+ echo 'FAIL: unable to build db.jar'
+ exit 1
+}
+testnames=`cd $TEST_JAVA_SRCDIR; ls *.java | sed -e 's/\.java$//'`
+
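+# For each test: compile it into a scratch TESTJAVA directory, run it with
+# its .testin file (if any) on stdin, and compare the output against the
+# recorded .testout/.testerr files.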
+for testname in $testnames; do
+ if grep -x $testname $TEST_JAVA_SRCDIR/ignore > /dev/null; then
+ echo " **** java test $testname ignored"
+ continue
+ fi
+
+ echo " ==== java test $testname"
+ rm -rf TESTJAVA; mkdir -p TESTJAVA/classes
+ cd ./TESTJAVA
+ testprefix=../$TEST_JAVA_SRCDIR/$testname
+ ${JAVAC} -d ./classes $testprefix.java ../$TEST_JAVA_SRCDIR/TestUtil.java > ../$testname.compileout 2>&1 || {
+pwd
+ echo "FAIL: compilation of $testname failed, see ../$testname.compileout"
+ exit 1
+ }
+ rm -f ../$testname.compileout
+ infile=$testprefix.testin
+ [ -f $infile ] || infile=/dev/null
+ goodoutfile=$testprefix.testout
+ [ -f $goodoutfile ] || goodoutfile=/dev/null
+ gooderrfile=$testprefix.testerr
+ [ -f $gooderrfile ] || gooderrfile=/dev/null
+ ${JAVA} com.sleepycat.test.$testname <$infile >../$testname.out 2>../$testname.err
+ cmp ../$testname.out $goodoutfile > /dev/null || {
+ echo "FAIL: $testname output differs: see ../$testname.out, $goodoutfile"
+ exit 1
+ }
+ cmp ../$testname.err $gooderrfile > /dev/null || {
+ echo "FAIL: $testname error differs: see ../$testname.err, $gooderrfile"
+ exit 1
+ }
+ cd ..
+ rm -f $testname.err $testname.out
+done
+rm -rf TESTJAVA
+exit 0
diff --git a/bdb/test/scr016/ignore b/bdb/test/scr016/ignore
new file mode 100644
index 00000000000..1dfaf6adea4
--- /dev/null
+++ b/bdb/test/scr016/ignore
@@ -0,0 +1,22 @@
+#
+# $Id: ignore,v 1.4 2002/08/16 19:35:56 dda Exp $
+#
+# A list of tests to ignore
+
+# TestRpcServer is not debugged
+TestRpcServer
+
+# TestReplication is not debugged
+TestReplication
+
+# These are currently not working
+TestAppendRecno
+TestAssociate
+TestLogc
+TestConstruct02
+
+# TestUtil is used by the other tests, it does not stand on its own
+TestUtil
+
+# XA needs a special installation, it is not part of testall
+TestXAServlet
diff --git a/bdb/test/scr016/testall b/bdb/test/scr016/testall
new file mode 100644
index 00000000000..a4e1b5a8c70
--- /dev/null
+++ b/bdb/test/scr016/testall
@@ -0,0 +1,32 @@
+#!/bin/sh -
+# $Id: testall,v 1.4 2001/09/13 14:49:37 dda Exp $
+#
+# Run all the Java regression tests
+
+ecode=0
+prefixarg=""
+stdinarg=""
+while :
+do
+ case "$1" in
+ --prefix=* )
+ prefixarg="$1"; shift;;
+ --stdin )
+ stdinarg="$1"; shift;;
+ * )
+ break
+ esac
+done
+files="`find . -name \*.java -print`"
+for file in $files; do
+ name=`echo $file | sed -e 's:^\./::' -e 's/\.java$//'`
+ if grep $name ignore > /dev/null; then
+ echo " **** java test $name ignored"
+ else
+ echo " ==== java test $name"
+ if ! sh ./testone $prefixarg $stdinarg $name; then
+ ecode=1
+ fi
+ fi
+done
+exit $ecode
diff --git a/bdb/test/scr016/testone b/bdb/test/scr016/testone
new file mode 100644
index 00000000000..5f5d2e0017d
--- /dev/null
+++ b/bdb/test/scr016/testone
@@ -0,0 +1,122 @@
+#!/bin/sh -
+# $Id: testone,v 1.5 2002/08/16 19:35:56 dda Exp $
+#
+# Run just one Java regression test, the single argument
+# is the classname within this package.
+
+error()
+{
+ echo '' >&2
+ echo "Java regression error: $@" >&2
+ echo '' >&2
+ ecode=1
+}
+
+# compares the result against the good version,
+# reports differences, and removes the result file
+# if there are no differences.
+#
+compare_result()
+{
+ good="$1"
+ latest="$2"
+ if [ ! -e "$good" ]; then
+ echo "Note: $good does not exist"
+ return
+ fi
+ tmpout=/tmp/blddb$$.tmp
+ diff "$good" "$latest" > $tmpout
+ if [ -s $tmpout ]; then
+ nbad=`grep '^[0-9]' $tmpout | wc -l`
+ error "$good and $latest differ in $nbad places."
+ else
+ rm $latest
+ fi
+ rm -f $tmpout
+}
+
+ecode=0
+stdinflag=n
+JAVA=${JAVA:-java}
+JAVAC=${JAVAC:-javac}
+
+# classdir is relative to TESTDIR subdirectory
+classdir=./classes
+
+# CLASSPATH is used by javac and java.
+# We use CLASSPATH rather than the -classpath command line option
+# because the latter behaves differently between JDK1.1 and JDK1.2
+export CLASSPATH="$classdir:$CLASSPATH"
+
+# determine the prefix of the install tree
+prefix=""
+while :
+do
+ case "$1" in
+ --prefix=* )
+ prefix="`echo $1 | sed -e 's/--prefix=//'`"; shift
+ export LD_LIBRARY_PATH="$prefix/lib:$LD_LIBRARY_PATH"
+ export CLASSPATH="$prefix/lib/db.jar:$CLASSPATH"
+ ;;
+ --stdin )
+ stdinflag=y; shift
+ ;;
+ * )
+ break
+ ;;
+ esac
+done
+
+if [ "$#" = 0 ]; then
+ echo 'Usage: testone [ --prefix=<dir> | --stdin ] TestName'
+ exit 1
+fi
+name="$1"
+
+# class must be public
+if ! grep "public.*class.*$name" $name.java > /dev/null; then
+ error "public class $name is not declared in file $name.java"
+ exit 1
+fi
+
+# compile
+rm -rf TESTDIR; mkdir TESTDIR
+cd ./TESTDIR
+mkdir -p $classdir
+${JAVAC} -d $classdir ../$name.java ../TestUtil.java > ../$name.compileout 2>&1
+if [ $? != 0 -o -s ../$name.compileout ]; then
+ error "compilation of $name failed, see $name.compileout"
+ exit 1
+fi
+rm -f ../$name.compileout
+
+# find input and error file
+infile=../$name.testin
+if [ ! -f $infile ]; then
+ infile=/dev/null
+fi
+
+# run and diff results
+rm -rf TESTDIR
+if [ "$stdinflag" = y ]
+then
+ ${JAVA} com.sleepycat.test.$name $TEST_ARGS >../$name.out 2>../$name.err
+else
+ ${JAVA} com.sleepycat.test.$name $TEST_ARGS <$infile >../$name.out 2>../$name.err
+fi
+cd ..
+
+testerr=$name.testerr
+if [ ! -f $testerr ]; then
+ testerr=/dev/null
+fi
+
+testout=$name.testout
+if [ ! -f $testout ]; then
+ testout=/dev/null
+fi
+
+compare_result $testout $name.out
+compare_result $testerr $name.err
+rm -rf TESTDIR
+exit $ecode
diff --git a/bdb/test/scr017/O.BH b/bdb/test/scr017/O.BH
new file mode 100644
index 00000000000..cd499d38779
--- /dev/null
+++ b/bdb/test/scr017/O.BH
@@ -0,0 +1,196 @@
+abc_10_efg
+abc_10_efg
+abc_11_efg
+abc_11_efg
+abc_12_efg
+abc_12_efg
+abc_13_efg
+abc_13_efg
+abc_14_efg
+abc_14_efg
+abc_15_efg
+abc_15_efg
+abc_16_efg
+abc_16_efg
+abc_17_efg
+abc_17_efg
+abc_18_efg
+abc_18_efg
+abc_19_efg
+abc_19_efg
+abc_1_efg
+abc_1_efg
+abc_20_efg
+abc_20_efg
+abc_21_efg
+abc_21_efg
+abc_22_efg
+abc_22_efg
+abc_23_efg
+abc_23_efg
+abc_24_efg
+abc_24_efg
+abc_25_efg
+abc_25_efg
+abc_26_efg
+abc_26_efg
+abc_27_efg
+abc_27_efg
+abc_28_efg
+abc_28_efg
+abc_29_efg
+abc_29_efg
+abc_2_efg
+abc_2_efg
+abc_30_efg
+abc_30_efg
+abc_31_efg
+abc_31_efg
+abc_32_efg
+abc_32_efg
+abc_33_efg
+abc_33_efg
+abc_34_efg
+abc_34_efg
+abc_36_efg
+abc_36_efg
+abc_37_efg
+abc_37_efg
+abc_38_efg
+abc_38_efg
+abc_39_efg
+abc_39_efg
+abc_3_efg
+abc_3_efg
+abc_40_efg
+abc_40_efg
+abc_41_efg
+abc_41_efg
+abc_42_efg
+abc_42_efg
+abc_43_efg
+abc_43_efg
+abc_44_efg
+abc_44_efg
+abc_45_efg
+abc_45_efg
+abc_46_efg
+abc_46_efg
+abc_47_efg
+abc_47_efg
+abc_48_efg
+abc_48_efg
+abc_49_efg
+abc_49_efg
+abc_4_efg
+abc_4_efg
+abc_50_efg
+abc_50_efg
+abc_51_efg
+abc_51_efg
+abc_52_efg
+abc_52_efg
+abc_53_efg
+abc_53_efg
+abc_54_efg
+abc_54_efg
+abc_55_efg
+abc_55_efg
+abc_56_efg
+abc_56_efg
+abc_57_efg
+abc_57_efg
+abc_58_efg
+abc_58_efg
+abc_59_efg
+abc_59_efg
+abc_5_efg
+abc_5_efg
+abc_60_efg
+abc_60_efg
+abc_61_efg
+abc_61_efg
+abc_62_efg
+abc_62_efg
+abc_63_efg
+abc_63_efg
+abc_64_efg
+abc_64_efg
+abc_65_efg
+abc_65_efg
+abc_66_efg
+abc_66_efg
+abc_67_efg
+abc_67_efg
+abc_68_efg
+abc_68_efg
+abc_69_efg
+abc_69_efg
+abc_6_efg
+abc_6_efg
+abc_70_efg
+abc_70_efg
+abc_71_efg
+abc_71_efg
+abc_72_efg
+abc_72_efg
+abc_73_efg
+abc_73_efg
+abc_74_efg
+abc_74_efg
+abc_75_efg
+abc_75_efg
+abc_76_efg
+abc_76_efg
+abc_77_efg
+abc_77_efg
+abc_78_efg
+abc_78_efg
+abc_79_efg
+abc_79_efg
+abc_7_efg
+abc_7_efg
+abc_80_efg
+abc_80_efg
+abc_81_efg
+abc_81_efg
+abc_82_efg
+abc_82_efg
+abc_83_efg
+abc_83_efg
+abc_84_efg
+abc_84_efg
+abc_85_efg
+abc_85_efg
+abc_86_efg
+abc_86_efg
+abc_87_efg
+abc_87_efg
+abc_88_efg
+abc_88_efg
+abc_89_efg
+abc_89_efg
+abc_8_efg
+abc_8_efg
+abc_90_efg
+abc_90_efg
+abc_91_efg
+abc_91_efg
+abc_92_efg
+abc_92_efg
+abc_93_efg
+abc_93_efg
+abc_94_efg
+abc_94_efg
+abc_95_efg
+abc_95_efg
+abc_96_efg
+abc_96_efg
+abc_97_efg
+abc_97_efg
+abc_98_efg
+abc_98_efg
+abc_99_efg
+abc_99_efg
+abc_9_efg
+abc_9_efg
diff --git a/bdb/test/scr017/O.R b/bdb/test/scr017/O.R
new file mode 100644
index 00000000000..d78a04727d8
--- /dev/null
+++ b/bdb/test/scr017/O.R
@@ -0,0 +1,196 @@
+1
+abc_1_efg
+2
+abc_2_efg
+3
+abc_3_efg
+4
+abc_4_efg
+5
+abc_5_efg
+6
+abc_6_efg
+7
+abc_7_efg
+8
+abc_8_efg
+9
+abc_9_efg
+10
+abc_10_efg
+11
+abc_11_efg
+12
+abc_12_efg
+13
+abc_13_efg
+14
+abc_14_efg
+15
+abc_15_efg
+16
+abc_16_efg
+17
+abc_17_efg
+18
+abc_18_efg
+19
+abc_19_efg
+20
+abc_20_efg
+21
+abc_21_efg
+22
+abc_22_efg
+23
+abc_23_efg
+24
+abc_24_efg
+25
+abc_25_efg
+26
+abc_26_efg
+27
+abc_27_efg
+28
+abc_28_efg
+29
+abc_29_efg
+30
+abc_30_efg
+31
+abc_31_efg
+32
+abc_32_efg
+33
+abc_33_efg
+34
+abc_34_efg
+35
+abc_36_efg
+36
+abc_37_efg
+37
+abc_38_efg
+38
+abc_39_efg
+39
+abc_40_efg
+40
+abc_41_efg
+41
+abc_42_efg
+42
+abc_43_efg
+43
+abc_44_efg
+44
+abc_45_efg
+45
+abc_46_efg
+46
+abc_47_efg
+47
+abc_48_efg
+48
+abc_49_efg
+49
+abc_50_efg
+50
+abc_51_efg
+51
+abc_52_efg
+52
+abc_53_efg
+53
+abc_54_efg
+54
+abc_55_efg
+55
+abc_56_efg
+56
+abc_57_efg
+57
+abc_58_efg
+58
+abc_59_efg
+59
+abc_60_efg
+60
+abc_61_efg
+61
+abc_62_efg
+62
+abc_63_efg
+63
+abc_64_efg
+64
+abc_65_efg
+65
+abc_66_efg
+66
+abc_67_efg
+67
+abc_68_efg
+68
+abc_69_efg
+69
+abc_70_efg
+70
+abc_71_efg
+71
+abc_72_efg
+72
+abc_73_efg
+73
+abc_74_efg
+74
+abc_75_efg
+75
+abc_76_efg
+76
+abc_77_efg
+77
+abc_78_efg
+78
+abc_79_efg
+79
+abc_80_efg
+80
+abc_81_efg
+81
+abc_82_efg
+82
+abc_83_efg
+83
+abc_84_efg
+84
+abc_85_efg
+85
+abc_86_efg
+86
+abc_87_efg
+87
+abc_88_efg
+88
+abc_89_efg
+89
+abc_90_efg
+90
+abc_91_efg
+91
+abc_92_efg
+92
+abc_93_efg
+93
+abc_94_efg
+94
+abc_95_efg
+95
+abc_96_efg
+96
+abc_97_efg
+97
+abc_98_efg
+98
+abc_99_efg
diff --git a/bdb/test/scr017/chk.db185 b/bdb/test/scr017/chk.db185
new file mode 100644
index 00000000000..c2a07c51d26
--- /dev/null
+++ b/bdb/test/scr017/chk.db185
@@ -0,0 +1,26 @@
+#!/bin/sh -
+#
+# $Id: chk.db185,v 1.2 2001/10/12 17:55:38 bostic Exp $
+#
+# Check to make sure we can run DB 1.85 code.
+
+[ -f ../libdb.a ] || (cd .. && make libdb.a) || {
+ echo 'FAIL: unable to find or build libdb.a'
+ exit 1
+}
+
+if cc -g -Wall -I.. t.c ../libdb.a -o t; then
+ :
+else
+ echo "FAIL: unable to compile test program t.c"
+ exit 1
+fi
+
+if ./t; then
+ :
+else
+ echo "FAIL: test program failed"
+ exit 1
+fi
+
+exit 0
diff --git a/bdb/test/scr017/t.c b/bdb/test/scr017/t.c
new file mode 100644
index 00000000000..f03b33880d6
--- /dev/null
+++ b/bdb/test/scr017/t.c
@@ -0,0 +1,188 @@
+#include <sys/types.h>
+
+#include <errno.h>
+#include <fcntl.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "db_185.h"
+
+void err(char *);
+int mycmp(const DBT *, const DBT *);
+void ops(DB *, int);
+
+int
+main()
+{
+ DB *dbp;
+ HASHINFO h_info;
+ BTREEINFO b_info;
+ RECNOINFO r_info;
+
+ printf("\tBtree...\n");
+ memset(&b_info, 0, sizeof(b_info));
+ b_info.flags = R_DUP;
+ b_info.cachesize = 100 * 1024;
+ b_info.psize = 512;
+ b_info.lorder = 4321;
+ b_info.compare = mycmp;
+ (void)remove("a.db");
+ if ((dbp =
+ dbopen("a.db", O_CREAT | O_RDWR, 0664, DB_BTREE, &b_info)) == NULL)
+ err("dbopen: btree");
+ ops(dbp, DB_BTREE);
+
+ printf("\tHash...\n");
+ memset(&h_info, 0, sizeof(h_info));
+ h_info.bsize = 512;
+ h_info.ffactor = 6;
+ h_info.nelem = 1000;
+ h_info.cachesize = 100 * 1024;
+ h_info.lorder = 1234;
+ (void)remove("a.db");
+ if ((dbp =
+ dbopen("a.db", O_CREAT | O_RDWR, 0664, DB_HASH, &h_info)) == NULL)
+ err("dbopen: hash");
+ ops(dbp, DB_HASH);
+
+ printf("\tRecno...\n");
+ memset(&r_info, 0, sizeof(r_info));
+ r_info.flags = R_FIXEDLEN;
+ r_info.cachesize = 100 * 1024;
+ r_info.psize = 1024;
+ r_info.reclen = 37;
+ (void)remove("a.db");
+ if ((dbp =
+ dbopen("a.db", O_CREAT | O_RDWR, 0664, DB_RECNO, &r_info)) == NULL)
+ err("dbopen: recno");
+ ops(dbp, DB_RECNO);
+
+ return (0);
+}
+
+int
+mycmp(a, b)
+ const DBT *a, *b;
+{
+ size_t len;
+ u_int8_t *p1, *p2;
+
+ len = a->size > b->size ? b->size : a->size;
+ for (p1 = a->data, p2 = b->data; len--; ++p1, ++p2)
+ if (*p1 != *p2)
+ return ((long)*p1 - (long)*p2);
+ return ((long)a->size - (long)b->size);
+}
+
+void
+ops(dbp, type)
+ DB *dbp;
+ int type;
+{
+ FILE *outfp;
+ DBT key, data;
+ recno_t recno;
+ int i, ret;
+ char buf[64];
+
+ memset(&key, 0, sizeof(key));
+ memset(&data, 0, sizeof(data));
+
+ for (i = 1; i < 100; ++i) { /* Test DB->put. */
+ sprintf(buf, "abc_%d_efg", i);
+ if (type == DB_RECNO) {
+ recno = i;
+ key.data = &recno;
+ key.size = sizeof(recno);
+ } else {
+ key.data = data.data = buf;
+ key.size = data.size = strlen(buf);
+ }
+
+ data.data = buf;
+ data.size = strlen(buf);
+ if (dbp->put(dbp, &key, &data, 0))
+ err("DB->put");
+ }
+
+ if (type == DB_RECNO) { /* Test DB->get. */
+ recno = 97;
+ key.data = &recno;
+ key.size = sizeof(recno);
+ } else {
+ key.data = buf;
+ key.size = strlen(buf);
+ }
+ sprintf(buf, "abc_%d_efg", 97);
+ if (dbp->get(dbp, &key, &data, 0) != 0)
+ err("DB->get");
+ if (memcmp(data.data, buf, strlen(buf)))
+ err("DB->get: wrong data returned");
+
+ if (type == DB_RECNO) { /* Test DB->put no-overwrite. */
+ recno = 42;
+ key.data = &recno;
+ key.size = sizeof(recno);
+ } else {
+ key.data = buf;
+ key.size = strlen(buf);
+ }
+ sprintf(buf, "abc_%d_efg", 42);
+ if (dbp->put(dbp, &key, &data, R_NOOVERWRITE) == 0)
+ err("DB->put: no-overwrite succeeded");
+
+ if (type == DB_RECNO) { /* Test DB->del. */
+ recno = 35;
+ key.data = &recno;
+ key.size = sizeof(recno);
+ } else {
+ sprintf(buf, "abc_%d_efg", 35);
+ key.data = buf;
+ key.size = strlen(buf);
+ }
+ if (dbp->del(dbp, &key, 0))
+ err("DB->del");
+
+ /* Test DB->seq. */
+ if ((outfp = fopen("output", "w")) == NULL)
+ err("fopen: output");
+ while ((ret = dbp->seq(dbp, &key, &data, R_NEXT)) == 0) {
+ if (type == DB_RECNO)
+ fprintf(outfp, "%d\n", *(int *)key.data);
+ else
+ fprintf(outfp,
+ "%.*s\n", (int)key.size, (char *)key.data);
+ fprintf(outfp, "%.*s\n", (int)data.size, (char *)data.data);
+ }
+ if (ret != 1)
+ err("DB->seq");
+ fclose(outfp);
+ switch (type) {
+ case DB_BTREE:
+ ret = system("cmp output O.BH");
+ break;
+ case DB_HASH:
+ ret = system("sort output | cmp - O.BH");
+ break;
+ case DB_RECNO:
+ ret = system("cmp output O.R");
+ break;
+ }
+ if (ret != 0)
+ err("output comparison failed");
+
+ if (dbp->sync(dbp, 0)) /* Test DB->sync. */
+ err("DB->sync");
+
+ if (dbp->close(dbp)) /* Test DB->close. */
+ err("DB->close");
+}
+
+void
+err(s)
+ char *s;
+{
+ fprintf(stderr, "\t%s: %s\n", s, strerror(errno));
+ exit (1);
+}
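
Note: the scr017 test above exercises the DB 1.85 compatibility layer (dbopen() plus the DB->put/get/del/seq/sync/close function pointers) against btree, hash, and recno files, then diffs the cursor dump against the expected-output files added alongside it. For readers who only know the modern handle-based API, here is a minimal standalone sketch of the 1.85 interface the test depends on; it is illustrative only, is not part of this commit, and the file, key, and data names are made up.

    /* Minimal DB 1.85 sketch: open a btree, store one pair, read it back. */
    #include <sys/types.h>

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>

    #include "db_185.h"

    int
    main()
    {
            DB *dbp;
            DBT key, data;

            /* A NULL openinfo pointer accepts the access-method defaults. */
            if ((dbp = dbopen("demo.db",
                O_CREAT | O_RDWR, 0664, DB_BTREE, NULL)) == NULL)
                    return (1);

            memset(&key, 0, sizeof(key));
            memset(&data, 0, sizeof(data));
            key.data = "hello";
            key.size = strlen("hello");
            data.data = "world";
            data.size = strlen("world");

            if (dbp->put(dbp, &key, &data, 0) != 0)    /* 0 means success. */
                    return (1);
            if (dbp->get(dbp, &key, &data, 0) != 0)
                    return (1);
            printf("%.*s\n", (int)data.size, (char *)data.data);
            return (dbp->close(dbp));
    }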
diff --git a/bdb/test/scr018/chk.comma b/bdb/test/scr018/chk.comma
new file mode 100644
index 00000000000..42df48d1881
--- /dev/null
+++ b/bdb/test/scr018/chk.comma
@@ -0,0 +1,30 @@
+#!/bin/sh -
+#
+# $Id: chk.comma,v 1.1 2001/11/03 18:43:49 bostic Exp $
+#
+# Look for trailing commas in declarations. Some compilers can't handle:
+# enum {
+# foo,
+# bar,
+# };
+
+[ -f ../libdb.a ] || (cd .. && make libdb.a) || {
+ echo 'FAIL: unable to find or build libdb.a'
+ exit 1
+}
+
+if cc -g -Wall -I.. t.c ../libdb.a -o t; then
+ :
+else
+ echo "FAIL: unable to compile test program t.c"
+ exit 1
+fi
+
+if ./t ../../*/*.[ch] ../../*/*.in; then
+ :
+else
+ echo "FAIL: test program failed"
+ exit 1
+fi
+
+exit 0
diff --git a/bdb/test/scr018/t.c b/bdb/test/scr018/t.c
new file mode 100644
index 00000000000..4056a605928
--- /dev/null
+++ b/bdb/test/scr018/t.c
@@ -0,0 +1,46 @@
+#include <sys/types.h>
+
+#include <ctype.h>
+#include <errno.h>
+#include <stdio.h>
+#include <strings.h>
+
+int
+chk(f)
+ char *f;
+{
+ int ch, l, r;
+
+ if (freopen(f, "r", stdin) == NULL) {
+ fprintf(stderr, "%s: %s\n", f, strerror(errno));
+ exit (1);
+ }
+ for (l = 1, r = 0; (ch = getchar()) != EOF;) {
+ if (ch != ',')
+ goto next;
+ do { ch = getchar(); } while (isblank(ch));
+ if (ch != '\n')
+ goto next;
+ ++l;
+ do { ch = getchar(); } while (isblank(ch));
+ if (ch != '}')
+ goto next;
+ r = 1;
+ printf("%s: line %d\n", f, l);
+
+next: if (ch == '\n')
+ ++l;
+ }
+ return (r);
+}
+
+int
+main(int argc, char *argv[])
+{
+ int r;
+
+ for (r = 0; *++argv != NULL;)
+ if (chk(*argv))
+ r = 1;
+ return (r);
+}
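
Note: the chk.comma header already names the construct being hunted; to make it concrete, the two declarations below show what the scr018 scanner flags (a comma, optional blanks, a newline, then a line whose first non-blank character is the closing brace) versus what it accepts. This snippet is illustrative and not part of the commit.

    /* Flagged: trailing comma before the brace that closes the declaration. */
    enum bad_color {
            BAD_RED,
            BAD_GREEN,
            BAD_BLUE,
    };

    /* Portable form the check enforces: no comma after the last enumerator. */
    enum good_color {
            GOOD_RED,
            GOOD_GREEN,
            GOOD_BLUE
    };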
diff --git a/bdb/test/scr019/chk.include b/bdb/test/scr019/chk.include
new file mode 100644
index 00000000000..444217bedb4
--- /dev/null
+++ b/bdb/test/scr019/chk.include
@@ -0,0 +1,40 @@
+#!/bin/sh -
+#
+# $Id: chk.include,v 1.3 2002/03/27 04:33:09 bostic Exp $
+#
+# Check for inclusion of files already included in db_int.h.
+
+d=../..
+
+# Test must be run from the top-level directory, not from a test directory.
+[ -f $d/LICENSE ] || {
+ echo 'FAIL: cannot find source distribution directory.'
+ exit 1
+}
+
+t1=__1
+t2=__2
+
+egrep -- '#include[ ]' $d/dbinc/db_int.in |
+sed -e '/[ ]db\.h'/d \
+ -e 's/^#include.//' \
+ -e 's/[<>"]//g' \
+ -e 's/[ ].*//' > $t1
+
+for i in `cat $t1`; do
+ (cd $d && egrep "^#include[ ].*[<\"]$i[>\"]" */*.[ch])
+done |
+sed -e '/^build/d' \
+ -e '/^db_dump185/d' \
+ -e '/^examples_c/d' \
+ -e '/^libdb_java.*errno.h/d' \
+ -e '/^libdb_java.*java_util.h/d' \
+ -e '/^test_/d' \
+ -e '/^mutex\/tm.c/d' > $t2
+
+[ -s $t2 ] && {
+ echo 'FAIL: found extraneous includes in the source'
+ cat $t2
+ exit 1
+}
+exit 0
diff --git a/bdb/test/scr020/chk.inc b/bdb/test/scr020/chk.inc
new file mode 100644
index 00000000000..189126b10c3
--- /dev/null
+++ b/bdb/test/scr020/chk.inc
@@ -0,0 +1,43 @@
+#!/bin/sh -
+#
+# $Id: chk.inc,v 1.1 2002/02/10 17:14:33 bostic Exp $
+#
+# Check for inclusion of db_config.h after "const" or other includes.
+
+d=../..
+
+# Test must be run from the top-level directory, not from a test directory.
+[ -f $d/LICENSE ] || {
+ echo 'FAIL: cannot find source distribution directory.'
+ exit 1
+}
+
+t1=__1
+t2=__2
+
+(cd $d && find . -name '*.[chys]' -o -name '*.cpp' |
+ xargs egrep -l '#include.*db_config.h') > $t1
+
+:> $t2
+for i in `cat $t1`; do
+ egrep -w 'db_config.h|const' /dev/null $d/$i | head -1 >> $t2
+done
+
+if egrep const $t2 > /dev/null; then
+ echo 'FAIL: found const before include of db_config.h'
+ egrep const $t2
+ exit 1
+fi
+
+:> $t2
+for i in `cat $t1`; do
+ egrep -w '#include' /dev/null $d/$i | head -1 >> $t2
+done
+
+if egrep -v db_config.h $t2 > /dev/null; then
+ echo 'FAIL: found includes before include of db_config.h'
+ egrep -v db_config.h $t2
+ exit 1
+fi
+
+exit 0
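
Note: chk.inc enforces an ordering rule rather than a content rule: in any source file that uses db_config.h, that header must be the first #include, and it must be seen before any use of "const" (autoconf results such as a redefined const need to be in effect before anything else is parsed). A compliant file starts roughly like the sketch below; the particular system headers are immaterial and the snippet is not taken from the commit.

    /* db_config.h first, so configure-time settings apply to everything
     * that follows. */
    #include "db_config.h"

    #include <sys/types.h>

    #include <string.h>

    #include "db_int.h"

    static const char example[] = "db_config.h was seen first";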
diff --git a/bdb/test/scr021/chk.flags b/bdb/test/scr021/chk.flags
new file mode 100644
index 00000000000..1b2bb62cca7
--- /dev/null
+++ b/bdb/test/scr021/chk.flags
@@ -0,0 +1,97 @@
+#!/bin/sh -
+#
+# $Id: chk.flags,v 1.8 2002/08/14 02:19:55 bostic Exp $
+#
+# Check flag name-spaces.
+
+d=../..
+
+t1=__1
+
+# Check for DB_ENV flags.
+(grep 'F_ISSET([^ ]*dbenv,' $d/*/*.[chys];
+ grep 'F_SET([^ ]*dbenv,' $d/*/*.[chys];
+ grep 'F_CLR([^ ]*dbenv,' $d/*/*.[chys]) |
+ sed -e '/DB_ENV_/d' -e '/F_SET([^ ]*dbenv, db_env_reset)/d' > $t1
+[ -s $t1 ] && {
+ cat $t1
+ exit 1
+}
+
+grep 'DB_ENV_' $d/*/*.c |
+sed -e '/F_.*dbenv,/d' \
+ -e '/DB_ENV_TEST_RECOVERY(.*DB_TEST_/d' \
+ -e '/\/libdb_java\//d' > $t1
+[ -s $t1 ] && {
+ cat $t1
+ exit 1
+}
+
+# Check for DB flags.
+(grep 'F_ISSET([^ ]*dbp,' $d/*/*.[chys];
+ grep 'F_SET([^ ]*dbp,' $d/*/*.[chys];
+ grep 'F_CLR([^ ]*dbp,' $d/*/*.[chys]) |
+ sed -e '/DB_AM_/d' \
+ -e '/db.c:.*F_SET.*F_ISSET(subdbp,/d' > $t1
+[ -s $t1 ] && {
+ cat $t1
+ exit 1
+}
+
+grep 'DB_AM_' $d/*/*.c |
+sed -e '/F_.*dbp/d' \
+ -e '/"DB->open", dbp->flags, DB_AM_DUP,/d' \
+ -e '/"DB_NODUPDATA" behavior for databases with/d' \
+ -e '/If DB_AM_OPEN_CALLED is not set, then we/d' \
+ -e '/This was checked in set_flags when DB_AM_ENCRYPT/d' \
+ -e '/XA_ABORT, we can safely set DB_AM_RECOVER/d' \
+ -e '/ DB_AM_RECNUM\./d' \
+ -e '/ DB_AM_RECOVER set\./d' \
+ -e '/isdup = dbp->flags & DB_AM_DUP/d' \
+ -e '/otherwise we simply do/d' \
+ -e '/pginfo/d' \
+ -e '/setting DB_AM_RECOVER, we guarantee that we don/d' \
+ -e '/:[ {]*DB_AM_/d' > $t1
+[ -s $t1 ] && {
+ cat $t1
+ exit 1
+}
+
+# Check for DBC flags.
+(grep 'F_ISSET([^ ]*dbc,' $d/*/*.[chys];
+ grep 'F_SET([^ ]*dbc,' $d/*/*.[chys];
+ grep 'F_CLR([^ ]*dbc,' $d/*/*.[chys]) |
+ sed -e '/DBC_/d' > $t1
+[ -s $t1 ] && {
+ cat $t1
+ exit 1
+}
+
+grep 'DBC_' $d/*/*.c |
+sed -e '/F_.*dbc/d' \
+ -e '/DBC_INTERNAL/d' \
+ -e '/DBC_LOGGING/d' \
+ -e '/Do the actual get. Set DBC_TRANSIENT/d' \
+ -e '/If DBC_WRITEDUP is set, the cursor is an in/d' \
+ -e '/The DBC_TRANSIENT flag indicates that we/d' \
+ -e '/This function replaces the DBC_CONTINUE and DBC_KEYSET/d' \
+ -e '/db_cam.c:.*F_CLR(opd, DBC_ACTIVE);/d' \
+ -e '/{ DBC_/d' > $t1
+[ -s $t1 ] && {
+ cat $t1
+ exit 1
+}
+
+# Check for bad use of macros.
+egrep 'case .*F_SET\(|case .*F_CLR\(' $d/*/*.c > $t1
+egrep 'for .*F_SET\(|for .*F_CLR\(' $d/*/*.c >> $t1
+egrep 'if .*F_SET\(|if .*F_CLR\(' $d/*/*.c >> $t1
+egrep 'switch .*F_SET\(|switch .*F_CLR\(' $d/*/*.c >> $t1
+egrep 'while .*F_SET\(|while .*F_CLR\(' $d/*/*.c >> $t1
+[ -s $t1 ] && {
+ echo 'if statement followed by non-test macro'
+ cat $t1
+ exit 1
+}
+
+exit 0
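
Note: chk.flags enforces two conventions at once: each flag namespace is used only with its matching handle (DB_ENV_* with a dbenv, DB_AM_* with a dbp, DBC_* with a cursor), and the set/clear macros never appear where only the test macro makes sense (after if, while, switch, case, or for). The macros themselves are the usual bit-flag idiom; the self-contained toy below mirrors it with made-up names, since the real definitions live in Berkeley DB's internal headers and are not shown in this commit.

    #include <stdio.h>

    /* Bit-flag helpers in the style chk.flags polices (names are ours). */
    #define F_SET(p, f)     ((p)->flags |= (f))
    #define F_CLR(p, f)     ((p)->flags &= ~(f))
    #define F_ISSET(p, f)   ((p)->flags & (f))

    #define DEMO_ENV_THREAD 0x0001          /* stands in for a DB_ENV_* flag */

    struct demo_env {
            unsigned int flags;
    };

    int
    main()
    {
            struct demo_env env = { 0 };

            F_SET(&env, DEMO_ENV_THREAD);
            /* Correct: a test macro in test position.  Writing
             * "if (F_SET(...))" here is exactly what the last block of
             * the script rejects. */
            if (F_ISSET(&env, DEMO_ENV_THREAD))
                    printf("thread flag is set\n");
            F_CLR(&env, DEMO_ENV_THREAD);
            return (0);
    }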
diff --git a/bdb/test/scr022/chk.rr b/bdb/test/scr022/chk.rr
new file mode 100644
index 00000000000..df230315299
--- /dev/null
+++ b/bdb/test/scr022/chk.rr
@@ -0,0 +1,22 @@
+#!/bin/sh -
+#
+# $Id: chk.rr,v 1.1 2002/04/19 15:13:05 bostic Exp $
+
+d=../..
+
+t1=__1
+
+# Check for DB_RUNRECOVERY being specified instead of a call to db_panic.
+egrep DB_RUNRECOVERY $d/*/*.c |
+ sed -e '/common\/db_err.c:/d' \
+ -e '/libdb_java\/java_util.c:/d' \
+ -e '/db_dispatch.c:.*if (ret == DB_RUNRECOVERY/d' \
+ -e '/txn.c:.* \* DB_RUNRECOVERY and we need to/d' \
+ -e '/__db_panic(.*, DB_RUNRECOVERY)/d' > $t1
+[ -s $t1 ] && {
+ echo "DB_RUNRECOVERY used; should be a call to db_panic."
+ cat $t1
+ exit 1
+}
+
+exit 0
diff --git a/bdb/test/sdb001.tcl b/bdb/test/sdb001.tcl
index 938b6c10c6d..a03160e0ab7 100644
--- a/bdb/test/sdb001.tcl
+++ b/bdb/test/sdb001.tcl
@@ -1,24 +1,42 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1999, 2000
+# Copyright (c) 1999-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: sdb001.tcl,v 11.12 2000/08/25 14:21:52 sue Exp $
+# $Id: sdb001.tcl,v 11.18 2002/06/10 15:39:36 sue Exp $
#
-# Sub DB Test 1 {access method}
-# Test non-subdb and subdb operations
-# Test naming (filenames begin with -)
-# Test existence (cannot create subdb of same name with -excl)
+# TEST subdb001 Tests mixing db and subdb operations
+# TEST Tests mixing db and subdb operations
+# TEST Create a db, add data, try to create a subdb.
+# TEST Test naming db and subdb with a leading - for correct parsing
+# TEST Existence check -- test use of -excl with subdbs
+# TEST
+# TEST Test non-subdb and subdb operations
+# TEST Test naming (filenames begin with -)
+# TEST Test existence (cannot create subdb of same name with -excl)
proc subdb001 { method args } {
source ./include.tcl
+ global errorInfo
set args [convert_args $method $args]
set omethod [convert_method $method]
+ if { [is_queue $method] == 1 } {
+ puts "Subdb001: skipping for method $method"
+ return
+ }
puts "Subdb001: $method ($args) subdb and non-subdb tests"
- # Create the database and open the dictionary
set testfile $testdir/subdb001.db
+ set eindex [lsearch -exact $args "-env"]
+ if { $eindex != -1 } {
+ set env NULL
+ incr eindex
+ set env [lindex $args $eindex]
+ puts "Subdb001 skipping for env $env"
+ return
+ }
+ # Create the database and open the dictionary
set subdb subdb0
cleanup $testdir NULL
puts "\tSubdb001.a: Non-subdb database and subdb operations"
@@ -27,7 +45,7 @@ proc subdb001 { method args } {
# open/add with a subdb. Should fail.
#
puts "\tSubdb001.a.0: Create db, add data, close, try subdb"
- set db [eval {berkdb_open -create -truncate -mode 0644} \
+ set db [eval {berkdb_open -create -mode 0644} \
$args {$omethod $testfile}]
error_check_good dbopen [is_valid_db $db] TRUE
@@ -70,6 +88,12 @@ proc subdb001 { method args } {
#
set testfile $testdir/subdb001a.db
puts "\tSubdb001.a.1: Create db, close, try subdb"
+ #
+ # !!!
+ # Using -truncate is illegal when opening for subdbs, but we
+ # can use it here because we are not using subdbs for this
+ # create.
+ #
set db [eval {berkdb_open -create -truncate -mode 0644} $args \
{$omethod $testfile}]
error_check_good dbopen [is_valid_db $db] TRUE
@@ -108,9 +132,18 @@ proc subdb001 { method args } {
# Create 1 db with 1 subdb. Try to create another subdb of
# the same name. Should fail.
#
- puts "\tSubdb001.c: Existence check"
+ puts "\tSubdb001.c: Truncate check"
set testfile $testdir/subdb001c.db
set subdb subdb
+ set stat [catch {eval {berkdb_open_noerr -create -truncate -mode 0644} \
+ $args {$omethod $testfile $subdb}} ret]
+ error_check_bad dbopen $stat 0
+ error_check_good trunc [is_substr $ret \
+ "illegal with multiple databases"] 1
+
+ puts "\tSubdb001.d: Existence check"
+ set testfile $testdir/subdb001d.db
+ set subdb subdb
set ret [catch {eval {berkdb_open -create -excl -mode 0644} $args \
{$omethod $testfile $subdb}} db]
error_check_good dbopen [is_valid_db $db] TRUE
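
Note: the sdb0*.tcl changes in the rest of this patch revolve around subdatabases: several independently named databases stored in one physical file, opened by passing the subdatabase name alongside the file name. The new comment in subdb001 also records that -truncate is illegal when subdatabases are in use, which is what the added "Truncate check" verifies. In the C API of this release the subdatabase name is the fourth argument to DB->open(), which in 4.1 also takes a transaction argument; a rough sketch follows, assuming those 4.1 signatures (not code from this commit, and the file and subdb names are arbitrary).

    #include <stdio.h>

    #include "db.h"

    int
    main()
    {
            DB *dbp;
            int ret;

            if ((ret = db_create(&dbp, NULL, 0)) != 0) {
                    fprintf(stderr, "db_create: %s\n", db_strerror(ret));
                    return (1);
            }
            /* "subdb0" names a subdatabase inside the file subdb001.db;
             * the NULL second argument is the (absent) transaction. */
            if ((ret = dbp->open(dbp, NULL,
                "subdb001.db", "subdb0", DB_BTREE, DB_CREATE, 0644)) != 0) {
                    dbp->err(dbp, ret, "DB->open");
                    (void)dbp->close(dbp, 0);
                    return (1);
            }
            return (dbp->close(dbp, 0));
    }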
diff --git a/bdb/test/sdb002.tcl b/bdb/test/sdb002.tcl
index 11547195c02..4757e12afc7 100644
--- a/bdb/test/sdb002.tcl
+++ b/bdb/test/sdb002.tcl
@@ -1,20 +1,47 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1999, 2000
+# Copyright (c) 1999-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: sdb002.tcl,v 11.20 2000/09/20 13:22:04 sue Exp $
+# $Id: sdb002.tcl,v 11.35 2002/08/23 18:01:53 sandstro Exp $
#
-# Sub DB Test 2 {access method}
-# Use the first 10,000 entries from the dictionary.
-# Insert each with self as key and data; retrieve each.
-# After all are entered, retrieve all; compare output to original.
-# Close file, reopen, do retrieve and re-verify.
-# Then repeat using an environment.
+# TEST subdb002
+# TEST Tests basic subdb functionality
+# TEST Small keys, small data
+# TEST Put/get per key
+# TEST Dump file
+# TEST Close, reopen
+# TEST Dump file
+# TEST
+# TEST Use the first 10,000 entries from the dictionary.
+# TEST Insert each with self as key and data; retrieve each.
+# TEST After all are entered, retrieve all; compare output to original.
+# TEST Close file, reopen, do retrieve and re-verify.
+# TEST Then repeat using an environment.
proc subdb002 { method {nentries 10000} args } {
+ global passwd
+
+ set eindex [lsearch -exact $args "-env"]
+ if { $eindex != -1 } {
+ set env NULL
+ incr eindex
+ set env [lindex $args $eindex]
+ puts "Subdb002 skipping for env $env"
+ return
+ }
+ set largs $args
+ subdb002_main $method $nentries $largs
+ append largs " -chksum "
+ subdb002_main $method $nentries $largs
+ append largs "-encryptaes $passwd "
+ subdb002_main $method $nentries $largs
+}
+
+proc subdb002_main { method nentries largs } {
source ./include.tcl
+ global encrypt
- set largs [convert_args $method $args]
+ set largs [convert_args $method $largs]
set omethod [convert_method $method]
env_cleanup $testdir
@@ -23,8 +50,20 @@ proc subdb002 { method {nentries 10000} args } {
set testfile $testdir/subdb002.db
subdb002_body $method $omethod $nentries $largs $testfile NULL
+ # Run convert_encrypt so that old_encrypt will be reset to
+ # the proper value and cleanup will work.
+ convert_encrypt $largs
+ set encargs ""
+ set largs [split_encargs $largs encargs]
+
cleanup $testdir NULL
- set env [berkdb env -create -mode 0644 -txn -home $testdir]
+ if { [is_queue $omethod] == 1 } {
+ set sdb002_env berkdb_env_noerr
+ } else {
+ set sdb002_env berkdb_env
+ }
+ set env [eval {$sdb002_env -create -cachesize {0 10000000 0} \
+ -mode 0644 -txn} -home $testdir $encargs]
error_check_good env_open [is_valid_env $env] TRUE
puts "Subdb002: $method ($largs) basic subdb tests in an environment"
@@ -36,6 +75,8 @@ proc subdb002 { method {nentries 10000} args } {
}
proc subdb002_body { method omethod nentries largs testfile env } {
+ global encrypt
+ global passwd
source ./include.tcl
# Create the database and open the dictionary
@@ -130,7 +171,7 @@ proc subdb002_body { method omethod nentries largs testfile env } {
puts "\tSubdb002.c: close, open, and dump file"
# Now, reopen the file and run the last test again.
- open_and_dump_subfile $testfile $env $txn $t1 $checkfunc \
+ open_and_dump_subfile $testfile $env $t1 $checkfunc \
dump_file_direction "-first" "-next" $subdb
if { [is_record_based $method] != 1 } {
filesort $t1 $t3
@@ -142,7 +183,7 @@ proc subdb002_body { method omethod nentries largs testfile env } {
# Now, reopen the file and run the last test again in the
# reverse direction.
puts "\tSubdb002.d: close, open, and dump file in reverse direction"
- open_and_dump_subfile $testfile $env $txn $t1 $checkfunc \
+ open_and_dump_subfile $testfile $env $t1 $checkfunc \
dump_file_direction "-last" "-prev" $subdb
if { [is_record_based $method] != 1 } {
@@ -151,6 +192,19 @@ proc subdb002_body { method omethod nentries largs testfile env } {
error_check_good Subdb002:diff($t3,$t2) \
[filecmp $t3 $t2] 0
+
+ puts "\tSubdb002.e: db_dump with subdatabase"
+ set outfile $testdir/subdb002.dump
+ set dumpargs " -f $outfile -s $subdb "
+ if { $encrypt > 0 } {
+ append dumpargs " -P $passwd "
+ }
+ if { $env != "NULL" } {
+ append dumpargs " -h $testdir "
+ }
+ append dumpargs " $testfile"
+ set stat [catch {eval {exec $util_path/db_dump} $dumpargs} ret]
+ error_check_good dbdump.subdb $stat 0
}
# Check function for Subdb002; keys and data are identical
diff --git a/bdb/test/sdb003.tcl b/bdb/test/sdb003.tcl
index 32bb93d5236..5d1536d8c84 100644
--- a/bdb/test/sdb003.tcl
+++ b/bdb/test/sdb003.tcl
@@ -1,15 +1,19 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1999, 2000
+# Copyright (c) 1999-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: sdb003.tcl,v 11.17 2000/08/25 14:21:52 sue Exp $
+# $Id: sdb003.tcl,v 11.24 2002/06/10 15:39:37 sue Exp $
#
-# Sub DB Test 3 {access method}
-# Use the first 10,000 entries from the dictionary as subdbnames.
-# Insert each with entry as name of subdatabase and a partial list as key/data.
-# After all are entered, retrieve all; compare output to original.
-# Close file, reopen, do retrieve and re-verify.
+# TEST subdb003
+# TEST Tests many subdbs
+# TEST Creates many subdbs and puts a small amount of
+# TEST data in each (many defaults to 2000)
+# TEST
+# TEST Use the first 10,000 entries from the dictionary as subdbnames.
+# TEST Insert each with entry as name of subdatabase and a partial list
+# TEST as key/data. After all are entered, retrieve all; compare output
+# TEST to original. Close file, reopen, do retrieve and re-verify.
proc subdb003 { method {nentries 1000} args } {
source ./include.tcl
@@ -23,12 +27,32 @@ proc subdb003 { method {nentries 1000} args } {
puts "Subdb003: $method ($args) many subdb tests"
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/subdb003.db
+ set env NULL
+ } else {
+ set testfile subdb003.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ if { $nentries == 1000 } {
+ set nentries 100
+ }
+ }
+ set testdir [get_home $env]
+ }
# Create the database and open the dictionary
- set testfile $testdir/subdb003.db
set t1 $testdir/t1
set t2 $testdir/t2
set t3 $testdir/t3
- cleanup $testdir NULL
+ cleanup $testdir $env
set pflags ""
set gflags ""
@@ -62,18 +86,35 @@ proc subdb003 { method {nentries 1000} args } {
} else {
set key $str
}
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
set ret [eval {$db put} \
$txn $pflags {$key [chop_data $method $str]}]
error_check_good put $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
set ret [eval {$db get} $gflags {$key}]
- error_check_good get $ret [list [list $key [pad_data $method $str]]]
+ error_check_good get $ret [list [list $key \
+ [pad_data $method $str]]]
incr count
}
close $did
incr fcount
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
dump_file $db $txn $t1 $checkfunc
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
error_check_good db_close [$db close] 0
# Now compare the keys to see if they match
@@ -95,7 +136,7 @@ proc subdb003 { method {nentries 1000} args } {
[filecmp $t3 $t2] 0
# Now, reopen the file and run the last test again.
- open_and_dump_subfile $testfile NULL $txn $t1 $checkfunc \
+ open_and_dump_subfile $testfile $env $t1 $checkfunc \
dump_file_direction "-first" "-next" $subdb
if { [is_record_based $method] != 1 } {
filesort $t1 $t3
@@ -106,7 +147,7 @@ proc subdb003 { method {nentries 1000} args } {
# Now, reopen the file and run the last test again in the
# reverse direction.
- open_and_dump_subfile $testfile NULL $txn $t1 $checkfunc \
+ open_and_dump_subfile $testfile $env $t1 $checkfunc \
dump_file_direction "-last" "-prev" $subdb
if { [is_record_based $method] != 1 } {
@@ -120,6 +161,7 @@ proc subdb003 { method {nentries 1000} args } {
flush stdout
}
}
+ close $fdid
puts ""
}
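
Note: a large share of the sdb003/sdb004/sdb005 changes is the same mechanical pattern: when the test runs in a transactional environment, each put or dump is wrapped in an explicit "$env txn" ... "$t commit" pair and the databases are opened with -auto_commit. The C equivalent of that wrapper looks roughly like the sketch below, assuming the 4.1 method names; it is not code from this commit.

    #include "db.h"

    /* Sketch: perform one DB->put inside its own transaction. */
    int
    put_in_txn(DB_ENV *dbenv, DB *dbp, DBT *key, DBT *data)
    {
            DB_TXN *tid;
            int ret;

            if ((ret = dbenv->txn_begin(dbenv, NULL, &tid, 0)) != 0)
                    return (ret);
            if ((ret = dbp->put(dbp, tid, key, data, 0)) != 0) {
                    (void)tid->abort(tid);
                    return (ret);
            }
            return (tid->commit(tid, 0));
    }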
diff --git a/bdb/test/sdb004.tcl b/bdb/test/sdb004.tcl
index fb63f9d6d1d..d3d95f1fde0 100644
--- a/bdb/test/sdb004.tcl
+++ b/bdb/test/sdb004.tcl
@@ -1,15 +1,23 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1999, 2000
+# Copyright (c) 1999-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: sdb004.tcl,v 11.14 2000/08/25 14:21:53 sue Exp $
+# $Id: sdb004.tcl,v 11.22 2002/07/11 18:53:45 sandstro Exp $
#
-# SubDB Test 4 {access method}
-# Create 1 db with many large subdbs. Use the contents as subdb names.
-# Take the source files and dbtest executable and enter their names as the
-# key with their contents as data. After all are entered, retrieve all;
-# compare output to original. Close file, reopen, do retrieve and re-verify.
+# TEST subdb004
+# TEST Tests large subdb names
+# TEST subdb name = filecontents,
+# TEST key = filename, data = filecontents
+# TEST Put/get per key
+# TEST Dump file
+# TEST Dump subdbs, verify data and subdb name match
+# TEST
+# TEST Create 1 db with many large subdbs. Use the contents as subdb names.
+# TEST Take the source files and dbtest executable and enter their names as
+# TEST the key with their contents as data. After all are entered, retrieve
+# TEST all; compare output to original. Close file, reopen, do retrieve and
+# TEST re-verify.
proc subdb004 { method args} {
global names
source ./include.tcl
@@ -25,14 +33,34 @@ proc subdb004 { method args} {
puts "Subdb004: $method ($args) \
filecontents=subdbname filename=key filecontents=data pairs"
+ set txnenv 0
+ set envargs ""
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/subdb004.db
+ set env NULL
+ } else {
+ set testfile subdb004.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set envargs " -env $env "
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ append envargs " -auto_commit "
+ }
+ set testdir [get_home $env]
+ }
# Create the database and open the dictionary
- set testfile $testdir/subdb004.db
set t1 $testdir/t1
set t2 $testdir/t2
set t3 $testdir/t3
set t4 $testdir/t4
- cleanup $testdir NULL
+ cleanup $testdir $env
set pflags ""
set gflags ""
set txn ""
@@ -44,8 +72,14 @@ proc subdb004 { method args} {
}
# Here is the loop where we put and get each key/data pair
- set file_list [glob ../*/*.c ./libdb.so.3.0 ./libtool ./libtool.exe]
+ # Note that the subdatabase name is passed in as a char *, not
+ # in a DBT, so it may not contain nulls; use only source files.
+ set file_list [glob $src_root/*/*.c]
set fcount [llength $file_list]
+ if { $txnenv == 1 && $fcount > 100 } {
+ set file_list [lrange $file_list 0 99]
+ set fcount 100
+ }
set count 0
if { [is_record_based $method] == 1 } {
@@ -79,9 +113,17 @@ proc subdb004 { method args} {
set db [eval {berkdb_open -create -mode 0644} \
$args {$omethod $testfile $subdb}]
error_check_good dbopen [is_valid_db $db] TRUE
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
set ret [eval \
{$db put} $txn $pflags {$key [chop_data $method $data]}]
error_check_good put $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
# Should really catch errors
set fid [open $t4 w]
@@ -104,7 +146,15 @@ proc subdb004 { method args} {
# Now we will get each key from the DB and compare the results
# to the original.
# puts "\tSubdb004.b: dump file"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
dump_bin_file $db $txn $t1 $checkfunc
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
error_check_good db_close [$db close] 0
}
@@ -114,21 +164,30 @@ proc subdb004 { method args} {
# as the data in that subdb and that the filename is the key.
#
puts "\tSubdb004.b: Compare subdb names with key/data"
- set db [berkdb_open -rdonly $testfile]
+ set db [eval {berkdb_open -rdonly} $envargs {$testfile}]
error_check_good dbopen [is_valid_db $db] TRUE
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
set c [eval {$db cursor} $txn]
error_check_good db_cursor [is_valid_cursor $c $db] TRUE
for {set d [$c get -first] } { [llength $d] != 0 } \
{set d [$c get -next] } {
set subdbname [lindex [lindex $d 0] 0]
- set subdb [berkdb_open $testfile $subdbname]
+ set subdb [eval {berkdb_open} $args {$testfile $subdbname}]
error_check_good dbopen [is_valid_db $db] TRUE
# Output the subdb name
set ofid [open $t3 w]
fconfigure $ofid -translation binary
- set subdbname [string trimright $subdbname \0]
+ if { [string compare "\0" \
+ [string range $subdbname end end]] == 0 } {
+ set slen [expr [string length $subdbname] - 2]
+ set subdbname [string range $subdbname 1 $slen]
+ }
puts -nonewline $ofid $subdbname
close $ofid
@@ -154,6 +213,9 @@ proc subdb004 { method args} {
error_check_good db_close [$subdb close] 0
}
error_check_good curs_close [$c close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
error_check_good db_close [$db close] 0
if { [is_record_based $method] != 1 } {
diff --git a/bdb/test/sdb005.tcl b/bdb/test/sdb005.tcl
index 22e4083c46c..98cea5b348b 100644
--- a/bdb/test/sdb005.tcl
+++ b/bdb/test/sdb005.tcl
@@ -1,11 +1,16 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1999, 2000
+# Copyright (c) 1999-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: sdb005.tcl,v 11.12 2000/08/25 14:21:53 sue Exp $
+# $Id: sdb005.tcl,v 11.18 2002/07/11 18:53:46 sandstro Exp $
#
-# Test cursor operations between subdbs.
+# TEST subdb005
+# TEST Tests cursor operations in subdbs
+# TEST Put/get per key
+# TEST Verify cursor operations work within subdb
+# TEST Verify cursor operations do not work across subdbs
+# TEST
#
# We should test this on all btrees, all hash, and a combination thereof
proc subdb005 {method {nentries 100} args } {
@@ -20,21 +25,50 @@ proc subdb005 {method {nentries 100} args } {
}
puts "Subdb005: $method ( $args ) subdb cursor operations test"
+ set txnenv 0
+ set envargs ""
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/subdb005.db
+ set env NULL
+ } else {
+ set testfile subdb005.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set envargs " -env $env "
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ append envargs " -auto_commit "
+ if { $nentries == 100 } {
+ set nentries 20
+ }
+ }
+ set testdir [get_home $env]
+ }
+
+ cleanup $testdir $env
set txn ""
- cleanup $testdir NULL
set psize 8192
- set testfile $testdir/subdb005.db
set duplist {-1 -1 -1 -1 -1}
build_all_subdb \
- $testfile [list $method] [list $psize] $duplist $nentries $args
+ $testfile [list $method] $psize $duplist $nentries $args
set numdb [llength $duplist]
#
# Get a cursor in each subdb and move past the end of each
# subdb. Make sure we don't end up in another subdb.
#
puts "\tSubdb005.a: Cursor ops - first/prev and last/next"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
for {set i 0} {$i < $numdb} {incr i} {
- set db [berkdb_open -unknown $testfile sub$i.db]
+ set db [eval {berkdb_open -unknown} $args {$testfile sub$i.db}]
error_check_good dbopen [is_valid_db $db] TRUE
set db_handle($i) $db
# Used in 005.c test
@@ -54,6 +88,7 @@ proc subdb005 {method {nentries 100} args } {
error_check_good dbc_get [expr [llength $d] != 0] 1
set d [$dbc get -next]
error_check_good dbc_get [expr [llength $d] == 0] 1
+ error_check_good dbc_close [$dbc close] 0
}
#
# Get a key from each subdb and try to get this key in a
@@ -67,15 +102,17 @@ proc subdb005 {method {nentries 100} args } {
}
set db $db_handle($i)
if { [is_record_based $method] == 1 } {
- set d [$db get -recno $db_key($n)]
+ set d [eval {$db get -recno} $txn {$db_key($n)}]
error_check_good \
db_get [expr [llength $d] == 0] 1
} else {
- set d [$db get $db_key($n)]
+ set d [eval {$db get} $txn {$db_key($n)}]
error_check_good db_get [expr [llength $d] == 0] 1
}
}
-
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
#
# Clean up
#
@@ -92,7 +129,7 @@ proc subdb005 {method {nentries 100} args } {
{berkdb_open_noerr -unknown $testfile} ret] 0
puts "\tSubdb005.d: Check contents of DB for subdb names only"
- set db [berkdb_open -unknown -rdonly $testfile]
+ set db [eval {berkdb_open -unknown -rdonly} $envargs {$testfile}]
error_check_good dbopen [is_valid_db $db] TRUE
set subdblist [$db get -glob *]
foreach kd $subdblist {
diff --git a/bdb/test/sdb006.tcl b/bdb/test/sdb006.tcl
index 70dee5c7343..fd6066b08d6 100644
--- a/bdb/test/sdb006.tcl
+++ b/bdb/test/sdb006.tcl
@@ -1,17 +1,20 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1999, 2000
+# Copyright (c) 1999-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: sdb006.tcl,v 11.12 2000/09/20 13:22:03 sue Exp $
+# $Id: sdb006.tcl,v 11.20 2002/06/20 19:01:02 sue Exp $
#
-# We'll test 2-way, 3-way, and 4-way joins and figure that if those work,
-# everything else does as well. We'll create test databases called
-# sub1.db, sub2.db, sub3.db, and sub4.db. The number on the database
-# describes the duplication -- duplicates are of the form 0, N, 2N, 3N, ...
-# where N is the number of the database. Primary.db is the primary database,
-# and sub0.db is the database that has no matching duplicates. All of
-# these are within a single database.
+# TEST subdb006
+# TEST Tests intra-subdb join
+# TEST
+# TEST We'll test 2-way, 3-way, and 4-way joins and figure that if those work,
+# TEST everything else does as well. We'll create test databases called
+# TEST sub1.db, sub2.db, sub3.db, and sub4.db. The number on the database
+# TEST describes the duplication -- duplicates are of the form 0, N, 2N, 3N,
+# TEST ... where N is the number of the database. Primary.db is the primary
+# TEST database, and sub0.db is the database that has no matching duplicates.
+# TEST All of these are within a single database.
#
# We should test this on all btrees, all hash, and a combination thereof
proc subdb006 {method {nentries 100} args } {
@@ -27,8 +30,34 @@ proc subdb006 {method {nentries 100} args } {
return
}
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/subdb006.db
+ set env NULL
+ } else {
+ set testfile subdb006.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ if { $nentries == 100 } {
+ # !!!
+ # nentries must be greater than the number
+ # of do_join_subdb calls below.
+ #
+ set nentries 35
+ }
+ }
+ set testdir [get_home $env]
+ }
berkdb srand $rand_init
+ set oargs $args
foreach opt {" -dup" " -dupsort"} {
append args $opt
@@ -40,10 +69,12 @@ proc subdb006 {method {nentries 100} args } {
#
puts "\tSubdb006.a: Intra-subdb join"
- cleanup $testdir NULL
- set testfile $testdir/subdb006.db
+ if { $env != "NULL" } {
+ set testdir [get_home $env]
+ }
+ cleanup $testdir $env
- set psize [list 8192]
+ set psize 8192
set duplist {0 50 25 16 12}
set numdb [llength $duplist]
build_all_subdb $testfile [list $method] $psize \
@@ -53,77 +84,85 @@ proc subdb006 {method {nentries 100} args } {
puts "Subdb006: Building the primary database $method"
set oflags "-create -mode 0644 [conv $omethod \
[berkdb random_int 1 2]]"
- set db [eval {berkdb_open} $oflags $testfile primary.db]
+ set db [eval {berkdb_open} $oflags $oargs $testfile primary.db]
error_check_good dbopen [is_valid_db $db] TRUE
for { set i 0 } { $i < 1000 } { incr i } {
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
set key [format "%04d" $i]
- set ret [$db put $key stub]
+ set ret [eval {$db put} $txn {$key stub}]
error_check_good "primary put" $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
}
error_check_good "primary close" [$db close] 0
set did [open $dict]
gets $did str
- do_join_subdb $testfile primary.db "1 0" $str
+ do_join_subdb $testfile primary.db "1 0" $str $oargs
gets $did str
- do_join_subdb $testfile primary.db "2 0" $str
+ do_join_subdb $testfile primary.db "2 0" $str $oargs
gets $did str
- do_join_subdb $testfile primary.db "3 0" $str
+ do_join_subdb $testfile primary.db "3 0" $str $oargs
gets $did str
- do_join_subdb $testfile primary.db "4 0" $str
+ do_join_subdb $testfile primary.db "4 0" $str $oargs
gets $did str
- do_join_subdb $testfile primary.db "1" $str
+ do_join_subdb $testfile primary.db "1" $str $oargs
gets $did str
- do_join_subdb $testfile primary.db "2" $str
+ do_join_subdb $testfile primary.db "2" $str $oargs
gets $did str
- do_join_subdb $testfile primary.db "3" $str
+ do_join_subdb $testfile primary.db "3" $str $oargs
gets $did str
- do_join_subdb $testfile primary.db "4" $str
+ do_join_subdb $testfile primary.db "4" $str $oargs
gets $did str
- do_join_subdb $testfile primary.db "1 2" $str
+ do_join_subdb $testfile primary.db "1 2" $str $oargs
gets $did str
- do_join_subdb $testfile primary.db "1 2 3" $str
+ do_join_subdb $testfile primary.db "1 2 3" $str $oargs
gets $did str
- do_join_subdb $testfile primary.db "1 2 3 4" $str
+ do_join_subdb $testfile primary.db "1 2 3 4" $str $oargs
gets $did str
- do_join_subdb $testfile primary.db "2 1" $str
+ do_join_subdb $testfile primary.db "2 1" $str $oargs
gets $did str
- do_join_subdb $testfile primary.db "3 2 1" $str
+ do_join_subdb $testfile primary.db "3 2 1" $str $oargs
gets $did str
- do_join_subdb $testfile primary.db "4 3 2 1" $str
+ do_join_subdb $testfile primary.db "4 3 2 1" $str $oargs
gets $did str
- do_join_subdb $testfile primary.db "1 3" $str
+ do_join_subdb $testfile primary.db "1 3" $str $oargs
gets $did str
- do_join_subdb $testfile primary.db "3 1" $str
+ do_join_subdb $testfile primary.db "3 1" $str $oargs
gets $did str
- do_join_subdb $testfile primary.db "1 4" $str
+ do_join_subdb $testfile primary.db "1 4" $str $oargs
gets $did str
- do_join_subdb $testfile primary.db "4 1" $str
+ do_join_subdb $testfile primary.db "4 1" $str $oargs
gets $did str
- do_join_subdb $testfile primary.db "2 3" $str
+ do_join_subdb $testfile primary.db "2 3" $str $oargs
gets $did str
- do_join_subdb $testfile primary.db "3 2" $str
+ do_join_subdb $testfile primary.db "3 2" $str $oargs
gets $did str
- do_join_subdb $testfile primary.db "2 4" $str
+ do_join_subdb $testfile primary.db "2 4" $str $oargs
gets $did str
- do_join_subdb $testfile primary.db "4 2" $str
+ do_join_subdb $testfile primary.db "4 2" $str $oargs
gets $did str
- do_join_subdb $testfile primary.db "3 4" $str
+ do_join_subdb $testfile primary.db "3 4" $str $oargs
gets $did str
- do_join_subdb $testfile primary.db "4 3" $str
+ do_join_subdb $testfile primary.db "4 3" $str $oargs
gets $did str
- do_join_subdb $testfile primary.db "2 3 4" $str
+ do_join_subdb $testfile primary.db "2 3 4" $str $oargs
gets $did str
- do_join_subdb $testfile primary.db "3 4 1" $str
+ do_join_subdb $testfile primary.db "3 4 1" $str $oargs
gets $did str
- do_join_subdb $testfile primary.db "4 2 1" $str
+ do_join_subdb $testfile primary.db "4 2 1" $str $oargs
gets $did str
- do_join_subdb $testfile primary.db "0 2 1" $str
+ do_join_subdb $testfile primary.db "0 2 1" $str $oargs
gets $did str
- do_join_subdb $testfile primary.db "3 2 0" $str
+ do_join_subdb $testfile primary.db "3 2 0" $str $oargs
gets $did str
- do_join_subdb $testfile primary.db "4 3 2 1" $str
+ do_join_subdb $testfile primary.db "4 3 2 1" $str $oargs
gets $did str
- do_join_subdb $testfile primary.db "4 3 0 1" $str
+ do_join_subdb $testfile primary.db "4 3 0 1" $str $oargs
close $did
}
diff --git a/bdb/test/sdb007.tcl b/bdb/test/sdb007.tcl
index 6b56fd411dd..0f9488a92a1 100644
--- a/bdb/test/sdb007.tcl
+++ b/bdb/test/sdb007.tcl
@@ -1,19 +1,24 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1999, 2000
+# Copyright (c) 1999-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: sdb007.tcl,v 11.13 2000/12/11 17:24:55 sue Exp $
+# $Id: sdb007.tcl,v 11.20 2002/07/11 18:53:46 sandstro Exp $
#
-# Sub DB Test 7 {access method}
-# Use the first 10,000 entries from the dictionary spread across each subdb.
-# Use a different page size for every subdb.
-# Insert each with self as key and data; retrieve each.
-# After all are entered, retrieve all; compare output to original.
-# Close file, reopen, do retrieve and re-verify.
-proc subdb007 { method {nentries 10000} args } {
+# TEST subdb007
+# TEST Tests page size difference errors between subdbs.
+# TEST Test 3 different scenarios for page sizes.
+# TEST 1. Create/open with a default page size, 2nd subdb create with
+# TEST specified different one, should error.
+# TEST 2. Create/open with specific page size, 2nd subdb create with
+# TEST different one, should error.
+# TEST 3. Create/open with specified page size, 2nd subdb create with
+# TEST same specified size, should succeed.
+# TEST (4th combo of using all defaults is a basic test, done elsewhere)
+proc subdb007 { method args } {
source ./include.tcl
+ set db2args [convert_args -btree $args]
set args [convert_args $method $args]
set omethod [convert_method $method]
@@ -23,101 +28,105 @@ proc subdb007 { method {nentries 10000} args } {
}
set pgindex [lsearch -exact $args "-pagesize"]
if { $pgindex != -1 } {
- puts "Subdb007: skipping for specific pagesizes"
+ puts "Subdb007: skipping for specific page sizes"
return
}
- puts "Subdb007: $method ($args) subdb tests with different pagesizes"
-
- # Create the database and open the dictionary
- set testfile $testdir/subdb007.db
- set t1 $testdir/t1
- set t2 $testdir/t2
- set t3 $testdir/t3
- set t4 $testdir/t4
- cleanup $testdir NULL
-
- set txn ""
- set count 0
-
- if { [is_record_based $method] == 1 } {
- set checkfunc subdb007_recno.check
+ puts "Subdb007: $method ($args) subdb tests with different page sizes"
+
+ set txnenv 0
+ set envargs ""
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/subdb007.db
+ set env NULL
} else {
- set checkfunc subdb007.check
- }
- puts "\tSubdb007.a: create subdbs of different page sizes"
- set psize {8192 4096 2048 1024 512}
- set nsubdbs [llength $psize]
- for { set i 0 } { $i < $nsubdbs } { incr i } {
- lappend duplist -1
- }
- set newent [expr $nentries / $nsubdbs]
- build_all_subdb $testfile [list $method] $psize $duplist $newent $args
-
- # Now we will get each key from the DB and compare the results
- # to the original.
- for { set subdb 0 } { $subdb < $nsubdbs } { incr subdb } {
- puts "\tSubdb007.b: dump file sub$subdb.db"
- set db [berkdb_open -unknown $testfile sub$subdb.db]
- dump_file $db $txn $t1 $checkfunc
- error_check_good db_close [$db close] 0
-
- # Now compare the keys to see if they match the dictionary
- # (or ints)
- if { [is_record_based $method] == 1 } {
- set oid [open $t2 w]
- for {set i 1} {$i <= $newent} {incr i} {
- puts $oid [expr $subdb * $newent + $i]
- }
- close $oid
- file rename -force $t1 $t3
- } else {
- set beg [expr $subdb * $newent]
- incr beg
- set end [expr $beg + $newent - 1]
- filehead $end $dict $t3 $beg
- filesort $t3 $t2
- filesort $t1 $t3
+ set testfile subdb007.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set envargs " -env $env "
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ append envargs " -auto_commit "
+ append db2args " -auto_commit "
}
+ set testdir [get_home $env]
+ }
+ set sub1 "sub1"
+ set sub2 "sub2"
+ cleanup $testdir $env
+ set txn ""
- error_check_good Subdb007:diff($t3,$t2) \
- [filecmp $t3 $t2] 0
-
- puts "\tSubdb007.c: sub$subdb.db: close, open, and dump file"
- # Now, reopen the file and run the last test again.
- open_and_dump_subfile $testfile NULL $txn $t1 $checkfunc \
- dump_file_direction "-first" "-next" sub$subdb.db
- if { [is_record_based $method] != 1 } {
- filesort $t1 $t3
+ puts "\tSubdb007.a.0: create subdb with default page size"
+ set db [eval {berkdb_open -create -mode 0644} \
+ $args {$omethod $testfile $sub1}]
+ error_check_good subdb [is_valid_db $db] TRUE
+ #
+ # Figure out what the default page size is so that we can
+ # guarantee we create it with a different value.
+ set statret [$db stat]
+ set pgsz 0
+ foreach pair $statret {
+ set fld [lindex $pair 0]
+ if { [string compare $fld {Page size}] == 0 } {
+ set pgsz [lindex $pair 1]
}
+ }
+ error_check_good dbclose [$db close] 0
- error_check_good Subdb007:diff($t2,$t3) \
- [filecmp $t2 $t3] 0
-
- # Now, reopen the file and run the last test again in the
- # reverse direction.
- puts "\tSubdb007.d: sub$subdb.db:\
- close, open, and dump file in reverse direction"
- open_and_dump_subfile $testfile NULL $txn $t1 $checkfunc \
- dump_file_direction "-last" "-prev" sub$subdb.db
+ if { $pgsz == 512 } {
+ set pgsz2 2048
+ } else {
+ set pgsz2 512
+ }
- if { [is_record_based $method] != 1 } {
- filesort $t1 $t3
+ puts "\tSubdb007.a.1: create 2nd subdb with specified page size"
+ set stat [catch {eval {berkdb_open_noerr -create -btree} \
+ $db2args {-pagesize $pgsz2 $testfile $sub2}} ret]
+ error_check_good subdb:pgsz $stat 1
+ error_check_good subdb:fail [is_substr $ret \
+ "Different pagesize specified"] 1
+
+ set ret [eval {berkdb dbremove} $envargs {$testfile}]
+
+ puts "\tSubdb007.b.0: create subdb with specified page size"
+ set db [eval {berkdb_open -create -mode 0644} \
+ $args {-pagesize $pgsz2 $omethod $testfile $sub1}]
+ error_check_good subdb [is_valid_db $db] TRUE
+ set statret [$db stat]
+ set newpgsz 0
+ foreach pair $statret {
+ set fld [lindex $pair 0]
+ if { [string compare $fld {Page size}] == 0 } {
+ set newpgsz [lindex $pair 1]
}
-
- error_check_good Subdb007:diff($t3,$t2) \
- [filecmp $t3 $t2] 0
}
-}
-
-# Check function for Subdb007; keys and data are identical
-proc subdb007.check { key data } {
- error_check_good "key/data mismatch" $data $key
-}
+ error_check_good pgsize $pgsz2 $newpgsz
+ error_check_good dbclose [$db close] 0
+
+ puts "\tSubdb007.b.1: create 2nd subdb with different page size"
+ set stat [catch {eval {berkdb_open_noerr -create -btree} \
+ $db2args {-pagesize $pgsz $testfile $sub2}} ret]
+ error_check_good subdb:pgsz $stat 1
+ error_check_good subdb:fail [is_substr $ret \
+ "Different pagesize specified"] 1
+
+ set ret [eval {berkdb dbremove} $envargs {$testfile}]
+
+ puts "\tSubdb007.c.0: create subdb with specified page size"
+ set db [eval {berkdb_open -create -mode 0644} \
+ $args {-pagesize $pgsz2 $omethod $testfile $sub1}]
+ error_check_good subdb [is_valid_db $db] TRUE
+ error_check_good dbclose [$db close] 0
+
+ puts "\tSubdb007.c.1: create 2nd subdb with same specified page size"
+ set db [eval {berkdb_open -create -mode 0644} \
+ $args {-pagesize $pgsz2 $omethod $testfile $sub2}]
+ error_check_good subdb [is_valid_db $db] TRUE
+ error_check_good dbclose [$db close] 0
-proc subdb007_recno.check { key data } {
-global dict
-global kvals
- error_check_good key"$key"_exists [info exists kvals($key)] 1
- error_check_good "key/data mismatch, key $key" $data $kvals($key)
}
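
Note: subdb007 is rewritten from a bulk data test into a targeted error test: the first subdatabase created in a file fixes that file's page size, a second create with a different -pagesize must fail with "Different pagesize specified", and a second create with the same explicit size must succeed. The corresponding C-level knob is DB->set_pagesize() before DB->open(); the helper below is a sketch under those assumptions and is not taken from the commit.

    #include "db.h"

    /* Open (or create) a named subdatabase with an explicit page size.
     * If the file already exists with a different page size, the open is
     * expected to fail and the error is handed back to the caller. */
    int
    open_subdb_pagesize(DB **dbpp,
        const char *file, const char *name, u_int32_t pagesize)
    {
            DB *dbp;
            int ret;

            if ((ret = db_create(&dbp, NULL, 0)) != 0)
                    return (ret);
            if ((ret = dbp->set_pagesize(dbp, pagesize)) != 0 ||
                (ret = dbp->open(dbp, NULL,
                file, name, DB_BTREE, DB_CREATE, 0644)) != 0) {
                    (void)dbp->close(dbp, 0);
                    return (ret);
            }
            *dbpp = dbp;
            return (0);
    }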
diff --git a/bdb/test/sdb008.tcl b/bdb/test/sdb008.tcl
index b005f00931a..1c46aed2087 100644
--- a/bdb/test/sdb008.tcl
+++ b/bdb/test/sdb008.tcl
@@ -1,20 +1,23 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1999, 2000
+# Copyright (c) 1999-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: sdb008.tcl,v 11.14 2000/08/25 14:21:53 sue Exp $
-#
-# Sub DB Test 8 {access method}
-# Use the first 10,000 entries from the dictionary.
-# Use a different or random lorder for each subdb.
-# Insert each with self as key and data; retrieve each.
-# After all are entered, retrieve all; compare output to original.
-# Close file, reopen, do retrieve and re-verify.
-proc subdb008 { method {nentries 10000} args } {
+# $Id: sdb008.tcl,v 11.25 2002/07/11 18:53:46 sandstro Exp $
+# TEST subdb008
+# TEST Tests lorder difference errors between subdbs.
+# TEST Test 3 different scenarios for lorder.
+# TEST 1. Create/open with specific lorder, 2nd subdb create with
+# TEST different one, should error.
+# TEST 2. Create/open with a default lorder 2nd subdb create with
+# TEST specified different one, should error.
+# TEST 3. Create/open with specified lorder, 2nd subdb create with
+# TEST same specified lorder, should succeed.
+# TEST (4th combo of using all defaults is a basic test, done elsewhere)
+proc subdb008 { method args } {
source ./include.tcl
- global rand_init
+ set db2args [convert_args -btree $args]
set args [convert_args $method $args]
set omethod [convert_method $method]
@@ -22,130 +25,97 @@ proc subdb008 { method {nentries 10000} args } {
puts "Subdb008: skipping for method $method"
return
}
-
- berkdb srand $rand_init
-
- puts "Subdb008: $method ($args) subdb lorder tests"
-
- # Create the database and open the dictionary
- set testfile $testdir/subdb008.db
- set t1 $testdir/t1
- set t2 $testdir/t2
- set t3 $testdir/t3
- set t4 $testdir/t4
- cleanup $testdir NULL
-
- set txn ""
- set pflags ""
- set gflags ""
-
- if { [is_record_based $method] == 1 } {
- set checkfunc subdb008_recno.check
+ set txnenv 0
+ set envargs ""
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/subdb008.db
+ set env NULL
} else {
- set checkfunc subdb008.check
- }
- set nsubdbs 4
- set lo [list 4321 1234]
- puts "\tSubdb008.a: put/get loop"
- # Here is the loop where we put and get each key/data pair
- for { set i 0 } { $i < $nsubdbs } { incr i } {
- set subdb sub$i.db
- if { $i >= [llength $lo]} {
- set r [berkdb random_int 0 1]
- set order [lindex $lo $r]
- } else {
- set order [lindex $lo $i]
+ set testfile subdb008.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set envargs "-env $env"
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ append db2args " -auto_commit "
+ append envargs " -auto_commit "
}
- set db [eval {berkdb_open -create -mode 0644} \
- $args {-lorder $order $omethod $testfile $subdb}]
- set did [open $dict]
- set count 0
- while { [gets $did str] != -1 && $count < $nentries } {
- if { [is_record_based $method] == 1 } {
- global kvals
-
- set gflags "-recno"
- set key [expr $i * $nentries]
- set key [expr $key + $count + 1]
- set kvals($key) [pad_data $method $str]
- } else {
- set key $str
- }
- set ret [eval {$db put} \
- $txn $pflags {$key [chop_data $method $str]}]
- error_check_good put $ret 0
-
- set ret [eval {$db get} $gflags {$key}]
- error_check_good \
- get $ret [list [list $key [pad_data $method $str]]]
- incr count
- }
- close $did
- error_check_good db_close [$db close] 0
+ set testdir [get_home $env]
}
-
- # Now we will get each key from the DB and compare the results
- # to the original.
- for { set subdb 0 } { $subdb < $nsubdbs } { incr subdb } {
- puts "\tSubdb008.b: dump file sub$subdb.db"
- set db [berkdb_open -unknown $testfile sub$subdb.db]
- dump_file $db $txn $t1 $checkfunc
- error_check_good db_close [$db close] 0
-
- # Now compare the keys to see if they match the dictionary
- # (or ints)
- if { [is_record_based $method] == 1 } {
- set oid [open $t2 w]
- for {set i 1} {$i <= $nentries} {incr i} {
- puts $oid [expr $subdb * $nentries + $i]
- }
- close $oid
- file rename -force $t1 $t3
- } else {
- set q q
- filehead $nentries $dict $t3
- filesort $t3 $t2
- filesort $t1 $t3
- }
-
- error_check_good Subdb008:diff($t3,$t2) \
- [filecmp $t3 $t2] 0
-
- puts "\tSubdb008.c: sub$subdb.db: close, open, and dump file"
- # Now, reopen the file and run the last test again.
- open_and_dump_subfile $testfile NULL $txn $t1 $checkfunc \
- dump_file_direction "-first" "-next" sub$subdb.db
- if { [is_record_based $method] != 1 } {
- filesort $t1 $t3
- }
-
- error_check_good Subdb008:diff($t2,$t3) \
- [filecmp $t2 $t3] 0
-
- # Now, reopen the file and run the last test again in the
- # reverse direction.
- puts "\tSubdb008.d: sub$subdb.db:\
- close, open, and dump file in reverse direction"
- open_and_dump_subfile $testfile NULL $txn $t1 $checkfunc \
- dump_file_direction "-last" "-prev" sub$subdb.db
-
- if { [is_record_based $method] != 1 } {
- filesort $t1 $t3
- }
-
- error_check_good Subdb008:diff($t3,$t2) \
- [filecmp $t3 $t2] 0
+ puts "Subdb008: $method ($args) subdb tests with different lorders"
+
+ set sub1 "sub1"
+ set sub2 "sub2"
+ cleanup $testdir $env
+
+ puts "\tSubdb008.b.0: create subdb with specified lorder"
+ set db [eval {berkdb_open -create -mode 0644} \
+ $args {-lorder 4321 $omethod $testfile $sub1}]
+ error_check_good subdb [is_valid_db $db] TRUE
+ # Figure out what the default lorder is so that we can
+ # guarantee we create it with a different value later.
+ set is_swap [$db is_byteswapped]
+ if { $is_swap } {
+ set other 4321
+ } else {
+ set other 1234
}
-}
-
-# Check function for Subdb008; keys and data are identical
-proc subdb008.check { key data } {
- error_check_good "key/data mismatch" $data $key
-}
+ error_check_good dbclose [$db close] 0
+
+ puts "\tSubdb008.b.1: create 2nd subdb with different lorder"
+ set stat [catch {eval {berkdb_open_noerr -create $omethod} \
+ $args {-lorder 1234 $testfile $sub2}} ret]
+ error_check_good subdb:lorder $stat 1
+ error_check_good subdb:fail [is_substr $ret \
+ "Different lorder specified"] 1
+
+ set ret [eval {berkdb dbremove} $envargs {$testfile}]
+
+ puts "\tSubdb008.c.0: create subdb with opposite specified lorder"
+ set db [eval {berkdb_open -create -mode 0644} \
+ $args {-lorder 1234 $omethod $testfile $sub1}]
+ error_check_good subdb [is_valid_db $db] TRUE
+ error_check_good dbclose [$db close] 0
+
+ puts "\tSubdb008.c.1: create 2nd subdb with different lorder"
+ set stat [catch {eval {berkdb_open_noerr -create $omethod} \
+ $args {-lorder 4321 $testfile $sub2}} ret]
+ error_check_good subdb:lorder $stat 1
+ error_check_good subdb:fail [is_substr $ret \
+ "Different lorder specified"] 1
+
+ set ret [eval {berkdb dbremove} $envargs {$testfile}]
+
+ puts "\tSubdb008.d.0: create subdb with default lorder"
+ set db [eval {berkdb_open -create -mode 0644} \
+ $args {$omethod $testfile $sub1}]
+ error_check_good subdb [is_valid_db $db] TRUE
+ error_check_good dbclose [$db close] 0
+
+ puts "\tSubdb008.d.1: create 2nd subdb with different lorder"
+ set stat [catch {eval {berkdb_open_noerr -create -btree} \
+ $db2args {-lorder $other $testfile $sub2}} ret]
+ error_check_good subdb:lorder $stat 1
+ error_check_good subdb:fail [is_substr $ret \
+ "Different lorder specified"] 1
+
+ set ret [eval {berkdb dbremove} $envargs {$testfile}]
+
+ puts "\tSubdb008.e.0: create subdb with specified lorder"
+ set db [eval {berkdb_open -create -mode 0644} \
+ $args {-lorder $other $omethod $testfile $sub1}]
+ error_check_good subdb [is_valid_db $db] TRUE
+ error_check_good dbclose [$db close] 0
+
+ puts "\tSubdb008.e.1: create 2nd subdb with same specified lorder"
+ set db [eval {berkdb_open -create -mode 0644} \
+ $args {-lorder $other $omethod $testfile $sub2}]
+ error_check_good subdb [is_valid_db $db] TRUE
+ error_check_good dbclose [$db close] 0
-proc subdb008_recno.check { key data } {
-global dict
-global kvals
- error_check_good key"$key"_exists [info exists kvals($key)] 1
- error_check_good "key/data mismatch, key $key" $data $kvals($key)
}
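
Note: subdb008 gets the same restructuring for byte order: the first create fixes the file's lorder (1234 for little-endian, 4321 for big-endian), a later create with a different -lorder fails with "Different lorder specified", and a matching explicit lorder succeeds. The C-level counterpart is DB->set_lorder() before DB->open(); the shape is the same as the page-size helper sketched after subdb007, e.g. (sketch only, not from the commit):

    #include "db.h"

    /* Sketch: request a byte order before opening a subdatabase; a value
     * that disagrees with the existing file is expected to be rejected. */
    int
    open_subdb_lorder(DB **dbpp, const char *file, const char *name, int lorder)
    {
            DB *dbp;
            int ret;

            if ((ret = db_create(&dbp, NULL, 0)) != 0)
                    return (ret);
            if ((ret = dbp->set_lorder(dbp, lorder)) != 0 ||
                (ret = dbp->open(dbp, NULL,
                file, name, DB_BTREE, DB_CREATE, 0644)) != 0) {
                    (void)dbp->close(dbp, 0);
                    return (ret);
            }
            *dbpp = dbp;
            return (0);
    }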
diff --git a/bdb/test/sdb009.tcl b/bdb/test/sdb009.tcl
index 060bea643bb..4e4869643ef 100644
--- a/bdb/test/sdb009.tcl
+++ b/bdb/test/sdb009.tcl
@@ -1,15 +1,16 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2000
+# Copyright (c) 2000-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: sdb009.tcl,v 11.4 2000/08/25 14:21:53 sue Exp $
+# $Id: sdb009.tcl,v 11.9 2002/07/11 18:53:46 sandstro Exp $
#
-# Subdatabase Test 9 (replacement)
-# Test the DB->rename method.
+# TEST subdb009
+# TEST Test DB->rename() method for subdbs
proc subdb009 { method args } {
global errorCode
source ./include.tcl
+
set omethod [convert_method $method]
set args [convert_args $method $args]
@@ -20,43 +21,72 @@ proc subdb009 { method args } {
return
}
- set file $testdir/subdb009.db
+ set txnenv 0
+ set envargs ""
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/subdb009.db
+ set env NULL
+ } else {
+ set testfile subdb009.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set envargs " -env $env "
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ append envargs " -auto_commit "
+ }
+ set testdir [get_home $env]
+ }
set oldsdb OLDDB
set newsdb NEWDB
# Make sure we're starting from a clean slate.
- cleanup $testdir NULL
- error_check_bad "$file exists" [file exists $file] 1
+ cleanup $testdir $env
+ error_check_bad "$testfile exists" [file exists $testfile] 1
puts "\tSubdb009.a: Create/rename file"
puts "\t\tSubdb009.a.1: create"
set db [eval {berkdb_open -create -mode 0644}\
- $omethod $args $file $oldsdb]
+ $omethod $args {$testfile $oldsdb}]
error_check_good dbopen [is_valid_db $db] TRUE
# The nature of the key and data are unimportant; use numeric key
# so record-based methods don't need special treatment.
+ set txn ""
set key 1
set data [pad_data $method data]
- error_check_good dbput [$db put $key $data] 0
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ error_check_good dbput [eval {$db put} $txn {$key $data}] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
error_check_good dbclose [$db close] 0
puts "\t\tSubdb009.a.2: rename"
- error_check_good rename_file [eval {berkdb dbrename} $file \
- $oldsdb $newsdb] 0
+ error_check_good rename_file [eval {berkdb dbrename} $envargs \
+ {$testfile $oldsdb $newsdb}] 0
puts "\t\tSubdb009.a.3: check"
# Open again with create to make sure we've really completely
# disassociated the subdb from the old name.
set odb [eval {berkdb_open -create -mode 0644}\
- $omethod $args $file $oldsdb]
+ $omethod $args $testfile $oldsdb]
error_check_good odb_open [is_valid_db $odb] TRUE
set odbt [$odb get $key]
error_check_good odb_close [$odb close] 0
set ndb [eval {berkdb_open -create -mode 0644}\
- $omethod $args $file $newsdb]
+ $omethod $args $testfile $newsdb]
error_check_good ndb_open [is_valid_db $ndb] TRUE
set ndbt [$ndb get $key]
error_check_good ndb_close [$ndb close] 0
@@ -69,7 +99,8 @@ proc subdb009 { method args } {
# Now there's both an old and a new. Rename the "new" to the "old"
# and make sure that fails.
puts "\tSubdb009.b: Make sure rename fails instead of overwriting"
- set ret [catch {eval {berkdb dbrename} $file $oldsdb $newsdb} res]
+ set ret [catch {eval {berkdb dbrename} $envargs $testfile \
+ $oldsdb $newsdb} res]
error_check_bad rename_overwrite $ret 0
error_check_good rename_overwrite_ret [is_substr $errorCode EEXIST] 1
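
Note: subdb009 now drives the rename through "berkdb dbrename" with environment arguments and checks that renaming onto an existing subdatabase fails with EEXIST. One C-level counterpart is DB->rename(), which takes the file name, the old subdatabase name, and the new one, and which may not be used again after the call regardless of the result; roughly (illustrative sketch, not code from this commit):

    #include "db.h"

    /* Sketch: rename subdatabase OLDDB to NEWDB inside subdb009.db.  The
     * DB handle must be freshly created and unopened; EEXIST is expected
     * if NEWDB already exists. */
    int
    rename_subdb(void)
    {
            DB *dbp;
            int ret;

            if ((ret = db_create(&dbp, NULL, 0)) != 0)
                    return (ret);
            return (dbp->rename(dbp, "subdb009.db", "OLDDB", "NEWDB", 0));
    }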
diff --git a/bdb/test/sdb010.tcl b/bdb/test/sdb010.tcl
index 6bec78d372b..51f25976c56 100644
--- a/bdb/test/sdb010.tcl
+++ b/bdb/test/sdb010.tcl
@@ -1,12 +1,12 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2000
+# Copyright (c) 2000-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: sdb010.tcl,v 11.4 2000/08/25 14:21:53 sue Exp $
+# $Id: sdb010.tcl,v 11.14 2002/07/11 18:53:47 sandstro Exp $
#
-# Subdatabase Test 10 {access method}
-# Test of dbremove
+# TEST subdb010
+# TEST Test DB->remove() method and DB->truncate() for subdbs
proc subdb010 { method args } {
global errorCode
source ./include.tcl
@@ -14,33 +14,153 @@ proc subdb010 { method args } {
set args [convert_args $method $args]
set omethod [convert_method $method]
- puts "Subdb010: Test of DB->remove()"
+ puts "Subdb010: Test of DB->remove() and DB->truncate"
if { [is_queue $method] == 1 } {
puts "\tSubdb010: Skipping for method $method."
return
}
- cleanup $testdir NULL
+ set txnenv 0
+ set envargs ""
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/subdb010.db
+ set tfpath $testfile
+ set env NULL
+ } else {
+ set testfile subdb010.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set envargs " -env $env "
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ append envargs " -auto_commit "
+ }
+ set testdir [get_home $env]
+ set tfpath $testdir/$testfile
+ }
+ cleanup $testdir $env
- set testfile $testdir/subdb010.db
+ set txn ""
set testdb DATABASE
+ set testdb2 DATABASE2
- set db [eval {berkdb_open -create -truncate -mode 0644} $omethod \
+ set db [eval {berkdb_open -create -mode 0644} $omethod \
$args $testfile $testdb]
error_check_good db_open [is_valid_db $db] TRUE
error_check_good db_close [$db close] 0
- error_check_good file_exists_before [file exists $testfile] 1
- error_check_good db_remove [berkdb dbremove $testfile $testdb] 0
+ puts "\tSubdb010.a: Test of DB->remove()"
+ error_check_good file_exists_before [file exists $tfpath] 1
+ error_check_good db_remove [eval {berkdb dbremove} $envargs \
+ $testfile $testdb] 0
# File should still exist.
- error_check_good file_exists_after [file exists $testfile] 1
+ error_check_good file_exists_after [file exists $tfpath] 1
# But database should not.
set ret [catch {eval berkdb_open $omethod $args $testfile $testdb} res]
error_check_bad open_failed ret 0
error_check_good open_failed_ret [is_substr $errorCode ENOENT] 1
+ puts "\tSubdb010.b: Setup for DB->truncate()"
+ # The nature of the key and data are unimportant; use numeric key
+ # so record-based methods don't need special treatment.
+ set key1 1
+ set key2 2
+ set data1 [pad_data $method data1]
+ set data2 [pad_data $method data2]
+
+ set db [eval {berkdb_open -create -mode 0644} $omethod \
+ $args {$testfile $testdb}]
+ error_check_good db_open [is_valid_db $db] TRUE
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ error_check_good dbput [eval {$db put} $txn {$key1 $data1}] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ set db2 [eval {berkdb_open -create -mode 0644} $omethod \
+ $args $testfile $testdb2]
+ error_check_good db_open [is_valid_db $db2] TRUE
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ error_check_good dbput [eval {$db2 put} $txn {$key2 $data2}] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ error_check_good db_close [$db close] 0
+ error_check_good db_close [$db2 close] 0
+
+ puts "\tSubdb010.c: truncate"
+ #
+ # Return value should be 1, the count of how many items were
+ # destroyed when we truncated.
+ set db [eval {berkdb_open -create -mode 0644} $omethod \
+ $args $testfile $testdb]
+ error_check_good db_open [is_valid_db $db] TRUE
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ error_check_good trunc_subdb [eval {$db truncate} $txn] 1
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+
+ puts "\tSubdb010.d: check"
+ set db [eval {berkdb_open} $args {$testfile $testdb}]
+ error_check_good db_open [is_valid_db $db] TRUE
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
+ error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE
+ set kd [$dbc get -first]
+ error_check_good trunc_dbcget [llength $kd] 0
+ error_check_good dbcclose [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ set db2 [eval {berkdb_open} $args {$testfile $testdb2}]
+ error_check_good db_open [is_valid_db $db2] TRUE
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db2 cursor} $txn]
+ error_check_good db_cursor [is_valid_cursor $dbc $db2] TRUE
+ set kd [$dbc get -first]
+ error_check_bad notrunc_dbcget1 [llength $kd] 0
+ set db2kd [list [list $key2 $data2]]
+ error_check_good key2 $kd $db2kd
+ set kd [$dbc get -next]
+ error_check_good notrunc_dbget2 [llength $kd] 0
+ error_check_good dbcclose [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ error_check_good db_close [$db close] 0
+ error_check_good db_close [$db2 close] 0
puts "\tSubdb010 succeeded."
}
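
The new Subdb010.c/.d steps depend on two details worth calling out: DB->truncate returns the count of key/data pairs it discarded (1 here, since each subdb held a single pair), and in a transactional env the call is wrapped in an explicit txn. A minimal sketch of the same pattern, assuming $env, $envargs, $txnenv, $testfile and $testdb are set up as in the hunk above; error checking is omitted:

	# $envargs is "" standalone, or " -env $env -auto_commit " in a txn env.
	set db [eval {berkdb_open -create -mode 0644 -btree} $envargs \
	    {$testfile $testdb}]
	set txn ""
	if { $txnenv == 1 } {
		set t [$env txn]
		set txn "-txn $t"
	}
	# truncate returns how many key/data pairs it threw away.
	set count [eval {$db truncate} $txn]
	puts "truncated $count pairs"
	if { $txnenv == 1 } {
		$t commit
	}
	$db close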
diff --git a/bdb/test/sdb011.tcl b/bdb/test/sdb011.tcl
new file mode 100644
index 00000000000..862e32f73ed
--- /dev/null
+++ b/bdb/test/sdb011.tcl
@@ -0,0 +1,143 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: sdb011.tcl,v 11.9 2002/07/11 18:53:47 sandstro Exp $
+#
+# TEST subdb011
+# TEST Test deleting Subdbs with overflow pages
+# TEST Create 1 db with many large subdbs.
+# TEST Test subdatabases with overflow pages.
+proc subdb011 { method {ndups 13} {nsubdbs 10} args} {
+ global names
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ if { [is_queue $method] == 1 || [is_fixed_length $method] == 1 } {
+ puts "Subdb011: skipping for method $method"
+ return
+ }
+ set txnenv 0
+ set envargs ""
+ set max_files 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/subdb011.db
+ set env NULL
+ set tfpath $testfile
+ } else {
+ set testfile subdb011.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set envargs " -env $env "
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ append envargs " -auto_commit "
+ set max_files 50
+ if { $ndups == 13 } {
+ set ndups 7
+ }
+ }
+ set testdir [get_home $env]
+ set tfpath $testdir/$testfile
+ }
+
+ # Create the database and open the dictionary
+
+ cleanup $testdir $env
+ set txn ""
+
+ # Here is the loop where we put and get each key/data pair
+ set file_list [get_file_list]
+ if { $max_files != 0 && [llength $file_list] > $max_files } {
+ set fend [expr $max_files - 1]
+ set file_list [lrange $file_list 0 $fend]
+ }
+ set flen [llength $file_list]
+ puts "Subdb011: $method ($args) $ndups overflow dups with \
+ $flen filename=key filecontents=data pairs"
+
+ puts "\tSubdb011.a: Create each of $nsubdbs subdbs and dups"
+ set slist {}
+ set i 0
+ set count 0
+ foreach f $file_list {
+ set i [expr $i % $nsubdbs]
+ if { [is_record_based $method] == 1 } {
+ set key [expr $count + 1]
+ set names([expr $count + 1]) $f
+ } else {
+ set key $f
+ }
+ # Should really catch errors
+ set fid [open $f r]
+ fconfigure $fid -translation binary
+ set filecont [read $fid]
+ set subdb subdb$i
+ lappend slist $subdb
+ close $fid
+ set db [eval {berkdb_open -create -mode 0644} \
+ $args {$omethod $testfile $subdb}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ for {set dup 0} {$dup < $ndups} {incr dup} {
+ set data $dup:$filecont
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn {$key \
+ [chop_data $method $data]}]
+ error_check_good put $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ }
+ error_check_good dbclose [$db close] 0
+ incr i
+ incr count
+ }
+
+ puts "\tSubdb011.b: Verify overflow pages"
+ foreach subdb $slist {
+ set db [eval {berkdb_open -create -mode 0644} \
+ $args {$omethod $testfile $subdb}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ set stat [$db stat]
+
+ # What everyone else calls overflow pages, hash calls "big
+ # pages", so we need to special-case hash here. (Hash
+ # overflow pages are additional pages after the first in a
+ # bucket.)
+ if { [string compare [$db get_type] hash] == 0 } {
+ error_check_bad overflow \
+ [is_substr $stat "{{Number of big pages} 0}"] 1
+ } else {
+ error_check_bad overflow \
+ [is_substr $stat "{{Overflow pages} 0}"] 1
+ }
+ error_check_good dbclose [$db close] 0
+ }
+
+ puts "\tSubdb011.c: Delete subdatabases"
+ for {set i $nsubdbs} {$i > 0} {set i [expr $i - 1]} {
+ #
+ # Randomly delete a subdatabase
+ set sindex [berkdb random_int 0 [expr $i - 1]]
+ set subdb [lindex $slist $sindex]
+ #
+ # Delete the one we did from the list
+ set slist [lreplace $slist $sindex $sindex]
+ error_check_good file_exists_before [file exists $tfpath] 1
+ error_check_good db_remove [eval {berkdb dbremove} $envargs \
+ {$testfile $subdb}] 0
+ }
+}
+
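
Subdb011.b leans on [$db stat] returning a list of {name value} pairs and on hash reporting its overflow pages under a different name ("Number of big pages"). A small helper sketch that pulls the relevant counter out of the stat list; the proc name is illustrative, not part of the suite:

	# Return the overflow-page count for an open db handle,
	# special-casing hash just as Subdb011.b does.
	proc overflow_page_count { db } {
		if { [string compare [$db get_type] hash] == 0 } {
			set field "Number of big pages"
		} else {
			set field "Overflow pages"
		}
		foreach pair [$db stat] {
			if { [string compare [lindex $pair 0] $field] == 0 } {
				return [lindex $pair 1]
			}
		}
		return 0
	}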
diff --git a/bdb/test/sdb012.tcl b/bdb/test/sdb012.tcl
new file mode 100644
index 00000000000..9c05d977daf
--- /dev/null
+++ b/bdb/test/sdb012.tcl
@@ -0,0 +1,428 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: sdb012.tcl,v 1.3 2002/08/08 15:38:10 bostic Exp $
+#
+# TEST subdb012
+# TEST Test subdbs with locking and transactions
+# TEST	Tests that creating and removing subdbs while handles are
+# TEST	open works correctly, including in the face of txns.
+#
+proc subdb012 { method args } {
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ if { [is_queue $method] == 1 } {
+ puts "Subdb012: skipping for method $method"
+ return
+ }
+
+ # If we are using an env, then skip this test. It needs its own.
+ set eindex [lsearch -exact $args "-env"]
+ if { $eindex != -1 } {
+ incr eindex
+ set env [lindex $args $eindex]
+ puts "Subdb012 skipping for env $env"
+ return
+ }
+ set encargs ""
+ set largs [split_encargs $args encargs]
+
+ puts "Subdb012: $method ($largs $encargs) subdb txn/locking tests"
+
+ #
+ # sdb012_body takes a txn list containing 4 elements.
+ # {txn command for first subdb
+ # txn command for second subdb
+ # txn command for first subdb removal
+ # txn command for second subdb removal}
+ #
+	# The allowed commands are 'none', 'one', 'auto', 'abort', 'commit'.
+ # 'none' is a special case meaning run without a txn. In the
+ # case where all 4 items are 'none', we run in a lock-only env.
+ # 'one' is a special case meaning we create the subdbs together
+ # in one single transaction. It is indicated as the value for t1,
+ # and the value in t2 indicates if that single txn should be
+ # aborted or committed. It is not used and has no meaning
+ # in the removal case. 'auto' means use the -auto_commit flag
+ # to the operation, and 'abort' and 'commit' do the obvious.
+ #
+ # First test locking w/o txns. If any in tlist are 'none',
+ # all must be none.
+ #
+ # Now run through the txn-based operations
+ set count 0
+ set sdb "Subdb012."
+ set teststr "abcdefghijklmnopqrstuvwxyz"
+ set testlet [split $teststr {}]
+ foreach t1 { none one abort auto commit } {
+ foreach t2 { none abort auto commit } {
+ if { $t1 == "one" } {
+ if { $t2 == "none" || $t2 == "auto"} {
+ continue
+ }
+ }
+ set tlet [lindex $testlet $count]
+ foreach r1 { none abort auto commit } {
+ foreach r2 { none abort auto commit } {
+ set tlist [list $t1 $t2 $r1 $r2]
+ sdb012_body $testdir $omethod $largs \
+ $encargs $sdb$tlet $tlist
+ }
+ }
+ incr count
+ }
+ }
+
+}
+
+proc s012 { method args } {
+ source ./include.tcl
+
+ set omethod [convert_method $method]
+
+ set encargs ""
+ set largs ""
+
+ puts "Subdb012: $method ($largs $encargs) subdb txn/locking tests"
+
+ set sdb "Subdb012."
+ set tlet X
+ set tlist $args
+ error_check_good tlist [llength $tlist] 4
+ sdb012_body $testdir $omethod $largs $encargs $sdb$tlet $tlist
+}
+
+#
+# This proc checks the tlist values and returns the flags
+# that should be used when opening the env. If we are running
+# with no txns, then just -lock, otherwise -txn.
+#
+proc sdb012_subsys { tlist } {
+ set t1 [lindex $tlist 0]
+ #
+ # If we have no txns, all elements of the list should be none.
+ # In that case we only run with locking turned on.
+ # Otherwise, we use the full txn subsystems.
+ #
+ set allnone {none none none none}
+ if { $allnone == $tlist } {
+ set subsys "-lock"
+ } else {
+ set subsys "-txn"
+ }
+ return $subsys
+}
+
+#
+# This proc parses the tlist and returns a list of 4 items that
+# should be used in operations. I.e. it will begin the txns as
+# needed, or return a -auto_commit flag, etc.
+#
+proc sdb012_tflags { env tlist } {
+ set ret ""
+ set t1 ""
+ foreach t $tlist {
+ switch $t {
+ one {
+ set t1 [$env txn]
+ error_check_good txnbegin [is_valid_txn $t1 $env] TRUE
+ lappend ret "-txn $t1"
+ lappend ret "-txn $t1"
+ }
+ auto {
+ lappend ret "-auto_commit"
+ }
+ abort -
+ commit {
+ #
+ # If the previous command was a "one", skip over
+ # this commit/abort. Otherwise start a new txn
+ # for the removal case.
+ #
+ if { $t1 == "" } {
+ set txn [$env txn]
+ error_check_good txnbegin [is_valid_txn $txn \
+ $env] TRUE
+ lappend ret "-txn $txn"
+ } else {
+ set t1 ""
+ }
+ }
+ none {
+ lappend ret ""
+ }
+ default {
+ error "Txn command $t not implemented"
+ }
+ }
+ }
+ return $ret
+}
+
+#
+# This proc parses the tlist and returns a list of 4 items that
+# should be used in the txn conclusion operations. I.e. it will
+# give "" if using auto_commit (i.e. no final txn op), or a single
+# abort/commit if both subdb's are in one txn.
+#
+proc sdb012_top { tflags tlist } {
+ set ret ""
+ set t1 ""
+ #
+ # We know both lists have 4 items. Iterate over them
+ # using multiple value lists so we know which txn goes
+ # with each op.
+ #
+ # The tflags list is needed to extract the txn command
+ # out for the operation. The tlist list is needed to
+ # determine what operation we are doing.
+ #
+ foreach t $tlist tf $tflags {
+ switch $t {
+ one {
+ set t1 [lindex $tf 1]
+ }
+ auto {
+ lappend ret "sdb012_nop"
+ }
+ abort -
+ commit {
+ #
+	# If the previous command was a "one" (i.e. t1
+	# is set), append a no-op for the first subdb's
+	# conclusion and then the shared txn's commit/abort.
+ #
+ if { $t1 == "" } {
+ set txn [lindex $tf 1]
+ set top "$txn $t"
+ lappend ret $top
+ } else {
+ set top "$t1 $t"
+ lappend ret "sdb012_nop"
+ lappend ret $top
+ set t1 ""
+ }
+ }
+ none {
+ lappend ret "sdb012_nop"
+ }
+ }
+ }
+ return $ret
+}
+
+proc sdb012_nop { } {
+ return 0
+}
+
+proc sdb012_isabort { tlist item } {
+ set i [lindex $tlist $item]
+ if { $i == "one" } {
+ set i [lindex $tlist [expr $item + 1]]
+ }
+ if { $i == "abort" } {
+ return 1
+ } else {
+ return 0
+ }
+}
+
+proc sdb012_body { testdir omethod largs encargs msg tlist } {
+
+ puts "\t$msg: $tlist"
+ set testfile subdb012.db
+ set subdb1 sub1
+ set subdb2 sub2
+
+ set subsys [sdb012_subsys $tlist]
+ env_cleanup $testdir
+ set env [eval {berkdb_env -create -home} $testdir $subsys $encargs]
+ error_check_good dbenv [is_valid_env $env] TRUE
+ error_check_good test_lock [$env test abort subdb_lock] 0
+
+ #
+ # Convert from our tlist txn commands into real flags we
+ # will pass to commands. Use the multiple values feature
+ # of foreach to do this efficiently.
+ #
+ set tflags [sdb012_tflags $env $tlist]
+ foreach {txn1 txn2 rem1 rem2} $tflags {break}
+ foreach {top1 top2 rop1 rop2} [sdb012_top $tflags $tlist] {break}
+
+# puts "txn1 $txn1, txn2 $txn2, rem1 $rem1, rem2 $rem2"
+# puts "top1 $top1, top2 $top2, rop1 $rop1, rop2 $rop2"
+ puts "\t$msg.0: Create sub databases in env with $subsys"
+ set s1 [eval {berkdb_open -env $env -create -mode 0644} \
+ $largs $txn1 {$omethod $testfile $subdb1}]
+ error_check_good dbopen [is_valid_db $s1] TRUE
+
+ set ret [eval $top1]
+ error_check_good t1_end $ret 0
+
+ set s2 [eval {berkdb_open -env $env -create -mode 0644} \
+ $largs $txn2 {$omethod $testfile $subdb2}]
+ error_check_good dbopen [is_valid_db $s2] TRUE
+
+ puts "\t$msg.1: Subdbs are open; resolve txns if necessary"
+ set ret [eval $top2]
+ error_check_good t2_end $ret 0
+
+ set t1_isabort [sdb012_isabort $tlist 0]
+ set t2_isabort [sdb012_isabort $tlist 1]
+ set r1_isabort [sdb012_isabort $tlist 2]
+ set r2_isabort [sdb012_isabort $tlist 3]
+
+# puts "t1_isabort $t1_isabort, t2_isabort $t2_isabort, r1_isabort $r1_isabort, r2_isabort $r2_isabort"
+
+ puts "\t$msg.2: Subdbs are open; verify removal failures"
+ # Verify removes of subdbs with open subdb's fail
+ #
+ # We should fail no matter what. If we aborted, then the
+ # subdb should not exist. If we didn't abort, we should fail
+ # with DB_LOCK_NOTGRANTED.
+ #
+ # XXX - Do we need -auto_commit for all these failing ones?
+ set r [ catch {berkdb dbremove -env $env $testfile $subdb1} result ]
+ error_check_bad dbremove1_open $r 0
+ if { $t1_isabort } {
+ error_check_good dbremove1_open_ab [is_substr \
+ $result "no such file"] 1
+ } else {
+ error_check_good dbremove1_open [is_substr \
+ $result DB_LOCK_NOTGRANTED] 1
+ }
+
+ set r [ catch {berkdb dbremove -env $env $testfile $subdb2} result ]
+ error_check_bad dbremove2_open $r 0
+ if { $t2_isabort } {
+ error_check_good dbremove2_open_ab [is_substr \
+ $result "no such file"] 1
+ } else {
+ error_check_good dbremove2_open [is_substr \
+ $result DB_LOCK_NOTGRANTED] 1
+ }
+
+ # Verify file remove fails
+ set r [catch {berkdb dbremove -env $env $testfile} result]
+ error_check_bad dbremovef_open $r 0
+
+ #
+ # If both aborted, there should be no file??
+ #
+ if { $t1_isabort && $t2_isabort } {
+ error_check_good dbremovef_open_ab [is_substr \
+ $result "no such file"] 1
+ } else {
+ error_check_good dbremovef_open [is_substr \
+ $result DB_LOCK_NOTGRANTED] 1
+ }
+
+ puts "\t$msg.3: Close subdb2; verify removals"
+ error_check_good close_s2 [$s2 close] 0
+ set r [ catch {eval {berkdb dbremove -env} \
+ $env $rem2 $testfile $subdb2} result ]
+ if { $t2_isabort } {
+ error_check_bad dbrem2_ab $r 0
+ error_check_good dbrem2_ab [is_substr \
+ $result "no such file"] 1
+ } else {
+ error_check_good dbrem2 $result 0
+ }
+ # Resolve subdb2 removal txn
+ set r [eval $rop2]
+ error_check_good rop2 $r 0
+
+ set r [ catch {berkdb dbremove -env $env $testfile $subdb1} result ]
+ error_check_bad dbremove1.2_open $r 0
+ if { $t1_isabort } {
+ error_check_good dbremove1.2_open_ab [is_substr \
+ $result "no such file"] 1
+ } else {
+ error_check_good dbremove1.2_open [is_substr \
+ $result DB_LOCK_NOTGRANTED] 1
+ }
+
+ # There are three cases here:
+ # 1. if both t1 and t2 aborted, the file shouldn't exist
+ # 2. if only t1 aborted, the file still exists and nothing is open
+ # 3. if neither aborted a remove should fail because the first
+ # subdb is still open
+ # In case 2, don't try the remove, because it should succeed
+ # and we won't be able to test anything else.
+ if { !$t1_isabort || $t2_isabort } {
+ set r [catch {berkdb dbremove -env $env $testfile} result]
+ if { $t1_isabort && $t2_isabort } {
+ error_check_bad dbremovef.2_open $r 0
+ error_check_good dbremove.2_open_ab [is_substr \
+ $result "no such file"] 1
+ } else {
+ error_check_bad dbremovef.2_open $r 0
+ error_check_good dbremove.2_open [is_substr \
+ $result DB_LOCK_NOTGRANTED] 1
+ }
+ }
+
+ puts "\t$msg.4: Close subdb1; verify removals"
+ error_check_good close_s1 [$s1 close] 0
+ set r [ catch {eval {berkdb dbremove -env} \
+ $env $rem1 $testfile $subdb1} result ]
+ if { $t1_isabort } {
+ error_check_bad dbremove1_ab $r 0
+ error_check_good dbremove1_ab [is_substr \
+ $result "no such file"] 1
+ } else {
+ error_check_good dbremove1 $result 0
+ }
+ # Resolve subdb1 removal txn
+ set r [eval $rop1]
+ error_check_good rop1 $r 0
+
+
+ # Verify removal of subdb2. All DB handles are closed now.
+ # So we have two scenarios:
+ # 1. The removal of subdb2 above was successful and subdb2
+ # doesn't exist and we should fail that way.
+ # 2. The removal of subdb2 above was aborted, and this
+ # removal should succeed.
+ #
+ set r [ catch {berkdb dbremove -env $env $testfile $subdb2} result ]
+ if { $r2_isabort && !$t2_isabort } {
+ error_check_good dbremove2.1_ab $result 0
+ } else {
+ error_check_bad dbremove2.1 $r 0
+ error_check_good dbremove2.1 [is_substr \
+ $result "no such file"] 1
+ }
+
+ # Verify removal of subdb1. All DB handles are closed now.
+ # So we have two scenarios:
+ # 1. The removal of subdb1 above was successful and subdb1
+ # doesn't exist and we should fail that way.
+ # 2. The removal of subdb1 above was aborted, and this
+ # removal should succeed.
+ #
+ set r [ catch {berkdb dbremove -env $env $testfile $subdb1} result ]
+ if { $r1_isabort && !$t1_isabort } {
+ error_check_good dbremove1.1 $result 0
+ } else {
+ error_check_bad dbremove_open $r 0
+ error_check_good dbremove.1 [is_substr \
+ $result "no such file"] 1
+ }
+
+ puts "\t$msg.5: All closed; remove file"
+ set r [catch {berkdb dbremove -env $env $testfile} result]
+ if { $t1_isabort && $t2_isabort } {
+ error_check_bad dbremove_final_ab $r 0
+ error_check_good dbremove_file_abstr [is_substr \
+ $result "no such file"] 1
+ } else {
+ error_check_good dbremove_final $r 0
+ }
+ error_check_good envclose [$env close] 0
+}
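
The tlist protocol described at the top of subdb012 is easiest to see with one concrete combination. {one commit abort auto}, for instance, means: create both subdbs inside a single shared txn, commit that txn, remove the first subdb inside a txn that will be aborted, and remove the second with -auto_commit; since not every element is 'none', sdb012_subsys selects -txn rather than -lock for the env. A tiny sketch, assuming the procs above have been sourced:

	# All-'none' runs lock-only; anything else turns transactions on.
	puts [sdb012_subsys {none none none none}]	;# prints -lock
	puts [sdb012_subsys {one commit abort auto}]	;# prints -txn

	# One combination can also be driven by hand through the wrapper:
	#	s012 btree one commit abort auto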
diff --git a/bdb/test/sdbscript.tcl b/bdb/test/sdbscript.tcl
index 1b099520e88..d1978ccb048 100644
--- a/bdb/test/sdbscript.tcl
+++ b/bdb/test/sdbscript.tcl
@@ -1,9 +1,9 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1999, 2000
+# Copyright (c) 1999-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: sdbscript.tcl,v 11.7 2000/04/21 18:36:23 krinsky Exp $
+# $Id: sdbscript.tcl,v 11.9 2002/01/11 15:53:36 bostic Exp $
#
# Usage: subdbscript testfile subdbnumber factor
# testfile: name of DB itself
diff --git a/bdb/test/sdbtest001.tcl b/bdb/test/sdbtest001.tcl
index e3ff2b032d3..b8b4508c2a4 100644
--- a/bdb/test/sdbtest001.tcl
+++ b/bdb/test/sdbtest001.tcl
@@ -1,18 +1,26 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1999, 2000
+# Copyright (c) 1999-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: sdbtest001.tcl,v 11.13 2000/08/25 14:21:53 sue Exp $
+# $Id: sdbtest001.tcl,v 11.19 2002/05/22 15:42:42 sue Exp $
#
-# Sub DB All-Method Test 1
-# Make several subdb's of different access methods all in one DB.
-# Rotate methods and repeat [#762].
-# Use the first 10,000 entries from the dictionary.
-# Insert each with self as key and data; retrieve each.
-# After all are entered, retrieve all; compare output to original.
-# Close file, reopen, do retrieve and re-verify.
-proc subdbtest001 { {nentries 10000} } {
+# TEST sdbtest001
+# TEST Tests multiple access methods in one subdb
+# TEST Open several subdbs, each with a different access method
+# TEST Small keys, small data
+# TEST Put/get per key per subdb
+# TEST Dump file, verify per subdb
+# TEST Close, reopen per subdb
+# TEST Dump file, verify per subdb
+# TEST
+# TEST Make several subdb's of different access methods all in one DB.
+# TEST Rotate methods and repeat [#762].
+# TEST Use the first 10,000 entries from the dictionary.
+# TEST Insert each with self as key and data; retrieve each.
+# TEST After all are entered, retrieve all; compare output to original.
+# TEST Close file, reopen, do retrieve and re-verify.
+proc sdbtest001 { {nentries 10000} } {
source ./include.tcl
puts "Subdbtest001: many different subdb access methods in one"
@@ -41,16 +49,25 @@ proc subdbtest001 { {nentries 10000} } {
lappend method_list [list "-btree" "-rbtree" "-ddbtree" "-dbtree"]
lappend method_list [list "-dbtree" "-ddbtree" "-btree" "-rbtree"]
lappend method_list [list "-ddbtree" "-dbtree" "-rbtree" "-btree"]
+ set plist [list 512 8192 1024 4096 2048 16384]
+ set mlen [llength $method_list]
+ set plen [llength $plist]
+ while { $plen < $mlen } {
+ set plist [concat $plist $plist]
+ set plen [llength $plist]
+ }
+ set pgsz 0
foreach methods $method_list {
cleanup $testdir NULL
puts "\tSubdbtest001.a: create subdbs of different access methods:"
puts "\tSubdbtest001.a: $methods"
- set psize {8192 4096}
set nsubdbs [llength $methods]
set duplist ""
for { set i 0 } { $i < $nsubdbs } { incr i } {
lappend duplist -1
}
+ set psize [lindex $plist $pgsz]
+ incr pgsz
set newent [expr $nentries / $nsubdbs]
build_all_subdb $testfile $methods $psize $duplist $newent
@@ -95,7 +112,7 @@ proc subdbtest001 { {nentries 10000} } {
puts "\tSubdbtest001.c: sub$subdb.db: close, open, and dump file"
# Now, reopen the file and run the last test again.
- open_and_dump_subfile $testfile NULL $txn $t1 $checkfunc \
+ open_and_dump_subfile $testfile NULL $t1 $checkfunc \
dump_file_direction "-first" "-next" sub$subdb.db
if { [string compare $method "-recno"] != 0 } {
filesort $t1 $t3
@@ -107,7 +124,7 @@ proc subdbtest001 { {nentries 10000} } {
# Now, reopen the file and run the last test again in the
# reverse direction.
puts "\tSubdbtest001.d: sub$subdb.db: close, open, and dump file in reverse direction"
- open_and_dump_subfile $testfile NULL $txn $t1 $checkfunc \
+ open_and_dump_subfile $testfile NULL $t1 $checkfunc \
dump_file_direction "-last" "-prev" sub$subdb.db
if { [string compare $method "-recno"] != 0 } {
diff --git a/bdb/test/sdbtest002.tcl b/bdb/test/sdbtest002.tcl
index b8bad4e70e1..95717413a7b 100644
--- a/bdb/test/sdbtest002.tcl
+++ b/bdb/test/sdbtest002.tcl
@@ -1,19 +1,30 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1999, 2000
+# Copyright (c) 1999-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: sdbtest002.tcl,v 11.19 2000/08/25 14:21:53 sue Exp $
+# $Id: sdbtest002.tcl,v 11.26 2002/09/05 17:23:07 sandstro Exp $
#
-# Sub DB All-Method Test 2
-# Make several subdb's of different access methods all in one DB.
-# Fork of some child procs to each manipulate one subdb and when
-# they are finished, verify the contents of the databases.
-# Use the first 10,000 entries from the dictionary.
-# Insert each with self as key and data; retrieve each.
-# After all are entered, retrieve all; compare output to original.
-# Close file, reopen, do retrieve and re-verify.
-proc subdbtest002 { {nentries 10000} } {
+# TEST sdbtest002
+# TEST Tests multiple access methods in one subdb access by multiple
+# TEST processes.
+# TEST Open several subdbs, each with a different access method
+# TEST Small keys, small data
+# TEST Put/get per key per subdb
+# TEST Fork off several child procs to each delete selected
+# TEST data from their subdb and then exit
+# TEST Dump file, verify contents of each subdb is correct
+# TEST Close, reopen per subdb
+# TEST Dump file, verify per subdb
+# TEST
+# TEST Make several subdb's of different access methods all in one DB.
+# TEST	Fork off some child procs to each manipulate one subdb and when
+# TEST they are finished, verify the contents of the databases.
+# TEST Use the first 10,000 entries from the dictionary.
+# TEST Insert each with self as key and data; retrieve each.
+# TEST After all are entered, retrieve all; compare output to original.
+# TEST Close file, reopen, do retrieve and re-verify.
+proc sdbtest002 { {nentries 10000} } {
source ./include.tcl
puts "Subdbtest002: many different subdb access methods in one"
@@ -34,7 +45,7 @@ proc subdbtest002 { {nentries 10000} } {
cleanup $testdir NULL
puts "\tSubdbtest002.a: create subdbs of different access methods:"
puts "\t\t$methods"
- set psize {8192 4096}
+ set psize 4096
set nsubdbs [llength $methods]
set duplist ""
for { set i 0 } { $i < $nsubdbs } { incr i } {
@@ -65,7 +76,7 @@ proc subdbtest002 { {nentries 10000} } {
$testdir/subdb002.log.$subdb $testfile $subdb $nsubdbs &]
lappend pidlist $p
}
- watch_procs 5
+ watch_procs $pidlist 5
for { set subdb 0 } { $subdb < $nsubdbs } { incr subdb } {
set method [lindex $methods $subdb]
@@ -124,7 +135,7 @@ proc subdbtest002 { {nentries 10000} } {
puts "\tSubdbtest002.c: sub$subdb.db: close, open, and dump file"
# Now, reopen the file and run the last test again.
- open_and_dump_subfile $testfile NULL $txn $t1 $checkfunc \
+ open_and_dump_subfile $testfile NULL $t1 $checkfunc \
dump_file_direction "-first" "-next" sub$subdb.db
if { [string compare $method "-recno"] != 0 } {
filesort $t1 $t3
@@ -136,7 +147,7 @@ proc subdbtest002 { {nentries 10000} } {
# Now, reopen the file and run the last test again in the
# reverse direction.
puts "\tSubdbtest002.d: sub$subdb.db: close, open, and dump file in reverse direction"
- open_and_dump_subfile $testfile NULL $txn $t1 $checkfunc \
+ open_and_dump_subfile $testfile NULL $t1 $checkfunc \
dump_file_direction "-last" "-prev" sub$subdb.db
if { [string compare $method "-recno"] != 0 } {
diff --git a/bdb/test/sdbutils.tcl b/bdb/test/sdbutils.tcl
index 0cb33b28649..3221a422e18 100644
--- a/bdb/test/sdbutils.tcl
+++ b/bdb/test/sdbutils.tcl
@@ -1,21 +1,19 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1999, 2000
+# Copyright (c) 1999-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: sdbutils.tcl,v 11.9 2000/05/22 12:51:38 bostic Exp $
+# $Id: sdbutils.tcl,v 11.14 2002/06/10 15:39:39 sue Exp $
#
proc build_all_subdb { dbname methods psize dups {nentries 100} {dbargs ""}} {
set nsubdbs [llength $dups]
- set plen [llength $psize]
set mlen [llength $methods]
set savearg $dbargs
for {set i 0} {$i < $nsubdbs} { incr i } {
set m [lindex $methods [expr $i % $mlen]]
set dbargs $savearg
- set p [lindex $psize [expr $i % $plen]]
subdb_build $dbname $nentries [lindex $dups $i] \
- $i $m $p sub$i.db $dbargs
+ $i $m $psize sub$i.db $dbargs
}
}
@@ -27,6 +25,13 @@ proc subdb_build { name nkeys ndups dup_interval method psize subdb dbargs} {
puts "Method: $method"
+ set txnenv 0
+ set eindex [lsearch -exact $dbargs "-env"]
+ if { $eindex != -1 } {
+ incr eindex
+ set env [lindex $dbargs $eindex]
+ set txnenv [is_txnenv $env]
+ }
# Create the database and open the dictionary
set oflags "-create -mode 0644 $omethod \
-pagesize $psize $dbargs $name $subdb"
@@ -54,16 +59,32 @@ proc subdb_build { name nkeys ndups dup_interval method psize subdb dbargs} {
}
}
}
+ set txn ""
for { set count 0 } { [gets $did str] != -1 && $count < $nkeys } {
incr count} {
for { set i 0 } { $i < $ndups } { incr i } {
set data [format "%04d" [expr $i * $dup_interval]]
- set ret [$db put $str [chop_data $method $data]]
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn {$str \
+ [chop_data $method $data]}]
error_check_good put $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
}
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
if { $ndups == 0 } {
- set ret [$db put $str [chop_data $method NODUP]]
+ set ret [eval {$db put} $txn {$str \
+ [chop_data $method NODUP]}]
error_check_good put $ret 0
} elseif { $ndups < 0 } {
if { [is_record_based $method] == 1 } {
@@ -71,33 +92,38 @@ proc subdb_build { name nkeys ndups dup_interval method psize subdb dbargs} {
set num [expr $nkeys * $dup_interval]
set num [expr $num + $count + 1]
- set ret [$db put $num [chop_data $method $str]]
+ set ret [eval {$db put} $txn {$num \
+ [chop_data $method $str]}]
set kvals($num) [pad_data $method $str]
error_check_good put $ret 0
} else {
- set ret [$db put $str [chop_data $method $str]]
+ set ret [eval {$db put} $txn \
+ {$str [chop_data $method $str]}]
error_check_good put $ret 0
}
}
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
}
close $did
error_check_good close:$name [$db close] 0
}
-proc do_join_subdb { db primary subdbs key } {
+proc do_join_subdb { db primary subdbs key oargs } {
source ./include.tcl
puts "\tJoining: $subdbs on $key"
# Open all the databases
- set p [berkdb_open -unknown $db $primary]
+ set p [eval {berkdb_open -unknown} $oargs $db $primary]
error_check_good "primary open" [is_valid_db $p] TRUE
set dblist ""
set curslist ""
foreach i $subdbs {
- set jdb [berkdb_open -unknown $db sub$i.db]
+ set jdb [eval {berkdb_open -unknown} $oargs $db sub$i.db]
error_check_good "sub$i.db open" [is_valid_db $jdb] TRUE
lappend jlist [list $jdb $key]
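
build_all_subdb now takes a single page size rather than a list; the per-run variation moved up into sdbtest001, which cycles through its plist of page sizes one method combination at a time. A hedged sketch of the new calling convention (the method mix, dup list and file name here are illustrative):

	# Four subdbs, no duplicates (-1 each), one shared 8K page size.
	set methods {-btree -rbtree -dbtree -ddbtree}
	set duplist {-1 -1 -1 -1}
	build_all_subdb $testdir/subdbfoo.db $methods 8192 $duplist 100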
diff --git a/bdb/test/sec001.tcl b/bdb/test/sec001.tcl
new file mode 100644
index 00000000000..eb4bcc24dd2
--- /dev/null
+++ b/bdb/test/sec001.tcl
@@ -0,0 +1,205 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2001
+# Sleepycat Software. All rights reserved.
+#
+# $Id: sec001.tcl,v 11.7 2002/05/31 16:19:30 sue Exp $
+#
+# TEST sec001
+# TEST Test of security interface
+proc sec001 { } {
+ global errorInfo
+ global errorCode
+
+ source ./include.tcl
+
+ set testfile1 env1.db
+ set testfile2 $testdir/env2.db
+ set subdb1 sub1
+ set subdb2 sub2
+
+ puts "Sec001: Test of basic encryption interface."
+ env_cleanup $testdir
+
+ set passwd1 "passwd1"
+ set passwd1_bad "passwd1_bad"
+ set passwd2 "passwd2"
+ set key "key"
+ set data "data"
+
+ #
+ # This first group tests bad create scenarios and also
+ # tests attempting to use encryption after creating a
+ # non-encrypted env/db to begin with.
+ #
+ set nopass ""
+ puts "\tSec001.a.1: Create db with encryption."
+ set db [berkdb_open -create -encryptaes $passwd1 -btree $testfile2]
+ error_check_good db [is_valid_db $db] TRUE
+ error_check_good dbput [$db put $key $data] 0
+ error_check_good dbclose [$db close] 0
+
+ puts "\tSec001.a.2: Open db without encryption."
+ set stat [catch {berkdb_open_noerr $testfile2} ret]
+ error_check_good db:nocrypto $stat 1
+ error_check_good db:fail [is_substr $ret "no encryption key"] 1
+
+ set ret [berkdb dbremove -encryptaes $passwd1 $testfile2]
+
+ puts "\tSec001.b.1: Create db without encryption or checksum."
+ set db [berkdb_open -create -btree $testfile2]
+ error_check_good db [is_valid_db $db] TRUE
+ error_check_good dbput [$db put $key $data] 0
+ error_check_good dbclose [$db close] 0
+
+ puts "\tSec001.b.2: Open db with encryption."
+ set stat [catch {berkdb_open_noerr -encryptaes $passwd1 $testfile2} ret]
+ error_check_good db:nocrypto $stat 1
+ error_check_good db:fail [is_substr $ret "supplied encryption key"] 1
+
+ set ret [berkdb dbremove $testfile2]
+
+ puts "\tSec001.c.1: Create db with checksum."
+ set db [berkdb_open -create -chksum -btree $testfile2]
+ error_check_good db [is_valid_db $db] TRUE
+ error_check_good dbput [$db put $key $data] 0
+ error_check_good dbclose [$db close] 0
+
+ puts "\tSec001.c.2: Open db with encryption."
+ set stat [catch {berkdb_open_noerr -encryptaes $passwd1 $testfile2} ret]
+ error_check_good db:nocrypto $stat 1
+ error_check_good db:fail [is_substr $ret "supplied encryption key"] 1
+
+ set ret [berkdb dbremove $testfile2]
+
+ puts "\tSec001.d.1: Create subdb with encryption."
+ set db [berkdb_open -create -encryptaes $passwd1 -btree \
+ $testfile2 $subdb1]
+ error_check_good subdb [is_valid_db $db] TRUE
+ error_check_good dbput [$db put $key $data] 0
+ error_check_good dbclose [$db close] 0
+
+ puts "\tSec001.d.2: Create 2nd subdb without encryption."
+ set stat [catch {berkdb_open_noerr -create -btree \
+ $testfile2 $subdb2} ret]
+ error_check_good subdb:nocrypto $stat 1
+ error_check_good subdb:fail [is_substr $ret "no encryption key"] 1
+
+ set ret [berkdb dbremove -encryptaes $passwd1 $testfile2]
+
+ puts "\tSec001.e.1: Create subdb without encryption or checksum."
+ set db [berkdb_open -create -btree $testfile2 $subdb1]
+ error_check_good db [is_valid_db $db] TRUE
+ error_check_good dbput [$db put $key $data] 0
+ error_check_good dbclose [$db close] 0
+
+ puts "\tSec001.e.2: Create 2nd subdb with encryption."
+ set stat [catch {berkdb_open_noerr -create -btree -encryptaes $passwd1 \
+ $testfile2 $subdb2} ret]
+ error_check_good subdb:nocrypto $stat 1
+ error_check_good subdb:fail [is_substr $ret "supplied encryption key"] 1
+
+ env_cleanup $testdir
+
+ puts "\tSec001.f.1: Open env with encryption, empty passwd."
+ set stat [catch {berkdb_env_noerr -create -home $testdir \
+ -encryptaes $nopass} ret]
+ error_check_good env:nopass $stat 1
+ error_check_good env:fail [is_substr $ret "Empty password"] 1
+
+ puts "\tSec001.f.2: Create without encryption algorithm (DB_ENCRYPT_ANY)."
+ set stat [catch {berkdb_env_noerr -create -home $testdir \
+ -encryptany $passwd1} ret]
+ error_check_good env:any $stat 1
+ error_check_good env:fail [is_substr $ret "algorithm not supplied"] 1
+
+ puts "\tSec001.f.3: Create without encryption."
+ set env [berkdb_env -create -home $testdir]
+ error_check_good env [is_valid_env $env] TRUE
+
+ puts "\tSec001.f.4: Open again with encryption."
+ set stat [catch {berkdb_env_noerr -home $testdir \
+ -encryptaes $passwd1} ret]
+ error_check_good env:unencrypted $stat 1
+ error_check_good env:fail [is_substr $ret \
+ "Joining non-encrypted environment"] 1
+
+ error_check_good envclose [$env close] 0
+
+ env_cleanup $testdir
+
+ #
+ # This second group tests creating and opening a secure env.
+	# We test that others can join successfully, and that others with
+ # bad/no passwords cannot. Also test that we cannot use the
+ # db->set_encrypt method when we've already got a secure dbenv.
+ #
+ puts "\tSec001.g.1: Open with encryption."
+ set env [berkdb_env_noerr -create -home $testdir -encryptaes $passwd1]
+ error_check_good env [is_valid_env $env] TRUE
+
+ puts "\tSec001.g.2: Open again with encryption - same passwd."
+ set env1 [berkdb_env -home $testdir -encryptaes $passwd1]
+ error_check_good env [is_valid_env $env1] TRUE
+ error_check_good envclose [$env1 close] 0
+
+ puts "\tSec001.g.3: Open again with any encryption (DB_ENCRYPT_ANY)."
+ set env1 [berkdb_env -home $testdir -encryptany $passwd1]
+ error_check_good env [is_valid_env $env1] TRUE
+ error_check_good envclose [$env1 close] 0
+
+ puts "\tSec001.g.4: Open with encryption - different length passwd."
+ set stat [catch {berkdb_env_noerr -home $testdir \
+ -encryptaes $passwd1_bad} ret]
+ error_check_good env:$passwd1_bad $stat 1
+ error_check_good env:fail [is_substr $ret "Invalid password"] 1
+
+ puts "\tSec001.g.5: Open with encryption - different passwd."
+ set stat [catch {berkdb_env_noerr -home $testdir \
+ -encryptaes $passwd2} ret]
+ error_check_good env:$passwd2 $stat 1
+ error_check_good env:fail [is_substr $ret "Invalid password"] 1
+
+ puts "\tSec001.g.6: Open env without encryption."
+ set stat [catch {berkdb_env_noerr -home $testdir} ret]
+ error_check_good env:$passwd2 $stat 1
+ error_check_good env:fail [is_substr $ret "Encrypted environment"] 1
+
+ puts "\tSec001.g.7: Open database with encryption in env"
+ set stat [catch {berkdb_open_noerr -env $env -btree -create \
+ -encryptaes $passwd2 $testfile1} ret]
+ error_check_good db:$passwd2 $stat 1
+ error_check_good env:fail [is_substr $ret "method not permitted"] 1
+
+ puts "\tSec001.g.8: Close creating env"
+ error_check_good envclose [$env close] 0
+
+ #
+ # This third group tests opening the env after the original env
+ # handle is closed. Just to make sure we can reopen it in
+ # the right fashion even if no handles are currently open.
+ #
+ puts "\tSec001.h.1: Reopen without encryption."
+ set stat [catch {berkdb_env_noerr -home $testdir} ret]
+ error_check_good env:noencrypt $stat 1
+ error_check_good env:fail [is_substr $ret "Encrypted environment"] 1
+
+ puts "\tSec001.h.2: Reopen with bad passwd."
+ set stat [catch {berkdb_env_noerr -home $testdir -encryptaes \
+ $passwd1_bad} ret]
+ error_check_good env:$passwd1_bad $stat 1
+ error_check_good env:fail [is_substr $ret "Invalid password"] 1
+
+ puts "\tSec001.h.3: Reopen with encryption."
+ set env [berkdb_env -create -home $testdir -encryptaes $passwd1]
+ error_check_good env [is_valid_env $env] TRUE
+
+ puts "\tSec001.h.4: 2nd Reopen with encryption."
+ set env1 [berkdb_env -home $testdir -encryptaes $passwd1]
+ error_check_good env [is_valid_env $env1] TRUE
+
+ error_check_good envclose [$env1 close] 0
+ error_check_good envclose [$env close] 0
+
+ puts "\tSec001 complete."
+}
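
Like the rest of the new tests, sec001 relies on the _noerr command variants plus catch to turn expected Berkeley DB failures into strings it can inspect with is_substr, instead of aborting the Tcl run. A minimal sketch of that idiom using the same error Sec001.a.2 expects; the file name is a placeholder:

	# The db was created with -encryptaes, so opening it with no key must fail.
	set stat [catch {berkdb_open_noerr $testdir/enc.db} ret]
	if { $stat == 1 && [is_substr $ret "no encryption key"] } {
		puts "open without a key failed as expected: $ret"
	}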
diff --git a/bdb/test/sec002.tcl b/bdb/test/sec002.tcl
new file mode 100644
index 00000000000..d790162f1d7
--- /dev/null
+++ b/bdb/test/sec002.tcl
@@ -0,0 +1,143 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2001
+# Sleepycat Software. All rights reserved.
+#
+# $Id: sec002.tcl,v 11.3 2002/04/24 19:04:59 bostic Exp $
+#
+# TEST sec002
+# TEST Test of security interface and catching errors in the
+# TEST face of attackers overwriting parts of existing files.
+proc sec002 { } {
+ global errorInfo
+ global errorCode
+
+ source ./include.tcl
+
+ set testfile1 $testdir/sec002-1.db
+ set testfile2 $testdir/sec002-2.db
+ set testfile3 $testdir/sec002-3.db
+ set testfile4 $testdir/sec002-4.db
+
+ puts "Sec002: Test of basic encryption interface."
+ env_cleanup $testdir
+
+ set passwd1 "passwd1"
+ set passwd2 "passwd2"
+ set key "key"
+ set data "data"
+ set pagesize 1024
+
+ #
+	# Set up 4 databases: two encrypted, but with different passwords;
+	# one unencrypted, but with checksumming turned on; and one
+	# unencrypted with no checksumming.  Place the exact same data
+	# in each one.
+ #
+ puts "\tSec002.a: Setup databases"
+ set db_cmd "-create -pagesize $pagesize -btree "
+ set db [eval {berkdb_open} -encryptaes $passwd1 $db_cmd $testfile1]
+ error_check_good db [is_valid_db $db] TRUE
+ error_check_good dbput [$db put $key $data] 0
+ error_check_good dbclose [$db close] 0
+
+ set db [eval {berkdb_open} -encryptaes $passwd2 $db_cmd $testfile2]
+ error_check_good db [is_valid_db $db] TRUE
+ error_check_good dbput [$db put $key $data] 0
+ error_check_good dbclose [$db close] 0
+
+ set db [eval {berkdb_open} -chksum $db_cmd $testfile3]
+ error_check_good db [is_valid_db $db] TRUE
+ error_check_good dbput [$db put $key $data] 0
+ error_check_good dbclose [$db close] 0
+
+ set db [eval {berkdb_open} $db_cmd $testfile4]
+ error_check_good db [is_valid_db $db] TRUE
+ error_check_good dbput [$db put $key $data] 0
+ error_check_good dbclose [$db close] 0
+
+ #
+ # First just touch some bits in the file. We know that in btree
+	# meta pages, bytes 92-459 are unused.  Scribble on them in the
+	# encrypted file and in both unencrypted files.  We should get
+	# a checksum error for the encrypted and the checksummed files.
+ # We should get no error for the normal file.
+ #
+ set fidlist {}
+ set fid [open $testfile1 r+]
+ lappend fidlist $fid
+ set fid [open $testfile3 r+]
+ lappend fidlist $fid
+ set fid [open $testfile4 r+]
+ lappend fidlist $fid
+
+ puts "\tSec002.b: Overwrite unused space in meta-page"
+ foreach f $fidlist {
+ fconfigure $f -translation binary
+ seek $f 100 start
+ set byte [read $f 1]
+ binary scan $byte c val
+ set newval [expr ~$val]
+ set newbyte [binary format c $newval]
+ seek $f 100 start
+ puts -nonewline $f $newbyte
+ close $f
+ }
+ puts "\tSec002.c: Reopen modified databases"
+ set stat [catch {berkdb_open_noerr -encryptaes $passwd1 $testfile1} ret]
+ error_check_good db:$testfile1 $stat 1
+ error_check_good db:$testfile1:fail \
+ [is_substr $ret "metadata page checksum error"] 1
+
+ set stat [catch {berkdb_open_noerr -chksum $testfile3} ret]
+ error_check_good db:$testfile3 $stat 1
+ error_check_good db:$testfile3:fail \
+ [is_substr $ret "metadata page checksum error"] 1
+
+ set stat [catch {berkdb_open_noerr $testfile4} db]
+ error_check_good db:$testfile4 $stat 0
+ error_check_good dbclose [$db close] 0
+
+ puts "\tSec002.d: Replace root page in encrypted w/ encrypted"
+ set fid1 [open $testfile1 r+]
+ set fid2 [open $testfile2 r+]
+ seek $fid1 $pagesize start
+ seek $fid2 $pagesize start
+ set root1 [read $fid1 $pagesize]
+ close $fid1
+ puts -nonewline $fid2 $root1
+ close $fid2
+
+ set db [berkdb_open_noerr -encryptaes $passwd2 $testfile2]
+ error_check_good db [is_valid_db $db] TRUE
+ set stat [catch {$db get $key} ret]
+ error_check_good dbget $stat 1
+ error_check_good db:$testfile2:fail \
+ [is_substr $ret "checksum error: catastrophic recovery required"] 1
+ set stat [catch {$db close} ret]
+ error_check_good dbclose $stat 1
+ error_check_good db:$testfile2:fail [is_substr $ret "DB_RUNRECOVERY"] 1
+
+ puts "\tSec002.e: Replace root page in encrypted w/ unencrypted"
+ set fid2 [open $testfile2 r+]
+ set fid4 [open $testfile4 r+]
+ seek $fid2 $pagesize start
+ seek $fid4 $pagesize start
+ set root4 [read $fid4 $pagesize]
+ close $fid4
+ puts -nonewline $fid2 $root4
+ close $fid2
+
+ set db [berkdb_open_noerr -encryptaes $passwd2 $testfile2]
+ error_check_good db [is_valid_db $db] TRUE
+ set stat [catch {$db get $key} ret]
+ error_check_good dbget $stat 1
+ error_check_good db:$testfile2:fail \
+ [is_substr $ret "checksum error: catastrophic recovery required"] 1
+ set stat [catch {$db close} ret]
+ error_check_good dbclose $stat 1
+ error_check_good db:$testfile2:fail [is_substr $ret "DB_RUNRECOVERY"] 1
+
+ cleanup $testdir NULL 1
+ puts "\tSec002 complete."
+}
diff --git a/bdb/test/shelltest.tcl b/bdb/test/shelltest.tcl
new file mode 100644
index 00000000000..6190bac1f8d
--- /dev/null
+++ b/bdb/test/shelltest.tcl
@@ -0,0 +1,88 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2001-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: shelltest.tcl,v 1.20 2002/04/19 15:42:20 bostic Exp $
+#
+# TEST scr###
+# TEST The scr### directories are shell scripts that test a variety of
+# TEST things, including things about the distribution itself. These
+# TEST tests won't run on most systems, so don't even try to run them.
+#
+# shelltest.tcl:
+# Code to run shell script tests, to incorporate Java, C++,
+# example compilation, etc. test scripts into the Tcl framework.
+proc shelltest { { run_one 0 }} {
+ source ./include.tcl
+ global shelltest_list
+
+ set SH /bin/sh
+ if { [file executable $SH] != 1 } {
+ puts "Shell tests require valid shell /bin/sh: not found."
+ puts "Skipping shell tests."
+ return 0
+ }
+
+ if { $run_one == 0 } {
+ puts "Running shell script tests..."
+
+ foreach testpair $shelltest_list {
+ set dir [lindex $testpair 0]
+ set test [lindex $testpair 1]
+
+ env_cleanup $testdir
+ shelltest_copy $test_path/$dir $testdir
+ shelltest_run $SH $dir $test $testdir
+ }
+ } else {
+ set run_one [expr $run_one - 1];
+ set dir [lindex [lindex $shelltest_list $run_one] 0]
+ set test [lindex [lindex $shelltest_list $run_one] 1]
+
+ env_cleanup $testdir
+ shelltest_copy $test_path/$dir $testdir
+ shelltest_run $SH $dir $test $testdir
+ }
+}
+
+proc shelltest_copy { fromdir todir } {
+ set globall [glob $fromdir/*]
+
+ foreach f $globall {
+ file copy $f $todir/
+ }
+}
+
+proc shelltest_run { sh srcdir test testdir } {
+ puts "Running shell script $srcdir ($test)..."
+
+ set ret [catch {exec $sh -c "cd $testdir && sh $test" >&@ stdout} res]
+
+ if { $ret != 0 } {
+ puts "FAIL: shell test $srcdir/$test exited abnormally"
+ }
+}
+
+proc scr001 {} { shelltest 1 }
+proc scr002 {} { shelltest 2 }
+proc scr003 {} { shelltest 3 }
+proc scr004 {} { shelltest 4 }
+proc scr005 {} { shelltest 5 }
+proc scr006 {} { shelltest 6 }
+proc scr007 {} { shelltest 7 }
+proc scr008 {} { shelltest 8 }
+proc scr009 {} { shelltest 9 }
+proc scr010 {} { shelltest 10 }
+proc scr011 {} { shelltest 11 }
+proc scr012 {} { shelltest 12 }
+proc scr013 {} { shelltest 13 }
+proc scr014 {} { shelltest 14 }
+proc scr015 {} { shelltest 15 }
+proc scr016 {} { shelltest 16 }
+proc scr017 {} { shelltest 17 }
+proc scr018 {} { shelltest 18 }
+proc scr019 {} { shelltest 19 }
+proc scr020 {} { shelltest 20 }
+proc scr021 {} { shelltest 21 }
+proc scr022 {} { shelltest 22 }
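
Each scr### proc is just a thin wrapper that runs one entry of shelltest_list by its 1-based position (shelltest subtracts one before indexing). Assuming shelltest_list has been populated by the suite's configuration, the following two calls are equivalent:

	shelltest 3	;# run the third shell-script test directly
	scr003		;# same test, via its wrapper proc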
diff --git a/bdb/test/si001.tcl b/bdb/test/si001.tcl
new file mode 100644
index 00000000000..1a2247c5f8b
--- /dev/null
+++ b/bdb/test/si001.tcl
@@ -0,0 +1,116 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2001-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: si001.tcl,v 1.7 2002/04/29 17:12:02 sandstro Exp $
+#
+# TEST sindex001
+# TEST Basic secondary index put/delete test
+proc sindex001 { methods {nentries 200} {tnum 1} args } {
+ source ./include.tcl
+ global dict nsecondaries
+
+ # Primary method/args.
+ set pmethod [lindex $methods 0]
+ set pargs [convert_args $pmethod $args]
+ set pomethod [convert_method $pmethod]
+
+ # Method/args for all the secondaries. If only one method
+ # was specified, assume the same method and a standard N
+ # secondaries.
+ set methods [lrange $methods 1 end]
+ if { [llength $methods] == 0 } {
+ for { set i 0 } { $i < $nsecondaries } { incr i } {
+ lappend methods $pmethod
+ }
+ }
+
+ set argses [convert_argses $methods $args]
+ set omethods [convert_methods $methods]
+
+ puts "Sindex00$tnum ($pmethod/$methods) $nentries equal key/data pairs"
+ env_cleanup $testdir
+
+ set pname "primary00$tnum.db"
+ set snamebase "secondary00$tnum"
+
+ # Open an environment
+ # XXX if one is not supplied!
+ set env [berkdb_env -create -home $testdir]
+ error_check_good env_open [is_valid_env $env] TRUE
+
+ # Open the primary.
+ set pdb [eval {berkdb_open -create -env} $env $pomethod $pargs $pname]
+ error_check_good primary_open [is_valid_db $pdb] TRUE
+
+ # Open and associate the secondaries
+ set sdbs {}
+ for { set i 0 } { $i < [llength $omethods] } { incr i } {
+ set sdb [eval {berkdb_open -create -env} $env \
+ [lindex $omethods $i] [lindex $argses $i] $snamebase.$i.db]
+ error_check_good second_open($i) [is_valid_db $sdb] TRUE
+
+ error_check_good db_associate($i) \
+ [$pdb associate [callback_n $i] $sdb] 0
+ lappend sdbs $sdb
+ }
+
+ puts "\tSindex00$tnum.a: Put loop"
+ set did [open $dict]
+ for { set n 0 } { [gets $did str] != -1 && $n < $nentries } { incr n } {
+ if { [is_record_based $pmethod] == 1 } {
+ set key [expr $n + 1]
+ set datum $str
+ } else {
+ set key $str
+ gets $did datum
+ }
+ set keys($n) $key
+ set data($n) [pad_data $pmethod $datum]
+
+ set ret [eval {$pdb put} {$key [chop_data $pmethod $datum]}]
+ error_check_good put($n) $ret 0
+ }
+ close $did
+ check_secondaries $pdb $sdbs $nentries keys data "Sindex00$tnum.a"
+
+ puts "\tSindex00$tnum.b: Put/overwrite loop"
+ for { set n 0 } { $n < $nentries } { incr n } {
+ set newd $data($n).$keys($n)
+ set ret [eval {$pdb put} {$keys($n) [chop_data $pmethod $newd]}]
+ error_check_good put_overwrite($n) $ret 0
+ set data($n) [pad_data $pmethod $newd]
+ }
+ check_secondaries $pdb $sdbs $nentries keys data "Sindex00$tnum.b"
+
+ # Delete the second half of the entries through the primary.
+ # We do the second half so we can just pass keys(0 ... n/2)
+ # to check_secondaries.
+ set half [expr $nentries / 2]
+ puts "\tSindex00$tnum.c: Primary delete loop: deleting $half entries"
+ for { set n $half } { $n < $nentries } { incr n } {
+ set ret [$pdb del $keys($n)]
+ error_check_good pdel($n) $ret 0
+ }
+ check_secondaries $pdb $sdbs $half keys data "Sindex00$tnum.c"
+
+ # Delete half of what's left, through the first secondary.
+ set quar [expr $half / 2]
+ puts "\tSindex00$tnum.d: Secondary delete loop: deleting $quar entries"
+ set sdb [lindex $sdbs 0]
+ set callback [callback_n 0]
+ for { set n $quar } { $n < $half } { incr n } {
+ set skey [$callback $keys($n) [pad_data $pmethod $data($n)]]
+ set ret [$sdb del $skey]
+ error_check_good sdel($n) $ret 0
+ }
+ check_secondaries $pdb $sdbs $quar keys data "Sindex00$tnum.d"
+
+ puts "\tSindex00$tnum.e: Closing/disassociating primary first"
+ error_check_good primary_close [$pdb close] 0
+ foreach sdb $sdbs {
+ error_check_good secondary_close [$sdb close] 0
+ }
+ error_check_good env_close [$env close] 0
+}
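
The associate calls in sindex001 bind each secondary to the primary through a Tcl callback that computes the secondary key from a primary key/data pair; callback_n n names the suite's standard callbacks. A hedged sketch of what such a callback can look like and how it is attached; the proc name and key rule below are illustrative, not the suite's actual callback_n:

	# Hypothetical extractor: index primary records by the first
	# character of their data item.
	proc first_char_callback { key data } {
		return [string index $data 0]
	}
	# Attach it to an already-open secondary (handles opened as above):
	#	error_check_good assoc [$pdb associate first_char_callback $sdb] 0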
diff --git a/bdb/test/si002.tcl b/bdb/test/si002.tcl
new file mode 100644
index 00000000000..46ba86e7560
--- /dev/null
+++ b/bdb/test/si002.tcl
@@ -0,0 +1,167 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2001-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: si002.tcl,v 1.6 2002/04/29 17:12:02 sandstro Exp $
+#
+# TEST sindex002
+# TEST Basic cursor-based secondary index put/delete test
+proc sindex002 { methods {nentries 200} {tnum 2} args } {
+ source ./include.tcl
+ global dict nsecondaries
+
+ # Primary method/args.
+ set pmethod [lindex $methods 0]
+ set pargs [convert_args $pmethod $args]
+ set pomethod [convert_method $pmethod]
+
+ # Method/args for all the secondaries. If only one method
+ # was specified, assume the same method and a standard N
+ # secondaries.
+ set methods [lrange $methods 1 end]
+ if { [llength $methods] == 0 } {
+ for { set i 0 } { $i < $nsecondaries } { incr i } {
+ lappend methods $pmethod
+ }
+ }
+
+ set argses [convert_argses $methods $args]
+ set omethods [convert_methods $methods]
+
+ puts "Sindex00$tnum ($pmethod/$methods) $nentries equal key/data pairs"
+ env_cleanup $testdir
+
+ set pname "primary00$tnum.db"
+ set snamebase "secondary00$tnum"
+
+ # Open an environment
+ # XXX if one is not supplied!
+ set env [berkdb_env -create -home $testdir]
+ error_check_good env_open [is_valid_env $env] TRUE
+
+ # Open the primary.
+ set pdb [eval {berkdb_open -create -env} $env $pomethod $pargs $pname]
+ error_check_good primary_open [is_valid_db $pdb] TRUE
+
+ # Open and associate the secondaries
+ set sdbs {}
+ for { set i 0 } { $i < [llength $omethods] } { incr i } {
+ set sdb [eval {berkdb_open -create -env} $env \
+ [lindex $omethods $i] [lindex $argses $i] $snamebase.$i.db]
+ error_check_good second_open($i) [is_valid_db $sdb] TRUE
+
+ error_check_good db_associate($i) \
+ [$pdb associate [callback_n $i] $sdb] 0
+ lappend sdbs $sdb
+ }
+
+ puts "\tSindex00$tnum.a: Cursor put (-keyfirst/-keylast) loop"
+ set did [open $dict]
+ set pdbc [$pdb cursor]
+ error_check_good pdb_cursor [is_valid_cursor $pdbc $pdb] TRUE
+ for { set n 0 } { [gets $did str] != -1 && $n < $nentries } { incr n } {
+ if { [is_record_based $pmethod] == 1 } {
+ set key [expr $n + 1]
+ set datum $str
+ } else {
+ set key $str
+ gets $did datum
+ }
+ set ns($key) $n
+ set keys($n) $key
+ set data($n) [pad_data $pmethod $datum]
+
+ if { $n % 2 == 0 } {
+ set pflag " -keyfirst "
+ } else {
+ set pflag " -keylast "
+ }
+
+ set ret [eval {$pdbc put} $pflag \
+ {$key [chop_data $pmethod $datum]}]
+ error_check_good put($n) $ret 0
+ }
+ close $did
+ error_check_good pdbc_close [$pdbc close] 0
+ check_secondaries $pdb $sdbs $nentries keys data "Sindex00$tnum.a"
+
+ puts "\tSindex00$tnum.b: Cursor put overwrite (-current) loop"
+ set pdbc [$pdb cursor]
+ error_check_good pdb_cursor [is_valid_cursor $pdbc $pdb] TRUE
+ for { set dbt [$pdbc get -first] } { [llength $dbt] > 0 } \
+ { set dbt [$pdbc get -next] } {
+ set key [lindex [lindex $dbt 0] 0]
+ set datum [lindex [lindex $dbt 0] 1]
+ set newd $datum.$key
+ set ret [eval {$pdbc put -current} [chop_data $pmethod $newd]]
+ error_check_good put_overwrite($key) $ret 0
+ set data($ns($key)) [pad_data $pmethod $newd]
+ }
+ error_check_good pdbc_close [$pdbc close] 0
+ check_secondaries $pdb $sdbs $nentries keys data "Sindex00$tnum.b"
+
+ puts "\tSindex00$tnum.c: Secondary c_pget/primary put overwrite loop"
+ # We walk the first secondary, then put-overwrite each primary key/data
+ # pair we find. This doubles as a DBC->c_pget test.
+ set sdb [lindex $sdbs 0]
+ set sdbc [$sdb cursor]
+ error_check_good sdb_cursor [is_valid_cursor $sdbc $sdb] TRUE
+ for { set dbt [$sdbc pget -first] } { [llength $dbt] > 0 } \
+ { set dbt [$sdbc pget -next] } {
+ set pkey [lindex [lindex $dbt 0] 1]
+ set pdatum [lindex [lindex $dbt 0] 2]
+
+ # Extended entries will be showing up underneath us, in
+ # unpredictable places. Keep track of which pkeys
+ # we've extended, and don't extend them repeatedly.
+ if { [info exists pkeys_done($pkey)] == 1 } {
+ continue
+ } else {
+ set pkeys_done($pkey) 1
+ }
+
+ set newd $pdatum.[string range $pdatum 0 2]
+ set ret [eval {$pdb put} $pkey [chop_data $pmethod $newd]]
+ error_check_good pdb_put($pkey) $ret 0
+ set data($ns($pkey)) [pad_data $pmethod $newd]
+ }
+ error_check_good sdbc_close [$sdbc close] 0
+ check_secondaries $pdb $sdbs $nentries keys data "Sindex00$tnum.c"
+
+ # Delete the second half of the entries through the primary.
+ # We do the second half so we can just pass keys(0 ... n/2)
+ # to check_secondaries.
+ set half [expr $nentries / 2]
+ puts "\tSindex00$tnum.d:\
+ Primary cursor delete loop: deleting $half entries"
+ set pdbc [$pdb cursor]
+ error_check_good pdb_cursor [is_valid_cursor $pdbc $pdb] TRUE
+ set dbt [$pdbc get -first]
+ for { set i 0 } { [llength $dbt] > 0 && $i < $half } { incr i } {
+ error_check_good pdbc_del [$pdbc del] 0
+ set dbt [$pdbc get -next]
+ }
+ error_check_good pdbc_close [$pdbc close] 0
+ cursor_check_secondaries $pdb $sdbs $half "Sindex00$tnum.d"
+
+ # Delete half of what's left, through the first secondary.
+ set quar [expr $half / 2]
+ puts "\tSindex00$tnum.e:\
+ Secondary cursor delete loop: deleting $quar entries"
+ set sdb [lindex $sdbs 0]
+ set sdbc [$sdb cursor]
+ set dbt [$sdbc get -first]
+ for { set i 0 } { [llength $dbt] > 0 && $i < $quar } { incr i } {
+ error_check_good sdbc_del [$sdbc del] 0
+ set dbt [$sdbc get -next]
+ }
+ error_check_good sdbc_close [$sdbc close] 0
+ cursor_check_secondaries $pdb $sdbs $quar "Sindex00$tnum.e"
+
+ foreach sdb $sdbs {
+ error_check_good secondary_close [$sdb close] 0
+ }
+ error_check_good primary_close [$pdb close] 0
+ error_check_good env_close [$env close] 0
+}
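The c_pget loops above depend on the return shape of the Tcl API for secondary cursors: each pget call yields a list holding one {skey pkey pdata} triple. A minimal sketch of that access pattern, assuming the suite's helpers have been sourced and $sdb is an already-associated secondary handle:

    set sdbc [$sdb cursor]
    error_check_good sdbc [is_valid_cursor $sdbc $sdb] TRUE
    for { set dbt [$sdbc pget -first] } { [llength $dbt] > 0 } \
        { set dbt [$sdbc pget -next] } {
        set skey  [lindex [lindex $dbt 0] 0]   ;# secondary key
        set pkey  [lindex [lindex $dbt 0] 1]   ;# primary key
        set pdata [lindex [lindex $dbt 0] 2]   ;# primary data
        puts "$skey -> $pkey/$pdata"
    }
    error_check_good sdbc_close [$sdbc close] 0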
diff --git a/bdb/test/si003.tcl b/bdb/test/si003.tcl
new file mode 100644
index 00000000000..1cc8c884e75
--- /dev/null
+++ b/bdb/test/si003.tcl
@@ -0,0 +1,142 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2001-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: si003.tcl,v 1.6 2002/04/29 17:12:03 sandstro Exp $
+#
+# TEST sindex003
+# TEST sindex001 with secondaries created and closed mid-test
+# TEST Basic secondary index put/delete test with secondaries
+# TEST created mid-test.
+proc sindex003 { methods {nentries 200} {tnum 3} args } {
+ source ./include.tcl
+ global dict nsecondaries
+
+ # Primary method/args.
+ set pmethod [lindex $methods 0]
+ set pargs [convert_args $pmethod $args]
+ set pomethod [convert_method $pmethod]
+
+ # Method/args for all the secondaries. If only one method
+ # was specified, assume the same method and a standard N
+ # secondaries.
+ set methods [lrange $methods 1 end]
+ if { [llength $methods] == 0 } {
+ for { set i 0 } { $i < $nsecondaries } { incr i } {
+ lappend methods $pmethod
+ }
+ }
+
+ set argses [convert_argses $methods $args]
+ set omethods [convert_methods $methods]
+
+ puts "Sindex00$tnum ($pmethod/$methods) $nentries equal key/data pairs"
+ env_cleanup $testdir
+
+ set pname "primary00$tnum.db"
+ set snamebase "secondary00$tnum"
+
+ # Open an environment
+ # XXX if one is not supplied!
+ set env [eval {berkdb_env -create -home $testdir}]
+ error_check_good env_open [is_valid_env $env] TRUE
+
+ # Open the primary.
+ set pdb [eval {berkdb_open -create -env} $env $pomethod $pargs $pname]
+ error_check_good primary_open [is_valid_db $pdb] TRUE
+
+ puts -nonewline "\tSindex00$tnum.a: Put loop ... "
+ set did [open $dict]
+ for { set n 0 } { [gets $did str] != -1 && $n < $nentries } { incr n } {
+ if { [is_record_based $pmethod] == 1 } {
+ set key [expr $n + 1]
+ set datum $str
+ } else {
+ set key $str
+ gets $did datum
+ }
+ set keys($n) $key
+ set data($n) [pad_data $pmethod $datum]
+
+ set ret [eval {$pdb put} {$key [chop_data $pmethod $datum]}]
+ error_check_good put($n) $ret 0
+ }
+ close $did
+
+ # Open and associate the secondaries
+ set sdbs {}
+ puts "opening secondaries."
+ for { set i 0 } { $i < [llength $omethods] } { incr i } {
+ set sdb [eval {berkdb_open -create -env} $env \
+ [lindex $omethods $i] [lindex $argses $i] $snamebase.$i.db]
+ error_check_good second_open($i) [is_valid_db $sdb] TRUE
+
+ error_check_good db_associate($i) \
+ [$pdb associate -create [callback_n $i] $sdb] 0
+ lappend sdbs $sdb
+ }
+ check_secondaries $pdb $sdbs $nentries keys data "Sindex00$tnum.a"
+
+ puts -nonewline "\tSindex00$tnum.b: Put/overwrite loop ... "
+ for { set n 0 } { $n < $nentries } { incr n } {
+ set newd $data($n).$keys($n)
+ set ret [eval {$pdb put} {$keys($n) [chop_data $pmethod $newd]}]
+ error_check_good put_overwrite($n) $ret 0
+ set data($n) [pad_data $pmethod $newd]
+ }
+
+ # Close the secondaries again.
+ puts "closing secondaries."
+ for { set sdb [lindex $sdbs end] } { [string length $sdb] > 0 } \
+ { set sdb [lindex $sdbs end] } {
+ error_check_good second_close($sdb) [$sdb close] 0
+ set sdbs [lrange $sdbs 0 end-1]
+ check_secondaries \
+ $pdb $sdbs $nentries keys data "Sindex00$tnum.b"
+ }
+
+ # Delete the second half of the entries through the primary.
+ # We do the second half so we can just pass keys(0 ... n/2)
+ # to check_secondaries.
+ set half [expr $nentries / 2]
+ puts -nonewline \
+ "\tSindex00$tnum.c: Primary delete loop: deleting $half entries ..."
+ for { set n $half } { $n < $nentries } { incr n } {
+ set ret [$pdb del $keys($n)]
+ error_check_good pdel($n) $ret 0
+ }
+
+ # Open and associate the secondaries
+ set sdbs {}
+ puts "\n\t\topening secondaries."
+ for { set i 0 } { $i < [llength $omethods] } { incr i } {
+ set sdb [eval {berkdb_open -create -env} $env \
+ [lindex $omethods $i] [lindex $argses $i] \
+ $snamebase.r2.$i.db]
+ error_check_good second_open($i) [is_valid_db $sdb] TRUE
+
+ error_check_good db_associate($i) \
+ [$pdb associate -create [callback_n $i] $sdb] 0
+ lappend sdbs $sdb
+ }
+ check_secondaries $pdb $sdbs $half keys data "Sindex00$tnum.c"
+
+ # Delete half of what's left, through the first secondary.
+ set quar [expr $half / 2]
+ puts "\tSindex00$tnum.d: Secondary delete loop: deleting $quar entries"
+ set sdb [lindex $sdbs 0]
+ set callback [callback_n 0]
+ for { set n $quar } { $n < $half } { incr n } {
+ set skey [$callback $keys($n) [pad_data $pmethod $data($n)]]
+ set ret [$sdb del $skey]
+ error_check_good sdel($n) $ret 0
+ }
+ check_secondaries $pdb $sdbs $quar keys data "Sindex00$tnum.d"
+
+ foreach sdb $sdbs {
+ error_check_good secondary_close [$sdb close] 0
+ }
+ error_check_good primary_close [$pdb close] 0
+ error_check_good env_close [$env close] 0
+}
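sindex003's mid-test creation works because associate is given the -create flag, which makes the library walk the already-populated primary and build the new secondary's index entries on the spot. A minimal sketch of that idiom, using the suite's helpers; the handle names and the file name newsec.db are hypothetical:

    # The primary already contains data; open an empty secondary beside it.
    set sdb [berkdb_open -create -env $env -btree newsec.db]
    error_check_good sdb_open [is_valid_db $sdb] TRUE
    # -create tells associate to populate the empty secondary from the
    # records already present in the primary.
    error_check_good assoc [$pdb associate -create [callback_n 0] $sdb] 0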
diff --git a/bdb/test/si004.tcl b/bdb/test/si004.tcl
new file mode 100644
index 00000000000..291100da6b3
--- /dev/null
+++ b/bdb/test/si004.tcl
@@ -0,0 +1,194 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2001-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: si004.tcl,v 1.6 2002/04/29 17:12:03 sandstro Exp $
+#
+# TEST sindex004
+# TEST sindex002 with secondaries created and closed mid-test
+# TEST Basic cursor-based secondary index put/delete test, with
+# TEST secondaries created mid-test.
+proc sindex004 { methods {nentries 200} {tnum 4} args } {
+ source ./include.tcl
+ global dict nsecondaries
+
+ # Primary method/args.
+ set pmethod [lindex $methods 0]
+ set pargs [convert_args $pmethod $args]
+ set pomethod [convert_method $pmethod]
+
+ # Method/args for all the secondaries. If only one method
+ # was specified, assume the same method and a standard N
+ # secondaries.
+ set methods [lrange $methods 1 end]
+ if { [llength $methods] == 0 } {
+ for { set i 0 } { $i < $nsecondaries } { incr i } {
+ lappend methods $pmethod
+ }
+ }
+
+ set argses [convert_argses $methods $args]
+ set omethods [convert_methods $methods]
+
+ puts "Sindex00$tnum ($pmethod/$methods) $nentries equal key/data pairs"
+ env_cleanup $testdir
+
+ set pname "primary00$tnum.db"
+ set snamebase "secondary00$tnum"
+
+ # Open an environment
+ # XXX if one is not supplied!
+ set env [berkdb_env -create -home $testdir]
+ error_check_good env_open [is_valid_env $env] TRUE
+
+ # Open the primary.
+ set pdb [eval {berkdb_open -create -env} $env $pomethod $pargs $pname]
+ error_check_good primary_open [is_valid_db $pdb] TRUE
+
+ puts -nonewline \
+ "\tSindex00$tnum.a: Cursor put (-keyfirst/-keylast) loop ... "
+ set did [open $dict]
+ set pdbc [$pdb cursor]
+ error_check_good pdb_cursor [is_valid_cursor $pdbc $pdb] TRUE
+ for { set n 0 } { [gets $did str] != -1 && $n < $nentries } { incr n } {
+ if { [is_record_based $pmethod] == 1 } {
+ set key [expr $n + 1]
+ set datum $str
+ } else {
+ set key $str
+ gets $did datum
+ }
+ set ns($key) $n
+ set keys($n) $key
+ set data($n) [pad_data $pmethod $datum]
+
+ if { $n % 2 == 0 } {
+ set pflag " -keyfirst "
+ } else {
+ set pflag " -keylast "
+ }
+
+ set ret [eval {$pdbc put} $pflag \
+ {$key [chop_data $pmethod $datum]}]
+ error_check_good put($n) $ret 0
+ }
+ close $did
+ error_check_good pdbc_close [$pdbc close] 0
+
+ # Open and associate the secondaries
+ set sdbs {}
+ puts "\n\t\topening secondaries."
+ for { set i 0 } { $i < [llength $omethods] } { incr i } {
+ set sdb [eval {berkdb_open -create -env} $env \
+ [lindex $omethods $i] [lindex $argses $i] $snamebase.$i.db]
+ error_check_good second_open($i) [is_valid_db $sdb] TRUE
+
+ error_check_good db_associate($i) \
+ [$pdb associate -create [callback_n $i] $sdb] 0
+ lappend sdbs $sdb
+ }
+ check_secondaries $pdb $sdbs $nentries keys data "Sindex00$tnum.a"
+
+ puts "\tSindex00$tnum.b: Cursor put overwrite (-current) loop"
+ set pdbc [$pdb cursor]
+ error_check_good pdb_cursor [is_valid_cursor $pdbc $pdb] TRUE
+ for { set dbt [$pdbc get -first] } { [llength $dbt] > 0 } \
+ { set dbt [$pdbc get -next] } {
+ set key [lindex [lindex $dbt 0] 0]
+ set datum [lindex [lindex $dbt 0] 1]
+ set newd $datum.$key
+ set ret [eval {$pdbc put -current} [chop_data $pmethod $newd]]
+ error_check_good put_overwrite($key) $ret 0
+ set data($ns($key)) [pad_data $pmethod $newd]
+ }
+ error_check_good pdbc_close [$pdbc close] 0
+ check_secondaries $pdb $sdbs $nentries keys data "Sindex00$tnum.b"
+
+ puts -nonewline "\tSindex00$tnum.c:\
+ Secondary c_pget/primary put overwrite loop ... "
+ # We walk the first secondary, then put-overwrite each primary key/data
+ # pair we find. This doubles as a DBC->c_pget test.
+ set sdb [lindex $sdbs 0]
+ set sdbc [$sdb cursor]
+ error_check_good sdb_cursor [is_valid_cursor $sdbc $sdb] TRUE
+ for { set dbt [$sdbc pget -first] } { [llength $dbt] > 0 } \
+ { set dbt [$sdbc pget -next] } {
+ set pkey [lindex [lindex $dbt 0] 1]
+ set pdatum [lindex [lindex $dbt 0] 2]
+
+ # Extended entries will be showing up underneath us, in
+ # unpredictable places. Keep track of which pkeys
+ # we've extended, and don't extend them repeatedly.
+ if { [info exists pkeys_done($pkey)] == 1 } {
+ continue
+ } else {
+ set pkeys_done($pkey) 1
+ }
+
+ set newd $pdatum.[string range $pdatum 0 2]
+ set ret [eval {$pdb put} $pkey [chop_data $pmethod $newd]]
+ error_check_good pdb_put($pkey) $ret 0
+ set data($ns($pkey)) [pad_data $pmethod $newd]
+ }
+ error_check_good sdbc_close [$sdbc close] 0
+
+ # Close the secondaries again.
+ puts "\n\t\tclosing secondaries."
+ for { set sdb [lindex $sdbs end] } { [string length $sdb] > 0 } \
+ { set sdb [lindex $sdbs end] } {
+ error_check_good second_close($sdb) [$sdb close] 0
+ set sdbs [lrange $sdbs 0 end-1]
+ check_secondaries \
+ $pdb $sdbs $nentries keys data "Sindex00$tnum.c"
+ }
+
+ # Delete the second half of the entries through the primary.
+ # We do the second half so we can just pass keys(0 ... n/2)
+ # to check_secondaries.
+ set half [expr $nentries / 2]
+ puts -nonewline "\tSindex00$tnum.d:\
+ Primary cursor delete loop: deleting $half entries ... "
+ set pdbc [$pdb cursor]
+ error_check_good pdb_cursor [is_valid_cursor $pdbc $pdb] TRUE
+ set dbt [$pdbc get -first]
+ for { set i 0 } { [llength $dbt] > 0 && $i < $half } { incr i } {
+ error_check_good pdbc_del [$pdbc del] 0
+ set dbt [$pdbc get -next]
+ }
+ error_check_good pdbc_close [$pdbc close] 0
+
+ set sdbs {}
+ puts "\n\t\topening secondaries."
+ for { set i 0 } { $i < [llength $omethods] } { incr i } {
+ set sdb [eval {berkdb_open -create -env} $env \
+ [lindex $omethods $i] [lindex $argses $i] \
+ $snamebase.r2.$i.db]
+ error_check_good second_open($i) [is_valid_db $sdb] TRUE
+
+ error_check_good db_associate($i) \
+ [$pdb associate -create [callback_n $i] $sdb] 0
+ lappend sdbs $sdb
+ }
+ cursor_check_secondaries $pdb $sdbs $half "Sindex00$tnum.d"
+
+ # Delete half of what's left, through the first secondary.
+ set quar [expr $half / 2]
+ puts "\tSindex00$tnum.e:\
+ Secondary cursor delete loop: deleting $quar entries"
+ set sdb [lindex $sdbs 0]
+ set sdbc [$sdb cursor]
+ set dbt [$sdbc get -first]
+ for { set i 0 } { [llength $dbt] > 0 && $i < $quar } { incr i } {
+ error_check_good sdbc_del [$sdbc del] 0
+ set dbt [$sdbc get -next]
+ }
+ error_check_good sdbc_close [$sdbc close] 0
+ cursor_check_secondaries $pdb $sdbs $quar "Sindex00$tnum.e"
+
+ foreach sdb $sdbs {
+ error_check_good secondary_close [$sdb close] 0
+ }
+ error_check_good primary_close [$pdb close] 0
+ error_check_good env_close [$env close] 0
+}
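The "closing secondaries" loops in sindex003/sindex004 peel one handle off the end of the list at a time and re-verify the survivors after every close. The same pattern written as a plain while loop, for clarity; the handles and the keys/data arrays are assumed to exist as they do in the tests:

    while { [llength $sdbs] > 0 } {
        set sdb [lindex $sdbs end]
        error_check_good sdb_close [$sdb close] 0
        set sdbs [lrange $sdbs 0 end-1]
        # The remaining secondaries must still agree with the primary.
        check_secondaries $pdb $sdbs $nentries keys data "recheck"
    }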
diff --git a/bdb/test/si005.tcl b/bdb/test/si005.tcl
new file mode 100644
index 00000000000..e5ed49175c9
--- /dev/null
+++ b/bdb/test/si005.tcl
@@ -0,0 +1,179 @@
+
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2001-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: si005.tcl,v 11.4 2002/04/29 17:12:03 sandstro Exp $
+#
+# Sindex005: Secondary index and join test.
+proc sindex005 { methods {nitems 1000} {tnum 5} args } {
+ source ./include.tcl
+
+ # Primary method/args.
+ set pmethod [lindex $methods 0]
+ set pargs [convert_args $pmethod $args]
+ set pomethod [convert_method $pmethod]
+
+ # Sindex005 does a join within a simulated database schema
+ # in which the primary index maps a record ID to a ZIP code and
+ # name in the form "XXXXXname", and there are two secondaries:
+ # one mapping ZIP to ID, the other mapping name to ID.
+ # The primary may be of any database type; the two secondaries
+ # must be either btree or hash.
+
+ # Method/args for all the secondaries. If only one method
+ # was specified, assume the same method for the two secondaries.
+ set methods [lrange $methods 1 end]
+ if { [llength $methods] == 0 } {
+ for { set i 0 } { $i < 2 } { incr i } {
+ lappend methods $pmethod
+ }
+ } elseif { [llength $methods] != 2 } {
+ puts "FAIL: Sindex00$tnum requires exactly two secondaries."
+ return
+ }
+
+ set argses [convert_argses $methods $args]
+ set omethods [convert_methods $methods]
+
+ puts "Sindex00$tnum ($pmethod/$methods) Secondary index join test."
+ env_cleanup $testdir
+
+ set pname "sindex00$tnum-primary.db"
+ set zipname "sindex00$tnum-zip.db"
+ set namename "sindex00$tnum-name.db"
+
+ # Open an environment
+ # XXX if one is not supplied!
+ set env [berkdb_env -create -home $testdir]
+ error_check_good env_open [is_valid_env $env] TRUE
+
+ # Open the databases.
+ set pdb [eval {berkdb_open -create -env} $env $pomethod $pargs $pname]
+ error_check_good primary_open [is_valid_db $pdb] TRUE
+
+ set zipdb [eval {berkdb_open -create -dup -env} $env \
+ [lindex $omethods 0] [lindex $argses 0] $zipname]
+ error_check_good zip_open [is_valid_db $zipdb] TRUE
+ error_check_good zip_associate [$pdb associate s5_getzip $zipdb] 0
+
+ set namedb [eval {berkdb_open -create -dup -env} $env \
+ [lindex $omethods 1] [lindex $argses 1] $namename]
+ error_check_good name_open [is_valid_db $namedb] TRUE
+ error_check_good name_associate [$pdb associate s5_getname $namedb] 0
+
+ puts "\tSindex00$tnum.a: Populate database with $nitems \"names\""
+ s5_populate $pdb $nitems
+ puts "\tSindex00$tnum.b: Perform a join on each \"name\" and \"ZIP\""
+ s5_jointest $pdb $zipdb $namedb
+
+ error_check_good name_close [$namedb close] 0
+ error_check_good zip_close [$zipdb close] 0
+ error_check_good primary_close [$pdb close] 0
+ error_check_good env_close [$env close] 0
+}
+
+proc s5_jointest { pdb zipdb namedb } {
+ set pdbc [$pdb cursor]
+ error_check_good pdb_cursor [is_valid_cursor $pdbc $pdb] TRUE
+ for { set dbt [$pdbc get -first] } { [llength $dbt] > 0 } \
+ { set dbt [$pdbc get -next] } {
+ set item [lindex [lindex $dbt 0] 1]
+ set retlist [s5_dojoin $item $pdb $zipdb $namedb]
+ }
+}
+
+proc s5_dojoin { item pdb zipdb namedb } {
+ set name [s5_getname "" $item]
+ set zip [s5_getzip "" $item]
+
+ set zipc [$zipdb cursor]
+ error_check_good zipc($item) [is_valid_cursor $zipc $zipdb] TRUE
+
+ set namec [$namedb cursor]
+ error_check_good namec($item) [is_valid_cursor $namec $namedb] TRUE
+
+ set pc [$pdb cursor]
+ error_check_good pc($item) [is_valid_cursor $pc $pdb] TRUE
+
+ set ret [$zipc get -set $zip]
+ set zd [lindex [lindex $ret 0] 1]
+ error_check_good zipset($zip) [s5_getzip "" $zd] $zip
+
+ set ret [$namec get -set $name]
+ set nd [lindex [lindex $ret 0] 1]
+ error_check_good nameset($name) [s5_getname "" $nd] $name
+
+ set joinc [$pdb join $zipc $namec]
+
+ set anyreturned 0
+ for { set dbt [$joinc get] } { [llength $dbt] > 0 } \
+ { set dbt [$joinc get] } {
+ set ritem [lindex [lindex $dbt 0] 1]
+ error_check_good returned_item($item) $ritem $item
+ incr anyreturned
+ }
+ error_check_bad anyreturned($item) $anyreturned 0
+
+ error_check_good joinc_close($item) [$joinc close] 0
+ error_check_good pc_close($item) [$pc close] 0
+ error_check_good namec_close($item) [$namec close] 0
+ error_check_good zipc_close($item) [$zipc close] 0
+}
+
+proc s5_populate { db nitems } {
+ global dict
+
+ set did [open $dict]
+ for { set i 1 } { $i <= $nitems } { incr i } {
+ gets $did word
+ if { [string length $word] < 3 } {
+ gets $did word
+ if { [string length $word] < 3 } {
+ puts "FAIL:\
+ unexpected pair of words < 3 chars long"
+ }
+ }
+ set datalist [s5_name2zips $word]
+ foreach data $datalist {
+ error_check_good db_put($data) [$db put $i $data$word] 0
+ }
+ }
+ close $did
+}
+
+proc s5_getzip { key data } { return [string range $data 0 4] }
+proc s5_getname { key data } { return [string range $data 5 end] }
+
+# The dirty secret of this test is that the ZIP code is a function of the
+# name, so we can generate a database and then verify join results easily
+# without having to consult actual data.
+#
+# Any word passed into this function will generate from 1 to 26 ZIP
+# entries, out of the set {00000, 01000 ... 99000}. The number of entries
+# is just the position in the alphabet of the word's first letter; the
+# entries are then hashed to the set {00, 01 ... 99} N different ways.
+proc s5_name2zips { name } {
+ global alphabet
+
+ set n [expr [string first [string index $name 0] $alphabet] + 1]
+ error_check_bad starts_with_abc($name) $n -1
+
+ set ret {}
+ for { set i 0 } { $i < $n } { incr i } {
+ set b 0
+ for { set j 1 } { $j < [string length $name] } \
+ { incr j } {
+ set b [s5_nhash $name $i $j $b]
+ }
+ lappend ret [format %05u [expr $b % 100]000]
+ }
+ return $ret
+}
+proc s5_nhash { name i j b } {
+ global alphabet
+
+ set c [string first [string index $name $j] $alphabet]
+ return [expr (($b * 991) + ($i * 997) + $c) % 10000000]
+}
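Because s5_name2zips is deterministic, the join results in sindex005 can be predicted without reading the database back. For example, any word whose first letter is "c" (third in the alphabet) produces exactly three entries, each a five-digit string drawn from {00000, 01000, ..., 99000}. A rough sketch of checking that directly, assuming test.tcl and this file have been sourced so the procs and error_check helpers are available:

    set zips [s5_name2zips "cat"]
    error_check_good zip_count [llength $zips] 3
    foreach z $zips {
        # Every entry is five digits and ends in "000".
        error_check_good zip_len($z) [string length $z] 5
        error_check_good zip_tail($z) [string range $z 2 end] 000
    }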
diff --git a/bdb/test/si006.tcl b/bdb/test/si006.tcl
new file mode 100644
index 00000000000..3a1dbb3c4f8
--- /dev/null
+++ b/bdb/test/si006.tcl
@@ -0,0 +1,129 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2001-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: si006.tcl,v 1.2 2002/05/15 17:18:03 sandstro Exp $
+#
+# TEST sindex006
+# TEST Basic secondary index put/delete test with transactions
+proc sindex006 { methods {nentries 200} {tnum 6} args } {
+ source ./include.tcl
+ global dict nsecondaries
+
+ # Primary method/args.
+ set pmethod [lindex $methods 0]
+ set pargs [convert_args $pmethod $args]
+ set pomethod [convert_method $pmethod]
+
+ # Method/args for all the secondaries. If only one method
+ # was specified, assume the same method and a standard N
+ # secondaries.
+ set methods [lrange $methods 1 end]
+ if { [llength $methods] == 0 } {
+ for { set i 0 } { $i < $nsecondaries } { incr i } {
+ lappend methods $pmethod
+ }
+ }
+
+ set argses [convert_argses $methods $args]
+ set omethods [convert_methods $methods]
+
+ puts "Sindex00$tnum ($pmethod/$methods) $nentries equal key/data pairs"
+ puts " with transactions"
+ env_cleanup $testdir
+
+ set pname "primary00$tnum.db"
+ set snamebase "secondary00$tnum"
+
+ # Open an environment
+ # XXX if one is not supplied!
+ set env [berkdb_env -create -home $testdir -txn]
+ error_check_good env_open [is_valid_env $env] TRUE
+
+ # Open the primary.
+ set pdb [eval {berkdb_open -create -auto_commit -env} $env $pomethod \
+ $pargs $pname]
+ error_check_good primary_open [is_valid_db $pdb] TRUE
+
+ # Open and associate the secondaries
+ set sdbs {}
+ for { set i 0 } { $i < [llength $omethods] } { incr i } {
+ set sdb [eval {berkdb_open -create -auto_commit -env} $env \
+ [lindex $omethods $i] [lindex $argses $i] $snamebase.$i.db]
+ error_check_good second_open($i) [is_valid_db $sdb] TRUE
+
+ error_check_good db_associate($i) \
+ [$pdb associate -auto_commit [callback_n $i] $sdb] 0
+ lappend sdbs $sdb
+ }
+
+ puts "\tSindex00$tnum.a: Put loop"
+ set did [open $dict]
+ for { set n 0 } { [gets $did str] != -1 && $n < $nentries } { incr n } {
+ if { [is_record_based $pmethod] == 1 } {
+ set key [expr $n + 1]
+ set datum $str
+ } else {
+ set key $str
+ gets $did datum
+ }
+ set keys($n) $key
+ set data($n) [pad_data $pmethod $datum]
+
+ set txn [$env txn]
+ set ret [eval {$pdb put} -txn $txn \
+ {$key [chop_data $pmethod $datum]}]
+ error_check_good put($n) $ret 0
+ error_check_good txn_commit($n) [$txn commit] 0
+ }
+ close $did
+ check_secondaries $pdb $sdbs $nentries keys data "Sindex00$tnum.a"
+
+ puts "\tSindex00$tnum.b: Put/overwrite loop"
+ for { set n 0 } { $n < $nentries } { incr n } {
+ set newd $data($n).$keys($n)
+
+ set txn [$env txn]
+ set ret [eval {$pdb put} -txn $txn \
+ {$keys($n) [chop_data $pmethod $newd]}]
+ error_check_good put_overwrite($n) $ret 0
+ set data($n) [pad_data $pmethod $newd]
+ error_check_good txn_commit($n) [$txn commit] 0
+ }
+ check_secondaries $pdb $sdbs $nentries keys data "Sindex00$tnum.b"
+
+ # Delete the second half of the entries through the primary.
+ # We do the second half so we can just pass keys(0 ... n/2)
+ # to check_secondaries.
+ set half [expr $nentries / 2]
+ puts "\tSindex00$tnum.c: Primary delete loop: deleting $half entries"
+ for { set n $half } { $n < $nentries } { incr n } {
+ set txn [$env txn]
+ set ret [$pdb del -txn $txn $keys($n)]
+ error_check_good pdel($n) $ret 0
+ error_check_good txn_commit($n) [$txn commit] 0
+ }
+ check_secondaries $pdb $sdbs $half keys data "Sindex00$tnum.c"
+
+ # Delete half of what's left, through the first secondary.
+ set quar [expr $half / 2]
+ puts "\tSindex00$tnum.d: Secondary delete loop: deleting $quar entries"
+ set sdb [lindex $sdbs 0]
+ set callback [callback_n 0]
+ for { set n $quar } { $n < $half } { incr n } {
+ set skey [$callback $keys($n) [pad_data $pmethod $data($n)]]
+ set txn [$env txn]
+ set ret [$sdb del -txn $txn $skey]
+ error_check_good sdel($n) $ret 0
+ error_check_good txn_commit($n) [$txn commit] 0
+ }
+ check_secondaries $pdb $sdbs $quar keys data "Sindex00$tnum.d"
+
+ puts "\tSindex00$tnum.e: Closing/disassociating primary first"
+ error_check_good primary_close [$pdb close] 0
+ foreach sdb $sdbs {
+ error_check_good secondary_close [$sdb close] 0
+ }
+ error_check_good env_close [$env close] 0
+}
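sindex006 wraps every primary operation in its own transaction; because the databases were opened and associated with -auto_commit in a -txn environment, the matching secondary updates happen inside that same transaction. A minimal sketch of the per-record pattern, and of the rollback behaviour it buys; $key, $datum and $pmethod are hypothetical here:

    set txn [$env txn]
    set ret [$pdb put -txn $txn $key [chop_data $pmethod $datum]]
    error_check_good put $ret 0
    # Aborting undoes the primary insert and, since the secondaries are
    # transactionally associated, their index entries as well.
    error_check_good abort [$txn abort] 0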
diff --git a/bdb/test/sindex.tcl b/bdb/test/sindex.tcl
new file mode 100644
index 00000000000..fc2a0fc2f31
--- /dev/null
+++ b/bdb/test/sindex.tcl
@@ -0,0 +1,259 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2001-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: sindex.tcl,v 1.8 2002/05/07 17:15:46 krinsky Exp $
+#
+# Secondary index test driver and maintenance routines.
+#
+# Breaking from the usual convention, we put the driver function
+# for the secondary index tests here, in its own file. The reason
+# for this is that it's something which compartmentalizes nicely,
+# has little in common with other driver functions, and
+# is likely to be run on its own from time to time.
+#
+# The secondary index tests themselves live in si0*.tcl.
+
+# Standard number of secondary indices to create if a single-element
+# list of methods is passed into the secondary index tests.
+global nsecondaries
+set nsecondaries 2
+
+# Run the secondary index tests.
+proc sindex { {verbose 0} args } {
+ global verbose_check_secondaries
+ set verbose_check_secondaries $verbose
+
+ # Run basic tests with a single secondary index and a small number
+ # of keys, then again with a larger number of keys. (Note that
+ # we can't go above 5000, since we use two items from our
+ # 10K-word list for each key/data pair.)
+ foreach n { 200 5000 } {
+ foreach pm { btree hash recno frecno queue queueext } {
+ foreach sm { dbtree dhash ddbtree ddhash btree hash } {
+ sindex001 [list $pm $sm $sm] $n
+ sindex002 [list $pm $sm $sm] $n
+ # Skip tests 3 & 4 for large lists;
+ # they're not that interesting.
+ if { $n < 1000 } {
+ sindex003 [list $pm $sm $sm] $n
+ sindex004 [list $pm $sm $sm] $n
+ }
+
+ sindex006 [list $pm $sm $sm] $n
+ }
+ }
+ }
+
+ # Run secondary index join test. (There's no point in running
+ # this with both lengths: the primary is unhappy for now with fixed-
+ # length records (XXX), and we need unsorted dups in the secondaries.)
+ foreach pm { btree hash recno } {
+ foreach sm { btree hash } {
+ sindex005 [list $pm $sm $sm] 1000
+ }
+ sindex005 [list $pm btree hash] 1000
+ sindex005 [list $pm hash btree] 1000
+ }
+
+
+ # Run test with 50 secondaries.
+ foreach pm { btree hash } {
+ set methlist [list $pm]
+ for { set i 0 } { $i < 50 } { incr i } {
+ # XXX this should incorporate hash after #3726
+ if { $i % 2 == 0 } {
+ lappend methlist "dbtree"
+ } else {
+ lappend methlist "ddbtree"
+ }
+ }
+ sindex001 $methlist 500
+ sindex002 $methlist 500
+ sindex003 $methlist 500
+ sindex004 $methlist 500
+ }
+}
+
+# The callback function we use for each given secondary in most tests
+# is a simple function of its place in the list of secondaries (0-based)
+# and the access method (since recnos may need different callbacks).
+#
+# !!!
+# Note that callbacks 0-3 return unique secondary keys if the input data
+# are unique; callbacks 4 and higher may not, so don't use them with
+# the normal wordlist and secondaries that don't support dups.
+# The callbacks that incorporate a key don't work properly with recno
+# access methods, at least not in the current test framework (the
+# error_check_good lines test for e.g. 1foo, when the database has
+# e.g. 0x010x000x000x00foo).
+proc callback_n { n } {
+ switch $n {
+ 0 { return _s_reversedata }
+ 1 { return _s_noop }
+ 2 { return _s_concatkeydata }
+ 3 { return _s_concatdatakey }
+ 4 { return _s_reverseconcat }
+ 5 { return _s_truncdata }
+ 6 { return _s_alwayscocacola }
+ }
+ return _s_noop
+}
+
+proc _s_reversedata { a b } { return [reverse $b] }
+proc _s_truncdata { a b } { return [string range $b 1 end] }
+proc _s_concatkeydata { a b } { return $a$b }
+proc _s_concatdatakey { a b } { return $b$a }
+proc _s_reverseconcat { a b } { return [reverse $a$b] }
+proc _s_alwayscocacola { a b } { return "Coca-Cola" }
+proc _s_noop { a b } { return $b }
+
+# Should the check_secondary routines print lots of output?
+set verbose_check_secondaries 0
+
+# Given a primary database handle, a list of secondary handles, a
+# number of entries, and arrays of keys and data, verify that all
+# databases have what they ought to.
+proc check_secondaries { pdb sdbs nentries keyarr dataarr {pref "Check"} } {
+ upvar $keyarr keys
+ upvar $dataarr data
+ global verbose_check_secondaries
+
+ # Make sure each key/data pair is in the primary.
+ if { $verbose_check_secondaries } {
+ puts "\t\t$pref.1: Each key/data pair is in the primary"
+ }
+ for { set i 0 } { $i < $nentries } { incr i } {
+ error_check_good pdb_get($i) [$pdb get $keys($i)] \
+ [list [list $keys($i) $data($i)]]
+ }
+
+ for { set j 0 } { $j < [llength $sdbs] } { incr j } {
+ # Make sure each key/data pair is in this secondary.
+ if { $verbose_check_secondaries } {
+ puts "\t\t$pref.2:\
+ Each skey/key/data tuple is in secondary #$j"
+ }
+ for { set i 0 } { $i < $nentries } { incr i } {
+ set sdb [lindex $sdbs $j]
+ set skey [[callback_n $j] $keys($i) $data($i)]
+ error_check_good sdb($j)_pget($i) \
+ [$sdb pget -get_both $skey $keys($i)] \
+ [list [list $skey $keys($i) $data($i)]]
+ }
+
+ # Make sure this secondary contains only $nentries
+ # items.
+ if { $verbose_check_secondaries } {
+ puts "\t\t$pref.3: Secondary #$j has $nentries items"
+ }
+ set dbc [$sdb cursor]
+ error_check_good dbc($i) \
+ [is_valid_cursor $dbc $sdb] TRUE
+ for { set k 0 } { [llength [$dbc get -next]] > 0 } \
+ { incr k } { }
+ error_check_good numitems($i) $k $nentries
+ error_check_good dbc($i)_close [$dbc close] 0
+ }
+
+ if { $verbose_check_secondaries } {
+ puts "\t\t$pref.4: Primary has $nentries items"
+ }
+ set dbc [$pdb cursor]
+ error_check_good pdbc [is_valid_cursor $dbc $pdb] TRUE
+ for { set k 0 } { [llength [$dbc get -next]] > 0 } { incr k } { }
+ error_check_good numitems $k $nentries
+ error_check_good pdbc_close [$dbc close] 0
+}
+
+# Given a primary database handle and a list of secondary handles, walk
+# through the primary and make sure all the secondaries are correct,
+# then walk through the secondaries and make sure the primary is correct.
+#
+# This is slightly less rigorous than the normal check_secondaries--we
+# use it whenever we don't have up-to-date "keys" and "data" arrays.
+proc cursor_check_secondaries { pdb sdbs nentries { pref "Check" } } {
+ global verbose_check_secondaries
+
+ # Make sure each key/data pair in the primary is in each secondary.
+ set pdbc [$pdb cursor]
+ error_check_good ccs_pdbc [is_valid_cursor $pdbc $pdb] TRUE
+ set i 0
+ if { $verbose_check_secondaries } {
+ puts "\t\t$pref.1:\
+ Key/data in primary => key/data in secondaries"
+ }
+
+ for { set dbt [$pdbc get -first] } { [llength $dbt] > 0 } \
+ { set dbt [$pdbc get -next] } {
+ incr i
+ set pkey [lindex [lindex $dbt 0] 0]
+ set pdata [lindex [lindex $dbt 0] 1]
+ for { set j 0 } { $j < [llength $sdbs] } { incr j } {
+ set sdb [lindex $sdbs $j]
+ set sdbt [$sdb pget -get_both \
+ [[callback_n $j] $pkey $pdata] $pkey]
+ error_check_good pkey($pkey,$j) \
+ [lindex [lindex $sdbt 0] 1] $pkey
+ error_check_good pdata($pdata,$j) \
+ [lindex [lindex $sdbt 0] 2] $pdata
+ }
+ }
+ error_check_good ccs_pdbc_close [$pdbc close] 0
+ error_check_good primary_has_nentries $i $nentries
+
+ for { set j 0 } { $j < [llength $sdbs] } { incr j } {
+ if { $verbose_check_secondaries } {
+ puts "\t\t$pref.2:\
+ Key/data in secondary #$j => key/data in primary"
+ }
+ set sdb [lindex $sdbs $j]
+ set sdbc [$sdb cursor]
+ error_check_good ccs_sdbc($j) [is_valid_cursor $sdbc $sdb] TRUE
+ set i 0
+ for { set dbt [$sdbc pget -first] } { [llength $dbt] > 0 } \
+ { set dbt [$sdbc pget -next] } {
+ incr i
+ set pkey [lindex [lindex $dbt 0] 1]
+ set pdata [lindex [lindex $dbt 0] 2]
+ error_check_good pdb_get($pkey/$pdata,$j) \
+ [$pdb get -get_both $pkey $pdata] \
+ [list [list $pkey $pdata]]
+ }
+ error_check_good secondary($j)_has_nentries $i $nentries
+
+ # To exercise pget -last/pget -prev, we do it backwards too.
+ set i 0
+ for { set dbt [$sdbc pget -last] } { [llength $dbt] > 0 } \
+ { set dbt [$sdbc pget -prev] } {
+ incr i
+ set pkey [lindex [lindex $dbt 0] 1]
+ set pdata [lindex [lindex $dbt 0] 2]
+ error_check_good pdb_get_bkwds($pkey/$pdata,$j) \
+ [$pdb get -get_both $pkey $pdata] \
+ [list [list $pkey $pdata]]
+ }
+ error_check_good secondary($j)_has_nentries_bkwds $i $nentries
+
+ error_check_good ccs_sdbc_close($j) [$sdbc close] 0
+ }
+}
+
+# The secondary index tests take a list of the access methods that
+# each database ought to use. Convert it in one pass into a list of
+# converted argses and omethods, one entry per method in the list.
+proc convert_argses { methods largs } {
+ set ret {}
+ foreach m $methods {
+ lappend ret [convert_args $m $largs]
+ }
+ return $ret
+}
+proc convert_methods { methods } {
+ set ret {}
+ foreach m $methods {
+ lappend ret [convert_method $m]
+ }
+ return $ret
+}
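Since each callback is a pure function of (key, data), the contents of every secondary can be worked out by hand, which is what check_secondaries relies on when it recomputes the expected skey with [callback_n $j]. For example, with primary key "foo" and data "bar" (values chosen purely for illustration):

    puts [[callback_n 1] foo bar]   ;# _s_noop           -> bar
    puts [[callback_n 2] foo bar]   ;# _s_concatkeydata  -> foobar
    puts [[callback_n 3] foo bar]   ;# _s_concatdatakey  -> barfoo
    puts [[callback_n 6] foo bar]   ;# _s_alwayscocacola -> Coca-Cola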
diff --git a/bdb/test/sysscript.tcl b/bdb/test/sysscript.tcl
index 1b7545e4c6b..810b0df6cef 100644
--- a/bdb/test/sysscript.tcl
+++ b/bdb/test/sysscript.tcl
@@ -1,9 +1,9 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: sysscript.tcl,v 11.12 2000/05/22 12:51:38 bostic Exp $
+# $Id: sysscript.tcl,v 11.17 2002/07/29 17:05:24 sue Exp $
#
# System integration test script.
# This script runs a single process that tests the full functionality of
@@ -31,7 +31,6 @@ source ./include.tcl
source $test_path/test.tcl
source $test_path/testutils.tcl
-set alphabet "abcdefghijklmnopqrstuvwxyz"
set mypid [pid]
set usage "sysscript dir nfiles key_avg data_avg method"
@@ -64,7 +63,7 @@ puts "$data_avg average data length"
flush stdout
# Create local environment
-set dbenv [berkdb env -txn -home $dir]
+set dbenv [berkdb_env -txn -home $dir]
set err [catch {error_check_good $mypid:dbenv [is_substr $dbenv env] 1} ret]
if {$err != 0} {
puts $ret
@@ -74,7 +73,7 @@ if {$err != 0} {
# Now open the files
for { set i 0 } { $i < $nfiles } { incr i } {
set file test044.$i.db
- set db($i) [berkdb open -env $dbenv $method $file]
+ set db($i) [berkdb open -auto_commit -env $dbenv $method $file]
set err [catch {error_check_bad $mypid:dbopen $db($i) NULL} ret]
if {$err != 0} {
puts $ret
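The sysscript changes follow two conventions adopted across the updated suite: environments are created with the berkdb_env command rather than "berkdb env", and databases opened inside a transactional environment are opened with -auto_commit so the open itself is transaction-protected. A minimal sketch of the combination; the path and method are hypothetical:

    set dbenv [berkdb_env -create -txn -home $dir]
    set db [berkdb open -create -auto_commit -env $dbenv -btree test.db]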
diff --git a/bdb/test/test.tcl b/bdb/test/test.tcl
index 7678f2fcbfb..10ee9425b7a 100644
--- a/bdb/test/test.tcl
+++ b/bdb/test/test.tcl
@@ -1,9 +1,9 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: test.tcl,v 11.114 2001/01/09 21:28:52 sue Exp $
+# $Id: test.tcl,v 11.225 2002/09/10 18:51:38 sue Exp $
source ./include.tcl
@@ -16,6 +16,7 @@ if { [file exists $testdir] != 1 } {
global __debug_print
global __debug_on
+global __debug_test
global util_path
#
@@ -30,69 +31,52 @@ if { [string first "exec format error" $ret] != -1 } {
set util_path .
}
set __debug_print 0
-set __debug_on 0
+set encrypt 0
+set old_encrypt 0
+set passwd test_passwd
# This is where the test numbering and parameters now live.
source $test_path/testparams.tcl
-for { set i 1 } { $i <= $deadtests } {incr i} {
- set name [format "dead%03d.tcl" $i]
- source $test_path/$name
-}
-for { set i 1 } { $i <= $envtests } {incr i} {
- set name [format "env%03d.tcl" $i]
- source $test_path/$name
-}
-for { set i 1 } { $i <= $recdtests } {incr i} {
- set name [format "recd%03d.tcl" $i]
- source $test_path/$name
-}
-for { set i 1 } { $i <= $rpctests } {incr i} {
- set name [format "rpc%03d.tcl" $i]
- source $test_path/$name
-}
-for { set i 1 } { $i <= $rsrctests } {incr i} {
- set name [format "rsrc%03d.tcl" $i]
- source $test_path/$name
-}
-for { set i 1 } { $i <= $runtests } {incr i} {
- set name [format "test%03d.tcl" $i]
- # Test numbering may be sparse.
- if { [file exists $test_path/$name] == 1 } {
+# Error stream that (we hope!) always goes to the console, even if we're
+# redirecting to ALL.OUT.
+set consoleerr stderr
+
+foreach sub $subs {
+ if { [info exists num_test($sub)] != 1 } {
+ puts stderr "Subsystem $sub has no number of tests specified in\
+ testparams.tcl; skipping."
+ continue
+ }
+ set end $num_test($sub)
+ for { set i 1 } { $i <= $end } {incr i} {
+ set name [format "%s%03d.tcl" $sub $i]
source $test_path/$name
}
}
-for { set i 1 } { $i <= $subdbtests } {incr i} {
- set name [format "sdb%03d.tcl" $i]
- source $test_path/$name
-}
source $test_path/archive.tcl
source $test_path/byteorder.tcl
source $test_path/dbm.tcl
source $test_path/hsearch.tcl
source $test_path/join.tcl
-source $test_path/lock001.tcl
-source $test_path/lock002.tcl
-source $test_path/lock003.tcl
-source $test_path/log.tcl
source $test_path/logtrack.tcl
-source $test_path/mpool.tcl
-source $test_path/mutex.tcl
source $test_path/ndbm.tcl
-source $test_path/sdbtest001.tcl
-source $test_path/sdbtest002.tcl
+source $test_path/parallel.tcl
+source $test_path/reputils.tcl
source $test_path/sdbutils.tcl
+source $test_path/shelltest.tcl
+source $test_path/sindex.tcl
source $test_path/testutils.tcl
-source $test_path/txn.tcl
source $test_path/upgrade.tcl
set dict $test_path/wordlist
set alphabet "abcdefghijklmnopqrstuvwxyz"
+set datastr "abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz"
# Random number seed.
global rand_init
-set rand_init 1013
+set rand_init 101301
# Default record length and padding character for
# fixed record length access method(s)
@@ -103,6 +87,21 @@ set recd_debug 0
set log_log_record_types 0
set ohandles {}
+# Normally, we're not running an all-tests-in-one-env run. This matters
+# for error stream/error prefix settings in berkdb_open.
+global is_envmethod
+set is_envmethod 0
+
+# For testing locker id wrap around.
+global lock_curid
+global lock_maxid
+set lock_curid 0
+set lock_maxid 2147483647
+global txn_curid
+global txn_maxid
+set txn_curid 2147483648
+set txn_maxid 4294967295
+
# Set up any OS-specific values
global tcl_platform
set is_windows_test [is_substr $tcl_platform(os) "Win"]
@@ -112,41 +111,8 @@ set is_qnx_test [is_substr $tcl_platform(os) "QNX"]
# From here on out, test.tcl contains the procs that are used to
# run all or part of the test suite.
-proc run_am { } {
- global runtests
- source ./include.tcl
-
- fileremove -f ALL.OUT
-
- # Access method tests.
- #
- # XXX
- # Broken up into separate tclsh instantiations so we don't require
- # so much memory.
- foreach i "btree rbtree hash queue queueext recno frecno rrecno" {
- puts "Running $i tests"
- for { set j 1 } { $j <= $runtests } {incr j} {
- if [catch {exec $tclsh_path \
- << "source $test_path/test.tcl; \
- run_method -$i $j $j" >>& ALL.OUT } res] {
- set o [open ALL.OUT a]
- puts $o "FAIL: [format "test%03d" $j] $i"
- close $o
- }
- }
- if [catch {exec $tclsh_path \
- << "source $test_path/test.tcl; \
- subdb -$i 0 1" >>& ALL.OUT } res] {
- set o [open ALL.OUT a]
- puts $o "FAIL: subdb -$i test"
- close $o
- }
- }
-}
-
proc run_std { args } {
- global runtests
- global subdbtests
+ global num_test
source ./include.tcl
set exflgs [eval extractflags $args]
@@ -156,6 +122,7 @@ proc run_std { args } {
set display 1
set run 1
set am_only 0
+ set no_am 0
set std_only 1
set rflags {--}
foreach f $flags {
@@ -163,6 +130,10 @@ proc run_std { args } {
A {
set std_only 0
}
+ M {
+ set no_am 1
+ puts "run_std: all but access method tests."
+ }
m {
set am_only 1
puts "run_std: access method tests only."
@@ -183,7 +154,7 @@ proc run_std { args } {
puts -nonewline "Test suite run started at: "
puts [clock format [clock seconds] -format "%H:%M %D"]
puts [berkdb version -string]
-
+
puts -nonewline $o "Test suite run started at: "
puts $o [clock format [clock seconds] -format "%H:%M %D"]
puts $o [berkdb version -string]
@@ -196,16 +167,17 @@ proc run_std { args } {
{"archive" "archive"}
{"locking" "lock"}
{"logging" "log"}
- {"memory pool" "mpool"}
+ {"memory pool" "memp"}
{"mutex" "mutex"}
{"transaction" "txn"}
{"deadlock detection" "dead"}
- {"subdatabase" "subdb_gen"}
+ {"subdatabase" "sdb"}
{"byte-order" "byte"}
{"recno backing file" "rsrc"}
{"DBM interface" "dbm"}
{"NDBM interface" "ndbm"}
{"Hsearch interface" "hsearch"}
+ {"secondary index" "sindex"}
}
if { $am_only == 0 } {
@@ -229,12 +201,22 @@ proc run_std { args } {
# so we don't require so much memory, but I think it's cleaner
# and more useful to do it down inside proc r than here,
# since "r recd" gets done a lot and needs to work.
+ #
+ # Note that we still wrap the test in an exec so that
+ # its output goes to ALL.OUT. run_recd will wrap each test
+ # so that both error streams go to stdout (which here goes
+ # to ALL.OUT); information that run_recd wishes to print
+ # to the "real" stderr, but outside the wrapping for each test,
+ # such as which tests are being skipped, it can still send to
+ # stderr.
puts "Running recovery tests"
- if [catch {exec $tclsh_path \
- << "source $test_path/test.tcl; \
- r $rflags recd" >>& ALL.OUT } res] {
+ if [catch {
+ exec $tclsh_path \
+ << "source $test_path/test.tcl; r $rflags recd" \
+ 2>@ stderr >> ALL.OUT
+ } res] {
set o [open ALL.OUT a]
- puts $o "FAIL: recd test"
+ puts $o "FAIL: recd tests"
close $o
}
@@ -255,38 +237,34 @@ proc run_std { args } {
}
}
- # Access method tests.
- #
- # XXX
- # Broken up into separate tclsh instantiations so we don't require
- # so much memory.
- foreach i "btree rbtree hash queue queueext recno frecno rrecno" {
- puts "Running $i tests"
- for { set j 1 } { $j <= $runtests } {incr j} {
- if { $run == 0 } {
- set o [open ALL.OUT a]
- run_method -$i $j $j $display $run $o
- close $o
- }
- if { $run } {
- if [catch {exec $tclsh_path \
- << "source $test_path/test.tcl; \
- run_method -$i $j $j $display $run" \
- >>& ALL.OUT } res] {
+ if { $no_am == 0 } {
+ # Access method tests.
+ #
+ # XXX
+ # Broken up into separate tclsh instantiations so we don't
+ # require so much memory.
+ foreach i \
+ "btree hash queue queueext recno rbtree frecno rrecno" {
+ puts "Running $i tests"
+ for { set j 1 } { $j <= $num_test(test) } {incr j} {
+ if { $run == 0 } {
set o [open ALL.OUT a]
- puts $o \
- "FAIL: [format "test%03d" $j] $i"
+ run_method -$i $j $j $display $run $o
close $o
}
+ if { $run } {
+ if [catch {exec $tclsh_path \
+ << "source $test_path/test.tcl; \
+ run_method -$i $j $j $display $run"\
+ >>& ALL.OUT } res] {
+ set o [open ALL.OUT a]
+ puts $o "FAIL:\
+ [format "test%03d" $j] $i"
+ close $o
+ }
+ }
}
}
- if [catch {exec $tclsh_path \
- << "source $test_path/test.tcl; \
- subdb -$i $display $run" >>& ALL.OUT } res] {
- set o [open ALL.OUT a]
- puts $o "FAIL: subdb -$i test"
- close $o
- }
}
# If not actually running, no need to check for failure.
@@ -296,14 +274,8 @@ proc run_std { args } {
return
}
- set failed 0
- set o [open ALL.OUT r]
- while { [gets $o line] >= 0 } {
- if { [regexp {^FAIL} $line] != 0 } {
- set failed 1
- }
- }
- close $o
+ set failed [check_failed_run ALL.OUT]
+
set o [open ALL.OUT a]
if { $failed == 0 } {
puts "Regression Tests Succeeded"
@@ -320,11 +292,22 @@ proc run_std { args } {
close $o
}
+proc check_failed_run { file {text "^FAIL"}} {
+ set failed 0
+ set o [open $file r]
+ while { [gets $o line] >= 0 } {
+ set ret [regexp $text $line]
+ if { $ret != 0 } {
+ set failed 1
+ }
+ }
+ close $o
+
+ return $failed
+}
+
proc r { args } {
- global envtests
- global recdtests
- global subdbtests
- global deadtests
+ global num_test
source ./include.tcl
set exflgs [eval extractflags $args]
@@ -345,68 +328,42 @@ proc r { args } {
}
if {[catch {
- set l [ lindex $args 0 ]
- switch $l {
- archive {
+ set sub [ lindex $args 0 ]
+ switch $sub {
+ byte {
if { $display } {
- puts "eval archive [lrange $args 1 end]"
+ puts "run_test byteorder"
}
if { $run } {
check_handles
- eval archive [lrange $args 1 end]
+ run_test byteorder
}
}
- byte {
- foreach method \
- "-hash -btree -recno -queue -queueext -frecno" {
- if { $display } {
- puts "byteorder $method"
- }
- if { $run } {
- check_handles
- byteorder $method
- }
- }
- }
- dbm {
- if { $display } {
- puts "dbm"
- }
+ archive -
+ dbm -
+ hsearch -
+ ndbm -
+ shelltest -
+ sindex {
+ if { $display } { puts "r $sub" }
if { $run } {
check_handles
- dbm
+ $sub
}
}
- dead {
- for { set i 1 } { $i <= $deadtests } \
- { incr i } {
- if { $display } {
- puts "eval dead00$i\
- [lrange $args 1 end]"
- }
- if { $run } {
- check_handles
- eval dead00$i\
- [lrange $args 1 end]
- }
- }
- }
- env {
- for { set i 1 } { $i <= $envtests } {incr i} {
- if { $display } {
- puts "eval env00$i"
- }
- if { $run } {
- check_handles
- eval env00$i
- }
- }
- }
- hsearch {
- if { $display } { puts "hsearch" }
+ bigfile -
+ dead -
+ env -
+ lock -
+ log -
+ memp -
+ mutex -
+ rsrc -
+ sdbtest -
+ txn {
+ if { $display } { run_subsystem $sub 1 0 }
if { $run } {
- check_handles
- hsearch
+ run_subsystem $sub
}
}
join {
@@ -419,7 +376,7 @@ proc r { args } {
}
join1 {
if { $display } { puts jointest }
- if { $run } {
+ if { $run } {
check_handles
jointest
}
@@ -467,147 +424,99 @@ proc r { args } {
jointest 512 3
}
}
- lock {
- if { $display } {
- puts \
- "eval locktest [lrange $args 1 end]"
- }
- if { $run } {
- check_handles
- eval locktest [lrange $args 1 end]
- }
- }
- log {
- if { $display } {
- puts "eval logtest [lrange $args 1 end]"
- }
- if { $run } {
- check_handles
- eval logtest [lrange $args 1 end]
- }
- }
- mpool {
- eval r $saveflags mpool1
- eval r $saveflags mpool2
- eval r $saveflags mpool3
- }
- mpool1 {
- if { $display } {
- puts "eval mpool [lrange $args 1 end]"
- }
- if { $run } {
- check_handles
- eval mpool [lrange $args 1 end]
- }
- }
- mpool2 {
- if { $display } {
- puts "eval mpool\
- -mem system [lrange $args 1 end]"
- }
- if { $run } {
- check_handles
- eval mpool\
- -mem system [lrange $args 1 end]
- }
- }
- mpool3 {
- if { $display } {
- puts "eval mpool\
- -mem private [lrange $args 1 end]"
- }
- if { $run } {
- eval mpool\
- -mem private [lrange $args 1 end]
- }
- }
- mutex {
- if { $display } {
- puts "eval mutex [lrange $args 1 end]"
- }
- if { $run } {
- check_handles
- eval mutex [lrange $args 1 end]
- }
- }
- ndbm {
- if { $display } { puts ndbm }
- if { $run } {
- check_handles
- ndbm
- }
- }
recd {
- if { $display } { puts run_recds }
- if { $run } {
- check_handles
- run_recds
- }
+ check_handles
+ run_recds $run $display [lrange $args 1 end]
}
- rpc {
- # RPC must be run as one unit due to server,
- # so just print "r rpc" in the display case.
- if { $display } { puts "r rpc" }
- if { $run } {
- check_handles
- eval rpc001
- check_handles
- eval rpc002
- if { [catch {run_rpcmethod -txn} ret]\
- != 0 } {
- puts $ret
+ rep {
+ for { set j 1 } { $j <= $num_test(test) } \
+ { incr j } {
+ if { $display } {
+ puts "eval run_test \
+ run_repmethod 0 $j $j"
}
- foreach method \
- "hash queue queueext recno frecno rrecno rbtree btree" {
- if { [catch {run_rpcmethod \
- -$method} ret] != 0 } {
- puts $ret
- }
+ if { $run } {
+ eval run_test \
+ run_repmethod 0 $j $j
}
}
- }
- rsrc {
- if { $display } { puts "rsrc001\nrsrc002" }
- if { $run } {
- check_handles
- rsrc001
- check_handles
- rsrc002
+ for { set i 1 } \
+ { $i <= $num_test(rep) } {incr i} {
+ set test [format "%s%03d" $sub $i]
+ if { $i == 2 } {
+ if { $run } {
+ puts "Skipping rep002 \
+ (waiting on SR #6195)"
+ }
+ continue
+ }
+ if { $display } {
+ puts "run_test $test"
+ }
+ if { $run } {
+ run_test $test
+ }
}
}
- subdb {
- eval r $saveflags subdb_gen
-
- foreach method \
- "btree rbtree hash queue queueext recno frecno rrecno" {
- check_handles
- eval subdb -$method $display $run
+ rpc {
+ if { $display } { puts "r $sub" }
+ global rpc_svc svc_list
+ set old_rpc_src $rpc_svc
+ foreach rpc_svc $svc_list {
+ if { !$run || \
+ ![file exist $util_path/$rpc_svc] } {
+ continue
+ }
+ run_subsystem rpc
+ if { [catch {run_rpcmethod -txn} ret] != 0 } {
+ puts $ret
+ }
+ run_test run_rpcmethod
}
+ set rpc_svc $old_rpc_src
}
- subdb_gen {
+ sec {
if { $display } {
- puts "subdbtest001 ; verify_dir"
- puts "subdbtest002 ; verify_dir"
+ run_subsystem $sub 1 0
}
if { $run } {
- check_handles
- eval subdbtest001
- verify_dir
- check_handles
- eval subdbtest002
- verify_dir
+ run_subsystem $sub 0 1
+ }
+ for { set j 1 } { $j <= $num_test(test) } \
+ { incr j } {
+ if { $display } {
+ puts "eval run_test \
+ run_secmethod $j $j"
+ puts "eval run_test \
+ run_secenv $j $j"
+ }
+ if { $run } {
+ eval run_test \
+ run_secmethod $j $j
+ eval run_test \
+ run_secenv $j $j
+ }
}
}
- txn {
+ sdb {
if { $display } {
- puts "txntest [lrange $args 1 end]"
+ puts "eval r $saveflags sdbtest"
+ for { set j 1 } \
+ { $j <= $num_test(sdb) } \
+ { incr j } {
+ puts "eval run_test \
+ subdb $j $j"
+ }
}
if { $run } {
- check_handles
- eval txntest [lrange $args 1 end]
+ eval r $saveflags sdbtest
+ for { set j 1 } \
+ { $j <= $num_test(sdb) } \
+ { incr j } {
+ eval run_test subdb $j $j
+ }
}
}
-
btree -
rbtree -
hash -
@@ -640,16 +549,44 @@ proc r { args } {
}
}
+proc run_subsystem { prefix { display 0 } { run 1} } {
+ global num_test
+ if { [info exists num_test($prefix)] != 1 } {
+ puts stderr "Subsystem $sub has no number of tests specified in\
+ testparams.tcl; skipping."
+ return
+ }
+ for { set i 1 } { $i <= $num_test($prefix) } {incr i} {
+ set name [format "%s%03d" $prefix $i]
+ if { $display } {
+ puts "eval $name"
+ }
+ if { $run } {
+ check_handles
+ catch {eval $name}
+ }
+ }
+}
+
+proc run_test { testname args } {
+ source ./include.tcl
+ foreach method "hash queue queueext recno rbtree frecno rrecno btree" {
+ check_handles
+ eval $testname -$method $args
+ verify_dir $testdir "" 1
+ }
+}
+
proc run_method { method {start 1} {stop 0} {display 0} {run 1} \
{ outfile stdout } args } {
global __debug_on
global __debug_print
+ global num_test
global parms
- global runtests
source ./include.tcl
if { $stop == 0 } {
- set stop $runtests
+ set stop $num_test(test)
}
if { $run == 1 } {
puts $outfile "run_method: $method $start $stop $args"
@@ -659,7 +596,7 @@ proc run_method { method {start 1} {stop 0} {display 0} {run 1} \
for { set i $start } { $i <= $stop } {incr i} {
set name [format "test%03d" $i]
if { [info exists parms($name)] != 1 } {
- puts "[format Test%03d $i] disabled in\
+ puts stderr "[format Test%03d $i] disabled in\
testparams.tcl; skipping."
continue
}
@@ -698,34 +635,36 @@ proc run_method { method {start 1} {stop 0} {display 0} {run 1} \
}
}
-proc run_rpcmethod { type {start 1} {stop 0} {largs ""} } {
+proc run_rpcmethod { method {start 1} {stop 0} {largs ""} } {
global __debug_on
global __debug_print
+ global num_test
global parms
- global runtests
+ global is_envmethod
+ global rpc_svc
source ./include.tcl
if { $stop == 0 } {
- set stop $runtests
+ set stop $num_test(test)
}
- puts "run_rpcmethod: $type $start $stop $largs"
+ puts "run_rpcmethod: $method $start $stop $largs"
set save_largs $largs
if { [string compare $rpc_server "localhost"] == 0 } {
- set dpid [exec $util_path/berkeley_db_svc -h $rpc_testdir &]
+ set dpid [exec $util_path/$rpc_svc -h $rpc_testdir &]
} else {
- set dpid [exec rsh $rpc_server $rpc_path/berkeley_db_svc \
+ set dpid [exec rsh $rpc_server $rpc_path/$rpc_svc \
-h $rpc_testdir &]
}
puts "\tRun_rpcmethod.a: starting server, pid $dpid"
- tclsleep 2
+ tclsleep 10
remote_cleanup $rpc_server $rpc_testdir $testdir
set home [file tail $rpc_testdir]
- set txn ""
+ set is_envmethod 1
set use_txn 0
- if { [string first "txn" $type] != -1 } {
+ if { [string first "txn" $method] != -1 } {
set use_txn 1
}
if { $use_txn == 1 } {
@@ -737,7 +676,7 @@ proc run_rpcmethod { type {start 1} {stop 0} {largs ""} } {
set i 1
check_handles
remote_cleanup $rpc_server $rpc_testdir $testdir
- set env [eval {berkdb env -create -mode 0644 -home $home \
+ set env [eval {berkdb_env -create -mode 0644 -home $home \
-server $rpc_server -client_timeout 10000} -txn]
error_check_good env_open [is_valid_env $env] TRUE
@@ -746,14 +685,16 @@ proc run_rpcmethod { type {start 1} {stop 0} {largs ""} } {
set stat [catch {eval txn001_subb $ntxns $env} res]
}
error_check_good envclose [$env close] 0
+ set stat [catch {eval txn003} res]
} else {
set stat [catch {
for { set i $start } { $i <= $stop } {incr i} {
check_handles
set name [format "test%03d" $i]
if { [info exists parms($name)] != 1 } {
- puts "[format Test%03d $i] disabled in\
- testparams.tcl; skipping."
+ puts stderr "[format Test%03d $i]\
+ disabled in testparams.tcl;\
+ skipping."
continue
}
remote_cleanup $rpc_server $rpc_testdir $testdir
@@ -761,16 +702,16 @@ proc run_rpcmethod { type {start 1} {stop 0} {largs ""} } {
# Set server cachesize to 1Mb. Otherwise some
# tests won't fit (like test084 -btree).
#
- set env [eval {berkdb env -create -mode 0644 \
+ set env [eval {berkdb_env -create -mode 0644 \
-home $home -server $rpc_server \
-client_timeout 10000 \
- -cachesize {0 1048576 1} }]
+ -cachesize {0 1048576 1}}]
error_check_good env_open \
[is_valid_env $env] TRUE
append largs " -env $env "
puts "[timestamp]"
- eval $name $type $parms($name) $largs
+ eval $name $method $parms($name) $largs
if { $__debug_print != 0 } {
puts ""
}
@@ -789,37 +730,38 @@ proc run_rpcmethod { type {start 1} {stop 0} {largs ""} } {
set fnl [string first "\n" $errorInfo]
set theError [string range $errorInfo 0 [expr $fnl - 1]]
- exec $KILL $dpid
+ tclkill $dpid
if {[string first FAIL $errorInfo] == -1} {
error "FAIL:[timestamp]\
- run_rpcmethod: $type $i: $theError"
+ run_rpcmethod: $method $i: $theError"
} else {
error $theError;
}
}
- exec $KILL $dpid
-
+ set is_envmethod 0
+ tclkill $dpid
}
-proc run_rpcnoserver { type {start 1} {stop 0} {largs ""} } {
+proc run_rpcnoserver { method {start 1} {stop 0} {largs ""} } {
global __debug_on
global __debug_print
+ global num_test
global parms
- global runtests
+ global is_envmethod
source ./include.tcl
if { $stop == 0 } {
- set stop $runtests
+ set stop $num_test(test)
}
- puts "run_rpcnoserver: $type $start $stop $largs"
+ puts "run_rpcnoserver: $method $start $stop $largs"
set save_largs $largs
remote_cleanup $rpc_server $rpc_testdir $testdir
set home [file tail $rpc_testdir]
- set txn ""
+ set is_envmethod 1
set use_txn 0
- if { [string first "txn" $type] != -1 } {
+ if { [string first "txn" $method] != -1 } {
set use_txn 1
}
if { $use_txn == 1 } {
@@ -831,7 +773,7 @@ proc run_rpcnoserver { type {start 1} {stop 0} {largs ""} } {
set i 1
check_handles
remote_cleanup $rpc_server $rpc_testdir $testdir
- set env [eval {berkdb env -create -mode 0644 -home $home \
+ set env [eval {berkdb_env -create -mode 0644 -home $home \
-server $rpc_server -client_timeout 10000} -txn]
error_check_good env_open [is_valid_env $env] TRUE
@@ -846,8 +788,9 @@ proc run_rpcnoserver { type {start 1} {stop 0} {largs ""} } {
check_handles
set name [format "test%03d" $i]
if { [info exists parms($name)] != 1 } {
- puts "[format Test%03d $i] disabled in\
- testparams.tcl; skipping."
+ puts stderr "[format Test%03d $i]\
+ disabled in testparams.tcl;\
+ skipping."
continue
}
remote_cleanup $rpc_server $rpc_testdir $testdir
@@ -855,7 +798,7 @@ proc run_rpcnoserver { type {start 1} {stop 0} {largs ""} } {
# Set server cachesize to 1Mb. Otherwise some
# tests won't fit (like test084 -btree).
#
- set env [eval {berkdb env -create -mode 0644 \
+ set env [eval {berkdb_env -create -mode 0644 \
-home $home -server $rpc_server \
-client_timeout 10000 \
-cachesize {0 1048576 1} }]
@@ -864,7 +807,7 @@ proc run_rpcnoserver { type {start 1} {stop 0} {largs ""} } {
append largs " -env $env "
puts "[timestamp]"
- eval $name $type $parms($name) $largs
+ eval $name $method $parms($name) $largs
if { $__debug_print != 0 } {
puts ""
}
@@ -885,49 +828,72 @@ proc run_rpcnoserver { type {start 1} {stop 0} {largs ""} } {
set theError [string range $errorInfo 0 [expr $fnl - 1]]
if {[string first FAIL $errorInfo] == -1} {
error "FAIL:[timestamp]\
- run_rpcnoserver: $type $i: $theError"
+ run_rpcnoserver: $method $i: $theError"
} else {
error $theError;
}
+ set is_envmethod 0
}
}
#
-# Run method tests in one environment. (As opposed to run_envmethod1
-# which runs each test in its own, new environment.)
+# Run method tests in secure mode.
#
-proc run_envmethod { type {start 1} {stop 0} {largs ""} } {
+proc run_secmethod { method {start 1} {stop 0} {display 0} {run 1} \
+ { outfile stdout } args } {
+ global passwd
+
+ append largs " -encryptaes $passwd "
+ eval run_method $method $start $stop $display $run $outfile $largs
+}
+
+#
+# Run method tests in its own, new secure environment.
+#
+proc run_secenv { method {start 1} {stop 0} {largs ""} } {
global __debug_on
global __debug_print
+ global is_envmethod
+ global num_test
global parms
- global runtests
+ global passwd
source ./include.tcl
if { $stop == 0 } {
- set stop $runtests
+ set stop $num_test(test)
}
- puts "run_envmethod: $type $start $stop $largs"
+ puts "run_secenv: $method $start $stop $largs"
set save_largs $largs
env_cleanup $testdir
- set txn ""
+ set is_envmethod 1
set stat [catch {
for { set i $start } { $i <= $stop } {incr i} {
check_handles
- set env [eval {berkdb env -create -mode 0644 \
- -home $testdir}]
+ set env [eval {berkdb_env -create -mode 0644 \
+ -home $testdir -encryptaes $passwd \
+ -cachesize {0 1048576 1}}]
error_check_good env_open [is_valid_env $env] TRUE
append largs " -env $env "
puts "[timestamp]"
set name [format "test%03d" $i]
if { [info exists parms($name)] != 1 } {
- puts "[format Test%03d $i] disabled in\
+ puts stderr "[format Test%03d $i] disabled in\
testparams.tcl; skipping."
continue
}
- eval $name $type $parms($name) $largs
+
+ #
+ # Run each test multiple times in the secure env.
+ # Once with a secure env + clear database
+ # Once with a secure env + secure database
+ #
+ eval $name $method $parms($name) $largs
+ append largs " -encrypt "
+ eval $name $method $parms($name) $largs
+
if { $__debug_print != 0 } {
puts ""
}
@@ -939,7 +905,7 @@ proc run_envmethod { type {start 1} {stop 0} {largs ""} } {
set largs $save_largs
error_check_good envclose [$env close] 0
error_check_good envremove [berkdb envremove \
- -home $testdir] 0
+ -home $testdir -encryptaes $passwd] 0
}
} res]
if { $stat != 0} {
@@ -949,22 +915,476 @@ proc run_envmethod { type {start 1} {stop 0} {largs ""} } {
set theError [string range $errorInfo 0 [expr $fnl - 1]]
if {[string first FAIL $errorInfo] == -1} {
error "FAIL:[timestamp]\
- run_envmethod: $type $i: $theError"
+ run_secenv: $method $i: $theError"
} else {
error $theError;
}
+ set is_envmethod 0
}
}
-proc subdb { method display run {outfile stdout} args} {
- global subdbtests testdir
+#
+# Run replication method tests in master and client env.
+#
+proc run_reptest { method test {droppct 0} {nclients 1} {do_del 0} \
+ {do_sec 0} {do_oob 0} {largs "" } } {
+ source ./include.tcl
+ global __debug_on
+ global __debug_print
+ global __debug_test
+ global is_envmethod
+ global num_test
global parms
+ global passwd
+ global rand_init
- for { set i 1 } {$i <= $subdbtests} {incr i} {
+ berkdb srand $rand_init
+ set c [string index $test 0]
+ if { $c == "s" } {
+ set i [string range $test 1 end]
set name [format "subdb%03d" $i]
+ } else {
+ set i $test
+ set name [format "test%03d" $i]
+ }
+ puts "run_reptest: $method $name"
+
+ env_cleanup $testdir
+ set is_envmethod 1
+ set stat [catch {
+ if { $do_sec } {
+ set envargs "-encryptaes $passwd"
+ append largs " -encrypt "
+ } else {
+ set envargs ""
+ }
+ check_handles
+ #
+ # This will set up the master and client envs
+ # and will return us the args to pass to the
+ # test.
+ set largs [repl_envsetup \
+ $envargs $largs $test $nclients $droppct $do_oob]
+
+ puts "[timestamp]"
if { [info exists parms($name)] != 1 } {
- puts "[format Subdb%03d $i] disabled in\
+ puts stderr "[format Test%03d $i] \
+ disabled in\
+ testparams.tcl; skipping."
+ continue
+ }
+ puts -nonewline \
+ "Repl: $name: dropping $droppct%, $nclients clients "
+ if { $do_del } {
+ puts -nonewline " with delete verification;"
+ } else {
+ puts -nonewline " no delete verification;"
+ }
+ if { $do_sec } {
+ puts -nonewline " with security;"
+ } else {
+ puts -nonewline " no security;"
+ }
+ if { $do_oob } {
+ puts -nonewline " with out-of-order msgs;"
+ } else {
+ puts -nonewline " no out-of-order msgs;"
+ }
+ puts ""
+
+ eval $name $method $parms($name) $largs
+
+ if { $__debug_print != 0 } {
+ puts ""
+ }
+ if { $__debug_on != 0 } {
+ debug $__debug_test
+ }
+ flush stdout
+ flush stderr
+ repl_envprocq $i $nclients $do_oob
+ repl_envver0 $i $method $nclients
+ if { $do_del } {
+ repl_verdel $i $method $nclients
+ }
+ repl_envclose $i $envargs
+ } res]
+ if { $stat != 0} {
+ global errorInfo;
+
+ set fnl [string first "\n" $errorInfo]
+ set theError [string range $errorInfo 0 [expr $fnl - 1]]
+ if {[string first FAIL $errorInfo] == -1} {
+ error "FAIL:[timestamp]\
+ run_reptest: $method $i: $theError"
+ } else {
+ error $theError;
+ }
+ }
+ set is_envmethod 0
+}
+
+#
+# Run replication method tests in master and client env.
+#
+proc run_repmethod { method {numcl 0} {start 1} {stop 0} {display 0}
+ {run 1} {outfile stdout} {largs ""} } {
+ source ./include.tcl
+ global __debug_on
+ global __debug_print
+ global __debug_test
+ global is_envmethod
+ global num_test
+ global parms
+ global passwd
+ global rand_init
+
+ set stopsdb $num_test(sdb)
+ if { $stop == 0 } {
+ set stop $num_test(test)
+ } else {
+ if { $stopsdb > $stop } {
+ set stopsdb $stop
+ }
+ }
+ berkdb srand $rand_init
+
+ #
+ # We want to run replication both normally and with crypto.
+ # So run it once and then run again with crypto.
+ #
+ set save_largs $largs
+ env_cleanup $testdir
+
+ if { $display == 1 } {
+ for { set i $start } { $i <= $stop } { incr i } {
+ puts $outfile "eval run_repmethod $method \
+ 0 $i $i 0 1 stdout $largs"
+ }
+ }
+ if { $run == 1 } {
+ set is_envmethod 1
+ #
+ # Use a list for the number of clients because we really don't
+ # want to evenly-weight all numbers of clients. Favor smaller
+ # numbers but test more clients occasionally.
+ set drop_list { 0 0 0 0 0 1 1 5 5 10 20 }
+ set drop_len [expr [llength $drop_list] - 1]
+ set client_list { 1 1 2 1 1 1 2 2 3 1 }
+ set cl_len [expr [llength $client_list] - 1]
+ set stat [catch {
+ for { set i $start } { $i <= $stopsdb } {incr i} {
+ if { $numcl == 0 } {
+ set clindex [berkdb random_int 0 $cl_len]
+ set nclients [lindex $client_list $clindex]
+ } else {
+ set nclients $numcl
+ }
+ set drindex [berkdb random_int 0 $drop_len]
+ set droppct [lindex $drop_list $drindex]
+ set do_sec [berkdb random_int 0 1]
+ set do_oob [berkdb random_int 0 1]
+ set do_del [berkdb random_int 0 1]
+
+ if { $do_sec } {
+ set envargs "-encryptaes $passwd"
+ append largs " -encrypt "
+ } else {
+ set envargs ""
+ }
+ check_handles
+ #
+ # This will set up the master and client envs
+ # and will return us the args to pass to the
+ # test.
+ set largs [repl_envsetup $envargs $largs \
+ $i $nclients $droppct $do_oob]
+
+ puts "[timestamp]"
+ set name [format "subdb%03d" $i]
+ if { [info exists parms($name)] != 1 } {
+ puts stderr "[format Subdb%03d $i] \
+ disabled in\
+ testparams.tcl; skipping."
+ continue
+ }
+ puts -nonewline "Repl: $name: dropping $droppct%, \
+ $nclients clients "
+ if { $do_del } {
+ puts -nonewline " with delete verification;"
+ } else {
+ puts -nonewline " no delete verification;"
+ }
+ if { $do_sec } {
+ puts -nonewline " with security;"
+ } else {
+ puts -nonewline " no security;"
+ }
+ if { $do_oob } {
+ puts -nonewline " with out-of-order msgs;"
+ } else {
+ puts -nonewline " no out-of-order msgs;"
+ }
+ puts ""
+
+ eval $name $method $parms($name) $largs
+
+ if { $__debug_print != 0 } {
+ puts ""
+ }
+ if { $__debug_on != 0 } {
+ debug $__debug_test
+ }
+ flush stdout
+ flush stderr
+ repl_envprocq $i $nclients $do_oob
+ repl_envver0 $i $method $nclients
+ if { $do_del } {
+ repl_verdel $i $method $nclients
+ }
+ repl_envclose $i $envargs
+ set largs $save_largs
+ }
+ } res]
+ if { $stat != 0} {
+ global errorInfo;
+
+ set fnl [string first "\n" $errorInfo]
+ set theError [string range $errorInfo 0 [expr $fnl - 1]]
+ if {[string first FAIL $errorInfo] == -1} {
+ error "FAIL:[timestamp]\
+ run_repmethod: $method $i: $theError"
+ } else {
+ error $theError;
+ }
+ }
+ set stat [catch {
+ for { set i $start } { $i <= $stop } {incr i} {
+ if { $numcl == 0 } {
+ set clindex [berkdb random_int 0 $cl_len]
+ set nclients [lindex $client_list $clindex]
+ } else {
+ set nclients $numcl
+ }
+ set drindex [berkdb random_int 0 $drop_len]
+ set droppct [lindex $drop_list $drindex]
+ set do_sec [berkdb random_int 0 1]
+ set do_oob [berkdb random_int 0 1]
+ set do_del [berkdb random_int 0 1]
+
+ if { $do_sec } {
+ set envargs "-encryptaes $passwd"
+ append largs " -encrypt "
+ } else {
+ set envargs ""
+ }
+ check_handles
+ #
+ # This will set up the master and client envs
+ # and will return us the args to pass to the
+ # test.
+ set largs [repl_envsetup $envargs $largs \
+ $i $nclients $droppct $do_oob]
+
+ puts "[timestamp]"
+ set name [format "test%03d" $i]
+ if { [info exists parms($name)] != 1 } {
+ puts stderr "[format Test%03d $i] \
+ disabled in\
+ testparams.tcl; skipping."
+ continue
+ }
+ puts -nonewline "Repl: $name: dropping $droppct%, \
+ $nclients clients "
+ if { $do_del } {
+ puts -nonewline " with delete verification;"
+ } else {
+ puts -nonewline " no delete verification;"
+ }
+ if { $do_sec } {
+ puts -nonewline " with security;"
+ } else {
+ puts -nonewline " no security;"
+ }
+ if { $do_oob } {
+ puts -nonewline " with out-of-order msgs;"
+ } else {
+ puts -nonewline " no out-of-order msgs;"
+ }
+ puts ""
+
+ eval $name $method $parms($name) $largs
+
+ if { $__debug_print != 0 } {
+ puts ""
+ }
+ if { $__debug_on != 0 } {
+ debug $__debug_test
+ }
+ flush stdout
+ flush stderr
+ repl_envprocq $i $nclients $do_oob
+ repl_envver0 $i $method $nclients
+ if { $do_del } {
+ repl_verdel $i $method $nclients
+ }
+ repl_envclose $i $envargs
+ set largs $save_largs
+ }
+ } res]
+ if { $stat != 0} {
+ global errorInfo;
+
+ set fnl [string first "\n" $errorInfo]
+ set theError [string range $errorInfo 0 [expr $fnl - 1]]
+ if {[string first FAIL $errorInfo] == -1} {
+ error "FAIL:[timestamp]\
+ run_repmethod: $method $i: $theError"
+ } else {
+ error $theError;
+ }
+ }
+ set is_envmethod 0
+ }
+}
+
+#
+# Run method tests, each in its own, new environment. (As opposed to
+# run_envmethod1 which runs all the tests in a single environment.)
+#
+proc run_envmethod { method {start 1} {stop 0} {display 0} {run 1} \
+ {outfile stdout } { largs "" } } {
+ global __debug_on
+ global __debug_print
+ global __debug_test
+ global is_envmethod
+ global num_test
+ global parms
+ source ./include.tcl
+
+ set stopsdb $num_test(sdb)
+ if { $stop == 0 } {
+ set stop $num_test(test)
+ } else {
+ if { $stopsdb > $stop } {
+ set stopsdb $stop
+ }
+ }
+
+ set save_largs $largs
+ env_cleanup $testdir
+
+ if { $display == 1 } {
+ for { set i $start } { $i <= $stop } { incr i } {
+ puts $outfile "eval run_envmethod $method \
+ $i $i 0 1 stdout $largs"
+ }
+ }
+
+ if { $run == 1 } {
+ set is_envmethod 1
+ #
+ # Run both subdb and normal tests for as long as there are
+ # some of each type. Start with the subdbs:
+ set stat [catch {
+ for { set i $start } { $i <= $stopsdb } {incr i} {
+ check_handles
+ set env [eval {berkdb_env -create -txn \
+ -mode 0644 -home $testdir}]
+ error_check_good env_open \
+ [is_valid_env $env] TRUE
+ append largs " -env $env "
+
+ puts "[timestamp]"
+ set name [format "subdb%03d" $i]
+ if { [info exists parms($name)] != 1 } {
+ puts stderr \
+ "[format Subdb%03d $i] disabled in\
+ testparams.tcl; skipping."
+ continue
+ }
+ eval $name $method $parms($name) $largs
+
+ error_check_good envclose [$env close] 0
+ error_check_good envremove [berkdb envremove \
+ -home $testdir] 0
+ flush stdout
+ flush stderr
+ set largs $save_largs
+ }
+ } res]
+ if { $stat != 0} {
+ global errorInfo;
+
+ set fnl [string first "\n" $errorInfo]
+ set theError [string range $errorInfo 0 [expr $fnl - 1]]
+ if {[string first FAIL $errorInfo] == -1} {
+ error "FAIL:[timestamp]\
+ run_envmethod: $method $i: $theError"
+ } else {
+ error $theError;
+ }
+ }
+ # Subdb tests are done, now run through the regular tests:
+ set stat [catch {
+ for { set i $start } { $i <= $stop } {incr i} {
+ check_handles
+ set env [eval {berkdb_env -create -txn \
+ -mode 0644 -home $testdir}]
+ error_check_good env_open \
+ [is_valid_env $env] TRUE
+ append largs " -env $env "
+
+ puts "[timestamp]"
+ set name [format "test%03d" $i]
+ if { [info exists parms($name)] != 1 } {
+ puts stderr \
+ "[format Test%03d $i] disabled in\
+ testparams.tcl; skipping."
+ continue
+ }
+ eval $name $method $parms($name) $largs
+
+ if { $__debug_print != 0 } {
+ puts ""
+ }
+ if { $__debug_on != 0 } {
+ debug $__debug_test
+ }
+ flush stdout
+ flush stderr
+ set largs $save_largs
+ error_check_good envclose [$env close] 0
+ error_check_good envremove [berkdb envremove \
+ -home $testdir] 0
+ }
+ } res]
+ if { $stat != 0} {
+ global errorInfo;
+
+ set fnl [string first "\n" $errorInfo]
+ set theError [string range $errorInfo 0 [expr $fnl - 1]]
+ if {[string first FAIL $errorInfo] == -1} {
+ error "FAIL:[timestamp]\
+ run_envmethod: $method $i: $theError"
+ } else {
+ error $theError;
+ }
+ }
+ set is_envmethod 0
+ }
+}
+
+proc subdb { method {start 1} {stop 0} {display 0} {run 1} \
+ {outfile stdout} args} {
+ global num_test testdir
+ global parms
+
+ for { set i $start } { $i <= $stop } {incr i} {
+ set name [format "subdb%03d" $i]
+ if { [info exists parms($name)] != 1 } {
+ puts stderr "[format Subdb%03d $i] disabled in\
testparams.tcl; skipping."
continue
}
@@ -983,37 +1403,63 @@ proc subdb { method display run {outfile stdout} args} {
}
}
-proc run_recd { method {start 1} {stop 0} args } {
+proc run_recd { method {start 1} {stop 0} {run 1} {display 0} args } {
global __debug_on
global __debug_print
+ global __debug_test
global parms
- global recdtests
+ global num_test
global log_log_record_types
source ./include.tcl
if { $stop == 0 } {
- set stop $recdtests
+ set stop $num_test(recd)
+ }
+ if { $run == 1 } {
+ puts "run_recd: $method $start $stop $args"
}
- puts "run_recd: $method $start $stop $args"
if {[catch {
for { set i $start } { $i <= $stop } {incr i} {
- check_handles
- puts "[timestamp]"
set name [format "recd%03d" $i]
- # By redirecting stdout to stdout, we make exec
- # print output rather than simply returning it.
- exec $tclsh_path << "source $test_path/test.tcl; \
- set log_log_record_types $log_log_record_types; \
- eval $name $method" >@ stdout
- if { $__debug_print != 0 } {
- puts ""
+ if { [info exists parms($name)] != 1 } {
+ puts stderr "[format Recd%03d $i] disabled in\
+ testparams.tcl; skipping."
+ continue
}
- if { $__debug_on != 0 } {
- debug
+ if { $display } {
+ puts "eval $name $method $parms($name) $args"
+ }
+ if { $run } {
+ check_handles
+ puts "[timestamp]"
+ # By redirecting stdout to stdout, we make exec
+ # print output rather than simply returning it.
+ # By redirecting stderr to stdout too, we make
+ # sure everything winds up in the ALL.OUT file.
+ set ret [catch { exec $tclsh_path << \
+ "source $test_path/test.tcl; \
+ set log_log_record_types \
+ $log_log_record_types; eval $name \
+ $method $parms($name) $args" \
+ >&@ stdout
+ } res]
+
+ # Don't die if the test failed; we want
+ # to just proceed.
+ if { $ret != 0 } {
+ puts "FAIL:[timestamp] $res"
+ }
+
+ if { $__debug_print != 0 } {
+ puts ""
+ }
+ if { $__debug_on != 0 } {
+ debug $__debug_test
+ }
+ flush stdout
+ flush stderr
}
- flush stdout
- flush stderr
}
} res] != 0} {
global errorInfo;
@@ -1029,7 +1475,7 @@ proc run_recd { method {start 1} {stop 0} args } {
}
}
-proc run_recds { } {
+proc run_recds { {run 1} {display 0} args } {
global log_log_record_types
set log_log_record_types 1
@@ -1037,18 +1483,19 @@ proc run_recds { } {
foreach method \
"btree rbtree hash queue queueext recno frecno rrecno" {
check_handles
- if { [catch \
- {run_recd -$method} ret ] != 0 } {
+ if { [catch {eval \
+ run_recd -$method 1 0 $run $display $args} ret ] != 0 } {
puts $ret
}
}
- logtrack_summary
+ if { $run } {
+ logtrack_summary
+ }
set log_log_record_types 0
}
proc run_all { args } {
- global runtests
- global subdbtests
+ global num_test
source ./include.tcl
fileremove -f ALL.OUT
@@ -1058,6 +1505,8 @@ proc run_all { args } {
set display 1
set run 1
set am_only 0
+ set parallel 0
+ set nparalleltests 0
set rflags {--}
foreach f $flags {
switch $f {
@@ -1091,51 +1540,60 @@ proc run_all { args } {
lappend args -A
eval {run_std} $args
- set test_pagesizes { 512 8192 65536 }
+ set test_pagesizes [get_test_pagesizes]
set args [lindex $exflgs 0]
set save_args $args
foreach pgsz $test_pagesizes {
set args $save_args
- append args " -pagesize $pgsz"
+ append args " -pagesize $pgsz -chksum"
if { $am_only == 0 } {
# Run recovery tests.
#
+ # XXX These don't actually work at multiple pagesizes;
+ # disable them for now.
+ #
# XXX These too are broken into separate tclsh
- # instantiations so we don't require so much
+ # instantiations so we don't require so much
# memory, but I think it's cleaner
# and more useful to do it down inside proc r than here,
# since "r recd" gets done a lot and needs to work.
- puts "Running recovery tests with pagesize $pgsz"
- if [catch {exec $tclsh_path \
- << "source $test_path/test.tcl; \
- r $rflags recd $args" >>& ALL.OUT } res] {
- set o [open ALL.OUT a]
- puts $o "FAIL: recd test"
- close $o
- }
+ #
+ # XXX See comment in run_std for why this only directs
+ # stdout and not stderr. Don't worry--the right stuff
+ # happens.
+ #puts "Running recovery tests with pagesize $pgsz"
+ #if [catch {exec $tclsh_path \
+ # << "source $test_path/test.tcl; \
+ # r $rflags recd $args" \
+ # 2>@ stderr >> ALL.OUT } res] {
+ # set o [open ALL.OUT a]
+ # puts $o "FAIL: recd test:"
+ # puts $o $res
+ # close $o
+ #}
}
-
+
# Access method tests.
#
# XXX
- # Broken up into separate tclsh instantiations so
+ # Broken up into separate tclsh instantiations so
# we don't require so much memory.
foreach i \
"btree rbtree hash queue queueext recno frecno rrecno" {
puts "Running $i tests with pagesize $pgsz"
- for { set j 1 } { $j <= $runtests } {incr j} {
+ for { set j 1 } { $j <= $num_test(test) } {incr j} {
if { $run == 0 } {
set o [open ALL.OUT a]
- run_method -$i $j $j $display \
- $run $o $args
+ eval {run_method -$i $j $j $display \
+ $run $o} $args
close $o
}
if { $run } {
if [catch {exec $tclsh_path \
<< "source $test_path/test.tcl; \
- run_method -$i $j $j $display \
- $run stdout $args" \
+ eval {run_method -$i $j $j \
+ $display $run stdout} $args" \
>>& ALL.OUT } res] {
set o [open ALL.OUT a]
puts $o \
@@ -1149,47 +1607,82 @@ proc run_all { args } {
#
# Run subdb tests with varying pagesizes too.
#
+ for { set j 1 } { $j <= $num_test(sdb) } {incr j} {
+ if { $run == 0 } {
+ set o [open ALL.OUT a]
+ eval {subdb -$i $j $j $display \
+ $run $o} $args
+ close $o
+ }
+ if { $run == 1 } {
+ if [catch {exec $tclsh_path \
+ << "source $test_path/test.tcl; \
+ eval {subdb -$i $j $j $display \
+ $run stdout} $args" \
+ >>& ALL.OUT } res] {
+ set o [open ALL.OUT a]
+ puts $o "FAIL: subdb -$i $j $j"
+ close $o
+ }
+ }
+ }
+ }
+ }
+ set args $save_args
+ #
+ # Run access method tests at default page size in one env.
+ #
+ foreach i "btree rbtree hash queue queueext recno frecno rrecno" {
+ puts "Running $i tests in a txn env"
+ for { set j 1 } { $j <= $num_test(test) } { incr j } {
if { $run == 0 } {
set o [open ALL.OUT a]
- subdb -$i $display $run $o $args
+ run_envmethod -$i $j $j $display \
+ $run $o $args
close $o
}
- if { $run == 1 } {
+ if { $run } {
if [catch {exec $tclsh_path \
<< "source $test_path/test.tcl; \
- subdb -$i $display $run stdout $args" \
+ run_envmethod -$i $j $j \
+ $display $run stdout $args" \
>>& ALL.OUT } res] {
set o [open ALL.OUT a]
- puts $o "FAIL: subdb -$i test"
+ puts $o \
+ "FAIL: run_envmethod $i $j $j"
close $o
}
}
}
}
- set args $save_args
#
- # Run access method tests at default page size in one env.
+ # Run tests using proc r. The replication tests have been
+ # moved from run_std to run_all.
#
- foreach i "btree rbtree hash queue queueext recno frecno rrecno" {
- puts "Running $i tests in an env"
- if { $run == 0 } {
+ set test_list {
+ {"replication" "rep"}
+ {"security" "sec"}
+ }
+ #
+ # If configured for RPC, then run rpc tests too.
+ #
+ if { [file exists ./berkeley_db_svc] ||
+ [file exists ./berkeley_db_cxxsvc] ||
+ [file exists ./berkeley_db_javasvc] } {
+ append test_list {{"RPC" "rpc"}}
+ }
+
+ foreach pair $test_list {
+ set msg [lindex $pair 0]
+ set cmd [lindex $pair 1]
+ puts "Running $msg tests"
+ if [catch {exec $tclsh_path \
+ << "source $test_path/test.tcl; \
+ r $rflags $cmd $args" >>& ALL.OUT } res] {
set o [open ALL.OUT a]
- run_envmethod1 -$i 1 $runtests $display \
- $run $o $args
+ puts $o "FAIL: $cmd test"
close $o
}
- if { $run } {
- if [catch {exec $tclsh_path \
- << "source $test_path/test.tcl; \
- run_envmethod1 -$i 1 $runtests $display \
- $run stdout $args" \
- >>& ALL.OUT } res] {
- set o [open ALL.OUT a]
- puts $o \
- "FAIL: run_envmethod1 $i"
- close $o
- }
- }
}
# If not actually running, no need to check for failure.
@@ -1229,58 +1722,97 @@ proc run_envmethod1 { method {start 1} {stop 0} {display 0} {run 1} \
{ outfile stdout } args } {
global __debug_on
global __debug_print
+ global __debug_test
+ global is_envmethod
+ global num_test
global parms
- global runtests
source ./include.tcl
+ set stopsdb $num_test(sdb)
if { $stop == 0 } {
- set stop $runtests
+ set stop $num_test(test)
+ } else {
+ if { $stopsdb > $stop } {
+ set stopsdb $stop
+ }
}
if { $run == 1 } {
puts "run_envmethod1: $method $start $stop $args"
}
- set txn ""
+ set is_envmethod 1
if { $run == 1 } {
check_handles
env_cleanup $testdir
error_check_good envremove [berkdb envremove -home $testdir] 0
- set env [eval {berkdb env -create -mode 0644 -home $testdir}]
+ set env [eval {berkdb_env -create -cachesize {0 10000000 0}} \
+ {-mode 0644 -home $testdir}]
error_check_good env_open [is_valid_env $env] TRUE
append largs " -env $env "
}
+ if { $display } {
+ # The envmethod1 tests can't be split up, since they share
+ # an env.
+ puts $outfile "eval run_envmethod1 $method $args"
+ }
+
+ set stat [catch {
+ for { set i $start } { $i <= $stopsdb } {incr i} {
+ set name [format "subdb%03d" $i]
+ if { [info exists parms($name)] != 1 } {
+ puts stderr "[format Subdb%03d $i] disabled in\
+ testparams.tcl; skipping."
+ continue
+ }
+ if { $run } {
+ puts $outfile "[timestamp]"
+ eval $name $method $parms($name) $largs
+ if { $__debug_print != 0 } {
+ puts $outfile ""
+ }
+ if { $__debug_on != 0 } {
+ debug $__debug_test
+ }
+ }
+ flush stdout
+ flush stderr
+ }
+ } res]
+ if { $stat != 0} {
+ global errorInfo;
+
+ set fnl [string first "\n" $errorInfo]
+ set theError [string range $errorInfo 0 [expr $fnl - 1]]
+ if {[string first FAIL $errorInfo] == -1} {
+ error "FAIL:[timestamp]\
+ run_envmethod: $method $i: $theError"
+ } else {
+ error $theError;
+ }
+ }
set stat [catch {
for { set i $start } { $i <= $stop } {incr i} {
set name [format "test%03d" $i]
if { [info exists parms($name)] != 1 } {
- puts "[format Test%03d $i] disabled in\
- testparams.tcl; skipping."
+ puts stderr "[format Test%03d $i] disabled in\
+ testparams.tcl; skipping."
continue
}
- if { $display } {
- puts -nonewline $outfile "eval $name $method"
- puts -nonewline $outfile " $parms($name) $args"
- puts $outfile " ; verify_dir $testdir \"\" 1"
- }
if { $run } {
- check_handles $outfile
puts $outfile "[timestamp]"
eval $name $method $parms($name) $largs
if { $__debug_print != 0 } {
puts $outfile ""
}
if { $__debug_on != 0 } {
- debug
+ debug $__debug_test
}
}
flush stdout
flush stderr
}
} res]
- if { $run == 1 } {
- error_check_good envclose [$env close] 0
- }
if { $stat != 0} {
global errorInfo;
@@ -1293,5 +1825,39 @@ proc run_envmethod1 { method {start 1} {stop 0} {display 0} {run 1} \
error $theError;
}
}
+ if { $run == 1 } {
+ error_check_good envclose [$env close] 0
+ check_handles $outfile
+ }
+ set is_envmethod 0
+
+}
+
+# We want to test all of 512b, 8Kb, and 32Kb pages, but chances are one
+# of these is the default pagesize. We don't want to run all the AM tests
+# twice, so figure out what the default page size is, then return the
+# other two.
+proc get_test_pagesizes { } {
+ # Create an in-memory database.
+ set db [berkdb_open -create -btree]
+ error_check_good gtp_create [is_valid_db $db] TRUE
+ set statret [$db stat]
+ set pgsz 0
+ foreach pair $statret {
+ set fld [lindex $pair 0]
+ if { [string compare $fld {Page size}] == 0 } {
+ set pgsz [lindex $pair 1]
+ }
+ }
+ error_check_good gtp_close [$db close] 0
+
+ error_check_bad gtp_pgsz $pgsz 0
+ switch $pgsz {
+ 512 { return {8192 32768} }
+ 8192 { return {512 32768} }
+ 32768 { return {512 8192} }
+ default { return {512 8192 32768} }
+ }
+ error_check_good NOTREACHED 0 1
}
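The bulk of the test.tcl changes above add new top-level drivers (run_secmethod, run_secenv, run_reptest, run_repmethod, the reworked run_envmethod, run_recd/run_recds, and get_test_pagesizes). A minimal usage sketch, assuming a tclsh started in the build directory where include.tcl sets test_path; the method name, test ranges, and counts are placeholders, not values from the patch:

    # Hypothetical by-hand invocation of the new drivers (sketch only).
    source ./include.tcl
    source $test_path/test.tcl

    # Access-method tests 1-5 for btree, each in a fresh transactional env.
    run_envmethod -btree 1 5

    # The same range with AES encryption on the env and the databases.
    run_secmethod -btree 1 5

    # One replication run of test001: 10% dropped messages, 2 clients,
    # delete verification on, no crypto, no out-of-order messages.
    run_reptest -btree 1 10 2 1 0 0

    # Replication driver over tests 1-5, choosing client counts at random.
    run_repmethod -btree 0 1 5

    # The non-default page sizes the suite will also exercise.
    puts [get_test_pagesizes]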
diff --git a/bdb/test/test001.tcl b/bdb/test/test001.tcl
index fa8e112d100..f0b562bbf24 100644
--- a/bdb/test/test001.tcl
+++ b/bdb/test/test001.tcl
@@ -1,45 +1,85 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: test001.tcl,v 11.17 2000/12/06 16:08:05 bostic Exp $
+# $Id: test001.tcl,v 11.28 2002/08/08 15:38:11 bostic Exp $
#
-# DB Test 1 {access method}
-# Use the first 10,000 entries from the dictionary.
-# Insert each with self as key and data; retrieve each.
-# After all are entered, retrieve all; compare output to original.
-# Close file, reopen, do retrieve and re-verify.
-proc test001 { method {nentries 10000} {start 0} {tnum "01"} args } {
+# TEST test001
+# TEST Small keys/data
+# TEST Put/get per key
+# TEST Dump file
+# TEST Close, reopen
+# TEST Dump file
+# TEST
+# TEST Use the first 10,000 entries from the dictionary.
+# TEST Insert each with self as key and data; retrieve each.
+# TEST After all are entered, retrieve all; compare output to original.
+# TEST Close file, reopen, do retrieve and re-verify.
+proc test001 { method {nentries 10000} {start 0} {tnum "01"} {noclean 0} args } {
source ./include.tcl
set args [convert_args $method $args]
set omethod [convert_method $method]
- puts "Test0$tnum: $method ($args) $nentries equal key/data pairs"
- if { $start != 0 } {
- puts "\tStarting at $start"
- }
-
# Create the database and open the dictionary
set eindex [lsearch -exact $args "-env"]
#
# If we are using an env, then testfile should just be the db name.
# Otherwise it is the test directory and the name.
+ # If we are not using an external env, then test setting
+ # the database cache size and using multiple caches.
+ set txnenv 0
if { $eindex == -1 } {
set testfile $testdir/test0$tnum.db
+ append args " -cachesize {0 1048576 3} "
set env NULL
} else {
set testfile test0$tnum.db
incr eindex
set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ #
+ # If we are using txns and running with the
+ # default, set the default down a bit.
+ #
+ if { $nentries == 10000 } {
+ set nentries 100
+ }
+ }
+ set testdir [get_home $env]
+ }
+ puts "Test0$tnum: $method ($args) $nentries equal key/data pairs"
+ if { $start != 0 } {
+ # Sadly enough, we are using start in two different ways.
+ # In test090, it is used to test really big record numbers
+ # in queue. In replication, it is used to be able to run
+ # different iterations of this test using different key/data
+ # pairs. We try to hide all that magic here.
+ puts "\tStarting at $start"
+
+ if { $tnum != 90 } {
+ set did [open $dict]
+ for { set nlines 0 } { [gets $did str] != -1 } \
+ { incr nlines} {
+ }
+ close $did
+ if { $start + $nentries > $nlines } {
+ set start [expr $nlines - $nentries]
+ }
+ }
}
+
set t1 $testdir/t1
set t2 $testdir/t2
set t3 $testdir/t3
- cleanup $testdir $env
+ if { $noclean == 0 } {
+ cleanup $testdir $env
+ }
set db [eval {berkdb_open \
- -create -truncate -mode 0644} $args $omethod $testfile]
+ -create -mode 0644} $args $omethod $testfile]
error_check_good dbopen [is_valid_db $db] TRUE
set did [open $dict]
@@ -47,8 +87,6 @@ proc test001 { method {nentries 10000} {start 0} {tnum "01"} args } {
set gflags ""
set txn ""
- set nentries [expr $nentries + $start]
-
if { [is_record_based $method] == 1 } {
set checkfunc test001_recno.check
append gflags " -recno"
@@ -57,20 +95,46 @@ proc test001 { method {nentries 10000} {start 0} {tnum "01"} args } {
}
puts "\tTest0$tnum.a: put/get loop"
# Here is the loop where we put and get each key/data pair
- set count $start
+ set count 0
+ if { $start != 0 && $tnum != 90 } {
+ # Skip over "start" entries
+ for { set count 0 } { $count < $start } { incr count } {
+ gets $did str
+ }
+ set count 0
+ }
while { [gets $did str] != -1 && $count < $nentries } {
if { [is_record_based $method] == 1 } {
global kvals
- set key [expr $count + 1]
+ set key [expr $count + 1 + $start]
+ if { 0xffffffff > 0 && $key > 0xffffffff } {
+ set key [expr $key - 0x100000000]
+ }
+ if { $key == 0 || $key - 0xffffffff == 1 } {
+ incr key
+ incr count
+ }
set kvals($key) [pad_data $method $str]
} else {
set key $str
set str [reverse $str]
}
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
set ret [eval \
{$db put} $txn $pflags {$key [chop_data $method $str]}]
error_check_good put $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ if { $count % 50 == 0 } {
+ error_check_good txn_checkpoint($count) \
+ [$env txn_checkpoint] 0
+ }
+ }
set ret [eval {$db get} $gflags {$key}]
error_check_good \
@@ -86,30 +150,56 @@ proc test001 { method {nentries 10000} {start 0} {tnum "01"} args } {
error_check_good getbothBAD [llength $ret] 0
incr count
- if { [expr $count + 1] == 0 } {
- incr count
- }
}
close $did
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
# Now we will get each key from the DB and compare the results
# to the original.
puts "\tTest0$tnum.b: dump file"
dump_file $db $txn $t1 $checkfunc
+ #
+ # dump_file should just have been "get" calls, so
+ # aborting a get should really be a no-op. Abort
+ # just for the fun of it.
+ if { $txnenv == 1 } {
+ error_check_good txn [$t abort] 0
+ }
error_check_good db_close [$db close] 0
# Now compare the keys to see if they match the dictionary (or ints)
if { [is_record_based $method] == 1 } {
set oid [open $t2 w]
- for {set i [expr $start + 1]} {$i <= $nentries} {set i [incr i]} {
- if { $i == 0 } {
- incr i
+ # If this is test 90, we're checking wrap and we really
+ # only added nentries number of items starting at start.
+ # However, if this isn't 90, then we started at start and
+ # added an additional nentries number of items.
+ if { $tnum == 90 } {
+ for {set i 1} {$i <= $nentries} {incr i} {
+ set j [expr $i + $start]
+ if { 0xffffffff > 0 && $j > 0xffffffff } {
+ set j [expr $j - 0x100000000]
+ }
+ if { $j == 0 } {
+ incr i
+ incr j
+ }
+ puts $oid $j
+ }
+ } else {
+ for { set i 1 } { $i <= $nentries + $start } {incr i} {
+ puts $oid $i
}
- puts $oid $i
}
close $oid
} else {
set q q
- filehead $nentries $dict $t2
+ # We assume that when this is used with start != 0, the
+ # test database accumulates data
+ filehead [expr $nentries + $start] $dict $t2
}
filesort $t2 $t3
file rename -force $t3 $t2
@@ -120,7 +210,7 @@ proc test001 { method {nentries 10000} {start 0} {tnum "01"} args } {
puts "\tTest0$tnum.c: close, open, and dump file"
# Now, reopen the file and run the last test again.
- open_and_dump_file $testfile $env $txn $t1 $checkfunc \
+ open_and_dump_file $testfile $env $t1 $checkfunc \
dump_file_direction "-first" "-next"
if { [string compare $omethod "-recno"] != 0 } {
filesort $t1 $t3
@@ -132,7 +222,7 @@ proc test001 { method {nentries 10000} {start 0} {tnum "01"} args } {
# Now, reopen the file and run the last test again in the
# reverse direction.
puts "\tTest0$tnum.d: close, open, and dump file in reverse direction"
- open_and_dump_file $testfile $env $txn $t1 $checkfunc \
+ open_and_dump_file $testfile $env $t1 $checkfunc \
dump_file_direction "-last" "-prev"
if { [string compare $omethod "-recno"] != 0 } {
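The test001 hunks above establish the pattern the rest of the suite follows: when the caller passes a transactional env, every put runs in its own transaction. A distilled sketch, with placeholder key/data values and the $env/$db handles taken from the surrounding test:

    # Per-put transaction wrapping used when the env is transactional.
    set txn ""
    set txnenv [is_txnenv $env]
    if { $txnenv == 1 } {
        set t [$env txn]
        error_check_good txn [is_valid_txn $t $env] TRUE
        set txn "-txn $t"
    }
    set ret [eval {$db put} $txn {somekey somedata}]
    error_check_good put $ret 0
    if { $txnenv == 1 } {
        error_check_good txn_commit [$t commit] 0
        # test001 also checkpoints every 50th key:
        #     error_check_good txn_checkpoint [$env txn_checkpoint] 0
    }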
diff --git a/bdb/test/test002.tcl b/bdb/test/test002.tcl
index 882240b77bb..bc28994d6a7 100644
--- a/bdb/test/test002.tcl
+++ b/bdb/test/test002.tcl
@@ -1,17 +1,21 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: test002.tcl,v 11.13 2000/08/25 14:21:53 sue Exp $
+# $Id: test002.tcl,v 11.19 2002/05/22 15:42:43 sue Exp $
#
-# DB Test 2 {access method}
-# Use the first 10,000 entries from the dictionary.
-# Insert each with self as key and a fixed, medium length data string;
-# retrieve each. After all are entered, retrieve all; compare output
-# to original. Close file, reopen, do retrieve and re-verify.
-
-set datastr abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz
+# TEST test002
+# TEST Small keys/medium data
+# TEST Put/get per key
+# TEST Dump file
+# TEST Close, reopen
+# TEST Dump file
+# TEST
+# TEST Use the first 10,000 entries from the dictionary.
+# TEST Insert each with self as key and a fixed, medium length data string;
+# TEST retrieve each. After all are entered, retrieve all; compare output
+# TEST to original. Close file, reopen, do retrieve and re-verify.
proc test002 { method {nentries 10000} args } {
global datastr
@@ -21,8 +25,7 @@ proc test002 { method {nentries 10000} args } {
set args [convert_args $method $args]
set omethod [convert_method $method]
- puts "Test002: $method ($args) $nentries key <fixed data> pairs"
-
+ set txnenv 0
set eindex [lsearch -exact $args "-env"]
#
# If we are using an env, then testfile should just be the db name.
@@ -34,14 +37,28 @@ proc test002 { method {nentries 10000} args } {
set testfile test002.db
incr eindex
set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ #
+ # If we are using txns and running with the
+ # default, set the default down a bit.
+ #
+ if { $nentries == 10000 } {
+ set nentries 100
+ }
+ }
+ set testdir [get_home $env]
}
# Create the database and open the dictionary
+ puts "Test002: $method ($args) $nentries key <fixed data> pairs"
+
set t1 $testdir/t1
set t2 $testdir/t2
set t3 $testdir/t3
cleanup $testdir $env
set db [eval {berkdb_open \
- -create -truncate -mode 0644} $args {$omethod $testfile}]
+ -create -mode 0644} $args {$omethod $testfile}]
error_check_good dbopen [is_valid_db $db] TRUE
set did [open $dict]
@@ -63,8 +80,16 @@ proc test002 { method {nentries 10000} args } {
} else {
set key $str
}
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
set ret [eval {$db put} $txn $pflags {$key [chop_data $method $datastr]}]
error_check_good put $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
set ret [eval {$db get} $gflags {$key}]
@@ -76,7 +101,15 @@ proc test002 { method {nentries 10000} args } {
# Now we will get each key from the DB and compare the results
# to the original.
puts "\tTest002.b: dump file"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
dump_file $db $txn $t1 test002.check
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
error_check_good db_close [$db close] 0
# Now compare the keys to see if they match the dictionary
@@ -100,7 +133,7 @@ proc test002 { method {nentries 10000} args } {
# Now, reopen the file and run the last test again.
puts "\tTest002.c: close, open, and dump file"
- open_and_dump_file $testfile $env $txn $t1 test002.check \
+ open_and_dump_file $testfile $env $t1 test002.check \
dump_file_direction "-first" "-next"
if { [string compare $omethod "-recno"] != 0 } {
@@ -111,7 +144,7 @@ proc test002 { method {nentries 10000} args } {
# Now, reopen the file and run the last test again in reverse direction.
puts "\tTest002.d: close, open, and dump file in reverse direction"
- open_and_dump_file $testfile $env $txn $t1 test002.check \
+ open_and_dump_file $testfile $env $t1 test002.check \
dump_file_direction "-last" "-prev"
if { [string compare $omethod "-recno"] != 0 } {
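test002 picks up the same -env/-auto_commit preamble that test001 gained; pulled out on its own it amounts to the following (NNN stands in for the test number, $args and $nentries come from the proc):

    # Detect an external env, enable auto-commit for transactional envs,
    # shrink the default workload, and point testdir at the env's home.
    set txnenv 0
    set eindex [lsearch -exact $args "-env"]
    if { $eindex == -1 } {
        set testfile $testdir/testNNN.db
        set env NULL
    } else {
        set testfile testNNN.db
        incr eindex
        set env [lindex $args $eindex]
        set txnenv [is_txnenv $env]
        if { $txnenv == 1 } {
            append args " -auto_commit "
            if { $nentries == 10000 } {
                set nentries 100
            }
        }
        set testdir [get_home $env]
    }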
diff --git a/bdb/test/test003.tcl b/bdb/test/test003.tcl
index 013af2d419c..c7bfe6c15ad 100644
--- a/bdb/test/test003.tcl
+++ b/bdb/test/test003.tcl
@@ -1,14 +1,21 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: test003.tcl,v 11.18 2000/08/25 14:21:54 sue Exp $
+# $Id: test003.tcl,v 11.25 2002/05/22 18:32:18 sue Exp $
#
-# DB Test 3 {access method}
-# Take the source files and dbtest executable and enter their names as the
-# key with their contents as data. After all are entered, retrieve all;
-# compare output to original. Close file, reopen, do retrieve and re-verify.
+# TEST test003
+# TEST Small keys/large data
+# TEST Put/get per key
+# TEST Dump file
+# TEST Close, reopen
+# TEST Dump file
+# TEST
+# TEST Take the source files and dbtest executable and enter their names
+# TEST as the key with their contents as data. After all are entered,
+# TEST retrieve all; compare output to original. Close file, reopen, do
+# TEST retrieve and re-verify.
proc test003 { method args} {
global names
source ./include.tcl
@@ -23,6 +30,8 @@ proc test003 { method args} {
puts "Test003: $method ($args) filename=key filecontents=data pairs"
# Create the database and open the dictionary
+ set limit 0
+ set txnenv 0
set eindex [lsearch -exact $args "-env"]
#
# If we are using an env, then testfile should just be the db name.
@@ -34,6 +43,12 @@ proc test003 { method args} {
set testfile test003.db
incr eindex
set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ set limit 100
+ }
+ set testdir [get_home $env]
}
set t1 $testdir/t1
set t2 $testdir/t2
@@ -42,7 +57,7 @@ proc test003 { method args} {
cleanup $testdir $env
set db [eval {berkdb_open \
- -create -truncate -mode 0644} $args $omethod $testfile]
+ -create -mode 0644} $args $omethod $testfile]
error_check_good dbopen [is_valid_db $db] TRUE
set pflags ""
set gflags ""
@@ -55,11 +70,14 @@ proc test003 { method args} {
}
# Here is the loop where we put and get each key/data pair
- set file_list [ glob \
- { $test_path/../*/*.[ch] } $test_path/*.tcl *.{a,o,lo,exe} \
- $test_path/file.1 ]
-
- puts "\tTest003.a: put/get loop"
+ set file_list [get_file_list]
+ if { $limit } {
+ if { [llength $file_list] > $limit } {
+ set file_list [lrange $file_list 1 $limit]
+ }
+ }
+ set len [llength $file_list]
+ puts "\tTest003.a: put/get loop $len entries"
set count 0
foreach f $file_list {
if { [string compare [file type $f] "file"] != 0 } {
@@ -78,9 +96,17 @@ proc test003 { method args} {
fconfigure $fid -translation binary
set data [read $fid]
close $fid
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
set ret [eval {$db put} \
$txn $pflags {$key [chop_data $method $data]}]
error_check_good put $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
# Should really catch errors
set fid [open $t4 w]
@@ -104,7 +130,15 @@ proc test003 { method args} {
# Now we will get each key from the DB and compare the results
# to the original.
puts "\tTest003.b: dump file"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
dump_bin_file $db $txn $t1 $checkfunc
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
error_check_good db_close [$db close] 0
# Now compare the keys to see if they match the entries in the
@@ -135,7 +169,7 @@ proc test003 { method args} {
# Now, reopen the file and run the last test again.
puts "\tTest003.c: close, open, and dump file"
- open_and_dump_file $testfile $env $txn $t1 $checkfunc \
+ open_and_dump_file $testfile $env $t1 $checkfunc \
dump_bin_file_direction "-first" "-next"
if { [is_record_based $method] == 1 } {
@@ -147,8 +181,7 @@ proc test003 { method args} {
# Now, reopen the file and run the last test again in reverse direction.
puts "\tTest003.d: close, open, and dump file in reverse direction"
-
- open_and_dump_file $testfile $env $txn $t1 $checkfunc \
+ open_and_dump_file $testfile $env $t1 $checkfunc \
dump_bin_file_direction "-last" "-prev"
if { [is_record_based $method] == 1 } {
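test003 now draws its keys from the harness's get_file_list and, under a transactional env, caps the list so the test stays bounded; the core of that logic as a sketch ($limit is 100 when the env is transactional, 0 otherwise):

    # Cap the file list when running transactionally.
    set file_list [get_file_list]
    if { $limit } {
        if { [llength $file_list] > $limit } {
            set file_list [lrange $file_list 1 $limit]
        }
    }
    puts "\tTest003.a: put/get loop [llength $file_list] entries"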
diff --git a/bdb/test/test004.tcl b/bdb/test/test004.tcl
index 0b076d6cfb7..7bea6f88eca 100644
--- a/bdb/test/test004.tcl
+++ b/bdb/test/test004.tcl
@@ -1,14 +1,18 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: test004.tcl,v 11.15 2000/08/25 14:21:54 sue Exp $
+# $Id: test004.tcl,v 11.21 2002/05/22 18:32:35 sue Exp $
#
-# DB Test 4 {access method}
-# Check that cursor operations work. Create a database.
-# Read through the database sequentially using cursors and
-# delete each element.
+# TEST test004
+# TEST Small keys/medium data
+# TEST Put/get per key
+# TEST Sequential (cursor) get/delete
+# TEST
+# TEST Check that cursor operations work. Create a database.
+# TEST Read through the database sequentially using cursors and
+# TEST delete each element.
proc test004 { method {nentries 10000} {reopen 4} {build_only 0} args} {
source ./include.tcl
@@ -18,33 +22,47 @@ proc test004 { method {nentries 10000} {reopen 4} {build_only 0} args} {
set tnum test00$reopen
- puts -nonewline "$tnum:\
- $method ($args) $nentries delete small key; medium data pairs"
- if {$reopen == 5} {
- puts "(with close)"
- } else {
- puts ""
- }
-
# Create the database and open the dictionary
+ set txnenv 0
set eindex [lsearch -exact $args "-env"]
#
# If we are using an env, then testfile should just be the db name.
# Otherwise it is the test directory and the name.
if { $eindex == -1 } {
- set testfile $testdir/test004.db
+ set testfile $testdir/$tnum.db
set env NULL
} else {
- set testfile test004.db
+ set testfile $tnum.db
incr eindex
set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ #
+ # If we are using txns and running with the
+ # default, set the default down a bit.
+ #
+ if { $nentries == 10000 } {
+ set nentries 100
+ }
+ }
+ set testdir [get_home $env]
+ }
+
+ puts -nonewline "$tnum:\
+ $method ($args) $nentries delete small key; medium data pairs"
+ if {$reopen == 5} {
+ puts "(with close)"
+ } else {
+ puts ""
}
+
# Create the database and open the dictionary
set t1 $testdir/t1
set t2 $testdir/t2
set t3 $testdir/t3
cleanup $testdir $env
- set db [eval {berkdb_open -create -truncate -mode 0644} $args {$omethod $testfile}]
+ set db [eval {berkdb_open -create -mode 0644} $args {$omethod $testfile}]
error_check_good dbopen [is_valid_db $db] TRUE
set did [open $dict]
@@ -71,8 +89,17 @@ proc test004 { method {nentries 10000} {reopen 4} {build_only 0} args} {
set datastr [ make_data_str $str ]
- set ret [eval {$db put} $txn $pflags {$key [chop_data $method $datastr]}]
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn $pflags \
+ {$key [chop_data $method $datastr]}]
error_check_good put $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
set ret [eval {$db get} $gflags {$key}]
error_check_good "$tnum:put" $ret \
@@ -93,6 +120,11 @@ proc test004 { method {nentries 10000} {reopen 4} {build_only 0} args} {
# Now we will get each key from the DB and compare the results
# to the original, then delete it.
set outf [open $t1 w]
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
set c [eval {$db cursor} $txn]
set count 0
@@ -117,6 +149,9 @@ proc test004 { method {nentries 10000} {reopen 4} {build_only 0} args} {
}
close $outf
error_check_good curs_close [$c close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
# Now compare the keys to see if they match the dictionary
if { [is_record_based $method] == 1 } {
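Unlike the per-put commits in test001-003, test004 above holds a single transaction open across its whole cursor sweep; roughly (the read/verify/delete body is elided):

    # One transaction spans cursor open, the sequential pass, and close.
    if { $txnenv == 1 } {
        set t [$env txn]
        error_check_good txn [is_valid_txn $t $env] TRUE
        set txn "-txn $t"
    }
    set c [eval {$db cursor} $txn]
    # ... fetch each pair with [$c get -next], verify it, delete it ...
    error_check_good curs_close [$c close] 0
    if { $txnenv == 1 } {
        error_check_good txn_commit [$t commit] 0
    }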
diff --git a/bdb/test/test005.tcl b/bdb/test/test005.tcl
index 4cb5d88dfe2..f3e37f2149d 100644
--- a/bdb/test/test005.tcl
+++ b/bdb/test/test005.tcl
@@ -1,14 +1,19 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: test005.tcl,v 11.4 2000/05/22 12:51:38 bostic Exp $
+# $Id: test005.tcl,v 11.7 2002/01/11 15:53:40 bostic Exp $
#
-# DB Test 5 {access method}
-# Check that cursor operations work. Create a database; close database and
-# reopen it. Then read through the database sequentially using cursors and
-# delete each element.
+# TEST test005
+# TEST Small keys/medium data
+# TEST Put/get per key
+# TEST Close, reopen
+# TEST Sequential (cursor) get/delete
+# TEST
+# TEST Check that cursor operations work. Create a database; close
+# TEST it and reopen it. Then read through the database sequentially
+# TEST using cursors and delete each element.
proc test005 { method {nentries 10000} args } {
eval {test004 $method $nentries 5 0} $args
}
diff --git a/bdb/test/test006.tcl b/bdb/test/test006.tcl
index 9364d2a4f60..fbaebfe8ac8 100644
--- a/bdb/test/test006.tcl
+++ b/bdb/test/test006.tcl
@@ -1,14 +1,18 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: test006.tcl,v 11.13 2000/08/25 14:21:54 sue Exp $
+# $Id: test006.tcl,v 11.19 2002/05/22 15:42:44 sue Exp $
#
-# DB Test 6 {access method}
-# Keyed delete test.
-# Create database.
-# Go through database, deleting all entries by key.
+# TEST test006
+# TEST Small keys/medium data
+# TEST Put/get per key
+# TEST Keyed delete and verify
+# TEST
+# TEST Keyed delete test.
+# TEST Create database.
+# TEST Go through database, deleting all entries by key.
proc test006 { method {nentries 10000} {reopen 0} {tnum 6} args} {
source ./include.tcl
@@ -23,15 +27,8 @@ proc test006 { method {nentries 10000} {reopen 0} {tnum 6} args} {
set tname Test0$tnum
set dbname test0$tnum
}
- puts -nonewline "$tname: $method ($args) "
- puts -nonewline "$nentries equal small key; medium data pairs"
- if {$reopen == 1} {
- puts " (with close)"
- } else {
- puts ""
- }
-
# Create the database and open the dictionary
+ set txnenv 0
set eindex [lsearch -exact $args "-env"]
#
# If we are using an env, then testfile should just be the db name.
@@ -43,6 +40,25 @@ proc test006 { method {nentries 10000} {reopen 0} {tnum 6} args} {
set testfile $dbname.db
incr eindex
set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ #
+ # If we are using txns and running with the
+ # default, set the default down a bit.
+ #
+ if { $nentries == 10000 } {
+ set nentries 100
+ }
+ }
+ set testdir [get_home $env]
+ }
+ puts -nonewline "$tname: $method ($args) "
+ puts -nonewline "$nentries equal small key; medium data pairs"
+ if {$reopen == 1} {
+ puts " (with close)"
+ } else {
+ puts ""
}
set pflags ""
@@ -50,14 +66,14 @@ proc test006 { method {nentries 10000} {reopen 0} {tnum 6} args} {
set txn ""
set count 0
if { [is_record_based $method] == 1 } {
- append gflags " -recno"
+ append gflags " -recno"
}
# Here is the loop where we put and get each key/data pair
cleanup $testdir $env
set db [eval {berkdb_open \
- -create -truncate -mode 0644} $args {$omethod $testfile}]
+ -create -mode 0644} $args {$omethod $testfile}]
error_check_good dbopen [is_valid_db $db] TRUE
set did [open $dict]
@@ -70,9 +86,17 @@ proc test006 { method {nentries 10000} {reopen 0} {tnum 6} args} {
set datastr [make_data_str $str]
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
set ret [eval {$db put} \
$txn $pflags {$key [chop_data $method $datastr]}]
error_check_good put $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
set ret [eval {$db get} $gflags {$key}]
error_check_good "$tname: put $datastr got $ret" \
@@ -108,8 +132,16 @@ proc test006 { method {nentries 10000} {reopen 0} {tnum 6} args} {
error_check_good "$tname: get $datastr got $ret" \
$ret [list [list $key [pad_data $method $datastr]]]
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
set ret [eval {$db del} $txn {$key}]
error_check_good db_del:$key $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
incr count
}
close $did
diff --git a/bdb/test/test007.tcl b/bdb/test/test007.tcl
index 305740f0369..1e99d107a2d 100644
--- a/bdb/test/test007.tcl
+++ b/bdb/test/test007.tcl
@@ -1,13 +1,19 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: test007.tcl,v 11.5 2000/05/22 12:51:38 bostic Exp $
+# $Id: test007.tcl,v 11.8 2002/01/11 15:53:40 bostic Exp $
#
-# DB Test 7 {access method}
-# Check that delete operations work. Create a database; close database and
-# reopen it. Then issues delete by key for each entry.
+# TEST test007
+# TEST Small keys/medium data
+# TEST Put/get per key
+# TEST Close, reopen
+# TEST Keyed delete
+# TEST
+# TEST Check that delete operations work. Create a database; close
+# TEST database and reopen it. Then issues delete by key for each
+# TEST entry.
proc test007 { method {nentries 10000} {tnum 7} args} {
eval {test006 $method $nentries 1 $tnum} $args
}
diff --git a/bdb/test/test008.tcl b/bdb/test/test008.tcl
index 34144391ccc..0af97a40110 100644
--- a/bdb/test/test008.tcl
+++ b/bdb/test/test008.tcl
@@ -1,15 +1,23 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: test008.tcl,v 11.17 2000/10/19 17:35:39 sue Exp $
+# $Id: test008.tcl,v 11.23 2002/05/22 15:42:45 sue Exp $
#
-# DB Test 8 {access method}
-# Take the source files and dbtest executable and enter their names as the
-# key with their contents as data. After all are entered, begin looping
-# through the entries; deleting some pairs and then readding them.
-proc test008 { method {nentries 10000} {reopen 8} {debug 0} args} {
+# TEST test008
+# TEST Small keys/large data
+# TEST Put/get per key
+# TEST Loop through keys by steps (which change)
+# TEST ... delete each key at step
+# TEST ... add each key back
+# TEST ... change step
+# TEST Confirm that overflow pages are getting reused
+# TEST
+# TEST Take the source files and dbtest executable and enter their names as
+# TEST the key with their contents as data. After all are entered, begin
+# TEST looping through the entries; deleting some pairs and then readding them.
+proc test008 { method {reopen 8} {debug 0} args} {
source ./include.tcl
set tnum test00$reopen
@@ -29,6 +37,7 @@ proc test008 { method {nentries 10000} {reopen 8} {debug 0} args} {
}
# Create the database and open the dictionary
+ set txnenv 0
set eindex [lsearch -exact $args "-env"]
#
# If we are using an env, then testfile should just be the db name.
@@ -40,6 +49,11 @@ proc test008 { method {nentries 10000} {reopen 8} {debug 0} args} {
set testfile $tnum.db
incr eindex
set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ }
+ set testdir [get_home $env]
}
set t1 $testdir/t1
set t2 $testdir/t2
@@ -48,7 +62,7 @@ proc test008 { method {nentries 10000} {reopen 8} {debug 0} args} {
cleanup $testdir $env
- set db [eval {berkdb_open -create -truncate -mode 0644} \
+ set db [eval {berkdb_open -create -mode 0644} \
$args {$omethod $testfile}]
error_check_good dbopen [is_valid_db $db] TRUE
@@ -57,7 +71,7 @@ proc test008 { method {nentries 10000} {reopen 8} {debug 0} args} {
set txn ""
# Here is the loop where we put and get each key/data pair
- set file_list [glob ../*/*.c ./*.o ./*.lo ./*.exe]
+ set file_list [get_file_list]
set count 0
puts "\tTest00$reopen.a: Initial put/get loop"
@@ -65,9 +79,25 @@ proc test008 { method {nentries 10000} {reopen 8} {debug 0} args} {
set names($count) $f
set key $f
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
put_file $db $txn $pflags $f
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
get_file $db $txn $gflags $f $t4
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
error_check_good Test00$reopen:diff($f,$t4) \
[filecmp $f $t4] 0
@@ -88,11 +118,27 @@ proc test008 { method {nentries 10000} {reopen 8} {debug 0} args} {
puts "\tTest00$reopen.b: Delete re-add loop"
foreach i "1 2 4 8 16" {
for {set ndx 0} {$ndx < $count} { incr ndx $i} {
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
set r [eval {$db del} $txn {$names($ndx)}]
error_check_good db_del:$names($ndx) $r 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
}
for {set ndx 0} {$ndx < $count} { incr ndx $i} {
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
put_file $db $txn $pflags $names($ndx)
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
}
}
@@ -104,7 +150,15 @@ proc test008 { method {nentries 10000} {reopen 8} {debug 0} args} {
# Now, reopen the file and make sure the key/data pairs look right.
puts "\tTest00$reopen.c: Dump contents forward"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
dump_bin_file $db $txn $t1 test008.check
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
set oid [open $t2.tmp w]
foreach f $file_list {
@@ -120,7 +174,15 @@ proc test008 { method {nentries 10000} {reopen 8} {debug 0} args} {
# Now, reopen the file and run the last test again in reverse direction.
puts "\tTest00$reopen.d: Dump contents backward"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
dump_bin_file_direction $db $txn $t1 test008.check "-last" "-prev"
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
filesort $t1 $t3
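test008's delete/re-add loop keeps its stepped structure, but under a transactional env each delete and each re-add now commits on its own; the skeleton, with put_file and the $names array coming from the test:

    # Delete every step-th entry, then put it back, one txn per operation.
    foreach step "1 2 4 8 16" {
        for {set ndx 0} {$ndx < $count} {incr ndx $step} {
            set t [$env txn]
            error_check_good txn [is_valid_txn $t $env] TRUE
            set txn "-txn $t"
            set r [eval {$db del} $txn {$names($ndx)}]
            error_check_good db_del:$names($ndx) $r 0
            error_check_good txn_commit [$t commit] 0
        }
        for {set ndx 0} {$ndx < $count} {incr ndx $step} {
            set t [$env txn]
            error_check_good txn [is_valid_txn $t $env] TRUE
            set txn "-txn $t"
            put_file $db $txn "" $names($ndx)
            error_check_good txn_commit [$t commit] 0
        }
    }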
diff --git a/bdb/test/test009.tcl b/bdb/test/test009.tcl
index e9c01875f77..7ef46d8c818 100644
--- a/bdb/test/test009.tcl
+++ b/bdb/test/test009.tcl
@@ -1,15 +1,18 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: test009.tcl,v 11.4 2000/05/22 12:51:38 bostic Exp $
+# $Id: test009.tcl,v 11.8 2002/05/22 15:42:45 sue Exp $
#
-# DB Test 9 {access method}
-# Check that we reuse overflow pages. Create database with lots of
-# big key/data pairs. Go through and delete and add keys back
-# randomly. Then close the DB and make sure that we have everything
-# we think we should.
-proc test009 { method {nentries 10000} args} {
- eval {test008 $method $nentries 9 0} $args
+# TEST test009
+# TEST Small keys/large data
+# TEST Same as test008; close and reopen database
+# TEST
+# TEST Check that we reuse overflow pages. Create database with lots of
+# TEST big key/data pairs. Go through and delete and add keys back
+# TEST randomly. Then close the DB and make sure that we have everything
+# TEST we think we should.
+proc test009 { method args} {
+ eval {test008 $method 9 0} $args
}
diff --git a/bdb/test/test010.tcl b/bdb/test/test010.tcl
index b3aedb2bee9..0b5f5531795 100644
--- a/bdb/test/test010.tcl
+++ b/bdb/test/test010.tcl
@@ -1,17 +1,19 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: test010.tcl,v 11.14 2000/08/25 14:21:54 sue Exp $
+# $Id: test010.tcl,v 11.20 2002/06/11 14:09:56 sue Exp $
#
-# DB Test 10 {access method}
-# Use the first 10,000 entries from the dictionary.
-# Insert each with self as key and data; add duplicate
-# records for each.
-# After all are entered, retrieve all; verify output.
-# Close file, reopen, do retrieve and re-verify.
-# This does not work for recno
+# TEST test010
+# TEST Duplicate test
+# TEST Small key/data pairs.
+# TEST
+# TEST Use the first 10,000 entries from the dictionary.
+# TEST Insert each with self as key and data; add duplicate records for each.
+# TEST After all are entered, retrieve all; verify output.
+# TEST Close file, reopen, do retrieve and re-verify.
+# TEST This does not work for recno
proc test010 { method {nentries 10000} {ndups 5} {tnum 10} args } {
source ./include.tcl
@@ -25,9 +27,8 @@ proc test010 { method {nentries 10000} {ndups 5} {tnum 10} args } {
return
}
- puts "Test0$tnum: $method ($args) $nentries small dup key/data pairs"
-
# Create the database and open the dictionary
+ set txnenv 0
set eindex [lsearch -exact $args "-env"]
#
# If we are using an env, then testfile should just be the db name.
@@ -39,7 +40,23 @@ proc test010 { method {nentries 10000} {ndups 5} {tnum 10} args } {
set testfile test0$tnum.db
incr eindex
set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ #
+ # If we are using txns and running with the
+ # default, set the default down a bit.
+ #
+ if { $nentries == 10000 } {
+ set nentries 100
+ }
+ reduce_dups nentries ndups
+ }
+ set testdir [get_home $env]
}
+ puts "Test0$tnum: $method ($args) $nentries \
+ small $ndups dup key/data pairs"
+
set t1 $testdir/t1
set t2 $testdir/t2
set t3 $testdir/t3
@@ -47,7 +64,7 @@ proc test010 { method {nentries 10000} {ndups 5} {tnum 10} args } {
cleanup $testdir $env
set db [eval {berkdb_open \
- -create -truncate -mode 0644 -dup} $args {$omethod $testfile}]
+ -create -mode 0644 -dup} $args {$omethod $testfile}]
error_check_good dbopen [is_valid_db $db] TRUE
set did [open $dict]
@@ -58,17 +75,30 @@ proc test010 { method {nentries 10000} {ndups 5} {tnum 10} args } {
set count 0
# Here is the loop where we put and get each key/data pair
- set dbc [eval {$db cursor} $txn]
while { [gets $did str] != -1 && $count < $nentries } {
for { set i 1 } { $i <= $ndups } { incr i } {
set datastr $i:$str
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
set ret [eval {$db put} \
$txn $pflags {$str [chop_data $method $datastr]}]
error_check_good put $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
}
# Now retrieve all the keys matching this key
set x 1
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
for {set ret [$dbc get "-set" $str]} \
{[llength $ret] != 0} \
{set ret [$dbc get "-next"] } {
@@ -87,9 +117,13 @@ proc test010 { method {nentries 10000} {ndups 5} {tnum 10} args } {
incr x
}
error_check_good "Test0$tnum:ndups:$str" [expr $x - 1] $ndups
+ error_check_good cursor_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
incr count
}
- error_check_good cursor_close [$dbc close] 0
close $did
# Now we will get each key from the DB and compare the results
@@ -99,7 +133,15 @@ proc test010 { method {nentries 10000} {ndups 5} {tnum 10} args } {
for { set i 1 } { $i <= $ndups } {incr i} {
lappend dlist $i
}
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
dup_check $db $txn $t1 $dlist
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
# Now compare the keys to see if they match the dictionary entries
set q q
@@ -115,7 +157,15 @@ proc test010 { method {nentries 10000} {ndups 5} {tnum 10} args } {
error_check_good dbopen [is_valid_db $db] TRUE
puts "\tTest0$tnum.b: Checking file for correct duplicates after close"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
dup_check $db $txn $t1 $dlist
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
# Now compare the keys to see if they match the dictionary entries
filesort $t1 $t3
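
Note: the hunks above show the pattern this patch repeats across the whole suite. When the enclosing environment is transactional, each put/get or cursor pass is bracketed by its own short transaction created with "$env txn" and handed to the access call through the $txn variable as "-txn $t". A minimal sketch of that bracketing, assuming the berkdb Tcl extension and the suite's error_check_good helper are loaded and $env is an open transactional environment (assumptions; the tests themselves inline these steps rather than using a helper):

    proc txn_put { env db key data } {
        # Begin a transaction on the environment.
        set t [$env txn]
        # Hand the transaction to the access-method call via -txn.
        set ret [$db put -txn $t $key $data]
        if { $ret == 0 } {
            # Commit on success ...
            error_check_good txn_commit [$t commit] 0
        } else {
            # ... otherwise roll the single operation back.
            $t abort
        }
        return $ret
    }
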
diff --git a/bdb/test/test011.tcl b/bdb/test/test011.tcl
index 444f6240e92..63e2203efe4 100644
--- a/bdb/test/test011.tcl
+++ b/bdb/test/test011.tcl
@@ -1,18 +1,23 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: test011.tcl,v 11.20 2000/08/25 14:21:54 sue Exp $
+# $Id: test011.tcl,v 11.27 2002/06/11 14:09:56 sue Exp $
#
-# DB Test 11 {access method}
-# Use the first 10,000 entries from the dictionary.
-# Insert each with self as key and data; add duplicate
-# records for each.
-# Then do some key_first/key_last add_before, add_after operations.
-# This does not work for recno
-# To test if dups work when they fall off the main page, run this with
-# a very tiny page size.
+# TEST test011
+# TEST Duplicate test
+# TEST Small key/data pairs.
+# TEST Test DB_KEYFIRST, DB_KEYLAST, DB_BEFORE and DB_AFTER.
+# TEST To test off-page duplicates, run with small pagesize.
+# TEST
+# TEST Use the first 10,000 entries from the dictionary.
+# TEST Insert each with self as key and data; add duplicate records for each.
+# TEST Then do some key_first/key_last add_before, add_after operations.
+# TEST This does not work for recno
+# TEST
+# TEST To test if dups work when they fall off the main page, run this with
+# TEST a very tiny page size.
proc test011 { method {nentries 10000} {ndups 5} {tnum 11} args } {
global dlist
global rand_init
@@ -27,9 +32,6 @@ proc test011 { method {nentries 10000} {ndups 5} {tnum 11} args } {
if { [is_record_based $method] == 1 } {
test011_recno $method $nentries $tnum $args
return
- } else {
- puts -nonewline "Test0$tnum: $method $nentries small dup "
- puts "key/data pairs, cursor ops"
}
if {$ndups < 5} {
set ndups 5
@@ -41,6 +43,7 @@ proc test011 { method {nentries 10000} {ndups 5} {tnum 11} args } {
berkdb srand $rand_init
# Create the database and open the dictionary
+ set txnenv 0
set eindex [lsearch -exact $args "-env"]
#
# If we are using an env, then testfile should just be the db name.
@@ -52,13 +55,30 @@ proc test011 { method {nentries 10000} {ndups 5} {tnum 11} args } {
set testfile test0$tnum.db
incr eindex
set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ #
+ # If we are using txns and running with the
+ # default, set the default down a bit.
+ #
+ if { $nentries == 10000 } {
+ set nentries 100
+ }
+ reduce_dups nentries ndups
+ }
+ set testdir [get_home $env]
}
+
+ puts -nonewline "Test0$tnum: $method $nentries small $ndups dup "
+ puts "key/data pairs, cursor ops"
+
set t1 $testdir/t1
set t2 $testdir/t2
set t3 $testdir/t3
cleanup $testdir $env
- set db [eval {berkdb_open -create -truncate \
+ set db [eval {berkdb_open -create \
-mode 0644} [concat $args "-dup"] {$omethod $testfile}]
error_check_good dbopen [is_valid_db $db] TRUE
@@ -74,7 +94,6 @@ proc test011 { method {nentries 10000} {ndups 5} {tnum 11} args } {
# 0 and $ndups+1 using keyfirst/keylast. We'll add 2 and 4 using
# add before and add after.
puts "\tTest0$tnum.a: put and get duplicate keys."
- set dbc [eval {$db cursor} $txn]
set i ""
for { set i 1 } { $i <= $ndups } { incr i 2 } {
lappend dlist $i
@@ -83,12 +102,26 @@ proc test011 { method {nentries 10000} {ndups 5} {tnum 11} args } {
while { [gets $did str] != -1 && $count < $nentries } {
for { set i 1 } { $i <= $ndups } { incr i 2 } {
set datastr $i:$str
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
set ret [eval {$db put} $txn $pflags {$str $datastr}]
error_check_good put $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
}
# Now retrieve all the keys matching this key
set x 1
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
for {set ret [$dbc get "-set" $str ]} \
{[llength $ret] != 0} \
{set ret [$dbc get "-next"] } {
@@ -108,16 +141,27 @@ proc test011 { method {nentries 10000} {ndups 5} {tnum 11} args } {
incr x 2
}
error_check_good Test0$tnum:numdups $x $maxodd
+ error_check_good curs_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
incr count
}
- error_check_good curs_close [$dbc close] 0
close $did
# Now we will get each key from the DB and compare the results
# to the original.
puts "\tTest0$tnum.b: \
traverse entire file checking duplicates before close."
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
dup_check $db $txn $t1 $dlist
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
# Now compare the keys to see if they match the dictionary entries
set q q
@@ -135,7 +179,15 @@ proc test011 { method {nentries 10000} {ndups 5} {tnum 11} args } {
puts "\tTest0$tnum.c: \
traverse entire file checking duplicates after close."
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
dup_check $db $txn $t1 $dlist
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
# Now compare the keys to see if they match the dictionary entries
filesort $t1 $t3
@@ -143,24 +195,56 @@ proc test011 { method {nentries 10000} {ndups 5} {tnum 11} args } {
[filecmp $t3 $t2] 0
puts "\tTest0$tnum.d: Testing key_first functionality"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
add_dup $db $txn $nentries "-keyfirst" 0 0
set dlist [linsert $dlist 0 0]
dup_check $db $txn $t1 $dlist
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
puts "\tTest0$tnum.e: Testing key_last functionality"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
add_dup $db $txn $nentries "-keylast" [expr $maxodd - 1] 0
lappend dlist [expr $maxodd - 1]
dup_check $db $txn $t1 $dlist
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
puts "\tTest0$tnum.f: Testing add_before functionality"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
add_dup $db $txn $nentries "-before" 2 3
set dlist [linsert $dlist 2 2]
dup_check $db $txn $t1 $dlist
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
puts "\tTest0$tnum.g: Testing add_after functionality"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
add_dup $db $txn $nentries "-after" 4 4
set dlist [linsert $dlist 4 4]
dup_check $db $txn $t1 $dlist
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
error_check_good db_close [$db close] 0
}
@@ -209,6 +293,7 @@ proc test011_recno { method {nentries 10000} {tnum 11} largs } {
#
# If we are using an env, then testfile should just be the db name.
# Otherwise it is the test directory and the name.
+ set txnenv 0
if { $eindex == -1 } {
set testfile $testdir/test0$tnum.db
set env NULL
@@ -216,6 +301,18 @@ proc test011_recno { method {nentries 10000} {tnum 11} largs } {
set testfile test0$tnum.db
incr eindex
set env [lindex $largs $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append largs " -auto_commit "
+ #
+ # If we are using txns and running with the
+ # default, set the default down a bit.
+ #
+ if { $nentries == 10000 } {
+ set nentries 100
+ }
+ }
+ set testdir [get_home $env]
}
set t1 $testdir/t1
set t2 $testdir/t2
@@ -226,7 +323,7 @@ proc test011_recno { method {nentries 10000} {tnum 11} largs } {
append largs " -renumber"
}
set db [eval {berkdb_open \
- -create -truncate -mode 0644} $largs {$omethod $testfile}]
+ -create -mode 0644} $largs {$omethod $testfile}]
error_check_good dbopen [is_valid_db $db] TRUE
set did [open $dict]
@@ -247,13 +344,26 @@ proc test011_recno { method {nentries 10000} {tnum 11} largs } {
# Seed the database with an initial record
gets $did str
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
set ret [eval {$db put} $txn {1 [chop_data $method $str]}]
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
error_check_good put $ret 0
set count 1
set dlist "NULL $str"
# Open a cursor
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
set dbc [eval {$db cursor} $txn]
puts "\tTest0$tnum.a: put and get entries"
while { [gets $did str] != -1 && $count < $nentries } {
@@ -312,6 +422,9 @@ proc test011_recno { method {nentries 10000} {tnum 11} largs } {
}
close $did
error_check_good cclose [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
# Create check key file.
set oid [open $t2 w]
@@ -321,20 +434,28 @@ proc test011_recno { method {nentries 10000} {tnum 11} largs } {
close $oid
puts "\tTest0$tnum.b: dump file"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
dump_file $db $txn $t1 test011_check
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
error_check_good Test0$tnum:diff($t2,$t1) \
[filecmp $t2 $t1] 0
error_check_good db_close [$db close] 0
puts "\tTest0$tnum.c: close, open, and dump file"
- open_and_dump_file $testfile $env $txn $t1 test011_check \
+ open_and_dump_file $testfile $env $t1 test011_check \
dump_file_direction "-first" "-next"
error_check_good Test0$tnum:diff($t2,$t1) \
[filecmp $t2 $t1] 0
puts "\tTest0$tnum.d: close, open, and dump file in reverse direction"
- open_and_dump_file $testfile $env $txn $t1 test011_check \
+ open_and_dump_file $testfile $env $t1 test011_check \
dump_file_direction "-last" "-prev"
filesort $t1 $t3 -n
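
Note: several hunks above also scale the workload down for transactional runs: the default 10,000 entries are cut to 100 and the duplicate count is trimmed with reduce_dups, which the call sites pass variable names to. The real helper is defined elsewhere in the suite and is not part of this diff; the following is a hypothetical stand-in, with the upvar binding implied by the call sites and the scaling factors purely illustrative:

    proc reduce_dups_sketch { nent ndup } {
        # The call sites pass names, so edit the caller's counters in place.
        upvar $nent nentries
        upvar $ndup ndups
        # Exact scaling factors are an assumption for illustration only;
        # the goal is simply to keep entries x dups small under txns.
        if { $nentries > 100 } {
            set nentries [expr {$nentries / 10}]
        }
        if { $ndups > 5 } {
            set ndups [expr {$ndups / 2}]
        }
    }

Called as "reduce_dups_sketch nentries ndups" (variable names, not values), mirroring the call sites in the hunks above.
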
diff --git a/bdb/test/test012.tcl b/bdb/test/test012.tcl
index 87127901e19..e7237d27267 100644
--- a/bdb/test/test012.tcl
+++ b/bdb/test/test012.tcl
@@ -1,14 +1,19 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: test012.tcl,v 11.14 2000/08/25 14:21:54 sue Exp $
+# $Id: test012.tcl,v 11.20 2002/05/22 15:42:46 sue Exp $
#
-# DB Test 12 {access method}
-# Take the source files and dbtest executable and enter their contents as
-# the key with their names as data. After all are entered, retrieve all;
-# compare output to original. Close file, reopen, do retrieve and re-verify.
+# TEST test012
+# TEST Large keys/small data
+# TEST Same as test003 except use big keys (source files and
+# TEST executables) and small data (the file/executable names).
+# TEST
+# TEST Take the source files and dbtest executable and enter their contents
+# TEST as the key with their names as data. After all are entered, retrieve
+# TEST all; compare output to original. Close file, reopen, do retrieve and
+# TEST re-verify.
proc test012 { method args} {
global names
source ./include.tcl
@@ -24,6 +29,7 @@ proc test012 { method args} {
puts "Test012: $method ($args) filename=data filecontents=key pairs"
# Create the database and open the dictionary
+ set txnenv 0
set eindex [lsearch -exact $args "-env"]
#
# If we are using an env, then testfile should just be the db name.
@@ -35,6 +41,11 @@ proc test012 { method args} {
set testfile test012.db
incr eindex
set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ }
+ set testdir [get_home $env]
}
set t1 $testdir/t1
set t2 $testdir/t2
@@ -44,7 +55,7 @@ proc test012 { method args} {
cleanup $testdir $env
set db [eval {berkdb_open \
- -create -truncate -mode 0644} $args {$omethod $testfile}]
+ -create -mode 0644} $args {$omethod $testfile}]
error_check_good dbopen [is_valid_db $db] TRUE
set pflags ""
@@ -52,22 +63,37 @@ proc test012 { method args} {
set txn ""
# Here is the loop where we put and get each key/data pair
- set file_list [glob $test_path/../\[a-z\]*/*.c \
- $test_path/./*.lo ./*.exe]
+ set file_list [get_file_list]
puts "\tTest012.a: put/get loop"
set count 0
foreach f $file_list {
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
put_file_as_key $db $txn $pflags $f
set kd [get_file_as_key $db $txn $gflags $f]
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
incr count
}
# Now we will get each key from the DB and compare the results
# to the original.
puts "\tTest012.b: dump file"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
dump_binkey_file $db $txn $t1 test012.check
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
error_check_good db_close [$db close] 0
# Now compare the data to see if they match the .o and dbtest files
@@ -85,7 +111,7 @@ proc test012 { method args} {
# Now, reopen the file and run the last test again.
puts "\tTest012.c: close, open, and dump file"
- open_and_dump_file $testfile $env $txn $t1 test012.check \
+ open_and_dump_file $testfile $env $t1 test012.check \
dump_binkey_file_direction "-first" "-next"
filesort $t1 $t3
@@ -95,7 +121,7 @@ proc test012 { method args} {
# Now, reopen the file and run the last test again in reverse direction.
puts "\tTest012.d: close, open, and dump file in reverse direction"
- open_and_dump_file $testfile $env $txn $t1 test012.check\
+ open_and_dump_file $testfile $env $t1 test012.check\
dump_binkey_file_direction "-last" "-prev"
filesort $t1 $t3
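
Note: test012 above (and test017 below) drop the inline glob over source files in favor of a shared get_file_list helper whose body is not shown in this diff. A hypothetical sketch of such a wrapper, with the glob patterns carried over from the removed lines and $test_path assumed to come from the harness (source ./include.tcl); the optional argument is a guess at why test017 calls it with a parameter:

    proc file_list_sketch { {small 0} } {
        global test_path
        # Patterns taken from the glob this diff removes; the real
        # get_file_list may build its list differently.
        set files [glob -nocomplain \
            $test_path/../\[a-z\]*/*.c $test_path/./*.lo ./*.exe]
        if { $small } {
            # Optionally trim the list for cheaper (e.g. txn) runs.
            set files [lrange $files 0 99]
        }
        return $files
    }
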
diff --git a/bdb/test/test013.tcl b/bdb/test/test013.tcl
index 5812cf8f64d..96d7757b0d8 100644
--- a/bdb/test/test013.tcl
+++ b/bdb/test/test013.tcl
@@ -1,17 +1,20 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: test013.tcl,v 11.18 2000/08/25 14:21:54 sue Exp $
+# $Id: test013.tcl,v 11.23 2002/05/22 15:42:46 sue Exp $
#
-# DB Test 13 {access method}
-#
-# 1. Insert 10000 keys and retrieve them (equal key/data pairs).
-# 2. Attempt to overwrite keys with NO_OVERWRITE set (expect error).
-# 3. Actually overwrite each one with its datum reversed.
-#
-# No partial testing here.
+# TEST test013
+# TEST Partial put test
+# TEST Overwrite entire records using partial puts.
+# TEST Make sure that the NOOVERWRITE flag works.
+# TEST
+# TEST 1. Insert 10000 keys and retrieve them (equal key/data pairs).
+# TEST 2. Attempt to overwrite keys with NO_OVERWRITE set (expect error).
+# TEST 3. Actually overwrite each one with its datum reversed.
+# TEST
+# TEST No partial testing here.
proc test013 { method {nentries 10000} args } {
global errorCode
global errorInfo
@@ -23,9 +26,8 @@ proc test013 { method {nentries 10000} args } {
set args [convert_args $method $args]
set omethod [convert_method $method]
- puts "Test013: $method ($args) $nentries equal key/data pairs, put test"
-
# Create the database and open the dictionary
+ set txnenv 0
set eindex [lsearch -exact $args "-env"]
#
# If we are using an env, then testfile should just be the db name.
@@ -37,14 +39,28 @@ proc test013 { method {nentries 10000} args } {
set testfile test013.db
incr eindex
set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ #
+ # If we are using txns and running with the
+ # default, set the default down a bit.
+ #
+ if { $nentries == 10000 } {
+ set nentries 100
+ }
+ }
+ set testdir [get_home $env]
}
+ puts "Test013: $method ($args) $nentries equal key/data pairs, put test"
+
set t1 $testdir/t1
set t2 $testdir/t2
set t3 $testdir/t3
cleanup $testdir $env
set db [eval {berkdb_open \
- -create -truncate -mode 0644} $args {$omethod $testfile}]
+ -create -mode 0644} $args {$omethod $testfile}]
error_check_good dbopen [is_valid_db $db] TRUE
set did [open $dict]
@@ -70,6 +86,11 @@ proc test013 { method {nentries 10000} args } {
} else {
set key $str
}
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
set ret [eval {$db put} \
$txn $pflags {$key [chop_data $method $str]}]
error_check_good put $ret 0
@@ -77,6 +98,9 @@ proc test013 { method {nentries 10000} args } {
set ret [eval {$db get} $gflags $txn {$key}]
error_check_good \
get $ret [list [list $key [pad_data $method $str]]]
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
incr count
}
close $did
@@ -93,6 +117,11 @@ proc test013 { method {nentries 10000} args } {
set key $str
}
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
set ret [eval {$db put} $txn $pflags \
{-nooverwrite $key [chop_data $method $str]}]
error_check_good put [is_substr $ret "DB_KEYEXIST"] 1
@@ -101,6 +130,9 @@ proc test013 { method {nentries 10000} args } {
set ret [eval {$db get} $txn $gflags {$key}]
error_check_good \
get $ret [list [list $key [pad_data $method $str]]]
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
incr count
}
close $did
@@ -116,6 +148,11 @@ proc test013 { method {nentries 10000} args } {
set key $str
}
set rstr [string toupper $str]
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
set r [eval {$db put} \
$txn $pflags {$key [chop_data $method $rstr]}]
error_check_good put $r 0
@@ -124,13 +161,24 @@ proc test013 { method {nentries 10000} args } {
set ret [eval {$db get} $txn $gflags {$key}]
error_check_good \
get $ret [list [list $key [pad_data $method $rstr]]]
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
incr count
}
close $did
# Now make sure that everything looks OK
puts "\tTest013.d: check entire file contents"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
dump_file $db $txn $t1 $checkfunc
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
error_check_good db_close [$db close] 0
# Now compare the keys to see if they match the dictionary (or ints)
@@ -153,7 +201,7 @@ proc test013 { method {nentries 10000} args } {
puts "\tTest013.e: close, open, and dump file"
# Now, reopen the file and run the last test again.
- open_and_dump_file $testfile $env $txn $t1 $checkfunc \
+ open_and_dump_file $testfile $env $t1 $checkfunc \
dump_file_direction "-first" "-next"
if { [is_record_based $method] == 0 } {
@@ -166,7 +214,7 @@ proc test013 { method {nentries 10000} args } {
# Now, reopen the file and run the last test again in the
# reverse direction.
puts "\tTest013.f: close, open, and dump file in reverse direction"
- open_and_dump_file $testfile $env $txn $t1 $checkfunc \
+ open_and_dump_file $testfile $env $t1 $checkfunc \
dump_file_direction "-last" "-prev"
if { [is_record_based $method] == 0 } {
diff --git a/bdb/test/test014.tcl b/bdb/test/test014.tcl
index 3ad5335dd0a..00d69d3352e 100644
--- a/bdb/test/test014.tcl
+++ b/bdb/test/test014.tcl
@@ -1,17 +1,20 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: test014.tcl,v 11.19 2000/08/25 14:21:54 sue Exp $
+# $Id: test014.tcl,v 11.24 2002/05/22 15:42:46 sue Exp $
#
-# DB Test 14 {access method}
-#
-# Partial put test, small data, replacing with same size. The data set
-# consists of the first nentries of the dictionary. We will insert them
-# (and retrieve them) as we do in test 1 (equal key/data pairs). Then
-# we'll try to perform partial puts of some characters at the beginning,
-# some at the end, and some at the middle.
+# TEST test014
+# TEST Exercise partial puts on short data
+# TEST Run 5 combinations of numbers of characters to replace,
+# TEST and number of times to increase the size by.
+# TEST
+# TEST Partial put test, small data, replacing with same size. The data set
+# TEST consists of the first nentries of the dictionary. We will insert them
+# TEST (and retrieve them) as we do in test 1 (equal key/data pairs). Then
+# TEST we'll try to perform partial puts of some characters at the beginning,
+# TEST some at the end, and some at the middle.
proc test014 { method {nentries 10000} args } {
set fixed 0
set args [convert_args $method $args]
@@ -71,6 +74,7 @@ proc test014_body { method flagp chars increase {nentries 10000} args } {
}
# Create the database and open the dictionary
+ set txnenv 0
set eindex [lsearch -exact $args "-env"]
#
# If we are using an env, then testfile should just be the db name.
@@ -82,6 +86,18 @@ proc test014_body { method flagp chars increase {nentries 10000} args } {
set testfile test014.db
incr eindex
set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ #
+ # If we are using txns and running with the
+ # default, set the default down a bit.
+ #
+ if { $nentries == 10000 } {
+ set nentries 100
+ }
+ }
+ set testdir [get_home $env]
}
set t1 $testdir/t1
set t2 $testdir/t2
@@ -89,7 +105,7 @@ proc test014_body { method flagp chars increase {nentries 10000} args } {
cleanup $testdir $env
set db [eval {berkdb_open \
- -create -truncate -mode 0644} $args {$omethod $testfile}]
+ -create -mode 0644} $args {$omethod $testfile}]
error_check_good dbopen [is_valid_db $db] TRUE
set gflags ""
@@ -117,7 +133,15 @@ proc test014_body { method flagp chars increase {nentries 10000} args } {
global dvals
# initial put
- set ret [$db put $key $str]
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn {$key $str}]
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
error_check_good dbput $ret 0
set offset [string length $str]
@@ -133,11 +157,28 @@ proc test014_body { method flagp chars increase {nentries 10000} args } {
a[set offset]x[set chars]a[set increase] \
$str $data]
set offset [expr $offset + $chars]
- set ret [$db put -partial [list $offset 0] $key $data]
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put -partial [list $offset 0]} \
+ $txn {$key $data}]
error_check_good dbput:post $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
} else {
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
partial_put $method $db $txn \
$gflags $key $str $chars $increase
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
}
incr count
}
@@ -145,7 +186,15 @@ proc test014_body { method flagp chars increase {nentries 10000} args } {
# Now make sure that everything looks OK
puts "\tTest014.b: check entire file contents"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
dump_file $db $txn $t1 test014.check
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
error_check_good db_close [$db close] 0
# Now compare the keys to see if they match the dictionary (or ints)
@@ -168,7 +217,7 @@ proc test014_body { method flagp chars increase {nentries 10000} args } {
puts "\tTest014.c: close, open, and dump file"
# Now, reopen the file and run the last test again.
- open_and_dump_file $testfile $env $txn \
+ open_and_dump_file $testfile $env \
$t1 test014.check dump_file_direction "-first" "-next"
if { [string compare $omethod "-recno"] != 0 } {
@@ -182,7 +231,7 @@ proc test014_body { method flagp chars increase {nentries 10000} args } {
# Now, reopen the file and run the last test again in the
# reverse direction.
puts "\tTest014.d: close, open, and dump file in reverse direction"
- open_and_dump_file $testfile $env $txn $t1 \
+ open_and_dump_file $testfile $env $t1 \
test014.check dump_file_direction "-last" "-prev"
if { [string compare $omethod "-recno"] != 0 } {
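
Note: test014 above exercises "-partial [list $offset 0]" puts, where the second list element is the number of existing bytes to replace, so a zero length splices the new data in at the offset. A plain-Tcl picture of that string-level effect (no database involved), assuming the offset falls within or at the end of the existing record; zero-filling of gaps beyond the end is not modeled here:

    proc partial_splice { old doff dlen new } {
        # Keep the bytes before the offset ...
        set head [string range $old 0 [expr {$doff - 1}]]
        # ... drop dlen bytes at the offset, keep the rest ...
        set tail [string range $old [expr {$doff + $dlen}] end]
        # ... and splice the new data in between.
        return $head$new$tail
    }
    puts [partial_splice "abcdef" 3 0 "XY"]   ;# -> abcXYdef
    puts [partial_splice "abcdef" 2 2 "__"]   ;# -> ab__ef
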
diff --git a/bdb/test/test015.tcl b/bdb/test/test015.tcl
index 61abddd3799..f129605a405 100644
--- a/bdb/test/test015.tcl
+++ b/bdb/test/test015.tcl
@@ -1,14 +1,15 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: test015.tcl,v 11.20 2000/08/25 14:21:54 sue Exp $
+# $Id: test015.tcl,v 11.27 2002/05/31 16:57:25 sue Exp $
#
-# DB Test 15 {access method}
-# Partial put test when item does not exist.
+# TEST test015
+# TEST Partial put test
+# TEST Partial put test where the key does not initially exist.
proc test015 { method {nentries 7500} { start 0 } args } {
- global fixed_len
+ global fixed_len testdir
set low_range 50
set mid_range 100
@@ -43,6 +44,15 @@ proc test015 { method {nentries 7500} { start 0 } args } {
puts -nonewline "$this: "
eval [concat test015_body $method [lindex $entry 1] \
$nentries $args]
+ set eindex [lsearch -exact $args "-env"]
+ if { $eindex != -1 } {
+ incr eindex
+ set env [lindex $args $eindex]
+ set testdir [get_home $env]
+ }
+puts "Verifying testdir $testdir"
+
+ error_check_good verify [verify_dir $testdir "\tTest015.e: "] 0
}
}
@@ -55,6 +65,7 @@ proc test015_init { } {
proc test015_body { method off_low off_hi rcount {nentries 10000} args } {
global dvals
global fixed_len
+ global testdir
source ./include.tcl
set args [convert_args $method $args]
@@ -71,6 +82,7 @@ proc test015_body { method off_low off_hi rcount {nentries 10000} args } {
puts "Put $rcount strings random offsets between $off_low and $off_hi"
# Create the database and open the dictionary
+ set txnenv 0
set eindex [lsearch -exact $args "-env"]
#
# If we are using an env, then testfile should just be the db name.
@@ -82,14 +94,27 @@ proc test015_body { method off_low off_hi rcount {nentries 10000} args } {
set testfile test015.db
incr eindex
set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ #
+ # If we are using txns and running with the
+ # default, set the default down a bit.
+ #
+ if { $nentries > 5000 } {
+ set nentries 100
+ }
+ }
+ set testdir [get_home $env]
}
+ set retdir $testdir
set t1 $testdir/t1
set t2 $testdir/t2
set t3 $testdir/t3
cleanup $testdir $env
set db [eval {berkdb_open \
- -create -truncate -mode 0644} $args {$omethod $testfile}]
+ -create -mode 0644} $args {$omethod $testfile}]
error_check_good dbopen [is_valid_db $db] TRUE
set pflags ""
@@ -97,7 +122,7 @@ proc test015_body { method off_low off_hi rcount {nentries 10000} args } {
set txn ""
set count 0
- puts "\tTest015.a: put/get loop"
+ puts "\tTest015.a: put/get loop for $nentries entries"
# Here is the loop where we put and get each key/data pair
# Each put is a partial put of a record that does not exist.
@@ -148,9 +173,17 @@ proc test015_body { method off_low off_hi rcount {nentries 10000} args } {
set slen [expr $fixed_len - $off]
set data [eval "binary format a$slen" {$data}]
}
- set ret [eval {$db put} \
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn \
{-partial [list $off [string length $data]] $key $data}]
error_check_good put $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
incr count
}
@@ -158,7 +191,15 @@ proc test015_body { method off_low off_hi rcount {nentries 10000} args } {
# Now make sure that everything looks OK
puts "\tTest015.b: check entire file contents"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
dump_file $db $txn $t1 $checkfunc
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
error_check_good db_close [$db close] 0
# Now compare the keys to see if they match the dictionary (or ints)
@@ -183,7 +224,7 @@ proc test015_body { method off_low off_hi rcount {nentries 10000} args } {
puts "\tTest015.c: close, open, and dump file"
# Now, reopen the file and run the last test again.
- open_and_dump_file $testfile $env $txn $t1 \
+ open_and_dump_file $testfile $env $t1 \
$checkfunc dump_file_direction "-first" "-next"
if { [string compare $omethod "-recno"] != 0 } {
@@ -196,7 +237,7 @@ proc test015_body { method off_low off_hi rcount {nentries 10000} args } {
# Now, reopen the file and run the last test again in the
# reverse direction.
puts "\tTest015.d: close, open, and dump file in reverse direction"
- open_and_dump_file $testfile $env $txn $t1 \
+ open_and_dump_file $testfile $env $t1 \
$checkfunc dump_file_direction "-last" "-prev"
if { [string compare $omethod "-recno"] != 0 } {
diff --git a/bdb/test/test016.tcl b/bdb/test/test016.tcl
index def3c114693..af289f866f4 100644
--- a/bdb/test/test016.tcl
+++ b/bdb/test/test016.tcl
@@ -1,19 +1,20 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: test016.tcl,v 11.17 2000/08/25 14:21:54 sue Exp $
+# $Id: test016.tcl,v 11.23 2002/05/22 15:42:46 sue Exp $
#
-# DB Test 16 {access method}
-# Partial put test where partial puts make the record smaller.
-# Use the first 10,000 entries from the dictionary.
-# Insert each with self as key and a fixed, medium length data string;
-# retrieve each. After all are entered, go back and do partial puts,
-# replacing a random-length string with the key value.
-# Then verify.
-
-set datastr abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz
+# TEST test016
+# TEST Partial put test
+# TEST Partial put where the datum gets shorter as a result of the put.
+# TEST
+# TEST Partial put test where partial puts make the record smaller.
+# TEST Use the first 10,000 entries from the dictionary.
+# TEST Insert each with self as key and a fixed, medium length data string;
+# TEST retrieve each. After all are entered, go back and do partial puts,
+# TEST replacing a random-length string with the key value.
+# TEST Then verify.
proc test016 { method {nentries 10000} args } {
global datastr
@@ -31,9 +32,8 @@ proc test016 { method {nentries 10000} args } {
return
}
- puts "Test016: $method ($args) $nentries partial put shorten"
-
# Create the database and open the dictionary
+ set txnenv 0
set eindex [lsearch -exact $args "-env"]
#
# If we are using an env, then testfile should just be the db name.
@@ -45,13 +45,27 @@ proc test016 { method {nentries 10000} args } {
set testfile test016.db
incr eindex
set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ #
+ # If we are using txns and running with the
+ # default, set the default down a bit.
+ #
+ if { $nentries == 10000 } {
+ set nentries 100
+ }
+ }
+ set testdir [get_home $env]
}
+ puts "Test016: $method ($args) $nentries partial put shorten"
+
set t1 $testdir/t1
set t2 $testdir/t2
set t3 $testdir/t3
cleanup $testdir $env
set db [eval {berkdb_open \
- -create -truncate -mode 0644} $args {$omethod $testfile}]
+ -create -mode 0644} $args {$omethod $testfile}]
error_check_good dbopen [is_valid_db $db] TRUE
set pflags ""
@@ -64,7 +78,6 @@ proc test016 { method {nentries 10000} args } {
}
# Here is the loop where we put and get each key/data pair
-
puts "\tTest016.a: put/get loop"
set did [open $dict]
while { [gets $did str] != -1 && $count < $nentries } {
@@ -73,6 +86,11 @@ proc test016 { method {nentries 10000} args } {
} else {
set key $str
}
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
set ret [eval {$db put} \
$txn $pflags {$key [chop_data $method $datastr]}]
error_check_good put $ret 0
@@ -80,6 +98,9 @@ proc test016 { method {nentries 10000} args } {
set ret [eval {$db get} $txn $gflags {$key}]
error_check_good \
get $ret [list [list $key [pad_data $method $datastr]]]
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
incr count
}
close $did
@@ -103,12 +124,20 @@ proc test016 { method {nentries 10000} args } {
set s2 [string toupper $key]
set s3 [string range $datastr [expr $repl_off + $repl_len] end ]
set dvals($key) [pad_data $method $s1$s2$s3]
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
set ret [eval {$db put} $txn {-partial \
[list $repl_off $repl_len] $key [chop_data $method $s2]}]
error_check_good put $ret 0
set ret [eval {$db get} $txn $gflags {$key}]
error_check_good \
put $ret [list [list $key [pad_data $method $s1$s2$s3]]]
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
incr count
}
close $did
@@ -116,7 +145,15 @@ proc test016 { method {nentries 10000} args } {
# Now we will get each key from the DB and compare the results
# to the original.
puts "\tTest016.c: dump file"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
dump_file $db $txn $t1 test016.check
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
error_check_good db_close [$db close] 0
# Now compare the keys to see if they match the dictionary
@@ -139,7 +176,7 @@ proc test016 { method {nentries 10000} args } {
# Now, reopen the file and run the last test again.
puts "\tTest016.d: close, open, and dump file"
- open_and_dump_file $testfile $env $txn $t1 test016.check \
+ open_and_dump_file $testfile $env $t1 test016.check \
dump_file_direction "-first" "-next"
if { [ is_record_based $method ] == 0 } {
@@ -150,7 +187,7 @@ proc test016 { method {nentries 10000} args } {
# Now, reopen the file and run the last test again in reverse direction.
puts "\tTest016.e: close, open, and dump file in reverse direction"
- open_and_dump_file $testfile $env $txn $t1 test016.check \
+ open_and_dump_file $testfile $env $t1 test016.check \
dump_file_direction "-last" "-prev"
if { [ is_record_based $method ] == 0 } {
diff --git a/bdb/test/test017.tcl b/bdb/test/test017.tcl
index 95fe82e081c..1f99aa328fb 100644
--- a/bdb/test/test017.tcl
+++ b/bdb/test/test017.tcl
@@ -1,22 +1,22 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: test017.tcl,v 11.13 2000/12/11 17:42:18 sue Exp $
-#
-# DB Test 17 {access method}
-# Run duplicates with small page size so that we test off page duplicates.
-# Then after we have an off-page database, test with overflow pages too.
+# $Id: test017.tcl,v 11.23 2002/06/20 19:01:02 sue Exp $
#
+# TEST test017
+# TEST Basic offpage duplicate test.
+# TEST
+# TEST Run duplicates with small page size so that we test off page duplicates.
+# TEST Then after we have an off-page database, test with overflow pages too.
proc test017 { method {contents 0} {ndups 19} {tnum 17} args } {
source ./include.tcl
set args [convert_args $method $args]
set omethod [convert_method $method]
- if { [is_record_based $method] == 1 || \
- [is_rbtree $method] == 1 } {
+ if { [is_record_based $method] == 1 || [is_rbtree $method] == 1 } {
puts "Test0$tnum skipping for method $method"
return
}
@@ -29,9 +29,8 @@ proc test017 { method {contents 0} {ndups 19} {tnum 17} args } {
}
}
- puts "Test0$tnum: $method ($args) Off page duplicate tests with $ndups duplicates"
-
# Create the database and open the dictionary
+ set txnenv 0
set eindex [lsearch -exact $args "-env"]
#
# If we are using an env, then testfile should just be the db name.
@@ -43,6 +42,11 @@ proc test017 { method {contents 0} {ndups 19} {tnum 17} args } {
set testfile test0$tnum.db
incr eindex
set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ }
+ set testdir [get_home $env]
}
set t1 $testdir/t1
set t2 $testdir/t2
@@ -52,7 +56,7 @@ proc test017 { method {contents 0} {ndups 19} {tnum 17} args } {
cleanup $testdir $env
set db [eval {berkdb_open \
- -create -truncate -mode 0644 -dup} $args {$omethod $testfile}]
+ -create -mode 0644 -dup} $args {$omethod $testfile}]
error_check_good dbopen [is_valid_db $db] TRUE
set pflags ""
@@ -60,17 +64,22 @@ proc test017 { method {contents 0} {ndups 19} {tnum 17} args } {
set txn ""
set count 0
+ set file_list [get_file_list 1]
+ if { $txnenv == 1 } {
+ set flen [llength $file_list]
+ reduce_dups flen ndups
+ set file_list [lrange $file_list 0 $flen]
+ }
+ puts "Test0$tnum: $method ($args) Off page duplicate tests with $ndups duplicates"
+
set ovfl ""
# Here is the loop where we put and get each key/data pair
- set dbc [eval {$db cursor} $txn]
- puts -nonewline \
- "\tTest0$tnum.a: Creating duplicates with "
+ puts -nonewline "\tTest0$tnum.a: Creating duplicates with "
if { $contents != 0 } {
puts "file contents as key/data"
} else {
puts "file name as key/data"
}
- set file_list [glob ../*/*.c ./*.lo]
foreach f $file_list {
if { $contents != 0 } {
set fid [open $f r]
@@ -85,9 +94,17 @@ proc test017 { method {contents 0} {ndups 19} {tnum 17} args } {
}
for { set i 1 } { $i <= $ndups } { incr i } {
set datastr $i:$str
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
set ret [eval {$db put} \
$txn $pflags {$str [chop_data $method $datastr]}]
error_check_good put $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
}
#
@@ -101,6 +118,12 @@ proc test017 { method {contents 0} {ndups 19} {tnum 17} args } {
error_check_bad $f:dbget_dups [llength $ret] 0
error_check_good $f:dbget_dups1 [llength $ret] $ndups
set x 1
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
for {set ret [$dbc get "-set" $str]} \
{[llength $ret] != 0} \
{set ret [$dbc get "-next"] } {
@@ -119,9 +142,12 @@ proc test017 { method {contents 0} {ndups 19} {tnum 17} args } {
incr x
}
error_check_good "Test0$tnum:ndups:$str" [expr $x - 1] $ndups
+ error_check_good cursor_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
incr count
}
- error_check_good cursor_close [$dbc close] 0
# Now we will get each key from the DB and compare the results
# to the original.
@@ -145,19 +171,33 @@ proc test017 { method {contents 0} {ndups 19} {tnum 17} args } {
fileremove $t2.tmp
fileremove $t4.tmp
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
dup_check $db $txn $t1 $dlist
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
if {$contents == 0} {
filesort $t1 $t3
- error_check_good Test0$tnum:diff($t3,$t2) \
- [filecmp $t3 $t2] 0
+ error_check_good Test0$tnum:diff($t3,$t2) [filecmp $t3 $t2] 0
# Now compare the keys to see if they match the file names
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
dump_file $db $txn $t1 test017.check
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
filesort $t1 $t3
- error_check_good Test0$tnum:diff($t3,$t4) \
- [filecmp $t3 $t4] 0
+ error_check_good Test0$tnum:diff($t3,$t4) [filecmp $t3 $t4] 0
}
error_check_good db_close [$db close] 0
@@ -165,13 +205,20 @@ proc test017 { method {contents 0} {ndups 19} {tnum 17} args } {
error_check_good dbopen [is_valid_db $db] TRUE
puts "\tTest0$tnum.c: Checking file for correct duplicates after close"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
dup_check $db $txn $t1 $dlist
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
if {$contents == 0} {
# Now compare the keys to see if they match the filenames
filesort $t1 $t3
- error_check_good Test0$tnum:diff($t3,$t2) \
- [filecmp $t3 $t2] 0
+ error_check_good Test0$tnum:diff($t3,$t2) [filecmp $t3 $t2] 0
}
error_check_good db_close [$db close] 0
@@ -204,6 +251,7 @@ proc test017 { method {contents 0} {ndups 19} {tnum 17} args } {
error_check_good db_close [$db close] 0
return
}
+
puts "\tTest0$tnum.e: Add overflow duplicate entries"
set ovfldup [expr $ndups + 1]
foreach f $ovfl {
@@ -214,20 +262,41 @@ proc test017 { method {contents 0} {ndups 19} {tnum 17} args } {
fconfigure $fid -translation binary
set fdata [read $fid]
close $fid
- set data $ovfldup:$fdata
+ set data $ovfldup:$fdata:$fdata:$fdata:$fdata
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
set ret [eval {$db put} $txn $pflags {$f $data}]
error_check_good ovfl_put $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
}
+
puts "\tTest0$tnum.f: Verify overflow duplicate entries"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
dup_check $db $txn $t1 $dlist $ovfldup
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
filesort $t1 $t3
- error_check_good Test0$tnum:diff($t3,$t2) \
- [filecmp $t3 $t2] 0
+ error_check_good Test0$tnum:diff($t3,$t2) [filecmp $t3 $t2] 0
set stat [$db stat]
- error_check_bad overflow1 \
- [is_substr $stat "{{Overflow pages} 0}"] 1
+ if { [is_hash [$db get_type]] } {
+ error_check_bad overflow1_hash [is_substr $stat \
+ "{{Number of big pages} 0}"] 1
+ } else {
+ error_check_bad \
+ overflow1 [is_substr $stat "{{Overflow pages} 0}"] 1
+ }
error_check_good db_close [$db close] 0
}
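
Note: the closing hunk of test017 branches the overflow check on the access method because hash statistics label overflow storage as "big" pages while btree statistics report "Overflow pages". A minimal sketch of that check in isolation, assuming $db is an open berkdb handle and reusing the is_hash and is_substr helpers already called in the hunk:

    proc has_overflow_pages { db } {
        set stat [$db stat]
        if { [is_hash [$db get_type]] } {
            # Hash statistics report overflow storage as "big" pages.
            return [expr {![is_substr $stat "{{Number of big pages} 0}"]}]
        } else {
            # Btree/recno statistics report overflow pages directly.
            return [expr {![is_substr $stat "{{Overflow pages} 0}"]}]
        }
    }
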
diff --git a/bdb/test/test018.tcl b/bdb/test/test018.tcl
index 95493da2d03..8fc8a14e95e 100644
--- a/bdb/test/test018.tcl
+++ b/bdb/test/test018.tcl
@@ -1,12 +1,15 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: test018.tcl,v 11.3 2000/02/14 03:00:18 bostic Exp $
+# $Id: test018.tcl,v 11.6 2002/01/11 15:53:43 bostic Exp $
#
-# DB Test 18 {access method}
-# Run duplicates with small page size so that we test off page duplicates.
+# TEST test018
+# TEST Offpage duplicate test
+# TEST Key_{first,last,before,after} offpage duplicates.
+# TEST Run duplicates with small page size so that we test off page
+# TEST duplicates.
proc test018 { method {nentries 10000} args} {
puts "Test018: Off page duplicate tests"
eval {test011 $method $nentries 19 18 -pagesize 512} $args
diff --git a/bdb/test/test019.tcl b/bdb/test/test019.tcl
index 4031ae2dc16..aa3a58a0bcd 100644
--- a/bdb/test/test019.tcl
+++ b/bdb/test/test019.tcl
@@ -1,12 +1,12 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: test019.tcl,v 11.14 2000/08/25 14:21:54 sue Exp $
+# $Id: test019.tcl,v 11.21 2002/05/22 15:42:47 sue Exp $
#
-# Test019 { access_method nentries }
-# Test the partial get functionality.
+# TEST test019
+# TEST Partial get test.
proc test019 { method {nentries 10000} args } {
global fixed_len
global rand_init
@@ -14,9 +14,8 @@ proc test019 { method {nentries 10000} args } {
set args [convert_args $method $args]
set omethod [convert_method $method]
- puts "Test019: $method ($args) $nentries partial get test"
-
# Create the database and open the dictionary
+ set txnenv 0
set eindex [lsearch -exact $args "-env"]
#
# If we are using an env, then testfile should just be the db name.
@@ -28,11 +27,25 @@ proc test019 { method {nentries 10000} args } {
set testfile test019.db
incr eindex
set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ #
+ # If we are using txns and running with the
+ # default, set the default down a bit.
+ #
+ if { $nentries == 10000 } {
+ set nentries 100
+ }
+ }
+ set testdir [get_home $env]
}
+ puts "Test019: $method ($args) $nentries partial get test"
+
cleanup $testdir $env
set db [eval {berkdb_open \
- -create -truncate -mode 0644} $args {$omethod $testfile}]
+ -create -mode 0644} $args {$omethod $testfile}]
error_check_good dbopen [is_valid_db $db] TRUE
set did [open $dict]
berkdb srand $rand_init
@@ -57,6 +70,11 @@ proc test019 { method {nentries 10000} args } {
}
set repl [berkdb random_int $fixed_len 100]
set data [chop_data $method [replicate $str $repl]]
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
set ret [eval {$db put} $txn {-nooverwrite $key $data}]
error_check_good dbput:$key $ret 0
@@ -64,6 +82,9 @@ proc test019 { method {nentries 10000} args } {
error_check_good \
dbget:$key $ret [list [list $key [pad_data $method $data]]]
set kvals($key) $repl
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
}
close $did
@@ -76,18 +97,23 @@ proc test019 { method {nentries 10000} args } {
} else {
set key $str
}
- set data [replicate $str $kvals($key)]
+ set data [pad_data $method [replicate $str $kvals($key)]]
+
+ set maxndx [expr [string length $data] - 1]
- if { [is_fixed_length $method] == 1 } {
- set maxndx $fixed_len
- } else {
- set maxndx [expr [string length $data] - 1]
- }
set beg [berkdb random_int 0 [expr $maxndx - 1]]
- set len [berkdb random_int 1 [expr $maxndx - $beg]]
+ set len [berkdb random_int 0 [expr $maxndx * 2]]
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
set ret [eval {$db get} \
$txn {-partial [list $beg $len]} $gflags {$key}]
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
# In order for tcl to handle this, we have to overwrite the
# last character with a NULL. That makes the length one less
@@ -95,12 +121,10 @@ proc test019 { method {nentries 10000} args } {
set k [lindex [lindex $ret 0] 0]
set d [lindex [lindex $ret 0] 1]
error_check_good dbget_key $k $key
- # If $d contains some of the padding, we want to get rid of it.
- set firstnull [string first "\0" $d]
- if { $firstnull == -1 } { set firstnull [string length $d] }
- error_check_good dbget_data \
- [string range $d 0 [expr $firstnull - 1]] \
+
+ error_check_good dbget_data $d \
[string range $data $beg [expr $beg + $len - 1]]
+
}
error_check_good db_close [$db close] 0
close $did
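
Note: the test019 hunks above change what a partial get is expected to return: the requested length may now exceed the remaining data, and the result is compared directly against a string range of the padded record instead of being trimmed at the first NUL. A small plain-Tcl illustration of the expected slice the test builds (offset $beg, length $len, clamped by the string itself); no database is involved:

    set data "abcdefghij"
    set beg 3
    set len 20                         ;# deliberately longer than the data
    set expected [string range $data $beg [expr {$beg + $len - 1}]]
    puts $expected                     ;# prints "defghij"
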
diff --git a/bdb/test/test020.tcl b/bdb/test/test020.tcl
index 1961d0e02dd..9b6d939acad 100644
--- a/bdb/test/test020.tcl
+++ b/bdb/test/test020.tcl
@@ -1,12 +1,12 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: test020.tcl,v 11.12 2000/10/19 23:15:22 ubell Exp $
+# $Id: test020.tcl,v 11.17 2002/05/22 15:42:47 sue Exp $
#
-# DB Test 20 {access method}
-# Test in-memory databases.
+# TEST test020
+# TEST In-Memory database tests.
proc test020 { method {nentries 10000} args } {
source ./include.tcl
@@ -17,12 +17,11 @@ proc test020 { method {nentries 10000} args } {
puts "Test020 skipping for method $method"
return
}
- puts "Test020: $method ($args) $nentries equal key/data pairs"
-
# Create the database and open the dictionary
set t1 $testdir/t1
set t2 $testdir/t2
set t3 $testdir/t3
+ set txnenv 0
set eindex [lsearch -exact $args "-env"]
#
# Check if we are using an env.
@@ -31,10 +30,24 @@ proc test020 { method {nentries 10000} args } {
} else {
incr eindex
set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ #
+ # If we are using txns and running with the
+ # default, set the default down a bit.
+ #
+ if { $nentries == 10000 } {
+ set nentries 100
+ }
+ }
+ set testdir [get_home $env]
}
+ puts "Test020: $method ($args) $nentries equal key/data pairs"
+
cleanup $testdir $env
set db [eval {berkdb_open \
- -create -truncate -mode 0644} $args {$omethod}]
+ -create -mode 0644} $args {$omethod}]
error_check_good dbopen [is_valid_db $db] TRUE
set did [open $dict]
@@ -60,19 +73,35 @@ proc test020 { method {nentries 10000} args } {
} else {
set key $str
}
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
set ret [eval {$db put} \
$txn $pflags {$key [chop_data $method $str]}]
error_check_good put $ret 0
set ret [eval {$db get} $txn $gflags {$key}]
error_check_good \
get $ret [list [list $key [pad_data $method $str]]]
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
incr count
}
close $did
# Now we will get each key from the DB and compare the results
# to the original.
puts "\tTest020.b: dump file"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
dump_file $db $txn $t1 $checkfunc
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
error_check_good db_close [$db close] 0
# Now compare the keys to see if they match the dictionary (or ints)
diff --git a/bdb/test/test021.tcl b/bdb/test/test021.tcl
index f9a1fe32f7e..56936da389a 100644
--- a/bdb/test/test021.tcl
+++ b/bdb/test/test021.tcl
@@ -1,25 +1,26 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: test021.tcl,v 11.10 2000/08/25 14:21:55 sue Exp $
+# $Id: test021.tcl,v 11.15 2002/05/22 15:42:47 sue Exp $
#
-# DB Test 21 {access method}
-# Use the first 10,000 entries from the dictionary.
-# Insert each with self, reversed as key and self as data.
-# After all are entered, retrieve each using a cursor SET_RANGE, and getting
-# about 20 keys sequentially after it (in some cases we'll run out towards
-# the end of the file).
+# TEST test021
+# TEST Btree range tests.
+# TEST
+# TEST Use the first 10,000 entries from the dictionary.
+# TEST Insert each with self, reversed as key and self as data.
+# TEST After all are entered, retrieve each using a cursor SET_RANGE, and
+# TEST getting about 20 keys sequentially after it (in some cases we'll
+# TEST run out towards the end of the file).
proc test021 { method {nentries 10000} args } {
source ./include.tcl
set args [convert_args $method $args]
set omethod [convert_method $method]
- puts "Test021: $method ($args) $nentries equal key/data pairs"
-
# Create the database and open the dictionary
+ set txnenv 0
set eindex [lsearch -exact $args "-env"]
#
# If we are using an env, then testfile should just be the db name.
@@ -31,13 +32,27 @@ proc test021 { method {nentries 10000} args } {
set testfile test021.db
incr eindex
set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ #
+ # If we are using txns and running with the
+ # default, set the default down a bit.
+ #
+ if { $nentries == 10000 } {
+ set nentries 100
+ }
+ }
+ set testdir [get_home $env]
}
+ puts "Test021: $method ($args) $nentries equal key/data pairs"
+
set t1 $testdir/t1
set t2 $testdir/t2
set t3 $testdir/t3
cleanup $testdir $env
set db [eval {berkdb_open \
- -create -truncate -mode 0644} $args {$omethod $testfile}]
+ -create -mode 0644} $args {$omethod $testfile}]
error_check_good dbopen [is_valid_db $db] TRUE
set did [open $dict]
@@ -65,9 +80,17 @@ proc test021 { method {nentries 10000} args } {
set key [reverse $str]
}
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
set r [eval {$db put} \
$txn $pflags {$key [chop_data $method $str]}]
error_check_good db_put $r 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
incr count
}
close $did
@@ -81,6 +104,11 @@ proc test021 { method {nentries 10000} args } {
error_check_good dbopen [is_valid_db $db] TRUE
# Open a cursor
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
set dbc [eval {$db cursor} $txn]
error_check_good db_cursor [is_substr $dbc $db] 1
@@ -112,6 +140,10 @@ proc test021 { method {nentries 10000} args } {
}
incr i
}
+ error_check_good dbc_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
error_check_good db_close [$db close] 0
close $did
}
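
Note: test021's new header describes positioning a cursor with SET_RANGE on each reversed key and then reading roughly 20 keys forward. A minimal sketch of that cursor pattern, assuming $db is an open berkdb handle, that the cursor accepts a -set_range flag mirroring DB_SET_RANGE, and reusing the suite's error_check_good helper; the loop bound of 20 follows the comment, not code shown in this hunk:

    proc range_scan { db key {count 20} } {
        set dbc [$db cursor]
        set out {}
        # Position at the smallest key >= $key, then step forward,
        # collecting key/data pairs until $count are gathered.
        for { set ret [$dbc get -set_range $key] } \
            { [llength $ret] != 0 && [llength $out] < $count } \
            { set ret [$dbc get -next] } {
            lappend out [lindex $ret 0]
        }
        error_check_good dbc_close [$dbc close] 0
        return $out
    }
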
diff --git a/bdb/test/test022.tcl b/bdb/test/test022.tcl
index f9a4c96637e..d25d7ecdffe 100644
--- a/bdb/test/test022.tcl
+++ b/bdb/test/test022.tcl
@@ -1,11 +1,12 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1999, 2000
+# Copyright (c) 1999-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: test022.tcl,v 11.10 2000/08/25 14:21:55 sue Exp $
+# $Id: test022.tcl,v 11.14 2002/05/22 15:42:48 sue Exp $
#
-# Test022: Test of DB->get_byteswapped
+# TEST test022
+# TEST Test of DB->getbyteswapped().
proc test022 { method args } {
source ./include.tcl
@@ -14,6 +15,7 @@ proc test022 { method args } {
puts "Test022 ($args) $omethod: DB->getbyteswapped()"
+ set txnenv 0
set eindex [lsearch -exact $args "-env"]
#
# If we are using an env, then testfile should just be the db name.
@@ -27,6 +29,11 @@ proc test022 { method args } {
set testfile2 "test022b.db"
incr eindex
set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ }
+ set testdir [get_home $env]
}
cleanup $testdir $env
diff --git a/bdb/test/test023.tcl b/bdb/test/test023.tcl
index c222bdd83c5..c37539a0f55 100644
--- a/bdb/test/test023.tcl
+++ b/bdb/test/test023.tcl
@@ -1,14 +1,16 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: test023.tcl,v 11.13 2000/08/25 14:21:55 sue Exp $
+# $Id: test023.tcl,v 11.18 2002/05/22 15:42:48 sue Exp $
#
-# Duplicate delete test.
-# Add a key with duplicates (first time on-page, second time off-page)
-# Number the dups.
-# Delete dups and make sure that CURRENT/NEXT/PREV work correctly.
+# TEST test023
+# TEST Duplicate test
+# TEST Exercise deletes and cursor operations within a duplicate set.
+# TEST Add a key with duplicates (first time on-page, second time off-page)
+# TEST Number the dups.
+# TEST Delete dups and make sure that CURRENT/NEXT/PREV work correctly.
proc test023 { method args } {
global alphabet
global dupnum
@@ -26,6 +28,7 @@ proc test023 { method args } {
}
# Create the database and open the dictionary
+ set txnenv 0
set eindex [lsearch -exact $args "-env"]
#
# If we are using an env, then testfile should just be the db name.
@@ -37,19 +40,29 @@ proc test023 { method args } {
set testfile test023.db
incr eindex
set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ }
+ set testdir [get_home $env]
}
set t1 $testdir/t1
cleanup $testdir $env
set db [eval {berkdb_open \
- -create -truncate -mode 0644 -dup} $args {$omethod $testfile}]
+ -create -mode 0644 -dup} $args {$omethod $testfile}]
error_check_good dbopen [is_valid_db $db] TRUE
set pflags ""
set gflags ""
set txn ""
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
set dbc [eval {$db cursor} $txn]
- error_check_good db_cursor [is_substr $dbc $db] 1
+ error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE
foreach i { onpage offpage } {
if { $i == "onpage" } {
@@ -159,7 +172,7 @@ proc test023 { method args } {
puts "\tTest023.f: Count keys, overwrite current, count again"
# At this point we should have 17 keys (the initial 20 minus
# 3 deletes)
- set dbc2 [$db cursor]
+ set dbc2 [eval {$db cursor} $txn]
error_check_good db_cursor:2 [is_substr $dbc2 $db] 1
set count_check 0
@@ -178,6 +191,7 @@ proc test023 { method args } {
incr count_check
}
error_check_good numdups $count_check 17
+ error_check_good dbc2_close [$dbc2 close] 0
# Done, delete all the keys for next iteration
set ret [eval {$db del} $txn {$key}]
@@ -190,6 +204,9 @@ proc test023 { method args } {
}
error_check_good dbc_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
error_check_good db_close [$db close] 0
}
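# Sketch of the two-cursors-one-transaction shape the test023 hunk adopts
# (dbc2 is now opened with the same $txn as dbc): both cursors must be
# closed before that transaction can commit.  $env and $db are assumed to
# be handles opened as in the test.
proc two_cursors_one_txn { env db } {
    set t [$env txn]
    error_check_good txn [is_valid_txn $t $env] TRUE
    set dbc  [$db cursor -txn $t]
    set dbc2 [$db cursor -txn $t]
    error_check_good c1 [is_valid_cursor $dbc $db] TRUE
    error_check_good c2 [is_valid_cursor $dbc2 $db] TRUE
    # ... interleaved cursor work happens here ...
    error_check_good c2_close [$dbc2 close] 0
    error_check_good c1_close [$dbc close] 0
    error_check_good commit [$t commit] 0
}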
diff --git a/bdb/test/test024.tcl b/bdb/test/test024.tcl
index f0b6762cd2f..bbdc8fb2253 100644
--- a/bdb/test/test024.tcl
+++ b/bdb/test/test024.tcl
@@ -1,12 +1,13 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: test024.tcl,v 11.14 2000/08/25 14:21:55 sue Exp $
+# $Id: test024.tcl,v 11.19 2002/05/22 15:42:48 sue Exp $
#
-# DB Test 24 {method nentries}
-# Test the Btree and Record number get-by-number functionality.
+# TEST test024
+# TEST Record number retrieval test.
+# TEST Test the Btree and Record number get-by-number functionality.
proc test024 { method {nentries 10000} args} {
source ./include.tcl
global rand_init
@@ -25,6 +26,7 @@ proc test024 { method {nentries 10000} args} {
berkdb srand $rand_init
# Create the database and open the dictionary
+ set txnenv 0
set eindex [lsearch -exact $args "-env"]
#
# If we are using an env, then testfile should just be the db name.
@@ -36,6 +38,18 @@ proc test024 { method {nentries 10000} args} {
set testfile test024.db
incr eindex
set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ #
+ # If we are using txns and running with the
+ # default, set the default down a bit.
+ #
+ if { $nentries == 10000 } {
+ set nentries 100
+ }
+ }
+ set testdir [get_home $env]
}
set t1 $testdir/t1
set t2 $testdir/t2
@@ -59,11 +73,11 @@ proc test024 { method {nentries 10000} args} {
set sorted_keys [lsort $keys]
# Create the database
if { [string compare $omethod "-btree"] == 0 } {
- set db [eval {berkdb_open -create -truncate \
+ set db [eval {berkdb_open -create \
-mode 0644 -recnum} $args {$omethod $testfile}]
error_check_good dbopen [is_valid_db $db] TRUE
} else {
- set db [eval {berkdb_open -create -truncate \
+ set db [eval {berkdb_open -create \
-mode 0644} $args {$omethod $testfile}]
error_check_good dbopen [is_valid_db $db] TRUE
}
@@ -84,12 +98,20 @@ proc test024 { method {nentries 10000} args} {
} else {
set key $k
}
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
set ret [eval {$db put} \
$txn $pflags {$key [chop_data $method $k]}]
error_check_good put $ret 0
set ret [eval {$db get} $txn $gflags {$key}]
error_check_good \
get $ret [list [list $key [pad_data $method $k]]]
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
}
# Now we will get each key from the DB and compare the results
@@ -111,13 +133,21 @@ proc test024 { method {nentries 10000} args} {
set gflags " -recno"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
for { set k 1 } { $k <= $count } { incr k } {
- set ret [eval {$db get} $txn $gflags {$k}]
+ set ret [eval {$db get} $txn $gflags {$k}]
puts $oid [lindex [lindex $ret 0] 1]
error_check_good recnum_get [lindex [lindex $ret 0] 1] \
[pad_data $method [lindex $sorted_keys [expr $k - 1]]]
}
close $oid
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
error_check_good db_close [$db close] 0
error_check_good Test024.c:diff($t1,$t2) \
@@ -128,12 +158,20 @@ proc test024 { method {nentries 10000} args} {
set db [eval {berkdb_open -rdonly} $args $testfile]
error_check_good dbopen [is_valid_db $db] TRUE
set oid [open $t2 w]
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
for { set k 1 } { $k <= $count } { incr k } {
- set ret [eval {$db get} $txn $gflags {$k}]
+ set ret [eval {$db get} $txn $gflags {$k}]
puts $oid [lindex [lindex $ret 0] 1]
error_check_good recnum_get [lindex [lindex $ret 0] 1] \
[pad_data $method [lindex $sorted_keys [expr $k - 1]]]
}
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
close $oid
error_check_good db_close [$db close] 0
error_check_good Test024.d:diff($t1,$t2) \
@@ -155,12 +193,20 @@ proc test024 { method {nentries 10000} args} {
close $oid
set oid [open $t2 w]
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
for { set k $count } { $k > 0 } { incr k -1 } {
- set ret [eval {$db get} $txn $gflags {$k}]
+ set ret [eval {$db get} $txn $gflags {$k}]
puts $oid [lindex [lindex $ret 0] 1]
error_check_good recnum_get [lindex [lindex $ret 0] 1] \
[pad_data $method [lindex $sorted_keys [expr $k - 1]]]
}
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
close $oid
error_check_good db_close [$db close] 0
error_check_good Test024.e:diff($t1,$t2) \
@@ -175,12 +221,20 @@ proc test024 { method {nentries 10000} args} {
set kval [lindex $keys [expr $kndx - 1]]
set recno [expr [lsearch $sorted_keys $kval] + 1]
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
if { [is_record_based $method] == 1 } {
set ret [eval {$db del} $txn {$recno}]
} else {
set ret [eval {$db del} $txn {$kval}]
}
error_check_good delete $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
# Remove the key from the key list
set ndx [expr $kndx - 1]
@@ -192,12 +246,20 @@ proc test024 { method {nentries 10000} args} {
}
# Check that the keys after it have been renumbered
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
if { $do_renumber == 1 && $recno != $count } {
set r [expr $recno - 1]
set ret [eval {$db get} $txn $gflags {$recno}]
error_check_good get_after_del \
[lindex [lindex $ret 0] 1] [lindex $sorted_keys $r]
}
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
# Decrement count
incr count -1
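# Sketch of the get-by-record-number lookup test024 exercises: a btree
# opened with -recnum answers "$db get -recno N" with the Nth pair in key
# order.  The keys here are hypothetical; assumes $env is a transactional
# environment handle and $testfile a fresh database name.
proc recno_lookup_demo { env testfile } {
    set db [berkdb_open -create -mode 0644 \
        -env $env -auto_commit -recnum -btree $testfile]
    error_check_good dbopen [is_valid_db $db] TRUE
    set t [$env txn]
    foreach k { apple pear quince } {
        error_check_good put [$db put -txn $t $k $k] 0
    }
    # Record numbers are 1-based and follow the btree sort order.
    set ret [$db get -txn $t -recno 2]
    error_check_good recno_get [lindex [lindex $ret 0] 1] pear
    error_check_good commit [$t commit] 0
    error_check_good db_close [$db close] 0
}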
diff --git a/bdb/test/test025.tcl b/bdb/test/test025.tcl
index 9f8deecb488..180a1aa2939 100644
--- a/bdb/test/test025.tcl
+++ b/bdb/test/test025.tcl
@@ -1,12 +1,12 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: test025.tcl,v 11.11 2000/11/16 23:56:18 ubell Exp $
+# $Id: test025.tcl,v 11.19 2002/05/24 15:24:54 sue Exp $
#
-# DB Test 25 {method nentries}
-# Test the DB_APPEND flag.
+# TEST test025
+# TEST DB_APPEND flag test.
proc test025 { method {nentries 10000} {start 0 } {tnum "25" } args} {
global kvals
source ./include.tcl
@@ -25,6 +25,7 @@ proc test025 { method {nentries 10000} {start 0 } {tnum "25" } args} {
}
# Create the database and open the dictionary
+ set txnenv 0
set eindex [lsearch -exact $args "-env"]
#
# If we are using an env, then testfile should just be the db name.
@@ -36,12 +37,24 @@ proc test025 { method {nentries 10000} {start 0 } {tnum "25" } args} {
set testfile test0$tnum.db
incr eindex
set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ #
+ # If we are using txns and running with the
+ # default, set the default down a bit.
+ #
+ if { $nentries == 10000 } {
+ set nentries 100
+ }
+ }
+ set testdir [get_home $env]
}
set t1 $testdir/t1
cleanup $testdir $env
set db [eval {berkdb_open \
- -create -truncate -mode 0644} $args {$omethod $testfile}]
+ -create -mode 0644} $args {$omethod $testfile}]
error_check_good dbopen [is_valid_db $db] TRUE
set did [open $dict]
@@ -58,22 +71,42 @@ proc test025 { method {nentries 10000} {start 0 } {tnum "25" } args} {
gets $did str
set k [expr $count + 1]
set kvals($k) [pad_data $method $str]
- set ret [eval {$db put} $txn $k {[chop_data $method $str]}]
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn {$k [chop_data $method $str]}]
error_check_good db_put $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
incr count
}
-
+
while { [gets $did str] != -1 && $count < $nentries } {
set k [expr $count + 1]
set kvals($k) [pad_data $method $str]
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
set ret [eval {$db put} $txn $pflags {[chop_data $method $str]}]
error_check_good db_put $ret $k
set ret [eval {$db get} $txn $gflags {$k}]
error_check_good \
get $ret [list [list $k [pad_data $method $str]]]
- incr count
- if { [expr $count + 1] == 0 } {
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ # The recno key will be count + 1, so when we hit
+ # UINT32_MAX - 1, reset to 0.
+ if { $count == [expr 0xfffffffe] } {
+ set count 0
+ } else {
incr count
}
}
@@ -82,18 +115,26 @@ proc test025 { method {nentries 10000} {start 0 } {tnum "25" } args} {
# Now we will get each key from the DB and compare the results
# to the original.
puts "\tTest0$tnum.b: dump file"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
dump_file $db $txn $t1 $checkfunc
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
error_check_good db_close [$db close] 0
puts "\tTest0$tnum.c: close, open, and dump file"
# Now, reopen the file and run the last test again.
- open_and_dump_file $testfile $env $txn $t1 $checkfunc \
+ open_and_dump_file $testfile $env $t1 $checkfunc \
dump_file_direction -first -next
# Now, reopen the file and run the last test again in the
# reverse direction.
puts "\tTest0$tnum.d: close, open, and dump file in reverse direction"
- open_and_dump_file $testfile $env $txn $t1 $checkfunc \
+ open_and_dump_file $testfile $env $t1 $checkfunc \
dump_file_direction -last -prev
}
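# Sketch of the DB_APPEND behaviour test025 leans on: an appending put on
# a recno database allocates the next record number and returns it, which
# is why the loop above expects $ret to equal $k and resets the counter at
# 0xfffffffe (UINT32_MAX - 1).  Assumes -append is the Tcl flag mapping to
# DB_APPEND; handles and data are hypothetical.
proc append_demo { env testfile } {
    set db [berkdb_open -create -mode 0644 \
        -env $env -auto_commit -recno $testfile]
    error_check_good dbopen [is_valid_db $db] TRUE
    set t [$env txn]
    foreach str { alpha beta gamma } {
        set recno [$db put -txn $t -append $str]
        puts "record $recno allocated for $str"
    }
    error_check_good commit [$t commit] 0
    error_check_good db_close [$db close] 0
}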
diff --git a/bdb/test/test026.tcl b/bdb/test/test026.tcl
index 6c19c60a2e5..ce65e925d35 100644
--- a/bdb/test/test026.tcl
+++ b/bdb/test/test026.tcl
@@ -1,14 +1,18 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: test026.tcl,v 11.13 2000/11/17 19:07:51 sue Exp $
+# $Id: test026.tcl,v 11.20 2002/06/11 14:09:56 sue Exp $
#
-# DB Test 26 {access method}
-# Keyed delete test through cursor.
-# If ndups is small; this will test on-page dups; if it's large, it
-# will test off-page dups.
+# TEST test026
+# TEST Small keys/medium data w/duplicates
+# TEST Put/get per key.
+# TEST Loop through keys -- delete each key
+# TEST ... test that cursors delete duplicates correctly
+# TEST
+# TEST Keyed delete test through cursor. If ndups is small, this will
+# TEST test on-page dups; if it's large, it will test off-page dups.
proc test026 { method {nentries 2000} {ndups 5} {tnum 26} args} {
source ./include.tcl
@@ -20,10 +24,8 @@ proc test026 { method {nentries 2000} {ndups 5} {tnum 26} args} {
puts "Test0$tnum skipping for method $method"
return
}
- puts "Test0$tnum: $method ($args) $nentries keys\
- with $ndups dups; cursor delete test"
-
# Create the database and open the dictionary
+ set txnenv 0
set eindex [lsearch -exact $args "-env"]
#
# If we are using an env, then testfile should just be the db name.
@@ -35,8 +37,25 @@ proc test026 { method {nentries 2000} {ndups 5} {tnum 26} args} {
set testfile test0$tnum.db
incr eindex
set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ #
+ # If we are using txns and running with the
+ # default, set the defaults down a bit.
+ # If we want a lot of dups, set that
+ # down a bit, or repl testing takes very long.
+ #
+ if { $nentries == 2000 } {
+ set nentries 100
+ }
+ reduce_dups nentries ndups
+ }
+ set testdir [get_home $env]
}
cleanup $testdir $env
+ puts "Test0$tnum: $method ($args) $nentries keys\
+ with $ndups dups; cursor delete test"
set pflags ""
set gflags ""
@@ -46,16 +65,24 @@ proc test026 { method {nentries 2000} {ndups 5} {tnum 26} args} {
# Here is the loop where we put and get each key/data pair
puts "\tTest0$tnum.a: Put loop"
- set db [eval {berkdb_open -create -truncate \
+ set db [eval {berkdb_open -create \
-mode 0644} $args {$omethod -dup $testfile}]
error_check_good dbopen [is_valid_db $db] TRUE
set did [open $dict]
while { [gets $did str] != -1 && $count < [expr $nentries * $ndups] } {
set datastr [ make_data_str $str ]
for { set j 1 } { $j <= $ndups} {incr j} {
- set ret [eval {$db put} \
- $txn $pflags {$str [chop_data $method $j$datastr]}]
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} \
+ $txn $pflags {$str [chop_data $method $j$datastr]}]
error_check_good db_put $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
incr count
}
}
@@ -68,6 +95,11 @@ proc test026 { method {nentries 2000} {ndups 5} {tnum 26} args} {
# Now we will sequentially traverse the database getting each
# item and deleting it.
set count 0
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
set dbc [eval {$db cursor} $txn]
error_check_good db_cursor [is_substr $dbc $db] 1
@@ -97,16 +129,27 @@ proc test026 { method {nentries 2000} {ndups 5} {tnum 26} args} {
error_check_good db_del:$key $ret 0
}
error_check_good dbc_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
error_check_good db_close [$db close] 0
puts "\tTest0$tnum.c: Verify empty file"
# Double check that file is now empty
set db [eval {berkdb_open} $args $testfile]
error_check_good dbopen [is_valid_db $db] TRUE
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
set dbc [eval {$db cursor} $txn]
error_check_good db_cursor [is_substr $dbc $db] 1
set ret [$dbc get -first]
error_check_good get_on_empty [string length $ret] 0
error_check_good dbc_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
error_check_good db_close [$db close] 0
}
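# Sketch of the delete-and-verify-empty shape of test026, scaled down:
# collect the distinct keys with one cursor, delete each key (which drops
# every duplicate stored under it), then confirm a fresh cursor finds
# nothing.  $env and $db are assumed open with -dup as in the test.
proc purge_all_keys { env db } {
    set t [$env txn]
    set dbc [$db cursor -txn $t]
    set keys {}
    for { set ret [$dbc get -first] } { [llength $ret] != 0 } \
        { set ret [$dbc get -next] } {
        set k [lindex [lindex $ret 0] 0]
        if { [lsearch -exact $keys $k] == -1 } {
            lappend keys $k
        }
    }
    error_check_good dbc_close [$dbc close] 0
    foreach key $keys {
        error_check_good db_del:$key [$db del -txn $t $key] 0
    }
    set dbc [$db cursor -txn $t]
    error_check_good get_on_empty [string length [$dbc get -first]] 0
    error_check_good dbc_close [$dbc close] 0
    error_check_good commit [$t commit] 0
}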
diff --git a/bdb/test/test027.tcl b/bdb/test/test027.tcl
index ae4bf64fb3e..a0f6dfa4dcb 100644
--- a/bdb/test/test027.tcl
+++ b/bdb/test/test027.tcl
@@ -1,13 +1,17 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: test027.tcl,v 11.4 2000/05/22 12:51:39 bostic Exp $
+# $Id: test027.tcl,v 11.7 2002/01/11 15:53:45 bostic Exp $
#
-# DB Test 27 {access method}
-# Check that delete operations work. Create a database; close database and
-# reopen it. Then issues delete by key for each entry.
+# TEST test027
+# TEST Off-page duplicate test
+# TEST Test026 with parameters to force off-page duplicates.
+# TEST
+# TEST Check that delete operations work. Create a database; close
+# TEST database and reopen it. Then issue a delete by key for each
+# TEST entry.
proc test027 { method {nentries 100} args} {
eval {test026 $method $nentries 100 27} $args
}
diff --git a/bdb/test/test028.tcl b/bdb/test/test028.tcl
index b460dd53a98..a546744fdac 100644
--- a/bdb/test/test028.tcl
+++ b/bdb/test/test028.tcl
@@ -1,16 +1,17 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: test028.tcl,v 11.12 2000/08/25 14:21:55 sue Exp $
+# $Id: test028.tcl,v 11.20 2002/07/01 15:03:45 krinsky Exp $
#
-# Put after cursor delete test.
+# TEST test028
+# TEST Cursor delete test
+# TEST Test put operations after deleting through a cursor.
proc test028 { method args } {
global dupnum
global dupstr
global alphabet
- global errorInfo
source ./include.tcl
set args [convert_args $method $args]
@@ -30,6 +31,7 @@ proc test028 { method args } {
}
# Create the database and open the dictionary
+ set txnenv 0
set eindex [lsearch -exact $args "-env"]
#
# If we are using an env, then testfile should just be the db name.
@@ -41,11 +43,16 @@ proc test028 { method args } {
set testfile test028.db
incr eindex
set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ }
+ set testdir [get_home $env]
}
set t1 $testdir/t1
cleanup $testdir $env
set db [eval {berkdb_open \
- -create -truncate -mode 0644} $args {$omethod $testfile}]
+ -create -mode 0644} $args {$omethod $testfile}]
error_check_good dbopen [is_valid_db $db] TRUE
set ndups 20
@@ -57,6 +64,11 @@ proc test028 { method args } {
set gflags " -recno"
}
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
set dbc [eval {$db cursor} $txn]
error_check_good db_cursor [is_substr $dbc $db] 1
@@ -129,8 +141,8 @@ proc test028 { method args } {
puts "\tTest028.g: Insert key with duplicates"
for { set count 0 } { $count < $ndups } { incr count } {
- set ret [eval {$db put} \
- $txn {$key [chop_data $method $count$dupstr]}]
+ set ret [eval {$db put} $txn \
+ {$key [chop_data $method $count$dupstr]}]
error_check_good db_put $ret 0
}
@@ -161,7 +173,6 @@ proc test028 { method args } {
if { $count == [expr $ndups - 1] } {
puts "\tTest028.k:\
Duplicate No_Overwrite test"
- set $errorInfo ""
set ret [eval {$db put} $txn \
{-nooverwrite $key $dupstr}]
error_check_good db_put [is_substr \
@@ -179,7 +190,8 @@ proc test028 { method args } {
$txn {-nooverwrite $key 0$dupstr}]
error_check_good db_put $ret 0
for { set count 1 } { $count < $ndups } { incr count } {
- set ret [eval {$db put} $txn {$key $count$dupstr}]
+ set ret [eval {$db put} $txn \
+ {$key $count$dupstr}]
error_check_good db_put $ret 0
}
@@ -192,8 +204,10 @@ proc test028 { method args } {
error_check_good db_del $ret 0
}
}
-
error_check_good dbc_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
error_check_good db_close [$db close] 0
}
diff --git a/bdb/test/test029.tcl b/bdb/test/test029.tcl
index c10815b0bf3..8e4b8aa6e41 100644
--- a/bdb/test/test029.tcl
+++ b/bdb/test/test029.tcl
@@ -1,12 +1,12 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: test029.tcl,v 11.13 2000/08/25 14:21:55 sue Exp $
+# $Id: test029.tcl,v 11.20 2002/06/29 13:44:44 bostic Exp $
#
-# DB Test 29 {method nentries}
-# Test the Btree and Record number renumbering.
+# TEST test029
+# TEST Test the Btree and Record number renumbering.
proc test029 { method {nentries 10000} args} {
source ./include.tcl
@@ -26,6 +26,7 @@ proc test029 { method {nentries 10000} args} {
}
# Create the database and open the dictionary
+ set txnenv 0
set eindex [lsearch -exact $args "-env"]
#
# If we are using an env, then testfile should just be the db name.
@@ -37,6 +38,20 @@ proc test029 { method {nentries 10000} args} {
set testfile test029.db
incr eindex
set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ #
+ # If we are using txns and running with the
+ # default, set the default down a bit.
+ #
+ if { $nentries == 10000 } {
+ # Do not set nentries down to 100 until we
+ # fix SR #5958.
+ set nentries 1000
+ }
+ }
+ set testdir [get_home $env]
}
cleanup $testdir $env
@@ -64,11 +79,11 @@ proc test029 { method {nentries 10000} args} {
# Create the database
if { [string compare $omethod "-btree"] == 0 } {
- set db [eval {berkdb_open -create -truncate \
+ set db [eval {berkdb_open -create \
-mode 0644 -recnum} $args {$omethod $testfile}]
error_check_good dbopen [is_valid_db $db] TRUE
} else {
- set db [eval {berkdb_open -create -truncate \
+ set db [eval {berkdb_open -create \
-mode 0644} $args {$omethod $testfile}]
error_check_good dbopen [is_valid_db $db] TRUE
}
@@ -89,14 +104,19 @@ proc test029 { method {nentries 10000} args} {
} else {
set key $k
}
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
set ret [eval {$db put} \
$txn $pflags {$key [chop_data $method $k]}]
error_check_good dbput $ret 0
set ret [eval {$db get} $txn $gflags {$key}]
- if { [string compare [lindex [lindex $ret 0] 1] $k] != 0 } {
- puts "Test029: put key-data $key $k got $ret"
- return
+ error_check_good dbget [lindex [lindex $ret 0] 1] $k
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
}
}
@@ -110,8 +130,16 @@ proc test029 { method {nentries 10000} args} {
set key $first_key
}
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
set ret [eval {$db del} $txn {$key}]
error_check_good db_del $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
# Now we are ready to retrieve records based on
# record number
@@ -120,28 +148,50 @@ proc test029 { method {nentries 10000} args} {
}
# First try to get the old last key (shouldn't exist)
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
set ret [eval {$db get} $txn $gflags {$last_keynum}]
error_check_good get_after_del $ret [list]
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
# Now try to get what we think should be the last key
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
set ret [eval {$db get} $txn $gflags {[expr $last_keynum - 1]}]
error_check_good \
getn_last_after_del [lindex [lindex $ret 0] 1] $last_key
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
# Create a cursor; we need it for the next test and we
# need it for recno here.
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
set dbc [eval {$db cursor} $txn]
- error_check_good db_cursor [is_substr $dbc $db] 1
+ error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE
# OK, now re-put the first key and make sure that we
# renumber the last key appropriately.
if { [string compare $omethod "-btree"] == 0 } {
- set ret [eval {$db put} $txn {$key [chop_data $method $first_key]}]
+ set ret [eval {$db put} $txn \
+ {$key [chop_data $method $first_key]}]
error_check_good db_put $ret 0
} else {
# Recno
- set ret [eval {$dbc get} $txn {-first}]
- set ret [eval {$dbc put} $txn $pflags {-before $first_key}]
+ set ret [$dbc get -first]
+ set ret [eval {$dbc put} $pflags {-before $first_key}]
error_check_bad dbc_put:DB_BEFORE $ret 0
}
@@ -153,7 +203,7 @@ proc test029 { method {nentries 10000} args} {
# Now delete the first key in the database using a cursor
puts "\tTest029.d: delete with cursor and verify renumber"
- set ret [eval {$dbc get} $txn {-first}]
+ set ret [$dbc get -first]
error_check_good dbc_first $ret [list [list $key $first_key]]
# Now delete at the cursor
@@ -175,10 +225,10 @@ proc test029 { method {nentries 10000} args} {
puts "\tTest029.e: put with cursor and verify renumber"
if { [string compare $omethod "-btree"] == 0 } {
set ret [eval {$dbc put} \
- $txn $pflags {-current $first_key}]
+ $pflags {-current $first_key}]
error_check_good dbc_put:DB_CURRENT $ret 0
} else {
- set ret [eval {$dbc put} $txn $pflags {-before $first_key}]
+ set ret [eval {$dbc put} $pflags {-before $first_key}]
error_check_bad dbc_put:DB_BEFORE $ret 0
}
@@ -188,5 +238,8 @@ proc test029 { method {nentries 10000} args} {
get_after_cursor_reput [lindex [lindex $ret 0] 1] $last_key
error_check_good dbc_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
error_check_good db_close [$db close] 0
}
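# Sketch of why $txn disappears from the cursor get/put calls in the
# test029 hunks: a cursor opened with -txn is already bound to that
# transaction, so its own operations take no transaction argument.
# $db is assumed to be a plain btree handle in a transactional env.
proc cursor_owns_txn { env db data } {
    set t [$env txn]
    set dbc [$db cursor -txn $t]
    # No -txn here: the cursor carries $t.
    set ret [$dbc get -first]
    if { [llength $ret] != 0 } {
        error_check_good reput [$dbc put -current $data] 0
    }
    error_check_good dbc_close [$dbc close] 0
    error_check_good commit [$t commit] 0
}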
diff --git a/bdb/test/test030.tcl b/bdb/test/test030.tcl
index 7395adf82bd..d91359f07a0 100644
--- a/bdb/test/test030.tcl
+++ b/bdb/test/test030.tcl
@@ -1,11 +1,12 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: test030.tcl,v 11.13 2000/08/25 14:21:55 sue Exp $
+# $Id: test030.tcl,v 11.18 2002/05/22 15:42:50 sue Exp $
#
-# DB Test 30: Test DB_NEXT_DUP Functionality.
+# TEST test030
+# TEST Test DB_NEXT_DUP Functionality.
proc test030 { method {nentries 10000} args } {
global rand_init
source ./include.tcl
@@ -18,11 +19,10 @@ proc test030 { method {nentries 10000} args } {
puts "Test030 skipping for method $method"
return
}
-
- puts "Test030: $method ($args) $nentries DB_NEXT_DUP testing"
berkdb srand $rand_init
# Create the database and open the dictionary
+ set txnenv 0
set eindex [lsearch -exact $args "-env"]
#
# If we are using an env, then testfile should just be the db name.
@@ -36,20 +36,34 @@ proc test030 { method {nentries 10000} args } {
set cntfile cntfile.db
incr eindex
set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ #
+ # If we are using txns and running with the
+ # default, set the default down a bit.
+ #
+ if { $nentries == 10000 } {
+ set nentries 100
+ }
+ }
+ set testdir [get_home $env]
}
+
+ puts "Test030: $method ($args) $nentries DB_NEXT_DUP testing"
set t1 $testdir/t1
set t2 $testdir/t2
set t3 $testdir/t3
cleanup $testdir $env
- set db [eval {berkdb_open -create -truncate \
+ set db [eval {berkdb_open -create \
-mode 0644 -dup} $args {$omethod $testfile}]
error_check_good dbopen [is_valid_db $db] TRUE
# Use a second DB to keep track of how many duplicates
# we enter per key
- set cntdb [eval {berkdb_open -create -truncate \
+ set cntdb [eval {berkdb_open -create \
-mode 0644} $args {-btree $cntfile}]
error_check_good dbopen:cntfile [is_valid_db $db] TRUE
@@ -64,15 +78,30 @@ proc test030 { method {nentries 10000} args } {
set did [open $dict]
puts "\tTest030.a: put and get duplicate keys."
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
set dbc [eval {$db cursor} $txn]
while { [gets $did str] != -1 && $count < $nentries } {
set ndup [berkdb random_int 1 10]
for { set i 1 } { $i <= $ndup } { incr i 1 } {
+ set ctxn ""
+ if { $txnenv == 1 } {
+ set ct [$env txn]
+ error_check_good txn \
+ [is_valid_txn $ct $env] TRUE
+ set ctxn "-txn $ct"
+ }
set ret [eval {$cntdb put} \
- $txn $pflags {$str [chop_data $method $ndup]}]
+ $ctxn $pflags {$str [chop_data $method $ndup]}]
error_check_good put_cnt $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$ct commit] 0
+ }
set datastr $i:$str
set ret [eval {$db put} \
$txn $pflags {$str [chop_data $method $datastr]}]
@@ -132,8 +161,16 @@ proc test030 { method {nentries 10000} args } {
set lastkey $k
# Figure out how many dups we should have
- set ret [eval {$cntdb get} $txn $pflags {$k}]
+ if { $txnenv == 1 } {
+ set ct [$env txn]
+ error_check_good txn [is_valid_txn $ct $env] TRUE
+ set ctxn "-txn $ct"
+ }
+ set ret [eval {$cntdb get} $ctxn $pflags {$k}]
set ndup [lindex [lindex $ret 0] 1]
+ if { $txnenv == 1 } {
+ error_check_good txn [$ct commit] 0
+ }
set howmany 1
for { set ret [$dbc get -nextdup] } \
@@ -186,6 +223,9 @@ proc test030 { method {nentries 10000} args } {
}
error_check_good cnt_curs_close [$cnt_dbc close] 0
error_check_good db_curs_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
error_check_good cnt_file_close [$cntdb close] 0
error_check_good db_file_close [$db close] 0
}
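# Sketch of the ctxn pattern the test030 hunks introduce: the long-lived
# traversal cursor keeps its own transaction, while each write to the
# bookkeeping database runs in its own short transaction that commits
# immediately, keeping lock lifetimes down.  $db and $cntdb are assumed
# open in the same transactional environment.
proc count_dups_pattern { env db cntdb key ndup } {
    set t [$env txn]
    set dbc [$db cursor -txn $t]
    # Short-lived transaction just for the counting database.
    set ct [$env txn]
    error_check_good cnt_put [$cntdb put -txn $ct $key $ndup] 0
    error_check_good cnt_commit [$ct commit] 0
    # ... main-cursor work under $t continues here ...
    error_check_good dbc_close [$dbc close] 0
    error_check_good commit [$t commit] 0
}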
diff --git a/bdb/test/test031.tcl b/bdb/test/test031.tcl
index 35041541fa7..0006deb2d99 100644
--- a/bdb/test/test031.tcl
+++ b/bdb/test/test031.tcl
@@ -1,21 +1,25 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: test031.tcl,v 11.17 2000/11/06 19:31:55 sue Exp $
+# $Id: test031.tcl,v 11.24 2002/06/26 06:22:44 krinsky Exp $
#
-# DB Test 31 {access method}
-# Use the first 10,000 entries from the dictionary.
-# Insert each with self as key and "ndups" duplicates
-# For the data field, prepend random five-char strings (see test032)
-# that we force the duplicate sorting code to do something.
-# Along the way, test that we cannot insert duplicate duplicates
-# using DB_NODUPDATA.
-# By setting ndups large, we can make this an off-page test
-# After all are entered, retrieve all; verify output.
-# Close file, reopen, do retrieve and re-verify.
-# This does not work for recno
+# TEST test031
+# TEST Duplicate sorting functionality
+# TEST Make sure DB_NODUPDATA works.
+# TEST
+# TEST Use the first 10,000 entries from the dictionary.
+# TEST Insert each with self as key and "ndups" duplicates
+# TEST For the data field, prepend random five-char strings (see test032)
+# TEST that we force the duplicate sorting code to do something.
+# TEST Along the way, test that we cannot insert duplicate duplicates
+# TEST using DB_NODUPDATA.
+# TEST
+# TEST By setting ndups large, we can make this an off-page test
+# TEST After all are entered, retrieve all; verify output.
+# TEST Close file, reopen, do retrieve and re-verify.
+# TEST This does not work for recno
proc test031 { method {nentries 10000} {ndups 5} {tnum 31} args } {
global alphabet
global rand_init
@@ -27,6 +31,7 @@ proc test031 { method {nentries 10000} {ndups 5} {tnum 31} args } {
set omethod [convert_method $method]
# Create the database and open the dictionary
+ set txnenv 0
set eindex [lsearch -exact $args "-env"]
#
# If we are using an env, then testfile should just be the db name.
@@ -40,6 +45,19 @@ proc test031 { method {nentries 10000} {ndups 5} {tnum 31} args } {
set checkdb checkdb.db
incr eindex
set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ #
+ # If we are using txns and running with the
+ # default, set the default down a bit.
+ #
+ if { $nentries == 10000 } {
+ set nentries 100
+ }
+ reduce_dups nentries ndups
+ }
+ set testdir [get_home $env]
}
set t1 $testdir/t1
set t2 $testdir/t2
@@ -47,19 +65,19 @@ proc test031 { method {nentries 10000} {ndups 5} {tnum 31} args } {
cleanup $testdir $env
puts "Test0$tnum: \
- $method ($args) $nentries small sorted dup key/data pairs"
+ $method ($args) $nentries small $ndups sorted dup key/data pairs"
if { [is_record_based $method] == 1 || \
[is_rbtree $method] == 1 } {
puts "Test0$tnum skipping for method $omethod"
return
}
- set db [eval {berkdb_open -create -truncate \
+ set db [eval {berkdb_open -create \
-mode 0644} $args {$omethod -dup -dupsort $testfile}]
error_check_good dbopen [is_valid_db $db] TRUE
set did [open $dict]
set check_db [eval {berkdb_open \
- -create -truncate -mode 0644} $args {-hash $checkdb}]
+ -create -mode 0644} $args {-hash $checkdb}]
error_check_good dbopen:check_db [is_valid_db $check_db] TRUE
set pflags ""
@@ -69,8 +87,13 @@ proc test031 { method {nentries 10000} {ndups 5} {tnum 31} args } {
# Here is the loop where we put and get each key/data pair
puts "\tTest0$tnum.a: Put/get loop, check nodupdata"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
set dbc [eval {$db cursor} $txn]
- error_check_good cursor_open [is_substr $dbc $db] 1
+ error_check_good cursor_open [is_valid_cursor $dbc $db] TRUE
while { [gets $did str] != -1 && $count < $nentries } {
# Re-initialize random string generator
randstring_init $ndups
@@ -132,13 +155,21 @@ proc test031 { method {nentries 10000} {ndups 5} {tnum 31} args } {
incr count
}
error_check_good cursor_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
close $did
# Now we will get each key from the DB and compare the results
# to the original.
puts "\tTest0$tnum.b: Checking file for correct duplicates"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
set dbc [eval {$db cursor} $txn]
- error_check_good cursor_open(2) [is_substr $dbc $db] 1
+ error_check_good cursor_open(2) [is_valid_cursor $dbc $db] TRUE
set lastkey "THIS WILL NEVER BE A KEY VALUE"
# no need to delete $lastkey
@@ -189,8 +220,11 @@ proc test031 { method {nentries 10000} {ndups 5} {tnum 31} args } {
set ret [$check_c get -first]
error_check_good check_c:get:$ret [llength $ret] 0
error_check_good check_c:close [$check_c close] 0
- error_check_good check_db:close [$check_db close] 0
error_check_good dbc_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good check_db:close [$check_db close] 0
error_check_good db_close [$db close] 0
}
diff --git a/bdb/test/test032.tcl b/bdb/test/test032.tcl
index 1504ec5cc2d..2076b744851 100644
--- a/bdb/test/test032.tcl
+++ b/bdb/test/test032.tcl
@@ -1,20 +1,22 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: test032.tcl,v 11.15 2000/08/25 14:21:55 sue Exp $
+# $Id: test032.tcl,v 11.23 2002/06/11 14:09:57 sue Exp $
#
-# DB Test 32 {access method}
-# Use the first 10,000 entries from the dictionary.
-# Insert each with self as key and "ndups" duplicates
-# For the data field, prepend the letters of the alphabet
-# in a random order so that we force the duplicate sorting
-# code to do something.
-# By setting ndups large, we can make this an off-page test
-# After all are entered; test the DB_GET_BOTH functionality
-# first by retrieving each dup in the file explicitly. Then
-# test the failure case.
+# TEST test032
+# TEST DB_GET_BOTH, DB_GET_BOTH_RANGE
+# TEST
+# TEST Use the first 10,000 entries from the dictionary. Insert each with
+# TEST self as key and "ndups" duplicates. For the data field, prepend the
+# TEST letters of the alphabet in a random order so we force the duplicate
+# TEST sorting code to do something. By setting ndups large, we can make
+# TEST this an off-page test.
+# TEST
+# TEST Test the DB_GET_BOTH functionality by retrieving each dup in the file
+# TEST explicitly. Test the DB_GET_BOTH_RANGE functionality by retrieving
+# TEST the unique key prefix (cursor only). Finally test the failure case.
proc test032 { method {nentries 10000} {ndups 5} {tnum 32} args } {
global alphabet rand_init
source ./include.tcl
@@ -25,6 +27,7 @@ proc test032 { method {nentries 10000} {ndups 5} {tnum 32} args } {
berkdb srand $rand_init
# Create the database and open the dictionary
+ set txnenv 0
set eindex [lsearch -exact $args "-env"]
#
# If we are using an env, then testfile should just be the db name.
@@ -38,6 +41,19 @@ proc test032 { method {nentries 10000} {ndups 5} {tnum 32} args } {
set checkdb checkdb.db
incr eindex
set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ #
+ # If we are using txns and running with the
+ # default, set the default down a bit.
+ #
+ if { $nentries == 10000 } {
+ set nentries 100
+ }
+ reduce_dups nentries ndups
+ }
+ set testdir [get_home $env]
}
set t1 $testdir/t1
set t2 $testdir/t2
@@ -45,19 +61,19 @@ proc test032 { method {nentries 10000} {ndups 5} {tnum 32} args } {
cleanup $testdir $env
puts "Test0$tnum:\
- $method ($args) $nentries small sorted dup key/data pairs"
+ $method ($args) $nentries small sorted $ndups dup key/data pairs"
if { [is_record_based $method] == 1 || \
[is_rbtree $method] == 1 } {
puts "Test0$tnum skipping for method $omethod"
return
}
- set db [eval {berkdb_open -create -truncate -mode 0644 \
+ set db [eval {berkdb_open -create -mode 0644 \
$omethod -dup -dupsort} $args {$testfile} ]
error_check_good dbopen [is_valid_db $db] TRUE
set did [open $dict]
set check_db [eval {berkdb_open \
- -create -truncate -mode 0644} $args {-hash $checkdb}]
+ -create -mode 0644} $args {-hash $checkdb}]
error_check_good dbopen:check_db [is_valid_db $check_db] TRUE
set pflags ""
@@ -67,8 +83,13 @@ proc test032 { method {nentries 10000} {ndups 5} {tnum 32} args } {
# Here is the loop where we put and get each key/data pair
puts "\tTest0$tnum.a: Put/get loop"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
set dbc [eval {$db cursor} $txn]
- error_check_good cursor_open [is_substr $dbc $db] 1
+ error_check_good cursor_open [is_valid_cursor $dbc $db] TRUE
while { [gets $did str] != -1 && $count < $nentries } {
# Re-initialize random string generator
randstring_init $ndups
@@ -101,8 +122,8 @@ proc test032 { method {nentries 10000} {ndups 5} {tnum 32} args } {
break
}
if {[string compare $lastdup $datastr] > 0} {
- error_check_good sorted_dups($lastdup,$datastr)\
- 0 1
+ error_check_good \
+ sorted_dups($lastdup,$datastr) 0 1
}
incr x
set lastdup $datastr
@@ -112,14 +133,22 @@ proc test032 { method {nentries 10000} {ndups 5} {tnum 32} args } {
incr count
}
error_check_good cursor_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
close $did
# Now we will get each key from the DB and compare the results
# to the original.
puts "\tTest0$tnum.b: Checking file for correct duplicates (no cursor)"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
set check_c [eval {$check_db cursor} $txn]
error_check_good check_c_open(2) \
- [is_substr $check_c $check_db] 1
+ [is_valid_cursor $check_c $check_db] TRUE
for {set ndx 0} {$ndx < [expr 4 * $ndups]} {incr ndx 4} {
for {set ret [$check_c get -first]} \
@@ -138,10 +167,11 @@ proc test032 { method {nentries 10000} {ndups 5} {tnum 32} args } {
}
$db sync
+
# Now repeat the above test using cursor ops
puts "\tTest0$tnum.c: Checking file for correct duplicates (cursor)"
set dbc [eval {$db cursor} $txn]
- error_check_good cursor_open [is_substr $dbc $db] 1
+ error_check_good cursor_open [is_valid_cursor $dbc $db] TRUE
for {set ndx 0} {$ndx < [expr 4 * $ndups]} {incr ndx 4} {
for {set ret [$check_c get -first]} \
@@ -155,7 +185,11 @@ proc test032 { method {nentries 10000} {ndups 5} {tnum 32} args } {
set data $pref:$k
set ret [eval {$dbc get} {-get_both $k $data}]
error_check_good \
- get_both_key:$k $ret [list [list $k $data]]
+ curs_get_both_data:$k $ret [list [list $k $data]]
+
+ set ret [eval {$dbc get} {-get_both_range $k $pref}]
+ error_check_good \
+ curs_get_both_range:$k $ret [list [list $k $data]]
}
}
@@ -188,8 +222,10 @@ proc test032 { method {nentries 10000} {ndups 5} {tnum 32} args } {
}
error_check_good check_c:close [$check_c close] 0
- error_check_good check_db:close [$check_db close] 0
-
error_check_good dbc_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good check_db:close [$check_db close] 0
error_check_good db_close [$db close] 0
}
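# Sketch of the two lookup flavours test032 now checks on a -dup -dupsort
# database: -get_both needs the exact key/data pair, while the cursor-only
# -get_both_range accepts a data prefix and positions on the smallest
# duplicate at or above it.  The "01:" data layout is hypothetical;
# handles are assumed open as in the test.
proc get_both_demo { env db key } {
    set t [$env txn]
    set dbc [$db cursor -txn $t]
    # Exact match: key and the complete data item must both match.
    set exact [$db get -txn $t -get_both $key "01:$key"]
    # Prefix match (cursor only): any duplicate starting at "01" or later.
    set range [$dbc get -get_both_range $key "01"]
    puts "exact=$exact range=$range"
    error_check_good dbc_close [$dbc close] 0
    error_check_good commit [$t commit] 0
}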
diff --git a/bdb/test/test033.tcl b/bdb/test/test033.tcl
index ed46e6bda04..a7796ce99d6 100644
--- a/bdb/test/test033.tcl
+++ b/bdb/test/test033.tcl
@@ -1,31 +1,32 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: test033.tcl,v 11.11 2000/10/25 15:45:20 sue Exp $
+# $Id: test033.tcl,v 11.24 2002/08/08 15:38:11 bostic Exp $
#
-# DB Test 33 {access method}
-# Use the first 10,000 entries from the dictionary.
-# Insert each with self as key and data; add duplicate
-# records for each.
-# After all are entered, retrieve all; verify output by doing
-# DB_GET_BOTH on existing and non-existing keys.
-# This does not work for recno
+# TEST test033
+# TEST DB_GET_BOTH without comparison function
+# TEST
+# TEST Use the first 10,000 entries from the dictionary. Insert each with
+# TEST self as key and data; add duplicate records for each. After all are
+# TEST entered, retrieve all and verify output using DB_GET_BOTH (on DB and
+# TEST DBC handles) and DB_GET_BOTH_RANGE (on a DBC handle) on existent and
+# TEST nonexistent keys.
+# TEST
+# TEST XXX
+# TEST This does not work for rbtree.
proc test033 { method {nentries 10000} {ndups 5} {tnum 33} args } {
source ./include.tcl
set args [convert_args $method $args]
set omethod [convert_method $method]
-
- puts "Test0$tnum: $method ($args) $nentries small dup key/data pairs"
- if { [is_record_based $method] == 1 || \
- [is_rbtree $method] == 1 } {
- puts "Test0$tnum skipping for method $omethod"
+ if { [is_rbtree $method] == 1 } {
+ puts "Test0$tnum skipping for method $method"
return
}
- # Create the database and open the dictionary
+ set txnenv 0
set eindex [lsearch -exact $args "-env"]
#
# If we are using an env, then testfile should just be the db name.
@@ -37,67 +38,139 @@ proc test033 { method {nentries 10000} {ndups 5} {tnum 33} args } {
set testfile test0$tnum.db
incr eindex
set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ #
+ # If we are using txns and running with the
+ # default, set the default down a bit.
+ #
+ if { $nentries == 10000 } {
+ set nentries 100
+ }
+ reduce_dups nentries ndups
+ }
+ set testdir [get_home $env]
}
+
+ puts "Test0$tnum: $method ($args) $nentries small $ndups dup key/data pairs"
set t1 $testdir/t1
set t2 $testdir/t2
set t3 $testdir/t3
cleanup $testdir $env
- set db [eval {berkdb_open -create -truncate -mode 0644 \
- $omethod -dup} $args {$testfile}]
+ # Duplicate data entries are not allowed in record based methods.
+ if { [is_record_based $method] == 1 } {
+ set db [eval {berkdb_open -create -mode 0644 \
+ $omethod} $args {$testfile}]
+ } else {
+ set db [eval {berkdb_open -create -mode 0644 \
+ $omethod -dup} $args {$testfile}]
+ }
error_check_good dbopen [is_valid_db $db] TRUE
- set did [open $dict]
set pflags ""
set gflags ""
set txn ""
- set count 0
+
+ # Allocate a cursor for DB_GET_BOTH_RANGE.
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
+ error_check_good cursor_open [is_valid_cursor $dbc $db] TRUE
puts "\tTest0$tnum.a: Put/get loop."
# Here is the loop where we put and get each key/data pair
+ set count 0
+ set did [open $dict]
while { [gets $did str] != -1 && $count < $nentries } {
- for { set i 1 } { $i <= $ndups } { incr i } {
- set datastr $i:$str
- set ret [eval {$db put} \
- $txn $pflags {$str [chop_data $method $datastr]}]
- error_check_good db_put $ret 0
+ if { [is_record_based $method] == 1 } {
+ set key [expr $count + 1]
+ set ret [eval {$db put} $txn $pflags \
+ {$key [chop_data $method $str]}]
+ error_check_good put $ret 0
+ } else {
+ for { set i 1 } { $i <= $ndups } { incr i } {
+ set datastr $i:$str
+ set ret [eval {$db put} \
+ $txn $pflags {$str [chop_data $method $datastr]}]
+ error_check_good db_put $ret 0
+ }
}
# Now retrieve all the keys matching this key and dup
- for {set i 1} {$i <= $ndups } { incr i } {
- set datastr $i:$str
- set ret [eval {$db get} $txn {-get_both $str $datastr}]
- error_check_good "Test0$tnum:dup#" [lindex \
- [lindex $ret 0] 1] [pad_data $method $datastr]
+ # for non-record based AMs.
+ if { [is_record_based $method] == 1 } {
+ test033_recno.check $db $dbc $method $str $txn $key
+ } else {
+ test033_check $db $dbc $method $str $txn $ndups
}
-
- # Now retrieve non-existent dup (i is ndups + 1)
- set datastr $i:$str
- set ret [eval {$db get} $txn {-get_both $str $datastr}]
- error_check_good Test0$tnum:dupfailure [llength $ret] 0
incr count
}
+
close $did
- set did [open $dict]
- set count 0
puts "\tTest0$tnum.b: Verifying DB_GET_BOTH after creation."
+ set count 0
+ set did [open $dict]
while { [gets $did str] != -1 && $count < $nentries } {
- # Now retrieve all the keys matching this key and dup
- for {set i 1} {$i <= $ndups } { incr i } {
- set datastr $i:$str
- set ret [eval {$db get} $txn {-get_both $str $datastr}]
- error_check_good "Test0$tnum:dup#" \
- [lindex [lindex $ret 0] 1] $datastr
+ # Now retrieve all the keys matching this key
+ # for non-record based AMs.
+ if { [is_record_based $method] == 1 } {
+ set key [expr $count + 1]
+ test033_recno.check $db $dbc $method $str $txn $key
+ } else {
+ test033_check $db $dbc $method $str $txn $ndups
}
-
- # Now retrieve non-existent dup (i is ndups + 1)
- set datastr $i:$str
- set ret [eval {$db get} $txn {-get_both $str $datastr}]
- error_check_good Test0$tnum:dupfailure [llength $ret] 0
incr count
}
close $did
+ error_check_good dbc_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
error_check_good db_close [$db close] 0
}
+
+# No testing of dups is done on record-based methods.
+proc test033_recno.check {db dbc method str txn key} {
+ set ret [eval {$db get} $txn {-recno $key}]
+ error_check_good "db_get:$method" \
+ [lindex [lindex $ret 0] 1] [pad_data $method $str]
+ set ret [$dbc get -get_both $key [pad_data $method $str]]
+ error_check_good "db_get_both:$method" \
+ [lindex [lindex $ret 0] 1] [pad_data $method $str]
+}
+
+# Testing of non-record-based methods includes duplicates
+# and get_both_range.
+proc test033_check {db dbc method str txn ndups} {
+ for {set i 1} {$i <= $ndups } { incr i } {
+ set datastr $i:$str
+
+ set ret [eval {$db get} $txn {-get_both $str $datastr}]
+ error_check_good "db_get_both:dup#" \
+ [lindex [lindex $ret 0] 1] $datastr
+
+ set ret [$dbc get -get_both $str $datastr]
+ error_check_good "dbc_get_both:dup#" \
+ [lindex [lindex $ret 0] 1] $datastr
+
+ set ret [$dbc get -get_both_range $str $datastr]
+ error_check_good "dbc_get_both_range:dup#" \
+ [lindex [lindex $ret 0] 1] $datastr
+ }
+
+ # Now retrieve non-existent dup (i is ndups + 1)
+ set datastr $i:$str
+ set ret [eval {$db get} $txn {-get_both $str $datastr}]
+ error_check_good db_get_both:dupfailure [llength $ret] 0
+ set ret [$dbc get -get_both $str $datastr]
+ error_check_good dbc_get_both:dupfailure [llength $ret] 0
+ set ret [$dbc get -get_both_range $str $datastr]
+ error_check_good dbc_get_both_range [llength $ret] 0
+}
diff --git a/bdb/test/test034.tcl b/bdb/test/test034.tcl
index b82f369f791..647ad940815 100644
--- a/bdb/test/test034.tcl
+++ b/bdb/test/test034.tcl
@@ -1,12 +1,13 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1998, 1999, 2000
+# Copyright (c) 1998-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: test034.tcl,v 11.4 2000/02/14 03:00:19 bostic Exp $
+# $Id: test034.tcl,v 11.8 2002/01/11 15:53:46 bostic Exp $
#
-# DB Test 34 {access method}
-# DB_GET_BOTH functionality with off-page duplicates.
+# TEST test034
+# TEST test032 with off-page duplicates
+# TEST DB_GET_BOTH, DB_GET_BOTH_RANGE functionality with off-page duplicates.
proc test034 { method {nentries 10000} args} {
# Test with off-page duplicates
eval {test032 $method $nentries 20 34 -pagesize 512} $args
diff --git a/bdb/test/test035.tcl b/bdb/test/test035.tcl
index e2afef4afb3..06796b1e9aa 100644
--- a/bdb/test/test035.tcl
+++ b/bdb/test/test035.tcl
@@ -1,16 +1,16 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: test035.tcl,v 11.3 2000/02/14 03:00:19 bostic Exp $
+# $Id: test035.tcl,v 11.8 2002/07/22 17:00:39 sue Exp $
#
-# DB Test 35 {access method}
-# DB_GET_BOTH functionality with off-page duplicates.
+# TEST test035
+# TEST Test033 with off-page duplicates
+# TEST DB_GET_BOTH functionality with off-page duplicates.
proc test035 { method {nentries 10000} args} {
# Test with off-page duplicates
eval {test033 $method $nentries 20 35 -pagesize 512} $args
-
# Test with multiple pages of off-page duplicates
eval {test033 $method [expr $nentries / 10] 100 35 -pagesize 512} $args
}
diff --git a/bdb/test/test036.tcl b/bdb/test/test036.tcl
index 4d859c0652a..4e54f363ff8 100644
--- a/bdb/test/test036.tcl
+++ b/bdb/test/test036.tcl
@@ -1,27 +1,27 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: test036.tcl,v 11.13 2000/08/25 14:21:55 sue Exp $
+# $Id: test036.tcl,v 11.18 2002/05/22 15:42:51 sue Exp $
#
-# DB Test 36 {access method}
-# Put nentries key/data pairs (from the dictionary) using a cursor
-# and KEYFIRST and KEYLAST (this tests the case where use use cursor
-# put for non-existent keys).
+# TEST test036
+# TEST Test KEYFIRST and KEYLAST when the key doesn't exist
+# TEST Put nentries key/data pairs (from the dictionary) using a cursor
+# TEST and KEYFIRST and KEYLAST (this tests the case where we use cursor
+# TEST put for non-existent keys).
proc test036 { method {nentries 10000} args } {
source ./include.tcl
set args [convert_args $method $args]
set omethod [convert_method $method]
-
- puts "Test036: $method ($args) $nentries equal key/data pairs"
if { [is_record_based $method] == 1 } {
puts "Test036 skipping for method recno"
return
}
# Create the database and open the dictionary
+ set txnenv 0
set eindex [lsearch -exact $args "-env"]
#
# If we are using an env, then testfile should just be the db name.
@@ -33,13 +33,27 @@ proc test036 { method {nentries 10000} args } {
set testfile test036.db
incr eindex
set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ #
+ # If we are using txns and running with the
+ # default, set the default down a bit.
+ #
+ if { $nentries == 10000 } {
+ set nentries 100
+ }
+ }
+ set testdir [get_home $env]
}
+
+ puts "Test036: $method ($args) $nentries equal key/data pairs"
set t1 $testdir/t1
set t2 $testdir/t2
set t3 $testdir/t3
cleanup $testdir $env
set db [eval {berkdb_open \
- -create -truncate -mode 0644} $args {$omethod $testfile}]
+ -create -mode 0644} $args {$omethod $testfile}]
error_check_good dbopen [is_valid_db $db] TRUE
set did [open $dict]
@@ -56,8 +70,13 @@ proc test036 { method {nentries 10000} args } {
}
puts "\tTest036.a: put/get loop KEYFIRST"
# Here is the loop where we put and get each key/data pair
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
set dbc [eval {$db cursor} $txn]
- error_check_good cursor [is_substr $dbc $db] 1
+ error_check_good cursor [is_valid_cursor $dbc $db] TRUE
while { [gets $did str] != -1 && $count < $nentries } {
if { [is_record_based $method] == 1 } {
global kvals
@@ -67,7 +86,7 @@ proc test036 { method {nentries 10000} args } {
} else {
set key $str
}
- set ret [eval {$dbc put} $txn $pflags {-keyfirst $key $str}]
+ set ret [eval {$dbc put} $pflags {-keyfirst $key $str}]
error_check_good put $ret 0
set ret [eval {$db get} $txn $gflags {$key}]
@@ -75,10 +94,18 @@ proc test036 { method {nentries 10000} args } {
incr count
}
error_check_good dbc_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
puts "\tTest036.a: put/get loop KEYLAST"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
set dbc [eval {$db cursor} $txn]
- error_check_good cursor [is_substr $dbc $db] 1
+ error_check_good cursor [is_valid_cursor $dbc $db] TRUE
while { [gets $did str] != -1 && $count < $nentries } {
if { [is_record_based $method] == 1 } {
global kvals
@@ -96,12 +123,23 @@ proc test036 { method {nentries 10000} args } {
incr count
}
error_check_good dbc_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
close $did
# Now we will get each key from the DB and compare the results
# to the original.
puts "\tTest036.c: dump file"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
dump_file $db $txn $t1 $checkfunc
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
error_check_good db_close [$db close] 0
# Now compare the keys to see if they match the dictionary (or ints)
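# Sketch of the cursor put test036 exercises: -keyfirst (and its sibling
# -keylast) insert through a cursor even when the key does not exist yet,
# which is exactly the case the test targets.  Key and data here are
# hypothetical; $db is assumed to be a non-recno handle in a
# transactional environment.
proc keyfirst_put_demo { env db } {
    set t [$env txn]
    set dbc [$db cursor -txn $t]
    error_check_good put [$dbc put -keyfirst brandnewkey somedata] 0
    set ret [$db get -txn $t brandnewkey]
    error_check_good get [lindex [lindex $ret 0] 1] somedata
    error_check_good dbc_close [$dbc close] 0
    error_check_good commit [$t commit] 0
}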
diff --git a/bdb/test/test037.tcl b/bdb/test/test037.tcl
index 31528c6ee54..0b2e2989949 100644
--- a/bdb/test/test037.tcl
+++ b/bdb/test/test037.tcl
@@ -1,12 +1,15 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: test037.tcl,v 11.11 2000/08/25 14:21:55 sue Exp $
+# $Id: test037.tcl,v 11.18 2002/03/15 16:30:54 sue Exp $
#
-# Test037: RMW functionality.
+# TEST test037
+# TEST Test DB_RMW
proc test037 { method {nentries 100} args } {
+ global encrypt
+
source ./include.tcl
set eindex [lsearch -exact $args "-env"]
#
@@ -21,6 +24,8 @@ proc test037 { method {nentries 100} args } {
puts "Test037: RMW $method"
set args [convert_args $method $args]
+ set encargs ""
+ set args [split_encargs $args encargs]
set omethod [convert_method $method]
# Create the database
@@ -28,7 +33,7 @@ proc test037 { method {nentries 100} args } {
set testfile test037.db
set local_env \
- [berkdb env -create -mode 0644 -txn -home $testdir]
+ [eval {berkdb_env -create -mode 0644 -txn} $encargs -home $testdir]
error_check_good dbenv [is_valid_env $local_env] TRUE
set db [eval {berkdb_open \
@@ -73,9 +78,9 @@ proc test037 { method {nentries 100} args } {
puts "\tTest037.b: Setting up environments"
# Open local environment
- set env_cmd [concat berkdb env -create -txn -home $testdir]
+ set env_cmd [concat berkdb_env -create -txn $encargs -home $testdir]
set local_env [eval $env_cmd]
- error_check_good dbenv [is_valid_widget $local_env env] TRUE
+ error_check_good dbenv [is_valid_env $local_env] TRUE
# Open local transaction
set local_txn [$local_env txn]
@@ -101,11 +106,11 @@ proc test037 { method {nentries 100} args } {
set did [open $dict]
set rkey 0
- set db [berkdb_open -env $local_env $testfile]
+ set db [berkdb_open -auto_commit -env $local_env $testfile]
error_check_good dbopen [is_valid_db $db] TRUE
set rdb [send_cmd $f1 \
- "berkdb_open -env $remote_env -mode 0644 $testfile"]
- error_check_good remote:dbopen [is_valid_widget $rdb db] TRUE
+ "berkdb_open -auto_commit -env $remote_env -mode 0644 $testfile"]
+ error_check_good remote:dbopen [is_valid_db $rdb] TRUE
puts "\tTest037.d: Testing without RMW"
@@ -142,12 +147,12 @@ proc test037 { method {nentries 100} args } {
# Open local transaction
set local_txn [$local_env txn]
error_check_good \
- txn_open [is_valid_widget $local_txn $local_env.txn] TRUE
+ txn_open [is_valid_txn $local_txn $local_env] TRUE
# Open remote transaction
set remote_txn [send_cmd $f1 "$remote_env txn"]
error_check_good remote:txn_open \
- [is_valid_widget $remote_txn $remote_env.txn] TRUE
+ [is_valid_txn $remote_txn $remote_env] TRUE
# Now, get a key and try to "get" it from both DBs.
error_check_bad "gets on new open" [gets $did str] -1
diff --git a/bdb/test/test038.tcl b/bdb/test/test038.tcl
index 2a726f1bcd9..3babde8fe0b 100644
--- a/bdb/test/test038.tcl
+++ b/bdb/test/test038.tcl
@@ -1,20 +1,22 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: test038.tcl,v 11.12 2000/08/25 14:21:56 sue Exp $
+# $Id: test038.tcl,v 11.23 2002/06/11 14:09:57 sue Exp $
#
-# DB Test 38 {access method}
-# Use the first 10,000 entries from the dictionary.
-# Insert each with self as key and "ndups" duplicates
-# For the data field, prepend the letters of the alphabet
-# in a random order so that we force the duplicate sorting
-# code to do something.
-# By setting ndups large, we can make this an off-page test
-# After all are entered; test the DB_GET_BOTH functionality
-# first by retrieving each dup in the file explicitly. Then
-# remove each duplicate and try DB_GET_BOTH again.
+# TEST test038
+# TEST DB_GET_BOTH, DB_GET_BOTH_RANGE on deleted items
+# TEST
+# TEST Use the first 10,000 entries from the dictionary. Insert each with
+# TEST self as key and "ndups" duplicates. For the data field, prepend the
+# TEST letters of the alphabet in a random order so we force the duplicate
+# TEST sorting code to do something. By setting ndups large, we can make
+# TEST this an off-page test
+# TEST
+# TEST Test the DB_GET_BOTH and DB_GET_BOTH_RANGE functionality by retrieving
+# TEST each dup in the file explicitly. Then remove each duplicate and try
+# TEST the retrieval again.
proc test038 { method {nentries 10000} {ndups 5} {tnum 38} args } {
global alphabet
global rand_init
@@ -25,7 +27,13 @@ proc test038 { method {nentries 10000} {ndups 5} {tnum 38} args } {
set args [convert_args $method $args]
set omethod [convert_method $method]
+ if { [is_record_based $method] == 1 || \
+ [is_rbtree $method] == 1 } {
+ puts "Test0$tnum skipping for method $method"
+ return
+ }
# Create the database and open the dictionary
+ set txnenv 0
set eindex [lsearch -exact $args "-env"]
#
# If we are using an env, then testfile should just be the db name.
@@ -39,6 +47,19 @@ proc test038 { method {nentries 10000} {ndups 5} {tnum 38} args } {
set checkdb checkdb.db
incr eindex
set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ #
+ # If we are using txns and running with the
+ # default, set the default down a bit.
+ #
+ if { $nentries == 10000 } {
+ set nentries 100
+ }
+ reduce_dups nentries ndups
+ }
+ set testdir [get_home $env]
}
set t1 $testdir/t1
set t2 $testdir/t2
@@ -47,18 +68,13 @@ proc test038 { method {nentries 10000} {ndups 5} {tnum 38} args } {
puts "Test0$tnum: \
$method ($args) $nentries small sorted dup key/data pairs"
- if { [is_record_based $method] == 1 || \
- [is_rbtree $method] == 1 } {
- puts "Test0$tnum skipping for method $method"
- return
- }
- set db [eval {berkdb_open -create -truncate -mode 0644 \
+ set db [eval {berkdb_open -create -mode 0644 \
$omethod -dup -dupsort} $args {$testfile}]
error_check_good dbopen [is_valid_db $db] TRUE
set did [open $dict]
- set check_db [berkdb_open \
- -create -truncate -mode 0644 -hash $checkdb]
+ set check_db [eval {berkdb_open \
+ -create -mode 0644 -hash} $args {$checkdb}]
error_check_good dbopen:check_db [is_valid_db $check_db] TRUE
set pflags ""
@@ -68,8 +84,13 @@ proc test038 { method {nentries 10000} {ndups 5} {tnum 38} args } {
# Here is the loop where we put and get each key/data pair
puts "\tTest0$tnum.a: Put/get loop"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
set dbc [eval {$db cursor} $txn]
- error_check_good cursor_open [is_substr $dbc $db] 1
+ error_check_good cursor_open [is_valid_cursor $dbc $db] TRUE
while { [gets $did str] != -1 && $count < $nentries } {
set dups ""
for { set i 1 } { $i <= $ndups } { incr i } {
@@ -125,14 +146,22 @@ proc test038 { method {nentries 10000} {ndups 5} {tnum 38} args } {
incr count
}
error_check_good cursor_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
close $did
# Now check the duplicates, then delete then recheck
puts "\tTest0$tnum.b: Checking and Deleting duplicates"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
set dbc [eval {$db cursor} $txn]
- error_check_good cursor_open [is_substr $dbc $db] 1
+ error_check_good cursor_open [is_valid_cursor $dbc $db] TRUE
set check_c [eval {$check_db cursor} $txn]
- error_check_good cursor_open [is_substr $check_c $check_db] 1
+ error_check_good cursor_open [is_valid_cursor $check_c $check_db] TRUE
for {set ndx 0} {$ndx < $ndups} {incr ndx} {
for {set ret [$check_c get -first]} \
@@ -145,16 +174,37 @@ proc test038 { method {nentries 10000} {ndups 5} {tnum 38} args } {
set nn [expr $ndx * 3]
set pref [string range $d $nn [expr $nn + 1]]
set data $pref:$k
- set ret [eval {$dbc get} $txn {-get_both $k $data}]
+ set ret [$dbc get -get_both $k $data]
error_check_good \
get_both_key:$k [lindex [lindex $ret 0] 0] $k
error_check_good \
get_both_data:$k [lindex [lindex $ret 0] 1] $data
+
+ set ret [$dbc get -get_both_range $k $pref]
+ error_check_good \
+ get_both_key:$k [lindex [lindex $ret 0] 0] $k
+ error_check_good \
+ get_both_data:$k [lindex [lindex $ret 0] 1] $data
+
set ret [$dbc del]
error_check_good del $ret 0
+
set ret [eval {$db get} $txn {-get_both $k $data}]
error_check_good error_case:$k [llength $ret] 0
+ # We should either not find anything (if deleting the
+ # largest duplicate in the set) or a duplicate that
+ # sorts larger than the one we deleted.
+ set ret [$dbc get -get_both_range $k $pref]
+ if { [llength $ret] != 0 } {
+				set datastr [lindex [lindex $ret 0] 1]
+ if {[string compare \
+ $pref [lindex [lindex $ret 0] 1]] >= 0} {
+ error_check_good \
+ error_case_range:sorted_dups($pref,$datastr) 0 1
+ }
+ }
+
if {$ndx != 0} {
set n [expr ($ndx - 1) * 3]
set pref [string range $d $n [expr $n + 1]]
@@ -167,8 +217,11 @@ proc test038 { method {nentries 10000} {ndups 5} {tnum 38} args } {
}
error_check_good check_c:close [$check_c close] 0
- error_check_good check_db:close [$check_db close] 0
-
error_check_good dbc_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ error_check_good check_db:close [$check_db close] 0
error_check_good db_close [$db close] 0
}
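
The new checks above rely on the sorted-duplicate behaviour of the cursor get: -get_both needs an exact key/data match, while -get_both_range positions on the smallest duplicate greater than or equal to the supplied datum. A minimal standalone sketch of that difference, assuming the Berkeley DB Tcl bindings are loaded; the file and key names are illustrative only, not part of the patch:

	# Illustrative only: exact match vs. range match on sorted duplicates.
	set db [berkdb_open -create -mode 0644 -btree -dup -dupsort getboth.db]
	$db put fruit apple:1
	$db put fruit apple:2
	$db put fruit banana:1
	set dbc [$db cursor]
	puts [$dbc get -get_both fruit apple:2]     ;# exact key/data pair only
	puts [$dbc get -get_both_range fruit apple] ;# smallest dup >= "apple", i.e. apple:1
	$dbc close
	$db close
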
diff --git a/bdb/test/test039.tcl b/bdb/test/test039.tcl
index 957468ce542..2bbc83ebe05 100644
--- a/bdb/test/test039.tcl
+++ b/bdb/test/test039.tcl
@@ -1,20 +1,23 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: test039.tcl,v 11.11 2000/08/25 14:21:56 sue Exp $
+# $Id: test039.tcl,v 11.20 2002/06/11 14:09:57 sue Exp $
#
-# DB Test 39 {access method}
-# Use the first 10,000 entries from the dictionary.
-# Insert each with self as key and "ndups" duplicates
-# For the data field, prepend the letters of the alphabet
-# in a random order so that we force the duplicate sorting
-# code to do something.
-# By setting ndups large, we can make this an off-page test
-# After all are entered; test the DB_GET_BOTH functionality
-# first by retrieving each dup in the file explicitly. Then
-# remove each duplicate and try DB_GET_BOTH again.
+# TEST test039
+# TEST DB_GET_BOTH/DB_GET_BOTH_RANGE on deleted items without comparison
+# TEST function.
+# TEST
+# TEST Use the first 10,000 entries from the dictionary. Insert each with
+# TEST self as key and "ndups" duplicates. For the data field, prepend the
+# TEST letters of the alphabet in a random order so we force the duplicate
+# TEST sorting code to do something. By setting ndups large, we can make
+# TEST this an off-page test.
+# TEST
+# TEST Test the DB_GET_BOTH and DB_GET_BOTH_RANGE functionality by retrieving
+# TEST each dup in the file explicitly. Then remove each duplicate and try
+# TEST the retrieval again.
proc test039 { method {nentries 10000} {ndups 5} {tnum 39} args } {
global alphabet
global rand_init
@@ -25,7 +28,13 @@ proc test039 { method {nentries 10000} {ndups 5} {tnum 39} args } {
set args [convert_args $method $args]
set omethod [convert_method $method]
+ if { [is_record_based $method] == 1 || \
+ [is_rbtree $method] == 1 } {
+ puts "Test0$tnum skipping for method $method"
+ return
+ }
# Create the database and open the dictionary
+ set txnenv 0
set eindex [lsearch -exact $args "-env"]
#
# If we are using an env, then testfile should just be the db name.
@@ -39,26 +48,35 @@ proc test039 { method {nentries 10000} {ndups 5} {tnum 39} args } {
set checkdb checkdb.db
incr eindex
set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ #
+ # If we are using txns and running with the
+ # default, set the default down a bit.
+ #
+ if { $nentries == 10000 } {
+ set nentries 100
+ }
+ reduce_dups nentries ndups
+ }
+ set testdir [get_home $env]
}
set t1 $testdir/t1
set t2 $testdir/t2
set t3 $testdir/t3
cleanup $testdir $env
- puts "Test0$tnum: $method $nentries small unsorted dup key/data pairs"
- if { [is_record_based $method] == 1 || \
- [is_rbtree $method] == 1 } {
- puts "Test0$tnum skipping for method $method"
- return
- }
+ puts "Test0$tnum: $method $nentries \
+ small $ndups unsorted dup key/data pairs"
- set db [eval {berkdb_open -create -truncate -mode 0644 \
+ set db [eval {berkdb_open -create -mode 0644 \
$omethod -dup} $args {$testfile}]
error_check_good dbopen [is_valid_db $db] TRUE
set did [open $dict]
- set check_db \
- [berkdb_open -create -truncate -mode 0644 -hash $checkdb]
+ set check_db [eval \
+ {berkdb_open -create -mode 0644 -hash} $args {$checkdb}]
error_check_good dbopen:check_db [is_valid_db $check_db] TRUE
set pflags ""
@@ -68,8 +86,13 @@ proc test039 { method {nentries 10000} {ndups 5} {tnum 39} args } {
# Here is the loop where we put and get each key/data pair
puts "\tTest0$tnum.a: Put/get loop"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
set dbc [eval {$db cursor} $txn]
- error_check_good cursor_open [is_substr $dbc $db] 1
+ error_check_good cursor_open [is_valid_cursor $dbc $db] TRUE
while { [gets $did str] != -1 && $count < $nentries } {
set dups ""
for { set i 1 } { $i <= $ndups } { incr i } {
@@ -124,14 +147,22 @@ proc test039 { method {nentries 10000} {ndups 5} {tnum 39} args } {
incr count
}
error_check_good cursor_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
close $did
# Now check the duplicates, then delete then recheck
puts "\tTest0$tnum.b: Checking and Deleting duplicates"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
set dbc [eval {$db cursor} $txn]
- error_check_good cursor_open [is_substr $dbc $db] 1
+ error_check_good cursor_open [is_valid_cursor $dbc $db] TRUE
set check_c [eval {$check_db cursor} $txn]
- error_check_good cursor_open [is_substr $check_c $check_db] 1
+ error_check_good cursor_open [is_valid_cursor $check_c $check_db] TRUE
for {set ndx 0} {$ndx < $ndups} {incr ndx} {
for {set ret [$check_c get -first]} \
@@ -144,8 +175,7 @@ proc test039 { method {nentries 10000} {ndups 5} {tnum 39} args } {
set nn [expr $ndx * 3]
set pref [string range $d $nn [expr $nn + 1]]
set data $pref:$k
- set ret \
- [eval {$dbc get} $txn $gflags {-get_both $k $data}]
+ set ret [$dbc get -get_both $k $data]
error_check_good \
get_both_key:$k [lindex [lindex $ret 0] 0] $k
error_check_good \
@@ -154,24 +184,28 @@ proc test039 { method {nentries 10000} {ndups 5} {tnum 39} args } {
set ret [$dbc del]
error_check_good del $ret 0
- set ret \
- [eval {$dbc get} $txn $gflags {-get_both $k $data}]
- error_check_good error_case:$k [llength $ret] 0
+ set ret [$dbc get -get_both $k $data]
+ error_check_good get_both:$k [llength $ret] 0
+
+ set ret [$dbc get -get_both_range $k $data]
+ error_check_good get_both_range:$k [llength $ret] 0
if {$ndx != 0} {
set n [expr ($ndx - 1) * 3]
set pref [string range $d $n [expr $n + 1]]
set data $pref:$k
- set ret [eval {$dbc get} \
- $txn $gflags {-get_both $k $data}]
+ set ret [$dbc get -get_both $k $data]
error_check_good error_case:$k [llength $ret] 0
}
}
}
error_check_good check_c:close [$check_c close] 0
- error_check_good check_db:close [$check_db close] 0
-
error_check_good dbc_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ error_check_good check_db:close [$check_db close] 0
error_check_good db_close [$db close] 0
}
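
Both test038 and test039 now wrap each phase in an explicit transaction whenever the caller's environment is transactional. A minimal sketch of that idiom, written without the suite's helper procs; directory and file names are made up and the Berkeley DB Tcl bindings are assumed to be loaded:

	# Illustrative only: the txn-wrapping pattern added throughout these hunks.
	set testdir ./SKETCHDIR
	file mkdir $testdir
	set env [berkdb_env -create -txn -home $testdir]
	set db [berkdb_open -create -auto_commit -env $env -btree sketch.db]
	set t [$env txn]                       ;# begin a transaction
	set txn "-txn $t"
	# Pass the transaction to each operation, exactly as the tests do.
	set ret [eval {$db put} $txn {somekey somedata}]
	puts "put returned $ret"
	$t commit
	$db close
	$env close
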
diff --git a/bdb/test/test040.tcl b/bdb/test/test040.tcl
index 912e1735d8e..1856f78fc2e 100644
--- a/bdb/test/test040.tcl
+++ b/bdb/test/test040.tcl
@@ -1,12 +1,13 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1998, 1999, 2000
+# Copyright (c) 1998-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: test040.tcl,v 11.3 2000/02/14 03:00:20 bostic Exp $
+# $Id: test040.tcl,v 11.6 2002/01/11 15:53:47 bostic Exp $
#
-# DB Test 40 {access method}
-# DB_GET_BOTH functionality with off-page duplicates.
+# TEST test040
+# TEST Test038 with off-page duplicates
+# TEST DB_GET_BOTH functionality with off-page duplicates.
proc test040 { method {nentries 10000} args} {
# Test with off-page duplicates
eval {test038 $method $nentries 20 40 -pagesize 512} $args
diff --git a/bdb/test/test041.tcl b/bdb/test/test041.tcl
index bba89f49b5a..fdcbdbef3d7 100644
--- a/bdb/test/test041.tcl
+++ b/bdb/test/test041.tcl
@@ -1,12 +1,13 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: test041.tcl,v 11.3 2000/02/14 03:00:20 bostic Exp $
+# $Id: test041.tcl,v 11.6 2002/01/11 15:53:47 bostic Exp $
#
-# DB Test 41 {access method}
-# DB_GET_BOTH functionality with off-page duplicates.
+# TEST test041
+# TEST Test039 with off-page duplicates
+# TEST DB_GET_BOTH functionality with off-page duplicates.
proc test041 { method {nentries 10000} args} {
# Test with off-page duplicates
eval {test039 $method $nentries 20 41 -pagesize 512} $args
diff --git a/bdb/test/test042.tcl b/bdb/test/test042.tcl
index 232cb3a6b0e..9f444b8349c 100644
--- a/bdb/test/test042.tcl
+++ b/bdb/test/test042.tcl
@@ -1,27 +1,26 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: test042.tcl,v 11.24 2000/08/25 14:21:56 sue Exp $
+# $Id: test042.tcl,v 11.37 2002/09/05 17:23:07 sandstro Exp $
#
-# DB Test 42 {access method}
-#
-# Multiprocess DB test; verify that locking is working for the concurrent
-# access method product.
-#
-# Use the first "nentries" words from the dictionary. Insert each with self
-# as key and a fixed, medium length data string. Then fire off multiple
-# processes that bang on the database. Each one should try to read and write
-# random keys. When they rewrite, they'll append their pid to the data string
-# (sometimes doing a rewrite sometimes doing a partial put). Some will use
-# cursors to traverse through a few keys before finding one to write.
-
-set datastr abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz
+# TEST test042
+# TEST Concurrent Data Store test (CDB)
+# TEST
+# TEST Multiprocess DB test; verify that locking is working for the
+# TEST concurrent access method product.
+# TEST
+# TEST Use the first "nentries" words from the dictionary. Insert each with
+# TEST self as key and a fixed, medium length data string. Then fire off
+# TEST multiple processes that bang on the database. Each one should try to
+# TEST read and write random keys. When they rewrite, they'll append their
+# TEST pid to the data string (sometimes doing a rewrite sometimes doing a
+# TEST partial put). Some will use cursors to traverse through a few keys
+# TEST before finding one to write.
proc test042 { method {nentries 1000} args } {
- global datastr
- source ./include.tcl
+ global encrypt
#
# If we are using an env, then skip this test. It needs its own.
@@ -32,10 +31,25 @@ proc test042 { method {nentries 1000} args } {
puts "Test042 skipping for env $env"
return
}
+
set args [convert_args $method $args]
- set omethod [convert_method $method]
+ if { $encrypt != 0 } {
+ puts "Test042 skipping for security"
+ return
+ }
+ test042_body $method $nentries 0 $args
+ test042_body $method $nentries 1 $args
+}
+
+proc test042_body { method nentries alldb args } {
+ source ./include.tcl
- puts "Test042: CDB Test $method $nentries"
+ if { $alldb } {
+ set eflag "-cdb -cdb_alldb"
+ } else {
+ set eflag "-cdb"
+ }
+ puts "Test042: CDB Test ($eflag) $method $nentries"
# Set initial parameters
set do_exit 0
@@ -62,44 +76,24 @@ proc test042 { method {nentries 1000} args } {
env_cleanup $testdir
- set env [berkdb env -create -cdb -home $testdir]
- error_check_good dbenv [is_valid_widget $env env] TRUE
-
- set db [eval {berkdb_open -env $env -create -truncate \
- -mode 0644 $omethod} $oargs {$testfile}]
- error_check_good dbopen [is_valid_widget $db db] TRUE
+ set env [eval {berkdb_env -create} $eflag -home $testdir]
+ error_check_good dbenv [is_valid_env $env] TRUE
- set did [open $dict]
-
- set pflags ""
- set gflags ""
- set txn ""
- set count 0
-
- # Here is the loop where we put each key/data pair
- puts "\tTest042.a: put/get loop"
- while { [gets $did str] != -1 && $count < $nentries } {
- if { [is_record_based $method] == 1 } {
- set key [expr $count + 1]
- } else {
- set key $str
+ # Env is created, now set up database
+ test042_dbinit $env $nentries $method $oargs $testfile 0
+ if { $alldb } {
+ for { set i 1 } {$i < $procs} {incr i} {
+ test042_dbinit $env $nentries $method $oargs \
+ $testfile $i
}
- set ret [eval {$db put} \
- $txn $pflags {$key [chop_data $method $datastr]}]
- error_check_good put:$db $ret 0
- incr count
}
- close $did
- error_check_good close:$db [$db close] 0
-
- # Database is created, now set up environment
# Remove old mpools and Open/create the lock and mpool regions
error_check_good env:close:$env [$env close] 0
set ret [berkdb envremove -home $testdir]
error_check_good env_remove $ret 0
- set env [berkdb env -create -cdb -home $testdir]
+ set env [eval {berkdb_env -create} $eflag -home $testdir]
error_check_good dbenv [is_valid_widget $env env] TRUE
if { $do_exit == 1 } {
@@ -112,16 +106,21 @@ proc test042 { method {nentries 1000} args } {
set pidlist {}
for { set i 0 } {$i < $procs} {incr i} {
+ if { $alldb } {
+ set tf $testfile$i
+ } else {
+ set tf ${testfile}0
+ }
puts "exec $tclsh_path $test_path/wrap.tcl \
mdbscript.tcl $testdir/test042.$i.log \
- $method $testdir $testfile $nentries $iter $i $procs &"
+ $method $testdir $tf $nentries $iter $i $procs &"
set p [exec $tclsh_path $test_path/wrap.tcl \
mdbscript.tcl $testdir/test042.$i.log $method \
- $testdir $testfile $nentries $iter $i $procs &]
+ $testdir $tf $nentries $iter $i $procs &]
lappend pidlist $p
}
puts "Test042: $procs independent processes now running"
- watch_procs
+ watch_procs $pidlist
# Check for test failure
set e [eval findfail [glob $testdir/test042.*.log]]
@@ -147,3 +146,36 @@ proc rand_key { method nkeys renum procs} {
return [berkdb random_int 0 [expr $nkeys - 1]]
}
}
+
+proc test042_dbinit { env nentries method oargs tf ext } {
+ global datastr
+ source ./include.tcl
+
+ set omethod [convert_method $method]
+ set db [eval {berkdb_open -env $env -create \
+ -mode 0644 $omethod} $oargs {$tf$ext}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set did [open $dict]
+
+ set pflags ""
+ set gflags ""
+ set txn ""
+ set count 0
+
+ # Here is the loop where we put each key/data pair
+ puts "\tTest042.a: put loop $tf$ext"
+ while { [gets $did str] != -1 && $count < $nentries } {
+ if { [is_record_based $method] == 1 } {
+ set key [expr $count + 1]
+ } else {
+ set key $str
+ }
+ set ret [eval {$db put} \
+ $txn $pflags {$key [chop_data $method $datastr]}]
+ error_check_good put:$db $ret 0
+ incr count
+ }
+ close $did
+ error_check_good close:$db [$db close] 0
+}
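
test042_body now cycles through both Concurrent Data Store locking modes: plain -cdb locks on a per-database basis, while adding -cdb_alldb makes every database in the environment share one set of locks. A hedged sketch of just the environment setup (the directory name is illustrative):

	# Illustrative only: the two CDB environment variants exercised above.
	file mkdir ./SKETCHDIR
	set env [berkdb_env -create -cdb -home ./SKETCHDIR]             ;# per-database locking
	$env close
	berkdb envremove -home ./SKETCHDIR
	set env [berkdb_env -create -cdb -cdb_alldb -home ./SKETCHDIR]  ;# one lock set for all databases
	$env close
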
diff --git a/bdb/test/test043.tcl b/bdb/test/test043.tcl
index 274ec1b7184..eea7ec86d54 100644
--- a/bdb/test/test043.tcl
+++ b/bdb/test/test043.tcl
@@ -1,12 +1,13 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: test043.tcl,v 11.12 2000/08/25 14:21:56 sue Exp $
+# $Id: test043.tcl,v 11.17 2002/05/22 15:42:52 sue Exp $
#
-# DB Test 43 {method nentries}
-# Test the Record number implicit creation and renumbering options.
+# TEST test043
+# TEST Recno renumbering and implicit creation test
+# TEST Test the Record number implicit creation and renumbering options.
proc test043 { method {nentries 10000} args} {
source ./include.tcl
@@ -22,6 +23,7 @@ proc test043 { method {nentries 10000} args} {
}
# Create the database and open the dictionary
+ set txnenv 0
set eindex [lsearch -exact $args "-env"]
#
# If we are using an env, then testfile should just be the db name.
@@ -33,11 +35,23 @@ proc test043 { method {nentries 10000} args} {
set testfile test043.db
incr eindex
set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ #
+ # If we are using txns and running with the
+ # default, set the default down a bit.
+ #
+ if { $nentries == 10000 } {
+ set nentries 100
+ }
+ }
+ set testdir [get_home $env]
}
cleanup $testdir $env
# Create the database
- set db [eval {berkdb_open -create -truncate -mode 0644} $args \
+ set db [eval {berkdb_open -create -mode 0644} $args \
{$omethod $testfile}]
error_check_good dbopen [is_valid_db $db] TRUE
@@ -53,16 +67,29 @@ proc test043 { method {nentries 10000} args} {
}
puts "\tTest043.a: insert keys at $interval record intervals"
while { $count <= $nentries } {
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
set ret [eval {$db put} \
$txn $pflags {$count [chop_data $method $count]}]
error_check_good "$db put $count" $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
set last $count
incr count $interval
}
puts "\tTest043.b: get keys using DB_FIRST/DB_NEXT"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
set dbc [eval {$db cursor} $txn]
- error_check_good "$db cursor" [is_substr $dbc $db] 1
+ error_check_good "$db cursor" [is_valid_cursor $dbc $db] TRUE
set check 1
for { set rec [$dbc get -first] } { [llength $rec] != 0 } {
@@ -158,5 +185,8 @@ proc test043 { method {nentries 10000} args} {
}
}
error_check_good dbc_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
error_check_good db_close [$db close] 0
}
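
Test043.a/b above store records at sparse record numbers and then walk the file with DB_FIRST/DB_NEXT. A rough sketch of the same idea outside the harness, with made-up names; a minimal illustration, not the test itself:

	# Illustrative only: sparse recno puts, then a forward cursor walk.
	set db [berkdb_open -create -mode 0644 -recno sparse.db]
	foreach recno {1 5 9} { $db put $recno data$recno }
	set dbc [$db cursor]
	for {set r [$dbc get -first]} {[llength $r] != 0} {set r [$dbc get -next]} {
		puts $r                   ;# prints the stored {recno data} pairs
	}
	$dbc close
	$db close
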
diff --git a/bdb/test/test044.tcl b/bdb/test/test044.tcl
index 0be7a704961..67cf3ea24b8 100644
--- a/bdb/test/test044.tcl
+++ b/bdb/test/test044.tcl
@@ -1,25 +1,31 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: test044.tcl,v 11.26 2000/10/27 13:23:56 sue Exp $
+# $Id: test044.tcl,v 11.32 2002/07/16 20:53:04 bostic Exp $
#
-# DB Test 44 {access method}
-# System integration DB test: verify that locking, recovery, checkpoint,
-# and all the other utilities basically work.
+# TEST test044
+# TEST Small system integration tests
+# TEST Test proper functioning of the checkpoint daemon,
+# TEST recovery, transactions, etc.
+# TEST
+# TEST System integration DB test: verify that locking, recovery, checkpoint,
+# TEST and all the other utilities basically work.
+# TEST
+# TEST The test consists of $nprocs processes operating on $nfiles files. A
+# TEST transaction consists of adding the same key/data pair to some random
+# TEST number of these files. We generate a bimodal distribution in key size
+# TEST with 70% of the keys being small (1-10 characters) and the remaining
+# TEST 30% of the keys being large (uniform distribution about mean $key_avg).
+# TEST If we generate a key, we first check to make sure that the key is not
+# TEST already in the dataset. If it is, we do a lookup.
#
-# The test consists of $nprocs processes operating on $nfiles files. A
-# transaction consists of adding the same key/data pair to some random
-# number of these files. We generate a bimodal distribution in key
-# size with 70% of the keys being small (1-10 characters) and the
-# remaining 30% of the keys being large (uniform distribution about
-# mean $key_avg). If we generate a key, we first check to make sure
-# that the key is not already in the dataset. If it is, we do a lookup.
-#
-# XXX This test uses grow-only files currently!
+# XXX
+# This test uses grow-only files currently!
proc test044 { method {nprocs 5} {nfiles 10} {cont 0} args } {
source ./include.tcl
+ global encrypt
global rand_init
set args [convert_args $method $args]
@@ -35,6 +41,10 @@ proc test044 { method {nprocs 5} {nfiles 10} {cont 0} args } {
puts "Test044 skipping for env $env"
return
}
+ if { $encrypt != 0 } {
+ puts "Test044 skipping for security"
+ return
+ }
puts "Test044: system integration test db $method $nprocs processes \
on $nfiles files"
@@ -62,7 +72,7 @@ proc test044 { method {nprocs 5} {nfiles 10} {cont 0} args } {
# Create an environment
puts "\tTest044.a: creating environment and $nfiles files"
- set dbenv [berkdb env -create -txn -home $testdir]
+ set dbenv [berkdb_env -create -txn -home $testdir]
error_check_good env_open [is_valid_env $dbenv] TRUE
# Create a bunch of files
@@ -97,7 +107,7 @@ proc test044 { method {nprocs 5} {nfiles 10} {cont 0} args } {
set cycle 1
set ncycles 3
while { $cycle <= $ncycles } {
- set dbenv [berkdb env -create -txn -home $testdir]
+ set dbenv [berkdb_env -create -txn -home $testdir]
error_check_good env_open [is_valid_env $dbenv] TRUE
# Fire off deadlock detector and checkpointer
@@ -128,16 +138,13 @@ proc test044 { method {nprocs 5} {nfiles 10} {cont 0} args } {
#
error_check_good env_close [$dbenv close] 0
- exec $KILL -9 $ddpid
- exec $KILL -9 $cppid
- #
- # Use catch so that if any of the children died, we don't
- # stop the script
- #
+ tclkill $ddpid
+ tclkill $cppid
+
foreach p $pidlist {
- set e [catch {eval exec \
- [concat $KILL -9 $p]} res]
+ tclkill $p
}
+
# Check for test failure
set e [eval findfail [glob $testdir/test044.*.log]]
error_check_good "FAIL: error message(s) in log files" $e 0
diff --git a/bdb/test/test045.tcl b/bdb/test/test045.tcl
index 65f031d0290..3825135facd 100644
--- a/bdb/test/test045.tcl
+++ b/bdb/test/test045.tcl
@@ -1,11 +1,17 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: test045.tcl,v 11.17 2000/10/19 23:15:22 ubell Exp $
+# $Id: test045.tcl,v 11.24 2002/02/07 17:50:10 sue Exp $
+#
+# TEST test045
+# TEST Small random tester
+# TEST Runs a number of random add/delete/retrieve operations.
+# TEST Tests both successful conditions and error conditions.
+# TEST
+# TEST Run the random db tester on the specified access method.
#
-# DB Test 45 Run the random db tester on the specified access method.
# Options are:
# -adds <maximum number of keys before you disable adds>
# -cursors <number of cursors>
@@ -17,11 +23,7 @@
# -keyavg <average key size>
proc test045 { method {nops 10000} args } {
source ./include.tcl
-
- if { [is_frecno $method] == 1 } {
- puts "\tSkipping Test045 for method $method."
- return
- }
+ global encrypt
#
# If we are using an env, then skip this test. It needs its own.
@@ -33,6 +35,10 @@ proc test045 { method {nops 10000} args } {
return
}
set args [convert_args $method $args]
+ if { $encrypt != 0 } {
+ puts "Test045 skipping for security"
+ return
+ }
set omethod [convert_method $method]
puts "Test045: Random tester on $method for $nops operations"
@@ -63,7 +69,7 @@ proc test045 { method {nops 10000} args } {
-errpct { incr i; set errpct [lindex $args $i] }
-init { incr i; set init [lindex $args $i] }
-keyavg { incr i; set keyavg [lindex $args $i] }
- -extent { incr i;
+ -extent { incr i;
lappend oargs "-extent" "100" }
default { lappend oargs [lindex $args $i] }
}
@@ -77,7 +83,7 @@ proc test045 { method {nops 10000} args } {
# Run the script with 3 times the number of initial elements to
# set it up.
set db [eval {berkdb_open \
- -create -truncate -mode 0644 $omethod} $oargs {$f}]
+ -create -mode 0644 $omethod} $oargs {$f}]
error_check_good dbopen:$f [is_valid_db $db] TRUE
set r [$db close]
@@ -90,7 +96,7 @@ proc test045 { method {nops 10000} args } {
if { $init != 0 } {
set n [expr 3 * $init]
exec $tclsh_path \
- $test_path/dbscript.tcl $f $n \
+ $test_path/dbscript.tcl $method $f $n \
1 $init $n $keyavg $dataavg $dups 0 -1 \
> $testdir/test045.init
}
@@ -101,11 +107,11 @@ proc test045 { method {nops 10000} args } {
puts "\tTest045.b: Now firing off berkdb rand dbscript, running: "
# Now the database is initialized, run a test
puts "$tclsh_path\
- $test_path/dbscript.tcl $f $nops $cursors $delete $adds \
+ $test_path/dbscript.tcl $method $f $nops $cursors $delete $adds \
$keyavg $dataavg $dups $errpct > $testdir/test045.log"
exec $tclsh_path \
- $test_path/dbscript.tcl $f \
+ $test_path/dbscript.tcl $method $f \
$nops $cursors $delete $adds $keyavg \
$dataavg $dups $errpct \
> $testdir/test045.log
diff --git a/bdb/test/test046.tcl b/bdb/test/test046.tcl
index 3bfed3ef5d8..4136f30aaa7 100644
--- a/bdb/test/test046.tcl
+++ b/bdb/test/test046.tcl
@@ -1,11 +1,12 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1999, 2000
+# Copyright (c) 1999-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: test046.tcl,v 11.26 2000/08/25 14:21:56 sue Exp $
+# $Id: test046.tcl,v 11.33 2002/05/24 15:24:55 sue Exp $
#
-# DB Test 46: Overwrite test of small/big key/data with cursor checks.
+# TEST test046
+# TEST Overwrite test of small/big key/data with cursor checks.
proc test046 { method args } {
global alphabet
global errorInfo
@@ -33,6 +34,7 @@ proc test046 { method args } {
}
puts "\tTest046: Create $method database."
+ set txnenv 0
set eindex [lsearch -exact $args "-env"]
#
# If we are using an env, then testfile should just be the db name.
@@ -44,6 +46,11 @@ proc test046 { method args } {
set testfile test046.db
incr eindex
set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ }
+ set testdir [get_home $env]
}
set t1 $testdir/t1
cleanup $testdir $env
@@ -52,28 +59,43 @@ proc test046 { method args } {
set db [eval {berkdb_open} $oflags $testfile.a]
error_check_good dbopen [is_valid_db $db] TRUE
- # open curs to db
- set dbc [$db cursor]
- error_check_good db_cursor [is_substr $dbc $db] 1
-
# keep nkeys even
set nkeys 20
# Fill page w/ small key/data pairs
puts "\tTest046: Fill page with $nkeys small key/data pairs."
for { set i 1 } { $i <= $nkeys } { incr i } {
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
if { [is_record_based $method] == 1} {
- set ret [$db put $i $data$i]
+ set ret [eval {$db put} $txn {$i $data$i}]
} elseif { $i < 10 } {
- set ret [$db put [set key]00$i [set data]00$i]
+ set ret [eval {$db put} $txn [set key]00$i \
+ [set data]00$i]
} elseif { $i < 100 } {
- set ret [$db put [set key]0$i [set data]0$i]
+ set ret [eval {$db put} $txn [set key]0$i \
+ [set data]0$i]
} else {
- set ret [$db put $key$i $data$i]
+ set ret [eval {$db put} $txn {$key$i $data$i}]
}
error_check_good dbput $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
}
+ # open curs to db
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
+ error_check_good db_cursor [is_substr $dbc $db] 1
+
# get db order of keys
for {set i 1; set ret [$dbc get -first]} { [llength $ret] != 0} { \
set ret [$dbc get -next]} {
@@ -92,7 +114,7 @@ proc test046 { method args } {
# delete before cursor(n-1), make sure it is gone
set i [expr $i - 1]
- error_check_good db_del [$db del $key_set($i)] 0
+ error_check_good db_del [eval {$db del} $txn {$key_set($i)}] 0
# use set_range to get first key starting at n-1, should
# give us nth--but only works for btree
@@ -120,7 +142,7 @@ proc test046 { method args } {
puts "\t\tTest046.a.2: Delete cursor item by key."
# nth key, which cursor should be on now
set i [incr i]
- set ret [$db del $key_set($i)]
+ set ret [eval {$db del} $txn {$key_set($i)}]
error_check_good db_del $ret 0
# this should return n+1 key/data, curr has nth key/data
@@ -155,7 +177,7 @@ proc test046 { method args } {
set ret [$dbc get -prev]
error_check_bad dbc_get:prev [llength $curr] 0
# delete *after* cursor pos.
- error_check_good db:del [$db del $key_set([incr i])] 0
+ error_check_good db:del [eval {$db del} $txn {$key_set([incr i])}] 0
# make sure item is gone, try to get it
if { [string compare $omethod "-btree"] == 0} {
@@ -211,12 +233,12 @@ proc test046 { method args } {
puts "\t\tTest046.c.1: Insert by key before the cursor."
# i is at curs pos, i=n+1, we want to go BEFORE
set i [incr i -1]
- set ret [$db put $key_set($i) $data_set($i)]
+ set ret [eval {$db put} $txn {$key_set($i) $data_set($i)}]
error_check_good db_put:before $ret 0
puts "\t\tTest046.c.2: Insert by key after the cursor."
set i [incr i +2]
- set ret [$db put $key_set($i) $data_set($i)]
+ set ret [eval {$db put} $txn {$key_set($i) $data_set($i)}]
error_check_good db_put:after $ret 0
puts "\t\tTest046.c.3: Insert by curs with deleted curs (should fail)."
@@ -224,6 +246,9 @@ proc test046 { method args } {
set i [incr i -1]
error_check_good dbc:close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
error_check_good db:close [$db close] 0
if { [is_record_based $method] == 1} {
puts "\t\tSkipping the rest of test for method $method."
@@ -233,7 +258,12 @@ proc test046 { method args } {
# Reopen without printing __db_errs.
set db [eval {berkdb_open_noerr} $oflags $testfile.a]
error_check_good dbopen [is_valid_db $db] TRUE
- set dbc [$db cursor]
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
error_check_good cursor [is_valid_cursor $dbc $db] TRUE
# should fail with EINVAL (deleted cursor)
@@ -254,7 +284,7 @@ proc test046 { method args } {
Insert by cursor before/after existent cursor."
# can't use before after w/o dup except renumber in recno
# first, restore an item so they don't fail
- #set ret [$db put $key_set($i) $data_set($i)]
+ #set ret [eval {$db put} $txn {$key_set($i) $data_set($i)}]
#error_check_good db_put $ret 0
#set ret [$dbc get -set $key_set($i)]
@@ -275,21 +305,37 @@ proc test046 { method args } {
# overwrites
puts "\tTest046.d.0: Cleanup, close db, open new db with no dups."
error_check_good dbc:close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
error_check_good db:close [$db close] 0
set db [eval {berkdb_open} $oflags $testfile.d]
error_check_good dbopen [is_valid_db $db] TRUE
- set dbc [$db cursor]
- error_check_good db_cursor [is_substr $dbc $db] 1
- set nkeys 20
-
# Fill page w/ small key/data pairs
puts "\tTest046.d.0: Fill page with $nkeys small key/data pairs."
for { set i 1 } { $i < $nkeys } { incr i } {
- set ret [$db put $key$i $data$i]
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn {$key$i $data$i}]
error_check_good dbput $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
}
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
+ error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE
+ set nkeys 20
+
# Prepare cursor on item
set ret [$dbc get -first]
error_check_bad dbc_get:first [llength $ret] 0
@@ -347,14 +393,14 @@ proc test046 { method args } {
if { [string compare $type key_over] == 0 } {
puts "\t\tTest046.d.$i: Key\
Overwrite:($i_pair) by ($w_pair)."
- set ret [$db put \
+ set ret [eval {$db put} $txn \
$"key_init[lindex $i_pair 0]" \
$"data_over[lindex $w_pair 1]"]
error_check_good \
dbput:over:i($i_pair):o($w_pair) $ret 0
# check value
- set ret [$db \
- get $"key_init[lindex $i_pair 0]"]
+ set ret [eval {$db get} $txn \
+ $"key_init[lindex $i_pair 0]"]
error_check_bad \
db:get:check [llength $ret] 0
error_check_good db:get:compare_data \
@@ -382,6 +428,9 @@ proc test046 { method args } {
puts "\tTest046.d.3: Cleanup for next part of test."
error_check_good dbc_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
error_check_good db_close [$db close] 0
if { [is_rbtree $method] == 1} {
@@ -394,10 +443,6 @@ proc test046 { method args } {
set db [eval {berkdb_open_noerr} $oflags -dup -dupsort $testfile.e]
error_check_good dbopen [is_valid_db $db] TRUE
- # open curs to db
- set dbc [$db cursor]
- error_check_good db_cursor [is_substr $dbc $db] 1
-
# keep nkeys even
set nkeys 20
set ndups 20
@@ -406,14 +451,31 @@ proc test046 { method args } {
puts "\tTest046.e.2:\
Put $nkeys small key/data pairs and $ndups sorted dups."
for { set i 0 } { $i < $nkeys } { incr i } {
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
if { $i < 10 } {
- set ret [$db put [set key]0$i [set data]0$i]
+ set ret [eval {$db put} $txn [set key]0$i [set data]0$i]
} else {
- set ret [$db put $key$i $data$i]
+ set ret [eval {$db put} $txn {$key$i $data$i}]
}
error_check_good dbput $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
}
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ # open curs to db
+ set dbc [eval {$db cursor} $txn]
+ error_check_good db_cursor [is_substr $dbc $db] 1
+
# get db order of keys
for {set i 0; set ret [$dbc get -first]} { [llength $ret] != 0} { \
set ret [$dbc get -next]} {
@@ -431,15 +493,15 @@ proc test046 { method args } {
for { set i 0 } { $i < $ndups } { incr i } {
if { $i < 10 } {
- set ret [$db put $keym DUPLICATE_0$i]
+ set ret [eval {$db put} $txn {$keym DUPLICATE_0$i}]
} else {
- set ret [$db put $keym DUPLICATE_$i]
+ set ret [eval {$db put} $txn {$keym DUPLICATE_$i}]
}
error_check_good db_put:DUP($i) $ret 0
}
puts "\tTest046.e.3: Check duplicate duplicates"
- set ret [$db put $keym DUPLICATE_00]
+ set ret [eval {$db put} $txn {$keym DUPLICATE_00}]
error_check_good dbput:dupdup [is_substr $ret "DB_KEYEXIST"] 1
# get dup ordering
@@ -479,11 +541,24 @@ proc test046 { method args } {
#error_check_good \
# dbc_get:current:deleted [is_substr $ret "DB_KEYEMPTY"] 1
error_check_good dbc_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
# restore deleted keys
- error_check_good db_put:1 [$db put $keym $dup_set($i)] 0
- error_check_good db_put:2 [$db put $keym $dup_set([incr i])] 0
- error_check_good db_put:3 [$db put $keym $dup_set([incr i])] 0
+ error_check_good db_put:1 [eval {$db put} $txn {$keym $dup_set($i)}] 0
+ error_check_good db_put:2 [eval {$db put} $txn \
+ {$keym $dup_set([incr i])}] 0
+ error_check_good db_put:3 [eval {$db put} $txn \
+ {$keym $dup_set([incr i])}] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
# tested above
@@ -491,7 +566,13 @@ proc test046 { method args } {
error_check_good dbclose [$db close] 0
set db [eval {berkdb_open_noerr} $oflags -dup -dupsort $testfile.e]
error_check_good dbopen [is_valid_db $db] TRUE
- error_check_good db_cursor [is_substr [set dbc [$db cursor]] $db] 1
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
+ error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE
set ret [$dbc get -set $keym]
error_check_bad dbc_get:set [llength $ret] 0
@@ -519,7 +600,7 @@ proc test046 { method args } {
set i 0
# use "spam" to prevent a duplicate duplicate.
- set ret [$db put $keym $dup_set($i)spam]
+ set ret [eval {$db put} $txn {$keym $dup_set($i)spam}]
error_check_good db_put:before $ret 0
# make sure cursor was maintained
set ret [$dbc get -current]
@@ -530,7 +611,7 @@ proc test046 { method args } {
puts "\t\tTest046.g.2: Insert by key after cursor."
set i [expr $i + 2]
# use "eggs" to prevent a duplicate duplicate
- set ret [$db put $keym $dup_set($i)eggs]
+ set ret [eval {$db put} $txn {$keym $dup_set($i)eggs}]
error_check_good db_put:after $ret 0
# make sure cursor was maintained
set ret [$dbc get -current]
@@ -559,19 +640,29 @@ proc test046 { method args } {
puts "\t\tTest046.h.2: New db (no dupsort)."
error_check_good dbc_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
error_check_good db_close [$db close] 0
- set db [berkdb_open \
- -create -dup $omethod -mode 0644 -truncate $testfile.h]
+ set db [eval {berkdb_open} \
+ $oflags -dup $testfile.h]
error_check_good db_open [is_valid_db $db] TRUE
- set dbc [$db cursor]
- error_check_good db_cursor [is_substr $dbc $db] 1
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
+ error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE
for {set i 0} {$i < $nkeys} {incr i} {
if { $i < 10 } {
- error_check_good db_put [$db put key0$i datum0$i] 0
+ set ret [eval {$db put} $txn {key0$i datum0$i}]
+ error_check_good db_put $ret 0
} else {
- error_check_good db_put [$db put key$i datum$i] 0
+ set ret [eval {$db put} $txn {key$i datum$i}]
+ error_check_good db_put $ret 0
}
if { $i == 0 } {
for {set j 0} {$j < $ndups} {incr j} {
@@ -581,9 +672,11 @@ proc test046 { method args } {
set keyput key$i
}
if { $j < 10 } {
- set ret [$db put $keyput DUP_datum0$j]
+ set ret [eval {$db put} $txn \
+ {$keyput DUP_datum0$j}]
} else {
- set ret [$db put $keyput DUP_datum$j]
+ set ret [eval {$db put} $txn \
+ {$keyput DUP_datum$j}]
}
error_check_good dbput:dup $ret 0
}
@@ -711,6 +804,9 @@ proc test046 { method args } {
puts "\tTest046.i: Cleaning up from test."
error_check_good dbc_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
error_check_good db_close [$db close] 0
puts "\tTest046 complete."
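
Test046.e.3 checks "duplicate duplicates": with -dup -dupsort, storing an exact copy of an existing key/data pair is rejected with DB_KEYEXIST. A short sketch mirroring the test's own pattern (noerr open, return value inspected); file and key names are illustrative:

	# Illustrative only: an exact duplicate of an existing pair is refused.
	set db [berkdb_open_noerr -create -mode 0644 -btree -dup -dupsort dupdup.db]
	$db put fruit apple
	set ret [$db put fruit apple]
	puts $ret                         ;# expected to contain DB_KEYEXIST
	$db close
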
diff --git a/bdb/test/test047.tcl b/bdb/test/test047.tcl
index 9d11cd3db83..61c1d0864c5 100644
--- a/bdb/test/test047.tcl
+++ b/bdb/test/test047.tcl
@@ -1,15 +1,17 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1999, 2000
+# Copyright (c) 1999-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: test047.tcl,v 11.10 2000/08/25 14:21:56 sue Exp $
+# $Id: test047.tcl,v 11.19 2002/08/05 19:23:51 sandstro Exp $
#
-# DB Test 47: test of the SET_RANGE interface to DB->c_get.
+# TEST test047
+# TEST DBcursor->c_get get test with SET_RANGE option.
proc test047 { method args } {
source ./include.tcl
set tstn 047
+ set args [convert_args $method $args]
if { [is_btree $method] != 1 } {
puts "Test$tstn skipping for method $method"
@@ -27,6 +29,7 @@ proc test047 { method args } {
puts "\tTest$tstn.a: Create $method database."
set eindex [lsearch -exact $args "-env"]
+ set txnenv 0
#
# If we are using an env, then testfile should just be the db name.
# Otherwise it is the test directory and the name.
@@ -41,27 +44,45 @@ proc test047 { method args } {
set testfile2 test0$tstn.b.db
incr eindex
set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ }
+ set testdir [get_home $env]
}
set t1 $testdir/t1
cleanup $testdir $env
- set oflags "-create -truncate -mode 0644 -dup $args $method"
+ set oflags "-create -mode 0644 -dup $args $method"
set db [eval {berkdb_open} $oflags $testfile]
error_check_good dbopen [is_valid_db $db] TRUE
- # open curs to db
- set dbc [$db cursor]
- error_check_good db_cursor [is_substr $dbc $db] 1
-
set nkeys 20
# Fill page w/ small key/data pairs
#
puts "\tTest$tstn.b: Fill page with $nkeys small key/data pairs."
for { set i 0 } { $i < $nkeys } { incr i } {
- set ret [$db put $key$i $data$i]
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn {$key$i $data$i}]
error_check_good dbput $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
}
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ # open curs to db
+ set dbc [eval {$db cursor} $txn]
+ error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE
+
puts "\tTest$tstn.c: Get data with SET_RANGE, then delete by cursor."
set i 0
set ret [$dbc get -set_range $key$i]
@@ -77,13 +98,14 @@ proc test047 { method args } {
puts "\tTest$tstn.d: \
Use another cursor to fix item on page, delete by db."
- set dbcurs2 [$db cursor]
- error_check_good db:cursor2 [is_substr $dbcurs2 $db] 1
+ set dbcurs2 [eval {$db cursor} $txn]
+ error_check_good db:cursor2 [is_valid_cursor $dbcurs2 $db] TRUE
set ret [$dbcurs2 get -set [lindex [lindex $ret 0] 0]]
error_check_bad dbc_get(2):set [llength $ret] 0
set curr $ret
- error_check_good db:del [$db del [lindex [lindex $ret 0] 0]] 0
+ error_check_good db:del [eval {$db del} $txn \
+ {[lindex [lindex $ret 0] 0]}] 0
# make sure item is gone
set ret [$dbcurs2 get -set_range [lindex [lindex $curr 0] 0]]
@@ -93,6 +115,9 @@ proc test047 { method args } {
puts "\tTest$tstn.e: Close for second part of test, close db/cursors."
error_check_good dbc:close [$dbc close] 0
error_check_good dbc2:close [$dbcurs2 close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
error_check_good dbclose [$db close] 0
# open db
@@ -103,27 +128,48 @@ proc test047 { method args } {
puts "\tTest$tstn.f: Fill page with $nkeys pairs, one set of dups."
for {set i 0} { $i < $nkeys } {incr i} {
# a pair
- set ret [$db put $key$i $data$i]
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn {$key$i $data$i}]
error_check_good dbput($i) $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
}
set j 0
for {set i 0} { $i < $nkeys } {incr i} {
# a dup set for same 1 key
- set ret [$db put $key$i DUP_$data$i]
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn {$key$i DUP_$data$i}]
error_check_good dbput($i):dup $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
}
puts "\tTest$tstn.g: \
Get dups key w/ SET_RANGE, pin onpage with another cursor."
set i 0
- set dbc [$db cursor]
- error_check_good db_cursor [is_substr $dbc $db] 1
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
+ error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE
set ret [$dbc get -set_range $key$i]
error_check_bad dbc_get:set_range [llength $ret] 0
- set dbc2 [$db cursor]
- error_check_good db_cursor2 [is_substr $dbc2 $db] 1
+ set dbc2 [eval {$db cursor} $txn]
+ error_check_good db_cursor [is_valid_cursor $dbc2 $db] TRUE
set ret2 [$dbc2 get -set_range $key$i]
error_check_bad dbc2_get:set_range [llength $ret] 0
@@ -138,14 +184,13 @@ proc test047 { method args } {
error_check_good dbc_close [$dbc close] 0
error_check_good dbc2_close [$dbc2 close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
error_check_good db_close [$db close] 0
set db [eval {berkdb_open} $oflags $testfile2]
error_check_good dbopen [is_valid_db $db] TRUE
- set dbc [$db cursor]
- error_check_good db_cursor [is_substr $dbc $db] 1
- set dbc2 [$db cursor]
- error_check_good db_cursor2 [is_substr $dbc2 $db] 1
set nkeys 10
set ndups 1000
@@ -153,18 +198,36 @@ proc test047 { method args } {
puts "\tTest$tstn.i: Fill page with $nkeys pairs and $ndups dups."
for {set i 0} { $i < $nkeys } { incr i} {
# a pair
- set ret [$db put $key$i $data$i]
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn {$key$i $data$i}]
error_check_good dbput $ret 0
# dups for single pair
if { $i == 0} {
for {set j 0} { $j < $ndups } { incr j } {
- set ret [$db put $key$i DUP_$data$i:$j]
+ set ret [eval {$db put} $txn \
+ {$key$i DUP_$data$i:$j}]
error_check_good dbput:dup $ret 0
}
}
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
}
set i 0
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
+ error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE
+ set dbc2 [eval {$db cursor} $txn]
+ error_check_good db_cursor [is_valid_cursor $dbc2 $db] TRUE
puts "\tTest$tstn.j: \
Get key of first dup with SET_RANGE, fix with 2 curs."
set ret [$dbc get -set_range $key$i]
@@ -186,6 +249,9 @@ proc test047 { method args } {
puts "\tTest$tstn.l: Cleanup."
error_check_good dbc_close [$dbc close] 0
error_check_good dbc2_close [$dbc2 close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
error_check_good db_close [$db close] 0
puts "\tTest$tstn complete."
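
test047 drives the cursor get with DB_SET_RANGE, which positions on the smallest key greater than or equal to the one supplied (btree only). A minimal sketch with made-up keys, assuming the Tcl bindings are loaded:

	# Illustrative only: SET_RANGE lands on the next key at or above the probe.
	set db [berkdb_open -create -mode 0644 -btree range.db]
	foreach k {key05 key10 key20} { $db put $k data_$k }
	set dbc [$db cursor]
	puts [$dbc get -set_range key07]  ;# returns {key10 data_key10}
	$dbc close
	$db close
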
diff --git a/bdb/test/test048.tcl b/bdb/test/test048.tcl
index 84c7c47b721..2131f6f553c 100644
--- a/bdb/test/test048.tcl
+++ b/bdb/test/test048.tcl
@@ -1,16 +1,18 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1999, 2000
+# Copyright (c) 1999-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: test048.tcl,v 11.11 2000/12/11 17:42:18 sue Exp $
+# $Id: test048.tcl,v 11.18 2002/07/29 20:27:49 sandstro Exp $
#
-# Test048: Cursor stability across btree splits.
+# TEST test048
+# TEST Cursor stability across Btree splits.
proc test048 { method args } {
global errorCode
source ./include.tcl
set tstn 048
+ set args [convert_args $method $args]
if { [is_btree $method] != 1 } {
puts "Test$tstn skipping for method $method."
@@ -35,6 +37,7 @@ proc test048 { method args } {
set flags ""
puts "\tTest$tstn.a: Create $method database."
+ set txnenv 0
set eindex [lsearch -exact $args "-env"]
#
# If we are using an env, then testfile should just be the db name.
@@ -46,11 +49,16 @@ proc test048 { method args } {
set testfile test0$tstn.db
incr eindex
set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ }
+ set testdir [get_home $env]
}
set t1 $testdir/t1
cleanup $testdir $env
- set oflags "-create -truncate -mode 0644 $args $method"
+ set oflags "-create -mode 0644 $args $method"
set db [eval {berkdb_open} $oflags $testfile]
error_check_good dbopen [is_valid_db $db] TRUE
@@ -59,20 +67,34 @@ proc test048 { method args } {
#
puts "\tTest$tstn.b: Fill page with $nkeys small key/data pairs."
for { set i 0 } { $i < $nkeys } { incr i } {
- set ret [$db put key000$i $data$i]
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn {key000$i $data$i}]
error_check_good dbput $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
}
# get db ordering, set cursors
puts "\tTest$tstn.c: Set cursors on each of $nkeys pairs."
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
for {set i 0; set ret [$db get key000$i]} {\
$i < $nkeys && [llength $ret] != 0} {\
incr i; set ret [$db get key000$i]} {
set key_set($i) [lindex [lindex $ret 0] 0]
set data_set($i) [lindex [lindex $ret 0] 1]
- set dbc [$db cursor]
+ set dbc [eval {$db cursor} $txn]
set dbc_set($i) $dbc
- error_check_good db_cursor:$i [is_substr $dbc_set($i) $db] 1
+ error_check_good db_cursor:$i \
+ [is_valid_cursor $dbc_set($i) $db] TRUE
set ret [$dbc_set($i) get -set $key_set($i)]
error_check_bad dbc_set($i)_get:set [llength $ret] 0
}
@@ -82,18 +104,21 @@ proc test048 { method args } {
puts "\tTest$tstn.d: Add $mkeys pairs to force split."
for {set i $nkeys} { $i < $mkeys } { incr i } {
if { $i >= 100 } {
- set ret [$db put key0$i $data$i]
+ set ret [eval {$db put} $txn {key0$i $data$i}]
} elseif { $i >= 10 } {
- set ret [$db put key00$i $data$i]
+ set ret [eval {$db put} $txn {key00$i $data$i}]
} else {
- set ret [$db put key000$i $data$i]
+ set ret [eval {$db put} $txn {key000$i $data$i}]
}
error_check_good dbput:more $ret 0
}
puts "\tTest$tstn.e: Make sure split happened."
- error_check_bad stat:check-split [is_substr [$db stat] \
+ # XXX We cannot call stat with active txns or we deadlock.
+ if { $txnenv != 1 } {
+ error_check_bad stat:check-split [is_substr [$db stat] \
"{{Internal pages} 0}"] 1
+ }
puts "\tTest$tstn.f: Check to see that cursors maintained reference."
for {set i 0} { $i < $nkeys } {incr i} {
@@ -107,19 +132,18 @@ proc test048 { method args } {
puts "\tTest$tstn.g: Delete added keys to force reverse split."
for {set i $nkeys} { $i < $mkeys } { incr i } {
if { $i >= 100 } {
- error_check_good db_del:$i [$db del key0$i] 0
+ error_check_good db_del:$i \
+ [eval {$db del} $txn {key0$i}] 0
} elseif { $i >= 10 } {
- error_check_good db_del:$i [$db del key00$i] 0
+ error_check_good db_del:$i \
+ [eval {$db del} $txn {key00$i}] 0
} else {
- error_check_good db_del:$i [$db del key000$i] 0
+ error_check_good db_del:$i \
+ [eval {$db del} $txn {key000$i}] 0
}
}
- puts "\tTest$tstn.h: Verify reverse split."
- error_check_good stat:check-reverse_split [is_substr [$db stat] \
- "{{Internal pages} 0}"] 1
-
- puts "\tTest$tstn.i: Verify cursor reference."
+ puts "\tTest$tstn.h: Verify cursor reference."
for {set i 0} { $i < $nkeys } {incr i} {
set ret [$dbc_set($i) get -current]
error_check_bad dbc$i:get:current [llength $ret] 0
@@ -128,11 +152,18 @@ proc test048 { method args } {
error_check_good dbc$i:get(match) $ret $ret2
}
- puts "\tTest$tstn.j: Cleanup."
+ puts "\tTest$tstn.i: Cleanup."
# close cursors
for {set i 0} { $i < $nkeys } {incr i} {
error_check_good dbc_close:$i [$dbc_set($i) close] 0
}
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ puts "\tTest$tstn.j: Verify reverse split."
+ error_check_good stat:check-reverse_split [is_substr [$db stat] \
+ "{{Internal pages} 0}"] 1
+
error_check_good dbclose [$db close] 0
puts "\tTest$tstn complete."
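
The reordering above closes the per-key cursors and commits the transaction before calling $db stat, because, as the new XXX comment notes, stat can deadlock against the test's own open transaction. A rough sketch of the split check itself, run with no transaction open; sizes and names are assumptions:

	# Illustrative only: detect that a btree has grown internal pages.
	set db [berkdb_open -create -mode 0644 -btree split.db]
	for {set i 0} {$i < 2000} {incr i} {
		$db put [format key%05d $i] [string repeat x 64]
	}
	set stat [$db stat]
	# test048 asserts that "{{Internal pages} 0}" is NOT a substring,
	# i.e. at least one split has happened.
	puts [string first "{{Internal pages} 0}" $stat]
	$db close
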
diff --git a/bdb/test/test049.tcl b/bdb/test/test049.tcl
index aaea3b200bf..3040727c469 100644
--- a/bdb/test/test049.tcl
+++ b/bdb/test/test049.tcl
@@ -1,11 +1,12 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1999, 2000
+# Copyright (c) 1999-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: test049.tcl,v 11.15 2000/08/25 14:21:56 sue Exp $
+# $Id: test049.tcl,v 11.21 2002/05/22 15:42:53 sue Exp $
#
-# Test 049: Test of each cursor routine with unitialized cursors
+# TEST test049
+# TEST Cursor operations on uninitialized cursors.
proc test049 { method args } {
global errorInfo
global errorCode
@@ -17,7 +18,7 @@ proc test049 { method args } {
set args [convert_args $method $args]
set omethod [convert_method $method]
- puts "\tTest$tstn: Test of cursor routines with unitialized cursors."
+ puts "\tTest$tstn: Test of cursor routines with uninitialized cursors."
set key "key"
set data "data"
@@ -30,6 +31,7 @@ proc test049 { method args } {
}
puts "\tTest$tstn.a: Create $method database."
+ set txnenv 0
set eindex [lsearch -exact $args "-env"]
#
# If we are using an env, then testfile should just be the db name.
@@ -41,34 +43,53 @@ proc test049 { method args } {
set testfile test0$tstn.db
incr eindex
set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ }
+ set testdir [get_home $env]
}
set t1 $testdir/t1
cleanup $testdir $env
- set oflags "-create -truncate -mode 0644 $rflags $omethod $args"
+ set oflags "-create -mode 0644 $rflags $omethod $args"
if { [is_record_based $method] == 0 && [is_rbtree $method] != 1 } {
append oflags " -dup"
}
set db [eval {berkdb_open_noerr} $oflags $testfile]
error_check_good dbopen [is_valid_db $db] TRUE
- set dbc_u [$db cursor]
- error_check_good db:cursor [is_substr $dbc_u $db] 1
-
set nkeys 10
puts "\tTest$tstn.b: Fill page with $nkeys small key/data pairs."
for { set i 1 } { $i <= $nkeys } { incr i } {
- set ret [$db put $key$i $data$i]
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn {$key$i $data$i}]
error_check_good dbput:$i $ret 0
if { $i == 1 } {
for {set j 0} { $j < [expr $nkeys / 2]} {incr j} {
- set ret [$db put $key$i DUPLICATE$j]
+ set ret [eval {$db put} $txn \
+ {$key$i DUPLICATE$j}]
error_check_good dbput:dup:$j $ret 0
}
}
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
}
# DBC GET
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc_u [eval {$db cursor} $txn]
+ error_check_good db:cursor [is_valid_cursor $dbc_u $db] TRUE
+
puts "\tTest$tstn.c: Test dbc->get interfaces..."
set i 0
foreach flag { current first last next prev nextdup} {
@@ -112,7 +133,7 @@ proc test049 { method args } {
# now uninitialize cursor
error_check_good dbc_close [$dbc_u close] 0
- set dbc_u [$db cursor]
+ set dbc_u [eval {$db cursor} $txn]
error_check_good \
db_cursor [is_substr $dbc_u $db] 1
}
@@ -154,6 +175,9 @@ proc test049 { method args } {
error_check_good dbc_del [is_substr $errorCode EINVAL] 1
error_check_good dbc_close [$dbc_u close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
error_check_good db_close [$db close] 0
puts "\tTest$tstn complete."
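
test049 expects each cursor routine to fail with EINVAL when the cursor has never been positioned, and uses berkdb_open_noerr so the error is raised to Tcl rather than printed. A hedged sketch of one such probe, with illustrative names:

	# Illustrative only: delete through a never-positioned cursor.
	set db [berkdb_open_noerr -create -mode 0644 -btree uninit.db]
	$db put a 1
	set dbc [$db cursor]
	set failed [catch {$dbc del} msg]  ;# cursor was never positioned
	puts "failed=$failed msg=$msg"     ;# per the test, the error should mention EINVAL
	$dbc close
	$db close
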
diff --git a/bdb/test/test050.tcl b/bdb/test/test050.tcl
index 4a2d8c8fdc0..dfaeddd035c 100644
--- a/bdb/test/test050.tcl
+++ b/bdb/test/test050.tcl
@@ -1,11 +1,12 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1999, 2000
+# Copyright (c) 1999-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: test050.tcl,v 11.15 2000/08/25 14:21:57 sue Exp $
+# $Id: test050.tcl,v 11.21 2002/05/24 14:15:13 bostic Exp $
#
-# Test050: Overwrite test of small/big key/data with cursor checks for RECNO
+# TEST test050
+# TEST Overwrite test of small/big key/data with cursor checks for Recno.
proc test050 { method args } {
global alphabet
global errorInfo
@@ -30,6 +31,7 @@ proc test050 { method args } {
set flags ""
puts "\tTest$tstn: Create $method database."
+ set txnenv 0
set eindex [lsearch -exact $args "-env"]
#
# If we are using an env, then testfile should just be the db name.
@@ -41,18 +43,19 @@ proc test050 { method args } {
set testfile test0$tstn.db
incr eindex
set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ }
+ set testdir [get_home $env]
}
set t1 $testdir/t1
cleanup $testdir $env
- set oflags "-create -truncate -mode 0644 $args $omethod"
+ set oflags "-create -mode 0644 $args $omethod"
set db [eval {berkdb_open_noerr} $oflags $testfile]
error_check_good dbopen [is_valid_db $db] TRUE
- # open curs to db
- set dbc [$db cursor]
- error_check_good db_cursor [is_substr $dbc $db] 1
-
# keep nkeys even
set nkeys 20
@@ -60,9 +63,26 @@ proc test050 { method args } {
#
puts "\tTest$tstn: Fill page with $nkeys small key/data pairs."
for { set i 1 } { $i <= $nkeys } { incr i } {
- set ret [$db put $i [chop_data $method $data$i]]
- error_check_good dbput $ret 0
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn {$i [chop_data $method $data$i]}]
+ error_check_good dbput $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ }
+
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
}
+ # open curs to db
+ set dbc [eval {$db cursor} $txn]
+ error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE
# get db order of keys
for {set i 0; set ret [$dbc get -first]} { [llength $ret] != 0} { \
@@ -83,8 +103,16 @@ proc test050 { method args } {
puts "\t\tTest$tstn.a.1:\
Insert with uninitialized cursor (should fail)."
error_check_good dbc_close [$dbc close] 0
- set dbc [$db cursor]
- error_check_good db_cursor [is_substr $dbc $db] 1
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
+ error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE
catch {$dbc put -before DATA1} ret
error_check_good dbc_put:before:uninit [is_substr $errorCode EINVAL] 1
@@ -169,8 +197,8 @@ proc test050 { method args } {
if { [string compare $type by_key] == 0 } {
puts "\t\tTest$tstn.b.$i:\
Overwrite:($pair):$type"
- set ret [$db put \
- 1 OVER$pair$data[lindex $pair 1]]
+ set ret [eval {$db put} $txn \
+ 1 {OVER$pair$data[lindex $pair 1]}]
error_check_good dbput:over:($pair) $ret 0
} else {
# This is a cursor overwrite
@@ -185,7 +213,9 @@ proc test050 { method args } {
puts "\tTest$tstn.c: Cleanup and close cursor."
error_check_good dbc_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
error_check_good db_close [$db close] 0
- puts "\tTest$tstn complete."
}
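test050 and the later cursor tests give cursors the same treatment: the cursor is created inside a transaction by passing the $txn switch to $db cursor, the handle is checked with is_valid_cursor, and the cursor is closed before that transaction commits. Condensed from the hunks above, under the same assumptions as the previous sketch:

    if { $txnenv == 1 } {
        set t [$env txn]
        error_check_good txn [is_valid_txn $t $env] TRUE
        set txn "-txn $t"
    }
    set dbc [eval {$db cursor} $txn]              ;# cursor operates within the txn
    error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE
    # ... cursor reads and writes ...
    error_check_good dbc_close [$dbc close] 0     ;# close the cursor first
    if { $txnenv == 1 } {
        error_check_good txn [$t commit] 0        ;# then commit
    }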
diff --git a/bdb/test/test051.tcl b/bdb/test/test051.tcl
index 6994526e214..830b7630788 100644
--- a/bdb/test/test051.tcl
+++ b/bdb/test/test051.tcl
@@ -1,17 +1,16 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1999, 2000
+# Copyright (c) 1999-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: test051.tcl,v 11.14 2000/08/25 14:21:57 sue Exp $
-#
-# Test51:
-# Test of the fixed recno method.
-# 0. Test various flags (legal and illegal) to open
-# 1. Test partial puts where dlen != size (should fail)
-# 2. Partial puts for existent record -- replaces at beg, mid, and
-# end of record, as well as full replace
+# $Id: test051.tcl,v 11.21 2002/05/24 13:43:24 sue Exp $
#
+# TEST test051
+# TEST Fixed-length record Recno test.
+# TEST 0. Test various flags (legal and illegal) to open
+# TEST 1. Test partial puts where dlen != size (should fail)
+# TEST 2. Partial puts for existent record -- replaces at beg, mid, and
+# TEST end of record, as well as full replace
proc test051 { method { args "" } } {
global fixed_len
global errorInfo
@@ -28,6 +27,7 @@ proc test051 { method { args "" } } {
}
# Create the database and open the dictionary
+ set txnenv 0
set eindex [lsearch -exact $args "-env"]
#
# If we are using an env, then testfile should just be the db name.
@@ -41,19 +41,23 @@ proc test051 { method { args "" } } {
set testfile1 test051a.db
incr eindex
set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ }
+ set testdir [get_home $env]
}
cleanup $testdir $env
- set oflags "-create -truncate -mode 0644 $args"
+ set oflags "-create -mode 0644 $args"
# Test various flags (legal and illegal) to open
puts "\tTest051.a: Test correct flag behavior on open."
set errorCode NONE
foreach f { "-dup" "-dup -dupsort" "-recnum" } {
puts "\t\tTest051.a: Test flag $f"
- error_check_good dbopen:flagtest:catch \
- [catch {set db \
- [eval {berkdb_open_noerr} $oflags $f $omethod \
- $testfile]} ret] 1
+ set stat [catch {eval {berkdb_open_noerr} $oflags $f $omethod \
+ $testfile} ret]
+ error_check_good dbopen:flagtest:catch $stat 1
error_check_good \
dbopen:flagtest:$f [is_substr $errorCode EINVAL] 1
set errorCode NONE
@@ -66,24 +70,28 @@ proc test051 { method { args "" } } {
$db close
} else {
error_check_good \
- dbopen:flagtest:catch [catch {set db [eval \
- {berkdb_open_noerr} $oflags $f \
- $omethod $testfile]} ret] 1
+ dbopen:flagtest:catch [catch {eval {berkdb_open_noerr}\
+ $oflags $f $omethod $testfile} ret] 1
error_check_good \
dbopen:flagtest:$f [is_substr $errorCode EINVAL] 1
}
-
# Test partial puts where dlen != size (should fail)
# it is an error to specify a partial put w/ different
# dlen and size in fixed length recno/queue
set key 1
set data ""
+ set txn ""
set test_char "a"
set db [eval {berkdb_open_noerr} $oflags $omethod $testfile1]
error_check_good dbopen [is_valid_db $db] TRUE
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
puts "\tTest051.b: Partial puts with dlen != size."
foreach dlen { 1 16 20 32 } {
foreach doff { 0 10 20 32 } {
@@ -91,8 +99,8 @@ proc test051 { method { args "" } } {
puts "\t\tTest051.e: dlen: $dlen, doff: $doff, \
size: [expr $dlen+1]"
set data [repeat $test_char [expr $dlen + 1]]
- error_check_good catch:put 1 [catch {$db \
- put -partial [list $doff $dlen] $key $data} ret]
+ error_check_good catch:put 1 [catch {eval {$db put -partial \
+ [list $doff $dlen]} $txn {$key $data}} ret]
#
# We don't get back the server error string just
# the result.
@@ -109,8 +117,8 @@ proc test051 { method { args "" } } {
puts "\t\tTest051.e: dlen: $dlen, doff: $doff, \
size: [expr $dlen-1]"
set data [repeat $test_char [expr $dlen - 1]]
- error_check_good catch:put 1 [catch {$db \
- put -partial [list $doff $dlen] $key $data} ret]
+ error_check_good catch:put 1 [catch {eval {$db put -partial \
+ [list $doff $dlen]} $txn {$key $data}} ret]
if { $eindex == -1 } {
error_check_good "dbput:partial: dlen > size" \
[is_substr $errorInfo "Length improper"] 1
@@ -121,6 +129,9 @@ proc test051 { method { args "" } } {
}
}
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
$db close
# Partial puts for existent record -- replaces at beg, mid, and
@@ -132,14 +143,24 @@ proc test051 { method { args "" } } {
puts "\t\tTest051.f: First try a put and then a full replace."
set data [repeat "a" $fixed_len]
- set ret [$db put 1 $data]
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn {1 $data}]
error_check_good dbput $ret 0
- error_check_good dbget $data [lindex [lindex [$db get -recno 1] 0] 1]
+ set ret [eval {$db get} $txn {-recno 1}]
+ error_check_good dbget $data [lindex [lindex $ret 0] 1]
set data [repeat "b" $fixed_len]
- set ret [$db put -partial [list 0 $fixed_len] 1 $data]
+ set ret [eval {$db put -partial [list 0 $fixed_len]} $txn {1 $data}]
error_check_good dbput $ret 0
- error_check_good dbget $data [lindex [lindex [$db get -recno 1] 0] 1]
+ set ret [eval {$db get} $txn {-recno 1}]
+ error_check_good dbget $data [lindex [lindex $ret 0] 1]
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
set data "InitialData"
set pdata "PUT"
@@ -154,12 +175,21 @@ proc test051 { method { args "" } } {
puts "\t\tTest051.g: Now replace at different offsets ($offlist)."
foreach doff $offlist {
incr key
- set ret [$db put $key $data]
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn {$key $data}]
error_check_good dbput:init $ret 0
puts "\t\t Test051.g: Replace at offset $doff."
- set ret [$db put -partial [list $doff $dlen] $key $pdata]
+ set ret [eval {$db put -partial [list $doff $dlen]} $txn \
+ {$key $pdata}]
error_check_good dbput:partial $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
if { $doff == 0} {
set beg ""
@@ -186,6 +216,4 @@ proc test051 { method { args "" } } {
}
$db close
-
- puts "\tTest051 complete."
}
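A Tcl detail behind these rewritten call sites, such as eval {$db put -partial [list $doff $dlen]} $txn {$key $data}: the braced pieces reach eval as literal script text and are substituted exactly once when eval runs, while the bare $txn expands in place to either nothing or "-txn <handle>". One call site therefore serves both the transactional and non-transactional runs, and data values containing spaces are not re-split. A small hypothetical illustration (assumes $db is an open handle; values invented for the example):

    set txn ""                          ;# non-transactional run: contributes no words
    # set txn "-txn $t"                 ;# transactional run: contributes two words
    set key 1
    set data "a datum with spaces"      ;# stays one word through the eval
    set ret [eval {$db put} $txn {$key $data}]
    error_check_good dbput $ret 0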
diff --git a/bdb/test/test052.tcl b/bdb/test/test052.tcl
index 820c99a2bd5..1f386449630 100644
--- a/bdb/test/test052.tcl
+++ b/bdb/test/test052.tcl
@@ -1,12 +1,12 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1999, 2000
+# Copyright (c) 1999-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: test052.tcl,v 11.10 2000/10/06 19:29:52 krinsky Exp $
+# $Id: test052.tcl,v 11.16 2002/07/08 20:48:58 sandstro Exp $
#
-# Test52
-# Renumbering recno test.
+# TEST test052
+# TEST Renumbering record Recno test.
proc test052 { method args } {
global alphabet
global errorInfo
@@ -27,6 +27,7 @@ proc test052 { method args } {
set flags ""
puts "\tTest052: Create $method database."
+ set txnenv 0
set eindex [lsearch -exact $args "-env"]
#
# If we are using an env, then testfile should just be the db name.
@@ -38,27 +39,45 @@ proc test052 { method args } {
set testfile test052.db
incr eindex
set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ }
+ set testdir [get_home $env]
}
set t1 $testdir/t1
cleanup $testdir $env
- set oflags "-create -truncate -mode 0644 $args $omethod"
+ set oflags "-create -mode 0644 $args $omethod"
set db [eval {berkdb_open} $oflags $testfile]
error_check_good dbopen [is_valid_db $db] TRUE
- # open curs to db
- set dbc [$db cursor]
- error_check_good db_cursor [is_substr $dbc $db] 1
-
# keep nkeys even
set nkeys 20
# Fill page w/ small key/data pairs
puts "\tTest052: Fill page with $nkeys small key/data pairs."
for { set i 1 } { $i <= $nkeys } { incr i } {
- set ret [$db put $i $data$i]
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn {$i $data$i}]
error_check_good dbput $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ }
+
+ # open curs to db
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
}
+ set dbc [eval {$db cursor} $txn]
+ error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE
# get db order of keys
for {set i 1; set ret [$dbc get -first]} { [llength $ret] != 0} { \
@@ -79,7 +98,7 @@ proc test052 { method args } {
# delete by key before current
set i [incr i -1]
- error_check_good db_del:before [$db del $keys($i)] 0
+ error_check_good db_del:before [eval {$db del} $txn {$keys($i)}] 0
# with renumber, current's data should be constant, but key==--key
set i [incr i +1]
error_check_good dbc:data \
@@ -94,7 +113,7 @@ proc test052 { method args } {
error_check_bad dbc:get [llength $ret] 0
error_check_good dbc:get:curs [lindex [lindex $ret 0] 1] \
$darray([expr $i + 1])
- error_check_good db_del:curr [$db del $keys($i)] 0
+ error_check_good db_del:curr [eval {$db del} $txn {$keys($i)}] 0
set ret [$dbc get -current]
# After a delete, cursor should return DB_NOTFOUND.
@@ -114,7 +133,7 @@ proc test052 { method args } {
# should be { keys($nkeys/2), darray($nkeys/2 + 2) }
set i [expr $nkeys/2]
# deleting data for key after current (key $nkeys/2 + 1)
- error_check_good db_del [$db del $keys([expr $i + 1])] 0
+ error_check_good db_del [eval {$db del} $txn {$keys([expr $i + 1])}] 0
# current should be constant
set ret [$dbc get -current]
@@ -248,6 +267,9 @@ proc test052 { method args } {
$ret [list [list $keys($i) $darray($i)]]
error_check_good dbc_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
error_check_good db_close [$db close] 0
puts "\tTest052 complete."
diff --git a/bdb/test/test053.tcl b/bdb/test/test053.tcl
index e3a908c90d8..3e217a2b55f 100644
--- a/bdb/test/test053.tcl
+++ b/bdb/test/test053.tcl
@@ -1,12 +1,13 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1999, 2000
+# Copyright (c) 1999-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: test053.tcl,v 11.12 2000/12/11 17:24:55 sue Exp $
+# $Id: test053.tcl,v 11.18 2002/05/24 15:24:55 sue Exp $
#
-# Test53: test of the DB_REVSPLITOFF flag in the btree and
-# Btree-w-recnum methods
+# TEST test053
+# TEST Test of the DB_REVSPLITOFF flag in the Btree and Btree-w-recnum
+# TEST methods.
proc test053 { method args } {
global alphabet
global errorCode
@@ -31,6 +32,7 @@ proc test053 { method args } {
set flags ""
puts "\tTest053.a: Create $omethod $args database."
+ set txnenv 0
set eindex [lsearch -exact $args "-env"]
#
# If we are using an env, then testfile should just be the db name.
@@ -42,12 +44,17 @@ proc test053 { method args } {
set testfile test053.db
incr eindex
set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ }
+ set testdir [get_home $env]
}
set t1 $testdir/t1
cleanup $testdir $env
set oflags \
- "-create -truncate -revsplitoff -pagesize 1024 $args $omethod"
+ "-create -revsplitoff -pagesize 1024 $args $omethod"
set db [eval {berkdb_open} $oflags $testfile]
error_check_good dbopen [is_valid_db $db] TRUE
@@ -77,8 +84,16 @@ proc test053 { method args } {
} else {
set key $keyroot$j
}
- set ret [$db put $key $data]
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn {$key $data}]
error_check_good dbput $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
}
}
@@ -89,16 +104,29 @@ proc test053 { method args } {
puts "\tTest053.d: Delete all but one key per page."
for {set i 0} { $i < $npages } {incr i } {
for {set j 1} { $j < $nkeys } {incr j } {
- set ret [$db del $key_set($i)0$j]
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db del} $txn {$key_set($i)0$j}]
error_check_good dbdel $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
}
}
puts "\tTest053.e: Check to make sure all pages are still there."
error_check_good page_count:check \
[is_substr [$db stat] "{Leaf pages} $npages"] 1
- set dbc [$db cursor]
- error_check_good db:cursor [is_substr $dbc $db] 1
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
+ error_check_good db:cursor [is_valid_cursor $dbc $db] TRUE
# walk cursor through tree forward, backward.
# delete one key, repeat
@@ -125,7 +153,7 @@ proc test053 { method args } {
puts "\t\tTest053.f.$i:\
Walk through tree with record numbers."
for {set j 1} {$j <= [expr $npages - $i]} {incr j} {
- set curr [$db get -recno $j]
+ set curr [eval {$db get} $txn {-recno $j}]
error_check_bad \
db_get:recno:$j [llength $curr] 0
error_check_good db_get:recno:keys:$j \
@@ -135,10 +163,10 @@ proc test053 { method args } {
}
puts "\tTest053.g.$i:\
Delete single key ([expr $npages - $i] keys left)."
- set ret [$db del $key_set($i)00]
+ set ret [eval {$db del} $txn {$key_set($i)00}]
error_check_good dbdel $ret 0
error_check_good del:check \
- [llength [$db get $key_set($i)00]] 0
+ [llength [eval {$db get} $txn {$key_set($i)00}]] 0
}
# end for loop, verify db_notfound
@@ -149,7 +177,7 @@ proc test053 { method args } {
for {set i 0} { $i < $npages} {incr i} {
puts "\tTest053.i.$i:\
Restore single key ([expr $i + 1] keys in tree)."
- set ret [$db put $key_set($i)00 $data]
+ set ret [eval {$db put} $txn {$key_set($i)00 $data}]
error_check_good dbput $ret 0
puts -nonewline \
@@ -177,7 +205,7 @@ proc test053 { method args } {
puts "\t\tTest053.k.$i:\
Walk through tree with record numbers."
for {set j 1} {$j <= [expr $i + 1]} {incr j} {
- set curr [$db get -recno $j]
+ set curr [eval {$db get} $txn {-recno $j}]
error_check_bad \
db_get:recno:$j [llength $curr] 0
error_check_good db_get:recno:keys:$j \
@@ -188,6 +216,9 @@ proc test053 { method args } {
}
error_check_good dbc_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
error_check_good db_close [$db close] 0
puts "Test053 complete."
diff --git a/bdb/test/test054.tcl b/bdb/test/test054.tcl
index 7308f995645..f53f5a658bf 100644
--- a/bdb/test/test054.tcl
+++ b/bdb/test/test054.tcl
@@ -1,32 +1,32 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: test054.tcl,v 11.15 2000/08/25 14:21:57 sue Exp $
+# $Id: test054.tcl,v 11.23 2002/06/17 18:41:29 sue Exp $
#
-# Test054:
-#
-# This test checks for cursor maintenance in the presence of deletes.
-# There are N different scenarios to tests:
-# 1. No duplicates. Cursor A deletes a key, do a GET for the key.
-# 2. No duplicates. Cursor is positioned right before key K, Delete K,
-# do a next on the cursor.
-# 3. No duplicates. Cursor is positioned on key K, do a regular delete of K.
-# do a current get on K.
-# 4. Repeat 3 but do a next instead of current.
-#
-# 5. Duplicates. Cursor A is on the first item of a duplicate set, A
-# does a delete. Then we do a non-cursor get.
-# 6. Duplicates. Cursor A is in a duplicate set and deletes the item.
-# do a delete of the entire Key. Test cursor current.
-# 7. Continue last test and try cursor next.
-# 8. Duplicates. Cursor A is in a duplicate set and deletes the item.
-# Cursor B is in the same duplicate set and deletes a different item.
-# Verify that the cursor is in the right place.
-# 9. Cursors A and B are in the place in the same duplicate set. A deletes
-# its item. Do current on B.
-# 10. Continue 8 and do a next on B.
+# TEST test054
+# TEST Cursor maintenance during key/data deletion.
+# TEST
+# TEST This test checks for cursor maintenance in the presence of deletes.
+# TEST There are N different scenarios to test:
+# TEST 1. No duplicates. Cursor A deletes a key, do a GET for the key.
+# TEST 2. No duplicates. Cursor is positioned right before key K, Delete K,
+# TEST do a next on the cursor.
+# TEST 3. No duplicates. Cursor is positioned on key K, do a regular delete
+# TEST of K, do a current get on K.
+# TEST 4. Repeat 3 but do a next instead of current.
+# TEST 5. Duplicates. Cursor A is on the first item of a duplicate set, A
+# TEST does a delete. Then we do a non-cursor get.
+# TEST 6. Duplicates. Cursor A is in a duplicate set and deletes the item.
+# TEST do a delete of the entire Key. Test cursor current.
+# TEST 7. Continue last test and try cursor next.
+# TEST 8. Duplicates. Cursor A is in a duplicate set and deletes the item.
+# TEST Cursor B is in the same duplicate set and deletes a different item.
+# TEST Verify that the cursor is in the right place.
+# TEST 9. Cursors A and B are in the same place in the same duplicate set. A
+# TEST deletes its item. Do current on B.
+# TEST 10. Continue 8 and do a next on B.
proc test054 { method args } {
global errorInfo
source ./include.tcl
@@ -34,7 +34,7 @@ proc test054 { method args } {
set args [convert_args $method $args]
set omethod [convert_method $method]
- append args " -create -truncate -mode 0644"
+ append args " -create -mode 0644"
puts "Test054 ($method $args):\
interspersed cursor and normal operations"
if { [is_record_based $method] == 1 } {
@@ -42,18 +42,29 @@ proc test054 { method args } {
return
}
- # Create the database and open the dictionary
+ # Find the environment in the argument list, we'll need it
+ # later.
+ set txnenv 0
set eindex [lsearch -exact $args "-env"]
+ if { $eindex != -1 } {
+ incr eindex
+ }
+
+ # Create the database and open the dictionary
#
# If we are using an env, then testfile should just be the db name.
# Otherwise it is the test directory and the name.
if { $eindex == -1 } {
- set testfile $testdir/test054.db
+ set testfile $testdir/test054-nodup.db
set env NULL
} else {
- set testfile test054.db
- incr eindex
+ set testfile test054-nodup.db
set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ }
+ set testdir [get_home $env]
}
cleanup $testdir $env
@@ -64,15 +75,28 @@ proc test054 { method args } {
set db [eval {berkdb_open} $args {$omethod $testfile}]
error_check_good db_open:nodup [is_valid_db $db] TRUE
- set curs [eval {$db cursor} $txn]
- error_check_good curs_open:nodup [is_substr $curs $db] 1
-
# Put three keys in the database
for { set key 1 } { $key <= 3 } {incr key} {
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
set r [eval {$db put} $txn $flags {$key datum$key}]
error_check_good put $r 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
}
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set curs [eval {$db cursor} $txn]
+ error_check_good curs_open:nodup [is_valid_cursor $curs $db] TRUE
+
# Retrieve keys sequentially so we can figure out their order
set i 1
for {set d [$curs get -first] } \
@@ -82,7 +106,7 @@ proc test054 { method args } {
incr i
}
- # TEST CASE 1
+ # Test case #1.
puts "\tTest054.a1: Delete w/cursor, regular get"
# Now set the cursor on the middle on.
@@ -94,7 +118,7 @@ proc test054 { method args } {
error_check_good curs_get:DB_SET:data $d datum$key_set(2)
# Now do the delete
- set r [eval {$curs del} $txn]
+ set r [$curs del]
error_check_good curs_del $r 0
# Now do the get
@@ -103,17 +127,33 @@ proc test054 { method args } {
# Free up the cursor.
error_check_good cursor_close [eval {$curs close}] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
- # TEST CASE 2
+ # Test case #2.
puts "\tTest054.a2: Cursor before K, delete K, cursor next"
# Replace key 2
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
set r [eval {$db put} $txn {$key_set(2) datum$key_set(2)}]
error_check_good put $r 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
# Open and position cursor on first item.
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
set curs [eval {$db cursor} $txn]
- error_check_good curs_open:nodup [is_substr $curs $db] 1
+ error_check_good curs_open:nodup [is_valid_cursor $curs $db] TRUE
# Retrieve keys sequentially so we can figure out their order
set i 1
@@ -143,7 +183,7 @@ proc test054 { method args } {
error_check_good curs_get:DB_NEXT:key $k $key_set(3)
error_check_good curs_get:DB_NEXT:data $d datum$key_set(3)
- # TEST CASE 3
+ # Test case #3.
puts "\tTest054.a3: Cursor on K, delete K, cursor current"
# delete item 3
@@ -153,18 +193,34 @@ proc test054 { method args } {
set ret [$curs get -current]
error_check_good current_after_del $ret [list [list [] []]]
error_check_good cursor_close [$curs close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
puts "\tTest054.a4: Cursor on K, delete K, cursor next"
# Restore keys 2 and 3
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
set r [eval {$db put} $txn {$key_set(2) datum$key_set(2)}]
error_check_good put $r 0
set r [eval {$db put} $txn {$key_set(3) datum$key_set(3)}]
error_check_good put $r 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
# Create the new cursor and put it on 1
set curs [eval {$db cursor} $txn]
- error_check_good curs_open:nodup [is_substr $curs $db] 1
+ error_check_good curs_open:nodup [is_valid_cursor $curs $db] TRUE
set r [$curs get -set $key_set(1)]
error_check_bad cursor_get:DB_SET [llength $r] 0
set k [lindex [lindex $r 0] 0]
@@ -186,6 +242,9 @@ proc test054 { method args } {
# Close cursor
error_check_good curs_close [$curs close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
error_check_good db_close [$db close] 0
# Now get ready for duplicate tests
@@ -197,19 +256,49 @@ proc test054 { method args } {
puts "\tTest054.b: Duplicate Tests"
append args " -dup"
+
+ # Open a new database for the dup tests so -truncate is not needed.
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test054-dup.db
+ set env NULL
+ } else {
+ set testfile test054-dup.db
+ set env [lindex $args $eindex]
+ set testdir [get_home $env]
+ }
+ cleanup $testdir $env
+
+ set flags ""
+ set txn ""
+
set db [eval {berkdb_open} $args {$omethod $testfile}]
error_check_good db_open:dup [is_valid_db $db] TRUE
- set curs [eval {$db cursor} $txn]
- error_check_good curs_open:dup [is_substr $curs $db] 1
-
# Put three keys in the database
for { set key 1 } { $key <= 3 } {incr key} {
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
set r [eval {$db put} $txn $flags {$key datum$key}]
error_check_good put $r 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
}
# Retrieve keys sequentially so we can figure out their order
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set curs [eval {$db cursor} $txn]
+ error_check_good curs_open:dup [is_valid_cursor $curs $db] TRUE
+
set i 1
for {set d [$curs get -first] } \
{[llength $d] != 0 } \
@@ -224,7 +313,7 @@ proc test054 { method args } {
error_check_good dup:put $r 0
}
- # TEST CASE 5
+ # Test case #5.
puts "\tTest054.b1: Delete dup w/cursor on first item. Get on key."
# Now set the cursor on the first of the duplicate set.
@@ -243,7 +332,7 @@ proc test054 { method args } {
set r [eval {$db get} $txn {$key_set(2)}]
error_check_good get_after_del [lindex [lindex $r 0] 1] dup_1
- # TEST CASE 6
+ # Test case #6.
puts "\tTest054.b2: Now get the next duplicate from the cursor."
# Now do next on cursor
@@ -254,12 +343,12 @@ proc test054 { method args } {
error_check_good curs_get:DB_NEXT:key $k $key_set(2)
error_check_good curs_get:DB_NEXT:data $d dup_1
- # TEST CASE 3
+ # Test case #3.
puts "\tTest054.b3: Two cursors in set; each delete different items"
# Open a new cursor.
set curs2 [eval {$db cursor} $txn]
- error_check_good curs_open [is_substr $curs2 $db] 1
+ error_check_good curs_open [is_valid_cursor $curs2 $db] TRUE
# Set on last of duplicate set.
set r [$curs2 get -set $key_set(3)]
@@ -365,5 +454,8 @@ proc test054 { method args } {
# Close cursor
error_check_good curs_close [$curs close] 0
error_check_good curs2_close [$curs2 close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
error_check_good db_close [$db close] 0
}
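test054 also shows why -truncate can be dropped from the open flags throughout this patch: rather than truncating a single file between the no-duplicate and duplicate phases, the test now opens a distinct database per phase (test054-nodup.db and test054-dup.db) and lets cleanup deal with the old files. Condensed from the hunk above:

    # Second phase: open a fresh file so -truncate is not needed.
    if { $eindex == -1 } {
        set testfile $testdir/test054-dup.db      ;# no env: use the full path
        set env NULL
    } else {
        set testfile test054-dup.db               ;# env: db name only
        set env [lindex $args $eindex]
        set testdir [get_home $env]
    }
    cleanup $testdir $env
    set db [eval {berkdb_open} $args {$omethod $testfile}]
    error_check_good db_open:dup [is_valid_db $db] TRUE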
diff --git a/bdb/test/test055.tcl b/bdb/test/test055.tcl
index fc5ce4e98bd..25134dca4be 100644
--- a/bdb/test/test055.tcl
+++ b/bdb/test/test055.tcl
@@ -1,16 +1,17 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: test055.tcl,v 11.11 2000/08/25 14:21:57 sue Exp $
+# $Id: test055.tcl,v 11.16 2002/05/22 15:42:55 sue Exp $
#
-# Test055:
-# This test checks basic cursor operations.
-# There are N different scenarios to tests:
-# 1. (no dups) Set cursor, retrieve current.
-# 2. (no dups) Set cursor, retrieve next.
-# 3. (no dups) Set cursor, retrieve prev.
+# TEST test055
+# TEST Basic cursor operations.
+# TEST This test checks basic cursor operations.
+# TEST There are N different scenarios to test:
+# TEST 1. (no dups) Set cursor, retrieve current.
+# TEST 2. (no dups) Set cursor, retrieve next.
+# TEST 3. (no dups) Set cursor, retrieve prev.
proc test055 { method args } {
global errorInfo
source ./include.tcl
@@ -21,6 +22,7 @@ proc test055 { method args } {
puts "Test055: $method interspersed cursor and normal operations"
# Create the database and open the dictionary
+ set txnenv 0
set eindex [lsearch -exact $args "-env"]
#
# If we are using an env, then testfile should just be the db name.
@@ -32,6 +34,11 @@ proc test055 { method args } {
set testfile test055.db
incr eindex
set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ }
+ set testdir [get_home $env]
}
cleanup $testdir $env
@@ -39,28 +46,41 @@ proc test055 { method args } {
set txn ""
puts "\tTest055.a: No duplicates"
- set db [eval {berkdb_open -create -truncate -mode 0644 $omethod } \
+ set db [eval {berkdb_open -create -mode 0644 $omethod } \
$args {$testfile}]
error_check_good db_open:nodup [is_valid_db $db] TRUE
- set curs [eval {$db cursor} $txn]
- error_check_good curs_open:nodup [is_substr $curs $db] 1
-
# Put three keys in the database
for { set key 1 } { $key <= 3 } {incr key} {
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
set r [eval {$db put} $txn $flags {$key datum$key}]
error_check_good put $r 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
}
# Retrieve keys sequentially so we can figure out their order
set i 1
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set curs [eval {$db cursor} $txn]
+ error_check_good curs_open:nodup [is_valid_cursor $curs $db] TRUE
+
for {set d [$curs get -first] } { [llength $d] != 0 } {\
set d [$curs get -next] } {
set key_set($i) [lindex [lindex $d 0] 0]
incr i
}
- # TEST CASE 1
+ # Test case #1.
puts "\tTest055.a1: Set cursor, retrieve current"
# Now set the cursor on the middle on.
@@ -81,7 +101,7 @@ proc test055 { method args } {
error_check_good \
curs_get:DB_CURRENT:data $d [pad_data $method datum$key_set(2)]
- # TEST CASE 2
+ # Test case #2.
puts "\tTest055.a2: Set cursor, retrieve previous"
set r [$curs get -prev]
error_check_bad cursor_get:DB_PREV [llength $r] 0
@@ -91,10 +111,10 @@ proc test055 { method args } {
error_check_good \
curs_get:DB_PREV:data $d [pad_data $method datum$key_set(1)]
- #TEST CASE 3
+ # Test case #3.
puts "\tTest055.a2: Set cursor, retrieve next"
- # Now set the cursor on the middle on.
+ # Now set the cursor on the middle one.
set r [$curs get -set $key_set(2)]
error_check_bad cursor_get:DB_SET [llength $r] 0
set k [lindex [lindex $r 0] 0]
@@ -114,5 +134,8 @@ proc test055 { method args } {
# Close cursor and database.
error_check_good curs_close [$curs close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
error_check_good db_close [$db close] 0
}
diff --git a/bdb/test/test056.tcl b/bdb/test/test056.tcl
index ade3890c3f9..ef310332ed1 100644
--- a/bdb/test/test056.tcl
+++ b/bdb/test/test056.tcl
@@ -1,12 +1,14 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: test056.tcl,v 11.13 2000/08/25 14:21:57 sue Exp $
+# $Id: test056.tcl,v 11.18 2002/05/22 15:42:55 sue Exp $
#
-# Test056
-# Check if deleting a key when a cursor is on a duplicate of that key works.
+# TEST test056
+# TEST Cursor maintenance during deletes.
+# TEST Check if deleting a key when a cursor is on a duplicate of that
+# TEST key works.
proc test056 { method args } {
global errorInfo
source ./include.tcl
@@ -14,7 +16,7 @@ proc test056 { method args } {
set args [convert_args $method $args]
set omethod [convert_method $method]
- append args " -create -truncate -mode 0644 -dup "
+ append args " -create -mode 0644 -dup "
if { [is_record_based $method] == 1 || [is_rbtree $method] } {
puts "Test056: skipping for method $method"
return
@@ -22,6 +24,7 @@ proc test056 { method args } {
puts "Test056: $method delete of key in presence of cursor"
# Create the database and open the dictionary
+ set txnenv 0
set eindex [lsearch -exact $args "-env"]
#
# If we are using an env, then testfile should just be the db name.
@@ -33,6 +36,11 @@ proc test056 { method args } {
set testfile test056.db
incr eindex
set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ }
+ set testdir [get_home $env]
}
cleanup $testdir $env
@@ -42,18 +50,31 @@ proc test056 { method args } {
set db [eval {berkdb_open} $args {$omethod $testfile}]
error_check_good db_open:dup [is_valid_db $db] TRUE
- set curs [eval {$db cursor} $txn]
- error_check_good curs_open:dup [is_substr $curs $db] 1
-
puts "\tTest056.a: Key delete with cursor on duplicate."
# Put three keys in the database
for { set key 1 } { $key <= 3 } {incr key} {
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
set r [eval {$db put} $txn $flags {$key datum$key}]
error_check_good put $r 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
}
# Retrieve keys sequentially so we can figure out their order
set i 1
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set curs [eval {$db cursor} $txn]
+ error_check_good curs_open:dup [is_valid_cursor $curs $db] TRUE
+
for {set d [$curs get -first] } { [llength $d] != 0 } {
set d [$curs get -next] } {
set key_set($i) [lindex [lindex $d 0] 0]
@@ -141,5 +162,8 @@ proc test056 { method args } {
error_check_good curs_get:DB_FIRST:data $d datum$key_set(3)
error_check_good curs_close [$curs close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
error_check_good db_close [$db close] 0
}
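A smaller change that recurs in every file of this series: handle checks move from the loose is_substr comparison against the database handle to the suite's explicit validity helpers, which return TRUE for a well-formed handle. For example:

    # formerly:  error_check_good curs_open:dup [is_substr $curs $db] 1
    set curs [eval {$db cursor} $txn]
    error_check_good curs_open:dup [is_valid_cursor $curs $db] TRUE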
diff --git a/bdb/test/test057.tcl b/bdb/test/test057.tcl
index 1dc350e32a5..04fb09ef260 100644
--- a/bdb/test/test057.tcl
+++ b/bdb/test/test057.tcl
@@ -1,16 +1,17 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: test057.tcl,v 11.17 2000/08/25 14:21:57 sue Exp $
+# $Id: test057.tcl,v 11.22 2002/05/22 15:42:56 sue Exp $
#
-# Test057:
-# Check if we handle the case where we delete a key with the cursor on it
-# and then add the same key. The cursor should not get the new item
-# returned, but the item shouldn't disappear.
-# Run test tests, one where the overwriting put is done with a put and
-# one where it's done with a cursor put.
+# TEST test057
+# TEST Cursor maintenance during key deletes.
+# TEST Check if we handle the case where we delete a key with the cursor on
+# TEST it and then add the same key. The cursor should not get the new item
+# TEST returned, but the item shouldn't disappear.
+# TEST Run two tests, one where the overwriting put is done with a put and
+# TEST one where it's done with a cursor put.
proc test057 { method args } {
global errorInfo
source ./include.tcl
@@ -18,7 +19,7 @@ proc test057 { method args } {
set args [convert_args $method $args]
set omethod [convert_method $method]
- append args " -create -truncate -mode 0644 -dup "
+ append args " -create -mode 0644 -dup "
if { [is_record_based $method] == 1 || [is_rbtree $method] == 1 } {
puts "Test057: skipping for method $method"
return
@@ -26,6 +27,7 @@ proc test057 { method args } {
puts "Test057: $method delete and replace in presence of cursor."
# Create the database and open the dictionary
+ set txnenv 0
set eindex [lsearch -exact $args "-env"]
#
# If we are using an env, then testfile should just be the db name.
@@ -37,6 +39,11 @@ proc test057 { method args } {
set testfile test057.db
incr eindex
set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ }
+ set testdir [get_home $env]
}
cleanup $testdir $env
@@ -46,20 +53,33 @@ proc test057 { method args } {
set db [eval {berkdb_open} $args {$omethod $testfile}]
error_check_good dbopen:dup [is_valid_db $db] TRUE
- set curs [eval {$db cursor} $txn]
- error_check_good curs_open:dup [is_substr $curs $db] 1
-
puts "\tTest057.a: Set cursor, delete cursor, put with key."
# Put three keys in the database
for { set key 1 } { $key <= 3 } {incr key} {
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
set r [eval {$db put} $txn $flags {$key datum$key}]
error_check_good put $r 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
}
# Retrieve keys sequentially so we can figure out their order
set i 1
- for {set d [$curs get -first] } {[llength $d] != 0 } {\
- set d [$curs get -next] } {
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set curs [eval {$db cursor} $txn]
+ error_check_good curs_open:dup [is_valid_cursor $curs $db] TRUE
+
+ for {set d [$curs get -first] } {[llength $d] != 0 } \
+ {set d [$curs get -next] } {
set key_set($i) [lindex [lindex $d 0] 0]
incr i
}
@@ -108,7 +128,7 @@ proc test057 { method args } {
puts "\tTest057.b: Set two cursor on a key, delete one, overwrite other"
set curs2 [eval {$db cursor} $txn]
- error_check_good curs2_open [is_substr $curs2 $db] 1
+ error_check_good curs2_open [is_valid_cursor $curs2 $db] TRUE
# Set both cursors on the 4rd key
set r [$curs get -set $key_set(3)]
@@ -221,5 +241,8 @@ proc test057 { method args } {
error_check_good curs2_close [$curs2 close] 0
error_check_good curs_close [$curs close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
error_check_good db_close [$db close] 0
}
diff --git a/bdb/test/test058.tcl b/bdb/test/test058.tcl
index 00870a6b5f8..daf164fd6e2 100644
--- a/bdb/test/test058.tcl
+++ b/bdb/test/test058.tcl
@@ -1,10 +1,12 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: test058.tcl,v 11.14 2000/08/25 14:21:57 sue Exp $
+# $Id: test058.tcl,v 11.20 2002/02/22 15:26:27 sandstro Exp $
#
+# TEST test058
+# TEST Verify that deleting and reading duplicates results in correct ordering.
proc test058 { method args } {
source ./include.tcl
@@ -18,6 +20,8 @@ proc test058 { method args } {
return
}
set args [convert_args $method $args]
+ set encargs ""
+ set args [split_encargs $args encargs]
set omethod [convert_method $method]
if { [is_record_based $method] == 1 || [is_rbtree $method] == 1 } {
@@ -28,12 +32,12 @@ proc test058 { method args } {
# environment
env_cleanup $testdir
- set eflags "-create -txn -home $testdir"
- set env [eval {berkdb env} $eflags]
+ set eflags "-create -txn $encargs -home $testdir"
+ set env [eval {berkdb_env} $eflags]
error_check_good env [is_valid_env $env] TRUE
# db open
- set flags "-create -mode 0644 -dup -env $env $args"
+ set flags "-auto_commit -create -mode 0644 -dup -env $env $args"
set db [eval {berkdb_open} $flags $omethod "test058.db"]
error_check_good dbopen [is_valid_db $db] TRUE
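test058 creates its own environment rather than receiving one, so it takes a slightly different path: encryption switches are split out of $args with the suite's split_encargs helper and handed to berkdb_env, and the database is opened -auto_commit because the environment is transactional. Condensed from the hunks above:

    set encargs ""
    set args [split_encargs $args encargs]        ;# peel encryption flags out of $args
    set eflags "-create -txn $encargs -home $testdir"
    set env [eval {berkdb_env} $eflags]
    error_check_good env [is_valid_env $env] TRUE
    set flags "-auto_commit -create -mode 0644 -dup -env $env $args"
    set db [eval {berkdb_open} $flags $omethod "test058.db"]
    error_check_good dbopen [is_valid_db $db] TRUE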
diff --git a/bdb/test/test059.tcl b/bdb/test/test059.tcl
index f9988c4e20b..596ea7a3c94 100644
--- a/bdb/test/test059.tcl
+++ b/bdb/test/test059.tcl
@@ -1,16 +1,16 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: test059.tcl,v 11.12 2000/08/25 14:21:57 sue Exp $
-#
-# Test059:
-# Make sure that we handle retrieves of zero-length data items correctly.
-# The following ops, should allow a partial data retrieve of 0-length.
-# db_get
-# db_cget FIRST, NEXT, LAST, PREV, CURRENT, SET, SET_RANGE
+# $Id: test059.tcl,v 11.18 2002/06/11 15:10:16 sue Exp $
#
+# TEST test059
+# TEST Cursor ops work with a partial length of 0.
+# TEST Make sure that we handle retrieves of zero-length data items correctly.
+# TEST The following ops should allow a partial data retrieve of 0-length.
+# TEST db_get
+# TEST db_cget FIRST, NEXT, LAST, PREV, CURRENT, SET, SET_RANGE
proc test059 { method args } {
source ./include.tcl
@@ -20,6 +20,7 @@ proc test059 { method args } {
puts "Test059: $method 0-length partial data retrieval"
# Create the database and open the dictionary
+ set txnenv 0
set eindex [lsearch -exact $args "-env"]
#
# If we are using an env, then testfile should just be the db name.
@@ -31,6 +32,11 @@ proc test059 { method args } {
set testfile test059.db
incr eindex
set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ }
+ set testdir [get_home $env]
}
cleanup $testdir $env
@@ -44,20 +50,33 @@ proc test059 { method args } {
}
puts "\tTest059.a: Populate a database"
- set oflags "-create -truncate -mode 0644 $omethod $args $testfile"
+ set oflags "-create -mode 0644 $omethod $args $testfile"
set db [eval {berkdb_open} $oflags]
error_check_good db_create [is_substr $db db] 1
# Put ten keys in the database
for { set key 1 } { $key <= 10 } {incr key} {
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
set r [eval {$db put} $txn $pflags {$key datum$key}]
error_check_good put $r 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
}
# Retrieve keys sequentially so we can figure out their order
set i 1
- set curs [$db cursor]
- error_check_good db_curs [is_substr $curs $db] 1
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set curs [eval {$db cursor} $txn]
+ error_check_good db_curs [is_valid_cursor $curs $db] TRUE
for {set d [$curs get -first] } { [llength $d] != 0 } {
set d [$curs get -next] } {
@@ -68,7 +87,7 @@ proc test059 { method args } {
puts "\tTest059.a: db get with 0 partial length retrieve"
# Now set the cursor on the middle one.
- set ret [eval {$db get -partial {0 0}} $gflags {$key_set(5)}]
+ set ret [eval {$db get -partial {0 0}} $txn $gflags {$key_set(5)}]
error_check_bad db_get_0 [llength $ret] 0
puts "\tTest059.a: db cget FIRST with 0 partial length retrieve"
@@ -124,5 +143,8 @@ proc test059 { method args } {
}
error_check_good curs_close [$curs close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
error_check_good db_close [$db close] 0
}
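The retrieval under test in test059 is a partial get of length zero: -partial {0 0} asks for zero bytes starting at offset zero, so the key should come back with an empty data field. The transactional form used above:

    set ret [eval {$db get -partial {0 0}} $txn $gflags {$key_set(5)}]
    error_check_bad db_get_0 [llength $ret] 0     ;# something must come back even though dlen is 0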
diff --git a/bdb/test/test060.tcl b/bdb/test/test060.tcl
index 7f7cc71f00b..4a18c97f42f 100644
--- a/bdb/test/test060.tcl
+++ b/bdb/test/test060.tcl
@@ -1,13 +1,14 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: test060.tcl,v 11.6 2000/08/25 14:21:57 sue Exp $
+# $Id: test060.tcl,v 11.10 2002/05/22 15:42:56 sue Exp $
#
-# Test060: Test of the DB_EXCL flag to DB->open.
-# 1) Attempt to open and create a nonexistent database; verify success.
-# 2) Attempt to reopen it; verify failure.
+# TEST test060
+# TEST Test of the DB_EXCL flag to DB->open().
+# TEST 1) Attempt to open and create a nonexistent database; verify success.
+# TEST 2) Attempt to reopen it; verify failure.
proc test060 { method args } {
global errorCode
source ./include.tcl
@@ -18,6 +19,7 @@ proc test060 { method args } {
puts "Test060: $method ($args) Test of the DB_EXCL flag to DB->open"
# Set the database location and make sure the db doesn't exist yet
+ set txnenv 0
set eindex [lsearch -exact $args "-env"]
#
# If we are using an env, then testfile should just be the db name.
@@ -29,6 +31,11 @@ proc test060 { method args } {
set testfile test060.db
incr eindex
set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ }
+ set testdir [get_home $env]
}
cleanup $testdir $env
diff --git a/bdb/test/test061.tcl b/bdb/test/test061.tcl
index c3187268e39..65544e88deb 100644
--- a/bdb/test/test061.tcl
+++ b/bdb/test/test061.tcl
@@ -1,20 +1,23 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1999, 2000
+# Copyright (c) 1999-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: test061.tcl,v 11.12 2000/10/27 13:23:56 sue Exp $
+# $Id: test061.tcl,v 11.18 2002/02/22 15:26:27 sandstro Exp $
#
-# Test061: Test of transaction abort and commit for in-memory databases.
-# a) Put + abort: verify absence of data
-# b) Put + commit: verify presence of data
-# c) Overwrite + abort: verify that data is unchanged
-# d) Overwrite + commit: verify that data has changed
-# e) Delete + abort: verify that data is still present
-# f) Delete + commit: verify that data has been deleted
+# TEST test061
+# TEST Test of txn abort and commit for in-memory databases.
+# TEST a) Put + abort: verify absence of data
+# TEST b) Put + commit: verify presence of data
+# TEST c) Overwrite + abort: verify that data is unchanged
+# TEST d) Overwrite + commit: verify that data has changed
+# TEST e) Delete + abort: verify that data is still present
+# TEST f) Delete + commit: verify that data has been deleted
proc test061 { method args } {
global alphabet
+ global encrypt
global errorCode
+ global passwd
source ./include.tcl
#
@@ -32,6 +35,8 @@ proc test061 { method args } {
puts "Test061 skipping for method $method"
return
}
+ set encargs ""
+ set args [split_encargs $args encargs]
puts "Test061: Transaction abort and commit test for in-memory data."
puts "Test061: $method $args"
@@ -52,12 +57,12 @@ proc test061 { method args } {
env_cleanup $testdir
# create environment
- set eflags "-create -txn -home $testdir"
- set dbenv [eval {berkdb env} $eflags]
+ set eflags "-create -txn $encargs -home $testdir"
+ set dbenv [eval {berkdb_env} $eflags]
error_check_good dbenv [is_valid_env $dbenv] TRUE
# db open -- no file specified, in-memory database
- set flags "-create $args $omethod"
+ set flags "-auto_commit -create $args $omethod"
set db [eval {berkdb_open -env} $dbenv $flags]
error_check_good dbopen [is_valid_db $db] TRUE
@@ -202,14 +207,20 @@ proc test061 { method args } {
error_check_good env_close [eval {$dbenv close}] 0
# Now run db_recover and ensure that it runs cleanly.
+ set utilflag ""
+ if { $encrypt != 0 } {
+ set utilflag "-P $passwd"
+ }
puts "\tTest061.g: Running db_recover -h"
- set ret [catch {exec $util_path/db_recover -h $testdir} res]
+ set ret [catch {eval {exec} $util_path/db_recover -h $testdir \
+ $utilflag} res]
if { $ret != 0 } {
puts "FAIL: db_recover outputted $res"
}
error_check_good db_recover $ret 0
puts "\tTest061.h: Running db_recover -c -h"
- set ret [catch {exec $util_path/db_recover -c -h $testdir} res]
+ set ret [catch {eval {exec} $util_path/db_recover -c -h $testdir \
+ $utilflag} res]
error_check_good db_recover-c $ret 0
}
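Because test061 may now run with encryption, the recovery utility it shells out to needs the password as well; the new $utilflag carries -P $passwd only when encryption is turned on. Condensed from the hunk above:

    set utilflag ""
    if { $encrypt != 0 } {
        set utilflag "-P $passwd"                 ;# utilities must be given the password too
    }
    set ret [catch {eval {exec} $util_path/db_recover -h $testdir $utilflag} res]
    error_check_good db_recover $ret 0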
diff --git a/bdb/test/test062.tcl b/bdb/test/test062.tcl
index 43a5e1d3939..5cacd98a2c0 100644
--- a/bdb/test/test062.tcl
+++ b/bdb/test/test062.tcl
@@ -1,14 +1,15 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1999, 2000
+# Copyright (c) 1999-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: test062.tcl,v 11.13 2000/12/20 19:02:36 sue Exp $
+# $Id: test062.tcl,v 11.20 2002/06/11 14:09:57 sue Exp $
#
-# DB Test 62: Test of partial puts onto duplicate pages.
-# Insert the first 200 words into the dictionary 200 times each with
-# self as key and <random letter>:self as data. Use partial puts to
-# append self again to data; verify correctness.
+# TEST test062
+# TEST Test of partial puts (using DB_CURRENT) onto duplicate pages.
+# TEST Insert the first 200 words into the dictionary 200 times each with
+# TEST self as key and <random letter>:self as data. Use partial puts to
+# TEST append self again to data; verify correctness.
proc test062 { method {nentries 200} {ndups 200} {tnum 62} args } {
global alphabet
global rand_init
@@ -19,7 +20,12 @@ proc test062 { method {nentries 200} {ndups 200} {tnum 62} args } {
set args [convert_args $method $args]
set omethod [convert_method $method]
+ if { [is_record_based $method] == 1 || [is_rbtree $method] == 1 } {
+ puts "Test0$tnum skipping for method $omethod"
+ return
+ }
# Create the database and open the dictionary
+ set txnenv 0
set eindex [lsearch -exact $args "-env"]
#
# If we are using an env, then testfile should just be the db name.
@@ -31,16 +37,25 @@ proc test062 { method {nentries 200} {ndups 200} {tnum 62} args } {
set testfile test0$tnum.db
incr eindex
set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ #
+ # If we are using txns and running with the
+ # default, set the default down a bit.
+ #
+ if { $nentries == 200 } {
+ set nentries 100
+ }
+ reduce_dups nentries ndups
+ }
+ set testdir [get_home $env]
}
cleanup $testdir $env
puts "Test0$tnum:\
- $method ($args) Partial puts and duplicates."
- if { [is_record_based $method] == 1 || [is_rbtree $method] == 1 } {
- puts "Test0$tnum skipping for method $omethod"
- return
- }
- set db [eval {berkdb_open -create -truncate -mode 0644 \
+ $method ($args) $nentries Partial puts and $ndups duplicates."
+ set db [eval {berkdb_open -create -mode 0644 \
$omethod -dup} $args {$testfile} ]
error_check_good dbopen [is_valid_db $db] TRUE
set did [open $dict]
@@ -52,25 +67,35 @@ proc test062 { method {nentries 200} {ndups 200} {tnum 62} args } {
# Here is the loop where we put each key/data pair
puts "\tTest0$tnum.a: Put loop (initialize database)"
- set dbc [eval {$db cursor} $txn]
- error_check_good cursor_open [is_substr $dbc $db] 1
while { [gets $did str] != -1 && $count < $nentries } {
for { set i 1 } { $i <= $ndups } { incr i } {
set pref \
[string index $alphabet [berkdb random_int 0 25]]
set datastr $pref:$str
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
set ret [eval {$db put} \
$txn $pflags {$str [chop_data $method $datastr]}]
error_check_good put $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
}
set keys($count) $str
incr count
}
- error_check_good cursor_close [$dbc close] 0
close $did
puts "\tTest0$tnum.b: Partial puts."
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
set dbc [eval {$db cursor} $txn]
error_check_good cursor_open [is_substr $dbc $db] 1
@@ -91,21 +116,21 @@ proc test062 { method {nentries 200} {ndups 200} {tnum 62} args } {
set doff [expr [string length $d] + 2]
set dlen 0
error_check_good data_and_key_sanity $d $k
-
+
set ret [$dbc get -current]
error_check_good before_sanity \
[lindex [lindex $ret 0] 0] \
[string range [lindex [lindex $ret 0] 1] 2 end]
-
+
error_check_good partial_put [eval {$dbc put -current \
-partial [list $doff $dlen] $d}] 0
-
+
set ret [$dbc get -current]
error_check_good partial_put_correct \
[lindex [lindex $ret 0] 1] $orig_d$d
}
}
-
+
puts "\tTest0$tnum.c: Double-checking get loop."
# Double-check that each datum in the regular db has
# been appropriately modified.
@@ -121,5 +146,8 @@ proc test062 { method {nentries 200} {ndups 200} {tnum 62} args } {
}
error_check_good dbc_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
error_check_good db_close [$db close] 0
}
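test062 (and test065 below) also scales its workload down when the environment is transactional, per the comment in the hunk: when running with the default counts, the entry count is set down a bit and the duplicate count is reduced with the suite's reduce_dups helper, which takes the variable names by reference. From the hunk above:

    if { $txnenv == 1 } {
        append args " -auto_commit "
        if { $nentries == 200 } {
            set nentries 100                      ;# smaller default workload under txns
        }
        reduce_dups nentries ndups                ;# variables passed by name
    }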
diff --git a/bdb/test/test063.tcl b/bdb/test/test063.tcl
index 2b9c4c4c763..2e8726c8f96 100644
--- a/bdb/test/test063.tcl
+++ b/bdb/test/test063.tcl
@@ -1,13 +1,14 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1999, 2000
+# Copyright (c) 1999-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: test063.tcl,v 11.11 2000/08/25 14:21:58 sue Exp $
+# $Id: test063.tcl,v 11.17 2002/05/24 15:24:55 sue Exp $
#
-# DB Test 63: Test that the DB_RDONLY flag is respected.
-# Attempt to both DB->put and DBC->c_put into a database
-# that has been opened DB_RDONLY, and check for failure.
+# TEST test063
+# TEST Test of the DB_RDONLY flag to DB->open
+# TEST Attempt to both DB->put and DBC->c_put into a database
+# TEST that has been opened DB_RDONLY, and check for failure.
proc test063 { method args } {
global errorCode
source ./include.tcl
@@ -16,6 +17,7 @@ proc test063 { method args } {
set omethod [convert_method $method]
set tnum 63
+ set txnenv 0
set eindex [lsearch -exact $args "-env"]
#
# If we are using an env, then testfile should just be the db name.
@@ -27,6 +29,11 @@ proc test063 { method args } {
set testfile test0$tnum.db
incr eindex
set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ }
+ set testdir [get_home $env]
}
cleanup $testdir $env
@@ -36,6 +43,7 @@ proc test063 { method args } {
set data2 "more_data"
set gflags ""
+ set txn ""
if { [is_record_based $method] == 1 } {
set key "1"
@@ -47,18 +55,26 @@ proc test063 { method args } {
# Create a test database.
puts "\tTest0$tnum.a: Creating test database."
- set db [eval {berkdb_open_noerr -create -truncate -mode 0644} \
+ set db [eval {berkdb_open_noerr -create -mode 0644} \
$omethod $args $testfile]
error_check_good db_create [is_valid_db $db] TRUE
# Put and get an item so it's nonempty.
- set ret [eval {$db put} $key [chop_data $method $data]]
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn {$key [chop_data $method $data]}]
error_check_good initial_put $ret 0
- set dbt [eval {$db get} $gflags $key]
+ set dbt [eval {$db get} $txn $gflags {$key}]
error_check_good initial_get $dbt \
[list [list $key [pad_data $method $data]]]
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
error_check_good db_close [$db close] 0
if { $eindex == -1 } {
@@ -74,19 +90,33 @@ proc test063 { method args } {
set db [eval {berkdb_open_noerr -rdonly} $args {$testfile}]
error_check_good db_open [is_valid_db $db] TRUE
- set dbt [eval {$db get} $gflags $key]
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbt [eval {$db get} $txn $gflags {$key}]
error_check_good db_get $dbt \
[list [list $key [pad_data $method $data]]]
- set ret [catch {eval {$db put} $key2 [chop_data $method $data]} res]
+ set ret [catch {eval {$db put} $txn \
+ {$key2 [chop_data $method $data]}} res]
error_check_good put_failed $ret 1
error_check_good db_put_rdonly [is_substr $errorCode "EACCES"] 1
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
set errorCode "NONE"
puts "\tTest0$tnum.c: Attempting cursor put."
- set dbc [$db cursor]
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
error_check_good cursor_create [is_valid_cursor $dbc $db] TRUE
error_check_good cursor_set [$dbc get -first] $dbt
@@ -94,17 +124,17 @@ proc test063 { method args } {
error_check_good c_put_failed $ret 1
error_check_good dbc_put_rdonly [is_substr $errorCode "EACCES"] 1
- set dbt [eval {$db get} $gflags $key2]
+ set dbt [eval {$db get} $gflags {$key2}]
error_check_good db_get_key2 $dbt ""
puts "\tTest0$tnum.d: Attempting ordinary delete."
set errorCode "NONE"
- set ret [catch {eval {$db del} $key} 1]
+ set ret [catch {eval {$db del} $txn {$key}} 1]
error_check_good del_failed $ret 1
error_check_good db_del_rdonly [is_substr $errorCode "EACCES"] 1
- set dbt [eval {$db get} $gflags $key]
+ set dbt [eval {$db get} $txn $gflags {$key}]
error_check_good db_get_key $dbt \
[list [list $key [pad_data $method $data]]]
@@ -124,6 +154,9 @@ proc test063 { method args } {
puts "\tTest0$tnum.f: Close, reopen db; verify unchanged."
error_check_good dbc_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
error_check_good db_close [$db close] 0
set db [eval {berkdb_open} $omethod $args $testfile]
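The hunks above (and the parallel ones in the tests that follow) all add the same wrapper around each database call: when the environment is transactional, open a transaction, pass it through a -txn flag, and commit afterwards. A minimal sketch of that pattern, condensed from the diff; $env, $db, $key and $data stand in for whatever the individual test uses:

    # Per-operation transaction wrapper added throughout these tests.
    set txn ""
    if { $txnenv == 1 } {
        # Transactional env: start a txn and hand it to the operation.
        set t [$env txn]
        error_check_good txn [is_valid_txn $t $env] TRUE
        set txn "-txn $t"
    }
    set ret [eval {$db put} $txn {$key [chop_data $method $data]}]
    error_check_good db_put $ret 0
    if { $txnenv == 1 } {
        # Commit so the next iteration can start a fresh transaction.
        error_check_good txn [$t commit] 0
    }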
diff --git a/bdb/test/test064.tcl b/bdb/test/test064.tcl
index ad39f4b2256..c306b0d9d46 100644
--- a/bdb/test/test064.tcl
+++ b/bdb/test/test064.tcl
@@ -1,14 +1,15 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1999, 2000
+# Copyright (c) 1999-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: test064.tcl,v 11.8 2000/08/25 14:21:58 sue Exp $
+# $Id: test064.tcl,v 11.13 2002/05/22 15:42:57 sue Exp $
#
-# DB Test 64: Test of DB->get_type
-# Create a database of type specified by method.
-# Make sure DB->get_type returns the right thing with both a
-# normal and DB_UNKNOWN open.
+# TEST test064
+# TEST Test of DB->get_type
+# TEST Create a database of type specified by method.
+# TEST Make sure DB->get_type returns the right thing with both a normal
+# TEST and DB_UNKNOWN open.
proc test064 { method args } {
source ./include.tcl
@@ -16,6 +17,7 @@ proc test064 { method args } {
set omethod [convert_method $method]
set tnum 64
+ set txnenv 0
set eindex [lsearch -exact $args "-env"]
#
# If we are using an env, then testfile should just be the db name.
@@ -27,6 +29,11 @@ proc test064 { method args } {
set testfile test0$tnum.db
incr eindex
set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ }
+ set testdir [get_home $env]
}
cleanup $testdir $env
@@ -34,7 +41,7 @@ proc test064 { method args } {
# Create a test database.
puts "\tTest0$tnum.a: Creating test database of type $method."
- set db [eval {berkdb_open -create -truncate -mode 0644} \
+ set db [eval {berkdb_open -create -mode 0644} \
$omethod $args $testfile]
error_check_good db_create [is_valid_db $db] TRUE
diff --git a/bdb/test/test065.tcl b/bdb/test/test065.tcl
index 5f236ebbd04..ea29b4d2db7 100644
--- a/bdb/test/test065.tcl
+++ b/bdb/test/test065.tcl
@@ -1,20 +1,23 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1999, 2000
+# Copyright (c) 1999-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: test065.tcl,v 11.8 2000/08/25 14:21:58 sue Exp $
+# $Id: test065.tcl,v 11.16 2002/08/22 18:18:50 sandstro Exp $
#
-# DB Test 65: Test of DB->stat(DB_RECORDCOUNT)
+# TEST test065
+# TEST Test of DB->stat(DB_FASTSTAT)
proc test065 { method args } {
source ./include.tcl
global errorCode
global alphabet
+ set nentries 10000
set args [convert_args $method $args]
set omethod [convert_method $method]
set tnum 65
+ set txnenv 0
set eindex [lsearch -exact $args "-env"]
#
# If we are using an env, then testfile should just be the db name.
@@ -26,37 +29,48 @@ proc test065 { method args } {
set testfile test0$tnum.db
incr eindex
set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ #
+ # If we are using txns and running with the
+ # default, set the default down a bit.
+ #
+ if { $nentries == 10000 } {
+ set nentries 100
+ }
+ }
+ set testdir [get_home $env]
}
cleanup $testdir $env
- puts "Test0$tnum: $method ($args) DB->stat(DB_RECORDCOUNT) test."
+ puts "Test0$tnum: $method ($args) DB->stat(DB_FAST_STAT) test."
puts "\tTest0$tnum.a: Create database and check it while empty."
- set db [eval {berkdb_open_noerr -create -truncate -mode 0644} \
+ set db [eval {berkdb_open_noerr -create -mode 0644} \
$omethod $args $testfile]
error_check_good db_open [is_valid_db $db] TRUE
- set ret [catch {eval $db stat -recordcount} res]
+ set ret [catch {eval $db stat -faststat} res]
error_check_good db_close [$db close] 0
if { ([is_record_based $method] && ![is_queue $method]) \
|| [is_rbtree $method] } {
- error_check_good recordcount_ok [lindex [lindex $res 0] 1] 0
+ error_check_good recordcount_ok [is_substr $res \
+ "{{Number of keys} 0}"] 1
} else {
- error_check_good \
- recordcount_notok [is_substr $errorCode "EINVAL"] 1
puts "\tTest0$tnum: Test complete for method $method."
return
}
# If we've got this far, we're on an access method for
- # which DB_RECORDCOUNT makes sense. Thus, we no longer
+ # which record counts make sense. Thus, we no longer
# catch EINVALs, and no longer care about __db_errs.
set db [eval {berkdb_open -create -mode 0644} $omethod $args $testfile]
- puts "\tTest0$tnum.b: put 10000 keys."
+ puts "\tTest0$tnum.b: put $nentries keys."
if { [is_record_based $method] } {
set gflags " -recno "
@@ -66,80 +80,119 @@ proc test065 { method args } {
set keypfx "key"
}
+ set txn ""
set data [pad_data $method $alphabet]
- for { set ndx 1 } { $ndx <= 10000 } { incr ndx } {
- set ret [eval {$db put} $keypfx$ndx $data]
+ for { set ndx 1 } { $ndx <= $nentries } { incr ndx } {
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn {$keypfx$ndx $data}]
error_check_good db_put $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
}
- set ret [$db stat -recordcount]
- error_check_good \
- recordcount_after_puts [lindex [lindex $ret 0] 1] 10000
-
- puts "\tTest0$tnum.c: delete 9000 keys."
- for { set ndx 1 } { $ndx <= 9000 } { incr ndx } {
+ set ret [$db stat -faststat]
+ error_check_good recordcount_after_puts \
+ [is_substr $ret "{{Number of keys} $nentries}"] 1
+
+ puts "\tTest0$tnum.c: delete 90% of keys."
+ set end [expr {$nentries / 10 * 9}]
+ for { set ndx 1 } { $ndx <= $end } { incr ndx } {
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
if { [is_rrecno $method] == 1 } {
# if we're renumbering, when we hit key 5001 we'll
# have deleted 5000 and we'll croak! So delete key
# 1, repeatedly.
- set ret [eval {$db del} [concat $keypfx 1]]
+ set ret [eval {$db del} $txn {[concat $keypfx 1]}]
} else {
- set ret [eval {$db del} $keypfx$ndx]
+ set ret [eval {$db del} $txn {$keypfx$ndx}]
}
error_check_good db_del $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
}
- set ret [$db stat -recordcount]
+ set ret [$db stat -faststat]
if { [is_rrecno $method] == 1 || [is_rbtree $method] == 1 } {
- # We allow renumbering--thus the stat should return 1000
- error_check_good \
- recordcount_after_dels [lindex [lindex $ret 0] 1] 1000
+ # We allow renumbering--thus the stat should return 10%
+ # of nentries.
+ error_check_good recordcount_after_dels [is_substr $ret \
+ "{{Number of keys} [expr {$nentries / 10}]}"] 1
} else {
# No renumbering--no change in RECORDCOUNT!
- error_check_good \
- recordcount_after_dels [lindex [lindex $ret 0] 1] 10000
+ error_check_good recordcount_after_dels \
+ [is_substr $ret "{{Number of keys} $nentries}"] 1
}
- puts "\tTest0$tnum.d: put 8000 new keys at the beginning."
- for { set ndx 1 } { $ndx <= 8000 } {incr ndx } {
- set ret [eval {$db put} $keypfx$ndx $data]
+ puts "\tTest0$tnum.d: put new keys at the beginning."
+ set end [expr {$nentries / 10 * 8}]
+ for { set ndx 1 } { $ndx <= $end } {incr ndx } {
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn {$keypfx$ndx $data}]
error_check_good db_put_beginning $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
}
- set ret [$db stat -recordcount]
+ set ret [$db stat -faststat]
if { [is_rrecno $method] == 1 } {
- # With renumbering we're back up to 8000
- error_check_good \
- recordcount_after_dels [lindex [lindex $ret 0] 1] 8000
+ # With renumbering we're back up to 80% of $nentries
+ error_check_good recordcount_after_dels [is_substr $ret \
+ "{{Number of keys} [expr {$nentries / 10 * 8}]}"] 1
} elseif { [is_rbtree $method] == 1 } {
- # Total records in a btree is now 9000
- error_check_good \
- recordcount_after_dels [lindex [lindex $ret 0] 1] 9000
+ # Total records in a btree is now 90% of $nentries
+ error_check_good recordcount_after_dels [is_substr $ret \
+ "{{Number of keys} [expr {$nentries / 10 * 9}]}"] 1
} else {
# No renumbering--still no change in RECORDCOUNT.
- error_check_good \
- recordcount_after_dels [lindex [lindex $ret 0] 1] 10000
+ error_check_good recordcount_after_dels [is_substr $ret \
+ "{{Number of keys} $nentries}"] 1
}
- puts "\tTest0$tnum.e: put 8000 new keys off the end."
- for { set ndx 9001 } { $ndx <= 17000 } {incr ndx } {
- set ret [eval {$db put} $keypfx$ndx $data]
+ puts "\tTest0$tnum.e: put new keys at the end."
+ set start [expr {1 + $nentries / 10 * 9}]
+ set end [expr {($nentries / 10 * 9) + ($nentries / 10 * 8)}]
+ for { set ndx $start } { $ndx <= $end } { incr ndx } {
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn {$keypfx$ndx $data}]
error_check_good db_put_end $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
}
- set ret [$db stat -recordcount]
+ set ret [$db stat -faststat]
if { [is_rbtree $method] != 1 } {
- # If this is a recno database, the record count should
- # be up to 17000, the largest number we've seen, with
+ # If this is a recno database, the record count should be up
+ # to (1.7 x nentries), the largest number we've seen, with
# or without renumbering.
- error_check_good \
- recordcount_after_dels [lindex [lindex $ret 0] 1] 17000
+ error_check_good recordcount_after_puts2 [is_substr $ret \
+ "{{Number of keys} [expr {$start - 1 + $nentries / 10 * 8}]}"] 1
} else {
- # In an rbtree, 1000 of those keys were overwrites,
- # so there are 7000 new keys + 9000 old keys == 16000
- error_check_good \
- recordcount_after_dels [lindex [lindex $ret 0] 1] 16000
+ # In an rbtree, (.1 x nentries) of those keys were overwrites,
+ # so there are (.7 x nentries) new keys and (.9 x nentries)
+ # old keys for a total of (1.6 x nentries).
+ error_check_good recordcount_after_puts2 [is_substr $ret \
+ "{{Number of keys} [expr {$start -1 + $nentries / 10 * 7}]}"] 1
}
error_check_good db_close [$db close] 0
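The test065 changes above replace the old -recordcount arithmetic with substring matches against the -faststat output, which (as those checks imply) is a list of {description value} pairs. A hypothetical helper in the same spirit, assuming that output format; get_nkeys is not part of testutils.tcl:

    # Hypothetical: walk the -faststat pairs instead of substring-matching.
    proc get_nkeys { db } {
        foreach pair [$db stat -faststat] {
            if { [lindex $pair 0] == "Number of keys" } {
                return [lindex $pair 1]
            }
        }
        return -1
    }
    # Usage: error_check_good nkeys [get_nkeys $db] $nentries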
diff --git a/bdb/test/test066.tcl b/bdb/test/test066.tcl
index 591c51a4c87..13d0894dcae 100644
--- a/bdb/test/test066.tcl
+++ b/bdb/test/test066.tcl
@@ -1,12 +1,15 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1999, 2000
+# Copyright (c) 1999-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: test066.tcl,v 11.7 2000/08/25 14:21:58 sue Exp $
+# $Id: test066.tcl,v 11.12 2002/05/24 15:24:56 sue Exp $
#
-# DB Test 66: Make sure a cursor put to DB_CURRENT acts as an overwrite in
-# a database with duplicates
+# TEST test066
+# TEST Test of cursor overwrites of DB_CURRENT w/ duplicates.
+# TEST
+# TEST Make sure a cursor put to DB_CURRENT acts as an overwrite in a
+# TEST database with duplicates.
proc test066 { method args } {
set omethod [convert_method $method]
set args [convert_args $method $args]
@@ -22,6 +25,7 @@ proc test066 { method args } {
source ./include.tcl
+ set txnenv 0
set eindex [lsearch -exact $args "-env"]
#
# If we are using an env, then testfile should just be the db name.
@@ -33,9 +37,15 @@ proc test066 { method args } {
set testfile test066.db
incr eindex
set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ }
+ set testdir [get_home $env]
}
cleanup $testdir $env
+ set txn ""
set key "test"
set data "olddata"
@@ -43,10 +53,23 @@ proc test066 { method args } {
$testfile]
error_check_good db_open [is_valid_db $db] TRUE
- set ret [eval {$db put} $key [chop_data $method $data]]
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn {$key [chop_data $method $data]}]
error_check_good db_put $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
- set dbc [$db cursor]
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE
set ret [$dbc get -first]
@@ -67,6 +90,9 @@ proc test066 { method args } {
error_check_good db_get_next $ret ""
error_check_good dbc_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
error_check_good db_close [$db close] 0
puts "\tTest0$tnum: Test completed successfully."
diff --git a/bdb/test/test067.tcl b/bdb/test/test067.tcl
index c287d7b1ec5..5f5a88c4be1 100644
--- a/bdb/test/test067.tcl
+++ b/bdb/test/test067.tcl
@@ -1,26 +1,32 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1999, 2000
+# Copyright (c) 1999-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: test067.tcl,v 11.12 2000/08/25 14:21:58 sue Exp $
+# $Id: test067.tcl,v 11.19 2002/06/11 15:19:16 sue Exp $
#
-# DB Test 67: Test of DB_CURRENT partial puts on almost-empty duplicate pages.
-# This test was written to address the following issue, #2 in the list of
-# issues relating to bug #0820:
-# 2. DBcursor->put, DB_CURRENT flag, off-page duplicates, hash and btree:
-# In Btree, the DB_CURRENT overwrite of off-page duplicate records
-# first deletes the record and then puts the new one -- this could
-# be a problem if the removal of the record causes a reverse split.
-# Suggested solution is to acquire a cursor to lock down the current
-# record, put a new record after that record, and then delete using
-# the held cursor.
-# It also tests the following, #5 in the same list of issues:
-# 5. DBcursor->put, DB_AFTER/DB_BEFORE/DB_CURRENT flags, DB_DBT_PARTIAL set,
-# duplicate comparison routine specified.
-# The partial change does not change how data items sort, but the
-# record to be put isn't built yet, and that record supplied is the
-# one that's checked for ordering compatibility.
+# TEST test067
+# TEST Test of DB_CURRENT partial puts onto almost empty duplicate
+# TEST pages, with and without DB_DUP_SORT.
+# TEST
+# TEST Test of DB_CURRENT partial puts on almost-empty duplicate pages.
+# TEST This test was written to address the following issue, #2 in the
+# TEST list of issues relating to bug #0820:
+# TEST
+# TEST 2. DBcursor->put, DB_CURRENT flag, off-page duplicates, hash and btree:
+# TEST In Btree, the DB_CURRENT overwrite of off-page duplicate records
+# TEST first deletes the record and then puts the new one -- this could
+# TEST be a problem if the removal of the record causes a reverse split.
+# TEST Suggested solution is to acquire a cursor to lock down the current
+# TEST record, put a new record after that record, and then delete using
+# TEST the held cursor.
+# TEST
+# TEST It also tests the following, #5 in the same list of issues:
+# TEST 5. DBcursor->put, DB_AFTER/DB_BEFORE/DB_CURRENT flags, DB_DBT_PARTIAL
+# TEST set, duplicate comparison routine specified.
+# TEST The partial change does not change how data items sort, but the
+# TEST record to be put isn't built yet, and that record supplied is the
+# TEST one that's checked for ordering compatibility.
proc test067 { method {ndups 1000} {tnum 67} args } {
source ./include.tcl
global alphabet
@@ -29,6 +35,12 @@ proc test067 { method {ndups 1000} {tnum 67} args } {
set args [convert_args $method $args]
set omethod [convert_method $method]
+ if { [is_record_based $method] == 1 || [is_rbtree $method] == 1 } {
+ puts "\tTest0$tnum: skipping for method $method."
+ return
+ }
+ set txn ""
+ set txnenv 0
set eindex [lsearch -exact $args "-env"]
# If we are using an env, then testfile should just be the db name.
@@ -40,18 +52,31 @@ proc test067 { method {ndups 1000} {tnum 67} args } {
set testfile test0$tnum.db
incr eindex
set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ if { $ndups == 1000 } {
+ set ndups 100
+ }
+ }
+ set testdir [get_home $env]
}
puts "Test0$tnum:\
$method ($args) Partial puts on near-empty duplicate pages."
- if { [is_record_based $method] == 1 || [is_rbtree $method] == 1 } {
- puts "\tTest0$tnum: skipping for method $method."
- return
- }
foreach dupopt { "-dup" "-dup -dupsort" } {
+ #
+ # Testdir might get reset from the env's home dir back
+ # to the default if this calls something that sources
+ # include.tcl, since testdir is a global. Set it correctly
+ # here each time through the loop.
+ #
+ if { $env != "NULL" } {
+ set testdir [get_home $env]
+ }
cleanup $testdir $env
- set db [eval {berkdb_open -create -truncate -mode 0644 \
+ set db [eval {berkdb_open -create -mode 0644 \
$omethod} $args $dupopt {$testfile}]
error_check_good db_open [is_valid_db $db] TRUE
@@ -62,9 +87,17 @@ proc test067 { method {ndups 1000} {tnum 67} args } {
for { set ndx 0 } { $ndx < $ndups } { incr ndx } {
set data $alphabet$ndx
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
# No need for pad_data since we're skipping recno.
- set ret [eval {$db put} $key $data]
+ set ret [eval {$db put} $txn {$key $data}]
error_check_good put($key,$data) $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
}
# Sync so we can inspect database if the next section bombs.
@@ -72,7 +105,12 @@ proc test067 { method {ndups 1000} {tnum 67} args } {
puts "\tTest0$tnum.b ($dupopt):\
Deleting dups (last first), overwriting each."
- set dbc [$db cursor]
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
error_check_good cursor_create [is_valid_cursor $dbc $db] TRUE
set count 0
@@ -109,6 +147,9 @@ proc test067 { method {ndups 1000} {tnum 67} args } {
}
error_check_good dbc_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
error_check_good db_close [$db close] 0
}
}
diff --git a/bdb/test/test068.tcl b/bdb/test/test068.tcl
index 587cd207890..31f4272ba55 100644
--- a/bdb/test/test068.tcl
+++ b/bdb/test/test068.tcl
@@ -1,28 +1,30 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1999, 2000
+# Copyright (c) 1999-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: test068.tcl,v 11.11 2000/08/25 14:21:58 sue Exp $
+# $Id: test068.tcl,v 11.17 2002/06/11 15:34:47 sue Exp $
#
-# DB Test 68: Test of DB_BEFORE and DB_AFTER and partial puts.
-# Make sure DB_BEFORE and DB_AFTER work properly with partial puts,
-# and check that they return EINVAL if DB_DUPSORT is set or if DB_DUP is not.
+# TEST test068
+# TEST Test of DB_BEFORE and DB_AFTER with partial puts.
+# TEST Make sure DB_BEFORE and DB_AFTER work properly with partial puts, and
+# TEST check that they return EINVAL if DB_DUPSORT is set or if DB_DUP is not.
proc test068 { method args } {
source ./include.tcl
global alphabet
global errorCode
set tnum 68
- set nkeys 1000
set args [convert_args $method $args]
set omethod [convert_method $method]
+ set txnenv 0
set eindex [lsearch -exact $args "-env"]
#
# If we are using an env, then testfile should just be the db name.
# Otherwise it is the test directory and the name.
+ set nkeys 1000
if { $eindex == -1 } {
set testfile $testdir/test0$tnum.db
set env NULL
@@ -30,6 +32,12 @@ proc test068 { method args } {
set testfile test0$tnum.db
incr eindex
set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ set nkeys 100
+ }
+ set testdir [get_home $env]
}
puts "Test0$tnum:\
@@ -41,6 +49,7 @@ proc test068 { method args } {
# Create a list of $nkeys words to insert into db.
puts "\tTest0$tnum.a: Initialize word list."
+ set txn ""
set wordlist {}
set count 0
set did [open $dict]
@@ -62,14 +71,30 @@ proc test068 { method args } {
}
foreach dupopt $dupoptlist {
+ #
+ # Testdir might be reset in the loop by some proc sourcing
+ # include.tcl. Reset it to the env's home here, before
+ # cleanup.
+ if { $env != "NULL" } {
+ set testdir [get_home $env]
+ }
cleanup $testdir $env
- set db [eval {berkdb_open_noerr -create -truncate -mode 0644 \
+ set db [eval {berkdb_open_noerr -create -mode 0644 \
$omethod} $args $dupopt {$testfile}]
error_check_good db_open [is_valid_db $db] TRUE
puts "\tTest0$tnum.b ($dupopt): DB initialization: put loop."
foreach word $wordlist {
- error_check_good db_put [$db put $word $word] 0
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn {$word $word}]
+ error_check_good db_put $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
}
puts "\tTest0$tnum.c ($dupopt): get loop."
@@ -82,7 +107,12 @@ proc test068 { method args } {
error_check_good get_key [list [list $word $word]] $dbt
}
- set dbc [$db cursor]
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
error_check_good cursor_open [is_valid_cursor $dbc $db] TRUE
puts "\tTest0$tnum.d ($dupopt): DBC->put w/ DB_AFTER."
@@ -116,6 +146,10 @@ proc test068 { method args } {
puts "\tTest0$tnum ($dupopt): Correct error returns,\
skipping further test."
# continue with broad foreach
+ error_check_good dbc_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
error_check_good db_close [$db close] 0
continue
}
@@ -143,11 +177,19 @@ proc test068 { method args } {
}
error_check_good dbc_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
eval $db sync
puts "\tTest0$tnum.g ($dupopt): Verify correctness."
- set dbc [$db cursor]
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE
# loop through the whole db beginning to end,
@@ -176,6 +218,9 @@ proc test068 { method args } {
incr count
}
error_check_good dbc_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
error_check_good db_close [$db close] 0
}
}
diff --git a/bdb/test/test069.tcl b/bdb/test/test069.tcl
index f3b839de7f9..d986c861358 100644
--- a/bdb/test/test069.tcl
+++ b/bdb/test/test069.tcl
@@ -1,14 +1,14 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1999, 2000
+# Copyright (c) 1999-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: test069.tcl,v 11.4 2000/02/14 03:00:21 bostic Exp $
+# $Id: test069.tcl,v 11.7 2002/01/11 15:53:52 bostic Exp $
#
-# DB Test 69: Run DB Test 67 with a small number of dups,
-# to ensure that partial puts to DB_CURRENT work correctly in
-# the absence of duplicate pages.
-
+# TEST test069
+# TEST Test of DB_CURRENT partial puts without duplicates-- test067 w/
+# TEST small ndups to ensure that partial puts to DB_CURRENT work
+# TEST correctly in the absence of duplicate pages.
proc test069 { method {ndups 50} {tnum 69} args } {
eval test067 $method $ndups $tnum $args
}
diff --git a/bdb/test/test070.tcl b/bdb/test/test070.tcl
index befec9ce1e9..986fd079589 100644
--- a/bdb/test/test070.tcl
+++ b/bdb/test/test070.tcl
@@ -1,19 +1,22 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1999, 2000
+# Copyright (c) 1999-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: test070.tcl,v 11.18 2000/12/18 20:04:47 sue Exp $
+# $Id: test070.tcl,v 11.27 2002/09/05 17:23:07 sandstro Exp $
#
-# DB Test 70: Test of DB_CONSUME.
-# Fork off six processes, four consumers and two producers.
-# The producers will each put 20000 records into a queue;
-# the consumers will each get 10000.
-# Then, verify that no record was lost or retrieved twice.
+# TEST test070
+# TEST Test of DB_CONSUME (Four consumers, 1000 items.)
+# TEST
+# TEST Fork off six processes, four consumers and two producers.
+# TEST The producers will each put 20000 records into a queue;
+# TEST the consumers will each get 10000.
+# TEST Then, verify that no record was lost or retrieved twice.
proc test070 { method {nconsumers 4} {nproducers 2} \
{nitems 1000} {mode CONSUME } {start 0} {txn -txn} {tnum 70} args } {
source ./include.tcl
global alphabet
+ global encrypt
#
# If we are using an env, then skip this test. It needs its own.
@@ -26,6 +29,10 @@ proc test070 { method {nconsumers 4} {nproducers 2} \
}
set omethod [convert_method $method]
set args [convert_args $method $args]
+ if { $encrypt != 0 } {
+ puts "Test0$tnum skipping for security"
+ return
+ }
puts "Test0$tnum: $method ($args) Test of DB_$mode flag to DB->get."
puts "\tUsing $txn environment."
@@ -42,7 +49,7 @@ proc test070 { method {nconsumers 4} {nproducers 2} \
set testfile test0$tnum.db
# Create environment
- set dbenv [eval {berkdb env -create $txn -home } $testdir]
+ set dbenv [eval {berkdb_env -create $txn -home } $testdir]
error_check_good dbenv_create [is_valid_env $dbenv] TRUE
# Create database
@@ -86,7 +93,7 @@ proc test070 { method {nconsumers 4} {nproducers 2} \
}
# Wait for all children.
- watch_procs 10
+ watch_procs $pidlist 10
# Verify: slurp all record numbers into list, sort, and make
# sure each appears exactly once.
@@ -96,6 +103,12 @@ proc test070 { method {nconsumers 4} {nproducers 2} \
set input $consumerlog$ndx
set iid [open $input r]
while { [gets $iid str] != -1 } {
+ # Convert high ints to negative ints, to
+ # simulate Tcl's behavior on a 32-bit machine
+ # even if we're on a 64-bit one.
+ if { $str > 0x7fffffff } {
+ set str [expr $str - 1 - 0xffffffff]
+ }
lappend reclist $str
}
close $iid
@@ -104,16 +117,25 @@ proc test070 { method {nconsumers 4} {nproducers 2} \
set nitems [expr $start + $nitems]
for { set ndx $start } { $ndx < $nitems } { incr ndx } {
+ # Convert high ints to negative ints, to simulate
+ # 32-bit behavior on 64-bit platforms.
+ if { $ndx > 0x7fffffff } {
+ set cmp [expr $ndx - 1 - 0xffffffff]
+ } else {
+ set cmp [expr $ndx + 0]
+ }
# Skip 0 if we are wrapping around
- if { $ndx == 0 } {
+ if { $cmp == 0 } {
incr ndx
incr nitems
+ incr cmp
}
# Be sure to convert ndx to a number before comparing.
- error_check_good pop_num [lindex $sortreclist 0] [expr $ndx + 0]
+ error_check_good pop_num [lindex $sortreclist 0] $cmp
set sortreclist [lreplace $sortreclist 0 0]
}
error_check_good list_ends_empty $sortreclist {}
+ error_check_good db_close [$db close] 0
error_check_good dbenv_close [$dbenv close] 0
puts "\tTest0$tnum completed successfully."
diff --git a/bdb/test/test071.tcl b/bdb/test/test071.tcl
index 376c902ec4d..3f2604022f1 100644
--- a/bdb/test/test071.tcl
+++ b/bdb/test/test071.tcl
@@ -1,12 +1,13 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1999, 2000
+# Copyright (c) 1999-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: test071.tcl,v 11.6 2000/12/01 04:28:36 ubell Exp $
+# $Id: test071.tcl,v 11.9 2002/01/11 15:53:53 bostic Exp $
#
-# DB Test 71: Test of DB_CONSUME.
-# This is DB Test 70, with one consumer, one producers, and 10000 items.
+# TEST test071
+# TEST Test of DB_CONSUME (One consumer, 10000 items.)
+# TEST This is DB Test 70, with one consumer, one producer, and 10000 items.
proc test071 { method {nconsumers 1} {nproducers 1}\
{nitems 10000} {mode CONSUME} {start 0 } {txn -txn} {tnum 71} args } {
diff --git a/bdb/test/test072.tcl b/bdb/test/test072.tcl
index 3ca7415a2cb..3c08f93975d 100644
--- a/bdb/test/test072.tcl
+++ b/bdb/test/test072.tcl
@@ -1,11 +1,12 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1999, 2000
+# Copyright (c) 1999-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: test072.tcl,v 11.13 2000/12/11 17:24:55 sue Exp $
+# $Id: test072.tcl,v 11.27 2002/07/01 15:40:48 krinsky Exp $
#
-# DB Test 72: Test of cursor stability when duplicates are moved off-page.
+# TEST test072
+# TEST Test of cursor stability when duplicates are moved off-page.
proc test072 { method {pagesize 512} {ndups 20} {tnum 72} args } {
source ./include.tcl
global alphabet
@@ -13,6 +14,7 @@ proc test072 { method {pagesize 512} {ndups 20} {tnum 72} args } {
set omethod [convert_method $method]
set args [convert_args $method $args]
+ set txnenv 0
set eindex [lsearch -exact $args "-env"]
#
# If we are using an env, then testfile should just be the db name.
@@ -24,6 +26,11 @@ proc test072 { method {pagesize 512} {ndups 20} {tnum 72} args } {
set testfile test0$tnum.db
incr eindex
set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ }
+ set testdir [get_home $env]
}
cleanup $testdir $env
@@ -37,8 +44,6 @@ proc test072 { method {pagesize 512} {ndups 20} {tnum 72} args } {
set predatum "1234567890"
set postdatum "0987654321"
- append args " -pagesize $pagesize "
-
puts -nonewline "Test0$tnum $omethod ($args): "
if { [is_record_based $method] || [is_rbtree $method] } {
puts "Skipping for method $method."
@@ -53,57 +58,73 @@ proc test072 { method {pagesize 512} {ndups 20} {tnum 72} args } {
return
}
- foreach dupopt { "-dup" "-dup -dupsort" } {
- set db [eval {berkdb_open -create -truncate -mode 0644} \
- $omethod $args $dupopt $testfile]
+ append args " -pagesize $pagesize "
+ set txn ""
+
+ set dlist [list "-dup" "-dup -dupsort"]
+ set testid 0
+ foreach dupopt $dlist {
+ incr testid
+ set duptestfile $testfile$testid
+ set db [eval {berkdb_open -create -mode 0644} \
+ $omethod $args $dupopt {$duptestfile}]
error_check_good "db open" [is_valid_db $db] TRUE
puts \
"\tTest0$tnum.a: ($dupopt) Set up surrounding keys and cursors."
- error_check_good pre_put [$db put $prekey $predatum] 0
- error_check_good post_put [$db put $postkey $postdatum] 0
- set precursor [$db cursor]
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn {$prekey $predatum}]
+ error_check_good pre_put $ret 0
+ set ret [eval {$db put} $txn {$postkey $postdatum}]
+ error_check_good post_put $ret 0
+
+ set precursor [eval {$db cursor} $txn]
error_check_good precursor [is_valid_cursor $precursor \
$db] TRUE
- set postcursor [$db cursor]
+ set postcursor [eval {$db cursor} $txn]
error_check_good postcursor [is_valid_cursor $postcursor \
$db] TRUE
error_check_good preset [$precursor get -set $prekey] \
[list [list $prekey $predatum]]
error_check_good postset [$postcursor get -set $postkey] \
[list [list $postkey $postdatum]]
-
+
puts "\tTest0$tnum.b: Put/create cursor/verify all cursor loop."
-
+
for { set i 0 } { $i < $ndups } { incr i } {
set datum [format "%4d$alphabet" [expr $i + 1000]]
set data($i) $datum
-
+
# Uncomment these lines to see intermediate steps.
- error_check_good db_sync($i) [$db sync] 0
- error_check_good db_dump($i) \
- [catch {exec $util_path/db_dump \
- -da $testfile > TESTDIR/out.$i}] 0
-
- error_check_good "db put ($i)" [$db put $key $datum] 0
-
- set dbc($i) [$db cursor]
+ # error_check_good db_sync($i) [$db sync] 0
+ # error_check_good db_dump($i) \
+ # [catch {exec $util_path/db_dump \
+ # -da $duptestfile > $testdir/out.$i}] 0
+
+ set ret [eval {$db put} $txn {$key $datum}]
+ error_check_good "db put ($i)" $ret 0
+
+ set dbc($i) [eval {$db cursor} $txn]
error_check_good "db cursor ($i)"\
[is_valid_cursor $dbc($i) $db] TRUE
-
+
error_check_good "dbc get -get_both ($i)"\
[$dbc($i) get -get_both $key $datum]\
[list [list $key $datum]]
-
+
for { set j 0 } { $j < $i } { incr j } {
set dbt [$dbc($j) get -current]
set k [lindex [lindex $dbt 0] 0]
set d [lindex [lindex $dbt 0] 1]
-
+
#puts "cursor $j after $i: $d"
-
+
eval {$db sync}
-
+
error_check_good\
"cursor $j key correctness after $i puts" \
$k $key
@@ -111,8 +132,8 @@ proc test072 { method {pagesize 512} {ndups 20} {tnum 72} args } {
"cursor $j data correctness after $i puts" \
$d $data($j)
}
-
- # Check correctness of pre- and post- cursors. Do an
+
+ # Check correctness of pre- and post- cursors. Do an
# error_check_good on the lengths first so that we don't
# spew garbage as the "got" field and screw up our
# terminal. (It's happened here.)
@@ -121,7 +142,7 @@ proc test072 { method {pagesize 512} {ndups 20} {tnum 72} args } {
error_check_good \
"key earlier cursor correctness after $i puts" \
[string length [lindex [lindex $pre_dbt 0] 0]] \
- [string length $prekey]
+ [string length $prekey]
error_check_good \
"data earlier cursor correctness after $i puts" \
[string length [lindex [lindex $pre_dbt 0] 1]] \
@@ -129,12 +150,11 @@ proc test072 { method {pagesize 512} {ndups 20} {tnum 72} args } {
error_check_good \
"key later cursor correctness after $i puts" \
[string length [lindex [lindex $post_dbt 0] 0]] \
- [string length $postkey]
+ [string length $postkey]
error_check_good \
"data later cursor correctness after $i puts" \
[string length [lindex [lindex $post_dbt 0] 1]]\
[string length $postdatum]
-
error_check_good \
"earlier cursor correctness after $i puts" \
@@ -143,38 +163,40 @@ proc test072 { method {pagesize 512} {ndups 20} {tnum 72} args } {
"later cursor correctness after $i puts" \
$post_dbt [list [list $postkey $postdatum]]
}
-
+
puts "\tTest0$tnum.c: Reverse Put/create cursor/verify all cursor loop."
set end [expr $ndups * 2 - 1]
- for { set i $end } { $i > $ndups } { set i [expr $i - 1] } {
+ for { set i $end } { $i >= $ndups } { set i [expr $i - 1] } {
set datum [format "%4d$alphabet" [expr $i + 1000]]
set data($i) $datum
-
+
# Uncomment these lines to see intermediate steps.
- error_check_good db_sync($i) [$db sync] 0
- error_check_good db_dump($i) \
- [catch {exec $util_path/db_dump \
- -da $testfile > TESTDIR/out.$i}] 0
-
- error_check_good "db put ($i)" [$db put $key $datum] 0
-
- set dbc($i) [$db cursor]
+ # error_check_good db_sync($i) [$db sync] 0
+ # error_check_good db_dump($i) \
+ # [catch {exec $util_path/db_dump \
+ # -da $duptestfile > $testdir/out.$i}] 0
+
+ set ret [eval {$db put} $txn {$key $datum}]
+ error_check_good "db put ($i)" $ret 0
+
+ error_check_bad dbc($i)_stomped [info exists dbc($i)] 1
+ set dbc($i) [eval {$db cursor} $txn]
error_check_good "db cursor ($i)"\
[is_valid_cursor $dbc($i) $db] TRUE
-
+
error_check_good "dbc get -get_both ($i)"\
[$dbc($i) get -get_both $key $datum]\
[list [list $key $datum]]
-
+
for { set j $i } { $j < $end } { incr j } {
set dbt [$dbc($j) get -current]
set k [lindex [lindex $dbt 0] 0]
set d [lindex [lindex $dbt 0] 1]
-
+
#puts "cursor $j after $i: $d"
-
+
eval {$db sync}
-
+
error_check_good\
"cursor $j key correctness after $i puts" \
$k $key
@@ -182,8 +204,8 @@ proc test072 { method {pagesize 512} {ndups 20} {tnum 72} args } {
"cursor $j data correctness after $i puts" \
$d $data($j)
}
-
- # Check correctness of pre- and post- cursors. Do an
+
+ # Check correctness of pre- and post- cursors. Do an
# error_check_good on the lengths first so that we don't
# spew garbage as the "got" field and screw up our
# terminal. (It's happened here.)
@@ -192,7 +214,7 @@ proc test072 { method {pagesize 512} {ndups 20} {tnum 72} args } {
error_check_good \
"key earlier cursor correctness after $i puts" \
[string length [lindex [lindex $pre_dbt 0] 0]] \
- [string length $prekey]
+ [string length $prekey]
error_check_good \
"data earlier cursor correctness after $i puts" \
[string length [lindex [lindex $pre_dbt 0] 1]] \
@@ -200,12 +222,11 @@ proc test072 { method {pagesize 512} {ndups 20} {tnum 72} args } {
error_check_good \
"key later cursor correctness after $i puts" \
[string length [lindex [lindex $post_dbt 0] 0]] \
- [string length $postkey]
+ [string length $postkey]
error_check_good \
"data later cursor correctness after $i puts" \
[string length [lindex [lindex $post_dbt 0] 1]]\
[string length $postdatum]
-
error_check_good \
"earlier cursor correctness after $i puts" \
@@ -217,9 +238,15 @@ proc test072 { method {pagesize 512} {ndups 20} {tnum 72} args } {
# Close cursors.
puts "\tTest0$tnum.d: Closing cursors."
- for { set i 0 } { $i < $ndups } { incr i } {
+ for { set i 0 } { $i <= $end } { incr i } {
error_check_good "dbc close ($i)" [$dbc($i) close] 0
}
+ unset dbc
+ error_check_good precursor_close [$precursor close] 0
+ error_check_good postcursor_close [$postcursor close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
error_check_good "db close" [$db close] 0
}
}
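The test072 loops above keep one open cursor per duplicate and, after every new put, re-read each earlier cursor with get -current to prove it still points at its original item. The core of that stability check, reduced from the diff (names as in the test):

    # Every previously created cursor must still return the key/datum
    # it was positioned on, even after later puts moved dups off-page.
    for { set j 0 } { $j < $i } { incr j } {
        set dbt [$dbc($j) get -current]
        error_check_good "cursor $j key after $i puts" \
            [lindex [lindex $dbt 0] 0] $key
        error_check_good "cursor $j data after $i puts" \
            [lindex [lindex $dbt 0] 1] $data($j)
    }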
diff --git a/bdb/test/test073.tcl b/bdb/test/test073.tcl
index 12a48b0e412..02a0f3b0d19 100644
--- a/bdb/test/test073.tcl
+++ b/bdb/test/test073.tcl
@@ -1,25 +1,27 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1999, 2000
+# Copyright (c) 1999-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: test073.tcl,v 11.17 2000/12/11 17:24:55 sue Exp $
+# $Id: test073.tcl,v 11.23 2002/05/22 15:42:59 sue Exp $
#
-# DB Test 73: Test of cursor stability on duplicate pages.
-# Does the following:
-# a. Initialize things by DB->putting ndups dups and
-# setting a reference cursor to point to each.
-# b. c_put ndups dups (and correspondingly expanding
-# the set of reference cursors) after the last one, making sure
-# after each step that all the reference cursors still point to
-# the right item.
-# c. Ditto, but before the first one.
-# d. Ditto, but after each one in sequence first to last.
-# e. Ditto, but after each one in sequence from last to first.
-# occur relative to the new datum)
-# f. Ditto for the two sequence tests, only doing a
-# DBC->c_put(DB_CURRENT) of a larger datum instead of adding a
-# new one.
+# TEST test073
+# TEST Test of cursor stability on duplicate pages.
+# TEST
+# TEST Does the following:
+# TEST a. Initialize things by DB->putting ndups dups and
+# TEST setting a reference cursor to point to each.
+# TEST b. c_put ndups dups (and correspondingly expanding
+# TEST the set of reference cursors) after the last one, making sure
+# TEST after each step that all the reference cursors still point to
+# TEST the right item.
+# TEST c. Ditto, but before the first one.
+# TEST d. Ditto, but after each one in sequence first to last.
+# TEST e. Ditto, but after each one in sequence from last to first.
+# TEST occur relative to the new datum)
+# TEST f. Ditto for the two sequence tests, only doing a
+# TEST DBC->c_put(DB_CURRENT) of a larger datum instead of adding a
+# TEST new one.
proc test073 { method {pagesize 512} {ndups 50} {tnum 73} args } {
source ./include.tcl
global alphabet
@@ -27,6 +29,7 @@ proc test073 { method {pagesize 512} {ndups 50} {tnum 73} args } {
set omethod [convert_method $method]
set args [convert_args $method $args]
+ set txnenv 0
set eindex [lsearch -exact $args "-env"]
#
# If we are using an env, then testfile should just be the db name.
@@ -38,11 +41,16 @@ proc test073 { method {pagesize 512} {ndups 50} {tnum 73} args } {
set testfile test0$tnum.db
incr eindex
set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ }
+ set testdir [get_home $env]
}
cleanup $testdir $env
set key "the key"
-
+ set txn ""
puts -nonewline "Test0$tnum $omethod ($args): "
if { [is_record_based $method] || [is_rbtree $method] } {
@@ -60,7 +68,7 @@ proc test073 { method {pagesize 512} {ndups 50} {tnum 73} args } {
append args " -pagesize $pagesize -dup"
set db [eval {berkdb_open \
- -create -truncate -mode 0644} $omethod $args $testfile]
+ -create -mode 0644} $omethod $args $testfile]
error_check_good "db open" [is_valid_db $db] TRUE
# Number of outstanding keys.
@@ -71,17 +79,31 @@ proc test073 { method {pagesize 512} {ndups 50} {tnum 73} args } {
for { set i 0 } { $i < $ndups } { incr i } {
set datum [makedatum_t73 $i 0]
- error_check_good "db put ($i)" [$db put $key $datum] 0
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn {$key $datum}]
+ error_check_good "db put ($i)" $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
set is_long($i) 0
incr keys
}
puts "\tTest0$tnum.a.2: Initializing cursor get loop; $keys dups."
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
for { set i 0 } { $i < $keys } { incr i } {
set datum [makedatum_t73 $i 0]
- set dbc($i) [$db cursor]
+ set dbc($i) [eval {$db cursor} $txn]
error_check_good "db cursor ($i)"\
[is_valid_cursor $dbc($i) $db] TRUE
error_check_good "dbc get -get_both ($i)"\
@@ -97,7 +119,7 @@ proc test073 { method {pagesize 512} {ndups 50} {tnum 73} args } {
# to be added (since they start from zero)
set datum [makedatum_t73 $keys 0]
- set curs [$db cursor]
+ set curs [eval {$db cursor} $txn]
error_check_good "db cursor create" [is_valid_cursor $curs $db]\
TRUE
error_check_good "c_put(DB_KEYLAST, $keys)"\
@@ -118,7 +140,7 @@ proc test073 { method {pagesize 512} {ndups 50} {tnum 73} args } {
# to be added (since they start from zero)
set datum [makedatum_t73 $keys 0]
- set curs [$db cursor]
+ set curs [eval {$db cursor} $txn]
error_check_good "db cursor create" [is_valid_cursor $curs $db]\
TRUE
error_check_good "c_put(DB_KEYFIRST, $keys)"\
@@ -138,7 +160,7 @@ proc test073 { method {pagesize 512} {ndups 50} {tnum 73} args } {
set keysnow $keys
for { set i 0 } { $i < $keysnow } { incr i } {
set datum [makedatum_t73 $keys 0]
- set curs [$db cursor]
+ set curs [eval {$db cursor} $txn]
error_check_good "db cursor create" [is_valid_cursor $curs $db]\
TRUE
@@ -162,7 +184,7 @@ proc test073 { method {pagesize 512} {ndups 50} {tnum 73} args } {
for { set i [expr $keys - 1] } { $i >= 0 } { incr i -1 } {
set datum [makedatum_t73 $keys 0]
- set curs [$db cursor]
+ set curs [eval {$db cursor} $txn]
error_check_good "db cursor create" [is_valid_cursor $curs $db]\
TRUE
@@ -190,7 +212,7 @@ proc test073 { method {pagesize 512} {ndups 50} {tnum 73} args } {
for { set i 0 } { $i < $keysnow } { incr i } {
set olddatum [makedatum_t73 $i 0]
set newdatum [makedatum_t73 $i 1]
- set curs [$db cursor]
+ set curs [eval {$db cursor} $txn]
error_check_good "db cursor create" [is_valid_cursor $curs $db]\
TRUE
@@ -215,6 +237,9 @@ proc test073 { method {pagesize 512} {ndups 50} {tnum 73} args } {
for { set i 0 } { $i < $keys } { incr i } {
error_check_good "dbc close ($i)" [$dbc($i) close] 0
}
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
error_check_good "db close" [$db close] 0
}
diff --git a/bdb/test/test074.tcl b/bdb/test/test074.tcl
index ddc5f16429d..7f620db2d97 100644
--- a/bdb/test/test074.tcl
+++ b/bdb/test/test074.tcl
@@ -1,12 +1,13 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1999, 2000
+# Copyright (c) 1999-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: test074.tcl,v 11.10 2000/08/25 14:21:58 sue Exp $
+# $Id: test074.tcl,v 11.17 2002/05/24 15:24:56 sue Exp $
#
-# DB Test 74: Test of DB_NEXT_NODUP.
-proc test074 { method {dir -nextnodup} {pagesize 512} {nitems 100} {tnum 74} args } {
+# TEST test074
+# TEST Test of DB_NEXT_NODUP.
+proc test074 { method {dir -nextnodup} {nitems 100} {tnum 74} args } {
source ./include.tcl
global alphabet
global rand_init
@@ -31,6 +32,7 @@ proc test074 { method {dir -nextnodup} {pagesize 512} {nitems 100} {tnum 74} arg
puts "\tTest0$tnum.a: No duplicates."
+ set txnenv 0
set eindex [lsearch -exact $args "-env"]
#
# If we are using an env, then testfile should just be the db name.
@@ -42,11 +44,17 @@ proc test074 { method {dir -nextnodup} {pagesize 512} {nitems 100} {tnum 74} arg
set testfile test0$tnum-nodup.db
incr eindex
set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ }
+ set testdir [get_home $env]
}
cleanup $testdir $env
- set db [eval {berkdb_open -create -truncate -mode 0644} $omethod\
+ set db [eval {berkdb_open -create -mode 0644} $omethod\
$args {$testfile}]
error_check_good db_open [is_valid_db $db] TRUE
+ set txn ""
# Insert nitems items.
puts "\t\tTest0$tnum.a.1: Put loop."
@@ -61,14 +69,28 @@ proc test074 { method {dir -nextnodup} {pagesize 512} {nitems 100} {tnum 74} arg
set key "key$i"
}
set data "$globaldata$i"
- error_check_good put($i) [$db put $key\
- [chop_data $method $data]] 0
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn {$key \
+ [chop_data $method $data]}]
+ error_check_good put($i) $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
}
puts "\t\tTest0$tnum.a.2: Get($dir)"
# foundarray($i) is set when key number i is found in the database
- set dbc [$db cursor]
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE
# Initialize foundarray($i) to zero for all $i
@@ -105,17 +127,28 @@ proc test074 { method {dir -nextnodup} {pagesize 512} {nitems 100} {tnum 74} arg
}
error_check_good dbc_close(nodup) [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
# If we are a method that doesn't allow dups, verify that
# we get an empty list if we try to use DB_NEXT_DUP
if { [is_record_based $method] == 1 || [is_rbtree $method] == 1 } {
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
puts "\t\tTest0$tnum.a.5: Check DB_NEXT_DUP for $method."
- set dbc [$db cursor]
+ set dbc [eval {$db cursor} $txn]
error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE
set dbt [$dbc get $dir]
error_check_good $method:nextdup [$dbc get -nextdup] [list]
error_check_good dbc_close(nextdup) [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
}
error_check_good db_close(nodup) [$db close] 0
@@ -143,7 +176,7 @@ proc test074 { method {dir -nextnodup} {pagesize 512} {nitems 100} {tnum 74} arg
puts "\tTest0$tnum.b: Duplicates ($opt)."
puts "\t\tTest0$tnum.b.1 ($opt): Put loop."
- set db [eval {berkdb_open -create -truncate -mode 0644}\
+ set db [eval {berkdb_open -create -mode 0644}\
$opt $omethod $args {$testfile}]
error_check_good db_open [is_valid_db $db] TRUE
@@ -160,8 +193,17 @@ proc test074 { method {dir -nextnodup} {pagesize 512} {nitems 100} {tnum 74} arg
set data "$globaldata$j"
}
- error_check_good put($i,$j) \
- [$db put $key $data] 0
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn \
+ [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn {$key $data}]
+ error_check_good put($i,$j) $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
}
}
@@ -175,7 +217,12 @@ proc test074 { method {dir -nextnodup} {pagesize 512} {nitems 100} {tnum 74} arg
# within the duplicate set.
puts "\t\tTest0$tnum.b.2 ($opt): Get loop."
set one "001"
- set dbc [$db cursor]
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
error_check_good dbc($opt) [is_valid_cursor $dbc $db] TRUE
for { set i 1 } { $i <= $nitems } { incr i } {
set dbt [$dbc get $dir]
@@ -216,6 +263,9 @@ proc test074 { method {dir -nextnodup} {pagesize 512} {nitems 100} {tnum 74} arg
}
error_check_good dbc_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
error_check_good db_close [$db close] 0
}
}
diff --git a/bdb/test/test075.tcl b/bdb/test/test075.tcl
index 2aa0e1e2501..540d8f0ed73 100644
--- a/bdb/test/test075.tcl
+++ b/bdb/test/test075.tcl
@@ -1,195 +1,205 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2000
+# Copyright (c) 2000-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: test075.tcl,v 11.9 2000/08/25 14:21:58 sue Exp $
+# $Id: test075.tcl,v 11.21 2002/08/08 15:38:11 bostic Exp $
#
-# DB Test 75 (replacement)
-# Test the DB->rename method.
+# TEST test075
+# TEST Test of DB->rename().
+# TEST (formerly test of DB_TRUNCATE cached page invalidation [#1487])
proc test075 { method { tnum 75 } args } {
+ global encrypt
global errorCode
+ global errorInfo
+
source ./include.tcl
set omethod [convert_method $method]
set args [convert_args $method $args]
puts "Test0$tnum: $method ($args): Test of DB->rename()"
-
- # If we are using an env, then testfile should just be the db name.
- # Otherwise it is the test directory and the name.
+ # If we are using an env, then testfile should just be the
+ # db name. Otherwise it is the test directory and the name.
set eindex [lsearch -exact $args "-env"]
- if { $eindex == -1 } {
- set oldfile $testdir/test0$tnum-old.db
- set newfile $testdir/test0$tnum.db
- set env NULL
- set renargs ""
- } else {
- set oldfile test0$tnum-old.db
- set newfile test0$tnum.db
- # File existence checks won't work in an env, since $oldfile
- # and $newfile won't be in the current working directory.
- # We use this to skip them, and turn our secondary check
- # (opening the dbs and seeing that all is well) into the main
- # one.
+ if { $eindex != -1 } {
+ # If we are using an env, then skip this test.
+ # It needs its own.
incr eindex
set env [lindex $args $eindex]
- set renargs " -env $env"
- }
-
- # Make sure we're starting from a clean slate.
- cleanup $testdir $env
- if { $env == "NULL" } {
- error_check_bad "$oldfile exists" [file exists $oldfile] 1
- error_check_bad "$newfile exists" [file exists $newfile] 1
- }
-
- puts "\tTest0$tnum.a: Create/rename file"
- puts "\t\tTest0$tnum.a.1: create"
- set db [eval {berkdb_open -create -mode 0644} $omethod $args $oldfile]
- error_check_good dbopen [is_valid_db $db] TRUE
-
- if { $env == "NULL" } {
- error_check_bad "$oldfile exists" [file exists $oldfile] 0
- error_check_bad "$newfile exists" [file exists $newfile] 1
- }
-
- # The nature of the key and data are unimportant; use numeric key
- # so record-based methods don't need special treatment.
- set key 1
- set data [pad_data $method data]
-
- error_check_good dbput [$db put $key $data] 0
- error_check_good dbclose [$db close] 0
-
- puts "\t\tTest0$tnum.a.2: rename"
- if { $env == "NULL" } {
- error_check_bad "$oldfile exists" [file exists $oldfile] 0
- error_check_bad "$newfile exists" [file exists $newfile] 1
- }
- error_check_good rename_file [eval {berkdb dbrename}\
- $renargs $oldfile $newfile] 0
- if { $env == "NULL" } {
- error_check_bad "$oldfile exists" [file exists $oldfile] 1
- error_check_bad "$newfile exists" [file exists $newfile] 0
+ puts "Skipping test075 for env $env"
+ return
}
-
- puts "\t\tTest0$tnum.a.3: check"
- # Open again with create to make sure we're not caching or anything
- # silly. In the normal case (no env), we already know the file doesn't
- # exist.
- set odb [eval {berkdb_open -create -mode 0644} $omethod $args $oldfile]
- set ndb [eval {berkdb_open -create -mode 0644} $omethod $args $newfile]
- error_check_good odb_open [is_valid_db $odb] TRUE
- error_check_good ndb_open [is_valid_db $ndb] TRUE
-
- set odbt [$odb get $key]
- set ndbt [$ndb get $key]
-
- # The DBT from the "old" database should be empty, not the "new" one.
- error_check_good odbt_empty [llength $odbt] 0
- error_check_bad ndbt_empty [llength $ndbt] 0
-
- error_check_good ndbt [lindex [lindex $ndbt 0] 1] $data
-
- error_check_good odb_close [$odb close] 0
- error_check_good ndb_close [$ndb close] 0
-
- if { $env != "NULL" } {
- puts "\tTest0$tnum: External environment present; \
- skipping remainder"
+ if { $encrypt != 0 } {
+ puts "Skipping test075 for security"
return
}
- # Now there's both an old and a new. Rename the "new" to the "old"
- # and make sure that fails.
- #
- # XXX Ideally we'd do this test even when there's an external
- # environment, but that env has errpfx/errfile set now. :-(
- puts "\tTest0$tnum.b: Make sure rename fails instead of overwriting"
- set ret [catch {eval {berkdb dbrename} $renargs $newfile $oldfile} res]
- error_check_bad rename_overwrite $ret 0
- error_check_good rename_overwrite_ret [is_substr $errorCode EEXIST] 1
-
- # Verify and then start over from a clean slate.
- verify_dir $testdir "\tTest0$tnum.c: "
- cleanup $testdir $env
- error_check_bad "$oldfile exists" [file exists $oldfile] 1
- error_check_bad "$newfile exists" [file exists $newfile] 1
-
- set oldfile test0$tnum-old.db
- set newfile test0$tnum.db
-
- puts "\tTest0$tnum.d: Create/rename file in environment"
-
- set env [berkdb env -create -home $testdir]
- error_check_good env_open [is_valid_env $env] TRUE
- error_check_bad "$oldfile exists" [file exists $oldfile] 1
- error_check_bad "$newfile exists" [file exists $newfile] 1
-
- puts "\t\tTest0$tnum.d.1: create"
- set db [eval {berkdb_open -create -mode 0644} -env $env\
- $omethod $args $oldfile]
- error_check_good dbopen [is_valid_db $db] TRUE
-
- # We need to make sure that it didn't create/rename into the
- # current directory.
- error_check_bad "$oldfile exists" [file exists $oldfile] 1
- error_check_bad "$newfile exists" [file exists $newfile] 1
- error_check_bad "$testdir/$oldfile exists"\
- [file exists $testdir/$oldfile] 0
- error_check_bad "$testdir/$newfile exists"\
- [file exists $testdir/$newfile] 1
-
- error_check_good dbput [$db put $key $data] 0
- error_check_good dbclose [$db close] 0
-
- puts "\t\tTest0$tnum.d.2: rename"
-
- error_check_good rename_file [berkdb dbrename -env $env\
- $oldfile $newfile] 0
- error_check_bad "$oldfile exists" [file exists $oldfile] 1
- error_check_bad "$newfile exists" [file exists $newfile] 1
- error_check_bad "$testdir/$oldfile exists"\
- [file exists $testdir/$oldfile] 1
- error_check_bad "$testdir/$newfile exists"\
- [file exists $testdir/$newfile] 0
-
- puts "\t\tTest0$tnum.d.3: check"
- # Open again with create to make sure we're not caching or anything
- # silly.
- set odb [eval {berkdb_open -create -mode 0644} -env $env\
- $omethod $args $oldfile]
- set ndb [eval {berkdb_open -create -mode 0644} -env $env\
- $omethod $args $newfile]
- error_check_good odb_open [is_valid_db $odb] TRUE
- error_check_good ndb_open [is_valid_db $ndb] TRUE
-
- set odbt [$odb get $key]
- set ndbt [$ndb get $key]
-
- # The DBT from the "old" database should be empty, not the "new" one.
- error_check_good odbt_empty [llength $odbt] 0
- error_check_bad ndbt_empty [llength $ndbt] 0
-
- error_check_good ndbt [lindex [lindex $ndbt 0] 1] $data
-
- error_check_good odb_close [$odb close] 0
- error_check_good ndb_close [$ndb close] 0
-
- # XXX
- # We need to close and reopen the env since berkdb_open has
- # set its errfile/errpfx, and we can't unset that.
- error_check_good env_close [$env close] 0
- set env [berkdb env -home $testdir]
- error_check_good env_open2 [is_valid_env $env] TRUE
-
- puts "\tTest0$tnum.e:\
- Make sure rename fails instead of overwriting in env"
- set ret [catch {eval {berkdb dbrename} -env $env $newfile $oldfile} res]
- error_check_bad rename_overwrite $ret 0
- error_check_good rename_overwrite_ret [is_substr $errorCode EEXIST] 1
-
- error_check_good env_close [$env close] 0
-
- puts "\tTest0$tnum succeeded."
+ # Define absolute pathnames
+ set curdir [pwd]
+ cd $testdir
+ set fulldir [pwd]
+ cd $curdir
+ set reldir $testdir
+
+ # Set up absolute and relative pathnames for test
+ set paths [list $fulldir $reldir]
+ foreach path $paths {
+ puts "\tTest0$tnum: starting test of $path path"
+ set oldfile $path/test0$tnum-old.db
+ set newfile $path/test0$tnum.db
+ set env NULL
+ set envargs ""
+
+ # Loop through test using the following rename options
+ # 1. no environment, not in transaction
+ # 2. with environment, not in transaction
+ # 3. rename with auto-commit
+ # 4. rename in committed transaction
+ # 5. rename in aborted transaction
+
+ foreach op "noenv env auto commit abort" {
+
+ puts "\tTest0$tnum.a: Create/rename file with $op"
+
+ # Make sure we're starting with a clean slate.
+
+ if { $op == "noenv" } {
+ cleanup $path $env
+ if { $env == "NULL" } {
+ error_check_bad "$oldfile exists" \
+ [file exists $oldfile] 1
+ error_check_bad "$newfile exists" \
+ [file exists $newfile] 1
+ }
+ }
+
+ if { $op == "env" } {
+ env_cleanup $path
+ set env [berkdb_env -create -home $path]
+ set envargs "-env $env"
+ error_check_good env_open [is_valid_env $env] TRUE
+ }
+
+ if { $op == "auto" || $op == "commit" || $op == "abort" } {
+ env_cleanup $path
+ set env [berkdb_env -create -home $path -txn]
+ set envargs "-env $env"
+ error_check_good env_open [is_valid_env $env] TRUE
+ }
+
+ puts "\t\tTest0$tnum.a.1: create"
+ set db [eval {berkdb_open -create -mode 0644} \
+ $omethod $envargs $args $oldfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ if { $env == "NULL" } {
+ error_check_bad \
+ "$oldfile exists" [file exists $oldfile] 0
+ error_check_bad \
+ "$newfile exists" [file exists $newfile] 1
+ }
+
+ # The nature of the key and data is unimportant;
+ # use a numeric key so record-based methods don't need
+ # special treatment.
+ set key 1
+ set data [pad_data $method data]
+
+ error_check_good dbput [$db put $key $data] 0
+ error_check_good dbclose [$db close] 0
+
+ puts "\t\tTest0$tnum.a.2: rename"
+ if { $env == "NULL" } {
+ error_check_bad \
+ "$oldfile exists" [file exists $oldfile] 0
+ error_check_bad \
+ "$newfile exists" [file exists $newfile] 1
+ }
+
+ # Regular renames use berkdb dbrename but transaction
+ # protected renames must use $env dbrename.
+ if { $op == "noenv" || $op == "env" } {
+ error_check_good rename_file [eval {berkdb dbrename} \
+ $envargs $oldfile $newfile] 0
+ } elseif { $op == "auto" } {
+ error_check_good rename_file [eval {$env dbrename} \
+ -auto_commit $oldfile $newfile] 0
+ } else {
+ # $op is "abort" or "commit"
+ set txn [$env txn]
+ error_check_good rename_file [eval {$env dbrename} \
+ -txn $txn $oldfile $newfile] 0
+ error_check_good txn_$op [$txn $op] 0
+ }
+
+ if { $env == "NULL" } {
+ error_check_bad \
+ "$oldfile exists" [file exists $oldfile] 1
+ error_check_bad \
+ "$newfile exists" [file exists $newfile] 0
+ }
+
+ puts "\t\tTest0$tnum.a.3: check"
+ # Open again with create to make sure we're not caching or
+ # anything silly. In the normal case (no env), we already
+ # know the file doesn't exist.
+ set odb [eval {berkdb_open -create -mode 0644} \
+ $envargs $omethod $args $oldfile]
+ set ndb [eval {berkdb_open -create -mode 0644} \
+ $envargs $omethod $args $newfile]
+ error_check_good odb_open [is_valid_db $odb] TRUE
+ error_check_good ndb_open [is_valid_db $ndb] TRUE
+
+ # The DBT from the "old" database should be empty,
+ # not the "new" one, except in the case of an abort.
+ set odbt [$odb get $key]
+ if { $op == "abort" } {
+ error_check_good odbt_has_data [llength $odbt] 1
+ } else {
+ set ndbt [$ndb get $key]
+ error_check_good odbt_empty [llength $odbt] 0
+ error_check_bad ndbt_empty [llength $ndbt] 0
+ error_check_good ndbt [lindex \
+ [lindex $ndbt 0] 1] $data
+ }
+ error_check_good odb_close [$odb close] 0
+ error_check_good ndb_close [$ndb close] 0
+
+ # Now both an old and a new file exist. Rename the
+ # "new" to the "old" and make sure that fails.
+ #
+ # XXX Ideally we'd do this test even when there's
+ # an external environment, but that env has
+ # errpfx/errfile set now. :-(
+ puts "\tTest0$tnum.b: Make sure rename fails\
+ instead of overwriting"
+ if { $env != "NULL" } {
+ error_check_good env_close [$env close] 0
+ set env [berkdb_env_noerr -home $path]
+ error_check_good env_open2 \
+ [is_valid_env $env] TRUE
+ set ret [catch {eval {berkdb dbrename} \
+ -env $env $newfile $oldfile} res]
+ error_check_bad rename_overwrite $ret 0
+ error_check_good rename_overwrite_ret \
+ [is_substr $errorCode EEXIST] 1
+ }
+
+ # Verify and then start over from a clean slate.
+ verify_dir $path "\tTest0$tnum.c: "
+ cleanup $path $env
+ if { $env != "NULL" } {
+ error_check_good env_close [$env close] 0
+ }
+ if { $env == "NULL" } {
+ error_check_bad "$oldfile exists" \
+ [file exists $oldfile] 1
+ error_check_bad "$newfile exists" \
+ [file exists $newfile] 1
+
+ set oldfile test0$tnum-old.db
+ set newfile test0$tnum.db
+ }
+ }
+ }
}
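
For reference, the rename variants this rewritten test075 loop exercises reduce to the sketch below. It only uses calls that appear in the hunk itself (berkdb dbrename, $env dbrename -auto_commit, $env dbrename -txn); the environment home and file names are placeholders, not names from the test suite.

    # Plain rename, no environment.
    berkdb dbrename old.db new.db

    # Auto-committed rename inside a transactional environment.
    set env [berkdb_env -create -home TESTDIR -txn]
    error_check_good rename_auto \
        [$env dbrename -auto_commit old.db new.db] 0

    # Explicitly transaction-protected rename; committing makes it
    # permanent, aborting would undo it.
    set txn [$env txn]
    error_check_good rename_txn [$env dbrename -txn $txn new.db old.db] 0
    error_check_good txn_commit [$txn commit] 0
    error_check_good env_close [$env close] 0
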
diff --git a/bdb/test/test076.tcl b/bdb/test/test076.tcl
index 13a919011e4..9f7b1ed2972 100644
--- a/bdb/test/test076.tcl
+++ b/bdb/test/test076.tcl
@@ -1,17 +1,19 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2000
+# Copyright (c) 2000-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: test076.tcl,v 1.7 2000/08/25 14:21:58 sue Exp $
+# $Id: test076.tcl,v 1.18 2002/07/08 20:16:31 sue Exp $
#
-# DB Test 76: Test creation of many small databases in an env
+# TEST test076
+# TEST Test creation of many small databases in a single environment. [#1528].
proc test076 { method { ndbs 1000 } { tnum 76 } args } {
source ./include.tcl
- set omethod [convert_method $method]
set args [convert_args $method $args]
-
+ set encargs ""
+ set args [split_encargs $args encargs]
+ set omethod [convert_method $method]
if { [is_record_based $method] == 1 } {
set key ""
@@ -20,34 +22,53 @@ proc test076 { method { ndbs 1000 } { tnum 76 } args } {
}
set data "datamoredatamoredata"
- puts -nonewline "Test0$tnum $method ($args): "
- puts -nonewline "Create $ndbs"
- puts " small databases in one env."
-
# Create an env if we weren't passed one.
+ set txnenv 0
set eindex [lsearch -exact $args "-env"]
if { $eindex == -1 } {
set deleteenv 1
- set env [eval {berkdb env -create -home} $testdir \
- {-cachesize {0 102400 1}}]
+ env_cleanup $testdir
+ set env [eval {berkdb_env -create -home} $testdir $encargs]
error_check_good env [is_valid_env $env] TRUE
set args "$args -env $env"
} else {
set deleteenv 0
incr eindex
set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ if { $ndbs == 1000 } {
+ set ndbs 100
+ }
+ }
+ set testdir [get_home $env]
}
+ puts -nonewline "Test0$tnum $method ($args): "
+ puts -nonewline "Create $ndbs"
+ puts " small databases in one env."
+
cleanup $testdir $env
+ set txn ""
for { set i 1 } { $i <= $ndbs } { incr i } {
set testfile test0$tnum.$i.db
- set db [eval {berkdb_open -create -truncate -mode 0644}\
+ set db [eval {berkdb_open -create -mode 0644}\
$args $omethod $testfile]
error_check_good db_open($i) [is_valid_db $db] TRUE
- error_check_good db_put($i) [$db put $key$i \
- [chop_data $method $data$i]] 0
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn {$key$i \
+ [chop_data $method $data$i]}]
+ error_check_good db_put($i) $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
error_check_good db_close($i) [$db close] 0
}
diff --git a/bdb/test/test077.tcl b/bdb/test/test077.tcl
index 47248a309b8..99cf432af20 100644
--- a/bdb/test/test077.tcl
+++ b/bdb/test/test077.tcl
@@ -1,11 +1,12 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2000
+# Copyright (c) 2000-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: test077.tcl,v 1.4 2000/08/25 14:21:58 sue Exp $
+# $Id: test077.tcl,v 1.10 2002/05/24 15:24:57 sue Exp $
#
-# DB Test 77: Test of DB_GET_RECNO [#1206].
+# TEST test077
+# TEST Test of DB_GET_RECNO [#1206].
proc test077 { method { nkeys 1000 } { pagesize 512 } { tnum 77 } args } {
source ./include.tcl
global alphabet
@@ -22,6 +23,7 @@ proc test077 { method { nkeys 1000 } { pagesize 512 } { tnum 77 } args } {
set data $alphabet
+ set txnenv 0
set eindex [lsearch -exact $args "-env"]
if { $eindex == -1 } {
set testfile $testdir/test0$tnum.db
@@ -30,23 +32,43 @@ proc test077 { method { nkeys 1000 } { pagesize 512 } { tnum 77 } args } {
set testfile test0$tnum.db
incr eindex
set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ }
+ set testdir [get_home $env]
}
cleanup $testdir $env
- set db [eval {berkdb_open -create -truncate -mode 0644\
+ set db [eval {berkdb_open -create -mode 0644\
-pagesize $pagesize} $omethod $args {$testfile}]
error_check_good db_open [is_valid_db $db] TRUE
puts "\tTest0$tnum.a: Populating database."
+ set txn ""
for { set i 1 } { $i <= $nkeys } { incr i } {
set key [format %5d $i]
- error_check_good db_put($key) [$db put $key $data] 0
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn {$key $data}]
+ error_check_good db_put($key) $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
}
puts "\tTest0$tnum.b: Verifying record numbers."
- set dbc [$db cursor]
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
error_check_good dbc_open [is_valid_cursor $dbc $db] TRUE
set i 1
@@ -64,5 +86,8 @@ proc test077 { method { nkeys 1000 } { pagesize 512 } { tnum 77 } args } {
}
error_check_good dbc_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
error_check_good db_close [$db close] 0
}
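
The pattern repeated throughout the test076 and test077 hunks above — open a per-operation transaction when the environment is transactional, pass it to the put or cursor call, then commit — looks like this on its own. A minimal sketch using only calls shown in these hunks; TESTDIR and test.db are placeholders.

    set env [berkdb_env -create -home TESTDIR -txn]
    set db [berkdb_open -create -auto_commit -mode 0644 -btree \
        -env $env test.db]

    # Wrap the put in its own transaction.
    set t [$env txn]
    error_check_good db_put [$db put -txn $t mykey mydata] 0
    error_check_good txn_commit [$t commit] 0

    # Cursors opened inside a transaction are closed before it ends.
    set t [$env txn]
    set dbc [$db cursor -txn $t]
    for { set kd [$dbc get -first] } { [llength $kd] != 0 } \
        { set kd [$dbc get -next] } {
        # each $kd is a {{key data}} list
    }
    error_check_good dbc_close [$dbc close] 0
    error_check_good txn_commit [$t commit] 0
    error_check_good db_close [$db close] 0
    error_check_good env_close [$env close] 0
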
diff --git a/bdb/test/test078.tcl b/bdb/test/test078.tcl
index 9642096faf9..45a1d46466e 100644
--- a/bdb/test/test078.tcl
+++ b/bdb/test/test078.tcl
@@ -1,11 +1,12 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2000
+# Copyright (c) 2000-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: test078.tcl,v 1.9 2000/12/11 17:24:55 sue Exp $
+# $Id: test078.tcl,v 1.18 2002/06/20 19:01:02 sue Exp $
#
-# DB Test 78: Test of DBC->c_count(). [#303]
+# TEST test078
+# TEST Test of DBC->c_count(). [#303]
proc test078 { method { nkeys 100 } { pagesize 512 } { tnum 78 } args } {
source ./include.tcl
global alphabet rand_init
@@ -17,14 +18,23 @@ proc test078 { method { nkeys 100 } { pagesize 512 } { tnum 78 } args } {
berkdb srand $rand_init
+ set txnenv 0
set eindex [lsearch -exact $args "-env"]
+ if { $eindex != -1 } {
+ incr eindex
+ }
+
if { $eindex == -1 } {
- set testfile $testdir/test0$tnum.db
+ set testfile $testdir/test0$tnum-a.db
set env NULL
} else {
- set testfile test0$tnum.db
- incr eindex
+ set testfile test0$tnum-a.db
set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ }
+ set testdir [get_home $env]
}
cleanup $testdir $env
@@ -35,13 +45,23 @@ proc test078 { method { nkeys 100 } { pagesize 512 } { tnum 78 } args } {
return
}
- set db [eval {berkdb_open -create -truncate -mode 0644\
+ set db [eval {berkdb_open -create -mode 0644\
-pagesize $pagesize} $omethod $args {$testfile}]
error_check_good db_open [is_valid_db $db] TRUE
+ set txn ""
for { set i 1 } { $i <= $nkeys } { incr i } {
- error_check_good put.a($i) [$db put $i\
- [pad_data $method $alphabet$i]] 0
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn {$i\
+ [pad_data $method $alphabet$i]}]
+ error_check_good put.a($i) $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
error_check_good count.a [$db count $i] 1
}
error_check_good db_close.a [$db close] 0
@@ -56,18 +76,38 @@ proc test078 { method { nkeys 100 } { pagesize 512 } { tnum 78 } args } {
set letter [lindex $tuple 0]
set dupopt [lindex $tuple 2]
+ if { $eindex == -1 } {
+ set testfile $testdir/test0$tnum-b.db
+ set env NULL
+ } else {
+ set testfile test0$tnum-b.db
+ set env [lindex $args $eindex]
+ set testdir [get_home $env]
+ }
+ cleanup $testdir $env
+
puts "\tTest0$tnum.$letter: Duplicates ([lindex $tuple 1])."
puts "\t\tTest0$tnum.$letter.1: Populating database."
- set db [eval {berkdb_open -create -truncate -mode 0644\
+ set db [eval {berkdb_open -create -mode 0644\
-pagesize $pagesize} $dupopt $omethod $args {$testfile}]
error_check_good db_open [is_valid_db $db] TRUE
for { set i 1 } { $i <= $nkeys } { incr i } {
for { set j 0 } { $j < $i } { incr j } {
- error_check_good put.$letter,$i [$db put $i\
- [pad_data $method $j$alphabet]] 0
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn \
+ [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn {$i\
+ [pad_data $method $j$alphabet]}]
+ error_check_good put.$letter,$i $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
}
}
diff --git a/bdb/test/test079.tcl b/bdb/test/test079.tcl
index fe7b978a3dd..70fd4e05090 100644
--- a/bdb/test/test079.tcl
+++ b/bdb/test/test079.tcl
@@ -1,14 +1,16 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2000
+# Copyright (c) 2000-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: test079.tcl,v 11.5 2000/11/16 23:56:18 ubell Exp $
+# $Id: test079.tcl,v 11.8 2002/01/11 15:53:54 bostic Exp $
#
-# DB Test 79 {access method}
-# Check that delete operations work in large btrees. 10000 entries and
-# a pagesize of 512 push this out to a four-level btree, with a small fraction
-# of the entries going on overflow pages.
+# TEST test079
+# TEST Test of deletes in large trees. (test006 w/ sm. pagesize).
+# TEST
+# TEST Check that delete operations work in large btrees. 10000 entries
+# TEST and a pagesize of 512 push this out to a four-level btree, with a
+# TEST small fraction of the entries going on overflow pages.
proc test079 { method {nentries 10000} {pagesize 512} {tnum 79} args} {
if { [ is_queueext $method ] == 1 } {
set method "queue";
diff --git a/bdb/test/test080.tcl b/bdb/test/test080.tcl
index 02a6a7242cd..9f649496f68 100644
--- a/bdb/test/test080.tcl
+++ b/bdb/test/test080.tcl
@@ -1,12 +1,12 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2000
+# Copyright (c) 2000-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: test080.tcl,v 11.7 2000/10/19 23:15:22 ubell Exp $
+# $Id: test080.tcl,v 11.16 2002/08/08 15:38:12 bostic Exp $
#
-# DB Test 80 {access method}
-# Test of dbremove
+# TEST test080
+# TEST Test of DB->remove()
proc test080 { method {tnum 80} args } {
source ./include.tcl
@@ -15,27 +15,112 @@ proc test080 { method {tnum 80} args } {
puts "Test0$tnum: Test of DB->remove()"
+ # Determine full path
+ set curdir [pwd]
+ cd $testdir
+ set fulldir [pwd]
+ cd $curdir
+ # Test both relative and absolute path
+ set paths [list $fulldir $testdir]
+
+ # If we are using an env, then skip this test.
+ # It needs its own.
set eindex [lsearch -exact $args "-env"]
- if { $eindex != -1 } {
- puts "\tTest0$tnum: Skipping in the presence of an environment"
+ set encargs ""
+ set args [split_encargs $args encargs]
+ if { $encargs != ""} {
+ puts "Skipping test080 for security"
return
}
- cleanup $testdir NULL
-
- set testfile $testdir/test0$tnum.db
- set db [eval {berkdb_open -create -truncate -mode 0644} $omethod \
- $args {$testfile}]
- error_check_good db_open [is_valid_db $db] TRUE
- for {set i 1} { $i < 1000 } {incr i} {
- $db put $i $i
+ if { $eindex != -1 } {
+ incr eindex
+ set e [lindex $args $eindex]
+ puts "Skipping test080 for env $e"
+ return
}
- error_check_good db_close [$db close] 0
- error_check_good file_exists_before [file exists $testfile] 1
+ foreach path $paths {
+
+ set dbfile test0$tnum.db
+ set testfile $path/$dbfile
+
+ # Loop through test using the following remove options
+ # 1. no environment, not in transaction
+ # 2. with environment, not in transaction
+ # 3. remove with auto-commit
+ # 4. remove in committed transaction
+ # 5. remove in aborted transaction
+
+ foreach op "noenv env auto commit abort" {
- error_check_good db_remove [berkdb dbremove $testfile] 0
- error_check_good file_exists_after [file exists $testfile] 0
+ # Make sure we're starting with a clean slate.
+ env_cleanup $testdir
+ if { $op == "noenv" } {
+ set dbfile $testfile
+ set e NULL
+ set envargs ""
+ } else {
+ if { $op == "env" } {
+ set largs ""
+ } else {
+ set largs " -txn"
+ }
+ set e [eval {berkdb_env -create -home $path} $largs]
+ set envargs "-env $e"
+ error_check_good env_open [is_valid_env $e] TRUE
+ }
- puts "\tTest0$tnum succeeded."
+ puts "\tTest0$tnum: dbremove with $op in $path"
+ puts "\tTest0$tnum.a.1: Create file"
+ set db [eval {berkdb_open -create -mode 0644} $omethod \
+ $envargs $args {$dbfile}]
+ error_check_good db_open [is_valid_db $db] TRUE
+
+ # The nature of the key and data is unimportant;
+ # use a numeric key so record-based methods don't
+ # need special treatment.
+ set key 1
+ set data [pad_data $method data]
+
+ error_check_good dbput [$db put $key $data] 0
+ error_check_good dbclose [$db close] 0
+ error_check_good file_exists_before \
+ [file exists $testfile] 1
+
+ # Use berkdb dbremove for non-transactional tests
+ # and $env dbremove for transactional tests
+ puts "\tTest0$tnum.a.2: Remove file"
+ if { $op == "noenv" || $op == "env" } {
+ error_check_good remove_$op \
+ [eval {berkdb dbremove} $envargs $dbfile] 0
+ } elseif { $op == "auto" } {
+ error_check_good remove_$op \
+ [eval {$e dbremove} -auto_commit $dbfile] 0
+ } else {
+ # $op is "abort" or "commit"
+ set txn [$e txn]
+ error_check_good remove_$op \
+ [eval {$e dbremove} -txn $txn $dbfile] 0
+ error_check_good txn_$op [$txn $op] 0
+ }
+
+ puts "\tTest0$tnum.a.3: Check that file is gone"
+ # File should now be gone, except in the case of an abort.
+ if { $op != "abort" } {
+ error_check_good exists_after \
+ [file exists $testfile] 0
+ } else {
+ error_check_good exists_after \
+ [file exists $testfile] 1
+ }
+
+ if { $e != "NULL" } {
+ error_check_good env_close [$e close] 0
+ }
+
+ set dbfile test0$tnum-old.db
+ set testfile $path/$dbfile
+ }
+ }
}
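
The remove variants exercised by the rewritten test080 reduce to the sketch below; the key property checked above is that an aborted transactional remove leaves the file in place while a committed one deletes it. Only calls shown in the hunk are used; the home directory and database name are placeholders, and test.db is assumed to have been created in the environment already.

    set home TESTDIR	;# placeholder directory
    set env [berkdb_env -create -home $home -txn]

    # Abort: the file survives.
    set txn [$env txn]
    error_check_good remove_abort [$env dbremove -txn $txn test.db] 0
    error_check_good txn_abort [$txn abort] 0
    error_check_good still_there [file exists $home/test.db] 1

    # Commit: the file is gone.
    set txn [$env txn]
    error_check_good remove_commit [$env dbremove -txn $txn test.db] 0
    error_check_good txn_commit [$txn commit] 0
    error_check_good gone [file exists $home/test.db] 0

    error_check_good env_close [$env close] 0
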
diff --git a/bdb/test/test081.tcl b/bdb/test/test081.tcl
index 44e708c5d49..37c2b44ac33 100644
--- a/bdb/test/test081.tcl
+++ b/bdb/test/test081.tcl
@@ -1,14 +1,13 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1999, 2000
+# Copyright (c) 1999-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: test081.tcl,v 11.3 2000/03/01 15:13:59 krinsky Exp $
-#
-# Test 81.
-# Test off-page duplicates and overflow pages together with
-# very large keys (key/data as file contents).
+# $Id: test081.tcl,v 11.6 2002/01/11 15:53:55 bostic Exp $
#
+# TEST test081
+# TEST Test off-page duplicates and overflow pages together with
+# TEST very large keys (key/data as file contents).
proc test081 { method {ndups 13} {tnum 81} args} {
source ./include.tcl
diff --git a/bdb/test/test082.tcl b/bdb/test/test082.tcl
index e8bd4f975dd..e8c1fa45a92 100644
--- a/bdb/test/test082.tcl
+++ b/bdb/test/test082.tcl
@@ -1,15 +1,14 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2000
+# Copyright (c) 2000-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: test082.tcl,v 11.1 2000/04/30 05:05:26 krinsky Exp $
+# $Id: test082.tcl,v 11.5 2002/01/11 15:53:55 bostic Exp $
#
-# Test 82.
-# Test of DB_PREV_NODUP
-proc test082 { method {dir -prevnodup} {pagesize 512} {nitems 100}\
- {tnum 82} args} {
+# TEST test082
+# TEST Test of DB_PREV_NODUP (uses test074).
+proc test082 { method {dir -prevnodup} {nitems 100} {tnum 82} args} {
source ./include.tcl
- eval {test074 $method $dir $pagesize $nitems $tnum} $args
+ eval {test074 $method $dir $nitems $tnum} $args
}
diff --git a/bdb/test/test083.tcl b/bdb/test/test083.tcl
index 7565a5a74f5..e4168ee1c43 100644
--- a/bdb/test/test083.tcl
+++ b/bdb/test/test083.tcl
@@ -1,12 +1,12 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2000
+# Copyright (c) 2000-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: test083.tcl,v 11.6 2000/12/11 17:24:55 sue Exp $
+# $Id: test083.tcl,v 11.13 2002/06/24 14:06:38 sue Exp $
#
-# Test 83.
-# Test of DB->key_range
+# TEST test083
+# TEST Test of DB->key_range.
proc test083 { method {pgsz 512} {maxitems 5000} {step 2} args} {
source ./include.tcl
set omethod [convert_method $method]
@@ -25,6 +25,7 @@ proc test083 { method {pgsz 512} {maxitems 5000} {step 2} args} {
# If we are using an env, then testfile should just be the db name.
# Otherwise it is the test directory and the name.
+ set txnenv 0
set eindex [lsearch -exact $args "-env"]
if { $eindex == -1 } {
set testfile $testdir/test083.db
@@ -33,6 +34,11 @@ proc test083 { method {pgsz 512} {maxitems 5000} {step 2} args} {
set testfile test083.db
incr eindex
set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ }
+ set testdir [get_home $env]
}
# We assume that numbers will be at most six digits wide
@@ -45,19 +51,22 @@ proc test083 { method {pgsz 512} {maxitems 5000} {step 2} args} {
{ set nitems [expr $nitems * $step] } {
puts "\tTest083.a: Opening new database"
+ if { $env != "NULL"} {
+ set testdir [get_home $env]
+ }
cleanup $testdir $env
- set db [eval {berkdb_open -create -truncate -mode 0644} \
+ set db [eval {berkdb_open -create -mode 0644} \
-pagesize $pgsz $omethod $args $testfile]
error_check_good dbopen [is_valid_db $db] TRUE
- t83_build $db $nitems
- t83_test $db $nitems
+ t83_build $db $nitems $env $txnenv
+ t83_test $db $nitems $env $txnenv
error_check_good db_close [$db close] 0
}
}
-proc t83_build { db nitems } {
+proc t83_build { db nitems env txnenv } {
source ./include.tcl
puts "\tTest083.b: Populating database with $nitems keys"
@@ -73,24 +82,38 @@ proc t83_build { db nitems } {
# just skip the randomization step.
#puts "\t\tTest083.b.2: Randomizing key list"
#set keylist [randomize_list $keylist]
-
#puts "\t\tTest083.b.3: Populating database with randomized keys"
puts "\t\tTest083.b.2: Populating database"
set data [repeat . 50]
-
+ set txn ""
foreach keynum $keylist {
- error_check_good db_put [$db put key[format %6d $keynum] \
- $data] 0
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn {key[format %6d $keynum] $data}]
+ error_check_good db_put $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
}
}
-proc t83_test { db nitems } {
+proc t83_test { db nitems env txnenv } {
# Look at the first key, then at keys about 1/4, 1/2, 3/4, and
# all the way through the database. Make sure the key_ranges
# aren't off by more than 10%.
- set dbc [$db cursor]
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ } else {
+ set txn ""
+ }
+ set dbc [eval {$db cursor} $txn]
error_check_good dbc [is_valid_cursor $dbc $db] TRUE
puts "\tTest083.c: Verifying ranges..."
@@ -129,6 +152,9 @@ proc t83_test { db nitems } {
}
error_check_good dbc_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
}
proc roughly_equal { a b tol } {
diff --git a/bdb/test/test084.tcl b/bdb/test/test084.tcl
index 0efd0d17c00..89bc13978b0 100644
--- a/bdb/test/test084.tcl
+++ b/bdb/test/test084.tcl
@@ -1,16 +1,16 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2000
+# Copyright (c) 2000-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: test084.tcl,v 11.6 2000/12/11 17:24:55 sue Exp $
-#
-# Test 84.
-# Basic sanity test (test001) with large (64K) pages.
+# $Id: test084.tcl,v 11.11 2002/07/13 18:09:14 margo Exp $
#
+# TEST test084
+# TEST Basic sanity test (test001) with large (64K) pages.
proc test084 { method {nentries 10000} {tnum 84} {pagesize 65536} args} {
source ./include.tcl
+ set txnenv 0
set eindex [lsearch -exact $args "-env"]
#
# If we are using an env, then testfile should just be the db name.
@@ -22,6 +22,11 @@ proc test084 { method {nentries 10000} {tnum 84} {pagesize 65536} args} {
set testfile test0$tnum-empty.db
incr eindex
set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ }
+ set testdir [get_home $env]
}
set pgindex [lsearch -exact $args "-pagesize"]
@@ -34,7 +39,7 @@ proc test084 { method {nentries 10000} {tnum 84} {pagesize 65536} args} {
set args "-pagesize $pagesize $args"
- eval {test001 $method $nentries 0 $tnum} $args
+ eval {test001 $method $nentries 0 $tnum 0} $args
set omethod [convert_method $method]
set args [convert_args $method $args]
diff --git a/bdb/test/test085.tcl b/bdb/test/test085.tcl
index 09134a00f65..b0412d6fe68 100644
--- a/bdb/test/test085.tcl
+++ b/bdb/test/test085.tcl
@@ -1,20 +1,23 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2000
+# Copyright (c) 2000-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: test085.tcl,v 1.4 2000/12/11 17:24:55 sue Exp $
+# $Id: test085.tcl,v 1.13 2002/08/08 17:23:46 sandstro Exp $
#
-# DB Test 85: Test of cursor behavior when a cursor is pointing to a deleted
-# btree key which then has duplicates added.
+# TEST test085
+# TEST Test of cursor behavior when a cursor is pointing to a deleted
+# TEST btree key which then has duplicates added. [#2473]
proc test085 { method {pagesize 512} {onp 3} {offp 10} {tnum 85} args } {
source ./include.tcl
global alphabet
set omethod [convert_method $method]
set args [convert_args $method $args]
+ set encargs ""
+ set args [split_encargs $args encargs]
-
+ set txnenv 0
set eindex [lsearch -exact $args "-env"]
#
# If we are using an env, then testfile should just be the db name.
@@ -26,6 +29,11 @@ proc test085 { method {pagesize 512} {onp 3} {offp 10} {tnum 85} args } {
set testfile test0$tnum.db
incr eindex
set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ }
+ set testdir [get_home $env]
}
set pgindex [lsearch -exact $args "-pagesize"]
@@ -45,6 +53,7 @@ proc test085 { method {pagesize 512} {onp 3} {offp 10} {tnum 85} args } {
set predatum "1234567890"
set datum $alphabet
set postdatum "0987654321"
+ set txn ""
append args " -pagesize $pagesize -dup"
@@ -61,8 +70,8 @@ proc test085 { method {pagesize 512} {onp 3} {offp 10} {tnum 85} args } {
# Repeat the test with both on-page and off-page numbers of dups.
foreach ndups "$onp $offp" {
- # Put operations we want to test on a cursor set to the
- # deleted item, the key to use with them, and what should
+ # Put operations we want to test on a cursor set to the
+ # deleted item, the key to use with them, and what should
# come before and after them given a placement of
# the deleted item at the beginning or end of the dupset.
set final [expr $ndups - 1]
@@ -100,15 +109,22 @@ proc test085 { method {pagesize 512} {onp 3} {offp 10} {tnum 85} args } {
{{-prevnodup} "" $prekey $predatum end}
}
+ set txn ""
foreach pair $getops {
set op [lindex $pair 0]
puts "\tTest0$tnum: Get ($op) with $ndups duplicates,\
cursor at the [lindex $pair 4]."
set db [eval {berkdb_open -create \
- -truncate -mode 0644} $omethod $args $testfile]
+ -mode 0644} $omethod $encargs $args $testfile]
error_check_good "db open" [is_valid_db $db] TRUE
- set dbc [test085_setup $db]
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn \
+ [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [test085_setup $db $txn]
set beginning [expr [string compare \
[lindex $pair 4] "beginning"] == 0]
@@ -116,9 +132,10 @@ proc test085 { method {pagesize 512} {onp 3} {offp 10} {tnum 85} args } {
for { set i 0 } { $i < $ndups } { incr i } {
if { $beginning } {
error_check_good db_put($i) \
- [$db put $key [test085_ddatum $i]] 0
+ [eval {$db put} $txn \
+ {$key [test085_ddatum $i]}] 0
} else {
- set c [$db cursor]
+ set c [eval {$db cursor} $txn]
set j [expr $ndups - $i - 1]
error_check_good db_cursor($j) \
[is_valid_cursor $c $db] TRUE
@@ -128,14 +145,14 @@ proc test085 { method {pagesize 512} {onp 3} {offp 10} {tnum 85} args } {
error_check_good c_close [$c close] 0
}
}
-
+
set gargs [lindex $pair 1]
set ekey ""
set edata ""
eval set ekey [lindex $pair 2]
eval set edata [lindex $pair 3]
- set dbt [eval $dbc get $op $gargs]
+ set dbt [eval $dbc get $op $gargs]
if { [string compare $ekey EMPTYLIST] == 0 } {
error_check_good dbt($op,$ndups) \
[llength $dbt] 0
@@ -144,8 +161,27 @@ proc test085 { method {pagesize 512} {onp 3} {offp 10} {tnum 85} args } {
[list [list $ekey $edata]]
}
error_check_good "dbc close" [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
error_check_good "db close" [$db close] 0
verify_dir $testdir "\t\t"
+
+ # Remove testfile so we can do without truncate flag.
+ # This is okay because we've already done verify and
+ # dump/load.
+ if { $env == "NULL" } {
+ set ret [eval {berkdb dbremove} \
+ $encargs $testfile]
+ } elseif { $txnenv == 1 } {
+ set ret [eval "$env dbremove" \
+ -auto_commit $encargs $testfile]
+ } else {
+ set ret [eval {berkdb dbremove} \
+ -env $env $encargs $testfile]
+ }
+ error_check_good dbremove $ret 0
+
}
foreach pair $putops {
@@ -154,21 +190,27 @@ proc test085 { method {pagesize 512} {onp 3} {offp 10} {tnum 85} args } {
puts "\tTest0$tnum: Put ($op) with $ndups duplicates,\
cursor at the [lindex $pair 4]."
set db [eval {berkdb_open -create \
- -truncate -mode 0644} $omethod $args $testfile]
+ -mode 0644} $omethod $args $encargs $testfile]
error_check_good "db open" [is_valid_db $db] TRUE
set beginning [expr [string compare \
[lindex $pair 4] "beginning"] == 0]
-
- set dbc [test085_setup $db]
+
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [test085_setup $db $txn]
# Put duplicates.
for { set i 0 } { $i < $ndups } { incr i } {
if { $beginning } {
error_check_good db_put($i) \
- [$db put $key [test085_ddatum $i]] 0
+ [eval {$db put} $txn \
+ {$key [test085_ddatum $i]}] 0
} else {
- set c [$db cursor]
+ set c [eval {$db cursor} $txn]
set j [expr $ndups - $i - 1]
error_check_good db_cursor($j) \
[is_valid_cursor $c $db] TRUE
@@ -180,17 +222,17 @@ proc test085 { method {pagesize 512} {onp 3} {offp 10} {tnum 85} args } {
}
# Set up cursors for stability test.
- set pre_dbc [$db cursor]
+ set pre_dbc [eval {$db cursor} $txn]
error_check_good pre_set [$pre_dbc get -set $prekey] \
[list [list $prekey $predatum]]
- set post_dbc [$db cursor]
+ set post_dbc [eval {$db cursor} $txn]
error_check_good post_set [$post_dbc get -set $postkey]\
[list [list $postkey $postdatum]]
- set first_dbc [$db cursor]
+ set first_dbc [eval {$db cursor} $txn]
error_check_good first_set \
[$first_dbc get -get_both $key [test085_ddatum 0]] \
[list [list $key [test085_ddatum 0]]]
- set last_dbc [$db cursor]
+ set last_dbc [eval {$db cursor} $txn]
error_check_good last_set \
[$last_dbc get -get_both $key [test085_ddatum \
[expr $ndups - 1]]] \
@@ -227,23 +269,39 @@ proc test085 { method {pagesize 512} {onp 3} {offp 10} {tnum 85} args } {
[$last_dbc get -current] \
[list [list $key [test085_ddatum [expr $ndups -1]]]]
-
foreach c "$pre_dbc $post_dbc $first_dbc $last_dbc" {
error_check_good ${c}_close [$c close] 0
}
error_check_good "dbc close" [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
error_check_good "db close" [$db close] 0
- verify_dir $testdir "\t\t"
+ verify_dir $testdir "\t\t"
+
+ # Remove testfile so we can do without truncate flag.
+ # This is okay because we've already done verify and
+ # dump/load.
+ if { $env == "NULL" } {
+ set ret [eval {berkdb dbremove} \
+ $encargs $testfile]
+ } elseif { $txnenv == 1 } {
+ set ret [eval "$env dbremove" \
+ -auto_commit $encargs $testfile]
+ } else {
+ set ret [eval {berkdb dbremove} \
+ -env $env $encargs $testfile]
+ }
+ error_check_good dbremove $ret 0
}
}
}
-
-# Set up the test database; put $prekey, $key, and $postkey with their
+# Set up the test database; put $prekey, $key, and $postkey with their
# respective data, and then delete $key with a new cursor. Return that
# cursor, still pointing to the deleted item.
-proc test085_setup { db } {
+proc test085_setup { db txn } {
upvar key key
upvar prekey prekey
upvar postkey postkey
@@ -251,13 +309,13 @@ proc test085_setup { db } {
upvar postdatum postdatum
# no one else should ever see this one!
- set datum "bbbbbbbb"
+ set datum "bbbbbbbb"
- error_check_good pre_put [$db put $prekey $predatum] 0
- error_check_good main_put [$db put $key $datum] 0
- error_check_good post_put [$db put $postkey $postdatum] 0
+ error_check_good pre_put [eval {$db put} $txn {$prekey $predatum}] 0
+ error_check_good main_put [eval {$db put} $txn {$key $datum}] 0
+ error_check_good post_put [eval {$db put} $txn {$postkey $postdatum}] 0
- set dbc [$db cursor]
+ set dbc [eval {$db cursor} $txn]
error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE
error_check_good dbc_getset [$dbc get -get_both $key $datum] \
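
test085_setup (whose tail is cut off by the diff context above) leaves a cursor positioned on an item that has just been deleted, so the get/put operations in the main loop can be tested relative to that phantom position. A sketch of the idea, assuming $db, $txn, $key and $datum are set up as in the hunk; the $dbc del step is inferred from the proc's comment rather than shown in this excerpt.

    set dbc [eval {$db cursor} $txn]
    error_check_good dbc_getset [$dbc get -get_both $key $datum] \
        [list [list $key $datum]]
    error_check_good dbc_del [$dbc del] 0
    # $dbc still references the deleted item; relative gets such as
    # -next, -prev, -nextdup or -prevnodup now start from that position.
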
diff --git a/bdb/test/test086.tcl b/bdb/test/test086.tcl
index dc30de8ec37..e15aa1d8bb9 100644
--- a/bdb/test/test086.tcl
+++ b/bdb/test/test086.tcl
@@ -1,16 +1,21 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1999, 2000
+# Copyright (c) 1999-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: test086.tcl,v 11.2 2000/08/25 14:21:58 sue Exp $
-
-# Test086: Cursor stability across btree splits w/ subtransaction abort [#2373].
+# $Id: test086.tcl,v 11.9 2002/08/06 17:58:00 sandstro Exp $
+#
+# TEST test086
+# TEST Test of cursor stability across btree splits/rsplits with
+# TEST subtransaction aborts (a variant of test048). [#2373]
proc test086 { method args } {
global errorCode
source ./include.tcl
set tstn 086
+ set args [convert_args $method $args]
+ set encargs ""
+ set args [split_encargs $args encargs]
if { [is_btree $method] != 1 } {
puts "Test$tstn skipping for method $method."
@@ -40,11 +45,11 @@ proc test086 { method args } {
set t1 $testdir/t1
env_cleanup $testdir
- set env [berkdb env -create -home $testdir -txn]
+ set env [eval {berkdb_env -create -home $testdir -txn} $encargs]
error_check_good berkdb_env [is_valid_env $env] TRUE
puts "\tTest$tstn.a: Create $method database."
- set oflags "-create -env $env -mode 0644 $args $method"
+ set oflags "-auto_commit -create -env $env -mode 0644 $args $method"
set db [eval {berkdb_open} $oflags $testfile]
error_check_good dbopen [is_valid_db $db] TRUE
@@ -97,7 +102,6 @@ proc test086 { method args } {
puts "\tTest$tstn.e: Abort."
error_check_good ctxn_abort [$ctxn abort] 0
-
puts "\tTest$tstn.f: Check and see that cursors maintained reference."
for {set i 0} { $i < $nkeys } {incr i} {
set ret [$dbc_set($i) get -current]
@@ -107,7 +111,7 @@ proc test086 { method args } {
error_check_good dbc$i:get(match) $ret $ret2
}
- # Put (and this time keep) the keys that caused the split.
+ # Put (and this time keep) the keys that caused the split.
# We'll delete them to test reverse splits.
puts "\tTest$tstn.g: Put back added keys."
for {set i $nkeys} { $i < $mkeys } { incr i } {
diff --git a/bdb/test/test087.tcl b/bdb/test/test087.tcl
index 7096e6c1cb9..089664a0002 100644
--- a/bdb/test/test087.tcl
+++ b/bdb/test/test087.tcl
@@ -1,31 +1,38 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1999, 2000
+# Copyright (c) 1999-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: test087.tcl,v 11.6 2000/12/11 17:24:55 sue Exp $
+# $Id: test087.tcl,v 11.14 2002/07/08 20:16:31 sue Exp $
#
-# DB Test 87: Test of cursor stability on duplicate pages w/aborts.
-# Does the following:
-# a. Initialize things by DB->putting ndups dups and
-# setting a reference cursor to point to each.
-# b. c_put ndups dups (and correspondingly expanding
-# the set of reference cursors) after the last one, making sure
-# after each step that all the reference cursors still point to
-# the right item.
-# c. Ditto, but before the first one.
-# d. Ditto, but after each one in sequence first to last.
-# e. Ditto, but after each one in sequence from last to first.
-# occur relative to the new datum)
-# f. Ditto for the two sequence tests, only doing a
-# DBC->c_put(DB_CURRENT) of a larger datum instead of adding a
-# new one.
+# TEST test087
+# TEST Test of cursor stability when converting to and modifying
+# TEST off-page duplicate pages with subtransaction aborts. [#2373]
+# TEST
+# TEST Does the following:
+# TEST a. Initialize things by DB->putting ndups dups and
+# TEST setting a reference cursor to point to each. Do each put twice,
+# TEST first aborting, then committing, so we're sure to abort the move
+# TEST to off-page dups at some point.
+# TEST b. c_put ndups dups (and correspondingly expanding
+# TEST the set of reference cursors) after the last one, making sure
+# TEST after each step that all the reference cursors still point to
+# TEST the right item.
+# TEST c. Ditto, but before the first one.
+# TEST d. Ditto, but after each one in sequence first to last.
+# TEST e. Ditto, but after each one in sequence from last to first.
+# TEST occur relative to the new datum)
+# TEST f. Ditto for the two sequence tests, only doing a
+# TEST DBC->c_put(DB_CURRENT) of a larger datum instead of adding a
+# TEST new one.
proc test087 { method {pagesize 512} {ndups 50} {tnum 87} args } {
source ./include.tcl
global alphabet
- set omethod [convert_method $method]
set args [convert_args $method $args]
+ set encargs ""
+ set args [split_encargs $args encargs]
+ set omethod [convert_method $method]
puts "Test0$tnum $omethod ($args): "
set eindex [lsearch -exact $args "-env"]
@@ -52,34 +59,38 @@ proc test087 { method {pagesize 512} {ndups 50} {tnum 87} args } {
puts "Cursor stability on dup. pages w/ aborts."
}
- set env [berkdb env -create -home $testdir -txn]
+ set env [eval {berkdb_env -create -home $testdir -txn} $encargs]
error_check_good env_create [is_valid_env $env] TRUE
- set db [eval {berkdb_open -env $env \
- -create -mode 0644} $omethod $args $testfile]
+ set db [eval {berkdb_open -auto_commit \
+ -create -env $env -mode 0644} $omethod $args $testfile]
error_check_good "db open" [is_valid_db $db] TRUE
# Number of outstanding keys.
- set keys 0
+ set keys $ndups
- puts "\tTest0$tnum.a.1: Initializing put loop; $ndups dups, short data."
+ puts "\tTest0$tnum.a: put/abort/put/commit loop;\
+ $ndups dups, short data."
set txn [$env txn]
error_check_good txn [is_valid_txn $txn $env] TRUE
for { set i 0 } { $i < $ndups } { incr i } {
set datum [makedatum_t73 $i 0]
- error_check_good "db put ($i)" [$db put -txn $txn $key $datum] 0
+ set ctxn [$env txn -parent $txn]
+ error_check_good ctxn(abort,$i) [is_valid_txn $ctxn $env] TRUE
+ error_check_good "db put/abort ($i)" \
+ [$db put -txn $ctxn $key $datum] 0
+ error_check_good ctxn_abort($i) [$ctxn abort] 0
- set is_long($i) 0
- incr keys
- }
- error_check_good txn_commit [$txn commit] 0
+ verify_t73 is_long dbc [expr $i - 1] $key
- puts "\tTest0$tnum.a.2: Initializing cursor get loop; $keys dups."
- set txn [$env txn]
- error_check_good txn [is_valid_txn $txn $env] TRUE
- for { set i 0 } { $i < $keys } { incr i } {
- set datum [makedatum_t73 $i 0]
+ set ctxn [$env txn -parent $txn]
+ error_check_good ctxn(commit,$i) [is_valid_txn $ctxn $env] TRUE
+ error_check_good "db put/commit ($i)" \
+ [$db put -txn $ctxn $key $datum] 0
+ error_check_good ctxn_commit($i) [$ctxn commit] 0
+
+ set is_long($i) 0
set dbc($i) [$db cursor -txn $txn]
error_check_good "db cursor ($i)"\
@@ -87,6 +98,8 @@ proc test087 { method {pagesize 512} {ndups 50} {tnum 87} args } {
error_check_good "dbc get -get_both ($i)"\
[$dbc($i) get -get_both $key $datum]\
[list [list $key $datum]]
+
+ verify_t73 is_long dbc $i $key
}
puts "\tTest0$tnum.b: Cursor put (DB_KEYLAST); $ndups new dups,\
@@ -97,7 +110,6 @@ proc test087 { method {pagesize 512} {ndups 50} {tnum 87} args } {
for { set i 0 } { $i < $ndups } { incr i } {
# !!! keys contains the number of the next dup
# to be added (since they start from zero)
-
set datum [makedatum_t73 $keys 0]
set curs [$db cursor -txn $ctxn]
error_check_good "db cursor create" [is_valid_cursor $curs $db]\
@@ -272,7 +284,7 @@ proc test087 { method {pagesize 512} {ndups 50} {tnum 87} args } {
for { set i 0 } { $i < $keys } { incr i } {
error_check_good "dbc close ($i)" [$dbc($i) close] 0
}
- error_check_good txn_commit [$txn commit] 0
error_check_good "db close" [$db close] 0
+ error_check_good txn_commit [$txn commit] 0
error_check_good "env close" [$env close] 0
}
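
The reworked Test087.a loop above relies on child (nested) transactions: each put is first tried under a child transaction that aborts, then repeated under one that commits, so the move to off-page duplicates is guaranteed to be aborted at some point. In isolation the pattern is the sketch below, with $env, $db, $key and $datum as in the hunk.

    set txn [$env txn]

    # Child transaction that aborts: its put is undone.
    set ctxn [$env txn -parent $txn]
    error_check_good put_abort [$db put -txn $ctxn $key $datum] 0
    error_check_good ctxn_abort [$ctxn abort] 0

    # Child transaction that commits: its put becomes visible to $txn.
    set ctxn [$env txn -parent $txn]
    error_check_good put_commit [$db put -txn $ctxn $key $datum] 0
    error_check_good ctxn_commit [$ctxn commit] 0

    # Nothing is durable until the parent commits.
    error_check_good txn_commit [$txn commit] 0
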
diff --git a/bdb/test/test088.tcl b/bdb/test/test088.tcl
index d7b0f815a00..7065b4cd642 100644
--- a/bdb/test/test088.tcl
+++ b/bdb/test/test088.tcl
@@ -1,17 +1,19 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1999, 2000
+# Copyright (c) 1999-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: test088.tcl,v 11.4 2000/12/11 17:24:55 sue Exp $
+# $Id: test088.tcl,v 11.12 2002/08/05 19:23:51 sandstro Exp $
#
-# Test088: Cursor stability across btree splits with very deep trees.
-# (Variant of test048, SR #2514.)
+# TEST test088
+# TEST Test of cursor stability across btree splits with very
+# TEST deep trees (a variant of test048). [#2514]
proc test088 { method args } {
global errorCode alphabet
source ./include.tcl
set tstn 088
+ set args [convert_args $method $args]
if { [is_btree $method] != 1 } {
puts "Test$tstn skipping for method $method."
@@ -33,6 +35,7 @@ proc test088 { method args } {
set flags ""
puts "\tTest$tstn.a: Create $method database."
+ set txnenv 0
set eindex [lsearch -exact $args "-env"]
#
# If we are using an env, then testfile should just be the db name.
@@ -44,12 +47,18 @@ proc test088 { method args } {
set testfile test$tstn.db
incr eindex
set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ }
+ set testdir [get_home $env]
}
set t1 $testdir/t1
cleanup $testdir $env
- set ps 512
- set oflags "-create -pagesize $ps -truncate -mode 0644 $args $method"
+ set ps 512
+ set txn ""
+ set oflags "-create -pagesize $ps -mode 0644 $args $method"
set db [eval {berkdb_open} $oflags $testfile]
error_check_good dbopen [is_valid_db $db] TRUE
@@ -58,45 +67,62 @@ proc test088 { method args } {
#
puts "\tTest$tstn.b: Fill page with $nkeys small key/data pairs."
for { set i 0 } { $i < $nkeys } { incr i } {
- set ret [$db put ${key}00000$i $data$i]
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn {${key}00000$i $data$i}]
error_check_good dbput $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
}
# get db ordering, set cursors
puts "\tTest$tstn.c: Set cursors on each of $nkeys pairs."
+ # if mkeys is above 1000, need to adjust below for lexical order
+ set mkeys 30000
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ set mkeys 300
+ }
for {set i 0; set ret [$db get ${key}00000$i]} {\
$i < $nkeys && [llength $ret] != 0} {\
incr i; set ret [$db get ${key}00000$i]} {
set key_set($i) [lindex [lindex $ret 0] 0]
set data_set($i) [lindex [lindex $ret 0] 1]
- set dbc [$db cursor]
+ set dbc [eval {$db cursor} $txn]
set dbc_set($i) $dbc
error_check_good db_cursor:$i [is_substr $dbc_set($i) $db] 1
set ret [$dbc_set($i) get -set $key_set($i)]
error_check_bad dbc_set($i)_get:set [llength $ret] 0
}
- # if mkeys is above 1000, need to adjust below for lexical order
- set mkeys 30000
puts "\tTest$tstn.d: Add $mkeys pairs to force splits."
for {set i $nkeys} { $i < $mkeys } { incr i } {
if { $i >= 10000 } {
- set ret [$db put ${key}0$i $data$i]
+ set ret [eval {$db put} $txn {${key}0$i $data$i}]
} elseif { $i >= 1000 } {
- set ret [$db put ${key}00$i $data$i]
+ set ret [eval {$db put} $txn {${key}00$i $data$i}]
} elseif { $i >= 100 } {
- set ret [$db put ${key}000$i $data$i]
+ set ret [eval {$db put} $txn {${key}000$i $data$i}]
} elseif { $i >= 10 } {
- set ret [$db put ${key}0000$i $data$i]
+ set ret [eval {$db put} $txn {${key}0000$i $data$i}]
} else {
- set ret [$db put ${key}00000$i $data$i]
+ set ret [eval {$db put} $txn {${key}00000$i $data$i}]
}
error_check_good dbput:more $ret 0
}
puts "\tTest$tstn.e: Make sure splits happened."
- error_check_bad stat:check-split [is_substr [$db stat] \
- "{{Internal pages} 0}"] 1
+ # XXX cannot execute stat in presence of txns and cursors.
+ if { $txnenv == 0 } {
+ error_check_bad stat:check-split [is_substr [$db stat] \
+ "{{Internal pages} 0}"] 1
+ }
puts "\tTest$tstn.f: Check to see that cursors maintained reference."
for {set i 0} { $i < $nkeys } {incr i} {
@@ -110,16 +136,17 @@ proc test088 { method args } {
puts "\tTest$tstn.g: Delete added keys to force reverse splits."
for {set i $nkeys} { $i < $mkeys } { incr i } {
if { $i >= 10000 } {
- error_check_good db_del:$i [$db del ${key}0$i] 0
+ set ret [eval {$db del} $txn {${key}0$i}]
} elseif { $i >= 1000 } {
- error_check_good db_del:$i [$db del ${key}00$i] 0
+ set ret [eval {$db del} $txn {${key}00$i}]
} elseif { $i >= 100 } {
- error_check_good db_del:$i [$db del ${key}000$i] 0
+ set ret [eval {$db del} $txn {${key}000$i}]
} elseif { $i >= 10 } {
- error_check_good db_del:$i [$db del ${key}0000$i] 0
+ set ret [eval {$db del} $txn {${key}0000$i}]
} else {
- error_check_good db_del:$i [$db del ${key}00000$i] 0
+ set ret [eval {$db del} $txn {${key}00000$i}]
}
+ error_check_good dbdel:more $ret 0
}
puts "\tTest$tstn.h: Verify cursor reference."
@@ -136,6 +163,9 @@ proc test088 { method args } {
for {set i 0} { $i < $nkeys } {incr i} {
error_check_good dbc_close:$i [$dbc_set($i) close] 0
}
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
error_check_good dbclose [$db close] 0
puts "\tTest$tstn complete."
diff --git a/bdb/test/test089.tcl b/bdb/test/test089.tcl
new file mode 100644
index 00000000000..d378152f203
--- /dev/null
+++ b/bdb/test/test089.tcl
@@ -0,0 +1,180 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test089.tcl,v 11.2 2002/08/08 15:38:12 bostic Exp $
+#
+# TEST test089
+# TEST Concurrent Data Store test (CDB)
+# TEST
+# TEST Enhanced CDB testing to test off-page dups, cursor dups and
+# TEST cursor operations like c_del then c_get.
+proc test089 { method {nentries 1000} args } {
+ global datastr
+ global encrypt
+ source ./include.tcl
+
+ #
+ # If we are using an env, then skip this test. It needs its own.
+ set eindex [lsearch -exact $args "-env"]
+ if { $eindex != -1 } {
+ incr eindex
+ set env [lindex $args $eindex]
+ puts "Test089 skipping for env $env"
+ return
+ }
+ set encargs ""
+ set args [convert_args $method $args]
+ set oargs [split_encargs $args encargs]
+ set omethod [convert_method $method]
+
+ puts "Test089: ($oargs) $method CDB Test cursor/dup operations"
+
+ # Process arguments
+ # Create the database and open the dictionary
+ set testfile test089.db
+ set testfile1 test089a.db
+
+ env_cleanup $testdir
+
+ set env [eval {berkdb_env -create -cdb} $encargs -home $testdir]
+ error_check_good dbenv [is_valid_env $env] TRUE
+
+ set db [eval {berkdb_open -env $env -create \
+ -mode 0644 $omethod} $oargs {$testfile}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set db1 [eval {berkdb_open -env $env -create \
+ -mode 0644 $omethod} $oargs {$testfile1}]
+ error_check_good dbopen [is_valid_db $db1] TRUE
+
+ set pflags ""
+ set gflags ""
+ set txn ""
+ set count 0
+
+ # Here is the loop where we put each key/data pair
+ puts "\tTest089.a: put loop"
+ set did [open $dict]
+ while { [gets $did str] != -1 && $count < $nentries } {
+ if { [is_record_based $method] == 1 } {
+ set key [expr $count + 1]
+ } else {
+ set key $str
+ }
+ set ret [eval {$db put} \
+ $txn $pflags {$key [chop_data $method $datastr]}]
+ error_check_good put:$db $ret 0
+ set ret [eval {$db1 put} \
+ $txn $pflags {$key [chop_data $method $datastr]}]
+ error_check_good put:$db1 $ret 0
+ incr count
+ }
+ close $did
+ error_check_good close:$db [$db close] 0
+ error_check_good close:$db1 [$db1 close] 0
+
+ # Database is created, now set up environment
+
+ # Remove old mpools and Open/create the lock and mpool regions
+ error_check_good env:close:$env [$env close] 0
+ set ret [eval {berkdb envremove} $encargs -home $testdir]
+ error_check_good env_remove $ret 0
+
+ set env [eval {berkdb_env_noerr -create -cdb} $encargs -home $testdir]
+ error_check_good dbenv [is_valid_widget $env env] TRUE
+
+ # This tests the failure found in #1923
+ puts "\tTest089.b: test delete then get"
+
+ set db1 [eval {berkdb_open_noerr -env $env -create \
+ -mode 0644 $omethod} $oargs {$testfile1}]
+ error_check_good dbopen [is_valid_db $db1] TRUE
+
+ set dbc [$db1 cursor -update]
+ error_check_good dbcursor [is_valid_cursor $dbc $db1] TRUE
+
+ for {set kd [$dbc get -first] } { [llength $kd] != 0 } \
+ {set kd [$dbc get -next] } {
+ error_check_good dbcdel [$dbc del] 0
+ }
+ error_check_good dbc_close [$dbc close] 0
+
+ puts "\tTest089.c: CDB cursor dups"
+ set dbc [$db1 cursor -update]
+ error_check_good dbcursor [is_valid_cursor $dbc $db1] TRUE
+ set stat [catch {$dbc dup} ret]
+ error_check_bad wr_cdup_stat $stat 0
+ error_check_good wr_cdup [is_substr $ret \
+ "Cannot duplicate writeable cursor"] 1
+
+ set dbc_ro [$db1 cursor]
+ error_check_good dbcursor [is_valid_cursor $dbc_ro $db1] TRUE
+ set dup_dbc [$dbc_ro dup]
+ error_check_good rd_cdup [is_valid_cursor $dup_dbc $db1] TRUE
+
+ error_check_good dbc_close [$dbc close] 0
+ error_check_good dbc_close [$dbc_ro close] 0
+ error_check_good dbc_close [$dup_dbc close] 0
+ error_check_good db_close [$db1 close] 0
+ error_check_good env_close [$env close] 0
+
+ if { [is_btree $method] != 1 } {
+ puts "Skipping rest of test089 for $method method."
+ return
+ }
+ set pgindex [lsearch -exact $args "-pagesize"]
+ if { $pgindex != -1 } {
+ puts "Skipping rest of test089 for specific pagesizes"
+ return
+ }
+ append oargs " -dup "
+ test089_dup $testdir $encargs $oargs $omethod $nentries
+ append oargs " -dupsort "
+ test089_dup $testdir $encargs $oargs $omethod $nentries
+}
+
+proc test089_dup { testdir encargs oargs method nentries } {
+
+ env_cleanup $testdir
+ set env [eval {berkdb_env -create -cdb} $encargs -home $testdir]
+ error_check_good dbenv [is_valid_env $env] TRUE
+
+ #
+ # Set pagesize small to generate lots of off-page dups
+ #
+ set page 512
+ set nkeys 5
+ set data "data"
+ set key "test089_key"
+ set testfile test089.db
+ puts "\tTest089.d: CDB ($oargs) off-page dups"
+ set oflags "-env $env -create -mode 0644 $oargs $method"
+ set db [eval {berkdb_open} -pagesize $page $oflags $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ puts "\tTest089.e: Fill page with $nkeys keys, with $nentries dups"
+ for { set k 0 } { $k < $nkeys } { incr k } {
+ for { set i 0 } { $i < $nentries } { incr i } {
+ set ret [$db put $key $i$data$k]
+ error_check_good dbput $ret 0
+ }
+ }
+
+ # Verify we have off-page duplicates
+ set stat [$db stat]
+ error_check_bad stat:offpage [is_substr $stat "{{Internal pages} 0}"] 1
+
+ set dbc [$db cursor -update]
+ error_check_good dbcursor [is_valid_cursor $dbc $db] TRUE
+
+ puts "\tTest089.f: test delete then get of off-page dups"
+ for {set kd [$dbc get -first] } { [llength $kd] != 0 } \
+ {set kd [$dbc get -next] } {
+ error_check_good dbcdel [$dbc del] 0
+ }
+ error_check_good dbc_close [$dbc close] 0
+ error_check_good db_close [$db close] 0
+ error_check_good env_close [$env close] 0
+}
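
The CDB-specific behavior test089 checks above boils down to: write cursors are opened with -update, a write cursor cannot be duplicated, and a plain read cursor can be. A sketch of just that, using only calls from the new test; TESTDIR and the database name are placeholders.

    set env [berkdb_env -create -cdb -home TESTDIR]
    set db [berkdb_open -env $env -create -mode 0644 -btree test089x.db]

    set wc [$db cursor -update]		;# CDB write cursor
    set rc [$db cursor]			;# read cursor

    # Duplicating the write cursor is expected to fail ...
    set stat [catch {$wc dup} ret]
    error_check_bad wr_cdup_stat $stat 0

    # ... while duplicating the read cursor succeeds.
    set rc2 [$rc dup]
    error_check_good rd_cdup [is_valid_cursor $rc2 $db] TRUE

    foreach c [list $wc $rc $rc2] {
        error_check_good dbc_close [$c close] 0
    }
    error_check_good db_close [$db close] 0
    error_check_good env_close [$env close] 0
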
diff --git a/bdb/test/test090.tcl b/bdb/test/test090.tcl
index ed6ec9632f5..da90688ffc5 100644
--- a/bdb/test/test090.tcl
+++ b/bdb/test/test090.tcl
@@ -1,20 +1,16 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2000
+# Copyright (c) 2000-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: test090.tcl,v 11.4 2000/12/11 17:24:56 sue Exp $
+# $Id: test090.tcl,v 11.10 2002/08/15 20:55:21 sandstro Exp $
#
-# DB Test 90 {access method}
-# Check for functionality near the end of the queue.
-#
-#
-proc test090 { method {nentries 1000} {txn -txn} {tnum "90"} args} {
+# TEST test090
+# TEST Test for functionality near the end of the queue using test001.
+proc test090 { method {nentries 10000} {txn -txn} {tnum "90"} args} {
if { [is_queueext $method ] == 0 } {
puts "Skipping test0$tnum for $method."
return;
}
- eval {test001 $method $nentries 4294967000 $tnum} $args
- eval {test025 $method $nentries 4294967000 $tnum} $args
- eval {test070 $method 4 2 $nentries WAIT 4294967000 $txn $tnum} $args
+ eval {test001 $method $nentries 4294967000 $tnum 0} $args
}
diff --git a/bdb/test/test091.tcl b/bdb/test/test091.tcl
index 9420b571ce3..cfd2a60ebb5 100644
--- a/bdb/test/test091.tcl
+++ b/bdb/test/test091.tcl
@@ -1,13 +1,12 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2000
+# Copyright (c) 2000-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: test091.tcl,v 11.4 2000/12/01 04:28:36 ubell Exp $
-#
-# DB Test 91 {access method}
-# Check for CONSUME_WAIT functionality
+# $Id: test091.tcl,v 11.7 2002/01/11 15:53:56 bostic Exp $
#
+# TEST test091
+# TEST Test of DB_CONSUME_WAIT.
proc test091 { method {nconsumers 4} \
{nproducers 2} {nitems 1000} {start 0 } {tnum "91"} args} {
if { [is_queue $method ] == 0 } {
diff --git a/bdb/test/test092.tcl b/bdb/test/test092.tcl
new file mode 100644
index 00000000000..29c1c55a9a9
--- /dev/null
+++ b/bdb/test/test092.tcl
@@ -0,0 +1,241 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test092.tcl,v 11.13 2002/02/22 15:26:28 sandstro Exp $
+#
+# TEST test092
+# TEST Test of DB_DIRTY_READ [#3395]
+# TEST
+# TEST We set up a database with nentries in it. We then open the
+# TEST database read-only twice. One with dirty read and one without.
+# TEST We open the database for writing and update some entries in it.
+# TEST Then read those new entries via db->get (clean and dirty), and
+# TEST via cursors (clean and dirty).
+proc test092 { method {nentries 1000} args } {
+ source ./include.tcl
+ #
+ # If we are using an env, then skip this test. It needs its own.
+ set eindex [lsearch -exact $args "-env"]
+ if { $eindex != -1 } {
+ incr eindex
+ set env [lindex $args $eindex]
+ puts "Test092 skipping for env $env"
+ return
+ }
+ set args [convert_args $method $args]
+ set encargs ""
+ set args [split_encargs $args encargs]
+ set omethod [convert_method $method]
+
+ puts "Test092: Dirty Read Test $method $nentries"
+
+ # Create the database and open the dictionary
+ set testfile test092.db
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+
+ env_cleanup $testdir
+
+ set lmax [expr $nentries * 2]
+ set lomax [expr $nentries * 2]
+ set env [eval {berkdb_env -create -txn} $encargs -home $testdir \
+ -lock_max_locks $lmax -lock_max_objects $lomax]
+ error_check_good dbenv [is_valid_env $env] TRUE
+
+ set db [eval {berkdb_open -env $env -create \
+ -mode 0644 $omethod} $args {$testfile}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ # Here is the loop where we put each key/data pair.
+ # Key is entry, data is entry also.
+ puts "\tTest092.a: put loop"
+ set count 0
+ set did [open $dict]
+ while { [gets $did str] != -1 && $count < $nentries } {
+ if { [is_record_based $method] == 1 } {
+ global kvals
+
+ set key [expr $count + 1]
+ set kvals($key) [pad_data $method $str]
+ } else {
+ set key $str
+ }
+ set ret [eval {$db put} {$key [chop_data $method $str]}]
+ error_check_good put:$db $ret 0
+ incr count
+ }
+ close $did
+ error_check_good close:$db [$db close] 0
+
+ puts "\tTest092.b: Opening all the handles"
+ #
+ # Open all of our handles.
+ # We need:
+ # 1. Our main txn (t).
+ # 2. A txn that can read dirty data (tdr).
+ # 3. A db handle for writing via txn (dbtxn).
+ # 4. A db handle for clean data (dbcl).
+ # 5. A db handle for dirty data (dbdr).
+ # 6. A cursor handle for dirty txn data (clean db handle using
+ # the dirty txn handle on the cursor call) (dbccl).
+ # 7. A cursor handle for dirty data (dirty on get call) (dbcdr0).
+ # 8. A cursor handle for dirty data (dirty on cursor call) (dbcdr1).
+ set t [$env txn]
+ error_check_good txnbegin [is_valid_txn $t $env] TRUE
+
+ set tdr [$env txn -dirty]
+ error_check_good txnbegin:dr [is_valid_txn $tdr $env] TRUE
+ set dbtxn [eval {berkdb_open -auto_commit -env $env -dirty \
+ -mode 0644 $omethod} {$testfile}]
+ error_check_good dbopen:dbtxn [is_valid_db $dbtxn] TRUE
+
+ set dbcl [eval {berkdb_open -auto_commit -env $env \
+ -rdonly -mode 0644 $omethod} {$testfile}]
+ error_check_good dbopen:dbcl [is_valid_db $dbcl] TRUE
+
+ set dbdr [eval {berkdb_open -auto_commit -env $env -dirty \
+ -rdonly -mode 0644 $omethod} {$testfile}]
+ error_check_good dbopen:dbdr [is_valid_db $dbdr] TRUE
+
+ set dbccl [$dbcl cursor -txn $tdr]
+ error_check_good dbcurs:dbcl [is_valid_cursor $dbccl $dbcl] TRUE
+
+ set dbcdr0 [$dbdr cursor]
+ error_check_good dbcurs:dbdr0 [is_valid_cursor $dbcdr0 $dbdr] TRUE
+
+ set dbcdr1 [$dbdr cursor -dirty]
+ error_check_good dbcurs:dbdr1 [is_valid_cursor $dbcdr1 $dbdr] TRUE
+
+ #
+ # Now that we have all of our handles, change all the data in there
+ # to be the key and data the same, but data is capitalized.
+ puts "\tTest092.c: put/get data within a txn"
+ set gflags ""
+ if { [is_record_based $method] == 1 } {
+ set checkfunc test092dr_recno.check
+ append gflags " -recno"
+ } else {
+ set checkfunc test092dr.check
+ }
+ set count 0
+ set did [open $dict]
+ while { [gets $did str] != -1 && $count < $nentries } {
+ if { [is_record_based $method] == 1 } {
+ set key [expr $count + 1]
+ } else {
+ set key $str
+ }
+ set ustr [string toupper $str]
+ set clret [list [list $key [pad_data $method $str]]]
+ set drret [list [list $key [pad_data $method $ustr]]]
+ #
+ # Put the data in the txn.
+ #
+ set ret [eval {$dbtxn put} -txn $t \
+ {$key [chop_data $method $ustr]}]
+ error_check_good put:$dbtxn $ret 0
+
+ #
+ # Now get the data using the different db handles and
+ # make sure it is dirty or clean data.
+ #
+ # Using the dirty txn should show us dirty data
+ set ret [eval {$dbcl get -txn $tdr} $gflags {$key}]
+ error_check_good dbdr2:get $ret $drret
+
+ set ret [eval {$dbdr get -dirty} $gflags {$key}]
+ error_check_good dbdr1:get $ret $drret
+
+ set ret [eval {$dbdr get -txn $tdr} $gflags {$key}]
+ error_check_good dbdr2:get $ret $drret
+
+ incr count
+ }
+ close $did
+
+ puts "\tTest092.d: Check dirty data using dirty txn and clean db/cursor"
+ dump_file_walk $dbccl $t1 $checkfunc "-first" "-next"
+
+ puts "\tTest092.e: Check dirty data using -dirty cget flag"
+ dump_file_walk $dbcdr0 $t2 $checkfunc "-first" "-next" "-dirty"
+
+ puts "\tTest092.f: Check dirty data using -dirty cursor"
+ dump_file_walk $dbcdr1 $t3 $checkfunc "-first" "-next"
+
+ #
+ # We must close these before aborting the real txn
+ # because they all hold read locks on the pages.
+ #
+ error_check_good dbccl:close [$dbccl close] 0
+ error_check_good dbcdr0:close [$dbcdr0 close] 0
+ error_check_good dbcdr1:close [$dbcdr1 close] 0
+
+ #
+ # Now abort the modifying transaction and rerun the data checks.
+ #
+ puts "\tTest092.g: Aborting the write-txn"
+ error_check_good txnabort [$t abort] 0
+
+ set dbccl [$dbcl cursor -txn $tdr]
+ error_check_good dbcurs:dbcl [is_valid_cursor $dbccl $dbcl] TRUE
+
+ set dbcdr0 [$dbdr cursor]
+ error_check_good dbcurs:dbdr0 [is_valid_cursor $dbcdr0 $dbdr] TRUE
+
+ set dbcdr1 [$dbdr cursor -dirty]
+ error_check_good dbcurs:dbdr1 [is_valid_cursor $dbcdr1 $dbdr] TRUE
+
+ if { [is_record_based $method] == 1 } {
+ set checkfunc test092cl_recno.check
+ } else {
+ set checkfunc test092cl.check
+ }
+ puts "\tTest092.h: Check clean data using -dirty cget flag"
+ dump_file_walk $dbccl $t1 $checkfunc "-first" "-next"
+
+ puts "\tTest092.i: Check clean data using -dirty cget flag"
+ dump_file_walk $dbcdr0 $t2 $checkfunc "-first" "-next" "-dirty"
+
+ puts "\tTest092.j: Check clean data using -dirty cursor"
+ dump_file_walk $dbcdr1 $t3 $checkfunc "-first" "-next"
+
+ # Clean up our handles
+ error_check_good dbccl:close [$dbccl close] 0
+ error_check_good tdrcommit [$tdr commit] 0
+ error_check_good dbcdr0:close [$dbcdr0 close] 0
+ error_check_good dbcdr1:close [$dbcdr1 close] 0
+ error_check_good dbclose [$dbcl close] 0
+ error_check_good dbclose [$dbdr close] 0
+ error_check_good dbclose [$dbtxn close] 0
+ error_check_good envclose [$env close] 0
+}
+
+# Check functions for test092.
+# Clean checks expect keys and data to be identical.
+# Dirty checks expect data to be the uppercase version of the key.
+proc test092cl.check { key data } {
+ error_check_good "key/data mismatch" $key $data
+}
+
+proc test092cl_recno.check { key data } {
+ global kvals
+
+ error_check_good key"$key"_exists [info exists kvals($key)] 1
+ error_check_good "key/data mismatch, key $key" $data $kvals($key)
+}
+
+proc test092dr.check { key data } {
+ error_check_good "key/data mismatch" $key [string tolower $data]
+}
+
+proc test092dr_recno.check { key data } {
+ global kvals
+
+ error_check_good key"$key"_exists [info exists kvals($key)] 1
+ error_check_good "key/data mismatch, key $key" $data \
+ [string toupper $kvals($key)]
+}
+
diff --git a/bdb/test/test093.tcl b/bdb/test/test093.tcl
new file mode 100644
index 00000000000..e3f8f0103c6
--- /dev/null
+++ b/bdb/test/test093.tcl
@@ -0,0 +1,393 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test093.tcl,v 11.20 2002/06/20 19:01:02 sue Exp $
+#
+# TEST test093
+# TEST Test using set_bt_compare.
+# TEST
+# TEST Use the first 10,000 entries from the dictionary.
+# TEST	Insert each with self as key and its reverse as data; retrieve each.
+# TEST After all are entered, retrieve all; compare output to original.
+# TEST Close file, reopen, do retrieve and re-verify.
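+#
+# A minimal usage sketch of -btcompare; it is not invoked by any test,
+# and the proc and file names (test093_btcompare_sketch, sketch093.db)
+# are illustrative only.  The callback receives two keys and returns a
+# negative, zero or positive value, like string compare.
+proc test093_btcompare_sketch { } {
+ set db [berkdb_open -btcompare test093_cmp1 \
+     -create -mode 0644 -btree sketch093.db]
+ error_check_good sketch_open [is_valid_db $db] TRUE
+ $db put apple 1
+ $db put zebra 2
+ set dbc [$db cursor]
+ # With test093_cmp1, keys sort in reverse order, so "zebra" comes first.
+ set first [$dbc get -first]
+ error_check_good sketch_first [lindex [lindex $first 0] 0] zebra
+ error_check_good sketch_curs_close [$dbc close] 0
+ error_check_good sketch_db_close [$db close] 0
+}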
+proc test093 { method {nentries 10000} {tnum "93"} args} {
+ source ./include.tcl
+ global btvals
+ global btvalsck
+ global errorInfo
+
+ set dbargs [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ if { [is_btree $method] != 1 } {
+ puts "Test0$tnum: skipping for method $method."
+ return
+ }
+ set txnenv 0
+ set eindex [lsearch -exact $dbargs "-env"]
+ if { $eindex != -1 } {
+ set testfile test0$tnum.db
+ incr eindex
+ set env [lindex $dbargs $eindex]
+ set rpcenv [is_rpcenv $env]
+ if { $rpcenv == 1 } {
+ puts "Test0$tnum: skipping for RPC"
+ return
+ }
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append dbargs " -auto_commit "
+ if { $nentries == 10000 } {
+ set nentries 100
+ }
+ }
+ set testdir [get_home $env]
+ cleanup $testdir $env
+ }
+ puts "Test0$tnum: $method ($args) $nentries using btcompare"
+
+
+ test093_run $omethod $dbargs $nentries $tnum test093_cmp1 test093_sort1
+ test093_runbig $omethod $dbargs $nentries $tnum \
+ test093_cmp1 test093_sort1
+ test093_run $omethod $dbargs $nentries $tnum test093_cmp2 test093_sort2
+ #
+ # Don't bother running the second, really slow, comparison
+ # function on test093_runbig (file contents).
+
+ # Clean up so verification doesn't fail. (There's currently
+ # no way to specify a comparison function to berkdb dbverify.)
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ set eindex [lsearch -exact $dbargs "-env"]
+ if { $eindex == -1 } {
+ set env NULL
+ } else {
+ incr eindex
+ set env [lindex $dbargs $eindex]
+ set testdir [get_home $env]
+ }
+ cleanup $testdir $env
+}
+
+proc test093_run { method dbargs nentries tnum cmpfunc sortfunc } {
+ source ./include.tcl
+ global btvals
+ global btvalsck
+
+ # Create the database and open the dictionary
+ set eindex [lsearch -exact $dbargs "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ set txnenv 0
+ if { $eindex == -1 } {
+ set testfile $testdir/test0$tnum.db
+ set env NULL
+ } else {
+ set testfile test0$tnum.db
+ incr eindex
+ set env [lindex $dbargs $eindex]
+ set txnenv [is_txnenv $env]
+ set testdir [get_home $env]
+ }
+ cleanup $testdir $env
+
+ set db [eval {berkdb_open -btcompare $cmpfunc \
+ -create -mode 0644} $method $dbargs $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ set did [open $dict]
+
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+ set pflags ""
+ set gflags ""
+ set txn ""
+ set btvals {}
+ set btvalsck {}
+ set checkfunc test093_check
+ puts "\tTest0$tnum.a: put/get loop"
+ # Here is the loop where we put and get each key/data pair
+ set count 0
+ while { [gets $did str] != -1 && $count < $nentries } {
+ set key $str
+ set str [reverse $str]
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval \
+ {$db put} $txn $pflags {$key [chop_data $method $str]}]
+ error_check_good put $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ lappend btvals $key
+
+ set ret [eval {$db get} $gflags {$key}]
+ error_check_good \
+ get $ret [list [list $key [pad_data $method $str]]]
+
+ incr count
+ }
+ close $did
+ # Now we will get each key from the DB and compare the results
+ # to the original.
+ puts "\tTest0$tnum.b: dump file"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ dump_file $db $txn $t1 $checkfunc
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+
+ # Now compare the keys to see if they match the dictionary (or ints)
+ set q q
+ filehead $nentries $dict $t2
+ filesort $t2 $t3
+ file rename -force $t3 $t2
+ filesort $t1 $t3
+
+ error_check_good Test0$tnum:diff($t3,$t2) \
+ [filecmp $t3 $t2] 0
+
+ puts "\tTest0$tnum.c: dump file in order"
+ # Now, reopen the file and run the last test again.
+ # We open it here, ourselves, because all uses of the db
+ # need to have the correct comparison func set. Then
+ # call dump_file_direction directly.
+ set btvalsck {}
+ set db [eval {berkdb_open -btcompare $cmpfunc -rdonly} \
+ $dbargs $method $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ dump_file_direction $db $txn $t1 $checkfunc "-first" "-next"
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+
+ #
+ # We need to sort btvals according to the comparison function.
+ # Once that is done, btvalsck and btvals should be the same.
+ puts "\tTest0$tnum.d: check file order"
+
+ $sortfunc
+
+ error_check_good btvals:len [llength $btvals] [llength $btvalsck]
+ for {set i 0} {$i < $nentries} {incr i} {
+ error_check_good vals:$i [lindex $btvals $i] \
+ [lindex $btvalsck $i]
+ }
+}
+
+proc test093_runbig { method dbargs nentries tnum cmpfunc sortfunc } {
+ source ./include.tcl
+ global btvals
+ global btvalsck
+
+ # Create the database and open the dictionary
+ set eindex [lsearch -exact $dbargs "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ set txnenv 0
+ if { $eindex == -1 } {
+ set testfile $testdir/test0$tnum.db
+ set env NULL
+ } else {
+ set testfile test0$tnum.db
+ incr eindex
+ set env [lindex $dbargs $eindex]
+ set txnenv [is_txnenv $env]
+ set testdir [get_home $env]
+ }
+ cleanup $testdir $env
+
+ set db [eval {berkdb_open -btcompare $cmpfunc \
+ -create -mode 0644} $method $dbargs $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+ set t4 $testdir/t4
+ set t5 $testdir/t5
+ set pflags ""
+ set gflags ""
+ set txn ""
+ set btvals {}
+ set btvalsck {}
+ set checkfunc test093_checkbig
+ puts "\tTest0$tnum.e:\
+ big key put/get loop key=filecontents data=filename"
+
+ # Here is the loop where we put and get each key/data pair
+ set file_list [get_file_list 1]
+
+ set count 0
+ foreach f $file_list {
+ set fid [open $f r]
+ fconfigure $fid -translation binary
+ set key [read $fid]
+ close $fid
+
+ set key $f$key
+
+ set fcopy [open $t5 w]
+ fconfigure $fcopy -translation binary
+ puts -nonewline $fcopy $key
+ close $fcopy
+
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn $pflags {$key \
+ [chop_data $method $f]}]
+ error_check_good put_file $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ lappend btvals $key
+
+ # Should really catch errors
+ set fid [open $t4 w]
+ fconfigure $fid -translation binary
+ if [catch {eval {$db get} $gflags {$key}} data] {
+ puts -nonewline $fid $data
+ } else {
+ # Data looks like {{key data}}
+ set key [lindex [lindex $data 0] 0]
+ puts -nonewline $fid $key
+ }
+ close $fid
+ error_check_good \
+ Test093:diff($t5,$t4) [filecmp $t5 $t4] 0
+
+ incr count
+ }
+
+ # Now we will get each key from the DB and compare the results
+ # to the original.
+ puts "\tTest0$tnum.f: big dump file"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ dump_file $db $txn $t1 $checkfunc
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+
+ puts "\tTest0$tnum.g: dump file in order"
+ # Now, reopen the file and run the last test again.
+ # We open it here, ourselves, because all uses of the db
+ # need to have the correct comparison func set. Then
+ # call dump_file_direction directly.
+
+ set btvalsck {}
+ set db [eval {berkdb_open -btcompare $cmpfunc -rdonly} \
+ $dbargs $method $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ dump_file_direction $db $txn $t1 $checkfunc "-first" "-next"
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+
+ #
+ # We need to sort btvals according to the comparison function.
+ # Once that is done, btvalsck and btvals should be the same.
+ puts "\tTest0$tnum.h: check file order"
+
+ $sortfunc
+ error_check_good btvals:len [llength $btvals] [llength $btvalsck]
+
+ set end [llength $btvals]
+ for {set i 0} {$i < $end} {incr i} {
+ error_check_good vals:$i [lindex $btvals $i] \
+ [lindex $btvalsck $i]
+ }
+}
+
+# Simple bt comparison.
+proc test093_cmp1 { a b } {
+ return [string compare $b $a]
+}
+
+# Simple bt sorting.
+proc test093_sort1 {} {
+ global btvals
+ #
+ # This one is easy, just sort in reverse.
+ #
+ set btvals [lsort -decreasing $btvals]
+}
+
+proc test093_cmp2 { a b } {
+ set arev [reverse $a]
+ set brev [reverse $b]
+ return [string compare $arev $brev]
+}
+
+proc test093_sort2 {} {
+ global btvals
+
+ # We have to reverse them, then sort them,
+ # then reverse them back to real words.
+ set rbtvals {}
+ foreach i $btvals {
+ lappend rbtvals [reverse $i]
+ }
+ set rbtvals [lsort -increasing $rbtvals]
+ set newbtvals {}
+ foreach i $rbtvals {
+ lappend newbtvals [reverse $i]
+ }
+ set btvals $newbtvals
+}
+
+# Check function for test093; data is the reverse of the key.
+proc test093_check { key data } {
+ global btvalsck
+
+ error_check_good "key/data mismatch" $data [reverse $key]
+ lappend btvalsck $key
+}
+
+# Check function for test093 big keys (key = filename + contents, data = filename).
+proc test093_checkbig { key data } {
+ source ./include.tcl
+ global btvalsck
+
+ set fid [open $data r]
+ fconfigure $fid -translation binary
+ set cont [read $fid]
+ close $fid
+ error_check_good "key/data mismatch" $key $data$cont
+ lappend btvalsck $key
+}
+
diff --git a/bdb/test/test094.tcl b/bdb/test/test094.tcl
new file mode 100644
index 00000000000..781052913f4
--- /dev/null
+++ b/bdb/test/test094.tcl
@@ -0,0 +1,251 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test094.tcl,v 11.16 2002/06/20 19:01:02 sue Exp $
+#
+# TEST test094
+# TEST Test using set_dup_compare.
+# TEST
+# TEST Use the first 10,000 entries from the dictionary.
+# TEST Insert each with self as key and data; retrieve each.
+# TEST After all are entered, retrieve all; compare output to original.
+# TEST Close file, reopen, do retrieve and re-verify.
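+#
+# A minimal usage sketch of -dupcompare; it is not invoked by any test,
+# and the proc and file names (test094_dupcompare_sketch, sketch094.db)
+# are illustrative only.  The callback orders the duplicate data items
+# of a key and must be combined with -dup -dupsort.
+proc test094_dupcompare_sketch { } {
+ set db [berkdb_open -dupcompare test094_cmp -dup -dupsort \
+     -create -mode 0644 -btree sketch094.db]
+ error_check_good sketch_open [is_valid_db $db] TRUE
+ # Three duplicates for one key; test094_cmp orders them in reverse.
+ foreach d { 0:aaa 1:aaa 2:aaa } {
+  $db put aaa $d
+ }
+ set ret [$db get aaa]
+ error_check_good sketch_ndups [llength $ret] 3
+ error_check_good sketch_db_close [$db close] 0
+}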
+proc test094 { method {nentries 10000} {ndups 10} {tnum "94"} args} {
+ source ./include.tcl
+ global errorInfo
+
+ set dbargs [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ if { [is_btree $method] != 1 && [is_hash $method] != 1 } {
+ puts "Test0$tnum: skipping for method $method."
+ return
+ }
+
+ set txnenv 0
+ set eindex [lsearch -exact $dbargs "-env"]
+ # Create the database and open the dictionary
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test0$tnum-a.db
+ set env NULL
+ } else {
+ set testfile test0$tnum-a.db
+ incr eindex
+ set env [lindex $dbargs $eindex]
+ set rpcenv [is_rpcenv $env]
+ if { $rpcenv == 1 } {
+ puts "Test0$tnum: skipping for RPC"
+ return
+ }
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append dbargs " -auto_commit "
+ if { $nentries == 10000 } {
+ set nentries 100
+ }
+ reduce_dups nentries ndups
+ }
+ set testdir [get_home $env]
+ }
+ puts "Test0$tnum: $method ($args) $nentries \
+ with $ndups dups using dupcompare"
+
+ cleanup $testdir $env
+
+ set db [eval {berkdb_open_noerr -dupcompare test094_cmp \
+ -dup -dupsort -create -mode 0644} $omethod $dbargs {$testfile}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set did [open $dict]
+ set t1 $testdir/t1
+ set pflags ""
+ set gflags ""
+ set txn ""
+ puts "\tTest0$tnum.a: $nentries put/get duplicates loop"
+ # Here is the loop where we put and get each key/data pair
+ set count 0
+ set dlist {}
+ for {set i 0} {$i < $ndups} {incr i} {
+ set dlist [linsert $dlist 0 $i]
+ }
+ while { [gets $did str] != -1 && $count < $nentries } {
+ set key $str
+ for {set i 0} {$i < $ndups} {incr i} {
+ set data $i:$str
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} \
+ $txn $pflags {$key [chop_data $omethod $data]}]
+ error_check_good put $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ }
+
+ set ret [eval {$db get} $gflags {$key}]
+ error_check_good get [llength $ret] $ndups
+ incr count
+ }
+ close $did
+ # Now we will get each key from the DB and compare the results
+ # to the original.
+ puts "\tTest0$tnum.b: traverse checking duplicates before close"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ dup_check $db $txn $t1 $dlist
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+
+ # Set up second testfile so truncate flag is not needed.
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test0$tnum-b.db
+ set env NULL
+ } else {
+ set testfile test0$tnum-b.db
+ set env [lindex $dbargs $eindex]
+ set testdir [get_home $env]
+ }
+ cleanup $testdir $env
+
+ #
+ # Test dupcompare with data items big enough to force offpage dups.
+ #
+ puts "\tTest0$tnum.c: big key put/get dup loop key=filename data=filecontents"
+ set db [eval {berkdb_open -dupcompare test094_cmp -dup -dupsort \
+ -create -mode 0644} $omethod $dbargs $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ # Here is the loop where we put and get each key/data pair
+ set file_list [get_file_list 1]
+ if { [llength $file_list] > $nentries } {
+ set file_list [lrange $file_list 1 $nentries]
+ }
+
+ set count 0
+ foreach f $file_list {
+ set fid [open $f r]
+ fconfigure $fid -translation binary
+ set cont [read $fid]
+ close $fid
+
+ set key $f
+ for {set i 0} {$i < $ndups} {incr i} {
+ set data $i:$cont
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} \
+ $txn $pflags {$key [chop_data $omethod $data]}]
+ error_check_good put $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ }
+
+ set ret [eval {$db get} $gflags {$key}]
+ error_check_good get [llength $ret] $ndups
+ incr count
+ }
+
+ puts "\tTest0$tnum.d: traverse checking duplicates before close"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ dup_file_check $db $txn $t1 $dlist
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ set testdir [get_home $env]
+ }
+ error_check_good db_close [$db close] 0
+
+ # Clean up the test directory, since there's currently
+ # no way to specify a dup_compare function to berkdb dbverify
+ # and without one it will fail.
+ cleanup $testdir $env
+}
+
+# Simple dup comparison.
+proc test094_cmp { a b } {
+ return [string compare $b $a]
+}
+
+# Check if each key appears exactly [llength dlist] times in the file with
+# the duplicate tags matching those that appear in dlist.
+proc test094_dup_big { db txn tmpfile dlist {extra 0}} {
+ source ./include.tcl
+
+ set outf [open $tmpfile w]
+ # Now we will get each key from the DB and dump to outfile
+ set c [eval {$db cursor} $txn]
+ set lastkey ""
+ set done 0
+ while { $done != 1} {
+ foreach did $dlist {
+ set rec [$c get "-next"]
+ if { [string length $rec] == 0 } {
+ set done 1
+ break
+ }
+ set key [lindex [lindex $rec 0] 0]
+ set fulldata [lindex [lindex $rec 0] 1]
+ set id [id_of $fulldata]
+ set d [data_of $fulldata]
+ if { [string compare $key $lastkey] != 0 && \
+ $id != [lindex $dlist 0] } {
+ set e [lindex $dlist 0]
+ error "FAIL: \tKey \
+ $key, expected dup id $e, got $id"
+ }
+ error_check_good dupget.data $d $key
+ error_check_good dupget.id $id $did
+ set lastkey $key
+ }
+ #
+ # Some tests add an extra dup (like overflow entries).
+ # Check its id if it exists.
+ if { $extra != 0} {
+ set okey $key
+ set rec [$c get "-next"]
+ if { [string length $rec] != 0 } {
+ set key [lindex [lindex $rec 0] 0]
+ #
+ # If this key has no extras, go back for the
+ # next iteration.
+ if { [string compare $key $lastkey] != 0 } {
+ set key $okey
+ set rec [$c get "-prev"]
+ } else {
+ set fulldata [lindex [lindex $rec 0] 1]
+ set id [id_of $fulldata]
+ set d [data_of $fulldata]
+ error_check_bad dupget.data1 $d $key
+ error_check_good dupget.id1 $id $extra
+ }
+ }
+ }
+ if { $done != 1 } {
+ puts $outf $key
+ }
+ }
+ close $outf
+ error_check_good curs_close [$c close] 0
+}
diff --git a/bdb/test/test095.tcl b/bdb/test/test095.tcl
new file mode 100644
index 00000000000..5543f346b7e
--- /dev/null
+++ b/bdb/test/test095.tcl
@@ -0,0 +1,296 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2000-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test095.tcl,v 11.16 2002/08/08 15:38:12 bostic Exp $
+#
+# TEST test095
+# TEST Bulk get test. [#2934]
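+#
+# A minimal usage sketch of the bulk get interface; it is not invoked by
+# any test, and the proc and file names (test095_bulk_sketch,
+# sketch095.db) are illustrative only.  "get -multi <bufsize>" fills one
+# buffer of <bufsize> bytes with as many key/data pairs as fit; a buffer
+# that is too small fails with ENOMEM.
+proc test095_bulk_sketch { } {
+ set db [berkdb_open -create -mode 0644 -btree sketch095.db]
+ error_check_good sketch_open [is_valid_db $db] TRUE
+ $db put key1 data1
+ $db put key2 data2
+ # One call returns a list of {key data} pairs for the requested key.
+ set ret [$db get -multi 8192 key1]
+ error_check_good sketch_bulk_data [lindex [lindex $ret 0] 1] data1
+ error_check_good sketch_db_close [$db close] 0
+}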
+proc test095 { method {nsets 1000} {noverflows 25} {tnum 95} args } {
+ source ./include.tcl
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set basename $testdir/test0$tnum
+ set env NULL
+ # No external env was given, so use a large private cache;
+ # there's no reason to swap pages--this isn't an mpool test.
+ set carg { -cachesize {0 25000000 0} }
+ } else {
+ set basename test0$tnum
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ puts "Skipping for environment with txns"
+ return
+ }
+ set testdir [get_home $env]
+ set carg {}
+ }
+ cleanup $testdir $env
+
+ puts "Test0$tnum: $method ($args) Bulk get test"
+
+ if { [is_record_based $method] == 1 || [is_rbtree $method] == 1 } {
+ puts "Test0$tnum skipping for method $method"
+ return
+ }
+
+ # We run the meat of the test twice: once with unsorted dups,
+ # once with sorted dups.
+ for { set dflag "-dup"; set sort "unsorted"; set diter 0 } \
+ { $diter < 2 } \
+ { set dflag "-dup -dupsort"; set sort "sorted"; incr diter } {
+ set testfile $basename-$sort.db
+ set did [open $dict]
+
+ # Open and populate the database with $nsets sets of dups.
+ # Each set contains as many dups as its set number.
+ puts "\tTest0$tnum.a:\
+ Creating database with $nsets sets of $sort dups."
+ set dargs "$dflag $carg $args"
+ set db [eval {berkdb_open -create} $omethod $dargs $testfile]
+ error_check_good db_open [is_valid_db $db] TRUE
+ t95_populate $db $did $nsets 0
+
+ # Run basic get tests.
+ t95_gettest $db $tnum b [expr 8192] 1
+ t95_gettest $db $tnum c [expr 10 * 8192] 0
+
+ # Run cursor get tests.
+ t95_cgettest $db $tnum d [expr 100] 1
+ t95_cgettest $db $tnum e [expr 10 * 8192] 0
+
+ # Run invalid flag combination tests
+ # Sync and reopen test file so errors won't be sent to stderr
+ error_check_good db_sync [$db sync] 0
+ set noerrdb [eval berkdb_open_noerr $dargs $testfile]
+ t95_flagtest $noerrdb $tnum f [expr 8192]
+ t95_cflagtest $noerrdb $tnum g [expr 100]
+ error_check_good noerrdb_close [$noerrdb close] 0
+
+ # Set up for overflow tests
+ set max [expr 4000 * $noverflows]
+ puts "\tTest0$tnum.h: Growing\
+ database with $noverflows overflow sets (max item size $max)"
+ t95_populate $db $did $noverflows 4000
+
+ # Run overflow get tests.
+ t95_gettest $db $tnum i [expr 10 * 8192] 1
+ t95_gettest $db $tnum j [expr $max * 2] 1
+ t95_gettest $db $tnum k [expr $max * $noverflows * 2] 0
+
+ # Run overflow cursor get tests.
+ t95_cgettest $db $tnum l [expr 10 * 8192] 1
+ t95_cgettest $db $tnum m [expr $max * 2] 0
+
+ error_check_good db_close [$db close] 0
+ close $did
+ }
+}
+
+proc t95_gettest { db tnum letter bufsize expectfail } {
+ t95_gettest_body $db $tnum $letter $bufsize $expectfail 0
+}
+proc t95_cgettest { db tnum letter bufsize expectfail } {
+ t95_gettest_body $db $tnum $letter $bufsize $expectfail 1
+}
+proc t95_flagtest { db tnum letter bufsize } {
+ t95_flagtest_body $db $tnum $letter $bufsize 0
+}
+proc t95_cflagtest { db tnum letter bufsize } {
+ t95_flagtest_body $db $tnum $letter $bufsize 1
+}
+
+# Basic get test
+proc t95_gettest_body { db tnum letter bufsize expectfail usecursor } {
+ global errorCode
+
+ if { $usecursor == 0 } {
+ set action "db get -multi"
+ } else {
+ set action "dbc get -multi -set/-next"
+ }
+ puts "\tTest0$tnum.$letter: $action with bufsize $bufsize"
+
+ set allpassed TRUE
+ set saved_err ""
+
+ # Cursor for $usecursor.
+ if { $usecursor != 0 } {
+ set getcurs [$db cursor]
+ error_check_good getcurs [is_valid_cursor $getcurs $db] TRUE
+ }
+
+ # Traverse DB with cursor; do get/c_get(DB_MULTIPLE) on each item.
+ set dbc [$db cursor]
+ error_check_good is_valid_dbc [is_valid_cursor $dbc $db] TRUE
+ for { set dbt [$dbc get -first] } { [llength $dbt] != 0 } \
+ { set dbt [$dbc get -nextnodup] } {
+ set key [lindex [lindex $dbt 0] 0]
+ set datum [lindex [lindex $dbt 0] 1]
+
+ if { $usecursor == 0 } {
+ set ret [catch {eval $db get -multi $bufsize $key} res]
+ } else {
+ set res {}
+ for { set ret [catch {eval $getcurs get -multi $bufsize\
+ -set $key} tres] } \
+ { $ret == 0 && [llength $tres] != 0 } \
+ { set ret [catch {eval $getcurs get -multi $bufsize\
+ -nextdup} tres]} {
+ eval lappend res $tres
+ }
+ }
+
+ # If we expect a failure, be more tolerant if the above fails;
+ # just make sure it's an ENOMEM, mark it, and move along.
+ if { $expectfail != 0 && $ret != 0 } {
+ error_check_good multi_failure_errcode \
+ [is_substr $errorCode ENOMEM] 1
+ set allpassed FALSE
+ continue
+ }
+ error_check_good get_multi($key) $ret 0
+ t95_verify $res FALSE
+ }
+
+ set ret [catch {eval $db get -multi $bufsize} res]
+
+ if { $expectfail == 1 } {
+ error_check_good allpassed $allpassed FALSE
+ puts "\t\tTest0$tnum.$letter:\
+ returned at least one ENOMEM (as expected)"
+ } else {
+ error_check_good allpassed $allpassed TRUE
+ puts "\t\tTest0$tnum.$letter: succeeded (as expected)"
+ }
+
+ error_check_good dbc_close [$dbc close] 0
+ if { $usecursor != 0 } {
+ error_check_good getcurs_close [$getcurs close] 0
+ }
+}
+
+# Test of invalid flag combinations for -multi
+proc t95_flagtest_body { db tnum letter bufsize usecursor } {
+ global errorCode
+
+ if { $usecursor == 0 } {
+ set action "db get -multi "
+ } else {
+ set action "dbc get -multi "
+ }
+ puts "\tTest0$tnum.$letter: $action with invalid flag combinations"
+
+ # Cursor for $usecursor.
+ if { $usecursor != 0 } {
+ set getcurs [$db cursor]
+ error_check_good getcurs [is_valid_cursor $getcurs $db] TRUE
+ }
+
+ if { $usecursor == 0 } {
+ # Disallowed flags for basic -multi get
+ set badflags [list consume consume_wait {rmw some_key}]
+
+ foreach flag $badflags {
+ catch {eval $db get -multi $bufsize -$flag} ret
+ error_check_good \
+ db:get:multi:$flag [is_substr $errorCode EINVAL] 1
+ }
+ } else {
+ # Disallowed flags for cursor -multi get
+ set cbadflags [list last get_recno join_item \
+ {multi_key 1000} prev prevnodup]
+
+ set dbc [$db cursor]
+ $dbc get -first
+ foreach flag $cbadflags {
+ catch {eval $dbc get -multi $bufsize -$flag} ret
+ error_check_good dbc:get:multi:$flag \
+ [is_substr $errorCode EINVAL] 1
+ }
+ error_check_good dbc_close [$dbc close] 0
+ }
+ if { $usecursor != 0 } {
+ error_check_good getcurs_close [$getcurs close] 0
+ }
+ puts "\t\tTest0$tnum.$letter completed"
+}
+
+# Verify that a passed-in list of key/data pairs all match the predicted
+# structure (e.g. {{thing1 thing1.0}}, {{key2 key2.0} {key2 key2.1}}).
+proc t95_verify { res multiple_keys } {
+ global alphabet
+
+ set i 0
+
+ set orig_key [lindex [lindex $res 0] 0]
+ set nkeys [string trim $orig_key $alphabet']
+ set base_key [string trim $orig_key 0123456789]
+ set datum_count 0
+
+ while { 1 } {
+ set key [lindex [lindex $res $i] 0]
+ set datum [lindex [lindex $res $i] 1]
+
+ if { $datum_count >= $nkeys } {
+ if { [llength $key] != 0 } {
+ # If there are keys beyond $nkeys, we'd
+ # better have multiple_keys set.
+ error_check_bad "keys beyond number $i allowed"\
+ $multiple_keys FALSE
+
+ # If multiple_keys is set, accept the new key.
+ set orig_key $key
+ set nkeys [eval string trim \
+ $orig_key {$alphabet'}]
+ set base_key [eval string trim \
+ $orig_key 0123456789]
+ set datum_count 0
+ } else {
+ # datum_count has hit nkeys. We're done.
+ return
+ }
+ }
+
+ error_check_good returned_key($i) $key $orig_key
+ error_check_good returned_datum($i) \
+ $datum $base_key.[format %4u $datum_count]
+ incr datum_count
+ incr i
+ }
+}
+
+# Add nsets dup sets.  Set i uses key "word$i" and i duplicate data
+# items "word.j" (j = 0..i-1), with "word" padded by (i * pad_bytes)
+# extra bytes.
+proc t95_populate { db did nsets pad_bytes } {
+ set txn ""
+ for { set i 1 } { $i <= $nsets } { incr i } {
+ # basekey is a padded dictionary word
+ gets $did basekey
+
+ append basekey [repeat "a" [expr $pad_bytes * $i]]
+
+ # key is basekey with the number of dups stuck on.
+ set key $basekey$i
+
+ for { set j 0 } { $j < $i } { incr j } {
+ set data $basekey.[format %4u $j]
+ error_check_good db_put($key,$data) \
+ [eval {$db put} $txn {$key $data}] 0
+ }
+ }
+
+ # This will make debugging easier, and since the database is
+ # read-only from here out, it's cheap.
+ error_check_good db_sync [$db sync] 0
+}
diff --git a/bdb/test/test096.tcl b/bdb/test/test096.tcl
new file mode 100644
index 00000000000..042df19eac7
--- /dev/null
+++ b/bdb/test/test096.tcl
@@ -0,0 +1,202 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test096.tcl,v 11.19 2002/08/19 20:09:29 margo Exp $
+#
+# TEST test096
+# TEST Db->truncate test.
+proc test096 { method {pagesize 512} {nentries 50} {ndups 4} args} {
+ global fixed_len
+ source ./include.tcl
+
+ set orig_fixed_len $fixed_len
+ set args [convert_args $method $args]
+ set encargs ""
+ set args [split_encargs $args encargs]
+ set omethod [convert_method $method]
+
+ puts "Test096: $method db truncate method test"
+ if { [is_record_based $method] == 1 || \
+ [is_rbtree $method] == 1 } {
+ puts "Test096 skipping for method $method"
+ return
+ }
+ set pgindex [lsearch -exact $args "-pagesize"]
+ if { $pgindex != -1 } {
+ puts "Test096: Skipping for specific pagesizes"
+ return
+ }
+
+ # Create the database and open the dictionary
+ set eindex [lsearch -exact $args "-env"]
+ set testfile test096.db
+ if { $eindex != -1 } {
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 0 } {
+ puts "Environment w/o txns specified; skipping."
+ return
+ }
+ if { $nentries == 1000 } {
+ set nentries 100
+ }
+ reduce_dups nentries ndups
+ set testdir [get_home $env]
+ set closeenv 0
+ } else {
+ env_cleanup $testdir
+
+ #
+ # We need an env for exclusive-use testing.
+ set env [eval {berkdb_env -create -home $testdir -txn} $encargs]
+ error_check_good env_create [is_valid_env $env] TRUE
+ set closeenv 1
+ }
+
+ set t1 $testdir/t1
+
+ puts "\tTest096.a: Create $nentries entries"
+ set db [eval {berkdb_open -create -auto_commit \
+ -env $env $omethod -mode 0644} $args $testfile]
+ error_check_good db_open [is_valid_db $db] TRUE
+
+ set did [open $dict]
+ set count 0
+ set txn ""
+ set pflags ""
+ set gflags ""
+ while { [gets $did str] != -1 && $count < $nentries } {
+ set key $str
+ set datastr [reverse $str]
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ set ret [eval {$db put} \
+ $txn $pflags {$key [chop_data $method $datastr]}]
+ error_check_good put $ret 0
+ error_check_good txn [$t commit] 0
+
+ set ret [eval {$db get} $gflags {$key}]
+ error_check_good $key:dbget [llength $ret] 1
+
+ incr count
+ }
+ close $did
+
+ puts "\tTest096.b: Truncate database"
+ error_check_good dbclose [$db close] 0
+ set dbtr [eval {berkdb_open -create -auto_commit \
+ -env $env $omethod -mode 0644} $args $testfile]
+ error_check_good db_open [is_valid_db $dbtr] TRUE
+
+ set ret [$dbtr truncate -auto_commit]
+ error_check_good dbtrunc $ret $nentries
+ error_check_good db_close [$dbtr close] 0
+
+ set db [eval {berkdb_open -env $env} $args $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ set ret [$db get -glob *]
+ error_check_good dbget [llength $ret] 0
+ error_check_good dbclose [$db close] 0
+ error_check_good dbverify [verify_dir $testdir "\tTest096.c: "] 0
+
+ #
+ # Remove database, and create a new one with dups.
+ #
+ puts "\tTest096.d: Create $nentries entries with $ndups duplicates"
+ set ret [berkdb dbremove -env $env -auto_commit $testfile]
+ set db [eval {berkdb_open -pagesize $pagesize -dup -auto_commit \
+ -create -env $env $omethod -mode 0644} $args $testfile]
+ error_check_good db_open [is_valid_db $db] TRUE
+ set did [open $dict]
+ set count 0
+ set txn ""
+ set pflags ""
+ set gflags ""
+ while { [gets $did str] != -1 && $count < $nentries } {
+ set key $str
+ for { set i 1 } { $i <= $ndups } { incr i } {
+ set datastr $i:$str
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ set ret [eval {$db put} \
+ $txn $pflags {$key [chop_data $method $datastr]}]
+ error_check_good put $ret 0
+ error_check_good txn [$t commit] 0
+ }
+
+ set ret [eval {$db get} $gflags {$key}]
+ error_check_bad $key:dbget_dups [llength $ret] 0
+ error_check_good $key:dbget_dups1 [llength $ret] $ndups
+
+ incr count
+ }
+ close $did
+ set dlist ""
+ for { set i 1 } {$i <= $ndups} {incr i} {
+ lappend dlist $i
+ }
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ dup_check $db $txn $t1 $dlist
+ error_check_good txn [$t commit] 0
+ puts "\tTest096.e: Verify off page duplicates status"
+ set stat [$db stat]
+ error_check_bad stat:offpage [is_substr $stat \
+ "{{Duplicate pages} 0}"] 1
+
+ set recs [expr $ndups * $count]
+ error_check_good dbclose [$db close] 0
+
+ puts "\tTest096.f: Truncate database in a txn then abort"
+ set txn [$env txn]
+
+ set dbtr [eval {berkdb_open -auto_commit -create \
+ -env $env $omethod -mode 0644} $args $testfile]
+ error_check_good db_open [is_valid_db $dbtr] TRUE
+ error_check_good txnbegin [is_valid_txn $txn $env] TRUE
+
+ set ret [$dbtr truncate -txn $txn]
+ error_check_good dbtrunc $ret $recs
+
+ error_check_good txnabort [$txn abort] 0
+ error_check_good db_close [$dbtr close] 0
+
+ set db [eval {berkdb_open -auto_commit -env $env} $args $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ set ret [$db get -glob *]
+ error_check_good dbget [llength $ret] $recs
+ error_check_good dbclose [$db close] 0
+
+ puts "\tTest096.g: Truncate database in a txn then commit"
+ set txn [$env txn]
+ error_check_good txnbegin [is_valid_txn $txn $env] TRUE
+
+ set dbtr [eval {berkdb_open -auto_commit -create \
+ -env $env $omethod -mode 0644} $args $testfile]
+ error_check_good db_open [is_valid_db $dbtr] TRUE
+
+ set ret [$dbtr truncate -txn $txn]
+ error_check_good dbtrunc $ret $recs
+
+ error_check_good txncommit [$txn commit] 0
+ error_check_good db_close [$dbtr close] 0
+
+ set db [berkdb_open -auto_commit -env $env $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ set ret [$db get -glob *]
+ error_check_good dbget [llength $ret] 0
+ error_check_good dbclose [$db close] 0
+
+ set testdir [get_home $env]
+ error_check_good dbverify [verify_dir $testdir "\tTest096.h: "] 0
+
+ if { $closeenv == 1 } {
+ error_check_good envclose [$env close] 0
+ }
+}
diff --git a/bdb/test/test097.tcl b/bdb/test/test097.tcl
new file mode 100644
index 00000000000..6e43b820b2f
--- /dev/null
+++ b/bdb/test/test097.tcl
@@ -0,0 +1,188 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test097.tcl,v 11.8 2002/09/04 18:47:42 sue Exp $
+#
+# TEST test097
+# TEST Open up a large set of database files simultaneously.
+# TEST Adjust for local file descriptor resource limits.
+# TEST Then use the first 1000 entries from the dictionary.
+# TEST Insert each with self as key and a fixed, medium length data string;
+# TEST retrieve each. After all are entered, retrieve all; compare output
+# TEST to original.
+
+proc test097 { method {ndbs 500} {nentries 400} args } {
+ global pad_datastr
+ source ./include.tcl
+
+ set largs [convert_args $method $args]
+ set encargs ""
+ set largs [split_encargs $largs encargs]
+
+ # Open an environment, with a 1MB cache.
+ set eindex [lsearch -exact $largs "-env"]
+ if { $eindex != -1 } {
+ incr eindex
+ set env [lindex $largs $eindex]
+ puts "Test097: $method: skipping for env $env"
+ return
+ }
+ env_cleanup $testdir
+ set env [eval {berkdb_env -create \
+ -cachesize { 0 1048576 1 } -txn} -home $testdir $encargs]
+ error_check_good dbenv [is_valid_env $env] TRUE
+
+ # Create the database and open the dictionary
+ set testfile test097.db
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+ #
+ # When running with HAVE_MUTEX_SYSTEM_RESOURCES,
+ # we can run out of mutex lock slots due to the nature of this test.
+ # So, for this test, increase the number of pages per extent
+ # to consume fewer resources.
+ #
+ if { [is_queueext $method] } {
+ set numdb [expr $ndbs / 4]
+ set eindex [lsearch -exact $largs "-extent"]
+ error_check_bad extent $eindex -1
+ incr eindex
+ set extval [lindex $largs $eindex]
+ set extval [expr $extval * 4]
+ set largs [lreplace $largs $eindex $eindex $extval]
+ }
+ puts -nonewline "Test097: $method ($largs) "
+ puts "$nentries entries in at most $ndbs simultaneous databases"
+
+ puts "\tTest097.a: Simultaneous open"
+ set numdb [test097_open tdb $ndbs $method $env $testfile $largs]
+ if { $numdb == 0 } {
+ puts "\tTest097: Insufficient resources available -- skipping."
+ error_check_good envclose [$env close] 0
+ return
+ }
+
+ set did [open $dict]
+
+ set pflags ""
+ set gflags ""
+ set txn ""
+ set count 0
+
+ # Here is the loop where we put and get each key/data pair
+ if { [is_record_based $method] == 1 } {
+ append gflags "-recno"
+ }
+ puts "\tTest097.b: put/get on $numdb databases"
+ set datastr "abcdefghij"
+ set pad_datastr [pad_data $method $datastr]
+ while { [gets $did str] != -1 && $count < $nentries } {
+ if { [is_record_based $method] == 1 } {
+ set key [expr $count + 1]
+ } else {
+ set key $str
+ }
+ for { set i 1 } { $i <= $numdb } { incr i } {
+ set ret [eval {$tdb($i) put} $txn $pflags \
+ {$key [chop_data $method $datastr]}]
+ error_check_good put $ret 0
+ set ret [eval {$tdb($i) get} $gflags {$key}]
+ error_check_good get $ret [list [list $key \
+ [pad_data $method $datastr]]]
+ }
+ incr count
+ }
+ close $did
+
+ # Now we will get each key from the DB and compare the results
+ # to the original.
+ puts "\tTest097.c: dump and check files"
+ for { set j 1 } { $j <= $numdb } { incr j } {
+ dump_file $tdb($j) $txn $t1 test097.check
+ error_check_good db_close [$tdb($j) close] 0
+
+ # Now compare the keys to see if they match the dictionary
+ if { [is_record_based $method] == 1 } {
+ set oid [open $t2 w]
+ for {set i 1} {$i <= $nentries} {set i [incr i]} {
+ puts $oid $i
+ }
+ close $oid
+ filesort $t2 $t3
+ file rename -force $t3 $t2
+ } else {
+ set q q
+ filehead $nentries $dict $t3
+ filesort $t3 $t2
+ }
+ filesort $t1 $t3
+
+ error_check_good Test097:diff($t3,$t2) [filecmp $t3 $t2] 0
+ }
+ error_check_good envclose [$env close] 0
+}
+
+# Check function for test097; every data item should be the fixed datastr.
+proc test097.check { key data } {
+ global pad_datastr
+ error_check_good "data mismatch for key $key" $data $pad_datastr
+}
+
+proc test097_open { tdb ndbs method env testfile largs } {
+ global errorCode
+ upvar $tdb db
+
+ set j 0
+ set numdb $ndbs
+ if { [is_queueext $method] } {
+ set numdb [expr $ndbs / 4]
+ }
+ set omethod [convert_method $method]
+ for { set i 1 } {$i <= $numdb } { incr i } {
+ set stat [catch {eval {berkdb_open -env $env \
+ -pagesize 512 -create -mode 0644} \
+ $largs {$omethod $testfile.$i}} db($i)]
+ #
+ # Check if we've reached our limit
+ #
+ if { $stat == 1 } {
+ set min 20
+ set em [is_substr $errorCode EMFILE]
+ set en [is_substr $errorCode ENFILE]
+ error_check_good open_ret [expr $em || $en] 1
+ puts \
+ "\tTest097.a.1 Encountered resource limits opening $i files, adjusting"
+ if { [is_queueext $method] } {
+ set end [expr $j / 4]
+ set min 10
+ } else {
+ set end [expr $j - 10]
+ }
+ #
+ # If we cannot open even $min files, then this test is
+ # not very useful. Close up shop and go back.
+ #
+ if { $end < $min } {
+ test097_close db 1 $j
+ return 0
+ }
+ test097_close db [expr $end + 1] $j
+ return $end
+ } else {
+ error_check_good dbopen [is_valid_db $db($i)] TRUE
+ set j $i
+ }
+ }
+ return $j
+}
+
+proc test097_close { tdb start end } {
+ upvar $tdb db
+
+ for { set i $start } { $i <= $end } { incr i } {
+ error_check_good db($i)close [$db($i) close] 0
+ }
+}
diff --git a/bdb/test/test098.tcl b/bdb/test/test098.tcl
new file mode 100644
index 00000000000..320e0258a84
--- /dev/null
+++ b/bdb/test/test098.tcl
@@ -0,0 +1,91 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test098.tcl,v 1.5 2002/07/11 20:38:36 sandstro Exp $
+#
+# TEST test098
+# TEST Test of DB_GET_RECNO and secondary indices. Open a primary and
+# TEST a secondary, and do a normal cursor get followed by a get_recno.
+# TEST (This is a smoke test for "Bug #1" in [#5811].)
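+#
+# A minimal sketch of a secondary index with an explicit key-extraction
+# callback; it is not invoked by any test, and the proc and file names
+# (test098_skey_sketch, sketch098-*.db) are illustrative only.  The
+# real test uses the suite's callback_n helper instead.
+proc test098_skey_sketch { pkey pdata } {
+ # Use the primary data item as the secondary key.
+ return $pdata
+}
+proc test098_associate_sketch { } {
+ set pdb [berkdb_open -create -mode 0644 -btree sketch098-p.db]
+ set sdb [berkdb_open -create -mode 0644 -btree sketch098-s.db]
+ error_check_good sketch_assoc [$pdb associate test098_skey_sketch $sdb] 0
+ $pdb put aaa data1
+ # The secondary is now keyed by the primary's data item.
+ error_check_good sketch_sget [llength [$sdb get data1]] 1
+ error_check_good sketch_s_close [$sdb close] 0
+ error_check_good sketch_p_close [$pdb close] 0
+}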
+
+proc test098 { method args } {
+ source ./include.tcl
+
+ set omethod [convert_method $method]
+ set args [convert_args $method $args]
+
+ puts "Test098: $omethod ($args): DB_GET_RECNO and secondary indices."
+
+ if { [is_rbtree $method] != 1 } {
+ puts "\tTest098: Skipping for method $method."
+ return
+ }
+
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ set txn ""
+ set auto ""
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set base $testdir/test098
+ set env NULL
+ } else {
+ set base test098
+ incr eindex
+ set env [lindex $args $eindex]
+ set rpcenv [is_rpcenv $env]
+ if { $rpcenv == 1 } {
+ puts "Test098: Skipping for RPC"
+ return
+ }
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ set auto " -auto_commit "
+ }
+ set testdir [get_home $env]
+ }
+ cleanup $testdir $env
+
+ puts "\tTest098.a: Set up databases."
+
+ set adb [eval {berkdb_open} $omethod $args $auto \
+ {-create} $base-primary.db]
+ error_check_good adb_create [is_valid_db $adb] TRUE
+
+ set bdb [eval {berkdb_open} $omethod $args $auto \
+ {-create} $base-secondary.db]
+ error_check_good bdb_create [is_valid_db $bdb] TRUE
+
+ set ret [eval $adb associate $auto [callback_n 0] $bdb]
+ error_check_good associate $ret 0
+
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$adb put} $txn aaa data1]
+ error_check_good put $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ set bc [$bdb cursor]
+ error_check_good cursor [is_valid_cursor $bc $bdb] TRUE
+
+ puts "\tTest098.b: c_get(DB_FIRST) on the secondary."
+ error_check_good get_first [$bc get -first] \
+ [list [list [[callback_n 0] aaa data1] data1]]
+
+ puts "\tTest098.c: c_get(DB_GET_RECNO) on the secondary."
+ error_check_good get_recno [$bc get -get_recno] 1
+
+ error_check_good c_close [$bc close] 0
+
+ error_check_good bdb_close [$bdb close] 0
+ error_check_good adb_close [$adb close] 0
+}
diff --git a/bdb/test/test099.tcl b/bdb/test/test099.tcl
new file mode 100644
index 00000000000..db177ce5fff
--- /dev/null
+++ b/bdb/test/test099.tcl
@@ -0,0 +1,177 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test099.tcl,v 1.2 2002/08/08 15:38:13 bostic Exp $
+#
+# TEST test099
+# TEST
+# TEST Test of DB->get and DBC->c_get with set_recno and get_recno.
+# TEST
+# TEST Populate a small btree -recnum database.
+# TEST After all are entered, retrieve each using -recno with DB->get.
+# TEST Open a cursor and do the same for DBC->c_get with set_recno.
+# TEST Verify that set_recno sets the record number position properly.
+# TEST Verify that get_recno returns the correct record numbers.
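+#
+# A minimal usage sketch of -set_recno / -get_recno on a -recnum btree
+# cursor; it is not invoked by any test, and the proc and file names
+# (test099_recno_sketch, sketch099.db) are illustrative only.
+proc test099_recno_sketch { } {
+ set db [berkdb_open -create -mode 0644 -btree -recnum sketch099.db]
+ error_check_good sketch_open [is_valid_db $db] TRUE
+ foreach k { alpha beta gamma } {
+  $db put $k $k
+ }
+ set dbc [$db cursor]
+ # Position on the second record by number, then read the number back.
+ set ret [$dbc get -set_recno 2]
+ error_check_good sketch_key [lindex [lindex $ret 0] 0] beta
+ error_check_good sketch_recno [$dbc get -get_recno] 2
+ error_check_good sketch_curs_close [$dbc close] 0
+ error_check_good sketch_db_close [$db close] 0
+}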
+proc test099 { method {nentries 10000} args } {
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ puts "Test099: Test of set_recno and get_recno in DBC->c_get."
+ if { [is_rbtree $method] != 1 } {
+ puts "Test099: skipping for method $method."
+ return
+ }
+
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test099.db
+ set env NULL
+ } else {
+ set testfile test099.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ #
+ # If we are using txns and running with the
+ # default, set the default down a bit.
+ #
+ if { $nentries == 10000 } {
+ set nentries 100
+ }
+ }
+ set testdir [get_home $env]
+ }
+ set t1 $testdir/t1
+ cleanup $testdir $env
+
+ # Create the database and open the dictionary
+ set db [eval {berkdb_open \
+ -create -mode 0644} $args {$omethod $testfile}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set did [open $dict]
+
+ set pflags ""
+ set gflags ""
+ set txn ""
+ set count 1
+
+ append gflags " -recno"
+
+ puts "\tTest099.a: put loop"
+ # Here is the loop where we put each key/data pair
+ while { [gets $did str] != -1 && $count < $nentries } {
+# global kvals
+# set key [expr $count]
+# set kvals($key) [pad_data $method $str]
+ set key $str
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set r [eval {$db put} \
+ $txn $pflags {$key [chop_data $method $str]}]
+ error_check_good db_put $r 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ incr count
+ }
+ close $did
+
+ puts "\tTest099.b: dump file"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ dump_file $db $txn $t1 test099.check
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+
+ puts "\tTest099.c: Test set_recno then get_recno"
+ set db [eval {berkdb_open -rdonly} $args $omethod $testfile ]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ # Open a cursor
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
+ error_check_good db_cursor [is_substr $dbc $db] 1
+
+ set did [open $t1]
+ set recno 1
+
+ # Create key(recno) array to use for later comparison
+ while { [gets $did str] != -1 } {
+ set kvals($recno) $str
+ incr recno
+ }
+
+ set recno 1
+ set ret [$dbc get -first]
+ error_check_bad dbc_get_first [llength $ret] 0
+
+ # First walk forward through the database ....
+ while { $recno < $count } {
+ # Test set_recno: verify it sets the record number properly.
+ set current [$dbc get -current]
+ set r [$dbc get -set_recno $recno]
+ error_check_good set_recno $current $r
+ # Test set_recno: verify that we find the expected key
+ # at the current record number position.
+ set k [lindex [lindex $r 0] 0]
+ error_check_good set_recno $kvals($recno) $k
+
+ # Test get_recno: verify that the return from
+ # get_recno matches the record number just set.
+ set g [$dbc get -get_recno]
+ error_check_good get_recno $recno $g
+ set ret [$dbc get -next]
+ incr recno
+ }
+
+ # ... and then backward.
+ set recno [expr $count - 1]
+ while { $recno > 0 } {
+ # Test set_recno: verify that we find the expected key
+ # at the current record number position.
+ set r [$dbc get -set_recno $recno]
+ set k [lindex [lindex $r 0] 0]
+ error_check_good set_recno $kvals($recno) $k
+
+ # Test get_recno: verify that the return from
+ # get_recno matches the record number just set.
+ set g [$dbc get -get_recno]
+ error_check_good get_recno $recno $g
+ set recno [expr $recno - 1]
+ }
+
+ error_check_good cursor_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+ close $did
+}
+
+# Check function for the dumped file; key and data should be identical.
+proc test099.check { key data } {
+ error_check_good "data mismatch for key $key" $key $data
+}
diff --git a/bdb/test/test100.tcl b/bdb/test/test100.tcl
new file mode 100644
index 00000000000..f80b2e526dd
--- /dev/null
+++ b/bdb/test/test100.tcl
@@ -0,0 +1,17 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2000-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test100.tcl,v 11.1 2002/08/15 20:55:20 sandstro Exp $
+#
+# TEST test100
+# TEST Test for functionality near the end of the queue
+# TEST using test025 (DB_APPEND).
+proc test100 { method {nentries 10000} {txn -txn} {tnum "100"} args} {
+ if { [is_queueext $method ] == 0 } {
+ puts "Skipping test0$tnum for $method."
+ return;
+ }
+ eval {test025 $method $nentries 4294967000 $tnum} $args
+}
diff --git a/bdb/test/test101.tcl b/bdb/test/test101.tcl
new file mode 100644
index 00000000000..7e5c8fc30fc
--- /dev/null
+++ b/bdb/test/test101.tcl
@@ -0,0 +1,17 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2000-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test101.tcl,v 11.1 2002/08/15 20:55:20 sandstro Exp $
+#
+# TEST test101
+# TEST Test for functionality near the end of the queue
+# TEST using test070 (DB_CONSUME).
+proc test101 { method {nentries 10000} {txn -txn} {tnum "101"} args} {
+ if { [is_queueext $method ] == 0 } {
+ puts "Skipping test0$tnum for $method."
+ return;
+ }
+ eval {test070 $method 4 2 1000 WAIT 4294967000 $txn $tnum} $args
+}
diff --git a/bdb/test/testparams.tcl b/bdb/test/testparams.tcl
index 2def6a9d0d8..6628db532d7 100644
--- a/bdb/test/testparams.tcl
+++ b/bdb/test/testparams.tcl
@@ -1,37 +1,72 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 2000
+# Copyright (c) 2000-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: testparams.tcl,v 11.39 2001/01/11 17:29:42 sue Exp $
+# $Id: testparams.tcl,v 11.117 2002/09/05 02:30:00 margo Exp $
-set deadtests 3
-set envtests 8
-set recdtests 13
-set rsrctests 3
-set runtests 93
-set subdbtests 10
-set rpctests 2
+set subs {bigfile dead env lock log memp mutex recd rep rpc rsrc \
+ sdb sdbtest sec si test txn}
+set num_test(bigfile) 2
+set num_test(dead) 7
+set num_test(env) 11
+set num_test(lock) 5
+set num_test(log) 5
+set num_test(memp) 3
+set num_test(mutex) 3
+set num_test(recd) 20
+set num_test(rep) 5
+set num_test(rpc) 5
+set num_test(rsrc) 4
+set num_test(sdb) 12
+set num_test(sdbtest) 2
+set num_test(sec) 2
+set num_test(si) 6
+set num_test(test) 101
+set num_test(txn) 9
+
+set parms(recd001) 0
+set parms(recd002) 0
+set parms(recd003) 0
+set parms(recd004) 0
+set parms(recd005) ""
+set parms(recd006) 0
+set parms(recd007) ""
+set parms(recd008) {4 4}
+set parms(recd009) 0
+set parms(recd010) 0
+set parms(recd011) {200 15 1}
+set parms(recd012) {0 49 25 100 5}
+set parms(recd013) 100
+set parms(recd014) ""
+set parms(recd015) ""
+set parms(recd016) ""
+set parms(recd017) 0
+set parms(recd018) 10
+set parms(recd019) 50
+set parms(recd020) ""
set parms(subdb001) ""
set parms(subdb002) 10000
set parms(subdb003) 1000
set parms(subdb004) ""
set parms(subdb005) 100
set parms(subdb006) 100
-set parms(subdb007) 10000
-set parms(subdb008) 10000
+set parms(subdb007) ""
+set parms(subdb008) ""
set parms(subdb009) ""
set parms(subdb010) ""
-set parms(test001) {10000 0 "01"}
+set parms(subdb011) {13 10}
+set parms(subdb012) ""
+set parms(test001) {10000 0 "01" 0}
set parms(test002) 10000
set parms(test003) ""
set parms(test004) {10000 4 0}
set parms(test005) 10000
set parms(test006) {10000 0 6}
set parms(test007) {10000 7}
-set parms(test008) {10000 8 0}
-set parms(test009) 10000
+set parms(test008) {8 0}
+set parms(test009) ""
set parms(test010) {10000 5 10}
set parms(test011) {10000 5 11}
set parms(test012) ""
@@ -96,7 +131,7 @@ set parms(test070) {4 2 1000 CONSUME 0 -txn 70}
set parms(test071) {1 1 10000 CONSUME 0 -txn 71}
set parms(test072) {512 20 72}
set parms(test073) {512 50 73}
-set parms(test074) {-nextnodup 512 100 74}
+set parms(test074) {-nextnodup 100 74}
set parms(test075) {75}
set parms(test076) {1000 76}
set parms(test077) {1000 512 77}
@@ -104,12 +139,56 @@ set parms(test078) {100 512 78}
set parms(test079) {10000 512 79}
set parms(test080) {80}
set parms(test081) {13 81}
-set parms(test082) {-prevnodup 512 100 82}
+set parms(test082) {-prevnodup 100 82}
set parms(test083) {512 5000 2}
set parms(test084) {10000 84 65536}
set parms(test085) {512 3 10 85}
set parms(test086) ""
set parms(test087) {512 50 87}
set parms(test088) ""
-set parms(test090) {1000 -txn 90}
+set parms(test089) 1000
+set parms(test090) {10000 -txn 90}
set parms(test091) {4 2 1000 0 91}
+set parms(test092) {1000}
+set parms(test093) {10000 93}
+set parms(test094) {10000 10 94}
+set parms(test095) {1000 25 95}
+set parms(test096) {512 1000 19}
+set parms(test097) {500 400}
+set parms(test098) ""
+set parms(test099) 10000
+set parms(test100) {10000 -txn 100}
+set parms(test101) {10000 -txn 101}
+
+# RPC server executables. Each of these is tested (if it exists)
+# when running the RPC tests.
+set svc_list { berkeley_db_svc berkeley_db_cxxsvc \
+ berkeley_db_javasvc }
+set rpc_svc berkeley_db_svc
+
+# Shell script tests. Each list entry is a {directory filename} pair,
+# invoked with "/bin/sh filename".
+set shelltest_list {
+ { scr001 chk.code }
+ { scr002 chk.def }
+ { scr003 chk.define }
+ { scr004 chk.javafiles }
+ { scr005 chk.nl }
+ { scr006 chk.offt }
+ { scr007 chk.proto }
+ { scr008 chk.pubdef }
+ { scr009 chk.srcfiles }
+ { scr010 chk.str }
+ { scr011 chk.tags }
+ { scr012 chk.vx_code }
+ { scr013 chk.stats }
+ { scr014 chk.err }
+ { scr015 chk.cxxtests }
+ { scr016 chk.javatests }
+ { scr017 chk.db185 }
+ { scr018 chk.comma }
+ { scr019 chk.include }
+ { scr020 chk.inc }
+ { scr021 chk.flags }
+ { scr022 chk.rr }
+}
diff --git a/bdb/test/testutils.tcl b/bdb/test/testutils.tcl
index c5edaef7f6a..d1f89dd1e15 100644
--- a/bdb/test/testutils.tcl
+++ b/bdb/test/testutils.tcl
@@ -1,9 +1,9 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1996, 1997, 1998, 1999, 2000
+# Copyright (c) 1996-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: testutils.tcl,v 11.86 2001/01/18 23:21:14 krinsky Exp $
+# $Id: testutils.tcl,v 11.165 2002/09/05 17:54:04 sandstro Exp $
#
# Test system utilities
#
@@ -12,14 +12,25 @@
proc timestamp {{opt ""}} {
global __timestamp_start
+ set now [clock seconds]
+
+ # -c accurate to the click, instead of the second.
+ # -r seconds since the Epoch
+ # -t current time in the format expected by db_recover -t.
+ # -w wallclock time
+ # else wallclock plus elapsed time.
if {[string compare $opt "-r"] == 0} {
- clock seconds
+ return $now
} elseif {[string compare $opt "-t"] == 0} {
- # -t gives us the current time in the format expected by
- # db_recover -t.
- return [clock format [clock seconds] -format "%y%m%d%H%M.%S"]
+ return [clock format $now -format "%y%m%d%H%M.%S"]
+ } elseif {[string compare $opt "-w"] == 0} {
+ return [clock format $now -format "%c"]
} else {
- set now [clock seconds]
+ if {[string compare $opt "-c"] == 0} {
+ set printclicks 1
+ } else {
+ set printclicks 0
+ }
if {[catch {set start $__timestamp_start}] != 0} {
set __timestamp_start $now
@@ -30,7 +41,13 @@ proc timestamp {{opt ""}} {
set the_time [clock format $now -format ""]
set __timestamp_start $now
- format "%02d:%02d:%02d (%02d:%02d:%02d)" \
+ if { $printclicks == 1 } {
+ set pc_print [format ".%08u" [__fix_num [clock clicks]]]
+ } else {
+ set pc_print ""
+ }
+
+ format "%02d:%02d:%02d$pc_print (%02d:%02d:%02d)" \
[__fix_num [clock format $now -format "%H"]] \
[__fix_num [clock format $now -format "%M"]] \
[__fix_num [clock format $now -format "%S"]] \
@@ -115,32 +132,68 @@ proc get_file_as_key { db txn flags file} {
# open file and call dump_file to dumpkeys to tempfile
proc open_and_dump_file {
- dbname dbenv txn outfile checkfunc dump_func beg cont} {
+ dbname env outfile checkfunc dump_func beg cont } {
+ global encrypt
+ global passwd
source ./include.tcl
- if { $dbenv == "NULL" } {
- set db [berkdb open -rdonly -unknown $dbname]
- error_check_good dbopen [is_valid_db $db] TRUE
- } else {
- set db [berkdb open -env $dbenv -rdonly -unknown $dbname]
- error_check_good dbopen [is_valid_db $db] TRUE
+
+ set encarg ""
+ if { $encrypt > 0 && $env == "NULL" } {
+ set encarg "-encryptany $passwd"
+ }
+ set envarg ""
+ set txn ""
+ set txnenv 0
+ if { $env != "NULL" } {
+ append envarg " -env $env "
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append envarg " -auto_commit "
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
}
+ set db [eval {berkdb open} $envarg -rdonly -unknown $encarg $dbname]
+ error_check_good dbopen [is_valid_db $db] TRUE
$dump_func $db $txn $outfile $checkfunc $beg $cont
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
error_check_good db_close [$db close] 0
}
# open file and call dump_file to dumpkeys to tempfile
proc open_and_dump_subfile {
- dbname dbenv txn outfile checkfunc dump_func beg cont subdb} {
+ dbname env outfile checkfunc dump_func beg cont subdb} {
+ global encrypt
+ global passwd
source ./include.tcl
- if { $dbenv == "NULL" } {
- set db [berkdb open -rdonly -unknown $dbname $subdb]
- error_check_good dbopen [is_valid_db $db] TRUE
- } else {
- set db [berkdb open -env $dbenv -rdonly -unknown $dbname $subdb]
- error_check_good dbopen [is_valid_db $db] TRUE
+ set encarg ""
+ if { $encrypt > 0 && $env == "NULL" } {
+ set encarg "-encryptany $passwd"
+ }
+ set envarg ""
+ set txn ""
+ set txnenv 0
+ if { $env != "NULL" } {
+ append envarg "-env $env"
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append envarg " -auto_commit "
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
}
+ set db [eval {berkdb open -rdonly -unknown} \
+ $envarg $encarg {$dbname $subdb}]
+ error_check_good dbopen [is_valid_db $db] TRUE
$dump_func $db $txn $outfile $checkfunc $beg $cont
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
error_check_good db_close [$db close] 0
}
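The rewritten open_and_dump_file derives any transaction and encryption arguments from the environment handle instead of taking a separate txn parameter. A minimal sketch of a call outside any environment (file names are illustrative), matching the call sites later in this file:

    # Dump every key of mydb.db into mydb.dump, walking forward from
    # the first record; "nop" skips per-record checking.
    open_and_dump_file mydb.db NULL $testdir/mydb.dump nop \
        dump_file_direction "-first" "-next"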
@@ -155,12 +208,18 @@ proc dump_file { db txn outfile checkfunc } {
proc dump_file_direction { db txn outfile checkfunc start continue } {
source ./include.tcl
- set outf [open $outfile w]
# Now we will get each key from the DB and dump to outfile
set c [eval {$db cursor} $txn]
error_check_good db_cursor [is_valid_cursor $c $db] TRUE
- for {set d [$c get $start] } { [llength $d] != 0 } {
- set d [$c get $continue] } {
+ dump_file_walk $c $outfile $checkfunc $start $continue
+ error_check_good curs_close [$c close] 0
+}
+
+proc dump_file_walk { c outfile checkfunc start continue {flag ""} } {
+ set outf [open $outfile w]
+ for {set d [eval {$c get} $flag $start] } \
+ { [llength $d] != 0 } \
+ {set d [eval {$c get} $flag $continue] } {
set kd [lindex $d 0]
set k [lindex $kd 0]
set d2 [lindex $kd 1]
@@ -170,7 +229,6 @@ proc dump_file_direction { db txn outfile checkfunc start continue } {
# puts $outf "$k $d2"
}
close $outf
- error_check_good curs_close [$c close] 0
}
proc dump_binkey_file { db txn outfile checkfunc } {
@@ -285,8 +343,8 @@ proc error_check_good { func result desired {txn 0} } {
}
# Locks have the prefix of their manager.
-proc is_substr { l mgr } {
- if { [string first $mgr $l] == -1 } {
+proc is_substr { str sub } {
+ if { [string first $sub $str] == -1 } {
return 0
} else {
return 1
@@ -297,7 +355,7 @@ proc release_list { l } {
# Now release all the locks
foreach el $l {
- set ret [$el put]
+ catch { $el put } ret
error_check_good lock_put $ret 0
}
}
@@ -374,6 +432,54 @@ proc dup_check { db txn tmpfile dlist {extra 0}} {
error_check_good curs_close [$c close] 0
}
+# Check if each key appears exactly [llength dlist] times in the file with
+# the duplicate tags matching those that appear in dlist.
+proc dup_file_check { db txn tmpfile dlist } {
+ source ./include.tcl
+
+ set outf [open $tmpfile w]
+ # Now we will get each key from the DB and dump to outfile
+ set c [eval {$db cursor} $txn]
+ set lastkey ""
+ set done 0
+ while { $done != 1} {
+ foreach did $dlist {
+ set rec [$c get "-next"]
+ if { [string length $rec] == 0 } {
+ set done 1
+ break
+ }
+ set key [lindex [lindex $rec 0] 0]
+ if { [string compare $key $lastkey] != 0 } {
+ #
+			# If we changed files, read in new contents.
+ #
+ set fid [open $key r]
+ fconfigure $fid -translation binary
+ set filecont [read $fid]
+ close $fid
+ }
+ set fulldata [lindex [lindex $rec 0] 1]
+ set id [id_of $fulldata]
+ set d [data_of $fulldata]
+ if { [string compare $key $lastkey] != 0 && \
+ $id != [lindex $dlist 0] } {
+ set e [lindex $dlist 0]
+ error "FAIL: \tKey \
+ $key, expected dup id $e, got $id"
+ }
+ error_check_good dupget.data $d $filecont
+ error_check_good dupget.id $id $did
+ set lastkey $key
+ }
+ if { $done != 1 } {
+ puts $outf $key
+ }
+ }
+ close $outf
+ error_check_good curs_close [$c close] 0
+}
+
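A hedged sketch of driving dup_file_check; it assumes an open handle $db whose keys are file names and whose duplicate data items are tagged copies of those files' contents (the dup-id list and output file are hypothetical):

    # Expect each key to carry duplicate ids 1 through 4, in order.
    set dlist { 1 2 3 4 }
    dup_file_check $db "" $testdir/dupcheck.out $dlist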
# Parse duplicate data entries of the form N:data. Data_of returns
# the data part; id_of returns the numerical part
proc data_of {str} {
@@ -513,7 +619,7 @@ proc sentinel_init { } {
set filelist {}
set ret [catch {glob $testdir/begin.*} result]
- if { $ret == 0 } {
+ if { $ret == 0 } {
set filelist $result
}
@@ -527,16 +633,33 @@ proc sentinel_init { } {
}
}
-proc watch_procs { {delay 30} {max 3600} } {
+proc watch_procs { pidlist {delay 30} {max 3600} {quiet 0} } {
source ./include.tcl
set elapsed 0
+
+ # Don't start watching the processes until a sentinel
+ # file has been created for each one.
+ foreach pid $pidlist {
+ while { [file exists $testdir/begin.$pid] == 0 } {
+ tclsleep $delay
+ incr elapsed $delay
+ # If pids haven't been created in one-tenth
+ # of the time allowed for the whole test,
+ # there's a problem. Report an error and fail.
+ if { $elapsed > [expr {$max / 10}] } {
+ puts "FAIL: begin.pid not created"
+ break
+ }
+ }
+ }
+
while { 1 } {
tclsleep $delay
incr elapsed $delay
- # Find the list of processes withoutstanding sentinel
+ # Find the list of processes with outstanding sentinel
# files (i.e. a begin.pid and no end.pid).
set beginlist {}
set endlist {}
@@ -586,18 +709,14 @@ proc watch_procs { {delay 30} {max 3600} } {
if { $elapsed > $max } {
# We have exceeded the limit; kill processes
# and report an error
- set rlist {}
foreach i $l {
- set r [catch { exec $KILL $i } result]
- if { $r == 0 } {
- lappend rlist $i
- }
+ tclkill $i
}
- error_check_good "Processes still running" \
- [llength $rlist] 0
}
}
- puts "All processes have exited."
+ if { $quiet == 0 } {
+ puts "All processes have exited."
+ }
}
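watch_procs now takes an explicit pid list and first waits for each child's begin.pid sentinel before polling. The invocation below is lifted from the op_recover prepare path later in this file:

    set pidlist {}
    set p [exec $tclsh_path $test_path/wrap.tcl recdscript.tcl \
        $testdir/recdout $op $dir $env_cmd $dbfile $gidf $cmd &]
    lappend pidlist $p
    watch_procs $pidlist 5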
# These routines are all used from within the dbscript.tcl tester.
@@ -935,7 +1054,7 @@ proc filecheck { file txn } {
unset check_array
}
- open_and_dump_file $file NULL $txn $file.dump dbcheck dump_full_file \
+ open_and_dump_file $file NULL $file.dump dbcheck dump_full_file \
"-first" "-next"
# Check that everything we checked had all its data
@@ -964,20 +1083,11 @@ proc filecheck { file txn } {
}
}
-proc esetup { dir } {
- source ./include.tcl
-
- set ret [berkdb envremove -home $dir]
-
- fileremove -f $dir/file0 $dir/file1 $dir/file2 $dir/file3
- set mp [memp $dir 0644 -create -cachesize { 0 10240 }]
- set lp [lock_open "" -create 0644]
- error_check_good memp_close [$mp close] 0
- error_check_good lock_close [$lp close] 0
-}
-
-proc cleanup { dir env } {
+proc cleanup { dir env { quiet 0 } } {
global gen_upgrade
+ global is_qnx_test
+ global old_encrypt
+ global passwd
global upgrade_dir
global upgrade_be
global upgrade_method
@@ -989,46 +1099,109 @@ proc cleanup { dir env } {
set maj [lindex $vers 0]
set min [lindex $vers 1]
- if { $upgrade_be == 1 } {
- set version_dir "$maj.${min}be"
+ # Is this machine big or little endian? We want to mark
+ # the test directories appropriately, since testing
+ # little-endian databases generated by a big-endian machine,
+ # and/or vice versa, is interesting.
+ if { [big_endian] } {
+ set myendianness be
} else {
- set version_dir "$maj.${min}le"
+ set myendianness le
}
- set dest $upgrade_dir/$version_dir/$upgrade_method/$upgrade_name
+ if { $upgrade_be == 1 } {
+ set version_dir "$myendianness-$maj.${min}be"
+ set en be
+ } else {
+ set version_dir "$myendianness-$maj.${min}le"
+ set en le
+ }
- catch {exec mkdir -p $dest}
- catch {exec sh -c "mv $dir/*.db $dest"}
- catch {exec sh -c "mv $dir/__dbq.* $dest"}
+ set dest $upgrade_dir/$version_dir/$upgrade_method
+ exec mkdir -p $dest
+
+ set dbfiles [glob -nocomplain $dir/*.db]
+ foreach dbfile $dbfiles {
+ set basename [string range $dbfile \
+ [expr [string length $dir] + 1] end-3]
+
+ set newbasename $upgrade_name-$basename
+
+ # db_dump file
+ error_check_good db_dump($dbfile) \
+ [catch {exec $util_path/db_dump -k $dbfile > \
+ $dir/$newbasename.dump}] 0
+
+ # tcl_dump file
+ upgrade_dump $dbfile \
+ $dir/$newbasename.tcldump
+
+ # Rename dbfile and any dbq files.
+ file rename $dbfile $dir/$newbasename-$en.db
+ foreach dbq \
+ [glob -nocomplain $dir/__dbq.$basename.db.*] {
+ set s [string length $dir/__dbq.]
+ set newname [string replace $dbq $s \
+ [expr [string length $basename] + $s - 1] \
+ $newbasename-$en]
+ file rename $dbq $newname
+ }
+ set cwd [pwd]
+ cd $dir
+ catch {eval exec tar -cvf $dest/$newbasename.tar \
+ [glob $newbasename* __dbq.$newbasename-$en.db.*]}
+ catch {exec gzip -9v $dest/$newbasename.tar}
+ cd $cwd
+ }
}
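The upgrade directory name now records both the generating machine's endianness and the target byte order. A rough sketch of how the name is built (version numbers are illustrative):

    set maj 4; set min 1
    set myendianness [expr {[big_endian] ? "be" : "le"}]
    # Generating big-endian (-lorder 4321) databases on a little-endian
    # host yields a directory such as "le-4.1be".
    set version_dir "$myendianness-$maj.${min}be"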
# check_handles
set remfiles {}
set ret [catch { glob $dir/* } result]
if { $ret == 0 } {
- foreach file $result {
+ foreach fileorig $result {
#
# We:
# - Ignore any env-related files, which are
# those that have __db.* or log.* if we are
- # running in an env.
+ # running in an env. Also ignore files whose
+ # names start with REPDIR_; these are replication
+ # subdirectories.
# - Call 'dbremove' on any databases.
# Remove any remaining temp files.
#
- switch -glob -- $file {
+ switch -glob -- $fileorig {
+ */DIR_* -
*/__db.* -
*/log.* {
if { $env != "NULL" } {
continue
} else {
- lappend remfiles $file
+ if { $is_qnx_test } {
+ catch {berkdb envremove -force \
+ -home $dir} r
+ }
+ lappend remfiles $fileorig
}
}
*.db {
set envargs ""
+ set encarg ""
+ #
+			# If we are in an env, it has already been opened
+			# with or without crypto, so no extra encryption
+			# argument is needed here.
+ #
if { $env != "NULL"} {
- set file [file tail $file]
+ set file [file tail $fileorig]
set envargs " -env $env "
+ if { [is_txnenv $env] } {
+ append envargs " -auto_commit "
+ }
+ } else {
+ if { $old_encrypt != 0 } {
+ set encarg "-encryptany $passwd"
+ }
+ set file $fileorig
}
# If a database is left in a corrupt
@@ -1038,15 +1211,33 @@ proc cleanup { dir env } {
# just forcibly remove the file with a warning
# message.
set ret [catch \
- {eval {berkdb dbremove} $envargs $file} res]
+ {eval {berkdb dbremove} $envargs $encarg \
+ $file} res]
if { $ret != 0 } {
- puts \
+ # If it failed, there is a chance
+ # that the previous run was using
+ # encryption and we cannot know about
+ # it (different tclsh instantiation).
+ # Try to remove it with crypto.
+ if { $env == "NULL" && \
+ $old_encrypt == 0} {
+ set ret [catch \
+ {eval {berkdb dbremove} \
+ -encryptany $passwd \
+ $envargs $file} res]
+ }
+ if { $ret != 0 } {
+ if { $quiet == 0 } {
+ puts \
"FAIL: dbremove in cleanup failed: $res"
- lappend remfiles $file
+ }
+ set file $fileorig
+ lappend remfiles $file
+ }
}
}
default {
- lappend remfiles $file
+ lappend remfiles $fileorig
}
}
}
@@ -1068,9 +1259,15 @@ proc log_cleanup { dir } {
}
proc env_cleanup { dir } {
+ global old_encrypt
+ global passwd
source ./include.tcl
- set stat [catch {berkdb envremove -home $dir} ret]
+ set encarg ""
+ if { $old_encrypt != 0 } {
+ set encarg "-encryptany $passwd"
+ }
+ set stat [catch {eval {berkdb envremove -home} $dir $encarg} ret]
#
# If something failed and we are left with a region entry
# in /dev/shmem that is zero-length, the envremove will
@@ -1136,33 +1333,90 @@ proc help { cmd } {
# Notice that we catch the return from CP and do not do anything with it.
# This is because Solaris CP seems to exit non-zero on occasion, but
# everything else seems to run just fine.
+#
+# We split it into two functions so that the preparation and command
+# could be executed in a different process than the recovery.
+#
+proc op_codeparse { encodedop op } {
+ set op1 ""
+ set op2 ""
+ switch $encodedop {
+ "abort" {
+ set op1 $encodedop
+ set op2 ""
+ }
+ "commit" {
+ set op1 $encodedop
+ set op2 ""
+ }
+ "prepare-abort" {
+ set op1 "prepare"
+ set op2 "abort"
+ }
+ "prepare-commit" {
+ set op1 "prepare"
+ set op2 "commit"
+ }
+ "prepare-discard" {
+ set op1 "prepare"
+ set op2 "discard"
+ }
+ }
+
+ if { $op == "op" } {
+ return $op1
+ } else {
+ return $op2
+ }
+}
+
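op_codeparse just maps an encoded operation name onto its two phases; expected return values are shown in the comments:

    op_codeparse prepare-abort op     ;# => prepare
    op_codeparse prepare-abort sub    ;# => abort
    op_codeparse commit op            ;# => commit
    op_codeparse commit sub           ;# => {}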
proc op_recover { encodedop dir env_cmd dbfile cmd msg } {
+ source ./include.tcl
+
+ set op [op_codeparse $encodedop "op"]
+ set op2 [op_codeparse $encodedop "sub"]
+ puts "\t$msg $encodedop"
+ set gidf ""
+ if { $op == "prepare" } {
+ sentinel_init
+
+ # Fork off a child to run the cmd
+ # We append the gid, so start here making sure
+ # we don't have old gid's around.
+ set outfile $testdir/childlog
+ fileremove -f $testdir/gidfile
+ set gidf $testdir/gidfile
+ set pidlist {}
+ # puts "$tclsh_path $test_path/recdscript.tcl $testdir/recdout \
+ # $op $dir $env_cmd $dbfile $gidf $cmd"
+ set p [exec $tclsh_path $test_path/wrap.tcl recdscript.tcl \
+ $testdir/recdout $op $dir $env_cmd $dbfile $gidf $cmd &]
+ lappend pidlist $p
+ watch_procs $pidlist 5
+ set f1 [open $testdir/recdout r]
+ set r [read $f1]
+ puts -nonewline $r
+ close $f1
+ fileremove -f $testdir/recdout
+ } else {
+ op_recover_prep $op $dir $env_cmd $dbfile $gidf $cmd
+ }
+ op_recover_rec $op $op2 $dir $env_cmd $dbfile $gidf
+}
+
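A hedged sketch of how a recovery test might call the split op_recover; the database name, command and message label here are hypothetical:

    # $cmd is whatever database operation the recovery test exercises;
    # the prepare-commit case forks a child to run and prepare it, then
    # recovers and commits the prepared txn in this process.
    op_recover prepare-commit $testdir $env_cmd recd.db $cmd "Recd.a"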
+proc op_recover_prep { op dir env_cmd dbfile gidf cmd } {
global log_log_record_types
global recd_debug
global recd_id
global recd_op
source ./include.tcl
- #puts "op_recover: $encodedop $dir $env_cmd $dbfile $cmd $msg"
+ #puts "op_recover: $op $dir $env $dbfile $cmd"
set init_file $dir/t1
set afterop_file $dir/t2
set final_file $dir/t3
- set op ""
- set op2 ""
- if { $encodedop == "prepare-abort" } {
- set op "prepare"
- set op2 "abort"
- } elseif { $encodedop == "prepare-commit" } {
- set op "prepare"
- set op2 "commit"
- } else {
- set op $encodedop
- }
-
- puts "\t$msg $encodedop"
-
# Keep track of the log types we've seen
if { $log_log_record_types == 1} {
logtrack_read $dir
@@ -1172,13 +1426,15 @@ proc op_recover { encodedop dir env_cmd dbfile cmd msg } {
catch { file copy -force $dir/$dbfile $dir/$dbfile.init } res
copy_extent_file $dir $dbfile init
+ convert_encrypt $env_cmd
set env [eval $env_cmd]
- set db [berkdb open -env $env $dbfile]
+ error_check_good envopen [is_valid_env $env] TRUE
+
+ set db [berkdb open -auto_commit -env $env $dbfile]
error_check_good dbopen [is_valid_db $db] TRUE
# Dump out file contents for initial case
- set tflags ""
- open_and_dump_file $dbfile $env $tflags $init_file nop \
+ open_and_dump_file $dbfile $env $init_file nop \
dump_file_direction "-first" "-next"
set t [$env txn]
@@ -1233,43 +1489,38 @@ proc op_recover { encodedop dir env_cmd dbfile cmd msg } {
set record_exec_cmd_ret 0
set lenient_exec_cmd_ret 0
- # Sync the file so that we can capture a snapshot to test
- # recovery.
+ # Sync the file so that we can capture a snapshot to test recovery.
error_check_good sync:$db [$db sync] 0
catch { file copy -force $dir/$dbfile $dir/$dbfile.afterop } res
copy_extent_file $dir $dbfile afterop
+ open_and_dump_file $dir/$dbfile.afterop NULL \
+ $afterop_file nop dump_file_direction "-first" "-next"
- #set tflags "-txn $t"
- open_and_dump_file $dir/$dbfile.afterop NULL $tflags \
- $afterop_file nop dump_file_direction \
- "-first" "-next"
#puts "\t\t\tExecuting txn_$op:$t"
- error_check_good txn_$op:$t [$t $op] 0
- if { $op2 != "" } {
- #puts "\t\t\tExecuting txn_$op2:$t"
- error_check_good txn_$op2:$t [$t $op2] 0
+ if { $op == "prepare" } {
+ set gid [make_gid global:$t]
+ set gfd [open $gidf w+]
+ puts $gfd $gid
+ close $gfd
+ error_check_good txn_$op:$t [$t $op $gid] 0
+ } else {
+ error_check_good txn_$op:$t [$t $op] 0
}
- switch $encodedop {
+ switch $op {
"commit" { puts "\t\tCommand executed and committed." }
"abort" { puts "\t\tCommand executed and aborted." }
"prepare" { puts "\t\tCommand executed and prepared." }
- "prepare-commit" {
- puts "\t\tCommand executed, prepared, and committed."
- }
- "prepare-abort" {
- puts "\t\tCommand executed, prepared, and aborted."
- }
}
- # Dump out file and save a copy.
+ # Sync the file so that we can capture a snapshot to test recovery.
error_check_good sync:$db [$db sync] 0
- open_and_dump_file $dir/$dbfile NULL $tflags $final_file nop \
- dump_file_direction "-first" "-next"
catch { file copy -force $dir/$dbfile $dir/$dbfile.final } res
copy_extent_file $dir $dbfile final
+ open_and_dump_file $dir/$dbfile.final NULL \
+ $final_file nop dump_file_direction "-first" "-next"
# If this is an abort or prepare-abort, it should match the
# original file.
@@ -1281,56 +1532,121 @@ proc op_recover { encodedop dir env_cmd dbfile cmd msg } {
# Thus we just skip this in the prepare-only case; what
# we care about are the results of a prepare followed by a
# recovery, which we test later.
- if { $op == "commit" || $op2 == "commit" } {
+ if { $op == "commit" } {
filesort $afterop_file $afterop_file.sort
filesort $final_file $final_file.sort
error_check_good \
diff(post-$op,pre-commit):diff($afterop_file,$final_file) \
[filecmp $afterop_file.sort $final_file.sort] 0
- } elseif { $op == "abort" || $op2 == "abort" } {
+ } elseif { $op == "abort" } {
filesort $init_file $init_file.sort
filesort $final_file $final_file.sort
error_check_good \
diff(initial,post-$op):diff($init_file,$final_file) \
[filecmp $init_file.sort $final_file.sort] 0
} else {
- # Make sure this really is a prepare-only
- error_check_good assert:prepare-only $encodedop "prepare"
+ # Make sure this really is one of the prepare tests
+ error_check_good assert:prepare-test $op "prepare"
}
# Running recovery on this database should not do anything.
# Flush all data to disk, close the environment and save the
# file.
- error_check_good close:$db [$db close] 0
-
- # If all we've done is a prepare, then there's still a
- # transaction active, and an env close will return DB_RUNRECOVERY
- if { $encodedop == "prepare" } {
- catch {$env close} ret
- error_check_good env_close \
- [is_substr $ret DB_RUNRECOVERY] 1
- } else {
- reset_env $env
+ # XXX DO NOT CLOSE FILE ON PREPARE -- if you are prepared,
+ # you really have an active transaction and you're not allowed
+ # to close files that are being acted upon by in-process
+ # transactions.
+ if { $op != "prepare" } {
+ error_check_good close:$db [$db close] 0
+ }
+
+ #
+ # If we are running 'prepare' don't close the env with an
+ # active transaction. Leave it alone so the close won't
+ # quietly abort it on us.
+ if { [is_substr $op "prepare"] != 1 } {
+ error_check_good envclose [$env close] 0
+ }
+ return
+}
+
+proc op_recover_rec { op op2 dir env_cmd dbfile gidf} {
+ global log_log_record_types
+ global recd_debug
+ global recd_id
+ global recd_op
+ global encrypt
+ global passwd
+ source ./include.tcl
+
+ #puts "op_recover_rec: $op $op2 $dir $env_cmd $dbfile $gidf"
+
+ set init_file $dir/t1
+ set afterop_file $dir/t2
+ set final_file $dir/t3
+
+ # Keep track of the log types we've seen
+ if { $log_log_record_types == 1} {
+ logtrack_read $dir
}
berkdb debug_check
- puts -nonewline "\t\tRunning recovery ... "
+ puts -nonewline "\t\top_recover_rec: Running recovery ... "
flush stdout
- set stat [catch {exec $util_path/db_recover -h $dir -c} result]
+ set recargs "-h $dir -c "
+ if { $encrypt > 0 } {
+ append recargs " -P $passwd "
+ }
+ set stat [catch {eval exec $util_path/db_recover -e $recargs} result]
if { $stat == 1 } {
error "FAIL: Recovery error: $result."
}
puts -nonewline "complete ... "
- error_check_good db_verify [verify_dir $testdir "\t\t" 0 1] 0
+ #
+ # We cannot run db_recover here because that will open an env, run
+ # recovery, then close it, which will abort the outstanding txns.
+ # We want to do it ourselves.
+ #
+ set env [eval $env_cmd]
+ error_check_good dbenv [is_valid_widget $env env] TRUE
+ error_check_good db_verify [verify_dir $testdir "\t\t" 0 1] 0
puts "verified"
- berkdb debug_check
- set env [eval $env_cmd]
- error_check_good dbenv [is_valid_widget $env env] TRUE
- open_and_dump_file $dir/$dbfile NULL $tflags $final_file nop \
+ # If we left a txn as prepared, but not aborted or committed,
+ # we need to do a txn_recover. Make sure we have the same
+ # number of txns we want.
+ if { $op == "prepare"} {
+ set txns [$env txn_recover]
+ error_check_bad txnrecover [llength $txns] 0
+ set gfd [open $gidf r]
+ set origgid [read -nonewline $gfd]
+ close $gfd
+ set txnlist [lindex $txns 0]
+ set t [lindex $txnlist 0]
+ set gid [lindex $txnlist 1]
+ error_check_good gidcompare $gid $origgid
+ puts "\t\t\tExecuting txn_$op2:$t"
+ error_check_good txn_$op2:$t [$t $op2] 0
+ #
+ # If we are testing discard, we do need to resolve
+ # the txn, so get the list again and now abort it.
+ #
+ if { $op2 == "discard" } {
+ set txns [$env txn_recover]
+ error_check_bad txnrecover [llength $txns] 0
+ set txnlist [lindex $txns 0]
+ set t [lindex $txnlist 0]
+ set gid [lindex $txnlist 1]
+ error_check_good gidcompare $gid $origgid
+ puts "\t\t\tExecuting txn_abort:$t"
+ error_check_good disc_txn_abort:$t [$t abort] 0
+ }
+ }
+
+ open_and_dump_file $dir/$dbfile NULL $final_file nop \
dump_file_direction "-first" "-next"
if { $op == "commit" || $op2 == "commit" } {
filesort $afterop_file $afterop_file.sort
@@ -1358,11 +1674,10 @@ proc op_recover { encodedop dir env_cmd dbfile cmd msg } {
}
berkdb debug_check
- puts -nonewline \
- "\t\tRunning recovery on pre-op database ... "
+ puts -nonewline "\t\tRunning recovery on pre-op database ... "
flush stdout
- set stat [catch {exec $util_path/db_recover -h $dir -c} result]
+ set stat [catch {eval exec $util_path/db_recover $recargs} result]
if { $stat == 1 } {
error "FAIL: Recovery error: $result."
}
@@ -1374,7 +1689,7 @@ proc op_recover { encodedop dir env_cmd dbfile cmd msg } {
set env [eval $env_cmd]
- open_and_dump_file $dir/$dbfile NULL $tflags $final_file nop \
+ open_and_dump_file $dir/$dbfile NULL $final_file nop \
dump_file_direction "-first" "-next"
if { $op == "commit" || $op2 == "commit" } {
filesort $final_file $final_file.sort
@@ -1458,6 +1773,54 @@ proc reset_env { env } {
error_check_good env_close [$env close] 0
}
+proc minlocks { myenv locker_id obj_id num } {
+ return [countlocks $myenv $locker_id $obj_id $num ]
+}
+
+proc maxlocks { myenv locker_id obj_id num } {
+ return [countlocks $myenv $locker_id $obj_id $num ]
+}
+
+proc minwrites { myenv locker_id obj_id num } {
+ return [countlocks $myenv $locker_id $obj_id $num ]
+}
+
+proc countlocks { myenv locker_id obj_id num } {
+ set locklist ""
+ for { set i 0} {$i < [expr $obj_id * 4]} { incr i } {
+ set r [catch {$myenv lock_get read $locker_id \
+ [expr $obj_id * 1000 + $i]} l ]
+ if { $r != 0 } {
+ puts $l
+ return ERROR
+ } else {
+ error_check_good lockget:$obj_id [is_substr $l $myenv] 1
+ lappend locklist $l
+ }
+ }
+
+ # Now acquire a write lock
+ if { $obj_id != 1 } {
+ set r [catch {$myenv lock_get write $locker_id \
+ [expr $obj_id * 1000 + 10]} l ]
+ if { $r != 0 } {
+ puts $l
+ return ERROR
+ } else {
+ error_check_good lockget:$obj_id [is_substr $l $myenv] 1
+ lappend locklist $l
+ }
+ }
+
+ set ret [ring $myenv $locker_id $obj_id $num]
+
+ foreach l $locklist {
+ error_check_good lockput:$l [$l put] 0
+ }
+
+ return $ret
+}
+
# This routine will let us obtain a ring of deadlocks.
# Each locker will get a lock on obj_id, then sleep, and
# then try to lock (obj_id + 1) % num.
@@ -1469,7 +1832,7 @@ proc ring { myenv locker_id obj_id num } {
source ./include.tcl
if {[catch {$myenv lock_get write $locker_id $obj_id} lock1] != 0} {
- puts $errorInfo
+ puts $lock1
return ERROR
} else {
error_check_good lockget:$obj_id [is_substr $lock1 $myenv] 1
@@ -1482,6 +1845,7 @@ proc ring { myenv locker_id obj_id num } {
if {[string match "*DEADLOCK*" $lock2] == 1} {
set ret DEADLOCK
} else {
+ puts $lock2
set ret ERROR
}
} else {
@@ -1511,7 +1875,7 @@ proc clump { myenv locker_id obj_id num } {
set obj_id 10
if {[catch {$myenv lock_get read $locker_id $obj_id} lock1] != 0} {
- puts $errorInfo
+ puts $lock1
return ERROR
} else {
error_check_good lockget:$obj_id \
@@ -1542,10 +1906,15 @@ proc clump { myenv locker_id obj_id num } {
return $ret
}
-proc dead_check { t procs dead clean other } {
+proc dead_check { t procs timeout dead clean other } {
error_check_good $t:$procs:other $other 0
switch $t {
ring {
+ # with timeouts the number of deadlocks is unpredictable
+ if { $timeout != 0 && $dead > 1 } {
+ set clean [ expr $clean + $dead - 1]
+ set dead 1
+ }
error_check_good $t:$procs:deadlocks $dead 1
error_check_good $t:$procs:success $clean \
[expr $procs - 1]
@@ -1555,6 +1924,26 @@ proc dead_check { t procs dead clean other } {
[expr $procs - 1]
error_check_good $t:$procs:success $clean 1
}
+ oldyoung {
+ error_check_good $t:$procs:deadlocks $dead 1
+ error_check_good $t:$procs:success $clean \
+ [expr $procs - 1]
+ }
+ minlocks {
+ error_check_good $t:$procs:deadlocks $dead 1
+ error_check_good $t:$procs:success $clean \
+ [expr $procs - 1]
+ }
+ maxlocks {
+ error_check_good $t:$procs:deadlocks $dead 1
+ error_check_good $t:$procs:success $clean \
+ [expr $procs - 1]
+ }
+ minwrites {
+ error_check_good $t:$procs:deadlocks $dead 1
+ error_check_good $t:$procs:success $clean \
+ [expr $procs - 1]
+ }
default {
error "Test $t not implemented"
}
@@ -1604,6 +1993,9 @@ proc reverse { s } {
return $res
}
+#
+# This is an internal-only proc.  All tests should use 'is_valid_db' etc.
+#
proc is_valid_widget { w expected } {
# First N characters must match "expected"
set l [string length $expected]
@@ -1640,6 +2032,10 @@ proc is_valid_lock { lock env } {
return [is_valid_widget $lock $env.lock]
}
+proc is_valid_logc { logc env } {
+ return [is_valid_widget $logc $env.logc]
+}
+
proc is_valid_mpool { mpool env } {
return [is_valid_widget $mpool $env.mp]
}
@@ -1656,11 +2052,20 @@ proc is_valid_mutex { m env } {
return [is_valid_widget $m $env.mutex]
}
+proc is_valid_lock {l env} {
+ return [is_valid_widget $l $env.lock]
+}
+
+proc is_valid_locker {l } {
+ return [is_valid_widget $l ""]
+}
+
proc send_cmd { fd cmd {sleep 2}} {
source ./include.tcl
- puts $fd "set v \[$cmd\]"
- puts $fd "puts \$v"
+ puts $fd "if \[catch {set v \[$cmd\] ; puts \$v} ret\] { \
+ puts \"FAIL: \$ret\" \
+ }"
puts $fd "flush stdout"
flush $fd
berkdb debug_check
@@ -1747,6 +2152,20 @@ proc make_fixed_length {method data {pad 0}} {
return $data
}
+proc make_gid {data} {
+	# Pad the global ID with trailing zeros out to the full
+	# 128-byte XID size expected by txn prepare.
+	while { [string length $data] < 128 } {
+		set data [format ${data}0]
+	}
+	return $data
+}
+
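make_gid is how the prepare paths above and the txn tests below build a global ID for txn prepare, padding the name with trailing zeros up to the XID size. A small sketch, assuming an open transactional env:

    set t [$env txn]
    set gid [make_gid global:$t]
    error_check_good txn_prepare:$t [$t prepare $gid] 0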
# shift data for partial
# pad with fixed pad (which is NULL)
proc partial_shift { data offset direction} {
@@ -1785,7 +2204,9 @@ proc convert_method { method } {
switch -- $method {
-btree -
-dbtree -
+ dbtree -
-ddbtree -
+ ddbtree -
-rbtree -
BTREE -
DB_BTREE -
@@ -1799,9 +2220,12 @@ proc convert_method { method } {
rbtree { return "-btree" }
-dhash -
+ -ddhash -
-hash -
DB_HASH -
HASH -
+ dhash -
+ ddhash -
db_hash -
h -
hash { return "-hash" }
@@ -1819,7 +2243,7 @@ proc convert_method { method } {
qe -
qamext -
-queueext -
- queueextent -
+ queueextent -
queueext { return "-queue" }
-frecno -
@@ -1845,6 +2269,32 @@ proc convert_method { method } {
}
}
+proc split_encargs { largs encargsp } {
+ global encrypt
+ upvar $encargsp e
+ set eindex [lsearch $largs "-encrypta*"]
+ if { $eindex == -1 } {
+ set e ""
+ set newl $largs
+ } else {
+ set eend [expr $eindex + 1]
+ set e [lrange $largs $eindex $eend]
+ set newl [lreplace $largs $eindex $eend "-encrypt"]
+ }
+ return $newl
+}
+
+proc convert_encrypt { largs } {
+ global encrypt
+ global old_encrypt
+
+ set old_encrypt $encrypt
+ set encrypt 0
+ if { [lsearch $largs "-encrypt*"] != -1 } {
+ set encrypt 1
+ }
+}
+
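split_encargs pulls the encryption switch out of a method argument list and leaves a bare -encrypt marker behind; assuming a caller passed -encryptaes, the effect is roughly:

    set largs {-encryptaes mypasswd -pagesize 512}
    set newargs [split_encargs $largs encargs]
    # newargs => {-encrypt -pagesize 512}
    # encargs => {-encryptaes mypasswd}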
# If recno-with-renumbering or btree-with-renumbering is specified, then
# fix the arguments to specify the DB_RENUMBER/DB_RECNUM option for the
# -flags argument.
@@ -1856,13 +2306,15 @@ proc convert_args { method {largs ""} } {
source ./include.tcl
if { [string first - $largs] == -1 &&\
- [string compare $largs ""] != 0 } {
+ [string compare $largs ""] != 0 &&\
+ [string compare $largs {{}}] != 0 } {
set errstring "args must contain a hyphen; does this test\
have no numeric args?"
- puts "FAIL:[timestamp] $errstring"
+ puts "FAIL:[timestamp] $errstring (largs was $largs)"
return -code return
}
+ convert_encrypt $largs
if { $gen_upgrade == 1 && $upgrade_be == 1 } {
append largs " -lorder 4321 "
} elseif { $gen_upgrade == 1 && $upgrade_be != 1 } {
@@ -1880,6 +2332,9 @@ proc convert_args { method {largs ""} } {
append largs " -dupsort "
} elseif { [is_dhash $method] == 1 } {
append largs " -dup "
+ } elseif { [is_ddhash $method] == 1 } {
+ append largs " -dup "
+ append largs " -dupsort "
} elseif { [is_queueext $method] == 1 } {
append largs " -extent 2 "
}
@@ -1900,7 +2355,7 @@ proc is_btree { method } {
}
proc is_dbtree { method } {
- set names { -dbtree }
+ set names { -dbtree dbtree }
if { [lsearch $names $method] >= 0 } {
return 1
} else {
@@ -1909,7 +2364,7 @@ proc is_dbtree { method } {
}
proc is_ddbtree { method } {
- set names { -ddbtree }
+ set names { -ddbtree ddbtree }
if { [lsearch $names $method] >= 0 } {
return 1
} else {
@@ -1963,7 +2418,16 @@ proc is_hash { method } {
}
proc is_dhash { method } {
- set names { -dhash }
+ set names { -dhash dhash }
+ if { [lsearch $names $method] >= 0 } {
+ return 1
+ } else {
+ return 0
+ }
+}
+
+proc is_ddhash { method } {
+ set names { -ddhash ddhash }
if { [lsearch $names $method] >= 0 } {
return 1
} else {
@@ -2107,6 +2571,16 @@ proc tclsleep { s } {
after [expr $s * 1000 + 56]
}
+# Kill a process.
+proc tclkill { id } {
+ source ./include.tcl
+
+ while { [ catch {exec $KILL -0 $id} ] == 0 } {
+ catch {exec $KILL -9 $id}
+ tclsleep 5
+ }
+}
+
# Compare two files, a la diff. Returns 1 if non-identical, 0 if identical.
proc filecmp { file_a file_b } {
set fda [open $file_a r]
@@ -2133,17 +2607,47 @@ proc filecmp { file_a file_b } {
return 0
}
+# Given two SORTED files, one of which is a complete superset of the other,
+# extract out the unique portions of the superset and put them in
+# the given outfile.
+proc fileextract { superset subset outfile } {
+ set sup [open $superset r]
+ set sub [open $subset r]
+ set outf [open $outfile w]
+
+ # The gets can't be in the while condition because we'll
+ # get short-circuit evaluated.
+ set nrp [gets $sup pline]
+ set nrb [gets $sub bline]
+ while { $nrp >= 0 } {
+ if { $nrp != $nrb || [string compare $pline $bline] != 0} {
+ puts $outf $pline
+ } else {
+ set nrb [gets $sub bline]
+ }
+ set nrp [gets $sup pline]
+ }
+
+ close $sup
+ close $sub
+ close $outf
+ return 0
+}
+
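fileextract assumes both inputs are already sorted and that the first is a superset of the second; a tiny illustration with hypothetical one-word-per-line files:

    # superset.txt holds a b c d; subset.txt holds b d.
    fileextract superset.txt subset.txt only.txt
    # only.txt now contains the lines unique to the superset: a and c.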
# Verify all .db files in the specified directory.
-proc verify_dir { \
- {directory "./TESTDIR"} { pref "" } { noredo 0 } { quiet 0 } } {
+proc verify_dir { {directory $testdir} \
+ { pref "" } { noredo 0 } { quiet 0 } { nodump 0 } { cachesize 0 } } {
+ global encrypt
+ global passwd
+
# If we're doing database verification between tests, we don't
# want to do verification twice without an intervening cleanup--some
# test was skipped. Always verify by default (noredo == 0) so
# that explicit calls to verify_dir during tests don't require
# cleanup commands.
- if { $noredo == 1 } {
+ if { $noredo == 1 } {
if { [file exists $directory/NOREVERIFY] == 1 } {
- if { $quiet == 0 } {
+ if { $quiet == 0 } {
puts "Skipping verification."
}
return
@@ -2164,21 +2668,177 @@ proc verify_dir { \
set errpfxarg {-errpfx "FAIL: verify" }
set errarg $errfilearg$errpfxarg
set ret 0
+
+ # Open an env, so that we have a large enough cache. Pick
+ # a fairly generous default if we haven't specified something else.
+
+ if { $cachesize == 0 } {
+ set cachesize [expr 1024 * 1024]
+ }
+ set encarg ""
+ if { $encrypt != 0 } {
+ set encarg "-encryptaes $passwd"
+ }
+
+ set env [eval {berkdb_env -create -private} $encarg \
+ {-cachesize [list 0 $cachesize 0]}]
+ set earg " -env $env $errarg "
+
foreach db $dbs {
- if { [catch {eval {berkdb dbverify} $errarg $db} res] != 0 } {
+ if { [catch {eval {berkdb dbverify} $earg $db} res] != 0 } {
puts $res
puts "FAIL:[timestamp] Verification of $db failed."
set ret 1
+ continue
} else {
error_check_good verify:$db $res 0
- if { $quiet == 0 } {
+ if { $quiet == 0 } {
puts "${pref}Verification of $db succeeded."
}
}
+
+ # Skip the dump if it's dangerous to do it.
+ if { $nodump == 0 } {
+ if { [catch {eval dumploadtest $db} res] != 0 } {
+ puts $res
+ puts "FAIL:[timestamp] Dump/load of $db failed."
+ set ret 1
+ continue
+ } else {
+ error_check_good dumpload:$db $res 0
+ if { $quiet == 0 } {
+ puts \
+ "${pref}Dump/load of $db succeeded."
+ }
+ }
+ }
}
+
+ error_check_good vrfyenv_close [$env close] 0
+
return $ret
}
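verify_dir is what op_recover_rec and the per-test cleanup rely on; a minimal invocation mirroring those call sites:

    # Verify (and by default dump/load) every .db file in $testdir;
    # the trailing 0 1 are the noredo and quiet flags.
    error_check_good db_verify [verify_dir $testdir "\t\t" 0 1] 0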
+# Is the database handle in $db a master database containing subdbs?
+proc check_for_subdbs { db } {
+ set stat [$db stat]
+ for { set i 0 } { [string length [lindex $stat $i]] > 0 } { incr i } {
+ set elem [lindex $stat $i]
+ if { [string compare [lindex $elem 0] Flags] == 0 } {
+ # This is the list of flags; look for
+ # "subdatabases".
+ if { [is_substr [lindex $elem 1] subdatabases] } {
+ return 1
+ }
+ }
+ }
+ return 0
+}
+
+proc dumploadtest { db {subdb ""} } {
+ global util_path
+ global encrypt
+ global passwd
+
+ set newdbname $db-dumpload.db
+
+ # Open original database, or subdb if we have one.
+ set dbarg ""
+ set utilflag ""
+ if { $encrypt != 0 } {
+ set dbarg "-encryptany $passwd"
+ set utilflag "-P $passwd"
+ }
+ set max_size [expr 15 * 1024]
+ if { [string length $subdb] == 0 } {
+ set olddb [eval {berkdb_open -rdonly} $dbarg $db]
+ error_check_good olddb($db) [is_valid_db $olddb] TRUE
+
+ if { [check_for_subdbs $olddb] } {
+ # If $db has subdatabases, dumploadtest each one
+ # separately.
+ set oc [$olddb cursor]
+ error_check_good orig_cursor($db) \
+ [is_valid_cursor $oc $olddb] TRUE
+
+ for { set dbt [$oc get -first] } \
+ { [llength $dbt] > 0 } \
+ { set dbt [$oc get -next] } {
+ set subdb [lindex [lindex $dbt 0] 0]
+
+ # Skip any files over this size. The problem is
+				# that when we dump/load it, files that are
+ # too big result in E2BIG errors because the
+ # arguments to db_dump are too long. 64K seems
+ # to be the limit (on FreeBSD), cut it to 32K
+ # just to be safe.
+ if {[string length $subdb] < $max_size && \
+ [string length $subdb] != 0} {
+ dumploadtest $db $subdb
+ }
+ }
+ error_check_good oldcclose [$oc close] 0
+ error_check_good olddbclose [$olddb close] 0
+ return 0
+ }
+ # No subdatabase
+ set have_subdb 0
+ } else {
+ set olddb [eval {berkdb_open -rdonly} $dbarg {$db $subdb}]
+ error_check_good olddb($db) [is_valid_db $olddb] TRUE
+
+ set have_subdb 1
+ }
+
+ # Do a db_dump test. Dump/load each file.
+ if { $have_subdb } {
+ set rval [catch {eval {exec $util_path/db_dump} $utilflag -k \
+ -s {$subdb} $db | \
+ $util_path/db_load $utilflag $newdbname} res]
+ } else {
+ set rval [catch {eval {exec $util_path/db_dump} $utilflag -k \
+ $db | $util_path/db_load $utilflag $newdbname} res]
+ }
+ error_check_good db_dump/db_load($db:$res) $rval 0
+
+ # Now open new database.
+ set newdb [eval {berkdb_open -rdonly} $dbarg $newdbname]
+ error_check_good newdb($db) [is_valid_db $newdb] TRUE
+
+ # Walk through olddb and newdb and make sure their contents
+ # are identical.
+ set oc [$olddb cursor]
+ set nc [$newdb cursor]
+ error_check_good orig_cursor($db) \
+ [is_valid_cursor $oc $olddb] TRUE
+ error_check_good new_cursor($db) \
+ [is_valid_cursor $nc $newdb] TRUE
+
+ for { set odbt [$oc get -first] } { [llength $odbt] > 0 } \
+ { set odbt [$oc get -next] } {
+ set ndbt [$nc get -get_both \
+ [lindex [lindex $odbt 0] 0] [lindex [lindex $odbt 0] 1]]
+ error_check_good db_compare($db/$newdbname) $ndbt $odbt
+ }
+
+ for { set ndbt [$nc get -first] } { [llength $ndbt] > 0 } \
+ { set ndbt [$nc get -next] } {
+ set odbt [$oc get -get_both \
+ [lindex [lindex $ndbt 0] 0] [lindex [lindex $ndbt 0] 1]]
+ error_check_good db_compare_back($db) $odbt $ndbt
+ }
+
+ error_check_good orig_cursor_close($db) [$oc close] 0
+ error_check_good new_cursor_close($db) [$nc close] 0
+
+ error_check_good orig_db_close($db) [$olddb close] 0
+ error_check_good new_db_close($db) [$newdb close] 0
+
+ eval berkdb dbremove $dbarg $newdbname
+
+ return 0
+}
+
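Under the hood the dump/load pass in dumploadtest is just the command-line utilities piped together; ignoring encryption and subdatabases, it reduces to something like (file names illustrative):

    exec $util_path/db_dump -k mydb.db | \
        $util_path/db_load mydb.db-dumpload.db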
# Generate randomly ordered, guaranteed-unique four-character strings that can
# be used to differentiate duplicates without creating duplicate duplicates.
# (test031 & test032) randstring_init is required before the first call to
@@ -2285,10 +2945,16 @@ proc extractflags { args } {
# Wrapper for berkdb open, used throughout the test suite so that we can
# set an errfile/errpfx as appropriate.
proc berkdb_open { args } {
+ global is_envmethod
+
+ if { [info exists is_envmethod] == 0 } {
+ set is_envmethod 0
+ }
+
set errargs {}
- if { [file exists /dev/stderr] == 1 } {
+ if { $is_envmethod == 0 && [file exists /dev/stderr] == 1 } {
append errargs " -errfile /dev/stderr "
- append errargs " -errpfx \\F\\A\\I\\L "
+ append errargs " -errpfx \\F\\A\\I\\L"
}
eval {berkdb open} $errargs $args
@@ -2299,6 +2965,29 @@ proc berkdb_open_noerr { args } {
eval {berkdb open} $args
}
+# Wrapper for berkdb env, used throughout the test suite so that we can
+# set an errfile/errpfx as appropriate.
+proc berkdb_env { args } {
+ global is_envmethod
+
+ if { [info exists is_envmethod] == 0 } {
+ set is_envmethod 0
+ }
+
+ set errargs {}
+ if { $is_envmethod == 0 && [file exists /dev/stderr] == 1 } {
+ append errargs " -errfile /dev/stderr "
+ append errargs " -errpfx \\F\\A\\I\\L"
+ }
+
+ eval {berkdb env} $errargs $args
+}
+
+# Version without errpfx/errfile, used when we're expecting a failure.
+proc berkdb_env_noerr { args } {
+ eval {berkdb env} $args
+}
+
proc check_handles { {outf stdout} } {
global ohandles
@@ -2314,8 +3003,16 @@ proc open_handles { } {
}
proc move_file_extent { dir dbfile tag op } {
- set files [get_extfiles $dir $dbfile $tag]
- foreach extfile $files {
+ set curfiles [get_extfiles $dir $dbfile ""]
+ set tagfiles [get_extfiles $dir $dbfile $tag]
+ #
+ # We want to copy or rename only those that have been saved,
+ # so delete all the current extent files so that we don't
+ # end up with extra ones we didn't restore from our saved ones.
+ foreach extfile $curfiles {
+ file delete -force $extfile
+ }
+ foreach extfile $tagfiles {
set i [string last "." $extfile]
incr i
set extnum [string range $extfile $i end]
@@ -2378,3 +3075,135 @@ proc get_pagesize { stat } {
}
return -1
}
+
+# Get a globbed list of source files and executables to use as large
+# data items in overflow page tests.
+proc get_file_list { {small 0} } {
+ global is_windows_test
+ global is_qnx_test
+ global src_root
+
+ if { $is_qnx_test } {
+ set small 1
+ }
+ if { $small && $is_windows_test } {
+ return [glob $src_root/*/*.c */env*.obj]
+ } elseif { $small } {
+ return [glob $src_root/*/*.c ./env*.o]
+ } elseif { $is_windows_test } {
+ return \
+ [glob $src_root/*/*.c */*.obj */libdb??.dll */libdb??d.dll]
+ } else {
+ return [glob $src_root/*/*.c ./*.o ./.libs/libdb-?.?.s?]
+ }
+}
+
+proc is_cdbenv { env } {
+ set sys [$env attributes]
+ if { [lsearch $sys -cdb] != -1 } {
+ return 1
+ } else {
+ return 0
+ }
+}
+
+proc is_lockenv { env } {
+ set sys [$env attributes]
+ if { [lsearch $sys -lock] != -1 } {
+ return 1
+ } else {
+ return 0
+ }
+}
+
+proc is_logenv { env } {
+ set sys [$env attributes]
+ if { [lsearch $sys -log] != -1 } {
+ return 1
+ } else {
+ return 0
+ }
+}
+
+proc is_mpoolenv { env } {
+ set sys [$env attributes]
+ if { [lsearch $sys -mpool] != -1 } {
+ return 1
+ } else {
+ return 0
+ }
+}
+
+proc is_rpcenv { env } {
+ set sys [$env attributes]
+ if { [lsearch $sys -rpc] != -1 } {
+ return 1
+ } else {
+ return 0
+ }
+}
+
+proc is_secenv { env } {
+ set sys [$env attributes]
+ if { [lsearch $sys -crypto] != -1 } {
+ return 1
+ } else {
+ return 0
+ }
+}
+
+proc is_txnenv { env } {
+ set sys [$env attributes]
+ if { [lsearch $sys -txn] != -1 } {
+ return 1
+ } else {
+ return 0
+ }
+}
+
+proc get_home { env } {
+ set sys [$env attributes]
+ set h [lsearch $sys -home]
+ if { $h == -1 } {
+ return NULL
+ }
+ incr h
+ return [lindex $sys $h]
+}
+
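These attribute helpers let shared test code adapt to whatever environment it is handed; the recurring pattern elsewhere in this file is:

    if { [is_txnenv $env] } {
        append envargs " -auto_commit "
        set t [$env txn]
        error_check_good txn [is_valid_txn $t $env] TRUE
        set txn "-txn $t"
    }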
+proc reduce_dups { nent ndp } {
+ upvar $nent nentries
+ upvar $ndp ndups
+
+	# If we are using a txnenv, assume it has the default
+	# maximum number of locks and cut back so that we don't
+	# run out of locks.  Reduce both counts by 25% per pass
+	# until the product fits.
+ #
+ while { [expr $nentries * $ndups] > 5000 } {
+ set nentries [expr ($nentries / 4) * 3]
+ set ndups [expr ($ndups / 4) * 3]
+ }
+}
+
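reduce_dups trims both counts by roughly 25% per pass until their product drops to 5000 or below; with the proc's integer arithmetic, for example:

    set nentries 1000
    set ndups 10
    reduce_dups nentries ndups
    # 1000 * 10 = 10000 > 5000, so one pass runs:
    # nentries => 750, ndups => 6   (750 * 6 = 4500 fits)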
+proc getstats { statlist field } {
+ foreach pair $statlist {
+ set txt [lindex $pair 0]
+ if { [string equal $txt $field] == 1 } {
+ return [lindex $pair 1]
+ }
+ }
+ return -1
+}
+
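getstats digs one named field out of a Tcl stat list; txn002 below does the equivalent by hand for the log offset, which with this helper would read:

    set offset [getstats [$env log_stat] {Current log file offset}]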
+proc big_endian { } {
+ global tcl_platform
+ set e $tcl_platform(byteOrder)
+ if { [string compare $e littleEndian] == 0 } {
+ return 0
+ } elseif { [string compare $e bigEndian] == 0 } {
+ return 1
+ } else {
+ error "FAIL: Unknown endianness $e"
+ }
+}
diff --git a/bdb/test/txn.tcl b/bdb/test/txn.tcl
deleted file mode 100644
index 904ef5fdca0..00000000000
--- a/bdb/test/txn.tcl
+++ /dev/null
@@ -1,181 +0,0 @@
-# See the file LICENSE for redistribution information.
-#
-# Copyright (c) 1996, 1997, 1998, 1999, 2000
-# Sleepycat Software. All rights reserved.
-#
-# $Id: txn.tcl,v 11.12 2000/12/31 19:26:23 bostic Exp $
-#
-# Options are:
-# -dir <directory in which to store memp>
-# -max <max number of concurrent transactions>
-# -iterations <iterations>
-# -stat
-proc txn_usage {} {
- puts "txn -dir <directory> -iterations <number of ops> \
- -max <max number of transactions> -stat"
-}
-
-proc txntest { args } {
- source ./include.tcl
-
- # Set defaults
- set iterations 50
- set max 1024
- set dostat 0
- set flags ""
- for { set i 0 } { $i < [llength $args] } {incr i} {
- switch -regexp -- [lindex $args $i] {
- -d.* { incr i; set testdir [lindex $args $i] }
- -f.* { incr i; set flags [lindex $args $i] }
- -i.* { incr i; set iterations [lindex $args $i] }
- -m.* { incr i; set max [lindex $args $i] }
- -s.* { set dostat 1 }
- default {
- puts -nonewline "FAIL:[timestamp] Usage: "
- txn_usage
- return
- }
- }
- }
- if { $max < $iterations } {
- set max $iterations
- }
-
- # Now run the various functionality tests
- txn001 $testdir $max $iterations $flags
- txn002 $testdir $max $iterations
-}
-
-proc txn001 { dir max ntxns flags} {
- source ./include.tcl
-
- puts "Txn001: Basic begin, commit, abort"
-
- # Open environment
- env_cleanup $dir
-
- set env [eval {berkdb \
- env -create -mode 0644 -txn -txn_max $max -home $dir} $flags]
- error_check_good evn_open [is_valid_env $env] TRUE
- txn001_suba $ntxns $env
- txn001_subb $ntxns $env
- txn001_subc $ntxns $env
- # Close and unlink the file
- error_check_good env_close:$env [$env close] 0
-}
-
-proc txn001_suba { ntxns env } {
- source ./include.tcl
-
- # We will create a bunch of transactions and commit them.
- set txn_list {}
- set tid_list {}
- puts "Txn001.a: Beginning/Committing $ntxns Transactions in $env"
- for { set i 0 } { $i < $ntxns } { incr i } {
- set txn [$env txn]
- error_check_good txn_begin [is_valid_txn $txn $env] TRUE
-
- lappend txn_list $txn
-
- set tid [$txn id]
- error_check_good tid_check [lsearch $tid_list $tid] -1
-
- lappend tid_list $tid
- }
-
- # Now commit them all
- foreach t $txn_list {
- error_check_good txn_commit:$t [$t commit] 0
- }
-}
-
-proc txn001_subb { ntxns env } {
- # We will create a bunch of transactions and abort them.
- set txn_list {}
- set tid_list {}
- puts "Txn001.b: Beginning/Aborting Transactions"
- for { set i 0 } { $i < $ntxns } { incr i } {
- set txn [$env txn]
- error_check_good txn_begin [is_valid_txn $txn $env] TRUE
-
- lappend txn_list $txn
-
- set tid [$txn id]
- error_check_good tid_check [lsearch $tid_list $tid] -1
-
- lappend tid_list $tid
- }
-
- # Now abort them all
- foreach t $txn_list {
- error_check_good txn_abort:$t [$t abort] 0
- }
-}
-
-proc txn001_subc { ntxns env } {
- # We will create a bunch of transactions and commit them.
- set txn_list {}
- set tid_list {}
- puts "Txn001.c: Beginning/Prepare/Committing Transactions"
- for { set i 0 } { $i < $ntxns } { incr i } {
- set txn [$env txn]
- error_check_good txn_begin [is_valid_txn $txn $env] TRUE
-
- lappend txn_list $txn
-
- set tid [$txn id]
- error_check_good tid_check [lsearch $tid_list $tid] -1
-
- lappend tid_list $tid
- }
-
- # Now prepare them all
- foreach t $txn_list {
- error_check_good txn_prepare:$t [$t prepare] 0
- }
-
- # Now commit them all
- foreach t $txn_list {
- error_check_good txn_commit:$t [$t commit] 0
- }
-
-}
-
-# Verify that read-only transactions do not create any log records
-proc txn002 { dir max ntxns } {
- source ./include.tcl
-
- puts "Txn002: Read-only transaction test"
-
- env_cleanup $dir
- set env [berkdb \
- env -create -mode 0644 -txn -txn_max $max -home $dir]
- error_check_good dbenv [is_valid_env $env] TRUE
-
- # We will create a bunch of transactions and commit them.
- set txn_list {}
- set tid_list {}
- puts "Txn002.a: Beginning/Committing Transactions"
- for { set i 0 } { $i < $ntxns } { incr i } {
- set txn [$env txn]
- error_check_good txn_begin [is_valid_txn $txn $env] TRUE
-
- lappend txn_list $txn
-
- set tid [$txn id]
- error_check_good tid_check [lsearch $tid_list $tid] -1
-
- lappend tid_list $tid
- }
-
- # Now commit them all
- foreach t $txn_list {
- error_check_good txn_commit:$t [$t commit] 0
- }
-
- # Now verify that there aren't any log records.
- set r [$env log_get -first]
- error_check_good log_get:$r [llength $r] 0
-
- error_check_good env_close:$r [$env close] 0
-}
diff --git a/bdb/test/txn001.tcl b/bdb/test/txn001.tcl
new file mode 100644
index 00000000000..406ef35751c
--- /dev/null
+++ b/bdb/test/txn001.tcl
@@ -0,0 +1,116 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: txn001.tcl,v 11.35 2002/05/10 17:44:28 sue Exp $
+#
+
+# TEST txn001
+# TEST Begin, commit, abort testing.
+proc txn001 { {tnum "01"} { max 1024 } { ntxns 50 } } {
+ source ./include.tcl
+ global txn_curid
+ global txn_maxid
+
+ puts -nonewline "Txn0$tnum: Basic begin, commit, abort"
+
+ if { $tnum != "01"} {
+ puts " (with ID wrap)"
+ } else {
+ puts ""
+ }
+
+ # Open environment
+ env_cleanup $testdir
+
+ set env [eval {berkdb_env -create -mode 0644 -txn \
+ -txn_max $max -home $testdir}]
+	error_check_good env_open [is_valid_env $env] TRUE
+ error_check_good txn_id_set \
+ [ $env txn_id_set $txn_curid $txn_maxid ] 0
+ txn001_suba $ntxns $env $tnum
+ txn001_subb $ntxns $env $tnum
+ txn001_subc $ntxns $env $tnum
+ # Close and unlink the file
+ error_check_good env_close:$env [$env close] 0
+}
+
+proc txn001_suba { ntxns env tnum } {
+ source ./include.tcl
+
+ # We will create a bunch of transactions and commit them.
+ set txn_list {}
+ set tid_list {}
+ puts "\tTxn0$tnum.a: Beginning/Committing $ntxns Transactions in $env"
+ for { set i 0 } { $i < $ntxns } { incr i } {
+ set txn [$env txn]
+ error_check_good txn_begin [is_valid_txn $txn $env] TRUE
+
+ lappend txn_list $txn
+
+ set tid [$txn id]
+ error_check_good tid_check [lsearch $tid_list $tid] -1
+
+ lappend tid_list $tid
+ }
+
+ # Now commit them all
+ foreach t $txn_list {
+ error_check_good txn_commit:$t [$t commit] 0
+ }
+}
+
+proc txn001_subb { ntxns env tnum } {
+ # We will create a bunch of transactions and abort them.
+ set txn_list {}
+ set tid_list {}
+ puts "\tTxn0$tnum.b: Beginning/Aborting Transactions"
+ for { set i 0 } { $i < $ntxns } { incr i } {
+ set txn [$env txn]
+ error_check_good txn_begin [is_valid_txn $txn $env] TRUE
+
+ lappend txn_list $txn
+
+ set tid [$txn id]
+ error_check_good tid_check [lsearch $tid_list $tid] -1
+
+ lappend tid_list $tid
+ }
+
+ # Now abort them all
+ foreach t $txn_list {
+ error_check_good txn_abort:$t [$t abort] 0
+ }
+}
+
+proc txn001_subc { ntxns env tnum } {
+ # We will create a bunch of transactions and commit them.
+ set txn_list {}
+ set tid_list {}
+ puts "\tTxn0$tnum.c: Beginning/Prepare/Committing Transactions"
+ for { set i 0 } { $i < $ntxns } { incr i } {
+ set txn [$env txn]
+ error_check_good txn_begin [is_valid_txn $txn $env] TRUE
+
+ lappend txn_list $txn
+
+ set tid [$txn id]
+ error_check_good tid_check [lsearch $tid_list $tid] -1
+
+ lappend tid_list $tid
+ }
+
+ # Now prepare them all
+ foreach t $txn_list {
+ error_check_good txn_prepare:$t \
+ [$t prepare [make_gid global:$t]] 0
+ }
+
+ # Now commit them all
+ foreach t $txn_list {
+ error_check_good txn_commit:$t [$t commit] 0
+ }
+
+}
+
diff --git a/bdb/test/txn002.tcl b/bdb/test/txn002.tcl
new file mode 100644
index 00000000000..5107472644d
--- /dev/null
+++ b/bdb/test/txn002.tcl
@@ -0,0 +1,91 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: txn002.tcl,v 11.38 2002/05/10 17:44:29 sue Exp $
+#
+
+# TEST txn002
+# TEST Verify that read-only transactions do not write log records.
+proc txn002 { {tnum "02" } { max 1024 } { ntxns 50 } } {
+ source ./include.tcl
+ global txn_curid
+ global txn_maxid
+
+ puts -nonewline "Txn0$tnum: Read-only transaction test ($max) ($ntxns)"
+
+ if { $tnum != "02" } {
+ puts " (with ID wrap)"
+ } else {
+ puts ""
+ }
+
+ env_cleanup $testdir
+ set env [berkdb \
+ env -create -mode 0644 -txn -txn_max $max -home $testdir]
+ error_check_good dbenv [is_valid_env $env] TRUE
+ error_check_good txn_id_set \
+ [$env txn_id_set $txn_curid $txn_maxid ] 0
+
+ # Save the current bytes in the log.
+ set off_start [txn002_logoff $env]
+
+ # We will create a bunch of transactions and commit them.
+ set txn_list {}
+ set tid_list {}
+ puts "\tTxn0$tnum.a: Beginning/Committing Transactions"
+ for { set i 0 } { $i < $ntxns } { incr i } {
+ set txn [$env txn]
+ error_check_good txn_begin [is_valid_txn $txn $env] TRUE
+
+ lappend txn_list $txn
+
+ set tid [$txn id]
+ error_check_good tid_check [lsearch $tid_list $tid] -1
+
+ lappend tid_list $tid
+ }
+ foreach t $txn_list {
+ error_check_good txn_commit:$t [$t commit] 0
+ }
+
+ # Make sure we haven't written any new log records except
+ # potentially some recycle records if we were wrapping txnids.
+ set off_stop [txn002_logoff $env]
+ if { $off_stop != $off_start } {
+ txn002_recycle_only $testdir
+ }
+
+ error_check_good env_close [$env close] 0
+}
+
+proc txn002_logoff { env } {
+ set stat [$env log_stat]
+ foreach i $stat {
+ foreach {txt val} $i {break}
+ if { [string compare \
+ $txt {Current log file offset}] == 0 } {
+ return $val
+ }
+ }
+}
+
+# Make sure that the only log records found are txn_recycle records
+proc txn002_recycle_only { dir } {
+ global util_path
+
+ set tmpfile $dir/printlog.out
+ set stat [catch {exec $util_path/db_printlog -h $dir > $tmpfile} ret]
+ error_check_good db_printlog $stat 0
+
+ set f [open $tmpfile r]
+ while { [gets $f record] >= 0 } {
+ set r [regexp {\[[^\]]*\]\[[^\]]*\]([^\:]*)\:} $record whl name]
+ if { $r == 1 } {
+ error_check_good record_type __txn_recycle $name
+ }
+ }
+ close $f
+ fileremove $tmpfile
+}
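The regexp in txn002_recycle_only keys off db_printlog's bracketed LSN prefix; assuming output lines shaped roughly like the illustrative record below, name ends up bound to the record type:

    set record {[1][28]__txn_recycle: rec: 52 txnid 80000001 prevlsn [0][0]}
    regexp {\[[^\]]*\]\[[^\]]*\]([^\:]*)\:} $record whl name
    # name => __txn_recycle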
diff --git a/bdb/test/txn003.tcl b/bdb/test/txn003.tcl
new file mode 100644
index 00000000000..71e450cf9ce
--- /dev/null
+++ b/bdb/test/txn003.tcl
@@ -0,0 +1,238 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: txn003.tcl,v 11.40 2002/09/05 17:23:08 sandstro Exp $
+#
+
+# TEST txn003
+# TEST Test abort/commit/prepare of txns with outstanding child txns.
+proc txn003 { {tnum "03"} } {
+ source ./include.tcl
+ global txn_curid
+ global txn_maxid
+
+ puts -nonewline "Txn0$tnum: Outstanding child transaction test"
+
+ if { $tnum != "03" } {
+ puts " (with ID wrap)"
+ } else {
+ puts ""
+ }
+ env_cleanup $testdir
+ set testfile txn003.db
+
+ set env_cmd "berkdb_env_noerr -create -txn -home $testdir"
+ set env [eval $env_cmd]
+ error_check_good dbenv [is_valid_env $env] TRUE
+ error_check_good txn_id_set \
+ [$env txn_id_set $txn_curid $txn_maxid] 0
+
+ set oflags {-auto_commit -create -btree -mode 0644 -env $env $testfile}
+ set db [eval {berkdb_open} $oflags]
+ error_check_good db_open [is_valid_db $db] TRUE
+
+ #
+ # Put some data so that we can check commit or abort of child
+ #
+ set key 1
+ set origdata some_data
+ set newdata this_is_new_data
+ set newdata2 some_other_new_data
+
+ error_check_good db_put [$db put -auto_commit $key $origdata] 0
+ error_check_good dbclose [$db close] 0
+
+ set db [eval {berkdb_open} $oflags]
+ error_check_good db_open [is_valid_db $db] TRUE
+
+ txn003_check $db $key "Origdata" $origdata
+
+ puts "\tTxn0$tnum.a: Parent abort"
+ set parent [$env txn]
+ error_check_good txn_begin [is_valid_txn $parent $env] TRUE
+ set child [$env txn -parent $parent]
+ error_check_good txn_begin [is_valid_txn $child $env] TRUE
+ error_check_good db_put [$db put -txn $child $key $newdata] 0
+ error_check_good parent_abort [$parent abort] 0
+ txn003_check $db $key "parent_abort" $origdata
+ # Check child handle is invalid
+ set stat [catch {$child abort} ret]
+ error_check_good child_handle $stat 1
+ error_check_good child_h2 [is_substr $ret "invalid command name"] 1
+
+ puts "\tTxn0$tnum.b: Parent commit"
+ set parent [$env txn]
+ error_check_good txn_begin [is_valid_txn $parent $env] TRUE
+ set child [$env txn -parent $parent]
+ error_check_good txn_begin [is_valid_txn $child $env] TRUE
+ error_check_good db_put [$db put -txn $child $key $newdata] 0
+ error_check_good parent_commit [$parent commit] 0
+ txn003_check $db $key "parent_commit" $newdata
+ # Check child handle is invalid
+ set stat [catch {$child abort} ret]
+ error_check_good child_handle $stat 1
+ error_check_good child_h2 [is_substr $ret "invalid command name"] 1
+ error_check_good dbclose [$db close] 0
+ error_check_good env_close [$env close] 0
+
+ #
+ # Since the data check assumes what has come before, the 'commit'
+ # operation must be last.
+ #
+ set hdr "\tTxn0$tnum"
+ set rlist {
+ {begin ".c"}
+ {prepare ".d"}
+ {abort ".e"}
+ {commit ".f"}
+ }
+ set count 0
+ foreach pair $rlist {
+ incr count
+ set op [lindex $pair 0]
+ set msg [lindex $pair 1]
+ set msg $hdr$msg
+ txn003_body $env_cmd $testfile $testdir $key $newdata2 $msg $op
+ set env [eval $env_cmd]
+ error_check_good dbenv [is_valid_env $env] TRUE
+
+ berkdb debug_check
+ set db [eval {berkdb_open} $oflags]
+ error_check_good db_open [is_valid_db $db] TRUE
+ #
+ # For prepare we'll then just
+ # end up aborting after we test what we need to.
+ # So set gooddata to the same as abort.
+ switch $op {
+ abort {
+ set gooddata $newdata
+ }
+ begin {
+ set gooddata $newdata
+ }
+ commit {
+ set gooddata $newdata2
+ }
+ prepare {
+ set gooddata $newdata
+ }
+ }
+ txn003_check $db $key "parent_$op" $gooddata
+ error_check_good dbclose [$db close] 0
+ error_check_good env_close [$env close] 0
+ }
+
+ # We can't do the attempted child discard on Windows
+ # because it will leave open files that can't be removed.
+ # Skip the remainder of the test for Windows.
+ if { $is_windows_test == 1 } {
+ puts "Skipping remainder of test for Windows"
+ return
+ }
+ puts "\tTxn0$tnum.g: Attempt child prepare"
+ set env [eval $env_cmd]
+ error_check_good dbenv [is_valid_env $env] TRUE
+ berkdb debug_check
+ set db [eval {berkdb_open_noerr} $oflags]
+ error_check_good db_open [is_valid_db $db] TRUE
+
+ set parent [$env txn]
+ error_check_good txn_begin [is_valid_txn $parent $env] TRUE
+ set child [$env txn -parent $parent]
+ error_check_good txn_begin [is_valid_txn $child $env] TRUE
+ error_check_good db_put [$db put -txn $child $key $newdata] 0
+ set gid [make_gid child_prepare:$child]
+ set stat [catch {$child prepare $gid} ret]
+ error_check_good child_prepare $stat 1
+ error_check_good child_prep_err [is_substr $ret "txn prepare"] 1
+
+ puts "\tTxn0$tnum.h: Attempt child discard"
+ set stat [catch {$child discard} ret]
+ error_check_good child_discard $stat 1
+
+ # We just panic'd the region, so the next operations will fail.
+ # No matter, we still have to clean up all the handles.
+
+ set stat [catch {$parent commit} ret]
+ error_check_good parent_commit $stat 1
+ error_check_good parent_commit:fail [is_substr $ret "DB_RUNRECOVERY"] 1
+
+ set stat [catch {$db close} ret]
+ error_check_good db_close $stat 1
+ error_check_good db_close:fail [is_substr $ret "DB_RUNRECOVERY"] 1
+
+ set stat [catch {$env close} ret]
+ error_check_good env_close $stat 1
+ error_check_good env_close:fail [is_substr $ret "DB_RUNRECOVERY"] 1
+}
+
+proc txn003_body { env_cmd testfile dir key newdata2 msg op } {
+ source ./include.tcl
+
+ berkdb debug_check
+ sentinel_init
+ set gidf $dir/gidfile
+ fileremove -f $gidf
+ set pidlist {}
+ puts "$msg.0: Executing child script to prepare txns"
+ berkdb debug_check
+ set p [exec $tclsh_path $test_path/wrap.tcl txnscript.tcl \
+ $testdir/txnout $env_cmd $testfile $gidf $key $newdata2 &]
+ lappend pidlist $p
+ watch_procs $pidlist 5
+ set f1 [open $testdir/txnout r]
+ set r [read $f1]
+ puts $r
+ close $f1
+ fileremove -f $testdir/txnout
+
+ berkdb debug_check
+ puts -nonewline "$msg.1: Running recovery ... "
+ flush stdout
+ berkdb debug_check
+ set env [eval $env_cmd "-recover"]
+ error_check_good dbenv-recover [is_valid_env $env] TRUE
+ puts "complete"
+
+ puts "$msg.2: getting txns from txn_recover"
+ set txnlist [$env txn_recover]
+ error_check_good txnlist_len [llength $txnlist] 1
+ set tpair [lindex $txnlist 0]
+
+ set gfd [open $gidf r]
+ set ret [gets $gfd parentgid]
+ close $gfd
+ set txn [lindex $tpair 0]
+ set gid [lindex $tpair 1]
+ if { $op == "begin" } {
+ puts "$msg.2: $op new txn"
+ } else {
+ puts "$msg.2: $op parent"
+ }
+ error_check_good gidcompare $gid $parentgid
+ if { $op == "prepare" } {
+ set gid [make_gid prepare_recover:$txn]
+ set stat [catch {$txn $op $gid} ret]
+ error_check_good prep_error $stat 1
+ error_check_good prep_err \
+ [is_substr $ret "transaction already prepared"] 1
+ error_check_good txn:prep_abort [$txn abort] 0
+ } elseif { $op == "begin" } {
+ set stat [catch {$env txn} ret]
+ error_check_good begin_error $stat 1
+ error_check_good begin_err \
+ [is_substr $ret "not yet committed transactions is incomplete"] 1
+ error_check_good txn:prep_abort [$txn abort] 0
+ } else {
+ error_check_good txn:$op [$txn $op] 0
+ }
+ error_check_good envclose [$env close] 0
+}
+
+proc txn003_check { db key msg gooddata } {
+ set kd [$db get $key]
+ set data [lindex [lindex $kd 0] 1]
+ error_check_good $msg $data $gooddata
+}
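+
+# Editor's sketch (illustrative only, not part of the original test): the
+# value txn003_check should find after each recovered-parent operation in
+# the $rlist loop above. Only 'commit' makes $newdata2 durable, which is why
+# 'commit' must be the last entry in $rlist -- each later data check assumes
+# what the earlier operations left behind.
+array set txn003_expected_var {
+	begin	newdata
+	prepare	newdata
+	abort	newdata
+	commit	newdata2
+}
+# e.g. $txn003_expected_var(prepare) names the variable whose value should
+# still be in the database (the recovered prepare is later resolved by abort).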
diff --git a/bdb/test/txn004.tcl b/bdb/test/txn004.tcl
new file mode 100644
index 00000000000..75e1b40043f
--- /dev/null
+++ b/bdb/test/txn004.tcl
@@ -0,0 +1,62 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: txn004.tcl,v 11.39 2002/05/15 17:14:06 sandstro Exp $
+#
+
+# TEST txn004
+# TEST Test of wraparound txnids (txn001)
+proc txn004 { } {
+ source ./include.tcl
+ global txn_curid
+ global txn_maxid
+
+ set orig_curid $txn_curid
+ set orig_maxid $txn_maxid
+ puts "\tTxn004.1: wraparound txnids"
+ set txn_curid [expr $txn_maxid - 2]
+ txn001 "04.1"
+ puts "\tTxn004.2: closer wraparound txnids"
+ set txn_curid [expr $txn_maxid - 3]
+ set txn_maxid [expr $txn_maxid - 2]
+ txn001 "04.2"
+
+ puts "\tTxn004.3: test wraparound txnids"
+ txn_idwrap_check $testdir
+ set txn_curid $orig_curid
+ set txn_maxid $orig_maxid
+ return
+}
+
+proc txn_idwrap_check { testdir } {
+ global txn_curid
+ global txn_maxid
+
+ env_cleanup $testdir
+
+ # Open/create the txn region
+ set e [berkdb_env -create -txn -home $testdir]
+ error_check_good env_open [is_substr $e env] 1
+
+ set txn1 [$e txn]
+ error_check_good txn1 [is_valid_txn $txn1 $e] TRUE
+ error_check_good txn_id_set \
+ [$e txn_id_set [expr $txn_maxid - 1] $txn_maxid] 0
+
+ set txn2 [$e txn]
+ error_check_good txn2 [is_valid_txn $txn2 $e] TRUE
+
+ # txn3 will require a wraparound txnid
+ # XXX How can we test it has a wrapped id?
+ set txn3 [$e txn]
+ error_check_good wrap_txn3 [is_valid_txn $txn3 $e] TRUE
+
+ error_check_good free_txn1 [$txn1 commit] 0
+ error_check_good free_txn2 [$txn2 commit] 0
+ error_check_good free_txn3 [$txn3 commit] 0
+
+ error_check_good close [$e close] 0
+}
+
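+# Editor's sketch (illustrative only, not part of the original test): what
+# "wraparound" means for txn3 above. Txn ids live in the range
+# [TXN_MINIMUM, TXN_MAXIMUM] (0x80000000..0xffffffff in this release, to the
+# best of the editor's knowledge); once the maximum id has been handed out,
+# allocation restarts at the minimum. Small stand-in bounds keep the
+# arithmetic inside plain Tcl integers; the helper name is hypothetical.
+proc txn_idwrap_next { last {minid 100} {maxid 110} } {
+	# Mirror the allocator's wrap: past the maximum, restart at the minimum.
+	if { $last == $maxid } {
+		return $minid
+	}
+	return [expr $last + 1]
+}
+# e.g. [txn_idwrap_next 110] => 100, just as the id after TXN_MAXIMUM wraps
+# to TXN_MINIMUM.
+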
diff --git a/bdb/test/txn005.tcl b/bdb/test/txn005.tcl
new file mode 100644
index 00000000000..604f3ad7de4
--- /dev/null
+++ b/bdb/test/txn005.tcl
@@ -0,0 +1,75 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: txn005.tcl,v 11.35 2002/08/08 15:38:14 bostic Exp $
+#
+
+# TEST txn005
+# TEST Test transaction ID wraparound and recovery.
+proc txn005 {} {
+ source ./include.tcl
+ global txn_curid
+ global txn_maxid
+
+ env_cleanup $testdir
+ puts "Txn005: Test transaction wraparound recovery"
+
+ # Open/create the txn region
+ puts "\tTxn005.a: Create environment"
+ set e [berkdb_env -create -txn -home $testdir]
+ error_check_good env_open [is_valid_env $e] TRUE
+
+ set txn1 [$e txn]
+ error_check_good txn1 [is_valid_txn $txn1 $e] TRUE
+
+ set db [berkdb_open -env $e -txn $txn1 -create -btree txn005.db]
+ error_check_good db [is_valid_db $db] TRUE
+ error_check_good txn1_commit [$txn1 commit] 0
+
+ puts "\tTxn005.b: Set txn ids"
+ error_check_good txn_id_set \
+ [$e txn_id_set [expr $txn_maxid - 1] $txn_maxid] 0
+
+ # txn2 and txn3 will require a wraparound txnid
+ set txn2 [$e txn]
+ error_check_good txn2 [is_valid_txn $txn2 $e] TRUE
+
+ error_check_good put [$db put -txn $txn2 "a" ""] 0
+ error_check_good txn2_commit [$txn2 commit] 0
+
+ error_check_good get_a [$db get "a"] "{a {}}"
+
+ error_check_good close [$db close] 0
+
+ set txn3 [$e txn]
+ error_check_good txn3 [is_valid_txn $txn3 $e] TRUE
+
+ set db [berkdb_open -env $e -txn $txn3 -btree txn005.db]
+ error_check_good db [is_valid_db $db] TRUE
+
+ error_check_good put2 [$db put -txn $txn3 "b" ""] 0
+ error_check_good sync [$db sync] 0
+ error_check_good txn3_abort [$txn3 abort] 0
+ error_check_good dbclose [$db close] 0
+ error_check_good eclose [$e close] 0
+
+ puts "\tTxn005.c: Run recovery"
+ set stat [catch {exec $util_path/db_recover -h $testdir -e -c} result]
+ if { $stat == 1 } {
+ error "FAIL: Recovery error: $result."
+ }
+
+ puts "\tTxn005.d: Check data"
+ set e [berkdb_env -txn -home $testdir]
+ error_check_good env_open [is_valid_env $e] TRUE
+
+ set db [berkdb_open -env $e -auto_commit -btree txn005.db]
+ error_check_good db [is_valid_db $db] TRUE
+
+ error_check_good get_a [$db get "a"] "{a {}}"
+ error_check_bad get_b [$db get "b"] "{b {}}"
+ error_check_good dbclose [$db close] 0
+ error_check_good eclose [$e close] 0
+}
diff --git a/bdb/test/txn006.tcl b/bdb/test/txn006.tcl
new file mode 100644
index 00000000000..7bf37d34dfc
--- /dev/null
+++ b/bdb/test/txn006.tcl
@@ -0,0 +1,47 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: txn006.tcl,v 1.5 2002/08/01 19:59:19 sue Exp $
+#
+#
+#TEST txn006
+#TEST Test dump/load in transactional environment.
+proc txn006 { { iter 50 } } {
+ source ./include.tcl
+ set testfile txn006.db
+
+ puts "Txn006: Test dump/load in transaction environment"
+ env_cleanup $testdir
+
+ puts "\tTxn006.a: Create environment and database"
+ # Open/create the txn region
+ set e [berkdb_env -create -home $testdir -txn]
+ error_check_good env_open [is_valid_env $e] TRUE
+
+ # Open/create database
+ set db [berkdb_open -auto_commit -env $e \
+ -create -btree -dup $testfile]
+ error_check_good db_open [is_valid_db $db] TRUE
+
+ # Start a transaction
+ set txn [$e txn]
+ error_check_good txn [is_valid_txn $txn $e] TRUE
+
+ puts "\tTxn006.b: Put data"
+ # Put some data
+ for { set i 1 } { $i < $iter } { incr i } {
+ error_check_good put [$db put -txn $txn key$i data$i] 0
+ }
+
+ # End transaction, close db
+ error_check_good txn_commit [$txn commit] 0
+ error_check_good db_close [$db close] 0
+ error_check_good env_close [$e close] 0
+
+ puts "\tTxn006.c: dump/load"
+ # Dump and load
+ exec $util_path/db_dump -p -h $testdir $testfile | \
+ $util_path/db_load -h $testdir $testfile
+}
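+
+# Editor's sketch (illustrative only, not part of the original test): the
+# pipeline above reloads the dump straight over the original database. An
+# assumed variant that loads into a separate file and compares the two
+# printable dumps (helper and file names are hypothetical):
+proc txn006_dumpload_check { testdir testfile util_path } {
+	# Dump the original, load that dump into a copy, then dump the copy.
+	exec $util_path/db_dump -p -h $testdir -f $testdir/t6.orig $testfile
+	exec $util_path/db_load -h $testdir -f $testdir/t6.orig txn006_copy.db
+	exec $util_path/db_dump -p -h $testdir -f $testdir/t6.copy txn006_copy.db
+	# The two dumps should be identical if dump/load round-trips cleanly.
+	set f1 [open $testdir/t6.orig r]; set d1 [read $f1]; close $f1
+	set f2 [open $testdir/t6.copy r]; set d2 [read $f2]; close $f2
+	return [string equal $d1 $d2]
+}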
diff --git a/bdb/test/txn007.tcl b/bdb/test/txn007.tcl
new file mode 100644
index 00000000000..f67dc209f92
--- /dev/null
+++ b/bdb/test/txn007.tcl
@@ -0,0 +1,57 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: txn007.tcl,v 11.3 2002/08/08 15:38:14 bostic Exp $
+#
+#TEST txn007
+#TEST Test of DB_TXN_WRITE_NOSYNC
+proc txn007 { { iter 50 } } {
+ source ./include.tcl
+ set testfile txn007.db
+
+ puts "Txn007: DB_TXN_WRITE_NOSYNC"
+ env_cleanup $testdir
+
+ # Open/create the txn region
+ puts "\tTxn007.a: Create env and database with -wrnosync"
+ set e [berkdb_env -create -home $testdir -txn -wrnosync]
+ error_check_good env_open [is_valid_env $e] TRUE
+
+ # Open/create database
+ set db [berkdb open -auto_commit -env $e \
+ -create -btree -dup $testfile]
+ error_check_good db_open [is_valid_db $db] TRUE
+
+ # Put some data
+ puts "\tTxn007.b: Put $iter data items in individual transactions"
+ for { set i 1 } { $i < $iter } { incr i } {
+ # Start a transaction
+ set txn [$e txn]
+ error_check_good txn [is_valid_txn $txn $e] TRUE
+ $db put -txn $txn key$i data$i
+ error_check_good txn_commit [$txn commit] 0
+ }
+ set stat [$e log_stat]
+ puts "\tTxn007.c: Check log stats"
+ foreach i $stat {
+ set txt [lindex $i 0]
+ if { [string equal $txt {Times log written}] == 1 } {
+ set wrval [lindex $i 1]
+ }
+ if { [string equal $txt {Times log flushed}] == 1 } {
+ set syncval [lindex $i 1]
+ }
+ }
+ error_check_good wrval [expr $wrval >= $iter] 1
+ #
+ # We should have written at least 'iter' number of times,
+ # but not synced on any of those.
+ #
+ set val [expr $wrval - $iter]
+ error_check_good syncval [expr $syncval <= $val] 1
+
+ error_check_good db_close [$db close] 0
+ error_check_good env_close [$e close] 0
+}
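+
+# Editor's sketch (illustrative only, not part of the original test): the
+# check above encodes "every one of the $iter unsynced commits wrote the log
+# without flushing it, so flushes can account for at most the writes beyond
+# those commits". A hypothetical helper stating the same bound:
+proc txn007_wrnosync_ok { wrval syncval iter } {
+	# wrval:   "Times log written" from log_stat
+	# syncval: "Times log flushed" from log_stat
+	# iter:    the commit count used in the check above
+	return [expr {$wrval >= $iter && $syncval <= ($wrval - $iter)}]
+}
+# e.g. [txn007_wrnosync_ok 60 8 50] => 1, while [txn007_wrnosync_ok 60 15 50]
+# => 0 because 15 flushes could not all have come from the 10 extra writes.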
diff --git a/bdb/test/txn008.tcl b/bdb/test/txn008.tcl
new file mode 100644
index 00000000000..ad57ea0eeaa
--- /dev/null
+++ b/bdb/test/txn008.tcl
@@ -0,0 +1,32 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: txn008.tcl,v 11.3 2002/05/10 17:55:54 sue Exp $
+#
+
+# TEST txn008
+# TEST Test of wraparound txnids (txn002)
+proc txn008 { } {
+ source ./include.tcl
+ global txn_curid
+ global txn_maxid
+
+ set orig_curid $txn_curid
+ set orig_maxid $txn_maxid
+ puts "\tTxn008.1: wraparound txnids"
+ set txn_curid [expr $txn_maxid - 2]
+ txn002 "08.1"
+ puts "\tTxn008.2: closer wraparound txnids"
+ set txn_curid [expr $txn_maxid - 3]
+ set txn_maxid [expr $txn_maxid - 2]
+ txn002 "08.2"
+
+ puts "\tTxn008.3: test wraparound txnids"
+ txn_idwrap_check $testdir
+ set txn_curid $orig_curid
+ set txn_maxid $orig_maxid
+ return
+}
+
diff --git a/bdb/test/txn009.tcl b/bdb/test/txn009.tcl
new file mode 100644
index 00000000000..784c0068a41
--- /dev/null
+++ b/bdb/test/txn009.tcl
@@ -0,0 +1,32 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: txn009.tcl,v 11.3 2002/05/10 17:55:55 sue Exp $
+#
+
+# TEST txn009
+# TEST Test of wraparound txnids (txn003)
+proc txn009 { } {
+ source ./include.tcl
+ global txn_curid
+ global txn_maxid
+
+ set orig_curid $txn_curid
+ set orig_maxid $txn_maxid
+ puts "\tTxn009.1: wraparound txnids"
+ set txn_curid [expr $txn_maxid - 2]
+ txn003 "09.1"
+ puts "\tTxn009.2: closer wraparound txnids"
+ set txn_curid [expr $txn_maxid - 3]
+ set txn_maxid [expr $txn_maxid - 2]
+ txn003 "09.2"
+
+ puts "\tTxn009.3: test wraparound txnids"
+ txn_idwrap_check $testdir
+ set txn_curid $orig_curid
+ set txn_maxid $orig_maxid
+ return
+}
+
diff --git a/bdb/test/txnscript.tcl b/bdb/test/txnscript.tcl
new file mode 100644
index 00000000000..1a4a1b6f2ec
--- /dev/null
+++ b/bdb/test/txnscript.tcl
@@ -0,0 +1,67 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: txnscript.tcl,v 11.3 2002/01/23 15:33:40 bostic Exp $
+#
+# Txn003 script - outstanding child prepare script
+# Usage: txnscript envcmd dbfile gidfile key data
+# envcmd: command to open env
+# dbfile: name of database file
+# gidfile: name of global id file
+# key: key to use
+# data: new data to use
+
+source ./include.tcl
+source $test_path/test.tcl
+source $test_path/testutils.tcl
+
+set usage "txnscript envcmd dbfile gidfile key data"
+
+# Verify usage
+if { $argc != 5 } {
+ puts stderr "FAIL:[timestamp] Usage: $usage"
+ exit
+}
+
+# Initialize arguments
+set envcmd [ lindex $argv 0 ]
+set dbfile [ lindex $argv 1 ]
+set gidfile [ lindex $argv 2 ]
+set key [ lindex $argv 3 ]
+set data [ lindex $argv 4 ]
+
+set dbenv [eval $envcmd]
+error_check_good envopen [is_valid_env $dbenv] TRUE
+
+set usedb 1
+set db [berkdb_open -auto_commit -env $dbenv $dbfile]
+error_check_good dbopen [is_valid_db $db] TRUE
+
+puts "\tTxnscript.a: begin parent and child txn"
+set parent [$dbenv txn]
+error_check_good parent [is_valid_txn $parent $dbenv] TRUE
+set child [$dbenv txn -parent $parent]
+error_check_good child [is_valid_txn $child $dbenv] TRUE
+
+puts "\tTxnscript.b: Modify data"
+error_check_good db_put [$db put -txn $child $key $data] 0
+
+set gfd [open $gidfile w+]
+set gid [make_gid txnscript:$parent]
+puts $gfd $gid
+puts "\tTxnscript.c: Prepare parent only"
+error_check_good txn_prepare:$parent [$parent prepare $gid] 0
+close $gfd
+
+puts "\tTxnscript.d: Check child handle"
+set stat [catch {$child abort} ret]
+error_check_good child_handle $stat 1
+error_check_good child_h2 [is_substr $ret "invalid command name"] 1
+
+#
+# We do not close the db or env, but exit with the txns outstanding.
+#
+puts "\tTxnscript completed successfully"
+flush stdout
diff --git a/bdb/test/update.tcl b/bdb/test/update.tcl
index 81fc9ba9e2c..2bedfacc793 100644
--- a/bdb/test/update.tcl
+++ b/bdb/test/update.tcl
@@ -1,9 +1,10 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1999, 2000
+# Copyright (c) 1999-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: update.tcl,v 11.9 2000/10/27 13:23:56 sue Exp $
+# $Id: update.tcl,v 11.11 2002/01/11 15:53:58 bostic Exp $
+
source ./include.tcl
global update_dir
set update_dir "$test_path/update_test"
diff --git a/bdb/test/upgrade.tcl b/bdb/test/upgrade.tcl
index 0d2f656bcf9..1c0ffc5461a 100644
--- a/bdb/test/upgrade.tcl
+++ b/bdb/test/upgrade.tcl
@@ -1,9 +1,9 @@
# See the file LICENSE for redistribution information.
#
-# Copyright (c) 1999, 2000
+# Copyright (c) 1999-2002
# Sleepycat Software. All rights reserved.
#
-# $Id: upgrade.tcl,v 11.16 2000/10/27 13:23:56 sue Exp $
+# $Id: upgrade.tcl,v 11.22 2002/07/28 03:22:41 krinsky Exp $
source ./include.tcl
@@ -17,6 +17,7 @@ set gen_upgrade 0
global upgrade_dir
global upgrade_be
global upgrade_method
+global upgrade_name
proc upgrade { { archived_test_loc "DEFAULT" } } {
source ./include.tcl
@@ -40,7 +41,7 @@ proc upgrade { { archived_test_loc "DEFAULT" } } {
foreach file [glob $upgrade_dir/$version/$method/*] {
regexp (\[^\/\]*)\.tar\.gz$ $file dummy name
- cleanup $testdir NULL
+ cleanup $testdir NULL 1
#puts "$upgrade_dir/$version/$method/$name.tar.gz"
set curdir [pwd]
cd $testdir
@@ -109,6 +110,8 @@ proc _upgrade_test { temp_dir version method file endianness } {
set ret [berkdb upgrade "$temp_dir/$file-$endianness.db"]
error_check_good dbupgrade $ret 0
+ error_check_good dbupgrade_verify [verify_dir $temp_dir "" 0 0 1] 0
+
upgrade_dump "$temp_dir/$file-$endianness.db" "$temp_dir/temp.dump"
error_check_good "Upgrade diff.$endianness: $version $method $file" \
@@ -138,31 +141,41 @@ proc gen_upgrade { dir } {
global upgrade_dir
global upgrade_be
global upgrade_method
- global runtests
+ global upgrade_name
+ global num_test
+ global parms
source ./include.tcl
set gen_upgrade 1
set upgrade_dir $dir
- foreach upgrade_be { 0 1 } {
- foreach i "btree rbtree hash recno rrecno queue frecno" {
- puts "Running $i tests"
- set upgrade_method $i
- set start 1
- for { set j $start } { $j <= $runtests } {incr j} {
+ foreach i "btree rbtree hash recno rrecno frecno queue queueext" {
+ puts "Running $i tests"
+ set upgrade_method $i
+ set start 1
+ for { set j $start } { $j <= $num_test(test) } { incr j } {
+ set upgrade_name [format "test%03d" $j]
+ if { [info exists parms($upgrade_name)] != 1 } {
+ continue
+ }
+
+ foreach upgrade_be { 0 1 } {
if [catch {exec $tclsh_path \
<< "source $test_path/test.tcl;\
- global upgrade_be;\
+ global gen_upgrade upgrade_be;\
+ global upgrade_method upgrade_name;\
+ set gen_upgrade 1;\
set upgrade_be $upgrade_be;\
+ set upgrade_method $upgrade_method;\
+ set upgrade_name $upgrade_name;\
run_method -$i $j $j"} res] {
- puts "FAIL: [format "test%03d" $j] $i"
+ puts "FAIL: $upgrade_name $i"
}
puts $res
- cleanup $testdir NULL
+ cleanup $testdir NULL 1
}
}
}
-
set gen_upgrade 0
}
@@ -241,6 +254,8 @@ proc upgrade_dump { database file {stripnulls 0} } {
}
close $f
+ error_check_good upgrade_dump_c_close [$dbc close] 0
+ error_check_good upgrade_dump_db_close [$db close] 0
}
proc _comp { a b } {
diff --git a/bdb/test/upgrade/README b/bdb/test/upgrade/README
deleted file mode 100644
index 1afada2ecf4..00000000000
--- a/bdb/test/upgrade/README
+++ /dev/null
@@ -1,85 +0,0 @@
- The Berkeley DB Upgrade Tests
-
-Quick ref:
-
- Running the tests:
- (in tclsh)
- % source ../test/test.tcl
- % upgrade
-
- Generating the test databases:
- (in tclsh)
- % source ../test/test.tcl
- % gen_upgrade /where/you/want/them
-
- (in your shell)
- $ cd /where/you/want/them
- $ perl $db_dir/upgrade/scripts/pack-3.0.pl
- $ mv 3.0 $db_dir/upgrade/databases
-
-What they are:
-
-The DB upgrade tests are a framework for testing two main features of
-Berkeley DB: the db_dump utility, and the "DB_UPGRADE" flag to DB->open.
-They work by taking a tarred, gzipped set of test databases and dumps, and
-verifying that the set of items is the same in the original database (as
-dumped by the version of DB that created it) as in the upgraded one,
-and is the same in the original database and in a new database generated by
-db_loading a db_dump.
-
-In db 3.X and higher, the upgrade test is repeated on a database with
-the opposite endianness to the system the database was generated on.
-
-How to generate test databases:
-
-Ordinarily, this is something that only very rarely has to occur;
-an archive of upgrade test databases can and should be kept, so ideally
-the generation step only needs to be done once for each major DB release.
-
-To generate the test databases, execute the command "gen_upgrade <dir>"
-inside a tclsh. The method tests will run twice, once for each endianness,
-and all the databases will be saved in a hierarchy named by <dir>.
-
-Once the databases have been built, the archives expected by the upgrade tests
-must be built using the "pack" script, in upgrade/scripts/pack-<version>.pl.
-This script must be edited slightly to specify the location on a given system
-of the DB source tree and utilities; it then converts the set of databases
-under the current working directory into a set of .tar.gz files containing
-the databases as well as flat files with their contents in item-by-item and
-db_dump formats.
-
-How to run the upgrade tests:
-
-Run "upgrade" from tclsh in the DB build directory. By default, this
-looks in upgrade/databases, in the DB source tree. An optional first argument
-can be used to specify an alternate directory.
-
-A note on 2.X tests:
-
-The 2.X packing script, as well as a patch against a 2.6.6 test directory
-to allow it to generate test databases, is in upgrade/generate-2.X.
-
-Note that the upgrade tests can be *run* on the 2.X test archives
-without anything in this directory. It is provided only for
-archival reasons, in case there is ever reason to generate a new
-set of test databases.
-
-XXX: Note also that it quite likely has paths hard-coded for a specific
-system that is not yours.
-
-Known Issues:
-
-1. The following 2.X databases trigger a bug in the db 2.X hash code.
-This bug affects only empty and near-empty databases, and has been
-corrected in db 3.X, but it will prevent the following from passing
-the db_dump test. (They have been removed from the canonical database
-collection.)
-
- 2.X hash -- test026
- 2.X hash -- test038
- 2.X hash -- test039
- 2.X hash -- test040
-
-2. The 2.X recno versions of test043 cannot be made to pass the db_dump
-test because the 2.X version of db_dump has no -k flag and cannot preserve
-sparsely populated databases.
diff --git a/bdb/test/upgrade/generate-2.X/pack-2.6.6.pl b/bdb/test/upgrade/generate-2.X/pack-2.6.6.pl
deleted file mode 100644
index f031d46ca62..00000000000
--- a/bdb/test/upgrade/generate-2.X/pack-2.6.6.pl
+++ /dev/null
@@ -1,114 +0,0 @@
-#!/usr/bin/perl
-
-use strict;
-use Archive::Tar;
-
-my $subdir;
-my $file;
-my $archive_name;
-
-my $version = "2.6.6";
-my $build_dir = "/work/db/upgrade/db-2.6.6/build_unix";
-my $db_dump_path = "$build_dir/db_dump";
-my $pwd = `pwd`;
-
-$| = 1;
-
-chomp( $pwd );
-
-opendir( DIR, $version . "le" ) || die;
-while( $subdir = readdir( DIR ) )
-{
- if( $subdir !~ m{^\.\.?$} )
- {
- opendir( SUBDIR, $version . "le/$subdir" ) || die;
- while( $file = readdir( SUBDIR ) )
- {
- if( $file !~ m{^\.\.?$} )
- {
- print "[" . localtime() . "] " . "$subdir $file", "\n";
-
- eval
- {
- my $data;
- my $archive;
-
- system( "mkdir", "-p", "$version/$subdir" );
- $file =~ m{(.*)\.};
- $archive_name = "$1";
- $archive_name =~ s{Test}{test};
- $archive = Archive::Tar->new();
- $archive->add_data( "$archive_name-le.db",
- read_file( $version . "le/$subdir/$file" ) );
-# $archive->add_data( "$archive_name-be.db",
-# read_file( $version . "be/$subdir/$file" ) );
- $archive->add_data( "$archive_name.dump",
- db_dump( "$pwd/$version" . "le/$subdir/$file" ) );
- $data = tcl_dump( "$pwd/$version" . "le/$subdir/$file" );
- $archive->add_data( "$archive_name.tcldump", $data );
- $archive->write( "$version/$subdir/$archive_name.tar.gz", 9 );
- };
- if( $@ )
- {
- print( "Could not process $file: $@\n" );
- }
- }
- }
- }
-}
-
-sub read_file
-{
- my ($file) = @_;
- my $data;
-
- open( FILE, "<$file" ) || die;
- read( FILE, $data, -s $file );
- close( file );
-
- return $data;
-}
-
-sub db_dump
-{
- my ($file) = @_;
-
- #print $file, "\n";
- unlink( "temp.dump" );
- system( "sh", "-c", "$db_dump_path $file >temp.dump" ) && die;
- if( -e "temp.dump" )
- {
- return read_file( "temp.dump" );
- }
- else
- {
- die "db_dump failure: $file\n";
- }
-}
-
-sub tcl_dump
-{
- my ($file) = @_;
- my $up_dump_args = "";
-
- if ($file =~ /test012/) {
- $up_dump_args .= "1";
- }
-
- unlink( "temp.dump" );
- open( TCL, "|$build_dir/dbtest" );
-print TCL <<END;
-cd $build_dir
-source ../test/test.tcl
-upgrade_dump $file $pwd/temp.dump $up_dump_args
-END
- close( TCL );
- if( -e "temp.dump" )
- {
- return read_file( "temp.dump" );
- }
- else
- {
- die "TCL dump failure: $file\n";
- }
-}
diff --git a/bdb/test/upgrade/generate-2.X/test-2.6.patch b/bdb/test/upgrade/generate-2.X/test-2.6.patch
deleted file mode 100644
index 557e8061eae..00000000000
--- a/bdb/test/upgrade/generate-2.X/test-2.6.patch
+++ /dev/null
@@ -1,379 +0,0 @@
-diff -crN test.orig/test.tcl test/test.tcl
-*** test.orig/test.tcl Fri Dec 11 14:56:26 1998
---- test/test.tcl Mon Oct 4 15:26:16 1999
-***************
-*** 8,13 ****
---- 8,14 ----
- source ./include.tcl
- source ../test/testutils.tcl
- source ../test/byteorder.tcl
-+ source ../test/upgrade.tcl
-
- set testdir ./TESTDIR
- if { [file exists $testdir] != 1 } {
-***************
-*** 114,119 ****
---- 115,124 ----
- global debug_print
- global debug_on
- global runtests
-+
-+ global __method
-+ set __method $method
-+
- if { $stop == 0 } {
- set stop $runtests
- }
-diff -crN test.orig/testutils.tcl test/testutils.tcl
-*** test.orig/testutils.tcl Tue Dec 15 07:58:51 1998
---- test/testutils.tcl Wed Oct 6 17:40:45 1999
-***************
-*** 680,690 ****
---- 680,698 ----
-
- proc cleanup { dir } {
- source ./include.tcl
-+ global __method
-+ global errorInfo
- # Remove the database and environment.
- txn_unlink $dir 1
- memp_unlink $dir 1
- log_unlink $dir 1
- lock_unlink $dir 1
-+
-+ catch { exec mkdir -p /work/upgrade/2.6/$__method } res
-+ puts $res
-+ catch { exec sh -c "mv $dir/*.db /work/upgrade/2.6/$__method" } res
-+ puts $res
-+
- set ret [catch { glob $dir/* } result]
- if { $ret == 0 } {
- eval exec $RM -rf $result
-diff -crN test.orig/upgrade.tcl test/upgrade.tcl
-*** test.orig/upgrade.tcl Wed Dec 31 19:00:00 1969
---- test/upgrade.tcl Mon Oct 18 21:22:39 1999
-***************
-*** 0 ****
---- 1,322 ----
-+ # See the file LICENSE for redistribution information.
-+ #
-+ # Copyright (c) 1999
-+ # Sleepycat Software. All rights reserved.
-+ #
-+ # @(#)upgrade.tcl 11.1 (Sleepycat) 8/23/99
-+ #
-+ source ./include.tcl
-+ global gen_upgrade
-+ set gen_upgrade 0
-+ global upgrade_dir
-+ set upgrade_dir "/work/upgrade/DOTEST"
-+ global upgrade_be
-+ global upgrade_method
-+
-+ proc upgrade { } {
-+ source ./include.tcl
-+ global upgrade_dir
-+
-+ foreach version [glob $upgrade_dir/*] {
-+ regexp \[^\/\]*$ $version version
-+ foreach method [glob $upgrade_dir/$version/*] {
-+ regexp \[^\/\]*$ $method method
-+ foreach file [glob $upgrade_dir/$version/$method/*] {
-+ puts $file
-+ regexp (\[^\/\]*)\.tar\.gz$ $file dummy name
-+ foreach endianness {"le" "be"} {
-+ puts "Update: $version $method $name $endianness"
-+ set ret [catch {_upgrade $upgrade_dir $testdir $version $method $name $endianness 1 1} message]
-+ if { $ret != 0 } {
-+ puts $message
-+ }
-+ }
-+ }
-+ }
-+ }
-+ }
-+
-+ proc _upgrade { source_dir temp_dir version method file endianness do_db_load_test do_upgrade_test } {
-+ source include.tcl
-+ global errorInfo
-+
-+ cleanup $temp_dir
-+
-+ exec tar zxf "$source_dir/$version/$method/$file.tar.gz" -C $temp_dir
-+
-+ if { $do_db_load_test } {
-+ set ret [catch \
-+ {exec ./db_load -f "$temp_dir/$file.dump" \
-+ "$temp_dir/upgrade.db"} message]
-+ error_check_good \
-+ "Update load: $version $method $file $message" $ret 0
-+
-+ set ret [catch \
-+ {exec ./db_dump -f "$temp_dir/upgrade.dump" \
-+ "$temp_dir/upgrade.db"} message]
-+ error_check_good \
-+ "Update dump: $version $method $file $message" $ret 0
-+
-+ error_check_good "Update diff.1.1: $version $method $file" \
-+ [catch { exec $CMP "$temp_dir/$file.dump" "$temp_dir/upgrade.dump" } ret] 0
-+ error_check_good "Update diff.1.2: $version $method $file" $ret ""
-+ }
-+
-+ if { $do_upgrade_test } {
-+ set ret [catch {berkdb open -upgrade "$temp_dir/$file-$endianness.db"} db]
-+ if { $ret == 1 } {
-+ if { ![is_substr $errorInfo "version upgrade"] } {
-+ set fnl [string first "\n" $errorInfo]
-+ set theError [string range $errorInfo 0 [expr $fnl - 1]]
-+ error $theError
-+ }
-+ } else {
-+ error_check_good dbopen [is_valid_db $db] TRUE
-+ error_check_good dbclose [$db close] 0
-+
-+ set ret [catch \
-+ {exec ./db_dump -f "$temp_dir/upgrade.dump" \
-+ "$temp_dir/$file-$endianness.db"} message]
-+ error_check_good \
-+ "Update dump: $version $method $file $message" $ret 0
-+
-+ error_check_good "Update diff.2: $version $method $file" \
-+ [catch { exec $CMP "$temp_dir/$file.dump" "$temp_dir/upgrade.dump" } ret] 0
-+ error_check_good "Update diff.2: $version $method $file" $ret ""
-+ }
-+ }
-+ }
-+
-+ proc gen_upgrade { dir } {
-+ global gen_upgrade
-+ global upgrade_dir
-+ global upgrade_be
-+ global upgrade_method
-+ global __method
-+ global runtests
-+ source ./include.tcl
-+ set tclsh_path "/work/db/upgrade/db-2.6.6/build_unix/dbtest"
-+
-+ set gen_upgrade 1
-+ set upgrade_dir $dir
-+
-+ foreach upgrade_be { 0 1 } {
-+ foreach i "rrecno" {
-+ # "hash btree rbtree hash recno rrecno"
-+ puts "Running $i tests"
-+ set upgrade_method $i
-+ for { set j 1 } { $j <= $runtests } {incr j} {
-+ if [catch {exec $tclsh_path \
-+ << "source ../test/test.tcl; \
-+ run_method $i $j $j"} res] {
-+ puts "FAIL: [format "test%03d" $j] $i"
-+ }
-+ puts $res
-+ set __method $i
-+ cleanup $testdir
-+ }
-+ }
-+ }
-+
-+ set gen_upgrade 0
-+ }
-+
-+ proc upgrade_dump { database file {with_binkey 0} } {
-+ source ./include.tcl
-+ global errorInfo
-+
-+ set is_recno 0
-+
-+ set db [dbopen $database 0 0600 DB_UNKNOWN]
-+ set dbc [$db cursor 0]
-+
-+ set f [open $file w+]
-+ fconfigure $f -encoding binary -translation binary
-+
-+ #
-+ # Get a sorted list of keys
-+ #
-+ set key_list ""
-+ if { [catch {set pair [$dbc get "" $DB_FIRST]}] != 0 } {
-+ set pair [$dbc get 0 $DB_FIRST]
-+ set is_recno 1
-+ }
-+
-+ while { 1 } {
-+ if { [llength $pair] == 0 } {
-+ break
-+ }
-+ lappend key_list [list [lindex $pair 0]]
-+ set pair [$dbc get 0 $DB_NEXT]
-+ }
-+
-+
-+ # Discard duplicated keys; we now have a key for each
-+ # duplicate, not each unique key, and we don't want to get each
-+ # duplicate multiple times when we iterate over key_list.
-+ set uniq_keys {}
-+ foreach key $key_list {
-+ if { [info exists existence_list($key)] == 0 } {
-+ lappend uniq_keys [list $key]
-+ }
-+ set existence_list($key) 1
-+ }
-+ set key_list $uniq_keys
-+
-+ set key_list [lsort -command _comp $key_list]
-+
-+ #foreach llave $key_list {
-+ # puts $llave
-+ #}
-+
-+ #
-+ # Get the data for each key
-+ #
-+
-+ for { set i 0 } { $i < [llength $key_list] } { incr i } {
-+ set key [concat [lindex $key_list $i]]
-+ # XXX Gross awful hack. We want to DB_SET in the vast
-+ # majority of cases, but DB_SET can't handle binary keys
-+ # in the 2.X Tcl interface. So we look manually and linearly
-+ # for the key we want if with_binkey == 1.
-+ if { $with_binkey != 1 } {
-+ set pair [$dbc get $key $DB_SET]
-+ } else {
-+ set pair [_search_binkey $key $dbc]
-+ }
-+ if { $is_recno != 1 } {
-+ set key [upgrade_convkey $key $dbc]
-+ }
-+ #puts "pair:$pair:[lindex $pair 1]"
-+ set data [lindex $pair 1]
-+ set data [upgrade_convdata $data $dbc]
-+ set data_list [list $data]
-+ catch { while { $is_recno == 0 } {
-+ set pair [$dbc get 0 $DB_NEXT_DUP]
-+ if { [llength $pair] == 0 } {
-+ break
-+ }
-+
-+ set data [lindex $pair 1]
-+ set data [upgrade_convdata $data $dbc]
-+ lappend data_list [list $data]
-+ } }
-+ set data_list [lsort -command _comp $data_list]
-+ puts -nonewline $f [binary format i [string length $key]]
-+ puts -nonewline $f $key
-+ puts -nonewline $f [binary format i [llength $data_list]]
-+ for { set j 0 } { $j < [llength $data_list] } { incr j } {
-+ puts -nonewline $f [binary format i [string length [concat [lindex $data_list $j]]]]
-+ puts -nonewline $f [concat [lindex $data_list $j]]
-+ }
-+ }
-+
-+ close $f
-+ }
-+
-+ proc _comp { a b } {
-+ # return expr [[concat $a] < [concat $b]]
-+ return [string compare [concat $a] [concat $b]]
-+ }
-+
-+ # Converts a key to the format of keys in the 3.X Tcl interface
-+ proc upgrade_convkey { key dbc } {
-+ source ./include.tcl
-+
-+ # Stick a null on the end.
-+ set k "$key\0"
-+
-+ set tmp $testdir/gb0
-+
-+ # Attempt a dbc getbinkey to get any additional parts of the key.
-+ set dbt [$dbc getbinkey $tmp 0 $DB_CURRENT]
-+
-+ set tmpid [open $tmp r]
-+ fconfigure $tmpid -encoding binary -translation binary
-+ set cont [read $tmpid]
-+
-+ set k $k$cont
-+
-+ close $tmpid
-+
-+ exec $RM -f $tmp
-+
-+ return $k
-+ }
-+
-+ # Converts a datum to the format of data in the 3.X Tcl interface
-+ proc upgrade_convdata { data dbc } {
-+ source ./include.tcl
-+ set is_partial 0
-+
-+ # Get the datum out of "data"
-+ if { [llength $data] == 1 } {
-+ set d [lindex $data 0]
-+ } elseif { [llength $data] == 2 } {
-+ # It was a partial return; the first arg is the number of nuls
-+ set d [lindex $data 1]
-+ set numnul [lindex $data 0]
-+ while { $numnul > 0 } {
-+ set d "\0$d"
-+ incr numnul -1
-+ }
-+
-+ # The old Tcl getbin and the old Tcl partial put
-+ # interface are incompatible; we'll wind up returning
-+ # the datum twice if we try a getbin now. So
-+ # set a flag to avoid it.
-+ set is_partial 1
-+
-+ } else {
-+ set d $data
-+ }
-+
-+
-+ if { $is_partial != 1 } {
-+
-+ # Stick a null on the end.
-+ set d "$d\0"
-+
-+ set tmp $testdir/gb1
-+
-+ # Attempt a dbc getbin to get any additional parts of the datum
-+ # the Tcl interface has neglected.
-+ set dbt [$dbc getbin $tmp 0 $DB_CURRENT]
-+
-+ set tmpid [open $tmp r]
-+ fconfigure $tmpid -encoding binary -translation binary
-+ set cont [read $tmpid]
-+
-+ set d $d$cont
-+
-+ #puts "$data->$d"
-+
-+ close $tmpid
-+ }
-+
-+ return [list $d]
-+ }
-+
-+ # Implement the DB_SET functionality, stupidly, in terms of DB_NEXT and
-+ # manual comparisons. We have to use this instead of DB_SET with
-+ # binary keys, as the old Tcl interface can't handle binary keys but DB_SET
-+ # requires them. So instead, we page through using DB_NEXT, which returns
-+ # the binary keys only up to the first null, and compare to our specified
-+ # key, which is similarly truncated.
-+ #
-+ # This is really slow, but is seldom used.
-+ proc _search_binkey { key dbc } {
-+ #puts "doing _search_binkey $key $dbc"
-+ source ./include.tcl
-+ set dbt [$dbc get 0 $DB_FIRST]
-+ while { [llength $dbt] != 0 } {
-+ set curkey [lindex $dbt 0]
-+ if { [string compare $key $curkey] == 0 } {
-+ return $dbt
-+ }
-+ set dbt [$dbc get 0 $DB_NEXT]
-+ }
-+
-+ # We didn't find it. Return an empty list.
-+ return {}
-+ }
diff --git a/bdb/test/wrap.tcl b/bdb/test/wrap.tcl
index 4a5c825d8f0..aaceb4f74e6 100644
--- a/bdb/test/wrap.tcl
+++ b/bdb/test/wrap.tcl
@@ -1,12 +1,19 @@
-# Sentinel file wrapper for multi-process tests.
-# This is designed to avoid a set of nasty bugs, primarily on Windows,
-# where pid reuse causes watch_procs to sit around waiting for some
-# random process that's not DB's and is not exiting.
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2000-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: wrap.tcl,v 11.6 2002/04/25 13:35:02 bostic Exp $
+#
+# Sentinel file wrapper for multi-process tests. This is designed to avoid a
+# set of nasty bugs, primarily on Windows, where pid reuse causes watch_procs
+# to sit around waiting for some random process that's not DB's and is not
+# exiting.
source ./include.tcl
+source $test_path/testutils.tcl
# Arguments:
-#
if { $argc < 3 } {
puts "FAIL: wrap.tcl: Usage: wrap.tcl script log scriptargs"
exit
@@ -33,13 +40,17 @@ set childsentinel $testdir/begin.$childpid
set f [open $childsentinel w]
close $f
+puts $t "source $test_path/test.tcl"
+puts $t "set script $script"
+
# Set up argv for the subprocess, since the args aren't passed in as true
# arguments thanks to the pipe structure.
puts $t "set argc [llength $args]"
puts $t "set argv [list $args]"
-# Command the test to run.
-puts $t "source $test_path/$script"
+puts $t {set ret [catch { source $test_path/$script } result]}
+puts $t {if { [string length $result] > 0 } { puts $result }}
+puts $t {error_check_good "$test_path/$script run: pid [pid]" $ret 0}
# Close the pipe. This will flush the above commands and actually run the
# test, and will also return an error a la exec if anything bad happens
@@ -55,4 +66,6 @@ close $f
set f [open $testdir/end.$parentpid w]
close $f
+error_check_good "Pipe close ($childpid: $script $argv: logfile $logfile)"\
+ $ret 0
exit $ret
diff --git a/bdb/txn/txn.c b/bdb/txn/txn.c
index 0f6d894c19b..06fc8264afd 100644
--- a/bdb/txn/txn.c
+++ b/bdb/txn/txn.c
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Copyright (c) 1996-2002
* Sleepycat Software. All rights reserved.
*/
/*
@@ -39,11 +39,12 @@
#include "db_config.h"
#ifndef lint
-static const char revid[] = "$Id: txn.c,v 11.61 2001/01/10 18:18:52 bostic Exp $";
+static const char revid[] = "$Id: txn.c,v 11.179 2002/08/29 17:41:17 margo Exp $";
#endif /* not lint */
#ifndef NO_SYSTEM_INCLUDES
#include <sys/types.h>
+#include <stdlib.h>
#if TIME_WITH_SYS_TIME
#include <sys/time.h>
@@ -59,58 +60,129 @@ static const char revid[] = "$Id: txn.c,v 11.61 2001/01/10 18:18:52 bostic Exp $
#include <string.h>
#endif
-#ifdef HAVE_RPC
-#include "db_server.h"
-#endif
-
#include "db_int.h"
-#include "db_shash.h"
-#include "txn.h"
-#include "lock.h"
-#include "log.h"
-#include "db_dispatch.h"
-#include "db_page.h"
-#include "db_ext.h"
-
-#ifdef HAVE_RPC
-#include "gen_client_ext.h"
-#include "rpc_client_ext.h"
-#endif
+#include "dbinc/crypto.h"
+#include "dbinc/hmac.h"
+#include "dbinc/db_page.h"
+#include "dbinc/db_shash.h"
+#include "dbinc/hash.h"
+#include "dbinc/lock.h"
+#include "dbinc/log.h"
+#include "dbinc/txn.h"
+
+#define SET_LOG_FLAGS(dbenv, txnp, lflags) \
+ do { \
+ lflags = DB_COMMIT | DB_PERMANENT; \
+ if (F_ISSET(txnp, TXN_SYNC)) \
+ lflags |= DB_FLUSH; \
+ else if (!F_ISSET(txnp, TXN_NOSYNC) && \
+ !F_ISSET(dbenv, DB_ENV_TXN_NOSYNC)) { \
+ if (F_ISSET(dbenv, DB_ENV_TXN_WRITE_NOSYNC)) \
+ lflags |= DB_WRNOSYNC; \
+ else \
+ lflags |= DB_FLUSH; \
+ } \
+ } while (0)
-static int __txn_begin __P((DB_TXN *));
-static int __txn_isvalid __P((const DB_TXN *, TXN_DETAIL **, u_int32_t));
+/*
+ * __txn_isvalid enumerated types. We cannot simply use the transaction
+ * statuses, because different statuses need to be handled differently
+ * depending on the caller.
+ */
+typedef enum {
+ TXN_OP_ABORT,
+ TXN_OP_COMMIT,
+ TXN_OP_DISCARD,
+ TXN_OP_PREPARE
+} txnop_t;
+
+static int __txn_begin_int __P((DB_TXN *, int));
+static int __txn_end __P((DB_TXN *, int));
+static int __txn_isvalid __P((const DB_TXN *, TXN_DETAIL **, txnop_t));
+static int __txn_set_timeout __P(( DB_TXN *, db_timeout_t, u_int32_t));
static int __txn_undo __P((DB_TXN *));
+#ifndef db_create
/*
+ * txn_abort --
* txn_begin --
- * This is a wrapper to the actual begin process. Normal txn_begin()
- * allocates a DB_TXN structure for the caller, while txn_xa_begin() does
- * not. Other than that, both call into the common __txn_begin code().
+ * txn_commit --
+ *
+ * When we switched to methods in 4.0, we guessed txn_{abort,begin,commit}
+ * were the interfaces applications would likely use and not be willing to
+ * change, due to the sheer volume of the calls. Provide wrappers -- we
+ * could do txn_abort and txn_commit using macros, but not txn_begin: as
+ * the name of the field is also txn_begin, we didn't want to modify it.
+ *
+ * The issue with txn_begin hits us in another way. If configured with the
+ * --with-uniquename option, we use #defines to re-define DB's interfaces
+ * to unique names. We can't do that for these functions because txn_begin
+ * is also a field name in the DB_ENV structure, and the #defines we use go
+ * at the end of the db.h file -- we get control too late to #define a field
+ * name. So, modify the script that generates the unique names #defines to
+ * not generate them for these three functions, and don't include the three
+ * functions in libraries built with that configuration option.
+ *
+ * EXTERN: int txn_abort __P((DB_TXN *));
+ * EXTERN: int txn_begin __P((DB_ENV *, DB_TXN *, DB_TXN **, u_int32_t));
+ * EXTERN: int txn_commit __P((DB_TXN *, u_int32_t));
+ */
+int
+txn_abort(txnp)
+ DB_TXN *txnp;
+{
+ return (txnp->abort(txnp));
+}
+
+int
+txn_begin(dbenv, parent, txnpp, flags)
+ DB_ENV *dbenv;
+ DB_TXN *parent, **txnpp;
+ u_int32_t flags;
+{
+ return (dbenv->txn_begin(dbenv, parent, txnpp, flags));
+}
+
+int
+txn_commit(txnp, flags)
+ DB_TXN *txnp;
+ u_int32_t flags;
+{
+ return (txnp->commit(txnp, flags));
+}
+#endif /* !db_create */
+
+/*
+ * __txn_begin --
+ * This is a wrapper to the actual begin process. Normal transaction
+ * begin allocates a DB_TXN structure for the caller, while XA transaction
+ * begin does not. Other than that, both call into common __txn_begin_int
+ * code.
*
* Internally, we use TXN_DETAIL structures, but the DB_TXN structure
* provides access to the transaction ID and the offset in the transaction
* region of the TXN_DETAIL structure.
+ *
+ * PUBLIC: int __txn_begin __P((DB_ENV *, DB_TXN *, DB_TXN **, u_int32_t));
*/
int
-txn_begin(dbenv, parent, txnpp, flags)
+__txn_begin(dbenv, parent, txnpp, flags)
DB_ENV *dbenv;
DB_TXN *parent, **txnpp;
u_int32_t flags;
{
+ DB_LOCKREGION *region;
DB_TXN *txn;
int ret;
-#ifdef HAVE_RPC
- if (F_ISSET(dbenv, DB_ENV_RPCCLIENT))
- return (__dbcl_txn_begin(dbenv, parent, txnpp, flags));
-#endif
-
+ *txnpp = NULL;
PANIC_CHECK(dbenv);
- ENV_REQUIRES_CONFIG(dbenv, dbenv->tx_handle, DB_INIT_TXN);
+ ENV_REQUIRES_CONFIG(dbenv, dbenv->tx_handle, "txn_begin", DB_INIT_TXN);
if ((ret = __db_fchk(dbenv,
"txn_begin", flags,
- DB_TXN_NOWAIT | DB_TXN_NOSYNC | DB_TXN_SYNC)) != 0)
+ DB_DIRTY_READ | DB_TXN_NOWAIT |
+ DB_TXN_NOSYNC | DB_TXN_SYNC)) != 0)
return (ret);
if ((ret = __db_fcchk(dbenv,
"txn_begin", flags, DB_TXN_NOSYNC, DB_TXN_SYNC)) != 0)
@@ -122,7 +194,10 @@ txn_begin(dbenv, parent, txnpp, flags)
txn->mgrp = dbenv->tx_handle;
txn->parent = parent;
TAILQ_INIT(&txn->kids);
+ TAILQ_INIT(&txn->events);
txn->flags = TXN_MALLOC;
+ if (LF_ISSET(DB_DIRTY_READ))
+ F_SET(txn, TXN_DIRTY_READ);
if (LF_ISSET(DB_TXN_NOSYNC))
F_SET(txn, TXN_NOSYNC);
if (LF_ISSET(DB_TXN_SYNC))
@@ -130,15 +205,41 @@ txn_begin(dbenv, parent, txnpp, flags)
if (LF_ISSET(DB_TXN_NOWAIT))
F_SET(txn, TXN_NOWAIT);
- if ((ret = __txn_begin(txn)) != 0) {
- __os_free(txn, sizeof(DB_TXN));
- txn = NULL;
- }
+ if ((ret = __txn_begin_int(txn, 0)) != 0)
+ goto err;
- if (txn != NULL && parent != NULL)
+ if (parent != NULL)
TAILQ_INSERT_HEAD(&parent->kids, txn, klinks);
+ if (LOCKING_ON(dbenv)) {
+ region = ((DB_LOCKTAB *)dbenv->lk_handle)->reginfo.primary;
+ if (parent != NULL) {
+ ret = __lock_inherit_timeout(dbenv,
+ parent->txnid, txn->txnid);
+ /* No parent locker set yet. */
+ if (ret == EINVAL) {
+ parent = NULL;
+ ret = 0;
+ }
+ if (ret != 0)
+ goto err;
+ }
+
+ /*
+ * Parent is NULL if we have no parent
+ * or it has no timeouts set.
+ */
+ if (parent == NULL && region->tx_timeout != 0)
+ if ((ret = __lock_set_timeout(dbenv, txn->txnid,
+ region->tx_timeout, DB_SET_TXN_TIMEOUT)) != 0)
+ goto err;
+ }
+
*txnpp = txn;
+ return (0);
+
+err:
+ __os_free(dbenv, txn);
return (ret);
}
@@ -158,26 +259,60 @@ __txn_xa_begin(dbenv, txn)
memset(txn, 0, sizeof(DB_TXN));
txn->mgrp = dbenv->tx_handle;
+ TAILQ_INIT(&txn->kids);
+ TAILQ_INIT(&txn->events);
- return (__txn_begin(txn));
+ return (__txn_begin_int(txn, 0));
}
/*
- * __txn_begin --
+ * __txn_compensate_begin
+ * Begin a compensation transaction. This is a special interface
+ * that is used only for transactions that must be started to compensate
+ * for actions during an abort. Currently only used for allocations.
+ *
+ * PUBLIC: int __txn_compensate_begin __P((DB_ENV *, DB_TXN **txnp));
+ */
+int
+__txn_compensate_begin(dbenv, txnpp)
+ DB_ENV *dbenv;
+ DB_TXN **txnpp;
+{
+ DB_TXN *txn;
+ int ret;
+
+ PANIC_CHECK(dbenv);
+
+ if ((ret = __os_calloc(dbenv, 1, sizeof(DB_TXN), &txn)) != 0)
+ return (ret);
+
+ txn->mgrp = dbenv->tx_handle;
+ TAILQ_INIT(&txn->kids);
+ TAILQ_INIT(&txn->events);
+ txn->flags = TXN_MALLOC;
+ F_SET(txn, TXN_COMPENSATE);
+
+ *txnpp = txn;
+ return (__txn_begin_int(txn, 1));
+}
+
+/*
+ * __txn_begin_int --
* Normal DB version of txn_begin.
*/
static int
-__txn_begin(txn)
+__txn_begin_int(txn, internal)
DB_TXN *txn;
+ int internal;
{
DB_ENV *dbenv;
- DB_LSN begin_lsn;
+ DB_LSN begin_lsn, null_lsn;
DB_TXNMGR *mgr;
DB_TXNREGION *region;
TXN_DETAIL *td;
size_t off;
- u_int32_t id;
- int ret;
+ u_int32_t id, *ids;
+ int nids, ret;
mgr = txn->mgrp;
dbenv = mgr->dbenv;
@@ -188,36 +323,73 @@ __txn_begin(txn)
* need never write records for read-only transactions). However,
* we do need to find the current LSN so that we can store it in the
* transaction structure, so we can know where to take checkpoints.
+ *
+ * XXX
+ * We should set this value when we write the first log record, not
+ * here.
*/
- if (LOGGING_ON(dbenv) &&
- (ret = log_put(dbenv, &begin_lsn, NULL, DB_CURLSN)) != 0)
- goto err2;
+ if (DBENV_LOGGING(dbenv))
+ __log_txn_lsn(dbenv, &begin_lsn, NULL, NULL);
R_LOCK(dbenv, &mgr->reginfo);
+ if (!F_ISSET(txn, TXN_COMPENSATE) && F_ISSET(region, TXN_IN_RECOVERY)) {
+ __db_err(dbenv, "operation not permitted during recovery");
+ ret = EINVAL;
+ goto err;
+ }
- /* Make sure that last_txnid is not going to wrap around. */
- if (region->last_txnid == TXN_INVALID) {
+ /* Make sure that we aren't still recovering prepared transactions. */
+ if (!internal && region->stat.st_nrestores != 0) {
__db_err(dbenv,
-"txn_begin: transaction ID wrapped. Exit the database environment\nand restart the application as if application failure had occurred");
+ "recovery of prepared but not yet committed transactions is incomplete");
ret = EINVAL;
- goto err1;
+ goto err;
+ }
+
+ /*
+ * Allocate a new transaction id. Our current valid range can span
+ * the maximum valid value, so check for it and wrap manually.
+ */
+ if (region->last_txnid == TXN_MAXIMUM &&
+ region->cur_maxid != TXN_MAXIMUM)
+ region->last_txnid = TXN_MINIMUM - 1;
+
+ if (region->last_txnid == region->cur_maxid) {
+ if ((ret = __os_malloc(dbenv,
+ sizeof(u_int32_t) * region->maxtxns, &ids)) != 0)
+ goto err;
+ nids = 0;
+ for (td = SH_TAILQ_FIRST(&region->active_txn, __txn_detail);
+ td != NULL;
+ td = SH_TAILQ_NEXT(td, links, __txn_detail))
+ ids[nids++] = td->txnid;
+ region->last_txnid = TXN_MINIMUM - 1;
+ region->cur_maxid = TXN_MAXIMUM;
+ if (nids != 0)
+ __db_idspace(ids, nids,
+ &region->last_txnid, &region->cur_maxid);
+ __os_free(dbenv, ids);
+ if (DBENV_LOGGING(dbenv) &&
+ (ret = __txn_recycle_log(dbenv, NULL,
+ &null_lsn, 0, region->last_txnid, region->cur_maxid)) != 0)
+ goto err;
}
/* Allocate a new transaction detail structure. */
if ((ret =
__db_shalloc(mgr->reginfo.addr, sizeof(TXN_DETAIL), 0, &td)) != 0) {
__db_err(dbenv,
- "Unable to allocate memory for transaction detail");
- goto err1;
+ "Unable to allocate memory for transaction detail");
+ goto err;
}
/* Place transaction on active transaction list. */
SH_TAILQ_INSERT_HEAD(&region->active_txn, td, links, __txn_detail);
id = ++region->last_txnid;
- ++region->nbegins;
- if (++region->nactive > region->maxnactive)
- region->maxnactive = region->nactive;
+ ++region->stat.st_nbegins;
+ if (++region->stat.st_nactive > region->stat.st_maxnactive)
+ region->stat.st_maxnactive = region->stat.st_nactive;
td->txnid = id;
td->begin_lsn = begin_lsn;
@@ -228,12 +400,20 @@ __txn_begin(txn)
else
td->parent = INVALID_ROFF;
+ td->flags = 0;
off = R_OFFSET(&mgr->reginfo, td);
R_UNLOCK(dbenv, &mgr->reginfo);
ZERO_LSN(txn->last_lsn);
txn->txnid = id;
- txn->off = off;
+ txn->off = (u_int32_t)off;
+
+ txn->abort = __txn_abort;
+ txn->commit = __txn_commit;
+ txn->discard = __txn_discard;
+ txn->id = __txn_id;
+ txn->prepare = __txn_prepare;
+ txn->set_timeout = __txn_set_timeout;
/*
* If this is a transaction family, we must link the child to the
@@ -242,7 +422,7 @@ __txn_begin(txn)
if (txn->parent != NULL && LOCKING_ON(dbenv))
if ((ret = __lock_addfamilylocker(dbenv,
txn->parent->txnid, txn->txnid)) != 0)
- goto err2;
+ return (ret);
if (F_ISSET(txn, TXN_MALLOC)) {
MUTEX_THREAD_LOCK(dbenv, mgr->mutexp);
@@ -252,34 +432,33 @@ __txn_begin(txn)
return (0);
-err1: R_UNLOCK(dbenv, &mgr->reginfo);
-
-err2: return (ret);
+err: R_UNLOCK(dbenv, &mgr->reginfo);
+ return (ret);
}
/*
- * txn_commit --
+ * __txn_commit --
* Commit a transaction.
+ *
+ * PUBLIC: int __txn_commit __P((DB_TXN *, u_int32_t));
*/
int
-txn_commit(txnp, flags)
+__txn_commit(txnp, flags)
DB_TXN *txnp;
u_int32_t flags;
{
DB_ENV *dbenv;
+ DB_LOCKREQ request;
DB_TXN *kid;
- int is_commit, ret, t_ret;
+ TXN_DETAIL *td;
+ u_int32_t lflags;
+ int ret, t_ret;
dbenv = txnp->mgrp->dbenv;
-#ifdef HAVE_RPC
- if (F_ISSET(dbenv, DB_ENV_RPCCLIENT))
- return (__dbcl_txn_commit(txnp, flags));
-#endif
-
PANIC_CHECK(dbenv);
- if ((ret = __txn_isvalid(txnp, NULL, TXN_COMMITTED)) != 0)
+ if ((ret = __txn_isvalid(txnp, &td, TXN_OP_COMMIT)) != 0)
return (ret);
/*
@@ -290,10 +469,10 @@ txn_commit(txnp, flags)
* specifying the wrong flag for some reason.
*/
if (__db_fchk(dbenv,
- "txn_commit", flags, DB_TXN_NOSYNC | DB_TXN_SYNC) != 0)
+ "DB_TXN->commit", flags, DB_TXN_NOSYNC | DB_TXN_SYNC) != 0)
flags = DB_TXN_SYNC;
if (__db_fcchk(dbenv,
- "txn_commit", flags, DB_TXN_NOSYNC, DB_TXN_SYNC) != 0)
+ "DB_TXN->commit", flags, DB_TXN_NOSYNC, DB_TXN_SYNC) != 0)
flags = DB_TXN_SYNC;
if (LF_ISSET(DB_TXN_NOSYNC)) {
F_CLR(txnp, TXN_SYNC);
@@ -305,16 +484,33 @@ txn_commit(txnp, flags)
}
/*
- * Commit any unresolved children. If there's an error, abort any
- * unresolved children and the parent.
+ * Commit any unresolved children. If anyone fails to commit,
+ * then try to abort the rest of the kids and then abort the parent.
+ * Abort should never fail; if it does, we bail out immediately.
*/
while ((kid = TAILQ_FIRST(&txnp->kids)) != NULL)
- if ((ret = txn_commit(kid, flags)) != 0) {
+ if ((ret = kid->commit(kid, flags)) != 0)
while ((kid = TAILQ_FIRST(&txnp->kids)) != NULL)
- (void)txn_abort(kid);
- (void)txn_abort(txnp);
- goto err;
- }
+ if ((t_ret = kid->abort(kid)) != 0)
+ return (__db_panic(dbenv, t_ret));
+
+ /*
+ * Process any aborted pages from our children.
+ * We delay putting pages on the free list that are newly
+ * allocated and then aborted so that we can undo other
+ * allocations, if necessary, without worrying about
+ * these pages which were not on the free list before.
+ */
+ if (txnp->txn_list != NULL) {
+ t_ret = __db_do_the_limbo(dbenv, NULL, txnp, txnp->txn_list);
+ __db_txnlist_end(dbenv, txnp->txn_list);
+ txnp->txn_list = NULL;
+ if (t_ret != 0 && ret == 0)
+ ret = t_ret;
+ }
+
+ if (ret != 0)
+ goto err;
/*
* If there are any log records, write a log record and sync the log,
@@ -322,27 +518,35 @@ txn_commit(txnp, flags)
* we do not need to commit the child synchronously since it may still
* abort (if its parent aborts), and otherwise its parent or ultimate
* ancestor will write synchronously.
- *
- * I'd rather return a logging error than a flag-wrong error, so if
- * the log routines fail, set "ret" without regard to previous value.
*/
- if (LOGGING_ON(dbenv) && !IS_ZERO_LSN(txnp->last_lsn)) {
+ if (DBENV_LOGGING(dbenv) && !IS_ZERO_LSN(txnp->last_lsn)) {
if (txnp->parent == NULL) {
- if ((t_ret = __txn_regop_log(dbenv,
- txnp, &txnp->last_lsn,
- (F_ISSET(dbenv, DB_ENV_TXN_NOSYNC) &&
- !F_ISSET(txnp, TXN_SYNC)) ||
- F_ISSET(txnp, TXN_NOSYNC) ? 0 : DB_FLUSH,
- TXN_COMMIT, (int32_t)time(NULL))) != 0) {
- ret = t_ret;
+ /*
+ * We are about to free all the read locks
+ * for this transaction below. Some of those
+ * locks might be handle locks which should
+ * not be freed, because they will be freed
+ * when the handle is closed. Check the
+ * events and preprocess any trades now so
+ * that we don't release the locks below.
+ */
+ if ((ret = __txn_doevents(dbenv, txnp, 0, 1)) != 0)
+ goto err;
+ request.op = DB_LOCK_PUT_READ;
+ if (LOCKING_ON(dbenv) && (ret = dbenv->lock_vec(
+ dbenv, txnp->txnid, 0, &request, 1, NULL)) != 0)
+ goto err;
+
+ SET_LOG_FLAGS(dbenv, txnp, lflags);
+ if ((ret = __txn_regop_log(dbenv,
+ txnp, &txnp->last_lsn, lflags,
+ TXN_COMMIT, (int32_t)time(NULL))) != 0)
goto err;
- }
} else {
/* Log the commit in the parent! */
- if ((t_ret = __txn_child_log(dbenv,
+ if ((ret = __txn_child_log(dbenv,
txnp->parent, &txnp->parent->last_lsn,
0, txnp->txnid, &txnp->last_lsn)) != 0) {
- ret = t_ret;
goto err;
}
@@ -350,108 +554,206 @@ txn_commit(txnp, flags)
}
}
- is_commit = 1;
- if (0) {
-err: is_commit = 0;
- }
- if ((t_ret = __txn_end(txnp, is_commit)) != 0 && ret == 0)
+ /* This is OK because __txn_end can only fail with a panic. */
+ return (__txn_end(txnp, 1));
+
+err: /*
+ * If we are prepared, then we "must" be able to commit. We
+ * panic here because even though the coordinator might be
+ * able to retry, it is not clear it would know to do that.
+ * Otherwise we'll try to abort. If that is successful,
+ * then we return whatever was in ret (i.e., the reason we failed).
+ * If the abort was unsuccessful, then abort probably returned
+ * DB_RUNRECOVERY and we need to propagate that up.
+ */
+ if (td->status == TXN_PREPARED)
+ return (__db_panic(dbenv, ret));
+
+ if ((t_ret = txnp->abort(txnp)) != 0)
ret = t_ret;
return (ret);
}
/*
- * txn_abort --
+ * __txn_abort --
* Abort a transaction.
+ *
+ * PUBLIC: int __txn_abort __P((DB_TXN *));
*/
int
-txn_abort(txnp)
+__txn_abort(txnp)
DB_TXN *txnp;
{
DB_ENV *dbenv;
+ DB_LOCKREQ request;
DB_TXN *kid;
- int ret, t_ret;
+ TXN_DETAIL *td;
+ u_int32_t lflags;
+ int ret;
dbenv = txnp->mgrp->dbenv;
-#ifdef HAVE_RPC
- if (F_ISSET(dbenv, DB_ENV_RPCCLIENT))
- return (__dbcl_txn_abort(txnp));
-#endif
+ PANIC_CHECK(dbenv);
+
+ /* Ensure that abort always fails fatally. */
+ if ((ret = __txn_isvalid(txnp, &td, TXN_OP_ABORT)) != 0)
+ return (__db_panic(dbenv, ret));
+
+ /*
+ * Try to abort any unresolved children.
+ *
+ * Abort either succeeds or panics the region. As soon as we
+ * see any failure, we just get out of here and return the panic
+ * up.
+ */
+ while ((kid = TAILQ_FIRST(&txnp->kids)) != NULL)
+ if ((ret = kid->abort(kid)) != 0)
+ return (ret);
+
+ if (LOCKING_ON(dbenv)) {
+ /*
+ * We are about to free all the read locks for this transaction
+ * below. Some of those locks might be handle locks which
+ * should not be freed, because they will be freed when the
+ * handle is closed. Check the events and preprocess any
+ * trades now so that we don't release the locks below.
+ */
+ if ((ret = __txn_doevents(dbenv, txnp, 0, 1)) != 0)
+ return (__db_panic(dbenv, ret));
+
+ /* Turn off timeouts. */
+ if ((ret = __lock_set_timeout(dbenv,
+ txnp->txnid, 0, DB_SET_TXN_TIMEOUT)) != 0)
+ return (__db_panic(dbenv, ret));
+
+ if ((ret = __lock_set_timeout(dbenv,
+ txnp->txnid, 0, DB_SET_LOCK_TIMEOUT)) != 0)
+ return (__db_panic(dbenv, ret));
+
+ request.op = DB_LOCK_UPGRADE_WRITE;
+ if ((ret = dbenv->lock_vec(
+ dbenv, txnp->txnid, 0, &request, 1, NULL)) != 0)
+ return (__db_panic(dbenv, ret));
+ }
+ if ((ret = __txn_undo(txnp)) != 0)
+ return (__db_panic(dbenv, ret));
+
+ /*
+ * Normally, we do not need to log aborts. However, if we
+ * are a distributed transaction (i.e., we have a prepare),
+ * then we log the abort so we know that this transaction
+ * was actually completed.
+ */
+ SET_LOG_FLAGS(dbenv, txnp, lflags);
+ if (DBENV_LOGGING(dbenv) && td->status == TXN_PREPARED &&
+ (ret = __txn_regop_log(dbenv, txnp, &txnp->last_lsn,
+ lflags, TXN_ABORT, (int32_t)time(NULL))) != 0)
+ return (__db_panic(dbenv, ret));
+
+ /* __txn_end always panics if it errors, so pass the return along. */
+ return (__txn_end(txnp, 0));
+}
+
+/*
+ * __txn_discard --
+ * Free the per-process resources associated with this txn handle.
+ *
+ * PUBLIC: int __txn_discard __P((DB_TXN *, u_int32_t flags));
+ */
+int
+__txn_discard(txnp, flags)
+ DB_TXN *txnp;
+ u_int32_t flags;
+{
+ DB_ENV *dbenv;
+ DB_TXN *freep;
+ TXN_DETAIL *td;
+ int ret;
+
+ COMPQUIET(flags, 0);
+
+ dbenv = txnp->mgrp->dbenv;
+ freep = NULL;
PANIC_CHECK(dbenv);
- if ((ret = __txn_isvalid(txnp, NULL, TXN_ABORTED)) != 0)
+ if ((ret = __txn_isvalid(txnp, &td, TXN_OP_DISCARD)) != 0)
return (ret);
- /* Abort any unresolved children. */
- while ((kid = TAILQ_FIRST(&txnp->kids)) != NULL)
- if ((t_ret = txn_abort(kid)) != 0 && ret == 0)
- ret = t_ret;
+ /* Should be no children. */
+ DB_ASSERT(TAILQ_FIRST(&txnp->kids) == NULL);
+ DB_ASSERT(F_ISSET(td, TXN_RESTORED));
- if ((t_ret = __txn_undo(txnp)) != 0 && ret == 0)
- ret = t_ret;
+ /* Free the space. */
+ MUTEX_THREAD_LOCK(dbenv, txnp->mgrp->mutexp);
+ txnp->mgrp->n_discards++;
+ if (F_ISSET(txnp, TXN_MALLOC)) {
+ TAILQ_REMOVE(&txnp->mgrp->txn_chain, txnp, links);
+ freep = txnp;
+ }
+ MUTEX_THREAD_UNLOCK(dbenv, txnp->mgrp->mutexp);
+ if (freep != NULL)
+ __os_free(dbenv, freep);
- if ((t_ret = __txn_end(txnp, 0)) != 0 && ret == 0)
- ret = t_ret;
- return (ret);
+ return (0);
}
/*
- * txn_prepare --
+ * __txn_prepare --
* Flush the log so a future commit is guaranteed to succeed.
+ *
+ * PUBLIC: int __txn_prepare __P((DB_TXN *, u_int8_t *));
*/
int
-txn_prepare(txnp)
+__txn_prepare(txnp, gid)
DB_TXN *txnp;
+ u_int8_t *gid;
{
DBT xid;
DB_ENV *dbenv;
DB_TXN *kid;
TXN_DETAIL *td;
+ u_int32_t lflags;
int ret;
dbenv = txnp->mgrp->dbenv;
-#ifdef HAVE_RPC
- if (F_ISSET(dbenv, DB_ENV_RPCCLIENT))
- return (__dbcl_txn_prepare(txnp));
-#endif
-
PANIC_CHECK(dbenv);
- if ((ret = __txn_isvalid(txnp, &td, TXN_PREPARED)) != 0)
+ if ((ret = __txn_isvalid(txnp, &td, TXN_OP_PREPARE)) != 0)
return (ret);
- /* Prepare any unresolved children. */
+ /* Commit any unresolved children. */
while ((kid = TAILQ_FIRST(&txnp->kids)) != NULL)
- if ((ret = txn_prepare(kid)) != 0)
+ if ((ret = kid->commit(kid, DB_TXN_NOSYNC)) != 0)
return (ret);
/*
- * We indicate that a transaction is an XA transaction by putting
- * a valid size in the xid.size fiels. XA requires that the transaction
- * be either ENDED or SUSPENDED when prepare is called, so we know
- * that if the xa_status isn't in one of those states, but we are
- * calling prepare that we are not an XA transaction.
+ * In XA, the global transaction ID in the txn_detail structure is
+ * already set; in a non-XA environment, we must set it here. XA
+ * requires that the transaction be either ENDED or SUSPENDED when
+ * prepare is called, so we know that if the xa_status isn't in one
+ * of those states, then we are calling prepare directly and we need
+ * to fill in the td->xid.
*/
-
- if (LOGGING_ON(dbenv)) {
+ if (DBENV_LOGGING(dbenv)) {
memset(&xid, 0, sizeof(xid));
+ if (td->xa_status != TXN_XA_ENDED &&
+ td->xa_status != TXN_XA_SUSPENDED)
+ /* Regular prepare; fill in the gid. */
+ memcpy(td->xid, gid, sizeof(td->xid));
+
+ xid.size = sizeof(td->xid);
xid.data = td->xid;
- xid.size = td->xa_status != TXN_XA_ENDED &&
- td->xa_status != TXN_XA_SUSPENDED ? 0 : sizeof(td->xid);
+
+ SET_LOG_FLAGS(dbenv, txnp, lflags);
if ((ret = __txn_xa_regop_log(dbenv, txnp, &txnp->last_lsn,
- (F_ISSET(dbenv, DB_ENV_TXN_NOSYNC) &&
- !F_ISSET(txnp, TXN_SYNC)) ||
- F_ISSET(txnp, TXN_NOSYNC) ? 0 : DB_FLUSH, TXN_PREPARE,
- &xid, td->format, td->gtrid, td->bqual,
+ lflags, TXN_PREPARE, &xid, td->format, td->gtrid, td->bqual,
&td->begin_lsn)) != 0) {
- __db_err(dbenv, "txn_prepare: log_write failed %s",
+ __db_err(dbenv, "DB_TXN->prepare: log_write failed %s",
db_strerror(ret));
return (ret);
}
- if (txnp->parent != NULL)
- F_SET(txnp->parent, TXN_CHILDCOMMIT);
}
MUTEX_THREAD_LOCK(dbenv, txnp->mgrp->mutexp);
@@ -461,17 +763,36 @@ txn_prepare(txnp)
}
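
Editor's note: for orientation, a short, hedged sketch of the caller's side of the new prepare interface follows: begin a transaction, do the updates, prepare it under a global ID, and later commit. The function name and global ID contents are invented; the method signatures match the handle methods installed by this patch (DB_TXN->prepare taking a u_int8_t gid of DB_XIDDATASIZE bytes).

#include <string.h>
#include <db.h>

/* Hypothetical two-phase-commit caller; not part of this patch. */
int
prepare_then_commit(DB_ENV *dbenv)
{
	DB_TXN *txn;
	u_int8_t gid[DB_XIDDATASIZE];
	int ret;

	if ((ret = dbenv->txn_begin(dbenv, NULL, &txn, 0)) != 0)
		return (ret);

	/* ... database updates under txn go here ... */

	memset(gid, 0, sizeof(gid));
	memcpy(gid, "example-gtrid-0001", 18);	/* made-up global ID */

	/* Phase 1: after this returns, a later commit must succeed. */
	if ((ret = txn->prepare(txn, gid)) != 0) {
		(void)txn->abort(txn);
		return (ret);
	}

	/* Phase 2: resolve the transaction. */
	return (txn->commit(txn, 0));
}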
/*
- * txn_id --
+ * __txn_id --
* Return the transaction ID.
+ *
+ * PUBLIC: u_int32_t __txn_id __P((DB_TXN *));
*/
u_int32_t
-txn_id(txnp)
+__txn_id(txnp)
DB_TXN *txnp;
{
return (txnp->txnid);
}
/*
+ * __txn_set_timeout --
+ * Set timeout values in the txn structure.
+ */
+static int
+__txn_set_timeout(txnp, timeout, op)
+ DB_TXN *txnp;
+ db_timeout_t timeout;
+ u_int32_t op;
+{
+ if (op != DB_SET_TXN_TIMEOUT && op != DB_SET_LOCK_TIMEOUT)
+ return (__db_ferr(txnp->mgrp->dbenv, "DB_TXN->set_timeout", 0));
+
+ return (__lock_set_timeout(
+ txnp->mgrp->dbenv, txnp->txnid, timeout, op));
+}
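
Editor's note: as a usage sketch for the set_timeout method above, the fragment below shows a caller setting per-transaction timeouts. Timeout values are in microseconds as elsewhere in Berkeley DB; the specific values and the wrapper name are arbitrary.

#include <db.h>

/* Hypothetical caller; not part of this patch. */
int
begin_with_timeouts(DB_ENV *dbenv, DB_TXN **txnp)
{
	DB_TXN *txn;
	int ret;

	if ((ret = dbenv->txn_begin(dbenv, NULL, &txn, 0)) != 0)
		return (ret);

	/* Time out lock waits after 1s, the whole transaction after 5s. */
	if ((ret = txn->set_timeout(txn, 1000000, DB_SET_LOCK_TIMEOUT)) != 0 ||
	    (ret = txn->set_timeout(txn, 5000000, DB_SET_TXN_TIMEOUT)) != 0) {
		(void)txn->abort(txn);
		return (ret);
	}
	*txnp = txn;
	return (0);
}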
+
+/*
* __txn_isvalid --
* Return 0 if the txnp is reasonable, otherwise panic.
*/
@@ -479,12 +800,22 @@ static int
__txn_isvalid(txnp, tdp, op)
const DB_TXN *txnp;
TXN_DETAIL **tdp;
- u_int32_t op;
+ txnop_t op;
{
DB_TXNMGR *mgrp;
+ DB_TXNREGION *region;
TXN_DETAIL *tp;
mgrp = txnp->mgrp;
+ region = mgrp->reginfo.primary;
+
+ /* Check for recovery. */
+ if (!F_ISSET(txnp, TXN_COMPENSATE) &&
+ F_ISSET(region, TXN_IN_RECOVERY)) {
+ __db_err(mgrp->dbenv,
+ "operation not permitted during recovery");
+ goto err;
+ }
/* Check for live cursors. */
if (txnp->cursors != 0) {
@@ -492,25 +823,70 @@ __txn_isvalid(txnp, tdp, op)
goto err;
}
- /* Check transaction's status. */
+ /* Check transaction's state. */
tp = (TXN_DETAIL *)R_ADDR(&mgrp->reginfo, txnp->off);
if (tdp != NULL)
*tdp = tp;
+ /* Handle any operation specific checks. */
+ switch (op) {
+ case TXN_OP_DISCARD:
+ /*
+ * Since we're just tossing the per-process space, there are
+ * a lot of problems with the transaction that we can tolerate.
+ */
+
+ /* Transaction has already been reused. */
+ if (txnp->txnid != tp->txnid)
+ return (0);
+
+ /* What we've got had better be a restored transaction. */
+ if (!F_ISSET(tp, TXN_RESTORED)) {
+ __db_err(mgrp->dbenv, "not a restored transaction");
+ return (__db_panic(mgrp->dbenv, EINVAL));
+ }
+
+ return (0);
+ case TXN_OP_PREPARE:
+ if (txnp->parent != NULL) {
+ /*
+ * This is not fatal, because you could imagine an
+ * application that simply prepares everybody because
+ * it doesn't distinguish between children and parents.
+ * I'm not arguing this is good, but I could imagine
+ * someone doing it.
+ */
+ __db_err(mgrp->dbenv,
+ "Prepare disallowed on child transactions");
+ return (EINVAL);
+ }
+ break;
+ case TXN_OP_ABORT:
+ case TXN_OP_COMMIT:
+ default:
+ break;
+ }
+
switch (tp->status) {
+ case TXN_PREPARED:
+ if (op == TXN_OP_PREPARE) {
+ __db_err(mgrp->dbenv, "transaction already prepared");
+ /*
+ * Txn_prepare doesn't blow away the user handle, so
+ * in this case, give the user the opportunity to
+ * abort or commit.
+ */
+ return (EINVAL);
+ }
+ break;
+ case TXN_RUNNING:
+ break;
case TXN_ABORTED:
case TXN_COMMITTED:
default:
__db_err(mgrp->dbenv, "transaction already %s",
tp->status == TXN_COMMITTED ? "committed" : "aborted");
goto err;
- case TXN_PREPARED:
- if (op == TXN_PREPARED) {
- __db_err(mgrp->dbenv, "transaction already prepared");
- goto err;
- }
- case TXN_RUNNING:
- break;
}
return (0);
@@ -526,10 +902,8 @@ err: /*
/*
* __txn_end --
* Internal transaction end routine.
- *
- * PUBLIC: int __txn_end __P((DB_TXN *, int));
*/
-int
+static int
__txn_end(txnp, is_commit)
DB_TXN *txnp;
int is_commit;
@@ -539,46 +913,59 @@ __txn_end(txnp, is_commit)
DB_TXNMGR *mgr;
DB_TXNREGION *region;
TXN_DETAIL *tp;
- int ret;
+ int do_closefiles, ret;
mgr = txnp->mgrp;
dbenv = mgr->dbenv;
region = mgr->reginfo.primary;
+ do_closefiles = 0;
+
+ /* Process commit events. */
+ if ((ret = __txn_doevents(dbenv, txnp, is_commit, 0)) != 0)
+ return (__db_panic(dbenv, ret));
/* Release the locks. */
request.op = txnp->parent == NULL ||
is_commit == 0 ? DB_LOCK_PUT_ALL : DB_LOCK_INHERIT;
- if (LOCKING_ON(dbenv)) {
- ret = lock_vec(dbenv, txnp->txnid, 0, &request, 1, NULL);
- if (ret != 0 && (ret != DB_LOCK_DEADLOCK || is_commit)) {
- __db_err(dbenv, "%s: release locks failed %s",
- is_commit ? "txn_commit" : "txn_abort",
- db_strerror(ret));
- __db_panic(dbenv, ret);
- }
- }
+ /*
+ * __txn_end cannot return a simple error; we MUST return
+ * success/failure from commit or abort, ignoring any internal
+ * errors. So, we panic if something goes wrong. We can't
+ * deadlock here because we're not acquiring any new locks,
+ * so DB_LOCK_DEADLOCK is just as fatal as any other error.
+ */
+ if (LOCKING_ON(dbenv) && (ret = dbenv->lock_vec(
+ dbenv, txnp->txnid, DB_LOCK_FREE_LOCKER, &request, 1, NULL)) != 0)
+ return (__db_panic(dbenv, ret));
/* End the transaction. */
R_LOCK(dbenv, &mgr->reginfo);
tp = (TXN_DETAIL *)R_ADDR(&mgr->reginfo, txnp->off);
SH_TAILQ_REMOVE(&region->active_txn, tp, links, __txn_detail);
+ if (F_ISSET(tp, TXN_RESTORED)) {
+ region->stat.st_nrestores--;
+ do_closefiles = region->stat.st_nrestores == 0;
+ }
+
__db_shalloc_free(mgr->reginfo.addr, tp);
if (is_commit)
- region->ncommits++;
+ region->stat.st_ncommits++;
else
- region->naborts++;
- --region->nactive;
+ region->stat.st_naborts++;
+ --region->stat.st_nactive;
R_UNLOCK(dbenv, &mgr->reginfo);
/*
- * The transaction cannot get more locks, remove its locker info.
+ * The transaction cannot get more locks, remove its locker info,
+ * if any.
*/
- if (LOCKING_ON(dbenv))
- __lock_freefamilylocker(dbenv->lk_handle, txnp->txnid);
+ if (LOCKING_ON(dbenv) && (ret =
+ __lock_freefamilylocker(dbenv->lk_handle, txnp->txnid)) != 0)
+ return (__db_panic(dbenv, ret));
if (txnp->parent != NULL)
TAILQ_REMOVE(&txnp->parent->kids, txnp, klinks);
@@ -588,9 +975,16 @@ __txn_end(txnp, is_commit)
TAILQ_REMOVE(&mgr->txn_chain, txnp, links);
MUTEX_THREAD_UNLOCK(dbenv, mgr->mutexp);
- __os_free(txnp, sizeof(*txnp));
+ __os_free(dbenv, txnp);
}
+ if (do_closefiles) {
+ F_SET((DB_LOG *)dbenv->lg_handle, DBLOG_RECOVER);
+ (void)__dbreg_close_files(dbenv);
+ F_CLR((DB_LOG *)dbenv->lg_handle, DBLOG_RECOVER);
+ mgr->n_discards = 0;
+ (void)dbenv->txn_checkpoint(dbenv, 0, 0, DB_FORCE);
+ }
return (0);
}
@@ -605,16 +999,19 @@ __txn_undo(txnp)
{
DBT rdbt;
DB_ENV *dbenv;
+ DB_LOGC *logc;
DB_LSN key_lsn;
+ DB_TXN *ptxn;
DB_TXNMGR *mgr;
+ int ret, t_ret;
void *txnlist;
- int ret, threaded;
mgr = txnp->mgrp;
dbenv = mgr->dbenv;
+ logc = NULL;
txnlist = NULL;
- if (!LOGGING_ON(dbenv))
+ if (!DBENV_LOGGING(dbenv))
return (0);
/*
@@ -623,53 +1020,66 @@ __txn_undo(txnp)
* allocation here and use DB_DBT_USERMEM.
*/
memset(&rdbt, 0, sizeof(rdbt));
- threaded = F_ISSET(dbenv, DB_ENV_THREAD) ? 1 : 0;
- if (threaded)
- F_SET(&rdbt, DB_DBT_MALLOC);
key_lsn = txnp->last_lsn;
- /* Allocate a transaction list for children or aborted page creates. */
- if ((ret = __db_txnlist_init(dbenv, &txnlist)) != 0)
+ /*
+ * Allocate a txnlist for children and aborted page allocs.
+ * We need to associate the list with the maximal parent
+ * so that aborted pages are recovered when that transaction
+ * is committed or aborted.
+ */
+ for (ptxn = txnp->parent; ptxn != NULL && ptxn->parent != NULL;)
+ ptxn = ptxn->parent;
+
+ if (ptxn != NULL && ptxn->txn_list != NULL)
+ txnlist = ptxn->txn_list;
+ else if (txnp->txn_list != NULL)
+ txnlist = txnp->txn_list;
+ else if ((ret = __db_txnlist_init(dbenv, 0, 0, NULL, &txnlist)) != 0)
return (ret);
+ else if (ptxn != NULL)
+ ptxn->txn_list = txnlist;
if (F_ISSET(txnp, TXN_CHILDCOMMIT) &&
- (ret = __db_txnlist_lsninit(dbenv,
- txnlist, &txnp->last_lsn)) != 0)
+ (ret = __db_txnlist_lsninit(dbenv, txnlist, &txnp->last_lsn)) != 0)
return (ret);
- for (ret = 0; ret == 0 && !IS_ZERO_LSN(key_lsn);) {
+ if ((ret = dbenv->log_cursor(dbenv, &logc, 0)) != 0)
+ goto err;
+ while (ret == 0 && !IS_ZERO_LSN(key_lsn)) {
/*
* The dispatch routine returns the lsn of the record
* before the current one in the key_lsn argument.
*/
- if ((ret = log_get(dbenv, &key_lsn, &rdbt, DB_SET)) == 0) {
- ret = __db_dispatch(dbenv,
- &rdbt, &key_lsn, DB_TXN_ABORT, txnlist);
- if (threaded && rdbt.data != NULL) {
- __os_free(rdbt.data, rdbt.size);
- rdbt.data = NULL;
- }
+ if ((ret = logc->get(logc, &key_lsn, &rdbt, DB_SET)) == 0) {
+ ret = __db_dispatch(dbenv, dbenv->recover_dtab,
+ dbenv->recover_dtab_size, &rdbt, &key_lsn,
+ DB_TXN_ABORT, txnlist);
if (F_ISSET(txnp, TXN_CHILDCOMMIT))
(void)__db_txnlist_lsnadd(dbenv,
txnlist, &key_lsn, 0);
}
- if (ret != 0) {
+ if (ret == DB_SURPRISE_KID) {
+ if ((ret = __db_txnlist_lsninit(
+ dbenv, txnlist, &key_lsn)) == 0)
+ F_SET(txnp, TXN_CHILDCOMMIT);
+ } else if (ret != 0) {
__db_err(txnp->mgrp->dbenv,
- "txn_abort: Log undo failed for LSN: %lu %lu: %s",
+ "DB_TXN->abort: Log undo failed for LSN: %lu %lu: %s",
(u_long)key_lsn.file, (u_long)key_lsn.offset,
db_strerror(ret));
- if (txnlist != NULL)
- __db_txnlist_end(dbenv, txnlist);
- return (ret);
+ goto err;
}
}
- if (txnlist != NULL) {
- __db_do_the_limbo(dbenv, txnlist);
- __db_txnlist_end(dbenv, txnlist);
- }
+ ret = __db_do_the_limbo(dbenv, ptxn, txnp, txnlist);
+err: if (logc != NULL && (t_ret = logc->close(logc, 0)) != 0 && ret == 0)
+ ret = t_ret;
+
+ if (ptxn == NULL && txnlist != NULL)
+ __db_txnlist_end(dbenv, txnlist);
return (ret);
}
@@ -682,150 +1092,115 @@ __txn_undo(txnp)
* its first LSN. This is the lowest LSN we can checkpoint, since any record
 * written after that point may be involved in a transaction and may
* therefore need to be undone in the case of an abort.
+ *
+ * PUBLIC: int __txn_checkpoint
+ * PUBLIC: __P((DB_ENV *, u_int32_t, u_int32_t, u_int32_t));
*/
int
-txn_checkpoint(dbenv, kbytes, minutes, flags)
+__txn_checkpoint(dbenv, kbytes, minutes, flags)
DB_ENV *dbenv;
u_int32_t kbytes, minutes, flags;
{
- DB_LOG *dblp;
- DB_LSN ckp_lsn, sync_lsn, last_ckp;
+ DB_LSN ckp_lsn, last_ckp;
DB_TXNMGR *mgr;
DB_TXNREGION *region;
- LOG *lp;
TXN_DETAIL *txnp;
time_t last_ckp_time, now;
u_int32_t bytes, mbytes;
- int interval, ret;
+ int ret;
-#ifdef HAVE_RPC
- if (F_ISSET(dbenv, DB_ENV_RPCCLIENT))
- return (__dbcl_txn_checkpoint(dbenv, kbytes, minutes));
-#endif
PANIC_CHECK(dbenv);
- ENV_REQUIRES_CONFIG(dbenv, dbenv->tx_handle, DB_INIT_TXN);
-
- mgr = dbenv->tx_handle;
- region = mgr->reginfo.primary;
- dblp = dbenv->lg_handle;
- lp = dblp->reginfo.primary;
+ ENV_REQUIRES_CONFIG(dbenv,
+ dbenv->tx_handle, "txn_checkpoint", DB_INIT_TXN);
/*
- * Check if we need to checkpoint.
+ * On a replication client, all transactions are read-only; therefore,
+ * a checkpoint is a null-op.
+ *
+ * We permit txn_checkpoint, instead of just rendering it illegal,
+ * so that an application can just let a checkpoint thread continue
+ * to operate as it gets promoted or demoted between being a
+ * master and a client.
*/
- ZERO_LSN(ckp_lsn);
+ if (F_ISSET(dbenv, DB_ENV_REP_CLIENT))
+ return (0);
- if (LF_ISSET(DB_FORCE))
- goto do_ckp;
+ mgr = dbenv->tx_handle;
+ region = mgr->reginfo.primary;
- R_LOCK(dbenv, &dblp->reginfo);
- mbytes = lp->stat.st_wc_mbytes;
/*
- * We add the current buffer offset so as to count bytes that
- * have not yet been written, but are sitting in the log buffer.
+ * The checkpoint LSN is an LSN such that all transactions begun before
+ * it are complete. Our first guess (corrected below based on the list
+ * of active transactions) is the last-written LSN.
*/
- bytes = lp->stat.st_wc_bytes + lp->b_off;
- ckp_lsn = lp->lsn;
- R_UNLOCK(dbenv, &dblp->reginfo);
-
- /* Don't checkpoint a quiescent database. */
- if (bytes == 0 && mbytes == 0)
- return (0);
+ __log_txn_lsn(dbenv, &ckp_lsn, &mbytes, &bytes);
- if (kbytes != 0 && mbytes * 1024 + bytes / 1024 >= (u_int32_t)kbytes)
- goto do_ckp;
+ if (!LF_ISSET(DB_FORCE)) {
+ /* Don't checkpoint a quiescent database. */
+ if (bytes == 0 && mbytes == 0)
+ return (0);
- if (minutes != 0) {
- (void)time(&now);
+ if (kbytes != 0 &&
+ mbytes * 1024 + bytes / 1024 >= (u_int32_t)kbytes)
+ goto do_ckp;
- R_LOCK(dbenv, &mgr->reginfo);
- last_ckp_time = region->time_ckp;
- R_UNLOCK(dbenv, &mgr->reginfo);
+ if (minutes != 0) {
+ (void)time(&now);
- if (now - last_ckp_time >= (time_t)(minutes * 60))
- goto do_ckp;
- }
+ R_LOCK(dbenv, &mgr->reginfo);
+ last_ckp_time = region->time_ckp;
+ R_UNLOCK(dbenv, &mgr->reginfo);
- /*
- * If we checked time and data and didn't go to checkpoint,
- * we're done.
- */
- if (minutes != 0 || kbytes != 0)
- return (0);
+ if (now - last_ckp_time >= (time_t)(minutes * 60))
+ goto do_ckp;
+ }
-do_ckp:
- if (IS_ZERO_LSN(ckp_lsn)) {
- R_LOCK(dbenv, &dblp->reginfo);
- ckp_lsn = lp->lsn;
- R_UNLOCK(dbenv, &dblp->reginfo);
+ /*
+ * If we checked time and data and didn't go to checkpoint,
+ * we're done.
+ */
+ if (minutes != 0 || kbytes != 0)
+ return (0);
}
- /*
- * We have to find an LSN such that all transactions begun
- * before that LSN are complete.
- */
+do_ckp: /* Look through the active transactions for the lowest begin LSN. */
R_LOCK(dbenv, &mgr->reginfo);
-
- if (IS_ZERO_LSN(region->pending_ckp)) {
- for (txnp =
- SH_TAILQ_FIRST(&region->active_txn, __txn_detail);
- txnp != NULL;
- txnp = SH_TAILQ_NEXT(txnp, links, __txn_detail)) {
-
- /*
- * Look through the active transactions for the
- * lowest begin lsn.
- */
- if (!IS_ZERO_LSN(txnp->begin_lsn) &&
- log_compare(&txnp->begin_lsn, &ckp_lsn) < 0)
- ckp_lsn = txnp->begin_lsn;
- }
- region->pending_ckp = ckp_lsn;
- } else
- ckp_lsn = region->pending_ckp;
-
+ for (txnp = SH_TAILQ_FIRST(&region->active_txn, __txn_detail);
+ txnp != NULL;
+ txnp = SH_TAILQ_NEXT(txnp, links, __txn_detail))
+ if (!IS_ZERO_LSN(txnp->begin_lsn) &&
+ log_compare(&txnp->begin_lsn, &ckp_lsn) < 0)
+ ckp_lsn = txnp->begin_lsn;
R_UNLOCK(dbenv, &mgr->reginfo);
+ if (MPOOL_ON(dbenv) && (ret = dbenv->memp_sync(dbenv, NULL)) != 0) {
+ __db_err(dbenv,
+ "txn_checkpoint: failed to flush the buffer cache %s",
+ db_strerror(ret));
+ return (ret);
+ }
+
/*
- * Try three times to sync the mpool buffers up to the specified LSN,
- * sleeping 1, 2 and 4 seconds between attempts.
+ * Because we can't be a replication client here, and because
+ * recovery (somewhat unusually) calls txn_checkpoint and expects
+ * it to write a log message, LOGGING_ON is the correct macro here.
*/
- if (MPOOL_ON(dbenv))
- for (interval = 1;;) {
- /*
- * memp_sync may change the lsn you pass it, so don't
- * pass it the actual ckp_lsn, pass it a local instead.
- */
- sync_lsn = ckp_lsn;
- if ((ret = memp_sync(dbenv, &sync_lsn)) == 0)
- break;
-
- /*
- * ret == DB_INCOMPLETE means there are still buffers
- * to flush, the checkpoint is not complete.
- */
- if (ret == DB_INCOMPLETE) {
- if (interval > 4)
- return (ret);
-
- (void)__os_sleep(dbenv, interval, 0);
- interval *= 2;
- } else {
- __db_err(dbenv,
- "txn_checkpoint: failure in memp_sync %s",
- db_strerror(ret));
- return (ret);
- }
- }
-
if (LOGGING_ON(dbenv)) {
R_LOCK(dbenv, &mgr->reginfo);
last_ckp = region->last_ckp;
- ZERO_LSN(region->pending_ckp);
R_UNLOCK(dbenv, &mgr->reginfo);
- if ((ret = __txn_ckp_log(dbenv,
- NULL, &ckp_lsn, DB_CHECKPOINT, &ckp_lsn,
+ /*
+ * Put out records for the open files before we log
+ * the checkpoint. The records are certain to be at
+ * or after ckp_lsn, but before the checkpoint record
+ * itself, so they're sure to be included if we start
+ * recovery from the ckp_lsn contained in this
+ * checkpoint.
+ */
+ if ((ret = __dbreg_open_files(dbenv)) != 0 ||
+ (ret = __txn_ckp_log(dbenv,
+ NULL, &ckp_lsn, DB_PERMANENT | DB_FLUSH, &ckp_lsn,
&last_ckp, (int32_t)time(NULL))) != 0) {
__db_err(dbenv,
"txn_checkpoint: log failed at LSN [%ld %ld] %s",
@@ -834,15 +1209,52 @@ do_ckp:
return (ret);
}
+ /*
+ * We want to make sure last_ckp only moves forward; since
+ * we drop locks above and in log_put, it's possible
+ * for two calls to __txn_ckp_log to finish in a different
+ * order from how they were called.
+ */
R_LOCK(dbenv, &mgr->reginfo);
- region->last_ckp = ckp_lsn;
- (void)time(&region->time_ckp);
+ if (log_compare(&region->last_ckp, &ckp_lsn) < 0) {
+ region->last_ckp = ckp_lsn;
+ (void)time(&region->time_ckp);
+ }
R_UNLOCK(dbenv, &mgr->reginfo);
}
return (0);
}
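
Editor's note: to make the kbytes/minutes thresholds checked above concrete, here is a hedged sketch of a checkpoint thread an application might run; the 512KB/1-minute thresholds, the 60-second sleep, and the function name are all arbitrary.

#include <unistd.h>
#include <db.h>

/* Hypothetical checkpoint thread body; not part of this patch. */
void *
checkpoint_thread(void *arg)
{
	DB_ENV *dbenv = arg;
	int ret;

	for (;;) {
		/* Checkpoint if >= 512KB of log or >= 1 minute has passed. */
		if ((ret = dbenv->txn_checkpoint(dbenv, 512, 1, 0)) != 0)
			dbenv->err(dbenv, ret, "txn_checkpoint");
		(void)sleep(60);
	}
	/* NOTREACHED */
	return (NULL);
}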
/*
+ * __txn_getckp --
+ * Get the LSN of the last transaction checkpoint.
+ *
+ * PUBLIC: int __txn_getckp __P((DB_ENV *, DB_LSN *));
+ */
+int
+__txn_getckp(dbenv, lsnp)
+ DB_ENV *dbenv;
+ DB_LSN *lsnp;
+{
+ DB_LSN lsn;
+ DB_TXNMGR *mgr;
+ DB_TXNREGION *region;
+
+ mgr = dbenv->tx_handle;
+ region = mgr->reginfo.primary;
+
+ R_LOCK(dbenv, &mgr->reginfo);
+ lsn = region->last_ckp;
+ R_UNLOCK(dbenv, &mgr->reginfo);
+
+ if (IS_ZERO_LSN(lsn))
+ return (DB_NOTFOUND);
+
+ *lsnp = lsn;
+ return (0);
+}
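
Editor's note: a brief sketch of how an internal caller might consume __txn_getckp follows; the pattern mirrors what __txn_get_prepared does later in this patch (fetch the last checkpoint LSN, then position a log cursor on it). The wrapper name is hypothetical.

#include <string.h>
#include <db.h>

/* Hypothetical internal-style caller; not part of this patch. */
static int
read_last_checkpoint(DB_ENV *dbenv, DBT *data)
{
	DB_LOGC *logc;
	DB_LSN ckp_lsn;
	int ret, t_ret;

	if ((ret = __txn_getckp(dbenv, &ckp_lsn)) != 0)
		return (ret);		/* DB_NOTFOUND: no checkpoint yet. */

	if ((ret = dbenv->log_cursor(dbenv, &logc, 0)) != 0)
		return (ret);
	memset(data, 0, sizeof(*data));
	ret = logc->get(logc, &ckp_lsn, data, DB_SET);
	if ((t_ret = logc->close(logc, 0)) != 0 && ret == 0)
		ret = t_ret;
	return (ret);
}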
+
+/*
* __txn_activekids --
* Return if this transaction has any active children.
*
@@ -858,7 +1270,7 @@ __txn_activekids(dbenv, rectype, txnp)
* On a child commit, we know that there are children (i.e., the
 * committing child) at the least. In that case, skip this check.
*/
- if (rectype == DB_txn_child)
+ if (F_ISSET(txnp, TXN_COMPENSATE) || rectype == DB___txn_child)
return (0);
if (TAILQ_FIRST(&txnp->kids) != NULL) {
@@ -867,3 +1279,128 @@ __txn_activekids(dbenv, rectype, txnp)
}
return (0);
}
+
+/*
+ * __txn_force_abort --
+ * Force an abort record into the log if the commit record
+ * failed to get to disk.
+ *
+ * PUBLIC: int __txn_force_abort __P((DB_ENV *, u_int8_t *));
+ */
+int
+__txn_force_abort(dbenv, buffer)
+ DB_ENV *dbenv;
+ u_int8_t *buffer;
+{
+ DB_CIPHER *db_cipher;
+ HDR *hdr;
+ u_int32_t offset, opcode, rec_len, rec_type, sum_len;
+ u_int8_t *bp, *key, chksum[DB_MAC_KEY];
+ size_t hdrsize;
+ int ret;
+
+ db_cipher = dbenv->crypto_handle;
+
+ /*
+ * This routine depends on the layout of HDR and the __txn_regop
+ * record in txn.src. We are passed the beginning of the commit
+ * record in the log buffer and overwrite the commit with an abort
+ * and recalculate the checksum. We may be passed a txn_xa_regop
+ * (that is, an XA prepare); there's no need to overwrite that one.
+ */
+ hdr = (HDR *)buffer;
+ memcpy(&rec_type, hdr, sizeof(rec_type));
+ if (rec_type == DB___txn_xa_regop)
+ return (0);
+
+ offset = sizeof(u_int32_t) + sizeof(u_int32_t) + sizeof(DB_LSN);
+ rec_len = offset + sizeof(u_int32_t) + sizeof(int32_t);
+ if (CRYPTO_ON(dbenv)) {
+ key = db_cipher->mac_key;
+ hdrsize = HDR_CRYPTO_SZ;
+ sum_len = DB_MAC_KEY;
+ if ((ret = db_cipher->decrypt(dbenv, db_cipher->data,
+ &hdr->iv[0], buffer + hdrsize, rec_len)) != 0)
+ return (__db_panic(dbenv, ret));
+ } else {
+ key = NULL;
+ hdrsize = HDR_NORMAL_SZ;
+ sum_len = sizeof(u_int32_t);
+ }
+ bp = buffer + hdrsize + offset;
+ opcode = TXN_ABORT;
+ memcpy(bp, &opcode, sizeof(opcode));
+
+ if (CRYPTO_ON(dbenv) &&
+ (ret = db_cipher->encrypt(dbenv,
+ db_cipher->data, &hdr->iv[0], buffer + hdrsize, rec_len)) != 0)
+ return (__db_panic(dbenv, ret));
+
+ __db_chksum(buffer + hdrsize, rec_len, key, chksum);
+ memcpy(buffer + SSZ(HDR, chksum), &chksum, sum_len);
+
+ return (0);
+}
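
Editor's note: for readers tracing the offset arithmetic in __txn_force_abort, the comment block below gives a rough picture of the record layout as I read the generated logging code; it is illustrative only (an assumption), and the authoritative layout is whatever the dist/ scripts generate from txn.src.

/*
 * Illustrative layout only (assumption, not taken verbatim from this patch):
 *
 *	HDR			log record header (HDR_NORMAL_SZ or
 *				HDR_CRYPTO_SZ bytes)
 *	u_int32_t type		record type (DB___txn_regop)
 *	u_int32_t txnid		transaction ID
 *	DB_LSN    prev_lsn	previous record for this transaction
 *	u_int32_t opcode	TXN_COMMIT -- the field overwritten above
 *	int32_t   timestamp
 *
 * "offset" skips type + txnid + prev_lsn to reach the opcode, and "rec_len"
 * covers the body through the timestamp so the checksum (and, with
 * encryption, the ciphertext) can be regenerated over the whole record.
 */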
+
+/*
+ * __txn_preclose
+ * Before we can close an environment, we need to check if we
+ * were in the midst of taking care of restored transactions. If
+ * so, then we need to close the files that we opened.
+ *
+ * PUBLIC: int __txn_preclose __P((DB_ENV *));
+ */
+int
+__txn_preclose(dbenv)
+ DB_ENV *dbenv;
+{
+ DB_TXNMGR *mgr;
+ DB_TXNREGION *region;
+ int do_closefiles, ret;
+
+ mgr = (DB_TXNMGR *)dbenv->tx_handle;
+ region = mgr->reginfo.primary;
+ do_closefiles = 0;
+
+ R_LOCK(dbenv, &mgr->reginfo);
+ if (region != NULL &&
+ region->stat.st_nrestores
+ <= mgr->n_discards && mgr->n_discards != 0)
+ do_closefiles = 1;
+ R_UNLOCK(dbenv, &mgr->reginfo);
+
+ if (do_closefiles) {
+ /*
+ * Set the DBLOG_RECOVER flag while closing these
+ * files so they do not create additional log records
+ * that will confuse future recoveries.
+ */
+ F_SET((DB_LOG *)dbenv->lg_handle, DBLOG_RECOVER);
+ ret = __dbreg_close_files(dbenv);
+ F_CLR((DB_LOG *)dbenv->lg_handle, DBLOG_RECOVER);
+ } else
+ ret = 0;
+
+ return (ret);
+}
+
+/*
+ * __txn_reset --
+ * Reset the last txnid to its minimum value, and log the reset.
+ *
+ * PUBLIC: int __txn_reset __P((DB_ENV *));
+ */
+int
+__txn_reset(dbenv)
+ DB_ENV *dbenv;
+{
+ DB_LSN scrap;
+ DB_TXNREGION *region;
+
+ region = ((DB_TXNMGR *)dbenv->tx_handle)->reginfo.primary;
+ region->last_txnid = TXN_MINIMUM;
+
+ DB_ASSERT(LOGGING_ON(dbenv));
+ return (__txn_recycle_log(dbenv,
+ NULL, &scrap, 0, TXN_MINIMUM, TXN_MAXIMUM));
+}
diff --git a/bdb/txn/txn.src b/bdb/txn/txn.src
index b1e131c2bd7..3f69b29e3ff 100644
--- a/bdb/txn/txn.src
+++ b/bdb/txn/txn.src
@@ -1,13 +1,14 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Copyright (c) 1996-2002
* Sleepycat Software. All rights reserved.
*
- * $Id: txn.src,v 11.10 2001/01/02 00:58:33 margo Exp $
+ * $Id: txn.src,v 11.19 2002/03/27 04:33:15 bostic Exp $
*/
-PREFIX txn
+PREFIX __txn
+DBPRIVATE
INCLUDE #include "db_config.h"
INCLUDE
@@ -15,25 +16,20 @@ INCLUDE #ifndef NO_SYSTEM_INCLUDES
INCLUDE #include <sys/types.h>
INCLUDE
INCLUDE #include <ctype.h>
-INCLUDE #include <errno.h>
INCLUDE #include <string.h>
INCLUDE #endif
INCLUDE
INCLUDE #include "db_int.h"
-INCLUDE #include "db_page.h"
-INCLUDE #include "db_dispatch.h"
-INCLUDE #include "db_am.h"
-INCLUDE #include "txn.h"
+INCLUDE #include "dbinc/crypto.h"
+INCLUDE #include "dbinc/db_page.h"
+INCLUDE #include "dbinc/db_dispatch.h"
+INCLUDE #include "dbinc/db_am.h"
+INCLUDE #include "dbinc/log.h"
+INCLUDE #include "dbinc/rep.h"
+INCLUDE #include "dbinc/txn.h"
INCLUDE
/*
- * DEPRECATED in 3.1 to add timestamps.
- */
-DEPRECATED old_regop 6
-ARG opcode u_int32_t lu
-END
-
-/*
* This is the standard log operation for commit.
* Note that we are using an int32_t for the timestamp. This means that
* in 2039 we will need to deprecate this log record and create one that
@@ -44,11 +40,6 @@ ARG opcode u_int32_t lu
ARG timestamp int32_t ld
END
-DEPRECATED old_ckp 7
-POINTER ckp_lsn DB_LSN * lu
-POINTER last_ckp DB_LSN * lu
-END
-
/*
* This is the checkpoint record. It contains the lsn that the checkpoint
* guarantees and a pointer to the last checkpoint so we can walk backwards
@@ -70,20 +61,19 @@ ARG timestamp int32_t ld
END
/*
- * This is the standard log operation for prepare (since right now
- * we only use prepare in an XA environment).
+ * This is the (new) log operation for a child commit. It is
+ * logged as a record in the PARENT. The child field contains
+ * the transaction ID of the child committing and the c_lsn is
+ * the last LSN of the child's log trail.
*/
-DEPRECATED xa_regop_old 8
-ARG opcode u_int32_t lu
-DBT xid DBT s
-ARG formatID int32_t ld
-ARG gtrid u_int32_t u
-ARG bqual u_int32_t u
+BEGIN child 12
+ARG child u_int32_t lx
+POINTER c_lsn DB_LSN * lu
END
+
/*
- * This is the standard log operation for prepare (since right now
- * we only use prepare in an XA environment).
+ * This is the standard log operation for prepare.
*/
BEGIN xa_regop 13
ARG opcode u_int32_t lu
@@ -95,20 +85,9 @@ POINTER begin_lsn DB_LSN * lu
END
/*
- * This is the log operation for a child commit.
- */
-DEPRECATED child_old 9
-ARG opcode u_int32_t lu
-ARG parent u_int32_t lx
-END
-
-/*
- * This is the (new) log operation for a child commit. It is
- * logged as a record in the PARENT. The child field contains
- * the transaction ID of the child committing and the c_lsn is
- * the last LSN of the child's log trail.
+ * Log the fact that we are recycling txnids.
*/
-BEGIN child 12
-ARG child u_int32_t lx
-POINTER c_lsn DB_LSN * lu
+BEGIN recycle 14
+ARG min u_int32_t u
+ARG max u_int32_t u
END
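
Editor's note: each BEGIN/ARG/POINTER/END block in this file is expanded by the dist/ scripts into an argument structure plus _log/_read/_print/_recover glue. A rough sketch of what the recycle record above turns into is shown below; the exact generated declarations live in the dbinc_auto headers and may differ in detail, but the _log and _read entry points are the ones this patch already calls from __txn_reset and __txn_recycle_recover.

/* Rough sketch of the generated interface (illustrative, may differ). */
typedef struct ___txn_recycle_args {
	u_int32_t type;		/* record type (DB___txn_recycle) */
	DB_TXN *txnid;		/* transaction "handle" during recovery */
	DB_LSN prev_lsn;	/* previous record for this transaction */
	u_int32_t min;		/* ARG min */
	u_int32_t max;		/* ARG max */
} __txn_recycle_args;

int __txn_recycle_log __P((DB_ENV *, DB_TXN *, DB_LSN *,
    u_int32_t, u_int32_t, u_int32_t));
int __txn_recycle_read __P((DB_ENV *, void *, __txn_recycle_args **));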
diff --git a/bdb/txn/txn_method.c b/bdb/txn/txn_method.c
new file mode 100644
index 00000000000..60fdf30583e
--- /dev/null
+++ b/bdb/txn/txn_method.c
@@ -0,0 +1,105 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: txn_method.c,v 11.62 2002/05/09 20:09:35 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#ifdef HAVE_RPC
+#include <rpc/rpc.h>
+#endif
+
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/txn.h"
+
+#ifdef HAVE_RPC
+#include "dbinc_auto/db_server.h"
+#include "dbinc_auto/rpc_client_ext.h"
+#endif
+
+static int __txn_set_tx_max __P((DB_ENV *, u_int32_t));
+static int __txn_set_tx_timestamp __P((DB_ENV *, time_t *));
+
+/*
+ * __txn_dbenv_create --
+ * Transaction specific initialization of the DB_ENV structure.
+ *
+ * PUBLIC: void __txn_dbenv_create __P((DB_ENV *));
+ */
+void
+__txn_dbenv_create(dbenv)
+ DB_ENV *dbenv;
+{
+ /*
+ * !!!
+ * Our caller has not yet had the opportunity to reset the panic
+ * state or turn off mutex locking, and so we can neither check
+ * the panic state or acquire a mutex in the DB_ENV create path.
+ */
+
+ dbenv->tx_max = DEF_MAX_TXNS;
+
+#ifdef HAVE_RPC
+ if (F_ISSET(dbenv, DB_ENV_RPCCLIENT)) {
+ dbenv->set_tx_max = __dbcl_set_tx_max;
+ dbenv->set_tx_timestamp = __dbcl_set_tx_timestamp;
+ dbenv->txn_checkpoint = __dbcl_txn_checkpoint;
+ dbenv->txn_recover = __dbcl_txn_recover;
+ dbenv->txn_stat = __dbcl_txn_stat;
+ dbenv->txn_begin = __dbcl_txn_begin;
+ } else
+#endif
+ {
+ dbenv->set_tx_max = __txn_set_tx_max;
+ dbenv->set_tx_timestamp = __txn_set_tx_timestamp;
+ dbenv->txn_checkpoint = __txn_checkpoint;
+#ifdef CONFIG_TEST
+ dbenv->txn_id_set = __txn_id_set;
+#endif
+ dbenv->txn_recover = __txn_recover;
+ dbenv->txn_stat = __txn_stat;
+ dbenv->txn_begin = __txn_begin;
+ }
+}
+
+/*
+ * __txn_set_tx_max --
+ * Set the size of the transaction table.
+ */
+static int
+__txn_set_tx_max(dbenv, tx_max)
+ DB_ENV *dbenv;
+ u_int32_t tx_max;
+{
+ ENV_ILLEGAL_AFTER_OPEN(dbenv, "set_tx_max");
+
+ dbenv->tx_max = tx_max;
+ return (0);
+}
+
+/*
+ * __txn_set_tx_timestamp --
+ * Set the transaction recovery timestamp.
+ */
+static int
+__txn_set_tx_timestamp(dbenv, timestamp)
+ DB_ENV *dbenv;
+ time_t *timestamp;
+{
+ ENV_ILLEGAL_AFTER_OPEN(dbenv, "set_tx_timestamp");
+
+ dbenv->tx_timestamp = *timestamp;
+ return (0);
+}
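
Editor's note: the method table installed by __txn_dbenv_create is what an application actually calls; a minimal, hedged open sequence follows. The home directory argument, the tx_max value, and the helper name are placeholders.

#include <db.h>

/* Hypothetical application setup; not part of this patch. */
int
open_txn_env(DB_ENV **dbenvp, const char *home)
{
	DB_ENV *dbenv;
	int ret;

	if ((ret = db_env_create(&dbenv, 0)) != 0)
		return (ret);

	/* set_tx_max must be called before DB_ENV->open. */
	if ((ret = dbenv->set_tx_max(dbenv, 500)) != 0)
		goto err;

	if ((ret = dbenv->open(dbenv, home,
	    DB_CREATE | DB_INIT_LOCK | DB_INIT_LOG |
	    DB_INIT_MPOOL | DB_INIT_TXN | DB_RECOVER, 0)) != 0)
		goto err;

	*dbenvp = dbenv;
	return (0);

err:	(void)dbenv->close(dbenv, 0);
	return (ret);
}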
diff --git a/bdb/txn/txn_rec.c b/bdb/txn/txn_rec.c
index bed20d98e1e..69af6a1f907 100644
--- a/bdb/txn/txn_rec.c
+++ b/bdb/txn/txn_rec.c
@@ -1,7 +1,7 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Copyright (c) 1996-2002
* Sleepycat Software. All rights reserved.
*/
/*
@@ -36,23 +36,20 @@
#include "db_config.h"
#ifndef lint
-static const char revid[] = "$Id: txn_rec.c,v 11.15 2001/01/11 18:19:55 bostic Exp $";
+static const char revid[] = "$Id: txn_rec.c,v 11.41 2002/08/06 04:42:37 bostic Exp $";
#endif /* not lint */
#ifndef NO_SYSTEM_INCLUDES
#include <sys/types.h>
+#include <string.h>
#endif
#include "db_int.h"
-#include "db_page.h"
-#include "txn.h"
-#include "db_am.h"
-#include "db_dispatch.h"
-#include "log.h"
-#include "common_ext.h"
-
-static int __txn_restore_txn __P((DB_ENV *, DB_LSN *, __txn_xa_regop_args *));
+#include "dbinc/db_page.h"
+#include "dbinc/txn.h"
+#include "dbinc/db_am.h"
+#include "dbinc/db_dispatch.h"
#define IS_XA_TXN(R) (R->xid.size != 0)
@@ -72,6 +69,7 @@ __txn_regop_recover(dbenv, dbtp, lsnp, op, info)
db_recops op;
void *info;
{
+ DB_TXNHEAD *headp;
__txn_regop_args *argp;
int ret;
@@ -82,31 +80,62 @@ __txn_regop_recover(dbenv, dbtp, lsnp, op, info)
if ((ret = __txn_regop_read(dbenv, dbtp->data, &argp)) != 0)
return (ret);
- if (argp->opcode != TXN_COMMIT) {
- ret = EINVAL;
- goto err;
- }
+ headp = info;
+ /*
+ * We are only ever called during FORWARD_ROLL or BACKWARD_ROLL.
+ * We check for the former explicitly and the last two clauses
+ * apply to the BACKWARD_ROLL case.
+ */
if (op == DB_TXN_FORWARD_ROLL)
- ret = __db_txnlist_remove(info, argp->txnid->txnid);
- else if (dbenv->tx_timestamp == 0 ||
- argp->timestamp <= (int32_t)dbenv->tx_timestamp)
/*
- * We know this is the backward roll case because we
- * are never called during ABORT or OPENFILES.
+ * If this was a 2-phase-commit transaction, then it
+ * might already have been removed from the list, and
+ * that's OK. Ignore the return code from remove.
*/
- ret = __db_txnlist_add(dbenv, info, argp->txnid->txnid, 0);
- else
+ (void)__db_txnlist_remove(dbenv, info, argp->txnid->txnid);
+ else if ((dbenv->tx_timestamp != 0 &&
+ argp->timestamp > (int32_t)dbenv->tx_timestamp) ||
+ (!IS_ZERO_LSN(headp->trunc_lsn) &&
+ log_compare(&headp->trunc_lsn, lsnp) < 0)) {
/*
- * This is commit record, but we failed the timestamp check
- * so we should treat it as an abort and add it to the list
- * as an aborted record.
+ * We failed either the timestamp check or the trunc_lsn check,
+ * so we treat this as an abort even if it was a commit record.
*/
- ret = __db_txnlist_add(dbenv, info, argp->txnid->txnid, 1);
+ ret = __db_txnlist_update(dbenv,
+ info, argp->txnid->txnid, TXN_ABORT, NULL);
+
+ if (ret == TXN_NOTFOUND)
+ ret = __db_txnlist_add(dbenv,
+ info, argp->txnid->txnid, TXN_IGNORE, NULL);
+ else if (ret != TXN_OK)
+ goto err;
+ /* else ret = 0; Not necessary because TXN_OK == 0 */
+ } else {
+ /* This is a normal commit; mark it appropriately. */
+ ret = __db_txnlist_update(dbenv,
+ info, argp->txnid->txnid, argp->opcode, lsnp);
+
+ if (ret == TXN_NOTFOUND)
+ ret = __db_txnlist_add(dbenv,
+ info, argp->txnid->txnid,
+ argp->opcode == TXN_ABORT ?
+ TXN_IGNORE : argp->opcode, lsnp);
+ else if (ret != TXN_OK)
+ goto err;
+ /* else ret = 0; Not necessary because TXN_OK == 0 */
+ }
if (ret == 0)
*lsnp = argp->prev_lsn;
-err: __os_free(argp, 0);
+
+ if (0) {
+err: __db_err(dbenv,
+ "txnid %lx commit record found, already on commit list",
+ argp->txnid->txnid);
+ ret = EINVAL;
+ }
+ __os_free(dbenv, argp);
return (ret);
}
@@ -140,58 +169,50 @@ __txn_xa_regop_recover(dbenv, dbtp, lsnp, op, info)
goto err;
}
- ret = __db_txnlist_find(info, argp->txnid->txnid);
+ ret = __db_txnlist_find(dbenv, info, argp->txnid->txnid);
/*
* If we are rolling forward, then an aborted prepare
- * indicates that this is the last record we'll see for
- * this transaction ID and we should remove it from the
+ * indicates that this may be the last record we'll see for
+ * this transaction ID, so we should remove it from the
* list.
*/
- if (op == DB_TXN_FORWARD_ROLL && ret == 1)
- ret = __db_txnlist_remove(info, argp->txnid->txnid);
- else if (op == DB_TXN_BACKWARD_ROLL && ret != 0) {
+ if (op == DB_TXN_FORWARD_ROLL) {
+ if ((ret = __db_txnlist_remove(dbenv,
+ info, argp->txnid->txnid)) != TXN_OK)
+ goto txn_err;
+ } else if (op == DB_TXN_BACKWARD_ROLL && ret == TXN_PREPARE) {
/*
* On the backward pass, we have three possibilities:
* 1. The transaction is already committed, no-op.
- * 2. The transaction is not committed and we are XA, treat
- * like commited and roll forward so that can be committed
- * or aborted late.
- * 3. The transaction is not committed and we are not XA
- * mark the transaction as aborted.
- *
- * Cases 2 and 3 are handled here.
+ * 2. The transaction is already aborted, no-op.
+ * 3. The transaction is neither committed nor aborted.
+ * Treat this like a commit and roll forward so that
+ * the transaction can be resurrected in the region.
+ * We handle case 3 here; cases 1 and 2 are the final clause
+ * below.
+ * This is a prepared, but not yet committed, transaction. We
+ * need to add it to the transaction list, so that it gets
+ * rolled forward. We also have to add it to the region's
+ * internal state so it can be properly aborted or committed
+ * after recovery (see txn_recover).
*/
-
- /*
- * Should never have seen this transaction unless it was
- * commited.
- */
- DB_ASSERT(ret == DB_NOTFOUND);
-
- if (IS_XA_TXN(argp)) {
- /*
- * This is an XA prepared, but not yet committed
- * transaction. We need to add it to the
- * transaction list, so that it gets rolled
- * forward. We also have to add it to the region's
- * internal state so it can be properly aborted
- * or recovered.
- */
- if ((ret = __db_txnlist_add(dbenv,
- info, argp->txnid->txnid, 0)) == 0)
- ret = __txn_restore_txn(dbenv, lsnp, argp);
- } else
- ret = __db_txnlist_add(dbenv,
- info, argp->txnid->txnid, 1);
+ if ((ret = __db_txnlist_remove(dbenv,
+ info, argp->txnid->txnid)) != TXN_OK) {
+txn_err: __db_err(dbenv,
+ "Transaction not in list %x", argp->txnid->txnid);
+ ret = DB_NOTFOUND;
+ } else if ((ret = __db_txnlist_add(dbenv,
+ info, argp->txnid->txnid, TXN_COMMIT, lsnp)) == 0)
+ ret = __txn_restore_txn(dbenv, lsnp, argp);
} else
ret = 0;
if (ret == 0)
*lsnp = argp->prev_lsn;
-err: __os_free(argp, 0);
+err: __os_free(dbenv, argp);
return (ret);
}
@@ -219,18 +240,11 @@ __txn_ckp_recover(dbenv, dbtp, lsnp, op, info)
if ((ret = __txn_ckp_read(dbenv, dbtp->data, &argp)) != 0)
return (ret);
- /*
- * Check for 'restart' checkpoint record. This occurs when the
- * checkpoint lsn is equal to the lsn of the checkpoint record
- * and means that we could set the transaction ID back to 1, so
- * that we don't exhaust the transaction ID name space.
- */
- if (argp->ckp_lsn.file == lsnp->file &&
- argp->ckp_lsn.offset == lsnp->offset)
- __db_txnlist_gen(info, DB_REDO(op) ? -1 : 1);
+ if (op == DB_TXN_BACKWARD_ROLL)
+ __db_txnlist_ckp(dbenv, info, lsnp);
*lsnp = argp->last_ckp;
- __os_free(argp, 0);
+ __os_free(dbenv, argp);
return (DB_TXN_CKP);
}
@@ -250,7 +264,7 @@ __txn_child_recover(dbenv, dbtp, lsnp, op, info)
void *info;
{
__txn_child_args *argp;
- int ret;
+ int c_stat, p_stat, ret;
#ifdef DEBUG_RECOVER
(void)__txn_child_print(dbenv, dbtp, lsnp, op, info);
@@ -272,17 +286,54 @@ __txn_child_recover(dbenv, dbtp, lsnp, op, info)
ret = __db_txnlist_lsnadd(dbenv,
info, &argp->c_lsn, TXNLIST_NEW);
} else if (op == DB_TXN_BACKWARD_ROLL) {
- if (__db_txnlist_find(info, argp->txnid->txnid) == 0)
- ret = __db_txnlist_add(dbenv, info, argp->child, 0);
- else
- ret = __db_txnlist_add(dbenv, info, argp->child, 1);
- } else
- ret = __db_txnlist_remove(info, argp->child);
+ /* Child might exist -- look for it. */
+ c_stat = __db_txnlist_find(dbenv, info, argp->child);
+ p_stat = __db_txnlist_find(dbenv, info, argp->txnid->txnid);
+
+ if (c_stat == TXN_EXPECTED) {
+ /*
+ * The open after this create succeeded. If the
+ * parent succeeded, we don't want to redo; if the
+ * parent aborted, we do want to undo.
+ */
+ ret = __db_txnlist_update(dbenv,
+ info, argp->child,
+ p_stat == TXN_COMMIT ? TXN_IGNORE : TXN_ABORT,
+ NULL);
+ if (ret > 0)
+ ret = 0;
+ } else if (c_stat == TXN_UNEXPECTED) {
+ /*
+ * The open after this create failed. If the parent
+ * is rolling forward, we need to roll forward. If
+ * the parent failed, then we do not want to abort
+ * (because the file may not be the one in which we
+ * are interested).
+ */
+ ret = __db_txnlist_update(dbenv, info, argp->child,
+ p_stat == TXN_COMMIT ? TXN_COMMIT : TXN_IGNORE,
+ NULL);
+ if (ret > 0)
+ ret = 0;
+ } else if (c_stat != TXN_IGNORE) {
+ ret = __db_txnlist_add(dbenv, info, argp->child,
+ p_stat == TXN_COMMIT ? TXN_COMMIT : TXN_ABORT,
+ NULL);
+ }
+ } else {
+ /* Forward Roll */
+ if ((ret =
+ __db_txnlist_remove(dbenv, info, argp->child)) != TXN_OK) {
+ __db_err(dbenv,
+ "Transaction not in list %x", argp->txnid->txnid);
+ ret = DB_NOTFOUND;
+ }
+ }
if (ret == 0)
*lsnp = argp->prev_lsn;
- __os_free(argp, 0);
+ __os_free(dbenv, argp);
return (ret);
}
@@ -291,13 +342,16 @@ __txn_child_recover(dbenv, dbtp, lsnp, op, info)
* __txn_restore_txn --
 * Used only during XA recovery. If we find any transactions that are
* prepared, but not yet committed, then we need to restore the transaction's
- * state into the shared region, because the TM is going to issue a txn_abort
- * or txn_commit and we need to respond correctly.
+ * state into the shared region, because the TM is going to issue an abort
+ * or commit and we need to respond correctly.
*
* lsnp is the LSN of the returned LSN
 * argp is the prepare record (in an appropriate structure)
+ *
+ * PUBLIC: int __txn_restore_txn __P((DB_ENV *,
+ * PUBLIC: DB_LSN *, __txn_xa_regop_args *));
*/
-static int
+int
__txn_restore_txn(dbenv, lsnp, argp)
DB_ENV *dbenv;
DB_LSN *lsnp;
@@ -317,8 +371,10 @@ __txn_restore_txn(dbenv, lsnp, argp)
/* Allocate a new transaction detail structure. */
if ((ret =
- __db_shalloc(mgr->reginfo.addr, sizeof(TXN_DETAIL), 0, &td)) != 0)
+ __db_shalloc(mgr->reginfo.addr, sizeof(TXN_DETAIL), 0, &td)) != 0) {
+ R_UNLOCK(dbenv, &mgr->reginfo);
return (ret);
+ }
/* Place transaction on active transaction list. */
SH_TAILQ_INSERT_HEAD(&region->active_txn, td, links, __txn_detail);
@@ -333,7 +389,48 @@ __txn_restore_txn(dbenv, lsnp, argp)
td->bqual = argp->bqual;
td->gtrid = argp->gtrid;
td->format = argp->formatID;
+ td->flags = 0;
+ F_SET(td, TXN_RESTORED);
+ region->stat.st_nrestores++;
+ region->stat.st_nactive++;
+ if (region->stat.st_nactive > region->stat.st_maxnactive)
+ region->stat.st_maxnactive = region->stat.st_nactive;
R_UNLOCK(dbenv, &mgr->reginfo);
return (0);
}
+
+/*
+ * __txn_recycle_recover --
+ * Recovery function for recycle.
+ *
+ * PUBLIC: int __txn_recycle_recover
+ * PUBLIC: __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ */
+int
+__txn_recycle_recover(dbenv, dbtp, lsnp, op, info)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ DB_LSN *lsnp;
+ db_recops op;
+ void *info;
+{
+ __txn_recycle_args *argp;
+ int ret;
+
+#ifdef DEBUG_RECOVER
+ (void)__txn_child_print(dbenv, dbtp, lsnp, op, info);
+#endif
+ if ((ret = __txn_recycle_read(dbenv, dbtp->data, &argp)) != 0)
+ return (ret);
+
+ COMPQUIET(lsnp, NULL);
+
+ if ((ret = __db_txnlist_gen(dbenv, info,
+ DB_UNDO(op) ? -1 : 1, argp->min, argp->max)) != 0)
+ return (ret);
+
+ __os_free(dbenv, argp);
+
+ return (0);
+}
diff --git a/bdb/txn/txn_recover.c b/bdb/txn/txn_recover.c
new file mode 100644
index 00000000000..732a82e5030
--- /dev/null
+++ b/bdb/txn/txn_recover.c
@@ -0,0 +1,306 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2001-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: txn_recover.c,v 1.36 2002/08/19 16:59:15 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/txn.h"
+#include "dbinc/db_page.h"
+#include "dbinc/log.h"
+#include "dbinc_auto/db_auto.h"
+#include "dbinc_auto/crdel_auto.h"
+#include "dbinc_auto/db_ext.h"
+
+/*
+ * __txn_continue
+ * Fill in the fields of the local transaction structure given
+ * the detail transaction structure.
+ *
+ * XXX
+ * I'm not sure that we work correctly with nested txns.
+ *
+ * PUBLIC: void __txn_continue __P((DB_ENV *, DB_TXN *, TXN_DETAIL *, size_t));
+ */
+void
+__txn_continue(env, txnp, td, off)
+ DB_ENV *env;
+ DB_TXN *txnp;
+ TXN_DETAIL *td;
+ size_t off;
+{
+ txnp->mgrp = env->tx_handle;
+ txnp->parent = NULL;
+ txnp->last_lsn = td->last_lsn;
+ txnp->txnid = td->txnid;
+ txnp->off = (roff_t)off;
+
+ txnp->abort = __txn_abort;
+ txnp->commit = __txn_commit;
+ txnp->discard = __txn_discard;
+ txnp->id = __txn_id;
+ txnp->prepare = __txn_prepare;
+
+ txnp->flags = 0;
+}
+
+/*
+ * __txn_map_gid
+ * Return the txn that corresponds to this global ID.
+ *
+ * PUBLIC: int __txn_map_gid __P((DB_ENV *,
+ * PUBLIC: u_int8_t *, TXN_DETAIL **, size_t *));
+ */
+int
+__txn_map_gid(dbenv, gid, tdp, offp)
+ DB_ENV *dbenv;
+ u_int8_t *gid;
+ TXN_DETAIL **tdp;
+ size_t *offp;
+{
+ DB_TXNMGR *mgr;
+ DB_TXNREGION *tmr;
+
+ mgr = dbenv->tx_handle;
+ tmr = mgr->reginfo.primary;
+
+ /*
+ * Search the internal active transaction table to find the
+ * matching xid. If this is a performance hit, then we
+ * can create a hash table, but I doubt it's worth it.
+ */
+ R_LOCK(dbenv, &mgr->reginfo);
+ for (*tdp = SH_TAILQ_FIRST(&tmr->active_txn, __txn_detail);
+ *tdp != NULL;
+ *tdp = SH_TAILQ_NEXT(*tdp, links, __txn_detail))
+ if (memcmp(gid, (*tdp)->xid, sizeof((*tdp)->xid)) == 0)
+ break;
+ R_UNLOCK(dbenv, &mgr->reginfo);
+
+ if (*tdp == NULL)
+ return (EINVAL);
+
+ *offp = R_OFFSET(&mgr->reginfo, *tdp);
+ return (0);
+}
+
+/*
+ * __txn_recover --
+ * Public interface to retrieve the list of prepared, but not yet
+ * committed transactions. See __txn_get_prepared for details. This
+ * function and __db_xa_recover both wrap that one.
+ *
+ * PUBLIC: int __txn_recover
+ * PUBLIC: __P((DB_ENV *, DB_PREPLIST *, long, long *, u_int32_t));
+ */
+int
+__txn_recover(dbenv, preplist, count, retp, flags)
+ DB_ENV *dbenv;
+ DB_PREPLIST *preplist;
+ long count, *retp;
+ u_int32_t flags;
+{
+ PANIC_CHECK(dbenv);
+ ENV_REQUIRES_CONFIG(
+ dbenv, dbenv->tx_handle, "txn_recover", DB_INIT_TXN);
+
+ if (F_ISSET((DB_TXNREGION *)
+ ((DB_TXNMGR *)dbenv->tx_handle)->reginfo.primary,
+ TXN_IN_RECOVERY)) {
+ __db_err(dbenv, "operation not permitted while in recovery");
+ return (EINVAL);
+ }
+ return (__txn_get_prepared(dbenv, NULL, preplist, count, retp, flags));
+}
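
Editor's note: a hedged sketch of the transaction-manager side of this interface follows: scan prepared transactions in batches with DB_FIRST/DB_NEXT and resolve each one. The batch size and the is_committed_globally() decision function are invented.

#include <db.h>

/* Hypothetical global-commit check supplied by the TM; not part of this patch. */
extern int is_committed_globally(u_int8_t gid[DB_XIDDATASIZE]);

/* Hypothetical resolver loop; not part of this patch. */
int
resolve_prepared(DB_ENV *dbenv)
{
	DB_PREPLIST prep[16];
	long count, i;
	u_int32_t flags;
	int ret;

	for (flags = DB_FIRST;; flags = DB_NEXT) {
		if ((ret = dbenv->txn_recover(dbenv,
		    prep, sizeof(prep) / sizeof(prep[0]), &count, flags)) != 0)
			return (ret);
		if (count == 0)
			break;
		for (i = 0; i < count; i++)
			if (is_committed_globally(prep[i].gid))
				(void)prep[i].txn->commit(prep[i].txn, 0);
			else
				(void)prep[i].txn->abort(prep[i].txn);
	}
	return (0);
}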
+
+/*
+ * __txn_get_prepared --
+ * Returns a list of prepared (and for XA, heuristically completed)
+ * transactions (less than or equal to the count parameter). One of
+ * xids or txns must be set to point to an array of the appropriate type.
+ * The count parameter indicates the number of entries in the xids and/or
+ * txns array. The retp parameter will be set to indicate the number of
+ * entries returned in the xids/txns array. Flags indicates the operation,
+ * one of DB_FIRST or DB_NEXT.
+ *
+ * PUBLIC: int __txn_get_prepared __P((DB_ENV *,
+ * PUBLIC: XID *, DB_PREPLIST *, long, long *, u_int32_t));
+ */
+int
+__txn_get_prepared(dbenv, xids, txns, count, retp, flags)
+ DB_ENV *dbenv;
+ XID *xids;
+ DB_PREPLIST *txns;
+ long count; /* This is long for XA compatibility. */
+ long *retp;
+ u_int32_t flags;
+{
+ DBT data;
+ DB_LOGC *logc;
+ DB_LSN min, open_lsn;
+ DB_PREPLIST *prepp;
+ DB_TXNMGR *mgr;
+ DB_TXNREGION *tmr;
+ TXN_DETAIL *td;
+ XID *xidp;
+ __txn_ckp_args *ckp_args;
+ long i;
+ int nrestores, open_files, ret, t_ret;
+ void *txninfo;
+
+ *retp = 0;
+
+ logc = NULL;
+ MAX_LSN(min);
+ prepp = txns;
+ xidp = xids;
+ nrestores = ret = 0;
+ open_files = 1;
+
+ /*
+ * If we are starting a scan, then we traverse the active transaction
+ * list once making sure that all transactions are marked as not having
+ * been collected. Then on each pass, we mark the ones we collected
+ * so that if we cannot collect them all at once, we can finish up
+ * next time with a continue.
+ */
+
+ mgr = dbenv->tx_handle;
+ tmr = mgr->reginfo.primary;
+
+ /*
+ * During this pass we need to figure out if we are going to need
+ * to open files. We need to open files if we've never collected
+ * before (in which case, none of the COLLECTED bits will be set)
+ * and the ones that we are collecting are restored (if they aren't
+ * restored, then we never crashed; just the main server did).
+ */
+ R_LOCK(dbenv, &mgr->reginfo);
+ if (flags == DB_FIRST) {
+ for (td = SH_TAILQ_FIRST(&tmr->active_txn, __txn_detail);
+ td != NULL;
+ td = SH_TAILQ_NEXT(td, links, __txn_detail)) {
+ if (F_ISSET(td, TXN_RESTORED))
+ nrestores++;
+ if (F_ISSET(td, TXN_COLLECTED))
+ open_files = 0;
+ F_CLR(td, TXN_COLLECTED);
+ }
+ mgr->n_discards = 0;
+ } else
+ open_files = 0;
+
+ /* Now begin collecting active transactions. */
+ for (td = SH_TAILQ_FIRST(&tmr->active_txn, __txn_detail);
+ td != NULL && *retp < count;
+ td = SH_TAILQ_NEXT(td, links, __txn_detail)) {
+ if (td->status != TXN_PREPARED || F_ISSET(td, TXN_COLLECTED))
+ continue;
+
+ if (xids != NULL) {
+ xidp->formatID = td->format;
+ xidp->gtrid_length = td->gtrid;
+ xidp->bqual_length = td->bqual;
+ memcpy(xidp->data, td->xid, sizeof(td->xid));
+ xidp++;
+ }
+
+ if (txns != NULL) {
+ if ((ret = __os_calloc(dbenv,
+ 1, sizeof(DB_TXN), &prepp->txn)) != 0)
+ goto err;
+ __txn_continue(dbenv,
+ prepp->txn, td, R_OFFSET(&mgr->reginfo, td));
+ F_SET(prepp->txn, TXN_MALLOC);
+ memcpy(prepp->gid, td->xid, sizeof(td->xid));
+ prepp++;
+ }
+
+ if (log_compare(&td->begin_lsn, &min) < 0)
+ min = td->begin_lsn;
+
+ (*retp)++;
+ F_SET(td, TXN_COLLECTED);
+ }
+ R_UNLOCK(dbenv, &mgr->reginfo);
+
+ /*
+ * Now link all the transactions into the transaction manager's list.
+ */
+ if (txns != NULL) {
+ MUTEX_THREAD_LOCK(dbenv, mgr->mutexp);
+ for (i = 0; i < *retp; i++)
+ TAILQ_INSERT_TAIL(&mgr->txn_chain, txns[i].txn, links);
+ MUTEX_THREAD_UNLOCK(dbenv, mgr->mutexp);
+ }
+
+ if (open_files && nrestores && *retp != 0 && !IS_MAX_LSN(min)) {
+ /*
+ * Figure out the last checkpoint before the smallest
+ * start_lsn in the region.
+ */
+ F_SET((DB_LOG *)dbenv->lg_handle, DBLOG_RECOVER);
+
+ if ((ret = dbenv->log_cursor(dbenv, &logc, 0)) != 0)
+ goto err;
+
+ memset(&data, 0, sizeof(data));
+ if ((ret = __txn_getckp(dbenv, &open_lsn)) == 0)
+ while (!IS_ZERO_LSN(open_lsn) && (ret =
+ logc->get(logc, &open_lsn, &data, DB_SET)) == 0 &&
+ log_compare(&min, &open_lsn) < 0) {
+ /* Format the log record. */
+ if ((ret = __txn_ckp_read(dbenv,
+ data.data, &ckp_args)) != 0) {
+ __db_err(dbenv,
+ "Invalid checkpoint record at [%lu][%lu]",
+ (u_long)open_lsn.file,
+ (u_long)open_lsn.offset);
+ goto err;
+ }
+ open_lsn = ckp_args->last_ckp;
+ __os_free(dbenv, ckp_args);
+ }
+
+ /*
+ * There are three ways by which we may have gotten here.
+ * - We got a DB_NOTFOUND -- we need to read the first
+ * log record.
+ * - We found a checkpoint before min. We're done.
+ * - We found a checkpoint after min whose last_ckp is 0. We
+ * need to start at the beginning of the log.
+ */
+ if ((ret == DB_NOTFOUND || IS_ZERO_LSN(open_lsn)) &&
+ (ret = logc->get(logc, &open_lsn, &data, DB_FIRST)) != 0) {
+ __db_err(dbenv, "No log records");
+ goto err;
+ }
+
+ if ((ret = __db_txnlist_init(dbenv, 0, 0, NULL, &txninfo)) != 0)
+ goto err;
+ ret = __env_openfiles(dbenv, logc,
+ txninfo, &data, &open_lsn, NULL, 0, 0);
+ if (txninfo != NULL)
+ __db_txnlist_end(dbenv, txninfo);
+ }
+
+err: F_CLR((DB_LOG *)dbenv->lg_handle, DBLOG_RECOVER);
+
+ if (logc != NULL && (t_ret = logc->close(logc, 0)) != 0 && ret == 0)
+ ret = t_ret;
+
+ return (ret);
+}
diff --git a/bdb/txn/txn_region.c b/bdb/txn/txn_region.c
index 77ce3d08f89..bf72d4f1d2c 100644
--- a/bdb/txn/txn_region.c
+++ b/bdb/txn/txn_region.c
@@ -1,14 +1,14 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Copyright (c) 1996-2002
* Sleepycat Software. All rights reserved.
*/
#include "db_config.h"
#ifndef lint
-static const char revid[] = "$Id: txn_region.c,v 11.36 2001/01/11 18:19:55 bostic Exp $";
+static const char revid[] = "$Id: txn_region.c,v 11.73 2002/08/06 04:42:37 bostic Exp $";
#endif /* not lint */
#ifndef NO_SYSTEM_INCLUDES
@@ -28,98 +28,13 @@ static const char revid[] = "$Id: txn_region.c,v 11.36 2001/01/11 18:19:55 bosti
#include <string.h>
#endif
-#ifdef HAVE_RPC
-#include "db_server.h"
-#endif
-
#include "db_int.h"
-#include "db_page.h"
-#include "log.h" /* for __log_lastckp */
-#include "txn.h"
-#include "db_am.h"
-
-#ifdef HAVE_RPC
-#include "gen_client_ext.h"
-#include "rpc_client_ext.h"
-#endif
+#include "dbinc/log.h"
+#include "dbinc/txn.h"
+static int __txn_findlastckp __P((DB_ENV *, DB_LSN *));
static int __txn_init __P((DB_ENV *, DB_TXNMGR *));
-static int __txn_set_tx_max __P((DB_ENV *, u_int32_t));
-static int __txn_set_tx_recover __P((DB_ENV *,
- int (*)(DB_ENV *, DBT *, DB_LSN *, db_recops)));
-static int __txn_set_tx_timestamp __P((DB_ENV *, time_t *));
-
-/*
- * __txn_dbenv_create --
- * Transaction specific initialization of the DB_ENV structure.
- *
- * PUBLIC: void __txn_dbenv_create __P((DB_ENV *));
- */
-void
-__txn_dbenv_create(dbenv)
- DB_ENV *dbenv;
-{
- dbenv->tx_max = DEF_MAX_TXNS;
-
- dbenv->set_tx_max = __txn_set_tx_max;
- dbenv->set_tx_recover = __txn_set_tx_recover;
- dbenv->set_tx_timestamp = __txn_set_tx_timestamp;
-
-#ifdef HAVE_RPC
- /*
- * If we have a client, overwrite what we just setup to point to
- * client functions.
- */
- if (F_ISSET(dbenv, DB_ENV_RPCCLIENT)) {
- dbenv->set_tx_max = __dbcl_set_tx_max;
- dbenv->set_tx_recover = __dbcl_set_tx_recover;
- dbenv->set_tx_timestamp = __dbcl_set_tx_timestamp;
- }
-#endif
-}
-
-/*
- * __txn_set_tx_max --
- * Set the size of the transaction table.
- */
-static int
-__txn_set_tx_max(dbenv, tx_max)
- DB_ENV *dbenv;
- u_int32_t tx_max;
-{
- ENV_ILLEGAL_AFTER_OPEN(dbenv, "set_tx_max");
-
- dbenv->tx_max = tx_max;
- return (0);
-}
-
-/*
- * __txn_set_tx_recover --
- * Set the transaction abort recover function.
- */
-static int
-__txn_set_tx_recover(dbenv, tx_recover)
- DB_ENV *dbenv;
- int (*tx_recover) __P((DB_ENV *, DBT *, DB_LSN *, db_recops));
-{
- dbenv->tx_recover = tx_recover;
- return (0);
-}
-
-/*
- * __txn_set_tx_timestamp --
- * Set the transaction recovery timestamp.
- */
-static int
-__txn_set_tx_timestamp(dbenv, timestamp)
- DB_ENV *dbenv;
- time_t *timestamp;
-{
- ENV_ILLEGAL_AFTER_OPEN(dbenv, "set_tx_timestamp");
-
- dbenv->tx_timestamp = *timestamp;
- return (0);
-}
+static size_t __txn_region_size __P((DB_ENV *));
/*
* __txn_open --
@@ -148,7 +63,7 @@ __txn_open(dbenv)
if (F_ISSET(dbenv, DB_ENV_CREATE))
F_SET(&tmgrp->reginfo, REGION_CREATE_OK);
if ((ret = __db_r_attach(dbenv,
- &tmgrp->reginfo, TXN_REGION_SIZE(dbenv->tx_max))) != 0)
+ &tmgrp->reginfo, __txn_region_size(dbenv))) != 0)
goto err;
/* If we created the region, initialize it. */
@@ -161,14 +76,10 @@ __txn_open(dbenv)
R_ADDR(&tmgrp->reginfo, tmgrp->reginfo.rp->primary);
/* Acquire a mutex to protect the active TXN list. */
- if (F_ISSET(dbenv, DB_ENV_THREAD)) {
- if ((ret = __db_mutex_alloc(
- dbenv, &tmgrp->reginfo, &tmgrp->mutexp)) != 0)
- goto err;
- if ((ret = __db_mutex_init(
- dbenv, tmgrp->mutexp, 0, MUTEX_THREAD)) != 0)
- goto err;
- }
+ if (F_ISSET(dbenv, DB_ENV_THREAD) &&
+ (ret = __db_mutex_setup(dbenv, &tmgrp->reginfo, &tmgrp->mutexp,
+ MUTEX_ALLOC | MUTEX_NO_RLOCK | MUTEX_THREAD)) != 0)
+ goto err;
R_UNLOCK(dbenv, &tmgrp->reginfo);
@@ -184,7 +95,7 @@ err: if (tmgrp->reginfo.addr != NULL) {
}
if (tmgrp->mutexp != NULL)
__db_mutex_free(dbenv, &tmgrp->reginfo, tmgrp->mutexp);
- __os_free(tmgrp, sizeof(*tmgrp));
+ __os_free(dbenv, tmgrp);
return (ret);
}
@@ -200,15 +111,29 @@ __txn_init(dbenv, tmgrp)
DB_LSN last_ckp;
DB_TXNREGION *region;
int ret;
+#ifdef HAVE_MUTEX_SYSTEM_RESOURCES
+ u_int8_t *addr;
+#endif
- ZERO_LSN(last_ckp);
/*
- * If possible, fetch the last checkpoint LSN from the log system
- * so that the backwards chain of checkpoints is unbroken when
- * the environment is removed and recreated. [#2865]
+ * Find the last checkpoint in the log.
*/
- if (LOGGING_ON(dbenv) && (ret = __log_lastckp(dbenv, &last_ckp)) != 0)
- return (ret);
+ ZERO_LSN(last_ckp);
+ if (LOGGING_ON(dbenv)) {
+ /*
+ * The log system has already walked through the last
+ * file. Get the LSN of a checkpoint it may have found.
+ */
+ __log_get_cached_ckp_lsn(dbenv, &last_ckp);
+
+ /*
+ * If that didn't work, look backwards from the beginning of
+ * the last log file until we find the last checkpoint.
+ */
+ if (IS_ZERO_LSN(last_ckp) &&
+ (ret = __txn_findlastckp(dbenv, &last_ckp)) != 0)
+ return (ret);
+ }
if ((ret = __db_shalloc(tmgrp->reginfo.addr,
sizeof(DB_TXNREGION), 0, &tmgrp->reginfo.primary)) != 0) {
@@ -223,7 +148,7 @@ __txn_init(dbenv, tmgrp)
region->maxtxns = dbenv->tx_max;
region->last_txnid = TXN_MINIMUM;
- ZERO_LSN(region->pending_ckp);
+ region->cur_maxid = TXN_MAXIMUM;
region->last_ckp = last_ckp;
region->time_ckp = time(NULL);
@@ -233,25 +158,86 @@ __txn_init(dbenv, tmgrp)
*/
region->logtype = 0;
region->locktype = 0;
- region->naborts = 0;
- region->ncommits = 0;
- region->nbegins = 0;
- region->nactive = 0;
- region->maxnactive = 0;
- SH_TAILQ_INIT(&region->active_txn);
+ memset(&region->stat, 0, sizeof(region->stat));
+ region->stat.st_maxtxns = region->maxtxns;
+ SH_TAILQ_INIT(&region->active_txn);
+#ifdef HAVE_MUTEX_SYSTEM_RESOURCES
+ /* Allocate room for the txn maintenance info and initialize it. */
+ if ((ret = __db_shalloc(tmgrp->reginfo.addr,
+ sizeof(REGMAINT) + TXN_MAINT_SIZE, 0, &addr)) != 0) {
+ __db_err(dbenv,
+ "Unable to allocate memory for mutex maintenance");
+ return (ret);
+ }
+ __db_maintinit(&tmgrp->reginfo, addr, TXN_MAINT_SIZE);
+ region->maint_off = R_OFFSET(&tmgrp->reginfo, addr);
+#endif
return (0);
}
/*
- * __txn_close --
- * Close a transaction region.
+ * __txn_findlastckp --
+ * Find the last checkpoint in the log, walking backwards from the
+ * beginning of the last log file. (The log system looked through
+ * the last log file when it started up.)
+ */
+static int
+__txn_findlastckp(dbenv, lsnp)
+ DB_ENV *dbenv;
+ DB_LSN *lsnp;
+{
+ DB_LOGC *logc;
+ DB_LSN lsn;
+ DBT dbt;
+ int ret, t_ret;
+ u_int32_t rectype;
+
+ if ((ret = dbenv->log_cursor(dbenv, &logc, 0)) != 0)
+ return (ret);
+
+ /* Get the last LSN. */
+ memset(&dbt, 0, sizeof(dbt));
+ if ((ret = logc->get(logc, &lsn, &dbt, DB_LAST)) != 0)
+ goto err;
+
+ /*
+ * Twiddle the last LSN so it points to the beginning of the last
+ * file; we know there's no checkpoint after that, since the log
+ * system already looked there.
+ */
+ lsn.offset = 0;
+
+ /* Read backwards, looking for checkpoints. */
+ while ((ret = logc->get(logc, &lsn, &dbt, DB_PREV)) == 0) {
+ if (dbt.size < sizeof(u_int32_t))
+ continue;
+ memcpy(&rectype, dbt.data, sizeof(u_int32_t));
+ if (rectype == DB___txn_ckp) {
+ *lsnp = lsn;
+ break;
+ }
+ }
+
+err: if ((t_ret = logc->close(logc, 0)) != 0 && ret == 0)
+ ret = t_ret;
+ /*
+ * Not finding a checkpoint is not an error; one may simply not
+ * exist in the log.
+ */
+ return ((ret == 0 || ret == DB_NOTFOUND) ? 0 : ret);
+}
+
+/*
+ * __txn_dbenv_refresh --
+ * Clean up after the transaction system on a close or failed open.
+ * Called only from __dbenv_refresh. (Formerly called __txn_close.)
*
- * PUBLIC: int __txn_close __P((DB_ENV *));
+ * PUBLIC: int __txn_dbenv_refresh __P((DB_ENV *));
*/
int
-__txn_close(dbenv)
+__txn_dbenv_refresh(dbenv)
DB_ENV *dbenv;
{
DB_TXN *txnp;
@@ -274,22 +260,23 @@ __txn_close(dbenv)
*/
if (TAILQ_FIRST(&tmgrp->txn_chain) != NULL) {
__db_err(dbenv,
- "Error: closing the transaction region with active transactions\n");
+ "Error: closing the transaction region with active transactions");
ret = EINVAL;
while ((txnp = TAILQ_FIRST(&tmgrp->txn_chain)) != NULL) {
txnid = txnp->txnid;
- if ((t_ret = txn_abort(txnp)) != 0) {
+ if ((t_ret = txnp->abort(txnp)) != 0) {
__db_err(dbenv,
- "Unable to abort transaction 0x%x: %s\n",
+ "Unable to abort transaction 0x%x: %s",
txnid, db_strerror(t_ret));
ret = __db_panic(dbenv, t_ret);
+ break;
}
}
}
/* Flush the log. */
if (LOGGING_ON(dbenv) &&
- (t_ret = log_flush(dbenv, NULL)) != 0 && ret == 0)
+ (t_ret = dbenv->log_flush(dbenv, NULL)) != 0 && ret == 0)
ret = t_ret;
/* Discard the per-thread lock. */
@@ -300,94 +287,88 @@ __txn_close(dbenv)
if ((t_ret = __db_r_detach(dbenv, &tmgrp->reginfo, 0)) != 0 && ret == 0)
ret = t_ret;
- __os_free(tmgrp, sizeof(*tmgrp));
+ __os_free(dbenv, tmgrp);
dbenv->tx_handle = NULL;
return (ret);
}
+/*
+ * __txn_region_size --
+ * Return the amount of space needed for the txn region. Make the
+ * region large enough to hold txn_max transaction detail structures
+ * plus some space to hold thread handles and the beginning of the
+ * shalloc region and anything we need for mutex system resource
+ * recording.
+ */
+static size_t
+__txn_region_size(dbenv)
+ DB_ENV *dbenv;
+{
+ size_t s;
+
+ s = sizeof(DB_TXNREGION) +
+ dbenv->tx_max * sizeof(TXN_DETAIL) + 10 * 1024;
+#ifdef HAVE_MUTEX_SYSTEM_RESOURCES
+ if (F_ISSET(dbenv, DB_ENV_THREAD))
+ s += sizeof(REGMAINT) + TXN_MAINT_SIZE;
+#endif
+ return (s);
+}
+
+/*
+ * __txn_region_destroy
+ * Destroy any region maintenance info.
+ *
+ * PUBLIC: void __txn_region_destroy __P((DB_ENV *, REGINFO *));
+ */
+void
+__txn_region_destroy(dbenv, infop)
+ DB_ENV *dbenv;
+ REGINFO *infop;
+{
+ __db_shlocks_destroy(infop, (REGMAINT *)R_ADDR(infop,
+ ((DB_TXNREGION *)R_ADDR(infop, infop->rp->primary))->maint_off));
+
+ COMPQUIET(dbenv, NULL);
+ COMPQUIET(infop, NULL);
+}
+
+#ifdef CONFIG_TEST
+/*
+ * __txn_id_set --
+ * Set the current transaction ID and current maximum unused ID (for
+ * testing purposes only).
+ *
+ * PUBLIC: int __txn_id_set __P((DB_ENV *, u_int32_t, u_int32_t));
+ */
int
-txn_stat(dbenv, statp, db_malloc)
+__txn_id_set(dbenv, cur_txnid, max_txnid)
DB_ENV *dbenv;
- DB_TXN_STAT **statp;
- void *(*db_malloc) __P((size_t));
+ u_int32_t cur_txnid, max_txnid;
{
DB_TXNMGR *mgr;
DB_TXNREGION *region;
- DB_TXN_STAT *stats;
- TXN_DETAIL *txnp;
- size_t nbytes;
- u_int32_t nactive, ndx;
- int ret, slop;
-
-#ifdef HAVE_RPC
- if (F_ISSET(dbenv, DB_ENV_RPCCLIENT))
- return (__dbcl_txn_stat(dbenv, statp, db_malloc));
-#endif
-
- PANIC_CHECK(dbenv);
- ENV_REQUIRES_CONFIG(dbenv, dbenv->tx_handle, DB_INIT_TXN);
+ int ret;
- *statp = NULL;
+ ENV_REQUIRES_CONFIG(dbenv, dbenv->tx_handle, "txn_id_set", DB_INIT_TXN);
- slop = 200;
mgr = dbenv->tx_handle;
region = mgr->reginfo.primary;
+ region->last_txnid = cur_txnid;
+ region->cur_maxid = max_txnid;
-retry: R_LOCK(dbenv, &mgr->reginfo);
- nactive = region->nactive;
- R_UNLOCK(dbenv, &mgr->reginfo);
-
- /*
- * Allocate extra active structures to handle any transactions that
- * are created while we have the region unlocked.
- */
- nbytes = sizeof(DB_TXN_STAT) + sizeof(DB_TXN_ACTIVE) * (nactive + slop);
- if ((ret = __os_malloc(dbenv, nbytes, db_malloc, &stats)) != 0)
- return (ret);
-
- R_LOCK(dbenv, &mgr->reginfo);
- stats->st_last_txnid = region->last_txnid;
- stats->st_last_ckp = region->last_ckp;
- stats->st_maxtxns = region->maxtxns;
- stats->st_naborts = region->naborts;
- stats->st_nbegins = region->nbegins;
- stats->st_ncommits = region->ncommits;
- stats->st_pending_ckp = region->pending_ckp;
- stats->st_time_ckp = region->time_ckp;
- stats->st_nactive = region->nactive;
- if (stats->st_nactive > nactive + 200) {
- R_UNLOCK(dbenv, &mgr->reginfo);
- slop *= 2;
- goto retry;
+ ret = 0;
+ if (cur_txnid < TXN_MINIMUM) {
+		__db_err(dbenv, "Current ID value %lu below minimum",
+		    (u_long)cur_txnid);
+ ret = EINVAL;
}
- stats->st_maxnactive = region->maxnactive;
- stats->st_txnarray = (DB_TXN_ACTIVE *)&stats[1];
-
- ndx = 0;
- for (txnp = SH_TAILQ_FIRST(&region->active_txn, __txn_detail);
- txnp != NULL;
- txnp = SH_TAILQ_NEXT(txnp, links, __txn_detail)) {
- stats->st_txnarray[ndx].txnid = txnp->txnid;
- if (txnp->parent == INVALID_ROFF)
- stats->st_txnarray[ndx].parentid = TXN_INVALID_ID;
- else
- stats->st_txnarray[ndx].parentid =
- ((TXN_DETAIL *)R_ADDR(&mgr->reginfo,
- txnp->parent))->txnid;
- stats->st_txnarray[ndx].lsn = txnp->begin_lsn;
- ndx++;
-
- if (ndx >= stats->st_nactive)
- break;
+ if (max_txnid < TXN_MINIMUM) {
+		__db_err(dbenv, "Maximum ID value %lu below minimum",
+		    (u_long)max_txnid);
+ ret = EINVAL;
}
-
- stats->st_region_wait = mgr->reginfo.rp->mutex.mutex_set_wait;
- stats->st_region_nowait = mgr->reginfo.rp->mutex.mutex_set_nowait;
- stats->st_regsize = mgr->reginfo.rp->size;
-
- R_UNLOCK(dbenv, &mgr->reginfo);
-
- *statp = stats;
- return (0);
+ return (ret);
}
+#endif
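The CONFIG_TEST-only __txn_id_set hook above exists so tests can push the transaction ID allocator toward its limits. A minimal sketch of how a test harness might use it, assuming a CONFIG_TEST build, an already-opened transactional DB_ENV, and the TXN_MAXIMUM constant from dbinc/txn.h; the helper name force_txnid_wrap is hypothetical, and whether the wrap actually succeeds depends on the environment's checkpoint/recovery state:

#include "db_int.h"
#include "dbinc/txn.h"

#ifdef CONFIG_TEST
/* Hypothetical test helper: start allocating IDs just below the maximum. */
static int
force_txnid_wrap(DB_ENV *dbenv)
{
	DB_TXN *txn;
	int i, ret;

	/* Next ID is TXN_MAXIMUM - 10; the maximum unused ID is TXN_MAXIMUM. */
	if ((ret = __txn_id_set(dbenv, TXN_MAXIMUM - 10, TXN_MAXIMUM)) != 0)
		return (ret);

	/* Begin and commit enough transactions to run past the maximum. */
	for (i = 0; i < 20; i++) {
		if ((ret = dbenv->txn_begin(dbenv, NULL, &txn, 0)) != 0)
			return (ret);
		if ((ret = txn->commit(txn, 0)) != 0)
			return (ret);
	}
	return (0);
}
#endif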
diff --git a/bdb/txn/txn_stat.c b/bdb/txn/txn_stat.c
new file mode 100644
index 00000000000..f7d84e8f4c6
--- /dev/null
+++ b/bdb/txn/txn_stat.c
@@ -0,0 +1,102 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: txn_stat.c,v 11.15 2002/04/26 23:00:36 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/txn.h"
+
+/*
+ * __txn_stat --
+ *
+ * PUBLIC: int __txn_stat __P((DB_ENV *, DB_TXN_STAT **, u_int32_t));
+ */
+int
+__txn_stat(dbenv, statp, flags)
+ DB_ENV *dbenv;
+ DB_TXN_STAT **statp;
+ u_int32_t flags;
+{
+ DB_TXNMGR *mgr;
+ DB_TXNREGION *region;
+ DB_TXN_STAT *stats;
+ TXN_DETAIL *txnp;
+ size_t nbytes;
+ u_int32_t ndx;
+ int ret;
+
+ PANIC_CHECK(dbenv);
+ ENV_REQUIRES_CONFIG(dbenv, dbenv->tx_handle, "txn_stat", DB_INIT_TXN);
+
+ *statp = NULL;
+ if ((ret = __db_fchk(dbenv,
+ "DB_ENV->txn_stat", flags, DB_STAT_CLEAR)) != 0)
+ return (ret);
+
+ mgr = dbenv->tx_handle;
+ region = mgr->reginfo.primary;
+
+ /*
+ * Allocate for the maximum active transactions -- the DB_TXN_ACTIVE
+ * struct is small and the maximum number of active transactions is
+	 * not going to be that large.  We don't have to lock anything to
+	 * read the region's maximum-active-transactions value; it's
+	 * read-only and never changes after the region is created.
+ */
+ nbytes = sizeof(DB_TXN_STAT) + sizeof(DB_TXN_ACTIVE) * region->maxtxns;
+ if ((ret = __os_umalloc(dbenv, nbytes, &stats)) != 0)
+ return (ret);
+
+ R_LOCK(dbenv, &mgr->reginfo);
+ memcpy(stats, &region->stat, sizeof(*stats));
+ stats->st_last_txnid = region->last_txnid;
+ stats->st_last_ckp = region->last_ckp;
+ stats->st_time_ckp = region->time_ckp;
+ stats->st_txnarray = (DB_TXN_ACTIVE *)&stats[1];
+
+ ndx = 0;
+ for (txnp = SH_TAILQ_FIRST(&region->active_txn, __txn_detail);
+ txnp != NULL;
+ txnp = SH_TAILQ_NEXT(txnp, links, __txn_detail)) {
+ stats->st_txnarray[ndx].txnid = txnp->txnid;
+ if (txnp->parent == INVALID_ROFF)
+ stats->st_txnarray[ndx].parentid = TXN_INVALID;
+ else
+ stats->st_txnarray[ndx].parentid =
+ ((TXN_DETAIL *)R_ADDR(&mgr->reginfo,
+ txnp->parent))->txnid;
+ stats->st_txnarray[ndx].lsn = txnp->begin_lsn;
+ ndx++;
+ }
+
+ stats->st_region_wait = mgr->reginfo.rp->mutex.mutex_set_wait;
+ stats->st_region_nowait = mgr->reginfo.rp->mutex.mutex_set_nowait;
+ stats->st_regsize = mgr->reginfo.rp->size;
+ if (LF_ISSET(DB_STAT_CLEAR)) {
+ mgr->reginfo.rp->mutex.mutex_set_wait = 0;
+ mgr->reginfo.rp->mutex.mutex_set_nowait = 0;
+ memset(&region->stat, 0, sizeof(region->stat));
+ region->stat.st_maxtxns = region->maxtxns;
+ region->stat.st_maxnactive =
+ region->stat.st_nactive = stats->st_nactive;
+ }
+
+ R_UNLOCK(dbenv, &mgr->reginfo);
+
+ *statp = stats;
+ return (0);
+}
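For context, a minimal sketch of how an application might consume the snapshot that __txn_stat assembles, through the public DB_ENV->txn_stat method; it assumes an already-opened transactional environment and the default allocator, so the returned buffer is released with free():

#include <stdio.h>
#include <stdlib.h>

#include <db.h>

static int
dump_txn_stats(DB_ENV *dbenv)
{
	DB_TXN_STAT *sp;
	u_int32_t i;
	int ret;

	/* Ask for a snapshot without clearing the counters. */
	if ((ret = dbenv->txn_stat(dbenv, &sp, 0)) != 0) {
		dbenv->err(dbenv, ret, "DB_ENV->txn_stat");
		return (ret);
	}

	printf("active %lu (max %lu), begun %lu, committed %lu, aborted %lu\n",
	    (unsigned long)sp->st_nactive, (unsigned long)sp->st_maxnactive,
	    (unsigned long)sp->st_nbegins, (unsigned long)sp->st_ncommits,
	    (unsigned long)sp->st_naborts);
	for (i = 0; i < sp->st_nactive; i++)
		printf("txn 0x%lx parent 0x%lx began at [%lu][%lu]\n",
		    (unsigned long)sp->st_txnarray[i].txnid,
		    (unsigned long)sp->st_txnarray[i].parentid,
		    (unsigned long)sp->st_txnarray[i].lsn.file,
		    (unsigned long)sp->st_txnarray[i].lsn.offset);

	/* The statistics buffer is allocated on the caller's behalf. */
	free(sp);
	return (0);
}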
diff --git a/bdb/txn/txn_util.c b/bdb/txn/txn_util.c
new file mode 100644
index 00000000000..cbfbc419615
--- /dev/null
+++ b/bdb/txn/txn_util.c
@@ -0,0 +1,234 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2001-2002
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "$Id: txn_util.c,v 11.18 2002/08/06 06:25:12 bostic Exp $";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "dbinc/db_shash.h"
+#include "dbinc/lock.h"
+#include "dbinc/txn.h"
+
+typedef struct __txn_event TXN_EVENT;
+struct __txn_event {
+ TXN_EVENT_T op;
+ TAILQ_ENTRY(__txn_event) links;
+ union {
+ struct {
+ /* Delayed remove. */
+ char *name;
+ u_int8_t *fileid;
+ } r;
+ struct {
+ /* Lock event. */
+ DB_LOCK lock;
+ u_int32_t locker;
+ DB *dbp;
+ } t;
+ } u;
+};
+
+/*
+ * __txn_remevent --
+ *
+ * Creates a remove event that can be added to the commit list.
+ *
+ * PUBLIC: int __txn_remevent __P((DB_ENV *,
+ * PUBLIC: DB_TXN *, const char *, u_int8_t*));
+ */
+int
+__txn_remevent(dbenv, txn, name, fileid)
+ DB_ENV *dbenv;
+ DB_TXN *txn;
+ const char *name;
+ u_int8_t *fileid;
+{
+ int ret;
+ TXN_EVENT *e;
+
+ e = NULL;
+ if ((ret = __os_calloc(dbenv, 1, sizeof(TXN_EVENT), &e)) != 0)
+ return (ret);
+
+ if ((ret = __os_strdup(dbenv, name, &e->u.r.name)) != 0)
+ goto err;
+
+	if (fileid != NULL) {
+		if ((ret = __os_calloc(dbenv,
+		    1, DB_FILE_ID_LEN, &e->u.r.fileid)) != 0) {
+			/* Don't leak the event or the name copied above. */
+			__os_free(dbenv, e->u.r.name);
+			goto err;
+		}
+		memcpy(e->u.r.fileid, fileid, DB_FILE_ID_LEN);
+	}
+
+ e->op = TXN_REMOVE;
+ TAILQ_INSERT_TAIL(&txn->events, e, links);
+
+ return (0);
+
+err: if (e != NULL)
+ __os_free(dbenv, e);
+
+ return (ret);
+}
+
+/*
+ * __txn_lockevent --
+ *
+ * Add a lock event to the commit queue.  The lock event indicates a locker
+ * trade.
+ *
+ * PUBLIC: int __txn_lockevent __P((DB_ENV *,
+ * PUBLIC: DB_TXN *, DB *, DB_LOCK *, u_int32_t));
+ */
+int
+__txn_lockevent(dbenv, txn, dbp, lock, locker)
+ DB_ENV *dbenv;
+ DB_TXN *txn;
+ DB *dbp;
+ DB_LOCK *lock;
+ u_int32_t locker;
+{
+ int ret;
+ TXN_EVENT *e;
+
+ if (!LOCKING_ON(dbenv))
+ return (0);
+
+ e = NULL;
+ if ((ret = __os_calloc(dbenv, 1, sizeof(TXN_EVENT), &e)) != 0)
+ return (ret);
+
+ e->u.t.locker = locker;
+ e->u.t.lock = *lock;
+ e->u.t.dbp = dbp;
+ e->op = TXN_TRADE;
+ TAILQ_INSERT_TAIL(&txn->events, e, links);
+
+ return (0);
+}
+
+/*
+ * __txn_remlock --
+ * Remove a lock event because the locker is going away. We can remove
+ * by lock (using offset) or by locker_id (or by both).
+ *
+ * PUBLIC: void __txn_remlock __P((DB_ENV *, DB_TXN *, DB_LOCK *, u_int32_t));
+ */
+void
+__txn_remlock(dbenv, txn, lock, locker)
+ DB_ENV *dbenv;
+ DB_TXN *txn;
+ DB_LOCK *lock;
+ u_int32_t locker;
+{
+ TXN_EVENT *e, *next_e;
+
+ for (e = TAILQ_FIRST(&txn->events); e != NULL; e = next_e) {
+ next_e = TAILQ_NEXT(e, links);
+ if ((e->op != TXN_TRADE && e->op != TXN_TRADED) ||
+ (e->u.t.lock.off != lock->off && e->u.t.locker != locker))
+ continue;
+ TAILQ_REMOVE(&txn->events, e, links);
+ __os_free(dbenv, e);
+ }
+
+ return;
+}
+
+/*
+ * __txn_doevents --
+ * Process the list of events associated with a transaction. On commit,
+ * apply the events; on abort, just toss the entries.
+ *
+ * PUBLIC: int __txn_doevents __P((DB_ENV *, DB_TXN *, int, int));
+ */
+#define DO_TRADE do { \
+ memset(&req, 0, sizeof(req)); \
+ req.lock = e->u.t.lock; \
+ req.op = DB_LOCK_TRADE; \
+ t_ret = __lock_vec(dbenv, e->u.t.locker, 0, &req, 1, NULL); \
+ if (t_ret == 0) \
+ e->u.t.dbp->cur_lid = e->u.t.locker; \
+ else if (t_ret == DB_NOTFOUND) \
+ t_ret = 0; \
+ if (t_ret != 0 && ret == 0) \
+ ret = t_ret; \
+ e->op = TXN_TRADED; \
+} while (0)
+
+int
+__txn_doevents(dbenv, txn, is_commit, preprocess)
+ DB_ENV *dbenv;
+ DB_TXN *txn;
+ int is_commit, preprocess;
+{
+ DB_LOCKREQ req;
+ TXN_EVENT *e;
+ int ret, t_ret;
+
+ ret = 0;
+
+ /*
+	 * The preprocess pass is only made on code paths that release
+	 * read locks early.  Since not all paths make that pass, we also
+	 * have to handle untraded locks below.  When we do perform the
+	 * trade, we update the entry's opcode so that we don't attempt
+	 * the trade a second time.
+ */
+ if (preprocess) {
+ for (e = TAILQ_FIRST(&txn->events);
+ e != NULL; e = TAILQ_NEXT(e, links)) {
+ if (e->op != TXN_TRADE)
+ continue;
+ DO_TRADE;
+ }
+ return (ret);
+ }
+
+ while ((e = TAILQ_FIRST(&txn->events)) != NULL) {
+ TAILQ_REMOVE(&txn->events, e, links);
+ if (!is_commit)
+ goto dofree;
+ switch (e->op) {
+ case TXN_REMOVE:
+ if (e->u.r.fileid != NULL) {
+ if ((t_ret = dbenv->memp_nameop(dbenv,
+ e->u.r.fileid,
+ NULL, e->u.r.name, NULL)) != 0 && ret == 0)
+ ret = t_ret;
+ __os_free(dbenv, e->u.r.fileid);
+ } else if ((t_ret =
+ __os_unlink(dbenv, e->u.r.name)) != 0 && ret == 0)
+ ret = t_ret;
+ __os_free(dbenv, e->u.r.name);
+ break;
+ case TXN_TRADE:
+ DO_TRADE;
+ /* Fall through */
+ case TXN_TRADED:
+ /* Downgrade the lock. */
+ if ((t_ret = __lock_downgrade(dbenv,
+ &e->u.t.lock, DB_LOCK_READ, 0)) != 0 && ret == 0)
+ ret = t_ret;
+ break;
+ default:
+ /* This had better never happen. */
+ DB_ASSERT(0);
+ }
+dofree: __os_free(dbenv, e);
+ }
+
+ return (ret);
+}
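The new txn_util.c boils down to one pattern: side effects that must not become visible until commit (file removes, locker trades) are queued on the transaction's event list and resolved by __txn_doevents. A standalone sketch of the same defer-until-commit idea, independent of the Berkeley DB types; the struct names and helpers here are illustrative only:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* One deferred action, kept on a per-transaction list. */
struct event {
	struct event *next;
	char *path;			/* File to unlink at commit. */
};

struct txn {
	struct event *events;		/* Head of the deferred-action list. */
};

/* Register a deferred remove; nothing touches the filesystem yet. */
static int
defer_remove(struct txn *txn, const char *path)
{
	struct event *e;

	if ((e = calloc(1, sizeof(*e))) == NULL)
		return (-1);
	if ((e->path = strdup(path)) == NULL) {
		free(e);
		return (-1);
	}
	e->next = txn->events;
	txn->events = e;
	return (0);
}

/* On commit apply the events; on abort just discard them. */
static void
resolve_events(struct txn *txn, int is_commit)
{
	struct event *e, *next;

	for (e = txn->events; e != NULL; e = next) {
		next = e->next;
		if (is_commit)
			(void)remove(e->path);
		free(e->path);
		free(e);
	}
	txn->events = NULL;
}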
diff --git a/bdb/xa/xa.c b/bdb/xa/xa.c
index b13a6d503b3..6667d14c2bf 100644
--- a/bdb/xa/xa.c
+++ b/bdb/xa/xa.c
@@ -1,14 +1,14 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1998, 1999, 2000
+ * Copyright (c) 1998-2002
* Sleepycat Software. All rights reserved.
*/
#include "db_config.h"
#ifndef lint
-static const char revid[] = "$Id: xa.c,v 11.10 2000/12/14 07:39:14 ubell Exp $";
+static const char revid[] = "$Id: xa.c,v 11.23 2002/08/29 14:22:25 margo Exp $";
#endif /* not lint */
#ifndef NO_SYSTEM_INCLUDES
@@ -19,11 +19,7 @@ static const char revid[] = "$Id: xa.c,v 11.10 2000/12/14 07:39:14 ubell Exp $";
#endif
#include "db_int.h"
-#include "db_page.h"
-#include "log.h"
-#include "txn.h"
-#include "db_am.h"
-#include "db_dispatch.h"
+#include "dbinc/txn.h"
static int __db_xa_close __P((char *, int, long));
static int __db_xa_commit __P((XID *, int, long));
@@ -35,8 +31,7 @@ static int __db_xa_prepare __P((XID *, int, long));
static int __db_xa_recover __P((XID *, long, int, long));
static int __db_xa_rollback __P((XID *, int, long));
static int __db_xa_start __P((XID *, int, long));
-static void __xa_txn_end __P((DB_ENV *));
-static void __xa_txn_init __P((DB_ENV *, TXN_DETAIL *, size_t));
+static void __xa_txn_end __P((DB_TXN *));
/*
* Possible flag values:
@@ -158,7 +153,7 @@ __db_xa_close(xa_info, rmid, flags)
/* Discard space held for the current transaction. */
if (env->xa_txn != NULL)
- __os_free(env->xa_txn, sizeof(DB_TXN));
+ __os_free(env, env->xa_txn);
/* Close the environment. */
if ((t_ret = env->close(env, 0)) != 0 && ret == 0)
@@ -220,7 +215,7 @@ __db_xa_start(xid, rmid, flags)
return (XA_RBOTHER);
/* Now, fill in the global transaction structure. */
- __xa_txn_init(env, td, off);
+ __txn_continue(env, env->xa_txn, td, off);
td->xa_status = TXN_XA_STARTED;
} else {
if (__txn_xa_begin(env, env->xa_txn) != 0)
@@ -327,15 +322,15 @@ __db_xa_prepare(xid, rmid, flags)
return (XAER_PROTO);
/* Now, fill in the global transaction structure. */
- __xa_txn_init(env, td, off);
+ __txn_continue(env, env->xa_txn, td, off);
- if (txn_prepare(env->xa_txn) != 0)
+ if (env->xa_txn->prepare(env->xa_txn, (u_int8_t *)xid->data) != 0)
return (XAER_RMERR);
td->xa_status = TXN_XA_PREPARED;
/* No fatal value that would require an XAER_RMFAIL. */
- __xa_txn_end(env);
+ __xa_txn_end(env->xa_txn);
return (XA_OK);
}
@@ -385,13 +380,13 @@ __db_xa_commit(xid, rmid, flags)
return (XAER_PROTO);
/* Now, fill in the global transaction structure. */
- __xa_txn_init(env, td, off);
+ __txn_continue(env, env->xa_txn, td, off);
- if (txn_commit(env->xa_txn, 0) != 0)
+ if (env->xa_txn->commit(env->xa_txn, 0) != 0)
return (XAER_RMERR);
/* No fatal value that would require an XAER_RMFAIL. */
- __xa_txn_end(env);
+ __xa_txn_end(env->xa_txn);
return (XA_OK);
}
@@ -409,118 +404,26 @@ __db_xa_recover(xids, count, rmid, flags)
long count, flags;
int rmid;
{
- __txn_xa_regop_args *argp;
- DBT data;
DB_ENV *env;
- DB_LOG *log;
- XID *xidp;
- int err, ret;
- u_int32_t rectype, txnid;
-
- ret = 0;
- xidp = xids;
+ u_int32_t newflags;
+ long rval;
/* If the environment is closed, then we're done. */
if (__db_rmid_to_env(rmid, &env) != 0)
return (XAER_PROTO);
- /*
- * If we are starting a scan, then we need to figure out where
- * to begin. If we are not starting a scan, we'll start from
- * wherever the log cursor is. Since XA apps cannot be threaded,
- * we don't have to worry about someone else having moved it.
- */
- log = env->lg_handle;
- if (LF_ISSET(TMSTARTRSCAN)) {
- if ((err = __log_findckp(env, &log->xa_first)) == DB_NOTFOUND) {
- /*
- * If there were no log files, then we have no
- * transactions to return, so we simply return 0.
- */
- return (0);
- }
- if ((err = __db_txnlist_init(env, &log->xa_info)) != 0)
- return (XAER_RMERR);
- } else {
- /*
- * If we are not starting a scan, the log cursor had
- * better be set.
- */
- if (IS_ZERO_LSN(log->xa_lsn))
- return (XAER_PROTO);
- }
-
- /*
- * At this point log->xa_first contains the point in the log
- * to which we need to roll back. If we are starting a scan,
- * we'll start at the last record; if we're continuing a scan,
- * we'll have to start at log->xa_lsn.
- */
-
- memset(&data, 0, sizeof(data));
- for (err = log_get(env, &log->xa_lsn, &data,
- LF_ISSET(TMSTARTRSCAN) ? DB_LAST : DB_SET);
- err == 0 && log_compare(&log->xa_lsn, &log->xa_first) > 0;
- err = log_get(env, &log->xa_lsn, &data, DB_PREV)) {
- memcpy(&rectype, data.data, sizeof(rectype));
-
- /*
- * The only record type we care about is an DB_txn_xa_regop.
- * If it's a commit, we have to add it to a txnlist. If it's
- * a prepare, and we don't have a commit, then we return it.
- * We are redoing some of what's in the xa_regop_recovery
- * code, but we have to do it here so we can get at the xid
- * in the record.
- */
- if (rectype != DB_txn_xa_regop && rectype != DB_txn_regop)
- continue;
-
- memcpy(&txnid, (u_int8_t *)data.data + sizeof(rectype),
- sizeof(txnid));
- err = __db_txnlist_find(log->xa_info, txnid);
- switch (rectype) {
- case DB_txn_regop:
- if (err == DB_NOTFOUND)
- __db_txnlist_add(env, log->xa_info, txnid, 0);
- err = 0;
- break;
- case DB_txn_xa_regop:
- /*
- * This transaction is committed, so we needn't read
- * the record and do anything.
- */
- if (err == 0)
- break;
- if ((err =
- __txn_xa_regop_read(env, data.data, &argp)) != 0) {
- ret = XAER_RMERR;
- goto out;
- }
-
- xidp->formatID = argp->formatID;
- xidp->gtrid_length = argp->gtrid;
- xidp->bqual_length = argp->bqual;
- memcpy(xidp->data, argp->xid.data, argp->xid.size);
- ret++;
- xidp++;
- __os_free(argp, sizeof(*argp));
- if (ret == count)
- goto done;
- break;
- }
- }
-
- if (err != 0 && err != DB_NOTFOUND)
- goto out;
-
-done: if (LF_ISSET(TMENDRSCAN)) {
- ZERO_LSN(log->xa_lsn);
- ZERO_LSN(log->xa_first);
+ if (LF_ISSET(TMSTARTRSCAN))
+ newflags = DB_FIRST;
+ else if (LF_ISSET(TMENDRSCAN))
+ newflags = DB_LAST;
+ else
+ newflags = DB_NEXT;
-out: __db_txnlist_end(env, log->xa_info);
- log->xa_info = NULL;
- }
- return (ret);
+ rval = 0;
+ if (__txn_get_prepared(env, xids, NULL, count, &rval, newflags) != 0)
+ return (XAER_RMERR);
+ else
+ return (rval);
}
/*
@@ -560,12 +463,12 @@ __db_xa_rollback(xid, rmid, flags)
return (XAER_PROTO);
/* Now, fill in the global transaction structure. */
- __xa_txn_init(env, td, off);
- if (txn_abort(env->xa_txn) != 0)
+ __txn_continue(env, env->xa_txn, td, off);
+ if (env->xa_txn->abort(env->xa_txn) != 0)
return (XAER_RMERR);
/* No fatal value that would require an XAER_RMFAIL. */
- __xa_txn_end(env);
+ __xa_txn_end(env->xa_txn);
return (XA_OK);
}
@@ -624,38 +527,13 @@ __db_xa_complete(handle, retval, rmid, flags)
}
/*
- * __xa_txn_init --
- * Fill in the fields of the local transaction structure given
- * the detail transaction structure.
- */
-static void
-__xa_txn_init(env, td, off)
- DB_ENV *env;
- TXN_DETAIL *td;
- size_t off;
-{
- DB_TXN *txn;
-
- txn = env->xa_txn;
- txn->mgrp = env->tx_handle;
- txn->parent = NULL;
- txn->last_lsn = td->last_lsn;
- txn->txnid = td->txnid;
- txn->off = off;
- txn->flags = 0;
-}
-
-/*
* __xa_txn_end --
- * Invalidate a transaction structure that was generated by xa_txn_init.
+ * Invalidate a transaction structure that was generated by __txn_continue.
*/
static void
-__xa_txn_end(env)
- DB_ENV *env;
-{
+__xa_txn_end(txn)
DB_TXN *txn;
-
- txn = env->xa_txn;
+{
if (txn != NULL)
txn->txnid = TXN_INVALID;
}
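With the rewrite above, __db_xa_recover no longer scans the log itself; it maps the XA scan flags onto cursor-style flags (TMSTARTRSCAN to DB_FIRST, TMENDRSCAN to DB_LAST, otherwise DB_NEXT) and lets __txn_get_prepared walk the prepared-transaction list. A sketch of how a transaction manager might drive that entry point in a single call, assuming an xa.h that supplies the standard xa_switch_t layout and TM* flags, and Berkeley DB's exported db_xa_switch structure:

#include <xa.h>

extern const struct xa_switch_t db_xa_switch;	/* Assumed exported name. */

/* One-shot recovery scan: open and close the scan in the same call. */
static int
scan_prepared(int rmid, XID *xids, long max)
{
	/*
	 * TMSTARTRSCAN|TMENDRSCAN becomes a DB_FIRST pass over the
	 * prepared-transaction list; the return value is the number of
	 * XIDs filled in, or a negative XAER_* code on failure.
	 */
	return (db_xa_switch.xa_recover_entry(xids, max, rmid,
	    TMSTARTRSCAN | TMENDRSCAN));
}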
diff --git a/bdb/xa/xa_db.c b/bdb/xa/xa_db.c
index ba3dbfeddbb..b84bb1c9fa9 100644
--- a/bdb/xa/xa_db.c
+++ b/bdb/xa/xa_db.c
@@ -1,14 +1,14 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1998, 1999, 2000
+ * Copyright (c) 1998-2002
* Sleepycat Software. All rights reserved.
*/
#include "db_config.h"
#ifndef lint
-static const char revid[] = "$Id: xa_db.c,v 11.9 2000/09/06 18:57:59 ubell Exp $";
+static const char revid[] = "$Id: xa_db.c,v 11.21 2002/08/29 14:22:25 margo Exp $";
#endif /* not lint */
#ifndef NO_SYSTEM_INCLUDES
@@ -16,15 +16,14 @@ static const char revid[] = "$Id: xa_db.c,v 11.9 2000/09/06 18:57:59 ubell Exp $
#endif
#include "db_int.h"
-#include "xa.h"
-#include "xa_ext.h"
-#include "txn.h"
+#include "dbinc/xa.h"
+#include "dbinc/txn.h"
static int __xa_close __P((DB *, u_int32_t));
static int __xa_cursor __P((DB *, DB_TXN *, DBC **, u_int32_t));
static int __xa_del __P((DB *, DB_TXN *, DBT *, u_int32_t));
static int __xa_get __P((DB *, DB_TXN *, DBT *, DBT *, u_int32_t));
-static int __xa_open __P((DB *,
+static int __xa_open __P((DB *, DB_TXN *,
const char *, const char *, DBTYPE, u_int32_t, int));
static int __xa_put __P((DB *, DB_TXN *, DBT *, DBT *, u_int32_t));
@@ -33,7 +32,7 @@ typedef struct __xa_methods {
int (*cursor) __P((DB *, DB_TXN *, DBC **, u_int32_t));
int (*del) __P((DB *, DB_TXN *, DBT *, u_int32_t));
int (*get) __P((DB *, DB_TXN *, DBT *, DBT *, u_int32_t));
- int (*open) __P((DB *,
+ int (*open) __P((DB *, DB_TXN *,
const char *, const char *, DBTYPE, u_int32_t, int));
int (*put) __P((DB *, DB_TXN *, DBT *, DBT *, u_int32_t));
} XA_METHODS;
@@ -73,8 +72,9 @@ __db_xa_create(dbp)
*/
static int
-__xa_open(dbp, name, subdb, type, flags, mode)
+__xa_open(dbp, txn, name, subdb, type, flags, mode)
DB *dbp;
+ DB_TXN *txn;
const char *name, *subdb;
DBTYPE type;
u_int32_t flags;
@@ -85,7 +85,7 @@ __xa_open(dbp, name, subdb, type, flags, mode)
xam = (XA_METHODS *)dbp->xa_internal;
- if ((ret = xam->open(dbp, name, subdb, type, flags, mode)) != 0)
+ if ((ret = xam->open(dbp, txn, name, subdb, type, flags, mode)) != 0)
return (ret);
xam->cursor = dbp->cursor;
@@ -109,7 +109,7 @@ __xa_cursor(dbp, txn, dbcp, flags)
{
DB_TXN *t;
- t = txn != NULL && txn == dbp->open_txn ? txn : dbp->dbenv->xa_txn;
+ t = txn != NULL ? txn : dbp->dbenv->xa_txn;
if (t->txnid == TXN_INVALID)
t = NULL;
@@ -125,7 +125,7 @@ __xa_del(dbp, txn, key, flags)
{
DB_TXN *t;
- t = txn != NULL && txn == dbp->open_txn ? txn : dbp->dbenv->xa_txn;
+ t = txn != NULL ? txn : dbp->dbenv->xa_txn;
if (t->txnid == TXN_INVALID)
t = NULL;
@@ -141,7 +141,7 @@ __xa_close(dbp, flags)
real_close = ((XA_METHODS *)dbp->xa_internal)->close;
- __os_free(dbp->xa_internal, sizeof(XA_METHODS));
+ __os_free(dbp->dbenv, dbp->xa_internal);
dbp->xa_internal = NULL;
return (real_close(dbp, flags));
@@ -156,7 +156,7 @@ __xa_get(dbp, txn, key, data, flags)
{
DB_TXN *t;
- t = txn != NULL && txn == dbp->open_txn ? txn : dbp->dbenv->xa_txn;
+ t = txn != NULL ? txn : dbp->dbenv->xa_txn;
if (t->txnid == TXN_INVALID)
t = NULL;
@@ -173,7 +173,7 @@ __xa_put(dbp, txn, key, data, flags)
{
DB_TXN *t;
- t = txn != NULL && txn == dbp->open_txn ? txn : dbp->dbenv->xa_txn;
+ t = txn != NULL ? txn : dbp->dbenv->xa_txn;
if (t->txnid == TXN_INVALID)
t = NULL;
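All of the __xa_* wrappers above now share one defaulting rule: use an explicitly passed transaction handle if there is one, otherwise fall back to the environment's XA transaction, and treat a TXN_INVALID id as "no transaction". A sketch that isolates that rule into a single hypothetical helper (xa_active_txn is not a real function; TXN_INVALID comes from the internal dbinc/txn.h):

#include "db_int.h"
#include "dbinc/txn.h"

/* Hypothetical helper capturing the handle-defaulting rule used above. */
static DB_TXN *
xa_active_txn(DB *dbp, DB_TXN *txn)
{
	DB_TXN *t;

	/* Prefer the caller's handle, else the environment's XA transaction. */
	t = txn != NULL ? txn : dbp->dbenv->xa_txn;

	/* Outside an XA association the slot is marked TXN_INVALID. */
	return (t->txnid == TXN_INVALID ? NULL : t);
}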
diff --git a/bdb/xa/xa_map.c b/bdb/xa/xa_map.c
index 1af268477db..42fa4b20ed2 100644
--- a/bdb/xa/xa_map.c
+++ b/bdb/xa/xa_map.c
@@ -1,14 +1,14 @@
/*-
* See the file LICENSE for redistribution information.
*
- * Copyright (c) 1996, 1997, 1998, 1999, 2000
+ * Copyright (c) 1996-2002
* Sleepycat Software. All rights reserved.
*/
#include "db_config.h"
#ifndef lint
-static const char revid[] = "$Id: xa_map.c,v 11.5 2000/11/30 00:58:46 ubell Exp $";
+static const char revid[] = "$Id: xa_map.c,v 11.19 2002/09/03 14:58:27 sue Exp $";
#endif /* not lint */
#ifndef NO_SYSTEM_INCLUDES
@@ -18,7 +18,7 @@ static const char revid[] = "$Id: xa_map.c,v 11.5 2000/11/30 00:58:46 ubell Exp
#endif
#include "db_int.h"
-#include "txn.h"
+#include "dbinc/txn.h"
/*
* This file contains all the mapping information that we need to support
@@ -72,31 +72,9 @@ __db_xid_to_txn(dbenv, xid, offp)
XID *xid;
size_t *offp;
{
- DB_TXNMGR *mgr;
- DB_TXNREGION *tmr;
struct __txn_detail *td;
- mgr = dbenv->tx_handle;
- tmr = mgr->reginfo.primary;
-
- /*
- * Search the internal active transaction table to find the
- * matching xid. If this is a performance hit, then we
- * can create a hash table, but I doubt it's worth it.
- */
- R_LOCK(dbenv, &mgr->reginfo);
- for (td = SH_TAILQ_FIRST(&tmr->active_txn, __txn_detail);
- td != NULL;
- td = SH_TAILQ_NEXT(td, links, __txn_detail))
- if (memcmp(xid->data, td->xid, XIDDATASIZE) == 0)
- break;
- R_UNLOCK(dbenv, &mgr->reginfo);
-
- if (td == NULL)
- return (EINVAL);
-
- *offp = R_OFFSET(&mgr->reginfo, td);
- return (0);
+ return (__txn_map_gid(dbenv, (u_int8_t *)xid->data, &td, offp));
}
/*